diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 00000000000..55395b7b952
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,718 @@
+root = true
+
+[*]
+charset = utf-8
+end_of_line = lf
+indent_size = 4
+indent_style = space
+insert_final_newline = true
+max_line_length = 140
+tab_width = 4
+ij_continuation_indent_size = 8
+ij_formatter_off_tag = @formatter:off
+ij_formatter_on_tag = @formatter:on
+ij_formatter_tags_enabled = false
+ij_smart_tabs = false
+ij_visual_guides = none
+ij_wrap_on_typing = true
+
+[*.java]
+ij_java_align_consecutive_assignments = false
+ij_java_align_consecutive_variable_declarations = false
+ij_java_align_group_field_declarations = false
+ij_java_align_multiline_annotation_parameters = false
+ij_java_align_multiline_array_initializer_expression = false
+ij_java_align_multiline_assignment = false
+ij_java_align_multiline_binary_operation = false
+ij_java_align_multiline_chained_methods = false
+ij_java_align_multiline_extends_list = false
+ij_java_align_multiline_for = true
+ij_java_align_multiline_method_parentheses = false
+ij_java_align_multiline_parameters = false
+ij_java_align_multiline_parameters_in_calls = false
+ij_java_align_multiline_parenthesized_expression = false
+ij_java_align_multiline_records = true
+ij_java_align_multiline_resources = true
+ij_java_align_multiline_ternary_operation = false
+ij_java_align_multiline_text_blocks = false
+ij_java_align_multiline_throws_list = false
+ij_java_align_subsequent_simple_methods = false
+ij_java_align_throws_keyword = false
+ij_java_annotation_parameter_wrap = off
+ij_java_array_initializer_new_line_after_left_brace = false
+ij_java_array_initializer_right_brace_on_new_line = false
+ij_java_array_initializer_wrap = off
+ij_java_assert_statement_colon_on_next_line = false
+ij_java_assert_statement_wrap = off
+ij_java_assignment_wrap = off
+ij_java_binary_operation_sign_on_next_line = false
+ij_java_binary_operation_wrap = off
+ij_java_blank_lines_after_anonymous_class_header = 0
+ij_java_blank_lines_after_class_header = 0
+ij_java_blank_lines_after_imports = 1
+ij_java_blank_lines_after_package = 1
+ij_java_blank_lines_around_class = 1
+ij_java_blank_lines_around_field = 0
+ij_java_blank_lines_around_field_in_interface = 0
+ij_java_blank_lines_around_initializer = 1
+ij_java_blank_lines_around_method = 1
+ij_java_blank_lines_around_method_in_interface = 1
+ij_java_blank_lines_before_class_end = 0
+ij_java_blank_lines_before_imports = 1
+ij_java_blank_lines_before_method_body = 0
+ij_java_blank_lines_before_package = 0
+ij_java_block_brace_style = end_of_line
+ij_java_block_comment_at_first_column = true
+ij_java_call_parameters_new_line_after_left_paren = false
+ij_java_call_parameters_right_paren_on_new_line = false
+ij_java_call_parameters_wrap = off
+ij_java_case_statement_on_separate_line = true
+ij_java_catch_on_new_line = false
+ij_java_class_annotation_wrap = split_into_lines
+ij_java_class_brace_style = end_of_line
+ij_java_class_count_to_use_import_on_demand = 100
+ij_java_class_names_in_javadoc = 1
+ij_java_do_not_indent_top_level_class_members = false
+ij_java_do_not_wrap_after_single_annotation = false
+ij_java_do_while_brace_force = never
+ij_java_doc_add_blank_line_after_description = true
+ij_java_doc_add_blank_line_after_param_comments = false
+ij_java_doc_add_blank_line_after_return = false
+ij_java_doc_add_p_tag_on_empty_lines = true
+ij_java_doc_align_exception_comments = true
+ij_java_doc_align_param_comments = true
+ij_java_doc_do_not_wrap_if_one_line = false
+ij_java_doc_enable_formatting = true
+ij_java_doc_enable_leading_asterisks = true
+ij_java_doc_indent_on_continuation = false
+ij_java_doc_keep_empty_lines = true
+ij_java_doc_keep_empty_parameter_tag = true
+ij_java_doc_keep_empty_return_tag = true
+ij_java_doc_keep_empty_throws_tag = true
+ij_java_doc_keep_invalid_tags = true
+ij_java_doc_param_description_on_new_line = false
+ij_java_doc_preserve_line_breaks = false
+ij_java_doc_use_throws_not_exception_tag = true
+ij_java_else_on_new_line = false
+ij_java_entity_dd_suffix = EJB
+ij_java_entity_eb_suffix = Bean
+ij_java_entity_hi_suffix = Home
+ij_java_entity_lhi_prefix = Local
+ij_java_entity_lhi_suffix = Home
+ij_java_entity_li_prefix = Local
+ij_java_entity_pk_class = java.lang.String
+ij_java_entity_vo_suffix = VO
+ij_java_enum_constants_wrap = off
+ij_java_extends_keyword_wrap = off
+ij_java_extends_list_wrap = off
+ij_java_field_annotation_wrap = split_into_lines
+ij_java_finally_on_new_line = false
+ij_java_for_brace_force = never
+ij_java_for_statement_new_line_after_left_paren = false
+ij_java_for_statement_right_paren_on_new_line = false
+ij_java_for_statement_wrap = off
+ij_java_generate_final_locals = false
+ij_java_generate_final_parameters = true
+ij_java_if_brace_force = never
+ij_java_imports_layout = *,|,javax.**,java.**,|,$*
+ij_java_indent_case_from_switch = true
+ij_java_insert_inner_class_imports = false
+ij_java_insert_override_annotation = true
+ij_java_keep_blank_lines_before_right_brace = 2
+ij_java_keep_blank_lines_between_package_declaration_and_header = 2
+ij_java_keep_blank_lines_in_code = 2
+ij_java_keep_blank_lines_in_declarations = 2
+ij_java_keep_control_statement_in_one_line = true
+ij_java_keep_first_column_comment = true
+ij_java_keep_indents_on_empty_lines = false
+ij_java_keep_line_breaks = true
+ij_java_keep_multiple_expressions_in_one_line = false
+ij_java_keep_simple_blocks_in_one_line = false
+ij_java_keep_simple_classes_in_one_line = false
+ij_java_keep_simple_lambdas_in_one_line = false
+ij_java_keep_simple_methods_in_one_line = false
+ij_java_label_indent_absolute = false
+ij_java_label_indent_size = 0
+ij_java_lambda_brace_style = end_of_line
+ij_java_layout_static_imports_separately = true
+ij_java_line_comment_add_space = false
+ij_java_line_comment_at_first_column = true
+ij_java_message_dd_suffix = EJB
+ij_java_message_eb_suffix = Bean
+ij_java_method_annotation_wrap = split_into_lines
+ij_java_method_brace_style = end_of_line
+ij_java_method_call_chain_wrap = off
+ij_java_method_parameters_new_line_after_left_paren = false
+ij_java_method_parameters_right_paren_on_new_line = false
+ij_java_method_parameters_wrap = off
+ij_java_modifier_list_wrap = false
+ij_java_names_count_to_use_import_on_demand = 100
+ij_java_new_line_after_lparen_in_record_header = false
+ij_java_packages_to_use_import_on_demand = java.awt.*,javax.swing.*
+ij_java_parameter_annotation_wrap = off
+ij_java_parentheses_expression_new_line_after_left_paren = false
+ij_java_parentheses_expression_right_paren_on_new_line = false
+ij_java_place_assignment_sign_on_next_line = false
+ij_java_prefer_longer_names = true
+ij_java_prefer_parameters_wrap = false
+ij_java_record_components_wrap = normal
+ij_java_repeat_synchronized = true
+ij_java_replace_instanceof_and_cast = false
+ij_java_replace_null_check = true
+ij_java_replace_sum_lambda_with_method_ref = true
+ij_java_resource_list_new_line_after_left_paren = false
+ij_java_resource_list_right_paren_on_new_line = false
+ij_java_resource_list_wrap = off
+ij_java_rparen_on_new_line_in_record_header = false
+ij_java_session_dd_suffix = EJB
+ij_java_session_eb_suffix = Bean
+ij_java_session_hi_suffix = Home
+ij_java_session_lhi_prefix = Local
+ij_java_session_lhi_suffix = Home
+ij_java_session_li_prefix = Local
+ij_java_session_si_suffix = Service
+ij_java_space_after_closing_angle_bracket_in_type_argument = false
+ij_java_space_after_colon = true
+ij_java_space_after_comma = true
+ij_java_space_after_comma_in_type_arguments = true
+ij_java_space_after_for_semicolon = true
+ij_java_space_after_quest = true
+ij_java_space_after_type_cast = true
+ij_java_space_before_annotation_array_initializer_left_brace = false
+ij_java_space_before_annotation_parameter_list = false
+ij_java_space_before_array_initializer_left_brace = false
+ij_java_space_before_catch_keyword = true
+ij_java_space_before_catch_left_brace = true
+ij_java_space_before_catch_parentheses = true
+ij_java_space_before_class_left_brace = true
+ij_java_space_before_colon = true
+ij_java_space_before_colon_in_foreach = true
+ij_java_space_before_comma = false
+ij_java_space_before_do_left_brace = true
+ij_java_space_before_else_keyword = true
+ij_java_space_before_else_left_brace = true
+ij_java_space_before_finally_keyword = true
+ij_java_space_before_finally_left_brace = true
+ij_java_space_before_for_left_brace = true
+ij_java_space_before_for_parentheses = true
+ij_java_space_before_for_semicolon = false
+ij_java_space_before_if_left_brace = true
+ij_java_space_before_if_parentheses = true
+ij_java_space_before_method_call_parentheses = false
+ij_java_space_before_method_left_brace = true
+ij_java_space_before_method_parentheses = false
+ij_java_space_before_opening_angle_bracket_in_type_parameter = false
+ij_java_space_before_quest = true
+ij_java_space_before_switch_left_brace = true
+ij_java_space_before_switch_parentheses = true
+ij_java_space_before_synchronized_left_brace = true
+ij_java_space_before_synchronized_parentheses = true
+ij_java_space_before_try_left_brace = true
+ij_java_space_before_try_parentheses = true
+ij_java_space_before_type_parameter_list = false
+ij_java_space_before_while_keyword = true
+ij_java_space_before_while_left_brace = true
+ij_java_space_before_while_parentheses = true
+ij_java_space_inside_one_line_enum_braces = false
+ij_java_space_within_empty_array_initializer_braces = false
+ij_java_space_within_empty_method_call_parentheses = false
+ij_java_space_within_empty_method_parentheses = false
+ij_java_spaces_around_additive_operators = true
+ij_java_spaces_around_assignment_operators = true
+ij_java_spaces_around_bitwise_operators = true
+ij_java_spaces_around_equality_operators = true
+ij_java_spaces_around_lambda_arrow = true
+ij_java_spaces_around_logical_operators = true
+ij_java_spaces_around_method_ref_dbl_colon = false
+ij_java_spaces_around_multiplicative_operators = true
+ij_java_spaces_around_relational_operators = true
+ij_java_spaces_around_shift_operators = true
+ij_java_spaces_around_type_bounds_in_type_parameters = true
+ij_java_spaces_around_unary_operator = false
+ij_java_spaces_within_angle_brackets = false
+ij_java_spaces_within_annotation_parentheses = false
+ij_java_spaces_within_array_initializer_braces = false
+ij_java_spaces_within_braces = false
+ij_java_spaces_within_brackets = false
+ij_java_spaces_within_cast_parentheses = false
+ij_java_spaces_within_catch_parentheses = false
+ij_java_spaces_within_for_parentheses = false
+ij_java_spaces_within_if_parentheses = false
+ij_java_spaces_within_method_call_parentheses = false
+ij_java_spaces_within_method_parentheses = false
+ij_java_spaces_within_parentheses = false
+ij_java_spaces_within_record_header = false
+ij_java_spaces_within_switch_parentheses = false
+ij_java_spaces_within_synchronized_parentheses = false
+ij_java_spaces_within_try_parentheses = false
+ij_java_spaces_within_while_parentheses = false
+ij_java_special_else_if_treatment = true
+ij_java_subclass_name_suffix = Impl
+ij_java_ternary_operation_signs_on_next_line = false
+ij_java_ternary_operation_wrap = off
+ij_java_test_name_suffix = Test
+ij_java_throws_keyword_wrap = off
+ij_java_throws_list_wrap = off
+ij_java_use_external_annotations = false
+ij_java_use_fq_class_names = false
+ij_java_use_relative_indents = false
+ij_java_use_single_class_imports = true
+ij_java_variable_annotation_wrap = off
+ij_java_visibility = public
+ij_java_while_brace_force = never
+ij_java_while_on_new_line = false
+ij_java_wrap_comments = false
+ij_java_wrap_first_method_in_call_chain = false
+ij_java_wrap_long_lines = false
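+
+# Note (illustrative): with ij_java_imports_layout = *,|,javax.**,java.**,|,$*
+# above, imports are grouped as all other packages, a blank line, the javax.*
+# and java.* packages, another blank line, and finally static imports ($*):
+#   import org.bson.Document;
+#
+#   import java.util.List;
+#
+#   import static java.util.Arrays.asList;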
+
+[*.scala]
+indent_size = 2
+tab_width = 2
+ij_continuation_indent_size = 2
+ij_scala_align_composite_pattern = true
+ij_scala_align_extends_with = 0
+ij_scala_align_group_field_declarations = false
+ij_scala_align_if_else = false
+ij_scala_align_in_columns_case_branch = false
+ij_scala_align_multiline_binary_operation = false
+ij_scala_align_multiline_chained_methods = false
+ij_scala_align_multiline_for = true
+ij_scala_align_multiline_parameters = false
+ij_scala_align_multiline_parameters_in_calls = false
+ij_scala_align_multiline_parenthesized_expression = false
+ij_scala_align_tuple_elements = false
+ij_scala_align_types_in_multiline_declarations = false
+ij_scala_alternate_continuation_indent_for_params = 4
+ij_scala_binary_operation_wrap = off
+ij_scala_blank_lines_after_anonymous_class_header = 0
+ij_scala_blank_lines_after_class_header = 0
+ij_scala_blank_lines_after_imports = 1
+ij_scala_blank_lines_after_package = 1
+ij_scala_blank_lines_around_class = 1
+ij_scala_blank_lines_around_field = 0
+ij_scala_blank_lines_around_field_in_inner_scopes = 0
+ij_scala_blank_lines_around_field_in_interface = 0
+ij_scala_blank_lines_around_method = 1
+ij_scala_blank_lines_around_method_in_inner_scopes = 1
+ij_scala_blank_lines_around_method_in_interface = 1
+ij_scala_blank_lines_before_imports = 1
+ij_scala_blank_lines_before_method_body = 0
+ij_scala_blank_lines_before_package = 0
+ij_scala_block_brace_style = end_of_line
+ij_scala_block_comment_at_first_column = true
+ij_scala_call_parameters_new_line_after_lparen = 0
+ij_scala_call_parameters_right_paren_on_new_line = false
+ij_scala_call_parameters_wrap = off
+ij_scala_case_clause_brace_force = never
+ij_scala_catch_on_new_line = false
+ij_scala_class_annotation_wrap = split_into_lines
+ij_scala_class_brace_style = end_of_line
+ij_scala_closure_brace_force = never
+ij_scala_do_not_align_block_expr_params = true
+ij_scala_do_not_indent_case_clause_body = false
+ij_scala_do_not_indent_tuples_close_brace = true
+ij_scala_do_while_brace_force = never
+ij_scala_else_on_new_line = false
+ij_scala_enable_scaladoc_formatting = true
+ij_scala_enforce_functional_syntax_for_unit = true
+ij_scala_extends_keyword_wrap = off
+ij_scala_extends_list_wrap = off
+ij_scala_field_annotation_wrap = split_into_lines
+ij_scala_finally_brace_force = never
+ij_scala_finally_on_new_line = false
+ij_scala_for_brace_force = never
+ij_scala_for_statement_wrap = off
+ij_scala_formatter = 0
+ij_scala_if_brace_force = never
+ij_scala_implicit_value_class_suffix = Ops
+ij_scala_indent_braced_function_args = true
+ij_scala_indent_case_from_switch = true
+ij_scala_indent_first_parameter = true
+ij_scala_indent_first_parameter_clause = false
+ij_scala_indent_type_arguments = true
+ij_scala_indent_type_parameters = true
+ij_scala_insert_whitespaces_in_simple_one_line_method = true
+ij_scala_keep_blank_lines_before_right_brace = 2
+ij_scala_keep_blank_lines_in_code = 2
+ij_scala_keep_blank_lines_in_declarations = 2
+ij_scala_keep_comments_on_same_line = true
+ij_scala_keep_first_column_comment = false
+ij_scala_keep_indents_on_empty_lines = false
+ij_scala_keep_line_breaks = true
+ij_scala_keep_one_line_lambdas_in_arg_list = false
+ij_scala_keep_simple_blocks_in_one_line = false
+ij_scala_keep_simple_methods_in_one_line = false
+ij_scala_keep_xml_formatting = false
+ij_scala_line_comment_at_first_column = true
+ij_scala_method_annotation_wrap = split_into_lines
+ij_scala_method_brace_force = never
+ij_scala_method_brace_style = end_of_line
+ij_scala_method_call_chain_wrap = off
+ij_scala_method_parameters_new_line_after_left_paren = false
+ij_scala_method_parameters_right_paren_on_new_line = false
+ij_scala_method_parameters_wrap = off
+ij_scala_modifier_list_wrap = false
+ij_scala_multiline_string_align_dangling_closing_quotes = false
+ij_scala_multiline_string_closing_quotes_on_new_line = false
+ij_scala_multiline_string_insert_margin_on_enter = true
+ij_scala_multiline_string_margin_char = |
+ij_scala_multiline_string_margin_indent = 2
+ij_scala_multiline_string_opening_quotes_on_new_line = true
+ij_scala_multiline_string_process_margin_on_copy_paste = true
+ij_scala_newline_after_annotations = false
+ij_scala_not_continuation_indent_for_params = false
+ij_scala_parameter_annotation_wrap = off
+ij_scala_parentheses_expression_new_line_after_left_paren = false
+ij_scala_parentheses_expression_right_paren_on_new_line = false
+ij_scala_place_closure_parameters_on_new_line = false
+ij_scala_place_self_type_on_new_line = true
+ij_scala_prefer_parameters_wrap = false
+ij_scala_preserve_space_after_method_declaration_name = false
+ij_scala_reformat_on_compile = false
+ij_scala_replace_case_arrow_with_unicode_char = false
+ij_scala_replace_for_generator_arrow_with_unicode_char = false
+ij_scala_replace_lambda_with_greek_letter = false
+ij_scala_replace_map_arrow_with_unicode_char = false
+ij_scala_scalafmt_fallback_to_default_settings = false
+ij_scala_scalafmt_reformat_on_files_save = false
+ij_scala_scalafmt_show_invalid_code_warnings = true
+ij_scala_scalafmt_use_intellij_formatter_for_range_format = true
+ij_scala_sd_align_exception_comments = true
+ij_scala_sd_align_list_item_content = true
+ij_scala_sd_align_other_tags_comments = true
+ij_scala_sd_align_parameters_comments = true
+ij_scala_sd_align_return_comments = true
+ij_scala_sd_blank_line_after_parameters_comments = false
+ij_scala_sd_blank_line_after_return_comments = false
+ij_scala_sd_blank_line_before_parameters = false
+ij_scala_sd_blank_line_before_tags = true
+ij_scala_sd_blank_line_between_parameters = false
+ij_scala_sd_keep_blank_lines_between_tags = false
+ij_scala_sd_preserve_spaces_in_tags = false
+ij_scala_space_after_comma = true
+ij_scala_space_after_for_semicolon = true
+ij_scala_space_after_modifiers_constructor = false
+ij_scala_space_after_type_colon = true
+ij_scala_space_before_brace_method_call = true
+ij_scala_space_before_class_left_brace = true
+ij_scala_space_before_for_parentheses = true
+ij_scala_space_before_if_parentheses = true
+ij_scala_space_before_infix_like_method_parentheses = false
+ij_scala_space_before_infix_method_call_parentheses = false
+ij_scala_space_before_infix_operator_like_method_call_parentheses = true
+ij_scala_space_before_method_call_parentheses = false
+ij_scala_space_before_method_left_brace = true
+ij_scala_space_before_method_parentheses = false
+ij_scala_space_before_type_colon = false
+ij_scala_space_before_type_parameter_in_def_list = false
+ij_scala_space_before_type_parameter_leading_context_bound_colon = false
+ij_scala_space_before_type_parameter_leading_context_bound_colon_hk = true
+ij_scala_space_before_type_parameter_list = false
+ij_scala_space_before_type_parameter_rest_context_bound_colons = true
+ij_scala_space_before_while_parentheses = true
+ij_scala_space_inside_closure_braces = true
+ij_scala_space_inside_self_type_braces = true
+ij_scala_space_within_empty_method_call_parentheses = false
+ij_scala_spaces_around_at_in_patterns = false
+ij_scala_spaces_in_imports = false
+ij_scala_spaces_in_one_line_blocks = false
+ij_scala_spaces_within_brackets = false
+ij_scala_spaces_within_for_parentheses = false
+ij_scala_spaces_within_if_parentheses = false
+ij_scala_spaces_within_method_call_parentheses = false
+ij_scala_spaces_within_method_parentheses = false
+ij_scala_spaces_within_parentheses = false
+ij_scala_spaces_within_while_parentheses = false
+ij_scala_special_else_if_treatment = true
+ij_scala_trailing_comma_arg_list_enabled = true
+ij_scala_trailing_comma_import_selector_enabled = false
+ij_scala_trailing_comma_mode = trailing_comma_keep
+ij_scala_trailing_comma_params_enabled = true
+ij_scala_trailing_comma_pattern_arg_list_enabled = false
+ij_scala_trailing_comma_tuple_enabled = false
+ij_scala_trailing_comma_tuple_type_enabled = false
+ij_scala_trailing_comma_type_params_enabled = false
+ij_scala_try_brace_force = never
+ij_scala_type_annotation_exclude_constant = true
+ij_scala_type_annotation_exclude_in_dialect_sources = true
+ij_scala_type_annotation_exclude_in_test_sources = false
+ij_scala_type_annotation_exclude_member_of_anonymous_class = false
+ij_scala_type_annotation_exclude_member_of_private_class = false
+ij_scala_type_annotation_exclude_when_type_is_stable = true
+ij_scala_type_annotation_function_parameter = false
+ij_scala_type_annotation_implicit_modifier = true
+ij_scala_type_annotation_local_definition = false
+ij_scala_type_annotation_private_member = false
+ij_scala_type_annotation_protected_member = true
+ij_scala_type_annotation_public_member = true
+ij_scala_type_annotation_structural_type = true
+ij_scala_type_annotation_underscore_parameter = false
+ij_scala_type_annotation_unit_type = true
+ij_scala_use_alternate_continuation_indent_for_params = false
+ij_scala_use_scaladoc2_formatting = false
+ij_scala_variable_annotation_wrap = off
+ij_scala_while_brace_force = never
+ij_scala_while_on_new_line = false
+ij_scala_wrap_before_with_keyword = false
+ij_scala_wrap_first_method_in_call_chain = false
+ij_scala_wrap_long_lines = false
+
+[.editorconfig]
+ij_editorconfig_align_group_field_declarations = false
+ij_editorconfig_space_after_colon = false
+ij_editorconfig_space_after_comma = true
+ij_editorconfig_space_before_colon = false
+ij_editorconfig_space_before_comma = false
+ij_editorconfig_spaces_around_assignment_operators = true
+
+[{*.ant,*.fxml,*.jhm,*.jnlp,*.jrxml,*.pom,*.rng,*.tld,*.wadl,*.wsdd,*.wsdl,*.xjb,*.xml,*.xsd,*.xsl,*.xslt,*.xul}]
+ij_xml_align_attributes = true
+ij_xml_align_text = false
+ij_xml_attribute_wrap = normal
+ij_xml_block_comment_at_first_column = true
+ij_xml_keep_blank_lines = 2
+ij_xml_keep_indents_on_empty_lines = false
+ij_xml_keep_line_breaks = true
+ij_xml_keep_line_breaks_in_text = true
+ij_xml_keep_whitespaces = false
+ij_xml_keep_whitespaces_around_cdata = preserve
+ij_xml_keep_whitespaces_inside_cdata = false
+ij_xml_line_comment_at_first_column = true
+ij_xml_space_after_tag_name = false
+ij_xml_space_around_equals_in_attribute = false
+ij_xml_space_inside_empty_tag = false
+ij_xml_text_wrap = normal
+ij_xml_use_custom_settings = false
+
+[{*.bash,*.sh,*.zsh}]
+indent_size = 2
+tab_width = 2
+ij_shell_binary_ops_start_line = false
+ij_shell_keep_column_alignment_padding = false
+ij_shell_minify_program = false
+ij_shell_redirect_followed_by_space = false
+ij_shell_switch_cases_indented = false
+
+[{*.gant,*.gradle,*.groovy,*.gson,*.gy}]
+ij_groovy_align_group_field_declarations = false
+ij_groovy_align_multiline_array_initializer_expression = false
+ij_groovy_align_multiline_assignment = false
+ij_groovy_align_multiline_binary_operation = false
+ij_groovy_align_multiline_chained_methods = false
+ij_groovy_align_multiline_extends_list = false
+ij_groovy_align_multiline_for = true
+ij_groovy_align_multiline_list_or_map = true
+ij_groovy_align_multiline_method_parentheses = false
+ij_groovy_align_multiline_parameters = false
+ij_groovy_align_multiline_parameters_in_calls = false
+ij_groovy_align_multiline_resources = true
+ij_groovy_align_multiline_ternary_operation = false
+ij_groovy_align_multiline_throws_list = false
+ij_groovy_align_named_args_in_map = true
+ij_groovy_align_throws_keyword = false
+ij_groovy_array_initializer_new_line_after_left_brace = false
+ij_groovy_array_initializer_right_brace_on_new_line = false
+ij_groovy_array_initializer_wrap = off
+ij_groovy_assert_statement_wrap = off
+ij_groovy_assignment_wrap = off
+ij_groovy_binary_operation_wrap = off
+ij_groovy_blank_lines_after_class_header = 0
+ij_groovy_blank_lines_after_imports = 1
+ij_groovy_blank_lines_after_package = 1
+ij_groovy_blank_lines_around_class = 1
+ij_groovy_blank_lines_around_field = 0
+ij_groovy_blank_lines_around_field_in_interface = 0
+ij_groovy_blank_lines_around_method = 1
+ij_groovy_blank_lines_around_method_in_interface = 1
+ij_groovy_blank_lines_before_imports = 1
+ij_groovy_blank_lines_before_method_body = 0
+ij_groovy_blank_lines_before_package = 0
+ij_groovy_block_brace_style = end_of_line
+ij_groovy_block_comment_at_first_column = true
+ij_groovy_call_parameters_new_line_after_left_paren = false
+ij_groovy_call_parameters_right_paren_on_new_line = false
+ij_groovy_call_parameters_wrap = off
+ij_groovy_catch_on_new_line = false
+ij_groovy_class_annotation_wrap = split_into_lines
+ij_groovy_class_brace_style = end_of_line
+ij_groovy_class_count_to_use_import_on_demand = 100
+ij_groovy_do_while_brace_force = never
+ij_groovy_else_on_new_line = false
+ij_groovy_enum_constants_wrap = off
+ij_groovy_extends_keyword_wrap = off
+ij_groovy_extends_list_wrap = off
+ij_groovy_field_annotation_wrap = split_into_lines
+ij_groovy_finally_on_new_line = false
+ij_groovy_for_brace_force = never
+ij_groovy_for_statement_new_line_after_left_paren = false
+ij_groovy_for_statement_right_paren_on_new_line = false
+ij_groovy_for_statement_wrap = off
+ij_groovy_if_brace_force = never
+ij_groovy_import_annotation_wrap = 2
+ij_groovy_imports_layout = *,|,javax.**,java.**,|,$*
+ij_groovy_indent_case_from_switch = true
+ij_groovy_indent_label_blocks = true
+ij_groovy_insert_inner_class_imports = false
+ij_groovy_keep_blank_lines_before_right_brace = 2
+ij_groovy_keep_blank_lines_in_code = 2
+ij_groovy_keep_blank_lines_in_declarations = 2
+ij_groovy_keep_control_statement_in_one_line = true
+ij_groovy_keep_first_column_comment = true
+ij_groovy_keep_indents_on_empty_lines = false
+ij_groovy_keep_line_breaks = true
+ij_groovy_keep_multiple_expressions_in_one_line = false
+ij_groovy_keep_simple_blocks_in_one_line = false
+ij_groovy_keep_simple_classes_in_one_line = true
+ij_groovy_keep_simple_lambdas_in_one_line = true
+ij_groovy_keep_simple_methods_in_one_line = true
+ij_groovy_label_indent_absolute = false
+ij_groovy_label_indent_size = 0
+ij_groovy_lambda_brace_style = end_of_line
+ij_groovy_layout_static_imports_separately = true
+ij_groovy_line_comment_add_space = false
+ij_groovy_line_comment_at_first_column = true
+ij_groovy_method_annotation_wrap = split_into_lines
+ij_groovy_method_brace_style = end_of_line
+ij_groovy_method_call_chain_wrap = off
+ij_groovy_method_parameters_new_line_after_left_paren = false
+ij_groovy_method_parameters_right_paren_on_new_line = false
+ij_groovy_method_parameters_wrap = off
+ij_groovy_modifier_list_wrap = false
+ij_groovy_names_count_to_use_import_on_demand = 100
+ij_groovy_parameter_annotation_wrap = off
+ij_groovy_parentheses_expression_new_line_after_left_paren = false
+ij_groovy_parentheses_expression_right_paren_on_new_line = false
+ij_groovy_prefer_parameters_wrap = false
+ij_groovy_resource_list_new_line_after_left_paren = false
+ij_groovy_resource_list_right_paren_on_new_line = false
+ij_groovy_resource_list_wrap = off
+ij_groovy_space_after_assert_separator = true
+ij_groovy_space_after_colon = true
+ij_groovy_space_after_comma = true
+ij_groovy_space_after_comma_in_type_arguments = true
+ij_groovy_space_after_for_semicolon = true
+ij_groovy_space_after_quest = true
+ij_groovy_space_after_type_cast = true
+ij_groovy_space_before_annotation_parameter_list = false
+ij_groovy_space_before_array_initializer_left_brace = false
+ij_groovy_space_before_assert_separator = false
+ij_groovy_space_before_catch_keyword = true
+ij_groovy_space_before_catch_left_brace = true
+ij_groovy_space_before_catch_parentheses = true
+ij_groovy_space_before_class_left_brace = true
+ij_groovy_space_before_closure_left_brace = true
+ij_groovy_space_before_colon = true
+ij_groovy_space_before_comma = false
+ij_groovy_space_before_do_left_brace = true
+ij_groovy_space_before_else_keyword = true
+ij_groovy_space_before_else_left_brace = true
+ij_groovy_space_before_finally_keyword = true
+ij_groovy_space_before_finally_left_brace = true
+ij_groovy_space_before_for_left_brace = true
+ij_groovy_space_before_for_parentheses = true
+ij_groovy_space_before_for_semicolon = false
+ij_groovy_space_before_if_left_brace = true
+ij_groovy_space_before_if_parentheses = true
+ij_groovy_space_before_method_call_parentheses = false
+ij_groovy_space_before_method_left_brace = true
+ij_groovy_space_before_method_parentheses = false
+ij_groovy_space_before_quest = true
+ij_groovy_space_before_switch_left_brace = true
+ij_groovy_space_before_switch_parentheses = true
+ij_groovy_space_before_synchronized_left_brace = true
+ij_groovy_space_before_synchronized_parentheses = true
+ij_groovy_space_before_try_left_brace = true
+ij_groovy_space_before_try_parentheses = true
+ij_groovy_space_before_while_keyword = true
+ij_groovy_space_before_while_left_brace = true
+ij_groovy_space_before_while_parentheses = true
+ij_groovy_space_in_named_argument = true
+ij_groovy_space_in_named_argument_before_colon = false
+ij_groovy_space_within_empty_array_initializer_braces = false
+ij_groovy_space_within_empty_method_call_parentheses = false
+ij_groovy_spaces_around_additive_operators = true
+ij_groovy_spaces_around_assignment_operators = true
+ij_groovy_spaces_around_bitwise_operators = true
+ij_groovy_spaces_around_equality_operators = true
+ij_groovy_spaces_around_lambda_arrow = true
+ij_groovy_spaces_around_logical_operators = true
+ij_groovy_spaces_around_multiplicative_operators = true
+ij_groovy_spaces_around_regex_operators = true
+ij_groovy_spaces_around_relational_operators = true
+ij_groovy_spaces_around_shift_operators = true
+ij_groovy_spaces_within_annotation_parentheses = false
+ij_groovy_spaces_within_array_initializer_braces = false
+ij_groovy_spaces_within_braces = true
+ij_groovy_spaces_within_brackets = false
+ij_groovy_spaces_within_cast_parentheses = false
+ij_groovy_spaces_within_catch_parentheses = false
+ij_groovy_spaces_within_for_parentheses = false
+ij_groovy_spaces_within_gstring_injection_braces = false
+ij_groovy_spaces_within_if_parentheses = false
+ij_groovy_spaces_within_list_or_map = false
+ij_groovy_spaces_within_method_call_parentheses = false
+ij_groovy_spaces_within_method_parentheses = false
+ij_groovy_spaces_within_parentheses = false
+ij_groovy_spaces_within_switch_parentheses = false
+ij_groovy_spaces_within_synchronized_parentheses = false
+ij_groovy_spaces_within_try_parentheses = false
+ij_groovy_spaces_within_tuple_expression = false
+ij_groovy_spaces_within_while_parentheses = false
+ij_groovy_special_else_if_treatment = true
+ij_groovy_ternary_operation_wrap = off
+ij_groovy_throws_keyword_wrap = off
+ij_groovy_throws_list_wrap = off
+ij_groovy_use_flying_geese_braces = false
+ij_groovy_use_fq_class_names = false
+ij_groovy_use_fq_class_names_in_javadoc = true
+ij_groovy_use_relative_indents = false
+ij_groovy_use_single_class_imports = true
+ij_groovy_variable_annotation_wrap = off
+ij_groovy_while_brace_force = never
+ij_groovy_while_on_new_line = false
+ij_groovy_wrap_long_lines = false
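+
+# Note (illustrative): the glob above also matches *.gradle files, so Gradle
+# build scripts in this repository are formatted under these Groovy rules.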
+
+[{*.har,*.jsb2,*.jsb3,*.json,.babelrc,.eslintrc,.stylelintrc,bowerrc,jest.config}]
+indent_size = 2
+ij_json_keep_blank_lines_in_code = 0
+ij_json_keep_indents_on_empty_lines = false
+ij_json_keep_line_breaks = true
+ij_json_space_after_colon = true
+ij_json_space_after_comma = true
+ij_json_space_before_colon = true
+ij_json_space_before_comma = false
+ij_json_spaces_within_braces = false
+ij_json_spaces_within_brackets = false
+ij_json_wrap_long_lines = false
+
+[{*.markdown,*.md}]
+ij_markdown_force_one_space_after_blockquote_symbol = true
+ij_markdown_force_one_space_after_header_symbol = true
+ij_markdown_force_one_space_after_list_bullet = true
+ij_markdown_force_one_space_between_words = true
+ij_markdown_keep_indents_on_empty_lines = false
+ij_markdown_max_lines_around_block_elements = 1
+ij_markdown_max_lines_around_header = 1
+ij_markdown_max_lines_between_paragraphs = 1
+ij_markdown_min_lines_around_block_elements = 1
+ij_markdown_min_lines_around_header = 1
+ij_markdown_min_lines_between_paragraphs = 1
+
+[{*.properties,spring.handlers,spring.schemas}]
+ij_properties_align_group_field_declarations = false
+ij_properties_keep_blank_lines = false
+ij_properties_key_value_delimiter = equals
+ij_properties_spaces_around_key_value_delimiter = false
+
+[{*.yaml,*.yml}]
+indent_size = 2
+ij_yaml_align_values_properties = do_not_align
+ij_yaml_autoinsert_sequence_marker = true
+ij_yaml_block_mapping_on_new_line = false
+ij_yaml_indent_sequence_value = true
+ij_yaml_keep_indents_on_empty_lines = false
+ij_yaml_keep_line_breaks = true
+ij_yaml_sequence_on_new_line = false
+ij_yaml_space_before_colon = false
+ij_yaml_spaces_within_braces = true
+ij_yaml_spaces_within_brackets = true
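+
+# Note (illustrative): EditorConfig sections cascade, so a *.yml file takes
+# indent_size = 2 from the [{*.yaml,*.yml}] section above while still
+# inheriting charset = utf-8 and max_line_length = 140 from [*]; root = true
+# at the top stops any lookup in parent directories.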
diff --git a/.evergreen/.evg.yml b/.evergreen/.evg.yml
new file mode 100644
index 00000000000..e3bf87d49de
--- /dev/null
+++ b/.evergreen/.evg.yml
@@ -0,0 +1,2535 @@
+########################################
+# Evergreen Template for MongoDB Drivers
+########################################
+
+# When a task that used to pass starts to fail,
+# go through all versions that may have been skipped to detect
+# when the task started failing.
+stepback: true
+
+# Mark a failure as a system/bootstrap failure (purple box) rather than a task
+# failure by default.
+# Actual testing tasks are marked with `type: "test"`
+command_type: "system"
+
+# Protect ourselves against a rogue test case, or curl gone wild, that runs forever
+exec_timeout_secs: 3600
+
+# What to do when evergreen hits the timeout (`post:` tasks are run automatically)
+timeout:
+  - command: shell.exec
+    params:
+      script: |
+        ls -la
+
+functions:
+
+  #
+  # Start up and teardown functions
+  #
+
+  "fetch-source":
+    # Executes git clone and applies the submitted patch, if any
+    - command: git.get_project
+      params:
+        directory: "src"
+    # Applies the submitted patch, if any
+    # Deprecated. Should be removed. But still needed for certain agents (ZAP)
+    - command: git.apply_patch
+    # Fetch the specifications submodule
+    - command: shell.exec
+      params:
+        working_dir: "src"
+        script: |
+          git submodule update --init
+    # Make an evergreen expansion file with dynamic values
+    - command: shell.exec
+      params:
+        working_dir: "src"
+        shell: "bash"
+        script: |
+          # Get the current unique version of this checkout
+          if [ "${is_patch}" = "true" ]; then
+            CURRENT_VERSION=$(git describe)-patch-${version_id}
+          else
+            CURRENT_VERSION=latest
+          fi
+
+          export DRIVERS_TOOLS="$(pwd)/../drivers-tools"
+
+          # Python has cygwin path problems on Windows. Detect prospective mongo-orchestration home directory
+          if [ "Windows_NT" == "$OS" ]; then # Magic variable in cygwin
+            export DRIVERS_TOOLS=$(cygpath -m $DRIVERS_TOOLS)
+          fi
+
+          export MONGO_ORCHESTRATION_HOME="$DRIVERS_TOOLS/.evergreen/orchestration"
+          export MONGODB_BINARIES="$DRIVERS_TOOLS/mongodb/bin"
+          export UPLOAD_BUCKET="${project}"
+          export PROJECT_DIRECTORY="$(pwd)"
+          export ARCHIVE_FILE_NAME="mongo-java-driver.tgz"
+          export ARCHIVE_FILE_PATH="/tmp/$ARCHIVE_FILE_NAME"
+
+          cat <<EOT > expansion.yml
+          CURRENT_VERSION: "$CURRENT_VERSION"
+          DRIVERS_TOOLS: "$DRIVERS_TOOLS"
+          MONGO_ORCHESTRATION_HOME: "$MONGO_ORCHESTRATION_HOME"
+          MONGODB_BINARIES: "$MONGODB_BINARIES"
+          UPLOAD_BUCKET: "$UPLOAD_BUCKET"
+          PROJECT_DIRECTORY: "$PROJECT_DIRECTORY"
+          ARCHIVE_FILE_NAME: "$ARCHIVE_FILE_NAME"
+          ARCHIVE_FILE_PATH: "$ARCHIVE_FILE_PATH"
+          PREPARE_SHELL: |
+             set -o errexit
+             set -o xtrace
+             export DRIVERS_TOOLS="$DRIVERS_TOOLS"
+             export MONGO_ORCHESTRATION_HOME="$MONGO_ORCHESTRATION_HOME"
+             export MONGODB_BINARIES="$MONGODB_BINARIES"
+             export UPLOAD_BUCKET="$UPLOAD_BUCKET"
+             export PROJECT_DIRECTORY="$PROJECT_DIRECTORY"
+             export TMPDIR="$MONGO_ORCHESTRATION_HOME/db"
+             export PATH="$MONGODB_BINARIES:$PATH"
+             export PROJECT="${project}"
+             export ARCHIVE_FILE_NAME="$ARCHIVE_FILE_NAME"
+             export ARCHIVE_FILE_PATH="$ARCHIVE_FILE_PATH"
+          EOT
+          # See what we've done
+          cat expansion.yml
+
+    # Load the expansion file to make an evergreen variable with the current unique version
+    - command: expansions.update
+      params:
+        file: src/expansion.yml
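+
+  # Illustrative note: once expansions.update has loaded src/expansion.yml,
+  # any later shell.exec script in this file can interpolate those values,
+  # for example:
+  #
+  #   - command: shell.exec
+  #     params:
+  #       script: |
+  #         ${PREPARE_SHELL}
+  #         echo "checkout is at ${CURRENT_VERSION} in ${PROJECT_DIRECTORY}"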
+ echo "Created archive tar file at ${ARCHIVE_FILE_PATH}" + + "start-mongo-orchestration": + - command: shell.exec + params: + script: | + ${PREPARE_SHELL} + REQUIRE_API_VERSION=${REQUIRE_API_VERSION} LOAD_BALANCER=${LOAD_BALANCER} MONGODB_VERSION=${VERSION} TOPOLOGY=${TOPOLOGY} \ + AUTH=${AUTH} SSL=${SSL} STORAGE_ENGINE=${STORAGE_ENGINE} ORCHESTRATION_FILE=${ORCHESTRATION_FILE} \ + bash ${DRIVERS_TOOLS}/.evergreen/run-orchestration.sh + # run-orchestration generates expansion file with the MONGODB_URI for the cluster + - command: expansions.update + params: + file: mo-expansion.yml + "stop-mongo-orchestration": + - command: shell.exec + params: + shell: "bash" + script: | + ${PREPARE_SHELL} + bash ${DRIVERS_TOOLS}/.evergreen/stop-orchestration.sh || true + + "start-mongohoused": + - command: shell.exec + params: + include_expansions_in_env: [ "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN" ] + script: | + DRIVERS_TOOLS="${DRIVERS_TOOLS}" bash ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/pull-mongohouse-image.sh + - command: shell.exec + params: + script: | + DRIVERS_TOOLS="${DRIVERS_TOOLS}" bash ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/run-mongohouse-image.sh + + "stop-mongohoused": + - command: shell.exec + params: + include_expansions_in_env: [ "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN" ] + script: | + DRIVERS_TOOLS="${DRIVERS_TOOLS}" bash ${DRIVERS_TOOLS}/.evergreen/atlas_data_lake/teardown.sh || true + + + "start-load-balancer": + - command: shell.exec + params: + script: | + DRIVERS_TOOLS=${DRIVERS_TOOLS} MONGODB_URI=${MONGODB_URI} bash ${DRIVERS_TOOLS}/.evergreen/run-load-balancer.sh start + - command: expansions.update + params: + file: lb-expansion.yml + "stop-load-balancer": + - command: shell.exec + params: + script: | + cd ${DRIVERS_TOOLS}/.evergreen + DRIVERS_TOOLS=${DRIVERS_TOOLS} bash ${DRIVERS_TOOLS}/.evergreen/run-load-balancer.sh stop || true + + "stop-aws": + - command: shell.exec + params: + shell: "bash" + script: | + ${PREPARE_SHELL} + cd "${DRIVERS_TOOLS}/.evergreen/auth_aws" + if [ -f "./aws_e2e_setup.json" ]; then + . 
./activate-authawsvenv.sh + python ./lib/aws_assign_instance_profile.py + fi + + "cleanup": + - command: shell.exec + params: + shell: "bash" + script: | + ${PREPARE_SHELL} + rm -rf $DRIVERS_TOOLS || true + + "add-aws-auth-variables-to-file": + - command: shell.exec + type: "test" + params: + include_expansions_in_env: [ "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN" ] + shell: "bash" + working_dir: "src" + script: | + ${PREPARE_SHELL} + cd $DRIVERS_TOOLS/.evergreen/auth_aws + ./setup_secrets.sh drivers/aws_auth + + "add-atlas-connect-variables-to-file": + - command: shell.exec + type: "test" + params: + include_expansions_in_env: [ "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN" ] + shell: "bash" + working_dir: "src" + script: | + ${PREPARE_SHELL} + ${DRIVERS_TOOLS}/.evergreen/secrets_handling/setup-secrets.sh drivers/atlas_connect + + "start-csfle-servers": + - command: ec2.assume_role + params: + role_arn: ${aws_test_secrets_role} + - command: subprocess.exec + params: + working_dir: "src" + binary: "bash" + include_expansions_in_env: ["AWS_SECRET_ACCESS_KEY", "AWS_ACCESS_KEY_ID", "AWS_SESSION_TOKEN"] + args: + - ${DRIVERS_TOOLS}/.evergreen/csfle/setup.sh + + "stop-csfle-servers": + - command: shell.exec + params: + shell: "bash" + script: | + ${PREPARE_SHELL} + bash ${DRIVERS_TOOLS}/.evergreen/csfle/teardown.sh || true + + # + # Publishing / uploading functions + # + + "upload-mo-artifacts": + - command: ec2.assume_role + params: + role_arn: ${UPLOAD_MO_ARTIFACTS_ROLE_ARN} + - command: shell.exec + params: + shell: "bash" + script: | + ${PREPARE_SHELL} + find $MONGO_ORCHESTRATION_HOME -name \*.log | xargs tar czf mongodb-logs.tar.gz + - command: s3.put + params: + aws_key: ${AWS_ACCESS_KEY_ID} + aws_secret: ${AWS_SECRET_ACCESS_KEY} + aws_session_token: ${AWS_SESSION_TOKEN} + local_file: mongodb-logs.tar.gz + remote_file: ${UPLOAD_BUCKET}/${build_variant}/${revision}/${version_id}/${build_id}/logs/${task_id}-${execution}-mongodb-logs.tar.gz + bucket: mciuploads + permissions: public-read + content_type: "${content_type|application/x-gzip}" + display_name: "mongodb-logs.tar.gz" + - command: s3.put + params: + aws_key: ${AWS_ACCESS_KEY_ID} + aws_secret: ${AWS_SECRET_ACCESS_KEY} + aws_session_token: ${AWS_SESSION_TOKEN} + local_file: drivers-tools/.evergreen/orchestration/server.log + remote_file: ${UPLOAD_BUCKET}/${build_variant}/${revision}/${version_id}/${build_id}/logs/${task_id}-${execution}-orchestration.log + bucket: mciuploads + permissions: public-read + content_type: "${content_type|text/plain}" + display_name: "orchestration.log" + + "create-and-upload-SSDLC-release-assets": + - command: shell.exec + params: + shell: "bash" + working_dir: "src" + env: + PRODUCT_NAME: ${product_name} + PRODUCT_VERSION: ${product_version} + EVERGREEN_VERSION_ID: ${version_id} + script: .evergreen/ssdlc-report.sh + - command: ec2.assume_role + params: + role_arn: ${UPLOAD_SSDLC_RELEASE_ASSETS_ROLE_ARN} + - command: s3.put + params: + aws_key: ${AWS_ACCESS_KEY_ID} + aws_secret: ${AWS_SECRET_ACCESS_KEY} + aws_session_token: ${AWS_SESSION_TOKEN} + local_file: ./src/build/ssdlc/ssdlc_compliance_report.md + remote_file: ${product_name}/${product_version}/ssdlc_compliance_report.md + bucket: java-driver-release-assets + region: us-west-1 + permissions: private + content_type: "text/markdown" + display_name: "ssdlc_compliance_report.md" + - command: s3.put + params: + aws_key: ${AWS_ACCESS_KEY_ID} + aws_secret: ${AWS_SECRET_ACCESS_KEY} + aws_session_token: 
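+
+  # Illustrative note: each start-* function above has a matching stop-* teardown;
+  # the stop-* functions are also invoked unconditionally from the `post:` block
+  # below and end in `|| true`, so a failed teardown never masks a test result.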
+
+  #
+  # Publishing / uploading functions
+  #
+
+  "upload-mo-artifacts":
+    - command: ec2.assume_role
+      params:
+        role_arn: ${UPLOAD_MO_ARTIFACTS_ROLE_ARN}
+    - command: shell.exec
+      params:
+        shell: "bash"
+        script: |
+          ${PREPARE_SHELL}
+          find $MONGO_ORCHESTRATION_HOME -name \*.log | xargs tar czf mongodb-logs.tar.gz
+    - command: s3.put
+      params:
+        aws_key: ${AWS_ACCESS_KEY_ID}
+        aws_secret: ${AWS_SECRET_ACCESS_KEY}
+        aws_session_token: ${AWS_SESSION_TOKEN}
+        local_file: mongodb-logs.tar.gz
+        remote_file: ${UPLOAD_BUCKET}/${build_variant}/${revision}/${version_id}/${build_id}/logs/${task_id}-${execution}-mongodb-logs.tar.gz
+        bucket: mciuploads
+        permissions: public-read
+        content_type: "${content_type|application/x-gzip}"
+        display_name: "mongodb-logs.tar.gz"
+    - command: s3.put
+      params:
+        aws_key: ${AWS_ACCESS_KEY_ID}
+        aws_secret: ${AWS_SECRET_ACCESS_KEY}
+        aws_session_token: ${AWS_SESSION_TOKEN}
+        local_file: drivers-tools/.evergreen/orchestration/server.log
+        remote_file: ${UPLOAD_BUCKET}/${build_variant}/${revision}/${version_id}/${build_id}/logs/${task_id}-${execution}-orchestration.log
+        bucket: mciuploads
+        permissions: public-read
+        content_type: "${content_type|text/plain}"
+        display_name: "orchestration.log"
+
+  "create-and-upload-SSDLC-release-assets":
+    - command: shell.exec
+      params:
+        shell: "bash"
+        working_dir: "src"
+        env:
+          PRODUCT_NAME: ${product_name}
+          PRODUCT_VERSION: ${product_version}
+          EVERGREEN_VERSION_ID: ${version_id}
+        script: .evergreen/ssdlc-report.sh
+    - command: ec2.assume_role
+      params:
+        role_arn: ${UPLOAD_SSDLC_RELEASE_ASSETS_ROLE_ARN}
+    - command: s3.put
+      params:
+        aws_key: ${AWS_ACCESS_KEY_ID}
+        aws_secret: ${AWS_SECRET_ACCESS_KEY}
+        aws_session_token: ${AWS_SESSION_TOKEN}
+        local_file: ./src/build/ssdlc/ssdlc_compliance_report.md
+        remote_file: ${product_name}/${product_version}/ssdlc_compliance_report.md
+        bucket: java-driver-release-assets
+        region: us-west-1
+        permissions: private
+        content_type: "text/markdown"
+        display_name: "ssdlc_compliance_report.md"
+    - command: s3.put
+      params:
+        aws_key: ${AWS_ACCESS_KEY_ID}
+        aws_secret: ${AWS_SECRET_ACCESS_KEY}
+        aws_session_token: ${AWS_SESSION_TOKEN}
+        local_files_include_filter:
+          - build/ssdlc/static-analysis-reports/*.sarif
+        local_files_include_filter_prefix: ./src/
+        remote_file: ${product_name}/${product_version}/static-analysis-reports/
+        bucket: java-driver-release-assets
+        region: us-west-1
+        permissions: private
+        content_type: "application/sarif+json"
+        display_name:
+
+  "upload-test-results":
+    - command: attach.xunit_results
+      params:
+        file: ./src/*/build/test-results/*/TEST-*.xml
+
+  "trace-artifacts":
+    - command: shell.exec
+      params:
+        working_dir: "src"
+        script: |
+          PRODUCT_VERSION="$(echo -n "$(git describe --tags --always --dirty)" | cut -c 2-)"
+          cat > ssdlc-expansions.yml < setup.js
+          const mongo_binaries = "$MONGODB_BINARIES";
+          const project_dir = "$PROJECT_DIRECTORY";
+          EOF
+
+          mongo --nodb setup.js aws_e2e_ecs.js
+          cd -
+
+  "run-ocsp-test":
+    - command: shell.exec
+      type: "test"
+      params:
+        working_dir: "src"
+        script: |
+          ${PREPARE_SHELL}
+          CA_FILE="${DRIVERS_TOOLS}/.evergreen/ocsp/${OCSP_ALGORITHM}/ca.pem" \
+            OCSP_TLS_SHOULD_SUCCEED="${OCSP_TLS_SHOULD_SUCCEED}" \
+            OCSP_MUST_STAPLE="${OCSP_MUST_STAPLE}" \
+            JAVA_VERSION="${JAVA_VERSION}" \
+            bash ${PROJECT_DIRECTORY}/.evergreen/run-ocsp-test.sh
+
+  "run-valid-ocsp-server-ca-responder-test":
+    - command: shell.exec
+      params:
+        background: true
+        shell: "bash"
+        script: |
+          ${PREPARE_SHELL}
+          cd ${DRIVERS_TOOLS}/.evergreen/ocsp
+          . ./activate-ocspvenv.sh
+          nohup python ocsp_mock.py \
+            --ca_file ${OCSP_ALGORITHM}/ca.pem \
+            --ocsp_responder_cert ${OCSP_ALGORITHM}/ca.crt \
+            --ocsp_responder_key ${OCSP_ALGORITHM}/ca.key \
+            -p 8100 -v
+
+  "run-revoked-ocsp-server-ca-responder-test":
+    - command: shell.exec
+      params:
+        background: true
+        shell: "bash"
+        script: |
+          ${PREPARE_SHELL}
+          cd ${DRIVERS_TOOLS}/.evergreen/ocsp
+          . ./activate-ocspvenv.sh
+          nohup python ocsp_mock.py \
+            --ca_file ${OCSP_ALGORITHM}/ca.pem \
+            --ocsp_responder_cert ${OCSP_ALGORITHM}/ca.crt \
+            --ocsp_responder_key ${OCSP_ALGORITHM}/ca.key \
+            -p 8100 \
+            -v \
+            --fault revoked
+
+  "run-valid-ocsp-server-delegate-responder-test":
+    - command: shell.exec
+      params:
+        background: true
+        shell: "bash"
+        script: |
+          ${PREPARE_SHELL}
+          cd ${DRIVERS_TOOLS}/.evergreen/ocsp
+          . ./activate-ocspvenv.sh
+          nohup python ocsp_mock.py \
+            --ca_file ${OCSP_ALGORITHM}/ca.pem \
+            --ocsp_responder_cert ${OCSP_ALGORITHM}/ocsp-responder.crt \
+            --ocsp_responder_key ${OCSP_ALGORITHM}/ocsp-responder.key \
+            -p 8100 -v
+
+  "run-revoked-ocsp-server-delegate-responder-test":
+    - command: shell.exec
+      params:
+        background: true
+        shell: "bash"
+        script: |
+          ${PREPARE_SHELL}
+          cd ${DRIVERS_TOOLS}/.evergreen/ocsp
+          . ./activate-ocspvenv.sh
+          nohup python ocsp_mock.py \
+            --ca_file ${OCSP_ALGORITHM}/ca.pem \
+            --ocsp_responder_cert ${OCSP_ALGORITHM}/ocsp-responder.crt \
+            --ocsp_responder_key ${OCSP_ALGORITHM}/ocsp-responder.key \
+            -p 8100 \
+            -v \
+            --fault revoked
+
+  "run-gssapi-auth-test":
+    - command: shell.exec
+      type: "test"
+      params:
+        silent: true
+        working_dir: "src"
+        script: |
+          # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does)
+          PROJECT_DIRECTORY=${PROJECT_DIRECTORY} JAVA_VERSION=${JAVA_VERSION} MONGODB_URI=${gssapi_auth_mongodb_uri} \
+            KDC=${gssapi_auth_kdc} REALM=${gssapi_auth_realm} KEYTAB_BASE64=${gssapi_auth_keytab_base64} \
+            LOGIN_CONTEXT_NAME=${LOGIN_CONTEXT_NAME} \
+            .evergreen/run-gssapi-auth-test.sh
+
+  "run-socks5-tests":
+    - command: shell.exec
+      type: "test"
+      params:
+        working_dir: "src"
+        script: |
+          ${PREPARE_SHELL}
+          SOCKS_AUTH="${SOCKS_AUTH}" \
+            SSL="${SSL}" MONGODB_URI="${MONGODB_URI}" \
+            JAVA_VERSION="${JAVA_VERSION}" \
+            .evergreen/run-socks5-tests.sh
+
+  "run-kms-tls-test":
+    - command: shell.exec
+      type: "test"
+      params:
+        working_dir: "src"
+        script: |
+          ${PREPARE_SHELL}
+          set +o xtrace
+          MONGODB_URI="${MONGODB_URI}" KMS_TLS_ERROR_TYPE=${KMS_TLS_ERROR_TYPE} .evergreen/run-kms-tls-tests.sh
+
+  "run-csfle-aws-from-environment-test":
+    - command: shell.exec
+      type: "test"
+      params:
+        working_dir: "src"
+        script: |
+          ${PREPARE_SHELL}
+          set +o xtrace
+          MONGODB_URI="${MONGODB_URI}" .evergreen/run-csfle-aws-from-environment.sh
+
+  "run-csfle-tests-with-mongocryptd":
+    - command: shell.exec
+      type: "test"
+      params:
+        working_dir: "src"
+        env:
+          AZUREKMS_KEY_VAULT_ENDPOINT: ${testazurekms_keyvaultendpoint}
+          AZUREKMS_KEY_NAME: ${testazurekms_keyname}
+        script: |
+          ${PREPARE_SHELL}
+          MONGODB_URI="${MONGODB_URI}" JAVA_VERSION="${JAVA_VERSION}" .evergreen/run-csfle-tests-with-mongocryptd.sh
+
+  "run-perf-tests":
+    - command: shell.exec
+      type: "test"
+      params:
+        working_dir: "src"
+        env:
+          PROVIDER: ${PROVIDER}
+        script: |
+          ${PREPARE_SHELL}
+          PROJECT_DIRECTORY=${PROJECT_DIRECTORY} .evergreen/run-perf-tests.sh
+
+  "run-graalvm-native-image-app":
+    - command: shell.exec
+      type: "test"
+      params:
+        working_dir: "src"
+        script: |
+          ${PREPARE_SHELL}
+          MONGODB_URI="${MONGODB_URI}" JAVA_VERSION="${JAVA_VERSION}" .evergreen/run-graalvm-native-image-app.sh
+
+  "run-oidc-auth-test-k8s-test":
+    - command: shell.exec
+      type: "test"
+      params:
+        shell: "bash"
+        working_dir: "src"
+        include_expansions_in_env: [ "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN", "ARCHIVE_FILE_PATH" ]
+        script: |-
+          set -o errexit
+          ${PREPARE_SHELL}
+          export K8S_VARIANT=${VARIANT}
+          export K8S_DRIVERS_TAR_FILE=$ARCHIVE_FILE_PATH
+          export K8S_TEST_CMD="GRADLE_RO_DEP_CACHE='/tmp/test/build/gradle-cache' OIDC_ENV=k8s VARIANT=${VARIANT} ./.evergreen/run-mongodb-oidc-test.sh"
+          bash $DRIVERS_TOOLS/.evergreen/auth_oidc/k8s/setup-pod.sh
+          bash $DRIVERS_TOOLS/.evergreen/auth_oidc/k8s/run-self-test.sh
+          source $DRIVERS_TOOLS/.evergreen/auth_oidc/k8s/secrets-export.sh
+          bash $DRIVERS_TOOLS/.evergreen/auth_oidc/k8s/run-driver-test.sh
+          bash $DRIVERS_TOOLS/.evergreen/auth_oidc/k8s/teardown-pod.sh
+
+# Anchors
+
+pre:
+  - func: "fetch-source"
+  - func: "prepare-resources"
+  - func: "fix-absolute-paths"
+
+post:
+  - func: "stop-mongo-orchestration"
+  - func: "upload-mo-artifacts"
+  - func: "upload-test-results"
+  - func: "assume-aws-test-secrets-role"
+  - func: "stop-load-balancer"
+  - func: "stop-aws"
+  - func: "stop-mongohoused"
+  - func: "stop-csfle-servers"
+  - func: "cleanup"
+
+tasks:
+
+  # Compile / check build variant
+  - name: "static-analysis-task"
+    commands:
+      - command: subprocess.exec
+        type: "test"
+        params:
+          working_dir: "src"
+          binary: bash
+          args:
+            - ".evergreen/static-checks.sh"
+
+  - name: "test-bson-and-crypt-task"
+    commands:
+      - func: "run-tests"
+        vars:
+          TESTS: 'bson:test bson-record-codec:test mongodb-crypt:test'
+
+  - name: "test-core-task"
+    commands:
+      - func: "start-mongo-orchestration"
+      - func: "run-tests"
+        vars:
+          TESTS: 'driver-core:test'
+
+  - name: "test-legacy-task"
+    commands:
+      - func: "start-csfle-servers"
+      - func: "start-mongo-orchestration"
+      - func: "run-tests"
+        vars:
+          TESTS: 'driver-legacy:test'
+
+  - name: "test-sync-task"
+    commands:
+      - func: "start-csfle-servers"
+      - func: "start-mongo-orchestration"
+      - func: "run-tests"
+        vars:
+          TESTS: 'driver-sync:test'
+
+  - name: "test-reactive-task"
+    commands:
+      - func: "start-csfle-servers"
+      - func: "start-mongo-orchestration"
+      - func: "run-tests"
+        vars:
+          TESTS: 'driver-reactive-streams:test'
+
+  - name: "scala-test-task"
+    commands:
+      - func: "start-mongo-orchestration"
+      - func: "run-scala-tests"
+
+  - name: "kotlin-test-task"
+    commands:
+      - func: "start-mongo-orchestration"
+      - func: "run-kotlin-tests"
+
+  - name: "reactive-streams-tck-test-task"
+    commands:
+      - func: "start-mongo-orchestration"
+        vars:
+          VERSION: "6.0"
+          TOPOLOGY: "server"
+      - func: "run-reactive-streams-tck-tests"
+
+  - name: "load-balancer-test-task"
+    commands:
+      - func: "start-mongo-orchestration"
+        vars:
+          LOAD_BALANCER: 'true'
+      - func: "start-load-balancer"
+      - func: "run-load-balancer-tests"
+
+  - name: "oidc-auth-test-task"
+    commands:
+      - command: subprocess.exec
+        type: "test"
+        params:
+          working_dir: "src"
+          binary: bash
+          include_expansions_in_env: [ "DRIVERS_TOOLS", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN" ]
+          env:
+            OIDC_ENV: "test"
+          args:
+            - .evergreen/run-mongodb-oidc-test.sh
+
+  - name: "oidc-auth-test-azure-task"
+    # Might exceed 1 hour of execution.
+    exec_timeout_secs: 7200
+    commands:
+      - command: shell.exec
+        params:
+          shell: "bash"
+          working_dir: "src"
+          include_expansions_in_env: [ "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN", "ARCHIVE_FILE_PATH" ]
+          env:
+            JAVA_HOME: ${JAVA_HOME}
+          script: |-
+            set -o errexit
+            ${PREPARE_SHELL}
+            export AZUREOIDC_DRIVERS_TAR_FILE=$ARCHIVE_FILE_PATH
+            export AZUREOIDC_TEST_CMD="GRADLE_RO_DEP_CACHE='/home/azureuser/build/gradle-cache' OIDC_ENV=azure ./.evergreen/run-mongodb-oidc-test.sh"
+            tar --exclude-vcs -czf $AZUREOIDC_DRIVERS_TAR_FILE .
+            bash $DRIVERS_TOOLS/.evergreen/auth_oidc/azure/run-driver-test.sh
+
+  - name: "oidc-auth-test-gcp-task"
+    # Might exceed 1 hour of execution.
+    exec_timeout_secs: 7200
+    commands:
+      - command: shell.exec
+        params:
+          shell: "bash"
+          working_dir: "src"
+          include_expansions_in_env: [ "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN", "ARCHIVE_FILE_PATH" ]
+          script: |-
+            set -o errexit
+            ${PREPARE_SHELL}
+            export GCPOIDC_DRIVERS_TAR_FILE=$ARCHIVE_FILE_PATH
+            # Define the command to run on the VM.
+            # Ensure that we source the environment file created for us, set up any other variables we need,
+            # and then run our test suite on the VM.
+            export GCPOIDC_TEST_CMD="GRADLE_RO_DEP_CACHE='./build/gradle-cache' OIDC_ENV=gcp ./.evergreen/run-mongodb-oidc-test.sh"
+            tar --exclude-vcs -czf $GCPOIDC_DRIVERS_TAR_FILE .
+            bash $DRIVERS_TOOLS/.evergreen/auth_oidc/gcp/run-driver-test.sh
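+
+  # Illustrative note: the azure/gcp/k8s OIDC tasks share one pattern: the
+  # checkout is packed into ${ARCHIVE_FILE_PATH} (in-task via tar, or earlier
+  # via "create-archive-tar-file"), shipped to the remote environment, and
+  # .evergreen/run-mongodb-oidc-test.sh is run there with OIDC_ENV naming the
+  # environment under test.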
+
+  - name: "oidc-auth-test-k8s-eks-task"
+    # Might exceed 1 hour of execution.
+    exec_timeout_secs: 7200
+    commands:
+      - func: "assume-aws-test-secrets-role"
+      - func: "run-oidc-auth-test-k8s-test"
+        vars:
+          VARIANT: eks
+
+  - name: "oidc-auth-test-k8s-aks-task"
+    # Might exceed 1 hour of execution.
+    exec_timeout_secs: 7200
+    commands:
+      - func: "assume-aws-test-secrets-role"
+      - func: "run-oidc-auth-test-k8s-test"
+        vars:
+          VARIANT: aks
+
+  - name: "oidc-auth-test-k8s-gke-task"
+    # Might exceed 1 hour of execution.
+    exec_timeout_secs: 7200
+    commands:
+      - func: "assume-aws-test-secrets-role"
+      - func: "run-oidc-auth-test-k8s-test"
+        vars:
+          VARIANT: gke
+
+
+  - name: "accept-api-version-2-test-task"
+    commands:
+      - func: "start-csfle-servers"
+      - func: "start-mongo-orchestration"
+        vars:
+          ORCHESTRATION_FILE: "versioned-api-testing.json"
+      - func: "run-tests"
+
+  - name: "plain-auth-test-task"
+    commands:
+      - func: "run-plain-auth-test"
+
+  # Test that x509 auth against a server running OpenSSL 3 succeeds.
+  - name: "atlas-x509-auth-test-task"
+    commands:
+      - func: "assume-aws-test-secrets-role"
+      - func: "add-atlas-connect-variables-to-file"
+      - func: "run-x509-auth-test"
+
+  - name: "aws-auth-test-with-regular-aws-credentials-task"
+    commands:
+      - func: "start-mongo-orchestration"
+        vars:
+          AUTH: "auth"
+          ORCHESTRATION_FILE: "auth-aws.json"
+          TOPOLOGY: "server"
+      - func: "assume-aws-test-secrets-role"
+      - func: "add-aws-auth-variables-to-file"
+      - func: "run-aws-auth-test-with-regular-aws-credentials"
+
+  - name: "aws-auth-test-with-assume-role-credentials-task"
+    commands:
+      - func: "start-mongo-orchestration"
+        vars:
+          AUTH: "auth"
+          ORCHESTRATION_FILE: "auth-aws.json"
+          TOPOLOGY: "server"
+      - func: "assume-aws-test-secrets-role"
+      - func: "add-aws-auth-variables-to-file"
+      - func: "run-aws-auth-test-with-assume-role-credentials"
+
+  - name: "aws-auth-test-with-aws-credentials-as-environment-variables-task"
+    commands:
+      - func: "start-mongo-orchestration"
+        vars:
+          AUTH: "auth"
+          ORCHESTRATION_FILE: "auth-aws.json"
+          TOPOLOGY: "server"
+      - func: "assume-aws-test-secrets-role"
+      - func: "add-aws-auth-variables-to-file"
+      - func: "run-aws-auth-test-with-aws-credentials-as-environment-variables"
+
+  - name: "aws-auth-test-with-aws-credentials-and-session-token-as-environment-variables-task"
+    commands:
+      - func: "start-mongo-orchestration"
+        vars:
+          AUTH: "auth"
+          ORCHESTRATION_FILE: "auth-aws.json"
+          TOPOLOGY: "server"
+      - func: "assume-aws-test-secrets-role"
+      - func: "add-aws-auth-variables-to-file"
+      - func: "run-aws-auth-test-with-aws-credentials-and-session-token-as-environment-variables"
+
+  - name: "aws-auth-test-with-aws-EC2-credentials-task"
+    commands:
+      - func: "start-mongo-orchestration"
+        vars:
+          AUTH: "auth"
+          ORCHESTRATION_FILE: "auth-aws.json"
+          TOPOLOGY: "server"
+      - func: "assume-aws-test-secrets-role"
+      - func: "add-aws-auth-variables-to-file"
+      - func: "run-aws-auth-test-with-aws-EC2-credentials"
+
+  - name: "aws-auth-test-with-web-identity-credentials-task"
+    commands:
+      - func: "start-mongo-orchestration"
+        vars:
+          AUTH: "auth"
+          ORCHESTRATION_FILE: "auth-aws.json"
+          TOPOLOGY: "server"
+      - func: "assume-aws-test-secrets-role"
+      - func: "add-aws-auth-variables-to-file"
+      - func: "run-aws-auth-test-with-web-identity-credentials"
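+
+  # Illustrative note: the OCSP tasks below repeat one shape across
+  # {rsa, ecdsa} x {CA responder, delegate responder} x {valid, revoked} x
+  # {mustStaple, disableStapling}; between tasks only the responder function,
+  # the orchestration file, and the OCSP_* vars passed to "run-ocsp-test" vary.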
"server" + - func: "run-ocsp-test" + vars: + OCSP_ALGORITHM: "rsa" + OCSP_MUST_STAPLE: "true" + OCSP_TLS_SHOULD_SUCCEED: "1" + + - name: "test-ocsp-rsa-invalid-cert-server-staples-task" + tags: [ "ocsp" ] + commands: + - func: "run-revoked-ocsp-server-ca-responder-test" + vars: + OCSP_ALGORITHM: "rsa" + - func: "start-mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple-singleEndpoint.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_ALGORITHM: "rsa" + OCSP_MUST_STAPLE: "true" + OCSP_TLS_SHOULD_SUCCEED: "0" + + - name: "test-ocsp-rsa-valid-cert-server-does-not-staple-task" + tags: [ "ocsp" ] + commands: + - func: "run-valid-ocsp-server-ca-responder-test" + vars: + OCSP_ALGORITHM: "rsa" + - func: "start-mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling-singleEndpoint.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_ALGORITHM: "rsa" + OCSP_MUST_STAPLE: "false" + OCSP_TLS_SHOULD_SUCCEED: "1" + + - name: "test-ocsp-rsa-invalid-cert-server-does-not-staple-task" + tags: [ "ocsp" ] + commands: + - func: "run-revoked-ocsp-server-ca-responder-test" + vars: + OCSP_ALGORITHM: "rsa" + - func: "start-mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling-singleEndpoint.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_ALGORITHM: "rsa" + OCSP_MUST_STAPLE: "false" + OCSP_TLS_SHOULD_SUCCEED: "0" + + - name: "test-ocsp-rsa-valid-cert-server-staples-delegate-responder-task" + tags: [ "ocsp" ] + commands: + - func: "run-valid-ocsp-server-delegate-responder-test" + vars: + OCSP_ALGORITHM: "rsa" + - func: "start-mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple-singleEndpoint.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_ALGORITHM: "rsa" + OCSP_MUST_STAPLE: "true" + OCSP_TLS_SHOULD_SUCCEED: "1" + + - name: "test-ocsp-rsa-invalid-cert-server-staples-delegate-responder-task" + tags: [ "ocsp" ] + commands: + - func: "run-revoked-ocsp-server-delegate-responder-test" + vars: + OCSP_ALGORITHM: "rsa" + - func: "start-mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple-singleEndpoint.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_ALGORITHM: "rsa" + OCSP_MUST_STAPLE: "true" + OCSP_TLS_SHOULD_SUCCEED: "0" + + - name: "test-ocsp-rsa-valid-cert-server-does-not-staple-delegate-responder-task" + tags: [ "ocsp" ] + commands: + - func: "run-valid-ocsp-server-delegate-responder-test" + vars: + OCSP_ALGORITHM: "rsa" + - func: "start-mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling-singleEndpoint.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_ALGORITHM: "rsa" + OCSP_MUST_STAPLE: "false" + OCSP_TLS_SHOULD_SUCCEED: "1" + + - name: "test-ocsp-rsa-invalid-cert-server-does-not-staple-delegate-responder-task" + tags: [ "ocsp" ] + commands: + - func: "run-revoked-ocsp-server-delegate-responder-test" + vars: + OCSP_ALGORITHM: "rsa" + - func: "start-mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling-singleEndpoint.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_ALGORITHM: "rsa" + OCSP_MUST_STAPLE: "false" + OCSP_TLS_SHOULD_SUCCEED: "0" + + - name: "test-ocsp-rsa-soft-fail-task" + tags: [ "ocsp" ] + commands: + - func: 
"start-mongo-orchestration" + vars: + OCSP_ALGORITHM: "rsa" + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-disableStapling-singleEndpoint.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_ALGORITHM: "rsa" + OCSP_MUST_STAPLE: "false" + OCSP_TLS_SHOULD_SUCCEED: "0" + + - name: "test-ocsp-rsa-malicious-invalid-cert-mustStaple-server-does-not-staple-task" + tags: [ "ocsp" ] + commands: + - func: "run-revoked-ocsp-server-ca-responder-test" + vars: + OCSP_ALGORITHM: "rsa" + - func: "start-mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple-disableStapling-singleEndpoint.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_ALGORITHM: "rsa" + OCSP_MUST_STAPLE: "false" + OCSP_TLS_SHOULD_SUCCEED: "0" + + - name: "test-ocsp-rsa-malicious-delegate-responder-invalid-cert-mustStaple-server-does-not-staple-task" + tags: [ "ocsp" ] + commands: + - func: "run-revoked-ocsp-server-delegate-responder-test" + vars: + OCSP_ALGORITHM: "rsa" + - func: "start-mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple-disableStapling-singleEndpoint.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_ALGORITHM: "rsa" + OCSP_MUST_STAPLE: "false" + OCSP_TLS_SHOULD_SUCCEED: "0" + + - name: "test-ocsp-rsa-malicious-no-responder-mustStaple-server-does-not-staple-task" + tags: [ "ocsp" ] + commands: + - func: "start-mongo-orchestration" + vars: + ORCHESTRATION_FILE: "rsa-basic-tls-ocsp-mustStaple-disableStapling-singleEndpoint.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_ALGORITHM: "rsa" + OCSP_MUST_STAPLE: "false" + OCSP_TLS_SHOULD_SUCCEED: "0" + + - name: "test-ocsp-ecdsa-valid-cert-server-staples-task" + tags: [ "ocsp" ] + commands: + - func: "run-valid-ocsp-server-ca-responder-test" + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "start-mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple-singleEndpoint.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_MUST_STAPLE: "true" + OCSP_TLS_SHOULD_SUCCEED: "1" + + - name: "test-ocsp-ecdsa-invalid-cert-server-staples-task" + tags: [ "ocsp" ] + commands: + - func: "run-revoked-ocsp-server-ca-responder-test" + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "start-mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple-singleEndpoint.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_MUST_STAPLE: "true" + OCSP_TLS_SHOULD_SUCCEED: "0" + + - name: "test-ocsp-ecdsa-valid-cert-server-does-not-staple-task" + tags: [ "ocsp" ] + commands: + - func: "run-valid-ocsp-server-ca-responder-test" + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "start-mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling-singleEndpoint.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_MUST_STAPLE: "false" + OCSP_TLS_SHOULD_SUCCEED: "1" + + - name: "test-ocsp-ecdsa-invalid-cert-server-does-not-staple-task" + tags: [ "ocsp" ] + commands: + - func: "run-revoked-ocsp-server-ca-responder-test" + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "start-mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling-singleEndpoint.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_ALGORITHM: "ecdsa" + 
OCSP_MUST_STAPLE: "false" + OCSP_TLS_SHOULD_SUCCEED: "0" + + - name: "test-ocsp-ecdsa-soft-fail-task" + tags: [ "ocsp" ] + commands: + - func: "start-mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling-singleEndpoint.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_MUST_STAPLE: "false" + OCSP_TLS_SHOULD_SUCCEED: "0" + + - name: "test-ocsp-ecdsa-malicious-invalid-cert-mustStaple-server-does-not-staple-task" + tags: [ "ocsp" ] + commands: + - func: "run-revoked-ocsp-server-ca-responder-test" + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "start-mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple-disableStapling-singleEndpoint.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_MUST_STAPLE: "false" + OCSP_TLS_SHOULD_SUCCEED: "0" + + - name: "test-ocsp-ecdsa-valid-cert-server-staples-delegate-responder-task" + tags: [ "ocsp" ] + commands: + - func: "run-valid-ocsp-server-delegate-responder-test" + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "start-mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple-singleEndpoint.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_MUST_STAPLE: "true" + OCSP_TLS_SHOULD_SUCCEED: "1" + + - name: "test-ocsp-ecdsa-invalid-cert-server-staples-delegate-responder-task" + tags: [ "ocsp" ] + commands: + - func: "run-revoked-ocsp-server-delegate-responder-test" + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "start-mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple-singleEndpoint.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_MUST_STAPLE: "true" + OCSP_TLS_SHOULD_SUCCEED: "0" + + - name: "test-ocsp-ecdsa-valid-cert-server-does-not-staple-delegate-responder-task" + tags: [ "ocsp" ] + commands: + - func: "run-valid-ocsp-server-delegate-responder-test" + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "start-mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling-singleEndpoint.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_MUST_STAPLE: "false" + OCSP_TLS_SHOULD_SUCCEED: "1" + + - name: "test-ocsp-ecdsa-invalid-cert-server-does-not-staple-delegate-responder-task" + tags: [ "ocsp" ] + commands: + - func: "run-revoked-ocsp-server-delegate-responder-test" + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "start-mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-disableStapling-singleEndpoint.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_MUST_STAPLE: "false" + OCSP_TLS_SHOULD_SUCCEED: "0" + + - name: "test-ocsp-ecdsa-malicious-delegate-responder-invalid-cert-mustStaple-server-does-not-staple-task" + tags: [ "ocsp" ] + commands: + - func: "run-revoked-ocsp-server-delegate-responder-test" + vars: + OCSP_ALGORITHM: "ecdsa" + - func: "start-mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple-disableStapling-singleEndpoint.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_MUST_STAPLE: "false" + OCSP_TLS_SHOULD_SUCCEED: "0" + + - name: "test-ocsp-ecdsa-malicious-no-responder-mustStaple-server-does-not-staple-task" + tags: [ "ocsp" ] + 
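+ # No OCSP responder is started for this task: the certificate requires stapling (mustStaple) but the server has stapling disabled, so the TLS handshake is expected to fail (OCSP_TLS_SHOULD_SUCCEED: "0").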
commands: + - func: "start-mongo-orchestration" + vars: + ORCHESTRATION_FILE: "ecdsa-basic-tls-ocsp-mustStaple-disableStapling-singleEndpoint.json" + VERSION: "latest" + TOPOLOGY: "server" + - func: "run-ocsp-test" + vars: + OCSP_ALGORITHM: "ecdsa" + OCSP_MUST_STAPLE: "false" + OCSP_TLS_SHOULD_SUCCEED: "0" + + - name: "atlas-search-task" + commands: + - command: shell.exec + type: "test" + params: + working_dir: "src" + script: | + ${PREPARE_SHELL} + MONGODB_URI="${atlas_search_uri}" .evergreen/run-atlas-search-tests.sh + + - name: "atlas-connectivity-task" + commands: + - command: shell.exec + type: "test" + params: + silent: true + working_dir: "src" + script: | + # DO NOT ECHO WITH XTRACE (which PREPARE_SHELL does) + # The connection strings are pipe-delimited + MONGODB_URIS="${atlas_free_tier_uri}|${atlas_replica_set_uri}|${atlas_sharded_uri}|${atlas_tls_v11_uri}|${atlas_tls_v12_uri}|${atlas_free_tier_uri_srv}|${atlas_replica_set_uri_srv}|${atlas_sharded_uri_srv}|${atlas_tls_v11_uri_srv}|${atlas_tls_v12_uri_srv}" \ + JAVA_VERSION="8" \ + .evergreen/run-connectivity-tests.sh + + - name: "atlas-search-index-management-task" + commands: + - command: subprocess.exec + params: + working_dir: "src" + binary: bash + add_expansions_to_env: true + args: + - .evergreen/run-atlas-search-index-management-tests.sh + + - name: "gssapi-auth-test-task" + commands: + - func: "run-gssapi-auth-test" + + - name: "slow-test-task" + commands: + - func: "start-mongo-orchestration" + - func: "run-slow-tests" + + - name: "socket-test-task" + commands: + - func: "start-mongo-orchestration" + - func: "run-socket-tests" + + - name: "publish-snapshot-task" + depends_on: + - variant: "static-checks" + name: "static-analysis-task" + commands: + - func: "publish-snapshot" + - func: "trace-artifacts" + vars: + product_name: mongo-java-driver-snapshot + - func: "create-and-upload-SSDLC-release-assets" + + - name: "publish-release-task" + git_tag_only: true + commands: + - func: "publish-release" + - func: "trace-artifacts" + vars: + product_name: mongo-java-driver + - func: "create-and-upload-SSDLC-release-assets" + + # Do not rename this task – renaming resets the performance time series + - name: "perf-task" + tags: [ "perf" ] + # Benchmark could exceed 1 hour of execution. + exec_timeout_secs: 7200 + commands: + - func: "start-mongo-orchestration" + vars: + VERSION: "v8.0-perf" + TOPOLOGY: "server" + SSL: "nossl" + AUTH: "noauth" + - func: "run-perf-tests" + - func: "send-dashboard-data" + + - name: "perf-netty-task" + tags: [ "perf" ] + # Benchmark could exceed 1 hour of execution. 
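+ # 7200 seconds = 2 hours, matching the budget of the perf-task above.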
+ exec_timeout_secs: 7200 + commands: + - func: "start-mongo-orchestration" + vars: + VERSION: "v8.0-perf" + TOPOLOGY: "server" + SSL: "nossl" + AUTH: "noauth" + - func: "run-perf-tests" + vars: + PROVIDER: "Netty" + - func: "send-dashboard-data" + + - name: "aws-lambda-deployed-task" + commands: + - command: ec2.assume_role + params: + role_arn: ${LAMBDA_AWS_ROLE_ARN} + duration_seconds: 3600 + - command: subprocess.exec + params: + working_dir: "src" + binary: bash + add_expansions_to_env: true + args: + - .evergreen/run-deployed-lambda-aws-tests.sh + env: + TEST_LAMBDA_DIRECTORY: ${PROJECT_DIRECTORY}/driver-lambda/ + AWS_REGION: us-east-1 + + - name: "test-kms-tls-invalid-cert-task" + tags: [ "kms-tls" ] + commands: + - func: "start-mongo-orchestration" + vars: + TOPOLOGY: "server" + AUTH: "noauth" + SSL: "nossl" + - func: "start-csfle-servers" + - func: "run-kms-tls-test" + vars: + KMS_TLS_ERROR_TYPE: "expired" + TOPOLOGY: "server" + AUTH: "noauth" + SSL: "nossl" + + - name: "test-kms-tls-invalid-hostname-task" + tags: [ "kms-tls" ] + commands: + - func: "start-mongo-orchestration" + vars: + TOPOLOGY: "server" + AUTH: "noauth" + SSL: "nossl" + - func: "start-csfle-servers" + vars: + CERT_FILE: "wrong-host.pem" + - func: "run-kms-tls-test" + vars: + KMS_TLS_ERROR_TYPE: "invalidHostname" + TOPOLOGY: "server" + AUTH: "noauth" + SSL: "nossl" + + - name: "test-csfle-aws-from-environment-task" + tags: [ "csfle-aws-from-environment" ] + commands: + - func: "start-csfle-servers" + - func: "start-mongo-orchestration" + vars: + TOPOLOGY: "server" + AUTH: "noauth" + SSL: "nossl" + - func: "run-csfle-aws-from-environment-test" + + - name: "csfle-tests-with-mongocryptd-task" + commands: + - func: "start-csfle-servers" + - func: "start-mongo-orchestration" + - func: "run-csfle-tests-with-mongocryptd" + + - name: "test-gcp-kms-task" + commands: + - command: shell.exec + type: "setup" + params: + working_dir: "src" + shell: "bash" + script: | + ${PREPARE_SHELL} + echo "Copying files ... begin" + export GCPKMS_GCLOUD=${GCPKMS_GCLOUD} + export GCPKMS_PROJECT=${GCPKMS_PROJECT} + export GCPKMS_ZONE=${GCPKMS_ZONE} + export GCPKMS_INSTANCENAME=${GCPKMS_INSTANCENAME} + GCPKMS_SRC=$ARCHIVE_FILE_PATH GCPKMS_DST=$GCPKMS_INSTANCENAME: $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/copy-file.sh + echo "Copying files ... end" + echo "Untarring file ... begin" + GCPKMS_CMD="tar xf $ARCHIVE_FILE_NAME" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh + echo "Untarring file ... end" + - command: shell.exec + type: "test" + params: + working_dir: "src" + shell: "bash" + script: | + ${PREPARE_SHELL} + export GCPKMS_GCLOUD=${GCPKMS_GCLOUD} + export GCPKMS_PROJECT=${GCPKMS_PROJECT} + export GCPKMS_ZONE=${GCPKMS_ZONE} + export GCPKMS_INSTANCENAME=${GCPKMS_INSTANCENAME} + GCPKMS_CMD="MONGODB_URI=mongodb://localhost:27017 PROVIDER=gcp ./.evergreen/run-fle-on-demand-credential-test.sh" $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/run-command.sh + + - name: "test-azure-kms-task" + # Might exceed 1 hour of execution. + exec_timeout_secs: 7200 + commands: + - command: shell.exec + type: "setup" + params: + working_dir: "src" + shell: "bash" + script: | + ${PREPARE_SHELL} + echo "Copying files ... begin" + export AZUREKMS_RESOURCEGROUP=${testazurekms_resourcegroup} + export AZUREKMS_VMNAME=${AZUREKMS_VMNAME} + export AZUREKMS_PRIVATEKEYPATH=/tmp/testazurekms_privatekey + tar --exclude-vcs -czf $ARCHIVE_FILE_PATH . 
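+ # copy-file.sh pushes the tarball created above to the Azure VM's home directory (AZUREKMS_DST="~/") so the tests can run there.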
+ AZUREKMS_SRC=$ARCHIVE_FILE_PATH AZUREKMS_DST="~/" $DRIVERS_TOOLS/.evergreen/csfle/azurekms/copy-file.sh + echo "Copying files ... end" + echo "Untarring file ... begin" + AZUREKMS_CMD="tar xf $ARCHIVE_FILE_NAME" $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh + echo "Untarring file ... end" + - command: shell.exec + type: "test" + params: + working_dir: "src" + shell: "bash" + script: | + ${PREPARE_SHELL} + export AZUREKMS_RESOURCEGROUP=${testazurekms_resourcegroup} + export AZUREKMS_VMNAME=${AZUREKMS_VMNAME} + export AZUREKMS_PRIVATEKEYPATH=/tmp/testazurekms_privatekey + AZUREKMS_CMD="GRADLE_RO_DEP_CACHE='/home/azureuser/build/gradle-cache' MONGODB_URI=mongodb://localhost:27017 PROVIDER=azure AZUREKMS_KEY_VAULT_ENDPOINT=${testazurekms_keyvaultendpoint} AZUREKMS_KEY_NAME=${testazurekms_keyname} ./.evergreen/run-fle-on-demand-credential-test.sh" $DRIVERS_TOOLS/.evergreen/csfle/azurekms/run-command.sh + + - name: "test-socks5-task" + tags: [ ] + commands: + - func: "start-mongo-orchestration" + vars: + VERSION: "latest" + TOPOLOGY: "replica_set" + - func: "run-socks5-tests" + + - name: "graalvm-native-image-app-task" + commands: + - func: "start-mongo-orchestration" + - func: "run-graalvm-native-image-app" + +axes: + - id: "version" + display_name: "MongoDB Version" + values: + - id: "latest" + display_name: "latest" + variables: + VERSION: "latest" + - id: "8.0" + display_name: "8.0" + variables: + VERSION: "8.0" + - id: "7.0" + display_name: "7.0" + variables: + VERSION: "7.0" + - id: "6.0" + display_name: "6.0" + variables: + VERSION: "6.0" + - id: "5.0" + display_name: "5.0" + variables: + VERSION: "5.0" + - id: "4.4" + display_name: "4.4" + variables: + VERSION: "4.4" + - id: "4.2" + display_name: "4.2" + variables: + VERSION: "4.2" + + - id: "os" + display_name: "OS" + values: + - id: "linux" + display_name: "Linux" + run_on: "rhel80-small" + - id: "ubuntu" + display_name: "Ubuntu" + run_on: "ubuntu2004-small" + + - id: "topology" + display_name: "Topology" + values: + - id: "standalone" + display_name: "Standalone" + variables: + TOPOLOGY: "server" + - id: "replicaset" + display_name: "Replica Set" + variables: + TOPOLOGY: "replica_set" + - id: "sharded-cluster" + display_name: "Sharded Cluster" + variables: + TOPOLOGY: "sharded_cluster" + + - id: "auth" + display_name: "Authentication" + values: + - id: "auth" + display_name: "Auth" + variables: + AUTH: "auth" + - id: "noauth" + display_name: "NoAuth" + variables: + AUTH: "noauth" + + - id: "socks-auth" + display_name: "Socks Proxy Authentication" + values: + - id: "auth" + display_name: "Auth" + variables: + SOCKS_AUTH: "auth" + - id: "noauth" + display_name: "NoAuth" + variables: + SOCKS_AUTH: "noauth" + + - id: "ssl" + display_name: "SSL" + values: + - id: "ssl" + display_name: "SSL" + variables: + SSL: "ssl" + - id: "nossl" + display_name: "NoSSL" + variables: + SSL: "nossl" + + - id: "async-transport" + display_name: Async Transport + values: + - id: "netty" + display_name: Netty + variables: + ASYNC_TRANSPORT: "netty" + + - id: "netty-ssl-provider" + display_name: "Netty TLS/SSL protocol provider" + values: + - id: "jdk" + display_name: "JDK" + variables: + NETTY_SSL_PROVIDER: "JDK" + - id: "openssl" + display_name: "OpenSSL" + variables: + NETTY_SSL_PROVIDER: "OPENSSL" + + - id: "compressor" + display_name: "Compressor" + values: + - id: "snappy" + display_name: "Snappy" + variables: + COMPRESSOR: "snappy" + - id: "zlib" + display_name: "Zlib" + variables: + COMPRESSOR: "zlib" + - id: "zstd" + display_name: "Zstd" + 
variables: + COMPRESSOR: "zstd" + + - id: "jdk" + display_name: "JDK" + values: + - id: "jdk21" + display_name: "JDK21" + variables: + JAVA_VERSION: "21" + - id: "jdk17" + display_name: "JDK17" + variables: + JAVA_VERSION: "17" + - id: "jdk11" + display_name: "JDK11" + variables: + JAVA_VERSION: "11" + - id: "jdk8" + display_name: "JDK8" + variables: + JAVA_VERSION: "8" + + - id: "scala" + display_name: "SCALA" + values: + - id: "2.11" + display_name: "Scala 2.11" + variables: + SCALA: "2.11" + - id: "2.12" + display_name: "Scala 2.12" + variables: + SCALA: "2.12" + - id: "2.13" + display_name: "Scala 2.13" + variables: + SCALA: "2.13" + + - id: "api-version" + display_name: "API Version" + values: + - id: "required" + display_name: "API Version Required" + variables: + REQUIRE_API_VERSION: "true" + + - id: "gssapi-login-context-name" + display_name: "GSSAPI Login Context Name" + values: + - id: "standard" + display_name: "standard" + variables: + LOGIN_CONTEXT_NAME: "com.sun.security.jgss.krb5.initiate" + - id: "fallback" + display_name: "fallback" + variables: + LOGIN_CONTEXT_NAME: "com.sun.security.jgss.initiate" + + - id: "aws-credential-provider" + display_name: "AWS Credential Provider" + values: + - id: "aws_sdk_v2" + display_name: "AWS SDK V2" + variables: + AWS_CREDENTIAL_PROVIDER: "awsSdkV2" + - id: "aws_sdk_v1" + display_name: "AWS SDK V1" + variables: + AWS_CREDENTIAL_PROVIDER: "awsSdkV1" + - id: "built_in" + display_name: "Built-In" + variables: + AWS_CREDENTIAL_PROVIDER: "builtIn" + +task_groups: + - name: "atlas-deployed-task-group" + max_hosts: -1 + setup_group_can_fail_task: true + setup_group_timeout_secs: 1800 + setup_group: + - func: "fetch-source" + - func: "prepare-resources" + - command: subprocess.exec + type: "setup" + params: + working_dir: "src" + binary: bash + add_expansions_to_env: true + env: + MONGODB_VERSION: "8.0" + args: + - ${DRIVERS_TOOLS}/.evergreen/atlas/setup-atlas-cluster.sh + - command: expansions.update + params: + file: src/atlas-expansion.yml + teardown_group: + - command: subprocess.exec + type: "setup" + params: + working_dir: "src" + binary: bash + add_expansions_to_env: true + args: + - ${DRIVERS_TOOLS}/.evergreen/atlas/teardown-atlas-cluster.sh + tasks: + - "atlas-search-index-management-task" + - "aws-lambda-deployed-task" + + - name: "test-gcp-kms-task-group" + setup_group_can_fail_task: true + setup_group_timeout_secs: 1800 # 30 minutes + setup_group: + - func: "fetch-source" + - func: "prepare-resources" + - func: "fix-absolute-paths" + - func: "gradle-cache" + - func: "create-archive-tar-file" + - command: shell.exec + params: + shell: "bash" + script: | + ${PREPARE_SHELL} + echo '${testgcpkms_key_file}' > /tmp/testgcpkms_key_file.json + export GCPKMS_KEYFILE=/tmp/testgcpkms_key_file.json + export GCPKMS_DRIVERS_TOOLS=$DRIVERS_TOOLS + export GCPKMS_SERVICEACCOUNT="${testgcpkms_service_account}" + export GCPKMS_MACHINETYPE="e2-standard-4" + $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/create-and-setup-instance.sh + # Load the GCPKMS_GCLOUD, GCPKMS_INSTANCE, GCPKMS_REGION, and GCPKMS_ZONE expansions. 
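+ # The expansions file below is presumably produced by create-and-setup-instance.sh in the previous step.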
+ - command: expansions.update + params: + file: testgcpkms-expansions.yml + teardown_group: + - command: shell.exec + params: + shell: "bash" + script: | + ${PREPARE_SHELL} + export GCPKMS_GCLOUD=${GCPKMS_GCLOUD} + export GCPKMS_PROJECT=${GCPKMS_PROJECT} + export GCPKMS_ZONE=${GCPKMS_ZONE} + export GCPKMS_INSTANCENAME=${GCPKMS_INSTANCENAME} + $DRIVERS_TOOLS/.evergreen/csfle/gcpkms/delete-instance.sh || true + tasks: + - "test-gcp-kms-task" + + - name: "test-azure-kms-task-group" + setup_group_can_fail_task: true + setup_group_timeout_secs: 1800 # 30 minutes + setup_group: + - func: "fetch-source" + - func: "prepare-resources" + - func: "fix-absolute-paths" + - func: "gradle-cache" + - func: "create-archive-tar-file" + - func: "assume-aws-test-secrets-role" + - command: shell.exec + params: + shell: "bash" + include_expansions_in_env: [ "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN" ] + script: | + ${PREPARE_SHELL} + echo '${testazurekms_publickey}' > /tmp/testazurekms_publickey + echo '${testazurekms_privatekey}' > /tmp/testazurekms_privatekey + # Set 600 permissions on private key file. Otherwise ssh / scp may error with permissions "are too open". + chmod 600 /tmp/testazurekms_privatekey + export AZUREKMS_CLIENTID=${testazurekms_clientid} + export AZUREKMS_TENANTID=${testazurekms_tenantid} + export AZUREKMS_SECRET=${testazurekms_secret} + export AZUREKMS_DRIVERS_TOOLS=$DRIVERS_TOOLS + export AZUREKMS_RESOURCEGROUP=${testazurekms_resourcegroup} + export AZUREKMS_PUBLICKEYPATH=/tmp/testazurekms_publickey + export AZUREKMS_PRIVATEKEYPATH=/tmp/testazurekms_privatekey + export AZUREKMS_SCOPE=${testazurekms_scope} + export AZUREKMS_VMNAME_PREFIX=JAVADRIVER + export AZUREKMS_MACHINESIZE="Standard_DS3_v2" + $DRIVERS_TOOLS/.evergreen/csfle/azurekms/create-and-setup-vm.sh + - command: expansions.update + params: + file: testazurekms-expansions.yml + teardown_group: + # Load expansions again. The setup task may have failed before running `expansions.update`. 
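+ # Re-reading the file ensures AZUREKMS_VMNAME is defined for the delete-vm.sh step below even if setup partially failed.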
+ - command: expansions.update + params: + file: testazurekms-expansions.yml + - command: shell.exec + params: + shell: "bash" + script: | + ${PREPARE_SHELL} + export AZUREKMS_VMNAME=${AZUREKMS_VMNAME} + export AZUREKMS_RESOURCEGROUP=${testazurekms_resourcegroup} + $DRIVERS_TOOLS/.evergreen/csfle/azurekms/delete-vm.sh + tasks: + - "test-azure-kms-task" + + - name: "test-oidc-task-group" + setup_group_can_fail_task: true + setup_group_timeout_secs: 1800 + teardown_task_can_fail_task: true + teardown_task_timeout_secs: 1800 + setup_group: + - func: "fetch-source" + - func: "prepare-resources" + - func: "fix-absolute-paths" + - func: "assume-aws-test-secrets-role" + - command: subprocess.exec + params: + binary: bash + include_expansions_in_env: [ "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN" ] + args: + - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/setup.sh + teardown_task: + - command: subprocess.exec + params: + binary: bash + args: + - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/teardown.sh + tasks: + - "oidc-auth-test-task" + + - name: "test-oidc-azure-task-group" + setup_group_can_fail_task: true + setup_group_timeout_secs: 1800 + teardown_task_can_fail_task: true + teardown_task_timeout_secs: 1800 + setup_group: + - func: "fetch-source" + - func: "prepare-resources" + - func: "fix-absolute-paths" + - func: "gradle-cache" + - func: "create-archive-tar-file" + - func: "assume-aws-test-secrets-role" + - command: subprocess.exec + params: + binary: bash + include_expansions_in_env: [ "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN" ] + env: + AZUREOIDC_VMNAME_PREFIX: "JAVA_DRIVER" + AZUREKMS_MACHINESIZE: "Standard_DS3_v2" + args: + - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/azure/create-and-setup-vm.sh + teardown_task: + - command: subprocess.exec + params: + binary: bash + args: + - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/azure/delete-vm.sh + tasks: + - "oidc-auth-test-azure-task" + + - name: "test-oidc-gcp-task-group" + setup_group_can_fail_task: true + setup_group_timeout_secs: 1800 + teardown_task_can_fail_task: true + teardown_task_timeout_secs: 1800 + setup_group: + - func: "fetch-source" + - func: "prepare-resources" + - func: "fix-absolute-paths" + - func: "gradle-cache" + - func: "create-archive-tar-file" + - func: "assume-aws-test-secrets-role" + - command: subprocess.exec + params: + binary: bash + include_expansions_in_env: [ "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN" ] + env: + GCPOIDC_VMNAME_PREFIX: "JAVA_DRIVER" + GCPKMS_MACHINETYPE: "e2-standard-4" + args: + - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/gcp/setup.sh + teardown_task: + - command: subprocess.exec + params: + binary: bash + args: + - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/gcp/teardown.sh + tasks: + - "oidc-auth-test-gcp-task" + + - name: "test-oidc-k8s-eks-task-group" + setup_group_can_fail_task: true + setup_group_timeout_secs: 1800 + teardown_task_can_fail_task: true + teardown_task_timeout_secs: 1800 + setup_group: + - func: "fetch-source" + - func: "prepare-resources" + - func: "fix-absolute-paths" + - func: "gradle-cache" + - func: "create-archive-tar-file" + - func: "assume-aws-test-secrets-role" + - command: subprocess.exec + params: + binary: bash + include_expansions_in_env: [ "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN" ] + args: + - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/k8s/setup.sh + teardown_task: + - command: subprocess.exec + params: + binary: bash + args: + - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/k8s/teardown.sh + tasks: + - 
"oidc-auth-test-k8s-eks-task" + + - name: "test-oidc-k8s-aks-task-group" + setup_group_can_fail_task: true + setup_group_timeout_secs: 1800 + teardown_task_can_fail_task: true + teardown_task_timeout_secs: 1800 + setup_group: + - func: "fetch-source" + - func: "prepare-resources" + - func: "fix-absolute-paths" + - func: "gradle-cache" + - func: "create-archive-tar-file" + - func: "assume-aws-test-secrets-role" + - command: subprocess.exec + params: + binary: bash + include_expansions_in_env: [ "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN" ] + args: + - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/k8s/setup.sh + teardown_task: + - command: subprocess.exec + params: + binary: bash + args: + - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/k8s/teardown.sh + tasks: + - "oidc-auth-test-k8s-aks-task" + + - name: "test-oidc-k8s-gke-task-group" + setup_group_can_fail_task: true + setup_group_timeout_secs: 1800 + teardown_task_can_fail_task: true + teardown_task_timeout_secs: 1800 + setup_group: + - func: "fetch-source" + - func: "prepare-resources" + - func: "fix-absolute-paths" + - func: "gradle-cache" + - func: "create-archive-tar-file" + - func: "assume-aws-test-secrets-role" + - command: subprocess.exec + params: + binary: bash + include_expansions_in_env: [ "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN" ] + args: + - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/k8s/setup.sh + teardown_task: + - command: subprocess.exec + params: + binary: bash + args: + - ${DRIVERS_TOOLS}/.evergreen/auth_oidc/k8s/teardown.sh + tasks: + - "oidc-auth-test-k8s-gke-task" + +buildvariants: + + # + # Name based variants + # + + # Test packaging and other release related routines + - name: "static-checks" + display_name: "Static Checks" + run_on: rhel80-small + tasks: + - name: "static-analysis-task" + + - name: "perf" + display_name: "Performance Tests" + tags: [ "perf-variant" ] + run_on: rhel90-dbx-perf-large + tasks: + - name: "perf-task" + - name: "perf-netty-task" + + - name: plain-auth-test + display_name: "PLAIN (LDAP) Auth test" + run_on: rhel80-small + tasks: + - name: "plain-auth-test-task" + + - name: "oidc-auth-test" + display_name: "OIDC Auth" + run_on: ubuntu2204-small + tasks: + - name: "test-oidc-task-group" + batchtime: 20160 # 14 days + + - name: "test-oidc-azure-variant" + display_name: "OIDC Auth Azure" + run_on: ubuntu2204-small + tasks: + - name: "test-oidc-azure-task-group" + batchtime: 20160 # 14 days + + - name: "test-oidc-gcp-variant" + display_name: "OIDC Auth GCP" + run_on: ubuntu2204-small + tasks: + - name: "test-oidc-gcp-task-group" + batchtime: 20160 # 14 days + + - name: "test-oidc-k8s-variant" + display_name: "OIDC Auth K8S" + run_on: ubuntu2204-small + tasks: + - name: "test-oidc-k8s-eks-task-group" + batchtime: 20160 # 14 days + - name: "test-oidc-k8s-aks-task-group" + batchtime: 20160 # 14 days + - name: "test-oidc-k8s-gke-task-group" + batchtime: 20160 # 14 days + + - name: "atlas-search-variant" + display_name: "Atlas Tests" + run_on: rhel80-small + tasks: + - name: "atlas-deployed-task-group" + - name: "atlas-search-task" + - name: "atlas-connectivity-task" + - name: "atlas-x509-auth-test-task" + + - name: "reactive-streams-tck-test" + display_name: "Reactive Streams TCK tests" + run_on: rhel80-small + tasks: + - name: "reactive-streams-tck-test-task" + + - name: "publish-snapshot" + display_name: "Publish Snapshot" + run_on: "ubuntu2204-small" + tasks: + - name: "publish-snapshot-task" + + - name: "publish-release" + display_name: "Publish Release" + run_on: 
"ubuntu2204-small" + tasks: + - name: "publish-release-task" + + - name: "test-gcp-kms-variant" + display_name: "GCP KMS" + run_on: + - ubuntu2204-small + tasks: + - name: "test-gcp-kms-task-group" + batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README + + - name: "test-azure-kms-variant" + display_name: "Azure KMS" + run_on: + - ubuntu2204-small + tasks: + - name: "test-azure-kms-task-group" + batchtime: 20160 # Use a batchtime of 14 days as suggested by the CSFLE test README + + # + # Matrix based variants + # + + - matrix_name: "tests-zlib-compression" + matrix_spec: { compressor: "zlib", auth: "noauth", ssl: "nossl", jdk: "jdk8", version: "*", topology: "standalone", os: "linux" } + display_name: "${version} ${compressor} ${topology} ${auth} ${ssl} ${jdk} ${os} " + tags: [ "tests-variant" ] + tasks: + - name: "test-sync-task" + - name: "test-reactive-task" + - name: "test-core-task" + - name: "test-legacy-task" + + - matrix_name: "tests-snappy-compression" + matrix_spec: { compressor: "snappy", auth: "noauth", ssl: "nossl", jdk: "jdk8", version: "*", topology: "standalone", os: "linux" } + display_name: "${version} ${compressor} ${topology} ${auth} ${ssl} ${jdk} ${os} " + tags: [ "tests-variant" ] + tasks: + - name: "test-sync-task" + - name: "test-reactive-task" + - name: "test-core-task" + - name: "test-legacy-task" + + - matrix_name: "tests-zstd-compression" + matrix_spec: { compressor: "zstd", auth: "noauth", ssl: "nossl", jdk: "jdk8", + version: [ "4.2", "4.4", "5.0", "6.0", "7.0", "8.0", "latest" ], + topology: "standalone", os: "linux" } + display_name: "${version} ${compressor} ${topology} ${auth} ${ssl} ${jdk} ${os} " + tags: [ "tests-variant" ] + tasks: + - name: "test-sync-task" + - name: "test-reactive-task" + - name: "test-core-task" + - name: "test-legacy-task" + + - matrix_name: "tests-unit" + matrix_spec: { jdk: [ "jdk8", "jdk11", "jdk17", "jdk21" ], os: "linux" } + display_name: "${jdk} ${os} Unit" + tags: [ "tests-variant" ] + tasks: + - name: "test-bson-and-crypt-task" + + - matrix_name: "tests-jdk8-unsecure" + matrix_spec: { auth: "noauth", ssl: "nossl", jdk: "jdk8", version: [ "4.2", "4.4", "5.0", "6.0", "7.0", "8.0", "latest" ], + topology: "*", os: "linux" } + display_name: "${version} ${topology} ${auth} ${ssl} ${jdk} ${os} " + tags: [ "tests-variant" ] + tasks: + - name: "test-sync-task" + - name: "test-reactive-task" + - name: "test-core-task" + - name: "test-legacy-task" + + - matrix_name: "tests-jdk-secure" + matrix_spec: { auth: "auth", ssl: "ssl", jdk: [ "jdk8", "jdk17", "jdk21" ], + version: [ "4.2", "4.4", "5.0", "6.0", "7.0", "8.0", "latest" ], + topology: "*", os: "linux" } + display_name: "${version} ${topology} ${auth} ${ssl} ${jdk} ${os} " + tags: [ "tests-variant" ] + tasks: + - name: "test-sync-task" + - name: "test-reactive-task" + - name: "test-core-task" + - name: "test-legacy-task" + + - matrix_name: "tests-jdk-secure-jdk11" + matrix_spec: { auth: "auth", ssl: "ssl", jdk: [ "jdk11" ], version: [ "7.0" ], topology: [ "replicaset" ], os: "linux" } + display_name: "${version} ${topology} ${auth} ${ssl} ${jdk} ${os} " + tags: [ "tests-variant" ] + tasks: + - name: "test-sync-task" + - name: "test-reactive-task" + - name: "test-core-task" + - name: "test-legacy-task" + + - matrix_name: "tests-require-api-version" + matrix_spec: { api-version: "required", auth: "auth", ssl: "nossl", jdk: [ "jdk21" ], version: [ "5.0", "6.0", "7.0", "8.0", "latest" ], + topology: "standalone", os: "linux" } + display_name: "${version} 
${topology} ${api-version} " + tags: [ "tests-variant" ] + tasks: + - name: "test-sync-task" + - name: "test-reactive-task" + - name: "test-core-task" + - name: "test-legacy-task" + + - matrix_name: "tests-load-balancer-secure" + matrix_spec: { auth: "auth", ssl: "ssl", jdk: [ "jdk21" ], version: [ "5.0", "6.0", "7.0", "8.0", "latest" ], topology: "sharded-cluster", + os: "ubuntu" } + display_name: "Load Balancer ${version} ${auth} ${ssl} ${jdk} ${os}" + tasks: + - name: "load-balancer-test-task" + + - matrix_name: "tests-slow-task" + matrix_spec: { auth: "noauth", ssl: "nossl", jdk: "jdk21", version: [ "7.0" ], topology: "standalone", os: "linux" } + display_name: "Slow: ${version} ${topology} ${ssl} ${jdk} ${os} " + tags: [ "tests-slow-variant" ] + tasks: + - name: "slow-test-task" + + - matrix_name: "tests-socket-task" + matrix_spec: { auth: "*", ssl: "nossl", jdk: "jdk8", version: [ "4.2" ], topology: "standalone", os: "linux" } + display_name: "Socket: ${version} ${topology} ${auth} ${jdk} ${os} " + tags: [ "tests-socket-variant" ] + tasks: + - name: "socket-test-task" + + - matrix_name: "tests-netty" + matrix_spec: { auth: "noauth", ssl: "*", jdk: "jdk8", version: [ "7.0" ], topology: "replicaset", os: "linux", + async-transport: "netty" } + display_name: "Netty: ${version} ${topology} ${ssl} ${auth} ${jdk} ${os} " + tags: [ "tests-netty-variant" ] + tasks: + - name: "test-reactive-task" + - name: "test-core-task" + + - matrix_name: "tests-netty-ssl-provider" + matrix_spec: { auth: "auth", ssl: "ssl", jdk: "jdk8", version: [ "7.0" ], topology: "replicaset", os: "linux", + async-transport: "netty", netty-ssl-provider: "*" } + display_name: "Netty SSL provider: ${version} ${topology} ${ssl} SslProvider.${netty-ssl-provider} ${auth} ${jdk} ${os} " + tags: [ "tests-netty-variant" ] + tasks: + - name: "test-reactive-task" + - name: "test-core-task" + + - matrix_name: "tests-socket-snappy-compression" + matrix_spec: { compressor: "snappy", auth: "noauth", ssl: "nossl", jdk: "jdk8", version: [ "4.2" ], topology: "standalone", os: "linux" } + display_name: "Socket: ${version} ${compressor} ${topology} ${auth} ${jdk} ${os} " + tags: [ "tests-socket-variant" ] + tasks: + - name: "socket-test-task" + + - matrix_name: "tests-socket-zstd-compression" + matrix_spec: { compressor: "zstd", auth: "noauth", ssl: "nossl", jdk: "jdk8", version: [ "4.2" ], topology: "standalone", os: "linux" } + display_name: "Socket: ${version} ${compressor} ${topology} ${auth} ${jdk} ${os} " + tags: [ "tests-socket-variant" ] + tasks: + - name: "socket-test-task" + + - matrix_name: "test-gssapi" + matrix_spec: { jdk: [ "jdk8", "jdk17", "jdk21" ], os: "linux", gssapi-login-context-name: "*" } + display_name: "GSSAPI (Kerberos) Auth test ${jdk} ${os} ${gssapi-login-context-name}" + tags: [ "test-gssapi-variant" ] + tasks: + - name: "gssapi-auth-test-task" + + - matrix_name: "aws-auth-test" + matrix_spec: { ssl: "nossl", jdk: [ "jdk8", "jdk17", "jdk21" ], version: [ "4.4", "5.0", "6.0", "7.0", "8.0", "latest" ], os: "ubuntu", + aws-credential-provider: "*" } + display_name: "MONGODB-AWS Basic Auth test ${version} ${jdk} ${aws-credential-provider}" + run_on: "ubuntu2204-small" + tasks: + - name: "aws-auth-test-with-regular-aws-credentials-task" + + - matrix_name: "aws-ec2-auth-test" + matrix_spec: { ssl: "nossl", jdk: [ "jdk21" ], version: [ "7.0" ], os: "ubuntu", aws-credential-provider: "*" } + display_name: "MONGODB-AWS Advanced Auth test ${version} ${jdk} ${aws-credential-provider}" + run_on: "ubuntu2204-small" + 
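+ # The advanced suite exercises the EC2, assume-role, environment-variable, session-token, and web-identity credential flows: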
tasks: + - name: "aws-auth-test-with-aws-EC2-credentials-task" + - name: "aws-auth-test-with-assume-role-credentials-task" + - name: "aws-auth-test-with-aws-credentials-as-environment-variables-task" + - name: "aws-auth-test-with-aws-credentials-and-session-token-as-environment-variables-task" + - name: "aws-auth-test-with-web-identity-credentials-task" + + - matrix_name: "accept-api-version-2-test" + matrix_spec: { ssl: "nossl", auth: "noauth", jdk: "jdk21", version: [ "5.0", "6.0", "7.0", "8.0", "latest" ], topology: "standalone", + os: "linux" } + display_name: "Accept API Version 2 ${version}" + run_on: "ubuntu2204-small" + tasks: + - name: "accept-api-version-2-test-task" + + - matrix_name: "ocsp-test" + matrix_spec: { auth: "noauth", ssl: "ssl", jdk: "jdk21", version: [ "4.4", "5.0", "6.0", "7.0", "8.0", "latest" ], os: "ubuntu" } + display_name: "OCSP test ${version} ${os}" + tasks: + - name: ".ocsp" + + - matrix_name: "scala-tests" + matrix_spec: { auth: "noauth", ssl: "nossl", jdk: [ "jdk8", "jdk17", "jdk21" ], version: [ "7.0" ], topology: "replicaset", + scala: "*", os: "ubuntu" } + display_name: "${scala} ${jdk} ${version} ${topology} ${os}" + tags: [ "test-scala-variant" ] + tasks: + - name: "scala-test-task" + + - matrix_name: "kotlin-tests" + matrix_spec: { auth: "noauth", ssl: "nossl", jdk: [ "jdk8", "jdk17", "jdk21" ], version: [ "7.0" ], topology: "replicaset", os: "ubuntu" } + display_name: "Kotlin: ${jdk} ${version} ${topology} ${os}" + tags: [ "test-kotlin-variant" ] + tasks: + - name: "kotlin-test-task" + + - matrix_name: "kms-tls-test" + matrix_spec: { os: "linux", version: [ "5.0" ], topology: [ "standalone" ] } + display_name: "CSFLE KMS TLS" + tasks: + - name: ".kms-tls" + + - matrix_name: "csfle-aws-from-environment-test" + matrix_spec: { os: "linux", version: [ "5.0" ], topology: [ "standalone" ] } + display_name: "CSFLE AWS From Environment" + tasks: + - name: ".csfle-aws-from-environment" + + - matrix_name: "csfle-tests-with-mongocryptd" + matrix_spec: { os: "linux", version: [ "4.2", "4.4", "5.0", "6.0", "7.0", "8.0", "latest" ], topology: [ "replicaset" ] } + display_name: "CSFLE with mongocryptd: ${version}" + tasks: + - name: "csfle-tests-with-mongocryptd-task" + + - matrix_name: "socks5-tests" + matrix_spec: { os: "linux", ssl: [ "nossl", "ssl" ], version: [ "latest" ], topology: [ "replicaset" ], socks-auth: [ "auth", "noauth" ] } + display_name: "SOCKS5 proxy ${socks-auth} : ${version} ${topology} ${ssl} ${jdk} ${os}" + tasks: + - name: "test-socks5-task" + + - matrix_name: "graalvm-native-image-app" + matrix_spec: { version: [ "7.0" ], topology: [ "replicaset" ], auth: [ "noauth" ], ssl: [ "nossl" ], + jdk: [ "jdk21" ], os: [ "linux" ] } + display_name: "GraalVM native image app: ${version} ${topology} ${auth} ${ssl} ${jdk} ${os}" + tasks: + - name: "graalvm-native-image-app-task" diff --git a/.evergreen/gradle-cache.sh b/.evergreen/gradle-cache.sh new file mode 100755 index 00000000000..2ffa72908fc --- /dev/null +++ b/.evergreen/gradle-cache.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +set -o xtrace # Write all commands first to stderr +set -o errexit # Exit the script with error if any of the commands fail + +############################################ +# Main Program # +############################################ +RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE[0]:-$0}")" +. 
"${RELATIVE_DIR_PATH}/setup-env.bash" + +echo "Enable caching" +echo "org.gradle.caching=true" >> gradle.properties +echo "kotlin.caching.enabled=true" >> gradle.properties + +echo "Compiling JVM drivers" +./gradlew -version +./gradlew classes --parallel + +# Copy the Gradle dependency cache to the gradle read only dependency cache directory. +if [ -n "$GRADLE_RO_DEP_CACHE" ];then + echo "Copying Gradle dependency cache to $GRADLE_RO_DEP_CACHE" + mkdir -p $GRADLE_RO_DEP_CACHE + + # https://docs.gradle.org/current/userguide/dependency_caching.html#sec:cache-copy + # Gradle suggests removing the "*.lock" files and the `gc.properties` file for saving/restoring cache + cp -r $HOME/.gradle/caches/modules-2 "$GRADLE_RO_DEP_CACHE" + find "$GRADLE_RO_DEP_CACHE" -name "*.lock" -type f | xargs rm -f + find "$GRADLE_RO_DEP_CACHE" -name "gc.properties" -type f | xargs rm -f + + echo "Copied Gradle dependency cache to $GRADLE_RO_DEP_CACHE" +fi diff --git a/.evergreen/prepare-oidc-get-tokens-docker.sh b/.evergreen/prepare-oidc-get-tokens-docker.sh new file mode 100755 index 00000000000..e904d5d2b89 --- /dev/null +++ b/.evergreen/prepare-oidc-get-tokens-docker.sh @@ -0,0 +1,50 @@ +#!/bin/bash + +set -o xtrace +set -o errexit # Exit the script with error if any of the commands fail + +############################################ +# Main Program # +############################################ + +# Supported/used environment variables: +# DRIVERS_TOOLS The path to evergreeen tools +# OIDC_AWS_* Required OIDC_AWS_* env variables must be configured +# +# Environment variables used as output: +# OIDC_TESTS_ENABLED Allows running OIDC tests +# OIDC_TOKEN_DIR The path to generated OIDC AWS tokens +# AWS_WEB_IDENTITY_TOKEN_FILE The path to AWS token for device workflow + +if [ -z ${DRIVERS_TOOLS+x} ]; then + echo "DRIVERS_TOOLS. is not set"; + exit 1 +fi + +if [ -z ${OIDC_AWS_ROLE_ARN+x} ]; then + echo "OIDC_AWS_ROLE_ARN. is not set"; + exit 1 +fi + +if [ -z ${OIDC_AWS_SECRET_ACCESS_KEY+x} ]; then + echo "OIDC_AWS_SECRET_ACCESS_KEY. is not set"; + exit 1 +fi + +if [ -z ${OIDC_AWS_ACCESS_KEY_ID+x} ]; then + echo "OIDC_AWS_ACCESS_KEY_ID. 
is not set"; + exit 1 +fi + +export AWS_ROLE_ARN=${OIDC_AWS_ROLE_ARN} +export AWS_SECRET_ACCESS_KEY=${OIDC_AWS_SECRET_ACCESS_KEY} +export AWS_ACCESS_KEY_ID=${OIDC_AWS_ACCESS_KEY_ID} +export OIDC_FOLDER=${DRIVERS_TOOLS}/.evergreen/auth_oidc +export OIDC_TOKEN_DIR=${OIDC_FOLDER}/test_tokens +export AWS_WEB_IDENTITY_TOKEN_FILE=${OIDC_TOKEN_DIR}/test1 +export OIDC_TESTS_ENABLED=true + +echo "Configuring OIDC server for local authentication tests" + +cd ${OIDC_FOLDER} +DRIVERS_TOOLS=${DRIVERS_TOOLS} ./oidc_get_tokens.sh \ No newline at end of file diff --git a/.evergreen/prepare-oidc-server-docker.sh b/.evergreen/prepare-oidc-server-docker.sh new file mode 100755 index 00000000000..0fcd1ed4194 --- /dev/null +++ b/.evergreen/prepare-oidc-server-docker.sh @@ -0,0 +1,50 @@ +#!/bin/bash + +set -o xtrace +set -o errexit # Exit the script with error if any of the commands fail + +############################################ +# Main Program # +############################################ + +# Supported/used environment variables: +# DRIVERS_TOOLS The path to evergreeen tools +# OIDC_AWS_* OIDC_AWS_* env variables must be configured +# +# Environment variables used as output: +# OIDC_TESTS_ENABLED Allows running OIDC tests +# OIDC_TOKEN_DIR The path to generated tokens +# AWS_WEB_IDENTITY_TOKEN_FILE The path to AWS token for device workflow + +if [ -z ${DRIVERS_TOOLS+x} ]; then + echo "DRIVERS_TOOLS. is not set"; + exit 1 +fi + +if [ -z ${OIDC_AWS_ROLE_ARN+x} ]; then + echo "OIDC_AWS_ROLE_ARN. is not set"; + exit 1 +fi + +if [ -z ${OIDC_AWS_SECRET_ACCESS_KEY+x} ]; then + echo "OIDC_AWS_SECRET_ACCESS_KEY. is not set"; + exit 1 +fi + +if [ -z ${OIDC_AWS_ACCESS_KEY_ID+x} ]; then + echo "OIDC_AWS_ACCESS_KEY_ID. is not set"; + exit 1 +fi + +export AWS_ROLE_ARN=${OIDC_AWS_ROLE_ARN} +export AWS_SECRET_ACCESS_KEY=${OIDC_AWS_SECRET_ACCESS_KEY} +export AWS_ACCESS_KEY_ID=${OIDC_AWS_ACCESS_KEY_ID} +export OIDC_FOLDER=${DRIVERS_TOOLS}/.evergreen/auth_oidc +export OIDC_TOKEN_DIR=${OIDC_FOLDER}/test_tokens +export AWS_WEB_IDENTITY_TOKEN_FILE=${OIDC_TOKEN_DIR}/test1 +export OIDC_TESTS_ENABLED=true + +echo "Configuring OIDC server for local authentication tests" + +cd ${OIDC_FOLDER} +DRIVERS_TOOLS=${DRIVERS_TOOLS} ./start_local_server.sh \ No newline at end of file diff --git a/.evergreen/publish.sh b/.evergreen/publish.sh new file mode 100755 index 00000000000..d1182c2f42d --- /dev/null +++ b/.evergreen/publish.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +# DO NOT ECHO COMMANDS AS THEY CONTAIN SECRETS! + +set -o errexit # Exit the script with error if any of the commands fail + +############################################ +# Main Program # +############################################ +RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE:-$0}")" +. "${RELATIVE_DIR_PATH}/setup-env.bash" + +RELEASE=${RELEASE:false} + +export ORG_GRADLE_PROJECT_nexusUsername=${NEXUS_USERNAME} +export ORG_GRADLE_PROJECT_nexusPassword=${NEXUS_PASSWORD} +export ORG_GRADLE_PROJECT_signingKey="${SIGNING_KEY}" +export ORG_GRADLE_PROJECT_signingPassword=${SIGNING_PASSWORD} + +if [ "$RELEASE" == "true" ]; then + TASK="publishArchives closeAndReleaseSonatypeStagingRepository" +else + TASK="publishSnapshots" +fi + +SYSTEM_PROPERTIES="-Dorg.gradle.internal.publish.checksums.insecure=true" + +./gradlew -version +./gradlew ${SYSTEM_PROPERTIES} --stacktrace --info ${TASK} # Scala 2.13 is published as result of this gradle execution. 
+./gradlew ${SYSTEM_PROPERTIES} --stacktrace --info :bson-scala:${TASK} :driver-scala:${TASK} -PscalaVersion=2.12 +./gradlew ${SYSTEM_PROPERTIES} --stacktrace --info :bson-scala:${TASK} :driver-scala:${TASK} -PscalaVersion=2.11 diff --git a/.evergreen/run-atlas-search-index-management-tests.sh b/.evergreen/run-atlas-search-index-management-tests.sh new file mode 100755 index 00000000000..784a9b45a0d --- /dev/null +++ b/.evergreen/run-atlas-search-index-management-tests.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +set -o errexit + +# Supported/used environment variables: +# MONGODB_URI Set the connection to an Atlas cluster + +############################################ +# Main Program # +############################################ +RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE[0]:-$0}")" +source "${RELATIVE_DIR_PATH}/setup-env.bash" + +echo "Running Atlas Search index management tests" +./gradlew -version +./gradlew --stacktrace --info \ + -Dorg.mongodb.test.atlas.search.index.helpers=true \ + -Dorg.mongodb.test.uri=${MONGODB_URI} \ + driver-sync:test --tests AtlasSearchIndexManagementProseTest \ + driver-reactive-streams:test --tests AtlasSearchIndexManagementProseTest diff --git a/.evergreen/run-atlas-search-tests.sh b/.evergreen/run-atlas-search-tests.sh new file mode 100755 index 00000000000..01f6bc78b42 --- /dev/null +++ b/.evergreen/run-atlas-search-tests.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +set -o errexit + +# Supported/used environment variables: +# MONGODB_URI Set the connection to an Atlas cluster + +############################################ +# Main Program # +############################################ +RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE[0]:-$0}")" +source "${RELATIVE_DIR_PATH}/setup-env.bash" + +echo "Running Atlas Search tests" +./gradlew -version +./gradlew --stacktrace --info \ + -Dorg.mongodb.test.atlas.search=true \ + -Dorg.mongodb.test.uri=${MONGODB_URI} \ + driver-core:test --tests AggregatesSearchIntegrationTest \ + --tests AggregatesBinaryVectorSearchIntegrationTest \ + --tests AggregatesSearchTest diff --git a/.evergreen/run-connectivity-tests.sh b/.evergreen/run-connectivity-tests.sh new file mode 100755 index 00000000000..38ccfaaf763 --- /dev/null +++ b/.evergreen/run-connectivity-tests.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +# Exit the script with error if any of the commands fail +set -o errexit + +# Supported/used environment variables: +# JAVA_VERSION Set the version of java to be used. Java versions can be set from the java toolchain /opt/java +# MONGODB_URIS A pipe-delimited list of MongoDB URIs to run the connectivity tests against + +############################################ +# Main Program # +############################################ +RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE:-$0}")" +. 
"${RELATIVE_DIR_PATH}/setup-env.bash" + +echo "Running connectivity tests with Java ${JAVA_VERSION}" + +./gradlew -PjavaVersion=${JAVA_VERSION} -Dorg.mongodb.test.connectivity.uris="${MONGODB_URIS}" --info --continue \ + driver-sync:test --tests ConnectivityTest \ + driver-legacy:test --tests ConnectivityTest \ + driver-reactive-streams:test --tests ConnectivityTest diff --git a/.evergreen/run-csfle-aws-from-environment.sh b/.evergreen/run-csfle-aws-from-environment.sh new file mode 100755 index 00000000000..a3c7b8fa106 --- /dev/null +++ b/.evergreen/run-csfle-aws-from-environment.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +# Don't trace since the URI contains a password that shouldn't show up in the logs +set -o errexit # Exit the script with error if any of the commands fail + +# Supported/used environment variables: +# MONGODB_URI Set the suggested connection MONGODB_URI (including credentials and topology info) +# AWS_ACCESS_KEY_ID The AWS access key identifier for client-side encryption +# AWS_SECRET_ACCESS_KEY The AWS secret access key for client-side encryption + +############################################ +# Main Program # +############################################ +RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE:-$0}")" +. "${RELATIVE_DIR_PATH}/setup-env.bash" + +echo "Running CSFLE AWS from environment tests" + +./gradlew -version + +export AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} +export AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} + +./gradlew --stacktrace --info -Dorg.mongodb.test.uri=${MONGODB_URI} \ + driver-sync:cleanTest driver-sync:test --tests ClientSideEncryptionAwsCredentialFromEnvironmentTest +first=$? +echo $first + +./gradlew --stacktrace --info -Dorg.mongodb.test.uri=${MONGODB_URI} \ + driver-reactive-streams:cleanTest driver-reactive-streams:test --tests ClientSideEncryptionAwsCredentialFromEnvironmentTest +second=$? +echo $second + +if [ $first -ne 0 ]; then + exit $first +elif [ $second -ne 0 ]; then + exit $second +else + exit 0 +fi diff --git a/.evergreen/run-csfle-tests-with-mongocryptd.sh b/.evergreen/run-csfle-tests-with-mongocryptd.sh new file mode 100755 index 00000000000..4e320c32178 --- /dev/null +++ b/.evergreen/run-csfle-tests-with-mongocryptd.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +set -o xtrace # Write all commands first to stderr +set -o errexit # Exit the script with error if any of the commands fail + +# Supported/used environment variables: +# MONGODB_URI Set the suggested connection MONGODB_URI (including credentials and topology info) +# JAVA_VERSION Set the version of java to be used. Java versions can be set from the java toolchain /opt/java +# AWS_ACCESS_KEY_ID The AWS access key identifier for client-side encryption +# AWS_SECRET_ACCESS_KEY The AWS secret access key for client-side encryption +# AWS_ACCESS_KEY_ID_AWS_KMS_NAMED The AWS access key identifier for client-side encryption's named KMS provider. +# AWS_SECRET_ACCESS_KEY_AWS_KMS_NAMED The AWS secret access key for client-side encryption's named KMS provider. 
+# AWS_TEMP_ACCESS_KEY_ID The temporary AWS access key identifier for client-side encryption +# AWS_TEMP_SECRET_ACCESS_KEY The temporary AWS secret access key for client-side encryption +# AWS_TEMP_SESSION_TOKEN The temporary AWS session token for client-side encryption +# AZURE_TENANT_ID The Azure tenant identifier for client-side encryption +# AZURE_CLIENT_ID The Azure client identifier for client-side encryption +# AZURE_CLIENT_SECRET The Azure client secret for client-side encryption +# GCP_EMAIL The GCP email for client-side encryption +# GCP_PRIVATE_KEY The GCP private key for client-side encryption +# AZUREKMS_KEY_VAULT_ENDPOINT The Azure key vault endpoint for integration tests +# AZUREKMS_KEY_NAME The Azure key name for integration tests + +MONGODB_URI=${MONGODB_URI:-} + +RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE:-$0}")" +. "${RELATIVE_DIR_PATH}/setup-env.bash" + +############################################ +# Functions # +############################################ + +provision_ssl () { + # Generate the keystore from the certs in the drivers-tools repo if it is not already present + if [ ! -f client.pkc ]; then + openssl pkcs12 -CAfile ${DRIVERS_TOOLS}/.evergreen/x509gen/ca.pem -export -in ${DRIVERS_TOOLS}/.evergreen/x509gen/client.pem -out client.pkc -password pass:bithere + fi + + cp ${JAVA_HOME}/lib/security/cacerts mongo-truststore + ${JAVA_HOME}/bin/keytool -importcert -trustcacerts -file ${DRIVERS_TOOLS}/.evergreen/x509gen/ca.pem -keystore mongo-truststore -storepass changeit -storetype JKS -noprompt + + # We add extra gradle arguments for SSL + export GRADLE_EXTRA_VARS="-Pssl.enabled=true -Pssl.keyStoreType=pkcs12 -Pssl.keyStore=`pwd`/client.pkc -Pssl.keyStorePassword=bithere -Pssl.trustStoreType=jks -Pssl.trustStore=`pwd`/mongo-truststore -Pssl.trustStorePassword=changeit" +} + +############################################ +# Main Program # +############################################ + +# Set up keystore/truststore regardless, as they are required for testing KMIP +provision_ssl + +echo "Running tests with Java ${JAVA_VERSION}" + +./gradlew -version + +# By not specifying the path to the `crypt_shared` via the `CRYPT_SHARED_LIB_PATH` Java system property, +# we force the driver to start `mongocryptd` instead of loading and using `crypt_shared`. +./gradlew -PjavaVersion=${JAVA_VERSION} -Dorg.mongodb.test.uri=${MONGODB_URI} \ + ${GRADLE_EXTRA_VARS} \ + -Dorg.mongodb.test.fle.on.demand.credential.test.failure.enabled=true \ + --stacktrace --info --continue \ + driver-sync:test \ + --tests "*.Client*Encryption*" \ + driver-reactive-streams:test \ + --tests "*.Client*Encryption*" \ + driver-scala:integrationTest \ + --tests "*.Client*Encryption*" diff --git a/.evergreen/run-deployed-lambda-aws-tests.sh b/.evergreen/run-deployed-lambda-aws-tests.sh new file mode 100755 index 00000000000..57c23aa7f0b --- /dev/null +++ b/.evergreen/run-deployed-lambda-aws-tests.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +set -o xtrace # Write all commands first to stderr +set -o errexit # Exit the script with error if any of the commands fail + +RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE[0]:-$0}")" +. "${RELATIVE_DIR_PATH}/setup-env.bash" + +# The JAR is compiled outside of the Lambda workflow. Note "SkipBuild: True" in template.yaml +./gradlew -version +./gradlew --info driver-lambda:shadowJar + +. 
${DRIVERS_TOOLS}/.evergreen/aws_lambda/run-deployed-lambda-aws-tests.sh diff --git a/.evergreen/run-fle-on-demand-credential-test.sh b/.evergreen/run-fle-on-demand-credential-test.sh new file mode 100755 index 00000000000..6445b53c666 --- /dev/null +++ b/.evergreen/run-fle-on-demand-credential-test.sh @@ -0,0 +1,52 @@ +#!/bin/bash + +set -o xtrace +set -o errexit # Exit the script with error if any of the commands fail + +# Supported/used environment variables: +# MONGODB_URI Set the URI, including an optional username/password to use to connect to the server +# PROVIDER Which KMS provider to test (either "gcp" or "azure") +# AZUREKMS_KEY_VAULT_ENDPOINT The Azure key vault endpoint for Azure integration tests +# AZUREKMS_KEY_NAME The Azure key name for Azure integration tests + +############################################ +# Main Program # +############################################ + +echo "Running ${PROVIDER} Credential Acquisition Test" + +if ! which java ; then + echo "Installing java..." + sudo apt install openjdk-17-jdk -y +fi + +export PROVIDER=${PROVIDER} + +echo "Running gradle version" +./gradlew -version + +echo "Running gradle classes compile for driver-sync and driver-reactive-streams" +./gradlew --parallel --build-cache --stacktrace --info \ + driver-sync:classes driver-reactive-streams:classes + +# Use "|| var=$?" so errexit does not abort on failure: both suites run and the first failing status is reported below. +echo "Running driver-sync tests" +first=0 +./gradlew -Dorg.mongodb.test.uri="${MONGODB_URI}" \ + -Dorg.mongodb.test.fle.on.demand.credential.test.success.enabled=true \ + --build-cache --stacktrace --info driver-sync:test --tests ClientSideEncryptionOnDemandCredentialsTest || first=$? +echo $first + +echo "Running driver-reactive-streams tests" +second=0 +./gradlew -Dorg.mongodb.test.uri="${MONGODB_URI}" \ + -Dorg.mongodb.test.fle.on.demand.credential.test.success.enabled=true \ + --build-cache --stacktrace --info driver-reactive-streams:test --tests ClientSideEncryptionOnDemandCredentialsTest || second=$? +echo $second + +if [ $first -ne 0 ]; then + exit $first +elif [ $second -ne 0 ]; then + exit $second +else + exit 0 +fi diff --git a/.evergreen/run-graalvm-native-image-app.sh b/.evergreen/run-graalvm-native-image-app.sh new file mode 100755 index 00000000000..e39b9b3a179 --- /dev/null +++ b/.evergreen/run-graalvm-native-image-app.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +# Supported/used environment variables: +# MONGODB_URI The connection string to use, including credentials and topology info. +# JAVA_VERSION The Java SE version for the Gradle toolchain. 
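+# JDK<version>_GRAALVM The path to the GraalVM JDK matching JAVA_VERSION (resolved below as JDK${JAVA_VERSION}_GRAALVM).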
+ +set -o errexit + +readonly RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE[0]:-$0}")" +source "${RELATIVE_DIR_PATH}/setup-env.bash" + +echo "MONGODB_URI: ${MONGODB_URI}" +echo "JAVA_HOME: ${JAVA_HOME}" +readonly JDK_GRAALVM_VAR_NAME="JDK${JAVA_VERSION}_GRAALVM" +readonly JDK_GRAALVM="${!JDK_GRAALVM_VAR_NAME}" +echo "The JDK distribution for running Gradle is" +echo "$("${JAVA_HOME}"/bin/java --version)" +echo "The Java SE version for the Gradle toolchain is ${JAVA_VERSION}" +echo "The GraalVM JDK distribution expected to be found at \`${JDK_GRAALVM}\` by the Gradle toolchain functionality is" +echo "$("${JDK_GRAALVM}"/bin/java --version)" +echo "The Gradle version is" +./gradlew --version + +echo "Building and running the GraalVM native image app" +./gradlew -PincludeGraalvm -PjavaVersion=${JAVA_VERSION} -Dorg.mongodb.test.uri=${MONGODB_URI} :graalvm-native-image-app:nativeRun diff --git a/.evergreen/run-gssapi-auth-test.sh b/.evergreen/run-gssapi-auth-test.sh new file mode 100755 index 00000000000..aa131daeee0 --- /dev/null +++ b/.evergreen/run-gssapi-auth-test.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +# Don't trace since the URI contains a password that shouldn't show up in the logs +set -o errexit # Exit the script with error if any of the commands fail + +# Supported/used environment variables: +# MONGODB_URI Set the URI, including username/password, to use to connect to the server via the GSSAPI authentication mechanism +# JAVA_VERSION Set the version of java to be used. Java versions can be set from the java toolchain /opt/java +# KDC The KDC +# REALM The realm +# KEYTAB_BASE64 The BASE64-encoded keytab +# PROJECT_DIRECTORY The project directory +# LOGIN_CONTEXT_NAME The login context name to use to look up the GSSAPI Subject + +############################################ +# Main Program # +############################################ +RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE:-$0}")" +. 
"${RELATIVE_DIR_PATH}/setup-env.bash" +echo "Running GSSAPI authentication tests with login context name '${LOGIN_CONTEXT_NAME}'" + +echo ${KEYTAB_BASE64} | base64 -d > ${PROJECT_DIRECTORY}/.evergreen/drivers.keytab + +trap "rm ${PROJECT_DIRECTORY}/.evergreen/drivers.keytab; exit" EXIT HUP + +cat << EOF > .evergreen/java.login.drivers.config +${LOGIN_CONTEXT_NAME} { + com.sun.security.auth.module.Krb5LoginModule required + doNotPrompt=true useKeyTab=true keyTab="${PROJECT_DIRECTORY}/.evergreen/drivers.keytab" principal=drivers; +}; +EOF + +echo "Running tests with Java ${JAVA_VERSION}" +./gradlew -version +./gradlew -PjavaVersion=${JAVA_VERSION} --stacktrace --info \ +-Dorg.mongodb.test.uri=${MONGODB_URI} -Dorg.mongodb.test.gssapi.login.context.name=${LOGIN_CONTEXT_NAME} \ +-Pgssapi.enabled=true -Psun.security.krb5.debug=true -Pauth.login.config=file://${PROJECT_DIRECTORY}/.evergreen/java.login.drivers.config \ +-Pkrb5.kdc=${KDC} -Pkrb5.realm=${REALM} -Psun.security.krb5.debug=true \ +driver-core:test --tests GSSAPIAuthenticationSpecification --tests GSSAPIAuthenticatorSpecification --tests KerberosSubjectProviderTest diff --git a/.evergreen/run-kms-tls-tests.sh b/.evergreen/run-kms-tls-tests.sh new file mode 100755 index 00000000000..df3a38c0eec --- /dev/null +++ b/.evergreen/run-kms-tls-tests.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +# Don't trace since the URI contains a password that shouldn't show up in the logs +set -o errexit # Exit the script with error if any of the commands fail + +# Supported/used environment variables: +# MONGODB_URI Set the suggested connection MONGODB_URI (including credentials and topology info) +# KMS_TLS_ERROR_TYPE Either "expired" or "invalidHostname" + +############################################ +# Main Program # +############################################ +RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE:-$0}")" +. "${RELATIVE_DIR_PATH}/setup-env.bash" +echo "Running KMS TLS tests" + +cp ${JAVA_HOME}/lib/security/cacerts mongo-truststore +${JAVA_HOME}/bin/keytool -importcert -trustcacerts -file ${DRIVERS_TOOLS}/.evergreen/x509gen/ca.pem -keystore mongo-truststore -storepass changeit -storetype JKS -noprompt + +export GRADLE_EXTRA_VARS="-Pssl.enabled=true -Pssl.trustStoreType=jks -Pssl.trustStore=`pwd`/mongo-truststore -Pssl.trustStorePassword=changeit" +export KMS_TLS_ERROR_TYPE=${KMS_TLS_ERROR_TYPE} + +./gradlew -version + +./gradlew --stacktrace --info ${GRADLE_EXTRA_VARS} -Dorg.mongodb.test.uri=${MONGODB_URI} \ + -Dorg.mongodb.test.kms.tls.error.type=${KMS_TLS_ERROR_TYPE} \ + driver-sync:cleanTest driver-sync:test --tests ClientSideEncryptionKmsTlsTest +first=$? +echo $first + +./gradlew --stacktrace --info ${GRADLE_EXTRA_VARS} -Dorg.mongodb.test.uri=${MONGODB_URI} \ + -Dorg.mongodb.test.kms.tls.error.type=${KMS_TLS_ERROR_TYPE} \ + driver-reactive-streams:cleanTest driver-reactive-streams:test --tests ClientSideEncryptionKmsTlsTest +second=$? 
+echo $second + +if [ $first -ne 0 ]; then + exit $first +elif [ $second -ne 0 ]; then + exit $second +else + exit 0 +fi diff --git a/.evergreen/run-kotlin-tests.sh b/.evergreen/run-kotlin-tests.sh new file mode 100755 index 00000000000..66acf68809a --- /dev/null +++ b/.evergreen/run-kotlin-tests.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +set -o xtrace # Write all commands first to stderr +set -o errexit # Exit the script with error if any of the commands fail + + +AUTH=${AUTH:-noauth} +SSL=${SSL:-nossl} +MONGODB_URI=${MONGODB_URI:-} +TOPOLOGY=${TOPOLOGY:-standalone} + +############################################ +# Main Program # +############################################ +RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE:-$0}")" +. "${RELATIVE_DIR_PATH}/setup-env.bash" + + +if [ "$SSL" != "nossl" ]; then + echo -e "\nSSL support not configured for Kotlin tests" + exit 1 +fi + +if [ "$AUTH" != "noauth" ]; then + echo -e "\nAuth support not configured for Kotlin tests" + exit 1 +fi + +export MULTI_MONGOS_URI_SYSTEM_PROPERTY="-Dorg.mongodb.test.multi.mongos.uri=${MONGODB_URI}" + +./gradlew -version + +echo "Running Kotlin Unit Tests" +./gradlew :bson-kotlin:test :bson-kotlinx:test :driver-kotlin-sync:test :driver-kotlin-coroutine:test :driver-kotlin-extensions:test + +echo "Running Kotlin Integration Tests" +./gradlew :driver-kotlin-sync:integrationTest :driver-kotlin-coroutine:integrationTest -Dorg.mongodb.test.uri=${MONGODB_URI} ${MULTI_MONGOS_URI_SYSTEM_PROPERTY} diff --git a/.evergreen/run-load-balancer-tests.sh b/.evergreen/run-load-balancer-tests.sh new file mode 100755 index 00000000000..4ee1510a901 --- /dev/null +++ b/.evergreen/run-load-balancer-tests.sh @@ -0,0 +1,95 @@ +#!/bin/bash + +set -o xtrace # Write all commands first to stderr +set -o errexit # Exit the script with error if any of the commands fail + +# Supported/used environment variables: +# AUTH Set to enable authentication. Values are: "auth" / "noauth" (default) +# SSL Set to enable SSL. Values are "ssl" / "nossl" (default) +# JDK Set the version of java to be used. Java versions can be set from the java toolchain /opt/java +# SINGLE_MONGOS_LB_URI Set the URI pointing to a load balancer configured with a single mongos server +# MULTI_MONGOS_LB_URI Set the URI pointing to a load balancer configured with multiple mongos servers + +AUTH=${AUTH:-noauth} +SSL=${SSL:-nossl} +MONGODB_URI=${MONGODB_URI:-} + +############################################ +# Main Program # +############################################ +RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE:-$0}")" +. "${RELATIVE_DIR_PATH}/setup-env.bash" + +if [ "$SSL" != "nossl" ]; then + # We generate the keystore and truststore on every run with the certs in the drivers-tools repo + if [ ! 
-f client.pkc ]; then
+        openssl pkcs12 -CAfile ${DRIVERS_TOOLS}/.evergreen/x509gen/ca.pem -export -in ${DRIVERS_TOOLS}/.evergreen/x509gen/client.pem -out client.pkc -password pass:bithere
+    fi
+
+    cp ${JAVA_HOME}/lib/security/cacerts mongo-truststore
+    ${JAVA_HOME}/bin/keytool -importcert -trustcacerts -file ${DRIVERS_TOOLS}/.evergreen/x509gen/ca.pem -keystore mongo-truststore -storepass changeit -storetype JKS -noprompt
+
+    # We add extra gradle arguments for SSL
+    GRADLE_EXTRA_VARS="-Pssl.enabled=true -Pssl.keyStoreType=pkcs12 -Pssl.keyStore=$(pwd)/client.pkc -Pssl.keyStorePassword=bithere -Pssl.trustStoreType=jks -Pssl.trustStore=$(pwd)/mongo-truststore -Pssl.trustStorePassword=changeit"
+    SINGLE_MONGOS_LB_URI="${SINGLE_MONGOS_LB_URI}&ssl=true&sslInvalidHostNameAllowed=true"
+    MULTI_MONGOS_LB_URI="${MULTI_MONGOS_LB_URI}&ssl=true&sslInvalidHostNameAllowed=true"
+fi
+
+echo "Running $AUTH tests over $SSL and connecting to $SINGLE_MONGOS_LB_URI"
+
+echo "Running tests with Java ${JAVA_VERSION}"
+./gradlew -version
+
+# Disabling errexit so that all three gradle commands will run.
+# Then we exit with non-zero if any of them exited with non-zero
+
+set +o errexit
+
+./gradlew -PjavaVersion=${JAVA_VERSION} \
+    -Dorg.mongodb.test.uri=${SINGLE_MONGOS_LB_URI} \
+    -Dorg.mongodb.test.multi.mongos.uri=${MULTI_MONGOS_LB_URI} \
+    ${GRADLE_EXTRA_VARS} --stacktrace --info --continue driver-sync:test \
+    --tests LoadBalancerTest \
+    --tests RetryableReadsTest \
+    --tests RetryableWritesTest \
+    --tests VersionedApiTest \
+    --tests ChangeStreamsTest \
+    --tests UnifiedCrudTest \
+    --tests UnifiedTransactionsTest \
+    --tests InitialDnsSeedlistDiscoveryTest
+first=$?
+echo $first
+
+./gradlew -PjavaVersion=${JAVA_VERSION} \
+    -Dorg.mongodb.test.uri=${SINGLE_MONGOS_LB_URI} \
+    -Dorg.mongodb.test.multi.mongos.uri=${MULTI_MONGOS_LB_URI} \
+    ${GRADLE_EXTRA_VARS} --stacktrace --info --continue driver-reactive-streams:test \
+    --tests LoadBalancerTest \
+    --tests RetryableReadsTest \
+    --tests RetryableWritesTest \
+    --tests VersionedApiTest \
+    --tests ChangeStreamsTest \
+    --tests UnifiedCrudTest \
+    --tests UnifiedTransactionsTest \
+    --tests InitialDnsSeedlistDiscoveryTest
+second=$?
+echo $second
+
+./gradlew -PjavaVersion=${JAVA_VERSION} \
+    -Dorg.mongodb.test.uri=${SINGLE_MONGOS_LB_URI} \
+    -Dorg.mongodb.test.multi.mongos.uri=${MULTI_MONGOS_LB_URI} \
+    ${GRADLE_EXTRA_VARS} --stacktrace --info --continue driver-core:test \
+    --tests CommandBatchCursorFunctionalTest \
+    --tests AsyncCommandBatchCursorFunctionalTest
+third=$?
+echo $third
+
+if [ $first -ne 0 ]; then
+  exit $first
+elif [ $second -ne 0 ]; then
+  exit $second
+elif [ $third -ne 0 ]; then
+  exit $third
+else
+  exit 0
+fi
diff --git a/.evergreen/run-mongodb-aws-ecs-test.sh b/.evergreen/run-mongodb-aws-ecs-test.sh
new file mode 100755
index 00000000000..63e4232839b
--- /dev/null
+++ b/.evergreen/run-mongodb-aws-ecs-test.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+
+set -o xtrace
+set -o errexit  # Exit the script with error if any of the commands fail
+
+# Supported/used environment variables:
+#   MONGODB_URI   Set the URI, including an optional username/password to use to connect to the server via MONGODB-AWS
+#                 authentication mechanism
+#   JDK           Set the version of java to be used. Java versions can be set from the java toolchain /opt/java
+#                 "jdk5", "jdk6", "jdk7", "jdk8", "jdk9", "jdk11"
+
+############################################
+#            Main Program                  #
+############################################
+
+if [[ -z "$1" ]]; then
+    echo "usage: $0 "
+    exit 1
+fi
+MONGODB_URI="$1"
+
+echo "Running MONGODB-AWS ECS authentication tests"
+
+apt update
+
+if ! which java ; then
+    echo "Installing java..."
+    # Ubuntu 18.04 ca-certificates-java and openjdk-17 bug workaround
+    dpkg --purge --force-depends ca-certificates-java
+    apt install ca-certificates-java -y
+    apt install openjdk-17-jdk -y
+fi
+
+if ! which git ; then
+    echo "Installing git..."
+    apt install git -y
+fi
+
+cd src
+
+RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE:-$0}")"
+. "${RELATIVE_DIR_PATH}/setup-env.bash"
+
+./gradlew -version
+
+echo "Running tests..."
+
+# Disabling errexit so that all three gradle commands will run; the exit codes
+# captured below determine the overall result.
+set +o errexit
+
+./gradlew -Dorg.mongodb.test.uri=${MONGODB_URI} -Dorg.mongodb.test.aws.credential.provider=awsSdkV2 --stacktrace --debug --info \
+    driver-core:test --tests AwsAuthenticationSpecification
+first=$?
+echo $first
+
+./gradlew -Dorg.mongodb.test.uri=${MONGODB_URI} -Dorg.mongodb.test.aws.credential.provider=awsSdkV1 --stacktrace --debug --info \
+    driver-core:test --tests AwsAuthenticationSpecification
+second=$?
+echo $second
+
+./gradlew -Dorg.mongodb.test.uri=${MONGODB_URI} -Dorg.mongodb.test.aws.credential.provider=builtIn --stacktrace --debug --info \
+    driver-core:test --tests AwsAuthenticationSpecification
+third=$?
+echo $third
+
+# Every branch below exits, so nothing may follow this block.
+if [ $first -ne 0 ]; then
+  exit $first
+elif [ $second -ne 0 ]; then
+  exit $second
+elif [ $third -ne 0 ]; then
+  exit $third
+else
+  exit 0
+fi
diff --git a/.evergreen/run-mongodb-aws-test.sh b/.evergreen/run-mongodb-aws-test.sh
new file mode 100755
index 00000000000..3e1e6c3cf5d
--- /dev/null
+++ b/.evergreen/run-mongodb-aws-test.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+
+set -o xtrace
+set -o errexit  # Exit the script with error if any of the commands fail
+
+# Supported/used environment variables:
+#   JDK                       Set the version of java to be used. Java versions can be set from the java toolchain /opt/java
+#                             "jdk5", "jdk6", "jdk7", "jdk8", "jdk9", "jdk11"
+#   AWS_CREDENTIAL_PROVIDER   "builtIn", "awsSdkV1", "awsSdkV2"
+############################################
+#            Main Program                  #
+############################################
+RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE:-$0}")"
+. "${RELATIVE_DIR_PATH}/setup-env.bash"
+
+echo "Running MONGODB-AWS authentication tests"
+
+# Handle credentials and environment setup.
+. $DRIVERS_TOOLS/.evergreen/auth_aws/aws_setup.sh $1
+
+# show test output
+set -x
+
+echo "Running tests with Java ${JAVA_VERSION}"
+./gradlew -version
+
+# As this script may be executed multiple times in a single task, with different values for MONGODB_URI, it's necessary
+# to run cleanTest to ensure that the test actually executes each run
+./gradlew -PjavaVersion="${JAVA_VERSION}" -Dorg.mongodb.test.uri="${MONGODB_URI}" \
+-Dorg.mongodb.test.aws.credential.provider="${AWS_CREDENTIAL_PROVIDER}" \
+--stacktrace --debug --info driver-core:cleanTest driver-core:test --tests AwsAuthenticationSpecification
diff --git a/.evergreen/run-mongodb-oidc-test.sh b/.evergreen/run-mongodb-oidc-test.sh
new file mode 100755
index 00000000000..778b8962c09
--- /dev/null
+++ b/.evergreen/run-mongodb-oidc-test.sh
@@ -0,0 +1,66 @@
+#!/bin/bash
+
+set +x  # Disable debug trace
+set -eu
+
+echo "Running MONGODB-OIDC authentication tests"
+echo "OIDC_ENV $OIDC_ENV"
+FULL_DESCRIPTION=$OIDC_ENV
+if [ "$OIDC_ENV" == "test" ]; then
+    if [ -z "$DRIVERS_TOOLS" ]; then
+        echo "Must specify DRIVERS_TOOLS"
+        exit 1
+    fi
+    source ${DRIVERS_TOOLS}/.evergreen/auth_oidc/secrets-export.sh
+    # java will not need to be installed, but we need to configure the environment
+    RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE:-$0}")"
+    source "${RELATIVE_DIR_PATH}/setup-env.bash"
+elif [ "$OIDC_ENV" == "azure" ]; then
+    source ./env.sh
+elif [ "$OIDC_ENV" == "gcp" ]; then
+    source ./secrets-export.sh
+elif [ "$OIDC_ENV" == "k8s" ]; then
+    # Make sure K8S_VARIANT is set.
+    if [ -z "$K8S_VARIANT" ]; then
+        echo "Must specify K8S_VARIANT"
+        exit 1
+    fi
+
+    FULL_DESCRIPTION="${OIDC_ENV} - ${K8S_VARIANT}"
+    # fix for git permissions issue:
+    git config --global --add safe.directory /tmp/test
+else
+    echo "Unrecognized OIDC_ENV $OIDC_ENV"
+    exit 1
+fi
+
+if ! which java ; then
+    echo "Installing java..."
+    sudo apt install openjdk-17-jdk -y
+    echo "Installed java."
+fi
+
+which java
+export OIDC_TESTS_ENABLED=true
+
+# use admin credentials for tests
+TO_REPLACE="mongodb://"
+REPLACEMENT="mongodb://$OIDC_ADMIN_USER:$OIDC_ADMIN_PWD@"
+ADMIN_URI=${MONGODB_URI/$TO_REPLACE/$REPLACEMENT}
+
+echo "Running gradle version"
+./gradlew -version
+
+echo "Running gradle classes compile for driver-sync and driver-reactive-streams: ${FULL_DESCRIPTION}"
+./gradlew --parallel --stacktrace --info \
+  driver-sync:classes driver-reactive-streams:classes
+
+echo "Running OIDC authentication tests against driver-sync: ${FULL_DESCRIPTION}"
+./gradlew -Dorg.mongodb.test.uri="$ADMIN_URI" \
+  --stacktrace --debug --info \
+  driver-sync:test --tests OidcAuthenticationProseTests --tests UnifiedAuthTest
+
+echo "Running OIDC authentication tests against driver-reactive-streams: ${FULL_DESCRIPTION}"
+./gradlew -Dorg.mongodb.test.uri="$ADMIN_URI" \
+  --stacktrace --debug --info driver-reactive-streams:test --tests OidcAuthenticationAsyncProseTests
diff --git a/.evergreen/run-ocsp-test.sh b/.evergreen/run-ocsp-test.sh
new file mode 100755
index 00000000000..048935926aa
--- /dev/null
+++ b/.evergreen/run-ocsp-test.sh
@@ -0,0 +1,44 @@
+#!/usr/bin/env bash
+
+set -o xtrace
+set -o errexit  # Exit the script with error if any of the commands fail
+
+# Supported/used environment variables:
+#   JDK                       Set the version of java to be used. Java versions can be set from the java toolchain /opt/java
+#                             "jdk5", "jdk6", "jdk7", "jdk8", "jdk9", "jdk11"
+#   OCSP_MUST_STAPLE          Passed to the client's enableStatusRequestExtension property (see provision_ssl below)
+#   OCSP_TLS_SHOULD_SUCCEED   Whether the TLS handshake is expected to succeed ("true" / "false")
+#   CA_FILE                   The path of the CA certificate to import into the truststore
+
+OCSP_MUST_STAPLE=${OCSP_MUST_STAPLE:-}
+OCSP_TLS_SHOULD_SUCCEED=${OCSP_TLS_SHOULD_SUCCEED:-}
+RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE:-$0}")"
+.
"${RELATIVE_DIR_PATH}/setup-env.bash" + +############################################ +# Functions # +############################################ + +provision_ssl () { + echo "SSL !" + + cp ${JAVA_HOME}/lib/security/cacerts mongo-truststore + ${JAVA_HOME}/bin/keytool -import -trustcacerts -file ${CA_FILE} -keystore mongo-truststore -alias ca_ocsp -storepass changeit -noprompt + + # We add extra gradle arguments for SSL + export GRADLE_EXTRA_VARS="-Pssl.enabled=true -Pocsp.property=`pwd`/java-security-ocsp-property -Pssl.trustStoreType=jks -Pssl.trustStore=`pwd`/mongo-truststore -Pssl.trustStorePassword=changeit -Pssl.checkRevocation=true -Pclient.enableStatusRequestExtension=${OCSP_MUST_STAPLE} -Pclient.protocols=TLSv1.2 -Pocsp.tls.should.succeed=${OCSP_TLS_SHOULD_SUCCEED}" +} + +############################################ +# Main Program # +############################################ + +echo "Running OCSP tests" + + +# show test output +set -x + +provision_ssl + +echo "Running OCSP tests with Java ${JAVA_VERSION}" +./gradlew -version +./gradlew -PjavaVersion=${JAVA_VERSION} ${GRADLE_EXTRA_VARS} --stacktrace --debug --info driver-sync:test --tests OcspTest + diff --git a/.evergreen/run-perf-tests.sh b/.evergreen/run-perf-tests.sh new file mode 100755 index 00000000000..472e4348135 --- /dev/null +++ b/.evergreen/run-perf-tests.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +set -o xtrace +set -o errexit + +rm -rf driver-performance-test-data +git clone https://github.com/mongodb-labs/driver-performance-test-data.git +cd driver-performance-test-data +tar xf extended_bson.tgz +tar xf parallel.tgz +tar xf single_and_multi_document.tgz +cd .. + +RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE:-$0}")" +. "${RELATIVE_DIR_PATH}/setup-env.bash" + +export TEST_PATH="${PROJECT_DIRECTORY}/driver-performance-test-data/" +export OUTPUT_FILE="${PROJECT_DIRECTORY}/results.json" + +if [ "${PROVIDER}" = "Netty" ]; then + TASK="driver-benchmarks:runNetty" +else + TASK="driver-benchmarks:run" +fi + +start_time=$(date +%s) +./gradlew -Dorg.mongodb.benchmarks.data=${TEST_PATH} -Dorg.mongodb.benchmarks.output=${OUTPUT_FILE} ${TASK} +end_time=$(date +%s) +elapsed_secs=$((end_time-start_time)) + diff --git a/.evergreen/run-plain-auth-test.sh b/.evergreen/run-plain-auth-test.sh new file mode 100755 index 00000000000..fbc965df4b4 --- /dev/null +++ b/.evergreen/run-plain-auth-test.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +# Don't trace since the URI contains a password that shouldn't show up in the logs +set -o errexit # Exit the script with error if any of the commands fail + +# Supported/used environment variables: +# MONGODB_URI Set the URI, including username/password to use to connect to the server via PLAIN authentication mechanism +# JDK Set the version of java to be used. Java versions can be set from the java toolchain /opt/java +# "jdk5", "jdk6", "jdk7", "jdk8", "jdk9" + + +############################################ +# Main Program # +############################################ +RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE:-$0}")" +. 
"${RELATIVE_DIR_PATH}/setup-env.bash" + +echo "Running PLAIN authentication tests" + + +echo "Running tests with Java ${JAVA_VERSION}" +./gradlew -version +./gradlew -PjavaVersion=${JAVA_VERSION} -Dorg.mongodb.test.uri=${MONGODB_URI} --stacktrace --info driver-core:test --tests PlainAuthenticationSpecification diff --git a/.evergreen/run-reactive-streams-tck-tests.sh b/.evergreen/run-reactive-streams-tck-tests.sh new file mode 100755 index 00000000000..6bd5e91a4ec --- /dev/null +++ b/.evergreen/run-reactive-streams-tck-tests.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +set -o xtrace # Write all commands first to stderr +set -o errexit # Exit the script with error if any of the commands fail + +############################################ +# Main Program # +############################################ +RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE:-$0}")" +. "${RELATIVE_DIR_PATH}/setup-env.bash" + +echo "Running Reactive Streams TCK tests with Java ${JAVA_VERSION}" + +./gradlew -version +./gradlew --stacktrace --info driver-reactive-streams:tckTest diff --git a/.evergreen/run-scala-tests.sh b/.evergreen/run-scala-tests.sh new file mode 100755 index 00000000000..02fd240d7c5 --- /dev/null +++ b/.evergreen/run-scala-tests.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +set -o xtrace # Write all commands first to stderr +set -o errexit # Exit the script with error if any of the commands fail + + +AUTH=${AUTH:-noauth} +SSL=${SSL:-nossl} +MONGODB_URI=${MONGODB_URI:-} +TOPOLOGY=${TOPOLOGY:-standalone} + +############################################ +# Main Program # +############################################ +RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE:-$0}")" +. "${RELATIVE_DIR_PATH}/setup-env.bash" + + +if [ "$SSL" != "nossl" ]; then + echo -e "\nSSL support not configured for Scala tests" + exit 1 +fi + +if [ "$AUTH" != "noauth" ]; then + echo -e "\nAuth support not configured for Scala tests" + exit 1 +fi + +export MULTI_MONGOS_URI_SYSTEM_PROPERTY="-Dorg.mongodb.test.multi.mongos.uri=${MONGODB_URI}" + +echo "Running scala tests with Scala $SCALA" + +./gradlew -version +./gradlew -PjavaVersion=${JAVA_VERSION} -PscalaVersion=$SCALA --stacktrace --info scalaCheck \ + -Dorg.mongodb.test.uri=${MONGODB_URI} ${MULTI_MONGOS_URI_SYSTEM_PROPERTY} diff --git a/.evergreen/run-socket-tests.sh b/.evergreen/run-socket-tests.sh new file mode 100755 index 00000000000..df215c953e5 --- /dev/null +++ b/.evergreen/run-socket-tests.sh @@ -0,0 +1,53 @@ +#!/bin/bash + +set -o xtrace # Write all commands first to stderr +set -o errexit # Exit the script with error if any of the commands fail + +# Supported/used environment variables: +# AUTH Set to enable authentication. Values are: "auth" / "noauth" (default) +# MONGODB_URI Set the suggested connection MONGODB_URI (including credentials and topology info) +# TOPOLOGY Allows you to modify variables and the MONGODB_URI based on test topology +# Supported values: "server", "replica_set", "sharded_cluster" +# COMPRESSOR Set to enable compression. Values are "snappy" and "zlib" (default is no compression) +# JDK Set the version of java to be used. Java versions can be set from the java toolchain /opt/java +# "jdk5", "jdk6", "jdk7", "jdk8", "jdk9", "jdk11" + +AUTH=${AUTH:-noauth} +MONGODB_URI=${MONGODB_URI:-} +TOPOLOGY=${TOPOLOGY:-server} +COMPRESSOR=${COMPRESSOR:-} + +############################################ +# Main Program # +############################################ +RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE:-$0}")" +. 
"${RELATIVE_DIR_PATH}/setup-env.bash" + +SOCKET_REGEX='(.*)localhost:([0-9]+)?(.*)' +while [[ $MONGODB_URI =~ $SOCKET_REGEX ]]; do + MONGODB_URI="${BASH_REMATCH[1]}%2Ftmp%2Fmongodb-${BASH_REMATCH[2]}.sock${BASH_REMATCH[3]}" +done + +# Provision the correct connection string and set up SSL if needed +if [ "$TOPOLOGY" == "sharded_cluster" ]; then + + if [ "$AUTH" = "auth" ]; then + export MONGODB_URI="mongodb://bob:pwd123@%2Ftmp%2Fmongodb-27017.sock/?authSource=admin" + else + export MONGODB_URI="mongodb://%2Ftmp%2Fmongodb-27017.sock/" + fi +fi + +if [ "$COMPRESSOR" != "" ]; then + if [[ "$MONGODB_URI" == *"?"* ]]; then + export MONGODB_URI="${MONGODB_URI}&compressors=${COMPRESSOR}" + else + export MONGODB_URI="${MONGODB_URI}/?compressors=${COMPRESSOR}" + fi +fi + +echo "Running $AUTH tests over for $TOPOLOGY and connecting to $MONGODB_URI" + +echo "Running tests with Java ${JAVA_VERSION}" +./gradlew -version +./gradlew -PjavaVersion=${JAVA_VERSION} -Dorg.mongodb.test.uri=${MONGODB_URI} ${GRADLE_EXTRA_VARS} --stacktrace --info :driver-legacy:test :driver-sync:test diff --git a/.evergreen/run-socks5-tests.sh b/.evergreen/run-socks5-tests.sh new file mode 100755 index 00000000000..c6cbb812b86 --- /dev/null +++ b/.evergreen/run-socks5-tests.sh @@ -0,0 +1,87 @@ +#!/bin/bash + +set -o xtrace # Write all commands first to stderr +set -o errexit # Exit the script with error if any of the commands fail + +SSL=${SSL:-nossl} +SOCKS_AUTH=${SOCKS_AUTH:-noauth} +MONGODB_URI=${MONGODB_URI:-} +SOCKS5_SERVER_SCRIPT="$DRIVERS_TOOLS/.evergreen/socks5srv.py" +PYTHON_BINARY=${PYTHON_BINARY:-python3} +# Grab a connection string that only refers to *one* of the hosts in MONGODB_URI +FIRST_HOST=$(echo "$MONGODB_URI" | awk -F[/:,] '{print $4":"$5}') +# Use 127.0.0.1:12345 as the URL for the single host that we connect to, +# we configure the Socks5 proxy server script to redirect from this to FIRST_HOST +export MONGODB_URI_SINGLEHOST="mongodb://127.0.0.1:12345" + +if [ "${SSL}" = "ssl" ]; then + MONGODB_URI="${MONGODB_URI}&ssl=true&sslInvalidHostNameAllowed=true" + MONGODB_URI_SINGLEHOST="${MONGODB_URI_SINGLEHOST}/?ssl=true&sslInvalidHostNameAllowed=true" +fi + +# Compute path to socks5 fake server script in a way that works on Windows +if [ "Windows_NT" == "$OS" ]; then + SOCKS5_SERVER_SCRIPT=$(cygpath -m $DRIVERS_TOOLS) +fi + +RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE:-$0}")" +. "${RELATIVE_DIR_PATH}/setup-env.bash" + +############################################ +# Functions # +############################################ + +provision_ssl () { + # We generate the keystore and truststore on every run with the certs in the drivers-tools repo + if [ ! 
-f client.pkc ]; then + openssl pkcs12 -CAfile ${DRIVERS_TOOLS}/.evergreen/x509gen/ca.pem -export -in ${DRIVERS_TOOLS}/.evergreen/x509gen/client.pem -out client.pkc -password pass:bithere + fi + + cp ${JAVA_HOME}/lib/security/cacerts mongo-truststore + ${JAVA_HOME}/bin/keytool -importcert -trustcacerts -file ${DRIVERS_TOOLS}/.evergreen/x509gen/ca.pem -keystore mongo-truststore -storepass changeit -storetype JKS -noprompt + + # We add extra gradle arguments for SSL + export GRADLE_SSL_VARS="-Pssl.enabled=true -Pssl.keyStoreType=pkcs12 -Pssl.keyStore=`pwd`/client.pkc -Pssl.keyStorePassword=bithere -Pssl.trustStoreType=jks -Pssl.trustStore=`pwd`/mongo-truststore -Pssl.trustStorePassword=changeit" +} + + +run_socks5_proxy () { +if [ "$SOCKS_AUTH" == "auth" ]; then + "$PYTHON_BINARY" "$SOCKS5_SERVER_SCRIPT" --port 1080 --auth username:p4ssw0rd --map "127.0.0.1:12345 to $FIRST_HOST" & + SOCKS5_SERVER_PID_1=$! + trap "kill $SOCKS5_SERVER_PID_1" EXIT + else + "$PYTHON_BINARY" "$SOCKS5_SERVER_SCRIPT" --port 1080 --map "127.0.0.1:12345 to $FIRST_HOST" & + SOCKS5_SERVER_PID_1=$! + trap "kill $SOCKS5_SERVER_PID_1" EXIT +fi +} + +run_socks5_prose_tests () { +if [ "$SOCKS_AUTH" == "auth" ]; then + local AUTH_ENABLED="true" +else + local AUTH_ENABLED="false" +fi + +echo "Running Socks5 tests with Java ${JAVA_VERSION} over $SSL for $TOPOLOGY and connecting to $MONGODB_URI with socks auth enabled: $AUTH_ENABLED" +./gradlew -PjavaVersion=${JAVA_VERSION} -Dorg.mongodb.test.uri=${MONGODB_URI} \ + -Dorg.mongodb.test.uri.singleHost=${MONGODB_URI_SINGLEHOST} \ + -Dorg.mongodb.test.uri.proxyHost="127.0.0.1" \ + -Dorg.mongodb.test.uri.proxyPort="1080" \ + -Dorg.mongodb.test.uri.socks.auth.enabled=${AUTH_ENABLED} \ + ${GRADLE_SSL_VARS} \ + --stacktrace --info --continue \ + driver-sync:test \ + --tests "com.mongodb.client.Socks5ProseTest*" +} + +############################################ +# Main Program # +############################################ + +# Set up keystore/truststore +provision_ssl +./gradlew -version +run_socks5_proxy +run_socks5_prose_tests diff --git a/.evergreen/run-tests.sh b/.evergreen/run-tests.sh new file mode 100755 index 00000000000..10bd5bc107d --- /dev/null +++ b/.evergreen/run-tests.sh @@ -0,0 +1,141 @@ +#!/bin/bash + +set -o xtrace # Write all commands first to stderr +set -o errexit # Exit the script with error if any of the commands fail +set -o pipefail # Exit if any command in a pipe fails + +# Supported/used environment variables: +# AUTH Set to enable authentication. Values are: "auth" / "noauth" (default) +# SSL Set to enable SSL. Values are "ssl" / "nossl" (default) +# NETTY_SSL_PROVIDER The Netty TLS/SSL protocol provider. Ignored unless SSL is "ssl" and ASYNC_TRANSPORT is "netty". Values are "JDK", "OPENSSL", null (a.k.a. "" or '') (default). +# MONGODB_URI Set the suggested connection MONGODB_URI (including credentials and topology info) +# TOPOLOGY Allows you to modify variables and the MONGODB_URI based on test topology +# Supported values: "server", "replica_set", "sharded_cluster" +# COMPRESSOR Set to enable compression. Values are "snappy" and "zlib" (default is no compression) +# ASYNC_TRANSPORT Set the async transport. Values are "nio2" or "netty". +# JDK Set the version of java to be used. 
Java versions can be set from the java toolchain /opt/java +# SLOW_TESTS_ONLY Set to true to only run the slow tests +# AWS_ACCESS_KEY_ID The AWS access key identifier for client-side encryption +# AWS_SECRET_ACCESS_KEY The AWS secret access key for client-side encryption +# AWS_ACCESS_KEY_ID_AWS_KMS_NAMED The AWS access key identifier for client-side encryption's named KMS provider. +# AWS_SECRET_ACCESS_KEY_AWS_KMS_NAMED The AWS secret access key for client-side encryption's named KMS provider. +# AWS_TEMP_ACCESS_KEY_ID The temporary AWS access key identifier for client-side encryption +# AWS_TEMP_SECRET_ACCESS_KEY The temporary AWS secret access key for client-side encryption +# AWS_TEMP_SESSION_TOKEN The temporary AWS session token for client-side encryption +# AZURE_TENANT_ID The Azure tenant identifier for client-side encryption +# AZURE_CLIENT_ID The Azure client identifier for client-side encryption +# AZURE_CLIENT_SECRET The Azure client secret for client-side encryption +# GCP_EMAIL The GCP email for client-side encryption +# GCP_PRIVATE_KEY The GCP private key for client-side encryption +# AZUREKMS_KEY_VAULT_ENDPOINT The Azure key vault endpoint for integration tests +# AZUREKMS_KEY_NAME The Azure key name endpoint for integration tests + +AUTH=${AUTH:-noauth} +SSL=${SSL:-nossl} +MONGODB_URI=${MONGODB_URI:-} +TOPOLOGY=${TOPOLOGY:-server} +COMPRESSOR=${COMPRESSOR:-} +TESTS=${TESTS:-test} +SLOW_TESTS_ONLY=${SLOW_TESTS_ONLY:-false} + +if [ -n "${ASYNC_TRANSPORT}" ]; then + readonly JAVA_SYSPROP_ASYNC_TRANSPORT="-Dorg.mongodb.test.async.transport=${ASYNC_TRANSPORT}" +fi + +if [ "${SSL}" = "ssl" ] && [ "${ASYNC_TRANSPORT}" = "netty" ] && [ -n "${NETTY_SSL_PROVIDER}" ]; then + readonly JAVA_SYSPROP_NETTY_SSL_PROVIDER="-Dorg.mongodb.test.netty.ssl.provider=${NETTY_SSL_PROVIDER}" +fi + +RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE:-$0}")" +. "${RELATIVE_DIR_PATH}/setup-env.bash" + +############################################ +# Functions # +############################################ + +provision_ssl () { + # We generate the keystore and truststore on every run with the certs in the drivers-tools repo + if [ ! 
-f client.pkc ]; then + openssl pkcs12 -CAfile ${DRIVERS_TOOLS}/.evergreen/x509gen/ca.pem -export -in ${DRIVERS_TOOLS}/.evergreen/x509gen/client.pem -out client.pkc -password pass:bithere + fi + + cp ${JAVA_HOME}/lib/security/cacerts mongo-truststore + ${JAVA_HOME}/bin/keytool -importcert -trustcacerts -file ${DRIVERS_TOOLS}/.evergreen/x509gen/ca.pem -keystore mongo-truststore -storepass changeit -storetype JKS -noprompt + + # We add extra gradle arguments for SSL + export GRADLE_EXTRA_VARS="-Pssl.enabled=true -Pssl.keyStoreType=pkcs12 -Pssl.keyStore=`pwd`/client.pkc -Pssl.keyStorePassword=bithere -Pssl.trustStoreType=jks -Pssl.trustStore=`pwd`/mongo-truststore -Pssl.trustStorePassword=changeit" +} + +provision_multi_mongos_uri_for_ssl () { + # Arguments for auth + SSL + if [ "$AUTH" != "noauth" ] || [ "$TOPOLOGY" == "replica_set" ]; then + export MONGODB_URI="${MONGODB_URI}&ssl=true&sslInvalidHostNameAllowed=true" + export MULTI_MONGOS_URI="${MULTI_MONGOS_URI}&ssl=true&sslInvalidHostNameAllowed=true" + else + export MONGODB_URI="${MONGODB_URI}/?ssl=true&sslInvalidHostNameAllowed=true" + export MULTI_MONGOS_URI="${MULTI_MONGOS_URI}/?ssl=true&sslInvalidHostNameAllowed=true" + fi +} + +############################################ +# Main Program # +############################################ + +# Provision the correct connection string and set up SSL if needed +if [ "$TOPOLOGY" == "sharded_cluster" ]; then + if [ "$AUTH" = "auth" ]; then + export MULTI_MONGOS_URI="mongodb://bob:pwd123@localhost:27017,localhost:27018/?authSource=admin" + else + export MULTI_MONGOS_URI="${MONGODB_URI}" + fi + + if [ "$AUTH" = "auth" ]; then + export MONGODB_URI="mongodb://bob:pwd123@localhost:27017/?authSource=admin" + else + export MONGODB_URI="mongodb://localhost:27017" + fi +fi + +if [ "$COMPRESSOR" != "" ]; then + if [[ "$MONGODB_URI" == *"?"* ]]; then + export MONGODB_URI="${MONGODB_URI}&compressors=${COMPRESSOR}" + else + export MONGODB_URI="${MONGODB_URI}/?compressors=${COMPRESSOR}" + fi + + if [[ "$MULTI_MONGOS_URI" == *"?"* ]]; then + export MULTI_MONGOS_URI="${MULTI_MONGOS_URI}&compressors=${COMPRESSOR}" + else + export MULTI_MONGOS_URI="${MULTI_MONGOS_URI}/?compressors=${COMPRESSOR}" + fi +fi + +# Set up keystore/truststore regardless, as they are required for testing KMIP +provision_ssl + +if [ "$SSL" != "nossl" ]; then + provision_multi_mongos_uri_for_ssl +fi + +export MULTI_MONGOS_URI_SYSTEM_PROPERTY="-Dorg.mongodb.test.multi.mongos.uri=${MULTI_MONGOS_URI}" + +# For now it's sufficient to hard-code the API version to "1", since it's the only API version +if [ ! 
-z "$REQUIRE_API_VERSION" ]; then + export API_VERSION="-Dorg.mongodb.test.api.version=1" +fi + +echo "Running $AUTH tests over $SSL for $TOPOLOGY and connecting to $MONGODB_URI" + +echo "Running tests with Java ${JAVA_VERSION}" +./gradlew -version + +./gradlew -PjavaVersion=${JAVA_VERSION} -Dorg.mongodb.test.uri=${MONGODB_URI} \ + ${MULTI_MONGOS_URI_SYSTEM_PROPERTY} ${API_VERSION} ${GRADLE_EXTRA_VARS} \ + ${JAVA_SYSPROP_ASYNC_TRANSPORT} ${JAVA_SYSPROP_NETTY_SSL_PROVIDER} \ + -Dorg.mongodb.test.fle.on.demand.credential.test.failure.enabled=true \ + --stacktrace --info --continue ${TESTS} | tee -a logs.txt + +if grep -q 'LEAK:' logs.txt ; then + echo "Netty Leak detected, please inspect build log" + exit 1 +fi diff --git a/.evergreen/run-x509-auth-tests.sh b/.evergreen/run-x509-auth-tests.sh new file mode 100755 index 00000000000..93b23fca1ca --- /dev/null +++ b/.evergreen/run-x509-auth-tests.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +# Exit the script with error if any of the commands fail +set -o errexit + +# Supported/used environment variables: +# JDK Set the version of java to be used. Java versions can be set from the java toolchain /opt/java +# ATLAS_X509_DEV Set the connection string for the Atlas X509 development cluster. +# ATLAS_X509_DEV_CERT_BASE64 Set the base64 encoded contents of a PEM file containing the client certificate (signed by the mongodb dev CA) and client private key for the X509 authentication on development cluster. +# ATLAS_X509_DEV_CERT_NOUSER_BASE64 Set the base64 encoded contents of a PEM file containing the client certificate (signed by the mongodb dev CA) and client private key for the X509 authentication on development cluster with the subject name that does not exist on the server/cluster. + +RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE:-$0}")" +. "${RELATIVE_DIR_PATH}/setup-env.bash" + +MONGODB_URI=${ATLAS_X509_DEV:-} +echo "$MONGODB_URI" +ATLAS_X509_DEV_CERT_BASE64=${ATLAS_X509_DEV_CERT_BASE64:-} +ATLAS_X509_DEV_CERT_NOUSER_BASE64=${ATLAS_X509_DEV_CERT_NOUSER_BASE64:-} + +############################################ +# Functions # +############################################ + +provision_keystores () { + # Base64 decode contents of a PEM holder for client certificate (signed by the mongodb dev CA) and private key + echo "${ATLAS_X509_DEV_CERT_BASE64}" | base64 --decode > ca_and_pk.pem + echo "${ATLAS_X509_DEV_CERT_NOUSER_BASE64}" | base64 --decode > ca_and_pk_no_user.pem + + # Build the pkcs12 (keystore). We include the leaf-only certificate (with public key) and private key in the keystore, + # assuming the signed certificate is already trusted by the Atlas as issuer is MongoDB dev CA. + echo "Creating PKCS12 keystore from ca_and_pk.pem" + openssl pkcs12 -export \ + -in ca_and_pk.pem \ + -out existing_user.p12 \ + -password pass:test + + echo "Creating PKCS12 keystore from ca_and_pk_no_user.pem" + openssl pkcs12 -export \ + -in ca_and_pk_no_user.pem \ + -out non_existing_user.p12 \ + -password pass:test +} + +############################################ +# Main Program # +############################################ +echo "Running X509 Authentication tests with Java ${JAVA_VERSION}" + +# Set up keystores for x509 authentication. 
+provision_keystores
+
+./gradlew -PjavaVersion=${JAVA_VERSION} -Dorg.mongodb.test.uri=${MONGODB_URI} --info --continue \
+    -Dorg.mongodb.test.x509.auth.enabled=true \
+    -Dorg.mongodb.test.x509.auth.keystore.location="$(pwd)" \
+    driver-sync:test --tests X509AuthenticationTest \
+    driver-reactive-streams:test --tests X509AuthenticationTest
\ No newline at end of file
diff --git a/.evergreen/setup-env.bash b/.evergreen/setup-env.bash
new file mode 100644
index 00000000000..cae67cd65eb
--- /dev/null
+++ b/.evergreen/setup-env.bash
@@ -0,0 +1,51 @@
+# Java configurations for evergreen
+
+export JDK8="/opt/java/jdk8"
+export JDK11="/opt/java/jdk11"
+export JDK17="/opt/java/jdk17"
+export JDK21="/opt/java/jdk21"
+# note that `JDK21_GRAALVM` is used in `run-graalvm-native-image-app.sh`
+# by dynamically constructing the variable name
+export JDK21_GRAALVM="/opt/java/jdk21-graalce"
+
+if [ -d "$JDK17" ]; then
+  export JAVA_HOME=$JDK17
+fi
+
+export JAVA_VERSION=${JAVA_VERSION:-17}
+
+echo "Java Configs:"
+echo "Java Home: ${JAVA_HOME}"
+echo "Java test version: ${JAVA_VERSION}"
+
+# Rename environment variables for AWS, Azure, and GCP
+if [ -f secrets-export.sh ]; then
+  echo "Renaming secrets env variables"
+  . secrets-export.sh
+
+  export AWS_ACCESS_KEY_ID=$FLE_AWS_ACCESS_KEY_ID
+  export AWS_SECRET_ACCESS_KEY=$FLE_AWS_SECRET_ACCESS_KEY
+  export AWS_DEFAULT_REGION=$FLE_AWS_DEFAULT_REGION
+
+  export AWS_ACCESS_KEY_ID_AWS_KMS_NAMED=$FLE_AWS_KEY2
+  export AWS_SECRET_ACCESS_KEY_AWS_KMS_NAMED=$FLE_AWS_SECRET2
+
+  export AWS_TEMP_ACCESS_KEY_ID=$CSFLE_AWS_TEMP_ACCESS_KEY_ID
+  export AWS_TEMP_SECRET_ACCESS_KEY=$CSFLE_AWS_TEMP_SECRET_ACCESS_KEY
+  export AWS_TEMP_SESSION_TOKEN=$CSFLE_AWS_TEMP_SESSION_TOKEN
+
+  export AZURE_CLIENT_ID=$FLE_AZURE_CLIENTID
+  export AZURE_TENANT_ID=$FLE_AZURE_TENANTID
+  export AZURE_CLIENT_SECRET=$FLE_AZURE_CLIENTSECRET
+
+  export GCP_EMAIL=$FLE_GCP_EMAIL
+  export GCP_PRIVATE_KEY=$FLE_GCP_PRIVATEKEY
+
+  # Unset AWS_SESSION_TOKEN if it is empty (the default expansion keeps this
+  # working when the variable is unset and the caller has `set -u` enabled)
+  if [ -z "${AWS_SESSION_TOKEN:-}" ]; then
+    unset AWS_SESSION_TOKEN
+  fi
+
+else
+  echo "No secrets env variables found to rename"
+fi
diff --git a/.evergreen/ssdlc-report.sh b/.evergreen/ssdlc-report.sh
new file mode 100755
index 00000000000..56d5957f5ab
--- /dev/null
+++ b/.evergreen/ssdlc-report.sh
@@ -0,0 +1,100 @@
+#!/usr/bin/env bash
+
+set -eu
+
+# Supported/used environment variables:
+#   PRODUCT_NAME
+#   PRODUCT_VERSION
+#   EVERGREEN_VERSION_ID
+
+if [ -z "${PRODUCT_NAME:-}" ]; then
+    printf "\nPRODUCT_NAME must be set to a non-empty string\n"
+    exit 1
+fi
+if [ -z "${PRODUCT_VERSION:-}" ]; then
+    printf "\nPRODUCT_VERSION must be set to a non-empty string\n"
+    exit 1
+fi
+if [ -z "${EVERGREEN_VERSION_ID:-}" ]; then
+    printf "\nEVERGREEN_VERSION_ID must be set to a non-empty string\n"
+    exit 1
+fi
+
+############################################
+#            Main Program                  #
+############################################
+RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE[0]:-$0}")"
+source "${RELATIVE_DIR_PATH}/setup-env.bash"
+
+printf "\nCreating SSDLC reports\n"
+printf "\nProduct name: %s\n" "${PRODUCT_NAME}"
+printf "\nProduct version: %s\n" "${PRODUCT_VERSION}"
+
+declare -r SSDLC_PATH="${RELATIVE_DIR_PATH}/../build/ssdlc"
+declare -r SSDLC_STATIC_ANALYSIS_REPORTS_PATH="${SSDLC_PATH}/static-analysis-reports"
+mkdir "${SSDLC_PATH}"
+mkdir "${SSDLC_STATIC_ANALYSIS_REPORTS_PATH}"
+
+declare -r EVERGREEN_PROJECT_NAME_PREFIX="${PRODUCT_NAME//-/_}"
+declare -r EVERGREEN_BUILD_URL_PREFIX="https://spruce.mongodb.com/version"
+declare -r
GIT_TAG="r${PRODUCT_VERSION}" +GIT_COMMIT_HASH="$(git rev-list --ignore-missing -n 1 "${GIT_TAG}")" +set +e + GIT_BRANCH_DEFAULT="$(git branch -a --contains "${GIT_TAG}" | grep 'main$')" + GIT_BRANCH_PATCH="$(git branch -a --contains "${GIT_TAG}" | grep '\.x$')" +set -e +if [ -n "${GIT_BRANCH_DEFAULT}" ]; then + declare -r EVERGREEN_BUILD_URL="${EVERGREEN_BUILD_URL_PREFIX}/${EVERGREEN_PROJECT_NAME_PREFIX}_${GIT_COMMIT_HASH}" +elif [ -n "${GIT_BRANCH_PATCH}" ]; then + # strip out the patch version + declare -r EVERGREEN_PROJECT_NAME_SUFFIX="${PRODUCT_VERSION%.*}" + declare -r EVERGREEN_BUILD_URL="${EVERGREEN_BUILD_URL_PREFIX}/${EVERGREEN_PROJECT_NAME_PREFIX}_${EVERGREEN_PROJECT_NAME_SUFFIX}_${GIT_COMMIT_HASH}" +elif [[ "${PRODUCT_NAME}" == *'-snapshot' ]]; then + declare -r EVERGREEN_BUILD_URL="${EVERGREEN_BUILD_URL_PREFIX}/${EVERGREEN_VERSION_ID}" +else + printf "\nFailed to compute EVERGREEN_BUILD_URL\n" + exit 1 +fi +printf "\nEvergreen build URL: %s\n" "${EVERGREEN_BUILD_URL}" + +PRODUCT_RELEASE_CREATOR="$(git log --ignore-missing "${GIT_TAG}"^.."${GIT_TAG}" --simplify-by-decoration --pretty='format:%aN')" +printf "\nProduct release creator: %s\n" "${PRODUCT_RELEASE_CREATOR}" + +printf "\nCreating SpotBugs SARIF reports\n" +./gradlew -version +set +e + # This `gradlew` command is expected to exit with a non-zero exit status, + # because it reports all the findings that we normally explicitly exclude as "No Fix Needed"/"False Positive". + ./gradlew -PssdlcReport.enabled=true --continue -x test -x integrationTest -x spotlessApply check scalaCheck +set -e +printf "\nSpotBugs created the following SARIF reports\n" +IFS=$'\n' +declare -a SARIF_PATHS=($(find "${RELATIVE_DIR_PATH}/.." -path "*/spotbugs/*.sarif")) +unset IFS +for SARIF_PATH in "${SARIF_PATHS[@]}"; do + GRADLE_PROJECT_NAME="$(basename "$(dirname "$(dirname "$(dirname "$(dirname "${SARIF_PATH}")")")")")" + NEW_SARIF_PATH="${SSDLC_STATIC_ANALYSIS_REPORTS_PATH}/${GRADLE_PROJECT_NAME}_$(basename "${SARIF_PATH}")" + cp "${SARIF_PATH}" "${NEW_SARIF_PATH}" + printf "%s\n" "${NEW_SARIF_PATH}" +done + +printf "\nCreating SSDLC compliance report\n" +declare -r TEMPLATE_SSDLC_REPORT_PATH="${RELATIVE_DIR_PATH}/template_ssdlc_compliance_report.md" +declare -r SSDLC_REPORT_PATH="${SSDLC_PATH}/ssdlc_compliance_report.md" +cp "${TEMPLATE_SSDLC_REPORT_PATH}" "${SSDLC_REPORT_PATH}" +declare -a SED_EDIT_IN_PLACE_OPTION +if [[ "$OSTYPE" == "darwin"* ]]; then + SED_EDIT_IN_PLACE_OPTION=(-i '') +else + SED_EDIT_IN_PLACE_OPTION=(-i) +fi +sed "${SED_EDIT_IN_PLACE_OPTION[@]}" \ + -e "s/\${product_name}/${PRODUCT_NAME}/g" \ + -e "s/\${product_version}/${PRODUCT_VERSION}/g" \ + -e "s/\${report_date_utc}/$(date -u +%Y-%m-%d)/g" \ + -e "s/\${product_release_creator}/${PRODUCT_RELEASE_CREATOR}/g" \ + -e "s>\${evergreen_build_url}>${EVERGREEN_BUILD_URL}>g" \ + "${SSDLC_REPORT_PATH}" +printf "%s\n" "${SSDLC_REPORT_PATH}" + +printf "\n" diff --git a/.evergreen/static-checks.sh b/.evergreen/static-checks.sh new file mode 100755 index 00000000000..1accf5c1684 --- /dev/null +++ b/.evergreen/static-checks.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +set -o xtrace # Write all commands first to stderr +set -o errexit # Exit the script with error if any of the commands fail + +############################################ +# Main Program # +############################################ +RELATIVE_DIR_PATH="$(dirname "${BASH_SOURCE[0]:-$0}")" +. 
"${RELATIVE_DIR_PATH}/setup-env.bash" + +echo "Compiling JVM drivers" + +./gradlew -version +./gradlew -PxmlReports.enabled=true --info -x test -x integrationTest -x spotlessApply clean check scalaCheck jar testClasses docs diff --git a/.evergreen/template_ssdlc_compliance_report.md b/.evergreen/template_ssdlc_compliance_report.md new file mode 100644 index 00000000000..adadc60fd71 --- /dev/null +++ b/.evergreen/template_ssdlc_compliance_report.md @@ -0,0 +1,67 @@ +# ${product_name} SSDLC compliance report + +This report is available at +. + + + + + + + + + + + + + + + + + + +
Product name${product_name}
Product version${product_version}
Release creator + ${product_release_creator} +

+ Refer to data in Papertrail for more details. + There is currently no official way to serve that data. +

+
Report date, UTC${report_date_utc}
+
+## Process document
+
+Blocked on .
+
+The MongoDB SSDLC policy is available at
+.
+
+## Third-party dependency information
+
+There are no dependencies to report vulnerabilities of.
+Our [SBOM](https://docs.devprod.prod.corp.mongodb.com/mms/python/src/sbom/silkbomb/docs/CYCLONEDX/) lite
+is .
+
+## Static analysis findings
+
+The static analysis findings are available at
+.
+All the findings in the aforementioned reports
+are either of the MongoDB status "False Positive" or "No Fix Needed",
+because code that has any other findings cannot technically get into the product.
+
+ may also be of interest.
+
+## Security testing results
+
+The testing results are available at
+<${evergreen_build_url}>.
+
+See the driver security testing summary
+
+for the description of what is tested.
+
+## Signature information
+
+The product artifacts are signed.
+The signatures can be verified by following instructions at
+.
diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
new file mode 100644
index 00000000000..e829944e910
--- /dev/null
+++ b/.git-blame-ignore-revs
@@ -0,0 +1,11 @@
+# .git-blame-ignore-revs
+# Checkstyle fixes
+94780bc8b72c62d9bc09beaa9ac62b942debab5f
+# Copyright fixes
+0aa2ec20d5215c0ac727602dd2cd891c22c69ba8
+# Scala spotless changes
+fd21430c967571ed172259cc4100f291257a9a01
+# IntelliJ automated code cleanup
+d9aa6044e1a6b440bcb013c330497f2813484050
+# Remove `final` in catch clauses
+4b3b48546fb0457e5c515ccfe8780e373ad7de5f
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 00000000000..16dec8d22f4
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,2 @@
+# Set default behaviour, in case users don't have core.autocrlf set.
+* text=auto
\ No newline at end of file
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 00000000000..28c26f58754
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,2 @@
+# Listing code owners is required by DRIVERS-3098
+* @mongodb/dbx-java
\ No newline at end of file
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 00000000000..80bd0c9bf01
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,9 @@
+version: 2
+updates:
+  - package-ecosystem: "gitsubmodule"
+    directory: "/"
+    schedule:
+      interval: "weekly"
+    commit-message:
+      prefix: "build"
+      include: "scope"
diff --git a/.github/workflows/bump-and-tag.sh b/.github/workflows/bump-and-tag.sh
new file mode 100755
index 00000000000..0875db6bc59
--- /dev/null
+++ b/.github/workflows/bump-and-tag.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+set -e
+
+if [ "$#" -ne 3 ]; then
+  echo "Usage: $0 " >&2
+  exit 1
+fi
+
+CURRENT_VERSION=$1
+RELEASE_VERSION=$2
+NEXT_VERSION=$3
+
+SCRIPT_DIR=$(dirname "${BASH_SOURCE[0]}")
+
+echo "Bump version in gradle.properties to ${RELEASE_VERSION}"
+${SCRIPT_DIR}/bump-version.sh "${CURRENT_VERSION}-SNAPSHOT" "${RELEASE_VERSION}"
+
+echo "Create release tag for ${RELEASE_VERSION}"
+git tag -a -m "${RELEASE_VERSION}" r${RELEASE_VERSION}
+
+echo "Bump to snapshot version for ${NEXT_VERSION}"
+${SCRIPT_DIR}/bump-version.sh "${RELEASE_VERSION}" "${NEXT_VERSION}-SNAPSHOT"
diff --git a/.github/workflows/bump-version.sh b/.github/workflows/bump-version.sh
new file mode 100755
index 00000000000..eaa92163bfa
--- /dev/null
+++ b/.github/workflows/bump-version.sh
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+set -e
+
+if [ "$#" -ne 2 ]; then
+  echo "Usage: $0 " >&2
+  exit 1
+fi
+
+FROM_VERSION=$1
+TO_VERSION=$2
+
+sed --in-place "s/version=${FROM_VERSION}/version=${TO_VERSION}/g" gradle.properties
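+# (The GNU-style `--in-place` option used above requires GNU sed; the release
+# workflow runs this script on ubuntu-latest, where GNU sed is the default.)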
+git commit -m "Version: bump ${TO_VERSION}" gradle.properties diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 00000000000..005ac768f3b --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,139 @@ +name: "Release New Version" +run-name: "Release ${{ inputs.version }}" + +on: + workflow_dispatch: + inputs: + version: + description: "The version to be released (e.g. 1.2.3)" + required: true + type: "string" + +jobs: + prepare-release: + environment: release + name: "Prepare release" + runs-on: ubuntu-latest + permissions: + # Write permission for id-token is necessary to generate a new token for the GitHub App + id-token: write + # Write permission for contents is to ensure we're allowed to push to the repository + contents: write + + steps: + - name: "Create release output" + run: echo '🎬 Release process for version ${{ env.RELEASE_VERSION }} started by @${{ github.triggering_actor }}' >> $GITHUB_STEP_SUMMARY + + - uses: mongodb-labs/drivers-github-tools/secure-checkout@v2 + with: + app_id: ${{ vars.APP_ID }} + private_key: ${{ secrets.APP_PRIVATE_KEY }} + + - name: "Store version numbers in env variables" + # The awk command to increase the version number was copied from + # StackOverflow: https://stackoverflow.com/a/61921674/3959933 + # Variables set here: + # RELEASE_VERSION: The version the deployment is expected to create + # RELEASE_VERSION_WITHOUT_SUFFIX: The version without any stability + # suffixes. Example: 5.2.0-beta0 => 5.2.0 + # NEXT_VERSION: The next version to be released. For pre-releases, the + # next version is a snapshot of the pre-release version. Examples: + # 5.2.0 => 5.2.1; 5.2.0-beta0 => 5.2.0 + # RELEASE_BRANCH: The name of the stable branch for this release series + # Example: 5.2.0 => 5.2.x + # Example: 5.2.0-beta1 => + run: | + echo RELEASE_VERSION=${{ inputs.version }} >> $GITHUB_ENV + echo RELEASE_VERSION_WITHOUT_SUFFIX=$(echo ${{ inputs.version }} | awk -F- '{print $1}') >> $GITHUB_ENV + if [[ "${{ inputs.version }}" =~ (alpha|beta|rc)[0-9]+$ ]]; then + echo NEXT_VERSION=$(echo ${{ inputs.version }} | awk -F- '{print $1}') >> $GITHUB_ENV + echo RELEASE_BRANCH=${{ github.ref_name }} >> $GITHUB_ENV + else + echo NEXT_VERSION=$(echo ${{ inputs.version }} | awk -F. -v OFS=. '{$NF += 1 ; print}') >> $GITHUB_ENV + echo RELEASE_BRANCH=$(echo ${{ inputs.version }} | awk -F. -v OFS=. '{$NF = "x" ; print}') >> $GITHUB_ENV + fi + + - name: "Ensure current snapshot version matches release version" + run: | + grep -q "version=${{ env.RELEASE_VERSION_WITHOUT_SUFFIX }}-SNAPSHOT" gradle.properties + if [[ $? != 0 ]]; then + echo '❌ Release failed: version in gradle.properties is not a snapshot for release version ${{ inputs.version }}' >> $GITHUB_STEP_SUMMARY + exit 1 + fi + + - name: "Ensure release tag does not already exist" + run: | + if [[ $(git tag -l r${{ env.RELEASE_VERSION }}) == r${{ env.RELEASE_VERSION }} ]]; then + echo '❌ Release failed: tag for version ${{ inputs.version }} already exists' >> $GITHUB_STEP_SUMMARY + exit 1 + fi + + # For patch releases (A.B.C where C != 0), we expect the release to be + # triggered from the A.B.x maintenance branch. 
We use the release version + # without suffixes to avoid mistakes when making pre-releases + - name: "Fail if patch release is created from wrong release branch" + if: ${{ !endsWith(env.RELEASE_VERSION_WITHOUT_SUFFIX, '.0') && env.RELEASE_BRANCH != github.ref_name }} + run: | + echo '❌ Release failed due to branch mismatch: expected ${{ inputs.version }} to be released from ${{ env.RELEASE_BRANCH }}, got ${{ github.ref_name }}' >> $GITHUB_STEP_SUMMARY + exit 1 + + # For non-patch releases (A.B.C where C == 0), we expect the release to + # be triggered from main or the A.B.x maintenance branch. This includes + # pre-releases for any non-patch releases, e.g. 5.2.0-beta1 + - name: "Fail if non-patch release is created from wrong release branch" + if: ${{ endsWith(env.RELEASE_VERSION_WITHOUT_SUFFIX, '.0') && env.RELEASE_BRANCH != github.ref_name && github.ref_name != 'main' }} + run: | + echo '❌ Release failed due to branch mismatch: expected ${{ inputs.version }} to be released from ${{ env.RELEASE_BRANCH }} or main, got ${{ github.ref_name }}' >> $GITHUB_STEP_SUMMARY + exit 1 + + # Set commit author information to the user that triggered the release workflow + - name: "Set git author information" + run: | + GITHUB_USER_NAME=$(gh api users/${{ github.actor }} --jq '.name') + GITHUB_USER_ID=$(gh api users/${{ github.actor }} --jq '.id') + git config user.name "${GITHUB_USER_NAME}" + git config user.email "${GITHUB_USER_ID}+${{ github.actor }}@users.noreply.github.com" + + # If a non-patch release is created from a branch other than its + # maintenance branch, create that branch from the current one and push it + # Pre-releases don't have this behaviour, so we can check the full release + # version including stability suffixes to exclude those + - name: "Create new release branch for non-patch release" + if: ${{ endsWith(env.RELEASE_VERSION, '.0') && env.RELEASE_BRANCH != github.ref_name }} + run: | + echo '🆕 Creating new release branch ${{ env.RELEASE_BRANCH }} from ${{ github.ref_name }}' >> $GITHUB_STEP_SUMMARY + git checkout -b ${{ env.RELEASE_BRANCH }} + NEXT_MINOR_VERSION=$(echo "${{ env.RELEASE_VERSION }}" | awk -F. -v OFS=. 
'{$2 += 1 ; $NF = 0 ; print}') + echo "➡️ Bumping version for ${{ github.ref_name }} branch to ${NEXT_MINOR_VERSION}" >> $GITHUB_STEP_SUMMARY + git checkout ${{ github.ref_name }} + .github/workflows/bump-version.sh "${{ env.RELEASE_VERSION_WITHOUT_SUFFIX }}-SNAPSHOT" "${NEXT_MINOR_VERSION}-SNAPSHOT" + git push origin ${{ github.ref_name }} + git checkout ${{ env.RELEASE_BRANCH }} + + # This step bumps version numbers in gradle.properties and creates git artifacts for the release + - name: "Bump version numbers and create release tag" + run: .github/workflows/bump-and-tag.sh "${{ env.RELEASE_VERSION_WITHOUT_SUFFIX }}" "${{ env.RELEASE_VERSION }}" "${{ env.NEXT_VERSION }}" + + - name: "Push release branch and tag" + run: | + git push origin ${{ env.RELEASE_BRANCH }} + git push origin r${{ env.RELEASE_VERSION }} + + - name: "Create draft release with generated changelog" + run: | + if [[ "${{ inputs.version }}" =~ (alpha|beta|rc) ]]; then + PRERELEASE="--prerelease --latest=false" + fi + echo "RELEASE_URL=$(\ + gh release create r${RELEASE_VERSION} \ + ${PRERELEASE} \ + --target ${{ env.RELEASE_BRANCH }} \ + --title "Java Driver ${{ env.RELEASE_VERSION }} ($(date '+%B %d, %Y'))" \ + --generate-notes \ + --draft\ + )" >> "$GITHUB_ENV" + + - name: "Set summary" + run: | + echo '🚀 Created tag and drafted release for version [${{ env.RELEASE_VERSION }}](${{ env.RELEASE_URL }})' >> $GITHUB_STEP_SUMMARY + echo '✍️ You may now update the release notes and publish the release when ready' >> $GITHUB_STEP_SUMMARY diff --git a/.gitignore b/.gitignore index 1c4a883bee2..6398e8490e8 100644 --- a/.gitignore +++ b/.gitignore @@ -1,58 +1,60 @@ -commit-* -.jsshell -.js*shell - -*.[oa] *~ -*.pyc -*CVS -*.so -*.d -*.class* -.cvsignore* -.svn -.project .#* .git *# -*.swp -.MQLshell -#os x stuff +# os x stuff *Thumbs.db* *.DS_Store -src/main/Foo.java -src/main/Eliot.java -settings.py -foo.java - -10gen.properties -logs -test-output +# Build artifacts build -crap -hs_err_* -doc -docs -bin target -include/jython/cachedir -mongo.jar -mongo-*.jar -bson.jar -bson-*.jar - -TAGS +out +mongo*.jar -#test local files -src/test/ed/webtests/webtest-local.bash +# Eclipse files +.classpath +.project +.settings +# Intellij IDEA files *.ipr -*.iml *.iws *.iml -.idea +*.idea +workspace.xml atlassian-ide-plugin.xml -out/* +# gradle +.gradle + +# code review +codereview.rc + +# evergreen +expansion.yml + +# local settings +**/gradle.properties +local.properties + +# jenv +.java-version + +#sdkman +.sdkmanrc + +# mongocryptd +**/mongocryptd*.pid + +# shell scripts +*.sh +!.evergreen/*.sh + +# security-sensitive files +*.gpg + +# bin build directories +**/bin + diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 00000000000..a9ac62f04bb --- /dev/null +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "specifications"] + path = driver-core/src/test/resources/specifications + url = https://github.com/mongodb/specifications diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index d10e8d879c0..00000000000 --- a/.travis.yml +++ /dev/null @@ -1,23 +0,0 @@ -language: java - -jdk: - - oraclejdk7 - - openjdk6 - -notifications: - email: - recipients: - - jeff.yemin@10gen.com - - trisha.gee@10gen.com - - uladzimir.mihura@10gen.com - on_success: change - on_failure: always - flowdock: - secure: "Ziw1Be2tV0QAYuiYDrepfdNH/oBfNhnXFMji3AdRi6MePVpc6CtOOT/b9Fra\nQgMMrX3AHk/QIDo6QQx9/aVB3FS1fzOPGQkSsEZmiljZU7wZCct1sSSyttf/\nsRG0lyTnmgFNTHyTSDT3JbXAkyF/vJmG/JJJoBUZhmFxzR2fM0Q=" - -services: - - mongodb - -branches: - 
only: - - master diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 23f14a8abf0..88827db052f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,40 +2,39 @@ Thank you for your interest in contributing to the MongoDB Java driver. -We are building this software together and strongly encourage contributions -from the community that are within the guidelines set forth below. +We are building this software together and strongly encourage contributions from the community that are within the guidelines set forth +below. Bug Fixes and New Features -------------------------- -Before starting to write code, look for existing [tickets] -(https://jira.mongodb.org/browse/JAVA) or [create one] -(https://jira.mongodb.org/secure/CreateIssue!default.jspa) -for your bug, issue, or feature request. This helps the community -avoid working on something that might not be of interest or which -has already been addressed. +Before starting to write code, look for existing [tickets](https://jira.mongodb.org/browse/JAVA) or +[create one](https://jira.mongodb.org/secure/CreateIssue!default.jspa) for your bug, issue, or feature request. This helps the community +avoid working on something that might not be of interest or which has already been addressed. Pull Requests ------------- -Pull requests should be made against the master (development) -branch and include relevant tests, if applicable. The driver follows -the Git-Flow branching model where the traditional master branch is -known as release and the master (default) branch is considered under -development. +Pull requests should generally be made against the master (default) branch and include relevant tests, if applicable. -Code should compile and tests should pass under all Java versions -which the driver currently supports. Currently the Java driver supports -a minimum version of Java 5. Please run 'ant test' to confirm. If your -tests modify code related to replica sets, please ensure that you run the -tests with a replica set where the primary is on port 27017. +Code should compile with the Java 9 compiler and tests should pass under all Java versions which the driver currently +supports. Currently the Java driver supports a minimum version of Java 8. Please run './gradlew test' to confirm. By default, running the +tests requires that you start a mongod server on localhost, listening on the default port and configured to run with +[`enableTestCommands`](https://www.mongodb.com/docs/manual/reference/parameters/#param.enableTestCommands), which may be set with the +`--setParameter enableTestCommands=1` command-line parameter. At minimum, please test against the latest release version of the MongoDB +server. -The results of pull request testing will be appended to the request. -If any tests do not pass, or relevant tests are not included the pull -request will not be considered. +The results of pull request testing will be appended to the request. If any tests do not pass, or relevant tests are not included, the +pull request will not be considered. Talk To Us ---------- -If you want to work on something or have questions / complaints please reach -out to us by creating a Question issue at (https://jira.mongodb.org/secure/CreateIssue!default.jspa). +If you have questions about using the driver, please reach out on the +[MongoDB Community Forums](https://www.mongodb.com/community/forums/tags/c/data/drivers/7/java). + +Thanks to all the people who have already contributed! 
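+
+As noted above, running the tests requires a mongod started with the
+`enableTestCommands` parameter. For reference, a minimal local server can be
+started like this (the `--dbpath` value is illustrative):
+
+    mongod --dbpath /tmp/mongo-java-driver-test --setParameter enableTestCommands=1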
+ + + + diff --git a/History.md b/History.md deleted file mode 100644 index 863790fb5b7..00000000000 --- a/History.md +++ /dev/null @@ -1,99 +0,0 @@ - -2.7.3 / 2012-01-30 -================== - - * synchronized access to encoder/decoder - * JAVA-505: Made encoder creation work just like decoder creation - + using cached DBDecoder in most cases to avoid excessive memory allocation caused by too many instances of DefaultDBDecoder being created - * JAVA-505 / JAVA-421 - Regression in performance of Java Driver should be rolled back (GC Related) - -2.7.3RC1 / 2012-01-17 -================== - - * Remove defunct BSONInputTest - * JAVA-505 / JAVA-421 - Regression in performance of Java Driver should be rolled back (GC Related) - -2.7.2 / 2011-11-10 -================== - - * JAVA-469: java.net.NetworkInterface.getNetworkInterfaces may fail with IBM JVM, which prevents from using driver - * deprecated replica pair constructors. - - updated javadocs and removed replica pair javadocs in class doc. - * JAVA-428: Fixed an issue where read preferences not set on cursor - -2.7.1 / 2011-11-08 -================== - - * JAVA-467 - added _check call to getServerAddress if _it is null - * JAVA-467 - Moved variable calls to method to fix read preference hierarchy - * simplified getServerAddress method. - -2.7.0 / 2011-11-04 -=================== - - * Released Java Driver 2.7.0 - * See change notes from Release Candidate series - * Please report any bugs or issues to https://jira.mongodb.org/browse/JAVA - - -2.7.0-rc4 / 2011-11-03 -======================= - - * New Secondary Selection tests for Replica Sets introduced - * To correct a regression, make WriteConcern immutable (Otherwise you can mutate static presets like SAFE at runtime) * Reintroduced constructors which accept continueOnErrorForInsert args * To enable you to set continueOnErrorForInsert with the presets like SAFE, immutable "new WriteConcern like this with COEI changed" method added. - -2.7.0-rc3 / 2011-10-31 -======================= - - * changed if statement to improve readability. - * JAVA-462: GridFSInputFile does not close input streams when constructed by the driver (greenlaw110) - - Add closeStreamOnPersist option when creating GridFSInputFile - * JAVA-425 fixes - - attempt to clean up and standardize writeConcern - - throw exception if w is wrong type - - fix cast exception in case W is a String - * Documented continue on error better. - * Close inputstream of GridFSInputFile once chunk saved - * JAVA-461: the logic to spread requests around slaves may select a slave over latency limit - * Reset buffer when the object is checked out and before adding back. - * added MongoOptions test - * use the socket factory from the Mongo for ReplicaSetStatus connections - * added MongoOptions.copy - -2.7.0-rc2 / 2011-10-26 -======================== - - * JAVA-459: smooth the latency measurements (for secondary/slave server selection) - * JAVA-428: Fixed edge cases where slaveOK / ReadPreference.SECONDARY wasn't working - * JAVA-444: make ensureIndex first do a read on index collection to see if index exists * If an embedded field was referenced in an index a hard failure occurred, due to triggering of the 'no dots in key names' logic via the insert. Moved code to use the lower level API method which permits disabling of key checks. - * Introduced "bamboo-test" task which does NOT halt on failure to allow bamboo tests to complete and report properly (and various fixes) - * added unit test for x.y ensureIndex. 
- -2.7.0-rc1 / 2011-10-24 -======================= - - * JAVA-434: replaced isEmpty() with 1.5 compatible alternative - * JAVA-363: NPE in GridFS.validate - * JAVA-356: make JSONParseException public - * JAVA-413: intern dbref.ns - * JAVA-444: query before insert on index creation. - * JAVA-404: slaveOk support for inline mapreduce (routes to secondaries); changed CommandResult to include serverUsed, made readPref-Secondary set slaveOk query flag, toString/toJSON on Node/ReplicaSetStatus. - * Import javax.net in an OSGi environment. - * JAVA-428 - Support new ReadPreference semantics, deprecate SlaveOK - * Added a skip method to ensure continueOnInsertError test is only run on Server versions 2.0+ - * JAVA-448 - Tailable Cursor behavior while not in AWAIT mode - + Fixed non-await empty tailable cursor behavior to be more consistently inline with other drivers & user expectations. Instead of forcing a sleep of 500 milliseconds on "no data", we instead when tailable catch an empty cursor and return null instead. This should be more safely non blocking for users who need to roll their own event driven code, for which the sleep breaks logic. - * JAVA-439: Check keys to disallow dot in key names for embedded maps - * added getObjectId method. - * add partial query option flag - * JAVA-425 - Support MongoDB Server 2.0 getLastError Changes (j, w=string/number) - * JAVA-427 - Support ContinueOnError Flag for bulk insert - * JAVA-422 - Memoize field locations (and other calculated data) for LazyBSONObject - * JAVA-421 - LazyBSONObject exhibits certain behavioral breakages - * JAVA-420: LazyBSONObject does not handle DBRef - * Fix & cleanup of Javadoc comments (kevinsawicki) - * JAVA-365: Poor concurrency handling in DBApiLayer.doGetCollection - * JAVA-364: MapReduceOutput sometimes returns empty results in a replica set when SLAVE_OK=true - * JAVA-333: Allow bson decoder per operation (find/cursor) - * JAVA-323: When executing a command, only toplevel object returned should be a CommandResult (not sub-objects) - diff --git a/LICENSE.txt b/LICENSE.txt index bc097c6e4f2..261eeb9e9f8 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright (C) 2008-2013 10gen, Inc. + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -199,5 +199,3 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
- - diff --git a/README.md b/README.md index eb1dc1db050..ef32f93306b 100644 --- a/README.md +++ b/README.md @@ -1,46 +1,145 @@ -## MongoDB Java Driver ## - * release notes: http://github.com/mongodb/mongo-java-driver/wiki/Release-Notes - * driver home: http://github.com/mongodb/mongo-java-driver - * mongodb home: http://www.mongodb.org/ - * javadoc: http://api.mongodb.org/java/ - * releases: http://central.maven.org/maven2/org/mongodb/mongo-java-driver/ - * snapshots: https://oss.sonatype.org/content/repositories/snapshots/org/mongodb/mongo-java-driver/ - -### Questions and Bug Reports - * mailing list: http://groups.google.com/group/mongodb-user - * jira: http://jira.mongodb.org/browse/JAVA - -### Build status: -[![Build Status](https://jenkins.10gen.com/job/mongo-java-driver/badge/icon)](https://jenkins.10gen.com/job/mongo-java-driver/) - -### Maintainers -* Jeff Yemin jeff.yemin@10gen.com -* Trisha Gee trisha.gee@10gen.com - -### Contributors: -* Scott Hernandez scott@10gen.com -* Ryan Nitz ryan@10gen.com -* Antoine Girbal antoine@10gen.com -* Brendan McAdams brendan@10gen.com -* Eliot Horowitz eliot@10gen.com -* Kristina Chodorow kristina@10gen.com -* Geir Magnusson geir@pobox.com -* Keith Branton mongoDBjira@branton.co.uk -* Dave Brosius dbrosius@mebigfatguy.com -* Hans Meiser hmeiser@example.com -* Benedikt Waldvogel mail@bwaldvogel.de -* William Shulman william.shulman@gmail.com -* Daniel Spilker mail@daniel-spilker.com -* Kyle Banker kylebanker@gmail.com -* Ville Lautanala ville.lautanala@nodeta.fi -* Dan Simpson dan.simpson@gmail.com -* Tim Nelson tnelly27@gmail.com -* Jon Hoffman jonhoffman@gmail.com -* Daniel Kunnath dan@gameattain.com -* Harry Heymann harryh@gmail.com -* Mike Dirolf mike@10gen.com -* Zsombor Gegesy gzsombor@gmail.com -* Richard Kreuter richard@10gen.com -* Matthew Foemmel git@foemmel.com -* Jim Dickinson jdickinson@shopwiki.com -* Jorge Ortiz jorge.ortiz@gmail.com +## Release Notes + +Release notes are available [here](https://github.com/mongodb/mongo-java-driver/releases). + +## Documentation + +Reference and API documentation for the Java driver is available [here](https://www.mongodb.com/docs/drivers/java/sync/current/). + +Reference and API documentation for the Kotlin driver is available [here](https://www.mongodb.com/docs/drivers/kotlin/coroutine/current/). + +Reference and API documentation for the Scala driver is available [here](https://www.mongodb.com/docs/languages/scala/scala-driver/current/). + +## Tutorials / Training + +For tutorials on how to use the MongoDB JVM Drivers, please reference [MongoDB University](https://learn.mongodb.com/). Additional tutorials, videos, and code examples using both the Java Driver and the Kotlin Driver can also be found in the [MongoDB Developer Center](https://www.mongodb.com/developer/). + +## Support / Feedback + +For issues with, questions about, or feedback for the MongoDB Java, Kotlin, and Scala drivers, please look into +our [support channels](https://www.mongodb.com/docs/manual/support/). Please +do not email any of the driver developers directly with issues or +questions - you're more likely to get an answer on the [MongoDB Community Forums](https://community.mongodb.com/tags/c/drivers-odms-connectors/7/java-driver) or [StackOverflow](https://stackoverflow.com/questions/tagged/mongodb+java). + +At a minimum, please include in your description the exact version of the driver that you are using. 
If you are having
+connectivity issues, it's often also useful to paste in the line of code where you construct the MongoClient instance,
+along with the values of all parameters that you pass to the constructor. You should also check your application logs for
+any connectivity-related exceptions and post those as well.
+
+## Bugs / Feature Requests
+
+Think you’ve found a bug in the Java, Kotlin, or Scala drivers? Want to see a new feature in the drivers? Please open a
+case in our issue management tool, JIRA:
+
+- [Create an account and login](https://jira.mongodb.org).
+- Navigate to [the JAVA project](https://jira.mongodb.org/browse/JAVA).
+- Click **Create Issue** - Please provide as much information as possible about the issue type, which driver you are using, and how to reproduce your issue.
+
+Bug reports in JIRA for the driver and the Core Server (i.e. SERVER) project are **public**.
+
+If you’ve identified a security vulnerability in a driver or any other
+MongoDB project, please report it according to the [instructions here](https://www.mongodb.com/docs/manual/tutorial/create-a-vulnerability-report).
+
+## Versioning
+
+We follow [semantic versioning](https://semver.org/spec/v2.0.0.html) when releasing.
+
+#### @Alpha
+
+APIs marked with the `@Alpha` annotation are in the early stages of development, subject to incompatible changes,
+or even removal, in a future release and may lack some intended features. An API bearing the `@Alpha` annotation may
+contain known issues affecting functionality, performance, and stability. It is also exempt from any compatibility
+guarantees made by its containing library.
+
+It is inadvisable for applications to use Alpha APIs in production environments or for libraries
+(which get included on users' CLASSPATHs, outside the library developers' control) to depend on these APIs. Alpha APIs
+are intended for experimental purposes only.
+
+#### @Beta
+
+APIs marked with the `@Beta` annotation at the class or method level are subject to change. They can be modified in any way, or even
+removed, at any time. If your code is a library itself (i.e. it is used on the CLASSPATH of users outside your own control), you should not
+use beta APIs, unless you repackage them (e.g. by using shading, etc.).
+
+#### @Deprecated
+
+APIs marked with the `@Deprecated` annotation at the class or method level will remain supported until the next major release, but it is
+recommended to stop using them.
+
+#### com.mongodb.internal.*
+
+All code inside the `com.mongodb.internal.*` packages is considered private API and should not be relied upon at all. It can change at any
+time.
+
+## Binaries
+
+Binaries and dependency information for Maven, Gradle, Ivy and others can be found at
+[https://central.sonatype.com/search](https://central.sonatype.com/search?namespace=org.mongodb&name=mongodb-driver-sync).
+
+Example for Maven:
+
+```xml
+<dependency>
+    <groupId>org.mongodb</groupId>
+    <artifactId>mongodb-driver-sync</artifactId>
+    <version>x.y.z</version>
+</dependency>
+```
+
+Snapshot builds are also published regularly via Sonatype.
+
+Example for Maven:
+
+```xml
+<repositories>
+    <repository>
+        <name>Central Portal Snapshots</name>
+        <id>central-portal-snapshots</id>
+        <url>https://central.sonatype.com/repository/maven-snapshots/</url>
+        <releases>
+            <enabled>false</enabled>
+        </releases>
+        <snapshots>
+            <enabled>true</enabled>
+        </snapshots>
+    </repository>
+</repositories>
+```
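+
+If you build with Gradle instead, a roughly equivalent declaration might look like the following sketch (Kotlin DSL
+assumed; replace `x.y.z` with the actual driver version):
+
+```kotlin
+dependencies {
+    // The synchronous driver; driver-core and bson are pulled in transitively.
+    implementation("org.mongodb:mongodb-driver-sync:x.y.z")
+}
+```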
+
+## Build
+
+Java 17+ and git are required to build and compile the source. To build and test the driver:
+
+```
+$ git clone --recurse-submodules https://github.com/mongodb/mongo-java-driver.git
+$ cd mongo-java-driver
+$ ./gradlew check
+```
+
+The test suite requires mongod to be running with [`enableTestCommands`](https://www.mongodb.com/docs/manual/reference/parameters/#param.enableTestCommands), which may be set with the `--setParameter enableTestCommands=1`
+command-line parameter:
+```
+$ mkdir -p data/db
+$ mongod --dbpath ./data/db --logpath ./data/mongod.log --port 27017 --logappend --fork --setParameter enableTestCommands=1
+```
+
+If you encounter `"Too many open files"` errors when running the tests, you will need to increase
+the number of available file descriptors prior to starting mongod, as described in [https://www.mongodb.com/docs/manual/reference/ulimit/](https://www.mongodb.com/docs/manual/reference/ulimit/).
+
+## IntelliJ IDEA
+
+A couple of manual configuration steps are required to run the code in IntelliJ:
+
+- Java 17+ is required to build and compile the source.
+
+- **Error:** `java: cannot find symbol: class SNIHostName location: package javax.net.ssl`<br>
+ **Fix:** Settings/Preferences > Build, Execution, Deployment > Compiler > Java Compiler - untick "Use '--release' option for + cross-compilation (Java 9 and later)" + +- **Error:** `java: package com.mongodb.internal.build does not exist`
+ **Fixes:** Any of the following:
+  - Run the `generateBuildConfig` task, e.g. `./gradlew generateBuildConfig` or via Gradle > driver-core > Tasks > buildconfig >
+    generateBuildConfig
+  - Set `generateBuildConfig` to execute before the build, via Gradle > Tasks > buildconfig > right-click generateBuildConfig and click
+    "Execute Before Build"
+  - Delegate all build actions to Gradle: Settings/Preferences > Build, Execution, Deployment > Build Tools > Gradle > Build and run
+    using/Run tests using - select "Gradle"
 diff --git a/THIRD-PARTY-NOTICES b/THIRD-PARTY-NOTICES new file mode 100644 index 00000000000..acca60ca973 --- /dev/null +++ b/THIRD-PARTY-NOTICES @@ -0,0 +1,202 @@
+The MongoDB Java Driver uses third-party libraries or other resources that may
+be distributed under licenses different than the MongoDB Java Driver software.
+
+In the event that we accidentally failed to list a required notice,
+please bring it to our attention through any of the ways detailed here:
+
+    https://jira.mongodb.org/browse/JAVA
+
+The attached notices are provided for information only.
+
+For any licenses that require disclosure of source, sources are available at
+https://github.com/mongodb/mongo-java-driver.
+
+
+1) The following files: Immutable.java, NotThreadSafe.java, ThreadSafe.java
+
+   Copyright (c) 2005 Brian Goetz and Tim Peierls
+   Released under the Creative Commons Attribution License (http://creativecommons.org/licenses/by/2.5)
+   Official home: http://www.jcip.net
+
+   Any republication or derived work distributed in source code form
+   must include this copyright and license notice.
+
+2) The following files: Assertions.java
+
+   Copyright (c) 2008-2014 Atlassian Pty Ltd
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+3) The following files:
+
+   Alpha.java (formerly Beta.java)
+   Beta.java
+
+   Copyright 2010 The Guava Authors
+   Copyright 2011 The Guava Authors
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +5) The following files: SaslPrep.java + + Copyright 2008-present MongoDB, Inc. + Copyright 2017 Tom Bentley + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +6) The following files (originally from https://github.com/marianobarrios/tls-channel): + + AsynchronousTlsChannel.java + AsynchronousTlsChannelGroup.java + BufferAllocator.java + BufferHolder.java + ByteBufferSet.java + ByteBufferUtil.java + ClientTlsChannel.java + DirectBufferAllocator.java + DirectBufferDeallocator.java + ExtendedAsynchronousByteChannel.java + HeapBufferAllocator.java + NeedsReadException.java + NeedsTaskException.java + NeedsWriteException.java + ServerTlsChannel.java + SniSslContextFactory.java + TlsChannel.java + TlsChannelBuilder.java + TlsChannelCallbackException.java + TlsChannelFlowControlException.java + TlsChannelImpl.java + TlsExplorer.java + TrackingAllocator.java + Util.java + WouldBlockException.java + + Copyright (c) [2015-2020] all contributors + + MIT License + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + +7) The following files (originally from https://github.com/google/guava): + + InetAddressUtils.java (formerly InetAddresses.java) + InetAddressUtilsTest.java (formerly InetAddressesTest.java) + + Copyright 2008-present MongoDB, Inc. + Copyright (C) 2008 The Guava Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +8) The following files (originally from https://github.com/Litote/kmongo): + + Filters.kt + Properties.kt + KPropertyPath.kt + FiltersTest.kt + KPropertiesTest.kt + + Copyright 2008-present MongoDB, Inc. + Copyright (C) 2016/2022 Litote + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +9) The following files: BsonCodecUtils.kt + + Copyright 2008-present MongoDB, Inc. + Copyright 2017-2021 JetBrains s.r.o. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/bom/build.gradle.kts b/bom/build.gradle.kts new file mode 100644 index 00000000000..806c4f20950 --- /dev/null +++ b/bom/build.gradle.kts @@ -0,0 +1,155 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+import ProjectExtensions.configureMavenPublication
+import groovy.util.Node
+import groovy.util.NodeList
+
+plugins {
+    id("java-platform")
+    id("project.base")
+    id("conventions.publishing")
+    id("conventions.spotless")
+}
+
+base.archivesName.set("mongodb-driver-bom")
+
+dependencies {
+    constraints {
+        api(project(":mongodb-crypt"))
+        api(project(":driver-core"))
+        api(project(":bson"))
+        api(project(":bson-record-codec"))
+
+        api(project(":driver-sync"))
+        api(project(":driver-reactive-streams"))
+
+        api(project(":bson-kotlin"))
+        api(project(":bson-kotlinx"))
+        api(project(":driver-kotlin-coroutine"))
+        api(project(":driver-kotlin-sync"))
+        api(project(":driver-kotlin-extensions"))
+
+        api(project(":bson-scala"))
+        api(project(":driver-scala"))
+    }
+}
+
+/*
+ * Handle the multiple versions of Scala we support as defined in `gradle.properties`
+ */
+val defaultScalaVersion: String = project.findProperty("defaultScalaVersion")!!.toString()
+val scalaVersions: List<String>? = project.findProperty("supportedScalaVersions")?.toString()?.split(",")
+
+require(!scalaVersions.isNullOrEmpty()) {
+    "Scala versions must be provided as a comma-separated list in the 'supportedScalaVersions' project property"
+}
+
+scalaVersions?.forEach { version ->
+    require(version.matches(Regex("\\d\\.\\d{2}"))) { "Scala version '$version' must be in the format X.YY" }
+}
+
+/*
+ * Apply the Java Platform plugin to create the BOM.
+ * Modify the generated POM to include all supported versions of Scala for driver-scala or bson-scala.
+ */
+configureMavenPublication {
+    components.findByName("javaPlatform")?.let { from(it) }
+
+    pom {
+        name.set("bom")
+        description.set(
+            "This Bill of Materials POM simplifies dependency management when referencing multiple MongoDB Java Driver artifacts in projects using Gradle or Maven.")
+
+        withXml {
+            val pomXml: Node = asNode()
+
+            val dependencyManagementNode = pomXml.getNode("dependencyManagement")
+            require(dependencyManagementNode != null) {
+                "<dependencyManagement> node not found in the generated BOM POM"
+            }
+            val dependenciesNode = dependencyManagementNode.getNode("dependencies")
+            require(dependenciesNode != null) { "<dependencies> node not found in the generated BOM POM" }
+
+            val existingScalaDeps =
+                dependenciesNode
+                    .children()
+                    .map { it as Node }
+                    .filter { it.getNode("artifactId")?.text()?.contains("scala") ?: false }
+
+            existingScalaDeps.forEach {
+                val groupId: String = it.getNode("groupId")!!.text()
+                val originalArtifactId: String = it.getNode("artifactId")!!.text()
+                val artifactVersion: String = it.getNode("version")!!.text()
+
+                // Add multiple versions with Scala suffixes for each Scala-related dependency.
+                scalaVersions!!.forEach { scalaVersion ->
+                    if (scalaVersion != defaultScalaVersion) {
+                        // Replace scala version suffix
+                        val newArtifactId: String = originalArtifactId.replace(defaultScalaVersion, scalaVersion)
+                        val dependencyNode = dependenciesNode.appendNode("dependency")
+                        dependencyNode.appendNode("groupId", groupId)
+                        dependencyNode.appendNode("artifactId", newArtifactId)
+                        dependencyNode.appendNode("version", artifactVersion)
+                    }
+                }
+            }
+        }
+    }
+}
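+
+/*
+ * Illustration only (an assumption, not part of this build script): a consumer
+ * project could import the published BOM from a Gradle Kotlin DSL build so that
+ * individual driver artifacts no longer need explicit versions, e.g.
+ *
+ *   dependencies {
+ *       implementation(platform("org.mongodb:mongodb-driver-bom:x.y.z"))
+ *       implementation("org.mongodb:mongodb-driver-sync") // version supplied by the BOM
+ *   }
+ */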
+/*
+ * Validate the BOM file.
+ */
+tasks.withType<GenerateMavenPom> {
+    pom.withXml {
+        val pomXml: Node = asNode()
+        val dependenciesNode = pomXml.getNode("dependencyManagement").getNode("dependencies")
+        require(dependenciesNode!!.children().isNotEmpty()) {
+            "BOM must contain more than one <dependency> element:\n$destination"
+        }
+
+        dependenciesNode
+            .children()
+            .map { it as Node }
+            .forEach {
+                val groupId: String = it.getNode("groupId")!!.text()
+                require(groupId.startsWith("org.mongodb")) {
+                    "BOM must contain only 'org.mongodb' dependencies, but found '$groupId':\n$destination"
+                }
+
+                /*
+                 * The <scope> and <optional> tags should be omitted in BOM dependencies.
+                 * This ensures that consuming projects have the flexibility to decide whether a dependency is optional in their context.
+                 *
+                 * The BOM's role is to provide version information, not to dictate inclusion or exclusion of dependencies.
+                 */
+                require(it.getNode("scope") == null) {
+                    "BOM must not contain <scope> elements in dependency:\n$destination"
+                }
+                require(it.getNode("optional") == null) {
+                    "BOM must not contain <optional> elements in dependency:\n$destination"
+                }
+            }
+    }
+}
+
+/** A node lookup helper. */
+private fun Node?.getNode(nodeName: String): Node? {
+    val found = this?.get(nodeName)
+    if (found is NodeList && found.isNotEmpty()) {
+        return found[0] as Node
+    }
+    return null
+}
 diff --git a/bson-kotlin/build.gradle.kts b/bson-kotlin/build.gradle.kts new file mode 100644 index 00000000000..2cfd4413637 --- /dev/null +++ b/bson-kotlin/build.gradle.kts @@ -0,0 +1,39 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import ProjectExtensions.configureJarManifest
+import ProjectExtensions.configureMavenPublication
+
+plugins { id("project.kotlin") }
+
+base.archivesName.set("bson-kotlin")
+
+dependencies {
+    api(project(path = ":bson", configuration = "default"))
+    implementation(libs.kotlin.reflect)
+
+    // Test case checks MongoClientSettings.getDefaultCodecRegistry() support
+    testImplementation(project(path = ":driver-core", configuration = "default"))
+}
+
+configureMavenPublication {
+    pom {
+        name.set("BSON Kotlin")
+        description.set("The BSON Codec for Kotlin")
+        url.set("https://bsonspec.org")
+    }
+}
+
+configureJarManifest { attributes["Automatic-Module-Name"] = "org.mongodb.bson.kotlin" }
 diff --git a/bson-kotlin/src/main/kotlin/org/bson/codecs/kotlin/ArrayCodec.kt b/bson-kotlin/src/main/kotlin/org/bson/codecs/kotlin/ArrayCodec.kt new file mode 100644 index 00000000000..10ea90aee1b --- /dev/null +++ b/bson-kotlin/src/main/kotlin/org/bson/codecs/kotlin/ArrayCodec.kt @@ -0,0 +1,128 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.bson.codecs.kotlin + +import java.lang.reflect.ParameterizedType +import java.lang.reflect.Type +import kotlin.reflect.KClass +import org.bson.BsonReader +import org.bson.BsonType +import org.bson.BsonWriter +import org.bson.codecs.Codec +import org.bson.codecs.DecoderContext +import org.bson.codecs.EncoderContext +import org.bson.codecs.configuration.CodecRegistry + +@Suppress("UNCHECKED_CAST") +internal data class ArrayCodec(private val kClass: KClass, private val codec: Codec) : Codec { + + companion object { + internal fun create( + kClass: KClass, + typeArguments: List, + codecRegistry: CodecRegistry + ): Codec { + assert(kClass.javaObjectType.isArray) { "$kClass must be an array type" } + val (valueClass, nestedTypes) = + if (typeArguments.isEmpty()) { + Pair(kClass.java.componentType.kotlin.javaObjectType as Class, emptyList()) + } else { + // Unroll the actual class and any type arguments + when (val pType = typeArguments[0]) { + is Class<*> -> Pair(pType as Class, emptyList()) + is ParameterizedType -> Pair(pType.rawType as Class, pType.actualTypeArguments.toList()) + else -> Pair(Object::class.java as Class, emptyList()) + } + } + val codec = + if (nestedTypes.isEmpty()) codecRegistry.get(valueClass) else codecRegistry.get(valueClass, nestedTypes) + return ArrayCodec(kClass, codec) + } + } + + private val isPrimitiveArray = kClass.java.componentType != kClass.java.componentType.kotlin.javaObjectType + + override fun encode(writer: BsonWriter, arrayValue: R, encoderContext: EncoderContext) { + writer.writeStartArray() + + boxed(arrayValue).forEach { + if (it == null) writer.writeNull() else encoderContext.encodeWithChildContext(codec, writer, it) + } + + writer.writeEndArray() + } + + override fun getEncoderClass(): Class = kClass.java + + override fun decode(reader: BsonReader, decoderContext: DecoderContext): R { + reader.readStartArray() + val data = ArrayList() + while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) { + if (reader.currentBsonType == BsonType.NULL) { + reader.readNull() + data.add(null) + } else { + data.add(decoderContext.decodeWithChildContext(codec, reader)) + } + } + reader.readEndArray() + return unboxed(data) + } + + fun boxed(arrayValue: R): Iterable { + val boxedValue = + if (!isPrimitiveArray) { + (arrayValue as Array).asIterable() + } else if (arrayValue is BooleanArray) { + arrayValue.asIterable() + } else if (arrayValue is ByteArray) { + arrayValue.asIterable() + } else if (arrayValue is CharArray) { + arrayValue.asIterable() + } else if (arrayValue is DoubleArray) { + arrayValue.asIterable() + } else if (arrayValue is FloatArray) { + arrayValue.asIterable() + } else if (arrayValue is IntArray) { + arrayValue.asIterable() + } else if (arrayValue is LongArray) { + arrayValue.asIterable() + } else if (arrayValue is ShortArray) { + arrayValue.asIterable() + } else { + throw IllegalArgumentException("Unsupported array type ${arrayValue.javaClass}") + } + return boxedValue as Iterable + } + + private fun unboxed(data: ArrayList): R { + return when (kClass) { + BooleanArray::class -> (data as 
ArrayList).toBooleanArray() as R + ByteArray::class -> (data as ArrayList).toByteArray() as R + CharArray::class -> (data as ArrayList).toCharArray() as R + DoubleArray::class -> (data as ArrayList).toDoubleArray() as R + FloatArray::class -> (data as ArrayList).toFloatArray() as R + IntArray::class -> (data as ArrayList).toIntArray() as R + LongArray::class -> (data as ArrayList).toLongArray() as R + ShortArray::class -> (data as ArrayList).toShortArray() as R + else -> data.toArray(arrayOfNulls(data.size)) as R + } + } + + private fun arrayOfNulls(size: Int): Array { + return java.lang.reflect.Array.newInstance(codec.encoderClass, size) as Array + } +} diff --git a/bson-kotlin/src/main/kotlin/org/bson/codecs/kotlin/ArrayCodecProvider.kt b/bson-kotlin/src/main/kotlin/org/bson/codecs/kotlin/ArrayCodecProvider.kt new file mode 100644 index 00000000000..eccb5b88b27 --- /dev/null +++ b/bson-kotlin/src/main/kotlin/org/bson/codecs/kotlin/ArrayCodecProvider.kt @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.bson.codecs.kotlin + +import java.lang.reflect.Type +import org.bson.codecs.Codec +import org.bson.codecs.configuration.CodecProvider +import org.bson.codecs.configuration.CodecRegistry + +/** A Kotlin reflection based Codec Provider for data classes */ +public class ArrayCodecProvider : CodecProvider { + override fun get(clazz: Class, registry: CodecRegistry): Codec? = get(clazz, emptyList(), registry) + + override fun get(clazz: Class, typeArguments: List, registry: CodecRegistry): Codec? = + if (clazz.isArray) { + ArrayCodec.create(clazz.kotlin, typeArguments, registry) + } else null +} diff --git a/bson-kotlin/src/main/kotlin/org/bson/codecs/kotlin/DataClassCodec.kt b/bson-kotlin/src/main/kotlin/org/bson/codecs/kotlin/DataClassCodec.kt new file mode 100644 index 00000000000..85e705cb8c0 --- /dev/null +++ b/bson-kotlin/src/main/kotlin/org/bson/codecs/kotlin/DataClassCodec.kt @@ -0,0 +1,263 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.bson.codecs.kotlin + +import java.lang.reflect.ParameterizedType +import java.lang.reflect.Type +import kotlin.reflect.KClass +import kotlin.reflect.KClassifier +import kotlin.reflect.KFunction +import kotlin.reflect.KParameter +import kotlin.reflect.KProperty1 +import kotlin.reflect.KTypeParameter +import kotlin.reflect.KTypeProjection +import kotlin.reflect.full.createType +import kotlin.reflect.full.findAnnotation +import kotlin.reflect.full.findAnnotations +import kotlin.reflect.full.hasAnnotation +import kotlin.reflect.full.primaryConstructor +import kotlin.reflect.jvm.javaType +import kotlin.reflect.jvm.jvmErasure +import org.bson.BsonReader +import org.bson.BsonType +import org.bson.BsonWriter +import org.bson.codecs.Codec +import org.bson.codecs.DecoderContext +import org.bson.codecs.EncoderContext +import org.bson.codecs.RepresentationConfigurable +import org.bson.codecs.configuration.CodecConfigurationException +import org.bson.codecs.configuration.CodecRegistry +import org.bson.codecs.pojo.annotations.BsonCreator +import org.bson.codecs.pojo.annotations.BsonDiscriminator +import org.bson.codecs.pojo.annotations.BsonExtraElements +import org.bson.codecs.pojo.annotations.BsonId +import org.bson.codecs.pojo.annotations.BsonIgnore +import org.bson.codecs.pojo.annotations.BsonProperty +import org.bson.codecs.pojo.annotations.BsonRepresentation +import org.bson.diagnostics.Loggers + +internal data class DataClassCodec( + private val kClass: KClass, + private val primaryConstructor: KFunction, + private val propertyModels: List, +) : Codec { + + private val fieldNamePropertyModelMap = propertyModels.associateBy { it.fieldName } + private val propertyModelId: PropertyModel? = fieldNamePropertyModelMap[idFieldName] + + data class PropertyModel(val param: KParameter, val fieldName: String, val codec: Codec) + + override fun encode(writer: BsonWriter, value: T, encoderContext: EncoderContext) { + writer.writeStartDocument() + if (propertyModelId != null) { + encodeProperty(propertyModelId, value, writer, encoderContext) + } + propertyModels + .filter { it != propertyModelId } + .forEach { propertyModel -> encodeProperty(propertyModel, value, writer, encoderContext) } + writer.writeEndDocument() + } + + override fun getEncoderClass(): Class = kClass.java + + @Suppress("TooGenericExceptionCaught") + override fun decode(reader: BsonReader, decoderContext: DecoderContext): T { + val args: MutableMap = mutableMapOf() + fieldNamePropertyModelMap.values.forEach { args[it.param] = null } + + reader.readStartDocument() + while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) { + val fieldName = reader.readName() + val propertyModel = fieldNamePropertyModelMap[fieldName] + if (propertyModel == null) { + reader.skipValue() + if (logger.isTraceEnabled) { + logger.trace("Found property not present in the DataClass: $fieldName") + } + } else if (propertyModel.param.type.isMarkedNullable && reader.currentBsonType == BsonType.NULL) { + reader.readNull() + } else { + try { + args[propertyModel.param] = decoderContext.decodeWithChildContext(propertyModel.codec, reader) + } catch (e: Exception) { + throw CodecConfigurationException( + "Unable to decode $fieldName for ${kClass.simpleName} data class.", e) + } + } + } + reader.readEndDocument() + + try { + return primaryConstructor.callBy(args) + } catch (e: Exception) { + throw CodecConfigurationException( + "Unable to invoke primary constructor of ${kClass.simpleName} data class", e) + } + } + + @Suppress("UNCHECKED_CAST") + private fun 
encodeProperty( + propertyModel: PropertyModel, + value: T, + writer: BsonWriter, + encoderContext: EncoderContext + ) { + value::class + .members + .firstOrNull { it.name == propertyModel.param.name } + ?.let { + val propertyValue = (it as KProperty1).get(value) + propertyValue?.let { pValue -> + writer.writeName(propertyModel.fieldName) + encoderContext.encodeWithChildContext(propertyModel.codec, writer, pValue) + } + } + } + + companion object { + + internal val logger = Loggers.getLogger("DataClassCodec") + private const val idFieldName = "_id" + + internal fun create( + kClass: KClass, + codecRegistry: CodecRegistry, + types: List = emptyList() + ): Codec? { + return if (kClass.isData) { + validateAnnotations(kClass) + val primaryConstructor = + kClass.primaryConstructor ?: throw CodecConfigurationException("No primary constructor for $kClass") + val typeMap = + types + .mapIndexed { i, k -> primaryConstructor.typeParameters[i].createType().classifier!! to k } + .toMap() + + val propertyModels = + primaryConstructor.parameters.map { kParameter -> + PropertyModel( + kParameter, computeFieldName(kParameter), getCodec(kParameter, typeMap, codecRegistry)) + } + return DataClassCodec(kClass, primaryConstructor, propertyModels) + } else { + null + } + } + + private fun validateAnnotations(kClass: KClass) { + codecConfigurationRequires(kClass.findAnnotation() == null) { + """Annotation 'BsonDiscriminator' is not supported on kotlin data classes, + | but found on ${kClass.simpleName}.""" + .trimMargin() + } + + codecConfigurationRequires(kClass.constructors.all { it.findAnnotations().isEmpty() }) { + """Annotation 'BsonCreator' is not supported on kotlin data classes, + | but found in ${kClass.simpleName}.""" + .trimMargin() + } + + kClass.primaryConstructor?.parameters?.map { param -> + codecConfigurationRequires(param.findAnnotations().isEmpty()) { + """Annotation 'BsonIgnore' is not supported in kotlin data classes, + | found on the parameter for ${param.name}.""" + .trimMargin() + } + codecConfigurationRequires(param.findAnnotations().isEmpty()) { + """Annotation 'BsonExtraElements' is not supported in kotlin data classes, + | found on the parameter for ${param.name}.""" + .trimMargin() + } + } + } + + private fun computeFieldName(parameter: KParameter): String { + return if (parameter.hasAnnotation()) { + idFieldName + } else { + parameter.findAnnotation()?.value ?: requireNotNull(parameter.name) + } + } + + @Suppress("UNCHECKED_CAST") + private fun getCodec( + kParameter: KParameter, + typeMap: Map, + codecRegistry: CodecRegistry + ): Codec { + return when (kParameter.type.classifier) { + is KClass<*> -> { + codecRegistry.getCodec( + kParameter, + (kParameter.type.classifier as KClass).javaObjectType, + kParameter.type.arguments + .mapNotNull { typeMap[it.type?.classifier] ?: computeJavaType(it) } + .toList()) + } + is KTypeParameter -> { + when (val pType = typeMap[kParameter.type.classifier] ?: kParameter.type.javaType) { + is Class<*> -> + codecRegistry.getCodec(kParameter, (pType as Class).kotlin.java, emptyList()) + is ParameterizedType -> + codecRegistry.getCodec( + kParameter, + (pType.rawType as Class).kotlin.javaObjectType, + pType.actualTypeArguments.toList()) + else -> null + } + } + else -> null + } + ?: throw CodecConfigurationException( + "Could not find codec for ${kParameter.name} with type ${kParameter.type}") + } + + private fun computeJavaType(kTypeProjection: KTypeProjection): Type? { + val javaType: Type = kTypeProjection.type?.javaType!! 
+ return if (javaType == Any::class.java) { + kTypeProjection.type?.jvmErasure?.javaObjectType + } else javaType + } + + @Suppress("UNCHECKED_CAST") + private fun CodecRegistry.getCodec(kParameter: KParameter, clazz: Class, types: List): Codec { + val codec = + if (clazz.isArray) { + ArrayCodec.create(clazz.kotlin, types, this) + } else if (types.isEmpty()) { + this.get(clazz) + } else { + this.get(clazz, types) + } + + return kParameter.findAnnotation()?.let { + if (codec !is RepresentationConfigurable<*>) { + throw CodecConfigurationException( + "Codec for `${kParameter.name}` must implement RepresentationConfigurable" + + " to supportBsonRepresentation") + } + codec.withRepresentation(it.value) as Codec + } + ?: codec + } + + private fun codecConfigurationRequires(value: Boolean, lazyMessage: () -> String) { + if (!value) { + throw CodecConfigurationException(lazyMessage.invoke()) + } + } + } +} diff --git a/bson-kotlin/src/main/kotlin/org/bson/codecs/kotlin/DataClassCodecProvider.kt b/bson-kotlin/src/main/kotlin/org/bson/codecs/kotlin/DataClassCodecProvider.kt new file mode 100644 index 00000000000..962741033e1 --- /dev/null +++ b/bson-kotlin/src/main/kotlin/org/bson/codecs/kotlin/DataClassCodecProvider.kt @@ -0,0 +1,29 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.bson.codecs.kotlin + +import java.lang.reflect.Type +import org.bson.codecs.Codec +import org.bson.codecs.configuration.CodecProvider +import org.bson.codecs.configuration.CodecRegistry + +/** A Kotlin reflection based Codec Provider for data classes */ +public class DataClassCodecProvider : CodecProvider { + override fun get(clazz: Class, registry: CodecRegistry): Codec? = get(clazz, emptyList(), registry) + + override fun get(clazz: Class, typeArguments: List, registry: CodecRegistry): Codec? = + DataClassCodec.create(clazz.kotlin, registry, typeArguments) +} diff --git a/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/DataClassCodecProviderTest.kt b/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/DataClassCodecProviderTest.kt new file mode 100644 index 00000000000..7b9e0bbb2ba --- /dev/null +++ b/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/DataClassCodecProviderTest.kt @@ -0,0 +1,92 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.bson.codecs.kotlin + +import com.mongodb.MongoClientSettings +import kotlin.test.assertEquals +import kotlin.test.assertNotNull +import kotlin.test.assertNull +import kotlin.test.assertTrue +import kotlin.time.Duration +import org.bson.BsonReader +import org.bson.BsonWriter +import org.bson.codecs.Codec +import org.bson.codecs.DecoderContext +import org.bson.codecs.EncoderContext +import org.bson.codecs.configuration.CodecConfigurationException +import org.bson.codecs.configuration.CodecRegistries.fromCodecs +import org.bson.codecs.configuration.CodecRegistries.fromProviders +import org.bson.codecs.configuration.CodecRegistries.fromRegistries +import org.bson.codecs.kotlin.samples.DataClassParameterized +import org.bson.codecs.kotlin.samples.DataClassWithJVMErasure +import org.bson.codecs.kotlin.samples.DataClassWithSimpleValues +import org.bson.conversions.Bson +import org.junit.jupiter.api.Test +import org.junit.jupiter.api.assertDoesNotThrow +import org.junit.jupiter.api.assertThrows + +class DataClassCodecProviderTest { + + @Test + fun shouldReturnNullForNonDataClass() { + assertNull(DataClassCodecProvider().get(String::class.java, Bson.DEFAULT_CODEC_REGISTRY)) + } + + @Test + fun shouldReturnDataClassCodecForDataClass() { + val provider = DataClassCodecProvider() + val codec = provider.get(DataClassWithSimpleValues::class.java, Bson.DEFAULT_CODEC_REGISTRY) + + assertNotNull(codec) + assertTrue { codec is DataClassCodec } + assertEquals(DataClassWithSimpleValues::class.java, codec.encoderClass) + } + + @Test + fun shouldRequireTypeArgumentsForDataClassParameterized() { + assertThrows { + DataClassCodecProvider().get(DataClassParameterized::class.java, Bson.DEFAULT_CODEC_REGISTRY) + } + } + + @Test + fun shouldReturnDataClassCodecUsingDefaultRegistry() { + val codec = MongoClientSettings.getDefaultCodecRegistry().get(DataClassWithSimpleValues::class.java) + + assertNotNull(codec) + assertTrue { codec is DataClassCodec } + assertEquals(DataClassWithSimpleValues::class.java, codec.encoderClass) + } + + @Test + fun shouldBeAbleHandleDataClassWithJVMErasure() { + + class DurationCodec : Codec { + override fun encode(writer: BsonWriter, value: Duration, encoderContext: EncoderContext) = TODO() + override fun getEncoderClass(): Class = Duration::class.java + override fun decode(reader: BsonReader, decoderContext: DecoderContext): Duration = TODO() + } + + val registry = + fromRegistries( + fromCodecs(DurationCodec()), fromProviders(DataClassCodecProvider()), Bson.DEFAULT_CODEC_REGISTRY) + + val codec = assertDoesNotThrow { registry.get(DataClassWithJVMErasure::class.java) } + assertNotNull(codec) + assertTrue { codec is DataClassCodec } + assertEquals(DataClassWithJVMErasure::class.java, codec.encoderClass) + } +} diff --git a/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/DataClassCodecTest.kt b/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/DataClassCodecTest.kt new file mode 100644 index 00000000000..c203a5d2358 --- /dev/null +++ b/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/DataClassCodecTest.kt @@ -0,0 +1,593 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.bson.codecs.kotlin + +import kotlin.test.assertEquals +import org.bson.BsonDocument +import org.bson.BsonDocumentReader +import org.bson.BsonDocumentWriter +import org.bson.codecs.DecoderContext +import org.bson.codecs.EncoderContext +import org.bson.codecs.configuration.CodecConfigurationException +import org.bson.codecs.configuration.CodecRegistries.fromProviders +import org.bson.codecs.kotlin.samples.Box +import org.bson.codecs.kotlin.samples.DataClassEmbedded +import org.bson.codecs.kotlin.samples.DataClassLastItemDefaultsToNull +import org.bson.codecs.kotlin.samples.DataClassListOfDataClasses +import org.bson.codecs.kotlin.samples.DataClassListOfListOfDataClasses +import org.bson.codecs.kotlin.samples.DataClassListOfSealed +import org.bson.codecs.kotlin.samples.DataClassMapOfDataClasses +import org.bson.codecs.kotlin.samples.DataClassMapOfListOfDataClasses +import org.bson.codecs.kotlin.samples.DataClassNestedParameterizedTypes +import org.bson.codecs.kotlin.samples.DataClassParameterized +import org.bson.codecs.kotlin.samples.DataClassSealedA +import org.bson.codecs.kotlin.samples.DataClassSealedB +import org.bson.codecs.kotlin.samples.DataClassSealedC +import org.bson.codecs.kotlin.samples.DataClassSelfReferential +import org.bson.codecs.kotlin.samples.DataClassWithArrays +import org.bson.codecs.kotlin.samples.DataClassWithBooleanMapKey +import org.bson.codecs.kotlin.samples.DataClassWithBsonConstructor +import org.bson.codecs.kotlin.samples.DataClassWithBsonDiscriminator +import org.bson.codecs.kotlin.samples.DataClassWithBsonExtraElements +import org.bson.codecs.kotlin.samples.DataClassWithBsonId +import org.bson.codecs.kotlin.samples.DataClassWithBsonIgnore +import org.bson.codecs.kotlin.samples.DataClassWithBsonProperty +import org.bson.codecs.kotlin.samples.DataClassWithCollections +import org.bson.codecs.kotlin.samples.DataClassWithDataClassMapKey +import org.bson.codecs.kotlin.samples.DataClassWithDefaults +import org.bson.codecs.kotlin.samples.DataClassWithEmbedded +import org.bson.codecs.kotlin.samples.DataClassWithEnum +import org.bson.codecs.kotlin.samples.DataClassWithEnumMapKey +import org.bson.codecs.kotlin.samples.DataClassWithFailingInit +import org.bson.codecs.kotlin.samples.DataClassWithInvalidBsonRepresentation +import org.bson.codecs.kotlin.samples.DataClassWithListThatLastItemDefaultsToNull +import org.bson.codecs.kotlin.samples.DataClassWithMutableList +import org.bson.codecs.kotlin.samples.DataClassWithMutableMap +import org.bson.codecs.kotlin.samples.DataClassWithMutableSet +import org.bson.codecs.kotlin.samples.DataClassWithNativeArrays +import org.bson.codecs.kotlin.samples.DataClassWithNestedParameterized +import org.bson.codecs.kotlin.samples.DataClassWithNestedParameterizedDataClass +import org.bson.codecs.kotlin.samples.DataClassWithNullableGeneric +import org.bson.codecs.kotlin.samples.DataClassWithNulls +import org.bson.codecs.kotlin.samples.DataClassWithObjectIdAndBsonDocument +import org.bson.codecs.kotlin.samples.DataClassWithPair +import org.bson.codecs.kotlin.samples.DataClassWithParameterizedDataClass +import 
org.bson.codecs.kotlin.samples.DataClassWithSequence +import org.bson.codecs.kotlin.samples.DataClassWithSimpleValues +import org.bson.codecs.kotlin.samples.DataClassWithTriple +import org.bson.codecs.kotlin.samples.Key +import org.bson.conversions.Bson +import org.bson.types.ObjectId +import org.junit.jupiter.api.Test +import org.junit.jupiter.api.assertThrows + +class DataClassCodecTest { + private val numberLong = "\$numberLong" + private val emptyDocument = "{}" + + @Test + fun testDataClassWithSimpleValues() { + val expected = + """{"char": "c", "byte": 0, "short": 1, "int": 22, "long": {"$numberLong": "42"}, "float": 4.0, + | "double": 4.2, "boolean": true, "string": "String"}""" + .trimMargin() + val dataClass = DataClassWithSimpleValues('c', 0, 1, 22, 42L, 4.0f, 4.2, true, "String") + + assertRoundTrips(expected, dataClass) + } + + @Test + fun testDataClassWithComplexTypes() { + val expected = + """{ + | "listSimple": ["a", "b", "c", "d"], + | "listList": [["a", "b"], [], ["c", "d"]], + | "listMap": [{"a": 1, "b": 2}, {}, {"c": 3, "d": 4}], + | "mapSimple": {"a": 1, "b": 2, "c": 3, "d": 4}, + | "mapList": {"a": ["a", "b"], "b": [], "c": ["c", "d"]}, + | "mapMap" : {"a": {"a": 1, "b": 2}, "b": {}, "c": {"c": 3, "d": 4}} + |}""" + .trimMargin() + + val dataClass = + DataClassWithCollections( + listOf("a", "b", "c", "d"), + listOf(listOf("a", "b"), emptyList(), listOf("c", "d")), + listOf(mapOf("a" to 1, "b" to 2), emptyMap(), mapOf("c" to 3, "d" to 4)), + mapOf("a" to 1, "b" to 2, "c" to 3, "d" to 4), + mapOf("a" to listOf("a", "b"), "b" to emptyList(), "c" to listOf("c", "d")), + mapOf("a" to mapOf("a" to 1, "b" to 2), "b" to emptyMap(), "c" to mapOf("c" to 3, "d" to 4))) + + assertRoundTrips(expected, dataClass) + } + + @Test + fun testDataClassWithArrays() { + val expected = + """{ + | "arraySimple": ["a", "b", "c", "d"], + | "nestedArrays": [["e", "f"], [], ["g", "h"]], + | "arrayOfMaps": [{"A": ["aa"], "B": ["bb"]}, {}, {"C": ["cc", "ccc"]}], + |}""" + .trimMargin() + + val dataClass = + DataClassWithArrays( + arrayOf("a", "b", "c", "d"), + arrayOf(arrayOf("e", "f"), emptyArray(), arrayOf("g", "h")), + arrayOf( + mapOf("A" to arrayOf("aa"), "B" to arrayOf("bb")), emptyMap(), mapOf("C" to arrayOf("cc", "ccc")))) + + assertRoundTrips(expected, dataClass) + } + + @Test + fun testDataClassWithNativeArrays() { + val expected = + """{ + | "booleanArray": [true, false], + | "byteArray": [1, 2], + | "charArray": ["a", "b"], + | "doubleArray": [ 1.1, 2.2, 3.3], + | "floatArray": [1.0, 2.0, 3.0], + | "intArray": [10, 20, 30, 40], + | "longArray": [{ "$numberLong": "111" }, { "$numberLong": "222" }, { "$numberLong": "333" }], + | "shortArray": [1, 2, 3], + | "listOfArrays": [[true, false], [false, true]], + | "mapOfArrays": {"A": [1, 2], "B":[], "C": [3, 4]} + |}""" + .trimMargin() + + val dataClass = + DataClassWithNativeArrays( + booleanArrayOf(true, false), + byteArrayOf(1, 2), + charArrayOf('a', 'b'), + doubleArrayOf(1.1, 2.2, 3.3), + floatArrayOf(1.0f, 2.0f, 3.0f), + intArrayOf(10, 20, 30, 40), + longArrayOf(111, 222, 333), + shortArrayOf(1, 2, 3), + listOf(booleanArrayOf(true, false), booleanArrayOf(false, true)), + mapOf(Pair("A", intArrayOf(1, 2)), Pair("B", intArrayOf()), Pair("C", intArrayOf(3, 4)))) + + assertRoundTrips(expected, dataClass) + } + + @Test + fun testDataClassWithDefaults() { + val expectedDefault = + """{ + | "boolean": false, + | "string": "String", + | "listSimple": ["a", "b", "c"] + |}""" + .trimMargin() + + val defaultDataClass = DataClassWithDefaults() + 
assertRoundTrips(expectedDefault, defaultDataClass) + } + + @Test + fun testDataClassWithNulls() { + val dataClass = DataClassWithNulls(null, null, null) + assertRoundTrips(emptyDocument, dataClass) + + val withStoredNulls = BsonDocument.parse("""{"boolean": null, "string": null, "listSimple": null}""") + assertDecodesTo(withStoredNulls, dataClass) + } + + @Test + fun testDataClassWithListThatLastItemDefaultsToNull() { + val expected = + """{ + | "elements": [{"required": "required"}, {"required": "required"}], + |}""" + .trimMargin() + + val dataClass = + DataClassWithListThatLastItemDefaultsToNull( + listOf(DataClassLastItemDefaultsToNull("required"), DataClassLastItemDefaultsToNull("required"))) + assertRoundTrips(expected, dataClass) + } + + @Test + fun testDataClassWithNullableGenericsNotNull() { + val expected = + """{ + | "box": {"boxed": "String"} + |}""" + .trimMargin() + + val dataClass = DataClassWithNullableGeneric(Box("String")) + assertRoundTrips(expected, dataClass) + } + + @Test + fun testDataClassWithNullableGenericsNull() { + val expected = """{"box": {}}""" + val dataClass = DataClassWithNullableGeneric(Box(null)) + assertRoundTrips(expected, dataClass) + } + + @Test + fun testDataClassSelfReferential() { + val expected = + """{"name": "tree", + | "left": {"name": "L", "left": {"name": "LL"}, "right": {"name": "LR"}}, + | "right": {"name": "R", + | "left": {"name": "RL", + | "left": {"name": "RLL"}, + | "right": {"name": "RLR"}}, + | "right": {"name": "RR"}} + |}""" + .trimMargin() + val dataClass = + DataClassSelfReferential( + "tree", + DataClassSelfReferential("L", DataClassSelfReferential("LL"), DataClassSelfReferential("LR")), + DataClassSelfReferential( + "R", + DataClassSelfReferential("RL", DataClassSelfReferential("RLL"), DataClassSelfReferential("RLR")), + DataClassSelfReferential("RR"))) + + assertRoundTrips(expected, dataClass) + } + + @Test + fun testDataClassWithEmbedded() { + val expected = """{"id": "myId", "embedded": {"name": "embedded1"}}""" + val dataClass = DataClassWithEmbedded("myId", DataClassEmbedded("embedded1")) + + assertRoundTrips(expected, dataClass) + } + + @Test + fun testDataClassListOfDataClasses() { + val expected = """{"id": "myId", "nested": [{"name": "embedded1"}, {"name": "embedded2"}]}""" + val dataClass = + DataClassListOfDataClasses("myId", listOf(DataClassEmbedded("embedded1"), DataClassEmbedded("embedded2"))) + + assertRoundTrips(expected, dataClass) + } + + @Test + fun testDataClassListOfListOfDataClasses() { + val expected = """{"id": "myId", "nested": [[{"name": "embedded1"}], [{"name": "embedded2"}]]}""" + val dataClass = + DataClassListOfListOfDataClasses( + "myId", listOf(listOf(DataClassEmbedded("embedded1")), listOf(DataClassEmbedded("embedded2")))) + + assertRoundTrips(expected, dataClass) + } + + @Test + fun testDataClassMapOfDataClasses() { + val expected = """{"id": "myId", "nested": {"first": {"name": "embedded1"}, "second": {"name": "embedded2"}}}""" + val dataClass = + DataClassMapOfDataClasses( + "myId", mapOf("first" to DataClassEmbedded("embedded1"), "second" to DataClassEmbedded("embedded2"))) + + assertRoundTrips(expected, dataClass) + } + + @Test + fun testDataClassMapOfListOfDataClasses() { + val expected = + """{"id": "myId", "nested": {"first": [{"name": "embedded1"}], "second": [{"name": "embedded2"}]}}""" + val dataClass = + DataClassMapOfListOfDataClasses( + "myId", + mapOf( + "first" to listOf(DataClassEmbedded("embedded1")), + "second" to listOf(DataClassEmbedded("embedded2")))) + + 
assertRoundTrips(expected, dataClass)
+    }
+
+    @Test
+    fun testDataClassWithParameterizedDataClass() {
+        val expected =
+            """{"id": "myId",
+            | "parameterizedDataClass": {"number": 2.0, "string": "myString",
+            | "parameterizedList": [{"name": "embedded1"}]}
+            |}"""
+                .trimMargin()
+        val dataClass =
+            DataClassWithParameterizedDataClass(
+                "myId", DataClassParameterized(2.0, "myString", listOf(DataClassEmbedded("embedded1"))))
+
+        assertRoundTrips(expected, dataClass)
+    }
+
+    @Test
+    fun testDataClassWithNestedParameterizedDataClass() {
+        val expected =
+            """{"id": "myId",
+            |"nestedParameterized": {
+            | "parameterizedDataClass":
+            | {"number": 4.2, "string": "myString", "parameterizedList": [{"name": "embedded1"}]},
+            | "other": "myOtherString", "optionalOther": "myOptionalOtherString"
+            | }
+            |}"""
+                .trimMargin()
+        val dataClass =
+            DataClassWithNestedParameterizedDataClass(
+                "myId",
+                DataClassWithNestedParameterized(
+                    DataClassParameterized(4.2, "myString", listOf(DataClassEmbedded("embedded1"))),
+                    "myOtherString",
+                    "myOptionalOtherString"))
+
+        assertRoundTrips(expected, dataClass)
+    }
+
+    @Test
+    fun testDataClassWithPair() {
+        val expected = """{"pair": {"first": "a", "second": 1}}"""
+        val dataClass = DataClassWithPair("a" to 1)
+
+        assertRoundTrips(expected, dataClass)
+    }
+
+    @Test
+    fun testDataClassWithTriple() {
+        val expected = """{"triple": {"first": "a", "second": 1, "third": 2.1}}"""
+        val dataClass = DataClassWithTriple(Triple("a", 1, 2.1))
+
+        assertRoundTrips(expected, dataClass)
+    }
+
+    @Test
+    fun testDataClassNestedParameterizedTypes() {
+        val expected =
+            """{
+            |"triple": {
+            | "first": "0",
+            | "second": {"first": 1, "second": {"first": 1.2, "second": {"first": "1.3", "second": 1.3}}},
+            | "third": {"first": 2, "second": {"first": 2.1, "second": "two dot two"},
+            |           "third": {"first": "3.1", "second": {"first": 3.2, "second": "three dot two" },
+            |                     "third": 3.3 }}
+            | }
+            |}"""
+                .trimMargin()
+        val dataClass =
+            DataClassNestedParameterizedTypes(
+                Triple(
+                    "0",
+                    Pair(1, Pair(1.2, Pair("1.3", 1.3))),
+                    Triple(2, Pair(2.1, "two dot two"), Triple("3.1", Pair(3.2, "three dot two"), 3.3))))
+
+        assertRoundTrips(expected, dataClass)
+    }
+
+    @Test
+    fun testDataClassWithMutableList() {
+        val expected = """{"value": ["A", "B", "C"]}"""
+        val dataClass = DataClassWithMutableList(mutableListOf("A", "B", "C"))
+
+        assertRoundTrips(expected, dataClass)
+    }
+
+    @Test
+    fun testDataClassWithMutableSet() {
+        val expected = """{"value": ["A", "B", "C"]}"""
+        val dataClass = DataClassWithMutableSet(mutableSetOf("A", "B", "C"))
+
+        assertRoundTrips(expected, dataClass)
+    }
+
+    @Test
+    fun testDataClassWithMutableMap() {
+        val expected = """{"value": {"a": "A", "b": "B", "c": "C"}}"""
+        val dataClass = DataClassWithMutableMap(mutableMapOf("a" to "A", "b" to "B", "c" to "C"))
+
+        assertRoundTrips(expected, dataClass)
+    }
+
+    @Test
+    fun testDataClassWithEnum() {
+        val expected = """{"value": "A"}"""
+
+        val dataClass = DataClassWithEnum(Key.A)
+        assertRoundTrips(expected, dataClass)
+    }
+
+    @Test
+    fun testDataClassWithEnumKeyMap() {
+        assertThrows<CodecConfigurationException>("Unsupported map key") {
+            DataClassCodec.create(DataClassWithEnumMapKey::class, registry())
+        }
+    }
+
+    @Test
+    fun testDataClassWithSequence() {
+        assertThrows<CodecConfigurationException>("Unsupported type Sequence") {
+            DataClassCodec.create(DataClassWithSequence::class, registry())
+        }
+    }
+
+    @Test
+    fun testDataClassWithBooleanKeyMap() {
+        assertThrows<CodecConfigurationException>("Unsupported Map key type") {
+            DataClassCodec.create(DataClassWithBooleanMapKey::class, registry())
+        }
+    }
+
+    @Test
+    fun testDataClassWithDataClassKeyMap() {
+        assertThrows<CodecConfigurationException>("Unsupported Map key type") {
+            DataClassCodec.create(DataClassWithDataClassMapKey::class, registry())
+        }
+    }
+
+    @Test
+    fun testDataClassEmbeddedWithExtraData() {
+        val expected =
+            """{
+            | "extraA": "extraA",
+            | "name": "NAME",
+            | "extraB": "extraB"
+            |}"""
+                .trimMargin()
+
+        val dataClass = DataClassEmbedded("NAME")
+        assertDecodesTo(BsonDocument.parse(expected), dataClass)
+    }
+
+    @Test
+    fun testDataClassWithObjectIdAndBsonDocument() {
+        val subDocument =
+            """{
+            | "_id": 1,
+            | "arrayEmpty": [],
+            | "arraySimple": [{"${'$'}numberInt": "1"}, {"${'$'}numberInt": "2"}, {"${'$'}numberInt": "3"}],
+            | "arrayComplex": [{"a": {"${'$'}numberInt": "1"}}, {"a": {"${'$'}numberInt": "2"}}],
+            | "arrayMixedTypes": [{"${'$'}numberInt": "1"}, {"${'$'}numberInt": "2"}, true,
+            |  [{"${'$'}numberInt": "1"}, {"${'$'}numberInt": "2"}, {"${'$'}numberInt": "3"}],
+            |  {"a": {"${'$'}numberInt": "2"}}],
+            | "arrayComplexMixedTypes": [{"a": {"${'$'}numberInt": "1"}}, {"a": "a"}],
+            | "binary": {"${'$'}binary": {"base64": "S2Fma2Egcm9ja3Mh", "subType": "00"}},
+            | "boolean": true,
+            | "code": {"${'$'}code": "int i = 0;"},
+            | "codeWithScope": {"${'$'}code": "int x = y", "${'$'}scope": {"y": {"${'$'}numberInt": "1"}}},
+            | "dateTime": {"${'$'}date": {"${'$'}numberLong": "1577836801000"}},
+            | "decimal128": {"${'$'}numberDecimal": "1.0"},
+            | "documentEmpty": {},
+            | "document": {"a": {"${'$'}numberInt": "1"}},
+            | "double": {"${'$'}numberDouble": "62.0"},
+            | "int32": {"${'$'}numberInt": "42"},
+            | "int64": {"${'$'}numberLong": "52"},
+            | "maxKey": {"${'$'}maxKey": 1},
+            | "minKey": {"${'$'}minKey": 1},
+            | "null": null,
+            | "objectId": {"${'$'}oid": "5f3d1bbde0ca4d2829c91e1d"},
+            | "regex": {"${'$'}regularExpression": {"pattern": "^test.*regex.*xyz$", "options": "i"}},
+            | "string": "the fox ...",
+            | "symbol": {"${'$'}symbol": "ruby stuff"},
+            | "timestamp": {"${'$'}timestamp": {"t": 305419896, "i": 5}},
+            | "undefined": {"${'$'}undefined": true}
+            | }"""
+                .trimMargin()
+        val expected = """{"objectId": {"${'$'}oid": "111111111111111111111111"}, "bsonDocument": $subDocument}"""
+
+        val dataClass =
+            DataClassWithObjectIdAndBsonDocument(ObjectId("111111111111111111111111"), BsonDocument.parse(subDocument))
+        assertRoundTrips(expected, dataClass)
+    }
+
+    @Test
+    fun testDataClassSealed() {
+        val dataClassA = DataClassSealedA("string")
+        val dataClassB = DataClassSealedB(1)
+        val dataClassC = DataClassSealedC("String")
+
+        val expectedDataClassSealedA = """{"a": "string"}"""
+        assertRoundTrips(expectedDataClassSealedA, dataClassA)
+
+        val expectedDataClassSealedB = """{"b": 1}"""
+        assertRoundTrips(expectedDataClassSealedB, dataClassB)
+
+        val expectedDataClassSealedC = """{"c": "String"}"""
+        assertRoundTrips(expectedDataClassSealedC, dataClassC)
+
+        assertThrows<CodecConfigurationException>("No Codec for DataClassSealed") {
+            DataClassCodec.create(DataClassListOfSealed::class, registry())
+        }
+    }
+
+    @Test
+    fun testDataFailures() {
+        assertThrows<CodecConfigurationException>("Missing data") {
+            val codec = DataClassCodec.create(DataClassWithSimpleValues::class, registry())
+            codec?.decode(BsonDocumentReader(BsonDocument()), DecoderContext.builder().build())
+        }
+
+        assertThrows<CodecConfigurationException>("Invalid types") {
+            val data =
+                BsonDocument.parse(
+                    """{"char": 123, "short": "2", "int": 22, "long": "ok", "float": true, "double": false,
+                    | "boolean": "true", "string": 99}"""
+                        .trimMargin())
+            val codec = DataClassCodec.create(DataClassWithSimpleValues::class, registry())
+            codec?.decode(BsonDocumentReader(data), DecoderContext.builder().build())
+        }
+
+        assertThrows<CodecConfigurationException>("Invalid complex types") {
+            val data = BsonDocument.parse("""{"_id": "myId", "embedded": 123}""")
+            val codec = DataClassCodec.create(DataClassWithEmbedded::class, registry())
+            codec?.decode(BsonDocumentReader(data), DecoderContext.builder().build())
+        }
+
+        assertThrows<CodecConfigurationException>("Failing init") {
+            val data = BsonDocument.parse("""{"id": "myId"}""")
+            val codec = DataClassCodec.create(DataClassWithFailingInit::class, registry())
+            codec?.decode(BsonDocumentReader(data), DecoderContext.builder().build())
+        }
+    }
+
+    @Test
+    fun testSupportedAnnotations() {
+        assertRoundTrips("""{"_id": "a"}""", DataClassWithBsonId("a"))
+        assertRoundTrips("""{"_id": "a"}""", DataClassWithBsonProperty("a"))
+    }
+
+    @Test
+    fun testInvalidAnnotations() {
+        assertThrows<CodecConfigurationException> {
+            DataClassCodec.create(DataClassWithBsonDiscriminator::class, registry())
+        }
+        assertThrows<CodecConfigurationException> {
+            DataClassCodec.create(DataClassWithBsonConstructor::class, registry())
+        }
+        assertThrows<CodecConfigurationException> { DataClassCodec.create(DataClassWithBsonIgnore::class, registry()) }
+        assertThrows<CodecConfigurationException> {
+            DataClassCodec.create(DataClassWithBsonExtraElements::class, registry())
+        }
+        assertThrows<CodecConfigurationException> {
+            DataClassCodec.create(DataClassWithInvalidBsonRepresentation::class, registry())
+        }
+    }
+
+    private fun <T : Any> assertRoundTrips(expected: String, value: T) {
+        assertDecodesTo(assertEncodesTo(expected, value), value)
+    }
+
+    @Suppress("UNCHECKED_CAST")
+    private fun <T : Any> assertEncodesTo(json: String, value: T): BsonDocument {
+        val expected = BsonDocument.parse(json)
+        val codec: DataClassCodec<T> = DataClassCodec.create(value::class, registry()) as DataClassCodec<T>
+        val document = BsonDocument()
+        val writer = BsonDocumentWriter(document)
+
+        codec.encode(writer, value, EncoderContext.builder().build())
+        assertEquals(expected, document)
+        if (expected.contains("_id")) {
+            assertEquals("_id", document.firstKey)
+        }
+        return document
+    }
+
+    @Suppress("UNCHECKED_CAST")
+    private fun <T : Any> assertDecodesTo(value: BsonDocument, expected: T) {
+        val codec: DataClassCodec<T> = DataClassCodec.create(expected::class, registry()) as DataClassCodec<T>
+        val decoded: T = codec.decode(BsonDocumentReader(value), DecoderContext.builder().build())
+
+        assertEquals(expected, decoded)
+    }
+
+    private fun registry() = fromProviders(ArrayCodecProvider(), DataClassCodecProvider(), Bson.DEFAULT_CODEC_REGISTRY)
+}
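An aside on the harness above: every assertion builds a codec for the concrete class and
round-trips it through BsonDocumentWriter/BsonDocumentReader. A minimal standalone sketch of
the same flow (Person is an illustrative class; the registry mirrors registry() above):

    data class Person(val name: String, val age: Int)

    val registry = fromProviders(DataClassCodecProvider(), Bson.DEFAULT_CODEC_REGISTRY)
    val codec = registry.get(Person::class.java)

    val document = BsonDocument()
    codec.encode(BsonDocumentWriter(document), Person("Ada", 36), EncoderContext.builder().build())
    // document == {"name": "Ada", "age": 36}

    val decoded = codec.decode(BsonDocumentReader(document), DecoderContext.builder().build())
    // decoded == Person("Ada", 36)
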
diff --git a/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/samples/DataClasses.kt b/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/samples/DataClasses.kt
new file mode 100644
index 00000000000..77483cc9ee7
--- /dev/null
+++ b/bson-kotlin/src/test/kotlin/org/bson/codecs/kotlin/samples/DataClasses.kt
@@ -0,0 +1,258 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.bson.codecs.kotlin.samples
+
+import kotlin.time.Duration
+import org.bson.BsonDocument
+import org.bson.BsonMaxKey
+import org.bson.BsonType
+import org.bson.codecs.pojo.annotations.BsonCreator
+import org.bson.codecs.pojo.annotations.BsonDiscriminator
+import org.bson.codecs.pojo.annotations.BsonExtraElements
+import org.bson.codecs.pojo.annotations.BsonId
+import org.bson.codecs.pojo.annotations.BsonIgnore
+import org.bson.codecs.pojo.annotations.BsonProperty
+import org.bson.codecs.pojo.annotations.BsonRepresentation
+import org.bson.types.ObjectId
+
+data class DataClassWithSimpleValues(
+    val char: Char,
+    val byte: Byte,
+    val short: Short,
+    val int: Int,
+    val long: Long,
+    val float: Float,
+    val double: Double,
+    val boolean: Boolean,
+    val string: String
+)
+
+data class DataClassWithCollections(
+    val listSimple: List<String>,
+    val listList: List<List<String>>,
+    val listMap: List<Map<String, Int>>,
+    val mapSimple: Map<String, Int>,
+    val mapList: Map<String, List<String>>,
+    val mapMap: Map<String, Map<String, Int>>
+)
+
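+// Array properties use JVM reference equality in a data class's generated `equals`/`hashCode`,
+// so the two array-holding samples below override both with content-based comparisons
+// (`contentEquals`/`contentDeepEquals`) to make the round-trip assertions meaningful.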
+data class DataClassWithArrays(
+    val arraySimple: Array<String>,
+    val nestedArrays: Array<Array<String>>,
+    val arrayOfMaps: Array<Map<String, Array<String>>>
+) {
+    override fun equals(other: Any?): Boolean {
+        if (this === other) return true
+        if (javaClass != other?.javaClass) return false
+
+        other as DataClassWithArrays
+
+        if (!arraySimple.contentEquals(other.arraySimple)) return false
+        if (!nestedArrays.contentDeepEquals(other.nestedArrays)) return false
+
+        if (arrayOfMaps.size != other.arrayOfMaps.size) return false
+        arrayOfMaps.forEachIndexed { i, map ->
+            val otherMap = other.arrayOfMaps[i]
+            if (map.keys != otherMap.keys) return false
+            map.keys.forEach { key -> if (!map[key].contentEquals(otherMap[key])) return false }
+        }
+
+        return true
+    }
+
+    override fun hashCode(): Int {
+        var result = arraySimple.contentHashCode()
+        result = 31 * result + nestedArrays.contentDeepHashCode()
+        result = 31 * result + arrayOfMaps.contentHashCode()
+        return result
+    }
+}
+
+data class DataClassWithNativeArrays(
+    val booleanArray: BooleanArray,
+    val byteArray: ByteArray,
+    val charArray: CharArray,
+    val doubleArray: DoubleArray,
+    val floatArray: FloatArray,
+    val intArray: IntArray,
+    val longArray: LongArray,
+    val shortArray: ShortArray,
+    val listOfArrays: List<BooleanArray>,
+    val mapOfArrays: Map<String, IntArray>
+) {
+
+    @SuppressWarnings("ComplexMethod")
+    override fun equals(other: Any?): Boolean {
+        if (this === other) return true
+        if (javaClass != other?.javaClass) return false
+
+        other as DataClassWithNativeArrays
+
+        if (!booleanArray.contentEquals(other.booleanArray)) return false
+        if (!byteArray.contentEquals(other.byteArray)) return false
+        if (!charArray.contentEquals(other.charArray)) return false
+        if (!doubleArray.contentEquals(other.doubleArray)) return false
+        if (!floatArray.contentEquals(other.floatArray)) return false
+        if (!intArray.contentEquals(other.intArray)) return false
+        if (!longArray.contentEquals(other.longArray)) return false
+        if (!shortArray.contentEquals(other.shortArray)) return false
+
+        if (listOfArrays.size != other.listOfArrays.size) return false
+        listOfArrays.forEachIndexed { i, value -> if (!value.contentEquals(other.listOfArrays[i])) return false }
+
+        if (mapOfArrays.keys != other.mapOfArrays.keys) return false
+        mapOfArrays.keys.forEach { key -> if (!mapOfArrays[key].contentEquals(other.mapOfArrays[key])) return false }
+
+        return true
+    }
+
+    override fun hashCode(): Int {
+        var result = booleanArray.contentHashCode()
+        result = 31 * result + byteArray.contentHashCode()
+        result = 31 * result + charArray.contentHashCode()
+        result = 31 * result + doubleArray.contentHashCode()
+        result = 31 * result + floatArray.contentHashCode()
+        result = 31 * result + intArray.contentHashCode()
+        result = 31 * result + longArray.contentHashCode()
+        result = 31 * result + shortArray.contentHashCode()
+        result = 31 * result + listOfArrays.hashCode()
+        result = 31 * result + mapOfArrays.hashCode()
+        return result
+    }
+}
+
+data class DataClassWithDefaults(
+    val boolean: Boolean = false,
+    val string: String = "String",
+    val listSimple: List<String> = listOf("a", "b", "c")
+)
+
+data class DataClassWithNulls(val boolean: Boolean?, val string: String?, val listSimple: List<String>?)
+
+data class DataClassWithListThatLastItemDefaultsToNull(val elements: List<DataClassLastItemDefaultsToNull>)
+
+data class DataClassLastItemDefaultsToNull(val required: String, val optional: String? = null)
+
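+// A self-referential (recursive) type: DataClassCodecTest round-trips a small binary tree
+// of these to exercise nested lookups of the same data class codec.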
+data class DataClassSelfReferential(
+    val name: String,
+    val left: DataClassSelfReferential? = null,
+    val right: DataClassSelfReferential? = null
+)
+
+data class DataClassEmbedded(val name: String)
+
+data class DataClassWithEmbedded(val id: String, val embedded: DataClassEmbedded)
+
+data class DataClassListOfDataClasses(val id: String, val nested: List<DataClassEmbedded>)
+
+data class DataClassListOfListOfDataClasses(val id: String, val nested: List<List<DataClassEmbedded>>)
+
+data class DataClassMapOfDataClasses(val id: String, val nested: Map<String, DataClassEmbedded>)
+
+data class DataClassMapOfListOfDataClasses(val id: String, val nested: Map<String, List<DataClassEmbedded>>)
+
+data class DataClassWithParameterizedDataClass(
+    val id: String,
+    val parameterizedDataClass: DataClassParameterized<Double, DataClassEmbedded>
+)
+
+data class DataClassParameterized<N : Number, T>(val number: N, val string: String, val parameterizedList: List<T>)
+
+data class DataClassWithNestedParameterizedDataClass(
+    val id: String,
+    val nestedParameterized: DataClassWithNestedParameterized<DataClassEmbedded, String, Double>
+)
+
+data class DataClassWithNestedParameterized<A, B, C : Number>(
+    val parameterizedDataClass: DataClassParameterized<C, A>,
+    val other: B,
+    val optionalOther: B?
+)
+
+data class DataClassWithPair(val pair: Pair<String, Int>)
+
+data class DataClassWithTriple(val triple: Triple<String, Int, Double>)
+
+data class DataClassNestedParameterizedTypes(
+    val triple:
+        Triple<
+            String,
+            Pair<Int, Pair<Double, Pair<String, Double>>>,
+            Triple<Int, Pair<Double, String>, Triple<String, Pair<Double, String>, Double>>>
+)
+
+data class DataClassWithMutableList(val value: MutableList<String>)
+
+data class DataClassWithMutableSet(val value: MutableSet<String>)
+
+data class DataClassWithMutableMap(val value: MutableMap<String, String>)
+
+data class DataClassWithBooleanMapKey(val map: Map<Boolean, Boolean>)
+
+enum class Key {
+    A,
+    B
+}
+
+data class DataClassWithEnum(val value: Key)
+
+data class DataClassWithEnumMapKey(val map: Map<Key, String>)
+
+data class DataClassKey(val value: String)
+
+data class DataClassWithDataClassMapKey(val map: Map<DataClassKey, String>)
+
+data class DataClassWithObjectIdAndBsonDocument(val objectId: ObjectId, val bsonDocument: BsonDocument)
+
+sealed class DataClassSealed
+
+data class DataClassSealedA(val a: String) : DataClassSealed()
+
+data class DataClassSealedB(val b: Int) : DataClassSealed()
+
+data class DataClassSealedC(val c: String) : DataClassSealed()
+
+data class DataClassListOfSealed(val items: List<DataClassSealed>)
+
+data class DataClassWithBsonId(@BsonId val id: String)
+
+data class DataClassWithBsonProperty(@BsonProperty("_id") val id: String)
+
+@BsonDiscriminator data class DataClassWithBsonDiscriminator(val id: String)
+
+data class DataClassWithBsonIgnore(val id: String, @BsonIgnore val ignored: String)
+
+data class DataClassWithBsonExtraElements(val id: String, @BsonExtraElements val extraElements: Map<String, String>)
+
+data class DataClassWithBsonConstructor(val id: String, val count: Int) {
+    @BsonCreator constructor(id: String) : this(id, -1)
+}
+
+data class DataClassWithInvalidBsonRepresentation(@BsonRepresentation(BsonType.STRING) val id: BsonMaxKey)
+
+data class DataClassWithFailingInit(val id: String) {
+    init {
+        require(false)
+    }
+}
+
+data class DataClassWithSequence(val value: Sequence<String>)
+
+data class DataClassWithJVMErasure(val duration: Duration, val ints: List<Int>)
+
+data class Box<T>(val boxed: T)
+
+data class DataClassWithNullableGeneric(val box: Box<String?>)
diff --git a/bson-kotlinx/build.gradle.kts b/bson-kotlinx/build.gradle.kts
new file mode 100644
index 00000000000..1671a876edf
--- /dev/null
+++ b/bson-kotlinx/build.gradle.kts
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +import ProjectExtensions.configureJarManifest +import ProjectExtensions.configureMavenPublication + +plugins { + id("project.kotlin") + alias(libs.plugins.kotlin.serialization) +} + +base.archivesName.set("bson-kotlinx") + +dependencies { + api(project(path = ":bson", configuration = "default")) + implementation(platform(libs.kotlinx.serialization)) + implementation(libs.kotlinx.serialization.core) + implementation(libs.kotlin.reflect) + + optionalApi(libs.kotlinx.serialization.datetime) + optionalApi(libs.kotlinx.serialization.json) + + // Test case checks MongoClientSettings.getDefaultCodecRegistry() support + testImplementation(project(path = ":driver-core", configuration = "default")) +} + +configureMavenPublication { + pom { + name.set("BSON Kotlinx") + description.set("The BSON Codec for Kotlinx serialization") + url.set("https://bsonspec.org") + } +} + +configureJarManifest { attributes["Automatic-Module-Name"] = "org.mongodb.bson.kotlinx" } diff --git a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonConfiguration.kt b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonConfiguration.kt new file mode 100644 index 00000000000..8a163f42f83 --- /dev/null +++ b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonConfiguration.kt @@ -0,0 +1,49 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.bson.codecs.kotlinx + +/** + * Bson Configuration for serialization + * + * Usage example with codecs: + * ``` + * val codec = KotlinSerializerCodec.create(mykClass, bsonConfiguration = BsonConfiguration(encodeDefaults = false)) + * ``` + * + * @property encodeDefaults encode default values, defaults to true + * @property explicitNulls encode null values, defaults to false + * @property classDiscriminator class discriminator to use when encoding polymorphic types + */ +public data class BsonConfiguration( + val encodeDefaults: Boolean = true, + val explicitNulls: Boolean = false, + val classDiscriminator: String = "_t", + val bsonNamingStrategy: BsonNamingStrategy? = null +) + +/** + * Optional BSON naming strategy for a field. + * + * @since 5.4 + */ +public enum class BsonNamingStrategy { + + /** + * A strategy that transforms serial names from camel case to snake case — lowercase characters with words separated + * by underscores. + */ + SNAKE_CASE, +} diff --git a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonDecoder.kt b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonDecoder.kt new file mode 100644 index 00000000000..c00d09345d0 --- /dev/null +++ b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonDecoder.kt @@ -0,0 +1,342 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.bson.codecs.kotlinx + +import kotlinx.serialization.DeserializationStrategy +import kotlinx.serialization.ExperimentalSerializationApi +import kotlinx.serialization.SerializationException +import kotlinx.serialization.descriptors.PolymorphicKind +import kotlinx.serialization.descriptors.PrimitiveKind +import kotlinx.serialization.descriptors.SerialDescriptor +import kotlinx.serialization.descriptors.SerialKind +import kotlinx.serialization.descriptors.StructureKind +import kotlinx.serialization.encoding.AbstractDecoder +import kotlinx.serialization.encoding.CompositeDecoder +import kotlinx.serialization.encoding.CompositeDecoder.Companion.DECODE_DONE +import kotlinx.serialization.encoding.CompositeDecoder.Companion.UNKNOWN_NAME +import kotlinx.serialization.encoding.Decoder +import kotlinx.serialization.modules.SerializersModule +import org.bson.AbstractBsonReader +import org.bson.BsonInvalidOperationException +import org.bson.BsonReader +import org.bson.BsonReaderMark +import org.bson.BsonType +import org.bson.BsonValue +import org.bson.codecs.BsonValueCodec +import org.bson.codecs.DecoderContext +import org.bson.codecs.kotlinx.utils.BsonCodecUtils.cacheElementNamesByDescriptor +import org.bson.codecs.kotlinx.utils.BsonCodecUtils.createBsonArrayDecoder +import org.bson.codecs.kotlinx.utils.BsonCodecUtils.createBsonDecoder +import org.bson.codecs.kotlinx.utils.BsonCodecUtils.createBsonDocumentDecoder +import org.bson.codecs.kotlinx.utils.BsonCodecUtils.createBsonMapDecoder +import org.bson.codecs.kotlinx.utils.BsonCodecUtils.createBsonPolymorphicDecoder +import org.bson.codecs.kotlinx.utils.BsonCodecUtils.getCachedElementNamesByDescriptor +import org.bson.internal.NumberCodecHelper +import org.bson.internal.StringCodecHelper +import org.bson.types.ObjectId + +/** + * The BsonDecoder interface + * + * For custom serialization handlers + */ +@ExperimentalSerializationApi +public sealed interface BsonDecoder : Decoder, CompositeDecoder { + + /** @return the decoded ObjectId */ + public fun decodeObjectId(): ObjectId + /** @return the decoded BsonValue */ + public fun decodeBsonValue(): BsonValue +} + +@OptIn(ExperimentalSerializationApi::class) +internal sealed class AbstractBsonDecoder( + val reader: AbstractBsonReader, + override val serializersModule: SerializersModule, + val configuration: BsonConfiguration +) : BsonDecoder, AbstractDecoder() { + + companion object { + + val bsonValueCodec = BsonValueCodec() + const val UNKNOWN_INDEX = -10 + val validKeyKinds = setOf(PrimitiveKind.STRING, PrimitiveKind.CHAR, SerialKind.ENUM) + + fun validateCurrentBsonType( + reader: BsonReader, + expectedType: BsonType, + descriptor: SerialDescriptor, + actualType: (descriptor: SerialDescriptor) -> String = { it.kind.toString() } + ) { + reader.currentBsonType?.let { + if (it != expectedType) { + throw SerializationException( + "Invalid data for `${actualType(descriptor)}` expected a bson " + + "${expectedType.name.lowercase()} found: ${reader.currentBsonType}") + } + } + } + } + + private data class ElementMetadata(val name: String, val nullable: Boolean, var 
processed: Boolean = false)
+    private var elementsMetadata: Array<ElementMetadata>? = null
+    private var currentIndex: Int = UNKNOWN_INDEX
+
+    private fun initElementMetadata(descriptor: SerialDescriptor) {
+        if (this.elementsMetadata != null) return
+        val elementsMetadata =
+            Array(descriptor.elementsCount) {
+                val elementDescriptor = descriptor.getElementDescriptor(it)
+                ElementMetadata(
+                    elementDescriptor.serialName, elementDescriptor.isNullable && !descriptor.isElementOptional(it))
+            }
+        this.elementsMetadata = elementsMetadata
+        cacheElementNamesByDescriptor(descriptor, configuration)
+    }
+
+    override fun decodeElementIndex(descriptor: SerialDescriptor): Int {
+        initElementMetadata(descriptor)
+        currentIndex = decodeElementIndexImpl(descriptor)
+        elementsMetadata?.getOrNull(currentIndex)?.processed = true
+        return currentIndex
+    }
+
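+    // Note: on END_OF_DOCUMENT / END_OF_ARRAY this returns the index of the first unprocessed
+    // nullable (non-optional) element, or -1 (DECODE_DONE) if there is none, so nullable
+    // properties missing from the stored document are decoded explicitly as null.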
+    @Suppress("ReturnCount", "ComplexMethod")
+    private fun decodeElementIndexImpl(descriptor: SerialDescriptor): Int {
+        val elementMetadata = elementsMetadata ?: error("elementsMetadata may not be null.")
+        val name: String? =
+            when (reader.state ?: error("State of reader may not be null.")) {
+                AbstractBsonReader.State.NAME -> reader.readName()
+                AbstractBsonReader.State.VALUE -> reader.currentName
+                AbstractBsonReader.State.TYPE -> {
+                    reader.readBsonType()
+                    return decodeElementIndexImpl(descriptor)
+                }
+                AbstractBsonReader.State.END_OF_DOCUMENT,
+                AbstractBsonReader.State.END_OF_ARRAY ->
+                    return elementMetadata.indexOfFirst { it.nullable && !it.processed }
+                else -> null
+            }
+
+        return name?.let {
+            val index =
+                if (configuration.bsonNamingStrategy == BsonNamingStrategy.SNAKE_CASE) {
+                    getCachedElementNamesByDescriptor(descriptor)[it]?.let { name -> descriptor.getElementIndex(name) }
+                        ?: UNKNOWN_NAME
+                } else {
+                    descriptor.getElementIndex(it)
+                }
+            return if (index == UNKNOWN_NAME) {
+                reader.skipValue()
+                decodeElementIndexImpl(descriptor)
+            } else {
+                index
+            }
+        }
+            ?: UNKNOWN_NAME
+    }
+
+    override fun beginStructure(descriptor: SerialDescriptor): CompositeDecoder {
+        return when (descriptor.kind) {
+            is PolymorphicKind -> createBsonPolymorphicDecoder(descriptor, reader, serializersModule, configuration)
+            is StructureKind.LIST -> createBsonArrayDecoder(descriptor, reader, serializersModule, configuration)
+            is StructureKind.CLASS,
+            StructureKind.OBJECT -> createBsonDocumentDecoder(descriptor, reader, serializersModule, configuration)
+            is StructureKind.MAP -> createBsonMapDecoder(descriptor, reader, serializersModule, configuration)
+            else -> throw SerializationException("Primitives are not supported at top-level")
+        }
+    }
+
+    override fun endStructure(descriptor: SerialDescriptor) {
+        when (descriptor.kind) {
+            is StructureKind.LIST -> reader.readEndArray()
+            is StructureKind.MAP,
+            StructureKind.CLASS,
+            StructureKind.OBJECT -> reader.readEndDocument()
+            else -> {}
+        }
+    }
+
+    override fun decodeByte(): Byte = NumberCodecHelper.decodeByte(reader)
+    override fun decodeChar(): Char = StringCodecHelper.decodeChar(reader)
+    override fun decodeFloat(): Float = NumberCodecHelper.decodeFloat(reader)
+    override fun decodeShort(): Short = NumberCodecHelper.decodeShort(reader)
+    override fun decodeBoolean(): Boolean = reader.readBoolean()
+    override fun decodeDouble(): Double = NumberCodecHelper.decodeDouble(reader)
+    override fun decodeInt(): Int = NumberCodecHelper.decodeInt(reader)
+    override fun decodeLong(): Long = NumberCodecHelper.decodeLong(reader)
+    override fun decodeString(): String = readOrThrow({ reader.readString() }, BsonType.STRING)
+
+    override fun decodeNull(): Nothing? {
+        if (reader.state == AbstractBsonReader.State.VALUE) {
+            readOrThrow({ reader.readNull() }, BsonType.NULL)
+        }
+        return null
+    }
+
+    override fun decodeEnum(enumDescriptor: SerialDescriptor): Int = enumDescriptor.getElementIndex(decodeString())
+    override fun decodeNotNullMark(): Boolean {
+        return reader.state != AbstractBsonReader.State.END_OF_DOCUMENT && reader.currentBsonType != BsonType.NULL
+    }
+
+    override fun decodeObjectId(): ObjectId = readOrThrow({ reader.readObjectId() }, BsonType.OBJECT_ID)
+    override fun decodeBsonValue(): BsonValue = bsonValueCodec.decode(reader, DecoderContext.builder().build())
+
+    private inline fun <T> readOrThrow(action: () -> T, bsonType: BsonType): T {
+        return try {
+            action()
+        } catch (e: BsonInvalidOperationException) {
+            throw BsonInvalidOperationException(
+                "Reading field '${reader.currentName}' failed expected $bsonType type but found:" +
+                    " ${reader.currentBsonType}.",
+                e)
+        }
+    }
+}
+
+/** The default Bson Decoder implementation */
+internal open class BsonDecoderImpl(
+    reader: AbstractBsonReader,
+    serializersModule: SerializersModule,
+    configuration: BsonConfiguration
+) : AbstractBsonDecoder(reader, serializersModule, configuration)
+
+/** The Bson array decoder */
+internal open class BsonArrayDecoder(
+    descriptor: SerialDescriptor,
+    reader: AbstractBsonReader,
+    serializersModule: SerializersModule,
+    configuration: BsonConfiguration
+) : AbstractBsonDecoder(reader, serializersModule, configuration) {
+
+    init {
+        validateCurrentBsonType(reader, BsonType.ARRAY, descriptor)
+        reader.readStartArray()
+    }
+
+    private var index = 0
+    override fun decodeElementIndex(descriptor: SerialDescriptor): Int {
+        val nextType = reader.readBsonType()
+        if (nextType == BsonType.END_OF_DOCUMENT) return DECODE_DONE
+        return index++
+    }
+}
+
+/** The Bson document decoder */
+@OptIn(ExperimentalSerializationApi::class)
+internal open class BsonDocumentDecoder(
+    descriptor: SerialDescriptor,
+    reader: AbstractBsonReader,
+    serializersModule: SerializersModule,
+    configuration: BsonConfiguration
+) : AbstractBsonDecoder(reader, serializersModule, configuration) {
+
+    init {
+        validateCurrentBsonType(reader, BsonType.DOCUMENT, descriptor) { it.serialName }
+        reader.readStartDocument()
+    }
+}
+
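+// Polymorphic values are decoded from documents that carry a class-discriminator field
+// (BsonConfiguration.classDiscriminator, "_t" by default) alongside the payload, e.g.
+// {"_t": "<serialName>", ...}; decodeElementIndex below scans for that field first.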
+/** The Bson polymorphic class decoder */
+@OptIn(ExperimentalSerializationApi::class)
+internal open class BsonPolymorphicDecoder(
+    descriptor: SerialDescriptor,
+    reader: AbstractBsonReader,
+    serializersModule: SerializersModule,
+    configuration: BsonConfiguration
+) : AbstractBsonDecoder(reader, serializersModule, configuration) {
+    private var index = 0
+    private var mark: BsonReaderMark?
+
+    init {
+        mark = reader.mark
+        validateCurrentBsonType(reader, BsonType.DOCUMENT, descriptor) { it.serialName }
+        reader.readStartDocument()
+    }
+
+    override fun <T> decodeSerializableValue(deserializer: DeserializationStrategy<T>): T {
+        mark?.let {
+            it.reset()
+            mark = null
+        }
+        return deserializer.deserialize(createBsonDecoder(reader, serializersModule, configuration))
+    }
+
+    override fun decodeElementIndex(descriptor: SerialDescriptor): Int {
+        var found = false
+        return when (index) {
+            0 -> {
+                while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+                    if (reader.readName() == configuration.classDiscriminator) {
+                        found = true
+                        break
+                    }
+                    reader.skipValue()
+                }
+                if (!found) {
+                    throw SerializationException(
+                        "Missing required discriminator field `${configuration.classDiscriminator}` " +
+                            "for polymorphic class: `${descriptor.serialName}`.")
+                }
+                index++
+            }
+            1 -> index++
+            else -> DECODE_DONE
+        }
+    }
+}
+
+/** The Bson map decoder */
+@OptIn(ExperimentalSerializationApi::class)
+internal open class BsonMapDecoder(
+    descriptor: SerialDescriptor,
+    reader: AbstractBsonReader,
+    serializersModule: SerializersModule,
+    configuration: BsonConfiguration
+) : AbstractBsonDecoder(reader, serializersModule, configuration) {
+    private var index = 0
+    private var isKey = false
+
+    init {
+        validateCurrentBsonType(reader, BsonType.DOCUMENT, descriptor)
+        reader.readStartDocument()
+    }
+
+    override fun decodeString(): String {
+        return if (isKey) {
+            reader.readName()
+        } else {
+            super.decodeString()
+        }
+    }
+
+    override fun decodeElementIndex(descriptor: SerialDescriptor): Int {
+        val keyKind = descriptor.getElementDescriptor(0).kind
+        if (!validKeyKinds.contains(keyKind)) {
+            throw SerializationException(
+                "Invalid key type for ${descriptor.serialName}. Expected STRING or ENUM but found: `${keyKind}`")
+        }
+
+        if (!isKey) {
+            isKey = true
+            val nextType = reader.readBsonType()
+            if (nextType == BsonType.END_OF_DOCUMENT) return DECODE_DONE
+        } else {
+            isKey = false
+        }
+        return index++
+    }
+}
diff --git a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonEncoder.kt b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonEncoder.kt
new file mode 100644
index 00000000000..8a34bccdb36
--- /dev/null
+++ b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonEncoder.kt
@@ -0,0 +1,258 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package org.bson.codecs.kotlinx + +import kotlinx.serialization.ExperimentalSerializationApi +import kotlinx.serialization.SerializationException +import kotlinx.serialization.SerializationStrategy +import kotlinx.serialization.descriptors.PolymorphicKind +import kotlinx.serialization.descriptors.PrimitiveKind +import kotlinx.serialization.descriptors.SerialDescriptor +import kotlinx.serialization.descriptors.SerialKind +import kotlinx.serialization.descriptors.StructureKind +import kotlinx.serialization.encoding.AbstractEncoder +import kotlinx.serialization.encoding.CompositeEncoder +import kotlinx.serialization.encoding.Encoder +import kotlinx.serialization.modules.SerializersModule +import org.bson.BsonValue +import org.bson.BsonWriter +import org.bson.codecs.BsonValueCodec +import org.bson.codecs.EncoderContext +import org.bson.codecs.kotlinx.utils.BsonCodecUtils.convertCamelCase +import org.bson.types.ObjectId + +/** + * The BsonEncoder interface + * + * For custom serialization handlers + */ +@ExperimentalSerializationApi +public sealed interface BsonEncoder : Encoder, CompositeEncoder { + + /** + * Encodes an ObjectId + * + * @param value the ObjectId + */ + public fun encodeObjectId(value: ObjectId) + + /** + * Encodes a BsonValue + * + * @param value the BsonValue + */ + public fun encodeBsonValue(value: BsonValue) +} + +/** + * The default BsonEncoder implementation + * + * Unlike BsonDecoder implementations, state is shared when encoding, so a single class is used to encode Bson Arrays, + * Documents, Polymorphic types and Maps. + */ +@OptIn(ExperimentalSerializationApi::class) +internal open class BsonEncoderImpl( + val writer: BsonWriter, + override val serializersModule: SerializersModule, + val configuration: BsonConfiguration +) : BsonEncoder, AbstractEncoder() { + + companion object { + val validKeyKinds = setOf(PrimitiveKind.STRING, PrimitiveKind.CHAR, SerialKind.ENUM) + val bsonValueCodec = BsonValueCodec() + } + + private var isPolymorphic = false + private var state = STATE.VALUE + private var mapState = MapState() + internal val deferredElementHandler: DeferredElementHandler = DeferredElementHandler() + + override fun shouldEncodeElementDefault(descriptor: SerialDescriptor, index: Int): Boolean = + configuration.encodeDefaults + + override fun beginStructure(descriptor: SerialDescriptor): CompositeEncoder { + when (descriptor.kind) { + is PolymorphicKind -> { + writer.writeStartDocument() + writer.writeName(configuration.classDiscriminator) + isPolymorphic = true + } + is StructureKind.LIST -> writer.writeStartArray() + is StructureKind.CLASS, + StructureKind.OBJECT -> { + if (isPolymorphic) { + isPolymorphic = false + } else { + writer.writeStartDocument() + } + } + is StructureKind.MAP -> { + writer.writeStartDocument() + mapState = MapState() + } + else -> throw SerializationException("Primitives are not supported at top-level") + } + return this + } + + override fun endStructure(descriptor: SerialDescriptor) { + when (descriptor.kind) { + is StructureKind.LIST -> writer.writeEndArray() + StructureKind.MAP, + StructureKind.CLASS, + StructureKind.OBJECT -> writer.writeEndDocument() + else -> {} + } + } + + override fun encodeElement(descriptor: SerialDescriptor, index: Int): Boolean { + when (descriptor.kind) { + is StructureKind.CLASS -> { + val elementName = descriptor.getElementName(index) + if (descriptor.getElementDescriptor(index).isNullable) { + deferredElementHandler.set(elementName) + } else { + encodeName(elementName) + } + } + is StructureKind.MAP 
-> {
+                if (index == 0) {
+                    val keyKind = descriptor.getElementDescriptor(index).kind
+                    if (!validKeyKinds.contains(keyKind)) {
+                        throw SerializationException(
+                            """Invalid key type for ${descriptor.serialName}.
+                            | Expected STRING or ENUM but found: `${keyKind}`."""
+                                .trimMargin())
+                    }
+                }
+                state = mapState.nextState()
+            }
+            else -> {}
+        }
+        return true
+    }
+
+    override fun <T> encodeSerializableValue(serializer: SerializationStrategy<T>, value: T) {
+        deferredElementHandler.with(
+            {
+                // When using generics it's possible for `value` to be null
+                // See: https://youtrack.jetbrains.com/issue/KT-66206
+                if (value != null || configuration.explicitNulls) {
+                    encodeName(it)
+                    super.encodeSerializableValue(serializer, value)
+                }
+            },
+            { super.encodeSerializableValue(serializer, value) })
+    }
+
+    override fun <T : Any> encodeNullableSerializableValue(serializer: SerializationStrategy<T>, value: T?) {
+        deferredElementHandler.with(
+            {
+                if (value != null || configuration.explicitNulls) {
+                    encodeName(it)
+                    super.encodeNullableSerializableValue(serializer, value)
+                }
+            },
+            { super.encodeNullableSerializableValue(serializer, value) })
+    }
+
+    override fun encodeByte(value: Byte) = encodeInt(value.toInt())
+    override fun encodeChar(value: Char) = encodeString(value.toString())
+    override fun encodeFloat(value: Float) = encodeDouble(value.toDouble())
+    override fun encodeShort(value: Short) = encodeInt(value.toInt())
+
+    override fun encodeBoolean(value: Boolean) = writer.writeBoolean(value)
+    override fun encodeDouble(value: Double) = writer.writeDouble(value)
+    override fun encodeInt(value: Int) = writer.writeInt32(value)
+    override fun encodeLong(value: Long) = writer.writeInt64(value)
+    override fun encodeNull() = writer.writeNull()
+
+    override fun encodeString(value: String) {
+        when (state) {
+            STATE.NAME -> deferredElementHandler.set(value)
+            STATE.VALUE -> writer.writeString(value)
+        }
+    }
+
+    override fun encodeEnum(enumDescriptor: SerialDescriptor, index: Int) {
+        val value = enumDescriptor.getElementName(index)
+        when (state) {
+            STATE.NAME -> encodeName(value)
+            STATE.VALUE -> writer.writeString(value)
+        }
+    }
+
+    override fun encodeObjectId(value: ObjectId) {
+        writer.writeObjectId(value)
+    }
+
+    override fun encodeBsonValue(value: BsonValue) {
+        bsonValueCodec.encode(writer, value, EncoderContext.builder().build())
+    }
+
+    internal fun encodeName(value: Any) {
+        val name =
+            value.toString().let {
+                if (configuration.bsonNamingStrategy == BsonNamingStrategy.SNAKE_CASE) {
+                    convertCamelCase(it, '_')
+                } else {
+                    it
+                }
+            }
+        writer.writeName(name)
+        state = STATE.VALUE
+    }
+
+    private enum class STATE {
+        NAME,
+        VALUE
+    }
+
+    private class MapState {
+        var currentState: STATE = STATE.VALUE
+        fun getState(): STATE = currentState
+
+        fun nextState(): STATE {
+            currentState =
+                when (currentState) {
+                    STATE.VALUE -> STATE.NAME
+                    STATE.NAME -> STATE.VALUE
+                }
+            return getState()
+        }
+    }
+
+    internal class DeferredElementHandler {
+        private var deferredElementName: String? = null
+
+        fun set(name: String) {
+            assert(deferredElementName == null) { "Overwriting an existing deferred name" }
+            deferredElementName = name
+        }
+
+        fun with(actionWithDeferredElement: (String) -> Unit, actionWithoutDeferredElement: () -> Unit) {
+            deferredElementName?.let {
+                reset()
+                actionWithDeferredElement(it)
+            }
+                ?: actionWithoutDeferredElement()
+        }
+
+        private fun reset() {
+            deferredElementName = null
+        }
+    }
+}
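The deferred-name handler above is what makes null handling configurable: a field name is
buffered and only written once the value is known to be non-null, or when explicitNulls is
set. A sketch of the observable behaviour, assuming the KotlinSerializerCodec factory
referenced in the BsonConfiguration KDoc (Person is illustrative):

    @Serializable data class Person(val name: String, val nickname: String? = null)

    // Default BsonConfiguration: nulls are omitted.
    //   encode(Person("Ada"))  ->  {"name": "Ada"}
    // With BsonConfiguration(explicitNulls = true): the name is written, then a BSON null.
    //   encode(Person("Ada"))  ->  {"name": "Ada", "nickname": null}
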
diff --git a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonSerializers.kt b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonSerializers.kt
new file mode 100644
index 00000000000..26c19c0fe17
--- /dev/null
+++ b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/BsonSerializers.kt
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.bson.codecs.kotlinx
+
+import kotlinx.serialization.ExperimentalSerializationApi
+import kotlinx.serialization.KSerializer
+import kotlinx.serialization.SerializationException
+import kotlinx.serialization.Serializer
+import kotlinx.serialization.descriptors.PrimitiveKind
+import kotlinx.serialization.descriptors.PrimitiveSerialDescriptor
+import kotlinx.serialization.descriptors.SerialDescriptor
+import kotlinx.serialization.encoding.Decoder
+import kotlinx.serialization.encoding.Encoder
+import kotlinx.serialization.modules.SerializersModule
+import kotlinx.serialization.modules.plus
+import org.bson.BsonArray
+import org.bson.BsonBinary
+import org.bson.BsonBoolean
+import org.bson.BsonDateTime
+import org.bson.BsonDbPointer
+import org.bson.BsonDecimal128
+import org.bson.BsonDocument
+import org.bson.BsonDouble
+import org.bson.BsonInt32
+import org.bson.BsonInt64
+import org.bson.BsonJavaScript
+import org.bson.BsonJavaScriptWithScope
+import org.bson.BsonMaxKey
+import org.bson.BsonMinKey
+import org.bson.BsonNull
+import org.bson.BsonObjectId
+import org.bson.BsonRegularExpression
+import org.bson.BsonString
+import org.bson.BsonSymbol
+import org.bson.BsonTimestamp
+import org.bson.BsonUndefined
+import org.bson.BsonValue
+import org.bson.RawBsonArray
+import org.bson.RawBsonDocument
+import org.bson.types.ObjectId
+
+/**
+ * The default serializers module
+ *
+ * Handles:
+ * - ObjectId serialization
+ * - BsonValue serialization
+ * - kotlinx-datetime serialization (via dateTimeSerializersModule, when kotlinx-datetime is on the classpath)
+ */
+@ExperimentalSerializationApi
+public val defaultSerializersModule: SerializersModule =
+    ObjectIdSerializer.serializersModule + BsonValueSerializer.serializersModule + dateTimeSerializersModule
+
+@ExperimentalSerializationApi
+@Serializer(forClass = ObjectId::class)
+public object ObjectIdSerializer : KSerializer<ObjectId> {
+    override val descriptor: SerialDescriptor = PrimitiveSerialDescriptor("ObjectIdSerializer", PrimitiveKind.STRING)
+
+    override fun serialize(encoder: Encoder, value: ObjectId) {
+        when (encoder) {
+            is BsonEncoder -> encoder.encodeObjectId(value)
+            else -> throw SerializationException("ObjectId is not supported by ${encoder::class}")
+        }
+    }
+
+    override fun deserialize(decoder: Decoder): ObjectId {
+        return when (decoder) {
+            is BsonDecoder -> decoder.decodeObjectId()
+            else -> throw SerializationException("ObjectId is not supported by ${decoder::class}")
+        }
+    }
+
+    public val serializersModule: SerializersModule = SerializersModule {
+        contextual(ObjectId::class, ObjectIdSerializer)
+    }
+}
+
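+// Usage sketch (illustrative, not part of the module): with ObjectIdSerializer registered
+// contextually, ObjectId properties can simply be marked @Contextual, e.g.
+//   @Serializable data class Person(@Contextual val id: ObjectId, val name: String)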
+@ExperimentalSerializationApi
+@Serializer(forClass = BsonValue::class)
+public object BsonValueSerializer : KSerializer<BsonValue> {
+    override val descriptor: SerialDescriptor = PrimitiveSerialDescriptor("BsonValueSerializer", PrimitiveKind.STRING)
+
+    override fun serialize(encoder: Encoder, value: BsonValue) {
+        when (encoder) {
+            is BsonEncoder -> encoder.encodeBsonValue(value)
+            else -> throw SerializationException("BsonValues are not supported by ${encoder::class}")
+        }
+    }
+
+    override fun deserialize(decoder: Decoder): BsonValue {
+        return when (decoder) {
+            is BsonDecoder -> decoder.decodeBsonValue()
+            else -> throw SerializationException("BsonValues are not supported by ${decoder::class}")
+        }
+    }
+
+    @Suppress("UNCHECKED_CAST")
+    public val serializersModule: SerializersModule = SerializersModule {
+        contextual(BsonNull::class, BsonValueSerializer as KSerializer<BsonNull>)
+        contextual(BsonArray::class, BsonValueSerializer as KSerializer<BsonArray>)
+        contextual(BsonBinary::class, BsonValueSerializer as KSerializer<BsonBinary>)
+        contextual(BsonBoolean::class, BsonValueSerializer as KSerializer<BsonBoolean>)
+        contextual(BsonDateTime::class, BsonValueSerializer as KSerializer<BsonDateTime>)
+        contextual(BsonDbPointer::class, BsonValueSerializer as KSerializer<BsonDbPointer>)
+        contextual(BsonDocument::class, BsonValueSerializer as KSerializer<BsonDocument>)
+        contextual(BsonDouble::class, BsonValueSerializer as KSerializer<BsonDouble>)
+        contextual(BsonInt32::class, BsonValueSerializer as KSerializer<BsonInt32>)
+        contextual(BsonInt64::class, BsonValueSerializer as KSerializer<BsonInt64>)
+        contextual(BsonDecimal128::class, BsonValueSerializer as KSerializer<BsonDecimal128>)
+        contextual(BsonMaxKey::class, BsonValueSerializer as KSerializer<BsonMaxKey>)
+        contextual(BsonMinKey::class, BsonValueSerializer as KSerializer<BsonMinKey>)
+        contextual(BsonJavaScript::class, BsonValueSerializer as KSerializer<BsonJavaScript>)
+        contextual(BsonJavaScriptWithScope::class, BsonValueSerializer as KSerializer<BsonJavaScriptWithScope>)
+        contextual(BsonObjectId::class, BsonValueSerializer as KSerializer<BsonObjectId>)
+        contextual(BsonRegularExpression::class, BsonValueSerializer as KSerializer<BsonRegularExpression>)
+        contextual(BsonString::class, BsonValueSerializer as KSerializer<BsonString>)
+        contextual(BsonSymbol::class, BsonValueSerializer as KSerializer<BsonSymbol>)
+        contextual(BsonTimestamp::class, BsonValueSerializer as KSerializer<BsonTimestamp>)
+        contextual(BsonUndefined::class, BsonValueSerializer as KSerializer<BsonUndefined>)
+        contextual(RawBsonDocument::class, BsonValueSerializer as KSerializer<RawBsonDocument>)
+        contextual(RawBsonArray::class, BsonValueSerializer as KSerializer<RawBsonArray>)
+    }
+}
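Both module properties above, together with the dateTimeSerializersModule defined in
DateTimeSerializers.kt below, are merged into defaultSerializersModule. A sketch of layering
a custom contextual serializer on top (MyType and MyTypeSerializer are placeholders; the
codec factory is the one referenced in the BsonConfiguration KDoc):

    val myModule = defaultSerializersModule +
        SerializersModule { contextual(MyType::class, MyTypeSerializer) }
    // KotlinSerializerCodec.create(MyType::class, serializersModule = myModule)
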
diff --git a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/DateTimeSerializers.kt b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/DateTimeSerializers.kt
new file mode 100644
index 00000000000..7b597135d4f
--- /dev/null
+++ b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/DateTimeSerializers.kt
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.bson.codecs.kotlinx
+
+import java.time.ZoneOffset
+import kotlinx.datetime.Instant
+import kotlinx.datetime.LocalDate
+import kotlinx.datetime.LocalDateTime
+import kotlinx.datetime.LocalTime
+import kotlinx.datetime.TimeZone
+import kotlinx.datetime.UtcOffset
+import kotlinx.datetime.atDate
+import kotlinx.datetime.atStartOfDayIn
+import kotlinx.datetime.toInstant
+import kotlinx.datetime.toLocalDateTime
+import kotlinx.serialization.ExperimentalSerializationApi
+import kotlinx.serialization.KSerializer
+import kotlinx.serialization.SerializationException
+import kotlinx.serialization.descriptors.PrimitiveKind
+import kotlinx.serialization.descriptors.PrimitiveSerialDescriptor
+import kotlinx.serialization.descriptors.SerialDescriptor
+import kotlinx.serialization.encoding.Decoder
+import kotlinx.serialization.encoding.Encoder
+import kotlinx.serialization.modules.SerializersModule
+import kotlinx.serialization.modules.plus
+import org.bson.BsonDateTime
+import org.bson.codecs.kotlinx.utils.SerializationModuleUtils.isClassAvailable
+
+/**
+ * The date/time serializers module
+ *
+ * Handles:
+ * - Instant serialization
+ * - LocalDate serialization
+ * - LocalDateTime serialization
+ * - LocalTime serialization
+ */
+@ExperimentalSerializationApi
+public val dateTimeSerializersModule: SerializersModule by lazy {
+    var module = SerializersModule {}
+    if (isClassAvailable("kotlinx.datetime.Instant")) {
+        module += InstantAsBsonDateTime.serializersModule
+    }
+    if (isClassAvailable("kotlinx.datetime.LocalDate")) {
+        module += LocalDateAsBsonDateTime.serializersModule
+    }
+    if (isClassAvailable("kotlinx.datetime.LocalDateTime")) {
+        module += LocalDateTimeAsBsonDateTime.serializersModule
+    }
+    if (isClassAvailable("kotlinx.datetime.LocalTime")) {
+        module += LocalTimeAsBsonDateTime.serializersModule
+    }
+    module
+}
+
+/**
+ * Instant KSerializer.
+ *
+ * Encodes and decodes `Instant` objects to and from `BsonDateTime`. Data is decoded via
+ * [kotlinx.datetime.Instant.fromEpochMilliseconds] and stored to millisecond accuracy.
+ *
+ * @since 5.2
+ */
+@ExperimentalSerializationApi
+public object InstantAsBsonDateTime : KSerializer<Instant> {
+    override val descriptor: SerialDescriptor = PrimitiveSerialDescriptor("InstantAsBsonDateTime", PrimitiveKind.STRING)
+
+    override fun serialize(encoder: Encoder, value: Instant) {
+        when (encoder) {
+            is BsonEncoder -> encoder.encodeBsonValue(BsonDateTime(value.toEpochMilliseconds()))
+            else -> throw SerializationException("Instant is not supported by ${encoder::class}")
+        }
+    }
+
+    override fun deserialize(decoder: Decoder): Instant {
+        return when (decoder) {
+            is BsonDecoder -> Instant.fromEpochMilliseconds(decoder.decodeBsonValue().asDateTime().value)
+            else -> throw SerializationException("Instant is not supported by ${decoder::class}")
+        }
+    }
+
+    @Suppress("UNCHECKED_CAST")
+    public val serializersModule: SerializersModule = SerializersModule {
+        contextual(Instant::class, InstantAsBsonDateTime as KSerializer<Instant>)
+    }
+}
+
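+// Note: BsonDateTime is an Int64 of milliseconds since the epoch, so sub-millisecond
+// precision (e.g. Instant nanoseconds) is truncated by the serializers in this file.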
+/**
+ * LocalDate KSerializer.
+ *
+ * Encodes and decodes `LocalDate` objects to and from `BsonDateTime`.
+ *
+ * Converts the `LocalDate` values to and from `UTC`.
+ *
+ * @since 5.2
+ */
+@ExperimentalSerializationApi
+public object LocalDateAsBsonDateTime : KSerializer<LocalDate> {
+    override val descriptor: SerialDescriptor =
+        PrimitiveSerialDescriptor("LocalDateAsBsonDateTime", PrimitiveKind.STRING)
+
+    override fun serialize(encoder: Encoder, value: LocalDate) {
+        when (encoder) {
+            is BsonEncoder -> {
+                val epochMillis = value.atStartOfDayIn(TimeZone.UTC).toEpochMilliseconds()
+                encoder.encodeBsonValue(BsonDateTime(epochMillis))
+            }
+            else -> throw SerializationException("LocalDate is not supported by ${encoder::class}")
+        }
+    }
+
+    override fun deserialize(decoder: Decoder): LocalDate {
+        return when (decoder) {
+            is BsonDecoder ->
+                Instant.fromEpochMilliseconds(decoder.decodeBsonValue().asDateTime().value)
+                    .toLocalDateTime(TimeZone.UTC)
+                    .date
+            else -> throw SerializationException("LocalDate is not supported by ${decoder::class}")
+        }
+    }
+
+    @Suppress("UNCHECKED_CAST")
+    public val serializersModule: SerializersModule = SerializersModule {
+        contextual(LocalDate::class, LocalDateAsBsonDateTime as KSerializer<LocalDate>)
+    }
+}
+
+/**
+ * LocalDateTime KSerializer.
+ *
+ * Encodes and decodes `LocalDateTime` objects to and from `BsonDateTime`. Data is stored to millisecond accuracy.
+ *
+ * Converts the `LocalDateTime` values to and from `UTC`.
+ *
+ * @since 5.2
+ */
+@ExperimentalSerializationApi
+public object LocalDateTimeAsBsonDateTime : KSerializer<LocalDateTime> {
+    override val descriptor: SerialDescriptor =
+        PrimitiveSerialDescriptor("LocalDateTimeAsBsonDateTime", PrimitiveKind.STRING)
+
+    override fun serialize(encoder: Encoder, value: LocalDateTime) {
+        when (encoder) {
+            is BsonEncoder -> {
+                val epochMillis = value.toInstant(UtcOffset(ZoneOffset.UTC)).toEpochMilliseconds()
+                encoder.encodeBsonValue(BsonDateTime(epochMillis))
+            }
+            else -> throw SerializationException("LocalDateTime is not supported by ${encoder::class}")
+        }
+    }
+
+    override fun deserialize(decoder: Decoder): LocalDateTime {
+        return when (decoder) {
+            is BsonDecoder ->
+                Instant.fromEpochMilliseconds(decoder.decodeBsonValue().asDateTime().value)
+                    .toLocalDateTime(TimeZone.UTC)
+            else -> throw SerializationException("LocalDateTime is not supported by ${decoder::class}")
+        }
+    }
+
+    @Suppress("UNCHECKED_CAST")
+    public val serializersModule: SerializersModule = SerializersModule {
+        contextual(LocalDateTime::class, LocalDateTimeAsBsonDateTime as KSerializer<LocalDateTime>)
+    }
+}
+
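+// The Local* serializers pin the zone to UTC on both encode and decode; round-trips are
+// stable, but the stored instant only matches wall-clock time for writers using UTC.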
+/**
+ * LocalTime KSerializer.
+ *
+ * Encodes and decodes `LocalTime` objects to and from `BsonDateTime`. Data is stored to millisecond accuracy.
+ *
+ * Converts the `LocalTime` values to and from `UTC` on the epoch day (1970-01-01).
+ *
+ * @since 5.2
+ */
+@ExperimentalSerializationApi
+public object LocalTimeAsBsonDateTime : KSerializer<LocalTime> {
+    override val descriptor: SerialDescriptor =
+        PrimitiveSerialDescriptor("LocalTimeAsBsonDateTime", PrimitiveKind.STRING)
+
+    override fun serialize(encoder: Encoder, value: LocalTime) {
+        when (encoder) {
+            is BsonEncoder -> {
+                val epochMillis =
+                    value.atDate(LocalDate.fromEpochDays(0)).toInstant(UtcOffset(ZoneOffset.UTC)).toEpochMilliseconds()
+                encoder.encodeBsonValue(BsonDateTime(epochMillis))
+            }
+            else -> throw SerializationException("LocalTime is not supported by ${encoder::class}")
+        }
+    }
+
+    override fun deserialize(decoder: Decoder): LocalTime {
+        return when (decoder) {
+            is BsonDecoder ->
+                Instant.fromEpochMilliseconds(decoder.decodeBsonValue().asDateTime().value)
+                    .toLocalDateTime(TimeZone.UTC)
+                    .time
+            else -> throw SerializationException("LocalTime is not supported by ${decoder::class}")
+        }
+    }
+
+    @Suppress("UNCHECKED_CAST")
+    public val serializersModule: SerializersModule = SerializersModule {
+        contextual(LocalTime::class, LocalTimeAsBsonDateTime as KSerializer<LocalTime?>)
+    }
+}
diff --git a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/JsonBsonDecoder.kt b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/JsonBsonDecoder.kt
new file mode 100644
index 00000000000..bd8b6739958
--- /dev/null
+++ b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/JsonBsonDecoder.kt
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package org.bson.codecs.kotlinx + +import java.util.Base64 +import kotlinx.serialization.ExperimentalSerializationApi +import kotlinx.serialization.descriptors.SerialDescriptor +import kotlinx.serialization.json.Json +import kotlinx.serialization.json.JsonArray +import kotlinx.serialization.json.JsonDecoder +import kotlinx.serialization.json.JsonElement +import kotlinx.serialization.json.JsonObject +import kotlinx.serialization.json.JsonPrimitive +import kotlinx.serialization.json.buildJsonArray +import kotlinx.serialization.json.buildJsonObject +import kotlinx.serialization.modules.SerializersModule +import org.bson.AbstractBsonReader +import org.bson.BsonBinarySubType +import org.bson.BsonType +import org.bson.UuidRepresentation +import org.bson.codecs.kotlinx.utils.BsonCodecUtils.toJsonNamingStrategy +import org.bson.internal.UuidHelper + +@OptIn(ExperimentalSerializationApi::class) +internal interface JsonBsonDecoder : BsonDecoder, JsonDecoder { + val reader: AbstractBsonReader + val configuration: BsonConfiguration + + fun json(): Json = Json { + explicitNulls = configuration.explicitNulls + encodeDefaults = configuration.encodeDefaults + classDiscriminator = configuration.classDiscriminator + namingStrategy = configuration.bsonNamingStrategy.toJsonNamingStrategy() + serializersModule = this@JsonBsonDecoder.serializersModule + } + + @Suppress("ComplexMethod") + override fun decodeJsonElement(): JsonElement = + reader.run { + when (currentBsonType) { + BsonType.DOCUMENT -> readJsonObject() + BsonType.ARRAY -> readJsonArray() + BsonType.NULL -> JsonPrimitive(decodeNull()) + BsonType.STRING -> JsonPrimitive(decodeString()) + BsonType.BOOLEAN -> JsonPrimitive(decodeBoolean()) + BsonType.INT32 -> JsonPrimitive(decodeInt()) + BsonType.INT64 -> JsonPrimitive(decodeLong()) + BsonType.DOUBLE -> JsonPrimitive(decodeDouble()) + BsonType.DECIMAL128 -> JsonPrimitive(reader.readDecimal128()) + BsonType.OBJECT_ID -> JsonPrimitive(decodeObjectId().toHexString()) + BsonType.DATE_TIME -> JsonPrimitive(reader.readDateTime()) + BsonType.TIMESTAMP -> JsonPrimitive(reader.readTimestamp().value) + BsonType.BINARY -> { + val subtype = reader.peekBinarySubType() + val data = reader.readBinaryData().data + when (subtype) { + BsonBinarySubType.UUID_LEGACY.value -> + JsonPrimitive( + UuidHelper.decodeBinaryToUuid(data, subtype, UuidRepresentation.JAVA_LEGACY).toString()) + BsonBinarySubType.UUID_STANDARD.value -> + JsonPrimitive( + UuidHelper.decodeBinaryToUuid(data, subtype, UuidRepresentation.STANDARD).toString()) + else -> JsonPrimitive(Base64.getEncoder().encodeToString(data)) + } + } + else -> error("Unsupported json type: $currentBsonType") + } + } + + private fun readJsonObject(): JsonObject { + reader.readStartDocument() + val obj = buildJsonObject { + var type = reader.readBsonType() + while (type != BsonType.END_OF_DOCUMENT) { + put(reader.readName(), decodeJsonElement()) + type = reader.readBsonType() + } + } + + reader.readEndDocument() + return obj + } + + private fun readJsonArray(): JsonArray { + reader.readStartArray() + val array = buildJsonArray { + var type = reader.readBsonType() + while (type != BsonType.END_OF_DOCUMENT) { + add(decodeJsonElement()) + type = reader.readBsonType() + } + } + + reader.readEndArray() + return array + } +} + +internal class JsonBsonDecoderImpl( + reader: AbstractBsonReader, + serializersModule: SerializersModule, + configuration: BsonConfiguration +) : BsonDecoderImpl(reader, serializersModule, configuration), JsonBsonDecoder { + override val json = json() 
+} + +internal class JsonBsonArrayDecoder( + descriptor: SerialDescriptor, + reader: AbstractBsonReader, + serializersModule: SerializersModule, + configuration: BsonConfiguration +) : BsonArrayDecoder(descriptor, reader, serializersModule, configuration), JsonBsonDecoder { + override val json = json() +} + +internal class JsonBsonDocumentDecoder( + descriptor: SerialDescriptor, + reader: AbstractBsonReader, + serializersModule: SerializersModule, + configuration: BsonConfiguration +) : BsonDocumentDecoder(descriptor, reader, serializersModule, configuration), JsonBsonDecoder { + override val json = json() +} + +internal class JsonBsonPolymorphicDecoder( + descriptor: SerialDescriptor, + reader: AbstractBsonReader, + serializersModule: SerializersModule, + configuration: BsonConfiguration +) : BsonPolymorphicDecoder(descriptor, reader, serializersModule, configuration), JsonBsonDecoder { + override val json = json() +} + +internal class JsonBsonMapDecoder( + descriptor: SerialDescriptor, + reader: AbstractBsonReader, + serializersModule: SerializersModule, + configuration: BsonConfiguration +) : BsonMapDecoder(descriptor, reader, serializersModule, configuration), JsonBsonDecoder { + override val json = json() +} diff --git a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/JsonBsonEncoder.kt b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/JsonBsonEncoder.kt new file mode 100644 index 00000000000..4a754834e6d --- /dev/null +++ b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/JsonBsonEncoder.kt @@ -0,0 +1,134 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.bson.codecs.kotlinx
+
+import java.math.BigDecimal
+import kotlinx.serialization.ExperimentalSerializationApi
+import kotlinx.serialization.SerializationStrategy
+import kotlinx.serialization.json.Json
+import kotlinx.serialization.json.JsonArray
+import kotlinx.serialization.json.JsonElement
+import kotlinx.serialization.json.JsonEncoder
+import kotlinx.serialization.json.JsonNull
+import kotlinx.serialization.json.JsonObject
+import kotlinx.serialization.json.JsonPrimitive
+import kotlinx.serialization.json.double
+import kotlinx.serialization.json.int
+import kotlinx.serialization.json.long
+import kotlinx.serialization.modules.SerializersModule
+import org.bson.BsonWriter
+import org.bson.codecs.kotlinx.utils.BsonCodecUtils.toJsonNamingStrategy
+import org.bson.types.Decimal128
+
+@OptIn(ExperimentalSerializationApi::class)
+internal class JsonBsonEncoder(
+    writer: BsonWriter,
+    override val serializersModule: SerializersModule,
+    configuration: BsonConfiguration,
+) : BsonEncoderImpl(writer, serializersModule, configuration), JsonEncoder {
+
+    companion object {
+        private val DOUBLE_MIN_VALUE = BigDecimal.valueOf(Double.MIN_VALUE)
+        private val DOUBLE_MAX_VALUE = BigDecimal.valueOf(Double.MAX_VALUE)
+        private val INT_MIN_VALUE = BigDecimal.valueOf(Int.MIN_VALUE.toLong())
+        private val INT_MAX_VALUE = BigDecimal.valueOf(Int.MAX_VALUE.toLong())
+        private val LONG_MIN_VALUE = BigDecimal.valueOf(Long.MIN_VALUE)
+        private val LONG_MAX_VALUE = BigDecimal.valueOf(Long.MAX_VALUE)
+    }
+
+    override val json = Json {
+        explicitNulls = configuration.explicitNulls
+        encodeDefaults = configuration.encodeDefaults
+        classDiscriminator = configuration.classDiscriminator
+        namingStrategy = configuration.bsonNamingStrategy.toJsonNamingStrategy()
+        serializersModule = this@JsonBsonEncoder.serializersModule
+    }
+
+    override fun <T> encodeSerializableValue(serializer: SerializationStrategy<T>, value: T) {
+        if (value is JsonElement) encodeJsonElement(value)
+        else super.encodeSerializableValue(serializer, value)
+    }
+
+    override fun encodeJsonElement(element: JsonElement) {
+        deferredElementHandler.with(
+            {
+                when (element) {
+                    is JsonNull ->
+                        if (configuration.explicitNulls) {
+                            encodeName(it)
+                            encodeNull()
+                        }
+                    is JsonPrimitive -> {
+                        encodeName(it)
+                        encodeJsonPrimitive(element)
+                    }
+                    is JsonObject -> {
+                        encodeName(it)
+                        encodeJsonObject(element)
+                    }
+                    is JsonArray -> {
+                        encodeName(it)
+                        encodeJsonArray(element)
+                    }
+                }
+            },
+            {
+                when (element) {
+                    is JsonNull -> if (configuration.explicitNulls) encodeNull()
+                    is JsonPrimitive -> encodeJsonPrimitive(element)
+                    is JsonObject -> encodeJsonObject(element)
+                    is JsonArray -> encodeJsonArray(element)
+                }
+            })
+    }
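+
+    // Numeric routing used by encodeJsonPrimitive below: a value with a fractional part
+    // (non-zero scale) is written as a BSON double when it fits, otherwise as a Decimal128;
+    // an integral value is written as an int32 or int64 when it fits, falling back to
+    // Decimal128 for anything larger.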
+    private fun encodeJsonPrimitive(primitive: JsonPrimitive) {
+        val content = primitive.content
+        when {
+            primitive.isString -> encodeString(content)
+            content == "true" || content == "false" -> encodeBoolean(content.toBooleanStrict())
+            else -> {
+                val decimal = BigDecimal(content)
+                when {
+                    decimal.scale() != 0 ->
+                        if (DOUBLE_MIN_VALUE <= decimal && decimal <= DOUBLE_MAX_VALUE) {
+                            encodeDouble(primitive.double)
+                        } else {
+                            writer.writeDecimal128(Decimal128(decimal))
+                        }
+                    INT_MIN_VALUE <= decimal && decimal <= INT_MAX_VALUE -> encodeInt(primitive.int)
+                    LONG_MIN_VALUE <= decimal && decimal <= LONG_MAX_VALUE -> encodeLong(primitive.long)
+                    else -> writer.writeDecimal128(Decimal128(decimal))
+                }
+            }
+        }
+    }
+
+    private fun encodeJsonObject(obj: JsonObject) {
+        writer.writeStartDocument()
+        obj.forEach { k, v ->
+            deferredElementHandler.set(k)
+            encodeJsonElement(v)
+        }
+        writer.writeEndDocument()
+    }
+
+    private fun encodeJsonArray(array: JsonArray) {
+        writer.writeStartArray()
+        array.forEach(::encodeJsonElement)
+        writer.writeEndArray()
+    }
+}
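
A minimal round-trip sketch of what the encoder above enables (editorially illustrative: the `Payload` class and the values are not part of this patch, and kotlinx-serialization-json must be on the classpath so the JSON-aware encoder is selected):

    import kotlinx.serialization.Serializable
    import kotlinx.serialization.json.JsonElement
    import kotlinx.serialization.json.buildJsonObject
    import kotlinx.serialization.json.put
    import org.bson.BsonDocument
    import org.bson.BsonDocumentWriter
    import org.bson.codecs.EncoderContext
    import org.bson.codecs.kotlinx.KotlinSerializerCodec

    @Serializable data class Payload(val value: JsonElement)

    fun main() {
        val codec = KotlinSerializerCodec.create<Payload>()!!
        val document = BsonDocument()
        val writer = BsonDocumentWriter(document)
        val json = buildJsonObject {
            put("int", 42) // integral, fits in Int -> BSON int32
            put("double", 4.2) // fractional -> BSON double
        }
        // JsonBsonEncoder writes the JsonObject as a nested BSON document.
        codec.encode(writer, Payload(json), EncoderContext.builder().build())
        writer.flush()
        println(document.toJson()) // {"value": {"int": 42, "double": 4.2}}
    }
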
diff --git a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodec.kt b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodec.kt
new file mode 100644
index 00000000000..0c7491b2278
--- /dev/null
+++ b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodec.kt
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.bson.codecs.kotlinx
+
+import kotlin.reflect.KClass
+import kotlin.reflect.full.findAnnotation
+import kotlin.reflect.full.findAnnotations
+import kotlin.reflect.full.hasAnnotation
+import kotlin.reflect.full.primaryConstructor
+import kotlinx.serialization.ExperimentalSerializationApi
+import kotlinx.serialization.InternalSerializationApi
+import kotlinx.serialization.KSerializer
+import kotlinx.serialization.Serializable
+import kotlinx.serialization.SerializationException
+import kotlinx.serialization.modules.SerializersModule
+import kotlinx.serialization.serializer
+import org.bson.AbstractBsonReader
+import org.bson.BsonReader
+import org.bson.BsonWriter
+import org.bson.codecs.Codec
+import org.bson.codecs.DecoderContext
+import org.bson.codecs.EncoderContext
+import org.bson.codecs.configuration.CodecConfigurationException
+import org.bson.codecs.kotlinx.utils.BsonCodecUtils.createBsonDecoder
+import org.bson.codecs.kotlinx.utils.BsonCodecUtils.createBsonEncoder
+import org.bson.codecs.pojo.annotations.BsonCreator
+import org.bson.codecs.pojo.annotations.BsonDiscriminator
+import org.bson.codecs.pojo.annotations.BsonExtraElements
+import org.bson.codecs.pojo.annotations.BsonId
+import org.bson.codecs.pojo.annotations.BsonIgnore
+import org.bson.codecs.pojo.annotations.BsonProperty
+import org.bson.codecs.pojo.annotations.BsonRepresentation
+
+/**
+ * The Kotlin serializer codec which utilizes the kotlinx serialization module.
+ *
+ * Use the [create] method to create the codec
+ */
+@OptIn(ExperimentalSerializationApi::class, InternalSerializationApi::class)
+public class KotlinSerializerCodec<T : Any>
+private constructor(
+    private val kClass: KClass<T>,
+    private val serializer: KSerializer<T>,
+    private val serializersModule: SerializersModule,
+    private val bsonConfiguration: BsonConfiguration
+) : Codec<T> {
+
+    /** KotlinSerializerCodec companion object */
+    public companion object {
+
+        /**
+         * Creates a `Codec` for the kClass or returns null if there is no serializer available.
+         *
+         * @param T The codec type
+         * @param serializersModule the serializers module to use
+         * @param bsonConfiguration the bson configuration for serializing
+         * @return the codec
+         */
+        public inline fun <reified T : Any> create(
+            serializersModule: SerializersModule = defaultSerializersModule,
+            bsonConfiguration: BsonConfiguration = BsonConfiguration()
+        ): Codec<T>? = create(T::class, serializersModule, bsonConfiguration)
+
+        /**
+         * Creates a `Codec` for the kClass or returns null if there is no serializer available.
+         *
+         * @param T The codec type
+         * @param kClass the KClass for the codec
+         * @param serializersModule the serializers module to use
+         * @param bsonConfiguration the bson configuration for serializing
+         * @return the codec
+         */
+        @Suppress("SwallowedException")
+        public fun <T : Any> create(
+            kClass: KClass<T>,
+            serializersModule: SerializersModule = defaultSerializersModule,
+            bsonConfiguration: BsonConfiguration = BsonConfiguration()
+        ): Codec<T>? {
+            return if (kClass.hasAnnotation<Serializable>()) {
+                try {
+                    create(kClass, kClass.serializer(), serializersModule, bsonConfiguration)
+                } catch (exception: SerializationException) {
+                    null
+                }
+            } else {
+                null
+            }
+        }
+
+        /**
+         * Creates a `Codec` for the kClass using the supplied serializer
+         *
+         * @param T The codec type
+         * @param kClass the KClass for the codec
+         * @param serializer the KSerializer to use
+         * @param serializersModule the serializers module to use
+         * @param bsonConfiguration the bson configuration for serializing
+         * @return the codec
+         */
+        public fun <T : Any> create(
+            kClass: KClass<T>,
+            serializer: KSerializer<T>,
+            serializersModule: SerializersModule,
+            bsonConfiguration: BsonConfiguration
+        ): Codec<T> {
+            validateAnnotations(kClass)
+            return KotlinSerializerCodec(kClass, serializer, serializersModule, bsonConfiguration)
+        }
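+
+        // The bson POJO annotations checked below have kotlinx-serialization equivalents:
+        // @BsonId -> @SerialName("_id"), @BsonProperty -> @SerialName, @BsonIgnore -> @Transient.
+        // validateAnnotations fails fast with a CodecConfigurationException when an unsupported
+        // annotation is present on a @Serializable class.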
+        private fun <T : Any> validateAnnotations(kClass: KClass<T>) {
+            codecConfigurationRequires(kClass.findAnnotation<BsonDiscriminator>() == null) {
+                """Annotation 'BsonDiscriminator' is not supported with kotlin serialization,
+                | but found on ${kClass.simpleName}. Use `BsonConfiguration` with `KotlinSerializerCodec.create`
+                | to configure a discriminator."""
+                    .trimMargin()
+            }
+
+            codecConfigurationRequires(kClass.constructors.all { it.findAnnotations<BsonCreator>().isEmpty() }) {
+                """Annotation 'BsonCreator' is not supported with kotlin serialization,
+                | but found in ${kClass.simpleName}."""
+                    .trimMargin()
+            }
+
+            kClass.primaryConstructor?.parameters?.map { param ->
+                codecConfigurationRequires(param.findAnnotations<BsonId>().isEmpty()) {
+                    """Annotation 'BsonId' is not supported with kotlin serialization,
+                    | found on the parameter for ${param.name}. Use `@SerialName("_id")` instead."""
+                        .trimMargin()
+                }
+
+                codecConfigurationRequires(param.findAnnotations<BsonProperty>().isEmpty()) {
+                    """Annotation 'BsonProperty' is not supported with kotlin serialization,
+                    | found on the parameter for ${param.name}. Use `@SerialName` instead."""
+                        .trimMargin()
+                }
+
+                codecConfigurationRequires(param.findAnnotations<BsonIgnore>().isEmpty()) {
+                    """Annotation 'BsonIgnore' is not supported with kotlinx serialization,
+                    | found on the parameter for ${param.name}. Use `@Transient` annotation to ignore a property."""
+                        .trimMargin()
+                }
+
+                codecConfigurationRequires(param.findAnnotations<BsonExtraElements>().isEmpty()) {
+                    """Annotation 'BsonExtraElements' is not supported with kotlinx serialization,
+                    | found on the parameter for ${param.name}."""
+                        .trimMargin()
+                }
+
+                codecConfigurationRequires(param.findAnnotations<BsonRepresentation>().isEmpty()) {
+                    """Annotation 'BsonRepresentation' is not supported with kotlinx serialization,
+                    | found on the parameter for ${param.name}."""
+                        .trimMargin()
+                }
+            }
+        }
+
+        private fun codecConfigurationRequires(value: Boolean, lazyMessage: () -> String) {
+            if (!value) {
+                throw CodecConfigurationException(lazyMessage.invoke())
+            }
+        }
+    }
+
+    override fun encode(writer: BsonWriter, value: T, encoderContext: EncoderContext) {
+        serializer.serialize(createBsonEncoder(writer, serializersModule, bsonConfiguration), value)
+    }
+
+    override fun getEncoderClass(): Class<T> = kClass.java
+
+    override fun decode(reader: BsonReader, decoderContext: DecoderContext): T {
+        require(reader is AbstractBsonReader)
+        return serializer.deserialize(createBsonDecoder(reader, serializersModule, bsonConfiguration))
+    }
+}
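
A brief usage sketch of the factory above (the `Person` class is illustrative and not part of this patch):

    import kotlinx.serialization.Serializable
    import org.bson.codecs.kotlinx.BsonConfiguration
    import org.bson.codecs.kotlinx.KotlinSerializerCodec

    @Serializable data class Person(val name: String)

    // create returns null when the class is not @Serializable or no serializer can be resolved.
    val personCodec = KotlinSerializerCodec.create<Person>(
        bsonConfiguration = BsonConfiguration(encodeDefaults = false, classDiscriminator = "_t"))
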
diff --git a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodecProvider.kt b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodecProvider.kt
new file mode 100644
index 00000000000..1ae5353dbaa
--- /dev/null
+++ b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodecProvider.kt
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.bson.codecs.kotlinx
+
+import kotlinx.serialization.ExperimentalSerializationApi
+import kotlinx.serialization.modules.SerializersModule
+import org.bson.codecs.Codec
+import org.bson.codecs.configuration.CodecProvider
+import org.bson.codecs.configuration.CodecRegistry
+
+/**
+ * A Kotlin Serialization based Codec Provider
+ *
+ * The underlying class must be annotated with `@Serializable`.
+ */
+@OptIn(ExperimentalSerializationApi::class)
+public class KotlinSerializerCodecProvider(
+    private val serializersModule: SerializersModule = defaultSerializersModule,
+    private val bsonConfiguration: BsonConfiguration = BsonConfiguration()
+) : CodecProvider {
+
+    override fun <T : Any> get(clazz: Class<T>, registry: CodecRegistry): Codec<T>? =
+        KotlinSerializerCodec.create(clazz.kotlin, serializersModule, bsonConfiguration)
+}
diff --git a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/utils/BsonCodecUtils.kt b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/utils/BsonCodecUtils.kt
new file mode 100644
index 00000000000..daf6c7df6f9
--- /dev/null
+++ b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/utils/BsonCodecUtils.kt
@@ -0,0 +1,194 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.bson.codecs.kotlinx.utils
+
+import kotlinx.serialization.ExperimentalSerializationApi
+import kotlinx.serialization.SerializationException
+import kotlinx.serialization.descriptors.SerialDescriptor
+import kotlinx.serialization.descriptors.elementNames
+import kotlinx.serialization.json.JsonNamingStrategy
+import kotlinx.serialization.modules.SerializersModule
+import org.bson.AbstractBsonReader
+import org.bson.BsonWriter
+import org.bson.codecs.kotlinx.BsonArrayDecoder
+import org.bson.codecs.kotlinx.BsonConfiguration
+import org.bson.codecs.kotlinx.BsonDecoder
+import org.bson.codecs.kotlinx.BsonDecoderImpl
+import org.bson.codecs.kotlinx.BsonDocumentDecoder
+import org.bson.codecs.kotlinx.BsonEncoder
+import org.bson.codecs.kotlinx.BsonEncoderImpl
+import org.bson.codecs.kotlinx.BsonMapDecoder
+import org.bson.codecs.kotlinx.BsonNamingStrategy
+import org.bson.codecs.kotlinx.BsonPolymorphicDecoder
+import org.bson.codecs.kotlinx.JsonBsonArrayDecoder
+import org.bson.codecs.kotlinx.JsonBsonDecoderImpl
+import org.bson.codecs.kotlinx.JsonBsonDocumentDecoder
+import org.bson.codecs.kotlinx.JsonBsonEncoder
+import org.bson.codecs.kotlinx.JsonBsonMapDecoder
+import org.bson.codecs.kotlinx.JsonBsonPolymorphicDecoder
+
+@ExperimentalSerializationApi
+internal object BsonCodecUtils {
+
+    @Suppress("SwallowedException")
+    private val hasJsonEncoder: Boolean by lazy {
+        try {
+            Class.forName("kotlinx.serialization.json.JsonEncoder")
+            true
+        } catch (e: ClassNotFoundException) {
+            false
+        }
+    }
+
+    @Suppress("SwallowedException")
+    private val hasJsonDecoder: Boolean by lazy {
+        try {
+            Class.forName("kotlinx.serialization.json.JsonDecoder")
+            true
+        } catch (e: ClassNotFoundException) {
+            false
+        }
+    }
+
+    private val cachedElementNamesByDescriptor: MutableMap<String, Map<String, String>> = mutableMapOf()
+
+    internal fun createBsonEncoder(
+        writer: BsonWriter,
+        serializersModule: SerializersModule,
+        configuration: BsonConfiguration
+    ): BsonEncoder {
+        return if (hasJsonEncoder) JsonBsonEncoder(writer, serializersModule, configuration)
+        else BsonEncoderImpl(writer, serializersModule, configuration)
+    }
+
+    internal fun createBsonDecoder(
+        reader: AbstractBsonReader,
+        serializersModule: SerializersModule,
+        configuration: BsonConfiguration
+    ): BsonDecoder {
+        return if (hasJsonDecoder) JsonBsonDecoderImpl(reader, serializersModule, configuration)
+        else BsonDecoderImpl(reader, serializersModule, configuration)
+    }
+
+    internal fun createBsonArrayDecoder(
+        descriptor: SerialDescriptor,
+        reader: AbstractBsonReader,
+        serializersModule: SerializersModule,
+        configuration: BsonConfiguration
+    ): BsonArrayDecoder {
+        return if (hasJsonDecoder) JsonBsonArrayDecoder(descriptor, reader, serializersModule, configuration)
+        else BsonArrayDecoder(descriptor, reader, serializersModule, configuration)
+    }
+
+    internal fun createBsonDocumentDecoder(
+        descriptor: SerialDescriptor,
+        reader: AbstractBsonReader,
+        serializersModule: SerializersModule,
+        configuration: BsonConfiguration
+    ): BsonDocumentDecoder {
+        return if (hasJsonDecoder) JsonBsonDocumentDecoder(descriptor, reader, serializersModule, configuration)
+        else BsonDocumentDecoder(descriptor, reader, serializersModule, configuration)
+    }
+
+    internal fun createBsonPolymorphicDecoder(
+        descriptor: SerialDescriptor,
+        reader: AbstractBsonReader,
+        serializersModule: SerializersModule,
+        configuration: BsonConfiguration
+    ): BsonPolymorphicDecoder {
+        return if (hasJsonDecoder) JsonBsonPolymorphicDecoder(descriptor, reader, serializersModule, configuration)
+        else BsonPolymorphicDecoder(descriptor, reader, serializersModule, configuration)
+    }
+
+    internal fun createBsonMapDecoder(
+        descriptor: SerialDescriptor,
+        reader: AbstractBsonReader,
+        serializersModule: SerializersModule,
+        configuration: BsonConfiguration
+    ): BsonMapDecoder {
+        return if (hasJsonDecoder) JsonBsonMapDecoder(descriptor, reader, serializersModule, configuration)
+        else BsonMapDecoder(descriptor, reader, serializersModule, configuration)
+    }
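+
+    // When BsonNamingStrategy.SNAKE_CASE is configured, the converted element names are cached
+    // per descriptor below, and a SerializationException is raised when two properties (for
+    // example `myName` and `my_name`) would collide on the same snake_case name.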
+    internal fun cacheElementNamesByDescriptor(descriptor: SerialDescriptor, configuration: BsonConfiguration) {
+        val convertedNameMap =
+            when (configuration.bsonNamingStrategy) {
+                BsonNamingStrategy.SNAKE_CASE -> {
+                    val snakeCasedNames = descriptor.elementNames.associateWith { name -> convertCamelCase(name, '_') }
+
+                    snakeCasedNames.entries
+                        .groupBy { entry -> entry.value }
+                        .filter { group -> group.value.size > 1 }
+                        .entries
+                        .fold(StringBuilder("")) { acc, group ->
+                            val keys = group.value.joinToString(", ") { entry -> entry.key }
+                            acc.append("$keys in ${descriptor.serialName} generate same name: ${group.key}.\n")
+                        }
+                        .toString()
+                        .takeIf { it.trim().isNotEmpty() }
+                        ?.let { errorMessage: String -> throw SerializationException(errorMessage) }
+
+                    snakeCasedNames.entries.associate { it.value to it.key }
+                }
+                else -> emptyMap()
+            }
+
+        cachedElementNamesByDescriptor[descriptor.serialName] = convertedNameMap
+    }
+
+    internal fun getCachedElementNamesByDescriptor(descriptor: SerialDescriptor): Map<String, String> {
+        return cachedElementNamesByDescriptor[descriptor.serialName] ?: emptyMap()
+    }
+
+    // https://github.com/Kotlin/kotlinx.serialization/blob/f9f160a680da9f92c3bb121ae3644c96e57ba42e/formats/json/commonMain/src/kotlinx/serialization/json/JsonNamingStrategy.kt#L142-L174
+    internal fun convertCamelCase(value: String, delimiter: Char) =
+        buildString(value.length * 2) {
+            var bufferedChar: Char? = null
+            var previousUpperCharsCount = 0
+
+            value.forEach { c ->
+                if (c.isUpperCase()) {
+                    if (previousUpperCharsCount == 0 && isNotEmpty() && last() != delimiter) append(delimiter)
+
+                    bufferedChar?.let(::append)
+
+                    previousUpperCharsCount++
+                    bufferedChar = c.lowercaseChar()
+                } else {
+                    if (bufferedChar != null) {
+                        if (previousUpperCharsCount > 1 && c.isLetter()) {
+                            append(delimiter)
+                        }
+                        append(bufferedChar)
+                        previousUpperCharsCount = 0
+                        bufferedChar = null
+                    }
+                    append(c)
+                }
+            }
+
+            if (bufferedChar != null) {
+                append(bufferedChar)
+            }
+        }
+
+    internal fun BsonNamingStrategy?.toJsonNamingStrategy(): JsonNamingStrategy? {
+        return when (this) {
+            BsonNamingStrategy.SNAKE_CASE -> JsonNamingStrategy.SnakeCase
+            else -> null
+        }
+    }
+}
diff --git a/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/utils/SerializationModuleUtils.kt b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/utils/SerializationModuleUtils.kt
new file mode 100644
index 00000000000..306644c81ad
--- /dev/null
+++ b/bson-kotlinx/src/main/kotlin/org/bson/codecs/kotlinx/utils/SerializationModuleUtils.kt
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.bson.codecs.kotlinx.utils + +internal object SerializationModuleUtils { + @Suppress("SwallowedException") + fun isClassAvailable(className: String): Boolean { + return try { + Class.forName(className) + true + } catch (e: ClassNotFoundException) { + false + } + } +} diff --git a/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodecProviderTest.kt b/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodecProviderTest.kt new file mode 100644 index 00000000000..5a912e7bb3a --- /dev/null +++ b/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodecProviderTest.kt @@ -0,0 +1,149 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.bson.codecs.kotlinx
+
+import com.mongodb.MongoClientSettings
+import kotlin.test.assertEquals
+import kotlin.test.assertNotNull
+import kotlin.test.assertNull
+import kotlin.test.assertTrue
+import kotlinx.serialization.ExperimentalSerializationApi
+import kotlinx.serialization.modules.SerializersModule
+import kotlinx.serialization.modules.plus
+import kotlinx.serialization.modules.polymorphic
+import kotlinx.serialization.modules.subclass
+import org.bson.BsonDocument
+import org.bson.BsonDocumentReader
+import org.bson.BsonDocumentWriter
+import org.bson.codecs.DecoderContext
+import org.bson.codecs.EncoderContext
+import org.bson.codecs.kotlinx.samples.DataClassContainsOpen
+import org.bson.codecs.kotlinx.samples.DataClassOpen
+import org.bson.codecs.kotlinx.samples.DataClassOpenA
+import org.bson.codecs.kotlinx.samples.DataClassOpenB
+import org.bson.codecs.kotlinx.samples.DataClassParameterized
+import org.bson.codecs.kotlinx.samples.DataClassSealedInterface
+import org.bson.codecs.kotlinx.samples.DataClassWithSimpleValues
+import org.bson.codecs.kotlinx.samples.SealedInterface
+import org.bson.conversions.Bson
+import org.bson.json.JsonReader
+import org.bson.types.ObjectId
+import org.junit.jupiter.api.Test
+
+class KotlinSerializerCodecProviderTest {
+
+    data class NotMarkedSerializable(val t: String)
+
+    @Test
+    fun shouldReturnNullForNonSerializableClass() {
+        assertNull(KotlinSerializerCodecProvider().get(NotMarkedSerializable::class.java, Bson.DEFAULT_CODEC_REGISTRY))
+        assertNull(KotlinSerializerCodecProvider().get(DoubleArray::class.java, Bson.DEFAULT_CODEC_REGISTRY))
+        assertNull(KotlinSerializerCodecProvider().get(CharSequence::class.java, Bson.DEFAULT_CODEC_REGISTRY))
+    }
+
+    @Test
+    fun shouldReturnKotlinSerializerCodecForDataClass() {
+        val provider = KotlinSerializerCodecProvider()
+        val codec = provider.get(DataClassWithSimpleValues::class.java, Bson.DEFAULT_CODEC_REGISTRY)
+
+        assertNotNull(codec)
+        assertTrue { codec is KotlinSerializerCodec }
+        assertEquals(DataClassWithSimpleValues::class.java, codec.encoderClass)
+    }
+
+    @Test
+    fun shouldReturnNullForRawParameterizedDataClass() {
+        val codec = KotlinSerializerCodecProvider().get(DataClassParameterized::class.java, Bson.DEFAULT_CODEC_REGISTRY)
+        assertNull(codec)
+    }
+
+    @Test
+    fun shouldReturnKotlinSerializerCodecUsingDefaultRegistry() {
+        val codec = MongoClientSettings.getDefaultCodecRegistry().get(DataClassWithSimpleValues::class.java)
+
+        assertNotNull(codec)
+        assertTrue { codec is KotlinSerializerCodec }
+        assertEquals(DataClassWithSimpleValues::class.java, codec.encoderClass)
+    }
+
+    @Test
+    fun testDataClassWithSimpleValuesFieldOrdering() {
+        val codec = MongoClientSettings.getDefaultCodecRegistry().get(DataClassWithSimpleValues::class.java)
+        val expected = DataClassWithSimpleValues('c', 0, 1, 22, 42L, 4.0f, 4.2, true, "String")
+
+        val numberLong = "\$numberLong"
+        val actual =
+            codec.decode(
+                JsonReader(
+                    """{"boolean": true, "byte": 0, "char": "c", "double": 4.2, "float": 4.0, "int": 22,
+                        |"long": {"$numberLong": "42"}, "short": 1, "string": "String"}"""
+                        .trimMargin()),
+                DecoderContext.builder().build())
+
+        assertEquals(expected, actual)
+    }
+
+    @Test
+    fun testDataClassSealedFieldOrdering() {
+        val codec = MongoClientSettings.getDefaultCodecRegistry().get(SealedInterface::class.java)
+
+        val objectId = ObjectId("111111111111111111111111")
+        val oid = "\$oid"
+        val expected = DataClassSealedInterface(objectId, "string")
+        val actual =
+            codec.decode(
+                JsonReader(
"""{"name": "string", "_id": {$oid: "${objectId.toHexString()}"}, + |"_t": "org.bson.codecs.kotlinx.samples.DataClassSealedInterface"}""" + .trimMargin()), + DecoderContext.builder().build()) + + assertEquals(expected, actual) + } + + @OptIn(ExperimentalSerializationApi::class) + @Test + fun shouldAllowOverridingOfSerializersModuleAndBsonConfigurationInConstructor() { + + val serializersModule = + SerializersModule { + this.polymorphic(DataClassOpen::class) { + this.subclass(DataClassOpenA::class) + this.subclass(DataClassOpenB::class) + } + } + defaultSerializersModule + + val bsonConfiguration = BsonConfiguration(classDiscriminator = "__type") + val dataClassContainsOpenB = DataClassContainsOpen(DataClassOpenB(1)) + + val codec = + KotlinSerializerCodecProvider(serializersModule, bsonConfiguration) + .get(DataClassContainsOpen::class.java, Bson.DEFAULT_CODEC_REGISTRY)!! + + assertTrue { codec is KotlinSerializerCodec } + val encodedDocument = BsonDocument() + val writer = BsonDocumentWriter(encodedDocument) + codec.encode(writer, dataClassContainsOpenB, EncoderContext.builder().build()) + writer.flush() + + assertEquals( + BsonDocument.parse("""{"open": {"__type": "org.bson.codecs.kotlinx.samples.DataClassOpenB", "b": 1}}"""), + encodedDocument) + + assertEquals( + dataClassContainsOpenB, codec.decode(BsonDocumentReader(encodedDocument), DecoderContext.builder().build())) + } +} diff --git a/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodecTest.kt b/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodecTest.kt new file mode 100644 index 00000000000..f9b3eb753c5 --- /dev/null +++ b/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/KotlinSerializerCodecTest.kt @@ -0,0 +1,1227 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.bson.codecs.kotlinx + +import java.math.BigDecimal +import java.util.Base64 +import java.util.stream.Stream +import kotlin.test.assertEquals +import kotlinx.datetime.Instant +import kotlinx.datetime.LocalDate +import kotlinx.datetime.LocalDateTime +import kotlinx.datetime.LocalTime +import kotlinx.serialization.ExperimentalSerializationApi +import kotlinx.serialization.MissingFieldException +import kotlinx.serialization.SerializationException +import kotlinx.serialization.json.JsonPrimitive +import kotlinx.serialization.json.buildJsonArray +import kotlinx.serialization.json.buildJsonObject +import kotlinx.serialization.json.put +import kotlinx.serialization.modules.SerializersModule +import kotlinx.serialization.modules.plus +import kotlinx.serialization.modules.polymorphic +import kotlinx.serialization.modules.subclass +import org.bson.BsonBoolean +import org.bson.BsonDocument +import org.bson.BsonDocumentReader +import org.bson.BsonDocumentWriter +import org.bson.BsonDouble +import org.bson.BsonInt32 +import org.bson.BsonInt64 +import org.bson.BsonInvalidOperationException +import org.bson.BsonMaxKey +import org.bson.BsonMinKey +import org.bson.BsonString +import org.bson.BsonUndefined +import org.bson.codecs.DecoderContext +import org.bson.codecs.EncoderContext +import org.bson.codecs.configuration.CodecConfigurationException +import org.bson.codecs.kotlinx.samples.Box +import org.bson.codecs.kotlinx.samples.DataClassBsonValues +import org.bson.codecs.kotlinx.samples.DataClassContainsOpen +import org.bson.codecs.kotlinx.samples.DataClassContainsValueClass +import org.bson.codecs.kotlinx.samples.DataClassEmbedded +import org.bson.codecs.kotlinx.samples.DataClassKey +import org.bson.codecs.kotlinx.samples.DataClassLastItemDefaultsToNull +import org.bson.codecs.kotlinx.samples.DataClassListOfDataClasses +import org.bson.codecs.kotlinx.samples.DataClassListOfListOfDataClasses +import org.bson.codecs.kotlinx.samples.DataClassListOfSealed +import org.bson.codecs.kotlinx.samples.DataClassMapOfDataClasses +import org.bson.codecs.kotlinx.samples.DataClassMapOfListOfDataClasses +import org.bson.codecs.kotlinx.samples.DataClassNestedParameterizedTypes +import org.bson.codecs.kotlinx.samples.DataClassOpen +import org.bson.codecs.kotlinx.samples.DataClassOpenA +import org.bson.codecs.kotlinx.samples.DataClassOpenB +import org.bson.codecs.kotlinx.samples.DataClassOptionalBsonValues +import org.bson.codecs.kotlinx.samples.DataClassParameterized +import org.bson.codecs.kotlinx.samples.DataClassSealed +import org.bson.codecs.kotlinx.samples.DataClassSealedA +import org.bson.codecs.kotlinx.samples.DataClassSealedB +import org.bson.codecs.kotlinx.samples.DataClassSealedC +import org.bson.codecs.kotlinx.samples.DataClassSelfReferential +import org.bson.codecs.kotlinx.samples.DataClassWithAnnotations +import org.bson.codecs.kotlinx.samples.DataClassWithBooleanMapKey +import org.bson.codecs.kotlinx.samples.DataClassWithBsonConstructor +import org.bson.codecs.kotlinx.samples.DataClassWithBsonDiscriminator +import org.bson.codecs.kotlinx.samples.DataClassWithBsonExtraElements +import org.bson.codecs.kotlinx.samples.DataClassWithBsonId +import org.bson.codecs.kotlinx.samples.DataClassWithBsonIgnore +import org.bson.codecs.kotlinx.samples.DataClassWithBsonProperty +import org.bson.codecs.kotlinx.samples.DataClassWithBsonRepresentation +import org.bson.codecs.kotlinx.samples.DataClassWithCamelCase +import org.bson.codecs.kotlinx.samples.DataClassWithCollections +import 
org.bson.codecs.kotlinx.samples.DataClassWithContextualDateValues +import org.bson.codecs.kotlinx.samples.DataClassWithDataClassMapKey +import org.bson.codecs.kotlinx.samples.DataClassWithDateValues +import org.bson.codecs.kotlinx.samples.DataClassWithDefaults +import org.bson.codecs.kotlinx.samples.DataClassWithEmbedded +import org.bson.codecs.kotlinx.samples.DataClassWithEncodeDefault +import org.bson.codecs.kotlinx.samples.DataClassWithEnum +import org.bson.codecs.kotlinx.samples.DataClassWithEnumMapKey +import org.bson.codecs.kotlinx.samples.DataClassWithFailingInit +import org.bson.codecs.kotlinx.samples.DataClassWithJsonElement +import org.bson.codecs.kotlinx.samples.DataClassWithJsonElements +import org.bson.codecs.kotlinx.samples.DataClassWithJsonElementsNullable +import org.bson.codecs.kotlinx.samples.DataClassWithKotlinAllowedName +import org.bson.codecs.kotlinx.samples.DataClassWithListThatLastItemDefaultsToNull +import org.bson.codecs.kotlinx.samples.DataClassWithMutableList +import org.bson.codecs.kotlinx.samples.DataClassWithMutableMap +import org.bson.codecs.kotlinx.samples.DataClassWithMutableSet +import org.bson.codecs.kotlinx.samples.DataClassWithNestedParameterized +import org.bson.codecs.kotlinx.samples.DataClassWithNestedParameterizedDataClass +import org.bson.codecs.kotlinx.samples.DataClassWithNullableGeneric +import org.bson.codecs.kotlinx.samples.DataClassWithNulls +import org.bson.codecs.kotlinx.samples.DataClassWithPair +import org.bson.codecs.kotlinx.samples.DataClassWithParameterizedDataClass +import org.bson.codecs.kotlinx.samples.DataClassWithRequired +import org.bson.codecs.kotlinx.samples.DataClassWithSameSnakeCaseName +import org.bson.codecs.kotlinx.samples.DataClassWithSequence +import org.bson.codecs.kotlinx.samples.DataClassWithSimpleValues +import org.bson.codecs.kotlinx.samples.DataClassWithTriple +import org.bson.codecs.kotlinx.samples.Key +import org.bson.codecs.kotlinx.samples.SealedInterface +import org.bson.codecs.kotlinx.samples.ValueClass +import org.bson.json.JsonMode +import org.bson.json.JsonWriterSettings +import org.junit.jupiter.api.Test +import org.junit.jupiter.api.assertThrows +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.MethodSource + +@OptIn(ExperimentalSerializationApi::class) +@Suppress("LargeClass") +class KotlinSerializerCodecTest { + private val oid = "\$oid" + private val numberLong = "\$numberLong" + private val numberDecimal = "\$numberDecimal" + private val emptyDocument = "{}" + private val altConfiguration = + BsonConfiguration(encodeDefaults = false, classDiscriminator = "_t", explicitNulls = true) + + private val allBsonTypesJson = + """{ + | "id": {"$oid": "111111111111111111111111"}, + | "arrayEmpty": [], + | "arraySimple": [{"${'$'}numberInt": "1"}, {"${'$'}numberInt": "2"}, {"${'$'}numberInt": "3"}], + | "arrayComplex": [{"a": {"${'$'}numberInt": "1"}}, {"a": {"${'$'}numberInt": "2"}}], + | "arrayMixedTypes": [{"${'$'}numberInt": "1"}, {"${'$'}numberInt": "2"}, true, + | [{"${'$'}numberInt": "1"}, {"${'$'}numberInt": "2"}, {"${'$'}numberInt": "3"}], + | {"a": {"${'$'}numberInt": "2"}}], + | "arrayComplexMixedTypes": [{"a": {"${'$'}numberInt": "1"}}, {"a": "a"}], + | "binary": {"${'$'}binary": {"base64": "S2Fma2Egcm9ja3Mh", "subType": "00"}}, + | "boolean": true, + | "code": {"${'$'}code": "int i = 0;"}, + | "codeWithScope": {"${'$'}code": "int x = y", "${'$'}scope": {"y": 1}}, + | "dateTime": {"${'$'}date": {"${'$'}numberLong": "1577836801000"}}, + | "decimal128": 
{"${'$'}numberDecimal": "1.0"}, + | "documentEmpty": {}, + | "document": {"a": {"${'$'}numberInt": "1"}}, + | "double": {"${'$'}numberDouble": "62.0"}, + | "int32": {"${'$'}numberInt": "42"}, + | "int64": {"${'$'}numberLong": "52"}, + | "maxKey": {"${'$'}maxKey": 1}, + | "minKey": {"${'$'}minKey": 1}, + | "objectId": {"${'$'}oid": "211111111111111111111112"}, + | "regex": {"${'$'}regularExpression": {"pattern": "^test.*regex.*xyz$", "options": "i"}}, + | "string": "the fox ...", + | "symbol": {"${'$'}symbol": "ruby stuff"}, + | "timestamp": {"${'$'}timestamp": {"t": 305419896, "i": 5}}, + | "undefined": {"${'$'}undefined": true} + | }""" + .trimMargin() + + private val allBsonTypesDocument = BsonDocument.parse(allBsonTypesJson) + private val jsonAllSupportedTypesDocument: BsonDocument by + lazy { + val doc = BsonDocument.parse(allBsonTypesJson) + listOf("minKey", "maxKey", "code", "codeWithScope", "regex", "symbol", "undefined").forEach { + doc.remove(it) + } + doc + } + + companion object { + @JvmStatic + fun testTypesCastingDataClassWithSimpleValues(): Stream { + return Stream.of( + BsonDocument() + .append("char", BsonString("c")) + .append("byte", BsonInt32(1)) + .append("short", BsonInt32(2)) + .append("int", BsonInt32(10)) + .append("long", BsonInt32(10)) + .append("float", BsonInt32(2)) + .append("double", BsonInt32(3)) + .append("boolean", BsonBoolean.TRUE) + .append("string", BsonString("String")), + BsonDocument() + .append("char", BsonString("c")) + .append("byte", BsonDouble(1.0)) + .append("short", BsonDouble(2.0)) + .append("int", BsonDouble(9.9999999999999992)) + .append("long", BsonDouble(9.9999999999999992)) + .append("float", BsonDouble(2.0)) + .append("double", BsonDouble(3.0)) + .append("boolean", BsonBoolean.TRUE) + .append("string", BsonString("String")), + BsonDocument() + .append("char", BsonString("c")) + .append("byte", BsonDouble(1.0)) + .append("short", BsonDouble(2.0)) + .append("int", BsonDouble(10.0)) + .append("long", BsonDouble(10.0)) + .append("float", BsonDouble(2.0)) + .append("double", BsonDouble(3.0)) + .append("boolean", BsonBoolean.TRUE) + .append("string", BsonString("String")), + BsonDocument() + .append("char", BsonString("c")) + .append("byte", BsonInt64(1)) + .append("short", BsonInt64(2)) + .append("int", BsonInt64(10)) + .append("long", BsonInt64(10)) + .append("float", BsonInt64(2)) + .append("double", BsonInt64(3)) + .append("boolean", BsonBoolean.TRUE) + .append("string", BsonString("String"))) + } + } + + @ParameterizedTest + @MethodSource("testTypesCastingDataClassWithSimpleValues") + fun testTypesCastingDataClassWithSimpleValues(data: BsonDocument) { + val expectedDataClass = DataClassWithSimpleValues('c', 1, 2, 10, 10L, 2.0f, 3.0, true, "String") + + assertDecodesTo(data, expectedDataClass) + } + + @Test + fun testDataClassWithDateValuesContextualSerialization() { + val expected = + "{\n" + + " \"instant\": {\"\$date\": \"2001-09-09T01:46:40Z\"}, \n" + + " \"localTime\": {\"\$date\": \"1970-01-01T00:00:10Z\"}, \n" + + " \"localDateTime\": {\"\$date\": \"2021-01-01T00:00:04Z\"}, \n" + + " \"localDate\": {\"\$date\": \"1970-10-28T00:00:00Z\"}\n" + + "}".trimMargin() + + val expectedDataClass = + DataClassWithContextualDateValues( + Instant.fromEpochMilliseconds(10_000_000_000_00), + LocalTime.fromMillisecondOfDay(10_000), + LocalDateTime.parse("2021-01-01T00:00:04"), + LocalDate.fromEpochDays(300)) + + assertRoundTrips(expected, expectedDataClass) + } + + @Test + fun testDataClassWithDateValuesStandard() { + val expected = + "{\n" + + " 
\"instant\": \"1970-01-01T00:00:01Z\", \n" + + " \"localTime\": \"00:00:01\", \n" + + " \"localDateTime\": \"2021-01-01T00:00:04\", \n" + + " \"localDate\": \"1970-01-02\"\n" + + "}".trimMargin() + + val expectedDataClass = + DataClassWithDateValues( + Instant.fromEpochMilliseconds(1000), + LocalTime.fromMillisecondOfDay(1000), + LocalDateTime.parse("2021-01-01T00:00:04"), + LocalDate.fromEpochDays(1)) + + assertRoundTrips(expected, expectedDataClass) + } + + @Test + fun testDataClassWithComplexTypes() { + val expected = + """{ + | "listSimple": ["a", "b", "c", "d"], + | "listList": [["a", "b"], [], ["c", "d"]], + | "listMap": [{"a": 1, "b": 2}, {}, {"c": 3, "d": 4}], + | "mapSimple": {"a": 1, "b": 2, "c": 3, "d": 4}, + | "mapList": {"a": ["a", "b"], "b": [], "c": ["c", "d"]}, + | "mapMap" : {"a": {"a": 1, "b": 2}, "b": {}, "c": {"c": 3, "d": 4}} + |}""" + .trimMargin() + + val dataClass = + DataClassWithCollections( + listOf("a", "b", "c", "d"), + listOf(listOf("a", "b"), emptyList(), listOf("c", "d")), + listOf(mapOf("a" to 1, "b" to 2), emptyMap(), mapOf("c" to 3, "d" to 4)), + mapOf("a" to 1, "b" to 2, "c" to 3, "d" to 4), + mapOf("a" to listOf("a", "b"), "b" to emptyList(), "c" to listOf("c", "d")), + mapOf("a" to mapOf("a" to 1, "b" to 2), "b" to emptyMap(), "c" to mapOf("c" to 3, "d" to 4))) + + assertRoundTrips(expected, dataClass) + } + + @Test + fun testDataClassWithDefaults() { + val expectedDefault = + """{ + | "boolean": false, + | "string": "String", + | "listSimple": ["a", "b", "c"] + |}""" + .trimMargin() + + val defaultDataClass = DataClassWithDefaults() + assertRoundTrips(expectedDefault, defaultDataClass) + assertRoundTrips(emptyDocument, defaultDataClass, altConfiguration) + + val expectedSomeOverrides = """{"boolean": true, "listSimple": ["a"]}""" + val someOverridesDataClass = DataClassWithDefaults(boolean = true, listSimple = listOf("a")) + assertRoundTrips(expectedSomeOverrides, someOverridesDataClass, altConfiguration) + } + + @Test + fun testDataClassWithNulls() { + val expectedNulls = + """{ + | "boolean": null, + | "string": null, + | "listSimple": null + |}""" + .trimMargin() + + val dataClass = DataClassWithNulls(null, null, null) + assertRoundTrips(emptyDocument, dataClass) + assertRoundTrips(expectedNulls, dataClass, altConfiguration) + } + + @Test + fun testDataClassWithListThatLastItemDefaultsToNull() { + val expectedWithOutNulls = + """{ + | "elements": [{"required": "required"}, {"required": "required"}], + |}""" + .trimMargin() + + val dataClass = + DataClassWithListThatLastItemDefaultsToNull( + listOf(DataClassLastItemDefaultsToNull("required"), DataClassLastItemDefaultsToNull("required"))) + assertRoundTrips(expectedWithOutNulls, dataClass) + + val expectedWithNulls = + """{ + | "elements": [{"required": "required", "optional": null}, {"required": "required", "optional": null}], + |}""" + .trimMargin() + assertRoundTrips(expectedWithNulls, dataClass, BsonConfiguration(explicitNulls = true)) + } + + @Test + fun testDataClassWithNullableGenericsNotNull() { + val expected = + """{ + | "box": {"boxed": "String"} + |}""" + .trimMargin() + + val dataClass = DataClassWithNullableGeneric(Box("String")) + assertRoundTrips(expected, dataClass) + } + + @Test + fun testDataClassWithNullableGenericsNull() { + val expectedDefault = """{"box": {}}""" + val dataClass = DataClassWithNullableGeneric(Box(null)) + assertRoundTrips(expectedDefault, dataClass) + val expectedNull = """{"box": {"boxed": null}}""" + assertRoundTrips(expectedNull, dataClass, altConfiguration) 
+ } + + @Test + fun testDataClassSelfReferential() { + val expected = + """{"name": "tree", + | "left": {"name": "L", "left": {"name": "LL"}, "right": {"name": "LR"}}, + | "right": {"name": "R", + | "left": {"name": "RL", + | "left": {"name": "RLL"}, + | "right": {"name": "RLR"}}, + | "right": {"name": "RR"}} + |}""" + .trimMargin() + val dataClass = + DataClassSelfReferential( + "tree", + DataClassSelfReferential("L", DataClassSelfReferential("LL"), DataClassSelfReferential("LR")), + DataClassSelfReferential( + "R", + DataClassSelfReferential("RL", DataClassSelfReferential("RLL"), DataClassSelfReferential("RLR")), + DataClassSelfReferential("RR"))) + + assertRoundTrips(expected, dataClass) + } + + @Test + fun testDataClassWithEmbedded() { + val expected = """{"id": "myId", "embedded": {"name": "embedded1"}}""" + val dataClass = DataClassWithEmbedded("myId", DataClassEmbedded("embedded1")) + + assertRoundTrips(expected, dataClass) + } + + @Test + fun testDataClassListOfDataClasses() { + val expected = """{"id": "myId", "nested": [{"name": "embedded1"}, {"name": "embedded2"}]}""" + val dataClass = + DataClassListOfDataClasses("myId", listOf(DataClassEmbedded("embedded1"), DataClassEmbedded("embedded2"))) + + assertRoundTrips(expected, dataClass) + } + + @Test + fun testDataClassListOfListOfDataClasses() { + val expected = """{"id": "myId", "nested": [[{"name": "embedded1"}], [{"name": "embedded2"}]]}""" + val dataClass = + DataClassListOfListOfDataClasses( + "myId", listOf(listOf(DataClassEmbedded("embedded1")), listOf(DataClassEmbedded("embedded2")))) + + assertRoundTrips(expected, dataClass) + } + + @Test + fun testDataClassMapOfDataClasses() { + val expected = """{"id": "myId", "nested": {"first": {"name": "embedded1"}, "second": {"name": "embedded2"}}}""" + val dataClass = + DataClassMapOfDataClasses( + "myId", mapOf("first" to DataClassEmbedded("embedded1"), "second" to DataClassEmbedded("embedded2"))) + + assertRoundTrips(expected, dataClass) + } + + @Test + fun testDataClassMapOfListOfDataClasses() { + val expected = + """{"id": "myId", "nested": {"first": [{"name": "embedded1"}], "second": [{"name": "embedded2"}]}}""" + val dataClass = + DataClassMapOfListOfDataClasses( + "myId", + mapOf( + "first" to listOf(DataClassEmbedded("embedded1")), + "second" to listOf(DataClassEmbedded("embedded2")))) + + assertRoundTrips(expected, dataClass) + } + + @Test + fun testDataClassWithParameterizedDataClass() { + val expected = + """{"id": "myId", + | "parameterizedDataClass": {"number": 2.0, "string": "myString", + | "parameterizedList": [{"name": "embedded1"}]} + |}""" + .trimMargin() + val dataClass = + DataClassWithParameterizedDataClass( + "myId", DataClassParameterized(2.0, "myString", listOf(DataClassEmbedded("embedded1")))) + + assertRoundTrips(expected, dataClass) + } + + @Test + fun testDataClassWithNestedParameterizedDataClass() { + val expected = + """{"id": "myId", + |"nestedParameterized": { + | "parameterizedDataClass": + | {"number": 4.2, "string": "myString", "parameterizedList": [{"name": "embedded1"}]}, + | "other": "myOtherString", "optionalOther": "myOptionalOtherString" + | } + |}""" + .trimMargin() + val dataClass = + DataClassWithNestedParameterizedDataClass( + "myId", + DataClassWithNestedParameterized( + DataClassParameterized(4.2, "myString", listOf(DataClassEmbedded("embedded1"))), + "myOtherString", + "myOptionalOtherString")) + + assertRoundTrips(expected, dataClass) + } + + @Test + fun testDataClassWithPair() { + val expected = """{"pair": {"first": "a", "second": 
1}}"""
+        val dataClass = DataClassWithPair("a" to 1)
+
+        assertRoundTrips(expected, dataClass)
+    }
+
+    @Test
+    fun testDataClassWithTriple() {
+        val expected = """{"triple": {"first": "a", "second": 1, "third": 2.1}}"""
+        val dataClass = DataClassWithTriple(Triple("a", 1, 2.1))
+
+        assertRoundTrips(expected, dataClass)
+    }
+
+    @Test
+    fun testDataClassNestedParameterizedTypes() {
+        val expected =
+            """{
+            |"triple": {
+            | "first": "0",
+            | "second": {"first": 1, "second": {"first": 1.2, "second": {"first": "1.3", "second": 1.3}}},
+            | "third": {"first": 2, "second": {"first": 2.1, "second": "two dot two"},
+            | "third": {"first": "3.1", "second": {"first": 3.2, "second": "three dot two" },
+            | "third": 3.3 }}
+            | }
+            |}"""
+                .trimMargin()
+        val dataClass =
+            DataClassNestedParameterizedTypes(
+                Triple(
+                    "0",
+                    Pair(1, Pair(1.2, Pair("1.3", 1.3))),
+                    Triple(2, Pair(2.1, "two dot two"), Triple("3.1", Pair(3.2, "three dot two"), 3.3))))
+
+        assertRoundTrips(expected, dataClass)
+    }
+
+    @Test
+    fun testDataClassWithMutableList() {
+        val expected = """{"value": ["A", "B", "C"]}"""
+        val dataClass = DataClassWithMutableList(mutableListOf("A", "B", "C"))
+
+        assertRoundTrips(expected, dataClass)
+    }
+
+    @Test
+    fun testDataClassWithMutableSet() {
+        val expected = """{"value": ["A", "B", "C"]}"""
+        val dataClass = DataClassWithMutableSet(mutableSetOf("A", "B", "C"))
+
+        assertRoundTrips(expected, dataClass)
+    }
+
+    @Test
+    fun testDataClassWithMutableMap() {
+        val expected = """{"value": {"a": "A", "b": "B", "c": "C"}}"""
+        val dataClass = DataClassWithMutableMap(mutableMapOf("a" to "A", "b" to "B", "c" to "C"))
+
+        assertRoundTrips(expected, dataClass)
+    }
+
+    @Test
+    fun testDataClassWithAnnotations() {
+        val expected = """{"_id": "id", "nom": "name", "string": "string"}"""
+        val dataClass = DataClassWithAnnotations("id", "name", "string")
+
+        assertRoundTrips(expected, dataClass)
+    }
+
+    @Test
+    fun testDataClassWithEncodeDefault() {
+        val expectedDefault =
+            """{
+            | "boolean": false,
+            | "listSimple": ["a", "b", "c"]
+            |}"""
+                .trimMargin()
+
+        val defaultDataClass = DataClassWithEncodeDefault()
+        assertRoundTrips(expectedDefault, defaultDataClass)
+        assertRoundTrips("""{"listSimple": ["a", "b", "c"]}""", defaultDataClass, altConfiguration)
+
+        val expectedSomeOverrides = """{"string": "STRING", "listSimple": ["a"]}"""
+        val someOverridesDataClass = DataClassWithEncodeDefault(string = "STRING", listSimple = listOf("a"))
+        assertRoundTrips(expectedSomeOverrides, someOverridesDataClass, altConfiguration)
+    }
+
+    @Test
+    fun testDataClassWithRequired() {
+        val expectedDefault =
+            """{
+            | "boolean": false,
+            | "string": "String",
+            | "listSimple": ["a", "b", "c"]
+            |}"""
+                .trimMargin()
+
+        val defaultDataClass = DataClassWithRequired()
+        assertRoundTrips(expectedDefault, defaultDataClass)
+
+        assertThrows<MissingFieldException> { deserialize<DataClassWithRequired>(BsonDocument()) }
+    }
+
+    @Test
+    fun testDataClassWithEnum() {
+        val expected = """{"value": "A"}"""
+
+        val dataClass = DataClassWithEnum(Key.A)
+        assertRoundTrips(expected, dataClass)
+    }
+
+    @Test
+    fun testDataClassWithEnumKeyMap() {
+        val expected = """{"map": {"A": true, "B": false}}"""
+
+        val dataClass = DataClassWithEnumMapKey(mapOf(Key.A to true, Key.B to false))
+        assertRoundTrips(expected, dataClass)
+    }
+
+    @Test
+    fun testDataClassWithSequence() {
+        val dataClass = DataClassWithSequence(listOf("A", "B", "C").asSequence())
+        assertThrows<SerializationException> { serialize(dataClass) }
+    }
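+
+    // BSON document keys are strings, so maps keyed by types such as Boolean or a data class
+    // are rejected on both encode and decode, as the next two tests verify.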
DataClassWithBooleanMapKey(mapOf(true to true, false to false))
+        assertThrows<SerializationException> { serialize(dataClass) }
+        assertThrows<SerializationException> {
+            deserialize<DataClassWithBooleanMapKey>(BsonDocument.parse("""{"map": {"true": true}}"""))
+        }
+    }
+
+    @Test
+    fun testDataClassWithDataClassKeyMap() {
+        val dataClass = DataClassWithDataClassMapKey(mapOf(DataClassKey("A") to true, DataClassKey("A") to false))
+        assertThrows<SerializationException> { serialize(dataClass) }
+        assertThrows<SerializationException> {
+            deserialize<DataClassWithDataClassMapKey>(BsonDocument.parse("""{"map": {"A": true}}"""))
+        }
+    }
+
+    @Test
+    fun testDataClassEmbeddedWithExtraData() {
+        val expected =
+            """{
+            | "extraA": "extraA",
+            | "name": "NAME",
+            | "extraB": "extraB"
+            |}"""
+                .trimMargin()
+
+        val dataClass = DataClassEmbedded("NAME")
+        assertDecodesTo(BsonDocument.parse(expected), dataClass)
+    }
+
+    @Test
+    fun testDataClassBsonValues() {
+
+        val dataClass =
+            DataClassBsonValues(
+                allBsonTypesDocument["id"]!!.asObjectId().value,
+                allBsonTypesDocument["arrayEmpty"]!!.asArray(),
+                allBsonTypesDocument["arraySimple"]!!.asArray(),
+                allBsonTypesDocument["arrayComplex"]!!.asArray(),
+                allBsonTypesDocument["arrayMixedTypes"]!!.asArray(),
+                allBsonTypesDocument["arrayComplexMixedTypes"]!!.asArray(),
+                allBsonTypesDocument["binary"]!!.asBinary(),
+                allBsonTypesDocument["boolean"]!!.asBoolean(),
+                allBsonTypesDocument["code"]!!.asJavaScript(),
+                allBsonTypesDocument["codeWithScope"]!!.asJavaScriptWithScope(),
+                allBsonTypesDocument["dateTime"]!!.asDateTime(),
+                allBsonTypesDocument["decimal128"]!!.asDecimal128(),
+                allBsonTypesDocument["documentEmpty"]!!.asDocument(),
+                allBsonTypesDocument["document"]!!.asDocument(),
+                allBsonTypesDocument["double"]!!.asDouble(),
+                allBsonTypesDocument["int32"]!!.asInt32(),
+                allBsonTypesDocument["int64"]!!.asInt64(),
+                allBsonTypesDocument["maxKey"]!! as BsonMaxKey,
+                allBsonTypesDocument["minKey"]!! as BsonMinKey,
+                allBsonTypesDocument["objectId"]!!.asObjectId(),
+                allBsonTypesDocument["regex"]!!.asRegularExpression(),
+                allBsonTypesDocument["string"]!!.asString(),
+                allBsonTypesDocument["symbol"]!!.asSymbol(),
+                allBsonTypesDocument["timestamp"]!!.asTimestamp(),
+                allBsonTypesDocument["undefined"]!! as BsonUndefined)
+
+        assertRoundTrips(allBsonTypesJson, dataClass)
+    }
+
+    @Test
+    fun testDataClassOptionalBsonValues() {
+        val dataClass =
+            DataClassOptionalBsonValues(
+                allBsonTypesDocument["id"]!!.asObjectId().value,
+                allBsonTypesDocument["arrayEmpty"]!!.asArray(),
+                allBsonTypesDocument["arraySimple"]!!.asArray(),
+                allBsonTypesDocument["arrayComplex"]!!.asArray(),
+                allBsonTypesDocument["arrayMixedTypes"]!!.asArray(),
+                allBsonTypesDocument["arrayComplexMixedTypes"]!!.asArray(),
+                allBsonTypesDocument["binary"]!!.asBinary(),
+                allBsonTypesDocument["boolean"]!!.asBoolean(),
+                allBsonTypesDocument["code"]!!.asJavaScript(),
+                allBsonTypesDocument["codeWithScope"]!!.asJavaScriptWithScope(),
+                allBsonTypesDocument["dateTime"]!!.asDateTime(),
+                allBsonTypesDocument["decimal128"]!!.asDecimal128(),
+                allBsonTypesDocument["documentEmpty"]!!.asDocument(),
+                allBsonTypesDocument["document"]!!.asDocument(),
+                allBsonTypesDocument["double"]!!.asDouble(),
+                allBsonTypesDocument["int32"]!!.asInt32(),
+                allBsonTypesDocument["int64"]!!.asInt64(),
+                allBsonTypesDocument["maxKey"]!! as BsonMaxKey,
+                allBsonTypesDocument["minKey"]!!
as BsonMinKey, + allBsonTypesDocument["objectId"]!!.asObjectId(), + allBsonTypesDocument["regex"]!!.asRegularExpression(), + allBsonTypesDocument["string"]!!.asString(), + allBsonTypesDocument["symbol"]!!.asSymbol(), + allBsonTypesDocument["timestamp"]!!.asTimestamp(), + allBsonTypesDocument["undefined"]!! as BsonUndefined) + + assertRoundTrips(allBsonTypesJson, dataClass) + + val emptyDataClass = + DataClassOptionalBsonValues( + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null, + null) + + assertRoundTrips("{}", emptyDataClass) + assertRoundTrips( + """{ "id": null, "arrayEmpty": null, "arraySimple": null, "arrayComplex": null, "arrayMixedTypes": null, + | "arrayComplexMixedTypes": null, "binary": null, "boolean": null, "code": null, "codeWithScope": null, + | "dateTime": null, "decimal128": null, "documentEmpty": null, "document": null, "double": null, + | "int32": null, "int64": null, "maxKey": null, "minKey": null, "objectId": null, "regex": null, + | "string": null, "symbol": null, "timestamp": null, "undefined": null }""" + .trimMargin(), + emptyDataClass, + BsonConfiguration(explicitNulls = true)) + } + + @Test + fun testDataClassSealed() { + val expectedA = """{"a": "string"}""" + val dataClassA = DataClassSealedA("string") + assertRoundTrips(expectedA, dataClassA) + + val expectedB = """{"b": 1}""" + val dataClassB = DataClassSealedB(1) + assertRoundTrips(expectedB, dataClassB) + + val expectedC = """{"c": "String"}""" + val dataClassC = DataClassSealedC("String") + assertRoundTrips(expectedC, dataClassC) + + val expectedDataClassSealedA = """{"_t": "org.bson.codecs.kotlinx.samples.DataClassSealedA", "a": "string"}""" + val dataClassSealedA = DataClassSealedA("string") as DataClassSealed + assertRoundTrips(expectedDataClassSealedA, dataClassSealedA) + + val expectedDataClassSealedB = """{"_t": "org.bson.codecs.kotlinx.samples.DataClassSealedB", "b": 1}""" + val dataClassSealedB = DataClassSealedB(1) as DataClassSealed + assertRoundTrips(expectedDataClassSealedB, dataClassSealedB) + + val expectedDataClassSealedC = """{"_t": "C", "c": "String"}""" + val dataClassSealedC = DataClassSealedC("String") as DataClassSealed + assertRoundTrips(expectedDataClassSealedC, dataClassSealedC) + + val dataClassListOfSealed = DataClassListOfSealed(listOf(dataClassA, dataClassB, dataClassC)) + val expectedListOfSealed = + """{"items": [$expectedDataClassSealedA, $expectedDataClassSealedB, $expectedDataClassSealedC]}""" + assertRoundTrips(expectedListOfSealed, dataClassListOfSealed) + + val expectedListOfSealedDiscriminator = expectedListOfSealed.replace("_t", "#class") + assertRoundTrips( + expectedListOfSealedDiscriminator, dataClassListOfSealed, BsonConfiguration(classDiscriminator = "#class")) + } + + @Test + fun testDataClassOpen() { + val expectedA = """{"a": "string"}""" + val dataClassA = DataClassOpenA("string") + assertRoundTrips(expectedA, dataClassA) + + val expectedB = """{"b": 1}""" + val dataClassB = DataClassOpenB(1) + assertRoundTrips(expectedB, dataClassB) + + val serializersModule = + SerializersModule { + this.polymorphic(DataClassOpen::class) { + this.subclass(DataClassOpenA::class) + this.subclass(DataClassOpenB::class) + } + } + defaultSerializersModule + + val dataClassContainsOpenA = DataClassContainsOpen(dataClassA) + val expectedOpenA = """{"open": {"_t": "org.bson.codecs.kotlinx.samples.DataClassOpenA", "a": "string"}}""" + 
assertRoundTrips(expectedOpenA, dataClassContainsOpenA, serializersModule = serializersModule)
+
+        val dataClassContainsOpenB = DataClassContainsOpen(dataClassB)
+        val expectedOpenB = """{"open": {"#class": "org.bson.codecs.kotlinx.samples.DataClassOpenB", "b": 1}}"""
+        assertRoundTrips(
+            expectedOpenB,
+            dataClassContainsOpenB,
+            configuration = BsonConfiguration(classDiscriminator = "#class"),
+            serializersModule = serializersModule)
+    }
+
+    @Test
+    fun testValueClasses() {
+        val expected = """{"value": "valueString"}"""
+        val valueClass = ValueClass("valueString")
+        val dataClass = DataClassContainsValueClass(valueClass)
+
+        assertThrows<BsonInvalidOperationException>() { serialize(valueClass) }
+        assertRoundTrips(expected, dataClass)
+    }
+
+    @Test
+    fun testDataClassWithJsonElement() {
+        val expected =
+            """{"value": {
+            |"char": "c",
+            |"byte": 0,
+            |"short": 1,
+            |"int": 22,
+            |"long": {"$numberLong": "3000000000"},
+            |"decimal": {"$numberDecimal": "10000000000000000000"},
+            |"decimal2": {"$numberDecimal": "3.1230E+700"},
+            |"float": 4.0,
+            |"double": 4.2,
+            |"boolean": true,
+            |"string": "String"
+            |}}"""
+                .trimMargin()
+
+        val dataClass =
+            DataClassWithJsonElement(
+                buildJsonObject {
+                    put("char", "c")
+                    put("byte", 0)
+                    put("short", 1)
+                    put("int", 22)
+                    put("long", 3_000_000_000)
+                    put("decimal", BigDecimal("10000000000000000000"))
+                    put("decimal2", BigDecimal("3.1230E+700"))
+                    put("float", 4.0)
+                    put("double", 4.2)
+                    put("boolean", true)
+                    put("string", "String")
+                })
+
+        assertRoundTrips(expected, dataClass)
+    }
+
+    @Test
+    fun testDataClassWithJsonElements() {
+        val expected =
+            """{
+            | "jsonElement": {"string": "String"},
+            | "jsonArray": [1, 2],
+            | "jsonElements": [{"string": "String"}, {"int": 42}],
+            | "jsonNestedMap": {"nestedString": {"string": "String"},
+            | "nestedLong": {"long": {"$numberLong": "3000000000"}}}
+            |}"""
+                .trimMargin()
+
+        val dataClass =
+            DataClassWithJsonElements(
+                buildJsonObject { put("string", "String") },
+                buildJsonArray {
+                    add(JsonPrimitive(1))
+                    add(JsonPrimitive(2))
+                },
+                listOf(buildJsonObject { put("string", "String") }, buildJsonObject { put("int", 42) }),
+                mapOf(
+                    Pair("nestedString", buildJsonObject { put("string", "String") }),
+                    Pair("nestedLong", buildJsonObject { put("long", 3000000000L) })))
+
+        assertRoundTrips(expected, dataClass)
+    }
+
+    @Test
+    fun testDataClassWithJsonElementsNullable() {
+        val expected =
+            """{
+            | "jsonElement": {"null": null},
+            | "jsonArray": [1, 2, null],
+            | "jsonElements": [{"null": null}],
+            | "jsonNestedMap": {"nestedNull": null}
+            |}"""
+                .trimMargin()
+
+        val dataClass =
+            DataClassWithJsonElementsNullable(
+                buildJsonObject { put("null", null) },
+                buildJsonArray {
+                    add(JsonPrimitive(1))
+                    add(JsonPrimitive(2))
+                    add(JsonPrimitive(null))
+                },
+                listOf(buildJsonObject { put("null", null) }),
+                mapOf(Pair("nestedNull", null)))
+
+        assertRoundTrips(expected, dataClass, altConfiguration)
+
+        val expectedNoNulls =
+            """{
+            | "jsonElement": {},
+            | "jsonArray": [1, 2],
+            | "jsonElements": [{}],
+            | "jsonNestedMap": {}
+            |}"""
+                .trimMargin()
+
+        val dataClassNoNulls =
+            DataClassWithJsonElementsNullable(
+                buildJsonObject {},
+                buildJsonArray {
+                    add(JsonPrimitive(1))
+                    add(JsonPrimitive(2))
+                },
+                listOf(buildJsonObject {}),
+                mapOf())
+        assertEncodesTo(expectedNoNulls, dataClass)
+        assertDecodesTo(expectedNoNulls, dataClassNoNulls)
+    }
+
+    @Test
+    fun testDataClassWithJsonElementNullSupport() {
+        val expected =
+            """{"jsonElement": {"null": null},
+            | "jsonArray": [1, 2, null],
+            | "jsonElements": [{"null": null}],
+            | 
"jsonNestedMap": {"nestedNull": null} + | } + | """ + .trimMargin() + + val dataClass = + DataClassWithJsonElements( + buildJsonObject { put("null", null) }, + buildJsonArray { + add(JsonPrimitive(1)) + add(JsonPrimitive(2)) + add(JsonPrimitive(null)) + }, + listOf(buildJsonObject { put("null", null) }), + mapOf(Pair("nestedNull", JsonPrimitive(null)))) + + assertRoundTrips(expected, dataClass, altConfiguration) + + val expectedNoNulls = + """{"jsonElement": {}, + | "jsonArray": [1, 2], + | "jsonElements": [{}], + | "jsonNestedMap": {} + | } + | """ + .trimMargin() + + val dataClassNoNulls = + DataClassWithJsonElements( + buildJsonObject {}, + buildJsonArray { + add(JsonPrimitive(1)) + add(JsonPrimitive(2)) + }, + listOf(buildJsonObject {}), + mapOf()) + assertEncodesTo(expectedNoNulls, dataClass) + assertDecodesTo(expectedNoNulls, dataClassNoNulls) + } + + @Test + @Suppress("LongMethod") + fun testDataClassWithJsonElementBsonSupport() { + val dataClassWithAllSupportedJsonTypes = + DataClassWithJsonElement( + buildJsonObject { + put("id", "111111111111111111111111") + put("arrayEmpty", buildJsonArray {}) + put( + "arraySimple", + buildJsonArray { + add(JsonPrimitive(1)) + add(JsonPrimitive(2)) + add(JsonPrimitive(3)) + }) + put( + "arrayComplex", + buildJsonArray { + add(buildJsonObject { put("a", JsonPrimitive(1)) }) + add(buildJsonObject { put("a", JsonPrimitive(2)) }) + }) + put( + "arrayMixedTypes", + buildJsonArray { + add(JsonPrimitive(1)) + add(JsonPrimitive(2)) + add(JsonPrimitive(true)) + add( + buildJsonArray { + add(JsonPrimitive(1)) + add(JsonPrimitive(2)) + add(JsonPrimitive(3)) + }) + add(buildJsonObject { put("a", JsonPrimitive(2)) }) + }) + put( + "arrayComplexMixedTypes", + buildJsonArray { + add(buildJsonObject { put("a", JsonPrimitive(1)) }) + add(buildJsonObject { put("a", JsonPrimitive("a")) }) + }) + put("binary", JsonPrimitive("S2Fma2Egcm9ja3Mh")) + put("boolean", JsonPrimitive(true)) + put("dateTime", JsonPrimitive(1577836801000)) + put("decimal128", JsonPrimitive(1.0)) + put("documentEmpty", buildJsonObject {}) + put("document", buildJsonObject { put("a", JsonPrimitive(1)) }) + put("double", JsonPrimitive(62.0)) + put("int32", JsonPrimitive(42)) + put("int64", JsonPrimitive(52)) + put("objectId", JsonPrimitive("211111111111111111111112")) + put("string", JsonPrimitive("the fox ...")) + put("timestamp", JsonPrimitive(1311768464867721221)) + }) + + val jsonWriterSettings = + JsonWriterSettings.builder() + .outputMode(JsonMode.RELAXED) + .objectIdConverter { oid, writer -> writer.writeString(oid.toHexString()) } + .dateTimeConverter { d, writer -> writer.writeNumber(d.toString()) } + .timestampConverter { ts, writer -> writer.writeNumber(ts.value.toString()) } + .binaryConverter { b, writer -> writer.writeString(Base64.getEncoder().encodeToString(b.data)) } + .decimal128Converter { d, writer -> writer.writeNumber(d.toDouble().toString()) } + .build() + val dataClassWithAllSupportedJsonTypesSimpleJson = jsonAllSupportedTypesDocument.toJson(jsonWriterSettings) + + assertEncodesTo( + """{"value": $dataClassWithAllSupportedJsonTypesSimpleJson }""", dataClassWithAllSupportedJsonTypes) + assertDecodesTo("""{"value": $jsonAllSupportedTypesDocument}""", dataClassWithAllSupportedJsonTypes) + } + + @Test + fun testDataFailures() { + assertThrows("Missing data") { + val codec = KotlinSerializerCodec.create(DataClassWithSimpleValues::class) + codec?.decode(BsonDocumentReader(BsonDocument()), DecoderContext.builder().build()) + } + + assertThrows("Invalid types") { + val data = + 
BsonDocument.parse(
+                    """{"char": 123, "short": "2", "int": 22, "long": "ok", "float": true, "double": false,
+                        | "boolean": "true", "string": 99}"""
+                        .trimMargin())
+            val codec = KotlinSerializerCodec.create<DataClassWithSimpleValues>()
+            codec?.decode(BsonDocumentReader(data), DecoderContext.builder().build())
+        }
+
+        assertThrows<IllegalArgumentException>("Failing init") {
+            val data = BsonDocument.parse("""{"id": "myId"}""")
+            val codec = KotlinSerializerCodec.create<DataClassWithFailingInit>()
+            codec?.decode(BsonDocumentReader(data), DecoderContext.builder().build())
+        }
+
+        var exception =
+            assertThrows<SerializationException>("Invalid complex types - document") {
+                val data = BsonDocument.parse("""{"_id": "myId", "embedded": 123}""")
+                val codec = KotlinSerializerCodec.create<DataClassWithEmbedded>()
+                codec?.decode(BsonDocumentReader(data), DecoderContext.builder().build())
+            }
+        assertEquals(
+            "Invalid data for `org.bson.codecs.kotlinx.samples.DataClassEmbedded` " +
+                "expected a bson document found: INT32",
+            exception.message)
+
+        exception =
+            assertThrows<SerializationException>("Invalid complex types - list") {
+                val data = BsonDocument.parse("""{"_id": "myId", "nested": 123}""")
+                val codec = KotlinSerializerCodec.create<DataClassListOfDataClasses>()
+                codec?.decode(BsonDocumentReader(data), DecoderContext.builder().build())
+            }
+        assertEquals("Invalid data for `LIST` expected a bson array found: INT32", exception.message)
+
+        exception =
+            assertThrows<SerializationException>("Invalid complex types - map") {
+                val data = BsonDocument.parse("""{"_id": "myId", "nested": 123}""")
+                val codec = KotlinSerializerCodec.create<DataClassMapOfDataClasses>()
+                codec?.decode(BsonDocumentReader(data), DecoderContext.builder().build())
+            }
+        assertEquals("Invalid data for `MAP` expected a bson document found: INT32", exception.message)
+
+        exception =
+            assertThrows<SerializationException>("Missing discriminator") {
+                val data = BsonDocument.parse("""{"_id": {"$oid": "111111111111111111111111"}, "name": "string"}""")
+                val codec = KotlinSerializerCodec.create<SealedInterface>()
+                codec?.decode(BsonDocumentReader(data), DecoderContext.builder().build())
+            }
+        assertEquals(
+            "Missing required discriminator field `_t` for polymorphic class: " +
+                "`org.bson.codecs.kotlinx.samples.SealedInterface`.",
+            exception.message)
+    }
+
+    @Test
+    fun testInvalidAnnotations() {
+        assertThrows<CodecConfigurationException> { KotlinSerializerCodec.create(DataClassWithBsonId::class) }
+        assertThrows<CodecConfigurationException> { KotlinSerializerCodec.create(DataClassWithBsonProperty::class) }
+        assertThrows<CodecConfigurationException> {
+            KotlinSerializerCodec.create(DataClassWithBsonDiscriminator::class)
+        }
+        assertThrows<CodecConfigurationException> { KotlinSerializerCodec.create(DataClassWithBsonConstructor::class) }
+        assertThrows<CodecConfigurationException> { KotlinSerializerCodec.create(DataClassWithBsonIgnore::class) }
+        assertThrows<CodecConfigurationException> {
+            KotlinSerializerCodec.create(DataClassWithBsonExtraElements::class)
+        }
+        assertThrows<CodecConfigurationException> {
+            KotlinSerializerCodec.create(DataClassWithBsonRepresentation::class)
+        }
+    }
+
+    @Test
+    fun testSnakeCaseNamingStrategy() {
+        val expected =
+            """{"two_words": "", "my_property": "", "camel_case_underscores": "", "url_mapping": "",
+                | "my_http_auth": "", "my_http2_api_key": "", "my_http2fast_api_key": ""}"""
+                .trimMargin()
+        val dataClass = DataClassWithCamelCase()
+        assertRoundTrips(expected, dataClass, BsonConfiguration(bsonNamingStrategy = BsonNamingStrategy.SNAKE_CASE))
+    }
+
+    @Test
+    fun testSameSnakeCaseName() {
+        val expected = """{"my_http_auth": "", "my_http_auth1": ""}"""
+        val dataClass = DataClassWithSameSnakeCaseName()
+        val exception =
+            assertThrows<BsonSerializationException> {
+                assertRoundTrips(
+                    expected, dataClass, BsonConfiguration(bsonNamingStrategy = BsonNamingStrategy.SNAKE_CASE))
+            }
+        assertEquals(
+            "myHTTPAuth, myHttpAuth in org.bson.codecs.kotlinx.samples.DataClassWithSameSnakeCaseName" +
+                " generate same name: my_http_auth.\n" +
+                "myHTTPAuth1, myHttpAuth1 in org.bson.codecs.kotlinx.samples.DataClassWithSameSnakeCaseName" +
+                " generate same name: my_http_auth1.\n",
+            exception.message)
+    }
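+
+    // With BsonNamingStrategy.SNAKE_CASE, distinct Kotlin property names such as `myHTTPAuth` and
+    // `myHttpAuth` can collapse to the same BSON field name; the codec gathers every collision and
+    // reports them in one exception message, as asserted above.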
+
+    @Test
+    fun testKotlinAllowedName() {
+        val expected = """{"имя_переменной": "", "variable _name": ""}"""
+        val dataClass = DataClassWithKotlinAllowedName()
+        assertRoundTrips(expected, dataClass, BsonConfiguration(bsonNamingStrategy = BsonNamingStrategy.SNAKE_CASE))
+    }
+
+    private inline fun <reified T : Any> assertRoundTrips(
+        expected: String,
+        value: T,
+        configuration: BsonConfiguration = BsonConfiguration(),
+        serializersModule: SerializersModule = defaultSerializersModule
+    ) {
+        assertDecodesTo(
+            assertEncodesTo(expected, value, serializersModule, configuration), value, serializersModule, configuration)
+    }
+
+    private inline fun <reified T : Any> assertEncodesTo(
+        json: String,
+        value: T,
+        serializersModule: SerializersModule = defaultSerializersModule,
+        configuration: BsonConfiguration = BsonConfiguration()
+    ): BsonDocument {
+        val expected = BsonDocument.parse(json)
+        val actual = serialize(value, serializersModule, configuration)
+        println(actual.toJson())
+        assertEquals(expected, actual)
+        return actual
+    }
+
+    private inline fun <reified T : Any> serialize(
+        value: T,
+        serializersModule: SerializersModule = defaultSerializersModule,
+        configuration: BsonConfiguration = BsonConfiguration()
+    ): BsonDocument {
+        val document = BsonDocument()
+        val writer = BsonDocumentWriter(document)
+        val codec = KotlinSerializerCodec.create(T::class, serializersModule, configuration)!!
+        codec.encode(writer, value, EncoderContext.builder().build())
+        writer.flush()
+        return document
+    }
+
+    private inline fun <reified T : Any> assertDecodesTo(
+        value: String,
+        expected: T,
+        serializersModule: SerializersModule = defaultSerializersModule,
+        configuration: BsonConfiguration = BsonConfiguration()
+    ) {
+        assertDecodesTo(BsonDocument.parse(value), expected, serializersModule, configuration)
+    }
+
+    private inline fun <reified T : Any> assertDecodesTo(
+        value: BsonDocument,
+        expected: T,
+        serializersModule: SerializersModule = defaultSerializersModule,
+        configuration: BsonConfiguration = BsonConfiguration()
+    ) {
+        assertEquals(expected, deserialize(value, serializersModule, configuration))
+    }
+
+    private inline fun <reified T : Any> deserialize(
+        value: BsonDocument,
+        serializersModule: SerializersModule = defaultSerializersModule,
+        configuration: BsonConfiguration = BsonConfiguration()
+    ): T {
+        val codec = KotlinSerializerCodec.create(T::class, serializersModule, configuration)!!
+        return codec.decode(BsonDocumentReader(value), DecoderContext.builder().build())
+    }
+}
diff --git a/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/samples/DataClasses.kt b/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/samples/DataClasses.kt
new file mode 100644
index 00000000000..773af52cd96
--- /dev/null
+++ b/bson-kotlinx/src/test/kotlin/org/bson/codecs/kotlinx/samples/DataClasses.kt
@@ -0,0 +1,371 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.bson.codecs.kotlinx.samples
+
+import kotlinx.datetime.Instant
+import kotlinx.datetime.LocalDate
+import kotlinx.datetime.LocalDateTime
+import kotlinx.datetime.LocalTime
+import kotlinx.serialization.Contextual
+import kotlinx.serialization.EncodeDefault
+import kotlinx.serialization.ExperimentalSerializationApi
+import kotlinx.serialization.Required
+import kotlinx.serialization.SerialName
+import kotlinx.serialization.Serializable
+import kotlinx.serialization.json.JsonArray
+import kotlinx.serialization.json.JsonElement
+import org.bson.BsonArray
+import org.bson.BsonBinary
+import org.bson.BsonBoolean
+import org.bson.BsonDateTime
+import org.bson.BsonDecimal128
+import org.bson.BsonDocument
+import org.bson.BsonDouble
+import org.bson.BsonInt32
+import org.bson.BsonInt64
+import org.bson.BsonJavaScript
+import org.bson.BsonJavaScriptWithScope
+import org.bson.BsonMaxKey
+import org.bson.BsonMinKey
+import org.bson.BsonObjectId
+import org.bson.BsonRegularExpression
+import org.bson.BsonString
+import org.bson.BsonSymbol
+import org.bson.BsonTimestamp
+import org.bson.BsonType
+import org.bson.BsonUndefined
+import org.bson.codecs.pojo.annotations.BsonCreator
+import org.bson.codecs.pojo.annotations.BsonDiscriminator
+import org.bson.codecs.pojo.annotations.BsonExtraElements
+import org.bson.codecs.pojo.annotations.BsonId
+import org.bson.codecs.pojo.annotations.BsonIgnore
+import org.bson.codecs.pojo.annotations.BsonProperty
+import org.bson.codecs.pojo.annotations.BsonRepresentation
+import org.bson.types.ObjectId
+
+@Serializable
+data class DataClassWithSimpleValues(
+    val char: Char,
+    val byte: Byte,
+    val short: Short,
+    val int: Int,
+    val long: Long,
+    val float: Float,
+    val double: Double,
+    val boolean: Boolean,
+    val string: String
+)
+
+@Serializable
+data class DataClassWithContextualDateValues(
+    @Contextual val instant: Instant,
+    @Contextual val localTime: LocalTime,
+    @Contextual val localDateTime: LocalDateTime,
+    @Contextual val localDate: LocalDate,
+)
+
+@Serializable
+data class DataClassWithDateValues(
+    val instant: Instant,
+    val localTime: LocalTime,
+    val localDateTime: LocalDateTime,
+    val localDate: LocalDate,
+)
+
+@Serializable
+data class DataClassWithCollections(
+    val listSimple: List<String>,
+    val listList: List<List<String>>,
+    val listMap: List<Map<String, Int>>,
+    val mapSimple: Map<String, Int>,
+    val mapList: Map<String, List<Int>>,
+    val mapMap: Map<String, Map<String, Int>>
+)
+
+@Serializable
+data class DataClassWithDefaults(
+    val boolean: Boolean = false,
+    val string: String = "String",
+    val listSimple: List<String> = listOf("a", "b", "c")
+)
+
+@Serializable
+data class DataClassWithCamelCase(
+    val twoWords: String = "",
+    @Suppress("ConstructorParameterNaming") val MyProperty: String = "",
+    @Suppress("ConstructorParameterNaming") val camel_Case_Underscores: String = "",
+    @Suppress("ConstructorParameterNaming") val URLMapping: String = "",
+    val myHTTPAuth: String = "",
+    val myHTTP2ApiKey: String = "",
+    val myHTTP2fastApiKey: String = "",
+)
+
+@Serializable
+data class DataClassWithSameSnakeCaseName(
+    val myHTTPAuth: String = "",
+    val myHttpAuth: String = "",
+    val myHTTPAuth1: String = "",
+    val myHttpAuth1: String = "",
+)
+
+@Serializable
+data class DataClassWithKotlinAllowedName(
+    @Suppress("ConstructorParameterNaming") val имяПеременной: String = "",
+    @Suppress("ConstructorParameterNaming") val `variable Name`: String = "",
+)
+
+@Serializable
+data class DataClassWithNulls(val boolean: Boolean?, val string: String?, val listSimple: List<String>?)
+
+@Serializable
+data class DataClassWithListThatLastItemDefaultsToNull(val elements: List<DataClassLastItemDefaultsToNull>)
+
+@Serializable data class DataClassLastItemDefaultsToNull(val required: String, val optional: String? = null)
+
+@Serializable
+data class DataClassSelfReferential(
+    val name: String,
+    val left: DataClassSelfReferential? = null,
+    val right: DataClassSelfReferential? = null
+)
+
+@Serializable data class DataClassEmbedded(val name: String)
+
+@Serializable data class DataClassWithEmbedded(val id: String, val embedded: DataClassEmbedded)
+
+@Serializable data class DataClassListOfDataClasses(val id: String, val nested: List<DataClassEmbedded>)
+
+@Serializable data class DataClassListOfListOfDataClasses(val id: String, val nested: List<List<DataClassEmbedded>>)
+
+@Serializable data class DataClassMapOfDataClasses(val id: String, val nested: Map<String, DataClassEmbedded>)
+
+@Serializable
+data class DataClassMapOfListOfDataClasses(val id: String, val nested: Map<String, List<DataClassEmbedded>>)
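+
+// The parameterized samples below are always used with concrete type arguments at their use sites
+// (for example DataClassParameterized<Double, DataClassEmbedded> inside
+// DataClassWithParameterizedDataClass), which lets the codec resolve each type parameter statically;
+// N is bounded to Number for the numeric slot.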
+
+@Serializable
+data class DataClassWithParameterizedDataClass(
+    val id: String,
+    val parameterizedDataClass: DataClassParameterized<Double, DataClassEmbedded>
+)
+
+@Serializable
+data class DataClassParameterized<N : Number, T>(val number: N, val string: String, val parameterizedList: List<T>)
+
+@Serializable
+data class DataClassWithNestedParameterizedDataClass(
+    val id: String,
+    val nestedParameterized: DataClassWithNestedParameterized<DataClassEmbedded, Double, String>
+)
+
+@Serializable
+data class DataClassWithNestedParameterized<A, N : Number, B>(
+    val parameterizedDataClass: DataClassParameterized<N, A>,
+    val other: B,
+    val optionalOther: B?
+)
+
+@Serializable data class DataClassWithPair(val pair: Pair<String, Int>)
+
+@Serializable data class DataClassWithTriple(val triple: Triple<String, Int, Double>)
+
+@Serializable
+data class DataClassNestedParameterizedTypes(
+    val triple:
+        Triple<
+            String,
+            Pair<Int, Pair<Double, Pair<String, Double>>>,
+            Triple<Int, Pair<Double, String>, Triple<String, Pair<Double, String>, Double>>>
+)
+
+@Serializable data class DataClassWithMutableList(val value: MutableList<String>)
+
+@Serializable data class DataClassWithMutableSet(val value: MutableSet<String>)
+
+@Serializable data class DataClassWithMutableMap(val value: MutableMap<String, String>)
+
+@Serializable
+data class DataClassWithAnnotations(
+    @SerialName("_id") val id: String,
+    @SerialName("nom") val name: String,
+    val string: String
+)
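+
+// @EncodeDefault(Mode.NEVER) omits a property while it still holds its default value, and
+// Mode.ALWAYS writes it even when the configuration sets encodeDefaults = false; unannotated
+// properties with defaults simply follow the configuration's encodeDefaults setting.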
+
+@OptIn(ExperimentalSerializationApi::class)
+@Serializable
+data class DataClassWithEncodeDefault(
+    val boolean: Boolean = false,
+    @EncodeDefault(EncodeDefault.Mode.NEVER) val string: String = "String",
+    @EncodeDefault(EncodeDefault.Mode.ALWAYS) val listSimple: List<String> = listOf("a", "b", "c")
+)
+
+@Serializable
+data class DataClassWithRequired(
+    val boolean: Boolean = false,
+    @Required val string: String = "String",
+    @Required val listSimple: List<String> = listOf("a", "b", "c")
+)
+
+@Serializable data class DataClassWithBooleanMapKey(val map: Map<Boolean, Boolean>)
+
+enum class Key {
+    A,
+    B
+}
+
+@Serializable data class DataClassWithEnum(val value: Key)
+
+@Serializable data class DataClassWithEnumMapKey(val map: Map<Key, Boolean>)
+
+@Serializable data class DataClassKey(val value: String)
+
+@Serializable data class DataClassWithDataClassMapKey(val map: Map<DataClassKey, Boolean>)
+
+@Serializable
+data class DataClassBsonValues(
+    @Contextual val id: ObjectId,
+    @Contextual val arrayEmpty: BsonArray,
+    @Contextual val arraySimple: BsonArray,
+    @Contextual val arrayComplex: BsonArray,
+    @Contextual val arrayMixedTypes: BsonArray,
+    @Contextual val arrayComplexMixedTypes: BsonArray,
+    @Contextual val binary: BsonBinary,
+    @Contextual val boolean: BsonBoolean,
+    @Contextual val code: BsonJavaScript,
+    @Contextual val codeWithScope: BsonJavaScriptWithScope,
+    @Contextual val dateTime: BsonDateTime,
+    @Contextual val decimal128: BsonDecimal128,
+    @Contextual val documentEmpty: BsonDocument,
+    @Contextual val document: BsonDocument,
+    @Contextual val double: BsonDouble,
+    @Contextual val int32: BsonInt32,
+    @Contextual val int64: BsonInt64,
+    @Contextual val maxKey: BsonMaxKey,
+    @Contextual val minKey: BsonMinKey,
+    @Contextual val objectId: BsonObjectId,
+    @Contextual val regex: BsonRegularExpression,
+    @Contextual val string: BsonString,
+    @Contextual val symbol: BsonSymbol,
+    @Contextual val timestamp: BsonTimestamp,
+    @Contextual val undefined: BsonUndefined,
+)
+
+@Serializable
+data class DataClassOptionalBsonValues(
+    @Contextual val id: ObjectId?,
+    @Contextual val arrayEmpty: BsonArray?,
+    @Contextual val arraySimple: BsonArray?,
+    @Contextual val arrayComplex: BsonArray?,
+    @Contextual val arrayMixedTypes: BsonArray?,
+    @Contextual val arrayComplexMixedTypes: BsonArray?,
+    @Contextual val binary: BsonBinary?,
+    @Contextual val boolean: BsonBoolean?,
+    @Contextual val code: BsonJavaScript?,
+    @Contextual val codeWithScope: BsonJavaScriptWithScope?,
+    @Contextual val dateTime: BsonDateTime?,
+    @Contextual val decimal128: BsonDecimal128?,
+    @Contextual val documentEmpty: BsonDocument?,
+    @Contextual val document: BsonDocument?,
+    @Contextual val double: BsonDouble?,
+    @Contextual val int32: BsonInt32?,
+    @Contextual val int64: BsonInt64?,
+    @Contextual val maxKey: BsonMaxKey?,
+    @Contextual val minKey: BsonMinKey?,
+    @Contextual val objectId: BsonObjectId?,
+    @Contextual val regex: BsonRegularExpression?,
+    @Contextual val string: BsonString?,
+    @Contextual val symbol: BsonSymbol?,
+    @Contextual val timestamp: BsonTimestamp?,
+    @Contextual val undefined: BsonUndefined?,
+)
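+
+// For the sealed hierarchy below, the class discriminator ("_t" by default, configurable through
+// BsonConfiguration.classDiscriminator) is only written when a value is serialized through the
+// sealed base type; @SerialName("C") overrides the discriminator value for DataClassSealedC.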
+
+@Serializable sealed class DataClassSealed
+
+@Serializable data class DataClassSealedA(val a: String) : DataClassSealed()
+
+@Serializable data class DataClassSealedB(val b: Int) : DataClassSealed()
+
+@Serializable @SerialName("C") data class DataClassSealedC(val c: String) : DataClassSealed()
+
+@Serializable
+sealed interface SealedInterface {
+    val name: String
+}
+
+@Serializable
+data class DataClassSealedInterface(@Contextual @SerialName("_id") val id: ObjectId, override val name: String) :
+    SealedInterface
+
+@Serializable data class DataClassListOfSealed(val items: List<DataClassSealed>)
+
+interface DataClassOpen
+
+@Serializable data class DataClassOpenA(val a: String) : DataClassOpen
+
+@Serializable data class DataClassOpenB(val b: Int) : DataClassOpen
+
+@Serializable data class DataClassContainsOpen(val open: DataClassOpen)
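+
+// ValueClass is a @JvmInline value class: its single property is inlined into the enclosing
+// document field, so it can only be serialized when wrapped in a containing class such as
+// DataClassContainsValueClass; encoding a bare instance at the root fails (see testValueClasses).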
+
+@JvmInline @Serializable value class ValueClass(val s: String)
+
+@Serializable data class DataClassContainsValueClass(val value: ValueClass)
+
+@Serializable data class DataClassWithBsonId(@BsonId val id: String)
+
+@Serializable data class DataClassWithBsonProperty(@BsonProperty("_id") val id: String)
+
+@BsonDiscriminator @Serializable data class DataClassWithBsonDiscriminator(val id: String)
+
+@Serializable data class DataClassWithBsonIgnore(val id: String, @BsonIgnore val ignored: String)
+
+@Serializable
+data class DataClassWithBsonExtraElements(val id: String, @BsonExtraElements val extraElements: Map<String, String>)
+
+@Serializable
+data class DataClassWithBsonConstructor(val id: String, val count: Int) {
+    @BsonCreator constructor(id: String) : this(id, -1)
+}
+
+@Serializable data class DataClassWithBsonRepresentation(@BsonRepresentation(BsonType.STRING) val id: Int)
+
+@Serializable
+data class DataClassWithFailingInit(val id: String) {
+    init {
+        require(false)
+    }
+}
+
+@Serializable data class DataClassWithSequence(val value: Sequence<String>)
+
+@Serializable data class Box<T>(val boxed: T)
+
+@Serializable data class DataClassWithNullableGeneric(val box: Box<String?>)
+
+@Serializable data class DataClassWithJsonElement(val value: JsonElement)
+
+@Serializable
+data class DataClassWithJsonElements(
+    val jsonElement: JsonElement,
+    val jsonArray: JsonArray,
+    val jsonElements: List<JsonElement>,
+    val jsonNestedMap: Map<String, JsonElement>
+)
+
+@Serializable
+data class DataClassWithJsonElementsNullable(
+    val jsonElement: JsonElement?,
+    val jsonArray: JsonArray?,
+    val jsonElements: List<JsonElement?>?,
+    val jsonNestedMap: Map<String, JsonElement?>?
+)
diff --git a/bson-record-codec/build.gradle.kts b/bson-record-codec/build.gradle.kts
new file mode 100644
index 00000000000..5165679c06b
--- /dev/null
+++ b/bson-record-codec/build.gradle.kts
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import ProjectExtensions.configureJarManifest
+import ProjectExtensions.configureMavenPublication
+
+plugins {
+    id("project.java")
+    id("conventions.test-artifacts")
+}
+
+base.archivesName.set("bson-record-codec")
+
+dependencies {
+    api(project(path = ":bson", configuration = "default"))
+
+    // Test case checks MongoClientSettings.getDefaultCodecRegistry() support
+    testImplementation(project(path = ":driver-core", configuration = "default"))
+}
+
+configureMavenPublication {
+    pom {
+        name.set("BSON Record Codec")
+        description.set("The BSON Codec for Java records")
+        url.set("https://bsonspec.org")
+    }
+}
+
+configureJarManifest {
+    attributes["Automatic-Module-Name"] = "org.mongodb.bson.record.codec"
+    attributes["Bundle-SymbolicName"] = "org.mongodb.bson-record-codec"
+}
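+
+// bson-record-codec compiles against Java records, so sources target Java 17 (the first LTS release
+// that includes records) and the test tasks below are skipped entirely on older JVMs.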
+
+java {
+    sourceCompatibility = JavaVersion.VERSION_17
+    targetCompatibility = JavaVersion.VERSION_17
+}
+
+tasks.withType<JavaCompile> { options.release.set(17) }
+
+tasks.withType<Test>().configureEach { onlyIf { javaVersion.isCompatibleWith(JavaVersion.VERSION_17) } }
diff --git a/bson-record-codec/src/main/org/bson/codecs/record/RecordCodec.java b/bson-record-codec/src/main/org/bson/codecs/record/RecordCodec.java
new file mode 100644
index 00000000000..01b59f35265
--- /dev/null
+++ b/bson-record-codec/src/main/org/bson/codecs/record/RecordCodec.java
@@ -0,0 +1,382 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.record;
+
+import org.bson.BsonInvalidOperationException;
+import org.bson.BsonReader;
+import org.bson.BsonType;
+import org.bson.BsonWriter;
+import org.bson.codecs.Codec;
+import org.bson.codecs.DecoderContext;
+import org.bson.codecs.EncoderContext;
+import org.bson.codecs.RepresentationConfigurable;
+import org.bson.codecs.configuration.CodecConfigurationException;
+import org.bson.codecs.configuration.CodecRegistry;
+import org.bson.codecs.pojo.annotations.BsonCreator;
+import org.bson.codecs.pojo.annotations.BsonDiscriminator;
+import org.bson.codecs.pojo.annotations.BsonExtraElements;
+import org.bson.codecs.pojo.annotations.BsonId;
+import org.bson.codecs.pojo.annotations.BsonIgnore;
+import org.bson.codecs.pojo.annotations.BsonProperty;
+import org.bson.codecs.pojo.annotations.BsonRepresentation;
+import org.bson.diagnostics.Logger;
+import org.bson.diagnostics.Loggers;
+
+import javax.annotation.Nullable;
+import java.lang.annotation.Annotation;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.ParameterizedType;
+import java.lang.reflect.RecordComponent;
+import java.lang.reflect.Type;
+import java.lang.reflect.TypeVariable;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+import static java.lang.String.format;
+import static org.bson.assertions.Assertions.notNull;
+
+final class RecordCodec<T extends Record> implements Codec<T> {
+    private static final Logger LOGGER = Loggers.getLogger("RecordCodec");
+    private final Class<T> clazz;
+    private final Constructor<?> canonicalConstructor;
+    private final List<ComponentModel> componentModels;
+    private final ComponentModel componentModelForId;
+    private final Map<String, ComponentModel> fieldNameToComponentModel;
+
+    private static final class ComponentModel {
+        private final RecordComponent component;
+        private final Codec<?> codec;
+        private final int index;
+        private final String fieldName;
+        private final boolean isNullable;
+
+        private ComponentModel(final List<Type> typeParameters, final RecordComponent component, final CodecRegistry codecRegistry,
+                final int index) {
+            validateAnnotations(component, index);
+            this.component = component;
+            this.codec = computeCodec(typeParameters, component, codecRegistry);
+            this.index = index;
+            this.fieldName = computeFieldName(component);
+            this.isNullable = !component.getType().isPrimitive();
+        }
+
+        String getComponentName() {
+            return component.getName();
+        }
+
+        String getFieldName() {
+            return fieldName;
+        }
+
+        Object getValue(final Record record) throws InvocationTargetException, IllegalAccessException {
+            return component.getAccessor().invoke(record);
+        }
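+
+        // Resolves this component's codec from the registry: parameterized component types are
+        // looked up with their actual type arguments (record-level type variables are substituted
+        // from typeParameters), and a @BsonRepresentation annotation re-wraps the resolved codec
+        // through RepresentationConfigurable.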
+
+        private static Codec<?> computeCodec(final List<Type> typeParameters, final RecordComponent component,
+                final CodecRegistry codecRegistry) {
+            var rawType = toWrapper(resolveComponentType(typeParameters, component));
+            var codec = component.getGenericType() instanceof ParameterizedType parameterizedType
+                    ? codecRegistry.get(rawType,
+                            resolveActualTypeArguments(typeParameters, component.getDeclaringRecord(), parameterizedType))
+                    : codecRegistry.get(rawType);
+            BsonType bsonRepresentationType = null;
+
+            if (isAnnotationPresentOnField(component, BsonRepresentation.class)) {
+                bsonRepresentationType = getAnnotationOnField(component,
+                        BsonRepresentation.class).value();
+            }
+            if (bsonRepresentationType != null) {
+                if (codec instanceof RepresentationConfigurable<?> representationConfigurable) {
+                    codec = representationConfigurable.withRepresentation(bsonRepresentationType);
+                } else {
+                    throw new CodecConfigurationException(
+                            format("Codec for %s must implement RepresentationConfigurable to support BsonRepresentation",
+                                    codec.getEncoderClass()));
+                }
+            }
+            return codec;
+        }
+
+        private static Class<?> resolveComponentType(final List<Type> typeParameters, final RecordComponent component) {
+            Type resolvedType = resolveType(component.getGenericType(), typeParameters, component.getDeclaringRecord());
+            return resolvedType instanceof Class<?> clazz ? clazz : component.getType();
+        }
+
+        private static List<Type> resolveActualTypeArguments(final List<Type> typeParameters, final Class<?> recordClass,
+                final ParameterizedType parameterizedType) {
+            return Arrays.stream(parameterizedType.getActualTypeArguments())
+                    .map(type -> resolveType(type, typeParameters, recordClass))
+                    .toList();
+        }
+
+        private static Type resolveType(final Type type, final List<Type> typeParameters, final Class<?> recordClass) {
+            return type instanceof TypeVariable<?> typeVariable
+                    ? typeParameters.get(getIndexOfTypeParameter(typeVariable.getName(), recordClass))
+                    : type;
+        }
+
+        // Gets the index of the type parameter with the given name on the record class.
+        private static int getIndexOfTypeParameter(final String typeParameterName, final Class<?> recordClass) {
+            var typeParameters = recordClass.getTypeParameters();
+            for (int i = 0; i < typeParameters.length; i++) {
+                if (typeParameters[i].getName().equals(typeParameterName)) {
+                    return i;
+                }
+            }
+            throw new CodecConfigurationException(format("Could not find type parameter on record %s with name %s",
+                    recordClass.getName(), typeParameterName));
+        }
+
+        private static String computeFieldName(final RecordComponent component) {
+            if (isAnnotationPresentOnField(component, BsonId.class)) {
+                return "_id";
+            } else if (isAnnotationPresentOnField(component, BsonProperty.class)) {
+                return getAnnotationOnField(component, BsonProperty.class).value();
+            }
+            return component.getName();
+        }
+
+        private static boolean isAnnotationPresentOnField(final RecordComponent component,
+                final Class<? extends Annotation> annotation) {
+            try {
+                return component.getDeclaringRecord().getDeclaredField(component.getName()).isAnnotationPresent(annotation);
+            } catch (NoSuchFieldException e) {
+                throw new AssertionError(format("Unexpectedly missing the declared field for record component %s", component), e);
+            }
+        }
+
+        private static boolean isAnnotationPresentOnCanonicalConstructorParameter(final RecordComponent component,
+                final int index, final Class<? extends Annotation> annotation) {
+            return getCanonicalConstructor(component.getDeclaringRecord()).getParameters()[index].isAnnotationPresent(annotation);
+        }
+
+        private static <T extends Annotation> T getAnnotationOnField(final RecordComponent component, final Class<T> annotation) {
+            try {
+                return component.getDeclaringRecord().getDeclaredField(component.getName()).getAnnotation(annotation);
+            } catch (NoSuchFieldException e) {
+                throw new AssertionError(format("Unexpectedly missing the declared field for recordComponent %s", component), e);
+            }
+        }
+
+        private static void validateAnnotations(final RecordComponent component, final int index) {
+            validateAnnotationNotPresentOnType(component.getDeclaringRecord(), BsonDiscriminator.class);
+            validateAnnotationNotPresentOnConstructor(component.getDeclaringRecord(), BsonCreator.class);
+            validateAnnotationNotPresentOnMethod(component.getDeclaringRecord(), BsonCreator.class);
+            validateAnnotationNotPresentOnFieldOrAccessor(component, BsonIgnore.class);
+            validateAnnotationNotPresentOnFieldOrAccessor(component, BsonExtraElements.class);
+            validateAnnotationOnlyOnField(component, index, BsonId.class);
+            validateAnnotationOnlyOnField(component, index, BsonProperty.class);
+            validateAnnotationOnlyOnField(component, index, BsonRepresentation.class);
+        }
+
+        private static void validateAnnotationNotPresentOnType(final Class<?> clazz,
+                @SuppressWarnings("SameParameterValue") final Class<? extends Annotation> annotation) {
+            if (clazz.isAnnotationPresent(annotation)) {
+                throw new CodecConfigurationException(format("Annotation '%s' not supported on records, but found on '%s'",
+                        annotation, clazz.getName()));
+            }
+        }
+
+        private static void validateAnnotationNotPresentOnConstructor(final Class<?> clazz,
+                @SuppressWarnings("SameParameterValue") final Class<? extends Annotation> annotation) {
+            for (var constructor : clazz.getConstructors()) {
+                if (constructor.isAnnotationPresent(annotation)) {
+                    throw new CodecConfigurationException(
+                            format("Annotation '%s' not supported on record constructors, but found on constructor of '%s'",
+                                    annotation, clazz.getName()));
+                }
+            }
+        }
+
+        private static void validateAnnotationNotPresentOnMethod(final Class<?> clazz,
+                @SuppressWarnings("SameParameterValue") final Class<? extends Annotation> annotation) {
+            for (var method : clazz.getMethods()) {
+                if (method.isAnnotationPresent(annotation)) {
+                    throw new CodecConfigurationException(
+                            format("Annotation '%s' not supported on methods, but found on method '%s' of '%s'",
+                                    annotation, method.getName(), clazz.getName()));
+                }
+            }
+        }
+
+        private static void validateAnnotationNotPresentOnFieldOrAccessor(final RecordComponent component,
+                final Class<? extends Annotation> annotation) {
+            if (isAnnotationPresentOnField(component, annotation)) {
+                throw new CodecConfigurationException(
+                        format("Annotation '%s' is not supported on records, but found on component '%s' of record '%s'",
+                                annotation.getName(), component, component.getDeclaringRecord()));
+            }
+            if (component.getAccessor().isAnnotationPresent(annotation)) {
+                throw new CodecConfigurationException(
+                        format("Annotation '%s' is not supported on records, but found on accessor for component '%s' of record '%s'",
+                                annotation.getName(), component, component.getDeclaringRecord()));
+            }
+        }
+
+        private static void validateAnnotationOnlyOnField(final RecordComponent component, final int index,
+                final Class<? extends Annotation> annotation) {
+            if (!isAnnotationPresentOnField(component, annotation)) {
+                if (component.getAccessor().isAnnotationPresent(annotation)) {
+                    throw new CodecConfigurationException(format("Annotation %s present on accessor but not component '%s' of record '%s'",
+                            annotation.getName(), component, component.getDeclaringRecord()));
+                }
+                if (isAnnotationPresentOnCanonicalConstructorParameter(component, index, annotation)) {
+                    throw new CodecConfigurationException(
+                            format("Annotation %s present on canonical constructor parameter but not component '%s' of record '%s'",
+                                    annotation.getName(), component, component.getDeclaringRecord()));
+                }
+            }
+        }
+    }
+
+    RecordCodec(final Class<T> clazz, final List<Type> types, final CodecRegistry codecRegistry) {
+        if (types.size() != clazz.getTypeParameters().length) {
+            throw new CodecConfigurationException("Unexpected number of type parameters for record class " + clazz);
+        }
+        this.clazz = notNull("class", clazz);
+        canonicalConstructor = notNull("canonicalConstructor", getCanonicalConstructor(clazz));
+        componentModels = getComponentModels(clazz, codecRegistry, types);
+        fieldNameToComponentModel = componentModels.stream()
+                .collect(Collectors.toMap(ComponentModel::getFieldName, Function.identity()));
+        componentModelForId = getComponentModelForId(clazz, componentModels);
+    }
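+
+    // Decoding reads the document field by field: each decoded value is placed at the component's
+    // index in the canonical constructor's argument array (unknown fields are skipped, and an
+    // explicit null is rejected for primitive components), then the canonical constructor is
+    // invoked once at the end.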
+
+    @SuppressWarnings("unchecked")
+    @Override
+    public T decode(final BsonReader reader, final DecoderContext decoderContext) {
+        reader.readStartDocument();
+
+        Object[] constructorArguments = new Object[componentModels.size()];
+        while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+            var fieldName = reader.readName();
+            var componentModel = fieldNameToComponentModel.get(fieldName);
+            if (componentModel == null) {
+                reader.skipValue();
+                if (LOGGER.isTraceEnabled()) {
+                    LOGGER.trace(format("Found property not present in the ClassModel: %s", fieldName));
+                }
+            } else if (reader.getCurrentBsonType() == BsonType.NULL) {
+                if (!componentModel.isNullable) {
+                    throw new BsonInvalidOperationException(format("Null value on primitive field: %s", componentModel.fieldName));
+                }
+                reader.readNull();
+            } else {
+                constructorArguments[componentModel.index] = decoderContext.decodeWithChildContext(componentModel.codec, reader);
+            }
+        }
+        reader.readEndDocument();
+
+        try {
+            return (T) canonicalConstructor.newInstance(constructorArguments);
+        } catch (ReflectiveOperationException e) {
+            throw new CodecConfigurationException(format("Unable to invoke canonical constructor of record class %s", clazz.getName()), e);
+        }
+    }
+
+    @Override
+    public void encode(final BsonWriter writer, final T record, final EncoderContext encoderContext) {
+        writer.writeStartDocument();
+        if (componentModelForId != null) {
+            writeComponent(writer, record, componentModelForId);
+        }
+        for (var componentModel : componentModels) {
+            if (componentModel == componentModelForId) {
+                continue;
+            }
+            writeComponent(writer, record, componentModel);
+        }
+        writer.writeEndDocument();
+    }
+
+    @Override
+    public Class<T> getEncoderClass() {
+        return clazz;
+    }
+
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    private void writeComponent(final BsonWriter writer, final T record, final ComponentModel componentModel) {
+        try {
+            Object componentValue = componentModel.getValue(record);
+            if (componentValue != null) {
+                writer.writeName(componentModel.getFieldName());
+                ((Codec) componentModel.codec).encode(writer, componentValue, EncoderContext.builder().build());
+            }
+        } catch (ReflectiveOperationException e) {
+            throw new CodecConfigurationException(
+                    format("Unable to access value of component %s for record %s", componentModel.getComponentName(), clazz.getName()), e);
+        }
+    }
+
+    private static List<ComponentModel> getComponentModels(final Class<?> clazz, final CodecRegistry codecRegistry,
+            final List<Type> typeParameters) {
+        var recordComponents = clazz.getRecordComponents();
+        var componentModels = new ArrayList<ComponentModel>(recordComponents.length);
+        for (int i = 0; i < recordComponents.length; i++) {
+            componentModels.add(new ComponentModel(typeParameters, recordComponents[i], codecRegistry, i));
+        }
+        return componentModels;
+    }
+
+    @Nullable
+    private static ComponentModel getComponentModelForId(final Class<?> clazz, final List<ComponentModel> componentModels) {
+        List<ComponentModel> componentModelsForId = componentModels.stream()
+                .filter(componentModel -> componentModel.getFieldName().equals("_id")).toList();
+        if (componentModelsForId.size() > 1) {
+            throw new CodecConfigurationException(format("Record %s has more than one _id component", clazz.getName()));
+        } else {
+            return componentModelsForId.stream().findFirst().orElse(null);
+        }
+    }
+
+    private static <T> Constructor<T> getCanonicalConstructor(final Class<T> clazz) {
+        try {
+            return clazz.getDeclaredConstructor(Arrays.stream(clazz.getRecordComponents())
+                    .map(RecordComponent::getType)
+                    .toArray(Class[]::new));
+        } catch (NoSuchMethodException e) {
+            throw new AssertionError(format("Could not find canonical constructor for record %s", clazz.getName()));
+        }
+    }
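+
+    // CodecRegistry lookups are keyed on reference types, so primitive component types are mapped
+    // to their wrapper classes before the registry is consulted.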
+
+    private static Class<?> toWrapper(final Class<?> clazz) {
+        if (clazz == Integer.TYPE) {
+            return Integer.class;
+        } else if (clazz == Long.TYPE) {
+            return Long.class;
+        } else if (clazz == Boolean.TYPE) {
+            return Boolean.class;
+        } else if (clazz == Byte.TYPE) {
+            return Byte.class;
+        } else if (clazz == Character.TYPE) {
+            return Character.class;
+        } else if (clazz == Float.TYPE) {
+            return Float.class;
+        } else if (clazz == Double.TYPE) {
+            return Double.class;
+        } else if (clazz == Short.TYPE) {
+            return Short.class;
+        } else {
+            return clazz;
+        }
+    }
+}
diff --git a/bson-record-codec/src/main/org/bson/codecs/record/RecordCodecProvider.java b/bson-record-codec/src/main/org/bson/codecs/record/RecordCodecProvider.java
new file mode 100644
index 00000000000..e41f1fb2bb1
--- /dev/null
+++ b/bson-record-codec/src/main/org/bson/codecs/record/RecordCodecProvider.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.record;
+
+import org.bson.codecs.Codec;
+import org.bson.codecs.configuration.CodecProvider;
+import org.bson.codecs.configuration.CodecRegistry;
+
+import java.lang.reflect.Type;
+import java.util.List;
+
+import static org.bson.assertions.Assertions.assertNotNull;
+
+/**
+ * Provides Codec instances for Java records.
+ *
+ * @since 4.6
+ * @see Record
+ */
+public final class RecordCodecProvider implements CodecProvider {
+    @Override
+    public <T> Codec<T> get(final Class<T> clazz, final CodecRegistry registry) {
+        return get(clazz, List.of(), registry);
+    }
+
+    @Override
+    public <T> Codec<T> get(final Class<T> clazz, final List<Type> typeArguments, final CodecRegistry registry) {
+        if (!assertNotNull(clazz).isRecord()) {
+            return null;
+        }
+        @SuppressWarnings({"unchecked", "rawtypes"})
+        Codec<T> result = new RecordCodec(clazz, assertNotNull(typeArguments), registry);
+        return result;
+    }
+}
diff --git a/bson-record-codec/src/main/org/bson/codecs/record/package-info.java b/bson-record-codec/src/main/org/bson/codecs/record/package-info.java
new file mode 100644
index 00000000000..b1d179eb3f6
--- /dev/null
+++ b/bson-record-codec/src/main/org/bson/codecs/record/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains classes for encoding and decoding Java records. + */ +package org.bson.codecs.record; diff --git a/bson-record-codec/src/test/unit/org/bson/codecs/record/RecordCodecProviderTest.java b/bson-record-codec/src/test/unit/org/bson/codecs/record/RecordCodecProviderTest.java new file mode 100644 index 00000000000..5f71169f0c8 --- /dev/null +++ b/bson-record-codec/src/test/unit/org/bson/codecs/record/RecordCodecProviderTest.java @@ -0,0 +1,62 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.record; + +import com.mongodb.MongoClientSettings; +import org.bson.codecs.record.samples.TestRecordWithPojoAnnotations; +import org.bson.conversions.Bson; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + + +public class RecordCodecProviderTest { + + @Test + public void shouldReturnNullForNonRecord() { + var provider = new RecordCodecProvider(); + + // expect + assertNull(provider.get(String.class, Bson.DEFAULT_CODEC_REGISTRY)); + } + + @Test + public void shouldReturnRecordCodecForRecord() { + var provider = new RecordCodecProvider(); + + // when + var codec = provider.get(TestRecordWithPojoAnnotations.class, Bson.DEFAULT_CODEC_REGISTRY); + + // then + assertTrue(codec instanceof RecordCodec); + var recordCodec = (RecordCodec) codec; + assertEquals(TestRecordWithPojoAnnotations.class, recordCodec.getEncoderClass()); + } + + @Test + public void shouldReturnRecordCodecForRecordUsingDefaultRegistry() { + // when + var codec = MongoClientSettings.getDefaultCodecRegistry().get(TestRecordWithPojoAnnotations.class); + + // then + assertTrue(codec instanceof RecordCodec); + var recordCodec = (RecordCodec) codec; + assertEquals(TestRecordWithPojoAnnotations.class, recordCodec.getEncoderClass()); + } +} diff --git a/bson-record-codec/src/test/unit/org/bson/codecs/record/RecordCodecTest.java b/bson-record-codec/src/test/unit/org/bson/codecs/record/RecordCodecTest.java new file mode 100644 index 00000000000..c7adef45bc8 --- /dev/null +++ b/bson-record-codec/src/test/unit/org/bson/codecs/record/RecordCodecTest.java @@ -0,0 +1,419 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.record; + +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonDocumentReader; +import org.bson.BsonDocumentWriter; +import org.bson.BsonDouble; +import org.bson.BsonInt32; +import org.bson.BsonInvalidOperationException; +import org.bson.BsonNull; +import org.bson.BsonObjectId; +import org.bson.BsonString; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.configuration.CodecConfigurationException; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.codecs.record.samples.TestRecordEmbedded; +import org.bson.codecs.record.samples.TestRecordParameterized; +import org.bson.codecs.record.samples.TestRecordWithIllegalBsonCreatorOnConstructor; +import org.bson.codecs.record.samples.TestRecordWithIllegalBsonCreatorOnMethod; +import org.bson.codecs.record.samples.TestRecordWithIllegalBsonDiscriminatorOnRecord; +import org.bson.codecs.record.samples.TestRecordWithIllegalBsonExtraElementsOnAccessor; +import org.bson.codecs.record.samples.TestRecordWithIllegalBsonExtraElementsOnComponent; +import org.bson.codecs.record.samples.TestRecordWithIllegalBsonIdOnAccessor; +import org.bson.codecs.record.samples.TestRecordWithIllegalBsonIdOnCanonicalConstructor; +import org.bson.codecs.record.samples.TestRecordWithIllegalBsonIgnoreOnAccessor; +import org.bson.codecs.record.samples.TestRecordWithIllegalBsonIgnoreOnComponent; +import org.bson.codecs.record.samples.TestRecordWithIllegalBsonPropertyOnAccessor; +import org.bson.codecs.record.samples.TestRecordWithIllegalBsonPropertyOnCanonicalConstructor; +import org.bson.codecs.record.samples.TestRecordWithIllegalBsonRepresentationOnAccessor; +import org.bson.codecs.record.samples.TestRecordWithListOfListOfRecords; +import org.bson.codecs.record.samples.TestRecordWithListOfRecords; +import org.bson.codecs.record.samples.TestRecordWithMapOfListOfRecords; +import org.bson.codecs.record.samples.TestRecordWithMapOfRecords; +import org.bson.codecs.record.samples.TestRecordWithNestedParameterized; +import org.bson.codecs.record.samples.TestRecordWithNestedParameterizedRecord; +import org.bson.codecs.record.samples.TestRecordWithNullableField; +import org.bson.codecs.record.samples.TestRecordWithParameterizedRecord; +import org.bson.codecs.record.samples.TestRecordWithPojoAnnotations; +import org.bson.codecs.record.samples.TestSelfReferentialHolderRecord; +import org.bson.codecs.record.samples.TestSelfReferentialRecord; +import org.bson.conversions.Bson; +import org.bson.types.ObjectId; +import org.junit.jupiter.api.Test; + +import java.util.List; +import java.util.Map; + +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class RecordCodecTest { + + @Test + public void testRecordWithPojoAnnotations() { + var codec = 
createRecordCodec(TestRecordWithPojoAnnotations.class, Bson.DEFAULT_CODEC_REGISTRY); + var identifier = new ObjectId(); + var testRecord = new TestRecordWithPojoAnnotations("Lucas", 14, List.of("soccer", "basketball"), identifier.toHexString()); + + var document = new BsonDocument(); + var writer = new BsonDocumentWriter(document); + + // when + codec.encode(writer, testRecord, EncoderContext.builder().build()); + + // then + assertEquals( + new BsonDocument("_id", new BsonObjectId(identifier)) + .append("name", new BsonString("Lucas")) + .append("hobbies", new BsonArray(List.of(new BsonString("soccer"), new BsonString("basketball")))) + .append("a", new BsonInt32(14)), + document); + assertEquals("_id", document.getFirstKey()); + + // when + var decoded = codec.decode(new BsonDocumentReader(document), DecoderContext.builder().build()); + + // then + assertEquals(testRecord, decoded); + } + + @Test + public void testRecordWithNestedListOfRecords() { + var codec = createRecordCodec(TestRecordWithListOfRecords.class, + fromProviders(new RecordCodecProvider(), Bson.DEFAULT_CODEC_REGISTRY)); + var identifier = new ObjectId(); + var testRecord = new TestRecordWithListOfRecords(identifier, List.of(new TestRecordEmbedded("embedded"))); + + var document = new BsonDocument(); + var writer = new BsonDocumentWriter(document); + + // when + codec.encode(writer, testRecord, EncoderContext.builder().build()); + + // then + assertEquals( + new BsonDocument("_id", new BsonObjectId(identifier)) + .append("nestedRecords", new BsonArray(List.of(new BsonDocument("name", new BsonString("embedded"))))), + document); + assertEquals("_id", document.getFirstKey()); + + // when + var decoded = codec.decode(new BsonDocumentReader(document), DecoderContext.builder().build()); + + // then + assertEquals(testRecord, decoded); + } + + @Test + public void testRecordWithNestedListOfListOfRecords() { + var codec = createRecordCodec(TestRecordWithListOfListOfRecords.class, + fromProviders(new RecordCodecProvider(), Bson.DEFAULT_CODEC_REGISTRY)); + var identifier = new ObjectId(); + var testRecord = new TestRecordWithListOfListOfRecords(identifier, List.of(List.of(new TestRecordEmbedded("embedded")))); + + var document = new BsonDocument(); + var writer = new BsonDocumentWriter(document); + + // when + codec.encode(writer, testRecord, EncoderContext.builder().build()); + + // then + assertEquals( + new BsonDocument("_id", new BsonObjectId(identifier)) + .append("nestedRecords", + new BsonArray(List.of(new BsonArray(List.of(new BsonDocument("name", new BsonString("embedded"))))))), + document); + assertEquals("_id", document.getFirstKey()); + + // when + var decoded = codec.decode(new BsonDocumentReader(document), DecoderContext.builder().build()); + + // then + assertEquals(testRecord, decoded); + } + + @Test + public void testRecordWithNestedMapOfRecords() { + var codec = createRecordCodec(TestRecordWithMapOfRecords.class, + fromProviders(new RecordCodecProvider(), Bson.DEFAULT_CODEC_REGISTRY)); + var identifier = new ObjectId(); + var testRecord = new TestRecordWithMapOfRecords(identifier, + Map.of("first", new TestRecordEmbedded("embedded"))); + + var document = new BsonDocument(); + var writer = new BsonDocumentWriter(document); + + // when + codec.encode(writer, testRecord, EncoderContext.builder().build()); + + // then + assertEquals( + new BsonDocument("_id", new BsonObjectId(identifier)) + .append("nestedRecords", new BsonDocument("first", new BsonDocument("name", new BsonString("embedded")))), + document); + 
assertEquals("_id", document.getFirstKey()); + + // when + var decoded = codec.decode(new BsonDocumentReader(document), DecoderContext.builder().build()); + + // then + assertEquals(testRecord, decoded); + } + + @Test + public void testRecordWithNestedMapOfListRecords() { + var codec = createRecordCodec(TestRecordWithMapOfListOfRecords.class, + fromProviders(new RecordCodecProvider(), Bson.DEFAULT_CODEC_REGISTRY)); + var identifier = new ObjectId(); + var testRecord = new TestRecordWithMapOfListOfRecords(identifier, + Map.of("first", List.of(new TestRecordEmbedded("embedded")))); + + var document = new BsonDocument(); + var writer = new BsonDocumentWriter(document); + + // when + codec.encode(writer, testRecord, EncoderContext.builder().build()); + + // then + assertEquals( + new BsonDocument("_id", new BsonObjectId(identifier)) + .append("nestedRecords", + new BsonDocument("first", + new BsonArray(List.of(new BsonDocument("name", new BsonString("embedded")))))), + document); + assertEquals("_id", document.getFirstKey()); + + // when + var decoded = codec.decode(new BsonDocumentReader(document), DecoderContext.builder().build()); + + // then + assertEquals(testRecord, decoded); + } + + @Test + public void testRecordWithNestedParameterizedRecord() { + var codec = createRecordCodec(TestRecordWithParameterizedRecord.class, + fromProviders(new RecordCodecProvider(), Bson.DEFAULT_CODEC_REGISTRY)); + var identifier = new ObjectId(); + var testRecord = new TestRecordWithParameterizedRecord(identifier, + new TestRecordParameterized<>(42.0, List.of(new TestRecordEmbedded("embedded")))); + + var document = new BsonDocument(); + var writer = new BsonDocumentWriter(document); + + // when + codec.encode(writer, testRecord, EncoderContext.builder().build()); + + // then + assertEquals( + new BsonDocument("_id", new BsonObjectId(identifier)) + .append("parameterizedRecord", + new BsonDocument("number", new BsonDouble(42.0)) + .append("parameterizedList", + new BsonArray(List.of(new BsonDocument("name", new BsonString("embedded")))))), + document); + assertEquals("_id", document.getFirstKey()); + + // when + var decoded = codec.decode(new BsonDocumentReader(document), DecoderContext.builder().build()); + + // then + assertEquals(testRecord, decoded); + } + + @Test + public void testRecordWithNestedParameterizedRecordWithDifferentlyOrderedTypeParameters() { + var codec = createRecordCodec(TestRecordWithNestedParameterizedRecord.class, + fromProviders(new RecordCodecProvider(), Bson.DEFAULT_CODEC_REGISTRY)); + var identifier = new ObjectId(); + var testRecord = new TestRecordWithNestedParameterizedRecord(identifier, + new TestRecordWithNestedParameterized<>( + new TestRecordParameterized<>(42.0, List.of(new TestRecordEmbedded("p"))), + "o")); + + var document = new BsonDocument(); + var writer = new BsonDocumentWriter(document); + + // when + codec.encode(writer, testRecord, EncoderContext.builder().build()); + + // then + assertEquals( + new BsonDocument("_id", new BsonObjectId(identifier)) + .append("nestedParameterized", + new BsonDocument("parameterizedRecord", + new BsonDocument("number", new BsonDouble(42.0)) + .append("parameterizedList", + new BsonArray(List.of(new BsonDocument("name", new BsonString("p")))))) + .append("other", new BsonString("o"))), + document); + + // when + var decoded = codec.decode(new BsonDocumentReader(document), DecoderContext.builder().build()); + + // then + assertEquals(testRecord, decoded); + } + + @Test + public void testRecordWithNulls() { + var codec = 
createRecordCodec(TestRecordWithPojoAnnotations.class, Bson.DEFAULT_CODEC_REGISTRY); + var identifier = new ObjectId(); + var testRecord = new TestRecordWithPojoAnnotations(null, 14, null, identifier.toHexString()); + + var document = new BsonDocument(); + var writer = new BsonDocumentWriter(document); + + // when + codec.encode(writer, testRecord, EncoderContext.builder().build()); + + // then + assertEquals( + new BsonDocument("_id", new BsonObjectId(identifier)) + .append("a", new BsonInt32(14)), + document); + + // when + var decoded = codec.decode(new BsonDocumentReader(document), DecoderContext.builder().build()); + + // then + assertEquals(testRecord, decoded); + } + + @Test + public void testRecordWithStoredNulls() { + var codec = createRecordCodec(TestRecordWithNullableField.class, Bson.DEFAULT_CODEC_REGISTRY); + var identifier = new ObjectId(); + var testRecord = new TestRecordWithNullableField(identifier, null, 42); + + var document = new BsonDocument("_id", new BsonObjectId(identifier)) + .append("name", new BsonNull()) + .append("age", new BsonInt32(42)); + + // when + var decoded = codec.decode(new BsonDocumentReader(document), DecoderContext.builder().build()); + + // then + assertEquals(testRecord, decoded); + } + + @Test + public void testExceptionsWithStoredNullsOnPrimitiveField() { + var codec = createRecordCodec(TestRecordWithNullableField.class, Bson.DEFAULT_CODEC_REGISTRY); + + var document = new BsonDocument("_id", new BsonObjectId(new ObjectId())) + .append("name", new BsonString("Felix")) + .append("age", new BsonNull()); + + assertThrows(BsonInvalidOperationException.class, () -> + codec.decode(new BsonDocumentReader(document), DecoderContext.builder().build())); + } + + @Test + public void testRecordWithExtraData() { + var codec = createRecordCodec(TestRecordWithPojoAnnotations.class, Bson.DEFAULT_CODEC_REGISTRY); + var identifier = new ObjectId(); + var testRecord = new TestRecordWithPojoAnnotations("Felix", 13, List.of("rugby", "badminton"), identifier.toHexString()); + + var document = new BsonDocument("_id", new BsonObjectId(identifier)) + .append("nationality", new BsonString("British")) + .append("name", new BsonString("Felix")) + .append("hobbies", new BsonArray(List.of(new BsonString("rugby"), new BsonString("badminton")))) + .append("a", new BsonInt32(13)); + + // when + var decoded = codec.decode(new BsonDocumentReader(document), DecoderContext.builder().build()); + + // then + assertEquals(testRecord, decoded); + } + + @Test + public void testSelfReferentialRecords() { + var registry = fromProviders(new RecordCodecProvider(), Bson.DEFAULT_CODEC_REGISTRY); + var codec = registry.get(TestSelfReferentialHolderRecord.class); + var testRecord = new TestSelfReferentialHolderRecord("0", + new TestSelfReferentialRecord<>("1", + new TestSelfReferentialRecord<>("2", null, null), + new TestSelfReferentialRecord<>("3", null, null))); + + var document = new BsonDocument(); + + // when + codec.encode(new BsonDocumentWriter(document), testRecord, EncoderContext.builder().build()); + + // then + assertEquals( + new BsonDocument("_id", new BsonString("0")) + .append("selfReferentialRecord", + new BsonDocument("name", new BsonString("1")) + .append("left", new BsonDocument("name", new BsonString("2"))) + .append("right", new BsonDocument("name", new BsonString("3")))), + document); + + // when + var decoded = codec.decode(new BsonDocumentReader(document), DecoderContext.builder().build()); + + // then + assertEquals(testRecord, decoded); + } + + @Test + public void 
testExceptionsForAnnotationsNotOnRecordComponent() { + assertThrows(CodecConfigurationException.class, () -> + createRecordCodec(TestRecordWithIllegalBsonIdOnAccessor.class, Bson.DEFAULT_CODEC_REGISTRY)); + assertThrows(CodecConfigurationException.class, () -> + createRecordCodec(TestRecordWithIllegalBsonIdOnCanonicalConstructor.class, Bson.DEFAULT_CODEC_REGISTRY)); + + assertThrows(CodecConfigurationException.class, () -> + createRecordCodec(TestRecordWithIllegalBsonPropertyOnAccessor.class, Bson.DEFAULT_CODEC_REGISTRY)); + assertThrows(CodecConfigurationException.class, () -> + createRecordCodec(TestRecordWithIllegalBsonPropertyOnCanonicalConstructor.class, Bson.DEFAULT_CODEC_REGISTRY)); + + assertThrows(CodecConfigurationException.class, () -> + createRecordCodec(TestRecordWithIllegalBsonRepresentationOnAccessor.class, Bson.DEFAULT_CODEC_REGISTRY)); + } + + @Test + public void testExceptionsForUnsupportedAnnotations() { + assertThrows(CodecConfigurationException.class, () -> + createRecordCodec(TestRecordWithIllegalBsonDiscriminatorOnRecord.class, Bson.DEFAULT_CODEC_REGISTRY)); + + assertThrows(CodecConfigurationException.class, () -> + createRecordCodec(TestRecordWithIllegalBsonCreatorOnConstructor.class, Bson.DEFAULT_CODEC_REGISTRY)); + assertThrows(CodecConfigurationException.class, () -> + createRecordCodec(TestRecordWithIllegalBsonCreatorOnMethod.class, Bson.DEFAULT_CODEC_REGISTRY)); + + assertThrows(CodecConfigurationException.class, () -> + createRecordCodec(TestRecordWithIllegalBsonIgnoreOnComponent.class, Bson.DEFAULT_CODEC_REGISTRY)); + assertThrows(CodecConfigurationException.class, () -> + createRecordCodec(TestRecordWithIllegalBsonIgnoreOnAccessor.class, Bson.DEFAULT_CODEC_REGISTRY)); + assertThrows(CodecConfigurationException.class, () -> + createRecordCodec(TestRecordWithIllegalBsonExtraElementsOnComponent.class, Bson.DEFAULT_CODEC_REGISTRY)); + assertThrows(CodecConfigurationException.class, () -> + createRecordCodec(TestRecordWithIllegalBsonExtraElementsOnAccessor.class, Bson.DEFAULT_CODEC_REGISTRY)); + } + + private static <T extends Record> RecordCodec<T> createRecordCodec(final Class<T> clazz, final CodecRegistry registry) { + return new RecordCodec<>(clazz, List.of(), registry); + } +}
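The tests above all drive the same cycle: build a registry that chains RecordCodecProvider with the default codecs, encode through a BsonDocumentWriter, then decode through a BsonDocumentReader. A minimal sketch of that cycle outside JUnit follows; PersonRecord and RecordRoundTripSketch are hypothetical names invented for illustration and are not part of this patch.

    import org.bson.BsonDocument;
    import org.bson.BsonDocumentReader;
    import org.bson.BsonDocumentWriter;
    import org.bson.codecs.DecoderContext;
    import org.bson.codecs.EncoderContext;
    import org.bson.codecs.record.RecordCodecProvider;
    import org.bson.conversions.Bson;

    import static org.bson.codecs.configuration.CodecRegistries.fromProviders;

    public class RecordRoundTripSketch {
        // Hypothetical record, for illustration only
        public record PersonRecord(String name, int age) {}

        public static void main(String[] args) {
            var registry = fromProviders(new RecordCodecProvider(), Bson.DEFAULT_CODEC_REGISTRY);
            var codec = registry.get(PersonRecord.class);

            // encode: record components become BSON fields
            var document = new BsonDocument();
            codec.encode(new BsonDocumentWriter(document), new PersonRecord("Ada", 36),
                    EncoderContext.builder().build());

            // decode: the canonical constructor is invoked with the stored values
            var decoded = codec.decode(new BsonDocumentReader(document), DecoderContext.builder().build());
            System.out.println(decoded); // PersonRecord[name=Ada, age=36]
        }
    }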
diff --git a/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordEmbedded.java b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordEmbedded.java new file mode 100644 index 00000000000..b83f6bde2e2 --- /dev/null +++ b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordEmbedded.java @@ -0,0 +1,20 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.record.samples; + +public record TestRecordEmbedded(String name) { +} diff --git a/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordParameterized.java b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordParameterized.java new file mode 100644 index 00000000000..91f0c051b33 --- /dev/null +++ b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordParameterized.java @@ -0,0 +1,22 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.record.samples; + +import java.util.List; + +public record TestRecordParameterized<N extends Number, T>(N number, List<T> parameterizedList) { +} diff --git a/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonCreatorOnConstructor.java b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonCreatorOnConstructor.java new file mode 100644 index 00000000000..d9db5adb149 --- /dev/null +++ b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonCreatorOnConstructor.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.record.samples; + +import org.bson.codecs.pojo.annotations.BsonCreator; + +public record TestRecordWithIllegalBsonCreatorOnConstructor(String name) { + @SuppressWarnings("RedundantRecordConstructor") + @BsonCreator + public TestRecordWithIllegalBsonCreatorOnConstructor(final String name) { + this.name = name; + } +} diff --git a/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonCreatorOnMethod.java b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonCreatorOnMethod.java new file mode 100644 index 00000000000..707162ced9c --- /dev/null +++ b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonCreatorOnMethod.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.record.samples; + +import org.bson.codecs.pojo.annotations.BsonCreator; + +public record TestRecordWithIllegalBsonCreatorOnMethod(String name) { + @BsonCreator + public static TestRecordWithIllegalBsonCreatorOnMethod create(final String name) { + return new TestRecordWithIllegalBsonCreatorOnMethod(name); + } +} diff --git a/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonDiscriminatorOnRecord.java b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonDiscriminatorOnRecord.java new file mode 100644 index 00000000000..4d6fa0995b9 --- /dev/null +++ b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonDiscriminatorOnRecord.java @@ -0,0 +1,23 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.record.samples; + +import org.bson.codecs.pojo.annotations.BsonDiscriminator; + +@BsonDiscriminator +public record TestRecordWithIllegalBsonDiscriminatorOnRecord(String name) { +} diff --git a/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonExtraElementsOnAccessor.java b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonExtraElementsOnAccessor.java new file mode 100644 index 00000000000..54f8489e388 --- /dev/null +++ b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonExtraElementsOnAccessor.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs.record.samples; + +import org.bson.codecs.pojo.annotations.BsonExtraElements; + +public record TestRecordWithIllegalBsonExtraElementsOnAccessor(String name) { + @Override + @BsonExtraElements + public String name() { + return name; + } +} diff --git a/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonExtraElementsOnComponent.java b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonExtraElementsOnComponent.java new file mode 100644 index 00000000000..e7f919c90fc --- /dev/null +++ b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonExtraElementsOnComponent.java @@ -0,0 +1,22 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.record.samples; + +import org.bson.codecs.pojo.annotations.BsonExtraElements; + +public record TestRecordWithIllegalBsonExtraElementsOnComponent(@BsonExtraElements String name) { +} diff --git a/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonIdOnAccessor.java b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonIdOnAccessor.java new file mode 100644 index 00000000000..b61162d6489 --- /dev/null +++ b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonIdOnAccessor.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.record.samples; + +import org.bson.codecs.pojo.annotations.BsonId; + +public record TestRecordWithIllegalBsonIdOnAccessor(String name) { + + @Override + @BsonId + public String name() { + return name; + } +} diff --git a/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonIdOnCanonicalConstructor.java b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonIdOnCanonicalConstructor.java new file mode 100644 index 00000000000..2c4d5bc18d7 --- /dev/null +++ b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonIdOnCanonicalConstructor.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.record.samples; + +import org.bson.codecs.pojo.annotations.BsonId; + +public record TestRecordWithIllegalBsonIdOnCanonicalConstructor(String name) { + public TestRecordWithIllegalBsonIdOnCanonicalConstructor(@BsonId final String name) { + this.name = name; + } +} diff --git a/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonIgnoreOnAccessor.java b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonIgnoreOnAccessor.java new file mode 100644 index 00000000000..12a04cbc724 --- /dev/null +++ b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonIgnoreOnAccessor.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.record.samples; + +import org.bson.codecs.pojo.annotations.BsonIgnore; + +public record TestRecordWithIllegalBsonIgnoreOnAccessor(String name) { + + @Override + @BsonIgnore + public String name() { + return name; + } +} diff --git a/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonIgnoreOnComponent.java b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonIgnoreOnComponent.java new file mode 100644 index 00000000000..5d18e3fe48d --- /dev/null +++ b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonIgnoreOnComponent.java @@ -0,0 +1,22 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs.record.samples; + +import org.bson.codecs.pojo.annotations.BsonIgnore; + +public record TestRecordWithIllegalBsonIgnoreOnComponent(@BsonIgnore String name) { +} diff --git a/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonPropertyOnAccessor.java b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonPropertyOnAccessor.java new file mode 100644 index 00000000000..e4a2ad204ad --- /dev/null +++ b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonPropertyOnAccessor.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.record.samples; + +import org.bson.codecs.pojo.annotations.BsonProperty; + +public record TestRecordWithIllegalBsonPropertyOnAccessor(String name) { + + @Override + @BsonProperty("n") + public String name() { + return name; + } +} diff --git a/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonPropertyOnCanonicalConstructor.java b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonPropertyOnCanonicalConstructor.java new file mode 100644 index 00000000000..9465505bf29 --- /dev/null +++ b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonPropertyOnCanonicalConstructor.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.record.samples; + +import org.bson.codecs.pojo.annotations.BsonProperty; + +public record TestRecordWithIllegalBsonPropertyOnCanonicalConstructor(String name) { + + public TestRecordWithIllegalBsonPropertyOnCanonicalConstructor(@BsonProperty("n") final String name) { + this.name = name; + } +} diff --git a/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonRepresentationOnAccessor.java b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonRepresentationOnAccessor.java new file mode 100644 index 00000000000..d453f4e1e85 --- /dev/null +++ b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithIllegalBsonRepresentationOnAccessor.java @@ -0,0 +1,29 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.record.samples; + +import org.bson.BsonType; +import org.bson.codecs.pojo.annotations.BsonRepresentation; + +public record TestRecordWithIllegalBsonRepresentationOnAccessor(String name) { + + @Override + @BsonRepresentation(value = BsonType.INT32) + public String name() { + return name; + } +} diff --git a/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithListOfListOfRecords.java b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithListOfListOfRecords.java new file mode 100644 index 00000000000..65012c32fbb --- /dev/null +++ b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithListOfListOfRecords.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.record.samples; + +import org.bson.codecs.pojo.annotations.BsonId; +import org.bson.types.ObjectId; + +import java.util.List; + +public record TestRecordWithListOfListOfRecords(@BsonId ObjectId id, List<List<TestRecordEmbedded>> nestedRecords) { +}
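As the earlier testRecordWithNestedListOfListOfRecords asserts, a List<List<TestRecordEmbedded>> component is encoded as a BsonArray of BsonArrays of documents. A small sketch of building that expected shape by hand:

    import java.util.List;

    import org.bson.BsonArray;
    import org.bson.BsonDocument;
    import org.bson.BsonString;

    class NestedListShapeSketch {
        static BsonArray expected() {
            // one inner list holding a single embedded record
            var inner = new BsonArray(List.of(new BsonDocument("name", new BsonString("embedded"))));
            return new BsonArray(List.of(inner)); // renders as [[{"name": "embedded"}]]
        }
    }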
diff --git a/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithListOfRecords.java b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithListOfRecords.java new file mode 100644 index 00000000000..459186e863c --- /dev/null +++ b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithListOfRecords.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.record.samples; + +import org.bson.codecs.pojo.annotations.BsonId; +import org.bson.types.ObjectId; + +import java.util.List; + +public record TestRecordWithListOfRecords(@BsonId ObjectId id, List<TestRecordEmbedded> nestedRecords) { +} diff --git a/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithMapOfListOfRecords.java b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithMapOfListOfRecords.java new file mode 100644 index 00000000000..b9b220b9579 --- /dev/null +++ b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithMapOfListOfRecords.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.record.samples; + +import org.bson.codecs.pojo.annotations.BsonId; +import org.bson.types.ObjectId; + +import java.util.List; +import java.util.Map; + +public record TestRecordWithMapOfListOfRecords(@BsonId ObjectId id, Map<String, List<TestRecordEmbedded>> nestedRecords) { +} diff --git a/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithMapOfRecords.java b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithMapOfRecords.java new file mode 100644 index 00000000000..5989fdbb085 --- /dev/null +++ b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithMapOfRecords.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.record.samples; + +import org.bson.codecs.pojo.annotations.BsonId; +import org.bson.types.ObjectId; + +import java.util.Map; + +public record TestRecordWithMapOfRecords(@BsonId ObjectId id, Map<String, TestRecordEmbedded> nestedRecords) { +}
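Map-valued components take the other route: the map keys become field names of a nested document, so a Map<String, TestRecordEmbedded> encodes as a document of documents rather than an array. A sketch of the shape testRecordWithNestedMapOfRecords expects:

    import org.bson.BsonDocument;
    import org.bson.BsonString;

    class MapShapeSketch {
        static BsonDocument expected() {
            // Map.of("first", new TestRecordEmbedded("embedded")) becomes {"first": {"name": "embedded"}}
            return new BsonDocument("first", new BsonDocument("name", new BsonString("embedded")));
        }
    }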
diff --git a/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithNestedParameterized.java b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithNestedParameterized.java new file mode 100644 index 00000000000..c760e2f7f73 --- /dev/null +++ b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithNestedParameterized.java @@ -0,0 +1,22 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.record.samples; + +public record TestRecordWithNestedParameterized<A, B, C extends Number>( + TestRecordParameterized<C, A> parameterizedRecord, + B other) { +} diff --git a/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithNestedParameterizedRecord.java b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithNestedParameterizedRecord.java new file mode 100644 index 00000000000..8a992f13a18 --- /dev/null +++ b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithNestedParameterizedRecord.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.record.samples; + +import org.bson.codecs.pojo.annotations.BsonId; +import org.bson.types.ObjectId; + +public record TestRecordWithNestedParameterizedRecord( + @BsonId ObjectId id, + TestRecordWithNestedParameterized<TestRecordEmbedded, String, Double> nestedParameterized) { +}
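The deliberately reordered type parameters force the codec to substitute type arguments positionally rather than by name: with the signatures as reconstructed above, A, B and C bind to TestRecordEmbedded, String and Double, so the nested component resolves to TestRecordParameterized<Double, TestRecordEmbedded>. A sketch of a fully instantiated value, mirroring the earlier test and assuming those reconstructed type parameters:

    import java.util.List;

    import org.bson.codecs.record.samples.TestRecordEmbedded;
    import org.bson.codecs.record.samples.TestRecordParameterized;
    import org.bson.codecs.record.samples.TestRecordWithNestedParameterized;
    import org.bson.codecs.record.samples.TestRecordWithNestedParameterizedRecord;
    import org.bson.types.ObjectId;

    class ParameterizedSketch {
        static TestRecordWithNestedParameterizedRecord sample() {
            // A = TestRecordEmbedded, B = String, C = Double
            var nested = new TestRecordWithNestedParameterized<>(
                    new TestRecordParameterized<>(1.5, List.of(new TestRecordEmbedded("leaf"))),
                    "label");
            return new TestRecordWithNestedParameterizedRecord(new ObjectId(), nested);
        }
    }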
diff --git a/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithNullableField.java b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithNullableField.java new file mode 100644 index 00000000000..f2329c8170e --- /dev/null +++ b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithNullableField.java @@ -0,0 +1,23 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.record.samples; + +import org.bson.codecs.pojo.annotations.BsonId; +import org.bson.types.ObjectId; + +public record TestRecordWithNullableField(@BsonId ObjectId id, String name, int age) { +} diff --git a/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithParameterizedRecord.java b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithParameterizedRecord.java new file mode 100644 index 00000000000..fc8d1feee39 --- /dev/null +++ b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithParameterizedRecord.java @@ -0,0 +1,24 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.record.samples; + +import org.bson.codecs.pojo.annotations.BsonId; +import org.bson.types.ObjectId; + +public record TestRecordWithParameterizedRecord(@BsonId ObjectId id, + TestRecordParameterized<Double, TestRecordEmbedded> parameterizedRecord) { +}
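TestRecordWithNullableField above backs the null-handling tests earlier in this patch: a stored BsonNull decodes to a null name, while a BsonNull against the primitive age component makes decoding throw BsonInvalidOperationException. A sketch of both document shapes:

    import org.bson.BsonDocument;
    import org.bson.BsonInt32;
    import org.bson.BsonNull;
    import org.bson.BsonObjectId;
    import org.bson.BsonString;
    import org.bson.types.ObjectId;

    class NullShapeSketch {
        static BsonDocument decodable() {
            // name is a nullable String, so BsonNull decodes to null
            return new BsonDocument("_id", new BsonObjectId(new ObjectId()))
                    .append("name", new BsonNull())
                    .append("age", new BsonInt32(42));
        }

        static BsonDocument failing() {
            // age is a primitive int; decoding BsonNull here throws BsonInvalidOperationException
            return new BsonDocument("_id", new BsonObjectId(new ObjectId()))
                    .append("name", new BsonString("Felix"))
                    .append("age", new BsonNull());
        }
    }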
diff --git a/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithPojoAnnotations.java b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithPojoAnnotations.java new file mode 100644 index 00000000000..650d3b8de0d --- /dev/null +++ b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestRecordWithPojoAnnotations.java @@ -0,0 +1,57 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.record.samples; + +import org.bson.BsonType; +import org.bson.codecs.pojo.annotations.BsonId; +import org.bson.codecs.pojo.annotations.BsonProperty; +import org.bson.codecs.pojo.annotations.BsonRepresentation; + +import java.util.List; + +public record TestRecordWithPojoAnnotations(String name, + @BsonProperty("a") int age, + List<String> hobbies, + @BsonRepresentation(BsonType.OBJECT_ID) @BsonId String identifier) { + + public TestRecordWithPojoAnnotations(final String name, final int age, final List<String> hobbies, final String identifier) { + this.name = name; + this.age = age; + this.hobbies = hobbies; + this.identifier = identifier; + } + + @Override + public String name() { + return name; + } + + @Override + public int age() { + return age; + } + + @Override + public List<String> hobbies() { + return hobbies; + } + + @Override + public String identifier() { + return identifier; + } +} diff --git a/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestSelfReferentialHolderRecord.java b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestSelfReferentialHolderRecord.java new file mode 100644 index 00000000000..557243d3f50 --- /dev/null +++ b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestSelfReferentialHolderRecord.java @@ -0,0 +1,23 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.record.samples; + +import org.bson.codecs.pojo.annotations.BsonId; + +public record TestSelfReferentialHolderRecord(@BsonId String id, + TestSelfReferentialRecord<String> selfReferentialRecord) { +}
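This holder pairs with the generic TestSelfReferentialRecord defined next to model a small binary tree; as testSelfReferentialRecords shows, null children are simply omitted from the encoded document rather than written as BsonNull. A sketch of the two-level tree used there:

    import org.bson.codecs.record.samples.TestSelfReferentialHolderRecord;
    import org.bson.codecs.record.samples.TestSelfReferentialRecord;

    class TreeSketch {
        static TestSelfReferentialHolderRecord sample() {
            // leaves have null left/right children; those fields are absent from the BSON
            var left = new TestSelfReferentialRecord<>("2", null, null);
            var right = new TestSelfReferentialRecord<>("3", null, null);
            return new TestSelfReferentialHolderRecord("0",
                    new TestSelfReferentialRecord<>("1", left, right));
        }
    }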
diff --git a/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestSelfReferentialRecord.java b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestSelfReferentialRecord.java new file mode 100644 index 00000000000..5f097854670 --- /dev/null +++ b/bson-record-codec/src/test/unit/org/bson/codecs/record/samples/TestSelfReferentialRecord.java @@ -0,0 +1,24 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.record.samples; + +import com.mongodb.lang.Nullable; + +public record TestSelfReferentialRecord<T>(T name, + @Nullable TestSelfReferentialRecord<T> left, + @Nullable TestSelfReferentialRecord<T> right) { +} diff --git a/bson-scala/build.gradle.kts b/bson-scala/build.gradle.kts new file mode 100644 index 00000000000..e23087ae314 --- /dev/null +++ b/bson-scala/build.gradle.kts @@ -0,0 +1,37 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import ProjectExtensions.configureJarManifest +import ProjectExtensions.configureMavenPublication + +plugins { id("project.scala") } + +base.archivesName.set("mongo-scala-bson") + +dependencies { api(project(path = ":bson", configuration = "default")) } + +configureMavenPublication { + pom { + name.set("Mongo Scala BSON Library") + description.set("A Scala wrapper / extension to the BSON library") + url.set("https://bsonspec.org") + } +} + +configureJarManifest { + attributes["Automatic-Module-Name"] = "org.mongodb.bson.scala" + attributes["Bundle-SymbolicName"] = "org.mongodb.scala.mongo-scala-bson" + attributes["Import-Package"] = "!scala.*,*" +} diff --git a/bson-scala/src/main/scala-2.13+/org/mongodb/scala/bson/collection/immutable/Document.scala b/bson-scala/src/main/scala-2.13+/org/mongodb/scala/bson/collection/immutable/Document.scala new file mode 100644 index 00000000000..31afbf30059 --- /dev/null +++ b/bson-scala/src/main/scala-2.13+/org/mongodb/scala/bson/collection/immutable/Document.scala @@ -0,0 +1,141 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.bson.collection.immutable + +import scala.jdk.CollectionConverters._ +import scala.collection.mutable.ListBuffer +import scala.collection.{ mutable, Iterable, IterableOps, SpecificIterableFactory, StrictOptimizedIterableOps } +import org.mongodb.scala.bson._ +import org.mongodb.scala.bson.collection.BaseDocument + +/** + * The immutable [[Document]] companion object for easy creation.
+ */ +object Document extends SpecificIterableFactory[(String, BsonValue), Document] { + + import BsonMagnets._ + + /** + * Create a new empty Document + * @return a new Document + */ + def empty: Document = apply() + + /** + * Create a new Document + * @return a new Document + */ + def apply(): Document = new Document(new BsonDocument()) + + /** + * Parses a string in MongoDB Extended JSON format to a `Document` + * + * @param json the JSON string + * @return a corresponding `Document` object + * @see org.bson.json.JsonReader + * @see [[https://www.mongodb.com/docs/manual/reference/mongodb-extended-json/ MongoDB Extended JSON]] + */ + def apply(json: String): Document = new Document(BsonDocument(json)) + + /** + * Create a new document from the elems + * @param elems the key/value pairs that make up the Document. This can be any valid `(String, BsonValue)` pair that can be + * transformed into a [[BsonElement]] via [[BsonMagnets.CanBeBsonElement]] implicits and any [[BsonTransformer]]s that + * are in scope. + * @return a new Document consisting of key/value pairs given by `elems`. + */ + def apply(elems: CanBeBsonElement*): Document = { + val underlying = new BsonDocument() + elems.foreach(elem => underlying.put(elem.key, elem.value)) + new Document(underlying) + } + + /** + * Create a new document from the elems + * @param elems a sequence of key/values that make up the Document. This can be any valid sequence of `(String, BsonValue)` pairs that + * can be transformed into a sequence of [[BsonElement]]s via [[BsonMagnets.CanBeBsonElements]] implicits and any + * [[BsonTransformer]]s + * that are in scope. + * @return a new Document consisting of key/value pairs given by `elems`. + */ + def apply(elems: CanBeBsonElements): Document = { + val underlying = new BsonDocument() + elems.values.foreach(el => underlying.put(el.key, el.value)) + new Document(underlying) + } + + def builder: mutable.Builder[(String, BsonValue), Document] = ListBuffer[(String, BsonValue)]() mapResult fromSeq + + def fromSeq(ts: Seq[(String, BsonValue)]): Document = { + val underlying = new BsonDocument() + ts.foreach(kv => underlying.put(kv._1, kv._2)) + apply(underlying) + } + + override def newBuilder: mutable.Builder[(String, BsonValue), Document] = builder + override def fromSpecific(it: IterableOnce[(String, BsonValue)]): Document = fromSeq(it.iterator.toSeq) +} + +/** + * An immutable Document implementation. + * + * A strictly typed `Map[String, BsonValue]` like structure that traverses the elements in insertion order. Unlike native scala maps there + * is no variance in the value type and it always has to be a `BsonValue`. + * + * @param underlying the underlying BsonDocument which stores the data. + * + */ +case class Document(protected[scala] val underlying: BsonDocument) + extends BaseDocument[Document] + with IterableOps[(String, BsonValue), Iterable, Document] + with StrictOptimizedIterableOps[(String, BsonValue), Iterable, Document] { + + /** + * Creates a new immutable document + * @param underlying the underlying BsonDocument + * @return a new document + */ + protected[scala] def apply(underlying: BsonDocument) = new Document(underlying) + + /** + * Applies a function `f` to all elements of this document. + * + * @param f the function that is applied for its side-effect to every element. + * The result of function `f` is discarded. + * + * @tparam U the type parameter describing the result of function `f`. + * This result will always be ignored. Typically `U` is `Unit`, + * but this is not necessary.
+ * + */ + override def foreach[U](f: ((String, BsonValue)) => U): Unit = underlying.asScala foreach f + + // Mandatory overrides of `fromSpecific`, `newSpecificBuilder`, + // and `empty`, from `IterableOps` + override protected def fromSpecific(coll: IterableOnce[(String, BsonValue)]): Document = Document.fromSpecific(coll) + override protected def newSpecificBuilder: mutable.Builder[(String, BsonValue), Document] = Document.newBuilder + override def empty: Document = Document.empty + + // Overloading of `appended`, `prepended`, `appendedAll`, `prependedAll`, + // `map`, `flatMap` and `concat` to return a `Document` when possible + def concat(suffix: IterableOnce[(String, BsonValue)]): Document = strictOptimizedConcat(suffix, newSpecificBuilder) + // scalastyle:off method.name + @inline final def ++(suffix: IterableOnce[(String, BsonValue)]): Document = concat(suffix) + // scalastyle:on method.name + def map[B](f: ((String, BsonValue)) => (String, BsonValue)): Document = strictOptimizedMap(newSpecificBuilder, f) + +}
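Since the magnet-based apply methods are the main entry point to this immutable Document, a short usage sketch may help; it relies only on the BsonTransformer instances the library already provides for plain Scala values, and assumes the toJson helper inherited from BaseDocument:

    import org.mongodb.scala.bson.collection.immutable.Document

    object ImmutableDocumentSketch extends App {
      // tuples are lifted to BsonElements via BsonMagnets and implicit BsonTransformers
      val doc = Document("name" -> "MongoDB", "count" -> 1)

      // concat (and its alias ++) returns another immutable Document
      val bigger = doc ++ Document("ok" -> true)

      println(bigger.toJson()) // {"name": "MongoDB", "count": 1, "ok": true}
    }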
diff --git a/bson-scala/src/main/scala-2.13+/org/mongodb/scala/bson/collection/mutable/Document.scala b/bson-scala/src/main/scala-2.13+/org/mongodb/scala/bson/collection/mutable/Document.scala new file mode 100644 index 00000000000..86f11c5a8f7 --- /dev/null +++ b/bson-scala/src/main/scala-2.13+/org/mongodb/scala/bson/collection/mutable/Document.scala @@ -0,0 +1,284 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.bson.collection.mutable + +import org.mongodb.scala.bson._ +import org.mongodb.scala.bson.collection.BaseDocument + +import scala.jdk.CollectionConverters._ +import scala.collection._ +import scala.collection.mutable.ListBuffer + +/** + * Mutable [[Document]] companion object for easy creation. + */ +object Document extends SpecificIterableFactory[(String, BsonValue), Document] { + + import BsonMagnets._ + + /** + * Create a new empty Document + * @return a new Document + */ + def empty: Document = apply() + + /** + * Create a new Document + * @return a new Document + */ + def apply(): Document = Document(BsonDocument()) + + /** + * Parses a string in MongoDB Extended JSON format to a `Document` + * + * @param json the JSON string + * @return a corresponding `Document` object + * @see org.bson.json.JsonReader + * @see [[https://www.mongodb.com/docs/manual/reference/mongodb-extended-json/ MongoDB Extended JSON]] + */ + def apply(json: String): Document = Document(BsonDocument(json)) + + /** + * Create a new document from the elems + * @param elems the key/value pairs that make up the Document. This can be any valid `(String, BsonValue)` pair that can be + * transformed into a [[BsonElement]] via [[BsonMagnets.CanBeBsonElement]] implicits and any [[BsonTransformer]]s that are + * in scope. + * @return a new Document consisting of key/value pairs given by `elems`. + */ + def apply(elems: CanBeBsonElement*): Document = { + val underlying = new BsonDocument() + elems.foreach(elem => underlying.put(elem.key, elem.value)) + new Document(underlying) + } + + /** + * Create a new document from the elems + * @param elem a sequence of key/values that make up the Document. This can be any valid sequence of `(String, BsonValue)` pairs that + * can be transformed into a sequence of [[BsonElement]]s via [[BsonMagnets.CanBeBsonElements]] implicits and any + * [[BsonTransformer]]s + * that are in scope. + * @return a new Document consisting of key/value pairs given by `elem`. + */ + def apply(elem: CanBeBsonElements): Document = { + val underlying = new BsonDocument() + elem.values.foreach(kv => underlying.put(kv.key, kv.value)) + new Document(underlying) + } + + private def builder: mutable.Builder[(String, BsonValue), Document] = + ListBuffer[(String, BsonValue)]() mapResult fromSeq + + private def fromSeq(ts: Seq[(String, BsonValue)]): Document = { + val underlying = new BsonDocument() + ts.foreach(kv => underlying.put(kv._1, kv._2)) + apply(underlying) + } + + override def newBuilder: mutable.Builder[(String, BsonValue), Document] = builder + override def fromSpecific(it: IterableOnce[(String, BsonValue)]): Document = fromSeq(it.iterator.toSeq) +} + +/** + * A mutable Document implementation. + * + * A strictly typed `Map[String, BsonValue]` like structure that traverses the elements in insertion order. Unlike native scala maps there + * is no variance in the value type and it always has to be a `BsonValue`. + * + * @param underlying the underlying BsonDocument which stores the data. + */ +case class Document(protected[scala] val underlying: BsonDocument) + extends BaseDocument[Document] + with IterableOps[(String, BsonValue), Iterable, Document] + with StrictOptimizedIterableOps[(String, BsonValue), Iterable, Document] { + + import BsonMagnets._ + + /** + * Creates a new mutable document + * @param underlying the underlying BsonDocument + * @return a new document + */ + protected[scala] def apply(underlying: BsonDocument) = new Document(underlying) + + /** + * Applies a function `f` to all elements of this document. + * + * @param f the function that is applied for its side-effect to every element. + * The result of function `f` is discarded. + * + * @tparam U the type parameter describing the result of function `f`. + * This result will always be ignored. Typically `U` is `Unit`, + * but this is not necessary.
+ * + */ + override def foreach[U](f: ((String, BsonValue)) => U): Unit = underlying.asScala foreach f + + // Mandatory overrides of `fromSpecific`, `newSpecificBuilder`, + // and `empty`, from `IterableOps` + override protected def fromSpecific(coll: IterableOnce[(String, BsonValue)]): Document = Document.fromSpecific(coll) + override protected def newSpecificBuilder: mutable.Builder[(String, BsonValue), Document] = Document.newBuilder + override def empty: Document = Document.empty + + // Overloading of `appended`, `prepended`, `appendedAll`, `prependedAll`, + // `map`, `flatMap` and `concat` to return an `Document` when possible + def concat(suffix: IterableOnce[(String, BsonValue)]): Document = strictOptimizedConcat(suffix, newSpecificBuilder) + // scalastyle:off method.name + @inline final def ++(suffix: IterableOnce[(String, BsonValue)]): Document = concat(suffix) + // scalastyle:on method.name + def map[B](f: ((String, BsonValue)) => (String, BsonValue)): Document = strictOptimizedMap(newSpecificBuilder, f) + // TODO other operations + + // scalastyle:off method.name + /** + * Adds a new key/value pair to this document. + * If the document already contains a mapping for the key, it will be overridden by the new value. + * + * @param elems the key/value pair. This can be any valid `(String, BsonValue)` pair that can be transformed into a [[BsonElement]] + * via [[BsonMagnets.CanBeBsonElement]] implicits and any [[BsonTransformer]]s that are in scope. + * @return the document itself + */ + def +=(elems: CanBeBsonElement*): Document = { + elems.foreach(elem => underlying.put(elem.key, elem.value)) + this + } + + /** + * Adds all elements produced by a TraversableOnce to this document. + * + * @param elems a sequence of key/values that make up the Document. This can be any valid sequence of `(String, BsonValue)` pairs that + * can be transformed into a sequence of [[BsonElement]]s via [[BsonMagnets.CanBeBsonElements]] implicits and + * any [[BsonTransformer]]s + * that are in scope. + * @return the document itself. + */ + def ++=(elems: CanBeBsonElements): Document = { + elems.values.foreach(elem => underlying.put(elem.key, elem.value)) + this + } + // scalastyle:on method.name + + /** + * Adds a new key/value pair to this map. + * If the document already contains a mapping for the key, it will be overridden by the new value. + * + * @param key The key to update + * @param value The new value + */ + def update[B](key: String, value: B)(implicit transformer: BsonTransformer[B]): Unit = { this += ((key, value)) } + + /** + * Adds a new key/value pair to this document and optionally returns previously bound value. + * If the document already contains a mapping for the key, it will be overridden by the new value. + * + * @param key the key to update + * @param value the new value + * @return an option value containing the value associated with the key before the `put` operation was executed, or + * `None` if `key` was not defined in the document before. + */ + def put[B](key: String, value: B)(implicit transformer: BsonTransformer[B]): Option[BsonValue] = { + val r = get(key) + update(key, value) + r + } + + /** + * If given key is already in this document, returns associated value. + * + * Otherwise, computes value from given expression `op`, stores with key in document and returns that value. + * @param key the key to test + * @param op the computation yielding the value to associate with `key`, if `key` is previously unbound. 
+ * @return the value associated with key (either previously or as a result of executing the method). + */ + def getOrElseUpdate[B](key: String, op: => B)(implicit transformer: BsonTransformer[B]): BsonValue = { + if (get(key).isEmpty) this += ((key, op)) + this(key) + } + + // scalastyle:off method.name + /** + * Removes a key from this document. + * @param key the key to be removed + * @return the document itself. + */ + def -=(key: String): Document = { underlying.remove(key); this } + + /** + * Removes two or more elements from this document. + * + * @param elems the remaining elements to remove. + * @return the document itself + */ + def -=(elems: String*): Document = { + this --= elems + } + + /** + * Removes all elements produced by an iterator from this document. + * + * @param xs the iterator producing the elements to remove. + * @return the document itself + */ + def --=(xs: IterableOnce[String]): Document = { xs foreach -=; this } + // scalastyle:on method.name + + /** + * Removes a key from this document, returning the value associated previously with that key as an option. + * @param key the key to be removed + * @return an option value containing the value associated previously with `key`, + * or `None` if `key` was not defined in the document before. + */ + def remove(key: String): Option[BsonValue] = { + val r = get(key) + this -= key + r + } + + /** + * Retains only those mappings for which the predicate `p` returns `true`. + * + * @param p The test predicate + */ + def retain(p: (String, BsonValue) => Boolean): Document = { + for ((k, v) <- this) + if (!p(k, v)) underlying.remove(k) + this + } + + /** + * Removes all bindings from the document. After this operation has completed the document will be empty. + */ + def clear(): Unit = underlying.clear() + + /** + * Applies a transformation function to all values contained in this document. + * The transformation function produces new values from existing keys associated values. + * + * @param f the transformation to apply + * @return the document itself. + */ + def transform[B](f: (String, BsonValue) => B)(implicit transformer: BsonTransformer[B]): Document = { + this.foreach(kv => update(kv._1, f(kv._1, kv._2))) + this + } + + /** + * Copies the document and creates a new one + * + * @return a new document with a copy of the underlying BsonDocument + */ + def copy(): Document = Document(copyBsonDocument()) +} diff --git a/bson-scala/src/main/scala-2.13-/org/mongodb/scala/bson/collection/immutable/Document.scala b/bson-scala/src/main/scala-2.13-/org/mongodb/scala/bson/collection/immutable/Document.scala new file mode 100644 index 00000000000..4ed15c16b25 --- /dev/null +++ b/bson-scala/src/main/scala-2.13-/org/mongodb/scala/bson/collection/immutable/Document.scala @@ -0,0 +1,142 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.mongodb.scala.bson.collection.immutable
+
+import scala.collection.JavaConverters._
+import scala.collection.generic.CanBuildFrom
+import scala.collection.mutable.ListBuffer
+import scala.collection.{ mutable, Traversable, TraversableLike }
+
+import org.mongodb.scala.bson._
+import org.mongodb.scala.bson.collection.BaseDocument
+
+/**
+ * The immutable [[Document]] companion object for easy creation.
+ */
+object Document {
+
+  import BsonMagnets._
+
+  /**
+   * Create a new empty Document
+   * @return a new Document
+   */
+  def empty: Document = apply()
+
+  /**
+   * Create a new Document
+   * @return a new Document
+   */
+  def apply(): Document = new Document(new BsonDocument())
+
+  /**
+   * Parses a string in MongoDB Extended JSON format to a `Document`
+   *
+   * @param json the JSON string
+   * @return a corresponding `Document` object
+   * @see org.bson.json.JsonReader
+   * @see [[https://www.mongodb.com/docs/manual/reference/mongodb-extended-json/ MongoDB Extended JSON]]
+   */
+  def apply(json: String): Document = new Document(BsonDocument(json))
+
+  /**
+   * Create a new document from the elems
+   * @param elems the key/value pairs that make up the Document. This can be any valid `(String, BsonValue)` pair that can be
+   *              transformed into a [[BsonElement]] via [[BsonMagnets.CanBeBsonElement]] implicits and any [[BsonTransformer]]s that
+   *              are in scope.
+   * @return a new Document consisting of the key/value pairs given by `elems`.
+   */
+  def apply(elems: CanBeBsonElement*): Document = {
+    val underlying = new BsonDocument()
+    elems.foreach(elem => underlying.put(elem.key, elem.value))
+    new Document(underlying)
+  }
+
+  /**
+   * Create a new document from the elems
+   * @param elems a sequence of key/values that make up the Document. This can be any valid sequence of `(String, BsonValue)` pairs that
+   *              can be transformed into a sequence of [[BsonElement]]s via [[BsonMagnets.CanBeBsonElements]] implicits and any
+   *              [[BsonTransformer]]s
+   *              that are in scope.
+   * @return a new Document consisting of the key/value pairs given by `elems`.
+   */
+  def apply(elems: CanBeBsonElements): Document = {
+    val underlying = new BsonDocument()
+    elems.values.foreach(el => underlying.put(el.key, el.value))
+    new Document(underlying)
+  }
+
+  /**
+   * An implicit builder factory.
+   *
+   * @return a builder factory.
+   */
+  implicit def canBuildFrom: CanBuildFrom[Traversable[(String, BsonValue)], (String, BsonValue), Document] = {
+    new CanBuildFrom[Traversable[(String, BsonValue)], (String, BsonValue), Document] {
+      def apply(): mutable.Builder[(String, BsonValue), Document] = builder
+      def apply(from: Traversable[(String, BsonValue)]): mutable.Builder[(String, BsonValue), Document] = builder
+    }
+  }
+
+  def builder: mutable.Builder[(String, BsonValue), Document] = ListBuffer[(String, BsonValue)]() mapResult fromSeq
+
+  def fromSeq(ts: Seq[(String, BsonValue)]): Document = {
+    val underlying = new BsonDocument()
+    ts.foreach(kv => underlying.put(kv._1, kv._2))
+    apply(underlying)
+  }
+}
+
+/**
+ * An immutable Document implementation.
+ *
+ * A strictly typed `Map[String, BsonValue]`-like structure that traverses the elements in insertion order. Unlike native Scala maps there
+ * is no variance in the value type and it always has to be a `BsonValue`.
+ *
+ * @param underlying the underlying BsonDocument which stores the data.
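+ * @example A small sketch (keys and values are illustrative): transformations rebuild a fresh
+ *          immutable copy through the implicit `canBuildFrom` above, leaving the original intact:
+ * {{{
+ * val doc = Document("a" -> 1)
+ * val renamed = doc.map(kv => ("key_" + kv._1, kv._2)) // a new Document; doc is unchanged
+ * }}}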
+ * + */ +case class Document(protected[scala] val underlying: BsonDocument) + extends BaseDocument[Document] + with TraversableLike[(String, BsonValue), Document] { + + /** + * Creates a new immutable document + * @param underlying the underlying BsonDocument + * @return a new document + */ + protected[scala] def apply(underlying: BsonDocument) = new Document(underlying) + + /** + * Applies a function `f` to all elements of this document. + * + * @param f the function that is applied for its side-effect to every element. + * The result of function `f` is discarded. + * + * @tparam U the type parameter describing the result of function `f`. + * This result will always be ignored. Typically `U` is `Unit`, + * but this is not necessary. + * + */ + override def foreach[U](f: ((String, BsonValue)) => U): Unit = underlying.asScala foreach f + + /** + * Creates a new builder for this collection type. + */ + override def newBuilder: mutable.Builder[(String, BsonValue), Document] = Document.builder + +} diff --git a/bson-scala/src/main/scala-2.13-/org/mongodb/scala/bson/collection/mutable/Document.scala b/bson-scala/src/main/scala-2.13-/org/mongodb/scala/bson/collection/mutable/Document.scala new file mode 100644 index 00000000000..714ef73583f --- /dev/null +++ b/bson-scala/src/main/scala-2.13-/org/mongodb/scala/bson/collection/mutable/Document.scala @@ -0,0 +1,284 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.bson.collection.mutable + +import scala.collection.JavaConverters._ +import scala.collection._ +import scala.collection.generic.CanBuildFrom +import scala.collection.mutable.ListBuffer + +import org.mongodb.scala.bson._ +import org.mongodb.scala.bson.collection.BaseDocument + +/** + * Mutable [[Document]] companion object for easy creation. + */ +object Document { + + import BsonMagnets._ + + /** + * Create a new empty Document + * @return a new Document + */ + def empty: Document = apply() + + /** + * Create a new Document + * @return a new Document + */ + def apply(): Document = Document(BsonDocument()) + + /** + * Parses a string in MongoDB Extended JSON format to a `Document` + * + * @param json the JSON string + * @return a corresponding `Document` object + * @see org.bson.json.JsonReader + * @see [[https://www.mongodb.com/docs/manual/reference/mongodb-extended-json/ MongoDB Extended JSON]] + */ + def apply(json: String): Document = Document(BsonDocument(json)) + + /** + * Create a new document from the elems + * @param elems the key/value pairs that make up the Document. This can be any valid `(String, BsonValue)` pair that can be + * transformed into a [[BsonElement]] via [[BsonMagnets.CanBeBsonElement]] implicits and any [[BsonTransformer]]s that are + * in scope. + * @return a new Document consisting key/value pairs given by `elems`. 
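+   * @example A sketch (the values are illustrative); mixed native types work because each pair
+   *          goes through an implicit [[BsonTransformer]]:
+   * {{{
+   * val doc = Document("qty" -> 5, "tags" -> Seq("new", "sale"))
+   * }}}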
+   */
+  def apply(elems: CanBeBsonElement*): Document = {
+    val underlying = new BsonDocument()
+    elems.foreach(elem => underlying.put(elem.key, elem.value))
+    new Document(underlying)
+  }
+
+  /**
+   * Create a new document from the elems
+   * @param elem a sequence of key/values that make up the Document. This can be any valid sequence of `(String, BsonValue)` pairs that
+   *             can be transformed into a sequence of [[BsonElement]]s via [[BsonMagnets.CanBeBsonElements]] implicits and any
+   *             [[BsonTransformer]]s
+   *             that are in scope.
+   * @return a new Document consisting of the key/value pairs given by `elems`.
+   */
+  def apply(elem: CanBeBsonElements): Document = {
+    val underlying = new BsonDocument()
+    elem.values.foreach(kv => underlying.put(kv.key, kv.value))
+    new Document(underlying)
+  }
+
+  /**
+   * An implicit builder factory.
+   *
+   * @return a builder factory.
+   */
+  implicit def canBuildFrom: CanBuildFrom[Traversable[(String, BsonValue)], (String, BsonValue), Document] = {
+    new CanBuildFrom[Traversable[(String, BsonValue)], (String, BsonValue), Document] {
+      def apply(): mutable.Builder[(String, BsonValue), Document] = builder
+      def apply(from: Traversable[(String, BsonValue)]): mutable.Builder[(String, BsonValue), Document] = builder
+    }
+  }
+
+  private def builder: mutable.Builder[(String, BsonValue), Document] =
+    ListBuffer[(String, BsonValue)]() mapResult fromSeq
+
+  private def fromSeq(ts: Seq[(String, BsonValue)]): Document = {
+    val underlying = new BsonDocument()
+    ts.foreach(kv => underlying.put(kv._1, kv._2))
+    apply(underlying)
+  }
+}
+
+/**
+ * A mutable Document implementation.
+ *
+ * A strictly typed `Map[String, BsonValue]`-like structure that traverses the elements in insertion order. Unlike native Scala maps there
+ * is no variance in the value type and it always has to be a `BsonValue`.
+ *
+ * @param underlying the underlying BsonDocument which stores the data.
+ */
+case class Document(protected[scala] val underlying: BsonDocument)
+    extends BaseDocument[Document]
+    with TraversableLike[(String, BsonValue), Document]
+    with Mutable {
+
+  import BsonMagnets._
+
+  /**
+   * Creates a new mutable document
+   * @param underlying the underlying BsonDocument
+   * @return a new document
+   */
+  protected[scala] def apply(underlying: BsonDocument) = new Document(underlying)
+
+  /**
+   * Applies a function `f` to all elements of this document.
+   *
+   * @param f the function that is applied for its side-effect to every element.
+   *          The result of function `f` is discarded.
+   *
+   * @tparam U the type parameter describing the result of function `f`.
+   *           This result will always be ignored. Typically `U` is `Unit`,
+   *           but this is not necessary.
+   *
+   */
+  override def foreach[U](f: ((String, BsonValue)) => U): Unit = underlying.asScala foreach f
+
+  /**
+   * Creates a new builder for this collection type.
+   */
+  override def newBuilder: mutable.Builder[(String, BsonValue), Document] = Document.builder
+
+  // scalastyle:off method.name
+  /**
+   * Adds a new key/value pair to this document.
+   * If the document already contains a mapping for the key, it will be overridden by the new value.
+   *
+   * @param elems the key/value pair. This can be any valid `(String, BsonValue)` pair that can be transformed into a [[BsonElement]]
+   *              via [[BsonMagnets.CanBeBsonElement]] implicits and any [[BsonTransformer]]s that are in scope.
+ * @return the document itself + */ + def +=(elems: CanBeBsonElement*): Document = { + elems.foreach(elem => underlying.put(elem.key, elem.value)) + this + } + + /** + * Adds all elements produced by a TraversableOnce to this document. + * + * @param elems a sequence of key/values that make up the Document. This can be any valid sequence of `(String, BsonValue)` pairs that + * can be transformed into a sequence of [[BsonElement]]s via [[BsonMagnets.CanBeBsonElements]] implicits and + * any [[BsonTransformer]]s + * that are in scope. + * @return the document itself. + */ + def ++=(elems: CanBeBsonElements): Document = { + elems.values.foreach(elem => underlying.put(elem.key, elem.value)) + this + } + // scalastyle:on method.name + + /** + * Adds a new key/value pair to this map. + * If the document already contains a mapping for the key, it will be overridden by the new value. + * + * @param key The key to update + * @param value The new value + */ + def update[B](key: String, value: B)(implicit transformer: BsonTransformer[B]): Unit = { this += ((key, value)) } + + /** + * Adds a new key/value pair to this document and optionally returns previously bound value. + * If the document already contains a mapping for the key, it will be overridden by the new value. + * + * @param key the key to update + * @param value the new value + * @return an option value containing the value associated with the key before the `put` operation was executed, or + * `None` if `key` was not defined in the document before. + */ + def put[B](key: String, value: B)(implicit transformer: BsonTransformer[B]): Option[BsonValue] = { + val r = get(key) + update(key, value) + r + } + + /** + * If given key is already in this document, returns associated value. + * + * Otherwise, computes value from given expression `op`, stores with key in document and returns that value. + * @param key the key to test + * @param op the computation yielding the value to associate with `key`, if `key` is previously unbound. + * @return the value associated with key (either previously or as a result of executing the method). + */ + def getOrElseUpdate[B](key: String, op: => B)(implicit transformer: BsonTransformer[B]): BsonValue = { + if (get(key).isEmpty) this += ((key, op)) + this(key) + } + + // scalastyle:off method.name + /** + * Removes a key from this document. + * @param key the key to be removed + * @return the document itself. + */ + def -=(key: String): Document = { underlying.remove(key); this } + + /** + * Removes two or more elements from this document. + * + * @param elems the remaining elements to remove. + * @return the document itself + */ + def -=(elems: String*): Document = { + this --= elems + } + + /** + * Removes all elements produced by an iterator from this document. + * + * @param xs the iterator producing the elements to remove. + * @return the document itself + */ + def --=(xs: TraversableOnce[String]): Document = { xs foreach -=; this } + // scalastyle:on method.name + + /** + * Removes a key from this document, returning the value associated previously with that key as an option. + * @param key the key to be removed + * @return an option value containing the value associated previously with `key`, + * or `None` if `key` was not defined in the document before. + */ + def remove(key: String): Option[BsonValue] = { + val r = get(key) + this -= key + r + } + + /** + * Retains only those mappings for which the predicate `p` returns `true`. 
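+   * For instance (a sketch with illustrative keys):
+   * {{{
+   * val doc = Document("a" -> 1, "tmp" -> 2)
+   * doc.retain((key, _) => key != "tmp") // only "a" remains
+   * }}}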
+   *
+   * @param p The test predicate
+   */
+  def retain(p: (String, BsonValue) => Boolean): Document = {
+    for ((k, v) <- this)
+      if (!p(k, v)) underlying.remove(k)
+    this
+  }
+
+  /**
+   * Removes all bindings from the document. After this operation has completed the document will be empty.
+   */
+  def clear(): Unit = underlying.clear()
+
+  /**
+   * Applies a transformation function to all values contained in this document.
+   * The transformation function produces new values from the values associated with the existing keys.
+   *
+   * @param f the transformation to apply
+   * @return the document itself.
+   */
+  def transform[B](f: (String, BsonValue) => B)(implicit transformer: BsonTransformer[B]): Document = {
+    this.foreach(kv => update(kv._1, f(kv._1, kv._2)))
+    this
+  }
+
+  /**
+   * Copies the document and creates a new one
+   *
+   * @return a new document with a copy of the underlying BsonDocument
+   */
+  def copy(): Document = Document(copyBsonDocument())
+}
diff --git a/bson-scala/src/main/scala/org/mongodb/scala/bson/BsonElement.scala b/bson-scala/src/main/scala/org/mongodb/scala/bson/BsonElement.scala
new file mode 100644
index 00000000000..41d3112b1f1
--- /dev/null
+++ b/bson-scala/src/main/scala/org/mongodb/scala/bson/BsonElement.scala
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.bson
+
+/**
+ * A companion helper for a `BsonElement` - the mapping from a name to a BsonValue.
+ *
+ * Used by [[BsonMagnets]]; it polices the valid key/value pair types for [[Document]].
+ *
+ * @since 1.0
+ */
+object BsonElement {
+  def apply(key: String, value: BsonValue): BsonElement = new BsonElement(key, value)
+}
diff --git a/bson-scala/src/main/scala/org/mongodb/scala/bson/BsonMagnets.scala b/bson-scala/src/main/scala/org/mongodb/scala/bson/BsonMagnets.scala
new file mode 100644
index 00000000000..f74e64c15f1
--- /dev/null
+++ b/bson-scala/src/main/scala/org/mongodb/scala/bson/BsonMagnets.scala
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.bson
+
+import scala.language.implicitConversions
+
+/**
+ * A magnet pattern implementation that ensures user-provided native values can be converted into [[BsonValue]]s.
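+ *
+ * For example (a sketch; the pairs are illustrative), a single `Document(...)` call accepts
+ * heterogeneous native values because each pair is resolved here to a `CanBeBsonElement`:
+ * {{{
+ * Document("s" -> "text", "i" -> 42, "d" -> 1.5) // String, Int and Double transformers apply
+ * }}}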
+ * + * @since 1.0 + */ +protected[bson] object BsonMagnets { + + /** + * Represents any single [[BsonValue]] + * + * This is a `BsonValue` or any type of `T` that has a [[BsonTransformer]] in scope for the given type. + */ + sealed trait CanBeBsonValue { + val value: BsonValue + } + + /** + * Implicitly converts type `T` to a [[BsonValue]] as long as there is a [[BsonTransformer]] in scope for the given type. + * + * @param v the initial value + * @param transformer implicitly provided [[BsonTransformer]] that needs to be in scope for type `T` to be transformed into a [[BsonValue]] + * @tparam T the type of the initial value + * @return A CanBeBsonValue that holds the transformed [[BsonValue]] + */ + implicit def singleToCanBeBsonValue[T](v: T)(implicit transformer: BsonTransformer[T]): CanBeBsonValue = { + new CanBeBsonValue { + override val value: BsonValue = transformer(v) + } + } + + /** + * Represents a single [[BsonElement]] + * + * This is essentially a `(String, BsonValue)` key value pair. Any pair of `(String, T)` where type `T` has a [[BsonTransformer]] in + * scope into a [[BsonValue]] is also a valid pair. + */ + sealed trait CanBeBsonElement { + val bsonElement: BsonElement + + /** + * The key of the [[BsonElement]] + * @return the key + */ + def key: String = bsonElement.getName + + /** + * The value of the [[BsonElement]] + * @return the BsonValue + */ + def value: BsonValue = bsonElement.getValue + } + + /** + * Implicitly converts key/value tuple of type (String, T) into a `CanBeBsonElement` + * + * @param kv the key value pair + * @param transformer the implicit [[BsonTransformer]] for the value + * @tparam T the type of the value + * @return a CanBeBsonElement representing the key/value pair + */ + implicit def tupleToCanBeBsonElement[T]( + kv: (String, T) + )(implicit transformer: BsonTransformer[T]): CanBeBsonElement = { + new CanBeBsonElement { + override val bsonElement: BsonElement = BsonElement(kv._1, transformer(kv._2)) + } + } + + /** + * Represents a sequence of [[BsonElement]]s + * + * This is essentially a `Iterable[(String, BsonValue)]` of key value pairs. Any pair of `(String, T)` where type `T` has a + * [[BsonTransformer]] in scope into a [[BsonValue]] is also a valid pair. + */ + sealed trait CanBeBsonElements { + + /** + * The `BsonElement` sequence + */ + val values: Iterable[BsonElement] + } + + /** + * Implicitly converts any iterable of key/value pairs into a [[CanBeBsonElements]]. + * + * @param elems the iterable of key/value pairs + * @param transformer the implicit transformer for the values + * @tparam T the type of the values + * @return CanBeBsonElements representing the key/value pairs + */ + implicit def iterableToCanBeBsonElements[T]( + elems: Iterable[(String, T)] + )(implicit transformer: BsonTransformer[T]): CanBeBsonElements = + new CanBeBsonElements { + override val values: Iterable[BsonElement] = elems.map(kv => BsonElement(kv._1, transformer(kv._2))) + } + +} diff --git a/bson-scala/src/main/scala/org/mongodb/scala/bson/BsonTransformer.scala b/bson-scala/src/main/scala/org/mongodb/scala/bson/BsonTransformer.scala new file mode 100644 index 00000000000..c3a0a64dc45 --- /dev/null +++ b/bson-scala/src/main/scala/org/mongodb/scala/bson/BsonTransformer.scala @@ -0,0 +1,228 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.bson
+
+import java.util.Date
+
+import scala.annotation.implicitNotFound
+import scala.collection.JavaConverters._
+import scala.util.matching.Regex
+
+import org.mongodb.scala.bson.collection.immutable.{ Document => IDocument }
+import org.mongodb.scala.bson.collection.mutable.{ Document => MDocument }
+
+/**
+ * BsonTransformers allow the transformation of type `T` to their corresponding [[BsonValue]].
+ *
+ * Custom implementations can be written to implicitly convert a `T` into a [[BsonValue]] so it can be stored in a `Document`.
+ *
+ * @tparam T the type of value to be transformed into a [[BsonValue]].
+ * @since 1.0
+ */
+@implicitNotFound(
+  "No bson implicit transformer found for type ${T}. Implement or import an implicit BsonTransformer for this type."
+)
+trait BsonTransformer[-T] {
+
+  /**
+   * Convert the object into a [[BsonValue]]
+   */
+  def apply(value: T): BsonValue
+}
+
+/**
+ * Maps the following native Scala types to BsonValues:
+ *
+ * - `BsonValue => BsonValue`
+ * - `BigDecimal => BsonDecimal128`
+ * - `Boolean => BsonBoolean`
+ * - `String => BsonString`
+ * - `Array[Byte] => BsonBinary`
+ * - `Regex => BsonRegex`
+ * - `Date => BsonDateTime`
+ * - `Decimal128 => BsonDecimal128`
+ * - `ObjectId => BsonObjectId`
+ * - `Int => BsonInt32`
+ * - `Long => BsonInt64`
+ * - `Double => BsonDouble`
+ * - `None => BsonNull`
+ * - `immutable.Document => BsonDocument`
+ * - `mutable.Document => BsonDocument`
+ * - `Option[T] => BsonValue` where `T` is one of the above types
+ * - `Seq[(String, T)] => BsonDocument` where `T` is one of the above types
+ * - `Seq[T] => BsonArray` where `T` is one of the above types
+ */
+object BsonTransformer extends DefaultBsonTransformers {}
+
+/**
+ * Default BsonTransformers for native types.
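+ *
+ * Additional types can be supported by bringing a custom transformer into scope. A minimal sketch
+ * (the `java.time.Instant` support shown is hypothetical, not one of these defaults):
+ * {{{
+ * import java.time.Instant
+ * implicit object TransformInstant extends BsonTransformer[Instant] {
+ *   def apply(value: Instant): BsonDateTime = BsonDateTime(value.toEpochMilli)
+ * }
+ * }}}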
+ */ +trait DefaultBsonTransformers extends LowPrio { + + /** + * Noop transformer for `BsonValue`s + */ + implicit object TransformBsonValue extends BsonTransformer[BsonValue] { + def apply(value: BsonValue): BsonValue = value + } + + /** + * Transforms `BigDecimal` to `BsonDecimal128` + */ + implicit object TransformBigDecimal extends BsonTransformer[BigDecimal] { + def apply(value: BigDecimal): BsonDecimal128 = BsonDecimal128(value) + } + + /** + * Transforms `Boolean` to `BsonBoolean` + */ + implicit object TransformBoolean extends BsonTransformer[Boolean] { + def apply(value: Boolean): BsonBoolean = BsonBoolean(value) + } + + /** + * Transforms `String` to `BsonString` + */ + implicit object TransformString extends BsonTransformer[String] { + def apply(value: String): BsonString = BsonString(value) + } + + /** + * Transforms `Array[Byte]` to `BsonBinary` + */ + implicit object TransformBinary extends BsonTransformer[Array[Byte]] { + def apply(value: Array[Byte]): BsonBinary = BsonBinary(value) + } + + /** + * Transforms `Regex` to `BsonRegex` + */ + implicit object TransformRegex extends BsonTransformer[Regex] { + def apply(value: Regex): BsonRegularExpression = BsonRegularExpression(value) + } + + /** + * Transforms `Date` to `BsonDateTime` + */ + implicit object TransformDateTime extends BsonTransformer[Date] { + def apply(value: Date): BsonDateTime = BsonDateTime(value) + } + + /** + * Transforms `Decimal128` to `BsonDecimal128` + */ + implicit object TransformDecimal128 extends BsonTransformer[Decimal128] { + def apply(value: Decimal128): BsonDecimal128 = BsonDecimal128(value) + } + + /** + * Transforms `ObjectId` to `BsonObjectId` + */ + implicit object TransformObjectId extends BsonTransformer[ObjectId] { + def apply(value: ObjectId): BsonObjectId = BsonObjectId(value) + } + + /** + * Transforms `Int` to `BsonInt32` + */ + implicit object TransformInt extends BsonTransformer[Int] { + def apply(value: Int): BsonInt32 = BsonInt32(value) + } + + /** + * Transforms `Long` to `BsonInt64` + */ + implicit object TransformLong extends BsonTransformer[Long] { + def apply(value: Long): BsonInt64 = BsonInt64(value) + } + + /** + * Transforms `Double` to `BsonDouble` + */ + implicit object TransformDouble extends BsonTransformer[Double] { + def apply(value: Double): BsonDouble = BsonDouble(value) + } + + /** + * Transforms `None` to `BsonNull` + */ + implicit object TransformNone extends BsonTransformer[Option[Nothing]] { + def apply(value: Option[Nothing]): BsonNull = BsonNull() + } + + /** + * Transforms `Option[T]` to `BsonValue` + */ + implicit def transformOption[T](implicit transformer: BsonTransformer[T]): BsonTransformer[Option[T]] = { + new BsonTransformer[Option[T]] { + def apply(value: Option[T]): BsonValue = value match { + case Some(transformable) => transformer(transformable) + case None => BsonNull() + } + } + } + +} + +trait LowPrio { + + /** + * Transforms `immutable.Document` to `BsonDocument` + */ + implicit object TransformImmutableDocument extends BsonTransformer[IDocument] { + def apply(value: IDocument): BsonDocument = value.toBsonDocument + } + + /** + * Transforms `mutable.Document` to `BsonDocument` + */ + implicit object TransformMutableDocument extends BsonTransformer[MDocument] { + def apply(value: MDocument): BsonDocument = value.underlying + } + + /** + * Transforms `Seq[(String, T)]` to `BsonDocument` + * + * @param transformer implicit transformer for type `T` + * @tparam T the type of the values + * @return a BsonDocument containing the values + */ + 
implicit def transformKeyValuePairs[T]( + implicit transformer: BsonTransformer[T] + ): BsonTransformer[Seq[(String, T)]] = { + new BsonTransformer[Seq[(String, T)]] { + def apply(values: Seq[(String, T)]): BsonDocument = { + BsonDocument(values.map(kv => (kv._1, transformer(kv._2))).toList) + } + } + } + + /** + * Transforms `Seq[T]` to `BsonArray` + * + * @param transformer implicit transformer for type `T` + * @tparam T the type of the values + * @return a BsonArray containing all the values + */ + implicit def transformSeq[T](implicit transformer: BsonTransformer[T]): BsonTransformer[Seq[T]] = { + new BsonTransformer[Seq[T]] { + def apply(values: Seq[T]): BsonValue = { + new BsonArray(values.map(transformer.apply).toList.asJava) + } + } + } +} diff --git a/bson-scala/src/main/scala/org/mongodb/scala/bson/BsonValue.scala b/bson-scala/src/main/scala/org/mongodb/scala/bson/BsonValue.scala new file mode 100644 index 00000000000..8fdd797369c --- /dev/null +++ b/bson-scala/src/main/scala/org/mongodb/scala/bson/BsonValue.scala @@ -0,0 +1,497 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.bson + +import java.util.Date + +import scala.collection.JavaConverters._ +import scala.util.matching.Regex + +import org.bson.{ BsonDocument => JBsonDocument } + +/** + * Companion helper for a BsonArray + * + * @since 1.0 + */ +object BsonArray { + import BsonMagnets._ + + /** + * Create an empty BsonArray + * @return the BsonArray + */ + def apply(): BsonArray = new BsonArray() + + /** + * Create a BsonArray from the provided values + * + * @param elems the `BsonValues` to become the `BsonArray` + * @return the BsonArray + */ + @deprecated("Use `fromIterable` instead", "2.7.0") + def apply(elems: Iterable[BsonValue]): BsonArray = fromIterable(elems) + + /** + * Create a BsonArray from the provided values + * + * @param elems the `BsonValues` to become the `BsonArray` + * @return the BsonArray + */ + def fromIterable(elems: Iterable[BsonValue]): BsonArray = new BsonArray(elems.toList.asJava) + + /** + * Creates a BsonArray from the provided values + * + * + * @param elems the values that can be transformed into a `BsonValue` + * @return the BsonArray + */ + def apply(elems: CanBeBsonValue*): BsonArray = new BsonArray(elems.map(_.value).asJava) +} + +/** + * Companion helper for a BsonBinary + * @since 1.0 + */ +object BsonBinary { + + /** + * Creates the BsonBinary form the provided bytes + * + * @param value the bytes + * @return the BsonBinary + */ + def apply(value: Array[Byte]): BsonBinary = new BsonBinary(value) +} + +/** + * Companion helper for a BsonBoolean + * @since 1.0 + */ +object BsonBoolean { + + /** + * Creates a `BsonBoolean` + * + * @param value the value + * @return the BsonBoolean + */ + def apply(value: Boolean): BsonBoolean = new BsonBoolean(value) +} + +/** + * Companion helper for a BsonDateTime + * @since 1.0 + */ +object BsonDateTime { + + /** + * Creates a BsonDateTime + * + * @param value 
the number of milliseconds since the Unix epoch
+   * @return the BsonDateTime
+   */
+  def apply(value: Long): BsonDateTime = new BsonDateTime(value)
+
+  /**
+   * Creates a BsonDateTime
+   *
+   * @param date a `java.util.Date`
+   * @return the BsonDateTime
+   */
+  def apply(date: Date): BsonDateTime = new BsonDateTime(date.getTime)
+}
+
+/**
+ * Companion helper for a BsonDecimal128
+ * @since 1.2
+ */
+object BsonDecimal128 {
+
+  /**
+   * Creates a `BsonDecimal128`
+   *
+   * @param value the `Decimal128`
+   * @return the BsonDecimal128
+   */
+  def apply(value: Decimal128): BsonDecimal128 = new BsonDecimal128(value)
+
+  /**
+   * Creates a `BsonDecimal128`
+   *
+   * @param value the `BigDecimal`
+   * @return the BsonDecimal128
+   */
+  def apply(value: BigDecimal): BsonDecimal128 = apply(new Decimal128(value.bigDecimal))
+
+  /**
+   * Creates a `BsonDecimal128`
+   *
+   * @param value the long value to convert
+   * @return the BsonDecimal128
+   */
+  def apply(value: Long): BsonDecimal128 = apply(new Decimal128(value))
+
+  /**
+   * Creates a `BsonDecimal128`
+   *
+   * @param value the string value to convert
+   * @return the BsonDecimal128
+   */
+  def apply(value: String): BsonDecimal128 = apply(org.bson.types.Decimal128.parse(value))
+}
+
+/**
+ * Companion helper for a BsonDocument
+ * @since 1.0
+ */
+object BsonDocument {
+
+  import BsonMagnets._
+
+  /**
+   * Creates an empty `BsonDocument`
+   * @return the BsonDocument
+   */
+  def apply(): BsonDocument = new JBsonDocument()
+
+  /**
+   * Creates a `BsonDocument` from the key/value pairs
+   *
+   * @param elems a traversable of key/value pairs
+   * @return the BsonDocument
+   */
+  def apply(elems: Traversable[(String, BsonValue)]): BsonDocument = {
+    val bsonDocument = new JBsonDocument()
+    elems.foreach(kv => bsonDocument.put(kv._1, kv._2))
+    bsonDocument
+  }
+
+  /**
+   * Creates a `BsonDocument` from key/value pairs
+   *
+   * @param elems the key/value pairs
+   * @return the BsonDocument
+   */
+  def apply(elems: CanBeBsonElement*): BsonDocument = {
+    val bsonDocument = new JBsonDocument()
+    elems.foreach(elem => bsonDocument.put(elem.key, elem.value))
+    bsonDocument
+  }
+
+  /**
+   * Creates a `BsonDocument` from a JSON string
+   *
+   * @param json the JSON string
+   * @return the BsonDocument
+   */
+  def apply(json: String): BsonDocument = JBsonDocument.parse(json)
+}
+
+/**
+ * Companion helper for a BsonDouble
+ * @since 1.0
+ */
+object BsonDouble {
+
+  /**
+   * Creates a `BsonDouble`
+   *
+   * @param value the BsonDouble value
+   * @return the BsonDouble
+   */
+  def apply(value: Double): BsonDouble = new BsonDouble(value)
+}
+
+/**
+ * Companion helper for a BsonInt32
+ * @since 1.0
+ */
+object BsonInt32 {
+
+  /**
+   * Creates a `BsonInt32`
+   *
+   * @param value the BsonInt32 value
+   * @return the BsonInt32
+   */
+  def apply(value: Int): BsonInt32 = new BsonInt32(value)
+}
+
+/**
+ * Companion helper for a BsonInt64
+ * @since 1.0
+ */
+object BsonInt64 {
+
+  /**
+   * Creates a `BsonInt64`
+   *
+   * @param value the BsonInt64 value
+   * @return the BsonInt64
+   */
+  def apply(value: Long): BsonInt64 = new BsonInt64(value)
+}
+
+/**
+ * Companion helper for a BsonJavaScript
+ * @since 1.0
+ */
+object BsonJavaScript {
+
+  /**
+   * Creates a `BsonJavaScript`
+   *
+   * @param value the javascript function
+   * @return the BsonJavaScript
+   */
+  def apply(value: String): BsonJavaScript = new BsonJavaScript(value)
+}
+
+/**
+ * Companion helper for a BsonJavaScriptWithScope
+ * @since 1.0
+ */
+object BsonJavaScriptWithScope {
+  import BsonMagnets._
+
+  /**
+   * Creates a `BsonJavaScriptWithScope`
+   *
+   * @param value the javascript function
+   * @param scope the function scope
+   * @return the BsonJavaScriptWithScope
+   */
+  def apply(value: String, scope: BsonDocument): BsonJavaScriptWithScope = new BsonJavaScriptWithScope(value, scope)
+
+  /**
+   * Creates a `BsonJavaScriptWithScope`
+   *
+   * @param value the javascript function
+   * @param scope the function scope
+   * @return the BsonJavaScriptWithScope
+   */
+  def apply(value: String, scope: CanBeBsonElement*): BsonJavaScriptWithScope =
+    new BsonJavaScriptWithScope(value, BsonDocument(scope: _*))
+
+  /**
+   * Creates a `BsonJavaScriptWithScope`
+   *
+   * @param value the javascript function
+   * @param scope the function scope
+   * @return the BsonJavaScriptWithScope
+   */
+  def apply(value: String, scope: Traversable[(String, BsonValue)]): BsonJavaScriptWithScope =
+    new BsonJavaScriptWithScope(value, BsonDocument(scope))
+}
+
+/**
+ * Companion helper for a BsonMaxKey
+ * @since 1.0
+ */
+object BsonMaxKey {
+
+  /**
+   * Creates a `BsonMaxKey`
+   * @return the BsonMaxKey
+   */
+  def apply(): BsonMaxKey = new BsonMaxKey()
+}
+
+/**
+ * Companion helper for a BsonMinKey
+ * @since 1.0
+ */
+object BsonMinKey {
+
+  /**
+   * Creates a `BsonMinKey`
+   * @return the BsonMinKey
+   */
+  def apply(): BsonMinKey = new BsonMinKey()
+}
+
+/**
+ * Companion helper for a BsonNull
+ * @since 1.0
+ */
+object BsonNull {
+
+  /**
+   * Creates a `BsonNull`
+   * @return the BsonNull
+   */
+  def apply(): BsonNull = new BsonNull()
+}
+
+/**
+ * Companion helper for a BsonNumber
+ * @since 1.0
+ */
+object BsonNumber {
+
+  /**
+   * Creates a `BsonNumber`
+   *
+   * @param value the value
+   * @return the BsonNumber
+   */
+  def apply(value: Int): BsonNumber = new BsonInt32(value)
+
+  /**
+   * Creates a `BsonNumber`
+   *
+   * @param value the value
+   * @return the BsonNumber
+   */
+  def apply(value: Long): BsonNumber = new BsonInt64(value)
+
+  /**
+   * Creates a `BsonNumber`
+   *
+   * @param value the value
+   * @return the BsonNumber
+   */
+  def apply(value: Double): BsonNumber = new BsonDouble(value)
+}
+
+/**
+ * Companion helper for a BsonObjectId
+ * @since 1.0
+ */
+object BsonObjectId {
+
+  /**
+   * Creates a new `BsonObjectId`
+   *
+   * @return the BsonObjectId
+   */
+  def apply(): BsonObjectId = new BsonObjectId(new ObjectId())
+
+  /**
+   * Creates a new `BsonObjectId`
+   *
+   * @param value the 24-character hexadecimal string representation of an `ObjectId`.
+   * @return the BsonObjectId
+   */
+  def apply(value: String): BsonObjectId = new BsonObjectId(new ObjectId(value))
+
+  /**
+   * Creates a new `BsonObjectId`
+   *
+   * @param value the `ObjectId`.
+   * @return the BsonObjectId
+   */
+  def apply(value: ObjectId): BsonObjectId = new BsonObjectId(value)
+}
+
+/**
+ * Companion helper for a BsonRegularExpression
+ * @since 1.0
+ */
+object BsonRegularExpression {
+
+  /**
+   * Creates a new `BsonRegularExpression`
+   *
+   * @param value the `Regex`.
+   * @return the BsonRegularExpression
+   */
+  def apply(value: Regex): BsonRegularExpression = new BsonRegularExpression(value.regex)
+
+  /**
+   * Creates a new `BsonRegularExpression`
+   *
+   * @param value the Regex string.
+   * @return the BsonRegularExpression
+   */
+  def apply(value: String): BsonRegularExpression = new BsonRegularExpression(value)
+
+  /**
+   * Creates a new `BsonRegularExpression`
+   *
+   * @param value the Regex string.
+   * @param options the regex options string
+   * @return the BsonRegularExpression
+   */
+  def apply(value: String, options: String): BsonRegularExpression = new BsonRegularExpression(value, options)
+}
+
+/**
+ * Companion helper for a BsonString
+ * @since 1.0
+ */
+object BsonString {
+
+  /**
+   * Creates a new `BsonString`
+   *
+   * @param value the string.
+   * @return the BsonString
+   */
+  def apply(value: String): BsonString = new BsonString(value)
+}
+
+/**
+ * Companion helper for a BsonSymbol
+ * @since 1.0
+ */
+object BsonSymbol {
+
+  /**
+   * Creates a new `BsonSymbol`
+   *
+   * @param value the Symbol.
+   * @return the BsonSymbol
+   */
+  def apply(value: Symbol): BsonSymbol = new BsonSymbol(value.name)
+}
+
+/**
+ * Companion helper for a BsonTimestamp
+ * @since 1.0
+ */
+object BsonTimestamp {
+
+  /**
+   * Creates a new `BsonTimestamp`
+   * @return the BsonTimestamp
+   */
+  def apply(): BsonTimestamp = new BsonTimestamp(0, 0)
+
+  /**
+   * Creates a new `BsonTimestamp`
+   * @param time the time in seconds since the epoch
+   * @param inc an incrementing ordinal for operations within a given second
+   * @return the BsonTimestamp
+   */
+  def apply(time: Int, inc: Int): BsonTimestamp = new BsonTimestamp(time, inc)
+}
+
+/**
+ * Companion helper for a BsonUndefined
+ * @since 1.0
+ */
+object BsonUndefined {
+
+  /**
+   * Creates a new `BsonUndefined`
+   * @return the BsonUndefined
+   */
+  def apply(): BsonUndefined = new BsonUndefined()
+}
diff --git a/bson-scala/src/main/scala/org/mongodb/scala/bson/DefaultHelper.scala b/bson-scala/src/main/scala/org/mongodb/scala/bson/DefaultHelper.scala
new file mode 100644
index 00000000000..b8664276eae
--- /dev/null
+++ b/bson-scala/src/main/scala/org/mongodb/scala/bson/DefaultHelper.scala
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.bson
+
+/**
+ * A helper containing the sealed `DefaultsTo` trait which is used to determine the default type for a given method.
+ *
+ * @since 1.0
+ */
+protected[scala] object DefaultHelper {
+
+  /**
+   * Neat helper to obtain a default type should one not be given, e.g.:
+   *
+   * {{{
+   *  def find[T]()(implicit e: T DefaultsTo Document) { ... }
+   * }}}
+   *
+   * The signature of the `find` method ensures that it can only be called if the caller can supply an object of type
+   * `DefaultsTo[T, Document]`. Of course, the [[DefaultsTo.default]] and [[DefaultsTo.overrideDefault]] methods make it easy to create
+   * such an object for any type `T`. Since these methods are implicit, the compiler automatically handles the business of calling one of
+   * them and passing the result into `find`.
+   *
+   * ''But how does the compiler know which method to call?'' It uses its type inference and implicit resolution rules to determine the
+   * appropriate method. There are three cases to consider:
+   *
+   * 1. `find` is called with no type parameter. In this case, type `T` must be inferred. Searching for an implicit method that can provide
+   * an object of type `DefaultsTo[T, Document]`, the compiler finds `default` and `overrideDefault`. `default` is chosen since it has
+   * priority (because it's defined in a proper subclass of the trait that defines `overrideDefault`). As a result, `T` must be bound to
+   * `Document`.
+   *
+   * 2. `find` is called with a non-Document type parameter (e.g., `find[BsonDocument]()`). In this case, an object of type
+   * `DefaultsTo[BsonDocument, Document]` must be supplied. Only the `overrideDefault` method can supply it, so the compiler inserts the
+   * appropriate call.
+   *
+   * 3. `find` is called with `Document` as the type parameter. Again, either method is applicable, but `default` wins due to its higher
+   * priority.
+   *
+   */
+  sealed class DefaultsTo[A, B]
+
+  /**
+   * Companion object for [[DefaultsTo]]
+   */
+  object DefaultsTo extends LowPriorityDefaultsTo {
+
+    /**
+     * Implicitly sets a default type of B. See [[DefaultsTo]]
+     *
+     * @tparam B the default type
+     * @return Defaults[B, B] instance
+     */
+    implicit def default[B]: DefaultsTo[B, B] = new DefaultsTo[B, B]
+  }
+
+  /**
+   * Lower priority defaultsTo implicit helper
+   */
+  trait LowPriorityDefaultsTo {
+
+    /**
+     * Overrides the default with the set type of A. See [[DefaultsTo]]
+     *
+     * @tparam A The type to use
+     * @tparam B The default type in case type A is missing
+     * @return Defaults[A, B] instance
+     */
+    implicit def overrideDefault[A, B]: DefaultsTo[A, B] = new DefaultsTo[A, B]
+  }
+}
diff --git a/bson-scala/src/main/scala/org/mongodb/scala/bson/annotations/BsonIgnore.scala b/bson-scala/src/main/scala/org/mongodb/scala/bson/annotations/BsonIgnore.scala
new file mode 100644
index 00000000000..f1300a90713
--- /dev/null
+++ b/bson-scala/src/main/scala/org/mongodb/scala/bson/annotations/BsonIgnore.scala
@@ -0,0 +1,10 @@
+package org.mongodb.scala.bson.annotations
+
+import scala.annotation.StaticAnnotation
+
+/**
+ * Annotation to ignore a property.
+ *
+ * @since 4.2
+ */
+case class BsonIgnore() extends StaticAnnotation
diff --git a/bson-scala/src/main/scala/org/mongodb/scala/bson/annotations/BsonProperty.scala b/bson-scala/src/main/scala/org/mongodb/scala/bson/annotations/BsonProperty.scala
new file mode 100644
index 00000000000..28fe469e5f8
--- /dev/null
+++ b/bson-scala/src/main/scala/org/mongodb/scala/bson/annotations/BsonProperty.scala
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.mongodb.scala.bson.annotations + +import scala.annotation.StaticAnnotation + +/** + * Annotation to change the stored key of a property + * + * @param key the key for the stored property + */ +case class BsonProperty(key: String) extends StaticAnnotation diff --git a/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/DocumentCodecProvider.scala b/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/DocumentCodecProvider.scala new file mode 100644 index 00000000000..b60032961af --- /dev/null +++ b/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/DocumentCodecProvider.scala @@ -0,0 +1,42 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.bson.codecs + +import org.bson.codecs.Codec +import org.bson.codecs.configuration.{ CodecProvider, CodecRegistry } +import org.mongodb.scala.bson.collection.{ immutable, mutable } + +/** + * A [[http://api.mongodb.org/java/current/org/bson/codecs/configuration/CodecProvider.html CodecProvider]] for the Document + * class and all the default Codec implementations on which it depends. + */ +case class DocumentCodecProvider() extends CodecProvider { + + val IMMUTABLE: Class[immutable.Document] = classOf[immutable.Document] + val MUTABLE: Class[mutable.Document] = classOf[mutable.Document] + + // scalastyle:off null + @SuppressWarnings(Array("unchecked")) + def get[T](clazz: Class[T], registry: CodecRegistry): Codec[T] = { + clazz match { + case IMMUTABLE => ImmutableDocumentCodec(registry).asInstanceOf[Codec[T]] + case MUTABLE => MutableDocumentCodec(registry).asInstanceOf[Codec[T]] + case _ => null + } + } + // scalastyle:on null +} diff --git a/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/ImmutableDocumentCodec.scala b/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/ImmutableDocumentCodec.scala new file mode 100644 index 00000000000..a82300b3e0d --- /dev/null +++ b/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/ImmutableDocumentCodec.scala @@ -0,0 +1,63 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala.bson.codecs + +import org.bson.codecs.configuration.CodecRegistry +import org.bson.codecs.{ BsonDocumentCodec, CollectibleCodec, DecoderContext, EncoderContext } +import org.bson.{ BsonReader, BsonValue, BsonWriter } +import org.mongodb.scala.bson.collection.immutable.Document + +/** + * Companion helper for immutable Document instances. + */ +object ImmutableDocumentCodec { + def apply(): ImmutableDocumentCodec = ImmutableDocumentCodec(None) + def apply(registry: CodecRegistry): ImmutableDocumentCodec = ImmutableDocumentCodec(Some(registry)) +} + +/** + * A Codec for immutable Document instances. + * + * As the underlying driver expects documents to be mutable the driver has direct access to the Documents underlying + * mutable `BsonDocument` instance and therefore will mutate the document when adding an `_id` + */ +case class ImmutableDocumentCodec(registry: Option[CodecRegistry]) extends CollectibleCodec[Document] { + + lazy val underlying: BsonDocumentCodec = { + registry.map(new BsonDocumentCodec(_)).getOrElse(new BsonDocumentCodec) + } + + override def generateIdIfAbsentFromDocument(document: Document): Document = { + if (!underlying.documentHasId(document.underlying)) { + Document(underlying.generateIdIfAbsentFromDocument(document.toBsonDocument.clone)) + } else { + document + } + } + + override def documentHasId(document: Document): Boolean = underlying.documentHasId(document.underlying) + + override def getDocumentId(document: Document): BsonValue = underlying.getDocumentId(document.underlying) + + override def encode(writer: BsonWriter, value: Document, encoderContext: EncoderContext): Unit = + underlying.encode(writer, value.underlying, encoderContext) + + override def getEncoderClass: Class[Document] = classOf[Document] + + override def decode(reader: BsonReader, decoderContext: DecoderContext): Document = + Document(underlying.decode(reader, decoderContext)) +} diff --git a/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/IterableCodec.scala b/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/IterableCodec.scala new file mode 100644 index 00000000000..c6d98d78ba0 --- /dev/null +++ b/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/IterableCodec.scala @@ -0,0 +1,130 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala.bson.codecs + +import java.util.UUID + +import scala.collection.mutable + +import org.bson._ +import org.bson.codecs._ +import org.bson.codecs.configuration.CodecRegistry + +/** + * IterableCodec companion object + * + * @since 1.2 + */ +object IterableCodec { + + def apply(registry: CodecRegistry, bsonTypeClassMap: BsonTypeClassMap): IterableCodec = + apply(registry, bsonTypeClassMap, None) + + def apply( + registry: CodecRegistry, + bsonTypeClassMap: BsonTypeClassMap, + valueTransformer: Option[Transformer] + ): IterableCodec = { + new IterableCodec(registry, bsonTypeClassMap, valueTransformer.getOrElse(DEFAULT_TRANSFORMER)) + } + + private val DEFAULT_TRANSFORMER = new Transformer() { + def transform(objectToTransform: Object): Object = objectToTransform + } +} + +/** + * Encodes and decodes `Iterable` objects. + * + * @since 1.2 + */ +@SuppressWarnings(Array("rawtypes")) +case class IterableCodec(registry: CodecRegistry, bsonTypeClassMap: BsonTypeClassMap, valueTransformer: Transformer) + extends Codec[Iterable[_ <: Any]] { + lazy val bsonTypeCodecMap = new BsonTypeCodecMap(bsonTypeClassMap, registry) + + override def decode(reader: BsonReader, decoderContext: DecoderContext): Iterable[_] = + readValue(reader, decoderContext).asInstanceOf[Iterable[_]] + + override def encode(writer: BsonWriter, value: Iterable[_ <: Any], encoderContext: EncoderContext): Unit = + writeValue(writer, encoderContext, value) + + override def getEncoderClass: Class[Iterable[_]] = classOf[Iterable[_]] + + @SuppressWarnings(Array("unchecked", "rawtypes")) + private def writeValue[T](writer: BsonWriter, encoderContext: EncoderContext, value: T): Unit = { + value match { + case isNull if value == null => writer.writeNull() // scalastyle:ignore + case map: Map[_, _] => + writeMap(writer, map.asInstanceOf[Map[String, Any]], encoderContext.getChildContext) + case list: Iterable[_] => + writeIterable(writer, list, encoderContext.getChildContext) + case _ => + val codec = registry.get(value.getClass).asInstanceOf[Encoder[T]] + encoderContext.encodeWithChildContext(codec, writer, value) + } + } + + private def writeMap(writer: BsonWriter, map: Map[String, Any], encoderContext: EncoderContext): Unit = { + writer.writeStartDocument() + map.foreach(kv => { + writer.writeName(kv._1) + writeValue(writer, encoderContext, kv._2) + }) + writer.writeEndDocument() + } + + private def writeIterable(writer: BsonWriter, list: Iterable[_], encoderContext: EncoderContext): Unit = { + writer.writeStartArray() + list.foreach(value => writeValue(writer, encoderContext, value)) + writer.writeEndArray() + } + + private def readValue(reader: BsonReader, decoderContext: DecoderContext): Any = { + reader.getCurrentBsonType match { + case BsonType.NULL => + reader.readNull() + null // scalastyle:ignore + case BsonType.ARRAY => readList(reader, decoderContext) + case BsonType.DOCUMENT => readMap(reader, decoderContext) + case BsonType.BINARY if BsonBinarySubType.isUuid(reader.peekBinarySubType) && reader.peekBinarySize == 16 => + registry.get(classOf[UUID]).decode(reader, decoderContext) + case bsonType: BsonType => + valueTransformer.transform(bsonTypeCodecMap.get(bsonType).decode(reader, decoderContext)) + } + } + + private def readMap(reader: BsonReader, decoderContext: DecoderContext): Map[String, _] = { + val map = mutable.Map[String, Any]() + reader.readStartDocument() + while (reader.readBsonType ne BsonType.END_OF_DOCUMENT) { + map += (reader.readName -> readValue(reader, decoderContext)) + } + 
reader.readEndDocument() + map.toMap + } + + private def readList(reader: BsonReader, decoderContext: DecoderContext): List[_] = { + reader.readStartArray() + val list = mutable.ListBuffer[Any]() + while (reader.readBsonType ne BsonType.END_OF_DOCUMENT) { + list.append(readValue(reader, decoderContext)) + } + reader.readEndArray() + list.toList + } +} diff --git a/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/IterableCodecProvider.scala b/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/IterableCodecProvider.scala new file mode 100644 index 00000000000..eeb0d4ee440 --- /dev/null +++ b/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/IterableCodecProvider.scala @@ -0,0 +1,74 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.bson.codecs + +import org.bson.Transformer +import org.bson.codecs.Codec +import org.bson.codecs.configuration.{ CodecProvider, CodecRegistry } + +/** + * IterableCodecProvider companion object + * + * @since 1.2 + */ +object IterableCodecProvider { + + /** + * Create an `IterableCodecProvider` with the default `BsonTypeClassMap` and `Transformer`. + * @return the new instance + */ + def apply(): IterableCodecProvider = new IterableCodecProvider(BsonTypeClassMap(), None) + + /** + * Create an `IterableCodecProvider` with the given `BsonTypeClassMap` and the default `Transformer`. + * + * @param bsonTypeClassMap the bson type class map + * @return the new instance + */ + def apply(bsonTypeClassMap: BsonTypeClassMap): IterableCodecProvider = + new IterableCodecProvider(bsonTypeClassMap, None) + + /** + * Create an `IterableCodecProvider` with the default `BsonTypeClassMap` and the given `Transformer`. + * + * @param valueTransformer the value transformer for decoded values + * @return the new instance + */ + def apply(valueTransformer: Transformer): IterableCodecProvider = + new IterableCodecProvider(BsonTypeClassMap(), Option(valueTransformer)) +} + +/** + * A `CodecProvider` for classes that implement the `Iterable` interface. + * + * @param bsonTypeClassMap the non-null `BsonTypeClassMap` with which to construct instances of `IterableCodec`.
+ * @param valueTransformer the value transformer for decoded values + * + * @since 1.2 + */ +case class IterableCodecProvider(bsonTypeClassMap: BsonTypeClassMap, valueTransformer: Option[Transformer]) + extends CodecProvider { + + @SuppressWarnings(Array("unchecked")) + def get[T](clazz: Class[T], registry: CodecRegistry): Codec[T] = { + if (classOf[Iterable[_]].isAssignableFrom(clazz)) { + IterableCodec(registry, bsonTypeClassMap, valueTransformer).asInstanceOf[Codec[T]] + } else { + null // scalastyle:ignore + } + } +} diff --git a/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/Macros.scala b/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/Macros.scala new file mode 100644 index 00000000000..f667342c91a --- /dev/null +++ b/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/Macros.scala @@ -0,0 +1,130 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.bson.codecs + +import scala.annotation.compileTimeOnly +import scala.language.experimental.macros +import scala.language.implicitConversions + +import org.bson.codecs.Codec +import org.bson.codecs.configuration.{ CodecProvider, CodecRegistry } + +import org.mongodb.scala.bson.codecs.macrocodecs.{ CaseClassCodec, CaseClassProvider } + +/** + * Macro based Codecs + * + * Allows the compile time creation of Codecs for case classes. 
+ * + * The recommended approach is to use the implicit [[Macros.createCodecProvider[T](clazz:Class[T])*]] method to help build a codecRegistry: + * ``` + * import org.mongodb.scala.bson.codecs.Macros.createCodecProvider + * import org.bson.codecs.configuration.CodecRegistries.{fromRegistries, fromProviders} + * + * case class Contact(phone: String) + * case class User(_id: Int, username: String, age: Int, hobbies: List[String], contacts: List[Contact]) + * + * val codecRegistry = fromRegistries(fromProviders(classOf[User], classOf[Contact]), MongoClient.DEFAULT_CODEC_REGISTRY) + * ``` + * + * @since 2.0 + */ +object Macros { + + /** + * Creates a CodecProvider for a case class + * + * @tparam T the case class to create a Codec from + * @return the CodecProvider for the case class + */ + @compileTimeOnly("Creating a CodecProvider utilises Macros and must be run at compile time.") + def createCodecProvider[T](): CodecProvider = macro CaseClassProvider.createCodecProviderEncodeNone[T] + + /** + * Creates a CodecProvider for a case class using the given class to represent the case class + * + * @param clazz the clazz that is the case class + * @tparam T the case class to create a Codec from + * @return the CodecProvider for the case class + */ + @compileTimeOnly("Creating a CodecProvider utilises Macros and must be run at compile time.") + implicit def createCodecProvider[T](clazz: Class[T]): CodecProvider = + macro CaseClassProvider.createCodecProviderWithClassEncodeNone[T] + + /** + * Creates a CodecProvider for a case class that ignores any `None` values + * + * @tparam T the case class to create a Codec from + * @return the CodecProvider for the case class + * @since 2.1 + */ + @compileTimeOnly("Creating a CodecProvider utilises Macros and must be run at compile time.") + def createCodecProviderIgnoreNone[T](): CodecProvider = macro CaseClassProvider.createCodecProviderIgnoreNone[T] + + /** + * Creates a CodecProvider for a case class that ignores any `None` values, using the given class to represent the case class + * + * @param clazz the clazz that is the case class + * @tparam T the case class to create a Codec from + * @return the CodecProvider for the case class + * @since 2.1 + */ + @compileTimeOnly("Creating a CodecProvider utilises Macros and must be run at compile time.") + def createCodecProviderIgnoreNone[T](clazz: Class[T]): CodecProvider = + macro CaseClassProvider.createCodecProviderWithClassIgnoreNone[T] + + /** + * Creates a Codec for a case class + * + * @tparam T the case class to create a Codec from + * @return the Codec for the case class + */ + @compileTimeOnly("Creating a Codec utilises Macros and must be run at compile time.") + def createCodec[T](): Codec[T] = macro CaseClassCodec.createCodecBasicCodecRegistryEncodeNone[T] + + /** + * Creates a Codec for a case class + * + * @param codecRegistry the Codec Registry to use + * @tparam T the case class to create a codec from + * @return the Codec for the case class + */ + @compileTimeOnly("Creating a Codec utilises Macros and must be run at compile time.") + def createCodec[T](codecRegistry: CodecRegistry): Codec[T] = macro CaseClassCodec.createCodecEncodeNone[T] + + /** + * Creates a Codec for a case class + * + * @tparam T the case class to create a Codec from + * @return the Codec for the case class + * @since 2.1 + */ + @compileTimeOnly("Creating a Codec utilises Macros and must be run at compile time.") + def createCodecIgnoreNone[T](): Codec[T] = macro CaseClassCodec.createCodecBasicCodecRegistryIgnoreNone[T] + 
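+ + // A minimal usage sketch (the `Person` case class below is illustrative, not part of this API): + // + // case class Person(name: String, nickname: Option[String]) + // + // val codec = Macros.createCodec[Person]() // writes `nickname` as a BSON null when it is None + // val sparseCodec = Macros.createCodecIgnoreNone[Person]() // omits the `nickname` field entirely when it is None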
+ /** + * Creates a Codec for a case class + * + * @param codecRegistry the Codec Registry to use + * @tparam T the case class to create a codec from + * @return the Codec for the case class + * @since 2.1 + */ + @compileTimeOnly("Creating a Codec utilises Macros and must be run at compile time.") + def createCodecIgnoreNone[T](codecRegistry: CodecRegistry): Codec[T] = macro CaseClassCodec.createCodecIgnoreNone[T] + +} diff --git a/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/MutableDocumentCodec.scala b/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/MutableDocumentCodec.scala new file mode 100644 index 00000000000..c9a28fd5aeb --- /dev/null +++ b/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/MutableDocumentCodec.scala @@ -0,0 +1,57 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.bson.codecs + +import org.bson.codecs.configuration.CodecRegistry +import org.bson.codecs.{ BsonDocumentCodec, CollectibleCodec, DecoderContext, EncoderContext } +import org.bson.{ BsonReader, BsonValue, BsonWriter } +import org.mongodb.scala.bson.collection.mutable.Document + +/** + * Companion helper for mutable Document instances. + */ +object MutableDocumentCodec { + def apply(): MutableDocumentCodec = MutableDocumentCodec(None) + def apply(registry: CodecRegistry): MutableDocumentCodec = MutableDocumentCodec(Some(registry)) +} + +/** + * A Codec for mutable Document instances. + */ +case class MutableDocumentCodec(registry: Option[CodecRegistry]) extends CollectibleCodec[Document] { + + lazy val underlying: BsonDocumentCodec = { + registry.map(new BsonDocumentCodec(_)).getOrElse(new BsonDocumentCodec) + } + + override def generateIdIfAbsentFromDocument(document: Document): Document = { + underlying.generateIdIfAbsentFromDocument(document.underlying) + document + } + + override def documentHasId(document: Document): Boolean = underlying.documentHasId(document.underlying) + + override def getDocumentId(document: Document): BsonValue = underlying.getDocumentId(document.underlying) + + override def encode(writer: BsonWriter, value: Document, encoderContext: EncoderContext): Unit = + underlying.encode(writer, value.underlying, encoderContext) + + override def getEncoderClass: Class[Document] = classOf[Document] + + override def decode(reader: BsonReader, decoderContext: DecoderContext): Document = + Document(underlying.decode(reader, decoderContext)) +} diff --git a/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/macrocodecs/CaseClassCodec.scala b/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/macrocodecs/CaseClassCodec.scala new file mode 100644 index 00000000000..a5e61754f1f --- /dev/null +++ b/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/macrocodecs/CaseClassCodec.scala @@ -0,0 +1,417 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.bson.codecs.macrocodecs + +import scala.reflect.macros.whitebox +import org.bson.codecs.Codec +import org.bson.codecs.configuration.CodecRegistry +import org.mongodb.scala.bson.annotations.{ BsonIgnore, BsonProperty } + +private[codecs] object CaseClassCodec { + + def createCodecBasicCodecRegistryEncodeNone[T: c.WeakTypeTag](c: whitebox.Context)(): c.Expr[Codec[T]] = { + import c.universe._ + createCodecBasicCodecRegistry[T](c)(c.Expr[Boolean](q"true")) + } + + def createCodecEncodeNone[T: c.WeakTypeTag]( + c: whitebox.Context + )(codecRegistry: c.Expr[CodecRegistry]): c.Expr[Codec[T]] = { + import c.universe._ + createCodec[T](c)(codecRegistry, c.Expr[Boolean](q"true")) + } + + def createCodecBasicCodecRegistryIgnoreNone[T: c.WeakTypeTag](c: whitebox.Context)(): c.Expr[Codec[T]] = { + import c.universe._ + createCodecBasicCodecRegistry[T](c)(c.Expr[Boolean](q"false")) + } + + def createCodecIgnoreNone[T: c.WeakTypeTag]( + c: whitebox.Context + )(codecRegistry: c.Expr[CodecRegistry]): c.Expr[Codec[T]] = { + import c.universe._ + createCodec[T](c)(codecRegistry, c.Expr[Boolean](q"false")) + } + + def createCodecBasicCodecRegistry[T: c.WeakTypeTag]( + c: whitebox.Context + )(encodeNone: c.Expr[Boolean]): c.Expr[Codec[T]] = { + import c.universe._ + createCodec[T](c)( + c.Expr[CodecRegistry]( + q""" + import org.bson.codecs.{ BsonValueCodecProvider, ValueCodecProvider } + import org.bson.codecs.configuration.CodecRegistries.fromProviders + fromProviders( + DocumentCodecProvider(), + IterableCodecProvider(), + new ValueCodecProvider(), + new BsonValueCodecProvider() + ) + """ + ), + encodeNone + ) + } + + // scalastyle:off method.length + def createCodec[T: c.WeakTypeTag]( + c: whitebox.Context + )(codecRegistry: c.Expr[CodecRegistry], encodeNone: c.Expr[Boolean]): c.Expr[Codec[T]] = { + import c.universe._ + + // Declared types + val mainType = weakTypeOf[T] + + val stringType = typeOf[String] + val mapTypeSymbol = typeOf[collection.Map[_, _]].typeSymbol + + // Names + val classTypeName = mainType.typeSymbol.name.toTypeName + val codecName = TypeName(s"${classTypeName}MacroCodec") + + // Type checkers + def isCaseClass(t: Type): Boolean = { + // https://github.com/scala/bug/issues/7755 + val _ = t.typeSymbol.typeSignature + t.typeSymbol.isClass && t.typeSymbol.asClass.isCaseClass && !t.typeSymbol.isModuleClass + } + + def isCaseObject(t: Type): Boolean = t.typeSymbol.isModuleClass && t.typeSymbol.asClass.isCaseClass + def isMap(t: Type): Boolean = t.baseClasses.contains(mapTypeSymbol) + def isOption(t: Type): Boolean = t.typeSymbol == definitions.OptionClass + def isTuple(t: Type): Boolean = definitions.TupleClass.seq.contains(t.typeSymbol) + def isSealed(t: Type): Boolean = t.typeSymbol.isClass && t.typeSymbol.asClass.isSealed + def isAbstractSealed(t: Type): Boolean = isSealed(t) && t.typeSymbol.isAbstract + + def allSubclasses(s: Symbol): Set[Symbol] = { + val directSubClasses = 
s.asClass.knownDirectSubclasses + directSubClasses ++ directSubClasses.flatMap({ s: Symbol => + allSubclasses(s) + }) + } + val subClasses: List[Type] = + allSubclasses(mainType.typeSymbol).map(_.asClass.toType).filter(t => isCaseClass(t) || isCaseObject(t)).toList + if (isSealed(mainType) && subClasses.isEmpty) { + c.abort( + c.enclosingPosition, + s"No known subclasses of the sealed ${if (mainType.typeSymbol.asClass.isTrait) "trait" else "class"}" + ) + } + val knownTypes: List[Type] = (mainType +: subClasses).filterNot(_.typeSymbol.isAbstract).reverse + + def createTerms(t: Type): List[TermSymbol] = { + if (!isAbstractSealed(t)) { + val constructor = t.decl(termNames.CONSTRUCTOR) + if (!constructor.isMethod) c.abort(c.enclosingPosition, "No constructor, unsupported class type") + constructor.asMethod.paramLists match { + case h :: _ => h.map(_.asTerm) + case _ => List.empty + } + } else { + List.empty + } + } + + val terms = knownTypes.flatMap(t => createTerms(t)) + + val fields: Map[Type, List[(TermName, Type)]] = { + knownTypes + .map(t => + ( + t, + t.members.sorted + .filter(_.isMethod) + .map(_.asMethod) + .filter(m => m.isGetter && m.isParamAccessor) + .map(m => (m.name, m.returnType.asSeenFrom(t, t.typeSymbol))) + ) + ) + .toMap + } + + val classAnnotatedFieldsMap: Map[TermName, Constant] = { + terms + .flatMap(t => { + t.annotations + .find(a => a.tree.tpe eq typeOf[BsonProperty]) + .flatMap(_.tree.children.lastOption) + .map(tree => { + t.name -> tree.productElement(0).asInstanceOf[Constant] + }) + }) + .toMap + } + + val ignoredFields: Map[Type, Seq[(TermName, Tree)]] = { + knownTypes.map { tpe => + if (!isCaseClass(tpe)) { + (tpe, Nil) + } else { + val constructor = tpe.decl(termNames.CONSTRUCTOR) + if (!constructor.isMethod) c.abort(c.enclosingPosition, "No constructor, unsupported class type") + + val defaults = constructor.asMethod.paramLists.head + .map(_.asTerm) + .zipWithIndex + .filter(_._1.annotations.exists(_.tree.tpe == typeOf[BsonIgnore])) + .map { + case (p, i) => + if (p.isParamWithDefault) { + val getterName = TermName("apply$default$" + (i + 1)) + p.name -> q"${tpe.typeSymbol.companion}.$getterName" + } else { + c.abort( + c.enclosingPosition, + s"Field [${p.name}] with BsonIgnore annotation must have a default value" + ) + } + } + + tpe -> defaults + } + }.toMap + } + + // Data converters + def keyName(t: Type): Literal = Literal(Constant(t.typeSymbol.name.decodedName.toString)) + def keyNameTerm(t: TermName): Literal = Literal(classAnnotatedFieldsMap.getOrElse(t, Constant(t.toString))) + + // Primitives type map + val primitiveTypesMap: Map[Type, Type] = Map( + typeOf[Boolean] -> typeOf[java.lang.Boolean], + typeOf[Byte] -> typeOf[java.lang.Byte], + typeOf[Char] -> typeOf[java.lang.Character], + typeOf[Double] -> typeOf[java.lang.Double], + typeOf[Float] -> typeOf[java.lang.Float], + typeOf[Int] -> typeOf[java.lang.Integer], + typeOf[Long] -> typeOf[java.lang.Long], + typeOf[Short] -> typeOf[java.lang.Short] + ) + + /* + * Flattens the type args for any given type. + * + * Removes the key field from Maps as they have to be strings. + * Removes Option type as the Option value is wrapped automatically below. 
+ * Throws if the case class contains a Tuple + * + * @param at the type to flatten the arguments for + * @return a list of the type arguments for the type + */ + def flattenTypeArgs(at: Type): List[c.universe.Type] = { + val t = at.dealias + val typeArgs = t.typeArgs match { + case head :: _ if isMap(t) && !(head.erasure =:= stringType) => + c.abort(c.enclosingPosition, "Maps must contain string types for keys") + case _ :: tail if isMap(t) /* head.erasure =:= stringType */ => tail + case args => args + } + val types = t +: typeArgs.flatMap(x => flattenTypeArgs(x)) + if (types.exists(isTuple)) c.abort(c.enclosingPosition, "Tuples currently aren't supported in case classes") + types.filterNot(isOption).map(x => if (isCaseClass(x)) x else primitiveTypesMap.getOrElse(x.erasure, x)) + } + + /* + * Maps the given field names to type args for the values in the field + * + * ``` + * addresses: Seq[Address] => (addresses, List[classOf[Seq], classOf[Address]]) + * nestedAddresses: Seq[Seq[Address]] => (nestedAddresses, List[classOf[Seq], classOf[Seq], classOf[Address]]) + * ``` + * + * @return a map of the field names to a list of the contained types + */ + def createFieldTypeArgsMap(fields: List[(TermName, Type)]) = { + val setTypeArgs = fields.map({ + case (name, f) => + val key = keyNameTerm(name) + q""" + typeArgs += ($key -> { + val tpeArgs = mutable.ListBuffer.empty[Class[_]] + ..${flattenTypeArgs(f).map(t => + q"tpeArgs += classOf[${if (isCaseClass(t)) t.finalResultType else t.finalResultType.erasure}]" + )} + tpeArgs.toList + })""" + }) + + q""" + val typeArgs = mutable.Map[String, List[Class[_]]]() + ..$setTypeArgs + typeArgs.toMap + """ + } + + /* + * For each case class sets the Map of the given field names and their field types. + */ + def createClassFieldTypeArgsMap = { + val setClassFieldTypeArgs = fields.map(field => q""" + classFieldTypeArgs += (${keyName(field._1)} -> ${createFieldTypeArgsMap(field._2)}) + """) + + q""" + val classFieldTypeArgs = mutable.Map[String, Map[String, List[Class[_]]]]() + ..$setClassFieldTypeArgs + classFieldTypeArgs.toMap + """ + } + + /* + * Creates a `Map[String, Class[_]]` mapping the case class name and the type. + * + * @return the case classes map + */ + def caseClassesMap = { + val setSubClasses = + knownTypes.map(t => q"caseClassesMap += (${keyName(t)} -> classOf[${t.finalResultType.erasure}])") + q""" + val caseClassesMap = mutable.Map[String, Class[_]]() + ..$setSubClasses + caseClassesMap.toMap + """ + } + + /* + * Creates a `Map[Class[_], Boolean]` mapping field types to a boolean representing if they are a case class. + * + * @return the class to case classes map + */ + def classToCaseClassMap = { + val flattenedFieldTypes = fields.flatMap({ case (t, types) => types.map(f => f._2) :+ t }) + val setClassToCaseClassMap = flattenedFieldTypes.map(t => + q"""classToCaseClassMap ++= ${flattenTypeArgs(t).map(t => + q"(classOf[${t.finalResultType.erasure}], ${isCaseClass(t) || isCaseObject(t) || isSealed(t)})" + )}""" + ) + + q""" + val classToCaseClassMap = mutable.Map[Class[_], Boolean]() + ..$setClassToCaseClassMap + classToCaseClassMap.toMap + """ + } + + /* + * Handles the writing of case class fields.
+ * + * @param fields the list of fields + * @return the tree that writes the case class fields + */ + def writeClassValues(fields: List[(TermName, Type)], ignoredFields: Seq[(TermName, Tree)]): List[Tree] = { + fields + .filterNot { case (name, _) => ignoredFields.exists { case (iname, _) => name == iname } } + .map({ + case (name, f) => + val key = keyNameTerm(name) + f match { + case optional if isOption(optional) => q""" + val localVal = instanceValue.$name + if (localVal.isDefined) { + writer.writeName($key) + this.writeFieldValue($key, writer, localVal.get, encoderContext) + } else if ($encodeNone) { + writer.writeName($key) + this.writeFieldValue($key, writer, this.bsonNull, encoderContext) + }""" + case _ => q""" + val localVal = instanceValue.$name + writer.writeName($key) + this.writeFieldValue($key, writer, localVal, encoderContext) + """ + } + }) + } + + /* + * Writes the Case Class fields and values to the BsonWriter + */ + def writeValue: Tree = { + val cases: Seq[Tree] = { + fields.map { + case (classType, _) if isCaseObject(classType) => cq""" ${keyName(classType)} =>""" + case (classType, fields) => + cq""" ${keyName(classType)} => + val instanceValue = value.asInstanceOf[${classType}] + ..${writeClassValues(fields, ignoredFields(classType))}""" + }.toSeq + } :+ cq"""_ => throw new BsonInvalidOperationException("Unexpected class type: " + className)""" + q""" + writer.writeStartDocument() + this.writeClassFieldName(writer, className, encoderContext) + className match { case ..$cases } + writer.writeEndDocument() + """ + } + + def fieldSetters(fields: List[(TermName, Type)], ignoredFields: Seq[(TermName, Tree)]) = { + fields.map({ + case (name, f) => + val key = keyNameTerm(name) + val missingField = Literal(Constant(s"Missing field: $key")) + + ignoredFields.find { case (iname, _) => name == iname }.map(_._2) match { + case Some(default) => + q"$name = $default" + case None => + f match { + case optional if isOption(optional) => + q"$name = (if (fieldData.contains($key)) Option(fieldData($key)) else None).asInstanceOf[$f]" + case _ => + q"""$name = fieldData.getOrElse($key, throw new BsonInvalidOperationException($missingField)).asInstanceOf[$f]""" + } + } + }) + } + + def getInstance = { + val cases = knownTypes.map { st => + if (isCaseObject(st)) { + val instance = st.typeSymbol.asClass.module + cq"${keyName(st)} => $instance" + } else { + cq"${keyName(st)} => new $st(..${fieldSetters(fields(st), ignoredFields(st))})" + } + } :+ cq"""_ => throw new BsonInvalidOperationException("Unexpected class type: " + className)""" + q"className match { case ..$cases }" + } + + c.Expr[Codec[T]]( + q""" + import scala.collection.mutable + import org.bson.{ BsonInvalidOperationException, BsonWriter } + import org.bson.codecs.EncoderContext + import org.bson.codecs.configuration.CodecRegistry + import org.mongodb.scala.bson.codecs.macrocodecs.MacroCodec + + final case class $codecName(codecRegistry: CodecRegistry) extends { + val encoderClass = classOf[$classTypeName] + } with MacroCodec[$classTypeName] { + val caseClassesMap = $caseClassesMap + val classToCaseClassMap = $classToCaseClassMap + val classFieldTypeArgsMap = $createClassFieldTypeArgsMap + def getInstance(className: String, fieldData: Map[String, Any]) = $getInstance + def writeCaseClassData(className: String, writer: BsonWriter, value: $mainType, encoderContext: EncoderContext) = $writeValue + } + + ${codecName.toTermName}($codecRegistry).asInstanceOf[Codec[$mainType]] + """ + ) + } + // scalastyle:on method.length +} diff 
--git a/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/macrocodecs/CaseClassProvider.scala b/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/macrocodecs/CaseClassProvider.scala new file mode 100644 index 00000000000..1eac1f3afd3 --- /dev/null +++ b/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/macrocodecs/CaseClassProvider.scala @@ -0,0 +1,77 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.bson.codecs.macrocodecs + +import scala.reflect.macros.whitebox + +import org.bson.codecs.configuration.{ CodecProvider, CodecRegistry } + +private[codecs] object CaseClassProvider { + + def createCodecProviderEncodeNone[T: c.WeakTypeTag](c: whitebox.Context)(): c.Expr[CodecProvider] = { + import c.universe._ + createCodecProvider[T](c)(c.Expr[Boolean](q"true")) + } + + def createCodecProviderWithClassEncodeNone[T: c.WeakTypeTag]( + c: whitebox.Context + )(clazz: c.Expr[Class[T]]): c.Expr[CodecProvider] = { + import c.universe._ + createCodecProvider[T](c)(c.Expr[Boolean](q"true")) + } + + def createCodecProviderWithClassIgnoreNone[T: c.WeakTypeTag]( + c: whitebox.Context + )(clazz: c.Expr[Class[T]]): c.Expr[CodecProvider] = { + import c.universe._ + createCodecProvider[T](c)(c.Expr[Boolean](q"false")) + } + + def createCodecProviderIgnoreNone[T: c.WeakTypeTag](c: whitebox.Context)(): c.Expr[CodecProvider] = { + import c.universe._ + createCodecProvider[T](c)(c.Expr[Boolean](q"false")) + } + + def createCodecProvider[T: c.WeakTypeTag](c: whitebox.Context)(encodeNone: c.Expr[Boolean]): c.Expr[CodecProvider] = { + import c.universe._ + + // Declared type + val mainType = weakTypeOf[T] + + // Names + def exprCodecRegistry = c.Expr[CodecRegistry](q"codecRegistry") + def codec = CaseClassCodec.createCodec[T](c)(exprCodecRegistry, encodeNone) + + c.Expr[CodecProvider]( + q""" + import org.bson.codecs.Codec + import org.bson.codecs.configuration.{ CodecProvider, CodecRegistry } + + new CodecProvider { + @SuppressWarnings(Array("unchecked")) + def get[C](clazz: Class[C], codecRegistry: CodecRegistry): Codec[C] = { + if (classOf[$mainType].isAssignableFrom(clazz)) { + $codec.asInstanceOf[Codec[C]] + } else { + null + } + } + } + """ + ) + } +} diff --git a/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/macrocodecs/MacroCodec.scala b/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/macrocodecs/MacroCodec.scala new file mode 100644 index 00000000000..e284647af87 --- /dev/null +++ b/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/macrocodecs/MacroCodec.scala @@ -0,0 +1,254 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.bson.codecs.macrocodecs + +import scala.collection.JavaConverters._ +import scala.collection.mutable + +import org.bson._ +import org.bson.codecs.configuration.{ CodecRegistries, CodecRegistry } +import org.bson.codecs.{ Codec, DecoderContext, Encoder, EncoderContext } +import scala.collection.immutable.Vector + +import org.mongodb.scala.bson.BsonNull + +/** + * + * @tparam T the case class type for the codec + * @since 2.0 + */ +trait MacroCodec[T] extends Codec[T] { + + /** + * Creates a `Map[String, Class[_]]` mapping the case class name and the type. + */ + val caseClassesMap: Map[String, Class[_]] + + /** + * Creates a `Map[Class[_], Boolean]` mapping field types to a boolean representing if they are a case class. + */ + val classToCaseClassMap: Map[Class[_], Boolean] + + /** + * A nested map of case class name to a Map of the given field names and a list of the field types. + */ + val classFieldTypeArgsMap: Map[String, Map[String, List[Class[_]]]] + + /** + * The case class type for the codec. + */ + val encoderClass: Class[T] + + /** + * The `CodecRegistry` for use with the codec. + */ + val codecRegistry: CodecRegistry + + /** + * Creates a new instance of the case class with the provided data + * + * @param className the name of the class to be instantiated + * @param fieldsData the Map of data for the class + * @return the new instance of the class + */ + def getInstance(className: String, fieldsData: Map[String, Any]): T + + /** + * The method that writes the data for the case class + * + * @param className the name of the current case class being written + * @param writer the `BsonWriter` + * @param value the value to the case class + * @param encoderContext the `EncoderContext` + */ + def writeCaseClassData(className: String, writer: BsonWriter, value: T, encoderContext: EncoderContext): Unit + + /** + * The field used to save the class name when saving sealed case classes. 
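+ * + * For example (an illustrative sketch), an instance of a case class `Leaf` extending a sealed trait would be stored as `{ "_t": "Leaf", ... }`.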
+ */ + val classFieldName = "_t" + lazy val hasClassFieldName: Boolean = caseClassesMapInv.keySet != Set(encoderClass) + lazy val caseClassesMapInv: Map[Class[_], String] = caseClassesMap.map(_.swap) + protected val registry: CodecRegistry = + CodecRegistries.fromRegistries(List(codecRegistry, CodecRegistries.fromCodecs(this)).asJava) + protected val bsonNull = BsonNull() + + override def encode(writer: BsonWriter, value: T, encoderContext: EncoderContext): Unit = { + if (value == null) { // scalastyle:ignore + throw new BsonInvalidOperationException(s"Invalid value for $encoderClass found a `null` value.") + } + writeValue(writer, value, encoderContext) + } + + override def decode(reader: BsonReader, decoderContext: DecoderContext): T = { + val className = getClassName(reader, decoderContext) + val fieldTypeArgsMap = classFieldTypeArgsMap(className) + val map = mutable.Map[String, Any]() + reader.readStartDocument() + while (reader.readBsonType ne BsonType.END_OF_DOCUMENT) { + val name = reader.readName + val typeArgs = if (name == classFieldName) List(classOf[String]) else fieldTypeArgsMap.getOrElse(name, List.empty) + if (typeArgs.isEmpty) { + reader.skipValue() + } else { + map += (name -> readValue(reader, decoderContext, typeArgs.head, typeArgs.tail)) + } + } + reader.readEndDocument() + getInstance(className, map.toMap) + } + + override def getEncoderClass: Class[T] = encoderClass + + protected def getClassName(reader: BsonReader, decoderContext: DecoderContext): String = { + if (hasClassFieldName) { + // Find the class name + @scala.annotation.tailrec + def readOptionalClassName(): Option[String] = { + if (reader.readBsonType == BsonType.END_OF_DOCUMENT) { + None + } else if (reader.readName == classFieldName) { + Some(codecRegistry.get(classOf[String]).decode(reader, decoderContext)) + } else { + reader.skipValue() + readOptionalClassName() + } + } + + val mark: BsonReaderMark = reader.getMark() + reader.readStartDocument() + val optionalClassName: Option[String] = readOptionalClassName() + mark.reset() + + val className = optionalClassName.getOrElse { + throw new BsonInvalidOperationException(s"Could not decode sealed case class. 
Missing '$classFieldName' field.") + } + + if (!caseClassesMap.contains(className)) { + throw new BsonInvalidOperationException(s"Could not decode sealed case class, unknown class $className.") + } + className + } else { + caseClassesMap.head._1 + } + } + + protected def writeClassFieldName(writer: BsonWriter, className: String, encoderContext: EncoderContext): Unit = { + if (hasClassFieldName) { + writer.writeName(classFieldName) + this.writeValue(writer, className, encoderContext) + } + } + + protected def writeFieldValue[V]( + fieldName: String, + writer: BsonWriter, + value: V, + encoderContext: EncoderContext + ): Unit = { + if (value == null) { // scalastyle:ignore + throw new BsonInvalidOperationException(s"Invalid value for $fieldName found a `null` value.") + } + writeValue(writer, value, encoderContext) + } + + protected def writeValue[V](writer: BsonWriter, value: V, encoderContext: EncoderContext): Unit = { + val clazz = value.getClass + caseClassesMapInv.get(clazz) match { + case Some(className) => + writeCaseClassData(className: String, writer: BsonWriter, value.asInstanceOf[T], encoderContext: EncoderContext) + case None => + val codec = registry.get(clazz).asInstanceOf[Encoder[V]] + encoderContext.encodeWithChildContext(codec, writer, value) + } + } + + protected def readValue[V]( + reader: BsonReader, + decoderContext: DecoderContext, + clazz: Class[V], + typeArgs: List[Class[_]] + ): V = { + val currentType = reader.getCurrentBsonType + currentType match { + case BsonType.DOCUMENT => readDocument(reader, decoderContext, clazz, typeArgs) + case BsonType.ARRAY => readArray(reader, decoderContext, clazz, typeArgs) + case BsonType.NULL => + reader.readNull() + null.asInstanceOf[V] // scalastyle:ignore + case _ => registry.get(clazz).decode(reader, decoderContext) + } + } + + protected def readArray[V]( + reader: BsonReader, + decoderContext: DecoderContext, + clazz: Class[V], + typeArgs: List[Class[_]] + ): V = { + + if (typeArgs.isEmpty) { + throw new BsonInvalidOperationException( + s"Invalid Bson format for '${clazz.getSimpleName}'. Found a list but there is no type data." 
+ ) + } + reader.readStartArray() + val list = mutable.ListBuffer[Any]() + while (reader.readBsonType ne BsonType.END_OF_DOCUMENT) { + list.append(readValue(reader, decoderContext, typeArgs.head, typeArgs.tail)) + } + reader.readEndArray() + if (classOf[Set[_]].isAssignableFrom(clazz)) { + list.toSet.asInstanceOf[V] + } else if (classOf[Vector[_]].isAssignableFrom(clazz)) { + list.toVector.asInstanceOf[V] + } else if (classOf[Stream[_]].isAssignableFrom(clazz)) { + list.toStream.asInstanceOf[V] + } else { + list.toList.asInstanceOf[V] + } + } + + protected def readDocument[V]( + reader: BsonReader, + decoderContext: DecoderContext, + clazz: Class[V], + typeArgs: List[Class[_]] + ): V = { + if (classToCaseClassMap.getOrElse(clazz, false) || typeArgs.isEmpty) { + registry.get(clazz).decode(reader, decoderContext) + } else { + val map = mutable.Map[String, Any]() + reader.readStartDocument() + while (reader.readBsonType ne BsonType.END_OF_DOCUMENT) { + val name = reader.readName + if (typeArgs.isEmpty) { + reader.skipValue() + } else { + map += (name -> readValue( + reader, + decoderContext, + typeArgs.head, + typeArgs.tail + )) + } + } + reader.readEndDocument() + map.toMap.asInstanceOf[V] + } + } +} diff --git a/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/package.scala b/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/package.scala new file mode 100644 index 00000000000..0ce606ad1cb --- /dev/null +++ b/bson-scala/src/main/scala/org/mongodb/scala/bson/codecs/package.scala @@ -0,0 +1,38 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.bson + +package object codecs { + + /** + * Type alias to the `BsonTypeClassMap` + */ + type BsonTypeClassMap = org.bson.codecs.BsonTypeClassMap + + /** + * Companion to return the default `BsonTypeClassMap` + */ + object BsonTypeClassMap { + def apply(): BsonTypeClassMap = new BsonTypeClassMap() + } + + /** + * Type alias to the `BsonTypeCodecMap` + */ + type BsonTypeCodecMap = org.bson.codecs.BsonTypeCodecMap + +} diff --git a/bson-scala/src/main/scala/org/mongodb/scala/bson/collection/BaseDocument.scala b/bson-scala/src/main/scala/org/mongodb/scala/bson/collection/BaseDocument.scala new file mode 100644 index 00000000000..69d25ca69d9 --- /dev/null +++ b/bson-scala/src/main/scala/org/mongodb/scala/bson/collection/BaseDocument.scala @@ -0,0 +1,257 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala.bson.collection + +import scala.collection.JavaConverters._ +import scala.collection.{ GenTraversableOnce, Traversable } +import scala.reflect.ClassTag +import scala.util.{ Failure, Success, Try } + +import org.bson.json.JsonWriterSettings + +import org.mongodb.scala.bson.DefaultHelper._ +import org.mongodb.scala.bson._ +import org.bson.codecs.configuration.CodecRegistry +import org.mongodb.scala.bson.conversions.Bson + +import org.mongodb.scala.bson.BsonMagnets + +/** + * Base Document trait. + * + * A strictly typed `Traversable[(String, BsonValue)]` that provides the underlying immutable document behaviour. + * See [[immutable.Document]] or [[mutable.Document]] for the concrete implementations. + * + * @tparam T The concrete Document implementation + */ +private[bson] trait BaseDocument[T] extends Traversable[(String, BsonValue)] with Bson { + + import BsonMagnets._ + + /** + * The underlying bson document + * + * Restricted access to the underlying BsonDocument + */ + protected[scala] val underlying: BsonDocument + + /** + * Create a concrete document instance + * + * @param underlying the underlying BsonDocument + * @return a concrete document instance + */ + protected[scala] def apply(underlying: BsonDocument): T + + /** + * Retrieves the value which is associated with the given key or throws a `NoSuchElementException`. + * + * @param key the key + * @return the value associated with the given key, or throws `NoSuchElementException`. + */ + def apply[TResult <: BsonValue]( + key: String + )(implicit e: TResult DefaultsTo BsonValue, ct: ClassTag[TResult]): TResult = { + get[TResult](key) match { + case Some(value) => value + case None => throw new NoSuchElementException("key not found: " + key) + } + } + + /** + * Returns the value associated with a key, or a default value if the key is not contained in the map. + * @param key the key. + * @param default The default value in case no binding for `key` is found in the Document. + * This can be any [[BsonValue]] type or any native type that has an implicit [[BsonTransformer]] in scope. + * @tparam B the result type of the default computation. + * @return the value associated with `key` if it exists, + * otherwise the result of the `default` computation. + */ + def getOrElse[B >: BsonValue](key: String, default: CanBeBsonValue): B = get(key) match { + case Some(v) => v + case None => default.value + } + + // scalastyle:off spaces.after.plus method.name + /** + * Creates a new document containing a new key/value and all the existing key/values. + * + * Mapping `kv` will override existing mappings from this document with the same key. + * + * @param elems the key/value mapping to be added. This can be any valid `(String, BsonValue)` pair that can be transformed into a + * [[BsonElement]] via [[BsonMagnets.CanBeBsonElement]] implicits and any [[BsonTransformer]]s that are in scope. + * @return a new document containing mappings of this document and the mapping `kv`. + */ + def +(elems: CanBeBsonElement*): T = { + val bsonDocument: BsonDocument = copyBsonDocument() + elems.foreach(elem => bsonDocument.put(elem.key, elem.value)) + apply(bsonDocument) + } + // scalastyle:on spaces.after.plus + + /** + * Removes one or more elements from this document and returns a new document. + * + * @param elems the keys to remove. + * @return A new document with the keys removed.
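+ * + * An illustrative sketch: a call like `Document("a" -> 1, "b" -> 2) - "b"` yields a new document containing only the `a` binding.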
+ */ + def -(elems: String*): T = --(elems) + + /** + * Removes a number of elements provided by a traversable object and returns a new document without the removed elements. + * + * @param xs the traversable object of keys to remove. + * @return a new document containing all bindings of this document except those whose keys occur in `xs`. + */ + def --(xs: GenTraversableOnce[String]): T = { + val keysToIgnore = xs.toList + val newUnderlying = new BsonDocument() + for ((k, v) <- iterator if !keysToIgnore.contains(k)) { + newUnderlying.put(k, v) + } + apply(newUnderlying) + } + // scalastyle:on method.name + + /** + * Creates a new Document consisting of all key/value pairs of the current document + * plus a new pair of a given key and value. + * + * @param key The key to add + * @param value The new value + * @return A fresh immutable document with the binding from `key` to `value` added to the new document. + */ + def updated[B](key: String, value: B)(implicit transformer: BsonTransformer[B]): T = this + ((key, value)) + + /** + * Creates a new Document consisting of all key/value pairs of the current document + * plus the given key/value pairs. + * + * @param elems The key/values to add. This can be any valid `(String, BsonValue)` pair that can be transformed into a + * [[BsonElement]] via [[BsonMagnets.CanBeBsonElement]] implicits and any [[BsonTransformer]]s that are in scope. + * @return A fresh immutable document with the bindings from `elems` added to the new document. + */ + def updated(elems: CanBeBsonElement*): T = this + (elems: _*) + + /** + * Optionally returns the value associated with a key. + * + * @param key the key we want to look up + * @return an option value containing the value associated with `key` in this document, + * or `None` if none exists. + */ + def get[TResult <: BsonValue]( + key: String + )(implicit e: TResult DefaultsTo BsonValue, ct: ClassTag[TResult]): Option[TResult] = { + underlying.containsKey(key) match { + case true => + Try(ct.runtimeClass.cast(underlying.get(key))) match { + case Success(v) => Some(v.asInstanceOf[TResult]) + case Failure(ex) => None + } + case false => None + } + } + + /** + * Creates a new iterator over all key/value pairs in this document + * + * @return the new iterator + */ + def iterator: Iterator[(String, BsonValue)] = underlying.asScala.iterator + + /** + * Filters this document by retaining only keys satisfying a predicate. + * @param p the predicate used to test keys + * @return a new document consisting only of those key/value pairs of this document where the key satisfies + * the predicate `p`. + */ + def filterKeys(p: String => Boolean): T = this -- keys.filterNot(p) + + /** + * Tests whether this document contains a binding for a key + * + * @param key the key + * @return true if there is a binding for key in this document, false otherwise. + */ + def contains(key: String): Boolean = underlying.containsKey(key) + + /** + * Collects all keys of this document in a set. + * + * @return a set containing all keys of this document. + */ + def keySet: Set[String] = underlying.keySet().asScala.toSet + + /** + * Collects all keys of this document in an iterable collection. + * + * @return the keys of this document as an iterable. + */ + def keys: Iterable[String] = keySet.toIterable + + /** + * Creates an iterator for all keys. + * + * @return an iterator over all keys. + */ + def keysIterator: Iterator[String] = keySet.toIterator + + /** + * Collects all values of this document in an iterable collection.
+ * + * @return the values of this document as an iterable. + */ + def values: Iterable[BsonValue] = underlying.values().asScala + + /** + * Creates an iterator for all values in this document. + * + * @return an iterator over all values that are associated with some key in this document. + */ + def valuesIterator: Iterator[BsonValue] = values.toIterator + + /** + * Gets a JSON representation of this document + * + * @return a JSON representation of this document + */ + def toJson(): String = underlying.toJson + + /** + * Gets a JSON representation of this document using the given `JsonWriterSettings`. + * @param settings the JSON writer settings + * @return a JSON representation of this document + */ + def toJson(settings: JsonWriterSettings): String = underlying.toJson(settings) + + override def toBsonDocument: BsonDocument = underlying + + override def toBsonDocument[TDocument](documentClass: Class[TDocument], codecRegistry: CodecRegistry): BsonDocument = + underlying + + /** + * Copies the BsonDocument + * @return the copied BsonDocument + */ + private[collection] def copyBsonDocument(): BsonDocument = { + val bsonDocument = BsonDocument() + for (entry <- underlying.entrySet().asScala) bsonDocument.put(entry.getKey, entry.getValue) + bsonDocument + } + +} diff --git a/bson-scala/src/main/scala/org/mongodb/scala/bson/collection/package.scala b/bson-scala/src/main/scala/org/mongodb/scala/bson/collection/package.scala new file mode 100644 index 00000000000..7ea56e96b0c --- /dev/null +++ b/bson-scala/src/main/scala/org/mongodb/scala/bson/collection/package.scala @@ -0,0 +1,40 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.bson + +/** + * The collection package. + */ +package object collection { + + /** + * An immutable Document implementation. + * + * A strictly typed `Map[String, BsonValue]` like structure that traverses the elements in insertion order. Unlike native scala maps there + * is no variance in the value type and it always has to be a `BsonValue`. + */ + type Document = immutable.Document + + /** + * An immutable Document implementation. + * + * A strictly typed `Map[String, BsonValue]` like structure that traverses the elements in insertion order. Unlike native scala maps there + * is no variance in the value type and it always has to be a `BsonValue`. + */ + val Document = immutable.Document + +} diff --git a/bson-scala/src/main/scala/org/mongodb/scala/bson/conversions/package.scala b/bson-scala/src/main/scala/org/mongodb/scala/bson/conversions/package.scala new file mode 100644 index 00000000000..977fbfd89fb --- /dev/null +++ b/bson-scala/src/main/scala/org/mongodb/scala/bson/conversions/package.scala @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.bson + +/** + * The conversions package. + */ +package object conversions { + + /** + * Type alias to the Bson interface - an interface for types that are able to render themselves into a `BsonDocument`. + */ + type Bson = org.bson.conversions.Bson +} diff --git a/bson-scala/src/main/scala/org/mongodb/scala/bson/package.scala b/bson-scala/src/main/scala/org/mongodb/scala/bson/package.scala new file mode 100644 index 00000000000..1da0979f1fd --- /dev/null +++ b/bson-scala/src/main/scala/org/mongodb/scala/bson/package.scala @@ -0,0 +1,178 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala + +/** + * The bson package, contains mirrors and companion objects for `Bson` values. + */ +package object bson { + + /** + * An immutable Document implementation. + * + * A strictly typed `Map[String, BsonValue]` like structure that traverses the elements in insertion order. Unlike native scala maps there + * is no variance in the value type and it always has to be a `BsonValue`. + */ + type Document = collection.Document + + /** + * An immutable Document implementation. + * + * A strictly typed `Map[String, BsonValue]` like structure that traverses the elements in insertion order. Unlike native scala maps there + * is no variance in the value type and it always has to be a `BsonValue`. 
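+ * + * A small illustrative sketch: + * ``` + * import org.mongodb.scala.bson._ + * val doc: Document = Document("name" -> "MongoDB", "count" -> 1) + * ```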
+ */ + val Document = collection.Document + + /** + * Alias to `org.bson.BsonArray` + */ + type BsonArray = org.bson.BsonArray + + /** + * Alias to `org.bson.BsonBinary` + */ + type BsonBinary = org.bson.BsonBinary + + /** + * Alias to `org.bson.BsonBoolean` + */ + type BsonBoolean = org.bson.BsonBoolean + + /** + * Alias to `org.bson.BsonDateTime` + */ + type BsonDateTime = org.bson.BsonDateTime + + /** + * Alias to `org.bson.BsonDecimal128` + * @since 1.2 + */ + type BsonDecimal128 = org.bson.BsonDecimal128 + + /** + * Alias to `org.bson.BsonDocument` + */ + type BsonDocument = org.bson.BsonDocument + + /** + * Alias to `org.bson.BsonDouble` + */ + type BsonDouble = org.bson.BsonDouble + + /** + * Alias to `org.bson.BsonInt32` + */ + type BsonInt32 = org.bson.BsonInt32 + + /** + * Alias to `org.bson.BsonInt64` + */ + type BsonInt64 = org.bson.BsonInt64 + + /** + * Alias to `org.bson.BsonJavaScript` + */ + type BsonJavaScript = org.bson.BsonJavaScript + + /** + * Alias to `org.bson.BsonJavaScriptWithScope` + */ + type BsonJavaScriptWithScope = org.bson.BsonJavaScriptWithScope + + /** + * Alias to `org.bson.BsonMaxKey` + */ + type BsonMaxKey = org.bson.BsonMaxKey + + /** + * Alias to `org.bson.BsonMinKey` + */ + type BsonMinKey = org.bson.BsonMinKey + + /** + * Alias to `org.bson.BsonNull` + */ + type BsonNull = org.bson.BsonNull + + /** + * Alias to `org.bson.BsonNumber` + */ + type BsonNumber = org.bson.BsonNumber + + /** + * Alias to `org.bson.BsonObjectId` + */ + type BsonObjectId = org.bson.BsonObjectId + + /** + * Alias to `org.bson.BsonRegularExpression` + */ + type BsonRegularExpression = org.bson.BsonRegularExpression + + /** + * Alias to `org.bson.BsonString` + */ + type BsonString = org.bson.BsonString + + /** + * Alias to `org.bson.BsonSymbol` + */ + type BsonSymbol = org.bson.BsonSymbol + + /** + * Alias to `org.bson.BsonTimestamp` + */ + type BsonTimestamp = org.bson.BsonTimestamp + + /** + * Alias to `org.bson.BsonUndefined` + */ + type BsonUndefined = org.bson.BsonUndefined + + /** + * Alias to `org.bson.BsonValue` + */ + type BsonValue = org.bson.BsonValue + + /** + * Alias to `org.bson.BsonElement` + */ + type BsonElement = org.bson.BsonElement + + /** + * Alias to `org.bson.ObjectId` + * @since 1.2 + */ + type ObjectId = org.bson.types.ObjectId + + /** + * Alias to `org.bson.Decimal128` + * @since 1.2 + */ + type Decimal128 = org.bson.types.Decimal128 + + /** + * Implicit value class for a [[BsonElement]] allowing easy access to the key/value pair + * + * @param self the bsonElement + */ + implicit class RichBsonElement(val self: BsonElement) extends AnyVal { + def key: String = self.getName + def value: BsonValue = self.getValue + } + +} diff --git a/bson-scala/src/test/scala/org/mongodb/scala/bson/BaseSpec.scala b/bson-scala/src/test/scala/org/mongodb/scala/bson/BaseSpec.scala new file mode 100644 index 00000000000..4ac18c5b31c --- /dev/null +++ b/bson-scala/src/test/scala/org/mongodb/scala/bson/BaseSpec.scala @@ -0,0 +1,21 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.mongodb.scala.bson + +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers + +abstract class BaseSpec extends AnyFlatSpec with Matchers {} diff --git a/bson-scala/src/test/scala/org/mongodb/scala/bson/BsonTransformerSpec.scala b/bson-scala/src/test/scala/org/mongodb/scala/bson/BsonTransformerSpec.scala new file mode 100644 index 00000000000..79629d04151 --- /dev/null +++ b/bson-scala/src/test/scala/org/mongodb/scala/bson/BsonTransformerSpec.scala @@ -0,0 +1,94 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.bson + +import java.util.Date + +import org.mongodb.scala.bson.collection.{ immutable, mutable } + +import scala.language.implicitConversions + +class BsonTransformerSpec extends BaseSpec { + + "The BsonTransformer companion" should "not transform BsonValues" in { + transform(BsonString("abc")) should equal(BsonString("abc")) + } + it should "transform Binary" in { + transform(Array[Byte](128.toByte)) should equal(BsonBinary(Array[Byte](128.toByte))) + } + it should "transform BigDecimal" in { + transform(BigDecimal(100)) should equal(BsonDecimal128(100)) + } + it should "transform Boolean" in { + transform(true) should equal(BsonBoolean(true)) + } + it should "transform DateTime" in { + transform(new Date(100)) should equal(BsonDateTime(100)) + } + it should "transform Decimal128" in { + transform(new Decimal128(100)) should equal(BsonDecimal128(100)) + } + it should "transform Double" in { + transform(2.0) should equal(BsonDouble(2.0)) + } + it should "transform ImmutableDocument" in { + transform(immutable.Document("a" -> 1, "b" -> "two", "c" -> false)) should equal( + BsonDocument("a" -> 1, "b" -> "two", "c" -> false) + ) + } + + it should "transform Int" in { + transform(1) should equal(BsonInt32(1)) + } + it should "transform KeyValuePairs[T]" in { + transform(Seq("a" -> "a", "b" -> "b", "c" -> "c")) should equal(BsonDocument("a" -> "a", "b" -> "b", "c" -> "c")) + } + it should "transform Long" in { + transform(1L) should equal(BsonInt64(1)) + } + it should "transform MutableDocument" in { + transform(mutable.Document("a" -> 1, "b" -> "two", "c" -> false)) should equal( + BsonDocument("a" -> 1, "b" -> "two", "c" -> false) + ) + } + it should "transform None" in { + transform(None) should equal(BsonNull()) + } + it should "transform ObjectId" in { + val objectId = new ObjectId() + transform(objectId) should equal(BsonObjectId(objectId)) + } + it should "transform Option[T]" in { + transform(Some(1)) should equal(new BsonInt32(1)) + } + it should "transform Regex" in { + transform("/.*/".r) should equal(BsonRegularExpression("/.*/")) + } + it should "transform Seq[T]" in { + transform(Seq("a", "b", "c")) should equal(BsonArray("a", "b", "c")) + } + it should "transform String" in { + transform("abc") should equal(BsonString("abc")) + }
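+ + // Each assertion above relies on the `transform` helper defined at the bottom of this spec, which simply + // applies whichever implicit `BsonTransformer[T]` is in scope for the given value.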
+  it should "not compile when no transformer is available" in {
+    "transform(BigInt(12))" shouldNot compile
+  }
+
+  implicit def transform[T](v: T)(implicit transformer: BsonTransformer[T]): BsonValue = transformer(v)
+
+}
diff --git a/bson-scala/src/test/scala/org/mongodb/scala/bson/BsonValueSpec.scala b/bson-scala/src/test/scala/org/mongodb/scala/bson/BsonValueSpec.scala
new file mode 100644
index 00000000000..f7040afc427
--- /dev/null
+++ b/bson-scala/src/test/scala/org/mongodb/scala/bson/BsonValueSpec.scala
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.bson
+
+import java.util.Date
+
+import scala.collection.JavaConverters._
+
+class BsonValueSpec extends BaseSpec {
+
+  "BsonArray companion" should "create a BsonArray" in {
+    BsonArray() should equal(new BsonArray())
+
+    val values: List[BsonNumber] = List(BsonInt32(1), BsonInt64(2), new BsonDouble(3.0))
+    val bsonArray = BsonArray.fromIterable(values)
+    val expected = new BsonArray(values.asJava)
+
+    bsonArray should equal(expected)
+
+    val implicitBsonArray = BsonArray(1, 2L, 3.0)
+    implicitBsonArray should equal(expected)
+  }
+
+  "BsonBinary companion" should "create a BsonBinary" in {
+    val byteArray = Array[Byte](80.toByte, 5, 4, 3, 2, 1)
+    BsonBinary(byteArray) should equal(new BsonBinary(byteArray))
+  }
+
+  "BsonBoolean companion" should "create a BsonBoolean" in {
+    BsonBoolean(false) should equal(new BsonBoolean(false))
+    BsonBoolean(true) should equal(new BsonBoolean(true))
+  }
+
+  "BsonDateTime companion" should "create a BsonDateTime" in {
+    val date = new Date()
+
+    BsonDateTime(date) should equal(new BsonDateTime(date.getTime))
+    BsonDateTime(1000) should equal(new BsonDateTime(1000))
+  }
+
+  "BsonDecimal128 companion" should "create a BsonDecimal128" in {
+    val expected = new BsonDecimal128(new Decimal128(100))
+
+    BsonDecimal128(100) should equal(expected)
+    BsonDecimal128("100") should equal(expected)
+    BsonDecimal128(BigDecimal(100)) should equal(expected)
+    BsonDecimal128(new Decimal128(100)) should equal(expected)
+  }
+
+  "BsonDocument companion" should "create a BsonDocument" in {
+    val expected = new BsonDocument("a", BsonInt32(1))
+    expected.put("b", BsonDouble(2.0))
+
+    BsonDocument() should equal(new BsonDocument())
+    BsonDocument("a" -> 1, "b" -> 2.0) should equal(expected)
+    BsonDocument(Seq(("a", BsonInt32(1)), ("b", BsonDouble(2.0)))) should equal(expected)
+    BsonDocument("{a: 1, b: 2.0}") should equal(expected)
+  }
+
+  "BsonDouble companion" should "create a BsonDouble" in {
+    BsonDouble(2.0) should equal(new BsonDouble(2.0))
+  }
+
+  "BsonInt32 companion" should "create a BsonInt32" in {
+    BsonInt32(1) should equal(new BsonInt32(1))
+  }
+
+  "BsonInt64 companion" should "create a BsonInt64" in {
+    BsonInt64(1) should equal(new BsonInt64(1))
+  }
+
+  "BsonJavaScript companion" should "create a BsonJavaScript" in {
+    BsonJavaScript("function(){}") should equal(new BsonJavaScript("function(){}"))
+  }
+
+  "BsonJavaScriptWithScope companion" should "create a BsonJavaScriptWithScope" in {
+    val function = "function(){}"
+    val scope = new BsonDocument("a", new BsonInt32(1))
+    val expected = new BsonJavaScriptWithScope(function, scope)
+
+    BsonJavaScriptWithScope(function, scope) should equal(expected)
+    BsonJavaScriptWithScope(function, "a" -> 1) should equal(expected)
+    BsonJavaScriptWithScope(function, Document("a" -> 1)) should equal(expected)
+  }
+
+  "BsonMaxKey companion" should "create a BsonMaxKey" in {
+    BsonMaxKey() should equal(new BsonMaxKey())
+  }
+
+  "BsonMinKey companion" should "create a BsonMinKey" in {
+    BsonMinKey() should equal(new BsonMinKey())
+  }
+
+  "BsonNull companion" should "create a BsonNull" in {
+    BsonNull() should equal(new BsonNull())
+  }
+
+  "BsonNumber companion" should "create a BsonNumber" in {
+    BsonNumber(1) should equal(BsonInt32(1))
+    BsonNumber(1L) should equal(BsonInt64(1))
+    BsonNumber(1.0) should equal(BsonDouble(1.0))
+  }
+
+  "BsonObjectId companion" should "create a BsonObjectId" in {
+    val bsonObjectId = BsonObjectId()
+    val objectId = bsonObjectId.getValue
+    val hexString = objectId.toHexString
+    val expected = new BsonObjectId(bsonObjectId.getValue)
+
+    bsonObjectId should equal(expected)
+    BsonObjectId(hexString) should equal(expected)
+    BsonObjectId(objectId) should equal(expected)
+  }
+
+  "BsonRegularExpression companion" should "create a BsonRegularExpression" in {
+    BsonRegularExpression("/(.*)/") should equal(new BsonRegularExpression("/(.*)/"))
+    BsonRegularExpression("/(.*)/".r) should equal(new BsonRegularExpression("/(.*)/"))
+    BsonRegularExpression("/(.*)/", "?i") should equal(new BsonRegularExpression("/(.*)/", "?i"))
+  }
+
+  "BsonString companion" should "create a BsonString" in {
+    BsonString("aBc") should equal(new BsonString("aBc"))
+  }
+
+  "BsonSymbol companion" should "create a BsonSymbol" in {
+    BsonSymbol(Symbol("sym")) should equal(new BsonSymbol("sym"))
+  }
+
+  "BsonTimestamp companion" should "create a BsonTimestamp" in {
+    BsonTimestamp() should equal(new BsonTimestamp(0, 0))
+    BsonTimestamp(10, 1) should equal(new BsonTimestamp(10, 1))
+  }
+
+  "BsonUndefined companion" should "create a BsonUndefined" in {
+    BsonUndefined() should equal(new BsonUndefined())
+  }
+
+}
diff --git a/bson-scala/src/test/scala/org/mongodb/scala/bson/codecs/DocumentCodecProviderSpec.scala b/bson-scala/src/test/scala/org/mongodb/scala/bson/codecs/DocumentCodecProviderSpec.scala
new file mode 100644
index 00000000000..7fb08f842c1
--- /dev/null
+++ b/bson-scala/src/test/scala/org/mongodb/scala/bson/codecs/DocumentCodecProviderSpec.scala
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.bson.codecs
+
+import org.bson.codecs.configuration.CodecRegistries.fromProviders
+import org.mongodb.scala.bson.BaseSpec
+import org.mongodb.scala.bson.collection.{ immutable, mutable, Document }
+
+class DocumentCodecProviderSpec extends BaseSpec {
+
+  "DocumentCodecProvider" should "get the correct codec" in {
+
+    val provider = DocumentCodecProvider()
+    val registry = fromProviders(provider)
+
+    provider.get[Document](classOf[Document], registry) shouldBe a[ImmutableDocumentCodec]
+    provider.get[immutable.Document](classOf[immutable.Document], registry) shouldBe a[ImmutableDocumentCodec]
+    provider.get[mutable.Document](classOf[mutable.Document], registry) shouldBe a[MutableDocumentCodec]
+    Option(provider.get[String](classOf[String], registry)) shouldBe None
+  }
+}
diff --git a/bson-scala/src/test/scala/org/mongodb/scala/bson/codecs/ImmutableDocumentCodecSpec.scala b/bson-scala/src/test/scala/org/mongodb/scala/bson/codecs/ImmutableDocumentCodecSpec.scala
new file mode 100644
index 00000000000..74c6436f5bc
--- /dev/null
+++ b/bson-scala/src/test/scala/org/mongodb/scala/bson/codecs/ImmutableDocumentCodecSpec.scala
@@ -0,0 +1,130 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.bson.codecs
+
+import java.nio.ByteBuffer
+import java.util.Date
+
+import org.bson._
+import org.bson.codecs.configuration.CodecRegistry
+import org.bson.codecs.{ DecoderContext, EncoderContext }
+import org.bson.io.{ BasicOutputBuffer, ByteBufferBsonInput }
+import org.bson.types.ObjectId
+import org.mongodb.scala.bson.BaseSpec
+import org.mongodb.scala.bson.codecs.Registry.DEFAULT_CODEC_REGISTRY
+import org.mongodb.scala.bson.collection.immutable.Document
+
+import scala.collection.JavaConverters._
+
+class ImmutableDocumentCodecSpec extends BaseSpec {
+
+  val registry: CodecRegistry = DEFAULT_CODEC_REGISTRY
+
+  "ImmutableDocumentCodec" should "encode and decode all default types with readers and writers" in {
+    val original: Document = Document(
+      "binary" -> new BsonBinary("bson".toCharArray map (_.toByte)),
+      "boolean" -> new BsonBoolean(true),
+      "dateTime" -> new BsonDateTime(new Date().getTime),
+      "double" -> new BsonDouble(1.0),
+      "int" -> new BsonInt32(1),
+      "long" -> new BsonInt64(1L),
+      "null" -> new BsonNull(),
+      "objectId" -> new BsonObjectId(new ObjectId()),
+      "regEx" -> new BsonRegularExpression("^bson".r.regex),
+      "string" -> new BsonString("string"),
+      "symbol" -> new BsonSymbol(Symbol("bson").name),
+      "bsonDocument" -> new BsonDocument("a", new BsonString("string")),
+      "array" -> new BsonArray(List(new BsonString("string"), new BsonBoolean(false)).asJava)
+    )
+
+    info("encoding")
+    val writer: BsonBinaryWriter = new BsonBinaryWriter(new BasicOutputBuffer())
+    ImmutableDocumentCodec(registry).encode(writer, original, EncoderContext.builder().build())
+
+    info("decoding")
+    val buffer: BasicOutputBuffer = writer.getBsonOutput().asInstanceOf[BasicOutputBuffer]
+    val reader: BsonBinaryReader = new BsonBinaryReader(
+      new ByteBufferBsonInput(
+        new ByteBufNIO(ByteBuffer.wrap(buffer.toByteArray))
+      )
+    )
+
+    val decodedDocument = ImmutableDocumentCodec().decode(reader, DecoderContext.builder().build())
+
+    decodedDocument shouldBe a[Document]
+    original should equal(decodedDocument)
+  }
+
+  it should "respect encodeIdFirst property in encoder context" in {
+    val original: Document = Document(
+      "a" -> new BsonString("string"),
+      "_id" -> new BsonInt32(1),
+      "nested" -> Document("a" -> new BsonString("string"), "_id" -> new BsonInt32(1)).toBsonDocument
+    )
+
+    info("encoding")
+    val writer: BsonBinaryWriter = new BsonBinaryWriter(new BasicOutputBuffer())
+    ImmutableDocumentCodec(registry).encode(
+      writer,
+      original,
+      EncoderContext.builder().isEncodingCollectibleDocument(true).build()
+    )
+
+    info("decoding")
+    val buffer: BasicOutputBuffer = writer.getBsonOutput().asInstanceOf[BasicOutputBuffer]
+    val reader: BsonBinaryReader =
+      new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(buffer.toByteArray))))
+
+    val decodedDocument = ImmutableDocumentCodec().decode(reader, DecoderContext.builder().build())
+
+    decodedDocument shouldBe a[Document]
+    original should equal(decodedDocument)
+    decodedDocument.keys.toList should contain theSameElementsInOrderAs (List("_id", "a", "nested"))
+
+    Document(decodedDocument[BsonDocument]("nested")).keys.toList should contain theSameElementsInOrderAs (List(
+      "a",
+      "_id"
+    ))
+  }
+
+  it should "have the correct encoder class" in {
+    ImmutableDocumentCodec().getEncoderClass should equal(classOf[Document])
+  }
+
+  it should "determine if document has an _id" in {
+    ImmutableDocumentCodec().documentHasId(Document()) should be(false)
+    ImmutableDocumentCodec().documentHasId(Document("_id" -> new BsonInt32(1))) should be(true)
+  }
+
+  it should "get the document _id" in {
+    ImmutableDocumentCodec().getDocumentId(Document()) should be(null)
+    ImmutableDocumentCodec().getDocumentId(Document("_id" -> new BsonInt32(1))) should be(new BsonInt32(1))
+  }
+
+  it should "generate document id if absent but not mutate the original document" in {
+    val document = Document()
+    val document2 = ImmutableDocumentCodec().generateIdIfAbsentFromDocument(document)
+    document.contains("_id") shouldBe false
+    document2("_id") shouldBe a[BsonObjectId]
+  }
+
+  it should "not generate document id if present" in {
+    val document = Document("_id" -> new BsonInt32(1))
+    ImmutableDocumentCodec().generateIdIfAbsentFromDocument(document)
+    document("_id") should equal(new BsonInt32(1))
+  }
+}
diff --git a/bson-scala/src/test/scala/org/mongodb/scala/bson/codecs/IterableCodecProviderSpec.scala b/bson-scala/src/test/scala/org/mongodb/scala/bson/codecs/IterableCodecProviderSpec.scala
new file mode 100644
index 00000000000..2e9fb983a90
--- /dev/null
+++ b/bson-scala/src/test/scala/org/mongodb/scala/bson/codecs/IterableCodecProviderSpec.scala
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.bson.codecs
+
+import org.bson.codecs.configuration.CodecRegistries.fromProviders
+import org.mongodb.scala.bson.BaseSpec
+
+class IterableCodecProviderSpec extends BaseSpec {
+
+  "IterableCodecProvider" should "get the correct codec" in {
+
+    val provider = IterableCodecProvider()
+    val registry = fromProviders(provider)
+
+    provider.get[Iterable[Any]](classOf[Iterable[Any]], registry) shouldBe a[IterableCodec]
+    provider.get[List[String]](classOf[List[String]], registry) shouldBe a[IterableCodec]
+    provider.get[Seq[Integer]](classOf[Seq[Integer]], registry) shouldBe a[IterableCodec]
+    provider.get[Map[String, Integer]](classOf[Map[String, Integer]], registry) shouldBe a[IterableCodec]
+    Option(provider.get[String](classOf[String], registry)) shouldBe None
+  }
+
+}
diff --git a/bson-scala/src/test/scala/org/mongodb/scala/bson/codecs/IterableCodecSpec.scala b/bson-scala/src/test/scala/org/mongodb/scala/bson/codecs/IterableCodecSpec.scala
new file mode 100644
index 00000000000..644d6fc068f
--- /dev/null
+++ b/bson-scala/src/test/scala/org/mongodb/scala/bson/codecs/IterableCodecSpec.scala
@@ -0,0 +1,122 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.bson.codecs
+
+import org.bson.codecs.{ DecoderContext, EncoderContext }
+import org.bson.{ BsonDocumentReader, BsonDocumentWriter, Transformer }
+import org.mongodb.scala.bson.codecs.Registry.DEFAULT_CODEC_REGISTRY
+import org.mongodb.scala.bson.{ BaseSpec, BsonDocument }
+
+class IterableCodecSpec extends BaseSpec {
+
+  "IterableCodec" should "have the correct encoding class" in {
+    val codec = IterableCodec(DEFAULT_CODEC_REGISTRY, BsonTypeClassMap())
+    codec.getEncoderClass() should equal(classOf[Iterable[_]])
+  }
+
+  it should "encode an Iterable to a BSON array" in {
+    val codec = IterableCodec(DEFAULT_CODEC_REGISTRY, BsonTypeClassMap())
+    val writer = new BsonDocumentWriter(new BsonDocument())
+
+    writer.writeStartDocument()
+    writer.writeName("array")
+    codec.encode(writer, List(1, 2, 3), EncoderContext.builder().build())
+    writer.writeEndDocument()
+    writer.getDocument should equal(BsonDocument("{array : [1, 2, 3]}"))
+  }
+
+  it should "decode a BSON array to an Iterable" in {
+    val codec = IterableCodec(DEFAULT_CODEC_REGISTRY, BsonTypeClassMap())
+    val reader = new BsonDocumentReader(BsonDocument("{array : [1, 2, 3]}"))
+
+    reader.readStartDocument()
+    reader.readName("array")
+    val iterable = codec.decode(reader, DecoderContext.builder().build())
+    reader.readEndDocument()
+
+    iterable should equal(List(1, 2, 3))
+  }
+
+  it should "encode an Iterable containing Maps to a BSON array" in {
+    val codec = IterableCodec(DEFAULT_CODEC_REGISTRY, BsonTypeClassMap())
+    val writer = new BsonDocumentWriter(new BsonDocument())
+
+    writer.writeStartDocument()
+    writer.writeName("array")
+    codec.encode(
+      writer,
+      List(Map("a" -> 1, "b" -> 2, "c" -> null)),
+      EncoderContext.builder().build()
+    ) // scalastyle:ignore
+    writer.writeEndDocument()
+    writer.getDocument should equal(BsonDocument("{array : [{a: 1, b: 2, c: null}]}"))
+  }
+
+  it should "decode a BSON array containing maps to an Iterable" in {
+    val codec = IterableCodec(DEFAULT_CODEC_REGISTRY, BsonTypeClassMap())
+    val reader = new BsonDocumentReader(BsonDocument("{array : [{a: 1, b: 2, c: null}]}"))
+
+    reader.readStartDocument()
+    reader.readName("array")
+    val iterable = codec.decode(reader, DecoderContext.builder().build())
+    reader.readEndDocument()
+
+    iterable should equal(List(Map("a" -> 1, "b" -> 2, "c" -> null))) // scalastyle:ignore
+  }
+
+  it should "encode a Map to a BSON document" in {
+    val codec = IterableCodec(DEFAULT_CODEC_REGISTRY, BsonTypeClassMap())
+    val writer = new BsonDocumentWriter(new BsonDocument())
+
+    writer.writeStartDocument()
+    writer.writeName("document")
+    codec.encode(writer, Map("a" -> 1, "b" -> 2), EncoderContext.builder().build())
+    writer.writeEndDocument()
+    writer.getDocument should equal(BsonDocument("{document : {a: 1, b: 2}}"))
+  }
+
+  it should "decode a BSON Document to a Map" in {
+    val codec = IterableCodec(DEFAULT_CODEC_REGISTRY, BsonTypeClassMap())
+    val reader = new BsonDocumentReader(BsonDocument("{document : {a: 1, b: 2}}"))
+
+    reader.readStartDocument()
+    reader.readName("document")
+    val iterable = codec.decode(reader, DecoderContext.builder().build())
+    reader.readEndDocument()
+
+    iterable should equal(Map("a" -> 1, "b" -> 2))
+  }
+
+  it should "use the provided transformer" in {
+    val codec = IterableCodec(
+      DEFAULT_CODEC_REGISTRY,
+      BsonTypeClassMap(),
+      new Transformer {
+        override def transform(objectToTransform: Any): AnyRef = s"$objectToTransform"
+      }
+    )
+    val reader = new BsonDocumentReader(BsonDocument("{array : [1, 2, 3]}"))
+
+    reader.readStartDocument()
+    reader.readName("array")
+    val iterable = codec.decode(reader, DecoderContext.builder().build())
+    reader.readEndDocument()
+
+    iterable.toList should contain theSameElementsInOrderAs List("1", "2", "3")
+  }
+
+}
diff --git a/bson-scala/src/test/scala/org/mongodb/scala/bson/codecs/MacrosSpec.scala b/bson-scala/src/test/scala/org/mongodb/scala/bson/codecs/MacrosSpec.scala
new file mode 100644
index 00000000000..e3c8ded2d89
--- /dev/null
+++ b/bson-scala/src/test/scala/org/mongodb/scala/bson/codecs/MacrosSpec.scala
@@ -0,0 +1,727 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.bson.codecs
+
+import java.nio.ByteBuffer
+import java.util
+import java.util.Date
+
+import org.bson._
+import org.bson.codecs.configuration.{ CodecProvider, CodecRegistries, CodecRegistry }
+import org.bson.codecs.{ Codec, DecoderContext, EncoderContext }
+import org.bson.io.{ BasicOutputBuffer, ByteBufferBsonInput, OutputBuffer }
+import org.bson.types.ObjectId
+import org.mongodb.scala.bson.BaseSpec
+import org.mongodb.scala.bson.annotations.{ BsonIgnore, BsonProperty }
+import org.mongodb.scala.bson.codecs.Macros.{ createCodecProvider, createCodecProviderIgnoreNone }
+import org.mongodb.scala.bson.codecs.Registry.DEFAULT_CODEC_REGISTRY
+import org.mongodb.scala.bson.collection.immutable.Document
+import scala.collection.immutable.Vector
+
+import scala.collection.JavaConverters._
+import scala.reflect.ClassTag
+
+//scalastyle:off
+class MacrosSpec extends BaseSpec {
+
+  case class Empty()
+  case class Person(firstName: String, lastName: String)
+  case class DefaultValue(name: String, active: Boolean = false)
+  case class SeqOfStrings(name: String, value: Seq[String])
+  case class RecursiveSeq(name: String, value: Seq[RecursiveSeq])
+  case class AnnotatedClass(@BsonProperty("annotated_name") name: String)
+  case class IgnoredFieldClass(name: String, @BsonIgnore meta: String = "ignored_default")
+
+  case class Binary(binary: Array[Byte]) {
+
+    /**
+     * Custom equals
+     *
+     * Because `Array[Byte]` equality is based on identity, we use the `toSeq` helper to compare the actual values.
+     *
+     * @param arg the other value
+     * @return true if equal else false
+     */
+    override def equals(arg: Any): Boolean = arg match {
+      case that: Binary => that.binary.toSeq == binary.toSeq
+      case _            => false
+    }
+  }
+  case class AllTheBsonTypes(
+      documentMap: Map[String, String],
+      array: Seq[String],
+      date: Date,
+      boolean: Boolean,
+      double: Double,
+      int32: Int,
+      int64: Long,
+      string: String,
+      binary: Binary,
+      none: Option[String]
+  )
+
+  case class MapOfStrings(name: String, value: Map[String, String])
+  case class SeqOfMapOfStrings(name: String, value: Seq[Map[String, String]])
+  case class RecursiveMapOfStrings(name: String, value: Seq[Map[String, RecursiveMapOfStrings]])
+
+  type StringAlias = String
+  case class MapOfStringAliases(name: String, value: Map[StringAlias, StringAlias])
+
+  case class ContainsCaseClass(name: String, friend: Person)
+  case class ContainsSeqCaseClass(name: String, friends: Seq[Person])
+  case class ContainsNestedSeqCaseClass(name: String, friends: Seq[Seq[Person]])
+  case class ContainsMapOfCaseClasses(name: String, friends: Map[String, Person])
+  case class ContainsMapOfMapOfCaseClasses(name: String, friends: Map[String, Map[String, Person]])
+  case class ContainsCaseClassWithDefault(name: String, friend: Person = Person("Frank", "Sinatra"))
+
+  case class ContainsSet(name: String, friends: Set[String])
+  case class ContainsVector(name: String, friends: Vector[String])
+  case class ContainsList(name: String, friends: List[String])
+  case class ContainsStream(name: String, friends: Stream[String])
+
+  case class CaseClassWithVal(_id: ObjectId, name: String) {
+    val id: String = _id.toString
+  }
+
+  case class OptionalValue(name: String, value: Option[String])
+  case class OptionalCaseClass(name: String, value: Option[Person])
+  case class OptionalRecursive(name: String, value: Option[OptionalRecursive])
+
+  sealed class Tree
+  case class Branch(@BsonProperty("l1") b1: Tree, @BsonProperty("r1") b2: Tree, value: Int) extends Tree
+  case class Leaf(value: Int) extends Tree
+
+  sealed trait WithIgnored
+  case class MetaIgnoredField(data: String, @BsonIgnore meta: Seq[String] = Vector("ignore_me")) extends WithIgnored
+  case class LeafCountIgnoredField(branchCount: Int, @BsonIgnore leafCount: Int = 100) extends WithIgnored
+  case class ContainsIgnoredField(list: Seq[WithIgnored])
+
+  case class ContainsADT(name: String, tree: Tree)
+  case class ContainsSeqADT(name: String, trees: Seq[Tree])
+  case class ContainsNestedSeqADT(name: String, trees: Seq[Seq[Tree]])
+
+  sealed class Graph
+  case class Node(name: String, value: Option[Graph]) extends Graph
+
+  sealed class NotImplementedSealedClass
+  sealed trait NotImplementedSealedTrait
+  case class UnsupportedTuple(value: (String, String))
+  case class UnsupportedMap(value: Map[Int, Int])
+
+  type SimpleTypeAlias = Map[String, String]
+  case class ContainsSimpleTypeAlias(a: String, b: SimpleTypeAlias = Map.empty)
+  type CaseClassTypeAlias = Person
+  case class ContainsCaseClassTypeAlias(a: String, b: CaseClassTypeAlias)
+  type ADTCaseClassTypeAlias = ContainsADT
+  case class ContainsADTCaseClassTypeAlias(a: String, b: ADTCaseClassTypeAlias)
+
+  trait Tag
+  case class ContainsTaggedTypes(
+      a: Int with Tag,
+      b: String with Tag,
+      c: Map[String with Tag, Int with Tag] with Tag,
+      d: Empty with Tag
+  ) extends Tag
+
+  case class ContainsTypeLessMap(a: BsonDocument)
+
+  sealed class SealedClassCaseObject
+  object SealedClassCaseObject {
+    case object Alpha extends SealedClassCaseObject
+  }
+
+  sealed trait CaseObjectEnum
+  case object Alpha extends CaseObjectEnum
+  case object Bravo extends CaseObjectEnum
+  case object Charlie extends CaseObjectEnum
+
+  case class ContainsEnumADT(name: String, enum: CaseObjectEnum)
+
+  sealed class SealedClass
+  case class SealedClassA(stringField: String) extends SealedClass
+  case class SealedClassB(intField: Int) extends SealedClass
+  case class ContainsSealedClass(list: List[SealedClass])
+
+  sealed abstract class SealedAbstractClass
+  case class SealedAbstractClassA(stringField: String) extends SealedAbstractClass
+  case class SealedAbstractClassB(intField: Int) extends SealedAbstractClass
+  case class ContainsSealedAbstractClass(list: List[SealedAbstractClass])
+
+  sealed class SealedClassWithParams(val superField: String)
+  case class SealedClassWithParamsA(stringField: String, override val superField: String)
+      extends SealedClassWithParams(superField)
+  case class SealedClassWithParamsB(intField: Int, override val superField: String)
+      extends SealedClassWithParams(superField)
+  case class ContainsSealedClassWithParams(list: List[SealedClassWithParams])
+
+  sealed abstract class SealedAbstractClassWithParams(val superField: String)
+  case class SealedAbstractClassWithParamsA(stringField: String, override val superField: String)
+      extends SealedAbstractClassWithParams(superField)
+  case class SealedAbstractClassWithParamsB(intField: Int, override val superField: String)
+      extends SealedAbstractClassWithParams(superField)
+  case class ContainsSealedAbstractClassWithParams(list: List[SealedAbstractClassWithParams])
+
+  sealed trait SealedTrait
+  case class SealedTraitA(stringField: String) extends SealedTrait
+  case class SealedTraitB(intField: Int) extends SealedTrait
+  case class ContainsSealedTrait(list: List[SealedTrait])
+
+  sealed class SingleSealedClass
+  case class SingleSealedClassImpl() extends SingleSealedClass
+
+  sealed abstract class SingleSealedAbstractClass
+  case class SingleSealedAbstractClassImpl() extends SingleSealedAbstractClass
+
+  sealed trait SingleSealedTrait
+  case class SingleSealedTraitImpl() extends SingleSealedTrait
+
+  "Macros" should "be able to round trip simple case classes" in {
+    roundTrip(Empty(), "{}", classOf[Empty])
+    roundTrip(Person("Bob", "Jones"), """{firstName: "Bob", lastName: "Jones"}""", classOf[Person])
+    roundTrip(DefaultValue(name = "Bob"), """{name: "Bob", active: false}""", classOf[DefaultValue])
+    roundTrip(
+      SeqOfStrings("Bob", Seq("scala", "jvm")),
+      """{name: "Bob", value: ["scala", "jvm"]}""",
+      classOf[SeqOfStrings]
+    )
+    roundTrip(
+      RecursiveSeq("Bob", Seq(RecursiveSeq("Charlie", Seq.empty[RecursiveSeq]))),
+      """{name: "Bob", value: [{name: "Charlie", value: []}]}""",
+      classOf[RecursiveSeq]
+    )
+    roundTrip(AnnotatedClass("Bob"), """{annotated_name: "Bob"}""", classOf[AnnotatedClass])
+    roundTrip(
+      MapOfStrings("Bob", Map("brother" -> "Tom Jones")),
+      """{name: "Bob", value: {brother: "Tom Jones"}}""",
+      classOf[MapOfStrings]
+    )
+    roundTrip(
+      MapOfStringAliases("Bob", Map("brother" -> "Tom Jones")),
+      """{name: "Bob", value: {brother: "Tom Jones"}}""",
+      classOf[MapOfStringAliases]
+    )
+    roundTrip(
+      SeqOfMapOfStrings("Bob", Seq(Map("brother" -> "Tom Jones"))),
+      """{name: "Bob", value: [{brother: "Tom Jones"}]}""",
+      classOf[SeqOfMapOfStrings]
+    )
+    roundTrip(
+      ContainsSet("Bob", Set("Tom", "Charlie")),
+      """{name: "Bob", friends: ["Tom","Charlie"]}""",
+      Macros.createCodecProvider(classOf[ContainsSet])
+    )
+    roundTrip(
+      ContainsVector("Bob", Vector("Tom", "Charlie")),
+      """{name: "Bob", friends: ["Tom","Charlie"]}""",
+      Macros.createCodecProvider(classOf[ContainsVector])
+    )
+    roundTrip(
+      ContainsList("Bob", List("Tom", "Charlie")),
+      """{name: "Bob", friends: ["Tom","Charlie"]}""",
+      Macros.createCodecProvider(classOf[ContainsList])
+    )
+    roundTrip(
+      ContainsStream("Bob", Stream("Tom", "Charlie")),
+      """{name: "Bob", friends: ["Tom","Charlie"]}""",
+      Macros.createCodecProvider(classOf[ContainsStream])
+    )
+  }
+
+  it should "be able to ignore fields" in {
+    roundTrip(
+      IgnoredFieldClass("Bob", "singer"),
+      IgnoredFieldClass("Bob"),
+      """{name: "Bob"}""",
+      classOf[IgnoredFieldClass]
+    )
+
+    roundTrip(
+      ContainsIgnoredField(Vector(MetaIgnoredField("Bob", List("singer")), LeafCountIgnoredField(1, 10))),
+      ContainsIgnoredField(Vector(MetaIgnoredField("Bob"), LeafCountIgnoredField(1))),
+      """{"list" : [{"_t" : "MetaIgnoredField", "data" : "Bob" }, {"_t" : "LeafCountIgnoredField", "branchCount": 1}]}""",
+      classOf[ContainsIgnoredField],
+      classOf[WithIgnored]
+    )
+  }
+
+  it should "be able to round trip polymorphic nested case classes in a sealed class" in {
+    roundTrip(
+      ContainsSealedClass(List(SealedClassA("test"), SealedClassB(12))),
+      """{"list" : [{"_t" : "SealedClassA", "stringField" : "test"}, {"_t" : "SealedClassB", "intField" : 12}]}""",
+      classOf[ContainsSealedClass],
+      classOf[SealedClass]
+    )
+  }
+
+  it should "be able to round trip polymorphic nested case classes in a sealed abstract class" in {
+    roundTrip(
+      ContainsSealedAbstractClass(List(SealedAbstractClassA("test"), SealedAbstractClassB(12))),
+      """{"list" : [{"_t" : "SealedAbstractClassA", "stringField" : "test"}, {"_t" : "SealedAbstractClassB", "intField" : 12}]}""",
+      classOf[ContainsSealedAbstractClass],
+      classOf[SealedAbstractClass]
+    )
+  }
+
+  it should "be able to round trip polymorphic nested case classes in a sealed class with parameters" in {
+    roundTrip(
+      ContainsSealedClassWithParams(
+        List(SealedClassWithParamsA("test", "tested1"), SealedClassWithParamsB(12, "tested2"))
+      ),
+      """{"list" : [{"_t" : "SealedClassWithParamsA", "stringField" : "test", "superField" : "tested1"}, {"_t" : "SealedClassWithParamsB", "intField" : 12, "superField" : "tested2"}]}""",
+      classOf[ContainsSealedClassWithParams],
+      classOf[SealedClassWithParams]
+    )
+  }
+
+  it should "be able to round trip polymorphic nested case classes in a sealed abstract class with parameters" in {
+    roundTrip(
+      ContainsSealedAbstractClassWithParams(
+        List(SealedAbstractClassWithParamsA("test", "tested1"), SealedAbstractClassWithParamsB(12, "tested2"))
+      ),
+      """{"list" : [{"_t" : "SealedAbstractClassWithParamsA", "stringField" : "test", "superField" : "tested1"}, {"_t" : "SealedAbstractClassWithParamsB", "intField" : 12, "superField" : "tested2"}]}""",
+      classOf[ContainsSealedAbstractClassWithParams],
+      classOf[SealedAbstractClassWithParams]
+    )
+  }
+
+  it should "be able to round trip polymorphic nested case classes in a sealed trait" in {
+    roundTrip(
+      ContainsSealedTrait(List(SealedTraitA("test"), SealedTraitB(12))),
+      """{"list" : [{"_t" : "SealedTraitA", "stringField" : "test"}, {"_t" : "SealedTraitB", "intField" : 12}]}""",
+      classOf[ContainsSealedTrait],
+      classOf[SealedTrait]
+    )
+  }
+
+  it should "be able to round trip nested case classes" in {
+    roundTrip(
+      ContainsCaseClass("Charlie", Person("Bob", "Jones")),
+      """{name: "Charlie", friend: {firstName: "Bob", lastName: "Jones"}}""",
+      classOf[ContainsCaseClass],
+      classOf[Person]
+    )
+    roundTrip(
+      ContainsSeqCaseClass("Charlie", Seq(Person("Bob", "Jones"))),
+      """{name: "Charlie", friends: [{firstName: "Bob", lastName: "Jones"}]}""",
+      classOf[ContainsSeqCaseClass],
+      classOf[Person]
+    )
+    roundTrip(
+      ContainsNestedSeqCaseClass("Charlie", Seq(Seq(Person("Bob", "Jones")), Seq(Person("Tom", "Jones")))),
+      """{name: "Charlie", friends: [[{firstName: "Bob", lastName: "Jones"}], [{firstName: "Tom", lastName: "Jones"}]]}""",
+      classOf[ContainsNestedSeqCaseClass],
+      classOf[Person]
+    )
+  }
+
+  it should "be able to round trip nested case classes in maps" in {
+    roundTrip(
+      ContainsMapOfCaseClasses("Bob", Map("name" -> Person("Jane", "Jones"))),
+      """{name: "Bob", friends: {name: {firstName: "Jane", lastName: "Jones"}}}""",
+      classOf[ContainsMapOfCaseClasses],
+      classOf[Person]
+    )
+    roundTrip(
+      ContainsMapOfMapOfCaseClasses("Bob", Map("maternal" -> Map("mother" -> Person("Jane", "Jones")))),
+      """{name: "Bob", friends: {maternal: {mother: {firstName: "Jane", lastName: "Jones"}}}}""",
+      classOf[ContainsMapOfMapOfCaseClasses],
+      classOf[Person]
+    )
+  }
+
+  it should "be able to round trip optional values" in {
+    roundTrip(OptionalValue("Bob", None), """{name: "Bob", value: null}""", classOf[OptionalValue])
+    roundTrip(OptionalValue("Bob", Some("value")), """{name: "Bob", value: "value"}""", classOf[OptionalValue])
+    roundTrip(OptionalCaseClass("Bob", None), """{name: "Bob", value: null}""", classOf[OptionalCaseClass])
+    roundTrip(
+      OptionalCaseClass("Bob", Some(Person("Charlie", "Jones"))),
+      """{name: "Bob", value: {firstName: "Charlie", lastName: "Jones"}}""",
+      classOf[OptionalCaseClass],
+      classOf[Person]
+    )
+
+    roundTrip(OptionalRecursive("Bob", None), """{name: "Bob", value: null}""", classOf[OptionalRecursive])
+    roundTrip(
+      OptionalRecursive("Bob", Some(OptionalRecursive("Charlie", None))),
+      """{name: "Bob", value: {name: "Charlie", value: null}}""",
+      classOf[OptionalRecursive]
+    )
+  }
+
+  it should "be able to round trip Map values where the top level implementations don't include type information" in {
+    roundTrip(
+      ContainsTypeLessMap(BsonDocument.parse("""{b: "c"}""")),
+      """{a: {b: "c"}}""",
+      classOf[ContainsTypeLessMap]
+    )
+  }
+
+  it should "be able to decode case classes missing optional values" in {
+    val registry =
+      CodecRegistries.fromRegistries(CodecRegistries.fromProviders(classOf[OptionalValue]), DEFAULT_CODEC_REGISTRY)
+    val buffer = encode(registry.get(classOf[Document]), Document("name" -> "Bob"))
+
+    decode(registry.get(classOf[OptionalValue]), buffer) should equal(OptionalValue("Bob", None))
+  }
+
+  it should "be able to round trip default values" in {
+    roundTrip(
+      ContainsCaseClassWithDefault("Charlie"),
+      """{name: "Charlie", friend: { firstName: "Frank", lastName: "Sinatra"}}""",
+      classOf[ContainsCaseClassWithDefault],
+      classOf[Person]
+    )
+  }
+
+  it should "roundtrip case classes containing vals" in {
+    val id = new ObjectId
+    roundTrip(
+      CaseClassWithVal(id, "Bob"),
+      s"""{"_id": {"$$oid": "${id.toHexString}" }, "name" : "Bob"}""",
+      classOf[CaseClassWithVal]
+    )
+  }
+
+  it should "be able to decode case class with vals" in {
+    val registry = CodecRegistries.fromRegistries(
+      CodecRegistries.fromProviders(classOf[CaseClassWithVal]),
+      DEFAULT_CODEC_REGISTRY
+    )
+
+    val id = new ObjectId
+    val buffer = encode(
+      registry.get(classOf[Document]),
+      Document("_id" -> id, "name" -> "Bob")
+    )
+
+    decode(
+      registry.get(classOf[CaseClassWithVal]),
+      buffer
+    ) should equal(CaseClassWithVal(id, "Bob"))
+  }
+
+  it should "be able to round trip optional values, when None is ignored" in {
+    roundTrip(OptionalValue("Bob", None), """{name: "Bob"}""", createCodecProviderIgnoreNone[OptionalValue]())
+    roundTrip(
+      OptionalValue("Bob", Some("value")),
+      """{name: "Bob", value: "value"}""",
+      createCodecProviderIgnoreNone[OptionalValue]()
+    )
+    roundTrip(OptionalCaseClass("Bob", None), """{name: "Bob"}""", createCodecProviderIgnoreNone[OptionalCaseClass]())
+    roundTrip(
+      OptionalCaseClass("Bob", Some(Person("Charlie", "Jones"))),
+      """{name: "Bob", value: {firstName: "Charlie", lastName: "Jones"}}""",
+      createCodecProviderIgnoreNone[OptionalCaseClass](),
+      createCodecProviderIgnoreNone[Person]()
+    )
+
+    roundTrip(OptionalRecursive("Bob", None), """{name: "Bob"}""", createCodecProviderIgnoreNone[OptionalRecursive]())
+    roundTrip(
+      OptionalRecursive("Bob", Some(OptionalRecursive("Charlie", None))),
+      """{name: "Bob", value: {name: "Charlie"}}""",
+      createCodecProviderIgnoreNone[OptionalRecursive]()
+    )
+  }
+
+  it should "roundtrip all the supported bson types" in {
+    roundTrip(
+      AllTheBsonTypes(
+        Map("a" -> "b"),
+        Seq("a", "b", "c"),
+        new Date(123),
+        boolean = true,
+        1.0,
+        10,
+        100L,
+        "string",
+        Binary(Array[Byte](123)),
+        None
+      ),
+      """{"documentMap" : { "a" : "b" }, "array" : ["a", "b", "c"], "date" : { "$date" : 123 }, "boolean" : true,
+        | "double" : 1.0, "int32" : 10, "int64" : { "$numberLong" : "100" }, "string" : "string",
+        | "binary" : { "binary": { "$binary" : "ew==", "$type" : "00" } }, "none" : null }""".stripMargin,
+      classOf[Binary],
+      classOf[AllTheBsonTypes]
+    )
+  }
+
+  it should "support ADT sealed case classes" in {
+    val leaf = Leaf(1)
+    val branch = Branch(Branch(Leaf(1), Leaf(2), 3), Branch(Leaf(4), Leaf(5), 6), 3) // scalastyle:ignore
+    val leafJson = createTreeJson(leaf)
+    val branchJson = createTreeJson(branch)
+
+    roundTrip(leaf, leafJson, classOf[Tree])
+    roundTrip(branch, branchJson, classOf[Tree])
+
+    roundTrip(ContainsADT("Bob", leaf), s"""{name: "Bob", tree: $leafJson}""", classOf[ContainsADT], classOf[Tree])
+    roundTrip(ContainsADT("Bob", branch), s"""{name: "Bob", tree: $branchJson}""", classOf[ContainsADT], classOf[Tree])
+
+    roundTrip(
+      ContainsSeqADT("Bob", List(leaf, branch)),
+      s"""{name: "Bob", trees: [$leafJson, $branchJson]}""",
+      classOf[ContainsSeqADT],
+      classOf[Tree]
+    )
+    roundTrip(
+      ContainsNestedSeqADT("Bob", List(List(leaf), List(branch))),
+      s"""{name: "Bob", trees: [[$leafJson], [$branchJson]]}""",
+      classOf[ContainsNestedSeqADT],
+      classOf[Tree]
+    )
+  }
+
+  it should "write the type of sealed classes and traits with only one subclass" in {
+    roundTrip(SingleSealedClassImpl(), """{ "_t" : "SingleSealedClassImpl" }""", classOf[SingleSealedClass])
+    roundTrip(
+      SingleSealedAbstractClassImpl(),
+      """{ "_t" : "SingleSealedAbstractClassImpl" }""",
+      classOf[SingleSealedAbstractClass]
+    )
+    roundTrip(SingleSealedTraitImpl(), """{ "_t" : "SingleSealedTraitImpl" }""", classOf[SingleSealedTrait])
+  }
+
+  it should "support optional values in ADT sealed classes" in {
+    val nodeA = Node("nodeA", None)
+    val nodeB = Node("nodeB", Some(nodeA))
+
+    val nodeAJson = """{_t: "Node", name: "nodeA", value: null}"""
+    val nodeBJson = s"""{_t: "Node", name: "nodeB", value: $nodeAJson}"""
+
+    roundTrip(nodeA, nodeAJson, classOf[Graph])
+    roundTrip(nodeB, nodeBJson, classOf[Graph])
+  }
+
+  it should "support type aliases in case classes" in {
+    roundTrip(
+      ContainsSimpleTypeAlias("c", Map("d" -> "c")),
+      """{a: "c", b: {d: "c"}}""",
+      classOf[ContainsSimpleTypeAlias]
+    )
+    roundTrip(
+      ContainsCaseClassTypeAlias("c", Person("Tom", "Jones")),
+      """{a: "c", b: {firstName: "Tom", lastName: "Jones"}}""",
+      classOf[ContainsCaseClassTypeAlias],
+      classOf[CaseClassTypeAlias]
+    )
+
+    val branch = Branch(Branch(Leaf(1), Leaf(2), 3), Branch(Leaf(4), Leaf(5), 6), 3) // scalastyle:ignore
+    val branchJson = createTreeJson(branch)
+    roundTrip(
+      ContainsADTCaseClassTypeAlias("c", ContainsADT("Tom", branch)),
+      s"""{a: "c", b: {name: "Tom", tree: $branchJson}}""",
+      classOf[ContainsADTCaseClassTypeAlias],
+      classOf[ADTCaseClassTypeAlias],
+      classOf[Tree]
+    )
+  }
+
+  it should "support tagged types in case classes" in {
+    assume(!scala.util.Properties.versionNumberString.startsWith("2.11"))
+    val a = 1.asInstanceOf[Int with Tag]
+    val b = "b".asInstanceOf[String with Tag]
+    val c = Map("c" -> 0).asInstanceOf[Map[String with Tag, Int with Tag] with Tag]
+    val d = Empty().asInstanceOf[Empty with Tag]
+    roundTrip(
+      ContainsTaggedTypes(a, b, c, d),
+      """{a: 1, b: "b", c: {c: 0}, d: {}}""",
+      classOf[ContainsTaggedTypes],
+      classOf[Empty]
+    )
+  }
+
+  it should "be able to support value classes" in {
+    val valueClassCodecProvider = new CodecProvider {
+      override def get[T](clazz: Class[T], registry: CodecRegistry): Codec[T] = {
+        if (clazz == classOf[IsValueClass]) {
+          new Codec[IsValueClass] {
+            override def encode(writer: BsonWriter, value: IsValueClass, encoderContext: EncoderContext): Unit =
+              writer.writeInt32(value.id)
+
+            override def getEncoderClass: Class[IsValueClass] = classOf[IsValueClass]
+
+            override def decode(reader: BsonReader, decoderContext: DecoderContext): IsValueClass =
+              IsValueClass(reader.readInt32())
+          }.asInstanceOf[Codec[T]]
+        } else {
+          null // scalastyle:ignore
+        }
+      }
+    }
+    roundTrip(
+      ContainsValueClass(IsValueClass(1), "string value"),
+      """{id: 1, myString: 'string value'}""",
+      classOf[ContainsValueClass],
+      valueClassCodecProvider
+    )
+  }
+
+  it should "support case object enum types" in {
+    roundTrip(Alpha, """{_t:"Alpha"}""", classOf[CaseObjectEnum])
+    roundTrip(Bravo, """{_t:"Bravo"}""", classOf[CaseObjectEnum])
+    roundTrip(Charlie, """{_t:"Charlie"}""", classOf[CaseObjectEnum])
+
+    roundTrip(
+      ContainsEnumADT("Bob", Alpha),
+      """{name:"Bob", enum:{_t:"Alpha"}}""",
+      classOf[ContainsEnumADT],
+      classOf[CaseObjectEnum]
+    )
+  }
+
+  it should "support extra fields in the document" in {
+    val json =
+      """{firstName: "Bob", lastName: "Jones", address: {number: 1, street: "Acacia Avenue"}, aliases: ["Robert", "Rob"]}"""
+    decode(Person("Bob", "Jones"), json, Macros.createCodec[Person]())
+  }
+
+  it should "throw a BsonInvalidOperationException when the _t field is missing" in {
+    val missing_t = """{name: "nodeA", value: null}"""
+    val registry = CodecRegistries.fromRegistries(CodecRegistries.fromProviders(classOf[Graph]), DEFAULT_CODEC_REGISTRY)
+
+    val buffer = encode(registry.get(classOf[Document]), Document(missing_t))
+
+    an[BsonInvalidOperationException] should be thrownBy {
+      decode(registry.get(classOf[Graph]), buffer)
+    }
+  }
+
+  it should "throw a BsonInvalidOperationException when the _t field contains an unknown class name" in {
+    val unknown_t = """{_t: "Wibble", name: "nodeA", value: null}"""
+    val registry = CodecRegistries.fromRegistries(CodecRegistries.fromProviders(classOf[Graph]), DEFAULT_CODEC_REGISTRY)
+    val buffer = encode(registry.get(classOf[Document]), Document(unknown_t))
+
+    an[BsonInvalidOperationException] should be thrownBy {
+      decode(registry.get(classOf[Graph]), buffer)
+    }
+  }
+
+  it should "throw a BsonInvalidOperationException when encountering null values in case classes" in {
+    val registry =
+      CodecRegistries.fromRegistries(CodecRegistries.fromProviders(classOf[Person]), DEFAULT_CODEC_REGISTRY)
+    an[BsonInvalidOperationException] should be thrownBy {
+      encode(registry.get(classOf[Person]), null)
+    }
+
+    an[BsonInvalidOperationException] should be thrownBy {
+      encode(registry.get(classOf[Person]), Person(null, null))
+    }
+  }
+
+  it should "not compile case classes with unsupported values" in {
+    "Macros.createCodecProvider(classOf[UnsupportedTuple])" shouldNot compile
+    "Macros.createCodecProvider(classOf[UnsupportedMap])" shouldNot compile
+  }
+
+  it should "not compile if there are no concrete implementations of a sealed class or trait" in {
+    "Macros.createCodecProvider(classOf[NotImplementedSealedClass])" shouldNot compile
+    "Macros.createCodecProvider(classOf[NotImplementedSealedTrait])" shouldNot compile
+  }
+
+  it should "error when reading unexpected lists" in {
+    val registry = CodecRegistries.fromRegistries(
+      CodecRegistries.fromProviders(classOf[ContainsCaseClass], classOf[Person]),
+      DEFAULT_CODEC_REGISTRY
+    )
+    an[BsonInvalidOperationException] should be thrownBy {
+      val json = """{name: "Bob", friend: [{firstName: "Jane", lastName: "Ada"}]}"""
+      decode(ContainsCaseClass("Bob", Person("Jane", "Ada")), json, registry.get(classOf[ContainsCaseClass]))
+    }
+  }
+
+  it should "error when reading unexpected documents" in {
+    val registry = CodecRegistries.fromRegistries(
+      CodecRegistries.fromProviders(classOf[ContainsCaseClass], classOf[Person]),
+      DEFAULT_CODEC_REGISTRY
+    )
+    an[BsonInvalidOperationException] should be thrownBy {
+      val json = """{name: "Bob", friend: {first: {firstName: "Jane", lastName: "Ada"}}}"""
+      decode(ContainsCaseClass("Bob", Person("Jane", "Ada")), json, registry.get(classOf[ContainsCaseClass]))
+    }
+  }
+
+  def roundTrip[T](value: T, expected: String, provider: CodecProvider, providers: CodecProvider*)(
+      implicit ct: ClassTag[T]
+  ): Unit = {
+    val codecProviders: util.List[CodecProvider] = (provider +: providers).asJava
+    val registry = CodecRegistries.fromRegistries(CodecRegistries.fromProviders(codecProviders), DEFAULT_CODEC_REGISTRY)
+    val codec = registry.get(ct.runtimeClass).asInstanceOf[Codec[T]]
+    roundTripCodec(value, Document(expected), codec)
+  }
+
+  def roundTrip[T](value: T, decodedValue: T, expected: String, provider: CodecProvider, providers: CodecProvider*)(
+      implicit ct: ClassTag[T]
+  ): Unit = {
+    val codecProviders: util.List[CodecProvider] = (provider +: providers).asJava
+    val registry = CodecRegistries.fromRegistries(CodecRegistries.fromProviders(codecProviders), DEFAULT_CODEC_REGISTRY)
+    val codec = registry.get(ct.runtimeClass).asInstanceOf[Codec[T]]
+    roundTripCodec(value, decodedValue, Document(expected), codec)
+  }
+
+  def roundTripCodec[T](value: T, expected: Document, codec: Codec[T]): Unit = {
+    val encoded = encode(codec, value)
+    val actual = decode(documentCodec, encoded)
+    assert(expected == actual, s"Encoded document: (${actual.toJson()}) did not equal: (${expected.toJson()})")
+
+    val roundTripped = decode(codec, encode(codec, value))
+    assert(roundTripped == value, s"Round Tripped case class: ($roundTripped) did not equal the original: ($value)")
+  }
+
+  def roundTripCodec[T](value: T, decodedValue: T, expected: Document, codec: Codec[T]): Unit = {
+    val encoded = encode(codec, value)
+    val actual = decode(documentCodec, encoded)
+    assert(expected == actual, s"Encoded document: (${actual.toJson()}) did not equal: (${expected.toJson()})")
+
+    val roundTripped = decode(codec, encode(codec, value))
+    assert(
+      roundTripped == decodedValue,
+      s"Round Tripped case class: ($roundTripped) did not equal the expected: ($decodedValue)"
+    )
+  }
+
+  def encode[T](codec: Codec[T], value: T): OutputBuffer = {
+    val buffer = new BasicOutputBuffer()
+    val writer = new BsonBinaryWriter(buffer)
+    codec.encode(writer, value, EncoderContext.builder.build)
+    buffer
+  }
+
+  def decode[T](codec: Codec[T], buffer: OutputBuffer): T = {
+    val reader = new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(buffer.toByteArray))))
+    codec.decode(reader, DecoderContext.builder().build())
+  }
+
+  def decode[T](value: T, json: String, codec: Codec[T]): Unit = {
+    val roundTripped = decode(codec, encode(documentCodec, Document(json)))
+    assert(roundTripped == value, s"Round Tripped case class: ($roundTripped) did not equal the original: ($value)")
+  }
+
+  val documentCodec: Codec[Document] = DEFAULT_CODEC_REGISTRY.get(classOf[Document])
+
+  def createTreeJson(tree: Tree): String = {
+    tree match {
+      case l: Leaf => s"""{_t: "Leaf", value: ${l.value}}"""
+      case b: Branch =>
+        s"""{_t: "Branch", l1: ${createTreeJson(b.b1)}, r1: ${createTreeJson(b.b2)}, value: ${b.value}}"""
+      case _ => "{}"
+    }
+  }
+
+}
+
+case class IsValueClass(id: Int) extends AnyVal
+case class ContainsValueClass(id: IsValueClass, myString: String)
diff --git a/bson-scala/src/test/scala/org/mongodb/scala/bson/codecs/MutableDocumentCodecSpec.scala b/bson-scala/src/test/scala/org/mongodb/scala/bson/codecs/MutableDocumentCodecSpec.scala
new file mode 100644
index 00000000000..6a6b78580b1
--- /dev/null
+++ b/bson-scala/src/test/scala/org/mongodb/scala/bson/codecs/MutableDocumentCodecSpec.scala
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.bson.codecs
+
+import java.nio.ByteBuffer
+import java.util.Date
+
+import org.bson._
+import org.bson.codecs.configuration.CodecRegistry
+import org.bson.codecs.{ DecoderContext, EncoderContext }
+import org.bson.io.{ BasicOutputBuffer, ByteBufferBsonInput }
+import org.bson.types.ObjectId
+import org.mongodb.scala.bson.BaseSpec
+import org.mongodb.scala.bson.codecs.Registry.DEFAULT_CODEC_REGISTRY
+import org.mongodb.scala.bson.collection.mutable
+import org.mongodb.scala.bson.collection.mutable.Document
+
+import scala.collection.JavaConverters._
+
+class MutableDocumentCodecSpec extends BaseSpec {
+
+  val registry: CodecRegistry = DEFAULT_CODEC_REGISTRY
+
+  "MutableDocumentCodec" should "encode and decode all default types with readers and writers" in {
+    val original: mutable.Document = Document(
+      "binary" -> new BsonBinary("bson".toCharArray map (_.toByte)),
+      "boolean" -> new BsonBoolean(true),
+      "dateTime" -> new BsonDateTime(new Date().getTime),
+      "double" -> new BsonDouble(1.0),
+      "int" -> new BsonInt32(1),
+      "long" -> new BsonInt64(1L),
+      "null" -> new BsonNull(),
+      "objectId" -> new BsonObjectId(new ObjectId()),
+      "regEx" -> new BsonRegularExpression("^bson".r.regex),
+      "string" -> new BsonString("string"),
+      "symbol" -> new BsonSymbol(Symbol("bson").name),
+      "bsonDocument" -> new BsonDocument("a", new BsonString("string")),
+      "array" -> new BsonArray(List(new BsonString("string"), new BsonBoolean(false)).asJava)
+    )
+
+    info("encoding")
+    val writer: BsonBinaryWriter = new BsonBinaryWriter(new BasicOutputBuffer())
+    MutableDocumentCodec(registry).encode(writer, original, EncoderContext.builder().build())
+
+    info("decoding")
+    val buffer: BasicOutputBuffer = writer.getBsonOutput().asInstanceOf[BasicOutputBuffer]
+    val reader: BsonBinaryReader =
+      new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(buffer.toByteArray))))
+
+    val decodedDocument = MutableDocumentCodec().decode(reader, DecoderContext.builder().build())
+
+    decodedDocument shouldBe a[mutable.Document]
+    original should equal(decodedDocument)
+  }
+
+  it should "respect encodeIdFirst property in encoder context" in {
+    val original: mutable.Document = Document(
+      "a" -> new BsonString("string"),
+      "_id" -> new BsonInt32(1),
+      "nested" -> Document("a" -> new BsonString("string"), "_id" -> new BsonInt32(1)).toBsonDocument
+    )
+
+    info("encoding")
+    val writer: BsonBinaryWriter = new BsonBinaryWriter(new BasicOutputBuffer())
+    MutableDocumentCodec(registry).encode(
+      writer,
+      original,
+      EncoderContext.builder().isEncodingCollectibleDocument(true).build()
+    )
+
+    info("decoding")
+    val buffer: BasicOutputBuffer = writer.getBsonOutput().asInstanceOf[BasicOutputBuffer]
+    val reader: BsonBinaryReader =
+      new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(buffer.toByteArray))))
+
+    val decodedDocument = MutableDocumentCodec().decode(reader, DecoderContext.builder().build())
+
+    decodedDocument shouldBe a[mutable.Document]
+    original should equal(decodedDocument)
+    decodedDocument.keys.toList should contain theSameElementsInOrderAs (List("_id", "a", "nested"))
+
+    Document(decodedDocument[BsonDocument]("nested")).keys.toList should contain theSameElementsInOrderAs (List(
+      "a",
+      "_id"
+    ))
+  }
+
+  it should "have the correct encoder class" in {
+    MutableDocumentCodec().getEncoderClass should equal(classOf[mutable.Document])
+  }
+
+  it should "determine if document has an _id" in {
+    MutableDocumentCodec().documentHasId(Document()) should be(false)
+    MutableDocumentCodec().documentHasId(Document("_id" -> new BsonInt32(1))) should be(true)
+  }
+
+  it should "get the document _id" in {
+    MutableDocumentCodec().getDocumentId(Document()) should be(null)
+    MutableDocumentCodec().getDocumentId(Document("_id" -> new BsonInt32(1))) should be(new BsonInt32(1))
+  }
+
+  it should "generate document id if absent" in {
+    val document = Document()
+    MutableDocumentCodec().generateIdIfAbsentFromDocument(document)
+    document("_id") shouldBe a[BsonObjectId]
+  }
+
+  it should "not generate document id if present" in {
+    val document = Document("_id" -> new BsonInt32(1))
+    MutableDocumentCodec().generateIdIfAbsentFromDocument(document)
+    document("_id") should equal(new BsonInt32(1))
+  }
+}
diff --git a/bson-scala/src/test/scala/org/mongodb/scala/bson/codecs/Registry.scala b/bson-scala/src/test/scala/org/mongodb/scala/bson/codecs/Registry.scala
new file mode 100644
index 00000000000..ae17988a770
--- /dev/null
+++ b/bson-scala/src/test/scala/org/mongodb/scala/bson/codecs/Registry.scala
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.bson.codecs
+
+import org.bson.codecs.configuration.CodecRegistries.fromProviders
+import org.bson.codecs.configuration.CodecRegistry
+import org.bson.codecs.{ BsonValueCodecProvider, ValueCodecProvider }
+
+object Registry {
+
+  val DEFAULT_CODEC_REGISTRY: CodecRegistry = fromProviders(
+    DocumentCodecProvider(),
+    IterableCodecProvider(),
+    new ValueCodecProvider(),
+    new BsonValueCodecProvider()
+  )
+
+}
diff --git a/bson-scala/src/test/scala/org/mongodb/scala/bson/collections/DocumentImplicitTypeConversion.scala b/bson-scala/src/test/scala/org/mongodb/scala/bson/collections/DocumentImplicitTypeConversion.scala
new file mode 100644
index 00000000000..0afce3b596b
--- /dev/null
+++ b/bson-scala/src/test/scala/org/mongodb/scala/bson/collections/DocumentImplicitTypeConversion.scala
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.bson.collections
+
+import org.mongodb.scala.bson._
+import org.mongodb.scala.bson.collection.immutable.Document
+import org.mongodb.scala.bson.collection.mutable
+
+class DocumentImplicitTypeConversion extends BaseSpec {
+
+  val emptyDoc: Document = Document.empty
+
+  "Document additions and updates" should "support simple additions" in {
+    val doc1: Document = Document() + ("key" -> "value")
+    doc1 should equal(Document("key" -> BsonString("value")))
+
+    val doc2: Document = doc1 + ("key2" -> 2)
+    doc2 should equal(Document("key" -> BsonString("value"), "key2" -> BsonInt32(2)))
+  }
+
+  it should "support multiple additions" in {
+    val doc1: Document = emptyDoc + ("key" -> "value", "key2" -> 2, "key3" -> true, "key4" -> None)
+    doc1 should equal(
+      Document("key" -> BsonString("value"), "key2" -> BsonInt32(2), "key3" -> BsonBoolean(true), "key4" -> BsonNull())
+    )
+  }
+
+  it should "support addition of a traversable" in {
+    val doc1: Document = emptyDoc ++ Document("key" -> "value", "key2" -> 2, "key3" -> true, "key4" -> None)
+    doc1 should equal(
+      Document("key" -> BsonString("value"), "key2" -> BsonInt32(2), "key3" -> BsonBoolean(true), "key4" -> BsonNull())
+    )
+  }
+
+  it should "support updated" in {
+    val doc1: Document = emptyDoc.updated("key", "value")
+    emptyDoc should not be doc1
+    doc1 should equal(Document("key" -> BsonString("value")))
+  }
+
+  it should "be creatable from mixed types" in {
+    val doc1: Document = Document(
+      "a" -> "string",
+      "b" -> true,
+      "c" -> List("a", "b", "c"),
+      "d" -> Document("a" -> "string", "b" -> true, "c" -> List("a", "b", "c"))
+    )
+
+    val doc2: mutable.Document = mutable.Document(
+      "a" -> "string",
+      "b" -> true,
+      "c" -> List("a", "b", "c"),
+      "d" ->
+        mutable.Document("a" -> "string", "b" -> true, "c" -> List("a", "b", "c"))
+    )
+    doc1.toBsonDocument should equal(doc2.toBsonDocument)
+  }
+}
diff --git a/bson-scala/src/test/scala/org/mongodb/scala/bson/collections/ImmutableDocumentSpec.scala b/bson-scala/src/test/scala/org/mongodb/scala/bson/collections/ImmutableDocumentSpec.scala
new file mode 100644
index 00000000000..d24ff044516
--- /dev/null
+++ b/bson-scala/src/test/scala/org/mongodb/scala/bson/collections/ImmutableDocumentSpec.scala
@@ -0,0 +1,216 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.mongodb.scala.bson.collections + +import org.bson.json.JsonParseException +import org.mongodb.scala.bson._ +import org.mongodb.scala.bson.collection.immutable.Document + +import scala.collection.mutable + +class ImmutableDocumentSpec extends BaseSpec { + + val emptyDoc: Document = Document.empty + val doc: Document = Document("key" -> "value", "key2" -> "value2", "key3" -> "value3") + val docMap: Map[String, BsonValue] = doc.toMap + + "Document lookups" should "be the same as empty documents" in { + emptyDoc should equal(Document()) + } + + it should "support construction via json" in { + Document("{a: 1, b: true}") should equal(Document("a" -> 1, "b" -> true)) + + intercept[JsonParseException] { + Document("not Json") + } + } + + it should "support get()" in { + doc.get("key") should equal(Some(BsonString("value"))) + doc.get("nonexistent") should equal(None) + } + + it should "support direct lookup" in { + doc("key") should equal(BsonString("value")) + doc[BsonString]("key") should equal(BsonString("value")) + + // When the key doesn't exist + an[NoSuchElementException] should be thrownBy doc("nonexistent") + + // When the key exists but the type doesn't match" + an[NoSuchElementException] should be thrownBy doc[BsonArray]("key") + } + + it should "support getOrElse" in { + doc.getOrElse("key", BsonBoolean(false)) should equal(BsonString("value")) + doc.getOrElse("nonexistent", BsonBoolean(false)) should equal(BsonBoolean(false)) + } + + it should "support contains" in { + doc contains "key" should equal(true) + doc contains "nonexistent" should equal(false) + } + + "Document additions and updates" should "support simple additions" in { + val doc1: Document = emptyDoc + ("key" -> "value") + emptyDoc should not be doc1 + doc1 should equal(Document("key" -> "value")) + + val doc2: Document = doc1 + ("key2" -> "value2") + doc1 should not be doc2 + doc2 should equal(Document("key" -> "value", "key2" -> "value2")) + } + + it should "support multiple additions" in { + val doc1: Document = emptyDoc + ("key" -> "value", "key2" -> "value2", + "key3" -> "value3") + emptyDoc should not be doc1 + doc1 should equal(Document("key" -> "value", "key2" -> "value2", "key3" -> "value3")) + + val doc2: Document = doc1 + ("key4" -> "value4") + doc1 should not be doc2 + doc2 should equal(Document("key" -> "value", "key2" -> "value2", "key3" -> "value3", "key4" -> "value4")) + } + + it should "support addition of a traversable" in { + val doc1: Document = emptyDoc ++ Set("key" -> BsonString("value"), "key2" -> BsonString("value2")) + emptyDoc should not be doc1 + doc1 should equal(Document("key" -> BsonString("value"), "key2" -> BsonString("value2"))) + + val doc2: Document = doc1 ++ List("key3" -> BsonString("value3")) + doc1 should not be doc2 + doc2 should equal( + Document("key" -> BsonString("value"), "key2" -> BsonString("value2"), "key3" -> BsonString("value3")) + ) + } + + it should "support updated" in { + val doc1: Document = emptyDoc updated ("key", "value") + emptyDoc should not be doc1 + doc1 should equal(Document("key" -> "value")) + + val doc2: Document = doc1 updated ("key2" -> "value2") + doc1 should not be doc2 + doc2 should equal(Document("key" -> "value", "key2" -> "value2")) + } + + "Document removals" should "support subtractions" in { + val doc1: Document = doc - "nonexistent key" + doc1 should equal(doc) + + val doc2: Document = doc - "key" + doc1 should not be doc2 + doc2 should equal(Document("key2" -> "value2", "key3" -> "value3")) + } + + it should "support 
multiple subtractions" in { + val doc1: Document = doc - ("key", "key2") + doc should not be doc1 + doc1 should equal(Document("key3" -> "value3")) + + } + + it should "support subtraction of a traversable" in { + val doc1: Document = doc -- Set("key", "key2") + doc should not be doc1 + doc1 should equal(Document("key3" -> "value3")) + + val doc2: Document = doc -- List("key3") + doc1 should not be doc2 + doc2 should equal(Document("key" -> "value", "key2" -> "value2")) + + } + + "Document subcollections" should "provide keys in the order set" in { + doc.keys should equal(Set("key", "key2", "key3")) + + val doc1: Document = doc + ("aNewKey" -> "1") + doc1.keys should equal(Set("key", "key2", "key3", "aNewKey")) + } + + it should "provide a keySet in the order set" in { + doc.keySet should equal(Set("key", "key2", "key3")) + + val doc1: Document = doc + ("aNewKey" -> "1") + doc1.keySet should equal(Set("key", "key2", "key3", "aNewKey")) + } + + it should "provide a keysIterator in the order set" in { + doc.keysIterator.toSet should equal(Set("key", "key2", "key3")) + + val doc1: Document = doc + ("aNewKey" -> "1") + doc1.keysIterator.toSet should equal(Set("key", "key2", "key3", "aNewKey")) + } + + it should "provide values in the order set" in { + doc.values.toSet should equal(Set(BsonString("value"), BsonString("value2"), BsonString("value3"))) + + val doc1: Document = doc + ("aNewKey" -> 1) + doc1.values.toSet should equal(Set(BsonString("value"), BsonString("value2"), BsonString("value3"), BsonInt32(1))) + } + + it should "provide a valueSet in the order set" in { + doc.valuesIterator.toSet should equal(Set(BsonString("value"), BsonString("value2"), BsonString("value3"))) + + val doc1: Document = doc + ("aNewKey" -> 1) + doc1.valuesIterator.toSet should equal( + Set(BsonString("value"), BsonString("value2"), BsonString("value3"), BsonInt32(1)) + ) + } + + "Document transformations" should "be filterable by keys" in { + val doc1: Document = doc.filterKeys(k => k == "key") + + doc1 should equal(Document("key" -> "value")) + } + + "Traversable helpers" should "work as expected" in { + val map = mutable.Map[String, BsonValue]() + doc foreach (kv => map += kv) + + doc.toMap should equal(map) + } + + it should "be able to create new Documents from iterable" in { + val doc1 = Document(docMap) + doc should equal(doc1) + } + + // it should "be mappable thanks to CanBuildFrom" in { + // Document.empty.map({ kv => kv }) should equal(Document.empty) + // val doc1: Document = docMap.map(kv => kv).to(Document) + // + // doc1 should equal(doc) + // } + + it should "return a BsonDocument" in { + val bsonDoc: BsonDocument = doc.toBsonDocument + doc.underlying should equal(bsonDoc) + } + + it should "return a Json representation" in { + doc.toJson() should equal("""{"key": "value", "key2": "value2", "key3": "value3"}""") + } + + "Documents" should "support Traversable like builders" in { + val doc1 = doc.filter(kv => kv._1 == "key") + + doc1 should not equal doc + doc1 should equal(Document("key" -> "value")) + } +} diff --git a/bson-scala/src/test/scala/org/mongodb/scala/bson/collections/MutableDocumentSpec.scala b/bson-scala/src/test/scala/org/mongodb/scala/bson/collections/MutableDocumentSpec.scala new file mode 100644 index 00000000000..918b8f4c5f6 --- /dev/null +++ b/bson-scala/src/test/scala/org/mongodb/scala/bson/collections/MutableDocumentSpec.scala @@ -0,0 +1,341 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.bson.collections + +import org.bson.json.JsonParseException +import org.bson.{ BsonArray, BsonDocument, BsonValue } +import org.mongodb.scala.bson.collection.mutable.Document +import org.mongodb.scala.bson.{ BaseSpec, BsonBoolean, BsonString } + +import scala.collection.mutable + +class MutableDocumentSpec extends BaseSpec { + + val emptyDoc: Document = Document.empty + val doc: Document = Document("key" -> "value", "key2" -> "value2", "key3" -> "value3") + val docMap: Map[String, BsonValue] = doc.toMap + + "Document lookups" should "be the same as empty documents" in { + emptyDoc should equal(Document()) + } + + it should "support construction via json" in { + Document("{a: 1, b: true}") should equal(Document("a" -> 1, "b" -> true)) + + intercept[JsonParseException] { + Document("not Json") + } + } + + it should "support get()" in { + doc.get("key") should equal(Some(BsonString("value"))) + doc.get("nonexistent") should equal(None) + } + + it should "support direct lookup" in { + doc("key") should equal(BsonString("value")) + doc[BsonString]("key") should equal(BsonString("value")) + + // When the key doesn't exist + an[NoSuchElementException] should be thrownBy doc("nonexistent") + + // When the key exists but the type doesn't match" + an[NoSuchElementException] should be thrownBy doc[BsonArray]("key") + } + + it should "support getOrElse" in { + doc.getOrElse("key", BsonBoolean(false)) should equal(BsonString("value")) + doc.getOrElse("nonexistent", BsonBoolean(false)) should equal(BsonBoolean(false)) + } + + it should "support contains" in { + doc contains "key" should equal(true) + doc contains "nonexistent" should equal(false) + } + + "Document additions and updates" should "support simple additions" in { + val doc1: Document = emptyDoc + ("key" -> BsonString("value")) + emptyDoc should not be doc1 + doc1 should equal(Document("key" -> BsonString("value"))) + + val doc2: Document = doc1 + ("key2" -> BsonString("value2")) + doc1 should not be doc2 + doc2 should equal(Document("key" -> BsonString("value"), "key2" -> BsonString("value2"))) + } + + it should "support multiple additions" in { + val doc1: Document = emptyDoc + ("key" -> BsonString("value"), "key2" -> BsonString("value2"), + "key3" -> BsonString("value3")) + emptyDoc should not be doc1 + doc1 should equal( + Document("key" -> BsonString("value"), "key2" -> BsonString("value2"), "key3" -> BsonString("value3")) + ) + + val doc2: Document = doc1 + ("key4" -> BsonString("value4")) + doc1 should not be doc2 + doc2 should equal( + Document( + "key" -> BsonString("value"), + "key2" -> BsonString("value2"), + "key3" -> BsonString("value3"), + "key4" -> BsonString("value4") + ) + ) + } + + it should "support addition of a traversable" in { + val doc1: Document = emptyDoc ++ Set("key" -> BsonString("value"), "key2" -> BsonString("value2")) + emptyDoc should not be doc1 + doc1 should equal(Document("key" -> BsonString("value"), "key2" -> 
BsonString("value2"))) + + val doc2: Document = doc1 ++ List("key3" -> BsonString("value3")) + doc1 should not be doc2 + doc2 should equal( + Document("key" -> BsonString("value"), "key2" -> BsonString("value2"), "key3" -> BsonString("value3")) + ) + } + + it should "support updated" in { + val doc1: Document = emptyDoc updated ("key", BsonString("value")) + emptyDoc should not be doc1 + doc1 should equal(Document("key" -> BsonString("value"))) + + val doc2: Document = doc1 updated ("key2" -> BsonString("value2")) + doc1 should not be doc2 + doc2 should equal(Document("key" -> BsonString("value"), "key2" -> BsonString("value2"))) + } + + "Document removals" should "support subtractions" in { + val doc1: Document = doc - "nonexistent key" + doc1 should equal(doc) + + val doc2: Document = doc - "key" + doc1 should not be doc2 + doc2 should equal(Document("key2" -> BsonString("value2"), "key3" -> BsonString("value3"))) + + } + + it should "support multiple subtractions" in { + val doc1: Document = doc - ("key", "key2") + doc should not be doc1 + doc1 should equal(Document("key3" -> BsonString("value3"))) + + } + + it should "support subtraction of a traversable" in { + val doc1: Document = doc -- Set("key", "key2") + doc should not be doc1 + doc1 should equal(Document("key3" -> BsonString("value3"))) + + val doc2: Document = doc -- List("key3") + doc1 should not be doc2 + doc2 should equal(Document("key" -> BsonString("value"), "key2" -> BsonString("value2"))) + + } + + "Document subcollections" should "provide keys in the order set" in { + doc.keys should equal(Set("key", "key2", "key3")) + + val doc1: Document = doc + ("aNewKey" -> BsonString("1")) + doc1.keys should equal(Set("key", "key2", "key3", "aNewKey")) + } + + it should "provide a keySet in the order set" in { + doc.keySet should equal(Set("key", "key2", "key3")) + + val doc1: Document = doc + ("aNewKey" -> BsonString("1")) + doc1.keySet should equal(Set("key", "key2", "key3", "aNewKey")) + } + + it should "provide a keysIterator in the order set" in { + doc.keysIterator.toSet should equal(Set("key", "key2", "key3")) + + val doc1: Document = doc + ("aNewKey" -> BsonString("1")) + doc1.keysIterator.toSet should equal(Set("key", "key2", "key3", "aNewKey")) + } + + it should "provide values in the order set" in { + doc.values.toSet should equal(Set(BsonString("value"), BsonString("value2"), BsonString("value3"))) + + val doc1: Document = doc + ("aNewKey" -> BsonString("1")) + doc1.values.toSet should equal( + Set(BsonString("value"), BsonString("value2"), BsonString("value3"), BsonString("1")) + ) + } + + it should "provide a valueSet in the order set" in { + doc.valuesIterator.toSet should equal(Set(BsonString("value"), BsonString("value2"), BsonString("value3"))) + + val doc1: Document = doc + ("aNewKey" -> BsonString("1")) + doc1.valuesIterator.toSet should equal( + Set(BsonString("value"), BsonString("value2"), BsonString("value3"), BsonString("1")) + ) + } + + "Document transformations" should "be filterable by keys" in { + val doc1: Document = doc.filterKeys(k => k == "key") + + doc1 should equal(Document("key" -> BsonString("value"))) + } + + "Traversable helpers" should "work as expected" in { + val map = mutable.Map[String, BsonValue]() + doc foreach (kv => map += kv) + + doc.toMap should equal(map) + } + + it should "be able to create new Documents from iterable" in { + val doc1 = Document(docMap) + doc should equal(doc1) + } + + // it should "be mappable thanks to CanBuildFrom" in { + // Document.empty.map({ kv => kv }) 
should equal(Document.empty) + // val doc1: Document = docMap.map(kv => kv).to(Document) + // + // doc1 should equal(doc) + // } + + it should "return a BsonDocument" in { + val bsonDoc: BsonDocument = doc.toBsonDocument + doc.underlying should equal(bsonDoc) + } + + it should "return a Json representation" in { + doc.toJson() should equal("""{"key": "value", "key2": "value2", "key3": "value3"}""") + } + + "Documents" should "support Traversable like builders" in { + val doc1 = doc.filter(kv => kv._1 == "key") + + doc1 should not equal (doc) + doc1 should equal(Document("key" -> BsonString("value"))) + } + + "Mutable Documents" should "have maplike mutability" in { + val doc1 = Document.empty + doc1 += (("x", BsonString("x"))) + + doc1 should equal(Document("x" -> BsonString("x"))) + } + + it should "support multiple inline additions" in { + val doc1: Document = Document.empty += ("key" -> BsonString("value"), "key2" -> BsonString("value2")) + doc1 should equal(Document("key" -> BsonString("value"), "key2" -> BsonString("value2"))) + + val doc2: Document = doc1 += ("key3" -> BsonString("value3")) + doc1 should equal(doc2) + doc2 should equal( + Document("key" -> BsonString("value"), "key2" -> BsonString("value2"), "key3" -> BsonString("value3")) + ) + } + + it should "support inline addition of a traversable" in { + val doc1: Document = Document.empty ++= Set("key" -> BsonString("value"), "key2" -> BsonString("value2")) + doc1 should equal(Document("key" -> BsonString("value"), "key2" -> BsonString("value2"))) + + val doc2: Document = doc1 ++= List("key3" -> BsonString("value3")) + doc1 should equal(doc2) + doc2 should equal( + Document("key" -> BsonString("value"), "key2" -> BsonString("value2"), "key3" -> BsonString("value3")) + ) + } + + it should "support put" in { + val doc1: Document = Document.empty + doc1.put("key", BsonString("value")) shouldBe None + doc1 should equal(Document("key" -> BsonString("value"))) + + doc1.put("key", BsonString("newValue")) shouldBe Some(BsonString("value")) + doc1 should equal(Document("key" -> BsonString("newValue"))) + } + + it should "support getOrElseUpdate" in { + val doc1: Document = Document.empty + doc1.getOrElseUpdate("key", BsonString("value")) shouldBe BsonString("value") + doc1 should equal(Document("key" -> BsonString("value"))) + + doc1.getOrElseUpdate("key", BsonString("newValue")) shouldBe BsonString("value") + doc1 should equal(Document("key" -> BsonString("value"))) + } + + it should "support inline update" in { + val doc1: Document = Document.empty + doc1 update ("key", BsonString("value")) + doc1 should equal(Document("key" -> BsonString("value"))) + + doc1 update ("key2", BsonString("value2")) + doc1 should equal(Document("key" -> BsonString("value"), "key2" -> BsonString("value2"))) + } + + "Document removals" should "support inline subtractions" in { + val doc1: Document = doc.copy() -= "nonexistent key" + doc1 should equal(doc) + + val doc2: Document = doc1 -= "key" + doc1 should not be equal(doc2) + doc2 should equal(Document("key2" -> BsonString("value2"), "key3" -> BsonString("value3"))) + } + + it should "support multiple inline subtractions" in { + val doc1: Document = doc.copy() -= ("key", "key2") + doc should not be doc1 + doc1 should equal(Document("key3" -> BsonString("value3"))) + } + + it should "support inline subtraction of a traversable" in { + val doc1: Document = doc.copy() --= Set("key", "key2") + doc should not be doc1 + doc1 should equal(Document("key3" -> BsonString("value3"))) + + val doc2: Document = doc1 
--= List("key3") + doc1 should equal(doc2) + doc2 should equal(Document()) + } + + it should "support remove" in { + val doc1: Document = Document("key" -> BsonString("value")) + + doc1.remove("key") shouldBe Some(BsonString("value")) + doc1 should equal(Document()) + + doc1.remove("noKey") shouldBe None + doc1 should equal(Document()) + } + + it should "support retain" in { + val doc1: Document = Document("key" -> BsonString("value"), "key2" -> BsonString("value2")) + + doc1.retain((k, v) => k == "key") + doc1 should equal(Document("key" -> BsonString("value"))) + } + + it should "support clear" in { + val doc1: Document = Document("key" -> BsonString("value"), "key2" -> BsonString("value2")) + + doc1.clear() + doc1 should equal(Document()) + } + + it should "support transform" in { + val doc1: Document = Document("key" -> BsonString("value"), "key2" -> BsonString("value2")) + + doc1.transform((k, v) => BsonString(v.asString().getValue.toUpperCase)) + doc1 should equal(Document("key" -> BsonString("VALUE"), "key2" -> BsonString("VALUE2"))) + } +} diff --git a/bson/build.gradle.kts b/bson/build.gradle.kts new file mode 100644 index 00000000000..fab3cdaacb5 --- /dev/null +++ b/bson/build.gradle.kts @@ -0,0 +1,39 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import ProjectExtensions.configureJarManifest +import ProjectExtensions.configureMavenPublication + +plugins { + id("project.java") + id("conventions.testing-junit") + id("conventions.testing-spock") + id("conventions.test-artifacts") +} + +base.archivesName.set("bson") + +configureMavenPublication { + pom { + name.set("BSON") + description.set("The BSON library") + url.set("https://bsonspec.org") + } +} + +configureJarManifest { + attributes["Automatic-Module-Name"] = "org.mongodb.bson" + attributes["Import-Package"] = "org.slf4j.*;resolution:=optional" +} diff --git a/bson/src/main/org/bson/AbstractBsonReader.java b/bson/src/main/org/bson/AbstractBsonReader.java new file mode 100644 index 00000000000..88c5fda5153 --- /dev/null +++ b/bson/src/main/org/bson/AbstractBsonReader.java @@ -0,0 +1,911 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import org.bson.types.Decimal128; +import org.bson.types.ObjectId; + +import static java.lang.String.format; + +/** + * Abstract base class for BsonReader implementations. 
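+ * <p>
+ * Illustrative only (not part of this file's contract): a concrete subclass such as {@code org.bson.json.JsonReader}
+ * is typically driven by a read loop like the following, which walks the state machine this class enforces:
+ * </p>
+ * <pre>{@code
+ * BsonReader reader = new JsonReader("{a: 1, b: true}");
+ * reader.readStartDocument();                                  // INITIAL -> TYPE
+ * while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {  // TYPE -> NAME
+ *     String name = reader.readName();                         // NAME -> VALUE
+ *     reader.skipValue();                                      // VALUE -> TYPE
+ * }
+ * reader.readEndDocument();                                    // END_OF_DOCUMENT -> DONE
+ * }</pre>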
+ * + * @since 3.0 + */ +public abstract class AbstractBsonReader implements BsonReader { + private State state; + private Context context; + private BsonType currentBsonType; + private String currentName; + private boolean closed; + + /** + * Initializes a new instance of the BsonReader class. + */ + protected AbstractBsonReader() { + state = State.INITIAL; + } + + @Override + public BsonType getCurrentBsonType() { + return currentBsonType; + } + + @Override + public String getCurrentName() { + if (state != State.VALUE) { + throwInvalidState("getCurrentName", State.VALUE); + } + return currentName; + } + + /** + * Sets the type of the current value being read. + * + * @param newType the BSON Type. + */ + protected void setCurrentBsonType(final BsonType newType) { + currentBsonType = newType; + } + + /** + * @return The current state of the reader. + */ + public State getState() { + return state; + } + + /** + * Sets the new current state of this reader. + * + * @param newState the state to set this reader to. + */ + protected void setState(final State newState) { + state = newState; + } + + /** + * Sets the field name for the key/value pair being read. + * + * @param newName the field name + */ + protected void setCurrentName(final String newName) { + currentName = newName; + } + + /** + * Closes the reader. + */ + public void close() { + closed = true; + } + + /** + * Return true if the reader has been closed. + * + * @return true if closed + */ + protected boolean isClosed() { + return closed; + } + + /** + * Handles the logic to read binary data + * + * @return the BsonBinary value + */ + protected abstract BsonBinary doReadBinaryData(); + + /** + * Handles the logic to peek at the binary subtype. + * + * @return the binary subtype + */ + protected abstract byte doPeekBinarySubType(); + + /** + * Handles the logic to peek at the binary size. 
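+ * <p>
+ * Illustrative sketch (not prescribed by this class): together with {@code peekBinarySubType()}, this lets a caller
+ * positioned on a binary value inspect it before committing to a full read, e.g. to special-case 16-byte UUIDs:
+ * </p>
+ * <pre>{@code
+ * if (reader.peekBinarySubType() == BsonBinarySubType.UUID_STANDARD.getValue()
+ *         && reader.peekBinarySize() == 16) {
+ *     BsonBinary uuid = reader.readBinaryData();
+ * }
+ * }</pre>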
+ * + * @return the binary size + * @since 3.4 + */ + protected abstract int doPeekBinarySize(); + + /** + * Handles the logic to read booleans + * + * @return the boolean value + */ + protected abstract boolean doReadBoolean(); + + /** + * Handles the logic to read date time + * + * @return the long value + */ + protected abstract long doReadDateTime(); + + /** + * Handles the logic to read doubles + * + * @return the double value + */ + protected abstract double doReadDouble(); + + /** + * Handles the logic when reading the end of an array + */ + protected abstract void doReadEndArray(); + + /** + * Handles the logic when reading the end of a document + */ + protected abstract void doReadEndDocument(); + + /** + * Handles the logic to read 32 bit ints + * + * @return the int value + */ + protected abstract int doReadInt32(); + + /** + * Handles the logic to read 64 bit ints + * + * @return the long value + */ + protected abstract long doReadInt64(); + + + /** + * Handles the logic to read Decimal128 + * + * @return the Decimal128 value + * @since 3.4 + */ + protected abstract Decimal128 doReadDecimal128(); + + /** + * Handles the logic to read JavaScript functions + * + * @return the String value + */ + protected abstract String doReadJavaScript(); + + /** + * Handles the logic to read scoped JavaScript functions + * + * @return the String value + */ + protected abstract String doReadJavaScriptWithScope(); + + /** + * Handles the logic to read a Max key + */ + protected abstract void doReadMaxKey(); + + /** + * Handles the logic to read a Min key + */ + protected abstract void doReadMinKey(); + + /** + * Handles the logic to read a null value + */ + protected abstract void doReadNull(); + + /** + * Handles the logic to read an ObjectId + * + * @return the ObjectId value + */ + protected abstract ObjectId doReadObjectId(); + + /** + * Handles the logic to read a regular expression + * + * @return the BsonRegularExpression value + */ + protected abstract BsonRegularExpression doReadRegularExpression(); + + /** + * Handles the logic to read a DBPointer + * + * @return the BsonDbPointer value + */ + protected abstract BsonDbPointer doReadDBPointer(); + + /** + * Handles the logic to read the start of an array + */ + protected abstract void doReadStartArray(); + + /** + * Handles the logic to read the start of a document + */ + protected abstract void doReadStartDocument(); + + /** + * Handles the logic to read a String + * + * @return the String value + */ + protected abstract String doReadString(); + + /** + * Handles the logic to read a Symbol + * + * @return the String value + */ + protected abstract String doReadSymbol(); + + /** + * Handles the logic to read a timestamp + * + * @return the BsonTimestamp value + */ + protected abstract BsonTimestamp doReadTimestamp(); + + /** + * Handles the logic to read an Undefined value + */ + protected abstract void doReadUndefined(); + + /** + * Handles any logic required to skip the name (reader must be positioned on a name). + */ + protected abstract void doSkipName(); + + /** + * Handles any logic required to skip the value (reader must be positioned on a value). 
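+ * <p>
+ * For example (illustrative), skipping an element a caller does not care about drives the public wrappers of the two
+ * skip hooks in sequence:
+ * </p>
+ * <pre>{@code
+ * reader.readBsonType(); // positions the reader on the element's name
+ * reader.skipName();     // NAME -> VALUE
+ * reader.skipValue();    // VALUE -> TYPE, ready for the next element
+ * }</pre>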
+ */ + protected abstract void doSkipValue(); + + @Override + public BsonBinary readBinaryData() { + checkPreconditions("readBinaryData", BsonType.BINARY); + setState(getNextState()); + return doReadBinaryData(); + } + + @Override + public byte peekBinarySubType() { + checkPreconditions("readBinaryData", BsonType.BINARY); + return doPeekBinarySubType(); + } + + @Override + public int peekBinarySize() { + checkPreconditions("readBinaryData", BsonType.BINARY); + return doPeekBinarySize(); + } + + @Override + public boolean readBoolean() { + checkPreconditions("readBoolean", BsonType.BOOLEAN); + setState(getNextState()); + return doReadBoolean(); + } + + @Override + public abstract BsonType readBsonType(); + + @Override + public long readDateTime() { + checkPreconditions("readDateTime", BsonType.DATE_TIME); + setState(getNextState()); + return doReadDateTime(); + } + + @Override + public double readDouble() { + checkPreconditions("readDouble", BsonType.DOUBLE); + setState(getNextState()); + return doReadDouble(); + } + + @Override + public void readEndArray() { + if (isClosed()) { + throw new IllegalStateException("BsonReader is closed"); + } + if (getContext().getContextType() != BsonContextType.ARRAY) { + throwInvalidContextType("readEndArray", getContext().getContextType(), BsonContextType.ARRAY); + } + if (getState() == State.TYPE) { + readBsonType(); // will set state to EndOfArray if at end of array + } + if (getState() != State.END_OF_ARRAY) { + throwInvalidState("ReadEndArray", State.END_OF_ARRAY); + } + + doReadEndArray(); + + setStateOnEnd(); + } + + @Override + public void readEndDocument() { + if (isClosed()) { + throw new IllegalStateException("BsonReader is closed"); + } + if (getContext().getContextType() != BsonContextType.DOCUMENT && getContext().getContextType() != BsonContextType.SCOPE_DOCUMENT) { + throwInvalidContextType("readEndDocument", + getContext().getContextType(), BsonContextType.DOCUMENT, BsonContextType.SCOPE_DOCUMENT); + } + if (getState() == State.TYPE) { + readBsonType(); // will set state to EndOfDocument if at end of document + } + if (getState() != State.END_OF_DOCUMENT) { + throwInvalidState("readEndDocument", State.END_OF_DOCUMENT); + } + + doReadEndDocument(); + + setStateOnEnd(); + } + + @Override + public int readInt32() { + checkPreconditions("readInt32", BsonType.INT32); + setState(getNextState()); + return doReadInt32(); + + } + + @Override + public long readInt64() { + checkPreconditions("readInt64", BsonType.INT64); + setState(getNextState()); + return doReadInt64(); + } + + @Override + public Decimal128 readDecimal128() { + checkPreconditions("readDecimal", BsonType.DECIMAL128); + setState(getNextState()); + return doReadDecimal128(); + } + + @Override + public String readJavaScript() { + checkPreconditions("readJavaScript", BsonType.JAVASCRIPT); + setState(getNextState()); + return doReadJavaScript(); + } + + @Override + public String readJavaScriptWithScope() { + checkPreconditions("readJavaScriptWithScope", BsonType.JAVASCRIPT_WITH_SCOPE); + setState(State.SCOPE_DOCUMENT); + return doReadJavaScriptWithScope(); + } + + @Override + public void readMaxKey() { + checkPreconditions("readMaxKey", BsonType.MAX_KEY); + setState(getNextState()); + doReadMaxKey(); + } + + @Override + public void readMinKey() { + checkPreconditions("readMinKey", BsonType.MIN_KEY); + setState(getNextState()); + doReadMinKey(); + } + + @Override + public void readNull() { + checkPreconditions("readNull", BsonType.NULL); + setState(getNextState()); + doReadNull(); + } + + 
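+ // Illustrative (not part of this class): the name-based overloads further down pair a verifyName call with the
+ // corresponding positional read, so a document shaped like {"_id": <ObjectId>, "deleted": null} can be read as:
+ //
+ //     reader.readStartDocument();
+ //     ObjectId id = reader.readObjectId("_id");
+ //     reader.readNull("deleted");
+ //     reader.readEndDocument();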
@Override + public ObjectId readObjectId() { + checkPreconditions("readObjectId", BsonType.OBJECT_ID); + setState(getNextState()); + return doReadObjectId(); + } + + @Override + public BsonRegularExpression readRegularExpression() { + checkPreconditions("readRegularExpression", BsonType.REGULAR_EXPRESSION); + setState(getNextState()); + return doReadRegularExpression(); + } + + @Override + public BsonDbPointer readDBPointer() { + checkPreconditions("readDBPointer", BsonType.DB_POINTER); + setState(getNextState()); + return doReadDBPointer(); + } + + @Override + public void readStartArray() { + checkPreconditions("readStartArray", BsonType.ARRAY); + doReadStartArray(); + setState(State.TYPE); + } + + @Override + public void readStartDocument() { + checkPreconditions("readStartDocument", BsonType.DOCUMENT); + doReadStartDocument(); + setState(State.TYPE); + } + + @Override + public String readString() { + checkPreconditions("readString", BsonType.STRING); + setState(getNextState()); + return doReadString(); + } + + @Override + public String readSymbol() { + checkPreconditions("readSymbol", BsonType.SYMBOL); + setState(getNextState()); + return doReadSymbol(); + } + + @Override + public BsonTimestamp readTimestamp() { + checkPreconditions("readTimestamp", BsonType.TIMESTAMP); + setState(getNextState()); + return doReadTimestamp(); + } + + @Override + public void readUndefined() { + checkPreconditions("readUndefined", BsonType.UNDEFINED); + setState(getNextState()); + doReadUndefined(); + } + + @Override + public void skipName() { + if (isClosed()) { + throw new IllegalStateException("BsonReader is closed"); + } + if (getState() != State.NAME) { + throwInvalidState("skipName", State.NAME); + } + setState(State.VALUE); + doSkipName(); + } + + @Override + public void skipValue() { + if (isClosed()) { + throw new IllegalStateException("BsonReader is closed"); + } + if (getState() != State.VALUE) { + throwInvalidState("skipValue", State.VALUE); + } + + doSkipValue(); + + setState(State.TYPE); + } + + @Override + public BsonBinary readBinaryData(final String name) { + verifyName(name); + return readBinaryData(); + } + + @Override + public boolean readBoolean(final String name) { + verifyName(name); + return readBoolean(); + } + + @Override + public long readDateTime(final String name) { + verifyName(name); + return readDateTime(); + } + + @Override + public double readDouble(final String name) { + verifyName(name); + return readDouble(); + } + + @Override + public int readInt32(final String name) { + verifyName(name); + return readInt32(); + } + + @Override + public long readInt64(final String name) { + verifyName(name); + return readInt64(); + } + + @Override + public Decimal128 readDecimal128(final String name) { + verifyName(name); + return readDecimal128(); + } + + @Override + public String readJavaScript(final String name) { + verifyName(name); + return readJavaScript(); + } + + @Override + public String readJavaScriptWithScope(final String name) { + verifyName(name); + return readJavaScriptWithScope(); + } + + @Override + public void readMaxKey(final String name) { + verifyName(name); + readMaxKey(); + } + + @Override + public void readMinKey(final String name) { + verifyName(name); + readMinKey(); + } + + @Override + public String readName() { + if (state == State.TYPE) { + readBsonType(); + } + if (state != State.NAME) { + throwInvalidState("readName", State.NAME); + } + + state = State.VALUE; + return currentName; + } + + @Override + public void readName(final String name) { +
verifyName(name); + } + + @Override + public void readNull(final String name) { + verifyName(name); + readNull(); + } + + @Override + public ObjectId readObjectId(final String name) { + verifyName(name); + return readObjectId(); + } + + @Override + public BsonRegularExpression readRegularExpression(final String name) { + verifyName(name); + return readRegularExpression(); + } + + @Override + public BsonDbPointer readDBPointer(final String name) { + verifyName(name); + return readDBPointer(); + } + + + @Override + public String readString(final String name) { + verifyName(name); + return readString(); + } + + @Override + public String readSymbol(final String name) { + verifyName(name); + return readSymbol(); + } + + @Override + public BsonTimestamp readTimestamp(final String name) { + verifyName(name); + return readTimestamp(); + } + + @Override + public void readUndefined(final String name) { + verifyName(name); + readUndefined(); + } + + /** + * Throws an InvalidOperationException when the method called is not valid for the current ContextType. + * + * @param methodName The name of the method. + * @param actualContextType The actual ContextType. + * @param validContextTypes The valid ContextTypes. + * @throws BsonInvalidOperationException when the method called is not valid for the current ContextType. + */ + protected void throwInvalidContextType(final String methodName, final BsonContextType actualContextType, + final BsonContextType... validContextTypes) { + String validContextTypesString = StringUtils.join(" or ", validContextTypes); + String message = format("%s can only be called when ContextType is %s, not when ContextType is %s.", + methodName, validContextTypesString, actualContextType); + throw new BsonInvalidOperationException(message); + } + + /** + * Throws an InvalidOperationException when the method called is not valid for the current state. + * + * @param methodName The name of the method. + * @param validStates The valid states. + * @throws BsonInvalidOperationException when the method called is not valid for the current state. + */ + protected void throwInvalidState(final String methodName, final State... validStates) { + String validStatesString = StringUtils.join(" or ", validStates); + String message = format("%s can only be called when State is %s, not when State is %s.", + methodName, validStatesString, state); + throw new BsonInvalidOperationException(message); + } + + /** + * Verifies the current state and BSONType of the reader. + * + * @param methodName The name of the method calling this one. + * @param requiredBsonType The required BSON type. + */ + protected void verifyBSONType(final String methodName, final BsonType requiredBsonType) { + if (state == State.INITIAL || state == State.SCOPE_DOCUMENT || state == State.TYPE) { + readBsonType(); + } + if (state == State.NAME) { + // ignore name + skipName(); + } + if (state != State.VALUE) { + throwInvalidState(methodName, State.VALUE); + } + if (currentBsonType != requiredBsonType) { + throw new BsonInvalidOperationException(format("%s can only be called when CurrentBSONType is %s, " + + "not when CurrentBSONType is %s.", + methodName, requiredBsonType, currentBsonType)); + } + } + + /** + * Verifies the name of the current element. + * + * @param expectedName The expected name. 
+ * @throws BsonSerializationException when the name read is not the expected name + */ + protected void verifyName(final String expectedName) { + readBsonType(); + String actualName = readName(); + if (!actualName.equals(expectedName)) { + throw new BsonSerializationException(format("Expected element name to be '%s', not '%s'.", + expectedName, actualName)); + } + } + + /** + * Ensures any conditions are met before reading commences. Throws exceptions if the conditions are not met. + * + * @param methodName the name of the current method, which will indicate the field being read + * @param type the type of this field + */ + protected void checkPreconditions(final String methodName, final BsonType type) { + if (isClosed()) { + throw new IllegalStateException("BsonReader is closed"); + } + + verifyBSONType(methodName, type); + } + + /** + * Get the context, which will indicate which state the reader is in, for example which part of a document it's currently reading. + * + * @return the context + */ + protected Context getContext() { + return context; + } + + /** + * Set the context, which will indicate which state the reader is in, for example which part of a document it's currently reading. + * + * @param context the current context. + */ + protected void setContext(final Context context) { + this.context = context; + } + + /** + * Returns the next {@code State} to transition to, based on the {@link org.bson.AbstractBsonReader.Context} of this reader. + * + * @return the next state + */ + protected State getNextState() { + switch (context.getContextType()) { + case ARRAY: + case DOCUMENT: + case SCOPE_DOCUMENT: + return State.TYPE; + case TOP_LEVEL: + return State.DONE; + default: + throw new BSONException(format("Unexpected ContextType %s.", context.getContextType())); + } + } + + private void setStateOnEnd() { + switch (getContext().getContextType()) { + case ARRAY: + case DOCUMENT: + setState(State.TYPE); + break; + case TOP_LEVEL: + setState(State.DONE); + break; + default: + throw new BSONException(format("Unexpected ContextType %s.", getContext().getContextType())); + } + } + + /** + * An implementation of {@code BsonReaderMark}. + */ + protected class Mark implements BsonReaderMark { + private final State state; + private final Context parentContext; + private final BsonContextType contextType; + private final BsonType currentBsonType; + private final String currentName; + + /** + * Gets the parent context. + * + * @return the parent context + */ + protected Context getParentContext() { + return parentContext; + } + + /** + * Gets the context type. + * + * @return the context type + */ + protected BsonContextType getContextType() { + return contextType; + } + + /** + * Construct an instance. + */ + protected Mark() { + state = AbstractBsonReader.this.state; + parentContext = AbstractBsonReader.this.context.parentContext; + contextType = AbstractBsonReader.this.context.contextType; + currentBsonType = AbstractBsonReader.this.currentBsonType; + currentName = AbstractBsonReader.this.currentName; + } + + @Override + public void reset() { + AbstractBsonReader.this.state = state; + AbstractBsonReader.this.currentBsonType = currentBsonType; + AbstractBsonReader.this.currentName = currentName; + } + } + + + /** + * The context for the reader. Records the parent context, creating a bread crumb trail to trace back up to the root context of the + * reader. 
Also records the {@link org.bson.BsonContextType}, indicating whether the reader is reading a document, array, or other + * complex sub-structure. + */ + protected abstract class Context { + + private final Context parentContext; + private final BsonContextType contextType; + + /** + * Creates a new instance. + * + * @param parentContext a possibly null value for the context that came before this one + * @param contextType the type of this context + */ + protected Context(final Context parentContext, final BsonContextType contextType) { + this.parentContext = parentContext; + this.contextType = contextType; + } + + /** + * Returns the parent context. Allows users of this context object to transition to this parent context. + * + * @return the context that came before this one + */ + protected Context getParentContext() { + return parentContext; + } + + /** + * Return the type of this context. + * + * @return the context type. + */ + protected BsonContextType getContextType() { + return contextType; + } + } + + /** + * The state of a reader. Indicates where in a document the reader is. + */ + public enum State { + /** + * The initial state. + */ + INITIAL, + + /** + * The reader is positioned at the type of an element or value. + */ + TYPE, + + /** + * The reader is positioned at the name of an element. + */ + NAME, + + /** + * The reader is positioned at a value. + */ + VALUE, + + /** + * The reader is positioned at a scope document. + */ + SCOPE_DOCUMENT, + + /** + * The reader is positioned at the end of a document. + */ + END_OF_DOCUMENT, + + /** + * The reader is positioned at the end of an array. + */ + END_OF_ARRAY, + + /** + * The reader has finished reading a document. + */ + DONE, + + /** + * The reader is closed. + */ + CLOSED + } +} diff --git a/bson/src/main/org/bson/AbstractBsonWriter.java b/bson/src/main/org/bson/AbstractBsonWriter.java new file mode 100644 index 00000000000..9d571862af0 --- /dev/null +++ b/bson/src/main/org/bson/AbstractBsonWriter.java @@ -0,0 +1,1135 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import org.bson.types.Decimal128; +import org.bson.types.ObjectId; + +import java.io.Closeable; +import java.util.ArrayDeque; +import java.util.Arrays; +import java.util.Deque; +import java.util.List; +import java.util.Map; + +import static java.lang.String.format; +import static org.bson.assertions.Assertions.notNull; + +/** + * Represents a BSON writer for some external format (see subclasses). + * + * @since 3.0 + */ +public abstract class AbstractBsonWriter implements BsonWriter, Closeable { + private final BsonWriterSettings settings; + private final Deque<FieldNameValidator> fieldNameValidatorStack = new ArrayDeque<>(); + private State state; + private Context context; + private int serializationDepth; + private boolean closed; + + /** + * Initializes a new instance of the BsonWriter class. + * + * @param settings The writer settings.
+ */ + protected AbstractBsonWriter(final BsonWriterSettings settings) { + this(settings, NoOpFieldNameValidator.INSTANCE); + } + + /** + * Initializes a new instance of the BsonWriter class. + * + * @param settings The writer settings. + * @param validator the field name validator + */ + protected AbstractBsonWriter(final BsonWriterSettings settings, final FieldNameValidator validator) { + if (validator == null) { + throw new IllegalArgumentException("Validator can not be null"); + } + this.settings = settings; + fieldNameValidatorStack.push(validator); + state = State.INITIAL; + } + + /** + * The name of the field being written. + * + * @return the name of the field + */ + protected String getName() { + return context.name; + } + + /** + * Returns whether this writer has been closed. + * + * @return true if the {@link #close()} method has been called. + */ + protected boolean isClosed() { + return closed; + } + + /** + * Sets the current state of the writer. The current state determines what sort of actions are valid for this writer at this time. + * + * @param state the state to set this writer to. + */ + protected void setState(final State state) { + this.state = state; + } + + /** + * Gets the current state of this writer. The current state determines what sort of actions are valid for this writer at this time. + * + * @return the current state of the writer. + */ + protected State getState() { + return state; + } + + /** + * Get the context, which will indicate which state the writer is in, for example which part of a document it's currently writing. + * + * @return the current context. + */ + protected Context getContext() { + return context; + } + + /** + * Set the context, which will indicate which state the writer is in, for example which part of a document it's currently writing. 
+ * + * @param context the new context for this writer + */ + protected void setContext(final Context context) { + this.context = context; + } + + /** + * Handles the logic to start writing a document + */ + protected abstract void doWriteStartDocument(); + + /** + * Handles the logic of writing the end of a document + */ + protected abstract void doWriteEndDocument(); + + /** + * Handles the logic to start writing an array + */ + protected abstract void doWriteStartArray(); + + /** + * Handles the logic of writing the end of an array + */ + protected abstract void doWriteEndArray(); + + /** + * Handles the logic of writing a {@code BsonBinary} value + * + * @param value the {@code BsonBinary} value to write + */ + protected abstract void doWriteBinaryData(BsonBinary value); + + + /** + * Handles the logic of writing a boolean value + * + * @param value the {@code boolean} value to write + */ + protected abstract void doWriteBoolean(boolean value); + + /** + * Handles the logic of writing a date time value + * + * @param value the {@code long} value to write + */ + protected abstract void doWriteDateTime(long value); + + /** + * Handles the logic of writing a DbPointer value + * + * @param value the {@code BsonDbPointer} value to write + */ + protected abstract void doWriteDBPointer(BsonDbPointer value); + + /** + * Handles the logic of writing a Double value + * + * @param value the {@code double} value to write + */ + protected abstract void doWriteDouble(double value); + + /** + * Handles the logic of writing an int32 value + * + * @param value the {@code int} value to write + */ + protected abstract void doWriteInt32(int value); + + /** + * Handles the logic of writing an int64 value + * + * @param value the {@code long} value to write + */ + protected abstract void doWriteInt64(long value); + + /** + * Handles the logic of writing a Decimal128 value + * + * @param value the {@code Decimal128} value to write + * @since 3.4 + */ + protected abstract void doWriteDecimal128(Decimal128 value); + + /** + * Handles the logic of writing a JavaScript function + * + * @param value the {@code String} value to write + */ + protected abstract void doWriteJavaScript(String value); + + /** + * Handles the logic of writing a scoped JavaScript function + * + * @param value the {@code String} value to write + */ + protected abstract void doWriteJavaScriptWithScope(String value); + + /** + * Handles the logic of writing a Max key + */ + protected abstract void doWriteMaxKey(); + + /** + * Handles the logic of writing a Min key + */ + protected abstract void doWriteMinKey(); + + /** + * Handles the logic of writing a Null value + */ + protected abstract void doWriteNull(); + + /** + * Handles the logic of writing an ObjectId + * + * @param value the {@code ObjectId} value to write + */ + protected abstract void doWriteObjectId(ObjectId value); + + /** + * Handles the logic of writing a regular expression + * + * @param value the {@code BsonRegularExpression} value to write + */ + protected abstract void doWriteRegularExpression(BsonRegularExpression value); + + /** + * Handles the logic of writing a String + * + * @param value the {@code String} value to write + */ + protected abstract void doWriteString(String value); + + /** + * Handles the logic of writing a Symbol + * + * @param value the {@code String} value to write + */ + protected abstract void doWriteSymbol(String value); + + /** + * Handles the logic of writing a timestamp + * + * @param value the {@code BsonTimestamp} value to write + */ +
protected abstract void doWriteTimestamp(BsonTimestamp value); + + /** + * Handles the logic of writing an Undefined value + */ + protected abstract void doWriteUndefined(); + + @Override + public void writeStartDocument(final String name) { + writeName(name); + writeStartDocument(); + } + + @Override + public void writeStartDocument() { + checkPreconditions("writeStartDocument", State.INITIAL, State.VALUE, State.SCOPE_DOCUMENT, State.DONE); + if (context != null && context.name != null) { + FieldNameValidator validator = fieldNameValidatorStack.peek().getValidatorForField(getName()); + fieldNameValidatorStack.push(validator); + validator.start(); + } + serializationDepth++; + if (serializationDepth > settings.getMaxSerializationDepth()) { + throw new BsonSerializationException("Maximum serialization depth exceeded (does the object being " + + "serialized have a circular reference?)."); + } + + doWriteStartDocument(); + setState(State.NAME); + } + + @Override + public void writeEndDocument() { + checkPreconditions("writeEndDocument", State.NAME); + + BsonContextType contextType = getContext().getContextType(); + if (contextType != BsonContextType.DOCUMENT && contextType != BsonContextType.SCOPE_DOCUMENT) { + throwInvalidContextType("WriteEndDocument", contextType, BsonContextType.DOCUMENT, BsonContextType.SCOPE_DOCUMENT); + } + + if (context.getParentContext() != null && context.getParentContext().name != null) { + fieldNameValidatorStack.pop().end(); + } + serializationDepth--; + + doWriteEndDocument(); + + if (getContext() == null || getContext().getContextType() == BsonContextType.TOP_LEVEL) { + setState(State.DONE); + } else { + setState(getNextState()); + } + } + + @Override + public void writeStartArray(final String name) { + writeName(name); + writeStartArray(); + } + + @Override + public void writeStartArray() { + checkPreconditions("writeStartArray", State.VALUE); + + if (context != null && context.name != null) { + fieldNameValidatorStack.push(fieldNameValidatorStack.peek().getValidatorForField(getName())); + } + serializationDepth++; + if (serializationDepth > settings.getMaxSerializationDepth()) { + throw new BsonSerializationException("Maximum serialization depth exceeded (does the object being " + + "serialized have a circular reference?)."); + } + + doWriteStartArray(); + setState(State.VALUE); + } + + @Override + public void writeEndArray() { + checkPreconditions("writeEndArray", State.VALUE); + + if (getContext().getContextType() != BsonContextType.ARRAY) { + throwInvalidContextType("WriteEndArray", getContext().getContextType(), BsonContextType.ARRAY); + } + + if (context.getParentContext() != null && context.getParentContext().name != null) { + fieldNameValidatorStack.pop(); + } + serializationDepth--; + + doWriteEndArray(); + setState(getNextState()); + } + + @Override + public void writeBinaryData(final String name, final BsonBinary binary) { + notNull("name", name); + notNull("value", binary); + writeName(name); + writeBinaryData(binary); + } + + @Override + public void writeBinaryData(final BsonBinary binary) { + notNull("value", binary); + checkPreconditions("writeBinaryData", State.VALUE, State.INITIAL); + doWriteBinaryData(binary); + setState(getNextState()); + } + + @Override + public void writeBoolean(final String name, final boolean value) { + writeName(name); + writeBoolean(value); + } + + @Override + public void writeBoolean(final boolean value) { + checkPreconditions("writeBoolean", State.VALUE, State.INITIAL); + doWriteBoolean(value); + 
setState(getNextState()); + } + + @Override + public void writeDateTime(final String name, final long value) { + writeName(name); + writeDateTime(value); + } + + @Override + public void writeDateTime(final long value) { + checkPreconditions("writeDateTime", State.VALUE, State.INITIAL); + doWriteDateTime(value); + setState(getNextState()); + } + + @Override + public void writeDBPointer(final String name, final BsonDbPointer value) { + notNull("name", name); + notNull("value", value); + writeName(name); + writeDBPointer(value); + } + + @Override + public void writeDBPointer(final BsonDbPointer value) { + notNull("value", value); + checkPreconditions("writeDBPointer", State.VALUE, State.INITIAL); + doWriteDBPointer(value); + setState(getNextState()); + } + + @Override + public void writeDouble(final String name, final double value) { + writeName(name); + writeDouble(value); + } + + @Override + public void writeDouble(final double value) { + checkPreconditions("writeDouble", State.VALUE, State.INITIAL); + doWriteDouble(value); + setState(getNextState()); + } + + @Override + public void writeInt32(final String name, final int value) { + writeName(name); + writeInt32(value); + } + + @Override + public void writeInt32(final int value) { + checkPreconditions("writeInt32", State.VALUE); + doWriteInt32(value); + setState(getNextState()); + } + + @Override + public void writeInt64(final String name, final long value) { + writeName(name); + writeInt64(value); + } + + @Override + public void writeInt64(final long value) { + checkPreconditions("writeInt64", State.VALUE); + doWriteInt64(value); + setState(getNextState()); + } + + @Override + public void writeDecimal128(final Decimal128 value) { + notNull("value", value); + checkPreconditions("writeDecimal128", State.VALUE); + doWriteDecimal128(value); + setState(getNextState()); + } + + @Override + public void writeDecimal128(final String name, final Decimal128 value) { + notNull("name", name); + notNull("value", value); + writeName(name); + writeDecimal128(value); + } + + @Override + public void writeJavaScript(final String name, final String code) { + notNull("name", name); + notNull("value", code); + writeName(name); + writeJavaScript(code); + } + + @Override + public void writeJavaScript(final String code) { + notNull("value", code); + checkPreconditions("writeJavaScript", State.VALUE); + doWriteJavaScript(code); + setState(getNextState()); + } + + @Override + public void writeJavaScriptWithScope(final String name, final String code) { + notNull("name", name); + notNull("value", code); + writeName(name); + writeJavaScriptWithScope(code); + } + + @Override + public void writeJavaScriptWithScope(final String code) { + notNull("value", code); + checkPreconditions("writeJavaScriptWithScope", State.VALUE); + doWriteJavaScriptWithScope(code); + setState(State.SCOPE_DOCUMENT); + } + + @Override + public void writeMaxKey(final String name) { + writeName(name); + writeMaxKey(); + } + + @Override + public void writeMaxKey() { + checkPreconditions("writeMaxKey", State.VALUE); + doWriteMaxKey(); + setState(getNextState()); + } + + @Override + public void writeMinKey(final String name) { + writeName(name); + writeMinKey(); + } + + @Override + public void writeMinKey() { + checkPreconditions("writeMinKey", State.VALUE); + doWriteMinKey(); + setState(getNextState()); + } + + @Override + public void writeName(final String name) { + notNull("name", name); + if (state != State.NAME) { + throwInvalidState("WriteName", State.NAME); + } + FieldNameValidator
fieldNameValidator = fieldNameValidatorStack.peek(); + if (!fieldNameValidator.validate(name)) { + throw new IllegalArgumentException(fieldNameValidator.getValidationErrorMessage(name)); + } + doWriteName(name); + context.name = name; + state = State.VALUE; + } + + /** + * Handles the logic of writing the element name. + * + * @param name the name of the element + * @since 3.5 + */ + protected void doWriteName(final String name) { + } + + @Override + public void writeNull(final String name) { + writeName(name); + writeNull(); + } + + @Override + public void writeNull() { + checkPreconditions("writeNull", State.VALUE); + doWriteNull(); + setState(getNextState()); + } + + @Override + public void writeObjectId(final String name, final ObjectId objectId) { + notNull("name", name); + notNull("value", objectId); + writeName(name); + writeObjectId(objectId); + } + + @Override + public void writeObjectId(final ObjectId objectId) { + notNull("value", objectId); + checkPreconditions("writeObjectId", State.VALUE); + doWriteObjectId(objectId); + setState(getNextState()); + } + + @Override + public void writeRegularExpression(final String name, final BsonRegularExpression regularExpression) { + notNull("name", name); + notNull("value", regularExpression); + writeName(name); + writeRegularExpression(regularExpression); + } + + @Override + public void writeRegularExpression(final BsonRegularExpression regularExpression) { + notNull("value", regularExpression); + checkPreconditions("writeRegularExpression", State.VALUE); + doWriteRegularExpression(regularExpression); + setState(getNextState()); + } + + @Override + public void writeString(final String name, final String value) { + notNull("name", name); + notNull("value", value); + writeName(name); + writeString(value); + } + + @Override + public void writeString(final String value) { + notNull("value", value); + checkPreconditions("writeString", State.VALUE); + doWriteString(value); + setState(getNextState()); + + } + + @Override + public void writeSymbol(final String name, final String value) { + notNull("name", name); + notNull("value", value); + writeName(name); + writeSymbol(value); + } + + @Override + public void writeSymbol(final String value) { + notNull("value", value); + checkPreconditions("writeSymbol", State.VALUE); + doWriteSymbol(value); + setState(getNextState()); + } + + @Override + public void writeTimestamp(final String name, final BsonTimestamp value) { + notNull("name", name); + notNull("value", value); + writeName(name); + writeTimestamp(value); + } + + @Override + public void writeTimestamp(final BsonTimestamp value) { + notNull("value", value); + checkPreconditions("writeTimestamp", State.VALUE); + doWriteTimestamp(value); + setState(getNextState()); + } + + @Override + public void writeUndefined(final String name) { + writeName(name); + writeUndefined(); + } + + @Override + public void writeUndefined() { + checkPreconditions("writeUndefined", State.VALUE); + doWriteUndefined(); + setState(getNextState()); + } + + /** + * Returns the next valid state for this writer. For example, transitions from {@link State#VALUE} to {@link State#NAME} once a value + * is written. + * + * @return the next {@code State} + */ + protected State getNextState() { + if (getContext().getContextType() == BsonContextType.ARRAY) { + return State.VALUE; + } else { + return State.NAME; + } + } + + /** + * Checks if this writer's current state is in the list of given states. 
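+ * <p>
+ * Sketch of the round trip this guards (illustrative): every public write method funnels through
+ * {@code checkPreconditions}, which delegates here, before invoking its {@code doWriteXxx} hook:
+ * </p>
+ * <pre>{@code
+ * writer.writeStartDocument(); // INITIAL -> NAME
+ * writer.writeInt32("a", 1);   // writeName("a"): NAME -> VALUE, then writeInt32(1): VALUE -> NAME
+ * writer.writeEndDocument();   // NAME -> DONE at the top level
+ * }</pre>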
+     *
+     * @param validStates an array of {@code State}s to compare this writer's state to.
+     * @return true if this writer's state is in the given list.
+     */
+    protected boolean checkState(final State[] validStates) {
+        for (final State cur : validStates) {
+            if (cur == getState()) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    /**
+     * Checks that the writer is in the correct state. If the writer's current state is in the list of given states, this method
+     * completes without exception. Throws an {@link java.lang.IllegalStateException} if the writer is closed. Throws a
+     * {@link BsonInvalidOperationException} if the method is trying to do something that is not permitted in the current state.
+     *
+     * @param methodName  the name of the method for which the checks are being performed
+     * @param validStates the list of valid states for this operation
+     * @see #throwInvalidState(String, org.bson.AbstractBsonWriter.State...)
+     */
+    protected void checkPreconditions(final String methodName, final State... validStates) {
+        if (isClosed()) {
+            throw new IllegalStateException("BsonWriter is closed");
+        }
+
+        if (!checkState(validStates)) {
+            throwInvalidState(methodName, validStates);
+        }
+    }
+
+    /**
+     * Throws a {@link BsonInvalidOperationException} when the method called is not valid for the current {@code BsonContextType}.
+     *
+     * @param methodName        The name of the method.
+     * @param actualContextType The actual ContextType.
+     * @param validContextTypes The valid ContextTypes.
+     * @throws BsonInvalidOperationException when the method called is not valid for the current ContextType.
+     */
+    protected void throwInvalidContextType(final String methodName, final BsonContextType actualContextType,
+                                           final BsonContextType... validContextTypes) {
+        String validContextTypesString = StringUtils.join(" or ", validContextTypes);
+        throw new BsonInvalidOperationException(format("%s can only be called when ContextType is %s, "
+                + "not when ContextType is %s.",
+                methodName, validContextTypesString, actualContextType));
+    }
+
+    /**
+     * Throws a {@link BsonInvalidOperationException} when the method called is not valid for the current state.
+     *
+     * @param methodName  The name of the method.
+     * @param validStates The valid states.
+     * @throws BsonInvalidOperationException when the method called is not valid for the current state.
+     */
+    protected void throwInvalidState(final String methodName, final State... validStates) {
+        if (state == State.INITIAL || state == State.SCOPE_DOCUMENT || state == State.DONE) {
+            if (!methodName.startsWith("end") && !methodName.equals("writeName")) { // NOPMD
+                //NOPMD collapsing these if statements will not aid readability
+                String typeName = methodName.substring(5);
+                if (typeName.startsWith("start")) {
+                    typeName = typeName.substring(5);
+                }
+                String article = "A";
+                if (Arrays.asList('A', 'E', 'I', 'O', 'U').contains(typeName.charAt(0))) {
+                    article = "An";
+                }
+                throw new BsonInvalidOperationException(format("%s %s value cannot be written to the root level of a BSON document.",
+                        article, typeName));
+            }
+        }
+
+        String validStatesString = StringUtils.join(" or ", validStates);
+        throw new BsonInvalidOperationException(format("%s can only be called when State is %s, not when State is %s",
+                methodName, validStatesString, state));
+    }
+
+    /**
+     * {@inheritDoc}
+     *
+     * <p>
+     * The {@link #flush()} method of {@link AbstractBsonWriter} does nothing.
+     * </p>
+     */
+    @Override
+    public void flush() {
+    }
+
+    @Override
+    public void close() {
+        closed = true;
+    }
+
+    @Override
+    public void pipe(final BsonReader reader) {
+        notNull("reader", reader);
+        pipeDocument(reader, null);
+    }
+
+    /**
+     * Reads a single document from the given BsonReader and writes it to this, appending the given extra elements to the document.
+     *
+     * @param reader        the source of the document
+     * @param extraElements the extra elements to append to the document
+     * @since 3.6
+     */
+    public void pipe(final BsonReader reader, final List<BsonElement> extraElements) {
+        notNull("reader", reader);
+        notNull("extraElements", extraElements);
+        pipeDocument(reader, extraElements);
+    }
+
+    /**
+     * Pipes a list of extra elements to this writer.
+     *
+     * @param extraElements the extra elements
+     */
+    protected void pipeExtraElements(final List<BsonElement> extraElements) {
+        notNull("extraElements", extraElements);
+        for (BsonElement cur : extraElements) {
+            writeName(cur.getName());
+            pipeValue(cur.getValue());
+        }
+    }
+
+    /**
+     * Returns true if the current execution of the pipe method should be aborted.
+     *
+     * @return true if the current execution of the pipe method should be aborted.
+     *
+     * @since 3.7
+     */
+    protected boolean abortPipe() {
+        return false;
+    }
+
+    private void pipeDocument(final BsonReader reader, final List<BsonElement> extraElements) {
+        reader.readStartDocument();
+        writeStartDocument();
+        while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+            writeName(reader.readName());
+            pipeValue(reader);
+            if (abortPipe()) {
+                return;
+            }
+        }
+        reader.readEndDocument();
+        if (extraElements != null) {
+            pipeExtraElements(extraElements);
+        }
+        writeEndDocument();
+    }
+
+    private void pipeJavascriptWithScope(final BsonReader reader) {
+        writeJavaScriptWithScope(reader.readJavaScriptWithScope());
+        pipeDocument(reader, null);
+    }
+
+    private void pipeValue(final BsonReader reader) {
+        switch (reader.getCurrentBsonType()) {
+            case DOCUMENT:
+                pipeDocument(reader, null);
+                break;
+            case ARRAY:
+                pipeArray(reader);
+                break;
+            case DOUBLE:
+                writeDouble(reader.readDouble());
+                break;
+            case STRING:
+                writeString(reader.readString());
+                break;
+            case BINARY:
+                writeBinaryData(reader.readBinaryData());
+                break;
+            case UNDEFINED:
+                reader.readUndefined();
+                writeUndefined();
+                break;
+            case OBJECT_ID:
+                writeObjectId(reader.readObjectId());
+                break;
+            case BOOLEAN:
+                writeBoolean(reader.readBoolean());
+                break;
+            case DATE_TIME:
+                writeDateTime(reader.readDateTime());
+                break;
+            case NULL:
+                reader.readNull();
+                writeNull();
+                break;
+            case REGULAR_EXPRESSION:
+                writeRegularExpression(reader.readRegularExpression());
+                break;
+            case JAVASCRIPT:
+                writeJavaScript(reader.readJavaScript());
+                break;
+            case SYMBOL:
+                writeSymbol(reader.readSymbol());
+                break;
+            case JAVASCRIPT_WITH_SCOPE:
+                pipeJavascriptWithScope(reader);
+                break;
+            case INT32:
+                writeInt32(reader.readInt32());
+                break;
+            case TIMESTAMP:
+                writeTimestamp(reader.readTimestamp());
+                break;
+            case INT64:
+                writeInt64(reader.readInt64());
+                break;
+            case DECIMAL128:
+                writeDecimal128(reader.readDecimal128());
+                break;
+            case MIN_KEY:
+                reader.readMinKey();
+                writeMinKey();
+                break;
+            case DB_POINTER:
+                writeDBPointer(reader.readDBPointer());
+                break;
+            case MAX_KEY:
+                reader.readMaxKey();
+                writeMaxKey();
+                break;
+            default:
+                throw new IllegalArgumentException("unhandled BSON type: " + reader.getCurrentBsonType());
+        }
+    }
+
+    private void pipeDocument(final BsonDocument value) {
+        writeStartDocument();
+        for (Map.Entry<String, BsonValue> cur : value.entrySet()) {
writeName(cur.getKey());
+            pipeValue(cur.getValue());
+        }
+        writeEndDocument();
+    }
+
+    private void pipeArray(final BsonReader reader) {
+        reader.readStartArray();
+        writeStartArray();
+        while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+            pipeValue(reader);
+            if (abortPipe()) {
+                return;
+            }
+        }
+        reader.readEndArray();
+        writeEndArray();
+    }
+
+    private void pipeArray(final BsonArray array) {
+        writeStartArray();
+        for (BsonValue cur : array) {
+            pipeValue(cur);
+        }
+        writeEndArray();
+    }
+
+    private void pipeJavascriptWithScope(final BsonJavaScriptWithScope javaScriptWithScope) {
+        writeJavaScriptWithScope(javaScriptWithScope.getCode());
+        pipeDocument(javaScriptWithScope.getScope());
+    }
+
+    private void pipeValue(final BsonValue value) {
+        switch (value.getBsonType()) {
+            case DOCUMENT:
+                pipeDocument(value.asDocument());
+                break;
+            case ARRAY:
+                pipeArray(value.asArray());
+                break;
+            case DOUBLE:
+                writeDouble(value.asDouble().getValue());
+                break;
+            case STRING:
+                writeString(value.asString().getValue());
+                break;
+            case BINARY:
+                writeBinaryData(value.asBinary());
+                break;
+            case UNDEFINED:
+                writeUndefined();
+                break;
+            case OBJECT_ID:
+                writeObjectId(value.asObjectId().getValue());
+                break;
+            case BOOLEAN:
+                writeBoolean(value.asBoolean().getValue());
+                break;
+            case DATE_TIME:
+                writeDateTime(value.asDateTime().getValue());
+                break;
+            case NULL:
+                writeNull();
+                break;
+            case REGULAR_EXPRESSION:
+                writeRegularExpression(value.asRegularExpression());
+                break;
+            case JAVASCRIPT:
+                writeJavaScript(value.asJavaScript().getCode());
+                break;
+            case SYMBOL:
+                writeSymbol(value.asSymbol().getSymbol());
+                break;
+            case JAVASCRIPT_WITH_SCOPE:
+                pipeJavascriptWithScope(value.asJavaScriptWithScope());
+                break;
+            case INT32:
+                writeInt32(value.asInt32().getValue());
+                break;
+            case TIMESTAMP:
+                writeTimestamp(value.asTimestamp());
+                break;
+            case INT64:
+                writeInt64(value.asInt64().getValue());
+                break;
+            case DECIMAL128:
+                writeDecimal128(value.asDecimal128().getValue());
+                break;
+            case MIN_KEY:
+                writeMinKey();
+                break;
+            case DB_POINTER:
+                writeDBPointer(value.asDBPointer());
+                break;
+            case MAX_KEY:
+                writeMaxKey();
+                break;
+            default:
+                throw new IllegalArgumentException("unhandled BSON type: " + value.getBsonType());
+        }
+    }
+
+    /**
+     * The state of a writer. Indicates where in a document the writer is.
+     */
+    public enum State {
+        /**
+         * The initial state.
+         */
+        INITIAL,
+
+        /**
+         * The writer is positioned to write a name.
+         */
+        NAME,
+
+        /**
+         * The writer is positioned to write a value.
+         */
+        VALUE,
+
+        /**
+         * The writer is positioned to write a scope document (call WriteStartDocument to start writing the scope document).
+         */
+        SCOPE_DOCUMENT,
+
+        /**
+         * The writer is done.
+         */
+        DONE,
+
+        /**
+         * The writer is closed.
+         */
+        CLOSED
+    }
+
+    /**
+     * The context for the writer. Records the parent context, creating a bread crumb trail to trace back up to the root context of the
+     * writer. Also records the {@link org.bson.BsonContextType}, indicating whether the writer is writing a document, array, or other
+     * complex sub-structure.
+     */
+    public class Context {
+        private final Context parentContext;
+        private final BsonContextType contextType;
+        private String name;
+
+        /**
+         * Creates a new instance, copying values from an existing context.
+         *
+         * @param from the {@code Context} to copy values from
+         */
+        public Context(final Context from) {
+            parentContext = from.parentContext;
+            contextType = from.contextType;
+        }
+
+        /**
+         * Creates a new instance.
+         *
+         * @param parentContext the context of the parent node
+         * @param contextType   the context type.
+         */
+        public Context(final Context parentContext, final BsonContextType contextType) {
+            this.parentContext = parentContext;
+            this.contextType = contextType;
+        }
+
+        /**
+         * Returns the parent context. Allows users of this context object to transition to this parent context.
+         *
+         * @return the context that came before this one
+         */
+        public Context getParentContext() {
+            return parentContext;
+        }
+
+        /**
+         * Gets the current context type.
+         *
+         * @return the current context type.
+         */
+        public BsonContextType getContextType() {
+            return contextType;
+        }
+
+        /**
+         * Copies the values from this {@code Context} into a new instance.
+         *
+         * @return the new instance with the same values as this context.
+         */
+        public Context copy() {
+            return new Context(this);
+        }
+    }
+
+    /**
+     * Captures the current state of this writer - its {@link org.bson.AbstractBsonWriter.Context}, {@link
+     * org.bson.AbstractBsonWriter.State}, field name and depth.
+     */
+    protected class Mark {
+        private final Context markedContext;
+        private final State markedState;
+        private final String currentName;
+        private final int serializationDepth;
+
+        /**
+         * Creates a new snapshot of the current state.
+         */
+        protected Mark() {
+            this.markedContext = AbstractBsonWriter.this.context.copy();
+            this.markedState = AbstractBsonWriter.this.state;
+            this.currentName = AbstractBsonWriter.this.context.name;
+            this.serializationDepth = AbstractBsonWriter.this.serializationDepth;
+        }
+
+        /**
+         * Resets the {@code AbstractBsonWriter} instance that contains this {@code Mark} to the state the writer was in when the Mark was
+         * created.
+         */
+        protected void reset() {
+            setContext(markedContext);
+            setState(markedState);
+            AbstractBsonWriter.this.context.name = currentName;
+            AbstractBsonWriter.this.serializationDepth = serializationDepth;
+        }
+    }
+}
diff --git a/bson/src/main/org/bson/BSON.java b/bson/src/main/org/bson/BSON.java
new file mode 100644
index 00000000000..2496bbc2348
--- /dev/null
+++ b/bson/src/main/org/bson/BSON.java
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson;
+
+import java.util.regex.Pattern;
+
+/**
+ * Contains byte representations of BSON binary subtypes (see the BSON Specification), together with support for converting
+ * regular expression modifiers between their database and Java {@link java.util.regex.Pattern} representations.
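+ * <p>An illustrative sketch of the conversion helpers defined below (the input strings are hypothetical):</p>
+ * <pre>
+ * int flags = BSON.regexFlags("im");         // Pattern.CASE_INSENSITIVE | Pattern.MULTILINE
+ * String modifiers = BSON.regexFlags(flags); // "im"
+ * </pre>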
+ * + * @see org.bson.Transformer + */ +class BSON { + + + static final byte B_GENERAL = 0; + static final byte B_BINARY = 2; + + // --- regex flags + + private static final int FLAG_GLOBAL = 256; + + private static final int[] FLAG_LOOKUP = new int[Character.MAX_VALUE]; + + static { + FLAG_LOOKUP['g'] = FLAG_GLOBAL; + FLAG_LOOKUP['i'] = Pattern.CASE_INSENSITIVE; + FLAG_LOOKUP['m'] = Pattern.MULTILINE; + FLAG_LOOKUP['s'] = Pattern.DOTALL; + FLAG_LOOKUP['c'] = Pattern.CANON_EQ; + FLAG_LOOKUP['x'] = Pattern.COMMENTS; + FLAG_LOOKUP['d'] = Pattern.UNIX_LINES; + FLAG_LOOKUP['t'] = Pattern.LITERAL; + FLAG_LOOKUP['u'] = Pattern.UNICODE_CASE; + } + + /** + * Converts a sequence of regular expression modifiers from the database into Java regular expression flags. + * + * @param s regular expression modifiers + * @return the Java flags + * @throws IllegalArgumentException If sequence contains invalid flags. + */ + static int regexFlags(final String s) { + int flags = 0; + + if (s == null) { + return flags; + } + + for (final char f : s.toLowerCase().toCharArray()) { + flags |= regexFlag(f); + } + + return flags; + } + + /** + * Converts a regular expression modifier from the database into Java regular expression flags. + * + * @param c regular expression modifier + * @return the Java flags + * @throws IllegalArgumentException If sequence contains invalid flags. + */ + private static int regexFlag(final char c) { + + int flag = FLAG_LOOKUP[c]; + + if (flag == 0) { + throw new IllegalArgumentException(String.format("Unrecognized flag [%c]", c)); + } + + return flag; + } + + /** + * Converts Java regular expression flags into regular expression modifiers from the database. + * + * @param flags the Java flags + * @return the Java flags + * @throws IllegalArgumentException if some flags couldn't be recognized. + */ + static String regexFlags(final int flags) { + int processedFlags = flags; + StringBuilder buf = new StringBuilder(); + + for (int i = 0; i < FLAG_LOOKUP.length; i++) { + if ((processedFlags & FLAG_LOOKUP[i]) > 0) { + buf.append((char) i); + processedFlags -= FLAG_LOOKUP[i]; + } + } + + if (processedFlags > 0) { + throw new IllegalArgumentException("Some flags could not be recognized."); + } + + return buf.toString(); + } +} diff --git a/bson/src/main/org/bson/BSONCallback.java b/bson/src/main/org/bson/BSONCallback.java new file mode 100644 index 00000000000..007c34265a7 --- /dev/null +++ b/bson/src/main/org/bson/BSONCallback.java @@ -0,0 +1,262 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import org.bson.types.Decimal128; +import org.bson.types.ObjectId; + +/** + * A callback interface for describing the structure of a BSON document. Implementations of this define how to turn BSON read from MongoDB + * into Java objects. + *
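+ * <p>A minimal decoding sketch ({@code bsonBytes} is assumed to hold a single BSON document):</p>
+ * <pre>
+ * BSONCallback callback = new BasicBSONCallback();
+ * new BasicBSONDecoder().decode(bsonBytes, callback);
+ * BSONObject document = (BSONObject) callback.get();
+ * </pre>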
+ * <p>
+ * See the BSON Spec. + */ +public interface BSONCallback { + + /** + * Signals the start of a BSON document, which usually maps onto some Java object. + * + * @mongodb.driver.manual core/document/ MongoDB Documents + */ + void objectStart(); + + /** + * Signals the start of a BSON document, which usually maps onto some Java object. + * + * @param name the field name of the document. + * @mongodb.driver.manual core/document/ MongoDB Documents + */ + void objectStart(String name); + + /** + * Called at the end of the document/array, and returns this object. + * + * @return the Object that has been read from this section of the document. + */ + Object objectDone(); + + /** + * Resets the callback, clearing all state. + */ + void reset(); + + /** + * Returns the finished top-level Document. + * + * @return the top level document read from the database. + */ + Object get(); + + /** + * Factory method for BSONCallbacks. + * + * @return a new BSONCallback. + */ + BSONCallback createBSONCallback(); + + /** + * Signals the start of a BSON array. + * + * @mongodb.driver.manual tutorial/query-documents/#read-operations-arrays Arrays + */ + void arrayStart(); + + /** + * Signals the start of a BSON array, with its field name. + * + * @param name the name of this array field + * @mongodb.driver.manual tutorial/query-documents/#read-operations-arrays Arrays + */ + void arrayStart(String name); + + /** + * Called the end of the array, and returns the completed array. + * + * @return an Object representing the array that has been read from this section of the document. + */ + Object arrayDone(); + + /** + * Called when reading a BSON field that exists but has a null value. + * + * @param name the name of the field + * @see org.bson.BsonType#NULL + */ + void gotNull(String name); + + /** + * Called when reading a field with a {@link org.bson.BsonType#UNDEFINED} value. + * + * @param name the name of the field + * @see org.bson.BsonType#UNDEFINED + */ + void gotUndefined(String name); + + /** + * Called when reading a field with a {@link org.bson.BsonType#MIN_KEY} value. + * + * @param name the name of the field + */ + void gotMinKey(String name); + + /** + * Called when reading a field with a {@link org.bson.BsonType#MAX_KEY} value. + * + * @param name the name of the field + */ + void gotMaxKey(String name); + + /** + * Called when reading a field with a {@link org.bson.BsonType#BOOLEAN} value. + * + * @param name the name of the field + * @param value the field's value + */ + void gotBoolean(String name, boolean value); + + /** + * Called when reading a field with a {@link org.bson.BsonType#DOUBLE} value. + * + * @param name the name of the field + * @param value the field's value + */ + void gotDouble(String name, double value); + + /** + * Called when reading a field with a {@link org.bson.BsonType#DECIMAL128} value. + * + * @param name the field name + * @param value the Decimal128 field value + * @since 3.4 + * @mongodb.server.release 3.4 + */ + void gotDecimal128(String name, Decimal128 value); + + /** + * Called when reading a field with a {@link org.bson.BsonType#INT32} value. + * + * @param name the name of the field + * @param value the field's value + */ + void gotInt(String name, int value); + + /** + * Called when reading a field with a {@link org.bson.BsonType#INT64} value. + * + * @param name the name of the field + * @param value the field's value + */ + void gotLong(String name, long value); + + /** + * Called when reading a field with a {@link org.bson.BsonType#DATE_TIME} value. 
+ * + * @param name the name of the field + * @param millis the date and time in milliseconds + */ + void gotDate(String name, long millis); + + /** + * Called when reading a field with a {@link org.bson.BsonType#STRING} value. + * + * @param name the name of the field + * @param value the field's value + */ + void gotString(String name, String value); + + /** + * Called when reading a field with a {@link org.bson.BsonType#SYMBOL} value. + * + * @param name the name of the field + * @param value the field's value + */ + void gotSymbol(String name, String value); + + /** + * Called when reading a field with a {@link org.bson.BsonType#REGULAR_EXPRESSION} value. + * + * @param name the name of the field + * @param pattern the regex pattern + * @param flags the optional flags for the regular expression + * @mongodb.driver.manual reference/operator/query/regex/ $regex + */ + void gotRegex(String name, String pattern, String flags); + + /** + * Called when reading a field with a {@link org.bson.BsonType#TIMESTAMP} value. + * + * @param name the name of the field + * @param time the time in seconds since epoch + * @param increment an incrementing ordinal for operations within a given second + * @mongodb.driver.manual reference/bson-types/#timestamps Timestamps + */ + void gotTimestamp(String name, int time, int increment); + + /** + * Called when reading a field with a {@link org.bson.BsonType#OBJECT_ID} value. + * + * @param name the name of the field + * @param id the object ID + */ + void gotObjectId(String name, ObjectId id); + + /** + * Invoked when {@link org.bson.BSONDecoder} encountered a {@link org.bson.BsonType#DB_POINTER} type field in a byte sequence. + * + * @param name the name of the field + * @param namespace the namespace to which reference is pointing to + * @param id the if of the object to which reference is pointing to + */ + void gotDBRef(String name, String namespace, ObjectId id); + + /** + * Called when reading a field with a {@link org.bson.BsonType#BINARY} value. Note that binary values have a subtype, which may + * determine how the value is processed. + * + * @param name the name of the field + * @param type one of the binary subtypes: {@link org.bson.BsonBinarySubType} + * @param data the field's value + */ + void gotBinary(String name, byte type, byte[] data); + + /** + * Called when reading a field with a {@link java.util.UUID} value. This is a binary value of subtype + * {@link org.bson.BsonBinarySubType#UUID_LEGACY} + * + * @param name the name of the field + * @param part1 the first part of the UUID + * @param part2 the second part of the UUID + */ + void gotUUID(String name, long part1, long part2); + + /** + * Called when reading a field with a {@link org.bson.BsonType#JAVASCRIPT} value. + * + * @param name the name of the field + * @param code the JavaScript code + */ + void gotCode(String name, String code); + + /** + * Called when reading a field with a {@link org.bson.BsonType#JAVASCRIPT_WITH_SCOPE} value. + * + * @param name the name of the field + * @param code the JavaScript code + * @param scope a document representing the scope for the code + */ + void gotCodeWScope(String name, String code, Object scope); +} diff --git a/bson/src/main/org/bson/BSONCallbackAdapter.java b/bson/src/main/org/bson/BSONCallbackAdapter.java new file mode 100644 index 00000000000..1d8b5ffe746 --- /dev/null +++ b/bson/src/main/org/bson/BSONCallbackAdapter.java @@ -0,0 +1,224 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import org.bson.internal.UuidHelper; +import org.bson.types.Decimal128; +import org.bson.types.ObjectId; + +import java.util.UUID; + +import static org.bson.BasicBSONDecoder.getDefaultUuidRepresentation; + +class BSONCallbackAdapter extends AbstractBsonWriter { + + private BSONCallback bsonCallback; + + /** + * Initializes a new instance of the BsonWriter class. + * + * @param settings The writer settings. + * @param bsonCallback The callback to inform of operations on this writer + */ + protected BSONCallbackAdapter(final BsonWriterSettings settings, final BSONCallback bsonCallback) { + super(settings); + this.bsonCallback = bsonCallback; + } + + @Override + public void doWriteStartDocument() { + BsonContextType contextType = getState() == State.SCOPE_DOCUMENT + ? BsonContextType.SCOPE_DOCUMENT + : BsonContextType.DOCUMENT; + + if (getContext() == null || contextType == BsonContextType.SCOPE_DOCUMENT) { + bsonCallback.objectStart(); + } else { + bsonCallback.objectStart(getName()); + } + setContext(new Context(getContext(), contextType)); + } + + @Override + protected void doWriteEndDocument() { + BsonContextType contextType = getContext().getContextType(); + + setContext(getContext().getParentContext()); + bsonCallback.objectDone(); + + if (contextType == BsonContextType.SCOPE_DOCUMENT) { + Object scope = bsonCallback.get(); + bsonCallback = getContext().callback; + bsonCallback.gotCodeWScope(getContext().name, getContext().code, scope); + } + } + + @Override + public void doWriteStartArray() { + bsonCallback.arrayStart(getName()); + setContext(new Context(getContext(), BsonContextType.ARRAY)); + } + + @Override + protected void doWriteEndArray() { + setContext(getContext().getParentContext()); + bsonCallback.arrayDone(); + } + + @Override + protected void doWriteBinaryData(final BsonBinary value) { + if (BsonBinarySubType.isUuid(value.getType())) { + doWriteUuid(value); + } else { + bsonCallback.gotBinary(getName(), value.getType(), value.getData()); + } + } + + private void doWriteUuid(final BsonBinary value) { + UuidRepresentation defaultUuidRepresentation = getDefaultUuidRepresentation(); + if (value.getType() == defaultUuidRepresentation.getSubtype().getValue()) { + UUID uuid = UuidHelper.decodeBinaryToUuid(value.getData(), value.getType(), defaultUuidRepresentation); + bsonCallback.gotUUID(getName(), uuid.getMostSignificantBits(), uuid.getLeastSignificantBits()); + } else { + bsonCallback.gotBinary(getName(), value.getType(), value.getData()); + } + } + + @Override + public void doWriteBoolean(final boolean value) { + bsonCallback.gotBoolean(getName(), value); + setState(getNextState()); + } + + @Override + protected void doWriteDateTime(final long value) { + bsonCallback.gotDate(getName(), value); + } + + @Override + protected void doWriteDBPointer(final BsonDbPointer value) { + bsonCallback.gotDBRef(getName(), value.getNamespace(), value.getId()); + } + + @Override + protected void 
doWriteDouble(final double value) { + bsonCallback.gotDouble(getName(), value); + } + + @Override + protected void doWriteInt32(final int value) { + bsonCallback.gotInt(getName(), value); + } + + @Override + protected void doWriteInt64(final long value) { + bsonCallback.gotLong(getName(), value); + } + + @Override + protected void doWriteDecimal128(final Decimal128 value) { + bsonCallback.gotDecimal128(getName(), value); + } + + @Override + protected void doWriteJavaScript(final String value) { + bsonCallback.gotCode(getName(), value); + } + + @Override + protected void doWriteJavaScriptWithScope(final String value) { + getContext().callback = bsonCallback; + getContext().code = value; + getContext().name = getName(); + this.bsonCallback = bsonCallback.createBSONCallback(); + } + + @Override + protected void doWriteMaxKey() { + bsonCallback.gotMaxKey(getName()); + } + + @Override + protected void doWriteMinKey() { + bsonCallback.gotMinKey(getName()); + } + + @Override + public void doWriteNull() { + bsonCallback.gotNull(getName()); + } + + @Override + public void doWriteObjectId(final ObjectId value) { + bsonCallback.gotObjectId(getName(), value); + } + + @Override + public void doWriteRegularExpression(final BsonRegularExpression value) { + bsonCallback.gotRegex(getName(), value.getPattern(), value.getOptions()); + } + + @Override + public void doWriteString(final String value) { + bsonCallback.gotString(getName(), value); + } + + @Override + public void doWriteSymbol(final String value) { + bsonCallback.gotSymbol(getName(), value); + } + + @Override + public void doWriteTimestamp(final BsonTimestamp value) { + bsonCallback.gotTimestamp(getName(), value.getTime(), value.getInc()); + } + + @Override + public void doWriteUndefined() { + bsonCallback.gotUndefined(getName()); + } + + @Override + protected Context getContext() { + return (Context) super.getContext(); + } + + @Override + protected String getName() { + if (getContext().getContextType() == BsonContextType.ARRAY) { + return Integer.toString(getContext().index++); + } else { + return super.getName(); + } + } + + public class Context extends AbstractBsonWriter.Context { + private int index; // used when contextType is an array + private BSONCallback callback; + private String code; + private String name; + + Context(final Context parentContext, final BsonContextType contextType) { + super(parentContext, contextType); + } + + @Override + public Context getParentContext() { + return (Context) super.getParentContext(); + } + } +} diff --git a/bson/src/main/org/bson/BSONDecoder.java b/bson/src/main/org/bson/BSONDecoder.java new file mode 100644 index 00000000000..9b7e7dd326d --- /dev/null +++ b/bson/src/main/org/bson/BSONDecoder.java @@ -0,0 +1,62 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import java.io.IOException; +import java.io.InputStream; + +/** + * An interface for decoders of BSON documents. 
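+ * <p>A minimal usage sketch ({@code bsonBytes} is assumed to hold a complete BSON document):</p>
+ * <pre>
+ * BSONDecoder decoder = new BasicBSONDecoder();
+ * BSONObject document = decoder.readObject(bsonBytes);
+ * </pre>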
+ */ +public interface BSONDecoder { + + /** + * Read a single BSON object from the given bytes. + * + * @param bytes the bytes in BSON format + * @return the BSON object for the given bytes + */ + BSONObject readObject(byte[] bytes); + + /** + * Read a single BSON object from the given input stream. + * + * @param in the input stream in BSON format + * @return the BSON object for the given bytes + * @throws java.io.IOException if there's a problem reading the object from the {@code InputStream} + */ + BSONObject readObject(InputStream in) throws IOException; + + /** + * Decode a single BSON object into the given callback from the given byte array. + * + * @param bytes the bytes in BSON format + * @param callback the callback + * @return the number of bytes in the BSON object + */ + int decode(byte[] bytes, BSONCallback callback); + + /** + * Decode a single BSON object into the given callback from the given input stream. + * + * @param in the input stream in BSON format + * @param callback the callback + * @return the number of bytes read from the input stream + * @throws java.io.IOException if there's a problem reading from the {@code InputStream} + */ + int decode(InputStream in, BSONCallback callback) throws IOException; +} diff --git a/bson/src/main/org/bson/BSONEncoder.java b/bson/src/main/org/bson/BSONEncoder.java new file mode 100644 index 00000000000..6974147b845 --- /dev/null +++ b/bson/src/main/org/bson/BSONEncoder.java @@ -0,0 +1,64 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import org.bson.io.OutputBuffer; + +/** + *
+ * <p>A {@code BSONEncoder} is a class which can be used to turn documents into byte arrays. The {@code BSONEncoder} walks down through
+ * the object graph and writes corresponding {@code byte} sequences into the underlying {@code OutputBuffer}.</p>
+ *
+ * <p>This class is part of the legacy API. Please check {@link org.bson.codecs.Encoder} for the new one.</p>
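+ *
+ * <p>A minimal usage sketch ({@code document} is assumed to be an existing {@code BSONObject}):</p>
+ * <pre>
+ * BSONEncoder encoder = new BasicBSONEncoder();
+ * byte[] bytes = encoder.encode(document);
+ * </pre>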
+ */ +public interface BSONEncoder { + + /** + * Encode a document into byte array. + * This is a shortcut method which creates a new {@link OutputBuffer}, + * invokes the other 3 methods in a corresponding sequence: + *
+ * <ul>
+ *     <li>{@link #set(org.bson.io.OutputBuffer)}</li>
+ *     <li>{@link #putObject(BSONObject)}</li>
+ *     <li>{@link #done()}</li>
+ * </ul>
+ * and returns the contents of the {@code OutputBuffer}.
+ *
+ * @param document the document to be encoded
+ * @return a byte sequence
+ */
+    byte[] encode(BSONObject document);
+
+    /**
+     * Encodes and writes a document into the underlying buffer.
+     *
+     * @param document the document to be encoded
+     * @return the number of bytes written
+     */
+    int putObject(BSONObject document);
+
+    /**
+     * Frees the resources.
+     */
+    void done();
+
+    /**
+     * Sets the buffer to which the result of encoding will be written.
+     *
+     * @param buffer the buffer to which byte sequences will be written
+     */
+    void set(OutputBuffer buffer);
+}
diff --git a/bson/src/main/org/bson/BSONException.java b/bson/src/main/org/bson/BSONException.java
new file mode 100644
index 00000000000..6b53a6c8bd0
--- /dev/null
+++ b/bson/src/main/org/bson/BSONException.java
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson;
+
+/**
+ * A general runtime exception raised in BSON processing.
+ * @serial exclude
+ */
+public class BSONException extends RuntimeException {
+
+    private static final long serialVersionUID = -4415279469780082174L;
+
+    private Integer errorCode = null;
+
+    /**
+     * @param msg The error message.
+     */
+    public BSONException(final String msg) {
+        super(msg);
+    }
+
+    /**
+     * @param errorCode The error code.
+     * @param msg       The error message.
+     */
+    public BSONException(final int errorCode, final String msg) {
+        super(msg);
+        this.errorCode = errorCode;
+    }
+
+    /**
+     * @param msg The error message.
+     * @param t   The throwable cause.
+     */
+    public BSONException(final String msg, final Throwable t) {
+        super(msg, t);
+    }
+
+    /**
+     * @param errorCode The error code.
+     * @param msg       The error message.
+     * @param t         The throwable cause.
+     */
+    public BSONException(final int errorCode, final String msg, final Throwable t) {
+        super(msg, t);
+        this.errorCode = errorCode;
+    }
+
+    /**
+     * Returns the error code.
+     *
+     * @return The error code.
+     */
+    public Integer getErrorCode() {
+        return errorCode;
+    }
+
+    /**
+     * Returns whether the error code is set (i.e., not null).
+     *
+     * @return true if the error code is not null.
+     */
+    public boolean hasErrorCode() {
+        return (errorCode != null);
+    }
+}
+
diff --git a/bson/src/main/org/bson/BSONObject.java b/bson/src/main/org/bson/BSONObject.java
new file mode 100644
index 00000000000..55863736793
--- /dev/null
+++ b/bson/src/main/org/bson/BSONObject.java
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson;
+
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * A key-value map that can be saved to the database.
+ */
+@SuppressWarnings("rawtypes")
+public interface BSONObject {
+
+    /**
+     * Sets a name/value pair in this object.
+     *
+     * @param key Name to set
+     * @param v   Corresponding value
+     * @return the previous value associated with {@code key}, or {@code null} if there was no mapping for {@code key}. (A
+     *         {@code null} return can also indicate that the map previously associated {@code null} with {@code key}.)
+     */
+    Object put(String key, Object v);
+
+    /**
+     * Sets all key/value pairs from an object into this object.
+     *
+     * @param o the object
+     */
+    void putAll(BSONObject o);
+
+    /**
+     * Sets all key/value pairs from a map into this object.
+     *
+     * @param m the map
+     */
+    void putAll(Map m);
+
+    /**
+     * Gets a field from this object by a given name.
+     *
+     * @param key The name of the field to fetch
+     * @return The field, if found
+     */
+    Object get(String key);
+
+    /**
+     * Returns a map representing this BSONObject.
+     *
+     * @return the map
+     */
+    Map toMap();
+
+    /**
+     * Removes a field with a given name from this object.
+     *
+     * @param key The name of the field to remove
+     * @return The value removed from this object
+     */
+    Object removeField(String key);
+
+    /**
+     * Checks if this object contains a field with the given name.
+     *
+     * @param s Field name for which to check
+     * @return True if the field is present
+     */
+    boolean containsField(String s);
+
+    /**
+     * Returns this object's fields' names.
+     *
+     * @return The names of the fields in this object
+     */
+    Set keySet();
+}
+
diff --git a/bson/src/main/org/bson/BasicBSONCallback.java b/bson/src/main/org/bson/BasicBSONCallback.java
new file mode 100644
index 00000000000..9990749a66f
--- /dev/null
+++ b/bson/src/main/org/bson/BasicBSONCallback.java
@@ -0,0 +1,297 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// BasicBSONCallback.java
+
+package org.bson;
+
+import org.bson.types.BSONTimestamp;
+import org.bson.types.BasicBSONList;
+import org.bson.types.Binary;
+import org.bson.types.Code;
+import org.bson.types.CodeWScope;
+import org.bson.types.Decimal128;
+import org.bson.types.MaxKey;
+import org.bson.types.MinKey;
+import org.bson.types.ObjectId;
+
+import java.util.Date;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.UUID;
+import java.util.regex.Pattern;
+
+/**
+ * An implementation of {@code BSONCallback} that creates an instance of BSONObject.
+ */
+public class BasicBSONCallback implements BSONCallback {
+
+    private Object root;
+    private final LinkedList<BSONObject> stack;
+    private final LinkedList<String> nameStack;
+
+    /**
+     * Creates a new instance.
+ */ + public BasicBSONCallback() { + stack = new LinkedList<>(); + nameStack = new LinkedList<>(); + reset(); + } + + @Override + public Object get() { + return root; + } + + /** + * Factory method for creating a new BSONObject. + * + * @return a new BasicBSONObject. + */ + public BSONObject create() { + return new BasicBSONObject(); + } + + /** + * Factory method for creating a new BSON List. + * + * @return a new BasicBSONList. + */ + protected BSONObject createList() { + return new BasicBSONList(); + } + + @Override + public BSONCallback createBSONCallback() { + return new BasicBSONCallback(); + } + + /** + * Helper method to create either a BSON Object or a BSON List depending upon whether the {@code array} parameter is true or not. + * + * @param array set to true to create a new BSON List, otherwise will create a new BSONObject + * @param path a list of field names to navigate to this field in the document + * @return the new BSONObject + */ + public BSONObject create(final boolean array, final List path) { + return array ? createList() : create(); + } + + @Override + public void objectStart() { + if (stack.size() > 0) { + throw new IllegalStateException("Illegal object beginning in current context."); + } + root = create(false, null); + stack.add((BSONObject) root); + } + + @Override + public void objectStart(final String name) { + nameStack.addLast(name); + BSONObject o = create(false, nameStack); + stack.getLast().put(name, o); + stack.addLast(o); + } + + @Override + public Object objectDone() { + BSONObject o = stack.removeLast(); + if (nameStack.size() > 0) { + nameStack.removeLast(); + } else if (stack.size() > 0) { + throw new IllegalStateException("Illegal object end in current context."); + } + + return o; + } + + @Override + public void arrayStart() { + root = create(true, null); + stack.add((BSONObject) root); + } + + @Override + public void arrayStart(final String name) { + nameStack.addLast(name); + BSONObject o = create(true, nameStack); + stack.getLast().put(name, o); + stack.addLast(o); + } + + @Override + public Object arrayDone() { + return objectDone(); + } + + @Override + public void gotNull(final String name) { + cur().put(name, null); + } + + @Override + public void gotUndefined(final String name) { + } + + @Override + public void gotMinKey(final String name) { + cur().put(name, new MinKey()); + } + + @Override + public void gotMaxKey(final String name) { + cur().put(name, new MaxKey()); + } + + @Override + public void gotBoolean(final String name, final boolean value) { + _put(name, value); + } + + @Override + public void gotDouble(final String name, final double value) { + _put(name, value); + } + + @Override + public void gotInt(final String name, final int value) { + _put(name, value); + } + + @Override + public void gotLong(final String name, final long value) { + _put(name, value); + } + + @Override + public void gotDecimal128(final String name, final Decimal128 value) { + _put(name, value); + } + + @Override + public void gotDate(final String name, final long millis) { + _put(name, new Date(millis)); + } + + @Override + public void gotRegex(final String name, final String pattern, final String flags) { + _put(name, Pattern.compile(pattern, BSON.regexFlags(flags))); + } + + @Override + public void gotString(final String name, final String value) { + _put(name, value); + } + + @Override + public void gotSymbol(final String name, final String value) { + _put(name, value); + } + + @Override + public void gotTimestamp(final String name, final int time, final 
int increment) { + _put(name, new BSONTimestamp(time, increment)); + } + + @Override + public void gotObjectId(final String name, final ObjectId id) { + _put(name, id); + } + + @Override + public void gotDBRef(final String name, final String namespace, final ObjectId id) { + _put(name, new BasicBSONObject("$ns", namespace).append("$id", id)); + } + + @Override + public void gotBinary(final String name, final byte type, final byte[] data) { + if (type == BSON.B_GENERAL || type == BSON.B_BINARY) { + _put(name, data); + } else { + _put(name, new Binary(type, data)); + } + } + + @Override + public void gotUUID(final String name, final long part1, final long part2) { + _put(name, new UUID(part1, part2)); + } + + @Override + public void gotCode(final String name, final String code) { + _put(name, new Code(code)); + } + + @Override + public void gotCodeWScope(final String name, final String code, final Object scope) { + _put(name, new CodeWScope(code, (BSONObject) scope)); + } + + /** + * Puts a new value into the document. + * + * @param name the name of the field + * @param value the value + */ + protected void _put(final String name, final Object value) { + cur().put(name, value); + } + + /** + * Gets the current value + * + * @return the current value + */ + protected BSONObject cur() { + return stack.getLast(); + } + + /** + * Gets the name of the current field + * + * @return the name of the current field. + */ + protected String curName() { + return nameStack.peekLast(); + } + + /** + * Sets the root document for this position + * + * @param root the new root document + */ + protected void setRoot(final Object root) { + this.root = root; + } + + /** + * Returns whether this is the top level or not + * + * @return true if there's nothing on the stack, and this is the top level of the document. + */ + protected boolean isStackEmpty() { + return stack.size() < 1; + } + + @Override + public void reset() { + root = null; + stack.clear(); + nameStack.clear(); + } +} diff --git a/bson/src/main/org/bson/BasicBSONDecoder.java b/bson/src/main/org/bson/BasicBSONDecoder.java new file mode 100644 index 00000000000..35c44ea6033 --- /dev/null +++ b/bson/src/main/org/bson/BasicBSONDecoder.java @@ -0,0 +1,110 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import org.bson.io.ByteBufferBsonInput; + +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; + +import static org.bson.assertions.Assertions.notNull; + +/** + * Basic implementation of BSONDecoder interface that creates BasicBSONObject instances + */ +public class BasicBSONDecoder implements BSONDecoder { + + /** + * Sets the global (JVM-wide) {@link UuidRepresentation} to use when decoding BSON binary values with subtypes of either + * {@link BsonBinarySubType#UUID_STANDARD} or {@link BsonBinarySubType#UUID_LEGACY}. + * + *
+ * <p>
+ * If the {@link BsonBinarySubType} of the value to be decoded matches the binary subtype of the {@link UuidRepresentation}, + * then the value will be decoded to an instance of {@link java.util.UUID}, according to the semantics of the + * {@link UuidRepresentation}. Otherwise, it will be decoded to an instance of {@link org.bson.types.Binary}. + *
+ * </p>
+ * + *
+ * <p>
+ * Defaults to {@link UuidRepresentation#JAVA_LEGACY}. If set to {@link UuidRepresentation#UNSPECIFIED}, attempting to decode any + * UUID will throw a {@link BSONException}. + *
+ * </p>
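+ *
+ * <p>For example, to opt in to standard UUID decoding JVM-wide (an illustrative sketch):</p>
+ * <pre>
+ * BasicBSONDecoder.setDefaultUuidRepresentation(UuidRepresentation.STANDARD);
+ * </pre>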
+ * + * @param uuidRepresentation the uuid representation, which may not be null + * @see BSONCallback#gotUUID(String, long, long) + * @see BasicBSONEncoder#setDefaultUuidRepresentation(UuidRepresentation) + * @since 4.7 + */ + public static void setDefaultUuidRepresentation(final UuidRepresentation uuidRepresentation) { + defaultUuidRepresentation = notNull("uuidRepresentation", uuidRepresentation); + } + + /** + * Gets the default {@link UuidRepresentation} to use when decoding BSON binary values. + * + *
+ * <p>
+ * If unset, the default is {@link UuidRepresentation#JAVA_LEGACY}. + *
+ * </p>
+ * + * @return the uuid representation, which may not be null + * @see BSONCallback#gotUUID(String, long, long) + * @see BasicBSONEncoder#setDefaultUuidRepresentation(UuidRepresentation) + * @since 4.7 + */ + public static UuidRepresentation getDefaultUuidRepresentation() { + return defaultUuidRepresentation; + } + + private static volatile UuidRepresentation defaultUuidRepresentation = UuidRepresentation.JAVA_LEGACY; + + @Override + public BSONObject readObject(final byte[] bytes) { + BSONCallback bsonCallback = new BasicBSONCallback(); + decode(bytes, bsonCallback); + return (BSONObject) bsonCallback.get(); + } + + @Override + public BSONObject readObject(final InputStream in) throws IOException { + return readObject(readFully(in)); + } + + @Override + public int decode(final byte[] bytes, final BSONCallback callback) { + try (BsonBinaryReader reader = new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(bytes))))) { + BsonWriter writer = new BSONCallbackAdapter(new BsonWriterSettings(), callback); + writer.pipe(reader); + return reader.getBsonInput().getPosition(); //TODO check this. + } + } + + @Override + public int decode(final InputStream in, final BSONCallback callback) throws IOException { + return decode(readFully(in), callback); + } + + private byte[] readFully(final InputStream input) throws IOException { + byte[] sizeBytes = new byte[4]; + Bits.readFully(input, sizeBytes); + int size = Bits.readInt(sizeBytes); + + byte[] buffer = new byte[size]; + System.arraycopy(sizeBytes, 0, buffer, 0, 4); + Bits.readFully(input, buffer, 4, size - 4); + return buffer; + } +} diff --git a/bson/src/main/org/bson/BasicBSONEncoder.java b/bson/src/main/org/bson/BasicBSONEncoder.java new file mode 100644 index 00000000000..d7a90afe480 --- /dev/null +++ b/bson/src/main/org/bson/BasicBSONEncoder.java @@ -0,0 +1,575 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import org.bson.internal.UuidHelper; +import org.bson.io.BasicOutputBuffer; +import org.bson.io.OutputBuffer; +import org.bson.types.BSONTimestamp; +import org.bson.types.Binary; +import org.bson.types.Code; +import org.bson.types.CodeWScope; +import org.bson.types.Decimal128; +import org.bson.types.MaxKey; +import org.bson.types.MinKey; +import org.bson.types.ObjectId; +import org.bson.types.Symbol; + +import java.lang.reflect.Array; +import java.util.Date; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.regex.Pattern; + +import static org.bson.assertions.Assertions.notNull; + +/** + * This is meant to be pooled or cached. There is some per instance memory for string conversion, etc... + */ +public class BasicBSONEncoder implements BSONEncoder { + + /** + * Sets the global (JVM-wide) {@link UuidRepresentation} to use when encoding UUID values to BSON binary. + * + *
+ * <p>
+ * Defaults to {@link UuidRepresentation#JAVA_LEGACY}. If set to {@link UuidRepresentation#UNSPECIFIED}, attempting to encode any + * UUID will throw a {@link BSONException}. + *
+ * </p>
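+ *
+ * <p>For example, mirroring the decoder-side setting (an illustrative sketch):</p>
+ * <pre>
+ * BasicBSONEncoder.setDefaultUuidRepresentation(UuidRepresentation.STANDARD);
+ * </pre>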
+ *
+ * @param uuidRepresentation the uuid representation, which may not be null
+ * @see #putUUID(String, UUID)
+ * @see BasicBSONDecoder#setDefaultUuidRepresentation(UuidRepresentation)
+ * @since 4.7
+ */
+ public static void setDefaultUuidRepresentation(final UuidRepresentation uuidRepresentation) {
+ defaultUuidRepresentation = notNull("uuidRepresentation", uuidRepresentation);
+ }
+
+ /**
+ * Gets the default {@link UuidRepresentation} to use when encoding UUID values to BSON binary.
+ *
+ * <p>
+ * If unset, the default is {@link UuidRepresentation#JAVA_LEGACY}. + *
+ * </p>
+ * + * @return the uuid representation, which may not be null + * @see #putUUID(String, UUID) + * @see BasicBSONDecoder#setDefaultUuidRepresentation(UuidRepresentation) + * @since 4.7 + */ + public static UuidRepresentation getDefaultUuidRepresentation() { + return defaultUuidRepresentation; + } + + private static volatile UuidRepresentation defaultUuidRepresentation = UuidRepresentation.JAVA_LEGACY; + + private BsonBinaryWriter bsonWriter; + private OutputBuffer outputBuffer; + + @Override + public byte[] encode(final BSONObject document) { + OutputBuffer outputBuffer = new BasicOutputBuffer(); + set(outputBuffer); + putObject(document); + done(); + return outputBuffer.toByteArray(); + } + + @Override + public void done() { + this.bsonWriter.close(); + this.bsonWriter = null; + } + + @Override + public void set(final OutputBuffer buffer) { + if (this.bsonWriter != null) { + throw new IllegalStateException("Performing another operation at this moment"); + } + outputBuffer = buffer; + this.bsonWriter = new BsonBinaryWriter(buffer); + } + + /** + * Gets the buffer the BSON is being encoded into. + * + * @return the OutputBuffer + */ + protected OutputBuffer getOutputBuffer() { + return outputBuffer; + } + + /** + * Gets the writer responsible for writing the encoded BSON. + * + * @return the writer used to write the encoded BSON + */ + protected BsonBinaryWriter getBsonWriter() { + return bsonWriter; + } + + /** + * Encodes a {@code BSONObject}. This is for the higher level api calls. + * + * @param document the document to encode + * @return the number of characters in the encoding + */ + @Override + public int putObject(final BSONObject document) { + int startPosition = getOutputBuffer().getPosition(); + bsonWriter.writeStartDocument(); + + if (isTopLevelDocument() && document.containsField("_id")) { + _putObjectField("_id", document.get("_id")); + } + + for (final String key : document.keySet()) { + if (isTopLevelDocument() && key.equals("_id")) { + continue; + } + _putObjectField(key, document.get(key)); + } + bsonWriter.writeEndDocument(); + return getOutputBuffer().getPosition() - startPosition; + } + + private boolean isTopLevelDocument() { + return bsonWriter.getContext().getParentContext() == null; + } + + /** + * Writes a field name + * + * @param name the field name + */ + protected void putName(final String name) { + if (bsonWriter.getState() == AbstractBsonWriter.State.NAME) { + bsonWriter.writeName(name); + } + } + + /** + * Encodes any Object type + * + * @param name the field name + * @param value the value to write + */ + protected void _putObjectField(final String name, final Object value) { + if ("_transientFields".equals(name)) { + return; + } + if (name.contains("\0")) { + throw new IllegalArgumentException("Document field names can't have a NULL character. 
(Bad Key: '" + name + "')"); + } + + if ("$where".equals(name) && value instanceof String) { + putCode(name, new Code((String) value)); + } + + if (value == null) { + putNull(name); + } else if (value instanceof Date) { + putDate(name, (Date) value); + } else if (value instanceof Decimal128) { + putDecimal128(name, (Decimal128) value); + } else if (value instanceof Number) { + putNumber(name, (Number) value); + } else if (value instanceof Character) { + putString(name, value.toString()); + } else if (value instanceof String) { + putString(name, value.toString()); + } else if (value instanceof ObjectId) { + putObjectId(name, (ObjectId) value); + } else if (value instanceof Boolean) { + putBoolean(name, (Boolean) value); + } else if (value instanceof Pattern) { + putPattern(name, (Pattern) value); + } else if (value instanceof Iterable) { + putIterable(name, (Iterable) value); + } else if (value instanceof BSONObject) { + putObject(name, (BSONObject) value); + } else if (value instanceof Map) { + putMap(name, (Map) value); + } else if (value instanceof byte[]) { + putBinary(name, (byte[]) value); + } else if (value instanceof Binary) { + putBinary(name, (Binary) value); + } else if (value instanceof UUID) { + putUUID(name, (UUID) value); + } else if (value.getClass().isArray()) { + putArray(name, value); + } else if (value instanceof Symbol) { + putSymbol(name, (Symbol) value); + } else if (value instanceof BSONTimestamp) { + putTimestamp(name, (BSONTimestamp) value); + } else if (value instanceof CodeWScope) { + putCodeWScope(name, (CodeWScope) value); + } else if (value instanceof Code) { + putCode(name, (Code) value); + } else if (value instanceof MinKey) { + putMinKey(name); + } else if (value instanceof MaxKey) { + putMaxKey(name); + } else if (putSpecial(name, value)) { + // no-op + } else { + throw new IllegalArgumentException("Can't serialize " + value.getClass()); + } + + } + + /** + * Encodes a null value + * + * @param name the field name + * @see org.bson.BsonType#NULL + */ + protected void putNull(final String name) { + putName(name); + bsonWriter.writeNull(); + } + + /** + * Encodes an undefined value + * + * @param name the field name + * @see org.bson.BsonType#UNDEFINED + */ + protected void putUndefined(final String name) { + putName(name); + bsonWriter.writeUndefined(); + } + + /** + * Encodes a BSON timestamp + * + * @param name the field name + * @param timestamp the timestamp to encode + * @see org.bson.BsonType#TIMESTAMP + */ + protected void putTimestamp(final String name, final BSONTimestamp timestamp) { + putName(name); + bsonWriter.writeTimestamp(new BsonTimestamp(timestamp.getTime(), timestamp.getInc())); + } + + /** + * Encodes a field to a {@link org.bson.BsonType#JAVASCRIPT} value. + * + * @param name the field name + * @param code the value + */ + protected void putCode(final String name, final Code code) { + putName(name); + bsonWriter.writeJavaScript(code.getCode()); + } + + /** + * Encodes a field to a {@link org.bson.BsonType#JAVASCRIPT_WITH_SCOPE} value. 
+ * + * @param name the field name + * @param codeWScope the value + */ + protected void putCodeWScope(final String name, final CodeWScope codeWScope) { + putName(name); + bsonWriter.writeJavaScriptWithScope(codeWScope.getCode()); + putObject(codeWScope.getScope()); + } + + /** + * Encodes a field with a {@code Boolean} or {@code boolean} value + * + * @param name the field name + * @param value the value + */ + protected void putBoolean(final String name, final Boolean value) { + putName(name); + bsonWriter.writeBoolean(value); + } + + /** + * Encodes a field with data and time value. + * + * @param name the field name + * @param date the value + * @see org.bson.BsonType#DATE_TIME + */ + protected void putDate(final String name, final Date date) { + putName(name); + bsonWriter.writeDateTime(date.getTime()); + } + + /** + * Encodes any number field. + * + * @param name the field name + * @param number the value + */ + protected void putNumber(final String name, final Number number) { + putName(name); + if (number instanceof Integer || number instanceof Short || number instanceof Byte || number instanceof AtomicInteger) { + bsonWriter.writeInt32(number.intValue()); + } else if (number instanceof Long || number instanceof AtomicLong) { + bsonWriter.writeInt64(number.longValue()); + } else if (number instanceof Float || number instanceof Double) { + bsonWriter.writeDouble(number.doubleValue()); + } else { + throw new IllegalArgumentException("Can't serialize " + number.getClass()); + } + } + + /** + * Encodes a Decimal128 field. + * + * @param name the field name + * @param value the value + * @since 3.4 + * @mongodb.server.release 3.4 + */ + protected void putDecimal128(final String name, final Decimal128 value) { + putName(name); + bsonWriter.writeDecimal128(value); + } + + /** + * Encodes a byte array field + * + * @param name the field name + * @param bytes the value + * @see org.bson.BsonType#BINARY + */ + protected void putBinary(final String name, final byte[] bytes) { + putName(name); + bsonWriter.writeBinaryData(new BsonBinary(bytes)); + } + + /** + * Encodes a Binary field + * + * @param name the field name + * @param binary the value + * @see org.bson.BsonType#BINARY + */ + protected void putBinary(final String name, final Binary binary) { + putName(name); + bsonWriter.writeBinaryData(new BsonBinary(binary.getType(), binary.getData())); + } + + /** + * Encodes a field with a {@link java.util.UUID} value. This is encoded to a binary value of subtype {@link + * org.bson.BsonBinarySubType#UUID_LEGACY} + * + * @param name the field name + * @param uuid the value + */ + protected void putUUID(final String name, final UUID uuid) { + putName(name); + UuidRepresentation uuidRepresentation = defaultUuidRepresentation; + byte[] bytes = UuidHelper.encodeUuidToBinary(uuid, uuidRepresentation); + bsonWriter.writeBinaryData(new BsonBinary( + uuidRepresentation == UuidRepresentation.STANDARD ? 
BsonBinarySubType.UUID_STANDARD : BsonBinarySubType.UUID_LEGACY, + bytes)); + } + + /** + * Encodes a Symbol field + * + * @param name the field name + * @param symbol the value + * @see org.bson.BsonType#SYMBOL + */ + protected void putSymbol(final String name, final Symbol symbol) { + putName(name); + bsonWriter.writeSymbol(symbol.getSymbol()); + } + + /** + * Encodes a String field + * + * @param name the field name + * @param value the value + * @see org.bson.BsonType#STRING + */ + protected void putString(final String name, final String value) { + putName(name); + bsonWriter.writeString(value); + } + + /** + * Encodes a Pattern field to a {@link org.bson.BsonType#REGULAR_EXPRESSION}. + * + * @param name the field name + * @param value the value + * @mongodb.driver.manual reference/operator/query/regex/ $regex + * @see org.bson.BsonType#BINARY + */ + protected void putPattern(final String name, final Pattern value) { + putName(name); + bsonWriter.writeRegularExpression(new BsonRegularExpression(value.pattern(), org.bson.BSON.regexFlags(value.flags()))); + } + + /** + * Encodes an ObjectId field to a {@link org.bson.BsonType#OBJECT_ID}. + * + * @param name the field name + * @param objectId the value + */ + protected void putObjectId(final String name, final ObjectId objectId) { + putName(name); + bsonWriter.writeObjectId(objectId); + } + + /** + * Encodes an array field. + * + * @param name the field name + * @param object the array, which can be any sort of primitive or String array + */ + protected void putArray(final String name, final Object object) { + putName(name); + bsonWriter.writeStartArray(); + if (object instanceof int[]) { + for (final int i : (int[]) object) { + bsonWriter.writeInt32(i); + } + } else if (object instanceof long[]) { + for (final long i : (long[]) object) { + bsonWriter.writeInt64(i); + } + } else if (object instanceof float[]) { + for (final float i : (float[]) object) { + bsonWriter.writeDouble(i); + } + } else if (object instanceof short[]) { + for (final short i : (short[]) object) { + bsonWriter.writeInt32(i); + } + } else if (object instanceof byte[]) { + for (final byte i : (byte[]) object) { + bsonWriter.writeInt32(i); + } + } else if (object instanceof double[]) { + for (final double i : (double[]) object) { + bsonWriter.writeDouble(i); + } + } else if (object instanceof boolean[]) { + for (final boolean i : (boolean[]) object) { + bsonWriter.writeBoolean(i); + } + } else if (object instanceof String[]) { + for (final String i : (String[]) object) { + bsonWriter.writeString(i); + } + } else { + int length = Array.getLength(object); + for (int i = 0; i < length; i++) { + _putObjectField(String.valueOf(i), Array.get(object, i)); + } + } + bsonWriter.writeEndArray(); + } + + /** + * Encodes an Iterable, for example {@code List} values + * + * @param name the field name + * @param iterable the value + */ + @SuppressWarnings("rawtypes") + protected void putIterable(final String name, final Iterable iterable) { + putName(name); + bsonWriter.writeStartArray(); + int i = 0; + for (final Object o : iterable) { + _putObjectField(String.valueOf(i), o); + } + bsonWriter.writeEndArray(); + } + + /** + * Encodes a map, as a BSON document + * + * @param name the field name + * @param map the value + */ + @SuppressWarnings({"rawtypes", "unchecked"}) + protected void putMap(final String name, final Map map) { + putName(name); + bsonWriter.writeStartDocument(); + for (final Map.Entry entry : (Set) map.entrySet()) { + _putObjectField((String) entry.getKey(), 
entry.getValue()); + } + bsonWriter.writeEndDocument(); + } + + /** + * Encodes any {@code BSONObject}, as a document + * + * @param name the field name + * @param document the value + * @return the number of characters in the encoding + */ + protected int putObject(final String name, final BSONObject document) { + putName(name); + return putObject(document); + } + + /** + * Special values are not encoded into documents. + * + * @param name the field name + * @param special the value + * @return true if the operation is successful. This implementation always returns false. + */ + protected boolean putSpecial(final String name, final Object special) { + return false; + } + + /** + * Encodes a field to a {@link org.bson.BsonType#MIN_KEY} value. + * + * @param name the field name + */ + protected void putMinKey(final String name) { + putName(name); + bsonWriter.writeMinKey(); + } + + /** + * Encodes a field to a {@link org.bson.BsonType#MAX_KEY} value. + * + * @param name the field name + */ + protected void putMaxKey(final String name) { + putName(name); + bsonWriter.writeMaxKey(); + } + + private static void writeLongToArrayLittleEndian(final byte[] bytes, final int offset, final long x) { + bytes[offset] = (byte) (0xFFL & (x)); + bytes[offset + 1] = (byte) (0xFFL & (x >> 8)); + bytes[offset + 2] = (byte) (0xFFL & (x >> 16)); + bytes[offset + 3] = (byte) (0xFFL & (x >> 24)); + bytes[offset + 4] = (byte) (0xFFL & (x >> 32)); + bytes[offset + 5] = (byte) (0xFFL & (x >> 40)); + bytes[offset + 6] = (byte) (0xFFL & (x >> 48)); + bytes[offset + 7] = (byte) (0xFFL & (x >> 56)); + } + +} diff --git a/bson/src/main/org/bson/BasicBSONObject.java b/bson/src/main/org/bson/BasicBSONObject.java new file mode 100644 index 00000000000..d247bce7d77 --- /dev/null +++ b/bson/src/main/org/bson/BasicBSONObject.java @@ -0,0 +1,423 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import org.bson.types.BasicBSONList; +import org.bson.types.ObjectId; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Date; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; + +/** + * A simple implementation of {@code DBObject}. A {@code DBObject} can be created as follows, using this class: {@code DBObject obj = new + * BasicBSONObject(); obj.put( "foo", "bar" ); } + */ +@SuppressWarnings({"rawtypes"}) +public class BasicBSONObject extends LinkedHashMap implements BSONObject { + + private static final long serialVersionUID = -4415279469780082174L; + + /** + * Creates an empty object. + */ + public BasicBSONObject() { + } + + /** + * Creates an empty object. + * + * @param size the initial capacity for the Map storing this document. + */ + public BasicBSONObject(final int size) { + super(size); + } + + /** + * Creates a BSONObject initialised with a single key/value pair. 
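+     * <p>
+     * For example, combined with {@link #append(String, Object)}:
+     * <pre>{@code
+     * BasicBSONObject doc = new BasicBSONObject("name", "MongoDB").append("count", 1);
+     * }</pre>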
+ * + * @param key key under which to store + * @param value value to store + */ + public BasicBSONObject(final String key, final Object value) { + put(key, value); + } + + /** + * Creates a DBObject from a map. + * + * @param map map to convert + */ + @SuppressWarnings("unchecked") + public BasicBSONObject(final Map map) { + super(map); + } + + /** + * Converts a DBObject to a map. + * + * @return the DBObject + */ + public Map toMap() { + return new LinkedHashMap<>(this); + } + + /** + * Deletes a field from this object. + * + * @param key the field name to remove + * @return the object removed + */ + public Object removeField(final String key) { + return remove(key); + } + + /** + * Checks if this object contains a given field + * + * @param field field name + * @return if the field exists + */ + public boolean containsField(final String field) { + return containsKey(field); + } + + /** + * Gets a value from this object + * + * @param key field name + * @return the value + */ + public Object get(final String key) { + return super.get(key); + } + + /** + * Returns the value of a field as an {@code int}. + * + * @param key the field to look for + * @return the field value (or default) + */ + public int getInt(final String key) { + Object o = get(key); + if (o == null) { + throw new NullPointerException("no value for: " + key); + } + + return toInt(o); + } + + /** + * Returns the value of a field as an {@code int}. + * + * @param key the field to look for + * @param def the default to return + * @return the field value (or default) + */ + public int getInt(final String key, final int def) { + Object foo = get(key); + if (foo == null) { + return def; + } + + return toInt(foo); + } + + /** + * Returns the value of a field as a {@code long}. + * + * @param key the field to return + * @return the field value + */ + public long getLong(final String key) { + Object foo = get(key); + return ((Number) foo).longValue(); + } + + /** + * Returns the value of a field as an {@code long}. + * + * @param key the field to look for + * @param def the default to return + * @return the field value (or default) + */ + public long getLong(final String key, final long def) { + Object foo = get(key); + if (foo == null) { + return def; + } + + return ((Number) foo).longValue(); + } + + /** + * Returns the value of a field as a {@code double}. + * + * @param key the field to return + * @return the field value + */ + public double getDouble(final String key) { + Object foo = get(key); + return ((Number) foo).doubleValue(); + } + + /** + * Returns the value of a field as an {@code double}. 
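+     * <p>
+     * Any {@link Number} value is widened via {@link Number#doubleValue()}; for example, a field stored as the {@code int} 3 is
+     * returned as {@code 3.0}.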
+ * + * @param key the field to look for + * @param def the default to return + * @return the field value (or default) + */ + public double getDouble(final String key, final double def) { + Object foo = get(key); + if (foo == null) { + return def; + } + + return ((Number) foo).doubleValue(); + } + + /** + * Returns the value of a field as a string + * + * @param key the field to look up + * @return the value of the field, converted to a string + */ + public String getString(final String key) { + Object foo = get(key); + if (foo == null) { + return null; + } + return foo.toString(); + } + + /** + * Returns the value of a field as a string + * + * @param key the field to look up + * @param def the default to return + * @return the value of the field, converted to a string + */ + public String getString(final String key, final String def) { + Object foo = get(key); + if (foo == null) { + return def; + } + + return foo.toString(); + } + + /** + * Returns the value of a field as a boolean. + * + * @param key the field to look up + * @return the value of the field, or false if field does not exist + */ + public boolean getBoolean(final String key) { + return getBoolean(key, false); + } + + /** + * Returns the value of a field as a boolean + * + * @param key the field to look up + * @param def the default value in case the field is not found + * @return the value of the field, converted to a string + */ + public boolean getBoolean(final String key, final boolean def) { + Object foo = get(key); + if (foo == null) { + return def; + } + if (foo instanceof Number) { + return ((Number) foo).intValue() > 0; + } + if (foo instanceof Boolean) { + return (Boolean) foo; + } + throw new IllegalArgumentException("can't coerce to bool:" + foo.getClass()); + } + + /** + * Returns the object id or null if not set. + * + * @param field The field to return + * @return The field object value or null if not found (or if null :-^). + */ + public ObjectId getObjectId(final String field) { + return (ObjectId) get(field); + } + + /** + * Returns the object id or def if not set. + * + * @param field The field to return + * @param def the default value in case the field is not found + * @return The field object value or def if not set. + */ + public ObjectId getObjectId(final String field, final ObjectId def) { + Object foo = get(field); + return (foo != null) ? (ObjectId) foo : def; + } + + /** + * Returns the date or null if not set. + * + * @param field The field to return + * @return The field object value or null if not found. + */ + public Date getDate(final String field) { + return (Date) get(field); + } + + /** + * Returns the date or def if not set. + * + * @param field The field to return + * @param def the default value in case the field is not found + * @return The field object value or def if not set. + */ + public Date getDate(final String field, final Date def) { + Object foo = get(field); + return (foo != null) ? 
(Date) foo : def; + } + + @SuppressWarnings("unchecked") + @Override + public void putAll(final Map m) { + for (final Map.Entry entry : (Set) m.entrySet()) { + put(entry.getKey().toString(), entry.getValue()); + } + } + + @Override + public void putAll(final BSONObject o) { + for (final String k : o.keySet()) { + put(k, o.get(k)); + } + } + + /** + * Add a key/value pair to this object + * + * @param key the field name + * @param val the field value + * @return {@code this} + */ + public BasicBSONObject append(final String key, final Object val) { + put(key, val); + + return this; + } + + /** + * Compares two documents according to their serialized form, ignoring the order of keys. + * + * @param o the document to compare to, which must be an instance of {@link org.bson.BSONObject}. + * @return true if the documents have the same serialized form, ignoring key order. + */ + @Override + public boolean equals(final Object o) { + if (o == this) { + return true; + } + + if (!(o instanceof BSONObject)) { + return false; + } + + BSONObject other = (BSONObject) o; + + if (!keySet().equals(other.keySet())) { + return false; + } + + return Arrays.equals(getEncoder().encode(canonicalizeBSONObject(this)), getEncoder().encode(canonicalizeBSONObject(other))); + } + + @Override + public int hashCode() { + return Arrays.hashCode(canonicalizeBSONObject(this).encode()); + } + + private byte[] encode() { + return getEncoder().encode(this); + } + + private BSONEncoder getEncoder() { + return new BasicBSONEncoder(); + } + + // create a copy of "from", but with keys ordered alphabetically + @SuppressWarnings("unchecked") + private static Object canonicalize(final Object from) { + if (from instanceof BSONObject && !(from instanceof BasicBSONList)) { + return canonicalizeBSONObject((BSONObject) from); + } else if (from instanceof List) { + return canonicalizeList((List) from); + } else if (from instanceof Map) { + return canonicalizeMap((Map) from); + } else { + return from; + } + } + + private static Map canonicalizeMap(final Map from) { + Map canonicalized = new LinkedHashMap<>(from.size()); + TreeSet keysInOrder = new TreeSet<>(from.keySet()); + for (String key : keysInOrder) { + Object val = from.get(key); + canonicalized.put(key, canonicalize(val)); + } + return canonicalized; + } + + private static BasicBSONObject canonicalizeBSONObject(final BSONObject from) { + BasicBSONObject canonicalized = new BasicBSONObject(); + TreeSet keysInOrder = new TreeSet<>(from.keySet()); + for (String key : keysInOrder) { + Object val = from.get(key); + canonicalized.put(key, canonicalize(val)); + } + return canonicalized; + } + + private static List canonicalizeList(final List list) { + List canonicalized = new ArrayList<>(list.size()); + for (Object cur : list) { + canonicalized.add(canonicalize(cur)); + } + return canonicalized; + } + + private int toInt(final Object o) { + if (o instanceof Number) { + return ((Number) o).intValue(); + } + + if (o instanceof Boolean) { + return ((Boolean) o) ? 1 : 0; + } + + throw new IllegalArgumentException("can't convert: " + o.getClass().getName() + " to int"); + } +} diff --git a/bson/src/main/org/bson/BinaryVector.java b/bson/src/main/org/bson/BinaryVector.java new file mode 100644 index 00000000000..273b4a0e5e9 --- /dev/null +++ b/bson/src/main/org/bson/BinaryVector.java @@ -0,0 +1,201 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import org.bson.annotations.Beta; +import org.bson.annotations.Reason; + +import static org.bson.assertions.Assertions.isTrueArgument; +import static org.bson.assertions.Assertions.notNull; + +/** + * Binary Vectors are densely packed arrays of numbers, all the same type, which are stored and retrieved efficiently using the BSON Binary + * Subtype 9 format. This class supports multiple vector {@link DataType}'s and provides static methods to create vectors. + *
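+ * <p>
+ * An illustrative sketch of creating vectors (the sample values are arbitrary):
+ * <pre>{@code
+ * BinaryVector floats = BinaryVector.floatVector(new float[] {1.1f, 2.2f});
+ * BinaryVector int8s = BinaryVector.int8Vector(new byte[] {10, 20});
+ * BinaryVector bits = BinaryVector.packedBitVector(new byte[] {(byte) 0b11101110}, (byte) 4);
+ * BsonBinary binary = new BsonBinary(floats); // stored as BSON Binary subtype 9
+ * }</pre>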
+ * <p>
+ * NOTE: This class should be treated as sealed: it must not be extended or implemented by consumers of the library. + * + * @mongodb.server.release 6.0 + * @see BsonBinary + * @since 5.3 + */ +public abstract class BinaryVector { + private final DataType dataType; + + BinaryVector(final DataType dataType) { + this.dataType = dataType; + } + + /** + * Creates a vector with the {@link DataType#PACKED_BIT} data type. + *
+     * <p>
+     * A {@link DataType#PACKED_BIT} vector is a binary quantized vector where each element of a vector is represented by a single bit (0 or 1). Each byte
+     * can hold up to 8 bits (vector elements). The padding parameter is used to specify how many least-significant bits in the final byte
+     * should be ignored.</p>
+     *
+     * <p>For example, a vector with two bytes and a padding of 4 would have the following structure:</p>
+     * <pre>
+     * Byte 1: 238 (binary: 11101110)
+     * Byte 2: 224 (binary: 11100000)
+     * Padding: 4 (ignore the last 4 bits in Byte 2)
+     * Resulting vector: 12 bits: 111011101110
+     * </pre>
+     * <p>
+ * NOTE: The byte array `data` is not copied; changes to the provided array will be reflected + * in the created {@link PackedBitBinaryVector} instance. + * + * @param data The byte array representing the packed bit vector data. Each byte can store 8 bits. + * @param padding The number of least-significant bits (0 to 7) to ignore in the final byte of the vector data. + * @return A {@link PackedBitBinaryVector} instance with the {@link DataType#PACKED_BIT} data type. + * @throws IllegalArgumentException If the padding value is greater than 7. + */ + @Beta(Reason.SERVER) + public static PackedBitBinaryVector packedBitVector(final byte[] data, final byte padding) { + notNull("data", data); + isTrueArgument("Padding must be between 0 and 7 bits. Provided padding: " + padding, padding >= 0 && padding <= 7); + isTrueArgument("Padding must be 0 if vector is empty. Provided padding: " + padding, padding == 0 || data.length > 0); + return new PackedBitBinaryVector(data, padding); + } + + /** + * Creates a vector with the {@link DataType#INT8} data type. + * + *
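+     * <p>For example, {@code BinaryVector.int8Vector(new byte[] {-128, 0, 127})} creates a three-element vector.</p>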
+     * <p>A {@link DataType#INT8} vector is a vector of 8-bit signed integers where each byte in the vector represents an element of a vector,
+     * with values in the range [-128, 127].</p>
+     *
+     * <p>
+ * NOTE: The byte array `data` is not copied; changes to the provided array will be reflected + * in the created {@link Int8BinaryVector} instance. + * + * @param data The byte array representing the {@link DataType#INT8} vector data. + * @return A {@link Int8BinaryVector} instance with the {@link DataType#INT8} data type. + */ + public static Int8BinaryVector int8Vector(final byte[] data) { + notNull("data", data); + return new Int8BinaryVector(data); + } + + /** + * Creates a vector with the {@link DataType#FLOAT32} data type. + *
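+     * <p>For example, {@code BinaryVector.floatVector(new float[] {1.0f, 2.0f})} creates a two-element vector.</p>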
+     * <p>
+     * A {@link DataType#FLOAT32} vector is a vector of floating-point numbers, where each element in the vector is a float.</p>
+     *
+     * <p>
+ * NOTE: The float array `data` is not copied; changes to the provided array will be reflected + * in the created {@link Float32BinaryVector} instance. + * + * @param data The float array representing the {@link DataType#FLOAT32} vector data. + * @return A {@link Float32BinaryVector} instance with the {@link DataType#FLOAT32} data type. + */ + public static Float32BinaryVector floatVector(final float[] data) { + notNull("data", data); + return new Float32BinaryVector(data); + } + + /** + * Returns the {@link PackedBitBinaryVector}. + * + * @return {@link PackedBitBinaryVector}. + * @throws IllegalStateException if this vector is not of type {@link DataType#PACKED_BIT}. Use {@link #getDataType()} to check the vector + * type before calling this method. + */ + public PackedBitBinaryVector asPackedBitVector() { + ensureType(DataType.PACKED_BIT); + return (PackedBitBinaryVector) this; + } + + /** + * Returns the {@link Int8BinaryVector}. + * + * @return {@link Int8BinaryVector}. + * @throws IllegalStateException if this vector is not of type {@link DataType#INT8}. Use {@link #getDataType()} to check the vector + * type before calling this method. + */ + public Int8BinaryVector asInt8Vector() { + ensureType(DataType.INT8); + return (Int8BinaryVector) this; + } + + /** + * Returns the {@link Float32BinaryVector}. + * + * @return {@link Float32BinaryVector}. + * @throws IllegalStateException if this vector is not of type {@link DataType#FLOAT32}. Use {@link #getDataType()} to check the vector + * type before calling this method. + */ + public Float32BinaryVector asFloat32Vector() { + ensureType(DataType.FLOAT32); + return (Float32BinaryVector) this; + } + + /** + * Returns {@link DataType} of the vector. + * + * @return the data type of the vector. + */ + public DataType getDataType() { + return this.dataType; + } + + + private void ensureType(final DataType expected) { + if (this.dataType != expected) { + throw new IllegalStateException("Expected vector data type " + expected + ", but found " + this.dataType); + } + } + + /** + * Represents the data type (dtype) of a vector. + *
+     * <p>
+ * Each dtype determines how the data in the vector is stored, including how many bits are used to represent each element + * in the vector. + * + * @mongodb.server.release 6.0 + * @since 5.3 + */ + public enum DataType { + /** + * An INT8 vector is a vector of 8-bit signed integers. The vector is stored as an array of bytes, where each byte + * represents a signed integer in the range [-128, 127]. + */ + INT8((byte) 0x03), + /** + * A FLOAT32 vector is a vector of 32-bit floating-point numbers, where each element in the vector is a float. + */ + FLOAT32((byte) 0x27), + /** + * A PACKED_BIT vector is a binary quantized vector where each element of a vector is represented by a single bit (0 or 1). + * Each byte can hold up to 8 bits (vector elements). + */ + PACKED_BIT((byte) 0x10); + + private final byte value; + + DataType(final byte value) { + this.value = value; + } + + /** + * Returns the byte value associated with this {@link DataType}. + * + *

+     * <p>This value is used in the BSON binary format to indicate the data type of the vector.</p>
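+     * <p>For example, {@code DataType.FLOAT32.getValue()} returns {@code 0x27}.</p>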
+ * + * @return the byte value representing the {@link DataType}. + */ + public byte getValue() { + return value; + } + } +} + diff --git a/bson/src/main/org/bson/Bits.java b/bson/src/main/org/bson/Bits.java new file mode 100644 index 00000000000..55c79222fc9 --- /dev/null +++ b/bson/src/main/org/bson/Bits.java @@ -0,0 +1,163 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import java.io.EOFException; +import java.io.IOException; +import java.io.InputStream; + +/** + * Utility class for reading values from an input stream. + */ +class Bits { + + /** + * Reads bytes from the input stream and puts them into the given byte buffer. The equivalent of calling + * {@link #readFully(java.io.InputStream, byte[], int, int)} with an offset of zero and a length equal to the length of the buffer. + * + * @param inputStream the input stream to read from + * @param buffer the buffer into which the data is read. + * @throws IOException if there's an error reading from the {@code inputStream} + */ + static void readFully(final InputStream inputStream, final byte[] buffer) + throws IOException { + readFully(inputStream, buffer, 0, buffer.length); + } + + /** + * Reads bytes from the input stream and puts them into the given byte buffer. + * + * @param inputStream the input stream to read from + * @param buffer the buffer into which the data is read. + * @param offset the start offset in array {@code buffer} at which the data is written. + * @param length the maximum number of bytes to read. + * @throws IOException if there's an error reading from the {@code inputStream} + * @see java.io.InputStream#read(byte[], int, int) + */ + static void readFully(final InputStream inputStream, final byte[] buffer, final int offset, final int length) + throws IOException { + if (buffer.length < length + offset) { + throw new IllegalArgumentException("Buffer is too small"); + } + + int arrayOffset = offset; + int bytesToRead = length; + while (bytesToRead > 0) { + int bytesRead = inputStream.read(buffer, arrayOffset, bytesToRead); + if (bytesRead < 0) { + throw new EOFException(); + } + bytesToRead -= bytesRead; + arrayOffset += bytesRead; + } + } + + /** + * Reads and returns a single integer value from the input stream. + * + * @param inputStream the input stream to read from + * @param buffer the buffer to write the input stream bytes into + * @return the integer value + * @throws IOException if there's an error reading from the {@code inputStream} + */ + static int readInt(final InputStream inputStream, final byte[] buffer) throws IOException { + readFully(inputStream, buffer, 0, 4); + return readInt(buffer); + } + + /** + * Reads and returns a single integer value from the buffer. The equivalent of calling {@link #readInt(byte[], int)} + * with an offset of zero. 
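+     * <p>
+     * Bytes are interpreted in little-endian order, matching the BSON spec; for example, the buffer
+     * {@code {0x01, 0x02, 0x00, 0x00}} decodes to {@code 513}.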
+ * + * @param buffer the buffer to read from + * @return the integer value + */ + static int readInt(final byte[] buffer) { + return readInt(buffer, 0); + } + + /** + * Reads and returns a single integer value from the buffer. + * + * @param buffer the buffer to read from + * @param offset the position to start reading from the buffer + * @return the integer value + */ + static int readInt(final byte[] buffer, final int offset) { + int x = 0; + x |= (0xFF & buffer[offset]) << 0; + x |= (0xFF & buffer[offset + 1]) << 8; + x |= (0xFF & buffer[offset + 2]) << 16; + x |= (0xFF & buffer[offset + 3]) << 24; + return x; + } + + /** + * Reads and returns a single long value from the input stream. + * + * @param inputStream the input stream to read from + * @return the long value + * @throws IOException if there's an error reading from the {@code inputStream} + */ + static long readLong(final InputStream inputStream) throws IOException { + return readLong(inputStream, new byte[8]); + } + + /** + * Reads and returns a single long value from the input stream. + * + * @param inputStream the input stream to read from + * @param buffer the buffer to write the input stream bytes into + * @return the long value + * @throws IOException if there's an error reading from the {@code inputStream} + */ + static long readLong(final InputStream inputStream, final byte[] buffer) throws IOException { + readFully(inputStream, buffer, 0, 8); + return readLong(buffer); + } + + /** + * Reads and returns a single long value from the buffer. The equivalent of called {@link #readLong(byte[], int)} with an offset of + * zero. + * + * @param buffer the buffer to read from + * @return the long value + */ + static long readLong(final byte[] buffer) { + return readLong(buffer, 0); + } + + /** + * Reads and returns a single long value from the buffer. + * + * @param buffer the buffer to read from + * @param offset the position to start reading from the buffer + * @return the long value + */ + static long readLong(final byte[] buffer, final int offset) { + long x = 0; + x |= (0xFFL & buffer[offset]) << 0; + x |= (0xFFL & buffer[offset + 1]) << 8; + x |= (0xFFL & buffer[offset + 2]) << 16; + x |= (0xFFL & buffer[offset + 3]) << 24; + x |= (0xFFL & buffer[offset + 4]) << 32; + x |= (0xFFL & buffer[offset + 5]) << 40; + x |= (0xFFL & buffer[offset + 6]) << 48; + x |= (0xFFL & buffer[offset + 7]) << 56; + return x; + } +} diff --git a/bson/src/main/org/bson/BsonArray.java b/bson/src/main/org/bson/BsonArray.java new file mode 100644 index 00000000000..876858b01b0 --- /dev/null +++ b/bson/src/main/org/bson/BsonArray.java @@ -0,0 +1,267 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson; + +import org.bson.codecs.BsonArrayCodec; +import org.bson.codecs.DecoderContext; +import org.bson.json.JsonReader; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.ListIterator; + +/** + * A type-safe representation of the BSON array type. + * + * @since 3.0 + */ +public class BsonArray extends BsonValue implements List, Cloneable { + + private final List values; + + /** + * Construct an instance with the given list of values. + * + * @param values the list of values, none of whose members may be null. + */ + public BsonArray(final List values) { + this(values, true); + } + + /** + * Construct an empty BsonArray + */ + public BsonArray() { + this(new ArrayList<>(), false); + } + + /** + * Construct an empty BsonArray with the specified initial capacity. + * + * @param initialCapacity the initial capacity of the BsonArray + * @throws IllegalArgumentException if the specified initial capacity + * is negative + * @since 4.3 + */ + public BsonArray(final int initialCapacity) { + this(new ArrayList<>(initialCapacity), false); + } + + @SuppressWarnings("unchecked") + BsonArray(final List values, final boolean copy) { + if (copy) { + this.values = new ArrayList<>(values); + } else { + this.values = (List) values; + } + } + + /** + * Parses a string in MongoDB Extended JSON format to a {@code BsonArray} + * + * @param json the JSON string + * @return a corresponding {@code BsonArray} object + * @see org.bson.json.JsonReader + * @mongodb.driver.manual reference/mongodb-extended-json/ MongoDB Extended JSON + * + * @since 3.4 + */ + public static BsonArray parse(final String json) { + return new BsonArrayCodec().decode(new JsonReader(json), DecoderContext.builder().build()); + } + + /** + * Gets the values in this array as a list of {@code BsonValue} objects. + * + * @return the values in this array. 
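+     *         The returned list is an unmodifiable view of this array's values.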
+ */ + public List getValues() { + return Collections.unmodifiableList(values); + } + + @Override + public BsonType getBsonType() { + return BsonType.ARRAY; + } + + @Override + public int size() { + return values.size(); + } + + @Override + public boolean isEmpty() { + return values.isEmpty(); + } + + @Override + public boolean contains(final Object o) { + return values.contains(o); + } + + @Override + public Iterator iterator() { + return values.iterator(); + } + + @Override + public Object[] toArray() { + return values.toArray(); + } + + @Override + public T[] toArray(final T[] a) { + return values.toArray(a); + } + + @Override + public boolean add(final BsonValue bsonValue) { + return values.add(bsonValue); + } + + @Override + public boolean remove(final Object o) { + return values.remove(o); + } + + @Override + public boolean containsAll(final Collection c) { + return values.containsAll(c); + } + + @Override + public boolean addAll(final Collection c) { + return values.addAll(c); + } + + @Override + public boolean addAll(final int index, final Collection c) { + return values.addAll(index, c); + } + + @Override + public boolean removeAll(final Collection c) { + return values.removeAll(c); + } + + @Override + public boolean retainAll(final Collection c) { + return values.retainAll(c); + } + + @Override + public void clear() { + values.clear(); + } + + @Override + public BsonValue get(final int index) { + return values.get(index); + } + + @Override + public BsonValue set(final int index, final BsonValue element) { + return values.set(index, element); + } + + @Override + public void add(final int index, final BsonValue element) { + values.add(index, element); + } + + @Override + public BsonValue remove(final int index) { + return values.remove(index); + } + + @Override + public int indexOf(final Object o) { + return values.indexOf(o); + } + + @Override + public int lastIndexOf(final Object o) { + return values.lastIndexOf(o); + } + + @Override + public ListIterator listIterator() { + return values.listIterator(); + } + + @Override + public ListIterator listIterator(final int index) { + return values.listIterator(index); + } + + @Override + public List subList(final int fromIndex, final int toIndex) { + return values.subList(fromIndex, toIndex); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (!(o instanceof BsonArray)) { + return false; + } + + BsonArray that = (BsonArray) o; + return getValues().equals(that.getValues()); + } + + @Override + public int hashCode() { + return values.hashCode(); + } + + @Override + public String toString() { + return "BsonArray{" + + "values=" + getValues() + + '}'; + } + + @Override + public BsonArray clone() { + BsonArray to = new BsonArray(this.size()); + for (BsonValue cur : this) { + switch (cur.getBsonType()) { + case DOCUMENT: + to.add(cur.asDocument().clone()); + break; + case ARRAY: + to.add(cur.asArray().clone()); + break; + case BINARY: + to.add(BsonBinary.clone(cur.asBinary())); + break; + case JAVASCRIPT_WITH_SCOPE: + to.add(BsonJavaScriptWithScope.clone(cur.asJavaScriptWithScope())); + break; + default: + to.add(cur); + } + } + return to; + } +} diff --git a/bson/src/main/org/bson/BsonBinary.java b/bson/src/main/org/bson/BsonBinary.java new file mode 100644 index 00000000000..833a1b5ad29 --- /dev/null +++ b/bson/src/main/org/bson/BsonBinary.java @@ -0,0 +1,248 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import org.bson.assertions.Assertions; +import org.bson.internal.UuidHelper; +import org.bson.internal.vector.BinaryVectorHelper; + +import java.util.Arrays; +import java.util.UUID; + +import static org.bson.internal.vector.BinaryVectorHelper.encodeVectorToBinary; + +/** + * A representation of the BSON Binary type. Note that for performance reasons instances of this class are not immutable, + * so care should be taken to only modify the underlying byte array if you know what you're doing, or else make a defensive copy. + * + * @since 3.0 + */ +public class BsonBinary extends BsonValue { + + private final byte type; + private final byte[] data; + + /** + * Construct a new instance with the given data and the default sub-type + * + * @param data the data + * + * @see org.bson.BsonBinarySubType#BINARY + */ + public BsonBinary(final byte[] data) { + this(BsonBinarySubType.BINARY, data); + } + + /** + * Construct a new instance with the given data and binary sub type. + * + * @param data the data + * @param type the binary sub type + * + * @see org.bson.BsonBinarySubType#BINARY + */ + public BsonBinary(final BsonBinarySubType type, final byte[] data) { + if (type == null) { + throw new IllegalArgumentException("type may not be null"); + } + if (data == null) { + throw new IllegalArgumentException("data may not be null"); + } + this.type = type.getValue(); + this.data = data; + } + + /** + * Construct a new instance with the given data and binary sub type. + * + * @param data the data + * @param type the binary sub type + * + * @see org.bson.BsonBinarySubType#BINARY + */ + public BsonBinary(final byte type, final byte[] data) { + if (data == null) { + throw new IllegalArgumentException("data may not be null"); + } + this.type = type; + this.data = data; + } + + /** + * Construct a Type 4 BsonBinary from the given UUID. + * + * @param uuid the UUID + * @since 3.9 + */ + public BsonBinary(final UUID uuid) { + this(uuid, UuidRepresentation.STANDARD); + } + + /** + * Constructs a {@linkplain BsonBinarySubType#VECTOR subtype 9} {@link BsonBinary} from the given {@link BinaryVector}. 
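+     * <p>
+     * For example, {@code new BsonBinary(BinaryVector.int8Vector(new byte[] {10, 20}))} yields a subtype 9 binary value
+     * that can be read back with {@link #asVector()}.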
+ * + * @param vector the {@link BinaryVector} + * @since 5.3 + */ + public BsonBinary(final BinaryVector vector) { + if (vector == null) { + throw new IllegalArgumentException("Vector must not be null"); + } + this.data = encodeVectorToBinary(vector); + type = BsonBinarySubType.VECTOR.getValue(); + } + + /** + * Construct a new instance from the given UUID and UuidRepresentation + * + * @param uuid the UUID + * @param uuidRepresentation the UUID representation + * @since 3.9 + */ + public BsonBinary(final UUID uuid, final UuidRepresentation uuidRepresentation) { + if (uuid == null) { + throw new IllegalArgumentException("uuid may not be null"); + } + if (uuidRepresentation == null) { + throw new IllegalArgumentException("uuidRepresentation may not be null"); + } + this.data = UuidHelper.encodeUuidToBinary(uuid, uuidRepresentation); + this.type = uuidRepresentation == UuidRepresentation.STANDARD + ? BsonBinarySubType.UUID_STANDARD.getValue() + : BsonBinarySubType.UUID_LEGACY.getValue(); + } + + /** + * Returns the binary as a UUID. The binary type must be 4. + * + * @return the uuid + * @since 3.9 + */ + public UUID asUuid() { + if (!BsonBinarySubType.isUuid(type)) { + throw new BsonInvalidOperationException("type must be a UUID subtype."); + } + + if (type != BsonBinarySubType.UUID_STANDARD.getValue()) { + throw new BsonInvalidOperationException("uuidRepresentation must be set to return the correct UUID."); + } + + return UuidHelper.decodeBinaryToUuid(this.data.clone(), this.type, UuidRepresentation.STANDARD); + } + + /** + * Returns the binary as a {@link BinaryVector}. The {@linkplain #getType() subtype} must be {@linkplain BsonBinarySubType#VECTOR 9}. + * + * @return the vector + * @throws BsonInvalidOperationException if the binary subtype is not {@link BsonBinarySubType#VECTOR}. + * @since 5.3 + */ + public BinaryVector asVector() { + if (type != BsonBinarySubType.VECTOR.getValue()) { + throw new BsonInvalidOperationException("type must be a Vector subtype."); + } + + return BinaryVectorHelper.decodeBinaryToVector(this.data); + } + + /** + * Returns the binary as a UUID. + * + * @param uuidRepresentation the UUID representation + * @return the uuid + * @since 3.9 + */ + public UUID asUuid(final UuidRepresentation uuidRepresentation) { + Assertions.notNull("uuidRepresentation", uuidRepresentation); + + byte uuidType = uuidRepresentation == UuidRepresentation.STANDARD + ? BsonBinarySubType.UUID_STANDARD.getValue() + : BsonBinarySubType.UUID_LEGACY.getValue(); + + if (type != uuidType) { + throw new BsonInvalidOperationException("uuidRepresentation does not match current uuidRepresentation."); + } + + return UuidHelper.decodeBinaryToUuid(data.clone(), type, uuidRepresentation); + } + + @Override + public BsonType getBsonType() { + return BsonType.BINARY; + } + + /** + * Gets the type of this Binary. + * + * @return the type + */ + public byte getType() { + return type; + } + + /** + * Gets the data of this Binary. + *
+     * <p>
+ * This method returns the internal copy of the byte array, so only modify the contents of the returned array if the intention is to + * change the state of this instance. + * + * @return the data + */ + public byte[] getData() { + return data; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BsonBinary that = (BsonBinary) o; + + if (!Arrays.equals(data, that.data)) { + return false; + } + if (type != that.type) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = type; + result = 31 * result + Arrays.hashCode(data); + return result; + } + + @Override + public String toString() { + return "BsonBinary{" + + "type=" + type + + ", data=" + Arrays.toString(data) + + '}'; + } + + static BsonBinary clone(final BsonBinary from) { + return new BsonBinary(from.type, from.data.clone()); + } +} diff --git a/bson/src/main/org/bson/BsonBinaryReader.java b/bson/src/main/org/bson/BsonBinaryReader.java new file mode 100644 index 00000000000..5fff43beefe --- /dev/null +++ b/bson/src/main/org/bson/BsonBinaryReader.java @@ -0,0 +1,448 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import org.bson.io.BsonInput; +import org.bson.io.BsonInputMark; +import org.bson.io.ByteBufferBsonInput; +import org.bson.types.Decimal128; +import org.bson.types.ObjectId; + +import java.nio.ByteBuffer; + +import static java.lang.String.format; +import static org.bson.assertions.Assertions.notNull; + +/** + * A BsonReader implementation that reads from a binary stream of data. This is the most commonly used implementation. + * + * @since 3.0 + */ +public class BsonBinaryReader extends AbstractBsonReader { + + private final BsonInput bsonInput; + + /** + * Construct an instance. + * + * @param byteBuffer the input for this reader + */ + public BsonBinaryReader(final ByteBuffer byteBuffer) { + this(new ByteBufferBsonInput(new ByteBufNIO(notNull("byteBuffer", byteBuffer)))); + } + + /** + * Construct an instance. + * + * @param bsonInput the input for this reader + */ + public BsonBinaryReader(final BsonInput bsonInput) { + if (bsonInput == null) { + throw new IllegalArgumentException("bsonInput is null"); + } + this.bsonInput = bsonInput; + setContext(new Context(null, BsonContextType.TOP_LEVEL, 0, 0)); + } + + @Override + public void close() { + super.close(); + } + + /** + * Gets the BSON input backing this instance. 
+ * + * @return the BSON input + */ + public BsonInput getBsonInput() { + return bsonInput; + } + + @Override + public BsonType readBsonType() { + if (isClosed()) { + throw new IllegalStateException("BSONBinaryWriter"); + } + + if (getState() == State.INITIAL || getState() == State.DONE || getState() == State.SCOPE_DOCUMENT) { + // there is an implied type of Document for the top level and for scope documents + setCurrentBsonType(BsonType.DOCUMENT); + setState(State.VALUE); + return getCurrentBsonType(); + } + if (getState() != State.TYPE) { + throwInvalidState("ReadBSONType", State.TYPE); + } + + byte bsonTypeByte = bsonInput.readByte(); + BsonType bsonType = BsonType.findByValue(bsonTypeByte); + if (bsonType == null) { + String name = bsonInput.readCString(); + throw new BsonSerializationException(format("Detected unknown BSON type \"\\x%x\" for fieldname \"%s\". " + + "Are you using the latest driver version?", + bsonTypeByte, name)); + } + setCurrentBsonType(bsonType); + + if (getCurrentBsonType() == BsonType.END_OF_DOCUMENT) { + switch (getContext().getContextType()) { + case ARRAY: + setState(State.END_OF_ARRAY); + return BsonType.END_OF_DOCUMENT; + case DOCUMENT: + case SCOPE_DOCUMENT: + setState(State.END_OF_DOCUMENT); + return BsonType.END_OF_DOCUMENT; + default: + throw new BsonSerializationException(format("BSONType EndOfDocument is not valid when ContextType is %s.", + getContext().getContextType())); + } + } else { + switch (getContext().getContextType()) { + case ARRAY: + bsonInput.skipCString(); // ignore array element names + setState(State.VALUE); + break; + case DOCUMENT: + case SCOPE_DOCUMENT: + setCurrentName(bsonInput.readCString()); + setState(State.NAME); + break; + default: + throw new BSONException("Unexpected ContextType."); + } + + return getCurrentBsonType(); + } + } + + @Override + protected BsonBinary doReadBinaryData() { + int numBytes = readSize(); + byte type = bsonInput.readByte(); + + if (type == BsonBinarySubType.OLD_BINARY.getValue()) { + int repeatedNumBytes = bsonInput.readInt32(); + if (repeatedNumBytes != numBytes - 4) { + throw new BsonSerializationException("Binary sub type OldBinary has inconsistent sizes"); + } + numBytes -= 4; + } + byte[] bytes = new byte[numBytes]; + bsonInput.readBytes(bytes); + return new BsonBinary(type, bytes); + } + + @Override + protected byte doPeekBinarySubType() { + Mark mark = new Mark(); + readSize(); + byte type = bsonInput.readByte(); + mark.reset(); + return type; + } + + @Override + protected int doPeekBinarySize() { + Mark mark = new Mark(); + int size = readSize(); + mark.reset(); + return size; + } + + @Override + protected boolean doReadBoolean() { + byte booleanByte = bsonInput.readByte(); + if (booleanByte != 0 && booleanByte != 1) { + throw new BsonSerializationException(format("Expected a boolean value but found %d", booleanByte)); + } + return booleanByte == 0x1; + } + + @Override + protected long doReadDateTime() { + return bsonInput.readInt64(); + } + + @Override + protected double doReadDouble() { + return bsonInput.readDouble(); + } + + @Override + protected int doReadInt32() { + return bsonInput.readInt32(); + } + + @Override + protected long doReadInt64() { + return bsonInput.readInt64(); + } + + @Override + public Decimal128 doReadDecimal128() { + long low = bsonInput.readInt64(); + long high = bsonInput.readInt64(); + return Decimal128.fromIEEE754BIDEncoding(high, low); + } + + @Override + protected String doReadJavaScript() { + return bsonInput.readString(); + } + + @Override + protected 
String doReadJavaScriptWithScope() { + int startPosition = bsonInput.getPosition(); // position of size field + int size = readSize(); + setContext(new Context(getContext(), BsonContextType.JAVASCRIPT_WITH_SCOPE, startPosition, size)); + return bsonInput.readString(); + } + + @Override + protected void doReadMaxKey() { + } + + @Override + protected void doReadMinKey() { + } + + @Override + protected void doReadNull() { + } + + @Override + protected ObjectId doReadObjectId() { + return bsonInput.readObjectId(); + } + + @Override + protected BsonRegularExpression doReadRegularExpression() { + return new BsonRegularExpression(bsonInput.readCString(), bsonInput.readCString()); + } + + @Override + protected BsonDbPointer doReadDBPointer() { + return new BsonDbPointer(bsonInput.readString(), bsonInput.readObjectId()); + } + + @Override + protected String doReadString() { + return bsonInput.readString(); + } + + @Override + protected String doReadSymbol() { + return bsonInput.readString(); + } + + @Override + protected BsonTimestamp doReadTimestamp() { + return new BsonTimestamp(bsonInput.readInt64()); + } + + @Override + protected void doReadUndefined() { + } + + @Override + public void doReadStartArray() { + int startPosition = bsonInput.getPosition(); // position of size field + int size = readSize(); + setContext(new Context(getContext(), BsonContextType.ARRAY, startPosition, size)); + } + + @Override + protected void doReadStartDocument() { + BsonContextType contextType = (getState() == State.SCOPE_DOCUMENT) + ? BsonContextType.SCOPE_DOCUMENT : BsonContextType.DOCUMENT; + int startPosition = bsonInput.getPosition(); // position of size field + int size = readSize(); + setContext(new Context(getContext(), contextType, startPosition, size)); + } + + @Override + protected void doReadEndArray() { + setContext(getContext().popContext(bsonInput.getPosition())); + } + + @Override + protected void doReadEndDocument() { + setContext(getContext().popContext(bsonInput.getPosition())); + if (getContext().getContextType() == BsonContextType.JAVASCRIPT_WITH_SCOPE) { + setContext(getContext().popContext(bsonInput.getPosition())); // JavaScriptWithScope + } + } + + @Override + protected void doSkipName() { + } + + @Override + protected void doSkipValue() { + if (isClosed()) { + throw new IllegalStateException("BSONBinaryWriter"); + } + if (getState() != State.VALUE) { + throwInvalidState("skipValue", State.VALUE); + } + + int skip; + switch (getCurrentBsonType()) { + case ARRAY: + skip = readSize() - 4; + break; + case BINARY: + skip = readSize() + 1; + break; + case BOOLEAN: + skip = 1; + break; + case DATE_TIME: + skip = 8; + break; + case DOCUMENT: + skip = readSize() - 4; + break; + case DOUBLE: + skip = 8; + break; + case INT32: + skip = 4; + break; + case INT64: + skip = 8; + break; + case DECIMAL128: + skip = 16; + break; + case JAVASCRIPT: + skip = readSize(); + break; + case JAVASCRIPT_WITH_SCOPE: + skip = readSize() - 4; + break; + case MAX_KEY: + skip = 0; + break; + case MIN_KEY: + skip = 0; + break; + case NULL: + skip = 0; + break; + case OBJECT_ID: + skip = 12; + break; + case REGULAR_EXPRESSION: + bsonInput.skipCString(); + bsonInput.skipCString(); + skip = 0; + break; + case STRING: + skip = readSize(); + break; + case SYMBOL: + skip = readSize(); + break; + case TIMESTAMP: + skip = 8; + break; + case UNDEFINED: + skip = 0; + break; + case DB_POINTER: + skip = readSize() + 12; // String followed by ObjectId + break; + default: + throw new BSONException("Unexpected BSON type: " + 
getCurrentBsonType()); + } + bsonInput.skip(skip); + + setState(State.TYPE); + } + + private int readSize() { + int size = bsonInput.readInt32(); + if (size < 0) { + String message = format("Size %s is not valid because it is negative.", size); + throw new BsonSerializationException(message); + } + return size; + } + + protected Context getContext() { + return (Context) super.getContext(); + } + + @Override + public BsonReaderMark getMark() { + return new Mark(); + } + + /** + * An implementation of {@code AbstractBsonReader.Mark}. + */ + protected class Mark extends AbstractBsonReader.Mark { + private final int startPosition; + private final int size; + private final BsonInputMark bsonInputMark; + + /** + * Construct an instance. + */ + protected Mark() { + startPosition = BsonBinaryReader.this.getContext().startPosition; + size = BsonBinaryReader.this.getContext().size; + bsonInputMark = BsonBinaryReader.this.bsonInput.getMark(Integer.MAX_VALUE); + } + + @Override + public void reset() { + super.reset(); + bsonInputMark.reset(); + BsonBinaryReader.this.setContext(new Context((Context) getParentContext(), getContextType(), startPosition, size)); + } + } + + /** + * An implementation of {@code AbstractBsonReader.Context}. + */ + protected class Context extends AbstractBsonReader.Context { + private final int startPosition; + private final int size; + + Context(final Context parentContext, final BsonContextType contextType, final int startPosition, final int size) { + super(parentContext, contextType); + this.startPosition = startPosition; + this.size = size; + } + + Context popContext(final int position) { + int actualSize = position - startPosition; + if (actualSize != size) { + throw new BsonSerializationException(format("Expected size to be %d, not %d.", size, actualSize)); + } + return getParentContext(); + } + + @Override + protected Context getParentContext() { + return (Context) super.getParentContext(); + } + } +} diff --git a/bson/src/main/org/bson/BsonBinarySubType.java b/bson/src/main/org/bson/BsonBinarySubType.java new file mode 100644 index 00000000000..08c29e2ef09 --- /dev/null +++ b/bson/src/main/org/bson/BsonBinarySubType.java @@ -0,0 +1,115 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +/** + * The Binary subtype. + * + * @since 3.0 + */ +public enum BsonBinarySubType { + /** + * Binary data. + */ + BINARY((byte) 0x00), + + /** + * A function. + */ + FUNCTION((byte) 0x01), + + /** + * Obsolete binary data subtype (use Binary instead). + */ + OLD_BINARY((byte) 0x02), + + /** + * A UUID in a driver dependent legacy byte order. + */ + UUID_LEGACY((byte) 0x03), + + /** + * A UUID in standard network byte order. + */ + UUID_STANDARD((byte) 0x04), + + /** + * An MD5 hash. + */ + MD5((byte) 0x05), + + /** + * Encrypted data. + * + * @since 4.4 + */ + ENCRYPTED((byte) 0x06), + + /** + * Columnar data. 
+ * + * @since 4.4 + */ + COLUMN((byte) 0x07), + + /** + * Sensitive data (e.g., HMAC keys) that should be excluded from server-side logging. + * + * @since 5.3 + */ + SENSITIVE((byte) 0x08), + + /** + * Vector data. + * + * @mongodb.server.release 6.0 + * @since 5.3 + * @see BinaryVector + */ + VECTOR((byte) 0x09), + + /** + * User defined binary data. + */ + USER_DEFINED((byte) 0x80); + + private final byte value; + + /** + * Returns true if the given value is a UUID subtype. + * + * @param value the subtype value as a byte. + * @return true if value is a UUID subtype. + * @since 3.4 + */ + public static boolean isUuid(final byte value) { + return value == UUID_LEGACY.getValue() || value == UUID_STANDARD.getValue(); + } + + BsonBinarySubType(final byte value) { + this.value = value; + } + + /** + * Gets the byte representation of this subtype. + * + * @return this subtype as a byte. + */ + public byte getValue() { + return value; + } +} diff --git a/bson/src/main/org/bson/BsonBinaryWriter.java b/bson/src/main/org/bson/BsonBinaryWriter.java new file mode 100644 index 00000000000..20e73d97d44 --- /dev/null +++ b/bson/src/main/org/bson/BsonBinaryWriter.java @@ -0,0 +1,517 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import org.bson.io.BsonInput; +import org.bson.io.BsonOutput; +import org.bson.types.Decimal128; +import org.bson.types.ObjectId; + +import java.util.ArrayDeque; +import java.util.Deque; +import java.util.List; + +import static java.lang.Math.max; +import static java.lang.String.format; +import static org.bson.assertions.Assertions.notNull; + +/** + * A BsonWriter implementation that writes to a binary stream of data. This is the most commonly used implementation. 
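Before the writer internals, a minimal usage sketch (example code, not part of this patch): each write call maps onto one of the doWriteX() methods that follow, emitting a type byte, the element name as a CString, then the payload.

```java
import org.bson.BsonBinaryWriter;
import org.bson.io.BasicOutputBuffer;

// Example only: assemble a two-field document in memory.
public final class WriterUsageSketch {
    public static void main(final String[] args) {
        BasicOutputBuffer buffer = new BasicOutputBuffer();
        try (BsonBinaryWriter writer = new BsonBinaryWriter(buffer)) {
            writer.writeStartDocument();            // reserves 4 bytes for the size
            writer.writeString("name", "example");  // 0x02, "name\0", length-prefixed UTF-8
            writer.writeInt64("count", 42L);        // 0x12, "count\0", 8 little-endian bytes
            writer.writeEndDocument();              // 0x00 terminator, then the size is backpatched
        }
        System.out.println(buffer.size());          // 38 bytes for this document
    }
}
```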
+ * + * @since 3.0 + */ +public class BsonBinaryWriter extends AbstractBsonWriter { + private final BsonBinaryWriterSettings binaryWriterSettings; + + private final BsonOutput bsonOutput; + private final Deque maxDocumentSizeStack = new ArrayDeque<>(); + private static final int ARRAY_INDEXES_CACHE_SIZE = 1000; + private static final byte[] ARRAY_INDEXES_BUFFER; + private static final int[] ARRAY_INDEXES_OFFSETS; + private static final int[] ARRAY_INDEXES_LENGTHS; + private Mark mark; + + static { + ARRAY_INDEXES_LENGTHS = new int[ARRAY_INDEXES_CACHE_SIZE]; + ARRAY_INDEXES_OFFSETS = new int[ARRAY_INDEXES_CACHE_SIZE]; + int totalSize = 0; + for (int i = 0; i < ARRAY_INDEXES_CACHE_SIZE; i++) { + totalSize += (int) (Math.log10(max(i, 1)) + + 1 // number of digits + + 1); // +1 for null terminator + } + ARRAY_INDEXES_BUFFER = new byte[totalSize]; + + // Fill buffer + int offset = 0; + for (int i = 0; i < ARRAY_INDEXES_CACHE_SIZE; i++) { + String string = Integer.toString(i); + int length = string.length(); + for (int j = 0; j < length; j++) { + ARRAY_INDEXES_BUFFER[offset++] = (byte) string.charAt(j); + } + ARRAY_INDEXES_BUFFER[offset++] = 0; + ARRAY_INDEXES_OFFSETS[i] = offset - (length + 1); + ARRAY_INDEXES_LENGTHS[i] = length + 1; // +1 for null terminator + } + } + + /** + * Construct an instance. + * + * @param bsonOutput the output to write to + * @param validator the field name validator to apply + */ + public BsonBinaryWriter(final BsonOutput bsonOutput, final FieldNameValidator validator) { + this(new BsonWriterSettings(), new BsonBinaryWriterSettings(), bsonOutput, validator); + } + + /** + * Construct an instance. + * + * @param bsonOutput the output to write to + */ + public BsonBinaryWriter(final BsonOutput bsonOutput) { + this(new BsonWriterSettings(), new BsonBinaryWriterSettings(), bsonOutput); + } + + /** + * Construct an instance. + * + * @param settings the generic BsonWriter settings + * @param binaryWriterSettings the settings specific to a BsonBinaryWriter + * @param bsonOutput the output to write to + */ + public BsonBinaryWriter(final BsonWriterSettings settings, final BsonBinaryWriterSettings binaryWriterSettings, + final BsonOutput bsonOutput) { + this(settings, binaryWriterSettings, bsonOutput, NoOpFieldNameValidator.INSTANCE); + } + + /** + * Construct an instance. + * + * @param settings the generic BsonWriter settings + * @param binaryWriterSettings the settings specific to a BsonBinaryWriter + * @param bsonOutput the output to write to + * @param validator the field name validator to apply + */ + public BsonBinaryWriter(final BsonWriterSettings settings, final BsonBinaryWriterSettings binaryWriterSettings, + final BsonOutput bsonOutput, final FieldNameValidator validator) { + super(settings, validator); + this.binaryWriterSettings = binaryWriterSettings; + this.bsonOutput = bsonOutput; + maxDocumentSizeStack.push(binaryWriterSettings.getMaxDocumentSize()); + } + + @Override + public void close() { + super.close(); + } + + /** + * Gets the BSON output backing this instance. 
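The static initializer above precomputes the null-terminated ASCII names "0" through "999" in one flat byte array, so writing an array element name costs a single bulk copy instead of an Integer.toString() plus per-character encoding. A simplified standalone re-creation of the same layout (my own sketch, not the driver's code; it sizes the buffer with String lengths rather than the equivalent log10 arithmetic):

```java
import java.nio.charset.StandardCharsets;

// Example only: index i is stored as its ASCII digits plus a 0x00 terminator,
// packed back to back; OFFSETS/LENGTHS locate the slice to copy.
public final class IndexCacheSketch {
    private static final int SIZE = 1000;
    private static final byte[] BUFFER;
    private static final int[] OFFSETS = new int[SIZE];
    private static final int[] LENGTHS = new int[SIZE];

    static {
        int total = 0;
        for (int i = 0; i < SIZE; i++) {
            total += Integer.toString(i).length() + 1;  // digits + null terminator
        }
        BUFFER = new byte[total];
        int offset = 0;
        for (int i = 0; i < SIZE; i++) {
            byte[] digits = Integer.toString(i).getBytes(StandardCharsets.US_ASCII);
            System.arraycopy(digits, 0, BUFFER, offset, digits.length);
            OFFSETS[i] = offset;
            LENGTHS[i] = digits.length + 1;  // the terminator byte is already zero
            offset += LENGTHS[i];
        }
    }

    public static void main(final String[] args) {
        System.out.println(LENGTHS[42]);          // 3: "42" plus the terminator
        System.out.println(BUFFER[OFFSETS[42]]);  // 52, the ASCII code of '4'
    }
}
```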
+ * + * @return the BSON output + */ + public BsonOutput getBsonOutput() { + return bsonOutput; + } + + /** + * @return the BsonBinaryWriterSettings + * @since 3.6 + */ + public BsonBinaryWriterSettings getBinaryWriterSettings() { + return binaryWriterSettings; + } + + @Override + protected Context getContext() { + return (Context) super.getContext(); + } + + @Override + protected void doWriteStartDocument() { + if (getState() == State.VALUE) { + bsonOutput.writeByte(BsonType.DOCUMENT.getValue()); + writeCurrentName(); + } + setContext(new Context(getContext(), BsonContextType.DOCUMENT, bsonOutput.getPosition())); + bsonOutput.writeInt32(0); // reserve space for size + } + + @Override + protected void doWriteEndDocument() { + bsonOutput.writeByte(0); + backpatchSize(); // size of document + + setContext(getContext().getParentContext()); + if (getContext() != null && getContext().getContextType() == BsonContextType.JAVASCRIPT_WITH_SCOPE) { + backpatchSize(); // size of the JavaScript with scope value + setContext(getContext().getParentContext()); + } + } + + @Override + protected void doWriteStartArray() { + bsonOutput.writeByte(BsonType.ARRAY.getValue()); + writeCurrentName(); + setContext(new Context(getContext(), BsonContextType.ARRAY, bsonOutput.getPosition())); + bsonOutput.writeInt32(0); // reserve space for size + } + + @Override + protected void doWriteEndArray() { + bsonOutput.writeByte(0); + backpatchSize(); // size of document + setContext(getContext().getParentContext()); + } + + @Override + protected void doWriteBinaryData(final BsonBinary value) { + bsonOutput.writeByte(BsonType.BINARY.getValue()); + writeCurrentName(); + + int totalLen = value.getData().length; + + if (value.getType() == BsonBinarySubType.OLD_BINARY.getValue()) { + totalLen += 4; + } + + bsonOutput.writeInt32(totalLen); + bsonOutput.writeByte(value.getType()); + if (value.getType() == BsonBinarySubType.OLD_BINARY.getValue()) { + bsonOutput.writeInt32(totalLen - 4); + } + bsonOutput.writeBytes(value.getData()); + } + + @Override + public void doWriteBoolean(final boolean value) { + bsonOutput.writeByte(BsonType.BOOLEAN.getValue()); + writeCurrentName(); + bsonOutput.writeByte(value ? 
1 : 0); + } + + @Override + protected void doWriteDateTime(final long value) { + bsonOutput.writeByte(BsonType.DATE_TIME.getValue()); + writeCurrentName(); + bsonOutput.writeInt64(value); + } + + @Override + protected void doWriteDBPointer(final BsonDbPointer value) { + bsonOutput.writeByte(BsonType.DB_POINTER.getValue()); + writeCurrentName(); + + bsonOutput.writeString(value.getNamespace()); + bsonOutput.writeBytes(value.getId().toByteArray()); + } + + @Override + protected void doWriteDouble(final double value) { + bsonOutput.writeByte(BsonType.DOUBLE.getValue()); + writeCurrentName(); + bsonOutput.writeDouble(value); + } + + @Override + protected void doWriteInt32(final int value) { + bsonOutput.writeByte(BsonType.INT32.getValue()); + writeCurrentName(); + bsonOutput.writeInt32(value); + } + + @Override + protected void doWriteInt64(final long value) { + bsonOutput.writeByte(BsonType.INT64.getValue()); + writeCurrentName(); + bsonOutput.writeInt64(value); + } + + @Override + protected void doWriteDecimal128(final Decimal128 value) { + bsonOutput.writeByte(BsonType.DECIMAL128.getValue()); + writeCurrentName(); + bsonOutput.writeInt64(value.getLow()); + bsonOutput.writeInt64(value.getHigh()); + } + + @Override + protected void doWriteJavaScript(final String value) { + bsonOutput.writeByte(BsonType.JAVASCRIPT.getValue()); + writeCurrentName(); + bsonOutput.writeString(value); + } + + @Override + protected void doWriteJavaScriptWithScope(final String value) { + bsonOutput.writeByte(BsonType.JAVASCRIPT_WITH_SCOPE.getValue()); + writeCurrentName(); + setContext(new Context(getContext(), BsonContextType.JAVASCRIPT_WITH_SCOPE, bsonOutput.getPosition())); + bsonOutput.writeInt32(0); + bsonOutput.writeString(value); + } + + @Override + protected void doWriteMaxKey() { + bsonOutput.writeByte(BsonType.MAX_KEY.getValue()); + writeCurrentName(); + } + + @Override + protected void doWriteMinKey() { + bsonOutput.writeByte(BsonType.MIN_KEY.getValue()); + writeCurrentName(); + } + + @Override + public void doWriteNull() { + bsonOutput.writeByte(BsonType.NULL.getValue()); + writeCurrentName(); + } + + @Override + public void doWriteObjectId(final ObjectId value) { + bsonOutput.writeByte(BsonType.OBJECT_ID.getValue()); + writeCurrentName(); + bsonOutput.writeObjectId(value); + } + + @Override + public void doWriteRegularExpression(final BsonRegularExpression value) { + bsonOutput.writeByte(BsonType.REGULAR_EXPRESSION.getValue()); + writeCurrentName(); + bsonOutput.writeCString(value.getPattern()); + bsonOutput.writeCString(value.getOptions()); + } + + @Override + public void doWriteString(final String value) { + bsonOutput.writeByte(BsonType.STRING.getValue()); + writeCurrentName(); + bsonOutput.writeString(value); + } + + @Override + public void doWriteSymbol(final String value) { + bsonOutput.writeByte(BsonType.SYMBOL.getValue()); + writeCurrentName(); + bsonOutput.writeString(value); + } + + @Override + public void doWriteTimestamp(final BsonTimestamp value) { + bsonOutput.writeByte(BsonType.TIMESTAMP.getValue()); + writeCurrentName(); + bsonOutput.writeInt64(value.getValue()); + } + + @Override + public void doWriteUndefined() { + bsonOutput.writeByte(BsonType.UNDEFINED.getValue()); + writeCurrentName(); + } + + @Override + public void pipe(final BsonReader reader) { + notNull("reader", reader); + pipeDocument(reader, null); + } + + @Override + public void pipe(final BsonReader reader, final List extraElements) { + notNull("reader", reader); + notNull("extraElements", extraElements); + 
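Note the quirk handled by doWriteBinaryData() above: for the obsolete OLD_BINARY (0x02) subtype the payload carries its own inner int32 length, hence the outer length is widened by 4 and an inner length is written separately. A hedged round-trip sketch (example code, not part of this patch; the class name is mine):

```java
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

import org.bson.BsonBinary;
import org.bson.BsonBinaryReader;
import org.bson.BsonBinaryWriter;
import org.bson.BsonBinarySubType;
import org.bson.io.BasicOutputBuffer;

// Example only: an OLD_BINARY value gains an inner int32 length on the wire,
// but round-trips transparently through the writer and reader.
public final class OldBinaryRoundTrip {
    public static void main(final String[] args) {
        BasicOutputBuffer buffer = new BasicOutputBuffer();
        try (BsonBinaryWriter writer = new BsonBinaryWriter(buffer)) {
            writer.writeStartDocument();
            writer.writeBinaryData("blob",
                    new BsonBinary(BsonBinarySubType.OLD_BINARY, new byte[] {1, 2, 3}));
            writer.writeEndDocument();
        }

        BsonBinaryReader reader = new BsonBinaryReader(
                ByteBuffer.wrap(buffer.toByteArray()).order(ByteOrder.LITTLE_ENDIAN));
        reader.readStartDocument();
        BsonBinary read = reader.readBinaryData("blob");
        System.out.println(read.getData().length);  // 3: the inner length header is stripped on read
    }
}
```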
pipeDocument(reader, extraElements); + } + + private void pipeDocument(final BsonReader reader, final List extraElements) { + if (reader instanceof BsonBinaryReader) { + BsonBinaryReader binaryReader = (BsonBinaryReader) reader; + if (getState() == State.VALUE) { + bsonOutput.writeByte(BsonType.DOCUMENT.getValue()); + writeCurrentName(); + } + BsonInput bsonInput = binaryReader.getBsonInput(); + int size = bsonInput.readInt32(); + if (size < 5) { + throw new BsonSerializationException("Document size must be at least 5"); + } + int pipedDocumentStartPosition = bsonOutput.getPosition(); + bsonOutput.writeInt32(size); + byte[] bytes = new byte[size - 4]; + bsonInput.readBytes(bytes); + bsonOutput.writeBytes(bytes); + + binaryReader.setState(AbstractBsonReader.State.TYPE); + + if (extraElements != null) { + bsonOutput.truncateToPosition(bsonOutput.getPosition() - 1); + setContext(new Context(getContext(), BsonContextType.DOCUMENT, pipedDocumentStartPosition)); + setState(State.NAME); + pipeExtraElements(extraElements); + bsonOutput.writeByte(0); + bsonOutput.writeInt32(pipedDocumentStartPosition, bsonOutput.getPosition() - pipedDocumentStartPosition); + setContext(getContext().getParentContext()); + } + + if (getContext() == null) { + setState(State.DONE); + } else { + if (getContext().getContextType() == BsonContextType.JAVASCRIPT_WITH_SCOPE) { + backpatchSize(); // size of the JavaScript with scope value + setContext(getContext().getParentContext()); + } + setState(getNextState()); + } + + validateSize(bsonOutput.getPosition() - pipedDocumentStartPosition); + } else if (extraElements != null) { + super.pipe(reader, extraElements); + } else { + super.pipe(reader); + } + } + + /** + * Sets a maximum size for documents from this point. + * + * @param maxDocumentSize the maximum document size. + */ + public void pushMaxDocumentSize(final int maxDocumentSize) { + maxDocumentSizeStack.push(maxDocumentSize); + } + + /** + * Reset the maximum document size to its previous value. + */ + public void popMaxDocumentSize() { + maxDocumentSizeStack.pop(); + } + + /** + * Create a snapshot of this writer's context at a point in time. + */ + public void mark() { + mark = new Mark(); + } + + /** + * Resets this writer to the last {@link #mark()} saved. + * + * @throws IllegalStateException if {@link #mark()} was not called prior to reset. + */ + public void reset() { + if (mark == null) { + throw new IllegalStateException("Can not reset without first marking"); + } + + mark.reset(); + mark = null; + } + + private void writeCurrentName() { + if (getContext().getContextType() == BsonContextType.ARRAY) { + int index = getContext().index++; + if (index >= ARRAY_INDEXES_CACHE_SIZE) { + bsonOutput.writeCString(Integer.toString(index)); + } else { + bsonOutput.writeBytes(ARRAY_INDEXES_BUFFER, + ARRAY_INDEXES_OFFSETS[index], + ARRAY_INDEXES_LENGTHS[index]); + } + } else { + bsonOutput.writeCString(getName()); + } + } + + private void backpatchSize() { + int size = bsonOutput.getPosition() - getContext().startPosition; + validateSize(size); + bsonOutput.writeInt32(bsonOutput.getPosition() - size, size); + } + + private void validateSize(final int size) { + if (size > maxDocumentSizeStack.peek()) { + throw new BsonMaximumSizeExceededException(format("Document size of %d is larger than maximum of %d.", size, + maxDocumentSizeStack.peek())); + } + } + + /** + * An implementation of {@code AbstractBsonWriter.Context}. 
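backpatchSize() above closes the loop on the writeInt32(0) placeholders: a document's size is unknown until its end, so four zero bytes are reserved at startPosition and overwritten once the final position is known. A minimal sketch of just that arithmetic against a BsonOutput (example only; the bytes written are not a valid BSON document, only the bookkeeping is shown):

```java
import org.bson.io.BasicOutputBuffer;
import org.bson.io.BsonOutput;

// Example only: reserve four bytes, write some body, then overwrite the
// placeholder with the measured size -- the same idiom as backpatchSize().
public final class BackpatchSketch {
    public static void main(final String[] args) {
        BsonOutput out = new BasicOutputBuffer();
        int startPosition = out.getPosition();
        out.writeInt32(0);                    // placeholder for the size
        out.writeCString("hello");            // stand-in for the document body
        out.writeByte(0);                     // trailing terminator byte
        int size = out.getPosition() - startPosition;
        out.writeInt32(startPosition, size);  // backpatch the real size in place
        System.out.println(size);             // 11 = 4 (size) + 6 ("hello\0") + 1 (0x00)
    }
}
```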
+ */ + protected class Context extends AbstractBsonWriter.Context { + private final int startPosition; + private int index; // used when contextType is an array + + /** + * Creates a new instance + * + * @param parentContext the context of the parent node + * @param contextType the type of this context + * @param startPosition the position of the output stream of this writer. + */ + public Context(final Context parentContext, final BsonContextType contextType, final int startPosition) { + super(parentContext, contextType); + this.startPosition = startPosition; + } + + /** + * Creates a new instance by copying the values from the given context. + * + * @param from the Context to copy. + */ + public Context(final Context from) { + super(from); + startPosition = from.startPosition; + index = from.index; + } + + @Override + public Context getParentContext() { + return (Context) super.getParentContext(); + } + + @Override + public Context copy() { + return new Context(this); + } + } + + /** + * An implementation of {@code AbstractBsonWriter.Mark}. + */ + protected class Mark extends AbstractBsonWriter.Mark { + private final int position; + + /** + * Creates a new instance storing the current position of the {@link org.bson.io.BsonOutput}. + */ + protected Mark() { + this.position = bsonOutput.getPosition(); + } + + @Override + protected void reset() { + super.reset(); + bsonOutput.truncateToPosition(mark.position); + } + } +} diff --git a/bson/src/main/org/bson/BsonBinaryWriterSettings.java b/bson/src/main/org/bson/BsonBinaryWriterSettings.java new file mode 100644 index 00000000000..4c2957987b3 --- /dev/null +++ b/bson/src/main/org/bson/BsonBinaryWriterSettings.java @@ -0,0 +1,51 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +/** + * The customisable settings for writing BSON. + * + * @since 3.0 + */ +public class BsonBinaryWriterSettings { + private final int maxDocumentSize; + + /** + * Creates a new instance of the settings with the given maximum document size. + * + * @param maxDocumentSize the maximum document size. + */ + public BsonBinaryWriterSettings(final int maxDocumentSize) { + this.maxDocumentSize = maxDocumentSize; + } + + /** + * Creates a new instance of the settings with {@link java.lang.Integer#MAX_VALUE} as the maximum document size. + */ + public BsonBinaryWriterSettings() { + this(Integer.MAX_VALUE); + } + + /** + * Gets the maximum size for BSON documents. + * + * @return the maximum size of BSON documents. + */ + public int getMaxDocumentSize() { + return maxDocumentSize; + } +} diff --git a/bson/src/main/org/bson/BsonBoolean.java b/bson/src/main/org/bson/BsonBoolean.java new file mode 100644 index 00000000000..f8af1cd6df7 --- /dev/null +++ b/bson/src/main/org/bson/BsonBoolean.java @@ -0,0 +1,105 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +/** + * A representation of the BSON Boolean type. + * + * @since 3.0 + */ +public final class BsonBoolean extends BsonValue implements Comparable { + + private final boolean value; + + /** + * The true value. + */ + public static final BsonBoolean TRUE = new BsonBoolean(true); + + /** + * The false value. + */ + public static final BsonBoolean FALSE = new BsonBoolean(false); + + /** + * Returns a {@code BsonBoolean} instance representing the specified {@code boolean} value. + * + * @param value a boolean value. + * @return {@link BsonBoolean#TRUE} if {@code value} is true, {@link BsonBoolean#FALSE} if {@code value} is false + */ + public static BsonBoolean valueOf(final boolean value) { + return value ? TRUE : FALSE; + } + + /** + * Construct a new instance with the given value. + * + * @param value the value + */ + public BsonBoolean(final boolean value) { + this.value = value; + } + + @Override + public int compareTo(final BsonBoolean o) { + return Boolean.valueOf(value).compareTo(o.value); + } + + @Override + public BsonType getBsonType() { + return BsonType.BOOLEAN; + } + + /** + * Gets the boolean value. + * + * @return the value + */ + public boolean getValue() { + return value; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BsonBoolean that = (BsonBoolean) o; + + if (value != that.value) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return (value ? 1 : 0); + } + + @Override + public String toString() { + return "BsonBoolean{" + + "value=" + value + + '}'; + } +} diff --git a/bson/src/main/org/bson/BsonContextType.java b/bson/src/main/org/bson/BsonContextType.java new file mode 100644 index 00000000000..161f33d11b6 --- /dev/null +++ b/bson/src/main/org/bson/BsonContextType.java @@ -0,0 +1,49 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +/** + * Used by BsonReader and BsonWriter implementations to represent the current context. + * + * @since 3.0 + */ +public enum BsonContextType { + /** + * The top level of a BSON document. + */ + TOP_LEVEL, + + /** + * A (possibly embedded) BSON document. + */ + DOCUMENT, + + /** + * A BSON array. + */ + ARRAY, + + /** + * A JAVASCRIPT_WITH_SCOPE BSON value. + */ + JAVASCRIPT_WITH_SCOPE, + + /** + * The scope document of a JAVASCRIPT_WITH_SCOPE BSON value. 
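A small illustration of BsonBoolean's flyweight constants (example code, not part of this patch): valueOf() returns the shared TRUE/FALSE instances, while equality remains value-based for independently constructed objects.

```java
import org.bson.BsonBoolean;

// Example only.
public final class BsonBooleanSketch {
    public static void main(final String[] args) {
        System.out.println(BsonBoolean.valueOf(true) == BsonBoolean.TRUE);      // true: shared instance
        System.out.println(new BsonBoolean(true).equals(BsonBoolean.TRUE));     // true: value equality
        System.out.println(BsonBoolean.FALSE.compareTo(BsonBoolean.TRUE) < 0);  // true: false sorts first
    }
}
```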
+ */ + SCOPE_DOCUMENT +} diff --git a/bson/src/main/org/bson/BsonDateTime.java b/bson/src/main/org/bson/BsonDateTime.java new file mode 100644 index 00000000000..355807d1372 --- /dev/null +++ b/bson/src/main/org/bson/BsonDateTime.java @@ -0,0 +1,85 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +/** + * A representation of the BSON DateTime type. + * + * @since 3.0 + */ +public class BsonDateTime extends BsonValue implements Comparable { + + private final long value; + + /** + * Construct a new instance with the given value. + * + * @param value the value, which may not be null + */ + public BsonDateTime(final long value) { + this.value = value; + } + + @Override + public int compareTo(final BsonDateTime o) { + return Long.compare(value, o.value); + } + + @Override + public BsonType getBsonType() { + return BsonType.DATE_TIME; + } + + /** + * Gets the DateTime value as a long + * + * @return the value + */ + public long getValue() { + return value; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BsonDateTime that = (BsonDateTime) o; + + if (value != that.value) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return (int) (value ^ (value >>> 32)); + } + + @Override + public String toString() { + return "BsonDateTime{" + + "value=" + value + + '}'; + } +} diff --git a/bson/src/main/org/bson/BsonDbPointer.java b/bson/src/main/org/bson/BsonDbPointer.java new file mode 100644 index 00000000000..e74ec049ce4 --- /dev/null +++ b/bson/src/main/org/bson/BsonDbPointer.java @@ -0,0 +1,106 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import org.bson.types.ObjectId; + +/** + * Holder for a BSON type DBPointer(0x0c). It's deprecated in BSON Specification and present here because of compatibility reasons. + * + * @since 3.0 + */ +public class BsonDbPointer extends BsonValue { + + private final String namespace; + private final ObjectId id; + + /** + * Construct a new instance with the given namespace and id. 
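A short illustration of BsonDateTime's contract (example code, not part of this patch): the wrapped long is UTC milliseconds since the Unix epoch, which is exactly what doWriteDateTime()/doReadDateTime() carry as an int64.

```java
import java.time.Instant;

import org.bson.BsonDateTime;

// Example only.
public final class BsonDateTimeSketch {
    public static void main(final String[] args) {
        BsonDateTime epoch = new BsonDateTime(0L);
        BsonDateTime now = new BsonDateTime(System.currentTimeMillis());

        System.out.println(epoch.compareTo(now) < 0);                // true: ordered by instant
        System.out.println(Instant.ofEpochMilli(epoch.getValue()));  // 1970-01-01T00:00:00Z
    }
}
```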
+ * + * @param namespace the namespace + * @param id the id + */ + public BsonDbPointer(final String namespace, final ObjectId id) { + if (namespace == null) { + throw new IllegalArgumentException("namespace can not be null"); + } + if (id == null) { + throw new IllegalArgumentException("id can not be null"); + } + this.namespace = namespace; + this.id = id; + } + + @Override + public BsonType getBsonType() { + return BsonType.DB_POINTER; + } + + /** + * Gets the namespace. + * + * @return the namespace + */ + public String getNamespace() { + return namespace; + } + + /** + * Gets the id. + * + * @return the id + */ + public ObjectId getId() { + return id; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BsonDbPointer dbPointer = (BsonDbPointer) o; + + if (!id.equals(dbPointer.id)) { + return false; + } + if (!namespace.equals(dbPointer.namespace)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = namespace.hashCode(); + result = 31 * result + id.hashCode(); + return result; + } + + @Override + public String toString() { + return "BsonDbPointer{" + + "namespace='" + namespace + '\'' + + ", id=" + id + + '}'; + } +} diff --git a/bson/src/main/org/bson/BsonDecimal128.java b/bson/src/main/org/bson/BsonDecimal128.java new file mode 100644 index 00000000000..3a48eeb2910 --- /dev/null +++ b/bson/src/main/org/bson/BsonDecimal128.java @@ -0,0 +1,104 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import org.bson.types.Decimal128; + +import static org.bson.assertions.Assertions.notNull; + +/** + * A representation of the BSON Decimal128 type. + * + * @since 3.4 + */ +public final class BsonDecimal128 extends BsonNumber { + private final Decimal128 value; + + /** + * Construct a new instance with the given value. + * + * @param value the value, which may not be null + */ + public BsonDecimal128(final Decimal128 value) { + notNull("value", value); + this.value = value; + } + + @Override + public BsonType getBsonType() { + return BsonType.DECIMAL128; + } + + /** + * Gets the Decimal128 value. 
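A brief illustration of the BsonNumber conversions BsonDecimal128 exposes (example code, not part of this patch): the narrowing accessors truncate as usual, while decimal128Value() returns the exact wrapped value.

```java
import org.bson.BsonDecimal128;
import org.bson.types.Decimal128;

// Example only.
public final class Decimal128Sketch {
    public static void main(final String[] args) {
        BsonDecimal128 price = new BsonDecimal128(Decimal128.parse("9.99"));

        System.out.println(price.intValue());         // 9 (truncated)
        System.out.println(price.doubleValue());      // 9.99 (approximate)
        System.out.println(price.decimal128Value());  // 9.99 (exact)
    }
}
```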
+ * + * @return the value + */ + public Decimal128 getValue() { + return value; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BsonDecimal128 that = (BsonDecimal128) o; + + if (!value.equals(that.value)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return value.hashCode(); + } + + @Override + public String toString() { + return "BsonDecimal128{" + + "value=" + value + + '}'; + } + + @Override + public int intValue() { + return value.intValue(); + } + + @Override + public long longValue() { + return value.longValue(); + } + + @Override + public double doubleValue() { + return value.doubleValue(); + } + + @Override + public Decimal128 decimal128Value() { + return value; + } +} diff --git a/bson/src/main/org/bson/BsonDocument.java b/bson/src/main/org/bson/BsonDocument.java new file mode 100644 index 00000000000..87625de8dbd --- /dev/null +++ b/bson/src/main/org/bson/BsonDocument.java @@ -0,0 +1,936 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; +import org.bson.io.BasicOutputBuffer; +import org.bson.json.JsonMode; +import org.bson.json.JsonReader; +import org.bson.json.JsonWriter; +import org.bson.json.JsonWriterSettings; + +import java.io.InvalidObjectException; +import java.io.ObjectInputStream; +import java.io.Serializable; +import java.io.StringWriter; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.util.Collection; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static java.lang.String.format; + +/** + * A type-safe container for a BSON document. This class should NOT be sub-classed by third parties. + * + * @since 3.0 + */ +public class BsonDocument extends BsonValue implements Map, Cloneable, Bson, Serializable { + private static final long serialVersionUID = 1L; + + /** + * The underlying map. + */ + private final Map map; + + /** + * Parses a string in MongoDB Extended JSON format to a {@code BsonDocument} + * + * @param json the JSON string + * @return a corresponding {@code BsonDocument} object + * @see org.bson.json.JsonReader + * @mongodb.driver.manual reference/mongodb-extended-json/ MongoDB Extended JSON + */ + public static BsonDocument parse(final String json) { + return new BsonDocumentCodec().decode(new JsonReader(json), DecoderContext.builder().build()); + } + + /** + * Construct a new instance with the given list {@code BsonElement}, none of which may be null. 
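The constructors above support several construction styles; a usage sketch (example code, not part of this patch):

```java
import org.bson.BsonDocument;
import org.bson.BsonInt32;
import org.bson.BsonString;

// Example only: parse from extended JSON, or build with the key/value
// constructor plus fluent append().
public final class ConstructionSketch {
    public static void main(final String[] args) {
        BsonDocument parsed = BsonDocument.parse("{\"a\": 1, \"b\": \"text\"}");

        BsonDocument built = new BsonDocument("a", new BsonInt32(1))
                .append("b", new BsonString("text"));

        System.out.println(parsed.equals(built));  // true: equality compares entry sets
    }
}
```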
+ * + * @param bsonElements a list of {@code BsonElement} + */ + public BsonDocument(final List bsonElements) { + this(bsonElements.size()); + for (BsonElement cur : bsonElements) { + put(cur.getName(), cur.getValue()); + } + } + + /** + * Construct a new instance with a single key value pair + * + * @param key the key + * @param value the value + */ + public BsonDocument(final String key, final BsonValue value) { + this(); + put(key, value); + } + + /** + * Construct an empty document with the specified initial capacity. + * + * @param initialCapacity the initial capacity + * @throws IllegalArgumentException if the initial capacity is negative + * @since 4.3 + */ + public BsonDocument(final int initialCapacity) { + map = new LinkedHashMap<>(initialCapacity); + } + + /** + * Construct an empty document. + */ + public BsonDocument() { + map = new LinkedHashMap<>(); + } + + @Override + public BsonDocument toBsonDocument(final Class documentClass, final CodecRegistry codecRegistry) { + return this; + } + + @Override + public BsonType getBsonType() { + return BsonType.DOCUMENT; + } + + @Override + public int size() { + return map.size(); + } + + @Override + public boolean isEmpty() { + return map.isEmpty(); + } + + @Override + public boolean containsKey(final Object key) { + return map.containsKey(key); + } + + @Override + public boolean containsValue(final Object value) { + return map.containsValue(value); + } + + @Override + public BsonValue get(final Object key) { + return map.get(key); + } + + /** + * Gets the value of the key if it is a BsonDocument, or throws if not. + * + * @param key the key + * @return the value of the key as a BsonDocument + * @throws org.bson.BsonInvalidOperationException if the document does not contain the key or the value is not a BsonDocument + */ + public BsonDocument getDocument(final Object key) { + throwIfKeyAbsent(key); + return get(key).asDocument(); + } + + /** + * Gets the value of the key if it is a BsonArray, or throws if not. + * + * @param key the key + * @return the value of the key as a BsonArray + * @throws org.bson.BsonInvalidOperationException if the document does not contain the key or the value is not of the expected type + */ + public BsonArray getArray(final Object key) { + throwIfKeyAbsent(key); + + return get(key).asArray(); + } + + /** + * Gets the value of the key if it is a BsonNumber, or throws if not. + * + * @param key the key + * @return the value of the key as a BsonNumber + * @throws org.bson.BsonInvalidOperationException if the document does not contain the key or the value is not of the expected type + */ + public BsonNumber getNumber(final Object key) { + throwIfKeyAbsent(key); + return get(key).asNumber(); + } + + /** + * Gets the value of the key if it is a BsonInt32, or throws if not. + * + * @param key the key + * @return the value of the key as a BsonInt32 + * @throws org.bson.BsonInvalidOperationException if the document does not contain the key or the value is not of the expected type + */ + public BsonInt32 getInt32(final Object key) { + throwIfKeyAbsent(key); + return get(key).asInt32(); + } + + /** + * Gets the value of the key if it is a BsonInt64, or throws if not. 
+ * + * @param key the key + * @return the value of the key as a BsonInt64 + * @throws org.bson.BsonInvalidOperationException if the document does not contain the key or the value is not of the expected type + */ + public BsonInt64 getInt64(final Object key) { + throwIfKeyAbsent(key); + return get(key).asInt64(); + } + + /** + * Gets the value of the key if it is a BsonDecimal128, or throws if not. + * + * @param key the key + * @return the value of the key as a BsonDecimal128 + * @throws org.bson.BsonInvalidOperationException if the document does not contain the key or the value is not of the expected type + * @since 3.4 + */ + public BsonDecimal128 getDecimal128(final Object key) { + throwIfKeyAbsent(key); + return get(key).asDecimal128(); + } + + /** + * Gets the value of the key if it is a BsonDouble, or throws if not. + * + * @param key the key + * @return the value of the key as a BsonDouble + * @throws org.bson.BsonInvalidOperationException if the document does not contain the key or the value is not of the expected type + */ + public BsonDouble getDouble(final Object key) { + throwIfKeyAbsent(key); + return get(key).asDouble(); + } + + /** + * Gets the value of the key if it is a BsonBoolean, or throws if not. + * + * @param key the key + * @return the value of the key as a BsonBoolean + * @throws org.bson.BsonInvalidOperationException if the document does not contain the key or the value is not of the expected type + */ + public BsonBoolean getBoolean(final Object key) { + throwIfKeyAbsent(key); + return get(key).asBoolean(); + } + + /** + * Gets the value of the key if it is a BsonString, or throws if not. + * + * @param key the key + * @return the value of the key as a BsonString + * @throws org.bson.BsonInvalidOperationException if the document does not contain the key or the value is not of the expected type + */ + public BsonString getString(final Object key) { + throwIfKeyAbsent(key); + return get(key).asString(); + } + + /** + * Gets the value of the key if it is a BsonDateTime, or throws if not. + * + * @param key the key + * @return the value of the key as a BsonDateTime + * @throws org.bson.BsonInvalidOperationException if the document does not contain the key or the value is not of the expected type + */ + public BsonDateTime getDateTime(final Object key) { + throwIfKeyAbsent(key); + return get(key).asDateTime(); + } + + /** + * Gets the value of the key if it is a BsonTimestamp, or throws if not. + * + * @param key the key + * @return the value of the key as a BsonTimestamp + * @throws org.bson.BsonInvalidOperationException if the document does not contain the key or the value is not of the expected type + */ + public BsonTimestamp getTimestamp(final Object key) { + throwIfKeyAbsent(key); + return get(key).asTimestamp(); + } + + /** + * Gets the value of the key if it is a BsonObjectId, or throws if not. + * + * @param key the key + * @return the value of the key as a BsonObjectId + * @throws org.bson.BsonInvalidOperationException if the document does not contain the key or the value is not of the expected type + */ + public BsonObjectId getObjectId(final Object key) { + throwIfKeyAbsent(key); + return get(key).asObjectId(); + } + + /** + * Gets the value of the key if it is a BsonRegularExpression, or throws if not. 
+ * + * @param key the key + * @return the value of the key as a BsonRegularExpression + * @throws org.bson.BsonInvalidOperationException if the document does not contain the key or the value is not of the expected type + */ + public BsonRegularExpression getRegularExpression(final Object key) { + throwIfKeyAbsent(key); + return get(key).asRegularExpression(); + } + + /** + * Gets the value of the key if it is a BsonBinary, or throws if not. + * + * @param key the key + * @return the value of the key as a BsonBinary + * @throws org.bson.BsonInvalidOperationException if the document does not contain the key or the value is not of the expected type + */ + public BsonBinary getBinary(final Object key) { + throwIfKeyAbsent(key); + return get(key).asBinary(); + } + + /** + * Returns true if the value of the key is a BsonNull, returns false if the document does not contain the key. + * + * @param key the key + * @return true if the value of the key is a BsonNull, returns false if the document does not contain the key. + */ + public boolean isNull(final Object key) { + if (!containsKey(key)) { + return false; + } + return get(key).isNull(); + } + + /** + * Returns true if the value of the key is a BsonDocument, returns false if the document does not contain the key. + * + * @param key the key + * @return true if the value of the key is a BsonDocument, returns false if the document does not contain the key. + */ + public boolean isDocument(final Object key) { + if (!containsKey(key)) { + return false; + } + return get(key).isDocument(); + } + + /** + * Returns true if the value of the key is a BsonArray, returns false if the document does not contain the key. + * + * @param key the key + * @return true if the value of the key is a BsonArray, returns false if the document does not contain the key. + */ + public boolean isArray(final Object key) { + if (!containsKey(key)) { + return false; + } + return get(key).isArray(); + } + + /** + * Returns true if the value of the key is a BsonNumber, returns false if the document does not contain the key. + * + * @param key the key + * @return true if the value of the key is a BsonNumber, returns false if the document does not contain the key. + */ + public boolean isNumber(final Object key) { + if (!containsKey(key)) { + return false; + } + return get(key).isNumber(); + } + + /** + * Returns true if the value of the key is a BsonInt32, returns false if the document does not contain the key. + * + * @param key the key + * @return true if the value of the key is a BsonInt32, returns false if the document does not contain the key. + */ + public boolean isInt32(final Object key) { + if (!containsKey(key)) { + return false; + } + return get(key).isInt32(); + } + + /** + * Returns true if the value of the key is a BsonInt64, returns false if the document does not contain the key. + * + * @param key the key + * @return true if the value of the key is a BsonInt64, returns false if the document does not contain the key. + */ + public boolean isInt64(final Object key) { + if (!containsKey(key)) { + return false; + } + return get(key).isInt64(); + } + + /** + * Returns true if the value of the key is a BsonDecimal128, returns false if the document does not contain the key. + * + * @param key the key + * @return true if the value of the key is a BsonDecimal128, returns false if the document does not contain the key. 
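The accessors fall into three families: throwing getters, is-type probes, and, further below, getters that take a default. A usage sketch covering all three (example code, not part of this patch):

```java
import org.bson.BsonDocument;
import org.bson.BsonInt32;
import org.bson.BsonInvalidOperationException;

// Example only.
public final class TypedAccessSketch {
    public static void main(final String[] args) {
        BsonDocument doc = new BsonDocument("count", new BsonInt32(5));

        System.out.println(doc.getInt32("count").getValue());                      // 5
        System.out.println(doc.isString("count"));                                 // false: wrong type
        System.out.println(doc.isString("missing"));                               // false: absent key
        System.out.println(doc.getInt32("missing", new BsonInt32(0)).getValue());  // 0: default applied

        try {
            doc.getString("count");  // present, but not a BsonString
        } catch (BsonInvalidOperationException e) {
            System.out.println("not a string");
        }
    }
}
```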
+ * @since 3.4 + */ + public boolean isDecimal128(final Object key) { + if (!containsKey(key)) { + return false; + } + return get(key).isDecimal128(); + } + + + /** + * Returns true if the value of the key is a BsonDouble, returns false if the document does not contain the key. + * + * @param key the key + * @return true if the value of the key is a BsonDouble, returns false if the document does not contain the key. + */ + public boolean isDouble(final Object key) { + if (!containsKey(key)) { + return false; + } + return get(key).isDouble(); + } + + /** + * Returns true if the value of the key is a BsonBoolean, returns false if the document does not contain the key. + * + * @param key the key + * @return true if the value of the key is a BsonBoolean, returns false if the document does not contain the key. + */ + public boolean isBoolean(final Object key) { + if (!containsKey(key)) { + return false; + } + return get(key).isBoolean(); + } + + /** + * Returns true if the value of the key is a BsonString, returns false if the document does not contain the key. + * + * @param key the key + * @return true if the value of the key is a BsonString, returns false if the document does not contain the key. + */ + public boolean isString(final Object key) { + if (!containsKey(key)) { + return false; + } + return get(key).isString(); + } + + /** + * Returns true if the value of the key is a BsonDateTime, returns false if the document does not contain the key. + * + * @param key the key + * @return true if the value of the key is a BsonDateTime, returns false if the document does not contain the key. + */ + public boolean isDateTime(final Object key) { + if (!containsKey(key)) { + return false; + } + return get(key).isDateTime(); + } + + /** + * Returns true if the value of the key is a BsonTimestamp, returns false if the document does not contain the key. + * + * @param key the key + * @return true if the value of the key is a BsonTimestamp, returns false if the document does not contain the key. + */ + public boolean isTimestamp(final Object key) { + if (!containsKey(key)) { + return false; + } + return get(key).isTimestamp(); + } + + /** + * Returns true if the value of the key is a BsonObjectId, returns false if the document does not contain the key. + * + * @param key the key + * @return true if the value of the key is a BsonObjectId, returns false if the document does not contain the key. + */ + public boolean isObjectId(final Object key) { + if (!containsKey(key)) { + return false; + } + return get(key).isObjectId(); + } + + /** + * Returns true if the value of the key is a BsonBinary, returns false if the document does not contain the key. + * + * @param key the key + * @return true if the value of the key is a BsonBinary, returns false if the document does not contain the key. + */ + public boolean isBinary(final Object key) { + if (!containsKey(key)) { + return false; + } + return get(key).isBinary(); + } + + /** + * If the document does not contain the given key, return the given default value. Otherwise, gets the value of the key. + * + * @param key the key + * @param defaultValue the default value + * @return the value of the key as a BsonValue + */ + public BsonValue get(final Object key, final BsonValue defaultValue) { + BsonValue value = get(key); + return value != null ? value : defaultValue; + } + + /** + * If the document does not contain the given key, return the given default value. Otherwise, gets the value of the key as a + * BsonDocument. 
+ * + * @param key the key + * @param defaultValue the default value + * @return the value of the key as a BsonDocument + * @throws org.bson.BsonInvalidOperationException if the document contains the key but the value is not of the expected type + */ + public BsonDocument getDocument(final Object key, final BsonDocument defaultValue) { + if (!containsKey(key)) { + return defaultValue; + } + return get(key).asDocument(); + } + + /** + * If the document does not contain the given key, return the given default value. Otherwise, gets the value of the key as a + * BsonArray. + * + * @param key the key + * @param defaultValue the default value + * @return the value of the key as a BsonArray + * @throws org.bson.BsonInvalidOperationException if the document contains the key but the value is not of the expected type + */ + public BsonArray getArray(final Object key, final BsonArray defaultValue) { + if (!containsKey(key)) { + return defaultValue; + } + return get(key).asArray(); + } + + /** + * If the document does not contain the given key, return the given default value. Otherwise, gets the value of the key as a + * BsonNumber. + * + * @param key the key + * @param defaultValue the default value + * @return the value of the key as a BsonNumber + * @throws org.bson.BsonInvalidOperationException if the document contains the key but the value is not of the expected type + */ + public BsonNumber getNumber(final Object key, final BsonNumber defaultValue) { + if (!containsKey(key)) { + return defaultValue; + } + return get(key).asNumber(); + } + + /** + * If the document does not contain the given key, return the given default value. Otherwise, gets the value of the key as a + * BsonInt32. + * + * @param key the key + * @param defaultValue the default value + * @return the value of the key as a BsonInt32 + * @throws org.bson.BsonInvalidOperationException if the document contains the key but the value is not of the expected type + */ + public BsonInt32 getInt32(final Object key, final BsonInt32 defaultValue) { + if (!containsKey(key)) { + return defaultValue; + } + return get(key).asInt32(); + } + + /** + * If the document does not contain the given key, return the given default value. Otherwise, gets the value of the key as a + * BsonInt64. + * + * @param key the key + * @param defaultValue the default value + * @return the value of the key as a BsonInt64 + * @throws org.bson.BsonInvalidOperationException if the document contains the key but the value is not of the expected type + */ + public BsonInt64 getInt64(final Object key, final BsonInt64 defaultValue) { + if (!containsKey(key)) { + return defaultValue; + } + return get(key).asInt64(); + } + + /** + * If the document does not contain the given key, return the given default value. Otherwise, gets the value of the key as a + * BsonDecimal128. + * + * @param key the key + * @param defaultValue the default value + * @return the value of the key as a BsonDecimal128 + * @throws org.bson.BsonInvalidOperationException if the document contains the key but the value is not of the expected type + * @since 3.4 + */ + public BsonDecimal128 getDecimal128(final Object key, final BsonDecimal128 defaultValue) { + if (!containsKey(key)) { + return defaultValue; + } + return get(key).asDecimal128(); + } + + /** + * If the document does not contain the given key, return the given default value. Otherwise, gets the value of the key as a + * BsonDouble. 
+ * + * @param key the key + * @param defaultValue the default value + * @return the value of the key as a BsonDouble + * @throws org.bson.BsonInvalidOperationException if the document contains the key but the value is not of the expected type + */ + public BsonDouble getDouble(final Object key, final BsonDouble defaultValue) { + if (!containsKey(key)) { + return defaultValue; + } + return get(key).asDouble(); + } + + /** + * If the document does not contain the given key, return the given default value. Otherwise, gets the value of the key as a + * BsonBoolean. + * + * @param key the key + * @param defaultValue the default value + * @return the value of the key as a BsonBoolean + * @throws org.bson.BsonInvalidOperationException if the document contains the key but the value is not of the expected type + */ + public BsonBoolean getBoolean(final Object key, final BsonBoolean defaultValue) { + if (!containsKey(key)) { + return defaultValue; + } + return get(key).asBoolean(); + } + + /** + * If the document does not contain the given key, return the given default value. Otherwise, gets the value of the key as a + * BsonString. + * + * @param key the key + * @param defaultValue the default value + * @return the value of the key as a BsonString + * @throws org.bson.BsonInvalidOperationException if the document contains the key but the value is not of the expected type + */ + public BsonString getString(final Object key, final BsonString defaultValue) { + if (!containsKey(key)) { + return defaultValue; + } + return get(key).asString(); + } + + /** + * If the document does not contain the given key, return the given default value. Otherwise, gets the value of the key as a + * BsonDateTime. + * + * @param key the key + * @param defaultValue the default value + * @return the value of the key as a BsonDateTime + * @throws org.bson.BsonInvalidOperationException if the document contains the key but the value is not of the expected type + */ + public BsonDateTime getDateTime(final Object key, final BsonDateTime defaultValue) { + if (!containsKey(key)) { + return defaultValue; + } + return get(key).asDateTime(); + } + + /** + * If the document does not contain the given key, return the given default value. Otherwise, gets the value of the key as a + * BsonTimestamp. + * + * @param key the key + * @param defaultValue the default value + * @return the value of the key as a BsonTimestamp + * @throws org.bson.BsonInvalidOperationException if the document contains the key but the value is not of the expected type + */ + public BsonTimestamp getTimestamp(final Object key, final BsonTimestamp defaultValue) { + if (!containsKey(key)) { + return defaultValue; + } + return get(key).asTimestamp(); + } + + /** + * If the document does not contain the given key, return the given default value. Otherwise, gets the value of the key as a + * BsonObjectId. + * + * @param key the key + * @param defaultValue the default value + * @return the value of the key as a BsonObjectId + * @throws org.bson.BsonInvalidOperationException if the document contains the key but the value is not of the expected type + */ + public BsonObjectId getObjectId(final Object key, final BsonObjectId defaultValue) { + if (!containsKey(key)) { + return defaultValue; + } + return get(key).asObjectId(); + } + + /** + * If the document does not contain the given key, return the given default value. Otherwise, gets the value of the key as a + * BsonBinary. 
+ * + * @param key the key + * @param defaultValue the default value + * @return the value of the key as a BsonBinary + * @throws org.bson.BsonInvalidOperationException if the document contains the key but the value is not of the expected type + */ + public BsonBinary getBinary(final Object key, final BsonBinary defaultValue) { + if (!containsKey(key)) { + return defaultValue; + } + return get(key).asBinary(); + } + + /** + * If the document does not contain the given key, return the given default value. Otherwise, gets the value of the key as a + * BsonRegularExpression. + * + * @param key the key + * @param defaultValue the default value + * @return the value of the key as a BsonRegularExpression + * @throws org.bson.BsonInvalidOperationException if the document contains the key but the value is not of the expected type + */ + public BsonRegularExpression getRegularExpression(final Object key, final BsonRegularExpression defaultValue) { + if (!containsKey(key)) { + return defaultValue; + } + return get(key).asRegularExpression(); + } + + @Override + public BsonValue put(final String key, final BsonValue value) { + if (value == null) { + throw new IllegalArgumentException(format("The value for key %s can not be null", key)); + } + return map.put(key, value); + } + + @Override + public BsonValue remove(final Object key) { + return map.remove(key); + } + + @Override + public void putAll(final Map m) { + for (Map.Entry cur : m.entrySet()) { + put(cur.getKey(), cur.getValue()); + } + } + + @Override + public void clear() { + map.clear(); + } + + @Override + public Set keySet() { + return map.keySet(); + } + + @Override + public Collection values() { + return map.values(); + } + + @Override + public Set> entrySet() { + return map.entrySet(); + } + + /** + * Put the given key and value into this document, and return the document. + * + * @param key the key + * @param value the value + * @return this + */ + public BsonDocument append(final String key, final BsonValue value) { + put(key, value); + return this; + } + + /** + * Gets the first key in the document. + * + * @return the first key in the document + * @throws java.util.NoSuchElementException if the document is empty + * @since 3.6 + */ + public String getFirstKey() { + return keySet().iterator().next(); + } + + /** + * Gets a {@code BsonReader} that reads this document. + * + * @return a reader that reads this document + * @since 3.7 + */ + public BsonReader asBsonReader() { + return new BsonDocumentReader(this); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (!(o instanceof BsonDocument)) { + return false; + } + + BsonDocument that = (BsonDocument) o; + + return entrySet().equals(that.entrySet()); + } + + @Override + public int hashCode() { + return entrySet().hashCode(); + } + + /** + * Gets a JSON representation of this document using the {@link org.bson.json.JsonMode#RELAXED} output mode, and otherwise the default + * settings of {@link JsonWriterSettings.Builder}. + * + * @return a JSON representation of this document + * @see #toJson(JsonWriterSettings) + * @see JsonWriterSettings + */ + public String toJson() { + return toJson(JsonWriterSettings.builder().outputMode(JsonMode.RELAXED).build()); + } + + /** + * Gets a JSON representation of this document using the given {@code JsonWriterSettings}.
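A usage sketch of the two toJson() overloads (example code, not part of this patch): the zero-argument form defaults to relaxed mode, while a JsonWriterSettings switches to canonical output or enables indentation.

```java
import org.bson.BsonDocument;
import org.bson.json.JsonMode;
import org.bson.json.JsonWriterSettings;

// Example only.
public final class ToJsonSketch {
    public static void main(final String[] args) {
        BsonDocument doc = BsonDocument.parse("{\"when\": {\"$date\": \"2020-01-01T00:00:00Z\"}}");

        System.out.println(doc.toJson());  // relaxed: {"when": {"$date": "2020-01-01T00:00:00Z"}}

        JsonWriterSettings canonical = JsonWriterSettings.builder()
                .outputMode(JsonMode.EXTENDED)
                .indent(true)
                .build();
        System.out.println(doc.toJson(canonical));  // canonical: $date becomes {"$numberLong": "1577836800000"}
    }
}
```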
+ * @param settings the JSON writer settings + * @return a JSON representation of this document + */ + public String toJson(final JsonWriterSettings settings) { + StringWriter writer = new StringWriter(); + new BsonDocumentCodec().encode(new JsonWriter(writer, settings), this, EncoderContext.builder().build()); + return writer.toString(); + } + + @Override + public String toString() { + return toJson(); + } + + @Override + public BsonDocument clone() { + BsonDocument to = new BsonDocument(this.size()); + for (Entry cur : entrySet()) { + switch (cur.getValue().getBsonType()) { + case DOCUMENT: + to.put(cur.getKey(), cur.getValue().asDocument().clone()); + break; + case ARRAY: + to.put(cur.getKey(), cur.getValue().asArray().clone()); + break; + case BINARY: + to.put(cur.getKey(), BsonBinary.clone(cur.getValue().asBinary())); + break; + case JAVASCRIPT_WITH_SCOPE: + to.put(cur.getKey(), BsonJavaScriptWithScope.clone(cur.getValue().asJavaScriptWithScope())); + break; + default: + to.put(cur.getKey(), cur.getValue()); + } + } + return to; + } + + private void throwIfKeyAbsent(final Object key) { + if (!containsKey(key)) { + throw new BsonInvalidOperationException("Document does not contain key " + key); + } + } + + /** + * Write the replacement object. + * + *
+ * <p> + * See https://docs.oracle.com/javase/6/docs/platform/serialization/spec/output.html + * </p>
+ * + * @return a proxy for the document + */ + private Object writeReplace() { + return new SerializationProxy(this); + } + + /** + * Prevent normal deserialization. + * + *
+ * <p> + * See https://docs.oracle.com/javase/6/docs/platform/serialization/spec/input.html + * </p>
+ * + * @param stream the stream + * @throws InvalidObjectException in all cases + */ + private void readObject(final ObjectInputStream stream) throws InvalidObjectException { + throw new InvalidObjectException("Proxy required"); + } + + private static class SerializationProxy implements Serializable { + private static final long serialVersionUID = 1L; + + private final byte[] bytes; + + SerializationProxy(final BsonDocument document) { + BasicOutputBuffer buffer = new BasicOutputBuffer(); + new BsonDocumentCodec().encode(new BsonBinaryWriter(buffer), document, EncoderContext.builder().build()); + this.bytes = new byte[buffer.size()]; + int curPos = 0; + for (ByteBuf cur : buffer.getByteBuffers()) { + System.arraycopy(cur.array(), cur.position(), bytes, curPos, cur.limit()); + curPos += cur.position(); + } + } + + private Object readResolve() { + return new BsonDocumentCodec().decode(new BsonBinaryReader(ByteBuffer.wrap(bytes) + .order(ByteOrder.LITTLE_ENDIAN)), + DecoderContext.builder().build()); + } + } +} diff --git a/bson/src/main/org/bson/BsonDocumentReader.java b/bson/src/main/org/bson/BsonDocumentReader.java new file mode 100644 index 00000000000..0aaca06eaef --- /dev/null +++ b/bson/src/main/org/bson/BsonDocumentReader.java @@ -0,0 +1,422 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import org.bson.types.Decimal128; +import org.bson.types.ObjectId; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +/** + * A {@code BsonReader} implementation that reads from an instance of {@code BsonDocument}. This can be used to decode a {@code + * BsonDocument} using a {@code Decoder}. + * + * @see BsonDocument + * @see org.bson.codecs.Decoder + * + * @since 3.0 + */ +public class BsonDocumentReader extends AbstractBsonReader { + private BsonValue currentValue; + + /** + * Construct a new instance. 
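BsonDocumentReader lets any Decoder re-read an in-memory document with no bytes involved, which is what makes re-decoding (and effectively deep-copying) a BsonDocument cheap. A usage sketch (example code, not part of this patch):

```java
import org.bson.BsonDocument;
import org.bson.BsonDocumentReader;
import org.bson.codecs.BsonDocumentCodec;
import org.bson.codecs.DecoderContext;

// Example only: re-decode an in-memory document through the reader.
public final class DocumentReaderSketch {
    public static void main(final String[] args) {
        BsonDocument original = BsonDocument.parse("{\"a\": 1, \"nested\": {\"b\": 2}}");

        BsonDocument reread = new BsonDocumentCodec()
                .decode(new BsonDocumentReader(original), DecoderContext.builder().build());

        System.out.println(reread.equals(original));  // true: same content
        System.out.println(reread != original);       // true: distinct instance
    }
}
```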
+ * + * @param document the document to read from + */ + public BsonDocumentReader(final BsonDocument document) { + setContext(new Context(null, BsonContextType.TOP_LEVEL, document)); + currentValue = document; + } + + @Override + protected BsonBinary doReadBinaryData() { + return currentValue.asBinary(); + } + + @Override + protected byte doPeekBinarySubType() { + return currentValue.asBinary().getType(); + } + + @Override + protected int doPeekBinarySize() { + return currentValue.asBinary().getData().length; + } + + @Override + protected boolean doReadBoolean() { + return currentValue.asBoolean().getValue(); + } + + @Override + protected long doReadDateTime() { + return currentValue.asDateTime().getValue(); + } + + @Override + protected double doReadDouble() { + return currentValue.asDouble().getValue(); + } + + @Override + protected void doReadEndArray() { + setContext(getContext().getParentContext()); + } + + @Override + protected void doReadEndDocument() { + setContext(getContext().getParentContext()); + switch (getContext().getContextType()) { + case ARRAY: + case DOCUMENT: + setState(State.TYPE); + break; + case TOP_LEVEL: + setState(State.DONE); + break; + default: + throw new BSONException("Unexpected ContextType."); + } + } + + @Override + protected int doReadInt32() { + return currentValue.asInt32().getValue(); + } + + @Override + protected long doReadInt64() { + return currentValue.asInt64().getValue(); + } + + @Override + public Decimal128 doReadDecimal128() { + return currentValue.asDecimal128().getValue(); + } + + @Override + protected String doReadJavaScript() { + return currentValue.asJavaScript().getCode(); + } + + @Override + protected String doReadJavaScriptWithScope() { + return currentValue.asJavaScriptWithScope().getCode(); + } + + @Override + protected void doReadMaxKey() { + } + + @Override + protected void doReadMinKey() { + } + + @Override + protected void doReadNull() { + } + + @Override + protected ObjectId doReadObjectId() { + return currentValue.asObjectId().getValue(); + } + + @Override + protected BsonRegularExpression doReadRegularExpression() { + return currentValue.asRegularExpression(); + } + + @Override + protected BsonDbPointer doReadDBPointer() { + return currentValue.asDBPointer(); + } + + @Override + protected void doReadStartArray() { + BsonArray array = currentValue.asArray(); + setContext(new Context(getContext(), BsonContextType.ARRAY, array)); + } + + @Override + protected void doReadStartDocument() { + BsonDocument document; + if (currentValue.getBsonType() == BsonType.JAVASCRIPT_WITH_SCOPE) { + document = currentValue.asJavaScriptWithScope().getScope(); + } else { + document = currentValue.asDocument(); + } + setContext(new Context(getContext(), BsonContextType.DOCUMENT, document)); + } + + @Override + protected String doReadString() { + return currentValue.asString().getValue(); + } + + @Override + protected String doReadSymbol() { + return currentValue.asSymbol().getSymbol(); + } + + @Override + protected BsonTimestamp doReadTimestamp() { + return currentValue.asTimestamp(); + } + + @Override + protected void doReadUndefined() { + } + + @Override + protected void doSkipName() { + } + + @Override + protected void doSkipValue() { + } + + @Override + public BsonType readBsonType() { + if (getState() == State.INITIAL || getState() == State.SCOPE_DOCUMENT) { + // there is an implied type of Document for the top level and for scope documents + setCurrentBsonType(BsonType.DOCUMENT); + setState(State.VALUE); + return getCurrentBsonType(); + } + + 
+        if (getState() != State.TYPE) {
+            throwInvalidState("ReadBSONType", State.TYPE);
+        }
+
+        switch (getContext().getContextType()) {
+            case ARRAY:
+                currentValue = getContext().getNextValue();
+                if (currentValue == null) {
+                    setState(State.END_OF_ARRAY);
+                    return BsonType.END_OF_DOCUMENT;
+                }
+                setState(State.VALUE);
+                break;
+            case DOCUMENT:
+                Map.Entry<String, BsonValue> currentElement = getContext().getNextElement();
+                if (currentElement == null) {
+                    setState(State.END_OF_DOCUMENT);
+                    return BsonType.END_OF_DOCUMENT;
+                }
+                setCurrentName(currentElement.getKey());
+                currentValue = currentElement.getValue();
+                setState(State.NAME);
+                break;
+            default:
+                throw new BSONException("Invalid ContextType.");
+        }
+
+        setCurrentBsonType(currentValue.getBsonType());
+        return getCurrentBsonType();
+    }
+
+    @Override
+    public BsonReaderMark getMark() {
+        return new Mark();
+    }
+
+    @Override
+    protected Context getContext() {
+        return (Context) super.getContext();
+    }
+
+    /**
+     * An implementation of {@code AbstractBsonReader.Mark}.
+     */
+    protected class Mark extends AbstractBsonReader.Mark {
+        private final BsonValue currentValue;
+        private final Context context;
+
+        /**
+         * Construct an instance.
+         */
+        protected Mark() {
+            currentValue = BsonDocumentReader.this.currentValue;
+            context = BsonDocumentReader.this.getContext();
+            context.mark();
+        }
+
+        @Override
+        public void reset() {
+            super.reset();
+            BsonDocumentReader.this.currentValue = currentValue;
+            BsonDocumentReader.this.setContext(context);
+            context.reset();
+        }
+    }
+
+    private static class BsonDocumentMarkableIterator<T> implements Iterator<T> {
+
+        private final Iterator<T> baseIterator;
+        private final List<T> markIterator = new ArrayList<>();
+        private int curIndex; // index of the cursor
+        private boolean marking;
+
+        protected BsonDocumentMarkableIterator(final Iterator<T> baseIterator) {
+            this.baseIterator = baseIterator;
+            curIndex = 0;
+            marking = false;
+        }
+
+        /**
+         * Start buffering consumed elements so the iterator can later be rewound to this point.
+         */
+        protected void mark() {
+            marking = true;
+        }
+
+        /**
+         * Rewind to the point at which {@code mark} was called; buffered elements are replayed and then discarded.
+         */
+        protected void reset() {
+            curIndex = 0;
+            marking = false;
+        }
+
+        @Override
+        public boolean hasNext() {
+            return baseIterator.hasNext() || curIndex < markIterator.size();
+        }
+
+        @Override
+        public T next() {
+            T value;
+            //TODO: check closed
+            if (curIndex < markIterator.size()) {
+                value = markIterator.get(curIndex);
+                if (marking) {
+                    curIndex++;
+                } else {
+                    markIterator.remove(0);
+                }
+            } else {
+                value = baseIterator.next();
+                if (marking) {
+                    markIterator.add(value);
+                    curIndex++;
+                }
+            }
+            return value;
+        }
+
+        @Override
+        public void remove() {
+            // iterator is read only
+        }
+    }
+
+    /**
+     * An implementation of {@code AbstractBsonReader.Context}.
+     */
+    protected class Context extends AbstractBsonReader.Context {
+
+        private BsonDocumentMarkableIterator<Map.Entry<String, BsonValue>> documentIterator;
+        private BsonDocumentMarkableIterator<BsonValue> arrayIterator;
+
+        /**
+         * Construct an instance.
+         *
+         * @param parentContext the parent context
+         * @param contextType   the context type
+         * @param array         the array context
+         */
+        protected Context(final Context parentContext, final BsonContextType contextType, final BsonArray array) {
+            super(parentContext, contextType);
+            arrayIterator = new BsonDocumentMarkableIterator<>(array.iterator());
+        }
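The markable iterator above is what makes BsonReaderMark cheap for document readers: while marking, consumed elements are buffered and replayed after reset. A minimal sketch of the observable behaviour (the class name and sample document are illustrative):

    import org.bson.BsonDocument;
    import org.bson.BsonDocumentReader;
    import org.bson.BsonReaderMark;

    public final class MarkResetSketch {
        public static void main(final String[] args) {
            BsonDocumentReader reader = new BsonDocumentReader(BsonDocument.parse("{a: 1, b: 2}"));
            reader.readStartDocument();
            BsonReaderMark mark = reader.getMark(); // buffering starts here
            reader.readBsonType();
            System.out.println(reader.readName());  // a
            System.out.println(reader.readInt32()); // 1
            mark.reset();                           // rewind: buffered elements are replayed
            reader.readBsonType();
            System.out.println(reader.readName());  // a again
        }
    }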
+        /**
+         * Construct an instance.
+         *
+         * @param parentContext the parent context
+         * @param contextType   the context type
+         * @param document      the document context
+         */
+        protected Context(final Context parentContext, final BsonContextType contextType, final BsonDocument document) {
+            super(parentContext, contextType);
+            documentIterator = new BsonDocumentMarkableIterator<>(document.entrySet().iterator());
+        }
+
+        /**
+         * Gets the next element.
+         *
+         * @return the next element, which may be null
+         */
+        public Map.Entry<String, BsonValue> getNextElement() {
+            if (documentIterator.hasNext()) {
+                return documentIterator.next();
+            } else {
+                return null;
+            }
+        }
+
+        /**
+         * Create a mark.
+         */
+        protected void mark() {
+            if (documentIterator != null) {
+                documentIterator.mark();
+            } else {
+                arrayIterator.mark();
+            }
+
+            if (getParentContext() != null) {
+                ((Context) getParentContext()).mark();
+            }
+        }
+
+        /**
+         * Reset the context.
+         */
+        protected void reset() {
+            if (documentIterator != null) {
+                documentIterator.reset();
+            } else {
+                arrayIterator.reset();
+            }
+
+            if (getParentContext() != null) {
+                ((Context) getParentContext()).reset();
+            }
+        }
+
+        /**
+         * Gets the next value.
+         *
+         * @return the next value, which may be null
+         */
+        public BsonValue getNextValue() {
+            if (arrayIterator.hasNext()) {
+                return arrayIterator.next();
+            } else {
+                return null;
+            }
+        }
+    }
+}
diff --git a/bson/src/main/org/bson/BsonDocumentWrapper.java b/bson/src/main/org/bson/BsonDocumentWrapper.java
new file mode 100644
index 00000000000..f846d40e1c3
--- /dev/null
+++ b/bson/src/main/org/bson/BsonDocumentWrapper.java
@@ -0,0 +1,231 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson;
+
+import org.bson.codecs.Encoder;
+import org.bson.codecs.EncoderContext;
+import org.bson.codecs.configuration.CodecRegistry;
+
+import java.io.InvalidObjectException;
+import java.io.ObjectInputStream;
+import java.util.Collection;
+import java.util.Map;
+import java.util.Set;
+
+/**
+ * A {@code BsonDocument} that begins its life as a document of any type and an {@code Encoder} for that document, which lets an instance of
+ * any class with an Encoder be treated as a BsonDocument. If any methods are called which require access to the individual elements of the
+ * document, then, on demand, the document will be unwrapped into a BsonDocument using a {@code BsonDocumentWriter} and the {@code Encoder}.
+ * But if all that is done with this document is to encode it, then the {@code Encoder} will be used to do that.
+ *
+ * @param <T> the type of the document that is wrapped
+ * @see org.bson.BsonDocumentWriter
+ * @since 3.0
+ */
+public final class BsonDocumentWrapper<T> extends BsonDocument {
+    private static final long serialVersionUID = 1L;
+
+    private final transient T wrappedDocument;
+    private final transient Encoder<T> encoder;
+
+    /**
+     * The unwrapped document, which may be null
+     */
+    private BsonDocument unwrapped;
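The unwrapped field above is the heart of the laziness: it stays null until an element-level method forces a decode. A sketch of that behaviour, assuming the Document and DocumentCodec classes from this library (the class name and values are illustrative):

    import org.bson.BsonDocumentWrapper;
    import org.bson.Document;
    import org.bson.codecs.DocumentCodec;

    public final class LazyUnwrapSketch {
        public static void main(final String[] args) {
            BsonDocumentWrapper<Document> wrapper =
                    new BsonDocumentWrapper<>(new Document("x", 1), new DocumentCodec());
            System.out.println(wrapper.isUnwrapped()); // false: the Encoder has not run yet
            System.out.println(wrapper.get("x"));      // element access forces the unwrap
            System.out.println(wrapper.isUnwrapped()); // true
        }
    }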
+    /**
+     * A helper to convert a document of type Object to a BsonDocument
+     *
+     * <p>If not already a BsonDocument it looks up the document's class in the codecRegistry and wraps it into a
+     * BsonDocumentWrapper</p>
+     *
+     * @param document      the document to convert
+     * @param codecRegistry the codecRegistry that can be used in the conversion of the Object
+     * @return a BsonDocument
+     */
+    @SuppressWarnings({"rawtypes", "unchecked"})
+    public static BsonDocument asBsonDocument(final Object document, final CodecRegistry codecRegistry) {
+        if (document == null) {
+            return null;
+        }
+        if (document instanceof BsonDocument) {
+            return (BsonDocument) document;
+        } else {
+            return new BsonDocumentWrapper(document, codecRegistry.get(document.getClass()));
+        }
+    }
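A sketch of how asBsonDocument might be called, assuming CodecRegistries and the standard value/document codec providers from this library (the class name is illustrative):

    import org.bson.BsonDocument;
    import org.bson.BsonDocumentWrapper;
    import org.bson.Document;
    import org.bson.codecs.DocumentCodecProvider;
    import org.bson.codecs.ValueCodecProvider;
    import org.bson.codecs.configuration.CodecRegistries;
    import org.bson.codecs.configuration.CodecRegistry;

    public final class AsBsonDocumentSketch {
        public static void main(final String[] args) {
            CodecRegistry registry = CodecRegistries.fromProviders(
                    new ValueCodecProvider(), new DocumentCodecProvider());
            // A BsonDocument passes through untouched; anything else is wrapped
            // together with the codec the registry resolves for its class.
            BsonDocument converted = BsonDocumentWrapper.asBsonDocument(new Document("x", 1), registry);
            System.out.println(converted instanceof BsonDocumentWrapper); // true
        }
    }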
+    /**
+     * Construct a new instance with the given document and encoder for the document.
+     *
+     * @param wrappedDocument the wrapped document
+     * @param encoder         the encoder for the wrapped document
+     */
+    public BsonDocumentWrapper(final T wrappedDocument, final Encoder<T> encoder) {
+        if (wrappedDocument == null) {
+            throw new IllegalArgumentException("Document can not be null");
+        }
+        this.wrappedDocument = wrappedDocument;
+        this.encoder = encoder;
+    }
+
+    /**
+     * Get the wrapped document.
+     *
+     * @return the wrapped document
+     */
+    public T getWrappedDocument() {
+        return wrappedDocument;
+    }
+
+    /**
+     * Get the encoder to use for the wrapped document.
+     *
+     * @return the encoder
+     */
+    public Encoder<T> getEncoder() {
+        return encoder;
+    }
+
+    /**
+     * Determine whether the document has been unwrapped already.
+     *
+     * @return true if the wrapped document has been unwrapped already
+     */
+    public boolean isUnwrapped() {
+        return unwrapped != null;
+    }
+
+    @Override
+    public int size() {
+        return getUnwrapped().size();
+    }
+
+    @Override
+    public boolean isEmpty() {
+        return getUnwrapped().isEmpty();
+    }
+
+    @Override
+    public boolean containsKey(final Object key) {
+        return getUnwrapped().containsKey(key);
+    }
+
+    @Override
+    public boolean containsValue(final Object value) {
+        return getUnwrapped().containsValue(value);
+    }
+
+    @Override
+    public BsonValue get(final Object key) {
+        return getUnwrapped().get(key);
+    }
+
+    @Override
+    public BsonValue put(final String key, final BsonValue value) {
+        return getUnwrapped().put(key, value);
+    }
+
+    @Override
+    public BsonValue remove(final Object key) {
+        return getUnwrapped().remove(key);
+    }
+
+    @Override
+    public void putAll(final Map<? extends String, ? extends BsonValue> m) {
+        super.putAll(m);
+    }
+
+    @Override
+    public void clear() {
+        super.clear();
+    }
+
+    @Override
+    public Set<String> keySet() {
+        return getUnwrapped().keySet();
+    }
+
+    @Override
+    public Collection<BsonValue> values() {
+        return getUnwrapped().values();
+    }
+
+    @Override
+    public Set<Map.Entry<String, BsonValue>> entrySet() {
+        return getUnwrapped().entrySet();
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        return getUnwrapped().equals(o);
+    }
+
+    @Override
+    public int hashCode() {
+        return getUnwrapped().hashCode();
+    }
+
+    @Override
+    public String toString() {
+        return getUnwrapped().toString();
+    }
+
+    @Override
+    public BsonDocument clone() {
+        return getUnwrapped().clone();
+    }
+
+    private BsonDocument getUnwrapped() {
+        if (encoder == null) {
+            throw new BsonInvalidOperationException("Can not unwrap a BsonDocumentWrapper with no Encoder");
+        }
+        if (unwrapped == null) {
+            BsonDocument unwrapped = new BsonDocument();
+            BsonWriter writer = new BsonDocumentWriter(unwrapped);
+            encoder.encode(writer, wrappedDocument, EncoderContext.builder().build());
+            this.unwrapped = unwrapped;
+        }
+        return unwrapped;
+    }
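Since writeReplace below hands serialization the unwrapped BsonDocument, and BsonDocument itself substitutes a BSON-encoded proxy, a wrapper survives Java serialization as a plain BsonDocument. A round-trip sketch using BsonDocument directly (the class name and values are illustrative):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;

    import org.bson.BsonDocument;
    import org.bson.BsonInt32;

    public final class SerializationRoundTripSketch {
        public static void main(final String[] args) throws Exception {
            BsonDocument original = new BsonDocument("n", new BsonInt32(1));
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
                out.writeObject(original); // writeReplace swaps in the BSON-encoded proxy
            }
            try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()))) {
                BsonDocument copy = (BsonDocument) in.readObject(); // the proxy's readResolve decodes it back
                System.out.println(copy.equals(original)); // true
            }
        }
    }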
+    /**
+     * Write the replacement object.
+     *
+     * <p>
+     * See https://docs.oracle.com/javase/6/docs/platform/serialization/spec/output.html
+     * </p>
+ * + * @return a proxy for the document + */ + private Object writeReplace() { + return getUnwrapped(); + } + + /** + * Prevent normal deserialization. + * + *
<p>
+     * See https://docs.oracle.com/javase/6/docs/platform/serialization/spec/input.html
+     * </p>
+ * + * @param stream the stream + * @throws InvalidObjectException in all cases + */ + private void readObject(final ObjectInputStream stream) throws InvalidObjectException { + throw new InvalidObjectException("Proxy required"); + } +} diff --git a/bson/src/main/org/bson/BsonDocumentWriter.java b/bson/src/main/org/bson/BsonDocumentWriter.java new file mode 100644 index 00000000000..a34188645cd --- /dev/null +++ b/bson/src/main/org/bson/BsonDocumentWriter.java @@ -0,0 +1,226 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import org.bson.types.Decimal128; +import org.bson.types.ObjectId; + +import static org.bson.BsonContextType.DOCUMENT; +import static org.bson.BsonContextType.SCOPE_DOCUMENT; + +/** + * A {@code BsonWriter} implementation that writes to an instance of {@code BsonDocument}. This can be used to encode an object into a + * {@code BsonDocument} using an {@code Encoder}. + * + * @see BsonDocument + * @see org.bson.codecs.Encoder + * + * @since 3.0 + */ +public class BsonDocumentWriter extends AbstractBsonWriter { + + private final BsonDocument document; + + /** + * Construct a new instance. + * + * @param document the document to write to + */ + public BsonDocumentWriter(final BsonDocument document) { + super(new BsonWriterSettings()); + this.document = document; + setContext(new Context()); + } + + /** + * Gets the document that the writer is writing to. 
+ * + * @return the document + */ + public BsonDocument getDocument() { + return document; + } + + @Override + protected void doWriteStartDocument() { + switch (getState()) { + case INITIAL: + setContext(new Context(document, DOCUMENT, getContext())); + break; + case VALUE: + setContext(new Context(new BsonDocument(), DOCUMENT, getContext())); + break; + case SCOPE_DOCUMENT: + setContext(new Context(new BsonDocument(), SCOPE_DOCUMENT, getContext())); + break; + default: + throw new BsonInvalidOperationException("Unexpected state " + getState()); + } + } + + @Override + protected void doWriteEndDocument() { + BsonValue value = getContext().container; + setContext(getContext().getParentContext()); + + if (getContext().getContextType() == BsonContextType.JAVASCRIPT_WITH_SCOPE) { + BsonDocument scope = (BsonDocument) value; + BsonString code = (BsonString) getContext().container; + setContext(getContext().getParentContext()); + write(new BsonJavaScriptWithScope(code.getValue(), scope)); + } else if (getContext().getContextType() != BsonContextType.TOP_LEVEL) { + write(value); + } + } + + @Override + protected void doWriteStartArray() { + setContext(new Context(new BsonArray(), BsonContextType.ARRAY, getContext())); + } + + @Override + protected void doWriteEndArray() { + BsonValue array = getContext().container; + setContext(getContext().getParentContext()); + write(array); + } + + @Override + protected void doWriteBinaryData(final BsonBinary value) { + write(value); + } + + @Override + public void doWriteBoolean(final boolean value) { + write(BsonBoolean.valueOf(value)); + } + + @Override + protected void doWriteDateTime(final long value) { + write(new BsonDateTime(value)); + } + + @Override + protected void doWriteDBPointer(final BsonDbPointer value) { + write(value); + } + + @Override + protected void doWriteDouble(final double value) { + write(new BsonDouble(value)); + } + + @Override + protected void doWriteInt32(final int value) { + write(new BsonInt32(value)); + } + + @Override + protected void doWriteInt64(final long value) { + write(new BsonInt64(value)); + } + + @Override + protected void doWriteDecimal128(final Decimal128 value) { + write(new BsonDecimal128(value)); + } + + @Override + protected void doWriteJavaScript(final String value) { + write(new BsonJavaScript(value)); + } + + @Override + protected void doWriteJavaScriptWithScope(final String value) { + setContext(new Context(new BsonString(value), BsonContextType.JAVASCRIPT_WITH_SCOPE, getContext())); + } + + @Override + protected void doWriteMaxKey() { + write(new BsonMaxKey()); + } + + @Override + protected void doWriteMinKey() { + write(new BsonMinKey()); + } + + @Override + public void doWriteNull() { + write(BsonNull.VALUE); + } + + @Override + public void doWriteObjectId(final ObjectId value) { + write(new BsonObjectId(value)); + } + + @Override + public void doWriteRegularExpression(final BsonRegularExpression value) { + write(value); + } + + @Override + public void doWriteString(final String value) { + write(new BsonString(value)); + } + + @Override + public void doWriteSymbol(final String value) { + write(new BsonSymbol(value)); + } + + @Override + public void doWriteTimestamp(final BsonTimestamp value) { + write(value); + } + + @Override + public void doWriteUndefined() { + write(new BsonUndefined()); + } + + @Override + protected Context getContext() { + return (Context) super.getContext(); + } + + private void write(final BsonValue value) { + getContext().add(value); + } + + private class Context extends 
AbstractBsonWriter.Context {
+        private BsonValue container;
+
+        Context(final BsonValue container, final BsonContextType contextType, final Context parent) {
+            super(parent, contextType);
+            this.container = container;
+        }
+
+        Context() {
+            super(null, BsonContextType.TOP_LEVEL);
+        }
+
+        void add(final BsonValue value) {
+            if (container instanceof BsonArray) {
+                ((BsonArray) container).add(value);
+            } else {
+                ((BsonDocument) container).put(getName(), value);
+            }
+        }
+    }
+}
diff --git a/bson/src/main/org/bson/BsonDouble.java b/bson/src/main/org/bson/BsonDouble.java
new file mode 100644
index 00000000000..c3f114df493
--- /dev/null
+++ b/bson/src/main/org/bson/BsonDouble.java
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson;
+
+import org.bson.types.Decimal128;
+
+import java.math.BigDecimal;
+
+/**
+ * A representation of the BSON Double type.
+ *
+ * @since 3.0
+ */
+public class BsonDouble extends BsonNumber implements Comparable<BsonDouble> {
+
+    private final double value;
+
+    /**
+     * Construct a new instance with the given value.
+     *
+     * @param value the value
+     */
+    public BsonDouble(final double value) {
+        this.value = value;
+    }
+
+    @Override
+    public int compareTo(final BsonDouble o) {
+        return Double.compare(value, o.value);
+    }
+
+    @Override
+    public BsonType getBsonType() {
+        return BsonType.DOUBLE;
+    }
+
+    /**
+     * Gets the double value.
+     *
+     * @return the value
+     */
+    public double getValue() {
+        return value;
+    }
+
+    @Override
+    public int intValue() {
+        return (int) value;
+    }
+
+    @Override
+    public long longValue() {
+        return (long) value;
+    }
+
+    @Override
+    public Decimal128 decimal128Value() {
+        if (Double.isNaN(value)) {
+            return Decimal128.NaN;
+        }
+        if (Double.isInfinite(value)) {
+            return value > 0 ? Decimal128.POSITIVE_INFINITY : Decimal128.NEGATIVE_INFINITY;
+        }
+
+        return new Decimal128(new BigDecimal(value));
+    }
+
+    @Override
+    public double doubleValue() {
+        return value;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        BsonDouble that = (BsonDouble) o;
+
+        if (Double.compare(that.value, value) != 0) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        long temp = Double.doubleToLongBits(value);
+        return (int) (temp ^ (temp >>> 32));
+    }
+
+    @Override
+    public String toString() {
+        return "BsonDouble{"
+                + "value=" + value
+                + '}';
+    }
+}
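A sketch of the conversions BsonDouble inherits from BsonNumber, including the NaN special-casing in decimal128Value() above (the class name and values are illustrative):

    import org.bson.BsonDouble;

    public final class BsonDoubleSketch {
        public static void main(final String[] args) {
            BsonDouble d = new BsonDouble(3.9);
            System.out.println(d.intValue());         // 3: the cast truncates toward zero
            System.out.println(d.longValue());        // 3
            System.out.println(d.decimal128Value());  // exact decimal expansion of the binary double 3.9
            System.out.println(new BsonDouble(Double.NaN).decimal128Value()); // NaN
        }
    }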
diff --git a/bson/src/main/org/bson/BsonElement.java b/bson/src/main/org/bson/BsonElement.java
new file mode 100644
index 00000000000..0b898fb8d8d
--- /dev/null
+++ b/bson/src/main/org/bson/BsonElement.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson;
+
+/**
+ * A mapping from a name to a BsonValue.
+ *
+ * @see BsonDocument
+ * @since 3.0
+ */
+public class BsonElement {
+    private final String name;
+    private final BsonValue value;
+
+    /**
+     * Construct a new instance with the given key and value
+     *
+     * @param name  the non-null key
+     * @param value the non-null value
+     */
+    public BsonElement(final String name, final BsonValue value) {
+        this.name = name;
+        this.value = value;
+    }
+
+    /**
+     * Gets the name of the key/field.
+     *
+     * @return the name of the field.
+     */
+    public String getName() {
+        return name;
+    }
+
+    /**
+     * Gets the value of this element.
+     *
+     * @return a {@code BsonValue} containing the value of this element.
+     */
+    public BsonValue getValue() {
+        return value;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        BsonElement that = (BsonElement) o;
+
+        if (getName() != null ? !getName().equals(that.getName()) : that.getName() != null) {
+            return false;
+        }
+        if (getValue() != null ? !getValue().equals(that.getValue()) : that.getValue() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = getName() != null ? getName().hashCode() : 0;
+        result = 31 * result + (getValue() != null ? getValue().hashCode() : 0);
+        return result;
+    }
+}
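BsonElement is a plain name/value pair; a two-line usage sketch (the class name and values are illustrative):

    import org.bson.BsonElement;
    import org.bson.BsonInt32;

    public final class BsonElementSketch {
        public static void main(final String[] args) {
            BsonElement element = new BsonElement("count", new BsonInt32(5));
            System.out.println(element.getName());  // count
            System.out.println(element.getValue()); // BsonInt32{value=5}
        }
    }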
diff --git a/bson/src/main/org/bson/BsonInt32.java b/bson/src/main/org/bson/BsonInt32.java
new file mode 100644
index 00000000000..d89a977941a
--- /dev/null
+++ b/bson/src/main/org/bson/BsonInt32.java
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson;
+
+import org.bson.types.Decimal128;
+
+/**
+ * A representation of the BSON Int32 type.
+ *
+ * @since 3.0
+ */
+public final class BsonInt32 extends BsonNumber implements Comparable<BsonInt32> {
+
+    private final int value;
+
+    /**
+     * Construct a new instance with the given value.
+     *
+     * @param value the value
+     */
+    public BsonInt32(final int value) {
+        this.value = value;
+    }
+
+    @Override
+    public int compareTo(final BsonInt32 o) {
+        return (value < o.value) ? -1 : ((value == o.value) ? 0 : 1);
+    }
+
+    @Override
+    public BsonType getBsonType() {
+        return BsonType.INT32;
+    }
+
+    /**
+     * Gets the integer value.
+     *
+     * @return the value
+     */
+    public int getValue() {
+        return value;
+    }
+
+    @Override
+    public int intValue() {
+        return value;
+    }
+
+    @Override
+    public long longValue() {
+        return value;
+    }
+
+    @Override
+    public Decimal128 decimal128Value() {
+        return new Decimal128(value);
+    }
+
+    @Override
+    public double doubleValue() {
+        return value;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        BsonInt32 bsonInt32 = (BsonInt32) o;
+
+        if (value != bsonInt32.value) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        return value;
+    }
+
+    @Override
+    public String toString() {
+        return "BsonInt32{"
+                + "value=" + value
+                + '}';
+    }
+}
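A tiny sketch of the BsonInt32 number API above (the class name and values are illustrative):

    import org.bson.BsonInt32;

    public final class BsonInt32Sketch {
        public static void main(final String[] args) {
            BsonInt32 a = new BsonInt32(1);
            BsonInt32 b = new BsonInt32(2);
            System.out.println(a.compareTo(b));      // -1
            System.out.println(a.longValue());       // 1: widening, no loss
            System.out.println(a.decimal128Value()); // 1
        }
    }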
diff --git a/bson/src/main/org/bson/BsonInt64.java b/bson/src/main/org/bson/BsonInt64.java
new file mode 100644
index 00000000000..8f2b4f40223
--- /dev/null
+++ b/bson/src/main/org/bson/BsonInt64.java
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson;
+
+import org.bson.types.Decimal128;
+
+/**
+ * A representation of the BSON Int64 type.
+ */
+public final class BsonInt64 extends BsonNumber implements Comparable<BsonInt64> {
+
+    private final long value;
+
+    /**
+     * Construct a new instance with the given value.
+     *
+     * @param value the value
+     */
+    public BsonInt64(final long value) {
+        this.value = value;
+    }
+
+    @Override
+    public int compareTo(final BsonInt64 o) {
+        return (value < o.value) ? -1 : ((value == o.value) ? 0 : 1);
+    }
+
+    @Override
+    public BsonType getBsonType() {
+        return BsonType.INT64;
+    }
+
+    /**
+     * Gets the long value.
+     *
+     * @return the value
+     */
+    public long getValue() {
+        return value;
+    }
+
+    @Override
+    public int intValue() {
+        return (int) value;
+    }
+
+    @Override
+    public long longValue() {
+        return value;
+    }
+
+    @Override
+    public double doubleValue() {
+        return value;
+    }
+
+    @Override
+    public Decimal128 decimal128Value() {
+        return new Decimal128(value);
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        BsonInt64 bsonInt64 = (BsonInt64) o;
+
+        if (value != bsonInt64.value) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        return (int) (value ^ (value >>> 32));
+    }
+
+    @Override
+    public String toString() {
+        return "BsonInt64{"
+                + "value=" + value
+                + '}';
+    }
+}
diff --git a/bson/src/main/org/bson/BsonInvalidOperationException.java b/bson/src/main/org/bson/BsonInvalidOperationException.java
new file mode 100644
index 00000000000..9e405ff919f
--- /dev/null
+++ b/bson/src/main/org/bson/BsonInvalidOperationException.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson;
+
+/**
+ * An exception indicating an invalid BSON operation.
+ *
+ * @since 3.0
+ */
+public class BsonInvalidOperationException extends BSONException {
+    private static final long serialVersionUID = 7684248076818601418L;
+
+    /**
+     * Construct a new instance.
+     *
+     * @param message the message
+     */
+    public BsonInvalidOperationException(final String message) {
+        super(message);
+    }
+
+    /**
+     * Construct a new instance.
+     *
+     * @param message the message
+     * @param t       the throwable cause.
+     * @since 3.5
+     */
+    public BsonInvalidOperationException(final String message, final Throwable t) {
+        super(message, t);
+    }
+}
diff --git a/bson/src/main/org/bson/BsonJavaScript.java b/bson/src/main/org/bson/BsonJavaScript.java
new file mode 100644
index 00000000000..2546bb3c9e4
--- /dev/null
+++ b/bson/src/main/org/bson/BsonJavaScript.java
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +/** + * A representation of the JavaScript Code with Scope BSON type. + * + * @since 3.0 + */ +public class BsonJavaScriptWithScope extends BsonValue { + + private final String code; + private final BsonDocument scope; + + /** + * Construct a new instance with the given code and scope. + * + * @param code the code + * @param scope the scope + */ + public BsonJavaScriptWithScope(final String code, final BsonDocument scope) { + if (code == null) { + throw new IllegalArgumentException("code can not be null"); + } + if (scope == null) { + throw new IllegalArgumentException("scope can not be null"); + } + this.code = code; + this.scope = scope; + } + + @Override + public BsonType getBsonType() { + return BsonType.JAVASCRIPT_WITH_SCOPE; + } + + /** + * Get the code. + * + * @return the code + */ + public String getCode() { + return code; + } + + /** + * Get the scope. + * + * @return the scope + */ + public BsonDocument getScope() { + return scope; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BsonJavaScriptWithScope that = (BsonJavaScriptWithScope) o; + + if (!code.equals(that.code)) { + return false; + } + if (!scope.equals(that.scope)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = code.hashCode(); + result = 31 * result + scope.hashCode(); + return result; + } + + @Override + public String toString() { + return "BsonJavaScriptWithScope{" + + "code=" + getCode() + + "scope=" + scope + + '}'; + } + + static BsonJavaScriptWithScope clone(final BsonJavaScriptWithScope from) { + return new BsonJavaScriptWithScope(from.code, from.scope.clone()); + } +} + diff --git a/bson/src/main/org/bson/BsonMaxKey.java b/bson/src/main/org/bson/BsonMaxKey.java new file mode 100644 index 00000000000..63ee4b48529 --- /dev/null +++ b/bson/src/main/org/bson/BsonMaxKey.java @@ -0,0 +1,44 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson; + +/** + * Represent the maximum key value regardless of the key's type + */ +public final class BsonMaxKey extends BsonValue { + + @Override + public BsonType getBsonType() { + return BsonType.MAX_KEY; + } + + @Override + public boolean equals(final Object o) { + return o instanceof BsonMaxKey; + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public String toString() { + return "BsonMaxKey"; + } + +} diff --git a/bson/src/main/org/bson/BsonMaximumSizeExceededException.java b/bson/src/main/org/bson/BsonMaximumSizeExceededException.java new file mode 100644 index 00000000000..aac48523e0e --- /dev/null +++ b/bson/src/main/org/bson/BsonMaximumSizeExceededException.java @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +/** + * An exception indicating a failure to serialize a BSON document due to it exceeding the maximum size. + * + * @since 3.7 + */ +public class BsonMaximumSizeExceededException extends BsonSerializationException { + private static final long serialVersionUID = 8725368828269129777L; + + /** + * Construct a new instance. + * + * @param message the message + */ + public BsonMaximumSizeExceededException(final String message) { + super(message); + } +} diff --git a/bson/src/main/org/bson/BsonMinKey.java b/bson/src/main/org/bson/BsonMinKey.java new file mode 100644 index 00000000000..d9abf013e87 --- /dev/null +++ b/bson/src/main/org/bson/BsonMinKey.java @@ -0,0 +1,44 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +/** + * Represent the minimum key value regardless of the key's type + */ +public final class BsonMinKey extends BsonValue { + + @Override + public BsonType getBsonType() { + return BsonType.MIN_KEY; + } + + @Override + public boolean equals(final Object o) { + return o instanceof BsonMinKey; + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public String toString() { + return "BsonMinKey"; + } + +} diff --git a/bson/src/main/org/bson/BsonNull.java b/bson/src/main/org/bson/BsonNull.java new file mode 100644 index 00000000000..5d6c701a465 --- /dev/null +++ b/bson/src/main/org/bson/BsonNull.java @@ -0,0 +1,56 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson;
+
+/**
+ * A representation of the BSON Null type.
+ *
+ * @since 3.0
+ */
+public final class BsonNull extends BsonValue {
+
+    /**
+     * A singleton instance of the null value.
+     */
+    public static final BsonNull VALUE = new BsonNull();
+
+    @Override
+    public BsonType getBsonType() {
+        return BsonType.NULL;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        return 0;
+    }
+
+    @Override
+    public String toString() {
+        return "BsonNull";
+    }
+}
diff --git a/bson/src/main/org/bson/BsonNumber.java b/bson/src/main/org/bson/BsonNumber.java
new file mode 100644
index 00000000000..c0449e754ed
--- /dev/null
+++ b/bson/src/main/org/bson/BsonNumber.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson;
+
+import org.bson.types.Decimal128;
+
+/**
+ * Base class for the numeric BSON types. This class mirrors the functionality provided by {@code java.lang.Number}.
+ *
+ * @since 3.0
+ */
+public abstract class BsonNumber extends BsonValue {
+    /**
+     * Returns the value of the specified number as an {@code int}, which may involve rounding or truncation.
+     *
+     * @return the numeric value represented by this object after conversion to type {@code int}.
+     */
+    public abstract int intValue();
+
+    /**
+     * Returns the value of the specified number as a {@code long}, which may involve rounding or truncation.
+     *
+     * @return the numeric value represented by this object after conversion to type {@code long}.
+     */
+    public abstract long longValue();
+
+    /**
+     * Returns the value of the specified number as a {@code double}, which may involve rounding.
+     *
+     * @return the numeric value represented by this object after conversion to type {@code double}.
+     */
+    public abstract double doubleValue();
+
+    /**
+     * Returns the value of the specified number as a {@code Decimal128}, which may involve rounding.
+     *
+     * @return the numeric value represented by this object after conversion to type {@code Decimal128}.
+     * @since 3.4
+     */
+    public abstract Decimal128 decimal128Value();
+}
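All three numeric wrappers share the BsonNumber conversion API above, so callers can work with them polymorphically. A minimal sketch (the class name and values are illustrative):

    import org.bson.BsonDouble;
    import org.bson.BsonInt32;
    import org.bson.BsonInt64;
    import org.bson.BsonNumber;

    public final class BsonNumberSketch {
        public static void main(final String[] args) {
            // Each element prints its double and Decimal128 views.
            for (BsonNumber n : new BsonNumber[] {new BsonInt32(7), new BsonInt64(7L), new BsonDouble(7.5)}) {
                System.out.println(n.doubleValue() + " / " + n.decimal128Value());
            }
        }
    }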
diff --git a/bson/src/main/org/bson/BsonObjectId.java b/bson/src/main/org/bson/BsonObjectId.java
new file mode 100644
index 00000000000..790daa006a1
--- /dev/null
+++ b/bson/src/main/org/bson/BsonObjectId.java
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson;
+
+import org.bson.types.ObjectId;
+
+/**
+ * A representation of the BSON ObjectId type.
+ *
+ * @since 3.0
+ */
+public class BsonObjectId extends BsonValue implements Comparable<BsonObjectId> {
+
+    private final ObjectId value;
+
+    /**
+     * Construct a new instance with a new {@code ObjectId}.
+     */
+    public BsonObjectId() {
+        this(new ObjectId());
+    }
+
+    /**
+     * Construct a new instance with the given {@code ObjectId} instance.
+     *
+     * @param value the ObjectId
+     */
+    public BsonObjectId(final ObjectId value) {
+        if (value == null) {
+            throw new IllegalArgumentException("value may not be null");
+        }
+        this.value = value;
+    }
+
+    /**
+     * Get the {@code ObjectId} value.
+     *
+     * @return the {@code ObjectId} value
+     */
+    public ObjectId getValue() {
+        return value;
+    }
+
+    @Override
+    public BsonType getBsonType() {
+        return BsonType.OBJECT_ID;
+    }
+
+    @Override
+    public int compareTo(final BsonObjectId o) {
+        return value.compareTo(o.value);
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        BsonObjectId that = (BsonObjectId) o;
+
+        if (!value.equals(that.value)) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        return value.hashCode();
+    }
+
+    @Override
+    public String toString() {
+        return "BsonObjectId{"
+                + "value=" + value.toHexString()
+                + '}';
+    }
+}
diff --git a/bson/src/main/org/bson/BsonReader.java b/bson/src/main/org/bson/BsonReader.java
new file mode 100644
index 00000000000..89251df5e42
--- /dev/null
+++ b/bson/src/main/org/bson/BsonReader.java
@@ -0,0 +1,395 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson;
+
+import org.bson.types.Decimal128;
+import org.bson.types.ObjectId;
+
+import java.io.Closeable;
+
+/**
+ * An interface for reading a logical BSON document using a pull-oriented API.
+ *
+ * @since 3.0
+ */
+public interface BsonReader extends Closeable {
+    /**
+     * @return The current BsonType.
+     */
+    BsonType getCurrentBsonType();
+
+    /**
+     * Gets the most recently read name.
+     *
+     * @return the most recently read name
+     */
+    String getCurrentName();
+
+    /**
+     * Reads BSON Binary data from the reader.
+     *
+     * @return A Binary.
+     */
+    BsonBinary readBinaryData();
+
+    /**
+     * Peeks the subtype of the binary data that the reader is positioned at.
This operation is not permitted if the mark is already set. + * + * @return the subtype + * @see #getMark() + */ + byte peekBinarySubType(); + + /** + * Peeks the size of the binary data that the reader is positioned at. This operation is not permitted if the mark is already set. + * + * @return the size of the binary data + * @see #getMark() + * @since 3.4 + */ + int peekBinarySize(); + + /** + * Reads a BSON Binary data element from the reader. + * + * @param name The name of the element. + * @return A Binary. + */ + BsonBinary readBinaryData(String name); + + /** + * Reads a BSON Boolean from the reader. + * + * @return A Boolean. + */ + boolean readBoolean(); + + /** + * Reads a BSON Boolean element from the reader. + * + * @param name The name of the element. + * @return A Boolean. + */ + boolean readBoolean(String name); + + /** + * Reads a BSONType from the reader. + * + * @return A BSONType. + */ + BsonType readBsonType(); + + /** + * Reads a BSON DateTime from the reader. + * + * @return The number of milliseconds since the Unix epoch. + */ + long readDateTime(); + + /** + * Reads a BSON DateTime element from the reader. + * + * @param name The name of the element. + * @return The number of milliseconds since the Unix epoch. + */ + long readDateTime(String name); + + /** + * Reads a BSON Double from the reader. + * + * @return A Double. + */ + double readDouble(); + + /** + * Reads a BSON Double element from the reader. + * + * @param name The name of the element. + * @return A Double. + */ + double readDouble(String name); + + /** + * Reads the end of a BSON array from the reader. + */ + void readEndArray(); + + /** + * Reads the end of a BSON document from the reader. + */ + void readEndDocument(); + + /** + * Reads a BSON Int32 from the reader. + * + * @return An Int32. + */ + int readInt32(); + + /** + * Reads a BSON Int32 element from the reader. + * + * @param name The name of the element. + * @return An Int32. + */ + int readInt32(String name); + + /** + * Reads a BSON Int64 from the reader. + * + * @return An Int64. + */ + long readInt64(); + + /** + * Reads a BSON Int64 element from the reader. + * + * @param name The name of the element. + * @return An Int64. + */ + long readInt64(String name); + + /** + * Reads a BSON Decimal128 from the reader. + * + * @return A Decimal128 + * @since 3.4 + */ + Decimal128 readDecimal128(); + + /** + * Reads a BSON Decimal128 element from the reader. + * + * @param name The name of the element. + * @return A Decimal128 + * @since 3.4 + */ + Decimal128 readDecimal128(String name); + + /** + * Reads a BSON JavaScript from the reader. + * + * @return A string. + */ + String readJavaScript(); + + /** + * Reads a BSON JavaScript element from the reader. + * + * @param name The name of the element. + * @return A string. + */ + String readJavaScript(String name); + + /** + * Reads a BSON JavaScript with scope from the reader (call readStartDocument next to read the scope). + * + * @return A string. + */ + String readJavaScriptWithScope(); + + /** + * Reads a BSON JavaScript with scope element from the reader (call readStartDocument next to read the scope). + * + * @param name The name of the element. + * @return A string. + */ + String readJavaScriptWithScope(String name); + + /** + * Reads a BSON MaxKey from the reader. + */ + void readMaxKey(); + + /** + * Reads a BSON MaxKey element from the reader. + * + * @param name The name of the element. + */ + void readMaxKey(String name); + + /** + * Reads a BSON MinKey from the reader. 
+ */ + void readMinKey(); + + /** + * Reads a BSON MinKey element from the reader. + * + * @param name The name of the element. + */ + void readMinKey(String name); + + /** + * Reads the name of an element from the reader. + * + * @return The name of the element. + */ + String readName(); + + /** + * Reads the name of an element from the reader. + * + * @param name The name of the element. + */ + void readName(String name); + + /** + * Reads a BSON null from the reader. + */ + void readNull(); + + /** + * Reads a BSON null element from the reader. + * + * @param name The name of the element. + */ + void readNull(String name); + + /** + * Reads a BSON ObjectId from the reader. + * + * @return the {@code ObjectId} value + */ + ObjectId readObjectId(); + + /** + * Reads a BSON ObjectId element from the reader. + * + * @param name The name of the element. + * @return ObjectId. + */ + ObjectId readObjectId(String name); + + /** + * Reads a BSON regular expression from the reader. + * + * @return A regular expression. + */ + BsonRegularExpression readRegularExpression(); + + /** + * Reads a BSON regular expression element from the reader. + * + * @param name The name of the element. + * @return A regular expression. + */ + BsonRegularExpression readRegularExpression(String name); + + /** + * Reads a BSON DBPointer from the reader. + * + * @return A DBPointer. + */ + BsonDbPointer readDBPointer(); + + /** + * Reads a BSON DBPointer element from the reader. + * + * @param name The name of the element. + * @return A DBPointer. + */ + BsonDbPointer readDBPointer(String name); + + /** + * Reads the start of a BSON array. + */ + void readStartArray(); + + /** + * Reads the start of a BSON document. + */ + void readStartDocument(); + + /** + * Reads a BSON String from the reader. + * + * @return A String. + */ + String readString(); + + /** + * Reads a BSON string element from the reader. + * + * @param name The name of the element. + * @return A String. + */ + String readString(String name); + + /** + * Reads a BSON symbol from the reader. + * + * @return A string. + */ + String readSymbol(); + + /** + * Reads a BSON symbol element from the reader. + * + * @param name The name of the element. + * @return A string. + */ + String readSymbol(String name); + + /** + * Reads a BSON timestamp from the reader. + * + * @return The combined timestamp/increment. + */ + BsonTimestamp readTimestamp(); + + /** + * Reads a BSON timestamp element from the reader. + * + * @param name The name of the element. + * @return The combined timestamp/increment. + */ + BsonTimestamp readTimestamp(String name); + + /** + * Reads a BSON undefined from the reader. + */ + void readUndefined(); + + /** + * Reads a BSON undefined element from the reader. + * + * @param name The name of the element. + */ + void readUndefined(String name); + + /** + * Skips the name (reader must be positioned on a name). + */ + void skipName(); + + /** + * Skips the value (reader must be positioned on a value). + */ + void skipValue(); + + /** + * Gets a mark representing the current state of the reader. + * + * @return the mark + * @since 3.5 + */ + BsonReaderMark getMark(); + + @Override + void close(); +} diff --git a/bson/src/main/org/bson/BsonReaderMark.java b/bson/src/main/org/bson/BsonReaderMark.java new file mode 100644 index 00000000000..f24190222fa --- /dev/null +++ b/bson/src/main/org/bson/BsonReaderMark.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +/** + * Represents a bookmark that can be used to reset a {@link BsonReader} to its state at the time the mark was created. + * + * @see BsonReader#getMark() + * + * @since 3.5 + */ +public interface BsonReaderMark { + /** + * Reset the {@link BsonReader} to its state at the time the mark was created. + */ + void reset(); +} diff --git a/bson/src/main/org/bson/BsonRegularExpression.java b/bson/src/main/org/bson/BsonRegularExpression.java new file mode 100644 index 00000000000..e9b6839be6a --- /dev/null +++ b/bson/src/main/org/bson/BsonRegularExpression.java @@ -0,0 +1,117 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import java.util.Arrays; + +import static org.bson.assertions.Assertions.notNull; + +/** + * A holder class for a BSON regular expression, so that we can delay compiling into a Pattern until necessary. + * + * @since 3.0 + */ +public final class BsonRegularExpression extends BsonValue { + + private final String pattern; + private final String options; + + /** + * Creates a new instance + * + * @param pattern the regular expression {@link java.util.regex.Pattern} + * @param options the options for the regular expression + */ + public BsonRegularExpression(final String pattern, final String options) { + this.pattern = notNull("pattern", pattern); + this.options = options == null ? "" : sortOptionCharacters(options); + } + + /** + * Creates a new instance with no options set. + * + * @param pattern the regular expression {@link java.util.regex.Pattern} + */ + public BsonRegularExpression(final String pattern) { + this(pattern, null); + } + + @Override + public BsonType getBsonType() { + return BsonType.REGULAR_EXPRESSION; + } + + /** + * Gets the regex pattern. + * + * @return the regular expression pattern + */ + public String getPattern() { + return pattern; + } + + /** + * Gets the options for the regular expression + * + * @return the options. 
+     */
+    public String getOptions() {
+        return options;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        BsonRegularExpression that = (BsonRegularExpression) o;
+
+        if (!options.equals(that.options)) {
+            return false;
+        }
+        if (!pattern.equals(that.pattern)) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = pattern.hashCode();
+        result = 31 * result + options.hashCode();
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "BsonRegularExpression{"
+                + "pattern='" + pattern + '\''
+                + ", options='" + options + '\''
+                + '}';
+    }
+
+    private String sortOptionCharacters(final String options) {
+        char[] chars = options.toCharArray();
+        Arrays.sort(chars);
+        return new String(chars);
+    }
+}
diff --git a/bson/src/main/org/bson/BsonSerializationException.java b/bson/src/main/org/bson/BsonSerializationException.java
new file mode 100644
index 00000000000..d2b7713e980
--- /dev/null
+++ b/bson/src/main/org/bson/BsonSerializationException.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson;
+
+/**
+ * An exception indicating a failure to serialize a BSON value.
+ *
+ * @since 3.0
+ */
+public class BsonSerializationException extends BSONException {
+    private static final long serialVersionUID = -5214580094005440780L;
+
+    /**
+     * Construct a new instance.
+     *
+     * @param message the message
+     */
+    public BsonSerializationException(final String message) {
+        super(message);
+    }
+}
diff --git a/bson/src/main/org/bson/BsonString.java b/bson/src/main/org/bson/BsonString.java
new file mode 100644
index 00000000000..379aaa8ef14
--- /dev/null
+++ b/bson/src/main/org/bson/BsonString.java
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson;
+
+/**
+ * A representation of the BSON String type.
+ *
+ * @since 3.0
+ */
+public class BsonString extends BsonValue implements Comparable<BsonString> {
+
+    private final String value;
+
+    /**
+     * Construct a new instance with the given value.
+ * + * @param value the non-null value + */ + public BsonString(final String value) { + if (value == null) { + throw new IllegalArgumentException("Value can not be null"); + } + this.value = value; + } + + @Override + public int compareTo(final BsonString o) { + return value.compareTo(o.value); + } + + @Override + public BsonType getBsonType() { + return BsonType.STRING; + } + + /** + * Gets the String value. + * + * @return the value + */ + public String getValue() { + return value; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BsonString that = (BsonString) o; + + if (!value.equals(that.value)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return value.hashCode(); + } + + @Override + public String toString() { + return "BsonString{" + + "value='" + value + '\'' + + '}'; + } +} diff --git a/bson/src/main/org/bson/BsonSymbol.java b/bson/src/main/org/bson/BsonSymbol.java new file mode 100644 index 00000000000..b4d9ded22c4 --- /dev/null +++ b/bson/src/main/org/bson/BsonSymbol.java @@ -0,0 +1,89 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Symbol.java + +package org.bson; + +/** + * Class to hold a BSON symbol object, which is an interned string in Ruby + * + * @since 3.0 + */ +public class BsonSymbol extends BsonValue { + + private final String symbol; + + /** + * Creates a new instance. + * + * @param value the symbol value + */ + public BsonSymbol(final String value) { + if (value == null) { + throw new IllegalArgumentException("Value can not be null"); + } + symbol = value; + } + + @Override + public BsonType getBsonType() { + return BsonType.SYMBOL; + } + + /** + * Gets the symbol value + * + * @return the symbol. + */ + public String getSymbol() { + return symbol; + } + + /** + * Will compare equal to a String that is equal to the String that this holds + * + * @param o the Symbol to compare this to + * @return true if parameter o is the same as this Symbol + */ + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BsonSymbol symbol1 = (BsonSymbol) o; + + if (!symbol.equals(symbol1.symbol)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return symbol.hashCode(); + } + + @Override + public String toString() { + return symbol; + } +} diff --git a/bson/src/main/org/bson/BsonTimestamp.java b/bson/src/main/org/bson/BsonTimestamp.java new file mode 100644 index 00000000000..96b9e7610c8 --- /dev/null +++ b/bson/src/main/org/bson/BsonTimestamp.java @@ -0,0 +1,127 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson;
+
+import static java.lang.Long.compareUnsigned;
+
+/**
+ * A value representing the BSON timestamp type.
+ *
+ * @since 3.0
+ */
+public final class BsonTimestamp extends BsonValue implements Comparable<BsonTimestamp> {
+
+    private final long value;
+
+    /**
+     * Construct a new instance with a null time and a 0 increment.
+     */
+    public BsonTimestamp() {
+        value = 0;
+    }
+
+    /**
+     * Construct a new instance for the given value, which combines the time in seconds and the increment as a single long value.
+     *
+     * @param value the timestamp as a single long value
+     * @since 3.5
+     */
+    public BsonTimestamp(final long value) {
+        this.value = value;
+    }
+
+    /**
+     * Construct a new instance for the given time and increment.
+     *
+     * @param seconds   the number of seconds since the epoch
+     * @param increment the increment.
+     */
+    public BsonTimestamp(final int seconds, final int increment) {
+        value = ((long) seconds << 32) | (increment & 0xFFFFFFFFL);
+    }
+
+    @Override
+    public BsonType getBsonType() {
+        return BsonType.TIMESTAMP;
+    }
+
+    /**
+     * Gets the value of the timestamp.
+     *
+     * @return the timestamp value
+     * @since 3.5
+     */
+    public long getValue() {
+        return value;
+    }
+
+    /**
+     * Gets the time in seconds since epoch.
+     *
+     * @return an int representing time in seconds since epoch
+     */
+    public int getTime() {
+        return (int) (value >> 32);
+    }
+
+    /**
+     * Gets the increment value.
+     *
+     * @return an incrementing ordinal for operations within a given second
+     */
+    public int getInc() {
+        return (int) value;
+    }
+
+    @Override
+    public String toString() {
+        return "Timestamp{"
+                + "value=" + getValue()
+                + ", seconds=" + getTime()
+                + ", inc=" + getInc()
+                + '}';
+    }
+
+    @Override
+    public int compareTo(final BsonTimestamp ts) {
+        return compareUnsigned(value, ts.value);
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        BsonTimestamp timestamp = (BsonTimestamp) o;
+
+        if (value != timestamp.value) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        return (int) (value ^ (value >>> 32));
+    }
+}
diff --git a/bson/src/main/org/bson/BsonType.java b/bson/src/main/org/bson/BsonType.java
new file mode 100644
index 00000000000..c3f0530000e
--- /dev/null
+++ b/bson/src/main/org/bson/BsonType.java
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson;
+
+/**
+ * Enumeration of all the BSON types currently supported.
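+ * <p>An added usage sketch (illustrative, not part of the original javadoc):</p>
+ * <pre>{@code
+ * BsonType type = BsonType.findByValue(0x10);  // BsonType.INT32
+ * boolean container = type.isContainer();      // false; only DOCUMENT and ARRAY are containers
+ * }</pre>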
+ *
+ * @since 3.0
+ */
+public enum BsonType {
+    /**
+     * Not a real BSON type. Used to signal the end of a document.
+     */
+    END_OF_DOCUMENT(0x00),
+    // no values of this type exist; it marks the end of a document
+    /**
+     * A BSON double.
+     */
+    DOUBLE(0x01),
+    /**
+     * A BSON string.
+     */
+    STRING(0x02),
+    /**
+     * A BSON document.
+     */
+    DOCUMENT(0x03),
+    /**
+     * A BSON array.
+     */
+    ARRAY(0x04),
+    /**
+     * BSON binary data.
+     */
+    BINARY(0x05),
+    /**
+     * A BSON undefined value.
+     */
+    UNDEFINED(0x06),
+    /**
+     * A BSON ObjectId.
+     */
+    OBJECT_ID(0x07),
+    /**
+     * A BSON bool.
+     */
+    BOOLEAN(0x08),
+    /**
+     * A BSON DateTime.
+     */
+    DATE_TIME(0x09),
+    /**
+     * A BSON null value.
+     */
+    NULL(0x0a),
+    /**
+     * A BSON regular expression.
+     */
+    REGULAR_EXPRESSION(0x0b),
+    /**
+     * A BSON DBPointer.
+     */
+    DB_POINTER(0x0c),
+    /**
+     * BSON JavaScript code.
+     */
+    JAVASCRIPT(0x0d),
+    /**
+     * A BSON symbol.
+     */
+    SYMBOL(0x0e),
+    /**
+     * BSON JavaScript code with a scope (a set of variables with values).
+     */
+    JAVASCRIPT_WITH_SCOPE(0x0f),
+    /**
+     * A BSON 32-bit integer.
+     */
+    INT32(0x10),
+    /**
+     * A BSON timestamp.
+     */
+    TIMESTAMP(0x11),
+    /**
+     * A BSON 64-bit integer.
+     */
+    INT64(0x12),
+    /**
+     * A BSON Decimal128.
+     *
+     * @since 3.4
+     */
+    DECIMAL128(0x13),
+    /**
+     * A BSON MinKey value.
+     */
+    MIN_KEY(0xff),
+    /**
+     * A BSON MaxKey value.
+     */
+    MAX_KEY(0x7f);
+
+    private static final BsonType[] LOOKUP_TABLE = new BsonType[MIN_KEY.getValue() + 1];
+
+    private final int value;
+
+    static {
+        for (final BsonType cur : BsonType.values()) {
+            LOOKUP_TABLE[cur.getValue()] = cur;
+        }
+    }
+
+    BsonType(final int value) {
+        this.value = value;
+    }
+
+    /**
+     * Get the int value of this BSON type.
+     *
+     * @return the int value of this type.
+     */
+    public int getValue() {
+        return value;
+    }
+
+    /**
+     * Gets the {@code BsonType} that corresponds to the given int value.
+     *
+     * @param value the int value of the desired BSON type.
+     * @return the corresponding {@code BsonType}
+     */
+    public static BsonType findByValue(final int value) {
+        return LOOKUP_TABLE[value & 0xFF];
+    }
+
+    /**
+     * Returns whether this type is some sort of containing type, e.g. a document or array.
+     *
+     * @return true if this is some sort of containing type rather than a primitive value
+     */
+    public boolean isContainer() {
+        return this == DOCUMENT || this == ARRAY;
+    }
+}
diff --git a/bson/src/main/org/bson/BsonUndefined.java b/bson/src/main/org/bson/BsonUndefined.java
new file mode 100644
index 00000000000..28f18957c7d
--- /dev/null
+++ b/bson/src/main/org/bson/BsonUndefined.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson;
+
+/**
+ * Represents the value associated with the BSON Undefined type. All values of this type are identical. Note that this type has been
+ * deprecated in the BSON specification.
+ * + * @see BSON Spec + * @see org.bson.BsonType#UNDEFINED + * @since 3.0 + */ +public final class BsonUndefined extends BsonValue { + + @Override + public BsonType getBsonType() { + return BsonType.UNDEFINED; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + + if (o == null || getClass() != o.getClass()) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } +} diff --git a/bson/src/main/org/bson/BsonValue.java b/bson/src/main/org/bson/BsonValue.java new file mode 100644 index 00000000000..2318407d6b7 --- /dev/null +++ b/bson/src/main/org/bson/BsonValue.java @@ -0,0 +1,423 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import static java.lang.String.format; + +/** + * Base class for any BSON type. + * + * @since 3.0 + */ +public abstract class BsonValue { + /** + * Construct a new instance. This is package-protected so that the BSON type system is closed. + */ + BsonValue() { + } + + /** + * Gets the BSON type of this value. + * + * @return the BSON type, which may not be null (but may be BSONType.NULL) + */ + public abstract BsonType getBsonType(); + + /** + * Gets this value as a BsonDocument if it is one, otherwise throws exception + * + * @return a BsonDocument + * @throws org.bson.BsonInvalidOperationException if this value is not of the expected type + */ + public BsonDocument asDocument() { + throwIfInvalidType(BsonType.DOCUMENT); + return (BsonDocument) this; + } + + /** + * Gets this value as a BsonArray if it is one, otherwise throws exception + * + * @return a BsonArray + * @throws org.bson.BsonInvalidOperationException if this value is not of the expected type + */ + public BsonArray asArray() { + throwIfInvalidType(BsonType.ARRAY); + return (BsonArray) this; + } + + /** + * Gets this value as a BsonString if it is one, otherwise throws exception + * + * @return a BsonString + * @throws org.bson.BsonInvalidOperationException if this value is not of the expected type + */ + public BsonString asString() { + throwIfInvalidType(BsonType.STRING); + return (BsonString) this; + } + + /** + * Gets this value as a BsonNumber if it is one, otherwise throws exception + * + * @return a BsonNumber + * @throws org.bson.BsonInvalidOperationException if this value is not of the expected type + */ + public BsonNumber asNumber() { + if (!isNumber()) { + throw new BsonInvalidOperationException(format("Value expected to be of a numerical BSON type is of unexpected type %s", + getBsonType())); + } + return (BsonNumber) this; + } + + /** + * Gets this value as a BsonInt32 if it is one, otherwise throws exception + * + * @return a BsonInt32 + * @throws org.bson.BsonInvalidOperationException if this value is not of the expected type + */ + public BsonInt32 asInt32() { + throwIfInvalidType(BsonType.INT32); + return (BsonInt32) this; + } + + /** + * Gets this value as a BsonInt64 if it is one, otherwise throws exception + * 
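+ * <p>An added usage sketch (illustrative; assumes {@code BsonInt64#getValue()}, defined elsewhere in this patch):</p>
+ * <pre>{@code
+ * if (value.isInt64()) {
+ *     long v = value.asInt64().getValue();
+ * }
+ * }</pre>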
+ * @return a BsonInt64 + * @throws org.bson.BsonInvalidOperationException if this value is not of the expected type + */ + public BsonInt64 asInt64() { + throwIfInvalidType(BsonType.INT64); + return (BsonInt64) this; + } + + /** + * Gets this value as a BsonDecimal128 if it is one, otherwise throws exception + * + * @return a BsonDecimal128 + * @throws org.bson.BsonInvalidOperationException if this value is not of the expected type + * @since 3.4 + */ + public BsonDecimal128 asDecimal128() { + throwIfInvalidType(BsonType.DECIMAL128); + return (BsonDecimal128) this; + } + + /** + * Gets this value as a BsonDouble if it is one, otherwise throws exception + * + * @return a BsonDouble + * @throws org.bson.BsonInvalidOperationException if this value is not of the expected type + */ + public BsonDouble asDouble() { + throwIfInvalidType(BsonType.DOUBLE); + return (BsonDouble) this; + } + + /** + * Gets this value as a BsonBoolean if it is one, otherwise throws exception + * + * @return a BsonBoolean + * @throws org.bson.BsonInvalidOperationException if this value is not of the expected type + */ + public BsonBoolean asBoolean() { + throwIfInvalidType(BsonType.BOOLEAN); + return (BsonBoolean) this; + } + + /** + * Gets this value as an BsonObjectId if it is one, otherwise throws exception + * + * @return an BsonObjectId + * @throws org.bson.BsonInvalidOperationException if this value is not of the expected type + */ + public BsonObjectId asObjectId() { + throwIfInvalidType(BsonType.OBJECT_ID); + return (BsonObjectId) this; + } + + /** + * Gets this value as a BsonDbPointer if it is one, otherwise throws exception + * + * @return an BsonDbPointer + * @throws org.bson.BsonInvalidOperationException if this value is not of the expected type + */ + public BsonDbPointer asDBPointer() { + throwIfInvalidType(BsonType.DB_POINTER); + return (BsonDbPointer) this; + } + + /** + * Gets this value as a BsonTimestamp if it is one, otherwise throws exception + * + * @return an BsonTimestamp + * @throws org.bson.BsonInvalidOperationException if this value is not of the expected type + */ + public BsonTimestamp asTimestamp() { + throwIfInvalidType(BsonType.TIMESTAMP); + return (BsonTimestamp) this; + } + + /** + * Gets this value as a BsonBinary if it is one, otherwise throws exception + * + * @return an BsonBinary + * @throws org.bson.BsonInvalidOperationException if this value is not of the expected type + */ + public BsonBinary asBinary() { + throwIfInvalidType(BsonType.BINARY); + return (BsonBinary) this; + } + + /** + * Gets this value as a BsonDateTime if it is one, otherwise throws exception + * + * @return an BsonDateTime + * @throws org.bson.BsonInvalidOperationException if this value is not of the expected type + */ + public BsonDateTime asDateTime() { + throwIfInvalidType(BsonType.DATE_TIME); + return (BsonDateTime) this; + } + + /** + * Gets this value as a BsonSymbol if it is one, otherwise throws exception + * + * @return an BsonSymbol + * @throws org.bson.BsonInvalidOperationException if this value is not of the expected type + */ + public BsonSymbol asSymbol() { + throwIfInvalidType(BsonType.SYMBOL); + return (BsonSymbol) this; + } + + /** + * Gets this value as a BsonRegularExpression if it is one, otherwise throws exception + * + * @return an BsonRegularExpression + * @throws org.bson.BsonInvalidOperationException if this value is not of the expected type + */ + public BsonRegularExpression asRegularExpression() { + throwIfInvalidType(BsonType.REGULAR_EXPRESSION); + return (BsonRegularExpression) 
this; + } + + /** + * Gets this value as a {@code BsonJavaScript} if it is one, otherwise throws exception + * + * @return a BsonJavaScript + * @throws org.bson.BsonInvalidOperationException if this value is not of the expected type + */ + public BsonJavaScript asJavaScript() { + throwIfInvalidType(BsonType.JAVASCRIPT); + return (BsonJavaScript) this; + } + + /** + * Gets this value as a BsonJavaScriptWithScope if it is one, otherwise throws exception + * + * @return a BsonJavaScriptWithScope + * @throws org.bson.BsonInvalidOperationException if this value is not of the expected type + */ + public BsonJavaScriptWithScope asJavaScriptWithScope() { + throwIfInvalidType(BsonType.JAVASCRIPT_WITH_SCOPE); + return (BsonJavaScriptWithScope) this; + } + + + /** + * Returns true if this is a BsonNull, false otherwise. + * + * @return true if this is a BsonNull, false otherwise + */ + public boolean isNull() { + return this instanceof BsonNull; + } + + /** + * Returns true if this is a BsonDocument, false otherwise. + * + * @return true if this is a BsonDocument, false otherwise + */ + public boolean isDocument() { + return this instanceof BsonDocument; + } + + /** + * Returns true if this is a BsonArray, false otherwise. + * + * @return true if this is a BsonArray, false otherwise + */ + public boolean isArray() { + return this instanceof BsonArray; + } + + /** + * Returns true if this is a BsonString, false otherwise. + * + * @return true if this is a BsonString, false otherwise + */ + public boolean isString() { + return this instanceof BsonString; + } + + /** + * Returns true if this is a BsonNumber, false otherwise. + * + * @return true if this is a BsonNumber, false otherwise + */ + public boolean isNumber() { + return this instanceof BsonNumber; + } + + /** + * Returns true if this is a BsonInt32, false otherwise. + * + * @return true if this is a BsonInt32, false otherwise + */ + public boolean isInt32() { + return this instanceof BsonInt32; + } + + /** + * Returns true if this is a BsonInt64, false otherwise. + * + * @return true if this is a BsonInt64, false otherwise + */ + public boolean isInt64() { + return this instanceof BsonInt64; + } + + /** + * Returns true if this is a BsonDecimal128, false otherwise. + * + * @return true if this is a BsonDecimal128, false otherwise + * @since 3.4 + */ + public boolean isDecimal128() { + return this instanceof BsonDecimal128; + } + + /** + * Returns true if this is a BsonDouble, false otherwise. + * + * @return true if this is a BsonDouble, false otherwise + */ + public boolean isDouble() { + return this instanceof BsonDouble; + + } + + /** + * Returns true if this is a BsonBoolean, false otherwise. + * + * @return true if this is a BsonBoolean, false otherwise + */ + public boolean isBoolean() { + return this instanceof BsonBoolean; + + } + + /** + * Returns true if this is an BsonObjectId, false otherwise. + * + * @return true if this is an BsonObjectId, false otherwise + */ + public boolean isObjectId() { + return this instanceof BsonObjectId; + } + + /** + * Returns true if this is a BsonDbPointer, false otherwise. + * + * @return true if this is a BsonDbPointer, false otherwise + */ + public boolean isDBPointer() { + return this instanceof BsonDbPointer; + } + + /** + * Returns true if this is a BsonTimestamp, false otherwise. + * + * @return true if this is a BsonTimestamp, false otherwise + */ + public boolean isTimestamp() { + return this instanceof BsonTimestamp; + } + + /** + * Returns true if this is a BsonBinary, false otherwise. 
+ * + * @return true if this is a BsonBinary, false otherwise + */ + public boolean isBinary() { + return this instanceof BsonBinary; + } + + /** + * Returns true if this is a BsonDateTime, false otherwise. + * + * @return true if this is a BsonDateTime, false otherwise + */ + public boolean isDateTime() { + return this instanceof BsonDateTime; + } + + /** + * Returns true if this is a BsonSymbol, false otherwise. + * + * @return true if this is a BsonSymbol, false otherwise + */ + public boolean isSymbol() { + return this instanceof BsonSymbol; + } + + /** + * Returns true if this is a BsonRegularExpression, false otherwise. + * + * @return true if this is a BsonRegularExpression, false otherwise + */ + public boolean isRegularExpression() { + return this instanceof BsonRegularExpression; + } + + /** + * Returns true if this is a BsonJavaScript, false otherwise. + * + * @return true if this is a BsonJavaScript, false otherwise + */ + public boolean isJavaScript() { + return this instanceof BsonJavaScript; + } + + /** + * Returns true if this is a BsonJavaScriptWithScope, false otherwise. + * + * @return true if this is a BsonJavaScriptWithScope, false otherwise + */ + public boolean isJavaScriptWithScope() { + return this instanceof BsonJavaScriptWithScope; + } + + private void throwIfInvalidType(final BsonType expectedType) { + if (getBsonType() != expectedType) { + throw new BsonInvalidOperationException(format("Value expected to be of type %s is of unexpected type %s", + expectedType, getBsonType())); + } + } +} diff --git a/bson/src/main/org/bson/BsonWriter.java b/bson/src/main/org/bson/BsonWriter.java new file mode 100644 index 00000000000..c3da5dc6059 --- /dev/null +++ b/bson/src/main/org/bson/BsonWriter.java @@ -0,0 +1,360 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import org.bson.types.Decimal128; +import org.bson.types.ObjectId; + +/** + * An interface for writing a logical BSON document using a push-oriented API. + * + * @since 3.0 + */ +public interface BsonWriter { + /** + * Flushes any pending data to the output destination. + */ + void flush(); + + /** + * Writes a BSON Binary data element to the writer. + * + * @param binary The Binary data. + */ + void writeBinaryData(BsonBinary binary); + + /** + * Writes a BSON Binary data element to the writer. + * + * @param name The name of the element. + * @param binary The Binary data value. + */ + void writeBinaryData(String name, BsonBinary binary); + + /** + * Writes a BSON Boolean to the writer. + * + * @param value The Boolean value. + */ + void writeBoolean(boolean value); + + /** + * Writes a BSON Boolean element to the writer. + * + * @param name The name of the element. + * @param value The Boolean value. + */ + void writeBoolean(String name, boolean value); + + /** + * Writes a BSON DateTime to the writer. + * + * @param value The number of milliseconds since the Unix epoch. 
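+ * <p>An added usage sketch (illustrative) of the push-oriented pattern for writing a single element:</p>
+ * <pre>{@code
+ * writer.writeStartDocument();
+ * writer.writeName("createdAt");
+ * writer.writeDateTime(System.currentTimeMillis());
+ * writer.writeEndDocument();
+ * }</pre>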
+ */ + void writeDateTime(long value); + + /** + * Writes a BSON DateTime element to the writer. + * + * @param name The name of the element. + * @param value The number of milliseconds since the Unix epoch. + */ + void writeDateTime(String name, long value); + + /** + * Writes a BSON DBPointer to the writer. + * + * @param value The DBPointer to write + */ + void writeDBPointer(BsonDbPointer value); + + /** + * Writes a BSON DBPointer element to the writer. + * + * @param name The name of the element. + * @param value The DBPointer to write + */ + void writeDBPointer(String name, BsonDbPointer value); + + /** + * Writes a BSON Double to the writer. + * + * @param value The Double value. + */ + void writeDouble(double value); + + /** + * Writes a BSON Double element to the writer. + * + * @param name The name of the element. + * @param value The Double value. + */ + void writeDouble(String name, double value); + + /** + * Writes the end of a BSON array to the writer. + */ + void writeEndArray(); + + /** + * Writes the end of a BSON document to the writer. + */ + void writeEndDocument(); + + /** + * Writes a BSON Int32 to the writer. + * + * @param value The Int32 value. + */ + void writeInt32(int value); + + /** + * Writes a BSON Int32 element to the writer. + * + * @param name The name of the element. + * @param value The Int32 value. + */ + void writeInt32(String name, int value); + + /** + * Writes a BSON Int64 to the writer. + * + * @param value The Int64 value. + */ + void writeInt64(long value); + + /** + * Writes a BSON Int64 element to the writer. + * + * @param name The name of the element. + * @param value The Int64 value. + */ + void writeInt64(String name, long value); + + /** + * Writes a BSON Decimal128 to the writer. + * + * @param value The Decimal128 value. + * @since 3.4 + */ + void writeDecimal128(Decimal128 value); + + /** + * Writes a BSON Decimal128 element to the writer. + * + * @param name The name of the element. + * @param value The Decimal128 value. + * @since 3.4 + */ + void writeDecimal128(String name, Decimal128 value); + + /** + * Writes a BSON JavaScript to the writer. + * + * @param code The JavaScript code. + */ + void writeJavaScript(String code); + + /** + * Writes a BSON JavaScript element to the writer. + * + * @param name The name of the element. + * @param code The JavaScript code. + */ + void writeJavaScript(String name, String code); + + /** + * Writes a BSON JavaScript to the writer (call WriteStartDocument to start writing the scope). + * + * @param code The JavaScript code. + */ + void writeJavaScriptWithScope(String code); + + /** + * Writes a BSON JavaScript element to the writer (call WriteStartDocument to start writing the scope). + * + * @param name The name of the element. + * @param code The JavaScript code. + */ + void writeJavaScriptWithScope(String name, String code); + + /** + * Writes a BSON MaxKey to the writer. + */ + void writeMaxKey(); + + /** + * Writes a BSON MaxKey element to the writer. + * + * @param name The name of the element. + */ + void writeMaxKey(String name); + + /** + * Writes a BSON MinKey to the writer. + */ + void writeMinKey(); + + /** + * Writes a BSON MinKey element to the writer. + * + * @param name The name of the element. + */ + void writeMinKey(String name); + + /** + * Writes the name of an element to the writer. + * + * @param name The name of the element. + */ + void writeName(String name); + + /** + * Writes a BSON null to the writer. 
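+ * <p>An added note (illustrative): within a document, pair this with {@code writeName}, or use the single-call element form:</p>
+ * <pre>{@code
+ * writer.writeName("middleName");
+ * writer.writeNull();                  // typically equivalent to writer.writeNull("middleName")
+ * }</pre>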
+ */ + void writeNull(); + + /** + * Writes a BSON null element to the writer. + * + * @param name The name of the element. + */ + void writeNull(String name); + + /** + * Writes a BSON ObjectId to the writer. + * + * @param objectId The ObjectId value. + */ + void writeObjectId(ObjectId objectId); + + /** + * Writes a BSON ObjectId element to the writer. + * + * @param name The name of the element. + * @param objectId The ObjectId value. + */ + void writeObjectId(String name, ObjectId objectId); + + /** + * Writes a BSON regular expression to the writer. + * + * @param regularExpression the regular expression to write. + */ + void writeRegularExpression(BsonRegularExpression regularExpression); + + /** + * Writes a BSON regular expression element to the writer. + * + * @param name The name of the element. + * @param regularExpression The RegularExpression value. + */ + void writeRegularExpression(String name, BsonRegularExpression regularExpression); + + /** + * Writes the start of a BSON array to the writer. + * + * @throws BsonSerializationException if maximum serialization depth exceeded. + */ + void writeStartArray(); + + /** + * Writes the start of a BSON array element to the writer. + * + * @param name The name of the element. + */ + void writeStartArray(String name); + + /** + * Writes the start of a BSON document to the writer. + * + * @throws BsonSerializationException if maximum serialization depth exceeded. + */ + void writeStartDocument(); + + /** + * Writes the start of a BSON document element to the writer. + * + * @param name The name of the element. + */ + void writeStartDocument(String name); + + /** + * Writes a BSON String to the writer. + * + * @param value The String value. + */ + void writeString(String value); + + /** + * Writes a BSON String element to the writer. + * + * @param name The name of the element. + * @param value The String value. + */ + void writeString(String name, String value); + + /** + * Writes a BSON Symbol to the writer. + * + * @param value The symbol. + */ + void writeSymbol(String value); + + /** + * Writes a BSON Symbol element to the writer. + * + * @param name The name of the element. + * @param value The symbol. + */ + void writeSymbol(String name, String value); + + /** + * Writes a BSON Timestamp to the writer. + * + * @param value The combined timestamp/increment value. + */ + void writeTimestamp(BsonTimestamp value); + + /** + * Writes a BSON Timestamp element to the writer. + * + * @param name The name of the element. + * @param value The combined timestamp/increment value. + */ + void writeTimestamp(String name, BsonTimestamp value); + + /** + * Writes a BSON undefined to the writer. + */ + void writeUndefined(); + + /** + * Writes a BSON undefined element to the writer. + * + * @param name The name of the element. + */ + void writeUndefined(String name); + + /** + * Reads a single document from a BsonReader and writes it to this. + * + * @param reader The source. + */ + void pipe(BsonReader reader); + +} diff --git a/bson/src/main/org/bson/BsonWriterSettings.java b/bson/src/main/org/bson/BsonWriterSettings.java new file mode 100644 index 00000000000..9e3f1243e14 --- /dev/null +++ b/bson/src/main/org/bson/BsonWriterSettings.java @@ -0,0 +1,51 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson;
+
+/**
+ * All the customisable settings for writing BSON.
+ *
+ * @since 3.0
+ */
+public class BsonWriterSettings {
+    private final int maxSerializationDepth;
+
+    /**
+     * Creates a new instance of the settings with the given maximum serialization depth.
+     *
+     * @param maxSerializationDepth the maximum number of nested levels to serialise
+     */
+    public BsonWriterSettings(final int maxSerializationDepth) {
+        this.maxSerializationDepth = maxSerializationDepth;
+    }
+
+    /**
+     * Creates a new instance of the settings with the default maximum serialization depth of 1024.
+     */
+    public BsonWriterSettings() {
+        this(1024);
+    }
+
+    /**
+     * Gets the maximum number of levels of depth defined by this settings object.
+     *
+     * @return the maximum number of levels that can be serialized.
+     */
+    public int getMaxSerializationDepth() {
+        return maxSerializationDepth;
+    }
+}
diff --git a/bson/src/main/org/bson/ByteBuf.java b/bson/src/main/org/bson/ByteBuf.java
new file mode 100644
index 00000000000..cd14d2f93df
--- /dev/null
+++ b/bson/src/main/org/bson/ByteBuf.java
@@ -0,0 +1,525 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+/**
+ *

+ * <p>An interface wrapper around a {@code java.nio.ByteBuffer} which additionally is {@code Closeable}, so that pooled byte buffers know
+ * how.</p>
+ *
+ * <p>This interface is not frozen yet, and methods may be added in a minor release, so beware implementing this yourself.</p>
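+ * <p>An added usage sketch (illustrative; {@code ByteBufNIO} is the NIO-backed implementation added in this patch):</p>
+ * <pre>{@code
+ * ByteBuf buf = new ByteBufNIO(ByteBuffer.allocate(8));
+ * buf.putInt(42);          // fill
+ * buf.flip();              // switch from writing to reading
+ * int v = buf.getInt();    // drain: 42
+ * buf.release();           // release the single initial reference
+ * }</pre>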

+ * + * @since 3.0 + */ +public interface ByteBuf { + + /** + * Returns this buffer's capacity. + * + * @return The capacity of this buffer + */ + int capacity(); + + /** + *

+ * <p>Absolute put method (optional operation).</p>
+ *
+ * <p>Writes the given byte into this buffer at the given index.</p>

+ * + * @param index The index at which the byte will be written + * @param value The byte value to be written + * @return This buffer + * @throws IndexOutOfBoundsException If {@code index} is negative or not smaller than the buffer's limit + * @throws java.nio.ReadOnlyBufferException If this buffer is read-only + */ + ByteBuf put(int index, byte value); + + /** + * Returns the number of elements between the current position and the limit. + * + * @return The number of elements remaining in this buffer + */ + int remaining(); + + /** + *

+ * <p>Relative bulk put method (optional operation).</p>
+ *
+ * <p>This method transfers bytes into this buffer from the given source array. If there are more bytes to be copied from the array
+ * than remain in this buffer, that is, if {@code length > remaining()}, then no bytes are transferred and a {@link
+ * java.nio.BufferOverflowException} is thrown.</p>
+ *
+ * <p>Otherwise, this method copies {@code length} bytes from the given array into this buffer, starting at the given offset in the
+ * array and at the current position of this buffer. The position of this buffer is then incremented by {@code length}.</p>
+ *
+ * <p>In other words, an invocation of this method of the form {@code dst.put(src, off, len)} has exactly the same effect as the
+ * loop</p>
+ * <pre>{@code
+ *     for (int i = off; i < off + len; i++) {
+ *         dst.put(a[i]);
+ *     }}
+ * </pre>
+ *
+ * <p>except that it first checks that there is sufficient space in this buffer and it is potentially much more efficient.</p>

+ * + * @param src The array from which bytes are to be read + * @param offset The offset within the array of the first byte to be read; must be non-negative and no larger than {@code array.length} + * @param length The number of bytes to be read from the given array; must be non-negative and no larger than {@code array.length - + * offset} + * @return This buffer + * @throws java.nio.BufferOverflowException If there is insufficient space in this buffer + * @throws IndexOutOfBoundsException If the preconditions on the {@code offset} and {@code length} parameters do not hold + * @throws java.nio.ReadOnlyBufferException If this buffer is read-only + */ + ByteBuf put(byte[] src, int offset, int length); + + /** + * States whether there are any elements between the current position and the limit. + * + * @return {@code true} if, and only if, there is at least one element remaining in this buffer + */ + boolean hasRemaining(); + + /** + *

+ * <p>Relative put method (optional operation).</p>
+ *
+ * <p>Writes the given byte into this buffer at the current position, and then increments the position.</p>

+ * + * @param value The byte to be written + * @return This buffer + * @throws java.nio.BufferOverflowException If this buffer's current position is not smaller than its limit + * @throws java.nio.ReadOnlyBufferException If this buffer is read-only + */ + ByteBuf put(byte value); + + /** + * Writes the given int value into this buffer at the current position, + * using the current byte order, and increments the position by 4. + * + * @param value the int value to be written + * @return this buffer + * @throws java.nio.BufferOverflowException if there are fewer than 4 bytes remaining in this buffer + * @throws java.nio.ReadOnlyBufferException if this buffer is read-only + * @since 5.4 + */ + ByteBuf putInt(int value); + + /** + * Writes the given int value into this buffer at the current position, + * using the current byte order, and increments the position by 4. + * + * @param value the int value to be written + * @param index the index at which the int will be written + * @return this buffer + * @throws java.nio.BufferOverflowException if there are fewer than 4 bytes remaining in this buffer + * @throws java.nio.ReadOnlyBufferException if this buffer is read-only + * @since 5.4 + */ + ByteBuf putInt(int index, int value); + + /** + * Writes the given double value into this buffer at the current position, + * using the current byte order, and increments the position by 8. + * + * @param value the double value to be written + * @return this buffer + * @throws java.nio.BufferOverflowException if there are fewer than 8 bytes remaining in this buffer + * @throws java.nio.ReadOnlyBufferException if this buffer is read-only + * @since 5.4 + */ + ByteBuf putDouble(double value); + + /** + * Writes the given long value into this buffer at the current position, + * using the current byte order, and increments the position by 8. + * + * @param value the long value to be written + * @return this buffer + * @throws java.nio.BufferOverflowException if there are fewer than 8 bytes remaining in this buffer + * @throws java.nio.ReadOnlyBufferException if this buffer is read-only + * @since 5.4 + */ + ByteBuf putLong(long value); + + /** + *

+ * <p>Flips this buffer. The limit is set to the current position and then the position is set to zero. If the mark is defined then it
+ * is discarded.</p>
+ *
+ * <p>After a sequence of channel-read or put operations, invoke this method to prepare for a sequence of channel-write or
+ * relative get operations. For example:</p>
+ * <pre>
+ * buf.put(magic);    // Prepend header
+ * in.read(buf);      // Read data into rest of buffer
+ * buf.flip();        // Flip buffer
+ * out.write(buf);    // Write header + data to channel
+ * </pre>
+ * <p>This method is often used in conjunction with the {@link java.nio.ByteBuffer#compact compact} method when transferring data from
+ * one place to another.</p>

+ * + * @return This buffer + */ + ByteBuf flip(); + + /** + *

+ * <p>Returns the byte array that backs this buffer (optional operation).</p>
+ *
+ * <p>Modifications to this buffer's content will cause the returned array's content to be modified, and vice versa.</p>

+ * + * @return The array that backs this buffer + * @throws java.nio.ReadOnlyBufferException If this buffer is backed by an array but is read-only + * @throws UnsupportedOperationException If this buffer is not backed by an accessible array + */ + byte[] array(); + + /** + *

+ * <p>States whether this buffer is backed by an accessible byte array.</p>
+ *
+ * <p>If this method returns {@code true} then the {@link #array()} and {@link #arrayOffset()} methods may safely be invoked.</p>
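+ * <p>An added usage sketch (illustrative):</p>
+ * <pre>{@code
+ * if (buf.isBackedByArray()) {
+ *     byte[] raw = buf.array();
+ *     int first = buf.arrayOffset();  // offset of this buffer's first byte within raw
+ * }
+ * }</pre>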

+ * + * @return {@code true} if, and only if, this buffer is backed by an array and is not read-only + * @since 5.5 + */ + boolean isBackedByArray(); + + /** + * Returns the offset of the first byte within the backing byte array of + * this buffer. + * + * @return the offset within this buffer's array. + * @throws java.nio.ReadOnlyBufferException If this buffer is backed by an array but is read-only + * @throws UnsupportedOperationException if this buffer is not backed by an accessible array + * @since 5.5 + */ + int arrayOffset(); + + /** + * Returns this buffer's limit. + * + * @return The limit of this buffer + */ + int limit(); + + /** + * Sets this buffer's position. If the mark is defined and larger than the new position then it is discarded. + * + * @param newPosition The new position value; must be non-negative and no larger than the current limit + * @return This buffer + * @throws IllegalArgumentException If the preconditions on {@code newPosition} do not hold + */ + ByteBuf position(int newPosition); + + /** + *

+ * <p>Clears this buffer. The position is set to zero, the limit is set to the capacity, and the mark is discarded.</p>
+ *
+ * <p>Invoke this method before using a sequence of channel-read or put operations to fill this buffer. For example:</p>
+ *
+ * <pre>
+ * buf.clear();     // Prepare buffer for reading
+ * in.read(buf);    // Read data
+ * </pre>
+ * <p>This method does not actually erase the data in the buffer, but it is named as if it did because it will most often be used in
+ * situations in which that might as well be the case.</p>

+ * + * @return This buffer + */ + ByteBuf clear(); + + /** + * Modifies this buffer's byte order. + * + * @param byteOrder The new byte order, either {@link ByteOrder#BIG_ENDIAN BIG_ENDIAN} or {@link ByteOrder#LITTLE_ENDIAN LITTLE_ENDIAN} + * @return This buffer + */ + ByteBuf order(ByteOrder byteOrder); + + /** + * Relative get method. Reads the byte at this buffer's current position, and then increments the position. + * + * @return The byte at the buffer's current position + * @throws java.nio.BufferUnderflowException If the buffer's current position is not smaller than its limit + */ + byte get(); + + /** + * Absolute get method. Reads the byte at the given index. + * + * @param index The index from which the byte will be read + * @return The byte at the given index + * @throws IndexOutOfBoundsException If {@code index} is negative or not smaller than the buffer's limit + */ + byte get(int index); + + /** + *

+ * <p>Relative bulk {@code get} method.</p>
+ *
+ * <p>This method transfers bytes from this buffer into the given destination array. An invocation of this method of the form {@code
+ * src.get(a)} behaves in exactly the same way as the invocation:</p>

+ * + *
+     * src.get(a, 0, a.length)
+     * 
+ * + * @param bytes the destination byte array + * @return This buffer + * @throws java.nio.BufferUnderflowException If there are fewer than {@code length} bytes remaining in this buffer + */ + ByteBuf get(byte[] bytes); + + /** + *

+ * <p>Absolute bulk {@code get} method.</p>
+ *
+ * <p>This method transfers bytes from this buffer into the given destination array. An invocation of this method of the form {@code
+ * src.get(a)} behaves in exactly the same way as the invocation:</p>

+ * + *
+     * src.get(index, a, 0, a.length)
+     * 
+ * + * @param index The index from which the bytes will be read + * @param bytes the destination byte array + * @return This buffer + * @throws java.nio.BufferUnderflowException If there are fewer than {@code length} bytes remaining in this buffer + */ + ByteBuf get(int index, byte[] bytes); + + /** + * Relative bulk get method. + * + *

+ * <p>This method transfers bytes from this buffer into the given
+ * destination array. If there are fewer bytes remaining in the
+ * buffer than are required to satisfy the request, that is, if
+ * {@code length > remaining()}, then no
+ * bytes are transferred and a {@link java.nio.BufferUnderflowException} is
+ * thrown.
+ *
+ * <p>Otherwise, this method copies {@code length} bytes from this
+ * buffer into the given array, starting at the current position of this
+ * buffer and at the given offset in the array. The position of this
+ * buffer is then incremented by {@code length}.
+ *
+ * <p>In other words, an invocation of this method of the form
+ * {@code src.get(dst, off, len)}
+ * has exactly the same effect as the loop
+ *
+ * <pre>{@code
+ *     for (int i = off; i < off + len; i++)
+ *         dst[i] = src.get();
+ * }</pre>
+ * + * except that it first checks that there are sufficient bytes in + * this buffer and it is potentially much more efficient. + * + * @param bytes + * The array into which bytes are to be written + * + * @param offset + * The offset within the array of the first byte to be + * written; must be non-negative and no larger than + * {@code dst.length} + * + * @param length + * The maximum number of bytes to be written to the given + * array; must be non-negative and no larger than + * {@code dst.length - offset} + * + * @return This buffer + * + * @throws java.nio.BufferUnderflowException + * If there are fewer than {@code length} bytes + * remaining in this buffer + * + * @throws IndexOutOfBoundsException + * If the preconditions on the {@code offset} and {@code length} + * parameters do not hold + */ + ByteBuf get(byte[] bytes, int offset, int length); + + /** + * Absolute bulk get method. + * + *

+ * <p>This method transfers bytes from this buffer into the given destination array. If there are fewer bytes remaining in the buffer
+ * than are required to satisfy the request, that is, if {@code length > remaining}, then no bytes are
+ * transferred and a {@link java.nio.BufferUnderflowException} is thrown.</p>
+ *
+ * <p>Otherwise, this method copies {@code length} bytes from this buffer into the given array, starting at the given index in this
+ * buffer and at the given offset in the array.</p>
+ *
+ * <p>In other words, an invocation of this method of the form {@code src.get(dst, off, len)} has exactly the same
+ * effect as the loop</p>
+ * <pre>{@code
+ *     for (int i = off; i < off + len; i++)
+ *         dst[i] = src.get(i);
+ * }</pre>
+ * + * except that it first checks that there are sufficient bytes in this buffer and it is potentially much more efficient. + * + * @param index The index from which the bytes will be read + * @param bytes The array into which bytes are to be written + * @param offset The offset within the array of the first byte to be written; must be non-negative and no larger than + * {@code dst.length} + * @param length The maximum number of bytes to be written to the given array; must be non-negative and no larger than + * {@code dst.length - offset} + * @return This buffer + * @throws java.nio.BufferUnderflowException If there are fewer than {@code length} bytes remaining in this buffer + * @throws IndexOutOfBoundsException If the preconditions on the {@code offset} and {@code length} parameters do not hold + */ + ByteBuf get(int index, byte[] bytes, int offset, int length); + + /** + *

+ * <p>Relative get method for reading a long value.</p>
+ *
+ * <p>Reads the next eight bytes at this buffer's current position,
+ * composing them into a long value according to the current byte order, and then increments the position by eight.</p>

+ * + * @return The long value at the buffer's current position + * @throws java.nio.BufferUnderflowException If there are fewer than eight bytes remaining in this buffer + */ + long getLong(); + + /** + * Absolute get method for reading a long value. + * + *

+ * <p>Reads eight bytes at the given index, composing them into a long value according to the current byte order.</p>

+ * + * @param index The index from which the bytes will be read + * + * @return The long value at the given index + * + * @throws IndexOutOfBoundsException If {@code index} is negative or not smaller than the buffer's limit, minus seven + */ + long getLong(int index); + + /** + *

+ * <p>Relative get method for reading a double value.</p>
+ *
+ * <p>Reads the next eight bytes at this buffer's current position, composing them into a double value according to the current byte
+ * order, and then increments the position by eight.</p>

+ * + * @return The double value at the buffer's current position + * @throws java.nio.BufferUnderflowException If there are fewer than eight bytes remaining in this buffer + */ + double getDouble(); + + /** + * Absolute get method for reading a double value. + * + *

+ * <p>Reads eight bytes at the given index, composing them into a double value
+ * according to the current byte order.</p>

+ * + * @param index The index from which the bytes will be read + * @return The double value at the given index + * @throws IndexOutOfBoundsException If {@code index} is negative or not smaller than the buffer's limit, minus seven + */ + double getDouble(int index); + + /** + *

+ * <p>Relative get method for reading an int value.</p>
+ *
+ * <p>Reads the next four bytes at this buffer's current position, composing them into an int value according to the current byte order,
+ * and then increments the position by four.</p>
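+ * <p>An added usage sketch (illustrative; a BSON document begins with a little-endian int32 length prefix):</p>
+ * <pre>{@code
+ * buf.position(0);
+ * int documentLength = buf.getInt();  // reads 4 bytes and advances the position to 4
+ * }</pre>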

+ * + * @return The int value at the buffer's current position + * @throws java.nio.BufferUnderflowException If there are fewer than four bytes remaining in this buffer + */ + int getInt(); + + /** + * Absolute get method for reading an int value.

+ * <p>Reads four bytes at the given index, composing them into an int value
+ * according to the current byte order.</p>

+ * + * @param index The index from which the bytes will be read + * @return The int value at the given index + * @throws IndexOutOfBoundsException If {@code index} is negative or not smaller than the buffer's limit, minus three + */ + int getInt(int index); + + /** + * Returns this buffer's position. + * + * @return The position of this buffer + */ + int position(); + + /** + * Sets this buffer's limit. If the position is larger than the new limit then it is set to the new limit. If the mark is defined and + * larger than the new limit then it is discarded. + * + * @param newLimit The new limit value; must be non-negative and no larger than this buffer's capacity + * @return This buffer + * @throws IllegalArgumentException If the preconditions on {@code newLimit} do not hold + */ + ByteBuf limit(int newLimit); + + + /** + *

+ * <p>Creates a new, read-only byte buffer that shares this buffer's content.</p>
+ *
+ * <p>The content of the new buffer will be that of this buffer. Changes to this buffer's content will be visible in the new buffer;
+ * the new buffer itself, however, will be read-only and will not allow the shared content to be modified. The two buffers' position,
+ * limit, and mark values will be independent.</p>
+ *
+ * <p>The new buffer's capacity, limit, position, and mark values will be identical to those of this buffer.</p>

+ * + * @return The new, read-only byte buffer + */ + ByteBuf asReadOnly(); + + + /** + *

+ * <p>Creates a new byte buffer that shares this buffer's content.</p>
+ *
+ * <p>The content of the new buffer will be that of this buffer. Changes to this buffer's content will be visible in the new buffer,
+ * and vice versa; the two buffers' position, limit, and mark values will be independent.</p>
+ *
+ * <p>The new buffer's capacity, limit, position, and mark values will be identical to those of this buffer. The new buffer will be
+ * direct if, and only if, this buffer is direct, and it will be read-only if, and only if, this buffer is read-only.</p>

+ * + * @return The new byte buffer + */ + ByteBuf duplicate(); + + /** + * Gets the underlying NIO {@code ByteBuffer}. Changes made directly to the returned buffer will be reflected in this instance, and + * vice versa, so be careful. This method should really only be used so that the underlying buffer can be passed directly to a socket + * channel. + * + * @return the underlying ByteBuffer + */ + ByteBuffer asNIO(); + + /** + * Gets the current reference count, which is 1 for a new {@link ByteBuf}. + * + * @return the current count, which must be greater than or equal to 0 + */ + int getReferenceCount(); + + /** + * Retain an additional reference to this object. All retained references must be released, or there will be a leak. + * + * @return this + */ + ByteBuf retain(); + + /** + * Release a reference to this object. + * @throws java.lang.IllegalStateException if the reference count is already 0 + */ + void release(); +} diff --git a/bson/src/main/org/bson/ByteBufNIO.java b/bson/src/main/org/bson/ByteBufNIO.java new file mode 100644 index 00000000000..dfcc6379070 --- /dev/null +++ b/bson/src/main/org/bson/ByteBufNIO.java @@ -0,0 +1,264 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import java.nio.Buffer; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * Implementation of {@code ByteBuf} which simply wraps an NIO {@code ByteBuffer} and forwards all calls to it. + * + * @since 3.0 + */ +public class ByteBufNIO implements ByteBuf { + private ByteBuffer buf; + private final AtomicInteger referenceCount = new AtomicInteger(1); + + /** + * Creates a new instance. + * + * @param buf the {@code ByteBuffer} to wrap. 
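+ * <p>An added note (illustrative): the constructor switches the wrapped buffer to little-endian order, the byte order BSON requires:</p>
+ * <pre>{@code
+ * ByteBuf buf = new ByteBufNIO(ByteBuffer.allocate(4));
+ * buf.putInt(1);  // stored as 01 00 00 00
+ * }</pre>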
+ */ + public ByteBufNIO(final ByteBuffer buf) { + this.buf = buf.order(ByteOrder.LITTLE_ENDIAN); + } + + @Override + public int getReferenceCount() { + return referenceCount.get(); + } + + @Override + public ByteBufNIO retain() { + if (referenceCount.incrementAndGet() == 1) { + referenceCount.decrementAndGet(); + throw new IllegalStateException("Attempted to increment the reference count when it is already 0"); + } + return this; + } + + @Override + public void release() { + if (referenceCount.decrementAndGet() < 0) { + referenceCount.incrementAndGet(); + throw new IllegalStateException("Attempted to decrement the reference count below 0"); + } + if (referenceCount.get() == 0) { + buf = null; + } + } + + @Override + public int capacity() { + return buf.capacity(); + } + + @Override + public ByteBuf put(final int index, final byte b) { + buf.put(index, b); + return this; + } + + @Override + public int remaining() { + return buf.remaining(); + } + + @Override + public ByteBuf put(final byte[] src, final int offset, final int length) { + buf.put(src, offset, length); + return this; + } + + @Override + public boolean hasRemaining() { + return buf.hasRemaining(); + } + + @Override + public ByteBuf put(final byte b) { + buf.put(b); + return this; + } + + @Override + public ByteBuf putInt(final int b) { + buf.putInt(b); + return this; + } + + @Override + public ByteBuf putInt(final int index, final int b) { + buf.putInt(index, b); + return this; + } + + @Override + public ByteBuf putDouble(final double b) { + buf.putDouble(b); + return this; + } + + @Override + public ByteBuf putLong(final long b) { + buf.putLong(b); + return this; + } + + @Override + public ByteBuf flip() { + ((Buffer) buf).flip(); + return this; + } + + @Override + public byte[] array() { + return buf.array(); + } + + @Override + public boolean isBackedByArray() { + return buf.hasArray(); + } + + @Override + public int arrayOffset() { + return buf.arrayOffset(); + } + + @Override + public int limit() { + return buf.limit(); + } + + @Override + public ByteBuf position(final int newPosition) { + ((Buffer) buf).position(newPosition); + return this; + } + + @Override + public ByteBuf clear() { + ((Buffer) buf).clear(); + return this; + } + + @Override + public ByteBuf order(final ByteOrder byteOrder) { + buf.order(byteOrder); + return this; + } + + @Override + public byte get() { + return buf.get(); + } + + @Override + public byte get(final int index) { + return buf.get(index); + } + + @Override + public ByteBuf get(final byte[] bytes) { + buf.get(bytes); + return this; + } + + @Override + public ByteBuf get(final int index, final byte[] bytes) { + return get(index, bytes, 0, bytes.length); + } + + @Override + public ByteBuf get(final byte[] bytes, final int offset, final int length) { + buf.get(bytes, offset, length); + return this; + } + + @Override + public ByteBuf get(final int index, final byte[] bytes, final int offset, final int length) { + if (buf.hasArray()) { + System.arraycopy(buf.array(), index, bytes, offset, length); + } else { + // Fallback to per-byte copying if no backing array is available. 
+ for (int i = 0; i < length; i++) { + bytes[offset + i] = buf.get(index + i); + } + } + return this; + } + + @Override + public long getLong() { + return buf.getLong(); + } + + @Override + public long getLong(final int index) { + return buf.getLong(index); + } + + @Override + public double getDouble() { + return buf.getDouble(); + } + + @Override + public double getDouble(final int index) { + return buf.getDouble(index); + } + + @Override + public int getInt() { + return buf.getInt(); + } + + @Override + public int getInt(final int index) { + return buf.getInt(index); + } + + @Override + public int position() { + return buf.position(); + } + + @Override + public ByteBuf limit(final int newLimit) { + ((Buffer) buf).limit(newLimit); + return this; + } + + @Override + public ByteBuf asReadOnly() { + return new ByteBufNIO(buf.asReadOnlyBuffer()); + } + + @Override + public ByteBuf duplicate() { + return new ByteBufNIO(buf.duplicate()); + } + + @Override + public ByteBuffer asNIO() { + return buf; + } +} + diff --git a/bson/src/main/org/bson/Document.java b/bson/src/main/org/bson/Document.java new file mode 100644 index 00000000000..423d234c6d7 --- /dev/null +++ b/bson/src/main/org/bson/Document.java @@ -0,0 +1,552 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import org.bson.codecs.BsonValueCodecProvider; +import org.bson.codecs.Codec; +import org.bson.codecs.CollectionCodecProvider; +import org.bson.codecs.Decoder; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.DocumentCodec; +import org.bson.codecs.DocumentCodecProvider; +import org.bson.codecs.Encoder; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.IterableCodecProvider; +import org.bson.codecs.MapCodecProvider; +import org.bson.codecs.ValueCodecProvider; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; +import org.bson.json.JsonMode; +import org.bson.json.JsonReader; +import org.bson.json.JsonWriter; +import org.bson.json.JsonWriterSettings; +import org.bson.types.ObjectId; + +import java.io.Serializable; +import java.io.StringWriter; +import java.util.Collection; +import java.util.Date; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static java.lang.String.format; +import static java.util.Arrays.asList; +import static org.bson.assertions.Assertions.isTrue; +import static org.bson.assertions.Assertions.notNull; +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; +import static org.bson.codecs.configuration.CodecRegistries.withUuidRepresentation; + +/** + * A representation of a document as a {@code Map}. All iterators will traverse the elements in insertion order, as with {@code + * LinkedHashMap}. 
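[Editor's note — illustration only, not part of the patch: a minimal usage sketch for the Document class introduced here, using only the API added in this file. Insertion order is preserved, as the class javadoc states.

    Document doc = new Document("name", "MongoDB")
            .append("count", 1)
            .append("info", new Document("x", 203).append("y", 102));
    String name = doc.getString("name");     // typed accessor, no cast at the call site
    Integer count = doc.getInteger("count"); // null if the key is absent
]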
+ * + * @mongodb.driver.manual core/document document + * @since 3.0.0 + */ +public class Document implements Map, Serializable, Bson { + private static final Codec DEFAULT_CODEC = + withUuidRepresentation(fromProviders(asList(new ValueCodecProvider(), + new CollectionCodecProvider(), new IterableCodecProvider(), + new BsonValueCodecProvider(), new DocumentCodecProvider(), new MapCodecProvider())), UuidRepresentation.STANDARD) + .get(Document.class); + + private static final long serialVersionUID = 6297731997167536582L; + + /** + * The map of keys to values. + */ + private final LinkedHashMap documentAsMap; + + /** + * Creates an empty Document instance. + */ + public Document() { + documentAsMap = new LinkedHashMap<>(); + } + + /** + * Create a Document instance initialized with the given key/value pair. + * + * @param key key + * @param value value + */ + public Document(final String key, final Object value) { + documentAsMap = new LinkedHashMap<>(); + documentAsMap.put(key, value); + } + + /** + * Creates a Document instance initialized with the given map. + * + * @param map initial map + */ + public Document(final Map map) { + documentAsMap = new LinkedHashMap<>(map); + } + + + /** + * Parses a string in MongoDB Extended JSON format to a {@code Document} + * + * @param json the JSON string + * @return a corresponding {@code Document} object + * @see org.bson.json.JsonReader + * @mongodb.driver.manual reference/mongodb-extended-json/ MongoDB Extended JSON + */ + public static Document parse(final String json) { + return parse(json, DEFAULT_CODEC); + } + + /** + * Parses a string in MongoDB Extended JSON format to a {@code Document} + * + * @param json the JSON string + * @param decoder the {@code Decoder} to use to parse the JSON string into a {@code Document} + * @return a corresponding {@code Document} object + * @see org.bson.json.JsonReader + * @mongodb.driver.manual reference/mongodb-extended-json/ MongoDB Extended JSON + */ + public static Document parse(final String json, final Decoder decoder) { + notNull("codec", decoder); + JsonReader bsonReader = new JsonReader(json); + return decoder.decode(bsonReader, DecoderContext.builder().build()); + } + + @Override + public BsonDocument toBsonDocument(final Class documentClass, final CodecRegistry codecRegistry) { + return new BsonDocumentWrapper<>(this, codecRegistry.get(Document.class)); + } + + /** + * Put the given key/value pair into this Document and return this. Useful for chaining puts in a single expression, e.g. + *
<pre>
+     * doc.append("a", 1).append("b", 2)
+     * </pre>
+ * @param key key + * @param value value + * @return this + */ + public Document append(final String key, final Object value) { + documentAsMap.put(key, value); + return this; + } + + /** + * Gets the value of the given key, casting it to the given {@code Class}. This is useful to avoid having casts in client code, + * though the effect is the same. So to get the value of a key that is of type String, you would write {@code String name = + * doc.get("name", String.class)} instead of {@code String name = (String) doc.get("x") }. + * + * @param key the key + * @param clazz the non-null class to cast the value to + * @param the type of the class + * @return the value of the given key, or null if the instance does not contain this key. + * @throws ClassCastException if the value of the given key is not of type T + */ + public T get(final Object key, final Class clazz) { + notNull("clazz", clazz); + return clazz.cast(documentAsMap.get(key)); + } + + /** + * Gets the value of the given key, casting it to {@code Class} or returning the default value if null. + * This is useful to avoid having casts in client code, though the effect is the same. + * + * @param key the key + * @param defaultValue what to return if the value is null + * @param the type of the class + * @return the value of the given key, or null if the instance does not contain this key. + * @throws ClassCastException if the value of the given key is not of type T + * @since 3.5 + */ + @SuppressWarnings("unchecked") + public T get(final Object key, final T defaultValue) { + notNull("defaultValue", defaultValue); + Object value = documentAsMap.get(key); + return value == null ? defaultValue : (T) value; + } + + /** + * Gets the value in an embedded document, casting it to the given {@code Class}. The list of keys represents a path to the + * embedded value, drilling down into an embedded document for each key. This is useful to avoid having casts in + * client code, though the effect is the same. + *
<p>
+ * The generic type of the keys list is {@code ?} to be consistent with the corresponding {@code get} methods, but in practice + * the actual type of the argument should be {@code List}. So to get the embedded value of a key list that is of type String, + * you would write {@code String name = doc.getEmbedded(List.of("employee", "manager", "name"), String.class)} instead of + * {@code String name = (String) doc.get("employee", Document.class).get("manager", Document.class).get("name") }. + * + * @param keys the list of keys + * @param clazz the non-null class to cast the value to + * @param the type of the class + * @return the value of the given embedded key, or null if the instance does not contain this embedded key. + * @throws ClassCastException if the value of the given embedded key is not of type T + * @since 3.10 + */ + public T getEmbedded(final List keys, final Class clazz) { + notNull("keys", keys); + isTrue("keys", !keys.isEmpty()); + notNull("clazz", clazz); + return getEmbeddedValue(keys, clazz, null); + } + + /** + * Gets the value in an embedded document, casting it to the given {@code Class} or returning the default value if null. + * The list of keys represents a path to the embedded value, drilling down into an embedded document for each key. + * This is useful to avoid having casts in client code, though the effect is the same. + *
<p>
+ * The generic type of the keys list is {@code ?} to be consistent with the corresponding {@code get} methods, but in practice + * the actual type of the argument should be {@code List}. So to get the embedded value of a key list that is of type String, + * you would write {@code String name = doc.getEmbedded(List.of("employee", "manager", "name"), "John Smith")} instead of + * {@code String name = doc.get("employee", Document.class).get("manager", Document.class).get("name", "John Smith") }. + * + * @param keys the list of keys + * @param defaultValue what to return if the value is null + * @param the type of the class + * @return the value of the given key, or null if the instance does not contain this key. + * @throws ClassCastException if the value of the given key is not of type T + * @since 3.10 + */ + public T getEmbedded(final List keys, final T defaultValue) { + notNull("keys", keys); + isTrue("keys", !keys.isEmpty()); + notNull("defaultValue", defaultValue); + return getEmbeddedValue(keys, null, defaultValue); + } + + + // Gets the embedded value of the given list of keys, casting it to {@code Class} or returning the default value if null. + // Throws ClassCastException if any of the intermediate embedded values is not a Document. + @SuppressWarnings("unchecked") + private T getEmbeddedValue(final List keys, final Class clazz, final T defaultValue) { + Object value = this; + Iterator keyIterator = keys.iterator(); + while (keyIterator.hasNext()) { + Object key = keyIterator.next(); + value = ((Document) value).get(key); + if (!(value instanceof Document)) { + if (value == null) { + return defaultValue; + } else if (keyIterator.hasNext()) { + throw new ClassCastException(format("At key %s, the value is not a Document (%s)", + key, value.getClass().getName())); + } + } + } + return clazz != null ? clazz.cast(value) : (T) value; + } + + /** + * Gets the value of the given key as an Integer. + * + * @param key the key + * @return the value as an integer, which may be null + * @throws java.lang.ClassCastException if the value is not an integer + */ + public Integer getInteger(final Object key) { + return (Integer) get(key); + } + + /** + * Gets the value of the given key as a primitive int. + * + * @param key the key + * @param defaultValue what to return if the value is null + * @return the value as an integer, which may be null + * @throws java.lang.ClassCastException if the value is not an integer + */ + public int getInteger(final Object key, final int defaultValue) { + return get(key, defaultValue); + } + + /** + * Gets the value of the given key as a Long. + * + * @param key the key + * @return the value as a long, which may be null + * @throws java.lang.ClassCastException if the value is not an long + */ + public Long getLong(final Object key) { + return (Long) get(key); + } + + /** + * Gets the value of the given key as a Double. + * + * @param key the key + * @return the value as a double, which may be null + * @throws java.lang.ClassCastException if the value is not an double + */ + public Double getDouble(final Object key) { + return (Double) get(key); + } + + /** + * Gets the value of the given key as a String. + * + * @param key the key + * @return the value as a String, which may be null + * @throws java.lang.ClassCastException if the value is not a String + */ + public String getString(final Object key) { + return (String) get(key); + } + + /** + * Gets the value of the given key as a Boolean. 
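[Editor's note — illustration only: getEmbedded, defined above, walks one embedded document per key in the list. A sketch with hypothetical data (asList is the static import already used by this file):

    Document doc = Document.parse("{\"employee\": {\"manager\": {\"name\": \"Ada\"}}}");
    String name = doc.getEmbedded(asList("employee", "manager", "name"), String.class); // "Ada"
    String phone = doc.getEmbedded(asList("employee", "manager", "phone"), "n/a");      // default: key absent
]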
+ * + * @param key the key + * @return the value as a Boolean, which may be null + * @throws java.lang.ClassCastException if the value is not an boolean + */ + public Boolean getBoolean(final Object key) { + return (Boolean) get(key); + } + + /** + * Gets the value of the given key as a primitive boolean. + * + * @param key the key + * @param defaultValue what to return if the value is null + * @return the value as a primitive boolean + * @throws java.lang.ClassCastException if the value is not a boolean + */ + public boolean getBoolean(final Object key, final boolean defaultValue) { + return get(key, defaultValue); + } + + /** + * Gets the value of the given key as an ObjectId. + * + * @param key the key + * @return the value as an ObjectId, which may be null + * @throws java.lang.ClassCastException if the value is not an ObjectId + */ + public ObjectId getObjectId(final Object key) { + return (ObjectId) get(key); + } + + /** + * Gets the value of the given key as a Date. + * + * @param key the key + * @return the value as a Date, which may be null + * @throws java.lang.ClassCastException if the value is not a Date + */ + public Date getDate(final Object key) { + return (Date) get(key); + } + + /** + * Gets the list value of the given key, casting the list elements to the given {@code Class}. This is useful to avoid having + * casts in client code, though the effect is the same. + * + * @param key the key + * @param clazz the non-null class to cast the list value to + * @param the type of the class + * @return the list value of the given key, or null if the instance does not contain this key. + * @throws ClassCastException if the elements in the list value of the given key is not of type T or the value is not a list + * @since 3.10 + */ + public List getList(final Object key, final Class clazz) { + notNull("clazz", clazz); + return constructValuesList(key, clazz, null); + } + + /** + * Gets the list value of the given key, casting the list elements to {@code Class} or returning the default list value if null. + * This is useful to avoid having casts in client code, though the effect is the same. + * + * @param key the key + * @param clazz the non-null class to cast the list value to + * @param defaultValue what to return if the value is null + * @param the type of the class + * @return the list value of the given key, or the default list value if the instance does not contain this key. + * @throws ClassCastException if the value of the given key is not of type T + * @since 3.10 + */ + public List getList(final Object key, final Class clazz, final List defaultValue) { + notNull("defaultValue", defaultValue); + notNull("clazz", clazz); + return constructValuesList(key, clazz, defaultValue); + } + + + // Construct the list of values for the specified key, or return the default value if the value is null. + // A ClassCastException will be thrown if an element in the list is not of type T. 
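[Editor's note — illustration only: the getList overloads above delegate to constructValuesList, which follows; each element is type-checked before the list is handed back. A sketch with hypothetical data:

    Document doc = Document.parse("{\"scores\": [1, 2, 3]}");
    List<Integer> scores = doc.getList("scores", Integer.class);      // every element checked against Integer
    List<Integer> orEmpty = doc.getList("missing", Integer.class,
            java.util.Collections.emptyList());                       // default when the key is absent
]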
+ @SuppressWarnings("unchecked") + private List constructValuesList(final Object key, final Class clazz, final List defaultValue) { + List value = get(key, List.class); + if (value == null) { + return defaultValue; + } + + for (Object item : value) { + if (item != null && !clazz.isAssignableFrom(item.getClass())) { + throw new ClassCastException(format("List element cannot be cast to %s", clazz.getName())); + } + } + return value; + } + + /** + * Gets a JSON representation of this document using the {@link org.bson.json.JsonMode#RELAXED} output mode, and otherwise the default + * settings of {@link JsonWriterSettings.Builder} and {@link DocumentCodec}. + * + * @return a JSON representation of this document + * @throws org.bson.codecs.configuration.CodecConfigurationException if the document contains types not in the default registry + * @see #toJson(JsonWriterSettings) + * @see JsonWriterSettings + */ + public String toJson() { + return toJson(JsonWriterSettings.builder().outputMode(JsonMode.RELAXED).build()); + } + + /** + * Gets a JSON representation of this document + * + *
<p>With the default {@link DocumentCodec}.</p>
+ * + * @param writerSettings the json writer settings to use when encoding + * @return a JSON representation of this document + * @throws org.bson.codecs.configuration.CodecConfigurationException if the document contains types not in the default registry + */ + public String toJson(final JsonWriterSettings writerSettings) { + return toJson(writerSettings, DEFAULT_CODEC); + } + + /** + * Gets a JSON representation of this document + * + *
<p>With the default {@link JsonWriterSettings}.</p>
+ * + * @param encoder the document codec instance to use to encode the document + * @return a JSON representation of this document + * @throws org.bson.codecs.configuration.CodecConfigurationException if the registry does not contain a codec for the document values. + */ + public String toJson(final Encoder encoder) { + return toJson(JsonWriterSettings.builder().outputMode(JsonMode.RELAXED).build(), encoder); + } + + /** + * Gets a JSON representation of this document + * + * @param writerSettings the json writer settings to use when encoding + * @param encoder the document codec instance to use to encode the document + * @return a JSON representation of this document + * @throws org.bson.codecs.configuration.CodecConfigurationException if the registry does not contain a codec for the document values. + */ + public String toJson(final JsonWriterSettings writerSettings, final Encoder encoder) { + JsonWriter writer = new JsonWriter(new StringWriter(), writerSettings); + encoder.encode(writer, this, EncoderContext.builder().build()); + return writer.getWriter().toString(); + } + + // Vanilla Map methods delegate to map field + + @Override + public int size() { + return documentAsMap.size(); + } + + @Override + public boolean isEmpty() { + return documentAsMap.isEmpty(); + } + + @Override + public boolean containsValue(final Object value) { + return documentAsMap.containsValue(value); + } + + @Override + public boolean containsKey(final Object key) { + return documentAsMap.containsKey(key); + } + + @Override + public Object get(final Object key) { + return documentAsMap.get(key); + } + + @Override + public Object put(final String key, final Object value) { + return documentAsMap.put(key, value); + } + + @Override + public Object remove(final Object key) { + return documentAsMap.remove(key); + } + + @Override + public void putAll(final Map map) { + documentAsMap.putAll(map); + } + + @Override + public void clear() { + documentAsMap.clear(); + } + + @Override + public Set keySet() { + return documentAsMap.keySet(); + } + + @Override + public Collection values() { + return documentAsMap.values(); + } + + @Override + public Set> entrySet() { + return documentAsMap.entrySet(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Document document = (Document) o; + + if (!documentAsMap.equals(document.documentAsMap)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return documentAsMap.hashCode(); + } + + @Override + public String toString() { + return "Document{" + + documentAsMap + + '}'; + } +} diff --git a/bson/src/main/org/bson/EmptyBSONCallback.java b/bson/src/main/org/bson/EmptyBSONCallback.java new file mode 100644 index 00000000000..db5782679d0 --- /dev/null +++ b/bson/src/main/org/bson/EmptyBSONCallback.java @@ -0,0 +1,171 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson; + +import org.bson.types.Decimal128; +import org.bson.types.ObjectId; + +/** + * Convenience implementation of BSONCallback that throws {@code UnsupportedOperationException} for all methods. + */ +public class EmptyBSONCallback implements BSONCallback { + + @Override + public void objectStart() { + throw new UnsupportedOperationException("Operation is not supported"); + } + + @Override + public void objectStart(final String name) { + throw new UnsupportedOperationException("Operation is not supported"); + } + + @Override + public Object objectDone() { + throw new UnsupportedOperationException("Operation is not supported"); + } + + @Override + public void reset() { + throw new UnsupportedOperationException("Operation is not supported"); + } + + @Override + public Object get() { + throw new UnsupportedOperationException("Operation is not supported"); + } + + @Override + public BSONCallback createBSONCallback() { + throw new UnsupportedOperationException("Operation is not supported"); + } + + @Override + public void arrayStart() { + throw new UnsupportedOperationException("Operation is not supported"); + } + + @Override + public void arrayStart(final String name) { + throw new UnsupportedOperationException("Operation is not supported"); + } + + @Override + public Object arrayDone() { + throw new UnsupportedOperationException("Operation is not supported"); + } + + @Override + public void gotNull(final String name) { + throw new UnsupportedOperationException("Operation is not supported"); + } + + @Override + public void gotUndefined(final String name) { + throw new UnsupportedOperationException("Operation is not supported"); + } + + @Override + public void gotMinKey(final String name) { + throw new UnsupportedOperationException("Operation is not supported"); + } + + @Override + public void gotMaxKey(final String name) { + throw new UnsupportedOperationException("Operation is not supported"); + } + + @Override + public void gotBoolean(final String name, final boolean value) { + throw new UnsupportedOperationException("Operation is not supported"); + } + + @Override + public void gotDouble(final String name, final double value) { + throw new UnsupportedOperationException("Operation is not supported"); + } + + @Override + public void gotInt(final String name, final int value) { + throw new UnsupportedOperationException("Operation is not supported"); + } + + @Override + public void gotLong(final String name, final long value) { + throw new UnsupportedOperationException("Operation is not supported"); + } + + @Override + public void gotDecimal128(final String name, final Decimal128 value) { + throw new UnsupportedOperationException("Operation is not supported"); + } + + @Override + public void gotDate(final String name, final long millis) { + throw new UnsupportedOperationException("Operation is not supported"); + } + + @Override + public void gotString(final String name, final String value) { + throw new UnsupportedOperationException("Operation is not supported"); + } + + @Override + public void gotSymbol(final String name, final String value) { + throw new UnsupportedOperationException("Operation is not supported"); + } + + @Override + public void gotRegex(final String name, final String pattern, final String flags) { + throw new UnsupportedOperationException("Operation is not supported"); + } + + @Override + public void gotTimestamp(final String name, final int time, final int increment) { + throw new UnsupportedOperationException("Operation is not supported"); + } + + 
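[Editor's note — illustration only: EmptyBSONCallback is intended as a base class; a subclass overrides just the events it supports and inherits the fail-fast UnsupportedOperationException for everything else (LazyBSONCallback, later in this patch, does exactly this). A hypothetical sketch:

    public class RootBytesCallback extends EmptyBSONCallback {
        private Object root;

        @Override
        public void reset() { root = null; }

        @Override
        public Object get() { return root; }

        @Override
        public void gotBinary(final String name, final byte type, final byte[] data) {
            root = data; // keep only the raw bytes; all other events still throw
        }
    }
]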
@Override + public void gotObjectId(final String name, final ObjectId id) { + throw new UnsupportedOperationException("Operation is not supported"); + } + + @Override + public void gotDBRef(final String name, final String namespace, final ObjectId id) { + throw new UnsupportedOperationException("Operation is not supported"); + } + + @Override + public void gotBinary(final String name, final byte type, final byte[] data) { + throw new UnsupportedOperationException("Operation is not supported"); + } + + @Override + public void gotUUID(final String name, final long part1, final long part2) { + throw new UnsupportedOperationException("Operation is not supported"); + } + + @Override + public void gotCode(final String name, final String code) { + throw new UnsupportedOperationException("Operation is not supported"); + } + + @Override + public void gotCodeWScope(final String name, final String code, final Object scope) { + throw new UnsupportedOperationException("Operation is not supported"); + } +} diff --git a/bson/src/main/org/bson/FieldNameValidator.java b/bson/src/main/org/bson/FieldNameValidator.java new file mode 100644 index 00000000000..e7438cccb69 --- /dev/null +++ b/bson/src/main/org/bson/FieldNameValidator.java @@ -0,0 +1,71 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import static java.lang.String.format; +import static org.bson.assertions.Assertions.isTrue; + +/** + * A field name validator, for use by BSON writers to validate field names as documents are encoded. + * + * @since 3.0 + */ +public interface FieldNameValidator { + /** + * Returns true if the field name is valid, false otherwise. + * + * @param fieldName the field name + * @return true if the field name is valid, false otherwise + */ + boolean validate(String fieldName); + + /** + * Return the validation error message for an invalid field + * + * @param fieldName the field name + * @return the validation error message + * @throws IllegalArgumentException if fieldName is actually valid + */ + default String getValidationErrorMessage(final String fieldName) { + isTrue(fieldName + " is valid", !validate(fieldName)); + return format("Invalid BSON field name %s", fieldName); + } + + /** + * Gets a new validator to use for the value of the field with the given name. + * + * @param fieldName the field name + * @return a non-null validator + */ + FieldNameValidator getValidatorForField(String fieldName); + + /** + * Start validation of a single document. + * + * @since 4.0 + */ + default void start() { + } + + /** + * End validation of a single document. + * + * @since 4.0 + */ + default void end() { + } +} diff --git a/bson/src/main/org/bson/Float32BinaryVector.java b/bson/src/main/org/bson/Float32BinaryVector.java new file mode 100644 index 00000000000..37d1b8abb6e --- /dev/null +++ b/bson/src/main/org/bson/Float32BinaryVector.java @@ -0,0 +1,79 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import java.util.Arrays; + +import static org.bson.assertions.Assertions.assertNotNull; + +/** + * Represents a vector of 32-bit floating-point numbers, where each element in the vector is a float. + *
<p>
+ * The {@link Float32BinaryVector} is used to store and retrieve data efficiently using the BSON Binary Subtype 9 format. + * + * @mongodb.server.release 6.0 + * @see BinaryVector#floatVector(float[]) + * @see BsonBinary#BsonBinary(BinaryVector) + * @see BsonBinary#asVector() + * @since 5.3 + */ +public final class Float32BinaryVector extends BinaryVector { + + private final float[] data; + + Float32BinaryVector(final float[] vectorData) { + super(DataType.FLOAT32); + this.data = assertNotNull(vectorData); + } + + /** + * Retrieve the underlying float array representing this {@link Float32BinaryVector}, where each float + * represents an element of a vector. + *
<p>
+ * NOTE: The underlying float array is not copied; changes to the returned array will be reflected in this instance. + * + * @return the underlying float array representing this {@link Float32BinaryVector} vector. + */ + public float[] getData() { + return assertNotNull(data); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + Float32BinaryVector that = (Float32BinaryVector) o; + return Arrays.equals(data, that.data); + } + + @Override + public int hashCode() { + return Arrays.hashCode(data); + } + + @Override + public String toString() { + return "Float32Vector{" + + "data=" + Arrays.toString(data) + + ", dataType=" + getDataType() + + '}'; + } +} diff --git a/bson/src/main/org/bson/Int8BinaryVector.java b/bson/src/main/org/bson/Int8BinaryVector.java new file mode 100644 index 00000000000..a851aff94ff --- /dev/null +++ b/bson/src/main/org/bson/Int8BinaryVector.java @@ -0,0 +1,80 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import java.util.Arrays; +import java.util.Objects; + +import static org.bson.assertions.Assertions.assertNotNull; + +/** + * Represents a vector of 8-bit signed integers, where each element in the vector is a byte. + *
<p>
+ * The {@link Int8BinaryVector} is used to store and retrieve data efficiently using the BSON Binary Subtype 9 format. + * + * @mongodb.server.release 6.0 + * @see BinaryVector#int8Vector(byte[]) + * @see BsonBinary#BsonBinary(BinaryVector) + * @see BsonBinary#asVector() + * @since 5.3 + */ +public final class Int8BinaryVector extends BinaryVector { + + private byte[] data; + + Int8BinaryVector(final byte[] data) { + super(DataType.INT8); + this.data = assertNotNull(data); + } + + /** + * Retrieve the underlying byte array representing this {@link Int8BinaryVector} vector, where each byte represents + * an element of a vector. + *
<p>
+ * NOTE: The underlying byte array is not copied; changes to the returned array will be reflected in this instance. + * + * @return the underlying byte array representing this {@link Int8BinaryVector} vector. + */ + public byte[] getData() { + return assertNotNull(data); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + Int8BinaryVector that = (Int8BinaryVector) o; + return Objects.deepEquals(data, that.data); + } + + @Override + public int hashCode() { + return Arrays.hashCode(data); + } + + @Override + public String toString() { + return "Int8Vector{" + + "data=" + Arrays.toString(data) + + ", dataType=" + getDataType() + + '}'; + } +} diff --git a/bson/src/main/org/bson/LazyBSONCallback.java b/bson/src/main/org/bson/LazyBSONCallback.java new file mode 100644 index 00000000000..6ae03ce8cce --- /dev/null +++ b/bson/src/main/org/bson/LazyBSONCallback.java @@ -0,0 +1,85 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import org.bson.types.ObjectId; + +import java.util.List; + +/** + * A {@code BSONCallback} for creation of {@code LazyBSONObject} and {@code LazyBSONList} instances. + */ +public class LazyBSONCallback extends EmptyBSONCallback { + private Object root; + + @Override + public void reset() { + this.root = null; + } + + @Override + public Object get() { + return getRoot(); + } + + @Override + public void gotBinary(final String name, final byte type, final byte[] data) { + setRoot(createObject(data, 0)); + } + + /** + * Create a {@code LazyBSONObject} instance from the given bytes starting from the given offset. + * + * @param bytes the raw BSON bytes + * @param offset the offset into the bytes + * @return the LazyBSONObject + */ + public Object createObject(final byte[] bytes, final int offset) { + return new LazyBSONObject(bytes, offset, this); + } + + /** + * Create a {@code LazyBSONList} from the given bytes starting from the given offset. + * + * @param bytes the raw BSON bytes + * @param offset the offset into the bytes + * @return the LazyBSONList + */ + @SuppressWarnings("rawtypes") + public List createArray(final byte[] bytes, final int offset) { + return new LazyBSONList(bytes, offset, this); + } + + /** + * This is a factory method pattern to create appropriate objects for BSON type DBPointer(0x0c). 
+ * + * @param ns the namespace of the reference + * @param id the identifier of the reference + * @return object to be used as reference representation + */ + public Object createDBRef(final String ns, final ObjectId id) { + return new BasicBSONObject("$ns", ns).append("$id", id); + } + + private Object getRoot() { + return root; + } + + private void setRoot(final Object root) { + this.root = root; + } +} diff --git a/bson/src/main/org/bson/LazyBSONDecoder.java b/bson/src/main/org/bson/LazyBSONDecoder.java new file mode 100644 index 00000000000..7ca3fd5099a --- /dev/null +++ b/bson/src/main/org/bson/LazyBSONDecoder.java @@ -0,0 +1,64 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.Arrays; + +/** + * A decoder for {@code LazyBSONObject} instances. + */ +public class LazyBSONDecoder implements BSONDecoder { + private static final int BYTES_IN_INTEGER = 4; + + @Override + public BSONObject readObject(final byte[] bytes) { + BSONCallback bsonCallback = new LazyBSONCallback(); + decode(bytes, bsonCallback); + return (BSONObject) bsonCallback.get(); + } + + @Override + public BSONObject readObject(final InputStream in) throws IOException { + BSONCallback bsonCallback = new LazyBSONCallback(); + decode(in, bsonCallback); + return (BSONObject) bsonCallback.get(); + } + + @Override + public int decode(final byte[] bytes, final BSONCallback callback) { + try { + return decode(new ByteArrayInputStream(bytes), callback); + } catch (IOException e) { + throw new BSONException("Invalid bytes received", e); + } + } + + @Override + public int decode(final InputStream in, final BSONCallback callback) throws IOException { + byte[] documentSizeBuffer = new byte[BYTES_IN_INTEGER]; + int documentSize = Bits.readInt(in, documentSizeBuffer); + byte[] documentBytes = Arrays.copyOf(documentSizeBuffer, documentSize); + Bits.readFully(in, documentBytes, BYTES_IN_INTEGER, documentSize - BYTES_IN_INTEGER); + + // note that we are handing off ownership of the documentBytes byte array to the callback + callback.gotBinary(null, (byte) 0, documentBytes); + return documentSize; + } +} diff --git a/bson/src/main/org/bson/LazyBSONList.java b/bson/src/main/org/bson/LazyBSONList.java new file mode 100644 index 00000000000..43d91b37eab --- /dev/null +++ b/bson/src/main/org/bson/LazyBSONList.java @@ -0,0 +1,225 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import java.util.Collection; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.ListIterator; +import java.util.NoSuchElementException; +import java.util.Set; + +/** + * A {@code LazyBSONObject} representing a BSON array. + */ +@SuppressWarnings("rawtypes") +public class LazyBSONList extends LazyBSONObject implements List { + + /** + * Construct an instance with the given raw bytes and offset. + * + * @param bytes the raw BSON bytes + * @param callback the callback to use to create nested values + */ + public LazyBSONList(final byte[] bytes, final LazyBSONCallback callback) { + super(bytes, callback); + } + + /** + * Construct an instance with the given raw bytes and offset. + * + * @param bytes the raw BSON bytes + * @param offset the offset into the raw bytes + * @param callback the callback to use to create nested values + */ + public LazyBSONList(final byte[] bytes, final int offset, final LazyBSONCallback callback) { + super(bytes, offset, callback); + } + + @Override + public int size() { + return keySet().size(); + } + + @Override + public boolean contains(final Object o) { + return indexOf(o) > -1; + } + + @Override + public Iterator iterator() { + return new LazyBSONListIterator(); + } + + @Override + public boolean containsAll(final Collection collection) { + Set values = new HashSet<>(); + for (final Object o : this) { + values.add(o); + } + return values.containsAll(collection); + } + + @Override + public Object get(final int index) { + return get(String.valueOf(index)); + } + + @Override + public int indexOf(final Object o) { + Iterator it = iterator(); + for (int pos = 0; it.hasNext(); pos++) { + if (o.equals(it.next())) { + return pos; + } + } + return -1; + } + + @Override + public int lastIndexOf(final Object o) { + int lastFound = -1; + Iterator it = iterator(); + + for (int pos = 0; it.hasNext(); pos++) { + if (o.equals(it.next())) { + lastFound = pos; + } + } + + return lastFound; + } + + /** + * An iterator over the values in a LazyBsonList. 
+ */ + public class LazyBSONListIterator implements Iterator { + private final BsonBinaryReader reader; + private BsonType cachedBsonType; + + /** + * Construct an instance + */ + public LazyBSONListIterator() { + reader = getBsonReader(); + reader.readStartDocument(); + } + + @Override + public boolean hasNext() { + if (cachedBsonType == null) { + cachedBsonType = reader.readBsonType(); + } + return cachedBsonType != BsonType.END_OF_DOCUMENT; + } + + @Override + public Object next() { + if (!hasNext()) { + throw new NoSuchElementException(); + } else { + cachedBsonType = null; + reader.readName(); + return readValue(reader); + } + } + + @Override + public void remove() { + throw new UnsupportedOperationException("Operation is not supported"); + } + + } + + /* ----------------- Unsupported operations --------------------- */ + + @Override + public ListIterator listIterator() { + throw new UnsupportedOperationException("Operation is not supported instance of this type"); + } + + @Override + public ListIterator listIterator(final int index) { + throw new UnsupportedOperationException("Operation is not supported instance of this type"); + } + + @Override + public boolean add(final Object o) { + throw new UnsupportedOperationException("Object is read only"); + } + + @Override + public boolean remove(final Object o) { + throw new UnsupportedOperationException("Object is read only"); + } + + @Override + public boolean addAll(final Collection c) { + throw new UnsupportedOperationException("Object is read only"); + } + + @Override + public boolean addAll(final int index, final Collection c) { + throw new UnsupportedOperationException("Object is read only"); + } + + @Override + public boolean removeAll(final Collection c) { + throw new UnsupportedOperationException("Object is read only"); + } + + @Override + public boolean retainAll(final Collection c) { + throw new UnsupportedOperationException("Object is read only"); + } + + @Override + public void clear() { + throw new UnsupportedOperationException("Object is read only"); + } + + @Override + public Object set(final int index, final Object element) { + throw new UnsupportedOperationException("Object is read only"); + } + + @Override + public void add(final int index, final Object element) { + throw new UnsupportedOperationException("Object is read only"); + } + + @Override + public Object remove(final int index) { + throw new UnsupportedOperationException("Object is read only"); + } + + @Override + public List subList(final int fromIndex, final int toIndex) { + throw new UnsupportedOperationException("Operation is not supported"); + } + + @Override + public Object[] toArray() { + throw new UnsupportedOperationException("Operation is not supported"); + } + + @Override + public Object[] toArray(final Object[] a) { + throw new UnsupportedOperationException("Operation is not supported"); + } +} diff --git a/bson/src/main/org/bson/LazyBSONObject.java b/bson/src/main/org/bson/LazyBSONObject.java new file mode 100644 index 00000000000..35afc1b33ff --- /dev/null +++ b/bson/src/main/org/bson/LazyBSONObject.java @@ -0,0 +1,471 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import org.bson.io.ByteBufferBsonInput; +import org.bson.types.BSONTimestamp; +import org.bson.types.Binary; +import org.bson.types.Code; +import org.bson.types.CodeWScope; +import org.bson.types.MaxKey; +import org.bson.types.MinKey; +import org.bson.types.Symbol; + +import java.io.IOException; +import java.io.OutputStream; +import java.nio.Buffer; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.channels.Channels; +import java.nio.channels.WritableByteChannel; +import java.util.AbstractMap; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.Date; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.regex.Pattern; + +import static org.bson.BsonBinarySubType.BINARY; +import static org.bson.BsonBinarySubType.OLD_BINARY; + +/** + * An immutable {@code BSONObject} backed by a byte buffer that lazily provides keys and values on request. This is useful for transferring + * BSON documents between servers when you don't want to pay the performance penalty of encoding or decoding them fully. + */ +public class LazyBSONObject implements BSONObject { + private final byte[] bytes; + private final int offset; + private final LazyBSONCallback callback; + + + /** + * Construct an instance. + * + * @param bytes the raw bytes + * @param callback the callback to use to construct nested values + */ + public LazyBSONObject(final byte[] bytes, final LazyBSONCallback callback) { + this(bytes, 0, callback); + } + + /** + * Construct an instance. + * + * @param bytes the raw bytes + * @param offset the offset into the raw bytes representing the start of the document + * @param callback the callback to use to construct nested values + */ + public LazyBSONObject(final byte[] bytes, final int offset, final LazyBSONCallback callback) { + this.bytes = bytes; + this.callback = callback; + this.offset = offset; + } + + /** + * Gets the offset into the raw bytes representing the start of the document + * + * @return the offset + */ + protected int getOffset() { + return offset; + } + + /** + * Gets the raw bytes. 
+ * + * @return the raw bytes + */ + protected byte[] getBytes() { + return bytes; + } + + @Override + public Object get(final String key) { + BsonBinaryReader reader = getBsonReader(); + Object value; + try { + reader.readStartDocument(); + value = null; + while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) { + if (key.equals(reader.readName())) { + value = readValue(reader); + break; + } else { + reader.skipValue(); + } + } + } finally { + reader.close(); + } + return value; + } + + @Override + public boolean containsField(final String s) { + try (BsonBinaryReader reader = getBsonReader()) { + reader.readStartDocument(); + while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) { + if (reader.readName().equals(s)) { + return true; + } else { + reader.skipValue(); + } + } + } + return false; + } + + @Override + public Set keySet() { + Set keys = new LinkedHashSet<>(); + try (BsonBinaryReader reader = getBsonReader()) { + reader.readStartDocument(); + while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) { + keys.add(reader.readName()); + reader.skipValue(); + } + reader.readEndDocument(); + } + return Collections.unmodifiableSet(keys); + } + + Object readValue(final BsonBinaryReader reader) { + switch (reader.getCurrentBsonType()) { + case DOCUMENT: + return readDocument(reader); + case ARRAY: + return readArray(reader); + case DOUBLE: + return reader.readDouble(); + case STRING: + return reader.readString(); + case BINARY: + byte binarySubType = reader.peekBinarySubType(); + BsonBinary binary = reader.readBinaryData(); + if (binarySubType == BINARY.getValue() || binarySubType == OLD_BINARY.getValue()) { + return binary.getData(); + } else { + return new Binary(binary.getType(), binary.getData()); + } + case NULL: + reader.readNull(); + return null; + case UNDEFINED: + reader.readUndefined(); + return null; + case OBJECT_ID: + return reader.readObjectId(); + case BOOLEAN: + return reader.readBoolean(); + case DATE_TIME: + return new Date(reader.readDateTime()); + case REGULAR_EXPRESSION: + BsonRegularExpression regularExpression = reader.readRegularExpression(); + return Pattern.compile( + regularExpression.getPattern(), + BSON.regexFlags(regularExpression.getOptions()) + ); + case DB_POINTER: + BsonDbPointer dbPointer = reader.readDBPointer(); + return callback.createDBRef(dbPointer.getNamespace(), dbPointer.getId()); + case JAVASCRIPT: + return new Code(reader.readJavaScript()); + case SYMBOL: + return new Symbol(reader.readSymbol()); + case JAVASCRIPT_WITH_SCOPE: + return new CodeWScope(reader.readJavaScriptWithScope(), (BSONObject) readJavaScriptWithScopeDocument(reader)); + case INT32: + return reader.readInt32(); + case TIMESTAMP: + BsonTimestamp timestamp = reader.readTimestamp(); + return new BSONTimestamp(timestamp.getTime(), timestamp.getInc()); + case INT64: + return reader.readInt64(); + case DECIMAL128: + return reader.readDecimal128(); + case MIN_KEY: + reader.readMinKey(); + return new MinKey(); + case MAX_KEY: + reader.readMaxKey(); + return new MaxKey(); + default: + throw new IllegalArgumentException("unhandled BSON type: " + reader.getCurrentBsonType()); + } + } + + private Object readArray(final BsonBinaryReader reader) { + int position = reader.getBsonInput().getPosition(); + reader.skipValue(); + return callback.createArray(bytes, offset + position); + } + + private Object readDocument(final BsonBinaryReader reader) { + int position = reader.getBsonInput().getPosition(); + reader.skipValue(); + return callback.createObject(bytes, offset + position); + } + 
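[Editor's note — illustration only: get and containsField above decode a single value and skip over everything else, which is the point of the lazy representation. A sketch, assuming the legacy BasicBSONEncoder/BasicBSONObject classes from this module:

    byte[] bytes = new BasicBSONEncoder().encode(
            new BasicBSONObject("_id", 1).append("name", "a"));
    LazyBSONObject lazy = new LazyBSONObject(bytes, new LazyBSONCallback());
    Object id = lazy.get("_id");           // scans to "_id" and decodes only that value
    boolean has = lazy.containsField("x"); // reads names, skips values entirely
]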
+ private Object readJavaScriptWithScopeDocument(final BsonBinaryReader reader) { + int position = reader.getBsonInput().getPosition(); + reader.readStartDocument(); + while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) { + reader.skipName(); + reader.skipValue(); + } + reader.readEndDocument(); + return callback.createObject(bytes, offset + position); + } + + BsonBinaryReader getBsonReader() { + ByteBuffer buffer = getBufferForInternalBytes(); + return new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(buffer))); + } + + private ByteBuffer getBufferForInternalBytes() { + ByteBuffer buffer = ByteBuffer.wrap(bytes, offset, bytes.length - offset).slice(); + buffer.order(ByteOrder.LITTLE_ENDIAN); + ((Buffer) buffer).limit(buffer.getInt()); + ((Buffer) buffer).rewind(); + return buffer; + } + + /** + * Gets whether this is an empty {@code BSONObject}. + * + * @return true if this has no keys + */ + public boolean isEmpty() { + return keySet().size() == 0; + } + + /** + * Gets the size in bytes of the BSON document. + * + * @return the size in bytes + */ + public int getBSONSize() { + return getBufferForInternalBytes().getInt(); + } + + /** + * Pipe the raw bytes into the given output stream. + * + * @param os the output stream + * @return the number of bytes written + * @throws IOException any IOException thrown by the output stream + */ + public int pipe(final OutputStream os) throws IOException { + WritableByteChannel channel = Channels.newChannel(os); + return channel.write(getBufferForInternalBytes()); + } + + /** + * Gets the entry set for all the key/value pairs in this {@code BSONObject}. The returned set is immutable. + * + * @return then entry set + */ + public Set> entrySet() { + List> entries = new ArrayList<>(); + try (BsonBinaryReader reader = getBsonReader()) { + reader.readStartDocument(); + while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) { + entries.add(new AbstractMap.SimpleImmutableEntry<>(reader.readName(), readValue(reader))); + } + reader.readEndDocument(); + } + return new Set>() { + @Override + public int size() { + return entries.size(); + } + + @Override + public boolean isEmpty() { + return entries.isEmpty(); + } + + @Override + public Iterator> iterator() { + return entries.iterator(); + } + + @Override + public Object[] toArray() { + return entries.toArray(); + } + + @Override + public T[] toArray(final T[] a) { + return entries.toArray(a); + } + + @Override + public boolean contains(final Object o) { + return entries.contains(o); + } + + @Override + public boolean containsAll(final Collection c) { + return entries.containsAll(c); + } + + @Override + public boolean add(final Map.Entry stringObjectEntry) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean remove(final Object o) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean addAll(final Collection> c) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean retainAll(final Collection c) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean removeAll(final Collection c) { + throw new UnsupportedOperationException(); + } + + @Override + public void clear() { + throw new UnsupportedOperationException(); + } + }; + } + + @Override + public int hashCode() { + int result = 1; + int size = getBSONSize(); + for (int i = offset; i < offset + size; i++) { + result = 31 * result + bytes[i]; + } + return result; + } + + @Override + public boolean equals(final Object o) { + if (this == o) 
{ + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + LazyBSONObject other = (LazyBSONObject) o; + + if (this.bytes == other.bytes && this.offset == other.offset) { + return true; + } + if (this.bytes == null || other.bytes == null) { + return false; + } + + if (this.bytes.length == 0 || other.bytes.length == 0) { + return false; + } + + //comparing document length + int length = this.bytes[this.offset]; + if (other.bytes[other.offset] != length) { + return false; + } + + //comparing document contents + for (int i = 0; i < length; i++) { + if (this.bytes[this.offset + i] != other.bytes[other.offset + i]) { + return false; + } + } + + return true; + } + + + /* ----------------- Unsupported operations --------------------- */ + + /** + * Always throws {@code UnsupportedOperationException}. + * + * @param key Name to set + * @param v Corresponding value + * @return will not return normally + * @throws java.lang.UnsupportedOperationException the object is read only + */ + @Override + public Object put(final String key, final Object v) { + throw new UnsupportedOperationException("Object is read only"); + } + + /** + * Always throws {@code UnsupportedOperationException}. + * + * @param o the object + * @throws java.lang.UnsupportedOperationException the object is read only + */ + @Override + public void putAll(final BSONObject o) { + throw new UnsupportedOperationException("Object is read only"); + } + + /** + * Always throws {@code UnsupportedOperationException}. + * + * @param m the map + * @throws java.lang.UnsupportedOperationException the object is read only + */@Override + @SuppressWarnings("rawtypes") + public void putAll(final Map m) { + throw new UnsupportedOperationException("Object is read only"); + } + + /** + * Always throws {@code UnsupportedOperationException}. + * + * @param key The name of the field to remove + * @return will not return normally + * @throws java.lang.UnsupportedOperationException the object is read only + */ + @Override + public Object removeField(final String key) { + throw new UnsupportedOperationException("Object is read only"); + } + + @Override + @SuppressWarnings("rawtypes") + public Map toMap() { + Map map = new LinkedHashMap<>(); + for (final Map.Entry entry : entrySet()) { + map.put(entry.getKey(), entry.getValue()); + } + return Collections.unmodifiableMap(map); + } +} diff --git a/bson/src/main/org/bson/NoOpFieldNameValidator.java b/bson/src/main/org/bson/NoOpFieldNameValidator.java new file mode 100644 index 00000000000..33353498986 --- /dev/null +++ b/bson/src/main/org/bson/NoOpFieldNameValidator.java @@ -0,0 +1,34 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson; + +final class NoOpFieldNameValidator implements FieldNameValidator { + static final NoOpFieldNameValidator INSTANCE = new NoOpFieldNameValidator(); + + private NoOpFieldNameValidator() { + } + + @Override + public boolean validate(final String fieldName) { + return true; + } + + @Override + public FieldNameValidator getValidatorForField(final String fieldName) { + return this; + } +} diff --git a/bson/src/main/org/bson/PackedBitBinaryVector.java b/bson/src/main/org/bson/PackedBitBinaryVector.java new file mode 100644 index 00000000000..33200650204 --- /dev/null +++ b/bson/src/main/org/bson/PackedBitBinaryVector.java @@ -0,0 +1,105 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import org.bson.annotations.Beta; +import org.bson.annotations.Reason; + +import java.util.Arrays; +import java.util.Objects; + +import static org.bson.assertions.Assertions.assertNotNull; + +/** + * Represents a packed bit vector, where each element of the vector is represented by a single bit (0 or 1). + *
<p>
+ * The {@link PackedBitBinaryVector} is used to store data efficiently using the BSON Binary Subtype 9 format. + * + * @mongodb.server.release 6.0 + * @see BinaryVector#packedBitVector(byte[], byte) + * @see BsonBinary#BsonBinary(BinaryVector) + * @see BsonBinary#asVector() + * @since 5.3 + */ +@Beta(Reason.SERVER) +public final class PackedBitBinaryVector extends BinaryVector { + + private final byte padding; + private final byte[] data; + + PackedBitBinaryVector(final byte[] data, final byte padding) { + super(DataType.PACKED_BIT); + this.data = assertNotNull(data); + this.padding = padding; + } + + /** + * Retrieve the underlying byte array representing this {@link PackedBitBinaryVector} vector, where + * each bit represents an element of the vector (either 0 or 1). + *
<p>
+ * Note that the {@linkplain #getPadding() padding value} should be considered when interpreting the final byte of the array, + * as it indicates how many least-significant bits are to be ignored. + * + * @return the underlying byte array representing this {@link PackedBitBinaryVector} vector. + * @see #getPadding() + */ + public byte[] getData() { + return assertNotNull(data); + } + + /** + * Returns the padding value for this vector. + * + *
<p>
Padding refers to the number of least-significant bits in the final byte that are ignored when retrieving + * {@linkplain #getData() the vector array}. For instance, if the padding value is 3, this means that the last byte contains + * 3 least-significant unused bits, which should be disregarded during operations.
</p>
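+ * <p>As a minimal illustration (the literal values below are hypothetical, not taken from the driver docs): a
+ * 10-element bit vector occupies two bytes and leaves the 6 least-significant bits of the final byte unused, so it
+ * is created with a padding of 6.</p>
+ * <pre>{@code
+ * // 10 bits packed into 2 bytes; the 6 low-order bits of the last byte are padding
+ * PackedBitBinaryVector vector = BinaryVector.packedBitVector(
+ *         new byte[] {(byte) 0b10101010, (byte) 0b11000000}, (byte) 6);
+ * byte[] data = vector.getData();
+ * int padding = vector.getPadding(); // 6: ignore the low 6 bits of data[1]
+ * }</pre>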
+ *
<p>
+ * + * NOTE: The underlying byte array is not copied; changes to the returned array will be reflected in this instance. + * + * @return the padding value (between 0 and 7). + */ + public byte getPadding() { + return this.padding; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + PackedBitBinaryVector that = (PackedBitBinaryVector) o; + return padding == that.padding && Arrays.equals(data, that.data); + } + + @Override + public int hashCode() { + return Objects.hash(padding, Arrays.hashCode(data)); + } + + @Override + public String toString() { + return "PackedBitVector{" + + "padding=" + padding + + ", data=" + Arrays.toString(data) + + ", dataType=" + getDataType() + + '}'; + } +} diff --git a/bson/src/main/org/bson/RawBsonArray.java b/bson/src/main/org/bson/RawBsonArray.java new file mode 100644 index 00000000000..fc56f312e01 --- /dev/null +++ b/bson/src/main/org/bson/RawBsonArray.java @@ -0,0 +1,374 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import org.bson.io.ByteBufferBsonInput; + +import java.io.InvalidObjectException; +import java.io.ObjectInputStream; +import java.io.Serializable; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.util.AbstractList; +import java.util.Collection; +import java.util.Iterator; +import java.util.ListIterator; +import java.util.NoSuchElementException; + +import static org.bson.assertions.Assertions.isTrueArgument; +import static org.bson.assertions.Assertions.notNull; + +/** + * An immutable BSON array that is represented using only the raw bytes. + * + * @since 3.7 + */ +public class RawBsonArray extends BsonArray implements Serializable { + private static final long serialVersionUID = 2L; + private static final String IMMUTABLE_MSG = "RawBsonArray instances are immutable"; + + private final transient RawBsonArrayList delegate; + + /** + * Constructs a new instance with the given byte array. Note that it does not make a copy of the array, so do not modify it after + * passing it to this constructor. + * + * @param bytes the bytes representing a BSON document. Note that the byte array is NOT copied, so care must be taken not to modify it + * after passing it to this construction, unless of course that is your intention. + */ + public RawBsonArray(final byte[] bytes) { + this(notNull("bytes", bytes), 0, bytes.length); + } + + /** + * Constructs a new instance with the given byte array, offset, and length. Note that it does not make a copy of the array, so do not + * modify it after passing it to this constructor. + * + * @param bytes the bytes representing a BSON document. Note that the byte array is NOT copied, so care must be taken not to modify it + * after passing it to this construction, unless of course that is your intention. 
+ * @param offset the offset into the byte array
+ * @param length the length of the subarray to use
+ */
+ public RawBsonArray(final byte[] bytes, final int offset, final int length) {
+ this(new RawBsonArrayList(bytes, offset, length));
+ }
+
+ private RawBsonArray(final RawBsonArrayList values) {
+ super(values, false);
+ this.delegate = values;
+ }
+
+ ByteBuf getByteBuffer() {
+ return delegate.getByteBuffer();
+ }
+
+ @Override
+ public boolean add(final BsonValue bsonValue) {
+ throw new UnsupportedOperationException(IMMUTABLE_MSG);
+ }
+
+ @Override
+ public boolean remove(final Object o) {
+ throw new UnsupportedOperationException(IMMUTABLE_MSG);
+ }
+
+ @Override
+ public boolean addAll(final Collection<? extends BsonValue> c) {
+ throw new UnsupportedOperationException(IMMUTABLE_MSG);
+ }
+
+ @Override
+ public boolean addAll(final int index, final Collection<? extends BsonValue> c) {
+ throw new UnsupportedOperationException(IMMUTABLE_MSG);
+ }
+
+ @Override
+ public boolean removeAll(final Collection<?> c) {
+ throw new UnsupportedOperationException(IMMUTABLE_MSG);
+ }
+
+ @Override
+ public boolean retainAll(final Collection<?> c) {
+ throw new UnsupportedOperationException(IMMUTABLE_MSG);
+ }
+
+ @Override
+ public void clear() {
+ throw new UnsupportedOperationException(IMMUTABLE_MSG);
+ }
+
+ @Override
+ public BsonValue set(final int index, final BsonValue element) {
+ throw new UnsupportedOperationException(IMMUTABLE_MSG);
+ }
+
+ @Override
+ public void add(final int index, final BsonValue element) {
+ throw new UnsupportedOperationException(IMMUTABLE_MSG);
+ }
+
+ @Override
+ public BsonValue remove(final int index) {
+ throw new UnsupportedOperationException(IMMUTABLE_MSG);
+ }
+
+ @Override
+ public BsonArray clone() {
+ return new RawBsonArray(delegate.bytes.clone(), delegate.offset, delegate.length);
+ }
+
+ @Override
+ public boolean equals(final Object o) {
+ return super.equals(o);
+ }
+
+ @Override
+ public int hashCode() {
+ return super.hashCode();
+ }
+
+ /**
+ * Write the replacement object.
+ *
+ *
<p>
+ * See https://docs.oracle.com/javase/6/docs/platform/serialization/spec/output.html + *
</p>
+ * + * @return a proxy for the document + */ + private Object writeReplace() { + return new SerializationProxy(delegate.bytes, delegate.offset, delegate.length); + } + + /** + * Prevent normal deserialization. + * + *
<p>
+ * See https://docs.oracle.com/javase/6/docs/platform/serialization/spec/input.html + *
</p>
+ *
+ * @param stream the stream
+ * @throws InvalidObjectException in all cases
+ */
+ private void readObject(final ObjectInputStream stream) throws InvalidObjectException {
+ throw new InvalidObjectException("Proxy required");
+ }
+
+ private static class SerializationProxy implements Serializable {
+ private static final long serialVersionUID = 1L;
+
+ private final byte[] bytes;
+
+ SerializationProxy(final byte[] bytes, final int offset, final int length) {
+ if (bytes.length == length) {
+ this.bytes = bytes;
+ } else {
+ this.bytes = new byte[length];
+ System.arraycopy(bytes, offset, this.bytes, 0, length);
+ }
+ }
+
+ private Object readResolve() {
+ return new RawBsonArray(bytes);
+ }
+ }
+
+ static class RawBsonArrayList extends AbstractList<BsonValue> {
+ private static final int MIN_BSON_ARRAY_SIZE = 5;
+ private Integer cachedSize;
+ private final byte[] bytes;
+ private final int offset;
+ private final int length;
+
+ RawBsonArrayList(final byte[] bytes, final int offset, final int length) {
+ notNull("bytes", bytes);
+ isTrueArgument("offset >= 0", offset >= 0);
+ isTrueArgument("offset < bytes.length", offset < bytes.length);
+ isTrueArgument("length <= bytes.length - offset", length <= bytes.length - offset);
+ isTrueArgument("length >= 5", length >= MIN_BSON_ARRAY_SIZE);
+ this.bytes = bytes;
+ this.offset = offset;
+ this.length = length;
+ }
+
+ @Override
+ public BsonValue get(final int index) {
+ if (index < 0) {
+ throw new IndexOutOfBoundsException();
+ }
+ int curIndex = 0;
+ try (BsonBinaryReader bsonReader = createReader()) {
+ bsonReader.readStartDocument();
+ while (bsonReader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+ bsonReader.skipName();
+ if (curIndex == index) {
+ return RawBsonValueHelper.decode(bytes, bsonReader);
+ }
+ bsonReader.skipValue();
+ curIndex++;
+ }
+ bsonReader.readEndDocument();
+ }
+ throw new IndexOutOfBoundsException();
+ }
+
+ @Override
+ public int size() {
+ if (cachedSize != null) {
+ return cachedSize;
+ }
+ int size = 0;
+ try (BsonBinaryReader bsonReader = createReader()) {
+ bsonReader.readStartDocument();
+ while (bsonReader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+ size++;
+ bsonReader.readName();
+ bsonReader.skipValue();
+ }
+ bsonReader.readEndDocument();
+ }
+ cachedSize = size;
+ return cachedSize;
+ }
+
+ @Override
+ public Iterator<BsonValue> iterator() {
+ return new Itr();
+ }
+
+ @Override
+ public ListIterator<BsonValue> listIterator() {
+ return new ListItr(0);
+ }
+
+ @Override
+ public ListIterator<BsonValue> listIterator(final int index) {
+ return new ListItr(index);
+ }
+
+ private class Itr implements Iterator<BsonValue> {
+ private int cursor = 0;
+ private BsonBinaryReader bsonReader;
+ private int currentPosition = 0;
+
+ Itr() {
+ this(0);
+ }
+
+ Itr(final int cursorPosition) {
+ setIterator(cursorPosition);
+ }
+
+ public boolean hasNext() {
+ boolean hasNext = cursor != size();
+ if (!hasNext) {
+ bsonReader.close();
+ }
+ return hasNext;
+ }
+
+ public BsonValue next() {
+ while (cursor > currentPosition && bsonReader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+ bsonReader.skipName();
+ bsonReader.skipValue();
+ currentPosition++;
+ }
+
+ if (bsonReader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+ bsonReader.skipName();
+ cursor += 1;
+ currentPosition = cursor;
+ return RawBsonValueHelper.decode(bytes, bsonReader);
+ } else {
+ bsonReader.close();
+ throw new NoSuchElementException();
+ }
+ }
+
+ @Override
+ public void remove() {
+ throw new UnsupportedOperationException(IMMUTABLE_MSG);
+ }
+
+ public int getCursor() {
+ return cursor;
+ }
+
+ public void setCursor(final int cursor) {
+ this.cursor = cursor;
+ }
+
+ void setIterator(final int cursorPosition) {
+ cursor = cursorPosition;
+ currentPosition = 0;
+ if (bsonReader != null) {
+ bsonReader.close();
+ }
+ bsonReader = createReader();
+ bsonReader.readStartDocument();
+ }
+ }
+
+ private class ListItr extends Itr implements ListIterator<BsonValue> {
+ ListItr(final int index) {
+ super(index);
+ }
+
+ public boolean hasPrevious() {
+ return getCursor() != 0;
+ }
+
+ public BsonValue previous() {
+ try {
+ BsonValue previous = get(previousIndex());
+ setIterator(previousIndex());
+ return previous;
+ } catch (IndexOutOfBoundsException e) {
+ throw new NoSuchElementException();
+ }
+ }
+
+ public int nextIndex() {
+ return getCursor();
+ }
+
+ public int previousIndex() {
+ return getCursor() - 1;
+ }
+
+ @Override
+ public void set(final BsonValue bsonValue) {
+ throw new UnsupportedOperationException(IMMUTABLE_MSG);
+ }
+
+ @Override
+ public void add(final BsonValue bsonValue) {
+ throw new UnsupportedOperationException(IMMUTABLE_MSG);
+ }
+ }
+
+ private BsonBinaryReader createReader() {
+ return new BsonBinaryReader(new ByteBufferBsonInput(getByteBuffer()));
+ }
+
+ ByteBuf getByteBuffer() {
+ ByteBuffer buffer = ByteBuffer.wrap(bytes, offset, length);
+ buffer.order(ByteOrder.LITTLE_ENDIAN);
+ return new ByteBufNIO(buffer);
+ }
+ }
+}
diff --git a/bson/src/main/org/bson/RawBsonDocument.java b/bson/src/main/org/bson/RawBsonDocument.java
new file mode 100644
index 00000000000..eb672bcef8d
--- /dev/null
+++ b/bson/src/main/org/bson/RawBsonDocument.java
@@ -0,0 +1,390 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson;
+
+import org.bson.codecs.BsonDocumentCodec;
+import org.bson.codecs.Codec;
+import org.bson.codecs.Decoder;
+import org.bson.codecs.DecoderContext;
+import org.bson.codecs.EncoderContext;
+import org.bson.codecs.RawBsonDocumentCodec;
+import org.bson.io.BasicOutputBuffer;
+import org.bson.io.ByteBufferBsonInput;
+import org.bson.json.JsonMode;
+import org.bson.json.JsonReader;
+import org.bson.json.JsonWriter;
+import org.bson.json.JsonWriterSettings;
+
+import java.io.InvalidObjectException;
+import java.io.ObjectInputStream;
+import java.io.Serializable;
+import java.io.StringWriter;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.util.Collection;
+import java.util.Map;
+import java.util.NoSuchElementException;
+import java.util.Set;
+
+import static org.bson.assertions.Assertions.isTrueArgument;
+import static org.bson.assertions.Assertions.notNull;
+
+/**
+ * An immutable BSON document that is represented using only the raw bytes.
+ *
+ * @since 3.0
+ */
+public final class RawBsonDocument extends BsonDocument {
+ private static final long serialVersionUID = 1L;
+ private static final int MIN_BSON_DOCUMENT_SIZE = 5;
+
+ /**
+ * The raw bytes.
+ */
+ private final byte[] bytes;
+
+ /**
+ * The offset into bytes, which must be less than {@code bytes.length}.
+ */
+ private final int offset;
+
+ /**
+ * The length, which must be less than {@code offset + bytes.length}.
+ */
+ private final int length;
+
+ /**
+ * Parses a string in MongoDB Extended JSON format to a {@code RawBsonDocument}
+ *
+ * @param json the JSON string
+ * @return a corresponding {@code RawBsonDocument} object
+ * @see org.bson.json.JsonReader
+ * @mongodb.driver.manual reference/mongodb-extended-json/ MongoDB Extended JSON
+ * @since 3.3
+ */
+ public static RawBsonDocument parse(final String json) {
+ notNull("json", json);
+ return new RawBsonDocumentCodec().decode(new JsonReader(json), DecoderContext.builder().build());
+ }
+
+ /**
+ * Constructs a new instance with the given byte array. Note that it does not make a copy of the array, so do not modify it after
+ * passing it to this constructor.
+ *
+ * @param bytes the bytes representing a BSON document. Note that the byte array is NOT copied, so care must be taken not to modify it
+ * after passing it to this construction, unless of course that is your intention.
+ */
+ public RawBsonDocument(final byte[] bytes) {
+ this(notNull("bytes", bytes), 0, bytes.length);
+ }
+
+ /**
+ * Constructs a new instance with the given byte array, offset, and length. Note that it does not make a copy of the array, so do not
+ * modify it after passing it to this constructor.
+ *
+ * @param bytes the bytes representing a BSON document. Note that the byte array is NOT copied, so care must be taken not to modify it
+ * after passing it to this construction, unless of course that is your intention.
+ * @param offset the offset into the byte array
+ * @param length the length of the subarray to use
+ * @since 3.3
+ */
+ public RawBsonDocument(final byte[] bytes, final int offset, final int length) {
+ notNull("bytes", bytes);
+ isTrueArgument("offset >= 0", offset >= 0);
+ isTrueArgument("offset < bytes.length", offset < bytes.length);
+ isTrueArgument("length <= bytes.length - offset", length <= bytes.length - offset);
+ isTrueArgument("length >= 5", length >= MIN_BSON_DOCUMENT_SIZE);
+ this.bytes = bytes;
+ this.offset = offset;
+ this.length = length;
+ }
+
+ /**
+ * Construct a new instance from the given document and codec for the document type.
+ *
+ * @param document the document to transform
+ * @param codec the codec to facilitate the transformation
+ * @param <T> the BSON type that the codec encodes/decodes
+ */
+ public <T> RawBsonDocument(final T document, final Codec<T> codec) {
+ notNull("document", document);
+ notNull("codec", codec);
+ BasicOutputBuffer buffer = new BasicOutputBuffer();
+ try (BsonBinaryWriter writer = new BsonBinaryWriter(buffer)) {
+ codec.encode(writer, document, EncoderContext.builder().build());
+ this.bytes = buffer.getInternalBuffer();
+ this.offset = 0;
+ this.length = buffer.getPosition();
+ }
+ }
+
+ /**
+ * Returns a {@code ByteBuf} that wraps the byte array, with the proper byte order. Any changes made to the returned buffer will be
+ * reflected in the underlying byte array owned by this instance.
+ *
+ * @return a byte buffer that wraps the byte array owned by this instance.
+ */
+ public ByteBuf getByteBuffer() {
+ ByteBuffer buffer = ByteBuffer.wrap(bytes, offset, length);
+ buffer.order(ByteOrder.LITTLE_ENDIAN);
+ return new ByteBufNIO(buffer);
+ }
+
+ /**
+ * Decode this into a document.
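+ * <p>A minimal usage sketch (the codec chosen here is just one possibility):</p>
+ * <pre>{@code
+ * RawBsonDocument raw = RawBsonDocument.parse("{a: 1}");
+ * BsonDocument decoded = raw.decode(new BsonDocumentCodec());
+ * }</pre>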
+ *
+ * @param codec the codec to facilitate the transformation
+ * @param <T> the BSON type that the codec encodes/decodes
+ * @return the decoded document
+ */
+ public <T> T decode(final Codec<T> codec) {
+ return decode((Decoder<T>) codec);
+ }
+
+ /**
+ * Decode this into a document.
+ *
+ * @param decoder the decoder to facilitate the transformation
+ * @param <T> the BSON type that the codec encodes/decodes
+ * @return the decoded document
+ * @since 3.6
+ */
+ public <T> T decode(final Decoder<T> decoder) {
+ try (BsonBinaryReader reader = createReader()) {
+ return decoder.decode(reader, DecoderContext.builder().build());
+ }
+ }
+
+ @Override
+ public void clear() {
+ throw new UnsupportedOperationException("RawBsonDocument instances are immutable");
+ }
+
+ @Override
+ public BsonValue put(final String key, final BsonValue value) {
+ throw new UnsupportedOperationException("RawBsonDocument instances are immutable");
+ }
+
+ @Override
+ public BsonDocument append(final String key, final BsonValue value) {
+ throw new UnsupportedOperationException("RawBsonDocument instances are immutable");
+ }
+
+ @Override
+ public void putAll(final Map<? extends String, ? extends BsonValue> m) {
+ throw new UnsupportedOperationException("RawBsonDocument instances are immutable");
+ }
+
+ @Override
+ public BsonValue remove(final Object key) {
+ throw new UnsupportedOperationException("RawBsonDocument instances are immutable");
+ }
+
+ @Override
+ public boolean isEmpty() {
+ try (BsonBinaryReader bsonReader = createReader()) {
+ bsonReader.readStartDocument();
+ if (bsonReader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+ return false;
+ }
+ bsonReader.readEndDocument();
+ }
+
+ return true;
+ }
+
+ @Override
+ public int size() {
+ int size = 0;
+ try (BsonBinaryReader bsonReader = createReader()) {
+ bsonReader.readStartDocument();
+ while (bsonReader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+ size++;
+ bsonReader.readName();
+ bsonReader.skipValue();
+ }
+ bsonReader.readEndDocument();
+ }
+
+ return size;
+ }
+
+ @Override
+ public Set<Map.Entry<String, BsonValue>> entrySet() {
+ return toBaseBsonDocument().entrySet();
+ }
+
+ @Override
+ public Collection<BsonValue> values() {
+ return toBaseBsonDocument().values();
+ }
+
+ @Override
+ public Set<String> keySet() {
+ return toBaseBsonDocument().keySet();
+ }
+
+ @Override
+ public String getFirstKey() {
+ try (BsonBinaryReader bsonReader = createReader()) {
+ bsonReader.readStartDocument();
+ try {
+ return bsonReader.readName();
+ } catch (BsonInvalidOperationException e) {
+ throw new NoSuchElementException();
+ }
+ }
+ }
+
+ @Override
+ public boolean containsKey(final Object key) {
+ if (key == null) {
+ throw new IllegalArgumentException("key can not be null");
+ }
+
+ try (BsonBinaryReader bsonReader = createReader()) {
+ bsonReader.readStartDocument();
+ while (bsonReader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+ if (bsonReader.readName().equals(key)) {
+ return true;
+ }
+ bsonReader.skipValue();
+ }
+ bsonReader.readEndDocument();
+ }
+
+ return false;
+ }
+
+ @Override
+ public boolean containsValue(final Object value) {
+ try (BsonBinaryReader bsonReader = createReader()) {
+ bsonReader.readStartDocument();
+ while (bsonReader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+ bsonReader.skipName();
+ if (RawBsonValueHelper.decode(bytes, bsonReader).equals(value)) {
+ return true;
+ }
+ }
+ bsonReader.readEndDocument();
+ }
+
+ return false;
+ }
+
+ @Override
+ public BsonValue get(final Object key) {
+ notNull("key", key);
+
+ try (BsonBinaryReader bsonReader = createReader()) {
+ bsonReader.readStartDocument();
+ while (bsonReader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+ if (bsonReader.readName().equals(key)) {
+ return RawBsonValueHelper.decode(bytes, bsonReader);
+ }
+ bsonReader.skipValue();
+ }
+ bsonReader.readEndDocument();
+ }
+
+ return null;
+ }
+
+ @Override
+ public String toJson() {
+ return toJson(JsonWriterSettings.builder().outputMode(JsonMode.RELAXED).build());
+ }
+
+ @Override
+ public String toJson(final JsonWriterSettings settings) {
+ StringWriter writer = new StringWriter();
+ new RawBsonDocumentCodec().encode(new JsonWriter(writer, settings), this, EncoderContext.builder().build());
+ return writer.toString();
+ }
+
+ @Override
+ public boolean equals(final Object o) {
+ return toBaseBsonDocument().equals(o);
+ }
+
+ @Override
+ public int hashCode() {
+ return toBaseBsonDocument().hashCode();
+ }
+
+ @Override
+ public BsonDocument clone() {
+ return new RawBsonDocument(bytes.clone(), offset, length);
+ }
+
+ private BsonBinaryReader createReader() {
+ return new BsonBinaryReader(new ByteBufferBsonInput(getByteBuffer()));
+ }
+
+ // Transform to an org.bson.BsonDocument instance
+ private BsonDocument toBaseBsonDocument() {
+ try (BsonBinaryReader bsonReader = createReader()) {
+ return new BsonDocumentCodec().decode(bsonReader, DecoderContext.builder().build());
+ }
+ }
+
+ /**
+ * Write the replacement object.
+ *
+ *
<p>
+ * See https://docs.oracle.com/javase/6/docs/platform/serialization/spec/output.html + *
</p>
+ * + * @return a proxy for the document + */ + private Object writeReplace() { + return new SerializationProxy(this.bytes, offset, length); + } + + /** + * Prevent normal deserialization. + * + *
<p>
+ * See https://docs.oracle.com/javase/6/docs/platform/serialization/spec/input.html + *
</p>
+ * + * @param stream the stream + * @throws InvalidObjectException in all cases + */ + private void readObject(final ObjectInputStream stream) throws InvalidObjectException { + throw new InvalidObjectException("Proxy required"); + } + + private static class SerializationProxy implements Serializable { + private static final long serialVersionUID = 1L; + + private final byte[] bytes; + + SerializationProxy(final byte[] bytes, final int offset, final int length) { + if (bytes.length == length) { + this.bytes = bytes; + } else { + this.bytes = new byte[length]; + System.arraycopy(bytes, offset, this.bytes, 0, length); + } + } + + private Object readResolve() { + return new RawBsonDocument(bytes); + } + } +} diff --git a/bson/src/main/org/bson/RawBsonValueHelper.java b/bson/src/main/org/bson/RawBsonValueHelper.java new file mode 100644 index 00000000000..864d285a1a6 --- /dev/null +++ b/bson/src/main/org/bson/RawBsonValueHelper.java @@ -0,0 +1,49 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import org.bson.codecs.BsonValueCodecProvider; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.io.BsonInputMark; + +import static org.bson.codecs.BsonValueCodecProvider.getClassForBsonType; +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; + +final class RawBsonValueHelper { + private static final CodecRegistry REGISTRY = fromProviders(new BsonValueCodecProvider()); + + static BsonValue decode(final byte[] bytes, final BsonBinaryReader bsonReader) { + if (bsonReader.getCurrentBsonType() == BsonType.DOCUMENT || bsonReader.getCurrentBsonType() == BsonType.ARRAY) { + int position = bsonReader.getBsonInput().getPosition(); + BsonInputMark mark = bsonReader.getBsonInput().getMark(4); + int size = bsonReader.getBsonInput().readInt32(); + mark.reset(); + bsonReader.skipValue(); + if (bsonReader.getCurrentBsonType() == BsonType.DOCUMENT) { + return new RawBsonDocument(bytes, position, size); + } else { + return new RawBsonArray(bytes, position, size); + } + } else { + return REGISTRY.get(getClassForBsonType(bsonReader.getCurrentBsonType())).decode(bsonReader, DecoderContext.builder().build()); + } + } + + private RawBsonValueHelper() { + } +} diff --git a/bson/src/main/org/bson/StringUtils.java b/bson/src/main/org/bson/StringUtils.java new file mode 100644 index 00000000000..461b27d9113 --- /dev/null +++ b/bson/src/main/org/bson/StringUtils.java @@ -0,0 +1,30 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson;
+
+import java.util.Arrays;
+import java.util.stream.Collectors;
+
+final class StringUtils {
+ @SafeVarargs
+ @SuppressWarnings("varargs")
+ public static <T> String join(final String delimiter, final T... values) {
+ return Arrays.stream(values).map(String::valueOf).collect(Collectors.joining(delimiter));
+ }
+
+ private StringUtils() { }
+}
diff --git a/bson/src/main/org/bson/Transformer.java b/bson/src/main/org/bson/Transformer.java
new file mode 100644
index 00000000000..edf4a82b2bb
--- /dev/null
+++ b/bson/src/main/org/bson/Transformer.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson;
+
+/**
+ * Transforms objects that can be converted to BSON into other Java types, and vice versa.
+ */
+public interface Transformer {
+ /**
+ * Turns the {@code objectToTransform} into some other {@code Object}. This can either be turning a simple BSON-friendly object into a
+ * different Java type, or it can be turning a Java type that can't automatically be converted into BSON into something that can.
+ *
+ * @param objectToTransform the object that needs to be transformed.
+ * @return the new transformed object.
+ */
+ Object transform(Object objectToTransform);
+}
diff --git a/bson/src/main/org/bson/UuidRepresentation.java b/bson/src/main/org/bson/UuidRepresentation.java
new file mode 100644
index 00000000000..76695b65314
--- /dev/null
+++ b/bson/src/main/org/bson/UuidRepresentation.java
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson;
+
+import static java.lang.String.format;
+import static org.bson.BsonBinarySubType.UUID_LEGACY;
+import static org.bson.BsonBinarySubType.UUID_STANDARD;
+
+/**
+ * The representation to use when converting a UUID to a BSON binary value.
+ * This class is necessary because the different drivers used to have different
+ * ways of encoding UUID, with the BSON subtype: \x03 UUID old.
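+ * <p>A minimal sketch of how a representation is typically paired with a UUID, using the {@link BsonBinary}
+ * constructor that takes a representation:</p>
+ * <pre>{@code
+ * UUID uuid = UUID.randomUUID();
+ * BsonBinary standard = new BsonBinary(uuid, UuidRepresentation.STANDARD);      // subtype 4
+ * BsonBinary javaLegacy = new BsonBinary(uuid, UuidRepresentation.JAVA_LEGACY); // subtype 3
+ * }</pre>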
+ * + * @since 3.0 + */ +public enum UuidRepresentation { + + /** + * An unspecified representation of UUID. Essentially, this is the null representation value. + * + * @since 3.12 + */ + UNSPECIFIED, + + /** + * The canonical representation of UUID + *
<p>
+ * BSON binary subtype 4 + */ + STANDARD, + + /** + * The legacy representation of UUID used by the C# driver + *
<p>
+ * BSON binary subtype 3 + */ + C_SHARP_LEGACY, + + /** + * The legacy representation of UUID used by the Java driver + *
<p>
+ * BSON binary subtype 3 + */ + JAVA_LEGACY, + + /** + * The legacy representation of UUID used by the Python driver, which is the same + * format as STANDARD, but has the UUID old BSON subtype (\x03) + *
<p>
+ * BSON binary subtype 3 + */ + PYTHON_LEGACY; + + /** + * Gets the BSON binary subtype for the representation. + * + * @return the BSON binary subtype for the representation + * @throws BSONException if this is {@link #UNSPECIFIED} + * @since 4.7 + */ + public BsonBinarySubType getSubtype() { + switch (this) { + case STANDARD: + return UUID_STANDARD; + case JAVA_LEGACY: + case PYTHON_LEGACY: + case C_SHARP_LEGACY: + return UUID_LEGACY; + default: + throw new BSONException(format("No BsonBinarySubType for %s", this)); + } + } +} diff --git a/bson/src/main/org/bson/annotations/Beta.java b/bson/src/main/org/bson/annotations/Beta.java new file mode 100644 index 00000000000..0db9171952c --- /dev/null +++ b/bson/src/main/org/bson/annotations/Beta.java @@ -0,0 +1,56 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * Copyright 2010 The Guava Authors + * Copyright 2011 The Guava Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.annotations; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Signifies that a public API (public class, method or field) is subject to + * incompatible changes, or even removal, in a future release. An API bearing + * this annotation is exempt from any compatibility guarantees made by its + * containing library. Note that the presence of this annotation implies nothing + * about the quality or performance of the API in question, only the fact that + * it is not "API-frozen." + * + *
<p>
It is generally safe for applications to depend on beta APIs, at + * the cost of some extra work during upgrades. However it is generally + * inadvisable for libraries (which get included on users' CLASSPATHs, + * outside the library developers' control) to do so. + * + **/ +@Retention(RetentionPolicy.CLASS) +@Target({ + ElementType.ANNOTATION_TYPE, + ElementType.CONSTRUCTOR, + ElementType.FIELD, + ElementType.METHOD, + ElementType.PACKAGE, + ElementType.TYPE }) +@Documented +@Beta(Reason.CLIENT) +public @interface Beta { + /** + * @return The reason an API element is marked with {@link Beta}. + */ + Reason[] value(); +} diff --git a/bson/src/main/org/bson/annotations/Reason.java b/bson/src/main/org/bson/annotations/Reason.java new file mode 100644 index 00000000000..d0b11c79651 --- /dev/null +++ b/bson/src/main/org/bson/annotations/Reason.java @@ -0,0 +1,34 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.annotations; + +/** + * Enumerates the reasons an API element might be marked with annotations like {@link Beta}. + */ +@Beta(Reason.CLIENT) +public enum Reason { + /** + * Indicates that the status of the driver API is the reason for the annotation. + */ + CLIENT, + + /** + * The driver API relies on the server API. + * This dependency is the reason for the annotation and suggests that changes in the server API could impact the driver API. + */ + SERVER +} diff --git a/bson/src/main/org/bson/annotations/package-info.java b/bson/src/main/org/bson/annotations/package-info.java new file mode 100644 index 00000000000..ac5cd9dabf9 --- /dev/null +++ b/bson/src/main/org/bson/annotations/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Contains annotations that can apply to any part of the BSON library code. + */ +package org.bson.annotations; diff --git a/bson/src/main/org/bson/assertions/Assertions.java b/bson/src/main/org/bson/assertions/Assertions.java new file mode 100644 index 00000000000..16e4a3f1737 --- /dev/null +++ b/bson/src/main/org/bson/assertions/Assertions.java @@ -0,0 +1,164 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * Copyright (c) 2008-2014 Atlassian Pty Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.assertions; + +import javax.annotation.Nullable; + +/** + *
<p>
Design by contract assertions.
</p> <p>
This class is not part of the public API and may be removed or changed at any time.
</p>
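+ * <p>A typical call-site sketch (the class, field, and parameter names below are illustrative only):</p>
+ * <pre>{@code
+ * Widget(final String name, final int size) {
+ *     this.name = notNull("name", name);
+ *     isTrueArgument("size >= 0", size >= 0);
+ * }
+ * }</pre>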
+ */
+public final class Assertions {
+ /**
+ * Throw IllegalArgumentException if the value is null.
+ *
+ * @param name the parameter name
+ * @param value the value that should not be null
+ * @param <T> the value type
+ * @return the value
+ * @throws IllegalArgumentException if value is null
+ */
+ public static <T> T notNull(final String name, final T value) {
+ if (value == null) {
+ throw new IllegalArgumentException(name + " can not be null");
+ }
+ return value;
+ }
+
+ /**
+ * Throw IllegalStateException if the condition is false.
+ *
+ * @param name the name of the state that is being checked
+ * @param condition the condition about the parameter to check
+ * @throws IllegalStateException if the condition is false
+ */
+ public static void isTrue(final String name, final boolean condition) {
+ if (!condition) {
+ throw new IllegalStateException("state should be: " + name);
+ }
+ }
+
+ /**
+ * Throw IllegalArgumentException if the condition is false.
+ *
+ * @param name the name of the state that is being checked
+ * @param condition the condition about the parameter to check
+ * @throws IllegalArgumentException if the condition is false
+ */
+ public static void isTrueArgument(final String name, final boolean condition) {
+ if (!condition) {
+ throw new IllegalArgumentException("state should be: " + name);
+ }
+ }
+
+ /**
+ * Throw IllegalArgumentException if the condition is false, otherwise return the value. This is useful when arguments must be checked
+ * within an expression, as when using {@code this} to call another constructor, which must be the first line of the calling
+ * constructor.
+ *
+ * @param <T> the value type
+ * @param name the name of the state that is being checked
+ * @param value the value of the argument
+ * @param condition the condition about the parameter to check
+ * @return the value
+ * @throws java.lang.IllegalArgumentException if the condition is false
+ */
+ public static <T> T isTrueArgument(final String name, final T value, final boolean condition) {
+ if (!condition) {
+ throw new IllegalArgumentException("state should be: " + name);
+ }
+ return value;
+ }
+
+ /**
+ * @return Never completes normally. The return type is {@link AssertionError} to allow writing {@code throw fail()}.
+ * This may be helpful in non-{@code void} methods.
+ * @throws AssertionError Always
+ */
+ public static AssertionError fail() throws AssertionError {
+ throw new AssertionError();
+ }
+
+ /**
+ * @param msg The failure message.
+ * @return Never completes normally. The return type is {@link AssertionError} to allow writing {@code throw fail("failure message")}.
+ * This may be helpful in non-{@code void} methods.
+ * @throws AssertionError Always
+ */
+ public static AssertionError fail(final String msg) throws AssertionError {
+ throw new AssertionError(assertNotNull(msg));
+ }
+
+ /**
+ * @param msg The failure message.
+ * @param cause The underlying cause
+ * @return Never completes normally. The return type is {@link AssertionError} to allow writing
+ * {@code throw fail("failure message", throwable)}.
+ * This may be helpful in non-{@code void} methods.
+ * @throws AssertionError Always
+ */
+ public static AssertionError fail(final String msg, final Throwable cause) throws AssertionError {
+ throw new AssertionError(assertNotNull(msg), assertNotNull(cause));
+ }
+
+ /**
+ * @param value A value to check.
+ * @param <T> The type of {@code value}.
+ * @return {@code value}
+ * @throws AssertionError If {@code value} is {@code null}.
+ */
+ public static <T> T assertNotNull(@Nullable final T value) throws AssertionError {
+ if (value == null) {
+ throw new AssertionError();
+ }
+ return value;
+ }
+
+ /**
+ * Throw AssertionError if the condition is false.
+ *
+ * @param name the name of the state that is being checked
+ * @param condition the condition about the parameter to check
+ * @throws AssertionError if the condition is false
+ */
+ public static void assertTrue(final String name, final boolean condition) {
+ if (!condition) {
+ throw new AssertionError("state should be: " + assertNotNull(name));
+ }
+ }
+
+ /**
+ * Cast an object to the given class and return it, or throw IllegalArgumentException if it's not assignable to that class.
+ *
+ * @param clazz the class to cast to
+ * @param value the value to cast
+ * @param errorMessage the error message to include in the exception
+ * @param <T> the Class type
+ * @return value cast to clazz
+ * @throws IllegalArgumentException if value is not assignable to clazz
+ */
+ @SuppressWarnings("unchecked")
+ public static <T> T convertToType(final Class<T> clazz, final Object value, final String errorMessage) {
+ if (!clazz.isAssignableFrom(value.getClass())) {
+ throw new IllegalArgumentException(errorMessage);
+ }
+ return (T) value;
+ }
+
+ private Assertions() {
+ }
+}
diff --git a/bson/src/main/org/bson/assertions/package-info.java b/bson/src/main/org/bson/assertions/package-info.java
new file mode 100644
index 00000000000..c098c82b4be
--- /dev/null
+++ b/bson/src/main/org/bson/assertions/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package contains design by contract assertions
+ */
+package org.bson.assertions;
diff --git a/bson/src/main/org/bson/codecs/AbstractCollectionCodec.java b/bson/src/main/org/bson/codecs/AbstractCollectionCodec.java
new file mode 100644
index 00000000000..9d2edfd6da3
--- /dev/null
+++ b/bson/src/main/org/bson/codecs/AbstractCollectionCodec.java
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs;
+
+import org.bson.BsonReader;
+import org.bson.BsonType;
+import org.bson.BsonWriter;
+import org.bson.codecs.configuration.CodecConfigurationException;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.util.AbstractCollection;
+import java.util.AbstractList;
+import java.util.AbstractSet;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
+import java.util.List;
+import java.util.NavigableSet;
+import java.util.Set;
+import java.util.SortedSet;
+import java.util.TreeSet;
+import java.util.function.Supplier;
+
+import static java.lang.String.format;
+import static org.bson.assertions.Assertions.notNull;
+
+abstract class AbstractCollectionCodec<T, C extends Collection<T>> implements Codec<C> {
+
+ private final Class<C> clazz;
+ private final Supplier<C> supplier;
+
+ @SuppressWarnings({"unchecked", "UnnecessaryLocalVariable", "rawtypes"})
+ AbstractCollectionCodec(final Class<C> clazz) {
+ this.clazz = notNull("clazz", clazz);
+ Class rawClass = clazz;
+ if (rawClass == Collection.class || rawClass == List.class || rawClass == AbstractCollection.class || rawClass == AbstractList.class
+ || rawClass == ArrayList.class) {
+ supplier = () -> (C) new ArrayList<T>();
+ } else if (rawClass == Set.class || rawClass == AbstractSet.class || rawClass == HashSet.class) {
+ supplier = () -> (C) new HashSet<T>();
+ } else if (rawClass == NavigableSet.class || rawClass == SortedSet.class || rawClass == TreeSet.class) {
+ //noinspection SortedCollectionWithNonComparableKeys
+ supplier = () -> (C) new TreeSet<T>();
+ } else {
+ Constructor<? extends Collection<T>> constructor;
+ Supplier<C> supplier;
+ try {
+ constructor = clazz.getDeclaredConstructor();
+ supplier = () -> {
+ try {
+ return (C) constructor.newInstance();
+ } catch (InstantiationException | IllegalAccessException | InvocationTargetException e) {
+ throw new CodecConfigurationException(format("Can not invoke no-args constructor for Collection class %s", clazz),
+ e);
+ }
+ };
+ } catch (NoSuchMethodException e) {
+ supplier = () -> {
+ throw new CodecConfigurationException(format("No no-args constructor for Collection class %s", clazz), e);
+ };
+ }
+ this.supplier = supplier;
+ }
+ }
+
+ abstract T readValue(BsonReader reader, DecoderContext decoderContext);
+
+ abstract void writeValue(BsonWriter writer, T cur, EncoderContext encoderContext);
+
+ @Override
+ public C decode(final BsonReader reader, final DecoderContext decoderContext) {
+ reader.readStartArray();
+
+ C collection = supplier.get();
+ while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+ if (reader.getCurrentBsonType() == BsonType.NULL) {
+ reader.readNull();
+ collection.add(null);
+ } else {
+ collection.add(readValue(reader, decoderContext));
+ }
+ }
+
+ reader.readEndArray();
+
+ return collection;
+ }
+
+ @Override
+ public void encode(final BsonWriter writer, final C value, final EncoderContext encoderContext) {
+ writer.writeStartArray();
+ for (final T cur : value) {
+ if (cur == null) {
+ writer.writeNull();
+ } else {
+ writeValue(writer, cur, encoderContext);
+ }
+ }
+ writer.writeEndArray();
+ }
+
+ @Override
+ public Class<C> getEncoderClass() {
+ return clazz;
+ }
+}
diff --git a/bson/src/main/org/bson/codecs/AbstractMapCodec.java b/bson/src/main/org/bson/codecs/AbstractMapCodec.java
new file mode 100644
index 00000000000..3987c19da92
--- /dev/null
+++ b/bson/src/main/org/bson/codecs/AbstractMapCodec.java
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs;
+
+import org.bson.BsonReader;
+import org.bson.BsonType;
+import org.bson.BsonWriter;
+import org.bson.codecs.configuration.CodecConfigurationException;
+
+import javax.annotation.Nullable;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.util.AbstractMap;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.TreeMap;
+import java.util.function.Supplier;
+
+import static java.lang.String.format;
+import static org.bson.assertions.Assertions.notNull;
+
+abstract class AbstractMapCodec<T, M extends Map<String, T>> implements Codec<M> {
+
+ private final Supplier<M> supplier;
+ private final Class<M> clazz;
+
+ @SuppressWarnings({"unchecked", "UnnecessaryLocalVariable", "rawtypes"})
+ AbstractMapCodec(@Nullable final Class<M> clazz) {
+ this.clazz = notNull("clazz", clazz);
+ Class rawClass = clazz;
+ if (rawClass == Map.class || rawClass == AbstractMap.class || rawClass == HashMap.class) {
+ supplier = () -> (M) new HashMap<String, T>();
+ } else if (rawClass == NavigableMap.class || rawClass == TreeMap.class) {
+ supplier = () -> (M) new TreeMap<String, T>();
+ } else {
+ Constructor<? extends Map<String, T>> constructor;
+ Supplier<M> supplier;
+ try {
+ constructor = clazz.getDeclaredConstructor();
+ supplier = () -> {
+ try {
+ return (M) constructor.newInstance();
+ } catch (InstantiationException | IllegalAccessException | InvocationTargetException e) {
+ throw new CodecConfigurationException(format("Can not invoke no-args constructor for Map class %s", clazz), e);
+ }
+ };
+ } catch (NoSuchMethodException e) {
+ supplier = () -> {
+ throw new CodecConfigurationException(format("Map class %s has no public no-args constructor", clazz), e);
+ };
+ }
+ this.supplier = supplier;
+ }
+ }
+
+ abstract T readValue(BsonReader reader, DecoderContext decoderContext);
+
+ abstract void writeValue(BsonWriter writer, T value, EncoderContext encoderContext);
+
+ @Override
+ public void encode(final BsonWriter writer, final M map, final EncoderContext encoderContext) {
+ writer.writeStartDocument();
+ for (final Map.Entry<String, T> entry : map.entrySet()) {
+ writer.writeName(entry.getKey());
+ T value = entry.getValue();
+ if (value == null) {
+ writer.writeNull();
+ } else {
+ writeValue(writer, value, encoderContext);
+ }
+ }
+ writer.writeEndDocument();
+ }
+
+
+ @Override
+ public M decode(final BsonReader reader, final DecoderContext decoderContext) {
+ M map = supplier.get();
+
+ reader.readStartDocument();
+ while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+ String fieldName = reader.readName();
+ if (reader.getCurrentBsonType() == BsonType.NULL) {
+ reader.readNull();
+ map.put(fieldName, null);
+ } else {
+ map.put(fieldName, readValue(reader, decoderContext));
+ }
+ }
+
+ reader.readEndDocument();
+ return map;
+ }
+
+ @Override
+ public Class<M> getEncoderClass() {
+ return clazz;
+ }
+}
diff --git a/bson/src/main/org/bson/codecs/AtomicBooleanCodec.java b/bson/src/main/org/bson/codecs/AtomicBooleanCodec.java
new file mode 100644
index 00000000000..f04d286f09a
--- /dev/null
+++ b/bson/src/main/org/bson/codecs/AtomicBooleanCodec.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs;
+
+import org.bson.BsonReader;
+import org.bson.BsonWriter;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * Encodes and decodes {@code AtomicBoolean} objects.
+ *
+ * @since 3.0
+ */
+
+public class AtomicBooleanCodec implements Codec<AtomicBoolean> {
+ @Override
+ public void encode(final BsonWriter writer, final AtomicBoolean value, final EncoderContext encoderContext) {
+ writer.writeBoolean(value.get());
+ }
+
+ @Override
+ public AtomicBoolean decode(final BsonReader reader, final DecoderContext decoderContext) {
+ return new AtomicBoolean(reader.readBoolean());
+ }
+
+ @Override
+ public Class<AtomicBoolean> getEncoderClass() {
+ return AtomicBoolean.class;
+ }
+}
diff --git a/bson/src/main/org/bson/codecs/AtomicIntegerCodec.java b/bson/src/main/org/bson/codecs/AtomicIntegerCodec.java
new file mode 100644
index 00000000000..d8963ed40d7
--- /dev/null
+++ b/bson/src/main/org/bson/codecs/AtomicIntegerCodec.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs;
+
+import org.bson.BsonReader;
+import org.bson.BsonWriter;
+
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.bson.internal.NumberCodecHelper.decodeInt;
+
+/**
+ * Encodes and decodes {@code AtomicInteger} objects.
+ *
+ * @since 3.0
+ */
+
+public class AtomicIntegerCodec implements Codec<AtomicInteger> {
+
+ @Override
+ public void encode(final BsonWriter writer, final AtomicInteger value, final EncoderContext encoderContext) {
+ writer.writeInt32(value.intValue());
+ }
+
+ @Override
+ public AtomicInteger decode(final BsonReader reader, final DecoderContext decoderContext) {
+ return new AtomicInteger(decodeInt(reader));
+ }
+
+ @Override
+ public Class<AtomicInteger> getEncoderClass() {
+ return AtomicInteger.class;
+ }
+}
diff --git a/bson/src/main/org/bson/codecs/AtomicLongCodec.java b/bson/src/main/org/bson/codecs/AtomicLongCodec.java
new file mode 100644
index 00000000000..7f08af77961
--- /dev/null
+++ b/bson/src/main/org/bson/codecs/AtomicLongCodec.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs;
+
+import org.bson.BsonReader;
+import org.bson.BsonWriter;
+
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.bson.internal.NumberCodecHelper.decodeLong;
+
+/**
+ * Encodes and decodes {@code AtomicLong} objects.
+ *
+ * @since 3.0
+ */
+
+public class AtomicLongCodec implements Codec<AtomicLong> {
+
+ @Override
+ public void encode(final BsonWriter writer, final AtomicLong value, final EncoderContext encoderContext) {
+ writer.writeInt64(value.longValue());
+ }
+
+ @Override
+ public AtomicLong decode(final BsonReader reader, final DecoderContext decoderContext) {
+ return new AtomicLong(decodeLong(reader));
+ }
+
+ @Override
+ public Class<AtomicLong> getEncoderClass() {
+ return AtomicLong.class;
+ }
+}
diff --git a/bson/src/main/org/bson/codecs/BigDecimalCodec.java b/bson/src/main/org/bson/codecs/BigDecimalCodec.java
new file mode 100644
index 00000000000..8ad6d555baa
--- /dev/null
+++ b/bson/src/main/org/bson/codecs/BigDecimalCodec.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs;
+
+import org.bson.BsonReader;
+import org.bson.BsonWriter;
+import org.bson.types.Decimal128;
+
+import java.math.BigDecimal;
+
+/**
+ * Encodes and decodes {@code BigDecimal} objects.
+ *
+ * @since 3.5
+ */
+public final class BigDecimalCodec implements Codec<BigDecimal> {
+
+ @Override
+ public void encode(final BsonWriter writer, final BigDecimal value, final EncoderContext encoderContext) {
+ writer.writeDecimal128(new Decimal128(value));
+ }
+
+ @Override
+ public BigDecimal decode(final BsonReader reader, final DecoderContext decoderContext) {
+ return reader.readDecimal128().bigDecimalValue();
+ }
+
+ @Override
+ public Class<BigDecimal> getEncoderClass() {
+ return BigDecimal.class;
+ }
+}
diff --git a/bson/src/main/org/bson/codecs/BinaryCodec.java b/bson/src/main/org/bson/codecs/BinaryCodec.java
new file mode 100644
index 00000000000..cb6c6dbac41
--- /dev/null
+++ b/bson/src/main/org/bson/codecs/BinaryCodec.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonBinary; +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.types.Binary; + +/** + * A Codec for the BSON Binary type. + * + * @since 3.0 + */ +public class BinaryCodec implements Codec { + @Override + public void encode(final BsonWriter writer, final Binary value, final EncoderContext encoderContext) { + writer.writeBinaryData(new BsonBinary(value.getType(), value.getData())); + } + + @Override + public Binary decode(final BsonReader reader, final DecoderContext decoderContext) { + BsonBinary bsonBinary = reader.readBinaryData(); + return new Binary(bsonBinary.getType(), bsonBinary.getData()); + } + + @Override + public Class getEncoderClass() { + return Binary.class; + } +} diff --git a/bson/src/main/org/bson/codecs/BinaryVectorCodec.java b/bson/src/main/org/bson/codecs/BinaryVectorCodec.java new file mode 100644 index 00000000000..4d23557ad49 --- /dev/null +++ b/bson/src/main/org/bson/codecs/BinaryVectorCodec.java @@ -0,0 +1,56 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonBinary; +import org.bson.BsonBinarySubType; +import org.bson.BsonInvalidOperationException; +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.BinaryVector; + +/** + * Encodes and decodes {@link BinaryVector} objects. + * + */ + final class BinaryVectorCodec implements Codec { + + @Override + public void encode(final BsonWriter writer, final BinaryVector vectorToEncode, final EncoderContext encoderContext) { + writer.writeBinaryData(new BsonBinary(vectorToEncode)); + } + + @Override + public BinaryVector decode(final BsonReader reader, final DecoderContext decoderContext) { + byte subType = reader.peekBinarySubType(); + + if (subType != BsonBinarySubType.VECTOR.getValue()) { + throw new BsonInvalidOperationException("Expected vector binary subtype " + BsonBinarySubType.VECTOR.getValue() + " but found " + subType); + } + + return reader.readBinaryData() + .asBinary() + .asVector(); + } + + @Override + public Class getEncoderClass() { + return BinaryVector.class; + } +} + + diff --git a/bson/src/main/org/bson/codecs/BooleanCodec.java b/bson/src/main/org/bson/codecs/BooleanCodec.java new file mode 100644 index 00000000000..79206748527 --- /dev/null +++ b/bson/src/main/org/bson/codecs/BooleanCodec.java @@ -0,0 +1,42 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
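The subtype check in BinaryVectorCodec above is worth illustrating. A short sketch, assuming the BinaryVector factory methods (floatVector here) and arbitrary sample values: wrapping a BinaryVector in BsonBinary tags the data with binary subtype 9 (VECTOR), which is exactly what decode verifies via peekBinarySubType before reading.

    import org.bson.BinaryVector;
    import org.bson.BsonBinary;
    import org.bson.BsonBinarySubType;

    public final class VectorSubtypeSketch {
        public static void main(final String[] args) {
            // The BsonBinary(BinaryVector) constructor applies the VECTOR subtype.
            BsonBinary binary = new BsonBinary(BinaryVector.floatVector(new float[] {0.1f, 0.2f}));

            System.out.println(binary.getType() == BsonBinarySubType.VECTOR.getValue()); // true
            System.out.println(binary.asVector().getDataType()); // FLOAT32
        }
    }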
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonReader; +import org.bson.BsonWriter; + +/** + * Encodes and decodes {@code Boolean} objects. + * + * @since 3.0 + */ +public class BooleanCodec implements Codec { + @Override + public void encode(final BsonWriter writer, final Boolean value, final EncoderContext encoderContext) { + writer.writeBoolean(value); + } + + @Override + public Boolean decode(final BsonReader reader, final DecoderContext decoderContext) { + return reader.readBoolean(); + } + + @Override + public Class getEncoderClass() { + return Boolean.class; + } +} diff --git a/bson/src/main/org/bson/codecs/BsonArrayCodec.java b/bson/src/main/org/bson/codecs/BsonArrayCodec.java new file mode 100644 index 00000000000..6d16bb7d1b0 --- /dev/null +++ b/bson/src/main/org/bson/codecs/BsonArrayCodec.java @@ -0,0 +1,104 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonArray; +import org.bson.BsonReader; +import org.bson.BsonType; +import org.bson.BsonValue; +import org.bson.BsonWriter; +import org.bson.codecs.configuration.CodecRegistry; + +import static org.bson.assertions.Assertions.notNull; +import static org.bson.codecs.BsonValueCodecProvider.getBsonTypeClassMap; +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; + +/** + * A codec for BsonArray instances. + * + * @since 3.0 + */ +public class BsonArrayCodec implements Codec { + + private static final CodecRegistry DEFAULT_REGISTRY = fromProviders(new BsonValueCodecProvider()); + private static final BsonTypeCodecMap DEFAULT_BSON_TYPE_CODEC_MAP = new BsonTypeCodecMap(getBsonTypeClassMap(), DEFAULT_REGISTRY); + private final BsonTypeCodecMap bsonTypeCodecMap; + + /** + * Creates a new instance with a default codec registry that uses the {@link BsonValueCodecProvider}. 
+ * + * @since 3.4 + */ + public BsonArrayCodec() { + this(DEFAULT_BSON_TYPE_CODEC_MAP); + } + + /** + * Construct an instance with the given registry + * + * @param codecRegistry the codec registry + */ + public BsonArrayCodec(final CodecRegistry codecRegistry) { + this(new BsonTypeCodecMap(getBsonTypeClassMap(), codecRegistry)); + } + + private BsonArrayCodec(final BsonTypeCodecMap bsonTypeCodecMap) { + this.bsonTypeCodecMap = notNull("bsonTypeCodecMap", bsonTypeCodecMap); + } + + @Override + public BsonArray decode(final BsonReader reader, final DecoderContext decoderContext) { + BsonArray bsonArray = new BsonArray(); + reader.readStartArray(); + while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) { + bsonArray.add(readValue(reader, decoderContext)); + } + reader.readEndArray(); + return bsonArray; + } + + @Override + @SuppressWarnings({"unchecked", "rawtypes"}) + public void encode(final BsonWriter writer, final BsonArray array, final EncoderContext encoderContext) { + writer.writeStartArray(); + + for (BsonValue value : array) { + Codec codec = bsonTypeCodecMap.get(value.getBsonType()); + encoderContext.encodeWithChildContext(codec, writer, value); + } + + writer.writeEndArray(); + } + + @Override + public Class getEncoderClass() { + return BsonArray.class; + } + + /** + * This method may be overridden to change the behavior of reading the current value from the given {@code BsonReader}. It is required + * that the value be fully consumed before returning. + * + * @param reader the read to read the value from + * @param decoderContext the decoder context + * @return the non-null value read from the reader + */ + protected BsonValue readValue(final BsonReader reader, final DecoderContext decoderContext) { + BsonType currentBsonType = reader.getCurrentBsonType(); + return (BsonValue) bsonTypeCodecMap.get(currentBsonType).decode(reader, decoderContext); + } +} diff --git a/bson/src/main/org/bson/codecs/BsonBinaryCodec.java b/bson/src/main/org/bson/codecs/BsonBinaryCodec.java new file mode 100644 index 00000000000..9790b972ea0 --- /dev/null +++ b/bson/src/main/org/bson/codecs/BsonBinaryCodec.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonBinary; +import org.bson.BsonReader; +import org.bson.BsonWriter; + +/** + * A Codec for the BSON Binary type. 
+ * + * @since 3.0 + */ +public class BsonBinaryCodec implements Codec { + @Override + public void encode(final BsonWriter writer, final BsonBinary value, final EncoderContext encoderContext) { + writer.writeBinaryData(value); + } + + @Override + public BsonBinary decode(final BsonReader reader, final DecoderContext decoderContext) { + return reader.readBinaryData(); + } + + @Override + public Class getEncoderClass() { + return BsonBinary.class; + } +} diff --git a/bson/src/main/org/bson/codecs/BsonBooleanCodec.java b/bson/src/main/org/bson/codecs/BsonBooleanCodec.java new file mode 100644 index 00000000000..aabdb10df82 --- /dev/null +++ b/bson/src/main/org/bson/codecs/BsonBooleanCodec.java @@ -0,0 +1,44 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonBoolean; +import org.bson.BsonReader; +import org.bson.BsonWriter; + +/** + * A Codec for BsonBoolean instances. + * + * @since 3.0 + */ +public class BsonBooleanCodec implements Codec { + @Override + public BsonBoolean decode(final BsonReader reader, final DecoderContext decoderContext) { + boolean value = reader.readBoolean(); + return BsonBoolean.valueOf(value); + } + + @Override + public void encode(final BsonWriter writer, final BsonBoolean value, final EncoderContext encoderContext) { + writer.writeBoolean(value.getValue()); + } + + @Override + public Class getEncoderClass() { + return BsonBoolean.class; + } +} diff --git a/bson/src/main/org/bson/codecs/BsonCodec.java b/bson/src/main/org/bson/codecs/BsonCodec.java new file mode 100644 index 00000000000..f7e81a0bb3f --- /dev/null +++ b/bson/src/main/org/bson/codecs/BsonCodec.java @@ -0,0 +1,65 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
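Because BsonArrayCodec looks up a codec per element through its BsonTypeCodecMap, it can decode arrays that mix BSON types. A brief sketch, with a made-up document literal and field name:

    import org.bson.BsonArray;
    import org.bson.BsonDocument;
    import org.bson.BsonDocumentReader;
    import org.bson.codecs.BsonArrayCodec;
    import org.bson.codecs.DecoderContext;

    public final class MixedArrayDecode {
        public static void main(final String[] args) {
            // The "values" field mixes int32, string, and boolean elements.
            BsonDocument document = BsonDocument.parse("{values: [1, 'two', true]}");

            BsonDocumentReader reader = new BsonDocumentReader(document);
            reader.readStartDocument();
            reader.readName();
            BsonArray decoded = new BsonArrayCodec().decode(reader, DecoderContext.builder().build());
            reader.readEndDocument();

            System.out.println(decoded.size()); // 3: a BsonInt32, a BsonString, and a BsonBoolean
        }
    }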
+ */ + +package org.bson.codecs; + +import org.bson.BsonDocument; +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.codecs.configuration.CodecConfigurationException; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import static java.lang.String.format; + +/** + * A codec for encoding Bson Implementations + * + * @since 3.11 + */ +public class BsonCodec implements Codec { + private static final Codec BSON_DOCUMENT_CODEC = new BsonDocumentCodec(); + private final CodecRegistry registry; + + /** + * Create a new instance + * + * @param registry the codec registry + */ + public BsonCodec(final CodecRegistry registry) { + this.registry = registry; + } + + @Override + public Bson decode(final BsonReader reader, final DecoderContext decoderContext) { + throw new UnsupportedOperationException("The BsonCodec can only encode to Bson"); + } + + @Override + public void encode(final BsonWriter writer, final Bson value, final EncoderContext encoderContext) { + try { + BsonDocument bsonDocument = value.toBsonDocument(BsonDocument.class, registry); + BSON_DOCUMENT_CODEC.encode(writer, bsonDocument, encoderContext); + } catch (Exception e) { + throw new CodecConfigurationException(format("Unable to encode a Bson implementation: %s", value), e); + } + } + + @Override + public Class getEncoderClass() { + return Bson.class; + } +} diff --git a/bson/src/main/org/bson/codecs/BsonCodecProvider.java b/bson/src/main/org/bson/codecs/BsonCodecProvider.java new file mode 100644 index 00000000000..950379d68ad --- /dev/null +++ b/bson/src/main/org/bson/codecs/BsonCodecProvider.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.codecs.configuration.CodecProvider; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +/** + * A codec for encoding simple Bson interface implementations + * + * @since 3.11 + */ +public class BsonCodecProvider implements CodecProvider { + + @Override + @SuppressWarnings("unchecked") + public Codec get(final Class clazz, final CodecRegistry registry) { + if (Bson.class.isAssignableFrom(clazz)) { + return (Codec) new BsonCodec(registry); + } + return null; + } + + @Override + public String toString() { + return "BsonCodecProvider{}"; + } +} diff --git a/bson/src/main/org/bson/codecs/BsonDBPointerCodec.java b/bson/src/main/org/bson/codecs/BsonDBPointerCodec.java new file mode 100644 index 00000000000..eabb6fce153 --- /dev/null +++ b/bson/src/main/org/bson/codecs/BsonDBPointerCodec.java @@ -0,0 +1,44 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
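One plausible way to wire BsonCodec up, sketched under the assumption that combining it with BsonValueCodecProvider suffices for the documents being encoded (any registry that can resolve the values inside the produced BsonDocument would do):

    import org.bson.BsonDocument;
    import org.bson.BsonDocumentWriter;
    import org.bson.codecs.BsonCodecProvider;
    import org.bson.codecs.BsonValueCodecProvider;
    import org.bson.codecs.Codec;
    import org.bson.codecs.EncoderContext;
    import org.bson.codecs.configuration.CodecRegistry;
    import org.bson.conversions.Bson;

    import static org.bson.codecs.configuration.CodecRegistries.fromProviders;

    public final class BsonCodecSketch {
        public static void main(final String[] args) {
            // BsonCodecProvider answers for Bson implementations; the value provider
            // supplies codecs for the BsonValues inside the converted document.
            CodecRegistry registry = fromProviders(new BsonValueCodecProvider(), new BsonCodecProvider());
            Codec<Bson> codec = registry.get(Bson.class);

            Bson query = BsonDocument.parse("{x: 1}"); // any Bson implementation; a literal document here
            BsonDocument out = new BsonDocument();
            codec.encode(new BsonDocumentWriter(out), query, EncoderContext.builder().build());

            System.out.println(out.toJson()); // {"x": 1}
        }
    }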
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonDbPointer; +import org.bson.BsonReader; +import org.bson.BsonWriter; + +/** + * Converts BSON type DBPointer(0x0c) to database references as DBPointer is deprecated. + * + * @since 3.0 + */ +public class BsonDBPointerCodec implements Codec { + + @Override + public BsonDbPointer decode(final BsonReader reader, final DecoderContext decoderContext) { + return reader.readDBPointer(); + } + + @Override + public void encode(final BsonWriter writer, final BsonDbPointer value, final EncoderContext encoderContext) { + writer.writeDBPointer(value); + } + + @Override + public Class getEncoderClass() { + return BsonDbPointer.class; + } +} diff --git a/bson/src/main/org/bson/codecs/BsonDateTimeCodec.java b/bson/src/main/org/bson/codecs/BsonDateTimeCodec.java new file mode 100644 index 00000000000..0ec7dcc20ae --- /dev/null +++ b/bson/src/main/org/bson/codecs/BsonDateTimeCodec.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonDateTime; +import org.bson.BsonReader; +import org.bson.BsonWriter; + +/** + * A Codec for BsonDateTime instances. + * + * @since 3.0 + */ +public class BsonDateTimeCodec implements Codec { + @Override + public BsonDateTime decode(final BsonReader reader, final DecoderContext decoderContext) { + return new BsonDateTime(reader.readDateTime()); + } + + @Override + public void encode(final BsonWriter writer, final BsonDateTime value, final EncoderContext encoderContext) { + writer.writeDateTime(value.getValue()); + } + + @Override + public Class getEncoderClass() { + return BsonDateTime.class; + } +} diff --git a/bson/src/main/org/bson/codecs/BsonDecimal128Codec.java b/bson/src/main/org/bson/codecs/BsonDecimal128Codec.java new file mode 100644 index 00000000000..576e30536fb --- /dev/null +++ b/bson/src/main/org/bson/codecs/BsonDecimal128Codec.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs; + +import org.bson.BsonDecimal128; +import org.bson.BsonReader; +import org.bson.BsonWriter; + +/** + * A Codec for BsonDecimal128 instances. + * + * @since 3.4 + */ +public class BsonDecimal128Codec implements Codec { + @Override + public BsonDecimal128 decode(final BsonReader reader, final DecoderContext decoderContext) { + return new BsonDecimal128(reader.readDecimal128()); + } + + @Override + public void encode(final BsonWriter writer, final BsonDecimal128 value, final EncoderContext encoderContext) { + writer.writeDecimal128(value.getValue()); + } + + @Override + public Class getEncoderClass() { + return BsonDecimal128.class; + } +} diff --git a/bson/src/main/org/bson/codecs/BsonDocumentCodec.java b/bson/src/main/org/bson/codecs/BsonDocumentCodec.java new file mode 100644 index 00000000000..75bd3b7a2b0 --- /dev/null +++ b/bson/src/main/org/bson/codecs/BsonDocumentCodec.java @@ -0,0 +1,159 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonDocument; +import org.bson.BsonObjectId; +import org.bson.BsonReader; +import org.bson.BsonType; +import org.bson.BsonValue; +import org.bson.BsonWriter; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.types.ObjectId; + +import java.util.Map; + +import static org.bson.assertions.Assertions.notNull; +import static org.bson.codecs.BsonValueCodecProvider.getBsonTypeClassMap; +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; + +/** + * A codec for BsonDocument instances. + * + * @since 3.0 + */ +public class BsonDocumentCodec implements CollectibleCodec { + private static final String ID_FIELD_NAME = "_id"; + private static final CodecRegistry DEFAULT_REGISTRY = fromProviders(new BsonValueCodecProvider()); + private static final BsonTypeCodecMap DEFAULT_BSON_TYPE_CODEC_MAP = new BsonTypeCodecMap(getBsonTypeClassMap(), DEFAULT_REGISTRY); + + private final CodecRegistry codecRegistry; + private final BsonTypeCodecMap bsonTypeCodecMap; + + /** + * Creates a new instance with a default codec registry that uses the {@link BsonValueCodecProvider}. + */ + public BsonDocumentCodec() { + this(DEFAULT_REGISTRY, DEFAULT_BSON_TYPE_CODEC_MAP); + } + + /** + * Creates a new instance initialised with the given codec registry. + * + * @param codecRegistry the {@code CodecRegistry} to use to look up the codecs for encoding and decoding to/from BSON + */ + public BsonDocumentCodec(final CodecRegistry codecRegistry) { + this(codecRegistry, new BsonTypeCodecMap(getBsonTypeClassMap(), codecRegistry)); + } + + private BsonDocumentCodec(final CodecRegistry codecRegistry, final BsonTypeCodecMap bsonTypeCodecMap) { + this.codecRegistry = notNull("Codec registry", codecRegistry); + this.bsonTypeCodecMap = notNull("bsonTypeCodecMap", bsonTypeCodecMap); + } + + /** + * Gets the {@code CodecRegistry} for this {@code Codec}. 
+ * + * @return the registry + */ + public CodecRegistry getCodecRegistry() { + return codecRegistry; + } + + @Override + public BsonDocument decode(final BsonReader reader, final DecoderContext decoderContext) { + BsonDocument bsonDocument = new BsonDocument(); + reader.readStartDocument(); + while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) { + String fieldName = reader.readName(); + bsonDocument.append(fieldName, readValue(reader, decoderContext)); + } + + reader.readEndDocument(); + return bsonDocument; + } + + /** + * This method may be overridden to change the behavior of reading the current value from the given {@code BsonReader}. It is required + * that the value be fully consumed before returning. + * + * @param reader the read to read the value from + * @param decoderContext the context + * @return the non-null value read from the reader + */ + protected BsonValue readValue(final BsonReader reader, final DecoderContext decoderContext) { + return (BsonValue) bsonTypeCodecMap.get(reader.getCurrentBsonType()).decode(reader, decoderContext); + } + + @Override + public void encode(final BsonWriter writer, final BsonDocument value, final EncoderContext encoderContext) { + writer.writeStartDocument(); + + beforeFields(writer, encoderContext, value); + for (Map.Entry entry : value.entrySet()) { + if (skipField(encoderContext, entry.getKey())) { + continue; + } + + writer.writeName(entry.getKey()); + writeValue(writer, encoderContext, entry.getValue()); + } + + writer.writeEndDocument(); + } + + private void beforeFields(final BsonWriter bsonWriter, final EncoderContext encoderContext, final BsonDocument value) { + if (encoderContext.isEncodingCollectibleDocument() && value.containsKey(ID_FIELD_NAME)) { + bsonWriter.writeName(ID_FIELD_NAME); + writeValue(bsonWriter, encoderContext, value.get(ID_FIELD_NAME)); + } + } + + private boolean skipField(final EncoderContext encoderContext, final String key) { + return encoderContext.isEncodingCollectibleDocument() && key.equals(ID_FIELD_NAME); + } + + + @SuppressWarnings({"unchecked", "rawtypes"}) + private void writeValue(final BsonWriter writer, final EncoderContext encoderContext, final BsonValue value) { + Codec codec = bsonTypeCodecMap.get(value.getBsonType()); + encoderContext.encodeWithChildContext(codec, writer, value); + } + + @Override + public Class getEncoderClass() { + return BsonDocument.class; + } + + @Override + public BsonDocument generateIdIfAbsentFromDocument(final BsonDocument document) { + if (!documentHasId(document)) { + document.put(ID_FIELD_NAME, new BsonObjectId(new ObjectId())); + } + return document; + } + + @Override + public boolean documentHasId(final BsonDocument document) { + return document.containsKey(ID_FIELD_NAME); + } + + @Override + public BsonValue getDocumentId(final BsonDocument document) { + return document.get(ID_FIELD_NAME); + } +} diff --git a/bson/src/main/org/bson/codecs/BsonDocumentWrapperCodec.java b/bson/src/main/org/bson/codecs/BsonDocumentWrapperCodec.java new file mode 100644 index 00000000000..70df3a677e1 --- /dev/null +++ b/bson/src/main/org/bson/codecs/BsonDocumentWrapperCodec.java @@ -0,0 +1,71 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
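The CollectibleCodec behavior of BsonDocumentCodec is easiest to see directly; the document content below is arbitrary. generateIdIfAbsentFromDocument adds an ObjectId _id only when one is missing, and encode writes the _id field first when the encoder context marks the document as collectible:

    import org.bson.BsonDocument;
    import org.bson.BsonInt32;
    import org.bson.codecs.BsonDocumentCodec;

    public final class CollectibleSketch {
        public static void main(final String[] args) {
            BsonDocumentCodec codec = new BsonDocumentCodec();
            BsonDocument document = new BsonDocument("x", new BsonInt32(1));

            System.out.println(codec.documentHasId(document)); // false
            codec.generateIdIfAbsentFromDocument(document);    // adds a BsonObjectId under "_id"
            System.out.println(codec.documentHasId(document)); // true
            System.out.println(codec.getDocumentId(document)); // the generated BsonObjectId
        }
    }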
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonDocument; +import org.bson.BsonDocumentWrapper; +import org.bson.BsonReader; +import org.bson.BsonWriter; + +/** + * A Codec + * + * @since 3.0 + */ +@SuppressWarnings("rawtypes") +public class BsonDocumentWrapperCodec implements Codec { + + private final Codec bsonDocumentCodec; + + /** + * Construct a new instance, + * + * @param bsonDocumentCodec the code to use if the {@code BsonDocumentWrapper} has been unwrapped. + */ + public BsonDocumentWrapperCodec(final Codec bsonDocumentCodec) { + this.bsonDocumentCodec = bsonDocumentCodec; + } + + /** + * Decoding of {@code BsonDocumentWrapper} instances is not supported, so this method will throw {@code UnsupportedOperationException} + * in all cases. + * + * @param reader the BSON reader the reader + * @param decoderContext a decoder context, currently unused + * @return the document + */ + @Override + public BsonDocumentWrapper decode(final BsonReader reader, final DecoderContext decoderContext) { + throw new UnsupportedOperationException("Decoding into a BsonDocumentWrapper is not allowed"); + } + + @Override + @SuppressWarnings("unchecked") + public void encode(final BsonWriter writer, final BsonDocumentWrapper value, final EncoderContext encoderContext) { + if (value.isUnwrapped()) { + bsonDocumentCodec.encode(writer, value, encoderContext); + } else { + Encoder encoder = value.getEncoder(); + encoder.encode(writer, value.getWrappedDocument(), encoderContext); + } + } + + @Override + public Class getEncoderClass() { + return BsonDocumentWrapper.class; + } +} diff --git a/bson/src/main/org/bson/codecs/BsonDoubleCodec.java b/bson/src/main/org/bson/codecs/BsonDoubleCodec.java new file mode 100644 index 00000000000..ea135c3ec6b --- /dev/null +++ b/bson/src/main/org/bson/codecs/BsonDoubleCodec.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonDouble; +import org.bson.BsonReader; +import org.bson.BsonWriter; + +/** + * A Codec for BsonDouble instances. 
+ * + * @since 3.0 + */ +public class BsonDoubleCodec implements Codec { + @Override + public BsonDouble decode(final BsonReader reader, final DecoderContext decoderContext) { + return new BsonDouble(reader.readDouble()); + } + + @Override + public void encode(final BsonWriter writer, final BsonDouble value, final EncoderContext encoderContext) { + writer.writeDouble(value.getValue()); + } + + @Override + public Class getEncoderClass() { + return BsonDouble.class; + } +} diff --git a/bson/src/main/org/bson/codecs/BsonInt32Codec.java b/bson/src/main/org/bson/codecs/BsonInt32Codec.java new file mode 100644 index 00000000000..76ca33b8cb3 --- /dev/null +++ b/bson/src/main/org/bson/codecs/BsonInt32Codec.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonInt32; +import org.bson.BsonReader; +import org.bson.BsonWriter; + +/** + * A Codec for BsonInt32 instances. + * + * @since 3.0 + */ +public class BsonInt32Codec implements Codec { + @Override + public BsonInt32 decode(final BsonReader reader, final DecoderContext decoderContext) { + return new BsonInt32(reader.readInt32()); + } + + @Override + public void encode(final BsonWriter writer, final BsonInt32 value, final EncoderContext encoderContext) { + writer.writeInt32(value.getValue()); + } + + @Override + public Class getEncoderClass() { + return BsonInt32.class; + } +} diff --git a/bson/src/main/org/bson/codecs/BsonInt64Codec.java b/bson/src/main/org/bson/codecs/BsonInt64Codec.java new file mode 100644 index 00000000000..df7a3e0f266 --- /dev/null +++ b/bson/src/main/org/bson/codecs/BsonInt64Codec.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonInt64; +import org.bson.BsonReader; +import org.bson.BsonWriter; + +/** + * A Codec for BsonInt64 instances. 
+ * + * @since 3.0 + */ +public class BsonInt64Codec implements Codec { + @Override + public BsonInt64 decode(final BsonReader reader, final DecoderContext decoderContext) { + return new BsonInt64(reader.readInt64()); + } + + @Override + public void encode(final BsonWriter writer, final BsonInt64 value, final EncoderContext encoderContext) { + writer.writeInt64(value.getValue()); + } + + @Override + public Class getEncoderClass() { + return BsonInt64.class; + } +} diff --git a/bson/src/main/org/bson/codecs/BsonJavaScriptCodec.java b/bson/src/main/org/bson/codecs/BsonJavaScriptCodec.java new file mode 100644 index 00000000000..1a0f0efe1ca --- /dev/null +++ b/bson/src/main/org/bson/codecs/BsonJavaScriptCodec.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonJavaScript; +import org.bson.BsonReader; +import org.bson.BsonWriter; + +/** + * A Codec for the {@code BsonJavaScript} type. + * + * @since 3.0 + */ +public class BsonJavaScriptCodec implements Codec { + @Override + public BsonJavaScript decode(final BsonReader reader, final DecoderContext decoderContext) { + return new BsonJavaScript(reader.readJavaScript()); + } + + @Override + public void encode(final BsonWriter writer, final BsonJavaScript value, final EncoderContext encoderContext) { + writer.writeJavaScript(value.getCode()); + } + + @Override + public Class getEncoderClass() { + return BsonJavaScript.class; + } +} diff --git a/bson/src/main/org/bson/codecs/BsonJavaScriptWithScopeCodec.java b/bson/src/main/org/bson/codecs/BsonJavaScriptWithScopeCodec.java new file mode 100644 index 00000000000..cd55b0cf9de --- /dev/null +++ b/bson/src/main/org/bson/codecs/BsonJavaScriptWithScopeCodec.java @@ -0,0 +1,58 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonDocument; +import org.bson.BsonJavaScriptWithScope; +import org.bson.BsonReader; +import org.bson.BsonWriter; + +/** + * A Codec for {@code BsonJavaScriptWithScope} instances. 
+ * + * @since 3.0 + */ +public class BsonJavaScriptWithScopeCodec implements Codec { + private final Codec documentCodec; + + /** + * Construct a new instance with the given codec to use for the nested document + * + * @param documentCodec the non-null codec for the nested document + */ + public BsonJavaScriptWithScopeCodec(final Codec documentCodec) { + this.documentCodec = documentCodec; + } + + @Override + public BsonJavaScriptWithScope decode(final BsonReader bsonReader, final DecoderContext decoderContext) { + String code = bsonReader.readJavaScriptWithScope(); + BsonDocument scope = documentCodec.decode(bsonReader, decoderContext); + return new BsonJavaScriptWithScope(code, scope); + } + + @Override + public void encode(final BsonWriter writer, final BsonJavaScriptWithScope codeWithScope, final EncoderContext encoderContext) { + writer.writeJavaScriptWithScope(codeWithScope.getCode()); + documentCodec.encode(writer, codeWithScope.getScope(), encoderContext); + } + + @Override + public Class getEncoderClass() { + return BsonJavaScriptWithScope.class; + } +} diff --git a/bson/src/main/org/bson/codecs/BsonMaxKeyCodec.java b/bson/src/main/org/bson/codecs/BsonMaxKeyCodec.java new file mode 100644 index 00000000000..17f9ff22303 --- /dev/null +++ b/bson/src/main/org/bson/codecs/BsonMaxKeyCodec.java @@ -0,0 +1,44 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonMaxKey; +import org.bson.BsonReader; +import org.bson.BsonWriter; + +/** + * A codec for {@code BsonMaxKey} instances. + * + * @since 3.0 + */ +public class BsonMaxKeyCodec implements Codec { + @Override + public void encode(final BsonWriter writer, final BsonMaxKey value, final EncoderContext encoderContext) { + writer.writeMaxKey(); + } + + @Override + public BsonMaxKey decode(final BsonReader reader, final DecoderContext decoderContext) { + reader.readMaxKey(); + return new BsonMaxKey(); + } + + @Override + public Class getEncoderClass() { + return BsonMaxKey.class; + } +} diff --git a/bson/src/main/org/bson/codecs/BsonMinKeyCodec.java b/bson/src/main/org/bson/codecs/BsonMinKeyCodec.java new file mode 100644 index 00000000000..fd2cefb7082 --- /dev/null +++ b/bson/src/main/org/bson/codecs/BsonMinKeyCodec.java @@ -0,0 +1,44 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
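To show what BsonJavaScriptWithScopeCodec produces on the wire, a sketch with an illustrative field name ("f") and JavaScript source: the codec writes the code string, then hands the scope document to the nested document codec, which yields the $code/$scope form in extended JSON.

    import org.bson.BsonDocument;
    import org.bson.BsonDocumentWriter;
    import org.bson.BsonInt32;
    import org.bson.BsonJavaScriptWithScope;
    import org.bson.codecs.BsonDocumentCodec;
    import org.bson.codecs.BsonJavaScriptWithScopeCodec;
    import org.bson.codecs.EncoderContext;

    public final class CodeWithScopeSketch {
        public static void main(final String[] args) {
            BsonJavaScriptWithScope code = new BsonJavaScriptWithScope(
                    "function() { return x; }", new BsonDocument("x", new BsonInt32(1)));

            BsonDocument target = new BsonDocument();
            BsonDocumentWriter writer = new BsonDocumentWriter(target);
            writer.writeStartDocument();
            writer.writeName("f");
            new BsonJavaScriptWithScopeCodec(new BsonDocumentCodec())
                    .encode(writer, code, EncoderContext.builder().build());
            writer.writeEndDocument();

            System.out.println(target.toJson()); // roughly {"f": {"$code": "...", "$scope": {"x": 1}}}
        }
    }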
+ */ + +package org.bson.codecs; + +import org.bson.BsonMinKey; +import org.bson.BsonReader; +import org.bson.BsonWriter; + +/** + * A codec for {@code BsonMinKey} instances. + * + * @since 3.0 + */ +public class BsonMinKeyCodec implements Codec { + @Override + public void encode(final BsonWriter writer, final BsonMinKey value, final EncoderContext encoderContext) { + writer.writeMinKey(); + } + + @Override + public BsonMinKey decode(final BsonReader reader, final DecoderContext decoderContext) { + reader.readMinKey(); + return new BsonMinKey(); + } + + @Override + public Class getEncoderClass() { + return BsonMinKey.class; + } +} diff --git a/bson/src/main/org/bson/codecs/BsonNullCodec.java b/bson/src/main/org/bson/codecs/BsonNullCodec.java new file mode 100644 index 00000000000..454f411e93c --- /dev/null +++ b/bson/src/main/org/bson/codecs/BsonNullCodec.java @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonNull; +import org.bson.BsonReader; +import org.bson.BsonWriter; + +/** + * A Codec for BsonNull instances. + * + * @since 3.0 + */ +public class BsonNullCodec implements Codec { + + @Override + public BsonNull decode(final BsonReader reader, final DecoderContext decoderContext) { + reader.readNull(); + return BsonNull.VALUE; + } + + @Override + public void encode(final BsonWriter writer, final BsonNull value, final EncoderContext encoderContext) { + writer.writeNull(); + } + + @Override + public Class getEncoderClass() { + return BsonNull.class; + } +} diff --git a/bson/src/main/org/bson/codecs/BsonObjectIdCodec.java b/bson/src/main/org/bson/codecs/BsonObjectIdCodec.java new file mode 100644 index 00000000000..9044ffc9769 --- /dev/null +++ b/bson/src/main/org/bson/codecs/BsonObjectIdCodec.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonObjectId; +import org.bson.BsonReader; +import org.bson.BsonWriter; + +/** + * A Codec for {@code BsonObjectId} instances. 
+ * + * @since 3.0 + */ +public class BsonObjectIdCodec implements Codec { + @Override + public void encode(final BsonWriter writer, final BsonObjectId value, final EncoderContext encoderContext) { + writer.writeObjectId(value.getValue()); + } + + @Override + public BsonObjectId decode(final BsonReader reader, final DecoderContext decoderContext) { + return new BsonObjectId(reader.readObjectId()); + } + + @Override + public Class getEncoderClass() { + return BsonObjectId.class; + } +} diff --git a/bson/src/main/org/bson/codecs/BsonRegularExpressionCodec.java b/bson/src/main/org/bson/codecs/BsonRegularExpressionCodec.java new file mode 100644 index 00000000000..e9efde5fadc --- /dev/null +++ b/bson/src/main/org/bson/codecs/BsonRegularExpressionCodec.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonReader; +import org.bson.BsonRegularExpression; +import org.bson.BsonWriter; + +/** + * A codec for BSON regular expressions. + * + * @since 3.0 + */ +public class BsonRegularExpressionCodec implements Codec { + @Override + public BsonRegularExpression decode(final BsonReader reader, final DecoderContext decoderContext) { + return reader.readRegularExpression(); + } + + @Override + public void encode(final BsonWriter writer, final BsonRegularExpression value, final EncoderContext encoderContext) { + writer.writeRegularExpression(value); + } + + @Override + public Class getEncoderClass() { + return BsonRegularExpression.class; + } +} diff --git a/bson/src/main/org/bson/codecs/BsonStringCodec.java b/bson/src/main/org/bson/codecs/BsonStringCodec.java new file mode 100644 index 00000000000..837bd7ee1a2 --- /dev/null +++ b/bson/src/main/org/bson/codecs/BsonStringCodec.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonReader; +import org.bson.BsonString; +import org.bson.BsonWriter; + +/** + * A Codec for BsonString instances. 
+ * + * @since 3.0 + */ +public class BsonStringCodec implements Codec { + @Override + public BsonString decode(final BsonReader reader, final DecoderContext decoderContext) { + return new BsonString(reader.readString()); + } + + @Override + public void encode(final BsonWriter writer, final BsonString value, final EncoderContext encoderContext) { + writer.writeString(value.getValue()); + } + + @Override + public Class getEncoderClass() { + return BsonString.class; + } +} diff --git a/bson/src/main/org/bson/codecs/BsonSymbolCodec.java b/bson/src/main/org/bson/codecs/BsonSymbolCodec.java new file mode 100644 index 00000000000..b68f85e3262 --- /dev/null +++ b/bson/src/main/org/bson/codecs/BsonSymbolCodec.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonReader; +import org.bson.BsonSymbol; +import org.bson.BsonWriter; + +/** + * A codec for BSON symbol type. + * + * @since 3.0 + */ +public class BsonSymbolCodec implements Codec { + @Override + public BsonSymbol decode(final BsonReader reader, final DecoderContext decoderContext) { + return new BsonSymbol(reader.readSymbol()); + } + + @Override + public void encode(final BsonWriter writer, final BsonSymbol value, final EncoderContext encoderContext) { + writer.writeSymbol(value.getSymbol()); + } + + @Override + public Class getEncoderClass() { + return BsonSymbol.class; + } +} diff --git a/bson/src/main/org/bson/codecs/BsonTimestampCodec.java b/bson/src/main/org/bson/codecs/BsonTimestampCodec.java new file mode 100644 index 00000000000..95d64346275 --- /dev/null +++ b/bson/src/main/org/bson/codecs/BsonTimestampCodec.java @@ -0,0 +1,42 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonReader; +import org.bson.BsonTimestamp; +import org.bson.BsonWriter; + +/** + * A Codec for BSON Timestamp instances. 
+ * + * @since 3.0 + */ +public class BsonTimestampCodec implements Codec { + @Override + public void encode(final BsonWriter writer, final BsonTimestamp value, final EncoderContext encoderContext) { + writer.writeTimestamp(value); + } + + @Override + public BsonTimestamp decode(final BsonReader reader, final DecoderContext decoderContext) { + return reader.readTimestamp(); + } + + @Override + public Class getEncoderClass() { + return BsonTimestamp.class; + }} diff --git a/bson/src/main/org/bson/codecs/BsonTypeClassMap.java b/bson/src/main/org/bson/codecs/BsonTypeClassMap.java new file mode 100644 index 00000000000..32acaeb7f85 --- /dev/null +++ b/bson/src/main/org/bson/codecs/BsonTypeClassMap.java @@ -0,0 +1,144 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonDbPointer; +import org.bson.BsonRegularExpression; +import org.bson.BsonTimestamp; +import org.bson.BsonType; +import org.bson.BsonUndefined; +import org.bson.Document; +import org.bson.types.Binary; +import org.bson.types.Code; +import org.bson.types.CodeWithScope; +import org.bson.types.Decimal128; +import org.bson.types.MaxKey; +import org.bson.types.MinKey; +import org.bson.types.ObjectId; +import org.bson.types.Symbol; + +import java.util.Arrays; +import java.util.Collections; +import java.util.Date; +import java.util.List; +import java.util.Map; + +/** + *
<p>A map from BSON types to the Class to which each should be decoded. This class is useful if, for example,
+ * you want to change the default decoding of BSON DATE to something besides {@code java.util.Date}.</p>
+ *
+ * <p>The default mappings are:</p>
+ *
+ * <ul>
+ * <li>DOCUMENT: {@code org.bson.Document.class}</li>
+ * <li>ARRAY: {@code java.util.List.class}</li>
+ * <li>DATE_TIME: {@code java.util.Date.class}</li>
+ * <li>BOOLEAN: {@code java.lang.Boolean.class}</li>
+ * <li>DOUBLE: {@code java.lang.Double.class}</li>
+ * <li>INT32: {@code java.lang.Integer.class}</li>
+ * <li>INT64: {@code java.lang.Long.class}</li>
+ * <li>DECIMAL128: {@code org.bson.types.Decimal128.class}</li>
+ * <li>STRING: {@code java.lang.String.class}</li>
+ * <li>BINARY: {@code org.bson.types.Binary.class}</li>
+ * <li>OBJECT_ID: {@code org.bson.types.ObjectId.class}</li>
+ * <li>REGULAR_EXPRESSION: {@code org.bson.BsonRegularExpression.class}</li>
+ * <li>SYMBOL: {@code org.bson.types.Symbol.class}</li>
+ * <li>DB_POINTER: {@code org.bson.BsonDbPointer.class}</li>
+ * <li>MAX_KEY: {@code org.bson.types.MaxKey.class}</li>
+ * <li>MIN_KEY: {@code org.bson.types.MinKey.class}</li>
+ * <li>JAVASCRIPT: {@code org.bson.types.Code.class}</li>
+ * <li>JAVASCRIPT_WITH_SCOPE: {@code org.bson.types.CodeWithScope.class}</li>
+ * <li>TIMESTAMP: {@code org.bson.BsonTimestamp.class}</li>
+ * <li>UNDEFINED: {@code org.bson.BsonUndefined.class}</li>
+ * </ul>
+ * + * @since 3.0 + */ +public class BsonTypeClassMap { + static final BsonTypeClassMap DEFAULT_BSON_TYPE_CLASS_MAP = new BsonTypeClassMap(); + private final Class[] bsonTypeOrdinalToClassMap = new Class[256]; + + /** + * Construct an instance with the default mapping, but replacing the default mapping with any values contained in the given map. + * This allows a caller to easily replace a single or a few mappings, while leaving the rest at their default values. + * + * @param replacementsForDefaults the replacement mappings + */ + public BsonTypeClassMap(final Map> replacementsForDefaults) { + addDefaults(); + replacementsForDefaults.forEach((key, value) -> bsonTypeOrdinalToClassMap[key.getValue()] = value); + } + + /** + * Construct an instance with the default mappings. + */ + public BsonTypeClassMap() { + this(Collections.emptyMap()); + } + + /** + * Gets the Class that is mapped to the given BSON type. + * + * @param bsonType the BSON type + * @return the Class that is mapped to the BSON type + */ + public Class get(final BsonType bsonType) { + return bsonTypeOrdinalToClassMap[bsonType.getValue()]; + } + + private void addDefaults() { + bsonTypeOrdinalToClassMap[BsonType.ARRAY.getValue()] = List.class; + bsonTypeOrdinalToClassMap[BsonType.BINARY.getValue()] = Binary.class; + bsonTypeOrdinalToClassMap[BsonType.BOOLEAN.getValue()] = Boolean.class; + bsonTypeOrdinalToClassMap[BsonType.DATE_TIME.getValue()] = Date.class; + bsonTypeOrdinalToClassMap[BsonType.DB_POINTER.getValue()] = BsonDbPointer.class; + bsonTypeOrdinalToClassMap[BsonType.DOCUMENT.getValue()] = Document.class; + bsonTypeOrdinalToClassMap[BsonType.DOUBLE.getValue()] = Double.class; + bsonTypeOrdinalToClassMap[BsonType.INT32.getValue()] = Integer.class; + bsonTypeOrdinalToClassMap[BsonType.INT64.getValue()] = Long.class; + bsonTypeOrdinalToClassMap[BsonType.DECIMAL128.getValue()] = Decimal128.class; + bsonTypeOrdinalToClassMap[BsonType.MAX_KEY.getValue()] = MaxKey.class; + bsonTypeOrdinalToClassMap[BsonType.MIN_KEY.getValue()] = MinKey.class; + bsonTypeOrdinalToClassMap[BsonType.JAVASCRIPT.getValue()] = Code.class; + bsonTypeOrdinalToClassMap[BsonType.JAVASCRIPT_WITH_SCOPE.getValue()] = CodeWithScope.class; + bsonTypeOrdinalToClassMap[BsonType.OBJECT_ID.getValue()] = ObjectId.class; + bsonTypeOrdinalToClassMap[BsonType.REGULAR_EXPRESSION.getValue()] = BsonRegularExpression.class; + bsonTypeOrdinalToClassMap[BsonType.STRING.getValue()] = String.class; + bsonTypeOrdinalToClassMap[BsonType.SYMBOL.getValue()] = Symbol.class; + bsonTypeOrdinalToClassMap[BsonType.TIMESTAMP.getValue()] = BsonTimestamp.class; + bsonTypeOrdinalToClassMap[BsonType.UNDEFINED.getValue()] = BsonUndefined.class; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BsonTypeClassMap that = (BsonTypeClassMap) o; + + return Arrays.equals(bsonTypeOrdinalToClassMap, that.bsonTypeOrdinalToClassMap); + } + + @Override + public int hashCode() { + return Arrays.hashCode(bsonTypeOrdinalToClassMap); + } +} diff --git a/bson/src/main/org/bson/codecs/BsonTypeCodecMap.java b/bson/src/main/org/bson/codecs/BsonTypeCodecMap.java new file mode 100644 index 00000000000..3a3def7ca7f --- /dev/null +++ b/bson/src/main/org/bson/codecs/BsonTypeCodecMap.java @@ -0,0 +1,73 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
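For example, replacing a single default mapping looks like the sketch below; decoding BSON DATE_TIME as java.time.Instant is an assumption for illustration, and a registry used together with this map would still need a codec that actually produces Instant values:

    import org.bson.BsonType;
    import org.bson.codecs.BsonTypeClassMap;

    import java.time.Instant;
    import java.util.Collections;
    import java.util.Map;

    public final class CustomTypeMapSketch {
        public static void main(final String[] args) {
            // Override only DATE_TIME; every other mapping keeps its default.
            Map<BsonType, Class<?>> replacements = Collections.singletonMap(BsonType.DATE_TIME, Instant.class);
            BsonTypeClassMap typeMap = new BsonTypeClassMap(replacements);

            System.out.println(typeMap.get(BsonType.DATE_TIME)); // class java.time.Instant
            System.out.println(typeMap.get(BsonType.INT32));     // class java.lang.Integer
        }
    }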
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs;
+
+import org.bson.BsonType;
+import org.bson.codecs.configuration.CodecConfigurationException;
+import org.bson.codecs.configuration.CodecRegistry;
+
+import static java.lang.String.format;
+import static org.bson.assertions.Assertions.notNull;
+
+/**
+ * An efficient map of BsonType to Codec.
+ *
+ * @since 3.3
+ */
+public class BsonTypeCodecMap {
+    private final BsonTypeClassMap bsonTypeClassMap;
+    private final Codec<?>[] codecs = new Codec<?>[256];
+
+    /**
+     * Initializes the map by iterating the keys of the given BsonTypeClassMap and looking up the Codec for the Class mapped to each key.
+     *
+     * @param bsonTypeClassMap the non-null BsonTypeClassMap
+     * @param codecRegistry the non-null CodecRegistry
+     */
+    public BsonTypeCodecMap(final BsonTypeClassMap bsonTypeClassMap, final CodecRegistry codecRegistry) {
+        this.bsonTypeClassMap = notNull("bsonTypeClassMap", bsonTypeClassMap);
+        notNull("codecRegistry", codecRegistry);
+        for (BsonType cur : BsonType.values()) {
+            Class<?> clazz = bsonTypeClassMap.get(cur);
+            if (clazz != null) {
+                try {
+                    codecs[cur.getValue()] = codecRegistry.get(clazz);
+                } catch (CodecConfigurationException e) {
+                    // delay reporting this until the codec is actually requested
+                }
+            }
+        }
+    }
+
+    /**
+     * Gets the Codec mapped to the given bson type.
+     *
+     * @param bsonType the non-null BsonType
+     * @return the non-null Codec
+     */
+    public Codec<?> get(final BsonType bsonType) {
+        Codec<?> codec = codecs[bsonType.getValue()];
+        if (codec == null) {
+            Class<?> clazz = bsonTypeClassMap.get(bsonType);
+            if (clazz == null) {
+                throw new CodecConfigurationException(format("No class mapped for BSON type %s.", bsonType));
+            } else {
+                throw new CodecConfigurationException(format("Can't find a codec for %s.", clazz));
+            }
+        }
+        return codec;
+    }
+}
diff --git a/bson/src/main/org/bson/codecs/BsonUndefinedCodec.java b/bson/src/main/org/bson/codecs/BsonUndefinedCodec.java
new file mode 100644
index 00000000000..7cebe50ad76
--- /dev/null
+++ b/bson/src/main/org/bson/codecs/BsonUndefinedCodec.java
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs;
+
+import org.bson.BsonReader;
+import org.bson.BsonUndefined;
+import org.bson.BsonWriter;
+
+/**
+ * Allows reading and writing of the BSON Undefined type. On encoding, it will write the correct type to the BsonWriter, but ignore the
+ * value, and on decoding it will read the type off the BsonReader and return an Undefined type, which simply represents a placeholder for
+ * the undefined value.
+ * <p>
+ * The undefined type is deprecated (see the spec).
+ * </p>
+ *
+ * @see <a href="https://bsonspec.org/spec.html">BSON Spec</a>
+ * @see org.bson.BsonType#UNDEFINED
+ * @since 3.0
+ */
+public class BsonUndefinedCodec implements Codec<BsonUndefined> {
+    @Override
+    public BsonUndefined decode(final BsonReader reader, final DecoderContext decoderContext) {
+        reader.readUndefined();
+        return new BsonUndefined();
+    }
+
+    @Override
+    public void encode(final BsonWriter writer, final BsonUndefined value, final EncoderContext encoderContext) {
+        writer.writeUndefined();
+    }
+
+    @Override
+    public Class<BsonUndefined> getEncoderClass() {
+        return BsonUndefined.class;
+    }
+}
diff --git a/bson/src/main/org/bson/codecs/BsonValueCodec.java b/bson/src/main/org/bson/codecs/BsonValueCodec.java
new file mode 100644
index 00000000000..d8ecfd9fb28
--- /dev/null
+++ b/bson/src/main/org/bson/codecs/BsonValueCodec.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs;
+
+import org.bson.BsonReader;
+import org.bson.BsonValue;
+import org.bson.BsonWriter;
+import org.bson.codecs.configuration.CodecRegistry;
+
+import static org.bson.codecs.configuration.CodecRegistries.fromProviders;
+
+/**
+ * A codec for unknown BsonValues.
+ *
+ * <p>Useful for decoding a mix of differing Bson types.</p>
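+ *
+ * <p>A minimal decoding sketch (illustrative only, not part of the original documentation; it assumes the default
+ * registry can supply codecs for the types present):</p>
+ * <pre>{@code
+ * BsonDocument doc = BsonDocument.parse("{x: 1, y: 'two'}");
+ * BsonReader reader = new BsonDocumentReader(doc);
+ * reader.readStartDocument();
+ * reader.readBsonType();   // positions the reader on the first field
+ * reader.skipName();
+ * BsonValue x = new BsonValueCodec().decode(reader, DecoderContext.builder().build());  // BsonInt32(1)
+ * }</pre>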
+ *
+ * @since 3.0
+ */
+public class BsonValueCodec implements Codec<BsonValue> {
+
+    private final CodecRegistry codecRegistry;
+
+    /**
+     * Creates a new instance with a default codec registry that uses the {@link BsonValueCodecProvider}.
+     */
+    public BsonValueCodec() {
+        this(fromProviders(new BsonValueCodecProvider()));
+    }
+
+    /**
+     * Creates a new instance initialised with the given codec registry.
+     *
+     * @param codecRegistry the {@code CodecRegistry} to use to look up the codecs for encoding and decoding to/from BSON
+     */
+    public BsonValueCodec(final CodecRegistry codecRegistry) {
+        this.codecRegistry = codecRegistry;
+    }
+
+    @Override
+    public BsonValue decode(final BsonReader reader, final DecoderContext decoderContext) {
+        return codecRegistry.get(BsonValueCodecProvider.getClassForBsonType(reader.getCurrentBsonType())).decode(reader, decoderContext);
+    }
+
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    @Override
+    public void encode(final BsonWriter writer, final BsonValue value, final EncoderContext encoderContext) {
+        Codec codec = codecRegistry.get(value.getClass());
+        encoderContext.encodeWithChildContext(codec, writer, value);
+    }
+
+    @Override
+    public Class<BsonValue> getEncoderClass() {
+        return BsonValue.class;
+    }
+}
diff --git a/bson/src/main/org/bson/codecs/BsonValueCodecProvider.java b/bson/src/main/org/bson/codecs/BsonValueCodecProvider.java
new file mode 100644
index 00000000000..8a7a3f77375
--- /dev/null
+++ b/bson/src/main/org/bson/codecs/BsonValueCodecProvider.java
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs;
+
+import org.bson.BsonArray;
+import org.bson.BsonBinary;
+import org.bson.BsonBoolean;
+import org.bson.BsonDateTime;
+import org.bson.BsonDbPointer;
+import org.bson.BsonDecimal128;
+import org.bson.BsonDocument;
+import org.bson.BsonDocumentWrapper;
+import org.bson.BsonDouble;
+import org.bson.BsonInt32;
+import org.bson.BsonInt64;
+import org.bson.BsonJavaScript;
+import org.bson.BsonJavaScriptWithScope;
+import org.bson.BsonMaxKey;
+import org.bson.BsonMinKey;
+import org.bson.BsonNull;
+import org.bson.BsonObjectId;
+import org.bson.BsonRegularExpression;
+import org.bson.BsonString;
+import org.bson.BsonSymbol;
+import org.bson.BsonTimestamp;
+import org.bson.BsonType;
+import org.bson.BsonUndefined;
+import org.bson.BsonValue;
+import org.bson.RawBsonDocument;
+import org.bson.codecs.configuration.CodecProvider;
+import org.bson.codecs.configuration.CodecRegistry;
+
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * A {@code CodecProvider} for all subclasses of BsonValue.
+ *
+ * @since 3.0
+ */
+public class BsonValueCodecProvider implements CodecProvider {
+    private static final BsonTypeClassMap DEFAULT_BSON_TYPE_CLASS_MAP;
+
+    private final Map<Class<?>, Codec<?>> codecs = new HashMap<>();
+
+    /**
+     * Construct a new instance with the default codec for each BSON type.
+     */
+    public BsonValueCodecProvider() {
+        addCodecs();
+    }
+
+    /**
+     * Get the {@code BsonValue} subclass associated with the given {@code BsonType}.
+     *
+     * @param bsonType the BsonType
+     * @return the class associated with the given type
+     */
+    @SuppressWarnings("unchecked")
+    public static Class<? extends BsonValue> getClassForBsonType(final BsonType bsonType) {
+        return (Class<? extends BsonValue>) DEFAULT_BSON_TYPE_CLASS_MAP.get(bsonType);
+    }
+
+    /**
+     * Gets the BsonTypeClassMap used by this provider.
+     *
+     * @return the non-null BsonTypeClassMap
+     * @since 3.3
+     */
+    public static BsonTypeClassMap getBsonTypeClassMap() {
+        return DEFAULT_BSON_TYPE_CLASS_MAP;
+    }
+
+    @Override
+    @SuppressWarnings("unchecked")
+    public <T> Codec<T> get(final Class<T> clazz, final CodecRegistry registry) {
+        if (codecs.containsKey(clazz)) {
+            return (Codec<T>) codecs.get(clazz);
+        }
+
+        if (clazz == BsonJavaScriptWithScope.class) {
+            return (Codec<T>) new BsonJavaScriptWithScopeCodec(registry.get(BsonDocument.class));
+        }
+
+        if (clazz == BsonValue.class) {
+            return (Codec<T>) new BsonValueCodec(registry);
+        }
+
+        if (clazz == BsonDocumentWrapper.class) {
+            return (Codec<T>) new BsonDocumentWrapperCodec(registry.get(BsonDocument.class));
+        }
+
+        if (clazz == RawBsonDocument.class) {
+            return (Codec<T>) new RawBsonDocumentCodec();
+        }
+
+        if (BsonDocument.class.isAssignableFrom(clazz)) {
+            return (Codec<T>) new BsonDocumentCodec(registry);
+        }
+
+        if (BsonArray.class.isAssignableFrom(clazz)) {
+            return (Codec<T>) new BsonArrayCodec(registry);
+        }
+
+        return null;
+    }
+
+    private void addCodecs() {
+        addCodec(new BsonNullCodec());
+        addCodec(new BsonBinaryCodec());
+        addCodec(new BsonBooleanCodec());
+        addCodec(new BsonDateTimeCodec());
+        addCodec(new BsonDBPointerCodec());
+        addCodec(new BsonDoubleCodec());
+        addCodec(new BsonInt32Codec());
+        addCodec(new BsonInt64Codec());
+        addCodec(new BsonDecimal128Codec());
+        addCodec(new BsonMinKeyCodec());
+        addCodec(new BsonMaxKeyCodec());
+        addCodec(new BsonJavaScriptCodec());
+        addCodec(new BsonObjectIdCodec());
+        addCodec(new BsonRegularExpressionCodec());
+        addCodec(new BsonStringCodec());
+        addCodec(new BsonSymbolCodec());
+        addCodec(new BsonTimestampCodec());
+        addCodec(new BsonUndefinedCodec());
+    }
+
+    private <T extends BsonValue> void addCodec(final Codec<T> codec) {
+        codecs.put(codec.getEncoderClass(), codec);
+    }
+
+    static {
+        Map<BsonType, Class<?>> map = new HashMap<>();
+
+        map.put(BsonType.NULL, BsonNull.class);
+        map.put(BsonType.ARRAY, BsonArray.class);
+        map.put(BsonType.BINARY, BsonBinary.class);
+        map.put(BsonType.BOOLEAN, BsonBoolean.class);
+        map.put(BsonType.DATE_TIME, BsonDateTime.class);
+        map.put(BsonType.DB_POINTER, BsonDbPointer.class);
+        map.put(BsonType.DOCUMENT, BsonDocument.class);
+        map.put(BsonType.DOUBLE, BsonDouble.class);
+        map.put(BsonType.INT32, BsonInt32.class);
+        map.put(BsonType.INT64, BsonInt64.class);
+        map.put(BsonType.DECIMAL128, BsonDecimal128.class);
+        map.put(BsonType.MAX_KEY, BsonMaxKey.class);
+        map.put(BsonType.MIN_KEY, BsonMinKey.class);
+        map.put(BsonType.JAVASCRIPT, BsonJavaScript.class);
+        map.put(BsonType.JAVASCRIPT_WITH_SCOPE, BsonJavaScriptWithScope.class);
+        map.put(BsonType.OBJECT_ID, BsonObjectId.class);
+        map.put(BsonType.REGULAR_EXPRESSION, BsonRegularExpression.class);
+        map.put(BsonType.STRING, BsonString.class);
+        map.put(BsonType.SYMBOL, BsonSymbol.class);
+        map.put(BsonType.TIMESTAMP, BsonTimestamp.class);
+        map.put(BsonType.UNDEFINED, BsonUndefined.class);
+
+        DEFAULT_BSON_TYPE_CLASS_MAP = new BsonTypeClassMap(map);
+    }
+
+    @Override
+    public String toString() {
+        return "BsonValueCodecProvider{}";
+    }
+}
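As a usage sketch of the BsonTypeClassMap replacement-mapping constructor earlier in this patch (the Instant
remapping is illustrative; a BsonTypeCodecMap built from this class map would additionally need a registry that can
supply an Instant codec):

import java.time.Instant;
import java.util.HashMap;
import java.util.Map;

import org.bson.BsonType;
import org.bson.codecs.BsonTypeClassMap;

public class BsonTypeClassMapSketch {
    public static void main(final String[] args) {
        // Replace only the DATE_TIME mapping; all other defaults (STRING -> String, INT32 -> Integer, ...) are kept.
        Map<BsonType, Class<?>> replacements = new HashMap<>();
        replacements.put(BsonType.DATE_TIME, Instant.class);
        BsonTypeClassMap classMap = new BsonTypeClassMap(replacements);

        System.out.println(classMap.get(BsonType.DATE_TIME)); // class java.time.Instant
        System.out.println(classMap.get(BsonType.STRING));    // class java.lang.String (default retained)
    }
}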
diff --git a/bson/src/main/org/bson/codecs/ByteArrayCodec.java b/bson/src/main/org/bson/codecs/ByteArrayCodec.java new file mode 100644 index 00000000000..99b07efa9ab --- /dev/null +++ b/bson/src/main/org/bson/codecs/ByteArrayCodec.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonBinary; +import org.bson.BsonReader; +import org.bson.BsonWriter; + +/** + * Encodes and decodes byte arrays. + * + * @since 3.0 + */ +public class ByteArrayCodec implements Codec { + @Override + public void encode(final BsonWriter writer, final byte[] value, final EncoderContext encoderContext) { + writer.writeBinaryData(new BsonBinary(value)); + } + + @Override + public byte[] decode(final BsonReader reader, final DecoderContext decoderContext) { + return reader.readBinaryData().getData(); + } + + @Override + public Class getEncoderClass() { + return byte[].class; + } +} diff --git a/bson/src/main/org/bson/codecs/ByteCodec.java b/bson/src/main/org/bson/codecs/ByteCodec.java new file mode 100644 index 00000000000..e7011f8b58d --- /dev/null +++ b/bson/src/main/org/bson/codecs/ByteCodec.java @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonReader; +import org.bson.BsonWriter; + +import static org.bson.internal.NumberCodecHelper.decodeByte; + +/** + * Encodes and decodes {@code Byte} objects. + * + * @since 3.0 + */ +public class ByteCodec implements Codec { + + @Override + public void encode(final BsonWriter writer, final Byte value, final EncoderContext encoderContext) { + writer.writeInt32(value); + } + + @Override + public Byte decode(final BsonReader reader, final DecoderContext decoderContext) { + return decodeByte(reader); + } + + @Override + public Class getEncoderClass() { + return Byte.class; + } +} diff --git a/bson/src/main/org/bson/codecs/CharacterCodec.java b/bson/src/main/org/bson/codecs/CharacterCodec.java new file mode 100644 index 00000000000..4ad6efa2663 --- /dev/null +++ b/bson/src/main/org/bson/codecs/CharacterCodec.java @@ -0,0 +1,47 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.internal.StringCodecHelper; + +import static org.bson.assertions.Assertions.notNull; + +/** + * Encodes and decodes {@code Character} objects. + * + * @since 3.0 + */ +public class CharacterCodec implements Codec { + @Override + public void encode(final BsonWriter writer, final Character value, final EncoderContext encoderContext) { + notNull("value", value); + + writer.writeString(value.toString()); + } + + @Override + public Character decode(final BsonReader reader, final DecoderContext decoderContext) { + return StringCodecHelper.decodeChar(reader); + } + + @Override + public Class getEncoderClass() { + return Character.class; + } +} diff --git a/bson/src/main/org/bson/codecs/CodeCodec.java b/bson/src/main/org/bson/codecs/CodeCodec.java new file mode 100644 index 00000000000..0b5fafd6c8e --- /dev/null +++ b/bson/src/main/org/bson/codecs/CodeCodec.java @@ -0,0 +1,44 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.types.Code; + +/** + * Encodes and decodes instances of type {@link org.bson.types.Code}. + * + * @since 3.0 + */ +public class CodeCodec implements Codec { + + @Override + public void encode(final BsonWriter writer, final Code value, final EncoderContext encoderContext) { + writer.writeJavaScript(value.getCode()); + } + + @Override + public Code decode(final BsonReader bsonReader, final DecoderContext decoderContext) { + return new Code(bsonReader.readJavaScript()); + } + + @Override + public Class getEncoderClass() { + return Code.class; + } +} diff --git a/bson/src/main/org/bson/codecs/CodeWithScopeCodec.java b/bson/src/main/org/bson/codecs/CodeWithScopeCodec.java new file mode 100644 index 00000000000..33cb8efafc5 --- /dev/null +++ b/bson/src/main/org/bson/codecs/CodeWithScopeCodec.java @@ -0,0 +1,58 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.Document; +import org.bson.types.CodeWithScope; + +/** + * Encodes and decodes {@code CodeWithScope} instances. + * + * @since 3.0 + */ +public class CodeWithScopeCodec implements Codec { + private final Codec documentCodec; + + /** + * Creates a new CodeWithScopeCodec. + * + * @param documentCodec a Codec for encoding and decoding the {@link org.bson.types.CodeWithScope#getScope()}. + */ + public CodeWithScopeCodec(final Codec documentCodec) { + this.documentCodec = documentCodec; + } + + @Override + public CodeWithScope decode(final BsonReader bsonReader, final DecoderContext decoderContext) { + String code = bsonReader.readJavaScriptWithScope(); + Document scope = documentCodec.decode(bsonReader, decoderContext); + return new CodeWithScope(code, scope); + } + + @Override + public void encode(final BsonWriter writer, final CodeWithScope codeWithScope, final EncoderContext encoderContext) { + writer.writeJavaScriptWithScope(codeWithScope.getCode()); + documentCodec.encode(writer, codeWithScope.getScope(), encoderContext); + } + + @Override + public Class getEncoderClass() { + return CodeWithScope.class; + } +} diff --git a/bson/src/main/org/bson/codecs/Codec.java b/bson/src/main/org/bson/codecs/Codec.java new file mode 100644 index 00000000000..6e9dfc2f8dc --- /dev/null +++ b/bson/src/main/org/bson/codecs/Codec.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +/** + * Implementations of this interface can both encode and decode values of type {@code T}. + * + * @param the value type + * + * @since 3.0 + */ +public interface Codec extends Encoder, Decoder { +} diff --git a/bson/src/main/org/bson/codecs/CollectibleCodec.java b/bson/src/main/org/bson/codecs/CollectibleCodec.java new file mode 100644 index 00000000000..27d8ee9c324 --- /dev/null +++ b/bson/src/main/org/bson/codecs/CollectibleCodec.java @@ -0,0 +1,53 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonValue; + +/** + * A Codec that generates complete BSON documents for storage in a MongoDB collection. 
+ *
+ * @param <T> the document type
+ * @since 3.0
+ */
+public interface CollectibleCodec<T> extends Codec<T> {
+    /**
+     * Generates a value for the _id field on the given document, if the document does not have one.
+     *
+     * @param document the document for which to generate a value for the _id.
+     * @return the document with the _id
+     */
+    T generateIdIfAbsentFromDocument(T document);
+
+    /**
+     * Returns true if the given document has an _id.
+     *
+     * @param document the document in which to look for an _id
+     * @return true if the document has an _id
+     */
+    boolean documentHasId(T document);
+
+    /**
+     * Gets the _id of the given document if it contains one, otherwise throws {@code IllegalStateException}. To avoid the latter case,
+     * call {@code documentHasId} first to check.
+     *
+     * @param document the document from which to get the _id
+     * @return the _id of the document
+     * @throws java.lang.IllegalStateException if the document does not contain an _id
+     */
+    BsonValue getDocumentId(T document);
+}
diff --git a/bson/src/main/org/bson/codecs/CollectionCodec.java b/bson/src/main/org/bson/codecs/CollectionCodec.java
new file mode 100644
index 00000000000..d53ab4a937e
--- /dev/null
+++ b/bson/src/main/org/bson/codecs/CollectionCodec.java
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs;
+
+import org.bson.BsonReader;
+import org.bson.BsonWriter;
+import org.bson.Transformer;
+import org.bson.UuidRepresentation;
+import org.bson.codecs.configuration.CodecRegistry;
+
+import java.util.Collection;
+import java.util.List;
+
+import static org.bson.assertions.Assertions.notNull;
+
+/**
+ * A codec for {@code Collection}.
+ *
+ * <p>Supports {@link Collection}, {@link List}, {@link java.util.AbstractCollection}, {@link java.util.AbstractList},
+ * {@link java.util.Set}, {@link java.util.NavigableSet}, {@link java.util.SortedSet}, {@link java.util.AbstractSet} or any
+ * concrete class that implements {@code Collection} and has a public no-args constructor. If the type argument is
+ * {@code Collection}, {@code List}, {@code AbstractCollection}, or {@code AbstractList}, it constructs {@code ArrayList}
+ * instances when decoding. If the type argument is {@code Set} or {@code AbstractSet}, it constructs {@code HashSet}
+ * instances when decoding. If the type argument is {@code NavigableSet} or {@code SortedSet}, it constructs
+ * {@code TreeSet} instances when decoding.</p>
+ *
+ * <p>Replaces the now deprecated {@link IterableCodec}.</p>
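+ *
+ * <p>For illustration (a sketch, not part of the original documentation), resolution through a registry built from
+ * the providers in this patch:</p>
+ * <pre>{@code
+ * CodecRegistry registry = CodecRegistries.fromProviders(new ValueCodecProvider(), new CollectionCodecProvider());
+ * Codec<Set> setCodec = registry.get(Set.class);     // decodes into HashSet instances
+ * Codec<List> listCodec = registry.get(List.class);  // decodes into ArrayList instances
+ * }</pre>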
+ *
+ * @param <C> the actual type of the Collection, e.g. {@code List}
+ */
+@SuppressWarnings("rawtypes")
+final class CollectionCodec<C extends Collection<Object>> extends AbstractCollectionCodec<Object, C>
+        implements OverridableUuidRepresentationCodec<C> {
+
+    private final CodecRegistry registry;
+    private final BsonTypeCodecMap bsonTypeCodecMap;
+    private final Transformer valueTransformer;
+    private final UuidRepresentation uuidRepresentation;
+
+    /**
+     * Construct a new instance with the given {@code CodecRegistry} and {@code BsonTypeClassMap}.
+     *
+     * @param registry the non-null codec registry
+     * @param bsonTypeClassMap the non-null BsonTypeClassMap
+     * @param valueTransformer the value Transformer
+     * @param clazz the class
+     */
+    CollectionCodec(final CodecRegistry registry, final BsonTypeClassMap bsonTypeClassMap, final Transformer valueTransformer,
+                    final Class<C> clazz) {
+        this(registry, new BsonTypeCodecMap(notNull("bsonTypeClassMap", bsonTypeClassMap), registry), valueTransformer, clazz,
+                UuidRepresentation.UNSPECIFIED);
+    }
+
+    private CollectionCodec(final CodecRegistry registry, final BsonTypeCodecMap bsonTypeCodecMap, final Transformer valueTransformer,
+                            final Class<C> clazz, final UuidRepresentation uuidRepresentation) {
+        super(clazz);
+        this.registry = notNull("registry", registry);
+        this.bsonTypeCodecMap = bsonTypeCodecMap;
+        this.valueTransformer = valueTransformer != null ? valueTransformer : (value) -> value;
+        this.uuidRepresentation = uuidRepresentation;
+    }
+
+    @Override
+    public Codec<C> withUuidRepresentation(final UuidRepresentation uuidRepresentation) {
+        if (this.uuidRepresentation.equals(uuidRepresentation)) {
+            return this;
+        }
+        return new CollectionCodec<>(registry, bsonTypeCodecMap, valueTransformer, getEncoderClass(), uuidRepresentation);
+    }
+
+    @Override
+    Object readValue(final BsonReader reader, final DecoderContext decoderContext) {
+        return ContainerCodecHelper.readValue(reader, decoderContext, bsonTypeCodecMap, uuidRepresentation, registry, valueTransformer);
+    }
+
+    @SuppressWarnings("unchecked")
+    @Override
+    void writeValue(final BsonWriter writer, final Object value, final EncoderContext encoderContext) {
+        Codec codec = registry.get(value.getClass());
+        encoderContext.encodeWithChildContext(codec, writer, value);
+    }
+}
diff --git a/bson/src/main/org/bson/codecs/CollectionCodecProvider.java b/bson/src/main/org/bson/codecs/CollectionCodecProvider.java
new file mode 100644
index 00000000000..c4c447e87bd
--- /dev/null
+++ b/bson/src/main/org/bson/codecs/CollectionCodecProvider.java
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs;
+
+import org.bson.Transformer;
+import org.bson.codecs.configuration.CodecConfigurationException;
+import org.bson.codecs.configuration.CodecProvider;
+import org.bson.codecs.configuration.CodecRegistry;
+
+import java.lang.reflect.Type;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Objects;
+
+import static org.bson.assertions.Assertions.notNull;
+import static org.bson.codecs.BsonTypeClassMap.DEFAULT_BSON_TYPE_CLASS_MAP;
+import static org.bson.codecs.ContainerCodecHelper.getCodec;
+
+/**
+ * A {@code CodecProvider} for classes that implement the {@code Collection} interface.
+ *
+ * @since 3.3
+ */
+public class CollectionCodecProvider implements CodecProvider {
+    private final BsonTypeClassMap bsonTypeClassMap;
+    private final Transformer valueTransformer;
+
+    /**
+     * Construct a new instance with a default {@code BsonTypeClassMap} and no {@code Transformer}.
+     */
+    public CollectionCodecProvider() {
+        this(DEFAULT_BSON_TYPE_CLASS_MAP);
+    }
+
+    /**
+     * Construct a new instance with a default {@code BsonTypeClassMap} and the given {@code Transformer}. The transformer is used by the
+     * CollectionCodec as a last step when decoding values.
+     *
+     * @param valueTransformer the value transformer for decoded values
+     */
+    public CollectionCodecProvider(final Transformer valueTransformer) {
+        this(DEFAULT_BSON_TYPE_CLASS_MAP, valueTransformer);
+    }
+
+    /**
+     * Construct a new instance with the given instance of {@code BsonTypeClassMap} and no {@code Transformer}.
+     *
+     * @param bsonTypeClassMap the non-null {@code BsonTypeClassMap} with which to construct instances of {@code DocumentCodec} and {@code
+     * ListCodec}
+     */
+    public CollectionCodecProvider(final BsonTypeClassMap bsonTypeClassMap) {
+        this(bsonTypeClassMap, null);
+    }
+
+    /**
+     * Construct a new instance with the given instance of {@code BsonTypeClassMap} and {@code Transformer}.
+     *
+     * @param bsonTypeClassMap the non-null {@code BsonTypeClassMap} with which to construct instances of {@code DocumentCodec} and {@code
+     * ListCodec}.
+ * @param valueTransformer the value transformer for decoded values + */ + public CollectionCodecProvider(final BsonTypeClassMap bsonTypeClassMap, final Transformer valueTransformer) { + this.bsonTypeClassMap = notNull("bsonTypeClassMap", bsonTypeClassMap); + this.valueTransformer = valueTransformer; + } + + @Override + public Codec get(final Class clazz, final CodecRegistry registry) { + return get(clazz, Collections.emptyList(), registry); + } + + @Override + public Codec get(final Class clazz, final List typeArguments, final CodecRegistry registry) { + if (Collection.class.isAssignableFrom(clazz)) { + int typeArgumentsSize = typeArguments.size(); + switch (typeArgumentsSize) { + case 0: { + @SuppressWarnings({"unchecked", "rawtypes"}) + Codec result = new CollectionCodec(registry, bsonTypeClassMap, valueTransformer, clazz); + return result; + } + case 1: { + @SuppressWarnings({"unchecked", "rawtypes"}) + Codec result = new ParameterizedCollectionCodec(getCodec(registry, typeArguments.get(0)), clazz); + return result; + } + default: { + throw new CodecConfigurationException("Expected only one type argument for a Collection, but found " + typeArgumentsSize); + } + } + } + return null; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CollectionCodecProvider that = (CollectionCodecProvider) o; + + if (!bsonTypeClassMap.equals(that.bsonTypeClassMap)) { + return false; + } + if (!Objects.equals(valueTransformer, that.valueTransformer)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return Objects.hash(bsonTypeClassMap, valueTransformer); + } + + @Override + public String toString() { + return "CollectionCodecProvider{}"; + } +} diff --git a/bson/src/main/org/bson/codecs/ContainerCodecHelper.java b/bson/src/main/org/bson/codecs/ContainerCodecHelper.java new file mode 100644 index 00000000000..2243f209528 --- /dev/null +++ b/bson/src/main/org/bson/codecs/ContainerCodecHelper.java @@ -0,0 +1,108 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonBinarySubType; +import org.bson.BsonReader; +import org.bson.BsonType; +import org.bson.Transformer; +import org.bson.UuidRepresentation; +import org.bson.BinaryVector; +import org.bson.codecs.configuration.CodecConfigurationException; +import org.bson.codecs.configuration.CodecRegistry; + +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; +import java.util.Arrays; +import java.util.UUID; + +import static org.bson.internal.UuidHelper.isLegacyUUID; + +/** + * Helper methods for Codec implementations for containers, e.g. {@code Map} and {@code Iterable}. 
+ */ +final class ContainerCodecHelper { + + static Object readValue(final BsonReader reader, final DecoderContext decoderContext, + final BsonTypeCodecMap bsonTypeCodecMap, final UuidRepresentation uuidRepresentation, + final CodecRegistry registry, final Transformer valueTransformer) { + + BsonType bsonType = reader.getCurrentBsonType(); + if (bsonType == BsonType.NULL) { + reader.readNull(); + return null; + } else { + Codec currentCodec = bsonTypeCodecMap.get(bsonType); + + if (bsonType == BsonType.BINARY) { + byte binarySubType = reader.peekBinarySubType(); + currentCodec = getBinarySubTypeCodec( + reader, + uuidRepresentation, + registry, binarySubType, + currentCodec); + } + + return valueTransformer.transform(currentCodec.decode(reader, decoderContext)); + } + } + + private static Codec getBinarySubTypeCodec(final BsonReader reader, + final UuidRepresentation uuidRepresentation, + final CodecRegistry registry, + final byte binarySubType, + final Codec binaryTypeCodec) { + + if (binarySubType == BsonBinarySubType.VECTOR.getValue()) { + Codec vectorCodec = registry.get(BinaryVector.class, registry); + if (vectorCodec != null) { + return vectorCodec; + } + } else if (reader.peekBinarySize() == 16) { + switch (binarySubType) { + case 3: + if (isLegacyUUID(uuidRepresentation)) { + return registry.get(UUID.class); + } + break; + case 4: + if (uuidRepresentation == UuidRepresentation.STANDARD) { + return registry.get(UUID.class); + } + break; + default: + break; + } + } + + return binaryTypeCodec; + } + + static Codec getCodec(final CodecRegistry codecRegistry, final Type type) { + if (type instanceof Class) { + return codecRegistry.get((Class) type); + } else if (type instanceof ParameterizedType) { + ParameterizedType parameterizedType = (ParameterizedType) type; + return codecRegistry.get((Class) parameterizedType.getRawType(), Arrays.asList(parameterizedType.getActualTypeArguments())); + } else { + throw new CodecConfigurationException("Unsupported generic type of container: " + type); + } + } + + private ContainerCodecHelper() { + } +} diff --git a/bson/src/main/org/bson/codecs/DateCodec.java b/bson/src/main/org/bson/codecs/DateCodec.java new file mode 100644 index 00000000000..cf0cc7cb621 --- /dev/null +++ b/bson/src/main/org/bson/codecs/DateCodec.java @@ -0,0 +1,44 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonReader; +import org.bson.BsonWriter; + +import java.util.Date; + +/** + * Encodes and decodes {@code java.util.Date} objects. 
+ * + * @since 3.0 + */ +public class DateCodec implements Codec { + @Override + public void encode(final BsonWriter writer, final Date value, final EncoderContext encoderContext) { + writer.writeDateTime(value.getTime()); + } + + @Override + public Date decode(final BsonReader reader, final DecoderContext decoderContext) { + return new Date(reader.readDateTime()); + } + + @Override + public Class getEncoderClass() { + return Date.class; + } +} diff --git a/bson/src/main/org/bson/codecs/Decimal128Codec.java b/bson/src/main/org/bson/codecs/Decimal128Codec.java new file mode 100644 index 00000000000..d82bb39288c --- /dev/null +++ b/bson/src/main/org/bson/codecs/Decimal128Codec.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.types.Decimal128; + +/** + * Encodes and decodes {@code Decimal128} objects. + * + * @since 3.4 + */ +public final class Decimal128Codec implements Codec { + @Override + public Decimal128 decode(final BsonReader reader, final DecoderContext decoderContext) { + return reader.readDecimal128(); + } + + @Override + public void encode(final BsonWriter writer, final Decimal128 value, final EncoderContext encoderContext) { + writer.writeDecimal128(value); + } + + @Override + public Class getEncoderClass() { + return Decimal128.class; + } +} diff --git a/bson/src/main/org/bson/codecs/Decoder.java b/bson/src/main/org/bson/codecs/Decoder.java new file mode 100644 index 00000000000..051b102c92d --- /dev/null +++ b/bson/src/main/org/bson/codecs/Decoder.java @@ -0,0 +1,36 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonReader; + +/** + * Decoders are used for reading BSON types from MongoDB and converting them into Java objects. + * + * @param the type to decode into, the return type of the {@link #decode(org.bson.BsonReader, DecoderContext)} method. + * @since 3.0 + */ +public interface Decoder { + /** + * Decodes a BSON value from the given reader into an instance of the type parameter {@code T}. + * + * @param reader the BSON reader + * @param decoderContext the decoder context + * @return an instance of the type parameter {@code T}. 
+ */ + T decode(BsonReader reader, DecoderContext decoderContext); +} diff --git a/bson/src/main/org/bson/codecs/DecoderContext.java b/bson/src/main/org/bson/codecs/DecoderContext.java new file mode 100644 index 00000000000..7ebabaad05b --- /dev/null +++ b/bson/src/main/org/bson/codecs/DecoderContext.java @@ -0,0 +1,102 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonReader; + +import static org.bson.assertions.Assertions.notNull; + +/** + * The context for decoding values to BSON. + * + * @see org.bson.codecs.Decoder + * @since 3.0 + */ +public final class DecoderContext { + private static final DecoderContext DEFAULT_CONTEXT = DecoderContext.builder().build(); + private final boolean checkedDiscriminator; + + /** + * @return true if the discriminator has been checked + */ + public boolean hasCheckedDiscriminator() { + return checkedDiscriminator; + } + + /** + * Create a builder. + * + * @return the builder + */ + public static Builder builder() { + return new Builder(); + } + + /** + * A builder for {@code DecoderContext} instances. + */ + public static final class Builder { + private Builder() { + } + + private boolean checkedDiscriminator; + + /** + * @return true if the discriminator has been checked + */ + public boolean hasCheckedDiscriminator() { + return checkedDiscriminator; + } + + /** + * Sets the checkedDiscriminator + * + * @param checkedDiscriminator the checkedDiscriminator + * @return this + */ + public Builder checkedDiscriminator(final boolean checkedDiscriminator) { + this.checkedDiscriminator = checkedDiscriminator; + return this; + } + + /** + * Build an instance of {@code DecoderContext}. + * @return the decoder context + */ + public DecoderContext build() { + return new DecoderContext(this); + } + } + + /** + * Creates a child context and then deserializes using the reader. + * + * @param decoder the decoder to decode with + * @param reader the reader to decode to + * @param the type of the decoder + * @return the decoded value + * @since 3.5 + */ + public T decodeWithChildContext(final Decoder decoder, final BsonReader reader) { + notNull("decoder", decoder); + return decoder.decode(reader, DEFAULT_CONTEXT); + } + + private DecoderContext(final Builder builder) { + this.checkedDiscriminator = builder.hasCheckedDiscriminator(); + } +} diff --git a/bson/src/main/org/bson/codecs/DocumentCodec.java b/bson/src/main/org/bson/codecs/DocumentCodec.java new file mode 100644 index 00000000000..0c4161f53fd --- /dev/null +++ b/bson/src/main/org/bson/codecs/DocumentCodec.java @@ -0,0 +1,209 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonDocument; +import org.bson.BsonDocumentWriter; +import org.bson.BsonReader; +import org.bson.BsonType; +import org.bson.BsonValue; +import org.bson.BsonWriter; +import org.bson.Document; +import org.bson.Transformer; +import org.bson.UuidRepresentation; +import org.bson.codecs.configuration.CodecRegistry; + +import java.util.Map; + +import static java.util.Arrays.asList; +import static org.bson.assertions.Assertions.notNull; +import static org.bson.codecs.BsonTypeClassMap.DEFAULT_BSON_TYPE_CLASS_MAP; +import static org.bson.codecs.ContainerCodecHelper.readValue; +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; + +/** + * A Codec for Document instances. + * + * @see org.bson.Document + * @since 3.0 + */ +public class DocumentCodec implements CollectibleCodec, OverridableUuidRepresentationCodec { + + private static final String ID_FIELD_NAME = "_id"; + private static final CodecRegistry DEFAULT_REGISTRY = fromProviders(asList(new ValueCodecProvider(), + new CollectionCodecProvider(), new IterableCodecProvider(), + new BsonValueCodecProvider(), new DocumentCodecProvider(), new MapCodecProvider())); + private static final BsonTypeCodecMap DEFAULT_BSON_TYPE_CODEC_MAP = new BsonTypeCodecMap(DEFAULT_BSON_TYPE_CLASS_MAP, DEFAULT_REGISTRY); + private static final IdGenerator DEFAULT_ID_GENERATOR = new ObjectIdGenerator(); + + private final BsonTypeCodecMap bsonTypeCodecMap; + private final CodecRegistry registry; + private final IdGenerator idGenerator; + private final Transformer valueTransformer; + private final UuidRepresentation uuidRepresentation; + + /** + * Construct a new instance with a default {@code CodecRegistry}. + */ + public DocumentCodec() { + this(DEFAULT_REGISTRY, DEFAULT_BSON_TYPE_CODEC_MAP, null); + } + + /** + * Construct a new instance with the given registry. + * + * @param registry the registry + * @since 3.5 + */ + public DocumentCodec(final CodecRegistry registry) { + this(registry, DEFAULT_BSON_TYPE_CLASS_MAP); + } + + /** + * Construct a new instance with the given registry and BSON type class map. + * + * @param registry the registry + * @param bsonTypeClassMap the BSON type class map + */ + public DocumentCodec(final CodecRegistry registry, final BsonTypeClassMap bsonTypeClassMap) { + this(registry, bsonTypeClassMap, null); + } + + /** + * Construct a new instance with the given registry and BSON type class map. The transformer is applied as a last step when decoding + * values, which allows users of this codec to control the decoding process. For example, a user of this class could substitute a + * value decoded as a Document with an instance of a special purpose class (e.g., one representing a DBRef in MongoDB). 
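+     *
+     * <p>For example (an illustrative sketch, not part of the original documentation, assuming a {@code registry} is
+     * in scope), a transformer that surfaces decoded {@code Date} values as {@code Instant}:</p>
+     * <pre>{@code
+     * Transformer toInstant = value -> value instanceof Date ? ((Date) value).toInstant() : value;
+     * DocumentCodec codec = new DocumentCodec(registry, new BsonTypeClassMap(), toInstant);
+     * }</pre>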
+ * + * @param registry the registry + * @param bsonTypeClassMap the BSON type class map + * @param valueTransformer the value transformer to use as a final step when decoding the value of any field in the document + */ + public DocumentCodec(final CodecRegistry registry, final BsonTypeClassMap bsonTypeClassMap, final Transformer valueTransformer) { + this(registry, new BsonTypeCodecMap(notNull("bsonTypeClassMap", bsonTypeClassMap), registry), valueTransformer); + } + + private DocumentCodec(final CodecRegistry registry, final BsonTypeCodecMap bsonTypeCodecMap, final Transformer valueTransformer) { + this(registry, bsonTypeCodecMap, DEFAULT_ID_GENERATOR, valueTransformer, UuidRepresentation.UNSPECIFIED); + } + + private DocumentCodec(final CodecRegistry registry, final BsonTypeCodecMap bsonTypeCodecMap, final IdGenerator idGenerator, + final Transformer valueTransformer, final UuidRepresentation uuidRepresentation) { + this.registry = notNull("registry", registry); + this.bsonTypeCodecMap = bsonTypeCodecMap; + this.idGenerator = idGenerator; + this.valueTransformer = valueTransformer != null ? valueTransformer : value -> value; + this.uuidRepresentation = uuidRepresentation; + } + + @Override + public Codec withUuidRepresentation(final UuidRepresentation uuidRepresentation) { + if (this.uuidRepresentation.equals(uuidRepresentation)) { + return this; + } + return new DocumentCodec(registry, bsonTypeCodecMap, idGenerator, valueTransformer, uuidRepresentation); + } + + @Override + public boolean documentHasId(final Document document) { + return document.containsKey(ID_FIELD_NAME); + } + + @Override + public BsonValue getDocumentId(final Document document) { + if (!documentHasId(document)) { + throw new IllegalStateException("The document does not contain an _id"); + } + + Object id = document.get(ID_FIELD_NAME); + if (id instanceof BsonValue) { + return (BsonValue) id; + } + + BsonDocument idHoldingDocument = new BsonDocument(); + BsonWriter writer = new BsonDocumentWriter(idHoldingDocument); + writer.writeStartDocument(); + writer.writeName(ID_FIELD_NAME); + writeValue(writer, EncoderContext.builder().build(), id); + writer.writeEndDocument(); + return idHoldingDocument.get(ID_FIELD_NAME); + } + + @Override + public Document generateIdIfAbsentFromDocument(final Document document) { + if (!documentHasId(document)) { + document.put(ID_FIELD_NAME, idGenerator.generate()); + } + return document; + } + + @Override + public void encode(final BsonWriter writer, final Document document, final EncoderContext encoderContext) { + writer.writeStartDocument(); + + beforeFields(writer, encoderContext, document); + + for (final Map.Entry entry : document.entrySet()) { + if (skipField(encoderContext, entry.getKey())) { + continue; + } + writer.writeName(entry.getKey()); + writeValue(writer, encoderContext, entry.getValue()); + } + writer.writeEndDocument(); + } + + @Override + public Document decode(final BsonReader reader, final DecoderContext decoderContext) { + Document document = new Document(); + + reader.readStartDocument(); + while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) { + String fieldName = reader.readName(); + document.put(fieldName, readValue(reader, decoderContext, bsonTypeCodecMap, uuidRepresentation, registry, valueTransformer)); + } + + reader.readEndDocument(); + + return document; + } + + @Override + public Class getEncoderClass() { + return Document.class; + } + + private void beforeFields(final BsonWriter bsonWriter, final EncoderContext encoderContext, final Map document) { 
+ if (encoderContext.isEncodingCollectibleDocument() && document.containsKey(ID_FIELD_NAME)) { + bsonWriter.writeName(ID_FIELD_NAME); + writeValue(bsonWriter, encoderContext, document.get(ID_FIELD_NAME)); + } + } + + private boolean skipField(final EncoderContext encoderContext, final String key) { + return encoderContext.isEncodingCollectibleDocument() && key.equals(ID_FIELD_NAME); + } + + @SuppressWarnings({"unchecked", "rawtypes"}) + private void writeValue(final BsonWriter writer, final EncoderContext encoderContext, final Object value) { + if (value == null) { + writer.writeNull(); + } else { + Codec codec = registry.get(value.getClass()); + encoderContext.encodeWithChildContext(codec, writer, value); + } + } +} diff --git a/bson/src/main/org/bson/codecs/DocumentCodecProvider.java b/bson/src/main/org/bson/codecs/DocumentCodecProvider.java new file mode 100644 index 00000000000..2d5c34e9f1f --- /dev/null +++ b/bson/src/main/org/bson/codecs/DocumentCodecProvider.java @@ -0,0 +1,125 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.Document; +import org.bson.Transformer; +import org.bson.codecs.configuration.CodecProvider; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.types.CodeWithScope; + +import java.util.Objects; + +import static org.bson.assertions.Assertions.notNull; +import static org.bson.codecs.BsonTypeClassMap.DEFAULT_BSON_TYPE_CLASS_MAP; + +/** + * A {@code CodecProvider} for the Document class and all the default Codec implementations on which it depends. + * + * @since 3.0 + */ +public class DocumentCodecProvider implements CodecProvider { + private final BsonTypeClassMap bsonTypeClassMap; + private final Transformer valueTransformer; + + /** + * Construct a new instance with a default {@code BsonTypeClassMap}. + */ + public DocumentCodecProvider() { + this(DEFAULT_BSON_TYPE_CLASS_MAP); + } + + /** + * Construct a new instance with a default {@code BsonTypeClassMap} and the given {@code Transformer}. The transformer is used by the + * DocumentCodec as a last step when decoding values. + * + * @param valueTransformer the value transformer for decoded values + * @see org.bson.codecs.DocumentCodec#DocumentCodec(org.bson.codecs.configuration.CodecRegistry, BsonTypeClassMap, org.bson.Transformer) + */ + public DocumentCodecProvider(final Transformer valueTransformer) { + this(DEFAULT_BSON_TYPE_CLASS_MAP, valueTransformer); + } + + /** + * Construct a new instance with the given instance of {@code BsonTypeClassMap}. + * + * @param bsonTypeClassMap the non-null {@code BsonTypeClassMap} with which to construct instances of {@code DocumentCodec} and {@code + * ListCodec} + */ + public DocumentCodecProvider(final BsonTypeClassMap bsonTypeClassMap) { + this(bsonTypeClassMap, null); + } + + /** + * Construct a new instance with the given instance of {@code BsonTypeClassMap}. 
+ * + * @param bsonTypeClassMap the non-null {@code BsonTypeClassMap} with which to construct instances of {@code DocumentCodec} and {@code + * ListCodec}. + * @param valueTransformer the value transformer for decoded values + */ + public DocumentCodecProvider(final BsonTypeClassMap bsonTypeClassMap, final Transformer valueTransformer) { + this.bsonTypeClassMap = notNull("bsonTypeClassMap", bsonTypeClassMap); + this.valueTransformer = valueTransformer; + } + + @Override + @SuppressWarnings("unchecked") + public Codec get(final Class clazz, final CodecRegistry registry) { + if (clazz == CodeWithScope.class) { + return (Codec) new CodeWithScopeCodec(registry.get(Document.class)); + } + + if (clazz == Document.class) { + return (Codec) new DocumentCodec(registry, bsonTypeClassMap, valueTransformer); + } + + return null; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + DocumentCodecProvider that = (DocumentCodecProvider) o; + + if (!bsonTypeClassMap.equals(that.bsonTypeClassMap)) { + return false; + } + if (!Objects.equals(valueTransformer, that.valueTransformer)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = bsonTypeClassMap.hashCode(); + result = 31 * result + (valueTransformer != null ? valueTransformer.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "DocumentCodecProvider{}"; + } +} diff --git a/bson/src/main/org/bson/codecs/DoubleCodec.java b/bson/src/main/org/bson/codecs/DoubleCodec.java new file mode 100644 index 00000000000..33e3f6782bd --- /dev/null +++ b/bson/src/main/org/bson/codecs/DoubleCodec.java @@ -0,0 +1,44 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonReader; +import org.bson.BsonWriter; + +import static org.bson.internal.NumberCodecHelper.decodeDouble; + +/** + * Encodes and decodes {@code Double} objects. + * + * @since 3.0 + */ +public class DoubleCodec implements Codec { + @Override + public void encode(final BsonWriter writer, final Double value, final EncoderContext encoderContext) { + writer.writeDouble(value); + } + + @Override + public Double decode(final BsonReader reader, final DecoderContext decoderContext) { + return decodeDouble(reader); + } + + @Override + public Class getEncoderClass() { + return Double.class; + } +} diff --git a/bson/src/main/org/bson/codecs/Encoder.java b/bson/src/main/org/bson/codecs/Encoder.java new file mode 100644 index 00000000000..4b06894c71d --- /dev/null +++ b/bson/src/main/org/bson/codecs/Encoder.java @@ -0,0 +1,44 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs;
+
+import org.bson.BsonWriter;
+
+/**
+ * Instances of this class are capable of encoding an instance of the type parameter {@code T} into a BSON value.
+ *
+ * @param <T> the type that the instance can encode into BSON
+ *
+ * @since 3.0
+ */
+public interface Encoder<T> {
+
+    /**
+     * Encode an instance of the type parameter {@code T} into a BSON value.
+     * @param writer the BSON writer to encode into
+     * @param value the value to encode
+     * @param encoderContext the encoder context
+     */
+    void encode(BsonWriter writer, T value, EncoderContext encoderContext);
+
+    /**
+     * Returns the Class instance that this encodes. This is necessary because Java does not reify generic types.
+     *
+     * @return the Class instance that this encodes.
+     */
+    Class<T> getEncoderClass();
+}
diff --git a/bson/src/main/org/bson/codecs/EncoderContext.java b/bson/src/main/org/bson/codecs/EncoderContext.java
new file mode 100644
index 00000000000..af074bb2664
--- /dev/null
+++ b/bson/src/main/org/bson/codecs/EncoderContext.java
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs;
+
+import org.bson.BsonWriter;
+
+/**
+ * The context for encoding values to BSON.
+ *
+ * @see org.bson.codecs.Encoder
+ * @since 3.0
+ */
+public final class EncoderContext {
+
+    private static final EncoderContext DEFAULT_CONTEXT = EncoderContext.builder().build();
+
+    private final boolean encodingCollectibleDocument;
+
+    /**
+     * Create a builder.
+     *
+     * @return the builder
+     */
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    /**
+     * A builder for {@code EncoderContext} instances.
+     */
+    public static final class Builder {
+        private boolean encodingCollectibleDocument;
+
+        private Builder() {
+        }
+
+        /**
+         * Set to true if the value to be encoded is a document that will be put in a MongoDB collection.
+         *
+         * @param encodingCollectibleDocument true if the value to be encoded is a document that will be put in a MongoDB collection
+         * @return this
+         */
+        public Builder isEncodingCollectibleDocument(final boolean encodingCollectibleDocument) {
+            this.encodingCollectibleDocument = encodingCollectibleDocument;
+            return this;
+        }
+
+        /**
+         * Build an instance of {@code EncoderContext}.
+         * @return the encoder context
+         */
+        public EncoderContext build() {
+            return new EncoderContext(this);
+        }
+    }
+
+    /**
+     * Returns true if the value to be encoded is a document that will be put in a MongoDB collection. Encoders for such documents
+     * might choose to act differently when encoding such documents, e.g. by re-ordering the fields in some way (like encoding the _id
+     * field first).
+     *
+     * @return true if the value to be encoded is a document that will be put in a MongoDB collection
+     */
+    public boolean isEncodingCollectibleDocument() {
+        return encodingCollectibleDocument;
+    }
+
+    /**
+     * Creates a child context based on this and serializes the value with it to the writer.
+     *
+     * @param encoder the encoder to encode value with
+     * @param writer the writer to encode to
+     * @param value the value to encode
+     * @param <T> the type of the value
+     */
+    public <T> void encodeWithChildContext(final Encoder<T> encoder, final BsonWriter writer, final T value) {
+        encoder.encode(writer, value, DEFAULT_CONTEXT);
+    }
+
+    /**
+     * Gets a child context based on this.
+     *
+     * @return the child context
+     */
+    public EncoderContext getChildContext() {
+        return DEFAULT_CONTEXT;
+    }
+
+    private EncoderContext(final Builder builder) {
+        encodingCollectibleDocument = builder.encodingCollectibleDocument;
+    }
+}
diff --git a/bson/src/main/org/bson/codecs/EnumCodec.java b/bson/src/main/org/bson/codecs/EnumCodec.java
new file mode 100644
index 00000000000..0ef6e28077d
--- /dev/null
+++ b/bson/src/main/org/bson/codecs/EnumCodec.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs;
+
+import org.bson.BsonReader;
+import org.bson.BsonWriter;
+
+/**
+ * A codec for classes that extend {@link Enum}.
+ *
+ * @param <T> The enum type
+ * @since 4.5
+ */
+public final class EnumCodec<T extends Enum<T>> implements Codec<T> {
+    private final Class<T> clazz;
+
+    /**
+     * Construct an instance for the given enum class.
+     *
+     * @param clazz the enum class
+     */
+    public EnumCodec(final Class<T> clazz) {
+        this.clazz = clazz;
+    }
+
+    @Override
+    public T decode(final BsonReader reader, final DecoderContext decoderContext) {
+        return Enum.valueOf(clazz, reader.readString());
+    }
+
+    @Override
+    public void encode(final BsonWriter writer, final T value, final EncoderContext encoderContext) {
+        writer.writeString(value.name());
+    }
+
+    @Override
+    public Class<T> getEncoderClass() {
+        return clazz;
+    }
+}
diff --git a/bson/src/main/org/bson/codecs/EnumCodecProvider.java b/bson/src/main/org/bson/codecs/EnumCodecProvider.java
new file mode 100644
index 00000000000..2ccd6ab9287
--- /dev/null
+++ b/bson/src/main/org/bson/codecs/EnumCodecProvider.java
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
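
A small sketch of the `EncoderContext` contract shown above, using only the API in this diff:

```java
import org.bson.codecs.EncoderContext;

public final class EncoderContextSketch {
    public static void main(String[] args) {
        // The builder is the only way to set the collectible-document flag.
        EncoderContext context = EncoderContext.builder()
                .isEncodingCollectibleDocument(true)
                .build();
        System.out.println(context.isEncodingCollectibleDocument()); // true

        // Child contexts fall back to the defaults, so collectible-document
        // handling (e.g. encoding _id first) applies only at the top level.
        System.out.println(context.getChildContext().isEncodingCollectibleDocument()); // false
    }
}
```
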
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.codecs.configuration.CodecProvider; +import org.bson.codecs.configuration.CodecRegistry; + +/** + * A codec provider for classes that extend {@link Enum}. + * + * @since 4.5 + */ +public final class EnumCodecProvider implements CodecProvider { + @Override + @SuppressWarnings({"unchecked", "rawtypes"}) + public Codec get(final Class clazz, final CodecRegistry registry) { + if (Enum.class.isAssignableFrom(clazz)) { + return (Codec) new EnumCodec(clazz); + } + return null; + } + + @Override + public String toString() { + return "EnumCodecProvider{}"; + } +} diff --git a/bson/src/main/org/bson/codecs/Float32BinaryVectorCodec.java b/bson/src/main/org/bson/codecs/Float32BinaryVectorCodec.java new file mode 100644 index 00000000000..99f740a6873 --- /dev/null +++ b/bson/src/main/org/bson/codecs/Float32BinaryVectorCodec.java @@ -0,0 +1,56 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonBinary; +import org.bson.BsonBinarySubType; +import org.bson.BsonInvalidOperationException; +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.Float32BinaryVector; + +/** + * Encodes and decodes {@link Float32BinaryVector} objects. + * + */ +final class Float32BinaryVectorCodec implements Codec { + + @Override + public void encode(final BsonWriter writer, final Float32BinaryVector vectorToEncode, final EncoderContext encoderContext) { + writer.writeBinaryData(new BsonBinary(vectorToEncode)); + } + + @Override + public Float32BinaryVector decode(final BsonReader reader, final DecoderContext decoderContext) { + byte subType = reader.peekBinarySubType(); + + if (subType != BsonBinarySubType.VECTOR.getValue()) { + throw new BsonInvalidOperationException("Expected vector binary subtype " + BsonBinarySubType.VECTOR.getValue() + " but found: " + subType); + } + + return reader.readBinaryData() + .asBinary() + .asVector() + .asFloat32Vector(); + } + + @Override + public Class getEncoderClass() { + return Float32BinaryVector.class; + } +} + diff --git a/bson/src/main/org/bson/codecs/FloatCodec.java b/bson/src/main/org/bson/codecs/FloatCodec.java new file mode 100644 index 00000000000..49dc7e22aff --- /dev/null +++ b/bson/src/main/org/bson/codecs/FloatCodec.java @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
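
To illustrate the provider above: any enum class resolves to an `EnumCodec`; the `Color` enum below is hypothetical, used only for this sketch:

```java
import org.bson.codecs.Codec;
import org.bson.codecs.EnumCodecProvider;
import org.bson.codecs.configuration.CodecRegistries;
import org.bson.codecs.configuration.CodecRegistry;

public final class EnumCodecSketch {
    enum Color { RED, GREEN } // hypothetical enum, for illustration only

    public static void main(String[] args) {
        CodecRegistry registry = CodecRegistries.fromProviders(new EnumCodecProvider());

        // EnumCodec round-trips constants via Enum.name()/Enum.valueOf(),
        // so values are stored as plain BSON strings such as "RED".
        Codec<Color> codec = registry.get(Color.class);
        System.out.println(codec.getEncoderClass().getSimpleName()); // Color
    }
}
```
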
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonReader; +import org.bson.BsonWriter; + +import static org.bson.internal.NumberCodecHelper.decodeFloat; + +/** + * Encodes and decodes {@code Float} objects. + * + * @since 3.0 + */ +public class FloatCodec implements Codec { + + @Override + public void encode(final BsonWriter writer, final Float value, final EncoderContext encoderContext) { + writer.writeDouble(value); + } + + @Override + public Float decode(final BsonReader reader, final DecoderContext decoderContext) { + return decodeFloat(reader); + } + + @Override + public Class getEncoderClass() { + return Float.class; + } +} diff --git a/bson/src/main/org/bson/codecs/IdGenerator.java b/bson/src/main/org/bson/codecs/IdGenerator.java new file mode 100644 index 00000000000..790737542c1 --- /dev/null +++ b/bson/src/main/org/bson/codecs/IdGenerator.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +/** + * Classes that implement this interface define a way to create IDs for MongoDB documents. + * + * @since 3.0 + */ +public interface IdGenerator { + /** + * Generates an ID for a MongoDB Document. + * + * @return any type of Object representing an ID. + */ + Object generate(); +} diff --git a/bson/src/main/org/bson/codecs/Int8VectorCodec.java b/bson/src/main/org/bson/codecs/Int8VectorCodec.java new file mode 100644 index 00000000000..963da625d7f --- /dev/null +++ b/bson/src/main/org/bson/codecs/Int8VectorCodec.java @@ -0,0 +1,58 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonBinary; +import org.bson.BsonBinarySubType; +import org.bson.BsonInvalidOperationException; +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.Int8BinaryVector; + +/** + * Encodes and decodes {@link Int8BinaryVector} objects. 
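
`IdGenerator` is the extension point document codecs use to fill in a missing `_id`; a hypothetical UUID-string implementation (not part of this change) would look like:

```java
import java.util.UUID;
import org.bson.codecs.IdGenerator;

// Hypothetical alternative to ObjectIdGenerator: documents receive random
// UUID strings as _id values instead of ObjectIds.
public final class UuidStringIdGenerator implements IdGenerator {
    @Override
    public Object generate() {
        return UUID.randomUUID().toString();
    }
}
```
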
+ * + * @since 5.3 + */ +final class Int8VectorCodec implements Codec { + + @Override + public void encode(final BsonWriter writer, final Int8BinaryVector vectorToEncode, final EncoderContext encoderContext) { + writer.writeBinaryData(new BsonBinary(vectorToEncode)); + } + + @Override + public Int8BinaryVector decode(final BsonReader reader, final DecoderContext decoderContext) { + byte subType = reader.peekBinarySubType(); + + if (subType != BsonBinarySubType.VECTOR.getValue()) { + throw new BsonInvalidOperationException("Expected vector binary subtype " + BsonBinarySubType.VECTOR.getValue() + " but found: " + subType); + } + + return reader.readBinaryData() + .asBinary() + .asVector() + .asInt8Vector(); + } + + + @Override + public Class getEncoderClass() { + return Int8BinaryVector.class; + } +} + diff --git a/bson/src/main/org/bson/codecs/IntegerCodec.java b/bson/src/main/org/bson/codecs/IntegerCodec.java new file mode 100644 index 00000000000..bb0c5c082d5 --- /dev/null +++ b/bson/src/main/org/bson/codecs/IntegerCodec.java @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonReader; +import org.bson.BsonWriter; + +import static org.bson.internal.NumberCodecHelper.decodeInt; + +/** + * Encodes and decodes {@code Integer} objects. + * + * @since 3.0 + */ +public class IntegerCodec implements Codec { + + @Override + public void encode(final BsonWriter writer, final Integer value, final EncoderContext encoderContext) { + writer.writeInt32(value); + } + + @Override + public Integer decode(final BsonReader reader, final DecoderContext decoderContext) { + return decodeInt(reader); + } + + @Override + public Class getEncoderClass() { + return Integer.class; + } +} diff --git a/bson/src/main/org/bson/codecs/IterableCodec.java b/bson/src/main/org/bson/codecs/IterableCodec.java new file mode 100644 index 00000000000..028c571aaef --- /dev/null +++ b/bson/src/main/org/bson/codecs/IterableCodec.java @@ -0,0 +1,99 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs; + +import org.bson.BsonReader; +import org.bson.BsonType; +import org.bson.BsonWriter; +import org.bson.Transformer; +import org.bson.UuidRepresentation; +import org.bson.codecs.configuration.CodecRegistry; + +import java.util.ArrayList; +import java.util.List; + +import static org.bson.assertions.Assertions.notNull; +import static org.bson.codecs.ContainerCodecHelper.readValue; + +/** + * Encodes and decodes {@code Iterable} objects. + */ +@SuppressWarnings("rawtypes") +class IterableCodec implements Codec, OverridableUuidRepresentationCodec { + + private final CodecRegistry registry; + private final BsonTypeCodecMap bsonTypeCodecMap; + private final Transformer valueTransformer; + private final UuidRepresentation uuidRepresentation; + + IterableCodec(final CodecRegistry registry, final BsonTypeClassMap bsonTypeClassMap, final Transformer valueTransformer) { + this(registry, new BsonTypeCodecMap(notNull("bsonTypeClassMap", bsonTypeClassMap), registry), valueTransformer, + UuidRepresentation.UNSPECIFIED); + } + + private IterableCodec(final CodecRegistry registry, final BsonTypeCodecMap bsonTypeCodecMap, final Transformer valueTransformer, + final UuidRepresentation uuidRepresentation) { + this.registry = notNull("registry", registry); + this.bsonTypeCodecMap = bsonTypeCodecMap; + this.valueTransformer = valueTransformer != null ? valueTransformer : objectToTransform -> objectToTransform; + this.uuidRepresentation = uuidRepresentation; + } + + + @Override + public Codec withUuidRepresentation(final UuidRepresentation uuidRepresentation) { + return new IterableCodec(registry, bsonTypeCodecMap, valueTransformer, uuidRepresentation); + } + + @Override + public Iterable decode(final BsonReader reader, final DecoderContext decoderContext) { + reader.readStartArray(); + + List list = new ArrayList<>(); + while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) { + list.add(readValue(reader, decoderContext, bsonTypeCodecMap, uuidRepresentation, registry, valueTransformer)); + } + + reader.readEndArray(); + + return list; + } + + @Override + public void encode(final BsonWriter writer, final Iterable value, final EncoderContext encoderContext) { + writer.writeStartArray(); + for (final Object cur : value) { + writeValue(writer, encoderContext, cur); + } + writer.writeEndArray(); + } + + @Override + public Class getEncoderClass() { + return Iterable.class; + } + + @SuppressWarnings({"unchecked", "rawtypes"}) + private void writeValue(final BsonWriter writer, final EncoderContext encoderContext, final Object value) { + if (value == null) { + writer.writeNull(); + } else { + Codec codec = registry.get(value.getClass()); + encoderContext.encodeWithChildContext(codec, writer, value); + } + } +} diff --git a/bson/src/main/org/bson/codecs/IterableCodecProvider.java b/bson/src/main/org/bson/codecs/IterableCodecProvider.java new file mode 100644 index 00000000000..c59788aa007 --- /dev/null +++ b/bson/src/main/org/bson/codecs/IterableCodecProvider.java @@ -0,0 +1,118 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs;
+
+import org.bson.Transformer;
+import org.bson.codecs.configuration.CodecProvider;
+import org.bson.codecs.configuration.CodecRegistry;
+
+import java.util.Objects;
+
+import static org.bson.assertions.Assertions.notNull;
+import static org.bson.codecs.BsonTypeClassMap.DEFAULT_BSON_TYPE_CLASS_MAP;
+
+/**
+ * A {@code CodecProvider} for classes that implement the {@code Iterable} interface.
+ *
+ * @since 3.3
+ */
+public class IterableCodecProvider implements CodecProvider {
+    private final BsonTypeClassMap bsonTypeClassMap;
+    private final Transformer valueTransformer;
+
+    /**
+     * Construct a new instance with a default {@code BsonTypeClassMap} and no {@code Transformer}.
+     */
+    public IterableCodecProvider() {
+        this(DEFAULT_BSON_TYPE_CLASS_MAP);
+    }
+
+    /**
+     * Construct a new instance with a default {@code BsonTypeClassMap} and the given {@code Transformer}. The transformer is used by the
+     * IterableCodec as a last step when decoding values.
+     *
+     * @param valueTransformer the value transformer for decoded values
+     */
+    public IterableCodecProvider(final Transformer valueTransformer) {
+        this(DEFAULT_BSON_TYPE_CLASS_MAP, valueTransformer);
+    }
+
+    /**
+     * Construct a new instance with the given instance of {@code BsonTypeClassMap} and no {@code Transformer}.
+     *
+     * @param bsonTypeClassMap the non-null {@code BsonTypeClassMap} with which to construct instances of {@code DocumentCodec} and {@code
+     * ListCodec}
+     */
+    public IterableCodecProvider(final BsonTypeClassMap bsonTypeClassMap) {
+        this(bsonTypeClassMap, null);
+    }
+
+    /**
+     * Construct a new instance with the given instance of {@code BsonTypeClassMap} and {@code Transformer}.
+     *
+     * @param bsonTypeClassMap the non-null {@code BsonTypeClassMap} with which to construct instances of {@code DocumentCodec} and {@code
+     * ListCodec}.
+     * @param valueTransformer the value transformer for decoded values
+     */
+    public IterableCodecProvider(final BsonTypeClassMap bsonTypeClassMap, final Transformer valueTransformer) {
+        this.bsonTypeClassMap = notNull("bsonTypeClassMap", bsonTypeClassMap);
+        this.valueTransformer = valueTransformer;
+    }
+
+    @Override
+    @SuppressWarnings({"unchecked", "deprecation"})
+    public <T> Codec<T> get(final Class<T> clazz, final CodecRegistry registry) {
+        if (Iterable.class.isAssignableFrom(clazz)) {
+            return (Codec<T>) new IterableCodec(registry, bsonTypeClassMap, valueTransformer);
+        }
+
+        return null;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        IterableCodecProvider that = (IterableCodecProvider) o;
+
+        if (!bsonTypeClassMap.equals(that.bsonTypeClassMap)) {
+            return false;
+        }
+        if (!Objects.equals(valueTransformer, that.valueTransformer)) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = bsonTypeClassMap.hashCode();
+        result = 31 * result + (valueTransformer != null ? valueTransformer.hashCode() : 0);
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "IterableCodecProvider{}";
+    }
+}
diff --git a/bson/src/main/org/bson/codecs/JsonObjectCodec.java b/bson/src/main/org/bson/codecs/JsonObjectCodec.java
new file mode 100644
index 00000000000..7fa5a6262d6
--- /dev/null
+++ b/bson/src/main/org/bson/codecs/JsonObjectCodec.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
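
A sketch of the provider in action, encoding a heterogeneous list as a BSON array; element codecs come from the same registry, one lookup per runtime class:

```java
import java.util.Arrays;
import org.bson.BsonDocument;
import org.bson.BsonDocumentWriter;
import org.bson.codecs.Codec;
import org.bson.codecs.EncoderContext;
import org.bson.codecs.IterableCodecProvider;
import org.bson.codecs.ValueCodecProvider;
import org.bson.codecs.configuration.CodecRegistries;
import org.bson.codecs.configuration.CodecRegistry;

public final class IterableEncodeSketch {
    @SuppressWarnings("rawtypes")
    public static void main(String[] args) {
        CodecRegistry registry = CodecRegistries.fromProviders(
                new ValueCodecProvider(), new IterableCodecProvider());

        // Anything assignable to Iterable matches the provider.
        Codec<Iterable> codec = registry.get(Iterable.class);

        BsonDocument target = new BsonDocument();
        BsonDocumentWriter writer = new BsonDocumentWriter(target);
        writer.writeStartDocument();
        writer.writeName("values");
        codec.encode(writer, Arrays.asList("a", 1), EncoderContext.builder().build());
        writer.writeEndDocument();
        System.out.println(target.toJson()); // {"values": ["a", 1]}
    }
}
```
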
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs;
+
+import org.bson.BsonReader;
+import org.bson.BsonWriter;
+import org.bson.json.JsonObject;
+import org.bson.json.JsonReader;
+import org.bson.json.JsonWriter;
+import org.bson.json.JsonWriterSettings;
+
+import java.io.StringWriter;
+
+/**
+ * Encodes and decodes JSON object strings.
+ *
+ * @since 4.2
+ */
+public class JsonObjectCodec implements Codec<JsonObject> {
+    private final JsonWriterSettings writerSettings;
+
+    /**
+     * Construct a JsonObjectCodec with default JsonWriterSettings
+     */
+    public JsonObjectCodec() {
+        this(JsonWriterSettings.builder().build());
+    }
+
+    /**
+     * Construct a JsonObjectCodec with provided JsonWriterSettings
+     *
+     * @param writerSettings the settings
+     */
+    public JsonObjectCodec(final JsonWriterSettings writerSettings) {
+        this.writerSettings = writerSettings;
+    }
+
+    @Override
+    public void encode(final BsonWriter writer, final JsonObject value, final EncoderContext encoderContext) {
+        writer.pipe(new JsonReader(value.getJson()));
+    }
+
+    @Override
+    public JsonObject decode(final BsonReader reader, final DecoderContext decoderContext) {
+        StringWriter stringWriter = new StringWriter();
+        new JsonWriter(stringWriter, writerSettings).pipe(reader);
+        return new JsonObject(stringWriter.toString());
+    }
+
+    @Override
+    public Class<JsonObject> getEncoderClass() {
+        return JsonObject.class;
+    }
+
+}
diff --git a/bson/src/main/org/bson/codecs/JsonObjectCodecProvider.java b/bson/src/main/org/bson/codecs/JsonObjectCodecProvider.java
new file mode 100644
index 00000000000..f8f1ed79d1e
--- /dev/null
+++ b/bson/src/main/org/bson/codecs/JsonObjectCodecProvider.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
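
A round-trip sketch for the codec above; the exact JSON text produced depends on the configured `JsonWriterSettings`, so the printed form is indicative:

```java
import org.bson.BsonDocument;
import org.bson.BsonDocumentReader;
import org.bson.BsonDocumentWriter;
import org.bson.codecs.DecoderContext;
import org.bson.codecs.EncoderContext;
import org.bson.codecs.JsonObjectCodec;
import org.bson.json.JsonObject;

public final class JsonObjectSketch {
    public static void main(String[] args) {
        JsonObjectCodec codec = new JsonObjectCodec();

        // Encode: the raw JSON text is piped straight into the BSON writer.
        BsonDocument target = new BsonDocument();
        codec.encode(new BsonDocumentWriter(target), new JsonObject("{\"a\": 1}"),
                EncoderContext.builder().build());

        // Decode: the document is rendered back into a JSON string.
        JsonObject roundTripped = codec.decode(new BsonDocumentReader(target),
                DecoderContext.builder().build());
        System.out.println(roundTripped.getJson()); // {"a": 1}
    }
}
```
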
+ */ + +package org.bson.codecs; + +import org.bson.codecs.configuration.CodecProvider; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.json.JsonObject; + +/** + * A {@code CodecProvider} for JSON object strings + * + * @since 4.2 + */ +public final class JsonObjectCodecProvider implements CodecProvider { + + @Override + @SuppressWarnings("unchecked") + public Codec get(final Class clazz, final CodecRegistry registry) { + if (clazz.equals(JsonObject.class)) { + return (Codec) new JsonObjectCodec(); + } + return null; + } + + @Override + public String toString() { + return "JsonObjectCodecProvider{}"; + } +} diff --git a/bson/src/main/org/bson/codecs/LongCodec.java b/bson/src/main/org/bson/codecs/LongCodec.java new file mode 100644 index 00000000000..0e16e4430bc --- /dev/null +++ b/bson/src/main/org/bson/codecs/LongCodec.java @@ -0,0 +1,46 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonReader; +import org.bson.BsonWriter; + +import static org.bson.internal.NumberCodecHelper.decodeLong; + +/** + * Encodes and decodes {@code Long} objects. + * + * @since 3.0 + */ + +public class LongCodec implements Codec { + + @Override + public void encode(final BsonWriter writer, final Long value, final EncoderContext encoderContext) { + writer.writeInt64(value); + } + + @Override + public Long decode(final BsonReader reader, final DecoderContext decoderContext) { + return decodeLong(reader); + } + + @Override + public Class getEncoderClass() { + return Long.class; + } +} diff --git a/bson/src/main/org/bson/codecs/MapCodec.java b/bson/src/main/org/bson/codecs/MapCodec.java new file mode 100644 index 00000000000..e98a2bde399 --- /dev/null +++ b/bson/src/main/org/bson/codecs/MapCodec.java @@ -0,0 +1,93 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.Transformer; +import org.bson.UuidRepresentation; +import org.bson.codecs.configuration.CodecRegistry; + +import java.util.Map; + +import static org.bson.assertions.Assertions.notNull; + +/** + * A codec for {@code Map}. + * + *

Supports {@link Map}, {@link java.util.NavigableMap}, {@link java.util.AbstractMap} or any concrete class that implements {@code + * Map} and has a public no-args constructor. If the type argument is {@code Map}, it constructs + * {@code HashMap} instances when decoding. If the type argument is {@code NavigableMap}, it constructs + * {@code TreeMap} instances when decoding.

+ *
+ * @param <M> the actual type of the Map, e.g. {@code NavigableMap<String, Object>}
+ */
+@SuppressWarnings("rawtypes")
+final class MapCodec<M extends Map<String, Object>> extends AbstractMapCodec<Object, M>
+        implements OverridableUuidRepresentationCodec<M> {
+
+    private final BsonTypeCodecMap bsonTypeCodecMap;
+    private final CodecRegistry registry;
+    private final Transformer valueTransformer;
+    private final UuidRepresentation uuidRepresentation;
+
+    /**
+     * Construct a new instance with the given registry and BSON type class map. The transformer is applied as a last step when decoding
+     * values, which allows users of this codec to control the decoding process. For example, a user of this class could substitute a
+     * value decoded as a Document with an instance of a special purpose class (e.g., one representing a DBRef in MongoDB).
+     *
+     * @param registry the registry
+     * @param bsonTypeClassMap the BSON type class map
+     * @param valueTransformer the value transformer to use as a final step when decoding the value of any field in the map
+     * @param clazz the Map subclass
+     * @since 4.8
+     */
+    MapCodec(final CodecRegistry registry, final BsonTypeClassMap bsonTypeClassMap, final Transformer valueTransformer,
+            final Class<M> clazz) {
+        this(registry, new BsonTypeCodecMap(notNull("bsonTypeClassMap", bsonTypeClassMap), registry), valueTransformer,
+                UuidRepresentation.UNSPECIFIED, clazz);
+    }
+
+    private MapCodec(final CodecRegistry registry, final BsonTypeCodecMap bsonTypeCodecMap, final Transformer valueTransformer,
+            final UuidRepresentation uuidRepresentation, final Class<M> clazz) {
+        super(clazz);
+        this.registry = notNull("registry", registry);
+        this.bsonTypeCodecMap = bsonTypeCodecMap;
+        this.valueTransformer = valueTransformer != null ? valueTransformer : (value) -> value;
+        this.uuidRepresentation = uuidRepresentation;
+    }
+
+    @Override
+    public Codec<M> withUuidRepresentation(final UuidRepresentation uuidRepresentation) {
+        if (this.uuidRepresentation.equals(uuidRepresentation)) {
+            return this;
+        }
+        return new MapCodec<>(registry, bsonTypeCodecMap, valueTransformer, uuidRepresentation, getEncoderClass());
+    }
+
+    @Override
+    Object readValue(final BsonReader reader, final DecoderContext decoderContext) {
+        return ContainerCodecHelper.readValue(reader, decoderContext, bsonTypeCodecMap, uuidRepresentation, registry, valueTransformer);
+    }
+
+    @SuppressWarnings({"rawtypes", "unchecked"})
+    @Override
+    void writeValue(final BsonWriter writer, final Object value, final EncoderContext encoderContext) {
+        Codec codec = registry.get(value.getClass());
+        encoderContext.encodeWithChildContext(codec, writer, value);
+    }
+}
diff --git a/bson/src/main/org/bson/codecs/MapCodecProvider.java b/bson/src/main/org/bson/codecs/MapCodecProvider.java
new file mode 100644
index 00000000000..d87de577211
--- /dev/null
+++ b/bson/src/main/org/bson/codecs/MapCodecProvider.java
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs;
+
+import org.bson.Transformer;
+import org.bson.codecs.configuration.CodecConfigurationException;
+import org.bson.codecs.configuration.CodecProvider;
+import org.bson.codecs.configuration.CodecRegistry;
+
+import java.lang.reflect.Type;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+
+import static org.bson.assertions.Assertions.notNull;
+import static org.bson.codecs.BsonTypeClassMap.DEFAULT_BSON_TYPE_CLASS_MAP;
+import static org.bson.codecs.ContainerCodecHelper.getCodec;
+
+/**
+ * A {@code CodecProvider} for the Map class and all the default Codec implementations on which it depends.
+ *
+ * @since 3.5
+ */
+public class MapCodecProvider implements CodecProvider {
+    private final BsonTypeClassMap bsonTypeClassMap;
+    private final Transformer valueTransformer;
+
+    /**
+     * Construct a new instance with a default {@code BsonTypeClassMap}.
+     */
+    public MapCodecProvider() {
+        this(DEFAULT_BSON_TYPE_CLASS_MAP);
+    }
+
+    /**
+     * Construct a new instance with the given instance of {@code BsonTypeClassMap}.
+     *
+     * @param bsonTypeClassMap the non-null {@code BsonTypeClassMap} with which to construct instances of {@code DocumentCodec} and {@code
+     * ListCodec}
+     */
+    public MapCodecProvider(final BsonTypeClassMap bsonTypeClassMap) {
+        this(bsonTypeClassMap, null);
+    }
+
+    /**
+     * Construct a new instance with a default {@code BsonTypeClassMap} and the given {@code Transformer}. The transformer is used by the
+     * MapCodec as a last step when decoding values.
+     *
+     * @param valueTransformer the value transformer for decoded values
+     */
+    public MapCodecProvider(final Transformer valueTransformer) {
+        this(DEFAULT_BSON_TYPE_CLASS_MAP, valueTransformer);
+    }
+
+    /**
+     * Construct a new instance with the given instance of {@code BsonTypeClassMap} and the given {@code Transformer}.
+     *
+     * @param bsonTypeClassMap the non-null {@code BsonTypeClassMap} with which to construct instances of {@code MapCodec}.
+     * @param valueTransformer the value transformer for decoded values
+     */
+    public MapCodecProvider(final BsonTypeClassMap bsonTypeClassMap, final Transformer valueTransformer) {
+        this.bsonTypeClassMap = notNull("bsonTypeClassMap", bsonTypeClassMap);
+        this.valueTransformer = valueTransformer;
+    }
+
+    @Override
+    public <T> Codec<T> get(final Class<T> clazz, final CodecRegistry registry) {
+        return get(clazz, Collections.emptyList(), registry);
+    }
+
+    @Override
+    public <T> Codec<T> get(final Class<T> clazz, final List<Type> typeArguments, final CodecRegistry registry) {
+        if (Map.class.isAssignableFrom(clazz)) {
+            int typeArgumentsSize = typeArguments.size();
+            switch (typeArgumentsSize) {
+                case 0: {
+                    @SuppressWarnings({"unchecked", "rawtypes"})
+                    Codec<T> result = new MapCodec(registry, bsonTypeClassMap, valueTransformer, clazz);
+                    return result;
+                }
+                case 2: {
+                    Type genericTypeOfMapKey = typeArguments.get(0);
+                    if (!genericTypeOfMapKey.getTypeName().equals("java.lang.String")) {
+                        throw new CodecConfigurationException("Unsupported key type for Map: " + genericTypeOfMapKey.getTypeName());
+                    }
+                    @SuppressWarnings({"unchecked", "rawtypes"})
+                    Codec<T> result = new ParameterizedMapCodec(getCodec(registry, typeArguments.get(1)), clazz);
+                    return result;
+                }
+                default: {
+                    throw new CodecConfigurationException("Expected two parameterized types for a Map, but found " + typeArgumentsSize);
+                }
+            }
+        }
+        return null;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        MapCodecProvider that = (MapCodecProvider) o;
+        if (!bsonTypeClassMap.equals(that.bsonTypeClassMap)) {
+            return false;
+        }
+        if (!Objects.equals(valueTransformer, that.valueTransformer)) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = bsonTypeClassMap.hashCode();
+        result = 31 * result + (valueTransformer != null ? valueTransformer.hashCode() : 0);
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "MapCodecProvider{}";
+    }
+}
diff --git a/bson/src/main/org/bson/codecs/MaxKeyCodec.java b/bson/src/main/org/bson/codecs/MaxKeyCodec.java
new file mode 100644
index 00000000000..a8907e5a9a4
--- /dev/null
+++ b/bson/src/main/org/bson/codecs/MaxKeyCodec.java
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs;
+
+import org.bson.BsonReader;
+import org.bson.BsonWriter;
+import org.bson.types.MaxKey;
+
+/**
+ * Encodes and decodes instances of type {@link org.bson.types.MaxKey}.
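
For orientation, a sketch of the zero-argument lookup path above (the `case 0` branch); parameterized lookups go through the two-argument `get` and, as the guard shows, require `String` keys. The extra `ValueCodecProvider` is an illustrative assumption so that common value codecs resolve:

```java
import java.util.Map;
import org.bson.codecs.Codec;
import org.bson.codecs.MapCodecProvider;
import org.bson.codecs.ValueCodecProvider;
import org.bson.codecs.configuration.CodecRegistries;
import org.bson.codecs.configuration.CodecRegistry;

public final class MapProviderSketch {
    @SuppressWarnings("rawtypes")
    public static void main(String[] args) {
        CodecRegistry registry = CodecRegistries.fromProviders(
                new ValueCodecProvider(), new MapCodecProvider());

        // Resolves through the Class-only overload, i.e. the "case 0" branch;
        // decoding with this codec produces HashMap instances by default.
        Codec<Map> codec = registry.get(Map.class);
        System.out.println(codec.getEncoderClass()); // interface java.util.Map
    }
}
```
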
+ * + * @since 3.0 + */ +public class MaxKeyCodec implements Codec { + @Override + public void encode(final BsonWriter writer, final MaxKey value, final EncoderContext encoderContext) { + writer.writeMaxKey(); + } + + @Override + public MaxKey decode(final BsonReader reader, final DecoderContext decoderContext) { + reader.readMaxKey(); + return new MaxKey(); + } + + @Override + public Class getEncoderClass() { + return MaxKey.class; + } +} diff --git a/bson/src/main/org/bson/codecs/MinKeyCodec.java b/bson/src/main/org/bson/codecs/MinKeyCodec.java new file mode 100644 index 00000000000..c3bb34efaa9 --- /dev/null +++ b/bson/src/main/org/bson/codecs/MinKeyCodec.java @@ -0,0 +1,44 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.types.MinKey; + +/** + * Encodes and decodes instances of type {@link org.bson.types.MinKey}. + * + * @since 3.0 + */ +public class MinKeyCodec implements Codec { + @Override + public void encode(final BsonWriter writer, final MinKey value, final EncoderContext encoderContext) { + writer.writeMinKey(); + } + + @Override + public MinKey decode(final BsonReader reader, final DecoderContext decoderContext) { + reader.readMinKey(); + return new MinKey(); + } + + @Override + public Class getEncoderClass() { + return MinKey.class; + } +} diff --git a/bson/src/main/org/bson/codecs/ObjectIdCodec.java b/bson/src/main/org/bson/codecs/ObjectIdCodec.java new file mode 100644 index 00000000000..d688bf69087 --- /dev/null +++ b/bson/src/main/org/bson/codecs/ObjectIdCodec.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.types.ObjectId; + +/** + * A Codec for ObjectId instances. 
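
Both key codecs are stateless markers; a joint sketch of what they emit (the printed JSON is indicative of relaxed output):

```java
import org.bson.BsonDocument;
import org.bson.BsonDocumentWriter;
import org.bson.codecs.EncoderContext;
import org.bson.codecs.MaxKeyCodec;
import org.bson.codecs.MinKeyCodec;
import org.bson.types.MaxKey;
import org.bson.types.MinKey;

public final class KeyBoundsSketch {
    public static void main(String[] args) {
        // MinKey/MaxKey carry no payload; the codecs only emit the BSON type.
        BsonDocument target = new BsonDocument();
        BsonDocumentWriter writer = new BsonDocumentWriter(target);
        writer.writeStartDocument();
        writer.writeName("lo");
        new MinKeyCodec().encode(writer, new MinKey(), EncoderContext.builder().build());
        writer.writeName("hi");
        new MaxKeyCodec().encode(writer, new MaxKey(), EncoderContext.builder().build());
        writer.writeEndDocument();
        System.out.println(target.toJson()); // {"lo": {"$minKey": 1}, "hi": {"$maxKey": 1}}
    }
}
```
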
+ * + * @since 3.0 + */ +public class ObjectIdCodec implements Codec { + @Override + public void encode(final BsonWriter writer, final ObjectId value, final EncoderContext encoderContext) { + writer.writeObjectId(value); + } + + @Override + public ObjectId decode(final BsonReader reader, final DecoderContext decoderContext) { + return reader.readObjectId(); + } + + @Override + public Class getEncoderClass() { + return ObjectId.class; + } +} diff --git a/bson/src/main/org/bson/codecs/ObjectIdGenerator.java b/bson/src/main/org/bson/codecs/ObjectIdGenerator.java new file mode 100644 index 00000000000..9191ebb1d60 --- /dev/null +++ b/bson/src/main/org/bson/codecs/ObjectIdGenerator.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.types.ObjectId; + +/** + * Creates new {@code ObjectId} instances as IDs for MongoDB Documents. + * + * @since 3.0 + */ +public class ObjectIdGenerator implements IdGenerator { + @Override + public Object generate() { + return new ObjectId(); + } +} diff --git a/bson/src/main/org/bson/codecs/OverridableUuidRepresentationCodec.java b/bson/src/main/org/bson/codecs/OverridableUuidRepresentationCodec.java new file mode 100644 index 00000000000..f0f392dd140 --- /dev/null +++ b/bson/src/main/org/bson/codecs/OverridableUuidRepresentationCodec.java @@ -0,0 +1,34 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.UuidRepresentation; + +/** + * A marker interface for {@code Codec} implementations that can derive a new instance that overrides the {@code UuidRepresentation}. + * @param the value type + * @since 3.12 + */ +public interface OverridableUuidRepresentationCodec { + /** + * Implementations must return a new instance with the {@code UuidRepresentation} overridden with the given value. 
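
The default generator in action; the printed value is illustrative, since every call yields a fresh id:

```java
import org.bson.codecs.IdGenerator;
import org.bson.codecs.ObjectIdGenerator;

public final class IdGeneratorSketch {
    public static void main(String[] args) {
        IdGenerator generator = new ObjectIdGenerator();

        // Each call produces a new, roughly time-ordered ObjectId; document
        // codecs can use this to populate a missing _id before encoding.
        Object id = generator.generate();
        System.out.println(id); // e.g. 5f2b6c7e8a1d4c3b2a1f0e9d (illustrative)
    }
}
```
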
+ * + * @param uuidRepresentation the UuidRepresentation + * @return a new instance equivalent to this but with the given UuidRepresentation + */ + Codec withUuidRepresentation(UuidRepresentation uuidRepresentation); +} diff --git a/bson/src/main/org/bson/codecs/OverridableUuidRepresentationUuidCodec.java b/bson/src/main/org/bson/codecs/OverridableUuidRepresentationUuidCodec.java new file mode 100644 index 00000000000..1076282e89f --- /dev/null +++ b/bson/src/main/org/bson/codecs/OverridableUuidRepresentationUuidCodec.java @@ -0,0 +1,53 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.UuidRepresentation; + +import java.util.UUID; + +/** + * An extension of {@code UuidCodec} that allows its configured {@code UuidRepresentation} to be overridden by an externally configured + * {@code UuidRepresentation}, most likely configured on {@code MongoClientSettings} or {@code MongoClientOptions}. + * + * @since 3.12 + */ +public class OverridableUuidRepresentationUuidCodec extends UuidCodec implements OverridableUuidRepresentationCodec { + + /** + * Construct an instance with the default UUID representation. + */ + public OverridableUuidRepresentationUuidCodec() { + } + + /** + * Construct an instance with the given UUID representation. + * + * @param uuidRepresentation the UUID representation + */ + public OverridableUuidRepresentationUuidCodec(final UuidRepresentation uuidRepresentation) { + super(uuidRepresentation); + } + + @Override + public Codec withUuidRepresentation(final UuidRepresentation uuidRepresentation) { + if (getUuidRepresentation().equals(uuidRepresentation)) { + return this; + } + return new OverridableUuidRepresentationUuidCodec(uuidRepresentation); + } +} diff --git a/bson/src/main/org/bson/codecs/PackedBitBinaryVectorCodec.java b/bson/src/main/org/bson/codecs/PackedBitBinaryVectorCodec.java new file mode 100644 index 00000000000..c8d0410a4c6 --- /dev/null +++ b/bson/src/main/org/bson/codecs/PackedBitBinaryVectorCodec.java @@ -0,0 +1,59 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonBinary; +import org.bson.BsonBinarySubType; +import org.bson.BsonInvalidOperationException; +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.PackedBitBinaryVector; + +/** + * Encodes and decodes {@link PackedBitBinaryVector} objects. 
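
A sketch of the override hand-off; note that the equal-representation short-circuit in the code above returns the same instance:

```java
import java.util.UUID;
import org.bson.UuidRepresentation;
import org.bson.codecs.Codec;
import org.bson.codecs.OverridableUuidRepresentationUuidCodec;

public final class UuidOverrideSketch {
    public static void main(String[] args) {
        // Typically the driver, not the user, performs this override when a
        // UuidRepresentation is configured on the client settings.
        OverridableUuidRepresentationUuidCodec codec = new OverridableUuidRepresentationUuidCodec();
        Codec<UUID> standard = codec.withUuidRepresentation(UuidRepresentation.STANDARD);
        System.out.println(standard.getEncoderClass()); // class java.util.UUID
    }
}
```
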
+ * + */ +final class PackedBitBinaryVectorCodec implements Codec { + + @Override + public void encode(final BsonWriter writer, final PackedBitBinaryVector vectorToEncode, final EncoderContext encoderContext) { + writer.writeBinaryData(new BsonBinary(vectorToEncode)); + } + + @Override + public PackedBitBinaryVector decode(final BsonReader reader, final DecoderContext decoderContext) { + byte subType = reader.peekBinarySubType(); + + if (subType != BsonBinarySubType.VECTOR.getValue()) { + throw new BsonInvalidOperationException( + "Expected vector binary subtype " + BsonBinarySubType.VECTOR.getValue() + " but found: " + subType); + } + + return reader.readBinaryData() + .asBinary() + .asVector() + .asPackedBitVector(); + } + + + @Override + public Class getEncoderClass() { + return PackedBitBinaryVector.class; + } +} + + diff --git a/bson/src/main/org/bson/codecs/ParameterizedCollectionCodec.java b/bson/src/main/org/bson/codecs/ParameterizedCollectionCodec.java new file mode 100644 index 00000000000..8d12a847a57 --- /dev/null +++ b/bson/src/main/org/bson/codecs/ParameterizedCollectionCodec.java @@ -0,0 +1,41 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonReader; +import org.bson.BsonWriter; + +import java.util.Collection; + +class ParameterizedCollectionCodec> extends AbstractCollectionCodec { + private final Codec codec; + + ParameterizedCollectionCodec(final Codec codec, final Class clazz) { + super(clazz); + this.codec = codec; + } + + @Override + T readValue(final BsonReader reader, final DecoderContext decoderContext) { + return decoderContext.decodeWithChildContext(codec, reader); + } + + @Override + void writeValue(final BsonWriter writer, final T cur, final EncoderContext encoderContext) { + encoderContext.encodeWithChildContext(codec, writer, cur); + } +} diff --git a/bson/src/main/org/bson/codecs/ParameterizedMapCodec.java b/bson/src/main/org/bson/codecs/ParameterizedMapCodec.java new file mode 100644 index 00000000000..b4871f6c0c3 --- /dev/null +++ b/bson/src/main/org/bson/codecs/ParameterizedMapCodec.java @@ -0,0 +1,46 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonReader; +import org.bson.BsonWriter; + +import java.util.Map; + +/** + * A Codec for Map instances. 
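
All three vector codecs share the same peek-then-read pattern. Below is a sketch of that guard applied to a plain (subtype 0) binary, which a vector codec would reject; the extended-JSON literal is an illustrative assumption:

```java
import org.bson.BsonBinarySubType;
import org.bson.BsonDocument;
import org.bson.BsonDocumentReader;
import org.bson.BsonReader;

public final class SubtypeGuardSketch {
    public static void main(String[] args) {
        // A generic binary field (subtype 0), not a vector (subtype 9).
        BsonDocument doc = BsonDocument.parse(
                "{b: {$binary: {base64: 'AAAA', subType: '00'}}}");
        BsonReader reader = new BsonDocumentReader(doc);
        reader.readStartDocument();
        reader.readName();

        // peekBinarySubType() inspects without consuming, so a codec can
        // raise a clear error before committing to readBinaryData().
        byte subType = reader.peekBinarySubType();
        System.out.println(subType == BsonBinarySubType.VECTOR.getValue()); // false
    }
}
```
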
+ * + * @since 3.5 + */ +class ParameterizedMapCodec> extends AbstractMapCodec { + private final Codec codec; + + ParameterizedMapCodec(final Codec codec, final Class clazz) { + super(clazz); + this.codec = codec; + } + + @Override + T readValue(final BsonReader reader, final DecoderContext decoderContext) { + return decoderContext.decodeWithChildContext(codec, reader); + } + + @Override + void writeValue(final BsonWriter writer, final T value, final EncoderContext encoderContext) { + encoderContext.encodeWithChildContext(codec, writer, value); + } +} diff --git a/bson/src/main/org/bson/codecs/PatternCodec.java b/bson/src/main/org/bson/codecs/PatternCodec.java new file mode 100644 index 00000000000..1287575c7dd --- /dev/null +++ b/bson/src/main/org/bson/codecs/PatternCodec.java @@ -0,0 +1,131 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonReader; +import org.bson.BsonRegularExpression; +import org.bson.BsonWriter; + +import java.util.HashMap; +import java.util.Map; +import java.util.regex.Pattern; + +/** + * A codec for {@code Pattern} instances. + * + * @since 3.0 + */ +public class PatternCodec implements Codec { + @Override + public void encode(final BsonWriter writer, final Pattern value, final EncoderContext encoderContext) { + writer.writeRegularExpression(new BsonRegularExpression(value.pattern(), getOptionsAsString(value))); + } + + @Override + public Pattern decode(final BsonReader reader, final DecoderContext decoderContext) { + BsonRegularExpression regularExpression = reader.readRegularExpression(); + return Pattern.compile(regularExpression.getPattern(), getOptionsAsInt(regularExpression)); + } + + @Override + public Class getEncoderClass() { + return Pattern.class; + } + + private static String getOptionsAsString(final Pattern pattern) { + int flags = pattern.flags(); + StringBuilder buf = new StringBuilder(); + + for (final RegexFlag flag : RegexFlag.values()) { + if ((pattern.flags() & flag.javaFlag) > 0) { + buf.append(flag.flagChar); + flags -= flag.javaFlag; + } + } + + if (flags > 0) { + throw new IllegalArgumentException("some flags could not be recognized."); + } + + return buf.toString(); + } + + private static int getOptionsAsInt(final BsonRegularExpression regularExpression) { + int optionsInt = 0; + + String optionsString = regularExpression.getOptions(); + + if (optionsString == null || optionsString.length() == 0) { + return optionsInt; + } + + optionsString = optionsString.toLowerCase(); + + for (int i = 0; i < optionsString.length(); i++) { + RegexFlag flag = RegexFlag.getByCharacter(optionsString.charAt(i)); + if (flag != null) { + optionsInt |= flag.javaFlag; + if (flag.unsupported != null) { + // TODO: deal with logging + // warnUnsupportedRegex( flag.unsupported ); + } + } else { + // TODO: throw a better exception here + throw new IllegalArgumentException("unrecognized flag [" + optionsString.charAt(i) + "] " + (int) optionsString.charAt(i)); + 
} + } + return optionsInt; + } + + + private static final int GLOBAL_FLAG = 256; + + private enum RegexFlag { + CANON_EQ(Pattern.CANON_EQ, 'c', "Pattern.CANON_EQ"), + UNIX_LINES(Pattern.UNIX_LINES, 'd', "Pattern.UNIX_LINES"), + GLOBAL(GLOBAL_FLAG, 'g', null), + CASE_INSENSITIVE(Pattern.CASE_INSENSITIVE, 'i', null), + MULTILINE(Pattern.MULTILINE, 'm', null), + DOTALL(Pattern.DOTALL, 's', "Pattern.DOTALL"), + LITERAL(Pattern.LITERAL, 't', "Pattern.LITERAL"), + UNICODE_CASE(Pattern.UNICODE_CASE, 'u', "Pattern.UNICODE_CASE"), + COMMENTS(Pattern.COMMENTS, 'x', null); + + private static final Map BY_CHARACTER = new HashMap<>(); + + private final int javaFlag; + private final char flagChar; + private final String unsupported; + + static { + for (final RegexFlag flag : values()) { + BY_CHARACTER.put(flag.flagChar, flag); + } + } + + public static RegexFlag getByCharacter(final char ch) { + return BY_CHARACTER.get(ch); + } + + RegexFlag(final int f, final char ch, final String u) { + javaFlag = f; + flagChar = ch; + unsupported = u; + } + } + +} diff --git a/bson/src/main/org/bson/codecs/RawBsonDocumentCodec.java b/bson/src/main/org/bson/codecs/RawBsonDocumentCodec.java new file mode 100644 index 00000000000..4d81b7f97aa --- /dev/null +++ b/bson/src/main/org/bson/codecs/RawBsonDocumentCodec.java @@ -0,0 +1,66 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonBinaryReader; +import org.bson.BsonBinaryWriter; +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.RawBsonDocument; +import org.bson.io.BasicOutputBuffer; +import org.bson.io.ByteBufferBsonInput; + +/** + * A simple BSONDocumentBuffer codec. It does not attempt to validate the contents of the underlying ByteBuffer. It assumes that it + * contains a single encoded BSON document. + * + * @since 3.0 + */ +public class RawBsonDocumentCodec implements Codec { + + /** + * Constructs a new instance. 
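
To make the flag mapping above concrete: `CASE_INSENSITIVE` maps to `'i'` and `MULTILINE` to `'m'`, so a pattern compiled with both carries the options string `"im"`. A sketch (the printed JSON is indicative of relaxed output):

```java
import java.util.regex.Pattern;
import org.bson.BsonDocument;
import org.bson.BsonDocumentWriter;
import org.bson.codecs.EncoderContext;
import org.bson.codecs.PatternCodec;

public final class PatternFlagsSketch {
    public static void main(String[] args) {
        Pattern pattern = Pattern.compile("^a.*z$", Pattern.CASE_INSENSITIVE | Pattern.MULTILINE);

        BsonDocument target = new BsonDocument();
        BsonDocumentWriter writer = new BsonDocumentWriter(target);
        writer.writeStartDocument();
        writer.writeName("re");
        new PatternCodec().encode(writer, pattern, EncoderContext.builder().build());
        writer.writeEndDocument();

        // {"re": {"$regularExpression": {"pattern": "^a.*z$", "options": "im"}}}
        System.out.println(target.toJson());
    }
}
```
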
+ */ + public RawBsonDocumentCodec() { + } + + @Override + public void encode(final BsonWriter writer, final RawBsonDocument value, final EncoderContext encoderContext) { + try (BsonBinaryReader reader = new BsonBinaryReader(new ByteBufferBsonInput(value.getByteBuffer()))) { + writer.pipe(reader); + } + } + + @Override + public RawBsonDocument decode(final BsonReader reader, final DecoderContext decoderContext) { + BasicOutputBuffer buffer = new BasicOutputBuffer(0); + BsonBinaryWriter writer = new BsonBinaryWriter(buffer); + try { + writer.pipe(reader); + return new RawBsonDocument(buffer.getInternalBuffer(), 0, buffer.getPosition()); + } finally { + writer.close(); + buffer.close(); + } + } + + @Override + public Class getEncoderClass() { + return RawBsonDocument.class; + } +} + diff --git a/bson/src/main/org/bson/codecs/RepresentationConfigurable.java b/bson/src/main/org/bson/codecs/RepresentationConfigurable.java new file mode 100644 index 00000000000..2d33f991052 --- /dev/null +++ b/bson/src/main/org/bson/codecs/RepresentationConfigurable.java @@ -0,0 +1,47 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonType; +import org.bson.codecs.configuration.CodecConfigurationException; + +/** + * Implementations of this interface can decode additional types + * and translate them to the desired value type depending on the BsonRepresentation. + * + * @param the value type + * @since 4.2 + */ +public interface RepresentationConfigurable { + + /** + * Gets the BsonRepresentation. + * + * @return the BsonRepresentation + */ + BsonType getRepresentation(); + + /** + * Returns an immutable codec with the given representation. If the provided representation + * is not supported an exception will be thrown. + * + * @param representation the BsonRepresentation. + * @return a new Codec with the correct representation. + * @throws CodecConfigurationException if the codec does not support the provided representation + */ + Codec withRepresentation(BsonType representation); +} diff --git a/bson/src/main/org/bson/codecs/ShortCodec.java b/bson/src/main/org/bson/codecs/ShortCodec.java new file mode 100644 index 00000000000..8c439e36b8d --- /dev/null +++ b/bson/src/main/org/bson/codecs/ShortCodec.java @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
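
A decoding sketch for `RawBsonDocumentCodec`: the incoming document is piped byte-for-byte into a buffer, so no per-field codecs are involved:

```java
import org.bson.BsonDocument;
import org.bson.BsonDocumentReader;
import org.bson.RawBsonDocument;
import org.bson.codecs.DecoderContext;
import org.bson.codecs.RawBsonDocumentCodec;

public final class RawDocumentSketch {
    public static void main(String[] args) {
        RawBsonDocument raw = new RawBsonDocumentCodec().decode(
                new BsonDocumentReader(BsonDocument.parse("{a: 1}")),
                DecoderContext.builder().build());

        // The raw document still renders as JSON on demand.
        System.out.println(raw.toJson()); // {"a": 1}
    }
}
```
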
+ */ + +package org.bson.codecs; + +import org.bson.BsonReader; +import org.bson.BsonWriter; + +import static org.bson.internal.NumberCodecHelper.decodeShort; + +/** + * Encodes and decodes {@code Short} objects. + * + * @since 3.0 + */ +public class ShortCodec implements Codec { + + @Override + public void encode(final BsonWriter writer, final Short value, final EncoderContext encoderContext) { + writer.writeInt32(value); + } + + @Override + public Short decode(final BsonReader reader, final DecoderContext decoderContext) { + return decodeShort(reader); + } + + @Override + public Class getEncoderClass() { + return Short.class; + } +} diff --git a/bson/src/main/org/bson/codecs/StringCodec.java b/bson/src/main/org/bson/codecs/StringCodec.java new file mode 100644 index 00000000000..d31cc6eb24f --- /dev/null +++ b/bson/src/main/org/bson/codecs/StringCodec.java @@ -0,0 +1,93 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonInvalidOperationException; +import org.bson.BsonReader; +import org.bson.BsonType; +import org.bson.BsonWriter; +import org.bson.codecs.configuration.CodecConfigurationException; +import org.bson.types.ObjectId; + +/** + * Encodes and decodes {@code String} objects. + * + * @since 3.0 + */ +public class StringCodec implements Codec, RepresentationConfigurable { + private final BsonType representation; + + /** + * Constructs a StringCodec with a String representation. 
+ */ + public StringCodec() { + representation = BsonType.STRING; + } + + private StringCodec(final BsonType representation) { + this.representation = representation; + } + + @Override + public BsonType getRepresentation() { + return representation; + } + + @Override + public Codec withRepresentation(final BsonType representation) { + if (representation != BsonType.OBJECT_ID && representation != BsonType.STRING) { + throw new CodecConfigurationException(representation + " is not a supported representation for StringCodec"); + } + return new StringCodec(representation); + } + + + @Override + public void encode(final BsonWriter writer, final String value, final EncoderContext encoderContext) { + switch (representation) { + case STRING: + writer.writeString(value); + break; + case OBJECT_ID: + writer.writeObjectId(new ObjectId(value)); + break; + default: + throw new BsonInvalidOperationException("Cannot encode a String to a " + representation); + } + } + + @Override + public String decode(final BsonReader reader, final DecoderContext decoderContext) { + switch (representation) { + case STRING: + if (reader.getCurrentBsonType() == BsonType.SYMBOL) { + return reader.readSymbol(); + } else { + return reader.readString(); + } + case OBJECT_ID: + return reader.readObjectId().toHexString(); + default: + throw new CodecConfigurationException("Cannot decode " + representation + " to a String"); + } + } + + @Override + public Class getEncoderClass() { + return String.class; + } +} diff --git a/bson/src/main/org/bson/codecs/SymbolCodec.java b/bson/src/main/org/bson/codecs/SymbolCodec.java new file mode 100644 index 00000000000..3b1d5a1c086 --- /dev/null +++ b/bson/src/main/org/bson/codecs/SymbolCodec.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.types.Symbol; + +/** + * A codec for BSON symbol type. + * + * @since 3.0 + */ +public class SymbolCodec implements Codec { + @Override + public Symbol decode(final BsonReader reader, final DecoderContext decoderContext) { + return new Symbol(reader.readSymbol()); + } + + @Override + public void encode(final BsonWriter writer, final Symbol value, final EncoderContext encoderContext) { + writer.writeSymbol(value.getSymbol()); + } + + @Override + public Class getEncoderClass() { + return Symbol.class; + } +} diff --git a/bson/src/main/org/bson/codecs/UuidCodec.java b/bson/src/main/org/bson/codecs/UuidCodec.java new file mode 100644 index 00000000000..a54b62ad46a --- /dev/null +++ b/bson/src/main/org/bson/codecs/UuidCodec.java @@ -0,0 +1,107 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BSONException; +import org.bson.BsonBinary; +import org.bson.BsonBinarySubType; +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.UuidRepresentation; +import org.bson.codecs.configuration.CodecConfigurationException; +import org.bson.internal.UuidHelper; + +import java.util.UUID; + +import static org.bson.assertions.Assertions.notNull; + +/** + * Encodes and decodes {@code UUID} objects. + * + * @since 3.0 + */ +public class UuidCodec implements Codec { + + private final UuidRepresentation uuidRepresentation; + + /** + * The default UUIDRepresentation is JAVA_LEGACY to be compatible with existing documents + * + * @param uuidRepresentation the representation of UUID + * @see org.bson.UuidRepresentation + */ + public UuidCodec(final UuidRepresentation uuidRepresentation) { + notNull("uuidRepresentation", uuidRepresentation); + this.uuidRepresentation = uuidRepresentation; + } + + /** + * The constructor for UUIDCodec, default is JAVA_LEGACY + */ + public UuidCodec() { + this.uuidRepresentation = UuidRepresentation.UNSPECIFIED; + } + + /** + * The {@code UuidRepresentation} with which this instance is configured + * + * @return the uuid representation + * @since 3.12 + */ + public UuidRepresentation getUuidRepresentation() { + return uuidRepresentation; + } + + @Override + public void encode(final BsonWriter writer, final UUID value, final EncoderContext encoderContext) { + if (uuidRepresentation == UuidRepresentation.UNSPECIFIED) { + throw new CodecConfigurationException("The uuidRepresentation has not been specified, so the UUID cannot be encoded."); + } + byte[] binaryData = UuidHelper.encodeUuidToBinary(value, uuidRepresentation); + // changed the default subtype to STANDARD since 3.0 + if (uuidRepresentation == UuidRepresentation.STANDARD) { + writer.writeBinaryData(new BsonBinary(BsonBinarySubType.UUID_STANDARD, binaryData)); + } else { + writer.writeBinaryData(new BsonBinary(BsonBinarySubType.UUID_LEGACY, binaryData)); + } + } + + @Override + public UUID decode(final BsonReader reader, final DecoderContext decoderContext) { + byte subType = reader.peekBinarySubType(); + + if (subType != BsonBinarySubType.UUID_LEGACY.getValue() && subType != BsonBinarySubType.UUID_STANDARD.getValue()) { + throw new BSONException("Unexpected BsonBinarySubType"); + } + + byte[] bytes = reader.readBinaryData().getData(); + + return UuidHelper.decodeBinaryToUuid(bytes, subType, uuidRepresentation); + } + + @Override + public Class getEncoderClass() { + return UUID.class; + } + + @Override + public String toString() { + return "UuidCodec{" + + "uuidRepresentation=" + uuidRepresentation + + '}'; + } +} diff --git a/bson/src/main/org/bson/codecs/UuidCodecHelper.java b/bson/src/main/org/bson/codecs/UuidCodecHelper.java new file mode 100644 index 00000000000..5087c212dee --- /dev/null +++ b/bson/src/main/org/bson/codecs/UuidCodecHelper.java @@ -0,0 +1,32 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +final class UuidCodecHelper { + // reverse elements in the subarray data[start:start+length] + public static void reverseByteArray(final byte[] data, final int start, final int length) { + for (int left = start, right = start + length - 1; left < right; left++, right--) { + // swap the values at the left and right indices + byte temp = data[left]; + data[left] = data[right]; + data[right] = temp; + } + } + + private UuidCodecHelper() { + } +} diff --git a/bson/src/main/org/bson/codecs/UuidCodecProvider.java b/bson/src/main/org/bson/codecs/UuidCodecProvider.java new file mode 100644 index 00000000000..de0fdf146e9 --- /dev/null +++ b/bson/src/main/org/bson/codecs/UuidCodecProvider.java @@ -0,0 +1,55 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + package org.bson.codecs; + +import org.bson.UuidRepresentation; +import org.bson.codecs.configuration.CodecProvider; +import org.bson.codecs.configuration.CodecRegistry; + +import java.util.UUID; + + /** + * A {@code CodecProvider} for UUID Codecs with custom UUID representations + * + * @since 3.0 + */ + public class UuidCodecProvider implements CodecProvider { + + private final UuidRepresentation uuidRepresentation; + + /** + * Set the UUIDRepresentation to be used in the codec + * default is JAVA_LEGACY to be compatible with existing documents + * + * @param uuidRepresentation the representation of UUID + * + * @since 3.0 + * @see org.bson.UuidRepresentation + */ + public UuidCodecProvider(final UuidRepresentation uuidRepresentation) { + this.uuidRepresentation = uuidRepresentation; + } + + @Override + @SuppressWarnings("unchecked") + public Codec get(final Class clazz, final CodecRegistry registry) { + if (clazz == UUID.class) { + return (Codec) (new UuidCodec(uuidRepresentation)); + } + return null; + } + } diff --git a/bson/src/main/org/bson/codecs/ValueCodecProvider.java b/bson/src/main/org/bson/codecs/ValueCodecProvider.java new file mode 100644 index 00000000000..5c21e048529 --- /dev/null +++ b/bson/src/main/org/bson/codecs/ValueCodecProvider.java @@ -0,0 +1,133 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.codecs.configuration.CodecProvider; +import org.bson.codecs.configuration.CodecRegistry; + +import java.util.HashMap; +import java.util.Map; + +/** + * A Codec provider for dynamically-typed value classes. Other providers are needed for containers for maps and arrays. It provides the + * following codecs: + * + *
+ * <ul>
+ *     <li>{@link org.bson.codecs.BinaryCodec}</li>
+ *     <li>{@link org.bson.codecs.BooleanCodec}</li>
+ *     <li>{@link org.bson.codecs.DateCodec}</li>
+ *     <li>{@link org.bson.codecs.DoubleCodec}</li>
+ *     <li>{@link org.bson.codecs.IntegerCodec}</li>
+ *     <li>{@link org.bson.codecs.LongCodec}</li>
+ *     <li>{@link org.bson.codecs.Decimal128Codec}</li>
+ *     <li>{@link org.bson.codecs.MinKeyCodec}</li>
+ *     <li>{@link org.bson.codecs.MaxKeyCodec}</li>
+ *     <li>{@link org.bson.codecs.CodeCodec}</li>
+ *     <li>{@link org.bson.codecs.ObjectIdCodec}</li>
+ *     <li>{@link org.bson.codecs.CharacterCodec}</li>
+ *     <li>{@link org.bson.codecs.StringCodec}</li>
+ *     <li>{@link org.bson.codecs.SymbolCodec}</li>
+ *     <li>{@link org.bson.codecs.UuidCodec}</li>
+ *     <li>{@link BinaryVectorCodec}</li>
+ *     <li>{@link Float32BinaryVectorCodec}</li>
+ *     <li>{@link Int8VectorCodec}</li>
+ *     <li>{@link PackedBitBinaryVectorCodec}</li>
+ *     <li>{@link org.bson.codecs.ByteCodec}</li>
+ *     <li>{@link org.bson.codecs.ShortCodec}</li>
+ *     <li>{@link org.bson.codecs.ByteArrayCodec}</li>
+ *     <li>{@link org.bson.codecs.FloatCodec}</li>
+ *     <li>{@link org.bson.codecs.AtomicBooleanCodec}</li>
+ *     <li>{@link org.bson.codecs.AtomicIntegerCodec}</li>
+ *     <li>{@link org.bson.codecs.AtomicLongCodec}</li>
+ * </ul>
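+ *
+ * <p>A short usage sketch (illustrative only; {@code CodecRegistries} is added elsewhere in this change):</p>
+ * <pre>{@code
+ * CodecRegistry registry = CodecRegistries.fromProviders(new ValueCodecProvider());
+ * Codec<Integer> integerCodec = registry.get(Integer.class);
+ * }</pre>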
+ * + * @since 3.0 + */ +public class ValueCodecProvider implements CodecProvider { + private final Map, Codec> codecs = new HashMap<>(); + + /** + * A provider of Codecs for simple value types. + */ + public ValueCodecProvider() { + addCodecs(); + } + + @Override + @SuppressWarnings("unchecked") + public Codec get(final Class clazz, final CodecRegistry registry) { + return (Codec) codecs.get(clazz); + } + + private void addCodecs() { + addCodec(new BinaryCodec()); + addCodec(new BooleanCodec()); + addCodec(new DateCodec()); + addCodec(new DoubleCodec()); + addCodec(new IntegerCodec()); + addCodec(new LongCodec()); + addCodec(new MinKeyCodec()); + addCodec(new MaxKeyCodec()); + addCodec(new CodeCodec()); + addCodec(new Decimal128Codec()); + addCodec(new BigDecimalCodec()); + addCodec(new ObjectIdCodec()); + addCodec(new CharacterCodec()); + addCodec(new StringCodec()); + addCodec(new SymbolCodec()); + addCodec(new OverridableUuidRepresentationUuidCodec()); + addCodec(new BinaryVectorCodec()); + addCodec(new Float32BinaryVectorCodec()); + addCodec(new Int8VectorCodec()); + addCodec(new PackedBitBinaryVectorCodec()); + + addCodec(new ByteCodec()); + addCodec(new PatternCodec()); + addCodec(new ShortCodec()); + addCodec(new ByteArrayCodec()); + addCodec(new FloatCodec()); + addCodec(new AtomicBooleanCodec()); + addCodec(new AtomicIntegerCodec()); + addCodec(new AtomicLongCodec()); + } + + private void addCodec(final Codec codec) { + codecs.put(codec.getEncoderClass(), codec); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public String toString() { + return "ValueCodecProvider{}"; + } +} diff --git a/bson/src/main/org/bson/codecs/configuration/CodecConfigurationException.java b/bson/src/main/org/bson/codecs/configuration/CodecConfigurationException.java new file mode 100644 index 00000000000..13d5c79dfa5 --- /dev/null +++ b/bson/src/main/org/bson/codecs/configuration/CodecConfigurationException.java @@ -0,0 +1,48 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.configuration; + +/** + * An exception indicating that a codec registry has been misconfigured in some way, preventing it from providing a codec for the + * requested class. + * + * @since 3.0 + */ +public class CodecConfigurationException extends RuntimeException { + + private static final long serialVersionUID = -5656763889202800056L; + + /** + * Construct a new instance. 
+ * + * @param msg the message + */ + public CodecConfigurationException(final String msg) { + super(msg); + } + + /** + * Construct a new instance and wraps a cause + * + * @param message the message + * @param cause the underlying cause + * @since 3.5 + */ + public CodecConfigurationException(final String message, final Throwable cause) { + super(message, cause); + } +} diff --git a/bson/src/main/org/bson/codecs/configuration/CodecProvider.java b/bson/src/main/org/bson/codecs/configuration/CodecProvider.java new file mode 100644 index 00000000000..8f01c60b551 --- /dev/null +++ b/bson/src/main/org/bson/codecs/configuration/CodecProvider.java @@ -0,0 +1,72 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.configuration; + +import org.bson.codecs.Codec; + +import java.lang.reflect.Type; +import java.util.Collection; +import java.util.List; + +/** + * A provider of {@code Codec} instances. Typically, an instance of a class implementing this interface would be used to construct a + * {@code CodecRegistry}. + * + *
+ * <p>While the {@code CodecProvider} interface adds no stipulations to the general contract for {@code Object.equals},
+ * programmers who implement the {@code CodecProvider} interface "directly" must exercise care if they choose to override
+ * {@code Object.equals}. It is not necessary to do so, and the simplest course of action is to rely on Object's implementation, but the
+ * implementer may wish to implement a "value comparison" in place of the default "reference comparison."</p>
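+ *
+ * <p>A minimal provider sketch ({@code MyType} and {@code MyTypeCodec} are hypothetical names, not part of this change):</p>
+ * <pre>{@code
+ * public final class MyTypeCodecProvider implements CodecProvider {
+ *     public <T> Codec<T> get(final Class<T> clazz, final CodecRegistry registry) {
+ *         if (clazz == MyType.class) {
+ *             return (Codec<T>) new MyTypeCodec(); // unchecked cast, safe after the class check
+ *         }
+ *         return null; // null signals that this provider cannot supply a codec for the class
+ *     }
+ * }
+ * }</pre>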
+ * + * @since 3.0 + */ +public interface CodecProvider { + + /** + * Get a {@code Codec} using the given context, which includes, most importantly, the Class for which a {@code Codec} is required. + * + *
+ * <p>This method is called by the driver only if {@link #get(Class, List, CodecRegistry)} is not overridden,
+ * or is overridden such that it calls this method.</p>
+ * + * @param clazz the Class for which to get a Codec + * @param registry the registry to use for resolving dependent Codec instances + * @param the type of the class for which a Codec is required + * @return the Codec instance, which may be null, if this source is unable to provide one for the requested Class + */ + Codec get(Class clazz, CodecRegistry registry); + + /** + * Get a {@code Codec} using the given context, which includes, most importantly, the Class for which a {@code Codec} is required. + * + *
+ * <p>The default implementation delegates to {@link #get(Class, CodecRegistry)}, thus not propagating {@code typeArguments}
+ * when it uses the {@code registry}.</p>
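+ *
+ * <p>For illustration (a hypothetical call, with {@code provider} and {@code registry} assumed to exist): a lookup for
+ * {@code List<String>} reaches this method as</p>
+ * <pre>{@code
+ * Codec<List> listCodec = provider.get(List.class, Collections.<Type>singletonList(String.class), registry);
+ * }</pre>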
+ * + * @param clazz the Class for which to get a Codec + * @param typeArguments The type arguments for the {@code clazz}. The size of the list is either equal to the + * number of type parameters of the {@code clazz}, or is zero. + * For example, if {@code clazz} is {@link Collection}{@code .class}, then the size of {@code typeArguments} is one, + * since {@link Collection} has a single type parameter. + * The list may be {@linkplain List#isEmpty() empty} either because the {@code clazz} is not generic, + * or because another {@link CodecProvider} did not propagate {@code clazz}'s type arguments to the {@code registry} when using it. + * @param registry the registry to use for resolving dependent Codec instances + * @return the Codec instance, which may be null, if this source is unable to provide one for the requested Class + * @param the type of the class for which a Codec is required + * @since 4.10 + */ + default Codec get(Class clazz, List typeArguments, CodecRegistry registry) { + return get(clazz, registry); + } +} diff --git a/bson/src/main/org/bson/codecs/configuration/CodecRegistries.java b/bson/src/main/org/bson/codecs/configuration/CodecRegistries.java new file mode 100644 index 00000000000..87996dbb632 --- /dev/null +++ b/bson/src/main/org/bson/codecs/configuration/CodecRegistries.java @@ -0,0 +1,144 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.configuration; + +import org.bson.UuidRepresentation; +import org.bson.codecs.Codec; +import org.bson.internal.ProvidersCodecRegistry; + +import java.util.List; + +import static java.util.Arrays.asList; + +/** + * A helper class for creating and combining codecs, codec providers, and codec registries + * + * @since 3.0 + */ +public final class CodecRegistries { + + /** + * Apply given {@link UuidRepresentation} to the given {@link CodecRegistry}. + * + * @param codecRegistry the code registry + * @param uuidRepresentation the uuid representation + * @return a {@code CodecRegistry} with the given {@code UuidRepresentation} applied to the given {@code CodecRegistry} + * @since 4.5 + */ + public static CodecRegistry withUuidRepresentation(final CodecRegistry codecRegistry, final UuidRepresentation uuidRepresentation) { + return fromProviders(new OverridableUuidRepresentationCodecProvider(codecRegistry, uuidRepresentation)); + } + + /** + * Creates a {@code CodecRegistry} from the provided list of {@code Codec} instances. + * + *
+ * <p>This registry can then be used alongside other registries. Typically used when adding extra codecs to existing codecs with the
+ * {@link #fromRegistries(CodecRegistry...)} helper.</p>
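+ *
+ * <p>For example (a sketch using the {@code UuidCodec} added in this change):</p>
+ * <pre>{@code
+ * CodecRegistry uuidRegistry = CodecRegistries.fromCodecs(new UuidCodec(UuidRepresentation.STANDARD));
+ * }</pre>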
+ * + * @param codecs the {@code Codec} to create a registry for + * @return a {@code CodecRegistry} for the given list of {@code Codec} instances. + */ + public static CodecRegistry fromCodecs(final Codec... codecs) { + return fromCodecs(asList(codecs)); + } + + /** + * Creates a {@code CodecRegistry} from the provided list of {@code Codec} instances. + * + *
+ * <p>This registry can then be used alongside other registries. Typically used when adding extra codecs to existing codecs with the
+ * {@link #fromRegistries(CodecRegistry...)} helper.</p>
+ * + * @param codecs the {@code Codec} to create a registry for + * @return a {@code CodecRegistry} for the given list of {@code Codec} instances. + */ + public static CodecRegistry fromCodecs(final List> codecs) { + return fromProviders(new MapOfCodecsProvider(codecs)); + } + + /** + * Creates a {@code CodecRegistry} from the provided list of {@code CodecProvider} instances. + * + *
+ * <p>The created instance can handle cycles of {@code Codec} dependencies, i.e. when the construction of a {@code Codec} for class A
+ * requires the construction of a {@code Codec} for class B, and vice versa.</p>
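+ *
+ * <p>For example (a sketch using providers added in this change):</p>
+ * <pre>{@code
+ * CodecRegistry registry = CodecRegistries.fromProviders(new ValueCodecProvider(), new Jsr310CodecProvider());
+ * }</pre>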
+ * + * @param providers the codec provider + * @return a {@code CodecRegistry} with the ordered list of {@code CodecProvider} instances. The registry is also guaranteed to be an + * instance of {code CodecProvider}, so that when one is passed to {@link #fromRegistries(CodecRegistry...)} or {@link + * #fromRegistries(java.util.List)} it will be treated as a {@code CodecProvider} and properly resolve any dependencies between + * registries. + */ + public static CodecRegistry fromProviders(final CodecProvider... providers) { + return fromProviders(asList(providers)); + } + + /** + * Creates a {@code CodecRegistry} from the provided list of {@code CodecProvider} instances. + * + *
+ * <p>The created instance can handle cycles of {@code Codec} dependencies, i.e. when the construction of a {@code Codec} for class A
+ * requires the construction of a {@code Codec} for class B, and vice versa.</p>
+ * + * @param providers the codec provider + * @return a {@code CodecRegistry} with the ordered list of {@code CodecProvider} instances. The registry is also guaranteed to be an + * instance of {code CodecProvider}, so that when one is passed to {@link #fromRegistries(CodecRegistry...)} or {@link + * #fromRegistries(java.util.List)} it will be treated as a {@code CodecProvider} and properly resolve any dependencies between + * registries. + */ + public static CodecRegistry fromProviders(final List providers) { + return new ProvidersCodecRegistry(providers); + } + + /** + * A {@code CodecRegistry} that combines the given {@code CodecRegistry} instances into a single registry. + * + *
+ * <p>The registries are checked in order until one returns a {@code Codec} for the requested {@code Class}.</p>
+ *
+ * <p>The created instance can handle cycles of {@code Codec} dependencies, i.e. when the construction of a {@code Codec} for class A
+ * requires the construction of a {@code Codec} for class B, and vice versa.</p>
+ *
+ * <p>Any of the given registries that also implement {@code CodecProvider} will be treated as a {@code CodecProvider} instead of a
+ * {@code CodecRegistry}, which will ensure proper resolution of any dependencies between registries.</p>
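+ *
+ * <p>A precedence sketch (the first registry that can supply a codec wins):</p>
+ * <pre>{@code
+ * CodecRegistry defaults = CodecRegistries.fromProviders(new ValueCodecProvider());
+ * CodecRegistry overrides = CodecRegistries.fromCodecs(new UuidCodec(UuidRepresentation.STANDARD));
+ * // overrides is checked first, so its UuidCodec shadows the UUID codec from defaults
+ * CodecRegistry combined = CodecRegistries.fromRegistries(overrides, defaults);
+ * }</pre>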
+ * + * @param registries the preferred registry for {@code Codec} lookups + * + * @return a {@code CodecRegistry} that combines the list of {@code CodecRegistry} instances into a single one + */ + public static CodecRegistry fromRegistries(final CodecRegistry... registries) { + return fromRegistries(asList(registries)); + } + + /** + * A {@code CodecRegistry} that combines the given {@code CodecRegistry} instances into a single registry. + * + *
+ * <p>The registries are checked in order until one returns a {@code Codec} for the requested {@code Class}.</p>
+ *
+ * <p>The created instance can handle cycles of {@code Codec} dependencies, i.e. when the construction of a {@code Codec} for class A
+ * requires the construction of a {@code Codec} for class B, and vice versa.</p>
+ *
+ * <p>Any of the given registries that also implement {@code CodecProvider} will be treated as a {@code CodecProvider} instead of a
+ * {@code CodecRegistry}, which will ensure proper resolution of any dependencies between registries.</p>
+ * + * @param registries the preferred registry for {@code Codec} lookups + * + * @return a {@code CodecRegistry} that combines the list of {@code CodecRegistry} instances into a single one + */ + public static CodecRegistry fromRegistries(final List registries) { + return new ProvidersCodecRegistry(registries); + } + + private CodecRegistries() { + } +} diff --git a/bson/src/main/org/bson/codecs/configuration/CodecRegistry.java b/bson/src/main/org/bson/codecs/configuration/CodecRegistry.java new file mode 100644 index 00000000000..f77ad80068c --- /dev/null +++ b/bson/src/main/org/bson/codecs/configuration/CodecRegistry.java @@ -0,0 +1,72 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.configuration; + +import org.bson.assertions.Assertions; +import org.bson.codecs.Codec; + +import java.lang.reflect.Type; +import java.util.List; + +/** + * A registry of Codec instances searchable by the class that the Codec can encode and decode. + * + *
+ * <p>While the {@code CodecRegistry} interface adds no stipulations to the general contract for {@code Object.equals},
+ * programmers who implement the {@code CodecRegistry} interface "directly" must exercise care if they choose to override
+ * {@code Object.equals}. It is not necessary to do so, and the simplest course of action is to rely on Object's implementation, but the
+ * implementer may wish to implement a "value comparison" in place of the default "reference comparison."</p>
+ *
+ * <p>As of the 4.0 release, this interface extends the {@code CodecProvider} interface. This capability was introduced to enable nesting
+ * registries inside another registry.</p>
+ *
+ * <p>Applications are encouraged to NOT implement this interface, but rather use the factory methods in {@link CodecRegistries}.</p>
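+ *
+ * <p>A typical lookup, as a sketch:</p>
+ * <pre>{@code
+ * CodecRegistry registry = CodecRegistries.fromProviders(new ValueCodecProvider());
+ * Codec<String> stringCodec = registry.get(String.class);
+ * }</pre>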
+ * + * @since 3.0 + * @see CodecRegistries + */ +public interface CodecRegistry extends CodecProvider { + /** + * Gets a {@code Codec} for the given Class. + * + * @param clazz the class + * @param the class type + * @return a codec for the given class + * @throws CodecConfigurationException if the registry does not contain a codec for the given class. + */ + Codec get(Class clazz); + + /** + * Gets a Codec for the given parameterized class, after resolving any type variables with the given type arguments. + * + *
+ * <p>
+ * The default behavior is to throw an {@link AssertionError}, as it is expected that {@code CodecRegistry} implementations are always
+ * provided by this library and will override the method appropriately.
+ * </p>
+ * + * @param clazz the parameterized class + * @param typeArguments the type arguments to apply to the parameterized class. This list may be empty but not null. + * @param the class type + * @return a codec for the given class, with the given type parameters resolved + * @throws CodecConfigurationException if no codec can be found for the given class and type arguments. + * @throws AssertionError by default, if the implementation does not override this method, or if no codec can be found + * for the given class and type arguments. + * @since 4.8 + */ + default Codec get(Class clazz, List typeArguments) { + throw Assertions.fail("This method should have been overridden but was not."); + } +} diff --git a/bson/src/main/org/bson/codecs/configuration/MapOfCodecsProvider.java b/bson/src/main/org/bson/codecs/configuration/MapOfCodecsProvider.java new file mode 100644 index 00000000000..c8277a23942 --- /dev/null +++ b/bson/src/main/org/bson/codecs/configuration/MapOfCodecsProvider.java @@ -0,0 +1,46 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.configuration; + +import org.bson.codecs.Codec; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +final class MapOfCodecsProvider implements CodecProvider { + private final Map, Codec> codecsMap = new HashMap<>(); + + MapOfCodecsProvider(final List> codecsList) { + for (Codec codec : codecsList) { + codecsMap.put(codec.getEncoderClass(), codec); + } + } + + @Override + @SuppressWarnings("unchecked") + public Codec get(final Class clazz, final CodecRegistry registry) { + return (Codec) codecsMap.get(clazz); + } + + @Override + public String toString() { + return "MapOfCodecsProvider{" + + "codecsMap=" + codecsMap + + '}'; + } +} diff --git a/bson/src/main/org/bson/codecs/configuration/OverridableUuidRepresentationCodecProvider.java b/bson/src/main/org/bson/codecs/configuration/OverridableUuidRepresentationCodecProvider.java new file mode 100644 index 00000000000..f46964fedd3 --- /dev/null +++ b/bson/src/main/org/bson/codecs/configuration/OverridableUuidRepresentationCodecProvider.java @@ -0,0 +1,86 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs.configuration; + +import org.bson.UuidRepresentation; +import org.bson.codecs.Codec; +import org.bson.codecs.OverridableUuidRepresentationCodec; + +import java.lang.reflect.Type; +import java.util.Collections; +import java.util.List; + +import static org.bson.assertions.Assertions.notNull; + +final class OverridableUuidRepresentationCodecProvider implements CodecProvider { + + private final CodecProvider wrapped; + private final UuidRepresentation uuidRepresentation; + + OverridableUuidRepresentationCodecProvider(final CodecProvider wrapped, final UuidRepresentation uuidRepresentation) { + this.uuidRepresentation = notNull("uuidRepresentation", uuidRepresentation); + this.wrapped = notNull("wrapped", wrapped); + } + + @Override + public Codec get(final Class clazz, final CodecRegistry registry) { + return get(clazz, Collections.emptyList(), registry); + } + + @Override + public Codec get(final Class clazz, final List typeArguments, final CodecRegistry registry) { + Codec codec = wrapped.get(clazz, typeArguments, registry); + if (codec instanceof OverridableUuidRepresentationCodec) { + @SuppressWarnings("unchecked") + Codec codecWithUuidRepresentation = ((OverridableUuidRepresentationCodec) codec).withUuidRepresentation(uuidRepresentation); + codec = codecWithUuidRepresentation; + } + return codec; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + OverridableUuidRepresentationCodecProvider that = (OverridableUuidRepresentationCodecProvider) o; + + if (!wrapped.equals(that.wrapped)) { + return false; + } + return uuidRepresentation == that.uuidRepresentation; + } + + @Override + public int hashCode() { + int result = wrapped.hashCode(); + result = 31 * result + uuidRepresentation.hashCode(); + return result; + } + + @Override + public String toString() { + return "OverridableUuidRepresentationCodecRegistry{" + + "wrapped=" + wrapped + + ", uuidRepresentation=" + uuidRepresentation + + '}'; + } +} diff --git a/bson/src/main/org/bson/codecs/configuration/package-info.java b/bson/src/main/org/bson/codecs/configuration/package-info.java new file mode 100644 index 00000000000..a4c3e3847ea --- /dev/null +++ b/bson/src/main/org/bson/codecs/configuration/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains codec configurations and the codec registry helper + */ +package org.bson.codecs.configuration; diff --git a/bson/src/main/org/bson/codecs/jsr310/DateTimeBasedCodec.java b/bson/src/main/org/bson/codecs/jsr310/DateTimeBasedCodec.java new file mode 100644 index 00000000000..b956d022198 --- /dev/null +++ b/bson/src/main/org/bson/codecs/jsr310/DateTimeBasedCodec.java @@ -0,0 +1,37 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.jsr310; + +import org.bson.BsonReader; +import org.bson.BsonType; +import org.bson.codecs.Codec; +import org.bson.codecs.configuration.CodecConfigurationException; + +import static java.lang.String.format; + +abstract class DateTimeBasedCodec implements Codec { + + long validateAndReadDateTime(final BsonReader reader) { + BsonType currentType = reader.getCurrentBsonType(); + if (!currentType.equals(BsonType.DATE_TIME)) { + throw new CodecConfigurationException(format("Could not decode into %s, expected '%s' BsonType but got '%s'.", + getEncoderClass().getSimpleName(), BsonType.DATE_TIME, currentType)); + } + return reader.readDateTime(); + } + +} diff --git a/bson/src/main/org/bson/codecs/jsr310/InstantCodec.java b/bson/src/main/org/bson/codecs/jsr310/InstantCodec.java new file mode 100644 index 00000000000..29eb1f8469d --- /dev/null +++ b/bson/src/main/org/bson/codecs/jsr310/InstantCodec.java @@ -0,0 +1,66 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * Copyright 2018 Cezary Bartosiak + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.jsr310; + +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.configuration.CodecConfigurationException; + +import java.time.Instant; + +import static java.lang.String.format; + +/** + * Instant Codec. + * + *
+ * <p>
+ * Encodes and decodes {@code Instant} objects to and from {@code DateTime}.
+ * Data is extracted via {@link Instant#toEpochMilli()} and stored to millisecond accuracy.
+ * </p>
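+ *
+ * <p>For example (illustrative): sub-millisecond precision is lost on encoding.</p>
+ * <pre>{@code
+ * Instant instant = Instant.ofEpochSecond(0, 1_500_000); // 1.5 ms after the epoch
+ * // encoded as the BSON DateTime value 1, since toEpochMilli() truncates to whole milliseconds
+ * }</pre>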
+ * + * @mongodb.driver.manual reference/bson-types + * @since 3.7 + */ +public class InstantCodec extends DateTimeBasedCodec { + + @Override + public Instant decode(final BsonReader reader, final DecoderContext decoderContext) { + return Instant.ofEpochMilli(validateAndReadDateTime(reader)); + } + + /** + * {@inheritDoc} + * @throws CodecConfigurationException if the Instant cannot be converted to a valid Bson DateTime. + */ + @Override + public void encode(final BsonWriter writer, final Instant value, final EncoderContext encoderContext) { + try { + writer.writeDateTime(value.toEpochMilli()); + } catch (ArithmeticException e) { + throw new CodecConfigurationException(format("Unsupported Instant value '%s' could not be converted to milliseconds: %s", + value, e.getMessage()), e); + } + } + + @Override + public Class getEncoderClass() { + return Instant.class; + } +} diff --git a/bson/src/main/org/bson/codecs/jsr310/Jsr310CodecProvider.java b/bson/src/main/org/bson/codecs/jsr310/Jsr310CodecProvider.java new file mode 100644 index 00000000000..feea82df72a --- /dev/null +++ b/bson/src/main/org/bson/codecs/jsr310/Jsr310CodecProvider.java @@ -0,0 +1,64 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * Copyright 2018 Cezary Bartosiak + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.jsr310; + +import org.bson.codecs.Codec; +import org.bson.codecs.configuration.CodecProvider; +import org.bson.codecs.configuration.CodecRegistry; + +import java.util.HashMap; +import java.util.Map; + +/** + * A CodecProvider for JSR-310 Date and Time API classes. + * + *
+ * <p>
+ * Supplies the following JSR-310 based Codecs:
+ * <ul>
+ *     <li>{@link InstantCodec}</li>
+ *     <li>{@link LocalDateCodec}</li>
+ *     <li>{@link LocalDateTimeCodec}</li>
+ *     <li>{@link LocalTimeCodec}</li>
+ * </ul>
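+ *
+ * <p>Usage sketch (the registry helpers come from elsewhere in this change):</p>
+ * <pre>{@code
+ * CodecRegistry registry = CodecRegistries.fromProviders(new ValueCodecProvider(), new Jsr310CodecProvider());
+ * Codec<Instant> instantCodec = registry.get(Instant.class);
+ * }</pre>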
+ * + * @since 3.7 + */ +public class Jsr310CodecProvider implements CodecProvider { + private static final Map, Codec> JSR310_CODEC_MAP = new HashMap<>(); + static { + putCodec(new InstantCodec()); + putCodec(new LocalDateCodec()); + putCodec(new LocalDateTimeCodec()); + putCodec(new LocalTimeCodec()); + } + + private static void putCodec(final Codec codec) { + JSR310_CODEC_MAP.put(codec.getEncoderClass(), codec); + } + + @SuppressWarnings("unchecked") + @Override + public Codec get(final Class clazz, final CodecRegistry registry) { + return (Codec) JSR310_CODEC_MAP.get(clazz); + } + + @Override + public String toString() { + return "Jsr310CodecProvider{}"; + } +} diff --git a/bson/src/main/org/bson/codecs/jsr310/LocalDateCodec.java b/bson/src/main/org/bson/codecs/jsr310/LocalDateCodec.java new file mode 100644 index 00000000000..0074945038a --- /dev/null +++ b/bson/src/main/org/bson/codecs/jsr310/LocalDateCodec.java @@ -0,0 +1,68 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * Copyright 2018 Cezary Bartosiak + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.jsr310; + +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.configuration.CodecConfigurationException; + +import java.time.Instant; +import java.time.LocalDate; +import java.time.ZoneId; +import java.time.ZoneOffset; + +import static java.lang.String.format; + +/** + * LocalDate Codec. + * + *
+ * <p>Encodes and decodes {@code LocalDate} objects to and from {@code DateTime}.</p>
+ *
+ * <p>Converts the {@code LocalDate} values to and from {@link ZoneOffset#UTC}.</p>
+ * + * @mongodb.driver.manual reference/bson-types + * @since 3.7 + */ +public class LocalDateCodec extends DateTimeBasedCodec { + + @Override + public LocalDate decode(final BsonReader reader, final DecoderContext decoderContext) { + return Instant.ofEpochMilli(validateAndReadDateTime(reader)).atZone(ZoneOffset.UTC).toLocalDate(); + } + + /** + * {@inheritDoc} + *
+ * <p>Converts the {@code LocalDate} to {@link ZoneOffset#UTC} via {@link LocalDate#atStartOfDay(ZoneId)}.</p>
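+ *
+ * <p>For example (illustrative):</p>
+ * <pre>{@code
+ * // LocalDate.of(1970, 1, 2) encodes as the BSON DateTime 86_400_000,
+ * // i.e. 1970-01-02T00:00:00Z in milliseconds since the epoch
+ * }</pre>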
+ * @throws CodecConfigurationException if the LocalDate cannot be converted to a valid Bson DateTime. + */ + @Override + public void encode(final BsonWriter writer, final LocalDate value, final EncoderContext encoderContext) { + try { + writer.writeDateTime(value.atStartOfDay(ZoneOffset.UTC).toInstant().toEpochMilli()); + } catch (ArithmeticException e) { + throw new CodecConfigurationException(format("Unsupported LocalDate '%s' could not be converted to milliseconds: %s", + value, e.getMessage()), e); + } + } + + @Override + public Class getEncoderClass() { + return LocalDate.class; + } +} diff --git a/bson/src/main/org/bson/codecs/jsr310/LocalDateTimeCodec.java b/bson/src/main/org/bson/codecs/jsr310/LocalDateTimeCodec.java new file mode 100644 index 00000000000..0444fec4f38 --- /dev/null +++ b/bson/src/main/org/bson/codecs/jsr310/LocalDateTimeCodec.java @@ -0,0 +1,67 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * Copyright 2018 Cezary Bartosiak + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.jsr310; + +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.configuration.CodecConfigurationException; + +import java.time.Instant; +import java.time.LocalDateTime; +import java.time.ZoneOffset; + +import static java.lang.String.format; + +/** + * LocalDateTime Codec. + * + *
+ * <p>Encodes and decodes {@code LocalDateTime} objects to and from {@code DateTime}. Data is stored to millisecond accuracy.</p>
+ *
+ * <p>Converts the {@code LocalDateTime} values to and from {@link ZoneOffset#UTC}.</p>
+ * + * @mongodb.driver.manual reference/bson-types + * @since 3.7 + */ +public class LocalDateTimeCodec extends DateTimeBasedCodec { + + @Override + public LocalDateTime decode(final BsonReader reader, final DecoderContext decoderContext) { + return Instant.ofEpochMilli(validateAndReadDateTime(reader)).atZone(ZoneOffset.UTC).toLocalDateTime(); + } + + /** + * {@inheritDoc} + *
+ * <p>Converts the {@code LocalDateTime} to {@link ZoneOffset#UTC} via {@link LocalDateTime#toInstant(ZoneOffset)}.</p>
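+ *
+ * <p>For example (illustrative):</p>
+ * <pre>{@code
+ * // LocalDateTime.of(1970, 1, 1, 0, 0, 1) encodes as the BSON DateTime 1000
+ * }</pre>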
+ * @throws CodecConfigurationException if the LocalDateTime cannot be converted to a valid Bson DateTime. + */ + @Override + public void encode(final BsonWriter writer, final LocalDateTime value, final EncoderContext encoderContext) { + try { + writer.writeDateTime(value.toInstant(ZoneOffset.UTC).toEpochMilli()); + } catch (ArithmeticException e) { + throw new CodecConfigurationException(format("Unsupported LocalDateTime value '%s' could not be converted to milliseconds: %s", + value, e.getMessage()), e); + } + } + + @Override + public Class getEncoderClass() { + return LocalDateTime.class; + } +} diff --git a/bson/src/main/org/bson/codecs/jsr310/LocalTimeCodec.java b/bson/src/main/org/bson/codecs/jsr310/LocalTimeCodec.java new file mode 100644 index 00000000000..710e6ef6fcf --- /dev/null +++ b/bson/src/main/org/bson/codecs/jsr310/LocalTimeCodec.java @@ -0,0 +1,60 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * Copyright 2018 Cezary Bartosiak + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.jsr310; + +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; + +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalTime; +import java.time.ZoneOffset; + +/** + * LocalTime Codec. + * + *
+ * <p>Encodes and decodes {@code LocalTime} objects to and from {@code DateTime}. Data is stored to millisecond accuracy.</p>
+ *
+ * <p>Converts the {@code LocalTime} values to and from EpochDay at {@link ZoneOffset#UTC}.</p>
+ * + * @mongodb.driver.manual reference/bson-types + * @since 3.7 + */ +public class LocalTimeCodec extends DateTimeBasedCodec { + + @Override + public LocalTime decode(final BsonReader reader, final DecoderContext decoderContext) { + return Instant.ofEpochMilli(validateAndReadDateTime(reader)).atOffset(ZoneOffset.UTC).toLocalTime(); + } + + /** + * {@inheritDoc} + *
+ * <p>Converts the {@code LocalTime} to {@link ZoneOffset#UTC} at EpochDay via {@link LocalTime#atDate(LocalDate)} and
+ * {@link java.time.LocalDateTime#toInstant(ZoneOffset)}.</p>
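+ *
+ * <p>For example (illustrative):</p>
+ * <pre>{@code
+ * // LocalTime.of(10, 30) encodes as the BSON DateTime 37_800_000,
+ * // i.e. 1970-01-01T10:30:00Z in milliseconds since the epoch
+ * }</pre>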
+ */ + @Override + public void encode(final BsonWriter writer, final LocalTime value, final EncoderContext encoderContext) { + writer.writeDateTime(value.atDate(LocalDate.ofEpochDay(0L)).toInstant(ZoneOffset.UTC).toEpochMilli()); + } + + @Override + public Class getEncoderClass() { + return LocalTime.class; + } +} diff --git a/bson/src/main/org/bson/codecs/jsr310/package-info.java b/bson/src/main/org/bson/codecs/jsr310/package-info.java new file mode 100644 index 00000000000..6541a66af34 --- /dev/null +++ b/bson/src/main/org/bson/codecs/jsr310/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains classes specific to the JSR-310 Date and Time API + */ +package org.bson.codecs.jsr310; diff --git a/bson/src/main/org/bson/codecs/package-info.java b/bson/src/main/org/bson/codecs/package-info.java new file mode 100644 index 00000000000..74000679ccd --- /dev/null +++ b/bson/src/main/org/bson/codecs/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains all the default BSON codecs. + */ +package org.bson.codecs; diff --git a/bson/src/main/org/bson/codecs/pojo/AutomaticPojoCodec.java b/bson/src/main/org/bson/codecs/pojo/AutomaticPojoCodec.java new file mode 100644 index 00000000000..921976a78e3 --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/AutomaticPojoCodec.java @@ -0,0 +1,74 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs.pojo; + +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.configuration.CodecConfigurationException; + +import static java.lang.String.format; + +final class AutomaticPojoCodec extends PojoCodec { + private final PojoCodec pojoCodec; + + AutomaticPojoCodec(final PojoCodec pojoCodec) { + this.pojoCodec = pojoCodec; + } + + @Override + public T decode(final BsonReader reader, final DecoderContext decoderContext) { + try { + return pojoCodec.decode(reader, decoderContext); + } catch (CodecConfigurationException e) { + throw new CodecConfigurationException( + format("An exception occurred when decoding using the AutomaticPojoCodec.%n" + + "Decoding into a '%s' failed with the following exception:%n%n%s%n%n" + + "A custom Codec or PojoCodec may need to be explicitly configured and registered to handle this type.", + pojoCodec.getEncoderClass().getSimpleName(), e.getMessage()), e); + } + } + + @Override + public void encode(final BsonWriter writer, final T value, final EncoderContext encoderContext) { + try { + pojoCodec.encode(writer, value, encoderContext); + } catch (CodecConfigurationException e) { + throw new CodecConfigurationException( + format("An exception occurred when encoding using the AutomaticPojoCodec.%n" + + "Encoding a %s: '%s' failed with the following exception:%n%n%s%n%n" + + "A custom Codec or PojoCodec may need to be explicitly configured and registered to handle this type.", + getEncoderClass().getSimpleName(), value, e.getMessage()), e); + } + } + + @Override + public Class getEncoderClass() { + return pojoCodec.getEncoderClass(); + } + + @Override + ClassModel getClassModel() { + return pojoCodec.getClassModel(); + } + + @Override + DiscriminatorLookup getDiscriminatorLookup() { + return pojoCodec.getDiscriminatorLookup(); + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/ClassModel.java b/bson/src/main/org/bson/codecs/pojo/ClassModel.java new file mode 100644 index 00000000000..d47452ab578 --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/ClassModel.java @@ -0,0 +1,232 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +/** + * This model represents the metadata for a class and all its properties. 
+ *
+ * @param <T> The type of the class the ClassModel represents
+ * @since 3.5
+ */
+public final class ClassModel<T> {
+    private final String name;
+    private final Class<T> type;
+    private final boolean hasTypeParameters;
+    private final InstanceCreatorFactory<T> instanceCreatorFactory;
+    private final boolean discriminatorEnabled;
+    private final String discriminatorKey;
+    private final String discriminator;
+    private final IdPropertyModelHolder<?> idPropertyModelHolder;
+    private final List<PropertyModel<?>> propertyModels;
+    private final Map<String, TypeParameterMap> propertyNameToTypeParameterMap;
+
+    ClassModel(final Class<T> clazz, final Map<String, TypeParameterMap> propertyNameToTypeParameterMap,
+               final InstanceCreatorFactory<T> instanceCreatorFactory, final Boolean discriminatorEnabled, final String discriminatorKey,
+               final String discriminator, final IdPropertyModelHolder<?> idPropertyModelHolder,
+               final List<PropertyModel<?>> propertyModels) {
+        this.name = clazz.getSimpleName();
+        this.type = clazz;
+        this.hasTypeParameters = clazz.getTypeParameters().length > 0;
+        this.propertyNameToTypeParameterMap = Collections.unmodifiableMap(
+                new HashMap<>(propertyNameToTypeParameterMap));
+        this.instanceCreatorFactory = instanceCreatorFactory;
+        this.discriminatorEnabled = discriminatorEnabled;
+        this.discriminatorKey = discriminatorKey;
+        this.discriminator = discriminator;
+        this.idPropertyModelHolder = idPropertyModelHolder;
+        this.propertyModels = Collections.unmodifiableList(new ArrayList<>(propertyModels));
+    }
+
+    /**
+     * Creates a new Class Model builder instance using reflection.
+     *
+     * @param type the POJO class to reflect and configure the builder with.
+     * @param <S> the type of the class
+     * @return a new Class Model builder instance using reflection on the {@code clazz}.
+     */
+    public static <S> ClassModelBuilder<S> builder(final Class<S> type) {
+        return new ClassModelBuilder<>(type);
+    }
+
+    /**
+     * @return a new InstanceCreator instance for the ClassModel
+     */
+    InstanceCreator<T> getInstanceCreator() {
+        return instanceCreatorFactory.create();
+    }
+
+    /**
+     * @return the backing class for the ClassModel
+     */
+    public Class<T> getType() {
+        return type;
+    }
+
+    /**
+     * @return true if the underlying type has type parameters.
+     */
+    public boolean hasTypeParameters() {
+        return hasTypeParameters;
+    }
+
+    /**
+     * @return true if a discriminator should be used when storing the data.
+     */
+    public boolean useDiscriminator() {
+        return discriminatorEnabled;
+    }
+
+    /**
+     * Returns the discriminator key.
+     *
+     * @return the discriminator key or null if not set
+     */
+    public String getDiscriminatorKey() {
+        return discriminatorKey;
+    }
+
+    /**
+     * Gets the value for the discriminator.
+     *
+     * @return the discriminator value or null if not set
+     */
+    public String getDiscriminator() {
+        return discriminator;
+    }
+
+    /**
+     * Gets a {@link PropertyModel} by the property name.
+     *
+     * @param propertyName the PropertyModel's property name
+     * @return the PropertyModel or null if the property is not found
+     */
+    public PropertyModel<?> getPropertyModel(final String propertyName) {
+        for (PropertyModel<?> propertyModel : propertyModels) {
+            if (propertyModel.getName().equals(propertyName)) {
+                return propertyModel;
+            }
+        }
+        return null;
+    }
+
+    /**
+     * Returns all the properties on this model
+     *
+     * @return the list of properties
+     */
+    public List<PropertyModel<?>> getPropertyModels() {
+        return propertyModels;
+    }
+
+    /**
+     * Returns the {@link PropertyModel} mapped as the id property for this ClassModel
+     *
+     * @return the PropertyModel for the id
+     */
+    public PropertyModel<?> getIdPropertyModel() {
+        return idPropertyModelHolder != null ? idPropertyModelHolder.getPropertyModel() : null;
+    }
+
+    IdPropertyModelHolder<?> getIdPropertyModelHolder() {
+        return idPropertyModelHolder;
+    }
+
+    /**
+     * Returns the name of the class represented by this ClassModel
+     *
+     * @return the name
+     */
+    public String getName() {
+        return name;
+    }
+
+    @Override
+    public String toString() {
+        return "ClassModel{"
+                + "type=" + type
+                + "}";
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        ClassModel<?> that = (ClassModel<?>) o;
+
+        if (discriminatorEnabled != that.discriminatorEnabled) {
+            return false;
+        }
+        if (!getType().equals(that.getType())) {
+            return false;
+        }
+        if (!getInstanceCreatorFactory().equals(that.getInstanceCreatorFactory())) {
+            return false;
+        }
+        if (getDiscriminatorKey() != null ? !getDiscriminatorKey().equals(that.getDiscriminatorKey())
+                : that.getDiscriminatorKey() != null) {
+            return false;
+        }
+        if (getDiscriminator() != null ? !getDiscriminator().equals(that.getDiscriminator()) : that.getDiscriminator() != null) {
+            return false;
+        }
+        if (!Objects.equals(idPropertyModelHolder, that.idPropertyModelHolder)) {
+            return false;
+        }
+        if (!getPropertyModels().equals(that.getPropertyModels())) {
+            return false;
+        }
+        if (!getPropertyNameToTypeParameterMap().equals(that.getPropertyNameToTypeParameterMap())) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = getType().hashCode();
+        result = 31 * result + getInstanceCreatorFactory().hashCode();
+        result = 31 * result + (discriminatorEnabled ? 1 : 0);
+        result = 31 * result + (getDiscriminatorKey() != null ? getDiscriminatorKey().hashCode() : 0);
+        result = 31 * result + (getDiscriminator() != null ? getDiscriminator().hashCode() : 0);
+        result = 31 * result + (getIdPropertyModelHolder() != null ? getIdPropertyModelHolder().hashCode() : 0);
+        result = 31 * result + getPropertyModels().hashCode();
+        result = 31 * result + getPropertyNameToTypeParameterMap().hashCode();
+        return result;
+    }
+
+    InstanceCreatorFactory<T> getInstanceCreatorFactory() {
+        return instanceCreatorFactory;
+    }
+
+    Map<String, TypeParameterMap> getPropertyNameToTypeParameterMap() {
+        return propertyNameToTypeParameterMap;
+    }
+
+}
diff --git a/bson/src/main/org/bson/codecs/pojo/ClassModelBuilder.java b/bson/src/main/org/bson/codecs/pojo/ClassModelBuilder.java
new file mode 100644
index 00000000000..98e7c25c6c0
--- /dev/null
+++ b/bson/src/main/org/bson/codecs/pojo/ClassModelBuilder.java
@@ -0,0 +1,349 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
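Editor's note: a ClassModel is normally obtained through its builder. A minimal usage sketch, assuming a hypothetical Person POJO with a public no-args constructor (the conventions configured on the builder run when build() is called):

import org.bson.codecs.pojo.ClassModel;
import org.bson.codecs.pojo.ClassModelBuilder;

public final class ClassModelExample {
    // Hypothetical POJO; any bean-like class with a public no-args constructor works.
    public static final class Person {
        private String id;
        private String name;

        public Person() {
        }

        public String getId() { return id; }
        public void setId(final String id) { this.id = id; }
        public String getName() { return name; }
        public void setName(final String name) { this.name = name; }
    }

    public static void main(final String[] args) {
        ClassModelBuilder<Person> builder = ClassModel.builder(Person.class)
                .enableDiscriminator(true)
                .discriminatorKey("_t")
                .discriminator("Person");
        // Conventions (DEFAULT_CONVENTIONS unless overridden) are applied here,
        // then the property models are validated.
        ClassModel<Person> model = builder.build();
        System.out.println(model.getName()); // prints: Person
    }
}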
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo; + +import org.bson.codecs.configuration.CodecConfigurationException; + +import java.lang.annotation.Annotation; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static java.lang.String.format; +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.unmodifiableList; +import static java.util.Collections.unmodifiableMap; +import static org.bson.assertions.Assertions.notNull; +import static org.bson.codecs.pojo.Conventions.DEFAULT_CONVENTIONS; +import static org.bson.codecs.pojo.PojoBuilderHelper.configureClassModelBuilder; +import static org.bson.codecs.pojo.PojoBuilderHelper.stateNotNull; + +/** + * A builder for programmatically creating {@code ClassModels}. + * + * @param The type of the class the ClassModel represents + * @since 3.5 + * @see ClassModel + */ +public class ClassModelBuilder { + static final String ID_PROPERTY_NAME = "_id"; + private final List> propertyModelBuilders = new ArrayList<>(); + private IdGenerator idGenerator; + private InstanceCreatorFactory instanceCreatorFactory; + private Class type; + private Map propertyNameToTypeParameterMap = emptyMap(); + private List conventions = DEFAULT_CONVENTIONS; + private List annotations = emptyList(); + private boolean discriminatorEnabled; + private String discriminator; + private String discriminatorKey; + private String idPropertyName; + + ClassModelBuilder(final Class type) { + configureClassModelBuilder(this, notNull("type", type)); + } + + /** + * Sets the IdGenerator for the ClassModel + * + * @param idGenerator the IdGenerator + * @return this + * @since 3.10 + */ + public ClassModelBuilder idGenerator(final IdGenerator idGenerator) { + this.idGenerator = idGenerator; + return this; + } + + /** + * @return the IdGenerator for the ClassModel, or null if not set + * @since 3.10 + */ + public IdGenerator getIdGenerator() { + return idGenerator; + } + + /** + * Sets the InstanceCreatorFactory for the ClassModel + * + * @param instanceCreatorFactory the InstanceCreatorFactory + * @return this + */ + public ClassModelBuilder instanceCreatorFactory(final InstanceCreatorFactory instanceCreatorFactory) { + this.instanceCreatorFactory = notNull("instanceCreatorFactory", instanceCreatorFactory); + return this; + } + + /** + * @return the InstanceCreatorFactory for the ClassModel + */ + public InstanceCreatorFactory getInstanceCreatorFactory() { + return instanceCreatorFactory; + } + + /** + * Sets the type of the model + * + * @param type the type of the class + * @return the builder to configure the class being modeled + */ + public ClassModelBuilder type(final Class type) { + this.type = notNull("type", type); + return this; + } + + /** + * @return the type if set or null + */ + public Class getType() { + return type; + } + + /** + * Sets the conventions to apply to the model + * + * @param conventions a list of conventions + * @return this + */ + public ClassModelBuilder conventions(final List conventions) { + this.conventions = 
notNull("conventions", conventions); + return this; + } + + /** + * @return the conventions o apply to the model + */ + public List getConventions() { + return conventions; + } + + /** + * Sets the annotations for the model + * + * @param annotations a list of annotations + * @return this + */ + public ClassModelBuilder annotations(final List annotations) { + this.annotations = notNull("annotations", annotations); + return this; + } + + /** + * @return the annotations on the modeled type if set or null + */ + public List getAnnotations() { + return annotations; + } + + /** + * Sets the discriminator to be used when storing instances of the modeled type + * + * @param discriminator the discriminator value + * @return this + */ + public ClassModelBuilder discriminator(final String discriminator) { + this.discriminator = discriminator; + return this; + } + + /** + * @return the discriminator to be used when storing instances of the modeled type or null if not set + */ + public String getDiscriminator() { + return discriminator; + } + + /** + * Sets the discriminator key to be used when storing instances of the modeled type + * + * @param discriminatorKey the discriminator key value + * @return this + */ + public ClassModelBuilder discriminatorKey(final String discriminatorKey) { + this.discriminatorKey = discriminatorKey; + return this; + } + + /** + * @return the discriminator key to be used when storing instances of the modeled type or null if not set + */ + public String getDiscriminatorKey() { + return discriminatorKey; + } + + /** + * Enables or disables the use of a discriminator when serializing + * + * @param discriminatorEnabled true to enable the use of a discriminator + * @return this + */ + public ClassModelBuilder enableDiscriminator(final boolean discriminatorEnabled) { + this.discriminatorEnabled = discriminatorEnabled; + return this; + } + + /** + * @return true if a discriminator should be used when serializing, otherwise false + */ + public Boolean useDiscriminator() { + return discriminatorEnabled; + } + + /** + * Designates a property as the {@code _id} property for this type. If another property is currently marked as the {@code _id} + * property, that setting is cleared in favor of the named property. + * + * @param idPropertyName the property name to use for the {@code _id} property, a null value removes the set idPropertyName. + * + * @return this + */ + public ClassModelBuilder idPropertyName(final String idPropertyName) { + this.idPropertyName = idPropertyName; + return this; + } + + /** + * @return the designated {@code _id} property name for this type or null if not set + */ + public String getIdPropertyName() { + return idPropertyName; + } + + /** + * Remove a property from the builder + * + * @param propertyName the actual property name in the POJO and not the {@code documentPropertyName}. + * @return returns true if the property matched and was removed + */ + public boolean removeProperty(final String propertyName) { + return propertyModelBuilders.remove(getProperty(notNull("propertyName", propertyName))); + } + + /** + * Gets a property by the property name. + * + * @param propertyName the name of the property to find. 
+ * @return the property or null if the property is not found + */ + public PropertyModelBuilder getProperty(final String propertyName) { + notNull("propertyName", propertyName); + for (PropertyModelBuilder propertyModelBuilder : propertyModelBuilders) { + if (propertyModelBuilder.getName().equals(propertyName)) { + return propertyModelBuilder; + } + } + return null; + } + + /** + * @return the properties on the modeled type + */ + public List> getPropertyModelBuilders() { + return unmodifiableList(propertyModelBuilders); + } + + /** + * Creates a new ClassModel instance based on the mapping data provided. + * + * @return the new instance + */ + public ClassModel build() { + List> propertyModels = new ArrayList<>(); + PropertyModel idPropertyModel = null; + + stateNotNull("type", type); + for (Convention convention : conventions) { + convention.apply(this); + } + + stateNotNull("instanceCreatorFactory", instanceCreatorFactory); + if (discriminatorEnabled) { + stateNotNull("discriminatorKey", discriminatorKey); + stateNotNull("discriminator", discriminator); + } + + for (PropertyModelBuilder propertyModelBuilder : propertyModelBuilders) { + boolean isIdProperty = propertyModelBuilder.getName().equals(idPropertyName); + if (isIdProperty) { + propertyModelBuilder.readName(ID_PROPERTY_NAME).writeName(ID_PROPERTY_NAME); + } + + PropertyModel model = propertyModelBuilder.build(); + propertyModels.add(model); + if (isIdProperty) { + idPropertyModel = model; + } + } + validatePropertyModels(type.getSimpleName(), propertyModels); + return new ClassModel<>(type, propertyNameToTypeParameterMap, instanceCreatorFactory, discriminatorEnabled, discriminatorKey, + discriminator, IdPropertyModelHolder.create(type, idPropertyModel, idGenerator), unmodifiableList(propertyModels)); + } + + @Override + public String toString() { + return format("ClassModelBuilder{type=%s}", type); + } + + Map getPropertyNameToTypeParameterMap() { + return propertyNameToTypeParameterMap; + } + + ClassModelBuilder propertyNameToTypeParameterMap(final Map propertyNameToTypeParameterMap) { + this.propertyNameToTypeParameterMap = unmodifiableMap(new HashMap<>(propertyNameToTypeParameterMap)); + return this; + } + + ClassModelBuilder addProperty(final PropertyModelBuilder propertyModelBuilder) { + propertyModelBuilders.add(notNull("propertyModelBuilder", propertyModelBuilder)); + return this; + } + + private void validatePropertyModels(final String declaringClass, final List> propertyModels) { + Map propertyNameMap = new HashMap<>(); + Map propertyReadNameMap = new HashMap<>(); + Map propertyWriteNameMap = new HashMap<>(); + + for (PropertyModel propertyModel : propertyModels) { + if (propertyModel.hasError()) { + throw new CodecConfigurationException(propertyModel.getError()); + } + checkForDuplicates("property", propertyModel.getName(), propertyNameMap, declaringClass); + if (propertyModel.isReadable()) { + checkForDuplicates("read property", propertyModel.getReadName(), propertyReadNameMap, declaringClass); + } + if (propertyModel.isWritable()) { + checkForDuplicates("write property", propertyModel.getWriteName(), propertyWriteNameMap, declaringClass); + } + } + + if (idPropertyName != null && !propertyNameMap.containsKey(idPropertyName)) { + throw new CodecConfigurationException(format("Invalid id property, property named '%s' can not be found.", idPropertyName)); + } + } + + private void checkForDuplicates(final String propertyType, final String propertyName, final Map propertyNameMap, + final String declaringClass) { + if 
(propertyNameMap.containsKey(propertyName)) {
+            throw new CodecConfigurationException(format("Duplicate %s named '%s' found in %s.", propertyType, propertyName,
+                    declaringClass));
+        }
+        propertyNameMap.put(propertyName, 1);
+    }
+
+}
diff --git a/bson/src/main/org/bson/codecs/pojo/CollectionPropertyCodecProvider.java b/bson/src/main/org/bson/codecs/pojo/CollectionPropertyCodecProvider.java
new file mode 100644
index 00000000000..abf5add374c
--- /dev/null
+++ b/bson/src/main/org/bson/codecs/pojo/CollectionPropertyCodecProvider.java
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.bson.codecs.pojo;
+
+import java.util.TreeSet;
+
+import org.bson.BsonReader;
+import org.bson.BsonType;
+import org.bson.BsonWriter;
+import org.bson.codecs.Codec;
+import org.bson.codecs.DecoderContext;
+import org.bson.codecs.EncoderContext;
+import org.bson.codecs.configuration.CodecConfigurationException;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashSet;
+
+import static java.lang.String.format;
+
+final class CollectionPropertyCodecProvider implements PropertyCodecProvider {
+    @SuppressWarnings({"rawtypes", "unchecked"})
+    @Override
+    public <T> Codec<T> get(final TypeWithTypeParameters<T> type, final PropertyCodecRegistry registry) {
+        if (Collection.class.isAssignableFrom(type.getType()) && type.getTypeParameters().size() == 1) {
+            return new CollectionCodec(type.getType(), registry.get(type.getTypeParameters().get(0)));
+        } else {
+            return null;
+        }
+    }
+
+    private static class CollectionCodec<T> implements Codec<Collection<T>> {
+        private final Class<Collection<T>> encoderClass;
+        private final Codec<T> codec;
+
+        CollectionCodec(final Class<Collection<T>> encoderClass, final Codec<T> codec) {
+            this.encoderClass = encoderClass;
+            this.codec = codec;
+        }
+
+        @Override
+        public void encode(final BsonWriter writer, final Collection<T> collection, final EncoderContext encoderContext) {
+            writer.writeStartArray();
+            for (final T value : collection) {
+                if (value == null) {
+                    writer.writeNull();
+                } else {
+                    codec.encode(writer, value, encoderContext);
+                }
+            }
+            writer.writeEndArray();
+        }
+
+        @Override
+        public Collection<T> decode(final BsonReader reader, final DecoderContext context) {
+            Collection<T> collection = getInstance();
+            reader.readStartArray();
+            while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+                if (reader.getCurrentBsonType() == BsonType.NULL) {
+                    collection.add(null);
+                    reader.readNull();
+                } else {
+                    collection.add(codec.decode(reader, context));
+                }
+            }
+            reader.readEndArray();
+            return collection;
+        }
+
+        @Override
+        public Class<Collection<T>> getEncoderClass() {
+            return encoderClass;
+        }
+
+        private Collection<T> getInstance() {
+            if (encoderClass.isInterface()) {
+                if (encoderClass.isAssignableFrom(ArrayList.class)) {
+                    return new ArrayList<>();
+                } else if (encoderClass.isAssignableFrom(HashSet.class)) {
+                    return new HashSet<>();
+                } else if (encoderClass.isAssignableFrom(TreeSet.class)) {
+                    return new TreeSet<>();
+                } else {
+                    throw
new CodecConfigurationException(format("Unsupported Collection interface of %s!", encoderClass.getName())); + } + } + + try { + return encoderClass.getDeclaredConstructor().newInstance(); + } catch (Exception e) { + throw new CodecConfigurationException(e.getMessage(), e); + } + } + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/Convention.java b/bson/src/main/org/bson/codecs/pojo/Convention.java new file mode 100644 index 00000000000..f2101822200 --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/Convention.java @@ -0,0 +1,33 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo; + +/** + * Defines a convention to be applied when mapping a class. + * + * @since 3.5 + */ +public interface Convention { + + /** + * This method applies this Convention to the given ClassModelBuilder + * + * @param classModelBuilder the ClassModelBuilder to apply the convention to + */ + void apply(ClassModelBuilder classModelBuilder); + +} diff --git a/bson/src/main/org/bson/codecs/pojo/ConventionAnnotationImpl.java b/bson/src/main/org/bson/codecs/pojo/ConventionAnnotationImpl.java new file mode 100644 index 00000000000..e9adcaa9024 --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/ConventionAnnotationImpl.java @@ -0,0 +1,280 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
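Editor's note: since Convention is a public single-method interface, user code can contribute its own mapping rules. A minimal sketch of a hypothetical convention (names are illustrative) that lower-cases every document key; it would be passed alongside the defaults when configuring the codec provider:

import java.util.Locale;

import org.bson.codecs.pojo.ClassModelBuilder;
import org.bson.codecs.pojo.Convention;
import org.bson.codecs.pojo.PropertyModelBuilder;

public final class LowerCaseKeyConvention implements Convention {
    @Override
    public void apply(final ClassModelBuilder<?> classModelBuilder) {
        for (PropertyModelBuilder<?> property : classModelBuilder.getPropertyModelBuilders()) {
            String key = property.getName().toLowerCase(Locale.ROOT);
            // Rename both the stored (write) and the looked-up (read) document key.
            property.writeName(key).readName(key);
        }
    }
}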
+ */ + +package org.bson.codecs.pojo; + +import org.bson.BsonType; +import org.bson.codecs.configuration.CodecConfigurationException; +import org.bson.codecs.pojo.annotations.BsonCreator; +import org.bson.codecs.pojo.annotations.BsonDiscriminator; +import org.bson.codecs.pojo.annotations.BsonExtraElements; +import org.bson.codecs.pojo.annotations.BsonId; +import org.bson.codecs.pojo.annotations.BsonIgnore; +import org.bson.codecs.pojo.annotations.BsonProperty; +import org.bson.codecs.pojo.annotations.BsonRepresentation; +import org.bson.diagnostics.Logger; +import org.bson.diagnostics.Loggers; + +import java.lang.annotation.Annotation; +import java.lang.reflect.Constructor; +import java.lang.reflect.Method; +import java.lang.reflect.Type; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.Map; + +import static java.lang.String.format; +import static java.lang.reflect.Modifier.isPublic; +import static java.lang.reflect.Modifier.isStatic; +import static org.bson.codecs.pojo.PojoBuilderHelper.createPropertyModelBuilder; + +final class ConventionAnnotationImpl implements Convention { + + private static final Logger LOGGER = Loggers.getLogger("ConventionAnnotation"); + + @Override + public void apply(final ClassModelBuilder classModelBuilder) { + for (final Annotation annotation : classModelBuilder.getAnnotations()) { + processClassAnnotation(classModelBuilder, annotation); + } + + for (PropertyModelBuilder propertyModelBuilder : classModelBuilder.getPropertyModelBuilders()) { + processPropertyAnnotations(classModelBuilder, propertyModelBuilder); + } + + processCreatorAnnotation(classModelBuilder); + + cleanPropertyBuilders(classModelBuilder); + } + + private void processClassAnnotation(final ClassModelBuilder classModelBuilder, final Annotation annotation) { + if (annotation instanceof BsonDiscriminator) { + BsonDiscriminator discriminator = (BsonDiscriminator) annotation; + String key = discriminator.key(); + if (!key.equals("")) { + classModelBuilder.discriminatorKey(key); + } + + String name = discriminator.value(); + if (!name.equals("")) { + classModelBuilder.discriminator(name); + } + classModelBuilder.enableDiscriminator(true); + } + } + + private void processPropertyAnnotations(final ClassModelBuilder classModelBuilder, + final PropertyModelBuilder propertyModelBuilder) { + for (Annotation annotation : propertyModelBuilder.getReadAnnotations()) { + if (annotation instanceof BsonProperty) { + BsonProperty bsonProperty = (BsonProperty) annotation; + if (!"".equals(bsonProperty.value())) { + propertyModelBuilder.readName(bsonProperty.value()); + } + propertyModelBuilder.discriminatorEnabled(bsonProperty.useDiscriminator()); + if (propertyModelBuilder.getName().equals(classModelBuilder.getIdPropertyName())) { + classModelBuilder.idPropertyName(null); + } + } else if (annotation instanceof BsonId) { + classModelBuilder.idPropertyName(propertyModelBuilder.getName()); + } else if (annotation instanceof BsonIgnore) { + propertyModelBuilder.readName(null); + } else if (annotation instanceof BsonRepresentation) { + BsonRepresentation bsonRepresentation = (BsonRepresentation) annotation; + BsonType bsonRep = bsonRepresentation.value(); + propertyModelBuilder.bsonRepresentation(bsonRep); + } else if (annotation instanceof BsonExtraElements) { + processBsonExtraElementsAnnotation(propertyModelBuilder); + } + } + + for (Annotation annotation : propertyModelBuilder.getWriteAnnotations()) { + if (annotation instanceof BsonProperty) { + BsonProperty 
bsonProperty = (BsonProperty) annotation; + if (!"".equals(bsonProperty.value())) { + propertyModelBuilder.writeName(bsonProperty.value()); + } + } else if (annotation instanceof BsonIgnore) { + propertyModelBuilder.writeName(null); + } + } + } + + @SuppressWarnings("unchecked") + private void processCreatorAnnotation(final ClassModelBuilder classModelBuilder) { + Class clazz = classModelBuilder.getType(); + CreatorExecutable creatorExecutable = null; + for (Constructor constructor : clazz.getDeclaredConstructors()) { + if (isPublic(constructor.getModifiers()) && !constructor.isSynthetic()) { + for (Annotation annotation : constructor.getDeclaredAnnotations()) { + if (annotation.annotationType().equals(BsonCreator.class)) { + if (creatorExecutable != null) { + throw new CodecConfigurationException("Found multiple constructors annotated with @BsonCreator"); + } + creatorExecutable = new CreatorExecutable<>(clazz, (Constructor) constructor); + } + } + } + } + + Class bsonCreatorClass = clazz; + boolean foundStaticBsonCreatorMethod = false; + while (bsonCreatorClass != null && !foundStaticBsonCreatorMethod) { + for (Method method : bsonCreatorClass.getDeclaredMethods()) { + if (isStatic(method.getModifiers()) && !method.isSynthetic() && !method.isBridge()) { + for (Annotation annotation : method.getDeclaredAnnotations()) { + if (annotation.annotationType().equals(BsonCreator.class)) { + if (creatorExecutable != null) { + throw new CodecConfigurationException("Found multiple constructors / methods annotated with @BsonCreator"); + } else if (!bsonCreatorClass.isAssignableFrom(method.getReturnType())) { + throw new CodecConfigurationException( + format("Invalid method annotated with @BsonCreator. Returns '%s', expected %s", + method.getReturnType(), bsonCreatorClass)); + } + creatorExecutable = new CreatorExecutable<>(clazz, method); + foundStaticBsonCreatorMethod = true; + } + } + } + } + + bsonCreatorClass = bsonCreatorClass.getSuperclass(); + } + + if (creatorExecutable != null) { + List properties = creatorExecutable.getProperties(); + List> parameterTypes = creatorExecutable.getParameterTypes(); + List parameterGenericTypes = creatorExecutable.getParameterGenericTypes(); + + if (properties.size() != parameterTypes.size()) { + throw creatorExecutable.getError(clazz, "All parameters in the @BsonCreator method / constructor must be annotated " + + "with a @BsonProperty."); + } + for (int i = 0; i < properties.size(); i++) { + boolean isIdProperty = creatorExecutable.getIdPropertyIndex() != null && creatorExecutable.getIdPropertyIndex().equals(i); + Class parameterType = parameterTypes.get(i); + Type genericType = parameterGenericTypes.get(i); + PropertyModelBuilder propertyModelBuilder = null; + + if (isIdProperty) { + if (classModelBuilder.getIdPropertyName() == null) { + throw new CodecConfigurationException("A @BsonId annotation has been used with @BsonCreator " + + "but there is no known Id property.\n" + + "Please either use the @BsonProperty annotation in the creator or " + + "annotate the corresponding property in the class with the @BsonId."); + } + propertyModelBuilder = classModelBuilder.getProperty(classModelBuilder.getIdPropertyName()); + } else { + BsonProperty bsonProperty = properties.get(i); + + // Find the property using write name and falls back to read name + for (PropertyModelBuilder builder : classModelBuilder.getPropertyModelBuilders()) { + if (bsonProperty.value().equals(builder.getWriteName())) { + propertyModelBuilder = builder; + break; + } else if 
(bsonProperty.value().equals(builder.getReadName())) { + // When there is a property that matches the read name of the parameter, save it but continue to look + // This is just in case there is another property that matches the write name. + propertyModelBuilder = builder; + } + } + + // Support legacy options, when BsonProperty matches the actual POJO property name (e.g. method name or field name). + if (propertyModelBuilder == null) { + propertyModelBuilder = classModelBuilder.getProperty(bsonProperty.value()); + } + + if (propertyModelBuilder == null) { + propertyModelBuilder = addCreatorPropertyToClassModelBuilder(classModelBuilder, bsonProperty.value(), + parameterType); + } else { + // If not using a legacy BsonProperty reference to the property set the write name to be the annotated name. + if (!bsonProperty.value().equals(propertyModelBuilder.getName())) { + propertyModelBuilder.writeName(bsonProperty.value()); + } + tryToExpandToGenericType(parameterType, propertyModelBuilder, genericType); + } + } + + if (!propertyModelBuilder.getTypeData().isAssignableFrom(parameterType)) { + throw creatorExecutable.getError(clazz, format("Invalid Property type for '%s'. Expected %s, found %s.", + propertyModelBuilder.getWriteName(), propertyModelBuilder.getTypeData().getType(), parameterType)); + } + } + classModelBuilder.instanceCreatorFactory(new InstanceCreatorFactoryImpl<>(creatorExecutable)); + } + } + + @SuppressWarnings("unchecked") + private static void tryToExpandToGenericType(final Class parameterType, final PropertyModelBuilder propertyModelBuilder, + final Type genericType) { + if (parameterType.isAssignableFrom(propertyModelBuilder.getTypeData().getType())) { + // The existing getter for this field returns a more specific type than what the constructor accepts + // This is typical when the getter returns a specific subtype, but the constructor accepts a more + // general one (e.g.: getter returns ImmutableList, while constructor just accepts List) + propertyModelBuilder.typeData(TypeData.newInstance(genericType, (Class) parameterType)); + } + } + + private PropertyModelBuilder addCreatorPropertyToClassModelBuilder(final ClassModelBuilder classModelBuilder, + final String name, + final Class clazz) { + PropertyModelBuilder propertyModelBuilder = createPropertyModelBuilder(new PropertyMetadata<>(name, + classModelBuilder.getType().getSimpleName(), TypeData.builder(clazz).build())).readName(null).writeName(name); + classModelBuilder.addProperty(propertyModelBuilder); + return propertyModelBuilder; + } + + private void cleanPropertyBuilders(final ClassModelBuilder classModelBuilder) { + List propertiesToRemove = new ArrayList<>(); + for (PropertyModelBuilder propertyModelBuilder : classModelBuilder.getPropertyModelBuilders()) { + if (!propertyModelBuilder.isReadable() && !propertyModelBuilder.isWritable()) { + propertiesToRemove.add(propertyModelBuilder.getName()); + } + if (classModelBuilder.useDiscriminator() && Objects.equals(classModelBuilder.getDiscriminatorKey(), propertyModelBuilder.getReadName())) { + propertiesToRemove.add(propertyModelBuilder.getName()); + LOGGER.warn( + format( + "Removed the property '%s' from the model because the discriminator has the same key", + classModelBuilder.getDiscriminatorKey() + ) + ); + } + } + for (String propertyName : propertiesToRemove) { + classModelBuilder.removeProperty(propertyName); + } + } + + private void processBsonExtraElementsAnnotation(final PropertyModelBuilder propertyModelBuilder) { + PropertyAccessor propertyAccessor = 
propertyModelBuilder.getPropertyAccessor(); + if (!(propertyAccessor instanceof PropertyAccessorImpl)) { + throw new CodecConfigurationException(format("The @BsonExtraElements annotation is not compatible with " + + "propertyModelBuilder instances that have custom implementations of org.bson.codecs.pojo.PropertyAccessor: %s", + propertyModelBuilder.getPropertyAccessor().getClass().getName())); + } + + if (!Map.class.isAssignableFrom(propertyModelBuilder.getTypeData().getType())) { + throw new CodecConfigurationException(format("The @BsonExtraElements annotation is not compatible with " + + "propertyModelBuilder with the following type: %s. " + + "Please use a Document, BsonDocument or Map type.", + propertyModelBuilder.getTypeData())); + } + propertyModelBuilder.propertySerialization(new PropertyModelSerializationInlineImpl<>(propertyModelBuilder.getPropertySerialization())); + propertyModelBuilder.propertyAccessor(new FieldPropertyAccessor<>((PropertyAccessorImpl) propertyAccessor)); + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/ConventionDefaultsImpl.java b/bson/src/main/org/bson/codecs/pojo/ConventionDefaultsImpl.java new file mode 100644 index 00000000000..c7628f84248 --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/ConventionDefaultsImpl.java @@ -0,0 +1,38 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo; + +final class ConventionDefaultsImpl implements Convention { + @Override + public void apply(final ClassModelBuilder classModelBuilder) { + if (classModelBuilder.getDiscriminatorKey() == null) { + classModelBuilder.discriminatorKey("_t"); + } + if (classModelBuilder.getDiscriminator() == null && classModelBuilder.getType() != null) { + classModelBuilder.discriminator(classModelBuilder.getType().getName()); + } + + for (final PropertyModelBuilder propertyModel : classModelBuilder.getPropertyModelBuilders()) { + if (classModelBuilder.getIdPropertyName() == null) { + String propertyName = propertyModel.getName(); + if (propertyName.equals("_id") || propertyName.equals("id")) { + classModelBuilder.idPropertyName(propertyName); + } + } + } + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/ConventionObjectIdGeneratorsImpl.java b/bson/src/main/org/bson/codecs/pojo/ConventionObjectIdGeneratorsImpl.java new file mode 100644 index 00000000000..fe5be23ab5d --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/ConventionObjectIdGeneratorsImpl.java @@ -0,0 +1,41 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
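Editor's note: tying the annotation processing above together, a minimal sketch of a hypothetical immutable POJO that the ANNOTATION_CONVENTION can instantiate through its @BsonCreator constructor (class and property names are illustrative):

import org.bson.codecs.pojo.annotations.BsonCreator;
import org.bson.codecs.pojo.annotations.BsonDiscriminator;
import org.bson.codecs.pojo.annotations.BsonId;
import org.bson.codecs.pojo.annotations.BsonProperty;

@BsonDiscriminator(key = "_t", value = "Invoice")
public final class Invoice {
    private final String id;
    private final long totalCents;

    @BsonCreator
    public Invoice(@BsonId final String id, @BsonProperty("total") final long totalCents) {
        this.id = id;
        this.totalCents = totalCents;
    }

    @BsonId
    public String getId() {
        return id;
    }

    // Stored under the document key "total" rather than "totalCents".
    @BsonProperty("total")
    public long getTotalCents() {
        return totalCents;
    }
}

Note the pairing required above: @BsonId on a creator parameter only resolves because the getter annotation has already marked "id" as the id property.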
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo; + +import org.bson.BsonObjectId; +import org.bson.BsonType; +import org.bson.types.ObjectId; + +final class ConventionObjectIdGeneratorsImpl implements Convention { + @Override + public void apply(final ClassModelBuilder classModelBuilder) { + if (classModelBuilder.getIdGenerator() == null && classModelBuilder.getIdPropertyName() != null) { + PropertyModelBuilder idProperty = classModelBuilder.getProperty(classModelBuilder.getIdPropertyName()); + if (idProperty != null) { + Class idType = idProperty.getTypeData().getType(); + if (classModelBuilder.getIdGenerator() == null && idType.equals(ObjectId.class)) { + classModelBuilder.idGenerator(IdGenerators.OBJECT_ID_GENERATOR); + } else if (classModelBuilder.getIdGenerator() == null && idType.equals(BsonObjectId.class)) { + classModelBuilder.idGenerator(IdGenerators.BSON_OBJECT_ID_GENERATOR); + } else if (classModelBuilder.getIdGenerator() == null && idType.equals(String.class) + && idProperty.getBsonRepresentation() == BsonType.OBJECT_ID) { + classModelBuilder.idGenerator(IdGenerators.STRING_ID_GENERATOR); + } + } + } + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/ConventionSetPrivateFieldImpl.java b/bson/src/main/org/bson/codecs/pojo/ConventionSetPrivateFieldImpl.java new file mode 100644 index 00000000000..74a28eb6c06 --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/ConventionSetPrivateFieldImpl.java @@ -0,0 +1,47 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
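Editor's note on the convention above: a hypothetical POJO whose ObjectId-typed id follows the default id-naming rules picks up IdGenerators.OBJECT_ID_GENERATOR automatically, so a null id can be filled in when the document is inserted:

import org.bson.types.ObjectId;

public final class Event {
    // Named "id", so the defaults convention maps it to "_id"; its ObjectId type
    // makes the convention above select IdGenerators.OBJECT_ID_GENERATOR.
    private ObjectId id;
    private String description;

    public ObjectId getId() { return id; }
    public void setId(final ObjectId id) { this.id = id; }
    public String getDescription() { return description; }
    public void setDescription(final String description) { this.description = description; }
}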
+ */ + +package org.bson.codecs.pojo; + +import org.bson.codecs.configuration.CodecConfigurationException; + +import static java.lang.String.format; +import static java.lang.reflect.Modifier.isPrivate; + +final class ConventionSetPrivateFieldImpl implements Convention { + + @Override + public void apply(final ClassModelBuilder classModelBuilder) { + for (PropertyModelBuilder propertyModelBuilder : classModelBuilder.getPropertyModelBuilders()) { + if (!(propertyModelBuilder.getPropertyAccessor() instanceof PropertyAccessorImpl)) { + throw new CodecConfigurationException(format("The SET_PRIVATE_FIELDS_CONVENTION is not compatible with " + + "propertyModelBuilder instance that have custom implementations of org.bson.codecs.pojo.PropertyAccessor: %s", + propertyModelBuilder.getPropertyAccessor().getClass().getName())); + } + PropertyAccessorImpl defaultAccessor = (PropertyAccessorImpl) propertyModelBuilder.getPropertyAccessor(); + PropertyMetadata propertyMetaData = defaultAccessor.getPropertyMetadata(); + if (!propertyMetaData.isDeserializable() && propertyMetaData.getField() != null + && isPrivate(propertyMetaData.getField().getModifiers())) { + setPropertyAccessor(propertyModelBuilder); + } + } + } + + private void setPropertyAccessor(final PropertyModelBuilder propertyModelBuilder) { + propertyModelBuilder.propertyAccessor(new FieldPropertyAccessor<>((PropertyAccessorImpl) propertyModelBuilder.getPropertyAccessor())); + } + +} diff --git a/bson/src/main/org/bson/codecs/pojo/ConventionUseGettersAsSettersImpl.java b/bson/src/main/org/bson/codecs/pojo/ConventionUseGettersAsSettersImpl.java new file mode 100644 index 00000000000..7cc677cf96b --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/ConventionUseGettersAsSettersImpl.java @@ -0,0 +1,114 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs.pojo; + +import org.bson.codecs.configuration.CodecConfigurationException; + +import java.util.Collection; +import java.util.Map; + +import static java.lang.String.format; + +final class ConventionUseGettersAsSettersImpl implements Convention { + + @Override + public void apply(final ClassModelBuilder classModelBuilder) { + for (PropertyModelBuilder propertyModelBuilder : classModelBuilder.getPropertyModelBuilders()) { + if (!(propertyModelBuilder.getPropertyAccessor() instanceof PropertyAccessorImpl)) { + throw new CodecConfigurationException(format("The USE_GETTER_AS_SETTER_CONVENTION is not compatible with " + + "propertyModelBuilder instance that have custom implementations of org.bson.codecs.pojo.PropertyAccessor: %s", + propertyModelBuilder.getPropertyAccessor().getClass().getName())); + } + PropertyAccessorImpl defaultAccessor = (PropertyAccessorImpl) propertyModelBuilder.getPropertyAccessor(); + PropertyMetadata propertyMetaData = defaultAccessor.getPropertyMetadata(); + if (!propertyMetaData.isDeserializable() && propertyMetaData.isSerializable() + && isMapOrCollection(propertyMetaData.getTypeData().getType())) { + setPropertyAccessor(propertyModelBuilder); + } + } + } + + private boolean isMapOrCollection(final Class clazz) { + return Collection.class.isAssignableFrom(clazz) || Map.class.isAssignableFrom(clazz); + } + + private void setPropertyAccessor(final PropertyModelBuilder propertyModelBuilder) { + propertyModelBuilder.propertyAccessor(new PrivatePropertyAccessor<>( + (PropertyAccessorImpl) propertyModelBuilder.getPropertyAccessor())); + } + + @SuppressWarnings({"rawtypes", "unchecked"}) + private static final class PrivatePropertyAccessor implements PropertyAccessor { + private final PropertyAccessorImpl wrapped; + + private PrivatePropertyAccessor(final PropertyAccessorImpl wrapped) { + this.wrapped = wrapped; + } + + @Override + public T get(final S instance) { + return wrapped.get(instance); + } + + @Override + public void set(final S instance, final T value) { + if (value instanceof Collection) { + mutateCollection(instance, (Collection) value); + } else if (value instanceof Map) { + mutateMap(instance, (Map) value); + } else { + throwCodecConfigurationException(format("Unexpected type: '%s'", value.getClass()), null); + } + } + + private void mutateCollection(final S instance, final Collection value) { + T originalCollection = get(instance); + Collection collection = ((Collection) originalCollection); + if (collection == null) { + throwCodecConfigurationException("The getter returned null.", null); + } else if (!collection.isEmpty()) { + throwCodecConfigurationException("The getter returned a non empty collection.", null); + } else { + try { + collection.addAll(value); + } catch (Exception e) { + throwCodecConfigurationException("collection#addAll failed.", e); + } + } + } + + private void mutateMap(final S instance, final Map value) { + T originalMap = get(instance); + Map map = ((Map) originalMap); + if (map == null) { + throwCodecConfigurationException("The getter returned null.", null); + } else if (!map.isEmpty()) { + throwCodecConfigurationException("The getter returned a non empty map.", null); + } else { + try { + map.putAll(value); + } catch (Exception e) { + throwCodecConfigurationException("map#putAll failed.", e); + } + } + } + private void throwCodecConfigurationException(final String reason, final Exception cause) { + throw new CodecConfigurationException(format("Cannot use getter in '%s' to set '%s'. 
%s", + wrapped.getPropertyMetadata().getDeclaringClassName(), wrapped.getPropertyMetadata().getName(), reason), cause); + } + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/Conventions.java b/bson/src/main/org/bson/codecs/pojo/Conventions.java new file mode 100644 index 00000000000..0f54c13815d --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/Conventions.java @@ -0,0 +1,94 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo; + +import java.util.Collections; +import java.util.List; + +import static java.util.Arrays.asList; +import static java.util.Collections.unmodifiableList; + +/** + * The default Conventions + * + * @since 3.5 + * @see Convention + */ +public final class Conventions { + + /** + * The default class and property conventions + * + *
+     * <ul>
+     *     <li>Sets the discriminator key if not set to {@code _t} and the discriminator value if not set to the
+     *     ClassModel's simple type name.</li>
+     *     <li>Configures the PropertyModels. If the {@code idProperty} isn't set and there is a
+     *     property named {@code getId()}, {@code id} or {@code _id} it will be marked as the idProperty.</li>
+     * </ul>
+     */
+    public static final Convention CLASS_AND_PROPERTY_CONVENTION = new ConventionDefaultsImpl();
+
+    /**
+     * The annotation convention.
+     *
+     * <p>Applies all the conventions related to the default {@link org.bson.codecs.pojo.annotations}.</p>
+     */
+    public static final Convention ANNOTATION_CONVENTION = new ConventionAnnotationImpl();
+
+    /**
+     * A convention that enables private fields to be set using reflection.
+     *
+     * <p>This convention mimics how some other JSON libraries directly set a private field when there is no setter.</p>
+     *
+     * <p>Note: This convention is not part of the {@code DEFAULT_CONVENTIONS} list and must explicitly be set.</p>
+     *
+     * @since 3.6
+     */
+    public static final Convention SET_PRIVATE_FIELDS_CONVENTION = new ConventionSetPrivateFieldImpl();
+
+    /**
+     * A convention that uses getter methods as setters for collections and maps if there is no setter.
+     *
+     * <p>This convention mimics how JAXB mutates collections and maps.</p>
+     *
+     * <p>Note: This convention is not part of the {@code DEFAULT_CONVENTIONS} list and must explicitly be set.</p>
+ * + * @since 3.6 + */ + public static final Convention USE_GETTERS_FOR_SETTERS = new ConventionUseGettersAsSettersImpl(); + + + /** + * A convention that sets the IdGenerator if the id property is either a {@link org.bson.types.ObjectId} or + * {@link org.bson.BsonObjectId}. + * + * @since 3.10 + */ + public static final Convention OBJECT_ID_GENERATORS = new ConventionObjectIdGeneratorsImpl(); + + /** + * The default conventions list + */ + public static final List DEFAULT_CONVENTIONS = + unmodifiableList(asList(CLASS_AND_PROPERTY_CONVENTION, ANNOTATION_CONVENTION, OBJECT_ID_GENERATORS)); + + /** + * An empty conventions list + */ + public static final List NO_CONVENTIONS = Collections.emptyList(); + + private Conventions() { + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/CreatorExecutable.java b/bson/src/main/org/bson/codecs/pojo/CreatorExecutable.java new file mode 100644 index 00000000000..d8c13f125a9 --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/CreatorExecutable.java @@ -0,0 +1,151 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo; + +import org.bson.codecs.configuration.CodecConfigurationException; +import org.bson.codecs.pojo.annotations.BsonId; +import org.bson.codecs.pojo.annotations.BsonProperty; + +import java.lang.annotation.Annotation; +import java.lang.reflect.Constructor; +import java.lang.reflect.Method; +import java.lang.reflect.Type; +import java.util.ArrayList; +import java.util.List; + +import static java.lang.String.format; +import static java.util.Arrays.asList; + +final class CreatorExecutable { + private final Class clazz; + private final Constructor constructor; + private final Method method; + private final List properties = new ArrayList<>(); + private final Integer idPropertyIndex; + private final List> parameterTypes = new ArrayList<>(); + private final List parameterGenericTypes = new ArrayList<>(); + + CreatorExecutable(final Class clazz, final Constructor constructor) { + this(clazz, constructor, null); + } + + CreatorExecutable(final Class clazz, final Method method) { + this(clazz, null, method); + } + + private CreatorExecutable(final Class clazz, final Constructor constructor, final Method method) { + this.clazz = clazz; + this.constructor = constructor; + this.method = method; + Integer idPropertyIndex = null; + + if (constructor != null || method != null) { + Class[] paramTypes = constructor != null ? constructor.getParameterTypes() : method.getParameterTypes(); + Type[] genericParamTypes = constructor != null ? constructor.getGenericParameterTypes() : method.getGenericParameterTypes(); + parameterTypes.addAll(asList(paramTypes)); + parameterGenericTypes.addAll(asList(genericParamTypes)); + Annotation[][] parameterAnnotations = constructor != null ? 
constructor.getParameterAnnotations() + : method.getParameterAnnotations(); + + for (int i = 0; i < parameterAnnotations.length; ++i) { + Annotation[] parameterAnnotation = parameterAnnotations[i]; + + for (Annotation annotation : parameterAnnotation) { + if (annotation.annotationType().equals(BsonProperty.class)) { + properties.add((BsonProperty) annotation); + break; + } + + if (annotation.annotationType().equals(BsonId.class)) { + properties.add(null); + idPropertyIndex = i; + break; + } + } + } + } + + this.idPropertyIndex = idPropertyIndex; + } + + Class getType() { + return clazz; + } + + List getProperties() { + return properties; + } + + Integer getIdPropertyIndex() { + return idPropertyIndex; + } + + List> getParameterTypes() { + return parameterTypes; + } + + List getParameterGenericTypes() { + return parameterGenericTypes; + } + + @SuppressWarnings("unchecked") + T getInstance() { + checkHasAnExecutable(); + try { + if (constructor != null) { + return constructor.newInstance(); + } else { + return (T) method.invoke(clazz); + } + } catch (Exception e) { + throw new CodecConfigurationException(e.getMessage(), e); + } + } + + @SuppressWarnings("unchecked") + T getInstance(final Object[] params) { + checkHasAnExecutable(); + try { + if (constructor != null) { + return constructor.newInstance(params); + } else { + return (T) method.invoke(clazz, params); + } + } catch (Exception e) { + throw new CodecConfigurationException(e.getMessage(), e); + } + } + + + CodecConfigurationException getError(final Class clazz, final String msg) { + return getError(clazz, constructor != null, msg); + } + + private void checkHasAnExecutable() { + if (constructor == null && method == null) { + throw new CodecConfigurationException(format("Cannot find a public constructor for '%s'. Please ensure " + + "the class has a public, empty constructor with no arguments, or else a constructor with a " + + "BsonCreator annotation", clazz.getSimpleName())); + } + } + + private static CodecConfigurationException getError(final Class clazz, final boolean isConstructor, final String msg) { + return new CodecConfigurationException(format("Invalid @BsonCreator %s in %s. %s", isConstructor ? "constructor" : "method", + clazz.getSimpleName(), msg)); + } + +} diff --git a/bson/src/main/org/bson/codecs/pojo/DiscriminatorLookup.java b/bson/src/main/org/bson/codecs/pojo/DiscriminatorLookup.java new file mode 100644 index 00000000000..084eb75b6bc --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/DiscriminatorLookup.java @@ -0,0 +1,84 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
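Editor's note: for reference, a minimal sketch of wiring an explicit conventions list into a provider. PojoCodecProvider and its builder are assumed from elsewhere in this patch; the list below is the defaults plus the two opt-in conventions described above:

import static java.util.Arrays.asList;

import org.bson.codecs.configuration.CodecProvider;
import org.bson.codecs.pojo.Conventions;
import org.bson.codecs.pojo.PojoCodecProvider;

public final class ProviderExample {
    public static CodecProvider pojoProvider() {
        return PojoCodecProvider.builder()
                .conventions(asList(
                        Conventions.CLASS_AND_PROPERTY_CONVENTION,
                        Conventions.ANNOTATION_CONVENTION,
                        Conventions.OBJECT_ID_GENERATORS,
                        Conventions.SET_PRIVATE_FIELDS_CONVENTION,
                        Conventions.USE_GETTERS_FOR_SETTERS))
                .automatic(true)
                .build();
    }
}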
+ */ + +package org.bson.codecs.pojo; + +import org.bson.codecs.configuration.CodecConfigurationException; + +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; + +import static java.lang.String.format; + +final class DiscriminatorLookup { + private final Map> discriminatorClassMap = new ConcurrentHashMap<>(); + private final Set packages; + + DiscriminatorLookup(final Map, ClassModel> classModels, final Set packages) { + for (ClassModel classModel : classModels.values()) { + if (classModel.getDiscriminator() != null) { + discriminatorClassMap.put(classModel.getDiscriminator(), classModel.getType()); + } + } + this.packages = packages; + } + + public Class lookup(final String discriminator) { + if (discriminatorClassMap.containsKey(discriminator)) { + return discriminatorClassMap.get(discriminator); + } + + Class clazz = getClassForName(discriminator); + if (clazz == null) { + clazz = searchPackages(discriminator); + } + + if (clazz == null) { + throw new CodecConfigurationException(format("A class could not be found for the discriminator: '%s'.", discriminator)); + } else { + discriminatorClassMap.put(discriminator, clazz); + } + return clazz; + } + + void addClassModel(final ClassModel classModel) { + if (classModel.getDiscriminator() != null) { + discriminatorClassMap.put(classModel.getDiscriminator(), classModel.getType()); + } + } + + private Class getClassForName(final String discriminator) { + Class clazz = null; + try { + clazz = Class.forName(discriminator); + } catch (ClassNotFoundException e) { + // Ignore + } + return clazz; + } + + private Class searchPackages(final String discriminator) { + Class clazz = null; + for (String packageName : packages) { + clazz = getClassForName(packageName + "." + discriminator); + if (clazz != null) { + return clazz; + } + } + return clazz; + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/Either.java b/bson/src/main/org/bson/codecs/pojo/Either.java new file mode 100644 index 00000000000..1ad37e4ceef --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/Either.java @@ -0,0 +1,81 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo; + +import java.util.Objects; +import java.util.function.Consumer; +import java.util.function.Function; + +import static org.bson.assertions.Assertions.notNull; + +final class Either { + + public static Either left(final L value) { + return new Either<>(notNull("value", value), null); + } + + public static Either right(final R value) { + return new Either<>(null, notNull("value", value)); + } + + private final L left; + private final R right; + + private Either(final L l, final R r) { + left = l; + right = r; + } + + public T map(final Function lFunc, final Function rFunc) { + return left != null ? 
lFunc.apply(left) : rFunc.apply(right); + } + + public void apply(final Consumer lFunc, final Consumer rFunc) { + if (left != null){ + lFunc.accept(left); + } + if (right != null){ + rFunc.accept(right); + } + } + + @Override + public String toString() { + return "Either{" + + "left=" + left + + ", right=" + right + + '}'; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Either either = (Either) o; + return Objects.equals(left, either.left) && Objects.equals(right, either.right); + } + + @Override + public int hashCode() { + return Objects.hash(left, right); + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/EnumPropertyCodecProvider.java b/bson/src/main/org/bson/codecs/pojo/EnumPropertyCodecProvider.java new file mode 100644 index 00000000000..474efeb9fe6 --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/EnumPropertyCodecProvider.java @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo; + +import org.bson.codecs.Codec; +import org.bson.codecs.EnumCodec; +import org.bson.codecs.configuration.CodecConfigurationException; +import org.bson.codecs.configuration.CodecRegistry; + + +final class EnumPropertyCodecProvider implements PropertyCodecProvider { + private final CodecRegistry codecRegistry; + + EnumPropertyCodecProvider(final CodecRegistry codecRegistry) { + this.codecRegistry = codecRegistry; + } + + @SuppressWarnings({"unchecked", "rawtypes"}) + @Override + public Codec get(final TypeWithTypeParameters type, final PropertyCodecRegistry propertyCodecRegistry) { + Class clazz = type.getType(); + if (Enum.class.isAssignableFrom(clazz)) { + try { + return codecRegistry.get(clazz); + } catch (CodecConfigurationException e) { + return (Codec) new EnumCodec(clazz); + } + } + return null; + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/FallbackPropertyCodecProvider.java b/bson/src/main/org/bson/codecs/pojo/FallbackPropertyCodecProvider.java new file mode 100644 index 00000000000..8c5fd4e9cf3 --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/FallbackPropertyCodecProvider.java @@ -0,0 +1,40 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
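Aside: Either above is a small package-private sum type. Exactly one side is set, map folds both alternatives into a single result, and apply dispatches to whichever side is present. A minimal usage sketch; because the class is package-private, the demo assumes it lives in org.bson.codecs.pojo:

    package org.bson.codecs.pojo;

    final class EitherDemo {
        public static void main(String[] args) {
            Either<String, Integer> failure = Either.left("boom");
            Either<String, Integer> success = Either.right(42);

            // map() folds both alternatives into one result type.
            int a = failure.map(String::length, i -> i); // 4
            int b = success.map(String::length, i -> i); // 42

            // apply() invokes the consumer for whichever side is present.
            success.apply(
                    l -> System.out.println("left: " + l),   // skipped
                    r -> System.out.println("right: " + r)); // prints "right: 42"
            System.out.println(a + " " + b);
        }
    }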
+ */ + +package org.bson.codecs.pojo; + +import org.bson.codecs.Codec; +import org.bson.codecs.configuration.CodecRegistry; + +final class FallbackPropertyCodecProvider implements PropertyCodecProvider { + private final CodecRegistry codecRegistry; + private final PojoCodec pojoCodec; + + FallbackPropertyCodecProvider(final PojoCodec pojoCodec, final CodecRegistry codecRegistry) { + this.pojoCodec = pojoCodec; + this.codecRegistry = codecRegistry; + } + + @SuppressWarnings("unchecked") + @Override + public Codec get(final TypeWithTypeParameters type, final PropertyCodecRegistry propertyCodecRegistry) { + Class clazz = type.getType(); + if (clazz == pojoCodec.getEncoderClass()) { + return (Codec) pojoCodec; + } + return codecRegistry.get(type.getType()); + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/FieldPropertyAccessor.java b/bson/src/main/org/bson/codecs/pojo/FieldPropertyAccessor.java new file mode 100644 index 00000000000..61c82fd641c --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/FieldPropertyAccessor.java @@ -0,0 +1,49 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.bson.codecs.pojo; + +import org.bson.codecs.configuration.CodecConfigurationException; + +import static java.lang.String.format; + +final class FieldPropertyAccessor implements PropertyAccessor { + private final PropertyAccessorImpl wrapped; + + FieldPropertyAccessor(final PropertyAccessorImpl wrapped) { + this.wrapped = wrapped; + try { + wrapped.getPropertyMetadata().getField().setAccessible(true); + } catch (Exception e) { + throw new CodecConfigurationException(format("Unable to make field accessible '%s' in %s", + wrapped.getPropertyMetadata().getName(), wrapped.getPropertyMetadata().getDeclaringClassName()), e); + } + } + + @Override + public T get(final S instance) { + return wrapped.get(instance); + } + + @Override + public void set(final S instance, final T value) { + try { + wrapped.getPropertyMetadata().getField().set(instance, value); + } catch (Exception e) { + throw new CodecConfigurationException(format("Unable to set value for property '%s' in %s", + wrapped.getPropertyMetadata().getName(), wrapped.getPropertyMetadata().getDeclaringClassName()), e); + } + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/IdGenerator.java b/bson/src/main/org/bson/codecs/pojo/IdGenerator.java new file mode 100644 index 00000000000..e794c8841ef --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/IdGenerator.java @@ -0,0 +1,37 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
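Aside: FieldPropertyAccessor above widens access to the backing field once, in its constructor, then reads through the wrapped accessor but writes the field directly. A standalone sketch of the same setAccessible pattern, with illustrative names:

    import java.lang.reflect.Field;

    final class FieldWriteDemo {
        static final class Target {
            private String name = "before";
        }

        public static void main(String[] args) throws Exception {
            Target target = new Target();
            Field field = Target.class.getDeclaredField("name");
            field.setAccessible(true);              // done once, as in the constructor above
            field.set(target, "after");             // direct write to the private field
            System.out.println(field.get(target));  // prints "after"
        }
    }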
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo; + +/** + * Classes that implement this interface define a way to create Ids for Pojo's. + * + * @param the type of the id value. + * @since 3.10 + */ +public interface IdGenerator { + /** + * Generates an id for a Pojo. + * + * @return the generated id value + */ + T generate(); + + /** + * @return the type of the generated id. + */ + Class getType(); +} diff --git a/bson/src/main/org/bson/codecs/pojo/IdGenerators.java b/bson/src/main/org/bson/codecs/pojo/IdGenerators.java new file mode 100644 index 00000000000..fdb023995db --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/IdGenerators.java @@ -0,0 +1,79 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo; + +import org.bson.BsonObjectId; +import org.bson.types.ObjectId; + +/** + * The default IdGenerators + * + * @see IdGenerator + * @since 3.10 + */ +public final class IdGenerators { + + /** + * A IdGenerator for {@code ObjectId} + */ + public static final IdGenerator OBJECT_ID_GENERATOR = new IdGenerator() { + + @Override + public ObjectId generate() { + return new ObjectId(); + } + + @Override + public Class getType() { + return ObjectId.class; + } + }; + + /** + * A IdGenerator for {@code BsonObjectId} + */ + public static final IdGenerator BSON_OBJECT_ID_GENERATOR = new IdGenerator() { + + @Override + public BsonObjectId generate() { + return new BsonObjectId(); + } + + @Override + public Class getType() { + return BsonObjectId.class; + } + }; + + /** + * A IdGenerator for {@code String} + */ + public static final IdGenerator STRING_ID_GENERATOR = new IdGenerator() { + @Override + public String generate() { + return OBJECT_ID_GENERATOR.generate().toHexString(); + } + + @Override + public Class getType() { + return String.class; + } + }; + + private IdGenerators(){ + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/IdPropertyModelHolder.java b/bson/src/main/org/bson/codecs/pojo/IdPropertyModelHolder.java new file mode 100644 index 00000000000..54a6a1a3341 --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/IdPropertyModelHolder.java @@ -0,0 +1,81 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
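Aside: IdGenerators above covers ObjectId, BsonObjectId and String ids, with the String generator reusing an ObjectId hex string. A custom generator only needs the two methods; this sketch assumes a POJO whose id property is a java.util.UUID:

    import java.util.UUID;

    import org.bson.codecs.pojo.IdGenerator;

    // Hypothetical generator for UUID-typed id properties; getType() must be
    // assignable to the id property's type or IdPropertyModelHolder.create()
    // rejects the pairing.
    public final class UuidIdGenerator implements IdGenerator<UUID> {
        @Override
        public UUID generate() {
            return UUID.randomUUID();
        }

        @Override
        public Class<UUID> getType() {
            return UUID.class;
        }
    }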
+ */ + +package org.bson.codecs.pojo; + +import org.bson.codecs.configuration.CodecConfigurationException; + +import java.util.Objects; + +import static java.lang.String.format; + +final class IdPropertyModelHolder { + private final PropertyModel propertyModel; + private final IdGenerator idGenerator; + + static IdPropertyModelHolder create(final ClassModel classModel, final PropertyModel idPropertyModel) { + return create(classModel.getType(), idPropertyModel, classModel.getIdPropertyModelHolder().getIdGenerator()); + } + + @SuppressWarnings("unchecked") + static IdPropertyModelHolder create(final Class type, final PropertyModel idProperty, + final IdGenerator idGenerator) { + if (idProperty == null && idGenerator != null) { + throw new CodecConfigurationException(format("Invalid IdGenerator. There is no IdProperty set for: %s", type)); + } else if (idGenerator != null && !idProperty.getTypeData().getType().isAssignableFrom(idGenerator.getType())) { + throw new CodecConfigurationException(format("Invalid IdGenerator. Mismatching types, the IdProperty type is: %s but" + + " the IdGenerator type is: %s", idProperty.getTypeData().getType(), idGenerator.getType())); + } + return new IdPropertyModelHolder<>(idProperty, (IdGenerator) idGenerator); + } + + private IdPropertyModelHolder(final PropertyModel propertyModel, final IdGenerator idGenerator) { + this.propertyModel = propertyModel; + this.idGenerator = idGenerator; + } + + PropertyModel getPropertyModel() { + return propertyModel; + } + + IdGenerator getIdGenerator() { + return idGenerator; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + IdPropertyModelHolder that = (IdPropertyModelHolder) o; + + if (!Objects.equals(propertyModel, that.propertyModel)) { + return false; + } + return Objects.equals(idGenerator, that.idGenerator); + } + + @Override + public int hashCode() { + int result = propertyModel != null ? propertyModel.hashCode() : 0; + result = 31 * result + (idGenerator != null ? idGenerator.hashCode() : 0); + return result; + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/InstanceCreator.java b/bson/src/main/org/bson/codecs/pojo/InstanceCreator.java new file mode 100644 index 00000000000..8fca88f9dba --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/InstanceCreator.java @@ -0,0 +1,44 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo; + +/** + * Provides access for setting data and the creation of a class instances. + * + * @param the type of the class + * @since 3.5 + */ +public interface InstanceCreator { + + /** + * Sets a value for the given PropertyModel + * + * @param value the new value for the property + * @param propertyModel the PropertyModel representing the property to set the value for. 
+ * @param the PropertyModel's type + */ + void set(S value, PropertyModel propertyModel); + + /** + * Returns the new instance of the class. + *

<p>Note: This will be called after all the values have been set.</p>

+ * + * @return the new class instance. + */ + T getInstance(); + +} diff --git a/bson/src/main/org/bson/codecs/pojo/InstanceCreatorFactory.java b/bson/src/main/org/bson/codecs/pojo/InstanceCreatorFactory.java new file mode 100644 index 00000000000..14dc42825c6 --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/InstanceCreatorFactory.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo; + +/** + * The factory for creating {@link InstanceCreator} instances + * + * @param the type of the ClassAccessor + * @since 3.5 + */ +public interface InstanceCreatorFactory { + + /** + * @return a new ClassAccessor instance + */ + InstanceCreator create(); +} diff --git a/bson/src/main/org/bson/codecs/pojo/InstanceCreatorFactoryImpl.java b/bson/src/main/org/bson/codecs/pojo/InstanceCreatorFactoryImpl.java new file mode 100644 index 00000000000..49aa2f82766 --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/InstanceCreatorFactoryImpl.java @@ -0,0 +1,30 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo; + +final class InstanceCreatorFactoryImpl implements InstanceCreatorFactory { + private final CreatorExecutable creatorExecutable; + + InstanceCreatorFactoryImpl(final CreatorExecutable creatorExecutable) { + this.creatorExecutable = creatorExecutable; + } + + @Override + public InstanceCreator create() { + return new InstanceCreatorImpl<>(creatorExecutable); + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/InstanceCreatorImpl.java b/bson/src/main/org/bson/codecs/pojo/InstanceCreatorImpl.java new file mode 100644 index 00000000000..7f3ad9e818b --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/InstanceCreatorImpl.java @@ -0,0 +1,118 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
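Aside: an InstanceCreator separates applying decoded values from producing the final object, and getInstance() is guaranteed to run only after every set() call. A toy implementation for a mutable no-args bean shows the minimal contract; the generic <S> signature below restores the type parameter the interface declares:

    import org.bson.codecs.pojo.InstanceCreator;
    import org.bson.codecs.pojo.PropertyModel;

    // Toy creator for no-args beans: values can be applied as they arrive.
    final class BeanInstanceCreator<T> implements InstanceCreator<T> {
        private final T instance;

        BeanInstanceCreator(final T instance) {
            this.instance = instance;
        }

        @Override
        public <S> void set(final S value, final PropertyModel<S> propertyModel) {
            propertyModel.getPropertyAccessor().set(instance, value);
        }

        @Override
        public T getInstance() {
            return instance; // safe: all set() calls have already happened
        }
    }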
+ */ + +package org.bson.codecs.pojo; + +import org.bson.codecs.configuration.CodecConfigurationException; + +import java.util.HashMap; +import java.util.Map; + +import static java.lang.String.format; + +final class InstanceCreatorImpl implements InstanceCreator { + private final CreatorExecutable creatorExecutable; + private final Map, Object> cachedValues; + private final Map properties; + private final Object[] params; + + private T newInstance; + + InstanceCreatorImpl(final CreatorExecutable creatorExecutable) { + this.creatorExecutable = creatorExecutable; + if (creatorExecutable.getProperties().isEmpty()) { + this.cachedValues = null; + this.properties = null; + this.params = null; + this.newInstance = creatorExecutable.getInstance(); + } else { + this.cachedValues = new HashMap<>(); + this.properties = new HashMap<>(); + + for (int i = 0; i < creatorExecutable.getProperties().size(); i++) { + if (creatorExecutable.getIdPropertyIndex() != null && creatorExecutable.getIdPropertyIndex() == i) { + this.properties.put(ClassModelBuilder.ID_PROPERTY_NAME, creatorExecutable.getIdPropertyIndex()); + } else { + this.properties.put(creatorExecutable.getProperties().get(i).value(), i); + } + } + + this.params = new Object[properties.size()]; + } + } + + @Override + public void set(final S value, final PropertyModel propertyModel) { + if (newInstance != null) { + propertyModel.getPropertyAccessor().set(newInstance, value); + } else { + if (!properties.isEmpty()) { + String propertyName = propertyModel.getWriteName(); + + if (!properties.containsKey(propertyName)) { + // Support legacy BsonProperty settings where the property name was used instead of the write name. + propertyName = propertyModel.getName(); + } + + Integer index = properties.get(propertyName); + if (index != null) { + params[index] = value; + } + properties.remove(propertyName); + } + + if (properties.isEmpty()) { + constructInstanceAndProcessCachedValues(); + } else { + cachedValues.put(propertyModel, value); + } + } + } + + @Override + public T getInstance() { + if (newInstance == null) { + try { + for (Map.Entry entry : properties.entrySet()) { + params[entry.getValue()] = null; + } + constructInstanceAndProcessCachedValues(); + } catch (CodecConfigurationException e) { + throw new CodecConfigurationException(format("Could not construct new instance of: %s. " + + "Missing the following properties: %s", + creatorExecutable.getType().getSimpleName(), properties.keySet()), e); + } + } + return newInstance; + } + + private void constructInstanceAndProcessCachedValues() { + try { + newInstance = creatorExecutable.getInstance(params); + } catch (Exception e) { + throw new CodecConfigurationException(e.getMessage(), e); + } + + for (Map.Entry, Object> entry : cachedValues.entrySet()) { + setPropertyValue(entry.getKey(), entry.getValue()); + } + } + + @SuppressWarnings("unchecked") + private void setPropertyValue(final PropertyModel propertyModel, final Object value) { + set((S) value, propertyModel); + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/LazyMissingCodec.java b/bson/src/main/org/bson/codecs/pojo/LazyMissingCodec.java new file mode 100644 index 00000000000..b5e24292be1 --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/LazyMissingCodec.java @@ -0,0 +1,50 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
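Aside: InstanceCreatorImpl above parks creator arguments in params, keyed by write name, and only constructs once every creator property has been seen; values for non-creator properties are cached and replayed afterwards. A standalone sketch of that buffering idea, with illustrative names:

    import java.util.HashMap;
    import java.util.Map;

    final class CreatorBufferDemo {
        public static void main(String[] args) {
            // Creator parameters and their positions, as in the 'properties' map.
            Map<String, Integer> pending = new HashMap<>();
            pending.put("name", 0);
            pending.put("age", 1);
            Object[] params = new Object[2];

            // Decoded values may arrive in any document order.
            params[pending.remove("age")] = 30;
            params[pending.remove("name")] = "Ada";

            if (pending.isEmpty()) { // every creator parameter supplied
                System.out.println("new Person(" + params[0] + ", " + params[1] + ")");
            }
        }
    }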
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo; + +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.codecs.Codec; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.configuration.CodecConfigurationException; + + +class LazyMissingCodec implements Codec { + private final Class clazz; + private final CodecConfigurationException exception; + + LazyMissingCodec(final Class clazz, final CodecConfigurationException exception) { + this.clazz = clazz; + this.exception = exception; + } + + @Override + public S decode(final BsonReader reader, final DecoderContext decoderContext) { + throw exception; + } + + @Override + public void encode(final BsonWriter writer, final S value, final EncoderContext encoderContext) { + throw exception; + } + + @Override + public Class getEncoderClass() { + return clazz; + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/LazyPropertyModelCodec.java b/bson/src/main/org/bson/codecs/pojo/LazyPropertyModelCodec.java new file mode 100644 index 00000000000..24537ce1d8e --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/LazyPropertyModelCodec.java @@ -0,0 +1,227 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.bson.codecs.pojo; + +import org.bson.BsonReader; +import org.bson.BsonType; +import org.bson.BsonWriter; +import org.bson.codecs.Codec; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.RepresentationConfigurable; +import org.bson.codecs.configuration.CodecConfigurationException; +import org.bson.codecs.configuration.CodecRegistry; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +import static java.lang.String.format; +import static org.bson.codecs.pojo.PojoSpecializationHelper.specializeTypeData; + +class LazyPropertyModelCodec implements Codec { + private final PropertyModel propertyModel; + private final CodecRegistry registry; + private final PropertyCodecRegistry propertyCodecRegistry; + private final Lock codecLock = new ReentrantLock(); + private volatile Codec codec; + + LazyPropertyModelCodec(final PropertyModel propertyModel, final CodecRegistry registry, + final PropertyCodecRegistry propertyCodecRegistry) { + this.propertyModel = propertyModel; + this.registry = registry; + this.propertyCodecRegistry = propertyCodecRegistry; + } + + @Override + public T decode(final BsonReader reader, final DecoderContext decoderContext) { + return getPropertyModelCodec().decode(reader, decoderContext); + } + + @Override + public void encode(final BsonWriter writer, final T value, final EncoderContext encoderContext) { + getPropertyModelCodec().encode(writer, value, encoderContext); + } + + @Override + public Class getEncoderClass() { + return propertyModel.getTypeData().getType(); + } + + private Codec getPropertyModelCodec() { + Codec codec = this.codec; + if (codec == null) { + codecLock.lock(); + try { + codec = this.codec; + if (codec == null) { + codec = createCodec(); + this.codec = codec; + } + } finally { + codecLock.unlock(); + } + } + return codec; + } + + private Codec createCodec() { + Codec localCodec = getCodecFromPropertyRegistry(propertyModel); + if (localCodec instanceof PojoCodec) { + PojoCodec pojoCodec = (PojoCodec) localCodec; + ClassModel specialized = getSpecializedClassModel(pojoCodec.getClassModel(), propertyModel); + localCodec = new PojoCodecImpl<>(specialized, registry, propertyCodecRegistry, pojoCodec.getDiscriminatorLookup()); + } + return localCodec; + } + + @SuppressWarnings("unchecked") + private Codec getCodecFromPropertyRegistry(final PropertyModel propertyModel) { + Codec localCodec; + try { + localCodec = propertyCodecRegistry.get(propertyModel.getTypeData()); + } catch (CodecConfigurationException e) { + return new LazyMissingCodec<>(propertyModel.getTypeData().getType(), e); + } + if (localCodec == null) { + localCodec = new LazyMissingCodec<>(propertyModel.getTypeData().getType(), + new CodecConfigurationException("Unexpected missing codec for: " + propertyModel.getName())); + } + BsonType representation = propertyModel.getBsonRepresentation(); + if (representation != null) { + if (localCodec instanceof RepresentationConfigurable) { + return ((RepresentationConfigurable) localCodec).withRepresentation(representation); + } + throw new CodecConfigurationException("Codec must implement RepresentationConfigurable to support BsonRepresentation"); + } + return localCodec; + } + + private ClassModel getSpecializedClassModel(final ClassModel clazzModel, final PropertyModel propertyModel) { + boolean useDiscriminator = propertyModel.useDiscriminator() == null ? 
clazzModel.useDiscriminator() + : propertyModel.useDiscriminator(); + boolean validDiscriminator = clazzModel.getDiscriminatorKey() != null && clazzModel.getDiscriminator() != null; + boolean changeTheDiscriminator = (useDiscriminator != clazzModel.useDiscriminator()) && validDiscriminator; + + if (propertyModel.getTypeData().getTypeParameters().isEmpty() && !changeTheDiscriminator){ + return clazzModel; + } + + ArrayList> concretePropertyModels = new ArrayList<>(clazzModel.getPropertyModels()); + PropertyModel concreteIdProperty = clazzModel.getIdPropertyModel(); + + List> propertyTypeParameters = propertyModel.getTypeData().getTypeParameters(); + for (int i = 0; i < concretePropertyModels.size(); i++) { + PropertyModel model = concretePropertyModels.get(i); + String propertyName = model.getName(); + TypeParameterMap typeParameterMap = clazzModel.getPropertyNameToTypeParameterMap().get(propertyName); + if (typeParameterMap.hasTypeParameters()) { + PropertyModel concretePropertyModel = getSpecializedPropertyModel(model, propertyTypeParameters, typeParameterMap); + concretePropertyModels.set(i, concretePropertyModel); + if (concreteIdProperty != null && concreteIdProperty.getName().equals(propertyName)) { + concreteIdProperty = concretePropertyModel; + } + } + } + + boolean discriminatorEnabled = changeTheDiscriminator ? propertyModel.useDiscriminator() : clazzModel.useDiscriminator(); + return new ClassModel<>(clazzModel.getType(), clazzModel.getPropertyNameToTypeParameterMap(), + clazzModel.getInstanceCreatorFactory(), discriminatorEnabled, clazzModel.getDiscriminatorKey(), + clazzModel.getDiscriminator(), IdPropertyModelHolder.create(clazzModel, concreteIdProperty), concretePropertyModels); + } + + private PropertyModel getSpecializedPropertyModel(final PropertyModel propertyModel, + final List> propertyTypeParameters, + final TypeParameterMap typeParameterMap) { + TypeData specializedPropertyType = specializeTypeData(propertyModel.getTypeData(), propertyTypeParameters, typeParameterMap); + if (propertyModel.getTypeData().equals(specializedPropertyType)) { + return propertyModel; + } + + return new PropertyModel<>(propertyModel.getName(), propertyModel.getReadName(), propertyModel.getWriteName(), + specializedPropertyType, null, propertyModel.getPropertySerialization(), propertyModel.useDiscriminator(), + propertyModel.getPropertyAccessor(), propertyModel.getError(), propertyModel.getBsonRepresentation()); + } + + /** + * Instances of this codec are supposed to be replaced with usable implementations by {@link LazyPropertyModelCodec#createCodec()}. 
+ */ + static final class NeedSpecializationCodec extends PojoCodec { + private final ClassModel classModel; + private final DiscriminatorLookup discriminatorLookup; + private final CodecRegistry codecRegistry; + + NeedSpecializationCodec(final ClassModel classModel, final DiscriminatorLookup discriminatorLookup, final CodecRegistry codecRegistry) { + this.classModel = classModel; + this.discriminatorLookup = discriminatorLookup; + this.codecRegistry = codecRegistry; + } + + @Override + public void encode(final BsonWriter writer, final T value, final EncoderContext encoderContext) { + if (value.getClass().equals(classModel.getType())) { + throw exception(); + } + tryEncode(codecRegistry.get(value.getClass()), writer, value, encoderContext); + } + + @Override + public T decode(final BsonReader reader, final DecoderContext decoderContext) { + return tryDecode(reader, decoderContext); + } + + @SuppressWarnings("unchecked") + private void tryEncode(final Codec codec, final BsonWriter writer, final T value, final EncoderContext encoderContext) { + try { + codec.encode(writer, (A) value, encoderContext); + } catch (Exception e) { + throw exception(); + } + } + + @SuppressWarnings("unchecked") + public T tryDecode(final BsonReader reader, final DecoderContext decoderContext) { + Codec codec = PojoCodecImpl.getCodecFromDocument(reader, classModel.useDiscriminator(), classModel.getDiscriminatorKey(), + codecRegistry, discriminatorLookup, null, classModel.getName()); + if (codec != null) { + return codec.decode(reader, decoderContext); + } + + throw exception(); + } + + @Override + public Class getEncoderClass() { + return classModel.getType(); + } + + private CodecConfigurationException exception() { + return new CodecConfigurationException(format("%s contains generic types that have not been specialised.%n" + + "Top level classes with generic types are not supported by the PojoCodec.", classModel.getName())); + } + + @Override + ClassModel getClassModel() { + return classModel; + } + + @Override + DiscriminatorLookup getDiscriminatorLookup() { + return discriminatorLookup; + } + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/MapPropertyCodecProvider.java b/bson/src/main/org/bson/codecs/pojo/MapPropertyCodecProvider.java new file mode 100644 index 00000000000..3bbfc871390 --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/MapPropertyCodecProvider.java @@ -0,0 +1,115 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
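Aside: getPropertyModelCodec() above is the classic double-checked locking shape: one volatile read on the fast path, then a re-check under a ReentrantLock so the codec is created at most once. The same pattern in isolation, assuming an arbitrary expensive factory:

    import java.util.concurrent.locks.Lock;
    import java.util.concurrent.locks.ReentrantLock;
    import java.util.function.Supplier;

    final class Lazy<T> {
        private final Supplier<T> factory;
        private final Lock lock = new ReentrantLock();
        private volatile T value;

        Lazy(final Supplier<T> factory) {
            this.factory = factory;
        }

        T get() {
            T local = value;           // single volatile read on the fast path
            if (local == null) {
                lock.lock();
                try {
                    local = value;     // re-check: another thread may have won
                    if (local == null) {
                        local = factory.get();
                        value = local; // publish via the volatile write
                    }
                } finally {
                    lock.unlock();
                }
            }
            return local;
        }
    }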
+ */ +package org.bson.codecs.pojo; + +import org.bson.BsonReader; +import org.bson.BsonType; +import org.bson.BsonWriter; +import org.bson.codecs.Codec; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.configuration.CodecConfigurationException; + +import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; + +import static java.lang.String.format; + +final class MapPropertyCodecProvider implements PropertyCodecProvider { + + @SuppressWarnings({"rawtypes", "unchecked"}) + @Override + public Codec get(final TypeWithTypeParameters type, final PropertyCodecRegistry registry) { + if (Map.class.isAssignableFrom(type.getType()) && type.getTypeParameters().size() == 2) { + Class keyType = type.getTypeParameters().get(0).getType(); + if (!keyType.equals(String.class)) { + throw new CodecConfigurationException(format("Invalid Map type. Maps MUST have string keys, found %s instead.", keyType)); + } + + try { + return new MapCodec(type.getType(), registry.get(type.getTypeParameters().get(1))); + } catch (CodecConfigurationException e) { + if (type.getTypeParameters().get(1).getType() == Object.class) { + try { + return (Codec) registry.get(TypeData.builder(Map.class).build()); + } catch (CodecConfigurationException e1) { + // Ignore and return original exception + } + } + throw e; + } + } else { + return null; + } + } + + private static class MapCodec implements Codec> { + private final Class> encoderClass; + private final Codec codec; + + MapCodec(final Class> encoderClass, final Codec codec) { + this.encoderClass = encoderClass; + this.codec = codec; + } + + @Override + public void encode(final BsonWriter writer, final Map map, final EncoderContext encoderContext) { + writer.writeStartDocument(); + for (final Entry entry : map.entrySet()) { + writer.writeName(entry.getKey()); + if (entry.getValue() == null) { + writer.writeNull(); + } else { + codec.encode(writer, entry.getValue(), encoderContext); + } + } + writer.writeEndDocument(); + } + + @Override + public Map decode(final BsonReader reader, final DecoderContext context) { + reader.readStartDocument(); + Map map = getInstance(); + while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) { + if (reader.getCurrentBsonType() == BsonType.NULL) { + map.put(reader.readName(), null); + reader.readNull(); + } else { + map.put(reader.readName(), codec.decode(reader, context)); + } + } + reader.readEndDocument(); + return map; + } + + @Override + public Class> getEncoderClass() { + return encoderClass; + } + + private Map getInstance() { + if (encoderClass.isInterface()) { + return new HashMap<>(); + } + try { + return encoderClass.getDeclaredConstructor().newInstance(); + } catch (Exception e) { + throw new CodecConfigurationException(e.getMessage(), e); + } + } + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/PojoBuilderHelper.java b/bson/src/main/org/bson/codecs/pojo/PojoBuilderHelper.java new file mode 100644 index 00000000000..9a0f5e69d0e --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/PojoBuilderHelper.java @@ -0,0 +1,288 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
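Aside: MapCodec above writes each map entry as a BSON field, which is exactly why non-String keys are rejected up front: there would be no valid field name for them. A small illustration of the resulting document shape, with made-up values:

    import java.util.LinkedHashMap;
    import java.util.Map;

    final class MapShapeDemo {
        public static void main(String[] args) {
            Map<String, Integer> m = new LinkedHashMap<>();
            m.put("x", 1);
            m.put("y", null); // encoded as a BSON null, see encode() above

            // MapCodec would round-trip this map as: {"x": 1, "y": null}
            System.out.println(m); // prints {x=1, y=null}
        }
    }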
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo; + +import java.lang.annotation.Annotation; +import java.lang.reflect.Constructor; +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; +import java.lang.reflect.TypeVariable; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; + +import static java.lang.String.format; +import static java.lang.reflect.Modifier.isProtected; +import static java.lang.reflect.Modifier.isPublic; +import static java.util.Arrays.asList; +import static java.util.Collections.reverse; +import static org.bson.assertions.Assertions.notNull; +import static org.bson.codecs.pojo.PojoSpecializationHelper.specializeTypeData; +import static org.bson.codecs.pojo.PropertyReflectionUtils.getPropertyMethods; +import static org.bson.codecs.pojo.PropertyReflectionUtils.isGetter; +import static org.bson.codecs.pojo.PropertyReflectionUtils.toPropertyName; + +final class PojoBuilderHelper { + + @SuppressWarnings("unchecked") + static void configureClassModelBuilder(final ClassModelBuilder classModelBuilder, final Class clazz) { + classModelBuilder.type(notNull("clazz", clazz)); + + ArrayList annotations = new ArrayList<>(); + Set propertyNames = new TreeSet<>(); + Map propertyTypeParameterMap = new HashMap<>(); + String declaringClassName = clazz.getSimpleName(); + + Map> propertyNameMap = new HashMap<>(); + for (ClassWithParentTypeData currentClassWithParentTypeData : getClassHierarchy(clazz, null)) { + Class currentClass = currentClassWithParentTypeData.clazz; + TypeData parentClassTypeData = currentClassWithParentTypeData.parentClassTypeData; + annotations.addAll(asList(currentClass.getDeclaredAnnotations())); + List genericTypeNames = new ArrayList<>(); + for (TypeVariable> classTypeVariable : currentClass.getTypeParameters()) { + genericTypeNames.add(classTypeVariable.getName()); + } + + PropertyReflectionUtils.PropertyMethods propertyMethods = getPropertyMethods(currentClass); + + // Note that we're processing setters before getters. 
It's typical for setters to have more general types + // than getters (e.g.: getter returning ImmutableList, but setter accepting Collection), so by evaluating + // setters first, we'll initialize the PropertyMetadata with the more general type + for (Method method : propertyMethods.getSetterMethods()) { + String propertyName = toPropertyName(method); + propertyNames.add(propertyName); + PropertyMetadata propertyMetadata = getOrCreateMethodPropertyMetadata(propertyName, declaringClassName, propertyNameMap, + TypeData.newInstance(method), propertyTypeParameterMap, parentClassTypeData, genericTypeNames, + getGenericType(method)); + + if (propertyMetadata.getSetter() == null) { + propertyMetadata.setSetter(method); + for (Annotation annotation : method.getDeclaredAnnotations()) { + propertyMetadata.addWriteAnnotation(annotation); + } + } + } + + for (Method method : propertyMethods.getGetterMethods()) { + String propertyName = toPropertyName(method); + propertyNames.add(propertyName); + // If the getter is overridden in a subclass, we only want to process that property, and ignore + // potentially less specific methods from super classes + PropertyMetadata propertyMetadata = propertyNameMap.get(propertyName); + if (propertyMetadata != null && propertyMetadata.getGetter() != null) { + continue; + } + propertyMetadata = getOrCreateMethodPropertyMetadata(propertyName, declaringClassName, propertyNameMap, + TypeData.newInstance(method), propertyTypeParameterMap, parentClassTypeData, genericTypeNames, + getGenericType(method)); + if (propertyMetadata.getGetter() == null) { + propertyMetadata.setGetter(method); + for (Annotation annotation : method.getDeclaredAnnotations()) { + propertyMetadata.addReadAnnotation(annotation); + } + } + } + + for (Field field : currentClass.getDeclaredFields()) { + propertyNames.add(field.getName()); + // Note if properties are present and types don't match, the underlying field is treated as an implementation detail. 
+ PropertyMetadata propertyMetadata = getOrCreateFieldPropertyMetadata(field.getName(), declaringClassName, + propertyNameMap, TypeData.newInstance(field), propertyTypeParameterMap, parentClassTypeData, genericTypeNames, + field.getGenericType()); + if (propertyMetadata != null && propertyMetadata.getField() == null) { + propertyMetadata.field(field); + for (Annotation annotation : field.getDeclaredAnnotations()) { + propertyMetadata.addReadAnnotation(annotation); + propertyMetadata.addWriteAnnotation(annotation); + } + } + } + } + + for (String propertyName : propertyNames) { + PropertyMetadata propertyMetadata = propertyNameMap.get(propertyName); + if (propertyMetadata.isSerializable() || propertyMetadata.isDeserializable()) { + classModelBuilder.addProperty(createPropertyModelBuilder(propertyMetadata)); + } + } + + reverse(annotations); + classModelBuilder.annotations(annotations); + classModelBuilder.propertyNameToTypeParameterMap(propertyTypeParameterMap); + + Constructor noArgsConstructor = null; + for (Constructor constructor : clazz.getDeclaredConstructors()) { + if (constructor.getParameterCount() == 0 + && (isPublic(constructor.getModifiers()) || isProtected(constructor.getModifiers()))) { + noArgsConstructor = (Constructor) constructor; + noArgsConstructor.setAccessible(true); + } + } + + classModelBuilder.instanceCreatorFactory(new InstanceCreatorFactoryImpl<>(new CreatorExecutable<>(clazz, noArgsConstructor))); + } + + private static PropertyMetadata getOrCreateMethodPropertyMetadata(final String propertyName, + final String declaringClassName, + final Map> propertyNameMap, + final TypeData typeData, + final Map propertyTypeParameterMap, + final TypeData parentClassTypeData, + final List genericTypeNames, + final Type genericType) { + PropertyMetadata propertyMetadata = getOrCreatePropertyMetadata(propertyName, declaringClassName, propertyNameMap, typeData); + if (!isAssignableClass(propertyMetadata.getTypeData().getType(), typeData.getType())) { + propertyMetadata.setError(format("Property '%s' in %s, has differing data types: %s and %s.", propertyName, + declaringClassName, propertyMetadata.getTypeData(), typeData)); + } + cachePropertyTypeData(propertyMetadata, propertyTypeParameterMap, parentClassTypeData, genericTypeNames, genericType); + return propertyMetadata; + } + + private static boolean isAssignableClass(final Class propertyTypeClass, final Class typeDataClass) { + notNull("propertyTypeClass", propertyTypeClass); + notNull("typeDataClass", typeDataClass); + return propertyTypeClass.isAssignableFrom(typeDataClass) || typeDataClass.isAssignableFrom(propertyTypeClass); + } + + private static PropertyMetadata getOrCreateFieldPropertyMetadata(final String propertyName, + final String declaringClassName, + final Map> propertyNameMap, + final TypeData typeData, + final Map propertyTypeParameterMap, + final TypeData parentClassTypeData, + final List genericTypeNames, + final Type genericType) { + PropertyMetadata propertyMetadata = getOrCreatePropertyMetadata(propertyName, declaringClassName, propertyNameMap, typeData); + if (!propertyMetadata.getTypeData().getType().isAssignableFrom(typeData.getType())) { + return null; + } + cachePropertyTypeData(propertyMetadata, propertyTypeParameterMap, parentClassTypeData, genericTypeNames, genericType); + return propertyMetadata; + } + + @SuppressWarnings("unchecked") + private static PropertyMetadata getOrCreatePropertyMetadata(final String propertyName, + final String declaringClassName, + final Map> propertyNameMap, + final 
TypeData typeData) { + PropertyMetadata propertyMetadata = (PropertyMetadata) propertyNameMap.get(propertyName); + if (propertyMetadata == null) { + propertyMetadata = new PropertyMetadata<>(propertyName, declaringClassName, typeData); + propertyNameMap.put(propertyName, propertyMetadata); + } + return propertyMetadata; + } + + private static void cachePropertyTypeData(final PropertyMetadata propertyMetadata, + final Map propertyTypeParameterMap, + final TypeData parentClassTypeData, + final List genericTypeNames, + final Type genericType) { + TypeParameterMap typeParameterMap = getTypeParameterMap(genericTypeNames, genericType); + propertyTypeParameterMap.put(propertyMetadata.getName(), typeParameterMap); + propertyMetadata.typeParameterInfo(typeParameterMap, parentClassTypeData); + } + + private static Type getGenericType(final Method method) { + return isGetter(method) ? method.getGenericReturnType() : method.getGenericParameterTypes()[0]; + } + + static PropertyModelBuilder createPropertyModelBuilder(final PropertyMetadata propertyMetadata) { + PropertyModelBuilder propertyModelBuilder = PropertyModel.builder() + .propertyName(propertyMetadata.getName()) + .readName(propertyMetadata.getName()) + .writeName(propertyMetadata.getName()) + .typeData(propertyMetadata.getTypeData()) + .readAnnotations(propertyMetadata.getReadAnnotations()) + .writeAnnotations(propertyMetadata.getWriteAnnotations()) + .propertySerialization(new PropertyModelSerializationImpl<>()) + .propertyAccessor(new PropertyAccessorImpl<>(propertyMetadata)) + .setError(propertyMetadata.getError()); + + if (propertyMetadata.getTypeParameters() != null) { + propertyModelBuilder.typeData(specializeTypeData(propertyModelBuilder.getTypeData(), propertyMetadata.getTypeParameters(), + propertyMetadata.getTypeParameterMap())); + } + + return propertyModelBuilder; + } + + private static TypeParameterMap getTypeParameterMap(final List genericTypeNames, final Type propertyType) { + int classParamIndex = genericTypeNames.indexOf(propertyType.toString()); + TypeParameterMap.Builder builder = TypeParameterMap.builder(); + if (classParamIndex != -1) { + builder.addIndex(classParamIndex); + } else { + if (propertyType instanceof ParameterizedType) { + ParameterizedType pt = (ParameterizedType) propertyType; + for (int i = 0; i < pt.getActualTypeArguments().length; i++) { + classParamIndex = genericTypeNames.indexOf(pt.getActualTypeArguments()[i].toString()); + if (classParamIndex != -1) { + builder.addIndex(i, classParamIndex); + } else { + builder.addIndex(i, getTypeParameterMap(genericTypeNames, pt.getActualTypeArguments()[i])); + } + } + } + } + return builder.build(); + } + + static V stateNotNull(final String property, final V value) { + if (value == null) { + throw new IllegalStateException(format("%s cannot be null", property)); + } + return value; + } + + @SuppressWarnings("unchecked") + private static Set> getClassHierarchy(final Class clazz, + final TypeData classTypeData) { + Set> classesToScan = new LinkedHashSet<>(); + Class currentClass = clazz; + TypeData parentClassTypeData = classTypeData; + while (currentClass != null && !currentClass.isEnum() && !currentClass.equals(Object.class)) { + classesToScan.add(new ClassWithParentTypeData<>(currentClass, parentClassTypeData)); + parentClassTypeData = TypeData.newInstance(currentClass.getGenericSuperclass(), currentClass); + for (Class interfaceClass : currentClass.getInterfaces()) { + classesToScan.addAll(getClassHierarchy((Class) interfaceClass, parentClassTypeData)); + } + 
currentClass = currentClass.getSuperclass(); + } + return classesToScan; + } + + private static final class ClassWithParentTypeData { + private final Class clazz; + private final TypeData parentClassTypeData; + + private ClassWithParentTypeData(final Class clazz, final TypeData parentClassTypeData) { + this.clazz = clazz; + this.parentClassTypeData = parentClassTypeData; + } + } + + private PojoBuilderHelper() { + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/PojoCodec.java b/bson/src/main/org/bson/codecs/pojo/PojoCodec.java new file mode 100644 index 00000000000..698e77f4b76 --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/PojoCodec.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo; + +import org.bson.codecs.Codec; + +abstract class PojoCodec implements Codec { + + abstract ClassModel getClassModel(); + + abstract DiscriminatorLookup getDiscriminatorLookup(); +} diff --git a/bson/src/main/org/bson/codecs/pojo/PojoCodecImpl.java b/bson/src/main/org/bson/codecs/pojo/PojoCodecImpl.java new file mode 100644 index 00000000000..cbcfc99b20d --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/PojoCodecImpl.java @@ -0,0 +1,330 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
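Aside: the discovery pass above walks the class hierarchy, processes setters before getters so property metadata starts from the more general parameter type, and folds in fields only when their types line up. An illustrative bean exercising the setter-first rule; the names are hypothetical:

    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.List;

    // The setter is scanned first, so the 'values' property metadata begins
    // with the more general Collection<String>; the List getter and field are
    // still mutually assignable, so all three fold into one property.
    public final class Tags {
        private List<String> values = new ArrayList<>();

        public List<String> getValues() {
            return values;
        }

        public void setValues(final Collection<String> values) {
            this.values = new ArrayList<>(values);
        }
    }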
+ */ +package org.bson.codecs.pojo; + +import org.bson.BsonDocument; +import org.bson.BsonDocumentReader; +import org.bson.BsonDocumentWrapper; +import org.bson.BsonInvalidOperationException; +import org.bson.BsonReader; +import org.bson.BsonReaderMark; +import org.bson.BsonType; +import org.bson.BsonValue; +import org.bson.BsonWriter; +import org.bson.codecs.BsonValueCodec; +import org.bson.codecs.Codec; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.Encoder; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.configuration.CodecConfigurationException; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.diagnostics.Logger; +import org.bson.diagnostics.Loggers; + +import javax.annotation.Nullable; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; + +import static java.lang.String.format; + + +final class PojoCodecImpl extends PojoCodec { + private static final Logger LOGGER = Loggers.getLogger("PojoCodec"); + private static final Codec BSON_VALUE_CODEC = new BsonValueCodec(); + private final ClassModel classModel; + private final CodecRegistry registry; + private final PropertyCodecRegistry propertyCodecRegistry; + private final DiscriminatorLookup discriminatorLookup; + + PojoCodecImpl(final ClassModel classModel, final CodecRegistry codecRegistry, + final List propertyCodecProviders, final DiscriminatorLookup discriminatorLookup) { + this.classModel = classModel; + this.registry = codecRegistry; + this.discriminatorLookup = discriminatorLookup; + this.propertyCodecRegistry = new PropertyCodecRegistryImpl(this, registry, propertyCodecProviders); + specialize(); + } + + PojoCodecImpl(final ClassModel classModel, final CodecRegistry codecRegistry, + final PropertyCodecRegistry propertyCodecRegistry, final DiscriminatorLookup discriminatorLookup) { + this.classModel = classModel; + this.registry = codecRegistry; + this.discriminatorLookup = discriminatorLookup; + this.propertyCodecRegistry = propertyCodecRegistry; + specialize(); + } + + @SuppressWarnings("unchecked") + @Override + public void encode(final BsonWriter writer, final T value, final EncoderContext encoderContext) { + if (areEquivalentTypes(value.getClass(), classModel.getType())) { + writer.writeStartDocument(); + + encodeIdProperty(writer, value, encoderContext, classModel.getIdPropertyModelHolder()); + encodeDiscriminatorProperty(writer); + + for (PropertyModel propertyModel : classModel.getPropertyModels()) { + if (idProperty(propertyModel)) { + continue; + } + encodeProperty(writer, value, encoderContext, propertyModel); + } + writer.writeEndDocument(); + } else { + ((Codec) registry.get(value.getClass())).encode(writer, value, encoderContext); + } + } + + @Override + public T decode(final BsonReader reader, final DecoderContext decoderContext) { + if (decoderContext.hasCheckedDiscriminator()) { + InstanceCreator instanceCreator = classModel.getInstanceCreator(); + decodeProperties(reader, decoderContext, instanceCreator); + return instanceCreator.getInstance(); + } else { + return getCodecFromDocument(reader, classModel.useDiscriminator(), classModel.getDiscriminatorKey(), registry, + discriminatorLookup, this, classModel.getName()) + .decode(reader, DecoderContext.builder().checkedDiscriminator(true).build()); + } + } + + @Override + public Class getEncoderClass() { + return classModel.getType(); + } + + @Override + public String toString() { + return format("PojoCodec<%s>", classModel); + } + + ClassModel 
getClassModel() { + return classModel; + } + + private void encodeIdProperty(final BsonWriter writer, final T instance, final EncoderContext encoderContext, + final IdPropertyModelHolder propertyModelHolder) { + if (propertyModelHolder.getPropertyModel() != null) { + if (propertyModelHolder.getIdGenerator() == null) { + encodeProperty(writer, instance, encoderContext, propertyModelHolder.getPropertyModel()); + } else { + S id = propertyModelHolder.getPropertyModel().getPropertyAccessor().get(instance); + if (id == null && encoderContext.isEncodingCollectibleDocument()) { + id = propertyModelHolder.getIdGenerator().generate(); + try { + propertyModelHolder.getPropertyModel().getPropertyAccessor().set(instance, id); + } catch (Exception e) { + // ignore + } + } + encodeValue(writer, encoderContext, propertyModelHolder.getPropertyModel(), id); + } + } + } + + private boolean idProperty(final PropertyModel propertyModel) { + return propertyModel.equals(classModel.getIdPropertyModel()); + } + + private void encodeDiscriminatorProperty(final BsonWriter writer) { + if (classModel.useDiscriminator()) { + writer.writeString(classModel.getDiscriminatorKey(), classModel.getDiscriminator()); + } + } + + private void encodeProperty(final BsonWriter writer, final T instance, final EncoderContext encoderContext, + final PropertyModel propertyModel) { + if (propertyModel != null && propertyModel.isReadable()) { + S propertyValue = propertyModel.getPropertyAccessor().get(instance); + encodeValue(writer, encoderContext, propertyModel, propertyValue); + } + } + + @SuppressWarnings("unchecked") + private void encodeValue(final BsonWriter writer, final EncoderContext encoderContext, final PropertyModel propertyModel, + final S propertyValue) { + if (propertyModel.shouldSerialize(propertyValue)) { + try { + if (propertyModel.getPropertySerialization().inline()) { + if (propertyValue != null) { + new BsonDocumentWrapper<>(propertyValue, propertyModel.getCachedCodec()).forEach((k, v) -> { + writer.writeName(k); + encoderContext.encodeWithChildContext((Encoder) registry.get(v.getClass()), writer, v); + }); + } + } else { + writer.writeName(propertyModel.getReadName()); + if (propertyValue == null) { + writer.writeNull(); + } else { + encoderContext.encodeWithChildContext(propertyModel.getCachedCodec(), writer, propertyValue); + } + } + } catch (CodecConfigurationException e) { + throw new CodecConfigurationException(format("Failed to encode '%s'. Encoding '%s' errored with: %s", + classModel.getName(), propertyModel.getReadName(), e.getMessage()), e); + } + } + } + + private void decodeProperties(final BsonReader reader, final DecoderContext decoderContext, final InstanceCreator instanceCreator) { + PropertyModel inlineElementsPropertyModel = classModel.getPropertyModels() + .stream() + .filter(p -> p.getPropertySerialization().inline()) + .findFirst() + .orElse(null); + + BsonDocument extraElements = inlineElementsPropertyModel == null ? 
null : new BsonDocument(); + reader.readStartDocument(); + while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) { + String name = reader.readName(); + if (classModel.useDiscriminator() && classModel.getDiscriminatorKey().equals(name)) { + reader.readString(); + } else { + decodePropertyModel(reader, decoderContext, instanceCreator, name, getPropertyModelByWriteName(classModel, name), extraElements); + } + } + reader.readEndDocument(); + setPropertyValueBsonExtraElements(instanceCreator, extraElements, inlineElementsPropertyModel); + } + + private void decodePropertyModel(final BsonReader reader, final DecoderContext decoderContext, + final InstanceCreator instanceCreator, final String name, + final PropertyModel propertyModel, @Nullable final BsonDocument extraElements) { + if (propertyModel != null) { + setPropertyValue(instanceCreator, () -> { + S value = null; + if (reader.getCurrentBsonType() == BsonType.NULL) { + reader.readNull(); + } else { + Codec codec = propertyModel.getCachedCodec(); + if (codec == null) { + throw new CodecConfigurationException(format("Missing codec in '%s' for '%s'", + classModel.getName(), propertyModel.getName())); + } + value = decoderContext.decodeWithChildContext(codec, reader); + } + return value; + }, propertyModel); + } else if (extraElements == null) { + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(format("Found property not present in the ClassModel: %s", name)); + } + reader.skipValue(); + } else { + try { + extraElements.append(name, decoderContext.decodeWithChildContext(BSON_VALUE_CODEC, reader)); + } catch (CodecConfigurationException e) { + throw new CodecConfigurationException(format("Failed to decode '%s'. Decoding '%s' errored with: %s", + classModel.getName(), name, e.getMessage()), e); + } + } + } + + private void setPropertyValue(final InstanceCreator instanceCreator, final Supplier valueSupplier, + final PropertyModel propertyModel) { + try { + instanceCreator.set(valueSupplier.get(), propertyModel); + } catch (BsonInvalidOperationException | CodecConfigurationException e) { + throw new CodecConfigurationException(format("Failed to decode '%s'. Decoding '%s' errored with: %s", + classModel.getName(), propertyModel.getName(), e.getMessage()), e); + } + } + + private void setPropertyValueBsonExtraElements(final InstanceCreator instanceCreator, @Nullable final BsonDocument extraElements, + final PropertyModel inlineElementsPropertyModel) { + if (extraElements != null + && !extraElements.isEmpty() + && inlineElementsPropertyModel != null + && inlineElementsPropertyModel.isWritable()) { + setPropertyValue(instanceCreator, () -> + inlineElementsPropertyModel.getCachedCodec() + .decode(new BsonDocumentReader(extraElements), DecoderContext.builder().build()), + inlineElementsPropertyModel); + } + } + + private void specialize() { + classModel.getPropertyModels().forEach(this::cachePropertyModelCodec); + } + + private void cachePropertyModelCodec(final PropertyModel propertyModel) { + if (propertyModel.getCachedCodec() == null) { + Codec codec = propertyModel.getCodec() != null ? 
propertyModel.getCodec() + : new LazyPropertyModelCodec<>(propertyModel, registry, propertyCodecRegistry); + propertyModel.cachedCodec(codec); + } + } + + private boolean areEquivalentTypes(final Class t1, final Class t2) { + if (t1.equals(t2)) { + return true; + } else if (Collection.class.isAssignableFrom(t1) && Collection.class.isAssignableFrom(t2)) { + return true; + } else if (Map.class.isAssignableFrom(t1) && Map.class.isAssignableFrom(t2)) { + return true; + } + return false; + } + + @SuppressWarnings("unchecked") + @Nullable + static Codec getCodecFromDocument(final BsonReader reader, final boolean useDiscriminator, final String discriminatorKey, + final CodecRegistry registry, final DiscriminatorLookup discriminatorLookup, @Nullable final Codec defaultCodec, + final String simpleClassName) { + Codec codec = defaultCodec; + if (useDiscriminator) { + BsonReaderMark mark = reader.getMark(); + reader.readStartDocument(); + boolean discriminatorKeyFound = false; + while (!discriminatorKeyFound && reader.readBsonType() != BsonType.END_OF_DOCUMENT) { + String name = reader.readName(); + if (discriminatorKey.equals(name)) { + discriminatorKeyFound = true; + try { + Class discriminatorClass = discriminatorLookup.lookup(reader.readString()); + if (codec == null || !codec.getEncoderClass().equals(discriminatorClass)) { + codec = (Codec) registry.get(discriminatorClass); + } + } catch (Exception e) { + throw new CodecConfigurationException(format("Failed to decode '%s'. Decoding errored with: %s", + simpleClassName, e.getMessage()), e); + } + } else { + reader.skipValue(); + } + } + mark.reset(); + } + return codec; + } + + private PropertyModel getPropertyModelByWriteName(final ClassModel classModel, final String readName) { + for (PropertyModel propertyModel : classModel.getPropertyModels()) { + if (propertyModel.isWritable() && propertyModel.getWriteName().equals(readName)) { + return propertyModel; + } + } + return null; + } + + @Override + DiscriminatorLookup getDiscriminatorLookup() { + return discriminatorLookup; + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/PojoCodecProvider.java b/bson/src/main/org/bson/codecs/pojo/PojoCodecProvider.java new file mode 100644 index 00000000000..255b520aabb --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/PojoCodecProvider.java @@ -0,0 +1,243 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.bson.codecs.pojo; + +import org.bson.codecs.Codec; +import org.bson.codecs.configuration.CodecProvider; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.diagnostics.Logger; +import org.bson.diagnostics.Loggers; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static java.lang.String.format; +import static java.util.Arrays.asList; +import static org.bson.assertions.Assertions.notNull; + +/** + * Provides Codecs for registered POJOs via the ClassModel abstractions. + * + * @since 3.5 + */ +public final class PojoCodecProvider implements CodecProvider { + static final Logger LOGGER = Loggers.getLogger("codecs.pojo"); + private final boolean automatic; + private final Map, ClassModel> classModels; + private final Set packages; + private final List conventions; + private final DiscriminatorLookup discriminatorLookup; + private final List propertyCodecProviders; + + private PojoCodecProvider(final boolean automatic, final Map, ClassModel> classModels, final Set packages, + final List conventions, final List propertyCodecProviders) { + this.automatic = automatic; + this.classModels = classModels; + this.packages = packages; + this.conventions = conventions; + this.discriminatorLookup = new DiscriminatorLookup(classModels, packages); + this.propertyCodecProviders = propertyCodecProviders; + } + + /** + * Creates a Builder so classes or packages can be registered and configured before creating an immutable CodecProvider. + * + * @return the Builder + * @see Builder#register(Class[]) + */ + public static Builder builder() { + return new Builder(); + } + + @Override + public Codec get(final Class clazz, final CodecRegistry registry) { + return createCodec(clazz, registry); + } + + @SuppressWarnings("unchecked") + private PojoCodec createCodec(final Class clazz, final CodecRegistry registry) { + ClassModel classModel = (ClassModel) classModels.get(clazz); + if (classModel != null) { + return createCodec(classModel, registry, propertyCodecProviders, discriminatorLookup); + } else if (automatic || (clazz.getPackage() != null && packages.contains(clazz.getPackage().getName()))) { + try { + classModel = createClassModel(clazz, conventions); + if (clazz.isInterface() || !classModel.getPropertyModels().isEmpty() || classModel.useDiscriminator()) { + discriminatorLookup.addClassModel(classModel); + return new AutomaticPojoCodec<>(createCodec(classModel, registry, propertyCodecProviders, + discriminatorLookup)); + } + } catch (Exception e) { + LOGGER.warn(format("Cannot use '%s' with the PojoCodec.", clazz.getSimpleName()), e); + return null; + } + } + return null; + } + + private static PojoCodec createCodec(final ClassModel classModel, final CodecRegistry codecRegistry, + final List propertyCodecProviders, final DiscriminatorLookup discriminatorLookup) { + return shouldSpecialize(classModel) + ? 
new PojoCodecImpl<>(classModel, codecRegistry, propertyCodecProviders, discriminatorLookup) + : new LazyPropertyModelCodec.NeedSpecializationCodec<>(classModel, discriminatorLookup, codecRegistry); + } + + /** + * A Builder for the PojoCodecProvider + */ + public static final class Builder { + private final Set packages = new HashSet<>(); + private final Map, ClassModel> classModels = new HashMap<>(); + private final List> clazzes = new ArrayList<>(); + private List conventions = null; + private final List propertyCodecProviders = new ArrayList<>(); + private boolean automatic; + + /** + * Creates the PojoCodecProvider with the classes or packages that have been configured and registered. + * + * @return the Provider + * @see #register(Class...) + */ + public PojoCodecProvider build() { + List immutableConventions = conventions != null + ? Collections.unmodifiableList(new ArrayList<>(conventions)) + : null; + for (Class clazz : clazzes) { + if (!classModels.containsKey(clazz)) { + register(createClassModel(clazz, immutableConventions)); + } + } + return new PojoCodecProvider(automatic, classModels, packages, immutableConventions, propertyCodecProviders); + } + + /** + * Sets whether the provider should automatically try to wrap a {@link ClassModel} for any class that is requested. + * + *

<p>Note: As Java Beans are convention based, when using automatic settings the provider should be the last provider in the + * registry.</p>
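For orientation, a minimal registry wiring sketch, assuming only the public CodecRegistries helpers; the class name and the defaultRegistry parameter are illustrative. It composes the automatic provider last so that built-in codecs keep precedence in the lookup.

    import org.bson.codecs.configuration.CodecRegistry;
    import org.bson.codecs.pojo.PojoCodecProvider;

    import static org.bson.codecs.configuration.CodecRegistries.fromProviders;
    import static org.bson.codecs.configuration.CodecRegistries.fromRegistries;

    public final class AutomaticPojoRegistryExample {
        // defaultRegistry would typically be the driver's default codec registry;
        // the automatic POJO provider is consulted only when no earlier codec matches.
        public static CodecRegistry withAutomaticPojos(final CodecRegistry defaultRegistry) {
            return fromRegistries(defaultRegistry,
                    fromProviders(PojoCodecProvider.builder().automatic(true).build()));
        }
    }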

+ * + * @param automatic whether to automatically wrap {@code ClassModels} or not. + * @return this + */ + public Builder automatic(final boolean automatic) { + this.automatic = automatic; + return this; + } + + /** + * Sets the conventions to use when creating {@code ClassModels} from classes or packages. + * + * @param conventions a list of conventions + * @return this + */ + public Builder conventions(final List conventions) { + this.conventions = notNull("conventions", conventions); + return this; + } + + /** + * Registers classes with the builder for inclusion in the Provider. + * + *

<p>Note: Uses reflection for the property mapping. If no conventions are configured on the builder the + * {@link Conventions#DEFAULT_CONVENTIONS} will be used.</p>
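A sketch of explicit registration; Person is a hypothetical bean, and conventions(...) is shown only to make the default explicit. build() eagerly creates a ClassModel for each registered class.

    import org.bson.codecs.configuration.CodecProvider;
    import org.bson.codecs.pojo.Conventions;
    import org.bson.codecs.pojo.PojoCodecProvider;

    public final class ExplicitRegistrationExample {
        public static CodecProvider personProvider() {
            return PojoCodecProvider.builder()
                    .conventions(Conventions.DEFAULT_CONVENTIONS) // same as leaving conventions unset
                    .register(Person.class)
                    .build();
        }

        // A hypothetical bean following the getter/setter convention.
        public static final class Person {
            private String name;
            public String getName() { return name; }
            public void setName(final String name) { this.name = name; }
        }
    }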

+ * + * @param classes the classes to register + * @return this + */ + public Builder register(final Class... classes) { + clazzes.addAll(asList(classes)); + return this; + } + + /** + * Registers classModels for inclusion in the Provider. + * + * @param classModels the classModels to register + * @return this + */ + public Builder register(final ClassModel... classModels) { + notNull("classModels", classModels); + for (ClassModel classModel : classModels) { + this.classModels.put(classModel.getType(), classModel); + } + return this; + } + + /** + * Registers the packages of the given classes with the builder for inclusion in the Provider. This will allow classes in the + * given packages to be mapped for use with PojoCodecProvider. + * + *

<p>Note: Uses reflection for the field mapping. If no conventions are configured on the builder the + * {@link Conventions#DEFAULT_CONVENTIONS} will be used.</p>
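By contrast, package registration defers ClassModel creation until a class from a registered package is first requested; a short sketch where the package name is a placeholder:

    import org.bson.codecs.configuration.CodecProvider;
    import org.bson.codecs.pojo.PojoCodecProvider;

    public final class PackageRegistrationExample {
        public static CodecProvider modelPackageProvider() {
            // Classes under "com.example.model" are mapped lazily on first lookup.
            return PojoCodecProvider.builder()
                    .register("com.example.model")
                    .build();
        }
    }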

+ * + * @param packageNames the package names to register + * @return this + */ + public Builder register(final String... packageNames) { + packages.addAll(asList(notNull("packageNames", packageNames))); + return this; + } + + /** + * Registers codec providers that receive the type parameters of properties for instances encoded and decoded + * by a {@link PojoCodec} handled by this provider. + * + *

Note that you should prefer working with the {@link CodecRegistry}/{@link CodecProvider} hierarchy. Providers + * should only be registered here if a codec needs to be created for custom container types like optionals and + * collections. Support for types {@link Map} and {@link java.util.Collection} are built-in so explicitly handling + * them is not necessary. + * @param providers property codec providers to register + * @return this + * @since 3.6 + */ + public Builder register(final PropertyCodecProvider... providers) { + propertyCodecProviders.addAll(asList(notNull("providers", providers))); + return this; + } + + private Builder() { + } + } + + private static ClassModel createClassModel(final Class clazz, final List conventions) { + ClassModelBuilder builder = ClassModel.builder(clazz); + if (conventions != null) { + builder.conventions(conventions); + } + return builder.build(); + } + + private static boolean shouldSpecialize(final ClassModel classModel) { + if (!classModel.hasTypeParameters()) { + return true; + } + + for (Map.Entry entry : classModel.getPropertyNameToTypeParameterMap().entrySet()) { + TypeParameterMap typeParameterMap = entry.getValue(); + PropertyModel propertyModel = classModel.getPropertyModel(entry.getKey()); + if (typeParameterMap.hasTypeParameters() && (propertyModel == null || propertyModel.getCodec() == null)) { + return false; + } + } + return true; + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/PojoSpecializationHelper.java b/bson/src/main/org/bson/codecs/pojo/PojoSpecializationHelper.java new file mode 100644 index 00000000000..8986c794af8 --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/PojoSpecializationHelper.java @@ -0,0 +1,80 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs.pojo; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +final class PojoSpecializationHelper { + + @SuppressWarnings("unchecked") + static TypeData specializeTypeData(final TypeData typeData, final List> typeParameters, + final TypeParameterMap typeParameterMap) { + if (!typeParameterMap.hasTypeParameters() || typeParameters.isEmpty()) { + return typeData; + } + + Map> propertyToClassParamIndexMap = typeParameterMap.getPropertyToClassParamIndexMap(); + Either classTypeParamRepresentsWholeField = propertyToClassParamIndexMap.get(-1); + if (classTypeParamRepresentsWholeField != null) { + Integer index = classTypeParamRepresentsWholeField.map(i -> i, e -> { + throw new IllegalStateException("Invalid state, the whole class cannot be represented by a subtype."); + }); + return (TypeData) typeParameters.get(index); + } else { + return getTypeData(typeData, typeParameters, propertyToClassParamIndexMap); + } + } + + private static TypeData getTypeData(final TypeData typeData, final List> specializedTypeParameters, + final Map> propertyToClassParamIndexMap) { + List> subTypeParameters = new ArrayList<>(typeData.getTypeParameters()); + for (int i = 0; i < typeData.getTypeParameters().size(); i++) { + subTypeParameters.set(i, getTypeData(subTypeParameters.get(i), specializedTypeParameters, propertyToClassParamIndexMap, i)); + } + return TypeData.builder(typeData.getType()).addTypeParameters(subTypeParameters).build(); + } + + private static TypeData getTypeData(final TypeData typeData, final List> specializedTypeParameters, + final Map> propertyToClassParamIndexMap, + final int index) { + if (!propertyToClassParamIndexMap.containsKey(index)) { + return typeData; + } + return propertyToClassParamIndexMap.get(index).map(l -> { + if (typeData.getTypeParameters().isEmpty()) { + // Represents the whole typeData + return specializedTypeParameters.get(l); + } else { + // Represents a single nested type parameter within this typeData + TypeData.Builder builder = TypeData.builder(typeData.getType()); + List> typeParameters = new ArrayList<>(typeData.getTypeParameters()); + typeParameters.set(index, specializedTypeParameters.get(l)); + builder.addTypeParameters(typeParameters); + return builder.build(); + } + }, + r -> { + // Represents a child type parameter of this typeData + return getTypeData(typeData, specializedTypeParameters, r.getPropertyToClassParamIndexMap()); + }); + } + + private PojoSpecializationHelper() { + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/PropertyAccessor.java b/bson/src/main/org/bson/codecs/pojo/PropertyAccessor.java new file mode 100644 index 00000000000..740856d876e --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/PropertyAccessor.java @@ -0,0 +1,44 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo; + +/** + * Provides access for getting and setting property data. 
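To make the contract of the interface below concrete, a hand-rolled accessor might look like this sketch, in which a map-backed instance stands in for the reflective field access the driver's own implementation uses:

    import java.util.Map;

    import org.bson.codecs.pojo.PropertyAccessor;

    // Reads and writes a single String-valued entry of a Map-shaped instance.
    final class MapBackedAccessor implements PropertyAccessor<String> {
        private final String key;

        MapBackedAccessor(final String key) {
            this.key = key;
        }

        @Override
        @SuppressWarnings("unchecked")
        public <S> String get(final S instance) {
            return (String) ((Map<String, Object>) instance).get(key);
        }

        @Override
        @SuppressWarnings("unchecked")
        public <S> void set(final S instance, final String value) {
            ((Map<String, Object>) instance).put(key, value);
        }
    }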
+ * + * @param the type of the property + * @since 3.5 + */ +public interface PropertyAccessor { + + /** + * Gets the value for a given PropertyModel instance. + * + * @param instance the class instance to get the property value from + * @param the class instance type + * @return the value of the property. + */ + T get(S instance); + + /** + * Sets a value on the given PropertyModel + * + * @param instance the instance to set the property value to + * @param the class instance type + * @param value the new value for the property + */ + void set(S instance, T value); +} diff --git a/bson/src/main/org/bson/codecs/pojo/PropertyAccessorImpl.java b/bson/src/main/org/bson/codecs/pojo/PropertyAccessorImpl.java new file mode 100644 index 00000000000..cab25fa78ea --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/PropertyAccessorImpl.java @@ -0,0 +1,77 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo; + +import org.bson.codecs.configuration.CodecConfigurationException; + +import static java.lang.String.format; + +final class PropertyAccessorImpl implements PropertyAccessor { + + private final PropertyMetadata propertyMetadata; + + PropertyAccessorImpl(final PropertyMetadata propertyMetadata) { + this.propertyMetadata = propertyMetadata; + } + + @Override + @SuppressWarnings("unchecked") + public T get(final S instance) { + try { + if (propertyMetadata.isSerializable()) { + if (propertyMetadata.getGetter() != null) { + return (T) propertyMetadata.getGetter().invoke(instance); + } else { + return (T) propertyMetadata.getField().get(instance); + } + } else { + throw getError(null); + } + } catch (Exception e) { + throw getError(e); + } + } + + @Override + public void set(final S instance, final T value) { + try { + if (propertyMetadata.isDeserializable()) { + if (propertyMetadata.getSetter() != null) { + propertyMetadata.getSetter().invoke(instance, value); + } else { + propertyMetadata.getField().set(instance, value); + } + } + } catch (Exception e) { + throw setError(e); + } + } + + PropertyMetadata getPropertyMetadata() { + return propertyMetadata; + } + + private CodecConfigurationException getError(final Exception cause) { + return new CodecConfigurationException(format("Unable to get value for property '%s' in %s", propertyMetadata.getName(), + propertyMetadata.getDeclaringClassName()), cause); + } + + private CodecConfigurationException setError(final Exception cause) { + return new CodecConfigurationException(format("Unable to set value for property '%s' in %s", propertyMetadata.getName(), + propertyMetadata.getDeclaringClassName()), cause); + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/PropertyCodecProvider.java b/bson/src/main/org/bson/codecs/pojo/PropertyCodecProvider.java new file mode 100644 index 00000000000..765a88400aa --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/PropertyCodecProvider.java @@ -0,0 +1,41 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.bson.codecs.pojo; + +import org.bson.codecs.Codec; + +/** + * A variant of {@link org.bson.codecs.configuration.CodecProvider} that generates codecs for {@link PojoCodec}. + * + *

This is a specialized codec provider that retrieves codecs which account for type parameters associated with + * a property. In particular this should only be used to add support for custom container types like optionals. + * It's only applicable for use by {@link PojoCodec} registered through {@link PojoCodecProvider#builder()}. + * + * @since 3.6 + */ +public interface PropertyCodecProvider { + + /** + * Get a {@code Codec} using the given context, which includes, most importantly, the class and bound type parameters + * for which a {@code Codec} is required. + * + * @param type the class and bound type parameters for which to get a Codec + * @param registry the registry to use for resolving dependent Codec instances + * @param the type of the class for which a Codec is required + * @return the Codec instance, which may be null, if this source is unable to provide one for the requested Class + */ + Codec get(TypeWithTypeParameters type, PropertyCodecRegistry registry); +} diff --git a/bson/src/main/org/bson/codecs/pojo/PropertyCodecRegistry.java b/bson/src/main/org/bson/codecs/pojo/PropertyCodecRegistry.java new file mode 100644 index 00000000000..7b91628b987 --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/PropertyCodecRegistry.java @@ -0,0 +1,41 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.bson.codecs.pojo; + +import org.bson.codecs.Codec; +import org.bson.codecs.configuration.CodecConfigurationException; + +/** + * A variant of {@link org.bson.codecs.configuration.CodecRegistry} that generates codecs for {@link PojoCodec}. + * + *

This is a specialized codec registry that retrieves codecs which account for type parameters associated with + * a property. In particular this should only be used to add support for custom container types like optionals. + * It's only applicable for use by {@link PojoCodec} registered through {@link PojoCodecProvider#builder()}. + * + * @since 3.6 + */ +public interface PropertyCodecRegistry { + + /** + * Gets a {@code Codec} for the given Class. + * + * @param type the Class associated type parameters for this property for which to get a Codec + * @param the class type + * @return a codec for the given class + * @throws CodecConfigurationException if the registry does not contain a codec for the given class. + */ + Codec get(TypeWithTypeParameters type); +} diff --git a/bson/src/main/org/bson/codecs/pojo/PropertyCodecRegistryImpl.java b/bson/src/main/org/bson/codecs/pojo/PropertyCodecRegistryImpl.java new file mode 100644 index 00000000000..b6b27626ac2 --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/PropertyCodecRegistryImpl.java @@ -0,0 +1,60 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo; + +import org.bson.codecs.Codec; +import org.bson.codecs.configuration.CodecRegistry; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; + +class PropertyCodecRegistryImpl implements PropertyCodecRegistry { + private final List propertyCodecProviders; + private final ConcurrentHashMap, Codec> propertyCodecCache; + + PropertyCodecRegistryImpl(final PojoCodec pojoCodec, final CodecRegistry codecRegistry, + final List propertyCodecProviders) { + List augmentedProviders = new ArrayList<>(); + if (propertyCodecProviders != null) { + augmentedProviders.addAll(propertyCodecProviders); + } + augmentedProviders.add(new CollectionPropertyCodecProvider()); + augmentedProviders.add(new MapPropertyCodecProvider()); + augmentedProviders.add(new EnumPropertyCodecProvider(codecRegistry)); + augmentedProviders.add(new FallbackPropertyCodecProvider(pojoCodec, codecRegistry)); + this.propertyCodecProviders = augmentedProviders; + this.propertyCodecCache = new ConcurrentHashMap<>(); + } + + @SuppressWarnings("unchecked") + @Override + public Codec get(final TypeWithTypeParameters typeWithTypeParameters) { + if (propertyCodecCache.containsKey(typeWithTypeParameters)) { + return (Codec) propertyCodecCache.get(typeWithTypeParameters); + } + + for (PropertyCodecProvider propertyCodecProvider : propertyCodecProviders) { + Codec codec = propertyCodecProvider.get(typeWithTypeParameters, this); + if (codec != null) { + propertyCodecCache.put(typeWithTypeParameters, codec); + return codec; + } + } + return null; + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/PropertyMetadata.java b/bson/src/main/org/bson/codecs/pojo/PropertyMetadata.java new file mode 100644 index 00000000000..69530ad3b4e --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/PropertyMetadata.java @@ -0,0 
+1,199 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo; + +import org.bson.codecs.configuration.CodecConfigurationException; + +import java.lang.annotation.Annotation; +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static java.lang.String.format; +import static java.lang.reflect.Modifier.isFinal; +import static java.lang.reflect.Modifier.isPublic; +import static java.lang.reflect.Modifier.isStatic; +import static java.lang.reflect.Modifier.isTransient; + +final class PropertyMetadata { + private static final TypeData VOID_TYPE_DATA = TypeData.builder(Void.class).build(); + private final String name; + private final String declaringClassName; + private final TypeData typeData; + private final Map, Annotation> readAnnotations = new HashMap<>(); + private final Map, Annotation> writeAnnotations = new HashMap<>(); + private TypeParameterMap typeParameterMap; + private List> typeParameters; + + private String error; + private Field field; + private Method getter; + private Method setter; + + PropertyMetadata(final String name, final String declaringClassName, final TypeData typeData) { + this.name = name; + this.declaringClassName = declaringClassName; + this.typeData = typeData; + } + + public String getName() { + return name; + } + + public List getReadAnnotations() { + return new ArrayList<>(readAnnotations.values()); + } + + public PropertyMetadata addReadAnnotation(final Annotation annotation) { + if (readAnnotations.containsKey(annotation.annotationType())) { + if (annotation.equals(readAnnotations.get(annotation.annotationType()))) { + return this; + } + throw new CodecConfigurationException(format("Read annotation %s for '%s' already exists in %s", annotation.annotationType(), + name, declaringClassName)); + } + readAnnotations.put(annotation.annotationType(), annotation); + return this; + } + + public List getWriteAnnotations() { + return new ArrayList<>(writeAnnotations.values()); + } + + public PropertyMetadata addWriteAnnotation(final Annotation annotation) { + if (writeAnnotations.containsKey(annotation.annotationType())) { + if (annotation.equals(writeAnnotations.get(annotation.annotationType()))) { + return this; + } + throw new CodecConfigurationException(format("Write annotation %s for '%s' already exists in %s", annotation.annotationType(), + name, declaringClassName)); + } + writeAnnotations.put(annotation.annotationType(), annotation); + return this; + } + + public Field getField() { + return field; + } + + public PropertyMetadata field(final Field field) { + this.field = field; + return this; + } + + public Method getGetter() { + return getter; + } + + public void setGetter(final Method getter) { + this.getter = getter; + } + + public Method getSetter() { + return setter; + } + + public void setSetter(final Method setter) { + this.setter = setter; + } 
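As an illustration of the serializability rules implemented just below, consider a hypothetical bean (not driver code): a getter with no setter over a final field yields a property that can be encoded but never decoded.

    // "id" is serializable (public getter) but not deserializable (no setter, final field).
    public final class ReadOnlyBean {
        private final String id = "fixed";

        public String getId() {
            return id;
        }
    }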
+ + public String getDeclaringClassName() { + return declaringClassName; + } + + public TypeData getTypeData() { + return typeData; + } + + public TypeParameterMap getTypeParameterMap() { + return typeParameterMap; + } + + public List> getTypeParameters() { + return typeParameters; + } + + public PropertyMetadata typeParameterInfo(final TypeParameterMap typeParameterMap, final TypeData parentTypeData) { + if (typeParameterMap != null && parentTypeData != null) { + this.typeParameterMap = typeParameterMap; + this.typeParameters = parentTypeData.getTypeParameters(); + } + return this; + } + + String getError() { + return error; + } + + void setError(final String error) { + this.error = error; + } + + public boolean isSerializable() { + if (isVoidType()) { + return false; + } + if (getter != null) { + return field == null || notStaticOrTransient(field.getModifiers()); + } else { + return field != null && isPublicAndNotStaticOrTransient(field.getModifiers()); + } + } + + public boolean isDeserializable() { + if (isVoidType()) { + return false; + } + if (setter != null) { + return field == null || !isFinal(field.getModifiers()) && notStaticOrTransient(field.getModifiers()); + } else { + return field != null && !isFinal(field.getModifiers()) && isPublicAndNotStaticOrTransient(field.getModifiers()); + } + } + + private boolean isVoidType() { + return VOID_TYPE_DATA.equals(typeData); + } + + private boolean notStaticOrTransient(final int modifiers) { + return !(isTransient(modifiers) || isStatic(modifiers)); + } + + private boolean isPublicAndNotStaticOrTransient(final int modifiers) { + return isPublic(modifiers) && notStaticOrTransient(modifiers); + } + + @Override + public String toString() { + return "PropertyMetadata{" + + "name='" + name + '\'' + + ", declaringClassName='" + declaringClassName + '\'' + + ", typeData=" + typeData + + ", readAnnotations=" + readAnnotations + + ", writeAnnotations=" + writeAnnotations + + ", typeParameterMap=" + typeParameterMap + + ", typeParameters=" + typeParameters + + ", error='" + error + '\'' + + ", field=" + field + + ", getter=" + getter + + ", setter=" + setter + + '}'; + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/PropertyModel.java b/bson/src/main/org/bson/codecs/pojo/PropertyModel.java new file mode 100644 index 00000000000..5e6079795ff --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/PropertyModel.java @@ -0,0 +1,247 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo; + +import org.bson.BsonType; +import org.bson.codecs.Codec; + +import java.util.Objects; + +/** + * Represents a property on a class and stores various metadata such as generic parameters + * + * @param the type of the property that the PropertyModel represents. 
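A brief look-up sketch for the model described here; Person is hypothetical, and ClassModel.builder(...) is the public entry point:

    import org.bson.codecs.pojo.ClassModel;
    import org.bson.codecs.pojo.PropertyModel;

    public final class PropertyModelLookupExample {
        public static void main(final String[] args) {
            ClassModel<Person> classModel = ClassModel.builder(Person.class).build();
            // The property name is derived from the getter/setter pair via the JavaBean convention.
            PropertyModel<?> nameModel = classModel.getPropertyModel("name");
            System.out.println(nameModel.getReadName() + " <-> " + nameModel.getWriteName());
        }

        public static final class Person {
            private String name;
            public String getName() { return name; }
            public void setName(final String name) { this.name = name; }
        }
    }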
+ * @since 3.5 + */ +public final class PropertyModel { + private final String name; + private final String readName; + private final String writeName; + private final TypeData typeData; + private final Codec codec; + private final PropertySerialization propertySerialization; + private final Boolean useDiscriminator; + private final PropertyAccessor propertyAccessor; + private final String error; + private volatile Codec cachedCodec; + private final BsonType bsonRepresentation; + + PropertyModel(final String name, final String readName, final String writeName, final TypeData typeData, + final Codec codec, final PropertySerialization propertySerialization, final Boolean useDiscriminator, + final PropertyAccessor propertyAccessor, final String error, final BsonType bsonRepresentation) { + this.name = name; + this.readName = readName; + this.writeName = writeName; + this.typeData = typeData; + this.codec = codec; + this.cachedCodec = codec; + this.propertySerialization = propertySerialization; + this.useDiscriminator = useDiscriminator; + this.propertyAccessor = propertyAccessor; + this.error = error; + this.bsonRepresentation = bsonRepresentation; + } + + /** + * Create a new {@link PropertyModelBuilder} + * @param the type of the property + * @return the builder + */ + public static PropertyModelBuilder builder() { + return new PropertyModelBuilder<>(); + } + + /** + * @return the property name for the model + */ + public String getName() { + return name; + } + + /** + * @return the name of the property to use as the key when deserializing from BSON + */ + public String getWriteName() { + return writeName; + } + + /** + * @return the name of the property to use as the key when serializing into BSON + */ + public String getReadName() { + return readName; + } + + /** + * Property is writable. + * + * @return true if can be deserialized from BSON + */ + public boolean isWritable() { + return writeName != null; + } + + /** + * Property is readable. + * + * @return true if can be serialized to BSON + */ + public boolean isReadable() { + return readName != null; + } + + /** + * @return the type data for the property + */ + public TypeData getTypeData() { + return typeData; + } + + /** + * @return the custom codec to use if set or null + */ + public Codec getCodec() { + return codec; + } + + /** + * @return the BsonRepresentation of the field + * + * @since 4.2 + */ + public BsonType getBsonRepresentation() { + return bsonRepresentation; + } + + /** + * Returns true if the value should be serialized. + * + * @param value the value to check + * @return true if the value should be serialized. + */ + public boolean shouldSerialize(final T value) { + return propertySerialization.shouldSerialize(value); + } + + /** + * @return the property accessor + */ + public PropertyAccessor getPropertyAccessor() { + return propertyAccessor; + } + + /** + * @return true or false if a discriminator should be used when serializing or null if not set + */ + public Boolean useDiscriminator() { + return useDiscriminator; + } + + @Override + public String toString() { + return "PropertyModel{" + + "propertyName='" + name + "'" + + ", readName='" + readName + "'" + + ", writeName='" + writeName + "'" + + ", typeData=" + typeData + + "}"; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + PropertyModel that = (PropertyModel) o; + + if (getName() != null ? 
!getName().equals(that.getName()) : that.getName() != null) { + return false; + } + if (getReadName() != null ? !getReadName().equals(that.getReadName()) : that.getReadName() != null) { + return false; + } + if (getWriteName() != null ? !getWriteName().equals(that.getWriteName()) : that.getWriteName() != null) { + return false; + } + if (getTypeData() != null ? !getTypeData().equals(that.getTypeData()) : that.getTypeData() != null) { + return false; + } + if (getCodec() != null ? !getCodec().equals(that.getCodec()) : that.getCodec() != null) { + return false; + } + if (getPropertySerialization() != null ? !getPropertySerialization().equals(that.getPropertySerialization()) : that + .getPropertySerialization() != null) { + return false; + } + if (!Objects.equals(useDiscriminator, that.useDiscriminator)) { + return false; + } + if (getPropertyAccessor() != null ? !getPropertyAccessor().equals(that.getPropertyAccessor()) + : that.getPropertyAccessor() != null) { + return false; + } + + if (getError() != null ? !getError().equals(that.getError()) : that.getError() != null) { + return false; + } + + if (getCachedCodec() != null ? !getCachedCodec().equals(that.getCachedCodec()) : that.getCachedCodec() != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = getName() != null ? getName().hashCode() : 0; + result = 31 * result + (getReadName() != null ? getReadName().hashCode() : 0); + result = 31 * result + (getWriteName() != null ? getWriteName().hashCode() : 0); + result = 31 * result + (getTypeData() != null ? getTypeData().hashCode() : 0); + result = 31 * result + (getCodec() != null ? getCodec().hashCode() : 0); + result = 31 * result + (getPropertySerialization() != null ? getPropertySerialization().hashCode() : 0); + result = 31 * result + (useDiscriminator != null ? useDiscriminator.hashCode() : 0); + result = 31 * result + (getPropertyAccessor() != null ? getPropertyAccessor().hashCode() : 0); + result = 31 * result + (getError() != null ? getError().hashCode() : 0); + result = 31 * result + (getCachedCodec() != null ? getCachedCodec().hashCode() : 0); + return result; + } + + boolean hasError() { + return error != null; + } + + String getError() { + return error; + } + + PropertySerialization getPropertySerialization() { + return propertySerialization; + } + + void cachedCodec(final Codec codec) { + this.cachedCodec = codec; + } + + Codec getCachedCodec() { + return cachedCodec; + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/PropertyModelBuilder.java b/bson/src/main/org/bson/codecs/pojo/PropertyModelBuilder.java new file mode 100644 index 00000000000..084e3908798 --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/PropertyModelBuilder.java @@ -0,0 +1,302 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs.pojo; + +import org.bson.BsonType; +import org.bson.codecs.Codec; + +import java.lang.annotation.Annotation; +import java.util.List; + +import static java.lang.String.format; +import static java.util.Collections.emptyList; +import static java.util.Collections.unmodifiableList; +import static org.bson.assertions.Assertions.notNull; +import static org.bson.codecs.pojo.PojoBuilderHelper.stateNotNull; + +/** + * A builder for programmatically creating {@code PropertyModels}. + * + * @param the type of the property + * @since 3.5 + * @see PropertyModel + */ +public final class PropertyModelBuilder { + private String name; + private String readName; + private String writeName; + private TypeData typeData; + private PropertySerialization propertySerialization; + private Codec codec; + private PropertyAccessor propertyAccessor; + private List readAnnotations = emptyList(); + private List writeAnnotations = emptyList(); + private Boolean discriminatorEnabled; + private String error; + private BsonType bsonRepresentation; + + PropertyModelBuilder() { + } + + /** + * @return the property name + */ + public String getName() { + return name; + } + + /** + * @return the name of the property to use as the key when deserializing the data from BSON. + */ + public String getReadName() { + return readName; + } + + /** + * Sets the readName, the key for this property when deserializing the data from BSON. + * + *

<p>Note: A null means this property will not be used when deserializing.</p>

+ * + * @param readName the name of the property to use as the key when deserializing the data from BSON. + * @return this + */ + public PropertyModelBuilder readName(final String readName) { + this.readName = readName; + return this; + } + + /** + * @return the name of the property to use as the key when serializing the data into BSON. + */ + public String getWriteName() { + return writeName; + } + + /** + * Sets the writeName, the key for this property when serializing the data into BSON. + * + *

<p>Note: A null means this property will not be serialized.</p>
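Combining the two names, a renaming sketch in which Person is hypothetical: giving readName and writeName the same key renames the property symmetrically in BSON.

    import org.bson.codecs.pojo.ClassModel;
    import org.bson.codecs.pojo.ClassModelBuilder;

    public final class RenamedPropertyExample {
        public static ClassModel<Person> model() {
            ClassModelBuilder<Person> builder = ClassModel.builder(Person.class);
            // Store the Java property "name" under the BSON key "n" in both directions.
            builder.getProperty("name").readName("n").writeName("n");
            return builder.build();
        }

        public static final class Person {
            private String name;
            public String getName() { return name; }
            public void setName(final String name) { this.name = name; }
        }
    }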

+ * + * @param writeName the name of the property to use as the key when serializing the data into BSON. + * @return this + */ + public PropertyModelBuilder writeName(final String writeName) { + this.writeName = writeName; + return this; + } + + /** + * Sets a custom codec for the property + * + * @param codec the custom codec for the property + * @return this + */ + public PropertyModelBuilder codec(final Codec codec) { + this.codec = codec; + return this; + } + + /** + * @return the custom codec to use if set or null + */ + Codec getCodec() { + return codec; + } + + /** + * Sets the {@link PropertySerialization} checker + * + * @param propertySerialization checks if a property should be serialized + * @return this + */ + public PropertyModelBuilder propertySerialization(final PropertySerialization propertySerialization) { + this.propertySerialization = notNull("propertySerialization", propertySerialization); + return this; + } + + /** + * @return the {@link PropertySerialization} checker + */ + public PropertySerialization getPropertySerialization() { + return propertySerialization; + } + + /** + * Returns the read annotations, to be applied when serializing to BSON + * + * @return the read annotations + */ + public List getReadAnnotations() { + return readAnnotations; + } + + /** + * Sets the read annotations, to be applied when serializing to BSON + * + * @param annotations the read annotations + * @return this + */ + public PropertyModelBuilder readAnnotations(final List annotations) { + this.readAnnotations = unmodifiableList(notNull("annotations", annotations)); + return this; + } + + /** + * Returns the write annotations, to be applied when deserializing from BSON + * + * @return the write annotations + */ + public List getWriteAnnotations() { + return writeAnnotations; + } + + /** + * Sets the writeAnnotations, to be applied when deserializing from BSON + * + * @param writeAnnotations the writeAnnotations + * @return this + */ + public PropertyModelBuilder writeAnnotations(final List writeAnnotations) { + this.writeAnnotations = writeAnnotations; + return this; + } + + /** + * Property is writable. + * + * @return true if can be deserialized from BSON + */ + public boolean isWritable() { + return writeName != null; + } + + /** + * Property is readable. 
+ * + * @return true if can be serialized to BSON + */ + public boolean isReadable() { + return readName != null; + } + + /** + * @return true or false if a discriminator should be used when serializing or null if not set + */ + public Boolean isDiscriminatorEnabled() { + return discriminatorEnabled; + } + + /** + * Enables or disables the use of a discriminator when serializing + * + * @param discriminatorEnabled the useDiscriminator value + * @return this + */ + public PropertyModelBuilder discriminatorEnabled(final boolean discriminatorEnabled) { + this.discriminatorEnabled = discriminatorEnabled; + return this; + } + + /** + * Returns the {@link PropertyAccessor} + * + * @return the PropertyAccessor + */ + public PropertyAccessor getPropertyAccessor() { + return propertyAccessor; + } + + /** + * Sets the {@link PropertyAccessor} + * + * @param propertyAccessor the PropertyAccessor + * @return this + */ + public PropertyModelBuilder propertyAccessor(final PropertyAccessor propertyAccessor) { + this.propertyAccessor = propertyAccessor; + return this; + } + + /** + * Returns the BsonRepresentation + * + * @return the BsonRepresentation + * @since 4.2 + */ + public BsonType getBsonRepresentation() { + return bsonRepresentation; + } + + /** + * Sets the BsonRepresentation + * + * @param bsonRepresentation the BsonRepresentation + * @return this + * @since 4.2 + */ + public PropertyModelBuilder bsonRepresentation(final BsonType bsonRepresentation) { + this.bsonRepresentation = bsonRepresentation; + return this; + } + + /** + * Creates the {@link PropertyModel}. + * + * @return the PropertyModel + */ + @SuppressWarnings({"rawtypes", "unchecked"}) + public PropertyModel build() { + if (!isReadable() && !isWritable()) { + throw new IllegalStateException(format("Invalid PropertyModel '%s', neither readable or writable,", name)); + } + return new PropertyModel( + stateNotNull("propertyName", name), + readName, + writeName, + stateNotNull("typeData", typeData), + codec, + stateNotNull("propertySerialization", propertySerialization), + discriminatorEnabled, + stateNotNull("propertyAccessor", propertyAccessor), + error, + bsonRepresentation); + } + + @Override + public String toString() { + return format("PropertyModelBuilder{propertyName=%s, typeData=%s}", name, typeData); + } + + PropertyModelBuilder propertyName(final String propertyName) { + this.name = notNull("propertyName", propertyName); + return this; + } + + TypeData getTypeData() { + return typeData; + } + + PropertyModelBuilder typeData(final TypeData typeData) { + this.typeData = notNull("typeData", typeData); + return this; + } + + PropertyModelBuilder setError(final String error) { + this.error = error; + return this; + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/PropertyModelSerializationImpl.java b/bson/src/main/org/bson/codecs/pojo/PropertyModelSerializationImpl.java new file mode 100644 index 00000000000..41f44b4a570 --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/PropertyModelSerializationImpl.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo; + +class PropertyModelSerializationImpl implements PropertySerialization { + + PropertyModelSerializationImpl() { + } + + @Override + public boolean shouldSerialize(final T value) { + return value != null; + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/PropertyModelSerializationInlineImpl.java b/bson/src/main/org/bson/codecs/pojo/PropertyModelSerializationInlineImpl.java new file mode 100644 index 00000000000..77d064af16f --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/PropertyModelSerializationInlineImpl.java @@ -0,0 +1,36 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo; + +class PropertyModelSerializationInlineImpl implements PropertySerialization { + + private final PropertySerialization wrapped; + + PropertyModelSerializationInlineImpl(final PropertySerialization wrapped) { + this.wrapped = wrapped; + } + + @Override + public boolean shouldSerialize(final T value) { + return wrapped.shouldSerialize(value); + } + + @Override + public boolean inline() { + return true; + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/PropertyReflectionUtils.java b/bson/src/main/org/bson/codecs/pojo/PropertyReflectionUtils.java new file mode 100644 index 00000000000..6889a6579f8 --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/PropertyReflectionUtils.java @@ -0,0 +1,113 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs.pojo; + +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +import static java.lang.reflect.Modifier.isPublic; +import static java.lang.reflect.Modifier.isStatic; + +final class PropertyReflectionUtils { + private PropertyReflectionUtils() {} + + private static final String IS_PREFIX = "is"; + private static final String GET_PREFIX = "get"; + private static final String SET_PREFIX = "set"; + + static boolean isGetter(final Method method) { + if (method.getParameterCount() > 0) { + return false; + } else if (method.getName().startsWith(GET_PREFIX) && method.getName().length() > GET_PREFIX.length()) { + return Character.isUpperCase(method.getName().charAt(GET_PREFIX.length())); + } else if (method.getName().startsWith(IS_PREFIX) && method.getName().length() > IS_PREFIX.length()) { + return Character.isUpperCase(method.getName().charAt(IS_PREFIX.length())); + } + return false; + } + + static boolean isSetter(final Method method) { + if (method.getName().startsWith(SET_PREFIX) && method.getName().length() > SET_PREFIX.length() + && method.getParameterCount() == 1) { + return Character.isUpperCase(method.getName().charAt(SET_PREFIX.length())); + } + return false; + } + + static String toPropertyName(final Method method) { + String name = method.getName(); + String propertyName = name.substring(name.startsWith(IS_PREFIX) ? 2 : 3); + char[] chars = propertyName.toCharArray(); + chars[0] = Character.toLowerCase(chars[0]); + return new String(chars); + } + + static PropertyMethods getPropertyMethods(final Class clazz) { + List setters = new ArrayList<>(); + List getters = new ArrayList<>(); + + // get all the default method from interface + for (Class i : clazz.getInterfaces()) { + for (Method method : i.getDeclaredMethods()) { + if (method.isDefault()) { + verifyAddMethodToList(method, getters, setters); + } + } + } + + for (Method method : clazz.getDeclaredMethods()) { + verifyAddMethodToList(method, getters, setters); + } + + return new PropertyMethods(getters, setters); + } + + private static void verifyAddMethodToList(final Method method, final List getters, final List setters) { + // Note that if you override a getter to provide a more specific return type, getting the declared methods + // on the subclass will return the overridden method as well as the method that was overridden from + // the super class. 
This original method is copied over into the subclass as a bridge method, so we're + // excluding them here to avoid multiple getters of the same property with different return types + if (isPublic(method.getModifiers()) && !isStatic(method.getModifiers()) && !method.isBridge()) { + if (isGetter(method)) { + getters.add(method); + } else if (isSetter(method)) { + // Setters are a bit more tricky - don't do anything fancy here + setters.add(method); + } + } + } + + static class PropertyMethods { + private final Collection getterMethods; + private final Collection setterMethods; + + PropertyMethods(final Collection getterMethods, final Collection setterMethods) { + this.getterMethods = getterMethods; + this.setterMethods = setterMethods; + } + + Collection getGetterMethods() { + return getterMethods; + } + + Collection getSetterMethods() { + return setterMethods; + } + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/PropertySerialization.java b/bson/src/main/org/bson/codecs/pojo/PropertySerialization.java new file mode 100644 index 00000000000..471be733c59 --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/PropertySerialization.java @@ -0,0 +1,42 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo; + +/** + * An interface allowing a {@link PropertyModel} to determine if a value should be serialized. + * + * @param the type of the property. + * @since 3.5 + */ +public interface PropertySerialization { + + /** + * Determines if a value should be serialized + * + * @param value the value to check + * @return true if the value should be serialized + */ + boolean shouldSerialize(T value); + + /** + * @return true if serialized inline + * @since 4.6 + */ + default boolean inline() { + return false; + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/TypeData.java b/bson/src/main/org/bson/codecs/pojo/TypeData.java new file mode 100644 index 00000000000..aebdba4c08f --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/TypeData.java @@ -0,0 +1,242 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs.pojo; + +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.lang.reflect.ParameterizedType; +import java.lang.reflect.Type; +import java.lang.reflect.TypeVariable; +import java.lang.reflect.WildcardType; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static java.lang.String.format; +import static org.bson.assertions.Assertions.notNull; +import static org.bson.codecs.pojo.PropertyReflectionUtils.isGetter; + + +final class TypeData implements TypeWithTypeParameters { + private final Class type; + private final List> typeParameters; + + /** + * Creates a new builder for ClassTypeData + * + * @param type the class for the type + * @param the type + * @return the builder + */ + public static Builder builder(final Class type) { + return new Builder<>(notNull("type", type)); + } + + public static TypeData newInstance(final Method method) { + if (isGetter(method)) { + return newInstance(method.getGenericReturnType(), method.getReturnType()); + } else { + return newInstance(method.getGenericParameterTypes()[0], method.getParameterTypes()[0]); + } + } + + public static TypeData newInstance(final Field field) { + return newInstance(field.getGenericType(), field.getType()); + } + + public static TypeData newInstance(final Type genericType, final Class clazz) { + TypeData.Builder builder = TypeData.builder(clazz); + if (genericType instanceof ParameterizedType) { + ParameterizedType pType = (ParameterizedType) genericType; + for (Type argType : pType.getActualTypeArguments()) { + getNestedTypeData(builder, argType); + } + } + return builder.build(); + } + + @SuppressWarnings({"unchecked", "rawtypes"}) + private static void getNestedTypeData(final TypeData.Builder builder, final Type type) { + if (type instanceof ParameterizedType) { + ParameterizedType pType = (ParameterizedType) type; + TypeData.Builder paramBuilder = TypeData.builder((Class) pType.getRawType()); + for (Type argType : pType.getActualTypeArguments()) { + getNestedTypeData(paramBuilder, argType); + } + builder.addTypeParameter(paramBuilder.build()); + } else if (type instanceof WildcardType) { + builder.addTypeParameter(TypeData.builder((Class) ((WildcardType) type).getUpperBounds()[0]).build()); + } else if (type instanceof TypeVariable) { + builder.addTypeParameter(TypeData.builder(Object.class).build()); + } else if (type instanceof Class) { + builder.addTypeParameter(TypeData.builder((Class) type).build()); + } + } + + /** + * @return the class this {@code ClassTypeData} represents + */ + @Override + public Class getType() { + return type; + } + + /** + * @return the type parameters for the class + */ + @Override + public List> getTypeParameters() { + return typeParameters; + } + + /** + * A builder for TypeData + * + * @param the main type + */ + public static final class Builder { + private final Class type; + private final List> typeParameters = new ArrayList<>(); + + private Builder(final Class type) { + this.type = type; + } + + /** + * Adds a type parameter + * + * @param typeParameter the type parameter + * @param the type of the type parameter + * @return this + */ + public Builder addTypeParameter(final TypeData typeParameter) { + typeParameters.add(notNull("typeParameter", typeParameter)); + return this; + } + + /** + * Adds multiple type parameters + * + * @param typeParameters the type parameters + * @return this + */ + public Builder addTypeParameters(final List> 
typeParameters) { + notNull("typeParameters", typeParameters); + for (TypeData typeParameter : typeParameters) { + addTypeParameter(typeParameter); + } + return this; + } + + /** + * @return the class type data + */ + public TypeData build() { + return new TypeData<>(type, Collections.unmodifiableList(typeParameters)); + } + } + + @Override + public String toString() { + String typeParams = typeParameters.isEmpty() ? "" + : ", typeParameters=[" + nestedTypeParameters(typeParameters) + "]"; + return "TypeData{" + + "type=" + type.getSimpleName() + + typeParams + + "}"; + } + + private static String nestedTypeParameters(final List> typeParameters) { + StringBuilder builder = new StringBuilder(); + int count = 0; + int last = typeParameters.size(); + for (TypeData typeParameter : typeParameters) { + count++; + builder.append(typeParameter.getType().getSimpleName()); + if (!typeParameter.getTypeParameters().isEmpty()) { + builder.append(format("<%s>", nestedTypeParameters(typeParameter.getTypeParameters()))); + } + if (count < last) { + builder.append(", "); + } + } + return builder.toString(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (!(o instanceof TypeData)) { + return false; + } + + TypeData that = (TypeData) o; + + if (!getType().equals(that.getType())) { + return false; + } + if (!getTypeParameters().equals(that.getTypeParameters())) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = getType().hashCode(); + result = 31 * result + getTypeParameters().hashCode(); + return result; + } + + private TypeData(final Class type, final List> typeParameters) { + this.type = boxType(type); + this.typeParameters = typeParameters; + } + + boolean isAssignableFrom(final Class cls) { + return type.isAssignableFrom(boxType(cls)); + } + + @SuppressWarnings("unchecked") + private Class boxType(final Class clazz) { + if (clazz.isPrimitive()) { + return (Class) PRIMITIVE_CLASS_MAP.get(clazz); + } else { + return clazz; + } + } + + private static final Map, Class> PRIMITIVE_CLASS_MAP; + static { + Map, Class> map = new HashMap<>(); + map.put(boolean.class, Boolean.class); + map.put(byte.class, Byte.class); + map.put(char.class, Character.class); + map.put(double.class, Double.class); + map.put(float.class, Float.class); + map.put(int.class, Integer.class); + map.put(long.class, Long.class); + map.put(short.class, Short.class); + map.put(void.class, Void.class); + PRIMITIVE_CLASS_MAP = map; + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/TypeParameterMap.java b/bson/src/main/org/bson/codecs/pojo/TypeParameterMap.java new file mode 100644 index 00000000000..7da10b83c81 --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/TypeParameterMap.java @@ -0,0 +1,144 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
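Example (editor's sketch, not part of this patch): how TypeData.newInstance unrolls a generic field. TypeData is package-private, so the sketch assumes it sits alongside it in org.bson.codecs.pojo; the generic angle brackets lost in this rendering are restored.

package org.bson.codecs.pojo;

import java.lang.reflect.Field;
import java.util.List;
import java.util.Map;

final class TypeDataExample {
    Map<String, List<Integer>> scores; // a field with nested type parameters

    public static void main(final String[] args) throws NoSuchFieldException {
        Field field = TypeDataExample.class.getDeclaredField("scores");
        TypeData<?> typeData = TypeData.newInstance(field);
        // Prints: TypeData{type=Map, typeParameters=[String, List<Integer>]}
        System.out.println(typeData);
    }
}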
+ */ + +package org.bson.codecs.pojo; + +import java.util.HashMap; +import java.util.Map; + +import static java.util.Collections.unmodifiableMap; + + +/** + * Maps the index of a class's generic parameter type index to a property's. + */ +final class TypeParameterMap { + private final Map> propertyToClassParamIndexMap; + + /** + * Creates a new builder for the TypeParameterMap + * + * @return the builder + */ + static Builder builder() { + return new Builder(); + } + + /** + * Returns a mapping of property type parameter index to the class type parameter index. + * + *

<p>Note: A property index of -1 means the class's type parameter represents the whole property.</p>

+ * + * @return a mapping of property type parameter index to the class type parameter index. + */ + Map> getPropertyToClassParamIndexMap() { + return propertyToClassParamIndexMap; + } + + boolean hasTypeParameters() { + return !propertyToClassParamIndexMap.isEmpty(); + } + + /** + * A builder for mapping field type parameter indices to the class type parameter indices + */ + static final class Builder { + private final Map> propertyToClassParamIndexMap = new HashMap<>(); + + private Builder() { + } + + /** + * Adds the type parameter index for a class that represents the whole property + * + * @param classTypeParameterIndex the class's type parameter index that represents the whole field + * @return this + */ + Builder addIndex(final int classTypeParameterIndex) { + propertyToClassParamIndexMap.put(-1, Either.left(classTypeParameterIndex)); + return this; + } + + /** + * Adds a mapping that represents the property + * + * @param propertyTypeParameterIndex the property's type parameter index + * @param classTypeParameterIndex the class's type parameter index + * @return this + */ + Builder addIndex(final int propertyTypeParameterIndex, final int classTypeParameterIndex) { + propertyToClassParamIndexMap.put(propertyTypeParameterIndex, Either.left(classTypeParameterIndex)); + return this; + } + + + /** + * Adds a mapping that represents the property + * + * @param propertyTypeParameterIndex the property's type parameter index + * @param typeParameterMap the sub class's type parameter map + * @return this + */ + Builder addIndex(final int propertyTypeParameterIndex, final TypeParameterMap typeParameterMap) { + propertyToClassParamIndexMap.put(propertyTypeParameterIndex, Either.right(typeParameterMap)); + return this; + } + + /** + * @return the TypeParameterMap + */ + TypeParameterMap build() { + if (propertyToClassParamIndexMap.size() > 1 && propertyToClassParamIndexMap.containsKey(-1)) { + throw new IllegalStateException("You cannot have a generic field that also has type parameters."); + } + return new TypeParameterMap(propertyToClassParamIndexMap); + } + } + + @Override + public String toString() { + return "TypeParameterMap{" + + "fieldToClassParamIndexMap=" + propertyToClassParamIndexMap + + "}"; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + TypeParameterMap that = (TypeParameterMap) o; + + if (!getPropertyToClassParamIndexMap().equals(that.getPropertyToClassParamIndexMap())) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return getPropertyToClassParamIndexMap().hashCode(); + } + + private TypeParameterMap(final Map> propertyToClassParamIndexMap) { + this.propertyToClassParamIndexMap = unmodifiableMap(propertyToClassParamIndexMap); + } +} diff --git a/bson/src/main/org/bson/codecs/pojo/TypeWithTypeParameters.java b/bson/src/main/org/bson/codecs/pojo/TypeWithTypeParameters.java new file mode 100644 index 00000000000..4d8ae778613 --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/TypeWithTypeParameters.java @@ -0,0 +1,36 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
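Example (editor's sketch, not part of this patch): the mappings the builder above produces. Pair is a hypothetical class; TypeParameterMap is package-private, so this would also live in org.bson.codecs.pojo.

package org.bson.codecs.pojo;

final class TypeParameterMapExample {
    static void example() {
        // A property of type Map<B, A> declared inside class Pair<A, B>:
        // property parameter 0 (B) comes from class parameter 1, and
        // property parameter 1 (A) comes from class parameter 0.
        TypeParameterMap mapProperty = TypeParameterMap.builder()
                .addIndex(0, 1)
                .addIndex(1, 0)
                .build();

        // A property "A value" whose whole type is a class parameter is
        // stored under the special property index -1.
        TypeParameterMap wholeProperty = TypeParameterMap.builder()
                .addIndex(0)
                .build();

        System.out.println(mapProperty + " " + wholeProperty);
    }
}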
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.bson.codecs.pojo; + +import java.util.List; + +/** + * A combination of a type and its type parameters. + * + * @param the type which potentially has parameterized types + * @since 3.6 + */ +public interface TypeWithTypeParameters { + /** + * @return the class this {@code TypeWithTypeParameters} represents + */ + Class getType(); + + /** + * @return the type parameters for {@link #getType()} + */ + List> getTypeParameters(); +} diff --git a/bson/src/main/org/bson/codecs/pojo/annotations/BsonCreator.java b/bson/src/main/org/bson/codecs/pojo/annotations/BsonCreator.java new file mode 100644 index 00000000000..6f1627a17d5 --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/annotations/BsonCreator.java @@ -0,0 +1,36 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.annotations; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * An annotation that configures a constructor or method as the creator for the POJO. + * + *

<p>For POJOs, requires the {@link org.bson.codecs.pojo.Conventions#ANNOTATION_CONVENTION}</p>
+ * <p>For Java records, the annotation is not supported.</p>

+ * + * @since 3.5 + * @see org.bson.codecs.pojo.Conventions#ANNOTATION_CONVENTION + */ +@Retention(RetentionPolicy.RUNTIME) +@Target({ElementType.METHOD, ElementType.CONSTRUCTOR}) +public @interface BsonCreator { +} diff --git a/bson/src/main/org/bson/codecs/pojo/annotations/BsonDiscriminator.java b/bson/src/main/org/bson/codecs/pojo/annotations/BsonDiscriminator.java new file mode 100644 index 00000000000..81e3c972771 --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/annotations/BsonDiscriminator.java @@ -0,0 +1,50 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.annotations; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Inherited; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * An annotation that configures the discriminator key and value for a class. + * + *

<p>For POJOs, requires the {@link org.bson.codecs.pojo.Conventions#ANNOTATION_CONVENTION}</p>
+ * <p>For Java records, the annotation is not supported.</p>

+ * + * @since 3.5 + * @see org.bson.codecs.pojo.Conventions#ANNOTATION_CONVENTION + */ +@Inherited +@Documented +@Retention(RetentionPolicy.RUNTIME) +@Target(ElementType.TYPE) +public @interface BsonDiscriminator { + + /** + * @return the discriminator value to use for this type. + */ + String value() default ""; + + /** + * @return the discriminator key to use for this type. + */ + String key() default "_t"; +} diff --git a/bson/src/main/org/bson/codecs/pojo/annotations/BsonExtraElements.java b/bson/src/main/org/bson/codecs/pojo/annotations/BsonExtraElements.java new file mode 100644 index 00000000000..1ae25e5da3d --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/annotations/BsonExtraElements.java @@ -0,0 +1,42 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.annotations; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * An annotation that configures a property to be used as storage for any extra BSON elements that are not already mapped to other + * properties. All extra elements will be encoded from the BSON document into the annotated property, and encoded from the annotated + * property into the BSON document. + * + *
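Example (editor's sketch, not part of this patch) for the {@code @BsonDiscriminator} annotation defined above; the Cat class is hypothetical.

import org.bson.codecs.pojo.annotations.BsonDiscriminator;

// Encoded Cat documents carry {"type": "cat"}, so a codec resolved for a common
// superclass can pick the concrete class when decoding.
@BsonDiscriminator(key = "type", value = "cat")
public class Cat {
    private String name;

    public String getName() {
        return name;
    }

    public void setName(final String name) {
        this.name = name;
    }
}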

<p>Can only be used on a single field in a POJO. The field must be a {@code Map} instance, e.g. {@code Document} or
+ * {@code BsonDocument}.</p>
+ * <p>For POJOs, requires the {@link org.bson.codecs.pojo.Conventions#ANNOTATION_CONVENTION}</p>
+ * <p>For Java records, the annotation is not yet supported.</p>

+ * + * @since 4.7 + * @see org.bson.codecs.pojo.Conventions#ANNOTATION_CONVENTION + */ +@Documented +@Retention(RetentionPolicy.RUNTIME) +@Target({ElementType.METHOD, ElementType.FIELD, ElementType.PARAMETER}) +public @interface BsonExtraElements { +} diff --git a/bson/src/main/org/bson/codecs/pojo/annotations/BsonId.java b/bson/src/main/org/bson/codecs/pojo/annotations/BsonId.java new file mode 100644 index 00000000000..25049e69e5a --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/annotations/BsonId.java @@ -0,0 +1,38 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.annotations; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * An annotation that configures the property as the id property for a {@link org.bson.codecs.pojo.ClassModel} or a Java record. + * + *
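Example (editor's sketch, not part of this patch) for the {@code @BsonExtraElements} annotation defined above; FlexibleRecord is hypothetical.

import org.bson.Document;
import org.bson.codecs.pojo.annotations.BsonExtraElements;

// Any BSON fields other than "name" are collected into "extras" on decode and
// written back out on encode.
public class FlexibleRecord {
    private String name;

    @BsonExtraElements
    private Document extras = new Document();

    public String getName() {
        return name;
    }

    public void setName(final String name) {
        this.name = name;
    }

    public Document getExtras() {
        return extras;
    }

    public void setExtras(final Document extras) {
        this.extras = extras;
    }
}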

<p>For POJOs, requires the {@link org.bson.codecs.pojo.Conventions#ANNOTATION_CONVENTION}</p>
+ * <p>For Java records, the annotation is only supported on the record component.</p>

+ * + * @since 3.5 + * @see org.bson.codecs.pojo.Conventions#ANNOTATION_CONVENTION + */ +@Documented +@Retention(RetentionPolicy.RUNTIME) +@Target({ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER}) +public @interface BsonId { +} diff --git a/bson/src/main/org/bson/codecs/pojo/annotations/BsonIgnore.java b/bson/src/main/org/bson/codecs/pojo/annotations/BsonIgnore.java new file mode 100644 index 00000000000..96b91051995 --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/annotations/BsonIgnore.java @@ -0,0 +1,38 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.annotations; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * An annotation that configures a property to be ignored when reading and writing to BSON + * + *
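Example (editor's sketch, not part of this patch) for the {@code @BsonId} annotation defined above; Invoice is hypothetical.

import org.bson.codecs.pojo.annotations.BsonId;
import org.bson.types.ObjectId;

// The annotated property maps to the document's "_id" field.
public class Invoice {
    @BsonId
    private ObjectId id;

    public ObjectId getId() {
        return id;
    }

    public void setId(final ObjectId id) {
        this.id = id;
    }
}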

<p>For POJOs, requires the {@link org.bson.codecs.pojo.Conventions#ANNOTATION_CONVENTION}</p>
+ * <p>For Java records, the annotation is not supported.</p>

+ * + * @since 3.5 + * @see org.bson.codecs.pojo.Conventions#ANNOTATION_CONVENTION + */ +@Documented +@Target({ElementType.METHOD, ElementType.FIELD, ElementType.PARAMETER}) +@Retention(RetentionPolicy.RUNTIME) +public @interface BsonIgnore { +} diff --git a/bson/src/main/org/bson/codecs/pojo/annotations/BsonProperty.java b/bson/src/main/org/bson/codecs/pojo/annotations/BsonProperty.java new file mode 100644 index 00000000000..7c9c7b9c22c --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/annotations/BsonProperty.java @@ -0,0 +1,60 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.annotations; + +import org.bson.codecs.pojo.PropertyModel; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * An annotation that configures a property. + * + *
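Example (editor's sketch, not part of this patch) for the {@code @BsonIgnore} annotation defined above; Order is hypothetical.

import org.bson.codecs.pojo.annotations.BsonIgnore;

// "cachedTotal" is neither written to BSON nor populated from it.
public class Order {
    private int quantity;

    @BsonIgnore
    private int cachedTotal;

    public int getQuantity() {
        return quantity;
    }

    public void setQuantity(final int quantity) {
        this.quantity = quantity;
    }

    public int getCachedTotal() {
        return cachedTotal;
    }

    public void setCachedTotal(final int cachedTotal) {
        this.cachedTotal = cachedTotal;
    }
}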

<p>For POJOs, requires the {@link org.bson.codecs.pojo.Conventions#ANNOTATION_CONVENTION}</p>
+ * <p>For Java records, the annotation is only supported on the record component.</p>

+ * + * @since 3.5 + * @see org.bson.codecs.pojo.Conventions#ANNOTATION_CONVENTION + */ +@Documented +@Target({ElementType.METHOD, ElementType.FIELD, ElementType.PARAMETER}) +@Retention(RetentionPolicy.RUNTIME) +public @interface BsonProperty { + /** + * The name of the property. + * + *

+ * <p>
+ * Note: regarding POJOs, for asymmetrical property names the context of the {@code BsonProperty} can be important.
+ * For example, when used with {@code @BsonCreator} the value relates to the read name.
+ * When used directly on a field, it sets both the read name and the write name, where either is unset.
+ * </p>

+ * + * @return the name to use for the property + * @see PropertyModel#getWriteName() + * @see PropertyModel#getReadName() + */ + String value() default ""; + + /** + * @return whether to include a discriminator when serializing nested Pojos. + */ + boolean useDiscriminator() default false; +} diff --git a/bson/src/main/org/bson/codecs/pojo/annotations/BsonRepresentation.java b/bson/src/main/org/bson/codecs/pojo/annotations/BsonRepresentation.java new file mode 100644 index 00000000000..465e64d016f --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/annotations/BsonRepresentation.java @@ -0,0 +1,47 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.annotations; + +import org.bson.BsonType; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * An annotation that specifies what type the property is stored as in the database. + * + *
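Example (editor's sketch, not part of this patch) combining the {@code @BsonProperty} annotation defined above with {@code @BsonCreator}; Coordinates and the field names are hypothetical.

import org.bson.codecs.pojo.annotations.BsonCreator;
import org.bson.codecs.pojo.annotations.BsonProperty;

// The annotated constructor instantiates the POJO on decode; @BsonProperty on a
// parameter names the BSON field that supplies the argument (the read name), and
// on the getter it names the field written on encode.
public class Coordinates {
    private final double latitude;
    private final double longitude;

    @BsonCreator
    public Coordinates(@BsonProperty("lat") final double latitude,
                       @BsonProperty("lng") final double longitude) {
        this.latitude = latitude;
        this.longitude = longitude;
    }

    @BsonProperty("lat")
    public double getLatitude() {
        return latitude;
    }

    @BsonProperty("lng")
    public double getLongitude() {
        return longitude;
    }
}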

<p>For POJOs, requires the {@link org.bson.codecs.pojo.Conventions#ANNOTATION_CONVENTION}</p>
+ * <p>For Java records, the annotation is only supported on the record component.</p>
+ * <p>For Kotlin data classes, the annotation is only supported on the constructor parameter.</p>

+ * + * @since 4.2 + * @see org.bson.codecs.pojo.Conventions#ANNOTATION_CONVENTION + */ +@Documented +@Retention(RetentionPolicy.RUNTIME) +@Target({ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER}) +public @interface BsonRepresentation { + /** + * The type that the property is stored as in the database. + * + * @return the type that the property should be stored as. + */ + BsonType value(); +} diff --git a/bson/src/main/org/bson/codecs/pojo/annotations/package-info.java b/bson/src/main/org/bson/codecs/pojo/annotations/package-info.java new file mode 100644 index 00000000000..0a5f54f8046 --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/annotations/package-info.java @@ -0,0 +1,21 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package defines various annotations used by the driver provided when used in conjunction with the + * {@link org.bson.codecs.pojo.Conventions#ANNOTATION_CONVENTION}. + */ +package org.bson.codecs.pojo.annotations; diff --git a/bson/src/main/org/bson/codecs/pojo/package-info.java b/bson/src/main/org/bson/codecs/pojo/package-info.java new file mode 100644 index 00000000000..a4a0f6d54ec --- /dev/null +++ b/bson/src/main/org/bson/codecs/pojo/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains classes specific to mapping user POJOs. + */ +package org.bson.codecs.pojo; diff --git a/bson/src/main/org/bson/conversions/Bson.java b/bson/src/main/org/bson/conversions/Bson.java new file mode 100644 index 00000000000..6ef749b7b3c --- /dev/null +++ b/bson/src/main/org/bson/conversions/Bson.java @@ -0,0 +1,99 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
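Example (editor's sketch, not part of this patch) for the {@code @BsonRepresentation} annotation defined above; UserAccount is hypothetical.

import org.bson.BsonType;
import org.bson.codecs.pojo.annotations.BsonRepresentation;

// The id is a plain hex String in the POJO but is stored as a native ObjectId.
public class UserAccount {
    @BsonRepresentation(BsonType.OBJECT_ID)
    private String id;

    public String getId() {
        return id;
    }

    public void setId(final String id) {
        this.id = id;
    }
}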
+ */ + +package org.bson.conversions; + +import org.bson.BsonDocument; +import org.bson.codecs.BsonCodecProvider; +import org.bson.codecs.BsonValueCodecProvider; +import org.bson.codecs.CollectionCodecProvider; +import org.bson.codecs.DocumentCodecProvider; +import org.bson.codecs.EnumCodecProvider; +import org.bson.codecs.IterableCodecProvider; +import org.bson.codecs.JsonObjectCodecProvider; +import org.bson.codecs.MapCodecProvider; +import org.bson.codecs.ValueCodecProvider; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.codecs.jsr310.Jsr310CodecProvider; + +import static java.util.Arrays.asList; +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; + +/** + * An interface for types that are able to render themselves into a {@code BsonDocument}. + * + * @since 3.0 + */ +public interface Bson { + /** + * This registry includes the following providers: + *
    + *
<ul>
+ *  <li>{@link ValueCodecProvider}</li>
+ *  <li>{@link BsonValueCodecProvider}</li>
+ *  <li>{@link DocumentCodecProvider}</li>
+ *  <li>{@link CollectionCodecProvider}</li>
+ *  <li>{@link IterableCodecProvider}</li>
+ *  <li>{@link MapCodecProvider}</li>
+ *  <li>{@link Jsr310CodecProvider}</li>
+ *  <li>{@link JsonObjectCodecProvider}</li>
+ *  <li>{@link BsonCodecProvider}</li>
+ *  <li>{@link EnumCodecProvider}</li>
+ * </ul>
+ * <p>Additional providers may be added in a future release.</p>

+ * + * @since 4.2 + */ + CodecRegistry DEFAULT_CODEC_REGISTRY = + fromProviders(asList( + new ValueCodecProvider(), + new BsonValueCodecProvider(), + new DocumentCodecProvider(), + new CollectionCodecProvider(), + new IterableCodecProvider(), + new MapCodecProvider(), + new Jsr310CodecProvider(), + new JsonObjectCodecProvider(), + new BsonCodecProvider(), + new EnumCodecProvider())); + + /** + * Render into a BsonDocument. + * + * @param documentClass the document class in scope for the collection. This parameter may be ignored, but it may be used to alter + * the structure of the returned {@code BsonDocument} based on some knowledge of the document class. + * @param codecRegistry the codec registry. This parameter may be ignored, but it may be used to look up {@code Codec} instances for + * the document class or any other related class. + * @param the type of the document class + * @return the BsonDocument + */ + BsonDocument toBsonDocument(Class documentClass, CodecRegistry codecRegistry); + + /** + * Render into a BsonDocument using a document class and codec registry appropriate for the implementation. + *

+ * <p>The default implementation of this method calls {@link #toBsonDocument(Class, CodecRegistry)} with the
+ * {@link BsonDocument} class as the first argument and {@link #DEFAULT_CODEC_REGISTRY} as the second argument.</p>

+ * + * @return the BsonDocument + * @since 4.2 + */ + default BsonDocument toBsonDocument() { + return toBsonDocument(BsonDocument.class, DEFAULT_CODEC_REGISTRY); + } +} diff --git a/bson/src/main/org/bson/conversions/package-info.java b/bson/src/main/org/bson/conversions/package-info.java new file mode 100644 index 00000000000..dc8e5866fcd --- /dev/null +++ b/bson/src/main/org/bson/conversions/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains the Bson interface + */ +package org.bson.conversions; diff --git a/bson/src/main/org/bson/diagnostics/Logger.java b/bson/src/main/org/bson/diagnostics/Logger.java new file mode 100644 index 00000000000..8916dd2f258 --- /dev/null +++ b/bson/src/main/org/bson/diagnostics/Logger.java @@ -0,0 +1,164 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.diagnostics; + +/** + * This class is not part of the public API. It may be removed or changed at any time. + * + */ +public interface Logger { + /** + * Return the name of this Logger instance. + * + * @return name of this logger instance + */ + String getName(); + + /** + * Is the logger instance enabled for the TRACE level? + * + * @return True if this Logger is enabled for the TRACE level, false otherwise. + * @since 1.4 + */ + default boolean isTraceEnabled() { + return false; + } + + /** + * Log a message at the TRACE level. + * + * @param msg the message string to be logged + * @since 1.4 + */ + default void trace(String msg) { + } + + /** + * Log an exception (throwable) at the TRACE level with an accompanying message. + * + * @param msg the message accompanying the exception + * @param t the exception (throwable) to log + * @since 1.4 + */ + default void trace(String msg, Throwable t) { + } + + /** + * Is the logger instance enabled for the DEBUG level? + * + * @return True if this Logger is enabled for the DEBUG level, false otherwise. + */ + default boolean isDebugEnabled() { + return false; + } + + /** + * Log a message at the DEBUG level. + * + * @param msg the message string to be logged + */ + default void debug(String msg) { + } + + + /** + * Log an exception (throwable) at the DEBUG level with an accompanying message. 
+ * + * @param msg the message accompanying the exception + * @param t the exception (throwable) to log + */ + default void debug(String msg, Throwable t) { + } + + /** + * Is the logger instance enabled for the INFO level? + * + * @return True if this Logger is enabled for the INFO level, false otherwise. + */ + default boolean isInfoEnabled() { + return false; + } + + /** + * Log a message at the INFO level. + * + * @param msg the message string to be logged + */ + default void info(String msg) { + } + + /** + * Log an exception (throwable) at the INFO level with an accompanying message. + * + * @param msg the message accompanying the exception + * @param t the exception (throwable) to log + */ + default void info(String msg, Throwable t) { + } + + /** + * Is the logger instance enabled for the WARN level? + * + * @return True if this Logger is enabled for the WARN level, false otherwise. + */ + default boolean isWarnEnabled() { + return false; + } + + /** + * Log a message at the WARN level. + * + * @param msg the message string to be logged + */ + default void warn(String msg) { + } + + /** + * Log an exception (throwable) at the WARN level with an accompanying message. + * + * @param msg the message accompanying the exception + * @param t the exception (throwable) to log + */ + default void warn(String msg, Throwable t) { + } + + /** + * Is the logger instance enabled for the ERROR level? + * + * @return True if this Logger is enabled for the ERROR level, false otherwise. + */ + default boolean isErrorEnabled() { + return false; + } + + /** + * Log a message at the ERROR level. + * + * @param msg the message string to be logged + */ + default void error(String msg) { + } + + /** + * Log an exception (throwable) at the ERROR level with an accompanying message. + * + * @param msg the message accompanying the exception + * @param t the exception (throwable) to log + */ + default void error(String msg, Throwable t) { + } +} diff --git a/bson/src/main/org/bson/diagnostics/Loggers.java b/bson/src/main/org/bson/diagnostics/Loggers.java new file mode 100644 index 00000000000..972a45c3773 --- /dev/null +++ b/bson/src/main/org/bson/diagnostics/Loggers.java @@ -0,0 +1,68 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.diagnostics; + +import static org.bson.assertions.Assertions.notNull; + +/** + * This class is not part of the public API. + * + * @since 3.0 + */ +public final class Loggers { + /** + * The prefix for all logger names. + */ + private static final String PREFIX = "org.bson"; + + private static final boolean USE_SLF4J = shouldUseSLF4J(); + + /** + * Gets a logger with the given suffix appended on to {@code PREFIX}, separated by a '.'. 
+ * + * @param suffix the suffix for the logger + * @return the logger + * @see Loggers#PREFIX + */ + public static Logger getLogger(final String suffix) { + notNull("suffix", suffix); + if (suffix.startsWith(".") || suffix.endsWith(".")) { + throw new IllegalArgumentException("The suffix can not start or end with a '.'"); + } + + String name = PREFIX + "." + suffix; + + if (USE_SLF4J) { + return new SLF4JLogger(name); + } else { + return new NoOpLogger(name); + } + } + + private static boolean shouldUseSLF4J() { + try { + Class.forName("org.slf4j.Logger"); + return true; + } catch (ClassNotFoundException e) { + java.util.logging.Logger.getLogger("org.bson") + .warning(String.format("SLF4J not found on the classpath. Logging is disabled for the '%s' component", PREFIX)); + return false; + } + } + private Loggers() { + } +} diff --git a/bson/src/main/org/bson/diagnostics/NoOpLogger.java b/bson/src/main/org/bson/diagnostics/NoOpLogger.java new file mode 100644 index 00000000000..ec31831be7e --- /dev/null +++ b/bson/src/main/org/bson/diagnostics/NoOpLogger.java @@ -0,0 +1,33 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.diagnostics; + +/** + * A logger that disables all levels and logs nothing + */ +class NoOpLogger implements Logger { + private final String name; + + NoOpLogger(final String name) { + this.name = name; + } + + @Override + public String getName() { + return name; + } +} diff --git a/bson/src/main/org/bson/diagnostics/SLF4JLogger.java b/bson/src/main/org/bson/diagnostics/SLF4JLogger.java new file mode 100644 index 00000000000..8a63aa43373 --- /dev/null +++ b/bson/src/main/org/bson/diagnostics/SLF4JLogger.java @@ -0,0 +1,108 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
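Example (editor's sketch, not part of this patch): obtaining and using a logger via the Loggers factory above. These classes are explicitly not part of the public API; the caller below is hypothetical.

import org.bson.diagnostics.Logger;
import org.bson.diagnostics.Loggers;

final class LoggingExample {
    // Resolves to a logger named "org.bson.codec"; SLF4J-backed when SLF4J is on
    // the classpath, otherwise a no-op logger.
    private static final Logger LOGGER = Loggers.getLogger("codec");

    static void reportDecodeFailure(final Exception e) {
        if (LOGGER.isWarnEnabled()) {
            LOGGER.warn("Unable to decode value", e);
        }
    }

    private LoggingExample() {
    }
}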
+ */ + +package org.bson.diagnostics; + +import org.slf4j.LoggerFactory; + +class SLF4JLogger implements Logger { + + private final org.slf4j.Logger delegate; + + SLF4JLogger(final String name) { + this.delegate = LoggerFactory.getLogger(name); + } + + @Override + public String getName() { + return delegate.getName(); + } + + @Override + public boolean isTraceEnabled() { + return delegate.isTraceEnabled(); + } + + @Override + public void trace(final String msg) { + delegate.trace(msg); + } + + @Override + public void trace(final String msg, final Throwable t) { + delegate.trace(msg, t); + } + + @Override + public boolean isDebugEnabled() { + return delegate.isDebugEnabled(); + } + + @Override + public void debug(final String msg) { + delegate.debug(msg); + } + + @Override + public void debug(final String msg, final Throwable t) { + delegate.debug(msg, t); + } + + @Override + public boolean isInfoEnabled() { + return delegate.isInfoEnabled(); + } + + @Override + public void info(final String msg) { + delegate.info(msg); + } + + @Override + public void info(final String msg, final Throwable t) { + delegate.info(msg, t); + } + + @Override + public boolean isWarnEnabled() { + return delegate.isWarnEnabled(); + } + + @Override + public void warn(final String msg) { + delegate.warn(msg); + } + + @Override + public void warn(final String msg, final Throwable t) { + delegate.warn(msg, t); + } + + @Override + public boolean isErrorEnabled() { + return delegate.isErrorEnabled(); + } + + @Override + public void error(final String msg) { + delegate.error(msg); + } + + @Override + public void error(final String msg, final Throwable t) { + delegate.error(msg, t); + } +} diff --git a/bson/src/main/org/bson/diagnostics/package-info.java b/bson/src/main/org/bson/diagnostics/package-info.java new file mode 100644 index 00000000000..746722d277c --- /dev/null +++ b/bson/src/main/org/bson/diagnostics/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Contains the classes for visibility of the BSON layer, for example Logging. + */ +package org.bson.diagnostics; diff --git a/bson/src/main/org/bson/internal/BsonUtil.java b/bson/src/main/org/bson/internal/BsonUtil.java new file mode 100644 index 00000000000..6879c4c0e12 --- /dev/null +++ b/bson/src/main/org/bson/internal/BsonUtil.java @@ -0,0 +1,65 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.bson.internal; + +import org.bson.BsonArray; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.BsonJavaScriptWithScope; +import org.bson.BsonValue; + +/** + *

<p>This class is not part of the public API and may be removed or changed at any time.</p>

+ */ +public final class BsonUtil { + public static BsonDocument mutableDeepCopy(final BsonDocument original) { + BsonDocument copy = new BsonDocument(original.size()); + original.forEach((key, value) -> copy.put(key, mutableDeepCopy(value))); + return copy; + } + + private static BsonArray mutableDeepCopy(final BsonArray original) { + BsonArray copy = new BsonArray(original.size()); + original.forEach(element -> copy.add(mutableDeepCopy(element))); + return copy; + } + + private static BsonBinary mutableDeepCopy(final BsonBinary original) { + return new BsonBinary(original.getType(), original.getData().clone()); + } + + private static BsonJavaScriptWithScope mutableDeepCopy(final BsonJavaScriptWithScope original) { + return new BsonJavaScriptWithScope(original.getCode(), mutableDeepCopy(original.getScope())); + } + + private static BsonValue mutableDeepCopy(final BsonValue original) { + switch (original.getBsonType()) { + case DOCUMENT: + return mutableDeepCopy(original.asDocument()); + case ARRAY: + return mutableDeepCopy(original.asArray()); + case BINARY: + return mutableDeepCopy(original.asBinary()); + case JAVASCRIPT_WITH_SCOPE: + return mutableDeepCopy(original.asJavaScriptWithScope()); + default: + return original; + } + } + + private BsonUtil() { + } +} diff --git a/bson/src/main/org/bson/internal/ChildCodecRegistry.java b/bson/src/main/org/bson/internal/ChildCodecRegistry.java new file mode 100644 index 00000000000..73bb46630de --- /dev/null +++ b/bson/src/main/org/bson/internal/ChildCodecRegistry.java @@ -0,0 +1,139 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
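Example (editor's sketch, not part of this patch): mutableDeepCopy produces a copy whose nested containers can be mutated without touching the original.

import org.bson.BsonDocument;
import org.bson.BsonInt32;
import org.bson.BsonString;
import org.bson.internal.BsonUtil;

final class DeepCopyExample {
    public static void main(final String[] args) {
        BsonDocument original = new BsonDocument("name", new BsonString("a"))
                .append("nested", new BsonDocument("n", new BsonInt32(1)));

        BsonDocument copy = BsonUtil.mutableDeepCopy(original);
        copy.getDocument("nested").put("n", new BsonInt32(2));

        // The original's nested document is untouched: prints 1
        System.out.println(original.getDocument("nested").getInt32("n").getValue());
    }
}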
+ */ + +package org.bson.internal; + + +import org.bson.codecs.Codec; +import org.bson.codecs.configuration.CodecRegistry; + +import java.lang.reflect.Type; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +import static java.lang.String.format; +import static org.bson.assertions.Assertions.isTrueArgument; +import static org.bson.assertions.Assertions.notNull; + +// An implementation of CodecRegistry that is used to detect cyclic dependencies between Codecs +class ChildCodecRegistry implements CodecRegistry { + + private final ChildCodecRegistry parent; + private final CycleDetectingCodecRegistry registry; + private final Class codecClass; + private final List types; + + ChildCodecRegistry(final CycleDetectingCodecRegistry registry, final Class codecClass, final List types) { + this.codecClass = codecClass; + this.parent = null; + this.registry = registry; + this.types = types; + } + + private ChildCodecRegistry(final ChildCodecRegistry parent, final Class codecClass, final List types) { + this.parent = parent; + this.codecClass = codecClass; + this.registry = parent.registry; + this.types = types; + } + + public Class getCodecClass() { + return codecClass; + } + + public Optional> getTypes() { + return Optional.ofNullable(types); + } + + // Gets a Codec, but if it detects a cyclic dependency, return a LazyCodec which breaks the chain. + public Codec get(final Class clazz) { + if (hasCycles(clazz)) { + return new LazyCodec<>(registry, clazz, null); + } else { + return registry.get(new ChildCodecRegistry<>(this, clazz, null)); + } + } + + @Override + public Codec get(final Class clazz, final List typeArguments) { + notNull("typeArguments", typeArguments); + isTrueArgument(format("typeArguments size should equal the number of type parameters in class %s, but is %d", + clazz, typeArguments.size()), + clazz.getTypeParameters().length == typeArguments.size()); + if (hasCycles(clazz)) { + return new LazyCodec<>(registry, clazz, typeArguments); + } else { + return registry.get(new ChildCodecRegistry<>(this, clazz, typeArguments)); + } + } + + @Override + public Codec get(final Class clazz, final CodecRegistry registry) { + return get(clazz, Collections.emptyList(), registry); + } + + @Override + public Codec get(final Class clazz, final List typeArguments, final CodecRegistry registry) { + return this.registry.get(clazz, typeArguments, registry); + } + + private Boolean hasCycles(final Class theClass) { + ChildCodecRegistry current = this; + while (current != null) { + if (current.codecClass.equals(theClass)) { + return true; + } + + current = current.parent; + } + + return false; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ChildCodecRegistry that = (ChildCodecRegistry) o; + + if (!codecClass.equals(that.codecClass)) { + return false; + } + if (!Objects.equals(parent, that.parent)) { + return false; + } + if (!registry.equals(that.registry)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = parent != null ? 
parent.hashCode() : 0; + result = 31 * result + registry.hashCode(); + result = 31 * result + codecClass.hashCode(); + return result; + } +} diff --git a/bson/src/main/org/bson/internal/CodecCache.java b/bson/src/main/org/bson/internal/CodecCache.java new file mode 100644 index 00000000000..bec178559e3 --- /dev/null +++ b/bson/src/main/org/bson/internal/CodecCache.java @@ -0,0 +1,81 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.internal; + +import org.bson.codecs.Codec; + +import java.lang.reflect.Type; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +import static org.bson.assertions.Assertions.assertNotNull; + +final class CodecCache { + + static final class CodecCacheKey { + private final Class clazz; + private final List types; + + CodecCacheKey(final Class clazz, final List types) { + this.clazz = clazz; + this.types = types; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + CodecCacheKey that = (CodecCacheKey) o; + return clazz.equals(that.clazz) && Objects.equals(types, that.types); + } + + @Override + public int hashCode() { + return Objects.hash(clazz, types); + } + + @Override + public String toString() { + return "CodecCacheKey{" + + "clazz=" + clazz + + ", types=" + types + + '}'; + } + } + + private final ConcurrentMap> codecCache = new ConcurrentHashMap<>(); + + public Codec putIfAbsent(final CodecCacheKey codecCacheKey, final Codec codec) { + assertNotNull(codec); + @SuppressWarnings("unchecked") + Codec prevCodec = (Codec) codecCache.putIfAbsent(codecCacheKey, codec); + return prevCodec == null ? codec : prevCodec; + } + + public Optional> get(final CodecCacheKey codecCacheKey) { + @SuppressWarnings("unchecked") + Codec codec = (Codec) codecCache.get(codecCacheKey); + return Optional.ofNullable(codec); + } +} diff --git a/bson/src/main/org/bson/internal/CycleDetectingCodecRegistry.java b/bson/src/main/org/bson/internal/CycleDetectingCodecRegistry.java new file mode 100644 index 00000000000..2aecba9f188 --- /dev/null +++ b/bson/src/main/org/bson/internal/CycleDetectingCodecRegistry.java @@ -0,0 +1,36 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.internal; + +import org.bson.codecs.Codec; +import org.bson.codecs.configuration.CodecRegistry; + +/** + * A marker interface for {@code CodecRegistry} implementations that are able to detect cycles. + * + * @since 3.12 + */ +interface CycleDetectingCodecRegistry extends CodecRegistry { + /** + * Get the Codec using the given context. + * + * @param context the child context + * @param the value type + * @return the Codec + */ + Codec get(ChildCodecRegistry context); +} diff --git a/bson/src/main/org/bson/internal/LazyCodec.java b/bson/src/main/org/bson/internal/LazyCodec.java new file mode 100644 index 00000000000..0e7f94e9441 --- /dev/null +++ b/bson/src/main/org/bson/internal/LazyCodec.java @@ -0,0 +1,67 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.internal; + +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.codecs.Codec; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.configuration.CodecRegistry; + +import java.lang.reflect.Type; +import java.util.List; + +class LazyCodec implements Codec { + private final CodecRegistry registry; + private final Class clazz; + private final List types; + private volatile Codec wrapped; + + LazyCodec(final CodecRegistry registry, final Class clazz, final List types) { + this.registry = registry; + this.clazz = clazz; + this.types = types; + } + + @Override + public void encode(final BsonWriter writer, final T value, final EncoderContext encoderContext) { + getWrapped().encode(writer, value, encoderContext); + } + + @Override + public Class getEncoderClass() { + return clazz; + } + + @Override + public T decode(final BsonReader reader, final DecoderContext decoderContext) { + return getWrapped().decode(reader, decoderContext); + } + + private Codec getWrapped() { + if (wrapped == null) { + if (types == null) { + wrapped = registry.get(clazz); + } else { + wrapped = registry.get(clazz, types); + } + } + + return wrapped; + } +} diff --git a/bson/src/main/org/bson/internal/NumberCodecHelper.java b/bson/src/main/org/bson/internal/NumberCodecHelper.java new file mode 100644 index 00000000000..faf63e56eb5 --- /dev/null +++ b/bson/src/main/org/bson/internal/NumberCodecHelper.java @@ -0,0 +1,161 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
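Example (editor's sketch, not part of this patch): the recursive shape the cycle detection above exists for. Resolving a codec for Node needs a codec for its "next" field, which is again Node; ChildCodecRegistry spots the repeat in its parent chain and hands back a LazyCodec, deferring the real lookup until the first encode or decode. Node is hypothetical.

// A self-referential POJO: building its codec would recurse without LazyCodec.
public class Node {
    private String value;
    private Node next; // the cyclic reference

    public String getValue() {
        return value;
    }

    public void setValue(final String value) {
        this.value = value;
    }

    public Node getNext() {
        return next;
    }

    public void setNext(final Node next) {
        this.next = next;
    }
}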
+ */ + +package org.bson.internal; + +import org.bson.BsonInvalidOperationException; +import org.bson.BsonReader; +import org.bson.BsonType; +import org.bson.types.Decimal128; + +import java.math.BigDecimal; + +import static java.lang.String.format; + +/** + * This class is not part of the public API. It may be removed or changed at any time. + */ +public final class NumberCodecHelper { + + public static byte decodeByte(final BsonReader reader) { + int value = decodeInt(reader); + if (value < Byte.MIN_VALUE || value > Byte.MAX_VALUE) { + throw new BsonInvalidOperationException(format("%s can not be converted into a Byte.", value)); + } + return (byte) value; + } + + public static short decodeShort(final BsonReader reader) { + int value = decodeInt(reader); + if (value < Short.MIN_VALUE || value > Short.MAX_VALUE) { + throw new BsonInvalidOperationException(format("%s can not be converted into a Short.", value)); + } + return (short) value; + } + + public static int decodeInt(final BsonReader reader) { + int intValue; + BsonType bsonType = reader.getCurrentBsonType(); + switch (bsonType) { + case INT32: + intValue = reader.readInt32(); + break; + case INT64: + long longValue = reader.readInt64(); + intValue = (int) longValue; + if (longValue != (long) intValue) { + throw invalidConversion(Integer.class, longValue); + } + break; + case DOUBLE: + double doubleValue = reader.readDouble(); + intValue = (int) doubleValue; + if (doubleValue != (double) intValue) { + throw invalidConversion(Integer.class, doubleValue); + } + break; + case DECIMAL128: + Decimal128 decimal128 = reader.readDecimal128(); + intValue = decimal128.intValue(); + if (!decimal128.equals(new Decimal128(intValue))) { + throw invalidConversion(Integer.class, decimal128); + } + break; + default: + throw new BsonInvalidOperationException(format("Invalid numeric type, found: %s", bsonType)); + } + return intValue; + } + + public static long decodeLong(final BsonReader reader) { + long longValue; + BsonType bsonType = reader.getCurrentBsonType(); + switch (bsonType) { + case INT32: + longValue = reader.readInt32(); + break; + case INT64: + longValue = reader.readInt64(); + break; + case DOUBLE: + double doubleValue = reader.readDouble(); + longValue = (long) doubleValue; + if (doubleValue != (double) longValue) { + throw invalidConversion(Long.class, doubleValue); + } + break; + case DECIMAL128: + Decimal128 decimal128 = reader.readDecimal128(); + longValue = decimal128.longValue(); + if (!decimal128.equals(new Decimal128(longValue))) { + throw invalidConversion(Long.class, decimal128); + } + break; + default: + throw new BsonInvalidOperationException(format("Invalid numeric type, found: %s", bsonType)); + } + return longValue; + } + + public static float decodeFloat(final BsonReader reader) { + double value = decodeDouble(reader); + if (value < -Float.MAX_VALUE || value > Float.MAX_VALUE) { + throw new BsonInvalidOperationException(format("%s can not be converted into a Float.", value)); + } + return (float) value; + } + + public static double decodeDouble(final BsonReader reader) { + double doubleValue; + BsonType bsonType = reader.getCurrentBsonType(); + switch (bsonType) { + case INT32: + doubleValue = reader.readInt32(); + break; + case INT64: + long longValue = reader.readInt64(); + doubleValue = longValue; + if (longValue != (long) doubleValue) { + throw invalidConversion(Double.class, longValue); + } + break; + case DOUBLE: + doubleValue = reader.readDouble(); + break; + case DECIMAL128: + Decimal128 decimal128 = 
reader.readDecimal128(); + try { + doubleValue = decimal128.doubleValue(); + if (!decimal128.equals(new Decimal128(new BigDecimal(doubleValue)))) { + throw invalidConversion(Double.class, decimal128); + } + } catch (NumberFormatException e) { + throw invalidConversion(Double.class, decimal128); + } + break; + default: + throw new BsonInvalidOperationException(format("Invalid numeric type, found: %s", bsonType)); + } + return doubleValue; + } + + private static BsonInvalidOperationException invalidConversion(final Class clazz, final Number value) { + return new BsonInvalidOperationException(format("Could not convert `%s` to a %s without losing precision", value, clazz)); + } + + private NumberCodecHelper() { + } +} diff --git a/bson/src/main/org/bson/internal/ProvidersCodecRegistry.java b/bson/src/main/org/bson/internal/ProvidersCodecRegistry.java new file mode 100644 index 00000000000..ddb3c44355d --- /dev/null +++ b/bson/src/main/org/bson/internal/ProvidersCodecRegistry.java @@ -0,0 +1,122 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.internal; + +import org.bson.codecs.Codec; +import org.bson.codecs.configuration.CodecConfigurationException; +import org.bson.codecs.configuration.CodecProvider; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.internal.CodecCache.CodecCacheKey; + +import java.lang.reflect.Type; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static java.lang.String.format; +import static java.util.Collections.emptyList; +import static org.bson.assertions.Assertions.isTrueArgument; +import static org.bson.assertions.Assertions.notNull; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class ProvidersCodecRegistry implements CycleDetectingCodecRegistry { + private final List codecProviders; + private final CodecCache codecCache = new CodecCache(); + + public ProvidersCodecRegistry(final List codecProviders) { + isTrueArgument("codecProviders must not be null or empty", codecProviders != null && codecProviders.size() > 0); + this.codecProviders = new ArrayList<>(codecProviders); + } + + @Override + public Codec get(final Class clazz) { + return get(new ChildCodecRegistry<>(this, clazz, null)); + } + + @Override + public Codec get(final Class clazz, final List typeArguments) { + notNull("typeArguments", typeArguments); + isTrueArgument(format("typeArguments size should equal the number of type parameters in class %s, but is %d", + clazz, typeArguments.size()), + clazz.getTypeParameters().length == typeArguments.size()); + return get(new ChildCodecRegistry<>(this, clazz, typeArguments)); + } + + @Override + public Codec get(final Class clazz, final CodecRegistry registry) { + return get(clazz, Collections.emptyList(), registry); + } + + @Override + public Codec get(final Class clazz, final List typeArguments, final CodecRegistry registry) { + for (CodecProvider provider : codecProviders) { + Codec codec = provider.get(clazz, typeArguments, registry); + if (codec != null) { + return codec; + } + } + return null; + } + + public Codec get(final ChildCodecRegistry context) { + CodecCacheKey codecCacheKey = new CodecCacheKey(context.getCodecClass(), context.getTypes().orElse(null)); + return codecCache.get(codecCacheKey).orElseGet(() -> { + for (CodecProvider provider : codecProviders) { + Codec codec = provider.get(context.getCodecClass(), context.getTypes().orElse(emptyList()), context); + if (codec != null) { + return codecCache.putIfAbsent(codecCacheKey, codec); + } + } + throw new CodecConfigurationException(format("Can't find a codec for %s.", codecCacheKey)); + }); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ProvidersCodecRegistry that = (ProvidersCodecRegistry) o; + if (codecProviders.size() != that.codecProviders.size()) { + return false; + } + for (int i = 0; i < codecProviders.size(); i++) { + if (codecProviders.get(i).getClass() != that.codecProviders.get(i).getClass()) { + return false; + } + } + return true; + } + + @Override + public int hashCode() { + return codecProviders.hashCode(); + } + + @Override + public String toString() { + return "ProvidersCodecRegistry{" + + "codecProviders=" + codecProviders + + '}'; + } +} diff --git a/bson/src/main/org/bson/internal/StringCodecHelper.java b/bson/src/main/org/bson/internal/StringCodecHelper.java new file mode 100644 index 00000000000..04225aad939 --- /dev/null +++ b/bson/src/main/org/bson/internal/StringCodecHelper.java @@ -0,0 +1,46 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
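The numeric decoders in NumberCodecHelper above all rely on the same cast-and-compare idiom: narrow the value, widen it back, and reject it when the round trip changes it (Decimal128 gets the equivalent treatment by re-wrapping the narrowed value and comparing). A minimal standalone sketch of that idiom follows; the class and method names are hypothetical, not driver API. Note that the double-to-long variant also rejects NaN, since (long) Double.NaN is 0 and NaN != 0.0.

    // Illustrative cast-and-compare narrowing, mirroring NumberCodecHelper's checks.
    public final class LosslessNarrowing {

        // Returns the long as an int, or throws if the round trip loses bits.
        static int toIntExactly(final long longValue) {
            int intValue = (int) longValue;
            if (longValue != (long) intValue) {
                throw new ArithmeticException(longValue + " does not fit in an int");
            }
            return intValue;
        }

        // double -> long: fractional values and NaN fail the comparison.
        static long toLongExactly(final double doubleValue) {
            long longValue = (long) doubleValue;
            if (doubleValue != (double) longValue) {
                throw new ArithmeticException(doubleValue + " cannot be represented as a long");
            }
            return longValue;
        }

        public static void main(final String[] args) {
            System.out.println(toIntExactly(42L));      // 42
            System.out.println(toLongExactly(3.0));     // 3
            System.out.println(toIntExactly(1L << 40)); // throws ArithmeticException
        }
    }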
+ */ + +package org.bson.internal; + +import org.bson.BsonInvalidOperationException; +import org.bson.BsonReader; +import org.bson.BsonType; + +import static java.lang.String.format; + +/** + * This class is not part of the public API. It may be removed or changed at any time. + */ +public final class StringCodecHelper { + + private StringCodecHelper(){ + //NOP + } + + public static char decodeChar(final BsonReader reader) { + BsonType currentBsonType = reader.getCurrentBsonType(); + if (currentBsonType != BsonType.STRING) { + throw new BsonInvalidOperationException(format("Invalid string type, found: %s", currentBsonType)); + } + String string = reader.readString(); + if (string.length() != 1) { + throw new BsonInvalidOperationException(format("Attempting to decode the string '%s' to a character, but its length is not " + + "equal to one", string)); + } + return string.charAt(0); + } +} diff --git a/bson/src/main/org/bson/internal/UuidHelper.java b/bson/src/main/org/bson/internal/UuidHelper.java new file mode 100644 index 00000000000..9c46614b56e --- /dev/null +++ b/bson/src/main/org/bson/internal/UuidHelper.java @@ -0,0 +1,135 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.internal; + +import org.bson.BSONException; +import org.bson.BsonBinarySubType; +import org.bson.BsonSerializationException; +import org.bson.UuidRepresentation; + +import java.util.Arrays; +import java.util.UUID; + +/** + * Utilities for encoding and decoding UUID into binary. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class UuidHelper { + private static void writeLongToArrayBigEndian(final byte[] bytes, final int offset, final long x) { + bytes[offset + 7] = (byte) (0xFFL & (x)); + bytes[offset + 6] = (byte) (0xFFL & (x >> 8)); + bytes[offset + 5] = (byte) (0xFFL & (x >> 16)); + bytes[offset + 4] = (byte) (0xFFL & (x >> 24)); + bytes[offset + 3] = (byte) (0xFFL & (x >> 32)); + bytes[offset + 2] = (byte) (0xFFL & (x >> 40)); + bytes[offset + 1] = (byte) (0xFFL & (x >> 48)); + bytes[offset] = (byte) (0xFFL & (x >> 56)); + } + + private static long readLongFromArrayBigEndian(final byte[] bytes, final int offset) { + long x = 0; + x |= (0xFFL & bytes[offset + 7]); + x |= (0xFFL & bytes[offset + 6]) << 8; + x |= (0xFFL & bytes[offset + 5]) << 16; + x |= (0xFFL & bytes[offset + 4]) << 24; + x |= (0xFFL & bytes[offset + 3]) << 32; + x |= (0xFFL & bytes[offset + 2]) << 40; + x |= (0xFFL & bytes[offset + 1]) << 48; + x |= (0xFFL & bytes[offset]) << 56; + return x; + } + + // reverse elements in the subarray data[start:start+length] + private static void reverseByteArray(final byte[] data, final int start, final int length) { + for (int left = start, right = start + length - 1; left < right; left++, right--) { + // swap the values at the left and right indices + byte temp = data[left]; + data[left] = data[right]; + data[right] = temp; + } + } + + public static byte[] encodeUuidToBinary(final UUID uuid, final UuidRepresentation uuidRepresentation) { + byte[] binaryData = new byte[16]; + writeLongToArrayBigEndian(binaryData, 0, uuid.getMostSignificantBits()); + writeLongToArrayBigEndian(binaryData, 8, uuid.getLeastSignificantBits()); + switch(uuidRepresentation) { + case C_SHARP_LEGACY: + reverseByteArray(binaryData, 0, 4); + reverseByteArray(binaryData, 4, 2); + reverseByteArray(binaryData, 6, 2); + break; + case JAVA_LEGACY: + reverseByteArray(binaryData, 0, 8); + reverseByteArray(binaryData, 8, 8); + break; + case PYTHON_LEGACY: + case STANDARD: + break; + default: + throw new BSONException("Unexpected UUID representation: " + uuidRepresentation); + } + + return binaryData; + } + + // This method will NOT modify the contents of the byte array + public static UUID decodeBinaryToUuid(final byte[] data, final byte type, final UuidRepresentation uuidRepresentation) { + if (data.length != 16) { + throw new BsonSerializationException(String.format("Expected length to be 16, not %d.", data.length)); + } + + byte[] localData = data; + + if (type == BsonBinarySubType.UUID_LEGACY.getValue()) { + switch(uuidRepresentation) { + case C_SHARP_LEGACY: + localData = Arrays.copyOf(data, 16); + + reverseByteArray(localData, 0, 4); + reverseByteArray(localData, 4, 2); + reverseByteArray(localData, 6, 2); + break; + case JAVA_LEGACY: + localData = Arrays.copyOf(data, 16); + + reverseByteArray(localData, 0, 8); + reverseByteArray(localData, 8, 8); + break; + case PYTHON_LEGACY: + break; + case STANDARD: + throw new BSONException("Can not decode a subtype 3 (UUID legacy) BSON binary when the decoder is configured to use " + + "the standard UUID representation"); + default: + throw new BSONException("Unexpected UUID representation: " + uuidRepresentation); + } + } + + return new UUID(readLongFromArrayBigEndian(localData, 0), readLongFromArrayBigEndian(localData, 8)); + } + + public static boolean isLegacyUUID(final UuidRepresentation uuidRepresentation) { + return uuidRepresentation == UuidRepresentation.JAVA_LEGACY + || uuidRepresentation == UuidRepresentation.C_SHARP_LEGACY + || uuidRepresentation == 
UuidRepresentation.PYTHON_LEGACY; + } + + private UuidHelper() { + } +} diff --git a/bson/src/main/org/bson/internal/vector/BinaryVectorHelper.java b/bson/src/main/org/bson/internal/vector/BinaryVectorHelper.java new file mode 100644 index 00000000000..74d50d334fc --- /dev/null +++ b/bson/src/main/org/bson/internal/vector/BinaryVectorHelper.java @@ -0,0 +1,177 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.internal.vector; + +import org.bson.BsonBinary; +import org.bson.BsonInvalidOperationException; +import org.bson.Float32BinaryVector; +import org.bson.Int8BinaryVector; +import org.bson.PackedBitBinaryVector; +import org.bson.BinaryVector; +import org.bson.assertions.Assertions; +import org.bson.types.Binary; + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.FloatBuffer; + +/** + * Helper class for encoding and decoding vectors to and from {@link BsonBinary}/{@link Binary}. + * + *
<p>
+ * This class is not part of the public API and may be removed or changed at any time. + * + * @see BinaryVector + * @see BsonBinary#asVector() + * @see BsonBinary#BsonBinary(BinaryVector) + */ +public final class BinaryVectorHelper { + + private static final ByteOrder STORED_BYTE_ORDER = ByteOrder.LITTLE_ENDIAN; + private static final String ERROR_MESSAGE_UNKNOWN_VECTOR_DATA_TYPE = "Unknown vector data type: "; + private static final byte ZERO_PADDING = 0; + + private BinaryVectorHelper() { + //NOP + } + + private static final int METADATA_SIZE = 2; + + public static byte[] encodeVectorToBinary(final BinaryVector vector) { + BinaryVector.DataType dataType = vector.getDataType(); + switch (dataType) { + case INT8: + return encodeVector(dataType.getValue(), ZERO_PADDING, vector.asInt8Vector().getData()); + case PACKED_BIT: + PackedBitBinaryVector packedBitVector = vector.asPackedBitVector(); + return encodeVector(dataType.getValue(), packedBitVector.getPadding(), packedBitVector.getData()); + case FLOAT32: + return encodeVector(dataType.getValue(), vector.asFloat32Vector().getData()); + default: + throw Assertions.fail(ERROR_MESSAGE_UNKNOWN_VECTOR_DATA_TYPE + dataType); + } + } + + /** + * Decodes a vector from a binary representation. + *
<p> + * The {@link #flush()} method of {@link OutputBuffer} does nothing. + * </p>
+ */ + @Override + public void flush() throws IOException { + super.flush(); + } + + @Override + public void write(final byte[] bytes, final int offset, final int length) { + writeBytes(bytes, offset, length); + } + + @Override + public void writeBytes(final byte[] bytes) { + writeBytes(bytes, 0, bytes.length); + } + + @Override + public void writeInt32(final int value) { + write(value >> 0); + write(value >> 8); + write(value >> 16); + write(value >> 24); + } + + @Override + @Deprecated + public void writeInt32(final int position, final int value) { + write(position, value >> 0); + write(position + 1, value >> 8); + write(position + 2, value >> 16); + write(position + 3, value >> 24); + } + + @Override + public void writeInt64(final long value) { + write((byte) (0xFFL & (value >> 0))); + write((byte) (0xFFL & (value >> 8))); + write((byte) (0xFFL & (value >> 16))); + write((byte) (0xFFL & (value >> 24))); + write((byte) (0xFFL & (value >> 32))); + write((byte) (0xFFL & (value >> 40))); + write((byte) (0xFFL & (value >> 48))); + write((byte) (0xFFL & (value >> 56))); + } + + @Override + public void writeDouble(final double x) { + writeLong(Double.doubleToRawLongBits(x)); + } + + @Override + public void writeString(final String str) { + writeInt(0); // making space for size + int strLen = writeCharacters(str, false); + writeInt32(getPosition() - strLen - 4, strLen); + } + + @Override + public void writeCString(final String value) { + writeCharacters(value, true); + } + + @Override + public void writeObjectId(final ObjectId value) { + write(value.toByteArray()); + } + + /** + * Gets the output size in bytes. + * @return the size + */ + public int size() { + return getSize(); + } + + /** + * Pipe the contents of this output buffer into the given output stream + * + * @param out the stream to pipe to + * @return number of bytes written to the stream + * @throws java.io.IOException if the stream throws an exception + */ + public abstract int pipe(OutputStream out) throws IOException; + + /** + * Get a list of byte buffers that are prepared to be read from; in other words, whose position is 0 and whose limit is the number of + * bytes that should read.
+ * <p>Note that the byte buffers may be read-only.</p>
+ * + * @return the non-null list of byte buffers, in LITTLE_ENDIAN order. The returned {@link ByteBuf}s must eventually be + * {@linkplain ByteBuf#release() released} explicitly, calling {@link OutputBuffer#close()} may be not enough to release them. + * The caller must not use the {@link ByteBuf}s after closing this {@link OutputBuffer}, + * though releasing them is allowed to be done after closing this {@link OutputBuffer}. + */ + public abstract List getByteBuffers(); + + @Override + public abstract void truncateToPosition(int newPosition); + + /** + * Gets a copy of the buffered bytes. + * + * @return the byte array + * @see org.bson.io.OutputBuffer#pipe(java.io.OutputStream) + */ + public byte[] toByteArray() { + try { + ByteArrayOutputStream bout = new ByteArrayOutputStream(size()); + pipe(bout); + return bout.toByteArray(); + } catch (IOException ioe) { + throw new RuntimeException("should be impossible", ioe); + } + } + + @Override + public void write(final int value) { + writeByte(value); + } + + /** + * Writes the given integer value to the buffer. + * + * @param value the value to write + * @see #writeInt32 + */ + public void writeInt(final int value) { + writeInt32(value); + } + + @Override + public String toString() { + return getClass().getName() + " size: " + size() + " pos: " + getPosition(); + } + + /** + * Write the specified byte at the specified position. + * + * @param position the position, which must be greater than equal to 0 and at least 4 less than the stream size + * @param value the value to write. The 24 high-order bits of the value are ignored. + */ + protected abstract void write(int position, int value); + + /** + * Writes the given long value to the buffer. + * + * @param value the value to write + * @see #writeInt64 + */ + public void writeLong(final long value) { + writeInt64(value); + } + + /** + * Writes the characters of a string to the buffer as UTF-8 bytes. + * + * @param str the string to write. + * @param checkForNullCharacters if true, check for and disallow null characters in the string. + * @return the total number of bytes written. + * @throws BsonSerializationException if checkForNullCharacters is true and the string contains a null character. + */ + protected int writeCharacters(final String str, final boolean checkForNullCharacters) { + int len = str.length(); + int total = 0; + + for (int i = 0; i < len;) { + int c = Character.codePointAt(str, i); + + if (checkForNullCharacters && c == 0x0) { + throw new BsonSerializationException(format("BSON cstring '%s' is not valid because it contains a null character " + + "at index %d", str, i)); + } + if (c < 0x80) { + write((byte) c); + total += 1; + } else if (c < 0x800) { + write((byte) (0xc0 + (c >> 6))); + write((byte) (0x80 + (c & 0x3f))); + total += 2; + } else if (c < 0x10000) { + write((byte) (0xe0 + (c >> 12))); + write((byte) (0x80 + ((c >> 6) & 0x3f))); + write((byte) (0x80 + (c & 0x3f))); + total += 3; + } else { + write((byte) (0xf0 + (c >> 18))); + write((byte) (0x80 + ((c >> 12) & 0x3f))); + write((byte) (0x80 + ((c >> 6) & 0x3f))); + write((byte) (0x80 + (c & 0x3f))); + total += 4; + } + + i += Character.charCount(c); + } + + write((byte) 0); + total++; + return total; + } +} diff --git a/bson/src/main/org/bson/io/package-info.java b/bson/src/main/org/bson/io/package-info.java new file mode 100644 index 00000000000..dca8d1a8df1 --- /dev/null +++ b/bson/src/main/org/bson/io/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
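The hand-rolled encoder in writeCharacters above emits one to four bytes per code point and then a terminating 0x00 (rejecting embedded nulls when writing a cstring). The branch arithmetic matches the JDK's UTF-8 encoder exactly; this hypothetical harness (not driver code) compares the two, omitting only the trailing NUL terminator:

    import java.nio.charset.StandardCharsets;
    import java.util.Arrays;

    // Checks the manual UTF-8 branch arithmetic against the JDK's encoder.
    public final class Utf8Check {

        static byte[] encodeManually(final String str) {
            byte[] out = new byte[str.length() * 4]; // worst case
            int pos = 0;
            for (int i = 0; i < str.length();) {
                int c = Character.codePointAt(str, i);
                if (c < 0x80) {                                    // 1 byte: 0xxxxxxx
                    out[pos++] = (byte) c;
                } else if (c < 0x800) {                            // 2 bytes: 110xxxxx 10xxxxxx
                    out[pos++] = (byte) (0xc0 + (c >> 6));
                    out[pos++] = (byte) (0x80 + (c & 0x3f));
                } else if (c < 0x10000) {                          // 3 bytes
                    out[pos++] = (byte) (0xe0 + (c >> 12));
                    out[pos++] = (byte) (0x80 + ((c >> 6) & 0x3f));
                    out[pos++] = (byte) (0x80 + (c & 0x3f));
                } else {                                           // 4 bytes (a surrogate pair in the String)
                    out[pos++] = (byte) (0xf0 + (c >> 18));
                    out[pos++] = (byte) (0x80 + ((c >> 12) & 0x3f));
                    out[pos++] = (byte) (0x80 + ((c >> 6) & 0x3f));
                    out[pos++] = (byte) (0x80 + (c & 0x3f));
                }
                i += Character.charCount(c);
            }
            return Arrays.copyOf(out, pos);
        }

        public static void main(final String[] args) {
            String s = "a\u00e9\u20ac\ud83d\ude00"; // 1-, 2-, 3- and 4-byte code points
            System.out.println(Arrays.equals(encodeManually(s),
                    s.getBytes(StandardCharsets.UTF_8))); // true
        }
    }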
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Contains classes implementing I/O operations used by BSON objects. + */ +package org.bson.io; diff --git a/bson/src/main/org/bson/json/Converter.java b/bson/src/main/org/bson/json/Converter.java new file mode 100644 index 00000000000..06342f109f9 --- /dev/null +++ b/bson/src/main/org/bson/json/Converter.java @@ -0,0 +1,33 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +/** + * A converter from a BSON value to JSON. + * + * @param the value type to convert + * @since 3.5 + */ +public interface Converter { + /** + * Convert the given value to JSON using the JSON writer. + * + * @param value the value, which may be null depending on the type + * @param writer the JSON writer + */ + void convert(T value, StrictJsonWriter writer); +} diff --git a/bson/src/main/org/bson/json/DateTimeFormatter.java b/bson/src/main/org/bson/json/DateTimeFormatter.java new file mode 100644 index 00000000000..9f060cf6c4e --- /dev/null +++ b/bson/src/main/org/bson/json/DateTimeFormatter.java @@ -0,0 +1,47 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
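The Converter interface above is the extension point for customizing how individual BSON types are rendered to JSON. As a sketch, assuming the JsonWriterSettings.Builder#dateTimeConverter hook (present in recent driver versions, but verify against the version you use), a lambda can replace the default $date form:

    import org.bson.BsonDocument;
    import org.bson.json.Converter;
    import org.bson.json.JsonWriterSettings;

    // Sketch: render BSON date-times as bare epoch millis instead of $date.
    public final class CustomDateOutput {
        public static void main(final String[] args) {
            Converter<Long> epochMillis = (value, writer) -> writer.writeNumber(Long.toString(value));

            JsonWriterSettings settings = JsonWriterSettings.builder()
                    .dateTimeConverter(epochMillis)
                    .build();

            BsonDocument doc = BsonDocument.parse("{d: {$date: \"1970-01-02T00:00:00Z\"}}");
            System.out.println(doc.toJson(settings)); // {"d": 86400000}
        }
    }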
+ */ + +package org.bson.json; + +import java.time.Instant; +import java.time.LocalDate; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; + +import static java.time.format.DateTimeFormatter.ISO_LOCAL_DATE; +import static java.time.format.DateTimeFormatter.ISO_OFFSET_DATE_TIME; + +final class DateTimeFormatter { + private static final int DATE_STRING_LENGTH = "1970-01-01".length(); + + static long parse(final String dateTimeString) { + // ISO_OFFSET_DATE_TIME will not parse date strings consisting of just year-month-day, so use ISO_LOCAL_DATE for those + if (dateTimeString.length() == DATE_STRING_LENGTH) { + return LocalDate.parse(dateTimeString, ISO_LOCAL_DATE).atStartOfDay().toInstant(ZoneOffset.UTC).toEpochMilli(); + } else { + return ISO_OFFSET_DATE_TIME.parse(dateTimeString, temporal -> Instant.from(temporal)).toEpochMilli(); + } + } + + static String format(final long dateTime) { + return ZonedDateTime.ofInstant(Instant.ofEpochMilli(dateTime), ZoneId.of("Z")).format(ISO_OFFSET_DATE_TIME); + } + + private DateTimeFormatter() { + } + +} diff --git a/bson/src/main/org/bson/json/ExtendedJsonBinaryConverter.java b/bson/src/main/org/bson/json/ExtendedJsonBinaryConverter.java new file mode 100644 index 00000000000..a779a4ef694 --- /dev/null +++ b/bson/src/main/org/bson/json/ExtendedJsonBinaryConverter.java @@ -0,0 +1,34 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +import org.bson.BsonBinary; + +import java.util.Base64; + +class ExtendedJsonBinaryConverter implements Converter { + + @Override + public void convert(final BsonBinary value, final StrictJsonWriter writer) { + writer.writeStartObject(); + writer.writeStartObject("$binary"); + writer.writeString("base64", Base64.getEncoder().encodeToString(value.getData())); + writer.writeString("subType", String.format("%02X", value.getType())); + writer.writeEndObject(); + writer.writeEndObject(); + } +} diff --git a/bson/src/main/org/bson/json/ExtendedJsonDateTimeConverter.java b/bson/src/main/org/bson/json/ExtendedJsonDateTimeConverter.java new file mode 100644 index 00000000000..05a0152e68d --- /dev/null +++ b/bson/src/main/org/bson/json/ExtendedJsonDateTimeConverter.java @@ -0,0 +1,29 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
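The length check in DateTimeFormatter.parse above exists because java.time's ISO_OFFSET_DATE_TIME rejects a bare year-month-day string. A self-contained sketch of the same two-path parse (illustrative names, not driver code):

    import java.time.Instant;
    import java.time.LocalDate;
    import java.time.ZoneOffset;

    import static java.time.format.DateTimeFormatter.ISO_LOCAL_DATE;
    import static java.time.format.DateTimeFormatter.ISO_OFFSET_DATE_TIME;

    public final class DateParseSketch {

        static long parseToEpochMillis(final String s) {
            if (s.length() == "1970-01-01".length()) {
                // ISO_OFFSET_DATE_TIME cannot parse a bare date, so handle it separately.
                return LocalDate.parse(s, ISO_LOCAL_DATE).atStartOfDay().toInstant(ZoneOffset.UTC).toEpochMilli();
            }
            return ISO_OFFSET_DATE_TIME.parse(s, Instant::from).toEpochMilli();
        }

        public static void main(final String[] args) {
            System.out.println(parseToEpochMillis("1970-01-02"));                // 86400000
            System.out.println(parseToEpochMillis("1970-01-01T00:00:01+00:00")); // 1000
        }
    }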
+ */ + +package org.bson.json; + +class ExtendedJsonDateTimeConverter implements Converter { + @Override + public void convert(final Long value, final StrictJsonWriter writer) { + writer.writeStartObject(); + writer.writeStartObject("$date"); + writer.writeString("$numberLong", Long.toString(value)); + writer.writeEndObject(); + writer.writeEndObject(); + } + +} diff --git a/bson/src/main/org/bson/json/ExtendedJsonDecimal128Converter.java b/bson/src/main/org/bson/json/ExtendedJsonDecimal128Converter.java new file mode 100644 index 00000000000..f42d701d705 --- /dev/null +++ b/bson/src/main/org/bson/json/ExtendedJsonDecimal128Converter.java @@ -0,0 +1,29 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +import org.bson.types.Decimal128; + +class ExtendedJsonDecimal128Converter implements Converter { + @Override + public void convert(final Decimal128 value, final StrictJsonWriter writer) { + writer.writeStartObject(); + writer.writeName("$numberDecimal"); + writer.writeString(value.toString()); + writer.writeEndObject(); + } +} diff --git a/bson/src/main/org/bson/json/ExtendedJsonDoubleConverter.java b/bson/src/main/org/bson/json/ExtendedJsonDoubleConverter.java new file mode 100644 index 00000000000..1ad0db0ec1b --- /dev/null +++ b/bson/src/main/org/bson/json/ExtendedJsonDoubleConverter.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +class ExtendedJsonDoubleConverter implements Converter { + @Override + public void convert(final Double value, final StrictJsonWriter writer) { + writer.writeStartObject(); + writer.writeName("$numberDouble"); + writer.writeString(Double.toString(value)); + writer.writeEndObject(); + + } +} diff --git a/bson/src/main/org/bson/json/ExtendedJsonInt32Converter.java b/bson/src/main/org/bson/json/ExtendedJsonInt32Converter.java new file mode 100644 index 00000000000..64f30294fcd --- /dev/null +++ b/bson/src/main/org/bson/json/ExtendedJsonInt32Converter.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
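Taken together, converters like the ones above produce the canonical Extended JSON shapes. A quick way to see them end to end is through the public JsonWriterSettings API; this is a sketch, and the output shown in the comment is what canonical mode is expected to emit:

    import org.bson.BsonDateTime;
    import org.bson.BsonDocument;
    import org.bson.BsonDouble;
    import org.bson.json.JsonMode;
    import org.bson.json.JsonWriterSettings;

    public final class CanonicalShapes {
        public static void main(final String[] args) {
            BsonDocument doc = new BsonDocument()
                    .append("when", new BsonDateTime(0))
                    .append("pi", new BsonDouble(3.14));

            JsonWriterSettings canonical = JsonWriterSettings.builder()
                    .outputMode(JsonMode.EXTENDED)
                    .build();

            // Expected: {"when": {"$date": {"$numberLong": "0"}}, "pi": {"$numberDouble": "3.14"}}
            System.out.println(doc.toJson(canonical));
        }
    }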
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +class ExtendedJsonInt32Converter implements Converter { + @Override + public void convert(final Integer value, final StrictJsonWriter writer) { + writer.writeStartObject(); + writer.writeName("$numberInt"); + writer.writeString(Integer.toString(value)); + writer.writeEndObject(); + } +} diff --git a/bson/src/main/org/bson/json/ExtendedJsonInt64Converter.java b/bson/src/main/org/bson/json/ExtendedJsonInt64Converter.java new file mode 100644 index 00000000000..ce0c1b959a0 --- /dev/null +++ b/bson/src/main/org/bson/json/ExtendedJsonInt64Converter.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +class ExtendedJsonInt64Converter implements Converter { + @Override + public void convert(final Long value, final StrictJsonWriter writer) { + writer.writeStartObject(); + writer.writeName("$numberLong"); + writer.writeString(Long.toString(value)); + writer.writeEndObject(); + } +} diff --git a/bson/src/main/org/bson/json/ExtendedJsonMaxKeyConverter.java b/bson/src/main/org/bson/json/ExtendedJsonMaxKeyConverter.java new file mode 100644 index 00000000000..c5ce321b308 --- /dev/null +++ b/bson/src/main/org/bson/json/ExtendedJsonMaxKeyConverter.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +import org.bson.BsonMaxKey; + +class ExtendedJsonMaxKeyConverter implements Converter { + @Override + public void convert(final BsonMaxKey value, final StrictJsonWriter writer) { + writer.writeStartObject(); + writer.writeNumber("$maxKey", "1"); + writer.writeEndObject(); + } +} diff --git a/bson/src/main/org/bson/json/ExtendedJsonMinKeyConverter.java b/bson/src/main/org/bson/json/ExtendedJsonMinKeyConverter.java new file mode 100644 index 00000000000..f21f69309ed --- /dev/null +++ b/bson/src/main/org/bson/json/ExtendedJsonMinKeyConverter.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
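ExtendedJsonInt64Converter above writes its value as a string inside $numberLong rather than as a bare JSON number. The reason is that JSON consumers commonly parse numbers as doubles, and a double cannot represent every 64-bit integer, as this two-line demonstration shows:

    // Why $numberLong carries its value as a string.
    public final class LongPrecision {
        public static void main(final String[] args) {
            long big = (1L << 53) + 1;           // 9007199254740993
            double asDouble = (double) big;      // what a numeric JSON field becomes
            System.out.println(big);             // 9007199254740993
            System.out.println((long) asDouble); // 9007199254740992 -- off by one
        }
    }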
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +import org.bson.BsonMinKey; + +class ExtendedJsonMinKeyConverter implements Converter { + @Override + public void convert(final BsonMinKey value, final StrictJsonWriter writer) { + writer.writeStartObject(); + writer.writeNumber("$minKey", "1"); + writer.writeEndObject(); + } +} diff --git a/bson/src/main/org/bson/json/ExtendedJsonObjectIdConverter.java b/bson/src/main/org/bson/json/ExtendedJsonObjectIdConverter.java new file mode 100644 index 00000000000..be0ed8af348 --- /dev/null +++ b/bson/src/main/org/bson/json/ExtendedJsonObjectIdConverter.java @@ -0,0 +1,29 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +import org.bson.types.ObjectId; + +class ExtendedJsonObjectIdConverter implements Converter { + + @Override + public void convert(final ObjectId value, final StrictJsonWriter writer) { + writer.writeStartObject(); + writer.writeString("$oid", value.toHexString()); + writer.writeEndObject(); + } +} diff --git a/bson/src/main/org/bson/json/ExtendedJsonRegularExpressionConverter.java b/bson/src/main/org/bson/json/ExtendedJsonRegularExpressionConverter.java new file mode 100644 index 00000000000..588f46a83e0 --- /dev/null +++ b/bson/src/main/org/bson/json/ExtendedJsonRegularExpressionConverter.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
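The $oid form written by ExtendedJsonObjectIdConverter above is simply the 12-byte ObjectId rendered as 24 lowercase hex characters via toHexString(); for example:

    import org.bson.types.ObjectId;

    public final class ObjectIdHex {
        public static void main(final String[] args) {
            ObjectId id = new ObjectId();
            String hex = id.toHexString();
            System.out.println(hex);          // e.g. "66c6f2..." -- 24 hex characters
            System.out.println(hex.length()); // 24
        }
    }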
+ */ + +package org.bson.json; + +import org.bson.BsonRegularExpression; + +class ExtendedJsonRegularExpressionConverter implements Converter { + @Override + public void convert(final BsonRegularExpression value, final StrictJsonWriter writer) { + writer.writeStartObject(); + writer.writeStartObject("$regularExpression"); + writer.writeString("pattern", value.getPattern()); + writer.writeString("options", value.getOptions()); + writer.writeEndObject(); + writer.writeEndObject(); + } +} diff --git a/bson/src/main/org/bson/json/ExtendedJsonTimestampConverter.java b/bson/src/main/org/bson/json/ExtendedJsonTimestampConverter.java new file mode 100644 index 00000000000..d664a60dc37 --- /dev/null +++ b/bson/src/main/org/bson/json/ExtendedJsonTimestampConverter.java @@ -0,0 +1,34 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +import org.bson.BsonTimestamp; + +import static java.lang.Integer.toUnsignedLong; +import static java.lang.Long.toUnsignedString; + +class ExtendedJsonTimestampConverter implements Converter { + @Override + public void convert(final BsonTimestamp value, final StrictJsonWriter writer) { + writer.writeStartObject(); + writer.writeStartObject("$timestamp"); + writer.writeNumber("t", toUnsignedString(toUnsignedLong(value.getTime()))); + writer.writeNumber("i", toUnsignedString(toUnsignedLong(value.getInc()))); + writer.writeEndObject(); + writer.writeEndObject(); + } +} diff --git a/bson/src/main/org/bson/json/ExtendedJsonUndefinedConverter.java b/bson/src/main/org/bson/json/ExtendedJsonUndefinedConverter.java new file mode 100644 index 00000000000..196e21e816a --- /dev/null +++ b/bson/src/main/org/bson/json/ExtendedJsonUndefinedConverter.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +import org.bson.BsonUndefined; + +class ExtendedJsonUndefinedConverter implements Converter { + @Override + public void convert(final BsonUndefined value, final StrictJsonWriter writer) { + writer.writeStartObject(); + writer.writeBoolean("$undefined", true); + writer.writeEndObject(); + } +} diff --git a/bson/src/main/org/bson/json/JsonBooleanConverter.java b/bson/src/main/org/bson/json/JsonBooleanConverter.java new file mode 100644 index 00000000000..9d97d7b7969 --- /dev/null +++ b/bson/src/main/org/bson/json/JsonBooleanConverter.java @@ -0,0 +1,24 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
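ExtendedJsonTimestampConverter above routes the t and i fields through toUnsignedLong before printing. BsonTimestamp stores them as signed Java ints, but the BSON timestamp fields are unsigned 32-bit values, so a plain Integer.toString would render large values as negatives:

    // Why the converter widens before printing.
    public final class UnsignedTimestampFields {
        public static void main(final String[] args) {
            int time = 0xFFFFFFFE; // the unsigned value 4294967294
            System.out.println(time);                                             // -2 as a signed int
            System.out.println(Long.toUnsignedString(Integer.toUnsignedLong(time))); // 4294967294
        }
    }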
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +class JsonBooleanConverter implements Converter { + @Override + public void convert(final Boolean value, final StrictJsonWriter writer) { + writer.writeBoolean(value); + } +} diff --git a/bson/src/main/org/bson/json/JsonBuffer.java b/bson/src/main/org/bson/json/JsonBuffer.java new file mode 100644 index 00000000000..2db6c116238 --- /dev/null +++ b/bson/src/main/org/bson/json/JsonBuffer.java @@ -0,0 +1,32 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +interface JsonBuffer { + + int getPosition(); + + int read(); + + void unread(int c); + + int mark(); + + void reset(int markPos); + + void discard(int markPos); +} diff --git a/bson/src/main/org/bson/json/JsonDoubleConverter.java b/bson/src/main/org/bson/json/JsonDoubleConverter.java new file mode 100644 index 00000000000..26b46ab89d5 --- /dev/null +++ b/bson/src/main/org/bson/json/JsonDoubleConverter.java @@ -0,0 +1,24 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +class JsonDoubleConverter implements Converter { + @Override + public void convert(final Double value, final StrictJsonWriter writer) { + writer.writeNumber(Double.toString(value)); + } +} diff --git a/bson/src/main/org/bson/json/JsonInt32Converter.java b/bson/src/main/org/bson/json/JsonInt32Converter.java new file mode 100644 index 00000000000..fc555e421f6 --- /dev/null +++ b/bson/src/main/org/bson/json/JsonInt32Converter.java @@ -0,0 +1,24 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
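The package-private JsonBuffer contract above (read/unread plus mark/reset/discard) is what lets the scanner backtrack while tokenizing. A minimal string-backed sketch of the contract, illustrative only; the driver also has a Reader-backed implementation:

    final class StringJsonBuffer {
        private final String input;
        private int position;

        StringJsonBuffer(final String input) {
            this.input = input;
        }

        int getPosition() {
            return position;
        }

        int read() { // -1 signals end of input, as with java.io.Reader
            return position < input.length() ? input.charAt(position++) : -1;
        }

        void unread(final int c) {
            if (c != -1) {
                position--; // caller hands back the character it just read
            }
        }

        int mark() {
            return position; // remember a spot to rewind to
        }

        void reset(final int markPos) {
            position = markPos;
        }

        void discard(final int markPos) {
            // nothing to release when the whole input is already in memory
        }

        public static void main(final String[] args) {
            StringJsonBuffer buffer = new StringJsonBuffer("123");
            int mark = buffer.mark();
            buffer.read();
            buffer.read();
            buffer.reset(mark);
            System.out.println((char) buffer.read()); // 1
        }
    }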
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +class JsonInt32Converter implements Converter { + @Override + public void convert(final Integer value, final StrictJsonWriter writer) { + writer.writeNumber(Integer.toString(value)); + } +} diff --git a/bson/src/main/org/bson/json/JsonJavaScriptConverter.java b/bson/src/main/org/bson/json/JsonJavaScriptConverter.java new file mode 100644 index 00000000000..437b8b09f95 --- /dev/null +++ b/bson/src/main/org/bson/json/JsonJavaScriptConverter.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +class JsonJavaScriptConverter implements Converter { + @Override + public void convert(final String value, final StrictJsonWriter writer) { + writer.writeStartObject(); + writer.writeString("$code", value); + writer.writeEndObject(); + } +} diff --git a/bson/src/main/org/bson/json/JsonMode.java b/bson/src/main/org/bson/json/JsonMode.java new file mode 100644 index 00000000000..7b5a99d6551 --- /dev/null +++ b/bson/src/main/org/bson/json/JsonMode.java @@ -0,0 +1,57 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +/** + * An enumeration of the supported output modes of {@code JSONWriter}. + * + * @see JsonWriter + * @since 3.0 + */ +public enum JsonMode { + + /** + * Strict mode representations of BSON types conform to the
JSON RFC spec. + * + * @deprecated The format generated with this mode is no longer considered standard for MongoDB tools. This value is not currently + * scheduled for removal. + */ + @Deprecated // NOT CURRENTLY INTENDED FOR REMOVAL + STRICT, + + /** + * While not formally documented, this output mode will attempt to produce output that corresponds to what the MongoDB shell actually + * produces when showing query results. + */ + SHELL, + + /** + * Standard extended JSON representation. + * + * @since 3.5 + * @see Extended JSON Specification + */ + EXTENDED, + + /** + * Standard relaxed extended JSON representation. + * + * @since 3.5 + * @see Extended JSON Specification + */ + RELAXED +} diff --git a/bson/src/main/org/bson/json/JsonNullConverter.java b/bson/src/main/org/bson/json/JsonNullConverter.java new file mode 100644 index 00000000000..b5208018c4e --- /dev/null +++ b/bson/src/main/org/bson/json/JsonNullConverter.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +import org.bson.BsonNull; + +class JsonNullConverter implements Converter { + @Override + public void convert(final BsonNull value, final StrictJsonWriter writer) { + writer.writeNull(); + } +} diff --git a/bson/src/main/org/bson/json/JsonObject.java b/bson/src/main/org/bson/json/JsonObject.java new file mode 100644 index 00000000000..5bb8b746f9f --- /dev/null +++ b/bson/src/main/org/bson/json/JsonObject.java @@ -0,0 +1,103 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +import org.bson.BsonDocument; +import org.bson.BsonDocumentWrapper; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import static org.bson.assertions.Assertions.isTrueArgument; +import static org.bson.assertions.Assertions.notNull; + +/** + * A wrapper class that holds a JSON object string. This class makes decoding JSON efficient. + * Note that this class only holds valid JSON objects, not arrays or other values. + * + * @since 4.2 + */ +public class JsonObject implements Bson { + private final String json; + + /** + * Constructs a new instance with the given JSON object string. Clients must ensure + * they only pass in valid JSON objects to this constructor. The constructor does not + * perform full validation on construction, but an invalid JsonObject can cause errors + * when it is used later on. 
+ * + * @param json the JSON object string + */ + public JsonObject(final String json) { + notNull("Json", json); + + boolean foundBrace = false; + for (int i = 0; i < json.length(); i++) { + char c = json.charAt(i); + if (c == '{') { + foundBrace = true; + break; + } + isTrueArgument("json is a valid JSON object", Character.isWhitespace(c)); + } + isTrueArgument("json is a valid JSON object", foundBrace); + + this.json = json; + } + + /** + * Gets the JSON object string + * + * @return the JSON object string + */ + public String getJson() { + return json; + } + + @Override + public BsonDocument toBsonDocument(final Class documentClass, final CodecRegistry registry) { + return new BsonDocumentWrapper<>(this, registry.get(JsonObject.class)); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + + if (o == null || getClass() != o.getClass()) { + return false; + } + + JsonObject that = (JsonObject) o; + + if (!json.equals(that.getJson())) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return json.hashCode(); + } + + @Override + public String toString() { + return json; + } +} diff --git a/bson/src/main/org/bson/json/JsonParseException.java b/bson/src/main/org/bson/json/JsonParseException.java new file mode 100644 index 00000000000..93c2efd85fa --- /dev/null +++ b/bson/src/main/org/bson/json/JsonParseException.java @@ -0,0 +1,77 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + + +import static java.lang.String.format; + +/** + * JSONParseException indicates some exception happened during JSON processing. + * + * @since 3.0 + */ +public class JsonParseException extends RuntimeException { + + + private static final long serialVersionUID = -6722022620020198727L; + + /** + * Constructs a new runtime exception with null as its detail message. + */ + public JsonParseException() { + } + + /** + * Constructs a new runtime exception with the specified detail message. + * + * @param s The detail message. + */ + public JsonParseException(final String s) { + super(s); + } + + + /** + * Constructs a new runtime exception with string formatted using specified pattern and arguments. + * + * @param pattern A {@link java.util.Formatter format string}. + * @param args the arguments to insert into the pattern String + */ + public JsonParseException(final String pattern, final Object... args) { + super(format(pattern, args)); + } + + /** + * Constructs a new runtime exception with the specified detail message and root cause. + * + * @param s The detail message + * @param t the throwable root cause + * @since 4.2 + */ + public JsonParseException(final String s, final Throwable t) { + super(s, t); + } + + /** + * Create a JSONParseException with the given {@link Throwable} cause. 
+ * + * @param t the throwable root cause + */ + public JsonParseException(final Throwable t) { + super(t); + } +} diff --git a/bson/src/main/org/bson/json/JsonReader.java b/bson/src/main/org/bson/json/JsonReader.java new file mode 100644 index 00000000000..0884ebb7879 --- /dev/null +++ b/bson/src/main/org/bson/json/JsonReader.java @@ -0,0 +1,1440 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + + +import org.bson.AbstractBsonReader; +import org.bson.BsonBinary; +import org.bson.BsonBinarySubType; +import org.bson.BsonContextType; +import org.bson.BsonDbPointer; +import org.bson.BsonInvalidOperationException; +import org.bson.BsonReaderMark; +import org.bson.BsonRegularExpression; +import org.bson.BsonTimestamp; +import org.bson.BsonType; +import org.bson.BsonUndefined; +import org.bson.types.Decimal128; +import org.bson.types.MaxKey; +import org.bson.types.MinKey; +import org.bson.types.ObjectId; + +import java.io.Reader; +import java.text.DateFormat; +import java.text.ParsePosition; +import java.text.SimpleDateFormat; +import java.time.format.DateTimeParseException; +import java.util.Base64; +import java.util.Calendar; +import java.util.Date; +import java.util.Locale; +import java.util.TimeZone; +import java.util.UUID; + +import static java.lang.String.format; + + +/** + * Reads a JSON in one of the following modes: + *
<ul> + * <li>Strict mode that conforms to the JSON RFC specifications.</li> + * <li>JavaScript mode that most JavaScript interpreters can process</li> + * <li>Shell mode that the mongo shell can process. This is also called "extended" JavaScript format.</li> + * </ul>
+ * For more information about these modes please see + * <a href="https://www.mongodb.com/docs/manual/reference/mongodb-extended-json/"> + * https://www.mongodb.com/docs/manual/reference/mongodb-extended-json/ + * </a> + * + * @since 3.0 + */ +public class JsonReader extends AbstractBsonReader { + + private final JsonScanner scanner; + private JsonToken pushedToken; + private Object currentValue; + + /** + * Constructs a new instance with the given string positioned at a JSON object. + * + * @param json A string representation of a JSON object. + */ + public JsonReader(final String json) { + this(new JsonScanner(json)); + } + + /** + * Constructs a new instance with the given {@code Reader} positioned at a JSON object. + * + *
<p> + * The application is responsible for closing the {@code Reader}. + * </p>
+ * + * @param reader A reader representation of a JSON object. + * @since 3.11 + */ + public JsonReader(final Reader reader) { + this(new JsonScanner(reader)); + } + + private JsonReader(final JsonScanner scanner) { + this.scanner = scanner; + setContext(new Context(null, BsonContextType.TOP_LEVEL)); + } + + @Override + protected BsonBinary doReadBinaryData() { + return (BsonBinary) currentValue; + } + + @Override + protected byte doPeekBinarySubType() { + return doReadBinaryData().getType(); + } + + @Override + protected int doPeekBinarySize() { + return doReadBinaryData().getData().length; + } + + @Override + protected boolean doReadBoolean() { + return (Boolean) currentValue; + } + + //CHECKSTYLE:OFF + @Override + public BsonType readBsonType() { + if (isClosed()) { + throw new IllegalStateException("This instance has been closed"); + } + if (getState() == State.INITIAL || getState() == State.DONE || getState() == State.SCOPE_DOCUMENT) { + // in JSON the top level value can be of any type so fall through + setState(State.TYPE); + } + if (getState() != State.TYPE) { + throwInvalidState("readBSONType", State.TYPE); + } + + if (getContext().getContextType() == BsonContextType.DOCUMENT) { + JsonToken nameToken = popToken(); + switch (nameToken.getType()) { + case STRING: + case UNQUOTED_STRING: + setCurrentName(nameToken.getValue(String.class)); + break; + case END_OBJECT: + setState(State.END_OF_DOCUMENT); + return BsonType.END_OF_DOCUMENT; + default: + throw new JsonParseException("JSON reader was expecting a name but found '%s'.", nameToken.getValue()); + } + + JsonToken colonToken = popToken(); + if (colonToken.getType() != JsonTokenType.COLON) { + throw new JsonParseException("JSON reader was expecting ':' but found '%s'.", colonToken.getValue()); + } + } + + JsonToken token = popToken(); + if (getContext().getContextType() == BsonContextType.ARRAY && token.getType() == JsonTokenType.END_ARRAY) { + setState(State.END_OF_ARRAY); + return BsonType.END_OF_DOCUMENT; + } + + boolean noValueFound = false; + switch (token.getType()) { + case BEGIN_ARRAY: + setCurrentBsonType(BsonType.ARRAY); + break; + case BEGIN_OBJECT: + visitExtendedJSON(); + break; + case DOUBLE: + setCurrentBsonType(BsonType.DOUBLE); + currentValue = token.getValue(); + break; + case END_OF_FILE: + setCurrentBsonType(BsonType.END_OF_DOCUMENT); + break; + case INT32: + setCurrentBsonType(BsonType.INT32); + currentValue = token.getValue(); + break; + case INT64: + setCurrentBsonType(BsonType.INT64); + currentValue = token.getValue(); + break; + case REGULAR_EXPRESSION: + setCurrentBsonType(BsonType.REGULAR_EXPRESSION); + currentValue = token.getValue(); + break; + case STRING: + setCurrentBsonType(BsonType.STRING); + currentValue = token.getValue(); + break; + case UNQUOTED_STRING: + String value = token.getValue(String.class); + + if ("false".equals(value) || "true".equals(value)) { + setCurrentBsonType(BsonType.BOOLEAN); + currentValue = Boolean.parseBoolean(value); + } else if ("Infinity".equals(value)) { + setCurrentBsonType(BsonType.DOUBLE); + currentValue = Double.POSITIVE_INFINITY; + } else if ("NaN".equals(value)) { + setCurrentBsonType(BsonType.DOUBLE); + currentValue = Double.NaN; + } else if ("null".equals(value)) { + setCurrentBsonType(BsonType.NULL); + } else if ("undefined".equals(value)) { + setCurrentBsonType(BsonType.UNDEFINED); + } else if ("MinKey".equals(value)) { + visitEmptyConstructor(); + setCurrentBsonType(BsonType.MIN_KEY); + currentValue = new MinKey(); + } else if ("MaxKey".equals(value)) { + 
visitEmptyConstructor(); + setCurrentBsonType(BsonType.MAX_KEY); + currentValue = new MaxKey(); + } else if ("BinData".equals(value)) { + setCurrentBsonType(BsonType.BINARY); + currentValue = visitBinDataConstructor(); + } else if ("Date".equals(value)) { + currentValue = visitDateTimeConstructorWithOutNew(); + setCurrentBsonType(BsonType.STRING); + } else if ("HexData".equals(value)) { + setCurrentBsonType(BsonType.BINARY); + currentValue = visitHexDataConstructor(); + } else if ("ISODate".equals(value)) { + setCurrentBsonType(BsonType.DATE_TIME); + currentValue = visitISODateTimeConstructor(); + } else if ("NumberInt".equals(value)) { + setCurrentBsonType(BsonType.INT32); + currentValue = visitNumberIntConstructor(); + } else if ("NumberLong".equals(value)) { + setCurrentBsonType(BsonType.INT64); + currentValue = visitNumberLongConstructor(); + } else if ("NumberDecimal".equals(value)) { + setCurrentBsonType(BsonType.DECIMAL128); + currentValue = visitNumberDecimalConstructor(); + } else if ("ObjectId".equals(value)) { + setCurrentBsonType(BsonType.OBJECT_ID); + currentValue = visitObjectIdConstructor(); + } else if ("Timestamp".equals(value)) { + setCurrentBsonType(BsonType.TIMESTAMP); + currentValue = visitTimestampConstructor(); + } else if ("RegExp".equals(value)) { + setCurrentBsonType(BsonType.REGULAR_EXPRESSION); + currentValue = visitRegularExpressionConstructor(); + } else if ("DBPointer".equals(value)) { + setCurrentBsonType(BsonType.DB_POINTER); + currentValue = visitDBPointerConstructor(); + } else if ("UUID".equals(value)) { + setCurrentBsonType(BsonType.BINARY); + currentValue = visitUUIDConstructor(); + } else if ("new".equals(value)) { + visitNew(); + } else { + noValueFound = true; + } + break; + default: + noValueFound = true; + break; + } + if (noValueFound) { + throw new JsonParseException("JSON reader was expecting a value but found '%s'.", token.getValue()); + } + + if (getContext().getContextType() == BsonContextType.ARRAY || getContext().getContextType() == BsonContextType.DOCUMENT) { + JsonToken commaToken = popToken(); + if (commaToken.getType() != JsonTokenType.COMMA) { + pushToken(commaToken); + } + } + + switch (getContext().getContextType()) { + case DOCUMENT: + case SCOPE_DOCUMENT: + default: + setState(State.NAME); + break; + case ARRAY: + case JAVASCRIPT_WITH_SCOPE: + case TOP_LEVEL: + setState(State.VALUE); + break; + } + return getCurrentBsonType(); + } + //CHECKSTYLE:ON + + @Override + public Decimal128 doReadDecimal128() { + return (Decimal128) currentValue; + } + + @Override + protected long doReadDateTime() { + return (Long) currentValue; + } + + @Override + protected double doReadDouble() { + return (Double) currentValue; + } + + @Override + protected void doReadEndArray() { + setContext(getContext().getParentContext()); + + if (getContext().getContextType() == BsonContextType.ARRAY || getContext().getContextType() == BsonContextType.DOCUMENT) { + JsonToken commaToken = popToken(); + if (commaToken.getType() != JsonTokenType.COMMA) { + pushToken(commaToken); + } + } + } + + @Override + protected void doReadEndDocument() { + setContext(getContext().getParentContext()); + if (getContext() != null && getContext().getContextType() == BsonContextType.SCOPE_DOCUMENT) { + setContext(getContext().getParentContext()); // JavaScriptWithScope + verifyToken(JsonTokenType.END_OBJECT); // outermost closing bracket for JavaScriptWithScope + } + + if (getContext() == null) { + throw new JsonParseException("Unexpected end of document."); + } + + if 
(getContext().getContextType() == BsonContextType.ARRAY || getContext().getContextType() == BsonContextType.DOCUMENT) { + JsonToken commaToken = popToken(); + if (commaToken.getType() != JsonTokenType.COMMA) { + pushToken(commaToken); + } + } + } + + @Override + protected int doReadInt32() { + return (Integer) currentValue; + } + + @Override + protected long doReadInt64() { + return (Long) currentValue; + } + + @Override + protected String doReadJavaScript() { + return (String) currentValue; + } + + @Override + protected String doReadJavaScriptWithScope() { + return (String) currentValue; + } + + @Override + protected void doReadMaxKey() { + } + + @Override + protected void doReadMinKey() { + } + + @Override + protected void doReadNull() { + } + + @Override + protected ObjectId doReadObjectId() { + return (ObjectId) currentValue; + } + + @Override + protected BsonRegularExpression doReadRegularExpression() { + return (BsonRegularExpression) currentValue; + } + + @Override + protected BsonDbPointer doReadDBPointer() { + return (BsonDbPointer) currentValue; + } + + @Override + protected void doReadStartArray() { + setContext(new Context(getContext(), BsonContextType.ARRAY)); + } + + @Override + protected void doReadStartDocument() { + setContext(new Context(getContext(), BsonContextType.DOCUMENT)); + } + + @Override + protected String doReadString() { + return (String) currentValue; + } + + @Override + protected String doReadSymbol() { + return (String) currentValue; + } + + @Override + protected BsonTimestamp doReadTimestamp() { + return (BsonTimestamp) currentValue; + } + + @Override + protected void doReadUndefined() { + } + + @Override + protected void doSkipName() { + } + + @Override + protected void doSkipValue() { + switch (getCurrentBsonType()) { + case ARRAY: + readStartArray(); + while (readBsonType() != BsonType.END_OF_DOCUMENT) { + skipValue(); + } + readEndArray(); + break; + case BINARY: + readBinaryData(); + break; + case BOOLEAN: + readBoolean(); + break; + case DATE_TIME: + readDateTime(); + break; + case DOCUMENT: + readStartDocument(); + while (readBsonType() != BsonType.END_OF_DOCUMENT) { + skipName(); + skipValue(); + } + readEndDocument(); + break; + case DOUBLE: + readDouble(); + break; + case INT32: + readInt32(); + break; + case INT64: + readInt64(); + break; + case DECIMAL128: + readDecimal128(); + break; + case JAVASCRIPT: + readJavaScript(); + break; + case JAVASCRIPT_WITH_SCOPE: + readJavaScriptWithScope(); + readStartDocument(); + while (readBsonType() != BsonType.END_OF_DOCUMENT) { + skipName(); + skipValue(); + } + readEndDocument(); + break; + case MAX_KEY: + readMaxKey(); + break; + case MIN_KEY: + readMinKey(); + break; + case NULL: + readNull(); + break; + case OBJECT_ID: + readObjectId(); + break; + case REGULAR_EXPRESSION: + readRegularExpression(); + break; + case STRING: + readString(); + break; + case SYMBOL: + readSymbol(); + break; + case TIMESTAMP: + readTimestamp(); + break; + case UNDEFINED: + readUndefined(); + break; + default: + } + } + + private JsonToken popToken() { + if (pushedToken != null) { + JsonToken token = pushedToken; + pushedToken = null; + return token; + } else { + return scanner.nextToken(); + } + } + + private void pushToken(final JsonToken token) { + if (pushedToken == null) { + pushedToken = token; + } else { + throw new BsonInvalidOperationException("There is already a pending token."); + } + } + + private void verifyToken(final JsonTokenType expectedType) { + JsonToken token = popToken(); + if (expectedType != 
token.getType()) { + throw new JsonParseException("JSON reader expected token type '%s' but found '%s'.", expectedType, token.getValue()); + } + } + + private void verifyToken(final JsonTokenType expectedType, final Object expectedValue) { + JsonToken token = popToken(); + if (expectedType != token.getType()) { + throw new JsonParseException("JSON reader expected token type '%s' but found '%s'.", expectedType, token.getValue()); + } + if (!expectedValue.equals(token.getValue())) { + throw new JsonParseException("JSON reader expected '%s' but found '%s'.", expectedValue, token.getValue()); + } + } + + private void verifyString(final String expected) { + if (expected == null) { + throw new IllegalArgumentException("Can't be null"); + } + + JsonToken token = popToken(); + JsonTokenType type = token.getType(); + + if ((type != JsonTokenType.STRING && type != JsonTokenType.UNQUOTED_STRING) || !expected.equals(token.getValue())) { + throw new JsonParseException("JSON reader expected '%s' but found '%s'.", expected, token.getValue()); + } + } + + private void visitNew() { + JsonToken typeToken = popToken(); + if (typeToken.getType() != JsonTokenType.UNQUOTED_STRING) { + throw new JsonParseException("JSON reader expected a type name but found '%s'.", typeToken.getValue()); + } + + String value = typeToken.getValue(String.class); + + if ("MinKey".equals(value)) { + visitEmptyConstructor(); + setCurrentBsonType(BsonType.MIN_KEY); + currentValue = new MinKey(); + } else if ("MaxKey".equals(value)) { + visitEmptyConstructor(); + setCurrentBsonType(BsonType.MAX_KEY); + currentValue = new MaxKey(); + } else if ("BinData".equals(value)) { + currentValue = visitBinDataConstructor(); + setCurrentBsonType(BsonType.BINARY); + } else if ("Date".equals(value)) { + currentValue = visitDateTimeConstructor(); + setCurrentBsonType(BsonType.DATE_TIME); + } else if ("HexData".equals(value)) { + currentValue = visitHexDataConstructor(); + setCurrentBsonType(BsonType.BINARY); + } else if ("ISODate".equals(value)) { + currentValue = visitISODateTimeConstructor(); + setCurrentBsonType(BsonType.DATE_TIME); + } else if ("NumberInt".equals(value)) { + currentValue = visitNumberIntConstructor(); + setCurrentBsonType(BsonType.INT32); + } else if ("NumberLong".equals(value)) { + currentValue = visitNumberLongConstructor(); + setCurrentBsonType(BsonType.INT64); + } else if ("NumberDecimal".equals(value)) { + currentValue = visitNumberDecimalConstructor(); + setCurrentBsonType(BsonType.DECIMAL128); + } else if ("ObjectId".equals(value)) { + currentValue = visitObjectIdConstructor(); + setCurrentBsonType(BsonType.OBJECT_ID); + } else if ("RegExp".equals(value)) { + currentValue = visitRegularExpressionConstructor(); + setCurrentBsonType(BsonType.REGULAR_EXPRESSION); + } else if ("DBPointer".equals(value)) { + currentValue = visitDBPointerConstructor(); + setCurrentBsonType(BsonType.DB_POINTER); + } else if ("UUID".equals(value)) { + currentValue = visitUUIDConstructor(); + setCurrentBsonType(BsonType.BINARY); + } else { + throw new JsonParseException("JSON reader expected a type name but found '%s'.", value); + } + } + + private void visitExtendedJSON() { + JsonToken nameToken = popToken(); + String value = nameToken.getValue(String.class); + JsonTokenType type = nameToken.getType(); + + if (type == JsonTokenType.STRING || type == JsonTokenType.UNQUOTED_STRING) { + + if ("$binary".equals(value) || "$type".equals(value)) { + currentValue = visitBinDataExtendedJson(value); + if (currentValue != null) { + 
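+ // Only commit to BINARY when parsing succeeded; visitBinDataExtendedJson returns null on a near-miss so the object can be re-read as a plain document at the end of this method.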
setCurrentBsonType(BsonType.BINARY); + return; + } + } else if ("$uuid".equals(value)) { + currentValue = visitUuidExtendedJson(); + setCurrentBsonType(BsonType.BINARY); + return; + } else if ("$regex".equals(value) || "$options".equals(value)) { + currentValue = visitRegularExpressionExtendedJson(value); + if (currentValue != null) { + setCurrentBsonType(BsonType.REGULAR_EXPRESSION); + return; + } + } else if ("$code".equals(value)) { + visitJavaScriptExtendedJson(); + return; + } else if ("$date".equals(value)) { + currentValue = visitDateTimeExtendedJson(); + setCurrentBsonType(BsonType.DATE_TIME); + return; + } else if ("$maxKey".equals(value)) { + currentValue = visitMaxKeyExtendedJson(); + setCurrentBsonType(BsonType.MAX_KEY); + return; + } else if ("$minKey".equals(value)) { + currentValue = visitMinKeyExtendedJson(); + setCurrentBsonType(BsonType.MIN_KEY); + return; + } else if ("$oid".equals(value)) { + currentValue = visitObjectIdExtendedJson(); + setCurrentBsonType(BsonType.OBJECT_ID); + return; + } else if ("$regularExpression".equals(value)) { + currentValue = visitNewRegularExpressionExtendedJson(); + setCurrentBsonType(BsonType.REGULAR_EXPRESSION); + return; + } else if ("$symbol".equals(value)) { + currentValue = visitSymbolExtendedJson(); + setCurrentBsonType(BsonType.SYMBOL); + return; + } else if ("$timestamp".equals(value)) { + currentValue = visitTimestampExtendedJson(); + setCurrentBsonType(BsonType.TIMESTAMP); + return; + } else if ("$undefined".equals(value)) { + currentValue = visitUndefinedExtendedJson(); + setCurrentBsonType(BsonType.UNDEFINED); + return; + } else if ("$numberLong".equals(value)) { + currentValue = visitNumberLongExtendedJson(); + setCurrentBsonType(BsonType.INT64); + return; + } else if ("$numberInt".equals(value)) { + currentValue = visitNumberIntExtendedJson(); + setCurrentBsonType(BsonType.INT32); + return; + } else if ("$numberDouble".equals(value)) { + currentValue = visitNumberDoubleExtendedJson(); + setCurrentBsonType(BsonType.DOUBLE); + return; + } else if ("$numberDecimal".equals(value)) { + currentValue = visitNumberDecimalExtendedJson(); + setCurrentBsonType(BsonType.DECIMAL128); + return; + } else if ("$dbPointer".equals(value)) { + currentValue = visitDbPointerExtendedJson(); + setCurrentBsonType(BsonType.DB_POINTER); + return; + } + } + + pushToken(nameToken); + setCurrentBsonType(BsonType.DOCUMENT); + } + + private void visitEmptyConstructor() { + JsonToken nextToken = popToken(); + if (nextToken.getType() == JsonTokenType.LEFT_PAREN) { + verifyToken(JsonTokenType.RIGHT_PAREN); + } else { + pushToken(nextToken); + } + } + + private BsonBinary visitBinDataConstructor() { + verifyToken(JsonTokenType.LEFT_PAREN); + JsonToken subTypeToken = popToken(); + if (subTypeToken.getType() != JsonTokenType.INT32) { + throw new JsonParseException("JSON reader expected a binary subtype but found '%s'.", subTypeToken.getValue()); + } + verifyToken(JsonTokenType.COMMA); + JsonToken bytesToken = popToken(); + if (bytesToken.getType() != JsonTokenType.UNQUOTED_STRING && bytesToken.getType() != JsonTokenType.STRING) { + throw new JsonParseException("JSON reader expected a string but found '%s'.", bytesToken.getValue()); + } + verifyToken(JsonTokenType.RIGHT_PAREN); + + byte[] bytes = Base64.getDecoder().decode(bytesToken.getValue(String.class)); + return new BsonBinary(subTypeToken.getValue(Integer.class).byteValue(), bytes); + } + + private BsonBinary visitUUIDConstructor() { + verifyToken(JsonTokenType.LEFT_PAREN); + String hexString = 
readStringFromExtendedJson().replace("-", ""); + verifyToken(JsonTokenType.RIGHT_PAREN); + return new BsonBinary(BsonBinarySubType.UUID_STANDARD, decodeHex(hexString)); + } + + private BsonRegularExpression visitRegularExpressionConstructor() { + verifyToken(JsonTokenType.LEFT_PAREN); + String pattern = readStringFromExtendedJson(); + String options = ""; + JsonToken commaToken = popToken(); + if (commaToken.getType() == JsonTokenType.COMMA) { + options = readStringFromExtendedJson(); + } else { + pushToken(commaToken); + } + verifyToken(JsonTokenType.RIGHT_PAREN); + return new BsonRegularExpression(pattern, options); + } + + private ObjectId visitObjectIdConstructor() { + verifyToken(JsonTokenType.LEFT_PAREN); + ObjectId objectId = new ObjectId(readStringFromExtendedJson()); + verifyToken(JsonTokenType.RIGHT_PAREN); + return objectId; + } + + private BsonTimestamp visitTimestampConstructor() { + verifyToken(JsonTokenType.LEFT_PAREN); + JsonToken timeToken = popToken(); + int time; + if (timeToken.getType() != JsonTokenType.INT32) { + throw new JsonParseException("JSON reader expected an integer but found '%s'.", timeToken.getValue()); + } else { + time = timeToken.getValue(Integer.class); + } + verifyToken(JsonTokenType.COMMA); + JsonToken incrementToken = popToken(); + int increment; + if (incrementToken.getType() != JsonTokenType.INT32) { + throw new JsonParseException("JSON reader expected an integer but found '%s'.", incrementToken.getValue()); + } else { + increment = incrementToken.getValue(Integer.class); + } + + verifyToken(JsonTokenType.RIGHT_PAREN); + return new BsonTimestamp(time, increment); + } + + private BsonDbPointer visitDBPointerConstructor() { + verifyToken(JsonTokenType.LEFT_PAREN); + String namespace = readStringFromExtendedJson(); + verifyToken(JsonTokenType.COMMA); + ObjectId id = new ObjectId(readStringFromExtendedJson()); + verifyToken(JsonTokenType.RIGHT_PAREN); + return new BsonDbPointer(namespace, id); + } + + private int visitNumberIntConstructor() { + verifyToken(JsonTokenType.LEFT_PAREN); + JsonToken valueToken = popToken(); + int value; + if (valueToken.getType() == JsonTokenType.INT32) { + value = valueToken.getValue(Integer.class); + } else if (valueToken.getType() == JsonTokenType.STRING) { + value = Integer.parseInt(valueToken.getValue(String.class)); + } else { + throw new JsonParseException("JSON reader expected an integer or a string but found '%s'.", valueToken.getValue()); + } + verifyToken(JsonTokenType.RIGHT_PAREN); + return value; + } + + private long visitNumberLongConstructor() { + verifyToken(JsonTokenType.LEFT_PAREN); + JsonToken valueToken = popToken(); + long value; + if (valueToken.getType() == JsonTokenType.INT32 || valueToken.getType() == JsonTokenType.INT64) { + value = valueToken.getValue(Long.class); + } else if (valueToken.getType() == JsonTokenType.STRING) { + value = Long.parseLong(valueToken.getValue(String.class)); + } else { + throw new JsonParseException("JSON reader expected an integer or a string but found '%s'.", valueToken.getValue()); + } + verifyToken(JsonTokenType.RIGHT_PAREN); + return value; + } + + private Decimal128 visitNumberDecimalConstructor() { + verifyToken(JsonTokenType.LEFT_PAREN); + JsonToken valueToken = popToken(); + Decimal128 value; + if (valueToken.getType() == JsonTokenType.INT32 || valueToken.getType() == JsonTokenType.INT64 + || valueToken.getType() == JsonTokenType.DOUBLE) { + value = valueToken.getValue(Decimal128.class); + } else if (valueToken.getType() == JsonTokenType.STRING) { + value = 
Decimal128.parse(valueToken.getValue(String.class)); + } else { + throw new JsonParseException("JSON reader expected a number or a string but found '%s'.", valueToken.getValue()); + } + verifyToken(JsonTokenType.RIGHT_PAREN); + return value; + } + + private long visitISODateTimeConstructor() { + verifyToken(JsonTokenType.LEFT_PAREN); + + JsonToken token = popToken(); + if (token.getType() == JsonTokenType.RIGHT_PAREN) { + return new Date().getTime(); + } else if (token.getType() != JsonTokenType.STRING) { + throw new JsonParseException("JSON reader expected a string but found '%s'.", token.getValue()); + } + + verifyToken(JsonTokenType.RIGHT_PAREN); + + String dateTimeString = token.getValue(String.class); + + try { + return DateTimeFormatter.parse(dateTimeString); + } catch (DateTimeParseException e) { + throw new JsonParseException("Failed to parse string as a date: " + dateTimeString, e); + } + } + + private BsonBinary visitHexDataConstructor() { + verifyToken(JsonTokenType.LEFT_PAREN); + JsonToken subTypeToken = popToken(); + if (subTypeToken.getType() != JsonTokenType.INT32) { + throw new JsonParseException("JSON reader expected a binary subtype but found '%s'.", subTypeToken.getValue()); + } + verifyToken(JsonTokenType.COMMA); + String hex = readStringFromExtendedJson(); + verifyToken(JsonTokenType.RIGHT_PAREN); + + if ((hex.length() & 1) != 0) { + hex = "0" + hex; + } + + for (final BsonBinarySubType subType : BsonBinarySubType.values()) { + if (subType.getValue() == subTypeToken.getValue(Integer.class)) { + return new BsonBinary(subType, decodeHex(hex)); + } + } + return new BsonBinary(decodeHex(hex)); + } + + private long visitDateTimeConstructor() { + DateFormat format = new SimpleDateFormat("EEE MMM dd yyyy HH:mm:ss z", Locale.ENGLISH); + + verifyToken(JsonTokenType.LEFT_PAREN); + + JsonToken token = popToken(); + if (token.getType() == JsonTokenType.RIGHT_PAREN) { + return new Date().getTime(); + } else if (token.getType() == JsonTokenType.STRING) { + verifyToken(JsonTokenType.RIGHT_PAREN); + String s = token.getValue(String.class); + ParsePosition pos = new ParsePosition(0); + Date dateTime = format.parse(s, pos); + if (dateTime != null && pos.getIndex() == s.length()) { + return dateTime.getTime(); + } else { + throw new JsonParseException("JSON reader expected a date in 'EEE MMM dd yyyy HH:mm:ss z' format but found '%s'.", s); + } + + } else if (token.getType() == JsonTokenType.INT32 || token.getType() == JsonTokenType.INT64) { + long[] values = new long[7]; + int pos = 0; + while (true) { + if (pos < values.length) { + values[pos++] = token.getValue(Long.class); + } + token = popToken(); + if (token.getType() == JsonTokenType.RIGHT_PAREN) { + break; + } + if (token.getType() != JsonTokenType.COMMA) { + throw new JsonParseException("JSON reader expected a ',' or a ')' but found '%s'.", token.getValue()); + } + token = popToken(); + if (token.getType() != JsonTokenType.INT32 && token.getType() != JsonTokenType.INT64) { + throw new JsonParseException("JSON reader expected an integer but found '%s'.", token.getValue()); + } + } + if (pos == 1) { + return values[0]; + } else if (pos < 3 || pos > 7) { + throw new JsonParseException("JSON reader expected 1 or 3-7 integers but found %d.", pos); + } + + Calendar calendar = Calendar.getInstance(TimeZone.getTimeZone("UTC")); + calendar.set(Calendar.YEAR, (int) values[0]); + calendar.set(Calendar.MONTH, (int) values[1]); + calendar.set(Calendar.DAY_OF_MONTH, (int) values[2]); + calendar.set(Calendar.HOUR_OF_DAY, (int) values[3]); + 
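+ // values[1] is applied to the zero-based Calendar.MONTH as-is, presumably mirroring JavaScript Date semantics,
+ // and any omitted trailing fields (hours through milliseconds) stay 0 because values[] is zero-initialized.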
calendar.set(Calendar.MINUTE, (int) values[4]); + calendar.set(Calendar.SECOND, (int) values[5]); + calendar.set(Calendar.MILLISECOND, (int) values[6]); + return calendar.getTimeInMillis(); + } else { + throw new JsonParseException("JSON reader expected an integer or a string but found '%s'.", token.getValue()); + } + } + + private String visitDateTimeConstructorWithOutNew() { + verifyToken(JsonTokenType.LEFT_PAREN); + JsonToken token = popToken(); + if (token.getType() != JsonTokenType.RIGHT_PAREN) { + while (token.getType() != JsonTokenType.END_OF_FILE) { + token = popToken(); + if (token.getType() == JsonTokenType.RIGHT_PAREN) { + break; + } + } + if (token.getType() != JsonTokenType.RIGHT_PAREN) { + throw new JsonParseException("JSON reader expected a ')' but found '%s'.", token.getValue()); + } + } + + DateFormat df = new SimpleDateFormat("EEE MMM dd yyyy HH:mm:ss z", Locale.ENGLISH); + return df.format(new Date()); + } + + private BsonBinary visitBinDataExtendedJson(final String firstKey) { + + Mark mark = new Mark(); + + try { + verifyToken(JsonTokenType.COLON); + + if (firstKey.equals("$binary")) { + JsonToken nextToken = popToken(); + if (nextToken.getType() == JsonTokenType.BEGIN_OBJECT) { + JsonToken nameToken = popToken(); + String firstNestedKey = nameToken.getValue(String.class); + byte[] data; + byte type; + if (firstNestedKey.equals("base64")) { + verifyToken(JsonTokenType.COLON); + data = Base64.getDecoder().decode(readStringFromExtendedJson()); + verifyToken(JsonTokenType.COMMA); + verifyString("subType"); + verifyToken(JsonTokenType.COLON); + type = readBinarySubtypeFromExtendedJson(); + } else if (firstNestedKey.equals("subType")) { + verifyToken(JsonTokenType.COLON); + type = readBinarySubtypeFromExtendedJson(); + verifyToken(JsonTokenType.COMMA); + verifyString("base64"); + verifyToken(JsonTokenType.COLON); + data = Base64.getDecoder().decode(readStringFromExtendedJson()); + } else { + throw new JsonParseException("Unexpected key for $binary: " + firstNestedKey); + } + verifyToken(JsonTokenType.END_OBJECT); + verifyToken(JsonTokenType.END_OBJECT); + return new BsonBinary(type, data); + } else { + mark.reset(); + return visitLegacyBinaryExtendedJson(firstKey); + } + } else { + mark.reset(); + return visitLegacyBinaryExtendedJson(firstKey); + } + } finally { + mark.discard(); + } + } + + private BsonBinary visitLegacyBinaryExtendedJson(final String firstKey) { + + Mark mark = new Mark(); + + try { + verifyToken(JsonTokenType.COLON); + + byte[] data; + byte type; + + if (firstKey.equals("$binary")) { + data = Base64.getDecoder().decode(readStringFromExtendedJson()); + verifyToken(JsonTokenType.COMMA); + verifyString("$type"); + verifyToken(JsonTokenType.COLON); + type = readBinarySubtypeFromExtendedJson(); + } else { + type = readBinarySubtypeFromExtendedJson(); + verifyToken(JsonTokenType.COMMA); + verifyString("$binary"); + verifyToken(JsonTokenType.COLON); + data = Base64.getDecoder().decode(readStringFromExtendedJson()); + } + verifyToken(JsonTokenType.END_OBJECT); + + return new BsonBinary(type, data); + } catch (JsonParseException | NumberFormatException e) { + mark.reset(); + return null; + } finally { + mark.discard(); + } + } + + private byte readBinarySubtypeFromExtendedJson() { + JsonToken subTypeToken = popToken(); + if (subTypeToken.getType() != JsonTokenType.STRING && subTypeToken.getType() != JsonTokenType.INT32) { + throw new JsonParseException("JSON reader expected a string or number but found '%s'.", subTypeToken.getValue()); + } + + if 
(subTypeToken.getType() == JsonTokenType.STRING) { + return (byte) Integer.parseInt(subTypeToken.getValue(String.class), 16); + } else { + return subTypeToken.getValue(Integer.class).byteValue(); + } + } + + private long visitDateTimeExtendedJson() { + long value; + verifyToken(JsonTokenType.COLON); + JsonToken valueToken = popToken(); + if (valueToken.getType() == JsonTokenType.BEGIN_OBJECT) { + JsonToken nameToken = popToken(); + String name = nameToken.getValue(String.class); + if (!name.equals("$numberLong")) { + throw new JsonParseException(format("JSON reader expected $numberLong within $date, but found %s", name)); + } + value = visitNumberLongExtendedJson(); + verifyToken(JsonTokenType.END_OBJECT); + } else { + if (valueToken.getType() == JsonTokenType.INT32 || valueToken.getType() == JsonTokenType.INT64) { + value = valueToken.getValue(Long.class); + } else if (valueToken.getType() == JsonTokenType.STRING) { + String dateTimeString = valueToken.getValue(String.class); + try { + value = DateTimeFormatter.parse(dateTimeString); + } catch (DateTimeParseException e) { + throw new JsonParseException("Failed to parse string as a date", e); + } + } else { + throw new JsonParseException("JSON reader expected an integer or string but found '%s'.", valueToken.getValue()); + } + verifyToken(JsonTokenType.END_OBJECT); + } + return value; + } + + private MaxKey visitMaxKeyExtendedJson() { + verifyToken(JsonTokenType.COLON); + verifyToken(JsonTokenType.INT32, 1); + verifyToken(JsonTokenType.END_OBJECT); + return new MaxKey(); + } + + private MinKey visitMinKeyExtendedJson() { + verifyToken(JsonTokenType.COLON); + verifyToken(JsonTokenType.INT32, 1); + verifyToken(JsonTokenType.END_OBJECT); + return new MinKey(); + } + + private ObjectId visitObjectIdExtendedJson() { + verifyToken(JsonTokenType.COLON); + ObjectId objectId = new ObjectId(readStringFromExtendedJson()); + verifyToken(JsonTokenType.END_OBJECT); + return objectId; + } + + private BsonRegularExpression visitNewRegularExpressionExtendedJson() { + verifyToken(JsonTokenType.COLON); + verifyToken(JsonTokenType.BEGIN_OBJECT); + + String pattern; + String options = ""; + String firstKey = readStringKeyFromExtendedJson(); + if (firstKey.equals("pattern")) { + verifyToken(JsonTokenType.COLON); + pattern = readStringFromExtendedJson(); + verifyToken(JsonTokenType.COMMA); + verifyString("options"); + verifyToken(JsonTokenType.COLON); + options = readStringFromExtendedJson(); + } else if (firstKey.equals("options")) { + verifyToken(JsonTokenType.COLON); + options = readStringFromExtendedJson(); + verifyToken(JsonTokenType.COMMA); + verifyString("pattern"); + verifyToken(JsonTokenType.COLON); + pattern = readStringFromExtendedJson(); + } else { + throw new JsonParseException("Expected 'pattern' and 'options' fields in $regularExpression document but found " + firstKey); + } + + verifyToken(JsonTokenType.END_OBJECT); + verifyToken(JsonTokenType.END_OBJECT); + return new BsonRegularExpression(pattern, options); + } + + private BsonRegularExpression visitRegularExpressionExtendedJson(final String firstKey) { + Mark extendedJsonMark = new Mark(); + + try { + verifyToken(JsonTokenType.COLON); + + String pattern; + String options = ""; + if (firstKey.equals("$regex")) { + pattern = readStringFromExtendedJson(); + verifyToken(JsonTokenType.COMMA); + verifyString("$options"); + verifyToken(JsonTokenType.COLON); + options = readStringFromExtendedJson(); + } else { + options = readStringFromExtendedJson(); + verifyToken(JsonTokenType.COMMA); + 
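+ // The keys may arrive in either order; having read "$options" first, the matching "$regex" key must follow.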
verifyString("$regex"); + verifyToken(JsonTokenType.COLON); + pattern = readStringFromExtendedJson(); + } + verifyToken(JsonTokenType.END_OBJECT); + return new BsonRegularExpression(pattern, options); + } catch (JsonParseException e) { + extendedJsonMark.reset(); + return null; + } finally { + extendedJsonMark.discard(); + } + } + + private String readStringFromExtendedJson() { + JsonToken patternToken = popToken(); + if (patternToken.getType() != JsonTokenType.STRING) { + throw new JsonParseException("JSON reader expected a string but found '%s'.", patternToken.getValue()); + } + return patternToken.getValue(String.class); + } + + + private String visitSymbolExtendedJson() { + verifyToken(JsonTokenType.COLON); + String symbol = readStringFromExtendedJson(); + verifyToken(JsonTokenType.END_OBJECT); + return symbol; + } + + private BsonTimestamp visitTimestampExtendedJson() { + verifyToken(JsonTokenType.COLON); + verifyToken(JsonTokenType.BEGIN_OBJECT); + + int time; + int increment; + + String firstKey = readStringKeyFromExtendedJson(); + if (firstKey.equals("t")) { + verifyToken(JsonTokenType.COLON); + time = readIntFromExtendedJson(); + verifyToken(JsonTokenType.COMMA); + verifyString("i"); + verifyToken(JsonTokenType.COLON); + increment = readIntFromExtendedJson(); + } else if (firstKey.equals("i")) { + verifyToken(JsonTokenType.COLON); + increment = readIntFromExtendedJson(); + verifyToken(JsonTokenType.COMMA); + verifyString("t"); + verifyToken(JsonTokenType.COLON); + time = readIntFromExtendedJson(); + } else { + throw new JsonParseException("Expected 't' and 'i' fields in $timestamp document but found " + firstKey); + } + + verifyToken(JsonTokenType.END_OBJECT); + verifyToken(JsonTokenType.END_OBJECT); + return new BsonTimestamp(time, increment); + } + + private int readIntFromExtendedJson() { + JsonToken nextToken = popToken(); + int value; + if (nextToken.getType() == JsonTokenType.INT32) { + value = nextToken.getValue(Integer.class); + } else if (nextToken.getType() == JsonTokenType.INT64) { + value = nextToken.getValue(Long.class).intValue(); + } else { + throw new JsonParseException("JSON reader expected an integer but found '%s'.", nextToken.getValue()); + } + return value; + } + + private BsonBinary visitUuidExtendedJson() { + verifyToken(JsonTokenType.COLON); + String uuidString = readStringFromExtendedJson(); + verifyToken(JsonTokenType.END_OBJECT); + try { + UuidStringValidator.validate(uuidString); + return new BsonBinary(UUID.fromString(uuidString)); + } catch (IllegalArgumentException e) { + throw new JsonParseException(e); + } + } + + private void visitJavaScriptExtendedJson() { + verifyToken(JsonTokenType.COLON); + String code = readStringFromExtendedJson(); + JsonToken nextToken = popToken(); + switch (nextToken.getType()) { + case COMMA: + verifyString("$scope"); + verifyToken(JsonTokenType.COLON); + setState(State.VALUE); + currentValue = code; + setCurrentBsonType(BsonType.JAVASCRIPT_WITH_SCOPE); + setContext(new Context(getContext(), BsonContextType.SCOPE_DOCUMENT)); + break; + case END_OBJECT: + currentValue = code; + setCurrentBsonType(BsonType.JAVASCRIPT); + break; + default: + throw new JsonParseException("JSON reader expected ',' or '}' but found '%s'.", nextToken); + } + } + + private BsonUndefined visitUndefinedExtendedJson() { + verifyToken(JsonTokenType.COLON); + JsonToken valueToken = popToken(); + if (!valueToken.getValue(String.class).equals("true")) { + throw new JsonParseException("JSON reader requires $undefined to have the value of true but found 
'%s'.", + valueToken.getValue()); + } + verifyToken(JsonTokenType.END_OBJECT); + return new BsonUndefined(); + } + + private Long visitNumberLongExtendedJson() { + verifyToken(JsonTokenType.COLON); + Long value; + String longAsString = readStringFromExtendedJson(); + try { + value = Long.valueOf(longAsString); + } catch (NumberFormatException e) { + throw new JsonParseException(format("Exception converting value '%s' to type %s", longAsString, Long.class.getName()), e); + } + verifyToken(JsonTokenType.END_OBJECT); + return value; + } + + private Integer visitNumberIntExtendedJson() { + verifyToken(JsonTokenType.COLON); + Integer value; + String intAsString = readStringFromExtendedJson(); + try { + value = Integer.valueOf(intAsString); + } catch (NumberFormatException e) { + throw new JsonParseException(format("Exception converting value '%s' to type %s", intAsString, Integer.class.getName()), e); + } + verifyToken(JsonTokenType.END_OBJECT); + return value; + } + + private Double visitNumberDoubleExtendedJson() { + verifyToken(JsonTokenType.COLON); + Double value; + String doubleAsString = readStringFromExtendedJson(); + try { + value = Double.valueOf(doubleAsString); + } catch (NumberFormatException e) { + throw new JsonParseException(format("Exception converting value '%s' to type %s", doubleAsString, Double.class.getName()), e); + } + verifyToken(JsonTokenType.END_OBJECT); + return value; + } + + private Decimal128 visitNumberDecimalExtendedJson() { + verifyToken(JsonTokenType.COLON); + Decimal128 value; + String decimal128AsString = readStringFromExtendedJson(); + try { + value = Decimal128.parse(decimal128AsString); + } catch (NumberFormatException e) { + throw new JsonParseException(format("Exception converting value '%s' to type %s", decimal128AsString, + Decimal128.class.getName()), e); + } + verifyToken(JsonTokenType.END_OBJECT); + return value; + } + + private BsonDbPointer visitDbPointerExtendedJson() { + verifyToken(JsonTokenType.COLON); + verifyToken(JsonTokenType.BEGIN_OBJECT); + + String ref; + ObjectId oid; + + String firstKey = readStringFromExtendedJson(); + if (firstKey.equals("$ref")) { + verifyToken(JsonTokenType.COLON); + ref = readStringFromExtendedJson(); + verifyToken(JsonTokenType.COMMA); + verifyString("$id"); + oid = readDbPointerIdFromExtendedJson(); + verifyToken(JsonTokenType.END_OBJECT); + } else if (firstKey.equals("$id")) { + oid = readDbPointerIdFromExtendedJson(); + verifyToken(JsonTokenType.COMMA); + verifyString("$ref"); + verifyToken(JsonTokenType.COLON); + ref = readStringFromExtendedJson(); + + } else { + throw new JsonParseException("Expected $ref and $id fields in $dbPointer document but found " + firstKey); + } + verifyToken(JsonTokenType.END_OBJECT); + return new BsonDbPointer(ref, oid); + } + + private ObjectId readDbPointerIdFromExtendedJson() { + ObjectId oid; + verifyToken(JsonTokenType.COLON); + verifyToken(JsonTokenType.BEGIN_OBJECT); + verifyToken(JsonTokenType.STRING, "$oid"); + oid = visitObjectIdExtendedJson(); + return oid; + } + + @Override + public BsonReaderMark getMark() { + return new Mark(); + } + + @Override + protected Context getContext() { + return (Context) super.getContext(); + } + + /** + * An implementation of {@code AbstractBsonReader.Mark}. + */ + protected class Mark extends AbstractBsonReader.Mark { + private final JsonToken pushedToken; + private final Object currentValue; + private final int markPos; + + /** + * Construct an instance. 
+ */ + protected Mark() { + pushedToken = JsonReader.this.pushedToken; + currentValue = JsonReader.this.currentValue; + markPos = JsonReader.this.scanner.mark(); + } + + @Override + public void reset() { + super.reset(); + JsonReader.this.pushedToken = pushedToken; + JsonReader.this.currentValue = currentValue; + JsonReader.this.scanner.reset(markPos); + JsonReader.this.setContext(new Context(getParentContext(), getContextType())); + } + + /** + * Discard the mark. + */ + public void discard() { + JsonReader.this.scanner.discard(markPos); + } + } + + + /** + * An implementation of {@code AbstractBsonReader.Context}. + */ + protected class Context extends AbstractBsonReader.Context { + /** + * Construct an instance. + * + * @param parentContext the parent context + * @param contextType the context type + */ + protected Context(final AbstractBsonReader.Context parentContext, final BsonContextType contextType) { + super(parentContext, contextType); + } + + @Override + protected Context getParentContext() { + return (Context) super.getParentContext(); + } + + @Override + protected BsonContextType getContextType() { + return super.getContextType(); + } + } + + private static byte[] decodeHex(final String hex) { + if (hex.length() % 2 != 0) { + throw new IllegalArgumentException("A hex string must contain an even number of characters: " + hex); + } + + byte[] out = new byte[hex.length() / 2]; + + for (int i = 0; i < hex.length(); i += 2) { + int high = Character.digit(hex.charAt(i), 16); + int low = Character.digit(hex.charAt(i + 1), 16); + if (high == -1 || low == -1) { + throw new IllegalArgumentException("A hex string can only contain the characters 0-9, A-F, a-f: " + hex); + } + + out[i / 2] = (byte) (high * 16 + low); + } + + return out; + } + + /** + * Read an extended json key and verify its type. + * Throws an org.bson.json.JsonParseException if the key is neither a quoted string nor an unquoted string. + * @return the key string value + */ + private String readStringKeyFromExtendedJson() { + JsonToken patternToken = popToken(); + if (patternToken.getType() != JsonTokenType.STRING && patternToken.getType() != JsonTokenType.UNQUOTED_STRING) { + throw new JsonParseException("JSON reader expected a string but found '%s'.", patternToken.getValue()); + } + return patternToken.getValue(String.class); + } +} + diff --git a/bson/src/main/org/bson/json/JsonScanner.java b/bson/src/main/org/bson/json/JsonScanner.java new file mode 100644 index 00000000000..77ae680342d --- /dev/null +++ b/bson/src/main/org/bson/json/JsonScanner.java @@ -0,0 +1,553 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +import org.bson.BsonRegularExpression; + +import java.io.Reader; + +/** + * Parses the string representation of a JSON object into a set of {@link JsonToken}-derived objects. 
+ * + * @since 3.0 + */ +class JsonScanner { + + private final JsonBuffer buffer; + + JsonScanner(final JsonBuffer buffer) { + this.buffer = buffer; + } + + JsonScanner(final String json) { + this(new JsonStringBuffer(json)); + } + + JsonScanner(final Reader reader) { + this(new JsonStreamBuffer(reader)); + } + + public void reset(final int markPos) { + buffer.reset(markPos); + } + + public int mark() { + return buffer.mark(); + } + + public void discard(final int markPos) { + buffer.discard(markPos); + } + + /** + * Finds and returns the next complete token from this scanner. If scanner reached the end of the source, it will return a token with + * {@code JSONTokenType.END_OF_FILE} type. + * + * @return The next token. + * @throws JsonParseException if source is invalid. + */ + public JsonToken nextToken() { + + int c = buffer.read(); + while (c != -1 && Character.isWhitespace(c)) { + c = buffer.read(); + } + if (c == -1) { + return new JsonToken(JsonTokenType.END_OF_FILE, ""); + } + + switch (c) { + case '{': + return new JsonToken(JsonTokenType.BEGIN_OBJECT, "{"); + case '}': + return new JsonToken(JsonTokenType.END_OBJECT, "}"); + case '[': + return new JsonToken(JsonTokenType.BEGIN_ARRAY, "["); + case ']': + return new JsonToken(JsonTokenType.END_ARRAY, "]"); + case '(': + return new JsonToken(JsonTokenType.LEFT_PAREN, "("); + case ')': + return new JsonToken(JsonTokenType.RIGHT_PAREN, ")"); + case ':': + return new JsonToken(JsonTokenType.COLON, ":"); + case ',': + return new JsonToken(JsonTokenType.COMMA, ","); + case '\'': + case '"': + return scanString((char) c); + case '/': + return scanRegularExpression(); + default: + if (c == '-' || Character.isDigit(c)) { + return scanNumber((char) c); + } else if (c == '$' || c == '_' || Character.isLetter(c)) { + return scanUnquotedString((char) c); + } else { + int position = buffer.getPosition(); + buffer.unread(c); + throw new JsonParseException("Invalid JSON input. Position: %d. Character: '%c'.", position, c); + } + } + } + + /** + * Reads {@code RegularExpressionToken} from source. The following variants of lexemes are possible: + *
+     *  /pattern/
+     *  /\(pattern\)/
+     *  /pattern/ims
+     * 
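+     * For example, {@code /ab+c/im} yields a {@code BsonRegularExpression} with pattern "ab+c" and options "im".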
+ * Options can include 'i','m','x','s' + * + * @return The regular expression token. + * @throws JsonParseException if regular expression representation is not valid. + */ + private JsonToken scanRegularExpression() { + + StringBuilder patternBuilder = new StringBuilder(); + StringBuilder optionsBuilder = new StringBuilder(); + RegularExpressionState state = RegularExpressionState.IN_PATTERN; + while (true) { + int c = buffer.read(); + switch (state) { + case IN_PATTERN: + switch (c) { + case -1: + state = RegularExpressionState.INVALID; + break; + case '/': + state = RegularExpressionState.IN_OPTIONS; + break; + case '\\': + state = RegularExpressionState.IN_ESCAPE_SEQUENCE; + break; + default: + state = RegularExpressionState.IN_PATTERN; + break; + } + break; + case IN_ESCAPE_SEQUENCE: + state = RegularExpressionState.IN_PATTERN; + break; + case IN_OPTIONS: + switch (c) { + case 'i': + case 'm': + case 'x': + case 's': + state = RegularExpressionState.IN_OPTIONS; + break; + case ',': + case '}': + case ']': + case ')': + case -1: + state = RegularExpressionState.DONE; + break; + default: + if (Character.isWhitespace(c)) { + state = RegularExpressionState.DONE; + } else { + state = RegularExpressionState.INVALID; + } + break; + } + break; + default: + break; + } + + switch (state) { + case DONE: + buffer.unread(c); + BsonRegularExpression regex + = new BsonRegularExpression(patternBuilder.toString(), optionsBuilder.toString()); + return new JsonToken(JsonTokenType.REGULAR_EXPRESSION, regex); + case INVALID: + throw new JsonParseException("Invalid JSON regular expression. Position: %d.", buffer.getPosition()); + default: + if (state == RegularExpressionState.IN_OPTIONS) { + if (c != '/') { + optionsBuilder.append((char) c); + } + } else { + patternBuilder.append((char) c); + } + } + } + } + + /** + * Reads {@code StringToken} from source. + * + * @return The string token. + */ + private JsonToken scanUnquotedString(final char firstChar) { + StringBuilder sb = new StringBuilder(); + sb.append(firstChar); + int c = buffer.read(); + while (c == '$' || c == '_' || Character.isLetterOrDigit(c)) { + sb.append((char) c); + c = buffer.read(); + } + buffer.unread(c); + String lexeme = sb.toString(); + return new JsonToken(JsonTokenType.UNQUOTED_STRING, lexeme); + } + + /** + * Reads number token from source. The following variants of lexemes are possible: + *
+     *  12
+     *  123
+     *  -0
+     *  -345
+     *  -0.0
+     *  0e1
+     *  0e-1
+     *  -0e-1
+     *  1e12
+     *  -Infinity
+     * 
+ * + * @return The number token. + * @throws JsonParseException if number representation is invalid. + */ + //CHECKSTYLE:OFF + private JsonToken scanNumber(final char firstChar) { + + int c = firstChar; + StringBuilder sb = new StringBuilder(); + sb.append(firstChar); + + NumberState state; + + switch (c) { + case '-': + state = NumberState.SAW_LEADING_MINUS; + break; + case '0': + state = NumberState.SAW_LEADING_ZERO; + break; + default: + state = NumberState.SAW_INTEGER_DIGITS; + break; + } + + JsonTokenType type = JsonTokenType.INT64; + + + while (true) { + c = buffer.read(); + switch (state) { + case SAW_LEADING_MINUS: + switch (c) { + case '0': + state = NumberState.SAW_LEADING_ZERO; + break; + case 'I': + state = NumberState.SAW_MINUS_I; + break; + default: + if (Character.isDigit(c)) { + state = NumberState.SAW_INTEGER_DIGITS; + } else { + state = NumberState.INVALID; + } + break; + } + break; + case SAW_LEADING_ZERO: + switch (c) { + case '.': + state = NumberState.SAW_DECIMAL_POINT; + break; + case 'e': + case 'E': + state = NumberState.SAW_EXPONENT_LETTER; + break; + case ',': + case '}': + case ']': + case ')': + case -1: + state = NumberState.DONE; + break; + default: + if (Character.isDigit(c)) { + state = NumberState.SAW_INTEGER_DIGITS; + } else if (Character.isWhitespace(c)) { + state = NumberState.DONE; + } else { + state = NumberState.INVALID; + } + break; + } + break; + case SAW_INTEGER_DIGITS: + switch (c) { + case '.': + state = NumberState.SAW_DECIMAL_POINT; + break; + case 'e': + case 'E': + state = NumberState.SAW_EXPONENT_LETTER; + break; + case ',': + case '}': + case ']': + case ')': + case -1: + state = NumberState.DONE; + break; + default: + if (Character.isDigit(c)) { + state = NumberState.SAW_INTEGER_DIGITS; + } else if (Character.isWhitespace(c)) { + state = NumberState.DONE; + } else { + state = NumberState.INVALID; + } + break; + } + break; + case SAW_DECIMAL_POINT: + type = JsonTokenType.DOUBLE; + if (Character.isDigit(c)) { + state = NumberState.SAW_FRACTION_DIGITS; + } else { + state = NumberState.INVALID; + } + break; + case SAW_FRACTION_DIGITS: + switch (c) { + case 'e': + case 'E': + state = NumberState.SAW_EXPONENT_LETTER; + break; + case ',': + case '}': + case ']': + case ')': + case -1: + state = NumberState.DONE; + break; + default: + if (Character.isDigit(c)) { + state = NumberState.SAW_FRACTION_DIGITS; + } else if (Character.isWhitespace(c)) { + state = NumberState.DONE; + } else { + state = NumberState.INVALID; + } + break; + } + break; + case SAW_EXPONENT_LETTER: + type = JsonTokenType.DOUBLE; + switch (c) { + case '+': + case '-': + state = NumberState.SAW_EXPONENT_SIGN; + break; + default: + if (Character.isDigit(c)) { + state = NumberState.SAW_EXPONENT_DIGITS; + } else { + state = NumberState.INVALID; + } + break; + } + break; + case SAW_EXPONENT_SIGN: + if (Character.isDigit(c)) { + state = NumberState.SAW_EXPONENT_DIGITS; + } else { + state = NumberState.INVALID; + } + break; + case SAW_EXPONENT_DIGITS: + switch (c) { + case ',': + case '}': + case ']': + case ')': + state = NumberState.DONE; + break; + default: + if (Character.isDigit(c)) { + state = NumberState.SAW_EXPONENT_DIGITS; + } else if (Character.isWhitespace(c)) { + state = NumberState.DONE; + } else { + state = NumberState.INVALID; + } + break; + } + break; + case SAW_MINUS_I: + boolean sawMinusInfinity = true; + char[] nfinity = {'n', 'f', 'i', 'n', 'i', 't', 'y'}; + for (int i = 0; i < nfinity.length; i++) { + if (c != nfinity[i]) { + sawMinusInfinity = false; + break; + } 
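+ // The current character matched the next letter of "Infinity": buffer it and read ahead.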
+ sb.append((char) c); + c = buffer.read(); + } + if (sawMinusInfinity) { + type = JsonTokenType.DOUBLE; + switch (c) { + case ',': + case '}': + case ']': + case ')': + case -1: + state = NumberState.DONE; + break; + default: + if (Character.isWhitespace(c)) { + state = NumberState.DONE; + } else { + state = NumberState.INVALID; + } + break; + } + } else { + state = NumberState.INVALID; + } + break; + default: + } + + switch (state) { + case INVALID: + throw new JsonParseException("Invalid JSON number"); + case DONE: + buffer.unread(c); + String lexeme = sb.toString(); + if (type == JsonTokenType.DOUBLE) { + return new JsonToken(JsonTokenType.DOUBLE, Double.parseDouble(lexeme)); + } else { + long value = Long.parseLong(lexeme); + if (value < Integer.MIN_VALUE || value > Integer.MAX_VALUE) { + return new JsonToken(JsonTokenType.INT64, value); + } else { + return new JsonToken(JsonTokenType.INT32, (int) value); + } + } + default: + sb.append((char) c); + } + } + + } + //CHECKSTYLE:ON + + /** + * Reads {@code StringToken} from source. + * + * @return The string token. + */ + //CHECKSTYLE:OFF + private JsonToken scanString(final char quoteCharacter) { + + StringBuilder sb = new StringBuilder(); + + while (true) { + int c = buffer.read(); + if (c == '\\') { + c = buffer.read(); + switch (c) { + case '\'': + sb.append('\''); + break; + case '"': + sb.append('"'); + break; + case '\\': + sb.append('\\'); + break; + case '/': + sb.append('/'); + break; + case 'b': + sb.append('\b'); + break; + case 'f': + sb.append('\f'); + break; + case 'n': + sb.append('\n'); + break; + case 'r': + sb.append('\r'); + break; + case 't': + sb.append('\t'); + break; + case 'u': + int u1 = buffer.read(); + int u2 = buffer.read(); + int u3 = buffer.read(); + int u4 = buffer.read(); + if (u4 != -1) { + String hex = new String(new char[]{(char) u1, (char) u2, (char) u3, (char) u4}); + sb.append((char) Integer.parseInt(hex, 16)); + } + break; + default: + throw new JsonParseException("Invalid escape sequence in JSON string '\\%c'.", c); + } + } else { + if (c == quoteCharacter) { + return new JsonToken(JsonTokenType.STRING, sb.toString()); + } + if (c != -1) { + sb.append((char) c); + } + } + if (c == -1) { + throw new JsonParseException("End of file in JSON string."); + } + } + } + + private enum NumberState { + SAW_LEADING_MINUS, + SAW_LEADING_ZERO, + SAW_INTEGER_DIGITS, + SAW_DECIMAL_POINT, + SAW_FRACTION_DIGITS, + SAW_EXPONENT_LETTER, + SAW_EXPONENT_SIGN, + SAW_EXPONENT_DIGITS, + SAW_MINUS_I, + DONE, + INVALID + } + + private enum RegularExpressionState { + IN_PATTERN, + IN_ESCAPE_SEQUENCE, + IN_OPTIONS, + DONE, + INVALID + } +} diff --git a/bson/src/main/org/bson/json/JsonStreamBuffer.java b/bson/src/main/org/bson/json/JsonStreamBuffer.java new file mode 100644 index 00000000000..077f141fd81 --- /dev/null +++ b/bson/src/main/org/bson/json/JsonStreamBuffer.java @@ -0,0 +1,155 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.json; + +import java.io.IOException; +import java.io.Reader; +import java.util.ArrayList; +import java.util.List; + +class JsonStreamBuffer implements JsonBuffer { + + private final Reader reader; + private final List<Integer> markedPositions = new ArrayList<>(); + private final int initialBufferSize; + private int position; + private int lastChar; + private boolean reuseLastChar; + private boolean eof; + private char[] buffer; + private int bufferStartPos; + private int bufferCount; + + JsonStreamBuffer(final Reader reader) { + this(reader, 16); + } + + JsonStreamBuffer(final Reader reader, final int initialBufferSize) { + this.initialBufferSize = initialBufferSize; + this.reader = reader; + resetBuffer(); + } + + public int getPosition() { + return position; + } + + public int read() { + if (eof) { + throw new JsonParseException("Trying to read past EOF."); + } + + // if we just unread, we need to use the last character read since it may not be in the + // buffer + if (reuseLastChar) { + reuseLastChar = false; + int reusedChar = lastChar; + lastChar = -1; + position++; + return reusedChar; + } + + // use the buffer until we catch up to the stream position + if (position - bufferStartPos < bufferCount) { + int currChar = buffer[position - bufferStartPos]; + lastChar = currChar; + position++; + return currChar; + } + + if (markedPositions.isEmpty()) { + resetBuffer(); + } + + // otherwise, try and read from the stream + try { + int nextChar = reader.read(); + if (nextChar != -1) { + lastChar = nextChar; + addToBuffer((char) nextChar); + } + position++; + if (nextChar == -1) { + eof = true; + } + return nextChar; + + } catch (IOException e) { + throw new JsonParseException(e); + } + } + + private void resetBuffer() { + bufferStartPos = -1; + bufferCount = 0; + buffer = new char[initialBufferSize]; + } + + public void unread(final int c) { + eof = false; + if (c != -1 && lastChar == c) { + reuseLastChar = true; + position--; + } + } + + public int mark() { + if (bufferCount == 0) { + bufferStartPos = position; + } + if (!markedPositions.contains(position)) { + markedPositions.add(position); + } + return position; + } + + public void reset(final int markPos) { + if (markPos > position) { + throw new IllegalStateException("mark cannot reset ahead of position, only back"); + } + int idx = markedPositions.indexOf(markPos); + if (idx == -1) { + throw new IllegalArgumentException("mark invalidated"); + } + if (markPos != position) { + reuseLastChar = false; + } + markedPositions.subList(idx, markedPositions.size()).clear(); + position = markPos; + } + + public void discard(final int markPos) { + int idx = markedPositions.indexOf(markPos); + if (idx == -1) { + return; + } + markedPositions.subList(idx, markedPositions.size()).clear(); + } + + private void addToBuffer(final char curChar) { + // if the lowest mark is ahead of our position, we can safely add it to our buffer + if (!markedPositions.isEmpty()) { + if (bufferCount == buffer.length) { + char[] newBuffer = new char[buffer.length * 2]; + System.arraycopy(buffer, 0, newBuffer, 0, bufferCount); + buffer = newBuffer; + } + buffer[bufferCount] = curChar; + bufferCount++; + } + } +} diff --git a/bson/src/main/org/bson/json/JsonStringBuffer.java b/bson/src/main/org/bson/json/JsonStringBuffer.java new file mode 100644 index 00000000000..79f8783f9b7 --- /dev/null +++ b/bson/src/main/org/bson/json/JsonStringBuffer.java @@ -0,0 +1,65 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +class JsonStringBuffer implements JsonBuffer { + + private final String buffer; + private int position; + private boolean eof; + + JsonStringBuffer(final String buffer) { + this.buffer = buffer; + } + + public int getPosition() { + return position; + } + + public int read() { + if (eof) { + throw new JsonParseException("Trying to read past EOF."); + } else if (position >= buffer.length()) { + eof = true; + return -1; + } else { + return buffer.charAt(position++); + } + } + + public void unread(final int c) { + eof = false; + if (c != -1 && buffer.charAt(position - 1) == c) { + position--; + } + } + + public int mark() { + return position; + } + + public void reset(final int markPos) { + if (markPos > position) { + throw new IllegalStateException("mark cannot reset ahead of position, only back"); + } + position = markPos; + } + + public void discard(final int markPos) { + } + +} diff --git a/bson/src/main/org/bson/json/JsonStringConverter.java b/bson/src/main/org/bson/json/JsonStringConverter.java new file mode 100644 index 00000000000..6a3d3456913 --- /dev/null +++ b/bson/src/main/org/bson/json/JsonStringConverter.java @@ -0,0 +1,24 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +class JsonStringConverter implements Converter<String> { + @Override + public void convert(final String value, final StrictJsonWriter writer) { + writer.writeString(value); + } +} diff --git a/bson/src/main/org/bson/json/JsonSymbolConverter.java b/bson/src/main/org/bson/json/JsonSymbolConverter.java new file mode 100644 index 00000000000..51b65e7fd31 --- /dev/null +++ b/bson/src/main/org/bson/json/JsonSymbolConverter.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
diff --git a/bson/src/main/org/bson/json/JsonSymbolConverter.java b/bson/src/main/org/bson/json/JsonSymbolConverter.java
new file mode 100644
index 00000000000..51b65e7fd31
--- /dev/null
+++ b/bson/src/main/org/bson/json/JsonSymbolConverter.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.json;
+
+class JsonSymbolConverter implements Converter<String> {
+    @Override
+    public void convert(final String value, final StrictJsonWriter writer) {
+        writer.writeStartObject();
+        writer.writeString("$symbol", value);
+        writer.writeEndObject();
+    }
+}
diff --git a/bson/src/main/org/bson/json/JsonToken.java b/bson/src/main/org/bson/json/JsonToken.java
new file mode 100644
index 00000000000..68c579f2369
--- /dev/null
+++ b/bson/src/main/org/bson/json/JsonToken.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.json;
+
+import org.bson.BsonDouble;
+import org.bson.types.Decimal128;
+
+import static java.lang.String.format;
+
+/**
+ * A JSON token.
+ */
+class JsonToken {
+
+    private final Object value;
+    private final JsonTokenType type;
+
+    JsonToken(final JsonTokenType type, final Object value) {
+        this.value = value;
+        this.type = type;
+    }
+
+    public Object getValue() {
+        return value;
+    }
+
+    public <T> T getValue(final Class<T> clazz) {
+        try {
+            if (Long.class == clazz) {
+                if (value instanceof Integer) {
+                    return clazz.cast(((Integer) value).longValue());
+                } else if (value instanceof String) {
+                    return clazz.cast(Long.valueOf((String) value));
+                }
+            } else if (Integer.class == clazz) {
+                if (value instanceof String) {
+                    return clazz.cast(Integer.valueOf((String) value));
+                }
+            } else if (Double.class == clazz) {
+                if (value instanceof String) {
+                    return clazz.cast(Double.valueOf((String) value));
+                }
+            } else if (Decimal128.class == clazz) {
+                if (value instanceof Integer) {
+                    return clazz.cast(new Decimal128((Integer) value));
+                } else if (value instanceof Long) {
+                    return clazz.cast(new Decimal128((Long) value));
+                } else if (value instanceof Double) {
+                    return clazz.cast(new BsonDouble((Double) value).decimal128Value());
+                } else if (value instanceof String) {
+                    return clazz.cast(Decimal128.parse((String) value));
+                }
+            }
+
+            return clazz.cast(value);
+        } catch (Exception e) {
+            throw new JsonParseException(format("Exception converting value '%s' to type %s", value, clazz.getName()), e);
+        }
+    }
+
+    public JsonTokenType getType() {
+        return type;
+    }
+}
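The numeric coercions in getValue are easiest to see with a token whose raw value is an Integer. A small sketch (JsonToken is package-private, so this assumes code in org.bson.json; the demo class name is hypothetical):

```java
package org.bson.json;

import org.bson.types.Decimal128;

final class JsonTokenDemo {
    public static void main(final String[] args) {
        JsonToken token = new JsonToken(JsonTokenType.INT32, 42);

        // The raw value is an Integer, but callers can request a wider type.
        Long asLong = token.getValue(Long.class);                 // 42L
        Decimal128 asDecimal = token.getValue(Decimal128.class);  // 42
        System.out.println(asLong + " " + asDecimal);
    }
}
```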
diff --git a/bson/src/main/org/bson/json/JsonTokenType.java b/bson/src/main/org/bson/json/JsonTokenType.java
new file mode 100644
index 00000000000..9b2e88d5341
--- /dev/null
+++ b/bson/src/main/org/bson/json/JsonTokenType.java
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.json;
+
+enum JsonTokenType {
+    /**
+     * An invalid token.
+     */
+    INVALID,
+
+    /**
+     * A begin array token (a '[').
+     */
+    BEGIN_ARRAY,
+
+    /**
+     * A begin object token (a '{').
+     */
+    BEGIN_OBJECT,
+
+    /**
+     * An end array token (a ']').
+     */
+    END_ARRAY,
+
+    /**
+     * A left parenthesis (a '(').
+     */
+    LEFT_PAREN,
+
+    /**
+     * A right parenthesis (a ')').
+     */
+    RIGHT_PAREN,
+
+    /**
+     * An end object token (a '}').
+     */
+    END_OBJECT,
+
+    /**
+     * A colon token (a ':').
+     */
+    COLON,
+
+    /**
+     * A comma token (a ',').
+     */
+    COMMA,
+
+    /**
+     * A Double token.
+     */
+    DOUBLE,
+
+    /**
+     * An Int32 token.
+     */
+    INT32,
+
+    /**
+     * An Int64 token.
+     */
+    INT64,
+
+    /**
+     * A regular expression token.
+     */
+    REGULAR_EXPRESSION,
+
+    /**
+     * A string token.
+     */
+    STRING,
+
+    /**
+     * An unquoted string token.
+     */
+    UNQUOTED_STRING,
+
+    /**
+     * An end of file token.
+     */
+    END_OF_FILE
+}
diff --git a/bson/src/main/org/bson/json/JsonWriter.java b/bson/src/main/org/bson/json/JsonWriter.java
new file mode 100644
index 00000000000..a1baf0ef0a2
--- /dev/null
+++ b/bson/src/main/org/bson/json/JsonWriter.java
@@ -0,0 +1,270 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.json;
+
+import org.bson.AbstractBsonWriter;
+import org.bson.BsonBinary;
+import org.bson.BsonContextType;
+import org.bson.BsonDbPointer;
+import org.bson.BsonRegularExpression;
+import org.bson.BsonTimestamp;
+import org.bson.types.Decimal128;
+import org.bson.types.ObjectId;
+
+import java.io.Writer;
+
+/**
+ * A {@code BsonWriter} implementation that outputs a JSON representation of BSON.
+ *
+ * @since 3.0
+ */
+public class JsonWriter extends AbstractBsonWriter {
+    private final JsonWriterSettings settings;
+    private final StrictCharacterStreamJsonWriter strictJsonWriter;
+
+    /**
+     * Creates a new instance which uses {@code writer} to write JSON to.
+     *
+     * @param writer the writer to write JSON to.
+     */
+    public JsonWriter(final Writer writer) {
+        this(writer, JsonWriterSettings.builder().build());
+    }
+
+    /**
+     * Creates a new instance which uses {@code writer} to write JSON to and uses the given settings.
+     *
+     * @param writer   the writer to write JSON to.
+     * @param settings the settings to apply to this writer.
+     */
+    public JsonWriter(final Writer writer, final JsonWriterSettings settings) {
+        super(settings);
+        this.settings = settings;
+        setContext(new Context(null, BsonContextType.TOP_LEVEL));
+        strictJsonWriter = new StrictCharacterStreamJsonWriter(writer, StrictCharacterStreamJsonWriterSettings.builder()
+                .indent(settings.isIndent())
+                .newLineCharacters(settings.getNewLineCharacters())
+                .indentCharacters(settings.getIndentCharacters())
+                .maxLength(settings.getMaxLength())
+                .build());
+    }
+
+    /**
+     * Gets the {@code Writer}.
+ * + * @return the writer + */ + public Writer getWriter() { + return strictJsonWriter.getWriter(); + } + + @Override + protected Context getContext() { + return (Context) super.getContext(); + } + + @Override + protected void doWriteName(final String name) { + strictJsonWriter.writeName(name); + } + + @Override + protected void doWriteStartDocument() { + strictJsonWriter.writeStartObject(); + + BsonContextType contextType = (getState() == State.SCOPE_DOCUMENT) ? BsonContextType.SCOPE_DOCUMENT : BsonContextType.DOCUMENT; + setContext(new Context(getContext(), contextType)); + } + + @Override + protected void doWriteEndDocument() { + strictJsonWriter.writeEndObject(); + if (getContext().getContextType() == BsonContextType.SCOPE_DOCUMENT) { + setContext(getContext().getParentContext()); + writeEndDocument(); + } else { + setContext(getContext().getParentContext()); + } + } + + @Override + protected void doWriteStartArray() { + strictJsonWriter.writeStartArray(); + setContext(new Context(getContext(), BsonContextType.ARRAY)); + } + + @Override + protected void doWriteEndArray() { + strictJsonWriter.writeEndArray(); + setContext(getContext().getParentContext()); + } + + + @Override + protected void doWriteBinaryData(final BsonBinary binary) { + settings.getBinaryConverter().convert(binary, strictJsonWriter); + } + + @Override + public void doWriteBoolean(final boolean value) { + settings.getBooleanConverter().convert(value, strictJsonWriter); + } + + @Override + protected void doWriteDateTime(final long value) { + settings.getDateTimeConverter().convert(value, strictJsonWriter); + } + + @Override + protected void doWriteDBPointer(final BsonDbPointer value) { + if (settings.getOutputMode() == JsonMode.EXTENDED) { + strictJsonWriter.writeStartObject(); + strictJsonWriter.writeStartObject("$dbPointer"); + strictJsonWriter.writeString("$ref", value.getNamespace()); + strictJsonWriter.writeName("$id"); + doWriteObjectId(value.getId()); + strictJsonWriter.writeEndObject(); + strictJsonWriter.writeEndObject(); + } else { + strictJsonWriter.writeStartObject(); + strictJsonWriter.writeString("$ref", value.getNamespace()); + strictJsonWriter.writeName("$id"); + doWriteObjectId(value.getId()); + strictJsonWriter.writeEndObject(); + } + } + + @Override + protected void doWriteDouble(final double value) { + settings.getDoubleConverter().convert(value, strictJsonWriter); + } + + @Override + protected void doWriteInt32(final int value) { + settings.getInt32Converter().convert(value, strictJsonWriter); + } + + @Override + protected void doWriteInt64(final long value) { + settings.getInt64Converter().convert(value, strictJsonWriter); + } + + @Override + protected void doWriteDecimal128(final Decimal128 value) { + settings.getDecimal128Converter().convert(value, strictJsonWriter); + } + + @Override + protected void doWriteJavaScript(final String code) { + settings.getJavaScriptConverter().convert(code, strictJsonWriter); + } + + @Override + protected void doWriteJavaScriptWithScope(final String code) { + writeStartDocument(); + writeString("$code", code); + writeName("$scope"); + } + + @Override + protected void doWriteMaxKey() { + settings.getMaxKeyConverter().convert(null, strictJsonWriter); + } + + @Override + protected void doWriteMinKey() { + settings.getMinKeyConverter().convert(null, strictJsonWriter); + } + + @Override + public void doWriteNull() { + settings.getNullConverter().convert(null, strictJsonWriter); + } + + @Override + public void doWriteObjectId(final ObjectId objectId) { + 
settings.getObjectIdConverter().convert(objectId, strictJsonWriter); + } + + @Override + public void doWriteRegularExpression(final BsonRegularExpression regularExpression) { + settings.getRegularExpressionConverter().convert(regularExpression, strictJsonWriter); + } + + @Override + public void doWriteString(final String value) { + settings.getStringConverter().convert(value, strictJsonWriter); + } + + @Override + public void doWriteSymbol(final String value) { + settings.getSymbolConverter().convert(value, strictJsonWriter); + } + + @Override + public void doWriteTimestamp(final BsonTimestamp value) { + settings.getTimestampConverter().convert(value, strictJsonWriter); + } + + @Override + public void doWriteUndefined() { + settings.getUndefinedConverter().convert(null, strictJsonWriter); + } + + @Override + public void flush() { + strictJsonWriter.flush(); + } + + /** + * Return true if the output has been truncated due to exceeding the length specified in {@link JsonWriterSettings#getMaxLength()}. + * + * @return true if the output has been truncated + * @since 3.7 + * @see JsonWriterSettings#getMaxLength() + */ + public boolean isTruncated() { + return strictJsonWriter.isTruncated(); + } + + @Override + protected boolean abortPipe() { + return strictJsonWriter.isTruncated(); + } + + /** + * The context for the writer, inheriting all the values from {@link org.bson.AbstractBsonWriter.Context}, and additionally providing + * settings for the indentation level and whether there are any child elements at this level. + */ + public class Context extends AbstractBsonWriter.Context { + + /** + * Creates a new context. + * + * @param parentContext the parent context that can be used for going back up to the parent level + * @param contextType the type of this context + */ + public Context(final Context parentContext, final BsonContextType contextType) { + super(parentContext, contextType); + } + + @Override + public Context getParentContext() { + return (Context) super.getParentContext(); + } + } +} diff --git a/bson/src/main/org/bson/json/JsonWriterSettings.java b/bson/src/main/org/bson/json/JsonWriterSettings.java new file mode 100644 index 00000000000..53b340e295f --- /dev/null +++ b/bson/src/main/org/bson/json/JsonWriterSettings.java @@ -0,0 +1,771 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +import org.bson.BsonBinary; +import org.bson.BsonMaxKey; +import org.bson.BsonMinKey; +import org.bson.BsonNull; +import org.bson.BsonRegularExpression; +import org.bson.BsonTimestamp; +import org.bson.BsonUndefined; +import org.bson.BsonWriterSettings; +import org.bson.types.Decimal128; +import org.bson.types.ObjectId; + +import static org.bson.assertions.Assertions.isTrueArgument; +import static org.bson.assertions.Assertions.notNull; + +/** + * Settings to control the behavior of a {@code JSONWriter} instance. 
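A typical use of JsonWriter pairs it with a java.io.Writer and the public BsonWriter methods inherited from AbstractBsonWriter; the protected doWrite* overrides above are invoked for you. A sketch in EXTENDED mode (the demo class name is hypothetical):

```java
import org.bson.json.JsonMode;
import org.bson.json.JsonWriter;
import org.bson.json.JsonWriterSettings;

import java.io.StringWriter;

public final class JsonWriterDemo {
    public static void main(final String[] args) {
        StringWriter out = new StringWriter();
        JsonWriter writer = new JsonWriter(out, JsonWriterSettings.builder()
                .outputMode(JsonMode.EXTENDED)
                .build());

        writer.writeStartDocument();
        writer.writeString("name", "example");
        writer.writeInt64("count", 42L);
        writer.writeEndDocument();
        writer.flush();

        // {"name": "example", "count": {"$numberLong": "42"}}
        System.out.println(out);
    }
}
```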
+ * + * @see JsonWriter + * @since 3.0 + */ +public final class JsonWriterSettings extends BsonWriterSettings { + + private static final JsonNullConverter JSON_NULL_CONVERTER = new JsonNullConverter(); + private static final JsonStringConverter JSON_STRING_CONVERTER = new JsonStringConverter(); + private static final JsonBooleanConverter JSON_BOOLEAN_CONVERTER = new JsonBooleanConverter(); + private static final JsonDoubleConverter JSON_DOUBLE_CONVERTER = new JsonDoubleConverter(); + private static final ExtendedJsonDoubleConverter EXTENDED_JSON_DOUBLE_CONVERTER = new ExtendedJsonDoubleConverter(); + private static final RelaxedExtendedJsonDoubleConverter RELAXED_EXTENDED_JSON_DOUBLE_CONVERTER = + new RelaxedExtendedJsonDoubleConverter(); + private static final JsonInt32Converter JSON_INT_32_CONVERTER = new JsonInt32Converter(); + private static final ExtendedJsonInt32Converter EXTENDED_JSON_INT_32_CONVERTER = new ExtendedJsonInt32Converter(); + private static final JsonSymbolConverter JSON_SYMBOL_CONVERTER = new JsonSymbolConverter(); + private static final ExtendedJsonMinKeyConverter EXTENDED_JSON_MIN_KEY_CONVERTER = new ExtendedJsonMinKeyConverter(); + private static final ShellMinKeyConverter SHELL_MIN_KEY_CONVERTER = new ShellMinKeyConverter(); + private static final ExtendedJsonMaxKeyConverter EXTENDED_JSON_MAX_KEY_CONVERTER = new ExtendedJsonMaxKeyConverter(); + private static final ShellMaxKeyConverter SHELL_MAX_KEY_CONVERTER = new ShellMaxKeyConverter(); + private static final ExtendedJsonUndefinedConverter EXTENDED_JSON_UNDEFINED_CONVERTER = new ExtendedJsonUndefinedConverter(); + private static final ShellUndefinedConverter SHELL_UNDEFINED_CONVERTER = new ShellUndefinedConverter(); + private static final LegacyExtendedJsonDateTimeConverter LEGACY_EXTENDED_JSON_DATE_TIME_CONVERTER = + new LegacyExtendedJsonDateTimeConverter(); + private static final ExtendedJsonDateTimeConverter EXTENDED_JSON_DATE_TIME_CONVERTER = new ExtendedJsonDateTimeConverter(); + private static final RelaxedExtendedJsonDateTimeConverter RELAXED_EXTENDED_JSON_DATE_TIME_CONVERTER = + new RelaxedExtendedJsonDateTimeConverter(); + private static final ShellDateTimeConverter SHELL_DATE_TIME_CONVERTER = new ShellDateTimeConverter(); + private static final ExtendedJsonBinaryConverter EXTENDED_JSON_BINARY_CONVERTER = new ExtendedJsonBinaryConverter(); + private static final LegacyExtendedJsonBinaryConverter LEGACY_EXTENDED_JSON_BINARY_CONVERTER = new LegacyExtendedJsonBinaryConverter(); + private static final ShellBinaryConverter SHELL_BINARY_CONVERTER = new ShellBinaryConverter(); + private static final ExtendedJsonInt64Converter EXTENDED_JSON_INT_64_CONVERTER = new ExtendedJsonInt64Converter(); + private static final RelaxedExtendedJsonInt64Converter RELAXED_JSON_INT_64_CONVERTER = new RelaxedExtendedJsonInt64Converter(); + private static final ShellInt64Converter SHELL_INT_64_CONVERTER = new ShellInt64Converter(); + private static final ExtendedJsonDecimal128Converter EXTENDED_JSON_DECIMAL_128_CONVERTER = new ExtendedJsonDecimal128Converter(); + private static final ShellDecimal128Converter SHELL_DECIMAL_128_CONVERTER = new ShellDecimal128Converter(); + private static final ExtendedJsonObjectIdConverter EXTENDED_JSON_OBJECT_ID_CONVERTER = new ExtendedJsonObjectIdConverter(); + private static final ShellObjectIdConverter SHELL_OBJECT_ID_CONVERTER = new ShellObjectIdConverter(); + private static final ExtendedJsonTimestampConverter EXTENDED_JSON_TIMESTAMP_CONVERTER = new ExtendedJsonTimestampConverter(); + private 
static final ShellTimestampConverter SHELL_TIMESTAMP_CONVERTER = new ShellTimestampConverter();
+    private static final ExtendedJsonRegularExpressionConverter EXTENDED_JSON_REGULAR_EXPRESSION_CONVERTER =
+            new ExtendedJsonRegularExpressionConverter();
+    private static final LegacyExtendedJsonRegularExpressionConverter LEGACY_EXTENDED_JSON_REGULAR_EXPRESSION_CONVERTER =
+            new LegacyExtendedJsonRegularExpressionConverter();
+    private static final ShellRegularExpressionConverter SHELL_REGULAR_EXPRESSION_CONVERTER = new ShellRegularExpressionConverter();
+
+    private final boolean indent;
+    private final String newLineCharacters;
+    private final String indentCharacters;
+    private final int maxLength;
+    private final JsonMode outputMode;
+    private final Converter<BsonNull> nullConverter;
+    private final Converter<String> stringConverter;
+    private final Converter<Long> dateTimeConverter;
+    private final Converter<BsonBinary> binaryConverter;
+    private final Converter<Boolean> booleanConverter;
+    private final Converter<Double> doubleConverter;
+    private final Converter<Integer> int32Converter;
+    private final Converter<Long> int64Converter;
+    private final Converter<Decimal128> decimal128Converter;
+    private final Converter<ObjectId> objectIdConverter;
+    private final Converter<BsonTimestamp> timestampConverter;
+    private final Converter<BsonRegularExpression> regularExpressionConverter;
+    private final Converter<String> symbolConverter;
+    private final Converter<BsonUndefined> undefinedConverter;
+    private final Converter<BsonMinKey> minKeyConverter;
+    private final Converter<BsonMaxKey> maxKeyConverter;
+    private final Converter<String> javaScriptConverter;
+
+    /**
+     * Create a builder for JsonWriterSettings, which are immutable.
+     *
+     * <p>Defaults to {@link JsonMode#RELAXED}</p>
+ * + * @return a Builder instance + * @since 3.5 + */ + public static Builder builder() { + return new Builder(); + } + + @SuppressWarnings("deprecation") + private JsonWriterSettings(final Builder builder) { + indent = builder.indent; + newLineCharacters = builder.newLineCharacters != null ? builder.newLineCharacters : System.getProperty("line.separator"); + indentCharacters = builder.indentCharacters; + outputMode = builder.outputMode; + maxLength = builder.maxLength; + + if (builder.nullConverter != null) { + nullConverter = builder.nullConverter; + } else { + nullConverter = JSON_NULL_CONVERTER; + } + + if (builder.stringConverter != null) { + stringConverter = builder.stringConverter; + } else { + stringConverter = JSON_STRING_CONVERTER; + } + + if (builder.booleanConverter != null) { + booleanConverter = builder.booleanConverter; + } else { + booleanConverter = JSON_BOOLEAN_CONVERTER; + } + + if (builder.doubleConverter != null) { + doubleConverter = builder.doubleConverter; + } else if (outputMode == JsonMode.EXTENDED) { + doubleConverter = EXTENDED_JSON_DOUBLE_CONVERTER; + } else if (outputMode == JsonMode.RELAXED) { + doubleConverter = RELAXED_EXTENDED_JSON_DOUBLE_CONVERTER; + } else { + doubleConverter = JSON_DOUBLE_CONVERTER; + } + + if (builder.int32Converter != null) { + int32Converter = builder.int32Converter; + } else if (outputMode == JsonMode.EXTENDED) { + int32Converter = EXTENDED_JSON_INT_32_CONVERTER; + } + else { + int32Converter = JSON_INT_32_CONVERTER; + } + + if (builder.symbolConverter != null) { + symbolConverter = builder.symbolConverter; + } else { + symbolConverter = JSON_SYMBOL_CONVERTER; + } + + if (builder.javaScriptConverter != null) { + javaScriptConverter = builder.javaScriptConverter; + } else { + javaScriptConverter = new JsonJavaScriptConverter(); + } + + if (builder.minKeyConverter != null) { + minKeyConverter = builder.minKeyConverter; + } else if (outputMode == JsonMode.STRICT || outputMode == JsonMode.EXTENDED || outputMode == JsonMode.RELAXED) { + minKeyConverter = EXTENDED_JSON_MIN_KEY_CONVERTER; + } else { + minKeyConverter = SHELL_MIN_KEY_CONVERTER; + } + + if (builder.maxKeyConverter != null) { + maxKeyConverter = builder.maxKeyConverter; + } else if (outputMode == JsonMode.STRICT || outputMode == JsonMode.EXTENDED || outputMode == JsonMode.RELAXED) { + maxKeyConverter = EXTENDED_JSON_MAX_KEY_CONVERTER; + } else { + maxKeyConverter = SHELL_MAX_KEY_CONVERTER; + } + + if (builder.undefinedConverter != null) { + undefinedConverter = builder.undefinedConverter; + } else if (outputMode == JsonMode.STRICT || outputMode == JsonMode.EXTENDED || outputMode == JsonMode.RELAXED) { + undefinedConverter = EXTENDED_JSON_UNDEFINED_CONVERTER; + } else { + undefinedConverter = SHELL_UNDEFINED_CONVERTER; + } + + if (builder.dateTimeConverter != null) { + dateTimeConverter = builder.dateTimeConverter; + } else if (outputMode == JsonMode.STRICT) { + dateTimeConverter = LEGACY_EXTENDED_JSON_DATE_TIME_CONVERTER; + } else if (outputMode == JsonMode.EXTENDED) { + dateTimeConverter = EXTENDED_JSON_DATE_TIME_CONVERTER; + } else if (outputMode == JsonMode.RELAXED) { + dateTimeConverter = RELAXED_EXTENDED_JSON_DATE_TIME_CONVERTER; + } else { + dateTimeConverter = SHELL_DATE_TIME_CONVERTER; + } + + if (builder.binaryConverter != null) { + binaryConverter = builder.binaryConverter; + } else if (outputMode == JsonMode.STRICT) { + binaryConverter = LEGACY_EXTENDED_JSON_BINARY_CONVERTER; + } else if (outputMode == JsonMode.EXTENDED || outputMode == JsonMode.RELAXED) { + 
binaryConverter = EXTENDED_JSON_BINARY_CONVERTER;
+        } else {
+            binaryConverter = SHELL_BINARY_CONVERTER;
+        }
+
+        if (builder.int64Converter != null) {
+            int64Converter = builder.int64Converter;
+        } else if (outputMode == JsonMode.STRICT || outputMode == JsonMode.EXTENDED) {
+            int64Converter = EXTENDED_JSON_INT_64_CONVERTER;
+        } else if (outputMode == JsonMode.RELAXED) {
+            int64Converter = RELAXED_JSON_INT_64_CONVERTER;
+        } else {
+            int64Converter = SHELL_INT_64_CONVERTER;
+        }
+
+        if (builder.decimal128Converter != null) {
+            decimal128Converter = builder.decimal128Converter;
+        } else if (outputMode == JsonMode.STRICT || outputMode == JsonMode.EXTENDED || outputMode == JsonMode.RELAXED) {
+            decimal128Converter = EXTENDED_JSON_DECIMAL_128_CONVERTER;
+        } else {
+            decimal128Converter = SHELL_DECIMAL_128_CONVERTER;
+        }
+
+        if (builder.objectIdConverter != null) {
+            objectIdConverter = builder.objectIdConverter;
+        } else if (outputMode == JsonMode.STRICT || outputMode == JsonMode.EXTENDED || outputMode == JsonMode.RELAXED) {
+            objectIdConverter = EXTENDED_JSON_OBJECT_ID_CONVERTER;
+        } else {
+            objectIdConverter = SHELL_OBJECT_ID_CONVERTER;
+        }
+
+        if (builder.timestampConverter != null) {
+            timestampConverter = builder.timestampConverter;
+        } else if (outputMode == JsonMode.STRICT || outputMode == JsonMode.EXTENDED || outputMode == JsonMode.RELAXED) {
+            timestampConverter = EXTENDED_JSON_TIMESTAMP_CONVERTER;
+        } else {
+            timestampConverter = SHELL_TIMESTAMP_CONVERTER;
+        }
+
+        if (builder.regularExpressionConverter != null) {
+            regularExpressionConverter = builder.regularExpressionConverter;
+        } else if (outputMode == JsonMode.EXTENDED || outputMode == JsonMode.RELAXED) {
+            regularExpressionConverter = EXTENDED_JSON_REGULAR_EXPRESSION_CONVERTER;
+        } else if (outputMode == JsonMode.STRICT) {
+            regularExpressionConverter = LEGACY_EXTENDED_JSON_REGULAR_EXPRESSION_CONVERTER;
+        } else {
+            regularExpressionConverter = SHELL_REGULAR_EXPRESSION_CONVERTER;
+        }
+    }
+
+    /**
+     * The indentation mode. If true, output will be indented. Otherwise, it will all be on the same line. The default value is {@code
+     * false}.
+     *
+     * @return whether output should be indented.
+     */
+    public boolean isIndent() {
+        return indent;
+    }
+
+    /**
+     * The new line character(s) to use if indent mode is enabled. The default value is {@code System.getProperty("line.separator")}.
+     *
+     * @return the new line character(s) to use.
+     */
+    public String getNewLineCharacters() {
+        return newLineCharacters;
+    }
+
+    /**
+     * The indent characters to use if indent mode is enabled. The default value is two spaces.
+     *
+     * @return the indent character(s) to use.
+     */
+    public String getIndentCharacters() {
+        return indentCharacters;
+    }
+
+    /**
+     * The output mode to use. The default value is {@code JsonMode.RELAXED}.
+     *
+     * @return the output mode.
+     */
+    public JsonMode getOutputMode() {
+        return outputMode;
+    }
+
+    /**
+     * The maximum length of the JSON string. The string will be truncated at this length. A value of 0 means no maximum length.
+     *
+     * @return the maximum length of the JSON string
+     * @since 3.7
+     */
+    public int getMaxLength() {
+        return maxLength;
+    }
+
+    /**
+     * A converter from BSON Null values to JSON.
+     *
+     * @return the converter
+     * @since 3.5
+     */
+    public Converter<BsonNull> getNullConverter() {
+        return nullConverter;
+    }
+
+    /**
+     * A converter from BSON String values to JSON.
+     *
+     * @return the converter
+     * @since 3.5
+     */
+    public Converter<String> getStringConverter() {
+        return stringConverter;
+    }
+
+    /**
+     * A converter from BSON Binary values to JSON.
+     *
+     * @return the converter
+     * @since 3.5
+     */
+    public Converter<BsonBinary> getBinaryConverter() {
+        return binaryConverter;
+    }
+
+    /**
+     * A converter from BSON Boolean values to JSON.
+     *
+     * @return the converter
+     * @since 3.5
+     */
+    public Converter<Boolean> getBooleanConverter() {
+        return booleanConverter;
+    }
+
+    /**
+     * A converter from BSON DateTime values to JSON.
+     *
+     * @return the converter
+     * @since 3.5
+     */
+    public Converter<Long> getDateTimeConverter() {
+        return dateTimeConverter;
+    }
+
+    /**
+     * A converter from BSON Double values to JSON.
+     *
+     * @return the converter
+     * @since 3.5
+     */
+    public Converter<Double> getDoubleConverter() {
+        return doubleConverter;
+    }
+
+    /**
+     * A converter from BSON Int32 values to JSON.
+     *
+     * @return the converter
+     * @since 3.5
+     */
+    public Converter<Integer> getInt32Converter() {
+        return int32Converter;
+    }
+
+    /**
+     * A converter from BSON Int64 values to JSON.
+     *
+     * @return the converter
+     * @since 3.5
+     */
+    public Converter<Long> getInt64Converter() {
+        return int64Converter;
+    }
+
+    /**
+     * A converter from BSON Decimal128 values to JSON.
+     *
+     * @return the converter
+     * @since 3.5
+     */
+    public Converter<Decimal128> getDecimal128Converter() {
+        return decimal128Converter;
+    }
+
+    /**
+     * A converter from BSON ObjectId values to JSON.
+     *
+     * @return the converter
+     * @since 3.5
+     */
+    public Converter<ObjectId> getObjectIdConverter() {
+        return objectIdConverter;
+    }
+
+    /**
+     * A converter from BSON RegularExpression values to JSON.
+     *
+     * @return the converter
+     * @since 3.5
+     */
+    public Converter<BsonRegularExpression> getRegularExpressionConverter() {
+        return regularExpressionConverter;
+    }
+
+    /**
+     * A converter from BSON Timestamp values to JSON.
+     *
+     * @return the converter
+     * @since 3.5
+     */
+    public Converter<BsonTimestamp> getTimestampConverter() {
+        return timestampConverter;
+    }
+
+    /**
+     * A converter from BSON Symbol values to JSON.
+     *
+     * @return the converter
+     * @since 3.5
+     */
+    public Converter<String> getSymbolConverter() {
+        return symbolConverter;
+    }
+
+    /**
+     * A converter from BSON MinKey values to JSON.
+     *
+     * @return the converter
+     * @since 3.5
+     */
+    public Converter<BsonMinKey> getMinKeyConverter() {
+        return minKeyConverter;
+    }
+
+    /**
+     * A converter from BSON MaxKey values to JSON.
+     *
+     * @return the converter
+     * @since 3.5
+     */
+    public Converter<BsonMaxKey> getMaxKeyConverter() {
+        return maxKeyConverter;
+    }
+
+    /**
+     * A converter from BSON Undefined values to JSON.
+     *
+     * @return the converter
+     * @since 3.5
+     */
+    public Converter<BsonUndefined> getUndefinedConverter() {
+        return undefinedConverter;
+    }
+
+    /**
+     * A converter from BSON JavaScript values to JSON.
+     *
+     * @return the converter
+     * @since 3.5
+     */
+    public Converter<String> getJavaScriptConverter() {
+        return javaScriptConverter;
+    }
+
+    /**
+     * A builder for JsonWriterSettings.
+     *
+     * @since 3.5
+     */
+    public static final class Builder {
+        private boolean indent;
+        private String newLineCharacters = System.getProperty("line.separator");
+        private String indentCharacters = "  ";
+        private JsonMode outputMode = JsonMode.RELAXED;
+        private int maxLength;
+        private Converter<BsonNull> nullConverter;
+        private Converter<String> stringConverter;
+        private Converter<Long> dateTimeConverter;
+        private Converter<BsonBinary> binaryConverter;
+        private Converter<Boolean> booleanConverter;
+        private Converter<Double> doubleConverter;
+        private Converter<Integer> int32Converter;
+        private Converter<Long> int64Converter;
+        private Converter<Decimal128> decimal128Converter;
+        private Converter<ObjectId> objectIdConverter;
+        private Converter<BsonTimestamp> timestampConverter;
+        private Converter<BsonRegularExpression> regularExpressionConverter;
+        private Converter<String> symbolConverter;
+        private Converter<BsonUndefined> undefinedConverter;
+        private Converter<BsonMinKey> minKeyConverter;
+        private Converter<BsonMaxKey> maxKeyConverter;
+        private Converter<String> javaScriptConverter;
+
+        /**
+         * Build a JsonWriterSettings instance.
+         *
+         * @return a JsonWriterSettings instance
+         */
+        public JsonWriterSettings build() {
+            return new JsonWriterSettings(this);
+        }
+
+        /**
+         * Sets whether indentation is enabled, which defaults to false.
+         *
+         * @param indent whether indentation is enabled
+         * @return this
+         */
+        public Builder indent(final boolean indent) {
+            this.indent = indent;
+            return this;
+        }
+
+        /**
+         * Sets the new line character string to use when indentation is enabled, which defaults to
+         * {@code System.getProperty("line.separator")}.
+         *
+         * @param newLineCharacters the non-null new line character string
+         * @return this
+         */
+        public Builder newLineCharacters(final String newLineCharacters) {
+            notNull("newLineCharacters", newLineCharacters);
+            this.newLineCharacters = newLineCharacters;
+            return this;
+        }
+
+        /**
+         * Sets the indent character string to use when indentation is enabled, which defaults to two spaces.
+         *
+         * @param indentCharacters the non-null indent character string
+         * @return this
+         */
+        public Builder indentCharacters(final String indentCharacters) {
+            notNull("indentCharacters", indentCharacters);
+            this.indentCharacters = indentCharacters;
+            return this;
+        }
+
+        /**
+         * Sets the output mode, which defaults to {@link JsonMode#RELAXED}.
+         *
+         * @param outputMode the non-null output mode
+         * @return this
+         */
+        public Builder outputMode(final JsonMode outputMode) {
+            notNull("outputMode", outputMode);
+            this.outputMode = outputMode;
+            return this;
+        }
+
+        /**
+         * Sets the maximum length of the JSON string. The string will be truncated at this length.
+         *
+         * @param maxLength the maximum length, which must be >= 0 where 0 indicates no maximum length
+         * @return this
+         * @since 3.7
+         */
+        public Builder maxLength(final int maxLength) {
+            isTrueArgument("maxLength >= 0", maxLength >= 0);
+            this.maxLength = maxLength;
+            return this;
+        }
+
+        /**
+         * Sets the converter from BSON Null values to JSON.
+         *
+         * @param nullConverter the converter
+         * @return this
+         */
+        public Builder nullConverter(final Converter<BsonNull> nullConverter) {
+            this.nullConverter = nullConverter;
+            return this;
+        }
+
+        /**
+         * Sets the converter from BSON String values to JSON.
+         *
+         * @param stringConverter the converter
+         * @return this
+         */
+        public Builder stringConverter(final Converter<String> stringConverter) {
+            this.stringConverter = stringConverter;
+            return this;
+        }
+
+        /**
+         * Sets the converter from BSON DateTime values to JSON.
+         *
+         * @param dateTimeConverter the converter
+         * @return this
+         */
+        public Builder dateTimeConverter(final Converter<Long> dateTimeConverter) {
+            this.dateTimeConverter = dateTimeConverter;
+            return this;
+        }
+
+        /**
+         * Sets the converter from BSON Binary values to JSON.
+         *
+         * @param binaryConverter the converter
+         * @return this
+         */
+        public Builder binaryConverter(final Converter<BsonBinary> binaryConverter) {
+            this.binaryConverter = binaryConverter;
+            return this;
+        }
+
+        /**
+         * Sets the converter from BSON Boolean values to JSON.
+         *
+         * @param booleanConverter the converter
+         * @return this
+         */
+        public Builder booleanConverter(final Converter<Boolean> booleanConverter) {
+            this.booleanConverter = booleanConverter;
+            return this;
+        }
+
+        /**
+         * Sets the converter from BSON Double values to JSON.
+         *
+         * @param doubleConverter the converter
+         * @return this
+         */
+        public Builder doubleConverter(final Converter<Double> doubleConverter) {
+            this.doubleConverter = doubleConverter;
+            return this;
+        }
+
+        /**
+         * Sets the converter from BSON Int32 values to JSON.
+         *
+         * @param int32Converter the converter
+         * @return this
+         */
+        public Builder int32Converter(final Converter<Integer> int32Converter) {
+            this.int32Converter = int32Converter;
+            return this;
+        }
+
+        /**
+         * Sets the converter from BSON Int64 values to JSON.
+         *
+         * @param int64Converter the converter
+         * @return this
+         */
+        public Builder int64Converter(final Converter<Long> int64Converter) {
+            this.int64Converter = int64Converter;
+            return this;
+        }
+
+        /**
+         * Sets the converter from BSON Decimal128 values to JSON.
+         *
+         * @param decimal128Converter the converter
+         * @return this
+         */
+        public Builder decimal128Converter(final Converter<Decimal128> decimal128Converter) {
+            this.decimal128Converter = decimal128Converter;
+            return this;
+        }
+
+        /**
+         * Sets the converter from BSON ObjectId values to JSON.
+         *
+         * @param objectIdConverter the converter
+         * @return this
+         */
+        public Builder objectIdConverter(final Converter<ObjectId> objectIdConverter) {
+            this.objectIdConverter = objectIdConverter;
+            return this;
+        }
+
+        /**
+         * Sets the converter from BSON Timestamp values to JSON.
+         *
+         * @param timestampConverter the converter
+         * @return this
+         */
+        public Builder timestampConverter(final Converter<BsonTimestamp> timestampConverter) {
+            this.timestampConverter = timestampConverter;
+            return this;
+        }
+
+        /**
+         * Sets the converter from BSON Regular Expression values to JSON.
+         *
+         * @param regularExpressionConverter the converter
+         * @return this
+         */
+        public Builder regularExpressionConverter(final Converter<BsonRegularExpression> regularExpressionConverter) {
+            this.regularExpressionConverter = regularExpressionConverter;
+            return this;
+        }
+
+        /**
+         * Sets the converter from BSON Symbol values to JSON.
+         *
+         * @param symbolConverter the converter
+         * @return this
+         */
+        public Builder symbolConverter(final Converter<String> symbolConverter) {
+            this.symbolConverter = symbolConverter;
+            return this;
+        }
+
+        /**
+         * Sets the converter from BSON MinKey values to JSON.
+         *
+         * @param minKeyConverter the converter
+         * @return this
+         */
+        public Builder minKeyConverter(final Converter<BsonMinKey> minKeyConverter) {
+            this.minKeyConverter = minKeyConverter;
+            return this;
+        }
+
+        /**
+         * Sets the converter from BSON MaxKey values to JSON.
+         *
+         * @param maxKeyConverter the converter
+         * @return this
+         */
+        public Builder maxKeyConverter(final Converter<BsonMaxKey> maxKeyConverter) {
+            this.maxKeyConverter = maxKeyConverter;
+            return this;
+        }
+
+        /**
+         * Sets the converter from BSON Undefined values to JSON.
+         *
+         * @param undefinedConverter the converter
+         * @return this
+         */
+        public Builder undefinedConverter(final Converter<BsonUndefined> undefinedConverter) {
+            this.undefinedConverter = undefinedConverter;
+            return this;
+        }
+
+        /**
+         * Sets the converter from BSON JavaScript values to JSON.
+         *
+         * @param javaScriptConverter the converter
+         * @return this
+         */
+        public Builder javaScriptConverter(final Converter<String> javaScriptConverter) {
+            this.javaScriptConverter = javaScriptConverter;
+            return this;
+        }
+
+        private Builder() {
+        }
+    }
+}
diff --git a/bson/src/main/org/bson/json/LegacyExtendedJsonBinaryConverter.java b/bson/src/main/org/bson/json/LegacyExtendedJsonBinaryConverter.java
new file mode 100644
index 00000000000..22bf03939bb
--- /dev/null
+++ b/bson/src/main/org/bson/json/LegacyExtendedJsonBinaryConverter.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.json;
+
+import org.bson.BsonBinary;
+
+import java.util.Base64;
+
+class LegacyExtendedJsonBinaryConverter implements Converter<BsonBinary> {
+
+    @Override
+    public void convert(final BsonBinary value, final StrictJsonWriter writer) {
+        writer.writeStartObject();
+        writer.writeString("$binary", Base64.getEncoder().encodeToString(value.getData()));
+        writer.writeString("$type", String.format("%02X", value.getType()));
+        writer.writeEndObject();
+    }
+}
diff --git a/bson/src/main/org/bson/json/LegacyExtendedJsonDateTimeConverter.java b/bson/src/main/org/bson/json/LegacyExtendedJsonDateTimeConverter.java
new file mode 100644
index 00000000000..2fa1692ed7d
--- /dev/null
+++ b/bson/src/main/org/bson/json/LegacyExtendedJsonDateTimeConverter.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
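Putting the builder together: any default converter can be swapped out, and since Converter declares a single method it can be supplied as a lambda. A sketch that renders ObjectId values as plain hex strings instead of the default extended-JSON form (the demo class name is hypothetical):

```java
import org.bson.json.JsonWriter;
import org.bson.json.JsonWriterSettings;
import org.bson.types.ObjectId;

import java.io.StringWriter;

public final class CustomConverterDemo {
    public static void main(final String[] args) {
        JsonWriterSettings settings = JsonWriterSettings.builder()
                .indent(true)
                // override the default {"$oid": "..."} output
                .objectIdConverter((value, writer) -> writer.writeString(value.toHexString()))
                .build();

        StringWriter out = new StringWriter();
        JsonWriter writer = new JsonWriter(out, settings);
        writer.writeStartDocument();
        writer.writeObjectId("_id", new ObjectId("507f1f77bcf86cd799439011"));
        writer.writeEndDocument();
        writer.flush();

        // {
        //   "_id": "507f1f77bcf86cd799439011"
        // }
        System.out.println(out);
    }
}
```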
+ */ + +package org.bson.json; + +class LegacyExtendedJsonDateTimeConverter implements Converter { + @Override + public void convert(final Long value, final StrictJsonWriter writer) { + writer.writeStartObject(); + writer.writeNumber("$date", Long.toString(value)); + writer.writeEndObject(); + } + +} diff --git a/bson/src/main/org/bson/json/LegacyExtendedJsonRegularExpressionConverter.java b/bson/src/main/org/bson/json/LegacyExtendedJsonRegularExpressionConverter.java new file mode 100644 index 00000000000..4cb8b0c9b0c --- /dev/null +++ b/bson/src/main/org/bson/json/LegacyExtendedJsonRegularExpressionConverter.java @@ -0,0 +1,29 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +import org.bson.BsonRegularExpression; + +class LegacyExtendedJsonRegularExpressionConverter implements Converter { + @Override + public void convert(final BsonRegularExpression value, final StrictJsonWriter writer) { + writer.writeStartObject(); + writer.writeString("$regex", value.getPattern()); + writer.writeString("$options", value.getOptions()); + writer.writeEndObject(); + } +} diff --git a/bson/src/main/org/bson/json/RelaxedExtendedJsonDateTimeConverter.java b/bson/src/main/org/bson/json/RelaxedExtendedJsonDateTimeConverter.java new file mode 100644 index 00000000000..002c7c1c089 --- /dev/null +++ b/bson/src/main/org/bson/json/RelaxedExtendedJsonDateTimeConverter.java @@ -0,0 +1,33 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +class RelaxedExtendedJsonDateTimeConverter implements Converter { + private static final Converter FALLBACK_CONVERTER = new ExtendedJsonDateTimeConverter(); + private static final long LAST_MS_OF_YEAR_9999 = 253402300799999L; + + @Override + public void convert(final Long value, final StrictJsonWriter writer) { + if (value < 0 || value > LAST_MS_OF_YEAR_9999) { + FALLBACK_CONVERTER.convert(value, writer); + } else { + writer.writeStartObject(); + writer.writeString("$date", DateTimeFormatter.format(value)); + writer.writeEndObject(); + } + } +} diff --git a/bson/src/main/org/bson/json/RelaxedExtendedJsonDoubleConverter.java b/bson/src/main/org/bson/json/RelaxedExtendedJsonDoubleConverter.java new file mode 100644 index 00000000000..ac845b2ecd0 --- /dev/null +++ b/bson/src/main/org/bson/json/RelaxedExtendedJsonDoubleConverter.java @@ -0,0 +1,30 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
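The effect of the relaxed date converter is visible with the writer defaults (the builder defaults to RELAXED mode): dates from the epoch up through the end of year 9999 use the ISO-8601 string form, while negative or larger values fall back to the canonical {"$numberLong": ...} form. A sketch (hypothetical class name):

```java
import org.bson.json.JsonWriter;
import org.bson.json.JsonWriterSettings;

import java.io.StringWriter;

public final class RelaxedDateDemo {
    public static void main(final String[] args) {
        StringWriter out = new StringWriter();
        JsonWriter writer = new JsonWriter(out, JsonWriterSettings.builder().build());

        writer.writeStartDocument();
        writer.writeDateTime("inRange", 0L);    // {"$date": "1970-01-01T00:00:00Z"}
        writer.writeDateTime("fallback", -1L);  // {"$date": {"$numberLong": "-1"}}
        writer.writeEndDocument();
        writer.flush();

        System.out.println(out);
    }
}
```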
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +class RelaxedExtendedJsonDoubleConverter implements Converter { + private static final Converter FALLBACK_CONVERTER = new ExtendedJsonDoubleConverter(); + + @Override + public void convert(final Double value, final StrictJsonWriter writer) { + if (value.isNaN() || value.isInfinite()) { + FALLBACK_CONVERTER.convert(value, writer); + } else { + writer.writeNumber(Double.toString(value)); + } + } +} diff --git a/bson/src/main/org/bson/json/RelaxedExtendedJsonInt64Converter.java b/bson/src/main/org/bson/json/RelaxedExtendedJsonInt64Converter.java new file mode 100644 index 00000000000..4a158e28495 --- /dev/null +++ b/bson/src/main/org/bson/json/RelaxedExtendedJsonInt64Converter.java @@ -0,0 +1,24 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +class RelaxedExtendedJsonInt64Converter implements Converter { + @Override + public void convert(final Long value, final StrictJsonWriter writer) { + writer.writeNumber(Long.toString(value)); + } +} diff --git a/bson/src/main/org/bson/json/ShellBinaryConverter.java b/bson/src/main/org/bson/json/ShellBinaryConverter.java new file mode 100644 index 00000000000..aec8204583e --- /dev/null +++ b/bson/src/main/org/bson/json/ShellBinaryConverter.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
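Likewise for relaxed doubles: finite values are written as plain JSON numbers, while NaN and the infinities fall back to the canonical extended-JSON form. A sketch with the RELAXED defaults (hypothetical class name):

```java
import org.bson.json.JsonWriter;
import org.bson.json.JsonWriterSettings;

import java.io.StringWriter;

public final class RelaxedDoubleDemo {
    public static void main(final String[] args) {
        StringWriter out = new StringWriter();
        JsonWriter writer = new JsonWriter(out, JsonWriterSettings.builder().build());

        writer.writeStartDocument();
        writer.writeDouble("pi", 3.14);         // "pi": 3.14
        writer.writeDouble("nan", Double.NaN);  // "nan": {"$numberDouble": "NaN"}
        writer.writeEndDocument();
        writer.flush();

        System.out.println(out);
    }
}
```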
+ */ + +package org.bson.json; + +import org.bson.BsonBinary; + +import java.util.Base64; + +import static java.lang.String.format; + +class ShellBinaryConverter implements Converter { + @Override + public void convert(final BsonBinary value, final StrictJsonWriter writer) { + writer.writeRaw(format("new BinData(%s, \"%s\")", value.getType() & 0xFF, + Base64.getEncoder().encodeToString(value.getData()))); + } +} diff --git a/bson/src/main/org/bson/json/ShellDateTimeConverter.java b/bson/src/main/org/bson/json/ShellDateTimeConverter.java new file mode 100644 index 00000000000..95c6441df6a --- /dev/null +++ b/bson/src/main/org/bson/json/ShellDateTimeConverter.java @@ -0,0 +1,37 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.TimeZone; + +import static java.lang.String.format; + + +class ShellDateTimeConverter implements Converter { + @Override + public void convert(final Long value, final StrictJsonWriter writer) { + SimpleDateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"); + dateFormat.setTimeZone(TimeZone.getTimeZone("UTC")); + if (value >= -59014396800000L && value <= 253399536000000L) { + writer.writeRaw(format("ISODate(\"%s\")", dateFormat.format(new Date(value)))); + } else { + writer.writeRaw(format("new Date(%d)", value)); + } + } +} diff --git a/bson/src/main/org/bson/json/ShellDecimal128Converter.java b/bson/src/main/org/bson/json/ShellDecimal128Converter.java new file mode 100644 index 00000000000..43d235b7417 --- /dev/null +++ b/bson/src/main/org/bson/json/ShellDecimal128Converter.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +import org.bson.types.Decimal128; + +import static java.lang.String.format; + +class ShellDecimal128Converter implements Converter { + @Override + public void convert(final Decimal128 value, final StrictJsonWriter writer) { + writer.writeRaw(format("NumberDecimal(\"%s\")", value.toString())); + } +} diff --git a/bson/src/main/org/bson/json/ShellInt64Converter.java b/bson/src/main/org/bson/json/ShellInt64Converter.java new file mode 100644 index 00000000000..94c9884a744 --- /dev/null +++ b/bson/src/main/org/bson/json/ShellInt64Converter.java @@ -0,0 +1,30 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +import static java.lang.String.format; + +class ShellInt64Converter implements Converter { + @Override + public void convert(final Long value, final StrictJsonWriter writer) { + if (value >= Integer.MIN_VALUE && value <= Integer.MAX_VALUE) { + writer.writeRaw(format("NumberLong(%d)", value)); + } else { + writer.writeRaw(format("NumberLong(\"%d\")", value)); + } + } +} diff --git a/bson/src/main/org/bson/json/ShellMaxKeyConverter.java b/bson/src/main/org/bson/json/ShellMaxKeyConverter.java new file mode 100644 index 00000000000..0f3b1ae23d1 --- /dev/null +++ b/bson/src/main/org/bson/json/ShellMaxKeyConverter.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +import org.bson.BsonMaxKey; + +class ShellMaxKeyConverter implements Converter { + @Override + public void convert(final BsonMaxKey value, final StrictJsonWriter writer) { + writer.writeRaw("MaxKey"); + } +} diff --git a/bson/src/main/org/bson/json/ShellMinKeyConverter.java b/bson/src/main/org/bson/json/ShellMinKeyConverter.java new file mode 100644 index 00000000000..f996f5b3e8c --- /dev/null +++ b/bson/src/main/org/bson/json/ShellMinKeyConverter.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +import org.bson.BsonMinKey; + +class ShellMinKeyConverter implements Converter { + @Override + public void convert(final BsonMinKey value, final StrictJsonWriter writer) { + writer.writeRaw("MinKey"); + } +} diff --git a/bson/src/main/org/bson/json/ShellObjectIdConverter.java b/bson/src/main/org/bson/json/ShellObjectIdConverter.java new file mode 100644 index 00000000000..9a8fc9e6f7b --- /dev/null +++ b/bson/src/main/org/bson/json/ShellObjectIdConverter.java @@ -0,0 +1,29 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
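ShellInt64Converter quotes the NumberLong argument only for values outside the 32-bit int range, so the shell can round-trip them without precision loss; in-range values stay unquoted. A sketch in SHELL mode (hypothetical class name):

```java
import org.bson.json.JsonMode;
import org.bson.json.JsonWriter;
import org.bson.json.JsonWriterSettings;

import java.io.StringWriter;

public final class ShellInt64Demo {
    public static void main(final String[] args) {
        StringWriter out = new StringWriter();
        JsonWriter writer = new JsonWriter(out,
                JsonWriterSettings.builder().outputMode(JsonMode.SHELL).build());

        writer.writeStartDocument();
        writer.writeInt64("small", 42L);           // "small": NumberLong(42)
        writer.writeInt64("big", Long.MAX_VALUE);  // "big": NumberLong("9223372036854775807")
        writer.writeEndDocument();
        writer.flush();

        System.out.println(out);
    }
}
```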
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +import org.bson.types.ObjectId; + +import static java.lang.String.format; + +class ShellObjectIdConverter implements Converter { + @Override + public void convert(final ObjectId value, final StrictJsonWriter writer) { + writer.writeRaw(format("ObjectId(\"%s\")", value.toHexString())); + + } +} diff --git a/bson/src/main/org/bson/json/ShellRegularExpressionConverter.java b/bson/src/main/org/bson/json/ShellRegularExpressionConverter.java new file mode 100644 index 00000000000..2deb44213a6 --- /dev/null +++ b/bson/src/main/org/bson/json/ShellRegularExpressionConverter.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +import org.bson.BsonRegularExpression; + +class ShellRegularExpressionConverter implements Converter { + @Override + public void convert(final BsonRegularExpression value, final StrictJsonWriter writer) { + String escaped = (value.getPattern().equals("")) ? "(?:)" : value.getPattern().replace("/", "\\/"); + writer.writeRaw("/" + escaped + "/" + value.getOptions()); + } +} diff --git a/bson/src/main/org/bson/json/ShellTimestampConverter.java b/bson/src/main/org/bson/json/ShellTimestampConverter.java new file mode 100644 index 00000000000..1d767ed1372 --- /dev/null +++ b/bson/src/main/org/bson/json/ShellTimestampConverter.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
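ShellRegularExpressionConverter emits JavaScript regex literals, escaping forward slashes and substituting (?:) for an empty pattern (a bare // would otherwise read as a line comment in the shell). A sketch (hypothetical class name):

```java
import org.bson.BsonRegularExpression;
import org.bson.json.JsonMode;
import org.bson.json.JsonWriter;
import org.bson.json.JsonWriterSettings;

import java.io.StringWriter;

public final class ShellRegexDemo {
    public static void main(final String[] args) {
        StringWriter out = new StringWriter();
        JsonWriter writer = new JsonWriter(out,
                JsonWriterSettings.builder().outputMode(JsonMode.SHELL).build());

        writer.writeStartDocument();
        writer.writeRegularExpression("slash", new BsonRegularExpression("a/b", "i"));  // "slash": /a\/b/i
        writer.writeRegularExpression("empty", new BsonRegularExpression(""));          // "empty": /(?:)/
        writer.writeEndDocument();
        writer.flush();

        System.out.println(out);
    }
}
```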
+ */ + +package org.bson.json; + +import org.bson.BsonTimestamp; + +import static java.lang.String.format; + +class ShellTimestampConverter implements Converter { + @Override + public void convert(final BsonTimestamp value, final StrictJsonWriter writer) { + writer.writeRaw(format("Timestamp(%d, %d)", value.getTime(), value.getInc())); + } +} diff --git a/bson/src/main/org/bson/json/ShellUndefinedConverter.java b/bson/src/main/org/bson/json/ShellUndefinedConverter.java new file mode 100644 index 00000000000..297594dadc5 --- /dev/null +++ b/bson/src/main/org/bson/json/ShellUndefinedConverter.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +import org.bson.BsonUndefined; + +class ShellUndefinedConverter implements Converter { + @Override + public void convert(final BsonUndefined value, final StrictJsonWriter writer) { + writer.writeRaw("undefined"); + } +} diff --git a/bson/src/main/org/bson/json/StrictCharacterStreamJsonWriter.java b/bson/src/main/org/bson/json/StrictCharacterStreamJsonWriter.java new file mode 100644 index 00000000000..cce8af2fa17 --- /dev/null +++ b/bson/src/main/org/bson/json/StrictCharacterStreamJsonWriter.java @@ -0,0 +1,402 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +import org.bson.BSONException; +import org.bson.BsonInvalidOperationException; + +import java.io.IOException; +import java.io.Writer; + +import static org.bson.assertions.Assertions.notNull; + +/** + * A class that writes JSON texts as a character stream via a provided {@link Writer}. + * + * @since 3.5 + */ +public final class StrictCharacterStreamJsonWriter implements StrictJsonWriter { + private enum JsonContextType { + TOP_LEVEL, + DOCUMENT, + ARRAY, + } + + private enum State { + INITIAL, + NAME, + VALUE, + DONE + } + + private static class StrictJsonContext { + private final StrictJsonContext parentContext; + private final JsonContextType contextType; + private final String indentation; + private boolean hasElements; + + StrictJsonContext(final StrictJsonContext parentContext, final JsonContextType contextType, final String indentChars) { + this.parentContext = parentContext; + this.contextType = contextType; + this.indentation = (parentContext == null) ? 
indentChars : parentContext.indentation + indentChars; + } + } + + private final Writer writer; + private final StrictCharacterStreamJsonWriterSettings settings; + private StrictJsonContext context = new StrictJsonContext(null, JsonContextType.TOP_LEVEL, ""); + private State state = State.INITIAL; + private int curLength; + private boolean isTruncated; + + /** + * Construct an instance. + * + * @param writer the writer to write JSON to. + * @param settings the settings to apply to this writer. + */ + public StrictCharacterStreamJsonWriter(final Writer writer, final StrictCharacterStreamJsonWriterSettings settings) { + this.writer = writer; + this.settings = settings; + } + + /** + * Gets the current length of the JSON text. + * + * @return the current length of the JSON text + */ + public int getCurrentLength() { + return curLength; + } + + @Override + public void writeStartObject(final String name) { + writeName(name); + writeStartObject(); + } + + @Override + public void writeStartArray(final String name) { + writeName(name); + writeStartArray(); + } + + @Override + public void writeBoolean(final String name, final boolean value) { + notNull("name", name); + writeName(name); + writeBoolean(value); + } + + @Override + public void writeNumber(final String name, final String value) { + notNull("name", name); + notNull("value", value); + writeName(name); + writeNumber(value); + } + + @Override + public void writeString(final String name, final String value) { + notNull("name", name); + notNull("value", value); + writeName(name); + writeString(value); + } + + @Override + public void writeRaw(final String name, final String value) { + notNull("name", name); + notNull("value", value); + writeName(name); + writeRaw(value); + } + + @Override + public void writeNull(final String name) { + writeName(name); + writeNull(); + } + + @Override + public void writeName(final String name) { + notNull("name", name); + checkState(State.NAME); + + if (context.hasElements) { + write(","); + } + if (settings.isIndent()) { + write(settings.getNewLineCharacters()); + write(context.indentation); + } else if (context.hasElements){ + write(" "); + } + writeStringHelper(name); + write(": "); + + state = State.VALUE; + } + + @Override + public void writeBoolean(final boolean value) { + checkState(State.VALUE); + preWriteValue(); + write(value ? 
"true" : "false"); + setNextState(); + } + + @Override + public void writeNumber(final String value) { + notNull("value", value); + checkState(State.VALUE); + preWriteValue(); + write(value); + setNextState(); + } + + @Override + public void writeString(final String value) { + notNull("value", value); + checkState(State.VALUE); + preWriteValue(); + writeStringHelper(value); + setNextState(); + } + + @Override + public void writeRaw(final String value) { + notNull("value", value); + checkState(State.VALUE); + preWriteValue(); + write(value); + setNextState(); + } + + @Override + public void writeNull() { + checkState(State.VALUE); + preWriteValue(); + write("null"); + setNextState(); + } + + @Override + public void writeStartObject() { + if (state != State.INITIAL && state != State.VALUE) { + throw new BsonInvalidOperationException("Invalid state " + state); + } + preWriteValue(); + write("{"); + context = new StrictJsonContext(context, JsonContextType.DOCUMENT, settings.getIndentCharacters()); + state = State.NAME; + } + + @Override + public void writeStartArray() { + preWriteValue(); + write("["); + context = new StrictJsonContext(context, JsonContextType.ARRAY, settings.getIndentCharacters()); + state = State.VALUE; + } + + @Override + public void writeEndObject() { + checkState(State.NAME); + + if (settings.isIndent() && context.hasElements) { + write(settings.getNewLineCharacters()); + write(context.parentContext.indentation); + } + write("}"); + context = context.parentContext; + if (context.contextType == JsonContextType.TOP_LEVEL) { + state = State.DONE; + } else { + setNextState(); + } + } + + @Override + public void writeEndArray() { + checkState(State.VALUE); + + if (context.contextType != JsonContextType.ARRAY) { + throw new BsonInvalidOperationException("Can't end an array if not in an array"); + } + + if (settings.isIndent() && context.hasElements) { + write(settings.getNewLineCharacters()); + write(context.parentContext.indentation); + } + write("]"); + context = context.parentContext; + if (context.contextType == JsonContextType.TOP_LEVEL) { + state = State.DONE; + } else { + setNextState(); + } + } + + /** + * Return true if the output has been truncated due to exceeding the length specified in + * {@link StrictCharacterStreamJsonWriterSettings#getMaxLength()}. 
+ * + * @return true if the output has been truncated + * @since 3.7 + * @see StrictCharacterStreamJsonWriterSettings#getMaxLength() + */ + public boolean isTruncated() { + return isTruncated; + } + + void flush() { + try { + writer.flush(); + } catch (IOException e) { + throwBSONException(e); + } + } + + Writer getWriter() { + return writer; + } + + private void preWriteValue() { + if (context.contextType == JsonContextType.ARRAY) { + if (context.hasElements) { + write(","); + } + if (settings.isIndent()) { + write(settings.getNewLineCharacters()); + write(context.indentation); + } else if (context.hasElements) { + write(" "); + } + } + context.hasElements = true; + } + + private void setNextState() { + if (context.contextType == JsonContextType.ARRAY) { + state = State.VALUE; + } else { + state = State.NAME; + } + } + + private void writeStringHelper(final String str) { + write('"'); + for (int i = 0; i < str.length(); i++) { + char c = str.charAt(i); + switch (c) { + case '"': + write("\\\""); + break; + case '\\': + write("\\\\"); + break; + case '\b': + write("\\b"); + break; + case '\f': + write("\\f"); + break; + case '\n': + write("\\n"); + break; + case '\r': + write("\\r"); + break; + case '\t': + write("\\t"); + break; + default: + switch (Character.getType(c)) { + case Character.UPPERCASE_LETTER: + case Character.LOWERCASE_LETTER: + case Character.TITLECASE_LETTER: + case Character.OTHER_LETTER: + case Character.DECIMAL_DIGIT_NUMBER: + case Character.LETTER_NUMBER: + case Character.OTHER_NUMBER: + case Character.SPACE_SEPARATOR: + case Character.CONNECTOR_PUNCTUATION: + case Character.DASH_PUNCTUATION: + case Character.START_PUNCTUATION: + case Character.END_PUNCTUATION: + case Character.INITIAL_QUOTE_PUNCTUATION: + case Character.FINAL_QUOTE_PUNCTUATION: + case Character.OTHER_PUNCTUATION: + case Character.MATH_SYMBOL: + case Character.CURRENCY_SYMBOL: + case Character.MODIFIER_SYMBOL: + case Character.OTHER_SYMBOL: + write(c); + break; + default: + write("\\u"); + write(Integer.toHexString((c & 0xf000) >> 12)); + write(Integer.toHexString((c & 0x0f00) >> 8)); + write(Integer.toHexString((c & 0x00f0) >> 4)); + write(Integer.toHexString(c & 0x000f)); + break; + } + break; + } + } + write('"'); + } + + private void write(final String str) { + try { + if (settings.getMaxLength() == 0 || str.length() + curLength < settings.getMaxLength()) { + writer.write(str); + curLength += str.length(); + } else { + writer.write(str.substring(0, settings.getMaxLength() - curLength)); + curLength = settings.getMaxLength(); + isTruncated = true; + } + } catch (IOException e) { + throwBSONException(e); + } + } + + private void write(final char c) { + try { + if (settings.getMaxLength() == 0 || curLength < settings.getMaxLength()) { + writer.write(c); + curLength++; + } else { + isTruncated = true; + } + } catch (IOException e) { + throwBSONException(e); + } + } + + private void checkState(final State requiredState) { + if (state != requiredState) { + throw new BsonInvalidOperationException("Invalid state " + state); + } + } + + private void throwBSONException(final IOException e) { + throw new BSONException("Wrapping IOException", e); + } +} diff --git a/bson/src/main/org/bson/json/StrictCharacterStreamJsonWriterSettings.java b/bson/src/main/org/bson/json/StrictCharacterStreamJsonWriterSettings.java new file mode 100644 index 00000000000..e395e3ee52c --- /dev/null +++ b/bson/src/main/org/bson/json/StrictCharacterStreamJsonWriterSettings.java @@ -0,0 +1,158 @@ +/* + * Copyright 2008-present 
MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +import static org.bson.assertions.Assertions.notNull; + +/** + * Settings to control the behavior of a {@code JSONWriter} instance. + * + * @see StrictCharacterStreamJsonWriter + * @since 3.5 + */ +public final class StrictCharacterStreamJsonWriterSettings { + + private final boolean indent; + private final String newLineCharacters; + private final String indentCharacters; + private final int maxLength; + + /** + * Create a builder for StrictCharacterStreamJsonWriterSettings, which are immutable. + * + * @return a Builder instance + */ + public static Builder builder() { + return new Builder(); + } + + private StrictCharacterStreamJsonWriterSettings(final Builder builder) { + indent = builder.indent; + newLineCharacters = builder.newLineCharacters != null ? builder.newLineCharacters : System.getProperty("line.separator"); + indentCharacters = builder.indentCharacters; + maxLength = builder.maxLength; + } + + /** + * The indentation mode. If true, output will be indented. Otherwise, it will all be on the same line. The default value is {@code + * false}. + * + * @return whether output should be indented. + */ + public boolean isIndent() { + return indent; + } + + /** + * The new line character(s) to use if indent mode is enabled. The default value is {@code System.getProperty("line.separator")}. + * + * @return the new line character(s) to use. + */ + public String getNewLineCharacters() { + return newLineCharacters; + } + + /** + * The indent characters to use if indent mode is enabled. The default value is two spaces. + * + * @return the indent character(s) to use. + */ + public String getIndentCharacters() { + return indentCharacters; + } + + /** + * The maximum length of the JSON string. The string will be truncated at this length. + * + * @return the maximum length of the JSON string + * @since 3.7 + */ + public int getMaxLength() { + return maxLength; + } + + /** + * A builder for StrictCharacterStreamJsonWriterSettings + * + * @since 3.4 + */ + public static final class Builder { + private boolean indent; + private String newLineCharacters = System.getProperty("line.separator"); + private String indentCharacters = " "; + private int maxLength; + + /** + * Build a JsonWriterSettings instance. + * + * @return a JsonWriterSettings instance + */ + public StrictCharacterStreamJsonWriterSettings build() { + return new StrictCharacterStreamJsonWriterSettings(this); + } + + /** + * Sets whether indentation is enabled. + * + * @param indent whether indentation is enabled + * @return this + */ + public Builder indent(final boolean indent) { + this.indent = indent; + return this; + } + + /** + * Sets the new line character string to use when indentation is enabled. 
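+ * <p>An illustrative sketch of typical builder usage (using only the builder methods defined in this class):</p>
+ * <pre>
+ * StrictCharacterStreamJsonWriterSettings settings = StrictCharacterStreamJsonWriterSettings.builder()
+ *         .indent(true)
+ *         .newLineCharacters("\n")
+ *         .maxLength(0) // 0 means no maximum length
+ *         .build();
+ * </pre>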
+ *
+ * @param newLineCharacters the non-null new line character string
+ * @return this
+ */
+ public Builder newLineCharacters(final String newLineCharacters) {
+ notNull("newLineCharacters", newLineCharacters);
+ this.newLineCharacters = newLineCharacters;
+ return this;
+ }
+
+ /**
+ * Sets the indent character string to use when indentation is enabled.
+ *
+ * @param indentCharacters the non-null indent character string
+ * @return this
+ */
+ public Builder indentCharacters(final String indentCharacters) {
+ notNull("indentCharacters", indentCharacters);
+ this.indentCharacters = indentCharacters;
+ return this;
+ }
+
+ /**
+ * Sets the maximum length of the JSON string. The string will be truncated at this length.
+ *
+ * @param maxLength the maximum length, which must be >= 0, where 0 indicates no maximum length
+ * @return this
+ * @since 3.7
+ */
+ public Builder maxLength(final int maxLength) {
+ this.maxLength = maxLength;
+ return this;
+ }
+
+ private Builder() {
+ }
+ }
+}
diff --git a/bson/src/main/org/bson/json/StrictJsonWriter.java b/bson/src/main/org/bson/json/StrictJsonWriter.java
new file mode 100644
index 00000000000..7c75849025b
--- /dev/null
+++ b/bson/src/main/org/bson/json/StrictJsonWriter.java
@@ -0,0 +1,184 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.json;
+
+/**
+ * An interface for creating JSON texts that largely conform to RFC 7159.
+ *
+ * @since 3.5
+ */
+public interface StrictJsonWriter {
+ /**
+ * Writes the name of a member to the writer.
+ *
+ * @param name the member name
+ * @throws org.bson.BsonInvalidOperationException if not in the correct state to write a member name
+ * @throws org.bson.BSONException if the underlying Writer throws an IOException
+ */
+ void writeName(String name);
+
+ /**
+ * Writes a boolean to the writer.
+ *
+ * @param value the boolean value.
+ * @throws org.bson.BsonInvalidOperationException if not in the correct state to write a value
+ * @throws org.bson.BSONException if the underlying Writer throws an IOException
+ */
+ void writeBoolean(boolean value);
+
+ /**
+ * Writes a member with a boolean value to the writer.
+ *
+ * @param name the member name
+ * @param value the boolean value
+ * @throws org.bson.BsonInvalidOperationException if not in the correct state to write a member
+ * @throws org.bson.BSONException if the underlying Writer throws an IOException
+ */
+ void writeBoolean(String name, boolean value);
+
+ /**
+ * Writes a number to the writer.
+ *
+ * @param value the Double value, as a String so that clients can take full control over formatting
+ * @throws org.bson.BsonInvalidOperationException if not in the correct state to write a value
+ * @throws org.bson.BSONException if the underlying Writer throws an IOException
+ */
+ void writeNumber(String value);
+
+ /**
+ * Writes a member with a numeric value to the writer.
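+ * <p>For example, {@code writer.writeNumber("size", Integer.toString(42))} emits {@code "size": 42}, leaving the exact
+ * textual form of the number to the caller.</p>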
+ *
+ * @param name the member name
+ * @param value the Double value, as a String so that clients can take full control over formatting
+ * @throws org.bson.BsonInvalidOperationException if not in the correct state to write a member
+ * @throws org.bson.BSONException if the underlying Writer throws an IOException
+ */
+ void writeNumber(String name, String value);
+
+ /**
+ * Writes a String to the writer.
+ *
+ * @param value the String value
+ * @throws org.bson.BsonInvalidOperationException if not in the correct state to write a value
+ * @throws org.bson.BSONException if the underlying Writer throws an IOException
+ */
+ void writeString(String value);
+
+ /**
+ * Writes a member with a string value to the writer.
+ *
+ * @param name the member name
+ * @param value the String value
+ * @throws org.bson.BsonInvalidOperationException if not in the correct state to write a member
+ * @throws org.bson.BSONException if the underlying Writer throws an IOException
+ */
+ void writeString(String name, String value);
+
+ /**
+ * Writes a raw value without quoting or escaping.
+ *
+ * @param value the String value
+ * @throws org.bson.BsonInvalidOperationException if not in the correct state to write a value
+ * @throws org.bson.BSONException if the underlying Writer throws an IOException
+ */
+ void writeRaw(String value);
+
+ /**
+ * Writes a member with a raw value without quoting or escaping.
+ *
+ * @param name the member name
+ * @param value the raw value
+ * @throws org.bson.BsonInvalidOperationException if not in the correct state to write a member
+ * @throws org.bson.BSONException if the underlying Writer throws an IOException
+ */
+ void writeRaw(String name, String value);
+
+ /**
+ * Writes a null value to the writer.
+ *
+ * @throws org.bson.BsonInvalidOperationException if not in the correct state to write a value
+ * @throws org.bson.BSONException if the underlying Writer throws an IOException
+ */
+ void writeNull();
+
+ /**
+ * Writes a member with a null value to the writer.
+ *
+ * @param name the member name
+ * @throws org.bson.BsonInvalidOperationException if not in the correct state to write a member
+ * @throws org.bson.BSONException if the underlying Writer throws an IOException
+ */
+ void writeNull(String name);
+
+ /**
+ * Writes the start of an array to the writer.
+ *
+ * @throws org.bson.BsonInvalidOperationException if not in the correct state to write a value
+ * @throws org.bson.BSONException if the underlying Writer throws an IOException
+ */
+ void writeStartArray();
+
+ /**
+ * Writes the start of a JSON array member to the writer.
+ *
+ * @param name the member name
+ * @throws org.bson.BsonInvalidOperationException if not in the correct state to write a member
+ * @throws org.bson.BSONException if the underlying Writer throws an IOException
+ */
+ void writeStartArray(String name);
+
+ /**
+ * Writes the start of a JSON object to the writer.
+ *
+ * @throws org.bson.BsonInvalidOperationException if not in the correct state to write a value
+ * @throws org.bson.BSONException if the underlying Writer throws an IOException
+ */
+ void writeStartObject();
+
+ /**
+ * Writes the start of a JSON object member to the writer.
+ *
+ * @param name the member name
+ * @throws org.bson.BsonInvalidOperationException if not in the correct state to write a member
+ * @throws org.bson.BSONException if the underlying Writer throws an IOException
+ */
+ void writeStartObject(String name);
+
+ /**
+ * Writes the end of a JSON array to the writer.
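+ * <p>For example, {@code writeStartArray("tags")} followed by {@code writeString("a")}, {@code writeString("b")} and
+ * {@code writeEndArray()} emits {@code "tags": ["a", "b"]}.</p>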
+ *
+ * @throws org.bson.BsonInvalidOperationException if not in the correct state to write the end of an array
+ * @throws org.bson.BSONException if the underlying Writer throws an IOException
+ */
+ void writeEndArray();
+
+ /**
+ * Writes the end of a JSON object to the writer.
+ *
+ * @throws org.bson.BsonInvalidOperationException if not in the correct state to write the end of an object
+ * @throws org.bson.BSONException if the underlying Writer throws an IOException
+ */
+ void writeEndObject();
+
+ /**
+ * Return true if the output has been truncated due to exceeding any maximum length specified in settings.
+ *
+ * @return true if the output has been truncated
+ * @since 3.7
+ */
+ boolean isTruncated();
+}
diff --git a/bson/src/main/org/bson/json/UuidStringValidator.java b/bson/src/main/org/bson/json/UuidStringValidator.java
new file mode 100644
index 00000000000..5f0f18d96ba
--- /dev/null
+++ b/bson/src/main/org/bson/json/UuidStringValidator.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.json;
+
+import java.util.BitSet;
+
+final class UuidStringValidator {
+ private static final BitSet HEX_CHARS;
+
+ static {
+ HEX_CHARS = new BitSet('f' + 1);
+ HEX_CHARS.set('0', '9' + 1);
+ HEX_CHARS.set('A', 'F' + 1);
+ HEX_CHARS.set('a', 'f' + 1);
+ }
+
+ private static void validateFourHexChars(final String str, final int startPos) {
+ if (!(HEX_CHARS.get(str.charAt(startPos))
+ && HEX_CHARS.get(str.charAt(startPos + 1))
+ && HEX_CHARS.get(str.charAt(startPos + 2))
+ && HEX_CHARS.get(str.charAt(startPos + 3)))) {
+ throw new IllegalArgumentException(String.format("Expected four hexadecimal characters in UUID string \"%s\" starting at "
+ + "position %d", str, startPos));
+ }
+ }
+
+ private static void validateDash(final String str, final int pos) {
+ if (str.charAt(pos) != '-') {
+ throw new IllegalArgumentException(String.format("Expected dash in UUID string \"%s\" at position %d", str, pos));
+ }
+ }
+
+ // UUID strings must be in the form 73ffd264-44b3-4c69-90e8-e7d1dfc035d4, but UUID.fromString fails to fully validate against that
+ // form, even though the Javadoc claims that it does. For example, it will parse 73ff-d26444b-34c6-990e8e-7d1dfc035d4 (same as previous
+ // value but with hyphens in the wrong positions), but return a UUID that is not equal to the one it returns for the string with the
+ // hyphens in the correct positions. Given that, in order to comply with the Extended JSON specification, we add our own validation
+ // before calling UUID.fromString.
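+ // For example, validate("73ffd264-44b3-4c69-90e8-e7d1dfc035d4") returns normally, while
+ // validate("73ff-d26444b-34c6-990e8e-7d1dfc035d4") throws, because position 4 holds a dash where hex digits are required.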
+ static void validate(final String uuidString) { + if (uuidString.length() != 36) { + throw new IllegalArgumentException(String.format("UUID string \"%s\" must be 36 characters", uuidString)); + } + + validateFourHexChars(uuidString, 0); + validateFourHexChars(uuidString, 4); + validateDash(uuidString, 8); + validateFourHexChars(uuidString, 9); + validateDash(uuidString, 13); + validateFourHexChars(uuidString, 14); + validateDash(uuidString, 18); + validateFourHexChars(uuidString, 19); + validateDash(uuidString, 23); + validateFourHexChars(uuidString, 24); + validateFourHexChars(uuidString, 28); + validateFourHexChars(uuidString, 32); + } + + private UuidStringValidator() { + } +} diff --git a/bson/src/main/org/bson/json/package-info.java b/bson/src/main/org/bson/json/package-info.java new file mode 100644 index 00000000000..05c872db6c7 --- /dev/null +++ b/bson/src/main/org/bson/json/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * JSON serialization and deserialization. + */ +package org.bson.json; diff --git a/bson/src/main/org/bson/package-info.java b/bson/src/main/org/bson/package-info.java new file mode 100644 index 00000000000..d02567e5d9d --- /dev/null +++ b/bson/src/main/org/bson/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Contains the base BSON classes. + */ +package org.bson; diff --git a/bson/src/main/org/bson/types/BSONTimestamp.java b/bson/src/main/org/bson/types/BSONTimestamp.java new file mode 100644 index 00000000000..55178d6b3fd --- /dev/null +++ b/bson/src/main/org/bson/types/BSONTimestamp.java @@ -0,0 +1,117 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.types; + +import java.io.Serializable; +import java.util.Date; + +/** + * This is used for internal increment values. 
For normal dates you should use java.util.Date: <b>time</b> is seconds since epoch;
+ * <b>inc</b> is an ordinal.
+ *
+ * @mongodb.driver.manual reference/bson-types/#timestamps Timestamps
+ */
+public final class BSONTimestamp implements Comparable<BSONTimestamp>, Serializable {
+
+ private static final long serialVersionUID = -3268482672267936464L;
+
+ /**
+ * The millisecond increment within the second.
+ */
+ private final int inc;
+ /**
+ * The time, in seconds
+ */
+ private final Date time;
+
+ /**
+ * Creates a new instance.
+ */
+ public BSONTimestamp() {
+ inc = 0;
+ time = null;
+ }
+
+ /**
+ * Creates a new instance.
+ *
+ * @param time the time in seconds since epoch
+ * @param increment an incrementing ordinal for operations within a given second
+ * @mongodb.driver.manual reference/bson-types/#timestamps Timestamps
+ */
+ public BSONTimestamp(final int time, final int increment) {
+ this.time = new Date(time * 1000L);
+ this.inc = increment;
+ }
+
+ /**
+ * Gets the time in seconds since epoch
+ *
+ * @return an int representing time in seconds since epoch
+ */
+ public int getTime() {
+ if (time == null) {
+ return 0;
+ }
+ return (int) (time.getTime() / 1000);
+ }
+
+ /**
+ * Gets the incrementing ordinal for operations within a given second.
+ *
+ * @return the increment ordinal
+ */
+ public int getInc() {
+ return inc;
+ }
+
+ @Override
+ public String toString() {
+ return "TS time:" + time + " inc:" + inc;
+ }
+
+ @Override
+ public int compareTo(final BSONTimestamp ts) {
+ if (getTime() != ts.getTime()) {
+ return getTime() - ts.getTime();
+ } else {
+ return getInc() - ts.getInc();
+ }
+ }
+
+ @Override
+ public int hashCode() {
+ int prime = 31;
+ int result = 1;
+ result = prime * result + inc;
+ result = prime * result + getTime();
+ return result;
+ }
+
+ @Override
+ public boolean equals(final Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (obj instanceof BSONTimestamp) {
+ BSONTimestamp t2 = (BSONTimestamp) obj;
+ return getTime() == t2.getTime() && getInc() == t2.getInc();
+ }
+ return false;
+ }
+
+}
diff --git a/bson/src/main/org/bson/types/BasicBSONList.java b/bson/src/main/org/bson/types/BasicBSONList.java
new file mode 100644
index 00000000000..00fe637ade1
--- /dev/null
+++ b/bson/src/main/org/bson/types/BasicBSONList.java
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// BasicBSONList.java
+
+package org.bson.types;
+
+import org.bson.BSONObject;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;
+import java.util.Set;
+
+/**

+ * <p>Utility class to allow array {@code DBObject}s to be created.</p>
+ * <p>Note: MongoDB will also create arrays from {@code java.util.List}s.</p>
+ * <pre>
+ * BSONObject obj = new BasicBSONList();
+ * obj.put( "0", value1 );
+ * obj.put( "4", value2 );
+ * obj.put( 2, value3 );
+ * </pre>
+ * <p>This simulates the array [ value1, null, value3, null, value2 ] by creating the {@code DBObject} {@code { "0" : value1, "1" : null,
+ * "2" : value3, "3" : null, "4" : value2 }}.</p>
+ *
+ * <p>BasicBSONList only supports numeric keys. Passing strings that cannot be converted to ints
+ * will cause an IllegalArgumentException.</p>
+ * <pre>
+ * BasicBSONList list = new BasicBSONList();
+ * list.put("1", "bar"); // ok
+ * list.put("1E1", "bar"); // throws exception
+ * </pre>
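+ * <p>For example, {@code list.put(3, "x")} on an empty list first pads indexes 0 through 2 with {@code null}.</p>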
+ */ +@SuppressWarnings("rawtypes") +public class BasicBSONList extends ArrayList implements BSONObject { + + private static final long serialVersionUID = -4415279469780082174L; + + /** + * Puts a value at an index. For interface compatibility. Must be passed a String that is parsable to an int. + * + * @param key the index at which to insert the value + * @param v the value to insert + * @return the value + * @throws IllegalArgumentException if {@code key} cannot be parsed into an {@code int} + */ + @Override + public Object put(final String key, final Object v) { + return put(_getInt(key), v); + } + + /** + * Puts a value at an index. This will fill any unset indexes less than {@code index} with {@code null}. + * + * @param key the index at which to insert the value + * @param value the value to insert + * @return the value + */ + public Object put(final int key, final Object value) { + while (key >= size()) { + add(null); + } + set(key, value); + return value; + } + + @SuppressWarnings("unchecked") + @Override + public void putAll(final Map m) { + for (final Map.Entry entry : (Set) m.entrySet()) { + put(entry.getKey().toString(), entry.getValue()); + } + } + + @Override + public void putAll(final BSONObject o) { + for (final String k : o.keySet()) { + put(k, o.get(k)); + } + } + + /** + * Gets a value at an index. For interface compatibility. Must be passed a String that is parsable to an int. + * + * @param key the index + * @return the value, if found, or null + * @throws IllegalArgumentException if {@code key} cannot be parsed into an {@code int} + */ + public Object get(final String key) { + int i = _getInt(key); + if (i < 0) { + return null; + } + if (i >= size()) { + return null; + } + return get(i); + } + + @Override + public Object removeField(final String key) { + int i = _getInt(key); + if (i < 0) { + return null; + } + if (i >= size()) { + return null; + } + return remove(i); + } + + @Override + public boolean containsField(final String key) { + int i = _getInt(key, false); + if (i < 0) { + return false; + } + return i >= 0 && i < size(); + } + + @Override + public Set keySet() { + return new StringRangeSet(size()); + } + + @Override + @SuppressWarnings("unchecked") + public Map toMap() { + Map m = new HashMap(); + Iterator i = this.keySet().iterator(); + while (i.hasNext()) { + Object s = i.next(); + m.put(s, this.get(String.valueOf(s))); + } + return m; + } + + int _getInt(final String s) { + return _getInt(s, true); + } + + int _getInt(final String s, final boolean err) { + try { + return Integer.parseInt(s); + } catch (Exception e) { + if (err) { + throw new IllegalArgumentException("BasicBSONList can only work with numeric keys, not: [" + s + "]"); + } + return -1; + } + } +} diff --git a/bson/src/main/org/bson/types/Binary.java b/bson/src/main/org/bson/types/Binary.java new file mode 100644 index 00000000000..5ba482ccc41 --- /dev/null +++ b/bson/src/main/org/bson/types/Binary.java @@ -0,0 +1,124 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.types; + +import org.bson.BsonBinarySubType; + +import java.io.Serializable; +import java.util.Arrays; + +/** + * Generic binary holder. + */ +public class Binary implements Serializable { + private static final long serialVersionUID = 7902997490338209467L; + + /** + * The binary sub-type. + */ + private final byte type; + + /** + * The binary data. + */ + private final byte[] data; + + /** + * Creates a Binary object with the default binary type of 0 + * + * @param data raw data + */ + public Binary(final byte[] data) { + this(BsonBinarySubType.BINARY, data); + } + + /** + * Creates a Binary with the specified type and data. + * + * @param type the binary type + * @param data the binary data + */ + public Binary(final BsonBinarySubType type, final byte[] data) { + this(type.getValue(), data); + } + + /** + * Creates a Binary object + * + * @param type type of the field as encoded in BSON + * @param data raw data + */ + public Binary(final byte type, final byte[] data) { + this.type = type; + this.data = data.clone(); + } + + /** + * Get the binary sub type as a byte. + * + * @return the binary sub type as a byte. + */ + public byte getType() { + return type; + } + + /** + * Get a copy of the binary value. + * + * @return a copy of the binary value. + */ + public byte[] getData() { + return data.clone(); + } + + /** + * Get the length of the data. + * + * @return the length of the binary array. + */ + public int length() { + return data.length; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Binary binary = (Binary) o; + + if (type != binary.type) { + return false; + } + if (!Arrays.equals(data, binary.data)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = type; + result = 31 * result + Arrays.hashCode(data); + return result; + } +} diff --git a/bson/src/main/org/bson/types/Code.java b/bson/src/main/org/bson/types/Code.java new file mode 100644 index 00000000000..0bf9161315e --- /dev/null +++ b/bson/src/main/org/bson/types/Code.java @@ -0,0 +1,83 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Code.java + +package org.bson.types; + +import java.io.Serializable; + +/** + * For using the Code type. + */ +public class Code implements Serializable { + + private static final long serialVersionUID = 475535263314046697L; + + /** + * The JavaScript code string. + */ + private final String code; + + /** + * Construct a new instance with the given code. + * + * @param code the JavaScript code + */ + public Code(final String code) { + this.code = code; + } + + /** + * Get the JavaScript code. 
+ * + * @return the code + */ + public String getCode() { + return code; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Code code1 = (Code) o; + + if (!code.equals(code1.code)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return code.hashCode(); + } + + @Override + public String toString() { + return "Code{" + + "code='" + code + '\'' + + '}'; + } +} + diff --git a/bson/src/main/org/bson/types/CodeWScope.java b/bson/src/main/org/bson/types/CodeWScope.java new file mode 100644 index 00000000000..57c95e29c24 --- /dev/null +++ b/bson/src/main/org/bson/types/CodeWScope.java @@ -0,0 +1,70 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.types; + +import org.bson.BSONObject; + +/** + * Represents the CodeWScope BSON type. + * + * @see org.bson.BsonType#JAVASCRIPT_WITH_SCOPE + */ +public class CodeWScope extends Code { + + /** + * The scope document. + */ + private final BSONObject scope; + + private static final long serialVersionUID = -6284832275113680002L; + + /** + * Creates a new instance + * @param code the JavaScript code + * @param scope the scope as a document + */ + public CodeWScope(final String code, final BSONObject scope) { + super(code); + this.scope = scope; + } + + /** + * Gets the scope for this JavaScript + * @return a document representing the scope + */ + public BSONObject getScope() { + return scope; + } + + @Override + public boolean equals(final Object o) { + if (o == null) { + return false; + } + if (getClass() != o.getClass()) { + return false; + } + CodeWScope c = (CodeWScope) o; + return getCode().equals(c.getCode()) && scope.equals(c.scope); + } + + @Override + public int hashCode() { + return getCode().hashCode() ^ scope.hashCode(); + } +} + diff --git a/bson/src/main/org/bson/types/CodeWithScope.java b/bson/src/main/org/bson/types/CodeWithScope.java new file mode 100644 index 00000000000..f2cec479b84 --- /dev/null +++ b/bson/src/main/org/bson/types/CodeWithScope.java @@ -0,0 +1,83 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.types; + +import org.bson.Document; + +import java.util.Objects; + +/** + * A representation of the JavaScript Code with Scope BSON type. 
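+ * <p>For example, {@code new CodeWithScope("return x", new Document("x", 1))} pairs JavaScript source with the
+ * variables it references.</p>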
+ * + * @since 3.0 + */ +public class CodeWithScope extends Code { + + /** + * The scope document. + */ + private final Document scope; + + private static final long serialVersionUID = -6284832275113680002L; + + /** + * Construct an instance. + * + * @param code the code + * @param scope the scope + */ + public CodeWithScope(final String code, final Document scope) { + super(code); + this.scope = scope; + } + + /** + * Gets the scope, which is a mapping from identifiers to values, representing the scope in which the code should be evaluated. + * + * @return the scope + */ + public Document getScope() { + return scope; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + CodeWithScope that = (CodeWithScope) o; + + if (!Objects.equals(scope, that.scope)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return getCode().hashCode() ^ scope.hashCode(); + } +} + diff --git a/bson/src/main/org/bson/types/Decimal128.java b/bson/src/main/org/bson/types/Decimal128.java new file mode 100644 index 00000000000..1cd3d9745fa --- /dev/null +++ b/bson/src/main/org/bson/types/Decimal128.java @@ -0,0 +1,629 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.types; + +import java.math.BigDecimal; +import java.math.BigInteger; +import static java.math.MathContext.DECIMAL128; +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import java.util.HashSet; +import java.util.Set; + +/** + * A binary integer decimal representation of a 128-bit decimal value, supporting 34 decimal digits of significand and an exponent range + * of -6143 to +6144. 
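+ * <p>For example, {@code Decimal128.parse("1.25E+6000")} and {@code new Decimal128(new BigDecimal("1.25"))} both
+ * produce finite values that can be converted back via {@code bigDecimalValue()}.</p>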
+ *
+ * @since 3.4
+ * @see BSON Decimal128 specification
+ * @see binary integer decimal
+ * @see decimal128 floating-point format
+ * @see 754-2008 - IEEE Standard for Floating-Point Arithmetic
+ */
+public final class Decimal128 extends Number implements Comparable<Decimal128> {
+
+ private static final long serialVersionUID = 4570973266503637887L;
+
+ private static final long INFINITY_MASK = 0x7800000000000000L;
+ private static final long NaN_MASK = 0x7c00000000000000L;
+ private static final long SIGN_BIT_MASK = 1L << 63;
+ private static final int MIN_EXPONENT = -6176;
+ private static final int MAX_EXPONENT = 6111;
+
+ private static final int EXPONENT_OFFSET = 6176;
+ private static final int MAX_BIT_LENGTH = 113;
+
+ private static final BigInteger BIG_INT_TEN = new BigInteger("10");
+ private static final BigInteger BIG_INT_ONE = new BigInteger("1");
+ private static final BigInteger BIG_INT_ZERO = new BigInteger("0");
+
+ private static final Set<String> NaN_STRINGS = new HashSet<>(singletonList("nan"));
+ private static final Set<String> NEGATIVE_NaN_STRINGS = new HashSet<>(singletonList("-nan"));
+ private static final Set<String> POSITIVE_INFINITY_STRINGS = new HashSet<>(asList("inf", "+inf", "infinity", "+infinity"));
+ private static final Set<String> NEGATIVE_INFINITY_STRINGS = new HashSet<>(asList("-inf", "-infinity"));
+
+ /**
+ * A constant holding the positive infinity of type {@code Decimal128}. It is equal to the value returned by
+ * {@code Decimal128.parse("Infinity")}.
+ */
+ public static final Decimal128 POSITIVE_INFINITY = fromIEEE754BIDEncoding(INFINITY_MASK, 0);
+
+ /**
+ * A constant holding the negative infinity of type {@code Decimal128}. It is equal to the value returned by
+ * {@code Decimal128.parse("-Infinity")}.
+ */
+ public static final Decimal128 NEGATIVE_INFINITY = fromIEEE754BIDEncoding(INFINITY_MASK | SIGN_BIT_MASK, 0);
+
+ /**
+ * A constant holding a negative Not-a-Number (-NaN) value of type {@code Decimal128}. It is equal to the value returned by
+ * {@code Decimal128.parse("-NaN")}.
+ */
+ public static final Decimal128 NEGATIVE_NaN = fromIEEE754BIDEncoding(NaN_MASK | SIGN_BIT_MASK, 0);
+
+ /**
+ * A constant holding a Not-a-Number (NaN) value of type {@code Decimal128}. It is equal to the value returned by
+ * {@code Decimal128.parse("NaN")}.
+ */
+ public static final Decimal128 NaN = fromIEEE754BIDEncoding(NaN_MASK, 0);
+
+ /**
+ * A constant holding a positive zero value of type {@code Decimal128}. It is equal to the value returned by
+ * {@code Decimal128.parse("0")}.
+ */
+ public static final Decimal128 POSITIVE_ZERO = fromIEEE754BIDEncoding(0x3040000000000000L, 0x0000000000000000L);
+
+ /**
+ * A constant holding a negative zero value of type {@code Decimal128}. It is equal to the value returned by
+ * {@code Decimal128.parse("-0")}.
+ */
+ public static final Decimal128 NEGATIVE_ZERO = fromIEEE754BIDEncoding(0xb040000000000000L, 0x0000000000000000L);
+
+ /**
+ * The high bits.
+ */
+ private final long high;
+ /**
+ * The low bits.
+ */
+ private final long low;
+
+ /**
+ * Returns a Decimal128 value representing the given String.
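+ * <p>For example, {@code Decimal128.parse("-inf")} returns {@link #NEGATIVE_INFINITY}, while {@code Decimal128.parse("1.000")}
+ * preserves the trailing zeros as precision.</p>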
+ * + * @param value the Decimal128 value represented as a String + * @return the Decimal128 value representing the given String + * @throws NumberFormatException if the value is out of the Decimal128 range + * @see + * + * From-String Specification + */ + public static Decimal128 parse(final String value) { + String lowerCasedValue = value.toLowerCase(); + + if (NaN_STRINGS.contains(lowerCasedValue)) { + return NaN; + } + if (NEGATIVE_NaN_STRINGS.contains(lowerCasedValue)) { + return NEGATIVE_NaN; + } + if (POSITIVE_INFINITY_STRINGS.contains(lowerCasedValue)) { + return POSITIVE_INFINITY; + } + if (NEGATIVE_INFINITY_STRINGS.contains(lowerCasedValue)) { + return NEGATIVE_INFINITY; + } + return new Decimal128(new BigDecimal(value), value.charAt(0) == '-'); + } + + /** + * Create an instance with the given high and low order bits representing this Decimal128 as an IEEE 754-2008 128-bit decimal + * floating point using the BID encoding scheme. + * + * @param high the high-order 64 bits + * @param low the low-order 64 bits + * @return the Decimal128 value representing the given high and low order bits + */ + public static Decimal128 fromIEEE754BIDEncoding(final long high, final long low) { + return new Decimal128(high, low); + } + + /** + * Constructs a Decimal128 value representing the given long. + * + * @param value the Decimal128 value represented as a long + */ + public Decimal128(final long value) { + this(new BigDecimal(value, DECIMAL128)); + } + + /** + * Constructs a Decimal128 value representing the given BigDecimal. + * + * @param value the Decimal128 value represented as a BigDecimal + * @throws NumberFormatException if the value is out of the Decimal128 range + */ + public Decimal128(final BigDecimal value) { + this(value, value.signum() == -1); + } + + private Decimal128(final long high, final long low) { + this.high = high; + this.low = low; + } + + // isNegative is necessary to detect -0, which can't be represented with a BigDecimal + private Decimal128(final BigDecimal initialValue, final boolean isNegative) { + long localHigh = 0; + long localLow = 0; + + BigDecimal value = clampAndRound(initialValue); + + long exponent = -value.scale(); + + if ((exponent < MIN_EXPONENT) || (exponent > MAX_EXPONENT)) { + throw new AssertionError("Exponent is out of range for Decimal128 encoding: " + exponent); } + + if (value.unscaledValue().bitLength() > MAX_BIT_LENGTH) { + throw new AssertionError("Unscaled roundedValue is out of range for Decimal128 encoding:" + value.unscaledValue()); + } + + BigInteger significand = value.unscaledValue().abs(); + int bitLength = significand.bitLength(); + + for (int i = 0; i < Math.min(64, bitLength); i++) { + if (significand.testBit(i)) { + localLow |= 1L << i; + } + } + + for (int i = 64; i < bitLength; i++) { + if (significand.testBit(i)) { + localHigh |= 1L << (i - 64); + } + } + + long biasedExponent = exponent + EXPONENT_OFFSET; + + localHigh |= biasedExponent << 49; + + if (value.signum() == -1 || isNegative) { + localHigh |= SIGN_BIT_MASK; + } + + high = localHigh; + low = localLow; + } + + private BigDecimal clampAndRound(final BigDecimal initialValue) { + BigDecimal value; + if (-initialValue.scale() > MAX_EXPONENT) { + int diff = -initialValue.scale() - MAX_EXPONENT; + if (initialValue.unscaledValue().equals(BIG_INT_ZERO)) { + value = new BigDecimal(initialValue.unscaledValue(), -MAX_EXPONENT); + } else if (diff + initialValue.precision() > 34) { + throw new NumberFormatException("Exponent is out of range for Decimal128 encoding of " + 
initialValue); + } else { + BigInteger multiplier = BIG_INT_TEN.pow(diff); + value = new BigDecimal(initialValue.unscaledValue().multiply(multiplier), initialValue.scale() + diff); + } + } else if (-initialValue.scale() < MIN_EXPONENT) { + // Increasing a very negative exponent may require decreasing precision, which is rounding + // Only round exactly (by removing precision that is all zeroes). An exception is thrown if the rounding would be inexact: + // Exact: .000...0011000 => 11000E-6177 => 1100E-6176 => .000001100 + // Inexact: .000...0011001 => 11001E-6177 => 1100E-6176 => .000001100 + int diff = initialValue.scale() + MIN_EXPONENT; + int undiscardedPrecision = ensureExactRounding(initialValue, diff); + BigInteger divisor = undiscardedPrecision == 0 ? BIG_INT_ONE : BIG_INT_TEN.pow(diff); + value = new BigDecimal(initialValue.unscaledValue().divide(divisor), initialValue.scale() - diff); + } else { + value = initialValue.round(DECIMAL128); + int extraPrecision = initialValue.precision() - value.precision(); + if (extraPrecision > 0) { + // Again, only round exactly + ensureExactRounding(initialValue, extraPrecision); + } + } + return value; + } + + private int ensureExactRounding(final BigDecimal initialValue, final int extraPrecision) { + String significand = initialValue.unscaledValue().abs().toString(); + int undiscardedPrecision = Math.max(0, significand.length() - extraPrecision); + for (int i = undiscardedPrecision; i < significand.length(); i++) { + if (significand.charAt(i) != '0') { + throw new NumberFormatException("Conversion to Decimal128 would require inexact rounding of " + initialValue); + } + } + return undiscardedPrecision; + } + + /** + * Gets the high-order 64 bits of the IEEE 754-2008 128-bit decimal floating point encoding for this Decimal128, using the BID encoding + * scheme. + * + * @return the high-order 64 bits of this Decimal128 + */ + public long getHigh() { + return high; + } + + /** + * Gets the low-order 64 bits of the IEEE 754-2008 128-bit decimal floating point encoding for this Decimal128, using the BID encoding + * scheme. + * + * @return the low-order 64 bits of this Decimal128 + */ + public long getLow() { + return low; + } + + /** + * Gets a BigDecimal that is equivalent to this Decimal128. + * + * @return a BigDecimal that is equivalent to this Decimal128 + * @throws ArithmeticException if the Decimal128 value is NaN, Infinity, -Infinity, or -0, none of which can be represented as a + * BigDecimal + */ + public BigDecimal bigDecimalValue() { + + if (isNaN()) { + throw new ArithmeticException("NaN can not be converted to a BigDecimal"); + } + + if (isInfinite()) { + throw new ArithmeticException("Infinity can not be converted to a BigDecimal"); + } + + BigDecimal bigDecimal = bigDecimalValueNoNegativeZeroCheck(); + + // If the BigDecimal is 0, but the Decimal128 is negative, that means we have -0. 
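+ // For instance, Decimal128.NEGATIVE_ZERO reaches this branch: its sign bit is set while its BigDecimal form is plain zero.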
+ if (isNegative() && bigDecimal.signum() == 0) { + throw new ArithmeticException("Negative zero can not be converted to a BigDecimal"); + } + + return bigDecimal; + } + + // Make sure that the argument comes from a call to bigDecimalValueNoNegativeZeroCheck on this instance + private boolean hasDifferentSign(final BigDecimal bigDecimal) { + return isNegative() && bigDecimal.signum() == 0; + } + + private boolean isZero(final BigDecimal bigDecimal) { + return !isNaN() && !isInfinite() && bigDecimal.compareTo(BigDecimal.ZERO) == 0; + } + + private BigDecimal bigDecimalValueNoNegativeZeroCheck() { + int scale = -getExponent(); + + if (twoHighestCombinationBitsAreSet()) { + return BigDecimal.valueOf(0, scale); + } + + return new BigDecimal(new BigInteger(isNegative() ? -1 : 1, getBytes()), scale); + } + + // May have leading zeros. Strip them before considering making this method public + private byte[] getBytes() { + byte[] bytes = new byte[15]; + + long mask = 0x00000000000000ff; + for (int i = 14; i >= 7; i--) { + bytes[i] = (byte) ((low & mask) >>> ((14 - i) << 3)); + mask = mask << 8; + } + + mask = 0x00000000000000ff; + for (int i = 6; i >= 1; i--) { + bytes[i] = (byte) ((high & mask) >>> ((6 - i) << 3)); + mask = mask << 8; + } + + mask = 0x0001000000000000L; + bytes[0] = (byte) ((high & mask) >>> 48); + return bytes; + } + + private int getExponent() { + if (twoHighestCombinationBitsAreSet()) { + return (int) ((high & 0x1fffe00000000000L) >>> 47) - EXPONENT_OFFSET; + } else { + return (int) ((high & 0x7fff800000000000L) >>> 49) - EXPONENT_OFFSET; + } + } + + private boolean twoHighestCombinationBitsAreSet() { + return (high & 3L << 61) == 3L << 61; + } + + /** + * Returns true if this Decimal128 is negative. + * + * @return true if this Decimal128 is negative + */ + public boolean isNegative() { + return (high & SIGN_BIT_MASK) == SIGN_BIT_MASK; + } + + /** + * Returns true if this Decimal128 is infinite. + * + * @return true if this Decimal128 is infinite + */ + public boolean isInfinite() { + return (high & INFINITY_MASK) == INFINITY_MASK; + } + + /** + * Returns true if this Decimal128 is finite. + * + * @return true if this Decimal128 is finite + */ + public boolean isFinite() { + return !isInfinite(); + } + + /** + * Returns true if this Decimal128 is Not-A-Number (NaN). + * + * @return true if this Decimal128 is Not-A-Number + */ + public boolean isNaN() { + return (high & NaN_MASK) == NaN_MASK; + } + + + @Override + public int compareTo(final Decimal128 o) { + if (isNaN()) { + return o.isNaN() ? 0 : 1; + } + if (isInfinite()) { + if (isNegative()) { + if (o.isInfinite() && o.isNegative()) { + return 0; + } else { + return -1; + } + } else { + if (o.isNaN()) { + return -1; + } else if (o.isInfinite() && !o.isNegative()) { + return 0; + } else { + return 1; + } + } + } + BigDecimal bigDecimal = bigDecimalValueNoNegativeZeroCheck(); + BigDecimal otherBigDecimal = o.bigDecimalValueNoNegativeZeroCheck(); + + if (isZero(bigDecimal) && o.isZero(otherBigDecimal)) { + if (hasDifferentSign(bigDecimal)) { + if (o.hasDifferentSign(otherBigDecimal)) { + return 0; + } + else { + return -1; + } + } else if (o.hasDifferentSign(otherBigDecimal)) { + return 1; + } + } + + if (o.isNaN()) { + return -1; + } else if (o.isInfinite()) { + if (o.isNegative()) { + return 1; + } else { + return -1; + } + } else { + return bigDecimal.compareTo(otherBigDecimal); + } + } + + /** + * Converts this {@code Decimal128} to a {@code int}. 
This conversion is analogous to the narrowing primitive conversion from + * {@code double} to {@code int} as defined in The Java™ Language Specification: any fractional part of this + * {@code Decimal128} will be discarded, and if the resulting integral value is too big to fit in a {@code int}, only the + * low-order 32 bits are returned. Note that this conversion can lose information about the overall magnitude and precision of this + * {@code Decimal128} value as well as return a result with the opposite sign. Note that {@code #NEGATIVE_ZERO} is converted to + * {@code 0}. + * + * @return this {@code Decimal128} converted to a {@code int}. + * @since 3.10 + */ + @Override + public int intValue() { + return (int) doubleValue(); + } + + /** + * Converts this {@code Decimal128} to a {@code long}. This conversion is analogous to the narrowing primitive conversion from + * {@code double} to {@code long} as defined in The Java™ Language Specification: any fractional part of this + * {@code Decimal128} will be discarded, and if the resulting integral value is too big to fit in a {@code long}, only the + * low-order 64 bits are returned. Note that this conversion can lose information about the overall magnitude and precision of this + * {@code Decimal128} value as well as return a result with the opposite sign. Note that {@code #NEGATIVE_ZERO} is converted to + * {@code 0L}. + * + * @return this {@code Decimal128} converted to a {@code long}. + * @since 3.10 + */ + @Override + public long longValue() { + return (long) doubleValue(); + } + + /** + * Converts this {@code Decimal128} to a {@code float}. This conversion is similar to the narrowing primitive conversion from + * {@code double} to {@code float} as defined in The Java™ Language Specification: if this {@code Decimal128} has + * too great a magnitude to represent as a {@code float}, it will be converted to {@link Float#NEGATIVE_INFINITY} or + * {@link Float#POSITIVE_INFINITY} as appropriate. Note that even when the return value is finite, this conversion can lose + * information about the precision of the {@code Decimal128} value. Note that {@code #NEGATIVE_ZERO} is converted to {@code 0.0f}. + * + * @return this {@code Decimal128} converted to a {@code float}. + * @since 3.10 + */ + @Override + public float floatValue() { + return (float) doubleValue(); + } + + /** + * Converts this {@code Decimal128} to a {@code double}. This conversion is similar to the narrowing primitive conversion from + * {@code double} to {@code float} as defined in The Java™ Language Specification: if this {@code Decimal128} has + * too great a magnitude to represent as a {@code double}, it will be converted to {@link Double#NEGATIVE_INFINITY} or + * {@link Double#POSITIVE_INFINITY} as appropriate. Note that even when the return value is finite, this conversion can lose + * information about the precision of the {@code Decimal128} value. Note that {@code #NEGATIVE_ZERO} is converted to {@code 0.0d}. + * + * @return this {@code Decimal128} converted to a {@code double}. 
+ * @since 3.10 + */ + @Override + public double doubleValue() { + if (isNaN()) { + return Double.NaN; + } + if (isInfinite()) { + if (isNegative()) { + return Double.NEGATIVE_INFINITY; + } else { + return Double.POSITIVE_INFINITY; + } + } + + BigDecimal bigDecimal = bigDecimalValueNoNegativeZeroCheck(); + + if (hasDifferentSign(bigDecimal)) { + return -0.0d; + } + + return bigDecimal.doubleValue(); + } + + /** + * Returns true if the encoded representation of this instance is the same as the encoded representation of {@code o}. + *
+     * <p>
+     * One consequence is that, whereas {@code Double.NaN != Double.NaN},
+     * {@code new Decimal128("NaN").equals(new Decimal128("NaN"))} returns true.
+     * </p>
+     * <p>
+     * Another consequence is that, as with BigDecimal, {@code new Decimal128("1.0").equals(new Decimal128("1.00"))} returns false,
+     * because the precision is not the same and therefore the representation is not the same.
+     * </p>
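+     * <p>
+     * A short illustration of these semantics (editor's sketch, using the {@code Decimal128(String)} constructor and
+     * {@code compareTo} defined by this class):
+     * </p>
+     * <pre>{@code
+     * new Decimal128("NaN").equals(new Decimal128("NaN"));      // true, unlike Double.NaN != Double.NaN
+     * new Decimal128("1.0").equals(new Decimal128("1.00"));     // false: the encodings differ
+     * new Decimal128("1.0").compareTo(new Decimal128("1.00"));  // 0: numerically equal
+     * }</pre>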
+     *
+     * @param o the object to compare for equality
+     * @return true if the instances are equal
+     */
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        Decimal128 that = (Decimal128) o;
+
+        if (high != that.high) {
+            return false;
+        }
+        if (low != that.low) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = (int) (low ^ (low >>> 32));
+        result = 31 * result + (int) (high ^ (high >>> 32));
+        return result;
+    }
+
+    /**
+     * Returns the String representation of the Decimal128 value.
+     *
+     * @return the String representation
+     * @see <a href="https://github.com/mongodb/specifications/blob/master/source/bson-decimal128/decimal128.rst#to-string-representation">
+     *     To-String Specification</a>
+     */
+    @Override
+    public String toString() {
+        if (isNaN()) {
+            return "NaN";
+        }
+        if (isInfinite()) {
+            if (isNegative()) {
+                return "-Infinity";
+            } else {
+                return "Infinity";
+            }
+        }
+        return toStringWithBigDecimal();
+    }
+
+    private String toStringWithBigDecimal() {
+        StringBuilder buffer = new StringBuilder();
+
+        BigDecimal bigDecimal = bigDecimalValueNoNegativeZeroCheck();
+        String significand = bigDecimal.unscaledValue().abs().toString();
+
+        if (isNegative()) {
+            buffer.append('-');
+        }
+
+        int exponent = -bigDecimal.scale();
+        int adjustedExponent = exponent + (significand.length() - 1);
+        if (exponent <= 0 && adjustedExponent >= -6) {
+            if (exponent == 0) {
+                buffer.append(significand);
+            } else {
+                int pad = -exponent - significand.length();
+                if (pad >= 0) {
+                    buffer.append('0');
+                    buffer.append('.');
+                    for (int i = 0; i < pad; i++) {
+                        buffer.append('0');
+                    }
+                    buffer.append(significand, 0, significand.length());
+                } else {
+                    buffer.append(significand, 0, -pad);
+                    buffer.append('.');
+                    buffer.append(significand, -pad, -pad - exponent);
+                }
+            }
+        } else {
+            buffer.append(significand.charAt(0));
+            if (significand.length() > 1) {
+                buffer.append('.');
+                buffer.append(significand, 1, significand.length());
+            }
+            buffer.append('E');
+            if (adjustedExponent > 0) {
+                buffer.append('+');
+            }
+            buffer.append(adjustedExponent);
+        }
+        return buffer.toString();
+    }
+}
diff --git a/bson/src/main/org/bson/types/MaxKey.java b/bson/src/main/org/bson/types/MaxKey.java
new file mode 100644
index 00000000000..f27cc75d629
--- /dev/null
+++ b/bson/src/main/org/bson/types/MaxKey.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.bson.types; + +import java.io.Serializable; + +/** + * Represent the maximum key value regardless of the key's type + */ +public final class MaxKey implements Serializable { + + private static final long serialVersionUID = 5123414776151687185L; + + @Override + public boolean equals(final Object o) { + return o instanceof MaxKey; + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public String toString() { + return "MaxKey"; + } + +} diff --git a/bson/src/main/org/bson/types/MinKey.java b/bson/src/main/org/bson/types/MinKey.java new file mode 100644 index 00000000000..207934c827e --- /dev/null +++ b/bson/src/main/org/bson/types/MinKey.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.types; + +import java.io.Serializable; + +/** + * Represent the minimum key value regardless of the key's type + */ +public final class MinKey implements Serializable { + + private static final long serialVersionUID = 4075901136671855684L; + + @Override + public boolean equals(final Object o) { + return o instanceof MinKey; + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public String toString() { + return "MinKey"; + } + +} diff --git a/bson/src/main/org/bson/types/ObjectId.java b/bson/src/main/org/bson/types/ObjectId.java new file mode 100644 index 00000000000..927d3ab0c31 --- /dev/null +++ b/bson/src/main/org/bson/types/ObjectId.java @@ -0,0 +1,410 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.types; + +import static org.bson.assertions.Assertions.isTrueArgument; +import static org.bson.assertions.Assertions.notNull; + +import java.io.InvalidObjectException; +import java.io.ObjectInputStream; +import java.io.Serializable; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.security.SecureRandom; +import java.util.Date; +import java.util.concurrent.atomic.AtomicInteger; + +/** + *
+ * <p>A globally unique identifier for objects.</p>
+ *
+ * <p>Consists of 12 bytes, divided as follows:</p>
+ * <table border="1">
+ *     <caption>ObjectID layout</caption>
+ *     <tr>
+ *         <td>0</td><td>1</td><td>2</td><td>3</td><td>4</td><td>5</td><td>6</td><td>7</td><td>8</td><td>9</td><td>10</td><td>11</td>
+ *     </tr>
+ *     <tr>
+ *         <td colspan="4">time</td><td colspan="5">random value</td><td colspan="3">inc</td>
+ *     </tr>
+ * </table>
+ *
+ * <p>Instances of this class are immutable.</p>
+ *
+ * @mongodb.driver.manual core/object-id ObjectId
+ */
+public final class ObjectId implements Comparable<ObjectId>, Serializable {
+
+    // unused, as this class uses a proxy for serialization
+    private static final long serialVersionUID = 1L;
+
+    private static final int OBJECT_ID_LENGTH = 12;
+    private static final int LOW_ORDER_THREE_BYTES = 0x00ffffff;
+
+    // Use upper bytes of a long to represent the 5-byte random value.
+    private static final long RANDOM_VALUE;
+
+    private static final AtomicInteger NEXT_COUNTER;
+
+    private static final char[] HEX_CHARS = {
+            '0', '1', '2', '3', '4', '5', '6', '7',
+            '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
+
+    /**
+     * The timestamp.
+     */
+    private final int timestamp;
+
+    /**
+     * The final 8 bytes of the ObjectID are 5 bytes probabilistically unique to the machine and
+     * process, followed by a 3-byte incrementing counter initialized to a random value.
+     */
+    private final long nonce;
+
+    /**
+     * Gets a new object id.
+     *
+     * @return the new id
+     */
+    public static ObjectId get() {
+        return new ObjectId();
+    }
+
+    /**
+     * Gets a new object id with the given date value and all other bits zeroed.
+     * <p>
+     * The returned object id will compare as less than or equal to any other object id within the same second as the given date, and
+     * less than any later date.
+     * </p>
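+     * <p>
+     * A minimal sketch of how this ordering guarantee can be used (editor's illustration, relying only on methods of this class;
+     * {@code someId} stands in for any other {@code ObjectId}):
+     * </p>
+     * <pre>{@code
+     * ObjectId lowerBound = ObjectId.getSmallestWithDate(date);
+     * // true for any id generated during or after the second that date falls in
+     * boolean atOrAfter = lowerBound.compareTo(someId) <= 0;
+     * }</pre>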
+     *
+     * @param date the date
+     * @return the ObjectId
+     * @since 4.1
+     */
+    public static ObjectId getSmallestWithDate(final Date date) {
+        return new ObjectId(dateToTimestampSeconds(date), 0L);
+    }
+
+    /**
+     * Checks if a string could be an {@code ObjectId}.
+     *
+     * @param hexString a potential ObjectId as a String.
+     * @return whether the string could be an object id
+     * @throws IllegalArgumentException if hexString is null
+     */
+    public static boolean isValid(final String hexString) {
+        if (hexString == null) {
+            throw new IllegalArgumentException();
+        }
+
+        int len = hexString.length();
+        if (len != 24) {
+            return false;
+        }
+
+        for (int i = 0; i < len; i++) {
+            char c = hexString.charAt(i);
+            if (c >= '0' && c <= '9') {
+                continue;
+            }
+            if (c >= 'a' && c <= 'f') {
+                continue;
+            }
+            if (c >= 'A' && c <= 'F') {
+                continue;
+            }
+
+            return false;
+        }
+
+        return true;
+    }
+
+    /**
+     * Create a new object id.
+     */
+    public ObjectId() {
+        this(new Date());
+    }
+
+    /**
+     * Constructs a new instance using the given date.
+     *
+     * @param date the date
+     */
+    public ObjectId(final Date date) {
+        this(dateToTimestampSeconds(date), RANDOM_VALUE | (NEXT_COUNTER.getAndIncrement() & LOW_ORDER_THREE_BYTES));
+    }
+
+    /**
+     * Constructs a new instance using the given date and counter.
+     *
+     * @param date    the date
+     * @param counter the counter
+     * @throws IllegalArgumentException if the high order byte of counter is not zero
+     */
+    public ObjectId(final Date date, final int counter) {
+        this(dateToTimestampSeconds(date), getNonceFromUntrustedCounter(counter));
+    }
+
+    /**
+     * Creates an ObjectId using the given time and counter.
+     *
+     * @param timestamp the time in seconds
+     * @param counter   the counter
+     * @throws IllegalArgumentException if the high order byte of counter is not zero
+     */
+    public ObjectId(final int timestamp, final int counter) {
+        this(timestamp, getNonceFromUntrustedCounter(counter));
+    }
+
+    private ObjectId(final int timestamp, final long nonce) {
+        this.timestamp = timestamp;
+        this.nonce = nonce;
+    }
+
+    private static long getNonceFromUntrustedCounter(final int counter) {
+        if ((counter & 0xff000000) != 0) {
+            throw new IllegalArgumentException("The counter must be between 0 and 16777215 (it must fit in three bytes).");
+        }
+        return RANDOM_VALUE | counter;
+    }
+
+    /**
+     * Constructs a new instance from a 24-character hexadecimal string representation.
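+     * <p>
+     * For example (editor's illustration; the hex value is arbitrary):
+     * </p>
+     * <pre>{@code
+     * ObjectId id = new ObjectId("507f1f77bcf86cd799439011");  // 24 hexadecimal characters
+     * }</pre>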
+     *
+     * @param hexString the string to convert
+     * @throws IllegalArgumentException if the string is not a valid hex string representation of an ObjectId
+     */
+    public ObjectId(final String hexString) {
+        this(parseHexString(hexString));
+    }
+
+    /**
+     * Constructs a new instance from the given byte array.
+     *
+     * @param bytes the byte array
+     * @throws IllegalArgumentException if array is null or not of length 12
+     */
+    public ObjectId(final byte[] bytes) {
+        this(ByteBuffer.wrap(isTrueArgument("bytes has length of 12", bytes, notNull("bytes", bytes).length == 12)));
+    }
+
+    /**
+     * Constructs a new instance from the given ByteBuffer.
+     *
+     * @param buffer the ByteBuffer
+     * @throws IllegalArgumentException if the buffer is null or does not have at least 12 bytes remaining
+     * @since 3.4
+     */
+    public ObjectId(final ByteBuffer buffer) {
+        notNull("buffer", buffer);
+        isTrueArgument("buffer.remaining() >= 12", buffer.remaining() >= OBJECT_ID_LENGTH);
+
+        ByteOrder originalOrder = buffer.order();
+        try {
+            buffer.order(ByteOrder.BIG_ENDIAN);
+            this.timestamp = buffer.getInt();
+            this.nonce = buffer.getLong();
+        } finally {
+            buffer.order(originalOrder);
+        }
+    }
+
+    /**
+     * Convert to a byte array. Note that the numbers are stored in big-endian order.
+     *
+     * @return the byte array
+     */
+    public byte[] toByteArray() {
+        // using .allocate ensures there is a backing array that can be returned
+        return ByteBuffer.allocate(OBJECT_ID_LENGTH)
+                .putInt(this.timestamp)
+                .putLong(this.nonce)
+                .array();
+    }
+
+    /**
+     * Convert to bytes and put those bytes to the provided ByteBuffer.
+     * Note that the numbers are stored in big-endian order.
+     *
+     * @param buffer the ByteBuffer
+     * @throws IllegalArgumentException if the buffer is null or does not have at least 12 bytes remaining
+     * @since 3.4
+     */
+    public void putToByteBuffer(final ByteBuffer buffer) {
+        notNull("buffer", buffer);
+        isTrueArgument("buffer.remaining() >= 12", buffer.remaining() >= OBJECT_ID_LENGTH);
+
+        ByteOrder originalOrder = buffer.order();
+        try {
+            buffer.order(ByteOrder.BIG_ENDIAN);
+            buffer.putInt(this.timestamp);
+            buffer.putLong(this.nonce);
+        } finally {
+            buffer.order(originalOrder);
+        }
+    }
+
+    /**
+     * Gets the timestamp (number of seconds since the Unix epoch).
+     *
+     * @return the timestamp
+     */
+    public int getTimestamp() {
+        return timestamp;
+    }
+
+    /**
+     * Gets the timestamp as a {@code Date} instance.
+     *
+     * @return the Date
+     */
+    public Date getDate() {
+        return new Date((timestamp & 0xFFFFFFFFL) * 1000L);
+    }
+
+    /**
+     * Converts this instance into a 24-character hexadecimal string representation.
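+     * <p>
+     * For example (editor's illustration):
+     * </p>
+     * <pre>{@code
+     * new ObjectId(new byte[12]).toHexString();  // "000000000000000000000000"
+     * }</pre>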
+ * + * @return a string representation of the ObjectId in hexadecimal format + */ + public String toHexString() { + char[] chars = new char[OBJECT_ID_LENGTH * 2]; + int i = 0; + for (byte b : toByteArray()) { + chars[i++] = HEX_CHARS[b >> 4 & 0xF]; + chars[i++] = HEX_CHARS[b & 0xF]; + } + return new String(chars); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ObjectId other = (ObjectId) o; + if (timestamp != other.timestamp) { + return false; + } + return nonce == other.nonce; + } + + @Override + public int hashCode() { + return 31 * timestamp + Long.hashCode(nonce); + } + + @Override + public int compareTo(final ObjectId other) { + int cmp = Integer.compareUnsigned(this.timestamp, other.timestamp); + if (cmp != 0) { + return cmp; + } + + return Long.compareUnsigned(nonce, other.nonce); + } + + @Override + public String toString() { + return toHexString(); + } + + /** + * Write the replacement object. + * + *
+     * <p>
+     * See https://docs.oracle.com/javase/6/docs/platform/serialization/spec/output.html
+     * </p>
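+     * <p>
+     * Editor's sketch of the observable effect, assuming the standard {@code java.io} serialization classes:
+     * </p>
+     * <pre>{@code
+     * ObjectId original = new ObjectId();
+     * ByteArrayOutputStream bytes = new ByteArrayOutputStream();
+     * new ObjectOutputStream(bytes).writeObject(original);
+     * ObjectId copy = (ObjectId) new ObjectInputStream(
+     *         new ByteArrayInputStream(bytes.toByteArray())).readObject();
+     * assert original.equals(copy);  // rebuilt via SerializationProxy.readResolve
+     * }</pre>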
+ * + * @return a proxy for the document + */ + private Object writeReplace() { + return new SerializationProxy(this); + } + + /** + * Prevent normal deserialization. + * + *
+     * <p>
+     * See https://docs.oracle.com/javase/6/docs/platform/serialization/spec/input.html
+     * </p>
+ * + * @param stream the stream + * @throws InvalidObjectException in all cases + */ + private void readObject(final ObjectInputStream stream) throws InvalidObjectException { + throw new InvalidObjectException("Proxy required"); + } + + private static class SerializationProxy implements Serializable { + private static final long serialVersionUID = 1L; + + private final byte[] bytes; + + SerializationProxy(final ObjectId objectId) { + bytes = objectId.toByteArray(); + } + + private Object readResolve() { + return new ObjectId(bytes); + } + } + + static { + try { + SecureRandom secureRandom = new SecureRandom(); + RANDOM_VALUE = secureRandom.nextLong() & ~LOW_ORDER_THREE_BYTES; + NEXT_COUNTER = new AtomicInteger(secureRandom.nextInt()); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + private static byte[] parseHexString(final String s) { + notNull("hexString", s); + isTrueArgument("hexString has 24 characters", s.length() == 24); + + byte[] b = new byte[OBJECT_ID_LENGTH]; + for (int i = 0; i < b.length; i++) { + int pos = i << 1; + char c1 = s.charAt(pos); + char c2 = s.charAt(pos + 1); + b[i] = (byte) ((hexCharToInt(c1) << 4) + hexCharToInt(c2)); + } + return b; + } + + private static int hexCharToInt(final char c) { + if (c >= '0' && c <= '9') { + return c - 48; + } else if (c >= 'a' && c <= 'f') { + return c - 87; + } else if (c >= 'A' && c <= 'F') { + return c - 55; + } + throw new IllegalArgumentException("invalid hexadecimal character: [" + c + "]"); + } + + private static int dateToTimestampSeconds(final Date time) { + return (int) (time.getTime() / 1000); + } +} diff --git a/bson/src/main/org/bson/types/StringRangeSet.java b/bson/src/main/org/bson/types/StringRangeSet.java new file mode 100644 index 00000000000..4daca26cc03 --- /dev/null +++ b/bson/src/main/org/bson/types/StringRangeSet.java @@ -0,0 +1,162 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.bson.types;
+
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+import java.util.Set;
+
+import static org.bson.assertions.Assertions.isTrue;
+
+
+class StringRangeSet implements Set<String> {
+
+    private static final String[] STRINGS = new String[1024];
+
+    static {
+        for (int i = 0; i < STRINGS.length; ++i) {
+            STRINGS[i] = String.valueOf(i);
+        }
+    }
+
+    private final int size;
+
+    StringRangeSet(final int size) {
+        isTrue("size >= 0", size >= 0);
+        this.size = size;
+    }
+
+    @Override
+    public int size() {
+        return size;
+    }
+
+    @Override
+    public boolean isEmpty() {
+        return size() == 0;
+    }
+
+    @Override
+    public boolean contains(final Object o) {
+        if (!(o instanceof String)) {
+            return false;
+        }
+        try {
+            int i = Integer.parseInt((String) o);
+            return i >= 0 && i < size();
+        } catch (NumberFormatException e) {
+            return false;
+        }
+    }
+
+    @Override
+    public Iterator<String> iterator() {
+        return new Iterator<String>() {
+            private int cur = 0;
+
+            @Override
+            public boolean hasNext() {
+                return cur < size;
+            }
+
+            @Override
+            public String next() {
+                if (!hasNext()) {
+                    throw new NoSuchElementException();
+                }
+                return intToString(cur++);
+            }
+
+            @Override
+            public void remove() {
+                throw new UnsupportedOperationException();
+            }
+        };
+    }
+
+    @Override
+    public Object[] toArray() {
+        Object[] retVal = new Object[size()];
+        for (int i = 0; i < size(); i++) {
+            retVal[i] = intToString(i);
+        }
+        return retVal;
+    }
+
+    @Override
+    @SuppressWarnings("unchecked")
+    public <T> T[] toArray(final T[] a) {
+        T[] retVal = a.length >= size()
+                ? a
+                : (T[]) java.lang.reflect.Array
+                        .newInstance(a.getClass().getComponentType(), size);
+        for (int i = 0; i < size(); i++) {
+            retVal[i] = (T) (intToString(i));
+        }
+        if (a.length > size()) {
+            a[size] = null;
+        }
+        return retVal;
+    }
+
+    @Override
+    public boolean add(final String integer) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public boolean remove(final Object o) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public boolean containsAll(final Collection<?> c) {
+        for (Object e : c) {
+            if (!contains(e)) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    @Override
+    public boolean addAll(final Collection<? extends String> c) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public boolean retainAll(final Collection<?> c) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public boolean removeAll(final Collection<?> c) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void clear() {
+        throw new UnsupportedOperationException();
+    }
+
+    private String intToString(final int i) {
+        return i < STRINGS.length
+                ? STRINGS[i]
+                : Integer.toString(i);
+    }
+}
diff --git a/bson/src/main/org/bson/types/Symbol.java b/bson/src/main/org/bson/types/Symbol.java
new file mode 100644
index 00000000000..2cf0dc4d859
--- /dev/null
+++ b/bson/src/main/org/bson/types/Symbol.java
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Symbol.java
+
+package org.bson.types;
+
+import java.io.Serializable;
+
+/**
+ * Class to hold an instance of the BSON symbol type.
+ */
+public class Symbol implements Serializable {
+
+    private static final long serialVersionUID = 1326269319883146072L;
+
+    /**
+     * The symbol string.
+     */
+    private final String symbol;
+
+    /**
+     * Construct a new instance with the given symbol.
+     *
+     * @param symbol the symbol
+     */
+    public Symbol(final String symbol) {
+        this.symbol = symbol;
+    }
+
+    /**
+     * Gets the symbol.
+     *
+     * @return the symbol
+     */
+    public String getSymbol() {
+        return symbol;
+    }
+
+    /**
+     * Compares this {@code Symbol} to the given object. Two symbols are equal if the strings they hold are equal. Note that,
+     * although a {@code Symbol} holds a String, it never compares equal to a {@code String}.
+     *
+     * @param o the object to compare this to
+     * @return true if o is a Symbol holding an equal string
+     */
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        Symbol symbol1 = (Symbol) o;
+
+        if (!symbol.equals(symbol1.symbol)) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        return symbol.hashCode();
+    }
+
+    @Override
+    public String toString() {
+        return symbol;
+    }
+}
diff --git a/bson/src/main/org/bson/types/package-info.java b/bson/src/main/org/bson/types/package-info.java
new file mode 100644
index 00000000000..a82a0da608b
--- /dev/null
+++ b/bson/src/main/org/bson/types/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Contains classes implementing various BSON types.
+ */
+package org.bson.types;
diff --git a/bson/src/main/resources/META-INF/native-image/native-image.properties b/bson/src/main/resources/META-INF/native-image/native-image.properties
new file mode 100644
index 00000000000..65c60367503
--- /dev/null
+++ b/bson/src/main/resources/META-INF/native-image/native-image.properties
@@ -0,0 +1,16 @@
+#
+# Copyright 2008-present MongoDB, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+Args = --initialize-at-run-time=org.bson.types.ObjectId
diff --git a/bson/src/main/resources/META-INF/native-image/reflect-config.json b/bson/src/main/resources/META-INF/native-image/reflect-config.json
new file mode 100644
index 00000000000..dd27feda44d
--- /dev/null
+++ b/bson/src/main/resources/META-INF/native-image/reflect-config.json
@@ -0,0 +1,17 @@
+[
+{
+  "name":"java.lang.Object",
+  "queryAllDeclaredMethods":true
+},
+{
+  "name":"sun.security.provider.NativePRNG",
+  "methods":[{"name":"<init>","parameterTypes":[] }, {"name":"<init>","parameterTypes":["java.security.SecureRandomParameters"] }]
+},
+{
+  "name":"sun.security.provider.SHA",
+  "methods":[{"name":"<init>","parameterTypes":[] }]
+},
+{
+  "name":"org.slf4j.Logger"
+}
+]
diff --git a/bson/src/test/resources/bson-binary-vector/float32.json b/bson/src/test/resources/bson-binary-vector/float32.json
new file mode 100644
index 00000000000..e1d142c184b
--- /dev/null
+++ b/bson/src/test/resources/bson-binary-vector/float32.json
@@ -0,0 +1,50 @@
+{
+  "description": "Tests of Binary subtype 9, Vectors, with dtype FLOAT32",
+  "test_key": "vector",
+  "tests": [
+    {
+      "description": "Simple Vector FLOAT32",
+      "valid": true,
+      "vector": [127.0, 7.0],
+      "dtype_hex": "0x27",
+      "dtype_alias": "FLOAT32",
+      "padding": 0,
+      "canonical_bson": "1C00000005766563746F72000A0000000927000000FE420000E04000"
+    },
+    {
+      "description": "Vector with decimals and negative value FLOAT32",
+      "valid": true,
+      "vector": [127.7, -7.7],
+      "dtype_hex": "0x27",
+      "dtype_alias": "FLOAT32",
+      "padding": 0,
+      "canonical_bson": "1C00000005766563746F72000A0000000927006666FF426666F6C000"
+    },
+    {
+      "description": "Empty Vector FLOAT32",
+      "valid": true,
+      "vector": [],
+      "dtype_hex": "0x27",
+      "dtype_alias": "FLOAT32",
+      "padding": 0,
+      "canonical_bson": "1400000005766563746F72000200000009270000"
+    },
+    {
+      "description": "Infinity Vector FLOAT32",
+      "valid": true,
+      "vector": ["-inf", 0.0, "inf"],
+      "dtype_hex": "0x27",
+      "dtype_alias": "FLOAT32",
+      "padding": 0,
+      "canonical_bson": "2000000005766563746F72000E000000092700000080FF000000000000807F00"
+    },
+    {
+      "description": "FLOAT32 with padding",
+      "valid": false,
+      "vector": [127.0, 7.0],
+      "dtype_hex": "0x27",
+      "dtype_alias": "FLOAT32",
+      "padding": 3
+    }
+  ]
+}
\ No newline at end of file
diff --git a/bson/src/test/resources/bson-binary-vector/int8.json b/bson/src/test/resources/bson-binary-vector/int8.json
new file mode 100644
index 00000000000..c10c1b7d4e2
--- /dev/null
+++ b/bson/src/test/resources/bson-binary-vector/int8.json
@@ -0,0 +1,56 @@
+{
+  "description": "Tests of Binary subtype 9, Vectors, with dtype INT8",
+  "test_key": "vector",
+  "tests": [
+    {
+      "description": "Simple Vector INT8",
+      "valid": true,
+      "vector": [127, 7],
+      "dtype_hex": "0x03",
+      "dtype_alias": "INT8",
+      "padding": 0,
+      "canonical_bson": "1600000005766563746F7200040000000903007F0700"
+    },
+    {
+      "description": "Empty Vector INT8",
+      "valid": true,
+      "vector": [],
+      "dtype_hex": "0x03",
+      "dtype_alias": "INT8",
+      "padding": 0,
+      "canonical_bson": "1400000005766563746F72000200000009030000"
+    },
+    {
+      "description": "Overflow Vector INT8",
+      "valid": false,
+      "vector": [128],
+      "dtype_hex": "0x03",
+      "dtype_alias": "INT8",
+      "padding": 0
+    },
+    {
+      "description": "Underflow Vector INT8",
+      "valid": false,
+      "vector": [-129],
+      "dtype_hex": "0x03",
+      "dtype_alias": "INT8",
+      "padding": 0
+    },
+    {
+      "description": "INT8 with padding",
+      "valid": false,
+      "vector": [127, 7],
+      "dtype_hex": "0x03",
+      "dtype_alias": "INT8",
+      "padding": 3
+    },
+    {
"description": "INT8 with float inputs", + "valid": false, + "vector": [127.77, 7.77], + "dtype_hex": "0x03", + "dtype_alias": "INT8", + "padding": 0 + } + ] +} \ No newline at end of file diff --git a/bson/src/test/resources/bson-binary-vector/packed_bit.json b/bson/src/test/resources/bson-binary-vector/packed_bit.json new file mode 100644 index 00000000000..69fb3948335 --- /dev/null +++ b/bson/src/test/resources/bson-binary-vector/packed_bit.json @@ -0,0 +1,97 @@ +{ + "description": "Tests of Binary subtype 9, Vectors, with dtype PACKED_BIT", + "test_key": "vector", + "tests": [ + { + "description": "Padding specified with no vector data PACKED_BIT", + "valid": false, + "vector": [], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": 1 + }, + { + "description": "Simple Vector PACKED_BIT", + "valid": true, + "vector": [127, 7], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": 0, + "canonical_bson": "1600000005766563746F7200040000000910007F0700" + }, + { + "description": "Empty Vector PACKED_BIT", + "valid": true, + "vector": [], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": 0, + "canonical_bson": "1400000005766563746F72000200000009100000" + }, + { + "description": "PACKED_BIT with padding", + "valid": true, + "vector": [127, 7], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": 3, + "canonical_bson": "1600000005766563746F7200040000000910037F0700" + }, + { + "description": "Overflow Vector PACKED_BIT", + "valid": false, + "vector": [256], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": 0 + }, + { + "description": "Underflow Vector PACKED_BIT", + "valid": false, + "vector": [-1], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": 0 + }, + { + "description": "Vector with float values PACKED_BIT", + "valid": false, + "vector": [127.5], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": 0 + }, + { + "description": "Padding specified with no vector data PACKED_BIT", + "valid": false, + "vector": [], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": 1 + }, + { + "description": "Exceeding maximum padding PACKED_BIT", + "valid": false, + "vector": [1], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": 8 + }, + { + "description": "Negative padding PACKED_BIT", + "valid": false, + "vector": [1], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": -1 + }, + { + "description": "Vector with float values PACKED_BIT", + "valid": false, + "vector": [127.5], + "dtype_hex": "0x10", + "dtype_alias": "PACKED_BIT", + "padding": 0 + } + ] +} \ No newline at end of file diff --git a/bson/src/test/resources/bson/array.json b/bson/src/test/resources/bson/array.json new file mode 100644 index 00000000000..9ff953e5ae7 --- /dev/null +++ b/bson/src/test/resources/bson/array.json @@ -0,0 +1,49 @@ +{ + "description": "Array", + "bson_type": "0x04", + "test_key": "a", + "valid": [ + { + "description": "Empty", + "canonical_bson": "0D000000046100050000000000", + "canonical_extjson": "{\"a\" : []}" + }, + { + "description": "Single Element Array", + "canonical_bson": "140000000461000C0000001030000A0000000000", + "canonical_extjson": "{\"a\" : [{\"$numberInt\": \"10\"}]}" + }, + { + "description": "Single Element Array with index set incorrectly to empty string", + "degenerate_bson": "130000000461000B00000010000A0000000000", + "canonical_bson": "140000000461000C0000001030000A0000000000", + "canonical_extjson": "{\"a\" : [{\"$numberInt\": \"10\"}]}" + }, + 
{ + "description": "Single Element Array with index set incorrectly to ab", + "degenerate_bson": "150000000461000D000000106162000A0000000000", + "canonical_bson": "140000000461000C0000001030000A0000000000", + "canonical_extjson": "{\"a\" : [{\"$numberInt\": \"10\"}]}" + }, + { + "description": "Multi Element Array with duplicate indexes", + "degenerate_bson": "1b000000046100130000001030000a000000103000140000000000", + "canonical_bson": "1b000000046100130000001030000a000000103100140000000000", + "canonical_extjson": "{\"a\" : [{\"$numberInt\": \"10\"}, {\"$numberInt\": \"20\"}]}" + } + ], + "decodeErrors": [ + { + "description": "Array length too long: eats outer terminator", + "bson": "140000000461000D0000001030000A0000000000" + }, + { + "description": "Array length too short: leaks terminator", + "bson": "140000000461000B0000001030000A0000000000" + }, + { + "description": "Invalid Array: bad string length in field", + "bson": "1A00000004666F6F00100000000230000500000062617A000000" + } + ] +} diff --git a/bson/src/test/resources/bson/binary.json b/bson/src/test/resources/bson/binary.json new file mode 100644 index 00000000000..0e0056f3a2c --- /dev/null +++ b/bson/src/test/resources/bson/binary.json @@ -0,0 +1,153 @@ +{ + "description": "Binary type", + "bson_type": "0x05", + "test_key": "x", + "valid": [ + { + "description": "subtype 0x00 (Zero-length)", + "canonical_bson": "0D000000057800000000000000", + "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"\", \"subType\" : \"00\"}}}" + }, + { + "description": "subtype 0x00 (Zero-length, keys reversed)", + "canonical_bson": "0D000000057800000000000000", + "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"\", \"subType\" : \"00\"}}}", + "degenerate_extjson": "{\"x\" : { \"$binary\" : {\"subType\" : \"00\", \"base64\" : \"\"}}}" + }, + { + "description": "subtype 0x00", + "canonical_bson": "0F0000000578000200000000FFFF00", + "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"//8=\", \"subType\" : \"00\"}}}" + }, + { + "description": "subtype 0x01", + "canonical_bson": "0F0000000578000200000001FFFF00", + "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"//8=\", \"subType\" : \"01\"}}}" + }, + { + "description": "subtype 0x02", + "canonical_bson": "13000000057800060000000202000000FFFF00", + "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"//8=\", \"subType\" : \"02\"}}}" + }, + { + "description": "subtype 0x03", + "canonical_bson": "1D000000057800100000000373FFD26444B34C6990E8E7D1DFC035D400", + "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"c//SZESzTGmQ6OfR38A11A==\", \"subType\" : \"03\"}}}" + }, + { + "description": "subtype 0x04", + "canonical_bson": "1D000000057800100000000473FFD26444B34C6990E8E7D1DFC035D400", + "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"c//SZESzTGmQ6OfR38A11A==\", \"subType\" : \"04\"}}}" + }, + { + "description": "subtype 0x04 UUID", + "canonical_bson": "1D000000057800100000000473FFD26444B34C6990E8E7D1DFC035D400", + "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"c//SZESzTGmQ6OfR38A11A==\", \"subType\" : \"04\"}}}", + "degenerate_extjson": "{\"x\" : { \"$uuid\" : \"73ffd264-44b3-4c69-90e8-e7d1dfc035d4\"}}" + }, + { + "description": "subtype 0x05", + "canonical_bson": "1D000000057800100000000573FFD26444B34C6990E8E7D1DFC035D400", + "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"c//SZESzTGmQ6OfR38A11A==\", \"subType\" : \"05\"}}}" + }, + { + "description": "subtype 0x07", + "canonical_bson": 
"1D000000057800100000000773FFD26444B34C6990E8E7D1DFC035D400", + "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"c//SZESzTGmQ6OfR38A11A==\", \"subType\" : \"07\"}}}" + }, + { + "description": "subtype 0x08", + "canonical_bson": "1D000000057800100000000873FFD26444B34C6990E8E7D1DFC035D400", + "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"c//SZESzTGmQ6OfR38A11A==\", \"subType\" : \"08\"}}}" + }, + { + "description": "subtype 0x80", + "canonical_bson": "0F0000000578000200000080FFFF00", + "canonical_extjson": "{\"x\" : { \"$binary\" : {\"base64\" : \"//8=\", \"subType\" : \"80\"}}}" + }, + { + "description": "$type query operator (conflicts with legacy $binary form with $type field)", + "canonical_bson": "1F000000037800170000000224747970650007000000737472696E67000000", + "canonical_extjson": "{\"x\" : { \"$type\" : \"string\"}}" + }, + { + "description": "$type query operator (conflicts with legacy $binary form with $type field)", + "canonical_bson": "180000000378001000000010247479706500020000000000", + "canonical_extjson": "{\"x\" : { \"$type\" : {\"$numberInt\": \"2\"}}}" + }, + { + "description": "subtype 0x09 Vector FLOAT32", + "canonical_bson": "170000000578000A0000000927000000FE420000E04000", + "canonical_extjson": "{\"x\": {\"$binary\": {\"base64\": \"JwAAAP5CAADgQA==\", \"subType\": \"09\"}}}" + }, + { + "description": "subtype 0x09 Vector INT8", + "canonical_bson": "11000000057800040000000903007F0700", + "canonical_extjson": "{\"x\": {\"$binary\": {\"base64\": \"AwB/Bw==\", \"subType\": \"09\"}}}" + }, + { + "description": "subtype 0x09 Vector PACKED_BIT", + "canonical_bson": "11000000057800040000000910007F0700", + "canonical_extjson": "{\"x\": {\"$binary\": {\"base64\": \"EAB/Bw==\", \"subType\": \"09\"}}}" + }, + { + "description": "subtype 0x09 Vector (Zero-length) FLOAT32", + "canonical_bson": "0F0000000578000200000009270000", + "canonical_extjson": "{\"x\": {\"$binary\": {\"base64\": \"JwA=\", \"subType\": \"09\"}}}" + }, + { + "description": "subtype 0x09 Vector (Zero-length) INT8", + "canonical_bson": "0F0000000578000200000009030000", + "canonical_extjson": "{\"x\": {\"$binary\": {\"base64\": \"AwA=\", \"subType\": \"09\"}}}" + }, + { + "description": "subtype 0x09 Vector (Zero-length) PACKED_BIT", + "canonical_bson": "0F0000000578000200000009100000", + "canonical_extjson": "{\"x\": {\"$binary\": {\"base64\": \"EAA=\", \"subType\": \"09\"}}}" + } + ], + "decodeErrors": [ + { + "description": "Length longer than document", + "bson": "1D000000057800FF0000000573FFD26444B34C6990E8E7D1DFC035D400" + }, + { + "description": "Negative length", + "bson": "0D000000057800FFFFFFFF0000" + }, + { + "description": "subtype 0x02 length too long ", + "bson": "13000000057800060000000203000000FFFF00" + }, + { + "description": "subtype 0x02 length too short", + "bson": "13000000057800060000000201000000FFFF00" + }, + { + "description": "subtype 0x02 length negative one", + "bson": "130000000578000600000002FFFFFFFFFFFF00" + } + ], + "parseErrors": [ + { + "description": "$uuid wrong type", + "string": "{\"x\" : { \"$uuid\" : { \"data\" : \"73ffd264-44b3-4c69-90e8-e7d1dfc035d4\"}}}" + }, + { + "description": "$uuid invalid value--too short", + "string": "{\"x\" : { \"$uuid\" : \"73ffd264-44b3-90e8-e7d1dfc035d4\"}}" + }, + { + "description": "$uuid invalid value--too long", + "string": "{\"x\" : { \"$uuid\" : \"73ffd264-44b3-4c69-90e8-e7d1dfc035d4-789e4\"}}" + }, + { + "description": "$uuid invalid value--misplaced hyphens", + "string": "{\"x\" : { \"$uuid\" : 
\"73ff-d26444b-34c6-990e8e-7d1dfc035d4\"}}" + }, + { + "description": "$uuid invalid value--too many hyphens", + "string": "{\"x\" : { \"$uuid\" : \"----d264-44b3-4--9-90e8-e7d1dfc0----\"}}" + } + ] +} diff --git a/bson/src/test/resources/bson/boolean.json b/bson/src/test/resources/bson/boolean.json new file mode 100644 index 00000000000..84c282299a1 --- /dev/null +++ b/bson/src/test/resources/bson/boolean.json @@ -0,0 +1,27 @@ +{ + "description": "Boolean", + "bson_type": "0x08", + "test_key": "b", + "valid": [ + { + "description": "True", + "canonical_bson": "090000000862000100", + "canonical_extjson": "{\"b\" : true}" + }, + { + "description": "False", + "canonical_bson": "090000000862000000", + "canonical_extjson": "{\"b\" : false}" + } + ], + "decodeErrors": [ + { + "description": "Invalid boolean value of 2", + "bson": "090000000862000200" + }, + { + "description": "Invalid boolean value of -1", + "bson": "09000000086200FF00" + } + ] +} diff --git a/bson/src/test/resources/bson/code.json b/bson/src/test/resources/bson/code.json new file mode 100644 index 00000000000..b8482b2541b --- /dev/null +++ b/bson/src/test/resources/bson/code.json @@ -0,0 +1,67 @@ +{ + "description": "Javascript Code", + "bson_type": "0x0D", + "test_key": "a", + "valid": [ + { + "description": "Empty string", + "canonical_bson": "0D0000000D6100010000000000", + "canonical_extjson": "{\"a\" : {\"$code\" : \"\"}}" + }, + { + "description": "Single character", + "canonical_bson": "0E0000000D610002000000620000", + "canonical_extjson": "{\"a\" : {\"$code\" : \"b\"}}" + }, + { + "description": "Multi-character", + "canonical_bson": "190000000D61000D0000006162616261626162616261620000", + "canonical_extjson": "{\"a\" : {\"$code\" : \"abababababab\"}}" + }, + { + "description": "two-byte UTF-8 (\u00e9)", + "canonical_bson": "190000000D61000D000000C3A9C3A9C3A9C3A9C3A9C3A90000", + "canonical_extjson": "{\"a\" : {\"$code\" : \"\\u00e9\\u00e9\\u00e9\\u00e9\\u00e9\\u00e9\"}}" + }, + { + "description": "three-byte UTF-8 (\u2606)", + "canonical_bson": "190000000D61000D000000E29886E29886E29886E298860000", + "canonical_extjson": "{\"a\" : {\"$code\" : \"\\u2606\\u2606\\u2606\\u2606\"}}" + }, + { + "description": "Embedded nulls", + "canonical_bson": "190000000D61000D0000006162006261620062616261620000", + "canonical_extjson": "{\"a\" : {\"$code\" : \"ab\\u0000bab\\u0000babab\"}}" + } + ], + "decodeErrors": [ + { + "description": "bad code string length: 0 (but no 0x00 either)", + "bson": "0C0000000D61000000000000" + }, + { + "description": "bad code string length: -1", + "bson": "0C0000000D6100FFFFFFFF00" + }, + { + "description": "bad code string length: eats terminator", + "bson": "100000000D6100050000006200620000" + }, + { + "description": "bad code string length: longer than rest of document", + "bson": "120000000D00FFFFFF00666F6F6261720000" + }, + { + "description": "code string is not null-terminated", + "bson": "100000000D610004000000616263FF00" + }, + { + "description": "empty code string, but extra null", + "bson": "0E0000000D610001000000000000" + }, + { + "description": "invalid UTF-8", + "bson": "0E0000000D610002000000E90000" + } + ] +} diff --git a/bson/src/test/resources/bson/code_w_scope.json b/bson/src/test/resources/bson/code_w_scope.json new file mode 100644 index 00000000000..f956bcd54f6 --- /dev/null +++ b/bson/src/test/resources/bson/code_w_scope.json @@ -0,0 +1,78 @@ +{ + "description": "Javascript Code with Scope", + "bson_type": "0x0F", + "test_key": "a", + "valid": [ + { + "description": "Empty code string, 
empty scope", + "canonical_bson": "160000000F61000E0000000100000000050000000000", + "canonical_extjson": "{\"a\" : {\"$code\" : \"\", \"$scope\" : {}}}" + }, + { + "description": "Non-empty code string, empty scope", + "canonical_bson": "1A0000000F610012000000050000006162636400050000000000", + "canonical_extjson": "{\"a\" : {\"$code\" : \"abcd\", \"$scope\" : {}}}" + }, + { + "description": "Empty code string, non-empty scope", + "canonical_bson": "1D0000000F61001500000001000000000C000000107800010000000000", + "canonical_extjson": "{\"a\" : {\"$code\" : \"\", \"$scope\" : {\"x\" : {\"$numberInt\": \"1\"}}}}" + }, + { + "description": "Non-empty code string and non-empty scope", + "canonical_bson": "210000000F6100190000000500000061626364000C000000107800010000000000", + "canonical_extjson": "{\"a\" : {\"$code\" : \"abcd\", \"$scope\" : {\"x\" : {\"$numberInt\": \"1\"}}}}" + }, + { + "description": "Unicode and embedded null in code string, empty scope", + "canonical_bson": "1A0000000F61001200000005000000C3A9006400050000000000", + "canonical_extjson": "{\"a\" : {\"$code\" : \"\\u00e9\\u0000d\", \"$scope\" : {}}}" + } + ], + "decodeErrors": [ + { + "description": "field length zero", + "bson": "280000000F6100000000000500000061626364001300000010780001000000107900010000000000" + }, + { + "description": "field length negative", + "bson": "280000000F6100FFFFFFFF0500000061626364001300000010780001000000107900010000000000" + }, + { + "description": "field length too short (less than minimum size)", + "bson": "160000000F61000D0000000100000000050000000000" + }, + { + "description": "field length too short (truncates scope)", + "bson": "280000000F61001F0000000500000061626364001300000010780001000000107900010000000000" + }, + { + "description": "field length too long (clips outer doc)", + "bson": "280000000F6100210000000500000061626364001300000010780001000000107900010000000000" + }, + { + "description": "field length too long (longer than outer doc)", + "bson": "280000000F6100FF0000000500000061626364001300000010780001000000107900010000000000" + }, + { + "description": "bad code string: length too short", + "bson": "280000000F6100200000000400000061626364001300000010780001000000107900010000000000" + }, + { + "description": "bad code string: length too long (clips scope)", + "bson": "280000000F6100200000000600000061626364001300000010780001000000107900010000000000" + }, + { + "description": "bad code string: negative length", + "bson": "280000000F610020000000FFFFFFFF61626364001300000010780001000000107900010000000000" + }, + { + "description": "bad code string: length longer than field", + "bson": "280000000F610020000000FF00000061626364001300000010780001000000107900010000000000" + }, + { + "description": "bad scope doc (field has bad string length)", + "bson": "1C0000000F001500000001000000000C000000020000000000000000" + } + ] +} diff --git a/bson/src/test/resources/bson/datetime.json b/bson/src/test/resources/bson/datetime.json new file mode 100644 index 00000000000..f857afdc367 --- /dev/null +++ b/bson/src/test/resources/bson/datetime.json @@ -0,0 +1,42 @@ +{ + "description": "DateTime", + "bson_type": "0x09", + "test_key": "a", + "valid": [ + { + "description": "epoch", + "canonical_bson": "10000000096100000000000000000000", + "relaxed_extjson": "{\"a\" : {\"$date\" : \"1970-01-01T00:00:00Z\"}}", + "canonical_extjson": "{\"a\" : {\"$date\" : {\"$numberLong\" : \"0\"}}}" + }, + { + "description": "positive ms", + "canonical_bson": "10000000096100C5D8D6CC3B01000000", + "relaxed_extjson": "{\"a\" : {\"$date\" : 
\"2012-12-24T12:15:30.501Z\"}}", + "canonical_extjson": "{\"a\" : {\"$date\" : {\"$numberLong\" : \"1356351330501\"}}}" + }, + { + "description": "negative", + "canonical_bson": "10000000096100C33CE7B9BDFFFFFF00", + "relaxed_extjson": "{\"a\" : {\"$date\" : {\"$numberLong\" : \"-284643869501\"}}}", + "canonical_extjson": "{\"a\" : {\"$date\" : {\"$numberLong\" : \"-284643869501\"}}}" + }, + { + "description" : "Y10K", + "canonical_bson" : "1000000009610000DC1FD277E6000000", + "canonical_extjson" : "{\"a\":{\"$date\":{\"$numberLong\":\"253402300800000\"}}}" + }, + { + "description": "leading zero ms", + "canonical_bson": "10000000096100D1D6D6CC3B01000000", + "relaxed_extjson": "{\"a\" : {\"$date\" : \"2012-12-24T12:15:30.001Z\"}}", + "canonical_extjson": "{\"a\" : {\"$date\" : {\"$numberLong\" : \"1356351330001\"}}}" + } + ], + "decodeErrors": [ + { + "description": "datetime field truncated", + "bson": "0C0000000961001234567800" + } + ] +} diff --git a/bson/src/test/resources/bson/dbpointer.json b/bson/src/test/resources/bson/dbpointer.json new file mode 100644 index 00000000000..377e556a0ad --- /dev/null +++ b/bson/src/test/resources/bson/dbpointer.json @@ -0,0 +1,56 @@ +{ + "description": "DBPointer type (deprecated)", + "bson_type": "0x0C", + "deprecated": true, + "test_key": "a", + "valid": [ + { + "description": "DBpointer", + "canonical_bson": "1A0000000C610002000000620056E1FC72E0C917E9C471416100", + "canonical_extjson": "{\"a\": {\"$dbPointer\": {\"$ref\": \"b\", \"$id\": {\"$oid\": \"56e1fc72e0c917e9c4714161\"}}}}", + "converted_bson": "2a00000003610022000000022472656600020000006200072469640056e1fc72e0c917e9c47141610000", + "converted_extjson": "{\"a\": {\"$ref\": \"b\", \"$id\": {\"$oid\": \"56e1fc72e0c917e9c4714161\"}}}" + }, + { + "description": "DBpointer with opposite key order", + "canonical_bson": "1A0000000C610002000000620056E1FC72E0C917E9C471416100", + "canonical_extjson": "{\"a\": {\"$dbPointer\": {\"$ref\": \"b\", \"$id\": {\"$oid\": \"56e1fc72e0c917e9c4714161\"}}}}", + "degenerate_extjson": "{\"a\": {\"$dbPointer\": {\"$id\": {\"$oid\": \"56e1fc72e0c917e9c4714161\"}, \"$ref\": \"b\"}}}", + "converted_bson": "2a00000003610022000000022472656600020000006200072469640056e1fc72e0c917e9c47141610000", + "converted_extjson": "{\"a\": {\"$ref\": \"b\", \"$id\": {\"$oid\": \"56e1fc72e0c917e9c4714161\"}}}" + }, + { + "description": "With two-byte UTF-8", + "canonical_bson": "1B0000000C610003000000C3A90056E1FC72E0C917E9C471416100", + "canonical_extjson": "{\"a\": {\"$dbPointer\": {\"$ref\": \"é\", \"$id\": {\"$oid\": \"56e1fc72e0c917e9c4714161\"}}}}", + "converted_bson": "2B0000000361002300000002247265660003000000C3A900072469640056E1FC72E0C917E9C47141610000", + "converted_extjson": "{\"a\": {\"$ref\": \"é\", \"$id\": {\"$oid\": \"56e1fc72e0c917e9c4714161\"}}}" + } + ], + "decodeErrors": [ + { + "description": "String with negative length", + "bson": "1A0000000C6100FFFFFFFF620056E1FC72E0C917E9C471416100" + }, + { + "description": "String with zero length", + "bson": "1A0000000C610000000000620056E1FC72E0C917E9C471416100" + }, + { + "description": "String not null terminated", + "bson": "1A0000000C610002000000626256E1FC72E0C917E9C471416100" + }, + { + "description": "short OID (less than minimum length for field)", + "bson": "160000000C61000300000061620056E1FC72E0C91700" + }, + { + "description": "short OID (greater than minimum, but truncated)", + "bson": "1A0000000C61000300000061620056E1FC72E0C917E9C4716100" + }, + { + "description": "String with bad UTF-8", + "bson": 
"1A0000000C610002000000E90056E1FC72E0C917E9C471416100" + } + ] +} diff --git a/bson/src/test/resources/bson/dbref.json b/bson/src/test/resources/bson/dbref.json new file mode 100644 index 00000000000..41c0b09d0ea --- /dev/null +++ b/bson/src/test/resources/bson/dbref.json @@ -0,0 +1,51 @@ +{ + "description": "Document type (DBRef sub-documents)", + "bson_type": "0x03", + "valid": [ + { + "description": "DBRef", + "canonical_bson": "37000000036462726566002b0000000224726566000b000000636f6c6c656374696f6e00072469640058921b3e6e32ab156a22b59e0000", + "canonical_extjson": "{\"dbref\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"58921b3e6e32ab156a22b59e\"}}}" + }, + { + "description": "DBRef with database", + "canonical_bson": "4300000003646272656600370000000224726566000b000000636f6c6c656374696f6e00072469640058921b3e6e32ab156a22b59e0224646200030000006462000000", + "canonical_extjson": "{\"dbref\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"58921b3e6e32ab156a22b59e\"}, \"$db\": \"db\"}}" + }, + { + "description": "DBRef with database and additional fields", + "canonical_bson": "48000000036462726566003c0000000224726566000b000000636f6c6c656374696f6e0010246964002a00000002246462000300000064620002666f6f0004000000626172000000", + "canonical_extjson": "{\"dbref\": {\"$ref\": \"collection\", \"$id\": {\"$numberInt\": \"42\"}, \"$db\": \"db\", \"foo\": \"bar\"}}" + }, + { + "description": "DBRef with additional fields", + "canonical_bson": "4400000003646272656600380000000224726566000b000000636f6c6c656374696f6e00072469640058921b3e6e32ab156a22b59e02666f6f0004000000626172000000", + "canonical_extjson": "{\"dbref\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"58921b3e6e32ab156a22b59e\"}, \"foo\": \"bar\"}}" + }, + { + "description": "Document with key names similar to those of a DBRef", + "canonical_bson": "3e0000000224726566000c0000006e6f742d612d646272656600072469640058921b3e6e32ab156a22b59e022462616e616e6100050000007065656c0000", + "canonical_extjson": "{\"$ref\": \"not-a-dbref\", \"$id\": {\"$oid\": \"58921b3e6e32ab156a22b59e\"}, \"$banana\": \"peel\"}" + }, + { + "description": "DBRef with additional dollar-prefixed and dotted fields", + "canonical_bson": "48000000036462726566003c0000000224726566000b000000636f6c6c656374696f6e00072469640058921b3e6e32ab156a22b59e10612e62000100000010246300010000000000", + "canonical_extjson": "{\"dbref\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"58921b3e6e32ab156a22b59e\"}, \"a.b\": {\"$numberInt\": \"1\"}, \"$c\": {\"$numberInt\": \"1\"}}}" + }, + { + "description": "Sub-document resembles DBRef but $id is missing", + "canonical_bson": "26000000036462726566001a0000000224726566000b000000636f6c6c656374696f6e000000", + "canonical_extjson": "{\"dbref\": {\"$ref\": \"collection\"}}" + }, + { + "description": "Sub-document resembles DBRef but $ref is not a string", + "canonical_bson": "2c000000036462726566002000000010247265660001000000072469640058921b3e6e32ab156a22b59e0000", + "canonical_extjson": "{\"dbref\": {\"$ref\": {\"$numberInt\": \"1\"}, \"$id\": {\"$oid\": \"58921b3e6e32ab156a22b59e\"}}}" + }, + { + "description": "Sub-document resembles DBRef but $db is not a string", + "canonical_bson": "4000000003646272656600340000000224726566000b000000636f6c6c656374696f6e00072469640058921b3e6e32ab156a22b59e1024646200010000000000", + "canonical_extjson": "{\"dbref\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"58921b3e6e32ab156a22b59e\"}, \"$db\": {\"$numberInt\": \"1\"}}}" + } + ] +} diff --git a/bson/src/test/resources/bson/decimal128-1.json 
b/bson/src/test/resources/bson/decimal128-1.json new file mode 100644 index 00000000000..8e7fbc93c6f --- /dev/null +++ b/bson/src/test/resources/bson/decimal128-1.json @@ -0,0 +1,341 @@ +{ + "description": "Decimal128", + "bson_type": "0x13", + "test_key": "d", + "valid": [ + { + "description": "Special - Canonical NaN", + "canonical_bson": "180000001364000000000000000000000000000000007C00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}" + }, + { + "description": "Special - Negative NaN", + "canonical_bson": "18000000136400000000000000000000000000000000FC00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}", + "lossy": true + }, + { + "description": "Special - Negative NaN", + "canonical_bson": "18000000136400000000000000000000000000000000FC00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-NaN\"}}", + "lossy": true + }, + { + "description": "Special - Canonical SNaN", + "canonical_bson": "180000001364000000000000000000000000000000007E00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}", + "lossy": true + }, + { + "description": "Special - Negative SNaN", + "canonical_bson": "18000000136400000000000000000000000000000000FE00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}", + "lossy": true + }, + { + "description": "Special - NaN with a payload", + "canonical_bson": "180000001364001200000000000000000000000000007E00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}", + "lossy": true + }, + { + "description": "Special - Canonical Positive Infinity", + "canonical_bson": "180000001364000000000000000000000000000000007800", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" + }, + { + "description": "Special - Canonical Negative Infinity", + "canonical_bson": "18000000136400000000000000000000000000000000F800", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" + }, + { + "description": "Special - Invalid representation treated as 0", + "canonical_bson": "180000001364000000000000000000000000000000106C00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}", + "lossy": true + }, + { + "description": "Special - Invalid representation treated as -0", + "canonical_bson": "18000000136400DCBA9876543210DEADBEEF00000010EC00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}", + "lossy": true + }, + { + "description": "Special - Invalid representation treated as 0E3", + "canonical_bson": "18000000136400FFFFFFFFFFFFFFFFFFFFFFFFFFFF116C00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}", + "lossy": true + }, + { + "description": "Regular - Adjusted Exponent Limit", + "canonical_bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3CF22F00", + "canonical_extjson": "{\"d\": { \"$numberDecimal\": \"0.000001234567890123456789012345678901234\" }}" + }, + { + "description": "Regular - Smallest", + "canonical_bson": "18000000136400D204000000000000000000000000343000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.001234\"}}" + }, + { + "description": "Regular - Smallest with Trailing Zeros", + "canonical_bson": "1800000013640040EF5A07000000000000000000002A3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00123400000\"}}" + }, + { + "description": "Regular - 0.1", + "canonical_bson": "1800000013640001000000000000000000000000003E3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1\"}}" + }, + { + "description": "Regular - 
0.1234567890123456789012345678901234", + "canonical_bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3CFC2F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1234567890123456789012345678901234\"}}" + }, + { + "description": "Regular - 0", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "Regular - -0", + "canonical_bson": "18000000136400000000000000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}" + }, + { + "description": "Regular - -0.0", + "canonical_bson": "1800000013640000000000000000000000000000003EB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0\"}}" + }, + { + "description": "Regular - 2", + "canonical_bson": "180000001364000200000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"2\"}}" + }, + { + "description": "Regular - 2.000", + "canonical_bson": "18000000136400D0070000000000000000000000003A3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"2.000\"}}" + }, + { + "description": "Regular - Largest", + "canonical_bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3C403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1234567890123456789012345678901234\"}}" + }, + { + "description": "Scientific - Tiniest", + "canonical_bson": "18000000136400FFFFFFFF638E8D37C087ADBE09ED010000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"9.999999999999999999999999999999999E-6143\"}}" + }, + { + "description": "Scientific - Tiny", + "canonical_bson": "180000001364000100000000000000000000000000000000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}" + }, + { + "description": "Scientific - Negative Tiny", + "canonical_bson": "180000001364000100000000000000000000000000008000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6176\"}}" + }, + { + "description": "Scientific - Adjusted Exponent Limit", + "canonical_bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3CF02F00", + "canonical_extjson": "{\"d\": { \"$numberDecimal\": \"1.234567890123456789012345678901234E-7\" }}" + }, + { + "description": "Scientific - Fractional", + "canonical_bson": "1800000013640064000000000000000000000000002CB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.00E-8\"}}" + }, + { + "description": "Scientific - 0 with Exponent", + "canonical_bson": "180000001364000000000000000000000000000000205F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6000\"}}" + }, + { + "description": "Scientific - 0 with Negative Exponent", + "canonical_bson": "1800000013640000000000000000000000000000007A2B00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-611\"}}" + }, + { + "description": "Scientific - No Decimal with Signed Exponent", + "canonical_bson": "180000001364000100000000000000000000000000463000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+3\"}}" + }, + { + "description": "Scientific - Trailing Zero", + "canonical_bson": "180000001364001A04000000000000000000000000423000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.050E+4\"}}" + }, + { + "description": "Scientific - With Decimal", + "canonical_bson": "180000001364006900000000000000000000000000423000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.05E+3\"}}" + }, + { + "description": "Scientific - Full", + "canonical_bson": "18000000136400FFFFFFFFFFFFFFFFFFFFFFFFFFFF403000", + "canonical_extjson": "{\"d\" : 
{\"$numberDecimal\" : \"5192296858534827628530496329220095\"}}" + }, + { + "description": "Scientific - Large", + "canonical_bson": "18000000136400000000000A5BC138938D44C64D31FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+6144\"}}" + }, + { + "description": "Scientific - Largest", + "canonical_bson": "18000000136400FFFFFFFF638E8D37C087ADBE09EDFF5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"9.999999999999999999999999999999999E+6144\"}}" + }, + { + "description": "Non-Canonical Parsing - Exponent Normalization", + "canonical_bson": "1800000013640064000000000000000000000000002CB000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-100E-10\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.00E-8\"}}" + }, + { + "description": "Non-Canonical Parsing - Unsigned Positive Exponent", + "canonical_bson": "180000001364000100000000000000000000000000463000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+3\"}}" + }, + { + "description": "Non-Canonical Parsing - Lowercase Exponent Identifier", + "canonical_bson": "180000001364000100000000000000000000000000463000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1e+3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+3\"}}" + }, + { + "description": "Non-Canonical Parsing - Long Significand with Exponent", + "canonical_bson": "1800000013640079D9E0F9763ADA429D0200000000583000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"12345689012345789012345E+12\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.2345689012345789012345E+34\"}}" + }, + { + "description": "Non-Canonical Parsing - Positive Sign", + "canonical_bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3C403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"+1234567890123456789012345678901234\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1234567890123456789012345678901234\"}}" + }, + { + "description": "Non-Canonical Parsing - Long Decimal String", + "canonical_bson": "180000001364000100000000000000000000000000722800", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \".000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-999\"}}" + }, + { + "description": "Non-Canonical Parsing - nan", + "canonical_bson": "180000001364000000000000000000000000000000007C00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"nan\"}}", + 
"canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}" + }, + { + "description": "Non-Canonical Parsing - nAn", + "canonical_bson": "180000001364000000000000000000000000000000007C00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"nAn\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}" + }, + { + "description": "Non-Canonical Parsing - +infinity", + "canonical_bson": "180000001364000000000000000000000000000000007800", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"+infinity\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - infinity", + "canonical_bson": "180000001364000000000000000000000000000000007800", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"infinity\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - infiniTY", + "canonical_bson": "180000001364000000000000000000000000000000007800", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"infiniTY\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - inf", + "canonical_bson": "180000001364000000000000000000000000000000007800", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"inf\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - inF", + "canonical_bson": "180000001364000000000000000000000000000000007800", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"inF\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - -infinity", + "canonical_bson": "18000000136400000000000000000000000000000000F800", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-infinity\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - -infiniTy", + "canonical_bson": "18000000136400000000000000000000000000000000F800", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-infiniTy\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - -Inf", + "canonical_bson": "18000000136400000000000000000000000000000000F800", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - -inf", + "canonical_bson": "18000000136400000000000000000000000000000000F800", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-inf\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" + }, + { + "description": "Non-Canonical Parsing - -inF", + "canonical_bson": "18000000136400000000000000000000000000000000F800", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-inF\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" + }, + { + "description": "Rounded Subnormal number", + "canonical_bson": "180000001364000100000000000000000000000000000000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10E-6177\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}" + }, + { + "description": "Clamped", + "canonical_bson": "180000001364000a00000000000000000000000000fe5f00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E6112\"}}", + "canonical_extjson": 
"{\"d\" : {\"$numberDecimal\" : \"1.0E+6112\"}}" + }, + { + "description": "Exact rounding", + "canonical_bson": "18000000136400000000000a5bc138938d44c64d31cc3700", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+999\"}}" + }, + { + "description": "Clamped zeros with a large positive exponent", + "canonical_bson": "180000001364000000000000000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+2147483647\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6111\"}}" + }, + { + "description": "Clamped zeros with a large negative exponent", + "canonical_bson": "180000001364000000000000000000000000000000000000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-2147483647\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6176\"}}" + }, + { + "description": "Clamped negative zeros with a large positive exponent", + "canonical_bson": "180000001364000000000000000000000000000000FEDF00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+2147483647\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6111\"}}" + }, + { + "description": "Clamped negative zeros with a large negative exponent", + "canonical_bson": "180000001364000000000000000000000000000000008000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-2147483647\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6176\"}}" + } + ] +} diff --git a/bson/src/test/resources/bson/decimal128-2.json b/bson/src/test/resources/bson/decimal128-2.json new file mode 100644 index 00000000000..316d3b0e618 --- /dev/null +++ b/bson/src/test/resources/bson/decimal128-2.json @@ -0,0 +1,793 @@ +{ + "description": "Decimal128", + "bson_type": "0x13", + "test_key": "d", + "valid": [ + { + "description": "[decq021] Normality", + "canonical_bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3C40B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1234567890123456789012345678901234\"}}" + }, + { + "description": "[decq823] values around [u]int32 edges (zeros done earlier)", + "canonical_bson": "18000000136400010000800000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-2147483649\"}}" + }, + { + "description": "[decq822] values around [u]int32 edges (zeros done earlier)", + "canonical_bson": "18000000136400000000800000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : 
\"-2147483648\"}}" + }, + { + "description": "[decq821] values around [u]int32 edges (zeros done earlier)", + "canonical_bson": "18000000136400FFFFFF7F0000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-2147483647\"}}" + }, + { + "description": "[decq820] values around [u]int32 edges (zeros done earlier)", + "canonical_bson": "18000000136400FEFFFF7F0000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-2147483646\"}}" + }, + { + "description": "[decq152] fold-downs (more below)", + "canonical_bson": "18000000136400393000000000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-12345\"}}" + }, + { + "description": "[decq154] fold-downs (more below)", + "canonical_bson": "18000000136400D20400000000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1234\"}}" + }, + { + "description": "[decq006] derivative canonical plain strings", + "canonical_bson": "18000000136400EE0200000000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-750\"}}" + }, + { + "description": "[decq164] fold-downs (more below)", + "canonical_bson": "1800000013640039300000000000000000000000003CB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-123.45\"}}" + }, + { + "description": "[decq156] fold-downs (more below)", + "canonical_bson": "180000001364007B0000000000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-123\"}}" + }, + { + "description": "[decq008] derivative canonical plain strings", + "canonical_bson": "18000000136400EE020000000000000000000000003EB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-75.0\"}}" + }, + { + "description": "[decq158] fold-downs (more below)", + "canonical_bson": "180000001364000C0000000000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-12\"}}" + }, + { + "description": "[decq122] Nmax and similar", + "canonical_bson": "18000000136400FFFFFFFF638E8D37C087ADBE09EDFFDF00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.999999999999999999999999999999999E+6144\"}}" + }, + { + "description": "[decq002] (mostly derived from the Strawman 4 document and examples)", + "canonical_bson": "18000000136400EE020000000000000000000000003CB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-7.50\"}}" + }, + { + "description": "[decq004] derivative canonical plain strings", + "canonical_bson": "18000000136400EE0200000000000000000000000042B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-7.50E+3\"}}" + }, + { + "description": "[decq018] derivative canonical plain strings", + "canonical_bson": "18000000136400EE020000000000000000000000002EB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-7.50E-7\"}}" + }, + { + "description": "[decq125] Nmax and similar", + "canonical_bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3CFEDF00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.234567890123456789012345678901234E+6144\"}}" + }, + { + "description": "[decq131] fold-downs (more below)", + "canonical_bson": "18000000136400000000807F1BCF85B27059C8A43CFEDF00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.230000000000000000000000000000000E+6144\"}}" + }, + { + "description": "[decq162] fold-downs (more below)", + "canonical_bson": "180000001364007B000000000000000000000000003CB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.23\"}}" + }, + { + "description": "[decq176] Nmin 
and below", + "canonical_bson": "18000000136400010000000A5BC138938D44C64D31008000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.000000000000000000000000000000001E-6143\"}}" + }, + { + "description": "[decq174] Nmin and below", + "canonical_bson": "18000000136400000000000A5BC138938D44C64D31008000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.000000000000000000000000000000000E-6143\"}}" + }, + { + "description": "[decq133] fold-downs (more below)", + "canonical_bson": "18000000136400000000000A5BC138938D44C64D31FEDF00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.000000000000000000000000000000000E+6144\"}}" + }, + { + "description": "[decq160] fold-downs (more below)", + "canonical_bson": "18000000136400010000000000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1\"}}" + }, + { + "description": "[decq172] Nmin and below", + "canonical_bson": "180000001364000100000000000000000000000000428000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6143\"}}" + }, + { + "description": "[decq010] derivative canonical plain strings", + "canonical_bson": "18000000136400EE020000000000000000000000003AB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.750\"}}" + }, + { + "description": "[decq012] derivative canonical plain strings", + "canonical_bson": "18000000136400EE0200000000000000000000000038B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0750\"}}" + }, + { + "description": "[decq014] derivative canonical plain strings", + "canonical_bson": "18000000136400EE0200000000000000000000000034B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000750\"}}" + }, + { + "description": "[decq016] derivative canonical plain strings", + "canonical_bson": "18000000136400EE0200000000000000000000000030B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000750\"}}" + }, + { + "description": "[decq404] zeros", + "canonical_bson": "180000001364000000000000000000000000000000000000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6176\"}}" + }, + { + "description": "[decq424] negative zeros", + "canonical_bson": "180000001364000000000000000000000000000000008000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6176\"}}" + }, + { + "description": "[decq407] zeros", + "canonical_bson": "1800000013640000000000000000000000000000003C3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}" + }, + { + "description": "[decq427] negative zeros", + "canonical_bson": "1800000013640000000000000000000000000000003CB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00\"}}" + }, + { + "description": "[decq409] zeros", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[decq428] negative zeros", + "canonical_bson": "18000000136400000000000000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}" + }, + { + "description": "[decq700] Selected DPD codes", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[decq406] zeros", + "canonical_bson": "1800000013640000000000000000000000000000003C3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}" + }, + { + "description": "[decq426] negative zeros", + "canonical_bson": 
"1800000013640000000000000000000000000000003CB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00\"}}" + }, + { + "description": "[decq410] zeros", + "canonical_bson": "180000001364000000000000000000000000000000463000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}" + }, + { + "description": "[decq431] negative zeros", + "canonical_bson": "18000000136400000000000000000000000000000046B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+3\"}}" + }, + { + "description": "[decq419] clamped zeros...", + "canonical_bson": "180000001364000000000000000000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6111\"}}" + }, + { + "description": "[decq432] negative zeros", + "canonical_bson": "180000001364000000000000000000000000000000FEDF00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6111\"}}" + }, + { + "description": "[decq405] zeros", + "canonical_bson": "180000001364000000000000000000000000000000000000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6176\"}}" + }, + { + "description": "[decq425] negative zeros", + "canonical_bson": "180000001364000000000000000000000000000000008000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6176\"}}" + }, + { + "description": "[decq508] Specials", + "canonical_bson": "180000001364000000000000000000000000000000007800", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"Infinity\"}}" + }, + { + "description": "[decq528] Specials", + "canonical_bson": "18000000136400000000000000000000000000000000F800", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-Infinity\"}}" + }, + { + "description": "[decq541] Specials", + "canonical_bson": "180000001364000000000000000000000000000000007C00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"NaN\"}}" + }, + { + "description": "[decq074] Nmin and below", + "canonical_bson": "18000000136400000000000A5BC138938D44C64D31000000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E-6143\"}}" + }, + { + "description": "[decq602] fold-down full sequence", + "canonical_bson": "18000000136400000000000A5BC138938D44C64D31FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+6144\"}}" + }, + { + "description": "[decq604] fold-down full sequence", + "canonical_bson": "180000001364000000000081EFAC855B416D2DEE04FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000000E+6143\"}}" + }, + { + "description": "[decq606] fold-down full sequence", + "canonical_bson": "1800000013640000000080264B91C02220BE377E00FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000000000E+6142\"}}" + }, + { + "description": "[decq608] fold-down full sequence", + "canonical_bson": "1800000013640000000040EAED7446D09C2C9F0C00FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000E+6141\"}}" + }, + { + "description": "[decq610] fold-down full sequence", + "canonical_bson": "18000000136400000000A0CA17726DAE0F1E430100FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000E+6140\"}}" + }, + { + "description": "[decq612] fold-down full sequence", + "canonical_bson": "18000000136400000000106102253E5ECE4F200000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000000E+6139\"}}" + }, + { + "description": "[decq614] fold-down full 
sequence", + "canonical_bson": "18000000136400000000E83C80D09F3C2E3B030000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000E+6138\"}}" + }, + { + "description": "[decq616] fold-down full sequence", + "canonical_bson": "18000000136400000000E4D20CC8DCD2B752000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000E+6137\"}}" + }, + { + "description": "[decq618] fold-down full sequence", + "canonical_bson": "180000001364000000004A48011416954508000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000E+6136\"}}" + }, + { + "description": "[decq620] fold-down full sequence", + "canonical_bson": "18000000136400000000A1EDCCCE1BC2D300000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000E+6135\"}}" + }, + { + "description": "[decq622] fold-down full sequence", + "canonical_bson": "18000000136400000080F64AE1C7022D1500000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000E+6134\"}}" + }, + { + "description": "[decq624] fold-down full sequence", + "canonical_bson": "18000000136400000040B2BAC9E0191E0200000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000E+6133\"}}" + }, + { + "description": "[decq626] fold-down full sequence", + "canonical_bson": "180000001364000000A0DEC5ADC935360000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000E+6132\"}}" + }, + { + "description": "[decq628] fold-down full sequence", + "canonical_bson": "18000000136400000010632D5EC76B050000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000E+6131\"}}" + }, + { + "description": "[decq630] fold-down full sequence", + "canonical_bson": "180000001364000000E8890423C78A000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000E+6130\"}}" + }, + { + "description": "[decq632] fold-down full sequence", + "canonical_bson": "18000000136400000064A7B3B6E00D000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000E+6129\"}}" + }, + { + "description": "[decq634] fold-down full sequence", + "canonical_bson": "1800000013640000008A5D78456301000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000E+6128\"}}" + }, + { + "description": "[decq636] fold-down full sequence", + "canonical_bson": "180000001364000000C16FF2862300000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000E+6127\"}}" + }, + { + "description": "[decq638] fold-down full sequence", + "canonical_bson": "180000001364000080C6A47E8D0300000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000E+6126\"}}" + }, + { + "description": "[decq640] fold-down full sequence", + "canonical_bson": "1800000013640000407A10F35A0000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000E+6125\"}}" + }, + { + "description": "[decq642] fold-down full sequence", + "canonical_bson": "1800000013640000A0724E18090000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000E+6124\"}}" + }, + { + "description": "[decq644] fold-down full sequence", + "canonical_bson": "180000001364000010A5D4E8000000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : 
\"1.000000000000E+6123\"}}" + }, + { + "description": "[decq646] fold-down full sequence", + "canonical_bson": "1800000013640000E8764817000000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000E+6122\"}}" + }, + { + "description": "[decq648] fold-down full sequence", + "canonical_bson": "1800000013640000E40B5402000000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000E+6121\"}}" + }, + { + "description": "[decq650] fold-down full sequence", + "canonical_bson": "1800000013640000CA9A3B00000000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000E+6120\"}}" + }, + { + "description": "[decq652] fold-down full sequence", + "canonical_bson": "1800000013640000E1F50500000000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000E+6119\"}}" + }, + { + "description": "[decq654] fold-down full sequence", + "canonical_bson": "180000001364008096980000000000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000E+6118\"}}" + }, + { + "description": "[decq656] fold-down full sequence", + "canonical_bson": "1800000013640040420F0000000000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000E+6117\"}}" + }, + { + "description": "[decq658] fold-down full sequence", + "canonical_bson": "18000000136400A086010000000000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000E+6116\"}}" + }, + { + "description": "[decq660] fold-down full sequence", + "canonical_bson": "180000001364001027000000000000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000E+6115\"}}" + }, + { + "description": "[decq662] fold-down full sequence", + "canonical_bson": "18000000136400E803000000000000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000E+6114\"}}" + }, + { + "description": "[decq664] fold-down full sequence", + "canonical_bson": "180000001364006400000000000000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+6113\"}}" + }, + { + "description": "[decq666] fold-down full sequence", + "canonical_bson": "180000001364000A00000000000000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+6112\"}}" + }, + { + "description": "[decq060] fold-downs (more below)", + "canonical_bson": "180000001364000100000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1\"}}" + }, + { + "description": "[decq670] fold-down full sequence", + "canonical_bson": "180000001364000100000000000000000000000000FC5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6110\"}}" + }, + { + "description": "[decq668] fold-down full sequence", + "canonical_bson": "180000001364000100000000000000000000000000FE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6111\"}}" + }, + { + "description": "[decq072] Nmin and below", + "canonical_bson": "180000001364000100000000000000000000000000420000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6143\"}}" + }, + { + "description": "[decq076] Nmin and below", + "canonical_bson": "18000000136400010000000A5BC138938D44C64D31000000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000001E-6143\"}}" + }, + { + "description": "[decq036] fold-downs (more below)", + "canonical_bson": "18000000136400000000807F1BCF85B27059C8A43CFE5F00", + "canonical_extjson": 
"{\"d\" : {\"$numberDecimal\" : \"1.230000000000000000000000000000000E+6144\"}}" + }, + { + "description": "[decq062] fold-downs (more below)", + "canonical_bson": "180000001364007B000000000000000000000000003C3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.23\"}}" + }, + { + "description": "[decq034] Nmax and similar", + "canonical_bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3CFE5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.234567890123456789012345678901234E+6144\"}}" + }, + { + "description": "[decq441] exponent lengths", + "canonical_bson": "180000001364000700000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7\"}}" + }, + { + "description": "[decq449] exponent lengths", + "canonical_bson": "1800000013640007000000000000000000000000001E5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+5999\"}}" + }, + { + "description": "[decq447] exponent lengths", + "canonical_bson": "1800000013640007000000000000000000000000000E3800", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+999\"}}" + }, + { + "description": "[decq445] exponent lengths", + "canonical_bson": "180000001364000700000000000000000000000000063100", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+99\"}}" + }, + { + "description": "[decq443] exponent lengths", + "canonical_bson": "180000001364000700000000000000000000000000523000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+9\"}}" + }, + { + "description": "[decq842] VG testcase", + "canonical_bson": "180000001364000000FED83F4E7C9FE4E269E38A5BCD1700", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7.049000000000010795488000000000000E-3097\"}}" + }, + { + "description": "[decq841] VG testcase", + "canonical_bson": "180000001364000000203B9DB5056F000000000000002400", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"8.000000000000000000E-1550\"}}" + }, + { + "description": "[decq840] VG testcase", + "canonical_bson": "180000001364003C17258419D710C42F0000000000002400", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"8.81125000000001349436E-1548\"}}" + }, + { + "description": "[decq701] Selected DPD codes", + "canonical_bson": "180000001364000900000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"9\"}}" + }, + { + "description": "[decq032] Nmax and similar", + "canonical_bson": "18000000136400FFFFFFFF638E8D37C087ADBE09EDFF5F00", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"9.999999999999999999999999999999999E+6144\"}}" + }, + { + "description": "[decq702] Selected DPD codes", + "canonical_bson": "180000001364000A00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"10\"}}" + }, + { + "description": "[decq057] fold-downs (more below)", + "canonical_bson": "180000001364000C00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12\"}}" + }, + { + "description": "[decq703] Selected DPD codes", + "canonical_bson": "180000001364001300000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"19\"}}" + }, + { + "description": "[decq704] Selected DPD codes", + "canonical_bson": "180000001364001400000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"20\"}}" + }, + { + "description": "[decq705] Selected DPD codes", + "canonical_bson": "180000001364001D00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"29\"}}" + 
}, + { + "description": "[decq706] Selected DPD codes", + "canonical_bson": "180000001364001E00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"30\"}}" + }, + { + "description": "[decq707] Selected DPD codes", + "canonical_bson": "180000001364002700000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"39\"}}" + }, + { + "description": "[decq708] Selected DPD codes", + "canonical_bson": "180000001364002800000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"40\"}}" + }, + { + "description": "[decq709] Selected DPD codes", + "canonical_bson": "180000001364003100000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"49\"}}" + }, + { + "description": "[decq710] Selected DPD codes", + "canonical_bson": "180000001364003200000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"50\"}}" + }, + { + "description": "[decq711] Selected DPD codes", + "canonical_bson": "180000001364003B00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"59\"}}" + }, + { + "description": "[decq712] Selected DPD codes", + "canonical_bson": "180000001364003C00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"60\"}}" + }, + { + "description": "[decq713] Selected DPD codes", + "canonical_bson": "180000001364004500000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"69\"}}" + }, + { + "description": "[decq714] Selected DPD codes", + "canonical_bson": "180000001364004600000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"70\"}}" + }, + { + "description": "[decq715] Selected DPD codes", + "canonical_bson": "180000001364004700000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"71\"}}" + }, + { + "description": "[decq716] Selected DPD codes", + "canonical_bson": "180000001364004800000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"72\"}}" + }, + { + "description": "[decq717] Selected DPD codes", + "canonical_bson": "180000001364004900000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"73\"}}" + }, + { + "description": "[decq718] Selected DPD codes", + "canonical_bson": "180000001364004A00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"74\"}}" + }, + { + "description": "[decq719] Selected DPD codes", + "canonical_bson": "180000001364004B00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"75\"}}" + }, + { + "description": "[decq720] Selected DPD codes", + "canonical_bson": "180000001364004C00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"76\"}}" + }, + { + "description": "[decq721] Selected DPD codes", + "canonical_bson": "180000001364004D00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"77\"}}" + }, + { + "description": "[decq722] Selected DPD codes", + "canonical_bson": "180000001364004E00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"78\"}}" + }, + { + "description": "[decq723] Selected DPD codes", + "canonical_bson": "180000001364004F00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"79\"}}" + }, + { + "description": "[decq056] 
fold-downs (more below)", + "canonical_bson": "180000001364007B00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"123\"}}" + }, + { + "description": "[decq064] fold-downs (more below)", + "canonical_bson": "1800000013640039300000000000000000000000003C3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"123.45\"}}" + }, + { + "description": "[decq732] Selected DPD codes", + "canonical_bson": "180000001364000802000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"520\"}}" + }, + { + "description": "[decq733] Selected DPD codes", + "canonical_bson": "180000001364000902000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"521\"}}" + }, + { + "description": "[decq740] DPD: one of each of the huffman groups", + "canonical_bson": "180000001364000903000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"777\"}}" + }, + { + "description": "[decq741] DPD: one of each of the huffman groups", + "canonical_bson": "180000001364000A03000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"778\"}}" + }, + { + "description": "[decq742] DPD: one of each of the huffman groups", + "canonical_bson": "180000001364001303000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"787\"}}" + }, + { + "description": "[decq746] DPD: one of each of the huffman groups", + "canonical_bson": "180000001364001F03000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"799\"}}" + }, + { + "description": "[decq743] DPD: one of each of the huffman groups", + "canonical_bson": "180000001364006D03000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"877\"}}" + }, + { + "description": "[decq753] DPD all-highs cases (includes the 24 redundant codes)", + "canonical_bson": "180000001364007803000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"888\"}}" + }, + { + "description": "[decq754] DPD all-highs cases (includes the 24 redundant codes)", + "canonical_bson": "180000001364007903000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"889\"}}" + }, + { + "description": "[decq760] DPD all-highs cases (includes the 24 redundant codes)", + "canonical_bson": "180000001364008203000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"898\"}}" + }, + { + "description": "[decq764] DPD all-highs cases (includes the 24 redundant codes)", + "canonical_bson": "180000001364008303000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"899\"}}" + }, + { + "description": "[decq745] DPD: one of each of the huffman groups", + "canonical_bson": "18000000136400D303000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"979\"}}" + }, + { + "description": "[decq770] DPD all-highs cases (includes the 24 redundant codes)", + "canonical_bson": "18000000136400DC03000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"988\"}}" + }, + { + "description": "[decq774] DPD all-highs cases (includes the 24 redundant codes)", + "canonical_bson": "18000000136400DD03000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"989\"}}" + }, + { + "description": "[decq730] Selected DPD codes", + "canonical_bson": 
"18000000136400E203000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"994\"}}" + }, + { + "description": "[decq731] Selected DPD codes", + "canonical_bson": "18000000136400E303000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"995\"}}" + }, + { + "description": "[decq744] DPD: one of each of the huffman groups", + "canonical_bson": "18000000136400E503000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"997\"}}" + }, + { + "description": "[decq780] DPD all-highs cases (includes the 24 redundant codes)", + "canonical_bson": "18000000136400E603000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"998\"}}" + }, + { + "description": "[decq787] DPD all-highs cases (includes the 24 redundant codes)", + "canonical_bson": "18000000136400E703000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"999\"}}" + }, + { + "description": "[decq053] fold-downs (more below)", + "canonical_bson": "18000000136400D204000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1234\"}}" + }, + { + "description": "[decq052] fold-downs (more below)", + "canonical_bson": "180000001364003930000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12345\"}}" + }, + { + "description": "[decq792] Miscellaneous (testers' queries, etc.)", + "canonical_bson": "180000001364003075000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"30000\"}}" + }, + { + "description": "[decq793] Miscellaneous (testers' queries, etc.)", + "canonical_bson": "1800000013640090940D0000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"890000\"}}" + }, + { + "description": "[decq824] values around [u]int32 edges (zeros done earlier)", + "canonical_bson": "18000000136400FEFFFF7F00000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"2147483646\"}}" + }, + { + "description": "[decq825] values around [u]int32 edges (zeros done earlier)", + "canonical_bson": "18000000136400FFFFFF7F00000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"2147483647\"}}" + }, + { + "description": "[decq826] values around [u]int32 edges (zeros done earlier)", + "canonical_bson": "180000001364000000008000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"2147483648\"}}" + }, + { + "description": "[decq827] values around [u]int32 edges (zeros done earlier)", + "canonical_bson": "180000001364000100008000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"2147483649\"}}" + }, + { + "description": "[decq828] values around [u]int32 edges (zeros done earlier)", + "canonical_bson": "18000000136400FEFFFFFF00000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"4294967294\"}}" + }, + { + "description": "[decq829] values around [u]int32 edges (zeros done earlier)", + "canonical_bson": "18000000136400FFFFFFFF00000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"4294967295\"}}" + }, + { + "description": "[decq830] values around [u]int32 edges (zeros done earlier)", + "canonical_bson": "180000001364000000000001000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"4294967296\"}}" + }, + { + "description": "[decq831] values around [u]int32 edges (zeros done 
earlier)", + "canonical_bson": "180000001364000100000001000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"4294967297\"}}" + }, + { + "description": "[decq022] Normality", + "canonical_bson": "18000000136400C7711CC7B548F377DC80A131C836403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1111111111111111111111111111111111\"}}" + }, + { + "description": "[decq020] Normality", + "canonical_bson": "18000000136400F2AF967ED05C82DE3297FF6FDE3C403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1234567890123456789012345678901234\"}}" + }, + { + "description": "[decq550] Specials", + "canonical_bson": "18000000136400FFFFFFFF638E8D37C087ADBE09ED413000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"9999999999999999999999999999999999\"}}" + } + ] +} + diff --git a/bson/src/test/resources/bson/decimal128-3.json b/bson/src/test/resources/bson/decimal128-3.json new file mode 100644 index 00000000000..9b015343ce7 --- /dev/null +++ b/bson/src/test/resources/bson/decimal128-3.json @@ -0,0 +1,1771 @@ +{ + "description": "Decimal128", + "bson_type": "0x13", + "test_key": "d", + "valid": [ + { + "description": "[basx066] strings without E cannot generate E in result", + "canonical_bson": "18000000136400185C0ACE0000000000000000000038B000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-00345678.5432\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-345678.5432\"}}" + }, + { + "description": "[basx065] strings without E cannot generate E in result", + "canonical_bson": "18000000136400185C0ACE0000000000000000000038B000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0345678.5432\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-345678.5432\"}}" + }, + { + "description": "[basx064] strings without E cannot generate E in result", + "canonical_bson": "18000000136400185C0ACE0000000000000000000038B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-345678.5432\"}}" + }, + { + "description": "[basx041] strings without E cannot generate E in result", + "canonical_bson": "180000001364004C0000000000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-76\"}}" + }, + { + "description": "[basx027] conform to rules and exponent will be in permitted range).", + "canonical_bson": "180000001364000F270000000000000000000000003AB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.999\"}}" + }, + { + "description": "[basx026] conform to rules and exponent will be in permitted range).", + "canonical_bson": "180000001364009F230000000000000000000000003AB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.119\"}}" + }, + { + "description": "[basx025] conform to rules and exponent will be in permitted range).", + "canonical_bson": "180000001364008F030000000000000000000000003CB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.11\"}}" + }, + { + "description": "[basx024] conform to rules and exponent will be in permitted range).", + "canonical_bson": "180000001364005B000000000000000000000000003EB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.1\"}}" + }, + { + "description": "[dqbsr531] negatives (Rounded)", + "canonical_bson": "1800000013640099761CC7B548F377DC80A131C836FEAF00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.1111111111111111111111111111123450\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.111111111111111111111111111112345\"}}" + }, + { + "description": "[basx022] 
conform to rules and exponent will be in permitted range).", + "canonical_bson": "180000001364000A000000000000000000000000003EB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.0\"}}" + }, + { + "description": "[basx021] conform to rules and exponent will be in permitted range).", + "canonical_bson": "18000000136400010000000000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1\"}}" + }, + { + "description": "[basx601] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000002E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-9\"}}" + }, + { + "description": "[basx622] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000002EB000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000000000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-9\"}}" + }, + { + "description": "[basx602] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000303000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-8\"}}" + }, + { + "description": "[basx621] Zeros", + "canonical_bson": "18000000136400000000000000000000000000000030B000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-8\"}}" + }, + { + "description": "[basx603] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000323000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}" + }, + { + "description": "[basx620] Zeros", + "canonical_bson": "18000000136400000000000000000000000000000032B000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-7\"}}" + }, + { + "description": "[basx604] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000343000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}" + }, + { + "description": "[basx619] Zeros", + "canonical_bson": "18000000136400000000000000000000000000000034B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000000\"}}" + }, + { + "description": "[basx605] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000363000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}" + }, + { + "description": "[basx618] Zeros", + "canonical_bson": "18000000136400000000000000000000000000000036B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000\"}}" + }, + { + "description": "[basx680] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"000000.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx606] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000383000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}" + }, + { + "description": "[basx617] Zeros", + "canonical_bson": "18000000136400000000000000000000000000000038B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000\"}}" + }, + { + "description": "[basx681] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : 
\"00000.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx686] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"+00000.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx687] Zeros", + "canonical_bson": "18000000136400000000000000000000000000000040B000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-00000.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}" + }, + { + "description": "[basx019] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640000000000000000000000000000003CB000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-00.00\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00\"}}" + }, + { + "description": "[basx607] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003A3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000\"}}" + }, + { + "description": "[basx616] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003AB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000\"}}" + }, + { + "description": "[basx682] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0000.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx155] Numbers with E", + "canonical_bson": "1800000013640000000000000000000000000000003A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000e+0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000\"}}" + }, + { + "description": "[basx130] Numbers with E", + "canonical_bson": "180000001364000000000000000000000000000000383000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}" + }, + { + "description": "[basx290] some more negative zeros [systematic tests below]", + "canonical_bson": "18000000136400000000000000000000000000000038B000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000\"}}" + }, + { + "description": "[basx131] Numbers with E", + "canonical_bson": "180000001364000000000000000000000000000000363000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}" + }, + { + "description": "[basx291] some more negative zeros [systematic tests below]", + "canonical_bson": "18000000136400000000000000000000000000000036B000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000\"}}" + }, + { + "description": "[basx132] Numbers with E", + "canonical_bson": "180000001364000000000000000000000000000000343000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}" + }, + { + "description": "[basx292] some more negative zeros [systematic tests below]", + "canonical_bson": "18000000136400000000000000000000000000000034B000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000000\"}}" + }, + { + "description": "[basx133] Numbers 
with E", + "canonical_bson": "180000001364000000000000000000000000000000323000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}" + }, + { + "description": "[basx293] some more negative zeros [systematic tests below]", + "canonical_bson": "18000000136400000000000000000000000000000032B000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-7\"}}" + }, + { + "description": "[basx608] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003C3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}" + }, + { + "description": "[basx615] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003CB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00\"}}" + }, + { + "description": "[basx683] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"000.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx630] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}" + }, + { + "description": "[basx670] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}" + }, + { + "description": "[basx631] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}" + }, + { + "description": "[basx671] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000\"}}" + }, + { + "description": "[basx134] Numbers with E", + "canonical_bson": "180000001364000000000000000000000000000000383000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}" + }, + { + "description": "[basx294] some more negative zeros [systematic tests below]", + "canonical_bson": "18000000136400000000000000000000000000000038B000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000\"}}" + }, + { + "description": "[basx632] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx672] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000383000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}" + }, + { + "description": "[basx135] Numbers with E", + "canonical_bson": "180000001364000000000000000000000000000000363000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}" + }, + { + "description": "[basx295] 
some more negative zeros [systematic tests below]", + "canonical_bson": "18000000136400000000000000000000000000000036B000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000\"}}" + }, + { + "description": "[basx633] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000423000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+1\"}}" + }, + { + "description": "[basx673] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000363000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}" + }, + { + "description": "[basx136] Numbers with E", + "canonical_bson": "180000001364000000000000000000000000000000343000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}" + }, + { + "description": "[basx674] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000343000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}" + }, + { + "description": "[basx634] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000443000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+2\"}}" + }, + { + "description": "[basx137] Numbers with E", + "canonical_bson": "180000001364000000000000000000000000000000323000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}" + }, + { + "description": "[basx635] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000463000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}" + }, + { + "description": "[basx675] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000323000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}" + }, + { + "description": "[basx636] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000483000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+6\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+4\"}}" + }, + { + "description": "[basx676] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000303000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-6\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-8\"}}" + }, + { + "description": "[basx637] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000004A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+7\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+5\"}}" + }, + { + "description": "[basx677] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000002E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-7\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-9\"}}" + }, + { + "description": "[basx638] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000004C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : 
\"0.00E+8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6\"}}" + }, + { + "description": "[basx678] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000002C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-10\"}}" + }, + { + "description": "[basx149] Numbers with E", + "canonical_bson": "180000001364000000000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"000E+9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}" + }, + { + "description": "[basx639] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000004E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E+9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+7\"}}" + }, + { + "description": "[basx679] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000002A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00E-9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-11\"}}" + }, + { + "description": "[basx063] strings without E cannot generate E in result", + "canonical_bson": "18000000136400185C0ACE00000000000000000000383000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"+00345678.5432\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.5432\"}}" + }, + { + "description": "[basx018] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640000000000000000000000000000003EB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0\"}}" + }, + { + "description": "[basx609] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003E3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}" + }, + { + "description": "[basx614] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003EB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0\"}}" + }, + { + "description": "[basx684] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"00.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx640] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}" + }, + { + "description": "[basx660] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}" + }, + { + "description": "[basx641] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx661] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00\"}}" + }, + { + "description": "[basx296] some more negative zeros [systematic tests below]", + "canonical_bson": "1800000013640000000000000000000000000000003AB000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0E-2\"}}", + "canonical_extjson": 
"{\"d\" : {\"$numberDecimal\" : \"-0.000\"}}" + }, + { + "description": "[basx642] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000423000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+1\"}}" + }, + { + "description": "[basx662] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000\"}}" + }, + { + "description": "[basx297] some more negative zeros [systematic tests below]", + "canonical_bson": "18000000136400000000000000000000000000000038B000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0000\"}}" + }, + { + "description": "[basx643] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000443000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+2\"}}" + }, + { + "description": "[basx663] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000383000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000\"}}" + }, + { + "description": "[basx644] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000463000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}" + }, + { + "description": "[basx664] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000363000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000\"}}" + }, + { + "description": "[basx645] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000483000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+4\"}}" + }, + { + "description": "[basx665] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000343000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000\"}}" + }, + { + "description": "[basx646] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000004A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+6\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+5\"}}" + }, + { + "description": "[basx666] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000323000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-6\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-7\"}}" + }, + { + "description": "[basx647] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000004C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+7\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6\"}}" + }, + { + "description": "[basx667] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000303000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-7\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-8\"}}" + }, + { + "description": "[basx648] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000004E3000", + 
"degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+7\"}}" + }, + { + "description": "[basx668] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000002E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-9\"}}" + }, + { + "description": "[basx160] Numbers with E", + "canonical_bson": "180000001364000000000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"00E+9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}" + }, + { + "description": "[basx161] Numbers with E", + "canonical_bson": "1800000013640000000000000000000000000000002E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"00E-9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-9\"}}" + }, + { + "description": "[basx649] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000503000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E+9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+8\"}}" + }, + { + "description": "[basx669] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000002C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0E-9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-10\"}}" + }, + { + "description": "[basx062] strings without E cannot generate E in result", + "canonical_bson": "18000000136400185C0ACE00000000000000000000383000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"+0345678.5432\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.5432\"}}" + }, + { + "description": "[basx001] conform to rules and exponent will be in permitted range).", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx017] conform to rules and exponent will be in permitted range).", + "canonical_bson": "18000000136400000000000000000000000000000040B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}" + }, + { + "description": "[basx611] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx613] Zeros", + "canonical_bson": "18000000136400000000000000000000000000000040B000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}" + }, + { + "description": "[basx685] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx688] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"+0.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx689] Zeros", + "canonical_bson": "18000000136400000000000000000000000000000040B000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0\"}}" + }, + { + "description": "[basx650] Zeros", + "canonical_bson": 
"180000001364000000000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0\"}}" + }, + { + "description": "[basx651] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000423000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+1\"}}" + }, + { + "description": "[basx298] some more negative zeros [systematic tests below]", + "canonical_bson": "1800000013640000000000000000000000000000003CB000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00\"}}" + }, + { + "description": "[basx652] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000443000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+2\"}}" + }, + { + "description": "[basx299] some more negative zeros [systematic tests below]", + "canonical_bson": "1800000013640000000000000000000000000000003AB000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000\"}}" + }, + { + "description": "[basx653] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000463000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+3\"}}" + }, + { + "description": "[basx654] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000483000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+4\"}}" + }, + { + "description": "[basx655] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000004A3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+5\"}}" + }, + { + "description": "[basx656] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000004C3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6\"}}" + }, + { + "description": "[basx657] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000004E3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+7\"}}" + }, + { + "description": "[basx658] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000503000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+8\"}}" + }, + { + "description": "[basx138] Numbers with E", + "canonical_bson": "180000001364000000000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"+0E+9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}" + }, + { + "description": "[basx139] Numbers with E", + "canonical_bson": "18000000136400000000000000000000000000000052B000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+9\"}}" + }, + { + "description": "[basx144] Numbers with E", + "canonical_bson": "180000001364000000000000000000000000000000523000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}" + }, + { + "description": "[basx154] Numbers with E", + "canonical_bson": "180000001364000000000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}" + }, + { + "description": "[basx659] Zeros", + "canonical_bson": "180000001364000000000000000000000000000000523000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+9\"}}" + }, + { + "description": "[basx042] strings without E cannot generate E in result", + "canonical_bson": "18000000136400FC040000000000000000000000003C3000", + "degenerate_extjson": "{\"d\" : 
{\"$numberDecimal\" : \"+12.76\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.76\"}}" + }, + { + "description": "[basx143] Numbers with E", + "canonical_bson": "180000001364000100000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"+1E+009\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" + }, + { + "description": "[basx061] strings without E cannot generate E in result", + "canonical_bson": "18000000136400185C0ACE00000000000000000000383000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"+345678.5432\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.5432\"}}" + }, + { + "description": "[basx036] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640015CD5B0700000000000000000000203000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000000123456789\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.23456789E-8\"}}" + }, + { + "description": "[basx035] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640015CD5B0700000000000000000000223000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000123456789\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.23456789E-7\"}}" + }, + { + "description": "[basx034] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640015CD5B0700000000000000000000243000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000123456789\"}}" + }, + { + "description": "[basx053] strings without E cannot generate E in result", + "canonical_bson": "180000001364003200000000000000000000000000323000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000050\"}}" + }, + { + "description": "[basx033] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640015CD5B0700000000000000000000263000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000123456789\"}}" + }, + { + "description": "[basx016] conform to rules and exponent will be in permitted range).", + "canonical_bson": "180000001364000C000000000000000000000000003A3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.012\"}}" + }, + { + "description": "[basx015] conform to rules and exponent will be in permitted range).", + "canonical_bson": "180000001364007B000000000000000000000000003A3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.123\"}}" + }, + { + "description": "[basx037] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640078DF0D8648700000000000000000223000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.123456789012344\"}}" + }, + { + "description": "[basx038] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640079DF0D8648700000000000000000223000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.123456789012345\"}}" + }, + { + "description": "[basx250] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000383000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}" + }, + { + "description": "[basx257] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000383000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}" + }, + { + "description": "[basx256] Numbers with E", 
+ "canonical_bson": "18000000136400F104000000000000000000000000363000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.01265\"}}" + }, + { + "description": "[basx258] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}" + }, + { + "description": "[basx251] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000103000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-20\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-21\"}}" + }, + { + "description": "[basx263] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000603000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+20\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+19\"}}" + }, + { + "description": "[basx255] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000343000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.001265\"}}" + }, + { + "description": "[basx259] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}" + }, + { + "description": "[basx254] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000323000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0001265\"}}" + }, + { + "description": "[basx260] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}" + }, + { + "description": "[basx253] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000303000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00001265\"}}" + }, + { + "description": "[basx261] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}" + }, + { + "description": "[basx252] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000283000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E-8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-9\"}}" + }, + { + "description": "[basx262] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000483000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265E+8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+7\"}}" + }, + { + "description": "[basx159] Numbers with E", + "canonical_bson": "1800000013640049000000000000000000000000002E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.73e-7\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7.3E-8\"}}" + }, + { + "description": "[basx004] conform to rules and exponent will be in permitted 
range).", + "canonical_bson": "1800000013640064000000000000000000000000003C3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00\"}}" + }, + { + "description": "[basx003] conform to rules and exponent will be in permitted range).", + "canonical_bson": "180000001364000A000000000000000000000000003E3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0\"}}" + }, + { + "description": "[basx002] conform to rules and exponent will be in permitted range).", + "canonical_bson": "180000001364000100000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1\"}}" + }, + { + "description": "[basx148] Numbers with E", + "canonical_bson": "180000001364000100000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+009\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" + }, + { + "description": "[basx153] Numbers with E", + "canonical_bson": "180000001364000100000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E009\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" + }, + { + "description": "[basx141] Numbers with E", + "canonical_bson": "180000001364000100000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1e+09\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" + }, + { + "description": "[basx146] Numbers with E", + "canonical_bson": "180000001364000100000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+09\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" + }, + { + "description": "[basx151] Numbers with E", + "canonical_bson": "180000001364000100000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1e09\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" + }, + { + "description": "[basx142] Numbers with E", + "canonical_bson": "180000001364000100000000000000000000000000F43000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+90\"}}" + }, + { + "description": "[basx147] Numbers with E", + "canonical_bson": "180000001364000100000000000000000000000000F43000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1e+90\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+90\"}}" + }, + { + "description": "[basx152] Numbers with E", + "canonical_bson": "180000001364000100000000000000000000000000F43000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E90\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+90\"}}" + }, + { + "description": "[basx140] Numbers with E", + "canonical_bson": "180000001364000100000000000000000000000000523000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" + }, + { + "description": "[basx150] Numbers with E", + "canonical_bson": "180000001364000100000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+9\"}}" + }, + { + "description": "[basx014] conform to rules and exponent will be in permitted range).", + "canonical_bson": "18000000136400D2040000000000000000000000003A3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.234\"}}" + }, + { + "description": "[basx170] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003A3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}" + }, + { + 
"description": "[basx177] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}" + }, + { + "description": "[basx176] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000383000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}" + }, + { + "description": "[basx178] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}" + }, + { + "description": "[basx171] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000123000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-20\"}}" + }, + { + "description": "[basx183] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000623000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+20\"}}" + }, + { + "description": "[basx175] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000363000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.01265\"}}" + }, + { + "description": "[basx179] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}" + }, + { + "description": "[basx174] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000343000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.001265\"}}" + }, + { + "description": "[basx180] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}" + }, + { + "description": "[basx173] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000323000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0001265\"}}" + }, + { + "description": "[basx181] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000423000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+4\"}}" + }, + { + "description": "[basx172] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000002A3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-8\"}}" + }, + { + "description": "[basx182] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000004A3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+8\"}}" + }, + { + "description": "[basx157] Numbers with E", + "canonical_bson": "180000001364000400000000000000000000000000523000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"4E+9\"}}" + }, + { + "description": "[basx067] examples", + "canonical_bson": "180000001364000500000000000000000000000000343000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"5E-6\"}}", + "canonical_extjson": "{\"d\" : 
{\"$numberDecimal\" : \"0.000005\"}}" + }, + { + "description": "[basx069] examples", + "canonical_bson": "180000001364000500000000000000000000000000323000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"5E-7\"}}" + }, + { + "description": "[basx385] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7\"}}" + }, + { + "description": "[basx365] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000543000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E10\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+10\"}}" + }, + { + "description": "[basx405] Engineering notation tests", + "canonical_bson": "1800000013640007000000000000000000000000002C3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-10\"}}" + }, + { + "description": "[basx363] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000563000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E11\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+11\"}}" + }, + { + "description": "[basx407] Engineering notation tests", + "canonical_bson": "1800000013640007000000000000000000000000002A3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-11\"}}" + }, + { + "description": "[basx361] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000583000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E12\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+12\"}}" + }, + { + "description": "[basx409] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000283000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-12\"}}" + }, + { + "description": "[basx411] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000263000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-13\"}}" + }, + { + "description": "[basx383] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000423000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+1\"}}" + }, + { + "description": "[basx387] Engineering notation tests", + "canonical_bson": "1800000013640007000000000000000000000000003E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.7\"}}" + }, + { + "description": "[basx381] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000443000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+2\"}}" + }, + { + "description": "[basx389] Engineering notation tests", + "canonical_bson": "1800000013640007000000000000000000000000003C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.07\"}}" + }, + { + "description": "[basx379] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000463000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+3\"}}" + }, + { + "description": "[basx391] 
Engineering notation tests", + "canonical_bson": "1800000013640007000000000000000000000000003A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.007\"}}" + }, + { + "description": "[basx377] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000483000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+4\"}}" + }, + { + "description": "[basx393] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000383000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0007\"}}" + }, + { + "description": "[basx375] Engineering notation tests", + "canonical_bson": "1800000013640007000000000000000000000000004A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+5\"}}" + }, + { + "description": "[basx395] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000363000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00007\"}}" + }, + { + "description": "[basx373] Engineering notation tests", + "canonical_bson": "1800000013640007000000000000000000000000004C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E6\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+6\"}}" + }, + { + "description": "[basx397] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000343000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-6\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000007\"}}" + }, + { + "description": "[basx371] Engineering notation tests", + "canonical_bson": "1800000013640007000000000000000000000000004E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E7\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+7\"}}" + }, + { + "description": "[basx399] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000323000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-7\"}}" + }, + { + "description": "[basx369] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000503000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+8\"}}" + }, + { + "description": "[basx401] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000303000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-8\"}}" + }, + { + "description": "[basx367] Engineering notation tests", + "canonical_bson": "180000001364000700000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E+9\"}}" + }, + { + "description": "[basx403] Engineering notation tests", + "canonical_bson": "1800000013640007000000000000000000000000002E3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"7E-9\"}}" + }, + { + "description": "[basx007] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640064000000000000000000000000003E3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : 
\"10.0\"}}" + }, + { + "description": "[basx005] conform to rules and exponent will be in permitted range).", + "canonical_bson": "180000001364000A00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"10\"}}" + }, + { + "description": "[basx165] Numbers with E", + "canonical_bson": "180000001364000A00000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10E+009\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+10\"}}" + }, + { + "description": "[basx163] Numbers with E", + "canonical_bson": "180000001364000A00000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10E+09\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+10\"}}" + }, + { + "description": "[basx325] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"10\"}}" + }, + { + "description": "[basx305] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000543000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e10\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+11\"}}" + }, + { + "description": "[basx345] Engineering notation tests", + "canonical_bson": "180000001364000A000000000000000000000000002C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-10\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-9\"}}" + }, + { + "description": "[basx303] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000563000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e11\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+12\"}}" + }, + { + "description": "[basx347] Engineering notation tests", + "canonical_bson": "180000001364000A000000000000000000000000002A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-11\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-10\"}}" + }, + { + "description": "[basx301] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000583000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e12\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+13\"}}" + }, + { + "description": "[basx349] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000283000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-12\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-11\"}}" + }, + { + "description": "[basx351] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000263000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-13\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-12\"}}" + }, + { + "description": "[basx323] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000423000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+2\"}}" + }, + { + "description": "[basx327] Engineering notation tests", + "canonical_bson": "180000001364000A000000000000000000000000003E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0\"}}" + 
}, + { + "description": "[basx321] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000443000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+3\"}}" + }, + { + "description": "[basx329] Engineering notation tests", + "canonical_bson": "180000001364000A000000000000000000000000003C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.10\"}}" + }, + { + "description": "[basx319] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000463000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+4\"}}" + }, + { + "description": "[basx331] Engineering notation tests", + "canonical_bson": "180000001364000A000000000000000000000000003A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.010\"}}" + }, + { + "description": "[basx317] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000483000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+5\"}}" + }, + { + "description": "[basx333] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000383000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0010\"}}" + }, + { + "description": "[basx315] Engineering notation tests", + "canonical_bson": "180000001364000A000000000000000000000000004A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+6\"}}" + }, + { + "description": "[basx335] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000363000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00010\"}}" + }, + { + "description": "[basx313] Engineering notation tests", + "canonical_bson": "180000001364000A000000000000000000000000004C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e6\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+7\"}}" + }, + { + "description": "[basx337] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000343000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-6\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000010\"}}" + }, + { + "description": "[basx311] Engineering notation tests", + "canonical_bson": "180000001364000A000000000000000000000000004E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e7\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+8\"}}" + }, + { + "description": "[basx339] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000323000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-7\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000010\"}}" + }, + { + "description": "[basx309] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000503000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e8\"}}", + "canonical_extjson": 
"{\"d\" : {\"$numberDecimal\" : \"1.0E+9\"}}" + }, + { + "description": "[basx341] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000303000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-7\"}}" + }, + { + "description": "[basx164] Numbers with E", + "canonical_bson": "180000001364000A00000000000000000000000000F43000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e+90\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+91\"}}" + }, + { + "description": "[basx162] Numbers with E", + "canonical_bson": "180000001364000A00000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10E+9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+10\"}}" + }, + { + "description": "[basx307] Engineering notation tests", + "canonical_bson": "180000001364000A00000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+10\"}}" + }, + { + "description": "[basx343] Engineering notation tests", + "canonical_bson": "180000001364000A000000000000000000000000002E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"10e-9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-8\"}}" + }, + { + "description": "[basx008] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640065000000000000000000000000003E3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"10.1\"}}" + }, + { + "description": "[basx009] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640068000000000000000000000000003E3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"10.4\"}}" + }, + { + "description": "[basx010] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640069000000000000000000000000003E3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"10.5\"}}" + }, + { + "description": "[basx011] conform to rules and exponent will be in permitted range).", + "canonical_bson": "180000001364006A000000000000000000000000003E3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"10.6\"}}" + }, + { + "description": "[basx012] conform to rules and exponent will be in permitted range).", + "canonical_bson": "180000001364006D000000000000000000000000003E3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"10.9\"}}" + }, + { + "description": "[basx013] conform to rules and exponent will be in permitted range).", + "canonical_bson": "180000001364006E000000000000000000000000003E3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"11.0\"}}" + }, + { + "description": "[basx040] strings without E cannot generate E in result", + "canonical_bson": "180000001364000C00000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12\"}}" + }, + { + "description": "[basx190] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003C3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}" + }, + { + "description": "[basx197] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}" + }, + { + "description": "[basx196] 
Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}" + }, + { + "description": "[basx198] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}" + }, + { + "description": "[basx191] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000143000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-20\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-19\"}}" + }, + { + "description": "[basx203] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000643000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+20\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+21\"}}" + }, + { + "description": "[basx195] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000383000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}" + }, + { + "description": "[basx199] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}" + }, + { + "description": "[basx194] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000363000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.01265\"}}" + }, + { + "description": "[basx200] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000423000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+4\"}}" + }, + { + "description": "[basx193] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000343000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.001265\"}}" + }, + { + "description": "[basx201] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000443000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+5\"}}" + }, + { + "description": "[basx192] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000002C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E-8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-7\"}}" + }, + { + "description": "[basx202] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000004C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65E+8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+9\"}}" + }, + { + "description": "[basx044] strings without E cannot generate E in result", + "canonical_bson": "18000000136400FC040000000000000000000000003C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"012.76\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.76\"}}" + }, + { + "description": "[basx042] strings without E cannot 
generate E in result", + "canonical_bson": "18000000136400FC040000000000000000000000003C3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.76\"}}" + }, + { + "description": "[basx046] strings without E cannot generate E in result", + "canonical_bson": "180000001364001100000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"17.\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"17\"}}" + }, + { + "description": "[basx049] strings without E cannot generate E in result", + "canonical_bson": "180000001364002C00000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0044\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"44\"}}" + }, + { + "description": "[basx048] strings without E cannot generate E in result", + "canonical_bson": "180000001364002C00000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"044\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"44\"}}" + }, + { + "description": "[basx158] Numbers with E", + "canonical_bson": "180000001364002C00000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"44E+9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"4.4E+10\"}}" + }, + { + "description": "[basx068] examples", + "canonical_bson": "180000001364003200000000000000000000000000323000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"50E-7\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000050\"}}" + }, + { + "description": "[basx169] Numbers with E", + "canonical_bson": "180000001364006400000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"100e+009\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+11\"}}" + }, + { + "description": "[basx167] Numbers with E", + "canonical_bson": "180000001364006400000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"100e+09\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+11\"}}" + }, + { + "description": "[basx168] Numbers with E", + "canonical_bson": "180000001364006400000000000000000000000000F43000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"100E+90\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+92\"}}" + }, + { + "description": "[basx166] Numbers with E", + "canonical_bson": "180000001364006400000000000000000000000000523000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"100e+9\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+11\"}}" + }, + { + "description": "[basx210] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003E3000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}" + }, + { + "description": "[basx217] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}" + }, + { + "description": "[basx216] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}" + }, + { + "description": "[basx218] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : 
\"126.5E+1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}" + }, + { + "description": "[basx211] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000163000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-20\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-18\"}}" + }, + { + "description": "[basx223] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000663000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+20\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+22\"}}" + }, + { + "description": "[basx215] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}" + }, + { + "description": "[basx219] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000423000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+4\"}}" + }, + { + "description": "[basx214] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000383000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}" + }, + { + "description": "[basx220] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000443000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+5\"}}" + }, + { + "description": "[basx213] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000363000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.01265\"}}" + }, + { + "description": "[basx221] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000463000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+6\"}}" + }, + { + "description": "[basx212] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000002E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E-8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000001265\"}}" + }, + { + "description": "[basx222] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000004E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5E+8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+10\"}}" + }, + { + "description": "[basx006] conform to rules and exponent will be in permitted range).", + "canonical_bson": "18000000136400E803000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1000\"}}" + }, + { + "description": "[basx230] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}" + }, + { + "description": "[basx237] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000403000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265\"}}" + }, + { + "description": "[basx236] Numbers with E", + "canonical_bson": 
"18000000136400F1040000000000000000000000003E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"126.5\"}}" + }, + { + "description": "[basx238] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000423000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+1\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+4\"}}" + }, + { + "description": "[basx231] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000183000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-20\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E-17\"}}" + }, + { + "description": "[basx243] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000683000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+20\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+23\"}}" + }, + { + "description": "[basx235] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.65\"}}" + }, + { + "description": "[basx239] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000443000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+2\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+5\"}}" + }, + { + "description": "[basx234] Numbers with E", + "canonical_bson": "18000000136400F1040000000000000000000000003A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265\"}}" + }, + { + "description": "[basx240] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000463000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+3\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+6\"}}" + }, + { + "description": "[basx233] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000383000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1265\"}}" + }, + { + "description": "[basx241] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000483000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+4\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+7\"}}" + }, + { + "description": "[basx232] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000303000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E-8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00001265\"}}" + }, + { + "description": "[basx242] Numbers with E", + "canonical_bson": "18000000136400F104000000000000000000000000503000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1265E+8\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.265E+11\"}}" + }, + { + "description": "[basx060] strings without E cannot generate E in result", + "canonical_bson": "18000000136400185C0ACE00000000000000000000383000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.5432\"}}" + }, + { + "description": "[basx059] strings without E cannot generate E in result", + "canonical_bson": "18000000136400F198670C08000000000000000000363000", + 
"degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0345678.54321\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.54321\"}}" + }, + { + "description": "[basx058] strings without E cannot generate E in result", + "canonical_bson": "180000001364006AF90B7C50000000000000000000343000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"345678.543210\"}}" + }, + { + "description": "[basx057] strings without E cannot generate E in result", + "canonical_bson": "180000001364006A19562522020000000000000000343000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"2345678.543210\"}}" + }, + { + "description": "[basx056] strings without E cannot generate E in result", + "canonical_bson": "180000001364006AB9C8733A0B0000000000000000343000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12345678.543210\"}}" + }, + { + "description": "[basx031] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640040AF0D8648700000000000000000343000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"123456789.000000\"}}" + }, + { + "description": "[basx030] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640080910F8648700000000000000000343000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"123456789.123456\"}}" + }, + { + "description": "[basx032] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640080910F8648700000000000000000403000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"123456789123456\"}}" + } + ] +} diff --git a/bson/src/test/resources/bson/decimal128-4.json b/bson/src/test/resources/bson/decimal128-4.json new file mode 100644 index 00000000000..0957019351f --- /dev/null +++ b/bson/src/test/resources/bson/decimal128-4.json @@ -0,0 +1,165 @@ +{ + "description": "Decimal128", + "bson_type": "0x13", + "test_key": "d", + "valid": [ + { + "description": "[basx023] conform to rules and exponent will be in permitted range).", + "canonical_bson": "1800000013640001000000000000000000000000003EB000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.1\"}}" + }, + + { + "description": "[basx045] strings without E cannot generate E in result", + "canonical_bson": "1800000013640003000000000000000000000000003A3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"+0.003\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.003\"}}" + }, + { + "description": "[basx610] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \".0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0\"}}" + }, + { + "description": "[basx612] Zeros", + "canonical_bson": "1800000013640000000000000000000000000000003EB000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-.0\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.0\"}}" + }, + { + "description": "[basx043] strings without E cannot generate E in result", + "canonical_bson": "18000000136400FC040000000000000000000000003C3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"+12.76\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"12.76\"}}" + }, + { + "description": "[basx055] strings without E cannot generate E in result", + "canonical_bson": "180000001364000500000000000000000000000000303000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000005\"}}", + "canonical_extjson": "{\"d\" : 
{\"$numberDecimal\" : \"5E-8\"}}" + }, + { + "description": "[basx054] strings without E cannot generate E in result", + "canonical_bson": "180000001364000500000000000000000000000000323000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0000005\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"5E-7\"}}" + }, + { + "description": "[basx052] strings without E cannot generate E in result", + "canonical_bson": "180000001364000500000000000000000000000000343000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000005\"}}" + }, + { + "description": "[basx051] strings without E cannot generate E in result", + "canonical_bson": "180000001364000500000000000000000000000000363000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"00.00005\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00005\"}}" + }, + { + "description": "[basx050] strings without E cannot generate E in result", + "canonical_bson": "180000001364000500000000000000000000000000383000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.0005\"}}" + }, + { + "description": "[basx047] strings without E cannot generate E in result", + "canonical_bson": "1800000013640005000000000000000000000000003E3000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \".5\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.5\"}}" + }, + { + "description": "[dqbsr431] check rounding modes heeded (Rounded)", + "canonical_bson": "1800000013640099761CC7B548F377DC80A131C836FE2F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.1111111111111111111111111111123450\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.111111111111111111111111111112345\"}}" + }, + { + "description": "OK2", + "canonical_bson": "18000000136400000000000A5BC138938D44C64D31FC2F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \".100000000000000000000000000000000000000000000000000000000000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.1000000000000000000000000000000000\"}}" + } + ], + "parseErrors": [ + { + "description": "[basx564] Near-specials (Conversion_syntax)", + "string": "Infi" + }, + { + "description": "[basx565] Near-specials (Conversion_syntax)", + "string": "Infin" + }, + { + "description": "[basx566] Near-specials (Conversion_syntax)", + "string": "Infini" + }, + { + "description": "[basx567] Near-specials (Conversion_syntax)", + "string": "Infinit" + }, + { + "description": "[basx568] Near-specials (Conversion_syntax)", + "string": "-Infinit" + }, + { + "description": "[basx590] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": ".Infinity" + }, + { + "description": "[basx562] Near-specials (Conversion_syntax)", + "string": "NaNq" + }, + { + "description": "[basx563] Near-specials (Conversion_syntax)", + "string": "NaNs" + }, + { + "description": "[dqbas939] overflow results at different rounding modes (Overflow & Inexact & Rounded)", + "string": "-7e10000" + }, + { + "description": "[dqbsr534] negatives (Rounded & Inexact)", + "string": "-1.11111111111111111111111111111234650" + }, + { + "description": "[dqbsr535] negatives (Rounded & Inexact)", + "string": "-1.11111111111111111111111111111234551" + }, + { + "description": "[dqbsr533] negatives (Rounded & Inexact)", + "string": "-1.11111111111111111111111111111234550" + }, + { + "description": "[dqbsr532] negatives (Rounded & Inexact)", + "string": "-1.11111111111111111111111111111234549" + }, + { + "description": "[dqbsr432] check rounding 
modes heeded (Rounded & Inexact)", + "string": "1.11111111111111111111111111111234549" + }, + { + "description": "[dqbsr433] check rounding modes heeded (Rounded & Inexact)", + "string": "1.11111111111111111111111111111234550" + }, + { + "description": "[dqbsr435] check rounding modes heeded (Rounded & Inexact)", + "string": "1.11111111111111111111111111111234551" + }, + { + "description": "[dqbsr434] check rounding modes heeded (Rounded & Inexact)", + "string": "1.11111111111111111111111111111234650" + }, + { + "description": "[dqbas938] overflow results at different rounding modes (Overflow & Inexact & Rounded)", + "string": "7e10000" + }, + { + "description": "Inexact rounding#1", + "string": "100000000000000000000000000000000000000000000000000000000001" + }, + { + "description": "Inexact rounding#2", + "string": "1E-6177" + } + ] +} diff --git a/bson/src/test/resources/bson/decimal128-5.json b/bson/src/test/resources/bson/decimal128-5.json new file mode 100644 index 00000000000..e976eae4075 --- /dev/null +++ b/bson/src/test/resources/bson/decimal128-5.json @@ -0,0 +1,402 @@ +{ + "description": "Decimal128", + "bson_type": "0x13", + "test_key": "d", + "valid": [ + { + "description": "[decq035] fold-downs (more below) (Clamped)", + "canonical_bson": "18000000136400000000807F1BCF85B27059C8A43CFE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.23E+6144\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.230000000000000000000000000000000E+6144\"}}" + }, + { + "description": "[decq037] fold-downs (more below) (Clamped)", + "canonical_bson": "18000000136400000000000A5BC138938D44C64D31FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6144\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+6144\"}}" + }, + { + "description": "[decq077] Nmin and below (Subnormal)", + "canonical_bson": "180000001364000000000081EFAC855B416D2DEE04000000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.100000000000000000000000000000000E-6143\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000000E-6144\"}}" + }, + { + "description": "[decq078] Nmin and below (Subnormal)", + "canonical_bson": "180000001364000000000081EFAC855B416D2DEE04000000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000000E-6144\"}}" + }, + { + "description": "[decq079] Nmin and below (Subnormal)", + "canonical_bson": "180000001364000A00000000000000000000000000000000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000000000000000000000000000010E-6143\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-6175\"}}" + }, + { + "description": "[decq080] Nmin and below (Subnormal)", + "canonical_bson": "180000001364000A00000000000000000000000000000000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E-6175\"}}" + }, + { + "description": "[decq081] Nmin and below (Subnormal)", + "canonical_bson": "180000001364000100000000000000000000000000020000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.00000000000000000000000000000001E-6143\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6175\"}}" + }, + { + "description": "[decq082] Nmin and below (Subnormal)", + "canonical_bson": "180000001364000100000000000000000000000000020000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6175\"}}" + }, + { + "description": "[decq083] Nmin and below (Subnormal)", + "canonical_bson": 
"180000001364000100000000000000000000000000000000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0.000000000000000000000000000000001E-6143\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}" + }, + { + "description": "[decq084] Nmin and below (Subnormal)", + "canonical_bson": "180000001364000100000000000000000000000000000000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}" + }, + { + "description": "[decq090] underflows cannot be tested for simple copies, check edge cases (Subnormal)", + "canonical_bson": "180000001364000100000000000000000000000000000000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1e-6176\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E-6176\"}}" + }, + { + "description": "[decq100] underflows cannot be tested for simple copies, check edge cases (Subnormal)", + "canonical_bson": "18000000136400FFFFFFFF095BC138938D44C64D31000000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"999999999999999999999999999999999e-6176\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"9.99999999999999999999999999999999E-6144\"}}" + }, + { + "description": "[decq130] fold-downs (more below) (Clamped)", + "canonical_bson": "18000000136400000000807F1BCF85B27059C8A43CFEDF00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.23E+6144\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.230000000000000000000000000000000E+6144\"}}" + }, + { + "description": "[decq132] fold-downs (more below) (Clamped)", + "canonical_bson": "18000000136400000000000A5BC138938D44C64D31FEDF00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E+6144\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.000000000000000000000000000000000E+6144\"}}" + }, + { + "description": "[decq177] Nmin and below (Subnormal)", + "canonical_bson": "180000001364000000000081EFAC855B416D2DEE04008000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.100000000000000000000000000000000E-6143\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.00000000000000000000000000000000E-6144\"}}" + }, + { + "description": "[decq178] Nmin and below (Subnormal)", + "canonical_bson": "180000001364000000000081EFAC855B416D2DEE04008000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.00000000000000000000000000000000E-6144\"}}" + }, + { + "description": "[decq179] Nmin and below (Subnormal)", + "canonical_bson": "180000001364000A00000000000000000000000000008000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000000000000000000000000000000010E-6143\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.0E-6175\"}}" + }, + { + "description": "[decq180] Nmin and below (Subnormal)", + "canonical_bson": "180000001364000A00000000000000000000000000008000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1.0E-6175\"}}" + }, + { + "description": "[decq181] Nmin and below (Subnormal)", + "canonical_bson": "180000001364000100000000000000000000000000028000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.00000000000000000000000000000001E-6143\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6175\"}}" + }, + { + "description": "[decq182] Nmin and below (Subnormal)", + "canonical_bson": "180000001364000100000000000000000000000000028000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6175\"}}" + }, + { + "description": "[decq183] Nmin and below (Subnormal)", + "canonical_bson": 
"180000001364000100000000000000000000000000008000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0.000000000000000000000000000000001E-6143\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6176\"}}" + }, + { + "description": "[decq184] Nmin and below (Subnormal)", + "canonical_bson": "180000001364000100000000000000000000000000008000", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6176\"}}" + }, + { + "description": "[decq190] underflow edge cases (Subnormal)", + "canonical_bson": "180000001364000100000000000000000000000000008000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1e-6176\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-1E-6176\"}}" + }, + { + "description": "[decq200] underflow edge cases (Subnormal)", + "canonical_bson": "18000000136400FFFFFFFF095BC138938D44C64D31008000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-999999999999999999999999999999999e-6176\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-9.99999999999999999999999999999999E-6144\"}}" + }, + { + "description": "[decq400] zeros (Clamped)", + "canonical_bson": "180000001364000000000000000000000000000000000000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-8000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6176\"}}" + }, + { + "description": "[decq401] zeros (Clamped)", + "canonical_bson": "180000001364000000000000000000000000000000000000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6177\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E-6176\"}}" + }, + { + "description": "[decq414] clamped zeros... (Clamped)", + "canonical_bson": "180000001364000000000000000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6112\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6111\"}}" + }, + { + "description": "[decq416] clamped zeros... (Clamped)", + "canonical_bson": "180000001364000000000000000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6144\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6111\"}}" + }, + { + "description": "[decq418] clamped zeros... (Clamped)", + "canonical_bson": "180000001364000000000000000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+8000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"0E+6111\"}}" + }, + { + "description": "[decq420] negative zeros (Clamped)", + "canonical_bson": "180000001364000000000000000000000000000000008000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-8000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6176\"}}" + }, + { + "description": "[decq421] negative zeros (Clamped)", + "canonical_bson": "180000001364000000000000000000000000000000008000", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6177\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E-6176\"}}" + }, + { + "description": "[decq434] clamped zeros... (Clamped)", + "canonical_bson": "180000001364000000000000000000000000000000FEDF00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6112\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6111\"}}" + }, + { + "description": "[decq436] clamped zeros... 
(Clamped)", + "canonical_bson": "180000001364000000000000000000000000000000FEDF00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6144\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6111\"}}" + }, + { + "description": "[decq438] clamped zeros... (Clamped)", + "canonical_bson": "180000001364000000000000000000000000000000FEDF00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+8000\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"-0E+6111\"}}" + }, + { + "description": "[decq601] fold-down full sequence (Clamped)", + "canonical_bson": "18000000136400000000000A5BC138938D44C64D31FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6144\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000000E+6144\"}}" + }, + { + "description": "[decq603] fold-down full sequence (Clamped)", + "canonical_bson": "180000001364000000000081EFAC855B416D2DEE04FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6143\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000000E+6143\"}}" + }, + { + "description": "[decq605] fold-down full sequence (Clamped)", + "canonical_bson": "1800000013640000000080264B91C02220BE377E00FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6142\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000000000E+6142\"}}" + }, + { + "description": "[decq607] fold-down full sequence (Clamped)", + "canonical_bson": "1800000013640000000040EAED7446D09C2C9F0C00FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6141\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000000E+6141\"}}" + }, + { + "description": "[decq609] fold-down full sequence (Clamped)", + "canonical_bson": "18000000136400000000A0CA17726DAE0F1E430100FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6140\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000000E+6140\"}}" + }, + { + "description": "[decq611] fold-down full sequence (Clamped)", + "canonical_bson": "18000000136400000000106102253E5ECE4F200000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6139\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000000E+6139\"}}" + }, + { + "description": "[decq613] fold-down full sequence (Clamped)", + "canonical_bson": "18000000136400000000E83C80D09F3C2E3B030000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6138\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000000E+6138\"}}" + }, + { + "description": "[decq615] fold-down full sequence (Clamped)", + "canonical_bson": "18000000136400000000E4D20CC8DCD2B752000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6137\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000000E+6137\"}}" + }, + { + "description": "[decq617] fold-down full sequence (Clamped)", + "canonical_bson": "180000001364000000004A48011416954508000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6136\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000000E+6136\"}}" + }, + { + "description": "[decq619] fold-down full sequence (Clamped)", + "canonical_bson": "18000000136400000000A1EDCCCE1BC2D300000000FE5F00", + "degenerate_extjson": "{\"d\" : 
{\"$numberDecimal\" : \"1E+6135\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000000E+6135\"}}" + }, + { + "description": "[decq621] fold-down full sequence (Clamped)", + "canonical_bson": "18000000136400000080F64AE1C7022D1500000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6134\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000000E+6134\"}}" + }, + { + "description": "[decq623] fold-down full sequence (Clamped)", + "canonical_bson": "18000000136400000040B2BAC9E0191E0200000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6133\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000000E+6133\"}}" + }, + { + "description": "[decq625] fold-down full sequence (Clamped)", + "canonical_bson": "180000001364000000A0DEC5ADC935360000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6132\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000000E+6132\"}}" + }, + { + "description": "[decq627] fold-down full sequence (Clamped)", + "canonical_bson": "18000000136400000010632D5EC76B050000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6131\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000000E+6131\"}}" + }, + { + "description": "[decq629] fold-down full sequence (Clamped)", + "canonical_bson": "180000001364000000E8890423C78A000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6130\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000000E+6130\"}}" + }, + { + "description": "[decq631] fold-down full sequence (Clamped)", + "canonical_bson": "18000000136400000064A7B3B6E00D000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6129\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000000E+6129\"}}" + }, + { + "description": "[decq633] fold-down full sequence (Clamped)", + "canonical_bson": "1800000013640000008A5D78456301000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6128\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000000E+6128\"}}" + }, + { + "description": "[decq635] fold-down full sequence (Clamped)", + "canonical_bson": "180000001364000000C16FF2862300000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6127\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000000E+6127\"}}" + }, + { + "description": "[decq637] fold-down full sequence (Clamped)", + "canonical_bson": "180000001364000080C6A47E8D0300000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6126\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000000E+6126\"}}" + }, + { + "description": "[decq639] fold-down full sequence (Clamped)", + "canonical_bson": "1800000013640000407A10F35A0000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6125\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000000E+6125\"}}" + }, + { + "description": "[decq641] fold-down full sequence (Clamped)", + "canonical_bson": "1800000013640000A0724E18090000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6124\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000000E+6124\"}}" + }, + { + "description": "[decq643] fold-down full 
sequence (Clamped)", + "canonical_bson": "180000001364000010A5D4E8000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6123\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000000E+6123\"}}" + }, + { + "description": "[decq645] fold-down full sequence (Clamped)", + "canonical_bson": "1800000013640000E8764817000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6122\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000000E+6122\"}}" + }, + { + "description": "[decq647] fold-down full sequence (Clamped)", + "canonical_bson": "1800000013640000E40B5402000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6121\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000000E+6121\"}}" + }, + { + "description": "[decq649] fold-down full sequence (Clamped)", + "canonical_bson": "1800000013640000CA9A3B00000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6120\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000000E+6120\"}}" + }, + { + "description": "[decq651] fold-down full sequence (Clamped)", + "canonical_bson": "1800000013640000E1F50500000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6119\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000000E+6119\"}}" + }, + { + "description": "[decq653] fold-down full sequence (Clamped)", + "canonical_bson": "180000001364008096980000000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6118\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000000E+6118\"}}" + }, + { + "description": "[decq655] fold-down full sequence (Clamped)", + "canonical_bson": "1800000013640040420F0000000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6117\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000000E+6117\"}}" + }, + { + "description": "[decq657] fold-down full sequence (Clamped)", + "canonical_bson": "18000000136400A086010000000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6116\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00000E+6116\"}}" + }, + { + "description": "[decq659] fold-down full sequence (Clamped)", + "canonical_bson": "180000001364001027000000000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6115\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0000E+6115\"}}" + }, + { + "description": "[decq661] fold-down full sequence (Clamped)", + "canonical_bson": "18000000136400E803000000000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6114\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.000E+6114\"}}" + }, + { + "description": "[decq663] fold-down full sequence (Clamped)", + "canonical_bson": "180000001364006400000000000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6113\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.00E+6113\"}}" + }, + { + "description": "[decq665] fold-down full sequence (Clamped)", + "canonical_bson": "180000001364000A00000000000000000000000000FE5F00", + "degenerate_extjson": "{\"d\" : {\"$numberDecimal\" : \"1E+6112\"}}", + "canonical_extjson": "{\"d\" : {\"$numberDecimal\" : \"1.0E+6112\"}}" + } + ] +} + diff --git a/bson/src/test/resources/bson/decimal128-6.json 
b/bson/src/test/resources/bson/decimal128-6.json new file mode 100644 index 00000000000..eba6764e853 --- /dev/null +++ b/bson/src/test/resources/bson/decimal128-6.json @@ -0,0 +1,131 @@ +{ + "description": "Decimal128", + "bson_type": "0x13", + "test_key": "d", + "parseErrors": [ + { + "description": "Incomplete Exponent", + "string": "1e" + }, + { + "description": "Exponent at the beginning", + "string": "E01" + }, + { + "description": "Just a decimal place", + "string": "." + }, + { + "description": "2 decimal places", + "string": "..3" + }, + { + "description": "2 decimal places", + "string": ".13.3" + }, + { + "description": "2 decimal places", + "string": "1..3" + }, + { + "description": "2 decimal places", + "string": "1.3.4" + }, + { + "description": "2 decimal places", + "string": "1.34." + }, + { + "description": "Decimal with no digits", + "string": ".e" + }, + { + "description": "2 signs", + "string": "+-32.4" + }, + { + "description": "2 signs", + "string": "-+32.4" + }, + { + "description": "2 negative signs", + "string": "--32.4" + }, + { + "description": "2 negative signs", + "string": "-32.-4" + }, + { + "description": "End in negative sign", + "string": "32.0-" + }, + { + "description": "2 negative signs", + "string": "32.4E--21" + }, + { + "description": "2 negative signs", + "string": "32.4E-2-1" + }, + { + "description": "2 signs", + "string": "32.4E+-21" + }, + { + "description": "Empty string", + "string": "" + }, + { + "description": "leading white space positive number", + "string": " 1" + }, + { + "description": "leading white space negative number", + "string": " -1" + }, + { + "description": "trailing white space", + "string": "1 " + }, + { + "description": "Invalid", + "string": "E" + }, + { + "description": "Invalid", + "string": "invalid" + }, + { + "description": "Invalid", + "string": "i" + }, + { + "description": "Invalid", + "string": "in" + }, + { + "description": "Invalid", + "string": "-in" + }, + { + "description": "Invalid", + "string": "Na" + }, + { + "description": "Invalid", + "string": "-Na" + }, + { + "description": "Invalid", + "string": "1.23abc" + }, + { + "description": "Invalid", + "string": "1.23abcE+02" + }, + { + "description": "Invalid", + "string": "1.23E+0aabs2" + } + ] +} diff --git a/bson/src/test/resources/bson/decimal128-7.json b/bson/src/test/resources/bson/decimal128-7.json new file mode 100644 index 00000000000..0b78f1237b8 --- /dev/null +++ b/bson/src/test/resources/bson/decimal128-7.json @@ -0,0 +1,327 @@ +{ + "description": "Decimal128", + "bson_type": "0x13", + "test_key": "d", + "parseErrors": [ + { + "description": "[basx572] Near-specials (Conversion_syntax)", + "string": "-9Inf" + }, + { + "description": "[basx516] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "-1-" + }, + { + "description": "[basx533] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "0000.." + }, + { + "description": "[basx534] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": ".0000." 
+ }, + { + "description": "[basx535] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "00..00" + }, + { + "description": "[basx569] Near-specials (Conversion_syntax)", + "string": "0Inf" + }, + { + "description": "[basx571] Near-specials (Conversion_syntax)", + "string": "-0Inf" + }, + { + "description": "[basx575] Near-specials (Conversion_syntax)", + "string": "0sNaN" + }, + { + "description": "[basx503] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "++1" + }, + { + "description": "[basx504] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "--1" + }, + { + "description": "[basx505] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "-+1" + }, + { + "description": "[basx506] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "+-1" + }, + { + "description": "[basx510] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": " +1" + }, + { + "description": "[basx513] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": " + 1" + }, + { + "description": "[basx514] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": " - 1" + }, + { + "description": "[basx501] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "." + }, + { + "description": "[basx502] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": ".." + }, + { + "description": "[basx519] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "" + }, + { + "description": "[basx525] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "e100" + }, + { + "description": "[basx549] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "e+1" + }, + { + "description": "[basx577] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": ".e+1" + }, + { + "description": "[basx578] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": "+.e+1" + }, + { + "description": "[basx581] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": "E+1" + }, + { + "description": "[basx582] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": ".E+1" + }, + { + "description": "[basx583] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": "+.E+1" + }, + { + "description": "[basx579] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": "-.e+" + }, + { + "description": "[basx580] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": "-.e" + }, + { + "description": "[basx584] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": "-.E+" + }, + { + "description": "[basx585] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": "-.E" + }, + { + "description": "[basx589] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": "+.Inf" + }, + { + "description": "[basx586] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": ".NaN" + }, + { + "description": "[basx587] some baddies with dots and 
Es and dots and specials (Conversion_syntax)", + "string": "-.NaN" + }, + { + "description": "[basx545] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "ONE" + }, + { + "description": "[basx561] Near-specials (Conversion_syntax)", + "string": "qNaN" + }, + { + "description": "[basx573] Near-specials (Conversion_syntax)", + "string": "-sNa" + }, + { + "description": "[basx588] some baddies with dots and Es and dots and specials (Conversion_syntax)", + "string": "+.sNaN" + }, + { + "description": "[basx544] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "ten" + }, + { + "description": "[basx527] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "u0b65" + }, + { + "description": "[basx526] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "u0e5a" + }, + { + "description": "[basx515] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "x" + }, + { + "description": "[basx574] Near-specials (Conversion_syntax)", + "string": "xNaN" + }, + { + "description": "[basx530] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": ".123.5" + }, + { + "description": "[basx500] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1..2" + }, + { + "description": "[basx542] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1e1.0" + }, + { + "description": "[basx553] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1E+1.2.3" + }, + { + "description": "[basx543] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1e123e" + }, + { + "description": "[basx552] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1E+1.2" + }, + { + "description": "[basx546] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1e.1" + }, + { + "description": "[basx547] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1e1." 
+ }, + { + "description": "[basx554] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1E++1" + }, + { + "description": "[basx555] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1E--1" + }, + { + "description": "[basx556] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1E+-1" + }, + { + "description": "[basx557] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1E-+1" + }, + { + "description": "[basx558] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1E'1" + }, + { + "description": "[basx559] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1E\"1" + }, + { + "description": "[basx520] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1e-" + }, + { + "description": "[basx560] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1E" + }, + { + "description": "[basx548] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1ee" + }, + { + "description": "[basx551] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1.2.1" + }, + { + "description": "[basx550] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1.23.4" + }, + { + "description": "[basx529] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "1.34.5" + }, + { + "description": "[basx531] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "01.35." 
+ }, + { + "description": "[basx532] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "01.35-" + }, + { + "description": "[basx518] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "3+" + }, + { + "description": "[basx521] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "7e99999a" + }, + { + "description": "[basx570] Near-specials (Conversion_syntax)", + "string": "9Inf" + }, + { + "description": "[basx512] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "12 " + }, + { + "description": "[basx517] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "12-" + }, + { + "description": "[basx507] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "12e" + }, + { + "description": "[basx508] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "12e++" + }, + { + "description": "[basx509] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "12f4" + }, + { + "description": "[basx536] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "111e*123" + }, + { + "description": "[basx537] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "111e123-" + }, + { + "description": "[basx540] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "111e1*23" + }, + { + "description": "[basx538] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "111e+12+" + }, + { + "description": "[basx539] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "111e1-3-" + }, + { + "description": "[basx541] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "111E1e+3" + }, + { + "description": "[basx528] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "123,65" + }, + { + "description": "[basx523] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "7e12356789012x" + }, + { + "description": "[basx522] The 'baddies' tests from DiagBigDecimal, plus some new ones (Conversion_syntax)", + "string": "7e123567890x" + } + ] +} diff --git a/bson/src/test/resources/bson/document.json b/bson/src/test/resources/bson/document.json new file mode 100644 index 00000000000..698e7ae90af --- /dev/null +++ b/bson/src/test/resources/bson/document.json @@ -0,0 +1,60 @@ +{ + "description": "Document type (sub-documents)", + "bson_type": "0x03", + "test_key": "x", + "valid": [ + { + "description": "Empty subdoc", + "canonical_bson": "0D000000037800050000000000", + "canonical_extjson": "{\"x\" : {}}" + }, + { + "description": "Empty-string key subdoc", + "canonical_bson": "150000000378000D00000002000200000062000000", + "canonical_extjson": "{\"x\" : {\"\" : \"b\"}}" + }, + { + "description": "Single-character key subdoc", + "canonical_bson": "160000000378000E0000000261000200000062000000", + "canonical_extjson": "{\"x\" : {\"a\" : \"b\"}}" + }, + { + "description": "Dollar-prefixed key in sub-document", + "canonical_bson": "170000000378000F000000022461000200000062000000", + "canonical_extjson": "{\"x\" : {\"$a\" : \"b\"}}" + }, + { + "description": "Dollar as key in sub-document", + 
"canonical_bson": "160000000378000E0000000224000200000061000000", + "canonical_extjson": "{\"x\" : {\"$\" : \"a\"}}" + }, + { + "description": "Dotted key in sub-document", + "canonical_bson": "180000000378001000000002612E62000200000063000000", + "canonical_extjson": "{\"x\" : {\"a.b\" : \"c\"}}" + }, + { + "description": "Dot as key in sub-document", + "canonical_bson": "160000000378000E000000022E000200000061000000", + "canonical_extjson": "{\"x\" : {\".\" : \"a\"}}" + } + ], + "decodeErrors": [ + { + "description": "Subdocument length too long: eats outer terminator", + "bson": "1800000003666F6F000F0000001062617200FFFFFF7F0000" + }, + { + "description": "Subdocument length too short: leaks terminator", + "bson": "1500000003666F6F000A0000000862617200010000" + }, + { + "description": "Invalid subdocument: bad string length in field", + "bson": "1C00000003666F6F001200000002626172000500000062617A000000" + }, + { + "description": "Null byte in sub-document key", + "bson": "150000000378000D00000010610000010000000000" + } + ] +} diff --git a/bson/src/test/resources/bson/double.json b/bson/src/test/resources/bson/double.json new file mode 100644 index 00000000000..7a3bad158b3 --- /dev/null +++ b/bson/src/test/resources/bson/double.json @@ -0,0 +1,87 @@ +{ + "description": "Double type", + "bson_type": "0x01", + "test_key": "d", + "valid": [ + { + "description": "+1.0", + "canonical_bson": "10000000016400000000000000F03F00", + "canonical_extjson": "{\"d\" : {\"$numberDouble\": \"1.0\"}}", + "relaxed_extjson": "{\"d\" : 1.0}" + }, + { + "description": "-1.0", + "canonical_bson": "10000000016400000000000000F0BF00", + "canonical_extjson": "{\"d\" : {\"$numberDouble\": \"-1.0\"}}", + "relaxed_extjson": "{\"d\" : -1.0}" + }, + { + "description": "+1.0001220703125", + "canonical_bson": "10000000016400000000008000F03F00", + "canonical_extjson": "{\"d\" : {\"$numberDouble\": \"1.0001220703125\"}}", + "relaxed_extjson": "{\"d\" : 1.0001220703125}" + }, + { + "description": "-1.0001220703125", + "canonical_bson": "10000000016400000000008000F0BF00", + "canonical_extjson": "{\"d\" : {\"$numberDouble\": \"-1.0001220703125\"}}", + "relaxed_extjson": "{\"d\" : -1.0001220703125}" + }, + { + "description": "1.2345678921232E18", + "canonical_bson": "100000000164002a1bf5f41022b14300", + "canonical_extjson": "{\"d\" : {\"$numberDouble\": \"1.2345678921232E18\"}}", + "relaxed_extjson": "{\"d\" : 1.2345678921232E18}" + }, + { + "description": "-1.2345678921232E18", + "canonical_bson": "100000000164002a1bf5f41022b1c300", + "canonical_extjson": "{\"d\" : {\"$numberDouble\": \"-1.2345678921232E18\"}}", + "relaxed_extjson": "{\"d\" : -1.2345678921232E18}" + }, + { + "description": "0.0", + "canonical_bson": "10000000016400000000000000000000", + "canonical_extjson": "{\"d\" : {\"$numberDouble\": \"0.0\"}}", + "relaxed_extjson": "{\"d\" : 0.0}" + }, + { + "description": "-0.0", + "canonical_bson": "10000000016400000000000000008000", + "canonical_extjson": "{\"d\" : {\"$numberDouble\": \"-0.0\"}}", + "relaxed_extjson": "{\"d\" : -0.0}" + }, + { + "description": "NaN", + "canonical_bson": "10000000016400000000000000F87F00", + "canonical_extjson": "{\"d\": {\"$numberDouble\": \"NaN\"}}", + "relaxed_extjson": "{\"d\": {\"$numberDouble\": \"NaN\"}}", + "lossy": true + }, + { + "description": "NaN with payload", + "canonical_bson": "10000000016400120000000000F87F00", + "canonical_extjson": "{\"d\": {\"$numberDouble\": \"NaN\"}}", + "relaxed_extjson": "{\"d\": {\"$numberDouble\": \"NaN\"}}", + "lossy": true + }, + { + 
"description": "Inf", + "canonical_bson": "10000000016400000000000000F07F00", + "canonical_extjson": "{\"d\": {\"$numberDouble\": \"Infinity\"}}", + "relaxed_extjson": "{\"d\": {\"$numberDouble\": \"Infinity\"}}" + }, + { + "description": "-Inf", + "canonical_bson": "10000000016400000000000000F0FF00", + "canonical_extjson": "{\"d\": {\"$numberDouble\": \"-Infinity\"}}", + "relaxed_extjson": "{\"d\": {\"$numberDouble\": \"-Infinity\"}}" + } + ], + "decodeErrors": [ + { + "description": "double truncated", + "bson": "0B0000000164000000F03F00" + } + ] +} diff --git a/bson/src/test/resources/bson/int32.json b/bson/src/test/resources/bson/int32.json new file mode 100644 index 00000000000..1353fc3df8b --- /dev/null +++ b/bson/src/test/resources/bson/int32.json @@ -0,0 +1,43 @@ +{ + "description": "Int32 type", + "bson_type": "0x10", + "test_key": "i", + "valid": [ + { + "description": "MinValue", + "canonical_bson": "0C0000001069000000008000", + "canonical_extjson": "{\"i\" : {\"$numberInt\": \"-2147483648\"}}", + "relaxed_extjson": "{\"i\" : -2147483648}" + }, + { + "description": "MaxValue", + "canonical_bson": "0C000000106900FFFFFF7F00", + "canonical_extjson": "{\"i\" : {\"$numberInt\": \"2147483647\"}}", + "relaxed_extjson": "{\"i\" : 2147483647}" + }, + { + "description": "-1", + "canonical_bson": "0C000000106900FFFFFFFF00", + "canonical_extjson": "{\"i\" : {\"$numberInt\": \"-1\"}}", + "relaxed_extjson": "{\"i\" : -1}" + }, + { + "description": "0", + "canonical_bson": "0C0000001069000000000000", + "canonical_extjson": "{\"i\" : {\"$numberInt\": \"0\"}}", + "relaxed_extjson": "{\"i\" : 0}" + }, + { + "description": "1", + "canonical_bson": "0C0000001069000100000000", + "canonical_extjson": "{\"i\" : {\"$numberInt\": \"1\"}}", + "relaxed_extjson": "{\"i\" : 1}" + } + ], + "decodeErrors": [ + { + "description": "Bad int32 field length", + "bson": "090000001061000500" + } + ] +} diff --git a/bson/src/test/resources/bson/int64.json b/bson/src/test/resources/bson/int64.json new file mode 100644 index 00000000000..91f4abff950 --- /dev/null +++ b/bson/src/test/resources/bson/int64.json @@ -0,0 +1,43 @@ +{ + "description": "Int64 type", + "bson_type": "0x12", + "test_key": "a", + "valid": [ + { + "description": "MinValue", + "canonical_bson": "10000000126100000000000000008000", + "canonical_extjson": "{\"a\" : {\"$numberLong\" : \"-9223372036854775808\"}}", + "relaxed_extjson": "{\"a\" : -9223372036854775808}" + }, + { + "description": "MaxValue", + "canonical_bson": "10000000126100FFFFFFFFFFFFFF7F00", + "canonical_extjson": "{\"a\" : {\"$numberLong\" : \"9223372036854775807\"}}", + "relaxed_extjson": "{\"a\" : 9223372036854775807}" + }, + { + "description": "-1", + "canonical_bson": "10000000126100FFFFFFFFFFFFFFFF00", + "canonical_extjson": "{\"a\" : {\"$numberLong\" : \"-1\"}}", + "relaxed_extjson": "{\"a\" : -1}" + }, + { + "description": "0", + "canonical_bson": "10000000126100000000000000000000", + "canonical_extjson": "{\"a\" : {\"$numberLong\" : \"0\"}}", + "relaxed_extjson": "{\"a\" : 0}" + }, + { + "description": "1", + "canonical_bson": "10000000126100010000000000000000", + "canonical_extjson": "{\"a\" : {\"$numberLong\" : \"1\"}}", + "relaxed_extjson": "{\"a\" : 1}" + } + ], + "decodeErrors": [ + { + "description": "int64 field truncated", + "bson": "0C0000001261001234567800" + } + ] +} diff --git a/bson/src/test/resources/bson/maxkey.json b/bson/src/test/resources/bson/maxkey.json new file mode 100644 index 00000000000..67cad6db57b --- /dev/null +++ 
b/bson/src/test/resources/bson/maxkey.json @@ -0,0 +1,12 @@ +{ + "description": "Maxkey type", + "bson_type": "0x7F", + "test_key": "a", + "valid": [ + { + "description": "Maxkey", + "canonical_bson": "080000007F610000", + "canonical_extjson": "{\"a\" : {\"$maxKey\" : 1}}" + } + ] +} diff --git a/bson/src/test/resources/bson/minkey.json b/bson/src/test/resources/bson/minkey.json new file mode 100644 index 00000000000..8adee4509a5 --- /dev/null +++ b/bson/src/test/resources/bson/minkey.json @@ -0,0 +1,12 @@ +{ + "description": "Minkey type", + "bson_type": "0xFF", + "test_key": "a", + "valid": [ + { + "description": "Minkey", + "canonical_bson": "08000000FF610000", + "canonical_extjson": "{\"a\" : {\"$minKey\" : 1}}" + } + ] +} diff --git a/bson/src/test/resources/bson/multi-type-deprecated.json b/bson/src/test/resources/bson/multi-type-deprecated.json new file mode 100644 index 00000000000..665f388cd41 --- /dev/null +++ b/bson/src/test/resources/bson/multi-type-deprecated.json @@ -0,0 +1,15 @@ +{ + "description": "Multiple types within the same document", + "bson_type": "0x00", + "deprecated": true, + "valid": [ + { + "description": "All BSON types", + "canonical_bson": "38020000075F69640057E193D7A9CC81B4027498B50E53796D626F6C000700000073796D626F6C0002537472696E670007000000737472696E670010496E743332002A00000012496E743634002A0000000000000001446F75626C6500000000000000F0BF0542696E617279001000000003A34C38F7C3ABEDC8A37814A992AB8DB60542696E61727955736572446566696E656400050000008001020304050D436F6465000E00000066756E6374696F6E2829207B7D000F436F64655769746853636F7065001B0000000E00000066756E6374696F6E2829207B7D00050000000003537562646F63756D656E74001200000002666F6F0004000000626172000004417272617900280000001030000100000010310002000000103200030000001033000400000010340005000000001154696D657374616D7000010000002A0000000B5265676578007061747465726E0000094461746574696D6545706F6368000000000000000000094461746574696D65506F73697469766500FFFFFF7F00000000094461746574696D654E656761746976650000000080FFFFFFFF085472756500010846616C736500000C4442506F696E746572000B000000636F6C6C656374696F6E0057E193D7A9CC81B4027498B1034442526566003D0000000224726566000B000000636F6C6C656374696F6E00072469640057FD71E96E32AB4225B723FB02246462000900000064617461626173650000FF4D696E6B6579007F4D61786B6579000A4E756C6C0006556E646566696E65640000", + "converted_bson": "48020000075f69640057e193d7a9cc81b4027498b50253796d626f6c000700000073796d626f6c0002537472696e670007000000737472696e670010496e743332002a00000012496e743634002a0000000000000001446f75626c6500000000000000f0bf0542696e617279001000000003a34c38f7c3abedc8a37814a992ab8db60542696e61727955736572446566696e656400050000008001020304050d436f6465000e00000066756e6374696f6e2829207b7d000f436f64655769746853636f7065001b0000000e00000066756e6374696f6e2829207b7d00050000000003537562646f63756d656e74001200000002666f6f0004000000626172000004417272617900280000001030000100000010310002000000103200030000001033000400000010340005000000001154696d657374616d7000010000002a0000000b5265676578007061747465726e0000094461746574696d6545706f6368000000000000000000094461746574696d65506f73697469766500ffffff7f00000000094461746574696d654e656761746976650000000080ffffffff085472756500010846616c73650000034442506f696e746572002b0000000224726566000b000000636f6c6c656374696f6e00072469640057e193d7a9cc81b4027498b100034442526566003d0000000224726566000b000000636f6c6c656374696f6e00072469640057fd71e96e32ab4225b723fb02246462000900000064617461626173650000ff4d696e6b6579007f4d61786b6579000a4e756c6c000a556e646566696e65640000", + "canonical_extjson": "{\"_id\": 
{\"$oid\": \"57e193d7a9cc81b4027498b5\"}, \"Symbol\": {\"$symbol\": \"symbol\"}, \"String\": \"string\", \"Int32\": {\"$numberInt\": \"42\"}, \"Int64\": {\"$numberLong\": \"42\"}, \"Double\": {\"$numberDouble\": \"-1.0\"}, \"Binary\": { \"$binary\" : {\"base64\": \"o0w498Or7cijeBSpkquNtg==\", \"subType\": \"03\"}}, \"BinaryUserDefined\": { \"$binary\" : {\"base64\": \"AQIDBAU=\", \"subType\": \"80\"}}, \"Code\": {\"$code\": \"function() {}\"}, \"CodeWithScope\": {\"$code\": \"function() {}\", \"$scope\": {}}, \"Subdocument\": {\"foo\": \"bar\"}, \"Array\": [{\"$numberInt\": \"1\"}, {\"$numberInt\": \"2\"}, {\"$numberInt\": \"3\"}, {\"$numberInt\": \"4\"}, {\"$numberInt\": \"5\"}], \"Timestamp\": {\"$timestamp\": {\"t\": 42, \"i\": 1}}, \"Regex\": {\"$regularExpression\": {\"pattern\": \"pattern\", \"options\": \"\"}}, \"DatetimeEpoch\": {\"$date\": {\"$numberLong\": \"0\"}}, \"DatetimePositive\": {\"$date\": {\"$numberLong\": \"2147483647\"}}, \"DatetimeNegative\": {\"$date\": {\"$numberLong\": \"-2147483648\"}}, \"True\": true, \"False\": false, \"DBPointer\": {\"$dbPointer\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"57e193d7a9cc81b4027498b1\"}}}, \"DBRef\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"57fd71e96e32ab4225b723fb\"}, \"$db\": \"database\"}, \"Minkey\": {\"$minKey\": 1}, \"Maxkey\": {\"$maxKey\": 1}, \"Null\": null, \"Undefined\": {\"$undefined\": true}}", + "converted_extjson": "{\"_id\": {\"$oid\": \"57e193d7a9cc81b4027498b5\"}, \"Symbol\": \"symbol\", \"String\": \"string\", \"Int32\": {\"$numberInt\": \"42\"}, \"Int64\": {\"$numberLong\": \"42\"}, \"Double\": {\"$numberDouble\": \"-1.0\"}, \"Binary\": { \"$binary\" : {\"base64\": \"o0w498Or7cijeBSpkquNtg==\", \"subType\": \"03\"}}, \"BinaryUserDefined\": { \"$binary\" : {\"base64\": \"AQIDBAU=\", \"subType\": \"80\"}}, \"Code\": {\"$code\": \"function() {}\"}, \"CodeWithScope\": {\"$code\": \"function() {}\", \"$scope\": {}}, \"Subdocument\": {\"foo\": \"bar\"}, \"Array\": [{\"$numberInt\": \"1\"}, {\"$numberInt\": \"2\"}, {\"$numberInt\": \"3\"}, {\"$numberInt\": \"4\"}, {\"$numberInt\": \"5\"}], \"Timestamp\": {\"$timestamp\": {\"t\": 42, \"i\": 1}}, \"Regex\": {\"$regularExpression\": {\"pattern\": \"pattern\", \"options\": \"\"}}, \"DatetimeEpoch\": {\"$date\": {\"$numberLong\": \"0\"}}, \"DatetimePositive\": {\"$date\": {\"$numberLong\": \"2147483647\"}}, \"DatetimeNegative\": {\"$date\": {\"$numberLong\": \"-2147483648\"}}, \"True\": true, \"False\": false, \"DBPointer\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"57e193d7a9cc81b4027498b1\"}}, \"DBRef\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"57fd71e96e32ab4225b723fb\"}, \"$db\": \"database\"}, \"Minkey\": {\"$minKey\": 1}, \"Maxkey\": {\"$maxKey\": 1}, \"Null\": null, \"Undefined\": null}" + } + ] +} + diff --git a/bson/src/test/resources/bson/multi-type.json b/bson/src/test/resources/bson/multi-type.json new file mode 100644 index 00000000000..1e1d557c9ba --- /dev/null +++ b/bson/src/test/resources/bson/multi-type.json @@ -0,0 +1,11 @@ +{ + "description": "Multiple types within the same document", + "bson_type": "0x00", + "valid": [ + { + "description": "All BSON types", + "canonical_bson": 
"F4010000075F69640057E193D7A9CC81B4027498B502537472696E670007000000737472696E670010496E743332002A00000012496E743634002A0000000000000001446F75626C6500000000000000F0BF0542696E617279001000000003A34C38F7C3ABEDC8A37814A992AB8DB60542696E61727955736572446566696E656400050000008001020304050D436F6465000E00000066756E6374696F6E2829207B7D000F436F64655769746853636F7065001B0000000E00000066756E6374696F6E2829207B7D00050000000003537562646F63756D656E74001200000002666F6F0004000000626172000004417272617900280000001030000100000010310002000000103200030000001033000400000010340005000000001154696D657374616D7000010000002A0000000B5265676578007061747465726E0000094461746574696D6545706F6368000000000000000000094461746574696D65506F73697469766500FFFFFF7F00000000094461746574696D654E656761746976650000000080FFFFFFFF085472756500010846616C73650000034442526566003D0000000224726566000B000000636F6C6C656374696F6E00072469640057FD71E96E32AB4225B723FB02246462000900000064617461626173650000FF4D696E6B6579007F4D61786B6579000A4E756C6C0000", + "canonical_extjson": "{\"_id\": {\"$oid\": \"57e193d7a9cc81b4027498b5\"}, \"String\": \"string\", \"Int32\": {\"$numberInt\": \"42\"}, \"Int64\": {\"$numberLong\": \"42\"}, \"Double\": {\"$numberDouble\": \"-1.0\"}, \"Binary\": { \"$binary\" : {\"base64\": \"o0w498Or7cijeBSpkquNtg==\", \"subType\": \"03\"}}, \"BinaryUserDefined\": { \"$binary\" : {\"base64\": \"AQIDBAU=\", \"subType\": \"80\"}}, \"Code\": {\"$code\": \"function() {}\"}, \"CodeWithScope\": {\"$code\": \"function() {}\", \"$scope\": {}}, \"Subdocument\": {\"foo\": \"bar\"}, \"Array\": [{\"$numberInt\": \"1\"}, {\"$numberInt\": \"2\"}, {\"$numberInt\": \"3\"}, {\"$numberInt\": \"4\"}, {\"$numberInt\": \"5\"}], \"Timestamp\": {\"$timestamp\": {\"t\": 42, \"i\": 1}}, \"Regex\": {\"$regularExpression\": {\"pattern\": \"pattern\", \"options\": \"\"}}, \"DatetimeEpoch\": {\"$date\": {\"$numberLong\": \"0\"}}, \"DatetimePositive\": {\"$date\": {\"$numberLong\": \"2147483647\"}}, \"DatetimeNegative\": {\"$date\": {\"$numberLong\": \"-2147483648\"}}, \"True\": true, \"False\": false, \"DBRef\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"57fd71e96e32ab4225b723fb\"}, \"$db\": \"database\"}, \"Minkey\": {\"$minKey\": 1}, \"Maxkey\": {\"$maxKey\": 1}, \"Null\": null}" + } + ] +} diff --git a/bson/src/test/resources/bson/null.json b/bson/src/test/resources/bson/null.json new file mode 100644 index 00000000000..f9b269473e6 --- /dev/null +++ b/bson/src/test/resources/bson/null.json @@ -0,0 +1,12 @@ +{ + "description": "Null type", + "bson_type": "0x0A", + "test_key": "a", + "valid": [ + { + "description": "Null", + "canonical_bson": "080000000A610000", + "canonical_extjson": "{\"a\" : null}" + } + ] +} diff --git a/bson/src/test/resources/bson/oid.json b/bson/src/test/resources/bson/oid.json new file mode 100644 index 00000000000..14e9caf4b40 --- /dev/null +++ b/bson/src/test/resources/bson/oid.json @@ -0,0 +1,28 @@ +{ + "description": "ObjectId", + "bson_type": "0x07", + "test_key": "a", + "valid": [ + { + "description": "All zeroes", + "canonical_bson": "1400000007610000000000000000000000000000", + "canonical_extjson": "{\"a\" : {\"$oid\" : \"000000000000000000000000\"}}" + }, + { + "description": "All ones", + "canonical_bson": "14000000076100FFFFFFFFFFFFFFFFFFFFFFFF00", + "canonical_extjson": "{\"a\" : {\"$oid\" : \"ffffffffffffffffffffffff\"}}" + }, + { + "description": "Random", + "canonical_bson": "1400000007610056E1FC72E0C917E9C471416100", + "canonical_extjson": "{\"a\" : {\"$oid\" : \"56e1fc72e0c917e9c4714161\"}}" + } + ], + 
"decodeErrors": [ + { + "description": "OID truncated", + "bson": "1200000007610056E1FC72E0C917E9C471" + } + ] +} diff --git a/bson/src/test/resources/bson/regex.json b/bson/src/test/resources/bson/regex.json new file mode 100644 index 00000000000..223802169df --- /dev/null +++ b/bson/src/test/resources/bson/regex.json @@ -0,0 +1,65 @@ +{ + "description": "Regular Expression type", + "bson_type": "0x0B", + "test_key": "a", + "valid": [ + { + "description": "empty regex with no options", + "canonical_bson": "0A0000000B6100000000", + "canonical_extjson": "{\"a\" : {\"$regularExpression\" : { \"pattern\": \"\", \"options\" : \"\"}}}" + }, + { + "description": "regex without options", + "canonical_bson": "0D0000000B6100616263000000", + "canonical_extjson": "{\"a\" : {\"$regularExpression\" : { \"pattern\": \"abc\", \"options\" : \"\"}}}" + }, + { + "description": "regex with options", + "canonical_bson": "0F0000000B610061626300696D0000", + "canonical_extjson": "{\"a\" : {\"$regularExpression\" : { \"pattern\": \"abc\", \"options\" : \"im\"}}}" + }, + { + "description": "regex with options (keys reversed)", + "canonical_bson": "0F0000000B610061626300696D0000", + "canonical_extjson": "{\"a\" : {\"$regularExpression\" : { \"pattern\": \"abc\", \"options\" : \"im\"}}}", + "degenerate_extjson": "{\"a\" : {\"$regularExpression\" : {\"options\" : \"im\", \"pattern\": \"abc\"}}}" + }, + { + "description": "regex with slash", + "canonical_bson": "110000000B610061622F636400696D0000", + "canonical_extjson": "{\"a\" : {\"$regularExpression\" : { \"pattern\": \"ab/cd\", \"options\" : \"im\"}}}" + }, + { + "description": "flags not alphabetized", + "degenerate_bson": "100000000B6100616263006D69780000", + "canonical_bson": "100000000B610061626300696D780000", + "canonical_extjson": "{\"a\" : {\"$regularExpression\" : { \"pattern\": \"abc\", \"options\" : \"imx\"}}}", + "degenerate_extjson": "{\"a\" : {\"$regularExpression\" : { \"pattern\": \"abc\", \"options\" : \"mix\"}}}" + }, + { + "description" : "Required escapes", + "canonical_bson" : "100000000B610061625C226162000000", + "canonical_extjson": "{\"a\" : {\"$regularExpression\" : { \"pattern\": \"ab\\\\\\\"ab\", \"options\" : \"\"}}}" + }, + { + "description" : "Regular expression as value of $regex query operator", + "canonical_bson" : "180000000B247265676578007061747465726E0069780000", + "canonical_extjson": "{\"$regex\" : {\"$regularExpression\" : { \"pattern\": \"pattern\", \"options\" : \"ix\"}}}" + }, + { + "description" : "Regular expression as value of $regex query operator with $options", + "canonical_bson" : "270000000B247265676578007061747465726E000002246F7074696F6E73000300000069780000", + "canonical_extjson": "{\"$regex\" : {\"$regularExpression\" : { \"pattern\": \"pattern\", \"options\" : \"\"}}, \"$options\" : \"ix\"}" + } + ], + "decodeErrors": [ + { + "description": "Null byte in pattern string", + "bson": "0F0000000B610061006300696D0000" + }, + { + "description": "Null byte in flags string", + "bson": "100000000B61006162630069006D0000" + } + ] +} diff --git a/bson/src/test/resources/bson/string.json b/bson/src/test/resources/bson/string.json new file mode 100644 index 00000000000..148334d0919 --- /dev/null +++ b/bson/src/test/resources/bson/string.json @@ -0,0 +1,72 @@ +{ + "description": "String", + "bson_type": "0x02", + "test_key": "a", + "valid": [ + { + "description": "Empty string", + "canonical_bson": "0D000000026100010000000000", + "canonical_extjson": "{\"a\" : \"\"}" + }, + { + "description": "Single character", + 
"canonical_bson": "0E00000002610002000000620000", + "canonical_extjson": "{\"a\" : \"b\"}" + }, + { + "description": "Multi-character", + "canonical_bson": "190000000261000D0000006162616261626162616261620000", + "canonical_extjson": "{\"a\" : \"abababababab\"}" + }, + { + "description": "two-byte UTF-8 (\u00e9)", + "canonical_bson": "190000000261000D000000C3A9C3A9C3A9C3A9C3A9C3A90000", + "canonical_extjson": "{\"a\" : \"\\u00e9\\u00e9\\u00e9\\u00e9\\u00e9\\u00e9\"}" + }, + { + "description": "three-byte UTF-8 (\u2606)", + "canonical_bson": "190000000261000D000000E29886E29886E29886E298860000", + "canonical_extjson": "{\"a\" : \"\\u2606\\u2606\\u2606\\u2606\"}" + }, + { + "description": "Embedded nulls", + "canonical_bson": "190000000261000D0000006162006261620062616261620000", + "canonical_extjson": "{\"a\" : \"ab\\u0000bab\\u0000babab\"}" + }, + { + "description": "Required escapes", + "canonical_bson" : "320000000261002600000061625C220102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F61620000", + "canonical_extjson" : "{\"a\":\"ab\\\\\\\"\\u0001\\u0002\\u0003\\u0004\\u0005\\u0006\\u0007\\b\\t\\n\\u000b\\f\\r\\u000e\\u000f\\u0010\\u0011\\u0012\\u0013\\u0014\\u0015\\u0016\\u0017\\u0018\\u0019\\u001a\\u001b\\u001c\\u001d\\u001e\\u001fab\"}" + } + ], + "decodeErrors": [ + { + "description": "bad string length: 0 (but no 0x00 either)", + "bson": "0C0000000261000000000000" + }, + { + "description": "bad string length: -1", + "bson": "0C000000026100FFFFFFFF00" + }, + { + "description": "bad string length: eats terminator", + "bson": "10000000026100050000006200620000" + }, + { + "description": "bad string length: longer than rest of document", + "bson": "120000000200FFFFFF00666F6F6261720000" + }, + { + "description": "string is not null-terminated", + "bson": "1000000002610004000000616263FF00" + }, + { + "description": "empty string, but extra null", + "bson": "0E00000002610001000000000000" + }, + { + "description": "invalid UTF-8", + "bson": "0E00000002610002000000E90000" + } + ] +} diff --git a/bson/src/test/resources/bson/symbol.json b/bson/src/test/resources/bson/symbol.json new file mode 100644 index 00000000000..3dd3577ebd1 --- /dev/null +++ b/bson/src/test/resources/bson/symbol.json @@ -0,0 +1,80 @@ +{ + "description": "Symbol", + "bson_type": "0x0E", + "deprecated": true, + "test_key": "a", + "valid": [ + { + "description": "Empty string", + "canonical_bson": "0D0000000E6100010000000000", + "canonical_extjson": "{\"a\": {\"$symbol\": \"\"}}", + "converted_bson": "0D000000026100010000000000", + "converted_extjson": "{\"a\": \"\"}" + }, + { + "description": "Single character", + "canonical_bson": "0E0000000E610002000000620000", + "canonical_extjson": "{\"a\": {\"$symbol\": \"b\"}}", + "converted_bson": "0E00000002610002000000620000", + "converted_extjson": "{\"a\": \"b\"}" + }, + { + "description": "Multi-character", + "canonical_bson": "190000000E61000D0000006162616261626162616261620000", + "canonical_extjson": "{\"a\": {\"$symbol\": \"abababababab\"}}", + "converted_bson": "190000000261000D0000006162616261626162616261620000", + "converted_extjson": "{\"a\": \"abababababab\"}" + }, + { + "description": "two-byte UTF-8 (\u00e9)", + "canonical_bson": "190000000E61000D000000C3A9C3A9C3A9C3A9C3A9C3A90000", + "canonical_extjson": "{\"a\": {\"$symbol\": \"éééééé\"}}", + "converted_bson": "190000000261000D000000C3A9C3A9C3A9C3A9C3A9C3A90000", + "converted_extjson": "{\"a\": \"éééééé\"}" + }, + { + "description": "three-byte UTF-8 (\u2606)", + "canonical_bson": 
"190000000E61000D000000E29886E29886E29886E298860000", + "canonical_extjson": "{\"a\": {\"$symbol\": \"☆☆☆☆\"}}", + "converted_bson": "190000000261000D000000E29886E29886E29886E298860000", + "converted_extjson": "{\"a\": \"☆☆☆☆\"}" + }, + { + "description": "Embedded nulls", + "canonical_bson": "190000000E61000D0000006162006261620062616261620000", + "canonical_extjson": "{\"a\": {\"$symbol\": \"ab\\u0000bab\\u0000babab\"}}", + "converted_bson": "190000000261000D0000006162006261620062616261620000", + "converted_extjson": "{\"a\": \"ab\\u0000bab\\u0000babab\"}" + } + ], + "decodeErrors": [ + { + "description": "bad symbol length: 0 (but no 0x00 either)", + "bson": "0C0000000E61000000000000" + }, + { + "description": "bad symbol length: -1", + "bson": "0C0000000E6100FFFFFFFF00" + }, + { + "description": "bad symbol length: eats terminator", + "bson": "100000000E6100050000006200620000" + }, + { + "description": "bad symbol length: longer than rest of document", + "bson": "120000000E00FFFFFF00666F6F6261720000" + }, + { + "description": "symbol is not null-terminated", + "bson": "100000000E610004000000616263FF00" + }, + { + "description": "empty symbol, but extra null", + "bson": "0E0000000E610001000000000000" + }, + { + "description": "invalid UTF-8", + "bson": "0E0000000E610002000000E90000" + } + ] +} diff --git a/bson/src/test/resources/bson/timestamp.json b/bson/src/test/resources/bson/timestamp.json new file mode 100644 index 00000000000..6f46564a327 --- /dev/null +++ b/bson/src/test/resources/bson/timestamp.json @@ -0,0 +1,34 @@ +{ + "description": "Timestamp type", + "bson_type": "0x11", + "test_key": "a", + "valid": [ + { + "description": "Timestamp: (123456789, 42)", + "canonical_bson": "100000001161002A00000015CD5B0700", + "canonical_extjson": "{\"a\" : {\"$timestamp\" : {\"t\" : 123456789, \"i\" : 42} } }" + }, + { + "description": "Timestamp: (123456789, 42) (keys reversed)", + "canonical_bson": "100000001161002A00000015CD5B0700", + "canonical_extjson": "{\"a\" : {\"$timestamp\" : {\"t\" : 123456789, \"i\" : 42} } }", + "degenerate_extjson": "{\"a\" : {\"$timestamp\" : {\"i\" : 42, \"t\" : 123456789} } }" + }, + { + "description": "Timestamp with high-order bit set on both seconds and increment", + "canonical_bson": "10000000116100FFFFFFFFFFFFFFFF00", + "canonical_extjson": "{\"a\" : {\"$timestamp\" : {\"t\" : 4294967295, \"i\" : 4294967295} } }" + }, + { + "description": "Timestamp with high-order bit set on both seconds and increment (not UINT32_MAX)", + "canonical_bson": "1000000011610000286BEE00286BEE00", + "canonical_extjson": "{\"a\" : {\"$timestamp\" : {\"t\" : 4000000000, \"i\" : 4000000000} } }" + } + ], + "decodeErrors": [ + { + "description": "Truncated timestamp field", + "bson": "0f0000001161002A00000015CD5B00" + } + ] +} diff --git a/bson/src/test/resources/bson/top.json b/bson/src/test/resources/bson/top.json new file mode 100644 index 00000000000..9c649b5e3f0 --- /dev/null +++ b/bson/src/test/resources/bson/top.json @@ -0,0 +1,266 @@ +{ + "description": "Top-level document validity", + "bson_type": "0x00", + "valid": [ + { + "description": "Dollar-prefixed key in top-level document", + "canonical_bson": "0F00000010246B6579002A00000000", + "canonical_extjson": "{\"$key\": {\"$numberInt\": \"42\"}}" + }, + { + "description": "Dollar as key in top-level document", + "canonical_bson": "0E00000002240002000000610000", + "canonical_extjson": "{\"$\": \"a\"}" + }, + { + "description": "Dotted key in top-level document", + "canonical_bson": "1000000002612E620002000000630000", + 
"canonical_extjson": "{\"a.b\": \"c\"}" + }, + { + "description": "Dot as key in top-level document", + "canonical_bson": "0E000000022E0002000000610000", + "canonical_extjson": "{\".\": \"a\"}" + } + ], + "decodeErrors": [ + { + "description": "An object size that's too small to even include the object size, but is a well-formed, empty object", + "bson": "0100000000" + }, + { + "description": "An object size that's only enough for the object size, but is a well-formed, empty object", + "bson": "0400000000" + }, + { + "description": "One object, with length shorter than size (missing EOO)", + "bson": "05000000" + }, + { + "description": "One object, sized correctly, with a spot for an EOO, but the EOO is 0x01", + "bson": "0500000001" + }, + { + "description": "One object, sized correctly, with a spot for an EOO, but the EOO is 0xff", + "bson": "05000000FF" + }, + { + "description": "One object, sized correctly, with a spot for an EOO, but the EOO is 0x70", + "bson": "0500000070" + }, + { + "description": "Byte count is zero (with non-zero input length)", + "bson": "00000000000000000000" + }, + { + "description": "Stated length exceeds byte count, with truncated document", + "bson": "1200000002666F6F0004000000626172" + }, + { + "description": "Stated length less than byte count, with garbage after envelope", + "bson": "1200000002666F6F00040000006261720000DEADBEEF" + }, + { + "description": "Stated length exceeds byte count, with valid envelope", + "bson": "1300000002666F6F00040000006261720000" + }, + { + "description": "Stated length less than byte count, with valid envelope", + "bson": "1100000002666F6F00040000006261720000" + }, + { + "description": "Invalid BSON type low range", + "bson": "07000000000000" + }, + { + "description": "Invalid BSON type high range", + "bson": "07000000800000" + }, + { + "description": "Document truncated mid-key", + "bson": "1200000002666F" + }, + { + "description": "Null byte in document key", + "bson": "0D000000107800000100000000" + } + ], + "parseErrors": [ + { + "description" : "Bad $regularExpression (extra field)", + "string" : "{\"a\" : {\"$regularExpression\": {\"pattern\": \"abc\", \"options\": \"\", \"unrelated\": true}}}" + }, + { + "description" : "Bad $regularExpression (missing options field)", + "string" : "{\"a\" : {\"$regularExpression\": {\"pattern\": \"abc\"}}}" + }, + { + "description": "Bad $regularExpression (pattern is number, not string)", + "string": "{\"x\" : {\"$regularExpression\" : { \"pattern\": 42, \"options\" : \"\"}}}" + }, + { + "description": "Bad $regularExpression (options are number, not string)", + "string": "{\"x\" : {\"$regularExpression\" : { \"pattern\": \"a\", \"options\" : 0}}}" + }, + { + "description" : "Bad $regularExpression (missing pattern field)", + "string" : "{\"a\" : {\"$regularExpression\": {\"options\":\"ix\"}}}" + }, + { + "description": "Bad $oid (number, not string)", + "string": "{\"a\" : {\"$oid\" : 42}}" + }, + { + "description": "Bad $oid (extra field)", + "string": "{\"a\" : {\"$oid\" : \"56e1fc72e0c917e9c4714161\", \"unrelated\": true}}" + }, + { + "description": "Bad $numberInt (number, not string)", + "string": "{\"a\" : {\"$numberInt\" : 42}}" + }, + { + "description": "Bad $numberInt (extra field)", + "string": "{\"a\" : {\"$numberInt\" : \"42\", \"unrelated\": true}}" + }, + { + "description": "Bad $numberLong (number, not string)", + "string": "{\"a\" : {\"$numberLong\" : 42}}" + }, + { + "description": "Bad $numberLong (extra field)", + "string": "{\"a\" : {\"$numberLong\" : \"42\", 
\"unrelated\": true}}" + }, + { + "description": "Bad $numberDouble (number, not string)", + "string": "{\"a\" : {\"$numberDouble\" : 42}}" + }, + { + "description": "Bad $numberDouble (extra field)", + "string": "{\"a\" : {\"$numberDouble\" : \".1\", \"unrelated\": true}}" + }, + { + "description": "Bad $numberDecimal (number, not string)", + "string": "{\"a\" : {\"$numberDecimal\" : 42}}" + }, + { + "description": "Bad $numberDecimal (extra field)", + "string": "{\"a\" : {\"$numberDecimal\" : \".1\", \"unrelated\": true}}" + }, + { + "description": "Bad $binary (binary is number, not string)", + "string": "{\"x\" : {\"$binary\" : {\"base64\" : 0, \"subType\" : \"00\"}}}" + }, + { + "description": "Bad $binary (type is number, not string)", + "string": "{\"x\" : {\"$binary\" : {\"base64\" : \"\", \"subType\" : 0}}}" + }, + { + "description": "Bad $binary (missing $type)", + "string": "{\"x\" : {\"$binary\" : {\"base64\" : \"//8=\"}}}" + }, + { + "description": "Bad $binary (missing $binary)", + "string": "{\"x\" : {\"$binary\" : {\"subType\" : \"00\"}}}" + }, + { + "description": "Bad $binary (extra field)", + "string": "{\"x\" : {\"$binary\" : {\"base64\" : \"//8=\", \"subType\" : 0, \"unrelated\": true}}}" + }, + { + "description": "Bad $code (type is number, not string)", + "string": "{\"a\" : {\"$code\" : 42}}" + }, + { + "description": "Bad $code (type is number, not string) when $scope is also present", + "string": "{\"a\" : {\"$code\" : 42, \"$scope\" : {}}}" + }, + { + "description": "Bad $code (extra field)", + "string": "{\"a\" : {\"$code\" : \"\", \"unrelated\": true}}" + }, + { + "description": "Bad $code with $scope (scope is number, not doc)", + "string": "{\"x\" : {\"$code\" : \"\", \"$scope\" : 42}}" + }, + { + "description": "Bad $timestamp (type is number, not doc)", + "string": "{\"a\" : {\"$timestamp\" : 42} }" + }, + { + "description": "Bad $timestamp ('t' type is string, not number)", + "string": "{\"a\" : {\"$timestamp\" : {\"t\" : \"123456789\", \"i\" : 42} } }" + }, + { + "description": "Bad $timestamp ('i' type is string, not number)", + "string": "{\"a\" : {\"$timestamp\" : {\"t\" : 123456789, \"i\" : \"42\"} } }" + }, + { + "description": "Bad $timestamp (extra field at same level as $timestamp)", + "string": "{\"a\" : {\"$timestamp\" : {\"t\" : \"123456789\", \"i\" : \"42\"}, \"unrelated\": true } }" + }, + { + "description": "Bad $timestamp (extra field at same level as t and i)", + "string": "{\"a\" : {\"$timestamp\" : {\"t\" : \"123456789\", \"i\" : \"42\", \"unrelated\": true} } }" + }, + { + "description": "Bad $timestamp (missing t)", + "string": "{\"a\" : {\"$timestamp\" : {\"i\" : \"42\"} } }" + }, + { + "description": "Bad $timestamp (missing i)", + "string": "{\"a\" : {\"$timestamp\" : {\"t\" : \"123456789\"} } }" + }, + { + "description": "Bad $date (number, not string or hash)", + "string": "{\"a\" : {\"$date\" : 42}}" + }, + { + "description": "Bad $date (extra field)", + "string": "{\"a\" : {\"$date\" : {\"$numberLong\" : \"1356351330501\"}, \"unrelated\": true}}" + }, + { + "description": "Bad $minKey (boolean, not integer)", + "string": "{\"a\" : {\"$minKey\" : true}}" + }, + { + "description": "Bad $minKey (wrong integer)", + "string": "{\"a\" : {\"$minKey\" : 0}}" + }, + { + "description": "Bad $minKey (extra field)", + "string": "{\"a\" : {\"$minKey\" : 1, \"unrelated\": true}}" + }, + { + "description": "Bad $maxKey (boolean, not integer)", + "string": "{\"a\" : {\"$maxKey\" : true}}" + }, + { + "description": "Bad $maxKey (wrong integer)", 
+ "string": "{\"a\" : {\"$maxKey\" : 0}}" + }, + { + "description": "Bad $maxKey (extra field)", + "string": "{\"a\" : {\"$maxKey\" : 1, \"unrelated\": true}}" + }, + { + "description": "Bad DBpointer (extra field)", + "string": "{\"a\": {\"$dbPointer\": {\"a\": {\"$numberInt\": \"1\"}, \"$id\": {\"$oid\": \"56e1fc72e0c917e9c4714161\"}, \"c\": {\"$numberInt\": \"2\"}, \"$ref\": \"b\"}}}" + }, + { + "description" : "Null byte in document key", + "string" : "{\"a\\u0000\": 1 }" + }, + { + "description" : "Null byte in sub-document key", + "string" : "{\"a\" : {\"b\\u0000\": 1 }}" + }, + { + "description": "Null byte in $regularExpression pattern", + "string": "{\"a\" : {\"$regularExpression\" : { \"pattern\": \"b\\u0000\", \"options\" : \"i\"}}}" + }, + { + "description": "Null byte in $regularExpression options", + "string": "{\"a\" : {\"$regularExpression\" : { \"pattern\": \"b\", \"options\" : \"i\\u0000\"}}}" + } + ] +} diff --git a/bson/src/test/resources/bson/undefined.json b/bson/src/test/resources/bson/undefined.json new file mode 100644 index 00000000000..285f068258c --- /dev/null +++ b/bson/src/test/resources/bson/undefined.json @@ -0,0 +1,15 @@ +{ + "description": "Undefined type (deprecated)", + "bson_type": "0x06", + "deprecated": true, + "test_key": "a", + "valid": [ + { + "description": "Undefined", + "canonical_bson": "0800000006610000", + "canonical_extjson": "{\"a\" : {\"$undefined\" : true}}", + "converted_bson": "080000000A610000", + "converted_extjson": "{\"a\" : null}" + } + ] +} diff --git a/bson/src/test/unit/org/bson/BasicBSONDecoderSpecification.groovy b/bson/src/test/unit/org/bson/BasicBSONDecoderSpecification.groovy new file mode 100644 index 00000000000..9f13447e001 --- /dev/null +++ b/bson/src/test/unit/org/bson/BasicBSONDecoderSpecification.groovy @@ -0,0 +1,223 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson + +import org.bson.codecs.BsonDocumentCodec +import org.bson.codecs.EncoderContext +import org.bson.io.BasicOutputBuffer +import org.bson.types.BSONTimestamp +import org.bson.types.Binary +import org.bson.types.Code +import org.bson.types.CodeWScope +import org.bson.types.Decimal128 +import org.bson.types.MaxKey +import org.bson.types.MinKey +import org.bson.types.ObjectId +import spock.lang.Specification +import spock.lang.Subject +import spock.lang.Unroll + +import java.util.regex.Pattern + +import static org.bson.BasicBSONDecoder.getDefaultUuidRepresentation +import static org.bson.BasicBSONDecoder.setDefaultUuidRepresentation +import static org.bson.BsonBinarySubType.UUID_LEGACY +import static org.bson.BsonBinarySubType.UUID_STANDARD +import static org.bson.UuidRepresentation.JAVA_LEGACY +import static org.bson.UuidRepresentation.STANDARD +import static org.bson.internal.UuidHelper.encodeUuidToBinary + +@SuppressWarnings(['LineLength', 'DuplicateMapLiteral', 'UnnecessaryBooleanExpression']) +class BasicBSONDecoderSpecification extends Specification { + + @Subject + private final BasicBSONDecoder bsonDecoder = new BasicBSONDecoder() + + def setupSpec() { + Map.metaClass.bitwiseNegate = { new BasicBSONObject(delegate as Map) } + Pattern.metaClass.equals = { Pattern other -> + delegate.pattern() == other.pattern() && delegate.flags() == other.flags() + } + } + + def 'should decode from input stream'() { + setup: + InputStream is = new ByteArrayInputStream((byte[]) [12, 0, 0, 0, 16, 97, 0, 1, 0, 0, 0, 0]) + + when: + BSONObject document = bsonDecoder.readObject(is) + + then: + document == ~['a': 1] + } + + @Unroll + def 'should decode #type'() { + expect: + documentWithType as BasicBSONObject == bsonDecoder.readObject((byte[]) bytes) + + where: + documentWithType | bytes + ['d1': -1.01] | [17, 0, 0, 0, 1, 100, 49, 0, 41, 92, -113, -62, -11, 40, -16, -65, 0] + ['d2': Float.MIN_VALUE] | [17, 0, 0, 0, 1, 100, 50, 0, 0, 0, 0, 0, 0, 0, -96, 54, 0] + ['d3': Double.MAX_VALUE] | [17, 0, 0, 0, 1, 100, 51, 0, -1, -1, -1, -1, -1, -1, -17, 127, 0] + ['d4': 0.0] | [17, 0, 0, 0, 1, 100, 52, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ['s1': ''] | [14, 0, 0, 0, 2, 115, 49, 0, 1, 0, 0, 0, 0, 0] + ['s2': 'danke'] | [19, 0, 0, 0, 2, 115, 50, 0, 6, 0, 0, 0, 100, 97, 110, 107, 101, 0, 0] + ['s3': ',+\\\"<>;[]{}@#$%^&*()+_'] | [36, 0, 0, 0, 2, 115, 51, 0, 23, 0, 0, 0, 44, 43, 92, 34, 60, 62, 59, 91, 93, 123, 125, 64, 35, 36, 37, 94, 38, 42, 40, 41, 43, 95, 0, 0] + ['s4': 'a\u00e9\u3042\u0430\u0432\u0431\u0434'] | [28, 0, 0, 0, 2, 115, 52, 0, 15, 0, 0, 0, 97, -61, -87, -29, -127, -126, -48, -80, -48, -78, -48, -79, -48, -76, 0, 0] + ['o': [:]] | [13, 0, 0, 0, 3, 111, 0, 5, 0, 0, 0, 0, 0] + ['a1': []] | [14, 0, 0, 0, 4, 97, 49, 0, 5, 0, 0, 0, 0, 0] + ['a2': [[]]] | [22, 0, 0, 0, 4, 97, 50, 0, 13, 0, 0, 0, 4, 48, 0, 5, 0, 0, 0, 0, 0, 0] + ['b1': new Binary((byte) 0x01, (byte[]) [115, 116, 11])] | [17, 0, 0, 0, 5, 98, 49, 0, 3, 0, 0, 0, 1, 115, 116, 11, 0] + ['b2': [102, 111, 111] as byte[]] | [17, 0, 0, 0, 5, 98, 50, 0, 3, 0, 0, 0, 0, 102, 111, 111, 0] + ['_id': new ObjectId('50d3332018c6a1d8d1662b61')] | [22, 0, 0, 0, 7, 95, 105, 100, 0, 80, -45, 51, 32, 24, -58, -95, -40, -47, 102, 43, 97, 0] + ['b1': true] | [10, 0, 0, 0, 8, 98, 49, 0, 1, 0] + ['b2': false] | [10, 0, 0, 0, 8, 98, 50, 0, 0, 0] + ['d': new Date(582163200)] | [16, 0, 0, 0, 9, 100, 0, 0, 27, -77, 34, 0, 0, 0, 0, 0] + ['n': null] | [8, 0, 0, 0, 10, 110, 0, 0] + ['r': Pattern.compile('[a]*', Pattern.CASE_INSENSITIVE)] | [15, 0, 0, 0, 
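+ // ^ int32 document size (15) for the regex row; the bytes that follow are the 0x0B regex element:
+ // key "r" (114, 0), cstring pattern "[a]*", cstring options "i", then the 0x00 document terminator.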
11, 114, 0, 91, 97, 93, 42, 0, 105, 0, 0] + ['js1': new Code('var i = 0')] | [24, 0, 0, 0, 13, 106, 115, 49, 0, 10, 0, 0, 0, 118, 97, 114, 32, 105, 32, 61, 32, 48, 0, 0] + ['s': 'c'] | [14, 0, 0, 0, 14, 115, 0, 2, 0, 0, 0, 99, 0, 0] + ['js2': new CodeWScope('i++', ~['x': 1])] | [34, 0, 0, 0, 15, 106, 115, 50, 0, 24, 0, 0, 0, 4, 0, 0, 0, 105, 43, 43, 0, 12, 0, 0, 0, 16, 120, 0, 1, 0, 0, 0, 0, 0] + ['i1': -12] | [13, 0, 0, 0, 16, 105, 49, 0, -12, -1, -1, -1, 0] + ['i2': Integer.MIN_VALUE] | [13, 0, 0, 0, 16, 105, 50, 0, 0, 0, 0, -128, 0] + ['i3': 0] | [13, 0, 0, 0, 16, 105, 51, 0, 0, 0, 0, 0, 0] + ['t': new BSONTimestamp(123999401, 44332)] | [16, 0, 0, 0, 17, 116, 0, 44, -83, 0, 0, -87, 20, 100, 7, 0] + ['i4': Long.MAX_VALUE] | [17, 0, 0, 0, 18, 105, 52, 0, -1, -1, -1, -1, -1, -1, -1, 127, 0] + ['k1': new MinKey()] | [9, 0, 0, 0, -1, 107, 49, 0, 0] + ['k2': new MaxKey()] | [9, 0, 0, 0, 127, 107, 50, 0, 0] + ['f': Decimal128.parse('0E-6176')] | [24, 0, 0, 0, 19, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ['u': new UUID(1, 2)] | [29, 0, 0, 0, 5, 117, 0, 16, 0, 0, 0, 3, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0] + + type = BsonType.findByValue(bytes[4]) + } + + def 'should decode complex structures'() { + expect: + complexDocument as BasicBSONObject == bsonDecoder.readObject((byte[]) bytes) + + where: + complexDocument | bytes + ['a': ~['d1': ~['b': true], 'd2': ~['b': false]]] | [39, 0, 0, 0, 3, 97, 0, 31, 0, 0, 0, 3, 100, 49, 0, 9, 0, 0, 0, 8, 98, 0, 1, 0, 3, 100, 50, 0, 9, 0, 0, 0, 8, 98, 0, 0, 0, 0, 0] + ['a': [~['b1': true], ~['b2': false]]] | [39, 0, 0, 0, 4, 97, 0, 31, 0, 0, 0, 3, 48, 0, 10, 0, 0, 0, 8, 98, 49, 0, 1, 0, 3, 49, 0, 10, 0, 0, 0, 8, 98, 50, 0, 0, 0, 0, 0] + ['a': [[1, 2]]] | [35, 0, 0, 0, 4, 97, 0, 27, 0, 0, 0, 4, 48, 0, 19, 0, 0, 0, 16, 48, 0, 1, 0, 0, 0, 16, 49, 0, 2, 0, 0, 0, 0, 0, 0] + ['js': new CodeWScope('i++', ~['njs': new CodeWScope('j++', ~['j': 0])])] | [55, 0, 0, 0, 15, 106, 115, 0, 46, 0, 0, 0, 4, 0, 0, 0, 105, 43, 43, 0, 34, 0, 0, 0, 15, 110, 106, 115, 0, 24, 0, 0, 0, 4, 0, 0, 0, 106, 43, 43, 0, 12, 0, 0, 0, 16, 106, 0, 0, 0, 0, 0, 0, 0, 0] + } + + @Unroll + def 'should call BSONCallback.#method when meet #type '() { + setup: + BSONCallback callback = Mock() + + when: + bsonDecoder.decode((byte[]) bytes, callback) + + then: + 1 * callback.objectStart() + 1 * callback."$method"(* _) >> { assert it == args } + 1 * callback.objectDone() + + where: + method | args || bytes + 'gotDouble' | ['d1', -1.01d] || [17, 0, 0, 0, 1, 100, 49, 0, 41, 92, -113, -62, -11, 40, -16, -65, 0] + 'gotString' | ['s2', 'danke'] || [19, 0, 0, 0, 2, 115, 50, 0, 6, 0, 0, 0, 100, 97, 110, 107, 101, 0, 0] + 'gotBinary' | ['b2', 0, [102, 111, 111] as byte[]] || [17, 0, 0, 0, 5, 98, 50, 0, 3, 0, 0, 0, 0, 102, 111, 111, 0] + 'gotObjectId' | ['_id', new ObjectId('50d3332018c6a1d8d1662b61')] || [22, 0, 0, 0, 7, 95, 105, 100, 0, 80, -45, 51, 32, 24, -58, -95, -40, -47, 102, 43, 97, 0] + 'gotBoolean' | ['b1', true] || [10, 0, 0, 0, 8, 98, 49, 0, 1, 0] + 'gotDate' | ['d', 582163200] || [16, 0, 0, 0, 9, 100, 0, 0, 27, -77, 34, 0, 0, 0, 0, 0] + 'gotNull' | ['n'] || [8, 0, 0, 0, 10, 110, 0, 0] + 'gotRegex' | ['r', '[a]*', 'i'] || [15, 0, 0, 0, 11, 114, 0, 91, 97, 93, 42, 0, 105, 0, 0] + 'gotCode' | ['js1', 'var i = 0'] || [24, 0, 0, 0, 13, 106, 115, 49, 0, 10, 0, 0, 0, 118, 97, 114, 32, 105, 32, 61, 32, 48, 0, 0] + 'gotSymbol' | ['s', 'c'] || [14, 0, 0, 0, 14, 115, 0, 2, 0, 0, 0, 99, 0, 0] + 'gotInt' | ['i1', -12] || [13, 0, 0, 0, 16, 105, 49, 0, -12, -1, -1, -1, 0] + 
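+ // Each byte array in this table is a complete BSON document: little-endian int32 total size,
+ // one element (type byte, null-terminated key, value), then a 0x00 terminator. For the 'gotInt'
+ // row above: size 13, type 0x10 (int32), key "i1" (105, 49, 0), value -12 in little-endian.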
'gotLong' | ['i4', Long.MAX_VALUE] || [17, 0, 0, 0, 18, 105, 52, 0, -1, -1, -1, -1, -1, -1, -1, 127, 0] + 'gotTimestamp' | ['t', 123999401, 44332] || [16, 0, 0, 0, 17, 116, 0, 44, -83, 0, 0, -87, 20, 100, 7, 0] + 'gotMinKey' | ['k1'] || [9, 0, 0, 0, -1, 107, 49, 0, 0] + 'gotMaxKey' | ['k2'] || [9, 0, 0, 0, 127, 107, 50, 0, 0] + 'gotDecimal128' | ['f', Decimal128.parse('0E-6176')] || [24, 0, 0, 0, 19, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + + //gotDBRef + //arrayStart + //arrayDone + //objectStart + //objectDone + //gotBinaryArray + //gotUUID + //gotCodeWScope + type = BsonType.findByValue(bytes[4]) + } + + def 'should throw exception when input is invalid'() { + when: + bsonDecoder.readObject((byte[]) bytes) + + then: + thrown(exception) + + where: + exception | bytes + BsonSerializationException | [13, 0, 0, 0, 16, 97, 0, 1, 0, 0, 0, 0] + BsonSerializationException | [12, 0, 0, 0, 17, 97, 0, 1, 0, 0, 0, 0] + BsonSerializationException | [12, 0, 2, 0, 16, 97, 0, 1, 0, 0, 0, 0] + BsonSerializationException | [5, 0, 0, 0, 16, 97, 0, 1, 0, 0, 0, 0] + BsonSerializationException | [5, 0, 0, 0, 16, 97, 45, 1, 0, 0, 0, 0] + } + + + def 'default value of defaultUuidRepresentation is JAVA_LEGACY'() { + expect: + getDefaultUuidRepresentation() == JAVA_LEGACY + } + + @Unroll + def 'should decode UUID according to default uuid representation'() { + given: + def uuid = new UUID(1, 2) + def output = new BasicOutputBuffer() + new BsonDocumentCodec().encode(new BsonBinaryWriter(output), + new BsonDocument('u', new BsonBinary(uuid, encodedUuidRepresentation)), EncoderContext.builder().build()) + + when: + setDefaultUuidRepresentation(decodedUuidRepresentation) + + then: + getDefaultUuidRepresentation() == decodedUuidRepresentation + + when: + def decodedUuid = bsonDecoder.readObject(output.getInternalBuffer()).get('u') + + then: + decodedUuid == expectedUuid + + cleanup: + setDefaultUuidRepresentation(JAVA_LEGACY) + + where: + [encodedUuidRepresentation, decodedUuidRepresentation, expectedUuid] << [ + [JAVA_LEGACY, JAVA_LEGACY, + new UUID(1, 2)], + [JAVA_LEGACY, STANDARD, + new Binary(UUID_LEGACY, encodeUuidToBinary(new UUID(1, 2), JAVA_LEGACY))], + [STANDARD, JAVA_LEGACY, + new Binary(UUID_STANDARD, encodeUuidToBinary(new UUID(1, 2), STANDARD))], + [STANDARD, STANDARD, + new UUID(1, 2)] + + ] + } +} diff --git a/bson/src/test/unit/org/bson/BasicBSONEncoderSpecification.groovy b/bson/src/test/unit/org/bson/BasicBSONEncoderSpecification.groovy new file mode 100644 index 00000000000..886c784f6d3 --- /dev/null +++ b/bson/src/test/unit/org/bson/BasicBSONEncoderSpecification.groovy @@ -0,0 +1,215 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson + +import org.bson.codecs.BsonDocumentCodec +import org.bson.codecs.DecoderContext +import org.bson.io.BasicOutputBuffer +import org.bson.io.OutputBuffer +import org.bson.types.BSONTimestamp +import org.bson.types.BasicBSONList +import org.bson.types.Binary +import org.bson.types.Code +import org.bson.types.CodeWScope +import org.bson.types.Decimal128 +import org.bson.types.MaxKey +import org.bson.types.MinKey +import org.bson.types.ObjectId +import org.bson.types.Symbol +import spock.lang.Specification +import spock.lang.Subject +import spock.lang.Unroll + +import java.nio.ByteBuffer +import java.util.regex.Pattern + +import static org.bson.BasicBSONEncoder.getDefaultUuidRepresentation +import static org.bson.BasicBSONEncoder.setDefaultUuidRepresentation +import static org.bson.UuidRepresentation.JAVA_LEGACY +import static org.bson.UuidRepresentation.STANDARD + +@SuppressWarnings(['LineLength', 'DuplicateMapLiteral']) +class BasicBSONEncoderSpecification extends Specification { + + def setupSpec() { + Map.metaClass.bitwiseNegate = { new BasicBSONObject(delegate) } + Pattern.metaClass.equals = { Pattern other -> + delegate.pattern() == other.pattern() && delegate.flags() == other.flags() + } + } + + @Subject + private final BSONEncoder bsonEncoder = new BasicBSONEncoder() + + @Unroll + def 'should encode #aClass'() { + expect: + bytes as byte[] == bsonEncoder.encode(new BasicBSONObject(document)) + + where: + document | bytes + ['d': -1.01d] | [16, 0, 0, 0, 1, 100, 0, 41, 92, -113, -62, -11, 40, -16, -65, 0] + ['d': Float.MIN_VALUE] | [16, 0, 0, 0, 1, 100, 0, 0, 0, 0, 0, 0, 0, -96, 54, 0] + ['d': Double.MAX_VALUE] | [16, 0, 0, 0, 1, 100, 0, -1, -1, -1, -1, -1, -1, -17, 127, 0] + ['d': 0.0d] | [16, 0, 0, 0, 1, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ['s': ''] | [13, 0, 0, 0, 2, 115, 0, 1, 0, 0, 0, 0, 0] + ['s': 'danke'] | [18, 0, 0, 0, 2, 115, 0, 6, 0, 0, 0, 100, 97, 110, 107, 101, 0, 0] + ['s': ',+\\\"<>;[]{}@#$%^&*()+_'] | [35, 0, 0, 0, 2, 115, 0, 23, 0, 0, 0, 44, 43, 92, 34, 60, 62, 59, 91, 93, 123, 125, 64, 35, 36, 37, 94, 38, 42, 40, 41, 43, 95, 0, 0] + ['s': 'a\u00e9\u3042\u0430\u0432\u0431\u0434'] | [27, 0, 0, 0, 2, 115, 0, 15, 0, 0, 0, 97, -61, -87, -29, -127, -126, -48, -80, -48, -78, -48, -79, -48, -76, 0, 0] + ['o': ['a': 1]] | [20, 0, 0, 0, 3, 111, 0, 12, 0, 0, 0, 16, 97, 0, 1, 0, 0, 0, 0, 0] + ['a': []] | [13, 0, 0, 0, 4, 97, 0, 5, 0, 0, 0, 0, 0] + ['a': [] as Set] | [13, 0, 0, 0, 4, 97, 0, 5, 0, 0, 0, 0, 0] + ['a': [] as Iterable] | [13, 0, 0, 0, 4, 97, 0, 5, 0, 0, 0, 0, 0] + ['a': [] as Object[]] | [13, 0, 0, 0, 4, 97, 0, 5, 0, 0, 0, 0, 0] + ['a': new BasicBSONList()] | [13, 0, 0, 0, 4, 97, 0, 5, 0, 0, 0, 0, 0] + ['a': [[]]] | [21, 0, 0, 0, 4, 97, 0, 13, 0, 0, 0, 4, 48, 0, 5, 0, 0, 0, 0, 0, 0] + ['b': new Binary((byte) 0x01, (byte[]) [115, 116, 11])] | [16, 0, 0, 0, 5, 98, 0, 3, 0, 0, 0, 1, 115, 116, 11, 0] + ['b': [102, 111, 111] as byte[]] | [16, 0, 0, 0, 5, 98, 0, 3, 0, 0, 0, 0, 102, 111, 111, 0] + ['_id': new ObjectId('50d3332018c6a1d8d1662b61')] | [22, 0, 0, 0, 7, 95, 105, 100, 0, 80, -45, 51, 32, 24, -58, -95, -40, -47, 102, 43, 97, 0] + ['b': true] | [9, 0, 0, 0, 8, 98, 0, 1, 0] + ['b': false] | [9, 0, 0, 0, 8, 98, 0, 0, 0] + ['d': new Date(582163200)] | [16, 0, 0, 0, 9, 100, 0, 0, 27, -77, 34, 0, 0, 0, 0, 0] + ['n': null] | [8, 0, 0, 0, 10, 110, 0, 0] + ['r': Pattern.compile('[a]*', Pattern.CASE_INSENSITIVE)] | [15, 0, 0, 0, 11, 114, 0, 91, 97, 93, 42, 0, 105, 0, 0] + ['js': new Code('var i = 0')] | [23, 0, 0, 0, 13, 106, 115, 0, 10, 0, 0, 0, 
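+ // ^ doc size 23, type 0x0D (JavaScript code), key "js", int32 string length 10; the remaining
+ // bytes are "var i = 0" in UTF-8, its null terminator, and the 0x00 document terminator.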
118, 97, 114, 32, 105, 32, 61, 32, 48, 0, 0] + ['s': 'c' as char] | [14, 0, 0, 0, 2, 115, 0, 2, 0, 0, 0, 99, 0, 0] + ['s': new Symbol('c')] | [14, 0, 0, 0, 14, 115, 0, 2, 0, 0, 0, 99, 0, 0] + ['js': new CodeWScope('i++', ~['x': 1])] | [33, 0, 0, 0, 15, 106, 115, 0, 24, 0, 0, 0, 4, 0, 0, 0, 105, 43, 43, 0, 12, 0, 0, 0, 16, 120, 0, 1, 0, 0, 0, 0, 0] + ['i': -12] | [12, 0, 0, 0, 16, 105, 0, -12, -1, -1, -1, 0] + ['i': Integer.MIN_VALUE] | [12, 0, 0, 0, 16, 105, 0, 0, 0, 0, -128, 0] + ['i': 0] | [12, 0, 0, 0, 16, 105, 0, 0, 0, 0, 0, 0] + ['t': new BSONTimestamp(123999401, 44332)] | [16, 0, 0, 0, 17, 116, 0, 44, -83, 0, 0, -87, 20, 100, 7, 0] + ['i': Long.MAX_VALUE] | [16, 0, 0, 0, 18, 105, 0, -1, -1, -1, -1, -1, -1, -1, 127, 0] + ['k': new MinKey()] | [8, 0, 0, 0, -1, 107, 0, 0] + ['k': new MaxKey()] | [8, 0, 0, 0, 127, 107, 0, 0] + ['f': Decimal128.parse('0E-6176')] | [24, 0, 0, 0, 19, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ['u': new UUID(1, 2)] | [29, 0, 0, 0, 5, 117, 0, 16, 0, 0, 0, 3, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0] + + aClass = document.find { true }.value.getClass() + } + + @Unroll + def 'should encode #aClass array'() { + expect: + bytes as byte[] == bsonEncoder.encode(new BasicBSONObject(document)) + + where: + document | bytes + ['a': [1, 2] as int[]] | [27, 0, 0, 0, 4, 97, 0, 19, 0, 0, 0, 16, 48, 0, 1, 0, 0, 0, 16, 49, 0, 2, 0, 0, 0, 0, 0] + ['a': [1, 2] as long[]] | [35, 0, 0, 0, 4, 97, 0, 27, 0, 0, 0, 18, 48, 0, 1, 0, 0, 0, 0, 0, 0, 0, 18, 49, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0] + ['a': [1, 2] as float[]] | [35, 0, 0, 0, 4, 97, 0, 27, 0, 0, 0, 1, 48, 0, 0, 0, 0, 0, 0, 0, -16, 63, 1, 49, 0, 0, 0, 0, 0, 0, 0, 0, 64, 0, 0] + ['a': [1, 2] as short[]] | [27, 0, 0, 0, 4, 97, 0, 19, 0, 0, 0, 16, 48, 0, 1, 0, 0, 0, 16, 49, 0, 2, 0, 0, 0, 0, 0] + ['a': [1, 2] as double[]] | [35, 0, 0, 0, 4, 97, 0, 27, 0, 0, 0, 1, 48, 0, 0, 0, 0, 0, 0, 0, -16, 63, 1, 49, 0, 0, 0, 0, 0, 0, 0, 0, 64, 0, 0] + ['a': [true, false] as boolean[]] | [21, 0, 0, 0, 4, 97, 0, 13, 0, 0, 0, 8, 48, 0, 1, 8, 49, 0, 0, 0, 0] + ['a': ['x', 'y'] as String[]] | [31, 0, 0, 0, 4, 97, 0, 23, 0, 0, 0, 2, 48, 0, 2, 0, 0, 0, 120, 0, 2, 49, 0, 2, 0, 0, 0, 121, 0, 0, 0] + ['a': [1, 'y'] as Object[]] | [29, 0, 0, 0, 4, 97, 0, 21, 0, 0, 0, 16, 48, 0, 1, 0, 0, 0, 2, 49, 0, 2, 0, 0, 0, 121, 0, 0, 0] + ['a': [new ObjectId('50d3332018c6a1d8d1662b61')] as ObjectId[]] | [28, 0, 0, 0, 4, 97, 0, 20, 0, 0, 0, 7, 48, 0, 80, -45, 51, 32, 24, -58, -95, -40, -47, 102, 43, 97, 0, 0] + + aClass = document.get('a').getClass() + } + + def 'should encode complex structures'() { + expect: + bytes as byte[] == bsonEncoder.encode(~document) + + where: + document | bytes + ['a': ~['d1': ~['b': true], 'd2': ~['b': false]]] | [39, 0, 0, 0, 3, 97, 0, 31, 0, 0, 0, 3, 100, 49, 0, 9, 0, 0, 0, 8, 98, 0, 1, 0, 3, 100, 50, 0, 9, 0, 0, 0, 8, 98, 0, 0, 0, 0, 0] + ['a': [~['b1': true], ~['b2': false]]] | [39, 0, 0, 0, 4, 97, 0, 31, 0, 0, 0, 3, 48, 0, 10, 0, 0, 0, 8, 98, 49, 0, 1, 0, 3, 49, 0, 10, 0, 0, 0, 8, 98, 50, 0, 0, 0, 0, 0] + ['a': [[1, 2]]] | [35, 0, 0, 0, 4, 97, 0, 27, 0, 0, 0, 4, 48, 0, 19, 0, 0, 0, 16, 48, 0, 1, 0, 0, 0, 16, 49, 0, 2, 0, 0, 0, 0, 0, 0] + ['js': new CodeWScope('i++', ~['njs': new CodeWScope('j++', ~['j': 0])])] | [55, 0, 0, 0, 15, 106, 115, 0, 46, 0, 0, 0, 4, 0, 0, 0, 105, 43, 43, 0, 34, 0, 0, 0, 15, 110, 106, 115, 0, 24, 0, 0, 0, 4, 0, 0, 0, 106, 43, 43, 0, 12, 0, 0, 0, 16, 106, 0, 0, 0, 0, 0, 0, 0, 0] + } + + @SuppressWarnings(['SpaceBeforeClosingBrace', 'SpaceAfterOpeningBrace']) + def 'should throw 
IllegalArgumentException while encoding unknown class'() { + given: + def instanceOfCustomClass = new Object() {} + + when: + bsonEncoder.encode(~['a': instanceOfCustomClass]) + + then: + thrown(IllegalArgumentException) + } + + def 'should write to provided outputBuffer'() { + given: + OutputBuffer buffer = Mock() + bsonEncoder.set(buffer) + + when: + bsonEncoder.putObject(~['i': 0]) + + then: + (1.._) * buffer.writeCString(_) + (1.._) * buffer.writeInt32(_) + } + + def 'should throw IllegalStateException on setting buffer while encoder in use'() { + given: + bsonEncoder.set(new BasicOutputBuffer()) + bsonEncoder.putObject(new BasicBSONObject()) + + when: + bsonEncoder.set(new BasicOutputBuffer()) + + then: + thrown(IllegalStateException) + } + + def 'should write _id first'() { + given: + BasicBSONObject document = new BasicBSONObject('a', 2).append('_id', 1) + OutputBuffer buffer = Mock() + bsonEncoder.set(buffer) + + when: + bsonEncoder.putObject(document) + + then: + 1 * buffer.writeCString('_id') + 1 * buffer.writeInt32(1) + + then: + 1 * buffer.writeCString('a') + 1 * buffer.writeInt32(2) + } + + def 'should encode UUID according to default uuid representation'() { + given: + def defaultUuidRepresentation = getDefaultUuidRepresentation() + def uuid = new UUID(1, 2) + def document = new BasicBSONObject() + document.append('u', uuid) + + when: + setDefaultUuidRepresentation(uuidRepresentation) + def bytes = bsonEncoder.encode(new BasicBSONObject(document)) + def decodedDocument = new BsonDocumentCodec().decode(new BsonBinaryReader(ByteBuffer.wrap(bytes)), + DecoderContext.builder().build()) + + then: + defaultUuidRepresentation == JAVA_LEGACY + decodedDocument.getBinary('u').asUuid(uuidRepresentation) == uuid + + cleanup: + setDefaultUuidRepresentation(defaultUuidRepresentation) + + where: + uuidRepresentation << [JAVA_LEGACY, STANDARD] + } +} diff --git a/bson/src/test/unit/org/bson/BinaryVectorTest.java b/bson/src/test/unit/org/bson/BinaryVectorTest.java new file mode 100644 index 00000000000..57e8b294019 --- /dev/null +++ b/bson/src/test/unit/org/bson/BinaryVectorTest.java @@ -0,0 +1,179 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; + +class BinaryVectorTest { + + @Test + void shouldCreateInt8Vector() { + // given + byte[] data = {1, 2, 3, 4, 5}; + + // when + Int8BinaryVector vector = BinaryVector.int8Vector(data); + + // then + assertNotNull(vector); + assertEquals(BinaryVector.DataType.INT8, vector.getDataType()); + assertArrayEquals(data, vector.getData()); + } + + @Test + void shouldThrowExceptionWhenCreatingInt8VectorWithNullData() { + // given + byte[] data = null; + + // when & Then + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> BinaryVector.int8Vector(data)); + assertEquals("data can not be null", exception.getMessage()); + } + + @Test + void shouldCreateFloat32Vector() { + // given + float[] data = {1.0f, 2.0f, 3.0f}; + + // when + Float32BinaryVector vector = BinaryVector.floatVector(data); + + // then + assertNotNull(vector); + assertEquals(BinaryVector.DataType.FLOAT32, vector.getDataType()); + assertArrayEquals(data, vector.getData()); + } + + @Test + void shouldThrowExceptionWhenCreatingFloat32VectorWithNullData() { + // given + float[] data = null; + + // when & Then + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> BinaryVector.floatVector(data)); + assertEquals("data can not be null", exception.getMessage()); + } + + + @ParameterizedTest(name = "{index}: validPadding={0}") + @ValueSource(bytes = {0, 1, 2, 3, 4, 5, 6, 7}) + void shouldCreatePackedBitVector(final byte validPadding) { + // given + byte[] data = {(byte) 0b10101010, (byte) 0b01010101}; + + // when + PackedBitBinaryVector vector = BinaryVector.packedBitVector(data, validPadding); + + // then + assertNotNull(vector); + assertEquals(BinaryVector.DataType.PACKED_BIT, vector.getDataType()); + assertArrayEquals(data, vector.getData()); + assertEquals(validPadding, vector.getPadding()); + } + + @ParameterizedTest(name = "{index}: invalidPadding={0}") + @ValueSource(bytes = {-1, 8}) + void shouldThrowExceptionWhenPackedBitVectorHasInvalidPadding(final byte invalidPadding) { + // given + byte[] data = {(byte) 0b10101010}; + + // when & Then + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> + BinaryVector.packedBitVector(data, invalidPadding)); + assertEquals("state should be: Padding must be between 0 and 7 bits. 
Provided padding: " + invalidPadding, exception.getMessage()); + } + + @Test + void shouldThrowExceptionWhenPackedBitVectorIsCreatedWithNullData() { + // given + byte[] data = null; + byte padding = 0; + + // when & Then + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> + BinaryVector.packedBitVector(data, padding)); + assertEquals("data can not be null", exception.getMessage()); + } + + @Test + void shouldCreatePackedBitVectorWithZeroPaddingAndEmptyData() { + // given + byte[] data = new byte[0]; + byte padding = 0; + + // when + PackedBitBinaryVector vector = BinaryVector.packedBitVector(data, padding); + + // then + assertNotNull(vector); + assertEquals(BinaryVector.DataType.PACKED_BIT, vector.getDataType()); + assertArrayEquals(data, vector.getData()); + assertEquals(padding, vector.getPadding()); + } + + @Test + void shouldThrowExceptionWhenPackedBitVectorWithNonZeroPaddingAndEmptyData() { + // given + byte[] data = new byte[0]; + byte padding = 1; + + // when & Then + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> + BinaryVector.packedBitVector(data, padding)); + assertEquals("state should be: Padding must be 0 if vector is empty. Provided padding: " + padding, exception.getMessage()); + } + + @Test + void shouldThrowExceptionWhenRetrievingInt8DataFromNonInt8Vector() { + // given + float[] data = {1.0f, 2.0f}; + BinaryVector vector = BinaryVector.floatVector(data); + + // when & Then + IllegalStateException exception = assertThrows(IllegalStateException.class, vector::asInt8Vector); + assertEquals("Expected vector data type INT8, but found FLOAT32", exception.getMessage()); + } + + @Test + void shouldThrowExceptionWhenRetrievingFloat32DataFromNonFloat32Vector() { + // given + byte[] data = {1, 2, 3}; + BinaryVector vector = BinaryVector.int8Vector(data); + + // when & Then + IllegalStateException exception = assertThrows(IllegalStateException.class, vector::asFloat32Vector); + assertEquals("Expected vector data type FLOAT32, but found INT8", exception.getMessage()); + } + + @Test + void shouldThrowExceptionWhenRetrievingPackedBitDataFromNonPackedBitVector() { + // given + float[] data = {1.0f, 2.0f}; + BinaryVector vector = BinaryVector.floatVector(data); + + // when & Then + IllegalStateException exception = assertThrows(IllegalStateException.class, vector::asPackedBitVector); + assertEquals("Expected vector data type PACKED_BIT, but found FLOAT32", exception.getMessage()); + } +} diff --git a/bson/src/test/unit/org/bson/BitsTest.java b/bson/src/test/unit/org/bson/BitsTest.java new file mode 100644 index 00000000000..28d35ffeaa0 --- /dev/null +++ b/bson/src/test/unit/org/bson/BitsTest.java @@ -0,0 +1,107 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.bson; + + + +import org.junit.jupiter.api.Test; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.util.Arrays; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + + +public class BitsTest { + + private static final byte[] BYTES = {41, 0, 0, 0, 16, 105, 49, 0, -12, + -1, -1, -1, 16, 105, 50, 0, 0, 0, + 0, -128, 18, 105, 51, 0, -1, -1, -1, + -1, -1, -1, -1, 127, 16, 105, 52, 0, + 0, 0, 0, 0, 0}; + + @Test + public void testReadFullyWithBufferLargerThanExpected() throws IOException { + byte[] buffer = new byte[8192]; + Bits.readFully(new ByteArrayInputStream(BYTES), buffer, 0, BYTES.length); + assertArrayEquals(BYTES, Arrays.copyOfRange(buffer, 0, BYTES.length)); + } + + @Test + public void testReadFullyWithOffset() throws IOException { + int offset = 10; + byte[] buffer = new byte[8192]; + Bits.readFully(new ByteArrayInputStream(BYTES), buffer, offset, BYTES.length); + assertArrayEquals(BYTES, Arrays.copyOfRange(buffer, offset, BYTES.length + offset)); + } + + @Test + public void testReadFullyWithBufferEqualsToExpected() throws IOException { + int offset = 10; + byte[] buffer = new byte[offset + BYTES.length]; + Bits.readFully(new ByteArrayInputStream(BYTES), buffer, offset, BYTES.length); + assertArrayEquals(BYTES, Arrays.copyOfRange(buffer, offset, BYTES.length + offset)); + } + + @Test + public void testReadFullyUsingNotEnoughBigBuffer() throws IOException { + assertThrows(IllegalArgumentException.class, () -> + Bits.readFully(new ByteArrayInputStream(BYTES), new byte[2], 0, BYTES.length) + ); + } + + @Test + public void testReadFullyUsingNotEnoughBigBufferWithOffset() throws IOException { + assertThrows(IllegalArgumentException.class, () -> { + int offset = 10; + byte[] buffer = new byte[BYTES.length]; + Bits.readFully(new ByteArrayInputStream(BYTES), buffer, offset, BYTES.length); + }); + } + + @Test + public void testReadInt() { + assertEquals(41, Bits.readInt(BYTES)); + } + + @Test + public void testReadIntFromInputStream() throws IOException { + assertEquals(41, Bits.readInt(new ByteArrayInputStream(BYTES), new byte[4])); + } + + @Test + public void testReadIntWithOffset() { + assertEquals(-12, Bits.readInt(BYTES, 8)); + } + + @Test + public void testReadLong() { + assertEquals(Long.MAX_VALUE, Bits.readLong(BYTES, 24)); + } + + @Test + public void testReadLongWithNotEnoughData() { + assertThrows(ArrayIndexOutOfBoundsException.class, () -> + Bits.readLong(Arrays.copyOfRange(BYTES, 24, 30), 0) + ); + } + +} diff --git a/bson/src/test/unit/org/bson/BsonArraySpecification.groovy b/bson/src/test/unit/org/bson/BsonArraySpecification.groovy new file mode 100644 index 00000000000..a4d4e11c41a --- /dev/null +++ b/bson/src/test/unit/org/bson/BsonArraySpecification.groovy @@ -0,0 +1,61 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson + +import spock.lang.Specification + +class BsonArraySpecification extends Specification { + + def 'should be array type'() { + expect: + new BsonArray().getBsonType() == BsonType.ARRAY + } + + def 'should construct empty array'() { + when: + def array = new BsonArray() + + then: + array.isEmpty() + array.size() == 0 + array.getValues().isEmpty() + } + + def 'should construct from a list'() { + given: + def list = [BsonBoolean.TRUE, BsonBoolean.FALSE] + + when: + def array = new BsonArray(list) + + then: + !array.isEmpty() + array.size() == 2 + array.getValues() == list + + when: + list.remove(BsonBoolean.TRUE) + + then: + array.getValues() != list + } + + def 'should parse json'() { + expect: + BsonArray.parse('[1, true]') == new BsonArray([new BsonInt32(1), BsonBoolean.TRUE]) + } +} diff --git a/bson/src/test/unit/org/bson/BsonBinaryReaderSpecification.groovy b/bson/src/test/unit/org/bson/BsonBinaryReaderSpecification.groovy new file mode 100644 index 00000000000..d2f689ea398 --- /dev/null +++ b/bson/src/test/unit/org/bson/BsonBinaryReaderSpecification.groovy @@ -0,0 +1,52 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson + +import spock.lang.Specification +import spock.lang.Unroll + +import static org.bson.AbstractBsonReader.State.DONE +import static org.bson.AbstractBsonReader.State.TYPE +import static org.bson.BsonHelper.toBson + +class BsonBinaryReaderSpecification extends Specification { + + @Unroll + def 'should skip value #value'() { + given: + def document = new BsonDocument('name', value) + def reader = new BsonBinaryReader(toBson(document)) + reader.readStartDocument() + reader.readBsonType() + + when: + reader.skipName() + reader.skipValue() + + then: + reader.getState() == TYPE + + when: + reader.readEndDocument() + + then: + reader.getState() == DONE + + where: + value << BsonHelper.valuesOfEveryType() + } +} diff --git a/bson/src/test/unit/org/bson/BsonBinaryReaderTest.java b/bson/src/test/unit/org/bson/BsonBinaryReaderTest.java new file mode 100644 index 00000000000..bffda74ecaa --- /dev/null +++ b/bson/src/test/unit/org/bson/BsonBinaryReaderTest.java @@ -0,0 +1,76 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson; + +import org.bson.io.ByteBufferBsonInput; +import org.bson.types.ObjectId; +import org.junit.jupiter.api.Test; + +import java.nio.ByteBuffer; + +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; + +public class BsonBinaryReaderTest { + + @Test + public void testReadDBPointer() { + BsonBinaryReader reader = createReaderForBytes(new byte[]{26, 0, 0, 0, 12, 97, 0, 2, 0, 0, 0, 98, 0, 82, 9, 41, 108, + -42, -60, -29, -116, -7, 111, -1, -36, 0}); + + reader.readStartDocument(); + assertThat(reader.readBsonType(), is(BsonType.DB_POINTER)); + BsonDbPointer dbPointer = reader.readDBPointer(); + assertThat(dbPointer.getNamespace(), is("b")); + assertThat(dbPointer.getId(), is(new ObjectId("5209296cd6c4e38cf96fffdc"))); + reader.readEndDocument(); + reader.close(); + } + + @Test + public void testInvalidBsonType() { + BsonBinaryReader reader = createReaderForBytes(new byte[]{26, 0, 0, 0, 22, 97, 0, 2, 0, 0, 0, 98, 0, 82, 9, 41, 108, + -42, -60, -29, -116, -7, 111, -1, -36, 0}); + + reader.readStartDocument(); + try { + reader.readBsonType(); + fail("Should have thrown BsonSerializationException"); + } catch (BsonSerializationException e) { + assertEquals("Detected unknown BSON type \"\\x16\" for fieldname \"a\". Are you using the latest driver version?", e.getMessage()); + } + } + + @Test + public void testInvalidBsonTypeFollowedByInvalidCString() { + BsonBinaryReader reader = createReaderForBytes(new byte[]{26, 0, 0, 0, 22, 97, 98}); + + reader.readStartDocument(); + try { + reader.readBsonType(); + fail("Should have thrown BsonSerializationException"); + } catch (BsonSerializationException e) { + assertEquals("Found a BSON string that is not null-terminated", e.getMessage()); + } + } + + private BsonBinaryReader createReaderForBytes(final byte[] bytes) { + return new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(bytes)))); + } +} diff --git a/bson/src/test/unit/org/bson/BsonBinarySpecification.groovy b/bson/src/test/unit/org/bson/BsonBinarySpecification.groovy new file mode 100644 index 00000000000..503440daa04 --- /dev/null +++ b/bson/src/test/unit/org/bson/BsonBinarySpecification.groovy @@ -0,0 +1,88 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson + +import spock.lang.Specification +import spock.lang.Unroll + +class BsonBinarySpecification extends Specification { + + @Unroll + def 'should initialize with data'() { + given: + def bsonBinary = new BsonBinary((byte) 80, data as byte[]) + + expect: + data == bsonBinary.getData() + + where: + data << [ + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16], + [2, 5, 4, 67, 3, 4, 5, 2, 4, 2, 5, 6, 7, 4, 5, 12], + [34, 24, 56, 76, 3, 4, 1, 12, 1, 9, 8, 7, 56, 46, 3, 9] + ] + } + + @Unroll + def 'should initialize with data and BsonBinarySubType'() { + given: + byte[] data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] + def bsonBinary = new BsonBinary(subType, data) + + expect: + subType.getValue() == bsonBinary.getType() + data == bsonBinary.getData() + + where: + subType << [BsonBinarySubType.BINARY, + BsonBinarySubType.FUNCTION, + BsonBinarySubType.MD5, + BsonBinarySubType.OLD_BINARY, + BsonBinarySubType.USER_DEFINED, + BsonBinarySubType.UUID_LEGACY, + BsonBinarySubType.UUID_STANDARD, + BsonBinarySubType.VECTOR] + } + + @Unroll + def 'should initialize with UUID'() { + given: + def bsonBinary = new BsonBinary(uuid) + + expect: + uuid == bsonBinary.asUuid() + + where: + uuid << [UUID.fromString('ffadee18-b533-11e8-96f8-529269fb1459'), + UUID.fromString('a5dc280e-b534-11e8-96f8-529269fb1459'), + UUID.fromString('4ef2a357-cb16-45a6-a6f6-a11ae1972917')] + } + + @Unroll + def 'should initialize with UUID and UUID representation'() { + given: + def uuid = UUID.fromString('ffadee18-b533-11e8-96f8-529269fb1459') + def bsonBinary = new BsonBinary(uuid, uuidRepresentation) + + expect: + uuid == bsonBinary.asUuid(uuidRepresentation) + + where: + uuidRepresentation << [UuidRepresentation.STANDARD, UuidRepresentation.C_SHARP_LEGACY, + UuidRepresentation.JAVA_LEGACY, UuidRepresentation.PYTHON_LEGACY] + } +} diff --git a/bson/src/test/unit/org/bson/BsonBinarySubTypeSpecification.groovy b/bson/src/test/unit/org/bson/BsonBinarySubTypeSpecification.groovy new file mode 100644 index 00000000000..448d63f23fd --- /dev/null +++ b/bson/src/test/unit/org/bson/BsonBinarySubTypeSpecification.groovy @@ -0,0 +1,39 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson + +import spock.lang.Specification + +class BsonBinarySubTypeSpecification extends Specification { + + def 'should be uuid only for legacy and uuid types'() { + expect: + BsonBinarySubType.isUuid(value as byte) == isUuid + + where: + value | isUuid + 1 | false + 2 | false + 3 | true + 4 | true + 5 | false + 6 | false + 7 | false + 8 | false + 9 | false + } +} diff --git a/bson/src/test/unit/org/bson/BsonBinaryTest.java b/bson/src/test/unit/org/bson/BsonBinaryTest.java new file mode 100644 index 00000000000..b47bcbf8a79 --- /dev/null +++ b/bson/src/test/unit/org/bson/BsonBinaryTest.java @@ -0,0 +1,266 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.EnumSource; +import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.params.provider.ValueSource; + +import java.util.stream.Stream; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.params.provider.Arguments.arguments; + +class BsonBinaryTest { + + private static final byte FLOAT32_DTYPE = BinaryVector.DataType.FLOAT32.getValue(); + private static final byte INT8_DTYPE = BinaryVector.DataType.INT8.getValue(); + private static final byte PACKED_BIT_DTYPE = BinaryVector.DataType.PACKED_BIT.getValue(); + public static final int ZERO_PADDING = 0; + + @Test + void shouldThrowExceptionWhenCreatingBsonBinaryWithNullVector() { + // given + BinaryVector vector = null; + + // when & then + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> new BsonBinary(vector)); + assertEquals("Vector must not be null", exception.getMessage()); + } + + @ParameterizedTest + @EnumSource(value = BsonBinarySubType.class, mode = EnumSource.Mode.EXCLUDE, names = {"VECTOR"}) + void shouldThrowExceptionWhenBsonBinarySubTypeIsNotVector(final BsonBinarySubType bsonBinarySubType) { + // given + byte[] data = new byte[]{1, 2, 3, 4}; + BsonBinary bsonBinary = new BsonBinary(bsonBinarySubType.getValue(), data); + + // when & then + BsonInvalidOperationException exception = assertThrows(BsonInvalidOperationException.class, bsonBinary::asVector); + assertEquals("type must be a Vector subtype.", exception.getMessage()); + } + + @ParameterizedTest(name = "{index}: {0}") + @MethodSource("provideFloatVectors") + void shouldEncodeFloatVector(final BinaryVector actualFloat32Vector, final byte[] expectedBsonEncodedVector) { + // when + BsonBinary actualBsonBinary = new BsonBinary(actualFloat32Vector); + byte[] actualBsonEncodedVector = actualBsonBinary.getData(); + + // then + assertEquals(BsonBinarySubType.VECTOR.getValue(), actualBsonBinary.getType(), "The subtype must be VECTOR"); + assertArrayEquals(expectedBsonEncodedVector, actualBsonEncodedVector); + } + + @ParameterizedTest(name = "{index}: {0}") + @MethodSource("provideFloatVectors") + void shouldDecodeFloatVector(final Float32BinaryVector expectedFloatVector, final byte[] bsonEncodedVector) { + // when + Float32BinaryVector decodedVector = (Float32BinaryVector) new BsonBinary(BsonBinarySubType.VECTOR, bsonEncodedVector).asVector(); + + // then + assertEquals(expectedFloatVector, decodedVector); + } + + private static Stream provideFloatVectors() { + return Stream.of( + arguments( + BinaryVector.floatVector(new float[]{1.1f, 2.2f, 3.3f, -1.0f, Float.MAX_VALUE, Float.MIN_VALUE, 
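+                            // extreme and non-finite values must survive the IEEE 754 little-endian round trip +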
Float.POSITIVE_INFINITY, + Float.NEGATIVE_INFINITY}), + new byte[]{FLOAT32_DTYPE, ZERO_PADDING, + (byte) 205, (byte) 204, (byte) 140, (byte) 63, // 1.1f in little-endian + (byte) 205, (byte) 204, (byte) 12, (byte) 64, // 2.2f in little-endian + (byte) 51, (byte) 51, (byte) 83, (byte) 64, // 3.3f in little-endian + (byte) 0, (byte) 0, (byte) 128, (byte) 191, // -1.0f in little-endian + (byte) 255, (byte) 255, (byte) 127, (byte) 127, // Float.MAX_VALUE in little-endian + (byte) 1, (byte) 0, (byte) 0, (byte) 0, // Float.MIN_VALUE in little-endian + (byte) 0, (byte) 0, (byte) 128, (byte) 127, // Float.POSITIVE_INFINITY in little-endian + (byte) 0, (byte) 0, (byte) 128, (byte) 255 // Float.NEGATIVE_INFINITY in little-endian + } + ), + arguments( + BinaryVector.floatVector(new float[]{0.0f}), + new byte[]{FLOAT32_DTYPE, ZERO_PADDING, + (byte) 0, (byte) 0, (byte) 0, (byte) 0 // 0.0f in little-endian + } + ), + arguments( + BinaryVector.floatVector(new float[]{}), + new byte[]{FLOAT32_DTYPE, ZERO_PADDING} + ) + ); + } + + @ParameterizedTest(name = "{index}: {0}") + @MethodSource("provideInt8Vectors") + void shouldEncodeInt8Vector(final BinaryVector actualInt8Vector, final byte[] expectedBsonEncodedVector) { + // when + BsonBinary actualBsonBinary = new BsonBinary(actualInt8Vector); + byte[] actualBsonEncodedVector = actualBsonBinary.getData(); + + // then + assertEquals(BsonBinarySubType.VECTOR.getValue(), actualBsonBinary.getType(), "The subtype must be VECTOR"); + assertArrayEquals(expectedBsonEncodedVector, actualBsonEncodedVector); + } + + @ParameterizedTest(name = "{index}: {0}") + @MethodSource("provideInt8Vectors") + void shouldDecodeInt8Vector(final Int8BinaryVector expectedInt8Vector, final byte[] bsonEncodedVector) { + // when + Int8BinaryVector decodedVector = (Int8BinaryVector) new BsonBinary(BsonBinarySubType.VECTOR, bsonEncodedVector).asVector(); + + // then + assertEquals(expectedInt8Vector, decodedVector); + } + + private static Stream provideInt8Vectors() { + return Stream.of( + arguments( + BinaryVector.int8Vector(new byte[]{Byte.MAX_VALUE, 1, 2, 3, 4, Byte.MIN_VALUE}), + new byte[]{INT8_DTYPE, ZERO_PADDING, Byte.MAX_VALUE, 1, 2, 3, 4, Byte.MIN_VALUE + }), + arguments(BinaryVector.int8Vector(new byte[]{}), + new byte[]{INT8_DTYPE, ZERO_PADDING} + ) + ); + } + + @ParameterizedTest + @MethodSource("providePackedBitVectors") + void shouldEncodePackedBitVector(final BinaryVector actualPackedBitVector, final byte[] expectedBsonEncodedVector) { + // when + BsonBinary actualBsonBinary = new BsonBinary(actualPackedBitVector); + byte[] actualBsonEncodedVector = actualBsonBinary.getData(); + + // then + assertEquals(BsonBinarySubType.VECTOR.getValue(), actualBsonBinary.getType(), "The subtype must be VECTOR"); + assertArrayEquals(expectedBsonEncodedVector, actualBsonEncodedVector); + } + + @ParameterizedTest + @MethodSource("providePackedBitVectors") + void shouldDecodePackedBitVector(final PackedBitBinaryVector expectedPackedBitVector, final byte[] bsonEncodedVector) { + // when + PackedBitBinaryVector decodedVector = (PackedBitBinaryVector) new BsonBinary(BsonBinarySubType.VECTOR, bsonEncodedVector).asVector(); + + // then + assertEquals(expectedPackedBitVector, decodedVector); + } + + private static Stream providePackedBitVectors() { + return Stream.of( + arguments( + BinaryVector.packedBitVector(new byte[]{(byte) 0, (byte) 255, (byte) 10}, (byte) 2), + new byte[]{PACKED_BIT_DTYPE, 2, (byte) 0, (byte) 255, (byte) 10} + ), + arguments( + BinaryVector.packedBitVector(new byte[0], (byte) 
0), + new byte[]{PACKED_BIT_DTYPE, 0} + )); + } + + @Test + void shouldThrowExceptionForInvalidFloatArrayLengthWhenDecode() { + // given + byte[] invalidData = {FLOAT32_DTYPE, 0, 10, 20, 30}; + + // when & Then + BsonInvalidOperationException thrown = assertThrows(BsonInvalidOperationException.class, () -> { + new BsonBinary(BsonBinarySubType.VECTOR, invalidData).asVector(); + }); + assertEquals("Byte array length must be a multiple of 4 for FLOAT32 data type, but found: " + invalidData.length, + thrown.getMessage()); + } + + @ParameterizedTest + @ValueSource(ints = {0, 1}) + void shouldThrowExceptionWhenEncodedVectorLengthIsLessThenMetadataLength(final int encodedVectorLength) { + // given + byte[] invalidData = new byte[encodedVectorLength]; + + // when & Then + BsonInvalidOperationException thrown = assertThrows(BsonInvalidOperationException.class, () -> { + new BsonBinary(BsonBinarySubType.VECTOR, invalidData).asVector(); + }); + assertEquals("Vector encoded array length must be at least 2, but found: " + encodedVectorLength, + thrown.getMessage()); + } + + @ParameterizedTest + @ValueSource(bytes = {-1, 1}) + void shouldThrowExceptionForInvalidFloatArrayPaddingWhenDecode(final byte invalidPadding) { + // given + byte[] invalidData = {FLOAT32_DTYPE, invalidPadding, 10, 20, 30, 20}; + + // when & Then + BsonInvalidOperationException thrown = assertThrows(BsonInvalidOperationException.class, () -> { + new BsonBinary(BsonBinarySubType.VECTOR, invalidData).asVector(); + }); + assertEquals("Padding must be 0 for FLOAT32 data type, but found: " + invalidPadding, thrown.getMessage()); + } + + @ParameterizedTest + @ValueSource(bytes = {-1, 1}) + void shouldThrowExceptionForInvalidInt8ArrayPaddingWhenDecode(final byte invalidPadding) { + // given + byte[] invalidData = {INT8_DTYPE, invalidPadding, 10, 20, 30, 20}; + + // when & Then + BsonInvalidOperationException thrown = assertThrows(BsonInvalidOperationException.class, () -> { + new BsonBinary(BsonBinarySubType.VECTOR, invalidData).asVector(); + }); + assertEquals("Padding must be 0 for INT8 data type, but found: " + invalidPadding, thrown.getMessage()); + } + + @ParameterizedTest + @ValueSource(bytes = {-1, 8}) + void shouldThrowExceptionForInvalidPackedBitArrayPaddingWhenDecode(final byte invalidPadding) { + // given + byte[] invalidData = {PACKED_BIT_DTYPE, invalidPadding, 10, 20, 30, 20}; + + // when & then + BsonInvalidOperationException thrown = assertThrows(BsonInvalidOperationException.class, () -> { + new BsonBinary(BsonBinarySubType.VECTOR, invalidData).asVector(); + }); + assertEquals("Padding must be between 0 and 7 bits, but found: " + invalidPadding, thrown.getMessage()); + } + + @ParameterizedTest + @ValueSource(bytes = {-1, 1, 2, 3, 4, 5, 6, 7, 8}) + void shouldThrowExceptionForInvalidPackedBitArrayPaddingWhenDecodeEmptyVector(final byte invalidPadding) { + // given + byte[] invalidData = {PACKED_BIT_DTYPE, invalidPadding}; + + // when & Then + BsonInvalidOperationException thrown = assertThrows(BsonInvalidOperationException.class, () -> { + new BsonBinary(BsonBinarySubType.VECTOR, invalidData).asVector(); + }); + assertEquals("Padding must be 0 if vector is empty, but found: " + invalidPadding, thrown.getMessage()); + } + + @Test + void shouldThrowWhenUnknownVectorDType() { + // when + BsonBinary bsonBinary = new BsonBinary(BsonBinarySubType.VECTOR, new byte[]{(byte) 0}); + assertThrows(BsonInvalidOperationException.class, bsonBinary::asVector); + } +} diff --git a/bson/src/test/unit/org/bson/BsonBinaryWriterTest.java 
b/bson/src/test/unit/org/bson/BsonBinaryWriterTest.java
new file mode 100644
index 00000000000..0b067fc816f
--- /dev/null
+++ b/bson/src/test/unit/org/bson/BsonBinaryWriterTest.java
@@ -0,0 +1,861 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson;
+
+import org.bson.io.BasicOutputBuffer;
+import org.bson.io.ByteBufferBsonInput;
+import org.bson.types.ObjectId;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+import java.util.List;
+
+import static java.util.Arrays.asList;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+
+public class BsonBinaryWriterTest {
+
+    private static final byte FLOAT32_DTYPE = BinaryVector.DataType.FLOAT32.getValue();
+    private static final int ZERO_PADDING = 0;
+
+    private BsonBinaryWriter writer;
+    private BasicOutputBuffer buffer;
+
+    @BeforeEach
+    public void setup() {
+        buffer = new BasicOutputBuffer();
+        writer = new BsonBinaryWriter(new BsonWriterSettings(100), new BsonBinaryWriterSettings(12904), buffer);
+    }
+
+    @AfterEach
+    public void tearDown() {
+        writer.close();
+    }
+
+    @Test
+    public void shouldThrowWhenMaxDocumentSizeIsExceeded() {
+        try {
+            writer.writeStartDocument();
+            writer.writeBinaryData("b", new BsonBinary(new byte[12904]));
+            writer.writeEndDocument();
+            fail();
+        } catch (BsonMaximumSizeExceededException e) {
+            assertEquals("Document size of 12917 is larger than maximum of 12904.", e.getMessage());
+        }
+    }
+
+    @Test
+    public void shouldThrowIfAPushedMaxDocumentSizeIsExceeded() {
+        try {
+            writer.writeStartDocument();
+            writer.pushMaxDocumentSize(10);
+            writer.writeStartDocument("doc");
+            writer.writeString("s", "123456789");
+            writer.writeEndDocument();
+            fail("Should have thrown BsonMaximumSizeExceededException");
+        } catch (BsonMaximumSizeExceededException e) {
+            assertEquals("Document size of 22 is larger than maximum of 10.", e.getMessage());
+        }
+    }
+
+    @Test
+    public void shouldNotThrowIfAPoppedMaxDocumentSizeIsExceeded() {
+        writer.writeStartDocument();
+        writer.pushMaxDocumentSize(10);
+        writer.writeStartDocument("doc");
+        writer.writeEndDocument();
+        writer.popMaxDocumentSize();
+        writer.writeBinaryData("bin", new BsonBinary(new byte[256]));
+        writer.writeEndDocument();
+    }
+
+    @Test
+    public void testWriteAndReadBoolean() {
+        writer.writeStartDocument();
+        writer.writeBoolean("b1", true);
+        writer.writeBoolean("b2", false);
+        writer.writeEndDocument();
+
+        byte[] expectedValues = {15, 0, 0, 0,
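+                // 15 = total document length (little-endian int32); each boolean element is a 0x08 type tag, a cstring name and a one-byte value; the final 0 closes the document +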
8, 98, 49, 0, 1, 8, 98, 50, 0, 0, 0}; + assertArrayEquals(expectedValues, buffer.toByteArray()); + + BsonReader reader = createReaderForBytes(expectedValues); + reader.readStartDocument(); + assertThat(reader.readBsonType(), is(BsonType.BOOLEAN)); + assertEquals("b1", reader.readName()); + assertTrue(reader.readBoolean()); + assertThat(reader.readBsonType(), is(BsonType.BOOLEAN)); + assertEquals("b2", reader.readName()); + assertFalse(reader.readBoolean()); + reader.readEndDocument(); + } + + @Test + public void testWriteAndReadString() { + writer.writeStartDocument(); + + writer.writeString("s1", ""); + writer.writeString("s2", "danke"); + writer.writeString("s3", ",+\\\"<>;[]{}@#$%^&*()+_"); + writer.writeString("s4", "a\u00e9\u3042\u0430\u0432\u0431\u0434"); + + writer.writeEndDocument(); + + byte[] expectedValues = {82, 0, 0, 0, 2, 115, 49, 0, 1, 0, 0, 0, 0, 2, 115, 50, + 0, 6, 0, 0, 0, 100, 97, 110, 107, 101, 0, 2, 115, 51, 0, 23, + 0, 0, 0, 44, 43, 92, 34, 60, 62, 59, 91, 93, 123, 125, 64, 35, + 36, 37, 94, 38, 42, 40, 41, 43, 95, 0, 2, 115, 52, 0, 15, 0, + 0, 0, 97, -61, -87, -29, -127, -126, -48, -80, -48, -78, -48, -79, -48, -76, 0, + 0}; + assertArrayEquals(expectedValues, buffer.toByteArray()); + + BsonReader reader = createReaderForBytes(expectedValues); + reader.readStartDocument(); + + assertThat(reader.readBsonType(), is(BsonType.STRING)); + assertEquals("s1", reader.readName()); + assertEquals("", reader.readString()); + + assertThat(reader.readBsonType(), is(BsonType.STRING)); + assertEquals("s2", reader.readName()); + assertEquals("danke", reader.readString()); + + assertThat(reader.readBsonType(), is(BsonType.STRING)); + assertEquals("s3", reader.readName()); + assertEquals(",+\\\"<>;[]{}@#$%^&*()+_", reader.readString()); + + assertThat(reader.readBsonType(), is(BsonType.STRING)); + assertEquals("s4", reader.readName()); + assertEquals("a\u00e9\u3042\u0430\u0432\u0431\u0434", reader.readString()); + + reader.readEndDocument(); + } + + @Test + public void testWriteNumbers() { + + writer.writeStartDocument(); + + writer.writeInt32("i1", -12); + writer.writeInt32("i2", Integer.MIN_VALUE); + writer.writeInt64("i3", Long.MAX_VALUE); + writer.writeInt64("i4", 0); + + writer.writeEndDocument(); + + byte[] expectedValues = {45, 0, 0, 0, 16, 105, 49, 0, -12, -1, -1, -1, 16, 105, 50, 0, 0, 0, 0, -128, 18, + 105, + 51, 0, -1, -1, -1, -1, -1, -1, -1, 127, 18, 105, 52, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0}; + assertArrayEquals(expectedValues, buffer.toByteArray()); + } + + @Test + public void testWriteArray() { + + writer.writeStartDocument(); + + writer.writeStartArray("a1"); + writer.writeEndArray(); + writer.writeStartArray("a2"); + + writer.writeStartArray(); + writer.writeEndArray(); + + writer.writeEndArray(); + + writer.writeEndDocument(); + + byte[] expectedValues = {31, 0, 0, 0, 4, 97, 49, 0, 5, 0, 0, 0, 0, 4, 97, 50, 0, 13, 0, 0, 0, 4, 48, 0, 5, + 0, + 0, 0, 0, 0, 0}; + assertArrayEquals(expectedValues, buffer.toByteArray()); + } + + @Test + public void testWriteArrayElements() throws IOException { + ByteArrayOutputStream expectedOutput = new ByteArrayOutputStream(); + expectedOutput.write(new byte[]{ + -52, 25, 0, 0, //document length + 4, // array type + 97, 49, 0, // "a1" name + null terminator + -61, 25, 0, 0}); // array length + + + writer.writeStartDocument(); + writer.writeStartArray("a1"); + int arrayIndex = 0; + while (arrayIndex < 1100) { + writer.writeBoolean(true); + + expectedOutput.write(BsonType.BOOLEAN.getValue()); + 
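// BSON array element names are the decimal indices ("0", "1", ...), written as UTF-8 cstrings +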
expectedOutput.write(Integer.toString(arrayIndex++).getBytes(StandardCharsets.UTF_8)); + expectedOutput.write(0); // null terminator + expectedOutput.write(1); // boolean value + + writer.writeBoolean(false); + + expectedOutput.write(BsonType.BOOLEAN.getValue()); + expectedOutput.write(Integer.toString(arrayIndex++).getBytes(StandardCharsets.UTF_8)); + expectedOutput.write(0); // null terminator + expectedOutput.write(0); // boolean value + } + writer.writeEndArray(); + expectedOutput.write(0); // end of array + writer.writeEndDocument(); + expectedOutput.write(0); // end of a document + + assertArrayEquals(expectedOutput.toByteArray(), buffer.toByteArray()); + } + + @Test + public void testWriteNull() { + + writer.writeStartDocument(); + + writer.writeNull("n1"); + writer.writeName("n2"); + writer.writeNull(); + + writer.writeEndDocument(); + + byte[] expectedValues = {13, 0, 0, 0, 10, 110, 49, 0, 10, 110, 50, 0, 0}; + assertArrayEquals(expectedValues, buffer.toByteArray()); + } + + @Test + public void testWriteUndefined() { + + writer.writeStartDocument(); + + writer.writeName("u1"); + writer.writeUndefined(); + writer.writeUndefined("u2"); + + writer.writeEndDocument(); + + byte[] expectedValues = {13, 0, 0, 0, 6, 117, 49, 0, 6, 117, 50, 0, 0}; + assertArrayEquals(expectedValues, buffer.toByteArray()); + } + + @Test + public void testWriteObjectId() { + + ObjectId id = new ObjectId("50d3332018c6a1d8d1662b61"); + + writer.writeStartDocument(); + + writer.writeObjectId("_id", id); + + writer.writeEndDocument(); + + byte[] expectedValues = {22, 0, 0, 0, 7, 95, 105, 100, 0, 80, -45, 51, 32, 24, -58, -95, -40, -47, 102, + 43, + 97, 0}; + assertArrayEquals(expectedValues, buffer.toByteArray()); + } + + @Test + public void testWriteJavaScript() { + writer.writeStartDocument(); + + writer.writeJavaScript("js1", "var i = 0"); + writer.writeJavaScriptWithScope("js2", "i++"); + writer.writeStartDocument(); + + writer.writeInt32("x", 1); + + writer.writeEndDocument(); + + writer.writeEndDocument(); + + byte[] expectedValues = {53, 0, 0, 0, 13, 106, 115, 49, 0, 10, 0, 0, 0, 118, 97, 114, 32, 105, 32, 61, 32, + 48, + 0, 15, 106, 115, 50, 0, 24, 0, 0, 0, 4, 0, 0, 0, 105, 43, 43, 0, 12, 0, 0, 0, 16, + 120, 0, 1, 0, 0, 0, + 0, 0}; + assertArrayEquals(expectedValues, buffer.toByteArray()); + } + + @Test + public void testWriteMinMaxKeys() { + + writer.writeStartDocument(); + + writer.writeMaxKey("k1"); + writer.writeMinKey("k2"); + writer.writeName("k3"); + writer.writeMaxKey(); + + writer.writeEndDocument(); + + byte[] expectedValues = {17, 0, 0, 0, 127, 107, 49, 0, -1, 107, 50, 0, 127, 107, 51, 0, 0}; + assertArrayEquals(expectedValues, buffer.toByteArray()); + } + + @Test + public void testWriteBinary() { + + writer.writeStartDocument(); + + writer.writeBinaryData("b1", new BsonBinary(new byte[]{0, 0, 0, 0, 0, 0, 0, 0})); + writer.writeBinaryData("b2", new BsonBinary(BsonBinarySubType.OLD_BINARY, new byte[]{1, 1, 1, 1, 1})); + writer.writeBinaryData("b3", new BsonBinary(BsonBinarySubType.FUNCTION, new byte[]{})); + writer.writeBinaryData("b4", new BsonBinary(BsonBinarySubType.VECTOR, new byte[]{FLOAT32_DTYPE, ZERO_PADDING, + (byte) 205, (byte) 204, (byte) 140, (byte) 63})); + + writer.writeEndDocument(); + byte[] expectedValues = new byte[]{ + 64, // total document length + 0, 0, 0, + + //Binary + (byte) BsonType.BINARY.getValue(), + 98, 49, 0, // name "b1" + 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + + // Old binary + (byte) BsonType.BINARY.getValue(), + 98, 50, 0, // name "b2" + 9, 0, 0, 0, 2, 5, 0, 
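+                // subtype 0x02 (OLD_BINARY) embeds an extra int32 payload length (5) ahead of the five data bytes +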
0, 0, 1, 1, 1, 1, 1, + + // Function binary + (byte) BsonType.BINARY.getValue(), + 98, 51, 0, // name "b3" + 0, 0, 0, 0, 1, + + //Vector binary + (byte) BsonType.BINARY.getValue(), + 98, 52, 0, // name "b4" + 6, 0, 0, 0, // total length, int32 (little endian) + BsonBinarySubType.VECTOR.getValue(), FLOAT32_DTYPE, ZERO_PADDING, (byte) 205, (byte) 204, (byte) 140, 63, + + 0 //end of document + }; + + assertArrayEquals(expectedValues, buffer.toByteArray()); + } + + @Test + public void testWriteRegularExpression() { + + writer.writeStartDocument(); + + writer.writeRegularExpression("r1", new BsonRegularExpression("([01]?[0-9][0-9]?)")); + writer.writeRegularExpression("r2", new BsonRegularExpression("[ \\t]+$", "i")); + + writer.writeEndDocument(); + + byte[] expectedValues = {43, 0, 0, 0, 11, 114, 49, 0, 40, 91, 48, 49, 93, 63, 91, 48, 45, 57, 93, 91, 48, + 45, + 57, 93, 63, 41, 0, 0, 11, 114, 50, 0, 91, 32, 92, 116, 93, 43, 36, 0, 105, 0, 0}; + assertArrayEquals(expectedValues, buffer.toByteArray()); + } + + @Test + public void testWriteTimestamp() { + writer.writeStartDocument(); + + writer.writeTimestamp("t1", new BsonTimestamp(123999401, 44332)); + + writer.writeEndDocument(); + + byte[] expectedValues = {17, 0, 0, 0, 17, 116, 49, 0, 44, -83, 0, 0, -87, 20, 100, 7, 0}; + assertArrayEquals(expectedValues, buffer.toByteArray()); + } + + @Test + public void testWriteDBPointer() { + writer.writeStartDocument(); + + BsonDbPointer dbPointer = new BsonDbPointer("my.test", new ObjectId("50d3332018c6a1d8d1662b61")); + writer.writeDBPointer("pt", dbPointer); + + writer.writeEndDocument(); + + byte[] expectedValues = {33, 0, 0, 0, 12, 112, 116, 0, 8, 0, 0, 0, 109, 121, 46, 116, 101, 115, 116, 0, 80, -45, 51, 32, 24, -58, + -95, -40, -47, 102, 43, 97, 0}; + assertArrayEquals(expectedValues, buffer.toByteArray()); + + BsonReader reader = createReaderForBytes(expectedValues); + reader.readStartDocument(); + assertThat(reader.readBsonType(), is(BsonType.DB_POINTER)); + assertEquals("pt", reader.readName()); + assertEquals(dbPointer, reader.readDBPointer()); + reader.readEndDocument(); + } + + @Test + public void testNullByteInTopLevelName() { + writer.writeStartDocument(); + writer.writeName("a\u0000b"); + assertThrows(BsonSerializationException.class, () -> writer.writeBoolean(true)); + } + + @Test + public void testNullByteInNestedName() { + writer.writeStartDocument(); + writer.writeName("nested"); + writer.writeStartDocument(); + writer.writeName("a\u0000b"); + assertThrows(BsonSerializationException.class, () -> writer.writeBoolean(true)); + } + + @Test + public void testNullByteInRegularExpressionPattern() { + writer.writeStartDocument(); + writer.writeName("regex"); + assertThrows(BsonSerializationException.class, () -> writer.writeRegularExpression(new BsonRegularExpression("a\u0000b"))); + } + + @Test + public void testNullByteInRegularExpressionOptions() { + writer.writeStartDocument(); + writer.writeName("regex"); + assertThrows(BsonSerializationException.class, () -> writer.writeRegularExpression(new BsonRegularExpression("a*", "i\u0000"))); + } + + @Test + //CHECKSTYLE:OFF + public void testWriteRead() throws IOException { + ObjectId oid1 = new ObjectId(); + + writer.writeStartDocument(); + { + writer.writeBoolean("b1", true); + writer.writeBoolean("b2", false); + writer.writeStartArray("a1"); + { + writer.writeString("danke"); + writer.writeString(""); + } + writer.writeEndArray(); + writer.writeStartDocument("d1"); + { + writer.writeDouble("do", 60); + writer.writeInt32("i32", 40); + 
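// Long.MAX_VALUE exercises the full eight-byte int64 range +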
writer.writeInt64("i64", Long.MAX_VALUE); + } + writer.writeEndDocument(); + writer.writeJavaScriptWithScope("js1", "print x"); + writer.writeStartDocument(); + { + writer.writeInt32("x", 1); + } + writer.writeEndDocument(); + writer.writeObjectId("oid1", oid1); + } + writer.writeEndDocument(); + + assertEquals(139, buffer.getPosition()); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + buffer.pipe(baos); + + ByteBufferBsonInput basicInputBuffer = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(baos.toByteArray()))); + + try (BsonBinaryReader reader = new BsonBinaryReader(basicInputBuffer)) { + assertEquals(BsonType.DOCUMENT, reader.readBsonType()); + reader.readStartDocument(); + { + assertEquals(BsonType.BOOLEAN, reader.readBsonType()); + assertEquals("b1", reader.readName()); + assertTrue(reader.readBoolean()); + + assertEquals(BsonType.BOOLEAN, reader.readBsonType()); + assertEquals("b2", reader.readName()); + assertFalse(reader.readBoolean()); + + assertEquals(BsonType.ARRAY, reader.readBsonType()); + assertEquals("a1", reader.readName()); + reader.readStartArray(); + { + assertEquals(BsonType.STRING, reader.readBsonType()); + assertEquals("danke", reader.readString()); + + assertEquals(BsonType.STRING, reader.readBsonType()); + assertEquals("", reader.readString()); + } + assertEquals(BsonType.END_OF_DOCUMENT, reader.readBsonType()); + reader.readEndArray(); + assertEquals("d1", reader.readName()); + reader.readStartDocument(); + { + assertEquals(BsonType.DOUBLE, reader.readBsonType()); + assertEquals("do", reader.readName()); + assertEquals(60, reader.readDouble(), 0); + + assertEquals(BsonType.INT32, reader.readBsonType()); + assertEquals("i32", reader.readName()); + assertEquals(40, reader.readInt32()); + + assertEquals(BsonType.INT64, reader.readBsonType()); + assertEquals("i64", reader.readName()); + assertEquals(Long.MAX_VALUE, reader.readInt64()); + } + assertEquals(BsonType.END_OF_DOCUMENT, reader.readBsonType()); + reader.readEndDocument(); + + assertEquals(BsonType.JAVASCRIPT_WITH_SCOPE, reader.readBsonType()); + assertEquals("js1", reader.readName()); + assertEquals("print x", reader.readJavaScriptWithScope()); + + reader.readStartDocument(); + { + assertEquals(BsonType.INT32, reader.readBsonType()); + assertEquals("x", reader.readName()); + assertEquals(1, reader.readInt32()); + } + assertEquals(BsonType.END_OF_DOCUMENT, reader.readBsonType()); + reader.readEndDocument(); + + assertEquals(BsonType.OBJECT_ID, reader.readBsonType()); + assertEquals("oid1", reader.readName()); + assertEquals(oid1, reader.readObjectId()); + + assertEquals(BsonType.END_OF_DOCUMENT, reader.readBsonType()); + reader.readEndDocument(); + + } + } + } + //CHECKSTYLE:ON + + @Test + public void testPipe() { + writer.writeStartDocument(); + writer.writeBoolean("a", true); + writer.writeEndDocument(); + + byte[] bytes = buffer.toByteArray(); + + BasicOutputBuffer newBuffer = new BasicOutputBuffer(); + try (BsonBinaryWriter newWriter = new BsonBinaryWriter(newBuffer)) { + try (BsonBinaryReader reader = new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(bytes))))) { + newWriter.pipe(reader); + } + } + assertArrayEquals(bytes, newBuffer.toByteArray()); + } + + @Test + public void testPipeNestedDocument() { + // { + // "value" : { "a" : true}, + // "b" : 2 + // } + writer.writeStartDocument(); + writer.writeStartDocument("value"); + writer.writeBoolean("a", true); + writer.writeEndDocument(); + writer.writeInt32("b", 2); + writer.writeEndDocument(); + + byte[] 
bytes = buffer.toByteArray(); + + BasicOutputBuffer newBuffer = new BasicOutputBuffer(); + BsonBinaryWriter newWriter = new BsonBinaryWriter(newBuffer); + BsonBinaryReader reader1 = new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(bytes)))); + reader1.readStartDocument(); + reader1.readName(); + + newWriter.pipe(reader1); //pipe {'a':true} to writer + + assertEquals(BsonType.INT32, reader1.readBsonType()); //continue reading from the same reader + assertEquals("b", reader1.readName()); + assertEquals(2, reader1.readInt32()); + + BsonBinaryReader reader2 = new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(newBuffer + .toByteArray())))); + + reader2.readStartDocument(); //checking what writer piped + assertEquals(BsonType.BOOLEAN, reader2.readBsonType()); + assertEquals("a", reader2.readName()); + assertTrue(reader2.readBoolean()); + reader2.readEndDocument(); + } + + + @Test + public void testPipeDocumentIntoArray() { + writer.writeStartDocument(); + writer.writeEndDocument(); + + byte[] bytes = buffer.toByteArray(); + + BasicOutputBuffer newBuffer = new BasicOutputBuffer(); + BsonBinaryWriter newWriter = new BsonBinaryWriter(newBuffer); + BsonBinaryReader reader1 = new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(bytes)))); + + newWriter.writeStartDocument(); + newWriter.writeStartArray("a"); + newWriter.pipe(reader1); + newWriter.writeEndArray(); + newWriter.writeEndDocument(); + + BsonBinaryReader reader2 = new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(newBuffer + .toByteArray())))); + + //checking what writer piped + reader2.readStartDocument(); + reader2.readStartArray(); + reader2.readStartDocument(); + reader2.readEndDocument(); + reader2.readEndArray(); + reader2.readEndDocument(); + } + + @Test + public void testPipeDocumentIntoDocument() { + writer.writeStartDocument(); + writer.writeString("str", "value"); + writer.writeEndDocument(); + + byte[] bytes = buffer.toByteArray(); + + BasicOutputBuffer newBuffer = new BasicOutputBuffer(); + BsonBinaryWriter newWriter = new BsonBinaryWriter(newBuffer); + BsonBinaryReader reader1 = new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(bytes)))); + + newWriter.writeStartDocument(); + newWriter.writeName("doc"); + newWriter.pipe(reader1); + newWriter.writeEndDocument(); + + BsonBinaryReader reader2 = new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(newBuffer + .toByteArray())))); + + //checking what writer piped + reader2.readStartDocument(); + assertEquals("doc", reader2.readName()); + reader2.readStartDocument(); + assertEquals("value", reader2.readString("str")); + reader2.readEndDocument(); + reader2.readEndDocument(); + } + + @Test + public void testPipeDocumentIntoTopLevel() { + writer.writeStartDocument(); + writer.writeString("str", "value"); + writer.writeEndDocument(); + + byte[] bytes = buffer.toByteArray(); + + BasicOutputBuffer newBuffer = new BasicOutputBuffer(); + BsonBinaryWriter newWriter = new BsonBinaryWriter(newBuffer); + BsonBinaryReader reader1 = new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(bytes)))); + + newWriter.pipe(reader1); + + BsonBinaryReader reader2 = new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(newBuffer + .toByteArray())))); + + //checking what writer piped + reader2.readStartDocument(); + assertEquals("value", reader2.readString("str")); + reader2.readEndDocument(); + } + + @Test + public 
void testPipeDocumentIntoScopeDocument() { + writer.writeStartDocument(); + writer.writeInt32("i", 0); + writer.writeEndDocument(); + + byte[] bytes = buffer.toByteArray(); + + BasicOutputBuffer newBuffer = new BasicOutputBuffer(); + BsonBinaryWriter newWriter = new BsonBinaryWriter(newBuffer); + BsonBinaryReader reader1 = new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(bytes)))); + + newWriter.writeStartDocument(); + newWriter.writeJavaScriptWithScope("js", "i++"); + newWriter.pipe(reader1); + newWriter.writeEndDocument(); + + BsonBinaryReader reader2 = new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(newBuffer + .toByteArray())))); + + //checking what writer piped + reader2.readStartDocument(); + reader2.readJavaScriptWithScope("js"); + reader2.readStartDocument(); + assertEquals(0, reader2.readInt32("i")); + reader2.readEndDocument(); + reader2.readEndDocument(); + } + + @Test + public void testPipeWithExtraElements() { + writer.writeStartDocument(); + writer.writeBoolean("a", true); + writer.writeString("$db", "test"); + writer.writeStartDocument("$readPreference"); + writer.writeString("mode", "primary"); + writer.writeEndDocument(); + writer.writeEndDocument(); + + byte[] bytes = buffer.toByteArray(); + + BasicOutputBuffer pipedBuffer = new BasicOutputBuffer(); + BsonBinaryWriter pipedWriter = new BsonBinaryWriter(new BsonWriterSettings(100), + new BsonBinaryWriterSettings(1024), pipedBuffer); + + pipedWriter.writeStartDocument(); + pipedWriter.writeBoolean("a", true); + pipedWriter.writeEndDocument(); + + List extraElements = asList( + new BsonElement("$db", new BsonString("test")), + new BsonElement("$readPreference", new BsonDocument("mode", new BsonString("primary"))) + ); + + BasicOutputBuffer newBuffer = new BasicOutputBuffer(); + try (BsonBinaryWriter newWriter = new BsonBinaryWriter(newBuffer)) { + try (BsonBinaryReader reader = new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(pipedBuffer.toByteArray()))))) { + newWriter.pipe(reader, extraElements); + } + } + assertArrayEquals(bytes, newBuffer.toByteArray()); + } + + @Test + public void testPipeOfNestedDocumentWithExtraElements() { + writer.writeStartDocument(); + writer.writeStartDocument("nested"); + + writer.writeBoolean("a", true); + writer.writeString("$db", "test"); + writer.writeStartDocument("$readPreference"); + writer.writeString("mode", "primary"); + writer.writeEndDocument(); + writer.writeEndDocument(); + + writer.writeBoolean("b", true); + writer.writeEndDocument(); + + byte[] bytes = buffer.toByteArray(); + + BasicOutputBuffer pipedBuffer = new BasicOutputBuffer(); + BsonBinaryWriter pipedWriter = new BsonBinaryWriter(new BsonWriterSettings(100), + new BsonBinaryWriterSettings(1024), pipedBuffer); + + pipedWriter.writeStartDocument(); + pipedWriter.writeBoolean("a", true); + pipedWriter.writeEndDocument(); + + List extraElements = asList( + new BsonElement("$db", new BsonString("test")), + new BsonElement("$readPreference", new BsonDocument("mode", new BsonString("primary"))) + ); + + BasicOutputBuffer newBuffer = new BasicOutputBuffer(); + try (BsonBinaryWriter newWriter = new BsonBinaryWriter(newBuffer)) { + try (BsonBinaryReader reader = new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(pipedBuffer.toByteArray()))))) { + newWriter.writeStartDocument(); + newWriter.writeName("nested"); + newWriter.pipe(reader, extraElements); + newWriter.writeBoolean("b", true); + newWriter.writeEndDocument(); + } + } + 
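// the piped nested document plus extra elements must match the directly written bytes exactly +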
byte[] actualBytes = newBuffer.toByteArray(); + assertArrayEquals(bytes, actualBytes); + } + + @Test + public void testPipeOfDocumentWithInvalidSize() { + byte[] bytes = {4, 0, 0, 0}; // minimum document size is 5; + + BasicOutputBuffer newBuffer = new BasicOutputBuffer(); + try (BsonBinaryWriter newWriter = new BsonBinaryWriter(newBuffer)) { + try (BsonBinaryReader reader = new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(bytes))))) { + newWriter.pipe(reader); + fail("Pipe is expected to fail with document size is < 5"); + } catch (BsonSerializationException e) { + // expected + } + } + + } + + // CHECKSTYLE:OFF + @Test + public void testMarkAndReset() throws IOException { + writer.writeStartDocument(); + writer.writeStartArray("a"); + { + writer.writeStartDocument(); + writer.writeInt32("i", 1); + writer.writeEndDocument(); + } + writer.mark(); + { + writer.writeStartDocument(); + writer.writeInt32("i", 2); + writer.writeEndDocument(); + } + writer.reset(); + { + writer.writeStartDocument(); + writer.writeInt32("i", 3); + writer.writeEndDocument(); + } + writer.writeEndArray(); + writer.writeEndDocument(); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + buffer.pipe(baos); + + ByteBufferBsonInput basicInputBuffer = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(baos.toByteArray()))); + + try (BsonBinaryReader reader = new BsonBinaryReader(basicInputBuffer)) { + reader.readStartDocument(); + reader.readName("a"); + reader.readStartArray(); + { + reader.readStartDocument(); + assertEquals(1, reader.readInt32("i")); + reader.readEndDocument(); + } + { + reader.readStartDocument(); + assertEquals(3, reader.readInt32("i")); + reader.readEndDocument(); + } + reader.readEndArray(); + reader.readEndDocument(); + } + } + // CHECKSTYLE:ON + + private BsonBinaryReader createReaderForBytes(final byte[] bytes) { + return new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(bytes)))); + } +} diff --git a/bson/src/test/unit/org/bson/BsonDocumentReaderSpecification.groovy b/bson/src/test/unit/org/bson/BsonDocumentReaderSpecification.groovy new file mode 100644 index 00000000000..e79fcc4f1ca --- /dev/null +++ b/bson/src/test/unit/org/bson/BsonDocumentReaderSpecification.groovy @@ -0,0 +1,101 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson + +import org.bson.codecs.BsonDocumentCodec +import org.bson.codecs.DecoderContext +import org.bson.types.Decimal128 +import org.bson.types.ObjectId +import spock.lang.Shared +import spock.lang.Specification + +class BsonDocumentReaderSpecification extends Specification { + + @Shared BsonDocument nullDoc + + def setup() { + nullDoc = new BsonDocument([ + new BsonElement('null', new BsonNull()) + ]) + } + + + def 'should read all types'() { + given: + def doc = new BsonDocument( + [ + new BsonElement('null', new BsonNull()), + new BsonElement('int32', new BsonInt32(42)), + new BsonElement('int64', new BsonInt64(52L)), + new BsonElement('decimal128', new BsonDecimal128(Decimal128.parse('1.0'))), + new BsonElement('boolean', new BsonBoolean(true)), + new BsonElement('date', new BsonDateTime(new Date().getTime())), + new BsonElement('double', new BsonDouble(62.0)), + new BsonElement('string', new BsonString('the fox ...')), + new BsonElement('minKey', new BsonMinKey()), + new BsonElement('maxKey', new BsonMaxKey()), + new BsonElement('dbPointer', new BsonDbPointer('test.test', new ObjectId())), + new BsonElement('code', new BsonJavaScript('int i = 0;')), + new BsonElement('codeWithScope', new BsonJavaScriptWithScope('x', new BsonDocument('x', new BsonInt32(1)))), + new BsonElement('objectId', new BsonObjectId(new ObjectId())), + new BsonElement('regex', new BsonRegularExpression('^test.*regex.*xyz$', 'i')), + new BsonElement('symbol', new BsonSymbol('ruby stuff')), + new BsonElement('timestamp', new BsonTimestamp(0x12345678, 5)), + new BsonElement('undefined', new BsonUndefined()), + new BsonElement('binary', new BsonBinary((byte) 80, [5, 4, 3, 2, 1] as byte[])), + new BsonElement('array', new BsonArray([new BsonInt32(1), new BsonInt64(2L), new BsonBoolean(true), + new BsonArray([new BsonInt32(1), new BsonInt32(2), new BsonInt32(3)]), + new BsonDocument('a', new BsonInt64(2L))])), + new BsonElement('document', new BsonDocument('a', new BsonInt32(1))) + ]) + + when: + def decodedDoc = new BsonDocumentCodec().decode(new BsonDocumentReader(doc), DecoderContext.builder().build()) + + then: + decodedDoc == doc + } + + def 'should fail, ReadBSONType can only be called when State is TYPE, not VALUE'() { + given: + def reader = new BsonDocumentReader(nullDoc) + + when: + reader.readStartDocument() + reader.readBsonType() + reader.readName() + reader.readBsonType() + + then: + thrown(BsonInvalidOperationException) + } + + def 'should fail, ReadBSONType can only be called when State is TYPE, not NAME'() { + given: + def reader = new BsonDocumentReader(nullDoc) + + when: + reader.readStartDocument() + reader.readBsonType() + reader.readBsonType() + + then: + thrown(BsonInvalidOperationException) + } + + +} diff --git a/bson/src/test/unit/org/bson/BsonDocumentSpecification.groovy b/bson/src/test/unit/org/bson/BsonDocumentSpecification.groovy new file mode 100644 index 00000000000..70004b654d5 --- /dev/null +++ b/bson/src/test/unit/org/bson/BsonDocumentSpecification.groovy @@ -0,0 +1,389 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson + +import org.bson.codecs.BsonDocumentCodec +import org.bson.codecs.DecoderContext +import org.bson.types.Decimal128 +import org.bson.types.ObjectId +import spock.lang.Specification + +import static org.bson.BsonHelper.documentWithValuesOfEveryType + +class BsonDocumentSpecification extends Specification { + + def 'conversion methods should behave correctly for the happy path'() { + given: + + def bsonNull = new BsonNull() + def bsonInt32 = new BsonInt32(42) + def bsonInt64 = new BsonInt64(52L) + def bsonDecimal128 = new BsonDecimal128(Decimal128.parse('1.0')) + def bsonBoolean = new BsonBoolean(true) + def bsonDateTime = new BsonDateTime(new Date().getTime()) + def bsonDouble = new BsonDouble(62.0) + def bsonString = new BsonString('the fox ...') + def minKey = new BsonMinKey() + def maxKey = new BsonMaxKey() + def javaScript = new BsonJavaScript('int i = 0;') + def objectId = new BsonObjectId(new ObjectId()) + def scope = new BsonJavaScriptWithScope('int x = y', new BsonDocument('y', new BsonInt32(1))) + def regularExpression = new BsonRegularExpression('^test.*regex.*xyz$', 'i') + def symbol = new BsonSymbol('ruby stuff') + def timestamp = new BsonTimestamp(0x12345678, 5) + def undefined = new BsonUndefined() + def binary = new BsonBinary((byte) 80, [5, 4, 3, 2, 1] as byte[]) + def bsonArray = new BsonArray([new BsonInt32(1), new BsonInt64(2L), new BsonBoolean(true), + new BsonArray([new BsonInt32(1), new BsonInt32(2), new BsonInt32(3)]), + new BsonDocument('a', new BsonInt64(2L))]) + def bsonDocument = new BsonDocument('a', new BsonInt32(1)) + + def root = new BsonDocument( + [ + new BsonElement('null', bsonNull), + new BsonElement('int32', bsonInt32), + new BsonElement('int64', bsonInt64), + new BsonElement('decimal128', bsonDecimal128), + new BsonElement('boolean', bsonBoolean), + new BsonElement('date', bsonDateTime), + new BsonElement('double', bsonDouble), + new BsonElement('string', bsonString), + new BsonElement('minKey', minKey), + new BsonElement('maxKey', maxKey), + new BsonElement('javaScript', javaScript), + new BsonElement('objectId', objectId), + new BsonElement('codeWithScope', scope), + new BsonElement('regex', regularExpression), + new BsonElement('symbol', symbol), + new BsonElement('timestamp', timestamp), + new BsonElement('undefined', undefined), + new BsonElement('binary', binary), + new BsonElement('array', bsonArray), + new BsonElement('document', bsonDocument) + ]) + + expect: + root.isNull('null') + root.getInt32('int32').is(bsonInt32) + root.getInt64('int64').is(bsonInt64) + root.getDecimal128('decimal128').is(bsonDecimal128) + root.getBoolean('boolean').is(bsonBoolean) + root.getDateTime('date').is(bsonDateTime) + root.getDouble('double').is(bsonDouble) + root.getString('string').is(bsonString) + root.getObjectId('objectId').is(objectId) + root.getRegularExpression('regex').is(regularExpression) + root.getBinary('binary').is(binary) + root.getTimestamp('timestamp').is(timestamp) + root.getArray('array').is(bsonArray) + root.getDocument('document').is(bsonDocument) + root.getNumber('int32').is(bsonInt32) + 
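// getNumber accepts int32, int64 and double alike +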
root.getNumber('int64').is(bsonInt64) + root.getNumber('double').is(bsonDouble) + + root.getInt32('int32', new BsonInt32(2)).is(bsonInt32) + root.getInt64('int64', new BsonInt64(4)).is(bsonInt64) + root.getDecimal128('decimal128', new BsonDecimal128(Decimal128.parse('4.0'))).is(bsonDecimal128) + root.getDouble('double', new BsonDouble(343.0)).is(bsonDouble) + root.getBoolean('boolean', new BsonBoolean(false)).is(bsonBoolean) + root.getDateTime('date', new BsonDateTime(3453)).is(bsonDateTime) + root.getString('string', new BsonString('df')).is(bsonString) + root.getObjectId('objectId', new BsonObjectId(new ObjectId())).is(objectId) + root.getRegularExpression('regex', new BsonRegularExpression('^foo', 'i')).is(regularExpression) + root.getBinary('binary', new BsonBinary(new byte[5])).is(binary) + root.getTimestamp('timestamp', new BsonTimestamp(343, 23)).is(timestamp) + root.getArray('array', new BsonArray()).is(bsonArray) + root.getDocument('document', new BsonDocument()).is(bsonDocument) + root.getNumber('int32', new BsonInt32(2)).is(bsonInt32) + root.getNumber('int64', new BsonInt32(2)).is(bsonInt64) + root.getNumber('double', new BsonInt32(2)).is(bsonDouble) + + root.get('int32').asInt32().is(bsonInt32) + root.get('int64').asInt64().is(bsonInt64) + root.get('decimal128').asDecimal128().is(bsonDecimal128) + root.get('boolean').asBoolean().is(bsonBoolean) + root.get('date').asDateTime().is(bsonDateTime) + root.get('double').asDouble().is(bsonDouble) + root.get('string').asString().is(bsonString) + root.get('objectId').asObjectId().is(objectId) + root.get('timestamp').asTimestamp().is(timestamp) + root.get('binary').asBinary().is(binary) + root.get('array').asArray().is(bsonArray) + root.get('document').asDocument().is(bsonDocument) + + root.isInt32('int32') + root.isNumber('int32') + root.isInt64('int64') + root.isDecimal128('decimal128') + root.isNumber('int64') + root.isBoolean('boolean') + root.isDateTime('date') + root.isDouble('double') + root.isNumber('double') + root.isString('string') + root.isObjectId('objectId') + root.isTimestamp('timestamp') + root.isBinary('binary') + root.isArray('array') + root.isDocument('document') + } + + def 'is methods should return false for missing keys'() { + given: + def root = new BsonDocument() + + expect: + !root.isNull('null') + !root.isNumber('number') + !root.isInt32('int32') + !root.isInt64('int64') + !root.isDecimal128('decimal128') + !root.isBoolean('boolean') + !root.isDateTime('date') + !root.isDouble('double') + !root.isString('string') + !root.isObjectId('objectId') + !root.isTimestamp('timestamp') + !root.isBinary('binary') + !root.isArray('array') + !root.isDocument('document') + } + + def 'get methods should return default values for missing keys'() { + given: + def bsonNull = new BsonNull() + def bsonInt32 = new BsonInt32(42) + def bsonInt64 = new BsonInt64(52L) + def bsonDecimal128 = new BsonDecimal128(Decimal128.parse('1.0')) + def bsonBoolean = new BsonBoolean(true) + def bsonDateTime = new BsonDateTime(new Date().getTime()) + def bsonDouble = new BsonDouble(62.0) + def bsonString = new BsonString('the fox ...') + def objectId = new BsonObjectId(new ObjectId()) + def regularExpression = new BsonRegularExpression('^test.*regex.*xyz$', 'i') + def timestamp = new BsonTimestamp(0x12345678, 5) + def binary = new BsonBinary((byte) 80, [5, 4, 3, 2, 1] as byte[]) + def bsonArray = new BsonArray([new BsonInt32(1), new BsonInt64(2L), new BsonBoolean(true), + new BsonDecimal128(Decimal128.parse('4.0')), + new BsonArray([new BsonInt32(1), 
new BsonInt32(2), new BsonInt32(3)]), + new BsonDocument('a', new BsonInt64(2L))]) + def bsonDocument = new BsonDocument('a', new BsonInt32(1)) + def root = new BsonDocument() + + expect: + root.get('m', bsonNull).is(bsonNull) + root.getArray('m', bsonArray).is(bsonArray) + root.getBoolean('m', bsonBoolean).is(bsonBoolean) + root.getDateTime('m', bsonDateTime).is(bsonDateTime) + root.getDocument('m', bsonDocument).is(bsonDocument) + root.getDouble('m', bsonDouble).is(bsonDouble) + root.getInt32('m', bsonInt32).is(bsonInt32) + root.getInt64('m', bsonInt64).is(bsonInt64) + root.getDecimal128('m', bsonDecimal128).is(bsonDecimal128) + root.getString('m', bsonString).is(bsonString) + root.getObjectId('m', objectId).is(objectId) + root.getString('m', bsonString).is(bsonString) + root.getTimestamp('m', timestamp).is(timestamp) + root.getNumber('m', bsonInt32).is(bsonInt32) + root.getRegularExpression('m', regularExpression).is(regularExpression) + root.getBinary('m', binary).is(binary) + } + + def 'clone should make a deep copy of all mutable BsonValue types'() { + given: + def document = new BsonDocument('d', new BsonDocument().append('i2', new BsonInt32(1))) + .append('i', new BsonInt32(2)) + .append('a', new BsonArray([new BsonInt32(3), + new BsonArray([new BsonInt32(11)]), + new BsonDocument('i3', new BsonInt32(6)), + new BsonBinary([1, 2, 3] as byte[]), + new BsonJavaScriptWithScope('code', new BsonDocument('a', new BsonInt32(4)))])) + .append('b', new BsonBinary([1, 2, 3] as byte[])) + .append('js', new BsonJavaScriptWithScope('code', new BsonDocument('a', new BsonInt32(4)))) + + when: + def clone = document.clone() + + then: + document == clone + !clone.is(document) + clone.get('i').is(document.get('i')) + !clone.get('d').is(document.get('d')) + !clone.get('a').is(document.get('a')) + !clone.get('b').is(document.get('b')) + !clone.get('b').asBinary().getData().is(document.get('b').asBinary().getData()) + !clone.get('js').asJavaScriptWithScope().getScope().is(document.get('js').asJavaScriptWithScope().getScope()) + + clone.get('a').asArray()[0].is(document.get('a').asArray()[0]) + !clone.get('a').asArray()[1].is(document.get('a').asArray()[1]) + !clone.get('a').asArray()[2].is(document.get('a').asArray()[2]) + !clone.get('a').asArray()[3].is(document.get('a').asArray()[3]) + !clone.get('a').asArray()[3].asBinary().getData().is(document.get('a').asArray()[3].asBinary().getData()) + !clone.get('a').asArray()[4].is(document.get('a').asArray()[4]) + !clone.get('a').asArray()[4].asJavaScriptWithScope().getScope().is(document.get('a').asArray()[4].asJavaScriptWithScope() + .getScope()) + } + + @SuppressWarnings('UnnecessaryObjectReferences') + def 'get methods should throw if key is absent'() { + given: + def root = new BsonDocument() + + when: + root.getInt32('int32') + + then: + thrown(BsonInvalidOperationException) + + when: + root.getInt64('int64') + + then: + thrown(BsonInvalidOperationException) + + when: + root.getDecimal128('decimal128') + + then: + thrown(BsonInvalidOperationException) + + when: + root.getBoolean('boolean') + + then: + thrown(BsonInvalidOperationException) + + when: + root.getDateTime('date') + + then: + thrown(BsonInvalidOperationException) + + when: + root.getDouble('double') + + then: + thrown(BsonInvalidOperationException) + + when: + root.getString('string') + + then: + thrown(BsonInvalidOperationException) + + when: + root.getObjectId('objectId') + + then: + thrown(BsonInvalidOperationException) + + when: + root.getRegularExpression('regex') + + then: + 
thrown(BsonInvalidOperationException) + + when: + root.getBinary('binary') + + then: + thrown(BsonInvalidOperationException) + + when: + root.getTimestamp('timestamp') + + then: + thrown(BsonInvalidOperationException) + + when: + root.getArray('array') + + then: + thrown(BsonInvalidOperationException) + + when: + root.getDocument('document') + + then: + thrown(BsonInvalidOperationException) + + when: + root.getNumber('int32') + + then: + thrown(BsonInvalidOperationException) + } + + def 'should get first key'() { + given: + def document = new BsonDocument('i', new BsonInt32(2)) + + expect: + document.getFirstKey() == 'i' + } + + def 'getFirstKey should throw NoSuchElementException if the document is empty'() { + given: + def document = new BsonDocument() + + when: + document.getFirstKey() + + then: + thrown(NoSuchElementException) + } + + def 'should create BsonReader'() { + given: + def document = documentWithValuesOfEveryType() + + when: + def reader = document.asBsonReader() + + then: + new BsonDocumentCodec().decode(reader, DecoderContext.builder().build()) == document + + cleanup: + reader.close() + } + + def 'should serialize and deserialize'() { + given: + def document = new BsonDocument('d', new BsonDocument().append('i2', new BsonInt32(1))) + .append('i', new BsonInt32(2)) + .append('d', new BsonDecimal128(Decimal128.parse('1.0'))) + .append('a', new BsonArray([new BsonInt32(3), + new BsonArray([new BsonInt32(11)]), + new BsonDocument('i3', new BsonInt32(6)), + new BsonBinary([1, 2, 3] as byte[]), + new BsonJavaScriptWithScope('code', new BsonDocument('a', new BsonInt32(4)))])) + .append('b', new BsonBinary([1, 2, 3] as byte[])) + .append('js', new BsonJavaScriptWithScope('code', new BsonDocument('a', new BsonInt32(4)))) + + def baos = new ByteArrayOutputStream() + def oos = new ObjectOutputStream(baos) + + when: + oos.writeObject(document) + def bais = new ByteArrayInputStream(baos.toByteArray()) + def ois = new ObjectInputStream(bais) + def deserializedDocument = ois.readObject() + + then: + document == deserializedDocument + } +} diff --git a/bson/src/test/unit/org/bson/BsonDocumentTest.java b/bson/src/test/unit/org/bson/BsonDocumentTest.java new file mode 100644 index 00000000000..32d56166f12 --- /dev/null +++ b/bson/src/test/unit/org/bson/BsonDocumentTest.java @@ -0,0 +1,108 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson; + +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; +import org.bson.json.JsonMode; +import org.bson.json.JsonReader; +import org.bson.json.JsonWriter; +import org.bson.json.JsonWriterSettings; +import org.junit.jupiter.api.Test; + +import java.io.StringWriter; +import java.util.Arrays; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; + +// Don't convert to Spock, as Groovy intercepts equals/hashCode methods that we are trying to test +public class BsonDocumentTest { + private final BsonDocument emptyDocument = new BsonDocument(); + private final BsonDocument emptyRawDocument = new RawBsonDocument(emptyDocument, new BsonDocumentCodec()); + private final BsonDocument document = new BsonDocument() + .append("a", new BsonInt32(1)) + .append("b", new BsonInt32(2)) + .append("c", new BsonDocument("x", BsonBoolean.TRUE)) + .append("d", new BsonArray(Arrays.asList(new BsonDocument("y", + BsonBoolean.FALSE), + new BsonInt32(1)))); + + private final BsonDocument rawDocument = new RawBsonDocument(document, new BsonDocumentCodec()); + + @Test + public void shouldBeEqualToItself() { + assertEquals(emptyDocument, emptyDocument); + assertEquals(document, document); + } + + @Test + public void shouldBeEqualToEquivalentBsonDocument() { + assertEquals(emptyDocument, emptyRawDocument); + assertEquals(document, rawDocument); + assertEquals(emptyRawDocument, emptyDocument); + assertEquals(rawDocument, document); + } + + @Test + public void shouldNotBeEqualToDifferentBsonDocument() { + // expect + assertNotEquals(emptyDocument, document); + assertNotEquals(document, emptyRawDocument); + assertNotEquals(document, emptyRawDocument); + assertNotEquals(emptyRawDocument, document); + assertNotEquals(rawDocument, emptyDocument); + } + + @Test + public void shouldHaveSameHashCodeAsEquivalentBsonDocument() { + assertEquals(emptyDocument.hashCode(), new BsonDocument().hashCode()); + assertEquals(emptyDocument.hashCode(), emptyRawDocument.hashCode()); + assertEquals(document.hashCode(), rawDocument.hashCode()); + } + + @Test + public void toJsonShouldReturnEquivalent() { + assertEquals(new BsonDocumentCodec().decode(new JsonReader(document.toJson()), DecoderContext.builder().build()), document); + } + + @Test + public void toJsonShouldRespectDefaultJsonWriterSettings() { + StringWriter writer = new StringWriter(); + new BsonDocumentCodec().encode(new JsonWriter(writer), document, EncoderContext.builder().build()); + assertEquals(writer.toString(), document.toJson()); + } + + @Test + public void toJsonShouldRespectJsonWriterSettings() { + StringWriter writer = new StringWriter(); + JsonWriterSettings settings = JsonWriterSettings.builder().outputMode(JsonMode.SHELL).build(); + new BsonDocumentCodec().encode(new JsonWriter(writer, settings), document, EncoderContext.builder().build()); + assertEquals(writer.toString(), document.toJson(settings)); + } + + @Test + public void toStringShouldEqualToJson() { + assertEquals(document.toJson(), document.toString()); + } + + @Test + public void shouldParseJson() { + assertEquals(new BsonDocument("a", new BsonInt32(1)), BsonDocument.parse("{\"a\" : 1}")); + } +} diff --git a/bson/src/test/unit/org/bson/BsonDocumentWrapperSpecification.groovy b/bson/src/test/unit/org/bson/BsonDocumentWrapperSpecification.groovy new file mode 100644 index 00000000000..6c11a99f4bd --- /dev/null +++ 
b/bson/src/test/unit/org/bson/BsonDocumentWrapperSpecification.groovy @@ -0,0 +1,47 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson + +import org.bson.codecs.DocumentCodec +import spock.lang.Specification + +import static java.util.Arrays.asList + +class BsonDocumentWrapperSpecification extends Specification { + def document = new Document() + .append('a', 1) + .append('b', 2) + .append('c', asList('x', true)) + .append('d', asList(new Document('y', false), 1)) + + def wrapper = new BsonDocumentWrapper(document, new DocumentCodec()) + + def 'should serialize and deserialize'() { + given: + def baos = new ByteArrayOutputStream() + def oos = new ObjectOutputStream(baos) + + when: + oos.writeObject(wrapper) + def bais = new ByteArrayInputStream(baos.toByteArray()) + def ois = new ObjectInputStream(bais) + def deserializedDocument = ois.readObject() + + then: + wrapper == deserializedDocument + } +} diff --git a/bson/src/test/unit/org/bson/BsonDocumentWriterSpecification.groovy b/bson/src/test/unit/org/bson/BsonDocumentWriterSpecification.groovy new file mode 100644 index 00000000000..924f1f66368 --- /dev/null +++ b/bson/src/test/unit/org/bson/BsonDocumentWriterSpecification.groovy @@ -0,0 +1,67 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson + +import org.bson.codecs.BsonDocumentCodec +import org.bson.codecs.EncoderContext +import spock.lang.Specification + +import static org.bson.BsonHelper.documentWithValuesOfEveryType + +class BsonDocumentWriterSpecification extends Specification { + + def 'should write all types'() { + when: + def encodedDoc = new BsonDocument() + new BsonDocumentCodec().encode(new BsonDocumentWriter(encodedDoc), documentWithValuesOfEveryType(), + EncoderContext.builder().build()) + + then: + encodedDoc == documentWithValuesOfEveryType() + } + + def 'should pipe all types'() { + given: + def document = new BsonDocument() + def reader = new BsonDocumentReader(documentWithValuesOfEveryType()) + def writer = new BsonDocumentWriter(document) + + when: + writer.pipe(reader) + + then: + document == documentWithValuesOfEveryType() + } + + def 'should pipe all types with extra elements'() { + given: + def document = new BsonDocument() + def reader = new BsonDocumentReader(new BsonDocument()) + def writer = new BsonDocumentWriter(document) + + def extraElements = [] + for (def entry : documentWithValuesOfEveryType()) { + extraElements.add(new BsonElement(entry.getKey(), entry.getValue())) + } + + when: + writer.pipe(reader, extraElements) + + then: + document == documentWithValuesOfEveryType() + } +} diff --git a/bson/src/test/unit/org/bson/BsonHelper.java b/bson/src/test/unit/org/bson/BsonHelper.java new file mode 100644 index 00000000000..59fdba474a2 --- /dev/null +++ b/bson/src/test/unit/org/bson/BsonHelper.java @@ -0,0 +1,133 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.bson;
+
+import org.bson.codecs.BsonDocumentCodec;
+import org.bson.codecs.DecoderContext;
+import org.bson.codecs.EncoderContext;
+import org.bson.io.BasicOutputBuffer;
+import org.bson.types.Decimal128;
+import org.bson.types.ObjectId;
+import util.Hex;
+
+import java.nio.ByteBuffer;
+import java.util.Date;
+import java.util.List;
+
+import static java.lang.String.format;
+import static java.util.Arrays.asList;
+
+public final class BsonHelper {
+
+    private static final Date DATE = new Date();
+    private static final ObjectId OBJECT_ID = new ObjectId();
+
+    public static List<BsonValue> getBsonValues() {
+        return asList(
+                new BsonNull(),
+                new BsonInt32(42),
+                new BsonInt64(52L),
+                new BsonDecimal128(Decimal128.parse("4.00")),
+                new BsonBoolean(true),
+                new BsonDateTime(DATE.getTime()),
+                new BsonDouble(62.0),
+                new BsonString("the fox ..."),
+                new BsonMinKey(),
+                new BsonMaxKey(),
+                new BsonDbPointer("test.test", OBJECT_ID),
+                new BsonJavaScript("int i = 0;"),
+                new BsonJavaScriptWithScope("x", new BsonDocument("x", new BsonInt32(1))),
+                new BsonObjectId(OBJECT_ID),
+                new BsonRegularExpression("^test.*regex.*xyz$", "i"),
+                new BsonSymbol("ruby stuff"),
+                new BsonTimestamp(0x12345678, 5),
+                new BsonUndefined(),
+                new BsonBinary((byte) 80, new byte[]{5, 4, 3, 2, 1}),
+                new BsonArray(asList(
+                        new BsonInt32(1),
+                        new BsonInt64(2L),
+                        new BsonBoolean(true),
+                        new BsonArray(asList(
+                                new BsonInt32(1),
+                                new BsonInt32(2),
+                                new BsonInt32(3),
+                                new BsonDocument("a", new BsonInt64(2L)))))),
+                new BsonDocument("a", new BsonInt32(1)));
+    }
+
+    // fail class loading if any BSON types are not represented in BSON_VALUES.
+    static {
+        for (BsonType curBsonType : BsonType.values()) {
+            if (curBsonType == BsonType.END_OF_DOCUMENT) {
+                continue;
+            }
+
+            boolean found = false;
+            for (BsonValue curBsonValue : getBsonValues()) {
+                if (curBsonValue.getBsonType() == curBsonType) {
+                    found = true;
+                    break;
+                }
+            }
+            if (!found) {
+                throw new IllegalStateException(format("Missing BsonValue type %s in BSON_VALUES. Please add a BsonValue with that type!",
+                        curBsonType));
+            }
+        }
+    }
+
+    public static List<BsonValue> valuesOfEveryType() {
+        return getBsonValues();
+    }
+
+    public static BsonDocument documentWithValuesOfEveryType() {
+        BsonDocument document = new BsonDocument();
+        List<BsonValue> bsonValues = getBsonValues();
+        for (int i = 0; i < bsonValues.size(); i++) {
+            document.append(Integer.toString(i), bsonValues.get(i));
+        }
+        return document;
+    }
+
+    public static ByteBuffer toBson(final BsonDocument document) {
+        BasicOutputBuffer bsonOutput = new BasicOutputBuffer();
+        new BsonDocumentCodec().encode(new BsonBinaryWriter(bsonOutput), document, EncoderContext.builder().build());
+        return ByteBuffer.wrap(bsonOutput.toByteArray());
+    }
+
+    private BsonHelper() {
+    }
+
+    public static BsonDocument decodeToDocument(final String subjectHex, final String description) {
+        ByteBuffer byteBuffer = ByteBuffer.wrap(Hex.decode(subjectHex));
+        BsonDocument actualDecodedDocument = new BsonDocumentCodec().decode(new BsonBinaryReader(byteBuffer),
+                DecoderContext.builder().build());
+
+        if (byteBuffer.hasRemaining()) {
+            throw new BsonSerializationException(format("Should have consumed all bytes, but %d still remain in the buffer "
+                    + "for document with description %s", byteBuffer.remaining(), description));
+        }
+        return actualDecodedDocument;
+    }
+
+    public static String encodeToHex(final BsonDocument decodedDocument) {
+        BasicOutputBuffer outputBuffer = new BasicOutputBuffer();
+        new BsonDocumentCodec().encode(new BsonBinaryWriter(outputBuffer), decodedDocument, EncoderContext.builder().build());
+        return Hex.encode(outputBuffer.toByteArray());
+    }
+}
diff --git a/bson/src/test/unit/org/bson/BsonNumberSpecification.groovy b/bson/src/test/unit/org/bson/BsonNumberSpecification.groovy
new file mode 100644
index 00000000000..ae8dac25eae
--- /dev/null
+++ b/bson/src/test/unit/org/bson/BsonNumberSpecification.groovy
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.bson + +import org.bson.types.Decimal128 +import spock.lang.Specification + +class BsonNumberSpecification extends Specification { + + def 'should convert to int value'() { + expect: + new BsonInt32(1).intValue() == 1 + + new BsonInt64(1L).intValue() == 1 + new BsonInt64(Long.MAX_VALUE).intValue() == -1 + new BsonInt64(Long.MIN_VALUE).intValue() == 0 + + new BsonDouble(3.14).intValue() == 3 + + new BsonDecimal128(new Decimal128(1L)).intValue() == 1 + } + + def 'should convert to long value'() { + expect: + new BsonInt32(1).longValue() == 1L + + new BsonInt64(1L).longValue() == 1L + + new BsonDouble(3.14).longValue() == 3L + + new BsonDecimal128(new Decimal128(1L)).longValue() == 1L + } + + def 'should convert to double value'() { + expect: + new BsonInt32(1).doubleValue() == 1.0d + + new BsonInt64(1L).doubleValue() == 1.0d + new BsonInt64(Long.MAX_VALUE).doubleValue() == 9.223372036854776E18d + new BsonInt64(Long.MIN_VALUE).doubleValue() == -9.223372036854776E18d + + new BsonDouble(3.14d).doubleValue() == 3.14d + + new BsonDecimal128(Decimal128.parse('3.14')).doubleValue() == 3.14d + } + + def 'should convert to decimal128 value'() { + expect: + new BsonInt32(1).decimal128Value() == Decimal128.parse('1') + + new BsonInt64(1L).decimal128Value() == Decimal128.parse('1') + new BsonInt64(Long.MAX_VALUE).decimal128Value() == Decimal128.parse('9223372036854775807') + new BsonInt64(Long.MIN_VALUE).decimal128Value() == Decimal128.parse('-9223372036854775808') + + new BsonDouble(1.0d).decimal128Value() == Decimal128.parse('1') + new BsonDouble(Double.NaN).decimal128Value() == Decimal128.NaN + new BsonDouble(Double.POSITIVE_INFINITY).decimal128Value() == Decimal128.POSITIVE_INFINITY + new BsonDouble(Double.NEGATIVE_INFINITY).decimal128Value() == Decimal128.NEGATIVE_INFINITY + + new BsonDecimal128(Decimal128.parse('3.14')).decimal128Value() == Decimal128.parse('3.14') + } + +} diff --git a/bson/src/test/unit/org/bson/BsonRegularExpressionSpecification.groovy b/bson/src/test/unit/org/bson/BsonRegularExpressionSpecification.groovy new file mode 100644 index 00000000000..a8a58cc9020 --- /dev/null +++ b/bson/src/test/unit/org/bson/BsonRegularExpressionSpecification.groovy @@ -0,0 +1,62 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson + +import spock.lang.Specification + +class BsonRegularExpressionSpecification extends Specification { + def 'should get type'() { + expect: + new BsonRegularExpression('abc', '').bsonType == BsonType.REGULAR_EXPRESSION + } + + def 'should sort options'() { + expect: + new BsonRegularExpression('abc', 'uxsmi').options == 'imsux' + } + + def 'should accept invalid options'() { + expect: + new BsonRegularExpression('abc', 'uxsmiw').options == 'imsuwx' + } + + def 'should allow null options'() { + expect: + new BsonRegularExpression('abc').options == '' + new BsonRegularExpression('abc', null).options == '' + } + + def 'should get regular expression'() { + expect: + new BsonRegularExpression('abc', null).pattern == 'abc' + } + + def 'equivalent values should be equal and have same hashcode'() { + given: + def first = new BsonRegularExpression('abc', 'uxsmi') + def second = new BsonRegularExpression('abc', 'imsxu') + + expect: + first.equals(second) + first.hashCode() == second.hashCode() + } + + def 'should convert to string'() { + expect: + new BsonRegularExpression('abc', 'uxsmi').toString() == 'BsonRegularExpression{pattern=\'abc\', options=\'imsux\'}' + } +} diff --git a/bson/src/test/unit/org/bson/BsonTimestampSpecification.groovy b/bson/src/test/unit/org/bson/BsonTimestampSpecification.groovy new file mode 100644 index 00000000000..e539961d44b --- /dev/null +++ b/bson/src/test/unit/org/bson/BsonTimestampSpecification.groovy @@ -0,0 +1,88 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson + +import spock.lang.Specification +import spock.lang.Unroll + +import static java.lang.Integer.MAX_VALUE +import static java.lang.Integer.MIN_VALUE + +class BsonTimestampSpecification extends Specification { + + def 'bsonType should get expected value'() { + expect: + new BsonTimestamp().bsonType == BsonType.TIMESTAMP + } + + @Unroll + def 'compareTo should sort the timestamps as unsigned values'() { + def timestamps = [new BsonTimestamp(Long.MIN_VALUE), + new BsonTimestamp(Long.MAX_VALUE), + new BsonTimestamp(1), + new BsonTimestamp(2), + new BsonTimestamp(-1), + new BsonTimestamp(-2)] + when: + Collections.sort(timestamps) + + then: + timestamps == [new BsonTimestamp(1), + new BsonTimestamp(2), + new BsonTimestamp(Long.MAX_VALUE), + new BsonTimestamp(Long.MIN_VALUE), + new BsonTimestamp(-2), + new BsonTimestamp(-1)] + } + + @Unroll + def 'constructors should initialize instance'() { + when: + def tsFromValue = new BsonTimestamp(value) + def tsFromSecondsAndIncrement = new BsonTimestamp(seconds, increment) + + then: + tsFromValue.time == seconds + tsFromValue.inc == increment + tsFromValue.value == value + + tsFromSecondsAndIncrement.time == seconds + tsFromSecondsAndIncrement.inc == increment + tsFromSecondsAndIncrement.value == value + + where: + seconds | increment | value + 0 | 0 | 0L + 1 | 2 | 0x100000002L + -1 | -2 | 0xfffffffffffffffeL + 123456789 | 42 | 530242871224172586L + MIN_VALUE | MIN_VALUE | 0x8000000080000000L + MIN_VALUE | MAX_VALUE | 0x800000007fffffffL + MAX_VALUE | MIN_VALUE | 0x7fffffff80000000L + MAX_VALUE | MAX_VALUE | 0x7fffffff7fffffffL + } + + def 'no args constructor should initialize instance'() { + when: + def tsFromValue = new BsonTimestamp() + + then: + tsFromValue.time == 0 + tsFromValue.inc == 0 + tsFromValue.value == 0 + } +} diff --git a/bson/src/test/unit/org/bson/BsonValueSpecification.groovy b/bson/src/test/unit/org/bson/BsonValueSpecification.groovy new file mode 100644 index 00000000000..e23b1c43305 --- /dev/null +++ b/bson/src/test/unit/org/bson/BsonValueSpecification.groovy @@ -0,0 +1,221 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson + +import org.bson.types.Decimal128 +import org.bson.types.ObjectId +import spock.lang.Specification + +class BsonValueSpecification extends Specification { + def 'is methods should return true for the correct type'() { + expect: + new BsonNull().isNull() + new BsonInt32(42).isInt32() + new BsonInt32(42).isNumber() + new BsonInt64(52L).isInt64() + new BsonInt64(52L).isNumber() + new BsonDecimal128(Decimal128.parse('1')).isDecimal128() + new BsonDecimal128(Decimal128.parse('1')).isNumber() + new BsonDouble(62.0).isDouble() + new BsonDouble(62.0).isNumber() + new BsonBoolean(true).isBoolean() + new BsonDateTime(new Date().getTime()).isDateTime() + new BsonString('the fox ...').isString() + new BsonJavaScript('int i = 0;').isJavaScript() + new BsonObjectId(new ObjectId()).isObjectId() + new BsonJavaScriptWithScope('int x = y', new BsonDocument('y', new BsonInt32(1))).isJavaScriptWithScope() + new BsonRegularExpression('^test.*regex.*xyz$', 'i').isRegularExpression() + new BsonSymbol('ruby stuff').isSymbol() + new BsonTimestamp(0x12345678, 5).isTimestamp() + new BsonBinary((byte) 80, [5, 4, 3, 2, 1] as byte[]).isBinary() + new BsonDbPointer('n', new ObjectId()).isDBPointer() + new BsonArray().isArray() + new BsonDocument().isDocument() + } + + def 'is methods should return false for the incorrect type'() { + expect: + !new BsonBoolean(false).isNull() + !new BsonNull().isInt32() + !new BsonNull().isNumber() + !new BsonNull().isInt64() + !new BsonNull().isDecimal128() + !new BsonNull().isNumber() + !new BsonNull().isDouble() + !new BsonNull().isNumber() + !new BsonNull().isBoolean() + !new BsonNull().isDateTime() + !new BsonNull().isString() + !new BsonNull().isJavaScript() + !new BsonNull().isObjectId() + !new BsonNull().isJavaScriptWithScope() + !new BsonNull().isRegularExpression() + !new BsonNull().isSymbol() + !new BsonNull().isTimestamp() + !new BsonNull().isBinary() + !new BsonNull().isDBPointer() + !new BsonNull().isArray() + !new BsonNull().isDocument() + } + + def 'support BsonNumber interface for all number types'() { + expect: + bsonValue.asNumber() == bsonValue + bsonValue.asNumber().intValue()== intValue + bsonValue.asNumber().longValue() == longValue + bsonValue.asNumber().doubleValue() == doubleValue + bsonValue.asNumber().decimal128Value() == decimal128Value + + where: + bsonValue | intValue | longValue | doubleValue | decimal128Value + new BsonInt32(42) | 42 | 42L | 42.0 | Decimal128.parse('42') + new BsonInt64(42) | 42 | 42L | 42.0 | Decimal128.parse('42') + new BsonDouble(42) | 42 | 42L | 42.0 | Decimal128.parse('42') + new BsonDecimal128(Decimal128.parse('42')) | 42 | 42L | 42.0 | Decimal128.parse('42') + new BsonDecimal128(Decimal128.POSITIVE_INFINITY) | Integer.MAX_VALUE | Long.MAX_VALUE | Double.POSITIVE_INFINITY | Decimal128.POSITIVE_INFINITY + new BsonDecimal128(Decimal128.NEGATIVE_INFINITY) | Integer.MIN_VALUE | Long.MIN_VALUE | Double.NEGATIVE_INFINITY | Decimal128.NEGATIVE_INFINITY + new BsonDecimal128(Decimal128.NaN) | 0 | 0L | Double.NaN | Decimal128.NaN + } + + def 'as methods should return throw for the incorrect type'() { + when: + new BsonNull().asInt32() + + then: + thrown(BsonInvalidOperationException) + + when: + new BsonNull().asNumber() + + then: + thrown(BsonInvalidOperationException) + + when: + new BsonNull().asInt64() + + then: + thrown(BsonInvalidOperationException) + + when: + new BsonNull().asNumber() + + then: + thrown(BsonInvalidOperationException) + + when: + new BsonNull().asDouble() + + then: + 
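+        // asDouble() on a BsonNull is rejected just like the numeric as* calls above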
thrown(BsonInvalidOperationException) + + when: + new BsonNull().asNumber() + + then: + thrown(BsonInvalidOperationException) + + when: + new BsonNull().asDecimal128() + + then: + thrown(BsonInvalidOperationException) + + when: + new BsonNull().asBoolean() + + then: + thrown(BsonInvalidOperationException) + + when: + new BsonNull().asDateTime() + + then: + thrown(BsonInvalidOperationException) + + when: + new BsonNull().asString() + + then: + thrown(BsonInvalidOperationException) + + when: + new BsonNull().asJavaScript() + + then: + thrown(BsonInvalidOperationException) + + when: + new BsonNull().asObjectId() + + then: + thrown(BsonInvalidOperationException) + + when: + new BsonNull().asJavaScriptWithScope() + + then: + thrown(BsonInvalidOperationException) + + when: + new BsonNull().asRegularExpression() + + then: + thrown(BsonInvalidOperationException) + + when: + new BsonNull().asSymbol() + + then: + thrown(BsonInvalidOperationException) + + when: + new BsonNull().asTimestamp() + + then: + thrown(BsonInvalidOperationException) + + when: + new BsonNull().asBinary() + + then: + thrown(BsonInvalidOperationException) + + when: + new BsonNull().asDBPointer() + + then: + thrown(BsonInvalidOperationException) + + when: + new BsonNull().asArray() + + then: + thrown(BsonInvalidOperationException) + + when: + new BsonNull().asDocument() + + then: + thrown(BsonInvalidOperationException) + + when: + new BsonNull().asNumber() + + then: + thrown(BsonInvalidOperationException) + } +} diff --git a/bson/src/test/unit/org/bson/BsonWriterSpecification.groovy b/bson/src/test/unit/org/bson/BsonWriterSpecification.groovy new file mode 100644 index 00000000000..05fa945a87f --- /dev/null +++ b/bson/src/test/unit/org/bson/BsonWriterSpecification.groovy @@ -0,0 +1,418 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License") + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson + +import org.bson.io.BasicOutputBuffer +import spock.lang.Specification + +class BsonWriterSpecification extends Specification { + + def 'shouldThrowExceptionForBooleanWhenWritingBeforeStartingDocument'() { + when: + writer.writeBoolean('b1', true) + + then: + thrown(BsonInvalidOperationException) + + where: + writer << [new BsonBinaryWriter(new BasicOutputBuffer()), new BsonDocumentWriter(new BsonDocument())] + } + + def 'shouldThrowExceptionForArrayWhenWritingBeforeStartingDocument'() { + when: + writer.writeStartArray() + + then: + thrown(BsonInvalidOperationException) + + where: + writer << [new BsonBinaryWriter(new BasicOutputBuffer()), new BsonDocumentWriter(new BsonDocument())] + } + + def 'shouldThrowExceptionForNullWhenWritingBeforeStartingDocument'() { + when: + writer.writeNull() + + then: + thrown(BsonInvalidOperationException) + + where: + writer << [new BsonBinaryWriter(new BasicOutputBuffer()), new BsonDocumentWriter(new BsonDocument())] + } + + def 'shouldThrowExceptionForStringWhenStateIsValue'() { + when: + writer.writeStartDocument() + writer.writeString('SomeString') + + then: + thrown(BsonInvalidOperationException) + + where: + writer << [new BsonBinaryWriter(new BasicOutputBuffer()), new BsonDocumentWriter(new BsonDocument())] + } + + def 'shouldThrowExceptionWhenEndingAnArrayWhenStateIsValue'() { + when: + writer.writeStartDocument() + writer.writeEndArray() + + then: + thrown(BsonInvalidOperationException) + + where: + writer << [new BsonBinaryWriter(new BasicOutputBuffer()), new BsonDocumentWriter(new BsonDocument())] + } + + def 'shouldThrowExceptionWhenWritingASecondName'() { + when: + writer.writeStartDocument() + writer.writeName('f1') + writer.writeName('i2') + + then: + thrown(BsonInvalidOperationException) + + where: + writer << [new BsonBinaryWriter(new BasicOutputBuffer()), new BsonDocumentWriter(new BsonDocument())] + } + + def 'shouldThrowExceptionWhenEndingADocumentBeforeValueIsWritten'() { + when: + writer.writeStartDocument() + writer.writeName('f1') + writer.writeEndDocument() + + then: + thrown(BsonInvalidOperationException) + + where: + writer << [new BsonBinaryWriter(new BasicOutputBuffer()), new BsonDocumentWriter(new BsonDocument())] + } + + def 'shouldThrowAnExceptionWhenTryingToWriteASecondValue'() { + when: + writer.writeStartDocument() + writer.writeName('f1') + writer.writeDouble(100) + writer.writeString('i2') + + then: + thrown(BsonInvalidOperationException) + + where: + writer << [new BsonBinaryWriter(new BasicOutputBuffer()), new BsonDocumentWriter(new BsonDocument())] + } + + def 'shouldThrowAnExceptionWhenTryingToWriteJavaScript'() { + when: + writer.writeStartDocument() + writer.writeName('f1') + writer.writeDouble(100) + writer.writeJavaScript('var i') + + then: + thrown(BsonInvalidOperationException) + + where: + writer << [new BsonBinaryWriter(new BasicOutputBuffer()), new BsonDocumentWriter(new BsonDocument())] + } + + def 'shouldThrowAnExceptionWhenWritingANameInAnArray'() { + when: + writer.writeStartDocument() + writer.writeName('f1') + writer.writeDouble(100) + writer.writeStartArray('f2') + writer.writeName('i3') + + then: + thrown(BsonInvalidOperationException) + + where: + writer << [new BsonBinaryWriter(new BasicOutputBuffer()), new BsonDocumentWriter(new BsonDocument())] + } + + def 'shouldThrowAnExceptionWhenEndingDocumentInTheMiddleOfWritingAnArray'() { + when: + writer.writeStartDocument() + writer.writeName('f1') + writer.writeDouble(100) + writer.writeStartArray('f2') + 
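+        // the array opened above is still pending, so ending the enclosing document must fail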
writer.writeEndDocument() + + then: + thrown(BsonInvalidOperationException) + + where: + writer << [new BsonBinaryWriter(new BasicOutputBuffer()), new BsonDocumentWriter(new BsonDocument())] + } + + def 'shouldThrowAnExceptionWhenEndingAnArrayInASubDocument'() { + when: + writer.with { + writeStartDocument() + writeName('f1') + writeDouble(100) + writeStartArray('f2') + writeStartDocument() + writeEndArray() + } + + then: + thrown(BsonInvalidOperationException) + + where: + writer << [new BsonBinaryWriter(new BasicOutputBuffer()), new BsonDocumentWriter(new BsonDocument())] + } + + def 'shouldThrowAnExceptionWhenWritingANameInAnArrayEvenWhenSubDocumentExistsInArray'() { + when: + //Does this test even make sense? + writer.with { + writeStartDocument() + writeName('f1') + writeDouble(100) + writeStartArray('f2') + writeStartDocument() + writeEndDocument() + writeName('i3') + } + + then: + thrown(BsonInvalidOperationException) + + where: + writer << [new BsonBinaryWriter(new BasicOutputBuffer()), new BsonDocumentWriter(new BsonDocument())] + } + + def 'shouldThrowExceptionWhenWritingObjectsIntoNestedArrays'() { + when: +//This test seem redundant? + writer.with { + writeStartDocument() + writeName('f1') + writeDouble(100) + writeStartArray('f2') + writeStartArray() + writeStartArray() + writeStartArray() + writeInt64('i4', 10) + } + then: + thrown(BsonInvalidOperationException) + + where: + writer << [new BsonBinaryWriter(new BasicOutputBuffer()), new BsonDocumentWriter(new BsonDocument())] + } + + def 'shouldThrowAnExceptionWhenAttemptingToEndAnArrayThatWasNotStarted'() { + when: + writer.with { + writeStartDocument() + writeStartArray('f2') + writeEndArray() + writeEndArray() + } + then: + thrown(BsonInvalidOperationException) + + where: + writer << [new BsonBinaryWriter(new BasicOutputBuffer()), new BsonDocumentWriter(new BsonDocument())] + } + + def 'shouldThrowAnErrorIfTryingToWriteNamesIntoAJavascriptScope1'() { + when: + writer.writeStartDocument() + writer.writeJavaScriptWithScope('js1', 'var i = 1') + + writer.writeBoolean('b4', true) + + then: + thrown(BsonInvalidOperationException) + + where: + writer << [new BsonBinaryWriter(new BasicOutputBuffer()), new BsonDocumentWriter(new BsonDocument())] + } + + def 'shouldThrowAnErrorIfTryingToWriteNamesIntoAJavascriptScope2'() { + when: + //do we really need to test every type written after writeJavaScriptWithScope? + writer.writeStartDocument() + writer.writeJavaScriptWithScope('js1', 'var i = 1') + + writer.writeBinaryData(new BsonBinary([0, 0, 1, 0] as byte[])) + + then: + thrown(BsonInvalidOperationException) + + where: + writer << [new BsonBinaryWriter(new BasicOutputBuffer()), new BsonDocumentWriter(new BsonDocument())] + } + + def 'shouldThrowAnErrorIfTryingToWriteNamesIntoAJavascriptScope3'() { + when: + //do we really need to test every type written after writeJavaScriptWithScope? + writer.writeStartDocument() + writer.writeJavaScriptWithScope('js1', 'var i = 1') + + writer.writeStartArray() + + then: + thrown(BsonInvalidOperationException) + + where: + writer << [new BsonBinaryWriter(new BasicOutputBuffer()), new BsonDocumentWriter(new BsonDocument())] + } + + def 'shouldThrowAnErrorIfTryingToWriteNamesIntoAJavascriptScope4'() { + when: + //do we really need to test every type written after writeJavaScriptWithScope? 
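+        // writeJavaScriptWithScope leaves the writer waiting for the scope document,
+        // so ending the outer document before that scope is written must fail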
+ writer.writeStartDocument() + writer.writeJavaScriptWithScope('js1', 'var i = 1') + + writer.writeEndDocument() + + then: + thrown(BsonInvalidOperationException) + + where: + writer << [new BsonBinaryWriter(new BasicOutputBuffer()), new BsonDocumentWriter(new BsonDocument())] + } + + def 'shouldThrowAnErrorIfKeyContainsNullCharacter'() { + when: + writer.writeStartDocument() + writer.writeBoolean('h\u0000i', true) + + + then: + thrown(BSONException) + + where: + writer << [new BsonBinaryWriter(new BasicOutputBuffer())] + } + + def 'shouldNotThrowAnErrorIfValueContainsNullCharacter'() { + when: + writer.writeStartDocument() + writer.writeString('x', 'h\u0000i') + + then: + true + + where: + writer << [new BsonBinaryWriter(new BasicOutputBuffer()), new BsonDocumentWriter(new BsonDocument())] + } + + def 'shouldNotThrowAnExceptionIfCorrectlyStartingAndEndingDocumentsAndSubDocuments'() { + when: + writer.writeStartDocument() + writer.writeJavaScriptWithScope('js1', 'var i = 1') + + writer.writeStartDocument() + writer.writeEndDocument() + + writer.writeEndDocument() + + then: + true + + where: + writer << [new BsonBinaryWriter(new BasicOutputBuffer()), new BsonDocumentWriter(new BsonDocument())] + } + + def 'shouldThrowOnInvalidFieldName'() { + given: + writer.writeStartDocument() + writer.writeString('good', 'string') + + when: + writer.writeString('bad', 'string') + + then: + thrown(IllegalArgumentException) + + where: + writer << [new BsonBinaryWriter(new BasicOutputBuffer(), new TestFieldNameValidator('bad'))] + } + + def 'shouldThrowOnInvalidFieldNameNestedInDocument'() { + given: + writer.with { + writeStartDocument() + writeName('doc') + writeStartDocument() + writeString('good', 'string') + writeString('bad', 'string') + } + when: + writer.writeString('bad-child', 'string') + + then: + thrown(IllegalArgumentException) + + where: + writer << [new BsonBinaryWriter(new BasicOutputBuffer(), new TestFieldNameValidator('bad'))] + } + + def 'shouldThrowOnInvalidFieldNameNestedInDocumentInArray'() { + given: + writer.with { + writeStartDocument() + writeName('doc') + writeStartArray() + writeStartDocument() + writeString('good', 'string') + writeString('bad', 'string') + } + when: + writer.writeString('bad-child', 'string') + + then: + def e = thrown(IllegalArgumentException) + e.getMessage() == 'testFieldNameValidator error' + + where: + writer << [new BsonBinaryWriter(new BasicOutputBuffer(), new TestFieldNameValidator('bad'))] + } + + class TestFieldNameValidator implements FieldNameValidator { + private final String badFieldName + + TestFieldNameValidator(final String badFieldName) { + this.badFieldName = badFieldName + } + + @Override + boolean validate(final String fieldName) { + fieldName != badFieldName + } + + @Override + String getValidationErrorMessage(final String fieldName) { + 'testFieldNameValidator error' + } + + @Override + FieldNameValidator getValidatorForField(final String fieldName) { + new TestFieldNameValidator(badFieldName + '-child') + } + } + +} + diff --git a/bson/src/test/unit/org/bson/DocumentTest.java b/bson/src/test/unit/org/bson/DocumentTest.java new file mode 100644 index 00000000000..bd9551e9407 --- /dev/null +++ b/bson/src/test/unit/org/bson/DocumentTest.java @@ -0,0 +1,204 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson; + +import org.bson.codecs.BsonTypeClassMap; +import org.bson.codecs.BsonValueCodecProvider; +import org.bson.codecs.CollectibleCodec; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.DocumentCodec; +import org.bson.codecs.DocumentCodecProvider; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.ValueCodecProvider; +import org.bson.codecs.configuration.CodecConfigurationException; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; +import org.bson.json.JsonReader; +import org.junit.jupiter.api.Test; + +import java.util.Collections; +import java.util.List; +import java.util.UUID; + +import static java.util.Arrays.asList; +import static org.bson.codecs.configuration.CodecRegistries.fromCodecs; +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; +import static org.bson.codecs.configuration.CodecRegistries.fromRegistries; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.fail; + +// Don't convert to Spock, as Groovy intercepts equals/hashCode methods that we are trying to test +public class DocumentTest { + private final Document emptyDocument = new Document(); + private final Document document = new Document() + .append("a", 1) + .append("b", 2) + .append("c", new Document("x", true)) + .append("d", asList(new Document("y", false), 1)); + + private final Document customDocument = new Document("database", new Name("MongoDB")); + private final CodecRegistry customRegistry = fromRegistries(fromCodecs(new NameCodec()), + fromProviders(new DocumentCodecProvider(), new ValueCodecProvider(), new BsonValueCodecProvider())); + private final DocumentCodec customDocumentCodec = new DocumentCodec(customRegistry, new BsonTypeClassMap()); + + @Test + public void shouldBeEqualToItself() { + assertEquals(emptyDocument, emptyDocument); + assertEquals(document, document); + } + + @Test + public void shouldNotBeEqualToDifferentBsonDocument() { + // expect + assertNotEquals(emptyDocument, document); + } + + @Test + public void shouldHaveSameHashCodeAsEquivalentBsonDocument() { + assertEquals(emptyDocument.hashCode(), new BsonDocument().hashCode()); + } + + @Test + public void toJsonShouldReturnEquivalent() { + assertEquals(new DocumentCodec().decode(new JsonReader(document.toJson()), DecoderContext.builder().build()), document); + } + + // Test to ensure that toJson does not reorder _id field + @Test + public void toJsonShouldNotReorderIdField() { + // given + Document d = new Document().append("x", 1) + .append("y", Collections.singletonList("one")) + .append("_id", "1"); + assertEquals("{\"x\": 1, \"y\": [\"one\"], \"_id\": \"1\"}", d.toJson()); + } + + // Test in Java to make sure none of the casts result in compiler warnings or class cast exceptions + @Test + public void shouldGetWithDefaultValue() { + // given + Document d = new Document("x", 1) + .append("y", Collections.singletonList("one")) + .append("z", "foo"); + + // when the key is found + int x = 
d.get("x", 2); + List y = d.get("y", asList("three", "four")); + String z = d.get("z", "bar"); + + // then it returns the value + assertEquals(1, x); + assertEquals(asList("one"), y); + assertEquals("foo", z); + + // when the key is not found + int x2 = d.get("x2", 2); + List y2 = d.get("y2", asList("three", "four")); + String z2 = d.get("z2", "bar"); + + // then it returns the default value + assertEquals(2, x2); + assertEquals(asList("three", "four"), y2); + assertEquals("bar", z2); + } + + @Test + public void toJsonShouldTakeACustomDocumentCodec() { + + try { + customDocument.toJson(); + fail("Should fail due to custom type"); + } catch (CodecConfigurationException e) { + // noop + } + + assertEquals("{\"database\": {\"name\": \"MongoDB\"}}", customDocument.toJson(customDocumentCodec)); + } + + @Test + public void toBsonDocumentShouldCreateBsonDocument() { + BsonDocument expected = new BsonDocument() + .append("a", new BsonInt32(1)) + .append("b", new BsonInt32(2)) + .append("c", new BsonDocument("x", BsonBoolean.TRUE)) + .append("d", new BsonArray(asList(new BsonDocument("y", BsonBoolean.FALSE), new BsonInt32(1)))); + + assertEquals(expected, document.toBsonDocument(BsonDocument.class, Bson.DEFAULT_CODEC_REGISTRY)); + assertEquals(expected, document.toBsonDocument()); + } + + @Test + public void toJsonShouldRenderUuidAsStandard() { + UUID uuid = UUID.randomUUID(); + Document doc = new Document("_id", uuid); + + String json = doc.toJson(); + assertEquals(new BsonDocument("_id", new BsonBinary(uuid)), BsonDocument.parse(json)); + } + + public class Name { + private final String name; + + public Name(final String name) { + this.name = name; + } + + public String getName() { + return name; + } + } + + class NameCodec implements CollectibleCodec { + + @Override + public void encode(final BsonWriter writer, final Name n, final EncoderContext encoderContext) { + writer.writeStartDocument(); + writer.writeString("name", n.getName()); + writer.writeEndDocument(); + } + + @Override + public Name decode(final BsonReader reader, final DecoderContext decoderContext) { + reader.readStartDocument(); + String name = reader.readString("_id"); + reader.readEndDocument(); + return new Name(name); + } + + @Override + public Class getEncoderClass() { + return Name.class; + } + + @Override + public boolean documentHasId(final Name document) { + return false; + } + + @Override + public BsonObjectId getDocumentId(final Name document) { + return null; + } + + @Override + public Name generateIdIfAbsentFromDocument(final Name document) { + return document; + } + } + +} diff --git a/bson/src/test/unit/org/bson/GenericBsonTest.java b/bson/src/test/unit/org/bson/GenericBsonTest.java new file mode 100644 index 00000000000..582ec5d83dc --- /dev/null +++ b/bson/src/test/unit/org/bson/GenericBsonTest.java @@ -0,0 +1,319 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.bson;
+
+import org.bson.json.JsonMode;
+import org.bson.json.JsonParseException;
+import org.bson.json.JsonWriterSettings;
+import org.bson.types.Decimal128;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+import util.JsonPoweredTestHelper;
+
+import java.io.IOException;
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.stream.Stream;
+
+import static java.lang.String.format;
+import static org.bson.BsonDocument.parse;
+import static org.bson.BsonHelper.decodeToDocument;
+import static org.bson.BsonHelper.encodeToHex;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assumptions.assumeFalse;
+
+// BSON tests powered by language-agnostic JSON-based tests included in test resources
+public class GenericBsonTest {
+
+    private static final List<String> IGNORED_PARSE_ERRORS = Arrays.asList(
+            "Bad $binary (type is number, not string)", // for backwards compat, JsonReader supports number for binary type
+            "Bad $date (number, not string or hash)", // for backwards compat, JsonReader supports numbers for $date
+            "Bad DBRef (ref is number, not string)", // JsonReader knows nothing of DBRef so these are not parse errors
+            "Bad DBRef (db is number, not string)");
+
+    enum TestCaseType {
+        VALID,
+        DECODE_ERROR,
+        PARSE_ERROR
+    }
+
+    @ParameterizedTest(name = "{0}")
+    @MethodSource("data")
+    public void shouldPassAllOutcomes(@SuppressWarnings("unused") final String description,
+            final BsonDocument testDefinition, final BsonDocument testCase, final TestCaseType testCaseType) {
+        switch (testCaseType) {
+            case VALID:
+                runValid(testCase);
+                break;
+            case DECODE_ERROR:
+                runDecodeError(testCase);
+                break;
+            case PARSE_ERROR:
+                runParseError(testDefinition, testCase);
+                break;
+            default:
+                throw new IllegalArgumentException(format("Unsupported test case type %s", testCaseType));
+        }
+    }
+
+    private void runValid(final BsonDocument testCase) {
+        String description = testCase.getString("description").getValue();
+        String canonicalBsonHex = testCase.getString("canonical_bson").getValue().toUpperCase();
+        String degenerateBsonHex = testCase.getString("degenerate_bson", new BsonString("")).getValue().toUpperCase();
+        String canonicalJson = replaceUnicodeEscapes(testCase.getString("canonical_extjson").getValue());
+        String relaxedJson = replaceUnicodeEscapes(testCase.getString("relaxed_extjson", new BsonString("")).getValue());
+        String degenerateJson = replaceUnicodeEscapes(testCase.getString("degenerate_extjson", new BsonString("")).getValue());
+        boolean lossy = testCase.getBoolean("lossy", new BsonBoolean(false)).getValue();
+
+        BsonDocument decodedDocument = decodeToDocument(canonicalBsonHex, description);
+
+        // native_to_bson( bson_to_native(cB) ) = cB
+        assertEquals(canonicalBsonHex, encodeToHex(decodedDocument),
+                format("Failed to create expected BSON for document with description '%s'", description));
+
+        JsonWriterSettings canonicalJsonWriterSettings = JsonWriterSettings.builder().outputMode(JsonMode.EXTENDED).build();
+        JsonWriterSettings relaxedJsonWriterSettings = JsonWriterSettings.builder().outputMode(JsonMode.RELAXED).build();
+
+        if (!canonicalJson.isEmpty())
{ + // native_to_canonical_extended_json( bson_to_native(cB) ) = cEJ + assertEquals(stripWhiteSpace(canonicalJson), stripWhiteSpace(decodedDocument.toJson(canonicalJsonWriterSettings)), + format("Failed to create expected canonical JSON for document with description '%s'", description)); + + // native_to_canonical_extended_json( json_to_native(cEJ) ) = cEJ + BsonDocument parsedCanonicalJsonDocument = parse(canonicalJson); + assertEquals(stripWhiteSpace(canonicalJson), stripWhiteSpace(parsedCanonicalJsonDocument.toJson(canonicalJsonWriterSettings)), + "Failed to create expected canonical JSON from parsing canonical JSON"); + + if (!lossy) { + // native_to_bson( json_to_native(cEJ) ) = cB + assertEquals(canonicalBsonHex, encodeToHex(parsedCanonicalJsonDocument), + "Failed to create expected canonical BSON from parsing canonical JSON"); + } + } + + if (!relaxedJson.isEmpty()) { + // native_to_relaxed_extended_json( bson_to_native(cB) ) = rEJ + assertEquals(stripWhiteSpace(relaxedJson), stripWhiteSpace(decodedDocument.toJson(relaxedJsonWriterSettings)), + format("Failed to create expected relaxed JSON for document with description '%s'", description)); + + // native_to_relaxed_extended_json( json_to_native(rEJ) ) = rEJ + assertEquals(stripWhiteSpace(relaxedJson), stripWhiteSpace(parse(relaxedJson).toJson(relaxedJsonWriterSettings)), + "Failed to create expected relaxed JSON from parsing relaxed JSON"); + } + + if (!degenerateJson.isEmpty()) { + // native_to_bson( json_to_native(dEJ) ) = cB + assertEquals(canonicalBsonHex, encodeToHex(parse(degenerateJson)), + "Failed to create expected canonical BSON from parsing canonical JSON"); + } + + if (!degenerateBsonHex.isEmpty()) { + BsonDocument decodedDegenerateDocument = decodeToDocument(degenerateBsonHex, description); + // native_to_bson( bson_to_native(dB) ) = cB + assertEquals(canonicalBsonHex, encodeToHex(decodedDegenerateDocument), + format("Failed to create expected canonical BSON from degenerate BSON for document with description '%s'", + description)); + } + } + + // The corpus escapes all non-ascii characters, but JSONWriter does not. This method converts the Unicode escape sequence into its + // regular UTF encoding in order to match the JSONWriter behavior. + private String replaceUnicodeEscapes(final String json) { + try { + StringReader reader = new StringReader(json); + StringWriter writer = new StringWriter(); + int cur; + while ((cur = reader.read()) != -1) { + char curChar = (char) cur; + if (curChar != '\\') { + writer.write(curChar); + continue; + } + + char nextChar = (char) reader.read(); + if (nextChar != 'u') { + writer.write(curChar); + writer.write(nextChar); + continue; + } + char[] codePointString = new char[4]; + reader.read(codePointString); + char escapedChar = (char) Integer.parseInt(new String(codePointString), 16); + if (shouldEscapeCharacter(escapedChar)) { + writer.write("\\u" + new String(codePointString)); + } else { + writer.write(escapedChar); + } + + } + return writer.toString(); + } catch (IOException e) { + throw new RuntimeException("impossible"); + } + } + + // copied from JsonWriter... 
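+    // Letters, digits, punctuation, symbols and plain spaces keep their literal form; everything
+    // else (control characters, line/paragraph separators, surrogates, unassigned code points)
+    // falls through to the default branch below and keeps its backslash-u escape.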
+ private boolean shouldEscapeCharacter(final char escapedChar) { + switch (Character.getType(escapedChar)) { + case Character.UPPERCASE_LETTER: + case Character.LOWERCASE_LETTER: + case Character.TITLECASE_LETTER: + case Character.OTHER_LETTER: + case Character.DECIMAL_DIGIT_NUMBER: + case Character.LETTER_NUMBER: + case Character.OTHER_NUMBER: + case Character.SPACE_SEPARATOR: + case Character.CONNECTOR_PUNCTUATION: + case Character.DASH_PUNCTUATION: + case Character.START_PUNCTUATION: + case Character.END_PUNCTUATION: + case Character.INITIAL_QUOTE_PUNCTUATION: + case Character.FINAL_QUOTE_PUNCTUATION: + case Character.OTHER_PUNCTUATION: + case Character.MATH_SYMBOL: + case Character.CURRENCY_SYMBOL: + case Character.MODIFIER_SYMBOL: + case Character.OTHER_SYMBOL: + return false; + default: + return true; + } + } + + private void runDecodeError(final BsonDocument testCase) { + try { + String description = testCase.getString("description").getValue(); + throwIfValueIsStringContainingReplacementCharacter(testCase, description); + fail(format("Should have failed parsing for subject with description '%s'", description)); + } catch (BsonSerializationException e) { + // all good + } + } + + private void runParseError(final BsonDocument testDefinition, final BsonDocument testCase) { + String description = testCase.getString("description").getValue(); + + assumeFalse(IGNORED_PARSE_ERRORS.contains(description)); + + String str = testCase.getString("string").getValue(); + + String testDefinitionDescription = testDefinition.getString("description").getValue(); + if (testDefinitionDescription.startsWith("Decimal128")) { + try { + Decimal128.parse(str); + fail(format("Should fail to parse '" + str + "' with description '%s'", description + "'")); + } catch (NumberFormatException e) { + // all good + } + } else if (testDefinitionDescription.startsWith("Top-level") || testDefinitionDescription.startsWith("Binary type")) { + try { + BsonDocument document = parse(str); + encodeToHex(document); + fail("Should fail to parse JSON '" + str + "' with description '" + description + "'"); + } catch (JsonParseException e) { + // all good + } catch (BsonInvalidOperationException e) { + if (!description.equals("Bad $code with $scope (scope is number, not doc)")) { + fail("Should throw JsonParseException for '" + str + "' with description '" + description + "'"); + } + // all good + } catch (BsonSerializationException e) { + if (isTestOfNullByteInCString(description)) { + assertTrue(e.getMessage().contains("is not valid because it contains a null character")); + } else { + fail("Unexpected BsonSerializationException"); + } + } + } else { + fail("Unrecognized test definition description: " + testDefinitionDescription); + } + } + + private boolean isTestOfNullByteInCString(final String description) { + return description.startsWith("Null byte"); + } + + // Working around the fact that the Java driver doesn't report an error for invalid UTF-8, but rather replaces the invalid + // sequence with the replacement character + private void throwIfValueIsStringContainingReplacementCharacter(final BsonDocument testCase, final String description) { + BsonDocument decodedDocument = decodeToDocument(testCase.getString("bson").getValue(), description); + BsonValue value = decodedDocument.get(decodedDocument.getFirstKey()); + + String decodedString; + if (value.isString()) { + decodedString = value.asString().getValue(); + } else if (value.isDBPointer()) { + decodedString = value.asDBPointer().getNamespace(); + } else if 
(value.isJavaScript()) {
+            decodedString = value.asJavaScript().getCode();
+        } else if (value.isJavaScriptWithScope()) {
+            decodedString = value.asJavaScriptWithScope().getCode();
+        } else if (value.isSymbol()) {
+            decodedString = value.asSymbol().getSymbol();
+        } else {
+            throw new UnsupportedOperationException("Unsupported test for BSON type " + value.getBsonType());
+        }
+
+        if (decodedString.contains(StandardCharsets.UTF_8.newDecoder().replacement())) {
+            throw new BsonSerializationException("String contains replacement character");
+        }
+    }
+
+    private static Stream<Arguments> data() {
+        List<Arguments> data = new ArrayList<>();
+        for (BsonDocument testDocument : JsonPoweredTestHelper.getTestDocuments("/bson")) {
+            for (BsonValue curValue : testDocument.getArray("valid", new BsonArray())) {
+                BsonDocument testCaseDocument = curValue.asDocument();
+                data.add(Arguments.of(
+                        createTestCaseDescription(testDocument, testCaseDocument, "valid"), testDocument, testCaseDocument,
+                        TestCaseType.VALID));
+            }
+
+            for (BsonValue curValue : testDocument.getArray("decodeErrors", new BsonArray())) {
+                BsonDocument testCaseDocument = curValue.asDocument();
+                data.add(Arguments.of(
+                        createTestCaseDescription(testDocument, testCaseDocument, "decodeError"), testDocument, testCaseDocument,
+                        TestCaseType.DECODE_ERROR));
+            }
+            for (BsonValue curValue : testDocument.getArray("parseErrors", new BsonArray())) {
+                BsonDocument testCaseDocument = curValue.asDocument();
+                data.add(Arguments.of(createTestCaseDescription(testDocument, testCaseDocument, "parseError"), testDocument,
+                        testCaseDocument, TestCaseType.PARSE_ERROR));
+            }
+        }
+        return data.stream();
+    }
+
+    private static String createTestCaseDescription(final BsonDocument testDocument, final BsonDocument testCaseDocument,
+            final String testCaseType) {
+        return testDocument.getString("description").getValue()
+                + "[" + testCaseType + "]"
+                + ": " + testCaseDocument.getString("description").getValue();
+    }
+
+    private String stripWhiteSpace(final String json) {
+        return json.replace(" ", "");
+    }
+}
diff --git a/bson/src/test/unit/org/bson/LazyBSONDecoderTest.java b/bson/src/test/unit/org/bson/LazyBSONDecoderTest.java
new file mode 100644
index 00000000000..32b2f047f43
--- /dev/null
+++ b/bson/src/test/unit/org/bson/LazyBSONDecoderTest.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.bson; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; + +import static org.hamcrest.CoreMatchers.hasItems; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class LazyBSONDecoderTest { + private BSONDecoder bsonDecoder; + + @BeforeEach + public void setUp() { + bsonDecoder = new LazyBSONDecoder(); + } + + @Test + public void testDecodingFromInputStream() throws IOException { + InputStream is = new ByteArrayInputStream(new byte[]{12, 0, 0, 0, 16, 97, 0, 1, 0, 0, 0, 0}); + BSONObject document = bsonDecoder.readObject(is); + assertNotNull(document); + assertThat(document, instanceOf(LazyBSONObject.class)); + assertEquals(1, document.keySet().size()); + assertThat(document.keySet(), hasItems("a")); + assertEquals(1, document.get("a")); + } + + @Test + public void testDecodingFromByteArray() throws IOException { + byte[] bytes = {12, 0, 0, 0, 16, 97, 0, 1, 0, 0, 0, 0}; + BSONObject document = bsonDecoder.readObject(bytes); + assertNotNull(document); + assertThat(document, instanceOf(LazyBSONObject.class)); + assertEquals(1, document.keySet().size()); + assertThat(document.keySet(), hasItems("a")); + assertEquals(1, document.get("a")); + } + + @Test + public void testDecodingFromInvalidInput() { + byte[] bytes = {16, 0, 0, 0, 16, 97, 0, 1, 0, 0, 0, 0}; + assertThrows(BSONException.class, () -> bsonDecoder.readObject(bytes)); + } + +} diff --git a/bson/src/test/unit/org/bson/LazyBSONListTest.java b/bson/src/test/unit/org/bson/LazyBSONListTest.java new file mode 100644 index 00000000000..cd2672b6575 --- /dev/null +++ b/bson/src/test/unit/org/bson/LazyBSONListTest.java @@ -0,0 +1,100 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson; + +import org.junit.jupiter.api.Test; + +import java.util.Iterator; +import java.util.List; +import java.util.NoSuchElementException; + +import static java.util.Arrays.asList; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +@SuppressWarnings({"rawtypes"}) +public class LazyBSONListTest { + private LazyBSONList encodeAndExtractList(final List list) { + BSONObject document = new BasicBSONObject("l", list); + return (LazyBSONList) new LazyBSONObject(new BasicBSONEncoder().encode(document), new LazyBSONCallback()).get("l"); + } + + + @Test + public void testArray() { + LazyBSONList list = encodeAndExtractList(asList(1, 2, 3)); + assertEquals(3, list.size()); + assertEquals(1, list.get(0)); + assertEquals(2, list.get(1)); + assertEquals(3, list.get(2)); + } + + @Test + public void testEmptyArray() { + LazyBSONList list = encodeAndExtractList(asList()); + assertEquals(0, list.size()); + assertFalse(list.iterator().hasNext()); + } + + @Test + public void testIndexOf() { + LazyBSONList list = encodeAndExtractList(asList("a", "b", "z")); + assertEquals(0, list.indexOf("a")); + assertEquals(2, list.indexOf("z")); + assertEquals(-1, list.indexOf("y")); + } + + @Test + public void testLastIndexOf() { + LazyBSONList list = encodeAndExtractList(asList("a", "b", "z", "b", "b", "z")); + assertEquals(4, list.lastIndexOf("b")); + assertEquals(5, list.lastIndexOf("z")); + assertEquals(0, list.lastIndexOf("a")); + assertEquals(-1, list.lastIndexOf("x")); + } + + @Test + public void testContainsAll() { + LazyBSONList list = encodeAndExtractList(asList("a", "b", "z")); + assertTrue(list.containsAll(asList("a", "b"))); + assertFalse(list.containsAll(asList("a", "b", "c", "z"))); + } + + @Test + public void testIterator() { + LazyBSONList list = encodeAndExtractList(asList("a", "b")); + Iterator it = list.iterator(); + assertTrue(it.hasNext()); + assertEquals("a", it.next()); + assertTrue(it.hasNext()); + assertEquals("b", it.next()); + assertFalse(it.hasNext()); + } + + @Test + public void testIteratorNextWhileNothingLeft() { + assertThrows(NoSuchElementException.class, () -> { + LazyBSONList list = encodeAndExtractList(asList()); + Iterator it = list.iterator(); + assertFalse(it.hasNext()); + it.next(); + }); + } + +} diff --git a/bson/src/test/unit/org/bson/LazyBSONObjectSpecification.groovy b/bson/src/test/unit/org/bson/LazyBSONObjectSpecification.groovy new file mode 100644 index 00000000000..43d910bd5fa --- /dev/null +++ b/bson/src/test/unit/org/bson/LazyBSONObjectSpecification.groovy @@ -0,0 +1,359 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson + +import org.bson.types.BSONTimestamp +import org.bson.types.Binary +import org.bson.types.Code +import org.bson.types.CodeWScope +import org.bson.types.Decimal128 +import org.bson.types.MaxKey +import org.bson.types.MinKey +import org.bson.types.ObjectId +import org.bson.types.Symbol +import spock.lang.Specification +import spock.lang.Unroll + +import java.util.regex.Pattern + +import static org.bson.BsonHelper.toBson +import static org.bson.BsonHelper.valuesOfEveryType +import static org.bson.BsonType.SYMBOL +import static org.bson.BsonType.UNDEFINED + +@SuppressWarnings(['LineLength']) +class LazyBSONObjectSpecification extends Specification { + + def setupSpec() { + Map.metaClass.bitwiseNegate = { new BasicBSONObject(delegate) } + Pattern.metaClass.equals = { Pattern other -> + delegate.pattern() == other.pattern() && delegate.flags() == other.flags() + } + } + + @Unroll + def 'should read #type'() { + given: + def lazyBSONObject = new LazyBSONObject(bytes as byte[], new LazyBSONCallback()) + + expect: + value == lazyBSONObject.get('f') + lazyBSONObject.keySet().contains('f') + + where: + value | bytes + -1.01 | [16, 0, 0, 0, 1, 102, 0, 41, 92, -113, -62, -11, 40, -16, -65, 0] + Float.MIN_VALUE | [16, 0, 0, 0, 1, 102, 0, 0, 0, 0, 0, 0, 0, -96, 54, 0] + Double.MAX_VALUE | [16, 0, 0, 0, 1, 102, 0, -1, -1, -1, -1, -1, -1, -17, 127, 0] + 0.0 | [16, 0, 0, 0, 1, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + '' | [13, 0, 0, 0, 2, 102, 0, 1, 0, 0, 0, 0, 0] + 'danke' | [18, 0, 0, 0, 2, 102, 0, 6, 0, 0, 0, 100, 97, 110, 107, 101, 0, 0] + ',+\\\"<>;[]{}@#$%^&*()+_' | [35, 0, 0, 0, 2, 102, 0, 23, 0, 0, 0, 44, 43, 92, 34, 60, 62, 59, 91, 93, 123, 125, 64, 35, 36, 37, 94, 38, 42, 40, 41, 43, 95, 0, 0] + 'a\u00e9\u3042\u0430\u0432\u0431\u0434' | [27, 0, 0, 0, 2, 102, 0, 15, 0, 0, 0, 97, -61, -87, -29, -127, -126, -48, -80, -48, -78, -48, -79, -48, -76, 0, 0] + new LazyBSONObject([5, 0, 0, 0, 0] as byte[], new LazyBSONCallback()) | [13, 0, 0, 0, 3, 102, 0, 5, 0, 0, 0, 0, 0] + [] | [13, 0, 0, 0, 4, 102, 0, 5, 0, 0, 0, 0, 0] + [1, 2, 3] as int[] | [34, 0, 0, 0, 4, 102, 0, 26, 0, 0, 0, 16, 48, 0, 1, 0, 0, 0, 16, 49, 0, 2, 0, 0, 0, 16, 50, 0, 3, 0, 0, 0, 0, 0] + [[]] | [21, 0, 0, 0, 4, 102, 0, 13, 0, 0, 0, 4, 48, 0, 5, 0, 0, 0, 0, 0, 0] + new Binary((byte) 0x01, (byte[]) [115, 116, 11]) | [16, 0, 0, 0, 5, 102, 0, 3, 0, 0, 0, 1, 115, 116, 11, 0] + new Binary((byte) 0x03, (byte[]) [115, 116, 11]) | [16, 0, 0, 0, 5, 102, 0, 3, 0, 0, 0, 3, 115, 116, 11, 0] + new Binary((byte) 0x04, (byte[]) [115, 116, 11]) | [16, 0, 0, 0, 5, 102, 0, 3, 0, 0, 0, 4, 115, 116, 11, 0] + [13, 12] as byte[] | [15, 0, 0, 0, 5, 102, 0, 2, 0, 0, 0, 0, 13, 12, 0] + [102, 111, 111] as byte[] | [16, 0, 0, 0, 5, 102, 0, 3, 0, 0, 0, 0, 102, 111, 111, 0] + new ObjectId('50d3332018c6a1d8d1662b61') | [20, 0, 0, 0, 7, 102, 0, 80, -45, 51, 32, 24, -58, -95, -40, -47, 102, 43, 97, 0] + true | [9, 0, 0, 0, 8, 102, 0, 1, 0] + false | [9, 0, 0, 0, 8, 102, 0, 0, 0] + new Date(582163200) | [16, 0, 0, 0, 9, 102, 0, 0, 27, -77, 34, 0, 0, 0, 0, 0] + null | [8, 0, 0, 0, 10, 102, 0, 0] + null | [8, 0, 0, 0, 6, 102, 0, 0] + Pattern.compile('[a]*', Pattern.CASE_INSENSITIVE) | [15, 0, 0, 0, 11, 102, 0, 91, 97, 93, 42, 0, 105, 0, 0] + new Code('var i = 0') | [22, 0, 0, 0, 13, 102, 0, 10, 0, 0, 0, 118, 97, 114, 32, 105, 32, 61, 32, 48, 0, 0] + new Symbol('c') | [14, 0, 0, 0, 14, 102, 0, 2, 0, 0, 0, 99, 0, 0] + new CodeWScope('i++', ~['x': 1]) | [32, 0, 0, 0, 15, 102, 0, 24, 0, 0, 0, 4, 0, 0, 0, 105, 43, 43, 0, 12, 0, 0, 0, 16, 120, 0, 1, 0, 0, 
0, 0, 0] + -12 | [12, 0, 0, 0, 16, 102, 0, -12, -1, -1, -1, 0] + Integer.MIN_VALUE | [12, 0, 0, 0, 16, 102, 0, 0, 0, 0, -128, 0] + 0 | [12, 0, 0, 0, 16, 102, 0, 0, 0, 0, 0, 0] + new BSONTimestamp(123999401, 44332) | [16, 0, 0, 0, 17, 102, 0, 44, -83, 0, 0, -87, 20, 100, 7, 0] + Long.MAX_VALUE | [16, 0, 0, 0, 18, 102, 0, -1, -1, -1, -1, -1, -1, -1, 127, 0] + new MinKey() | [8, 0, 0, 0, -1, 102, 0, 0] + new MaxKey() | [8, 0, 0, 0, 127, 102, 0, 0] + Decimal128.parse('0E-6176') | [24, 0, 0, 0, 19, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + + type = BsonType.findByValue(bytes[4]) + } + + @Unroll + def 'should read value of #value'() { + given: + def bsonDocument = new BsonDocument('name', value) + def callback = new BasicBSONCallback() + new BasicBSONDecoder().decode(toBson(bsonDocument).array(), callback) + def dbObject = callback.get() as BasicBSONObject + def lazyBSONObject = new LazyBSONObject(toBson(bsonDocument).array(), new LazyBSONCallback()) + + expect: + lazyBSONObject.keySet().contains('name') + + when: + def expectedValue + if (value.bsonType == UNDEFINED) { + expectedValue = null + } else if (value.bsonType == SYMBOL) { + expectedValue = new Symbol(((BsonSymbol) value).getSymbol()) + } else { + expectedValue = dbObject.get('name') + } + + then: + expectedValue == lazyBSONObject.get('name') + + where: + value << valuesOfEveryType() + } + + def 'should have nested items as lazy'() { + given: + byte[] bytes = [ + 53, 0, 0, 0, 4, 97, 0, 26, 0, 0, 0, 16, 48, 0, 1, 0, 0, 0, 16, 49, 0, 2, 0, 0, 0, 16, 50, 0, + 3, 0, 0, 0, 0, 3, 111, 0, 16, 0, 0, 0, 1, 122, 0, -102, -103, -103, -103, -103, -103, -71, 63, 0, 0 + ] + + when: + LazyBSONObject document = new LazyBSONObject(bytes, new LazyBSONCallback()) + + then: + document.get('a') instanceof LazyBSONList + document.get('o') instanceof LazyBSONObject + } + + def 'should not understand DBRefs'() { + given: + byte[] bytes = [ + 44, 0, 0, 0, 3, 102, 0, 36, 0, 0, 0, 2, 36, 114, 101, 102, + 0, 4, 0, 0, 0, 97, 46, 98, 0, 7, 36, 105, 100, 0, 18, 52, + 86, 120, -112, 18, 52, 86, 120, -112, 18, 52, 0, 0, + ] + + when: + LazyBSONObject document = new LazyBSONObject(bytes, new LazyBSONCallback()) + + then: + document.get('f') instanceof LazyBSONObject + document.get('f').keySet() == ['$ref', '$id'] as Set + } + + def 'should retain fields order'() { + given: + byte[] bytes = [ + 47, 0, 0, 0, 16, 97, 0, 1, 0, 0, 0, 16, 98, 0, 2, 0, 0, 0, 16, 100, 0, 3, 0, 0, + 0, 16, 99, 0, 4, 0, 0, 0, 16, 101, 0, 5, 0, 0, 0, 16, 48, 0, 6, 0, 0, 0, 0 + ] + + when: + Iterator iterator = new LazyBSONObject(bytes, new LazyBSONCallback()).keySet().iterator() + + then: + iterator.next() == 'a' + iterator.next() == 'b' + iterator.next() == 'd' + iterator.next() == 'c' + iterator.next() == 'e' + iterator.next() == '0' + !iterator.hasNext() + } + + def 'should be able to compare itself to others'() { + given: + byte[] bytes = [ + 39, 0, 0, 0, 3, 97, 0, + 14, 0, 0, 0, 2, 120, 0, 2, 0, 0, 0, 121, 0, 0, + 3, 98, 0, + 14, 0, 0, 0, 2, 120, 0, 2, 0, 0, 0, 121, 0, 0, + 0 + ] + + when: + def bsonObject1 = new LazyBSONObject(bytes, new LazyBSONCallback()) + def bsonObject2 = new LazyBSONObject(bytes, new LazyBSONCallback()) + def bsonObject3 = new LazyBSONObject(bytes, 7, new LazyBSONCallback()) + def bsonObject4 = new LazyBSONObject(bytes, 24, new LazyBSONCallback()) + def bsonObject5 = new LazyBSONObject([14, 0, 0, 0, 2, 120, 0, 2, 0, 0, 0, 121, 0, 0] as byte[], new LazyBSONCallback()) + def bsonObject6 = new LazyBSONObject([5, 0, 0, 0, 0] as byte[], new 
LazyBSONCallback()) + + then: + bsonObject1.equals(bsonObject1) + !bsonObject1.equals(null) + !bsonObject1.equals('not equal') + bsonObject1.equals(bsonObject2) + bsonObject3.equals(bsonObject4) + !bsonObject1.equals(bsonObject3) + bsonObject4.equals(bsonObject5) + !bsonObject1.equals(bsonObject6) + + bsonObject1.hashCode() == bsonObject2.hashCode() + bsonObject3.hashCode() == bsonObject4.hashCode() + bsonObject1.hashCode() != bsonObject3.hashCode() + bsonObject4.hashCode() == bsonObject5.hashCode() + bsonObject1.hashCode() != bsonObject6.hashCode() + } + + def 'should return the size of a document'() { + given: + byte[] bytes = [12, 0, 0, 0, 16, 97, 0, 1, 0, 0, 0, 0] + + when: + LazyBSONObject document = new LazyBSONObject(bytes, new LazyBSONCallback()) + + then: + document.getBSONSize() == 12 + } + + def 'should understand that object is empty'() { + given: + byte[] bytes = [5, 0, 0, 0, 0] + + when: + LazyBSONObject document = new LazyBSONObject(bytes, new LazyBSONCallback()) + + then: + document.isEmpty() + } + + def 'should implement Map.keySet()'() { + given: + byte[] bytes = [16, 0, 0, 0, 16, 97, 0, 1, 0, 0, 0, 8, 98, 0, 1, 0] + + when: + LazyBSONObject document = new LazyBSONObject(bytes, new LazyBSONCallback()) + + then: + document.containsField('a') + !document.containsField('z') + document.get('z') == null + document.keySet() == ['a', 'b'] as Set + } + + def 'should implement Map.entrySet()'() { + given: + byte[] bytes = [16, 0, 0, 0, 16, 97, 0, 1, 0, 0, 0, 8, 98, 0, 1, 0] + LazyBSONObject document = new LazyBSONObject(bytes, new LazyBSONCallback()) + + when: + def entrySet = document.entrySet() + + then: + entrySet.size() == 2 + !entrySet.isEmpty() + entrySet.contains(new AbstractMap.SimpleImmutableEntry('a', 1)) + !entrySet.contains(new AbstractMap.SimpleImmutableEntry('a', 2)) + entrySet.containsAll([new AbstractMap.SimpleImmutableEntry('a', 1), new AbstractMap.SimpleImmutableEntry('b', true)]) + !entrySet.containsAll([new AbstractMap.SimpleImmutableEntry('a', 1), new AbstractMap.SimpleImmutableEntry('b', false)]) + entrySet.toArray() == [new AbstractMap.SimpleImmutableEntry('a', 1), new AbstractMap.SimpleImmutableEntry('b', true)].toArray() + entrySet.toArray(new Map.Entry[2]) == + [new AbstractMap.SimpleImmutableEntry('a', 1), new AbstractMap.SimpleImmutableEntry('b', true)].toArray() + + when: + def iterator = entrySet.iterator() + + then: + iterator.hasNext() + iterator.next() == new AbstractMap.SimpleImmutableEntry('a', 1) + iterator.hasNext() + iterator.next() == new AbstractMap.SimpleImmutableEntry('b', true) + !iterator.hasNext() + + when: + entrySet.add(new AbstractMap.SimpleImmutableEntry('key', null)) + + then: + thrown(UnsupportedOperationException) + + when: + entrySet.addAll([new AbstractMap.SimpleImmutableEntry('key', null)]) + + then: + thrown(UnsupportedOperationException) + + when: + entrySet.clear() + + then: + thrown(UnsupportedOperationException) + + when: + entrySet.remove(new AbstractMap.SimpleImmutableEntry('key', null)) + + then: + thrown(UnsupportedOperationException) + + when: + entrySet.removeAll([new AbstractMap.SimpleImmutableEntry('key', null)]) + + then: + thrown(UnsupportedOperationException) + + when: + entrySet.retainAll([new AbstractMap.SimpleImmutableEntry('key', null)]) + + then: + thrown(UnsupportedOperationException) + } + + def 'should throw on modification'() { + given: + LazyBSONObject document = new LazyBSONObject( + [16, 0, 0, 0, 16, 97, 0, 1, 0, 0, 0, 8, 98, 0, 1, 0] as byte[], + new LazyBSONCallback() + ) + + when: + 
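+ // keySet() is an unmodifiable view, so adding to it should fail just like the direct mutators tried below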
document.keySet().add('c') + + then: + thrown(UnsupportedOperationException) + + when: + document.put('c', 2) + + then: + thrown(UnsupportedOperationException) + + when: + document.removeField('a') + + then: + thrown(UnsupportedOperationException) + + when: + document.toMap().put('a', 22) + + then: + thrown(UnsupportedOperationException) + } + + def 'should pipe to stream'() { + given: + byte[] bytes = [16, 0, 0, 0, 16, 97, 0, 1, 0, 0, 0, 8, 98, 0, 1, 0] + LazyBSONObject document = new LazyBSONObject(bytes, new LazyBSONCallback()) + ByteArrayOutputStream baos = new ByteArrayOutputStream() + + when: + document.pipe(baos) + + then: + bytes == baos.toByteArray() + } +} diff --git a/bson/src/test/unit/org/bson/LimitedLookaheadMarkSpecification.groovy b/bson/src/test/unit/org/bson/LimitedLookaheadMarkSpecification.groovy new file mode 100644 index 00000000000..5a859c396eb --- /dev/null +++ b/bson/src/test/unit/org/bson/LimitedLookaheadMarkSpecification.groovy @@ -0,0 +1,262 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson + +import org.bson.io.BasicOutputBuffer +import org.bson.io.ByteBufferBsonInput +import org.bson.json.JsonMode +import org.bson.json.JsonReader +import org.bson.json.JsonWriter +import org.bson.json.JsonWriterSettings +import spock.lang.Specification + +@SuppressWarnings('UnnecessaryObjectReferences') +class LimitedLookaheadMarkSpecification extends Specification { + + + def 'Lookahead should work at various states with Mark'(BsonWriter writer, boolean useAlternateReader) { + given: + writer.with { + writeStartDocument() + writeInt64('int64', 52L) + writeStartArray('array') + writeInt32(1) + writeInt64(2L) + writeStartArray() + writeInt32(3) + writeInt32(4) + writeEndArray() + writeStartDocument() + writeInt32('a', 5) + writeEndDocument() + writeNull() + writeEndArray() + writeStartDocument('document') + writeInt32('a', 6) + writeEndDocument() + writeEndDocument() + } + + + when: + BsonReader reader + BsonReaderMark mark + if (writer instanceof BsonDocumentWriter) { + reader = new BsonDocumentReader(writer.document) + } else if (writer instanceof BsonBinaryWriter) { + BasicOutputBuffer buffer = (BasicOutputBuffer) writer.getBsonOutput() + reader = new BsonBinaryReader(new ByteBufferBsonInput(buffer.getByteBuffers().get(0))) + } else if (writer instanceof JsonWriter) { + if (useAlternateReader) { + reader = new JsonReader(new InputStreamReader(new ByteArrayInputStream(writer.writer.toString().getBytes()))) + } else { + reader = new JsonReader(writer.writer.toString()) + } + } + + reader.readStartDocument() + // mark beginning of document * 1 + mark = reader.getMark() + + then: + reader.readName() == 'int64' + reader.readInt64() == 52L + reader.readStartArray() + + when: + // reset to beginning of document * 2 + mark.reset() + // mark beginning of document * 2 + mark = reader.getMark() + + then: + reader.readName() == 'int64' + reader.readInt64() == 52L + + when: + // make sure it's possible to reset 
to a mark after getting a new mark + reader.getMark() + // reset to beginning of document * 3 + mark.reset() + // mark beginning of document * 3 + mark = reader.getMark() + + then: + reader.readName() == 'int64' + reader.readInt64() == 52L + reader.readName() == 'array' + reader.readStartArray() + reader.readInt32() == 1 + reader.readInt64() == 2 + reader.readStartArray() + reader.readInt32() == 3 + reader.readInt32() == 4 + reader.readEndArray() + reader.readStartDocument() + reader.readName() == 'a' + reader.readInt32() == 5 + reader.readEndDocument() + reader.readNull() + reader.readEndArray() + reader.readName() == 'document' + reader.readStartDocument() + reader.readName() == 'a' + reader.readInt32() == 6 + reader.readEndDocument() + reader.readEndDocument() + + when: + // read entire document, reset to beginning + mark.reset() + + then: + reader.readName() == 'int64' + reader.readInt64() == 52L + reader.readName() == 'array' + + when: + // mark in outer-document * 1 + mark = reader.getMark() + + then: + reader.readStartArray() + reader.readInt32() == 1 + reader.readInt64() == 2 + reader.readStartArray() + + when: + // reset in sub-document * 1 + mark.reset() + // mark in outer-document * 2 + mark = reader.getMark() + + then: + reader.readStartArray() + reader.readInt32() == 1 + reader.readInt64() == 2 + reader.readStartArray() + reader.readInt32() == 3 + + when: + // reset in sub-document * 2 + mark.reset() + + then: + reader.readStartArray() + reader.readInt32() == 1 + reader.readInt64() == 2 + reader.readStartArray() + reader.readInt32() == 3 + reader.readInt32() == 4 + + when: + // mark in sub-document * 1 + mark = reader.getMark() + + then: + reader.readEndArray() + reader.readStartDocument() + reader.readName() == 'a' + reader.readInt32() == 5 + reader.readEndDocument() + reader.readNull() + reader.readEndArray() + + when: + // reset in outer-document * 1 + mark.reset() + // mark in sub-document * 2 + mark = reader.getMark() + + then: + reader.readEndArray() + reader.readStartDocument() + reader.readName() == 'a' + reader.readInt32() == 5 + reader.readEndDocument() + reader.readNull() + reader.readEndArray() + + when: + // reset in out-document * 2 + mark.reset() + + then: + reader.readEndArray() + reader.readStartDocument() + reader.readName() == 'a' + reader.readInt32() == 5 + reader.readEndDocument() + reader.readNull() + reader.readEndArray() + reader.readName() == 'document' + reader.readStartDocument() + reader.readName() == 'a' + reader.readInt32() == 6 + reader.readEndDocument() + reader.readEndDocument() + + where: + writer | useAlternateReader + new BsonDocumentWriter(new BsonDocument()) | false + new BsonBinaryWriter(new BasicOutputBuffer()) | false + new JsonWriter(new StringWriter(), JsonWriterSettings.builder().outputMode(JsonMode.STRICT).build()) | false + new JsonWriter(new StringWriter(), JsonWriterSettings.builder().outputMode(JsonMode.STRICT).build()) | true + } + + def 'should peek binary subtype and size'(BsonWriter writer) { + given: + writer.with { + writeStartDocument() + writeBinaryData('binary', new BsonBinary(BsonBinarySubType.UUID_LEGACY, new byte[16])) + writeInt64('int64', 52L) + writeEndDocument() + } + + when: + BsonReader reader + if (writer instanceof BsonDocumentWriter) { + reader = new BsonDocumentReader(writer.document) + } else if (writer instanceof BsonBinaryWriter) { + BasicOutputBuffer buffer = (BasicOutputBuffer) writer.getBsonOutput() + reader = new BsonBinaryReader(new ByteBufferBsonInput(buffer.getByteBuffers().get(0))) + } else if 
(writer instanceof JsonWriter) { + reader = new JsonReader(writer.writer.toString()) + } + + reader.readStartDocument() + reader.readName() + def subType = reader.peekBinarySubType() + def size = reader.peekBinarySize() + def binary = reader.readBinaryData() + def longValue = reader.readInt64('int64') + reader.readEndDocument() + + then: + subType == BsonBinarySubType.UUID_LEGACY.value + size == 16 + binary == new BsonBinary(BsonBinarySubType.UUID_LEGACY, new byte[16]) + longValue == 52L + + where: + writer << [ + new BsonDocumentWriter(new BsonDocument()), + new BsonBinaryWriter(new BasicOutputBuffer()), + new JsonWriter(new StringWriter(), JsonWriterSettings.builder().outputMode(JsonMode.STRICT).build()) + ] + } +} diff --git a/bson/src/test/unit/org/bson/RawBsonArraySpecification.groovy b/bson/src/test/unit/org/bson/RawBsonArraySpecification.groovy new file mode 100644 index 00000000000..27a7dcbbaa7 --- /dev/null +++ b/bson/src/test/unit/org/bson/RawBsonArraySpecification.groovy @@ -0,0 +1,402 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson + +import org.bson.codecs.BsonDocumentCodec +import spock.lang.Specification + +import java.nio.ByteOrder + +import static java.util.Arrays.asList +import static util.GroovyHelpers.areEqual + +class RawBsonArraySpecification extends Specification { + + static BsonArray emptyBsonArray = new BsonArray() + static RawBsonArray emptyRawBsonArray = new RawBsonDocument(new BsonDocument('a', emptyBsonArray), new BsonDocumentCodec()).get('a') + static BsonArray bsonArray = new BsonArray(asList(new BsonInt32(1), new BsonInt32(2), new BsonDocument('x', BsonBoolean.TRUE), + new BsonArray(asList(new BsonDocument('y', BsonBoolean.FALSE), new BsonArray(asList(new BsonInt32(1))))))) + + def 'constructors should throw if parameters are invalid'() { + when: + new RawBsonArray(null) + + then: + thrown(IllegalArgumentException) + + when: + new RawBsonArray(null, 0, 5) + + then: + thrown(IllegalArgumentException) + + when: + new RawBsonArray(new byte[5], -1, 5) + + then: + thrown(IllegalArgumentException) + + when: + new RawBsonArray(new byte[5], 5, 5) + + then: + thrown(IllegalArgumentException) + + when: + new RawBsonArray(new byte[5], 0, 0) + + then: + thrown(IllegalArgumentException) + + when: + new RawBsonArray(new byte[10], 6, 5) + + then: + thrown(IllegalArgumentException) + } + + def 'byteBuffer should contain the correct bytes'() { + when: + def byteBuf = rawBsonArray.getByteBuffer() + + then: + rawBsonArray == bsonArray + byteBuf.asNIO().order() == ByteOrder.LITTLE_ENDIAN + byteBuf.remaining() == 66 + + when: + def actualBytes = new byte[66] + byteBuf.get(actualBytes) + + then: + actualBytes == getBytesFromBsonArray() + + where: + rawBsonArray << createRawBsonArrayVariants() + } + + def 'contains should find existing values'() { + expect: + rawBsonArray.contains( bsonArray.get(0) ) + rawBsonArray.contains( bsonArray.get(1) ) + rawBsonArray.contains( bsonArray.get(2) ) + 
rawBsonArray.contains( bsonArray.get(3) ) + + where: + rawBsonArray << createRawBsonArrayVariants() + } + + def 'containsAll should return true if contains all'() { + expect: + rawBsonArray.containsAll(bsonArray.getValues()) + + where: + rawBsonArray << createRawBsonArrayVariants() + } + + def 'should return RawBsonDocument for sub documents and RawBsonArray for arrays'() { + expect: + rawBsonArray.get(0) instanceof BsonInt32 + rawBsonArray.get(1) instanceof BsonInt32 + rawBsonArray.get(2) instanceof RawBsonDocument + rawBsonArray.get(3) instanceof RawBsonArray + rawBsonArray.get(3).asArray().get(0) instanceof RawBsonDocument + rawBsonArray.get(3).asArray().get(1) instanceof RawBsonArray + + and: + rawBsonArray.get(2).getBoolean('x').value + !rawBsonArray.get(3).asArray().get(0).asDocument().getBoolean('y').value + rawBsonArray.get(3).asArray().get(1).asArray().get(0).asInt32().value == 1 + + where: + rawBsonArray << createRawBsonArrayVariants() + } + + + def 'get should throw if index out of bounds'() { + when: + rawBsonArray.get(-1) + + then: + thrown(IndexOutOfBoundsException) + + when: + rawBsonArray.get(5) + + then: + thrown(IndexOutOfBoundsException) + + where: + rawBsonArray << createRawBsonArrayVariants() + } + + def 'isEmpty should return false when the BsonArray is not empty'() { + expect: + !rawBsonArray.isEmpty() + + where: + rawBsonArray << createRawBsonArrayVariants() + } + + def 'isEmpty should return true when the BsonArray is empty'() { + expect: + emptyRawBsonArray.isEmpty() + } + + def 'should get correct size when the BsonArray is empty'() { + expect: + emptyRawBsonArray.size() == 0 + } + + def 'should get correct values set when the BsonArray is empty'() { + expect: + emptyRawBsonArray.getValues().isEmpty() + } + + def 'should get correct size'() { + expect: + rawBsonArray.size() == 4 + + where: + rawBsonArray << createRawBsonArrayVariants() + } + + def 'should get correct values set'() { + expect: + rawBsonArray.getValues() == bsonArray.getValues() + + where: + rawBsonArray << createRawBsonArrayVariants() + } + + def 'all write methods should throw UnsupportedOperationException'() { + given: + def rawBsonArray = createRawBsonArrayFromBsonArray() + + when: + rawBsonArray.clear() + + then: + thrown(UnsupportedOperationException) + + when: + rawBsonArray.add(BsonNull.VALUE) + + then: + thrown(UnsupportedOperationException) + + when: + rawBsonArray.add(1, BsonNull.VALUE) + + then: + thrown(UnsupportedOperationException) + + when: + rawBsonArray.addAll([BsonNull.VALUE]) + + then: + thrown(UnsupportedOperationException) + + when: + rawBsonArray.addAll(1, [BsonNull.VALUE]) + + then: + thrown(UnsupportedOperationException) + + when: + rawBsonArray.remove(BsonNull.VALUE) + + then: + thrown(UnsupportedOperationException) + + when: + rawBsonArray.remove(1) + + then: + thrown(UnsupportedOperationException) + + when: + rawBsonArray.removeAll([BsonNull.VALUE]) + + then: + thrown(UnsupportedOperationException) + + when: + rawBsonArray.retainAll([BsonNull.VALUE]) + + then: + thrown(UnsupportedOperationException) + + when: + rawBsonArray.set(0, BsonNull.VALUE) + + then: + thrown(UnsupportedOperationException) + } + + def 'should find the indexOf a value'() { + expect: + rawBsonArray.indexOf(bsonArray.get(2)) == 2 + + where: + rawBsonArray << createRawBsonArrayVariants() + } + + def 'should find the lastIndexOf a value'() { + when: + RawBsonArray rawBsonArray = RawBsonDocument.parse('{a: [1, 2, 3, 1]}').get('a') + + then: + 
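+ // the value 1 occurs at indices 0 and 3, so lastIndexOf should report the later occurrence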
rawBsonArray.lastIndexOf(rawBsonArray.get(0)) == 3 + } + + + def 'should return a valid iterator for empty Bson Arrays'() { + when: + def iterator = emptyRawBsonArray.iterator() + + then: + !iterator.hasNext() + !iterator.hasNext() + } + + def 'should return a listIterator'() { + when: + RawBsonArray rawBsonArray = RawBsonDocument.parse('{a: [1, 2, 3, 1]}').get('a') + + then: + rawBsonArray.listIterator().toList() == rawBsonArray.getValues() + } + + def 'should return a listIterator with index'() { + when: + RawBsonArray rawBsonArray = RawBsonDocument.parse('{a: [1, 2, 3, 1]}').get('a') + + then: + rawBsonArray.listIterator(1).toList() == rawBsonArray.getValues().subList(1, 4) + } + + def 'should iterate forwards and backwards through a list iterator'() { + when: + RawBsonArray rawBsonArray = RawBsonDocument.parse('{a: [1, 2, 3, 4]}').get('a') + def iter = rawBsonArray.listIterator() + + then: + iter.next() == new BsonInt32(1) + iter.previous() == new BsonInt32(1) + iter.next() == new BsonInt32(1) + iter.next() == new BsonInt32(2) + iter.previous() == new BsonInt32(2) + iter.previous() == new BsonInt32(1) + + when: + iter.previous() + + then: + thrown(NoSuchElementException) + } + + def 'should return a sublist'() { + when: + RawBsonArray rawBsonArray = RawBsonDocument.parse('{a: [1, 2, 3, 1]}').get('a') + + then: + rawBsonArray.subList(2, 3).toList() == rawBsonArray.getValues().subList(2, 3) + } + + def 'hashCode should equal hash code of identical BsonArray'() { + expect: + rawBsonArray.hashCode() == bsonArray.hashCode() + + where: + rawBsonArray << createRawBsonArrayVariants() + } + + def 'equals should equal identical BsonArray'() { + expect: + areEqual(rawBsonArray, bsonArray) + areEqual(bsonArray, rawBsonArray) + areEqual(rawBsonArray, rawBsonArray) + !areEqual(rawBsonArray, emptyRawBsonArray) + + where: + rawBsonArray << createRawBsonArrayVariants() + } + + def 'clone should make a deep copy'() { + when: + RawBsonArray cloned = rawBsonArray.clone() + + then: + !cloned.getByteBuffer().array().is(rawBsonArray.getByteBuffer().array()) + cloned.getByteBuffer().remaining() == rawBsonArray.getByteBuffer().remaining() + cloned == createRawBsonArrayFromBsonArray() + + where: + rawBsonArray << createRawBsonArrayVariants() + } + + def 'should serialize and deserialize'() { + given: + def baos = new ByteArrayOutputStream() + def oos = new ObjectOutputStream(baos) + + when: + oos.writeObject(localRawDocument) + def bais = new ByteArrayInputStream(baos.toByteArray()) + def ois = new ObjectInputStream(bais) + def deserializedDocument = ois.readObject() + + then: + bsonArray == deserializedDocument + + where: + localRawDocument << createRawBsonArrayVariants() + } + + private static List createRawBsonArrayVariants() { + [ + createRawBsonArrayFromBsonArray(), + createRawBsonArrayFromByteArray(), + createRawBsonArrayFromByteArrayOffsetLength() + ] + } + + private static RawBsonArray createRawBsonArrayFromBsonArray() { + (RawBsonArray) new RawBsonDocument(new BsonDocument('a', bsonArray), new BsonDocumentCodec()).get('a') + } + + + private static byte[] getBytesFromBsonArray() { + def byteBuffer = createRawBsonArrayFromBsonArray().byteBuffer + byte[] strippedBytes = new byte[byteBuffer.remaining()] + byteBuffer.get(strippedBytes) + strippedBytes + } + + private static RawBsonArray createRawBsonArrayFromByteArray() { + new RawBsonArray(getBytesFromBsonArray()) + } + + private static RawBsonArray createRawBsonArrayFromByteArrayOffsetLength() { + def strippedBytes = 
getBytesFromBsonArray() + byte[] unstrippedBytes = new byte[strippedBytes.length + 2] + System.arraycopy(strippedBytes, 0, unstrippedBytes, 1, strippedBytes.length) + new RawBsonArray(unstrippedBytes, 1, strippedBytes.length) + } +} diff --git a/bson/src/test/unit/org/bson/RawBsonDocumentSpecification.groovy b/bson/src/test/unit/org/bson/RawBsonDocumentSpecification.groovy new file mode 100644 index 00000000000..a23ec06dedb --- /dev/null +++ b/bson/src/test/unit/org/bson/RawBsonDocumentSpecification.groovy @@ -0,0 +1,494 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson + +import org.bson.codecs.BsonDocumentCodec +import org.bson.codecs.DecoderContext +import org.bson.codecs.DocumentCodec +import org.bson.codecs.EncoderContext +import org.bson.codecs.RawBsonDocumentCodec +import org.bson.io.BasicOutputBuffer +import org.bson.json.JsonMode +import org.bson.json.JsonReader +import org.bson.json.JsonWriter +import org.bson.json.JsonWriterSettings +import spock.lang.Specification + +import java.nio.ByteOrder + +import static java.util.Arrays.asList +import static util.GroovyHelpers.areEqual + +class RawBsonDocumentSpecification extends Specification { + + static emptyDocument = new BsonDocument() + static emptyRawDocument = new RawBsonDocument(emptyDocument, new BsonDocumentCodec()) + static document = new BsonDocument() + .append('a', new BsonInt32(1)) + .append('b', new BsonInt32(2)) + .append('c', new BsonDocument('x', BsonBoolean.TRUE)) + .append('d', new BsonArray(asList(new BsonDocument('y', BsonBoolean.FALSE), new BsonArray(asList(new BsonInt32(1)))))) + + def 'constructors should throw if parameters are invalid'() { + when: + new RawBsonDocument(null) + + then: + thrown(IllegalArgumentException) + + when: + new RawBsonDocument(null, 0, 5) + + then: + thrown(IllegalArgumentException) + + when: + new RawBsonDocument(new byte[5], -1, 5) + + then: + thrown(IllegalArgumentException) + + when: + new RawBsonDocument(new byte[5], 5, 5) + + then: + thrown(IllegalArgumentException) + + when: + new RawBsonDocument(new byte[5], 0, 0) + + then: + thrown(IllegalArgumentException) + + when: + new RawBsonDocument(new byte[10], 6, 5) + + then: + thrown(IllegalArgumentException) + + when: + new RawBsonDocument(null, new DocumentCodec()) + + then: + thrown(IllegalArgumentException) + + when: + new RawBsonDocument(new Document(), null) + + then: + thrown(IllegalArgumentException) + } + + def 'byteBuffer should contain the correct bytes'() { + when: + def byteBuf = rawDocument.getByteBuffer() + + then: + rawDocument == document + byteBuf.asNIO().order() == ByteOrder.LITTLE_ENDIAN + byteBuf.remaining() == 66 + + when: + def actualBytes = new byte[66] + byteBuf.get(actualBytes) + + then: + actualBytes == getBytesFromDocument() + + where: + rawDocument << createRawDocumentVariants() + } + + def 'parse should throw if parameter is invalid'() { + when: + RawBsonDocument.parse(null) + + then: + thrown(IllegalArgumentException) + } + 
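+ // parse should produce raw bytes that compare equal to the logically identical BsonDocument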
+ def 'should parse json'() { + expect: + RawBsonDocument.parse('{a : 1}') == new BsonDocument('a', new BsonInt32(1)) + } + + def 'containsKey should throw if the key name is null'() { + when: + rawDocument.containsKey(null) + + then: + thrown(IllegalArgumentException) + + where: + rawDocument << createRawDocumentVariants() + } + + def 'containsKey should find an existing key'() { + expect: + rawDocument.containsKey('a') + rawDocument.containsKey('b') + rawDocument.containsKey('c') + rawDocument.containsKey('d') + + where: + rawDocument << createRawDocumentVariants() + } + + def 'containsKey should not find a non-existing key'() { + expect: + !rawDocument.containsKey('e') + !rawDocument.containsKey('x') + !rawDocument.containsKey('y') + rawDocument.get('e') == null + rawDocument.get('x') == null + rawDocument.get('y') == null + + where: + rawDocument << createRawDocumentVariants() + } + + def 'should return RawBsonDocument for sub documents and RawBsonArray for arrays'() { + expect: + rawDocument.get('a') instanceof BsonInt32 + rawDocument.get('b') instanceof BsonInt32 + rawDocument.get('c') instanceof RawBsonDocument + rawDocument.get('d') instanceof RawBsonArray + rawDocument.get('d').asArray().get(0) instanceof RawBsonDocument + rawDocument.get('d').asArray().get(1) instanceof RawBsonArray + + and: + rawDocument.getDocument('c').getBoolean('x').value + !rawDocument.get('d').asArray().get(0).asDocument().getBoolean('y').value + rawDocument.get('d').asArray().get(1).asArray().get(0).asInt32().value == 1 + + where: + rawDocument << createRawDocumentVariants() + } + + def 'containsValue should find an existing value'() { + expect: + rawDocument.containsValue(document.get('a')) + rawDocument.containsValue(document.get('b')) + rawDocument.containsValue(document.get('c')) + rawDocument.containsValue(document.get('d')) + + where: + rawDocument << createRawDocumentVariants() + } + + def 'containsValue should not find a non-existing value'() { + expect: + !rawDocument.containsValue(new BsonInt32(3)) + !rawDocument.containsValue(new BsonDocument('e', BsonBoolean.FALSE)) + !rawDocument.containsValue(new BsonArray(asList(new BsonInt32(2), new BsonInt32(4)))) + + where: + rawDocument << createRawDocumentVariants() + } + + def 'isEmpty should return false when the document is not empty'() { + expect: + !rawDocument.isEmpty() + + where: + rawDocument << createRawDocumentVariants() + } + + def 'isEmpty should return true when the document is empty'() { + expect: + emptyRawDocument.isEmpty() + } + + def 'should get correct size when the document is empty'() { + expect: + emptyRawDocument.size() == 0 + } + + def 'should get correct key set when the document is empty'() { + expect: + emptyRawDocument.keySet().isEmpty() + } + + def 'should get correct values set when the document is empty'() { + expect: + emptyRawDocument.values().isEmpty() + } + + def 'should get correct entry set when the document is empty'() { + expect: + emptyRawDocument.entrySet().isEmpty() + } + + def 'should get correct size'() { + expect: + rawDocument.size() == 4 + + where: + rawDocument << createRawDocumentVariants() + } + + def 'should get correct key set'() { + expect: + rawDocument.keySet() == ['a', 'b', 'c', 'd'] as Set + + where: + rawDocument << createRawDocumentVariants() + } + + def 'should get correct values set'() { + expect: + rawDocument.values() as Set == [document.get('a'), document.get('b'), document.get('c'), document.get('d')] as Set + + where: + rawDocument << createRawDocumentVariants() + } + + 
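+ // compared as a Set of Map.Entry instances, so entry order is not significant here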
def 'should get correct entry set'() { + expect: + rawDocument.entrySet() == [new TestEntry('a', document.get('a')), + new TestEntry('b', document.get('b')), + new TestEntry('c', document.get('c')), + new TestEntry('d', document.get('d'))] as Set + + where: + rawDocument << createRawDocumentVariants() + } + + def 'should get first key'() { + expect: + rawDocument.getFirstKey() == 'a' + + where: + rawDocument << createRawDocumentVariants() + } + + def 'getFirstKey should throw NoSuchElementException if the document is empty'() { + when: + emptyRawDocument.getFirstKey() + + then: + thrown(NoSuchElementException) + } + + def 'should create BsonReader'() { + when: + def reader = document.asBsonReader() + + then: + new BsonDocumentCodec().decode(reader, DecoderContext.builder().build()) == document + + cleanup: + reader.close() + } + + def 'toJson should return equivalent JSON'() { + expect: + new RawBsonDocumentCodec().decode(new JsonReader(rawDocument.toJson()), DecoderContext.builder().build()) == document + + where: + rawDocument << createRawDocumentVariants() + } + + def 'toJson should respect default JsonWriterSettings'() { + given: + def writer = new StringWriter() + + when: + new BsonDocumentCodec().encode(new JsonWriter(writer), document, EncoderContext.builder().build()) + + then: + writer.toString() == rawDocument.toJson() + + where: + rawDocument << createRawDocumentVariants() + } + + def 'toJson should respect JsonWriterSettings'() { + given: + def jsonWriterSettings = JsonWriterSettings.builder().outputMode(JsonMode.SHELL).build() + def writer = new StringWriter() + + when: + new RawBsonDocumentCodec().encode(new JsonWriter(writer, jsonWriterSettings), rawDocument, EncoderContext.builder().build()) + + then: + writer.toString() == rawDocument.toJson(jsonWriterSettings) + + where: + rawDocument << createRawDocumentVariants() + } + + def 'all write methods should throw UnsupportedOperationException'() { + given: + def rawDocument = createRawDocumenFromDocument() + + when: + rawDocument.clear() + + then: + thrown(UnsupportedOperationException) + + when: + rawDocument.put('x', BsonNull.VALUE) + + then: + thrown(UnsupportedOperationException) + + when: + rawDocument.append('x', BsonNull.VALUE) + + then: + thrown(UnsupportedOperationException) + + when: + rawDocument.putAll(new BsonDocument('x', BsonNull.VALUE)) + + then: + thrown(UnsupportedOperationException) + + when: + rawDocument.remove(BsonNull.VALUE) + + then: + thrown(UnsupportedOperationException) + } + + def 'should decode'() { + expect: + rawDocument.decode(new BsonDocumentCodec()) == document + + where: + rawDocument << createRawDocumentVariants() + } + + def 'hashCode should equal hash code of identical BsonDocument'() { + expect: + rawDocument.hashCode() == document.hashCode() + + where: + rawDocument << createRawDocumentVariants() + } + + def 'equals should equal identical BsonDocument'() { + expect: + areEqual(rawDocument, document) + areEqual(document, rawDocument) + areEqual(rawDocument, rawDocument) + !areEqual(rawDocument, emptyRawDocument) + + where: + rawDocument << createRawDocumentVariants() + } + + def 'clone should make a deep copy'() { + when: + RawBsonDocument cloned = rawDocument.clone() + + then: + !cloned.getByteBuffer().array().is(rawDocument.getByteBuffer().array()) + cloned.getByteBuffer().remaining() == rawDocument.getByteBuffer().remaining() + cloned == createRawDocumenFromDocument() + + where: + rawDocument << [ + createRawDocumenFromDocument(), + createRawDocumentFromByteArray(), + 
createRawDocumentFromByteArrayOffsetLength() + ] + } + + def 'should serialize and deserialize'() { + given: + def baos = new ByteArrayOutputStream() + def oos = new ObjectOutputStream(baos) + + when: + oos.writeObject(localRawDocument) + def bais = new ByteArrayInputStream(baos.toByteArray()) + def ois = new ObjectInputStream(bais) + def deserializedDocument = ois.readObject() + + then: + document == deserializedDocument + + where: + localRawDocument << createRawDocumentVariants() + } + + private static List createRawDocumentVariants() { + [ + createRawDocumenFromDocument(), + createRawDocumentFromByteArray(), + createRawDocumentFromByteArrayOffsetLength() + ] + } + + private static RawBsonDocument createRawDocumenFromDocument() { + new RawBsonDocument(document, new BsonDocumentCodec()) + } + + private static RawBsonDocument createRawDocumentFromByteArray() { + byte[] strippedBytes = getBytesFromDocument() + new RawBsonDocument(strippedBytes) + } + + private static byte[] getBytesFromDocument() { + def (int size, byte[] bytes) = getBytesFromOutputBuffer() + def strippedBytes = new byte[size] + System.arraycopy(bytes, 0, strippedBytes, 0, size) + strippedBytes + } + + private static List getBytesFromOutputBuffer() { + def outputBuffer = new BasicOutputBuffer(1024) + new BsonDocumentCodec().encode(new BsonBinaryWriter(outputBuffer), document, EncoderContext.builder().build()) + def bytes = outputBuffer.getInternalBuffer() + [outputBuffer.position, bytes] + } + + private static RawBsonDocument createRawDocumentFromByteArrayOffsetLength() { + def (int size, byte[] bytes) = getBytesFromOutputBuffer() + def unstrippedBytes = new byte[size + 2] + System.arraycopy(bytes, 0, unstrippedBytes, 1, size) + new RawBsonDocument(unstrippedBytes, 1, size) + } + + class TestEntry implements Map.Entry { + + private final String key + private BsonValue value + + TestEntry(String key, BsonValue value) { + this.key = key + this.value = value + } + + @Override + String getKey() { + key + } + + @Override + BsonValue getValue() { + value + } + + @Override + BsonValue setValue(final BsonValue value) { + this.value = value + } + } +} diff --git a/bson/src/test/unit/org/bson/codecs/AtomicCodecSpecification.groovy b/bson/src/test/unit/org/bson/codecs/AtomicCodecSpecification.groovy new file mode 100644 index 00000000000..df3800f8797 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/AtomicCodecSpecification.groovy @@ -0,0 +1,110 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs + +import org.bson.BsonBoolean +import org.bson.BsonDocument +import org.bson.BsonDocumentReader +import org.bson.BsonDocumentWriter +import org.bson.BsonInt32 +import org.bson.BsonInt64 +import spock.lang.Specification + +import java.util.concurrent.atomic.AtomicBoolean +import java.util.concurrent.atomic.AtomicInteger +import java.util.concurrent.atomic.AtomicLong + +class AtomicCodecSpecification extends Specification { + def 'should encode and decode atomic boolean'() { + given: + def codec = new AtomicBooleanCodec() + def atomicBoolean = new AtomicBoolean(true) + def document = new BsonDocument() + + when: + def writer = new BsonDocumentWriter(document) + writer.writeStartDocument() + writer.writeName('b') + codec.encode(writer, atomicBoolean, EncoderContext.builder().build()) + writer.writeEndDocument() + + then: + document == new BsonDocument('b', BsonBoolean.TRUE) + + when: + def reader = new BsonDocumentReader(document) + reader.readStartDocument() + reader.readName('b') + def value = codec.decode(reader, DecoderContext.builder().build()) + + then: + value.get() == atomicBoolean.get() + } + + def 'should encode and decode atomic integer'() { + given: + def codec = new AtomicIntegerCodec() + def atomicInteger = new AtomicInteger(1) + def document = new BsonDocument() + + when: + def writer = new BsonDocumentWriter(document) + writer.writeStartDocument() + writer.writeName('i') + codec.encode(writer, atomicInteger, EncoderContext.builder().build()) + writer.writeEndDocument() + + then: + document == new BsonDocument('i', new BsonInt32(1)) + + when: + def reader = new BsonDocumentReader(document) + reader.readStartDocument() + reader.readName('i') + def value = codec.decode(reader, DecoderContext.builder().build()) + + then: + value.get() == atomicInteger.get() + } + + def 'should encode and decode atomic long'() { + given: + def codec = new AtomicLongCodec() + def atomicLong = new AtomicLong(1L) + def document = new BsonDocument() + + when: + def writer = new BsonDocumentWriter(document) + writer.writeStartDocument() + writer.writeName('l') + codec.encode(writer, atomicLong, EncoderContext.builder().build()) + writer.writeEndDocument() + + then: + document == new BsonDocument('l', new BsonInt64(1L)) + + when: + def reader = new BsonDocumentReader(document) + reader.readStartDocument() + reader.readName('l') + def value = codec.decode(reader, DecoderContext.builder().build()) + + then: + value.get() == atomicLong.get() + } + +} diff --git a/bson/src/test/unit/org/bson/codecs/AtomicIntegerCodecTest.java b/bson/src/test/unit/org/bson/codecs/AtomicIntegerCodecTest.java new file mode 100644 index 00000000000..e4fcfd001ed --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/AtomicIntegerCodecTest.java @@ -0,0 +1,84 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs; + +import org.bson.BsonInvalidOperationException; +import org.bson.Document; +import org.junit.jupiter.api.Test; + +import java.util.concurrent.atomic.AtomicInteger; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public final class AtomicIntegerCodecTest extends CodecTestCase { + + @Test + public void shouldRoundTripAtomicIntegerValues() { + Document original = new Document("a", new AtomicInteger(Integer.MAX_VALUE)); + roundTrip(original, new AtomicIntegerComparator(original)); + + original = new Document("a", new AtomicInteger(Integer.MIN_VALUE)); + roundTrip(original, new AtomicIntegerComparator(original)); + } + + @Test + public void shouldHandleAlternativeNumberValues() { + Document expected = new Document("a", new AtomicInteger(10)); + roundTrip(new Document("a", 10), new AtomicIntegerComparator(expected)); + roundTrip(new Document("a", 10L), new AtomicIntegerComparator(expected)); + roundTrip(new Document("a", 10.00), new AtomicIntegerComparator(expected)); + roundTrip(new Document("a", 9.9999999999999992), new AtomicIntegerComparator(expected)); + } + + @Test + public void shouldThrowWhenHandlingLossyDoubleValues() { + Document original = new Document("a", 9.9999999999999991); + assertThrows(BsonInvalidOperationException.class, () ->roundTrip(original, new AtomicIntegerComparator(original))); + } + + @Test + public void shouldErrorDecodingOutsideMinRange() { + assertThrows(BsonInvalidOperationException.class, () ->roundTrip(new Document("a", Long.MIN_VALUE))); + } + + @Test + public void shouldErrorDecodingOutsideMaxRange() { + assertThrows(BsonInvalidOperationException.class, () ->roundTrip(new Document("a", Long.MAX_VALUE))); + } + + @Override + DocumentCodecProvider getDocumentCodecProvider() { + return getSpecificNumberDocumentCodecProvider(AtomicInteger.class); + } + + private class AtomicIntegerComparator implements Comparator { + private final Document expected; + + AtomicIntegerComparator(final Document expected) { + this.expected = expected; + } + + @Override + public void apply(final Document result) { + assertEquals( + expected.get("a", AtomicInteger.class).get(), + result.get("a", AtomicInteger.class).get()); + } + } + +} diff --git a/bson/src/test/unit/org/bson/codecs/AtomicLongCodecTest.java b/bson/src/test/unit/org/bson/codecs/AtomicLongCodecTest.java new file mode 100644 index 00000000000..1efb30e6348 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/AtomicLongCodecTest.java @@ -0,0 +1,82 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs; + +import org.bson.BsonInvalidOperationException; +import org.bson.Document; +import org.junit.jupiter.api.Test; + +import java.util.concurrent.atomic.AtomicLong; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public final class AtomicLongCodecTest extends CodecTestCase { + + @Test + public void shouldRoundTripAtomicLongValues() { + Document original = new Document("a", new AtomicLong(Long.MAX_VALUE)); + roundTrip(original, new AtomicLongComparator(original)); + + original = new Document("a", new AtomicLong(Long.MIN_VALUE)); + roundTrip(original, new AtomicLongComparator(original)); + } + + @Test + public void shouldHandleAlternativeNumberValues() { + Document expected = new Document("a", new AtomicLong(10L)); + roundTrip(new Document("a", 10), new AtomicLongComparator(expected)); + roundTrip(new Document("a", 10L), new AtomicLongComparator(expected)); + roundTrip(new Document("a", 10.00), new AtomicLongComparator(expected)); + roundTrip(new Document("a", 9.9999999999999992), new AtomicLongComparator(expected)); + } + + @Test + public void shouldThrowWhenHandlingLossyDoubleValues() { + Document original = new Document("a", 9.9999999999999991); + assertThrows(BsonInvalidOperationException.class, () -> roundTrip(original, new AtomicLongComparator(original))); + } + + @Test + public void shouldErrorDecodingOutsideMinRange() { + assertThrows(BsonInvalidOperationException.class, () -> roundTrip(new Document("a", -Double.MAX_VALUE))); + } + + @Test + public void shouldErrorDecodingOutsideMaxRange() { + assertThrows(BsonInvalidOperationException.class, () -> roundTrip(new Document("a", Double.MAX_VALUE))); + } + + @Override + DocumentCodecProvider getDocumentCodecProvider() { + return getSpecificNumberDocumentCodecProvider(AtomicLong.class); + } + + private class AtomicLongComparator implements Comparator { + private final Document expected; + + AtomicLongComparator(final Document expected) { + this.expected = expected; + } + + @Override + public void apply(final Document result) { + assertEquals(expected.get("a", AtomicLong.class).get(), result.get("a", AtomicLong.class).get()); + } + } + +} diff --git a/bson/src/test/unit/org/bson/codecs/BigDecimalCodecSpecification.groovy b/bson/src/test/unit/org/bson/codecs/BigDecimalCodecSpecification.groovy new file mode 100644 index 00000000000..d8381e5c0c8 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/BigDecimalCodecSpecification.groovy @@ -0,0 +1,67 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs + +import org.bson.BsonDecimal128 +import org.bson.BsonDocument +import org.bson.BsonDocumentReader +import org.bson.BsonDocumentWriter +import org.bson.BsonReader +import org.bson.types.Decimal128 +import spock.lang.Specification + +class BigDecimalCodecSpecification extends Specification { + + def 'should round trip BigDecimal successfully'() { + given: + def codec = new BigDecimalCodec() + def bsonDecimal128 = new BsonDecimal128(new Decimal128(bigDecimal)) + + when: + def writer = new BsonDocumentWriter(new BsonDocument()) + writer.writeStartDocument() + writer.writeName('bigDecimal') + codec.encode(writer, bigDecimal, EncoderContext.builder().build()) + writer.writeEndDocument() + + then: + bsonDecimal128 == writer.getDocument().get('bigDecimal') + + when: + BsonReader bsonReader = new BsonDocumentReader(writer.getDocument()) + bsonReader.readStartDocument() + bsonReader.readName() + BigDecimal actual = codec.decode(bsonReader, DecoderContext.builder().build()) + + then: + bigDecimal == actual + + where: + bigDecimal << [ + new BigDecimal(123), + new BigDecimal(42L), + new BigDecimal('12345678901234567890'), + new BigDecimal(Long.valueOf(42)), + new BigDecimal('42.0'), + new BigDecimal(Double.valueOf(42)), + new BigDecimal('1.2345678901234567890'), + new BigDecimal(Long.MAX_VALUE), + new BigDecimal(Long.MIN_VALUE), + new BigDecimal(0), + ] + } +} diff --git a/bson/src/test/unit/org/bson/codecs/BinaryBinaryVectorCodecTest.java b/bson/src/test/unit/org/bson/codecs/BinaryBinaryVectorCodecTest.java new file mode 100644 index 00000000000..fadddb7a635 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/BinaryBinaryVectorCodecTest.java @@ -0,0 +1,152 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs; + +import org.bson.BsonBinary; +import org.bson.BsonBinaryReader; +import org.bson.BsonBinarySubType; +import org.bson.BsonBinaryWriter; +import org.bson.BsonDocument; +import org.bson.BsonInvalidOperationException; +import org.bson.BsonType; +import org.bson.BsonWriter; +import org.bson.ByteBufNIO; +import org.bson.Float32BinaryVector; +import org.bson.Int8BinaryVector; +import org.bson.PackedBitBinaryVector; +import org.bson.BinaryVector; +import org.bson.io.BasicOutputBuffer; +import org.bson.io.ByteBufferBsonInput; +import org.bson.io.OutputBuffer; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.EnumSource; +import org.junit.jupiter.params.provider.MethodSource; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.stream.Stream; + +import static org.bson.BsonHelper.toBson; +import static org.bson.assertions.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.params.provider.Arguments.arguments; + +class BinaryBinaryVectorCodecTest extends CodecTestCase { + + private static Stream provideVectorsAndCodecs() { + return Stream.of( + arguments(BinaryVector.floatVector(new float[]{1.1f, 2.2f, 3.3f}), new Float32BinaryVectorCodec(), Float32BinaryVector.class), + arguments(BinaryVector.int8Vector(new byte[]{10, 20, 30, 40}), new Int8VectorCodec(), Int8BinaryVector.class), + arguments(BinaryVector.packedBitVector(new byte[]{(byte) 0b10101010, (byte) 0b01010101}, (byte) 3), new PackedBitBinaryVectorCodec(), PackedBitBinaryVector.class), + arguments(BinaryVector.packedBitVector(new byte[]{(byte) 0b10101010, (byte) 0b01010101}, (byte) 3), new BinaryVectorCodec(), BinaryVector.class), + arguments(BinaryVector.int8Vector(new byte[]{10, 20, 30, 40}), new BinaryVectorCodec(), BinaryVector.class), + arguments(BinaryVector.packedBitVector(new byte[]{(byte) 0b10101010, (byte) 0b01010101}, (byte) 3), new BinaryVectorCodec(), BinaryVector.class) + ); + } + + @ParameterizedTest + @MethodSource("provideVectorsAndCodecs") + void shouldEncodeVector(final BinaryVector vectorToEncode, final Codec vectorCodec) throws IOException { + // given + BsonBinary bsonBinary = new BsonBinary(vectorToEncode); + byte[] encodedVector = bsonBinary.getData(); + ByteArrayOutputStream expectedStream = new ByteArrayOutputStream(); + // Total length of a Document (int 32). It is 0, because we do not expect + // codec to write the end of the document (that is when we back-patch the length of the document). 
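+ // (the writer would only back-patch the real length when writeEndDocument() is called, which this test deliberately skips)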
+ expectedStream.write(new byte[]{0, 0, 0, 0}); + // Bson type + expectedStream.write((byte) BsonType.BINARY.getValue()); + // Field name "b4" + expectedStream.write(new byte[]{98, 52, 0}); + // Total length of binary data (little-endian format) + expectedStream.write(new byte[]{(byte) encodedVector.length, 0, 0, 0}); + // Vector binary subtype + expectedStream.write(BsonBinarySubType.VECTOR.getValue()); + // Actual BSON binary data + expectedStream.write(encodedVector); + + OutputBuffer buffer = new BasicOutputBuffer(); + BsonWriter writer = new BsonBinaryWriter(buffer); + writer.writeStartDocument(); + writer.writeName("b4"); + + // when + vectorCodec.encode(writer, vectorToEncode, EncoderContext.builder().build()); + + // then + assertArrayEquals(expectedStream.toByteArray(), buffer.toByteArray()); + } + + @ParameterizedTest + @MethodSource("provideVectorsAndCodecs") + void shouldDecodeVector(final BinaryVector vectorToDecode, final Codec<BinaryVector> vectorCodec) { + // given + OutputBuffer buffer = new BasicOutputBuffer(); + BsonWriter writer = new BsonBinaryWriter(buffer); + writer.writeStartDocument(); + writer.writeName("vector"); + writer.writeBinaryData(new BsonBinary(vectorToDecode)); + writer.writeEndDocument(); + + BsonBinaryReader reader = new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(buffer.toByteArray())))); + reader.readStartDocument(); + + // when + BinaryVector decodedVector = vectorCodec.decode(reader, DecoderContext.builder().build()); + + // then + assertDoesNotThrow(reader::readEndDocument); + assertNotNull(decodedVector); + assertEquals(vectorToDecode, decodedVector); + } + + + @ParameterizedTest + @EnumSource(value = BsonBinarySubType.class, mode = EnumSource.Mode.EXCLUDE, names = {"VECTOR"}) + void shouldThrowExceptionForInvalidSubType(final BsonBinarySubType subType) { + // given + BsonDocument document = new BsonDocument("name", new BsonBinary(subType.getValue(), new byte[]{})); + BsonBinaryReader reader = new BsonBinaryReader(toBson(document)); + reader.readStartDocument(); + + // when & then + Stream.of(new Float32BinaryVectorCodec(), new Int8VectorCodec(), new PackedBitBinaryVectorCodec()) + .forEach(codec -> { + BsonInvalidOperationException exception = assertThrows(BsonInvalidOperationException.class, () -> + codec.decode(reader, DecoderContext.builder().build())); + assertEquals("Expected vector binary subtype 9 but found: " + subType.getValue(), exception.getMessage()); + }); + } + + + @ParameterizedTest + @MethodSource("provideVectorsAndCodecs") + void shouldReturnCorrectEncoderClass(final BinaryVector vector, + final Codec<BinaryVector> codec, + final Class<? extends BinaryVector> expectedEncoderClass) { + // when + Class<? extends BinaryVector> encoderClass = codec.getEncoderClass(); + + // then + assertEquals(expectedEncoderClass, encoderClass); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/BsonCodecProviderSpecification.groovy b/bson/src/test/unit/org/bson/codecs/BsonCodecProviderSpecification.groovy new file mode 100644 index 00000000000..13739fe539f --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/BsonCodecProviderSpecification.groovy @@ -0,0 +1,41 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs + + +import org.bson.BsonDocument +import org.bson.BsonDocumentWrapper +import org.bson.RawBsonDocument +import spock.lang.Specification + +import static org.bson.codecs.configuration.CodecRegistries.fromProviders + +class BsonCodecProviderSpecification extends Specification { + + def provider = new BsonCodecProvider() + def codecRegistry = fromProviders(provider) + + def 'should get correct codec'() { + expect: + provider.get(String, codecRegistry) == null + + provider.get(BsonDocument, codecRegistry).class == BsonCodec + provider.get(BsonDocumentWrapper, codecRegistry).class == BsonCodec + provider.get(RawBsonDocument, codecRegistry).class == BsonCodec + provider.get(BsonDocumentSubclass, codecRegistry).class == BsonCodec + } +} diff --git a/bson/src/test/unit/org/bson/codecs/BsonCodecSpecification.groovy b/bson/src/test/unit/org/bson/codecs/BsonCodecSpecification.groovy new file mode 100644 index 00000000000..22add20813b --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/BsonCodecSpecification.groovy @@ -0,0 +1,87 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs + + +import org.bson.BsonDocument +import org.bson.BsonDocumentWriter +import org.bson.BsonReader +import org.bson.codecs.configuration.CodecConfigurationException +import org.bson.codecs.configuration.CodecRegistry +import org.bson.conversions.Bson +import spock.lang.Specification + +import static org.bson.codecs.configuration.CodecRegistries.fromProviders + +class BsonCodecSpecification extends Specification { + + def provider = new BsonCodecProvider() + def registry = fromProviders(provider) + + def 'should encode Bson'() { + given: + def codec = new BsonCodec() + def customBson = new CustomBson() + + when: + def writer = new BsonDocumentWriter(new BsonDocument()) + writer.writeStartDocument() + writer.writeName('customBson') + codec.encode(writer, customBson, EncoderContext.builder().build()) + writer.writeEndDocument() + + then: + BsonDocument.parse('{a: 1, b:2}') == writer.getDocument().get('customBson') + } + + def 'should throw CodecConfiguration exception if cannot encode Bson'() { + given: + def codec = new BsonCodec() + def customBson = new ExceptionRaisingBson() + + when: + def writer = new BsonDocumentWriter(new BsonDocument()) + writer.writeStartDocument() + writer.writeName('customBson') + codec.encode(writer, customBson, EncoderContext.builder().build()) + + then: + thrown(CodecConfigurationException) + } + + def 'should throw UnsupportedOperation exception if decode is called'() { + when: + new BsonCodec().decode(Stub(BsonReader), DecoderContext.builder().build()) + + then: + thrown(UnsupportedOperationException) + } + + class CustomBson implements Bson { + @Override + BsonDocument toBsonDocument(final Class clazz, final CodecRegistry codecRegistry) { + BsonDocument.parse('{a: 1, b: 2}') + } + } + + class ExceptionRaisingBson implements Bson { + @Override + BsonDocument toBsonDocument(final Class clazz, final CodecRegistry codecRegistry) { + throw new Exception('Cannot encode') + } + } +} diff --git a/bson/src/test/unit/org/bson/codecs/BsonDocumentCodecSpecification.groovy b/bson/src/test/unit/org/bson/codecs/BsonDocumentCodecSpecification.groovy new file mode 100644 index 00000000000..8ac2ebcec51 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/BsonDocumentCodecSpecification.groovy @@ -0,0 +1,208 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs + +import org.bson.BsonArray +import org.bson.BsonBinary +import org.bson.BsonBinaryReader +import org.bson.BsonBinaryWriter +import org.bson.BsonBoolean +import org.bson.BsonDateTime +import org.bson.BsonDecimal128 +import org.bson.BsonDocument +import org.bson.BsonDocumentWriter +import org.bson.BsonDouble +import org.bson.BsonElement +import org.bson.BsonInt32 +import org.bson.BsonInt64 +import org.bson.BsonJavaScript +import org.bson.BsonJavaScriptWithScope +import org.bson.BsonMaxKey +import org.bson.BsonMinKey +import org.bson.BsonNull +import org.bson.BsonObjectId +import org.bson.BsonRegularExpression +import org.bson.BsonString +import org.bson.BsonSymbol +import org.bson.BsonTimestamp +import org.bson.BsonUndefined +import org.bson.ByteBufNIO +import org.bson.RawBsonDocument +import org.bson.io.BasicOutputBuffer +import org.bson.io.ByteBufferBsonInput +import org.bson.types.Decimal128 +import org.bson.types.ObjectId +import spock.lang.Specification + +import java.nio.ByteBuffer + +import static java.util.Arrays.asList + +class BsonDocumentCodecSpecification extends Specification { + def 'should encode and decode all default types'() { + given: + def doc = new BsonDocument( + [ + new BsonElement('null', new BsonNull()), + new BsonElement('int32', new BsonInt32(42)), + new BsonElement('int64', new BsonInt64(52L)), + new BsonElement('decimal128', new BsonDecimal128(Decimal128.parse('1.0'))), + new BsonElement('boolean', new BsonBoolean(true)), + new BsonElement('date', new BsonDateTime(new Date().getTime())), + new BsonElement('double', new BsonDouble(62.0)), + new BsonElement('string', new BsonString('the fox ...')), + new BsonElement('minKey', new BsonMinKey()), + new BsonElement('maxKey', new BsonMaxKey()), + new BsonElement('javaScript', new BsonJavaScript('int i = 0;')), + new BsonElement('objectId', new BsonObjectId(new ObjectId())), + new BsonElement('codeWithScope', new BsonJavaScriptWithScope('int x = y', new BsonDocument('y', new BsonInt32(1)))), + new BsonElement('regex', new BsonRegularExpression('^test.*regex.*xyz$', 'i')), + new BsonElement('symbol', new BsonSymbol('ruby stuff')), + new BsonElement('timestamp', new BsonTimestamp(0x12345678, 5)), + new BsonElement('undefined', new BsonUndefined()), + new BsonElement('binary', new BsonBinary((byte) 80, [5, 4, 3, 2, 1] as byte[])), + new BsonElement('array', new BsonArray([new BsonInt32(1), new BsonInt64(2L), new BsonBoolean(true), + new BsonArray([new BsonInt32(1), new BsonInt32(2), new BsonInt32(3)]), + new BsonDocument('a', new BsonInt64(2L))])), + new BsonElement('document', new BsonDocument('a', new BsonInt32(1))) + ]) + + doc.with { +// put('dbPointer', new DBPointer('foo.bar', new ObjectId())) +// put('codeWithScope', new CodeWithScope('int x = y', new Document('y', 1))) + } + when: + BsonBinaryWriter writer = new BsonBinaryWriter(new BasicOutputBuffer()) + new BsonDocumentCodec().encode(writer, doc, EncoderContext.builder().build()) + BsonBinaryReader reader = new BsonBinaryReader(new ByteBufferBsonInput( + new ByteBufNIO(ByteBuffer.wrap(writer.bsonOutput.toByteArray())))) + def decodedDoc = new BsonDocumentCodec().decode(reader, DecoderContext.builder().build()) + + then: + decodedDoc.get('null') == doc.get('null') + decodedDoc.get('int32') == doc.get('int32') + decodedDoc.get('int64') == doc.get('int64') + decodedDoc.get('decimal128') == doc.get('decimal128') + decodedDoc.get('boolean') == doc.get('boolean') + decodedDoc.get('date') == doc.get('date') +// 
decodedDoc.get('dbPointer') == doc.get('dbPointer') + decodedDoc.get('double') == doc.get('double') + decodedDoc.get('minKey') == doc.get('minKey') + decodedDoc.get('maxKey') == doc.get('maxKey') + decodedDoc.get('javaScript') == doc.get('javaScript') + decodedDoc.get('codeWithScope') == doc.get('codeWithScope') + decodedDoc.get('objectId') == doc.get('objectId') + decodedDoc.get('regex') == doc.get('regex') + decodedDoc.get('string') == doc.get('string') + decodedDoc.get('symbol') == doc.get('symbol') + decodedDoc.get('timestamp') == doc.get('timestamp') + decodedDoc.get('undefined') == doc.get('undefined') + decodedDoc.get('binary') == doc.get('binary') + decodedDoc.get('array') == doc.get('array') + decodedDoc.get('document') == doc.get('document') + } + + def 'should respect encodeIdFirst property in encoder context'() { + given: + def doc = new BsonDocument( + [ + new BsonElement('x', new BsonInt32(2)), + new BsonElement('_id', new BsonInt32(2)), + new BsonElement('nested', new BsonDocument( + [ + new BsonElement('x', new BsonInt32(2)), + new BsonElement('_id', new BsonInt32(2)) + ])), + new BsonElement('array', new BsonArray(asList(new BsonDocument( + [ + new BsonElement('x', new BsonInt32(2)), + new BsonElement('_id', new BsonInt32(2)) + ] + )))) + ]) + + when: + def encodedDocument = new BsonDocument() + new BsonDocumentCodec().encode(new BsonDocumentWriter(encodedDocument), doc, + EncoderContext.builder().isEncodingCollectibleDocument(true).build()) + + then: + encodedDocument.keySet() as List == ['_id', 'x', 'nested', 'array'] + encodedDocument.getDocument('nested').keySet() as List == ['x', '_id'] + encodedDocument.getArray('array').get(0).asDocument().keySet() as List == ['x', '_id'] + + when: + encodedDocument.clear() + new BsonDocumentCodec().encode(new BsonDocumentWriter(encodedDocument), doc, + EncoderContext.builder().isEncodingCollectibleDocument(false).build()) + + then: + encodedDocument.keySet() as List == ['x', '_id', 'nested', 'array'] + encodedDocument.getDocument('nested').keySet() as List == ['x', '_id'] + encodedDocument.getArray('array').get(0).asDocument().keySet() as List == ['x', '_id'] + } + + def 'should encode nested raw documents'() { + given: + def doc = new BsonDocument('a', BsonBoolean.TRUE) + def rawDoc = new RawBsonDocument(doc, new BsonDocumentCodec()) + def docWithNestedRawDoc = new BsonDocument('a', rawDoc).append('b', new BsonArray(asList(rawDoc))) + + when: + def encodedDocument = new BsonDocument() + new BsonDocumentCodec().encode(new BsonDocumentWriter(encodedDocument), docWithNestedRawDoc, + EncoderContext.builder().isEncodingCollectibleDocument(true).build()) + + then: + encodedDocument == docWithNestedRawDoc + } + + def 'should determine if document has an id'() { + expect: + !new BsonDocumentCodec().documentHasId(new BsonDocument()) + new BsonDocumentCodec().documentHasId(new BsonDocument('_id', new BsonInt32(1))) + } + + def 'should get document id'() { + expect: + !new BsonDocumentCodec().getDocumentId(new BsonDocument()) + new BsonDocumentCodec().getDocumentId(new BsonDocument('_id', new BsonInt32(1))) == new BsonInt32(1) + } + + def 'should generate document id if absent'() { + given: + def document = new BsonDocument() + + when: + document = new BsonDocumentCodec().generateIdIfAbsentFromDocument(document) + + then: + document.get('_id') instanceof BsonObjectId + } + + def 'should not generate document id if present'() { + given: + def document = new BsonDocument('_id', new BsonInt32(1)) + + when: + document = new 
BsonDocumentCodec().generateIdIfAbsentFromDocument(document) + + then: + document.get('_id') == new BsonInt32(1) + } + +} diff --git a/bson/src/test/unit/org/bson/codecs/BsonDocumentSubclass.java b/bson/src/test/unit/org/bson/codecs/BsonDocumentSubclass.java new file mode 100644 index 00000000000..980f0ccf8a7 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/BsonDocumentSubclass.java @@ -0,0 +1,23 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonDocument; + +public class BsonDocumentSubclass extends BsonDocument { + private static final long serialVersionUID = 1L; +} diff --git a/bson/src/test/unit/org/bson/codecs/BsonTypeClassMapSpecification.groovy b/bson/src/test/unit/org/bson/codecs/BsonTypeClassMapSpecification.groovy new file mode 100644 index 00000000000..5f80719690d --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/BsonTypeClassMapSpecification.groovy @@ -0,0 +1,70 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs + +import org.bson.BsonDbPointer +import org.bson.BsonRegularExpression +import org.bson.BsonTimestamp +import org.bson.BsonType +import org.bson.BsonUndefined +import org.bson.Document +import org.bson.types.Binary +import org.bson.types.Code +import org.bson.types.CodeWithScope +import org.bson.types.Decimal128 +import org.bson.types.MaxKey +import org.bson.types.MinKey +import org.bson.types.ObjectId +import org.bson.types.Symbol +import spock.lang.Specification + +class BsonTypeClassMapSpecification extends Specification { + def 'should have defaults for all BSON types'() { + when: + def map = new BsonTypeClassMap() + + then: + map.get(BsonType.BINARY) == Binary + map.get(BsonType.BOOLEAN) == Boolean + map.get(BsonType.DATE_TIME) == Date + map.get(BsonType.DB_POINTER) == BsonDbPointer + map.get(BsonType.DOCUMENT) == Document + map.get(BsonType.DOUBLE) == Double + map.get(BsonType.INT32) == Integer + map.get(BsonType.INT64) == Long + map.get(BsonType.DECIMAL128) == Decimal128 + map.get(BsonType.MAX_KEY) == MaxKey + map.get(BsonType.MIN_KEY) == MinKey + map.get(BsonType.JAVASCRIPT) == Code + map.get(BsonType.JAVASCRIPT_WITH_SCOPE) == CodeWithScope + map.get(BsonType.OBJECT_ID) == ObjectId + map.get(BsonType.REGULAR_EXPRESSION) == BsonRegularExpression + map.get(BsonType.STRING) == String + map.get(BsonType.SYMBOL) == Symbol + map.get(BsonType.TIMESTAMP) == BsonTimestamp + map.get(BsonType.UNDEFINED) == BsonUndefined + map.get(BsonType.ARRAY) == List + } + + def 'should obey replacements'() { + when: + def map = new BsonTypeClassMap([(BsonType.DATE_TIME): java.sql.Date]) + + then: + map.get(BsonType.DATE_TIME) == java.sql.Date + } +} diff --git a/bson/src/test/unit/org/bson/codecs/BsonTypeCodecMapSpecification.groovy b/bson/src/test/unit/org/bson/codecs/BsonTypeCodecMapSpecification.groovy new file mode 100644 index 00000000000..a86ff0e5a41 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/BsonTypeCodecMapSpecification.groovy @@ -0,0 +1,69 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs + +import org.bson.BsonType +import org.bson.codecs.configuration.CodecConfigurationException +import spock.lang.Specification + +import static org.bson.codecs.configuration.CodecRegistries.fromProviders +import static org.bson.codecs.configuration.CodecRegistries.fromRegistries + +class BsonTypeCodecMapSpecification extends Specification { + def bsonTypeClassMap = new BsonTypeClassMap() + def registry = fromRegistries(fromProviders(new DocumentCodecProvider(), new ValueCodecProvider(), new BsonValueCodecProvider())) + def bsonTypeCodecMap = new BsonTypeCodecMap(bsonTypeClassMap, registry) + + def 'should map types to codecs'() { + expect: + bsonTypeCodecMap.get(BsonType.BINARY).class == BinaryCodec + bsonTypeCodecMap.get(BsonType.BOOLEAN).class == BooleanCodec + bsonTypeCodecMap.get(BsonType.DATE_TIME).class == DateCodec + bsonTypeCodecMap.get(BsonType.DB_POINTER).class == BsonDBPointerCodec + bsonTypeCodecMap.get(BsonType.DOCUMENT).class == DocumentCodec + bsonTypeCodecMap.get(BsonType.DOUBLE).class == DoubleCodec + bsonTypeCodecMap.get(BsonType.INT32).class == IntegerCodec + bsonTypeCodecMap.get(BsonType.INT64).class == LongCodec + bsonTypeCodecMap.get(BsonType.DECIMAL128).class == Decimal128Codec + bsonTypeCodecMap.get(BsonType.MAX_KEY).class == MaxKeyCodec + bsonTypeCodecMap.get(BsonType.MIN_KEY).class == MinKeyCodec + bsonTypeCodecMap.get(BsonType.JAVASCRIPT).class == CodeCodec + bsonTypeCodecMap.get(BsonType.JAVASCRIPT_WITH_SCOPE).class == CodeWithScopeCodec + bsonTypeCodecMap.get(BsonType.OBJECT_ID).class == ObjectIdCodec + bsonTypeCodecMap.get(BsonType.REGULAR_EXPRESSION).class == BsonRegularExpressionCodec + bsonTypeCodecMap.get(BsonType.STRING).class == StringCodec + bsonTypeCodecMap.get(BsonType.SYMBOL).class == SymbolCodec + bsonTypeCodecMap.get(BsonType.TIMESTAMP).class == BsonTimestampCodec + bsonTypeCodecMap.get(BsonType.UNDEFINED).class == BsonUndefinedCodec + } + + def 'should throw exception for unmapped type'() { + when: + bsonTypeCodecMap.get(BsonType.NULL) + + then: + thrown(CodecConfigurationException) + } + + def 'should throw exception for unregistered codec'() { + when: + bsonTypeCodecMap.get(BsonType.ARRAY) + + then: + thrown(CodecConfigurationException) + } +} diff --git a/bson/src/test/unit/org/bson/codecs/BsonValueCodecProviderSpecification.groovy b/bson/src/test/unit/org/bson/codecs/BsonValueCodecProviderSpecification.groovy new file mode 100644 index 00000000000..de01d107551 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/BsonValueCodecProviderSpecification.groovy @@ -0,0 +1,84 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs + +import org.bson.BsonArray +import org.bson.BsonBoolean +import org.bson.BsonDateTime +import org.bson.BsonDbPointer +import org.bson.BsonDecimal128 +import org.bson.BsonDocument +import org.bson.BsonDocumentWrapper +import org.bson.BsonDouble +import org.bson.BsonInt32 +import org.bson.BsonInt64 +import org.bson.BsonJavaScript +import org.bson.BsonJavaScriptWithScope +import org.bson.BsonMaxKey +import org.bson.BsonMinKey +import org.bson.BsonNull +import org.bson.BsonObjectId +import org.bson.BsonRegularExpression +import org.bson.BsonString +import org.bson.BsonSymbol +import org.bson.BsonTimestamp +import org.bson.BsonUndefined +import org.bson.RawBsonArray +import org.bson.RawBsonDocument +import spock.lang.Specification + +import static org.bson.codecs.configuration.CodecRegistries.fromProviders + +class BsonValueCodecProviderSpecification extends Specification { + + def provider = new BsonValueCodecProvider() + def codecRegistry = fromProviders(provider) + + def 'should get correct codec'() { + expect: + provider.get(String, codecRegistry) == null + + provider.get(BsonInt32, codecRegistry).class == BsonInt32Codec + provider.get(BsonInt64, codecRegistry).class == BsonInt64Codec + provider.get(BsonDouble, codecRegistry).class == BsonDoubleCodec + provider.get(BsonString, codecRegistry).class == BsonStringCodec + provider.get(BsonBoolean, codecRegistry).class == BsonBooleanCodec + provider.get(BsonDecimal128, codecRegistry).class == BsonDecimal128Codec + + provider.get(BsonNull, codecRegistry).class == BsonNullCodec + provider.get(BsonDateTime, codecRegistry).class == BsonDateTimeCodec + provider.get(BsonMinKey, codecRegistry).class == BsonMinKeyCodec + provider.get(BsonMaxKey, codecRegistry).class == BsonMaxKeyCodec + provider.get(BsonJavaScript, codecRegistry).class == BsonJavaScriptCodec + provider.get(BsonObjectId, codecRegistry).class == BsonObjectIdCodec + provider.get(BsonRegularExpression, codecRegistry).class == BsonRegularExpressionCodec + provider.get(BsonSymbol, codecRegistry).class == BsonSymbolCodec + provider.get(BsonTimestamp, codecRegistry).class == BsonTimestampCodec + provider.get(BsonUndefined, codecRegistry).class == BsonUndefinedCodec + provider.get(BsonDbPointer, codecRegistry).class == BsonDBPointerCodec + + provider.get(BsonJavaScriptWithScope, codecRegistry).class == BsonJavaScriptWithScopeCodec + + provider.get(BsonArray, codecRegistry).class == BsonArrayCodec + provider.get(RawBsonArray, codecRegistry).class == BsonArrayCodec + + provider.get(BsonDocument, codecRegistry).class == BsonDocumentCodec + provider.get(BsonDocumentWrapper, codecRegistry).class == BsonDocumentWrapperCodec + provider.get(RawBsonDocument, codecRegistry).class == RawBsonDocumentCodec + provider.get(BsonDocumentSubclass, codecRegistry).class == BsonDocumentCodec + } +} diff --git a/bson/src/test/unit/org/bson/codecs/ByteCodecTest.java b/bson/src/test/unit/org/bson/codecs/ByteCodecTest.java new file mode 100644 index 00000000000..20629fb027d --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/ByteCodecTest.java @@ -0,0 +1,63 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonInvalidOperationException; +import org.bson.Document; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertThrows; + +public final class ByteCodecTest extends CodecTestCase { + + @Test + public void shouldRoundTripByteValues() { + roundTrip(new Document("a", Byte.MAX_VALUE)); + roundTrip(new Document("a", Byte.MIN_VALUE)); + } + + @Test + public void shouldHandleAlternativeNumberValues() { + Document expected = new Document("a", (byte) 10); + roundTrip(new Document("a", 10), expected); + roundTrip(new Document("a", 10.00), expected); + roundTrip(new Document("a", 9.9999999999999992), expected); + } + + @Test + public void shouldErrorDecodingOutsideMinRange() { + assertThrows(BsonInvalidOperationException.class, () -> + roundTrip(new Document("a", Integer.MIN_VALUE))); + } + + @Test + public void shouldErrorDecodingOutsideMaxRange() { + assertThrows(BsonInvalidOperationException.class, () -> + roundTrip(new Document("a", Integer.MAX_VALUE))); + } + + @Test + public void shouldThrowWhenHandlingLossyDoubleValues() { + assertThrows(BsonInvalidOperationException.class, () -> + roundTrip(new Document("a", 9.9999999999999991))); + } + + @Override + DocumentCodecProvider getDocumentCodecProvider() { + return getSpecificNumberDocumentCodecProvider(Byte.class); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/CharacterCodecSpecification.groovy b/bson/src/test/unit/org/bson/codecs/CharacterCodecSpecification.groovy new file mode 100644 index 00000000000..596fce92b54 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/CharacterCodecSpecification.groovy @@ -0,0 +1,83 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs + +import org.bson.BsonDocument +import org.bson.BsonDocumentReader +import org.bson.BsonDocumentWriter +import org.bson.BsonInvalidOperationException +import org.bson.BsonString +import spock.lang.Specification + +class CharacterCodecSpecification extends Specification { + private final CharacterCodec codec = new CharacterCodec() + + def 'should get encoder class'() { + expect: + codec.encoderClass == Character + } + def 'when encoding a character, should throw if it is null'() { + given: + def writer = new BsonDocumentWriter(new BsonDocument()) + + when: + codec.encode(writer, null, EncoderContext.builder().build()) + + then: + thrown(IllegalArgumentException) + } + + def 'should encode a character'() { + given: + def writer = new BsonDocumentWriter(new BsonDocument()) + + when: + writer.writeStartDocument() + writer.writeName('str') + codec.encode(writer, 'c' as char, EncoderContext.builder().build()) + writer.writeEndDocument() + + then: + writer.document == new BsonDocument('str', new BsonString('c')) + } + + def 'should decode a character'() { + given: + def reader = new BsonDocumentReader(new BsonDocument('str', new BsonString('c'))) + + when: + reader.readStartDocument() + reader.readName() + def character = codec.decode(reader, DecoderContext.builder().build()) + + then: + character == 'c' as char + } + + def 'when decoding a string whose length is not 1, should throw a BsonInvalidOperationException'() { + given: + def reader = new BsonDocumentReader(new BsonDocument('str', new BsonString('cc'))) + + when: + reader.readStartDocument() + reader.readName() + codec.decode(reader, DecoderContext.builder().build()) + + then: + thrown(BsonInvalidOperationException) + } +} diff --git a/bson/src/test/unit/org/bson/codecs/CodeWithScopeSpecification.groovy b/bson/src/test/unit/org/bson/codecs/CodeWithScopeSpecification.groovy new file mode 100644 index 00000000000..d1df8412a0f --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/CodeWithScopeSpecification.groovy @@ -0,0 +1,65 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs + +import org.bson.BsonBinaryReader +import org.bson.BsonWriter +import org.bson.Document +import org.bson.types.CodeWithScope +import spock.lang.Specification +import spock.lang.Subject + +import static CodecTestUtil.prepareReaderWithObjectToBeDecoded + +class CodeWithScopeSpecification extends Specification { + private final BsonWriter bsonWriter = Mock() + + @Subject + private final CodeWithScopeCodec codeWithScopeCodec = new CodeWithScopeCodec(new DocumentCodec()) + + def 'should encode code with scope as java script followed by document of scope'() { + given: + String javascriptCode = '' + CodeWithScope codeWithScope = new CodeWithScope(javascriptCode, new Document('the', 'scope')) + + when: + codeWithScopeCodec.encode(bsonWriter, codeWithScope, EncoderContext.builder().build()) + + then: + 1 * bsonWriter.writeJavaScriptWithScope(javascriptCode) + then: + 1 * bsonWriter.writeStartDocument() + then: + 1 * bsonWriter.writeName('the') + then: + 1 * bsonWriter.writeString('scope') + then: + 1 * bsonWriter.writeEndDocument() + } + + def 'should decode code with scope'() { + given: + CodeWithScope codeWithScope = new CodeWithScope('{javascript code}', new Document('the', 'scope')) + BsonBinaryReader reader = prepareReaderWithObjectToBeDecoded(codeWithScope) + + when: + CodeWithScope actualCodeWithScope = codeWithScopeCodec.decode(reader, DecoderContext.builder().build()) + + then: + actualCodeWithScope == codeWithScope + } +} diff --git a/bson/src/test/unit/org/bson/codecs/CodecTestCase.java b/bson/src/test/unit/org/bson/codecs/CodecTestCase.java new file mode 100644 index 00000000000..17768d0d133 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/CodecTestCase.java @@ -0,0 +1,126 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs; + +import org.bson.BsonBinaryReader; +import org.bson.BsonBinaryWriter; +import org.bson.BsonDocument; +import org.bson.BsonDocumentReader; +import org.bson.BsonDocumentWriter; +import org.bson.BsonType; +import org.bson.BsonValue; +import org.bson.BsonWriter; +import org.bson.ByteBufNIO; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.io.BasicOutputBuffer; +import org.bson.io.ByteBufferBsonInput; +import org.bson.io.OutputBuffer; + +import java.nio.ByteBuffer; +import java.util.HashMap; + +import static java.util.Arrays.asList; +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; +import static org.junit.jupiter.api.Assertions.assertEquals; + +abstract class CodecTestCase { + + DocumentCodecProvider getDocumentCodecProvider() { + return new DocumentCodecProvider(); + } + + CodecRegistry getRegistry() { + return fromProviders(asList(new ValueCodecProvider(), getDocumentCodecProvider())); + } + + <T> T getDecodedValue(final BsonValue bsonValue, final Decoder<T> decoder) { + BsonDocument document = new BsonDocument("val", bsonValue); + BsonDocumentReader reader = new BsonDocumentReader(document); + reader.readStartDocument(); + reader.readName("val"); + return decoder.decode(reader, DecoderContext.builder().build()); + } + + <T> BsonValue getEncodedValue(final T value, final Encoder<T> encoder) { + BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument()); + writer.writeStartDocument(); + writer.writeName("val"); + encoder.encode(writer, value, EncoderContext.builder().build()); + writer.writeEndDocument(); + return writer.getDocument().get("val"); + } + + <T> void roundTrip(final T value) { + roundTrip(value, new DefaultComparator<>(value)); + } + + <T> void roundTrip(final T value, final Comparator<T> comparator) { + roundTripWithRegistry(value, comparator, getRegistry()); + } + + // Encodes the value with the codec resolved from the registry, decodes the bytes back, and lets the comparator assert on the result. + @SuppressWarnings("unchecked") + <T> void roundTripWithRegistry(final T value, final Comparator<T> comparator, final CodecRegistry codecRegistry) { + Codec<T> codec = (Codec<T>) codecRegistry.get(value.getClass()); + OutputBuffer encoded = encode(codec, value); + T decoded = decode(codec, encoded); + comparator.apply(decoded); + } + + public void roundTrip(final Document input, final Document expected) { + roundTrip(input, result -> assertEquals(expected, result)); + } + + <T> OutputBuffer encode(final Codec<T> codec, final T value) { + OutputBuffer buffer = new BasicOutputBuffer(); + BsonWriter writer = new BsonBinaryWriter(buffer); + codec.encode(writer, value, EncoderContext.builder().build()); + return buffer; + } + + <T> T decode(final Codec<T> codec, final OutputBuffer buffer) { + BsonBinaryReader reader = new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(buffer.toByteArray())))); + return codec.decode(reader, DecoderContext.builder().build()); + } + + DocumentCodecProvider getSpecificNumberDocumentCodecProvider(final Class<?> clazz) { + HashMap<BsonType, Class<?>> replacements = new HashMap<>(); + replacements.put(BsonType.DOUBLE, clazz); + replacements.put(BsonType.INT32, clazz); + replacements.put(BsonType.INT64, clazz); + replacements.put(BsonType.DECIMAL128, clazz); + return new DocumentCodecProvider(new BsonTypeClassMap(replacements)); + } + + interface Comparator<T> { + void apply(T result); + } + + class DefaultComparator<T> implements Comparator<T> { + private final T original; + + DefaultComparator(final T original) { + this.original = original; + } + + @Override + public void apply(final T result) { + assertEquals(original, result); + }
+ } + +} diff --git a/bson/src/test/unit/org/bson/codecs/CodecTestUtil.java b/bson/src/test/unit/org/bson/codecs/CodecTestUtil.java new file mode 100644 index 00000000000..54e0efee5b7 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/CodecTestUtil.java @@ -0,0 +1,68 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonBinaryReader; +import org.bson.BsonBinaryWriter; +import org.bson.ByteBufNIO; +import org.bson.Document; +import org.bson.io.BasicOutputBuffer; +import org.bson.io.ByteBufferBsonInput; + +import static java.nio.ByteBuffer.wrap; + +public final class CodecTestUtil { + static BsonBinaryReader prepareReaderWithObjectToBeDecoded(final Object objectToDecode) { + // Need to encode it wrapped in a document to conform to the validation + Document document = new Document("wrapperDocument", objectToDecode); + + BasicOutputBuffer outputBuffer = new BasicOutputBuffer(); + + BsonBinaryWriter writer = new BsonBinaryWriter(outputBuffer); + byte[] documentAsByteArrayForReader; + try { + new DocumentCodec().encode(writer, document, EncoderContext.builder().build()); + documentAsByteArrayForReader = outputBuffer.toByteArray(); + } finally { + writer.close(); + } + + BsonBinaryReader reader = new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(wrap(documentAsByteArrayForReader)))); + + // Have to read off the wrapper document so the reader is in the correct position for the test + reader.readStartDocument(); + reader.readName(); + return reader; + } + + static <T> BsonBinaryReader prepareReaderWithObjectToBeDecoded(final T objectToDecode, final Codec<T> codec) { + BasicOutputBuffer outputBuffer = new BasicOutputBuffer(); + + BsonBinaryWriter writer = new BsonBinaryWriter(outputBuffer); + byte[] documentAsByteArrayForReader; + try { + codec.encode(writer, objectToDecode, EncoderContext.builder().build()); + documentAsByteArrayForReader = outputBuffer.toByteArray(); + } finally { + writer.close(); + } + + return new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(wrap(documentAsByteArrayForReader)))); + } + + private CodecTestUtil() { } +} diff --git a/bson/src/test/unit/org/bson/codecs/CollectionCodecProviderTest.java b/bson/src/test/unit/org/bson/codecs/CollectionCodecProviderTest.java new file mode 100644 index 00000000000..d15a992f251 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/CollectionCodecProviderTest.java @@ -0,0 +1,52 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.conversions.Bson; +import org.junit.jupiter.api.Test; + +import java.util.Set; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +final class CollectionCodecProviderTest { + @Test + void shouldReturnNullForNonCollection() { + CollectionCodecProvider provider = new CollectionCodecProvider(); + assertNull(provider.get(String.class, Bson.DEFAULT_CODEC_REGISTRY)); + } + + @Test + void shouldReturnCollectionCodecForCollection() { + CollectionCodecProvider provider = new CollectionCodecProvider(); + @SuppressWarnings({"rawtypes", "unchecked"}) + Codec<Set<Object>> codec = (Codec<Set<Object>>) (Codec) provider.get(Set.class, Bson.DEFAULT_CODEC_REGISTRY); + assertTrue(codec instanceof CollectionCodec); + CollectionCodec<Set<Object>> recordCodec = (CollectionCodec<Set<Object>>) codec; + assertEquals(Set.class, recordCodec.getEncoderClass()); + } + + @Test + public void shouldReturnCollectionCodecForCollectionUsingDefaultRegistry() { + @SuppressWarnings({"rawtypes", "unchecked"}) + Codec<Set<Object>> codec = (Codec<Set<Object>>) (Codec) Bson.DEFAULT_CODEC_REGISTRY.get(Set.class); + assertTrue(codec instanceof CollectionCodec); + CollectionCodec<Set<Object>> recordCodec = (CollectionCodec<Set<Object>>) codec; + assertEquals(Set.class, recordCodec.getEncoderClass()); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/CollectionCodecSpecification.groovy b/bson/src/test/unit/org/bson/codecs/CollectionCodecSpecification.groovy new file mode 100644 index 00000000000..269032b8014 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/CollectionCodecSpecification.groovy @@ -0,0 +1,238 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.bson.codecs + +import org.bson.BsonArray +import org.bson.BsonDateTime +import org.bson.BsonDocument +import org.bson.BsonDocumentReader +import org.bson.BsonDocumentWriter +import org.bson.codecs.jsr310.Jsr310CodecProvider +import org.bson.types.Binary +import spock.lang.Specification +import spock.lang.Unroll + +import java.lang.reflect.ParameterizedType +import java.time.Instant +import java.util.concurrent.CopyOnWriteArrayList + +import static java.util.Arrays.asList +import static org.bson.BsonDocument.parse +import static org.bson.UuidRepresentation.C_SHARP_LEGACY +import static org.bson.UuidRepresentation.JAVA_LEGACY +import static org.bson.UuidRepresentation.PYTHON_LEGACY +import static org.bson.UuidRepresentation.STANDARD +import static org.bson.UuidRepresentation.UNSPECIFIED +import static org.bson.codecs.configuration.CodecRegistries.fromCodecs +import static org.bson.codecs.configuration.CodecRegistries.fromProviders +import static org.bson.codecs.configuration.CodecRegistries.fromRegistries + +class CollectionCodecSpecification extends Specification { + + static final REGISTRY = fromRegistries(fromCodecs(new UuidCodec(JAVA_LEGACY)), + fromProviders(new ValueCodecProvider(), new DocumentCodecProvider(), new BsonValueCodecProvider(), + new CollectionCodecProvider(), new MapCodecProvider())) + + def 'should decode to specified generic class'() { + given: + def doc = new BsonDocument('a', new BsonArray()) + + when: + def codec = new CollectionCodec(fromProviders([new ValueCodecProvider()]), new BsonTypeClassMap(), null, collectionType) + def reader = new BsonDocumentReader(doc) + reader.readStartDocument() + reader.readName('a') + def collection = codec.decode(reader, DecoderContext.builder().build()) + + then: + codec.getEncoderClass() == collectionType + collection.getClass() == decodedType + + where: + collectionType | decodedType + Collection | ArrayList + List | ArrayList + AbstractList | ArrayList + AbstractCollection | ArrayList + ArrayList | ArrayList + Set | HashSet + AbstractSet | HashSet + HashSet | HashSet + NavigableSet | TreeSet + SortedSet | TreeSet + TreeSet | TreeSet + CopyOnWriteArrayList | CopyOnWriteArrayList + } + + def 'should encode a Collection to a BSON array'() { + given: + def codec = new CollectionCodec(REGISTRY, new BsonTypeClassMap(), null, Collection) + def writer = new BsonDocumentWriter(new BsonDocument()) + + when: + writer.writeStartDocument() + writer.writeName('array') + codec.encode(writer, [1, 2, 3, null], EncoderContext.builder().build()) + writer.writeEndDocument() + + then: + writer.document == parse('{array : [1, 2, 3, null]}') + } + + def 'should decode a BSON array to a Collection'() { + given: + def codec = new CollectionCodec(REGISTRY, new BsonTypeClassMap(), null, Collection) + def reader = new BsonDocumentReader(parse('{array : [1, 2, 3, null]}')) + + when: + reader.readStartDocument() + reader.readName('array') + def collection = codec.decode(reader, DecoderContext.builder().build()) + reader.readEndDocument() + + then: + collection == [1, 2, 3, null] + } + + def 'should decode a BSON array of arrays to a Collection of Collection'() { + given: + def codec = new CollectionCodec(REGISTRY, new BsonTypeClassMap(), null, Collection) + def reader = new BsonDocumentReader(parse('{array : [[1, 2], [3, 4, 5]]}')) + + when: + reader.readStartDocument() + reader.readName('array') + def collection = codec.decode(reader, DecoderContext.builder().build()) + reader.readEndDocument() + + then: + collection == [[1, 2], [3, 4, 
5]] + } + + def 'should use provided transformer'() { + given: + def codec = new CollectionCodec(REGISTRY, new BsonTypeClassMap(), { Object from -> + from.toString() + }, Collection) + def reader = new BsonDocumentReader(parse('{array : [1, 2, 3]}')) + + when: + reader.readStartDocument() + reader.readName('array') + def collection = codec.decode(reader, DecoderContext.builder().build()) + reader.readEndDocument() + + then: + collection == ['1', '2', '3'] + } + + @SuppressWarnings(['LineLength']) + @Unroll + def 'should decode binary subtype 3 for UUID'() { + given: + def reader = new BsonDocumentReader(parse(document)) + def codec = new CollectionCodec(fromCodecs(new UuidCodec(representation), new BinaryCodec()), new BsonTypeClassMap(), + null, Collection) + .withUuidRepresentation(representation) + + when: + reader.readStartDocument() + reader.readName('array') + def collection = codec.decode(reader, DecoderContext.builder().build()) + reader.readEndDocument() + + then: + value == collection + + where: + representation | value | document + JAVA_LEGACY | [UUID.fromString('08070605-0403-0201-100f-0e0d0c0b0a09')] | '{"array": [{ "$binary" : "AQIDBAUGBwgJCgsMDQ4PEA==", "$type" : "3" }]}' + C_SHARP_LEGACY | [UUID.fromString('04030201-0605-0807-090a-0b0c0d0e0f10')] | '{"array": [{ "$binary" : "AQIDBAUGBwgJCgsMDQ4PEA==", "$type" : "3" }]}' + PYTHON_LEGACY | [UUID.fromString('01020304-0506-0708-090a-0b0c0d0e0f10')] | '{"array": [{ "$binary" : "AQIDBAUGBwgJCgsMDQ4PEA==", "$type" : "3" }]}' + STANDARD | [new Binary((byte) 3, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[])] | '{"array": [{ "$binary" : "AQIDBAUGBwgJCgsMDQ4PEA==", "$type" : "3" }]}' + UNSPECIFIED | [new Binary((byte) 3, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[])] | '{"array": [{ "$binary" : "AQIDBAUGBwgJCgsMDQ4PEA==", "$type" : "3" }]}' + } + + @SuppressWarnings(['LineLength']) + @Unroll + def 'should decode binary subtype 4 for UUID'() { + given: + def reader = new BsonDocumentReader(parse(document)) + def codec = new CollectionCodec(fromCodecs(new UuidCodec(representation), new BinaryCodec()), new BsonTypeClassMap(), + null, Collection) + .withUuidRepresentation(representation) + + when: + reader.readStartDocument() + reader.readName('array') + def collection = codec.decode(reader, DecoderContext.builder().build()) + reader.readEndDocument() + + then: + value == collection + + where: + representation | value | document + STANDARD | [UUID.fromString('01020304-0506-0708-090a-0b0c0d0e0f10')] | '{"array": [{ "$binary" : "AQIDBAUGBwgJCgsMDQ4PEA==", "$type" : "4" }]}' + JAVA_LEGACY | [UUID.fromString('01020304-0506-0708-090a-0b0c0d0e0f10')] | '{"array": [{ "$binary" : "CAcGBQQDAgEQDw4NDAsKCQ==", "$type" : "3" }]}' + C_SHARP_LEGACY | [new Binary((byte) 4, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[])] | '{"array": [{ "$binary" : "AQIDBAUGBwgJCgsMDQ4PEA==", "$type" : "4" }]}' + PYTHON_LEGACY | [new Binary((byte) 4, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[])] | '{"array": [{ "$binary" : "AQIDBAUGBwgJCgsMDQ4PEA==", "$type" : "4" }]}' + UNSPECIFIED | [new Binary((byte) 4, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[])] | '{"array": [{ "$binary" : "AQIDBAUGBwgJCgsMDQ4PEA==", "$type" : "4" }]}' + } + + def 'should parameterize'() { + given: + def codec = fromProviders(new Jsr310CodecProvider(), REGISTRY).get( + Collection, + asList(((ParameterizedType) Container.getMethod('getInstants').genericReturnType).actualTypeArguments)) + def 
writer = new BsonDocumentWriter(new BsonDocument()) + def reader = new BsonDocumentReader(writer.getDocument()) + def instants = [ + ['firstMap': [Instant.ofEpochMilli(1), Instant.ofEpochMilli(2)]], + ['secondMap': [Instant.ofEpochMilli(3), Instant.ofEpochMilli(4)]]] + + when: + writer.writeStartDocument() + writer.writeName('instants') + codec.encode(writer, instants, EncoderContext.builder().build()) + writer.writeEndDocument() + + then: + writer.getDocument() == new BsonDocument() + .append('instants', new BsonArray( + [ + new BsonDocument('firstMap', new BsonArray([new BsonDateTime(1), new BsonDateTime(2)])), + new BsonDocument('secondMap', new BsonArray([new BsonDateTime(3), new BsonDateTime(4)])) + ])) + + when: + reader.readStartDocument() + reader.readName('instants') + def decodedInstants = codec.decode(reader, DecoderContext.builder().build()) + + then: + decodedInstants == instants + } + + @SuppressWarnings('unused') + static class Container { + private final List<Map<String, List<Instant>>> instants = [] + + List<Map<String, List<Instant>>> getInstants() { + instants + } + } +} diff --git a/bson/src/test/unit/org/bson/codecs/DocumentCodecSpecification.groovy b/bson/src/test/unit/org/bson/codecs/DocumentCodecSpecification.groovy new file mode 100644 index 00000000000..c2dac8a6027 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/DocumentCodecSpecification.groovy @@ -0,0 +1,300 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.bson.codecs + +import org.bson.BsonBinaryReader +import org.bson.BsonBinaryWriter +import org.bson.BsonDbPointer +import org.bson.BsonDocument +import org.bson.BsonDocumentReader +import org.bson.BsonDocumentWriter +import org.bson.BsonInt32 +import org.bson.BsonReader +import org.bson.BsonRegularExpression +import org.bson.BsonTimestamp +import org.bson.BsonUndefined +import org.bson.BsonWriter +import org.bson.ByteBufNIO +import org.bson.Document +import org.bson.codecs.configuration.CodecRegistry +import org.bson.io.BasicOutputBuffer +import org.bson.io.ByteBufferBsonInput +import org.bson.json.JsonReader +import org.bson.types.Binary +import org.bson.types.Code +import org.bson.types.CodeWithScope +import org.bson.types.MaxKey +import org.bson.types.MinKey +import org.bson.types.ObjectId +import org.bson.types.Symbol +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Unroll + +import java.nio.ByteBuffer +import java.util.concurrent.atomic.AtomicBoolean +import java.util.concurrent.atomic.AtomicInteger +import java.util.concurrent.atomic.AtomicLong + +import static java.util.Arrays.asList +import static org.bson.UuidRepresentation.C_SHARP_LEGACY +import static org.bson.UuidRepresentation.JAVA_LEGACY +import static org.bson.UuidRepresentation.PYTHON_LEGACY +import static org.bson.UuidRepresentation.STANDARD +import static org.bson.UuidRepresentation.UNSPECIFIED +import static org.bson.codecs.configuration.CodecRegistries.fromCodecs +import static org.bson.codecs.configuration.CodecRegistries.fromProviders +import static org.bson.codecs.configuration.CodecRegistries.fromRegistries + +class DocumentCodecSpecification extends Specification { + static final CodecRegistry REGISTRY = fromRegistries(fromCodecs(new UuidCodec(STANDARD)), + fromProviders(asList(new ValueCodecProvider(), new CollectionCodecProvider(), + new BsonValueCodecProvider(), new DocumentCodecProvider(), new MapCodecProvider()))) + + @Shared + BsonDocument bsonDoc = new BsonDocument() + @Shared + StringWriter stringWriter = new StringWriter() + + def 'should encode and decode all default types with all readers and writers'(BsonWriter writer) { + given: + def originalDocument = new Document() + originalDocument.with { + put('null', null) + put('int32', 42) + put('int64', 52L) + put('booleanTrue', true) + put('booleanFalse', false) + put('date', new Date()) + put('dbPointer', new BsonDbPointer('foo.bar', new ObjectId())) + put('double', 62.0 as double) + put('minKey', new MinKey()) + put('maxKey', new MaxKey()) + put('code', new Code('int i = 0;')) + put('codeWithScope', new CodeWithScope('int x = y', new Document('y', 1))) + put('objectId', new ObjectId()) + put('regex', new BsonRegularExpression('^test.*regex.*xyz$', 'i')) + put('string', 'the fox ...') + put('symbol', new Symbol('ruby stuff')) + put('timestamp', new BsonTimestamp(0x12345678, 5)) + put('undefined', new BsonUndefined()) + put('binary', new Binary((byte) 0x80, [5, 4, 3, 2, 1] as byte[])) + put('array', asList(1, 1L, true, [1, 2, 3], new Document('a', 1), null)) + put('uuid', new UUID(1L, 2L)) + put('document', new Document('a', 2)) + put('map', [a:1, b:2]) + put('atomicLong', new AtomicLong(1)) + put('atomicInteger', new AtomicInteger(1)) + put('atomicBoolean', new AtomicBoolean(true)) + } + + when: + new DocumentCodec(REGISTRY).withUuidRepresentation(STANDARD) + .encode(writer, originalDocument, EncoderContext.builder().build()) + BsonReader reader + if (writer instanceof BsonDocumentWriter) { + reader = new 
BsonDocumentReader(bsonDoc) + } else if (writer instanceof BsonBinaryWriter) { + BasicOutputBuffer buffer = (BasicOutputBuffer)writer.getBsonOutput() + reader = new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO( + ByteBuffer.wrap(buffer.toByteArray())))) + } else { + reader = new JsonReader(stringWriter.toString()) + } + def decodedDoc = new DocumentCodec(REGISTRY).withUuidRepresentation(STANDARD).decode(reader, DecoderContext.builder().build()) + + then: + decodedDoc.get('null') == originalDocument.get('null') + decodedDoc.get('int32') == originalDocument.get('int32') + decodedDoc.get('int64') == originalDocument.get('int64') + decodedDoc.get('booleanTrue') == originalDocument.get('booleanTrue') + decodedDoc.get('booleanFalse') == originalDocument.get('booleanFalse') + decodedDoc.get('date') == originalDocument.get('date') + decodedDoc.get('dbPointer') == originalDocument.get('dbPointer') + decodedDoc.get('double') == originalDocument.get('double') + decodedDoc.get('minKey') == originalDocument.get('minKey') + decodedDoc.get('maxKey') == originalDocument.get('maxKey') + decodedDoc.get('code') == originalDocument.get('code') + decodedDoc.get('codeWithScope') == originalDocument.get('codeWithScope') + decodedDoc.get('objectId') == originalDocument.get('objectId') + decodedDoc.get('regex') == originalDocument.get('regex') + decodedDoc.get('string') == originalDocument.get('string') + decodedDoc.get('symbol') == originalDocument.get('symbol') + decodedDoc.get('timestamp') == originalDocument.get('timestamp') + decodedDoc.get('undefined') == originalDocument.get('undefined') + decodedDoc.get('binary') == originalDocument.get('binary') + decodedDoc.get('uuid') == originalDocument.get('uuid') + decodedDoc.get('array') == originalDocument.get('array') + decodedDoc.get('document') == originalDocument.get('document') + decodedDoc.get('map') == originalDocument.get('map') + decodedDoc.get('atomicLong') == ((AtomicLong) originalDocument.get('atomicLong')).get() + decodedDoc.get('atomicInteger') == ((AtomicInteger) originalDocument.get('atomicInteger')).get() + decodedDoc.get('atomicBoolean') == ((AtomicBoolean) originalDocument.get('atomicBoolean')).get() + + where: + writer << [ + new BsonDocumentWriter(bsonDoc), + new BsonBinaryWriter(new BasicOutputBuffer()), +// new JsonWriter(stringWriter) + ] + } + + def 'should decode binary subtypes for UUID that are not 16 bytes into Binary'() { + given: + def reader = new BsonBinaryReader(ByteBuffer.wrap(bytes as byte[])) + + when: + def document = new DocumentCodec().decode(reader, DecoderContext.builder().build()) + + then: + value == document.get('f') + + where: + value | bytes + new Binary((byte) 0x03, (byte[]) [115, 116, 11]) | [16, 0, 0, 0, 5, 102, 0, 3, 0, 0, 0, 3, 115, 116, 11, 0] + new Binary((byte) 0x04, (byte[]) [115, 116, 11]) | [16, 0, 0, 0, 5, 102, 0, 3, 0, 0, 0, 4, 115, 116, 11, 0] + } + + @SuppressWarnings(['LineLength']) + @Unroll + def 'should decode binary subtype 3 for UUID'() { + given: + def reader = new BsonBinaryReader(ByteBuffer.wrap(bytes as byte[])) + + when: + def document = new DocumentCodec(fromCodecs(new UuidCodec(representation), new BinaryCodec())) + .withUuidRepresentation(representation) + .decode(reader, DecoderContext.builder().build()) + + then: + value == document.get('f') + + where: + representation | value | bytes + JAVA_LEGACY | UUID.fromString('08070605-0403-0201-100f-0e0d0c0b0a09') | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0] + C_SHARP_LEGACY | 
UUID.fromString('04030201-0605-0807-090a-0b0c0d0e0f10') | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0] + PYTHON_LEGACY | UUID.fromString('01020304-0506-0708-090a-0b0c0d0e0f10') | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0] + STANDARD | new Binary((byte) 3, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[]) | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0] + UNSPECIFIED | new Binary((byte) 3, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[]) | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0] + } + + @SuppressWarnings(['LineLength']) + @Unroll + def 'should decode binary subtype 4 for UUID'() { + given: + def reader = new BsonBinaryReader(ByteBuffer.wrap(bytes as byte[])) + + when: + def document = new DocumentCodec(fromCodecs(new UuidCodec(representation), new BinaryCodec())) + .withUuidRepresentation(representation) + .decode(reader, DecoderContext.builder().build()) + + then: + value == document.get('f') + + where: + representation | value | bytes + STANDARD | UUID.fromString('01020304-0506-0708-090a-0b0c0d0e0f10') | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 4, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0] + JAVA_LEGACY | new Binary((byte) 4, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[]) | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 4, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0] + C_SHARP_LEGACY | new Binary((byte) 4, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[]) | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 4, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0] + PYTHON_LEGACY | new Binary((byte) 4, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[]) | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 4, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0] + UNSPECIFIED | new Binary((byte) 4, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[]) | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 4, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0] + } + + def 'should respect encodeIdFirst property in encoder context'() { + given: + def originalDocument = new Document('x', 2) + .append('_id', 2) + .append('nested', new Document('x', 2).append('_id', 2)) + .append('array', asList(new Document('x', 2).append('_id', 2))) + + when: + def encodedDocument = new BsonDocument() + new DocumentCodec().encode(new BsonDocumentWriter(encodedDocument), originalDocument, + EncoderContext.builder().isEncodingCollectibleDocument(true).build()) + + then: + encodedDocument.keySet() as List == ['_id', 'x', 'nested', 'array'] + encodedDocument.getDocument('nested').keySet() as List == ['x', '_id'] + encodedDocument.getArray('array').get(0).asDocument().keySet() as List == ['x', '_id'] + + when: + encodedDocument.clear() + new DocumentCodec().encode(new BsonDocumentWriter(encodedDocument), originalDocument, + EncoderContext.builder().isEncodingCollectibleDocument(false).build()) + + then: + encodedDocument.keySet() as List == ['x', '_id', 'nested', 'array'] + encodedDocument.getDocument('nested').keySet() as List == ['x', '_id'] + encodedDocument.getArray('array').get(0).asDocument().keySet() as List == ['x', '_id'] + } + + def 'should apply transformer to decoded values'() { + given: + def codec = new DocumentCodec(fromProviders([new ValueCodecProvider(), new DocumentCodecProvider(), new 
BsonValueCodecProvider()]), + new BsonTypeClassMap(), + { Object value -> 5 }) + when: + def doc = codec.decode(new BsonDocumentReader(new BsonDocument('_id', new BsonInt32(1))), DecoderContext.builder().build()) + + then: + doc['_id'] == 5 + } + + + def 'should generate id if absent'() { + given: + def document = new Document() + + when: + document = new DocumentCodec().generateIdIfAbsentFromDocument(document) + + then: + document.get('_id') instanceof ObjectId + } + + def 'should not generate id if present'() { + given: + def document = new Document('_id', 1) + + when: + document = new DocumentCodec().generateIdIfAbsentFromDocument(document) + + then: + document.get('_id') == 1 + } + + def 'should determine if id is present'() { + expect: + new DocumentCodec().documentHasId(new Document('_id', 1)) + !new DocumentCodec().documentHasId(new Document()) + } + + def 'should get id if present'() { + expect: + new DocumentCodec().getDocumentId(new Document('_id', 1)) == new BsonInt32(1) + new DocumentCodec().getDocumentId(new Document('_id', new BsonInt32(1))) == new BsonInt32(1) + } + + def 'should throw if getting id when absent'() { + when: + new DocumentCodec().getDocumentId(new Document()) + + then: + thrown(IllegalStateException) + } +} diff --git a/bson/src/test/unit/org/bson/codecs/DocumentCodecTest.java b/bson/src/test/unit/org/bson/codecs/DocumentCodecTest.java new file mode 100644 index 00000000000..7343707d5a7 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/DocumentCodecTest.java @@ -0,0 +1,192 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs; + +import org.bson.BsonBinaryReader; +import org.bson.BsonBinarySubType; +import org.bson.BsonBinaryWriter; +import org.bson.BsonInt32; +import org.bson.BsonObjectId; +import org.bson.ByteBufNIO; +import org.bson.Document; +import org.bson.BinaryVector; +import org.bson.io.BasicOutputBuffer; +import org.bson.io.BsonInput; +import org.bson.io.ByteBufferBsonInput; +import org.bson.types.Binary; +import org.bson.types.Code; +import org.bson.types.CodeWithScope; +import org.bson.types.Decimal128; +import org.bson.types.MaxKey; +import org.bson.types.MinKey; +import org.bson.types.ObjectId; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Date; +import java.util.HashSet; +import java.util.List; + +import static java.util.Arrays.asList; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class DocumentCodecTest { + private BasicOutputBuffer buffer; + private BsonBinaryWriter writer; + + @BeforeEach + public void setUp() throws Exception { + buffer = new BasicOutputBuffer(); + writer = new BsonBinaryWriter(buffer); + } + + @AfterEach + public void tearDown() { + writer.close(); + } + + @Test + public void testPrimitiveBSONTypeCodecs() throws IOException { + DocumentCodec documentCodec = new DocumentCodec(); + Document doc = new Document(); + doc.put("oid", new ObjectId()); + doc.put("integer", 1); + doc.put("long", 2L); + doc.put("string", "hello"); + doc.put("double", 3.2); + doc.put("decimal", Decimal128.parse("0.100")); + doc.put("binary", new Binary(BsonBinarySubType.USER_DEFINED, new byte[]{0, 1, 2, 3})); + doc.put("date", new Date(1000)); + doc.put("boolean", true); + doc.put("code", new Code("var i = 0")); + doc.put("minkey", new MinKey()); + doc.put("maxkey", new MaxKey()); + doc.put("vectorFloat", BinaryVector.floatVector(new float[]{1.1f, 2.2f, 3.3f})); + doc.put("vectorInt8", BinaryVector.int8Vector(new byte[]{10, 20, 30, 40})); + doc.put("vectorPackedBit", BinaryVector.packedBitVector(new byte[]{(byte) 0b10101010, (byte) 0b01010101}, (byte) 3)); + // doc.put("pattern", Pattern.compile("^hello")); // TODO: Pattern doesn't override equals method! 
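+ // Note (added comment): the BinaryVector values above round-trip through BSON binary subtype 9 (vector) and compare by value, unlike Pattern.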
+ doc.put("null", null); + + documentCodec.encode(writer, doc, EncoderContext.builder().build()); + + BsonInput bsonInput = createInputBuffer(); + Document decodedDocument = documentCodec.decode(new BsonBinaryReader(bsonInput), DecoderContext.builder().build()); + assertEquals(doc, decodedDocument); + } + + @Test + public void testIterableEncoding() throws IOException { + DocumentCodec documentCodec = new DocumentCodec(); + Document doc = new Document() + .append("list", asList(1, 2, 3, 4, 5)) + .append("set", new HashSet<>(asList(1, 2, 3, 4))); + + documentCodec.encode(writer, doc, EncoderContext.builder().build()); + + BsonInput bsonInput = createInputBuffer(); + Document decodedDocument = documentCodec.decode(new BsonBinaryReader(bsonInput), DecoderContext.builder().build()); + assertEquals(new Document() + .append("list", asList(1, 2, 3, 4, 5)) + .append("set", asList(1, 2, 3, 4)), decodedDocument); + } + + @Test + public void testCodeWithScopeEncoding() throws IOException { + DocumentCodec documentCodec = new DocumentCodec(); + Document doc = new Document(); + doc.put("theCode", new CodeWithScope("javaScript code", new Document("fieldNameOfScope", "valueOfScope"))); + + documentCodec.encode(writer, doc, EncoderContext.builder().build()); + + Document decodedDocument = documentCodec.decode(new BsonBinaryReader(createInputBuffer()), DecoderContext.builder().build()); + assertEquals(doc, decodedDocument); + } + + @Test + public void testIterableContainingOtherIterableEncoding() throws IOException { + DocumentCodec documentCodec = new DocumentCodec(); + Document doc = new Document(); + List> listOfLists = asList(asList(1), asList(2)); + doc.put("array", listOfLists); + + documentCodec.encode(writer, doc, EncoderContext.builder().build()); + + BsonInput bsonInput = createInputBuffer(); + Document decodedDocument = documentCodec.decode(new BsonBinaryReader(bsonInput), DecoderContext.builder().build()); + assertEquals(doc, decodedDocument); + } + + @Test + public void testIterableContainingDocumentsEncoding() throws IOException { + DocumentCodec documentCodec = new DocumentCodec(); + Document doc = new Document(); + List listOfDocuments = asList(new Document("intVal", 1), new Document("anotherInt", 2)); + doc.put("array", listOfDocuments); + + documentCodec.encode(writer, doc, EncoderContext.builder().build()); + + BsonInput bsonInput = createInputBuffer(); + Document decodedDocument = documentCodec.decode(new BsonBinaryReader(bsonInput), DecoderContext.builder().build()); + assertEquals(doc, decodedDocument); + } + + @Test + public void testNestedDocumentEncoding() throws IOException { + DocumentCodec documentCodec = new DocumentCodec(); + Document doc = new Document(); + doc.put("nested", new Document("x", 1)); + + documentCodec.encode(writer, doc, EncoderContext.builder().build()); + + BsonInput bsonInput = createInputBuffer(); + Document decodedDocument = documentCodec.decode(new BsonBinaryReader(bsonInput), DecoderContext.builder().build()); + assertEquals(doc, decodedDocument); + } + + @Test + public void shouldNotGenerateIdIfPresent() { + DocumentCodec documentCodec = new DocumentCodec(); + Document document = new Document("_id", 1); + assertTrue(documentCodec.documentHasId(document)); + document = documentCodec.generateIdIfAbsentFromDocument(document); + assertTrue(documentCodec.documentHasId(document)); + assertEquals(new BsonInt32(1), documentCodec.getDocumentId(document)); + } + + @Test + public void shouldGenerateIdIfAbsent() { + DocumentCodec documentCodec = new DocumentCodec(); 
+ Document document = new Document(); + assertFalse(documentCodec.documentHasId(document)); + document = documentCodec.generateIdIfAbsentFromDocument(document); + assertTrue(documentCodec.documentHasId(document)); + assertEquals(BsonObjectId.class, documentCodec.getDocumentId(document).getClass()); + } + + // TODO: factor into common base class; + private BsonInput createInputBuffer() throws IOException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + buffer.pipe(baos); + return new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(baos.toByteArray()))); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/DoubleCodecTest.java b/bson/src/test/unit/org/bson/codecs/DoubleCodecTest.java new file mode 100644 index 00000000000..cbf6031fb88 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/DoubleCodecTest.java @@ -0,0 +1,66 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonInvalidOperationException; +import org.bson.Document; +import org.bson.types.Decimal128; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertThrows; + +public final class DoubleCodecTest extends CodecTestCase { + + @Test + public void shouldRoundTripDoubleValues() { + roundTrip(new Document("a", Long.MAX_VALUE), new Document("a", (double) Long.MAX_VALUE)); + roundTrip(new Document("a", Long.MIN_VALUE), new Document("a", (double) Long.MIN_VALUE)); + } + + @Test + public void shouldHandleAlternativeNumberValues() { + Document expected = new Document("a", 10.00); + roundTrip(new Document("a", 10), expected); + roundTrip(new Document("a", 10L), expected); + roundTrip(new Document("a", Decimal128.parse("10")), expected); + } + + @Test + public void shouldThrowWhenHandlingLossyLongValues() { + assertThrows(BsonInvalidOperationException.class, () -> roundTrip(new Document("a", Long.MAX_VALUE - 1))); + } + + @Test + public void shouldThrowWhenHandlingLossyLongValues2() { + assertThrows(BsonInvalidOperationException.class, () -> roundTrip(new Document("a", Long.MIN_VALUE + 1))); + } + + @Test + public void shouldThrowWhenHandlingLossyDecimal128Values() { + assertThrows(BsonInvalidOperationException.class, () -> roundTrip(new Document("a", Decimal128.parse("10.0")))); + } + + @Test + public void shouldThrowWhenHandlingNonExpressibleDecimal128Values() { + assertThrows(BsonInvalidOperationException.class, () -> roundTrip(new Document("a", Decimal128.parse("NaN")))); + } + + @Override + DocumentCodecProvider getDocumentCodecProvider() { + return getSpecificNumberDocumentCodecProvider(Double.class); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/EnumCodecProviderTest.java b/bson/src/test/unit/org/bson/codecs/EnumCodecProviderTest.java new file mode 100644 index 00000000000..2e682fafdb5 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/EnumCodecProviderTest.java @@ -0,0 +1,41 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.codecs.configuration.CodecRegistries; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; + +public class EnumCodecProviderTest { + @Test + public void shouldProvideCodecForEnum() { + EnumCodecProvider provider = new EnumCodecProvider(); + Codec<SimpleEnum> codec = provider.get(SimpleEnum.class, CodecRegistries.fromProviders(provider)); + assertNotNull(codec); + assertEquals(EnumCodec.class, codec.getClass()); + } + + @Test + public void shouldNotProvideCodecForNonEnum() { + EnumCodecProvider provider = new EnumCodecProvider(); + Codec<String> codec = provider.get(String.class, CodecRegistries.fromProviders(provider)); + assertNull(codec); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/EnumCodecTest.java b/bson/src/test/unit/org/bson/codecs/EnumCodecTest.java new file mode 100644 index 00000000000..5e714b5afd7 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/EnumCodecTest.java @@ -0,0 +1,39 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonString; +import org.bson.BsonValue; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +public class EnumCodecTest extends CodecTestCase { + @Test + public void shouldEncodeEnum() { + Codec<SimpleEnum> codec = new EnumCodec<>(SimpleEnum.class); + BsonValue encodedValue = getEncodedValue(SimpleEnum.BRAVO, codec); + assertEquals(SimpleEnum.BRAVO.name(), encodedValue.asString().getValue()); + } + + @Test + public void shouldDecodeEnum() { + Codec<SimpleEnum> codec = new EnumCodec<>(SimpleEnum.class); + SimpleEnum decodedValue = getDecodedValue(new BsonString(SimpleEnum.BRAVO.name()), codec); + assertEquals(SimpleEnum.BRAVO, decodedValue); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/FloatCodecTest.java b/bson/src/test/unit/org/bson/codecs/FloatCodecTest.java new file mode 100644 index 00000000000..90cf41a20de --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/FloatCodecTest.java @@ -0,0 +1,67 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonInvalidOperationException; +import org.bson.Document; +import org.bson.types.Decimal128; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertThrows; + +public final class FloatCodecTest extends CodecTestCase { + + @Test + public void shouldRoundTripFloatValues() { + roundTrip(new Document("a", Float.MAX_VALUE)); + roundTrip(new Document("a", -Float.MAX_VALUE)); + } + + @Test + public void shouldRoundTripNegativeFloatValues() { + roundTrip(new Document("a", -1f)); + } + + @Test + public void shouldHandleAlternativeNumberValues() { + Document expected = new Document("a", 10f); + roundTrip(new Document("a", 10), expected); + roundTrip(new Document("a", 10L), expected); + roundTrip(new Document("a", 9.9999999999999992), expected); + roundTrip(new Document("a", Decimal128.parse("10")), expected); + } + + @Test + public void shouldErrorDecodingOutsideMinRange() { + assertThrows(BsonInvalidOperationException.class, () -> roundTrip(new Document("a", -Double.MAX_VALUE))); + } + + @Test + public void shouldErrorDecodingOutsideMaxRange() { + assertThrows(BsonInvalidOperationException.class, () -> roundTrip(new Document("a", Double.MAX_VALUE))); + } + + @Test + public void shouldThrowWhenHandlingLossyDecimal128Values() { + assertThrows(BsonInvalidOperationException.class, () -> roundTrip(new Document("a", Decimal128.parse("10.0")))); + } + + @Override + DocumentCodecProvider getDocumentCodecProvider() { + return getSpecificNumberDocumentCodecProvider(Float.class); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/IntegerCodecTest.java b/bson/src/test/unit/org/bson/codecs/IntegerCodecTest.java new file mode 100644 index 00000000000..11a8ac3647c --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/IntegerCodecTest.java @@ -0,0 +1,67 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs; + +import org.bson.BsonInvalidOperationException; +import org.bson.Document; +import org.bson.types.Decimal128; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertThrows; + +public final class IntegerCodecTest extends CodecTestCase { + + @Test + public void shouldRoundTripIntegerValues() { + roundTrip(new Document("a", Integer.MAX_VALUE)); + roundTrip(new Document("a", Integer.MIN_VALUE)); + } + + @Test + public void shouldHandleAlternativeNumberValues() { + Document expected = new Document("a", 10); + roundTrip(new Document("a", 10L), expected); + roundTrip(new Document("a", 10.00), expected); + roundTrip(new Document("a", 9.9999999999999992), expected); + roundTrip(new Document("a", Decimal128.parse("10")), expected); + } + + @Test + public void shouldErrorDecodingOutsideMinRange() { + assertThrows(BsonInvalidOperationException.class, () -> roundTrip(new Document("a", Long.MIN_VALUE))); + } + + @Test + public void shouldErrorDecodingOutsideMaxRange() { + assertThrows(BsonInvalidOperationException.class, () -> roundTrip(new Document("a", Long.MAX_VALUE))); + } + + @Test + public void shouldThrowWhenHandlingLossyDoubleValues() { + assertThrows(BsonInvalidOperationException.class, () -> roundTrip(new Document("a", 9.9999999999999991))); + } + + @Test + public void shouldThrowWhenHandlingLossyDecimal128Values() { + assertThrows(BsonInvalidOperationException.class, () -> roundTrip(new Document("a", Decimal128.parse("10.0")))); + } + + @Override + DocumentCodecProvider getDocumentCodecProvider() { + return getSpecificNumberDocumentCodecProvider(Integer.class); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/IterableCodecProviderSpecification.groovy b/bson/src/test/unit/org/bson/codecs/IterableCodecProviderSpecification.groovy new file mode 100644 index 00000000000..b5217676871 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/IterableCodecProviderSpecification.groovy @@ -0,0 +1,72 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs + +import org.bson.BsonType +import spock.lang.Specification + +import static org.bson.codecs.configuration.CodecRegistries.fromProviders + +class IterableCodecProviderSpecification extends Specification { + + def 'should provide codec for Iterables'() { + given: + def provider = new IterableCodecProvider() + def registry = fromProviders(provider, new BsonValueCodecProvider(), new ValueCodecProvider(), new DocumentCodecProvider()) + + expect: + provider.get(Iterable, registry) instanceof IterableCodec + provider.get(List, registry) instanceof IterableCodec + provider.get(ArrayList, registry) instanceof IterableCodec + } + + def 'should not provide codec for non-Iterables'() { + given: + def provider = new IterableCodecProvider() + def registry = fromProviders(provider, new BsonValueCodecProvider(), new ValueCodecProvider(), new DocumentCodecProvider()) + + expect: + provider.get(Integer, registry) == null + } + + def 'identical instances should be equal and have same hash code'() { + given: + def first = new IterableCodecProvider() + def second = new IterableCodecProvider() + + expect: + first.equals(first) + first.equals(second) + first.hashCode() == first.hashCode() + first.hashCode() == second.hashCode() + } + + def 'unidentical instances should not be equal'() { + given: + def first = new IterableCodecProvider() + def second = new IterableCodecProvider(new BsonTypeClassMap([(BsonType.BOOLEAN): String])) + def third = new IterableCodecProvider(new BsonTypeClassMap(), { Object from -> + from + }) + + expect: + !first.equals(Map) + !first.equals(second) + !first.equals(third) + !second.equals(third) + } +} diff --git a/bson/src/test/unit/org/bson/codecs/IterableCodecSpecification.groovy b/bson/src/test/unit/org/bson/codecs/IterableCodecSpecification.groovy new file mode 100644 index 00000000000..6af13dfc2ac --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/IterableCodecSpecification.groovy @@ -0,0 +1,174 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs + +import org.bson.BsonDocument +import org.bson.BsonDocumentReader +import org.bson.BsonDocumentWriter +import org.bson.types.Binary +import spock.lang.Specification +import spock.lang.Unroll + +import java.time.Instant + +import static org.bson.BsonDocument.parse +import static org.bson.UuidRepresentation.C_SHARP_LEGACY +import static org.bson.UuidRepresentation.JAVA_LEGACY +import static org.bson.UuidRepresentation.PYTHON_LEGACY +import static org.bson.UuidRepresentation.STANDARD +import static org.bson.UuidRepresentation.UNSPECIFIED +import static org.bson.codecs.configuration.CodecRegistries.fromCodecs +import static org.bson.codecs.configuration.CodecRegistries.fromProviders +import static org.bson.codecs.configuration.CodecRegistries.fromRegistries + +class IterableCodecSpecification extends Specification { + + static final REGISTRY = fromRegistries(fromCodecs(new UuidCodec(JAVA_LEGACY)), + fromProviders(new ValueCodecProvider(), new DocumentCodecProvider(), new BsonValueCodecProvider(), + new IterableCodecProvider(), new MapCodecProvider())) + + def 'should have Iterable encoding class'() { + given: + def codec = new IterableCodec(REGISTRY, new BsonTypeClassMap(), null) + + expect: + codec.getEncoderClass() == Iterable + } + + def 'should encode an Iterable to a BSON array'() { + given: + def codec = new IterableCodec(REGISTRY, new BsonTypeClassMap(), null) + def writer = new BsonDocumentWriter(new BsonDocument()) + + when: + writer.writeStartDocument() + writer.writeName('array') + codec.encode(writer, [1, 2, 3, null], EncoderContext.builder().build()) + writer.writeEndDocument() + + then: + writer.document == parse('{array : [1, 2, 3, null]}') + } + + def 'should decode a BSON array to an Iterable'() { + given: + def codec = new IterableCodec(REGISTRY, new BsonTypeClassMap(), null) + def reader = new BsonDocumentReader(parse('{array : [1, 2, 3, null]}')) + + when: + reader.readStartDocument() + reader.readName('array') + def iterable = codec.decode(reader, DecoderContext.builder().build()) + reader.readEndDocument() + + then: + iterable == [1, 2, 3, null] + } + + def 'should decode a BSON array of arrays to an Iterable of Iterables'() { + given: + def codec = new IterableCodec(REGISTRY, new BsonTypeClassMap(), null) + def reader = new BsonDocumentReader(parse('{array : [[1, 2], [3, 4, 5]]}')) + + when: + reader.readStartDocument() + reader.readName('array') + def iterable = codec.decode(reader, DecoderContext.builder().build()) + reader.readEndDocument() + + then: + iterable == [[1, 2], [3, 4, 5]] + } + + def 'should use provided transformer'() { + given: + def codec = new IterableCodec(REGISTRY, new BsonTypeClassMap(), { Object from -> + from.toString() + }) + def reader = new BsonDocumentReader(parse('{array : [1, 2, 3]}')) + + when: + reader.readStartDocument() + reader.readName('array') + def iterable = codec.decode(reader, DecoderContext.builder().build()) + reader.readEndDocument() + + then: + iterable == ['1', '2', '3'] + } + + @SuppressWarnings(['LineLength']) + @Unroll + def 'should decode binary subtype 3 for UUID'() { + given: + def reader = new BsonDocumentReader(parse(document)) + def codec = new IterableCodec(fromCodecs(new UuidCodec(representation), new BinaryCodec()), new BsonTypeClassMap(), null) + .withUuidRepresentation(representation) + + when: + reader.readStartDocument() + reader.readName('array') + def iterable = codec.decode(reader, DecoderContext.builder().build()) + reader.readEndDocument() + + then: + value == iterable + 
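+ // Expected values below reflect the legacy byte orders (added note): JAVA_LEGACY reverses each 8-byte half of the payload, + // C_SHARP_LEGACY byte-swaps the leading int32/int16/int16 groups, and PYTHON_LEGACY reads all 16 bytes big-endian as-is.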
+ where: + representation | value | document + JAVA_LEGACY | [UUID.fromString('08070605-0403-0201-100f-0e0d0c0b0a09')] | '{"array": [{ "$binary" : "AQIDBAUGBwgJCgsMDQ4PEA==", "$type" : "3" }]}' + C_SHARP_LEGACY | [UUID.fromString('04030201-0605-0807-090a-0b0c0d0e0f10')] | '{"array": [{ "$binary" : "AQIDBAUGBwgJCgsMDQ4PEA==", "$type" : "3" }]}' + PYTHON_LEGACY | [UUID.fromString('01020304-0506-0708-090a-0b0c0d0e0f10')] | '{"array": [{ "$binary" : "AQIDBAUGBwgJCgsMDQ4PEA==", "$type" : "3" }]}' + STANDARD | [new Binary((byte) 3, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[])] | '{"array": [{ "$binary" : "AQIDBAUGBwgJCgsMDQ4PEA==", "$type" : "3" }]}' + UNSPECIFIED | [new Binary((byte) 3, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[])] | '{"array": [{ "$binary" : "AQIDBAUGBwgJCgsMDQ4PEA==", "$type" : "3" }]}' + } + + @SuppressWarnings(['LineLength']) + @Unroll + def 'should decode binary subtype 4 for UUID'() { + given: + def reader = new BsonDocumentReader(parse(document)) + def codec = new IterableCodec(fromCodecs(new UuidCodec(representation), new BinaryCodec()), new BsonTypeClassMap(), null) + .withUuidRepresentation(representation) + + when: + reader.readStartDocument() + reader.readName('array') + def iterable = codec.decode(reader, DecoderContext.builder().build()) + reader.readEndDocument() + + then: + value == iterable + + where: + representation | value | document + STANDARD | [UUID.fromString('01020304-0506-0708-090a-0b0c0d0e0f10')] | '{"array": [{ "$binary" : "AQIDBAUGBwgJCgsMDQ4PEA==", "$type" : "4" }]}' + JAVA_LEGACY | [UUID.fromString('01020304-0506-0708-090a-0b0c0d0e0f10')] | '{"array": [{ "$binary" : "CAcGBQQDAgEQDw4NDAsKCQ==", "$type" : "3" }]}' + C_SHARP_LEGACY | [new Binary((byte) 4, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[])] | '{"array": [{ "$binary" : "AQIDBAUGBwgJCgsMDQ4PEA==", "$type" : "4" }]}' + PYTHON_LEGACY | [new Binary((byte) 4, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[])] | '{"array": [{ "$binary" : "AQIDBAUGBwgJCgsMDQ4PEA==", "$type" : "4" }]}' + UNSPECIFIED | [new Binary((byte) 4, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[])] | '{"array": [{ "$binary" : "AQIDBAUGBwgJCgsMDQ4PEA==", "$type" : "4" }]}' + } + + @SuppressWarnings('unused') + static class Container { + private final List<Map<String, List<Instant>>> instants = [] + + List<Map<String, List<Instant>>> getInstants() { + instants + } + } +} diff --git a/bson/src/test/unit/org/bson/codecs/JsonObjectCodecProviderTest.java b/bson/src/test/unit/org/bson/codecs/JsonObjectCodecProviderTest.java new file mode 100644 index 00000000000..f3af17ceefb --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/JsonObjectCodecProviderTest.java @@ -0,0 +1,37 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs; + +import org.bson.codecs.configuration.CodecProvider; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.json.JsonObject; +import org.junit.jupiter.api.Test; + +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; + +public class JsonObjectCodecProviderTest { + + @Test + public void testJsonObjectCodecProvider() { + CodecProvider provider = new JsonObjectCodecProvider(); + CodecRegistry registry = fromProviders(provider); + assertEquals(provider.get(JsonObject.class, registry).getClass(), JsonObjectCodec.class); + assertNull(provider.get(Integer.class, registry)); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/JsonObjectCodecSpecification.groovy b/bson/src/test/unit/org/bson/codecs/JsonObjectCodecSpecification.groovy new file mode 100644 index 00000000000..3c4a9c79723 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/JsonObjectCodecSpecification.groovy @@ -0,0 +1,73 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs + +import org.bson.BsonDocument +import org.bson.BsonDocumentReader +import org.bson.BsonDocumentWriter +import org.bson.json.JsonMode +import org.bson.json.JsonObject +import org.bson.json.JsonWriterSettings +import spock.lang.Specification + +import static org.bson.BsonDocument.parse + +class JsonObjectCodecSpecification extends Specification { + def 'should have JsonObject encoding class'() { + given: + def codec = new JsonObjectCodec() + + expect: + codec.getEncoderClass() == JsonObject + } + + def 'should encode JsonObject correctly'() { + given: + def codec = new JsonObjectCodec() + def writer = new BsonDocumentWriter(new BsonDocument()) + + when: + codec.encode(writer, new JsonObject('{hello: {world: 1}}'), EncoderContext.builder().build()) + + then: + writer.document == parse('{hello: {world: 1}}') + } + + def 'should decode JsonObject correctly'() { + given: + def codec = new JsonObjectCodec() + def reader = new BsonDocumentReader(parse('{hello: {world: 1}}')) + + when: + def jsonObject = codec.decode(reader, DecoderContext.builder().build()) + + then: + jsonObject.getJson() == '{"hello": {"world": 1}}' + } + + def 'should use JsonWriterSettings'() { + given: + def codec = new JsonObjectCodec(JsonWriterSettings.builder().outputMode(JsonMode.EXTENDED).build()) + def reader = new BsonDocumentReader(parse('{hello: 1}')) + + when: + def jsonObject = codec.decode(reader, DecoderContext.builder().build()) + + then: + jsonObject.getJson() == '{"hello": {"$numberInt": "1"}}' + } +} diff --git a/bson/src/test/unit/org/bson/codecs/LongCodecTest.java b/bson/src/test/unit/org/bson/codecs/LongCodecTest.java new file mode 100644 index 00000000000..2005718a05d --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/LongCodecTest.java @@ -0,0 +1,62 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs; + +import org.bson.BsonInvalidOperationException; +import org.bson.Document; +import org.bson.types.Decimal128; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertThrows; + +public final class LongCodecTest extends CodecTestCase { + + @Test + public void shouldRoundTripLongValues() { + roundTrip(new Document("a", Long.MAX_VALUE)); + roundTrip(new Document("a", Long.MIN_VALUE)); + } + + @Test + public void shouldHandleAlternativeNumberValues() { + Document expected = new Document("a", 10L); + roundTrip(new Document("a", 10), expected); + roundTrip(new Document("a", 10.00), expected); + // 9.9999999999999992 parses to exactly 10.0, the nearest representable double + roundTrip(new Document("a", 9.9999999999999992), expected); + roundTrip(new Document("a", Decimal128.parse("10")), expected); + } + + @Test + public void shouldThrowWhenHandlingLossyValues() { + assertThrows(BsonInvalidOperationException.class, () -> roundTrip(new Document("a", Double.MAX_VALUE))); + } + + @Test + public void shouldThrowWhenHandlingLossyDoubleValues() { + // 9.9999999999999991 parses to the double just below 10.0, which has no exact long equivalent + assertThrows(BsonInvalidOperationException.class, () -> roundTrip(new Document("a", 9.9999999999999991))); + } + + @Test + public void shouldThrowWhenHandlingLossyDecimal128Values() { + assertThrows(BsonInvalidOperationException.class, () -> roundTrip(new Document("a", Decimal128.parse("10.0")))); + } + + @Override + DocumentCodecProvider getDocumentCodecProvider() { + return getSpecificNumberDocumentCodecProvider(Long.class); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/MapCodecProviderTest.java b/bson/src/test/unit/org/bson/codecs/MapCodecProviderTest.java new file mode 100644 index 00000000000..6437334675a --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/MapCodecProviderTest.java @@ -0,0 +1,53 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs; + +import org.bson.conversions.Bson; +import org.junit.jupiter.api.Test; + +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +final class MapCodecProviderTest { + @Test + void shouldReturnNullForNonMap() { + MapCodecProvider provider = new MapCodecProvider(); + assertNull(provider.get(String.class, Bson.DEFAULT_CODEC_REGISTRY)); + } + + @Test + void shouldReturnMapCodecForMap() { + MapCodecProvider provider = new MapCodecProvider(); + @SuppressWarnings({"rawtypes", "unchecked"}) + Codec<Map<String, Object>> codec = (Codec<Map<String, Object>>) (Codec) provider.get(Map.class, Bson.DEFAULT_CODEC_REGISTRY); + assertTrue(codec instanceof MapCodec); + MapCodec<Map<String, Object>> mapCodec = (MapCodec<Map<String, Object>>) codec; + assertEquals(Map.class, mapCodec.getEncoderClass()); + } + + @Test + public void shouldReturnMapCodecForMapUsingDefaultRegistry() { + @SuppressWarnings({"rawtypes", "unchecked"}) + Codec<Map<String, Object>> codec = (Codec<Map<String, Object>>) (Codec) Bson.DEFAULT_CODEC_REGISTRY.get(Map.class); + assertTrue(codec instanceof MapCodec); + MapCodec<Map<String, Object>> mapCodec = (MapCodec<Map<String, Object>>) codec; + assertEquals(Map.class, mapCodec.getEncoderClass()); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/MapCodecSpecification.groovy b/bson/src/test/unit/org/bson/codecs/MapCodecSpecification.groovy new file mode 100644 index 00000000000..ffe66e32d10 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/MapCodecSpecification.groovy @@ -0,0 +1,296 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs + +import org.bson.BsonArray +import org.bson.BsonBinaryReader +import org.bson.BsonBinaryWriter +import org.bson.BsonDateTime +import org.bson.BsonDbPointer +import org.bson.BsonDocument +import org.bson.BsonDocumentReader +import org.bson.BsonDocumentWriter +import org.bson.BsonInt32 +import org.bson.BsonReader +import org.bson.BsonRegularExpression +import org.bson.BsonTimestamp +import org.bson.BsonUndefined +import org.bson.BsonWriter +import org.bson.ByteBufNIO +import org.bson.Document +import org.bson.codecs.jsr310.Jsr310CodecProvider +import org.bson.io.BasicOutputBuffer +import org.bson.io.ByteBufferBsonInput +import org.bson.json.JsonReader +import org.bson.types.Binary +import org.bson.types.Code +import org.bson.types.CodeWithScope +import org.bson.types.MaxKey +import org.bson.types.MinKey +import org.bson.types.ObjectId +import org.bson.types.Symbol +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Unroll + +import java.lang.reflect.ParameterizedType +import java.nio.ByteBuffer +import java.time.Instant +import java.util.concurrent.atomic.AtomicBoolean +import java.util.concurrent.atomic.AtomicInteger +import java.util.concurrent.atomic.AtomicLong + +import static java.util.Arrays.asList +import static org.bson.UuidRepresentation.C_SHARP_LEGACY +import static org.bson.UuidRepresentation.JAVA_LEGACY +import static org.bson.UuidRepresentation.PYTHON_LEGACY +import static org.bson.UuidRepresentation.STANDARD +import static org.bson.UuidRepresentation.UNSPECIFIED +import static org.bson.codecs.configuration.CodecRegistries.fromCodecs +import static org.bson.codecs.configuration.CodecRegistries.fromProviders +import static org.bson.codecs.configuration.CodecRegistries.fromRegistries + +class MapCodecSpecification extends Specification { + + static final REGISTRY = fromRegistries(fromCodecs(new UuidCodec(JAVA_LEGACY)), + fromProviders(asList(new ValueCodecProvider(), new BsonValueCodecProvider(), + new DocumentCodecProvider(), new CollectionCodecProvider(), new MapCodecProvider()))) + + @Shared + BsonDocument bsonDoc = new BsonDocument() + @Shared + StringWriter stringWriter = new StringWriter() + + def 'should encode and decode all default types with all readers and writers'(BsonWriter writer) { + given: + def originalDocument = [:] + originalDocument.with { + put('null', null) + put('int32', 42) + put('int64', 52L) + put('booleanTrue', true) + put('booleanFalse', false) + put('date', new Date()) + put('dbPointer', new BsonDbPointer('foo.bar', new ObjectId())) + put('double', 62.0 as double) + put('minKey', new MinKey()) + put('maxKey', new MaxKey()) + put('code', new Code('int i = 0;')) + put('codeWithScope', new CodeWithScope('int x = y', new Document('y', 1))) + put('objectId', new ObjectId()) + put('regex', new BsonRegularExpression('^test.*regex.*xyz$', 'i')) + put('string', 'the fox ...') + put('symbol', new Symbol('ruby stuff')) + put('timestamp', new BsonTimestamp(0x12345678, 5)) + put('undefined', new BsonUndefined()) + put('binary', new Binary((byte) 0x80, [5, 4, 3, 2, 1] as byte[])) + put('array', asList(1, 1L, true, [1, 2, 3], new Document('a', 1), null)) + put('document', new Document('a', 2)) + put('map', [a:1, b:2]) + put('atomicLong', new AtomicLong(1)) + put('atomicInteger', new AtomicInteger(1)) + put('atomicBoolean', new AtomicBoolean(true)) + } + + when: + new MapCodec(REGISTRY, new BsonTypeClassMap(), null, Map).encode(writer, originalDocument, EncoderContext.builder().build()) + BsonReader reader + 
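+ // pick a reader over whatever the parameterized writer produced (document, binary buffer, or JSON string)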
if (writer instanceof BsonDocumentWriter) { + reader = new BsonDocumentReader(bsonDoc) + } else if (writer instanceof BsonBinaryWriter) { + BasicOutputBuffer buffer = (BasicOutputBuffer)writer.getBsonOutput() + reader = new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO( + ByteBuffer.wrap(buffer.toByteArray())))) + } else { + reader = new JsonReader(stringWriter.toString()) + } + def decodedDoc = new MapCodec(REGISTRY, new BsonTypeClassMap(), null, Map).decode(reader, DecoderContext.builder().build()) + + then: + decodedDoc.get('null') == originalDocument.get('null') + decodedDoc.get('int32') == originalDocument.get('int32') + decodedDoc.get('int64') == originalDocument.get('int64') + decodedDoc.get('booleanTrue') == originalDocument.get('booleanTrue') + decodedDoc.get('booleanFalse') == originalDocument.get('booleanFalse') + decodedDoc.get('date') == originalDocument.get('date') + decodedDoc.get('dbPointer') == originalDocument.get('dbPointer') + decodedDoc.get('double') == originalDocument.get('double') + decodedDoc.get('minKey') == originalDocument.get('minKey') + decodedDoc.get('maxKey') == originalDocument.get('maxKey') + decodedDoc.get('code') == originalDocument.get('code') + decodedDoc.get('codeWithScope') == originalDocument.get('codeWithScope') + decodedDoc.get('objectId') == originalDocument.get('objectId') + decodedDoc.get('regex') == originalDocument.get('regex') + decodedDoc.get('string') == originalDocument.get('string') + decodedDoc.get('symbol') == originalDocument.get('symbol') + decodedDoc.get('timestamp') == originalDocument.get('timestamp') + decodedDoc.get('undefined') == originalDocument.get('undefined') + decodedDoc.get('binary') == originalDocument.get('binary') + decodedDoc.get('array') == originalDocument.get('array') + decodedDoc.get('document') == originalDocument.get('document') + decodedDoc.get('map') == originalDocument.get('map') + decodedDoc.get('atomicLong') == ((AtomicLong) originalDocument.get('atomicLong')).get() + decodedDoc.get('atomicInteger') == ((AtomicInteger) originalDocument.get('atomicInteger')).get() + decodedDoc.get('atomicBoolean') == ((AtomicBoolean) originalDocument.get('atomicBoolean')).get() + + where: + writer << [ + new BsonDocumentWriter(bsonDoc), + new BsonBinaryWriter(new BasicOutputBuffer()) + ] + } + + def 'should decode binary subtypes for UUID that are not 16 bytes into Binary'() { + given: + def reader = new BsonBinaryReader(ByteBuffer.wrap(bytes as byte[])) + + when: + def document = new DocumentCodec().decode(reader, DecoderContext.builder().build()) + + then: + value == document.get('f') + + where: + value | bytes + new Binary((byte) 0x03, (byte[]) [115, 116, 11]) | [16, 0, 0, 0, 5, 102, 0, 3, 0, 0, 0, 3, 115, 116, 11, 0] + new Binary((byte) 0x04, (byte[]) [115, 116, 11]) | [16, 0, 0, 0, 5, 102, 0, 3, 0, 0, 0, 4, 115, 116, 11, 0] + } + + @SuppressWarnings(['LineLength']) + @Unroll + def 'should decode binary subtype 3 for UUID'() { + given: + def reader = new BsonBinaryReader(ByteBuffer.wrap(bytes as byte[])) + + when: + def map = new MapCodec(fromCodecs(new UuidCodec(representation), new BinaryCodec()), new BsonTypeClassMap(), null, Map) + .withUuidRepresentation(representation) + .decode(reader, DecoderContext.builder().build()) + + then: + value == map.get('f') + + where: + representation | value | bytes + JAVA_LEGACY | UUID.fromString('08070605-0403-0201-100f-0e0d0c0b0a09') | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0] + C_SHARP_LEGACY | 
UUID.fromString('04030201-0605-0807-090a-0b0c0d0e0f10') | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0] + PYTHON_LEGACY | UUID.fromString('01020304-0506-0708-090a-0b0c0d0e0f10') | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0] + STANDARD | new Binary((byte) 3, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[]) | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0] + UNSPECIFIED | new Binary((byte) 3, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[]) | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0] + } + + @SuppressWarnings(['LineLength']) + @Unroll + def 'should decode binary subtype 4 for UUID'() { + given: + def reader = new BsonBinaryReader(ByteBuffer.wrap(bytes as byte[])) + + when: + def map = new MapCodec(fromCodecs(new UuidCodec(representation), new BinaryCodec()), new BsonTypeClassMap(), null, Map) + .withUuidRepresentation(representation) + .decode(reader, DecoderContext.builder().build()) + + then: + value == map.get('f') + + where: + representation | value | bytes + STANDARD | UUID.fromString('01020304-0506-0708-090a-0b0c0d0e0f10') | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 4, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0] + JAVA_LEGACY | new Binary((byte) 4, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[]) | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 4, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0] + C_SHARP_LEGACY | new Binary((byte) 4, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[]) | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 4, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0] + PYTHON_LEGACY | new Binary((byte) 4, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[]) | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 4, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0] + UNSPECIFIED | new Binary((byte) 4, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[]) | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 4, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0] + } + + + def 'should apply transformer to decoded values'() { + given: + def codec = new MapCodec(fromProviders([new ValueCodecProvider(), new DocumentCodecProvider(), new BsonValueCodecProvider()]), + new BsonTypeClassMap(), + { Object value -> 5 }, Map) + when: + def doc = codec.decode(new BsonDocumentReader(new BsonDocument('_id', new BsonInt32(1))), DecoderContext.builder().build()) + + then: + doc['_id'] == 5 + } + + def 'should decode to specified generic class'() { + given: + def doc = new BsonDocument('_id', new BsonInt32(1)) + + when: + def codec = new MapCodec(fromProviders([new ValueCodecProvider()]), new BsonTypeClassMap(), null, mapType) + def map = codec.decode(new BsonDocumentReader(doc), DecoderContext.builder().build()) + + then: + codec.getEncoderClass() == mapType + map.getClass() == actualType + + where: + mapType | actualType + Map | HashMap + NavigableMap | TreeMap + AbstractMap | HashMap + HashMap | HashMap + TreeMap | TreeMap + WeakHashMap | WeakHashMap + } + + + def 'should parameterize'() { + given: + def codec = fromProviders(new Jsr310CodecProvider(), REGISTRY).get( + Map, + asList(((ParameterizedType) Container.getMethod('getInstants').genericReturnType).actualTypeArguments)) + + def writer = new BsonDocumentWriter(new BsonDocument()) + def reader = new 
BsonDocumentReader(writer.getDocument()) + def instants = + ['firstMap': [Instant.ofEpochMilli(1), Instant.ofEpochMilli(2)], + 'secondMap': [Instant.ofEpochMilli(3), Instant.ofEpochMilli(4)]] + when: + writer.writeStartDocument() + writer.writeName('instants') + codec.encode(writer, instants, EncoderContext.builder().build()) + writer.writeEndDocument() + + then: + writer.getDocument() == new BsonDocument() + .append('instants', + new BsonDocument() + .append('firstMap', new BsonArray([new BsonDateTime(1), new BsonDateTime(2)])) + .append('secondMap', new BsonArray([new BsonDateTime(3), new BsonDateTime(4)]))) + + when: + reader.readStartDocument() + reader.readName('instants') + def decodedInstants = codec.decode(reader, DecoderContext.builder().build()) + + then: + decodedInstants == instants + } + + @SuppressWarnings('unused') + static class Container { + private final Map<String, List<Instant>> instants = [:] + + Map<String, List<Instant>> getInstants() { + instants + } + } +} diff --git a/bson/src/test/unit/org/bson/codecs/OverridableUuidRepresentationUuidCodecSpecification.groovy b/bson/src/test/unit/org/bson/codecs/OverridableUuidRepresentationUuidCodecSpecification.groovy new file mode 100644 index 00000000000..4f52409c8d7 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/OverridableUuidRepresentationUuidCodecSpecification.groovy @@ -0,0 +1,39 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs + +import org.bson.UuidRepresentation +import spock.lang.Specification + +class OverridableUuidRepresentationUuidCodecSpecification extends Specification { + + def 'should change uuid representation'() { + when: + def codec = new OverridableUuidRepresentationUuidCodec() + + then: + codec.getUuidRepresentation() == UuidRepresentation.UNSPECIFIED + + when: + def newCodec = codec.withUuidRepresentation(UuidRepresentation.STANDARD) + + then: + newCodec instanceof OverridableUuidRepresentationCodec + (newCodec as OverridableUuidRepresentationCodec).getUuidRepresentation() == UuidRepresentation.STANDARD + } + +} diff --git a/bson/src/test/unit/org/bson/codecs/RawBsonDocumentCodecSpecification.groovy b/bson/src/test/unit/org/bson/codecs/RawBsonDocumentCodecSpecification.groovy new file mode 100644 index 00000000000..ee6dd3125af --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/RawBsonDocumentCodecSpecification.groovy @@ -0,0 +1,66 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs + +import org.bson.BsonBinaryReader +import org.bson.BsonBoolean +import org.bson.BsonDocument +import org.bson.BsonDocumentWriter +import org.bson.BsonElement +import org.bson.ByteBufNIO +import org.bson.RawBsonDocument +import org.bson.io.ByteBufferBsonInput +import spock.lang.Specification + +import java.nio.ByteBuffer + +class RawBsonDocumentCodecSpecification extends Specification { + + def codec = new RawBsonDocumentCodec() + def document = new BsonDocument([new BsonElement('b1', BsonBoolean.TRUE), new BsonElement('b2', BsonBoolean.FALSE)]) + def documentBytes = [15, 0, 0, 0, 8, 98, 49, 0, 1, 8, 98, 50, 0, 0, 0] as byte[] + + def 'should get encoder class'() { + expect: + codec.encoderClass == RawBsonDocument + } + + def 'should encode'() { + given: + def document = new BsonDocument() + def writer = new BsonDocumentWriter(document) + + when: + codec.encode(writer, new RawBsonDocument(documentBytes), EncoderContext.builder().build()) + + then: + document == new BsonDocument([new BsonElement('b1', BsonBoolean.TRUE), new BsonElement('b2', BsonBoolean.FALSE)]) + } + + def 'should decode'() { + given: + def reader = new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(documentBytes)))) + + when: + RawBsonDocument buffer = codec.decode(reader, DecoderContext.builder().build()) + def bytes = new byte[buffer.getByteBuffer().remaining()] + buffer.getByteBuffer().get(bytes) + + then: + bytes == documentBytes + } +} diff --git a/bson/src/test/unit/org/bson/codecs/ShortCodecTest.java b/bson/src/test/unit/org/bson/codecs/ShortCodecTest.java new file mode 100644 index 00000000000..6bfb41fbb1a --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/ShortCodecTest.java @@ -0,0 +1,56 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.bson.codecs;
+
+import org.bson.BsonInvalidOperationException;
+import org.bson.Document;
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+public final class ShortCodecTest extends CodecTestCase {
+
+    @Test
+    public void shouldRoundTripShortValues() {
+        roundTrip(new Document("a", Short.MAX_VALUE));
+        roundTrip(new Document("a", Short.MIN_VALUE));
+    }
+
+    @Test
+    public void shouldHandleAlternativeNumberValues() {
+        Document expected = new Document("a", (short) 10);
+        roundTrip(new Document("a", 10), expected);
+        roundTrip(new Document("a", 10L), expected);
+        roundTrip(new Document("a", 10.00), expected);
+        // the nearest double to this literal is exactly 10.0, so it still decodes losslessly to (short) 10
+        roundTrip(new Document("a", 9.9999999999999992), expected);
+    }
+
+    @Test
+    public void shouldErrorDecodingOutsideMinRange() {
+        assertThrows(BsonInvalidOperationException.class, () -> roundTrip(new Document("a", Integer.MIN_VALUE)));
+    }
+
+    @Test
+    public void shouldErrorDecodingOutsideMaxRange() {
+        assertThrows(BsonInvalidOperationException.class, () -> roundTrip(new Document("a", Integer.MAX_VALUE)));
+    }
+
+    @Override
+    DocumentCodecProvider getDocumentCodecProvider() {
+        return getSpecificNumberDocumentCodecProvider(Short.class);
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/SimpleEnum.java b/bson/src/test/unit/org/bson/codecs/SimpleEnum.java
new file mode 100644
index 00000000000..bd0ff19188a
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/SimpleEnum.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs;
+
+public enum SimpleEnum {
+    ALPHA,
+    BRAVO,
+    CHARLIE
+}
diff --git a/bson/src/test/unit/org/bson/codecs/StringCodecTest.java b/bson/src/test/unit/org/bson/codecs/StringCodecTest.java
new file mode 100644
index 00000000000..2c9ae408c11
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/StringCodecTest.java
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs;
+
+import org.bson.BsonInvalidOperationException;
+import org.bson.BsonReader;
+import org.bson.BsonType;
+import org.bson.BsonWriter;
+import org.bson.codecs.configuration.CodecConfigurationException;
+import org.bson.json.JsonReader;
+import org.bson.json.JsonWriter;
+import org.junit.jupiter.api.Test;
+
+import java.io.StringWriter;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+public class StringCodecTest {
+
+    private final DecoderContext decoderContext = DecoderContext.builder().build();
+    private final EncoderContext encoderContext = EncoderContext.builder().build();
+    private final Codec<String> parent = new StringCodec();
+    @SuppressWarnings("unchecked")
+    private final Codec<String> child = ((RepresentationConfigurable<String>) parent).withRepresentation(BsonType.OBJECT_ID);
+
+    @Test
+    public void testSettingRepresentation() {
+        assertEquals(((RepresentationConfigurable<String>) parent).getRepresentation(), BsonType.STRING);
+        assertEquals(((RepresentationConfigurable<String>) child).getRepresentation(), BsonType.OBJECT_ID);
+    }
+
+    @Test
+    public void testStringRepresentation() {
+        @SuppressWarnings("unchecked")
+        Codec<String> child = ((RepresentationConfigurable<String>) parent).withRepresentation(BsonType.STRING);
+        assertEquals(((RepresentationConfigurable<String>) child).getRepresentation(), BsonType.STRING);
+    }
+
+    @Test
+    public void testInvalidRepresentation() {
+        assertThrows(CodecConfigurationException.class, () -> ((RepresentationConfigurable<String>) parent).withRepresentation(BsonType.INT32));
+    }
+
+
+    @Test
+    public void testDecodeOnObjectIdWithObjectIdRep() {
+        BsonReader reader = new JsonReader("{'_id': ObjectId('5f5a6cc03237b5e06d6b887b'), 'name': 'Brian'}");
+        reader.readStartDocument();
+        reader.readName();
+        String stringId = child.decode(reader, decoderContext);
+
+        assertEquals(stringId, "5f5a6cc03237b5e06d6b887b");
+    }
+
+    @Test
+    public void testDecodeOnObjectIdWithStringRep() {
+        assertThrows(BsonInvalidOperationException.class, () -> {
+            BsonReader reader = new JsonReader("{'_id': ObjectId('5f5a6cc03237b5e06d6b887b'), 'name': 'Brian'}");
+            reader.readStartDocument();
+            reader.readName();
+            parent.decode(reader, decoderContext);
+        });
+    }
+
+    @Test
+    public void testDecodeOnStringWithObjectIdRep() {
+        assertThrows(BsonInvalidOperationException.class, () -> {
+            BsonReader reader = new JsonReader("{'name': 'Brian'}");
+            reader.readStartDocument();
+            reader.readName();
+            child.decode(reader, decoderContext);
+        });
+    }
+
+    @Test
+    public void testDecodeOnStringWithStringRep() {
+        BsonReader reader = new JsonReader("{'name': 'Brian'}");
+        reader.readStartDocument();
+        reader.readName();
+        assertEquals(parent.decode(reader, decoderContext), "Brian");
+    }
+
+    @Test
+    public void testEncodeWithObjectIdRep() {
+        StringWriter writer = new StringWriter();
+        BsonWriter jsonWriter = new JsonWriter(writer);
+        jsonWriter.writeStartDocument();
+        jsonWriter.writeName("_id");
+
+        child.encode(jsonWriter, "5f5a6cc03237b5e06d6b887b", encoderContext);
+
+        jsonWriter.writeEndDocument();
+
+        assertEquals(writer.toString(), "{\"_id\": {\"$oid\": \"5f5a6cc03237b5e06d6b887b\"}}");
+    }
+
+    @Test
+    public void testEncodeWithStringRep() {
+        StringWriter writer = new StringWriter();
+        BsonWriter jsonWriter = new JsonWriter(writer);
+        jsonWriter.writeStartDocument();
+        jsonWriter.writeName("_id");
+
+        parent.encode(jsonWriter, "5f5a6cc03237b5e06d6b887b", encoderContext);
+
+        jsonWriter.writeEndDocument();
+
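+        // with the default STRING representation the codec writes the value as a plain JSON string
+        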
assertEquals(writer.toString(), "{\"_id\": \"5f5a6cc03237b5e06d6b887b\"}"); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/UndefinedCodecSpecification.groovy b/bson/src/test/unit/org/bson/codecs/UndefinedCodecSpecification.groovy new file mode 100644 index 00000000000..ac95db63efe --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/UndefinedCodecSpecification.groovy @@ -0,0 +1,59 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs + +import org.bson.BsonReader +import org.bson.BsonUndefined +import org.bson.BsonWriter +import spock.lang.Specification +import spock.lang.Subject + +class UndefinedCodecSpecification extends Specification { + @Subject + BsonUndefinedCodec codec = new BsonUndefinedCodec() + + def 'should return Undefined class'() { + expect: + codec.encoderClass == BsonUndefined + } + + def 'should decode undefined type from BsonReader'() { + given: + BsonReader reader = Mock() + + when: + def result = codec.decode(reader, DecoderContext.builder().build()) + + then: + 1 * reader.readUndefined() + result != null + result.class == BsonUndefined + } + + def 'should encode undefined type to BsonWriter'() { + given: + BsonWriter writer = Mock() + + when: + codec.encode(writer, new BsonUndefined(), EncoderContext.builder().build()) + + then: + 1 * writer.writeUndefined() + } + + +} diff --git a/bson/src/test/unit/org/bson/codecs/UuidCodecSpecification.groovy b/bson/src/test/unit/org/bson/codecs/UuidCodecSpecification.groovy new file mode 100644 index 00000000000..8bafd639882 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/UuidCodecSpecification.groovy @@ -0,0 +1,171 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs + +import org.bson.BsonBinaryReader +import org.bson.BsonBinaryWriter +import org.bson.BsonDocument +import org.bson.BsonDocumentWriter +import org.bson.ByteBufNIO +import org.bson.UuidRepresentation +import org.bson.codecs.configuration.CodecConfigurationException +import org.bson.io.BasicOutputBuffer +import org.bson.io.ByteBufferBsonInput +import spock.lang.Shared +import spock.lang.Specification + +import java.nio.ByteBuffer + +/** + * + */ +class UuidCodecSpecification extends Specification { + + @Shared private UuidCodec uuidCodec + @Shared private BasicOutputBuffer outputBuffer + + def setup() { + uuidCodec = new UuidCodec() + outputBuffer = new BasicOutputBuffer() + } + + def 'should default to unspecified representation'() { + expect: + new UuidCodec().getUuidRepresentation() == UuidRepresentation.UNSPECIFIED + } + + def 'should decode different types of UUID'(UuidCodec codec, byte[] list) throws IOException { + given: + + ByteBufferBsonInput inputBuffer = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(list))) + BsonBinaryReader bsonReader = new BsonBinaryReader(inputBuffer) + UUID expectedUuid = UUID.fromString('08070605-0403-0201-100f-0e0d0c0b0a09') + + bsonReader.readStartDocument() + bsonReader.readName() + + when: + UUID actualUuid = codec.decode(bsonReader, DecoderContext.builder().build()) + + then: + expectedUuid == actualUuid + + cleanup: + bsonReader.close() + + where: + + codec << [ + new UuidCodec(UuidRepresentation.JAVA_LEGACY), + new UuidCodec(UuidRepresentation.STANDARD), + new UuidCodec(UuidRepresentation.PYTHON_LEGACY), + new UuidCodec(UuidRepresentation.C_SHARP_LEGACY), + ] + + list << [ + [0, 0, 0, 0, //Start of document + 5, // type (BINARY) + 95, 105, 100, 0, // "_id" + 16, 0, 0, 0, // int "16" (length) + 3, // type (B_UUID_LEGACY) JAVA_LEGACY + 1, 2, 3, 4, 5, 6, 7, 8, + 9, 10, 11, 12, 13, 14, 15, 16], //8 bytes for long, 2 longs for UUID, Little Endian + + [0, 0, 0, 0, //Start of document + 5, // type (BINARY) + 95, 105, 100, 0, // "_id" + 16, 0, 0, 0, // int "16" (length) + 4, // type (UUID) + 8, 7, 6, 5, 4, 3, 2, 1, + 16, 15, 14, 13, 12, 11, 10, 9], //8 bytes for long, 2 longs for UUID, Big Endian + + [0, 0, 0, 0, //Start of document + 5, // type (BINARY) + 95, 105, 100, 0, // "_id" + 16, 0, 0, 0, // int "16" (length) + 3, // type (B_UUID_LEGACY) PYTHON_LEGACY + 8, 7, 6, 5, 4, 3, 2, 1, + 16, 15, 14, 13, 12, 11, 10, 9], //8 bytes for long, 2 longs for UUID, Big Endian + + [0, 0, 0, 0, //Start of document + 5, // type (BINARY) + 95, 105, 100, 0, // "_id" + 16, 0, 0, 0, // int "16" (length) + 3, // type (B_UUID_LEGACY) CSHARP_LEGACY + 5, 6, 7, 8, 3, 4, 1, 2, + 16, 15, 14, 13, 12, 11, 10, 9], //8 bytes for long, 2 longs for UUID, Big Endian + ] + } + + def 'should encode different types of UUIDs'(Byte bsonSubType, + UuidCodec codec, + UUID uuid) throws IOException { + given: + + byte[] encodedDoc = [0, 0, 0, 0, //Start of document + 5, // type (BINARY) + 95, 105, 100, 0, // "_id" + 16, 0, 0, 0, // int "16" (length) + 0, // bsonSubType + 1, 2, 3, 4, 5, 6, 7, 8, + 9, 10, 11, 12, 13, 14, 15, 16] //8 bytes for long, 2 longs for UUID + + encodedDoc[13] = bsonSubType + + BsonBinaryWriter bsonWriter = new BsonBinaryWriter(outputBuffer) + bsonWriter.writeStartDocument() + bsonWriter.writeName('_id') + + when: + codec.encode(bsonWriter, uuid, EncoderContext.builder().build()) + + then: + outputBuffer.toByteArray() == encodedDoc + + cleanup: + bsonWriter.close() + + where: + + bsonSubType << [3, 4, 3, 3] + + codec << [ + new 
UuidCodec(UuidRepresentation.JAVA_LEGACY), + new UuidCodec(UuidRepresentation.STANDARD), + new UuidCodec(UuidRepresentation.PYTHON_LEGACY), + new UuidCodec(UuidRepresentation.C_SHARP_LEGACY), + ] + + uuid << [ + UUID.fromString('08070605-0403-0201-100f-0e0d0c0b0a09'), // Java legacy UUID + UUID.fromString('01020304-0506-0708-090a-0b0c0d0e0f10'), // simulated standard UUID + UUID.fromString('01020304-0506-0708-090a-0b0c0d0e0f10'), // simulated Python UUID + UUID.fromString('04030201-0605-0807-090a-0b0c0d0e0f10') // simulated C# UUID + ] + } + + def 'should throw if representation is unspecified'() { + given: + def codec = new UuidCodec(UuidRepresentation.UNSPECIFIED) + + when: + codec.encode(new BsonDocumentWriter(new BsonDocument()), UUID.randomUUID(), EncoderContext.builder().build()) + + then: + thrown(CodecConfigurationException) + } +} diff --git a/bson/src/test/unit/org/bson/codecs/ValueCodecProviderSpecification.groovy b/bson/src/test/unit/org/bson/codecs/ValueCodecProviderSpecification.groovy new file mode 100644 index 00000000000..1fd738b5c5c --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/ValueCodecProviderSpecification.groovy @@ -0,0 +1,81 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs + +import org.bson.Document +import org.bson.Float32BinaryVector +import org.bson.Int8BinaryVector +import org.bson.PackedBitBinaryVector +import org.bson.BinaryVector +import org.bson.codecs.configuration.CodecRegistries +import org.bson.types.Binary +import org.bson.types.Code +import org.bson.types.Decimal128 +import org.bson.types.MaxKey +import org.bson.types.MinKey +import org.bson.types.ObjectId +import org.bson.types.Symbol +import spock.lang.Specification + +import java.util.concurrent.atomic.AtomicBoolean +import java.util.concurrent.atomic.AtomicInteger +import java.util.concurrent.atomic.AtomicLong +import java.util.regex.Pattern + +//Codenarc +@SuppressWarnings("VectorIsObsolete") +class ValueCodecProviderSpecification extends Specification { + private final provider = new ValueCodecProvider() + private final registry = CodecRegistries.fromProviders(provider) + + def 'should provide supported codecs'() { + expect: + provider.get(AtomicBoolean, registry) instanceof AtomicBooleanCodec + provider.get(AtomicInteger, registry) instanceof AtomicIntegerCodec + provider.get(AtomicLong, registry) instanceof AtomicLongCodec + + provider.get(Boolean, registry) instanceof BooleanCodec + provider.get(Integer, registry) instanceof IntegerCodec + provider.get(Long, registry) instanceof LongCodec + provider.get(Decimal128, registry) instanceof Decimal128Codec + provider.get(BigDecimal, registry) instanceof BigDecimalCodec + provider.get(Double, registry) instanceof DoubleCodec + provider.get(Character, registry) instanceof CharacterCodec + provider.get(String, registry) instanceof StringCodec + provider.get(Date, registry) instanceof DateCodec + provider.get(Byte, registry) instanceof ByteCodec + provider.get(Pattern, registry) instanceof PatternCodec + provider.get(Short, registry) instanceof ShortCodec + provider.get(byte[], registry) instanceof ByteArrayCodec + provider.get(Float, registry) instanceof FloatCodec + provider.get(BinaryVector, registry) instanceof BinaryVectorCodec + provider.get(Float32BinaryVector, registry) instanceof Float32BinaryVectorCodec + provider.get(Int8BinaryVector, registry) instanceof Int8VectorCodec + provider.get(PackedBitBinaryVector, registry) instanceof PackedBitBinaryVectorCodec + + provider.get(Binary, registry) instanceof BinaryCodec + provider.get(MinKey, registry) instanceof MinKeyCodec + provider.get(MaxKey, registry) instanceof MaxKeyCodec + provider.get(Code, registry) instanceof CodeCodec + provider.get(ObjectId, registry) instanceof ObjectIdCodec + provider.get(Symbol, registry) instanceof SymbolCodec + provider.get(UUID, registry) instanceof OverridableUuidRepresentationCodec + + provider.get(Document, registry) == null + } + +} diff --git a/bson/src/test/unit/org/bson/codecs/configuration/CodeRegistriesSpecification.groovy b/bson/src/test/unit/org/bson/codecs/configuration/CodeRegistriesSpecification.groovy new file mode 100644 index 00000000000..9cae58f7468 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/configuration/CodeRegistriesSpecification.groovy @@ -0,0 +1,152 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.configuration
+
+import org.bson.BsonArray
+import org.bson.BsonDateTime
+import org.bson.BsonDocument
+import org.bson.BsonDocumentReader
+import org.bson.BsonDocumentWriter
+import org.bson.BsonInt32
+import org.bson.codecs.BsonInt32Codec
+import org.bson.codecs.BsonValueCodecProvider
+import org.bson.codecs.CollectionCodecProvider
+import org.bson.codecs.DecoderContext
+import org.bson.codecs.EncoderContext
+import org.bson.codecs.IntegerCodec
+import org.bson.codecs.LongCodec
+import org.bson.codecs.MapCodecProvider
+import org.bson.codecs.UuidCodec
+import org.bson.codecs.ValueCodecProvider
+import org.bson.codecs.jsr310.Jsr310CodecProvider
+import org.bson.internal.ProvidersCodecRegistry
+import spock.lang.Specification
+
+import java.lang.reflect.ParameterizedType
+import java.time.Instant
+
+import static CodecRegistries.fromCodecs
+import static CodecRegistries.fromProviders
+import static CodecRegistries.fromRegistries
+import static java.util.Arrays.asList
+import static org.bson.UuidRepresentation.STANDARD
+import static org.bson.UuidRepresentation.UNSPECIFIED
+import static org.bson.codecs.configuration.CodecRegistries.withUuidRepresentation
+
+class CodeRegistriesSpecification extends Specification {
+    def 'fromCodecs should return a ProvidersCodecRegistry'() {
+        when:
+        def registry = fromCodecs(new UuidCodec(), new LongCodec())
+
+        then:
+        registry instanceof ProvidersCodecRegistry
+        registry.get(UUID) instanceof UuidCodec
+        registry.get(Long) instanceof LongCodec
+    }
+
+    def 'fromProvider should return ProvidersCodecRegistry'() {
+        when:
+        def registry = fromProviders(new BsonValueCodecProvider())
+
+        then:
+        registry instanceof ProvidersCodecRegistry
+        registry.get(BsonInt32) instanceof BsonInt32Codec
+    }
+
+    def 'fromProviders should return ProvidersCodecRegistry'() {
+        when:
+        def providers = fromProviders([new BsonValueCodecProvider(), new ValueCodecProvider()])
+
+        then:
+        providers instanceof ProvidersCodecRegistry
+        providers.get(BsonInt32) instanceof BsonInt32Codec
+        providers.get(Integer) instanceof IntegerCodec
+    }
+
+    def 'fromRegistries should return ProvidersCodecRegistry'() {
+        def uuidCodec = new UuidCodec()
+        when:
+        def registry = fromRegistries(fromCodecs(uuidCodec), fromProviders(new ValueCodecProvider()))
+
+        then:
+        registry instanceof ProvidersCodecRegistry
+        registry.get(UUID).is(uuidCodec)
+        registry.get(Integer) instanceof IntegerCodec
+    }
+
+    def 'withUuidRepresentation should apply uuid representation'() {
+        given:
+        def registry = fromProviders(new ValueCodecProvider())
+        def registryWithStandard = withUuidRepresentation(registry, STANDARD)
+
+        when:
+        def uuidCodec = registry.get(UUID) as UuidCodec
+
+        then:
+        uuidCodec.getUuidRepresentation() == UNSPECIFIED
+
+        when:
+        uuidCodec = registryWithStandard.get(UUID) as UuidCodec
+
+        then:
+        uuidCodec.getUuidRepresentation() == STANDARD
+    }
+
+    def 'withUuidRepresentation should not break parameterization'() {
+        given:
+        def registry = fromProviders(
+                new Jsr310CodecProvider(),
+                new ValueCodecProvider(),
+                withUuidRepresentation(fromProviders(new 
CollectionCodecProvider()), STANDARD),
+                withUuidRepresentation(fromProviders(new MapCodecProvider()), STANDARD)
+        )
+        // recover the ParameterizedType List<Map<String, List<Instant>>> from the dummy method's generic return type
+        def codec = registry.get(Collection, asList(
+                ((ParameterizedType) CodeRegistriesSpecification.getMethod('parameterizedTypeProvider').genericReturnType)
+                        .actualTypeArguments))
+        def writer = new BsonDocumentWriter(new BsonDocument())
+        def reader = new BsonDocumentReader(writer.getDocument())
+        def value = [
+                ['firstMap': [Instant.ofEpochMilli(1), Instant.ofEpochMilli(2)]],
+                ['secondMap': [Instant.ofEpochMilli(3), Instant.ofEpochMilli(4)]]]
+        when:
+        writer.writeStartDocument()
+        writer.writeName('value')
+        codec.encode(writer, value, EncoderContext.builder().build())
+        writer.writeEndDocument()
+
+        then:
+        writer.getDocument() == new BsonDocument()
+                .append('value', new BsonArray(
+                        [
+                                new BsonDocument('firstMap', new BsonArray([new BsonDateTime(1), new BsonDateTime(2)])),
+                                new BsonDocument('secondMap', new BsonArray([new BsonDateTime(3), new BsonDateTime(4)]))
+                        ]))
+
+        when:
+        reader.readStartDocument()
+        reader.readName('value')
+        def decodedValue = codec.decode(reader, DecoderContext.builder().build())
+
+        then:
+        decodedValue == value
+    }
+
+    @SuppressWarnings('unused')
+    List<Map<String, List<Instant>>> parameterizedTypeProvider() {
+        []
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/jsr310/InstantCodecSpecification.groovy b/bson/src/test/unit/org/bson/codecs/jsr310/InstantCodecSpecification.groovy
new file mode 100644
index 00000000000..bdf6aa3f3a6
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/jsr310/InstantCodecSpecification.groovy
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.bson.codecs.jsr310 + +import org.bson.BsonDocument +import org.bson.codecs.Codec +import org.bson.codecs.configuration.CodecConfigurationException + +import java.time.Instant +import java.time.LocalDateTime +import java.time.ZoneOffset + +class InstantCodecSpecification extends JsrSpecification { + + def 'should round trip Instant successfully'() { + when: + def writer = encode(instant) + + then: + writer.getDocument().get('key').asDateTime().value == millis + + when: + Instant actual = decode(writer) + + then: + instant == actual + + where: + instant | millis + Instant.EPOCH | 0 + LocalDateTime.of(2007, 10, 20, 0, 35).toInstant(ZoneOffset.UTC) | 1_192_840_500_000 + } + + def 'should wrap long overflow error in a CodecConfigurationException'() { + when: + encode(instant) + + then: + def e = thrown(CodecConfigurationException) + e.getCause().getClass() == ArithmeticException + + where: + instant << [ + Instant.MIN, + Instant.MAX + ] + } + + def 'should throw a CodecConfiguration exception if BsonType is invalid'() { + when: + decode(invalidDuration) + + then: + thrown(CodecConfigurationException) + + where: + invalidDuration << [ + BsonDocument.parse('{key: "10 Minutes"}'), + BsonDocument.parse('{key: 10}') + ] + } + + @Override + Codec getCodec() { + new InstantCodec() + } +} diff --git a/bson/src/test/unit/org/bson/codecs/jsr310/Jsr310CodecProviderSpecification.groovy b/bson/src/test/unit/org/bson/codecs/jsr310/Jsr310CodecProviderSpecification.groovy new file mode 100644 index 00000000000..e4f7a31281d --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/jsr310/Jsr310CodecProviderSpecification.groovy @@ -0,0 +1,40 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.jsr310 + +import org.bson.codecs.configuration.CodecRegistry +import spock.lang.Specification + +class Jsr310CodecProviderSpecification extends Specification { + + def 'should provide a codec for all JSR-310 classes'() { + given: + def codecRegistry = Stub(CodecRegistry) + def provider = new Jsr310CodecProvider() + + expect: + provider.get(clazz, codecRegistry) != null + + where: + clazz << [ + java.time.Instant, + java.time.LocalDate, + java.time.LocalDateTime, + java.time.LocalTime, + ] + } +} diff --git a/bson/src/test/unit/org/bson/codecs/jsr310/JsrSpecification.groovy b/bson/src/test/unit/org/bson/codecs/jsr310/JsrSpecification.groovy new file mode 100644 index 00000000000..119869b3495 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/jsr310/JsrSpecification.groovy @@ -0,0 +1,51 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.jsr310 + +import org.bson.BsonDocument +import org.bson.BsonDocumentReader +import org.bson.BsonDocumentWriter +import org.bson.BsonReader +import org.bson.codecs.Codec +import org.bson.codecs.DecoderContext +import org.bson.codecs.EncoderContext +import spock.lang.Specification + +abstract class JsrSpecification extends Specification { + + abstract Codec getCodec() + + def encode(jsrDateTime) { + def writer = new BsonDocumentWriter(new BsonDocument()) + writer.writeStartDocument() + writer.writeName('key') + getCodec().encode(writer, jsrDateTime, EncoderContext.builder().build()) + writer.writeEndDocument() + writer + } + + def decode(BsonDocumentWriter writer) { + decode(writer.getDocument()) + } + + def decode(BsonDocument document) { + BsonReader bsonReader = new BsonDocumentReader(document) + bsonReader.readStartDocument() + bsonReader.readName() + getCodec().decode(bsonReader, DecoderContext.builder().build()) + } +} diff --git a/bson/src/test/unit/org/bson/codecs/jsr310/LocalDateCodecSpecification.groovy b/bson/src/test/unit/org/bson/codecs/jsr310/LocalDateCodecSpecification.groovy new file mode 100644 index 00000000000..a94753992b6 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/jsr310/LocalDateCodecSpecification.groovy @@ -0,0 +1,106 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs.jsr310 + +import org.bson.BsonDocument +import org.bson.codecs.Codec +import org.bson.codecs.configuration.CodecConfigurationException + +import java.time.LocalDate + +class LocalDateCodecSpecification extends JsrSpecification { + + def 'should round trip LocalDate successfully'() { + when: + def writer = encode(localDate) + + then: + writer.getDocument().get('key').asDateTime().value == millis + + when: + LocalDate actual = decode(writer) + + then: + localDate == actual + + where: + localDate | millis + LocalDate.of(2007, 10, 20) | 1_192_838_400_000 + LocalDate.ofEpochDay(0) | 0 + LocalDate.ofEpochDay(-99_999_999_999) | -99_999_999_999 * 86_400_000 + LocalDate.ofEpochDay(99_999_999_999) | 99_999_999_999 * 86_400_000 + } + + def 'should round trip different timezones the same'() { + given: + def defaultTimeZone = TimeZone.getDefault() + TimeZone.setDefault(TimeZone.getTimeZone(timeZone)) + def localDate = LocalDate.ofEpochDay(0) + + when: + def writer = encode(localDate) + + then: + writer.getDocument().get('key').asDateTime().value == 0 + + when: + def actual = decode(writer) + + then: + localDate == actual + + cleanup: + TimeZone.setDefault(defaultTimeZone) + + where: + timeZone << ['Pacific/Auckland', 'UTC', 'US/Hawaii'] + } + + def 'should wrap long overflow error in a CodecConfigurationException'() { + when: + encode(localDate) + + then: + def e = thrown(CodecConfigurationException) + e.getCause().getClass() == ArithmeticException + + where: + localDate << [ + LocalDate.MIN, + LocalDate.MAX + ] + } + + def 'should throw a CodecConfiguration exception if BsonType is invalid'() { + when: + decode(invalidDuration) + + then: + thrown(CodecConfigurationException) + + where: + invalidDuration << [ + BsonDocument.parse('{key: "10 Minutes"}'), + BsonDocument.parse('{key: 10}') + ] + } + + @Override + Codec getCodec() { + new LocalDateCodec() + } +} diff --git a/bson/src/test/unit/org/bson/codecs/jsr310/LocalDateTimeCodecSpecification.groovy b/bson/src/test/unit/org/bson/codecs/jsr310/LocalDateTimeCodecSpecification.groovy new file mode 100644 index 00000000000..0140c9ba13d --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/jsr310/LocalDateTimeCodecSpecification.groovy @@ -0,0 +1,109 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs.jsr310 + +import org.bson.BsonDocument +import org.bson.codecs.Codec +import org.bson.codecs.configuration.CodecConfigurationException + +import java.time.LocalDate +import java.time.LocalDateTime +import java.time.LocalTime +import java.time.ZoneOffset + +class LocalDateTimeCodecSpecification extends JsrSpecification { + + def 'should round trip LocalDateTime successfully'() { + when: + def writer = encode(localDateTime) + + then: + writer.getDocument().get('key').asDateTime().value == millis + + when: + LocalDateTime actual = decode(writer) + + then: + localDateTime == actual + + where: + localDateTime | millis + LocalDateTime.of(2007, 10, 20, 0, 35) | 1_192_840_500_000 + LocalDateTime.ofEpochSecond(0, 0, ZoneOffset.UTC) | 0 + LocalDateTime.ofEpochSecond(-99_999_999_999, 0, ZoneOffset.UTC) | -99_999_999_999 * 1000 + LocalDateTime.ofEpochSecond(99_999_999_999, 0, ZoneOffset.UTC) | 99_999_999_999 * 1000 + } + + def 'should round trip different timezones the same'() { + given: + def defaultTimeZone = TimeZone.getDefault() + TimeZone.setDefault(TimeZone.getTimeZone(timeZone)) + def localDate = LocalDateTime.of(LocalDate.ofEpochDay(0), LocalTime.MIDNIGHT) + + when: + def writer = encode(localDate) + + then: + writer.getDocument().get('key').asDateTime().value == 0 + + when: + def actual = decode(writer) + + then: + localDate == actual + + cleanup: + TimeZone.setDefault(defaultTimeZone) + + where: + timeZone << ['Pacific/Auckland', 'UTC', 'US/Hawaii'] + } + + def 'should wrap long overflow error in a CodecConfigurationException'() { + when: + encode(localDateTime) + + then: + def e = thrown(CodecConfigurationException) + e.getCause().getClass() == ArithmeticException + + where: + localDateTime << [ + LocalDateTime.MIN, + LocalDateTime.MAX + ] + } + + def 'should throw a CodecConfiguration exception if BsonType is invalid'() { + when: + decode(invalidDuration) + + then: + thrown(CodecConfigurationException) + + where: + invalidDuration << [ + BsonDocument.parse('{key: "10 Minutes"}'), + BsonDocument.parse('{key: 10}') + ] + } + + @Override + Codec getCodec() { + new LocalDateTimeCodec() + } +} diff --git a/bson/src/test/unit/org/bson/codecs/jsr310/LocalTimeCodecSpecification.groovy b/bson/src/test/unit/org/bson/codecs/jsr310/LocalTimeCodecSpecification.groovy new file mode 100644 index 00000000000..609e9bc899d --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/jsr310/LocalTimeCodecSpecification.groovy @@ -0,0 +1,88 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs.jsr310 + +import org.bson.BsonDocument +import org.bson.codecs.Codec +import org.bson.codecs.configuration.CodecConfigurationException + +import java.time.LocalTime + +class LocalTimeCodecSpecification extends JsrSpecification { + + def 'should round trip LocalTime successfully'() { + when: + def writer = encode(localTime) + + then: + writer.getDocument().get('key').asDateTime().value == millis + + when: + LocalTime actual = decode(writer) + + then: + localTime == actual + + where: + localTime | millis + LocalTime.MIN | 0 + LocalTime.of(23, 59, 59, 999_000_000) | 86_399_999 + } + + def 'should round trip different timezones the same'() { + given: + def defaultTimeZone = TimeZone.getDefault() + TimeZone.setDefault(TimeZone.getTimeZone(timeZone)) + def localDate = LocalTime.MIDNIGHT + + when: + def writer = encode(localDate) + + then: + writer.getDocument().get('key').asDateTime().value == 0 + + when: + def actual = decode(writer) + + then: + localDate == actual + + cleanup: + TimeZone.setDefault(defaultTimeZone) + + where: + timeZone << ['Pacific/Auckland', 'UTC', 'US/Hawaii'] + } + + def 'should throw a CodecConfiguration exception if BsonType is invalid'() { + when: + decode(invalidDuration) + + then: + thrown(CodecConfigurationException) + + where: + invalidDuration << [ + BsonDocument.parse('{key: "10:00"}') + ] + } + + @Override + Codec getCodec() { + new LocalTimeCodec() + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/ClassModelBuilderTest.java b/bson/src/test/unit/org/bson/codecs/pojo/ClassModelBuilderTest.java new file mode 100644 index 00000000000..83c9c432a07 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/ClassModelBuilderTest.java @@ -0,0 +1,208 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs.pojo; + +import org.bson.codecs.configuration.CodecConfigurationException; +import org.bson.codecs.pojo.annotations.BsonProperty; +import org.bson.codecs.pojo.entities.ConcreteCollectionsModel; +import org.bson.codecs.pojo.entities.GenericHolderModel; +import org.bson.codecs.pojo.entities.NestedGenericHolderModel; +import org.bson.codecs.pojo.entities.SimpleGenericsModel; +import org.bson.codecs.pojo.entities.SimpleIdModel; +import org.bson.codecs.pojo.entities.UpperBoundsConcreteModel; +import org.bson.codecs.pojo.entities.UpperBoundsModel; +import org.junit.jupiter.api.Test; + +import java.lang.annotation.Annotation; +import java.lang.reflect.Field; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +@SuppressWarnings("rawtypes") +public final class ClassModelBuilderTest { + + @Test + public void testDefaults() { + Class clazz = SimpleGenericsModel.class; + ClassModelBuilder builder = ClassModel.builder(clazz); + assertEquals(4, builder.getPropertyModelBuilders().size()); + for (Field field : clazz.getDeclaredFields()) { + assertEquals(field.getName(), builder.getProperty(field.getName()).getWriteName()); + } + + Map fieldNameToTypeParameterMap = new HashMap<>(); + fieldNameToTypeParameterMap.put("myIntegerField", TypeParameterMap.builder().build()); + fieldNameToTypeParameterMap.put("myGenericField", TypeParameterMap.builder().addIndex(0).build()); + fieldNameToTypeParameterMap.put("myListField", TypeParameterMap.builder().addIndex(0, 1).build()); + fieldNameToTypeParameterMap.put("myMapField", TypeParameterMap.builder().addIndex(0, TypeParameterMap.builder().build()) + .addIndex(1, 2).build()); + + assertEquals(fieldNameToTypeParameterMap, builder.getPropertyNameToTypeParameterMap()); + assertEquals(3, builder.getConventions().size()); + assertTrue(builder.getAnnotations().isEmpty()); + assertEquals(clazz, builder.getType()); + assertNull(builder.getIdPropertyName()); + assertFalse(builder.useDiscriminator()); + assertNull(builder.getDiscriminator()); + } + + @Test + public void testCanReflectObjectClass() { + Class clazz = Object.class; + ClassModelBuilder builder = ClassModel.builder(clazz); + + assertEquals(0, builder.getPropertyModelBuilders().size()); + assertTrue(builder.getPropertyNameToTypeParameterMap().isEmpty()); + assertEquals(3, builder.getConventions().size()); + assertTrue(builder.getAnnotations().isEmpty()); + assertEquals(clazz, builder.getType()); + assertNull(builder.getIdPropertyName()); + assertFalse(builder.useDiscriminator()); + assertNull(builder.getDiscriminator()); + } + + @Test + public void testMappedBoundedClasses() { + ClassModelBuilder builder = ClassModel.builder(UpperBoundsModel.class); + assertEquals(Number.class, builder.getProperty("myGenericField").getTypeData().getType()); + + builder = ClassModel.builder(UpperBoundsConcreteModel.class); + assertEquals(Long.class, builder.getProperty("myGenericField").getTypeData().getType()); + } + + @Test + public void testNestedGenericHolderModel() { + ClassModelBuilder builder = + 
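// the nested property's TypeData should resolve the holder's type parameter to the concrete String argument
+                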
ClassModel.builder(NestedGenericHolderModel.class);
+        assertEquals(GenericHolderModel.class, builder.getProperty("nested").getTypeData().getType());
+        assertEquals(TypeData.builder(GenericHolderModel.class).addTypeParameter(TypeData.builder(String.class).build()).build(),
+                builder.getProperty("nested").getTypeData());
+    }
+
+    @Test
+    public void testFieldsMappedClassTypes() {
+        ClassModelBuilder builder =
+                ClassModel.builder(ConcreteCollectionsModel.class);
+
+        assertEquals(Collection.class, builder.getProperty("collection").getTypeData().getType());
+        assertEquals(List.class, builder.getProperty("list").getTypeData().getType());
+        assertEquals(LinkedList.class, builder.getProperty("linked").getTypeData().getType());
+        assertEquals(Map.class, builder.getProperty("map").getTypeData().getType());
+        assertEquals(ConcurrentHashMap.class, builder.getProperty("concurrent").getTypeData().getType());
+    }
+
+    @Test
+    public void testOverrides() throws NoSuchFieldException {
+        ClassModelBuilder builder = ClassModel.builder(SimpleGenericsModel.class)
+                .annotations(TEST_ANNOTATIONS)
+                .conventions(TEST_CONVENTIONS)
+                .discriminatorKey("_cls")
+                .discriminator("myColl")
+                .enableDiscriminator(true)
+                .idPropertyName("myIntegerField")
+                .instanceCreatorFactory(TEST_INSTANCE_CREATOR_FACTORY);
+
+        assertEquals(TEST_ANNOTATIONS, builder.getAnnotations());
+        assertEquals(TEST_CONVENTIONS, builder.getConventions());
+        assertEquals("myIntegerField", builder.getIdPropertyName());
+        assertEquals(SimpleGenericsModel.class, builder.getType());
+        assertTrue(builder.useDiscriminator());
+        assertEquals("_cls", builder.getDiscriminatorKey());
+        assertEquals("myColl", builder.getDiscriminator());
+        assertEquals(TEST_INSTANCE_CREATOR_FACTORY, builder.getInstanceCreatorFactory());
+    }
+
+    @Test
+    public void testCanRemoveField() {
+        ClassModelBuilder builder = ClassModel.builder(SimpleGenericsModel.class)
+                .idPropertyName("ID");
+        assertEquals(4, builder.getPropertyModelBuilders().size());
+        builder.removeProperty("myIntegerField");
+        assertEquals(3, builder.getPropertyModelBuilders().size());
+
+        // removing an already-removed property is a no-op
+        builder.removeProperty("myIntegerField");
+        assertEquals(3, builder.getPropertyModelBuilders().size());
+    }
+
+    @Test
+    public void testValidationIdProperty() {
+        assertThrows(CodecConfigurationException.class, () ->
+                ClassModel.builder(SimpleGenericsModel.class).idPropertyName("ID").build());
+    }
+
+    @Test
+    public void testValidationDuplicateDocumentFieldName() {
+        assertThrows(CodecConfigurationException.class, () -> {
+            ClassModelBuilder builder = ClassModel.builder(SimpleGenericsModel.class);
+            builder.getProperty("myIntegerField").writeName("myGenericField");
+            builder.build();
+        });
+    }
+
+    @Test
+    public void testDifferentTypeIdGenerator() {
+        assertThrows(CodecConfigurationException.class, () ->
+                ClassModel.builder(SimpleIdModel.class)
+                        .idGenerator(new IdGenerator<String>() {
+                            @Override
+                            public String generate() {
+                                return "id";
+                            }
+
+                            @Override
+                            public Class<String> getType() {
+                                return String.class;
+                            }
+                        }).build());
+    }
+
+    private static final List<Annotation> TEST_ANNOTATIONS = Collections.singletonList(
+            new BsonProperty() {
+                @Override
+                public Class<? extends Annotation> annotationType() {
+                    return BsonProperty.class;
+                }
+
+                @Override
+                public String value() {
+                    return "";
+                }
+
+                @Override
+                public boolean useDiscriminator() {
+                    return true;
+                }
+            });
+
+    private static final List<Convention> TEST_CONVENTIONS = Collections.singletonList(
+            builder -> {
+            });
+
+    private static final InstanceCreatorFactory<SimpleGenericsModel> TEST_INSTANCE_CREATOR_FACTORY =
+            () -> 
null; +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/ClassModelTest.java b/bson/src/test/unit/org/bson/codecs/pojo/ClassModelTest.java new file mode 100644 index 00000000000..d0ee3cb1cc7 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/ClassModelTest.java @@ -0,0 +1,283 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo; + +import java.util.SortedSet; + +import org.bson.codecs.pojo.entities.CollectionNestedPojoModel; +import org.bson.codecs.pojo.entities.ConcreteAndNestedAbstractInterfaceModel; +import org.bson.codecs.pojo.entities.GenericHolderModel; +import org.bson.codecs.pojo.entities.InterfaceBasedModel; +import org.bson.codecs.pojo.entities.ListGenericExtendedModel; +import org.bson.codecs.pojo.entities.ListListGenericExtendedModel; +import org.bson.codecs.pojo.entities.ListMapGenericExtendedModel; +import org.bson.codecs.pojo.entities.MapGenericExtendedModel; +import org.bson.codecs.pojo.entities.MapListGenericExtendedModel; +import org.bson.codecs.pojo.entities.MapMapGenericExtendedModel; +import org.bson.codecs.pojo.entities.MultipleBoundsModel; +import org.bson.codecs.pojo.entities.NestedGenericHolderMapModel; +import org.bson.codecs.pojo.entities.PropertySelectionModel; +import org.bson.codecs.pojo.entities.ShapeHolderCircleModel; +import org.bson.codecs.pojo.entities.ShapeHolderModel; +import org.bson.codecs.pojo.entities.ShapeModelAbstract; +import org.bson.codecs.pojo.entities.ShapeModelCircle; +import org.bson.codecs.pojo.entities.SimpleGenericsModel; +import org.bson.codecs.pojo.entities.SimpleModel; +import org.bson.codecs.pojo.entities.SimpleWithStaticModel; +import org.bson.codecs.pojo.entities.conventions.AnnotationInheritedModel; +import org.bson.codecs.pojo.entities.conventions.AnnotationModel; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public final class ClassModelTest { + + @Test + public void testSimpleGenericsModel() { + ClassModel classModel = ClassModel.builder(SimpleGenericsModel.class).build(); + + assertEquals("SimpleGenericsModel", classModel.getName()); + assertEquals(SimpleGenericsModel.class, classModel.getType()); + assertFalse(classModel.useDiscriminator()); + assertEquals("_t", classModel.getDiscriminatorKey()); + assertEquals("org.bson.codecs.pojo.entities.SimpleGenericsModel", classModel.getDiscriminator()); + assertNull(classModel.getIdPropertyModel()); + assertTrue(classModel.getInstanceCreatorFactory() instanceof InstanceCreatorFactoryImpl); + + assertEquals(4, classModel.getPropertyModels().size()); + assertEquals(classModel.getPropertyModel("myIntegerField").getTypeData(), 
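// expected TypeData values are built with the createTypeData/createBuilder helpers at the bottom of this class
+                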
createTypeData(Integer.class)); + assertEquals(classModel.getPropertyModel("myGenericField").getTypeData(), createTypeData(Object.class)); + assertEquals(classModel.getPropertyModel("myListField").getTypeData(), createTypeData(List.class, Object.class)); + assertEquals(classModel.getPropertyModel("myMapField").getTypeData(), createTypeData(Map.class, String.class, Object.class)); + } + + @Test + @SuppressWarnings("rawtypes") + public void testCollectionNestedPojoModelPropertyTypes() { + TypeData string = createTypeData(String.class); + TypeData simple = createTypeData(SimpleModel.class); + TypeData list = createBuilder(List.class).addTypeParameter(simple).build(); + TypeData listList = createBuilder(List.class).addTypeParameter(list).build(); + TypeData set = createBuilder(Set.class).addTypeParameter(simple).build(); + TypeData setSet = createBuilder(Set.class).addTypeParameter(set).build(); + TypeData sortedSet = createBuilder(SortedSet.class).addTypeParameter(simple).build(); + TypeData map = createBuilder(Map.class).addTypeParameter(string).addTypeParameter(simple).build(); + TypeData listMap = createBuilder(List.class).addTypeParameter(map).build(); + TypeData mapMap = createBuilder(Map.class).addTypeParameter(string).addTypeParameter(map).build(); + TypeData mapList = createBuilder(Map.class).addTypeParameter(string).addTypeParameter(list).build(); + TypeData mapListMap = createBuilder(Map.class).addTypeParameter(string).addTypeParameter(listMap).build(); + TypeData mapSet = createBuilder(Map.class).addTypeParameter(string).addTypeParameter(set).build(); + TypeData listMapList = createBuilder(List.class).addTypeParameter(mapList).build(); + TypeData listMapSet = createBuilder(List.class).addTypeParameter(mapSet).build(); + + ClassModel classModel = ClassModel.builder(CollectionNestedPojoModel.class).build(); + + assertEquals(13, classModel.getPropertyModels().size()); + assertEquals(classModel.getPropertyModel("listSimple").getTypeData(), list); + assertEquals(classModel.getPropertyModel("listListSimple").getTypeData(), listList); + + assertEquals(classModel.getPropertyModel("setSimple").getTypeData(), set); + assertEquals(classModel.getPropertyModel("setSetSimple").getTypeData(), setSet); + + assertEquals(classModel.getPropertyModel("sortedSetSimple").getTypeData(), sortedSet); + + assertEquals(classModel.getPropertyModel("mapSimple").getTypeData(), map); + assertEquals(classModel.getPropertyModel("mapMapSimple").getTypeData(), mapMap); + + assertEquals(classModel.getPropertyModel("mapListSimple").getTypeData(), mapList); + assertEquals(classModel.getPropertyModel("mapListMapSimple").getTypeData(), mapListMap); + assertEquals(classModel.getPropertyModel("mapSetSimple").getTypeData(), mapSet); + + assertEquals(classModel.getPropertyModel("listMapSimple").getTypeData(), listMap); + assertEquals(classModel.getPropertyModel("listMapListSimple").getTypeData(), listMapList); + assertEquals(classModel.getPropertyModel("listMapSetSimple").getTypeData(), listMapSet); + } + + @Test + public void testWildcardModel() { + ClassModel classModel = ClassModel.builder(ConcreteAndNestedAbstractInterfaceModel.class).build(); + + assertEquals(3, classModel.getPropertyModels().size()); + assertEquals(classModel.getPropertyModel("name").getTypeData(), createTypeData(String.class)); + assertEquals(classModel.getPropertyModel("child").getTypeData(), createTypeData(InterfaceBasedModel.class)); + assertEquals(classModel.getPropertyModel("wildcardList").getTypeData(), createTypeData(List.class, 
InterfaceBasedModel.class)); + } + + @Test + public void testPropertySelection() { + ClassModel classModel = ClassModel.builder(PropertySelectionModel.class).build(); + + assertEquals(2, classModel.getPropertyModels().size()); + assertEquals(classModel.getPropertyModel("stringField").getTypeData(), createTypeData(String.class)); + assertEquals(classModel.getPropertyModel("finalStringField").getTypeData(), createTypeData(String.class)); + } + + @Test + public void testMappingConcreteGenericTypes() { + ClassModel classModel = ClassModel.builder(NestedGenericHolderMapModel.class).build(); + + assertEquals(1, classModel.getPropertyModels().size()); + assertEquals(classModel.getPropertyModels().get(0).getTypeData(), createBuilder(GenericHolderModel.class) + .addTypeParameter(createTypeData(Map.class, String.class, SimpleModel.class)).build()); + } + + @Test + public void testAnnotationModel() { + ClassModel classModel = ClassModel.builder(AnnotationModel.class).build(); + PropertyModel propertyModel = classModel.getIdPropertyModel(); + + assertEquals("AnnotationModel", classModel.getName()); + assertEquals(AnnotationModel.class, classModel.getType()); + assertTrue(classModel.useDiscriminator()); + assertEquals("_cls", classModel.getDiscriminatorKey()); + assertEquals("MyAnnotationModel", classModel.getDiscriminator()); + + assertEquals(propertyModel, classModel.getPropertyModel("customId")); + assertTrue(classModel.getInstanceCreatorFactory() instanceof InstanceCreatorFactoryImpl); + + assertEquals(3, classModel.getPropertyModels().size()); + assertEquals(createTypeData(String.class), classModel.getPropertyModel("customId").getTypeData()); + assertEquals(createTypeData(AnnotationModel.class), classModel.getPropertyModel("child").getTypeData()); + assertEquals(createTypeData(AnnotationModel.class), classModel.getPropertyModel("alternative").getTypeData()); + } + + @Test + public void testInheritedClassAnnotations() { + ClassModel classModel = ClassModel.builder(AnnotationInheritedModel.class).build(); + assertTrue(classModel.useDiscriminator()); + assertEquals("_cls", classModel.getDiscriminatorKey()); + assertEquals("org.bson.codecs.pojo.entities.conventions.AnnotationInheritedModel", classModel.getDiscriminator()); + + assertEquals(2, classModel.getPropertyModels().size()); + assertEquals(createTypeData(String.class), classModel.getPropertyModel("customId").getTypeData()); + assertEquals(createTypeData(AnnotationModel.class), classModel.getPropertyModel("child").getTypeData()); + + PropertyModel propertyModel = classModel.getPropertyModel("customId"); + assertEquals(propertyModel, classModel.getIdPropertyModel()); + + propertyModel = classModel.getPropertyModel("child"); + assertTrue(propertyModel.useDiscriminator()); + } + + @Test + public void testOverridePropertyWithSubclass() { + ClassModel classModel = ClassModel.builder(ShapeHolderModel.class).build(); + assertEquals(1, classModel.getPropertyModels().size()); + assertEquals(createTypeData(ShapeModelAbstract.class), classModel.getPropertyModel("shape").getTypeData()); + + ClassModel overriddenClassModel = ClassModel.builder(ShapeHolderCircleModel.class).build(); + assertEquals(1, overriddenClassModel.getPropertyModels().size()); + assertEquals(createTypeData(ShapeModelCircle.class), overriddenClassModel.getPropertyModel("shape").getTypeData()); + } + + @Test + public void testListGenericExtendedModel() { + ClassModel classModel = ClassModel.builder(ListGenericExtendedModel.class).build(); + + assertEquals(1, 
classModel.getPropertyModels().size());
+        assertEquals(createTypeData(List.class, Integer.class), classModel.getPropertyModel("values").getTypeData());
+    }
+
+    @Test
+    public void testListListGenericExtendedModel() {
+        ClassModel classModel = ClassModel.builder(ListListGenericExtendedModel.class).build();
+
+        assertEquals(1, classModel.getPropertyModels().size());
+        assertEquals(createBuilder(List.class).addTypeParameter(createTypeData(List.class, Integer.class)).build(),
+                classModel.getPropertyModel("values").getTypeData());
+    }
+
+    @Test
+    public void testMapGenericExtendedModel() {
+        ClassModel classModel = ClassModel.builder(MapGenericExtendedModel.class).build();
+
+        assertEquals(1, classModel.getPropertyModels().size());
+        assertEquals(createTypeData(Map.class, String.class, Integer.class), classModel.getPropertyModel("values").getTypeData());
+    }
+
+    @Test
+    public void testMapMapGenericExtendedModel() {
+        ClassModel classModel = ClassModel.builder(MapMapGenericExtendedModel.class).build();
+
+        assertEquals(1, classModel.getPropertyModels().size());
+        assertEquals(createBuilder(Map.class).addTypeParameter(createTypeData(String.class))
+                .addTypeParameter(createTypeData(Map.class, String.class, Integer.class)).build(),
+                classModel.getPropertyModel("values").getTypeData());
+    }
+
+    @Test
+    public void testListMapGenericExtendedModel() {
+        ClassModel classModel = ClassModel.builder(ListMapGenericExtendedModel.class).build();
+
+        assertEquals(1, classModel.getPropertyModels().size());
+        assertEquals(createBuilder(List.class).addTypeParameter(createTypeData(Map.class, String.class, Integer.class)).build(),
+                classModel.getPropertyModel("values").getTypeData());
+    }
+
+
+    @Test
+    public void testMapListGenericExtendedModel() {
+        ClassModel classModel = ClassModel.builder(MapListGenericExtendedModel.class).build();
+
+        assertEquals(1, classModel.getPropertyModels().size());
+        assertEquals(createBuilder(Map.class)
+                .addTypeParameter(createTypeData(String.class))
+                .addTypeParameter(createTypeData(List.class, Integer.class)).build(),
+                classModel.getPropertyModel("values").getTypeData());
+    }
+
+
+    @Test
+    public void testMultipleBoundsModel() {
+        ClassModel classModel = ClassModel.builder(MultipleBoundsModel.class).build();
+
+        assertEquals(3, classModel.getPropertyModels().size());
+
+        assertEquals(createTypeData(Double.class), classModel.getPropertyModel("level1").getTypeData());
+        assertEquals(createTypeData(List.class, Integer.class), classModel.getPropertyModel("level2").getTypeData());
+        assertEquals(createTypeData(Map.class, String.class, String.class), classModel.getPropertyModel("level3").getTypeData());
+    }
+
+    @Test
+    public void testSimpleWithStaticModel() {
+        ClassModel classModel = ClassModel.builder(SimpleWithStaticModel.class).build();
+
+        assertEquals(2, classModel.getPropertyModels().size());
+        assertEquals(createTypeData(Integer.class), classModel.getPropertyModel("integerField").getTypeData());
+        assertEquals(createTypeData(String.class), classModel.getPropertyModel("stringField").getTypeData());
+
+    }
+
+    TypeData.Builder createBuilder(final Class clazz, final Class... types) {
+        TypeData.Builder builder = TypeData.builder(clazz);
+        List<TypeData<?>> subTypes = new ArrayList<>();
+        for (final Class type : types) {
+            subTypes.add(TypeData.builder(type).build());
+        }
+        builder.addTypeParameters(subTypes);
+        return builder;
+    }
+
+    TypeData createTypeData(final Class clazz, final Class... 
+        return createBuilder(clazz, types).build();
+    }
+
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/ConventionsTest.java b/bson/src/test/unit/org/bson/codecs/pojo/ConventionsTest.java
new file mode 100644
index 00000000000..6554ab318ec
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/ConventionsTest.java
@@ -0,0 +1,268 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo;
+
+import org.bson.BsonType;
+import org.bson.codecs.configuration.CodecConfigurationException;
+import org.bson.codecs.pojo.entities.BsonIdModel;
+import org.bson.codecs.pojo.entities.ConventionModel;
+import org.bson.codecs.pojo.entities.SimpleModel;
+import org.bson.codecs.pojo.entities.conventions.AnnotationBsonPropertyIdModel;
+import org.bson.codecs.pojo.entities.conventions.AnnotationBsonRepresentation;
+import org.bson.codecs.pojo.entities.conventions.AnnotationCollision;
+import org.bson.codecs.pojo.entities.conventions.AnnotationDefaultsModel;
+import org.bson.codecs.pojo.entities.conventions.AnnotationNameCollision;
+import org.bson.codecs.pojo.entities.conventions.AnnotationWithObjectIdModel;
+import org.bson.codecs.pojo.entities.conventions.AnnotationWriteCollision;
+import org.bson.codecs.pojo.entities.conventions.BsonIgnoreDuplicatePropertyMultipleTypes;
+import org.bson.codecs.pojo.entities.conventions.CreatorConstructorNoKnownIdModel;
+import org.bson.codecs.pojo.entities.conventions.CreatorInvalidConstructorModel;
+import org.bson.codecs.pojo.entities.conventions.CreatorInvalidMethodModel;
+import org.bson.codecs.pojo.entities.conventions.CreatorInvalidMethodReturnTypeModel;
+import org.bson.codecs.pojo.entities.conventions.CreatorInvalidMultipleConstructorsModel;
+import org.bson.codecs.pojo.entities.conventions.CreatorInvalidMultipleCreatorsModel;
+import org.bson.codecs.pojo.entities.conventions.CreatorInvalidMultipleStaticCreatorsModel;
+import org.bson.codecs.pojo.entities.conventions.CreatorInvalidTypeConstructorModel;
+import org.bson.codecs.pojo.entities.conventions.CreatorInvalidTypeMethodModel;
+import org.junit.jupiter.api.Test;
+
+import static java.util.Collections.singletonList;
+import static org.bson.codecs.pojo.Conventions.ANNOTATION_CONVENTION;
+import static org.bson.codecs.pojo.Conventions.CLASS_AND_PROPERTY_CONVENTION;
+import static org.bson.codecs.pojo.Conventions.DEFAULT_CONVENTIONS;
+import static org.bson.codecs.pojo.Conventions.NO_CONVENTIONS;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+public final class ConventionsTest {
+
+    @Test
+    public void testDefaultConventions() {
+        ClassModel<AnnotationWithObjectIdModel> classModel = ClassModel.builder(AnnotationWithObjectIdModel.class)
+                .conventions(DEFAULT_CONVENTIONS).build();
+
+        assertTrue(classModel.useDiscriminator());
+        assertEquals("_cls", classModel.getDiscriminatorKey());
+        assertEquals("MyAnnotationModel", classModel.getDiscriminator());
+
+        assertEquals(3, classModel.getPropertyModels().size());
+        PropertyModel<?> idPropertyModel = classModel.getIdPropertyModel();
+        assertNotNull(idPropertyModel);
+        assertEquals("customId", idPropertyModel.getName());
+        assertEquals("_id", idPropertyModel.getWriteName());
+        assertEquals(classModel.getIdPropertyModelHolder().getIdGenerator(), IdGenerators.OBJECT_ID_GENERATOR);
+
+        PropertyModel<?> childPropertyModel = classModel.getPropertyModel("child");
+        assertNotNull(childPropertyModel);
+        assertFalse(childPropertyModel.useDiscriminator());
+
+        PropertyModel<?> renamedPropertyModel = classModel.getPropertyModel("alternative");
+        assertEquals("renamed", renamedPropertyModel.getReadName());
+        assertEquals("renamed", renamedPropertyModel.getWriteName());
+    }
+
+    @Test
+    public void testAnnotationDefaults() {
+        ClassModel<AnnotationDefaultsModel> classModel = ClassModel.builder(AnnotationDefaultsModel.class)
+                .conventions(singletonList(ANNOTATION_CONVENTION)).build();
+
+        assertTrue(classModel.useDiscriminator());
+        assertEquals("_t", classModel.getDiscriminatorKey());
+        assertEquals("AnnotationDefaultsModel", classModel.getDiscriminator());
+
+        assertEquals(2, classModel.getPropertyModels().size());
+        PropertyModel<?> idPropertyModel = classModel.getIdPropertyModel();
+        assertNotNull(idPropertyModel);
+        assertEquals("customId", idPropertyModel.getName());
+        assertEquals("_id", idPropertyModel.getWriteName());
+
+        PropertyModel<?> childPropertyModel = classModel.getPropertyModel("child");
+        assertNotNull(childPropertyModel);
+        assertFalse(childPropertyModel.useDiscriminator());
+    }
+
+    @Test
+    public void testBsonPropertyIdModelModel() {
+        ClassModel<AnnotationBsonPropertyIdModel> classModel = ClassModel.builder(AnnotationBsonPropertyIdModel.class)
+                .conventions(DEFAULT_CONVENTIONS).build();
+
+        assertFalse(classModel.useDiscriminator());
+        assertEquals(1, classModel.getPropertyModels().size());
+        assertNull(classModel.getIdPropertyModel());
+    }
+
+    @Test
+    public void testBsonRepresentation() {
+        ClassModel<AnnotationBsonRepresentation> classModel = ClassModel.builder(AnnotationBsonRepresentation.class).build();
+        assertEquals(classModel.getPropertyModel("id").getBsonRepresentation(), BsonType.OBJECT_ID);
+        assertEquals(classModel.getPropertyModel("parentId").getBsonRepresentation(), BsonType.OBJECT_ID);
+        assertNull(classModel.getPropertyModel("friendId").getBsonRepresentation());
+        assertNull(classModel.getPropertyModel("age").getBsonRepresentation());
+    }
+
+    @Test
+    public void testIdGeneratorChoice() {
+        ClassModel<AnnotationBsonRepresentation> stringIdObjectRep = ClassModel.builder(AnnotationBsonRepresentation.class).build();
+        assertEquals(stringIdObjectRep.getIdPropertyModelHolder().getIdGenerator(), IdGenerators.STRING_ID_GENERATOR);
+
+        ClassModel<ConventionModel> stringIdStringRep = ClassModel.builder(ConventionModel.class).build();
+        assertNull(stringIdStringRep.getIdPropertyModelHolder().getIdGenerator());
+
+        ClassModel<BsonIdModel> bsonId = ClassModel.builder(BsonIdModel.class).build();
+        assertEquals(bsonId.getIdPropertyModelHolder().getIdGenerator(), IdGenerators.BSON_OBJECT_ID_GENERATOR);
+    }
+
+    @Test
+    @SuppressWarnings("unchecked")
+    public void testClassAndFieldConventionDoesNotOverwrite() {
+        ClassModelBuilder<SimpleModel> builder = ClassModel.builder(SimpleModel.class)
+                .enableDiscriminator(true)
+                .discriminatorKey("_cls")
+                .discriminator("Simples")
+                .conventions(singletonList(CLASS_AND_PROPERTY_CONVENTION))
+                .instanceCreatorFactory(() -> null);
+
+        PropertyModelBuilder<Integer> propertyModelBuilder = (PropertyModelBuilder<Integer>) builder.getProperty("integerField");
+        propertyModelBuilder.writeName("id")
+                .propertySerialization(new PropertyModelSerializationImpl<>())
+                .propertyAccessor(new PropertyAccessorTest<>());
+
+        PropertyModelBuilder<String> propertyModelBuilder2 = (PropertyModelBuilder<String>) builder.getProperty("stringField");
+        propertyModelBuilder2.writeName("_id")
+                .propertySerialization(new PropertyModelSerializationImpl<>())
+                .propertyAccessor(new PropertyAccessorTest<>());
+
+        ClassModel<SimpleModel> classModel = builder.idPropertyName("stringField").build();
+
+        assertTrue(classModel.useDiscriminator());
+        assertEquals("_cls", classModel.getDiscriminatorKey());
+        assertEquals("Simples", classModel.getDiscriminator());
+
+        assertEquals(2, classModel.getPropertyModels().size());
+        PropertyModel<?> idPropertyModel = classModel.getIdPropertyModel();
+        assertEquals("stringField", idPropertyModel.getName());
+        assertEquals("_id", idPropertyModel.getWriteName());
+        assertNull(idPropertyModel.useDiscriminator());
+    }
+
+    @Test
+    public void testAnnotationCollision() {
+        assertThrows(CodecConfigurationException.class, () ->
+                ClassModel.builder(AnnotationCollision.class).conventions(DEFAULT_CONVENTIONS).build());
+    }
+
+    @Test
+    public void testAnnotationWriteCollision() {
+        assertThrows(CodecConfigurationException.class, () ->
+                ClassModel.builder(AnnotationWriteCollision.class).conventions(DEFAULT_CONVENTIONS).build());
+    }
+
+    @Test
+    public void testAnnotationNameCollision() {
+        assertThrows(CodecConfigurationException.class, () ->
+                ClassModel.builder(AnnotationNameCollision.class)
+                        .conventions(singletonList(ANNOTATION_CONVENTION)).build());
+    }
+
+    @Test
+    public void testCreatorInvalidConstructorModel() {
+        assertThrows(CodecConfigurationException.class, () ->
+                ClassModel.builder(CreatorInvalidConstructorModel.class)
+                        .conventions(singletonList(ANNOTATION_CONVENTION)).build());
+    }
+
+    @Test
+    public void testCreatorInvalidMethodModel() {
+        assertThrows(CodecConfigurationException.class, () ->
+                ClassModel.builder(CreatorInvalidMethodModel.class)
+                        .conventions(singletonList(ANNOTATION_CONVENTION)).build());
+    }
+
+    @Test
+    public void testCreatorInvalidMultipleConstructorsModel() {
+        assertThrows(CodecConfigurationException.class, () ->
+                ClassModel.builder(CreatorInvalidMultipleConstructorsModel.class)
+                        .conventions(singletonList(ANNOTATION_CONVENTION)).build());
+    }
+
+    @Test
+    public void testCreatorInvalidMultipleCreatorsModel() {
+        assertThrows(CodecConfigurationException.class, () ->
+                ClassModel.builder(CreatorInvalidMultipleCreatorsModel.class)
+                        .conventions(singletonList(ANNOTATION_CONVENTION)).build());
+    }
+
+    @Test
+    public void testCreatorInvalidMultipleStaticCreatorsModel() {
+        assertThrows(CodecConfigurationException.class, () ->
+                ClassModel.builder(CreatorInvalidMultipleStaticCreatorsModel.class)
+                        .conventions(singletonList(ANNOTATION_CONVENTION)).build());
+    }
+
+    @Test
+    public void testCreatorInvalidMethodReturnTypeModel() {
+        assertThrows(CodecConfigurationException.class, () ->
+                ClassModel.builder(CreatorInvalidMethodReturnTypeModel.class)
+                        .conventions(singletonList(ANNOTATION_CONVENTION)).build());
+    }
+
+    @Test
+    public void testCreatorInvalidTypeConstructorModel() {
+        assertThrows(CodecConfigurationException.class, () ->
+                ClassModel.builder(CreatorInvalidTypeConstructorModel.class)
+                        .conventions(singletonList(ANNOTATION_CONVENTION)).build());
+    }
+
+    @Test
+    public void testCreatorInvalidTypeMethodModel() {
+        assertThrows(CodecConfigurationException.class, () ->
+                ClassModel.builder(CreatorInvalidTypeMethodModel.class)
+                        .conventions(singletonList(ANNOTATION_CONVENTION)).build());
+    }
+
+    @Test
+    public void testCreatorConstructorNoKnownIdModel() {
+        assertThrows(CodecConfigurationException.class, () ->
+                ClassModel.builder(CreatorConstructorNoKnownIdModel.class)
+                        .conventions(singletonList(ANNOTATION_CONVENTION)).build());
+    }
+
+    @Test
+    public void testBsonIgnoreDuplicatePropertyMultipleTypesModel() {
+        assertThrows(CodecConfigurationException.class, () ->
+                ClassModel.builder(BsonIgnoreDuplicatePropertyMultipleTypes.class)
+                        .conventions(NO_CONVENTIONS).build());
+    }
+
+    private class PropertyAccessorTest<T> implements PropertyAccessor<T> {
+
+        @Override
+        public <S> T get(final S instance) {
+            return null;
+        }
+
+        @Override
+        public <S> void set(final S instance, final T value) {
+        }
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/IdGeneratorsTest.java b/bson/src/test/unit/org/bson/codecs/pojo/IdGeneratorsTest.java
new file mode 100644
index 00000000000..fe812ba8fe9
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/IdGeneratorsTest.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo;
+
+import org.bson.BsonObjectId;
+import org.bson.types.ObjectId;
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+public class IdGeneratorsTest {
+
+    @Test
+    public void testObjectIdGenerator() {
+        IdGenerator<ObjectId> idGenerator = IdGenerators.OBJECT_ID_GENERATOR;
+
+        assertEquals(ObjectId.class, idGenerator.getType());
+        assertEquals(ObjectId.class, idGenerator.generate().getClass());
+    }
+
+    @Test
+    public void testBsonObjectIdGenerator() {
+        IdGenerator<BsonObjectId> idGenerator = IdGenerators.BSON_OBJECT_ID_GENERATOR;
+
+        assertEquals(BsonObjectId.class, idGenerator.getType());
+        assertEquals(BsonObjectId.class, idGenerator.generate().getClass());
+    }
+
+    @Test
+    public void testStringIdGenerator() {
+        IdGenerator<String> idGenerator = IdGenerators.STRING_ID_GENERATOR;
+
+        assertEquals(String.class, idGenerator.getType());
+        assertEquals(String.class, idGenerator.generate().getClass());
+    }
+
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/PojoCodecCyclicalLookupTest.java b/bson/src/test/unit/org/bson/codecs/pojo/PojoCodecCyclicalLookupTest.java
new file mode 100644
index 00000000000..161a54fd902
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/PojoCodecCyclicalLookupTest.java
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.bson.codecs.pojo;
+
+import org.bson.codecs.BsonValueCodecProvider;
+import org.bson.codecs.Codec;
+import org.bson.codecs.ValueCodecProvider;
+import org.bson.codecs.configuration.CodecProvider;
+import org.bson.codecs.configuration.CodecRegistry;
+import org.bson.codecs.pojo.entities.ConventionModel;
+import org.bson.codecs.pojo.entities.GenericHolderModel;
+import org.bson.codecs.pojo.entities.GenericTreeModel;
+import org.bson.codecs.pojo.entities.ListListGenericExtendedModel;
+import org.bson.codecs.pojo.entities.NestedGenericHolderFieldWithMultipleTypeParamsModel;
+import org.bson.codecs.pojo.entities.NestedGenericTreeModel;
+import org.bson.codecs.pojo.entities.PropertyWithMultipleTypeParamsModel;
+import org.bson.codecs.pojo.entities.SimpleGenericsModel;
+import org.bson.codecs.pojo.entities.SimpleModel;
+import org.junit.jupiter.api.Test;
+
+import java.util.List;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static java.util.Arrays.asList;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+public class PojoCodecCyclicalLookupTest extends PojoTestCase {
+
+    @Test
+    void testSimpleModel() {
+        SimpleModel model = getSimpleModel();
+        LookupCountingCodecRegistry registry = createRegistry(SimpleModel.class);
+        roundTrip(registry, model, SIMPLE_MODEL_JSON);
+
+        assertEquals(2, registry.counters.get(SimpleModel.class).get()); // Looked up in encodesTo & decodesTo
+        assertEquals(1, registry.counters.get(String.class).get()); // Lookup on encode then cached (PropertyCodecRegistry)
+        assertEquals(1, registry.counters.get(Integer.class).get()); // Lookup on encode then cached (PropertyCodecRegistry)
+    }
+
+    @Test
+    void testConventionModel() {
+        ConventionModel model = getConventionModel();
+        String json = "{'_id': 'id', '_cls': 'AnnotatedConventionModel', 'myFinalField': 10, 'myIntField': 10,"
+                + "'child': {'_id': 'child', 'myFinalField': 10, 'myIntField': 10,"
+                + "'model': {'integerField': 42, 'stringField': 'myString'}}}";
+        LookupCountingCodecRegistry registry = createRegistry(ConventionModel.class, SimpleModel.class);
+        roundTrip(registry, model, json);
+
+        assertEquals(2, registry.counters.get(ConventionModel.class).get()); // Looked up in encodesTo & decodesTo
+        assertEquals(1, registry.counters.get(SimpleModel.class).get()); // Lookup on encode then cached (PropertyCodecRegistry)
+        assertEquals(2, registry.counters.get(String.class).get()); // Once for ConventionModel & once for SimpleModel
+        assertEquals(2, registry.counters.get(Integer.class).get()); // Once for ConventionModel & once for SimpleModel
+    }
+
+    @Test
+    void testNestedGenericTreeModel() {
+        NestedGenericTreeModel model = new NestedGenericTreeModel(42, getGenericTreeModel());
+        String json = "{'intField': 42, 'nested': {'field1': 'top', 'field2': 1, "
+                + "'left': {'field1': 'left', 'field2': 2, 'left': {'field1': 'left', 'field2': 3}}, "
+                + "'right': {'field1': 'right', 'field2': 4, 'left': {'field1': 'left', 'field2': 5}}}}";
+        LookupCountingCodecRegistry registry = createRegistry(NestedGenericTreeModel.class, GenericTreeModel.class);
+        roundTrip(registry, model, json);
+
+        assertEquals(2, registry.counters.get(NestedGenericTreeModel.class).get());
+        assertEquals(1, registry.counters.get(GenericTreeModel.class).get());
+        assertEquals(1, registry.counters.get(String.class).get());
+        assertEquals(1, registry.counters.get(Integer.class).get());
+    }
+
+    @Test
+    void testNestedGenericHolderFieldWithMultipleTypeParamsModel() {
+        NestedGenericHolderFieldWithMultipleTypeParamsModel model = getNestedGenericHolderFieldWithMultipleTypeParamsModel();
+        LookupCountingCodecRegistry registry = createRegistry(NestedGenericHolderFieldWithMultipleTypeParamsModel.class,
+                PropertyWithMultipleTypeParamsModel.class, SimpleGenericsModel.class, GenericHolderModel.class);
+        String json = "{'nested': {'myGenericField': {_t: 'PropertyWithMultipleTypeParamsModel', "
+                + "'simpleGenericsModel': {_t: 'org.bson.codecs.pojo.entities.SimpleGenericsModel', 'myIntegerField': 42, "
+                + "'myGenericField': {'$numberLong': '101'}, 'myListField': ['B', 'C'], 'myMapField': {'D': 2, 'E': 3, 'F': 4 }}},"
+                + "'myLongField': {'$numberLong': '42'}}}";
+
+        roundTrip(registry, model, json);
+
+        assertEquals(2, registry.counters.get(NestedGenericHolderFieldWithMultipleTypeParamsModel.class).get());
+        assertEquals(1, registry.counters.get(PropertyWithMultipleTypeParamsModel.class).get());
+        assertEquals(1, registry.counters.get(SimpleGenericsModel.class).get());
+        assertEquals(1, registry.counters.get(GenericHolderModel.class).get());
+        assertEquals(1, registry.counters.get(Long.class).get());
+        assertEquals(1, registry.counters.get(String.class).get());
+        assertEquals(1, registry.counters.get(Integer.class).get());
+    }
+
+    @Test
+    void testListListGenericExtendedModel() {
+        ListListGenericExtendedModel model = new ListListGenericExtendedModel(asList(asList(1, 2, 3), asList(4, 5, 6)));
+        LookupCountingCodecRegistry registry = createRegistry(ListListGenericExtendedModel.class);
+        String json = "{values: [[1, 2, 3], [4, 5, 6]]}";
+        roundTrip(registry, model, json);
+
+        assertEquals(2, registry.counters.get(ListListGenericExtendedModel.class).get());
+        assertEquals(1, registry.counters.get(Integer.class).get());
+    }
+
+    LookupCountingCodecRegistry createRegistry(final Class<?>... classes) {
+        return new LookupCountingCodecRegistry(
+                new BsonValueCodecProvider(),
+                new ValueCodecProvider(),
+                getPojoCodecProviderBuilder(classes).build()
+        );
+    }
+
+    static class LookupCountingCodecRegistry implements CodecRegistry {
+
+        private final ConcurrentHashMap<Class<?>, AtomicInteger> counters;
+        private final List<CodecProvider> codecProviders;
+
+        LookupCountingCodecRegistry(final CodecProvider... providers) {
+            this.codecProviders = asList(providers);
+            this.counters = new ConcurrentHashMap<>();
+        }
+
+        @Override
+        public <T> Codec<T> get(final Class<T> clazz) {
+            incrementCount(clazz);
+            for (CodecProvider provider : codecProviders) {
+                Codec<T> codec = provider.get(clazz, this);
+                if (codec != null) {
+                    return codec;
+                }
+            }
+            return null;
+        }
+
+        public <T> Codec<T> get(final Class<T> clazz, final CodecRegistry registry) {
+            incrementCount(clazz);
+            for (CodecProvider provider : codecProviders) {
+                Codec<T> codec = provider.get(clazz, registry);
+                if (codec != null) {
+                    return codec;
+                }
+            }
+            return null;
+        }
+
+        private synchronized void incrementCount(final Class<?> clazz) {
+            AtomicInteger atomicInteger = counters.computeIfAbsent(clazz, k -> new AtomicInteger());
+            atomicInteger.incrementAndGet();
+        }
+    }
+
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/PojoCodecDiscriminatorTest.java b/bson/src/test/unit/org/bson/codecs/pojo/PojoCodecDiscriminatorTest.java
new file mode 100644
index 00000000000..b95e7bcefda
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/PojoCodecDiscriminatorTest.java
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo;
+
+import org.bson.codecs.pojo.entities.DiscriminatorModel;
+import org.bson.codecs.pojo.entities.DiscriminatorWithGetterModel;
+import org.bson.codecs.pojo.entities.DiscriminatorWithProperty;
+import org.bson.codecs.pojo.entities.DiscriminatorWithPropertyAndIgnore;
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+
+public final class PojoCodecDiscriminatorTest extends PojoTestCase {
+
+    @Test
+    public void testDiscriminatorEncodedOnceWhenItIsAlsoAGetter() {
+        byte[] encodedDiscriminatorModel = encode(
+                getCodec(DiscriminatorModel.class),
+                new DiscriminatorModel(),
+                false
+        ).toByteArray();
+        byte[] encodedDiscriminatorWithGetter = encode(
+                getCodec(DiscriminatorWithGetterModel.class),
+                new DiscriminatorWithGetterModel(),
+                false
+        ).toByteArray();
+        assertArrayEquals(encodedDiscriminatorModel, encodedDiscriminatorWithGetter);
+    }
+
+    @Test
+    public void testDiscriminatorRoundTripWhenItIsAlsoAGetter() {
+        roundTrip(
+                new DiscriminatorWithGetterModel(),
+                "{discriminatorKey:'discriminatorValue'}"
+        );
+    }
+
+    @Test
+    public void testDiscriminatorEncodedOnceWhenItIsAlsoAProperty() {
+        byte[] encodedDiscriminatorModel = encode(
+                getCodec(DiscriminatorModel.class),
+                new DiscriminatorModel(),
+                false
+        ).toByteArray();
+        byte[] encodedDiscriminatorWithProperty = encode(
+                getCodec(DiscriminatorWithProperty.class),
+                new DiscriminatorWithProperty(),
+                false
+        ).toByteArray();
+        assertArrayEquals(encodedDiscriminatorModel, encodedDiscriminatorWithProperty);
+    }
+
+    @Test
+    public void testDiscriminatorRoundTripWhenItIsAlsoAProperty() {
+        roundTrip(
+                new DiscriminatorWithProperty(),
+                "{discriminatorKey:'discriminatorValue'}"
+        );
+    }
+
+    @Test
+    public void testDiscriminatorRoundTripWhenItIsAlsoAPropertyWithIgnoredProperty() {
+        roundTrip(
+                new DiscriminatorWithPropertyAndIgnore(),
+                "{discriminatorKey:'discriminatorValue'}"
+        );
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/PojoCodecProviderTest.java b/bson/src/test/unit/org/bson/codecs/pojo/PojoCodecProviderTest.java
new file mode 100644
index 00000000000..1921e161854
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/PojoCodecProviderTest.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo;
+
+import org.bson.codecs.Codec;
+import org.bson.codecs.ValueCodecProvider;
+import org.bson.codecs.configuration.CodecRegistry;
+import org.bson.codecs.pojo.entities.SimpleModel;
+import org.bson.codecs.pojo.entities.conventions.CreatorInvalidMethodModel;
+import org.junit.jupiter.api.Test;
+
+import static org.bson.codecs.configuration.CodecRegistries.fromProviders;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+
+public final class PojoCodecProviderTest extends PojoTestCase {
+
+    @Test
+    public void testClassNotFound() {
+        PojoCodecProvider provider = PojoCodecProvider.builder().build();
+        CodecRegistry registry = fromProviders(provider, new ValueCodecProvider());
+        Codec<SimpleModel> codec = provider.get(SimpleModel.class, registry);
+        assertNull(codec);
+    }
+
+    @Test
+    public void testPackageLessClasses() {
+        PojoCodecProvider provider = PojoCodecProvider.builder().build();
+        CodecRegistry registry = fromProviders(provider, new ValueCodecProvider());
+        Codec<Byte> codec = provider.get(byte.class, registry);
+        assertNull(codec);
+    }
+
+    @Test
+    public void testAutomatic() {
+        PojoCodecProvider provider = PojoCodecProvider.builder().automatic(true).build();
+        CodecRegistry registry = fromProviders(provider, new ValueCodecProvider());
+        Codec<SimpleModel> codec = provider.get(SimpleModel.class, registry);
+        assertNotNull(codec);
+    }
+
+    @Test
+    public void testAutomaticNoProperty() {
+        PojoCodecProvider provider = PojoCodecProvider.builder().automatic(true).build();
+        CodecRegistry registry = fromProviders(provider);
+        Codec<Integer> codec = provider.get(Integer.class, registry);
+        assertNull(codec);
+    }
+
+    @Test
+    public void testAutomaticInvalidModel() {
+        PojoCodecProvider provider = PojoCodecProvider.builder().automatic(true).build();
+        CodecRegistry registry = fromProviders(provider, new ValueCodecProvider());
+        Codec<CreatorInvalidMethodModel> codec = provider.get(CreatorInvalidMethodModel.class, registry);
+        assertNull(codec);
+    }
+
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/PojoCustomTest.java b/bson/src/test/unit/org/bson/codecs/pojo/PojoCustomTest.java
new file mode 100644
index 00000000000..7b38e16ef2e
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/PojoCustomTest.java
@@ -0,0 +1,723 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo;
+
+import org.bson.BsonReader;
+import org.bson.BsonWriter;
+import org.bson.Document;
+import org.bson.codecs.BsonValueCodecProvider;
+import org.bson.codecs.Codec;
+import org.bson.codecs.DecoderContext;
+import org.bson.codecs.EncoderContext;
+import org.bson.codecs.IterableCodecProvider;
+import org.bson.codecs.LongCodec;
+import org.bson.codecs.MapCodecProvider;
+import org.bson.codecs.SimpleEnum;
+import org.bson.codecs.ValueCodecProvider;
+import org.bson.codecs.configuration.CodecConfigurationException;
+import org.bson.codecs.configuration.CodecRegistry;
+import org.bson.codecs.pojo.entities.AbstractInterfaceModel;
+import org.bson.codecs.pojo.entities.AsymmetricalCreatorModel;
+import org.bson.codecs.pojo.entities.AsymmetricalIgnoreModel;
+import org.bson.codecs.pojo.entities.AsymmetricalModel;
+import org.bson.codecs.pojo.entities.BsonRepresentationUnsupportedInt;
+import org.bson.codecs.pojo.entities.BsonRepresentationUnsupportedString;
+import org.bson.codecs.pojo.entities.ComposeInterfaceModel;
+import org.bson.codecs.pojo.entities.ConcreteAndNestedAbstractInterfaceModel;
+import org.bson.codecs.pojo.entities.ConcreteCollectionsModel;
+import org.bson.codecs.pojo.entities.ConcreteModel;
+import org.bson.codecs.pojo.entities.ConcreteField;
+import org.bson.codecs.pojo.entities.ConcreteStandAloneAbstractInterfaceModel;
+import org.bson.codecs.pojo.entities.ConstructorNotPublicModel;
+import org.bson.codecs.pojo.entities.ConventionModel;
+import org.bson.codecs.pojo.entities.ConverterModel;
+import org.bson.codecs.pojo.entities.CustomPropertyCodecOptionalModel;
+import org.bson.codecs.pojo.entities.GenericBaseModel;
+import org.bson.codecs.pojo.entities.GenericHolderModel;
+import org.bson.codecs.pojo.entities.GenericTreeModel;
+import org.bson.codecs.pojo.entities.InterfaceBasedModel;
+import org.bson.codecs.pojo.entities.InterfaceModelB;
+import org.bson.codecs.pojo.entities.InterfaceModelImpl;
+import org.bson.codecs.pojo.entities.InvalidCollectionModel;
+import org.bson.codecs.pojo.entities.InvalidGetterAndSetterModel;
+import org.bson.codecs.pojo.entities.InvalidMapModel;
+import org.bson.codecs.pojo.entities.InvalidMapPropertyCodecProvider;
+import org.bson.codecs.pojo.entities.InvalidSetterArgsModel;
+import org.bson.codecs.pojo.entities.MapStringObjectModel;
+import org.bson.codecs.pojo.entities.NestedGenericHolderFieldWithMultipleTypeParamsModel;
+import org.bson.codecs.pojo.entities.NestedSimpleIdModel;
+import org.bson.codecs.pojo.entities.Optional;
+import org.bson.codecs.pojo.entities.OptionalPropertyCodecProvider;
+import org.bson.codecs.pojo.entities.PrimitivesModel;
+import org.bson.codecs.pojo.entities.PrivateSetterFieldModel;
+import org.bson.codecs.pojo.entities.PropertyWithMultipleTypeParamsModel;
+import org.bson.codecs.pojo.entities.SimpleEnumModel;
+import org.bson.codecs.pojo.entities.SimpleGenericsModel;
+import org.bson.codecs.pojo.entities.SimpleIdImmutableModel;
+import org.bson.codecs.pojo.entities.SimpleIdModel;
+import org.bson.codecs.pojo.entities.SimpleModel;
+import org.bson.codecs.pojo.entities.SimpleNestedPojoModel;
+import org.bson.codecs.pojo.entities.UpperBoundsModel;
+import org.bson.codecs.pojo.entities.conventions.AnnotationModel;
+import org.bson.codecs.pojo.entities.conventions.BsonExtraElementsInvalidModel;
+import org.bson.codecs.pojo.entities.conventions.BsonRepresentationModel;
+import org.bson.codecs.pojo.entities.conventions.CollectionsGetterImmutableModel;
+import org.bson.codecs.pojo.entities.conventions.CollectionsGetterMutableModel;
+import org.bson.codecs.pojo.entities.conventions.CollectionsGetterNonEmptyModel;
+import org.bson.codecs.pojo.entities.conventions.CollectionsGetterNullModel;
+import org.bson.codecs.pojo.entities.conventions.CreatorConstructorPrimitivesModel;
+import org.bson.codecs.pojo.entities.conventions.CreatorConstructorThrowsExceptionModel;
+import org.bson.codecs.pojo.entities.conventions.CreatorMethodThrowsExceptionModel;
+import org.bson.codecs.pojo.entities.conventions.InterfaceModelBInstanceCreatorConvention;
+import org.bson.codecs.pojo.entities.conventions.MapGetterImmutableModel;
+import org.bson.codecs.pojo.entities.conventions.MapGetterMutableModel;
+import org.bson.codecs.pojo.entities.conventions.MapGetterNonEmptyModel;
+import org.bson.codecs.pojo.entities.conventions.MapGetterNullModel;
+import org.bson.types.ObjectId;
+import org.junit.jupiter.api.Test;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static java.lang.String.format;
+import static java.util.Arrays.asList;
+import static java.util.Collections.singletonList;
+import static org.bson.codecs.configuration.CodecRegistries.fromCodecs;
+import static org.bson.codecs.configuration.CodecRegistries.fromProviders;
+import static org.bson.codecs.configuration.CodecRegistries.fromRegistries;
+import static org.bson.codecs.pojo.Conventions.CLASS_AND_PROPERTY_CONVENTION;
+import static org.bson.codecs.pojo.Conventions.DEFAULT_CONVENTIONS;
+import static org.bson.codecs.pojo.Conventions.NO_CONVENTIONS;
+import static org.bson.codecs.pojo.Conventions.SET_PRIVATE_FIELDS_CONVENTION;
+import static org.bson.codecs.pojo.Conventions.USE_GETTERS_FOR_SETTERS;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+public final class PojoCustomTest extends PojoTestCase {
+
+    @Test
+    public void testRegisterClassModelPreferredOverClass() {
+        ClassModel<SimpleModel> classModel = ClassModel.builder(SimpleModel.class).enableDiscriminator(true).build();
+        PojoCodecProvider.Builder builder = PojoCodecProvider.builder().automatic(true).register(SimpleModel.class).register(classModel);
+
+        roundTrip(builder, getSimpleModel(), "{_t: 'org.bson.codecs.pojo.entities.SimpleModel', 'integerField': 42,"
+                + "'stringField': 'myString'}");
+    }
+
+    @Test
+    public void testPackageDiscriminator() {
+        AnnotationModel model = new AnnotationModel("myId", new AnnotationModel("child", null, null),
+                new AnnotationModel("alternative", null, null));
+
+        roundTrip(PojoCodecProvider.builder().register("org.bson.codecs.pojo.entities", "org.bson.codecs.pojo.entities.conventions"), model,
+                "{_id: 'myId', _cls: 'MyAnnotationModel', renamed: {_id: 'alternative'}, child: {_id: 'child'}}");
+    }
+
+    @Test
+    public void testAsymmetricalModel() {
+        AsymmetricalModel model = new AsymmetricalModel(42);
+
+        encodesTo(getPojoCodecProviderBuilder(AsymmetricalModel.class), model, "{foo: 42}");
+        decodesTo(getPojoCodecProviderBuilder(AsymmetricalModel.class), "{bar: 42}", model);
+    }
+
+    @Test
+    public void testAsymmetricalCreatorModel() {
+        AsymmetricalCreatorModel model = new AsymmetricalCreatorModel("Foo", "Bar");
+
+        encodesTo(getPojoCodecProviderBuilder(AsymmetricalCreatorModel.class), model, "{baz: 'FooBar'}");
+        decodesTo(getPojoCodecProviderBuilder(AsymmetricalCreatorModel.class), "{a: 'Foo', b: 'Bar'}", model);
+    }
+
+    @Test
+    public void testAsymmetricalIgnoreModel() {
+        AsymmetricalIgnoreModel encode = new AsymmetricalIgnoreModel("property", "getter", "setter", "getterAndSetter");
+        AsymmetricalIgnoreModel decoded = new AsymmetricalIgnoreModel();
+        decoded.setGetterIgnored("getter");
+
+        encodesTo(getPojoCodecProviderBuilder(AsymmetricalIgnoreModel.class), encode, "{'setterIgnored': 'setter'}");
+        decodesTo(getPojoCodecProviderBuilder(AsymmetricalIgnoreModel.class),
+                "{'propertyIgnored': 'property', 'getterIgnored': 'getter', 'setterIgnored': 'setter', "
+                        + "'getterAndSetterIgnored': 'getterAndSetter'}", decoded);
+    }
+
+    @Test
+    public void testConventionsEmpty() {
+        ClassModelBuilder<ConventionModel> classModel = ClassModel.builder(ConventionModel.class).conventions(NO_CONVENTIONS);
+        ClassModelBuilder<SimpleModel> nestedClassModel = ClassModel.builder(SimpleModel.class).conventions(NO_CONVENTIONS);
+
+        roundTrip(getPojoCodecProviderBuilder(classModel, nestedClassModel), getConventionModel(),
+                "{'myFinalField': 10, 'myIntField': 10, 'customId': 'id',"
+                        + "'child': {'myFinalField': 10, 'myIntField': 10, 'customId': 'child',"
+                        + " 'simpleModel': {'integerField': 42, 'stringField': 'myString' } } }");
+    }
+
+    @Test
+    public void testConventionsCustom() {
+        List<Convention> conventions = singletonList(
+                classModelBuilder -> {
+                    for (PropertyModelBuilder<?> fieldModelBuilder : classModelBuilder.getPropertyModelBuilders()) {
+                        fieldModelBuilder.discriminatorEnabled(false);
+                        fieldModelBuilder.readName(
+                                fieldModelBuilder.getName()
+                                        .replaceAll("([^_A-Z])([A-Z])", "$1_$2").toLowerCase());
+                        fieldModelBuilder.writeName(
+                                fieldModelBuilder.getName()
+                                        .replaceAll("([^_A-Z])([A-Z])", "$1_$2").toLowerCase());
+                    }
+                    if (classModelBuilder.getProperty("customId") != null) {
+                        classModelBuilder.idPropertyName("customId");
+                    }
+                    classModelBuilder.enableDiscriminator(true);
+                    classModelBuilder.discriminatorKey("_cls");
+                    classModelBuilder.discriminator(classModelBuilder.getType().getSimpleName()
+                            .replaceAll("([^_A-Z])([A-Z])", "$1_$2").toLowerCase());
+                });
+
+        ClassModelBuilder<ConventionModel> classModel = ClassModel.builder(ConventionModel.class).conventions(conventions);
+        ClassModelBuilder<SimpleModel> nestedClassModel = ClassModel.builder(SimpleModel.class).conventions(conventions);
+
+        roundTrip(getPojoCodecProviderBuilder(classModel, nestedClassModel), getConventionModel(),
+                "{ '_id': 'id', '_cls': 'convention_model', 'my_final_field': 10, 'my_int_field': 10,"
+                        + "'child': { '_id': 'child', 'my_final_field': 10, 'my_int_field': 10, "
+                        + " 'simple_model': {'integer_field': 42, 'string_field': 'myString' } } }");
+    }
+
+    @Test
+    public void testIdGeneratorMutable() {
+        SimpleIdModel simpleIdModel = new SimpleIdModel(42, "myString");
+        assertNull(simpleIdModel.getId());
+        ClassModelBuilder<SimpleIdModel> builder = ClassModel.builder(SimpleIdModel.class).idGenerator(new ObjectIdGenerator());
+
+        roundTrip(getPojoCodecProviderBuilder(builder), simpleIdModel, "{'integerField': 42, 'stringField': 'myString'}");
+        assertNull(simpleIdModel.getId());
+
+        encodesTo(getPojoCodecProviderBuilder(builder), simpleIdModel,
+                "{'_id': {'$oid': '123412341234123412341234'}, 'integerField': 42, 'stringField': 'myString'}", true);
+        assertEquals(new ObjectId("123412341234123412341234"), simpleIdModel.getId());
+    }
+
+    @Test
+    public void testIdGeneratorImmutable() {
+        SimpleIdImmutableModel simpleIdModelNoId = new SimpleIdImmutableModel(42, "myString");
+        SimpleIdImmutableModel simpleIdModelWithId = new SimpleIdImmutableModel(new ObjectId("123412341234123412341234"), 42, "myString");
+        ClassModelBuilder<SimpleIdImmutableModel> builder = ClassModel.builder(SimpleIdImmutableModel.class)
+                .idGenerator(new ObjectIdGenerator());
+        String json = "{'_id': {'$oid': '123412341234123412341234'}, 'integerField': 42, 'stringField': 'myString'}";
+
+        encodesTo(getPojoCodecProviderBuilder(builder), simpleIdModelNoId, json, true);
+        decodesTo(getPojoCodecProviderBuilder(builder), json, simpleIdModelWithId);
+    }
+
+    @Test
+    public void testIdGeneratorNonObjectId() {
+        NestedSimpleIdModel nestedSimpleIdModel = new NestedSimpleIdModel(new SimpleIdModel(42, "myString"));
+        assertNull(nestedSimpleIdModel.getId());
+        ClassModelBuilder<NestedSimpleIdModel> builder = ClassModel.builder(NestedSimpleIdModel.class)
+                .idGenerator(new IdGenerator<String>() {
+                    @Override
+                    public String generate() {
+                        return "a";
+                    }
+
+                    @Override
+                    public Class<String> getType() {
+                        return String.class;
+                    }
+                });
+
+        roundTrip(getPojoCodecProviderBuilder(builder, ClassModel.builder(SimpleIdModel.class)), nestedSimpleIdModel,
+                "{'nestedSimpleIdModel': {'integerField': 42, 'stringField': 'myString'}}");
+        assertNull(nestedSimpleIdModel.getId());
+
+        encodesTo(getPojoCodecProviderBuilder(builder, ClassModel.builder(SimpleIdModel.class)), nestedSimpleIdModel,
+                "{'_id': 'a', 'nestedSimpleIdModel': {'integerField': 42, 'stringField': 'myString'}}", true);
+        assertEquals("a", nestedSimpleIdModel.getId());
+    }
+
+    @Test
+    public void testSetPrivateFieldConvention() {
+        PojoCodecProvider.Builder builder = getPojoCodecProviderBuilder(PrivateSetterFieldModel.class);
+        List<Convention> conventions = new ArrayList<>(DEFAULT_CONVENTIONS);
+        conventions.add(SET_PRIVATE_FIELDS_CONVENTION);
+        builder.conventions(conventions);
+
+        roundTrip(builder, new PrivateSetterFieldModel(1, "2", asList("a", "b")),
+                "{'someMethod': 'some method', 'integerField': 1, 'stringField': '2', listField: ['a', 'b']}");
+    }
+
+    @Test
+    public void testUseGettersForSettersConvention() {
+        PojoCodecProvider.Builder builder = getPojoCodecProviderBuilder(CollectionsGetterMutableModel.class, MapGetterMutableModel.class)
+                .conventions(getDefaultAndUseGettersConvention());
+
+        roundTrip(builder, new CollectionsGetterMutableModel(asList(1, 2)), "{listField: [1, 2]}");
+        roundTrip(builder, new MapGetterMutableModel(Collections.singletonMap("a", 3)), "{mapField: {a: 3}}");
+    }
+
+    @Test
+    public void testWithWildcardListField() {
+        ClassModel<InterfaceBasedModel> interfaceBasedModelClassModel =
+                ClassModel.builder(InterfaceBasedModel.class).enableDiscriminator(true).build();
+        PojoCodecProvider.Builder builder = PojoCodecProvider.builder().automatic(true)
+                .register(interfaceBasedModelClassModel)
+                .register(AbstractInterfaceModel.class, ConcreteStandAloneAbstractInterfaceModel.class,
+                        ConcreteAndNestedAbstractInterfaceModel.class);
+
+        roundTrip(builder,
+                new ConcreteAndNestedAbstractInterfaceModel("A",
+                        singletonList(new ConcreteStandAloneAbstractInterfaceModel("B"))),
+                "{'_t': 'org.bson.codecs.pojo.entities.ConcreteAndNestedAbstractInterfaceModel', 'name': 'A', "
+                        + " 'wildcardList': [{'_t': 'org.bson.codecs.pojo.entities.ConcreteStandAloneAbstractInterfaceModel', "
+                        + "'name': 'B'}]}");
+    }
+
+    @Test
+    public void testUseGettersForSettersConventionInvalidTypeForCollection() {
+        PojoCodecProvider.Builder builder = getPojoCodecProviderBuilder(CollectionsGetterMutableModel.class)
+                .conventions(getDefaultAndUseGettersConvention());
+        assertThrows(CodecConfigurationException.class, () ->
+                decodingShouldFail(getCodec(builder, CollectionsGetterMutableModel.class), "{listField: ['1', '2']}"));
+    }
+
+    @Test
+    public void testUseGettersForSettersConventionInvalidTypeForMap() {
+        PojoCodecProvider.Builder builder = getPojoCodecProviderBuilder(MapGetterMutableModel.class)
+                .conventions(getDefaultAndUseGettersConvention());
+        assertThrows(CodecConfigurationException.class, () ->
+                decodingShouldFail(getCodec(builder, MapGetterMutableModel.class), "{mapField: {a: '1'}}"));
+    }
+
+    @Test
+    public void testUseGettersForSettersConventionImmutableCollection() {
+        PojoCodecProvider.Builder builder = getPojoCodecProviderBuilder(CollectionsGetterImmutableModel.class)
+                .conventions(getDefaultAndUseGettersConvention());
+        assertThrows(CodecConfigurationException.class, () ->
+                roundTrip(builder, new CollectionsGetterImmutableModel(asList(1, 2)), "{listField: [1, 2]}"));
+    }
+
+    @Test
+    public void testUseGettersForSettersConventionImmutableMap() {
+        PojoCodecProvider.Builder builder = getPojoCodecProviderBuilder(MapGetterImmutableModel.class)
+                .conventions(getDefaultAndUseGettersConvention());
+        assertThrows(CodecConfigurationException.class, () ->
+                roundTrip(builder, new MapGetterImmutableModel(Collections.singletonMap("a", 3)), "{mapField: {a: 3}}"));
+    }
+
+    @Test
+    public void testUseGettersForSettersConventionNullCollection() {
+        PojoCodecProvider.Builder builder = getPojoCodecProviderBuilder(CollectionsGetterNullModel.class)
+                .conventions(getDefaultAndUseGettersConvention());
+        assertThrows(CodecConfigurationException.class, () ->
+                roundTrip(builder, new CollectionsGetterNullModel(asList(1, 2)), "{listField: [1, 2]}"));
+    }
+
+    @Test
+    public void testUseGettersForSettersConventionNullMap() {
+        PojoCodecProvider.Builder builder = getPojoCodecProviderBuilder(MapGetterNullModel.class)
+                .conventions(getDefaultAndUseGettersConvention());
+        assertThrows(CodecConfigurationException.class, () ->
+                roundTrip(builder, new MapGetterNullModel(Collections.singletonMap("a", 3)), "{mapField: {a: 3}}"));
+    }
+
+    @Test
+    public void testUseGettersForSettersConventionNotEmptyCollection() {
+        PojoCodecProvider.Builder builder = getPojoCodecProviderBuilder(CollectionsGetterNonEmptyModel.class)
+                .conventions(getDefaultAndUseGettersConvention());
+        assertThrows(CodecConfigurationException.class, () ->
+                roundTrip(builder, new CollectionsGetterNonEmptyModel(asList(1, 2)), "{listField: [1, 2]}"));
+    }
+
+    @Test
+    public void testUseGettersForSettersConventionNotEmptyMap() {
+        PojoCodecProvider.Builder builder = getPojoCodecProviderBuilder(MapGetterNonEmptyModel.class)
+                .conventions(getDefaultAndUseGettersConvention());
+        assertThrows(CodecConfigurationException.class, () ->
+                roundTrip(builder, new MapGetterNonEmptyModel(Collections.singletonMap("a", 3)), "{mapField: {a: 3}}"));
+    }
+
+    @Test
+    public void testEnumSupportWithCustomCodec() {
+        CodecRegistry registry = fromRegistries(fromCodecs(new SimpleEnumCodec()),
+                getCodecRegistry(getPojoCodecProviderBuilder(SimpleEnumModel.class)));
+        roundTrip(registry, new SimpleEnumModel(SimpleEnum.BRAVO), "{ 'myEnum': 1 }");
+    }
+
+    @Test
+    public void testEnumSupportWithFallback() {
+        // Create a registry without EnumCodecProvider, to test the fallback in EnumPropertyCodecProvider#get
+        CodecRegistry registry = fromRegistries(fromProviders(new ValueCodecProvider(),
+                getPojoCodecProviderBuilder(SimpleEnumModel.class).build()));
+        roundTrip(registry, new SimpleEnumModel(SimpleEnum.BRAVO), "{ 'myEnum': 'BRAVO' }");
+    }
+
+    @Test
+    @SuppressWarnings("unchecked")
+    public void testCustomCodec() {
+        ObjectId id = new ObjectId();
+        ConverterModel model = new ConverterModel(id.toHexString(), "myName");
+
+        ClassModelBuilder<ConverterModel> classModel = ClassModel.builder(ConverterModel.class);
+        PropertyModelBuilder<String> idPropertyModelBuilder = (PropertyModelBuilder<String>) classModel.getProperty("id");
+        idPropertyModelBuilder.codec(new StringToObjectIdCodec());
+
+        roundTrip(getPojoCodecProviderBuilder(classModel), model,
+                format("{'_id': {'$oid': '%s'}, 'name': 'myName'}", id.toHexString()));
+    }
+
+    @Test
+    @SuppressWarnings("unchecked")
+    public void testCustomPropertySerializer() {
+        SimpleModel model = getSimpleModel();
+        model.setIntegerField(null);
+        ClassModelBuilder<SimpleModel> classModel = ClassModel.builder(SimpleModel.class);
+        ((PropertyModelBuilder<Integer>) classModel.getProperty("integerField"))
+                .propertySerialization(value -> true);
+
+        roundTrip(getPojoCodecProviderBuilder(classModel), model, "{'integerField': null, 'stringField': 'myString'}");
+    }
+
+    @Test
+    @SuppressWarnings("unchecked")
+    public void testCanHandleNullValuesForNestedModels() {
+        SimpleNestedPojoModel model = getSimpleNestedPojoModel();
+        model.setSimple(null);
+        ClassModelBuilder<SimpleNestedPojoModel> classModel = ClassModel.builder(SimpleNestedPojoModel.class);
+        ((PropertyModelBuilder<SimpleModel>) classModel.getProperty("simple"))
+                .propertySerialization(value -> true);
+        ClassModelBuilder<SimpleModel> classModelSimple = ClassModel.builder(SimpleModel.class);
+
+        roundTrip(getPojoCodecProviderBuilder(classModel, classModelSimple), model, "{'simple': null}");
+    }
+
+    @Test
+    @SuppressWarnings("unchecked")
+    public void testCanHandleNullValuesForCollectionsAndMaps() {
+        ConcreteCollectionsModel model = getConcreteCollectionsModel();
+        model.setCollection(null);
+        model.setMap(null);
+
+        ClassModelBuilder<ConcreteCollectionsModel> classModel =
+                ClassModel.builder(ConcreteCollectionsModel.class);
+        ((PropertyModelBuilder<Collection<Integer>>) classModel.getProperty("collection"))
+                .propertySerialization(value -> true);
+        ((PropertyModelBuilder<Map<String, Double>>) classModel.getProperty("map"))
+                .propertySerialization(value -> true);
+
+        roundTrip(getPojoCodecProviderBuilder(classModel), model,
+                "{'collection': null, 'list': [4, 5, 6], 'linked': [7, 8, 9], 'map': null,"
+                        + "'concurrent': {'D': 4.4, 'E': 5.5, 'F': 6.6}}");
+    }
+
+    @Test
+    public void testCanHandleExtraData() {
+        decodesTo(getCodec(SimpleModel.class), "{'integerField': 42, 'stringField': 'myString', 'extraFieldA': 1, 'extraFieldB': 2}",
+                getSimpleModel());
+    }
+
+    @Test
+    public void testDataCanHandleMissingData() {
+        SimpleModel model = getSimpleModel();
+        model.setIntegerField(null);
+
+        decodesTo(getCodec(SimpleModel.class), "{'_t': 'SimpleModel', 'stringField': 'myString'}", model);
+    }
+
+    @Test
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    public void testCanHandleTopLevelGenericIfHasCodec() {
+        UpperBoundsModel<Long> model = new UpperBoundsModel<>(5L);
+
+        ClassModelBuilder<UpperBoundsModel> classModelBuilder = ClassModel.builder(UpperBoundsModel.class);
+        ((PropertyModelBuilder<Long>) classModelBuilder.getProperty("myGenericField")).codec(new LongCodec());
+
+        roundTrip(getPojoCodecProviderBuilder(classModelBuilder), model,
+                "{'myGenericField': {'$numberLong': '5'}}");
+    }
+
+    @Test
+    public void testCustomRegisteredPropertyCodecWithValue() {
+        CustomPropertyCodecOptionalModel model = new CustomPropertyCodecOptionalModel(Optional.of("foo"));
+        roundTrip(getPojoCodecProviderBuilder(CustomPropertyCodecOptionalModel.class).register(new OptionalPropertyCodecProvider()),
+                model, "{'optionalField': 'foo'}");
+    }
+
+    @Test
+    public void testCustomRegisteredPropertyCodecOmittedValue() {
+        CustomPropertyCodecOptionalModel model = new CustomPropertyCodecOptionalModel(Optional.empty());
+        roundTrip(getPojoCodecProviderBuilder(CustomPropertyCodecOptionalModel.class).register(new OptionalPropertyCodecProvider()),
+                model, "{'optionalField': null}");
+    }
+
+    @Test
+    public void testMapStringObjectModel() {
+        MapStringObjectModel model = new MapStringObjectModel(new HashMap<>(Document.parse("{a : 1, b: 'b', c: [1, 2, 3]}")));
+        CodecRegistry registry = fromRegistries(fromProviders(new MapCodecProvider(), new IterableCodecProvider(), new ValueCodecProvider(),
+                getPojoCodecProviderBuilder(MapStringObjectModel.class).build()));
+        roundTrip(registry, model, "{ map: {a : 1, b: 'b', c: [1, 2, 3]}}");
+    }
+
+    @Test
+    public void testMapStringObjectModelWithObjectCodec() {
+        MapStringObjectModel model = new MapStringObjectModel(new HashMap<>(Document.parse("{a : 1, b: 'b', c: [1, 2, 3]}")));
+        CodecRegistry registry = fromRegistries(fromProviders(new MapCodecProvider()), fromCodecs(new ObjectCodec()),
+                fromProviders(getPojoCodecProviderBuilder(MapStringObjectModel.class).build()));
+        assertThrows(UnsupportedOperationException.class, () ->
+                roundTrip(registry, model, "{ map: {a : 1, b: 'b', c: [1, 2, 3]}}"));
+    }
+
+    @Test
+    public void testEncodingInvalidMapModel() {
+        assertThrows(CodecConfigurationException.class, () ->
+                encodesTo(getPojoCodecProviderBuilder(InvalidMapModel.class), getInvalidMapModel(), "{'invalidMap': {'1': 1, '2': 2}}"));
+    }
+
+    @Test
+    public void testDecodingInvalidMapModel() {
+        CodecConfigurationException e = assertThrows(CodecConfigurationException.class, () ->
+                decodingShouldFail(getCodec(InvalidMapModel.class), "{'invalidMap': {'1': 1, '2': 2}}"));
+        assertTrue(e.getMessage().startsWith("Failed to decode 'InvalidMapModel'. Decoding 'invalidMap' errored with:"));
+    }
+
+    @Test
+    public void testEncodingInvalidCollectionModel() {
+        CodecConfigurationException e = assertThrows(CodecConfigurationException.class, () ->
+                encodesTo(getPojoCodecProviderBuilder(InvalidCollectionModel.class), new InvalidCollectionModel(asList(1, 2, 3)),
+                        "{collectionField: [1, 2, 3]}"));
+        assertTrue(e.getMessage().startsWith("Failed to encode 'InvalidCollectionModel'. Encoding 'collectionField' errored with:"));
+    }
+
+    @Test
+    public void testInvalidMapModelWithCustomPropertyCodecProvider() {
+        encodesTo(getPojoCodecProviderBuilder(InvalidMapModel.class).register(new InvalidMapPropertyCodecProvider()), getInvalidMapModel(),
+                "{'invalidMap': {'1': 1, '2': 2}}");
+    }
+
+    @Test
+    public void testInterfaceModelCreatorMadeInConvention() {
+        roundTrip(
+                getPojoCodecProviderBuilder(ComposeInterfaceModel.class, InterfaceModelB.class, InterfaceModelImpl.class)
+                        .conventions(Collections.singletonList(new InterfaceModelBInstanceCreatorConvention())),
+                new ComposeInterfaceModel("someTitle",
+                        new InterfaceModelImpl("a", "b")),
+                "{'title': 'someTitle', 'nestedModel': {'propertyA': 'a', 'propertyB': 'b'}}"
+        );
+    }
+
+    @Test
+    public void testConstructorNotPublicModel() {
+        assertThrows(CodecConfigurationException.class, () ->
+                decodingShouldFail(getCodec(ConstructorNotPublicModel.class), "{'integerField': 99}"));
+    }
+
+    @Test
+    public void testDataUnknownClass() {
+        ClassModel<SimpleModel> classModel = ClassModel.builder(SimpleModel.class).enableDiscriminator(true).build();
+        CodecConfigurationException e = assertThrows(CodecConfigurationException.class, () ->
+                decodingShouldFail(getCodec(PojoCodecProvider.builder().register(classModel), SimpleModel.class), "{'_t': 'FakeModel'}"));
+        assertTrue(e.getMessage().startsWith("Failed to decode 'SimpleModel'. Decoding errored with:"));
+    }
+
+    @Test
+    public void testInvalidTypeForField() {
+        assertThrows(CodecConfigurationException.class, () ->
+                decodingShouldFail(getCodec(SimpleModel.class), "{'_t': 'SimpleModel', 'stringField': 123}"));
+    }
+
+    @Test
+    public void testInvalidTypeForPrimitiveField() {
+        assertThrows(CodecConfigurationException.class, () ->
+                decodingShouldFail(getCodec(PrimitivesModel.class), "{ '_t': 'PrimitivesModel', 'myBoolean': null}"));
+    }
+
+    @Test
+    public void testInvalidTypeForModelField() {
+        assertThrows(CodecConfigurationException.class, () ->
+                decodingShouldFail(getCodec(SimpleNestedPojoModel.class), "{ '_t': 'SimpleNestedPojoModel', 'simple': 123}"));
+    }
+
+    @Test
+    public void testInvalidDiscriminatorInNestedModel() {
+        assertThrows(CodecConfigurationException.class, () ->
+                decodingShouldFail(getCodec(SimpleNestedPojoModel.class), "{ '_t': 'SimpleNestedPojoModel',"
+                        + "'simple': {'_t': 'FakeModel', 'integerField': 42, 'stringField': 'myString'}}"));
+    }
+
+    @Test
+    public void testGenericBaseClass() {
+        CodecRegistry registry = fromProviders(new ValueCodecProvider(), PojoCodecProvider.builder().automatic(true).build());
+
+        ConcreteModel model = new ConcreteModel(new ConcreteField("name1"));
+
+        String json = "{\"_t\": \"org.bson.codecs.pojo.entities.ConcreteModel\", \"field\": {\"name\": \"name1\"}}";
+        roundTrip(PojoCodecProvider.builder().automatic(true), GenericBaseModel.class, model, json);
+    }
+
+    @Test
+    public void testCannotEncodeUnspecializedClasses() {
+        CodecRegistry registry = fromProviders(getPojoCodecProviderBuilder(GenericTreeModel.class).build());
+        assertThrows(CodecConfigurationException.class, () ->
+                encode(registry.get(GenericTreeModel.class), getGenericTreeModel(), false));
+    }
+
+    @Test
+    public void testCannotDecodeUnspecializedClassesWithoutADiscriminator() {
+        assertThrows(CodecConfigurationException.class, () ->
+                decodingShouldFail(getCodec(GenericTreeModel.class),
+                        "{'field1': 'top', 'field2': 1, "
+                                + "'left': {'field1': 'left', 'field2': 2, 'left': {'field1': 'left', 'field2': 3}}, "
+                                + "'right': {'field1': 'right', 'field2': 4, 'left': {'field1': 'left', 'field2': 5}}}"));
+    }
+
+    @Test
+    public void testBsonCreatorPrimitivesAndNullValues() {
+        assertThrows(CodecConfigurationException.class, () ->
+                decodingShouldFail(getCodec(CreatorConstructorPrimitivesModel.class), "{intField: 100, stringField: 'test'}"));
+    }
+
+    @Test
+    public void testCreatorMethodThrowsExceptionModel() {
+        assertThrows(CodecConfigurationException.class, () ->
+                decodingShouldFail(getCodec(CreatorMethodThrowsExceptionModel.class),
+                        "{'integerField': 10, 'stringField': 'eleven', 'longField': {$numberLong: '12'}}"));
+    }
+
+    @Test
+    public void testCreatorConstructorThrowsExceptionModel() {
+        assertThrows(CodecConfigurationException.class, () ->
+                decodingShouldFail(getCodec(CreatorConstructorThrowsExceptionModel.class), "{}"));
+    }
+
+    @Test
+    public void testInvalidSetterModel() {
+        assertThrows(CodecConfigurationException.class, () ->
+                decodingShouldFail(getCodec(InvalidSetterArgsModel.class), "{'integerField': 42, 'stringField': 'myString'}"));
+    }
+
+    @Test
+    public void testInvalidGetterAndSetterModelEncoding() {
+        InvalidGetterAndSetterModel model = new InvalidGetterAndSetterModel(42, "myString");
+        assertThrows(CodecConfigurationException.class, () ->
+                roundTrip(getPojoCodecProviderBuilder(InvalidGetterAndSetterModel.class), model,
+                        "{'integerField': 42, 'stringField': 'myString'}"));
+    }
+
+    @Test
+    public void testInvalidGetterAndSetterModelDecoding() {
+        assertThrows(CodecConfigurationException.class, () ->
+                decodingShouldFail(getCodec(InvalidGetterAndSetterModel.class), "{'integerField': 42, 'stringField': 'myString'}"));
+    }
+
+    @Test
+    public void testInvalidBsonRepresentationStringDecoding() {
+        assertThrows(CodecConfigurationException.class, () ->
+                decodingShouldFail(getCodec(BsonRepresentationUnsupportedString.class), "{'id': 'hello', s: 3}"));
+    }
+
+    @Test
+    public void testInvalidBsonRepresentationStringEncoding() {
+        assertThrows(CodecConfigurationException.class, () ->
+                encodesTo(getPojoCodecProviderBuilder(BsonRepresentationUnsupportedString.class),
+                        new BsonRepresentationUnsupportedString("1"), ""));
+    }
+
+    @Test
+    public void testInvalidBsonRepresentationIntDecoding() {
+        assertThrows(CodecConfigurationException.class, () ->
+                decodingShouldFail(getCodec(BsonRepresentationUnsupportedInt.class), "{'id': 'hello', age: '3'}"));
+    }
+
+    @Test
+    public void testStringIdIsNotObjectId() {
+        assertThrows(IllegalArgumentException.class, () ->
+                encodesTo(getCodec(BsonRepresentationModel.class), new BsonRepresentationModel("notanobjectid", 1), null));
+    }
+
+    @Test
+    public void testRoundTripWithoutBsonAnnotation() {
+        roundTrip(getPojoCodecProviderBuilder(BsonRepresentationModel.class).conventions(asList(CLASS_AND_PROPERTY_CONVENTION)),
+                new BsonRepresentationModel("hello", 1), "{'_id': 'hello', 'age': 1}");
+    }
+
+    @Test
+    public void testMultiplePojoProviders() {
+        NestedGenericHolderFieldWithMultipleTypeParamsModel model = getNestedGenericHolderFieldWithMultipleTypeParamsModel();
+        PojoCodecProvider provider1 = PojoCodecProvider.builder().register(NestedGenericHolderFieldWithMultipleTypeParamsModel.class)
+                .build();
+        PojoCodecProvider provider2 = PojoCodecProvider.builder().register(PropertyWithMultipleTypeParamsModel.class).build();
+        PojoCodecProvider provider3 = PojoCodecProvider.builder().register(SimpleGenericsModel.class).build();
+        PojoCodecProvider provider4 = PojoCodecProvider.builder().register(GenericHolderModel.class).build();
+
+        CodecRegistry registry = fromProviders(provider1, provider2, provider3, provider4);
CodecRegistry actualRegistry = fromRegistries(fromProviders(new BsonValueCodecProvider(), new ValueCodecProvider()), registry); + + String json = "{'nested': {'myGenericField': {_t: 'PropertyWithMultipleTypeParamsModel', " + "'simpleGenericsModel': {_t: 'org.bson.codecs.pojo.entities.SimpleGenericsModel', 'myIntegerField': 42, " + "'myGenericField': {'$numberLong': '101'}, 'myListField': ['B', 'C'], 'myMapField': {'D': 2, 'E': 3, 'F': 4 }}}," + "'myLongField': {'$numberLong': '42'}}}"; + + roundTrip(actualRegistry, model, json); + } + + @Test + public void testBsonExtraElementsInvalidModel() { + assertThrows(CodecConfigurationException.class, () -> + getPojoCodecProviderBuilder(BsonExtraElementsInvalidModel.class).build()); + } + + private List<Convention> getDefaultAndUseGettersConvention() { + List<Convention> conventions = new ArrayList<>(DEFAULT_CONVENTIONS); + conventions.add(USE_GETTERS_FOR_SETTERS); + return conventions; + } + + class ObjectCodec implements Codec<Object> { + + @Override + public Object decode(final BsonReader reader, final DecoderContext decoderContext) { + throw new UnsupportedOperationException(); + } + + @Override + public void encode(final BsonWriter writer, final Object value, final EncoderContext encoderContext) { + throw new UnsupportedOperationException(); + } + + @Override + public Class<Object> getEncoderClass() { + return Object.class; + } + } + + class ObjectIdGenerator implements IdGenerator<ObjectId> { + @Override + public ObjectId generate() { + return new ObjectId("123412341234123412341234"); + } + + @Override + public Class<ObjectId> getType() { + return ObjectId.class; + } + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/PojoRoundTripTest.java b/bson/src/test/unit/org/bson/codecs/pojo/PojoRoundTripTest.java new file mode 100644 index 00000000000..53f5d363535 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/PojoRoundTripTest.java @@ -0,0 +1,578 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs.pojo; + +import org.bson.BsonDocument; +import org.bson.codecs.SimpleEnum; +import org.bson.codecs.pojo.entities.AbstractInterfaceModel; +import org.bson.codecs.pojo.entities.CollectionNestedPojoModel; +import org.bson.codecs.pojo.entities.CollectionSpecificReturnTypeCreatorModel; +import org.bson.codecs.pojo.entities.CollectionSpecificReturnTypeModel; +import org.bson.codecs.pojo.entities.ConcreteAndNestedAbstractInterfaceModel; +import org.bson.codecs.pojo.entities.ConcreteCollectionsModel; +import org.bson.codecs.pojo.entities.ConcreteInterfaceGenericModel; +import org.bson.codecs.pojo.entities.ConcreteStandAloneAbstractInterfaceModel; +import org.bson.codecs.pojo.entities.ContainsAlternativeMapAndCollectionModel; +import org.bson.codecs.pojo.entities.ConventionModel; +import org.bson.codecs.pojo.entities.DuplicateAnnotationAllowedModel; +import org.bson.codecs.pojo.entities.FieldAndPropertyTypeMismatchModel; +import org.bson.codecs.pojo.entities.GenericHolderModel; +import org.bson.codecs.pojo.entities.GenericTreeModel; +import org.bson.codecs.pojo.entities.InterfaceBasedModel; +import org.bson.codecs.pojo.entities.InterfaceModelImpl; +import org.bson.codecs.pojo.entities.InterfaceUpperBoundsModelAbstractImpl; +import org.bson.codecs.pojo.entities.InterfaceWithDefaultMethodModelImpl; +import org.bson.codecs.pojo.entities.InterfaceWithOverrideDefaultMethodModelImpl; +import org.bson.codecs.pojo.entities.ListGenericExtendedModel; +import org.bson.codecs.pojo.entities.ListListGenericExtendedModel; +import org.bson.codecs.pojo.entities.ListMapGenericExtendedModel; +import org.bson.codecs.pojo.entities.MapGenericExtendedModel; +import org.bson.codecs.pojo.entities.MapListGenericExtendedModel; +import org.bson.codecs.pojo.entities.MapMapGenericExtendedModel; +import org.bson.codecs.pojo.entities.MultipleBoundsModel; +import org.bson.codecs.pojo.entities.MultipleLevelGenericModel; +import org.bson.codecs.pojo.entities.NestedFieldReusingClassTypeParameter; +import org.bson.codecs.pojo.entities.NestedGenericHolderFieldWithMultipleTypeParamsModel; +import org.bson.codecs.pojo.entities.NestedGenericHolderMapModel; +import org.bson.codecs.pojo.entities.NestedGenericHolderModel; +import org.bson.codecs.pojo.entities.NestedGenericHolderSimpleGenericsModel; +import org.bson.codecs.pojo.entities.NestedGenericTreeModel; +import org.bson.codecs.pojo.entities.NestedMultipleLevelGenericModel; +import org.bson.codecs.pojo.entities.NestedReusedGenericsModel; +import org.bson.codecs.pojo.entities.NestedSelfReferentialGenericHolderModel; +import org.bson.codecs.pojo.entities.NestedSelfReferentialGenericModel; +import org.bson.codecs.pojo.entities.NestedSimpleIdModel; +import org.bson.codecs.pojo.entities.PrimitivesModel; +import org.bson.codecs.pojo.entities.PropertyReusingClassTypeParameter; +import org.bson.codecs.pojo.entities.PropertySelectionModel; +import org.bson.codecs.pojo.entities.PropertyWithMultipleTypeParamsModel; +import org.bson.codecs.pojo.entities.ReusedGenericsModel; +import org.bson.codecs.pojo.entities.SelfReferentialGenericModel; +import org.bson.codecs.pojo.entities.ShapeHolderCircleModel; +import org.bson.codecs.pojo.entities.ShapeHolderModel; +import org.bson.codecs.pojo.entities.ShapeModelAbstract; +import org.bson.codecs.pojo.entities.ShapeModelCircle; +import org.bson.codecs.pojo.entities.ShapeModelRectangle; +import org.bson.codecs.pojo.entities.SimpleEnumModel; +import org.bson.codecs.pojo.entities.SimpleGenericsModel; +import 
org.bson.codecs.pojo.entities.SimpleIdImmutableModel; +import org.bson.codecs.pojo.entities.SimpleIdModel; +import org.bson.codecs.pojo.entities.SimpleModel; +import org.bson.codecs.pojo.entities.SimpleNestedPojoModel; +import org.bson.codecs.pojo.entities.SimpleWithStaticModel; +import org.bson.codecs.pojo.entities.TreeWithIdModel; +import org.bson.codecs.pojo.entities.UpperBoundsConcreteModel; +import org.bson.codecs.pojo.entities.conventions.AnnotationBsonPropertyIdModel; +import org.bson.codecs.pojo.entities.conventions.BsonExtraElementsMapModel; +import org.bson.codecs.pojo.entities.conventions.BsonExtraElementsModel; +import org.bson.codecs.pojo.entities.conventions.BsonIgnoreDuplicatePropertyMultipleTypes; +import org.bson.codecs.pojo.entities.conventions.BsonIgnoreInvalidMapModel; +import org.bson.codecs.pojo.entities.conventions.BsonIgnoreSyntheticProperty; +import org.bson.codecs.pojo.entities.conventions.BsonRepresentationModel; +import org.bson.codecs.pojo.entities.conventions.CollectionDiscriminatorAbstractClassesModel; +import org.bson.codecs.pojo.entities.conventions.CollectionDiscriminatorInterfacesModel; +import org.bson.codecs.pojo.entities.conventions.CreatorAllFinalFieldsModel; +import org.bson.codecs.pojo.entities.conventions.CreatorConstructorIdModel; +import org.bson.codecs.pojo.entities.conventions.CreatorConstructorLegacyBsonPropertyModel; +import org.bson.codecs.pojo.entities.conventions.CreatorConstructorModel; +import org.bson.codecs.pojo.entities.conventions.CreatorConstructorRenameModel; +import org.bson.codecs.pojo.entities.conventions.CreatorInSuperClassModel; +import org.bson.codecs.pojo.entities.conventions.CreatorInSuperClassModelImpl; +import org.bson.codecs.pojo.entities.conventions.CreatorMethodModel; +import org.bson.codecs.pojo.entities.conventions.CreatorNoArgsConstructorModel; +import org.bson.codecs.pojo.entities.conventions.CreatorNoArgsMethodModel; +import org.bson.codecs.pojo.entities.conventions.InterfaceModel; +import org.bson.codecs.pojo.entities.conventions.InterfaceModelImplA; +import org.bson.codecs.pojo.entities.conventions.InterfaceModelImplB; +import org.bson.codecs.pojo.entities.conventions.Subclass1Model; +import org.bson.codecs.pojo.entities.conventions.Subclass2Model; +import org.bson.codecs.pojo.entities.conventions.SuperClassModel; +import org.bson.types.ObjectId; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Stream; + +import static java.lang.String.format; +import static java.util.Arrays.asList; + +public final class PojoRoundTripTest extends PojoTestCase { + + @ParameterizedTest(name = "{0}") + @MethodSource("data") + public void test(final String name, final Object model, final String json, final PojoCodecProvider.Builder builder) { + roundTrip(builder, model, json); + threadedRoundTrip(builder, model, json); + } + + private static List<TestData> testCases() { + List<TestData> data = new ArrayList<>(); + data.add(new TestData("Simple model", getSimpleModel(), PojoCodecProvider.builder().register(SimpleModel.class), + SIMPLE_MODEL_JSON)); + + data.add(new TestData("Simple model with statics", new SimpleWithStaticModel(42, "myString"), + PojoCodecProvider.builder().register(SimpleWithStaticModel.class), + SIMPLE_MODEL_JSON)); + + data.add(new TestData("Property selection 
model", new PropertySelectionModel(), + getPojoCodecProviderBuilder(PropertySelectionModel.class), + "{'finalStringField': 'finalStringField', 'stringField': 'stringField'}")); + + data.add(new TestData("Conventions default", getConventionModel(), + getPojoCodecProviderBuilder(ConventionModel.class, SimpleModel.class), + "{'_id': 'id', '_cls': 'AnnotatedConventionModel', 'myFinalField': 10, 'myIntField': 10," + + "'child': {'_id': 'child', 'myFinalField': 10, 'myIntField': 10," + + "'model': {'integerField': 42, 'stringField': 'myString'}}}")); + + data.add(new TestData("BsonIgnore invalid map", new BsonIgnoreInvalidMapModel("myString"), + getPojoCodecProviderBuilder(BsonIgnoreInvalidMapModel.class), + "{stringField: 'myString'}")); + + data.add(new TestData("Interfaced based model", new InterfaceModelImpl("a", "b"), + getPojoCodecProviderBuilder(InterfaceModelImpl.class), + "{'propertyA': 'a', 'propertyB': 'b'}")); + + data.add(new TestData("Interfaced based model with default method", new InterfaceWithDefaultMethodModelImpl("a", + "c"), + getPojoCodecProviderBuilder(InterfaceWithDefaultMethodModelImpl.class), + "{'propertyA': 'a', 'propertyC': 'c'}")); + + data.add(new TestData("Interfaced based model with override default method", + new InterfaceWithOverrideDefaultMethodModelImpl("a", "c-override"), + getPojoCodecProviderBuilder(InterfaceWithOverrideDefaultMethodModelImpl.class), + "{'propertyA': 'a', 'propertyC': 'c-override'}")); + + data.add(new TestData("Interfaced based model with bound", new InterfaceUpperBoundsModelAbstractImpl("someName", + new InterfaceModelImpl("a", "b")), + getPojoCodecProviderBuilder(InterfaceUpperBoundsModelAbstractImpl.class, InterfaceModelImpl.class), + "{'name': 'someName', 'nestedModel': {'propertyA': 'a', 'propertyB': 'b'}}")); + + data.add(new TestData("Interface concrete and abstract model", + new ConcreteAndNestedAbstractInterfaceModel("A", new ConcreteAndNestedAbstractInterfaceModel("B", + new ConcreteStandAloneAbstractInterfaceModel("C"))), + getPojoCodecProviderBuilder(InterfaceBasedModel.class, AbstractInterfaceModel.class, + ConcreteAndNestedAbstractInterfaceModel.class, ConcreteStandAloneAbstractInterfaceModel.class), + "{'_t': 'org.bson.codecs.pojo.entities.ConcreteAndNestedAbstractInterfaceModel', 'name': 'A', " + + "'child': {'_t': 'org.bson.codecs.pojo.entities.ConcreteAndNestedAbstractInterfaceModel', 'name': 'B', " + + " 'child': {'_t': 'org.bson.codecs.pojo.entities.ConcreteStandAloneAbstractInterfaceModel', 'name': 'C'}}}}")); + + data.add(new TestData("Concrete generic interface model", new ConcreteInterfaceGenericModel("someValue"), + getPojoCodecProviderBuilder(ConcreteInterfaceGenericModel.class), "{propertyA: 'someValue'}")); + + data.add(new TestData("Primitives model", getPrimitivesModel(), + getPojoCodecProviderBuilder(PrimitivesModel.class), + "{ 'myBoolean': true, 'myByte': 1, 'myCharacter': '1', 'myDouble': 1.0, 'myFloat': 2.0, 'myInteger': 3, " + + "'myLong': { '$numberLong': '5' }, 'myShort': 6}")); + + data.add(new TestData("Concrete collections model", getConcreteCollectionsModel(), + getPojoCodecProviderBuilder(ConcreteCollectionsModel.class), + "{'collection': [1, 2, 3], 'list': [4, 5, 6], 'linked': [7, 8, 9], 'map': {'A': 1.1, 'B': 2.2, 'C': 3.3}," + + "'concurrent': {'D': 4.4, 'E': 5.5, 'F': 6.6}}")); + + data.add(new TestData("Handling of nulls inside collections", getConcreteCollectionsModelWithNulls(), + getPojoCodecProviderBuilder(ConcreteCollectionsModel.class), + "{'collection': [1, null, 3], 'list': [4, null, 
6], 'linked': [null, 8, 9], 'map': {'A': 1.1, 'B': null, 'C': 3.3}}")); + + data.add(new TestData("Concrete specific return collection type model through BsonCreator", + new CollectionSpecificReturnTypeCreatorModel(asList("foo", "bar")), + getPojoCodecProviderBuilder(CollectionSpecificReturnTypeCreatorModel.class), + "{'properties': ['foo', 'bar']}")); + + data.add(new TestData("Concrete specific return collection type model through getter and setter", + new CollectionSpecificReturnTypeModel(asList("foo", "bar")), + getPojoCodecProviderBuilder(CollectionSpecificReturnTypeModel.class), + "{'properties': ['foo', 'bar']}")); + + data.add(new TestData("Concrete specific return collection type model", getConcreteCollectionsModel(), + getPojoCodecProviderBuilder(ConcreteCollectionsModel.class), + "{'collection': [1, 2, 3], 'list': [4, 5, 6], 'linked': [7, 8, 9], 'map': {'A': 1.1, 'B': 2.2, 'C': 3.3}," + + "'concurrent': {'D': 4.4, 'E': 5.5, 'F': 6.6}}")); + + data.add(new TestData("Nested simple", getSimpleNestedPojoModel(), + getPojoCodecProviderBuilder(SimpleNestedPojoModel.class, SimpleModel.class), + "{'simple': " + SIMPLE_MODEL_JSON + "}")); + + data.add(new TestData("Nested collection", getCollectionNestedPojoModel(), + getPojoCodecProviderBuilder(CollectionNestedPojoModel.class, SimpleModel.class), + "{ 'listSimple': [" + SIMPLE_MODEL_JSON + "]," + + "'listListSimple': [[" + SIMPLE_MODEL_JSON + "]]," + + "'setSimple': [" + SIMPLE_MODEL_JSON + "]," + + "'setSetSimple': [[" + SIMPLE_MODEL_JSON + "]]," + + "'sortedSetSimple': [" + SIMPLE_MODEL_JSON + "]," + + "'mapSimple': {'s': " + SIMPLE_MODEL_JSON + "}," + + "'mapMapSimple': {'ms': {'s': " + SIMPLE_MODEL_JSON + "}}," + + "'mapListSimple': {'ls': [" + SIMPLE_MODEL_JSON + "]}," + + "'mapListMapSimple': {'lm': [{'s': " + SIMPLE_MODEL_JSON + "}]}," + + "'mapSetSimple': {'s': [" + SIMPLE_MODEL_JSON + "]}," + + "'listMapSimple': [{'s': " + SIMPLE_MODEL_JSON + "}]," + + "'listMapListSimple': [{'ls': [" + SIMPLE_MODEL_JSON + "]}]," + + "'listMapSetSimple': [{'s': [" + SIMPLE_MODEL_JSON + "]}]," + + "}")); + + data.add(new TestData("Nested collection", getCollectionNestedPojoModelWithNulls(), + getPojoCodecProviderBuilder(CollectionNestedPojoModel.class, SimpleModel.class), + "{ 'listListSimple': [ null ]," + + "'setSetSimple': [ null ]," + + "'mapMapSimple': {'ms': null}," + + "'mapListSimple': {'ls': null}," + + "'mapListMapSimple': {'lm': [null]}," + + "'mapSetSimple': {'s': null}," + + "'listMapSimple': [null]," + + "'listMapListSimple': [{'ls': null}]," + + "'listMapSetSimple': [{'s': null}]," + + "}")); + + data.add(new TestData("Nested generic holder", getNestedGenericHolderModel(), + getPojoCodecProviderBuilder(NestedGenericHolderModel.class, GenericHolderModel.class), + "{'nested': {'myGenericField': 'generic', 'myLongField': {'$numberLong': '1'}}}")); + + data.add(new TestData("Nested generic holder map", getNestedGenericHolderMapModel(), + getPojoCodecProviderBuilder(NestedGenericHolderMapModel.class, + GenericHolderModel.class, SimpleGenericsModel.class, SimpleModel.class), + "{ 'nested': { 'myGenericField': {'s': " + SIMPLE_MODEL_JSON + "}, 'myLongField': {'$numberLong': '1'}}}")); + + data.add(new TestData("Nested reused generic", getNestedReusedGenericsModel(), + getPojoCodecProviderBuilder(NestedReusedGenericsModel.class, ReusedGenericsModel.class, SimpleModel.class), + "{ 'nested':{ 'field1':{ '$numberLong':'1' }, 'field2':[" + SIMPLE_MODEL_JSON + "], " + + "'field3':'field3', 'field4':42, 'field5':'field5', 'field6':[" + 
SIMPLE_MODEL_JSON + ", " + + SIMPLE_MODEL_JSON + "], 'field7':{ '$numberLong':'2' }, 'field8':'field8' } }")); + + + data.add(new TestData("Nested generic holder with multiple types", getNestedGenericHolderFieldWithMultipleTypeParamsModel(), + getPojoCodecProviderBuilder(NestedGenericHolderFieldWithMultipleTypeParamsModel.class, + PropertyWithMultipleTypeParamsModel.class, SimpleGenericsModel.class, GenericHolderModel.class), + "{'nested': {'myGenericField': {_t: 'PropertyWithMultipleTypeParamsModel', " + + "'simpleGenericsModel': {_t: 'org.bson.codecs.pojo.entities.SimpleGenericsModel', 'myIntegerField': 42, " + + "'myGenericField': {'$numberLong': '101'}, 'myListField': ['B', 'C'], 'myMapField': {'D': 2, 'E': 3, 'F': 4 }}}," + + "'myLongField': {'$numberLong': '42'}}}")); + + + data.add(new TestData("Nested generic tree", new NestedGenericTreeModel(42, getGenericTreeModel()), + getPojoCodecProviderBuilder(NestedGenericTreeModel.class, GenericTreeModel.class), + "{'intField': 42, 'nested': {'field1': 'top', 'field2': 1, " + + "'left': {'field1': 'left', 'field2': 2, 'left': {'field1': 'left', 'field2': 3}}, " + + "'right': {'field1': 'right', 'field2': 4, 'left': {'field1': 'left', 'field2': 5}}}}")); + + data.add(new TestData("Nested multiple level", + new NestedMultipleLevelGenericModel(42, new MultipleLevelGenericModel<>("string", getGenericTreeModel())), + getPojoCodecProviderBuilder(NestedMultipleLevelGenericModel.class, MultipleLevelGenericModel.class, GenericTreeModel.class), + "{'intField': 42, 'nested': {'stringField': 'string', 'nested': {'field1': 'top', 'field2': 1, " + + "'left': {'field1': 'left', 'field2': 2, 'left': {'field1': 'left', 'field2': 3}}, " + + "'right': {'field1': 'right', 'field2': 4, 'left': {'field1': 'left', 'field2': 5}}}}}")); + + data.add(new TestData("Nested Generics holder", getNestedGenericHolderSimpleGenericsModel(), + getPojoCodecProviderBuilder(NestedGenericHolderSimpleGenericsModel.class, GenericHolderModel.class, + SimpleGenericsModel.class, SimpleModel.class), + "{'nested': {'myGenericField': {'myIntegerField': 42, 'myGenericField': 42," + + " 'myListField': [[" + SIMPLE_MODEL_JSON + "]], " + + " 'myMapField': {'A': {'A': " + SIMPLE_MODEL_JSON + "}}}," + + " 'myLongField': {'$numberLong': '42' }}}")); + + data.add(new TestData("Nested property reusing type parameter", + new NestedFieldReusingClassTypeParameter(new PropertyReusingClassTypeParameter<>(getGenericTreeModelStrings())), + getPojoCodecProviderBuilder(NestedFieldReusingClassTypeParameter.class, PropertyReusingClassTypeParameter.class, + GenericTreeModel.class), + "{'nested': {'tree': {'field1': 'top', 'field2': '1', " + + "'left': {'field1': 'left', 'field2': '2', 'left': {'field1': 'left', 'field2': '3'}}, " + + "'right': {'field1': 'right', 'field2': '4', 'left': {'field1': 'left', 'field2': '5'}}}}}")); + + data.add(new TestData("Abstract shape model - circle", + new ShapeHolderModel(getShapeModelCircle()), getPojoCodecProviderBuilder(ShapeModelAbstract.class, + ShapeModelCircle.class, ShapeModelRectangle.class, ShapeHolderModel.class), + "{'shape': {'_t': 'org.bson.codecs.pojo.entities.ShapeModelCircle', 'color': 'orange', 'radius': 4.2}}")); + + data.add(new TestData("Abstract shape model - rectangle", + new ShapeHolderModel(getShapeModelRectangle()), getPojoCodecProviderBuilder(ShapeModelAbstract.class, + ShapeModelCircle.class, ShapeModelRectangle.class, ShapeHolderModel.class), + "{'shape': {'_t': 'org.bson.codecs.pojo.entities.ShapeModelRectangle', 'color': 'green', 'width': 
22.1, 'height': " + + "105.0}}")); + + data.add(new TestData("Upper bounds", + new UpperBoundsConcreteModel(1L), getPojoCodecProviderBuilder(UpperBoundsConcreteModel.class), + "{'myGenericField': {'$numberLong': '1'}}")); + + data.add(new TestData("Multiple bounds", getMultipleBoundsModel(), getPojoCodecProviderBuilder(MultipleBoundsModel.class), + "{'level1' : 2.2, 'level2': [1, 2, 3], 'level3': {key: 'value'}}")); + + data.add(new TestData("Self referential", getNestedSelfReferentialGenericHolderModel(), + getPojoCodecProviderBuilder(NestedSelfReferentialGenericHolderModel.class, NestedSelfReferentialGenericModel.class, + SelfReferentialGenericModel.class), + "{'nested': { 't': true, 'v': {'$numberLong': '42'}, 'z': 44.0, " + + "'selfRef1': {'t': true, 'v': {'$numberLong': '33'}, 'child': {'t': {'$numberLong': '44'}, 'v': false}}, " + + "'selfRef2': {'t': true, 'v': 3.14, 'child': {'t': 3.42, 'v': true}}}}")); + + data.add(new TestData("Creator constructor", new CreatorConstructorModel(asList(10, 11), "twelve", 13), + getPojoCodecProviderBuilder(CreatorConstructorModel.class), + "{'integersField': [10, 11], 'stringField': 'twelve', 'longField': {$numberLong: '13'}}")); + + data.add(new TestData("Creator constructor with legacy BsonProperty using name", + new CreatorConstructorLegacyBsonPropertyModel(asList(10, 11), "twelve", 13), + getPojoCodecProviderBuilder(CreatorConstructorLegacyBsonPropertyModel.class), + "{'integersField': [10, 11], 'stringField': 'twelve', 'longField': {$numberLong: '13'}}")); + + data.add(new TestData("Creator constructor with rename", new CreatorConstructorRenameModel(asList(10, 11), "twelve", 13), + getPojoCodecProviderBuilder(CreatorConstructorRenameModel.class), + "{'integerList': [10, 11], 'stringField': 'twelve', 'longField': {$numberLong: '13'}}")); + + data.add(new TestData("Creator constructor with ID", new CreatorConstructorIdModel("1234-34567-890", asList(10, 11), "twelve", 13), + getPojoCodecProviderBuilder(CreatorConstructorIdModel.class), + "{'_id': '1234-34567-890', 'integersField': [10, 11], 'stringField': 'twelve', 'longField': {$numberLong: '13'}}")); + + data.add(new TestData("Creator no-args constructor", new CreatorNoArgsConstructorModel(40, "one", 42), + getPojoCodecProviderBuilder(CreatorNoArgsConstructorModel.class), + "{'integerField': 40, 'stringField': 'one', 'longField': {$numberLong: '42'}}")); + + data.add(new TestData("Creator method", new CreatorMethodModel(30, "two", 32), + getPojoCodecProviderBuilder(CreatorMethodModel.class), + "{'integerField': 30, 'stringField': 'two', 'longField': {$numberLong: '32'}}")); + + data.add(new TestData("Creator method", CreatorMethodModel.create(30), + getPojoCodecProviderBuilder(CreatorMethodModel.class), + "{'integerField': 30, 'longField': {$numberLong: '0'}}")); + + data.add(new TestData("Creator no-args method", new CreatorNoArgsMethodModel(10, "one", 11), + getPojoCodecProviderBuilder(CreatorNoArgsMethodModel.class), + "{'integerField': 10, 'stringField': 'one', 'longField': {$numberLong: '11'}}")); + + data.add(new TestData("Creator all final", new CreatorAllFinalFieldsModel("pId", "Ada", "Lovelace"), + getPojoCodecProviderBuilder(CreatorAllFinalFieldsModel.class), + "{'_id': 'pId', '_t': 'org.bson.codecs.pojo.entities.conventions.CreatorAllFinalFieldsModel', " + + "'firstName': 'Ada', 'lastName': 'Lovelace'}")); + + data.add(new TestData("Creator all final with nulls", new CreatorAllFinalFieldsModel("pId", "Ada", null), + getPojoCodecProviderBuilder(CreatorAllFinalFieldsModel.class), + 
"{'_id': 'pId', '_t': 'org.bson.codecs.pojo.entities.conventions.CreatorAllFinalFieldsModel', 'firstName': 'Ada'}")); + + data.add(new TestData("Can handle custom Maps and Collections", + new ContainsAlternativeMapAndCollectionModel(BsonDocument.parse("{customList: [1,2,3], customMap: {'field': 'value'}}")), + getPojoCodecProviderBuilder(ContainsAlternativeMapAndCollectionModel.class), + "{customList: [1,2,3], customMap: {'field': 'value'}}")); + + data.add(new TestData("Collection of discriminators abstract classes", new CollectionDiscriminatorAbstractClassesModel().setList( + asList(new Subclass1Model().setName("abc").setValue(true), new Subclass2Model().setInteger(234).setValue(false))).setMap( + Collections.singletonMap("key", new Subclass2Model().setInteger(123).setValue(true))), + getPojoCodecProviderBuilder(CollectionDiscriminatorAbstractClassesModel.class, SuperClassModel.class, Subclass1Model.class, + Subclass2Model.class), + "{list: [{_t: 'org.bson.codecs.pojo.entities.conventions.Subclass1Model', value: true, name: 'abc'}," + + "{_t: 'org.bson.codecs.pojo.entities.conventions.Subclass2Model', value: false, integer: 234}]," + + "map: {key: {_t: 'org.bson.codecs.pojo.entities.conventions.Subclass2Model', value: true, integer: 123}}}")); + + data.add(new TestData("Collection of discriminators interfaces", new CollectionDiscriminatorInterfacesModel().setList( + asList(new InterfaceModelImplA().setName("abc").setValue(true), + new InterfaceModelImplB().setInteger(234).setValue(false))).setMap( + Collections.singletonMap("key", new InterfaceModelImplB().setInteger(123).setValue(true))), + getPojoCodecProviderBuilder(CollectionDiscriminatorInterfacesModel.class, InterfaceModelImplA.class, + InterfaceModelImplB.class, InterfaceModel.class), + "{list: [{_t: 'org.bson.codecs.pojo.entities.conventions.InterfaceModelImplA', value: true, name: 'abc'}," + + "{_t: 'org.bson.codecs.pojo.entities.conventions.InterfaceModelImplB', value: false, integer: 234}]," + + "map: {key: {_t: 'org.bson.codecs.pojo.entities.conventions.InterfaceModelImplB', value: true, integer: 123}}}")); + + data.add(new TestData("Creator in super class factory method", + CreatorInSuperClassModel.newInstance("a", "b"), + getPojoCodecProviderBuilder(CreatorInSuperClassModelImpl.class), + "{'propertyA': 'a', 'propertyB': 'b'}")); + + data.add(new TestData("Primitive field type doesn't match private property", + new FieldAndPropertyTypeMismatchModel("foo"), + getPojoCodecProviderBuilder(FieldAndPropertyTypeMismatchModel.class), + "{'stringField': 'foo'}")); + + data.add(new TestData("Enums support", + new SimpleEnumModel(SimpleEnum.BRAVO), + getPojoCodecProviderBuilder(SimpleEnumModel.class), + "{ 'myEnum': 'BRAVO' }")); + + data.add(new TestData("AnnotationBsonPropertyIdModel", new AnnotationBsonPropertyIdModel(99L), + getPojoCodecProviderBuilder(AnnotationBsonPropertyIdModel.class), + "{'id': {'$numberLong': '99' }}")); + + data.add(new TestData("Shape model - circle", + new ShapeHolderCircleModel(getShapeModelCircle()), + getPojoCodecProviderBuilder(ShapeModelCircle.class, ShapeHolderCircleModel.class), + "{'shape': {'_t': 'org.bson.codecs.pojo.entities.ShapeModelCircle', 'color': 'orange', 'radius': 4.2}}")); + + data.add(new TestData("BsonIgnore synthentic property", + new BsonIgnoreSyntheticProperty("string value"), + getPojoCodecProviderBuilder(BsonIgnoreSyntheticProperty.class).conventions(Conventions.DEFAULT_CONVENTIONS), + "{stringField: 'string value'}")); + + data.add(new TestData("SimpleIdModel with existing id", 
+ new SimpleIdModel(new ObjectId("123412341234123412341234"), 42, "myString"), + getPojoCodecProviderBuilder(SimpleIdModel.class).conventions(Conventions.DEFAULT_CONVENTIONS), + "{'_id': {'$oid': '123412341234123412341234'}, 'integerField': 42, 'stringField': 'myString'}")); + + + data.add(new TestData("SimpleIdImmutableModel with existing id", + new SimpleIdImmutableModel(new ObjectId("123412341234123412341234"), 42, "myString"), + getPojoCodecProviderBuilder(SimpleIdImmutableModel.class).conventions(Conventions.DEFAULT_CONVENTIONS), + "{'_id': {'$oid': '123412341234123412341234'}, 'integerField': 42, 'stringField': 'myString'}")); + + data.add(new TestData("NestedSimpleIdModel", + new NestedSimpleIdModel(new SimpleIdModel(42, "myString")), + getPojoCodecProviderBuilder(NestedSimpleIdModel.class, SimpleIdModel.class).conventions(Conventions.DEFAULT_CONVENTIONS), + "{'nestedSimpleIdModel': {'integerField': 42, 'stringField': 'myString'}}")); + + data.add(new TestData("TreeWithIdModel", + new TreeWithIdModel(new ObjectId("123412341234123412341234"), "top", + new TreeWithIdModel("left-1", new TreeWithIdModel("left-2"), null), new TreeWithIdModel("right-1")), + getPojoCodecProviderBuilder(TreeWithIdModel.class).conventions(Conventions.DEFAULT_CONVENTIONS), + "{'_id': {'$oid': '123412341234123412341234'}, 'level': 'top'," + + "'left': {'level': 'left-1', 'left': {'level': 'left-2'}}," + + "'right': {'level': 'right-1'}}")); + + data.add(new TestData("DuplicateAnnotationAllowedModel", + new DuplicateAnnotationAllowedModel("abc"), + getPojoCodecProviderBuilder(DuplicateAnnotationAllowedModel.class).conventions(Conventions.DEFAULT_CONVENTIONS), + "{'_id': 'abc'}")); + + data.add(new TestData("BsonIgnore duplicate property with multiple types", + new BsonIgnoreDuplicatePropertyMultipleTypes("string value"), + getPojoCodecProviderBuilder(BsonIgnoreDuplicatePropertyMultipleTypes.class).conventions(Conventions.DEFAULT_CONVENTIONS), + "{stringField: 'string value'}")); + + data.add(new TestData("Can handle concrete generic list types", + new ListGenericExtendedModel(asList(1, 2, 3)), + getPojoCodecProviderBuilder(ListGenericExtendedModel.class), + "{values: [1, 2, 3]}")); + + data.add(new TestData("Can handle concrete nested generic list types", + new ListListGenericExtendedModel(asList(asList(1, 2, 3), asList(4, 5, 6))), + getPojoCodecProviderBuilder(ListListGenericExtendedModel.class), + "{values: [[1, 2, 3], [4, 5, 6]]}")); + + + data.add(new TestData("Can handle concrete generic map types", + new MapGenericExtendedModel(new HashMap<String, Integer>() {{ + put("a", 1); + put("b", 2); + }}), + getPojoCodecProviderBuilder(MapGenericExtendedModel.class), + "{values: {a: 1, b: 2}}")); + + data.add(new TestData("Can handle concrete nested generic map types", + new MapMapGenericExtendedModel(new HashMap<String, Map<String, Integer>>() {{ + put("a", new HashMap<String, Integer>() {{ + put("aa", 1); + put("ab", 2); + }}); + put("b", new HashMap<String, Integer>() {{ + put("ba", 1); + put("bb", 2); + }}); + }} + ), + getPojoCodecProviderBuilder(MapMapGenericExtendedModel.class), + "{values: {a: {aa: 1, ab: 2}, b: {ba: 1, bb: 2}}}")); + + data.add(new TestData("Can handle concrete lists with generic map types", + new ListMapGenericExtendedModel(asList(new HashMap<String, Integer>() {{ + put("a", 1); + put("b", 2); + }}, new HashMap<String, Integer>() {{ + put("c", 3); + put("d", 4); + }})), + getPojoCodecProviderBuilder(ListMapGenericExtendedModel.class), + "{values: [{a: 1, b: 2}, {c: 3, d: 4}]}")); + + + data.add(new TestData("Can handle concrete maps with generic list types", + new MapListGenericExtendedModel(new HashMap<String, List<Integer>>() {{ + put("a", asList(1, 2, 3)); + put("b", asList(4, 5, 6)); + }}), + getPojoCodecProviderBuilder(MapListGenericExtendedModel.class), + "{values: {a: [1, 2, 3], b: [4, 5, 6]}}")); + + data.add(new TestData("BsonRepresentation is encoded and decoded correctly", new BsonRepresentationModel(1), + getPojoCodecProviderBuilder(BsonRepresentationModel.class), + "{'_id': {'$oid': '111111111111111111111111'}, 'age': 1}")); + + data.add(new TestData("BsonExtraElements with no extra data", + new BsonExtraElementsModel(42, "myString", null), + getPojoCodecProviderBuilder(BsonExtraElementsModel.class), + "{'integerField': 42, 'stringField': 'myString'}")); + + data.add(new TestData("BsonExtraElements are encoded and decoded correctly", + new BsonExtraElementsModel(42, "myString", BsonDocument.parse("{a: 1, b: 2, c: [1, 2, {a: 1}]}")), + getPojoCodecProviderBuilder(BsonExtraElementsModel.class), + "{'integerField': 42, 'stringField': 'myString', 'a': 1, 'b': 2, c: [1, 2, {a: 1}]}")); + + Map<String, String> stringMap = new HashMap<>(); + stringMap.put("a", "a"); + stringMap.put("b", "b"); + data.add(new TestData("BsonExtraElements are encoded and decoded correctly to a Map", + new BsonExtraElementsMapModel(42, "myString", stringMap), + getPojoCodecProviderBuilder(BsonExtraElementsMapModel.class), + "{'integerField': 42, 'stringField': 'myString', 'a': 'a', 'b': 'b'}")); + + return data; + } + + public static Stream<Arguments> data() { + List<Arguments> data = new ArrayList<>(); + + for (TestData testData : testCases()) { + data.add(Arguments.of(format("%s", testData.getName()), testData.getModel(), testData.getJson(), testData.getBuilder())); + data.add(Arguments.of(format("%s [Auto]", testData.getName()), testData.getModel(), testData.getJson(), AUTOMATIC_BUILDER)); + data.add(Arguments.of(format("%s [Package]", testData.getName()), testData.getModel(), testData.getJson(), PACKAGE_BUILDER)); + } + return data.stream(); + } + + private static final PojoCodecProvider.Builder AUTOMATIC_BUILDER = PojoCodecProvider.builder().automatic(true); + private static final PojoCodecProvider.Builder PACKAGE_BUILDER = PojoCodecProvider.builder().register("org.bson.codecs.pojo.entities", + "org.bson.codecs.pojo.entities.conventions"); + + private static class TestData { + private final String name; + private final Object model; + private final PojoCodecProvider.Builder builder; + private final String json; + + TestData(final String name, final Object model, final PojoCodecProvider.Builder builder, final String json) { + this.name = name; + this.model = model; + this.builder = builder; + this.json = json; + } + + public String getName() { + return name; + } + + public Object getModel() { + return model; + } + + public PojoCodecProvider.Builder getBuilder() { + return builder; + } + + public String getJson() { + return json; + } + } + + +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/PojoTestCase.java b/bson/src/test/unit/org/bson/codecs/pojo/PojoTestCase.java new file mode 100644 index 00000000000..eb380bb7986 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/PojoTestCase.java @@ -0,0 +1,470 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo; + +import java.util.SortedSet; +import java.util.TreeSet; + +import org.bson.BsonBinaryReader; +import org.bson.BsonBinaryWriter; +import org.bson.BsonDocument; +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.ByteBufNIO; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.BsonValueCodecProvider; +import org.bson.codecs.Codec; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.EnumCodecProvider; +import org.bson.codecs.SimpleEnum; +import org.bson.codecs.ValueCodecProvider; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.codecs.jsr310.Jsr310CodecProvider; +import org.bson.codecs.pojo.entities.CollectionNestedPojoModel; +import org.bson.codecs.pojo.entities.ConcreteCollectionsModel; +import org.bson.codecs.pojo.entities.ConventionModel; +import org.bson.codecs.pojo.entities.GenericHolderModel; +import org.bson.codecs.pojo.entities.GenericTreeModel; +import org.bson.codecs.pojo.entities.InvalidMapModel; +import org.bson.codecs.pojo.entities.MultipleBoundsModel; +import org.bson.codecs.pojo.entities.NestedGenericHolderFieldWithMultipleTypeParamsModel; +import org.bson.codecs.pojo.entities.NestedGenericHolderMapModel; +import org.bson.codecs.pojo.entities.NestedGenericHolderModel; +import org.bson.codecs.pojo.entities.NestedGenericHolderSimpleGenericsModel; +import org.bson.codecs.pojo.entities.NestedReusedGenericsModel; +import org.bson.codecs.pojo.entities.NestedSelfReferentialGenericHolderModel; +import org.bson.codecs.pojo.entities.NestedSelfReferentialGenericModel; +import org.bson.codecs.pojo.entities.PrimitivesModel; +import org.bson.codecs.pojo.entities.PropertyWithMultipleTypeParamsModel; +import org.bson.codecs.pojo.entities.ReusedGenericsModel; +import org.bson.codecs.pojo.entities.SelfReferentialGenericModel; +import org.bson.codecs.pojo.entities.ShapeModelCircle; +import org.bson.codecs.pojo.entities.ShapeModelRectangle; +import org.bson.codecs.pojo.entities.SimpleGenericsModel; +import org.bson.codecs.pojo.entities.SimpleModel; +import org.bson.codecs.pojo.entities.SimpleNestedPojoModel; +import org.bson.io.BasicOutputBuffer; +import org.bson.io.ByteBufferBsonInput; +import org.bson.io.OutputBuffer; +import org.bson.types.ObjectId; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; + +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; +import static org.bson.codecs.pojo.Conventions.DEFAULT_CONVENTIONS; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static util.ThreadTestHelpers.executeAll; + +abstract class PojoTestCase { + + static final BsonDocumentCodec DOCUMENT_CODEC = new BsonDocumentCodec(); + + <T> void roundTrip(final T value, final 
String json) { + roundTrip(PojoCodecProvider.builder().automatic(true), value, json); + } + + <T> void roundTrip(final PojoCodecProvider.Builder builder, final T value, final String json) { + roundTrip(builder, value.getClass(), value, json); + } + + <T> void roundTrip(final PojoCodecProvider.Builder builder, final Class<?> clazz, final T value, final String json) { + encodesTo(getCodecRegistry(builder), clazz, value, json); + decodesTo(getCodecRegistry(builder), clazz, json, value); + } + + <T> void threadedRoundTrip(final PojoCodecProvider.Builder builder, final T value, final String json) { + int numberOfThreads = 5; + CodecRegistry codecRegistry = getCodecRegistry(builder); + + executeAll(numberOfThreads, () -> { + encodesTo(codecRegistry, value, json); + decodesTo(codecRegistry, json, value); + }); + } + + <T> void roundTrip(final CodecRegistry registry, final T value, final String json) { + encodesTo(registry, value, json); + decodesTo(registry, json, value); + } + + <T> void roundTrip(final CodecRegistry registry, final Class<?> clazz, final T value, final String json) { + encodesTo(registry, clazz, value, json); + decodesTo(registry, clazz, json, value); + } + + <T> void encodesTo(final PojoCodecProvider.Builder builder, final T value, final String json) { + encodesTo(builder, value, json, false); + } + + <T> void encodesTo(final PojoCodecProvider.Builder builder, final T value, final String json, final boolean collectible) { + encodesTo(getCodecRegistry(builder), value.getClass(), value, json, collectible); + } + + <T> void encodesTo(final CodecRegistry registry, final T value, final String json) { + encodesTo(registry, value.getClass(), value, json, false); + } + + <T> void encodesTo(final CodecRegistry registry, final Class<?> clazz, final T value, final String json) { + encodesTo(registry, clazz, value, json, false); + } + + @SuppressWarnings("unchecked") + <T> void encodesTo(final CodecRegistry registry, final Class<?> clazz, final T value, final String json, final boolean collectible) { + Codec<T> codec = (Codec<T>) registry.get(clazz); + encodesTo(codec, value, json, collectible); + } + + <T> void encodesTo(final Codec<T> codec, final T value, final String json) { + encodesTo(codec, value, json, false); + } + + <T> void encodesTo(final Codec<T> codec, final T value, final String json, final boolean collectible) { + OutputBuffer encoded = encode(codec, value, collectible); + + BsonDocument asBsonDocument = decode(DOCUMENT_CODEC, encoded); + assertEquals(BsonDocument.parse(json), asBsonDocument); + } + + <T> void decodesTo(final PojoCodecProvider.Builder builder, final String json, final T expected) { + decodesTo(getCodecRegistry(builder), json, expected); + } + + @SuppressWarnings("unchecked") + <T> void decodesTo(final CodecRegistry registry, final String json, final T expected) { + decodesTo(registry, expected.getClass(), json, expected); + } + + @SuppressWarnings("unchecked") + <T> void decodesTo(final CodecRegistry registry, final Class<?> clazz, final String json, final T expected) { + Codec<T> codec = (Codec<T>) registry.get(clazz); + decodesTo(codec, json, expected); + } + + <T> void decodesTo(final Codec<T> codec, final String json, final T expected) { + OutputBuffer encoded = encode(DOCUMENT_CODEC, BsonDocument.parse(json), false); + T result = decode(codec, encoded); + assertEquals(expected, result); + } + + // Used inside assertThrows: the decode call is expected to throw before the equality assertion runs. + <T> void decodingShouldFail(final Codec<T> codec, final String json) { + decodesTo(codec, json, null); + } + + <T> OutputBuffer encode(final Codec<T> codec, final T value, final boolean collectible) { + OutputBuffer buffer = new BasicOutputBuffer(); + BsonWriter writer = 
new BsonBinaryWriter(buffer); + codec.encode(writer, value, EncoderContext.builder().isEncodingCollectibleDocument(collectible).build()); + return buffer; + } + + <T> T decode(final Codec<T> codec, final OutputBuffer buffer) { + BsonBinaryReader reader = new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(buffer.toByteArray())))); + return codec.decode(reader, DecoderContext.builder().build()); + } + + static PojoCodecProvider.Builder getPojoCodecProviderBuilder(final Class<?>... classes) { + PojoCodecProvider.Builder builder = PojoCodecProvider.builder(); + for (final Class<?> clazz : classes) { + builder.register(clazz); + } + builder.conventions(DEFAULT_CONVENTIONS); + return builder; + } + + <T> PojoCodec<T> getCodec(final PojoCodecProvider.Builder builder, final Class<T> clazz) { + return (PojoCodec<T>) getCodecRegistry(builder).get(clazz); + } + + <T> PojoCodec<T> getCodec(final Class<T> clazz) { + return getCodec(getPojoCodecProviderBuilder(clazz), clazz); + } + + PojoCodecProvider.Builder getPojoCodecProviderBuilder(final ClassModelBuilder<?>... classModelBuilders) { + List<ClassModel<?>> builders = new ArrayList<>(); + for (ClassModelBuilder<?> classModelBuilder : classModelBuilders) { + builders.add(classModelBuilder.build()); + } + return PojoCodecProvider.builder().register(builders.toArray(new ClassModel<?>[builders.size()])); + } + + CodecRegistry getCodecRegistry(final PojoCodecProvider.Builder builder) { + return fromProviders(new BsonValueCodecProvider(), new ValueCodecProvider(), new Jsr310CodecProvider(), new EnumCodecProvider(), + builder.build()); + } + + static SimpleModel getSimpleModel() { + return new SimpleModel(42, "myString"); + } + + static PrimitivesModel getPrimitivesModel() { + return new PrimitivesModel(true, Byte.parseByte("1", 2), '1', 1.0, 2f, 3, 5L, (short) 6); + } + + SimpleGenericsModel<String, String, Integer> getSimpleGenericsModel() { + HashMap<String, Integer> map = new HashMap<>(); + map.put("D", 2); + map.put("E", 3); + map.put("F", 4); + + return new SimpleGenericsModel<>(42, "A", asList("B", "C"), map); + } + + static SimpleGenericsModel<Long, String, Integer> getSimpleGenericsModelAlt() { + HashMap<String, Integer> map = new HashMap<>(); + map.put("D", 2); + map.put("E", 3); + map.put("F", 4); + + return new SimpleGenericsModel<>(42, 101L, asList("B", "C"), map); + } + + static ConcreteCollectionsModel getConcreteCollectionsModel() { + Collection<Integer> collection = asList(1, 2, 3); + List<Integer> list = asList(4, 5, 6); + LinkedList<Integer> linked = new LinkedList<>(asList(7, 8, 9)); + Map<String, Double> map = new HashMap<>(); + map.put("A", 1.1); + map.put("B", 2.2); + map.put("C", 3.3); + ConcurrentHashMap<String, Double> concurrent = new ConcurrentHashMap<>(); + concurrent.put("D", 4.4); + concurrent.put("E", 5.5); + concurrent.put("F", 6.6); + + return new ConcreteCollectionsModel(collection, list, linked, map, concurrent); + } + + + static ConcreteCollectionsModel getConcreteCollectionsModelWithNulls() { + Collection<Integer> collection = asList(1, null, 3); + List<Integer> list = asList(4, null, 6); + LinkedList<Integer> linked = new LinkedList<>(asList(null, 8, 9)); + Map<String, Double> map = new HashMap<>(); + map.put("A", 1.1); + map.put("B", null); + map.put("C", 3.3); + + return new ConcreteCollectionsModel(collection, list, linked, map, null); + } + + static SimpleNestedPojoModel getSimpleNestedPojoModel() { + SimpleModel simpleModel = getSimpleModel(); + return new SimpleNestedPojoModel(simpleModel); + } + + static CollectionNestedPojoModel getCollectionNestedPojoModel() { + return getCollectionNestedPojoModel(false); + } + + static CollectionNestedPojoModel getCollectionNestedPojoModelWithNulls() { + return 
getCollectionNestedPojoModel(true); + } + + private static CollectionNestedPojoModel getCollectionNestedPojoModel(final boolean useNulls) { + List<SimpleModel> listSimple; + Set<SimpleModel> setSimple; + SortedSet<SimpleModel> sortedSetSimple; + Map<String, SimpleModel> mapSimple; + + if (useNulls) { + listSimple = null; + setSimple = null; + sortedSetSimple = null; + mapSimple = null; + } else { + SimpleModel simpleModel = getSimpleModel(); + listSimple = singletonList(simpleModel); + setSimple = new HashSet<>(listSimple); + sortedSetSimple = new TreeSet<>(listSimple); + mapSimple = new HashMap<>(); + mapSimple.put("s", simpleModel); + } + + List<List<SimpleModel>> listListSimple = singletonList(listSimple); + Set<Set<SimpleModel>> setSetSimple = new HashSet<>(singletonList(setSimple)); + + Map<String, Map<String, SimpleModel>> mapMapSimple = new HashMap<>(); + mapMapSimple.put("ms", mapSimple); + + Map<String, List<SimpleModel>> mapListSimple = new HashMap<>(); + mapListSimple.put("ls", listSimple); + + Map<String, List<Map<String, SimpleModel>>> mapListMapSimple = new HashMap<>(); + mapListMapSimple.put("lm", singletonList(mapSimple)); + + Map<String, Set<SimpleModel>> mapSetSimple = new HashMap<>(); + mapSetSimple.put("s", setSimple); + + List<Map<String, SimpleModel>> listMapSimple = singletonList(mapSimple); + List<Map<String, List<SimpleModel>>> listMapListSimple = singletonList(mapListSimple); + List<Map<String, Set<SimpleModel>>> listMapSetSimple = singletonList(mapSetSimple); + + return new CollectionNestedPojoModel(listSimple, listListSimple, setSimple, setSetSimple, sortedSetSimple, + mapSimple, mapMapSimple, mapListSimple, mapListMapSimple, mapSetSimple, listMapSimple, listMapListSimple, listMapSetSimple); + } + + static ConventionModel getConventionModel() { + SimpleModel simpleModel = getSimpleModel(); + ConventionModel child = new ConventionModel("child", null, simpleModel); + return new ConventionModel("id", child, null); + } + + static ShapeModelCircle getShapeModelCircle() { + return new ShapeModelCircle("orange", 4.2); + } + + static ShapeModelRectangle getShapeModelRectangle() { + return new ShapeModelRectangle("green", 22.1, 105.0); + } + + static MultipleBoundsModel getMultipleBoundsModel() { + HashMap<String, String> map = new HashMap<>(); + map.put("key", "value"); + List<Integer> list = asList(1, 2, 3); + return new MultipleBoundsModel(map, list, 2.2); + } + + static NestedGenericHolderFieldWithMultipleTypeParamsModel getNestedGenericHolderFieldWithMultipleTypeParamsModel() { + SimpleGenericsModel<Long, String, Integer> simple = getSimpleGenericsModelAlt(); + PropertyWithMultipleTypeParamsModel<Integer, Long, String> field = + new PropertyWithMultipleTypeParamsModel<>(simple); + GenericHolderModel<PropertyWithMultipleTypeParamsModel<Integer, Long, String>> nested = new + GenericHolderModel<>(field, 42L); + return new NestedGenericHolderFieldWithMultipleTypeParamsModel(nested); + } + + static NestedGenericHolderSimpleGenericsModel getNestedGenericHolderSimpleGenericsModel() { + SimpleModel simpleModel = getSimpleModel(); + Map<String, SimpleModel> map = new HashMap<>(); + map.put("A", simpleModel); + Map<String, Map<String, SimpleModel>> mapB = new HashMap<>(); + mapB.put("A", map); + SimpleGenericsModel<Integer, List<SimpleModel>, Map<String, SimpleModel>> simpleGenericsModel = + new SimpleGenericsModel<>(42, 42, + singletonList(singletonList(simpleModel)), mapB); + GenericHolderModel<SimpleGenericsModel<Integer, List<SimpleModel>, Map<String, SimpleModel>>> nested = + new GenericHolderModel<>(simpleGenericsModel, 42L); + + return new NestedGenericHolderSimpleGenericsModel(nested); + } + + static NestedSelfReferentialGenericHolderModel getNestedSelfReferentialGenericHolderModel() { + SelfReferentialGenericModel<Boolean, Long> selfRef1 = new SelfReferentialGenericModel<>(true, 33L, + new SelfReferentialGenericModel<>(44L, false, null)); + SelfReferentialGenericModel<Boolean, Double> selfRef2 = new SelfReferentialGenericModel<>(true, 3.14, + new SelfReferentialGenericModel<>(3.42, true, null)); + NestedSelfReferentialGenericModel<Boolean, Long, Double> nested = + new NestedSelfReferentialGenericModel<>(true, 
42L, 44.0, selfRef1, selfRef2); + return new NestedSelfReferentialGenericHolderModel(nested); + } + + static NestedGenericHolderModel getNestedGenericHolderModel() { + return new NestedGenericHolderModel(new GenericHolderModel<>("generic", 1L)); + } + + static NestedGenericHolderMapModel getNestedGenericHolderMapModel() { + Map<String, SimpleModel> mapSimple = new HashMap<>(); + mapSimple.put("s", getSimpleModel()); + return new NestedGenericHolderMapModel(new GenericHolderModel<>(mapSimple, 1L)); + } + + static NestedReusedGenericsModel getNestedReusedGenericsModel() { + return new NestedReusedGenericsModel(new ReusedGenericsModel<>(1L, + singletonList(getSimpleModel()), "field3", 42, "field5", asList(getSimpleModel(), getSimpleModel()), 2L, "field8")); + } + + static GenericTreeModel<String, Integer> getGenericTreeModel() { + return new GenericTreeModel<>("top", 1, + new GenericTreeModel<>("left", 2, + new GenericTreeModel<>("left", 3, null, null), null), + new GenericTreeModel<>("right", 4, + new GenericTreeModel<>("left", 5, null, null), null)); + } + + static GenericTreeModel<String, String> getGenericTreeModelStrings() { + return new GenericTreeModel<>("top", "1", + new GenericTreeModel<>("left", "2", + new GenericTreeModel<>("left", "3", null, null), null), + new GenericTreeModel<>("right", "4", + new GenericTreeModel<>("left", "5", null, null), null)); + } + + static InvalidMapModel getInvalidMapModel() { + Map<Integer, Integer> map = new HashMap<>(); + map.put(1, 1); + map.put(2, 2); + return new InvalidMapModel(map); + } + + static final String SIMPLE_MODEL_JSON = "{'integerField': 42, 'stringField': 'myString'}"; + + class StringToObjectIdCodec implements Codec<String> { + + @Override + public void encode(final BsonWriter writer, final String value, final EncoderContext encoderContext) { + writer.writeObjectId(new ObjectId(value)); + } + + @Override + public Class<String> getEncoderClass() { + return String.class; + } + + @Override + public String decode(final BsonReader reader, final DecoderContext decoderContext) { + return reader.readObjectId().toHexString(); + } + } + + class SimpleEnumCodec implements Codec<SimpleEnum> { + + @Override + public void encode(final BsonWriter writer, final SimpleEnum value, final EncoderContext encoderContext) { + writer.writeInt32(value.ordinal()); + } + + @Override + public Class<SimpleEnum> getEncoderClass() { + return SimpleEnum.class; + } + + @Override + public SimpleEnum decode(final BsonReader reader, final DecoderContext decoderContext) { + int ordinal = reader.readInt32(); + switch (ordinal) { + case 0: + return SimpleEnum.ALPHA; + case 1: + return SimpleEnum.BRAVO; + default: + return SimpleEnum.CHARLIE; + } + } + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/PropertyModelBuilderTest.java b/bson/src/test/unit/org/bson/codecs/pojo/PropertyModelBuilderTest.java new file mode 100644 index 00000000000..9ec8ffb96f7 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/PropertyModelBuilderTest.java @@ -0,0 +1,109 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs.pojo; + +import org.bson.codecs.IntegerCodec; +import org.bson.codecs.pojo.annotations.BsonProperty; +import org.junit.jupiter.api.Test; + +import java.lang.annotation.Annotation; +import java.util.Collections; +import java.util.List; + +import static org.bson.codecs.pojo.PojoBuilderHelper.createPropertyModelBuilder; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public final class PropertyModelBuilderTest { + + private static final String FIELD_NAME = "myFieldName"; + private static final PropertyMetadata<Integer> PROPERTY_METADATA = + new PropertyMetadata<>(FIELD_NAME, "MyClass", TypeData.builder(Integer.class).build()); + + @Test + public void testFieldMapping() throws NoSuchFieldException { + PropertyModelBuilder<Integer> propertyModelBuilder = createPropertyModelBuilder(PROPERTY_METADATA); + assertEquals(FIELD_NAME, propertyModelBuilder.getName()); + assertEquals(FIELD_NAME, propertyModelBuilder.getWriteName()); + assertTrue(propertyModelBuilder.getReadAnnotations().isEmpty()); + assertNull(propertyModelBuilder.isDiscriminatorEnabled()); + } + + @Test + public void testFieldOverrides() throws NoSuchFieldException { + IntegerCodec codec = new IntegerCodec(); + PropertyModelBuilder<Integer> propertyModelBuilder = createPropertyModelBuilder(PROPERTY_METADATA) + .codec(codec) + .writeName("altDocumentFieldName") + .readAnnotations(ANNOTATIONS) + .propertySerialization(CUSTOM_SERIALIZATION) + .typeData(TypeData.builder(Integer.class).build()) + .propertyAccessor(FIELD_ACCESSOR) + .discriminatorEnabled(false); + + assertEquals(FIELD_NAME, propertyModelBuilder.getName()); + assertEquals("altDocumentFieldName", propertyModelBuilder.getWriteName()); + assertEquals(codec, propertyModelBuilder.getCodec()); + assertEquals(Integer.class, propertyModelBuilder.getTypeData().getType()); + assertEquals(ANNOTATIONS, propertyModelBuilder.getReadAnnotations()); + assertEquals(CUSTOM_SERIALIZATION, propertyModelBuilder.getPropertySerialization()); + assertEquals(FIELD_ACCESSOR, propertyModelBuilder.getPropertyAccessor()); + assertFalse(propertyModelBuilder.isDiscriminatorEnabled()); + } + + @Test + public void testMustBeReadableOrWritable() { + assertThrows(IllegalStateException.class, () -> + createPropertyModelBuilder(PROPERTY_METADATA) + .readName(null) + .writeName(null) + .build()); + } + + private static final List<Annotation> ANNOTATIONS = Collections.singletonList( + new BsonProperty() { + @Override + public Class<? extends Annotation> annotationType() { + return BsonProperty.class; + } + + @Override + public String value() { + return ""; + } + + @Override + public boolean useDiscriminator() { + return true; + } + }); + + private static final PropertySerialization<Integer> CUSTOM_SERIALIZATION = value -> false; + + private static final PropertyAccessor<Integer> FIELD_ACCESSOR = new PropertyAccessor<Integer>() { + @Override + public <S> Integer get(final S instance) { + return null; + } + @Override + public <S> void set(final S instance, final Integer value) { + } + }; +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/PropertyModelTest.java b/bson/src/test/unit/org/bson/codecs/pojo/PropertyModelTest.java new file mode 100644 index 00000000000..bd7f2160a4d --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/PropertyModelTest.java @@ -0,0 +1,108 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo; + +import org.bson.codecs.IntegerCodec; +import org.bson.codecs.pojo.annotations.BsonProperty; +import org.junit.jupiter.api.Test; + +import java.lang.annotation.Annotation; +import java.util.Collections; +import java.util.List; + +import static org.bson.codecs.pojo.PojoBuilderHelper.createPropertyModelBuilder; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; + +public final class PropertyModelTest { + + private static final String FIELD_NAME = "myFieldName"; + private static final PropertyMetadata PROPERTY_METADATA = + new PropertyMetadata<>(FIELD_NAME, "MyClass", TypeData.builder(Integer.class).build()); + + @Test + public void testPropertyMapping() throws NoSuchFieldException { + PropertySerialization serializer = new PropertyModelSerializationImpl<>(); + PropertyAccessor accessor = new PropertyAccessorImpl<>(PROPERTY_METADATA); + PropertyModel propertyModel = createPropertyModelBuilder(PROPERTY_METADATA) + .propertySerialization(serializer) + .propertyAccessor(accessor) + .build(); + assertEquals(FIELD_NAME, propertyModel.getName()); + assertEquals(FIELD_NAME, propertyModel.getWriteName()); + assertEquals(serializer, propertyModel.getPropertySerialization()); + assertEquals(accessor, propertyModel.getPropertyAccessor()); + assertNull(propertyModel.getCodec()); + assertNull(propertyModel.getCachedCodec()); + assertNull(propertyModel.useDiscriminator()); + } + + @Test + public void testPropertyOverrides() throws NoSuchFieldException { + IntegerCodec codec = new IntegerCodec(); + PropertyModel propertyModel = createPropertyModelBuilder(PROPERTY_METADATA) + .codec(codec) + .writeName("altDocumentFieldName") + .readAnnotations(ANNOTATIONS) + .propertySerialization(CUSTOM_SERIALIZATION) + .typeData(TypeData.builder(Integer.class).build()) + .propertyAccessor(FIELD_ACCESSOR) + .discriminatorEnabled(false) + .build(); + + assertEquals(FIELD_NAME, propertyModel.getName()); + assertEquals("altDocumentFieldName", propertyModel.getWriteName()); + assertEquals(codec, propertyModel.getCodec()); + assertEquals(codec, propertyModel.getCachedCodec()); + assertEquals(Integer.class, propertyModel.getTypeData().getType()); + assertEquals(CUSTOM_SERIALIZATION, propertyModel.getPropertySerialization()); + assertEquals(FIELD_ACCESSOR, propertyModel.getPropertyAccessor()); + assertFalse(propertyModel.useDiscriminator()); + } + + private static final List ANNOTATIONS = Collections.singletonList( + new BsonProperty() { + @Override + public Class annotationType() { + return BsonProperty.class; + } + + @Override + public String value() { + return ""; + } + + @Override + public boolean useDiscriminator() { + return true; + } + }); + + private static final PropertySerialization CUSTOM_SERIALIZATION = value -> false; + + private static final PropertyAccessor FIELD_ACCESSOR = new 
+        @Override
+        public <S> Integer get(final S instance) {
+            return null;
+        }
+        @Override
+        public <S> void set(final S instance, final Integer value) {
+        }
+    };
+
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/TypeDataTest.java b/bson/src/test/unit/org/bson/codecs/pojo/TypeDataTest.java
new file mode 100644
index 00000000000..ee52e7e7bcf
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/TypeDataTest.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo;
+
+import org.bson.codecs.pojo.entities.GenericHolderModel;
+import org.junit.jupiter.api.Test;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+
+import static java.util.Collections.singletonList;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+@SuppressWarnings("rawtypes")
+public final class TypeDataTest {
+
+    @Test
+    public void testDefaults() {
+        TypeData<String> typeData = TypeData.builder(String.class).build();
+
+        assertEquals(String.class, typeData.getType());
+        assertTrue(typeData.getTypeParameters().isEmpty());
+    }
+
+    @Test
+    public void testListTypeParameters() {
+        TypeData<String> subTypeData = TypeData.builder(String.class).build();
+        TypeData<List> typeData = TypeData.builder(List.class).addTypeParameter(subTypeData).build();
+
+        assertEquals(List.class, typeData.getType());
+        assertEquals(singletonList(subTypeData), typeData.getTypeParameters());
+    }
+
+    @Test
+    public void testMapTypeParameters() {
+        TypeData<String> keyTypeData = TypeData.builder(String.class).build();
+        TypeData<Integer> valueTypeData = TypeData.builder(Integer.class).build();
+        TypeData<Map> typeData = TypeData.builder(Map.class).addTypeParameter(keyTypeData).addTypeParameter(valueTypeData).build();
+
+        assertEquals(Map.class, typeData.getType());
+        assertEquals(Arrays.<TypeData<?>>asList(keyTypeData, valueTypeData), typeData.getTypeParameters());
+    }
+
+    @Test
+    public void testToString() {
+        TypeData<String> stringType = TypeData.builder(String.class).build();
+        TypeData<Map> mapTypeData = TypeData.builder(Map.class)
+                .addTypeParameter(stringType)
+                .addTypeParameter(TypeData.builder(Map.class).addTypeParameter(stringType).addTypeParameter(stringType).build())
+                .build();
+
+        assertEquals("TypeData{type=String}", stringType.toString());
+        assertEquals("TypeData{type=Map, typeParameters=[String, Map<String, String>]}", mapTypeData.toString());
+    }
+
+    @Test
+    public void testRecursiveTypeData() {
+        TypeData<GenericHolderModel> typeData = TypeData.builder(GenericHolderModel.class)
+                .addTypeParameter(TypeData.builder(GenericHolderModel.class)
+                        .addTypeParameter(TypeData.builder(GenericHolderModel.class).build()).build()).build();
+
+        typeData.toString();
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/TypeParameterMapTest.java b/bson/src/test/unit/org/bson/codecs/pojo/TypeParameterMapTest.java
new file mode 100644
index 00000000000..6b743da53a2
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/TypeParameterMapTest.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo;
+
+import org.junit.jupiter.api.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+public final class TypeParameterMapTest {
+
+    @Test
+    public void testDefault() {
+        TypeParameterMap typeParameterMap = TypeParameterMap.builder().build();
+        assertTrue(typeParameterMap.getPropertyToClassParamIndexMap().isEmpty());
+    }
+
+    @Test
+    public void testClassParamMapsToField() {
+        TypeParameterMap typeParameterMap = TypeParameterMap.builder().addIndex(1).build();
+        Map<Integer, Either<Integer, TypeParameterMap>> expected = new HashMap<>();
+        expected.put(-1, Either.left(1));
+        assertEquals(expected, typeParameterMap.getPropertyToClassParamIndexMap());
+    }
+
+    @Test
+    public void testMapsClassAndFieldIndices() {
+        TypeParameterMap typeParameterMap = TypeParameterMap.builder().addIndex(1, 2).addIndex(2, 2).build();
+        Map<Integer, Either<Integer, TypeParameterMap>> expected = new HashMap<>();
+        expected.put(1, Either.left(2));
+        expected.put(2, Either.left(2));
+        assertEquals(expected, typeParameterMap.getPropertyToClassParamIndexMap());
+    }
+
+    @Test
+    public void testFieldCannotBeGenericAndContainTypeParameters() {
+        assertThrows(IllegalStateException.class, () ->
+                TypeParameterMap.builder().addIndex(1).addIndex(2, 2).build());
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/AbstractCollectionSpecificReturnTypeCreatorModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/AbstractCollectionSpecificReturnTypeCreatorModel.java
new file mode 100644
index 00000000000..b9b8226f522
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/AbstractCollectionSpecificReturnTypeCreatorModel.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+import java.util.List;
+
+public abstract class AbstractCollectionSpecificReturnTypeCreatorModel {
+    public abstract List<String> getProperties();
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/AbstractInterfaceModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/AbstractInterfaceModel.java
new file mode 100644
index 00000000000..309e11db127
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/AbstractInterfaceModel.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+import org.bson.codecs.pojo.annotations.BsonDiscriminator;
+
+@BsonDiscriminator
+public abstract class AbstractInterfaceModel implements InterfaceBasedModel {
+    private String name;
+
+    public AbstractInterfaceModel() {
+    }
+
+    public AbstractInterfaceModel(final String name) {
+        this.name = name;
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    public void setName(final String name) {
+        this.name = name;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        AbstractInterfaceModel that = (AbstractInterfaceModel) o;
+
+        if (getName() != null ? !getName().equals(that.getName()) : that.getName() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        return getName() != null ? getName().hashCode() : 0;
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/AsymmetricalCreatorModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/AsymmetricalCreatorModel.java
new file mode 100644
index 00000000000..8021ab3cf82
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/AsymmetricalCreatorModel.java
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+import org.bson.codecs.pojo.annotations.BsonCreator;
+import org.bson.codecs.pojo.annotations.BsonProperty;
+
+public final class AsymmetricalCreatorModel {
+    private final String baz;
+
+    @BsonCreator
+    public AsymmetricalCreatorModel(@BsonProperty("a") final String a, @BsonProperty("b") final String b) {
+        this.baz = a + b;
+    }
+
+    public String getBaz() {
+        return baz;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        AsymmetricalCreatorModel that = (AsymmetricalCreatorModel) o;
+
+        if (getBaz() != null ? !getBaz().equals(that.getBaz()) : that.getBaz() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        return getBaz() != null ? getBaz().hashCode() : 0;
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/AsymmetricalIgnoreModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/AsymmetricalIgnoreModel.java
new file mode 100644
index 00000000000..5c1005d07a3
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/AsymmetricalIgnoreModel.java
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+import org.bson.codecs.pojo.annotations.BsonIgnore;
+
+public final class AsymmetricalIgnoreModel {
+    @BsonIgnore
+    private String propertyIgnored;
+
+    private String getterIgnored;
+
+    private String setterIgnored;
+
+    private String getterAndSetterIgnored;
+
+    public AsymmetricalIgnoreModel() {
+    }
+
+    public AsymmetricalIgnoreModel(final String propertyIgnored, final String getterIgnored, final String setterIgnored,
+            final String getterAndSetterIgnored) {
+        this.propertyIgnored = propertyIgnored;
+        this.getterIgnored = getterIgnored;
+        this.setterIgnored = setterIgnored;
+        this.getterAndSetterIgnored = getterAndSetterIgnored;
+    }
+
+    public String getPropertyIgnored() {
+        return propertyIgnored;
+    }
+
+    public void setPropertyIgnored(final String propertyIgnored) {
+        this.propertyIgnored = propertyIgnored;
+    }
+
+    @BsonIgnore
+    public String getGetterIgnored() {
+        return getterIgnored;
+    }
+
+    public void setGetterIgnored(final String getterIgnored) {
+        this.getterIgnored = getterIgnored;
+    }
+
+    public String getSetterIgnored() {
+        return setterIgnored;
+    }
+
+    @BsonIgnore
+    public void setSetterIgnored(final String setterIgnored) {
+        this.setterIgnored = setterIgnored;
+    }
+
+    @BsonIgnore
+    public String getGetterAndSetterIgnored() {
+        return getterAndSetterIgnored;
+    }
+
+    @BsonIgnore
+    public void setGetterAndSetterIgnored(final String getterAndSetterIgnored) {
+        this.getterAndSetterIgnored = getterAndSetterIgnored;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        AsymmetricalIgnoreModel that = (AsymmetricalIgnoreModel) o;
+
+        if (getPropertyIgnored() != null ? !getPropertyIgnored().equals(that.getPropertyIgnored()) : that.getPropertyIgnored() != null) {
+            return false;
+        }
+        if (getGetterIgnored() != null ? !getGetterIgnored().equals(that.getGetterIgnored()) : that.getGetterIgnored() != null) {
+            return false;
+        }
+        if (getSetterIgnored() != null ? !getSetterIgnored().equals(that.getSetterIgnored()) : that.getSetterIgnored() != null) {
+            return false;
+        }
+        if (getGetterAndSetterIgnored() != null ? !getGetterAndSetterIgnored().equals(that.getGetterAndSetterIgnored()) : that
+                .getGetterAndSetterIgnored() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = getPropertyIgnored() != null ? getPropertyIgnored().hashCode() : 0;
+        result = 31 * result + (getGetterIgnored() != null ? getGetterIgnored().hashCode() : 0);
+        result = 31 * result + (getSetterIgnored() != null ? getSetterIgnored().hashCode() : 0);
+        result = 31 * result + (getGetterAndSetterIgnored() != null ? getGetterAndSetterIgnored().hashCode() : 0);
+        return result;
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/AsymmetricalModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/AsymmetricalModel.java
new file mode 100644
index 00000000000..f7366973023
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/AsymmetricalModel.java
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+import org.bson.codecs.pojo.annotations.BsonProperty;
+
+public final class AsymmetricalModel {
+    private int baz;
+
+    public AsymmetricalModel() {
+    }
+
+    public AsymmetricalModel(final int baz) {
+        this.baz = baz;
+    }
+
+    @BsonProperty("foo")
+    public int getBaz() {
+        return baz;
+    }
+
+    @BsonProperty("bar")
+    public void setBaz(final int bar) {
+        this.baz = bar;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        AsymmetricalModel that = (AsymmetricalModel) o;
+
+        if (getBaz() != that.getBaz()) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        return getBaz();
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/BaseField.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/BaseField.java
new file mode 100644
index 00000000000..4393c5f2d7f
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/BaseField.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+import java.util.Objects;
+
+public abstract class BaseField {
+    private String name;
+
+    public BaseField(final String name) {
+        this.name = name;
+    }
+
+    protected BaseField() {
+    }
+
+    public String getName() {
+        return name;
+    }
+
+    public void setName(final String name) {
+        this.name = name;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        BaseField baseField = (BaseField) o;
+        return Objects.equals(name, baseField.name);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hashCode(name);
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/BsonIdModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/BsonIdModel.java
new file mode 100644
index 00000000000..8bf98785c61
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/BsonIdModel.java
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+import org.bson.BsonObjectId;
+
+import java.util.Objects;
+
+public class BsonIdModel {
+    private BsonObjectId id;
+
+    public BsonObjectId getId() {
+        return id;
+    }
+
+    public void setId(final BsonObjectId id) {
+        this.id = id;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        BsonIdModel bsonId = (BsonIdModel) o;
+        return Objects.equals(id, bsonId.id);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(id);
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/BsonRepresentationUnsupportedInt.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/BsonRepresentationUnsupportedInt.java
new file mode 100644
index 00000000000..379c904f26f
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/BsonRepresentationUnsupportedInt.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+import org.bson.BsonType;
+import org.bson.codecs.pojo.annotations.BsonRepresentation;
+
+import java.util.Objects;
+
+public class BsonRepresentationUnsupportedInt {
+    private String id;
+
+    @BsonRepresentation(BsonType.STRING)
+    private int age;
+
+    public BsonRepresentationUnsupportedInt() {}
+
+    public BsonRepresentationUnsupportedInt(final int age) {
+        this.id = "1";
+        this.age = age;
+    }
+
+    public String getId() {
+        return id;
+    }
+
+    public void setId(final String id) {
+        this.id = id;
+    }
+
+    public int getAge() {
+        return age;
+    }
+
+    public void setAge(final int age) {
+        this.age = age;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        BsonRepresentationUnsupportedInt that = (BsonRepresentationUnsupportedInt) o;
+        return age == that.age && Objects.equals(id, that.id);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(id, age);
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/BsonRepresentationUnsupportedString.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/BsonRepresentationUnsupportedString.java
new file mode 100644
index 00000000000..f9ddbbf61e1
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/BsonRepresentationUnsupportedString.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+import org.bson.BsonType;
+import org.bson.codecs.pojo.annotations.BsonId;
+import org.bson.codecs.pojo.annotations.BsonRepresentation;
+
+import java.util.Objects;
+
+public class BsonRepresentationUnsupportedString {
+    @BsonId
+    private String id;
+
+    @BsonRepresentation(BsonType.INT32)
+    private String s;
+
+    public BsonRepresentationUnsupportedString() {
+    }
+
+    public BsonRepresentationUnsupportedString(final String s) {
+        this.id = "1";
+        this.s = s;
+    }
+
+    public void setId(final String id) {
+        this.id = id;
+    }
+
+    public void setS(final String s) {
+        this.s = s;
+    }
+
+    public String getId() {
+        return id;
+    }
+
+    public String getS() {
+        return s;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        BsonRepresentationUnsupportedString that = (BsonRepresentationUnsupportedString) o;
+        return Objects.equals(id, that.id) && Objects.equals(s, that.s);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(id, s);
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/CollectionNestedPojoModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/CollectionNestedPojoModel.java
new file mode 100644
index 00000000000..554469249d8
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/CollectionNestedPojoModel.java
@@ -0,0 +1,279 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.SortedSet;
+
+import static java.util.Collections.singletonList;
+
+public final class CollectionNestedPojoModel {
+
+    @SuppressWarnings("checkstyle:name")
+    private static List<SimpleModel> staticSimple = singletonList(new SimpleModel(1, "static"));
+    private List<SimpleModel> listSimple;
+    private List<List<SimpleModel>> listListSimple;
+
+    private Set<SimpleModel> setSimple;
+    private Set<Set<SimpleModel>> setSetSimple;
+
+    private SortedSet<SimpleModel> sortedSetSimple;
+
+    private Map<String, SimpleModel> mapSimple;
+    private Map<String, Map<String, SimpleModel>> mapMapSimple;
+
+    private Map<String, List<SimpleModel>> mapListSimple;
+    private Map<String, List<Map<String, SimpleModel>>> mapListMapSimple;
+    private Map<String, Set<SimpleModel>> mapSetSimple;
+
+    private List<Map<String, SimpleModel>> listMapSimple;
+    private List<Map<String, List<SimpleModel>>> listMapListSimple;
+    private List<Map<String, Set<SimpleModel>>> listMapSetSimple;
+
+    public CollectionNestedPojoModel() {
+    }
+
+    public CollectionNestedPojoModel(final List<SimpleModel> listSimple, final List<List<SimpleModel>> listListSimple,
+            final Set<SimpleModel> setSimple, final Set<Set<SimpleModel>> setSetSimple, final SortedSet<SimpleModel> sortedSetSimple,
+            final Map<String, SimpleModel> mapSimple, final Map<String, Map<String, SimpleModel>> mapMapSimple,
+            final Map<String, List<SimpleModel>> mapListSimple, final Map<String, List<Map<String, SimpleModel>>> mapListMapSimple,
+            final Map<String, Set<SimpleModel>> mapSetSimple, final List<Map<String, SimpleModel>> listMapSimple,
+            final List<Map<String, List<SimpleModel>>> listMapListSimple, final List<Map<String, Set<SimpleModel>>> listMapSetSimple) {
+        this.listSimple = listSimple;
+        this.listListSimple = listListSimple;
+        this.setSimple = setSimple;
+        this.setSetSimple = setSetSimple;
+        this.sortedSetSimple = sortedSetSimple;
+        this.mapSimple = mapSimple;
+        this.mapMapSimple = mapMapSimple;
+        this.mapListSimple = mapListSimple;
+        this.mapListMapSimple = mapListMapSimple;
+        this.mapSetSimple = mapSetSimple;
+        this.listMapSimple = listMapSimple;
+        this.listMapListSimple = listMapListSimple;
+        this.listMapSetSimple = listMapSetSimple;
+    }
+
+    public static List<SimpleModel> getStaticSimple() {
+        return staticSimple;
+    }
+
+    public static void setStaticSimple(final List<SimpleModel> staticSimple) {
+        CollectionNestedPojoModel.staticSimple = staticSimple;
+    }
+
+    public List<SimpleModel> getListSimple() {
+        return listSimple;
+    }
+
+    public void setListSimple(final List<SimpleModel> listSimple) {
+        this.listSimple = listSimple;
+    }
+
+    public List<List<SimpleModel>> getListListSimple() {
+        return listListSimple;
+    }
+
+    public void setListListSimple(final List<List<SimpleModel>> listListSimple) {
+        this.listListSimple = listListSimple;
+    }
+
+    public Set<SimpleModel> getSetSimple() {
+        return setSimple;
+    }
+
+    public void setSetSimple(final Set<SimpleModel> setSimple) {
+        this.setSimple = setSimple;
+    }
+
+    public SortedSet<SimpleModel> getSortedSetSimple() {
+        return sortedSetSimple;
+    }
+
+    public void setSortedSetSimple(final SortedSet<SimpleModel> sortedSetSimple) {
+        this.sortedSetSimple = sortedSetSimple;
+    }
+
+    public Set<Set<SimpleModel>> getSetSetSimple() {
+        return setSetSimple;
+    }
+
+    public void setSetSetSimple(final Set<Set<SimpleModel>> setSetSimple) {
+        this.setSetSimple = setSetSimple;
+    }
+
+    public Map<String, SimpleModel> getMapSimple() {
+        return mapSimple;
+    }
+
+    public void setMapSimple(final Map<String, SimpleModel> mapSimple) {
+        this.mapSimple = mapSimple;
+    }
+
+    public Map<String, Map<String, SimpleModel>> getMapMapSimple() {
+        return mapMapSimple;
+    }
+
+    public void setMapMapSimple(final Map<String, Map<String, SimpleModel>> mapMapSimple) {
+        this.mapMapSimple = mapMapSimple;
+    }
+
+    public Map<String, List<SimpleModel>> getMapListSimple() {
+        return mapListSimple;
+    }
+
+    public void setMapListSimple(final Map<String, List<SimpleModel>> mapListSimple) {
+        this.mapListSimple = mapListSimple;
+    }
+
+    public Map<String, List<Map<String, SimpleModel>>> getMapListMapSimple() {
+        return mapListMapSimple;
+    }
+
+    public void setMapListMapSimple(final Map<String, List<Map<String, SimpleModel>>> mapListMapSimple) {
+        this.mapListMapSimple = mapListMapSimple;
+    }
+
+    public Map<String, Set<SimpleModel>> getMapSetSimple() {
+        return mapSetSimple;
+    }
+
+    public void setMapSetSimple(final Map<String, Set<SimpleModel>> mapSetSimple) {
+        this.mapSetSimple = mapSetSimple;
+    }
+
+    public List<Map<String, SimpleModel>> getListMapSimple() {
+        return listMapSimple;
+    }
+
+    public void setListMapSimple(final List<Map<String, SimpleModel>> listMapSimple) {
+        this.listMapSimple = listMapSimple;
+    }
+
+    public List<Map<String, List<SimpleModel>>> getListMapListSimple() {
+        return listMapListSimple;
+    }
+
+    public void setListMapListSimple(final List<Map<String, List<SimpleModel>>> listMapListSimple) {
+        this.listMapListSimple = listMapListSimple;
+    }
+
+    public List<Map<String, Set<SimpleModel>>> getListMapSetSimple() {
+        return listMapSetSimple;
+    }
+
+    public void setListMapSetSimple(final List<Map<String, Set<SimpleModel>>> listMapSetSimple) {
+        this.listMapSetSimple = listMapSetSimple;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        CollectionNestedPojoModel that = (CollectionNestedPojoModel) o;
+
+        if (getListSimple() != null ? !getListSimple().equals(that.getListSimple()) : that.getListSimple() != null) {
+            return false;
+        }
+        if (getListListSimple() != null ? !getListListSimple().equals(that.getListListSimple()) : that.getListListSimple() != null) {
+            return false;
+        }
+        if (getSetSimple() != null ? !getSetSimple().equals(that.getSetSimple()) : that.getSetSimple() != null) {
+            return false;
+        }
+        if (getSetSetSimple() != null ? !getSetSetSimple().equals(that.getSetSetSimple()) : that.getSetSetSimple() != null) {
+            return false;
+        }
+        if (getSortedSetSimple() != null ? !getSortedSetSimple().equals(that.getSortedSetSimple()) : that.getSortedSetSimple() != null) {
+            return false;
+        }
+        if (getMapSimple() != null ? !getMapSimple().equals(that.getMapSimple()) : that.getMapSimple() != null) {
+            return false;
+        }
+        if (getMapMapSimple() != null ? !getMapMapSimple().equals(that.getMapMapSimple()) : that.getMapMapSimple() != null) {
+            return false;
+        }
+        if (getMapListSimple() != null ? !getMapListSimple().equals(that.getMapListSimple()) : that.getMapListSimple() != null) {
+            return false;
+        }
+        if (getMapListMapSimple() != null ? !getMapListMapSimple().equals(that.getMapListMapSimple())
+                : that.getMapListMapSimple() != null) {
+            return false;
+        }
+        if (getMapSetSimple() != null ? !getMapSetSimple().equals(that.getMapSetSimple()) : that.getMapSetSimple() != null) {
+            return false;
+        }
+        if (getListMapSimple() != null ? !getListMapSimple().equals(that.getListMapSimple()) : that.getListMapSimple() != null) {
+            return false;
+        }
+        if (getListMapListSimple() != null ? !getListMapListSimple().equals(that.getListMapListSimple())
+                : that.getListMapListSimple() != null) {
+            return false;
+        }
+        if (getListMapSetSimple() != null ? !getListMapSetSimple().equals(that.getListMapSetSimple())
+                : that.getListMapSetSimple() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = getListSimple() != null ? getListSimple().hashCode() : 0;
+        result = 31 * result + (getListListSimple() != null ? getListListSimple().hashCode() : 0);
+        result = 31 * result + (getSetSimple() != null ? getSetSimple().hashCode() : 0);
+        result = 31 * result + (getSetSetSimple() != null ? getSetSetSimple().hashCode() : 0);
+        result = 31 * result + (getSortedSetSimple() != null ? getSortedSetSimple().hashCode() : 0);
+        result = 31 * result + (getMapSimple() != null ? getMapSimple().hashCode() : 0);
+        result = 31 * result + (getMapMapSimple() != null ? getMapMapSimple().hashCode() : 0);
+        result = 31 * result + (getMapListSimple() != null ? getMapListSimple().hashCode() : 0);
+        result = 31 * result + (getMapListMapSimple() != null ? getMapListMapSimple().hashCode() : 0);
+        result = 31 * result + (getMapSetSimple() != null ? getMapSetSimple().hashCode() : 0);
+        result = 31 * result + (getListMapSimple() != null ? getListMapSimple().hashCode() : 0);
+        result = 31 * result + (getListMapListSimple() != null ? getListMapListSimple().hashCode() : 0);
+        result = 31 * result + (getListMapSetSimple() != null ? getListMapSetSimple().hashCode() : 0);
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "CollectionNestedPojoModel{"
+                + "listSimple=" + listSimple
+                + ", listListSimple=" + listListSimple
+                + ", setSimple=" + setSimple
+                + ", setSetSimple=" + setSetSimple
+                + ", setSortedSimple=" + sortedSetSimple
+                + ", mapSimple=" + mapSimple
+                + ", mapMapSimple=" + mapMapSimple
+                + ", mapListSimple=" + mapListSimple
+                + ", mapListMapSimple=" + mapListMapSimple
+                + ", mapSetSimple=" + mapSetSimple
+                + ", listMapSimple=" + listMapSimple
+                + ", listMapListSimple=" + listMapListSimple
+                + ", listMapSetSimple=" + listMapSetSimple
+                + "}";
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/CollectionSpecificReturnTypeCreatorModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/CollectionSpecificReturnTypeCreatorModel.java
new file mode 100644
index 00000000000..b14027304e0
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/CollectionSpecificReturnTypeCreatorModel.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+import org.bson.codecs.pojo.annotations.BsonCreator;
+import org.bson.codecs.pojo.annotations.BsonProperty;
+
+import java.util.List;
+import java.util.Objects;
+
+public class CollectionSpecificReturnTypeCreatorModel extends AbstractCollectionSpecificReturnTypeCreatorModel {
+    private final ImmutableList<String> properties;
+
+    @BsonCreator
+    public CollectionSpecificReturnTypeCreatorModel(@BsonProperty("properties") final List<String> properties) {
+        this.properties = ImmutableList.copyOf(properties);
+    }
+
+    public ImmutableList<String> getProperties() {
+        return properties;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        CollectionSpecificReturnTypeCreatorModel that = (CollectionSpecificReturnTypeCreatorModel) o;
+
+        return Objects.equals(properties, that.properties);
+    }
+
+    @Override
+    public int hashCode() {
+        return properties != null ? properties.hashCode() : 0;
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/CollectionSpecificReturnTypeModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/CollectionSpecificReturnTypeModel.java
new file mode 100644
index 00000000000..877e35ac91d
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/CollectionSpecificReturnTypeModel.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+import java.util.List;
+import java.util.Objects;
+
+public class CollectionSpecificReturnTypeModel {
+    private ImmutableList<String> properties;
+
+    public CollectionSpecificReturnTypeModel() {
+    }
+
+    public CollectionSpecificReturnTypeModel(final List<String> properties) {
+        this.properties = ImmutableList.copyOf(properties);
+    }
+
+    public ImmutableList<String> getProperties() {
+        return properties;
+    }
+
+    public void setProperties(final List<String> properties) {
+        this.properties = ImmutableList.copyOf(properties);
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        CollectionSpecificReturnTypeModel that = (CollectionSpecificReturnTypeModel) o;
+
+        return Objects.equals(properties, that.properties);
+    }
+
+    @Override
+    public int hashCode() {
+        return properties != null ? properties.hashCode() : 0;
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/ComposeInterfaceModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/ComposeInterfaceModel.java
new file mode 100644
index 00000000000..b2d7beb765d
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/ComposeInterfaceModel.java
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+import java.util.Objects;
+
+public class ComposeInterfaceModel {
+    private String title;
+    private InterfaceModelB nestedModel;
+
+    public ComposeInterfaceModel() {
+    }
+
+    public ComposeInterfaceModel(final String title, final InterfaceModelB nestedModel) {
+        this.title = title;
+        this.nestedModel = nestedModel;
+    }
+
+    public String getTitle() {
+        return title;
+    }
+
+    public void setTitle(final String title) {
+        this.title = title;
+    }
+
+    public InterfaceModelB getNestedModel() {
+        return nestedModel;
+    }
+
+    public void setNestedModel(final InterfaceModelB nestedModel) {
+        this.nestedModel = nestedModel;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        ComposeInterfaceModel that = (ComposeInterfaceModel) o;
+        return Objects.equals(title, that.title)
+                && Objects.equals(nestedModel, that.nestedModel);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(title, nestedModel);
+    }
+
+    @Override
+    public String toString() {
+        return "ComposeInterfaceModel{"
+                + "title='" + title + '\''
+                + ", nestedModel=" + nestedModel
+                + '}';
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/ConcreteAndNestedAbstractInterfaceModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/ConcreteAndNestedAbstractInterfaceModel.java
new file mode 100644
index 00000000000..c907a58dda2
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/ConcreteAndNestedAbstractInterfaceModel.java
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+
+import org.bson.codecs.pojo.annotations.BsonProperty;
+
+import java.util.List;
+
+public final class ConcreteAndNestedAbstractInterfaceModel extends AbstractInterfaceModel {
+    @BsonProperty(useDiscriminator = true)
+    private InterfaceBasedModel child;
+    private List<? extends InterfaceBasedModel> wildcardList;
+
+    public ConcreteAndNestedAbstractInterfaceModel() {
+    }
+
+    public ConcreteAndNestedAbstractInterfaceModel(final String name, final InterfaceBasedModel child) {
+        super(name);
+        this.child = child;
+    }
+
+    public ConcreteAndNestedAbstractInterfaceModel(final String name, final List<? extends InterfaceBasedModel> wildcardList) {
+        super(name);
+        this.child = null;
+        this.wildcardList = wildcardList;
+    }
+
+    public InterfaceBasedModel getChild() {
+        return child;
+    }
+
+    public void setChild(final InterfaceBasedModel child) {
+        this.child = child;
+    }
+
+    public List<? extends InterfaceBasedModel> getWildcardList() {
+        return wildcardList;
+    }
+
+    public void setWildcardList(final List<? extends InterfaceBasedModel> wildcardList) {
+        this.wildcardList = wildcardList;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        if (!super.equals(o)) {
+            return false;
+        }
+
+        ConcreteAndNestedAbstractInterfaceModel that = (ConcreteAndNestedAbstractInterfaceModel) o;
+
+        if (getChild() != null ? !getChild().equals(that.getChild()) : that.getChild() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = super.hashCode();
+        result = 31 * result + (getChild() != null ? getChild().hashCode() : 0);
+        return result;
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/ConcreteCollectionsModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/ConcreteCollectionsModel.java
new file mode 100644
index 00000000000..e29e45e309d
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/ConcreteCollectionsModel.java
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+import java.util.Collection;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+public final class ConcreteCollectionsModel {
+    private Collection<Integer> collection;
+    private List<Integer> list;
+    private LinkedList<Integer> linked;
+    private Map<String, Double> map;
+    private ConcurrentHashMap<String, Double> concurrent;
+
+    public ConcreteCollectionsModel() {
+    }
+
+    public ConcreteCollectionsModel(final Collection<Integer> collection, final List<Integer> list, final LinkedList<Integer> linked,
+            final Map<String, Double> map, final ConcurrentHashMap<String, Double> concurrent) {
+        this.collection = collection;
+        this.list = list;
+        this.linked = linked;
+        this.map = map;
+        this.concurrent = concurrent;
+    }
+
+    public Collection<Integer> getCollection() {
+        return collection;
+    }
+
+    public void setCollection(final Collection<Integer> collection) {
+        this.collection = collection;
+    }
+
+    public List<Integer> getList() {
+        return list;
+    }
+
+    public void setList(final List<Integer> list) {
+        this.list = list;
+    }
+
+    public LinkedList<Integer> getLinked() {
+        return linked;
+    }
+
+    public void setLinked(final LinkedList<Integer> linked) {
+        this.linked = linked;
+    }
+
+    public Map<String, Double> getMap() {
+        return map;
+    }
+
+    public void setMap(final Map<String, Double> map) {
+        this.map = map;
+    }
+
+    public ConcurrentHashMap<String, Double> getConcurrent() {
+        return concurrent;
+    }
+
+    public void setConcurrent(final ConcurrentHashMap<String, Double> concurrent) {
+        this.concurrent = concurrent;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        ConcreteCollectionsModel that = (ConcreteCollectionsModel) o;
+
+        if (getCollection() != null ? !getCollection().equals(that.getCollection()) : that.getCollection() != null) {
+            return false;
+        }
+        if (getList() != null ? !getList().equals(that.getList()) : that.getList() != null) {
+            return false;
+        }
+        if (getLinked() != null ? !getLinked().equals(that.getLinked()) : that.getLinked() != null) {
+            return false;
+        }
+        if (getMap() != null ? !getMap().equals(that.getMap()) : that.getMap() != null) {
+            return false;
+        }
+        if (getConcurrent() != null ? !getConcurrent().equals(that.getConcurrent()) : that.getConcurrent() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = getCollection() != null ? getCollection().hashCode() : 0;
+        result = 31 * result + (getList() != null ? getList().hashCode() : 0);
+        result = 31 * result + (getLinked() != null ? getLinked().hashCode() : 0);
+        result = 31 * result + (getMap() != null ? getMap().hashCode() : 0);
+        result = 31 * result + (getConcurrent() != null ? getConcurrent().hashCode() : 0);
+        return result;
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/ConcreteField.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/ConcreteField.java
new file mode 100644
index 00000000000..6fb06a70de9
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/ConcreteField.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+public class ConcreteField extends BaseField {
+
+    public ConcreteField() {
+    }
+
+    public ConcreteField(final String name) {
+        super(name);
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/ConcreteInterfaceGenericModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/ConcreteInterfaceGenericModel.java
new file mode 100644
index 00000000000..9f2799db6eb
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/ConcreteInterfaceGenericModel.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+import java.util.Objects;
+
+public class ConcreteInterfaceGenericModel implements InterfaceGenericModel<String> {
+    private String property;
+
+    public ConcreteInterfaceGenericModel() {
+    }
+
+    public ConcreteInterfaceGenericModel(final String property) {
+        this.property = property;
+    }
+
+    @Override
+    public String getPropertyA() {
+        return property;
+    }
+
+    @Override
+    public void setPropertyA(final String property) {
+        this.property = property;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        ConcreteInterfaceGenericModel that = (ConcreteInterfaceGenericModel) o;
+
+        return Objects.equals(property, that.property);
+    }
+
+    @Override
+    public int hashCode() {
+        return property != null ? property.hashCode() : 0;
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/ConcreteModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/ConcreteModel.java
new file mode 100644
index 00000000000..cd406fa1392
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/ConcreteModel.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+public class ConcreteModel extends GenericBaseModel<ConcreteField> {
+
+    public ConcreteModel() {
+    }
+
+    public ConcreteModel(final ConcreteField field) {
+        super(field);
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/ConcreteStandAloneAbstractInterfaceModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/ConcreteStandAloneAbstractInterfaceModel.java
new file mode 100644
index 00000000000..aaf71875f08
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/ConcreteStandAloneAbstractInterfaceModel.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+public final class ConcreteStandAloneAbstractInterfaceModel extends AbstractInterfaceModel {
+
+    public ConcreteStandAloneAbstractInterfaceModel() {
+    }
+
+    public ConcreteStandAloneAbstractInterfaceModel(final String name) {
+        super(name);
+    }
+
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/ConstructorNotPublicModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/ConstructorNotPublicModel.java
new file mode 100644
index 00000000000..1f9f93ec76e
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/ConstructorNotPublicModel.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+public final class ConstructorNotPublicModel {
+    private final Integer integerField;
+
+    ConstructorNotPublicModel(final Integer integerField) {
+        this.integerField = integerField;
+    }
+
+    public static ConstructorNotPublicModel create(final Integer integerField) {
+        return new ConstructorNotPublicModel(integerField);
+    }
+
+    public Integer getIntegerField() {
+        return integerField;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        ConstructorNotPublicModel that = (ConstructorNotPublicModel) o;
+
+        if (getIntegerField() != null ? !getIntegerField().equals(that.getIntegerField()) : that.getIntegerField() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = getIntegerField() != null ? getIntegerField().hashCode() : 0;
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "ConstructorNotPublicModel{"
+                + "integerField=" + integerField
+                + "}";
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/ContainsAlternativeMapAndCollectionModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/ContainsAlternativeMapAndCollectionModel.java
new file mode 100644
index 00000000000..82176f55df8
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/ContainsAlternativeMapAndCollectionModel.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+import org.bson.BsonArray;
+import org.bson.BsonDocument;
+
+public final class ContainsAlternativeMapAndCollectionModel {
+    private BsonArray customList;
+    private BsonDocument customMap;
+
+    public ContainsAlternativeMapAndCollectionModel() {
+    }
+
+    public ContainsAlternativeMapAndCollectionModel(final BsonDocument source) {
+        this.customList = source.getArray("customList");
+        this.customMap = source.getDocument("customMap");
+    }
+
+    public void setCustomList(final BsonArray customList) {
+        this.customList = customList;
+    }
+
+    public void setCustomMap(final BsonDocument customMap) {
+        this.customMap = customMap;
+    }
+
+    public BsonArray getCustomList() {
+        return customList;
+    }
+
+    public BsonDocument getCustomMap() {
+        return customMap;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        ContainsAlternativeMapAndCollectionModel that = (ContainsAlternativeMapAndCollectionModel) o;
+
+        if (getCustomList() != null ? !getCustomList().equals(that.getCustomList()) : that.getCustomList() != null) {
+            return false;
+        }
+        if (getCustomMap() != null ? !getCustomMap().equals(that.getCustomMap()) : that.getCustomMap() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = getCustomList() != null ? getCustomList().hashCode() : 0;
+        result = 31 * result + (getCustomMap() != null ? getCustomMap().hashCode() : 0);
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "ContainsAlternativeMapAndCollectionModel{"
+                + "customList=" + customList
+                + ", customMap=" + customMap
+                + "}";
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/ConventionModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/ConventionModel.java
new file mode 100644
index 00000000000..20417f075c4
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/ConventionModel.java
@@ -0,0 +1,133 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+import org.bson.codecs.pojo.annotations.BsonDiscriminator;
+import org.bson.codecs.pojo.annotations.BsonId;
+import org.bson.codecs.pojo.annotations.BsonProperty;
+
+
+@BsonDiscriminator(value = "AnnotatedConventionModel", key = "_cls")
+public final class ConventionModel {
+    private static final int myStaticField = 10;
+    private transient int myTransientField = 10;
+    private final int myFinalField = 10;
+    private int myIntField = 10;
+
+    @BsonId()
+    private String customId;
+
+    @BsonProperty(useDiscriminator = false)
+    private ConventionModel child;
+
+    @BsonProperty(value = "model", useDiscriminator = false)
+    private SimpleModel simpleModel;
+
+    public ConventionModel() {
+    }
+
+    public ConventionModel(final String customId, final ConventionModel child, final SimpleModel simpleModel) {
+        this.customId = customId;
+        this.child = child;
+        this.simpleModel = simpleModel;
+    }
+
+    public int getMyIntField() {
+        return myIntField;
+    }
+
+    public void setMyIntField(final int myIntField) {
+        this.myIntField = myIntField;
+    }
+
+    public int getMyFinalField() {
+        return myFinalField;
+    }
+
+    public int getMyTransientField() {
+        return myTransientField;
+    }
+
+    public void setMyTransientField(final int myTransientField) {
+        this.myTransientField = myTransientField;
+    }
+
+    public String getCustomId() {
+        return customId;
+    }
+
+    public void setCustomId(final String customId) {
+        this.customId = customId;
+    }
+
+    public ConventionModel getChild() {
+        return child;
+    }
+
+    public void setChild(final ConventionModel child) {
+        this.child = child;
+    }
+
+    public SimpleModel getSimpleModel() {
+        return simpleModel;
+    }
+
+    public void setSimpleModel(final SimpleModel simpleModel) {
+        this.simpleModel = simpleModel;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        ConventionModel that = (ConventionModel) o;
+
+        if (getCustomId() != null ? !getCustomId().equals(that.getCustomId()) : that.getCustomId() != null) {
+            return false;
+        }
+        if (getChild() != null ? !getChild().equals(that.getChild()) : that.getChild() != null) {
+            return false;
+        }
+        if (getSimpleModel() != null ? !getSimpleModel().equals(that.getSimpleModel()) : that.getSimpleModel() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = getCustomId() != null ? getCustomId().hashCode() : 0;
+        result = 31 * result + (getChild() != null ? getChild().hashCode() : 0);
+        result = 31 * result + (getSimpleModel() != null ? getSimpleModel().hashCode() : 0);
getSimpleModel().hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "ConventionModel{" + + "customId='" + customId + "'" + + ", child=" + child + + ", simpleModel=" + simpleModel + + "}"; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/ConverterModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/ConverterModel.java new file mode 100644 index 00000000000..f27d28aa529 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/ConverterModel.java @@ -0,0 +1,74 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +public final class ConverterModel { + private String id; + private String name; + + public ConverterModel() { + } + + public ConverterModel(final String id, final String name) { + this.id = id; + this.name = name; + } + + public String getId() { + return id; + } + + public void setId(final String id) { + this.id = id; + } + + public String getName() { + return name; + } + + public void setName(final String name) { + this.name = name; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ConverterModel that = (ConverterModel) o; + + if (getId() != null ? !getId().equals(that.getId()) : that.getId() != null) { + return false; + } + if (getName() != null ? !getName().equals(that.getName()) : that.getName() != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = getId() != null ? getId().hashCode() : 0; + result = 31 * result + (getName() != null ? getName().hashCode() : 0); + return result; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/CustomPropertyCodecOptionalModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/CustomPropertyCodecOptionalModel.java new file mode 100644 index 00000000000..2e5359b8500 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/CustomPropertyCodecOptionalModel.java @@ -0,0 +1,60 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
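For context, a minimal sketch of how an annotated entity such as ConventionModel is typically wired up through the PojoCodecProvider builder API (the sketch class name is illustrative and not part of this patch; the expected output shape assumes the default conventions, which apply the annotations declared above):

import org.bson.BsonDocument;
import org.bson.BsonDocumentWriter;
import org.bson.codecs.Codec;
import org.bson.codecs.EncoderContext;
import org.bson.codecs.ValueCodecProvider;
import org.bson.codecs.configuration.CodecRegistries;
import org.bson.codecs.configuration.CodecRegistry;
import org.bson.codecs.pojo.PojoCodecProvider;
import org.bson.codecs.pojo.entities.ConventionModel;
import org.bson.codecs.pojo.entities.SimpleModel;

public final class ConventionModelSketch {
    public static void main(final String[] args) {
        // ValueCodecProvider supplies codecs for the String/int leaves;
        // PojoCodecProvider builds codecs for the registered POJO classes.
        CodecRegistry registry = CodecRegistries.fromProviders(new ValueCodecProvider(),
                PojoCodecProvider.builder().register(ConventionModel.class, SimpleModel.class).build());

        Codec<ConventionModel> codec = registry.get(ConventionModel.class);
        BsonDocument document = new BsonDocument();
        codec.encode(new BsonDocumentWriter(document), new ConventionModel("id1", null, null),
                EncoderContext.builder().build());

        // Expected shape, assuming default conventions: @BsonId maps customId to "_id",
        // @BsonProperty renames simpleModel to "model", and the class-level
        // @BsonDiscriminator adds {"_cls": "AnnotatedConventionModel"} on encode.
        System.out.println(document.toJson());
    }
}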
+ */ +package org.bson.codecs.pojo.entities; + +import org.bson.codecs.pojo.annotations.BsonCreator; +import org.bson.codecs.pojo.annotations.BsonProperty; + +import java.util.Objects; + +public class CustomPropertyCodecOptionalModel { + private final Optional optionalField; + + @BsonCreator + public CustomPropertyCodecOptionalModel(@BsonProperty("optionalField") final Optional optionalField) { + this.optionalField = optionalField == null ? Optional.empty() : optionalField; + } + + public Optional getOptionalField() { + return optionalField; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CustomPropertyCodecOptionalModel that = (CustomPropertyCodecOptionalModel) o; + + return Objects.equals(optionalField, that.optionalField); + } + + @Override + public int hashCode() { + return optionalField != null ? optionalField.hashCode() : 0; + } + + @Override + public String toString() { + return "CustomPropertyCodecOptionalModel{" + + "optionalField=" + optionalField + + '}'; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/DiscriminatorModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/DiscriminatorModel.java new file mode 100644 index 00000000000..1ef419540bd --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/DiscriminatorModel.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +import org.bson.codecs.pojo.annotations.BsonDiscriminator; + +@BsonDiscriminator(key = "discriminatorKey", value = "discriminatorValue") +public class DiscriminatorModel { + + public DiscriminatorModel() { + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/DiscriminatorWithGetterModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/DiscriminatorWithGetterModel.java new file mode 100644 index 00000000000..53b57dabff7 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/DiscriminatorWithGetterModel.java @@ -0,0 +1,51 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
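Two notes on the entities above. CustomPropertyCodecOptionalModel's Optional is not java.util.Optional (there is no such import), so it resolves in-package to a test-local Optional type, which is what a custom PropertyCodecProvider is expected to supply a codec for. And for DiscriminatorModel, a hedged sketch of what @BsonDiscriminator contributes on encode (sketch only, class name illustrative):

import org.bson.BsonDocument;
import org.bson.BsonDocumentWriter;
import org.bson.codecs.Codec;
import org.bson.codecs.EncoderContext;
import org.bson.codecs.configuration.CodecRegistries;
import org.bson.codecs.configuration.CodecRegistry;
import org.bson.codecs.pojo.PojoCodecProvider;
import org.bson.codecs.pojo.entities.DiscriminatorModel;

public final class DiscriminatorSketch {
    public static void main(final String[] args) {
        CodecRegistry registry = CodecRegistries.fromProviders(
                PojoCodecProvider.builder().register(DiscriminatorModel.class).build());
        Codec<DiscriminatorModel> codec = registry.get(DiscriminatorModel.class);

        BsonDocument document = new BsonDocument();
        codec.encode(new BsonDocumentWriter(document), new DiscriminatorModel(),
                EncoderContext.builder().build());

        // Annotating the class enables the discriminator, so the encoded form
        // should be: {"discriminatorKey": "discriminatorValue"}
        System.out.println(document.toJson());
    }
}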
+ */ + +package org.bson.codecs.pojo.entities; + +import org.bson.codecs.pojo.annotations.BsonDiscriminator; + +import java.util.Objects; + +@BsonDiscriminator(key = "discriminatorKey", value = "discriminatorValue") +public class DiscriminatorWithGetterModel { + + public DiscriminatorWithGetterModel() { + } + + public String getDiscriminatorKey() { + return "discriminatorValue"; + } + + @Override + public boolean equals(final Object o) { + if (o == null || getClass() != o.getClass()) { + return false; + } + final DiscriminatorWithGetterModel that = (DiscriminatorWithGetterModel) o; + return Objects.equals(getDiscriminatorKey(), that.getDiscriminatorKey()); + } + + @Override + public int hashCode() { + return Objects.hashCode(getDiscriminatorKey()); + } + + @Override + public String toString() { + return "DiscriminatorWithGetterModel{}"; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/DiscriminatorWithProperty.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/DiscriminatorWithProperty.java new file mode 100644 index 00000000000..a7a5b4060f5 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/DiscriminatorWithProperty.java @@ -0,0 +1,53 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +import org.bson.codecs.pojo.annotations.BsonDiscriminator; +import org.bson.codecs.pojo.annotations.BsonProperty; + +import java.util.Objects; + +@BsonDiscriminator(key = "discriminatorKey", value = "discriminatorValue") +public class DiscriminatorWithProperty { + + public DiscriminatorWithProperty() { + } + + @BsonProperty("discriminatorKey") + public String getDiscriminator() { + return "discriminatorValue"; + } + + @Override + public boolean equals(final Object o) { + if (o == null || getClass() != o.getClass()){ + return false; + } + final DiscriminatorWithProperty that = (DiscriminatorWithProperty) o; + return Objects.equals(getDiscriminator(), that.getDiscriminator()); + } + + @Override + public int hashCode() { + return Objects.hashCode(getDiscriminator()); + } + + @Override + public String toString() { + return "DiscriminatorWithProperty{}"; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/DiscriminatorWithPropertyAndIgnore.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/DiscriminatorWithPropertyAndIgnore.java new file mode 100644 index 00000000000..ea92fbea928 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/DiscriminatorWithPropertyAndIgnore.java @@ -0,0 +1,59 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +import org.bson.codecs.pojo.annotations.BsonDiscriminator; +import org.bson.codecs.pojo.annotations.BsonIgnore; +import org.bson.codecs.pojo.annotations.BsonProperty; + +import java.util.Objects; + +@BsonDiscriminator(key = "discriminatorKey", value = "discriminatorValue") +public class DiscriminatorWithPropertyAndIgnore { + + public DiscriminatorWithPropertyAndIgnore() { + } + + @BsonIgnore + public String getFoo() { + return "foo"; + } + + @BsonProperty("discriminatorKey") + public String getDiscriminator() { + return "discriminatorValue"; + } + + @Override + public boolean equals(final Object o) { + if (o == null || getClass() != o.getClass()) { + return false; + } + final DiscriminatorWithPropertyAndIgnore that = (DiscriminatorWithPropertyAndIgnore) o; + return Objects.equals(getDiscriminator(), that.getDiscriminator()); + } + + @Override + public int hashCode() { + return Objects.hashCode(getDiscriminator()); + } + + @Override + public String toString() { + return "DiscriminatorWithPropertyAndIgnore{}"; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/DuplicateAnnotationAllowedModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/DuplicateAnnotationAllowedModel.java new file mode 100644 index 00000000000..211b1e12b50 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/DuplicateAnnotationAllowedModel.java @@ -0,0 +1,90 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ */ + +package org.bson.codecs.pojo.entities; + +import org.bson.codecs.pojo.annotations.BsonIgnore; +import org.bson.codecs.pojo.annotations.BsonProperty; + +import javax.annotation.Nullable; +import java.util.Objects; + +public class DuplicateAnnotationAllowedModel { + + @Nullable + private String id; + + @BsonIgnore + private String ignoredString; + + @BsonProperty("property") + private String propertyString; + + public DuplicateAnnotationAllowedModel() { + } + + public DuplicateAnnotationAllowedModel(final String id) { + this.id = id; + } + + @Nullable + public String getId() { + return id; + } + + public void setId(@Nullable final String id) { + this.id = id; + } + + @BsonIgnore + public String getIgnoredString() { + return ignoredString; + } + + @BsonIgnore + public void setIgnoredString(final String ignoredString) { + this.ignoredString = ignoredString; + } + + @BsonProperty("property") + public String getPropertyString() { + return propertyString; + } + + @BsonProperty("property") + public void setPropertyString(final String propertyString) { + this.propertyString = propertyString; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + DuplicateAnnotationAllowedModel that = (DuplicateAnnotationAllowedModel) o; + + return (Objects.equals(id, that.id)); + } + + @Override + public int hashCode() { + return id != null ? id.hashCode() : 0; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/FieldAndPropertyTypeMismatchModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/FieldAndPropertyTypeMismatchModel.java new file mode 100644 index 00000000000..c6cb93ce6a7 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/FieldAndPropertyTypeMismatchModel.java @@ -0,0 +1,64 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs.pojo.entities; + +import java.util.Arrays; + +public class FieldAndPropertyTypeMismatchModel { + private byte[] stringField; + + public FieldAndPropertyTypeMismatchModel() { + } + + public FieldAndPropertyTypeMismatchModel(final String stringField) { + this.stringField = stringField.getBytes(); + } + + public String getStringField() { + return new String(stringField); + } + + public void setStringField(final String stringField) { + this.stringField = stringField.getBytes(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + FieldAndPropertyTypeMismatchModel that = (FieldAndPropertyTypeMismatchModel) o; + + return Arrays.equals(stringField, that.stringField); + } + + @Override + public int hashCode() { + return Arrays.hashCode(stringField); + } + + @Override + public String toString() { + return "FieldAndPropertyTypeMismatchModel{" + + "stringField=" + new String(stringField) + + '}'; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/GenericBaseModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/GenericBaseModel.java new file mode 100644 index 00000000000..5164f9703e5 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/GenericBaseModel.java @@ -0,0 +1,59 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +import org.bson.codecs.pojo.annotations.BsonDiscriminator; + +import java.util.Objects; + +@BsonDiscriminator() +public class GenericBaseModel<T> { + + private T field; + + public GenericBaseModel(final T field) { + this.field = field; + } + + public GenericBaseModel() { + } + + public T getField() { + return field; + } + + public void setField(final T field) { + this.field = field; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + GenericBaseModel that = (GenericBaseModel) o; + return Objects.equals(field, that.field); + } + + @Override + public int hashCode() { + return Objects.hashCode(field); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/GenericHolderModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/GenericHolderModel.java new file mode 100644 index 00000000000..1a40376cd95 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/GenericHolderModel.java @@ -0,0 +1,75 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
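FieldAndPropertyTypeMismatchModel deliberately hides a byte[] field behind String-typed accessors; the property type seen by the codec comes from the accessors, so the value should round-trip as a BSON string. A sketch under that assumption (class name illustrative, not part of the patch):

import org.bson.BsonDocument;
import org.bson.BsonDocumentWriter;
import org.bson.codecs.Codec;
import org.bson.codecs.EncoderContext;
import org.bson.codecs.ValueCodecProvider;
import org.bson.codecs.configuration.CodecRegistries;
import org.bson.codecs.configuration.CodecRegistry;
import org.bson.codecs.pojo.PojoCodecProvider;
import org.bson.codecs.pojo.entities.FieldAndPropertyTypeMismatchModel;

public final class TypeMismatchSketch {
    public static void main(final String[] args) {
        CodecRegistry registry = CodecRegistries.fromProviders(new ValueCodecProvider(),
                PojoCodecProvider.builder().register(FieldAndPropertyTypeMismatchModel.class).build());
        Codec<FieldAndPropertyTypeMismatchModel> codec = registry.get(FieldAndPropertyTypeMismatchModel.class);

        BsonDocument document = new BsonDocument();
        codec.encode(new BsonDocumentWriter(document), new FieldAndPropertyTypeMismatchModel("myString"),
                EncoderContext.builder().build());

        // Expected: {"stringField": "myString"}, a BSON string rather than binary,
        // because the getter/setter pair is typed String.
        System.out.println(document.toJson());
    }
}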
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +public class GenericHolderModel<P>
{ + + private P myGenericField; + private Long myLongField; + + public GenericHolderModel() { + } + + public GenericHolderModel(final P myGenericField, final Long myLongField) { + this.myGenericField = myGenericField; + this.myLongField = myLongField; + } + + public P getMyGenericField() { + return myGenericField; + } + + public void setMyGenericField(final P myGenericField) { + this.myGenericField = myGenericField; + } + + public Long getMyLongField() { + return myLongField; + } + + public void setMyLongField(final Long myLongField) { + this.myLongField = myLongField; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (!(o instanceof GenericHolderModel)) { + return false; + } + + GenericHolderModel that = (GenericHolderModel) o; + + if (getMyGenericField() != null ? !getMyGenericField().equals(that.getMyGenericField()) : that.getMyGenericField() != null) { + return false; + } + if (getMyLongField() != null ? !getMyLongField().equals(that.getMyLongField()) : that.getMyLongField() != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = getMyGenericField() != null ? getMyGenericField().hashCode() : 0; + result = 31 * result + (getMyLongField() != null ? getMyLongField().hashCode() : 0); + return result; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/GenericTreeModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/GenericTreeModel.java new file mode 100644 index 00000000000..92ee9d9f9c6 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/GenericTreeModel.java @@ -0,0 +1,103 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +public final class GenericTreeModel { + + private A field1; + private B field2; + private GenericTreeModel left; + private GenericTreeModel right; + + public GenericTreeModel() { + } + + public GenericTreeModel(final A field1, final B field2, final GenericTreeModel left, final GenericTreeModel right) { + this.field1 = field1; + this.field2 = field2; + this.left = left; + this.right = right; + } + + public A getField1() { + return field1; + } + + public void setField1(final A field1) { + this.field1 = field1; + } + + public B getField2() { + return field2; + } + + public void setField2(final B field2) { + this.field2 = field2; + } + + public GenericTreeModel getLeft() { + return left; + } + + public void setLeft(final GenericTreeModel left) { + this.left = left; + } + + public GenericTreeModel getRight() { + return right; + } + + public void setRight(final GenericTreeModel right) { + this.right = right; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + GenericTreeModel that = (GenericTreeModel) o; + + if (getField1() != null ? 
!getField1().equals(that.getField1()) : that.getField1() != null) { + return false; + } + if (getField2() != null ? !getField2().equals(that.getField2()) : that.getField2() != null) { + return false; + } + if (getLeft() != null ? !getLeft().equals(that.getLeft()) : that.getLeft() != null) { + return false; + } + if (getRight() != null ? !getRight().equals(that.getRight()) : that.getRight() != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = getField1() != null ? getField1().hashCode() : 0; + result = 31 * result + (getField2() != null ? getField2().hashCode() : 0); + result = 31 * result + (getLeft() != null ? getLeft().hashCode() : 0); + result = 31 * result + (getRight() != null ? getRight().hashCode() : 0); + return result; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/ImmutableList.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/ImmutableList.java new file mode 100644 index 00000000000..044e9f8e20e --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/ImmutableList.java @@ -0,0 +1,166 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.ListIterator; + + +public final class ImmutableList implements List { + + final List list; + + public static ImmutableList copyOf(final List list) { + if (list instanceof ImmutableList) { + return (ImmutableList) list; + } else { + return new ImmutableList<>(new ArrayList<>(list)); + } + } + + private ImmutableList(final List list) { + this.list = list; + } + + @Override + public int size() { + return list.size(); + } + + @Override + public boolean isEmpty() { + return list.isEmpty(); + } + + @Override + public boolean contains(final Object o) { + return list.contains(o); + } + + @Override + public Iterator iterator() { + return list.iterator(); + } + + @Override + public Object[] toArray() { + return list.toArray(); + } + + @Override + public T1[] toArray(final T1[] a) { + return list.toArray(a); + } + + @Override + public boolean add(final T t) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean remove(final Object o) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean containsAll(final Collection c) { + return list.containsAll(c); + } + + @Override + public boolean addAll(final Collection c) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean addAll(final int index, final Collection c) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean removeAll(final Collection c) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean retainAll(final Collection c) { + throw new UnsupportedOperationException(); + } + + @Override + public void clear() { + + } + + @Override + public boolean 
equals(final Object o) { + return list.equals(o); + } + + @Override + public int hashCode() { + return list.hashCode(); + } + + @Override + public T get(final int index) { + return list.get(index); + } + + @Override + public T set(final int index, final T element) { + throw new UnsupportedOperationException(); + } + + @Override + public void add(final int index, final T element) { + throw new UnsupportedOperationException(); + } + + @Override + public T remove(final int index) { + throw new UnsupportedOperationException(); + } + + @Override + public int indexOf(final Object o) { + return list.indexOf(o); + } + + @Override + public int lastIndexOf(final Object o) { + return list.lastIndexOf(o); + } + + @Override + public ListIterator listIterator() { + return list.listIterator(); + } + + @Override + public ListIterator listIterator(final int index) { + return list.listIterator(index); + } + + @Override + public List subList(final int fromIndex, final int toIndex) { + return new ImmutableList<>(list.subList(fromIndex, toIndex)); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceBasedModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceBasedModel.java new file mode 100644 index 00000000000..b641879b7df --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceBasedModel.java @@ -0,0 +1,20 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +public interface InterfaceBasedModel { +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceGenericModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceGenericModel.java new file mode 100644 index 00000000000..dab1683daae --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceGenericModel.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + + +public interface InterfaceGenericModel { + + T getPropertyA(); + + void setPropertyA(T property); +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceModelA.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceModelA.java new file mode 100644 index 00000000000..a4771f4b3f5 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceModelA.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
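A brief usage sketch for ImmutableList (illustrative; assumes the generic signature ImmutableList<T> implements List<T>): copyOf is idempotent for lists that are already immutable, and the mutators throw, with the quirk that clear() is a silent no-op:

import java.util.Arrays;
import java.util.List;
import org.bson.codecs.pojo.entities.ImmutableList;

public final class ImmutableListSketch {
    public static void main(final String[] args) {
        List<String> frozen = ImmutableList.copyOf(Arrays.asList("a", "b"));

        // Wrapping an ImmutableList again returns the same instance.
        System.out.println(frozen == ImmutableList.copyOf(frozen)); // true

        try {
            frozen.add("c"); // mutators throw
        } catch (UnsupportedOperationException expected) {
            System.out.println("add() rejected, as expected");
        }

        frozen.clear(); // quirk: clear() silently does nothing rather than throwing
        System.out.println(frozen.size()); // still 2
    }
}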
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + + +public interface InterfaceModelA { + + String getPropertyA(); + + void setPropertyA(String property); +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceModelAbstract.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceModelAbstract.java new file mode 100644 index 00000000000..34a1a7c4350 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceModelAbstract.java @@ -0,0 +1,62 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +public abstract class InterfaceModelAbstract implements InterfaceModelA { + + private String propertyA; + + public InterfaceModelAbstract() { + } + + public InterfaceModelAbstract(final String propertyA) { + this.propertyA = propertyA; + } + + @Override + public String getPropertyA() { + return propertyA; + } + + @Override + public void setPropertyA(final String property) { + this.propertyA = property; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + InterfaceModelAbstract that = (InterfaceModelAbstract) o; + + if (getPropertyA() != null ? !getPropertyA().equals(that.getPropertyA()) : that.getPropertyA() != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return getPropertyA() != null ? getPropertyA().hashCode() : 0; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceModelB.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceModelB.java new file mode 100644 index 00000000000..687ac46af7e --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceModelB.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs.pojo.entities; + + +public interface InterfaceModelB extends InterfaceModelA { + + String getPropertyB(); + + void setPropertyB(String propertyB); +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceModelC.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceModelC.java new file mode 100644 index 00000000000..e1bf81788dc --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceModelC.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + + +public interface InterfaceModelC extends InterfaceModelA { + + default String getPropertyC() { + return "c"; + } + + void setPropertyC(String propertyC); +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceModelImpl.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceModelImpl.java new file mode 100644 index 00000000000..9db110c6115 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceModelImpl.java @@ -0,0 +1,77 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +public class InterfaceModelImpl extends InterfaceModelAbstract implements InterfaceModelB { + + private String propertyB; + + public InterfaceModelImpl() { + } + + public InterfaceModelImpl(final String propertyA, final String propertyB) { + super(propertyA); + this.propertyB = propertyB; + } + + @Override + public String getPropertyB() { + return propertyB; + } + + @Override + public void setPropertyB(final String propertyB) { + this.propertyB = propertyB; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + InterfaceModelImpl that = (InterfaceModelImpl) o; + + if (getPropertyA() != null ? !getPropertyA().equals(that.getPropertyA()) : that.getPropertyA() != null) { + return false; + } + + if (getPropertyB() != null ? !getPropertyB().equals(that.getPropertyB()) : that.getPropertyB() != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = getPropertyA() != null ? getPropertyA().hashCode() : 0; + result = 31 * result + (getPropertyB() != null ? 
getPropertyB().hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "InterfaceModelImpl{" + + "propertyA='" + getPropertyA() + "', " + + "propertyB='" + getPropertyB() + '\'' + + '}'; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceUpperBoundsModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceUpperBoundsModel.java new file mode 100644 index 00000000000..e74cf54c5df --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceUpperBoundsModel.java @@ -0,0 +1,21 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +public interface InterfaceUpperBoundsModel { + T getNestedModel(); +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceUpperBoundsModelAbstract.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceUpperBoundsModelAbstract.java new file mode 100644 index 00000000000..451615aebe7 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceUpperBoundsModelAbstract.java @@ -0,0 +1,21 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +abstract class InterfaceUpperBoundsModelAbstract implements InterfaceUpperBoundsModel { + public abstract String getName(); +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceUpperBoundsModelAbstractImpl.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceUpperBoundsModelAbstractImpl.java new file mode 100644 index 00000000000..9766c70e70c --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceUpperBoundsModelAbstractImpl.java @@ -0,0 +1,74 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs.pojo.entities; + +import java.util.Objects; + +public class InterfaceUpperBoundsModelAbstractImpl extends InterfaceUpperBoundsModelAbstract { + private String name; + private InterfaceModelImpl nestedModel; + + public InterfaceUpperBoundsModelAbstractImpl() { + } + + public InterfaceUpperBoundsModelAbstractImpl(final String name, final InterfaceModelImpl nestedModel) { + this.name = name; + this.nestedModel = nestedModel; + } + + @Override + public String getName() { + return name; + } + + @Override + public InterfaceModelImpl getNestedModel() { + return nestedModel; + } + + public void setName(final String name) { + this.name = name; + } + + public void setNestedModel(final InterfaceModelImpl nestedModel) { + this.nestedModel = nestedModel; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + InterfaceUpperBoundsModelAbstractImpl that = (InterfaceUpperBoundsModelAbstractImpl) o; + + if (!Objects.equals(name, that.name)) { + return false; + } + return Objects.equals(nestedModel, that.nestedModel); + } + + @Override + public int hashCode() { + int result = name != null ? name.hashCode() : 0; + result = 31 * result + (nestedModel != null ? nestedModel.hashCode() : 0); + return result; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceWithDefaultMethodModelImpl.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceWithDefaultMethodModelImpl.java new file mode 100644 index 00000000000..1f9a104be5c --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceWithDefaultMethodModelImpl.java @@ -0,0 +1,64 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +public class InterfaceWithDefaultMethodModelImpl extends InterfaceModelAbstract implements InterfaceModelC { + + private String propertyC; + + public InterfaceWithDefaultMethodModelImpl() { + } + + public InterfaceWithDefaultMethodModelImpl(final String propertyA, final String propertyC) { + super(propertyA); + this.propertyC = propertyC; + } + + @Override + public void setPropertyC(final String propertyC) { + this.propertyC = propertyC; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + InterfaceWithDefaultMethodModelImpl that = (InterfaceWithDefaultMethodModelImpl) o; + + if (getPropertyA() != null ? !getPropertyA().equals(that.getPropertyA()) : that.getPropertyA() != null) { + return false; + } + + if (getPropertyC() != null ? !getPropertyC().equals(that.getPropertyC()) : that.getPropertyC() != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = getPropertyA() != null ? getPropertyA().hashCode() : 0; + result = 31 * result + (getPropertyC() != null ?
getPropertyC().hashCode() : 0); + return result; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceWithOverrideDefaultMethodModelImpl.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceWithOverrideDefaultMethodModelImpl.java new file mode 100644 index 00000000000..6e6c9b19761 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/InterfaceWithOverrideDefaultMethodModelImpl.java @@ -0,0 +1,69 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +public class InterfaceWithOverrideDefaultMethodModelImpl extends InterfaceModelAbstract implements InterfaceModelC { + + private String propertyC; + + public InterfaceWithOverrideDefaultMethodModelImpl() { + } + + public InterfaceWithOverrideDefaultMethodModelImpl(final String propertyA, final String propertyC) { + super(propertyA); + this.propertyC = propertyC; + } + + @Override + public String getPropertyC() { + return propertyC; + } + + @Override + public void setPropertyC(final String propertyC) { + this.propertyC = propertyC; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + InterfaceWithOverrideDefaultMethodModelImpl that = (InterfaceWithOverrideDefaultMethodModelImpl) o; + + if (getPropertyA() != null ? !getPropertyA().equals(that.getPropertyA()) : that.getPropertyA() != null) { + return false; + } + + if (getPropertyC() != null ? !getPropertyC().equals(that.getPropertyC()) : that.getPropertyC() != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = getPropertyA() != null ? getPropertyA().hashCode() : 0; + result = 31 * result + (getPropertyC() != null ? getPropertyC().hashCode() : 0); + return result; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/InvalidCollection.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/InvalidCollection.java new file mode 100644 index 00000000000..f6951e7a5e1 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/InvalidCollection.java @@ -0,0 +1,113 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
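The two implementations above differ only in whether InterfaceModelC's default getPropertyC() is overridden, which is exactly what they are fixtures for; in plain Java terms (sketch, class name illustrative):

import org.bson.codecs.pojo.entities.InterfaceWithDefaultMethodModelImpl;
import org.bson.codecs.pojo.entities.InterfaceWithOverrideDefaultMethodModelImpl;

public final class DefaultMethodSketch {
    public static void main(final String[] args) {
        // No override: the interface default wins, so the stored value is only
        // observable through equals()/hashCode(), never through the getter.
        InterfaceWithDefaultMethodModelImpl noOverride =
                new InterfaceWithDefaultMethodModelImpl("a", "stored");
        System.out.println(noOverride.getPropertyC()); // prints "c"

        // With the override, the getter exposes the backing field.
        InterfaceWithOverrideDefaultMethodModelImpl withOverride =
                new InterfaceWithOverrideDefaultMethodModelImpl("a", "stored");
        System.out.println(withOverride.getPropertyC()); // prints "stored"
    }
}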
+ */ + +package org.bson.codecs.pojo.entities; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; + +@SuppressWarnings("rawtypes") +public class InvalidCollection implements Collection { + private final List wrapped; + + public InvalidCollection(final List wrapped) { + this.wrapped = new ArrayList<>(wrapped); + } + + @Override + public int size() { + return wrapped.size(); + } + + @Override + public boolean isEmpty() { + return wrapped.isEmpty(); + } + + @Override + public boolean contains(final Object o) { + return wrapped.contains(o); + } + + @Override + public Iterator iterator() { + return wrapped.iterator(); + } + + @Override + public Object[] toArray() { + return wrapped.toArray(); + } + + @Override + public boolean add(final Object o) { + return false; + } + + @Override + public boolean remove(final Object o) { + return false; + } + + @Override + public boolean addAll(final Collection c) { + return false; + } + + @Override + public void clear() { + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + + if (o == null || getClass() != o.getClass()) { + return false; + } + InvalidCollection that = (InvalidCollection) o; + return wrapped.equals(that.wrapped); + } + + @Override + public int hashCode() { + return wrapped.hashCode(); + } + + @Override + public boolean retainAll(final Collection c) { + return false; + } + + @Override + public boolean removeAll(final Collection c) { + return false; + } + + @Override + public boolean containsAll(final Collection c) { + return wrapped.containsAll(c); + } + + @Override + public Object[] toArray(final Object[] a) { + return wrapped.toArray(a); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/InvalidCollectionModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/InvalidCollectionModel.java new file mode 100644 index 00000000000..7a5f045bc67 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/InvalidCollectionModel.java @@ -0,0 +1,56 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
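InvalidCollection is "invalid" from the codec's perspective: a raw, custom Collection type with no no-arg constructor, so the default collection handling has no way to instantiate it. A hedged sketch of the expected failure when the InvalidCollectionModel defined next is used without custom codec support (whether the exception surfaces at lookup or at encode/decode time is an assumption here):

import org.bson.codecs.ValueCodecProvider;
import org.bson.codecs.configuration.CodecConfigurationException;
import org.bson.codecs.configuration.CodecRegistries;
import org.bson.codecs.configuration.CodecRegistry;
import org.bson.codecs.pojo.PojoCodecProvider;
import org.bson.codecs.pojo.entities.InvalidCollectionModel;

public final class InvalidCollectionSketch {
    public static void main(final String[] args) {
        CodecRegistry registry = CodecRegistries.fromProviders(new ValueCodecProvider(),
                PojoCodecProvider.builder().register(InvalidCollectionModel.class).build());
        try {
            registry.get(InvalidCollectionModel.class);
            System.out.println("lookup succeeded; the failure would then surface on encode/decode");
        } catch (CodecConfigurationException expected) {
            System.out.println("no codec for the raw InvalidCollection property: " + expected.getMessage());
        }
    }
}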
+ */ + +package org.bson.codecs.pojo.entities; + +import java.util.List; + +public class InvalidCollectionModel { + + private InvalidCollection collectionField; + + public InvalidCollectionModel() { + } + + public InvalidCollectionModel(final List list) { + this.collectionField = new InvalidCollection(list); + } + + public InvalidCollection getCollectionField() { + return collectionField; + } + + public void setCollectionField(final InvalidCollection collectionField) { + this.collectionField = collectionField; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + InvalidCollectionModel that = (InvalidCollectionModel) o; + return collectionField.equals(that.collectionField); + } + + @Override + public int hashCode() { + return collectionField.hashCode(); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/InvalidGetterAndSetterModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/InvalidGetterAndSetterModel.java new file mode 100644 index 00000000000..c185505f17b --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/InvalidGetterAndSetterModel.java @@ -0,0 +1,82 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +public final class InvalidGetterAndSetterModel { + private Integer integerField; + private String stringField; + + public InvalidGetterAndSetterModel(){ + } + + public InvalidGetterAndSetterModel(final Integer integerField, final String stringField) { + this.integerField = integerField; + this.stringField = stringField; + } + + public Integer getIntegerField() { + return integerField; + } + + public void setIntegerField(final Integer integerField) { + throw new UnsupportedOperationException("Nope"); + } + + public String getStringField() { + throw new UnsupportedOperationException("Nope"); + } + + public void setStringField(final String stringField) { + this.stringField = stringField; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + InvalidGetterAndSetterModel that = (InvalidGetterAndSetterModel) o; + + if (getIntegerField() != null ? !getIntegerField().equals(that.getIntegerField()) : that.getIntegerField() != null) { + return false; + } + if (getStringField() != null ? !getStringField().equals(that.getStringField()) : that.getStringField() != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = getIntegerField() != null ? getIntegerField().hashCode() : 0; + result = 31 * result + (getStringField() != null ? 
getStringField().hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "InvalidGetterAndSetterModel{" + + "integerField=" + integerField + + ", stringField='" + stringField + "'" + + "}"; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/InvalidMapModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/InvalidMapModel.java new file mode 100644 index 00000000000..8e7990da785 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/InvalidMapModel.java @@ -0,0 +1,58 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +import java.util.Map; +import java.util.Objects; + +public final class InvalidMapModel { + private Map<Integer, Integer> invalidMap; + + public InvalidMapModel() { + } + + public InvalidMapModel(final Map<Integer, Integer> invalidMap) { + this.invalidMap = invalidMap; + } + + public Map<Integer, Integer> getInvalidMap() { + return invalidMap; + } + + public void setInvalidMap(final Map<Integer, Integer> invalidMap) { + this.invalidMap = invalidMap; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + InvalidMapModel that = (InvalidMapModel) o; + + return Objects.equals(invalidMap, that.invalidMap); + } + + @Override + public int hashCode() { + return invalidMap != null ? invalidMap.hashCode() : 0; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/InvalidMapPropertyCodecProvider.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/InvalidMapPropertyCodecProvider.java new file mode 100644 index 00000000000..3716c00a179 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/InvalidMapPropertyCodecProvider.java @@ -0,0 +1,79 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
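InvalidMapModel's map is keyed by Integer, and the default map handling writes keys as BSON document field names, which must be strings; that gap is what the PropertyCodecProvider defined next exists to fill. A hedged sketch of the failure without it (exactly where it surfaces is an assumption):

import org.bson.codecs.ValueCodecProvider;
import org.bson.codecs.configuration.CodecConfigurationException;
import org.bson.codecs.configuration.CodecRegistries;
import org.bson.codecs.configuration.CodecRegistry;
import org.bson.codecs.pojo.PojoCodecProvider;
import org.bson.codecs.pojo.entities.InvalidMapModel;

public final class InvalidMapSketch {
    public static void main(final String[] args) {
        CodecRegistry registry = CodecRegistries.fromProviders(new ValueCodecProvider(),
                PojoCodecProvider.builder().register(InvalidMapModel.class).build());
        try {
            registry.get(InvalidMapModel.class);
            System.out.println("lookup succeeded; Integer keys would still fail without custom handling");
        } catch (CodecConfigurationException expected) {
            System.out.println("Integer map keys are not supported by default: " + expected.getMessage());
        }
    }
}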
+ */ + +package org.bson.codecs.pojo.entities; + +import org.bson.BsonReader; +import org.bson.BsonType; +import org.bson.BsonWriter; +import org.bson.codecs.Codec; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.pojo.PropertyCodecProvider; +import org.bson.codecs.pojo.PropertyCodecRegistry; +import org.bson.codecs.pojo.TypeWithTypeParameters; + +import java.util.HashMap; +import java.util.Map; + +public class InvalidMapPropertyCodecProvider implements PropertyCodecProvider { + + @SuppressWarnings("unchecked") + @Override + public <T> Codec<T> get(final TypeWithTypeParameters<T> type, final PropertyCodecRegistry registry) { + if (Map.class.isAssignableFrom(type.getType()) && type.getTypeParameters().size() == 2 + && type.getTypeParameters().get(0).getType().equals(Integer.class) + && type.getTypeParameters().get(1).getType().equals(Integer.class)) { + return (Codec<T>) new InvalidMapModelCodec((Class<Map<Integer, Integer>>) type.getType()); + } else { + return null; + } + } + + private static final class InvalidMapModelCodec implements Codec<Map<Integer, Integer>> { + private final Class<Map<Integer, Integer>> encoderClass; + + private InvalidMapModelCodec(final Class<Map<Integer, Integer>> encoderClass) { + this.encoderClass = encoderClass; + } + + @Override + public Map<Integer, Integer> decode(final BsonReader reader, final DecoderContext decoderContext) { + Map<Integer, Integer> map = new HashMap<>(); + + reader.readStartDocument(); + while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) { + map.put(Integer.valueOf(reader.readName()), reader.readInt32()); + } + reader.readEndDocument(); + return map; + } + + @Override + public void encode(final BsonWriter writer, final Map<Integer, Integer> value, final EncoderContext encoderContext) { + writer.writeStartDocument(); + for (Map.Entry<Integer, Integer> entry : value.entrySet()) { + writer.writeInt32(entry.getKey().toString(), entry.getValue()); + } + writer.writeEndDocument(); + } + + @Override + public Class<Map<Integer, Integer>> getEncoderClass() { + return encoderClass; + } + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/InvalidSetterArgsModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/InvalidSetterArgsModel.java new file mode 100644 index 00000000000..f913025e3aa --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/InvalidSetterArgsModel.java @@ -0,0 +1,82 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
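The counterpart sketch: registering the provider above so the Integer-keyed map round-trips. This assumes PojoCodecProvider.Builder's register(...) accepts PropertyCodecProvider instances, which appears to be the registration path these fixtures are designed for. Keys are stringified on write (writeInt32(entry.getKey().toString(), ...)) and parsed back on read, so {1: 2} is stored as {"1": 2}:

import org.bson.BsonDocument;
import org.bson.BsonDocumentWriter;
import org.bson.codecs.Codec;
import org.bson.codecs.EncoderContext;
import org.bson.codecs.ValueCodecProvider;
import org.bson.codecs.configuration.CodecRegistries;
import org.bson.codecs.configuration.CodecRegistry;
import org.bson.codecs.pojo.PojoCodecProvider;
import org.bson.codecs.pojo.entities.InvalidMapModel;
import org.bson.codecs.pojo.entities.InvalidMapPropertyCodecProvider;

import java.util.HashMap;
import java.util.Map;

public final class InvalidMapProviderSketch {
    public static void main(final String[] args) {
        CodecRegistry registry = CodecRegistries.fromProviders(new ValueCodecProvider(),
                PojoCodecProvider.builder()
                        .register(InvalidMapModel.class)
                        .register(new InvalidMapPropertyCodecProvider())
                        .build());

        Map<Integer, Integer> map = new HashMap<>();
        map.put(1, 2);
        Codec<InvalidMapModel> codec = registry.get(InvalidMapModel.class);
        BsonDocument document = new BsonDocument();
        codec.encode(new BsonDocumentWriter(document), new InvalidMapModel(map),
                EncoderContext.builder().build());
        System.out.println(document.toJson()); // {"invalidMap": {"1": 2}}
    }
}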
+ */ + +package org.bson.codecs.pojo.entities; + +public final class InvalidSetterArgsModel { + private Integer integerField; + private String stringField; + + public InvalidSetterArgsModel(){ + } + + public InvalidSetterArgsModel(final Integer integerField, final String stringField) { + this.integerField = integerField; + this.stringField = stringField; + } + + public Integer getIntegerField() { + return integerField; + } + + public void setIntegerField(final Integer integerField) { + this.integerField = integerField; + } + + public String getStringField() { + return stringField; + } + + public void setStringField(final Integer stringField) { + this.stringField = stringField.toString(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + InvalidSetterArgsModel that = (InvalidSetterArgsModel) o; + + if (getIntegerField() != null ? !getIntegerField().equals(that.getIntegerField()) : that.getIntegerField() != null) { + return false; + } + if (getStringField() != null ? !getStringField().equals(that.getStringField()) : that.getStringField() != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = getIntegerField() != null ? getIntegerField().hashCode() : 0; + result = 31 * result + (getStringField() != null ? getStringField().hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "InvalidSetterArgsModel{" + + "integerField=" + integerField + + ", stringField='" + stringField + "'" + + "}"; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/ListGenericExtendedModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/ListGenericExtendedModel.java new file mode 100644 index 00000000000..5d7072caad8 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/ListGenericExtendedModel.java @@ -0,0 +1,34 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +import java.util.List; + +public class ListGenericExtendedModel extends ListGenericModel<Integer> { + + public ListGenericExtendedModel() { + } + + public ListGenericExtendedModel(final List<Integer> values) { + super(values); + } + + @Override + public String toString() { + return "ListGenericExtendedModel{} " + super.toString(); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/ListGenericModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/ListGenericModel.java new file mode 100644 index 00000000000..012395bddda --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/ListGenericModel.java @@ -0,0 +1,64 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +import java.util.List; +import java.util.Objects; + +public class ListGenericModel<T> { + + private List<T> values; + + public ListGenericModel() { + } + + public ListGenericModel(final List<T> values) { + this.values = values; + } + + public List<T> getValues() { + return values; + } + + public void setValues(final List<T> values) { + this.values = values; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ListGenericModel<?> that = (ListGenericModel<?>) o; + return Objects.equals(values, that.values); + } + + @Override + public int hashCode() { + return Objects.hash(values); + } + + @Override + public String toString() { + return "ListGenericModel{" + + "values=" + values + + '}'; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/ListListGenericExtendedModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/ListListGenericExtendedModel.java new file mode 100644 index 00000000000..4310785ac45 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/ListListGenericExtendedModel.java @@ -0,0 +1,34 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +import java.util.List; + +public class ListListGenericExtendedModel extends ListListGenericModel<Integer> { + + public ListListGenericExtendedModel() { + } + + public ListListGenericExtendedModel(final List<List<Integer>> values) { + super(values); + } + + @Override + public String toString() { + return "ListListGenericExtendedModel{} " + super.toString(); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/ListListGenericModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/ListListGenericModel.java new file mode 100644 index 00000000000..7fe97c79b5f --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/ListListGenericModel.java @@ -0,0 +1,64 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +import java.util.List; +import java.util.Objects; + +public class ListListGenericModel<T> { + + private List<List<T>> values; + + public ListListGenericModel() { + } + + public ListListGenericModel(final List<List<T>> values) { + this.values = values; + } + + public List<List<T>> getValues() { + return values; + } + + public void setValues(final List<List<T>> values) { + this.values = values; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ListListGenericModel<?> that = (ListListGenericModel<?>) o; + return Objects.equals(values, that.values); + } + + @Override + public int hashCode() { + return Objects.hash(values); + } + + @Override + public String toString() { + return "ListListGenericModel{" + + "values=" + values + + '}'; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/ListMapGenericExtendedModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/ListMapGenericExtendedModel.java new file mode 100644 index 00000000000..d0f3c471fae --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/ListMapGenericExtendedModel.java @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +import java.util.List; +import java.util.Map; + +public class ListMapGenericExtendedModel extends ListMapGenericModel<Integer> { + + public ListMapGenericExtendedModel() { + } + + public ListMapGenericExtendedModel(final List<Map<String, Integer>> values) { + super(values); + } + + @Override + public String toString() { + return "ListMapGenericExtendedModel{} " + super.toString(); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/ListMapGenericModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/ListMapGenericModel.java new file mode 100644 index 00000000000..ef913c88920 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/ListMapGenericModel.java @@ -0,0 +1,65 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.bson.codecs.pojo.entities; + +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public class ListMapGenericModel<T> { + + private List<Map<String, T>> values; + + public ListMapGenericModel() { + } + + public ListMapGenericModel(final List<Map<String, T>> values) { + this.values = values; + } + + public List<Map<String, T>> getValues() { + return values; + } + + public void setValues(final List<Map<String, T>> values) { + this.values = values; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ListMapGenericModel<?> that = (ListMapGenericModel<?>) o; + return Objects.equals(values, that.values); + } + + @Override + public int hashCode() { + return Objects.hash(values); + } + + @Override + public String toString() { + return "ListMapGenericModel{" + + "values=" + values + + '}'; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/MapGenericExtendedModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/MapGenericExtendedModel.java new file mode 100644 index 00000000000..cdda1325248 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/MapGenericExtendedModel.java @@ -0,0 +1,34 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +import java.util.Map; + +public class MapGenericExtendedModel extends MapGenericModel<Integer> { + + public MapGenericExtendedModel() { + } + + public MapGenericExtendedModel(final Map<String, Integer> values) { + super(values); + } + + @Override + public String toString() { + return "MapGenericExtendedModel{} " + super.toString(); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/MapGenericModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/MapGenericModel.java new file mode 100644 index 00000000000..a4db52b4fa9 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/MapGenericModel.java @@ -0,0 +1,64 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.bson.codecs.pojo.entities; + +import java.util.Map; +import java.util.Objects; + +public class MapGenericModel<T> { + + private Map<String, T> values; + + public MapGenericModel() { + } + + public MapGenericModel(final Map<String, T> values) { + this.values = values; + } + + public Map<String, T> getValues() { + return values; + } + + public void setValues(final Map<String, T> values) { + this.values = values; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + MapGenericModel<?> that = (MapGenericModel<?>) o; + return Objects.equals(values, that.values); + } + + @Override + public int hashCode() { + return Objects.hash(values); + } + + @Override + public String toString() { + return "MapGenericModel{" + + "values=" + values + + '}'; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/MapListGenericExtendedModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/MapListGenericExtendedModel.java new file mode 100644 index 00000000000..776bd1f6a9b --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/MapListGenericExtendedModel.java @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +import java.util.List; +import java.util.Map; + +public class MapListGenericExtendedModel extends MapListGenericModel<Integer> { + + public MapListGenericExtendedModel() { + } + + public MapListGenericExtendedModel(final Map<String, List<Integer>> values) { + super(values); + } + + @Override + public String toString() { + return "MapListGenericExtendedModel{} " + super.toString(); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/MapListGenericModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/MapListGenericModel.java new file mode 100644 index 00000000000..e75019c2247 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/MapListGenericModel.java @@ -0,0 +1,65 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.bson.codecs.pojo.entities; + +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public class MapListGenericModel<T> { + + private Map<String, List<T>> values; + + public MapListGenericModel() { + } + + public MapListGenericModel(final Map<String, List<T>> values) { + this.values = values; + } + + public Map<String, List<T>> getValues() { + return values; + } + + public void setValues(final Map<String, List<T>> values) { + this.values = values; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + MapListGenericModel<?> that = (MapListGenericModel<?>) o; + return Objects.equals(values, that.values); + } + + @Override + public int hashCode() { + return Objects.hash(values); + } + + @Override + public String toString() { + return "MapListGenericModel{" + + "values=" + values + + '}'; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/MapMapGenericExtendedModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/MapMapGenericExtendedModel.java new file mode 100644 index 00000000000..8b68dddd151 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/MapMapGenericExtendedModel.java @@ -0,0 +1,34 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +import java.util.Map; + +public class MapMapGenericExtendedModel extends MapMapGenericModel<Integer> { + + public MapMapGenericExtendedModel() { + } + + public MapMapGenericExtendedModel(final Map<String, Map<String, Integer>> values) { + super(values); + } + + @Override + public String toString() { + return "MapMapGenericExtendedModel{} " + super.toString(); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/MapMapGenericModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/MapMapGenericModel.java new file mode 100644 index 00000000000..190291d74d0 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/MapMapGenericModel.java @@ -0,0 +1,64 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.bson.codecs.pojo.entities; + +import java.util.Map; +import java.util.Objects; + +public class MapMapGenericModel<T> { + + private Map<String, Map<String, T>> values; + + public MapMapGenericModel() { + } + + public MapMapGenericModel(final Map<String, Map<String, T>> values) { + this.values = values; + } + + public Map<String, Map<String, T>> getValues() { + return values; + } + + public void setValues(final Map<String, Map<String, T>> values) { + this.values = values; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + MapMapGenericModel<?> that = (MapMapGenericModel<?>) o; + return Objects.equals(values, that.values); + } + + @Override + public int hashCode() { + return Objects.hash(values); + } + + @Override + public String toString() { + return "MapMapGenericModel{" + + "values=" + values + + '}'; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/MapStringObjectModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/MapStringObjectModel.java new file mode 100644 index 00000000000..45bd893f918 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/MapStringObjectModel.java @@ -0,0 +1,57 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +import java.util.Map; + +public class MapStringObjectModel { + private Map<String, Object> map; + + public MapStringObjectModel() { + } + + public MapStringObjectModel(final Map<String, Object> map) { + this.map = map; + } + + public Map<String, Object> getMap() { + return map; + } + + public void setMap(final Map<String, Object> map) { + this.map = map; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + MapStringObjectModel that = (MapStringObjectModel) o; + + return getMap() != null ? getMap().equals(that.getMap()) : that.getMap() == null; + } + + @Override + public int hashCode() { + return getMap() != null ? getMap().hashCode() : 0; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/MultipleBoundsLevel1.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/MultipleBoundsLevel1.java new file mode 100644 index 00000000000..b61b3243745 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/MultipleBoundsLevel1.java @@ -0,0 +1,69 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.bson.codecs.pojo.entities; + +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public class MultipleBoundsLevel1<T> extends MultipleBoundsLevel2<T> { + private T level1; + + public MultipleBoundsLevel1() { + } + + public MultipleBoundsLevel1(final Map<String, T> level3, final List<T> level2, final T level1) { + super(level3, level2); + this.level1 = level1; + } + + public T getLevel1() { + return level1; + } + + public void setLevel1(final T level1) { + this.level1 = level1; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + MultipleBoundsLevel1<?> that = (MultipleBoundsLevel1<?>) o; + + if (!Objects.equals(level1, that.level1)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + (level1 != null ? level1.hashCode() : 0); + return result; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/MultipleBoundsLevel2.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/MultipleBoundsLevel2.java new file mode 100644 index 00000000000..6be7ea478ac --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/MultipleBoundsLevel2.java @@ -0,0 +1,69 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public class MultipleBoundsLevel2<T> extends MultipleBoundsLevel3<T> { + private List<T> level2; + + public MultipleBoundsLevel2() { + } + + public MultipleBoundsLevel2(final Map<String, T> level3, final List<T> level2) { + super(level3); + this.level2 = level2; + } + + public List<T> getLevel2() { + return level2; + } + + public void setLevel2(final List<T> level2) { + this.level2 = level2; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + MultipleBoundsLevel2<?> that = (MultipleBoundsLevel2<?>) o; + + if (!Objects.equals(level2, that.level2)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + (level2 != null ? level2.hashCode() : 0); + return result; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/MultipleBoundsLevel3.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/MultipleBoundsLevel3.java new file mode 100644 index 00000000000..b3f7ff03fef --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/MultipleBoundsLevel3.java @@ -0,0 +1,62 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +import java.util.Map; +import java.util.Objects; + +public class MultipleBoundsLevel3<T> { + private Map<String, T> level3; + + public MultipleBoundsLevel3() { + } + + public MultipleBoundsLevel3(final Map<String, T> level3) { + this.level3 = level3; + } + + public Map<String, T> getLevel3() { + return level3; + } + + public void setLevel3(final Map<String, T> level3) { + this.level3 = level3; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + MultipleBoundsLevel3<?> that = (MultipleBoundsLevel3<?>) o; + + if (!Objects.equals(level3, that.level3)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return level3 != null ? level3.hashCode() : 0; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/MultipleBoundsModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/MultipleBoundsModel.java new file mode 100644 index 00000000000..ae3910f8819 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/MultipleBoundsModel.java @@ -0,0 +1,30 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +import java.util.List; +import java.util.Map; + +public final class MultipleBoundsModel extends MultipleBoundsLevel1<Double> { + + public MultipleBoundsModel() { + } + + public MultipleBoundsModel(final Map<String, Double> level3, final List<Double> level2, final Double level1) { + super(level3, level2, level1); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/MultipleLevelGenericModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/MultipleLevelGenericModel.java new file mode 100644 index 00000000000..dc25ee57b29 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/MultipleLevelGenericModel.java @@ -0,0 +1,83 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.bson.codecs.pojo.entities; + +public final class MultipleLevelGenericModel<A> { + + private A stringField; + private GenericTreeModel<A, Integer> nested; + + public MultipleLevelGenericModel() { + } + + public MultipleLevelGenericModel(final A stringField, final GenericTreeModel<A, Integer> nested) { + this.stringField = stringField; + this.nested = nested; + } + + public A getStringField() { + return stringField; + } + + public void setStringField(final A stringField) { + this.stringField = stringField; + } + + public GenericTreeModel<A, Integer> getNested() { + return nested; + } + + public void setNested(final GenericTreeModel<A, Integer> nested) { + this.nested = nested; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + MultipleLevelGenericModel<?> that = (MultipleLevelGenericModel<?>) o; + + if (getStringField() != null ? !getStringField().equals(that.getStringField()) : that.getStringField() != null) { + return false; + } + if (getNested() != null ? !getNested().equals(that.getNested()) : that.getNested() != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = getStringField() != null ? getStringField().hashCode() : 0; + result = 31 * result + (getNested() != null ? getNested().hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "MultipleLevelGenericModel{" + + "stringField=" + stringField + + ", nested=" + nested + + "}"; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedFieldReusingClassTypeParameter.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedFieldReusingClassTypeParameter.java new file mode 100644 index 00000000000..114eead120e --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedFieldReusingClassTypeParameter.java @@ -0,0 +1,53 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +import java.util.Objects; + +public final class NestedFieldReusingClassTypeParameter<T> { + public PropertyReusingClassTypeParameter<T> nested; + + public NestedFieldReusingClassTypeParameter() { + } + + public NestedFieldReusingClassTypeParameter(final PropertyReusingClassTypeParameter<T> nested) { + this.nested = nested; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + NestedFieldReusingClassTypeParameter<?> that = (NestedFieldReusingClassTypeParameter<?>) o; + + if (!Objects.equals(nested, that.nested)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return nested != null ?
 nested.hashCode() : 0; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedGenericHolderFieldWithMultipleTypeParamsModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedGenericHolderFieldWithMultipleTypeParamsModel.java new file mode 100644 index 00000000000..fd5aefcd0b1 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedGenericHolderFieldWithMultipleTypeParamsModel.java @@ -0,0 +1,71 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +import org.bson.codecs.pojo.annotations.BsonProperty; + +public final class NestedGenericHolderFieldWithMultipleTypeParamsModel { + + @BsonProperty(useDiscriminator = false) + private GenericHolderModel<FieldWithMultipleTypeParamsModel<Integer, String, Long>> nested; + + public NestedGenericHolderFieldWithMultipleTypeParamsModel() { + } + + public NestedGenericHolderFieldWithMultipleTypeParamsModel( + final GenericHolderModel<FieldWithMultipleTypeParamsModel<Integer, String, Long>> nested) { + this.nested = nested; + } + + public GenericHolderModel<FieldWithMultipleTypeParamsModel<Integer, String, Long>> getNested() { + return nested; + } + + public void setNested(final GenericHolderModel<FieldWithMultipleTypeParamsModel<Integer, String, Long>> nested) { + this.nested = nested; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + NestedGenericHolderFieldWithMultipleTypeParamsModel that = (NestedGenericHolderFieldWithMultipleTypeParamsModel) o; + + if (getNested() != null ? !getNested().equals(that.getNested()) : that.getNested() != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return getNested() != null ? getNested().hashCode() : 0; + } + + @Override + public String toString() { + return "NestedGenericHolderFieldWithMultipleTypeParamsModel{" + + "nested=" + nested + + "}"; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedGenericHolderMapModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedGenericHolderMapModel.java new file mode 100644 index 00000000000..50c8cf30ec6 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedGenericHolderMapModel.java @@ -0,0 +1,70 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.bson.codecs.pojo.entities; + +import java.util.Map; + +public final class NestedGenericHolderMapModel { + + private GenericHolderModel<Map<String, SimpleModel>> nested; + + public NestedGenericHolderMapModel() { + } + + public NestedGenericHolderMapModel(final GenericHolderModel<Map<String, SimpleModel>> nested) { + this.nested = nested; + } + + public GenericHolderModel<Map<String, SimpleModel>> getNested() { + return nested; + } + + public void setNested(final GenericHolderModel<Map<String, SimpleModel>> nested) { + this.nested = nested; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (!(o instanceof NestedGenericHolderMapModel)) { + return false; + } + + NestedGenericHolderMapModel that = (NestedGenericHolderMapModel) o; + + if (getNested() != null ? !getNested().equals(that.getNested()) : that.getNested() != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = getNested() != null ? getNested().hashCode() : 0; + return result; + } + + @Override + public String toString() { + return "NestedGenericHolderMapModel{" + + "nested=" + nested + + "}"; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedGenericHolderModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedGenericHolderModel.java new file mode 100644 index 00000000000..0979a25389c --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedGenericHolderModel.java @@ -0,0 +1,59 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +public final class NestedGenericHolderModel { + + private GenericHolderModel<String> nested; + + public NestedGenericHolderModel() { + } + + public NestedGenericHolderModel(final GenericHolderModel<String> nested) { + this.nested = nested; + } + + public GenericHolderModel<String> getNested() { + return nested; + } + + public void setNested(final GenericHolderModel<String> nested) { + this.nested = nested; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (!(o instanceof NestedGenericHolderModel)) { + return false; + } + + NestedGenericHolderModel that = (NestedGenericHolderModel) o; + + if (getNested() != null ? !getNested().equals(that.getNested()) : that.getNested() != null) { + return false; + } + return true; + } + + @Override + public int hashCode() { + return getNested() != null ? getNested().hashCode() : 0; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedGenericHolderSimpleGenericsModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedGenericHolderSimpleGenericsModel.java new file mode 100644 index 00000000000..0ce36b8a8cb --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedGenericHolderSimpleGenericsModel.java @@ -0,0 +1,71 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +import java.util.List; +import java.util.Map; +import java.util.Objects; + +public final class NestedGenericHolderSimpleGenericsModel { + private GenericHolderModel<SimpleGenericsModel<Integer, List<Integer>, Map<String, Integer>>> nested; + + public NestedGenericHolderSimpleGenericsModel() { + } + + public NestedGenericHolderSimpleGenericsModel( + final GenericHolderModel<SimpleGenericsModel<Integer, List<Integer>, Map<String, Integer>>> nested) { + this.nested = nested; + } + + public GenericHolderModel<SimpleGenericsModel<Integer, List<Integer>, Map<String, Integer>>> getNested() { + return nested; + } + + public void setNested(final GenericHolderModel<SimpleGenericsModel<Integer, List<Integer>, Map<String, Integer>>> nested) { + this.nested = nested; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (!(o instanceof NestedGenericHolderSimpleGenericsModel)) { + return false; + } + + NestedGenericHolderSimpleGenericsModel that = (NestedGenericHolderSimpleGenericsModel) o; + + if (!Objects.equals(nested, that.nested)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return nested != null ? nested.hashCode() : 0; + } + + @Override + public String toString() { + return "NestedGenericHolderSimpleGenericsModel{" + + "nested=" + nested + + "}"; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedGenericTreeModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedGenericTreeModel.java new file mode 100644 index 00000000000..17b384795ed --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedGenericTreeModel.java @@ -0,0 +1,82 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +public final class NestedGenericTreeModel { + private Integer intField; + private GenericTreeModel<String, Integer> nested; + + public NestedGenericTreeModel() { + } + + public NestedGenericTreeModel(final Integer intField, final GenericTreeModel<String, Integer> nested) { + this.intField = intField; + this.nested = nested; + } + + public Integer getIntField() { + return intField; + } + + public void setIntField(final Integer intField) { + this.intField = intField; + } + + public GenericTreeModel<String, Integer> getNested() { + return nested; + } + + public void setNested(final GenericTreeModel<String, Integer> nested) { + this.nested = nested; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + NestedGenericTreeModel that = (NestedGenericTreeModel) o; + + if (getIntField() != null ?
!getIntField().equals(that.getIntField()) : that.getIntField() != null) { + return false; + } + if (getNested() != null ? !getNested().equals(that.getNested()) : that.getNested() != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = getIntField() != null ? getIntField().hashCode() : 0; + result = 31 * result + (getNested() != null ? getNested().hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "NestedGenericTreeModel{" + + "intField=" + intField + + ", nested=" + nested + + "}"; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedMultipleLevelGenericModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedMultipleLevelGenericModel.java new file mode 100644 index 00000000000..ca8af3a3113 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedMultipleLevelGenericModel.java @@ -0,0 +1,82 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +public final class NestedMultipleLevelGenericModel { + private Integer intField; + private MultipleLevelGenericModel<String> nested; + + public NestedMultipleLevelGenericModel() { + } + + public NestedMultipleLevelGenericModel(final Integer intField, final MultipleLevelGenericModel<String> nested) { + this.intField = intField; + this.nested = nested; + } + + public Integer getIntField() { + return intField; + } + + public void setIntField(final Integer intField) { + this.intField = intField; + } + + public MultipleLevelGenericModel<String> getNested() { + return nested; + } + + public void setNested(final MultipleLevelGenericModel<String> nested) { + this.nested = nested; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + NestedMultipleLevelGenericModel that = (NestedMultipleLevelGenericModel) o; + + if (getIntField() != null ? !getIntField().equals(that.getIntField()) : that.getIntField() != null) { + return false; + } + if (getNested() != null ? !getNested().equals(that.getNested()) : that.getNested() != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = getIntField() != null ? getIntField().hashCode() : 0; + result = 31 * result + (getNested() != null ? getNested().hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "NestedMultipleLevelGenericModel{" + + "intField=" + intField + + ", nested=" + nested + + "}"; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedReusedGenericsModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedReusedGenericsModel.java new file mode 100644 index 00000000000..f3756f5b4bc --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedReusedGenericsModel.java @@ -0,0 +1,74 @@ +/* + * Copyright 2008-present MongoDB, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +import java.util.List; + +public final class NestedReusedGenericsModel { + + private ReusedGenericsModel<Long, List<SimpleModel>, String> nested; + + public NestedReusedGenericsModel() { + } + + public NestedReusedGenericsModel(final ReusedGenericsModel<Long, List<SimpleModel>, String> nested) { + this.nested = nested; + } + + /** + * Returns the nested + * + * @return the nested + */ + public ReusedGenericsModel<Long, List<SimpleModel>, String> getNested() { + return nested; + } + + public void setNested(final ReusedGenericsModel<Long, List<SimpleModel>, String> nested) { + this.nested = nested; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (!(o instanceof NestedReusedGenericsModel)) { + return false; + } + + NestedReusedGenericsModel that = (NestedReusedGenericsModel) o; + + if (getNested() != null ? !getNested().equals(that.getNested()) : that.getNested() != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return getNested() != null ? getNested().hashCode() : 0; + } + + @Override + public String toString() { + return "NestedReusedGenericsModel{" + + "nested=" + nested + + "}"; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedSelfReferentialGenericHolderModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedSelfReferentialGenericHolderModel.java new file mode 100644 index 00000000000..9ff6f8c5d68 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedSelfReferentialGenericHolderModel.java @@ -0,0 +1,68 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.bson.codecs.pojo.entities; + +import java.util.Objects; + +public final class NestedSelfReferentialGenericHolderModel { + private NestedSelfReferentialGenericModel<Boolean, Long, Double> nested; + + public NestedSelfReferentialGenericHolderModel() { + } + + public NestedSelfReferentialGenericHolderModel(final NestedSelfReferentialGenericModel<Boolean, Long, Double> nested) { + this.nested = nested; + } + + public NestedSelfReferentialGenericModel<Boolean, Long, Double> getNested() { + return nested; + } + + public void setNested(final NestedSelfReferentialGenericModel<Boolean, Long, Double> nested) { + this.nested = nested; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + NestedSelfReferentialGenericHolderModel that = (NestedSelfReferentialGenericHolderModel) o; + + if (!Objects.equals(nested, that.nested)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return nested != null ? nested.hashCode() : 0; + } + + @Override + public String toString() { + return "NestedSelfReferentialGenericHolderModel{" + + "nested=" + nested + + "}"; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedSelfReferentialGenericModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedSelfReferentialGenericModel.java new file mode 100644 index 00000000000..01fb1685ae5 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedSelfReferentialGenericModel.java @@ -0,0 +1,130 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.bson.codecs.pojo.entities; + +import java.util.Objects; + +public final class NestedSelfReferentialGenericModel<T, V, Z> { + private T t; + private V v; + private Z z; + private SelfReferentialGenericModel<T, V> selfRef1; + private SelfReferentialGenericModel<T, Z> selfRef2; + + public NestedSelfReferentialGenericModel() { + } + + public NestedSelfReferentialGenericModel(final T t, final V v, final Z z, final SelfReferentialGenericModel<T, V> selfRef1, + final SelfReferentialGenericModel<T, Z> selfRef2) { + this.t = t; + this.v = v; + this.z = z; + this.selfRef1 = selfRef1; + this.selfRef2 = selfRef2; + } + + public T getT() { + return t; + } + + public void setT(final T t) { + this.t = t; + } + + public V getV() { + return v; + } + + public void setV(final V v) { + this.v = v; + } + + public Z getZ() { + return z; + } + + public void setZ(final Z z) { + this.z = z; + } + + public SelfReferentialGenericModel<T, V> getSelfRef1() { + return selfRef1; + } + + public void setSelfRef1(final SelfReferentialGenericModel<T, V> selfRef1) { + this.selfRef1 = selfRef1; + } + + public SelfReferentialGenericModel<T, Z> getSelfRef2() { + return selfRef2; + } + + public void setSelfRef2(final SelfReferentialGenericModel<T, Z> selfRef2) { + this.selfRef2 = selfRef2; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + NestedSelfReferentialGenericModel<?, ?, ?> that = (NestedSelfReferentialGenericModel<?, ?, ?>) o; + + if (!Objects.equals(t, that.t)) { + return false; + } + if (!Objects.equals(v, that.v)) { + return false; + } + if (!Objects.equals(z, that.z)) { + return false; + } + if (!Objects.equals(selfRef1, that.selfRef1)) { + return false; + } + if (!Objects.equals(selfRef2, that.selfRef2)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = t != null ? t.hashCode() : 0; + result = 31 * result + (v != null ? v.hashCode() : 0); + result = 31 * result + (z != null ? z.hashCode() : 0); + result = 31 * result + (selfRef1 != null ? selfRef1.hashCode() : 0); + result = 31 * result + (selfRef2 != null ? selfRef2.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "NestedSelfReferentialGenericModel{" + + "t=" + t + + ", v=" + v + + ", z=" + z + + ", selfRef1=" + selfRef1 + + ", selfRef2=" + selfRef2 + + "}"; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedSimpleIdModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedSimpleIdModel.java new file mode 100644 index 00000000000..9a5462acd30 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/NestedSimpleIdModel.java @@ -0,0 +1,84 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.bson.codecs.pojo.entities; + +import java.util.Objects; + +public class NestedSimpleIdModel { + private String id; + private SimpleIdModel nestedSimpleIdModel; + + public NestedSimpleIdModel(){ + } + + public NestedSimpleIdModel(final SimpleIdModel nestedSimpleIdModel) { + this(null, nestedSimpleIdModel); + } + + public NestedSimpleIdModel(final String id, final SimpleIdModel nestedSimpleIdModel) { + this.id = id; + this.nestedSimpleIdModel = nestedSimpleIdModel; + } + + public String getId() { + return id; + } + + public void setId(final String id) { + this.id = id; + } + + public SimpleIdModel getNestedSimpleIdModel() { + return nestedSimpleIdModel; + } + + public void setNestedSimpleIdModel(final SimpleIdModel nestedSimpleIdModel) { + this.nestedSimpleIdModel = nestedSimpleIdModel; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + NestedSimpleIdModel that = (NestedSimpleIdModel) o; + + if (!Objects.equals(id, that.id)) { + return false; + } + return Objects.equals(nestedSimpleIdModel, that.nestedSimpleIdModel); + } + + @Override + public int hashCode() { + int result = id != null ? id.hashCode() : 0; + result = 31 * result + (nestedSimpleIdModel != null ? nestedSimpleIdModel.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "NestedSimpleIdModel{" + + "id='" + id + '\'' + + ", nestedSimpleIdModel=" + nestedSimpleIdModel + + '}'; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/Optional.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/Optional.java new file mode 100644 index 00000000000..e70b47a08cf --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/Optional.java @@ -0,0 +1,103 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.bson.codecs.pojo.entities;
+
+
+import java.util.NoSuchElementException;
+
+public abstract class Optional<T> {
+
+    private static final Optional<Object> NONE = new Optional<Object>() {
+        @Override
+        public Object get() {
+            throw new NoSuchElementException(".get call on None!");
+        }
+
+        @Override
+        public boolean isEmpty() {
+            return true;
+        }
+    };
+
+    @SuppressWarnings("unchecked")
+    public static <T> Optional<T> empty() {
+        return (Optional<T>) NONE;
+    }
+
+    @SuppressWarnings("unchecked")
+    public static <T> Optional<T> of(final T it) {
+        if (it == null) {
+            return (Optional<T>) Optional.NONE;
+        } else {
+            return new Optional.Some<>(it);
+        }
+    }
+
+    public abstract T get();
+
+    public abstract boolean isEmpty();
+
+    @Override
+    public String toString() {
+        return "None";
+    }
+
+    public boolean isDefined() {
+        return !isEmpty();
+    }
+
+    public static class Some<T> extends Optional<T> {
+        private final T value;
+
+        Some(final T value) {
+            this.value = value;
+        }
+
+        @Override
+        public T get() {
+            return value;
+        }
+
+        @Override
+        public boolean isEmpty() {
+            return false;
+        }
+
+        @Override
+        public String toString() {
+            return String.format("Some(%s)", value);
+        }
+
+        @Override
+        public boolean equals(final Object o) {
+            if (this == o) {
+                return true;
+            }
+            if (o == null || getClass() != o.getClass()) {
+                return false;
+            }
+
+            Some<?> some = (Some<?>) o;
+            return value.equals(some.value);
+        }
+
+        @Override
+        public int hashCode() {
+            return value.hashCode();
+        }
+    }
+}
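A quick usage sketch of this test-local Optional (distinct from java.util.Optional); the literals are illustrative:

    Optional<String> some = Optional.of("value");    // Some("value")
    Optional<String> none = Optional.empty();        // the shared NONE instance
    assert some.isDefined() && some.get().equals("value");
    assert none.isEmpty();                           // none.get() throws NoSuchElementException
    assert Optional.of(null).isEmpty();              // of(null) collapses to NONE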
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/OptionalPropertyCodecProvider.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/OptionalPropertyCodecProvider.java
new file mode 100644
index 00000000000..12fa603ccc0
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/OptionalPropertyCodecProvider.java
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.bson.codecs.pojo.entities;
+
+import org.bson.BsonReader;
+import org.bson.BsonWriter;
+import org.bson.codecs.Codec;
+import org.bson.codecs.DecoderContext;
+import org.bson.codecs.EncoderContext;
+import org.bson.codecs.pojo.PropertyCodecProvider;
+import org.bson.codecs.pojo.PropertyCodecRegistry;
+import org.bson.codecs.pojo.TypeWithTypeParameters;
+
+public class OptionalPropertyCodecProvider implements PropertyCodecProvider {
+    @Override
+    @SuppressWarnings({"rawtypes", "unchecked"})
+    public <T> Codec<T> get(final TypeWithTypeParameters<T> type, final PropertyCodecRegistry registry) {
+        if (Optional.class.isAssignableFrom(type.getType()) && type.getTypeParameters().size() == 1) {
+            Codec<?> valueCodec = registry.get(type.getTypeParameters().get(0));
+            return new OptionalCodec(type.getType(), valueCodec);
+        } else {
+            return null;
+        }
+    }
+
+    private static final class OptionalCodec<T> implements Codec<Optional<T>> {
+        private final Class<Optional<T>> encoderClass;
+        private final Codec<T> codec;
+
+        private OptionalCodec(final Class<Optional<T>> encoderClass, final Codec<T> codec) {
+            this.encoderClass = encoderClass;
+            this.codec = codec;
+        }
+
+        @Override
+        public void encode(final BsonWriter writer, final Optional<T> optionalValue, final EncoderContext encoderContext) {
+            if (optionalValue == null || optionalValue.isEmpty()) {
+                writer.writeNull();
+            } else {
+                codec.encode(writer, optionalValue.get(), encoderContext);
+            }
+        }
+
+        @Override
+        public Optional<T> decode(final BsonReader reader, final DecoderContext context) {
+            return Optional.of(codec.decode(reader, context));
+        }
+
+        @Override
+        public Class<Optional<T>> getEncoderClass() {
+            return encoderClass;
+        }
+    }
+}
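For context, a hedged sketch of how a PropertyCodecProvider like this one is typically wired into a PojoCodecProvider (CodecRegistries, ValueCodecProvider, and PojoCodecProvider are the bson library's public API; the variable name is arbitrary):

    // Registers the provider so Optional<T> properties resolve to OptionalCodec.
    CodecRegistry pojoRegistry = CodecRegistries.fromProviders(
            new ValueCodecProvider(),
            PojoCodecProvider.builder()
                    .automatic(true)
                    .register(new OptionalPropertyCodecProvider())
                    .build());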
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/PrimitivesModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/PrimitivesModel.java
new file mode 100644
index 00000000000..bc8dcc633ea
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/PrimitivesModel.java
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+public final class PrimitivesModel {
+
+    private boolean myBoolean;
+    private byte myByte;
+    private char myCharacter;
+    private double myDouble;
+    private float myFloat;
+    private int myInteger;
+    private long myLong;
+    private short myShort;
+
+    public PrimitivesModel() {
+    }
+
+    public PrimitivesModel(final boolean myBoolean, final byte myByte, final char myCharacter, final double myDouble,
+                           final float myFloat, final int myInteger, final long myLong, final short myShort) {
+        this.myBoolean = myBoolean;
+        this.myByte = myByte;
+        this.myCharacter = myCharacter;
+        this.myDouble = myDouble;
+        this.myFloat = myFloat;
+        this.myInteger = myInteger;
+        this.myLong = myLong;
+        this.myShort = myShort;
+    }
+
+    public boolean isMyBoolean() {
+        return myBoolean;
+    }
+
+    public void setMyBoolean(final boolean myBoolean) {
+        this.myBoolean = myBoolean;
+    }
+
+    public byte getMyByte() {
+        return myByte;
+    }
+
+    public void setMyByte(final byte myByte) {
+        this.myByte = myByte;
+    }
+
+    public char getMyCharacter() {
+        return myCharacter;
+    }
+
+    public void setMyCharacter(final char myCharacter) {
+        this.myCharacter = myCharacter;
+    }
+
+    public double getMyDouble() {
+        return myDouble;
+    }
+
+    public void setMyDouble(final double myDouble) {
+        this.myDouble = myDouble;
+    }
+
+    public float getMyFloat() {
+        return myFloat;
+    }
+
+    public void setMyFloat(final float myFloat) {
+        this.myFloat = myFloat;
+    }
+
+    public int getMyInteger() {
+        return myInteger;
+    }
+
+    public void setMyInteger(final int myInteger) {
+        this.myInteger = myInteger;
+    }
+
+    public long getMyLong() {
+        return myLong;
+    }
+
+    public void setMyLong(final long myLong) {
+        this.myLong = myLong;
+    }
+
+    public short getMyShort() {
+        return myShort;
+    }
+
+    public void setMyShort(final short myShort) {
+        this.myShort = myShort;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        PrimitivesModel that = (PrimitivesModel) o;
+
+        if (isMyBoolean() != that.isMyBoolean()) {
+            return false;
+        }
+        if (getMyByte() != that.getMyByte()) {
+            return false;
+        }
+        if (getMyCharacter() != that.getMyCharacter()) {
+            return false;
+        }
+        if (Double.compare(that.getMyDouble(), getMyDouble()) != 0) {
+            return false;
+        }
+        if (Float.compare(that.getMyFloat(), getMyFloat()) != 0) {
+            return false;
+        }
+        if (getMyInteger() != that.getMyInteger()) {
+            return false;
+        }
+        if (getMyLong() != that.getMyLong()) {
+            return false;
+        }
+        if (getMyShort() != that.getMyShort()) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result;
+        long temp;
+        result = (isMyBoolean() ? 1 : 0);
+        result = 31 * result + (int) getMyByte();
+        result = 31 * result + (int) getMyCharacter();
+        temp = Double.doubleToLongBits(getMyDouble());
+        result = 31 * result + (int) (temp ^ (temp >>> 32));
+        result = 31 * result + (getMyFloat() != +0.0f ? Float.floatToIntBits(getMyFloat()) : 0);
+        result = 31 * result + getMyInteger();
+        result = 31 * result + (int) (getMyLong() ^ (getMyLong() >>> 32));
+        result = 31 * result + (int) getMyShort();
+        return result;
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/PrivateSetterFieldModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/PrivateSetterFieldModel.java
new file mode 100644
index 00000000000..8580aec4dec
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/PrivateSetterFieldModel.java
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+import java.util.List;
+
+public final class PrivateSetterFieldModel {
+
+    private Integer integerField;
+    private String stringField;
+    private List<String> listField;
+
+    public PrivateSetterFieldModel() {
+    }
+
+    public PrivateSetterFieldModel(final Integer integerField, final String stringField, final List<String> listField) {
+        this.integerField = integerField;
+        this.stringField = stringField;
+        this.listField = listField;
+    }
+
+    public String getSomeMethod() {
+        return "some method";
+    }
+
+    public Integer getIntegerField() {
+        return integerField;
+    }
+
+    public String getStringField() {
+        return stringField;
+    }
+
+    public List<String> getListField() {
+        return listField;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        PrivateSetterFieldModel that = (PrivateSetterFieldModel) o;
+
+        if (getIntegerField() != null ? !getIntegerField().equals(that.getIntegerField()) : that.getIntegerField() != null) {
+            return false;
+        }
+        if (getStringField() != null ? !getStringField().equals(that.getStringField()) : that.getStringField() != null) {
+            return false;
+        }
+        return getListField() != null ? getListField().equals(that.getListField()) : that.getListField() == null;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = getIntegerField() != null ? getIntegerField().hashCode() : 0;
+        result = 31 * result + (getStringField() != null ? getStringField().hashCode() : 0);
+        result = 31 * result + (getListField() != null ? getListField().hashCode() : 0);
+        return result;
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/PropertyReusingClassTypeParameter.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/PropertyReusingClassTypeParameter.java
new file mode 100644
index 00000000000..0e1d5d68c12
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/PropertyReusingClassTypeParameter.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+import java.util.Objects;
+
+public final class PropertyReusingClassTypeParameter<T> {
+
+    public GenericTreeModel<String, T> tree;
+
+    public PropertyReusingClassTypeParameter() {
+    }
+
+    public PropertyReusingClassTypeParameter(final GenericTreeModel<String, T> tree) {
+        this.tree = tree;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        PropertyReusingClassTypeParameter<?> that = (PropertyReusingClassTypeParameter<?>) o;
+
+        if (!Objects.equals(tree, that.tree)) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        return tree != null ? tree.hashCode() : 0;
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/PropertySelectionModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/PropertySelectionModel.java
new file mode 100644
index 00000000000..c7e44bcb776
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/PropertySelectionModel.java
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+import org.bson.codecs.pojo.annotations.BsonIgnore;
+
+public final class PropertySelectionModel {
+    private String stringField = "stringField";
+
+    private final String finalStringField = "finalStringField";
+
+    @BsonIgnore
+    private String ignoredStringField = "ignoreMe";
+
+    private String anotherIgnoredStringField = "ignoreMe";
+
+    private static final String staticFinalStringField = "staticFinalStringField";
+
+    private static final String staticStringField = "staticStringField";
+
+    private final transient String transientString = "transientString";
+
+    public PropertySelectionModel() {
+    }
+
+    public String getStringField() {
+        return stringField;
+    }
+
+    public void setStringField(final String stringField) {
+        this.stringField = stringField;
+    }
+
+    public String getFinalStringField() {
+        return finalStringField;
+    }
+
+    public void setFinalStringField(final String finalStringField) {
+        throw new IllegalStateException("Not allowed");
+    }
+
+    public static String getStaticFinalStringField() {
+        return staticFinalStringField;
+    }
+
+    public void setStaticFinalStringField(final String staticFinalStringField) {
+        throw new IllegalStateException("Not allowed");
+    }
+
+    public static String getStaticStringField() {
+        return staticStringField;
+    }
+
+    public static void setStaticStringField(final String staticStringField) {
+        throw new IllegalStateException("Not allowed");
+    }
+
+    public String getTransientString() {
+        return transientString;
+    }
+
+    public void setTransientString(final String transientString) {
+        throw new IllegalStateException("Not allowed");
+    }
+
+    public String getIgnoredStringField() {
+        return ignoredStringField;
+    }
+
+    public void setIgnoredStringField(final String ignoredStringField) {
+        this.ignoredStringField = ignoredStringField;
+    }
+
+    @BsonIgnore
+    public String getAnotherIgnoredStringField() {
+        return anotherIgnoredStringField;
+    }
+
+    @BsonIgnore
+    public void setAnotherIgnoredStringField(final String anotherIgnoredStringField) {
+        this.anotherIgnoredStringField = anotherIgnoredStringField;
+    }
+
+    public int getfoo() {
+        return 42;
+    }
+
+    public void setfoo(final int foo) {
+    }
+
+    public void is() {
+    }
+
+    public void isfoo() {
+    }
+
+    public int get() {
+        return 42;
+    }
+
+    public void set(final int foo) {
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        PropertySelectionModel that = (PropertySelectionModel) o;
+
+        if (getStringField() != null ? !getStringField().equals(that.getStringField()) : that.getStringField() != null) {
+            return false;
+        }
+        if (getFinalStringField() != null ? !getFinalStringField().equals(that.getFinalStringField())
+                : that.getFinalStringField() != null) {
+            return false;
+        }
+        if (getTransientString() != null ? !getTransientString().equals(that.getTransientString()) : that.getTransientString() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = getStringField() != null ? getStringField().hashCode() : 0;
+        result = 31 * result + (getFinalStringField() != null ? getFinalStringField().hashCode() : 0);
+        result = 31 * result + (getTransientString() != null ? getTransientString().hashCode() : 0);
+        return result;
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/PropertyWithMultipleTypeParamsModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/PropertyWithMultipleTypeParamsModel.java
new file mode 100644
index 00000000000..c0dbe349b34
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/PropertyWithMultipleTypeParamsModel.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+import org.bson.codecs.pojo.annotations.BsonDiscriminator;
+import org.bson.codecs.pojo.annotations.BsonProperty;
+
+@BsonDiscriminator("PropertyWithMultipleTypeParamsModel")
+public final class PropertyWithMultipleTypeParamsModel<V, T, Z> {
+
+    @BsonProperty(useDiscriminator = true)
+    private SimpleGenericsModel<T, V, Z> simpleGenericsModel;
+
+    public PropertyWithMultipleTypeParamsModel() {
+    }
+
+    public PropertyWithMultipleTypeParamsModel(final SimpleGenericsModel<T, V, Z> simpleGenericsModel) {
+        this.simpleGenericsModel = simpleGenericsModel;
+    }
+
+    public SimpleGenericsModel<T, V, Z> getSimpleGenericsModel() {
+        return simpleGenericsModel;
+    }
+
+    public void setSimpleGenericsModel(final SimpleGenericsModel<T, V, Z> simpleGenericsModel) {
+        this.simpleGenericsModel = simpleGenericsModel;
+    }
+
+    @Override
+    public String toString() {
+        return "PropertyWithMultipleTypeParamsModel{"
+                + "nested=" + simpleGenericsModel
+                + "}";
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (!(o instanceof PropertyWithMultipleTypeParamsModel)) {
+            return false;
+        }
+
+        PropertyWithMultipleTypeParamsModel<?, ?, ?> that = (PropertyWithMultipleTypeParamsModel<?, ?, ?>) o;
+
+        if (getSimpleGenericsModel() != null ? !getSimpleGenericsModel().equals(that.getSimpleGenericsModel())
+                : that.getSimpleGenericsModel() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        return getSimpleGenericsModel() != null ? getSimpleGenericsModel().hashCode() : 0;
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/ReusedGenericsModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/ReusedGenericsModel.java
new file mode 100644
index 00000000000..66d31ab4721
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/ReusedGenericsModel.java
@@ -0,0 +1,214 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+public final class ReusedGenericsModel<A, B, C> {
+
+    private A field1;
+    private B field2;
+    private C field3;
+    private Integer field4;
+    private C field5;
+    private B field6;
+    private A field7;
+    private String field8;
+
+    public ReusedGenericsModel() {
+    }
+
+    public ReusedGenericsModel(final A field1, final B field2, final C field3, final Integer field4, final C field5, final B field6,
+                               final A field7, final String field8) {
+        this.field1 = field1;
+        this.field2 = field2;
+        this.field3 = field3;
+        this.field4 = field4;
+        this.field5 = field5;
+        this.field6 = field6;
+        this.field7 = field7;
+        this.field8 = field8;
+    }
+
+    /**
+     * Returns the field1
+     *
+     * @return the field1
+     */
+    public A getField1() {
+        return field1;
+    }
+
+    public void setField1(final A field1) {
+        this.field1 = field1;
+    }
+
+    /**
+     * Returns the field2
+     *
+     * @return the field2
+     */
+    public B getField2() {
+        return field2;
+    }
+
+    public void setField2(final B field2) {
+        this.field2 = field2;
+    }
+
+    /**
+     * Returns the field3
+     *
+     * @return the field3
+     */
+    public C getField3() {
+        return field3;
+    }
+
+    public void setField3(final C field3) {
+        this.field3 = field3;
+    }
+
+    /**
+     * Returns the field4
+     *
+     * @return the field4
+     */
+    public Integer getField4() {
+        return field4;
+    }
+
+    public void setField4(final Integer field4) {
+        this.field4 = field4;
+    }
+
+    /**
+     * Returns the field5
+     *
+     * @return the field5
+     */
+    public C getField5() {
+        return field5;
+    }
+
+    public void setField5(final C field5) {
+        this.field5 = field5;
+    }
+
+    /**
+     * Returns the field6
+     *
+     * @return the field6
+     */
+    public B getField6() {
+        return field6;
+    }
+
+    public void setField6(final B field6) {
+        this.field6 = field6;
+    }
+
+    /**
+     * Returns the field7
+     *
+     * @return the field7
+     */
+    public A getField7() {
+        return field7;
+    }
+
+    public void setField7(final A field7) {
+        this.field7 = field7;
+    }
+
+    /**
+     * Returns the field8
+     *
+     * @return the field8
+     */
+    public String getField8() {
+        return field8;
+    }
+
+    public void setField8(final String field8) {
+        this.field8 = field8;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (!(o instanceof ReusedGenericsModel)) {
+            return false;
+        }
+
+        ReusedGenericsModel<?, ?, ?> that = (ReusedGenericsModel<?, ?, ?>) o;
+
+        if (getField1() != null ? !getField1().equals(that.getField1()) : that.getField1() != null) {
+            return false;
+        }
+        if (getField2() != null ? !getField2().equals(that.getField2()) : that.getField2() != null) {
+            return false;
+        }
+        if (getField3() != null ? !getField3().equals(that.getField3()) : that.getField3() != null) {
+            return false;
+        }
+        if (getField4() != null ? !getField4().equals(that.getField4()) : that.getField4() != null) {
+            return false;
+        }
+        if (getField5() != null ? !getField5().equals(that.getField5()) : that.getField5() != null) {
+            return false;
+        }
+        if (getField6() != null ? !getField6().equals(that.getField6()) : that.getField6() != null) {
+            return false;
+        }
+        if (getField7() != null ? !getField7().equals(that.getField7()) : that.getField7() != null) {
+            return false;
+        }
+        if (getField8() != null ? !getField8().equals(that.getField8()) : that.getField8() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = getField1() != null ? getField1().hashCode() : 0;
+        result = 31 * result + (getField2() != null ? getField2().hashCode() : 0);
+        result = 31 * result + (getField3() != null ? getField3().hashCode() : 0);
+        result = 31 * result + (getField4() != null ? getField4().hashCode() : 0);
+        result = 31 * result + (getField5() != null ? getField5().hashCode() : 0);
+        result = 31 * result + (getField6() != null ? getField6().hashCode() : 0);
+        result = 31 * result + (getField7() != null ? getField7().hashCode() : 0);
+        result = 31 * result + (getField8() != null ? getField8().hashCode() : 0);
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "ReusedGenericsModel{"
+                + "field1=" + field1
+                + ", field2=" + field2
+                + ", field3=" + field3
+                + ", field4=" + field4
+                + ", field5=" + field5
+                + ", field6=" + field6
+                + ", field7=" + field7
+                + ", field8='" + field8 + "'"
+                + "}";
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/SelfReferentialGenericModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/SelfReferentialGenericModel.java
new file mode 100644
index 00000000000..2558b3fbf24
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/SelfReferentialGenericModel.java
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+import java.util.Objects;
+
+public final class SelfReferentialGenericModel<T, V> {
+    private T t;
+    private V v;
+    private SelfReferentialGenericModel<V, T> child;
+
+    public SelfReferentialGenericModel() {
+    }
+
+    public SelfReferentialGenericModel(final T t, final V v, final SelfReferentialGenericModel<V, T> child) {
+        this.t = t;
+        this.v = v;
+        this.child = child;
+    }
+
+    public T getT() {
+        return t;
+    }
+
+    public void setT(final T t) {
+        this.t = t;
+    }
+
+    public V getV() {
+        return v;
+    }
+
+    public void setV(final V v) {
+        this.v = v;
+    }
+
+    public SelfReferentialGenericModel<V, T> getChild() {
+        return child;
+    }
+
+    public void setChild(final SelfReferentialGenericModel<V, T> child) {
+        this.child = child;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        SelfReferentialGenericModel<?, ?> that = (SelfReferentialGenericModel<?, ?>) o;
+
+        if (!Objects.equals(t, that.t)) {
+            return false;
+        }
+        if (!Objects.equals(v, that.v)) {
+            return false;
+        }
+        if (!Objects.equals(child, that.child)) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = t != null ? t.hashCode() : 0;
+        result = 31 * result + (v != null ? v.hashCode() : 0);
+        result = 31 * result + (child != null ? child.hashCode() : 0);
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "SelfReferentialGenericModel{"
+                + "t=" + t
+                + ", v=" + v
+                + ", child=" + child
+                + "}";
+    }
+}
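Note the deliberately swapped parameters on the child reference (a <V, T> child inside a <T, V> model, a best-effort reading of the self-referential design); a short sketch with illustrative type arguments:

    // A <Long, String> node holds a <String, Long> child, alternating at each level.
    SelfReferentialGenericModel<String, Long> inner =
            new SelfReferentialGenericModel<>("one", 1L, null);
    SelfReferentialGenericModel<Long, String> outer =
            new SelfReferentialGenericModel<>(1L, "one", inner);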
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/ShapeHolderCircleModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/ShapeHolderCircleModel.java
new file mode 100644
index 00000000000..ca2dd40afa6
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/ShapeHolderCircleModel.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+public class ShapeHolderCircleModel extends ShapeHolderModel {
+
+    public ShapeHolderCircleModel() {
+    }
+
+    public ShapeHolderCircleModel(final ShapeModelCircle shape) {
+        super(shape);
+    }
+
+    @Override
+    public ShapeModelCircle getShape() {
+        return (ShapeModelCircle) super.getShape();
+    }
+}
+
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/ShapeHolderModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/ShapeHolderModel.java
new file mode 100644
index 00000000000..2ccd6c4f477
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/ShapeHolderModel.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+public class ShapeHolderModel {
+
+    private ShapeModelAbstract shape;
+
+    public ShapeHolderModel() {
+    }
+
+    public ShapeHolderModel(final ShapeModelAbstract shape) {
+        this.shape = shape;
+    }
+
+    public ShapeModelAbstract getShape() {
+        return shape;
+    }
+
+    public void setShape(final ShapeModelAbstract shape) {
+        this.shape = shape;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (!(o instanceof ShapeHolderModel)) {
+            return false;
+        }
+
+        ShapeHolderModel that = (ShapeHolderModel) o;
+
+        if (getShape() != null ? !getShape().equals(that.getShape()) : that.getShape() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        return getShape() != null ? getShape().hashCode() : 0;
+    }
+}
+
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/ShapeModelAbstract.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/ShapeModelAbstract.java
new file mode 100644
index 00000000000..9b0524fe26c
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/ShapeModelAbstract.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+
+import org.bson.codecs.pojo.annotations.BsonDiscriminator;
+
+@BsonDiscriminator()
+public abstract class ShapeModelAbstract {
+
+    private String color;
+
+    public ShapeModelAbstract() {
+    }
+
+    public ShapeModelAbstract(final String color) {
+        this.color = color;
+    }
+
+    public String getColor() {
+        return color;
+    }
+
+    public void setColor(final String color) {
+        this.color = color;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (!(o instanceof ShapeModelAbstract)) {
+            return false;
+        }
+
+        ShapeModelAbstract that = (ShapeModelAbstract) o;
+
+        if (getColor() != null ? !getColor().equals(that.getColor()) : that.getColor() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        return getColor() != null ? getColor().hashCode() : 0;
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/ShapeModelCircle.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/ShapeModelCircle.java
new file mode 100644
index 00000000000..d2d544e3f84
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/ShapeModelCircle.java
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+public final class ShapeModelCircle extends ShapeModelAbstract {
+
+    private Double radius;
+
+    public ShapeModelCircle() {
+    }
+
+    public ShapeModelCircle(final String color, final Double radius) {
+        super(color);
+        this.radius = radius;
+    }
+
+    public Double getRadius() {
+        return radius;
+    }
+
+    public void setRadius(final Double radius) {
+        this.radius = radius;
+    }
+
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (!(o instanceof ShapeModelCircle)) {
+            return false;
+        }
+        if (!super.equals(o)) {
+            return false;
+        }
+
+        ShapeModelCircle that = (ShapeModelCircle) o;
+
+        if (getRadius() != null ? !getRadius().equals(that.getRadius()) : that.getRadius() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = super.hashCode();
+        result = 31 * result + (getRadius() != null ? getRadius().hashCode() : 0);
+        return result;
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/ShapeModelRectangle.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/ShapeModelRectangle.java
new file mode 100644
index 00000000000..d644cea85b8
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/ShapeModelRectangle.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+public final class ShapeModelRectangle extends ShapeModelAbstract {
+
+    private Double width;
+    private Double height;
+
+    public ShapeModelRectangle() {
+    }
+
+    public ShapeModelRectangle(final String color, final Double width, final Double height) {
+        super(color);
+        this.width = width;
+        this.height = height;
+    }
+
+    public Double getWidth() {
+        return width;
+    }
+
+    public void setWidth(final Double width) {
+        this.width = width;
+    }
+
+    public Double getHeight() {
+        return height;
+    }
+
+    public void setHeight(final Double height) {
+        this.height = height;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (!(o instanceof ShapeModelRectangle)) {
+            return false;
+        }
+        if (!super.equals(o)) {
+            return false;
+        }
+
+        ShapeModelRectangle that = (ShapeModelRectangle) o;
+
+        if (getWidth() != null ? !getWidth().equals(that.getWidth()) : that.getWidth() != null) {
+            return false;
+        }
+        if (getHeight() != null ? !getHeight().equals(that.getHeight()) : that.getHeight() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = super.hashCode();
+        result = 31 * result + (getWidth() != null ? getWidth().hashCode() : 0);
+        result = 31 * result + (getHeight() != null ? getHeight().hashCode() : 0);
+        return result;
+    }
+}
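Because ShapeModelAbstract is annotated with @BsonDiscriminator, encoding through this hierarchy stores a discriminator field so the concrete subclass can be recovered on decode; a hedged sketch (the exact discriminator key and value depend on configuration, "_t" and the full class name being the driver defaults):

    ShapeHolderModel holder = new ShapeHolderModel(new ShapeModelCircle("orange", 4.2));
    // Encodes roughly as:
    // {"shape": {"_t": "org.bson.codecs.pojo.entities.ShapeModelCircle",
    //            "color": "orange", "radius": 4.2}}
    // so decoding restores a ShapeModelCircle rather than the abstract static type.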
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/SimpleEnumModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/SimpleEnumModel.java
new file mode 100644
index 00000000000..3d65dc1ea8c
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/SimpleEnumModel.java
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+import org.bson.codecs.SimpleEnum;
+
+public final class SimpleEnumModel {
+
+    private SimpleEnum myEnum;
+
+    public SimpleEnumModel() {
+    }
+
+    public SimpleEnumModel(final SimpleEnum myEnum) {
+        this.myEnum = myEnum;
+    }
+
+    /**
+     * Returns the myEnum
+     *
+     * @return the myEnum
+     */
+    public SimpleEnum getMyEnum() {
+        return myEnum;
+    }
+
+    /**
+     * Sets the myEnum
+     *
+     * @param myEnum the myEnum
+     * @return this
+     */
+    public SimpleEnumModel setMyEnum(final SimpleEnum myEnum) {
+        this.myEnum = myEnum;
+        return this;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        SimpleEnumModel that = (SimpleEnumModel) o;
+
+        if (getMyEnum() != that.getMyEnum()) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        return getMyEnum() != null ? getMyEnum().hashCode() : 0;
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/SimpleGenericsModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/SimpleGenericsModel.java
new file mode 100644
index 00000000000..4753953240e
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/SimpleGenericsModel.java
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+import java.util.List;
+import java.util.Map;
+
+public final class SimpleGenericsModel<T, V, Z> {
+    private Integer myIntegerField;
+    private T myGenericField;
+    private List<V> myListField;
+    private Map<String, Z> myMapField;
+
+    public SimpleGenericsModel() {
+    }
+
+    public SimpleGenericsModel(final Integer myIntegerField, final T myGenericField, final List<V> myListField,
+                               final Map<String, Z> myMapField) {
+        this.myIntegerField = myIntegerField;
+        this.myGenericField = myGenericField;
+        this.myListField = myListField;
+        this.myMapField = myMapField;
+    }
+
+    public Integer getMyIntegerField() {
+        return myIntegerField;
+    }
+
+    public void setMyIntegerField(final Integer myIntegerField) {
+        this.myIntegerField = myIntegerField;
+    }
+
+    public T getMyGenericField() {
+        return myGenericField;
+    }
+
+    public void setMyGenericField(final T myGenericField) {
+        this.myGenericField = myGenericField;
+    }
+
+    public List<V> getMyListField() {
+        return myListField;
+    }
+
+    public void setMyListField(final List<V> myListField) {
+        this.myListField = myListField;
+    }
+
+    public Map<String, Z> getMyMapField() {
+        return myMapField;
+    }
+
+    public void setMyMapField(final Map<String, Z> myMapField) {
+        this.myMapField = myMapField;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        SimpleGenericsModel<?, ?, ?> that = (SimpleGenericsModel<?, ?, ?>) o;
+
+        if (getMyIntegerField() != null ? !getMyIntegerField().equals(that.getMyIntegerField()) : that.getMyIntegerField() != null) {
+            return false;
+        }
+        if (getMyGenericField() != null ? !getMyGenericField().equals(that.getMyGenericField()) : that.getMyGenericField() != null) {
+            return false;
+        }
+        if (getMyListField() != null ? !getMyListField().equals(that.getMyListField()) : that.getMyListField() != null) {
+            return false;
+        }
+        if (getMyMapField() != null ? !getMyMapField().equals(that.getMyMapField()) : that.getMyMapField() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = getMyIntegerField() != null ? getMyIntegerField().hashCode() : 0;
+        result = 31 * result + (getMyGenericField() != null ? getMyGenericField().hashCode() : 0);
+        result = 31 * result + (getMyListField() != null ? getMyListField().hashCode() : 0);
+        result = 31 * result + (getMyMapField() != null ? getMyMapField().hashCode() : 0);
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "SimpleGenericsModel{"
+                + "myIntegerField=" + myIntegerField
+                + ", myGenericField=" + myGenericField
+                + ", myListField=" + myListField
+                + ", myMapField=" + myMapField
+                + "}";
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/SimpleIdImmutableModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/SimpleIdImmutableModel.java
new file mode 100644
index 00000000000..15c34c1a16c
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/SimpleIdImmutableModel.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+import org.bson.codecs.pojo.annotations.BsonCreator;
+import org.bson.codecs.pojo.annotations.BsonProperty;
+import org.bson.types.ObjectId;
+
+import java.util.Objects;
+
+public class SimpleIdImmutableModel {
+    private final ObjectId id;
+    private final Integer integerField;
+    private final String stringField;
+
+    public SimpleIdImmutableModel(final Integer integerField, final String stringField) {
+        this(null, integerField, stringField);
+    }
+
+    @BsonCreator
+    public SimpleIdImmutableModel(@BsonProperty("id") final ObjectId id,
+                                  @BsonProperty("integerField") final Integer integerField,
+                                  @BsonProperty("stringField") final String stringField) {
+        this.id = id;
+        this.integerField = integerField;
+        this.stringField = stringField;
+    }
+
+    public ObjectId getId() {
+        return id;
+    }
+
+    public Integer getIntegerField() {
+        return integerField;
+    }
+
+    public String getStringField() {
+        return stringField;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        SimpleIdImmutableModel that = (SimpleIdImmutableModel) o;
+
+        if (!Objects.equals(id, that.id)) {
+            return false;
+        }
+        if (!Objects.equals(integerField, that.integerField)) {
+            return false;
+        }
+        return Objects.equals(stringField, that.stringField);
+    }
+
+    @Override
+    public int hashCode() {
+        int result = id != null ? id.hashCode() : 0;
+        result = 31 * result + (integerField != null ? integerField.hashCode() : 0);
+        result = 31 * result + (stringField != null ? stringField.hashCode() : 0);
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "SimpleIdImmutableModel{"
+                + "id=" + id
+                + ", integerField=" + integerField
+                + ", stringField='" + stringField + '\''
+                + '}';
+    }
+}
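A short sketch of what @BsonCreator buys here: decoding matches document fields against the @BsonProperty names on the constructor parameters and invokes the annotated constructor, so the class needs no setters and its fields stay final (the values below are illustrative):

    // Decoding {"id": ..., "integerField": 42, "stringField": "fortyTwo"} is
    // equivalent to calling the annotated constructor directly:
    SimpleIdImmutableModel decodedEquivalent =
            new SimpleIdImmutableModel(new ObjectId(), 42, "fortyTwo");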
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/SimpleIdModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/SimpleIdModel.java
new file mode 100644
index 00000000000..65e9aa1fb90
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/SimpleIdModel.java
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+import org.bson.types.ObjectId;
+
+import java.util.Objects;
+
+public class SimpleIdModel {
+    private ObjectId id;
+    private Integer integerField;
+    private String stringField;
+
+    public SimpleIdModel() {
+    }
+
+    public SimpleIdModel(final Integer integerField, final String stringField) {
+        this(null, integerField, stringField);
+    }
+
+    public SimpleIdModel(final ObjectId objectId, final Integer integerField, final String stringField) {
+        this.id = objectId;
+        this.integerField = integerField;
+        this.stringField = stringField;
+    }
+
+    public ObjectId getId() {
+        return id;
+    }
+
+    public void setId(final ObjectId id) {
+        this.id = id;
+    }
+
+    public Integer getIntegerField() {
+        return integerField;
+    }
+
+    public void setIntegerField(final Integer integerField) {
+        this.integerField = integerField;
+    }
+
+    public String getStringField() {
+        return stringField;
+    }
+
+    public void setStringField(final String stringField) {
+        this.stringField = stringField;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        SimpleIdModel that = (SimpleIdModel) o;
+
+        if (!Objects.equals(id, that.id)) {
+            return false;
+        }
+        if (!Objects.equals(integerField, that.integerField)) {
+            return false;
+        }
+        return Objects.equals(stringField, that.stringField);
+    }
+
+    @Override
+    public int hashCode() {
+        int result = id != null ? id.hashCode() : 0;
+        result = 31 * result + (integerField != null ? integerField.hashCode() : 0);
+        result = 31 * result + (stringField != null ? stringField.hashCode() : 0);
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "SimpleIdModel{"
+                + "id=" + id
+                + ", integerField=" + integerField
+                + ", stringField='" + stringField + '\''
+                + '}';
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/SimpleModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/SimpleModel.java
new file mode 100644
index 00000000000..7566066eef5
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/SimpleModel.java
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+public final class SimpleModel implements Comparable<SimpleModel> {
+    private Integer integerField;
+    private String stringField;
+
+    public SimpleModel() {
+    }
+
+    public SimpleModel(final Integer integerField, final String stringField) {
+        this.integerField = integerField;
+        this.stringField = stringField;
+    }
+
+    public Integer getIntegerField() {
+        return integerField;
+    }
+
+    public void setIntegerField(final Integer integerField) {
+        this.integerField = integerField;
+    }
+
+    public String getStringField() {
+        return stringField;
+    }
+
+    public void setStringField(final String stringField) {
+        this.stringField = stringField;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        SimpleModel that = (SimpleModel) o;
+
+        if (getIntegerField() != null ? !getIntegerField().equals(that.getIntegerField()) : that.getIntegerField() != null) {
+            return false;
+        }
+        if (getStringField() != null ? !getStringField().equals(that.getStringField()) : that.getStringField() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = getIntegerField() != null ? getIntegerField().hashCode() : 0;
+        result = 31 * result + (getStringField() != null ? getStringField().hashCode() : 0);
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "SimpleModel{"
+                + "integerField=" + integerField
+                + ", stringField='" + stringField + "'"
+                + "}";
+    }
+
+    @Override
+    public int compareTo(final SimpleModel o) {
+        int integerFieldCompareResult = this.integerField.compareTo(o.integerField);
+        return integerFieldCompareResult == 0 ? this.stringField.compareTo(o.stringField) : integerFieldCompareResult;
+    }
+}
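A round-trip sketch for this entity, reusing the pojoRegistry wiring sketched earlier; BsonDocumentWriter and BsonDocumentReader are the bson library's in-memory writer and reader, and the values are illustrative:

    Codec<SimpleModel> codec = pojoRegistry.get(SimpleModel.class);
    BsonDocument document = new BsonDocument();
    codec.encode(new BsonDocumentWriter(document), new SimpleModel(42, "forty two"),
            EncoderContext.builder().build());
    SimpleModel roundTripped = codec.decode(new BsonDocumentReader(document),
            DecoderContext.builder().build());
    assert roundTripped.equals(new SimpleModel(42, "forty two"));  // relies on equals above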
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/SimpleNestedPojoModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/SimpleNestedPojoModel.java
new file mode 100644
index 00000000000..6682b5c376d
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/SimpleNestedPojoModel.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+public final class SimpleNestedPojoModel {
+    private SimpleModel simple;
+
+    public SimpleNestedPojoModel() {
+    }
+
+    public SimpleNestedPojoModel(final SimpleModel simple) {
+        this.simple = simple;
+    }
+
+    public SimpleModel getSimple() {
+        return simple;
+    }
+
+    public void setSimple(final SimpleModel simple) {
+        this.simple = simple;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        SimpleNestedPojoModel that = (SimpleNestedPojoModel) o;
+
+        if (getSimple() != null ? !getSimple().equals(that.getSimple()) : that.getSimple() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        return getSimple() != null ? getSimple().hashCode() : 0;
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/SimpleWithStaticModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/SimpleWithStaticModel.java
new file mode 100644
index 00000000000..81eee44c74e
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/SimpleWithStaticModel.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+import java.util.Objects;
+
+public final class SimpleWithStaticModel {
+    private Integer integerField;
+    private String stringField;
+
+    public SimpleWithStaticModel() {
+    }
+
+    public SimpleWithStaticModel(final Integer integerField, final String stringField) {
+        this.integerField = integerField;
+        this.stringField = stringField;
+    }
+
+    public Integer getIntegerField() {
+        return integerField;
+    }
+
+    public void setIntegerField(final Integer integerField) {
+        this.integerField = integerField;
+    }
+
+    public String getStringField() {
+        return stringField;
+    }
+
+    public static void getStringField$Annotations() {
+        // Mimics the static kotlin synthetic annotations field
+    }
+
+    public static void setIntegerField$Annotations() {
+        // Mimics the static kotlin synthetic annotations field
+    }
+
+    public void getStringField$Alternative() {
+        // Non static void getter field
+    }
+
+    public void setStringField(final String stringField) {
+        this.stringField = stringField;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        SimpleWithStaticModel that = (SimpleWithStaticModel) o;
+        return Objects.equals(integerField, that.integerField) && Objects.equals(stringField, that.stringField);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(integerField, stringField);
+    }
+
+    @Override
+    public String toString() {
+        return "SimpleWithStaticModel{"
+                + "integerField=" + integerField
+                + ", stringField='" + stringField + "'"
+                + "}";
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/TreeWithIdModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/TreeWithIdModel.java
new file mode 100644
index 00000000000..01937a5a3f2
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/TreeWithIdModel.java
@@ -0,0 +1,120 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities;
+
+import org.bson.types.ObjectId;
+
+import java.util.Objects;
+
+public class TreeWithIdModel {
+    private ObjectId id;
+    private String level;
+    private TreeWithIdModel left;
+    private TreeWithIdModel right;
+
+    public TreeWithIdModel() {
+    }
+
+    public TreeWithIdModel(final String level) {
+        this(null, level, null, null);
+    }
+
+    public TreeWithIdModel(final String level, final TreeWithIdModel left, final TreeWithIdModel right) {
+        this(null, level, left, right);
+    }
+
+    public TreeWithIdModel(final ObjectId id, final String level, final TreeWithIdModel left, final TreeWithIdModel right) {
+        this.id = id;
+        this.level = level;
+        this.left = left;
+        this.right = right;
+    }
+
+    public ObjectId getId() {
+        return id;
+    }
+
+    public void setId(final ObjectId id) {
+        this.id = id;
+    }
+
+    public String getLevel() {
+        return level;
+    }
+
+    public void setLevel(final String level) {
+        this.level = level;
+    }
+
+    public TreeWithIdModel getLeft() {
+        return left;
+    }
+
+    public void setLeft(final TreeWithIdModel left) {
+        this.left = left;
+    }
+
+    public TreeWithIdModel getRight() {
+        return right;
+    }
+
+    public void setRight(final TreeWithIdModel right) {
+        this.right = right;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        TreeWithIdModel that = (TreeWithIdModel) o;
+
+        if (!Objects.equals(id, that.id)) {
+            return false;
+        }
+        if (!Objects.equals(level, that.level)) {
+            return false;
+        }
+        if (!Objects.equals(left, that.left)) {
+            return false;
+        }
+        return Objects.equals(right, that.right);
+    }
+
+    @Override
+    public int hashCode() {
+        int result = id != null ? id.hashCode() : 0;
+        result = 31 * result + (level != null ? level.hashCode() : 0);
+        result = 31 * result + (left != null ? left.hashCode() : 0);
+        result = 31 * result + (right != null ? right.hashCode() : 0);
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "TreeWithIdModel{"
+                + "id=" + id
+                + ", level=" + level
+                + ", left=" + left
+                + ", right=" + right
+                + '}';
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/UpperBoundsConcreteModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/UpperBoundsConcreteModel.java
new file mode 100644
index 00000000000..b68a05d4507
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/UpperBoundsConcreteModel.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.bson.codecs.pojo.entities; + +public final class UpperBoundsConcreteModel extends UpperBoundsModel<Long> { + + public UpperBoundsConcreteModel() { + } + + public UpperBoundsConcreteModel(final Long myGenericField) { + super(myGenericField); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/UpperBoundsModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/UpperBoundsModel.java new file mode 100644 index 00000000000..2161dfe5d0c --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/UpperBoundsModel.java @@ -0,0 +1,61 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities; + +import java.util.Objects; + +public class UpperBoundsModel<T extends Number> { + private T myGenericField; + + public UpperBoundsModel() { + } + + public UpperBoundsModel(final T myGenericField) { + this.myGenericField = myGenericField; + } + + public T getMyGenericField() { + return myGenericField; + } + + public void setMyGenericField(final T myGenericField) { + this.myGenericField = myGenericField; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + UpperBoundsModel<?> that = (UpperBoundsModel<?>) o; + + if (!Objects.equals(myGenericField, that.myGenericField)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return myGenericField != null ? myGenericField.hashCode() : 0; + } +}
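The pair above exercises the POJO codec's handling of a bounded type variable: UpperBoundsModel<T extends Number> alone leaves T unresolved, so it is the concrete subclass, where T is fixed to Long, that gets registered. A minimal round-trip sketch, assuming the standard driver codec APIs (the wrapper class name and values are illustrative, not part of this change):

import org.bson.BsonDocument;
import org.bson.BsonDocumentReader;
import org.bson.BsonDocumentWriter;
import org.bson.codecs.Codec;
import org.bson.codecs.DecoderContext;
import org.bson.codecs.EncoderContext;
import org.bson.codecs.ValueCodecProvider;
import org.bson.codecs.configuration.CodecRegistry;
import org.bson.codecs.pojo.PojoCodecProvider;
import org.bson.codecs.pojo.entities.UpperBoundsConcreteModel;

import static org.bson.codecs.configuration.CodecRegistries.fromProviders;

public final class UpperBoundsRoundTrip {
    public static void main(final String[] args) {
        CodecRegistry registry = fromProviders(new ValueCodecProvider(),
                PojoCodecProvider.builder().register(UpperBoundsConcreteModel.class).build());
        Codec<UpperBoundsConcreteModel> codec = registry.get(UpperBoundsConcreteModel.class);

        // Encode: T is resolved to Long through the concrete subclass
        BsonDocument document = new BsonDocument();
        codec.encode(new BsonDocumentWriter(document), new UpperBoundsConcreteModel(42L),
                EncoderContext.builder().build());
        // document is now {"myGenericField": 42}

        // Decode back into an equal instance
        UpperBoundsConcreteModel decoded = codec.decode(new BsonDocumentReader(document),
                DecoderContext.builder().build());
    }
}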
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationAbstract.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationAbstract.java new file mode 100644 index 00000000000..0257ee71b17 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationAbstract.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.codecs.pojo.annotations.BsonDiscriminator; +import org.bson.codecs.pojo.annotations.BsonProperty; + +@BsonDiscriminator(key = "_key") +public abstract class AnnotationAbstract { + + @BsonProperty + public AnnotationModel child; +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationBsonPropertyIdModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationBsonPropertyIdModel.java new file mode 100644 index 00000000000..906b458026a --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationBsonPropertyIdModel.java @@ -0,0 +1,57 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.codecs.pojo.annotations.BsonProperty; + +public class AnnotationBsonPropertyIdModel { + @BsonProperty("id") + private long id; + + public AnnotationBsonPropertyIdModel() { + } + + public AnnotationBsonPropertyIdModel(final long id) { + this.id = id; + } + + public long getId() { + return id; + } + + public void setId(final long id) { + this.id = id; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + AnnotationBsonPropertyIdModel that = (AnnotationBsonPropertyIdModel) o; + return getId() == that.getId(); + } + + @Override + public int hashCode() { + return (int) (getId() ^ (getId() >>> 32)); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationBsonRepresentation.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationBsonRepresentation.java new file mode 100644 index 00000000000..58df622fc57 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationBsonRepresentation.java @@ -0,0 +1,101 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.BsonType; +import org.bson.codecs.pojo.annotations.BsonRepresentation; +import org.bson.types.ObjectId; + +import java.util.Objects; + +public class AnnotationBsonRepresentation { + private String id; + private String friendId; + + @BsonRepresentation(BsonType.OBJECT_ID) + private String parentId; + + private int age; + + public AnnotationBsonRepresentation() {} + + public AnnotationBsonRepresentation(final int age) { + id = new ObjectId("111111111111111111111111").toHexString(); + friendId = ""; + parentId = ""; + this.age = age; + } + + public AnnotationBsonRepresentation(final String id, final String friendId, final String parentId, final int age) { + this.id = id; + this.friendId = friendId; + this.parentId = parentId; + this.age = age; + } + + @BsonRepresentation(BsonType.OBJECT_ID) + public String getId() { + return id; + } + + public void setId(final String id) { + this.id = id; + } + + public String getFriendId() { + return friendId; + } + + @BsonRepresentation(BsonType.OBJECT_ID) + public void setFriendId(final String friendId) { + this.friendId = friendId; + } + + public String getParentId() { + return parentId; + } + + public void setParentId(final String parentId) { + this.parentId = parentId; + } + + public int getAge() { + return age; + } + + public void setAge(final int age) { + this.age = age; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + AnnotationBsonRepresentation that = (AnnotationBsonRepresentation) o; + return age == that.age && Objects.equals(id, that.id); + } + + @Override + public int hashCode() { + return Objects.hash(id, age); + } + +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationCollision.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationCollision.java new file mode 100644 index 00000000000..8d6a2856e39 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationCollision.java @@ -0,0 +1,44 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.codecs.pojo.annotations.BsonProperty; + +public final class AnnotationCollision { + + public String id; + + @BsonProperty("color") + private String color; + + public String getId() { + return id; + } + + public void setId(final String id) { + this.id = id; + } + + @BsonProperty("theme") + public String getColor() { + return color; + } + + public void setColor(final String color) { + this.color = color; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationDefaultsModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationDefaultsModel.java new file mode 100644 index 00000000000..01277592677 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationDefaultsModel.java @@ -0,0 +1,32 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.codecs.pojo.annotations.BsonDiscriminator; +import org.bson.codecs.pojo.annotations.BsonId; +import org.bson.codecs.pojo.annotations.BsonProperty; + +@BsonDiscriminator("AnnotationDefaultsModel") +public final class AnnotationDefaultsModel { + + @BsonId + public String customId; + + @BsonProperty + public AnnotationModel child; + +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationInheritedModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationInheritedModel.java new file mode 100644 index 00000000000..4bf23fc5fda --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationInheritedModel.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.codecs.pojo.annotations.BsonDiscriminator; +import org.bson.codecs.pojo.annotations.BsonId; +import org.bson.codecs.pojo.annotations.BsonProperty; + +@BsonDiscriminator(key = "_cls") +public final class AnnotationInheritedModel extends AnnotationAbstract { + + @BsonId + public String customId; + + @BsonProperty(useDiscriminator = true) + public AnnotationModel child; +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationModel.java new file mode 100644 index 00000000000..b45d092d54e --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationModel.java @@ -0,0 +1,99 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.codecs.pojo.annotations.BsonDiscriminator; +import org.bson.codecs.pojo.annotations.BsonId; +import org.bson.codecs.pojo.annotations.BsonProperty; + +@BsonDiscriminator(value = "MyAnnotationModel", key = "_cls") +public final class AnnotationModel { + + @BsonId() + public String customId; + + @BsonProperty(useDiscriminator = false) + public AnnotationModel child; + + @BsonProperty("renamed") + public AnnotationModel alternative; + + public AnnotationModel() { + } + + public AnnotationModel(final String customId, final AnnotationModel child, final AnnotationModel alternative) { + this.customId = customId; + this.child = child; + this.alternative = alternative; + } + + public String getCustomId() { + return customId; + } + + public void setCustomId(final String customId) { + this.customId = customId; + } + + public AnnotationModel getChild() { + return child; + } + + public void setChild(final AnnotationModel child) { + this.child = child; + } + + public AnnotationModel getAlternative() { + return alternative; + } + + public void setAlternative(final AnnotationModel alternative) { + this.alternative = alternative; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + AnnotationModel that = (AnnotationModel) o; + + if (getCustomId() != null ? !getCustomId().equals(that.getCustomId()) : that.getCustomId() != null) { + return false; + } + if (getChild() != null ? !getChild().equals(that.getChild()) : that.getChild() != null) { + return false; + } + if (getAlternative() != null ? !getAlternative().equals(that.getAlternative()) : that.getAlternative() != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = getCustomId() != null ? getCustomId().hashCode() : 0; + result = 31 * result + (getChild() != null ? getChild().hashCode() : 0); + result = 31 * result + (getAlternative() != null ? 
getAlternative().hashCode() : 0); + return result; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationNameCollision.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationNameCollision.java new file mode 100644 index 00000000000..4161eea087d --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationNameCollision.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.codecs.pojo.annotations.BsonProperty; + +public final class AnnotationNameCollision { + + public String id; + + @BsonProperty("id") + public String alternative; +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationWithObjectIdModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationWithObjectIdModel.java new file mode 100644 index 00000000000..8cddf1e6160 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationWithObjectIdModel.java @@ -0,0 +1,101 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.codecs.pojo.annotations.BsonDiscriminator; +import org.bson.codecs.pojo.annotations.BsonId; +import org.bson.codecs.pojo.annotations.BsonProperty; +import org.bson.types.ObjectId; + +@BsonDiscriminator(value = "MyAnnotationModel", key = "_cls") +public final class AnnotationWithObjectIdModel { + + @BsonId() + public ObjectId customId; + + @BsonProperty(useDiscriminator = false) + public AnnotationWithObjectIdModel child; + + @BsonProperty("renamed") + public AnnotationWithObjectIdModel alternative; + + public AnnotationWithObjectIdModel() { + } + + public AnnotationWithObjectIdModel(final ObjectId customId, final AnnotationWithObjectIdModel child, + final AnnotationWithObjectIdModel alternative) { + this.customId = customId; + this.child = child; + this.alternative = alternative; + } + + public ObjectId getCustomId() { + return customId; + } + + public void setCustomId(final ObjectId customId) { + this.customId = customId; + } + + public AnnotationWithObjectIdModel getChild() { + return child; + } + + public void setChild(final AnnotationWithObjectIdModel child) { + this.child = child; + } + + public AnnotationWithObjectIdModel getAlternative() { + return alternative; + } + + public void setAlternative(final AnnotationWithObjectIdModel alternative) { + this.alternative = alternative; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + AnnotationWithObjectIdModel that = (AnnotationWithObjectIdModel) o; + + if (getCustomId() != null ? !getCustomId().equals(that.getCustomId()) : that.getCustomId() != null) { + return false; + } + if (getChild() != null ? !getChild().equals(that.getChild()) : that.getChild() != null) { + return false; + } + if (getAlternative() != null ? !getAlternative().equals(that.getAlternative()) : that.getAlternative() != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = getCustomId() != null ? getCustomId().hashCode() : 0; + result = 31 * result + (getChild() != null ? getChild().hashCode() : 0); + result = 31 * result + (getAlternative() != null ? getAlternative().hashCode() : 0); + return result; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationWriteCollision.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationWriteCollision.java new file mode 100644 index 00000000000..1d826cc5689 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/AnnotationWriteCollision.java @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.codecs.pojo.annotations.BsonProperty; + +public final class AnnotationWriteCollision { + + public String id; + + @BsonProperty("color") + private String color; + + + public String getId() { + return id; + } + + public void setId(final String id) { + this.id = id; + } + + public String getColor() { + return color; + } + + @BsonProperty("theme") + public void setColor(final String color) { + this.color = color; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/BsonExtraElementsInvalidModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/BsonExtraElementsInvalidModel.java new file mode 100644 index 00000000000..d456a6c2333 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/BsonExtraElementsInvalidModel.java @@ -0,0 +1,88 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.codecs.pojo.annotations.BsonExtraElements; + +import java.util.Objects; + +public class BsonExtraElementsInvalidModel { + + private Integer integerField; + private String stringField; + @BsonExtraElements + private Integer extraElements; + + public BsonExtraElementsInvalidModel() { + } + + public BsonExtraElementsInvalidModel(final Integer integerField, final String stringField, final Integer extraElements) { + this.integerField = integerField; + this.stringField = stringField; + this.extraElements = extraElements; + } + + public Integer getIntegerField() { + return integerField; + } + + public BsonExtraElementsInvalidModel setIntegerField(final Integer integerField) { + this.integerField = integerField; + return this; + } + + public String getStringField() { + return stringField; + } + + public BsonExtraElementsInvalidModel setStringField(final String stringField) { + this.stringField = stringField; + return this; + } + + public Integer getExtraElements() { + return extraElements; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + BsonExtraElementsInvalidModel that = (BsonExtraElementsInvalidModel) o; + return Objects.equals(integerField, that.integerField) + && Objects.equals(stringField, that.stringField) + && Objects.equals(extraElements, that.extraElements); + } + + @Override + public int hashCode() { + return Objects.hash(integerField, stringField, extraElements); + } + + @Override + public String toString() { + return "BsonExtraElementsInvalidModel{" + + "integerField=" + integerField + + ", stringField='" + stringField + '\'' + + ", extraElements=" + extraElements + + '}'; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/BsonExtraElementsMapModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/BsonExtraElementsMapModel.java new file mode 100644 index 00000000000..678f021fa2f
--- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/BsonExtraElementsMapModel.java @@ -0,0 +1,98 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.codecs.pojo.annotations.BsonExtraElements; + +import java.util.Map; +import java.util.Objects; + +public class BsonExtraElementsMapModel { + + private Integer integerField; + private String stringField; + @BsonExtraElements + private Map<String, String> extraElements; + + public BsonExtraElementsMapModel() { + } + + public BsonExtraElementsMapModel(final Integer integerField, final String stringField, final Map<String, String> extraElements) { + this.integerField = integerField; + this.stringField = stringField; + this.extraElements = extraElements; + } + + public Integer getIntegerField() { + return integerField; + } + + public BsonExtraElementsMapModel setIntegerField(final Integer integerField) { + this.integerField = integerField; + return this; + } + + public String getStringField() { + return stringField; + } + + public BsonExtraElementsMapModel setStringField(final String stringField) { + this.stringField = stringField; + return this; + } + + public Map<String, String> getExtraElements() { + return extraElements; + } + + public BsonExtraElementsMapModel setExtraElement(final String key, final String value) { + extraElements.put(key, value); + return this; + } + + public Object get(final String key) { + return extraElements.get(key); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + BsonExtraElementsMapModel that = (BsonExtraElementsMapModel) o; + return Objects.equals(integerField, that.integerField) + && Objects.equals(stringField, that.stringField) + && Objects.equals(extraElements, that.extraElements); + } + + @Override + public int hashCode() { + return Objects.hash(integerField, stringField, extraElements); + } + + @Override + public String toString() { + return "BsonExtraElementsMapModel{" + + "integerField=" + integerField + + ", stringField='" + stringField + '\'' + + ", extraElements=" + extraElements + + '}'; + } +}
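The map-based model above is the valid @BsonExtraElements shape (the Integer-typed model before it is the invalid case). A minimal decoding sketch, assuming the standard driver codec APIs (the wrapper class name and document values are illustrative, not part of this change):

import org.bson.BsonDocument;
import org.bson.BsonDocumentReader;
import org.bson.codecs.Codec;
import org.bson.codecs.DecoderContext;
import org.bson.codecs.ValueCodecProvider;
import org.bson.codecs.configuration.CodecRegistry;
import org.bson.codecs.pojo.PojoCodecProvider;
import org.bson.codecs.pojo.entities.conventions.BsonExtraElementsMapModel;

import static org.bson.codecs.configuration.CodecRegistries.fromProviders;

public final class ExtraElementsRoundTrip {
    public static void main(final String[] args) {
        CodecRegistry registry = fromProviders(new ValueCodecProvider(),
                PojoCodecProvider.builder().register(BsonExtraElementsMapModel.class).build());
        Codec<BsonExtraElementsMapModel> codec = registry.get(BsonExtraElementsMapModel.class);

        // "unknownField" matches no declared property, so it is collected into
        // the @BsonExtraElements map instead of being dropped or failing.
        BsonDocument document = BsonDocument.parse(
                "{integerField: 1, stringField: 'a', unknownField: 'kept'}");
        BsonExtraElementsMapModel decoded = codec.decode(new BsonDocumentReader(document),
                DecoderContext.builder().build());
        // decoded.get("unknownField") returns "kept", and re-encoding writes it back out
    }
}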
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/BsonExtraElementsModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/BsonExtraElementsModel.java new file mode 100644 index 00000000000..2d1b8b1f554 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/BsonExtraElementsModel.java @@ -0,0 +1,99 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.BsonDocument; +import org.bson.BsonValue; +import org.bson.codecs.pojo.annotations.BsonExtraElements; + +import java.util.Objects; + +public class BsonExtraElementsModel { + + private Integer integerField; + private String stringField; + @BsonExtraElements + private BsonDocument extraElements; + + public BsonExtraElementsModel() { + } + + public BsonExtraElementsModel(final Integer integerField, final String stringField, final BsonDocument extraElements) { + this.integerField = integerField; + this.stringField = stringField; + this.extraElements = extraElements; + } + + public Integer getIntegerField() { + return integerField; + } + + public BsonExtraElementsModel setIntegerField(final Integer integerField) { + this.integerField = integerField; + return this; + } + + public String getStringField() { + return stringField; + } + + public BsonExtraElementsModel setStringField(final String stringField) { + this.stringField = stringField; + return this; + } + + public BsonDocument getExtraElements() { + return extraElements; + } + + public BsonExtraElementsModel setExtraElement(final String key, final BsonValue value) { + extraElements.append(key, value); + return this; + } + + public Object get(final String key) { + return extraElements.get(key); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + BsonExtraElementsModel that = (BsonExtraElementsModel) o; + return Objects.equals(integerField, that.integerField) + && Objects.equals(stringField, that.stringField) + && Objects.equals(extraElements, that.extraElements); + } + + @Override + public int hashCode() { + return Objects.hash(integerField, stringField, extraElements); + } + + @Override + public String toString() { + return "BsonExtraElementsModel{" + + "integerField=" + integerField + + ", stringField='" + stringField + '\'' + + ", extraElements=" + extraElements + + '}'; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/BsonIgnoreDuplicatePropertyMultipleTypes.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/BsonIgnoreDuplicatePropertyMultipleTypes.java new file mode 100644 index 00000000000..89b6c16d934 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/BsonIgnoreDuplicatePropertyMultipleTypes.java @@ -0,0 +1,84 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.codecs.pojo.annotations.BsonCreator; +import org.bson.codecs.pojo.annotations.BsonIgnore; +import org.bson.codecs.pojo.annotations.BsonProperty; + +import java.util.Objects; + +public class BsonIgnoreDuplicatePropertyMultipleTypes { + private final String stringField; + private String altStringField; + + @BsonCreator + public BsonIgnoreDuplicatePropertyMultipleTypes(@BsonProperty("stringField") final String stringField) { + this.stringField = stringField; + } + + public String getStringField() { + return stringField; + } + + @BsonIgnore + public String getAltStringField() { + return altStringField; + } + + @BsonIgnore + public void setAltStringField(final String altStringField) { + this.altStringField = altStringField; + } + + @BsonIgnore + public void setAltStringField(final Integer i) { + this.altStringField = i.toString(); + } + + @Override + public String toString() { + return "BsonIgnoreDuplicatePropertyMultipleTypes{" + + "stringField='" + stringField + '\'' + + ", altStringField='" + altStringField + '\'' + + '}'; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BsonIgnoreDuplicatePropertyMultipleTypes that = (BsonIgnoreDuplicatePropertyMultipleTypes) o; + + if (!Objects.equals(stringField, that.stringField)) { + return false; + } + return Objects.equals(altStringField, that.altStringField); + } + + @Override + public int hashCode() { + int result = stringField != null ? stringField.hashCode() : 0; + result = 31 * result + (altStringField != null ? altStringField.hashCode() : 0); + return result; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/BsonIgnoreInvalidMapModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/BsonIgnoreInvalidMapModel.java new file mode 100644 index 00000000000..33f7601b28f --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/BsonIgnoreInvalidMapModel.java @@ -0,0 +1,77 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.codecs.pojo.annotations.BsonIgnore; + +import java.util.Map; +import java.util.Objects; + +public class BsonIgnoreInvalidMapModel { + + private String stringField; + + @BsonIgnore + private Map invalidMap; + + public BsonIgnoreInvalidMapModel() { + } + + public BsonIgnoreInvalidMapModel(final String stringField) { + this.stringField = stringField; + } + + public String getStringField() { + return stringField; + } + + public void setStringField(final String stringField) { + this.stringField = stringField; + } + + public Map getInvalidMap() { + return invalidMap; + } + + public void setInvalidMap(final Map invalidMap) { + this.invalidMap = invalidMap; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BsonIgnoreInvalidMapModel that = (BsonIgnoreInvalidMapModel) o; + + if (!Objects.equals(stringField, that.stringField)) { + return false; + } + return Objects.equals(invalidMap, that.invalidMap); + } + + @Override + public int hashCode() { + int result = stringField != null ? stringField.hashCode() : 0; + result = 31 * result + (invalidMap != null ? invalidMap.hashCode() : 0); + return result; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/BsonIgnoreSyntheticProperty.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/BsonIgnoreSyntheticProperty.java new file mode 100644 index 00000000000..0d6544f64fa --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/BsonIgnoreSyntheticProperty.java @@ -0,0 +1,67 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.codecs.pojo.annotations.BsonCreator; +import org.bson.codecs.pojo.annotations.BsonIgnore; +import org.bson.codecs.pojo.annotations.BsonProperty; + +import java.util.Objects; + +public class BsonIgnoreSyntheticProperty { + private final String stringField; + + @BsonCreator + public BsonIgnoreSyntheticProperty(@BsonProperty("stringField") final String stringField) { + this.stringField = stringField; + } + + public String getStringField() { + return stringField; + } + + @BsonIgnore + public Object getSyntheticProperty() { + return null; + } + + @Override + public String toString() { + return "BsonIgnoreSyntheticProperty{" + + "stringField='" + stringField + '\'' + + '}'; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BsonIgnoreSyntheticProperty that = (BsonIgnoreSyntheticProperty) o; + + return Objects.equals(stringField, that.stringField); + } + + @Override + public int hashCode() { + return stringField != null ? 
stringField.hashCode() : 0; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/BsonRepresentationModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/BsonRepresentationModel.java new file mode 100644 index 00000000000..c9a52908167 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/BsonRepresentationModel.java @@ -0,0 +1,76 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.BsonType; +import org.bson.codecs.pojo.annotations.BsonRepresentation; +import org.bson.types.ObjectId; + +import java.util.Objects; + +public class BsonRepresentationModel { + @BsonRepresentation(BsonType.OBJECT_ID) + private String id; + + private int age; + + public BsonRepresentationModel() {} + + public BsonRepresentationModel(final int age) { + id = new ObjectId("111111111111111111111111").toHexString(); + this.age = age; + } + + public BsonRepresentationModel(final String id, final int age) { + this.id = id; + this.age = age; + } + + public String getId() { + return id; + } + + public void setId(final String id) { + this.id = id; + } + + public int getAge() { + return age; + } + + public void setAge(final int age) { + this.age = age; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + BsonRepresentationModel that = (BsonRepresentationModel) o; + return age == that.age && Objects.equals(id, that.id); + } + + @Override + public int hashCode() { + return Objects.hash(id, age); + } + +}
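@BsonRepresentation, as used above, keeps the Java property a String while storing it in BSON as a native ObjectId. A minimal encoding sketch, assuming the standard driver codec APIs (the wrapper class name is illustrative, not part of this change):

import org.bson.BsonDocument;
import org.bson.BsonDocumentWriter;
import org.bson.codecs.Codec;
import org.bson.codecs.EncoderContext;
import org.bson.codecs.ValueCodecProvider;
import org.bson.codecs.configuration.CodecRegistry;
import org.bson.codecs.pojo.PojoCodecProvider;
import org.bson.codecs.pojo.entities.conventions.BsonRepresentationModel;

import static org.bson.codecs.configuration.CodecRegistries.fromProviders;

public final class RepresentationRoundTrip {
    public static void main(final String[] args) {
        CodecRegistry registry = fromProviders(new ValueCodecProvider(),
                PojoCodecProvider.builder().register(BsonRepresentationModel.class).build());
        Codec<BsonRepresentationModel> codec = registry.get(BsonRepresentationModel.class);

        BsonDocument document = new BsonDocument();
        codec.encode(new BsonDocumentWriter(document), new BsonRepresentationModel(42),
                EncoderContext.builder().build());
        // The "id" property is written under "_id" as a BsonObjectId,
        // even though the Java field is a plain String.
    }
}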
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CollectionDiscriminatorAbstractClassesModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CollectionDiscriminatorAbstractClassesModel.java new file mode 100644 index 00000000000..b719145c8f8 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CollectionDiscriminatorAbstractClassesModel.java @@ -0,0 +1,75 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +import java.util.List; +import java.util.Map; + +public class CollectionDiscriminatorAbstractClassesModel { + private List list; + private Map map; + + public List getList() { + return list; + } + + public CollectionDiscriminatorAbstractClassesModel setList(final List list) { + this.list = list; + return this; + } + + public Map getMap() { + return map; + } + + public CollectionDiscriminatorAbstractClassesModel setMap(final Map map) { + this.map = map; + return this; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CollectionDiscriminatorAbstractClassesModel that = (CollectionDiscriminatorAbstractClassesModel) o; + + if (getList() != null ? !getList().equals(that.getList()) : that.getList() != null) { + return false; + } + return getMap() != null ? getMap().equals(that.getMap()) : that.getMap() == null; + } + + @Override + public int hashCode() { + int result = getList() != null ? getList().hashCode() : 0; + result = 31 * result + (getMap() != null ? getMap().hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "CollectionDiscriminatorAbstractClassesModel{" + + "list=" + list + + ", map=" + map + + '}'; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CollectionDiscriminatorInterfacesModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CollectionDiscriminatorInterfacesModel.java new file mode 100644 index 00000000000..b9155e738d1 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CollectionDiscriminatorInterfacesModel.java @@ -0,0 +1,75 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +import java.util.List; +import java.util.Map; + +public class CollectionDiscriminatorInterfacesModel { + private List list; + private Map map; + + public List getList() { + return list; + } + + public CollectionDiscriminatorInterfacesModel setList(final List list) { + this.list = list; + return this; + } + + public Map getMap() { + return map; + } + + public CollectionDiscriminatorInterfacesModel setMap(final Map map) { + this.map = map; + return this; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CollectionDiscriminatorInterfacesModel that = (CollectionDiscriminatorInterfacesModel) o; + + if (getList() != null ? !getList().equals(that.getList()) : that.getList() != null) { + return false; + } + return getMap() != null ? getMap().equals(that.getMap()) : that.getMap() == null; + } + + @Override + public int hashCode() { + int result = getList() != null ? getList().hashCode() : 0; + result = 31 * result + (getMap() != null ? getMap().hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "CollectionDiscriminatorInterfacesModel{" + + "list=" + list + + ", map=" + map + + '}'; + } +}
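The two models above hold collections typed to an abstract class or an interface, where decoding only works if each element carries a discriminator the codec can use to pick a concrete class. A minimal setup sketch; the Shape/Circle hierarchy is hypothetical and only stands in for the element types elided here:

import org.bson.codecs.pojo.ClassModel;
import org.bson.codecs.pojo.PojoCodecProvider;
import org.bson.codecs.pojo.entities.conventions.CollectionDiscriminatorInterfacesModel;

public final class DiscriminatorSetupExample {

    public interface Shape { }

    public static final class Circle implements Shape {
        public double radius;
    }

    public static PojoCodecProvider provider() {
        // Enable a per-class discriminator so each encoded Circle carries a
        // discriminator field (default key "_t"); without it, a List or Map
        // typed to Shape could not be decoded back to concrete instances.
        ClassModel<Circle> circleModel = ClassModel.builder(Circle.class)
                .enableDiscriminator(true)
                .build();
        return PojoCodecProvider.builder()
                .register(circleModel)
                .register(CollectionDiscriminatorInterfacesModel.class)
                .build();
    }
}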
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CollectionNameModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CollectionNameModel.java new file mode 100644 index 00000000000..225982c9ebc --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CollectionNameModel.java @@ -0,0 +1,20 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +public final class CollectionNameModel { +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CollectionsGetterImmutableModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CollectionsGetterImmutableModel.java new file mode 100644 index 00000000000..31bcd3d62e9 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CollectionsGetterImmutableModel.java @@ -0,0 +1,57 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +public class CollectionsGetterImmutableModel { + + private final List<Integer> listField; + + public CollectionsGetterImmutableModel() { + this(Collections.emptyList()); + } + + public CollectionsGetterImmutableModel(final List<Integer> listField) { + this.listField = Collections.unmodifiableList(listField); + } + + public List<Integer> getListField() { + return listField; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CollectionsGetterImmutableModel that = (CollectionsGetterImmutableModel) o; + + return Objects.equals(listField, that.listField); + } + + @Override + public int hashCode() { + return listField != null ? listField.hashCode() : 0; + } +}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CollectionsGetterMutableModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CollectionsGetterMutableModel.java new file mode 100644 index 00000000000..bc928b37c34 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CollectionsGetterMutableModel.java @@ -0,0 +1,56 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +import java.util.ArrayList;
+import java.util.List; +import java.util.Objects; + +public class CollectionsGetterMutableModel { + + private final List<Integer> listField; + + public CollectionsGetterMutableModel() { + this(new ArrayList<>()); + } + + public CollectionsGetterMutableModel(final List<Integer> listField) { + this.listField = listField; + } + + public List<Integer> getListField() { + return listField; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CollectionsGetterMutableModel that = (CollectionsGetterMutableModel) o; + return Objects.equals(listField, that.listField); + } + + @Override + public int hashCode() { + return listField != null ? listField.hashCode() : 0; + } +}
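The getter-only collection models (immutable and mutable above, non-empty and null below) probe what the codec can do when a property has no setter: it can only populate the collection instance returned by the getter, so the mutable variant decodes while the unmodifiable one cannot be filled. A minimal decoding sketch, assuming the standard driver codec APIs (the wrapper class name and values are illustrative):

import org.bson.BsonDocument;
import org.bson.BsonDocumentReader;
import org.bson.codecs.Codec;
import org.bson.codecs.DecoderContext;
import org.bson.codecs.ValueCodecProvider;
import org.bson.codecs.configuration.CodecRegistry;
import org.bson.codecs.pojo.PojoCodecProvider;
import org.bson.codecs.pojo.entities.conventions.CollectionsGetterMutableModel;

import static org.bson.codecs.configuration.CodecRegistries.fromProviders;

public final class GetterOnlyCollectionDecode {
    public static void main(final String[] args) {
        CodecRegistry registry = fromProviders(new ValueCodecProvider(),
                PojoCodecProvider.builder().register(CollectionsGetterMutableModel.class).build());
        Codec<CollectionsGetterMutableModel> codec = registry.get(CollectionsGetterMutableModel.class);

        // There is no setListField, so decoded items are added to the
        // mutable ArrayList that getListField() exposes.
        BsonDocument document = BsonDocument.parse("{listField: [1, 2, 3]}");
        CollectionsGetterMutableModel decoded = codec.decode(new BsonDocumentReader(document),
                DecoderContext.builder().build());
        // decoded.getListField() is now [1, 2, 3]
    }
}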
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CollectionsGetterNonEmptyModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CollectionsGetterNonEmptyModel.java new file mode 100644 index 00000000000..87f832124e5 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CollectionsGetterNonEmptyModel.java @@ -0,0 +1,58 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +import java.util.List; +import java.util.Objects; + +import static java.util.Arrays.asList; + +public class CollectionsGetterNonEmptyModel { + + private final List<Integer> listField; + + public CollectionsGetterNonEmptyModel() { + this(asList(1, 2)); + } + + public CollectionsGetterNonEmptyModel(final List<Integer> listField) { + this.listField = listField; + } + + public List<Integer> getListField() { + return listField; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CollectionsGetterNonEmptyModel that = (CollectionsGetterNonEmptyModel) o; + + return Objects.equals(listField, that.listField); + } + + @Override + public int hashCode() { + return listField != null ? listField.hashCode() : 0; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CollectionsGetterNullModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CollectionsGetterNullModel.java new file mode 100644 index 00000000000..df670c4570a --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CollectionsGetterNullModel.java @@ -0,0 +1,55 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +import java.util.List; +import java.util.Objects; + +public class CollectionsGetterNullModel { + + private final List<Integer> listField; + + public CollectionsGetterNullModel() { + this(null); + } + + public CollectionsGetterNullModel(final List<Integer> listField) { + this.listField = listField; + } + + public List<Integer> getListField() { + return listField; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CollectionsGetterNullModel that = (CollectionsGetterNullModel) o; + return Objects.equals(listField, that.listField); + } + + @Override + public int hashCode() { + return listField != null ? listField.hashCode() : 0; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorAllFinalFieldsModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorAllFinalFieldsModel.java new file mode 100644 index 00000000000..07d76c6291c --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorAllFinalFieldsModel.java @@ -0,0 +1,85 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.codecs.pojo.annotations.BsonCreator; +import org.bson.codecs.pojo.annotations.BsonDiscriminator; +import org.bson.codecs.pojo.annotations.BsonId; +import org.bson.codecs.pojo.annotations.BsonProperty; + +import java.util.Objects; + +@BsonDiscriminator +public final class CreatorAllFinalFieldsModel { + private final String pid; + private final String fName; + private final String lName; + + @BsonCreator + public CreatorAllFinalFieldsModel(@BsonProperty("personId") final String personId, + @BsonProperty("firstName") final String firstName, + @BsonProperty("lastName") final String lastName) { + this.pid = personId; + this.fName = firstName; + this.lName = lastName; + } + + @BsonId + public String getPersonId() { + return pid; + } + + public String getFirstName() { + return fName; + } + + public String getLastName() { + return lName; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CreatorAllFinalFieldsModel that = (CreatorAllFinalFieldsModel) o; + + if (!Objects.equals(pid, that.pid)) { + return false; + } + if (!Objects.equals(fName, that.fName)) { + return false; + } + if (!Objects.equals(lName, that.lName)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = pid != null ? pid.hashCode() : 0; + result = 31 * result + (fName != null ? fName.hashCode() : 0); + result = 31 * result + (lName != null ? lName.hashCode() : 0); + return result; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorConstructorIdModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorConstructorIdModel.java new file mode 100644 index 00000000000..d3520e9f02f --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorConstructorIdModel.java @@ -0,0 +1,112 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.codecs.pojo.annotations.BsonCreator; +import org.bson.codecs.pojo.annotations.BsonId; +import org.bson.codecs.pojo.annotations.BsonProperty; + +import java.util.List; + +public class CreatorConstructorIdModel { + private final String id; + private final List<Integer> integersField; + private String stringField; + public long longField; + + @BsonCreator + public CreatorConstructorIdModel(@BsonId final String id, @BsonProperty("integersField") final List<Integer> integerField, + @BsonProperty("longField") final long longField) { + this.id = id; + this.integersField = integerField; + this.longField = longField; + } + + public CreatorConstructorIdModel(final String id, final List<Integer> integersField, final String stringField, final long longField) { + this.id = id; + this.integersField = integersField; + this.stringField = stringField; + this.longField = longField; + } + + public String getId() { + return id; + } + + public List<Integer> getIntegersField() { + return integersField; + } + + public String getStringField() { + return stringField; + } + + public void setStringField(final String stringField) { + this.stringField = stringField; + } + + public long getLongField() { + return longField; + } + + public void setLongField(final long longField) { + this.longField = longField; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CreatorConstructorIdModel that = (CreatorConstructorIdModel) o; + + if (getLongField() != that.getLongField()) { + return false; + } + if (getId() != null ? !getId().equals(that.getId()) : that.getId() != null) { + return false; + } + if (getIntegersField() != null ? !getIntegersField().equals(that.getIntegersField()) + : that.getIntegersField() != null) { + return false; + } + return getStringField() != null ? getStringField().equals(that.getStringField()) : that.getStringField() == null; + } + + @Override + public int hashCode() { + int result = getId() != null ? getId().hashCode() : 0; + result = 31 * result + (getIntegersField() != null ? getIntegersField().hashCode() : 0); + result = 31 * result + (getStringField() != null ? getStringField().hashCode() : 0); + result = 31 * result + (int) (getLongField() ^ (getLongField() >>> 32)); + return result; + } + + @Override + public String toString() { + return "CreatorConstructorIdModel{" + + "id='" + id + '\'' + + ", integersField=" + integersField + + ", stringField='" + stringField + '\'' + + ", longField=" + longField + + '}'; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorConstructorLegacyBsonPropertyModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorConstructorLegacyBsonPropertyModel.java new file mode 100644 index 00000000000..81011970500 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorConstructorLegacyBsonPropertyModel.java @@ -0,0 +1,104 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities.conventions;
+
+import org.bson.codecs.pojo.annotations.BsonCreator;
+import org.bson.codecs.pojo.annotations.BsonProperty;
+
+import java.util.List;
+
+public final class CreatorConstructorLegacyBsonPropertyModel {
+    private final List<Integer> integersField;
+    private String stringField;
+    @BsonProperty("longField")
+    private long myLongField;
+
+    // The creator binds by the property name ("myLongField"); the field-level @BsonProperty maps it to the key "longField"
+    @BsonCreator
+    public CreatorConstructorLegacyBsonPropertyModel(@BsonProperty("integersField") final List<Integer> integerField,
+                                                     @BsonProperty("myLongField") final long longField) {
+        this.integersField = integerField;
+        this.myLongField = longField;
+    }
+
+    public CreatorConstructorLegacyBsonPropertyModel(final List<Integer> integersField, final String stringField, final long longField) {
+        this.integersField = integersField;
+        this.stringField = stringField;
+        this.myLongField = longField;
+    }
+
+    public List<Integer> getIntegersField() {
+        return integersField;
+    }
+
+    public String getStringField() {
+        return stringField;
+    }
+
+    public void setStringField(final String stringField) {
+        this.stringField = stringField;
+    }
+
+    public long getMyLongField() {
+        return myLongField;
+    }
+
+    public void setMyLongField(final long myLongField) {
+        this.myLongField = myLongField;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        CreatorConstructorLegacyBsonPropertyModel that = (CreatorConstructorLegacyBsonPropertyModel) o;
+
+        if (getMyLongField() != that.getMyLongField()) {
+            return false;
+        }
+        if (getIntegersField() != null ? !getIntegersField().equals(that.getIntegersField()) : that.getIntegersField() != null) {
+            return false;
+        }
+        if (getStringField() != null ? !getStringField().equals(that.getStringField()) : that.getStringField() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = getIntegersField() != null ? getIntegersField().hashCode() : 0;
+        result = 31 * result + (getStringField() != null ? getStringField().hashCode() : 0);
+        result = 31 * result + (int) (getMyLongField() ^ (getMyLongField() >>> 32));
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "CreatorConstructorLegacyBsonPropertyModel{"
+                + "integersField=" + integersField
+                + ", stringField='" + stringField + "'"
+                + ", myLongField=" + myLongField
+                + "}";
+    }
+}
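The model above exercises the legacy lookup: the creator parameter is matched by the property name ("myLongField"), while the field-level @BsonProperty controls the stored document key ("longField"). A minimal sketch of the observable effect, assuming the standard PojoCodecProvider from org.bson.codecs.pojo in automatic mode (the wrapper class name here is illustrative):

import java.util.Arrays;
import org.bson.BsonDocument;
import org.bson.BsonDocumentWriter;
import org.bson.codecs.Codec;
import org.bson.codecs.EncoderContext;
import org.bson.codecs.ValueCodecProvider;
import org.bson.codecs.configuration.CodecRegistries;
import org.bson.codecs.configuration.CodecRegistry;
import org.bson.codecs.pojo.PojoCodecProvider;

public final class LegacyBsonPropertyCheck {
    public static void main(final String[] args) {
        CodecRegistry registry = CodecRegistries.fromProviders(
                new ValueCodecProvider(),
                PojoCodecProvider.builder().automatic(true).build());
        Codec<CreatorConstructorLegacyBsonPropertyModel> codec =
                registry.get(CreatorConstructorLegacyBsonPropertyModel.class);

        CreatorConstructorLegacyBsonPropertyModel model =
                new CreatorConstructorLegacyBsonPropertyModel(Arrays.asList(1, 2, 3), "hello", 42L);
        BsonDocument document = new BsonDocument();
        codec.encode(new BsonDocumentWriter(document), model, EncoderContext.builder().build());

        // The Java field is myLongField, but the stored key follows the field-level annotation.
        assert document.containsKey("longField");
        assert !document.containsKey("myLongField");
    }
}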
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorConstructorModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorConstructorModel.java
new file mode 100644
index 00000000000..d3a5c028b3b
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorConstructorModel.java
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities.conventions;
+
+import org.bson.codecs.pojo.annotations.BsonCreator;
+import org.bson.codecs.pojo.annotations.BsonProperty;
+
+import java.util.List;
+
+public final class CreatorConstructorModel {
+    private final List<Integer> integersField;
+    private String stringField;
+    public long longField;
+
+    @BsonCreator
+    public CreatorConstructorModel(@BsonProperty("integersField") final List<Integer> integerField,
+                                   @BsonProperty("longField") final long longField) {
+        this.integersField = integerField;
+        this.longField = longField;
+    }
+
+    public CreatorConstructorModel(final List<Integer> integersField, final String stringField, final long longField) {
+        this.integersField = integersField;
+        this.stringField = stringField;
+        this.longField = longField;
+    }
+
+    public List<Integer> getIntegersField() {
+        return integersField;
+    }
+
+    public String getStringField() {
+        return stringField;
+    }
+
+    public void setStringField(final String stringField) {
+        this.stringField = stringField;
+    }
+
+    public long getLongField() {
+        return longField;
+    }
+
+    public void setLongField(final long longField) {
+        this.longField = longField;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        CreatorConstructorModel that = (CreatorConstructorModel) o;
+
+        if (getLongField() != that.getLongField()) {
+            return false;
+        }
+        if (getIntegersField() != null ? !getIntegersField().equals(that.getIntegersField()) : that.getIntegersField() != null) {
+            return false;
+        }
+        if (getStringField() != null ? !getStringField().equals(that.getStringField()) : that.getStringField() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = getIntegersField() != null ? getIntegersField().hashCode() : 0;
+        result = 31 * result + (getStringField() != null ? getStringField().hashCode() : 0);
+        result = 31 * result + (int) (getLongField() ^ (getLongField() >>> 32));
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "CreatorConstructorModel{"
+                + "integersField=" + integersField
+                + ", stringField='" + stringField + "'"
+                + ", longField=" + longField
+                + "}";
+    }
+}
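For orientation, a sketch of how these creator-based fixtures are typically exercised: build a registry with the PojoCodecProvider, encode to a BsonDocument, and decode back through the @BsonCreator constructor. Only the bson APIs shown are assumed; the wrapper class name is illustrative:

import java.util.Arrays;
import org.bson.BsonDocument;
import org.bson.BsonDocumentReader;
import org.bson.BsonDocumentWriter;
import org.bson.codecs.Codec;
import org.bson.codecs.DecoderContext;
import org.bson.codecs.EncoderContext;
import org.bson.codecs.ValueCodecProvider;
import org.bson.codecs.configuration.CodecRegistries;
import org.bson.codecs.configuration.CodecRegistry;
import org.bson.codecs.pojo.PojoCodecProvider;

public final class CreatorConstructorRoundTrip {
    public static void main(final String[] args) {
        CodecRegistry registry = CodecRegistries.fromProviders(
                new ValueCodecProvider(),
                PojoCodecProvider.builder().automatic(true).build());
        Codec<CreatorConstructorModel> codec = registry.get(CreatorConstructorModel.class);

        CreatorConstructorModel original = new CreatorConstructorModel(Arrays.asList(1, 2, 3), "myString", 42L);
        BsonDocument document = new BsonDocument();
        codec.encode(new BsonDocumentWriter(document), original, EncoderContext.builder().build());

        // Decoding passes integersField and longField through the @BsonCreator constructor,
        // then stringField through its setter.
        CreatorConstructorModel decoded = codec.decode(new BsonDocumentReader(document), DecoderContext.builder().build());
        assert original.equals(decoded);
    }
}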
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorConstructorNoKnownIdModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorConstructorNoKnownIdModel.java
new file mode 100644
index 00000000000..2027aefd605
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorConstructorNoKnownIdModel.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities.conventions;
+
+import org.bson.codecs.pojo.annotations.BsonCreator;
+import org.bson.codecs.pojo.annotations.BsonId;
+import org.bson.codecs.pojo.annotations.BsonProperty;
+
+import java.util.Objects;
+
+public class CreatorConstructorNoKnownIdModel {
+    private final String stringField;
+    private final long longField;
+
+    @BsonCreator
+    public CreatorConstructorNoKnownIdModel(
+            @BsonId final String stringField,
+            @BsonProperty("longField") final long longField) {
+        this.stringField = stringField;
+        this.longField = longField;
+    }
+
+    public String getStringField() {
+        return stringField;
+    }
+
+    public long getLongField() {
+        return longField;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        final CreatorConstructorNoKnownIdModel that = (CreatorConstructorNoKnownIdModel) o;
+        return longField == that.longField && Objects.equals(stringField, that.stringField);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(stringField, longField);
+    }
+
+    @Override
+    public String toString() {
+        return "CreatorConstructorNoKnownIdModel{"
+                + "stringField='" + stringField + '\''
+                + ", longField=" + longField
+                + '}';
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorConstructorPrimitivesModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorConstructorPrimitivesModel.java
new file mode 100644
index 00000000000..ded3c18516e
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorConstructorPrimitivesModel.java
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities.conventions;
+
+import org.bson.codecs.pojo.annotations.BsonCreator;
+import org.bson.codecs.pojo.annotations.BsonProperty;
+
+public final class CreatorConstructorPrimitivesModel {
+    private final int intField;
+    private final String stringField;
+    private final long longField;
+
+
+    @BsonCreator
+    public CreatorConstructorPrimitivesModel(@BsonProperty("intField") final int intField,
+                                             @BsonProperty("stringField") final String stringField,
+                                             @BsonProperty("longField") final long longField) {
+        this.intField = intField;
+        this.stringField = stringField;
+        this.longField = longField;
+    }
+
+    /**
+     * Returns the intField
+     *
+     * @return the intField
+     */
+    public int getIntField() {
+        return intField;
+    }
+
+    /**
+     * Returns the stringField
+     *
+     * @return the stringField
+     */
+    public String getStringField() {
+        return stringField;
+    }
+
+    /**
+     * Returns the longField
+     *
+     * @return the longField
+     */
+    public long getLongField() {
+        return longField;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        CreatorConstructorPrimitivesModel that = (CreatorConstructorPrimitivesModel) o;
+
+        if (getIntField() != that.getIntField()) {
+            return false;
+        }
+        if (getLongField() != that.getLongField()) {
+            return false;
+        }
+        if (getStringField() != null ? !getStringField().equals(that.getStringField()) : that.getStringField() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = getIntField();
+        result = 31 * result + (getStringField() != null ? getStringField().hashCode() : 0);
+        result = 31 * result + (int) (getLongField() ^ (getLongField() >>> 32));
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "CreatorConstructorPrimitivesModel{"
+                + "intField=" + intField
+                + ", stringField='" + stringField + "'"
+                + ", longField=" + longField
+                + "}";
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorConstructorRenameModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorConstructorRenameModel.java
new file mode 100644
index 00000000000..ee94086fe98
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorConstructorRenameModel.java
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities.conventions;
+
+import org.bson.codecs.pojo.annotations.BsonCreator;
+import org.bson.codecs.pojo.annotations.BsonProperty;
+
+import java.util.List;
+
+public class CreatorConstructorRenameModel {
+    private final List<Integer> integersField;
+    private String stringField;
+    public long longField;
+
+    @BsonCreator
+    public CreatorConstructorRenameModel(@BsonProperty("integerList") final List<Integer> integerField,
+                                         @BsonProperty("longField") final long longField) {
+        this.integersField = integerField;
+        this.longField = longField;
+    }
+
+    public CreatorConstructorRenameModel(final List<Integer> integersField, final String stringField, final long longField) {
+        this.integersField = integersField;
+        this.stringField = stringField;
+        this.longField = longField;
+    }
+
+    @BsonProperty("integerList")
+    public List<Integer> getIntegersField() {
+        return integersField;
+    }
+
+    public String getStringField() {
+        return stringField;
+    }
+
+    public void setStringField(final String stringField) {
+        this.stringField = stringField;
+    }
+
+    public long getLongField() {
+        return longField;
+    }
+
+    public void setLongField(final long longField) {
+        this.longField = longField;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        CreatorConstructorRenameModel that = (CreatorConstructorRenameModel) o;
+
+        if (getLongField() != that.getLongField()) {
+            return false;
+        }
+        if (getIntegersField() != null ? !getIntegersField().equals(that.getIntegersField()) : that.getIntegersField() != null) {
+            return false;
+        }
+        if (getStringField() != null ? !getStringField().equals(that.getStringField()) : that.getStringField() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = getIntegersField() != null ? getIntegersField().hashCode() : 0;
+        result = 31 * result + (getStringField() != null ? getStringField().hashCode() : 0);
+        result = 31 * result + (int) (getLongField() ^ (getLongField() >>> 32));
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "CreatorConstructorRenameModel{"
+                + "integersField=" + integersField
+                + ", stringField='" + stringField + "'"
+                + ", longField=" + longField
+                + "}";
+    }
+}
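The rename model stores the list under the key given by the @BsonProperty on the creator parameter and the getter, not under the field name. A short sketch of that behavior, under the same registry assumptions as the earlier examples (the wrapper class name is illustrative):

import java.util.Arrays;
import org.bson.BsonDocument;
import org.bson.BsonDocumentWriter;
import org.bson.codecs.Codec;
import org.bson.codecs.EncoderContext;
import org.bson.codecs.ValueCodecProvider;
import org.bson.codecs.configuration.CodecRegistries;
import org.bson.codecs.configuration.CodecRegistry;
import org.bson.codecs.pojo.PojoCodecProvider;

public final class RenameModelCheck {
    public static void main(final String[] args) {
        CodecRegistry registry = CodecRegistries.fromProviders(
                new ValueCodecProvider(),
                PojoCodecProvider.builder().automatic(true).build());
        Codec<CreatorConstructorRenameModel> codec = registry.get(CreatorConstructorRenameModel.class);

        CreatorConstructorRenameModel model = new CreatorConstructorRenameModel(Arrays.asList(1, 2), "s", 1L);
        BsonDocument document = new BsonDocument();
        codec.encode(new BsonDocumentWriter(document), model, EncoderContext.builder().build());

        // The @BsonProperty("integerList") annotations rename the stored key.
        assert document.containsKey("integerList");
        assert !document.containsKey("integersField");
    }
}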
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorConstructorThrowsExceptionModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorConstructorThrowsExceptionModel.java
new file mode 100644
index 00000000000..10c2721395d
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorConstructorThrowsExceptionModel.java
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities.conventions;
+
+import org.bson.codecs.pojo.annotations.BsonCreator;
+
+public final class CreatorConstructorThrowsExceptionModel {
+    private Integer integerField;
+    private String stringField;
+    public long longField;
+
+    @BsonCreator
+    public CreatorConstructorThrowsExceptionModel() {
+        throw new UnsupportedOperationException("Nope");
+    }
+
+    public Integer getIntegerField() {
+        return integerField;
+    }
+
+    public void setIntegerField(final Integer integerField) {
+        this.integerField = integerField;
+    }
+
+    public String getStringField() {
+        return stringField;
+    }
+
+    public void setStringField(final String stringField) {
+        this.stringField = stringField;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        CreatorConstructorThrowsExceptionModel that = (CreatorConstructorThrowsExceptionModel) o;
+
+        if (longField != that.longField) {
+            return false;
+        }
+        if (getIntegerField() != null ? !getIntegerField().equals(that.getIntegerField()) : that.getIntegerField() != null) {
+            return false;
+        }
+        if (getStringField() != null ? !getStringField().equals(that.getStringField()) : that.getStringField() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = getIntegerField() != null ? getIntegerField().hashCode() : 0;
+        result = 31 * result + (getStringField() != null ? getStringField().hashCode() : 0);
+        result = 31 * result + (int) (longField ^ (longField >>> 32));
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "CreatorConstructorThrowsExceptionModel{"
+                + "integerField=" + integerField
+                + ", stringField='" + stringField + "'"
+                + ", longField=" + longField
+                + "}";
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInSuperClassModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInSuperClassModel.java
new file mode 100644
index 00000000000..2064663ff88
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInSuperClassModel.java
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.codecs.pojo.annotations.BsonCreator; +import org.bson.codecs.pojo.annotations.BsonProperty; + +public abstract class CreatorInSuperClassModel { + @BsonCreator + public static CreatorInSuperClassModel newInstance(@BsonProperty("propertyA") final String propertyA, + @BsonProperty("propertyB") final String propertyB) { + return new CreatorInSuperClassModelImpl(propertyA, propertyB); + } + public abstract String getPropertyA(); + public abstract String getPropertyB(); +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInSuperClassModelImpl.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInSuperClassModelImpl.java new file mode 100644 index 00000000000..12ea066b6c7 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInSuperClassModelImpl.java @@ -0,0 +1,63 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +import java.util.Objects; + +public class CreatorInSuperClassModelImpl extends CreatorInSuperClassModel { + private final String propertyA; + private final String propertyB; + + CreatorInSuperClassModelImpl(final String propertyA, final String propertyB) { + this.propertyA = propertyA; + this.propertyB = propertyB; + } + + @Override + public String getPropertyA() { + return propertyA; + } + + @Override + public String getPropertyB() { + return propertyB; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CreatorInSuperClassModelImpl that = (CreatorInSuperClassModelImpl) o; + + if (!Objects.equals(propertyA, that.propertyA)) { + return false; + } + return Objects.equals(propertyB, that.propertyB); + } + + @Override + public int hashCode() { + int result = propertyA != null ? propertyA.hashCode() : 0; + result = 31 * result + (propertyB != null ? propertyB.hashCode() : 0); + return result; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInvalidConstructorModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInvalidConstructorModel.java new file mode 100644 index 00000000000..efe2ecac762 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInvalidConstructorModel.java @@ -0,0 +1,85 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.codecs.pojo.annotations.BsonCreator; +import org.bson.codecs.pojo.annotations.BsonProperty; + +public final class CreatorInvalidConstructorModel { + private final Integer integerField; + private String stringField; + public long longField; + + @BsonCreator + public CreatorInvalidConstructorModel(@BsonProperty("integerField") final Integer integerField, final String stringField) { + this.integerField = integerField; + this.stringField = stringField; + } + + public Integer getIntegerField() { + return integerField; + } + + public String getStringField() { + return stringField; + } + + public void setStringField(final String stringField) { + this.stringField = stringField; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CreatorInvalidConstructorModel that = (CreatorInvalidConstructorModel) o; + + if (longField != that.longField) { + return false; + } + if (getIntegerField() != null ? !getIntegerField().equals(that.getIntegerField()) : that.getIntegerField() != null) { + return false; + } + if (getStringField() != null ? !getStringField().equals(that.getStringField()) : that.getStringField() != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = getIntegerField() != null ? getIntegerField().hashCode() : 0; + result = 31 * result + (getStringField() != null ? getStringField().hashCode() : 0); + result = 31 * result + (int) (longField ^ (longField >>> 32)); + return result; + } + + @Override + public String toString() { + return "CreatorInvalidConstructorModel{" + + "integerField=" + integerField + + ", stringField='" + stringField + "'" + + ", longField=" + longField + + "}"; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInvalidMethodModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInvalidMethodModel.java new file mode 100644 index 00000000000..26d60ea5d24 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInvalidMethodModel.java @@ -0,0 +1,89 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.bson.codecs.pojo.entities.conventions;
+
+import org.bson.codecs.pojo.annotations.BsonCreator;
+import org.bson.codecs.pojo.annotations.BsonProperty;
+
+public final class CreatorInvalidMethodModel {
+    private final Integer integerField;
+    private String stringField;
+    public long longField;
+
+    private CreatorInvalidMethodModel(final Integer integerField, final String stringField) {
+        this.integerField = integerField;
+        this.stringField = stringField;
+    }
+
+    @BsonCreator
+    public static CreatorInvalidMethodModel create(@BsonProperty("integerField") final Integer integerField, final String stringField) {
+        return new CreatorInvalidMethodModel(integerField, stringField);
+    }
+
+    public Integer getIntegerField() {
+        return integerField;
+    }
+
+    public String getStringField() {
+        return stringField;
+    }
+
+    public void setStringField(final String stringField) {
+        this.stringField = stringField;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        CreatorInvalidMethodModel that = (CreatorInvalidMethodModel) o;
+
+        if (longField != that.longField) {
+            return false;
+        }
+        if (getIntegerField() != null ? !getIntegerField().equals(that.getIntegerField()) : that.getIntegerField() != null) {
+            return false;
+        }
+        if (getStringField() != null ? !getStringField().equals(that.getStringField()) : that.getStringField() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = getIntegerField() != null ? getIntegerField().hashCode() : 0;
+        result = 31 * result + (getStringField() != null ? getStringField().hashCode() : 0);
+        result = 31 * result + (int) (longField ^ (longField >>> 32));
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "CreatorInvalidMethodModel{"
+                + "integerField=" + integerField
+                + ", stringField='" + stringField + "'"
+                + ", longField=" + longField
+                + "}";
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInvalidMethodReturnTypeModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInvalidMethodReturnTypeModel.java
new file mode 100644
index 00000000000..5929d979ca7
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInvalidMethodReturnTypeModel.java
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities.conventions;
+
+import org.bson.codecs.pojo.annotations.BsonCreator;
+import org.bson.codecs.pojo.annotations.BsonProperty;
+
+public final class CreatorInvalidMethodReturnTypeModel {
+    private final Integer integerField;
+    private String stringField;
+    public long longField;
+
+    private CreatorInvalidMethodReturnTypeModel(final Integer integerField, final String stringField) {
+        this.integerField = integerField;
+        this.stringField = stringField;
+    }
+
+    @BsonCreator
+    public static String create(@BsonProperty("integerField") final Integer integerField) {
+        return "Nope";
+    }
+
+    public Integer getIntegerField() {
+        return integerField;
+    }
+
+    public String getStringField() {
+        return stringField;
+    }
+
+    public void setStringField(final String stringField) {
+        this.stringField = stringField;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        CreatorInvalidMethodReturnTypeModel that = (CreatorInvalidMethodReturnTypeModel) o;
+
+        if (longField != that.longField) {
+            return false;
+        }
+        if (getIntegerField() != null ? !getIntegerField().equals(that.getIntegerField()) : that.getIntegerField() != null) {
+            return false;
+        }
+        if (getStringField() != null ? !getStringField().equals(that.getStringField()) : that.getStringField() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = getIntegerField() != null ? getIntegerField().hashCode() : 0;
+        result = 31 * result + (getStringField() != null ? getStringField().hashCode() : 0);
+        result = 31 * result + (int) (longField ^ (longField >>> 32));
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "CreatorInvalidMethodReturnTypeModel{"
+                + "integerField=" + integerField
+                + ", stringField='" + stringField + "'"
+                + ", longField=" + longField
+                + "}";
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInvalidMultipleConstructorsModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInvalidMultipleConstructorsModel.java
new file mode 100644
index 00000000000..9451be68824
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInvalidMultipleConstructorsModel.java
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities.conventions;
+
+import org.bson.codecs.pojo.annotations.BsonCreator;
+import org.bson.codecs.pojo.annotations.BsonProperty;
+
+public final class CreatorInvalidMultipleConstructorsModel {
+    private final Integer integerField;
+    private String stringField;
+    public long longField;
+
+    @BsonCreator
+    public CreatorInvalidMultipleConstructorsModel(@BsonProperty("integerField") final Integer integerField) {
+        this.integerField = integerField;
+    }
+
+    @BsonCreator
+    public CreatorInvalidMultipleConstructorsModel(@BsonProperty("integerField") final Integer integerField,
+                                                   @BsonProperty("stringField") final String stringField) {
+        this(integerField);
+        setStringField(stringField);
+    }
+
+
+    public CreatorInvalidMultipleConstructorsModel(final Integer integerField, final String stringField, final long longField) {
+        this.integerField = integerField;
+        this.stringField = stringField;
+        this.longField = longField;
+    }
+
+    public Integer getIntegerField() {
+        return integerField;
+    }
+
+    public String getStringField() {
+        return stringField;
+    }
+
+    public void setStringField(final String stringField) {
+        this.stringField = stringField;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        CreatorInvalidMultipleConstructorsModel that = (CreatorInvalidMultipleConstructorsModel) o;
+
+        if (longField != that.longField) {
+            return false;
+        }
+        if (getIntegerField() != null ? !getIntegerField().equals(that.getIntegerField()) : that.getIntegerField() != null) {
+            return false;
+        }
+        if (getStringField() != null ? !getStringField().equals(that.getStringField()) : that.getStringField() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = getIntegerField() != null ? getIntegerField().hashCode() : 0;
+        result = 31 * result + (getStringField() != null ? getStringField().hashCode() : 0);
+        result = 31 * result + (int) (longField ^ (longField >>> 32));
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "CreatorInvalidMultipleConstructorsModel{"
+                + "integerField=" + integerField
+                + ", stringField='" + stringField + "'"
+                + ", longField=" + longField
+                + "}";
+    }
+}
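A class declaring two @BsonCreator constructors, as above, cannot be modeled. A sketch of the expected failure mode, assuming the annotation convention rejects it while the ClassModel is built (the exact point where the exception surfaces is an assumption here, and the wrapper class name is illustrative):

import org.bson.codecs.configuration.CodecConfigurationException;
import org.bson.codecs.pojo.ClassModel;

public final class MultipleCreatorsCheck {
    public static void main(final String[] args) {
        try {
            // Building the ClassModel applies the default conventions, including the
            // annotation convention that rejects multiple @BsonCreator declarations.
            ClassModel.builder(CreatorInvalidMultipleConstructorsModel.class).build();
            throw new AssertionError("expected a CodecConfigurationException");
        } catch (CodecConfigurationException e) {
            System.out.println("rejected as expected: " + e.getMessage());
        }
    }
}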
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInvalidMultipleCreatorsModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInvalidMultipleCreatorsModel.java
new file mode 100644
index 00000000000..034c78fa530
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInvalidMultipleCreatorsModel.java
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities.conventions;
+
+import org.bson.codecs.pojo.annotations.BsonCreator;
+import org.bson.codecs.pojo.annotations.BsonProperty;
+
+public final class CreatorInvalidMultipleCreatorsModel {
+    private final Integer integerField;
+    private String stringField;
+    public long longField;
+
+    @BsonCreator
+    public CreatorInvalidMultipleCreatorsModel(@BsonProperty("integerField") final Integer integerField) {
+        this.integerField = integerField;
+    }
+
+    public CreatorInvalidMultipleCreatorsModel(final Integer integerField, final String stringField, final long longField) {
+        this.integerField = integerField;
+        this.stringField = stringField;
+        this.longField = longField;
+    }
+
+    @BsonCreator
+    public static CreatorInvalidMultipleCreatorsModel create(@BsonProperty("integerField") final Integer integerField) {
+        return new CreatorInvalidMultipleCreatorsModel(integerField);
+    }
+
+    public Integer getIntegerField() {
+        return integerField;
+    }
+
+    public String getStringField() {
+        return stringField;
+    }
+
+    public void setStringField(final String stringField) {
+        this.stringField = stringField;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        CreatorInvalidMultipleCreatorsModel that = (CreatorInvalidMultipleCreatorsModel) o;
+
+        if (longField != that.longField) {
+            return false;
+        }
+        if (getIntegerField() != null ? !getIntegerField().equals(that.getIntegerField()) : that.getIntegerField() != null) {
+            return false;
+        }
+        if (getStringField() != null ? !getStringField().equals(that.getStringField()) : that.getStringField() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = getIntegerField() != null ? getIntegerField().hashCode() : 0;
+        result = 31 * result + (getStringField() != null ? getStringField().hashCode() : 0);
+        result = 31 * result + (int) (longField ^ (longField >>> 32));
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "CreatorInvalidMultipleCreatorsModel{"
+                + "integerField=" + integerField
+                + ", stringField='" + stringField + "'"
+                + ", longField=" + longField
+                + "}";
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInvalidMultipleStaticCreatorsModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInvalidMultipleStaticCreatorsModel.java
new file mode 100644
index 00000000000..fa3d445219c
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInvalidMultipleStaticCreatorsModel.java
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities.conventions;
+
+import org.bson.codecs.pojo.annotations.BsonCreator;
+import org.bson.codecs.pojo.annotations.BsonProperty;
+
+public final class CreatorInvalidMultipleStaticCreatorsModel {
+    private final Integer integerField;
+    private String stringField;
+    public long longField;
+
+    private CreatorInvalidMultipleStaticCreatorsModel(final Integer integerField) {
+        this.integerField = integerField;
+    }
+
+    public CreatorInvalidMultipleStaticCreatorsModel(final Integer integerField, final String stringField, final long longField) {
+        this.integerField = integerField;
+        this.stringField = stringField;
+        this.longField = longField;
+    }
+
+    @BsonCreator
+    public static CreatorInvalidMultipleStaticCreatorsModel create(@BsonProperty("integerField") final Integer integerField) {
+        return new CreatorInvalidMultipleStaticCreatorsModel(integerField);
+    }
+
+    @BsonCreator
+    public static CreatorInvalidMultipleStaticCreatorsModel create(@BsonProperty("integerField") final Integer integerField,
+                                                                   @BsonProperty("stringField") final String stringField) {
+        CreatorInvalidMultipleStaticCreatorsModel model = new CreatorInvalidMultipleStaticCreatorsModel(integerField);
+        model.setStringField(stringField);
+        return model;
+    }
+
+    public Integer getIntegerField() {
+        return integerField;
+    }
+
+    public String getStringField() {
+        return stringField;
+    }
+
+    public void setStringField(final String stringField) {
+        this.stringField = stringField;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        CreatorInvalidMultipleStaticCreatorsModel that = (CreatorInvalidMultipleStaticCreatorsModel) o;
+
+        if (longField != that.longField) {
+            return false;
+        }
+        if (getIntegerField() != null ? !getIntegerField().equals(that.getIntegerField()) : that.getIntegerField() != null) {
+            return false;
+        }
+        if (getStringField() != null ? !getStringField().equals(that.getStringField()) : that.getStringField() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = getIntegerField() != null ? getIntegerField().hashCode() : 0;
+        result = 31 * result + (getStringField() != null ? getStringField().hashCode() : 0);
+        result = 31 * result + (int) (longField ^ (longField >>> 32));
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "CreatorInvalidMultipleStaticCreatorsModel{"
+                + "integerField=" + integerField
+                + ", stringField='" + stringField + "'"
+                + ", longField=" + longField
+                + "}";
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInvalidTypeConstructorModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInvalidTypeConstructorModel.java
new file mode 100644
index 00000000000..f5e3a242b13
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInvalidTypeConstructorModel.java
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities.conventions;
+
+import org.bson.codecs.pojo.annotations.BsonCreator;
+import org.bson.codecs.pojo.annotations.BsonProperty;
+
+public final class CreatorInvalidTypeConstructorModel {
+    private final Integer integerField;
+    private String stringField;
+    public long longField;
+
+    @BsonCreator
+    public CreatorInvalidTypeConstructorModel(@BsonProperty("integerField") final String integerField) {
+        this.integerField = Integer.parseInt(integerField);
+    }
+
+    public Integer getIntegerField() {
+        return integerField;
+    }
+
+    public String getStringField() {
+        return stringField;
+    }
+
+    public void setStringField(final String stringField) {
+        this.stringField = stringField;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        CreatorInvalidTypeConstructorModel that = (CreatorInvalidTypeConstructorModel) o;
+
+        if (longField != that.longField) {
+            return false;
+        }
+        if (getIntegerField() != null ? !getIntegerField().equals(that.getIntegerField()) : that.getIntegerField() != null) {
+            return false;
+        }
+        if (getStringField() != null ? !getStringField().equals(that.getStringField()) : that.getStringField() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = getIntegerField() != null ? getIntegerField().hashCode() : 0;
+        result = 31 * result + (getStringField() != null ? getStringField().hashCode() : 0);
+        result = 31 * result + (int) (longField ^ (longField >>> 32));
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "CreatorInvalidTypeConstructorModel{"
+                + "integerField=" + integerField
+                + ", stringField='" + stringField + "'"
+                + ", longField=" + longField
+                + "}";
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInvalidTypeMethodModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInvalidTypeMethodModel.java
new file mode 100644
index 00000000000..c5cb636add3
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorInvalidTypeMethodModel.java
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.codecs.pojo.entities.conventions;
+
+import org.bson.codecs.pojo.annotations.BsonCreator;
+import org.bson.codecs.pojo.annotations.BsonProperty;
+
+public final class CreatorInvalidTypeMethodModel {
+    private final Integer integerField;
+    private String stringField;
+    public long longField;
+
+    private CreatorInvalidTypeMethodModel(final Integer integerField) {
+        this.integerField = integerField;
+    }
+
+    @BsonCreator
+    public static CreatorInvalidTypeMethodModel create(@BsonProperty("integerField") final String integerField) {
+        return new CreatorInvalidTypeMethodModel(Integer.parseInt(integerField));
+    }
+
+    public Integer getIntegerField() {
+        return integerField;
+    }
+
+    public String getStringField() {
+        return stringField;
+    }
+
+    public void setStringField(final String stringField) {
+        this.stringField = stringField;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        CreatorInvalidTypeMethodModel that = (CreatorInvalidTypeMethodModel) o;
+
+        if (longField != that.longField) {
+            return false;
+        }
+        if (getIntegerField() != null ? !getIntegerField().equals(that.getIntegerField()) : that.getIntegerField() != null) {
+            return false;
+        }
+        if (getStringField() != null ? !getStringField().equals(that.getStringField()) : that.getStringField() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = getIntegerField() != null ? getIntegerField().hashCode() : 0;
+        result = 31 * result + (getStringField() != null ? getStringField().hashCode() : 0);
+        result = 31 * result + (int) (longField ^ (longField >>> 32));
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "CreatorInvalidTypeMethodModel{"
+                + "integerField=" + integerField
+                + ", stringField='" + stringField + "'"
+                + ", longField=" + longField
+                + "}";
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorMethodModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorMethodModel.java
new file mode 100644
index 00000000000..55924e9c94d
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorMethodModel.java
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.codecs.pojo.annotations.BsonCreator; +import org.bson.codecs.pojo.annotations.BsonProperty; + +public final class CreatorMethodModel { + private final Integer integerField; + private String stringField; + public long longField; + + private CreatorMethodModel(final Integer integerField) { + this.integerField = integerField; + } + + @BsonCreator + public static CreatorMethodModel create(@BsonProperty("integerField") final Integer integerField) { + return new CreatorMethodModel(integerField); + } + + public CreatorMethodModel(final Integer integerField, final String stringField, final long longField) { + this.integerField = integerField; + this.stringField = stringField; + this.longField = longField; + } + + public Integer getIntegerField() { + return integerField; + } + + public String getStringField() { + return stringField; + } + + public void setStringField(final String stringField) { + this.stringField = stringField; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CreatorMethodModel that = (CreatorMethodModel) o; + + if (longField != that.longField) { + return false; + } + if (getIntegerField() != null ? !getIntegerField().equals(that.getIntegerField()) : that.getIntegerField() != null) { + return false; + } + if (getStringField() != null ? !getStringField().equals(that.getStringField()) : that.getStringField() != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = getIntegerField() != null ? getIntegerField().hashCode() : 0; + result = 31 * result + (getStringField() != null ? getStringField().hashCode() : 0); + result = 31 * result + (int) (longField ^ (longField >>> 32)); + return result; + } + + @Override + public String toString() { + return "CreatorMethodModel{" + + "integerField=" + integerField + + ", stringField='" + stringField + "'" + + ", longField=" + longField + + "}"; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorMethodThrowsExceptionModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorMethodThrowsExceptionModel.java new file mode 100644 index 00000000000..4b09c75b76d --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorMethodThrowsExceptionModel.java @@ -0,0 +1,91 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.bson.codecs.pojo.entities.conventions;
+
+import org.bson.codecs.pojo.annotations.BsonCreator;
+import org.bson.codecs.pojo.annotations.BsonProperty;
+
+public final class CreatorMethodThrowsExceptionModel {
+    private Integer integerField;
+    private String stringField;
+    public long longField;
+
+    @BsonCreator
+    public static CreatorMethodThrowsExceptionModel create(@BsonProperty("integerField") final Integer integerField) {
+        throw new UnsupportedOperationException("Nope");
+    }
+
+    private CreatorMethodThrowsExceptionModel() {
+    }
+
+    public Integer getIntegerField() {
+        return integerField;
+    }
+
+    public void setIntegerField(final Integer integerField) {
+        this.integerField = integerField;
+    }
+
+    public String getStringField() {
+        return stringField;
+    }
+
+    public void setStringField(final String stringField) {
+        this.stringField = stringField;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        CreatorMethodThrowsExceptionModel that = (CreatorMethodThrowsExceptionModel) o;
+
+        if (longField != that.longField) {
+            return false;
+        }
+        if (getIntegerField() != null ? !getIntegerField().equals(that.getIntegerField()) : that.getIntegerField() != null) {
+            return false;
+        }
+        if (getStringField() != null ? !getStringField().equals(that.getStringField()) : that.getStringField() != null) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = getIntegerField() != null ? getIntegerField().hashCode() : 0;
+        result = 31 * result + (getStringField() != null ? getStringField().hashCode() : 0);
+        result = 31 * result + (int) (longField ^ (longField >>> 32));
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "CreatorMethodThrowsExceptionModel{"
+                + "integerField=" + integerField
+                + ", stringField='" + stringField + "'"
+                + ", longField=" + longField
+                + "}";
+    }
+}
diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorNoArgsConstructorModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorNoArgsConstructorModel.java
new file mode 100644
index 00000000000..c4e5b4e69c9
--- /dev/null
+++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorNoArgsConstructorModel.java
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.codecs.pojo.annotations.BsonCreator; + +public final class CreatorNoArgsConstructorModel { + private Integer integerField; + private String stringField; + public long longField; + + @BsonCreator + public CreatorNoArgsConstructorModel(){ + } + + public CreatorNoArgsConstructorModel(final Integer integerField, final String stringField, final long longField) { + this.integerField = integerField; + this.stringField = stringField; + this.longField = longField; + } + + public Integer getIntegerField() { + return integerField; + } + + public void setIntegerField(final Integer integerField) { + this.integerField = integerField; + } + + public String getStringField() { + return stringField; + } + + public void setStringField(final String stringField) { + this.stringField = stringField; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CreatorNoArgsConstructorModel that = (CreatorNoArgsConstructorModel) o; + + if (longField != that.longField) { + return false; + } + if (getIntegerField() != null ? !getIntegerField().equals(that.getIntegerField()) : that.getIntegerField() != null) { + return false; + } + if (getStringField() != null ? !getStringField().equals(that.getStringField()) : that.getStringField() != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = getIntegerField() != null ? getIntegerField().hashCode() : 0; + result = 31 * result + (getStringField() != null ? getStringField().hashCode() : 0); + result = 31 * result + (int) (longField ^ (longField >>> 32)); + return result; + } + + @Override + public String toString() { + return "CreatorNoArgsConstructorModel{" + + "integerField=" + integerField + + ", stringField='" + stringField + "'" + + ", longField=" + longField + + "}"; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorNoArgsMethodModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorNoArgsMethodModel.java new file mode 100644 index 00000000000..ddee7f48668 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/CreatorNoArgsMethodModel.java @@ -0,0 +1,96 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.codecs.pojo.annotations.BsonCreator; + +public final class CreatorNoArgsMethodModel { + private Integer integerField; + private String stringField; + public long longField; + + @BsonCreator + public static CreatorNoArgsMethodModel create() { + return new CreatorNoArgsMethodModel(); + } + + private CreatorNoArgsMethodModel() { + } + + public CreatorNoArgsMethodModel(final Integer integerField, final String stringField, final long longField) { + this.integerField = integerField; + this.stringField = stringField; + this.longField = longField; + } + + public Integer getIntegerField() { + return integerField; + } + + public void setIntegerField(final Integer integerField) { + this.integerField = integerField; + } + + public String getStringField() { + return stringField; + } + + public void setStringField(final String stringField) { + this.stringField = stringField; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CreatorNoArgsMethodModel that = (CreatorNoArgsMethodModel) o; + + if (longField != that.longField) { + return false; + } + if (getIntegerField() != null ? !getIntegerField().equals(that.getIntegerField()) : that.getIntegerField() != null) { + return false; + } + if (getStringField() != null ? !getStringField().equals(that.getStringField()) : that.getStringField() != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = getIntegerField() != null ? getIntegerField().hashCode() : 0; + result = 31 * result + (getStringField() != null ? getStringField().hashCode() : 0); + result = 31 * result + (int) (longField ^ (longField >>> 32)); + return result; + } + + @Override + public String toString() { + return "CreatorNoArgsMethodModel{" + + "integerField=" + integerField + + ", stringField='" + stringField + "'" + + ", longField=" + longField + + "}"; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/DiscriminatorNameModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/DiscriminatorNameModel.java new file mode 100644 index 00000000000..dd9bde84836 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/DiscriminatorNameModel.java @@ -0,0 +1,20 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +public final class DiscriminatorNameModel { +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/FieldStorageModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/FieldStorageModel.java new file mode 100644 index 00000000000..f0609c0c8b2 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/FieldStorageModel.java @@ -0,0 +1,21 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +public final class FieldStorageModel { + private String id; +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/InterfaceModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/InterfaceModel.java new file mode 100644 index 00000000000..b5def72fb17 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/InterfaceModel.java @@ -0,0 +1,23 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.codecs.pojo.annotations.BsonDiscriminator; + +@BsonDiscriminator +public interface InterfaceModel { +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/InterfaceModelBInstanceCreatorConvention.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/InterfaceModelBInstanceCreatorConvention.java new file mode 100644 index 00000000000..88781c40513 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/InterfaceModelBInstanceCreatorConvention.java @@ -0,0 +1,54 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.codecs.pojo.ClassModelBuilder; +import org.bson.codecs.pojo.Convention; +import org.bson.codecs.pojo.InstanceCreator; +import org.bson.codecs.pojo.PropertyModel; +import org.bson.codecs.pojo.entities.InterfaceModelB; +import org.bson.codecs.pojo.entities.InterfaceModelImpl; + +public class InterfaceModelBInstanceCreatorConvention implements Convention { + @Override + @SuppressWarnings("unchecked") + public void apply(final ClassModelBuilder<?> classModelBuilder) { + if (classModelBuilder.getType().equals(InterfaceModelB.class)) { + // Simulates a custom InstanceCreator factory implementation. + // (This one could be generated automatically, but a real use case might need an advanced + // reflection-based solution that the POJO Codec doesn't support out of the box.) + ((ClassModelBuilder<InterfaceModelB>) classModelBuilder).instanceCreatorFactory(() -> { + InterfaceModelB interfaceModelB = new InterfaceModelImpl(); + return new InstanceCreator<InterfaceModelB>() { + @Override + public <S> void set(final S value, final PropertyModel<S> propertyModel) { + if (propertyModel.getName().equals("propertyA")) { + interfaceModelB.setPropertyA((String) value); + } else if (propertyModel.getName().equals("propertyB")) { + interfaceModelB.setPropertyB((String) value); + } + } + + @Override + public InterfaceModelB getInstance() { + return interfaceModelB; + } + }; + }); + } + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/InterfaceModelImplA.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/InterfaceModelImplA.java new file mode 100644 index 00000000000..553ac977e55 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/InterfaceModelImplA.java @@ -0,0 +1,69 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.codecs.pojo.annotations.BsonDiscriminator; + +import java.util.Objects; + +@BsonDiscriminator +public class InterfaceModelImplA implements InterfaceModel { + private boolean value; + private String name; + + public boolean isValue() { + return value; + } + + public InterfaceModelImplA setValue(final boolean value) { + this.value = value; + return this; + } + + public String getName() { + return name; + } + + public InterfaceModelImplA setName(final String name) { + this.name = name; + return this; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + InterfaceModelImplA that = (InterfaceModelImplA) o; + + if (value != that.value) { + return false; + } + return Objects.equals(name, that.name); + } + + @Override + public int hashCode() { + int result = (value ? 1 : 0); + result = 31 * result + (name != null ? 
name.hashCode() : 0); + return result; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/InterfaceModelImplB.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/InterfaceModelImplB.java new file mode 100644 index 00000000000..5cf71117115 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/InterfaceModelImplB.java @@ -0,0 +1,67 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.codecs.pojo.annotations.BsonDiscriminator; + +@BsonDiscriminator +public class InterfaceModelImplB implements InterfaceModel { + private boolean value; + private int integer; + + public boolean isValue() { + return value; + } + + public InterfaceModelImplB setValue(final boolean value) { + this.value = value; + return this; + } + + public int getInteger() { + return integer; + } + + public InterfaceModelImplB setInteger(final int integer) { + this.integer = integer; + return this; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + InterfaceModelImplB that = (InterfaceModelImplB) o; + + if (value != that.value) { + return false; + } + return integer == that.integer; + } + + @Override + public int hashCode() { + int result = (value ? 1 : 0); + result = 31 * result + integer; + return result; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/MapGetterImmutableModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/MapGetterImmutableModel.java new file mode 100644 index 00000000000..6e17a1778c7 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/MapGetterImmutableModel.java @@ -0,0 +1,57 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
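[Editorial aside, not part of the patch: InterfaceModelBInstanceCreatorConvention above plugs a hand-rolled InstanceCreator into the ClassModelBuilder. A hedged sketch of how such a convention is attached; appending it to the default convention list and the register(...) call are assumptions for illustration, since custom conventions replace the defaults rather than extend them.]

import java.util.ArrayList;
import java.util.List;

import org.bson.codecs.configuration.CodecRegistries;
import org.bson.codecs.configuration.CodecRegistry;
import org.bson.codecs.pojo.Convention;
import org.bson.codecs.pojo.Conventions;
import org.bson.codecs.pojo.PojoCodecProvider;
import org.bson.codecs.pojo.entities.InterfaceModelB;
import org.bson.codecs.pojo.entities.conventions.InterfaceModelBInstanceCreatorConvention;

public final class ConventionWiringSketch {
    public static void main(final String[] args) {
        // Setting conventions replaces the default list, so the defaults are re-added first.
        List<Convention> conventions = new ArrayList<>(Conventions.DEFAULT_CONVENTIONS);
        conventions.add(new InterfaceModelBInstanceCreatorConvention());

        CodecRegistry registry = CodecRegistries.fromProviders(
                PojoCodecProvider.builder()
                        .conventions(conventions)
                        .register(InterfaceModelB.class)
                        .build());

        // InterfaceModelB is an interface, yet a codec can be produced because the
        // convention installed an InstanceCreator backed by InterfaceModelImpl.
        System.out.println(registry.get(InterfaceModelB.class).getEncoderClass());
    }
}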
+ */ + +package org.bson.codecs.pojo.entities.conventions; + +import java.util.Collections; +import java.util.Map; +import java.util.Objects; + +public class MapGetterImmutableModel { + + private final Map mapField; + + public MapGetterImmutableModel() { + this(Collections.emptyMap()); + } + + public MapGetterImmutableModel(final Map mapField) { + this.mapField = Collections.unmodifiableMap(mapField); + } + + public Map getMapField() { + return mapField; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()){ + return false; + } + + MapGetterImmutableModel that = (MapGetterImmutableModel) o; + + return Objects.equals(mapField, that.mapField); + } + + @Override + public int hashCode() { + return mapField != null ? mapField.hashCode() : 0; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/MapGetterMutableModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/MapGetterMutableModel.java new file mode 100644 index 00000000000..61366762f14 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/MapGetterMutableModel.java @@ -0,0 +1,56 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +public class MapGetterMutableModel { + + private final Map mapField; + + public MapGetterMutableModel() { + this.mapField = new HashMap<>(); + } + + public MapGetterMutableModel(final Map mapField) { + this.mapField = mapField; + } + + public Map getMapField() { + return mapField; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()){ + return false; + } + + MapGetterMutableModel that = (MapGetterMutableModel) o; + return Objects.equals(mapField, that.mapField); + } + + @Override + public int hashCode() { + return mapField != null ? mapField.hashCode() : 0; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/MapGetterNonEmptyModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/MapGetterNonEmptyModel.java new file mode 100644 index 00000000000..8c1e77022b4 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/MapGetterNonEmptyModel.java @@ -0,0 +1,57 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +import java.util.Collections; +import java.util.Map; +import java.util.Objects; + +public class MapGetterNonEmptyModel { + + private final Map mapField; + + public MapGetterNonEmptyModel() { + this(Collections.singletonMap("a", 1)); + } + + public MapGetterNonEmptyModel(final Map mapField) { + this.mapField = mapField; + } + + public Map getMapField() { + return mapField; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()){ + return false; + } + + MapGetterNonEmptyModel that = (MapGetterNonEmptyModel) o; + + return Objects.equals(mapField, that.mapField); + } + + @Override + public int hashCode() { + return mapField != null ? mapField.hashCode() : 0; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/MapGetterNullModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/MapGetterNullModel.java new file mode 100644 index 00000000000..f67a621af11 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/MapGetterNullModel.java @@ -0,0 +1,56 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +import java.util.Map; +import java.util.Objects; + +public class MapGetterNullModel { + + private final Map mapField; + + public MapGetterNullModel() { + this(null); + } + + public MapGetterNullModel(final Map mapField) { + this.mapField = mapField; + } + + public Map getMapField() { + return mapField; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()){ + return false; + } + + MapGetterNullModel that = (MapGetterNullModel) o; + + return Objects.equals(mapField, that.mapField); + } + + @Override + public int hashCode() { + return mapField != null ? mapField.hashCode() : 0; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/PropertyNameModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/PropertyNameModel.java new file mode 100644 index 00000000000..0f87d599cb4 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/PropertyNameModel.java @@ -0,0 +1,22 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
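[Editorial aside, not part of the patch: the four MapGetter* models pin down decoding when a final map is reachable only through its getter: the mutable variant can be populated in place, the immutable one cannot, and the null/non-empty variants cover the map's pre-state. A sketch of the getter-based population path, assuming these models exercise the USE_GETTERS_FOR_SETTERS convention and that the map fields are parameterized as Map<String, Integer> (the generics appear to have been lost in this rendering of the diff); both are inferences, not stated here.]

import java.util.ArrayList;
import java.util.List;

import org.bson.BsonDocument;
import org.bson.BsonDocumentReader;
import org.bson.codecs.DecoderContext;
import org.bson.codecs.ValueCodecProvider;
import org.bson.codecs.configuration.CodecRegistries;
import org.bson.codecs.configuration.CodecRegistry;
import org.bson.codecs.pojo.Convention;
import org.bson.codecs.pojo.Conventions;
import org.bson.codecs.pojo.PojoCodecProvider;
import org.bson.codecs.pojo.entities.conventions.MapGetterMutableModel;

public final class MapGetterSketch {
    public static void main(final String[] args) {
        List<Convention> conventions = new ArrayList<>(Conventions.DEFAULT_CONVENTIONS);
        conventions.add(Conventions.USE_GETTERS_FOR_SETTERS);

        CodecRegistry registry = CodecRegistries.fromProviders(
                new ValueCodecProvider(),
                PojoCodecProvider.builder().conventions(conventions).automatic(true).build());

        // No setMapField() exists: the decoder fetches the map via getMapField()
        // and put()s entries into it, which only works if that map is mutable.
        BsonDocument document = BsonDocument.parse("{mapField: {a: 1}}");
        MapGetterMutableModel decoded = registry.get(MapGetterMutableModel.class)
                .decode(new BsonDocumentReader(document), DecoderContext.builder().build());
        System.out.println(decoded.getMapField()); // {a=1}
    }
}

MapGetterImmutableModel wraps its map in Collections.unmodifiableMap, so the same path would fail for it; the null and non-empty variants cover the remaining edge cases.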
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +public final class PropertyNameModel { + private int myModelField; + private int myModel2Field; +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/Subclass1Model.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/Subclass1Model.java new file mode 100644 index 00000000000..51bbdd46583 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/Subclass1Model.java @@ -0,0 +1,64 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.codecs.pojo.annotations.BsonDiscriminator; + +@BsonDiscriminator +public class Subclass1Model extends SuperClassModel { + private String name; + + public String getName() { + return name; + } + + public Subclass1Model setName(final String name) { + this.name = name; + return this; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + Subclass1Model that = (Subclass1Model) o; + + return getName() != null ? getName().equals(that.getName()) : that.getName() == null; + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + (getName() != null ? getName().hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "Subclass1Model{" + + "name='" + name + '\'' + + "} " + super.toString(); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/Subclass2Model.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/Subclass2Model.java new file mode 100644 index 00000000000..1fd69138651 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/Subclass2Model.java @@ -0,0 +1,64 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.codecs.pojo.annotations.BsonDiscriminator; + +@BsonDiscriminator +public class Subclass2Model extends SuperClassModel { + private int integer; + + public int getInteger() { + return integer; + } + + public Subclass2Model setInteger(final int integer) { + this.integer = integer; + return this; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + Subclass2Model that = (Subclass2Model) o; + + return getInteger() == that.getInteger(); + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + getInteger(); + return result; + } + + @Override + public String toString() { + return "Subclass2Model{" + + "integer=" + integer + + "} " + super.toString(); + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/SuperClassModel.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/SuperClassModel.java new file mode 100644 index 00000000000..b46ebbd699b --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/SuperClassModel.java @@ -0,0 +1,59 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.codecs.pojo.entities.conventions; + +import org.bson.codecs.pojo.annotations.BsonDiscriminator; + +@BsonDiscriminator +public abstract class SuperClassModel { + private boolean value; + + public boolean isValue() { + return value; + } + + public SuperClassModel setValue(final boolean value) { + this.value = value; + return this; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + SuperClassModel that = (SuperClassModel) o; + + return isValue() == that.isValue(); + } + + @Override + public int hashCode() { + return (isValue() ? 1 : 0); + } + + @Override + public String toString() { + return "SuperClassModel{" + + "value=" + value + + '}'; + } +} diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/package-info.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/package-info.java new file mode 100644 index 00000000000..44ac0e845ad --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/conventions/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
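[Editorial aside, not part of the patch: SuperClassModel and both subclasses opt in to a discriminator, so an encoded subclass records its concrete type and can be decoded back through the superclass codec. A sketch of that round trip, assuming the default discriminator key _t and default discriminator naming; the registry wiring is illustrative.]

import org.bson.BsonDocument;
import org.bson.BsonDocumentReader;
import org.bson.BsonDocumentWriter;
import org.bson.codecs.Codec;
import org.bson.codecs.DecoderContext;
import org.bson.codecs.EncoderContext;
import org.bson.codecs.ValueCodecProvider;
import org.bson.codecs.configuration.CodecRegistries;
import org.bson.codecs.configuration.CodecRegistry;
import org.bson.codecs.pojo.PojoCodecProvider;
import org.bson.codecs.pojo.entities.conventions.Subclass1Model;
import org.bson.codecs.pojo.entities.conventions.Subclass2Model;
import org.bson.codecs.pojo.entities.conventions.SuperClassModel;

public final class DiscriminatorSketch {
    public static void main(final String[] args) {
        CodecRegistry registry = CodecRegistries.fromProviders(
                new ValueCodecProvider(),
                PojoCodecProvider.builder()
                        .register(SuperClassModel.class, Subclass1Model.class, Subclass2Model.class)
                        .build());
        Codec<SuperClassModel> codec = registry.get(SuperClassModel.class);

        SuperClassModel model = new Subclass1Model().setName("Bob");
        BsonDocument document = new BsonDocument();
        codec.encode(new BsonDocumentWriter(document), model, EncoderContext.builder().build());

        // The document now carries the subtype, e.g. {_t: 'Subclass1Model', ...},
        // so the superclass codec can restore the concrete subclass on the way back.
        SuperClassModel decoded = codec.decode(new BsonDocumentReader(document), DecoderContext.builder().build());
        assert decoded instanceof Subclass1Model;
    }
}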
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Package contains test models for conventions + */ +package org.bson.codecs.pojo.entities.conventions; diff --git a/bson/src/test/unit/org/bson/codecs/pojo/entities/package-info.java b/bson/src/test/unit/org/bson/codecs/pojo/entities/package-info.java new file mode 100644 index 00000000000..abd43766a50 --- /dev/null +++ b/bson/src/test/unit/org/bson/codecs/pojo/entities/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Test entities and related classes for the conventions tests + */ +package org.bson.codecs.pojo.entities; diff --git a/bson/src/test/unit/org/bson/internal/BsonUtilTest.java b/bson/src/test/unit/org/bson/internal/BsonUtilTest.java new file mode 100644 index 00000000000..f0ed7c24b26 --- /dev/null +++ b/bson/src/test/unit/org/bson/internal/BsonUtilTest.java @@ -0,0 +1,132 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.bson.internal; + +import org.bson.BsonArray; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.BsonDocumentWrapper; +import org.bson.BsonJavaScriptWithScope; +import org.bson.BsonValue; +import org.bson.RawBsonArray; +import org.bson.RawBsonDocument; +import org.bson.conversions.Bson; +import org.junit.jupiter.api.Test; + +import java.nio.charset.StandardCharsets; +import java.util.AbstractMap.SimpleImmutableEntry; +import java.util.Map.Entry; + +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static org.bson.assertions.Assertions.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotSame; + +final class BsonUtilTest { + @Test + public void mutableDeepCopy() { + Entry<String, BsonBinary> originalBsonBinaryEntry = new SimpleImmutableEntry<>( + "bsonBinary", + new BsonBinary("bsonBinary".getBytes(StandardCharsets.UTF_8)) + ); + Entry<String, BsonJavaScriptWithScope> originalBsonJavaScriptWithScopeEntry = new SimpleImmutableEntry<>( + "bsonJavaScriptWithScopeEntry", + new BsonJavaScriptWithScope("\"use strict\";", new BsonDocument()) + ); + Entry<String, RawBsonDocument> originalRawBsonDocumentEntry = new SimpleImmutableEntry<>( + "rawBsonDocument", + RawBsonDocument.parse("{rawBsonDocument: 'rawBsonDocument_value'}") + ); + Entry<String, BsonDocumentWrapper<RawBsonDocument>> originalBsonDocumentWrapperEntry = new SimpleImmutableEntry<>( + "bsonDocumentWrapper", + new BsonDocumentWrapper<>(originalRawBsonDocumentEntry.getValue(), Bson.DEFAULT_CODEC_REGISTRY.get(RawBsonDocument.class)) + ); + Entry<String, BsonDocument> originalBsonDocumentEntry = new SimpleImmutableEntry<>( + "bsonDocument", + new BsonDocument() + .append(originalBsonBinaryEntry.getKey(), originalBsonBinaryEntry.getValue()) + .append(originalBsonJavaScriptWithScopeEntry.getKey(), originalBsonJavaScriptWithScopeEntry.getValue()) + .append(originalRawBsonDocumentEntry.getKey(), originalRawBsonDocumentEntry.getValue()) + .append(originalBsonDocumentWrapperEntry.getKey(), originalBsonDocumentWrapperEntry.getValue()) + ); + Entry<String, BsonArray> originalBsonArrayEntry = new SimpleImmutableEntry<>( + "bsonArray", + new BsonArray(singletonList(new BsonArray())) + ); + Entry<String, RawBsonArray> originalRawBsonArrayEntry = new SimpleImmutableEntry<>( + "rawBsonArray", + rawBsonArray( + originalBsonBinaryEntry.getValue(), + originalBsonJavaScriptWithScopeEntry.getValue(), + originalRawBsonDocumentEntry.getValue(), + originalBsonDocumentWrapperEntry.getValue(), + originalBsonDocumentEntry.getValue(), + originalBsonArrayEntry.getValue()) + ); + BsonDocument original = new BsonDocument() + .append(originalBsonBinaryEntry.getKey(), originalBsonBinaryEntry.getValue()) + .append(originalBsonJavaScriptWithScopeEntry.getKey(), originalBsonJavaScriptWithScopeEntry.getValue()) + .append(originalRawBsonDocumentEntry.getKey(), originalRawBsonDocumentEntry.getValue()) + .append(originalBsonDocumentWrapperEntry.getKey(), originalBsonDocumentWrapperEntry.getValue()) + .append(originalBsonDocumentEntry.getKey(), originalBsonDocumentEntry.getValue()) + .append(originalBsonArrayEntry.getKey(), originalBsonArrayEntry.getValue()) + .append(originalRawBsonArrayEntry.getKey(), originalRawBsonArrayEntry.getValue()); + BsonDocument copy = BsonUtil.mutableDeepCopy(original); + assertEqualNotSameAndMutable(original, copy); + original.forEach((key, value) -> assertEqualNotSameAndMutable(value, copy.get(key))); + // check nested document + String nestedDocumentKey = originalBsonDocumentEntry.getKey(); + BsonDocument originalNestedDocument = 
original.getDocument(nestedDocumentKey); + BsonDocument copyNestedDocument = copy.getDocument(nestedDocumentKey); + assertEqualNotSameAndMutable(originalNestedDocument, copyNestedDocument); + originalNestedDocument.forEach((key, value) -> assertEqualNotSameAndMutable(value, copyNestedDocument.get(key))); + // check nested array + String nestedArrayKey = originalRawBsonArrayEntry.getKey(); + BsonArray originalNestedArray = original.getArray(nestedArrayKey); + BsonArray copyNestedArray = copy.getArray(nestedArrayKey); + assertEqualNotSameAndMutable(originalNestedArray, copyNestedArray); + for (int i = 0; i < originalNestedArray.size(); i++) { + assertEqualNotSameAndMutable(originalNestedArray.get(i), copyNestedArray.get(i)); + } + } + + private static RawBsonArray rawBsonArray(final BsonValue... elements) { + return (RawBsonArray) new RawBsonDocument( + new BsonDocument("a", new BsonArray(asList(elements))), Bson.DEFAULT_CODEC_REGISTRY.get(BsonDocument.class)) + .get("a"); + } + + private static void assertEqualNotSameAndMutable(final Object expected, final Object actual) { + assertEquals(expected, actual); + assertNotSame(expected, actual); + Class<?> actualClass = actual.getClass(); + if (expected instanceof BsonDocument) { + assertEquals(BsonDocument.class, actualClass); + } else if (expected instanceof BsonArray) { + assertEquals(BsonArray.class, actualClass); + } else if (expected instanceof BsonBinary) { + assertEquals(BsonBinary.class, actualClass); + } else if (expected instanceof BsonJavaScriptWithScope) { + assertEquals(BsonJavaScriptWithScope.class, actualClass); + } else { + fail("Unexpected " + expected.getClass().toString()); + } + } + + private BsonUtilTest() { + } +} diff --git a/bson/src/test/unit/org/bson/internal/CodecCacheSpecification.groovy b/bson/src/test/unit/org/bson/internal/CodecCacheSpecification.groovy new file mode 100644 index 00000000000..09b40735f1f --- /dev/null +++ b/bson/src/test/unit/org/bson/internal/CodecCacheSpecification.groovy @@ -0,0 +1,64 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
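[Editorial aside, not part of the patch: the test above fixes the contract of the internal BsonUtil.mutableDeepCopy, namely that the copy is equal but not identical, recursively, and that immutable inputs (RawBsonDocument, RawBsonArray, BsonDocumentWrapper) come back as their mutable counterparts. Condensed as a sketch against that internal API:]

import org.bson.BsonDocument;
import org.bson.BsonInt32;
import org.bson.RawBsonDocument;
import org.bson.internal.BsonUtil;

public final class MutableDeepCopySketch {
    public static void main(final String[] args) {
        BsonDocument original = new BsonDocument("raw", RawBsonDocument.parse("{a: 1}"));

        BsonDocument copy = BsonUtil.mutableDeepCopy(original);

        // Equal by value, distinct by identity, all the way down.
        assert copy.equals(original) && copy != original;
        // Immutable inputs come back as plain, mutable BsonDocument instances.
        assert copy.get("raw").getClass() == BsonDocument.class;
        // So mutating the copy cannot leak into the original.
        copy.getDocument("raw").put("b", new BsonInt32(2));
        assert !original.getDocument("raw").containsKey("b");
    }
}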
+ */ + +package org.bson.internal + +import org.bson.codecs.MinKeyCodec +import org.bson.types.MinKey +import spock.lang.Specification + +class CodecCacheSpecification extends Specification { + + def 'should return the cached codec if a codec for the class exists'() { + when: + def codec = new MinKeyCodec() + def cache = new CodecCache() + def cacheKey = new CodecCache.CodecCacheKey(MinKey, null) + cache.putIfAbsent(cacheKey, codec) + + then: + cache.get(cacheKey).get().is(codec) + } + + def 'should return empty if codec for class does not exist'() { + when: + def cache = new CodecCache() + def cacheKey = new CodecCache.CodecCacheKey(MinKey, null) + + then: + !cache.get(cacheKey).isPresent() + } + + def 'should return the cached codec if a codec for the parameterized class exists'() { + when: + def codec = new MinKeyCodec() + def cache = new CodecCache() + def cacheKey = new CodecCache.CodecCacheKey(List, [Integer]) + cache.putIfAbsent(cacheKey, codec) + + then: + cache.get(cacheKey).get().is(codec) + } + + def 'should return empty if codec for the parameterized class does not exist'() { + when: + def cache = new CodecCache() + def cacheKey = new CodecCache.CodecCacheKey(List, [Integer]) + + then: + !cache.get(cacheKey).isPresent() + } +} diff --git a/bson/src/test/unit/org/bson/internal/Holder.java b/bson/src/test/unit/org/bson/internal/Holder.java new file mode 100644 index 00000000000..afbf2f3f78c --- /dev/null +++ b/bson/src/test/unit/org/bson/internal/Holder.java @@ -0,0 +1,24 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.internal; + +import java.util.Collection; + +public class Holder { + @SuppressWarnings("VisibilityModifier") + public Collection> c; +} diff --git a/bson/src/test/unit/org/bson/internal/ProvidersCodecRegistrySpecification.groovy b/bson/src/test/unit/org/bson/internal/ProvidersCodecRegistrySpecification.groovy new file mode 100644 index 00000000000..40897b3a5aa --- /dev/null +++ b/bson/src/test/unit/org/bson/internal/ProvidersCodecRegistrySpecification.groovy @@ -0,0 +1,456 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
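[Editorial aside, not part of the patch: the CodecCacheSpecification above documents the internal CodecCache, whose lookups key on a class plus its (possibly null) type parameters, so a raw List and a List parameterized with Integer occupy different slots. A Java rendering of the same flow; the method shapes are taken from the spec and this is internal API, so treat it as a sketch:]

import org.bson.codecs.MinKeyCodec;
import org.bson.internal.CodecCache;
import org.bson.types.MinKey;

public final class CodecCacheSketch {
    public static void main(final String[] args) {
        CodecCache cache = new CodecCache();
        // A key is the class plus its type parameters; null means "not parameterized".
        CodecCache.CodecCacheKey key = new CodecCache.CodecCacheKey(MinKey.class, null);

        assert !cache.get(key).isPresent();         // nothing cached yet
        cache.putIfAbsent(key, new MinKeyCodec());  // first writer wins
        assert cache.get(key).isPresent();          // later lookups hit the cache
    }
}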
+ */ + +package org.bson.internal + +import org.bson.BsonBinaryReader +import org.bson.BsonBinaryWriter +import org.bson.BsonReader +import org.bson.BsonType +import org.bson.BsonWriter +import org.bson.ByteBufNIO +import org.bson.codecs.Codec +import org.bson.codecs.DecoderContext +import org.bson.codecs.EncoderContext +import org.bson.codecs.MinKeyCodec +import org.bson.codecs.configuration.CodecConfigurationException +import org.bson.codecs.configuration.CodecProvider +import org.bson.codecs.configuration.CodecRegistry +import org.bson.io.BasicOutputBuffer +import org.bson.io.ByteBufferBsonInput +import org.bson.types.MaxKey +import org.bson.types.MinKey +import spock.lang.Specification + +import java.nio.ByteBuffer + +import static java.util.Arrays.asList + +class ProvidersCodecRegistrySpecification extends Specification { + + def 'should throw if supplied codecProviders is null or an empty list'() { + when: + new ProvidersCodecRegistry(null) + + then: + thrown(IllegalArgumentException) + + when: + new ProvidersCodecRegistry([]) + + then: + thrown(IllegalArgumentException) + } + + def 'should throw a CodecConfigurationException if codec not found'() { + when: + new ProvidersCodecRegistry([new SingleCodecProvider(new MinKeyCodec())]).get(MaxKey) + + then: + thrown(CodecConfigurationException) + } + + def 'get should return registered codec'() { + given: + def minKeyCodec = new MinKeyCodec() + def registry = new ProvidersCodecRegistry([new SingleCodecProvider(minKeyCodec)]) + + expect: + registry.get(MinKey).is(minKeyCodec) + } + + def 'get should return the codec from the first source that has one'() { + given: + def minKeyCodec1 = new MinKeyCodec() + def minKeyCodec2 = new MinKeyCodec() + def registry = new ProvidersCodecRegistry([new SingleCodecProvider(minKeyCodec1), new SingleCodecProvider(minKeyCodec2)]) + + expect: + registry.get(MinKey).is(minKeyCodec1) + } + + def 'should handle cycles'() { + given: + def registry = new ProvidersCodecRegistry([new ClassModelCodecProvider()]) + + when: + Codec<Top> topCodec = registry.get(Top) + + then: + topCodec instanceof TopCodec + + when: + def top = new Top('Bob', + new Top('Jim', null, null), + new Nested('George', new Top('Joe', null, null))) + def writer = new BsonBinaryWriter(new BasicOutputBuffer()) + topCodec.encode(writer, top, EncoderContext.builder().build()) + ByteArrayOutputStream os = new ByteArrayOutputStream() + writer.getBsonOutput().pipe(os) + writer.close() + + then: + topCodec.decode(new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(os.toByteArray())))), + DecoderContext.builder().build()) == top + } + + def 'get should use the codecCache'() { + given: + def codec = Mock(Codec) + def provider = new CodecProvider() { + private int counter = 0 + + @Override + <T> Codec<T> get(final Class<T> clazz, final CodecRegistry registry) { + if (counter == 0) { + counter++ + return codec + } + throw new AssertionError((Object)'Must not be called more than once.') + } + } + + when: + def registry = new ProvidersCodecRegistry([provider]) + def codecFromRegistry = registry.get(MinKey) + + then: + codecFromRegistry == codec + + when: + codecFromRegistry = registry.get(MinKey) + + then: + codecFromRegistry == codec + } + + def 'get with codec registry should return the codec from the first source that has one'() { + given: + def provider = new ProvidersCodecRegistry([new ClassModelCodecProvider([Simple])]) + def registry = Mock(CodecRegistry) + + expect: + provider.get(Simple, registry) instanceof SimpleCodec + } + + def 'get 
with codec registry should return null if codec not found'() { + given: + def provider = new ProvidersCodecRegistry([new ClassModelCodecProvider([Top])]) + def registry = Mock(CodecRegistry) + + expect: + !provider.get(Simple, registry) + } + + def 'get with codec registry should pass the outer registry to its providers'() { + given: + def provider = new ProvidersCodecRegistry([new ClassModelCodecProvider([Simple])]) + def registry = Mock(CodecRegistry) + + expect: + ((SimpleCodec) provider.get(Simple, registry)).registry.is(registry) + } +} + +class SingleCodecProvider implements CodecProvider { + + private final Codec<?> codec + + SingleCodecProvider(final Codec<?> codec) { + this.codec = codec + } + + @Override + <T> Codec<T> get(final Class<T> clazz, final CodecRegistry registry) { + if (clazz == codec.getEncoderClass()) { + return codec + } + null + } +} + +class ClassModelCodecProvider implements CodecProvider { + + private final List<Class<?>> supportedClasses + + ClassModelCodecProvider() { + this(asList(Top, Nested)) + } + + ClassModelCodecProvider(List<Class<?>> supportedClasses) { + this.supportedClasses = supportedClasses + } + + @Override + @SuppressWarnings('ReturnNullFromCatchBlock') + <T> Codec<T> get(final Class<T> clazz, final CodecRegistry registry) { + if (!supportedClasses.contains(clazz)) { + null + } else if (clazz == Top) { + try { + new TopCodec(registry) + } catch (CodecConfigurationException e) { + null + } + } else if (clazz == Nested) { + try { + new NestedCodec(registry) + } catch (CodecConfigurationException e) { + null + } + } else if (clazz == Simple) { + new SimpleCodec(registry) + } else { + null + } + } +} + +class TopCodec implements Codec<Top> { + Codec<Top> codecForOther + Codec<Nested> codecForNested + CodecRegistry registry + + TopCodec(final CodecRegistry registry) { + this.registry = registry + codecForOther = registry.get(Top) + codecForNested = registry.get(Nested) + } + + @Override + void encode(final BsonWriter writer, final Top top, EncoderContext encoderContext) { + if (top == null) { + writer.writeNull() + return + } + + writer.writeStartDocument() + writer.writeString('name', top.getName()) + writer.writeName('other') + codecForOther.encode(writer, top.getOther(), EncoderContext.builder().build()) + writer.writeName('nested') + codecForNested.encode(writer, top.getNested(), EncoderContext.builder().build()) + writer.writeEndDocument() + } + + @Override + Class<Top> getEncoderClass() { + Top + } + + @Override + Top decode(final BsonReader reader, DecoderContext decoderContext) { + reader.readStartDocument() + reader.readName() + def name = reader.readString() + def other = null + def nested = null + + def type = reader.readBsonType() + + reader.readName() + if (type == BsonType.NULL) { + reader.readNull() + } else { + other = codecForOther.decode(reader, decoderContext) + } + + reader.readName('nested') + if (type == BsonType.NULL) { + reader.readNull() + } else { + nested = codecForNested.decode(reader, decoderContext) + } + reader.readEndDocument() + new Top(name, other, nested) + } +} + +class NestedCodec implements Codec<Nested> { + Codec<Top> codecForTop + + NestedCodec(final CodecRegistry registry) { + codecForTop = registry.get(Top) + } + + @Override + void encode(final BsonWriter writer, final Nested nested, EncoderContext encoderContext) { + if (nested == null) { + writer.writeNull() + return + } + + writer.writeStartDocument() + writer.writeString('name', nested.getName()) + writer.writeName('top') + codecForTop.encode(writer, nested.getTop(), EncoderContext.builder().build()) + writer.writeEndDocument() + }
+ + @Override + Class<Nested> getEncoderClass() { + Nested + } + + @Override + Nested decode(final BsonReader reader, DecoderContext decoderContext) { + reader.readStartDocument() + reader.readName() + def name = reader.readString() + def type = reader.readBsonType() + reader.readName() + def top = null + if (type == BsonType.NULL) { + reader.readNull() + } else { + top = codecForTop.decode(reader, decoderContext) + } + reader.readEndDocument() + new Nested(name, top) + } +} + +class SimpleCodec implements Codec<Simple> { + private final CodecRegistry registry + + SimpleCodec(CodecRegistry registry) { + this.registry = registry + } + + CodecRegistry getRegistry() { + registry + } + + @Override + void encode(final BsonWriter writer, final Simple value, final EncoderContext encoderContext) { + writer.writeNull() + } + + @Override + Class<Simple> getEncoderClass() { + Simple + } + + @Override + Simple decode(final BsonReader reader, final DecoderContext decoderContext) { + reader.readNull() + new Simple() + } +} + +class Top { + private String name + private Top other + private Nested nested + + Top(final String name, final Top other, final Nested nested) { + this.name = name + this.other = other + this.nested = nested + } + + String getName() { + name + } + + Top getOther() { + other + } + + Nested getNested() { + nested + } + + boolean equals(final o) { + if (this.is(o)) { + return true + } + if (getClass() != o.class) { + return false + } + + Top top = (Top) o + + if (name != top.name) { + return false + } + if (nested != top.nested) { + return false + } + if (other != top.other) { + return false + } + + true + } + + int hashCode() { + int result + result = name.hashCode() + result = 31 * result + (other != null ? other.hashCode() : 0) + result = 31 * result + (nested != null ? nested.hashCode() : 0) + result + } +} + +class Nested { + private String name + private Top top + + Nested(final String name, final Top top) { + this.name = name + this.top = top + } + + String getName() { + name + } + + Top getTop() { + top + } + + boolean equals(final o) { + if (this.is(o)) { + return true + } + if (getClass() != o.class) { + return false + } + + Nested nested = (Nested) o + + if (name != nested.name) { + return false + } + if (top != nested.top) { + return false + } + + true + } + + int hashCode() { + int result + result = name.hashCode() + result = 31 * result + (top != null ? 
top.hashCode() : 0) + result + } +} + +class Simple { + int value = 0 +} diff --git a/bson/src/test/unit/org/bson/internal/UuidHelperSpecification.groovy b/bson/src/test/unit/org/bson/internal/UuidHelperSpecification.groovy new file mode 100644 index 00000000000..a42d09d6d9e --- /dev/null +++ b/bson/src/test/unit/org/bson/internal/UuidHelperSpecification.groovy @@ -0,0 +1,53 @@ +package org.bson.internal + +import org.bson.BSONException +import org.bson.UuidRepresentation +import spock.lang.Specification +import spock.lang.Unroll + +class UuidHelperSpecification extends Specification { + + @Unroll + def 'should encode different types of UUID'() { + given: + def expectedUuid = UUID.fromString('08070605-0403-0201-100f-0e0d0c0b0a09') + + expect: + bytes == UuidHelper.encodeUuidToBinary(expectedUuid, uuidRepresentation) + + where: + bytes | uuidRepresentation + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] | UuidRepresentation.JAVA_LEGACY + [8, 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, 9] | UuidRepresentation.STANDARD + [8, 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, 9] | UuidRepresentation.PYTHON_LEGACY + [5, 6, 7, 8, 3, 4, 1, 2, 16, 15, 14, 13, 12, 11, 10, 9] | UuidRepresentation.C_SHARP_LEGACY + } + + @Unroll + def 'should decode different types of UUID'() { + given: + byte[] expectedBytes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[] + + expect: + uuid == UuidHelper.decodeBinaryToUuid(expectedBytes, (byte) type, uuidRepresentation) + expectedBytes == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[] + + where: + uuid | type | uuidRepresentation + UUID.fromString('08070605-0403-0201-100f-0e0d0c0b0a09') | 3 | UuidRepresentation.JAVA_LEGACY + UUID.fromString('01020304-0506-0708-090a-0b0c0d0e0f10') | 3 | UuidRepresentation.PYTHON_LEGACY + UUID.fromString('04030201-0605-0807-090a-0b0c0d0e0f10') | 3 | UuidRepresentation.C_SHARP_LEGACY + UUID.fromString('01020304-0506-0708-090a-0b0c0d0e0f10') | 4 | UuidRepresentation.STANDARD + } + + def 'should error when decoding a subtype 3 binary to standard representation'() { + given: + byte[] expectedBytes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] + + when: + UuidHelper.decodeBinaryToUuid(expectedBytes, (byte) 3, UuidRepresentation.STANDARD) + + then: + thrown(BSONException) + } +} diff --git a/bson/src/test/unit/org/bson/io/BasicOutputBufferSpecification.groovy b/bson/src/test/unit/org/bson/io/BasicOutputBufferSpecification.groovy new file mode 100644 index 00000000000..758d4fc1cfd --- /dev/null +++ b/bson/src/test/unit/org/bson/io/BasicOutputBufferSpecification.groovy @@ -0,0 +1,464 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
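[Editorial aside, not part of the patch: the UuidHelperSpecification above fixes the byte layout per UuidRepresentation. A small sketch of the same internal API, with the byte orders taken directly from the spec's tables:]

import java.util.UUID;

import org.bson.UuidRepresentation;
import org.bson.internal.UuidHelper;

public final class UuidByteOrderSketch {
    public static void main(final String[] args) {
        UUID uuid = UUID.fromString("01020304-0506-0708-090a-0b0c0d0e0f10");

        // STANDARD keeps big-endian order; JAVA_LEGACY flips each 8-byte half.
        byte[] standard = UuidHelper.encodeUuidToBinary(uuid, UuidRepresentation.STANDARD);
        byte[] javaLegacy = UuidHelper.encodeUuidToBinary(uuid, UuidRepresentation.JAVA_LEGACY);
        // standard:   01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f 10
        // javaLegacy: 08 07 06 05 04 03 02 01 10 0f 0e 0d 0c 0b 0a 09

        // Legacy representations live in binary subtype 3; STANDARD uses subtype 4,
        // which is why decoding subtype 3 as STANDARD throws in the spec above.
        UUID roundTripped = UuidHelper.decodeBinaryToUuid(javaLegacy, (byte) 3, UuidRepresentation.JAVA_LEGACY);
        assert uuid.equals(roundTripped);
    }
}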
+ */ + +package org.bson.io + +import org.bson.BsonSerializationException +import org.bson.types.ObjectId +import spock.lang.Specification + +class BasicOutputBufferSpecification extends Specification { + + def 'position and size should be 0 after constructor'() { + when: + def bsonOutput = new BasicOutputBuffer() + + then: + bsonOutput.position == 0 + bsonOutput.size == 0 + } + + def 'should write a byte'() { + given: + def bsonOutput = new BasicOutputBuffer() + + when: + bsonOutput.writeByte(11) + + then: + getBytes(bsonOutput) == [11] as byte[] + bsonOutput.position == 1 + bsonOutput.size == 1 + } + + def 'writeBytes shorthand should extend buffer'() { + given: + def bsonOutput = new BasicOutputBuffer(3) + + when: + bsonOutput.write([1, 2, 3, 4] as byte[]) + + then: + getBytes(bsonOutput) == [1, 2, 3, 4] as byte[] + bsonOutput.position == 4 + bsonOutput.size == 4 + } + + def 'should write bytes'() { + given: + def bsonOutput = new BasicOutputBuffer(3) + + when: + bsonOutput.writeBytes([1, 2, 3, 4] as byte[]) + + then: + getBytes(bsonOutput) == [1, 2, 3, 4] as byte[] + bsonOutput.position == 4 + bsonOutput.size == 4 + } + + def 'should write bytes from offset until length'() { + given: + def bsonOutput = new BasicOutputBuffer(5) + + when: + bsonOutput.writeBytes([0, 1, 2, 3, 4, 5] as byte[], 1, 4) + + then: + getBytes(bsonOutput) == [1, 2, 3, 4] as byte[] + bsonOutput.position == 4 + bsonOutput.size == 4 + } + + def 'toByteArray should be idempotent'() { + given: + def bsonOutput = new BasicOutputBuffer(10) + bsonOutput.writeBytes([1, 2, 3, 4] as byte[]) + + when: + def first = bsonOutput.toByteArray() + def second = bsonOutput.toByteArray() + + then: + getBytes(bsonOutput) == [1, 2, 3, 4] as byte[] + first == [1, 2, 3, 4] as byte[] + second == [1, 2, 3, 4] as byte[] + bsonOutput.position == 4 + bsonOutput.size == 4 + } + + def 'toByteArray creates a copy'() { + given: + def bsonOutput = new BasicOutputBuffer(10) + bsonOutput.writeBytes([1, 2, 3, 4] as byte[]) + + when: + def first = bsonOutput.toByteArray() + def second = bsonOutput.toByteArray() + + then: + first !== second + first == [1, 2, 3, 4] as byte[] + second == [1, 2, 3, 4] as byte[] + } + def 'should write a little endian Int32'() { + given: + def bsonOutput = new BasicOutputBuffer(3) + + when: + bsonOutput.writeInt32(0x1020304) + + then: + getBytes(bsonOutput) == [4, 3, 2, 1] as byte[] + bsonOutput.position == 4 + bsonOutput.size == 4 + } + + def 'should write a little endian Int64'() { + given: + def bsonOutput = new BasicOutputBuffer(7) + + when: + bsonOutput.writeInt64(0x102030405060708L) + + then: + getBytes(bsonOutput) == [8, 7, 6, 5, 4, 3, 2, 1] as byte[] + bsonOutput.position == 8 + bsonOutput.size == 8 + } + + def 'should write a double'() { + given: + def bsonOutput = new BasicOutputBuffer(7) + + when: + bsonOutput.writeDouble(Double.longBitsToDouble(0x102030405060708L)) + + then: + getBytes(bsonOutput) == [8, 7, 6, 5, 4, 3, 2, 1] as byte[] + bsonOutput.position == 8 + bsonOutput.size == 8 + } + + def 'should write an ObjectId'() { + given: + def objectIdAsByteArray = [12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1] as byte[] + def bsonOutput = new BasicOutputBuffer(11) + + when: + bsonOutput.writeObjectId(new ObjectId(objectIdAsByteArray)) + + then: + getBytes(bsonOutput) == objectIdAsByteArray + bsonOutput.position == 12 + bsonOutput.size == 12 + } + + def 'write ObjectId should throw after close'() { + given: + def objectIdAsByteArray = [12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1] as byte[] + def bsonOutput = new 
BasicOutputBuffer() + bsonOutput.close() + + when: + bsonOutput.writeObjectId(new ObjectId(objectIdAsByteArray)) + + then: + thrown(IllegalStateException) + } + + def 'should write an empty string'() { + given: + def bsonOutput = new BasicOutputBuffer() + + when: + bsonOutput.writeString('') + + then: + getBytes(bsonOutput) == [1, 0, 0, 0, 0] as byte[] + bsonOutput.position == 5 + bsonOutput.size == 5 + } + + def 'should write an ASCII string'() { + given: + def bsonOutput = new BasicOutputBuffer() + + when: + bsonOutput.writeString('Java') + + then: + getBytes(bsonOutput) == [5, 0, 0, 0, 0x4a, 0x61, 0x76, 0x61, 0] as byte[] + bsonOutput.position == 9 + bsonOutput.size == 9 + } + + def 'should write a UTF-8 string'() { + given: + def bsonOutput = new BasicOutputBuffer(7) + + when: + bsonOutput.writeString('\u0900') + + then: + getBytes(bsonOutput) == [4, 0, 0, 0, 0xe0, 0xa4, 0x80, 0] as byte[] + bsonOutput.position == 8 + bsonOutput.size == 8 + } + + def 'should write an empty CString'() { + given: + def bsonOutput = new BasicOutputBuffer() + + when: + bsonOutput.writeCString('') + + then: + getBytes(bsonOutput) == [0] as byte[] + bsonOutput.position == 1 + bsonOutput.size == 1 + } + + def 'should write an ASCII CString'() { + given: + def bsonOutput = new BasicOutputBuffer() + + when: + bsonOutput.writeCString('Java') + + then: + getBytes(bsonOutput) == [0x4a, 0x61, 0x76, 0x61, 0] as byte[] + bsonOutput.position == 5 + bsonOutput.size == 5 + } + + def 'should write a UTF-8 CString'() { + given: + def bsonOutput = new BasicOutputBuffer() + + when: + bsonOutput.writeCString('\u0900') + + then: + getBytes(bsonOutput) == [0xe0, 0xa4, 0x80, 0] as byte[] + bsonOutput.position == 4 + bsonOutput.size == 4 + } + + def 'null character in CString should throw SerializationException'() { + given: + def bsonOutput = new BasicOutputBuffer() + + when: + bsonOutput.writeCString('hell\u0000world') + + then: + thrown(BsonSerializationException) + } + + def 'null character in String should not throw SerializationException'() { + given: + def bsonOutput = new BasicOutputBuffer() + + when: + bsonOutput.writeString('h\u0000i') + + then: + getBytes(bsonOutput) == [4, 0, 0, 0, (byte) 'h', 0, (byte) 'i', 0] as byte[] + } + + def 'write Int32 at position should throw with invalid position'() { + given: + def bsonOutput = new BasicOutputBuffer() + bsonOutput.writeBytes([1, 2, 3, 4] as byte[]) + + when: + bsonOutput.writeInt32(-1, 0x1020304) + + then: + thrown(IllegalArgumentException) + + when: + bsonOutput.writeInt32(1, 0x1020304) + + then: + thrown(IllegalArgumentException) + } + + def 'should write Int32 at position'() { + given: + def bsonOutput = new BasicOutputBuffer() + bsonOutput.writeBytes([0, 0, 0, 0, 1, 2, 3, 4] as byte[]) + + when: + bsonOutput.writeInt32(0, 0x1020304) + + then: + getBytes(bsonOutput) == [4, 3, 2, 1, 1, 2, 3, 4] as byte[] + bsonOutput.position == 8 + bsonOutput.size == 8 + + when: + bsonOutput.writeInt32(4, 0x1020304) + + then: + getBytes(bsonOutput) == [4, 3, 2, 1, 4, 3, 2, 1] as byte[] + bsonOutput.position == 8 + bsonOutput.size == 8 + } + + def 'absolute write should throw with invalid position'() { + given: + def bsonOutput = new BasicOutputBuffer() + bsonOutput.writeBytes([1, 2, 3, 4] as byte[]) + + when: + bsonOutput.write(-1, 0x1020304) + + then: + thrown(IllegalArgumentException) + + when: + bsonOutput.write(4, 0x1020304) + + then: + thrown(IllegalArgumentException) + } + + def 'absolute write should write lower byte at position'() { + given: + def bsonOutput = new 
BasicOutputBuffer() + bsonOutput.writeBytes([0, 0, 0, 0, 1, 2, 3, 4] as byte[]) + + when: + bsonOutput.write(0, 0x1020304) + + then: + getBytes(bsonOutput) == [4, 0, 0, 0, 1, 2, 3, 4] as byte[] + bsonOutput.position == 8 + bsonOutput.size == 8 + + when: + bsonOutput.write(7, 0x1020304) + + then: + getBytes(bsonOutput) == [4, 0, 0, 0, 1, 2, 3, 4] as byte[] + bsonOutput.position == 8 + bsonOutput.size == 8 + } + + def 'truncate should throw with invalid position'() { + given: + def bsonOutput = new BasicOutputBuffer() + bsonOutput.writeBytes([1, 2, 3, 4] as byte[]) + + when: + bsonOutput.truncateToPosition(5) + + then: + thrown(IllegalArgumentException) + + when: + bsonOutput.truncateToPosition(-1) + + then: + thrown(IllegalArgumentException) + } + + def 'should truncate to position'() { + given: + def bsonOutput = new BasicOutputBuffer() + bsonOutput.writeBytes([1, 2, 3, 4] as byte[]) + + when: + bsonOutput.truncateToPosition(2) + + then: + getBytes(bsonOutput) == [1, 2] as byte[] + bsonOutput.position == 2 + bsonOutput.size == 2 + } + + def 'should grow'() { + given: + def bsonOutput = new BasicOutputBuffer(4) + bsonOutput.writeBytes([1, 2, 3, 4] as byte[]) + + when: + bsonOutput.writeBytes([5, 6, 7, 8, 9, 10] as byte[]) + + then: + getBytes(bsonOutput) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] as byte[] + bsonOutput.position == 10 + bsonOutput.size == 10 + } + + def 'should get byte buffer as little endian'() { + given: + def bsonOutput = new BasicOutputBuffer(4) + + when: + bsonOutput.writeBytes([1, 0, 0, 0] as byte[]) + + then: + bsonOutput.getByteBuffers()[0].getInt() == 1 + } + + def 'should get byte buffer with limit'() { + given: + def bsonOutput = new BasicOutputBuffer(8) + bsonOutput.writeBytes([1, 0, 0, 0] as byte[]) + + when: + def buffers = bsonOutput.getByteBuffers() + + then: + buffers.size() == 1 + buffers[0].position() == 0 + buffers[0].limit() == 4 + } + + def 'should get internal buffer'() { + given: + def bsonOutput = new BasicOutputBuffer(4) + + when: + bsonOutput.writeBytes([1, 2] as byte[]) + + then: + bsonOutput.getInternalBuffer() == [1, 2, 0, 0] as byte[] + } + + def 'should close'() { + given: + def bsonOutput = new BasicOutputBuffer() + + when: + bsonOutput.close() + bsonOutput.writeByte(11) + + then: + thrown(IllegalStateException) + } + + def getBytes(final BasicOutputBuffer basicOutputBuffer) { + ByteArrayOutputStream baos = new ByteArrayOutputStream(basicOutputBuffer.size) + + basicOutputBuffer.pipe(baos) + + baos.toByteArray() + } +} diff --git a/bson/src/test/unit/org/bson/io/BasicOutputBufferTest.java b/bson/src/test/unit/org/bson/io/BasicOutputBufferTest.java new file mode 100644 index 00000000000..795df289876 --- /dev/null +++ b/bson/src/test/unit/org/bson/io/BasicOutputBufferTest.java @@ -0,0 +1,58 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
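[Editorial aside, not part of the patch: the specification above drives BasicOutputBuffer's primitives directly; in practice the buffer usually sits under a BsonBinaryWriter, which is what uses the positional writeInt32 tested above to back-patch the document's little-endian length prefix. A small sketch:]

import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.bson.BsonBinaryWriter;
import org.bson.io.BasicOutputBuffer;

public final class WriteDocumentSketch {
    public static void main(final String[] args) throws IOException {
        BasicOutputBuffer buffer = new BasicOutputBuffer();
        BsonBinaryWriter writer = new BsonBinaryWriter(buffer);

        // The buffer grows as needed; the writer patches the length prefix at the end.
        writer.writeStartDocument();
        writer.writeString("name", "Java");
        writer.writeInt64("count", 1L);
        writer.writeEndDocument();
        writer.close();

        ByteArrayOutputStream out = new ByteArrayOutputStream(buffer.getSize());
        buffer.pipe(out);
        System.out.println(out.size() + " bytes of BSON");
    }
}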
+ */ + +package org.bson.io; + +import org.junit.jupiter.api.Test; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; + +// for tests that are too slow to run in Groovy +public class BasicOutputBufferTest { + + @Test + public void shouldEncodeAllCodePointsThatAreLettersOrDigits() throws IOException { + for (int codePoint = 1; codePoint <= Character.MAX_CODE_POINT; codePoint++) { + if (!Character.isLetterOrDigit(codePoint)) { + continue; + } + // given + BasicOutputBuffer bsonOutput = new BasicOutputBuffer(8); + + // when + String str = new String(Character.toChars(codePoint)); + bsonOutput.writeCString(str); + + // then + byte[] bytes = getBytes(bsonOutput); + assertArrayEquals(str.getBytes(StandardCharsets.UTF_8), Arrays.copyOfRange(bytes, 0, bytes.length - 1), "failed with code point " + codePoint); + } + } + + byte[] getBytes(final BasicOutputBuffer basicOutputBuffer) throws IOException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(basicOutputBuffer.getSize()); + + basicOutputBuffer.pipe(baos); + + return baos.toByteArray(); + } + +} diff --git a/bson/src/test/unit/org/bson/io/ByteBufferBsonInputSpecification.groovy b/bson/src/test/unit/org/bson/io/ByteBufferBsonInputSpecification.groovy new file mode 100644 index 00000000000..a6ff9e1d609 --- /dev/null +++ b/bson/src/test/unit/org/bson/io/ByteBufferBsonInputSpecification.groovy @@ -0,0 +1,429 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.io + +import org.bson.BsonSerializationException +import org.bson.ByteBufNIO +import org.bson.types.ObjectId +import spock.lang.Specification + +import java.nio.ByteBuffer +import java.nio.charset.Charset + +class ByteBufferBsonInputSpecification extends Specification { + def 'constructor should throw if buffer is null'() { + when: + new ByteBufferBsonInput(null) + + then: + thrown(IllegalArgumentException) + } + + def 'position should start at 0'() { + when: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(new byte[4]))) + + then: + stream.position == 0 + } + + def 'should read a byte'() { + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([11] as byte[]))) + + expect: + stream.readByte() == 11 + stream.position == 1 + } + + def 'should read into a byte array'() { + given: + def bytes = [11, 12, 13] as byte[] + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(bytes))) + def bytesRead = new byte[bytes.length] + stream.readBytes(bytesRead) + + expect: + bytesRead == bytes + stream.position == 3 + } + + def 'should read into a byte array at offset until length'() { + given: + def bytes = [11, 12, 13] as byte[] + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(bytes))) + def bytesRead = new byte[bytes.length + 2] + stream.readBytes(bytesRead, 1, 3) + + expect: + bytesRead[1..3] == bytes + stream.position == 3 + } + + def 'should read a little endian Int32'() { + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([4, 3, 2, 1] as byte[]))) + + expect: + stream.readInt32() == 16909060 + stream.position == 4 + } + + def 'should read a little endian Int64'() { + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([8, 7, 6, 5, 4, 3, 2, 1] as byte[]))) + + expect: + stream.readInt64() == 72623859790382856 + stream.position == 8 + } + + def 'should read a double'() { + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([8, 7, 6, 5, 4, 3, 2, 1] as byte[]))) + + expect: + stream.readDouble() == Double.longBitsToDouble(72623859790382856) + stream.position == 8 + } + + def 'should read ObjectId'() { + given: + def objectIdAsByteArray = [12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1] as byte[] + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(objectIdAsByteArray))) + + expect: + stream.readObjectId() == new ObjectId(objectIdAsByteArray) + stream.position == 12 + } + + def 'should read an empty string'() { + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([1, 0, 0, 0, 0] as byte[]))) + + expect: + stream.readString() == '' + stream.position == 5 + } + + def 'should read a one byte string'() { + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([2, 0, 0, 0, b, 0] as byte[]))) + + expect: + stream.readString() == new String([b] as byte[], Charset.forName('UTF-8')) + stream.position == 6 + + where: + b << [0x0, 0x1, 0x20, 0x7e, 0x7f] + } + + def 'should read an invalid one byte string'() { + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([2, 0, 0, 0, -0x1, 0] as byte[]))) + + expect: + stream.readString() == '\uFFFD' + stream.position == 6 + } + + def 'should read an ASCII string'() { + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([5, 0, 0, 0, 0x4a, 0x61, 0x76, 0x61, 0] as byte[]))) + + expect: + stream.readString() == 'Java' + stream.position == 9 + } + + def 'should read a UTF-8 string'() { + 
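+ // 0xe0 0xa4 0x80 is the UTF-8 encoding of U+0900; the declared length of 4 covers the three encoded bytes plus the null terminator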
given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([4, 0, 0, 0, 0xe0, 0xa4, 0x80, 0] as byte[]))) + + expect: + stream.readString() == '\u0900' + stream.position == 8 + } + + def 'should read an empty CString'() { + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([0] as byte[]))) + + expect: + stream.readCString() == '' + stream.position == 1 + } + + def 'should read a one byte CString'() { + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([b, 0] as byte[]))) + + expect: + stream.readCString() == new String([b] as byte[], Charset.forName('UTF-8')) + stream.position == 2 + + where: + b << [0x1, 0x20, 0x7e, 0x7f] + } + + def 'should read an invalid one byte CString'() { + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([-0x01, 0] as byte[]))) + + expect: + stream.readCString() == '\uFFFD' + stream.position == 2 + } + + def 'should read an ASCII CString'() { + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([0x4a, 0x61, 0x76, 0x61, 0] as byte[]))) + + expect: + stream.readCString() == 'Java' + stream.position == 5 + } + + def 'should read a UTF-8 CString'() { + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([0xe0, 0xa4, 0x80, 0] as byte[]))) + + expect: + stream.readCString() == '\u0900' + stream.position == 4 + } + + def 'should handle invalid CString not null terminated'() { + when: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([0xe0, 0xa4, 0x80] as byte[]))) + stream.readCString() + + then: + def e = thrown(BsonSerializationException) + e.getMessage() == 'Found a BSON string that is not null-terminated' + } + + def 'should handle invalid CString not null terminated when skipping value'() { + when: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([0xe0, 0xa4, 0x80] as byte[]))) + stream.skipCString() + + then: + def e = thrown(BsonSerializationException) + e.getMessage() == 'Found a BSON string that is not null-terminated' + } + + def 'should read from position'() { + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([4, 3, 2, 1] as byte[]))) + + expect: + stream.readByte() == 4 + stream.readByte() == 3 + stream.readByte() == 2 + stream.readByte() == 1 + } + + def 'should skip CString'() { + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([0x4a, 0x61, 0x76, 0x61, 0] as byte[]))) + + when: + stream.skipCString() + + then: + stream.position == 5 + } + + def 'should skip'() { + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([0x4a, 0x61, 0x76, 0x61, 0] as byte[]))) + + when: + stream.skip(5) + + then: + stream.position == 5 + } + + def 'should reset to the BsonInputMark'() { + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([0x4a, 0x61, 0x76, 0x61, 0] as byte[]))) + + when: + BsonInputMark markOne = null + BsonInputMark markTwo = null + + stream.with { + readByte() + readByte() + markOne = getMark(1024) + readByte() + readByte() + markTwo = getMark(1025) + readByte() + } + markOne.reset() + + then: + stream.position == 2 + + when: + markTwo.reset() + + then: + stream.position == 4 + } + + def 'should have remaining when there are more bytes'() { + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([0x4a, 0x61, 0x76, 0x61, 0] as byte[]))) + + expect: + stream.hasRemaining() + } + + def 'should not have remaining when there are no more bytes'() 
{ + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([] as byte[]))) + + expect: + !stream.hasRemaining() + } + + def 'should close the stream'() { + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([] as byte[]))) + + when: + stream.close() + stream.hasRemaining() + + then: + thrown(IllegalStateException) + } + + def 'should throw BsonSerializationException reading a byte if no byte is available'() { + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([] as byte[]))) + + when: + stream.readByte() + + then: + thrown(BsonSerializationException) + } + + def 'should throw BsonSerializationException reading an Int32 if less than 4 bytes are available'() { + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([0, 0, 0] as byte[]))) + + when: + stream.readInt32() + + then: + thrown(BsonSerializationException) + } + + def 'should throw BsonSerializationException reading an Int64 if less than 8 bytes are available'() { + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([0, 0, 0, 0, 0, 0, 0] as byte[]))) + + when: + stream.readInt64() + + then: + thrown(BsonSerializationException) + } + + def 'should throw BsonSerializationException reading a double if less than 8 bytes are available'() { + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([0, 0, 0, 0, 0, 0, 0] as byte[]))) + + when: + stream.readDouble() + + then: + thrown(BsonSerializationException) + } + + def 'should throw BsonSerializationException reading an ObjectId if less than 12 bytes are available'() { + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] as byte[]))) + + when: + stream.readObjectId() + + then: + thrown(BsonSerializationException) + } + + def 'should throw BsonSerializationException reading into a byte array if not enough bytes are available'() { + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([0, 0, 0, 0, 0, 0, 0] as byte[]))) + + when: + stream.readBytes(new byte[8]) + + then: + thrown(BsonSerializationException) + } + + def 'should throw BsonSerializationException reading partially into a byte array if not enough bytes are available'() { + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([0, 0, 0, 0] as byte[]))) + + when: + stream.readBytes(new byte[8], 2, 5) + + then: + thrown(BsonSerializationException) + } + + def 'should throw BsonSerializationException if the length of a BSON string is not positive'() { + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([-1, -1, -1, -1, 41, 42, 43, 0] as byte[]))) + + when: + stream.readString() + + then: + thrown(BsonSerializationException) + } + + def 'should throw BsonSerializationException if a BSON string is not null-terminated'() { + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([4, 0, 0, 0, 41, 42, 43, 99] as byte[]))) + + when: + stream.readString() + + then: + thrown(BsonSerializationException) + } + + def 'should throw BsonSerializationException if a one-byte BSON string is not null-terminated'() { + given: + def stream = new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap([2, 0, 0, 0, 1, 3] as byte[]))) + + when: + stream.readString() + + then: + thrown(BsonSerializationException) + } +} diff --git a/bson/src/test/unit/org/bson/json/JsonObjectTest.java b/bson/src/test/unit/org/bson/json/JsonObjectTest.java new file mode 100644 
index 00000000000..80cfe07196c --- /dev/null +++ b/bson/src/test/unit/org/bson/json/JsonObjectTest.java @@ -0,0 +1,127 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json; + +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.codecs.BsonCodecProvider; +import org.bson.codecs.JsonObjectCodecProvider; +import org.junit.jupiter.api.Test; + +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class JsonObjectTest { + + @Test + public void testNull() { + assertThrows(IllegalArgumentException.class, () -> new JsonObject(null)); + } + + @Test + public void testArray() { + assertThrows(IllegalArgumentException.class, () -> new JsonObject("['A', 'B', 'C']")); + } + + @Test + public void testSpaceInvalidObject() { + assertThrows(IllegalArgumentException.class, () -> new JsonObject(" ['A']")); + } + + @Test + public void testLineFeedInvalidObject() { + assertThrows(IllegalArgumentException.class, () -> new JsonObject("\nvalue")); + } + + @Test + public void testCarriageReturnInvalidObject() { + assertThrows(IllegalArgumentException.class, () -> new JsonObject("\r123")); + } + + @Test + public void testHorizontalTabInvalidObject() { + assertThrows(IllegalArgumentException.class, () -> new JsonObject("\t123")); + } + + @Test + public void testOnlyWhitespace() { + assertThrows(IllegalArgumentException.class, () -> new JsonObject(" \t\n \r ")); + } + + @Test + public void testSpaceValidObject() { + String json = " {hello: 2}"; + assertEquals(new JsonObject(json).getJson(), json); + } + + @Test + public void testLineFeedValidObject() { + String json = "\n{hello: 2}"; + assertEquals(new JsonObject(json).getJson(), json); + } + + @Test + public void testCarriageReturnValidObject() { + String json = "\r{hello: 2}"; + assertEquals(new JsonObject(json).getJson(), json); + } + + @Test + public void testHorizontalTabValidObject() { + String json = "\t{hello: 2}"; + assertEquals(new JsonObject(json).getJson(), json); + } + + @Test + public void testLeadingAndTrailingWhitespace() { + String json = "\n\t\r {hello: 2} \n"; + assertEquals(new JsonObject(json).getJson(), json); + } + + @Test + public void testEqualsAndHashCode() { + JsonObject j1 = new JsonObject("{hello: 1}"); + JsonObject j2 = new JsonObject("{hello: 1}"); + JsonObject j3 = new JsonObject("{world: 2}"); + + assertEquals(j1, j1); + assertEquals(j1, j2); + assertEquals(j2, j1); + assertNotEquals(j1, j3); + assertNotEquals(j3, j1); + assertNotEquals(null, j1); + assertNotEquals("{hello: 1}", j1); + + assertEquals(j1.hashCode(), j1.hashCode()); + assertEquals(j1.hashCode(), j2.hashCode()); + } + + @Test + public void testGetJson() { + JsonObject j1 = new JsonObject("{hello: 1}"); + assertEquals(j1.getJson(), "{hello: 1}"); + } + + @Test 
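+ // a null document class is passed on purpose: decoding relies only on the supplied codec registry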
+ public void testToBsonDocument() { + JsonObject j1 = new JsonObject("{hello: 1}"); + BsonDocument b1 = new BsonDocument("hello", new BsonInt32(1)); + assertEquals(j1.toBsonDocument(null, fromProviders(new JsonObjectCodecProvider(), new BsonCodecProvider())), b1); + } +} diff --git a/bson/src/test/unit/org/bson/json/JsonReaderSpecification.groovy b/bson/src/test/unit/org/bson/json/JsonReaderSpecification.groovy new file mode 100644 index 00000000000..70417300595 --- /dev/null +++ b/bson/src/test/unit/org/bson/json/JsonReaderSpecification.groovy @@ -0,0 +1,53 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json + +import org.bson.BsonDocument +import org.bson.BsonHelper +import spock.lang.Specification +import spock.lang.Unroll + +import static org.bson.AbstractBsonReader.State.DONE +import static org.bson.AbstractBsonReader.State.TYPE + +class JsonReaderSpecification extends Specification { + + @Unroll + def 'should skip value #value'() { + given: + def document = new BsonDocument('name', value) + def reader = new JsonReader(document.toJson()) + reader.readStartDocument() + reader.readBsonType() + + when: + reader.skipName() + reader.skipValue() + + then: + reader.getState() == TYPE + + when: + reader.readEndDocument() + + then: + reader.getState() == DONE + + where: + value << BsonHelper.valuesOfEveryType() + } +} diff --git a/bson/src/test/unit/org/bson/json/JsonReaderTest.java b/bson/src/test/unit/org/bson/json/JsonReaderTest.java new file mode 100644 index 00000000000..27e1980a3e3 --- /dev/null +++ b/bson/src/test/unit/org/bson/json/JsonReaderTest.java @@ -0,0 +1,1344 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.json; + +import org.bson.AbstractBsonReader; +import org.bson.BsonBinary; +import org.bson.BsonBinarySubType; +import org.bson.BsonDbPointer; +import org.bson.BsonReaderMark; +import org.bson.BsonRegularExpression; +import org.bson.BsonTimestamp; +import org.bson.BsonType; +import org.bson.types.Decimal128; +import org.bson.types.ObjectId; +import org.junit.jupiter.api.Test; + +import java.io.BufferedReader; +import java.io.ByteArrayInputStream; +import java.io.InputStreamReader; +import java.io.Reader; +import java.nio.charset.StandardCharsets; +import java.text.ParsePosition; +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.List; +import java.util.Locale; +import java.util.function.Function; + +import static java.util.Arrays.asList; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + + +public class JsonReaderTest { + + @Test + public void testArrayEmpty() { + String json = "[]"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.ARRAY, bsonReader.readBsonType()); + bsonReader.readStartArray(); + assertEquals(BsonType.END_OF_DOCUMENT, bsonReader.readBsonType()); + bsonReader.readEndArray(); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testArrayOneElement() { + String json = "[1]"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.ARRAY, bsonReader.readBsonType()); + bsonReader.readStartArray(); + assertEquals(BsonType.INT32, bsonReader.readBsonType()); + assertEquals(1, bsonReader.readInt32()); + assertEquals(BsonType.END_OF_DOCUMENT, bsonReader.readBsonType()); + bsonReader.readEndArray(); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testArrayTwoElements() { + String json = "[1, 2]"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.ARRAY, bsonReader.readBsonType()); + bsonReader.readStartArray(); + assertEquals(BsonType.INT32, bsonReader.readBsonType()); + assertEquals(1, bsonReader.readInt32()); + assertEquals(BsonType.INT32, bsonReader.readBsonType()); + assertEquals(2, bsonReader.readInt32()); + assertEquals(BsonType.END_OF_DOCUMENT, bsonReader.readBsonType()); + bsonReader.readEndArray(); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testBooleanFalse() { + String json = "false"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.BOOLEAN, bsonReader.readBsonType()); + assertFalse(bsonReader.readBoolean()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testBooleanTrue() { + String json = "true"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.BOOLEAN, bsonReader.readBsonType()); + assertTrue(bsonReader.readBoolean()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testDateTimeMinBson() { + String json = "new Date(-9223372036854775808)"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DATE_TIME, bsonReader.readBsonType()); + 
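+ // -9223372036854775808 is Long.MIN_VALUE, the smallest value a BSON DATE_TIME (a 64-bit millis-since-epoch count) can hold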
assertEquals(-9223372036854775808L, bsonReader.readDateTime()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testDateTimeMaxBson() { + String json = "new Date(9223372036854775807)"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DATE_TIME, bsonReader.readBsonType()); + long k = bsonReader.readDateTime(); + assertEquals(9223372036854775807L, k); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testDateTimeShellDateOnly() { + String json = "ISODate(\"1970-01-01\")"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DATE_TIME, bsonReader.readBsonType()); + assertEquals(0, bsonReader.readDateTime()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testDateTimeShell() { + String json = "ISODate(\"1970-01-01T00:00:00Z\")"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DATE_TIME, bsonReader.readBsonType()); + assertEquals(0, bsonReader.readDateTime()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testDateTimeShellWith24HourTimeSpecification() { + String json = "ISODate(\"2013-10-04T12:07:30.443Z\")"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DATE_TIME, bsonReader.readBsonType()); + assertEquals(1380888450443L, bsonReader.readDateTime()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testDateTimeStrict() { + String json = "{ \"$date\" : 0 }"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DATE_TIME, bsonReader.readBsonType()); + assertEquals(0, bsonReader.readDateTime()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testNestedDateTimeStrict() { + String json = "{d1 : { \"$date\" : 0 }, d2 : { \"$date\" : 1 } }"; + testStringAndStream(json, bsonReader -> { + bsonReader.readStartDocument(); + assertEquals(0L, bsonReader.readDateTime("d1")); + assertEquals(1L, bsonReader.readDateTime("d2")); + bsonReader.readEndDocument(); + return null; + }); + } + + @Test + public void testDateTimeISOString() { + String json = "{ \"$date\" : \"2015-04-16T14:55:57.626Z\" }"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DATE_TIME, bsonReader.readBsonType()); + assertEquals(1429196157626L, bsonReader.readDateTime()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testDateTimeISOStringWithTimeOffset() { + String json = "{ \"$date\" : \"2015-04-16T16:55:57.626+02:00\" }"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DATE_TIME, bsonReader.readBsonType()); + assertEquals(1429196157626L, bsonReader.readDateTime()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testDateTimeTengen() { + String json = "new Date(0)"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DATE_TIME, bsonReader.readBsonType()); + assertEquals(0, bsonReader.readDateTime()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testDocumentEmpty() { + String json = "{ }"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DOCUMENT, 
bsonReader.readBsonType()); + bsonReader.readStartDocument(); + assertEquals(BsonType.END_OF_DOCUMENT, bsonReader.readBsonType()); + bsonReader.readEndDocument(); + return null; + }); + } + + @Test + public void testDocumentNested() { + String json = "{ \"a\" : { \"x\" : 1 }, \"y\" : 2 }"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DOCUMENT, bsonReader.readBsonType()); + bsonReader.readStartDocument(); + assertEquals(BsonType.DOCUMENT, bsonReader.readBsonType()); + assertEquals("a", bsonReader.readName()); + bsonReader.readStartDocument(); + assertEquals(BsonType.INT32, bsonReader.readBsonType()); + assertEquals("x", bsonReader.readName()); + assertEquals(1, bsonReader.readInt32()); + assertEquals(BsonType.END_OF_DOCUMENT, bsonReader.readBsonType()); + bsonReader.readEndDocument(); + assertEquals(BsonType.INT32, bsonReader.readBsonType()); + assertEquals("y", bsonReader.readName()); + assertEquals(2, bsonReader.readInt32()); + assertEquals(BsonType.END_OF_DOCUMENT, bsonReader.readBsonType()); + bsonReader.readEndDocument(); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testDocumentOneElement() { + String json = "{ \"x\" : 1 }"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DOCUMENT, bsonReader.readBsonType()); + bsonReader.readStartDocument(); + assertEquals(BsonType.INT32, bsonReader.readBsonType()); + assertEquals("x", bsonReader.readName()); + assertEquals(1, bsonReader.readInt32()); + assertEquals(BsonType.END_OF_DOCUMENT, bsonReader.readBsonType()); + bsonReader.readEndDocument(); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testDocumentTwoElements() { + String json = "{ \"x\" : 1, \"y\" : 2 }"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DOCUMENT, bsonReader.readBsonType()); + bsonReader.readStartDocument(); + assertEquals(BsonType.INT32, bsonReader.readBsonType()); + assertEquals("x", bsonReader.readName()); + assertEquals(1, bsonReader.readInt32()); + assertEquals(BsonType.INT32, bsonReader.readBsonType()); + assertEquals("y", bsonReader.readName()); + assertEquals(2, bsonReader.readInt32()); + assertEquals(BsonType.END_OF_DOCUMENT, bsonReader.readBsonType()); + bsonReader.readEndDocument(); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testDouble() { + String json = "1.5"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DOUBLE, bsonReader.readBsonType()); + assertEquals(1.5, bsonReader.readDouble(), 0); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testHexData() { + byte[] expectedBytes = {0x01, 0x23}; + String json = "HexData(0, \"0123\")"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.BINARY, bsonReader.readBsonType()); + BsonBinary binary = bsonReader.readBinaryData(); + assertArrayEquals(expectedBytes, binary.getData()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testHexDataWithNew() { + byte[] expectedBytes = {0x01, 0x23}; + String json = "new HexData(0, \"0123\")"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.BINARY, bsonReader.readBsonType()); + BsonBinary binary = bsonReader.readBinaryData(); + assertArrayEquals(expectedBytes, binary.getData()); + 
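+ // the "new HexData(...)" form must decode to the same bytes as the bare HexData(...) form above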
assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testInt32() { + String json = "123"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.INT32, bsonReader.readBsonType()); + assertEquals(123, bsonReader.readInt32()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + + } + + @Test + public void testInt64() { + String json = String.valueOf(Long.MAX_VALUE); + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.INT64, bsonReader.readBsonType()); + assertEquals(Long.MAX_VALUE, bsonReader.readInt64()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testNumberLongExtendedJson() { + String json = "{\"$numberLong\":\"123\"}"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.INT64, bsonReader.readBsonType()); + assertEquals(123, bsonReader.readInt64()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testNumberLong() { + List<String> jsonTexts = asList( + "NumberLong(123)", + "NumberLong(\"123\")", + "new NumberLong(123)", + "new NumberLong(\"123\")"); + for (String json : jsonTexts) { + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.INT64, bsonReader.readBsonType()); + assertEquals(123, bsonReader.readInt64()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + } + + @Test + public void testNumberInt() { + List<String> jsonTexts = asList( + "NumberInt(123)", + "NumberInt(\"123\")", + "new NumberInt(123)", + "new NumberInt(\"123\")"); + for (String json : jsonTexts) { + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.INT32, bsonReader.readBsonType()); + assertEquals(123, bsonReader.readInt32()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + } + + @Test + public void testDecimal128StringConstructor() { + String json = "NumberDecimal(\"314E-2\")"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DECIMAL128, bsonReader.readBsonType()); + assertEquals(Decimal128.parse("314E-2"), bsonReader.readDecimal128()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testDecimal128Int32Constructor() { + String json = "NumberDecimal(" + Integer.MAX_VALUE + ")"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DECIMAL128, bsonReader.readBsonType()); + assertEquals(new Decimal128(Integer.MAX_VALUE), bsonReader.readDecimal128()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testDecimal128Int64Constructor() { + String json = "NumberDecimal(" + Long.MAX_VALUE + ")"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DECIMAL128, bsonReader.readBsonType()); + assertEquals(new Decimal128(Long.MAX_VALUE), bsonReader.readDecimal128()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testDecimal128DoubleConstructor() { + String json = "NumberDecimal(" + 1.0 + ")"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DECIMAL128, bsonReader.readBsonType()); + assertEquals(Decimal128.parse("1"), bsonReader.readDecimal128()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + 
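+ // the constructors above accept string, int, long and double arguments; a boolean argument must be rejected as a parse error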
public void testDecimal128BooleanConstructor() { + String json = "NumberDecimal(true)"; + testStringAndStream(json, bsonReader -> { + try { + bsonReader.readBsonType(); + fail("Should fail to parse NumberDecimal constructor with a boolean"); + } catch (JsonParseException e) { + // all good + } + return null; + }); + } + + @Test + public void testDecimal128WithNew() { + String json = "new NumberDecimal(\"314E-2\")"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DECIMAL128, bsonReader.readBsonType()); + assertEquals(Decimal128.parse("314E-2"), bsonReader.readDecimal128()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testDecimal128ExtendedJson() { + String json = "{\"$numberDecimal\":\"314E-2\"}"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DECIMAL128, bsonReader.readBsonType()); + assertEquals(Decimal128.parse("314E-2"), bsonReader.readDecimal128()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testJavaScript() { + String json = "{ \"$code\" : \"function f() { return 1; }\" }"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.JAVASCRIPT, bsonReader.readBsonType()); + assertEquals("function f() { return 1; }", bsonReader.readJavaScript()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testJavaScriptWithScope() { + String json = "{\"codeWithScope\": { \"$code\" : \"function f() { return n; }\", \"$scope\" : { \"n\" : 1 } } }"; + testStringAndStream(json, bsonReader -> { + bsonReader.readStartDocument(); + assertEquals(BsonType.JAVASCRIPT_WITH_SCOPE, bsonReader.readBsonType()); + assertEquals("codeWithScope", bsonReader.readName()); + assertEquals("function f() { return n; }", bsonReader.readJavaScriptWithScope()); + bsonReader.readStartDocument(); + assertEquals(BsonType.INT32, bsonReader.readBsonType()); + assertEquals("n", bsonReader.readName()); + assertEquals(1, bsonReader.readInt32()); + bsonReader.readEndDocument(); + bsonReader.readEndDocument(); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testMaxKey() { + for (String maxKeyJson : asList("{ \"$maxKey\" : 1 }", "MaxKey", "MaxKey()", "new MaxKey", "new MaxKey()")) { + String json = "{ maxKey : " + maxKeyJson + " }"; + testStringAndStream(json, bsonReader -> { + bsonReader.readStartDocument(); + assertEquals("maxKey", bsonReader.readName()); + assertEquals(BsonType.MAX_KEY, bsonReader.getCurrentBsonType()); + bsonReader.readMaxKey(); + bsonReader.readEndDocument(); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + } + + @Test + public void testMinKey() { + for (String minKeyJson : asList("{ \"$minKey\" : 1 }", "MinKey", "MinKey()", "new MinKey", "new MinKey()")) { + String json = "{ minKey : " + minKeyJson + " }"; + testStringAndStream(json, bsonReader -> { + bsonReader.readStartDocument(); + assertEquals("minKey", bsonReader.readName()); + assertEquals(BsonType.MIN_KEY, bsonReader.getCurrentBsonType()); + bsonReader.readMinKey(); + bsonReader.readEndDocument(); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + } + + @Test + public void testNestedArray() { + String json = "{ \"a\" : [1, 2] }"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DOCUMENT, 
bsonReader.readBsonType()); + bsonReader.readStartDocument(); + assertEquals(BsonType.ARRAY, bsonReader.readBsonType()); + assertEquals("a", bsonReader.readName()); + bsonReader.readStartArray(); + assertEquals(1, bsonReader.readInt32()); + assertEquals(2, bsonReader.readInt32()); + bsonReader.readEndArray(); + bsonReader.readEndDocument(); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testNestedDocument() { + String json = "{ \"a\" : { \"b\" : 1, \"c\" : 2 } }"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DOCUMENT, bsonReader.readBsonType()); + bsonReader.readStartDocument(); + assertEquals(BsonType.DOCUMENT, bsonReader.readBsonType()); + assertEquals("a", bsonReader.readName()); + bsonReader.readStartDocument(); + assertEquals("b", bsonReader.readName()); + assertEquals(1, bsonReader.readInt32()); + assertEquals("c", bsonReader.readName()); + assertEquals(2, bsonReader.readInt32()); + bsonReader.readEndDocument(); + bsonReader.readEndDocument(); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testNull() { + String json = "null"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.NULL, bsonReader.readBsonType()); + bsonReader.readNull(); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testObjectIdShell() { + String json = "ObjectId(\"4d0ce088e447ad08b4721a37\")"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.OBJECT_ID, bsonReader.readBsonType()); + ObjectId objectId = bsonReader.readObjectId(); + assertEquals("4d0ce088e447ad08b4721a37", objectId.toString()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testObjectIdWithNew() { + String json = "new ObjectId(\"4d0ce088e447ad08b4721a37\")"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.OBJECT_ID, bsonReader.readBsonType()); + ObjectId objectId = bsonReader.readObjectId(); + assertEquals("4d0ce088e447ad08b4721a37", objectId.toString()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testObjectIdStrict() { + String json = "{ \"$oid\" : \"4d0ce088e447ad08b4721a37\" }"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.OBJECT_ID, bsonReader.readBsonType()); + ObjectId objectId = bsonReader.readObjectId(); + assertEquals("4d0ce088e447ad08b4721a37", objectId.toString()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testObjectIdTenGen() { + String json = "ObjectId(\"4d0ce088e447ad08b4721a37\")"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.OBJECT_ID, bsonReader.readBsonType()); + ObjectId objectId = bsonReader.readObjectId(); + assertEquals("4d0ce088e447ad08b4721a37", objectId.toString()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testRegularExpressionShell() { + String json = "/pattern/imxs"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.REGULAR_EXPRESSION, bsonReader.readBsonType()); + BsonRegularExpression regex = bsonReader.readRegularExpression(); + assertEquals("pattern", regex.getPattern()); + assertEquals("imsx", regex.getOptions()); + assertEquals(AbstractBsonReader.State.DONE, 
bsonReader.getState()); + return null; + }); + } + + @Test + public void testRegularExpressionStrict() { + String json = "{ \"$regex\" : \"pattern\", \"$options\" : \"imxs\" }"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.REGULAR_EXPRESSION, bsonReader.readBsonType()); + BsonRegularExpression regex = bsonReader.readRegularExpression(); + assertEquals("pattern", regex.getPattern()); + assertEquals("imsx", regex.getOptions()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testRegularExpressionCanonical() { + String json = "{ \"$regularExpression\" : { \"pattern\" : \"pattern\", \"options\" : \"imxs\" }}"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.REGULAR_EXPRESSION, bsonReader.readBsonType()); + BsonRegularExpression regex = bsonReader.readRegularExpression(); + assertEquals("pattern", regex.getPattern()); + assertEquals("imsx", regex.getOptions()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testRegularExpressionQuery() { + String json = "{ \"$regex\" : { \"$regularExpression\" : { \"pattern\" : \"pattern\", \"options\" : \"imxs\" }}}"; + testStringAndStream(json, bsonReader -> { + bsonReader.readStartDocument(); + BsonRegularExpression regex = bsonReader.readRegularExpression("$regex"); + assertEquals("pattern", regex.getPattern()); + assertEquals("imsx", regex.getOptions()); + bsonReader.readEndDocument(); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testRegularExpressionQueryShell() { + String json = "{ \"$regex\" : /pattern/imxs}"; + testStringAndStream(json, bsonReader -> { + bsonReader.readStartDocument(); + BsonRegularExpression regex = bsonReader.readRegularExpression("$regex"); + assertEquals("pattern", regex.getPattern()); + assertEquals("imsx", regex.getOptions()); + bsonReader.readEndDocument(); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + /** + * Test a $regularExpression extended json with unquoted keys + */ + @Test + public void testRegularExpressionCanonicalWithUnquotedKeys() { + String json = "{$regularExpression: {pattern: \"[a-z]\", options: \"imxs\"}}"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.REGULAR_EXPRESSION, bsonReader.readBsonType()); + assertEquals(new BsonRegularExpression("[a-z]", "imxs"), bsonReader.readRegularExpression()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + /** + * Test a $regex extended json query version with unquoted keys + */ + @Test + public void testRegularExpressionQueryWithUnquotedKeys() { + String json = "{$regex : { $regularExpression : { pattern : \"[a-z]\", options : \"imxs\" }}}"; + testStringAndStream(json, bsonReader -> { + bsonReader.readStartDocument(); + BsonRegularExpression regex = bsonReader.readRegularExpression("$regex"); + assertEquals("[a-z]", regex.getPattern()); + assertEquals("imsx", regex.getOptions()); + bsonReader.readEndDocument(); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testString() { + final String str = "abc"; + final String json = '"' + str + '"'; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.STRING, bsonReader.readBsonType()); + assertEquals(str, bsonReader.readString()); + 
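+ // the cases below exercise a surrogate pair, an escaped surrogate pair, and a mix of multi-byte characters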
assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + + final String str2 = "\ud806\udc5c"; + final String json2 = '"' + str2 + '"'; + testStringAndStream(json2, bsonReader -> { + assertEquals(BsonType.STRING, bsonReader.readBsonType()); + assertEquals(str2, bsonReader.readString()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + + final String str3 = "\\ud806\\udc5c"; + final String json3 = '"' + str3 + '"'; + testStringAndStream(json3, bsonReader -> { + assertEquals(BsonType.STRING, bsonReader.readBsonType()); + assertEquals("\ud806\udc5c", bsonReader.readString()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + + final String str4 = "꼢𑡜ᳫ鉠鮻罖᧭䆔瘉"; + final String json4 = '"' + str4 + '"'; + testStringAndStream(json4, bsonReader -> { + assertEquals(BsonType.STRING, bsonReader.readBsonType()); + assertEquals(str4, bsonReader.readString()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testStringEmpty() { + String json = "\"\""; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.STRING, bsonReader.readBsonType()); + assertEquals("", bsonReader.readString()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testSymbol() { + String json = "{ \"$symbol\" : \"symbol\" }"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.SYMBOL, bsonReader.readBsonType()); + assertEquals("symbol", bsonReader.readSymbol()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testTimestampStrict() { + String json = "{ \"$timestamp\" : { \"t\" : 1234, \"i\" : 1 } }"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.TIMESTAMP, bsonReader.readBsonType()); + assertEquals(new BsonTimestamp(1234, 1), bsonReader.readTimestamp()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testTimestampStrictWithOutOfOrderFields() { + String json = "{ \"$timestamp\" : { \"i\" : 1, \"t\" : 1234 } }"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.TIMESTAMP, bsonReader.readBsonType()); + assertEquals(new BsonTimestamp(1234, 1), bsonReader.readTimestamp()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testTimestampShell() { + String json = "Timestamp(1234, 1)"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.TIMESTAMP, bsonReader.readBsonType()); + assertEquals(new BsonTimestamp(1234, 1), bsonReader.readTimestamp()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + /** + * Test a $timestamp extended json with unquoted keys + */ + @Test + public void testTimestampStrictWithUnquotedKeys() { + String json = "{$timestamp : { t : 1234, i : 1 }}"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.TIMESTAMP, bsonReader.readBsonType()); + assertEquals(new BsonTimestamp(1234, 1), bsonReader.readTimestamp()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testUndefined() { + String json = "undefined"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.UNDEFINED, bsonReader.readBsonType()); + bsonReader.readUndefined(); + 
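+ // the bare "undefined" literal should behave the same as the extended { "$undefined" : true } form tested next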
assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testUndefinedExtended() { + String json = "{ \"$undefined\" : true }"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.UNDEFINED, bsonReader.readBsonType()); + bsonReader.readUndefined(); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testClosedState() { + AbstractBsonReader bsonReader = new JsonReader(""); + bsonReader.close(); + assertThrows(IllegalStateException.class, () -> bsonReader.readBinaryData()); + } + + @Test + public void testEndOfFile0() { + String json = "{"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DOCUMENT, bsonReader.readBsonType()); + bsonReader.readStartDocument(); + bsonReader.readBsonType(); + return null; + }, JsonParseException.class); + } + + @Test + public void testEndOfFile1() { + String json = "{ test : "; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DOCUMENT, bsonReader.readBsonType()); + bsonReader.readStartDocument(); + bsonReader.readBsonType(); + return null; + }, JsonParseException.class); + } + + @Test + public void testLegacyBinary() { + String json = "{ \"$binary\" : \"AQID\", \"$type\" : \"0\" }"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.BINARY, bsonReader.readBsonType()); + BsonBinary binary = bsonReader.readBinaryData(); + assertEquals(BsonBinarySubType.BINARY.getValue(), binary.getType()); + assertArrayEquals(new byte[]{1, 2, 3}, binary.getData()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testLegacyBinaryWithNumericType() { + String json = "{ \"$binary\" : \"AQID\", \"$type\" : 0 }"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.BINARY, bsonReader.readBsonType()); + BsonBinary binary = bsonReader.readBinaryData(); + assertEquals(BsonBinarySubType.BINARY.getValue(), binary.getType()); + assertArrayEquals(new byte[]{1, 2, 3}, binary.getData()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testLegacyUserDefinedBinary() { + String json = "{ \"$binary\" : \"AQID\", \"$type\" : \"80\" }"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.BINARY, bsonReader.readBsonType()); + BsonBinary binary = bsonReader.readBinaryData(); + assertEquals(BsonBinarySubType.USER_DEFINED.getValue(), binary.getType()); + assertArrayEquals(new byte[]{1, 2, 3}, binary.getData()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testLegacyUserDefinedBinaryWithKeyOrderReversed() { + String json = "{ \"$type\" : \"80\", \"$binary\" : \"AQID\" }"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.BINARY, bsonReader.readBsonType()); + BsonBinary binary = bsonReader.readBinaryData(); + assertEquals(BsonBinarySubType.USER_DEFINED.getValue(), binary.getType()); + assertArrayEquals(new byte[]{1, 2, 3}, binary.getData()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testLegacyUserDefinedBinaryWithNumericType() { + String json = "{ \"$binary\" : \"AQID\", \"$type\" : 128 }"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.BINARY, bsonReader.readBsonType()); + BsonBinary binary = bsonReader.readBinaryData(); + 
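+ // a numeric legacy $type of 128 must map to the user-defined subtype, just like the hex string "80" above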
assertEquals(BsonBinarySubType.USER_DEFINED.getValue(), binary.getType()); + assertArrayEquals(new byte[]{1, 2, 3}, binary.getData()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testCanonicalExtendedJsonBinary() { + String json = "{ \"$binary\" : { \"base64\" : \"AQID\", \"subType\" : \"80\" } }"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.BINARY, bsonReader.readBsonType()); + BsonBinary binary = bsonReader.readBinaryData(); + assertEquals(BsonBinarySubType.USER_DEFINED.getValue(), binary.getType()); + assertArrayEquals(new byte[]{1, 2, 3}, binary.getData()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testCanonicalExtendedJsonBinaryWithKeysReversed() { + String json = "{ \"$binary\" : { \"subType\" : \"80\", \"base64\" : \"AQID\" } }"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.BINARY, bsonReader.readBsonType()); + BsonBinary binary = bsonReader.readBinaryData(); + assertEquals(BsonBinarySubType.USER_DEFINED.getValue(), binary.getType()); + assertArrayEquals(new byte[]{1, 2, 3}, binary.getData()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testCanonicalExtendedJsonBinaryWithIncorrectFirstKey() { + String json = "{ \"$binary\" : { \"badKey\" : \"80\", \"base64\" : \"AQID\" } }"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.BINARY, bsonReader.readBsonType()); + return null; + }, JsonParseException.class); + } + + @Test + public void testUuid() { + String json = "{ \"$uuid\" : \"b5f21e0c-2a0d-42d6-ad03-d827008d8ab6\"}"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.BINARY, bsonReader.readBsonType()); + BsonBinary binary = bsonReader.readBinaryData(); + assertEquals(BsonBinarySubType.UUID_STANDARD.getValue(), binary.getType()); + assertArrayEquals(new byte[]{-75, -14, 30, 12, 42, 13, 66, -42, -83, 3, -40, 39, 0, -115, -118, -74}, binary.getData()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + // testing that JsonReader uses internal UuidStringValidator, as UUID.fromString accepts this UUID + @Test + public void testInvalidUuid() { + // first hyphen out of place + String json = "{ \"$uuid\" : \"73ff-d26444b-34c6-990e8e-7d1dfc035d4\"}"; + assertThrows(JsonParseException.class, () -> + testStringAndStream(json, bsonReader -> { + bsonReader.readBinaryData(); + return null; + })); + } + + @Test + public void testUuidConstructor() { + String json = "UUID(\"b5f21e0c-2a0d-42d6-ad03-d827008d8ab6\")"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.BINARY, bsonReader.readBsonType()); + BsonBinary binary = bsonReader.readBinaryData(); + assertEquals(BsonBinarySubType.UUID_STANDARD.getValue(), binary.getType()); + assertArrayEquals(new byte[]{-75, -14, 30, 12, 42, 13, 66, -42, -83, 3, -40, 39, 0, -115, -118, -74}, binary.getData()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testInfinity() { + String json = "{ \"value\" : Infinity }"; + testStringAndStream(json, bsonReader -> { + bsonReader.readStartDocument(); + assertEquals(BsonType.DOUBLE, bsonReader.readBsonType()); + bsonReader.readName(); + assertEquals(Double.POSITIVE_INFINITY, bsonReader.readDouble(), 0.0001); + return null; + }); + } + + @Test + public void testNaN() { + String 
json = "{ \"value\" : NaN }"; + testStringAndStream(json, bsonReader -> { + bsonReader.readStartDocument(); + assertEquals(BsonType.DOUBLE, bsonReader.readBsonType()); + bsonReader.readName(); + assertEquals(Double.NaN, bsonReader.readDouble(), 0.0001); + return null; + }); + } + + @Test + public void testBinData() { + String json = "{ \"a\" : BinData(3, AQID) }"; + testStringAndStream(json, bsonReader -> { + bsonReader.readStartDocument(); + assertEquals(BsonType.BINARY, bsonReader.readBsonType()); + BsonBinary binary = bsonReader.readBinaryData(); + assertEquals(3, binary.getType()); + assertArrayEquals(new byte[]{1, 2, 3}, binary.getData()); + bsonReader.readEndDocument(); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testBinDataUserDefined() { + String json = "{ \"a\" : BinData(128, AQID) }"; + testStringAndStream(json, bsonReader -> { + bsonReader.readStartDocument(); + assertEquals(BsonType.BINARY, bsonReader.readBsonType()); + BsonBinary binary = bsonReader.readBinaryData(); + assertEquals(BsonBinarySubType.USER_DEFINED.getValue(), binary.getType()); + assertArrayEquals(new byte[]{1, 2, 3}, binary.getData()); + bsonReader.readEndDocument(); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testBinDataWithNew() { + String json = "{ \"a\" : new BinData(3, AQID) }"; + testStringAndStream(json, bsonReader -> { + bsonReader.readStartDocument(); + assertEquals(BsonType.BINARY, bsonReader.readBsonType()); + BsonBinary binary = bsonReader.readBinaryData(); + assertEquals(3, binary.getType()); + assertArrayEquals(new byte[]{1, 2, 3}, binary.getData()); + bsonReader.readEndDocument(); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testBinDataQuoted() { + String json = "{ \"a\" : BinData(3, \"AQIDBA==\") }"; + testStringAndStream(json, bsonReader -> { + bsonReader.readStartDocument(); + assertEquals(BsonType.BINARY, bsonReader.readBsonType()); + BsonBinary binary = bsonReader.readBinaryData(); + assertEquals(3, binary.getType()); + assertArrayEquals(new byte[]{1, 2, 3, 4}, binary.getData()); + bsonReader.readEndDocument(); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testDateWithNumbers() { + String json = "new Date(1988, 06, 13 , 22 , 1)"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DATE_TIME, bsonReader.readBsonType()); + assertEquals(584834460000L, bsonReader.readDateTime()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testDateTimeConstructorWithNew() { + String json = "new Date(\"Sat Jul 13 2013 11:10:05 UTC\")"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DATE_TIME, bsonReader.readBsonType()); + assertEquals(1373713805000L, bsonReader.readDateTime()); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testEmptyDateTimeConstructorWithNew() { + long currentTime = new Date().getTime(); + String json = "new Date()"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DATE_TIME, bsonReader.readBsonType()); + assertTrue(bsonReader.readDateTime() >= currentTime); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testDateTimeWithOutNew() { + 
long currentTime = currentTimeWithoutMillis(); + String json = "Date()"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.STRING, bsonReader.readBsonType()); + assertTrue(dateStringToTime(bsonReader.readString()) >= currentTime); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testDateTimeWithOutNewContainingJunk() { + long currentTime = currentTimeWithoutMillis(); + String json = "Date({ok: 1}, 1234)"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.STRING, bsonReader.readBsonType()); + assertTrue(dateStringToTime(bsonReader.readString()) >= currentTime); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testEmptyISODateTimeConstructorWithNew() { + long currentTime = new Date().getTime(); + String json = "new ISODate()"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DATE_TIME, bsonReader.readBsonType()); + assertTrue(bsonReader.readDateTime() >= currentTime); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testEmptyISODateTimeConstructor() { + long currentTime = new Date().getTime(); + String json = "ISODate()"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DATE_TIME, bsonReader.readBsonType()); + assertTrue(bsonReader.readDateTime() >= currentTime); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testRegExp() { + String json = "RegExp(\"abc\",\"im\")"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.REGULAR_EXPRESSION, bsonReader.readBsonType()); + BsonRegularExpression regularExpression = bsonReader.readRegularExpression(); + assertEquals("abc", regularExpression.getPattern()); + assertEquals("im", regularExpression.getOptions()); + return null; + }); + } + + @Test + public void testRegExpWithNew() { + String json = "new RegExp(\"abc\",\"im\")"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.REGULAR_EXPRESSION, bsonReader.readBsonType()); + BsonRegularExpression regularExpression = bsonReader.readRegularExpression(); + assertEquals("abc", regularExpression.getPattern()); + assertEquals("im", regularExpression.getOptions()); + return null; + }); + } + + @Test + public void testSkip() { + String json = "{ \"a\" : 2 }"; + testStringAndStream(json, bsonReader -> { + bsonReader.readStartDocument(); + bsonReader.readBsonType(); + bsonReader.skipName(); + bsonReader.skipValue(); + assertEquals(BsonType.END_OF_DOCUMENT, bsonReader.readBsonType()); + bsonReader.readEndDocument(); + assertEquals(AbstractBsonReader.State.DONE, bsonReader.getState()); + return null; + }); + } + + @Test + public void testDBPointer() { + String json = "DBPointer(\"b\",\"5209296cd6c4e38cf96fffdc\")"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DB_POINTER, bsonReader.readBsonType()); + BsonDbPointer dbPointer = bsonReader.readDBPointer(); + assertEquals("b", dbPointer.getNamespace()); + assertEquals(new ObjectId("5209296cd6c4e38cf96fffdc"), dbPointer.getId()); + return null; + }); + } + + @Test + public void testDBPointerWithNew() { + String json = "new DBPointer(\"b\",\"5209296cd6c4e38cf96fffdc\")"; + testStringAndStream(json, bsonReader -> { + assertEquals(BsonType.DB_POINTER, bsonReader.readBsonType()); + BsonDbPointer dbPointer = bsonReader.readDBPointer(); + assertEquals("b", 
dbPointer.getNamespace());
+            assertEquals(new ObjectId("5209296cd6c4e38cf96fffdc"), dbPointer.getId());
+            return null;
+        });
+    }
+
+    @Test
+    public void testMultipleMarks() {
+        String json = "{a : { b : 1 }}";
+        testStringAndStream(json, bsonReader -> {
+            bsonReader.readStartDocument();
+            BsonReaderMark markOne = bsonReader.getMark();
+            bsonReader.readName("a");
+            bsonReader.readStartDocument();
+            BsonReaderMark markTwo = bsonReader.getMark();
+            bsonReader.readName("b");
+            bsonReader.readInt32();
+            bsonReader.readEndDocument();
+            markTwo.reset();
+            bsonReader.readName("b");
+            markOne.reset();
+            bsonReader.readName("a");
+            return null;
+        });
+    }
+
+    @Test
+    public void testTwoDocuments() {
+        Reader reader = new BufferedReader(new InputStreamReader(new ByteArrayInputStream("{a : 1}{b : 1}".getBytes())));
+
+        JsonReader jsonReader = new JsonReader(reader);
+        jsonReader.readStartDocument();
+        jsonReader.readName("a");
+        jsonReader.readInt32();
+        jsonReader.readEndDocument();
+
+        jsonReader = new JsonReader(reader);
+        jsonReader.readStartDocument();
+        jsonReader.readName("b");
+        jsonReader.readInt32();
+        jsonReader.readEndDocument();
+    }
+
+    private void testStringAndStream(final String json, final Function<BsonReader, Void> testFunc,
+                                     final Class<? extends RuntimeException> exClass) {
+        try {
+            testFunc.apply(new JsonReader(json));
+        } catch (Exception e) {
+            if (exClass == null) {
+                throw e;
+            }
+            assertEquals(exClass, e.getClass());
+        }
+        try {
+            testFunc.apply(new JsonReader(new InputStreamReader(new ByteArrayInputStream(json.getBytes(StandardCharsets.UTF_8)),
+                    StandardCharsets.UTF_8)));
+        } catch (Exception e) {
+            if (exClass == null) {
+                throw e;
+            }
+            assertEquals(exClass, e.getClass());
+        }
+    }
+
+    private void testStringAndStream(final String json, final Function<BsonReader, Void> testFunc) {
+        testStringAndStream(json, testFunc, null);
+    }
+
+    private long dateStringToTime(final String date) {
+        SimpleDateFormat df = new SimpleDateFormat("EEE MMM dd yyyy HH:mm:ss z", Locale.ENGLISH);
+        return df.parse(date, new ParsePosition(0)).getTime();
+    }
+
+    private long currentTimeWithoutMillis() {
+        long currentTime = new Date().getTime();
+        return currentTime - (currentTime % 1000);
+    }
+
+}
diff --git a/bson/src/test/unit/org/bson/json/JsonScannerTest.java b/bson/src/test/unit/org/bson/json/JsonScannerTest.java
new file mode 100644
index 00000000000..cf0647b08de
--- /dev/null
+++ b/bson/src/test/unit/org/bson/json/JsonScannerTest.java
@@ -0,0 +1,538 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.bson.json; + +import org.bson.BsonRegularExpression; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class JsonScannerTest { + + @Test + public void testEndOfFile() { + String json = "\t "; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.END_OF_FILE, token.getType()); + assertEquals("", token.getValue()); + } + + @Test + public void testBeginObject() { + String json = "\t {x"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.BEGIN_OBJECT, token.getType()); + assertEquals("{", token.getValue()); + assertEquals('x', buffer.read()); + } + + @Test + public void testEndObject() { + String json = "\t }x"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.END_OBJECT, token.getType()); + assertEquals("}", token.getValue()); + assertEquals('x', buffer.read()); + } + + @Test + public void testBeginArray() { + String json = "\t [x"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.BEGIN_ARRAY, token.getType()); + assertEquals("[", token.getValue()); + assertEquals('x', buffer.read()); + } + + @Test + public void testEndArray() { + String json = "\t ]x"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.END_ARRAY, token.getType()); + assertEquals("]", token.getValue()); + assertEquals('x', buffer.read()); + } + + @Test + public void testParentheses() { + String json = "\t (jj)x"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.LEFT_PAREN, token.getType()); + assertEquals("(", token.getValue()); + token = scanner.nextToken(); + token = scanner.nextToken(); + assertEquals(JsonTokenType.RIGHT_PAREN, token.getType()); + assertEquals('x', buffer.read()); + } + + + @Test + public void testNameSeparator() { + String json = "\t :x"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.COLON, token.getType()); + assertEquals(":", token.getValue()); + assertEquals('x', buffer.read()); + } + + @Test + public void testValueSeparator() { + String json = "\t ,x"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.COMMA, token.getType()); + assertEquals(",", token.getValue()); + assertEquals('x', buffer.read()); + } + + @Test + public void testEmptyString() { + String json = "\t \"\"x"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.STRING, token.getType()); + assertEquals("", token.getValue()); + assertEquals('x', buffer.read()); + } + + @Test + public void test1CharacterString() { + String json = "\t \"1\"x"; + 
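+        // Convention in these scanner tests: the leading "\t " exercises whitespace skipping, and the
+        // trailing character (here 'x') is asserted afterwards to show the scanner stops exactly at the
+        // end of the token it returned.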
JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.STRING, token.getType()); + assertEquals("1", token.getValue()); + assertEquals('x', buffer.read()); + } + + @Test + public void test2CharacterString() { + String json = "\t \"12\"x"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.STRING, token.getType()); + assertEquals("12", token.getValue()); + assertEquals('x', buffer.read()); + } + + @Test + public void test3CharacterString() { + String json = "\t \"123\"x"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.STRING, token.getType()); + assertEquals("123", token.getValue()); + assertEquals('x', buffer.read()); + } + + @Test + public void testEscapeSequences() { + String json = "\t \"x\\\"\\\\\\/\\b\\f\\n\\r\\t\\u0030y\"x"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.STRING, token.getType()); + assertEquals("x\"\\/\b\f\n\r\t0y", token.getValue()); + assertEquals('x', buffer.read()); + } + + + @Test + public void testTrue() { + String json = "\t true,"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.UNQUOTED_STRING, token.getType()); + assertEquals("true", token.getValue()); + assertEquals(',', buffer.read()); + } + + @Test + public void testMinusInfinity() { + String json = "\t -Infinity]"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.DOUBLE, token.getType()); + assertEquals(Double.NEGATIVE_INFINITY, token.getValue()); + assertEquals(']', buffer.read()); + } + + @Test + public void testFalse() { + String json = "\t false,"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.UNQUOTED_STRING, token.getType()); + assertEquals("false", token.getValue()); + assertEquals(',', buffer.read()); + } + + @Test + public void testNull() { + String json = "\t null,"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.UNQUOTED_STRING, token.getType()); + assertEquals("null", token.getValue()); + assertEquals(',', buffer.read()); + } + + @Test + public void testUndefined() { + String json = "\t undefined,"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.UNQUOTED_STRING, token.getType()); + assertEquals("undefined", token.getValue()); + assertEquals(',', buffer.read()); + } + + @Test + public void testUnquotedStringWithSeparator() { + String json = "\t name123:1"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.UNQUOTED_STRING, token.getType()); + assertEquals("name123", token.getValue()); + assertEquals(':', 
buffer.read()); + } + + @Test + public void testUnquotedString() { + String json = "name123"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.UNQUOTED_STRING, token.getType()); + assertEquals("name123", token.getValue()); + assertEquals(-1, buffer.read()); + } + + @Test + public void testZero() { + String json = "\t 0,"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.INT32, token.getType()); + assertEquals(0, token.getValue()); + assertEquals(',', buffer.read()); + } + + @Test + public void testMinusZero() { + String json = "\t -0,"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.INT32, token.getType()); + assertEquals(-0, token.getValue()); + assertEquals(',', buffer.read()); + } + + @Test + public void testOne() { + String json = "\t 1,"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.INT32, token.getType()); + assertEquals(1, token.getValue()); + assertEquals(',', buffer.read()); + } + + @Test + public void testMinusOne() { + String json = "\t -1,"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.INT32, token.getType()); + assertEquals(-1, token.getValue()); + assertEquals(',', buffer.read()); + } + + @Test + public void testTwelve() { + String json = "\t 12,"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.INT32, token.getType()); + assertEquals(12, token.getValue()); + assertEquals(',', buffer.read()); + } + + @Test + public void testMinusTwelve() { + String json = "\t -12,"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.INT32, token.getType()); + assertEquals(-12, token.getValue()); + assertEquals(',', buffer.read()); + } + + @Test + public void testZeroPointZero() { + String json = "\t 0.0,"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.DOUBLE, token.getType()); + assertEquals(0.0, token.getValue()); + assertEquals(',', buffer.read()); + } + + @Test + public void testMinusZeroPointZero() { + String json = "\t -0.0,"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.DOUBLE, token.getType()); + assertEquals(-0.0, token.getValue()); + assertEquals(',', buffer.read()); + } + + @Test + public void testZeroExponentOne() { + String json = "\t 0e1,"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.DOUBLE, token.getType()); + assertEquals(0e1, token.getValue()); + assertEquals(',', buffer.read()); + } + + @Test + public void testMinusZeroExponentOne() { + String json = "\t -0e1,"; + JsonBuffer 
buffer = new JsonStringBuffer(json);
+        JsonScanner scanner = new JsonScanner(buffer);
+        JsonToken token = scanner.nextToken();
+        assertEquals(JsonTokenType.DOUBLE, token.getType());
+        assertEquals(-0e1, token.getValue());
+        assertEquals(',', buffer.read());
+    }
+
+    @Test
+    public void testZeroExponentMinusOne() {
+        String json = "\t 0e-1,";
+        JsonBuffer buffer = new JsonStringBuffer(json);
+        JsonScanner scanner = new JsonScanner(buffer);
+        JsonToken token = scanner.nextToken();
+        assertEquals(JsonTokenType.DOUBLE, token.getType());
+        assertEquals(0e-1, token.getValue());
+        assertEquals(',', buffer.read());
+    }
+
+    @Test
+    public void testMinusZeroExponentMinusOne() {
+        String json = "\t -0e-1,";
+        JsonBuffer buffer = new JsonStringBuffer(json);
+        JsonScanner scanner = new JsonScanner(buffer);
+        JsonToken token = scanner.nextToken();
+        assertEquals(JsonTokenType.DOUBLE, token.getType());
+        assertEquals(-0e-1, token.getValue());
+        assertEquals(',', buffer.read());
+    }
+
+    @Test
+    public void testOnePointTwo() {
+        String json = "\t 1.2,";
+        JsonBuffer buffer = new JsonStringBuffer(json);
+        JsonScanner scanner = new JsonScanner(buffer);
+        JsonToken token = scanner.nextToken();
+        assertEquals(JsonTokenType.DOUBLE, token.getType());
+        assertEquals(1.2, token.getValue());
+        assertEquals(',', buffer.read());
+    }
+
+    @Test
+    public void testMinusOnePointTwo() {
+        String json = "\t -1.2,";
+        JsonBuffer buffer = new JsonStringBuffer(json);
+        JsonScanner scanner = new JsonScanner(buffer);
+        JsonToken token = scanner.nextToken();
+        assertEquals(JsonTokenType.DOUBLE, token.getType());
+        assertEquals(-1.2, token.getValue());
+        assertEquals(',', buffer.read());
+    }
+
+    @Test
+    public void testOneExponentTwelve() {
+        String json = "\t 1e12,";
+        JsonBuffer buffer = new JsonStringBuffer(json);
+        JsonScanner scanner = new JsonScanner(buffer);
+        JsonToken token = scanner.nextToken();
+        assertEquals(JsonTokenType.DOUBLE, token.getType());
+        assertEquals(1e12, token.getValue());
+        assertEquals(',', buffer.read());
+    }
+
+    @Test
+    public void testMinusOneExponentTwelve() {
+        String json = "\t -1e12,";
+        JsonBuffer buffer = new JsonStringBuffer(json);
+        JsonScanner scanner = new JsonScanner(buffer);
+        JsonToken token = scanner.nextToken();
+        assertEquals(JsonTokenType.DOUBLE, token.getType());
+        assertEquals(-1e12, token.getValue());
+        assertEquals(',', buffer.read());
+    }
+
+    @Test
+    public void testOneExponentMinusTwelve() {
+        String json = "\t 1e-12,";
+        JsonBuffer buffer = new JsonStringBuffer(json);
+        JsonScanner scanner = new JsonScanner(buffer);
+        JsonToken token = scanner.nextToken();
+        assertEquals(JsonTokenType.DOUBLE, token.getType());
+        assertEquals(1e-12, token.getValue());
+        assertEquals(',', buffer.read());
+    }
+
+    @Test
+    public void testMinusOneExponentMinusTwelve() {
+        String json = "\t -1e-12,";
+        JsonBuffer buffer = new JsonStringBuffer(json);
+        JsonScanner scanner = new JsonScanner(buffer);
+        JsonToken token = scanner.nextToken();
+        assertEquals(JsonTokenType.DOUBLE, token.getType());
+        assertEquals(-1e-12, token.getValue());
+        assertEquals(',', buffer.read());
+    }
+
+    @Test
+    public void testRegularExpressionEmpty() {
+        String json = "\t //,";
+        JsonBuffer buffer = new JsonStringBuffer(json);
+        JsonScanner scanner = new JsonScanner(buffer);
+        JsonToken token = scanner.nextToken();
+        assertEquals(JsonTokenType.REGULAR_EXPRESSION, token.getType());
+
+        BsonRegularExpression regularExpression = token.getValue(BsonRegularExpression.class);
+
+        assertEquals("", regularExpression.getPattern());
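+        // a bare "//" scans as a regular-expression literal with an empty pattern and no options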
+ assertEquals("", regularExpression.getOptions()); + assertEquals(',', buffer.read()); + } + + @Test + public void testRegularExpressionPattern() { + String json = "\t /pattern/,"; + + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.REGULAR_EXPRESSION, token.getType()); + assertEquals("pattern", token.getValue(BsonRegularExpression.class).getPattern()); + assertEquals(',', buffer.read()); + } + + @Test + public void testRegularExpressionPatternAndOptions() { + String json = "\t /pattern/im,"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.REGULAR_EXPRESSION, token.getType()); + + BsonRegularExpression regularExpression = token.getValue(BsonRegularExpression.class); + assertEquals("pattern", regularExpression.getPattern()); + assertEquals("im", regularExpression.getOptions()); + assertEquals(',', buffer.read()); + } + + @Test + public void testRegularExpressionPatternAndEscapeSequence() { + String json = "\t /patte\\.n/,"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + JsonToken token = scanner.nextToken(); + assertEquals(JsonTokenType.REGULAR_EXPRESSION, token.getType()); + assertEquals("patte\\.n", token.getValue(BsonRegularExpression.class).getPattern()); + assertEquals(',', buffer.read()); + } + + @Test + public void testInvalidRegularExpression() { + String json = "\t /pattern/nsk,"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + assertThrows(JsonParseException.class, () -> scanner.nextToken()); + } + + @Test + public void testInvalidRegularExpressionNoEnd() { + String json = "/b"; + JsonBuffer buffer = new JsonStringBuffer(json); + JsonScanner scanner = new JsonScanner(buffer); + assertThrows(JsonParseException.class, () ->scanner.nextToken()); + } + + @Test + public void testInvalidInput() { + String json = "\t &&"; + JsonScanner scanner = new JsonScanner(json); + assertThrows(JsonParseException.class, () -> scanner.nextToken()); + } + + @Test + public void testInvalidNumber() { + String json = "\t 123a]"; + JsonScanner scanner = new JsonScanner(json); + assertThrows(JsonParseException.class, () -> scanner.nextToken()); + } + + @Test + public void testInvalidInfinity() { + String json = "\t -Infinnity]"; + JsonScanner scanner = new JsonScanner(json); + assertThrows(JsonParseException.class, () -> scanner.nextToken()); + } +} + diff --git a/bson/src/test/unit/org/bson/json/JsonStreamBufferTest.java b/bson/src/test/unit/org/bson/json/JsonStreamBufferTest.java new file mode 100644 index 00000000000..c8bdfb42449 --- /dev/null +++ b/bson/src/test/unit/org/bson/json/JsonStreamBufferTest.java @@ -0,0 +1,107 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.json; + +import org.junit.jupiter.api.Test; + +import java.io.ByteArrayInputStream; +import java.io.InputStreamReader; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class JsonStreamBufferTest { + + @Test + public void testRead() { + JsonStreamBuffer buffer = new JsonStreamBuffer(new InputStreamReader(new ByteArrayInputStream("ABC".getBytes()))); + assertEquals('A', buffer.read()); + assertEquals('B', buffer.read()); + assertEquals('C', buffer.read()); + assertEquals(-1, buffer.read()); + } + + @Test + public void testUnRead() { + JsonStreamBuffer buffer = new JsonStreamBuffer(new InputStreamReader(new ByteArrayInputStream("A".getBytes()))); + buffer.unread(buffer.read()); + assertEquals('A', buffer.read()); + assertEquals(-1, buffer.read()); + } + + @Test + public void testPosition() { + JsonStreamBuffer buffer = new JsonStreamBuffer(new InputStreamReader(new ByteArrayInputStream("ABC".getBytes()))); + + buffer.read(); + buffer.read(); + assertEquals(2, buffer.getPosition()); + } + + @Test + public void testEOFCheck() { + JsonStreamBuffer buffer = new JsonStreamBuffer(new InputStreamReader(new ByteArrayInputStream("".getBytes()))); + + buffer.read(); + assertThrows(JsonParseException.class, () -> buffer.read()); + } + + @Test + public void testMarkAndReset() { + JsonStreamBuffer buffer = + new JsonStreamBuffer(new InputStreamReader(new ByteArrayInputStream("ABCDEFGHIJKLMNOPQRSTUPWXYZ".getBytes())), 4); + + int pos = buffer.mark(); + assertEquals(0, pos); + assertEquals('A', buffer.read()); + + buffer.reset(pos); + assertEquals('A', buffer.read()); + + pos = buffer.mark(); + buffer.discard(pos); + assertEquals('B', buffer.read()); + + pos = buffer.mark(); + assertEquals(2, pos); + + buffer.read(); + buffer.mark(); + + buffer.read(); + buffer.mark(); + + buffer.reset(pos + 1); + assertEquals(pos + 1, buffer.getPosition()); + assertEquals('D', buffer.read()); + + pos = buffer.mark(); + buffer.read(); + buffer.read(); + buffer.read(); + buffer.read(); + buffer.read(); + + buffer.reset(pos); + assertEquals('E', buffer.read()); + assertEquals('F', buffer.read()); + assertEquals('G', buffer.read()); + assertEquals('H', buffer.read()); + assertEquals('I', buffer.read()); + assertEquals('J', buffer.read()); + } +} diff --git a/bson/src/test/unit/org/bson/json/JsonStringBufferTest.java b/bson/src/test/unit/org/bson/json/JsonStringBufferTest.java new file mode 100644 index 00000000000..058a27cd4d0 --- /dev/null +++ b/bson/src/test/unit/org/bson/json/JsonStringBufferTest.java @@ -0,0 +1,59 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.json; + +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class JsonStringBufferTest { + + @Test + public void testRead() { + JsonBuffer buffer = new JsonStringBuffer("ABC"); + assertEquals('A', buffer.read()); + assertEquals('B', buffer.read()); + assertEquals('C', buffer.read()); + assertEquals(-1, buffer.read()); + } + + @Test + public void testUnRead() { + JsonStringBuffer buffer = new JsonStringBuffer("A"); + buffer.unread(buffer.read()); + assertEquals('A', buffer.read()); + assertEquals(-1, buffer.read()); + } + + @Test + public void testPosition() { + JsonStringBuffer buffer = new JsonStringBuffer("ABC"); + + buffer.read(); + buffer.read(); + assertEquals(2, buffer.getPosition()); + } + + @Test + public void testEOFCheck() { + JsonStringBuffer buffer = new JsonStringBuffer(""); + + buffer.read(); + assertThrows(JsonParseException.class, () -> buffer.read()); + } +} diff --git a/bson/src/test/unit/org/bson/json/JsonWriterSettingsSpecification.groovy b/bson/src/test/unit/org/bson/json/JsonWriterSettingsSpecification.groovy new file mode 100644 index 00000000000..b2568b0bae6 --- /dev/null +++ b/bson/src/test/unit/org/bson/json/JsonWriterSettingsSpecification.groovy @@ -0,0 +1,204 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.json + +import spock.lang.Specification + +class JsonWriterSettingsSpecification extends Specification { + + def 'test defaults'() { + when: + def settings = JsonWriterSettings.builder().build() + + then: + !settings.isIndent() + settings.getOutputMode() == JsonMode.RELAXED + settings.getMaxLength() == 0 + } + + + def 'test output mode'() { + when: + def settings = JsonWriterSettings.builder().outputMode(JsonMode.SHELL).build() + + then: + settings.getOutputMode() == JsonMode.SHELL + } + + def 'test indent defaults'() { + when: + def settings = JsonWriterSettings.builder().indent(true).build() + + then: + settings.isIndent() + settings.getIndentCharacters() == ' ' + settings.getNewLineCharacters() == System.getProperty('line.separator') + } + + def 'test indent settings'() { + when: + def settings = JsonWriterSettings.builder() + .indent(true).indentCharacters('\t').newLineCharacters('\r\n').build() + + then: + settings.getIndentCharacters() == '\t' + settings.getNewLineCharacters() == '\r\n' + } + + def 'test max length setting'() { + when: + def settings = JsonWriterSettings.builder() + .maxLength(100).build() + + then: + settings.getMaxLength() == 100 + } + + @SuppressWarnings('deprecation') + def 'should use legacy extended json converters for strict mode'() { + when: + def settings = JsonWriterSettings.builder().outputMode(JsonMode.STRICT).build() + + then: + settings.binaryConverter.class == LegacyExtendedJsonBinaryConverter + settings.booleanConverter.class == JsonBooleanConverter + settings.dateTimeConverter.class == LegacyExtendedJsonDateTimeConverter + settings.decimal128Converter.class == ExtendedJsonDecimal128Converter + settings.doubleConverter.class == JsonDoubleConverter + settings.int32Converter.class == JsonInt32Converter + settings.int64Converter.class == ExtendedJsonInt64Converter + settings.javaScriptConverter.class == JsonJavaScriptConverter + settings.maxKeyConverter.class == ExtendedJsonMaxKeyConverter + settings.minKeyConverter.class == ExtendedJsonMinKeyConverter + settings.nullConverter.class == JsonNullConverter + settings.objectIdConverter.class == ExtendedJsonObjectIdConverter + settings.regularExpressionConverter.class == LegacyExtendedJsonRegularExpressionConverter + settings.stringConverter.class == JsonStringConverter + settings.symbolConverter.class == JsonSymbolConverter + settings.timestampConverter.class == ExtendedJsonTimestampConverter + settings.undefinedConverter.class == ExtendedJsonUndefinedConverter + } + + def 'should use extended json converters for extended json mode'() { + when: + def settings = JsonWriterSettings.builder().outputMode(JsonMode.EXTENDED).build() + + then: + settings.binaryConverter.class == ExtendedJsonBinaryConverter + settings.booleanConverter.class == JsonBooleanConverter + settings.dateTimeConverter.class == ExtendedJsonDateTimeConverter + settings.decimal128Converter.class == ExtendedJsonDecimal128Converter + settings.doubleConverter.class == ExtendedJsonDoubleConverter + settings.int32Converter.class == ExtendedJsonInt32Converter + settings.int64Converter.class == ExtendedJsonInt64Converter + settings.javaScriptConverter.class == JsonJavaScriptConverter + settings.maxKeyConverter.class == ExtendedJsonMaxKeyConverter + settings.minKeyConverter.class == ExtendedJsonMinKeyConverter + settings.nullConverter.class == JsonNullConverter + settings.objectIdConverter.class == ExtendedJsonObjectIdConverter + settings.regularExpressionConverter.class == ExtendedJsonRegularExpressionConverter + 
settings.stringConverter.class == JsonStringConverter + settings.symbolConverter.class == JsonSymbolConverter + settings.timestampConverter.class == ExtendedJsonTimestampConverter + settings.undefinedConverter.class == ExtendedJsonUndefinedConverter + } + + def 'should use shell converters for shell mode'() { + when: + def settings = JsonWriterSettings.builder().outputMode(JsonMode.SHELL).build() + + then: + settings.binaryConverter.class == ShellBinaryConverter + settings.booleanConverter.class == JsonBooleanConverter + settings.dateTimeConverter.class == ShellDateTimeConverter + settings.decimal128Converter.class == ShellDecimal128Converter + settings.doubleConverter.class == JsonDoubleConverter + settings.int32Converter.class == JsonInt32Converter + settings.int64Converter.class == ShellInt64Converter + settings.javaScriptConverter.class == JsonJavaScriptConverter + settings.maxKeyConverter.class == ShellMaxKeyConverter + settings.minKeyConverter.class == ShellMinKeyConverter + settings.nullConverter.class == JsonNullConverter + settings.objectIdConverter.class == ShellObjectIdConverter + settings.regularExpressionConverter.class == ShellRegularExpressionConverter + settings.stringConverter.class == JsonStringConverter + settings.symbolConverter.class == JsonSymbolConverter + settings.timestampConverter.class == ShellTimestampConverter + settings.undefinedConverter.class == ShellUndefinedConverter + } + + def 'should set converters'() { + given: + def binaryConverter = new ShellBinaryConverter() + def booleanConverter = new JsonBooleanConverter() + def dateTimeConverter = new ShellDateTimeConverter() + def decimal128Converter = new ShellDecimal128Converter() + def doubleConverter = new JsonDoubleConverter() + def int32Converter = new JsonInt32Converter() + def int64Converter = new ShellInt64Converter() + def javaScriptConverter = new JsonJavaScriptConverter() + def maxKeyConverter = new ShellMaxKeyConverter() + def minKeyConverter = new ShellMinKeyConverter() + def nullConverter = new JsonNullConverter() + def objectIdConverter = new ShellObjectIdConverter() + def regularExpressionConverter = new ShellRegularExpressionConverter() + def stringConverter = new JsonStringConverter() + def symbolConverter = new JsonSymbolConverter() + def timestampConverter = new ShellTimestampConverter() + def undefinedConverter = new ShellUndefinedConverter() + + when: + def settings = JsonWriterSettings.builder() + .binaryConverter(binaryConverter) + .booleanConverter(booleanConverter) + .dateTimeConverter(dateTimeConverter) + .decimal128Converter(decimal128Converter) + .doubleConverter(doubleConverter) + .int32Converter(int32Converter) + .int64Converter(int64Converter) + .javaScriptConverter(javaScriptConverter) + .maxKeyConverter(maxKeyConverter) + .minKeyConverter(minKeyConverter) + .nullConverter(nullConverter) + .objectIdConverter(objectIdConverter) + .regularExpressionConverter(regularExpressionConverter) + .stringConverter(stringConverter) + .symbolConverter(symbolConverter) + .timestampConverter(timestampConverter) + .undefinedConverter(undefinedConverter) + .build() + + then: + settings.binaryConverter == binaryConverter + settings.booleanConverter == booleanConverter + settings.dateTimeConverter == dateTimeConverter + settings.decimal128Converter == decimal128Converter + settings.doubleConverter == doubleConverter + settings.int32Converter == int32Converter + settings.int64Converter == int64Converter + settings.javaScriptConverter == javaScriptConverter + settings.maxKeyConverter == 
maxKeyConverter + settings.minKeyConverter == minKeyConverter + settings.nullConverter == nullConverter + settings.objectIdConverter == objectIdConverter + settings.regularExpressionConverter == regularExpressionConverter + settings.stringConverter == stringConverter + settings.symbolConverter == symbolConverter + settings.timestampConverter == timestampConverter + settings.undefinedConverter == undefinedConverter + } +} diff --git a/bson/src/test/unit/org/bson/json/JsonWriterSpecification.groovy b/bson/src/test/unit/org/bson/json/JsonWriterSpecification.groovy new file mode 100644 index 00000000000..8cb8ecbea4b --- /dev/null +++ b/bson/src/test/unit/org/bson/json/JsonWriterSpecification.groovy @@ -0,0 +1,336 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json + +import org.bson.BsonBinary +import org.bson.BsonDbPointer +import org.bson.BsonDocumentReader +import org.bson.BsonRegularExpression +import org.bson.BsonTimestamp +import org.bson.types.Decimal128 +import org.bson.types.ObjectId +import spock.lang.Specification + +import static org.bson.BsonHelper.documentWithValuesOfEveryType + +class JsonWriterSpecification extends Specification { + + def stringWriter = new StringWriter() + def writer = new JsonWriter(stringWriter, JsonWriterSettings.builder().outputMode(JsonMode.RELAXED).build()) + def jsonWithValuesOfEveryType = documentWithValuesOfEveryType().toJson(JsonWriterSettings.builder().build()) + + def 'should pipe all types'() { + given: + def reader = new BsonDocumentReader(documentWithValuesOfEveryType()) + + when: + writer.pipe(reader) + + then: + stringWriter.toString() == documentWithValuesOfEveryType().toJson() + } + + def 'should pipe all types with capped length'() { + given: + def reader = new BsonDocumentReader(documentWithValuesOfEveryType()) + def writer = new JsonWriter(stringWriter, JsonWriterSettings.builder().maxLength(maxLength).build()) + + when: + writer.pipe(reader) + + then: + stringWriter.toString() == ((maxLength == 0) ? + jsonWithValuesOfEveryType : jsonWithValuesOfEveryType[0.. 
<maxLength])

        where:
        // NOTE: reconstructed; the original data rows for maxLength were lost. Any lengths from 0
        // (meaning "no cap") up to the full JSON length exercise the cap.
        maxLength << (0..documentWithValuesOfEveryType().toJson().length())
    }
}
diff --git a/bson/src/test/unit/org/bson/json/JsonWriterTest.java b/bson/src/test/unit/org/bson/json/JsonWriterTest.java
new file mode 100644
--- /dev/null
+++ b/bson/src/test/unit/org/bson/json/JsonWriterTest.java
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.json;
+
+import org.bson.BsonBinary;
+import org.bson.BsonDbPointer;
+import org.bson.BsonInvalidOperationException;
+import org.bson.BsonRegularExpression;
+import org.bson.BsonTimestamp;
+import org.bson.types.Decimal128;
+import org.bson.types.ObjectId;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import java.io.StringWriter;
+import java.util.Date;
+import java.util.List;
+
+import static java.util.Arrays.asList;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+// NOTE: the class header, imports, and fixture above/below are reconstructed; the tests that
+// follow assume a default (relaxed-mode) JsonWriter over a StringWriter.
+public class JsonWriterTest {
+
+    private StringWriter stringWriter;
+    private JsonWriter writer;
+
+    @BeforeEach
+    public void setup() {
+        stringWriter = new StringWriter();
+        writer = new JsonWriter(stringWriter);
+    }
+
+    private static class TestData<T> {
+        private final T value;
+        private final String expected;
+
+        TestData(final T value, final String expected) {
+            this.value = value;
+            this.expected = expected;
+        }
+    }
+
+    @Test
+    public void shouldThrowExceptionForBooleanWhenWritingBeforeStartingDocument() {
+        assertThrows(BsonInvalidOperationException.class, () ->
+                writer.writeBoolean("b1", true));
+    }
+
+    @Test
+    public void shouldThrowExceptionForNameWhenWritingBeforeStartingDocument() {
+        assertThrows(BsonInvalidOperationException.class, () ->
+                writer.writeName("name"));
+    }
+
+    @Test
+    public void shouldThrowExceptionForStringWhenStateIsValue() {
+        writer.writeStartDocument();
+        assertThrows(BsonInvalidOperationException.class, () ->
+                writer.writeString("SomeString"));
+    }
+
+    @Test
+    public void shouldThrowExceptionWhenEndingAnArrayWhenStateIsValue() {
+        writer.writeStartDocument();
+        assertThrows(BsonInvalidOperationException.class, () ->
+                writer.writeEndArray());
+    }
+
+    @Test
+    public void shouldThrowExceptionWhenWritingASecondName() {
+        writer.writeStartDocument();
+        writer.writeName("f1");
+        assertThrows(BsonInvalidOperationException.class, () ->
+                writer.writeName("i2"));
+    }
+
+    @Test
+    public void shouldThrowExceptionWhenEndingADocumentBeforeValueIsWritten() {
+        writer.writeStartDocument();
+        writer.writeName("f1");
+        assertThrows(BsonInvalidOperationException.class, () ->
+                writer.writeEndDocument());
+    }
+
+    @Test
+    public void shouldThrowAnExceptionWhenTryingToWriteASecondValue() {
+        writer.writeStartDocument();
+        writer.writeName("f1");
+        writer.writeDouble(100);
+        assertThrows(BsonInvalidOperationException.class, () ->
+                writer.writeString("i2"));
+    }
+
+    @Test
+    public void shouldThrowAnExceptionWhenTryingToWriteJavaScript() {
+        writer.writeStartDocument();
+        writer.writeName("f1");
+        writer.writeDouble(100);
+        assertThrows(BsonInvalidOperationException.class, () ->
+                writer.writeJavaScript("var i"));
+    }
+
+    @Test
+    public void shouldThrowAnExceptionWhenWritingANameInAnArray() {
+        writer.writeStartDocument();
+        writer.writeStartArray("f2");
+        assertThrows(BsonInvalidOperationException.class, () ->
+                writer.writeName("i3"));
+    }
+
+    @Test
+    public void shouldThrowAnExceptionWhenEndingDocumentInTheMiddleOfWritingAnArray() {
+        writer.writeStartDocument();
+        writer.writeStartArray("f2");
+        assertThrows(BsonInvalidOperationException.class, () ->
+                writer.writeEndDocument());
+    }
+
+    @Test
+    public void shouldThrowAnExceptionWhenEndingAnArrayInASubDocument() {
+        writer.writeStartDocument();
+        writer.writeStartArray("f2");
+        writer.writeStartDocument();
+        assertThrows(BsonInvalidOperationException.class, () ->
+                writer.writeEndArray());
+    }
+
+    @Test
+    public void shouldThrowAnExceptionWhenWritingANameInAnArrayEvenWhenSubDocumentExistsInArray() {
+        writer.writeStartDocument();
+        writer.writeStartArray("f2");
+        writer.writeStartDocument();
+        writer.writeEndDocument();
+        assertThrows(BsonInvalidOperationException.class, () ->
+                writer.writeName("i3"));
+    }
+
+    @Test
+    public void shouldThrowAnExceptionWhenAttemptingToEndAnArrayThatWasNotStarted() {
+        writer.writeStartDocument();
+        writer.writeStartArray("f2");
+        writer.writeEndArray();
+        assertThrows(BsonInvalidOperationException.class, () ->
+                writer.writeEndArray());
+    }
+
+    @Test
+    public void shouldThrowAnErrorIfTryingToWriteNameIntoAJavascriptScope() {
+        writer.writeStartDocument();
+        writer.writeJavaScriptWithScope("js1", "var i = 1");
+        assertThrows(BsonInvalidOperationException.class, () ->
+                writer.writeName("b1"));
+    }
+
+    @Test
+    public void shouldThrowAnErrorIfTryingToWriteValueIntoAJavascriptScope() {
+        writer.writeStartDocument();
+        writer.writeJavaScriptWithScope("js1", "var i = 1");
+        assertThrows(BsonInvalidOperationException.class, () ->
+                writer.writeBinaryData(new BsonBinary(new byte[]{0, 0, 1, 0})));
+    }
+
+    @Test
+    public void shouldThrowAnErrorIfTryingToWriteArrayIntoAJavascriptScope() {
+        writer.writeStartDocument();
+        writer.writeJavaScriptWithScope("js1", "var i = 1");
+        assertThrows(BsonInvalidOperationException.class, () ->
+                writer.writeStartArray());
+    }
+
+    @Test
+    public void shouldThrowAnErrorIfTryingToWriteEndDocumentIntoAJavascriptScope() {
+        writer.writeStartDocument();
+        writer.writeJavaScriptWithScope("js1", "var i = 1");
+        assertThrows(BsonInvalidOperationException.class, () ->
+                writer.writeEndDocument());
+    }
+
+    @Test
+    public void testEmptyDocument() {
+        writer.writeStartDocument();
+        writer.writeEndDocument();
+        String expected = "{}";
+        assertEquals(expected, stringWriter.toString());
+    }
+
+    @Test
+    public void testSingleElementDocument() {
+        writer.writeStartDocument();
+        writer.writeName("s");
+        writer.writeString("str");
+        writer.writeEndDocument();
+        String expected = "{\"s\": \"str\"}";
+        assertEquals(expected, stringWriter.toString());
+    }
+
+    @Test
+    public void testTwoElementDocument() {
+        writer.writeStartDocument();
+        writer.writeName("s");
+        writer.writeString("str");
+        writer.writeName("d");
+        writer.writeString("str2");
+        writer.writeEndDocument();
+        String expected = "{\"s\": \"str\", \"d\": \"str2\"}";
+        assertEquals(expected, stringWriter.toString());
+    }
+
+    @Test
+    public void testNestedDocument() {
+        writer.writeStartDocument();
+        writer.writeName("doc");
+        writer.writeStartDocument();
+        writer.writeName("doc");
+        writer.writeStartDocument();
+        writer.writeName("s");
+        writer.writeString("str");
+        writer.writeEndDocument();
+        writer.writeEndDocument();
+        writer.writeEndDocument();
+        String expected = "{\"doc\": {\"doc\": {\"s\": \"str\"}}}";
+        assertEquals(expected, stringWriter.toString());
+    }
+
+    @Test
+    public void testSingleString() {
+        writer.writeStartDocument();
+        writer.writeString("abc", "xyz");
+        writer.writeEndDocument();
+        String expected = "{\"abc\": \"xyz\"}";
+        assertEquals(expected, stringWriter.toString());
+    }
+
+    @Test
+    public void testBoolean() {
+        writer.writeStartDocument();
+        writer.writeBoolean("abc", true);
+        writer.writeEndDocument();
+        String expected = "{\"abc\": true}";
+        assertEquals(expected, stringWriter.toString());
+    }
+
+    @Test
+    public void testDouble() {
+        List<TestData<Double>> tests = asList(new TestData<>(0.0, "0.0"), new TestData<>(0.0005, "5.0E-4"),
+                new TestData<>(0.5, "0.5"), new TestData<>(1.0, "1.0"),
+                new TestData<>(1.5, "1.5"), new TestData<>(1.5E+40, "1.5E40"),
+                new TestData<>(1.5E-40, "1.5E-40"),
+                new TestData<>(1234567890.1234568E+123, "1.2345678901234568E132"),
+                new TestData<>(Double.MAX_VALUE, "1.7976931348623157E308"),
+                new TestData<>(Double.MIN_VALUE, "4.9E-324"),
+
+                new TestData<>(-0.0005, "-5.0E-4"),
+                new TestData<>(-0.5, "-0.5"),
+                new TestData<>(-1.0, "-1.0"),
+                new TestData<>(-1.5, "-1.5"),
+                new TestData<>(-1.5E+40, "-1.5E40"),
+                new TestData<>(-1.5E-40, "-1.5E-40"),
+                new TestData<>(-1234567890.1234568E+123, "-1.2345678901234568E132"),
+
+                new TestData<>(Double.NaN, "NaN"),
+                new TestData<>(Double.NEGATIVE_INFINITY, "-Infinity"),
+                new TestData<>(Double.POSITIVE_INFINITY, "Infinity"));
+        for (final TestData<Double> cur : tests) {
+            stringWriter = new StringWriter();
+            writer = new JsonWriter(stringWriter, JsonWriterSettings.builder().outputMode(JsonMode.EXTENDED).build());
+            writer.writeStartDocument();
+            writer.writeDouble("d", cur.value);
+            writer.writeEndDocument();
+            String expected = "{\"d\": {\"$numberDouble\": \"" + cur.expected + "\"}}";
+            assertEquals(expected, stringWriter.toString());
+        }
+    }
+
+    @Test
+    public void testInt64Shell() {
+        List<TestData<Long>> tests = asList(new TestData<>(Long.MIN_VALUE, "NumberLong(\"-9223372036854775808\")"),
+                new TestData<>(Integer.MIN_VALUE - 1L, "NumberLong(\"-2147483649\")"),
+                new TestData<>((long) Integer.MIN_VALUE, "NumberLong(-2147483648)"),
+                new TestData<>(0L, "NumberLong(0)"),
+                new TestData<>((long) Integer.MAX_VALUE, "NumberLong(2147483647)"),
+                new TestData<>(Integer.MAX_VALUE + 1L, "NumberLong(\"2147483648\")"),
+                new TestData<>(Long.MAX_VALUE, "NumberLong(\"9223372036854775807\")"));
+        for (final TestData<Long> cur : tests) {
+            stringWriter = new StringWriter();
+            writer = new JsonWriter(stringWriter, JsonWriterSettings.builder().outputMode(JsonMode.SHELL).build());
+            writer.writeStartDocument();
+            writer.writeInt64("l", cur.value);
+            writer.writeEndDocument();
+            String expected = "{\"l\": " + cur.expected + "}";
+            assertEquals(expected, stringWriter.toString());
+        }
+    }
+
+    @Test
+    public void testInt64Relaxed() {
+        List<TestData<Long>> tests = asList(new TestData<>(Long.MIN_VALUE, "-9223372036854775808"),
+                new TestData<>(Integer.MIN_VALUE - 1L, "-2147483649"),
+                new TestData<>((long) Integer.MIN_VALUE, "-2147483648"),
+                new TestData<>(0L, "0"),
+                new TestData<>((long) Integer.MAX_VALUE, "2147483647"),
+                new TestData<>(Integer.MAX_VALUE + 1L, "2147483648"),
+                new TestData<>(Long.MAX_VALUE, "9223372036854775807"));
+
+        for (final TestData<Long> cur : tests) {
+            stringWriter = new StringWriter();
+            writer = new JsonWriter(stringWriter, JsonWriterSettings.builder().outputMode(JsonMode.RELAXED).build());
+            writer.writeStartDocument();
+            writer.writeInt64("l", cur.value);
+            writer.writeEndDocument();
+            String expected = "{\"l\": " + cur.expected + "}";
+            assertEquals(expected, stringWriter.toString());
+        }
+    }
+
+    @Test
+    public void testDecimal128Shell() {
+        List<TestData<Decimal128>> tests = asList(
+                new TestData<>(Decimal128.parse("1.0"), "1.0"),
+                new TestData<>(Decimal128.POSITIVE_INFINITY, Decimal128.POSITIVE_INFINITY.toString()));
+
+        for (final TestData<Decimal128> cur : tests) {
+            stringWriter = new StringWriter();
+            writer = new JsonWriter(stringWriter, JsonWriterSettings.builder().outputMode(JsonMode.SHELL).build());
+            writer.writeStartDocument();
+            writer.writeDecimal128("d", cur.value);
+            writer.writeEndDocument();
+            String expected = "{\"d\": NumberDecimal(\"" + cur.expected + "\")}";
+            assertEquals(expected, stringWriter.toString());
+        }
+    }
+
+    @Test
+    public void testDecimal128Relaxed() {
+        List<TestData<Decimal128>> tests = asList(
+                new TestData<>(Decimal128.parse("1.0"), "1.0"),
+                new TestData<>(Decimal128.POSITIVE_INFINITY, Decimal128.POSITIVE_INFINITY.toString()));
+
+        for (final TestData<Decimal128> cur : tests) {
+            stringWriter = new StringWriter();
+            writer = new JsonWriter(stringWriter, JsonWriterSettings.builder().outputMode(JsonMode.RELAXED).build());
+            writer.writeStartDocument();
+            writer.writeDecimal128("d", cur.value);
+            writer.writeEndDocument();
+            String expected = "{\"d\": {\"$numberDecimal\": \"" + cur.expected + "\"}}";
+            assertEquals(expected, stringWriter.toString());
+        }
+    }
+
+    @Test
+    public void testArray() {
+        writer.writeStartDocument();
+        writer.writeStartArray("array");
+        writer.writeInt32(1);
+        writer.writeInt32(2);
+        writer.writeInt32(3);
+        writer.writeEndArray();
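+        // ending the array returns the writer to the enclosing document, which is closed next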
+        writer.writeEndDocument();
+        String expected = "{\"array\": [1, 2, 3]}";
+        assertEquals(expected, stringWriter.toString());
+    }
+
+    @Test
+    public void testBinaryRelaxed() {
+        List<TestData<BsonBinary>> tests = asList(new TestData<>(new BsonBinary(new byte[0]),
+                        "{\"$binary\": {\"base64\": \"\", " + "\"subType\": \"00\"}}"),
+                new TestData<>(new BsonBinary(new byte[]{1}),
+                        "{\"$binary\": {\"base64\": \"AQ==\", " + "\"subType\": \"00\"}}"),
+                new TestData<>(new BsonBinary(new byte[]{1, 2}),
+                        "{\"$binary\": {\"base64\": \"AQI=\", " + "\"subType\": \"00\"}}"),
+                new TestData<>(new BsonBinary(new byte[]{1, 2, 3}),
+                        "{\"$binary\": {\"base64\": \"AQID\", " + "\"subType\": \"00\"}}"),
+                new TestData<>(new BsonBinary((byte) 0x80, new byte[]{1, 2, 3}),
+                        "{\"$binary\": {\"base64\": \"AQID\", " + "\"subType\": \"80\"}}"));
+        for (final TestData<BsonBinary> cur : tests) {
+            stringWriter = new StringWriter();
+            writer = new JsonWriter(stringWriter, JsonWriterSettings.builder().outputMode(JsonMode.RELAXED).build());
+            writer.writeStartDocument();
+            writer.writeBinaryData("binary", cur.value);
+            writer.writeEndDocument();
+            String expected = "{\"binary\": " + cur.expected + "}";
+            assertEquals(expected, stringWriter.toString());
+        }
+    }
+
+    @Test
+    public void testBinaryShell() {
+        List<TestData<BsonBinary>> tests = asList(new TestData<>(new BsonBinary(new byte[0]), "new BinData(0, \"\")"),
+                new TestData<>(new BsonBinary(new byte[]{1}), "new BinData(0, \"AQ==\")"),
+                new TestData<>(new BsonBinary(new byte[]{1, 2}), "new BinData(0, \"AQI=\")"),
+                new TestData<>(new BsonBinary(new byte[]{1, 2, 3}), "new BinData(0, \"AQID\")"),
+                new TestData<>(new BsonBinary((byte) 0x80, new byte[]{1, 2, 3}),
+                        "new BinData(128, \"AQID\")"));
+        for (final TestData<BsonBinary> cur : tests) {
+            stringWriter = new StringWriter();
+            writer = new JsonWriter(stringWriter, JsonWriterSettings.builder().outputMode(JsonMode.SHELL).build());
+            writer.writeStartDocument();
+            writer.writeBinaryData("binary", cur.value);
+            writer.writeEndDocument();
+            String expected = "{\"binary\": " + cur.expected + "}";
+            assertEquals(expected, stringWriter.toString());
+        }
+    }
+
+    @Test
+    public void testDateTimeRelaxed() {
+        List<TestData<Date>> tests = asList(new TestData<>(new Date(0), "{\"$date\": \"1970-01-01T00:00:00Z\"}"),
+                new TestData<>(new Date(Long.MAX_VALUE), "{\"$date\": {\"$numberLong\": \"9223372036854775807\"}}"),
+                new TestData<>(new Date(Long.MIN_VALUE), "{\"$date\": {\"$numberLong\": \"-9223372036854775808\"}}"));
+        for (final TestData<Date> cur : tests) {
+            stringWriter = new StringWriter();
+            writer = new JsonWriter(stringWriter, JsonWriterSettings.builder().outputMode(JsonMode.RELAXED).build());
+            writer.writeStartDocument();
+            writer.writeDateTime("date", cur.value.getTime());
+            writer.writeEndDocument();
+            String expected = "{\"date\": " + cur.expected + "}";
+            assertEquals(expected, stringWriter.toString());
+        }
+    }
+
+    @Test
+    public void testDateTimeShell() {
+        List<TestData<Date>> tests = asList(new TestData<>(new Date(0), "ISODate(\"1970-01-01T00:00:00.000Z\")"),
+                new TestData<>(new Date(1), "ISODate(\"1970-01-01T00:00:00.001Z\")"),
+                new TestData<>(new Date(-1), "ISODate(\"1969-12-31T23:59:59.999Z\")"),
+                new TestData<>(new Date(Long.MAX_VALUE), "new Date(9223372036854775807)"),
+                new TestData<>(new Date(Long.MIN_VALUE), "new Date(-9223372036854775808)"));
+        for (final TestData<Date> cur : tests) {
+            stringWriter = new StringWriter();
+            writer = new JsonWriter(stringWriter, JsonWriterSettings.builder().outputMode(JsonMode.SHELL).build());
+            writer.writeStartDocument();
+            writer.writeDateTime("date", cur.value.getTime());
+            writer.writeEndDocument();
+            String expected = "{\"date\": " + cur.expected + "}";
+            assertEquals(expected, stringWriter.toString());
+        }
+    }
+
+    @Test
+    public void testJavaScript() {
+        writer.writeStartDocument();
+        writer.writeJavaScript("f", "function f() { return 1; }");
+        writer.writeEndDocument();
+        String expected = "{\"f\": {\"$code\": \"function f() { return 1; }\"}}";
+        assertEquals(expected, stringWriter.toString());
+    }
+
+    @Test
+    public void testJavaScriptWithScope() {
+        writer.writeStartDocument();
+        writer.writeJavaScriptWithScope("f", "function f() { return n; }");
+        writer.writeStartDocument();
+        writer.writeInt32("n", 1);
+        writer.writeEndDocument();
+        writer.writeEndDocument();
+        String expected =
+                "{\"f\": {\"$code\": \"function f() { return n; }\", " + "\"$scope\": {\"n\": 1}}}";
+        assertEquals(expected, stringWriter.toString());
+    }
+
+    @Test
+    public void testMaxKeyStrict() {
+        writer.writeStartDocument();
+        writer.writeMaxKey("maxkey");
+        writer.writeEndDocument();
+        String expected = "{\"maxkey\": {\"$maxKey\": 1}}";
+        assertEquals(expected, stringWriter.toString());
+    }
+
+    @Test
+    public void testMinKeyStrict() {
+        writer.writeStartDocument();
+        writer.writeMinKey("minkey");
+        writer.writeEndDocument();
+        String expected = "{\"minkey\": {\"$minKey\": 1}}";
+        assertEquals(expected, stringWriter.toString());
+    }
+
+    @Test
+    public void testMaxKeyShell() {
+        writer = new JsonWriter(stringWriter, JsonWriterSettings.builder().outputMode(JsonMode.SHELL).build());
+        writer.writeStartDocument();
+        writer.writeMaxKey("maxkey");
+        writer.writeEndDocument();
+        String expected = "{\"maxkey\": MaxKey}";
+        assertEquals(expected, stringWriter.toString());
+    }
+
+    @Test
+    public void testMinKeyShell() {
+        writer = new JsonWriter(stringWriter, JsonWriterSettings.builder().outputMode(JsonMode.SHELL).build());
+        writer.writeStartDocument();
+        writer.writeMinKey("minkey");
+        writer.writeEndDocument();
+        String expected = "{\"minkey\": MinKey}";
+        assertEquals(expected, stringWriter.toString());
+    }
+
+    @Test
+    public void testNull() {
+        writer.writeStartDocument();
+        writer.writeNull("null");
+        writer.writeEndDocument();
+        String expected = "{\"null\": null}";
+        assertEquals(expected, stringWriter.toString());
+    }
+
+    @Test
+    public void testObjectIdShell() {
+        writer = new JsonWriter(stringWriter, JsonWriterSettings.builder().outputMode(JsonMode.SHELL).build());
+        ObjectId objectId = new ObjectId("4d0ce088e447ad08b4721a37");
+
+        writer.writeStartDocument();
+        writer.writeObjectId("_id", objectId);
+        writer.writeEndDocument();
+
+        String expected = "{\"_id\": ObjectId(\"4d0ce088e447ad08b4721a37\")}";
+        assertEquals(expected, stringWriter.toString());
+    }
+
+    @Test
+    public void testObjectIdStrict() {
+        ObjectId objectId = new ObjectId("4d0ce088e447ad08b4721a37");
+
+        writer.writeStartDocument();
+        writer.writeObjectId("_id", objectId);
+        writer.writeEndDocument();
+
+        String expected = "{\"_id\": {\"$oid\": \"4d0ce088e447ad08b4721a37\"}}";
+        assertEquals(expected, stringWriter.toString());
+    }
+
+    @Test
+    public void testRegularExpressionShell() {
+        List<TestData<BsonRegularExpression>> tests;
+        tests = asList(new TestData<>(new BsonRegularExpression(""), "/(?:)/"),
+                new TestData<>(new BsonRegularExpression("a"), "/a/"),
+                new TestData<>(new BsonRegularExpression("a/b"), "/a\\/b/"),
+                new TestData<>(new BsonRegularExpression("a\\b"), "/a\\b/"),
+                new TestData<>(new BsonRegularExpression("a", "i"), "/a/i"),
+                new TestData<>(new BsonRegularExpression("a", "m"), "/a/m"),
TestData<>(new BsonRegularExpression("a", "x"), "/a/x"), + new TestData<>(new BsonRegularExpression("a", "s"), "/a/s"), + new TestData<>(new BsonRegularExpression("a", "imxs"), "/a/imsx")); + for (final TestData cur : tests) { + stringWriter = new StringWriter(); + writer = new JsonWriter(stringWriter, JsonWriterSettings.builder().outputMode(JsonMode.SHELL).build()); + writer.writeStartDocument(); + writer.writeRegularExpression("regex", cur.value); + writer.writeEndDocument(); + String expected = "{\"regex\": " + cur.expected + "}"; + assertEquals(expected, stringWriter.toString()); + } + } + + @Test + public void testRegularExpressionRelaxed() { + List> tests; + tests = asList(new TestData<>(new BsonRegularExpression(""), + "{\"$regularExpression\": {\"pattern\": \"\", \"options\": \"\"}}"), + new TestData<>(new BsonRegularExpression("a"), + "{\"$regularExpression\": {\"pattern\": \"a\", \"options\": \"\"}}"), + new TestData<>(new BsonRegularExpression("a/b"), + "{\"$regularExpression\": {\"pattern\": \"a/b\", \"options\": \"\"}}"), + new TestData<>(new BsonRegularExpression("a\\b"), + "{\"$regularExpression\": {\"pattern\": \"a\\\\b\", \"options\": \"\"}}"), + new TestData<>(new BsonRegularExpression("a", "i"), + "{\"$regularExpression\": {\"pattern\": \"a\", \"options\": \"i\"}}"), + new TestData<>(new BsonRegularExpression("a", "m"), + "{\"$regularExpression\": {\"pattern\": \"a\", \"options\": \"m\"}}"), + new TestData<>(new BsonRegularExpression("a", "x"), + "{\"$regularExpression\": {\"pattern\": \"a\", \"options\": \"x\"}}"), + new TestData<>(new BsonRegularExpression("a", "s"), + "{\"$regularExpression\": {\"pattern\": \"a\", \"options\": \"s\"}}"), + new TestData<>(new BsonRegularExpression("a", "imxs"), + "{\"$regularExpression\": {\"pattern\": \"a\", \"options\": \"imsx\"}}")); + for (final TestData cur : tests) { + stringWriter = new StringWriter(); + writer = new JsonWriter(stringWriter, JsonWriterSettings.builder().outputMode(JsonMode.RELAXED).build()); + writer.writeStartDocument(); + writer.writeRegularExpression("regex", cur.value); + writer.writeEndDocument(); + String expected = "{\"regex\": " + cur.expected + "}"; + assertEquals(expected, stringWriter.toString()); + } + } + + @Test + public void testSymbol() { + writer.writeStartDocument(); + writer.writeSymbol("symbol", "name"); + writer.writeEndDocument(); + String expected = "{\"symbol\": {\"$symbol\": \"name\"}}"; + assertEquals(expected, stringWriter.toString()); + } + + // + @Test + public void testTimestampStrict() { + writer.writeStartDocument(); + writer.writeTimestamp("timestamp", new BsonTimestamp(1000, 1)); + writer.writeEndDocument(); + String expected = "{\"timestamp\": {\"$timestamp\": {\"t\": 1000, \"i\": 1}}}"; + assertEquals(expected, stringWriter.toString()); + } + + @Test + public void testTimestampShell() { + writer = new JsonWriter(stringWriter, JsonWriterSettings.builder().outputMode(JsonMode.SHELL).build()); + writer.writeStartDocument(); + writer.writeTimestamp("timestamp", new BsonTimestamp(1000, 1)); + writer.writeEndDocument(); + String expected = "{\"timestamp\": Timestamp(1000, 1)}"; + assertEquals(expected, stringWriter.toString()); + } + + @Test + public void testUndefinedRelaxed() { + writer = new JsonWriter(stringWriter, JsonWriterSettings.builder().outputMode(JsonMode.RELAXED).build()); + writer.writeStartDocument(); + writer.writeUndefined("undefined"); + writer.writeEndDocument(); + String expected = "{\"undefined\": {\"$undefined\": true}}"; + assertEquals(expected, 
stringWriter.toString()); + } + + @Test + public void testUndefinedShell() { + writer = new JsonWriter(stringWriter, JsonWriterSettings.builder().outputMode(JsonMode.SHELL).build()); + writer.writeStartDocument(); + writer.writeUndefined("undefined"); + writer.writeEndDocument(); + String expected = "{\"undefined\": undefined}"; + assertEquals(expected, stringWriter.toString()); + } + + @Test + public void testDBPointer() { + writer.writeStartDocument(); + writer.writeDBPointer("dbPointer", new BsonDbPointer("my.test", new ObjectId("4d0ce088e447ad08b4721a37"))); + writer.writeEndDocument(); + String expected = "{\"dbPointer\": {\"$ref\": \"my.test\", \"$id\": {\"$oid\": \"4d0ce088e447ad08b4721a37\"}}}"; + assertEquals(expected, stringWriter.toString()); + } +} diff --git a/bson/src/test/unit/org/bson/json/StrictCharacterStreamJsonWriterSpecification.groovy b/bson/src/test/unit/org/bson/json/StrictCharacterStreamJsonWriterSpecification.groovy new file mode 100644 index 00000000000..8a3d16036f3 --- /dev/null +++ b/bson/src/test/unit/org/bson/json/StrictCharacterStreamJsonWriterSpecification.groovy @@ -0,0 +1,569 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.json + +import org.bson.BsonInvalidOperationException +import spock.lang.Specification + +import static java.lang.String.format + +class StrictCharacterStreamJsonWriterSpecification extends Specification { + + private StringWriter stringWriter + private StrictCharacterStreamJsonWriter writer + + def setup() { + stringWriter = new StringWriter() + writer = new StrictCharacterStreamJsonWriter(stringWriter, StrictCharacterStreamJsonWriterSettings.builder().build()) + } + + def 'should write empty document'() { + when: + writer.writeStartObject() + writer.writeEndObject() + + then: + stringWriter.toString() == '{}' + } + + def 'should write empty array'() { + when: + writer.writeStartArray() + writer.writeEndArray() + + then: + stringWriter.toString() == '[]' + } + + def 'should write null'() { + when: + writer.writeStartObject() + writer.writeNull('n') + writer.writeEndObject() + + then: + stringWriter.toString() == '{"n": null}' + } + + def 'should write boolean'() { + when: + writer.writeStartObject() + writer.writeBoolean('b1', true) + writer.writeEndObject() + + then: + stringWriter.toString() == '{"b1": true}' + } + + def 'should write number'() { + when: + writer.writeStartObject() + writer.writeNumber('n', '42') + writer.writeEndObject() + + then: + stringWriter.toString() == '{"n": 42}' + } + + def 'should write string'() { + when: + writer.writeStartObject() + writer.writeString('n', '42') + writer.writeEndObject() + + then: + stringWriter.toString() == '{"n": "42"}' + } + + def 'should write unquoted string'() { + when: + writer.writeStartObject() + writer.writeRaw('s', 'NumberDecimal("42.0")') + writer.writeEndObject() + + then: + stringWriter.toString() == '{"s": NumberDecimal("42.0")}' + } + + def 'should write document'() { + when: + 
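+ // writeStartObject('d') opens an object-valued member; closing both objects should yield {"d": {}}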
writer.writeStartObject() + writer.writeStartObject('d') + writer.writeEndObject() + writer.writeEndObject() + + then: + stringWriter.toString() == '{"d": {}}' + } + + def 'should write array'() { + when: + writer.writeStartObject() + writer.writeStartArray('a') + writer.writeEndArray() + writer.writeEndObject() + + then: + stringWriter.toString() == '{"a": []}' + } + + def 'should write array of values'() { + when: + writer.writeStartObject() + writer.writeStartArray('a') + writer.writeNumber('1') + writer.writeNull() + writer.writeString('str') + writer.writeEndArray() + writer.writeEndObject() + + then: + stringWriter.toString() == '{"a": [1, null, "str"]}' + } + + def 'should write strings'() { + when: + writer.writeStartObject() + writer.writeString('str', value) + writer.writeEndObject() + + then: + stringWriter.toString() == '{"str": ' + expected + '}' + + where: + value | expected + '' | '""' + ' ' | '" "' + 'a' | '"a"' + 'ab' | '"ab"' + 'abc' | '"abc"' + 'abc\u0000def' | '"abc\\u0000def"' + '\\' | '"\\\\"' + '\'' | '"\'"' + '"' | '"\\""' + '\0' | '"\\u0000"' + '\b' | '"\\b"' + '\f' | '"\\f"' + '\n' | '"\\n"' + '\r' | '"\\r"' + '\t' | '"\\t"' + '\u0080' | '"\\u0080"' + '\u0080\u0081' | '"\\u0080\\u0081"' + '\u0080\u0081\u0082' | '"\\u0080\\u0081\\u0082"' + } + + def 'should write two object elements'() { + when: + writer.writeStartObject() + writer.writeBoolean('b1', true) + writer.writeBoolean('b2', false) + writer.writeEndObject() + + then: + stringWriter.toString() == '{"b1": true, "b2": false}' + } + + def 'should indent one element'() { + given: + writer = new StrictCharacterStreamJsonWriter(stringWriter, StrictCharacterStreamJsonWriterSettings.builder().indent(true).build()) + + when: + writer.writeStartObject() + writer.writeString('name', 'value') + writer.writeEndObject() + + then: + stringWriter.toString() == format('{%n "name": "value"%n}') + } + + def 'should indent one element with indent and newline characters'() { + given: + writer = new StrictCharacterStreamJsonWriter(stringWriter, StrictCharacterStreamJsonWriterSettings.builder() + .indent(true) + .indentCharacters('\t') + .newLineCharacters('\r') + .build()) + + when: + writer.writeStartObject() + writer.writeString('name', 'value') + writer.writeEndObject() + + then: + stringWriter.toString() == format('{\r\t"name": "value"\r}') + } + + def 'should indent two elements'() { + given: + writer = new StrictCharacterStreamJsonWriter(stringWriter, StrictCharacterStreamJsonWriterSettings.builder().indent(true).build()) + + when: + writer.writeStartObject() + writer.writeString('a', 'x') + writer.writeString('b', 'y') + writer.writeEndObject() + + then: + stringWriter.toString() == format('{%n "a": "x",%n "b": "y"%n}') + } + + def 'should indent two array elements'() { + given: + writer = new StrictCharacterStreamJsonWriter(stringWriter, StrictCharacterStreamJsonWriterSettings.builder().indent(true).build()) + + when: + writer.writeStartObject() + writer.writeStartArray('a') + writer.writeNull() + writer.writeNumber('4') + writer.writeEndArray() + writer.writeEndObject() + + then: + stringWriter.toString() == format('{%n "a": [%n null,%n 4%n ]%n}') + } + + def 'should indent two document elements'() { + given: + writer = new StrictCharacterStreamJsonWriter(stringWriter, StrictCharacterStreamJsonWriterSettings.builder().indent(true).build()) + + when: + writer.writeStartObject() + writer.writeStartArray('a') + writer.writeStartObject() + writer.writeNull('a') + writer.writeEndObject() + writer.writeStartObject() + 
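+ // second array element: another single-field document, expected to be indented identically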
writer.writeNull('a') + writer.writeEndObject() + writer.writeEndArray() + writer.writeEndObject() + + then: + stringWriter.toString() == format('{%n "a": [%n {%n "a": null%n },%n {%n "a": null%n }%n ]%n}') + } + + def 'should indent embedded document'() { + given: + writer = new StrictCharacterStreamJsonWriter(stringWriter, StrictCharacterStreamJsonWriterSettings.builder().indent(true).build()) + + when: + writer.writeStartObject() + writer.writeStartObject('doc') + writer.writeNumber('a', '1') + writer.writeNumber('b', '2') + writer.writeEndObject() + writer.writeEndObject() + + then: + stringWriter.toString() == format('{%n "doc": {%n "a": 1,%n "b": 2%n }%n}') + } + + def shouldThrowExceptionForBooleanWhenWritingBeforeStartingDocument() { + when: + writer.writeBoolean('b1', true) + + then: + thrown(BsonInvalidOperationException) + } + + def shouldThrowExceptionForNameWhenWritingBeforeStartingDocument() { + when: + writer.writeName('name') + + then: + thrown(BsonInvalidOperationException) + } + + def shouldThrowExceptionForStringWhenStateIsValue() { + given: + writer.writeStartObject() + + when: + writer.writeString('SomeString') + + then: + thrown(BsonInvalidOperationException) + } + + def shouldThrowExceptionWhenEndingAnArrayWhenStateIsValue() { + given: + writer.writeStartObject() + + when: + writer.writeEndArray() + + then: + thrown(BsonInvalidOperationException) + } + + def shouldThrowExceptionWhenWritingASecondName() { + given: + writer.writeStartObject() + writer.writeName('f1') + + when: + writer.writeName('i2') + + then: + thrown(BsonInvalidOperationException) + } + + def shouldThrowExceptionWhenEndingADocumentBeforeValueIsWritten() { + given: + writer.writeStartObject() + writer.writeName('f1') + + when: + writer.writeEndObject() + + then: + thrown(BsonInvalidOperationException) + } + + def shouldThrowAnExceptionWhenTryingToWriteAValue() { + when: + writer.writeString('i2') + + then: + thrown(BsonInvalidOperationException) + } + + def shouldThrowAnExceptionWhenWritingANameInAnArray() { + given: + writer.writeStartObject() + writer.writeStartArray('f2') + + when: + writer.writeName('i3') + + then: + thrown(BsonInvalidOperationException) + } + + def shouldThrowAnExceptionWhenEndingDocumentInTheMiddleOfWritingAnArray() { + given: + writer.writeStartObject() + writer.writeStartArray('f2') + + when: + writer.writeEndObject() + + then: + thrown(BsonInvalidOperationException) + } + + def shouldThrowAnExceptionWhenEndingAnArrayInASubDocument() { + given: + writer.writeStartObject() + writer.writeStartArray('f2') + writer.writeStartObject() + + when: + writer.writeEndArray() + + then: + thrown(BsonInvalidOperationException) + } + + def shouldThrowAnExceptionWhenEndingAnArrayWhenValueIsExpected() { + given: + writer.writeStartObject() + writer.writeName('a') + + when: + writer.writeEndArray() + + then: + thrown(BsonInvalidOperationException) + } + + def shouldThrowAnExceptionWhenWritingANameInAnArrayEvenWhenSubDocumentExistsInArray() { + given: + writer.writeStartObject() + writer.writeStartArray('f2') + writer.writeStartObject() + writer.writeEndObject() + + when: + writer.writeName('i3') + + then: + thrown(BsonInvalidOperationException) + } + + def shouldThrowAnExceptionWhenStartingAnObjectWhenDone() { + given: + writer.writeStartObject() + writer.writeEndObject() + + when: + writer.writeStartObject() + + then: + thrown(BsonInvalidOperationException) + } + + def shouldThrowAnExceptionWhenStartingAnObjectWhenNameIsExpected() { + given: + writer.writeStartObject() + + when: + 
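+ // an unnamed writeStartObject() is invalid here, since a member name is expected first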
writer.writeStartObject() + + then: + thrown(BsonInvalidOperationException) + } + + def shouldThrowAnExceptionWhenAttemptingToEndAnArrayThatWasNotStarted() { + given: + writer.writeStartObject() + writer.writeStartArray('f2') + writer.writeEndArray() + + when: + writer.writeEndArray() + + then: + thrown(BsonInvalidOperationException) + } + + def shouldThrowAnExceptionWhenWritingNullName() { + given: + writer.writeStartObject() + + when: + writer.writeName(null) + + then: + thrown(IllegalArgumentException) + } + + def shouldThrowAnExceptionWhenWritingNullValue() { + given: + writer.writeStartObject() + writer.writeName('v') + + when: + writer.writeNumber(null) + + then: + thrown(IllegalArgumentException) + + when: + writer.writeString(null) + + then: + thrown(IllegalArgumentException) + + when: + writer.writeRaw(null) + + then: + thrown(IllegalArgumentException) + } + + def shouldThrowAnExceptionWhenWritingNullMemberValue() { + given: + writer.writeStartObject() + + when: + writer.writeNumber('v', null) + + then: + thrown(IllegalArgumentException) + + when: + writer.writeString('v', null) + + then: + thrown(IllegalArgumentException) + + when: + writer.writeRaw('v', null) + + then: + thrown(IllegalArgumentException) + } + + def shouldThrowAnExceptionWhenWritingNullMemberName() { + given: + writer.writeStartObject() + + when: + writer.writeNumber(null, '1') + + then: + thrown(IllegalArgumentException) + + when: + writer.writeString(null, 'str') + + then: + thrown(IllegalArgumentException) + + when: + writer.writeRaw(null, 'raw') + + then: + thrown(IllegalArgumentException) + + when: + writer.writeBoolean(null, true) + + then: + thrown(IllegalArgumentException) + + when: + writer.writeNull(null) + + then: + thrown(IllegalArgumentException) + + when: + writer.writeStartObject(null) + + then: + thrown(IllegalArgumentException) + + when: + writer.writeStartArray(null) + + then: + thrown(IllegalArgumentException) + } + + def shouldStopAtMaxLength() { + given: + def fullJsonText = '{"n": null}' + writer = new StrictCharacterStreamJsonWriter(stringWriter, + StrictCharacterStreamJsonWriterSettings.builder().maxLength(maxLength).build()) + + when: + writer.writeStartObject() + writer.writeNull('n') + writer.writeEndObject() + + then: + stringWriter.toString() == fullJsonText[0.. 
validate(uuidString)); + } + + @ParameterizedTest + @ValueSource(strings = { + "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaa", + "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaaa", + "aaaaaaaa+aaaa-aaaa-aaaa-aaaaaaaaaaaa", + "aaaaaaaa-aaaa+aaaa-aaaa-aaaaaaaaaaaa", + "aaaaaaaa-aaaa-aaaa+aaaa-aaaaaaaaaaaa", + "aaaaaaaa-aaaa-aaaa-aaaa+aaaaaaaaaaaa", + "`aaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", + "{aaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", + "@aaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", + "[aaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", + "/aaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", + ":aaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", + "a:aaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", + "aa:aaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", + "aaa:aaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", + "aaaa:aaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", + "aaaaa:aa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", + "aaaaaa:a-aaaa-aaaa-aaaa-aaaaaaaaaaaa", + "aaaaaaaa-:aaa-aaaa-aaaa-aaaaaaaaaaaa", + "aaaaaaaa-a:aa-aaaa-aaaa-aaaaaaaaaaaa", + "aaaaaaaa-aa:a-aaaa-aaaa-aaaaaaaaaaaa", + "aaaaaaaa-aaa:-aaaa-aaaa-aaaaaaaaaaaa", + "aaaaaaaa-aaaa-:aaa-aaaa-aaaaaaaaaaaa", + "aaaaaaaa-aaaa-a:aa-aaaa-aaaaaaaaaaaa", + "aaaaaaaa-aaaa-aa:a-aaaa-aaaaaaaaaaaa", + "aaaaaaaa-aaaa-aaa:-aaaa-aaaaaaaaaaaa", + "aaaaaaaa-aaaa-aaaa-:aaa-aaaaaaaaaaaa", + "aaaaaaaa-aaaa-aaaa-a:aa-aaaaaaaaaaaa", + "aaaaaaaa-aaaa-aaaa-aa:a-aaaaaaaaaaaa", + "aaaaaaaa-aaaa-aaaa-aaa:-aaaaaaaaaaaa", + "aaaaaaaa-aaaa-aaaa-aaaa-:aaaaaaaaaaa", + "aaaaaaaa-aaaa-aaaa-aaaa-a:aaaaaaaaaa", + "aaaaaaaa-aaaa-aaaa-aaaa-aa:aaaaaaaaa", + "aaaaaaaa-aaaa-aaaa-aaaa-aaa:aaaaaaaa", + "aaaaaaaa-aaaa-aaaa-aaaa-aaaa:aaaaaaa", + "aaaaaaaa-aaaa-aaaa-aaaa-aaaaa:aaaaaa", + "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaa:aaaaa", + "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaa:aaaa", + "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaa:aaa", + "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaa:aa", + "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaa:a", + "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaa:"}) + public void testInvalidUuidStrings(final String uuidString) { + assertThrows(IllegalArgumentException.class, () -> validate(uuidString)); + } +} diff --git a/bson/src/test/unit/org/bson/types/BSONBsonTimestampTest.java b/bson/src/test/unit/org/bson/types/BSONBsonTimestampTest.java new file mode 100644 index 00000000000..f2a210d1d3e --- /dev/null +++ b/bson/src/test/unit/org/bson/types/BSONBsonTimestampTest.java @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.bson.types; + +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class BSONBsonTimestampTest { + + @Test + public void testComparable() { + int currTime = (int) (System.currentTimeMillis() / 1000); + + BSONTimestamp t1 = new BSONTimestamp(currTime, 1); + BSONTimestamp t2 = new BSONTimestamp(currTime, 1); + + assertEquals(0, t1.compareTo(t2)); + + t2 = new BSONTimestamp(currTime, 2); + + assertTrue(t1.compareTo(t2) < 0); + assertTrue(t2.compareTo(t1) > 0); + + t2 = new BSONTimestamp(currTime + 1, 1); + + assertTrue(t1.compareTo(t2) < 0); + assertTrue(t2.compareTo(t1) > 0); + } +} diff --git a/bson/src/test/unit/org/bson/types/BasicBSONListSpecification.groovy b/bson/src/test/unit/org/bson/types/BasicBSONListSpecification.groovy new file mode 100644 index 00000000000..e7dc5cd4e54 --- /dev/null +++ b/bson/src/test/unit/org/bson/types/BasicBSONListSpecification.groovy @@ -0,0 +1,89 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.types + +import org.bson.BSONObject +import spock.lang.Specification + +class BasicBSONListSpecification extends Specification { + + def 'should support int keys'() { + when: + BSONObject obj = new BasicBSONList() + obj.put(0, 'a') + obj.put(1, 'b') + obj.put(2, 'c') + + then: + obj == ['a', 'b', 'c'] as BasicBSONList + } + + def 'should support keys that are strings which be converted to ints'() { + when: + BSONObject obj = new BasicBSONList() + obj.put('0', 'a') + obj.put('1', 'b') + obj.put('2', 'c') + + then: + obj == ['a', 'b', 'c'] as BasicBSONList + } + + def 'should throw IllegalArgumentException if passed invalid string key'() { + when: + BSONObject obj = new BasicBSONList() + obj.put('ZERO', 'a') + + then: + thrown IllegalArgumentException + } + + def 'should insert null values for missing keys'() { + when: + BSONObject obj = new BasicBSONList() + obj.put(0, 'a') + obj.put(1, 'b') + obj.put(5, 'c') + + then: + obj == ['a', 'b', null, null, null, 'c'] as BasicBSONList + } + + def 'should provide an iterable keySet'() { + when: + BSONObject obj = new BasicBSONList() + obj.put(0, 'a') + obj.put(1, 'b') + obj.put(5, 'c') + def iter = obj.keySet().iterator() + + then: + iter.hasNext() + iter.next() == '0' + iter.hasNext() + iter.next() == '1' + iter.hasNext() + iter.next() == '2' + iter.hasNext() + iter.next() == '3' + iter.hasNext() + iter.next() == '4' + iter.hasNext() + iter.next() == '5' + !iter.hasNext() + } +} diff --git a/bson/src/test/unit/org/bson/types/Decimal128Test.java b/bson/src/test/unit/org/bson/types/Decimal128Test.java new file mode 100644 index 00000000000..4d662aefb37 --- /dev/null +++ b/bson/src/test/unit/org/bson/types/Decimal128Test.java @@ -0,0 +1,603 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.types; + +import org.junit.jupiter.api.Test; + +import java.math.BigDecimal; + +import static org.bson.types.Decimal128.NEGATIVE_INFINITY; +import static org.bson.types.Decimal128.NEGATIVE_NaN; +import static org.bson.types.Decimal128.NEGATIVE_ZERO; +import static org.bson.types.Decimal128.NaN; +import static org.bson.types.Decimal128.POSITIVE_INFINITY; +import static org.bson.types.Decimal128.POSITIVE_ZERO; +import static org.bson.types.Decimal128.fromIEEE754BIDEncoding; +import static org.bson.types.Decimal128.parse; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +public class Decimal128Test { + + @Test + public void shouldHaveCorrectConstants() { + // expect + assertEquals(fromIEEE754BIDEncoding(0x3040000000000000L, 0x0000000000000000L), POSITIVE_ZERO); + assertEquals(fromIEEE754BIDEncoding(0xb040000000000000L, 0x0000000000000000L), NEGATIVE_ZERO); + assertEquals(fromIEEE754BIDEncoding(0x7800000000000000L, 0x0000000000000000L), POSITIVE_INFINITY); + assertEquals(fromIEEE754BIDEncoding(0xf800000000000000L, 0x0000000000000000L), NEGATIVE_INFINITY); + assertEquals(fromIEEE754BIDEncoding(0x7c00000000000000L, 0x0000000000000000L), NaN); + } + + @Test + public void shouldConstructFromHighAndLow() { + // given + Decimal128 val = fromIEEE754BIDEncoding(0x3040000000000000L, 0x0000000000000001L); + + // then + assertEquals(0x3040000000000000L, val.getHigh()); + assertEquals(0x0000000000000001L, val.getLow()); + } + + @Test + public void shouldConstructFromSimpleString() { + // expect + assertEquals(fromIEEE754BIDEncoding(0x3040000000000000L, 0x0000000000000000L), parse("0")); + assertEquals(fromIEEE754BIDEncoding(0xb040000000000000L, 0x0000000000000000L), parse("-0")); + assertEquals(fromIEEE754BIDEncoding(0x3040000000000000L, 0x0000000000000001L), parse("1")); + assertEquals(fromIEEE754BIDEncoding(0xb040000000000000L, 0x0000000000000001L), parse("-1")); + assertEquals(fromIEEE754BIDEncoding(0x3040000000000000L, 0x002bdc545d6b4b87L), parse("12345678901234567")); + assertEquals(fromIEEE754BIDEncoding(0x3040000000000000L, 0x000000e67a93c822L), parse("989898983458")); + assertEquals(fromIEEE754BIDEncoding(0xb040000000000000L, 0x002bdc545d6b4b87L), parse("-12345678901234567")); + assertEquals(fromIEEE754BIDEncoding(0x3036000000000000L, 0x0000000000003039L), parse("0.12345")); + assertEquals(fromIEEE754BIDEncoding(0x3032000000000000L, 0x0000000000003039L), parse("0.0012345")); + assertEquals(fromIEEE754BIDEncoding(0x3040000000000000L, 0x002bdc545d6b4b87L), parse("00012345678901234567")); + } + + @Test + public void shouldRoundExactly() { + // expect + assertEquals(parse("1.234567890123456789012345678901234"), 
parse("1.234567890123456789012345678901234")); + assertEquals(parse("1.234567890123456789012345678901234"), parse("1.2345678901234567890123456789012340")); + assertEquals(parse("1.234567890123456789012345678901234"), parse("1.23456789012345678901234567890123400")); + assertEquals(parse("1.234567890123456789012345678901234"), parse("1.234567890123456789012345678901234000")); + } + + @Test + public void shouldClampPositiveExponents() { + // expect + assertEquals(parse("10E6111"), parse("1E6112")); + assertEquals(parse("100E6111"), parse("1E6113")); + assertEquals(parse("100000000000000000000000000000000E+6111"), parse("1E6143")); + assertEquals(parse("1000000000000000000000000000000000E+6111"), parse("1E6144")); + assertEquals(parse("1100000000000000000000000000000000E+6111"), parse("11E6143")); + assertEquals(parse("0E6111"), parse("0E8000")); + assertEquals(parse("0E6111"), parse("0E2147483647")); + + assertEquals(parse("-10E6111"), parse("-1E6112")); + assertEquals(parse("-100E6111"), parse("-1E6113")); + assertEquals(parse("-100000000000000000000000000000000E+6111"), parse("-1E6143")); + assertEquals(parse("-1000000000000000000000000000000000E+6111"), parse("-1E6144")); + assertEquals(parse("-1100000000000000000000000000000000E+6111"), parse("-11E6143")); + assertEquals(parse("-0E6111"), parse("-0E8000")); + assertEquals(parse("-0E6111"), parse("-0E2147483647")); + } + + @Test + public void shouldClampNegativeExponents() { + // expect + assertEquals(parse("0E-6176"), parse("0E-8000")); + assertEquals(parse("0E-6176"), parse("0E-2147483647")); + assertEquals(parse("1E-6176"), parse("10E-6177")); + assertEquals(parse("1E-6176"), parse("100E-6178")); + assertEquals(parse("11E-6176"), parse("110E-6177")); + + assertEquals(parse("-0E-6176"), parse("-0E-8000")); + assertEquals(parse("-0E-6176"), parse("-0E-2147483647")); + assertEquals(parse("-1E-6176"), parse("-10E-6177")); + assertEquals(parse("-1E-6176"), parse("-100E-6178")); + assertEquals(parse("-11E-6176"), parse("-110E-6177")); + } + + @Test + public void shouldConstructFromLong() { + // expect + assertEquals(new Decimal128(new BigDecimal("1")), new Decimal128(1L)); + assertEquals(new Decimal128(new BigDecimal(Long.MIN_VALUE)), new Decimal128(Long.MIN_VALUE)); + assertEquals(new Decimal128(new BigDecimal(Long.MAX_VALUE)), new Decimal128(Long.MAX_VALUE)); + } + + @Test + public void shouldConstructFromLargeBigDecimal() { + // expect + assertEquals(fromIEEE754BIDEncoding(0x304000000000029dL, 0x42da3a76f9e0d979L), parse("12345689012345789012345")); + assertEquals(fromIEEE754BIDEncoding(0x30403cde6fff9732L, 0xde825cd07e96aff2L), parse("1234567890123456789012345678901234")); + assertEquals(fromIEEE754BIDEncoding(0x5fffed09bead87c0L, 0x378d8e63ffffffffL), parse("9.999999999999999999999999999999999E+6144")); + assertEquals(fromIEEE754BIDEncoding(0x0001ed09bead87c0L, 0x378d8e63ffffffffL), parse("9.999999999999999999999999999999999E-6143")); + assertEquals(fromIEEE754BIDEncoding(0x3040ffffffffffffL, 0xffffffffffffffffL), parse("5.192296858534827628530496329220095E+33")); + } + + @Test + public void shouldConvertToSimpleBigDecimal() { + // expect + assertEquals(new BigDecimal("0"), fromIEEE754BIDEncoding(0x3040000000000000L, 0x0000000000000000L).bigDecimalValue()); + assertEquals(new BigDecimal("1"), fromIEEE754BIDEncoding(0x3040000000000000L, 0x0000000000000001L).bigDecimalValue()); + assertEquals(new BigDecimal("-1"), fromIEEE754BIDEncoding(0xb040000000000000L, 0x0000000000000001L).bigDecimalValue()); + assertEquals(new 
BigDecimal("12345678901234567"), + fromIEEE754BIDEncoding(0x3040000000000000L, 0x002bdc545d6b4b87L).bigDecimalValue()); + assertEquals(new BigDecimal("989898983458"), fromIEEE754BIDEncoding(0x3040000000000000L, 0x000000e67a93c822L).bigDecimalValue()); + assertEquals(new BigDecimal("-12345678901234567"), + fromIEEE754BIDEncoding(0xb040000000000000L, 0x002bdc545d6b4b87L).bigDecimalValue()); + assertEquals(new BigDecimal("0.12345"), fromIEEE754BIDEncoding(0x3036000000000000L, 0x0000000000003039L).bigDecimalValue()); + assertEquals(new BigDecimal("0.0012345"), fromIEEE754BIDEncoding(0x3032000000000000L, 0x0000000000003039L).bigDecimalValue()); + assertEquals(new BigDecimal("00012345678901234567"), + fromIEEE754BIDEncoding(0x3040000000000000L, 0x002bdc545d6b4b87L).bigDecimalValue()); + } + + @Test + public void shouldConvertToLargeBigDecimal() { + // expect + assertEquals(new BigDecimal("12345689012345789012345"), + fromIEEE754BIDEncoding(0x304000000000029dL, 0x42da3a76f9e0d979L).bigDecimalValue()); + + assertEquals(new BigDecimal("1234567890123456789012345678901234"), fromIEEE754BIDEncoding(0x30403cde6fff9732L, + 0xde825cd07e96aff2L).bigDecimalValue()); + + assertEquals(new BigDecimal("9.999999999999999999999999999999999E+6144"), + fromIEEE754BIDEncoding(0x5fffed09bead87c0L, 0x378d8e63ffffffffL).bigDecimalValue()); + + assertEquals(new BigDecimal("9.999999999999999999999999999999999E-6143"), + fromIEEE754BIDEncoding(0x0001ed09bead87c0L, 0x378d8e63ffffffffL).bigDecimalValue()); + + assertEquals(new BigDecimal("5.192296858534827628530496329220095E+33"), + fromIEEE754BIDEncoding(0x3040ffffffffffffL, 0xffffffffffffffffL).bigDecimalValue()); + } + + @Test + public void shouldConvertInvalidRepresentationsOfZeroAsBigDecimalZero() { + // expect + assertEquals(new BigDecimal("0"), fromIEEE754BIDEncoding(0x6C10000000000000L, 0x0).bigDecimalValue()); + assertEquals(new BigDecimal("0E+3"), fromIEEE754BIDEncoding(0x6C11FFFFFFFFFFFFL, 0xffffffffffffffffL).bigDecimalValue()); + } + + @Test + public void shouldDetectInfinity() { + // expect + assertTrue(POSITIVE_INFINITY.isInfinite()); + assertTrue(NEGATIVE_INFINITY.isInfinite()); + assertFalse(parse("0").isInfinite()); + assertFalse(parse("9.999999999999999999999999999999999E+6144").isInfinite()); + assertFalse(parse("9.999999999999999999999999999999999E-6143").isInfinite()); + assertFalse(POSITIVE_INFINITY.isFinite()); + assertFalse(NEGATIVE_INFINITY.isFinite()); + assertTrue(parse("0").isFinite()); + assertTrue(parse("9.999999999999999999999999999999999E+6144").isFinite()); + assertTrue(parse("9.999999999999999999999999999999999E-6143").isFinite()); + } + + @Test + public void shouldDetectNaN() { + // expect + assertTrue(NaN.isNaN()); + assertTrue(fromIEEE754BIDEncoding(0x7e00000000000000L, 0).isNaN()); // SNaN + assertFalse(POSITIVE_INFINITY.isNaN()); + assertFalse(NEGATIVE_INFINITY.isNaN()); + assertFalse(parse("0").isNaN()); + assertFalse(parse("9.999999999999999999999999999999999E+6144").isNaN()); + assertFalse(parse("9.999999999999999999999999999999999E-6143").isNaN()); + } + + @Test + public void shouldConvertNaNToString() { + // expect + assertEquals("NaN", NaN.toString()); + } + + @Test + public void shouldConvertNaNFromString() { + // expect + assertEquals(NaN, parse("NaN")); + assertEquals(NaN, parse("nan")); + assertEquals(NaN, parse("nAn")); + assertEquals(NEGATIVE_NaN, parse("-NaN")); + assertEquals(NEGATIVE_NaN, parse("-nan")); + assertEquals(NEGATIVE_NaN, parse("-nAn")); + } + + @Test + public void shouldNotConvertNaNToBigDecimal() { + 
assertThrows(ArithmeticException.class, () -> + // when + NaN.bigDecimalValue()); + } + + @Test + public void shouldConvertInfinityToString() { + // expect + assertEquals("Infinity", POSITIVE_INFINITY.toString()); + assertEquals("-Infinity", NEGATIVE_INFINITY.toString()); + } + + @Test + public void shouldConvertInfinityFromString() { + // expect + assertEquals(POSITIVE_INFINITY, parse("Inf")); + assertEquals(POSITIVE_INFINITY, parse("inf")); + assertEquals(POSITIVE_INFINITY, parse("inF")); + assertEquals(POSITIVE_INFINITY, parse("+Inf")); + assertEquals(POSITIVE_INFINITY, parse("+inf")); + assertEquals(POSITIVE_INFINITY, parse("+inF")); + assertEquals(POSITIVE_INFINITY, parse("Infinity")); + assertEquals(POSITIVE_INFINITY, parse("infinity")); + assertEquals(POSITIVE_INFINITY, parse("infiniTy")); + assertEquals(POSITIVE_INFINITY, parse("+Infinity")); + assertEquals(POSITIVE_INFINITY, parse("+infinity")); + assertEquals(POSITIVE_INFINITY, parse("+infiniTy")); + assertEquals(NEGATIVE_INFINITY, parse("-Inf")); + assertEquals(NEGATIVE_INFINITY, parse("-inf")); + assertEquals(NEGATIVE_INFINITY, parse("-inF")); + assertEquals(NEGATIVE_INFINITY, parse("-Infinity")); + assertEquals(NEGATIVE_INFINITY, parse("-infinity")); + assertEquals(NEGATIVE_INFINITY, parse("-infiniTy")); + } + + @Test + public void shouldConvertFiniteToString() { + // expect + assertEquals("0", parse("0").toString()); + assertEquals("-0", parse("-0").toString()); + assertEquals("0E+10", parse("0E10").toString()); + assertEquals("-0E+10", parse("-0E10").toString()); + assertEquals("1", parse("1").toString()); + assertEquals("-1", parse("-1").toString()); + assertEquals("-1.1", parse("-1.1").toString()); + + assertEquals("1.23E-7", parse("123E-9").toString()); + assertEquals("0.00000123", parse("123E-8").toString()); + assertEquals("0.0000123", parse("123E-7").toString()); + assertEquals("0.000123", parse("123E-6").toString()); + assertEquals("0.00123", parse("123E-5").toString()); + assertEquals("0.0123", parse("123E-4").toString()); + assertEquals("0.123", parse("123E-3").toString()); + assertEquals("1.23", parse("123E-2").toString()); + assertEquals("12.3", parse("123E-1").toString()); + assertEquals("123", parse("123E0").toString()); + assertEquals("1.23E+3", parse("123E1").toString()); + + assertEquals("0.0001234", parse("1234E-7").toString()); + assertEquals("0.001234", parse("1234E-6").toString()); + + assertEquals("1E+6", parse("1E6").toString()); + } + + @Test + public void shouldConvertInvalidRepresentationsOfZeroToString() { + // expect + assertEquals("0", fromIEEE754BIDEncoding(0x6C10000000000000L, 0x0).bigDecimalValue().toString()); + assertEquals("0E+3", fromIEEE754BIDEncoding(0x6C11FFFFFFFFFFFFL, 0xffffffffffffffffL).toString()); + } + + @Test + public void testEquals() { + // given + Decimal128 d1 = fromIEEE754BIDEncoding(0x3040000000000000L, 0x0000000000000001L); + Decimal128 d2 = fromIEEE754BIDEncoding(0x3040000000000000L, 0x0000000000000001L); + Decimal128 d3 = fromIEEE754BIDEncoding(0x3040000000000001L, 0x0000000000000001L); + Decimal128 d4 = fromIEEE754BIDEncoding(0x3040000000000000L, 0x0000000000000011L); + + // expect + assertEquals(d1, d1); + assertEquals(d1, d2); + assertNotEquals(d1, d3); + assertNotEquals(d1, d4); + assertNotEquals(null, d1); + assertNotEquals(0L, d1); + } + + @Test + public void testHashCode() { + // expect + assertEquals(809500703, fromIEEE754BIDEncoding(0x3040000000000000L, 0x0000000000000001L).hashCode()); + } + + @Test + public void shouldNotConvertPositiveInfinityToBigDecimal() { 
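+ // BigDecimal cannot represent infinity either, so bigDecimalValue() must throw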
+ assertThrows(ArithmeticException.class, () -> POSITIVE_INFINITY.bigDecimalValue()); + } + + @Test + public void shouldNotConvertNegativeInfinityToBigDecimal() { + assertThrows(ArithmeticException.class, () ->NEGATIVE_INFINITY.bigDecimalValue()); + } + + @Test + public void shouldNotConvertNegativeZeroToBigDecimal() { + try { + parse("-0").bigDecimalValue(); + fail(); + } catch (ArithmeticException e) { + // pass + } + + try { + parse("-0E+1").bigDecimalValue(); + fail(); + } catch (ArithmeticException e) { + // pass + } + + try { + parse("-0E-1").bigDecimalValue(); + fail(); + } catch (ArithmeticException e) { + // pass + } + } + + @Test + public void shouldNotRoundInexactly() { + try { + parse("12345678901234567890123456789012345E+6111"); + fail(); + } catch (IllegalArgumentException e) { + // pass + } + try { + parse("123456789012345678901234567890123456E+6111"); + fail(); + } catch (IllegalArgumentException e) { + // pass + } + try { + parse("1234567890123456789012345678901234567E+6111"); + fail(); + } catch (IllegalArgumentException e) { + // pass + } + try { + parse("12345678901234567890123456789012345E-6176"); + fail(); + } catch (IllegalArgumentException e) { + // pass + } + try { + parse("123456789012345678901234567890123456E-6176"); + fail(); + } catch (IllegalArgumentException e) { + // pass + } + try { + parse("1234567890123456789012345678901234567E-6176"); + fail(); + } catch (IllegalArgumentException e) { + // pass + } + try { + parse("-12345678901234567890123456789012345E+6111"); + fail(); + } catch (IllegalArgumentException e) { + // pass + } + try { + parse("-123456789012345678901234567890123456E+6111"); + fail(); + } catch (IllegalArgumentException e) { + // pass + } + try { + parse("-1234567890123456789012345678901234567E+6111"); + fail(); + } catch (IllegalArgumentException e) { + // pass + } + try { + parse("-12345678901234567890123456789012345E-6176"); + fail(); + } catch (IllegalArgumentException e) { + // pass + } + try { + parse("-123456789012345678901234567890123456E-6176"); + fail(); + } catch (IllegalArgumentException e) { + // pass + } + try { + parse("-1234567890123456789012345678901234567E-6176"); + fail(); + } catch (IllegalArgumentException e) { + // pass + } + } + + @Test + public void shouldNotClampLargeExponentsIfNoExtraPrecisionIsAvailable() { + try { + parse("1234567890123456789012345678901234E+6112"); + fail(); + } catch (IllegalArgumentException e) { + // pass + } + try { + parse("1234567890123456789012345678901234E+6113"); + fail(); + } catch (IllegalArgumentException e) { + // pass + } + try { + parse("1234567890123456789012345678901234E+6114"); + fail(); + } catch (IllegalArgumentException e) { + // pass + } + try { + parse("-1234567890123456789012345678901234E+6112"); + fail(); + } catch (IllegalArgumentException e) { + // pass + } + try { + parse("-1234567890123456789012345678901234E+6113"); + fail(); + } catch (IllegalArgumentException e) { + // pass + } + try { + parse("-1234567890123456789012345678901234E+6114"); + fail(); + } catch (IllegalArgumentException e) { + // pass + } + } + + @Test + public void shouldNotClampSmallExponentsIfNoExtraPrecisionCanBeDiscarded() { + try { + parse("1234567890123456789012345678901234E-6177"); + fail(); + } catch (IllegalArgumentException e) { + // pass + } + try { + parse("1234567890123456789012345678901234E-6178"); + fail(); + } catch (IllegalArgumentException e) { + // pass + } + try { + parse("1234567890123456789012345678901234E-6179"); + fail(); + } catch (IllegalArgumentException e) { + // pass + } + 
try { + parse("-1234567890123456789012345678901234E-6177"); + fail(); + } catch (IllegalArgumentException e) { + // pass + } + try { + parse("-1234567890123456789012345678901234E-6178"); + fail(); + } catch (IllegalArgumentException e) { + // pass + } + try { + parse("-1234567890123456789012345678901234E-6179"); + fail(); + } catch (IllegalArgumentException e) { + // pass + } + } + + @Test + public void shouldThrowIllegalArgumentExceptionIfBigDecimalIsTooLarge() { + assertThrows(IllegalArgumentException.class, () -> new Decimal128(new BigDecimal("12345678901234567890123456789012345"))); + } + + @Test + public void shouldExtendNumber() { + // expect + assertEquals(Double.POSITIVE_INFINITY, POSITIVE_INFINITY.doubleValue(), 0); + assertEquals(Float.POSITIVE_INFINITY, POSITIVE_INFINITY.floatValue(), 0); + assertEquals(Long.MAX_VALUE, POSITIVE_INFINITY.longValue()); + assertEquals(Integer.MAX_VALUE, POSITIVE_INFINITY.intValue()); + + assertEquals(Double.NEGATIVE_INFINITY, NEGATIVE_INFINITY.doubleValue(), 0); + assertEquals(Float.NEGATIVE_INFINITY, NEGATIVE_INFINITY.floatValue(), 0); + assertEquals(Long.MIN_VALUE, NEGATIVE_INFINITY.longValue()); + assertEquals(Integer.MIN_VALUE, NEGATIVE_INFINITY.intValue()); + + assertEquals(Double.NaN, NaN.doubleValue(), 0); + assertEquals(Double.NaN, NaN.floatValue(), 0); + assertEquals(0, NaN.longValue()); + assertEquals(0, NaN.intValue()); + + assertEquals(Double.NaN, NEGATIVE_NaN.doubleValue(), 0); + assertEquals(Float.NaN, NEGATIVE_NaN.floatValue(), 0); + assertEquals(0, NEGATIVE_NaN.longValue()); + assertEquals(0, NEGATIVE_NaN.intValue()); + + assertEquals(0.0d, POSITIVE_ZERO.doubleValue(), 0); + assertEquals(0.0f, POSITIVE_ZERO.floatValue(), 0); + assertEquals(0L, POSITIVE_ZERO.longValue()); + assertEquals(0, POSITIVE_ZERO.intValue()); + + assertEquals(NEGATIVE_ZERO.doubleValue(), -0d, 0); + assertEquals(NEGATIVE_ZERO.floatValue(), -0f, 0); + assertEquals(0L, NEGATIVE_ZERO.longValue()); + assertEquals(0, NEGATIVE_ZERO.intValue()); + + assertEquals(parse("-0.0").doubleValue(), -0d, 0); + assertEquals(parse("-0.0").floatValue(), -0f, 0); + assertEquals(0L, parse("-0.0").longValue()); + assertEquals(0, parse("-0.0").intValue()); + + assertEquals(5.4d, parse("5.4").doubleValue(), 0); + assertEquals(5.4f, parse("5.4").floatValue(), 0); + assertEquals(5L, parse("5.4").longValue()); + assertEquals(5, parse("5.4").intValue()); + + assertEquals(1.2345678901234568E33d, parse("1234567890123456789012345678901234").doubleValue(), 0); + assertEquals(1.2345679E33f, parse("1234567890123456789012345678901234").floatValue(), 0); + assertEquals(Long.MAX_VALUE, parse("1234567890123456789012345678901234").longValue()); + assertEquals(Integer.MAX_VALUE, parse("1234567890123456789012345678901234").intValue()); + + assertEquals(-1.2345678901234568E33d, parse("-1234567890123456789012345678901234").doubleValue(), 0); + assertEquals(-1.2345679E33f, parse("-1234567890123456789012345678901234").floatValue(), 0); + assertEquals(Long.MIN_VALUE, parse("-1234567890123456789012345678901234").longValue()); + assertEquals(Integer.MIN_VALUE, parse("-1234567890123456789012345678901234").intValue()); + } + + @Test + public void shouldImplementComparable() { + assertEquals(1, NaN.compareTo(NEGATIVE_ZERO)); + assertEquals(0, NaN.compareTo(NaN)); + assertEquals(1, NaN.compareTo(POSITIVE_INFINITY)); + assertEquals(1, NaN.compareTo(NEGATIVE_INFINITY)); + assertEquals(1, NaN.compareTo(parse("1"))); + assertEquals(1, POSITIVE_INFINITY.compareTo(NEGATIVE_INFINITY)); + assertEquals(0, 
POSITIVE_INFINITY.compareTo(POSITIVE_INFINITY)); + assertEquals(-1, POSITIVE_INFINITY.compareTo(NaN)); + assertEquals(1, POSITIVE_INFINITY.compareTo(NEGATIVE_ZERO)); + assertEquals(1, POSITIVE_INFINITY.compareTo(parse("1"))); + assertEquals(-1, NEGATIVE_INFINITY.compareTo(POSITIVE_INFINITY)); + assertEquals(0, NEGATIVE_INFINITY.compareTo(NEGATIVE_INFINITY)); + assertEquals(-1, NEGATIVE_INFINITY.compareTo(NaN)); + assertEquals(-1, NEGATIVE_INFINITY.compareTo(NEGATIVE_ZERO)); + assertEquals(-1, NEGATIVE_INFINITY.compareTo(parse("1"))); + assertEquals(-1, parse("1").compareTo(NaN)); + assertEquals(-1, parse("1").compareTo(POSITIVE_INFINITY)); + assertEquals(1, parse("1").compareTo(NEGATIVE_INFINITY)); + assertEquals(1, parse("1").compareTo(NEGATIVE_ZERO)); + assertEquals(-1, parse("-0").compareTo(parse("0"))); + assertEquals(0, parse("-0").compareTo(parse("-0"))); + assertEquals(-1, parse("-0").compareTo(NaN)); + assertEquals(-1, parse("-0").compareTo(POSITIVE_INFINITY)); + assertEquals(1, parse("-0").compareTo(NEGATIVE_INFINITY)); + assertEquals(1, parse("0").compareTo(parse("-0"))); + assertEquals(0, parse("0").compareTo(parse("0"))); + assertEquals(0, parse("5.4").compareTo(parse("5.4"))); + assertEquals(1, parse("5.4").compareTo(parse("5.3"))); + assertEquals(-1, parse("5.3").compareTo(parse("5.4"))); + assertEquals(0, parse("5.4").compareTo(parse("5.40"))); + } +} diff --git a/bson/src/test/unit/org/bson/types/DocumentSpecification.groovy b/bson/src/test/unit/org/bson/types/DocumentSpecification.groovy new file mode 100644 index 00000000000..1066edc5317 --- /dev/null +++ b/bson/src/test/unit/org/bson/types/DocumentSpecification.groovy @@ -0,0 +1,292 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + + +package org.bson.types + +import org.bson.BsonRegularExpression +import org.bson.Document +import org.bson.codecs.DocumentCodec +import org.bson.json.JsonParseException +import spock.lang.Specification + +class DocumentSpecification extends Specification { + + def 'should return correct type for each typed method'() { + given: + Date date = new Date() + ObjectId objectId = new ObjectId() + + when: + Document doc = new Document() + .append('int', 1).append('long', 2L).append('double', 3.0 as double).append('string', 'hi').append('boolean', true) + .append('objectId', objectId).append('date', date) + + then: + doc.getInteger('int') == 1 + doc.getInteger('intNoVal', 42) == 42 + doc.getLong('long') == 2L + doc.getDouble('double') == 3.0d + doc.getString('string') == 'hi' + doc.getBoolean('boolean') + doc.getBoolean('booleanNoVal', true) + doc.getObjectId('objectId') == objectId + doc.getDate('date') == date + + + doc.get('objectId', ObjectId) == objectId + doc.get('int', Integer) == 1 + doc.get('long', Long) == 2L + doc.get('double', Double) == 3.0d + doc.get('string', String) == 'hi' + doc.get('boolean', Boolean) + doc.get('date', Date) == date + + doc.get('noVal', 42L) == 42L + doc.get('noVal', 3.1d) == 3.1d + doc.get('noVal', 'defVal') == 'defVal' + doc.get('noVal', true) + doc.get('noVal', objectId) == objectId + doc.get('noVal', date) == date + doc.get('noVal', objectId) == objectId + } + + def 'should return a list with elements of the specified class'() { + when: + Document doc = Document.parse("{x: 1, y: ['two', 'three'], z: [{a: 'one'}, {b:2}], w: {a: ['One', 'Two']}}") + .append('numberList', [10, 20.5d, 30L]) + .append('listWithNullElement', [10, null, 20]) + List defaultList = ['a', 'b', 'c'] + + then: + doc.getList('y', String).get(0) == 'two' + doc.getList('y', String).get(1) == 'three' + doc.getList('z', Document).get(0).getString('a') == 'one' + doc.getList('z', Document).get(1).getInteger('b') == 2 + doc.get('w', Document).getList('a', String).get(0) == 'One' + doc.get('w', Document).getList('a', String).get(1) == 'Two' + doc.getList('invalidKey', Document, defaultList).get(0) == 'a' + doc.getList('invalidKey', Document, defaultList).get(1) == 'b' + doc.getList('invalidKey', Document, defaultList).get(2) == 'c' + doc.getList('numberList', Number).get(0) == 10 + doc.getList('numberList', Number).get(1) == 20.5d + doc.getList('numberList', Number).get(2) == 30L + doc.getList('listWithNullElement', Number).get(0) == 10 + doc.getList('listWithNullElement', Number).get(1) == null + doc.getList('listWithNullElement', Number).get(2) == 20 + } + + def 'should return null list when key is not found'() { + when: + Document doc = Document.parse('{x: 1}') + + then: + doc.getList('a', String) == null + } + + def 'should return specified default value when key is not found'() { + when: + Document doc = Document.parse('{x: 1}') + List defaultList = ['a', 'b', 'c'] + + then: + doc.getList('a', String, defaultList) == defaultList + } + + + def 'should throw an exception when the list elements are not objects of the specified class'() { + given: + Document doc = Document.parse('{x: 1, y: [{a: 1}, {b: 2}], z: [1, 2]}') + + when: + doc.getList('x', String) + + then: + thrown(ClassCastException) + + when: + doc.getList('y', String) + + then: + thrown(ClassCastException) + + when: + doc.getList('z', String) + + then: + thrown(ClassCastException) + } + + def 'should return null when getting embedded value'() { + when: + Document document = Document.parse("{a: 1, b: {x: [2, 3, 4], y: {m: 
'one', len: 3}}, 'a.b': 'two'}") + + then: + document.getEmbedded(['notAKey'], String) == null + document.getEmbedded(['b', 'y', 'notAKey'], String) == null + document.getEmbedded(['b', 'b', 'm'], String) == null + Document.parse('{}').getEmbedded(['a', 'b'], Integer) == null + Document.parse('{b: 1}').getEmbedded(['a'], Integer) == null + Document.parse('{b: 1}').getEmbedded(['a', 'b'], Integer) == null + Document.parse('{a: {c: 1}}').getEmbedded(['a', 'b'], Integer) == null + Document.parse('{a: {c: 1}}').getEmbedded(['a', 'b', 'c'], Integer) == null + } + + def 'should return embedded value'() { + given: + Date date = new Date() + ObjectId objectId = new ObjectId() + + when: + Document document = Document.parse("{a: 1, b: {x: [2, 3, 4], y: {m: 'one', len: 3}}, 'a.b': 'two'}") + .append('l', new Document('long', 2L)) + .append('d', new Document('double', 3.0 as double)) + .append('t', new Document('boolean', true)) + .append('o', new Document('objectId', objectId)) + .append('n', new Document('date', date)) + + then: + document.getEmbedded(['a'], Integer) == 1 + document.getEmbedded(['b', 'x'], List).get(0) == 2 + document.getEmbedded(['b', 'x'], List).get(1) == 3 + document.getEmbedded(['b', 'x'], List).get(2) == 4 + document.getEmbedded(['b', 'y', 'm'], String) == 'one' + document.getEmbedded(['b', 'y', 'len'], Integer) == 3 + document.getEmbedded(['a.b'], String) == 'two' + document.getEmbedded(['b', 'y'], Document).getString('m') == 'one' + document.getEmbedded(['b', 'y'], Document).getInteger('len') == 3 + + document.getEmbedded(['l', 'long'], Long) == 2L + document.getEmbedded(['d', 'double'], Double) == 3.0d + document.getEmbedded(['l', 'long'], Number) == 2L + document.getEmbedded(['d', 'double'], Number) == 3.0d + document.getEmbedded(['t', 'boolean'], Boolean) == true + document.getEmbedded(['t', 'x'], false) == false + document.getEmbedded(['o', 'objectId'], ObjectId) == objectId + document.getEmbedded(['n', 'date'], Date) == date + } + + def 'should throw an exception getting an embedded value'() { + given: + Document document = Document.parse("{a: 1, b: {x: [2, 3, 4], y: {m: 'one', len: 3}}, 'a.b': 'two'}") + + when: + document.getEmbedded(null, String) == null + + then: + thrown(IllegalArgumentException) + + when: + document.getEmbedded([], String) == null + + then: + thrown(IllegalStateException) + + when: + document.getEmbedded(['a', 'b'], Integer) + + then: + thrown(ClassCastException) + + when: + document.getEmbedded(['b', 'y', 'm'], Integer) + + then: + thrown(ClassCastException) + + when: + document.getEmbedded(['b', 'x'], Document) + + then: + thrown(ClassCastException) + + when: + document.getEmbedded(['b', 'x', 'm'], String) + + then: + thrown(ClassCastException) + + when: + document.getEmbedded(['b', 'x', 'm'], 'invalid') + + then: + thrown(ClassCastException) + } + + def 'should parse a valid JSON string to a Document'() { + when: + Document document = Document.parse("{ 'int' : 1, 'string' : 'abc' }") + + then: + document != null + document.keySet().size() == 2 + document.getInteger('int') == 1 + document.getString('string') == 'abc' + + when: + document = Document.parse("{ 'int' : 1, 'string' : 'abc' }", new DocumentCodec()) + + then: + document != null + document.keySet().size() == 2 + document.getInteger('int') == 1 + document.getString('string') == 'abc' + } + + def 'test parse method with mode'() { + when: + Document document = Document.parse("{'regex' : /abc/im }") + + then: + document != null + document.keySet().size() == 1 + + BsonRegularExpression 
regularExpression = (BsonRegularExpression) document.get('regex') + regularExpression.options == 'im' + regularExpression.pattern == 'abc' + } + + def 'should throw an exception when parsing an invalid JSON String'() { + when: + Document.parse("{ 'int' : 1, 'string' : }") + + then: + thrown(JsonParseException) + } + + def 'should cast to correct type'() { + given: + Document document = new Document('str', 'a string') + + when: + String s = document.get('str', String) + + then: + s == document.get('str') + } + + def 'should throw ClassCastException when value is the wrong type'() { + given: + Document document = new Document('int', 'not an int') + + when: + document.get('int', Integer) + + then: + thrown(ClassCastException) + } +} diff --git a/bson/src/test/unit/org/bson/types/ObjectIdTest.java b/bson/src/test/unit/org/bson/types/ObjectIdTest.java new file mode 100644 index 00000000000..cfe04623b90 --- /dev/null +++ b/bson/src/test/unit/org/bson/types/ObjectIdTest.java @@ -0,0 +1,332 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.types; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.ObjectInputStream; +import java.io.ObjectOutputStream; +import java.nio.Buffer; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Date; +import java.util.List; +import java.util.Locale; +import java.util.Random; + +import static org.junit.Assert.assertFalse; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +public class ObjectIdTest { + + /** Calls the base method of ByteBuffer.position(int) since the override is not available in jdk8. 
*/ + private static ByteBuffer setPosition(final ByteBuffer buf, final int pos) { + ((Buffer) buf).position(pos); + return buf; + } + + /** + * MethodSource for valid ByteBuffers that can hold an ObjectId + */ + public static List<ByteBuffer> validOutputBuffers() { + List<ByteBuffer> result = new ArrayList<>(); + result.add(ByteBuffer.allocate(12)); + result.add(ByteBuffer.allocate(12).order(ByteOrder.LITTLE_ENDIAN)); + result.add(ByteBuffer.allocate(24).put(new byte[12])); + result.add(ByteBuffer.allocateDirect(12)); + result.add(ByteBuffer.allocateDirect(12).order(ByteOrder.LITTLE_ENDIAN)); + return result; + } + + @MethodSource("validOutputBuffers") + @ParameterizedTest + public void testToBytes(final ByteBuffer output) { + int originalPosition = output.position(); + ByteOrder originalOrder = output.order(); + byte[] expectedBytes = {81, 6, -4, -102, -68, -126, 55, 85, -127, 54, -46, -119}; + byte[] result = new byte[12]; + ObjectId objectId = new ObjectId(expectedBytes); + + assertArrayEquals(expectedBytes, objectId.toByteArray()); + + objectId.putToByteBuffer(output); + ((Buffer) output).position(output.position() - 12); + output.get(result); // read last 12 bytes leaving position intact + + assertArrayEquals(expectedBytes, result); + assertEquals(originalPosition + 12, output.position()); + assertEquals(originalOrder, output.order()); + } + + @Test + public void testFromBytes() { + + try { + new ObjectId((byte[]) null); + fail("Expected IllegalArgumentException"); + } catch (IllegalArgumentException e) { + assertEquals("bytes can not be null", e.getMessage()); + } + + try { + new ObjectId(new byte[11]); + fail("Expected IllegalArgumentException"); + } catch (IllegalArgumentException e) { + assertEquals("state should be: bytes has length of 12", e.getMessage()); + } + + try { + new ObjectId(new byte[13]); + fail("Expected IllegalArgumentException"); + } catch (IllegalArgumentException e) { + assertEquals("state should be: bytes has length of 12", e.getMessage()); + } + + byte[] bytes = {81, 6, -4, -102, -68, -126, 55, 85, -127, 54, -46, -119}; + + ObjectId objectId1 = new ObjectId(bytes); + assertEquals(0x5106FC9A, objectId1.getTimestamp()); + + ObjectId objectId2 = new ObjectId(ByteBuffer.wrap(bytes)); + assertEquals(0x5106FC9A, objectId2.getTimestamp()); + } + + @Test + public void testBytesRoundtrip() { + ObjectId expected = new ObjectId(); + ObjectId actual = new ObjectId(expected.toByteArray()); + assertEquals(expected, actual); + + byte[] b = new byte[12]; + Random r = new Random(17); + for (int i = 0; i < b.length; i++) { + b[i] = (byte) (r.nextInt()); + } + expected = new ObjectId(b); + assertEquals(expected, new ObjectId(expected.toByteArray())); + assertEquals("41d91c58988b09375cc1fe9f", expected.toString()); + } + + @Test + public void testGetSmallestWithDate() { + Date date = new Date(1588467737760L); + byte[] expectedBytes = {94, -82, 24, 25, 0, 0, 0, 0, 0, 0, 0, 0}; + ObjectId objectId = ObjectId.getSmallestWithDate(date); + assertArrayEquals(expectedBytes, objectId.toByteArray()); + assertEquals(date.getTime() / 1000 * 1000, objectId.getDate().getTime()); + assertEquals(-1, objectId.compareTo(new ObjectId(date))); + } + + @Test + public void testGetTimeZero() { + assertEquals(0L, new ObjectId(0, 0).getDate().getTime()); + } + + @Test + public void testGetTimeMaxSignedInt() { + assertEquals(0x7FFFFFFFL * 1000, new ObjectId(0x7FFFFFFF, 0).getDate().getTime()); + } + + @Test + public void testGetTimeMaxSignedIntPlusOne() { + assertEquals(0x80000000L * 1000, new ObjectId(0x80000000, 0).getDate().getTime()); + } + + @Test + public void testGetTimeMaxInt() { + assertEquals(0xFFFFFFFFL * 1000, new ObjectId(0xFFFFFFFF, 0).getDate().getTime()); + } + + @Test + public void testTime() { + long a = System.currentTimeMillis(); + long b = (new ObjectId()).getDate().getTime(); + assertTrue(Math.abs(b - a) < 3000); + } + + @Test + public void testDateConstructor() { + assertEquals(new Date().getTime() / 1000, new ObjectId(new Date()).getDate().getTime() / 1000); + assertNotEquals(new ObjectId(new Date(1_000)), new ObjectId(new Date(1_000))); + assertEquals("00000001", new ObjectId(new Date(1_000)).toHexString().substring(0, 8)); + } + + @Test + public void testDateConstructorWithCounter() { + assertEquals(new ObjectId(new Date(1_000), 1), new ObjectId(new Date(1_000), 1)); + assertEquals("00000001", new ObjectId(new Date(1_000), 1).toHexString().substring(0, 8)); + assertThrows(NullPointerException.class, () -> new ObjectId(null, Integer.MAX_VALUE)); + assertThrows(IllegalArgumentException.class, () -> new ObjectId(new Date(1_000), Integer.MAX_VALUE)); + } + + @Test + public void testTimestampConstructor() { + assertEquals(1_000, new ObjectId(1_000, 1).getTimestamp()); + assertEquals(new ObjectId(1_000, 1), new ObjectId(1_000, 1)); + assertEquals("7fffffff", new ObjectId(Integer.MAX_VALUE, 1).toHexString().substring(0, 8)); + assertThrows(IllegalArgumentException.class, () -> new ObjectId(Integer.MAX_VALUE, Integer.MAX_VALUE)); + } + + /** + * MethodSource for valid ByteBuffers containing an ObjectId at the current position. + */ + public static List<ByteBuffer> validInputBuffers() { + byte[] data = new byte[12]; + for (byte i = 0; i < data.length; ++i) { + data[i] = i; + } + + List<ByteBuffer> result = new ArrayList<>(); + result.add(ByteBuffer.wrap(data)); + result.add(ByteBuffer.wrap(data).order(ByteOrder.LITTLE_ENDIAN)); + result.add(setPosition(ByteBuffer.allocateDirect(data.length).put(data), 0)); + result.add(setPosition(ByteBuffer.allocateDirect(data.length).put(data).order(ByteOrder.LITTLE_ENDIAN), 0)); + result.add(setPosition(ByteBuffer.allocate(2 * data.length).put(data), 0)); + result.add(setPosition(ByteBuffer.allocate(2 * data.length).put(new byte[12]).put(data), 12)); + return result; + } + + @ParameterizedTest + @MethodSource(value = "validInputBuffers") + public void testByteBufferConstructor(final ByteBuffer input) { + ByteOrder order = input.order(); + int position = input.position(); + + byte[] result = new ObjectId(input).toByteArray(); + + assertArrayEquals(new byte[]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, result); + assertEquals(order, input.order()); + assertEquals(position + 12, input.position()); + } + + @Test + public void testInvalidByteBufferConstructor() { + assertThrows(IllegalArgumentException.class, () -> new ObjectId((ByteBuffer) null)); + assertThrows(IllegalArgumentException.class, () -> new ObjectId(ByteBuffer.allocate(11))); + } + + @Test + public void testHexStringConstructor() { + ObjectId id = new ObjectId(); + assertEquals(id, new ObjectId(id.toHexString())); + assertEquals(id, new ObjectId(id.toHexString().toUpperCase(Locale.US))); + assertThrows(IllegalArgumentException.class, () -> new ObjectId((String) null)); + assertThrows(IllegalArgumentException.class, () -> new ObjectId(id.toHexString().substring(0, 23))); + assertThrows(IllegalArgumentException.class, () -> new ObjectId(id.toHexString().substring(0, 23) + '%')); + } + + @Test + public void testCompareTo() { + Date dateOne = new Date(); + Date dateTwo = new Date(dateOne.getTime() + 10000); + ObjectId
first = new ObjectId(dateOne, 0); + ObjectId second = new ObjectId(dateOne, 1); + ObjectId third = new ObjectId(dateTwo, 0); + assertEquals(0, first.compareTo(first)); + assertEquals(-1, first.compareTo(second)); + assertEquals(-1, first.compareTo(third)); + assertEquals(1, second.compareTo(first)); + assertEquals(1, third.compareTo(first)); + assertThrows(NullPointerException.class, () -> first.compareTo(null)); + } + + @Test + public void testEquals() { + Date dateOne = new Date(); + Date dateTwo = new Date(dateOne.getTime() + 10000); + ObjectId first = new ObjectId(dateOne, 0); + ObjectId second = new ObjectId(dateOne, 1); + ObjectId third = new ObjectId(dateTwo, 0); + ObjectId fourth = new ObjectId(first.toByteArray()); + assertEquals(first, first); + assertEquals(first, fourth); + assertNotEquals(first, second); + assertNotEquals(first, third); + assertNotEquals(second, third); + assertFalse(first.equals(null)); + } + + @Test + public void testToHexString() { + assertEquals("000000000000000000000000", new ObjectId(new byte[12]).toHexString()); + assertEquals("7fffffff007fff7fff007fff", new ObjectId(new byte[]{127, -1, -1, -1, 0, 127, -1, 127, -1, 0, 127, -1}).toHexString()); + } + + private Date getDate(final String s) throws ParseException { + return new SimpleDateFormat("dd-MMM-yyyy HH:mm:ss Z").parse(s); + } + + @Test + public void testTimeZero() throws ParseException { + assertEquals(getDate("01-Jan-1970 00:00:00 -0000"), new ObjectId(0, 0).getDate()); + } + + @Test + public void testTimeMaxSignedInt() throws ParseException { + assertEquals(getDate("19-Jan-2038 03:14:07 -0000"), new ObjectId(0x7FFFFFFF, 0).getDate()); + } + + @Test + public void testTimeMaxSignedIntPlusOne() throws ParseException { + assertEquals(getDate("19-Jan-2038 03:14:08 -0000"), new ObjectId(0x80000000, 0).getDate()); + } + + @Test + public void testTimeMaxInt() throws ParseException { + assertEquals(getDate("07-Feb-2106 06:28:15 -0000"), new ObjectId(0xFFFFFFFF, 0).getDate()); + } + + @Test + public void testObjectSerialization() throws IOException, ClassNotFoundException { + // given + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + ObjectOutputStream oos = new ObjectOutputStream(baos); + ObjectId objectId = new ObjectId("5f8f4fcf27516f05e7eae5be"); + + // when + oos.writeObject(objectId); + + // then + assertTrue(baos.toString().contains("org.bson.types.ObjectId$SerializationProxy")); + assertArrayEquals(new byte[] {-84, -19, 0, 5, 115, 114, 0, 42, 111, 114, 103, 46, 98, 115, 111, 110, 46, 116, 121, 112, 101, 115, + 46, 79, 98, 106, 101, 99, 116, 73, 100, 36, 83, 101, 114, 105, 97, 108, 105, 122, 97, 116, 105, 111, 110, 80, 114, + 111, 120, 121, 0, 0, 0, 0, 0, 0, 0, 1, 2, 0, 1, 91, 0, 5, 98, 121, 116, 101, 115, 116, 0, 2, 91, 66, 120, 112, 117, + 114, 0, 2, 91, 66, -84, -13, 23, -8, 6, 8, 84, -32, 2, 0, 0, 120, 112, 0, 0, 0, 12, 95, -113, 79, -49, 39, 81, 111, + 5, -25, -22, -27, -66}, baos.toByteArray()); + + // when + ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray()); + ObjectInputStream ois = new ObjectInputStream(bais); + ObjectId deserializedObjectId = (ObjectId) ois.readObject(); + + // then + assertEquals(objectId, deserializedObjectId); + } +} diff --git a/bson/src/test/unit/org/bson/types/StringRangeSetSpecification.groovy b/bson/src/test/unit/org/bson/types/StringRangeSetSpecification.groovy new file mode 100644 index 00000000000..3f19df94b7b --- /dev/null +++ b/bson/src/test/unit/org/bson/types/StringRangeSetSpecification.groovy @@ -0,0 +1,187 @@ +/* + * 
Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.types + +import spock.lang.Specification + +class StringRangeSetSpecification extends Specification { + + def 'should be empty if size is 0'() { + when: + def stringSet = new StringRangeSet(0) + + then: + stringSet.size() == 0 + stringSet.isEmpty() + } + + def 'should contain all strings between zero and size'() { + when: + def stringSet = new StringRangeSet(5) + + then: + stringSet.size() == 5 + !stringSet.contains('-1') + stringSet.contains('0') + stringSet.contains('1') + stringSet.contains('2') + stringSet.contains('3') + stringSet.contains('4') + !stringSet.contains('5') + stringSet.containsAll(['0', '1', '2', '3', '4']) + !stringSet.containsAll(['0', '1', '2', '3', '4', '5']) + } + + def 'should not contain integers'() { + when: + def stringSet = new StringRangeSet(5) + + then: + !stringSet.contains(0) + !stringSet.containsAll([0, 1, 2]) + } + + def 'should not contain strings that do not parse as integers'() { + when: + def stringSet = new StringRangeSet(5) + + then: + !stringSet.contains('foo') + !stringSet.containsAll(['foo', 'bar', 'baz']) + } + + def 'set should be ordered string representations of the range'() { + given: + def size = 2000 + def expectedKeys = [] + for (def i : (0..JSON-based tests that included in test resources. + */ +class BinaryVectorGenericBsonTest { + + private static final List TEST_NAMES_TO_IGNORE = Arrays.asList( + //NO API to set padding for floats available. + "FLOAT32 with padding", + //NO API to set padding for floats available. + "INT8 with padding", + //It is impossible to provide float inputs for INT8 in the API. + "INT8 with float inputs", + //It is impossible to provide float inputs for INT8. + "Underflow Vector PACKED_BIT", + //It is impossible to provide float inputs for PACKED_BIT in the API. + "Vector with float values PACKED_BIT", + //It is impossible to provide float inputs for INT8. + "Overflow Vector PACKED_BIT", + //It is impossible to overflow byte with values higher than 127 in the API. + "Overflow Vector INT8", + //It is impossible to underflow byte with values lower than -128 in the API. 
+ "Underflow Vector INT8"); + + + @ParameterizedTest(name = "{0}") + @MethodSource("data") + void shouldPassAllOutcomes(@SuppressWarnings("unused") final String description, + final BsonDocument testDefinition, final BsonDocument testCase) { + assumeFalse(TEST_NAMES_TO_IGNORE.contains(testCase.get("description").asString().getValue())); + + String testKey = testDefinition.getString("test_key").getValue(); + boolean isValidVector = testCase.getBoolean("valid").getValue(); + if (isValidVector) { + runValidTestCase(testKey, testCase); + } else { + runInvalidTestCase(testCase); + } + } + + private static void runInvalidTestCase(final BsonDocument testCase) { + BsonArray arrayVector = testCase.getArray("vector"); + byte expectedPadding = (byte) testCase.getInt32("padding").getValue(); + byte dtypeByte = Byte.decode(testCase.getString("dtype_hex").getValue()); + BinaryVector.DataType expectedDType = determineVectorDType(dtypeByte); + + switch (expectedDType) { + case INT8: + byte[] expectedVectorData = toByteArray(arrayVector); + assertValidationException(assertThrows(RuntimeException.class, + () -> BinaryVector.int8Vector(expectedVectorData))); + break; + case PACKED_BIT: + byte[] expectedVectorPackedBitData = toByteArray(arrayVector); + assertValidationException(assertThrows(RuntimeException.class, + () -> BinaryVector.packedBitVector(expectedVectorPackedBitData, expectedPadding))); + break; + case FLOAT32: + float[] expectedFloatVector = toFloatArray(arrayVector); + assertValidationException(assertThrows(RuntimeException.class, () -> BinaryVector.floatVector(expectedFloatVector))); + break; + default: + throw new IllegalArgumentException("Unsupported vector data type: " + expectedDType); + } + } + + private static void runValidTestCase(final String testKey, final BsonDocument testCase) { + String description = testCase.getString("description").getValue(); + byte dtypeByte = Byte.decode(testCase.getString("dtype_hex").getValue()); + + byte expectedPadding = (byte) testCase.getInt32("padding").getValue(); + BinaryVector.DataType expectedDType = determineVectorDType(dtypeByte); + String expectedCanonicalBsonHex = testCase.getString("canonical_bson").getValue().toUpperCase(); + + BsonArray arrayVector = testCase.getArray("vector"); + BsonDocument actualDecodedDocument = decodeToDocument(expectedCanonicalBsonHex, description); + BinaryVector actualVector = actualDecodedDocument.getBinary("vector").asVector(); + + switch (expectedDType) { + case INT8: + byte[] expectedVectorData = toByteArray(arrayVector); + byte[] actualVectorData = actualVector.asInt8Vector().getData(); + assertVectorDecoding( + expectedVectorData, + expectedDType, + actualVectorData, + actualVector); + + assertThatVectorCreationResultsInCorrectBinary(BinaryVector.int8Vector(expectedVectorData), + testKey, + actualDecodedDocument, + expectedCanonicalBsonHex, + description); + break; + case PACKED_BIT: + PackedBitBinaryVector actualPackedBitVector = actualVector.asPackedBitVector(); + byte[] expectedVectorPackedBitData = toByteArray(arrayVector); + assertVectorDecoding( + expectedVectorPackedBitData, + expectedDType, expectedPadding, + actualPackedBitVector); + + assertThatVectorCreationResultsInCorrectBinary( + BinaryVector.packedBitVector(expectedVectorPackedBitData, expectedPadding), + testKey, + actualDecodedDocument, + expectedCanonicalBsonHex, + description); + break; + case FLOAT32: + Float32BinaryVector actualFloat32Vector = actualVector.asFloat32Vector(); + float[] expectedFloatVector = toFloatArray(arrayVector); + 
assertVectorDecoding( + expectedFloatVector, + expectedDType, + actualFloat32Vector); + assertThatVectorCreationResultsInCorrectBinary( + BinaryVector.floatVector(expectedFloatVector), + testKey, + actualDecodedDocument, + expectedCanonicalBsonHex, + description); + break; + default: + throw new IllegalArgumentException("Unsupported vector data type: " + expectedDType); + } + } + + private static void assertValidationException(final RuntimeException runtimeException) { + assertTrue(runtimeException instanceof IllegalArgumentException || runtimeException instanceof IllegalStateException); + } + + private static void assertThatVectorCreationResultsInCorrectBinary(final BinaryVector expectedVectorData, + final String testKey, + final BsonDocument actualDecodedDocument, + final String expectedCanonicalBsonHex, + final String description) { + BsonDocument documentToEncode = new BsonDocument(testKey, new BsonBinary(expectedVectorData)); + assertEquals(documentToEncode, actualDecodedDocument); + assertEquals(expectedCanonicalBsonHex, encodeToHex(documentToEncode), + format("Failed to create expected BSON for document with description '%s'", description)); + } + + private static void assertVectorDecoding(final byte[] expectedVectorData, + final BinaryVector.DataType expectedDType, + final byte[] actualVectorData, + final BinaryVector actualVector) { + Assertions.assertArrayEquals(actualVectorData, expectedVectorData, + () -> "Actual: " + Arrays.toString(actualVectorData) + " != Expected:" + Arrays.toString(expectedVectorData)); + assertEquals(expectedDType, actualVector.getDataType()); + } + + private static void assertVectorDecoding(final byte[] expectedVectorData, + final BinaryVector.DataType expectedDType, + final byte expectedPadding, + final PackedBitBinaryVector actualVector) { + byte[] actualVectorData = actualVector.getData(); + assertVectorDecoding( + expectedVectorData, + expectedDType, + actualVectorData, + actualVector); + assertEquals(expectedPadding, actualVector.getPadding()); + } + + private static void assertVectorDecoding(final float[] expectedVectorData, + final BinaryVector.DataType expectedDType, + final Float32BinaryVector actualVector) { + float[] actualVectorArray = actualVector.getData(); + Assertions.assertArrayEquals(actualVectorArray, expectedVectorData, + () -> "Actual: " + Arrays.toString(actualVectorArray) + " != Expected:" + Arrays.toString(expectedVectorData)); + assertEquals(expectedDType, actualVector.getDataType()); + } + + private static byte[] toByteArray(final BsonArray arrayVector) { + byte[] bytes = new byte[arrayVector.size()]; + for (int i = 0; i < arrayVector.size(); i++) { + bytes[i] = (byte) arrayVector.get(i).asInt32().getValue(); + } + return bytes; + } + + private static float[] toFloatArray(final BsonArray arrayVector) { + float[] floats = new float[arrayVector.size()]; + for (int i = 0; i < arrayVector.size(); i++) { + BsonValue bsonValue = arrayVector.get(i); + if (bsonValue.isString()) { + floats[i] = parseFloat(bsonValue.asString()); + } else { + floats[i] = (float) arrayVector.get(i).asDouble().getValue(); + } + } + return floats; + } + + private static float parseFloat(final BsonString bsonValue) { + String floatValue = bsonValue.getValue(); + switch (floatValue) { + case "-inf": + return Float.NEGATIVE_INFINITY; + case "inf": + return Float.POSITIVE_INFINITY; + default: + return Float.parseFloat(floatValue); + } + } + + private static Stream data() { + List data = new ArrayList<>(); + for (BsonDocument testDocument : 
JsonPoweredTestHelper.getTestDocuments("/bson-binary-vector")) { + for (BsonValue curValue : testDocument.getArray("tests", new BsonArray())) { + BsonDocument testCaseDocument = curValue.asDocument(); + data.add(Arguments.of(createTestCaseDescription(testDocument, testCaseDocument), testDocument, testCaseDocument)); + } + } + return data.stream(); + } + + private static String createTestCaseDescription(final BsonDocument testDocument, + final BsonDocument testCaseDocument) { + boolean isValidTestCase = testCaseDocument.getBoolean("valid").getValue(); + String fileDescription = testDocument.getString("description").getValue(); + String testDescription = testCaseDocument.getString("description").getValue(); + return "[Valid input: " + isValidTestCase + "] " + fileDescription + ": " + testDescription; + } +} diff --git a/bson/src/test/unit/util/GroovyHelpers.java b/bson/src/test/unit/util/GroovyHelpers.java new file mode 100644 index 00000000000..8319f7f9e16 --- /dev/null +++ b/bson/src/test/unit/util/GroovyHelpers.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package util; + +public final class GroovyHelpers { + // Workaround for the fact that Groovy will use its own custom equals method instead of calling the one on the instance. + public static boolean areEqual(final Object first, final Object second) { + return first.equals(second); + } + + private GroovyHelpers() { + } +} diff --git a/bson/src/test/unit/util/Hex.java b/bson/src/test/unit/util/Hex.java new file mode 100644 index 00000000000..3ea40ab39db --- /dev/null +++ b/bson/src/test/unit/util/Hex.java @@ -0,0 +1,53 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package util; + +public final class Hex { + public static byte[] decode(final String hex) { + if (hex.length() % 2 != 0) { + throw new IllegalArgumentException("A hex string must contain an even number of characters: " + hex); + } + + byte[] out = new byte[hex.length() / 2]; + + for (int i = 0; i < hex.length(); i += 2) { + int high = Character.digit(hex.charAt(i), 16); + int low = Character.digit(hex.charAt(i + 1), 16); + if (high == -1 || low == -1) { + throw new IllegalArgumentException("A hex string can only contain the characters 0-9, A-F, a-f: " + hex); + } + + out[i / 2] = (byte) (high * 16 + low); + } + + return out; + } + + private static final char[] UPPER_HEX_DIGITS = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', }; + + public static String encode(final byte[] bytes) { + StringBuilder stringBuilder = new StringBuilder(bytes.length * 2); + for (byte cur : bytes) { + stringBuilder.append(UPPER_HEX_DIGITS[(cur >> 4) & 0xF]); + stringBuilder.append(UPPER_HEX_DIGITS[(cur & 0xF)]); + } + return stringBuilder.toString(); + } + + private Hex() { + } +} diff --git a/bson/src/test/unit/util/JsonPoweredTestHelper.java b/bson/src/test/unit/util/JsonPoweredTestHelper.java new file mode 100644 index 00000000000..e261e132ab4 --- /dev/null +++ b/bson/src/test/unit/util/JsonPoweredTestHelper.java @@ -0,0 +1,137 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package util; + +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.assertions.Assertions; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.net.URI; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.nio.file.FileSystem; +import java.nio.file.FileSystems; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; + +public final class JsonPoweredTestHelper { + + private static final String SPECIFICATIONS_PREFIX = "/specifications/source/"; + + public static BsonDocument getTestDocument(final String resourcePath) { + BsonDocument testDocument = getTestDocumentWithMetaData(SPECIFICATIONS_PREFIX + resourcePath); + testDocument.remove("resourcePath"); + testDocument.remove("fileName"); + return testDocument; + } + + public static Collection getTestData(final String resourcePath) { + List data = new ArrayList<>(); + for (BsonDocument document : getSpecTestDocuments(resourcePath)) { + for (BsonValue test : document.getArray("tests")) { + BsonDocument testDocument = test.asDocument(); + data.add(new Object[]{document.getString("fileName").getValue(), + testDocument.getString("description").getValue(), + testDocument.getString("uri", new BsonString("")).getValue(), + testDocument}); + } + } + return data; + } + + public static List getSpecTestDocuments(final String resourcePath) { + return getTestDocuments(SPECIFICATIONS_PREFIX + resourcePath); + } + + public static List getTestDocuments(final String resourcePath) { + List files = new ArrayList<>(); + try { + URL urlResource = JsonPoweredTestHelper.class.getResource(resourcePath); + if (urlResource == null) { + Assertions.fail("No such resource: " + resourcePath); + } + + URI resource = urlResource.toURI(); + try (FileSystem fileSystem = (resource.getScheme().equals("jar") ? 
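/* A resource packaged in a jar must be opened through a zip FileSystem before it can be walked; a plain directory needs no extra file system. */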
FileSystems.newFileSystem(resource, Collections.emptyMap()) : null)) { + Path myPath = Paths.get(resource); + Files.walkFileTree(myPath, new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile(final Path filePath, final BasicFileAttributes attrs) throws IOException { + if (filePath.toString().endsWith(".json")) { + if (fileSystem == null) { + files.add(getTestDocumentWithMetaData(filePath.toString().substring(filePath.toString().lastIndexOf(resourcePath)))); + } else { + files.add(getTestDocumentWithMetaData(filePath.toString())); + } + } + return super.visitFile(filePath, attrs); + } + }); + } + } catch (Exception e) { + Assertions.fail("Unable to load resource: " + resourcePath, e); + } + + if (files.isEmpty()) { + Assertions.fail("No test documents found in: " + resourcePath); + } + return files; + } + + private static BsonDocument getTestDocumentWithMetaData(final String resourcePath) { + BsonDocument testDocument = BsonDocument.parse(resourcePathToString(resourcePath)); + testDocument.append("resourcePath", new BsonString(resourcePath)) + .append("fileName", new BsonString(resourcePath.substring(resourcePath.lastIndexOf('/') + 1))); + return testDocument; + } + + private static String resourcePathToString(final String resourcePath) { + StringBuilder stringBuilder = new StringBuilder(); + String line; + String ls = System.lineSeparator(); + try (InputStream inputStream = JsonPoweredTestHelper.class.getResourceAsStream(resourcePath)) { + if (inputStream == null) { + Assertions.fail("Unable to load resource: " + resourcePath); + } + try (BufferedReader reader = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8))) { + while ((line = reader.readLine()) != null) { + stringBuilder.append(line); + stringBuilder.append(ls); + } + } + } catch (Exception e) { + Assertions.fail("Unable to load resource", e); + } + return stringBuilder.toString(); + } + + private JsonPoweredTestHelper() { + } +} diff --git a/bson/src/test/unit/util/ThreadTestHelpers.java b/bson/src/test/unit/util/ThreadTestHelpers.java new file mode 100644 index 00000000000..e2115da079f --- /dev/null +++ b/bson/src/test/unit/util/ThreadTestHelpers.java @@ -0,0 +1,71 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package util; + +import org.opentest4j.MultipleFailuresError; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +public final class ThreadTestHelpers { + + private ThreadTestHelpers() { + } + + public static void executeAll(final int nThreads, final Runnable c) { + executeAll(Collections.nCopies(nThreads, c).toArray(new Runnable[0])); + } + + public static void executeAll(final Runnable... 
runnables) { + ExecutorService service = null; + try { + service = Executors.newFixedThreadPool(runnables.length); + CountDownLatch latch = new CountDownLatch(runnables.length); + List failures = Collections.synchronizedList(new ArrayList<>()); + for (final Runnable runnable : runnables) { + service.submit(() -> { + try { + runnable.run(); + } catch (Throwable e) { + failures.add(e); + } finally { + latch.countDown(); + } + }); + } + try { + latch.await(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new RuntimeException(e); + } + if (!failures.isEmpty()) { + MultipleFailuresError multipleFailuresError = new MultipleFailuresError("Failed to execute all", failures); + failures.forEach(multipleFailuresError::addSuppressed); + throw multipleFailuresError; + } + } finally { + if (service != null) { + service.shutdown(); + } + } + } +} diff --git a/build.gradle.kts b/build.gradle.kts new file mode 100644 index 00000000000..3112e2c59b9 --- /dev/null +++ b/build.gradle.kts @@ -0,0 +1,49 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import java.time.Duration + +plugins { + id("eclipse") + id("idea") + alias(libs.plugins.nexus.publish) +} + +val nexusUsername: Provider = providers.gradleProperty("nexusUsername") +val nexusPassword: Provider = providers.gradleProperty("nexusPassword") + +nexusPublishing { + packageGroup.set("org.mongodb") + repositories { + sonatype { + username.set(nexusUsername) + password.set(nexusPassword) + + // central portal URLs + nexusUrl.set(uri("https://ossrh-staging-api.central.sonatype.com/service/local/")) + snapshotRepositoryUrl.set(uri("https://central.sonatype.com/repository/maven-snapshots/")) + } + } + + connectTimeout.set(Duration.ofMinutes(5)) + clientTimeout.set(Duration.ofMinutes(30)) + + transitionCheckOptions { + // We have many artifacts and Maven Central can take a long time on its compliance checks. + // Set the timeout for waiting for the repository to close to a comfortable 50 minutes. + maxRetries.set(300) + delayBetween.set(Duration.ofSeconds(10)) + } +} diff --git a/build.properties b/build.properties deleted file mode 100644 index a41535ce2f4..00000000000 --- a/build.properties +++ /dev/null @@ -1,27 +0,0 @@ -# -# Copyright 2011, 10gen -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at: -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -javac.source=1.5 - -# IMPORTANT: MAKE SURE YOU CHANGE BOTH lib.version AND lib.version.osi.compat, ACCORDING TO THIS PATTERN -# lib.version=2.8.0-SNAPSHOT ==> lib.version.osgi.compat=2.8.0.BUILD-SNAPSHOT -# lib.version=2.8.0-RC1 ==> lib.version.osgi.compat=2.8.0.RC1 -# lib.version=2.8.0 ==> lib.version.osgi.compat=2.8.0.RELEASE -lib.version=2.12.0-SNAPSHOT -lib.version.osgi.compat=2.12.0.BUILD-SNAPSHOT - -compatibility.baseline.version=2.11.0 -url.libbase=http://driver-downloads.mongodb.org/java diff --git a/build.xml b/build.xml deleted file mode 100644 index 1a2651304fe..00000000000 --- a/build.xml +++ /dev/null @@ -1,382 +0,0 @@ diff --git a/buildSrc/build.gradle.kts b/buildSrc/build.gradle.kts new file mode 100644 index 00000000000..33d758d0753 --- /dev/null +++ b/buildSrc/build.gradle.kts @@ -0,0 +1,69 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +plugins { + id("java-library") + `kotlin-dsl` + alias(libs.plugins.spotless) + alias(libs.plugins.detekt) apply false +} + +repositories { + gradlePluginPortal() + mavenCentral() + google() +} + +// Spotless configuration for `buildSrc` code. +spotless { + kotlinGradle { + target("**/*.gradle.kts") + ktfmt("0.39").dropboxStyle().configure { + it.setMaxWidth(120) + it.setRemoveUnusedImport(true) + } + trimTrailingWhitespace() + indentWithSpaces() + endWithNewline() + licenseHeaderFile( + "../config/mongodb.license", "(package|group|plugins|import|buildscript|rootProject|@Suppress)") + } + + kotlin { + target("**/*.kt") + ktfmt().dropboxStyle().configure { + it.setMaxWidth(120) + it.setRemoveUnusedImport(true) + } + trimTrailingWhitespace() + indentWithSpaces() + endWithNewline() + licenseHeaderFile(rootProject.file("../config/mongodb.license")) + } + + java { + palantirJavaFormat() + target("src/*/java/**/*.java") + removeUnusedImports() + trimTrailingWhitespace() + indentWithSpaces() + endWithNewline() + licenseHeaderFile(rootProject.file("../config/mongodb.license")) + } +} + +java { toolchain { languageVersion.set(JavaLanguageVersion.of("17")) } } + +tasks.findByName("check")?.dependsOn("spotlessCheck") diff --git a/buildSrc/settings.gradle.kts b/buildSrc/settings.gradle.kts new file mode 100644 index 00000000000..832331d3e3e --- /dev/null +++ b/buildSrc/settings.gradle.kts @@ -0,0 +1,21 @@ +/* + * Copyright 2008-present MongoDB, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +plugins { + // Add support for `libs.versions.toml` within `buildSrc` + // https://github.com/radoslaw-panuszewski/typesafe-conventions-gradle-plugin + // https://github.com/gradle/gradle/issues/15383 + id("dev.panuszewski.typesafe-conventions") version "0.7.3" +} diff --git a/buildSrc/src/main/java/com/mongodb/doclet/AtlasManualTaglet.java b/buildSrc/src/main/java/com/mongodb/doclet/AtlasManualTaglet.java new file mode 100644 index 00000000000..673b55a6bf6 --- /dev/null +++ b/buildSrc/src/main/java/com/mongodb/doclet/AtlasManualTaglet.java @@ -0,0 +1,33 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.doclet; + +public final class AtlasManualTaglet extends DocTaglet { + @Override + public String getName() { + return "mongodb.atlas.manual"; + } + + @Override + protected String getHeader() { + return "MongoDB Atlas documentation"; + } + + @Override + protected String getBaseDocURI() { + return "https://www.mongodb.com/docs/atlas/"; + } +} diff --git a/buildSrc/src/main/java/com/mongodb/doclet/DocTaglet.java b/buildSrc/src/main/java/com/mongodb/doclet/DocTaglet.java new file mode 100644 index 00000000000..0f51f45f197 --- /dev/null +++ b/buildSrc/src/main/java/com/mongodb/doclet/DocTaglet.java @@ -0,0 +1,77 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.doclet; + +import static java.util.Arrays.asList; +import static jdk.javadoc.doclet.Taglet.Location.CONSTRUCTOR; +import static jdk.javadoc.doclet.Taglet.Location.FIELD; +import static jdk.javadoc.doclet.Taglet.Location.METHOD; +import static jdk.javadoc.doclet.Taglet.Location.OVERVIEW; +import static jdk.javadoc.doclet.Taglet.Location.PACKAGE; +import static jdk.javadoc.doclet.Taglet.Location.TYPE; + +import com.sun.source.doctree.DocTree; +import com.sun.source.doctree.UnknownBlockTagTree; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import javax.lang.model.element.Element; +import jdk.javadoc.doclet.Taglet; + +public abstract class DocTaglet implements Taglet { + + @Override + public Set getAllowedLocations() { + return new HashSet<>(asList(CONSTRUCTOR, METHOD, FIELD, OVERVIEW, PACKAGE, TYPE)); + } + + @Override + public boolean isInlineTag() { + return false; + } + + @Override + public String toString(List tags, Element element) { + if (tags.size() == 0) { + return null; + } + + StringBuilder buf = + new StringBuilder(String.format("
<dl><dt><span class=\"strong\">%s</span></dt>
", getHeader())); + for (DocTree tag : tags) { + String text = ((UnknownBlockTagTree) tag).getContent().get(0).toString(); + buf.append("
").append(genLink(text)).append("
"); + } + return buf.toString(); + } + + protected String genLink(final String text) { + String relativePath = text; + String display = text; + + int firstSpace = text.indexOf(' '); + if (firstSpace != -1) { + relativePath = text.substring(0, firstSpace); + display = text.substring(firstSpace).trim(); + } + + return String.format("%s", getBaseDocURI(), relativePath, display); + } + + protected abstract String getHeader(); + + protected abstract String getBaseDocURI(); +} diff --git a/buildSrc/src/main/java/com/mongodb/doclet/DochubTaglet.java b/buildSrc/src/main/java/com/mongodb/doclet/DochubTaglet.java new file mode 100644 index 00000000000..a6b960eaa27 --- /dev/null +++ b/buildSrc/src/main/java/com/mongodb/doclet/DochubTaglet.java @@ -0,0 +1,34 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.doclet; + +public class DochubTaglet extends DocTaglet { + + @Override + public String getName() { + return "mongodb.driver.dochub"; + } + + @Override + protected String getHeader() { + return "MongoDB documentation"; + } + + @Override + protected String getBaseDocURI() { + return "https://dochub.mongodb.org/"; + } +} diff --git a/buildSrc/src/main/java/com/mongodb/doclet/ManualTaglet.java b/buildSrc/src/main/java/com/mongodb/doclet/ManualTaglet.java new file mode 100644 index 00000000000..ff49c9ab37c --- /dev/null +++ b/buildSrc/src/main/java/com/mongodb/doclet/ManualTaglet.java @@ -0,0 +1,34 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.doclet; + +public class ManualTaglet extends DocTaglet { + + @Override + public String getName() { + return "mongodb.driver.manual"; + } + + @Override + protected String getHeader() { + return "MongoDB documentation"; + } + + @Override + protected String getBaseDocURI() { + return "https://www.mongodb.com/docs/manual/"; + } +} diff --git a/buildSrc/src/main/java/com/mongodb/doclet/ServerReleaseTaglet.java b/buildSrc/src/main/java/com/mongodb/doclet/ServerReleaseTaglet.java new file mode 100644 index 00000000000..9b4f88fbf92 --- /dev/null +++ b/buildSrc/src/main/java/com/mongodb/doclet/ServerReleaseTaglet.java @@ -0,0 +1,34 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.doclet; + +public class ServerReleaseTaglet extends DocTaglet { + + @Override + public String getName() { + return "mongodb.server.release"; + } + + @Override + protected String getHeader() { + return "Since server release"; + } + + @Override + protected String getBaseDocURI() { + return "https://www.mongodb.com/docs/manual/release-notes/"; + } +} diff --git a/buildSrc/src/main/kotlin/ProjectExtensions.kt b/buildSrc/src/main/kotlin/ProjectExtensions.kt new file mode 100644 index 00000000000..a369aefc9d2 --- /dev/null +++ b/buildSrc/src/main/kotlin/ProjectExtensions.kt @@ -0,0 +1,56 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import org.gradle.api.Project +import org.gradle.api.java.archives.Manifest +import org.gradle.api.publish.maven.MavenPublication +import org.gradle.api.tasks.bundling.Jar +import org.gradle.kotlin.dsl.named +import org.gradle.kotlin.dsl.withType + +object ProjectExtensions { + + /** + * Extension function to get and validate the current scala version + * + * See: gradle.properties for `supportedScalaVersions` and `defaultScalaVersion` + */ + fun Project.scalaVersion(): String { + val supportedScalaVersions = (project.property("supportedScalaVersions") as String).split(",") + val scalaVersion: String = + (project.findProperty("scalaVersion") ?: project.property("defaultScalaVersion")) as String + + if (!supportedScalaVersions.contains(scalaVersion)) { + throw UnsupportedOperationException( + """Scala version: $scalaVersion is not a supported scala version. + |Supported versions: $supportedScalaVersions + """ + .trimMargin()) + } + + return scalaVersion + } + + /** Extension function to configure the maven publication */ + fun Project.configureMavenPublication(configure: MavenPublication.() -> Unit = {}) { + val publishing = extensions.getByName("publishing") as org.gradle.api.publish.PublishingExtension + publishing.publications.named("maven") { configure() } + } + + /** Extension function to configure the jars manifest */ + fun Project.configureJarManifest(configure: Manifest.() -> Unit = {}) { + tasks.withType { manifest { afterEvaluate { configure() } } } + } +} diff --git a/buildSrc/src/main/kotlin/conventions/Companion.kt b/buildSrc/src/main/kotlin/conventions/Companion.kt new file mode 100644 index 00000000000..c28eef2a080 --- /dev/null +++ b/buildSrc/src/main/kotlin/conventions/Companion.kt @@ -0,0 +1,29 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package conventions + +import org.gradle.accessors.dm.LibrariesForLibs +import org.gradle.api.Project +import org.gradle.kotlin.dsl.getByType + +// Adds the `libs` value for use in conventions +internal val Project.libs: LibrariesForLibs + get() = extensions.getByType() + +/** Extension function to determine if a project property has been set. */ +fun Project.buildingWith(name: String): Boolean { + return this.findProperty(name)?.toString()?.toBoolean() ?: false +} diff --git a/buildSrc/src/main/kotlin/conventions/bnd.gradle.kts b/buildSrc/src/main/kotlin/conventions/bnd.gradle.kts new file mode 100644 index 00000000000..bbea4bf9878 --- /dev/null +++ b/buildSrc/src/main/kotlin/conventions/bnd.gradle.kts @@ -0,0 +1,23 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package conventions + +import libs + +// Gradle Plugin for developing OSGi bundles with Bnd. +// https://plugins.gradle.org/plugin/biz.aQute.bnd.builder + +plugins { alias(libs.plugins.bnd) } diff --git a/buildSrc/src/main/kotlin/conventions/codenarc.gradle.kts b/buildSrc/src/main/kotlin/conventions/codenarc.gradle.kts new file mode 100644 index 00000000000..25cd5c00cc8 --- /dev/null +++ b/buildSrc/src/main/kotlin/conventions/codenarc.gradle.kts @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package conventions + +// The CodeNarc plugin performs quality checks on your project’s Groovy source files +// https://docs.gradle.org/current/userguide/codenarc_plugin.html +plugins { id("codenarc") } + +codenarc { + toolVersion = "1.6.1" + reportFormat = if (project.buildingWith("xmlReports.enabled")) "xml" else "html" +} diff --git a/buildSrc/src/main/kotlin/conventions/detekt.gradle.kts b/buildSrc/src/main/kotlin/conventions/detekt.gradle.kts new file mode 100644 index 00000000000..4759138904a --- /dev/null +++ b/buildSrc/src/main/kotlin/conventions/detekt.gradle.kts @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package conventions + +import io.gitlab.arturbosch.detekt.Detekt +import libs + +// Static code analysis for Kotlin +// https://plugins.gradle.org/plugin/io.gitlab.arturbosch.detekt +plugins { alias(libs.plugins.detekt) } + +detekt { + allRules = true // fail build on any finding + buildUponDefaultConfig = true // preconfigure defaults + config = rootProject.files("config/detekt/detekt.yml") // point to your custom config defining rules to run, + // overwriting default behavior + baseline = rootProject.file("config/detekt/baseline.xml") // a way of suppressing issues before introducing detekt + source = + files( + file("src/main/kotlin"), + file("src/test/kotlin"), + file("src/integrationTest/kotlin"), + ) +} + +tasks.withType().configureEach { + reports { + html.required.set(true) // observe findings in your browser with structure and code snippets + xml.required.set(true) // checkstyle like format mainly for integrations like Jenkins + txt.required.set(false) // similar to the console output, contains issue signature to manually edit + } +} diff --git a/buildSrc/src/main/kotlin/conventions/dokka.gradle.kts b/buildSrc/src/main/kotlin/conventions/dokka.gradle.kts new file mode 100644 index 00000000000..06b40161697 --- /dev/null +++ b/buildSrc/src/main/kotlin/conventions/dokka.gradle.kts @@ -0,0 +1,49 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package conventions + +import libs + +// Dokka, the documentation engine for Kotlin +// https://plugins.gradle.org/plugin/org.jetbrains.dokka +plugins { + alias(libs.plugins.dokka) + id("conventions.publishing") +} + +// Create a generic `docs` task +tasks.register("docs") { + group = "documentation" + dependsOn("dokkaHtml") +} + +val dokkaOutputDir: Provider = rootProject.layout.buildDirectory.dir("docs/${base.archivesName.get()}") + +tasks.dokkaHtml.configure { + outputDirectory.set(dokkaOutputDir.get().asFile) + moduleName.set(base.archivesName.get()) +} + +val cleanDokka by tasks.register("cleanDokka") { delete(dokkaOutputDir) } + +// Ensure dokka is used for the javadoc +afterEvaluate { + tasks.named("javadocJar").configure { + dependsOn("cleanDokka", "dokkaHtml") + archiveClassifier.set("javadoc") + from(dokkaOutputDir) + } +} diff --git a/buildSrc/src/main/kotlin/conventions/git-version.gradle.kts b/buildSrc/src/main/kotlin/conventions/git-version.gradle.kts new file mode 100644 index 00000000000..9ddfd25cab2 --- /dev/null +++ b/buildSrc/src/main/kotlin/conventions/git-version.gradle.kts @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package conventions + +// Provides the current git version for the build + +val gitVersion: Provider = + providers + .exec { + commandLine("git", "describe", "--tags", "--always", "--dirty") + isIgnoreExitValue = true + } + .standardOutput + .asText + .map { it.trim().removePrefix("r") } + .orElse("UNKNOWN") + +// Allows access to gitVersion extension to other conventions +extensions.add("gitVersion", gitVersion) + +// Debug task that outputs the gitVersion. +tasks.register("gitVersion") { doLast { println("Git version: ${gitVersion.get()}") } } diff --git a/buildSrc/src/main/kotlin/conventions/javadoc.gradle.kts b/buildSrc/src/main/kotlin/conventions/javadoc.gradle.kts new file mode 100644 index 00000000000..8ab2ef5bb5b --- /dev/null +++ b/buildSrc/src/main/kotlin/conventions/javadoc.gradle.kts @@ -0,0 +1,125 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package conventions + +// Provides the Javadoc configuration for the build +plugins { + id("java-library") + id("maven-publish") +} + +// Create a generic `docs` task +tasks.register("docs") { + group = "documentation" + dependsOn("javadoc") +} + +tasks.withType { + exclude("**/com/mongodb/**/assertions/**") + exclude("**/com/mongodb/**/internal/**") + exclude("**/org/bson/**/internal/**") + + setDestinationDir(rootProject.file("build/docs/${project.base.archivesName.get()}")) + + val standardDocletOptions = options as StandardJavadocDocletOptions + standardDocletOptions.apply { + author(true) + version(true) + links = + listOf( + "https://docs.oracle.com/en/java/javase/11/docs/api/", + "https://www.reactive-streams.org/reactive-streams-1.0.3-javadoc/") + tagletPath(rootProject.projectDir.resolve("buildSrc/build/classes/java/main")) + taglets("com.mongodb.doclet.AtlasManualTaglet") + taglets("com.mongodb.doclet.ManualTaglet") + taglets("com.mongodb.doclet.DochubTaglet") + taglets("com.mongodb.doclet.ServerReleaseTaglet") + encoding = "UTF-8" + charSet("UTF-8") + docEncoding("UTF-8") + addBooleanOption("html5", true) + addBooleanOption("-allow-script-in-comments", true) + header( + """ + + """.trimIndent()) + } + + // Customizations for specific projects + afterEvaluate { + val docVersion = docVersion(project.version as String) + if (project.name != "bson") linksOfflineHelper(docVersion, "bson", standardDocletOptions) + if (!project.name.contains("bson") && project.name != "mongodb-driver-core") + linksOfflineHelper(docVersion, "mongodb-driver-core", standardDocletOptions) + if (!project.name.contains("bson") && project.name != "mongodb-driver-sync") + linksOfflineHelper(docVersion, "mongodb-driver-sync", standardDocletOptions) + } +} + +// Helper functions +internal fun docVersion(version: String): String { + val (major, minor, patch) = version.split("-").first().split(".").map { it.toInt() } + var docVersion = "${major}.${minor}" + if (version.contains("-SNAPSHOT") && patch == 0 && minor > 0) { + docVersion = "${major}.${minor - 1}" + } + return docVersion +} + +internal fun linksOfflineHelper(docVersion: String, packageName: String, options: StandardJavadocDocletOptions): Unit { + val docsPath = rootProject.file("build/docs/${packageName}") + if (docsPath.exists()) { + options.apply { + linksOffline( + "http://mongodb.github.io/mongo-java-driver/${docVersion}/apidocs/${packageName}/", docsPath.path) + } + } +} diff --git a/buildSrc/src/main/kotlin/conventions/optional.gradle.kts b/buildSrc/src/main/kotlin/conventions/optional.gradle.kts new file mode 100644 index 00000000000..1bf10321971 --- /dev/null +++ b/buildSrc/src/main/kotlin/conventions/optional.gradle.kts @@ -0,0 +1,33 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
diff --git a/buildSrc/src/main/kotlin/conventions/optional.gradle.kts b/buildSrc/src/main/kotlin/conventions/optional.gradle.kts
new file mode 100644
index 00000000000..1bf10321971
--- /dev/null
+++ b/buildSrc/src/main/kotlin/conventions/optional.gradle.kts
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package conventions
+
+// Provides optional dependency support, e.g. optionalApi, optionalImplementation
+plugins {
+    id("java-library")
+    id("maven-publish")
+}
+
+java { registerFeature("optional") { usingSourceSet(sourceSets["main"]) } }
+
+// Suppress POM warnings for the optional configurations (e.g. optionalApi, optionalImplementation)
+afterEvaluate {
+    configurations
+        .filter { it.name.startsWith("optional") }
+        .forEach { optional ->
+            publishing.publications.named<MavenPublication>("maven") { suppressPomMetadataWarningsFor(optional.name) }
+        }
+}
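+
+// Example usage from a consuming module (the snappy dependency is illustrative):
+//   dependencies {
+//       "optionalApi"(libs.snappy.java)  // surfaces as an optional dependency in the published POM
+//   }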
diff --git a/buildSrc/src/main/kotlin/conventions/publishing.gradle.kts b/buildSrc/src/main/kotlin/conventions/publishing.gradle.kts
new file mode 100644
index 00000000000..b243ce7df2e
--- /dev/null
+++ b/buildSrc/src/main/kotlin/conventions/publishing.gradle.kts
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package conventions
+
+// Provides the publishing configuration for the build
+//
+// Note: Further configuration can be achieved using the `project.configureMavenPublication` and
+// `project.configureJarManifest` helpers.
+// See: `ProjectExtensions.kt` for more information
+plugins {
+    id("conventions.git-version")
+    id("maven-publish")
+    id("signing")
+}
+
+val signingKey: Provider<String> = providers.gradleProperty("signingKey")
+val signingPassword: Provider<String> = providers.gradleProperty("signingPassword")
+@Suppress("UNCHECKED_CAST") val gitVersion: Provider<String> = project.findProperty("gitVersion") as Provider<String>
+
+tasks.withType<PublishToMavenRepository>().configureEach {
+    // Gradle warns about publish tasks using signing task outputs without explicit
+    // dependencies. Here's a quick fix.
+    dependsOn(tasks.withType<Sign>())
+    mustRunAfter(tasks.withType<Jar>())
+
+    doLast {
+        logger.lifecycle("[task: ${name}] ${publication.groupId}:${publication.artifactId}:${publication.version}")
+    }
+}
+
+val localBuildRepo: Provider<Directory> = rootProject.layout.buildDirectory.dir("repo")
+
+publishing {
+    repositories {
+
+        // publish to a local dir, for artifact tracking and testing:
+        // `./gradlew publishMavenPublicationToLocalBuildRepository`
+        maven {
+            url = uri(localBuildRepo.get())
+            name = "LocalBuild"
+        }
+    }
+
+    publications.create<MavenPublication>("maven") {
+        components.findByName("java")?.let { from(it) }
+
+        pom {
+            url.set("https://www.mongodb.com/")
+            scm {
+                url.set("https://github.com/mongodb/mongo-java-driver")
+                connection.set("scm:https://github.com/mongodb/mongo-java-driver.git")
+                developerConnection.set("scm:https://github.com/mongodb/mongo-java-driver.git")
+            }
+
+            developers {
+                developer {
+                    name.set("Various")
+                    organization.set("MongoDB")
+                }
+            }
+
+            licenses {
+                license {
+                    name.set("The Apache License, Version 2.0")
+                    url.set("https://www.apache.org/licenses/LICENSE-2.0.txt")
+                }
+            }
+        }
+
+        // Ensure we pick up the finally configured `base.archivesName`, not the default (the project name).
+        afterEvaluate { artifactId = base.archivesName.get() }
+    }
+}
+
+tasks.withType<Jar> {
+    manifest { attributes["-exportcontents"] = "*;-noimport:=true" }
+
+    afterEvaluate {
+        manifest {
+            if (attributes.containsKey("-nomanifest")) {
+                attributes.remove("-exportcontents")
+            } else {
+                attributes["Bundle-Version"] = project.version
+                attributes["Bundle-SymbolicName"] =
+                    "${project.findProperty("group")}.${project.findProperty("archivesBaseName")}"
+                attributes["Build-Version"] = gitVersion.get()
+                attributes["Bundle-Name"] = base.archivesName.get()
+            }
+        }
+    }
+}
+
+signing {
+    if (signingKey.isPresent && signingPassword.isPresent) {
+        logger.info("[${project.displayName}] Signing is enabled")
+        useInMemoryPgpKeys(signingKey.get(), signingPassword.get())
+        sign(publishing.publications["maven"])
+    } else {
+        logger.info("[${project.displayName}] No signing keys found, skipping signing configuration")
+    }
+}
+
+tasks.named<Delete>("clean") { delete(localBuildRepo) }
+
+tasks.withType<GenerateModuleMetadata> { enabled = false }
+
+tasks.register("publishSnapshots") {
+    group = "publishing"
+    description = "Publishes snapshots to Sonatype"
+
+    if (version.toString().endsWith("-SNAPSHOT")) {
+        dependsOn(tasks.named("publishAllPublicationsToLocalBuildRepository"))
+        dependsOn(tasks.named("publishToSonatype"))
+    }
+}
+
+tasks.register("publishArchives") {
+    group = "publishing"
+    description = "Publishes a release and uploads to Sonatype / Maven Central"
+
+    val currentGitVersion = gitVersion.get()
+    val gitVersionMatch = currentGitVersion == version
+    doFirst {
+        if (!gitVersionMatch) {
+            val cause =
+                """
+                Version mismatch:
+                =================
+
+                $version != $currentGitVersion
+
+                The project version does not match the git tag.
+                """.trimIndent()
+            throw GradleException(cause)
+        } else {
+            println("Publishing: ${project.name} : $currentGitVersion")
+        }
+    }
+    if (gitVersionMatch) {
+        dependsOn(tasks.named("publishAllPublicationsToLocalBuildRepository"))
+        dependsOn(tasks.named("publishToSonatype"))
+    }
+}
+
+// workaround for https://github.com/gradle/gradle/issues/16543
+inline fun TaskContainer.provider(taskName: String): Provider<Task> =
+    providers.provider { taskName }.flatMap { named(it) }
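+
+// Typical invocations (flags and property values shown for illustration):
+//   ./gradlew publishMavenPublicationToLocalBuildRepository                 # inspect artifacts under build/repo
+//   ./gradlew publishArchives -PsigningKey="..." -PsigningPassword="..."   # release; requires version == git tag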
diff --git a/buildSrc/src/main/kotlin/conventions/scaladoc.gradle.kts b/buildSrc/src/main/kotlin/conventions/scaladoc.gradle.kts
new file mode 100644
index 00000000000..b14d9573e72
--- /dev/null
+++ b/buildSrc/src/main/kotlin/conventions/scaladoc.gradle.kts
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package conventions
+
+// Provides the scaladoc configuration for the build
+plugins {
+    id("scala")
+    id("conventions.publishing")
+}
+
+// Create a generic `docs` task
+tasks.register("docs") {
+    group = "documentation"
+    dependsOn("scaladoc")
+}
+
+tasks.withType<ScalaDoc> {
+    group = "documentation"
+
+    destinationDir = rootProject.file("build/docs/${project.base.archivesName.get()}")
+}
diff --git a/buildSrc/src/main/kotlin/conventions/spotbugs.gradle.kts b/buildSrc/src/main/kotlin/conventions/spotbugs.gradle.kts
new file mode 100644
index 00000000000..e7ea096fc33
--- /dev/null
+++ b/buildSrc/src/main/kotlin/conventions/spotbugs.gradle.kts
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package conventions
+
+import com.github.spotbugs.snom.SpotBugsTask
+import libs
+import org.gradle.kotlin.dsl.dependencies
+
+// Performs quality checks on your project's Java source files using SpotBugs
+// https://plugins.gradle.org/plugin/com.github.spotbugs
+plugins {
+    id("java-library")
+    alias(libs.plugins.spotbugs)
+}
+
+dependencies {
+    compileOnly(libs.findbugs.jsr)
+
+    testImplementation(libs.findbugs.jsr)
+}
+
+spotbugs {
+    if (!project.buildingWith("ssdlcReport.enabled")) {
+        excludeFilter.set(rootProject.file("config/spotbugs/exclude.xml"))
+    }
+}
+
+tasks.withType<SpotBugsTask>().configureEach {
+    when (name) {
+        "spotbugsMain" ->
+            reports {
+                register("xml") { required.set(project.buildingWith("xmlReports.enabled")) }
+                register("html") { required.set(!project.buildingWith("xmlReports.enabled")) }
+                register("sarif") { required.set(project.buildingWith("ssdlcReport.enabled")) }
+            }
+        "spotbugsTest",
+        "spotbugsIntegrationTest" -> enabled = false
+    }
+}
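+
+// Assumed property toggles (the buildingWith(...) helper is defined elsewhere in buildSrc):
+//   ./gradlew spotbugsMain -PxmlReports.enabled=true   # XML report instead of HTML
+//   ./gradlew spotbugsMain -PssdlcReport.enabled=true  # adds SARIF output and skips the exclude filter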
diff --git a/buildSrc/src/main/kotlin/conventions/spotless.gradle.kts b/buildSrc/src/main/kotlin/conventions/spotless.gradle.kts
new file mode 100644
index 00000000000..7a148f57735
--- /dev/null
+++ b/buildSrc/src/main/kotlin/conventions/spotless.gradle.kts
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package conventions
+
+import com.diffplug.gradle.spotless.SpotlessApply
+import com.diffplug.gradle.spotless.SpotlessCheck
+import libs
+
+// Spotless - keep your code spotless
+// https://plugins.gradle.org/plugin/com.diffplug.spotless
+plugins { alias(libs.plugins.spotless) }
+
+val doesNotHaveACustomLicenseHeader = "/^(?s)(?!.*@custom-license-header).*/"
+
+spotless {
+    kotlinGradle {
+        ktfmt("0.39").dropboxStyle().configure { it.setMaxWidth(120) }
+        trimTrailingWhitespace()
+        indentWithSpaces()
+        endWithNewline()
+        licenseHeaderFile(rootProject.file("config/mongodb.license"), "(group|plugins|import|buildscript|rootProject)")
+    }
+
+    scala {
+        target("**/*.scala")
+        scalafmt().configFile(rootProject.file("config/scala/scalafmt.conf"))
+    }
+
+    kotlin {
+        target("**/*.kt")
+        ktfmt().dropboxStyle().configure { it.setMaxWidth(120) }
+        trimTrailingWhitespace()
+        indentWithSpaces()
+        endWithNewline()
+        licenseHeaderFile(rootProject.file("config/mongodb.license"))
+            .named("standard")
+            .onlyIfContentMatches(doesNotHaveACustomLicenseHeader)
+    }
+
+    format("extraneous") {
+        target("*.xml", "*.yml", "*.md")
+        trimTrailingWhitespace()
+        indentWithSpaces()
+        endWithNewline()
+    }
+}
+
+tasks.named("check") { dependsOn("spotlessApply") }
+
+tasks {
+    withType<SpotlessApply>().configureEach {
+        notCompatibleWithConfigurationCache("https://github.com/diffplug/spotless/issues/644")
+    }
+    withType<SpotlessCheck>().configureEach {
+        notCompatibleWithConfigurationCache("https://github.com/diffplug/spotless/issues/644")
+    }
+}
diff --git a/buildSrc/src/main/kotlin/conventions/test-artifacts-runtime-dependencies.gradle.kts b/buildSrc/src/main/kotlin/conventions/test-artifacts-runtime-dependencies.gradle.kts
new file mode 100644
index 00000000000..73b2b891faa
--- /dev/null
+++ b/buildSrc/src/main/kotlin/conventions/test-artifacts-runtime-dependencies.gradle.kts
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package conventions
+
+plugins { id("java-library") }
+
+// Also include test runtime dependencies
+dependencies {
+    testRuntimeClasspath(platform(libs.netty.bom))
+    testRuntimeClasspath(libs.netty.tcnative.boringssl.static)
+    listOf("linux-x86_64", "linux-aarch_64", "osx-x86_64", "osx-aarch_64", "windows-x86_64").forEach { arch ->
+        testRuntimeClasspath(variantOf(libs.netty.tcnative.boringssl.static) { classifier(arch) })
+    }
+}
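+
+// The classifier variants above add the platform specific netty-tcnative boringssl binaries,
+// so the same test runtime classpath resolves on Linux and macOS (x86_64/aarch64) and Windows (x86_64).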
diff --git a/buildSrc/src/main/kotlin/conventions/test-artifacts.gradle.kts b/buildSrc/src/main/kotlin/conventions/test-artifacts.gradle.kts
new file mode 100644
index 00000000000..f82a88c7df9
--- /dev/null
+++ b/buildSrc/src/main/kotlin/conventions/test-artifacts.gradle.kts
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package conventions
+
+import project.DEFAULT_JAVA_VERSION
+
+// Allows test artifacts (resources and code) to be shared between projects
+plugins { id("java-library") }
+
+/** Create a test artifact configuration so that test resources can be consumed by other projects. */
+val testArtifacts by configurations.creating
+val testJar by
+    tasks.registering(Jar::class) {
+        archiveBaseName.set("${project.name}-test")
+        from(sourceSets.test.get().output)
+        setDuplicatesStrategy(DuplicatesStrategy.EXCLUDE)
+    }
+
+val testJavaVersion: Int = findProperty("javaVersion")?.toString()?.toInt() ?: DEFAULT_JAVA_VERSION
+
+tasks.withType<Test> {
+    mustRunAfter(testJar)
+
+    // Needed because OidcAuthenticationProseTests calls `field.setAccessible(true)`
+    if (testJavaVersion >= DEFAULT_JAVA_VERSION) {
+        jvmArgs("--add-opens=java.base/java.lang=ALL-UNNAMED")
+    }
+}
+
+artifacts { add("testArtifacts", testJar) }
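+
+// Example of consuming these shared test fixtures from another module (module path illustrative):
+//   dependencies {
+//       testImplementation(project(path = ":driver-core", configuration = "testArtifacts"))
+//   }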
diff --git a/buildSrc/src/main/kotlin/conventions/test-include-optionals.gradle.kts b/buildSrc/src/main/kotlin/conventions/test-include-optionals.gradle.kts
new file mode 100644
index 00000000000..e7fde0b4c0f
--- /dev/null
+++ b/buildSrc/src/main/kotlin/conventions/test-include-optionals.gradle.kts
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package conventions
+
+import org.gradle.kotlin.dsl.dependencies
+import org.gradle.kotlin.dsl.project
+
+// Adds common optional dependencies to the testImplementation configuration
+dependencies {
+
+    // Encryption testing
+    "testImplementation"(project(path = ":mongodb-crypt", configuration = "default"))
+
+    // Netty stream type testing
+    "testImplementation"(platform(libs.netty.bom))
+    "testImplementation"(libs.bundles.netty)
+
+    // Snappy / zstd testing
+    "testImplementation"(libs.snappy.java)
+    "testImplementation"(libs.zstd.jni)
+
+    // Socket testing
+    "testImplementation"(libs.jnr.unixsocket)
+}
diff --git a/buildSrc/src/main/kotlin/conventions/testing-base.gradle.kts b/buildSrc/src/main/kotlin/conventions/testing-base.gradle.kts
new file mode 100644
index 00000000000..4708c742d40
--- /dev/null
+++ b/buildSrc/src/main/kotlin/conventions/testing-base.gradle.kts
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package conventions
+
+import com.adarshr.gradle.testlogger.theme.ThemeType
+import libs
+import project.DEFAULT_JAVA_VERSION
+
+// Default test configuration for projects
+//
+// Utilizes the test-logger plugin:
+// https://plugins.gradle.org/plugin/com.adarshr.test-logger
+plugins {
+    id("java-library")
+    alias(libs.plugins.test.logger)
+}
+
+tasks.withType<Test> {
+    maxHeapSize = "4g"
+    maxParallelForks = 1
+
+    useJUnitPlatform()
+
+    jvmArgs.add("-Dio.netty.leakDetection.level=paranoid")
+
+    // Pass any `org.mongodb.*` system settings through to the test JVM
+    systemProperties =
+        System.getProperties()
+            .map { (key, value) -> Pair(key.toString(), value) }
+            .filter { it.first.startsWith("org.mongodb.") }
+            .toMap()
+
+    // Convert any ssl based properties
+    if (project.buildingWith("ssl.enabled")) {
+        if (project.hasProperty("ssl.keyStoreType")) {
+            systemProperties(
+                mapOf(
+                    "javax.net.ssl.keyStoreType" to project.property("ssl.keyStoreType"),
+                    "javax.net.ssl.keyStore" to project.property("ssl.keyStore"),
+                    "javax.net.ssl.keyStorePassword" to project.property("ssl.keyStorePassword")))
+        }
+        if (project.hasProperty("ssl.trustStoreType")) {
+            systemProperties(
+                mapOf(
+                    "javax.net.ssl.trustStoreType" to project.property("ssl.trustStoreType"),
+                    "javax.net.ssl.trustStore" to project.property("ssl.trustStore"),
+                    "javax.net.ssl.trustStorePassword" to project.property("ssl.trustStorePassword")))
+        }
+        if (project.hasProperty("ocsp.property")) {
+            systemProperties(
+                mapOf(
+                    "org.mongodb.test.ocsp.tls.should.succeed" to project.property("ocsp.tls.should.succeed"),
+                    "java.security.properties" to file(project.property("ocsp.property").toString()),
+                    "com.sun.net.ssl.checkRevocation" to project.property("ssl.checkRevocation"),
+                    "jdk.tls.client.enableStatusRequestExtension" to
+                        project.property("client.enableStatusRequestExtension"),
+                    "jdk.tls.client.protocols" to project.property("client.protocols")))
+        }
+    }
+
+    // Convert gssapi properties
+    if (project.buildingWith("gssapi.enabled")) {
+        systemProperties(
+            mapOf(
+                "sun.security.krb5.debug" to project.property("sun.security.krb5.debug"),
+                "javax.security.auth.useSubjectCredsOnly" to "false",
+                "java.security.krb5.kdc" to project.property("krb5.kdc"),
+                "java.security.krb5.realm" to project.property("krb5.realm"),
+                "java.security.auth.login.config" to project.property("auth.login.config"),
+            ))
+    }
+
+    // Allow testing with an alternative JDK version
+    val testJavaVersion: Int = findProperty("javaVersion")?.toString()?.toInt() ?: DEFAULT_JAVA_VERSION
+    javaLauncher.set(javaToolchains.launcherFor { languageVersion = JavaLanguageVersion.of(testJavaVersion) })
+}
+
+// Pretty test output
+testlogger {
+    theme = ThemeType.STANDARD
+    showExceptions = true
+    showStackTraces = true
+    showFullStackTraces = false
+    showCauses = true
+    slowThreshold = 2000
+    showSummary = true
+    showSimpleNames = false
+    showPassed = true
+    showSkipped = true
+    showFailed = true
+    showOnlySlow = false
+    showStandardStreams = false
+    showPassedStandardStreams = true
+    showSkippedStandardStreams = true
+    showFailedStandardStreams = true
+    logLevel = LogLevel.LIFECYCLE
+}
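+
+// Any -Dorg.mongodb.* system property on the Gradle command line is forwarded to the test JVM,
+// e.g. (property name illustrative):
+//   ./gradlew test -Dorg.mongodb.test.uri=mongodb://localhost:27017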
diff --git a/buildSrc/src/main/kotlin/conventions/testing-integration.gradle.kts b/buildSrc/src/main/kotlin/conventions/testing-integration.gradle.kts
new file mode 100644
index 00000000000..bdd30028b18
--- /dev/null
+++ b/buildSrc/src/main/kotlin/conventions/testing-integration.gradle.kts
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package conventions
+
+// Adds a separate `integrationTest` source set and configuration to a project,
+// allowing unit and integration tests to run as separate tasks
+//
+// See:
+// https://docs.gradle.org/current/samples/sample_jvm_multi_project_with_additional_test_types.html
+plugins { id("java-library") }
+
+val integrationTest by sourceSets.creating
+
+configurations[integrationTest.implementationConfigurationName].extendsFrom(configurations.testImplementation.get())
+
+configurations[integrationTest.runtimeOnlyConfigurationName].extendsFrom(configurations.testRuntimeOnly.get())
+
+val integrationTestTask =
+    tasks.register<Test>("integrationTest") {
+        description = "Runs integration tests."
+        group = "verification"
+        useJUnitPlatform()
+
+        testClassesDirs = integrationTest.output.classesDirs
+        classpath = configurations[integrationTest.runtimeClasspathConfigurationName] + integrationTest.output
+        shouldRunAfter(tasks.test)
+    }
+
+tasks.findByName("check")?.dependsOn(integrationTestTask)
+
+dependencies {
+    "integrationTestImplementation"(project)
+    "integrationTestImplementation"(platform(libs.junit.bom))
+    "integrationTestImplementation"(libs.bundles.junit.vintage)
+}
+
+sourceSets["integrationTest"].java.srcDirs("src/integrationTest", "src/integrationTest/java")
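+
+// `./gradlew test` runs only unit tests; `./gradlew integrationTest` (or `check`) runs the
+// integration suite, ordered after the unit tests via shouldRunAfter.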
diff --git a/buildSrc/src/main/kotlin/conventions/testing-junit-vintage.gradle.kts b/buildSrc/src/main/kotlin/conventions/testing-junit-vintage.gradle.kts
new file mode 100644
index 00000000000..48f6eee92eb
--- /dev/null
+++ b/buildSrc/src/main/kotlin/conventions/testing-junit-vintage.gradle.kts
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package conventions
+
+// Default JUnit vintage (aka JUnit 4) test configuration for projects
+plugins { id("conventions.testing-base") }
+
+dependencies {
+    testImplementation(platform(libs.junit.bom))
+    testImplementation(libs.bundles.junit.vintage)
+}
+
+sourceSets["test"].java { srcDirs("src/test", "src/test/unit", "src/test/functional") }
diff --git a/buildSrc/src/main/kotlin/conventions/testing-junit.gradle.kts b/buildSrc/src/main/kotlin/conventions/testing-junit.gradle.kts
new file mode 100644
index 00000000000..7e72c5101bf
--- /dev/null
+++ b/buildSrc/src/main/kotlin/conventions/testing-junit.gradle.kts
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package conventions
+
+// Default JUnit test configuration for projects
+plugins { id("conventions.testing-base") }
+
+dependencies {
+    testImplementation(platform(libs.junit.bom))
+    testImplementation(libs.bundles.junit)
+}
+
+sourceSets["test"].java { srcDirs("src/test", "src/test/unit", "src/test/functional") }
diff --git a/buildSrc/src/main/kotlin/conventions/testing-mockito.gradle.kts b/buildSrc/src/main/kotlin/conventions/testing-mockito.gradle.kts
new file mode 100644
index 00000000000..08c33262e9e
--- /dev/null
+++ b/buildSrc/src/main/kotlin/conventions/testing-mockito.gradle.kts
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package conventions
+
+// Adds mockito support to a project
+plugins { id("java-library") }
+
+dependencies {
+    if (project.findProperty("javaVersion")?.toString() == "8") {
+        testImplementation(libs.bundles.mockito.java8)
+    } else {
+        testImplementation(libs.bundles.mockito)
+    }
+}
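+
+// Example: `./gradlew test -PjavaVersion=8` swaps in the Java 8 compatible Mockito bundle
+// declared in the version catalog.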
diff --git a/buildSrc/src/main/kotlin/conventions/testing-spock-exclude-slow.gradle.kts b/buildSrc/src/main/kotlin/conventions/testing-spock-exclude-slow.gradle.kts
new file mode 100644
index 00000000000..706bca27e8c
--- /dev/null
+++ b/buildSrc/src/main/kotlin/conventions/testing-spock-exclude-slow.gradle.kts
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package conventions
+
+import org.gradle.api.tasks.testing.Test
+import org.gradle.kotlin.dsl.withType
+
+// Excludes Spock tests tagged as Slow by default; `testSlowOnly` runs only those tests
+// See: https://spockframework.org/
+plugins { id("conventions.testing-spock") }
+
+tasks.withType<Test>().configureEach {
+    exclude("examples/**")
+    useJUnitPlatform { excludeTags("Slow") }
+
+    systemProperty("spock.configuration", "${rootProject.file("config/spock/ExcludeSlow.groovy")}")
+}
+
+tasks.register<Test>("testSlowOnly") {
+    useJUnitPlatform { includeTags("Slow") }
+
+    systemProperty("spock.configuration", "${rootProject.file("config/spock/OnlySlow.groovy")}")
+}
diff --git a/buildSrc/src/main/kotlin/conventions/testing-spock.gradle.kts b/buildSrc/src/main/kotlin/conventions/testing-spock.gradle.kts
new file mode 100644
index 00000000000..e4d46007856
--- /dev/null
+++ b/buildSrc/src/main/kotlin/conventions/testing-spock.gradle.kts
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package conventions
+
+import org.gradle.kotlin.dsl.dependencies
+import project.libs
+
+// Adds groovy spock testing framework support
+// See: https://spockframework.org/
+plugins {
+    id("groovy")
+    id("conventions.codenarc")
+    id("conventions.testing-base")
+    id("conventions.testing-junit-vintage")
+}
+
+dependencies {
+    testImplementation(platform(libs.spock.bom))
+    testImplementation(libs.bundles.spock)
+}
+
+sourceSets {
+    test {
+        groovy { srcDirs("src/test", "src/test/unit", "src/test/functional", "src/examples") }
+
+        // Disable java src directories - groovy will compile the mixed java and groovy test code
+        java { setSrcDirs(emptyList()) }
+    }
+}
diff --git a/buildSrc/src/main/kotlin/project/Companion.kt b/buildSrc/src/main/kotlin/project/Companion.kt
new file mode 100644
index 00000000000..b4b9650031a
--- /dev/null
+++ b/buildSrc/src/main/kotlin/project/Companion.kt
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package project
+
+import org.gradle.accessors.dm.LibrariesForLibs
+import org.gradle.api.Project
+import org.gradle.kotlin.dsl.getByType
+
+// Adds the `libs` version catalog accessor for use in the project's convention plugins
+internal val Project.libs: LibrariesForLibs
+    get() = extensions.getByType<LibrariesForLibs>()
+
+internal const val DEFAULT_JAVA_VERSION = 17
diff --git a/buildSrc/src/main/kotlin/project/base.gradle.kts b/buildSrc/src/main/kotlin/project/base.gradle.kts
new file mode 100644
index 00000000000..ed13c40cb76
--- /dev/null
+++ b/buildSrc/src/main/kotlin/project/base.gradle.kts
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package project
+
+plugins { id("conventions.git-version") }
+
+group = "org.mongodb"
+
+repositories {
+    mavenLocal()
+    google()
+    mavenCentral()
+}
diff --git a/buildSrc/src/main/kotlin/project/java.gradle.kts b/buildSrc/src/main/kotlin/project/java.gradle.kts
new file mode 100644
index 00000000000..60861167f17
--- /dev/null
+++ b/buildSrc/src/main/kotlin/project/java.gradle.kts
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package project
+
+plugins {
+    id("java-library")
+    id("checkstyle")
+    id("project.base")
+    id("conventions.bnd")
+    id("conventions.javadoc")
+    id("conventions.optional")
+    id("conventions.publishing")
+    id("conventions.spotbugs")
+    id("conventions.spotless")
+    id("conventions.testing-junit")
+}
+
+dependencies { "optionalApi"(libs.slf4j) }
+
+logger.info("Compiling ${project.name} using JDK${DEFAULT_JAVA_VERSION}")
+
+java {
+    sourceCompatibility = JavaVersion.VERSION_1_8
+    targetCompatibility = JavaVersion.VERSION_1_8
+
+    toolchain { languageVersion = JavaLanguageVersion.of(DEFAULT_JAVA_VERSION) }
+
+    withSourcesJar()
+    withJavadocJar()
+}
+
+tasks.withType<JavaCompile> {
+    options.encoding = "UTF-8"
+    options.release.set(8)
+}
+
+sourceSets["main"].java { srcDir("src/main") }
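+
+// Design note: compilation runs on the JDK 17 toolchain while `options.release.set(8)` pins both
+// the bytecode level and the visible JDK API to Java 8, so use of newer APIs fails at compile time.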
diff --git a/buildSrc/src/main/kotlin/project/kotlin.gradle.kts b/buildSrc/src/main/kotlin/project/kotlin.gradle.kts
new file mode 100644
index 00000000000..a0f53e0ad28
--- /dev/null
+++ b/buildSrc/src/main/kotlin/project/kotlin.gradle.kts
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package project
+
+import libs
+import org.jetbrains.kotlin.gradle.dsl.JvmTarget
+import org.jetbrains.kotlin.gradle.tasks.KotlinJvmCompile
+
+plugins {
+    alias(libs.plugins.kotlin.gradle)
+    id("project.base")
+    id("conventions.bnd")
+    id("conventions.detekt")
+    id("conventions.dokka")
+    id("conventions.optional")
+    id("conventions.publishing")
+    id("conventions.spotbugs")
+    id("conventions.spotless")
+    id("conventions.testing-integration")
+    id("conventions.testing-junit")
+}
+
+/* Compiling */
+logger.info("Compiling ${project.name} using JDK${DEFAULT_JAVA_VERSION}")
+
+kotlin {
+    explicitApi()
+    jvmToolchain(DEFAULT_JAVA_VERSION)
+}
+
+tasks.withType<KotlinJvmCompile> { compilerOptions { jvmTarget = JvmTarget.JVM_1_8 } }
+
+java {
+    sourceCompatibility = JavaVersion.VERSION_1_8
+    targetCompatibility = JavaVersion.VERSION_1_8
+
+    withSourcesJar()
+    withJavadocJar()
+}
+
+dependencies {
+    "optionalApi"(libs.slf4j)
+
+    // Align versions of all Kotlin components
+    implementation(platform(libs.kotlin.bom))
+    implementation(libs.kotlin.stdlib.jdk8)
+
+    testImplementation(libs.kotlin.reflect)
+    testImplementation(libs.junit.kotlin)
+    testImplementation(libs.bundles.mockito.kotlin)
+    testImplementation(libs.assertj)
+    testImplementation(libs.classgraph)
+
+    "integrationTestImplementation"(libs.junit.kotlin)
+}
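+
+// Design note: explicitApi() makes the Kotlin compiler require explicit visibility modifiers and
+// return types for public declarations, keeping the published Kotlin API surface deliberate.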
diff --git a/buildSrc/src/main/kotlin/project/scala.gradle.kts b/buildSrc/src/main/kotlin/project/scala.gradle.kts
new file mode 100644
index 00000000000..ff5918ae695
--- /dev/null
+++ b/buildSrc/src/main/kotlin/project/scala.gradle.kts
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package project
+
+import ProjectExtensions.configureMavenPublication
+import ProjectExtensions.scalaVersion
+
+plugins {
+    id("scala")
+    id("project.base")
+    id("conventions.bnd")
+    id("conventions.optional")
+    id("conventions.publishing")
+    id("conventions.scaladoc")
+    id("conventions.spotless")
+    id("conventions.testing-junit")
+    id("conventions.testing-integration")
+}
+
+group = "org.mongodb.scala"
+
+val scalaVersion: String by lazy { project.scalaVersion() }
+
+sourceSets["integrationTest"].scala.srcDir("src/integrationTest/scala")
+
+tasks.register("scalaCheck") {
+    description = "Runs all the Scala checks"
+    group = "verification"
+
+    dependsOn("clean", "compileTestScala", "check")
+    tasks.findByName("check")?.mustRunAfter("clean")
+}
+
+tasks.withType<Test> {
+    doFirst { println("Running Test task using scala version: $scalaVersion") }
+    useJUnitPlatform()
+}
+
+tasks.named<Delete>("clean") { delete(rootProject.file("build/docs/")) }
+
+java {
+    sourceCompatibility = JavaVersion.VERSION_1_8
+    targetCompatibility = JavaVersion.VERSION_1_8
+
+    withSourcesJar()
+    withJavadocJar()
+}
+
+afterEvaluate {
+    configureMavenPublication { artifactId = "${base.archivesName.get()}_${scalaVersion}" }
+
+    // ============================================
+    // Scala version specific configuration
+    // ============================================
+    val compileOptions = mutableListOf("-target:jvm-1.8")
+    when (scalaVersion) {
+        "2.13" -> {
+            dependencies {
+                api(libs.bundles.scala.v2.v13)
+
+                testImplementation(libs.bundles.scala.test.v2.v13)
+            }
+            sourceSets { main { scala { setSrcDirs(listOf("src/main/scala", "src/main/scala-2.13+")) } } }
+
+            compileOptions.addAll(
+                listOf(
+                    "-feature",
+                    "-unchecked",
+                    "-language:reflectiveCalls",
+                    "-Wconf:cat=deprecation:ws",
+                    "-Xlint:strict-unsealed-patmat"))
+        }
+        "2.12" -> {
+            dependencies {
+                api(libs.bundles.scala.v2.v12)
+
+                testImplementation(libs.bundles.scala.test.v2.v12)
+            }
+            sourceSets { main { scala { setSrcDirs(listOf("src/main/scala", "src/main/scala-2.13-")) } } }
+        }
+        "2.11" -> {
+            dependencies {
+                api(libs.bundles.scala.v2.v11)
+
+                testImplementation(libs.bundles.scala.test.v2.v11)
+            }
+            // Reuse the scala-2.12 sources as they are compatible.
+            sourceSets { main { scala { setSrcDirs(listOf("src/main/scala", "src/main/scala-2.13-")) } } }
+
+            compileOptions.add("-Xexperimental")
+        }
+    }
+
+    tasks.withType<ScalaCompile> {
+        doFirst { println("Compiling using scala version: $scalaVersion") }
+
+        scalaCompileOptions.isDeprecation = false
+        scalaCompileOptions.additionalParameters = compileOptions
+    }
+}
diff --git a/config/LICENSE b/config/LICENSE
new file mode 100644
index 00000000000..0d2bce7803f
--- /dev/null
+++ b/config/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2008-$today.year MongoDB, Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/config/checkstyle/checkstyle.xml b/config/checkstyle/checkstyle.xml
new file mode 100644
index 00000000000..3a88f90de8c
--- /dev/null
+++ b/config/checkstyle/checkstyle.xml
@@ -0,0 +1,237 @@
+<!-- Checkstyle rule definitions (XML content elided) -->
diff --git a/config/checkstyle/suppressions.xml b/config/checkstyle/suppressions.xml
new file mode 100644
index 00000000000..6d24f861e08
--- /dev/null
+++ b/config/checkstyle/suppressions.xml
@@ -0,0 +1,164 @@
+<!-- Checkstyle suppressions (XML content elided) -->
diff --git a/config/codenarc/codenarc.xml b/config/codenarc/codenarc.xml
new file mode 100644
index 00000000000..cfdd190abf6
--- /dev/null
+++ b/config/codenarc/codenarc.xml
@@ -0,0 +1,152 @@
+<!-- CodeNarc ruleset: "MongoDB Java driver rule set" (rule definitions elided) -->
diff --git a/config/detekt/baseline.xml b/config/detekt/baseline.xml
new file mode 100644
index 00000000000..d462c314e9c
--- /dev/null
+++ b/config/detekt/baseline.xml
@@ -0,0 +1,36 @@
+<!-- detekt baseline (XML wrappers elided); suppressed issues: -->
+ EmptyDefaultConstructor:UnifiedCrudTest.kt$UnifiedCrudTest$()
+ EmptyDefaultConstructor:UnifiedTest.kt$UnifiedTest$()
+ EmptyFunctionBlock:SyncMongoCursor.kt$SyncMongoCursor${}
+ IteratorNotThrowingNoSuchElementException:MongoCursor.kt$MongoCursor<T : Any> : IteratorCloseable
+ LargeClass:MongoCollectionTest.kt$MongoCollectionTest
+ LongMethod:FindFlowTest.kt$FindFlowTest$@Test fun shouldCallTheUnderlyingMethods()
+ LongMethod:FindIterableTest.kt$FindIterableTest$@Test fun shouldCallTheUnderlyingMethods()
+ LongMethod:KotlinSerializerCodecTest.kt$KotlinSerializerCodecTest$@Test fun testDataClassOptionalBsonValues()
+ MaxLineLength:ListCollectionNamesFlow.kt$ListCollectionNamesFlow$*
+ MaxLineLength:ListCollectionNamesIterable.kt$ListCollectionNamesIterable$*
+ MaxLineLength:ListCollectionsFlow.kt$ListCollectionsFlow$*
+ MaxLineLength:ListCollectionsIterable.kt$ListCollectionsIterable$*
+ MaxLineLength:MapReduceFlow.kt$MapReduceFlow$*
+ MaxLineLength:MapReduceIterable.kt$MapReduceIterable$*
+ SwallowedException:MockitoHelper.kt$MockitoHelper.DeepReflectionEqMatcher$e: Throwable
+ TooManyFunctions:ClientSession.kt$ClientSession : jClientSession
+ TooManyFunctions:FindFlow.kt$FindFlow<T : Any> : Flow
+ TooManyFunctions:FindIterable.kt$FindIterable<T : Any> : MongoIterable
+ TooManyFunctions:MongoCollection.kt$MongoCollection<T : Any>
+ TooManyFunctions:MongoDatabase.kt$MongoDatabase
+ TooManyFunctions:SyncClientSession.kt$SyncClientSession : JClientSession
+ TooManyFunctions:SyncFindIterable.kt$SyncFindIterable<T : Any> : JFindIterableSyncMongoIterable
+ TooManyFunctions:SyncMongoCluster.kt$SyncMongoCluster : JMongoCluster
+ TooManyFunctions:SyncMongoCollection.kt$SyncMongoCollection<T : Any> : JMongoCollection
+
TooManyFunctions:SyncMongoDatabase.kt$SyncMongoDatabase : JMongoDatabase + UnnecessaryAbstractClass:UnifiedTest.kt$UnifiedTest$UnifiedTest + UnsafeCallOnNullableType:SmokeTests.kt$SmokeTests$collection!! + UnusedPrivateMember:SyncMongoIterable.kt$SyncMongoIterable$private var timeoutMode: TimeoutMode? = null + VarCouldBeVal:SyncMongoIterable.kt$SyncMongoIterable$private var timeoutMode: TimeoutMode? = null + WildcardImport:SyncMongoDatabase.kt$import com.mongodb.client.* + + diff --git a/config/detekt/detekt.yml b/config/detekt/detekt.yml new file mode 100644 index 00000000000..4ac460b0738 --- /dev/null +++ b/config/detekt/detekt.yml @@ -0,0 +1,712 @@ +build: + maxIssues: 0 + excludeCorrectable: false + weights: + # complexity: 2 + # LongParameterList: 1 + # style: 1 + # comments: 1 + +config: + validation: true + warningsAsErrors: false + # when writing own rules with new properties, exclude the property path e.g.: 'my_rule_set,.*>.*>[my_property]' + excludes: '' + +processors: + active: true + exclude: + - 'DetektProgressListener' + # - 'KtFileCountProcessor' + # - 'PackageCountProcessor' + # - 'ClassCountProcessor' + # - 'FunctionCountProcessor' + # - 'PropertyCountProcessor' + # - 'ProjectComplexityProcessor' + # - 'ProjectCognitiveComplexityProcessor' + # - 'ProjectLLOCProcessor' + # - 'ProjectCLOCProcessor' + # - 'ProjectLOCProcessor' + # - 'ProjectSLOCProcessor' + # - 'LicenseHeaderLoaderExtension' + +console-reports: + active: true + exclude: + - 'ProjectStatisticsReport' + - 'ComplexityReport' + - 'NotificationReport' + - 'FindingsReport' + - 'FileBasedFindingsReport' + # - 'LiteFindingsReport' + +output-reports: + active: true + exclude: + # - 'TxtOutputReport' + # - 'XmlOutputReport' + # - 'HtmlOutputReport' + # - 'MdOutputReport' + +comments: + active: true + AbsentOrWrongFileLicense: + active: false + licenseTemplateFile: 'license.template' + licenseTemplateIsRegex: false + CommentOverPrivateFunction: + active: false + CommentOverPrivateProperty: + active: false + DeprecatedBlockTag: + active: false + EndOfSentenceFormat: + active: false + endOfSentenceFormat: '([.?!][ \t\n\r\f<])|([.?!:]$)' + KDocReferencesNonPublicProperty: + active: false + excludes: ['**/test/**'] + OutdatedDocumentation: + active: false + matchTypeParameters: true + matchDeclarationsOrder: true + allowParamOnConstructorProperties: false + UndocumentedPublicClass: + active: false + excludes: ['**/test/**'] + searchInNestedClass: true + searchInInnerClass: true + searchInInnerObject: true + searchInInnerInterface: true + UndocumentedPublicFunction: + active: false + excludes: ['**/test/**'] + UndocumentedPublicProperty: + active: false + excludes: ['**/test/**'] + +complexity: + active: true + ComplexCondition: + active: true + threshold: 4 + ComplexInterface: + active: false + threshold: 10 + includeStaticDeclarations: false + includePrivateDeclarations: false + ComplexMethod: + active: true + threshold: 15 + ignoreSingleWhenExpression: false + ignoreSimpleWhenEntries: false + ignoreNestingFunctions: false + nestingFunctions: + - 'also' + - 'apply' + - 'forEach' + - 'isNotNull' + - 'ifNull' + - 'let' + - 'run' + - 'use' + - 'with' + LabeledExpression: + active: false + ignoredLabels: [] + LargeClass: + active: true + threshold: 600 + LongMethod: + active: true + threshold: 60 + LongParameterList: + active: true + functionThreshold: 6 + constructorThreshold: 7 + ignoreDefaultParameters: false + ignoreDataClasses: true + ignoreAnnotatedParameter: [] + MethodOverloading: + active: false + threshold: 6 + 
NamedArguments: + active: false + threshold: 3 + ignoreArgumentsMatchingNames: false + NestedBlockDepth: + active: true + threshold: 4 + NestedScopeFunctions: + active: false + threshold: 1 + functions: + - 'kotlin.apply' + - 'kotlin.run' + - 'kotlin.with' + - 'kotlin.let' + - 'kotlin.also' + ReplaceSafeCallChainWithRun: + active: false + StringLiteralDuplication: + active: false + excludes: ['**/test/**'] + threshold: 3 + ignoreAnnotation: true + excludeStringsWithLessThan5Characters: true + ignoreStringsRegex: '$^' + TooManyFunctions: + active: true + excludes: ['**/test/**'] + thresholdInFiles: 25 + thresholdInClasses: 27 + thresholdInInterfaces: 25 + thresholdInObjects: 25 + thresholdInEnums: 25 + ignoreDeprecated: false + ignorePrivate: false + ignoreOverridden: false + +coroutines: + active: true + GlobalCoroutineUsage: + active: false + InjectDispatcher: + active: true + dispatcherNames: + - 'IO' + - 'Default' + - 'Unconfined' + RedundantSuspendModifier: + active: true + SleepInsteadOfDelay: + active: true + SuspendFunWithCoroutineScopeReceiver: + active: false + SuspendFunWithFlowReturnType: + active: true + +empty-blocks: + active: true + EmptyCatchBlock: + active: true + allowedExceptionNameRegex: '_|(ignore|expected).*' + EmptyClassBlock: + active: true + EmptyDefaultConstructor: + active: true + EmptyDoWhileBlock: + active: true + EmptyElseBlock: + active: true + EmptyFinallyBlock: + active: true + EmptyForBlock: + active: true + EmptyFunctionBlock: + active: true + ignoreOverridden: false + EmptyIfBlock: + active: true + EmptyInitBlock: + active: true + EmptyKtFile: + active: true + EmptySecondaryConstructor: + active: true + EmptyTryBlock: + active: true + EmptyWhenBlock: + active: true + EmptyWhileBlock: + active: true + +exceptions: + active: true + ExceptionRaisedInUnexpectedLocation: + active: true + methodNames: + - 'equals' + - 'finalize' + - 'hashCode' + - 'toString' + InstanceOfCheckForException: + active: true + excludes: ['**/test/**'] + NotImplementedDeclaration: + active: false + ObjectExtendsThrowable: + active: false + PrintStackTrace: + active: true + RethrowCaughtException: + active: true + ReturnFromFinally: + active: true + ignoreLabeled: false + SwallowedException: + active: true + ignoredExceptionTypes: + - 'InterruptedException' + - 'MalformedURLException' + - 'NumberFormatException' + - 'ParseException' + allowedExceptionNameRegex: '_|(ignore|expected).*' + ThrowingExceptionFromFinally: + active: true + ThrowingExceptionInMain: + active: false + ThrowingExceptionsWithoutMessageOrCause: + active: true + excludes: ['**/test/**'] + exceptions: + - 'ArrayIndexOutOfBoundsException' + - 'Exception' + - 'IllegalArgumentException' + - 'IllegalMonitorStateException' + - 'IllegalStateException' + - 'IndexOutOfBoundsException' + - 'NullPointerException' + - 'RuntimeException' + - 'Throwable' + ThrowingNewInstanceOfSameException: + active: true + TooGenericExceptionCaught: + active: true + excludes: ['**/test/**'] + exceptionNames: + - 'ArrayIndexOutOfBoundsException' + - 'Error' + - 'Exception' + - 'IllegalMonitorStateException' + - 'IndexOutOfBoundsException' + - 'NullPointerException' + - 'RuntimeException' + - 'Throwable' + allowedExceptionNameRegex: '_|(ignore|expected).*' + TooGenericExceptionThrown: + active: true + exceptionNames: + - 'Error' + - 'Exception' + - 'RuntimeException' + - 'Throwable' + +naming: + active: true + BooleanPropertyNaming: + active: false + allowedPattern: '^(is|has|are)' + ignoreOverridden: true + ClassNaming: + active: true + 
classPattern: '[A-Z][a-zA-Z0-9]*' + ConstructorParameterNaming: + active: true + parameterPattern: '[a-z][A-Za-z0-9]*' + privateParameterPattern: '[a-z][A-Za-z0-9]*' + excludeClassPattern: '$^' + ignoreOverridden: true + EnumNaming: + active: true + enumEntryPattern: '[A-Z][_a-zA-Z0-9]*' + ForbiddenClassName: + active: false + forbiddenName: [] + FunctionMaxLength: + active: false + maximumFunctionNameLength: 30 + FunctionMinLength: + active: false + minimumFunctionNameLength: 3 + FunctionNaming: + active: true + excludes: ['**/test/**'] + functionPattern: '[a-z][a-zA-Z0-9]*' + excludeClassPattern: '$^' + ignoreOverridden: true + FunctionParameterNaming: + active: true + parameterPattern: '[a-z][A-Za-z0-9]*' + excludeClassPattern: '$^' + ignoreOverridden: true + InvalidPackageDeclaration: + active: true + rootPackage: '' + requireRootInDeclaration: false + LambdaParameterNaming: + active: false + parameterPattern: '[a-z][A-Za-z0-9]*|_' + MatchingDeclarationName: + active: true + mustBeFirst: true + MemberNameEqualsClassName: + active: true + ignoreOverridden: true + NoNameShadowing: + active: true + NonBooleanPropertyPrefixedWithIs: + active: false + ObjectPropertyNaming: + active: true + constantPattern: '[A-Za-z][_A-Za-z0-9]*' + propertyPattern: '[A-Za-z][_A-Za-z0-9]*' + privatePropertyPattern: '(_)?[A-Za-z][_A-Za-z0-9]*' + PackageNaming: + active: true + packagePattern: '[a-z]+(\.[a-z][A-Za-z0-9]*)*' + TopLevelPropertyNaming: + active: true + constantPattern: '[A-Z][_A-Z0-9]*' + propertyPattern: '[A-Za-z][_A-Za-z0-9]*' + privatePropertyPattern: '_?[A-Za-z][_A-Za-z0-9]*' + VariableMaxLength: + active: false + maximumVariableNameLength: 64 + VariableMinLength: + active: false + minimumVariableNameLength: 1 + VariableNaming: + active: true + variablePattern: '[a-z][A-Za-z0-9]*' + privateVariablePattern: '(_)?[a-z][A-Za-z0-9]*' + excludeClassPattern: '$^' + ignoreOverridden: true + +performance: + active: true + ArrayPrimitive: + active: true + CouldBeSequence: + active: false + threshold: 3 + ForEachOnRange: + active: true + excludes: ['**/test/**'] + SpreadOperator: + active: true + excludes: ['**/test/**'] + UnnecessaryTemporaryInstantiation: + active: true + +potential-bugs: + active: true + AvoidReferentialEquality: + active: true + forbiddenTypePatterns: + - 'kotlin.String' + CastToNullableType: + active: false + Deprecation: + active: false + DontDowncastCollectionTypes: + active: false + DoubleMutabilityForCollection: + active: true + mutableTypes: + - 'kotlin.collections.MutableList' + - 'kotlin.collections.MutableMap' + - 'kotlin.collections.MutableSet' + - 'java.util.ArrayList' + - 'java.util.LinkedHashSet' + - 'java.util.HashSet' + - 'java.util.LinkedHashMap' + - 'java.util.HashMap' + DuplicateCaseInWhenExpression: + active: true + ElseCaseInsteadOfExhaustiveWhen: + active: false + EqualsAlwaysReturnsTrueOrFalse: + active: true + EqualsWithHashCodeExist: + active: true + ExitOutsideMain: + active: false + ExplicitGarbageCollectionCall: + active: true + HasPlatformType: + active: true + IgnoredReturnValue: + active: true + restrictToAnnotatedMethods: true + returnValueAnnotations: + - '*.CheckResult' + - '*.CheckReturnValue' + ignoreReturnValueAnnotations: + - '*.CanIgnoreReturnValue' + ignoreFunctionCall: [] + ImplicitDefaultLocale: + active: true + ImplicitUnitReturnType: + active: false + allowExplicitReturnType: true + InvalidRange: + active: true + IteratorHasNextCallsNextMethod: + active: true + IteratorNotThrowingNoSuchElementException: + active: true + LateinitUsage: + 
active: false + excludes: ['**/test/**'] + ignoreOnClassesPattern: '' + MapGetWithNotNullAssertionOperator: + active: true + MissingPackageDeclaration: + active: false + excludes: ['**/*.kts'] + MissingWhenCase: + active: true + allowElseExpression: true + NullCheckOnMutableProperty: + active: false + NullableToStringCall: + active: false + RedundantElseInWhen: + active: true + UnconditionalJumpStatementInLoop: + active: false + UnnecessaryNotNullOperator: + active: true + UnnecessarySafeCall: + active: true + UnreachableCatchBlock: + active: true + UnreachableCode: + active: true + UnsafeCallOnNullableType: + active: true + excludes: ['**/test/**'] + UnsafeCast: + active: true + UnusedUnaryOperator: + active: true + UselessPostfixExpression: + active: true + WrongEqualsTypeParameter: + active: true + +style: + active: true + CanBeNonNullable: + active: false + CascadingCallWrapping: + active: false + includeElvis: true + ClassOrdering: + active: false + CollapsibleIfStatements: + active: false + DataClassContainsFunctions: + active: false + conversionFunctionPrefix: 'to' + DataClassShouldBeImmutable: + active: false + DestructuringDeclarationWithTooManyEntries: + active: true + maxDestructuringEntries: 3 + EqualsNullCall: + active: true + EqualsOnSignatureLine: + active: false + ExplicitCollectionElementAccessMethod: + active: false + ExplicitItLambdaParameter: + active: true + ExpressionBodySyntax: + active: false + includeLineWrapping: false + ForbiddenComment: + active: true + values: + - 'FIXME:' + - 'STOPSHIP:' + - 'TODO:' + allowedPatterns: '' + customMessage: '' + ForbiddenImport: + active: false + imports: [] + forbiddenPatterns: '' + ForbiddenMethodCall: + active: false + methods: + - 'kotlin.io.print' + - 'kotlin.io.println' + ForbiddenPublicDataClass: + active: true + excludes: ['**'] + ignorePackages: + - '*.internal' + - '*.internal.*' + ForbiddenSuppress: + active: false + rules: [] + ForbiddenVoid: + active: true + ignoreOverridden: false + ignoreUsageInGenerics: false + FunctionOnlyReturningConstant: + active: true + ignoreOverridableFunction: true + ignoreActualFunction: true + excludedFunctions: '' + LibraryCodeMustSpecifyReturnType: + active: true + excludes: ['**'] + LibraryEntitiesShouldNotBePublic: + active: true + excludes: ['**'] + LoopWithTooManyJumpStatements: + active: true + maxJumpCount: 1 + MagicNumber: + active: true + excludes: ['**/test/**', '**/*.kts'] + ignoreNumbers: + - '-1' + - '0' + - '1' + - '2' + ignoreHashCodeFunction: true + ignorePropertyDeclaration: false + ignoreLocalVariableDeclaration: false + ignoreConstantDeclaration: true + ignoreCompanionObjectPropertyDeclaration: true + ignoreAnnotation: false + ignoreNamedArgument: true + ignoreEnums: true + ignoreRanges: false + ignoreExtensionFunctions: true + MandatoryBracesIfStatements: + active: false + MandatoryBracesLoops: + active: false + MaxChainedCallsOnSameLine: + active: false + maxChainedCalls: 5 + MaxLineLength: + active: true + maxLineLength: 120 + excludePackageStatements: true + excludeImportStatements: true + excludeCommentStatements: false + MayBeConst: + active: true + ModifierOrder: + active: true + MultilineLambdaItParameter: + active: false + NestedClassesVisibility: + active: true + NewLineAtEndOfFile: + active: true + NoTabs: + active: false + NullableBooleanCheck: + active: false + ObjectLiteralToLambda: + active: true + OptionalAbstractKeyword: + active: true + OptionalUnit: + active: false + OptionalWhenBraces: + active: false + PreferToOverPairSyntax: + active: false + 
ProtectedMemberInFinalClass: + active: true + RedundantExplicitType: + active: false + RedundantHigherOrderMapUsage: + active: true + RedundantVisibilityModifierRule: + active: false + ReturnCount: + active: true + max: 2 + excludedFunctions: 'equals' + excludeLabeled: false + excludeReturnFromLambda: true + excludeGuardClauses: false + SafeCast: + active: true + SerialVersionUIDInSerializableClass: + active: true + SpacingBetweenPackageAndImports: + active: false + ThrowsCount: + active: true + max: 2 + excludeGuardClauses: false + TrailingWhitespace: + active: false + UnderscoresInNumericLiterals: + active: false + acceptableLength: 4 + allowNonStandardGrouping: false + UnnecessaryAbstractClass: + active: true + UnnecessaryAnnotationUseSiteTarget: + active: false + UnnecessaryApply: + active: true + UnnecessaryBackticks: + active: false + UnnecessaryFilter: + active: true + UnnecessaryInheritance: + active: true + UnnecessaryInnerClass: + active: false + UnnecessaryLet: + active: false + UnnecessaryParentheses: + active: false + UntilInsteadOfRangeTo: + active: false + UnusedImports: + active: false + UnusedPrivateClass: + active: true + UnusedPrivateMember: + active: true + allowedNames: '(_|ignored|expected|serialVersionUID)' + UseAnyOrNoneInsteadOfFind: + active: true + UseArrayLiteralsInAnnotations: + active: true + UseCheckNotNull: + active: true + UseCheckOrError: + active: true + UseDataClass: + active: false + allowVars: false + UseEmptyCounterpart: + active: false + UseIfEmptyOrIfBlank: + active: false + UseIfInsteadOfWhen: + active: false + UseIsNullOrEmpty: + active: true + UseOrEmpty: + active: true + UseRequire: + active: true + UseRequireNotNull: + active: true + UselessCallOnNotNull: + active: true + UtilityClassWithPublicConstructor: + active: true + VarCouldBeVal: + active: true + ignoreLateinitVar: false + WildcardImport: + active: true + excludes: ['**/test/**'] + excludeImports: + - 'java.util.*' diff --git a/config/mongodb.license b/config/mongodb.license new file mode 100644 index 00000000000..6a2444433a7 --- /dev/null +++ b/config/mongodb.license @@ -0,0 +1,15 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
diff --git a/config/scala/scalafmt.conf b/config/scala/scalafmt.conf
new file mode 100644
index 00000000000..6c5e35eae69
--- /dev/null
+++ b/config/scala/scalafmt.conf
@@ -0,0 +1,16 @@
+version = "3.7.1"
+runner.dialect = scala213
+
+preset = default
+
+danglingParentheses.preset = true
+docstrings.style = keep
+#docstrings.style = Asterisk
+#docstrings.wrap = no
+maxColumn = 120
+rewrite.rules = [SortImports]
+newlines.topLevelStatements = []
+newlines.source=keep
+newlines.implicitParamListModifierPrefer=before
+
+spaces.inImportCurlyBraces = true
diff --git a/config/spock/ExcludeSlow.groovy b/config/spock/ExcludeSlow.groovy
new file mode 100644
index 00000000000..033fbdb2a7d
--- /dev/null
+++ b/config/spock/ExcludeSlow.groovy
@@ -0,0 +1,6 @@
+package spock
+
+runner {
+    println "Excluding Slow Spock tests"
+    exclude com.mongodb.spock.Slow
+}
diff --git a/config/spock/OnlySlow.groovy b/config/spock/OnlySlow.groovy
new file mode 100644
index 00000000000..d98c04bd826
--- /dev/null
+++ b/config/spock/OnlySlow.groovy
@@ -0,0 +1,6 @@
+package spock
+
+runner {
+    println "Only including Slow Spock tests"
+    include com.mongodb.spock.Slow
+}
diff --git a/config/spotbugs/exclude.xml b/config/spotbugs/exclude.xml
new file mode 100644
index 00000000000..20684680865
--- /dev/null
+++ b/config/spotbugs/exclude.xml
@@ -0,0 +1,293 @@
[The 293 added lines of this SpotBugs filter file were lost in extraction: the XML markup (the FindBugsFilter root and its Match/Bug rules) is not recoverable from this copy.]
diff --git a/driver-benchmarks/build.gradle.kts b/driver-benchmarks/build.gradle.kts
new file mode 100644
index 00000000000..6387de8d058
--- /dev/null
+++ b/driver-benchmarks/build.gradle.kts
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2016-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+plugins {
+    id("application")
+    id("java-library")
+    id("project.base")
+}
+
+application {
+    mainClass = "com.mongodb.benchmark.benchmarks.BenchmarkSuite"
+    applicationDefaultJvmArgs = listOf(
+        "-Dorg.mongodb.benchmarks.data=${System.getProperty("org.mongodb.benchmarks.data")}",
+        "-Dorg.mongodb.benchmarks.output=${System.getProperty("org.mongodb.benchmarks.output")}")
+}
+
+sourceSets {
+    main {
+        java { setSrcDirs(listOf("src/main")) }
+        resources { setSrcDirs(listOf("src/resources")) }
+    }
+}
+
+dependencies {
+    api(project(":driver-sync"))
+    api(project(":mongodb-crypt"))
+
+    implementation(platform(libs.netty.bom))
+    implementation(libs.bundles.netty)
+
+    implementation(libs.logback.classic)
+    implementation(libs.jmh.core)
+    annotationProcessor(libs.jmh.generator.annprocess)
+
+}
+
+tasks.register<JavaExec>("jmh") {
+    group = "benchmark"
+    description = "Run JMH benchmarks."
+    mainClass = "org.openjdk.jmh.Main"
+    classpath = sourceSets.main.get().runtimeClasspath
+}
+
+tasks.register<JavaExec>("runNetty") {
+    group = "application"
+    description = "Run the Netty main class."
+    mainClass.set("com.mongodb.benchmark.benchmarks.netty.BenchmarkNettyProviderSuite")
+    classpath = sourceSets["main"].runtimeClasspath
+    jvmArgs = application.applicationDefaultJvmArgs.toList()
+}
+
+tasks.withType<Javadoc>().configureEach {
+    enabled = false
+}
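The `jmh` task registered above just launches `org.openjdk.jmh.Main` on this module's runtime classpath, so any JMH-annotated class compiled from `src/main` is picked up by the generated benchmark list. A minimal sketch of such a class, for orientation only; the package, class name, and JSON payload are illustrative and not part of this patch:

import org.bson.Document;
import org.openjdk.jmh.annotations.Benchmark;
import org.openjdk.jmh.annotations.Scope;
import org.openjdk.jmh.annotations.Setup;
import org.openjdk.jmh.annotations.State;

@State(Scope.Benchmark)
public class DocumentParseBenchmark {

    private String json;

    @Setup
    public void setUp() {
        json = "{\"name\": \"test\", \"count\": 42}";
    }

    @Benchmark
    public Document parse() {
        // returning the result keeps JMH from dead-code-eliminating the parse
        return Document.parse(json);
    }
}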
diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/AbstractBsonDocumentBenchmark.java b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/AbstractBsonDocumentBenchmark.java
new file mode 100644
index 00000000000..89f932f03cd
--- /dev/null
+++ b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/AbstractBsonDocumentBenchmark.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2016-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.mongodb.benchmark.benchmarks;
+
+import com.mongodb.benchmark.framework.Benchmark;
+import com.mongodb.internal.connection.PowerOfTwoBufferPool;
+import org.bson.BsonBinaryWriter;
+import org.bson.codecs.Codec;
+import org.bson.codecs.DecoderContext;
+import org.bson.codecs.EncoderContext;
+import org.bson.io.BasicOutputBuffer;
+import org.bson.json.JsonReader;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+
+public abstract class AbstractBsonDocumentBenchmark<T> extends Benchmark {
+
+    protected final PowerOfTwoBufferPool bufferPool = PowerOfTwoBufferPool.DEFAULT;
+    protected final Codec<T> codec;
+    private final String resourcePath;
+
+    protected T document;
+    protected byte[] documentBytes;
+    private int fileLength;
+
+    public AbstractBsonDocumentBenchmark(final String name, final String resourcePath, final Codec<T> codec) {
+        super(name);
+        this.resourcePath = resourcePath;
+        this.codec = codec;
+    }
+
+    public void setUp() throws IOException {
+        byte[] bytes = readAllBytesFromRelativePath(resourcePath);
+
+        fileLength = bytes.length;
+
+        document = codec.decode(new JsonReader(new String(bytes, StandardCharsets.UTF_8)),
+                DecoderContext.builder().build());
+        documentBytes = getDocumentAsBuffer(document);
+    }
+
+    @Override
+    public int getBytesPerRun() {
+        return fileLength * NUM_INTERNAL_ITERATIONS;
+    }
+
+    private byte[] getDocumentAsBuffer(final T document) throws IOException {
+        BasicOutputBuffer buffer = new BasicOutputBuffer();
+        codec.encode(new BsonBinaryWriter(buffer), document, EncoderContext.builder().build());
+
+        ByteArrayOutputStream baos = new ByteArrayOutputStream(buffer.getSize());
+        buffer.pipe(baos);
+        return baos.toByteArray();
+    }
+}
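AbstractBsonDocumentBenchmark's setUp() decodes a JSON resource into a document through a Codec and then re-encodes it to raw BSON so both directions can be measured. The same round trip in isolation, as a sketch with an inline JSON string standing in for the resource file:

import org.bson.BsonBinaryWriter;
import org.bson.Document;
import org.bson.codecs.DecoderContext;
import org.bson.codecs.DocumentCodec;
import org.bson.codecs.EncoderContext;
import org.bson.io.BasicOutputBuffer;
import org.bson.json.JsonReader;

public final class CodecRoundTrip {
    public static void main(final String[] args) {
        DocumentCodec codec = new DocumentCodec();

        // decode JSON into a Document, as setUp() does with the resource bytes
        Document document = codec.decode(new JsonReader("{\"a\": 1}"), DecoderContext.builder().build());

        // encode back to raw BSON, as getDocumentAsBuffer() does
        BasicOutputBuffer buffer = new BasicOutputBuffer();
        codec.encode(new BsonBinaryWriter(buffer), document, EncoderContext.builder().build());

        System.out.println("BSON size in bytes: " + buffer.getSize());
    }
}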
diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/AbstractCollectionWriteBenchmark.java b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/AbstractCollectionWriteBenchmark.java
new file mode 100644
index 00000000000..a77d4f671f3
--- /dev/null
+++ b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/AbstractCollectionWriteBenchmark.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2016-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.mongodb.benchmark.benchmarks;
+
+import com.mongodb.client.MongoCollection;
+import com.mongodb.client.MongoDatabase;
+
+public abstract class AbstractCollectionWriteBenchmark<T> extends AbstractWriteBenchmark<T> {
+
+    protected MongoCollection<T> collection;
+    protected MongoDatabase database;
+    private final Class<T> clazz;
+
+    protected AbstractCollectionWriteBenchmark(final String name,
+                                               final String resourcePath,
+                                               int numIterations,
+                                               int numDocuments,
+                                               final Class<T> clazz) {
+        super(name, resourcePath, numIterations, numDocuments, clazz);
+        this.clazz = clazz;
+    }
+
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        database = client.getDatabase(DATABASE_NAME);
+        collection = database.getCollection(COLLECTION_NAME, clazz);
+        database.drop();
+    }
+
+    @Override
+    public void before() throws Exception {
+        super.before();
+        collection.drop();
+    }
+}
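The collection handle that subclasses write through is typed by the clazz parameter, so the same benchmark logic can run against Document, BsonDocument, or any other registered class. Fetching such a typed handle standalone looks like the sketch below; it assumes a mongod on the default localhost port, and the names simply mirror the suite's DATABASE_NAME and COLLECTION_NAME constants:

import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoCollection;
import org.bson.Document;

public final class TypedCollectionExample {
    public static void main(final String[] args) {
        try (MongoClient client = MongoClients.create()) {
            MongoCollection<Document> collection =
                    client.getDatabase("perftest").getCollection("corpus", Document.class);
            collection.drop(); // reset state, as before() does above
            collection.insertOne(new Document("hello", "world"));
            System.out.println(collection.countDocuments());
        }
    }
}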
diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/AbstractFindBenchmark.java b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/AbstractFindBenchmark.java
new file mode 100644
index 00000000000..df3eda6d14d
--- /dev/null
+++ b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/AbstractFindBenchmark.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2016-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.mongodb.benchmark.benchmarks;
+
+import com.mongodb.client.MongoCollection;
+import com.mongodb.client.MongoDatabase;
+import org.bson.BsonDocument;
+import org.bson.BsonInt32;
+import org.bson.codecs.BsonDocumentCodec;
+import org.bson.codecs.DecoderContext;
+import org.bson.json.JsonReader;
+
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.List;
+
+public abstract class AbstractFindBenchmark<T> extends AbstractMongoBenchmark {
+    protected MongoCollection<T> collection;
+
+    private final String resourcePath;
+    private final Class<T> clazz;
+
+    private int fileLength;
+
+    public AbstractFindBenchmark(final String name, final String resourcePath, final Class<T> clazz) {
+        super(name);
+        this.resourcePath = resourcePath;
+        this.clazz = clazz;
+    }
+
+    public void setUp() throws Exception {
+        super.setUp();
+        collection = client.getDatabase(DATABASE_NAME).getCollection(COLLECTION_NAME, clazz);
+        byte[] bytes = readAllBytesFromRelativePath(resourcePath);
+
+        fileLength = bytes.length;
+
+        MongoDatabase setUpDatabase = client.getDatabase(DATABASE_NAME);
+        setUpDatabase.drop();
+
+        insertCopiesOfDocument(setUpDatabase.getCollection(COLLECTION_NAME, BsonDocument.class),
+                new BsonDocumentCodec().decode(new JsonReader(new String(bytes, StandardCharsets.UTF_8)),
+                        DecoderContext.builder().build())
+        );
+    }
+
+    @Override
+    public int getBytesPerRun() {
+        return fileLength * NUM_INTERNAL_ITERATIONS;
+    }
+
+    private void insertCopiesOfDocument(final MongoCollection<BsonDocument> collection,
+                                        final BsonDocument document) {
+        List<BsonDocument> documents = new ArrayList<>(NUM_INTERNAL_ITERATIONS);
+        for (int i = 0; i < NUM_INTERNAL_ITERATIONS; i++) {
+            BsonDocument copy = document.clone();
+            copy.put("_id", new BsonInt32(i));
+            documents.add(copy);
+        }
+        collection.insertMany(documents);
+    }
+}
diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/AbstractGridFSBenchmark.java b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/AbstractGridFSBenchmark.java
new file mode 100644
index 00000000000..aeaf908e0a3
--- /dev/null
+++ b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/AbstractGridFSBenchmark.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2016-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package com.mongodb.benchmark.benchmarks; + +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.gridfs.GridFSBucket; +import com.mongodb.client.gridfs.GridFSBuckets; + +public abstract class AbstractGridFSBenchmark extends AbstractMongoBenchmark { + private final String resourcePath; + protected MongoDatabase database; + protected GridFSBucket bucket; + protected byte[] fileBytes; + + public AbstractGridFSBenchmark(final String name, final String resourcePath) { + super(name); + this.resourcePath = resourcePath; + } + + @Override + public void setUp() throws Exception { + super.setUp(); + database = client.getDatabase(DATABASE_NAME); + bucket = GridFSBuckets.create(database); + fileBytes = readAllBytesFromRelativePath(resourcePath); + database.drop(); + } + + @Override + public int getBytesPerRun() { + return fileBytes.length; + } +} diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/AbstractMongoBenchmark.java b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/AbstractMongoBenchmark.java new file mode 100644 index 00000000000..f6a07691596 --- /dev/null +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/AbstractMongoBenchmark.java @@ -0,0 +1,65 @@ +/* + * Copyright 2016-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.mongodb.benchmark.benchmarks; + +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoNamespace; +import com.mongodb.benchmark.framework.Benchmark; +import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoClients; + +public abstract class AbstractMongoBenchmark extends Benchmark { + + protected static final int GRIDFS_READING_THREAD_POOL_SIZE = 8; + protected static final int MONGODB_READING_THREAD_POOL_SIZE = 8; + protected static final int MONGODB_WRITING_THREAD_POOL_SIZE = 8; + protected static final int FILE_WRITING_THREAD_POOL_SIZE = 2; + protected static final int FILE_READING_THREAD_POOL_SIZE = 4; + + protected static final int ONE_MB = 1000000; + + protected static final String DATABASE_NAME = "perftest"; + protected static final String COLLECTION_NAME = "corpus"; + protected static final MongoNamespace NAMESPACE = new MongoNamespace( + AbstractMongoBenchmark.DATABASE_NAME, AbstractMongoBenchmark.COLLECTION_NAME); + protected MongoClientSettings mongoClientSettings; + + public AbstractMongoBenchmark(final String name) { + super(name); + } + + protected MongoClient client; + + public void setUp() throws Exception { + if (mongoClientSettings != null) { + client = MongoClients.create(mongoClientSettings); + } else { + client = MongoClients.create(); + } + } + + @Override + public void tearDown() throws Exception { + client.close(); + } + + public AbstractMongoBenchmark applyMongoClientSettings(final MongoClientSettings mongoClientSettings) { + this.mongoClientSettings = mongoClientSettings; + return this; + } +} diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/AbstractWriteBenchmark.java b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/AbstractWriteBenchmark.java new file mode 100644 index 00000000000..d9c1a2968f9 --- /dev/null +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/AbstractWriteBenchmark.java @@ -0,0 +1,68 @@ +/* + * Copyright 2016-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package com.mongodb.benchmark.benchmarks;
+
+import com.mongodb.client.model.Filters;
+import org.bson.codecs.Codec;
+import org.bson.codecs.DecoderContext;
+import org.bson.conversions.Bson;
+import org.bson.json.JsonReader;
+
+import java.nio.charset.StandardCharsets;
+
+public abstract class AbstractWriteBenchmark<T> extends AbstractMongoBenchmark {
+    protected static final Bson EMPTY_FILTER = Filters.empty();
+    private final String resourcePath;
+    private final Class<T> clazz;
+    private byte[] bytes;
+    protected int fileLength;
+    protected T document;
+    protected int numInternalIterations;
+    protected int numDocuments;
+
+    protected AbstractWriteBenchmark(final String name,
+                                     final String resourcePath,
+                                     int numInternalIterations,
+                                     int numDocuments,
+                                     final Class<T> clazz) {
+        super(name);
+        this.resourcePath = resourcePath;
+        this.clazz = clazz;
+        this.numInternalIterations = numInternalIterations;
+        this.numDocuments = numDocuments;
+    }
+
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+        bytes = readAllBytesFromRelativePath(resourcePath);
+        fileLength = bytes.length;
+        Codec<T> codec = client.getCodecRegistry().get(clazz);
+        document = codec.decode(new JsonReader(new String(bytes, StandardCharsets.UTF_8)), DecoderContext.builder().build());
+    }
+
+    protected T createDocument() {
+        Codec<T> codec = client.getCodecRegistry().get(clazz);
+        return codec.decode(new JsonReader(new String(bytes, StandardCharsets.UTF_8)), DecoderContext.builder().build());
+    }
+
+    @Override
+    public int getBytesPerRun() {
+        return fileLength * numInternalIterations * numDocuments;
+    }
+}
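createDocument() re-decodes the source JSON through whatever Codec the client's registry maps to the target class, so every call yields an independent document instance. The registry lookup on its own, sketched against the driver's default registry (no server connection needed):

import com.mongodb.MongoClientSettings;
import org.bson.Document;
import org.bson.codecs.Codec;
import org.bson.codecs.DecoderContext;
import org.bson.json.JsonReader;

public final class RegistryLookup {
    public static void main(final String[] args) {
        // the same registry MongoClients.create() wires into a client by default
        Codec<Document> codec = MongoClientSettings.getDefaultCodecRegistry().get(Document.class);
        Document fresh = codec.decode(new JsonReader("{\"x\": 1}"), DecoderContext.builder().build());
        System.out.println(fresh.toJson());
    }
}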
+ *
+ */
+
+package com.mongodb.benchmark.benchmarks;
+
+import com.mongodb.benchmark.benchmarks.bulk.ClientBulkWriteBenchmark;
+import com.mongodb.benchmark.benchmarks.bulk.CollectionBulkWriteBenchmark;
+import com.mongodb.benchmark.benchmarks.bulk.MixedClientBulkWriteBenchmark;
+import com.mongodb.benchmark.benchmarks.bulk.MixedCollectionBulkWriteBenchmark;
+import com.mongodb.benchmark.framework.Benchmark;
+import com.mongodb.benchmark.framework.BenchmarkResult;
+import com.mongodb.benchmark.framework.BenchmarkResultWriter;
+import com.mongodb.benchmark.framework.BenchmarkRunner;
+import com.mongodb.benchmark.framework.EvergreenBenchmarkResultWriter;
+import com.mongodb.benchmark.framework.MongoCryptBenchmarkRunner;
+import com.mongodb.benchmark.framework.MongocryptBecnhmarkResult;
+import org.bson.Document;
+import org.bson.codecs.Codec;
+
+import java.util.Arrays;
+import java.util.List;
+
+import static com.mongodb.MongoClientSettings.getDefaultCodecRegistry;
+
+@SuppressWarnings({"rawtypes", "unchecked"})
+public class BenchmarkSuite {
+
+    protected static final int NUM_WARMUP_ITERATIONS = 1;
+    protected static final int NUM_ITERATIONS = 100;
+    protected static final int MIN_TIME_SECONDS = 60;
+    protected static final int MAX_TIME_SECONDS = 300;
+
+    protected static final Class<Document> DOCUMENT_CLASS = Document.class;
+    protected static final IdRemover<Document> ID_REMOVER = document -> document.remove("_id");
+    protected static final Codec<Document> DOCUMENT_CODEC = getDefaultCodecRegistry().get(DOCUMENT_CLASS);
+
+    protected static final List<BenchmarkResultWriter> WRITERS = Arrays.asList(
+            new EvergreenBenchmarkResultWriter());
+
+    public static void main(String[] args) throws Exception {
+        runBenchmarks();
+
+        for (BenchmarkResultWriter writer : WRITERS) {
+            writer.close();
+        }
+    }
+
+    private static void runBenchmarks()
+            throws Exception {
+
+        runMongoCryptBenchMarks();
+        runBenchmark(new BsonEncodingBenchmark<>("Flat", "extended_bson/flat_bson.json", DOCUMENT_CODEC));
+        runBenchmark(new BsonEncodingBenchmark<>("Deep", "extended_bson/deep_bson.json", DOCUMENT_CODEC));
+        runBenchmark(new BsonEncodingBenchmark<>("Full", "extended_bson/full_bson.json", DOCUMENT_CODEC));
+
+        runBenchmark(new BsonDecodingBenchmark<>("Flat", "extended_bson/flat_bson.json", DOCUMENT_CODEC));
+        runBenchmark(new BsonDecodingBenchmark<>("Deep", "extended_bson/deep_bson.json", DOCUMENT_CODEC));
+        runBenchmark(new BsonDecodingBenchmark<>("Full", "extended_bson/full_bson.json", DOCUMENT_CODEC));
+
+        runBenchmark(new RunCommandBenchmark<>(DOCUMENT_CODEC));
+        runBenchmark(new FindOneBenchmark<>("single_and_multi_document/tweet.json", BenchmarkSuite.DOCUMENT_CLASS));
+
+        runBenchmark(new InsertOneBenchmark<>("Small", "./single_and_multi_document/small_doc.json", 10_000,
+                DOCUMENT_CLASS, ID_REMOVER));
+        runBenchmark(new InsertOneBenchmark<>("Large", "./single_and_multi_document/large_doc.json", 10,
+                DOCUMENT_CLASS, ID_REMOVER));
+
+        runBenchmark(new FindManyBenchmark<>("single_and_multi_document/tweet.json", BenchmarkSuite.DOCUMENT_CLASS));
+        runBenchmark(new InsertManyBenchmark<>("Small", "./single_and_multi_document/small_doc.json", 10_000,
+                DOCUMENT_CLASS));
+        runBenchmark(new InsertManyBenchmark<>("Large", "./single_and_multi_document/large_doc.json", 10,
+                DOCUMENT_CLASS));
+
+        runBenchmark(new CollectionBulkWriteBenchmark<>("Small", "./single_and_multi_document/small_doc.json", 10_000,
+                DOCUMENT_CLASS));
+        runBenchmark(new CollectionBulkWriteBenchmark<>("Large", "./single_and_multi_document/large_doc.json", 10,
+                DOCUMENT_CLASS));
+        runBenchmark(new ClientBulkWriteBenchmark<>("Small", "./single_and_multi_document/small_doc.json", 10_000,
+                DOCUMENT_CLASS));
+        runBenchmark(new ClientBulkWriteBenchmark<>("Large", "./single_and_multi_document/large_doc.json", 10,
+                DOCUMENT_CLASS));
+
+        runBenchmark(new MixedCollectionBulkWriteBenchmark<>("./single_and_multi_document/small_doc.json", 10_000,
+                DOCUMENT_CLASS));
+        runBenchmark(new MixedClientBulkWriteBenchmark<>("./single_and_multi_document/small_doc.json", 10_000,
+                DOCUMENT_CLASS));
+
+        runBenchmark(new GridFSUploadBenchmark("single_and_multi_document/gridfs_large.bin"));
+        runBenchmark(new GridFSDownloadBenchmark("single_and_multi_document/gridfs_large.bin"));
+
+        runBenchmark(new MultiFileImportBenchmark());
+        runBenchmark(new MultiFileExportBenchmark());
+        runBenchmark(new GridFSMultiFileUploadBenchmark());
+        runBenchmark(new GridFSMultiFileDownloadBenchmark());
+    }
+
+    private static void runMongoCryptBenchMarks() throws InterruptedException {
+        // This runner has been migrated from libmongocrypt as it is.
+        List<MongocryptBecnhmarkResult> results = new MongoCryptBenchmarkRunner().run();
+
+        for (BenchmarkResultWriter writer : WRITERS) {
+            for (MongocryptBecnhmarkResult result : results) {
+                writer.write(result);
+            }
+        }
+    }
+
+    protected static void runBenchmark(final Benchmark benchmark) throws Exception {
+        long startTime = System.currentTimeMillis();
+        BenchmarkResult benchmarkResult = new BenchmarkRunner(benchmark, NUM_WARMUP_ITERATIONS, NUM_ITERATIONS, MIN_TIME_SECONDS,
+                MAX_TIME_SECONDS).run();
+        long endTime = System.currentTimeMillis();
+        System.out.println(benchmarkResult.getName() + ": " + (endTime - startTime) / 1000.0);
+        for (BenchmarkResultWriter writer : WRITERS) {
+            writer.write(benchmarkResult);
+        }
+    }
+}
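Several benchmark classes below carry a main() method that runs just that one benchmark through the three-argument BenchmarkRunner(benchmark, warm-up iterations, measured iterations) constructor instead of the suite's time-bounded variant above. A sketch of such a one-off run, using only classes added in this patch (the iteration counts are arbitrary):

import com.mongodb.benchmark.benchmarks.FindOneBenchmark;
import com.mongodb.benchmark.framework.BenchmarkResult;
import com.mongodb.benchmark.framework.BenchmarkRunner;
import com.mongodb.benchmark.framework.TextBasedBenchmarkResultWriter;
import org.bson.BsonDocument;

public final class SingleBenchmarkMain {
    public static void main(final String[] args) throws Exception {
        BenchmarkResult result = new BenchmarkRunner(
                new FindOneBenchmark<>("single_and_multi_document/tweet.json", BsonDocument.class),
                1,    // warm-up iterations
                10)   // measured iterations
                .run();
        new TextBasedBenchmarkResultWriter(System.out).write(result);
    }
}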
diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/BsonDecodingBenchmark.java b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/BsonDecodingBenchmark.java
new file mode 100644
index 00000000000..9f184b78e25
--- /dev/null
+++ b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/BsonDecodingBenchmark.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2016-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.mongodb.benchmark.benchmarks;
+
+import org.bson.BsonBinaryReader;
+import org.bson.codecs.Codec;
+import org.bson.codecs.DecoderContext;
+
+import java.nio.ByteBuffer;
+
+public class BsonDecodingBenchmark<T> extends AbstractBsonDocumentBenchmark<T> {
+
+    public BsonDecodingBenchmark(final String name, final String resourcePath, final Codec<T> codec) {
+        super(name + " BSON Decoding", resourcePath, codec);
+    }
+
+    @Override
+    public void run() {
+        for (int i = 0; i < NUM_INTERNAL_ITERATIONS; i++) {
+            codec.decode(new BsonBinaryReader(ByteBuffer.wrap(documentBytes)), DecoderContext.builder().build());
+        }
+    }
+}
diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/BsonEncodingBenchmark.java b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/BsonEncodingBenchmark.java
new file mode 100644
index 00000000000..5099427ba7a
--- /dev/null
+++ b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/BsonEncodingBenchmark.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2016-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.mongodb.benchmark.benchmarks;
+
+import com.mongodb.internal.connection.ByteBufferBsonOutput;
+import org.bson.BsonBinaryWriter;
+import org.bson.codecs.Codec;
+import org.bson.codecs.EncoderContext;
+
+public class BsonEncodingBenchmark<T> extends AbstractBsonDocumentBenchmark<T> {
+    public BsonEncodingBenchmark(final String name, final String resourcePath, final Codec<T> codec) {
+        super(name + " BSON Encoding", resourcePath, codec);
+    }
+
+    @Override
+    public void run() {
+        for (int i = 0; i < NUM_INTERNAL_ITERATIONS; i++) {
+            ByteBufferBsonOutput bsonOutput = new ByteBufferBsonOutput(bufferPool);
+            codec.encode(new BsonBinaryWriter(bsonOutput), document, EncoderContext.builder().build());
+            bsonOutput.close();
+        }
+    }
+}
diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/FindManyBenchmark.java b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/FindManyBenchmark.java
new file mode 100644
index 00000000000..d848531fb8c
--- /dev/null
+++ b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/FindManyBenchmark.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2016-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.mongodb.benchmark.benchmarks;
+
+import com.mongodb.client.MongoCursor;
+
+public class FindManyBenchmark<T> extends AbstractFindBenchmark<T> {
+    public FindManyBenchmark(final String resourcePath, final Class<T> clazz) {
+        super("Find many and empty the cursor", resourcePath, clazz);
+    }
+
+    @Override
+    public void run() {
+        try (MongoCursor<T> cursor = collection.find().iterator()) {
+            while (cursor.hasNext()) {
+                cursor.next();
+            }
+        }
+    }
+}
diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/FindOneBenchmark.java b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/FindOneBenchmark.java
new file mode 100644
index 00000000000..d0d7bd475fa
--- /dev/null
+++ b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/FindOneBenchmark.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2016-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.mongodb.benchmark.benchmarks;
+
+import com.mongodb.benchmark.framework.BenchmarkRunner;
+import org.bson.BsonDocument;
+import org.bson.BsonInt32;
+
+public class FindOneBenchmark<T> extends AbstractFindBenchmark<T> {
+
+    public FindOneBenchmark(final String resourcePath, Class<T> clazz) {
+        super("Find one by ID", resourcePath, clazz);
+    }
+
+    @Override
+    public void run() {
+        for (int i = 0; i < NUM_INTERNAL_ITERATIONS; i++) {
+            collection.find(new BsonDocument("_id", new BsonInt32(i))).first();
+        }
+    }
+
+    public static void main(String[] args) throws Exception {
+        new BenchmarkRunner(new FindOneBenchmark<>("/benchmarks/TWEET.json", BsonDocument.class), 0, 1).run();
+    }
+}
diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/GridFSDownloadBenchmark.java b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/GridFSDownloadBenchmark.java
new file mode 100644
index 00000000000..71a71900771
--- /dev/null
+++ b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/GridFSDownloadBenchmark.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2016-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package com.mongodb.benchmark.benchmarks; + +import com.mongodb.client.gridfs.GridFSDownloadStream; +import org.bson.types.ObjectId; + +import java.io.ByteArrayInputStream; + +public class GridFSDownloadBenchmark extends AbstractGridFSBenchmark { + + private ObjectId fileId; + + public GridFSDownloadBenchmark(final String resourcePath) { + super("GridFS download", resourcePath); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + fileId = bucket.uploadFromStream("gridfstest", new ByteArrayInputStream(fileBytes)); + } + + @Override + public void run() { + GridFSDownloadStream stream = bucket.openDownloadStream(fileId); + byte[] chunk = new byte[bucket.getChunkSizeBytes()]; + while (stream.read(chunk) != -1) { + // discard result + } + } +} diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/GridFSMultiFileDownloadBenchmark.java b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/GridFSMultiFileDownloadBenchmark.java new file mode 100644 index 00000000000..e39c0fb46ba --- /dev/null +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/GridFSMultiFileDownloadBenchmark.java @@ -0,0 +1,161 @@ +/* + * Copyright 2016-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.mongodb.benchmark.benchmarks; + +import com.mongodb.benchmark.framework.BenchmarkResult; +import com.mongodb.benchmark.framework.BenchmarkRunner; +import com.mongodb.benchmark.framework.TextBasedBenchmarkResultWriter; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.gridfs.GridFSBucket; +import com.mongodb.client.gridfs.GridFSBuckets; +import com.mongodb.client.gridfs.model.GridFSUploadOptions; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.nio.file.Files; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + + +public class GridFSMultiFileDownloadBenchmark extends AbstractMongoBenchmark { + + private GridFSBucket bucket; + + private ExecutorService gridFSService; + private ExecutorService fileService; + + private File tempDirectory; + + public GridFSMultiFileDownloadBenchmark() { + super("GridFS multi-file download"); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + MongoDatabase database = client.getDatabase(DATABASE_NAME); + bucket = GridFSBuckets.create(database); + + database.drop(); + + gridFSService = Executors.newFixedThreadPool(GRIDFS_READING_THREAD_POOL_SIZE); + fileService = Executors.newFixedThreadPool(FILE_WRITING_THREAD_POOL_SIZE); + + importFiles(); + } + + @Override + public void tearDown() throws Exception { + gridFSService.shutdown(); + gridFSService.awaitTermination(1, TimeUnit.MINUTES); + fileService.shutdown(); + fileService.awaitTermination(1, TimeUnit.MINUTES); + + super.tearDown(); + } + + @Override + public void before() throws Exception { + super.before(); + + tempDirectory = Files.createTempDirectory("GRIDFS_MULTI").toFile(); + } + + @SuppressWarnings("ConstantConditions") + @Override + public void after() throws Exception { + for (File file : tempDirectory.listFiles()) { + file.delete(); + } + + tempDirectory.delete(); + + super.after(); + } + + @Override + public void run() throws Exception { + + CountDownLatch latch = new CountDownLatch(50); + + for (int i = 0; i < 50; i++) { + gridFSService.submit(exportFile(latch, i)); + } + + latch.await(1, TimeUnit.MINUTES); + } + + private Runnable exportFile(final CountDownLatch latch, final int fileId) { + return () -> { + UnsafeByteArrayOutputStream outputStream = new UnsafeByteArrayOutputStream(5242880); + bucket.downloadToStream(GridFSMultiFileDownloadBenchmark.this.getFileName(fileId), outputStream); + fileService.submit(() -> { + try { + FileOutputStream fos = new FileOutputStream(new File(tempDirectory, String.format("%02d", fileId) + ".txt")); + fos.write(outputStream.getByteArray()); + fos.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } + latch.countDown(); + }); + }; + } + + private void importFiles() throws Exception { + CountDownLatch latch = new CountDownLatch(50); + + for (int i = 0; i < 50; i++) { + fileService.submit(importFile(latch, i)); + } + + latch.await(1, TimeUnit.MINUTES); + } + + private Runnable importFile(final CountDownLatch latch, final int fileId) { + return () -> { + try { + String fileName = GridFSMultiFileDownloadBenchmark.this.getFileName(fileId); + String resourcePath = "parallel/gridfs_multi/" + fileName; + bucket.uploadFromStream(fileName, streamFromRelativePath(resourcePath), + new GridFSUploadOptions().chunkSizeBytes(ONE_MB)); + latch.countDown(); + } catch (IOException e) { + throw new RuntimeException(e); + } + }; + 
} + + private String getFileName(final int fileId) { + return "file" + String.format("%02d", fileId) + ".txt"; + } + + @Override + public int getBytesPerRun() { + return 262144000; + } + + public static void main(String[] args) throws Exception { + BenchmarkResult benchmarkResult = new BenchmarkRunner(new GridFSMultiFileDownloadBenchmark(), 20, 100).run(); + new TextBasedBenchmarkResultWriter(System.out, false, true).write(benchmarkResult); + } + +} diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/GridFSMultiFileUploadBenchmark.java b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/GridFSMultiFileUploadBenchmark.java new file mode 100644 index 00000000000..cefdc7eaf1c --- /dev/null +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/GridFSMultiFileUploadBenchmark.java @@ -0,0 +1,107 @@ +/* + * * Copyright 2016-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.benchmark.benchmarks; + +import com.mongodb.benchmark.framework.BenchmarkResult; +import com.mongodb.benchmark.framework.BenchmarkRunner; +import com.mongodb.benchmark.framework.TextBasedBenchmarkResultWriter; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.gridfs.GridFSBucket; +import com.mongodb.client.gridfs.GridFSBuckets; +import com.mongodb.client.gridfs.model.GridFSUploadOptions; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + + +public class GridFSMultiFileUploadBenchmark extends AbstractMongoBenchmark { + + private MongoDatabase database; + private GridFSBucket bucket; + + private ExecutorService fileService; + + public GridFSMultiFileUploadBenchmark() { + super("GridFS multi-file upload"); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + database = client.getDatabase(DATABASE_NAME); + bucket = GridFSBuckets.create(database); + + database.drop(); + + fileService = Executors.newFixedThreadPool(FILE_READING_THREAD_POOL_SIZE); + } + + @Override + public void tearDown() throws Exception { + fileService.shutdown(); + fileService.awaitTermination(1, TimeUnit.MINUTES); + + super.tearDown(); + } + + @Override + public void before() throws Exception { + super.before(); + database.drop(); + bucket.uploadFromStream("small", new ByteArrayInputStream(new byte[1])); + } + + @Override + public void run() throws Exception { + CountDownLatch latch = new CountDownLatch(50); + + for (int i = 0; i < 50; i++) { + fileService.submit(importFile(latch, i)); + } + + latch.await(1, TimeUnit.MINUTES); + } + + private Runnable importFile(final CountDownLatch latch, final int fileId) { + return () -> { + try { + String fileName = "file" + String.format("%02d", fileId) + ".txt"; + String resourcePath = "parallel/gridfs_multi/" + fileName; + bucket.uploadFromStream(fileName, 
streamFromRelativePath(resourcePath), + new GridFSUploadOptions().chunkSizeBytes(ONE_MB)); + latch.countDown(); + } catch (IOException e) { + throw new RuntimeException(e); + } + }; + } + + @Override + public int getBytesPerRun() { + return 262144000; + } + + public static void main(String[] args) throws Exception { + BenchmarkResult benchmarkResult = new BenchmarkRunner(new GridFSMultiFileUploadBenchmark(), 4, 10).run(); + new TextBasedBenchmarkResultWriter(System.out).write(benchmarkResult); + } +} diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/GridFSUploadBenchmark.java b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/GridFSUploadBenchmark.java new file mode 100644 index 00000000000..2181f89be5a --- /dev/null +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/GridFSUploadBenchmark.java @@ -0,0 +1,45 @@ +/* + * Copyright 2016-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.benchmark.benchmarks; + +import java.io.ByteArrayInputStream; + +public class GridFSUploadBenchmark extends AbstractGridFSBenchmark { + + public GridFSUploadBenchmark(final String resourcePath) { + super("GridFS upload", resourcePath); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + bucket.uploadFromStream("gridfstest", new ByteArrayInputStream(fileBytes)); + } + + @Override + public void before() throws Exception { + super.before(); + database.drop(); + bucket.uploadFromStream("small", new ByteArrayInputStream(new byte[1])); + } + + @Override + public void run() { + bucket.uploadFromStream("gridfstest", new ByteArrayInputStream(fileBytes)); + } +} diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/IdRemover.java b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/IdRemover.java new file mode 100644 index 00000000000..5ba57d05046 --- /dev/null +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/IdRemover.java @@ -0,0 +1,22 @@ +/* + * Copyright 2016-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package com.mongodb.benchmark.benchmarks;
+
+public interface IdRemover<T> {
+    void removeId(T document);
+}
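Since IdRemover has a single abstract method, implementations can be supplied as lambdas; BenchmarkSuite's ID_REMOVER constant does exactly that for Document. A self-contained sketch (the wrapper class is illustrative and assumed to sit in com.mongodb.benchmark.benchmarks, next to IdRemover):

import org.bson.Document;

public final class IdRemoverExample {
    public static void main(final String[] args) {
        // mirrors BenchmarkSuite.ID_REMOVER: drop _id so each insert gets a fresh one
        IdRemover<Document> idRemover = document -> document.remove("_id");

        Document document = new Document("_id", 1).append("x", 42);
        idRemover.removeId(document);
        System.out.println(document.toJson()); // {"x": 42}
    }
}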
diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/InsertManyBenchmark.java b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/InsertManyBenchmark.java
new file mode 100644
index 00000000000..a67466740e8
--- /dev/null
+++ b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/InsertManyBenchmark.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2016-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.mongodb.benchmark.benchmarks;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class InsertManyBenchmark<T> extends AbstractCollectionWriteBenchmark<T> {
+    private final List<T> documentList;
+
+    public InsertManyBenchmark(final String name, final String resourcePath, final int numDocuments, final Class<T> clazz) {
+        super(name + " doc bulk insert", resourcePath, 1, numDocuments, clazz);
+        documentList = new ArrayList<>(numDocuments);
+    }
+
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+    }
+
+    @Override
+    public void before() throws Exception {
+        super.before();
+        documentList.clear();
+        for (int i = 0; i < numDocuments; i++) {
+            documentList.add(createDocument());
+        }
+    }
+
+    @Override
+    public void run() {
+        collection.insertMany(documentList);
+    }
+}
diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/InsertOneBenchmark.java b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/InsertOneBenchmark.java
new file mode 100644
index 00000000000..af6f91b91df
--- /dev/null
+++ b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/InsertOneBenchmark.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2016-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.mongodb.benchmark.benchmarks;
+
+public class InsertOneBenchmark<T> extends AbstractCollectionWriteBenchmark<T> {
+    private final int numIterations;
+    private final IdRemover<T> idRemover;
+
+    public InsertOneBenchmark(final String name, final String resourcePath, final int numIterations, final Class<T> clazz,
+                              final IdRemover<T> idRemover) {
+        super(name + " doc insertOne", resourcePath, numIterations, 1, clazz);
+        this.numIterations = numIterations;
+        this.idRemover = idRemover;
+    }
+
+    @Override
+    public void run() {
+        for (int i = 0; i < numIterations; i++) {
+            idRemover.removeId(document);
+            collection.insertOne(document);
+        }
+    }
+}
diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/MultiFileExportBenchmark.java b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/MultiFileExportBenchmark.java
new file mode 100644
index 00000000000..30c74084419
--- /dev/null
+++ b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/MultiFileExportBenchmark.java
@@ -0,0 +1,188 @@
+/*
+ * Copyright 2016-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.mongodb.benchmark.benchmarks;
+
+import com.mongodb.benchmark.framework.BenchmarkResult;
+import com.mongodb.benchmark.framework.BenchmarkRunner;
+import com.mongodb.benchmark.framework.TextBasedBenchmarkResultWriter;
+import com.mongodb.client.MongoCollection;
+import com.mongodb.client.MongoDatabase;
+import com.mongodb.client.model.InsertManyOptions;
+import org.bson.BsonDocument;
+import org.bson.BsonInt32;
+import org.bson.RawBsonDocument;
+import org.bson.codecs.BsonDocumentCodec;
+import org.bson.codecs.DecoderContext;
+import org.bson.codecs.EncoderContext;
+import org.bson.codecs.RawBsonDocumentCodec;
+import org.bson.json.JsonReader;
+import org.bson.json.JsonWriter;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStreamWriter;
+import java.io.Writer;
+import java.nio.charset.StandardCharsets;
+import java.nio.file.Files;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+
+public class MultiFileExportBenchmark extends AbstractMongoBenchmark {
+
+    private MongoDatabase database;
+
+    private MongoCollection<RawBsonDocument> collection;
+
+    private ExecutorService fileWritingService;
+    private ExecutorService documentReadingService;
+    private File tempDirectory;
+
+    public MultiFileExportBenchmark() {
+        super("LDJSON multi-file export");
+    }
+
+    @Override
+    public void setUp() throws Exception {
+        super.setUp();
+
+        database = client.getDatabase(DATABASE_NAME);
+        collection = database.getCollection(COLLECTION_NAME, RawBsonDocument.class);
+
+        database.drop();
+
+        importJsonFiles();
+
+        fileWritingService = Executors.newFixedThreadPool(FILE_WRITING_THREAD_POOL_SIZE);
+        documentReadingService = Executors.newFixedThreadPool(MONGODB_READING_THREAD_POOL_SIZE);
+    }
+
+    @Override
+    public void tearDown() throws Exception {
+        fileWritingService.shutdown();
+        documentReadingService.shutdown();
+        fileWritingService.awaitTermination(1, TimeUnit.MINUTES);
+        documentReadingService.awaitTermination(1, TimeUnit.MINUTES);
+
+        super.tearDown();
+    }
+
+    @Override
+    public void before() throws Exception {
+        super.before();
+
+        tempDirectory = Files.createTempDirectory("LDJSON_MULTI").toFile();
+    }
+
+    @Override
+    public void after() throws Exception {
+        for (File file : tempDirectory.listFiles()) {
+            file.delete();
+        }
+
+        tempDirectory.delete();
+        super.after();
+    }
+
+    @Override
+    public void run() throws Exception {
+        CountDownLatch latch = new CountDownLatch(100);
+
+        for (int i = 0; i < 100; i++) {
+            documentReadingService.submit(exportJsonFile(i, latch));
+        }
+
+        latch.await(1, TimeUnit.MINUTES);
+    }
+
+    @Override
+    public int getBytesPerRun() {
+        return 557610482;
+    }
+
+    private Runnable exportJsonFile(final int fileId, final CountDownLatch latch) {
+        return () -> {
+            List<RawBsonDocument> documents = collection.find(new BsonDocument("fileId", new BsonInt32(fileId)))
+                    .batchSize(5000)
+                    .into(new ArrayList<>(5000));
+            fileWritingService.submit(writeJsonFile(fileId, documents, latch));
+        };
+    }
+
+    private Runnable writeJsonFile(final int fileId, final List<RawBsonDocument> documents, final CountDownLatch latch) {
+        return () -> {
+            try {
+                try (Writer writer = new OutputStreamWriter(
+                        new FileOutputStream(new File(tempDirectory, String.format("%03d", fileId) + ".txt")), StandardCharsets.UTF_8)) {
+                    RawBsonDocumentCodec codec = new RawBsonDocumentCodec();
+                    for (RawBsonDocument cur : documents) {
+                        codec.encode(new JsonWriter(writer), cur, EncoderContext.builder().build());
+                        writer.write('\n');
+                    }
+                }
+                latch.countDown();
+            } catch (IOException e) {
+                throw new RuntimeException(e);
+            }
+        };
+    }
+
+    private void importJsonFiles() throws InterruptedException {
+        ExecutorService importService = Executors.newFixedThreadPool(FILE_READING_THREAD_POOL_SIZE);
+
+        CountDownLatch latch = new CountDownLatch(100);
+
+        for (int i = 0; i < 100; i++) {
+            int fileId = i;
+            importService.submit(() -> {
+                String resourcePath = "parallel/ldjson_multi/ldjson" + String.format("%03d", fileId) + ".txt";
+                try (BufferedReader reader = new BufferedReader(readFromRelativePath(resourcePath), 1024 * 64)) {
+                    String json;
+                    List<BsonDocument> documents = new ArrayList<>(1000);
+                    while ((json = reader.readLine()) != null) {
+                        BsonDocument document = new BsonDocumentCodec().decode(new JsonReader(json),
+                                DecoderContext.builder().build());
+                        document.put("fileId", new BsonInt32(fileId));
+                        documents.add(document);
+                    }
+                    database.getCollection(COLLECTION_NAME, BsonDocument.class).insertMany(documents,
+                            new InsertManyOptions().ordered(false));
+                    latch.countDown();
+                } catch (IOException e) {
+                    throw new RuntimeException(e);
+                }
+            });
+        }
+        latch.await(1, TimeUnit.MINUTES);
+
+        collection.createIndex(new BsonDocument("fileId", new BsonInt32(1)));
+
+        importService.shutdown();
+        importService.awaitTermination(1, TimeUnit.MINUTES);
+    }
+
+    public static void main(String[] args) throws Exception {
+        BenchmarkResult benchmarkResult = new BenchmarkRunner(new MultiFileExportBenchmark(), 0, 1).run();
+        new TextBasedBenchmarkResultWriter(System.out).write(benchmarkResult);
+    }
+}
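The multi-file export above and the import below share one coordination pattern: a reading pool hands work to a writing pool, and a CountDownLatch sized to the expected number of completions gates the run, with a bounded await so a failed task cannot hang the benchmark forever. The pattern reduced to its skeleton (pool size, task count, and task body are placeholders):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public final class FanOutSkeleton {
    public static void main(final String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        CountDownLatch latch = new CountDownLatch(100);
        for (int i = 0; i < 100; i++) {
            final int taskId = i;
            pool.submit(() -> {
                // stand-in for "read one file, write one batch"
                System.out.println("task " + taskId + " done");
                latch.countDown();
            });
        }
        // bounded wait: if a task died before counting down, we still return
        latch.await(1, TimeUnit.MINUTES);
        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.MINUTES);
    }
}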
new file mode 100644 index 00000000000..03d1a721bee --- /dev/null +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/MultiFileImportBenchmark.java @@ -0,0 +1,132 @@ +/* + * Copyright 2016-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.benchmark.benchmarks; + +import com.mongodb.benchmark.framework.BenchmarkResult; +import com.mongodb.benchmark.framework.BenchmarkRunner; +import com.mongodb.benchmark.framework.TextBasedBenchmarkResultWriter; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.model.InsertManyOptions; +import org.bson.RawBsonDocument; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.RawBsonDocumentCodec; +import org.bson.json.JsonReader; + +import java.io.BufferedReader; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +public class MultiFileImportBenchmark extends AbstractMongoBenchmark { + private MongoDatabase database; + + private MongoCollection<RawBsonDocument> collection; + + private ExecutorService fileReadingService; + private ExecutorService documentWritingService; + + public MultiFileImportBenchmark() { + super("LDJSON multi-file import"); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + + database = client.getDatabase(DATABASE_NAME); + collection = database.getCollection(COLLECTION_NAME, RawBsonDocument.class); + + database.drop(); + + fileReadingService = Executors.newFixedThreadPool(FILE_READING_THREAD_POOL_SIZE); + documentWritingService = Executors.newFixedThreadPool(MONGODB_WRITING_THREAD_POOL_SIZE); + } + + @Override + public void before() throws Exception { + super.before(); + + collection.drop(); + database.createCollection(collection.getNamespace().getCollectionName()); + } + + @Override + public void tearDown() throws Exception { + fileReadingService.shutdown(); + documentWritingService.shutdown(); + fileReadingService.awaitTermination(1, TimeUnit.MINUTES); + documentWritingService.awaitTermination(1, TimeUnit.MINUTES); + + super.tearDown(); + } + + @Override + public void run() throws InterruptedException { + CountDownLatch latch = new CountDownLatch(500); + + for (int i = 0; i < 100; i++) { + fileReadingService.submit(importJsonFile(latch, i)); + } + + latch.await(1, TimeUnit.MINUTES); + } + + private Runnable importJsonFile(final CountDownLatch latch, final int fileId) { + RawBsonDocumentCodec codec = new RawBsonDocumentCodec(); + return () -> { + String resourcePath = "parallel/ldjson_multi/ldjson" + String.format("%03d", fileId) + ".txt"; + try (BufferedReader reader = new BufferedReader(readFromRelativePath(resourcePath), 1024 * 64)) { + String json; + List<RawBsonDocument> documents = new ArrayList<>(1000); + while ((json = reader.readLine()) != null) { + RawBsonDocument document =
codec.decode(new JsonReader(json), DecoderContext.builder().build()); + documents.add(document); + if (documents.size() == 1000) { + List<RawBsonDocument> documentsToInsert = documents; + documentWritingService.submit(() -> { + collection.insertMany(documentsToInsert, new InsertManyOptions().ordered(false)); + latch.countDown(); + }); + documents = new ArrayList<>(1000); + } + } + if (!documents.isEmpty()) { + throw new IllegalStateException("Document count not a multiple of 1000"); + } + } catch (IOException e) { + throw new RuntimeException(e); + } + }; + } + + @Override + public int getBytesPerRun() { + return 557610482; + } + + public static void main(String[] args) throws Exception { + BenchmarkResult benchmarkResult = new BenchmarkRunner(new MultiFileImportBenchmark(), 10, 100).run(); + new TextBasedBenchmarkResultWriter(System.out, true, true).write(benchmarkResult); + } +} diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/RunCommandBenchmark.java b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/RunCommandBenchmark.java new file mode 100644 index 00000000000..ba90066a349 --- /dev/null +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/RunCommandBenchmark.java @@ -0,0 +1,69 @@ +/* + * Copyright 2016-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.mongodb.benchmark.benchmarks; + +import com.mongodb.client.MongoDatabase; +import org.bson.BsonBinaryWriter; +import org.bson.codecs.Codec; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; +import org.bson.conversions.Bson; +import org.bson.io.BasicOutputBuffer; +import org.bson.json.JsonReader; + +public class RunCommandBenchmark<T extends Bson> extends AbstractMongoBenchmark { + + private MongoDatabase database; + private final Codec<T> codec; + private final T command; + + public RunCommandBenchmark(final Codec<T> codec) { + super("Run command"); + this.codec = codec; + this.command = createCommand(); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + database = client.getDatabase("admin"); + } + + @Override + public void run() { + for (int i = 0; i < NUM_INTERNAL_ITERATIONS; i++) { + database.runCommand(command); + } + } + + @Override + public int getBytesPerRun() { + return NUM_INTERNAL_ITERATIONS * getCommandSize(); + } + + private int getCommandSize() { + T command = createCommand(); + BasicOutputBuffer buffer = new BasicOutputBuffer(); + codec.encode(new BsonBinaryWriter(buffer), command, EncoderContext.builder().build()); + return buffer.getSize(); + } + + private T createCommand() { + return codec.decode(new JsonReader("{ismaster: true}"), DecoderContext.builder().build()); + } +} diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/UnsafeByteArrayOutputStream.java b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/UnsafeByteArrayOutputStream.java new file mode 100644 index 00000000000..1887912220b --- /dev/null +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/UnsafeByteArrayOutputStream.java @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2008 - 2013 10gen, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.benchmark.benchmarks; + +import java.io.ByteArrayOutputStream; + +class UnsafeByteArrayOutputStream extends ByteArrayOutputStream { + public UnsafeByteArrayOutputStream(final int size) { + super(size); + } + + public byte[] getByteArray() { + return buf; + } +} diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/bulk/ClientBulkWriteBenchmark.java b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/bulk/ClientBulkWriteBenchmark.java new file mode 100644 index 00000000000..3926192ec4b --- /dev/null +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/bulk/ClientBulkWriteBenchmark.java @@ -0,0 +1,50 @@ +/* + * Copyright 2016-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.benchmark.benchmarks.bulk; + +import com.mongodb.benchmark.benchmarks.AbstractCollectionWriteBenchmark; +import com.mongodb.client.model.bulk.ClientNamespacedInsertOneModel; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; + +import java.util.ArrayList; +import java.util.List; + +public class ClientBulkWriteBenchmark<T> extends AbstractCollectionWriteBenchmark<T> { + private final List<ClientNamespacedInsertOneModel> modelList; + + public ClientBulkWriteBenchmark(final String name, final String resourcePath, final int numDocuments, final Class<T> clazz) { + super(name + " doc Client BulkWrite insert", resourcePath, 1, numDocuments, clazz); + modelList = new ArrayList<>(numDocuments); + } + + @Override + public void before() throws Exception { + super.before(); + database.createCollection(COLLECTION_NAME); + + modelList.clear(); + for (int i = 0; i < numDocuments; i++) { + modelList.add(ClientNamespacedWriteModel.insertOne(NAMESPACE, createDocument())); + } + } + + @Override + public void run() { + client.bulkWrite(modelList); + } +} diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/bulk/CollectionBulkWriteBenchmark.java b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/bulk/CollectionBulkWriteBenchmark.java new file mode 100644 index 00000000000..6a0d74d4736 --- /dev/null +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/bulk/CollectionBulkWriteBenchmark.java @@ -0,0 +1,48 @@ +/* + * Copyright 2016-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.mongodb.benchmark.benchmarks.bulk; + +import com.mongodb.benchmark.benchmarks.AbstractCollectionWriteBenchmark; +import com.mongodb.client.model.InsertOneModel; + +import java.util.ArrayList; +import java.util.List; + +public class CollectionBulkWriteBenchmark<T> extends AbstractCollectionWriteBenchmark<T> { + private final List<InsertOneModel<T>> modelList; + + public CollectionBulkWriteBenchmark(final String name, final String resourcePath, final int numDocuments, final Class<T> clazz) { + super(name + " doc Collection BulkWrite insert", resourcePath, 1, numDocuments, clazz); + modelList = new ArrayList<>(numDocuments); + } + + @Override + public void before() throws Exception { + super.before(); + database.createCollection(COLLECTION_NAME); + modelList.clear(); + for (int i = 0; i < numDocuments; i++) { + modelList.add(new InsertOneModel<>(createDocument())); + } + } + + @Override + public void run() { + collection.bulkWrite(modelList); + } +} diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/bulk/MixedClientBulkWriteBenchmark.java b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/bulk/MixedClientBulkWriteBenchmark.java new file mode 100644 index 00000000000..7c23712cce7 --- /dev/null +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/bulk/MixedClientBulkWriteBenchmark.java @@ -0,0 +1,86 @@ +/* + * Copyright 2016-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.mongodb.benchmark.benchmarks.bulk; + +import com.mongodb.MongoNamespace; +import com.mongodb.benchmark.benchmarks.AbstractMongoBenchmark; +import com.mongodb.benchmark.benchmarks.AbstractWriteBenchmark; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; + +import java.util.ArrayList; +import java.util.List; + +import static com.mongodb.client.model.bulk.ClientNamespacedWriteModel.deleteOne; +import static com.mongodb.client.model.bulk.ClientNamespacedWriteModel.insertOne; +import static com.mongodb.client.model.bulk.ClientNamespacedWriteModel.replaceOne; + +public class MixedClientBulkWriteBenchmark<T> extends AbstractWriteBenchmark<T> { + private static final int NAMESPACES_COUNT = 10; + private MongoDatabase database; + private final List<ClientNamespacedWriteModel> modelList; + private List<MongoNamespace> namespaces; + + public MixedClientBulkWriteBenchmark(final String resourcePath, final int numDocuments, final Class<T> clazz) { + // numDocuments * 2 aligns with bytes transferred (insertOne + replaceOne documents) + super("Small doc Client BulkWrite Mixed Operations", resourcePath, 1, numDocuments * 2, clazz); + this.modelList = new ArrayList<>(numDocuments * 3); + this.namespaces = new ArrayList<>(NAMESPACES_COUNT); + } + + @Override + public void setUp() throws Exception { + super.setUp(); + database = client.getDatabase(DATABASE_NAME); + database.drop(); + + namespaces = new ArrayList<>(); + for (int i = 1; i <= NAMESPACES_COUNT; i++) { + namespaces.add(new MongoNamespace(AbstractMongoBenchmark.DATABASE_NAME, AbstractMongoBenchmark.COLLECTION_NAME + "_" + i)); + } + } + + @Override + public void before() throws Exception { + super.before(); + database.drop(); + database = client.getDatabase(DATABASE_NAME); + + for (MongoNamespace namespace : namespaces) { + database.createCollection(namespace.getCollectionName()); + } + + modelList.clear(); + for (int i = 0; i < numDocuments / 2; i++) { + MongoNamespace namespace = namespaces.get(i % NAMESPACES_COUNT); + modelList.add(insertOne( + namespace, + createDocument())); + modelList.add(replaceOne( + namespace, EMPTY_FILTER, + createDocument())); + modelList.add(deleteOne( + namespace, EMPTY_FILTER)); + } + } + + @Override + public void run() { + client.bulkWrite(modelList); + } +} diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/bulk/MixedCollectionBulkWriteBenchmark.java b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/bulk/MixedCollectionBulkWriteBenchmark.java new file mode 100644 index 00000000000..84bf29e0d2e --- /dev/null +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/bulk/MixedCollectionBulkWriteBenchmark.java @@ -0,0 +1,56 @@ +/* + * Copyright 2016-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.mongodb.benchmark.benchmarks.bulk; + +import com.mongodb.benchmark.benchmarks.AbstractCollectionWriteBenchmark; +import com.mongodb.client.model.DeleteOneModel; +import com.mongodb.client.model.InsertOneModel; +import com.mongodb.client.model.ReplaceOneModel; +import com.mongodb.client.model.WriteModel; + +import java.util.ArrayList; +import java.util.List; + +public class MixedCollectionBulkWriteBenchmark<T> extends AbstractCollectionWriteBenchmark<T> { + private final List<WriteModel<T>> modelList; + + public MixedCollectionBulkWriteBenchmark(final String resourcePath, final int numDocuments, final Class<T> clazz) { + // numDocuments * 2 aligns with bytes transferred (insertOne + replaceOne documents) + super("Small doc Collection BulkWrite Mixed Operations", resourcePath, 1, numDocuments * 2, clazz); + this.modelList = new ArrayList<>(numDocuments * 3); + } + + @Override + public void before() throws Exception { + super.before(); + database.createCollection(COLLECTION_NAME); + + modelList.clear(); + for (int i = 0; i < numDocuments / 2; i++) { + modelList.add(new InsertOneModel<>(createDocument())); + modelList.add(new ReplaceOneModel<>(EMPTY_FILTER, createDocument())); + modelList.add(new DeleteOneModel<>(EMPTY_FILTER)); + } + } + + @Override + public void run() { + collection.bulkWrite(modelList); + } +} diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/netty/BenchmarkNettyProviderSuite.java b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/netty/BenchmarkNettyProviderSuite.java new file mode 100644 index 00000000000..5e142376940 --- /dev/null +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/benchmarks/netty/BenchmarkNettyProviderSuite.java @@ -0,0 +1,107 @@ +/* + * Copyright 2016-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.mongodb.benchmark.benchmarks.netty; + +import com.mongodb.MongoClientSettings; +import com.mongodb.benchmark.benchmarks.BenchmarkSuite; +import com.mongodb.benchmark.benchmarks.FindManyBenchmark; +import com.mongodb.benchmark.benchmarks.FindOneBenchmark; +import com.mongodb.benchmark.benchmarks.GridFSDownloadBenchmark; +import com.mongodb.benchmark.benchmarks.GridFSMultiFileDownloadBenchmark; +import com.mongodb.benchmark.benchmarks.GridFSMultiFileUploadBenchmark; +import com.mongodb.benchmark.benchmarks.GridFSUploadBenchmark; +import com.mongodb.benchmark.benchmarks.InsertManyBenchmark; +import com.mongodb.benchmark.benchmarks.InsertOneBenchmark; +import com.mongodb.benchmark.benchmarks.MultiFileExportBenchmark; +import com.mongodb.benchmark.benchmarks.MultiFileImportBenchmark; +import com.mongodb.benchmark.benchmarks.RunCommandBenchmark; +import com.mongodb.benchmark.benchmarks.bulk.ClientBulkWriteBenchmark; +import com.mongodb.benchmark.benchmarks.bulk.CollectionBulkWriteBenchmark; +import com.mongodb.benchmark.benchmarks.bulk.MixedClientBulkWriteBenchmark; +import com.mongodb.benchmark.benchmarks.bulk.MixedCollectionBulkWriteBenchmark; +import com.mongodb.benchmark.framework.BenchmarkResultWriter; +import com.mongodb.connection.NettyTransportSettings; +import io.netty.buffer.PooledByteBufAllocator; +import org.bson.Document; + +@SuppressWarnings({"rawtypes", "unchecked"}) +public class BenchmarkNettyProviderSuite extends BenchmarkSuite { + + public static final MongoClientSettings MONGO_CLIENT_SETTINGS = MongoClientSettings.builder() + .transportSettings(NettyTransportSettings.nettyBuilder() + .allocator(PooledByteBufAllocator.DEFAULT) + .build()) + .build(); + + public static void main(String[] args) throws Exception { + runBenchmarks(); + + for (BenchmarkResultWriter writer : WRITERS) { + writer.close(); + } + } + + private static void runBenchmarks() + throws Exception { + runBenchmark(new RunCommandBenchmark<>(DOCUMENT_CODEC) + .applyMongoClientSettings(MONGO_CLIENT_SETTINGS)); + runBenchmark(new FindOneBenchmark("./single_and_multi_document/tweet.json", + DOCUMENT_CLASS).applyMongoClientSettings(MONGO_CLIENT_SETTINGS)); + + runBenchmark(new InsertOneBenchmark("Small", "./single_and_multi_document/small_doc.json", 10000, + DOCUMENT_CLASS, ID_REMOVER).applyMongoClientSettings(MONGO_CLIENT_SETTINGS)); + runBenchmark(new InsertOneBenchmark("Large", "./single_and_multi_document/large_doc.json", 10, + DOCUMENT_CLASS, ID_REMOVER).applyMongoClientSettings(MONGO_CLIENT_SETTINGS)); + + runBenchmark(new FindManyBenchmark("./single_and_multi_document/tweet.json", + DOCUMENT_CLASS).applyMongoClientSettings(MONGO_CLIENT_SETTINGS)); + runBenchmark(new InsertManyBenchmark("Small", "./single_and_multi_document/small_doc.json", 10000, + DOCUMENT_CLASS).applyMongoClientSettings(MONGO_CLIENT_SETTINGS)); + runBenchmark(new InsertManyBenchmark("Large", "./single_and_multi_document/large_doc.json", 10, + DOCUMENT_CLASS).applyMongoClientSettings(MONGO_CLIENT_SETTINGS)); + + runBenchmark(new GridFSUploadBenchmark("./single_and_multi_document/gridfs_large.bin") + .applyMongoClientSettings(MONGO_CLIENT_SETTINGS)); + runBenchmark(new GridFSDownloadBenchmark("./single_and_multi_document/gridfs_large.bin") + .applyMongoClientSettings(MONGO_CLIENT_SETTINGS)); + + runBenchmark(new CollectionBulkWriteBenchmark<>("Small", "./single_and_multi_document/small_doc.json", 10_000, + DOCUMENT_CLASS).applyMongoClientSettings(MONGO_CLIENT_SETTINGS)); + runBenchmark(new 
CollectionBulkWriteBenchmark<>("Large", "./single_and_multi_document/large_doc.json", 10, + DOCUMENT_CLASS)); + + runBenchmark(new ClientBulkWriteBenchmark<>("Small", "./single_and_multi_document/small_doc.json", 10_000, + DOCUMENT_CLASS).applyMongoClientSettings(MONGO_CLIENT_SETTINGS)); + runBenchmark(new ClientBulkWriteBenchmark<>("Large", "./single_and_multi_document/large_doc.json", 10, + DOCUMENT_CLASS).applyMongoClientSettings(MONGO_CLIENT_SETTINGS)); + + runBenchmark(new MixedCollectionBulkWriteBenchmark<>("./single_and_multi_document/small_doc.json", 10_000, + DOCUMENT_CLASS).applyMongoClientSettings(MONGO_CLIENT_SETTINGS)); + runBenchmark(new MixedClientBulkWriteBenchmark<>("./single_and_multi_document/small_doc.json", 10_000, + DOCUMENT_CLASS).applyMongoClientSettings(MONGO_CLIENT_SETTINGS)); + + runBenchmark(new MultiFileImportBenchmark() + .applyMongoClientSettings(MONGO_CLIENT_SETTINGS)); + runBenchmark(new MultiFileExportBenchmark() + .applyMongoClientSettings(MONGO_CLIENT_SETTINGS)); + runBenchmark(new GridFSMultiFileUploadBenchmark() + .applyMongoClientSettings(MONGO_CLIENT_SETTINGS)); + runBenchmark(new GridFSMultiFileDownloadBenchmark() + .applyMongoClientSettings(MONGO_CLIENT_SETTINGS)); + } +} diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/framework/Benchmark.java b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/Benchmark.java new file mode 100644 index 00000000000..3715887d900 --- /dev/null +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/Benchmark.java @@ -0,0 +1,73 @@ +/* + * Copyright 2016-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.mongodb.benchmark.framework; + +import java.io.FileInputStream; +import java.io.FileReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.Reader; +import java.nio.file.Files; +import java.nio.file.Paths; + +public abstract class Benchmark { + + protected static final int NUM_INTERNAL_ITERATIONS = 10000; + static final String TEST_DATA_SYSTEM_PROPERTY_NAME = "org.mongodb.benchmarks.data"; + private String name; + + protected Benchmark(final String name) { + this.name = name; + } + + public void setUp() throws Exception { + } + + public void tearDown() throws Exception { + } + + public void before() throws Exception { + } + + public void after() throws Exception { + } + + public String getName() { + return name; + } + + public abstract void run() throws Exception; + + public abstract int getBytesPerRun(); + + protected byte[] readAllBytesFromRelativePath(final String relativePath) throws IOException { + return Files.readAllBytes(Paths.get(getResourceRoot() + relativePath)); + } + + protected Reader readFromRelativePath(final String relativePath) throws IOException { + return new FileReader(getResourceRoot() + relativePath); + } + + protected InputStream streamFromRelativePath(final String relativePath) throws IOException { + return new FileInputStream(getResourceRoot() + relativePath); + } + + private String getResourceRoot() { + return System.getProperty(TEST_DATA_SYSTEM_PROPERTY_NAME); + } +} diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/framework/BenchmarkResult.java b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/BenchmarkResult.java new file mode 100644 index 00000000000..0cd8f40d131 --- /dev/null +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/BenchmarkResult.java @@ -0,0 +1,66 @@ +/* + * Copyright 2016-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.mongodb.benchmark.framework; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class BenchmarkResult { + private final String name; + private final List<Long> elapsedTimeNanosList; + private final List<Long> sortedElapsedTimeNanosList; + private final int bytesPerRun; + + public BenchmarkResult(final String name, final List<Long> elapsedTimeNanosList, final int bytesPerRun) { + this.name = name; + this.elapsedTimeNanosList = new ArrayList<>(elapsedTimeNanosList); + this.bytesPerRun = bytesPerRun; + this.sortedElapsedTimeNanosList = new ArrayList<>(elapsedTimeNanosList); + Collections.sort(this.sortedElapsedTimeNanosList); + } + + public int getBytesPerIteration() { + return bytesPerRun; + } + + public String getName() { + return name; + } + + public List<Long> getElapsedTimeNanosList() { + return elapsedTimeNanosList; + } + + public long getElapsedTimeNanosAtPercentile(final int percentile) { + return sortedElapsedTimeNanosList.get(Math.max(0, ((int) (getNumIterations() * percentile / 100.0)) - 1)); + } + + public int getNumIterations() { + return elapsedTimeNanosList.size(); + } + + @Override + public String toString() { + return "BenchmarkResult{" + + "name='" + name + '\'' + + ", elapsedTimeNanosList=" + elapsedTimeNanosList + + ", bytesPerRun=" + bytesPerRun + + '}'; + } +} diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/framework/BenchmarkResultWriter.java b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/BenchmarkResultWriter.java new file mode 100644 index 00000000000..26828a5a75f --- /dev/null +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/BenchmarkResultWriter.java @@ -0,0 +1,26 @@ +/* + * Copyright 2016-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.benchmark.framework; + +import java.io.Closeable; + +public interface BenchmarkResultWriter extends Closeable { + void write(BenchmarkResult benchmarkResult); + + void write(MongocryptBecnhmarkResult result); +} diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/framework/BenchmarkRunner.java b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/BenchmarkRunner.java new file mode 100644 index 00000000000..2233c8fe1a8 --- /dev/null +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/BenchmarkRunner.java @@ -0,0 +1,88 @@ +/* + * Copyright 2016-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +package com.mongodb.benchmark.framework; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +public class BenchmarkRunner { + private final Benchmark benchmark; + private final int numWarmupIterations; + private final int numIterations; + private final int minTimeSeconds; + private final int maxTimeSeconds; + + public BenchmarkRunner(final Benchmark benchmark, final int numWarmupIterations, final int numIterations) { + this(benchmark, numWarmupIterations, numIterations, 5, 30); + } + + public BenchmarkRunner(final Benchmark benchmark, final int numWarmupIterations, final int numIterations, + final int minTimeSeconds, final int maxTimeSeconds) { + this.benchmark = benchmark; + this.numWarmupIterations = numWarmupIterations; + this.numIterations = numIterations; + this.minTimeSeconds = minTimeSeconds; + this.maxTimeSeconds = maxTimeSeconds; + } + + public BenchmarkResult run() throws Exception { + benchmark.setUp(); + + for (int i = 0; i < numWarmupIterations; i++) { + benchmark.before(); + + benchmark.run(); + + benchmark.after(); + } + + List<Long> elapsedTimeNanosList = new ArrayList<>(numIterations); + + long totalTimeNanos = 0; + + for (int i = 0; shouldContinue(i, totalTimeNanos); i++) { + benchmark.before(); + + long startTimeNanos = System.nanoTime(); + benchmark.run(); + long elapsedTimeNanos = System.nanoTime() - startTimeNanos; + elapsedTimeNanosList.add(elapsedTimeNanos); + totalTimeNanos += elapsedTimeNanos; + + benchmark.after(); + } + + benchmark.tearDown(); + + return new BenchmarkResult(benchmark.getName(), elapsedTimeNanosList, benchmark.getBytesPerRun()); + } + + private boolean shouldContinue(final int iterationCount, final long totalTimeNanos) { + if (totalTimeNanos < TimeUnit.SECONDS.toNanos(minTimeSeconds)) { + return true; + } + + if (totalTimeNanos > TimeUnit.SECONDS.toNanos(maxTimeSeconds)) { + return false; + } + + return iterationCount < numIterations; + } + +} diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/framework/EvergreenBenchmarkResultWriter.java b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/EvergreenBenchmarkResultWriter.java new file mode 100644 index 00000000000..f1e5361ffeb --- /dev/null +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/EvergreenBenchmarkResultWriter.java @@ -0,0 +1,111 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.benchmark.framework; + +import org.bson.json.JsonMode; +import org.bson.json.JsonWriter; +import org.bson.json.JsonWriterSettings; + +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStreamWriter; +import java.io.StringWriter; +import java.nio.charset.StandardCharsets; + +public class EvergreenBenchmarkResultWriter implements BenchmarkResultWriter { + + private static final String OUTPUT_FILE_SYSTEM_PROPERTY = "org.mongodb.benchmarks.output"; + + private final StringWriter writer; + private final JsonWriter jsonWriter; + + public EvergreenBenchmarkResultWriter() { + writer = new StringWriter(); + jsonWriter = new JsonWriter(writer, JsonWriterSettings.builder().outputMode(JsonMode.RELAXED).indent(true).build()); + jsonWriter.writeStartDocument(); + jsonWriter.writeStartArray("results"); + } + + @Override + public void write(final BenchmarkResult benchmarkResult) { + jsonWriter.writeStartDocument(); + + jsonWriter.writeStartDocument("info"); + jsonWriter.writeString("test_name", benchmarkResult.getName()); + + jsonWriter.writeStartDocument("args"); + jsonWriter.writeInt32("threads", 1); + jsonWriter.writeEndDocument(); + jsonWriter.writeEndDocument(); + + jsonWriter.writeStartArray("metrics"); + + jsonWriter.writeStartDocument(); + jsonWriter.writeString("name","ops_per_sec" ); + jsonWriter.writeDouble("value", + (benchmarkResult.getBytesPerIteration() / 1000000d) / + (benchmarkResult.getElapsedTimeNanosAtPercentile(50) / 1000000000d)); + jsonWriter.writeEndDocument(); + + jsonWriter.writeEndArray(); + jsonWriter.writeEndDocument(); + } + + @Override + public void write(final MongocryptBecnhmarkResult result) { + jsonWriter.writeStartDocument(); + + jsonWriter.writeStartDocument("info"); + jsonWriter.writeString("test_name", result.getTestName()); + + jsonWriter.writeStartDocument("args"); + jsonWriter.writeInt32("threads", result.getThreadCount()); + jsonWriter.writeEndDocument(); + jsonWriter.writeEndDocument(); + + jsonWriter.writeString("created_at", result.getCreatedAt()); + jsonWriter.writeString("completed_at", result.getCompletedAt()); + jsonWriter.writeStartArray("metrics"); + + jsonWriter.writeStartDocument(); + jsonWriter.writeString("name", result.getMetricName()); + jsonWriter.writeString("type", result.getMetricType()); + jsonWriter.writeDouble("value", result.getMedianOpsPerSec()); + jsonWriter.writeEndDocument(); + + jsonWriter.writeEndArray(); + jsonWriter.writeEndDocument(); + } + + @Override + public void close() throws IOException { + jsonWriter.writeEndArray(); + jsonWriter.writeEndDocument(); + + try (OutputStreamWriter fileWriter = new OutputStreamWriter(new FileOutputStream(System.getProperty(OUTPUT_FILE_SYSTEM_PROPERTY)), + StandardCharsets.UTF_8)) { + fileWriter.write(getJsonResultsArrayFromJsonDocument()); + } + } + + private String getJsonResultsArrayFromJsonDocument() { + String jsonDocument = writer.toString(); + int startArrayIndex = jsonDocument.indexOf('['); + int endArrayIndex = jsonDocument.lastIndexOf(']'); + return writer.toString().substring(startArrayIndex, endArrayIndex + 1) + "\n"; + } +} diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/framework/MinimalTextBasedBenchmarkResultWriter.java b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/MinimalTextBasedBenchmarkResultWriter.java new file mode 100644 index 00000000000..b5ed85f1f2e --- /dev/null +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/MinimalTextBasedBenchmarkResultWriter.java @@ -0,0 +1,46 
@@ +/* + * Copyright 2016-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.benchmark.framework; + +import java.io.PrintStream; + +public class MinimalTextBasedBenchmarkResultWriter implements BenchmarkResultWriter { + + public static final double ONE_BILLION = 1000000000.0; // To convert nanoseconds to seconds + private final PrintStream printStream; + + public MinimalTextBasedBenchmarkResultWriter(final PrintStream printStream) { + this.printStream = printStream; + } + + @Override + public void write(final BenchmarkResult benchmarkResult) { + printStream.printf("%s: %.3f%n", benchmarkResult.getName(), + benchmarkResult.getElapsedTimeNanosAtPercentile(50) / ONE_BILLION); + } + + @Override + public void write(final MongocryptBecnhmarkResult result) { + printStream.printf("%s: %d%n", result.getTestName(), + result.getMedianOpsPerSec()); + } + + @Override + public void close() { + } +} diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/framework/MongoCryptBenchmarkRunner.java b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/MongoCryptBenchmarkRunner.java new file mode 100644 index 00000000000..718ab9f21af --- /dev/null +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/MongoCryptBenchmarkRunner.java @@ -0,0 +1,224 @@ +package com.mongodb.benchmark.framework; + +/* + * Copyright 2023-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +import com.mongodb.internal.crypt.capi.CAPI; +import com.mongodb.internal.crypt.capi.MongoCrypt; +import com.mongodb.internal.crypt.capi.MongoCryptContext; +import com.mongodb.internal.crypt.capi.MongoCryptOptions; +import com.mongodb.internal.crypt.capi.MongoCrypts; +import com.mongodb.internal.crypt.capi.MongoExplicitEncryptOptions; +import com.mongodb.internal.crypt.capi.MongoLocalKmsProviderOptions; +import org.bson.BsonBinary; +import org.bson.BsonBinarySubType; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.RawBsonDocument; + +import java.net.URL; +import java.nio.ByteBuffer; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; +import java.util.ArrayList; +import java.util.Base64; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +public class MongoCryptBenchmarkRunner { + static final int NUM_FIELDS = 1500; + static final int NUM_WARMUP_SECS = 2; + static final int NUM_SECS = 10; + static final byte[] LOCAL_MASTER_KEY = new byte[]{ + -99, -108, 75, 13, -109, -48, -59, 68, -91, 114, -3, 50, 27, -108, 48, -112, 35, 53, + 115, 124, -16, -10, -62, -12, -38, 35, 86, -25, -113, 4, -52, -6, -34, 117, -76, 81, + -121, -13, -117, -105, -41, 75, 68, 59, -84, 57, -94, -58, 77, -111, 0, 62, -47, -6, 74, + 48, -63, -46, -58, 94, -5, -84, 65, -14, 72, 19, 60, -101, 80, -4, -89, 36, 122, 46, 2, + 99, -93, -58, 22, 37, 81, 80, 120, 62, 15, -40, 110, -124, -90, -20, -115, 45, 36, 71, + -27, -81 + }; + + private static String getFileAsString(final String fileName) { + try { + URL resource = BenchmarkRunner.class.getResource("/" + fileName); + if (resource == null) { + throw new RuntimeException("Could not find file " + fileName); + } + return new String(Files.readAllBytes(Paths.get(resource.toURI()))); + } catch (Throwable t) { + throw new RuntimeException("Could not parse file " + fileName, t); + } + } + + private static BsonDocument getResourceAsDocument(final String fileName) { + return BsonDocument.parse(getFileAsString(fileName)); + } + + private static MongoCrypt createMongoCrypt() { + return MongoCrypts.create(MongoCryptOptions + .builder() + .localKmsProviderOptions(MongoLocalKmsProviderOptions.builder() + .localMasterKey(ByteBuffer.wrap(LOCAL_MASTER_KEY)) + .build()) + .build()); + } + + // DecryptTask decrypts a document repeatedly for a specified number of seconds and records ops/sec. + private static class DecryptTask implements Runnable { + public DecryptTask(final MongoCrypt mongoCrypt, final BsonDocument toDecrypt, final int numSecs, final CountDownLatch doneSignal) { + this.mongoCrypt = mongoCrypt; + this.toDecrypt = toDecrypt; + this.opsPerSecs = new ArrayList<>(numSecs); + this.numSecs = numSecs; + this.doneSignal = doneSignal; + } + + public void run() { + for (int i = 0; i < numSecs; i++) { + long opsPerSec = 0; + long start = System.nanoTime(); + // Run for one second.
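+ // Each completed decryption context counts as one operation, so the loop below records one + // ops/sec sample per second of wall-clock time; the last operation of a window may overshoot + // the one-second boundary slightly, which is negligible at these rates.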
+ while (System.nanoTime() - start < 1_000_000_000) { + try (MongoCryptContext ctx = mongoCrypt.createDecryptionContext(toDecrypt)) { + assert ctx.getState() == MongoCryptContext.State.READY; + ctx.finish(); + opsPerSec++; + } + } + opsPerSecs.add(opsPerSec); + } + doneSignal.countDown(); + } + + public long getMedianOpsPerSecs() { + if (opsPerSecs.isEmpty()) { + throw new IllegalStateException("opsPerSecs is empty. Was `run` called?"); + } + Collections.sort(opsPerSecs); + return opsPerSecs.get(numSecs / 2); + } + + private final MongoCrypt mongoCrypt; + private final BsonDocument toDecrypt; + private final ArrayList<Long> opsPerSecs; + private final int numSecs; + private final CountDownLatch doneSignal; + } + + public List<MongocryptBecnhmarkResult> run() throws InterruptedException { + System.out.printf("BenchmarkRunner is using libmongocrypt version=%s, NUM_WARMUP_SECS=%d, NUM_SECS=%d%n", + CAPI.mongocrypt_version(null).toString(), NUM_WARMUP_SECS, NUM_SECS); + // `keyDocument` is a Data Encryption Key (DEK) encrypted with the Key Encryption Key (KEK) `LOCAL_MASTER_KEY`. + BsonDocument keyDocument = getResourceAsDocument("keyDocument.json"); + try (MongoCrypt mongoCrypt = createMongoCrypt()) { + // `encrypted` will contain encrypted fields. + BsonDocument encrypted = new BsonDocument(); + { + for (int i = 0; i < NUM_FIELDS; i++) { + MongoExplicitEncryptOptions options = MongoExplicitEncryptOptions.builder() + .keyId(new BsonBinary(BsonBinarySubType.UUID_STANDARD, Base64.getDecoder().decode("YWFhYWFhYWFhYWFhYWFhYQ=="))) + .algorithm("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic") + .build(); + BsonDocument toEncrypt = new BsonDocument("v", new BsonString(String.format("value %04d", i))); + try (MongoCryptContext ctx = mongoCrypt.createExplicitEncryptionContext(toEncrypt, options)) { + // If mongocrypt_t has not yet cached the DEK, supply it. + if (MongoCryptContext.State.NEED_MONGO_KEYS == ctx.getState()) { + ctx.addMongoOperationResult(keyDocument); + ctx.completeMongoOperation(); + } + assert ctx.getState() == MongoCryptContext.State.READY; + RawBsonDocument result = ctx.finish(); + BsonValue encryptedValue = result.get("v"); + String key = String.format("key%04d", i); + encrypted.append(key, encryptedValue); + } + } + } + + // Warm up benchmark and discard the result. + DecryptTask warmup = new DecryptTask(mongoCrypt, encrypted, NUM_WARMUP_SECS, new CountDownLatch(1)); + warmup.run(); + + // Decrypt `encrypted` and measure ops/sec. + // Check with varying thread counts to measure impact of a shared pool of Cipher instances. + int[] threadCounts = {1, 2, 8, 64}; + ArrayList<Long> totalMedianOpsPerSecs = new ArrayList<>(threadCounts.length); + ArrayList<String> createdAts = new ArrayList<>(threadCounts.length); + ArrayList<String> completedAts = new ArrayList<>(threadCounts.length); + + for (int threadCount : threadCounts) { + ExecutorService executorService = Executors.newFixedThreadPool(threadCount); + CountDownLatch doneSignal = new CountDownLatch(threadCount); + ArrayList<DecryptTask> decryptTasks = new ArrayList<>(threadCount); + createdAts.add(ZonedDateTime.now(ZoneOffset.UTC).format(DateTimeFormatter.ISO_INSTANT)); + + for (int i = 0; i < threadCount; i++) { + DecryptTask decryptTask = new DecryptTask(mongoCrypt, encrypted, NUM_SECS, doneSignal); + decryptTasks.add(decryptTask); + executorService.submit(decryptTask); + } + + // Await completion of all tasks. Tasks are expected to complete shortly after NUM_SECS. Time out `await` if time exceeds 2 * NUM_SECS.
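+ // Each DecryptTask counts down doneSignal exactly once, after recording its NUM_SECS samples, so a + // false return from `await` means at least one task missed the 2 * NUM_SECS budget; the assert below + // fails fast instead of reporting results from an incomplete run.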
boolean ok = doneSignal.await(NUM_SECS * 2, TimeUnit.SECONDS); + assert ok; + completedAts.add(ZonedDateTime.now(ZoneOffset.UTC).format(DateTimeFormatter.ISO_INSTANT)); + // Sum the median ops/secs of all tasks to get total throughput. + long totalMedianOpsPerSec = 0; + for (DecryptTask decryptTask : decryptTasks) { + totalMedianOpsPerSec += decryptTask.getMedianOpsPerSecs(); + } + System.out.printf("threadCount=%d. Decrypting 1500 fields median ops/sec : %d%n", threadCount, totalMedianOpsPerSec); + totalMedianOpsPerSecs.add(totalMedianOpsPerSec); + executorService.shutdown(); + ok = executorService.awaitTermination(NUM_SECS * 2, TimeUnit.SECONDS); + assert ok; + } + + // Print the results in JSON that can be accepted by the `perf.send` command. + // See https://docs.devprod.prod.corp.mongodb.com/evergreen/Project-Configuration/Project-Commands#perfsend for the expected `perf.send` input. + List<MongocryptBecnhmarkResult> results = new ArrayList<>(threadCounts.length); + for (int i = 0; i < threadCounts.length; i++) { + int threadCount = threadCounts[i]; + long totalMedianOpsPerSec = totalMedianOpsPerSecs.get(i); + String createdAt = createdAts.get(i); + String completedAt = completedAts.get(i); + + MongocryptBecnhmarkResult result = new MongocryptBecnhmarkResult( + "java_decrypt_1500", + threadCount, + totalMedianOpsPerSec, + createdAt, + completedAt, + "medianOpsPerSec", + "THROUGHPUT"); + + results.add(result); + } + System.out.println("Results: " + results); + return results; + } + } +} + diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/framework/MongocryptBecnhmarkResult.java b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/MongocryptBecnhmarkResult.java new file mode 100644 index 00000000000..92ef999bee2 --- /dev/null +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/MongocryptBecnhmarkResult.java @@ -0,0 +1,84 @@ +package com.mongodb.benchmark.framework; +/* + * Copyright 2016-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + */ + +public class MongocryptBecnhmarkResult { + private final String testName; + private final int threadCount; + private final long medianOpsPerSec; + private final String createdAt; + private final String completedAt; + private final String metricName; + private final String metricType; + + public MongocryptBecnhmarkResult(final String testName, + final int threadCount, + final long medianOpsPerSec, + final String createdAt, + final String completedAt, + final String metricName, + final String metricType) { + this.testName = testName; + this.threadCount = threadCount; + this.medianOpsPerSec = medianOpsPerSec; + this.createdAt = createdAt; + this.completedAt = completedAt; + this.metricName = metricName; + this.metricType = metricType; + } + + public String getTestName() { + return testName; + } + + public int getThreadCount() { + return threadCount; + } + + public long getMedianOpsPerSec() { + return medianOpsPerSec; + } + + public String getCreatedAt() { + return createdAt; + } + + public String getCompletedAt() { + return completedAt; + } + + public String getMetricName() { + return metricName; + } + + public String getMetricType() { + return metricType; + } + + @Override + public String toString() { + return "MongocryptBecnhmarkResult{" + + "testName='" + testName + '\'' + + ", threadCount=" + threadCount + + ", medianOpsPerSec=" + medianOpsPerSec + + ", createdAt=" + createdAt + + ", completedAt=" + completedAt + + ", metricName=" + metricName + + ", metricType=" + metricType + + '}'; + } +} diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/framework/TextBasedBenchmarkResultWriter.java b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/TextBasedBenchmarkResultWriter.java new file mode 100644 index 00000000000..9a29c9bd621 --- /dev/null +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/framework/TextBasedBenchmarkResultWriter.java @@ -0,0 +1,102 @@ +/* + * Copyright 2016-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.mongodb.benchmark.framework; + +import java.io.PrintStream; +import java.util.Arrays; +import java.util.List; + +public class TextBasedBenchmarkResultWriter implements BenchmarkResultWriter { + + public static final double ONE_MEGABYTE = 1000000.0; // Intentionally in base 10 + public static final double ONE_BILLION = 1000000000.0; // To convert nanoseconds to seconds + private final PrintStream printStream; + private final List<Integer> percentiles; + private final boolean includeMegabytes; + private final boolean includeRaw; + + public TextBasedBenchmarkResultWriter(final PrintStream printStream) { + this(printStream, false, false); + } + + public TextBasedBenchmarkResultWriter(final PrintStream printStream, final boolean includeMegabytes, final boolean includeRaw) { + this(printStream, Arrays.asList(1, 10, 25, 50, 75, 90, 95, 99), includeMegabytes, includeRaw); + } + + public TextBasedBenchmarkResultWriter(final PrintStream printStream, final List<Integer> percentiles) { + this(printStream, percentiles, false, false); + } + + public TextBasedBenchmarkResultWriter(final PrintStream printStream, final List<Integer> percentiles, + final boolean includeMegabytes, final boolean includeRaw) { + this.printStream = printStream; + this.percentiles = percentiles; + this.includeMegabytes = includeMegabytes; + this.includeRaw = includeRaw; + } + + @Override + public void write(final BenchmarkResult benchmarkResult) { + printStream.println(benchmarkResult.getName()); + printStream.println(benchmarkResult.getNumIterations() + " iterations"); + + double megabytesPerIteration = benchmarkResult.getBytesPerIteration() / ONE_MEGABYTE; + + for (int percentile : percentiles) { + double secondsPerIteration = benchmarkResult.getElapsedTimeNanosAtPercentile(percentile) / ONE_BILLION; + printStream.printf("%dth percentile: %.3f sec/iteration%n", percentile, secondsPerIteration); + } + + if (includeMegabytes) { + printStream.println(); + for (int percentile : percentiles) { + double secondsPerIteration = benchmarkResult.getElapsedTimeNanosAtPercentile(percentile) / ONE_BILLION; + printStream.printf("%dth percentile: %.3f MB/sec%n", percentile, megabytesPerIteration / secondsPerIteration); + } + } + + if (includeRaw) { + printStream.println(); + for (int i = 0; i < benchmarkResult.getElapsedTimeNanosList().size(); i++) { + double secondsPerIteration = benchmarkResult.getElapsedTimeNanosList().get(i) / ONE_BILLION; + printStream.printf("%d: %.3f sec/iteration%n", i, secondsPerIteration); + } + } + + printStream.println(); + printStream.println(); + } + + @Override + public void write(final MongocryptBecnhmarkResult result) { + printStream.println(result.getTestName()); + + printStream.println("CreatedAt: " + result.getCreatedAt()); + printStream.println("CompletedAt: " + result.getCompletedAt()); + printStream.println("ThreadCount: " + result.getThreadCount()); + printStream.println("MedianOpsPerSec: " + result.getMedianOpsPerSec()); + printStream.println("MetricType: " + result.getMetricType()); + + printStream.println(); + printStream.println(); + } + + @Override + public void close() { + } +} diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/jmh/codec/BsonArrayCodecBenchmark.java b/driver-benchmarks/src/main/com/mongodb/benchmark/jmh/codec/BsonArrayCodecBenchmark.java new file mode 100644 index 00000000000..75cc9dab337 --- /dev/null +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/jmh/codec/BsonArrayCodecBenchmark.java @@ -0,0 +1,99 @@ +/* + * Copyright 2016-present MongoDB, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.benchmark.jmh.codec; + +import com.mongodb.internal.connection.ByteBufferBsonOutput; +import com.mongodb.internal.connection.PowerOfTwoBufferPool; +import org.bson.BsonArray; +import org.bson.BsonBinaryReader; +import org.bson.BsonBinaryWriter; +import org.bson.BsonDocument; +import org.bson.BsonDouble; +import org.bson.codecs.BsonArrayCodec; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; +import com.mongodb.lang.NonNull; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Level; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.concurrent.TimeUnit; +import static com.mongodb.benchmark.jmh.codec.BsonUtils.getDocumentAsBuffer; + +@BenchmarkMode(Mode.Throughput) +@Warmup(iterations = 20, time = 2, timeUnit = TimeUnit.SECONDS) +@Measurement(iterations = 20, time = 2, timeUnit = TimeUnit.SECONDS) +@OutputTimeUnit(TimeUnit.SECONDS) +@Fork(3) +public class BsonArrayCodecBenchmark { + + @State(Scope.Benchmark) + public static class Input { + protected final PowerOfTwoBufferPool bufferPool = PowerOfTwoBufferPool.DEFAULT; + protected final BsonArrayCodec bsonArrayCodec = new BsonArrayCodec(); + protected BsonDocument document; + protected byte[] documentBytes; + private BsonBinaryReader reader; + private BsonBinaryWriter writer; + private BsonArray bsonValues; + + @Setup + public void setup() throws IOException { + bsonValues = new BsonArray(); + document = new BsonDocument("array", bsonValues); + + for (int i = 0; i < 1000; i++) { + bsonValues.add(new BsonDouble(i)); + } + + documentBytes = getDocumentAsBuffer(document); + } + + @Setup(Level.Invocation) + public void beforeIteration() { + reader = new BsonBinaryReader(ByteBuffer.wrap(documentBytes)); + writer = new BsonBinaryWriter(new ByteBufferBsonOutput(bufferPool)); + + reader.readStartDocument(); + writer.writeStartDocument(); + writer.writeName("array"); + } + } + + @Benchmark + public void decode(@NonNull Input input, @NonNull Blackhole blackhole) { + blackhole.consume(input.bsonArrayCodec.decode(input.reader, DecoderContext.builder().build())); + } + + @Benchmark + public void encode(@NonNull Input input, @NonNull Blackhole blackhole) { + input.bsonArrayCodec.encode(input.writer, input.bsonValues, EncoderContext.builder().build()); + blackhole.consume(input); + } +} diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/jmh/codec/BsonDocumentBenchmark.java 
b/driver-benchmarks/src/main/com/mongodb/benchmark/jmh/codec/BsonDocumentBenchmark.java new file mode 100644 index 00000000000..b050f19007e --- /dev/null +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/jmh/codec/BsonDocumentBenchmark.java @@ -0,0 +1,86 @@ +/* + * Copyright 2016-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.benchmark.jmh.codec; + +import com.mongodb.internal.connection.ByteBufferBsonOutput; +import com.mongodb.internal.connection.PowerOfTwoBufferPool; +import org.bson.BsonBinaryReader; +import org.bson.BsonBinaryWriter; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; +import com.mongodb.lang.NonNull; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.infra.Blackhole; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.concurrent.TimeUnit; +import static com.mongodb.benchmark.jmh.codec.BsonUtils.getDocumentAsBuffer; + +/** + * Benchmark with minimal dependency on other codecs to evaluate BsonDocumentCodec's internal performance. 
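+ * <p>
+ * For reference only, the measured decode operation is equivalent to this plain (non-JMH) sketch, where
+ * {@code documentBytes} is the pre-encoded document prepared in {@code Input.setup()}:
+ * </p>
+ * <pre>{@code
+ * BsonDocument decoded = new BsonDocumentCodec().decode(
+ *         new BsonBinaryReader(ByteBuffer.wrap(documentBytes)), DecoderContext.builder().build());
+ * }</pre>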
+ */ +@BenchmarkMode(Mode.Throughput) +@Warmup(iterations = 20, time = 2, timeUnit = TimeUnit.SECONDS) +@Measurement(iterations = 20, time = 2, timeUnit = TimeUnit.SECONDS) +@OutputTimeUnit(TimeUnit.SECONDS) +@Fork(3) +public class BsonDocumentBenchmark { + + @State(Scope.Benchmark) + public static class Input { + protected final PowerOfTwoBufferPool bufferPool = PowerOfTwoBufferPool.DEFAULT; + protected final BsonDocumentCodec bsonDocumentCodec = new BsonDocumentCodec(); + protected BsonDocument document; + protected byte[] documentBytes; + + @Setup + public void setup() throws IOException { + document = new BsonDocument(); + + for (int i = 0; i < 500; i++) { + document.append(Integer.toString(i), new BsonInt32(i)); + } + + documentBytes = getDocumentAsBuffer(document); + } + } + + @Benchmark + public void decode(@NonNull Input input, @NonNull Blackhole blackhole) { + blackhole.consume(input.bsonDocumentCodec.decode(new BsonBinaryReader(ByteBuffer.wrap(input.documentBytes)), DecoderContext.builder().build())); + } + + @Benchmark + public void encode(@NonNull Input input, @NonNull Blackhole blackhole) { + input.bsonDocumentCodec.encode(new BsonBinaryWriter(new ByteBufferBsonOutput(input.bufferPool)), input.document, EncoderContext.builder().build()); + blackhole.consume(input); + } +} diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/jmh/codec/BsonUtils.java b/driver-benchmarks/src/main/com/mongodb/benchmark/jmh/codec/BsonUtils.java new file mode 100644 index 00000000000..58ad034788b --- /dev/null +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/jmh/codec/BsonUtils.java @@ -0,0 +1,46 @@ +/* + * Copyright 2016-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.benchmark.jmh.codec; + +import org.bson.BsonBinaryWriter; +import org.bson.BsonDocument; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.Codec; +import org.bson.codecs.EncoderContext; +import org.bson.io.BasicOutputBuffer; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; + +public class BsonUtils { + + private static final Codec BSON_DOCUMENT_CODEC = new BsonDocumentCodec(); + + private BsonUtils(){ + //NOP + } + + public static byte[] getDocumentAsBuffer(final BsonDocument document) throws IOException { + BasicOutputBuffer buffer = new BasicOutputBuffer(); + BSON_DOCUMENT_CODEC.encode(new BsonBinaryWriter(buffer), document, EncoderContext.builder().build()); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(buffer.getSize()); + buffer.pipe(baos); + return baos.toByteArray(); + } +} diff --git a/driver-benchmarks/src/main/com/mongodb/benchmark/jmh/codec/package-info.java b/driver-benchmarks/src/main/com/mongodb/benchmark/jmh/codec/package-info.java new file mode 100644 index 00000000000..4c2731a218f --- /dev/null +++ b/driver-benchmarks/src/main/com/mongodb/benchmark/jmh/codec/package-info.java @@ -0,0 +1,27 @@ +/* + * Copyright 2016-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/** + * Contains JMH benchmarks for targeted components and code paths. + * + *
+ * <p>
+ * When changes are made, the existing benchmarks can be quickly executed to assess
+ * any performance impact. These benchmarks are intended for targeted evaluation in a local environment or spawn host
+ * and are not currently executed on Evergreen. If a benchmark for a particular code path or component does not yet
+ * exist, this package provides a convenient location to set up a new one
+ * for performance testing.
+ * </p>
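+ * <p>
+ * As an illustrative sketch only (not part of the build), an individual benchmark can be run through the standard
+ * JMH runner API; the benchmark class named below is just an example:
+ * </p>
+ * <pre>{@code
+ * Options options = new OptionsBuilder()
+ *         .include(BsonDocumentBenchmark.class.getSimpleName())
+ *         .build();
+ * new Runner(options).run();
+ * }</pre>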
+ */ +package com.mongodb.benchmark.jmh.codec; diff --git a/driver-benchmarks/src/resources/keyDocument.json b/driver-benchmarks/src/resources/keyDocument.json new file mode 100644 index 00000000000..20d631db86c --- /dev/null +++ b/driver-benchmarks/src/resources/keyDocument.json @@ -0,0 +1,24 @@ +{ + "_id": { + "$binary": { + "base64": "YWFhYWFhYWFhYWFhYWFhYQ==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "ACR7Hm33dDOAAD7l2ubZhSpSUWK8BkALUY+qW3UgBAEcTV8sBwZnaAWnzDsmrX55dgmYHWfynDlJogC/e33u6pbhyXvFTs5ow9OLCuCWBJ39T/Ivm3kMaZJybkejY0V+uc4UEdHvVVz/SbitVnzs2WXdMGmo1/HmDRrxGYZjewFslquv8wtUHF5pyB+QDlQBd/al9M444/8bJZFbMSmtIg==", + "subType": "00" + } + }, + "creationDate": { + "$date": "2023-08-21T14:28:20.875Z" + }, + "updateDate": { + "$date": "2023-08-21T14:28:20.875Z" + }, + "status": 0, + "masterKey": { + "provider": "local" + } +} \ No newline at end of file diff --git a/driver-benchmarks/src/resources/logback.xml b/driver-benchmarks/src/resources/logback.xml new file mode 100644 index 00000000000..a88a7e73a5d --- /dev/null +++ b/driver-benchmarks/src/resources/logback.xml @@ -0,0 +1,12 @@ + + + + + %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n + + + + + + + diff --git a/driver-core/.gitignore b/driver-core/.gitignore new file mode 100644 index 00000000000..e69de29bb2d diff --git a/driver-core/build.gradle.kts b/driver-core/build.gradle.kts new file mode 100644 index 00000000000..4f06805a6ea --- /dev/null +++ b/driver-core/build.gradle.kts @@ -0,0 +1,100 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import ProjectExtensions.configureJarManifest +import ProjectExtensions.configureMavenPublication + +plugins { + id("project.java") + id("conventions.test-artifacts") + id("conventions.testing-mockito") + id("conventions.testing-junit") + id("conventions.testing-spock-exclude-slow") + alias(libs.plugins.build.config) +} + +base.archivesName.set("mongodb-driver-core") + +buildConfig { + className("MongoDriverVersion") + packageName("com.mongodb.internal.build") + useJavaOutput() + buildConfigField("String", "NAME", "\"mongo-java-driver\"") + buildConfigField("String", "VERSION", "\"${(project.findProperty("gitVersion") as Provider<*>?)?.get()}\"") +} + +dependencies { + api(project(path = ":bson", configuration = "default")) + implementation(project(path = ":bson-record-codec", configuration = "default")) + compileOnly(libs.graal.sdk) + + optionalImplementation(project(path = ":bson-kotlin", configuration = "default")) + optionalImplementation(project(path = ":bson-kotlinx", configuration = "default")) + optionalImplementation(project(path = ":mongodb-crypt", configuration = "default")) + optionalImplementation(libs.jnr.unixsocket) + optionalApi(platform(libs.netty.bom)) + optionalApi(libs.bundles.netty) + + // Optionally depend on both AWS SDK v2 and v1. 
+ // The driver will choose: v2 or v1 or fallback to built-in functionality + optionalImplementation(libs.bundles.aws.java.sdk.v1) + optionalImplementation(libs.bundles.aws.java.sdk.v2) + + optionalImplementation(libs.snappy.java) + optionalImplementation(libs.zstd.jni) + + testImplementation(project(path = ":bson", configuration = "testArtifacts")) + testImplementation(libs.reflections) + testImplementation(libs.netty.tcnative.boringssl.static) + listOf("linux-x86_64", "linux-aarch_64", "osx-x86_64", "osx-aarch_64", "windows-x86_64").forEach { arch -> + testImplementation("${libs.netty.tcnative.boringssl.static.get()}::$arch") + } +} + +configureMavenPublication { + pom { + name.set("MongoDB Java Driver Core") + description.set( + "Shared components for the Synchronous and Reactive Streams implementations of the MongoDB Java Driver.") + } +} + +configureJarManifest { + attributes["Automatic-Module-Name"] = "org.mongodb.driver.core" + attributes["Bundle-SymbolicName"] = "org.mongodb.driver-core" + attributes["Import-Package"] = + listOf( + "!sun.misc.*", // Used by DirectBufferDeallocator only for java 8 + "!sun.nio.ch.*", // Used by DirectBufferDeallocator only for java 8 + "!javax.annotation.*", // Brought in by com.google.code.findbugs:annotations + "!com.oracle.svm.core.annotate.*", // this dependency is provided by the GraalVM + // runtime + "io.netty.*;resolution:=optional", + "com.amazonaws.*;resolution:=optional", + "software.amazon.awssdk.*;resolution:=optional", + "org.xerial.snappy.*;resolution:=optional", + "com.github.luben.zstd.*;resolution:=optional", + "org.slf4j.*;resolution:=optional", + "jnr.unixsocket.*;resolution:=optional", + "com.mongodb.internal.crypt.capi.*;resolution:=optional", + "jdk.net.*;resolution:=optional", // Used by SocketStreamHelper & depends on JDK + // version + "org.bson.codecs.record.*;resolution:=optional", // Depends on JDK version + "org.bson.codecs.kotlin.*;resolution:=optional", + "org.bson.codecs.kotlinx.*;resolution:=optional", + "*" // import all that is not excluded or modified before + ) + .joinToString(",") +} diff --git a/driver-core/src/main/com/mongodb/AuthenticationMechanism.java b/driver-core/src/main/com/mongodb/AuthenticationMechanism.java new file mode 100644 index 00000000000..7a7b7415ef6 --- /dev/null +++ b/driver-core/src/main/com/mongodb/AuthenticationMechanism.java @@ -0,0 +1,109 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import java.util.HashMap; +import java.util.Map; + +/** + * An enumeration of the MongodDB-supported authentication mechanisms. + * + * @since 3.0 + */ +public enum AuthenticationMechanism { + /** + * The GSSAPI mechanism. See the RFC. + */ + GSSAPI("GSSAPI"), + + /** + * The MONGODB-AWS mechanism. + * @since 4.1 + * @mongodb.server.release 4.4 + */ + MONGODB_AWS("MONGODB-AWS"), + + /** + * The MONGODB-OIDC mechanism. 
+ * @since 4.10 + * @mongodb.server.release 7.0 + */ + MONGODB_OIDC("MONGODB-OIDC"), + + /** + * The MongoDB X.509 mechanism. This mechanism is available only with client certificates over SSL. + */ + MONGODB_X509("MONGODB-X509"), + + /** + * The PLAIN mechanism. See the RFC. + */ + PLAIN("PLAIN"), + + /** + * The SCRAM-SHA-1 mechanism. See the RFC. + */ + SCRAM_SHA_1("SCRAM-SHA-1"), + + /** + * The SCRAM-SHA-256 mechanism. See the RFC. + * @since 3.8 + */ + SCRAM_SHA_256("SCRAM-SHA-256"); + + private static final Map AUTH_MAP = new HashMap<>(); + private final String mechanismName; + + AuthenticationMechanism(final String mechanismName) { + this.mechanismName = mechanismName; + } + + /** + * Get the mechanism name. + * + * @return the mechanism name + */ + public String getMechanismName() { + return mechanismName; + } + + @Override + public String toString() { + return mechanismName; + } + + static { + for (final AuthenticationMechanism value : values()) { + AUTH_MAP.put(value.getMechanismName(), value); + } + } + + /** + * Gets the mechanism by its name. + * + * @param mechanismName the mechanism name + * @return the mechanism + * @see #getMechanismName() + */ + public static AuthenticationMechanism fromMechanismName(final String mechanismName) { + AuthenticationMechanism mechanism = AUTH_MAP.get(mechanismName); + if (mechanism == null) { + throw new IllegalArgumentException("Unsupported authMechanism: " + mechanismName); + } + return mechanism; + } +} diff --git a/driver-core/src/main/com/mongodb/AutoEncryptionSettings.java b/driver-core/src/main/com/mongodb/AutoEncryptionSettings.java new file mode 100644 index 00000000000..187d3421235 --- /dev/null +++ b/driver-core/src/main/com/mongodb/AutoEncryptionSettings.java @@ -0,0 +1,545 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; + +import javax.net.ssl.SSLContext; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; + +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.Collections.unmodifiableMap; + +/** + * The client-side automatic encryption settings. In-use encryption enables an application to specify what fields in a collection + * must be encrypted, and the driver automatically encrypts commands sent to MongoDB and decrypts responses. + *
+ * <p>
+ * Automatic encryption is an enterprise-only feature that applies only to operations on a collection. Automatic encryption is not
+ * supported for operations on a database or view, and will result in an error. To bypass automatic encryption,
+ * set bypassAutoEncryption=true in {@code AutoEncryptionSettings}.
+ * </p>
+ * <p>
+ * Explicit encryption/decryption and automatic decryption are community features, enabled with the new
+ * {@code com.mongodb.client.vault.ClientEncryption} type.
+ * </p>
+ * <p>
+ * A MongoClient configured with bypassAutoEncryption=true will still automatically decrypt.
+ * </p>
+ * <p>
+ * If automatic encryption fails on an operation, use a MongoClient configured with bypassAutoEncryption=true and use
+ * ClientEncryption#encrypt to manually encrypt values.
+ * </p>
+ * <p>
+ * Enabling client-side encryption reduces the maximum document and message size (using a maxBsonObjectSize of 2MiB and
+ * maxMessageSizeBytes of 6MB) and may have a negative performance impact.
+ * </p>
+ * <p>
+ * Automatic encryption requires the authenticated user to have the listCollections privilege action.
+ * </p>
+ * <p>
+ * Supplying an {@code encryptedFieldsMap} provides more security than relying on an encryptedFields obtained from the server.
+ * It protects against a malicious server advertising false encryptedFields.
+ * </p>
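+ * <p>
+ * As an illustrative sketch only, a minimal configuration with a local KMS provider might look like the following
+ * (the key material and key vault namespace are placeholders):
+ * </p>
+ * <pre>{@code
+ * Map<String, Object> localProviderProperties = new HashMap<>();
+ * localProviderProperties.put("key", new byte[96]); // use real key material in production
+ * Map<String, Map<String, Object>> kmsProviders = new HashMap<>();
+ * kmsProviders.put("local", localProviderProperties);
+ *
+ * AutoEncryptionSettings autoEncryptionSettings = AutoEncryptionSettings.builder()
+ *         .keyVaultNamespace("encryption.__keyVault")
+ *         .kmsProviders(kmsProviders)
+ *         .build();
+ * }</pre>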
+ * + * @since 3.11 + */ +public final class AutoEncryptionSettings { + private final MongoClientSettings keyVaultMongoClientSettings; + private final String keyVaultNamespace; + private final Map> kmsProviders; + private final Map kmsProviderSslContextMap; + private final Map>> kmsProviderPropertySuppliers; + private final Map schemaMap; + private final Map extraOptions; + private final boolean bypassAutoEncryption; + private final Map encryptedFieldsMap; + private final boolean bypassQueryAnalysis; + @Nullable + private final Long keyExpirationMS; + + /** + * A builder for {@code AutoEncryptionSettings} so that {@code AutoEncryptionSettings} can be immutable, and to support easier + * construction through chaining. + */ + @NotThreadSafe + public static final class Builder { + private MongoClientSettings keyVaultMongoClientSettings; + private String keyVaultNamespace; + private Map> kmsProviders; + private Map kmsProviderSslContextMap = new HashMap<>(); + private Map>> kmsProviderPropertySuppliers = new HashMap<>(); + private Map schemaMap = Collections.emptyMap(); + private Map extraOptions = Collections.emptyMap(); + private boolean bypassAutoEncryption; + private Map encryptedFieldsMap = Collections.emptyMap(); + private boolean bypassQueryAnalysis; + @Nullable private Long keyExpirationMS; + + /** + * Sets the key vault settings. + * + * @param keyVaultMongoClientSettings the key vault mongo client settings, which may be null. + * @return this + * @see #getKeyVaultMongoClientSettings() + */ + public Builder keyVaultMongoClientSettings(final MongoClientSettings keyVaultMongoClientSettings) { + this.keyVaultMongoClientSettings = keyVaultMongoClientSettings; + return this; + } + + /** + * Sets the key vault namespace + * + * @param keyVaultNamespace the key vault namespace, which may not be null + * @return this + * @see #getKeyVaultNamespace() + */ + public Builder keyVaultNamespace(final String keyVaultNamespace) { + this.keyVaultNamespace = notNull("keyVaultNamespace", keyVaultNamespace); + return this; + } + + /** + * Sets the KMS providers map. + * + * @param kmsProviders the KMS providers map, which may not be null + * @return this + * @see #kmsProviderPropertySuppliers(Map) + * @see #getKmsProviders() + */ + public Builder kmsProviders(final Map> kmsProviders) { + this.kmsProviders = notNull("kmsProviders", kmsProviders); + return this; + } + + /** + * This method is similar to {@link #kmsProviders(Map)}, but instead of configuring properties for KMS providers, + * it configures {@link Supplier}s of properties. + * + * @param kmsProviderPropertySuppliers A {@link Map} where keys identify KMS providers, + * and values specify {@link Supplier}s of properties for the KMS providers. + * Must not be null. Each {@link Supplier} must return non-empty properties. 
+ * @return this + * @see #getKmsProviderPropertySuppliers() + * @since 4.6 + */ + public Builder kmsProviderPropertySuppliers(final Map>> kmsProviderPropertySuppliers) { + this.kmsProviderPropertySuppliers = notNull("kmsProviderPropertySuppliers", kmsProviderPropertySuppliers); + return this; + } + + /** + * Sets the KMS provider to SSLContext map + * + * @param kmsProviderSslContextMap the KMS provider to SSLContext map, which may not be null + * @return this + * @see #getKmsProviderSslContextMap() + * @since 4.4 + */ + public Builder kmsProviderSslContextMap(final Map kmsProviderSslContextMap) { + this.kmsProviderSslContextMap = notNull("kmsProviderSslContextMap", kmsProviderSslContextMap); + return this; + } + + /** + * Sets the map from namespace to local schema document + * + * @param schemaMap the map from namespace to local schema document + * @return this + * @see #getSchemaMap() + */ + public Builder schemaMap(final Map schemaMap) { + this.schemaMap = notNull("schemaMap", schemaMap); + return this; + } + + /** + * Sets the extra options. + * + *
+         * <p>
+         * Note: When setting {@code cryptSharedLibPath}, the override path must be given as a path to the shared
+         * crypt library file itself, and not simply the directory that contains it.
+         * </p>
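+         * <p>
+         * As an illustrative sketch (the library path and the {@code builder} variable are placeholders):
+         * </p>
+         * <pre>{@code
+         * Map<String, Object> extraOptions = new HashMap<>();
+         * extraOptions.put("cryptSharedLibPath", "/path/to/mongo_crypt_v1.so");
+         * builder.extraOptions(extraOptions);
+         * }</pre>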
+         *
+         * @param extraOptions the extra options, which may not be null
+         * @return this
+         * @see #getExtraOptions()
+         */
+        public Builder extraOptions(final Map<String, Object> extraOptions) {
+            this.extraOptions = notNull("extraOptions", extraOptions);
+            return this;
+        }
+
+        /**
+         * Sets whether auto-encryption should be bypassed.
+         *
+         * @param bypassAutoEncryption whether auto-encryption should be bypassed
+         * @return this
+         * @see #isBypassAutoEncryption()
+         */
+        public Builder bypassAutoEncryption(final boolean bypassAutoEncryption) {
+            this.bypassAutoEncryption = bypassAutoEncryption;
+            return this;
+        }
+
+        /**
+         * Maps a collection namespace to an encryptedFields.
+         *
+         * <p>Note: only applies to queryable encryption.
+         * Automatic encryption in queryable encryption is configured with the encryptedFields.</p>
+         * <p>If a collection is present in both the {@code encryptedFieldsMap} and {@link #schemaMap}, the driver will error.</p>
+         * <p>If a collection is present on the {@code encryptedFieldsMap}, the behavior of {@code collection.createCollection()} and
+         * {@code collection.drop()} is altered.</p>
+         *
+         * <p>If a collection is not present on the {@code encryptedFieldsMap}, a server-side collection {@code encryptedFieldsMap} may be
+         * used by the driver.</p>
+         *
+         * @param encryptedFieldsMap the mapping of the collection namespace to the encryptedFields
+         * @return this
+         * @since 4.7
+         * @mongodb.server.release 7.0
+         */
+        public Builder encryptedFieldsMap(final Map<String, BsonDocument> encryptedFieldsMap) {
+            this.encryptedFieldsMap = notNull("encryptedFieldsMap", encryptedFieldsMap);
+            return this;
+        }
+
+        /**
+         * Enable or disable automatic analysis of outgoing commands.
+         *
+         * <p>Set bypassQueryAnalysis to true to use explicit encryption on indexed fields
+         * without the MongoDB Enterprise Advanced licensed crypt shared library.</p>
+         *
+         * @param bypassQueryAnalysis whether query analysis should be bypassed
+         * @return this
+         * @since 4.7
+         * @mongodb.server.release 7.0
+         */
+        public Builder bypassQueryAnalysis(final boolean bypassQueryAnalysis) {
+            this.bypassQueryAnalysis = bypassQueryAnalysis;
+            return this;
+        }
+
+        /**
+         * The cache expiration time for data encryption keys.
+         * <p>Defaults to {@code null}, which defers to libmongocrypt's default (currently 60000 ms).
+         * Set to 0 to disable key expiration.</p>
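+         * <p>
+         * For example, as a sketch (the {@code builder} variable is a placeholder), to cache data keys for at most two minutes:
+         * </p>
+         * <pre>{@code
+         * builder.keyExpiration(2L, TimeUnit.MINUTES);
+         * }</pre>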
+ * + * @param keyExpiration the cache expiration time in milliseconds or null to use libmongocrypt's default. + * @param timeUnit the time unit + * @return this + * @see #getKeyExpiration(TimeUnit) + * @since 5.5 + */ + public Builder keyExpiration(@Nullable final Long keyExpiration, final TimeUnit timeUnit) { + assertTrue(keyExpiration == null || keyExpiration >= 0, "keyExpiration must be >= 0 or null"); + this.keyExpirationMS = keyExpiration == null ? null : TimeUnit.MILLISECONDS.convert(keyExpiration, timeUnit); + return this; + } + + /** + * Build an instance of {@code AutoEncryptionSettings}. + * + * @return the settings from this builder + */ + public AutoEncryptionSettings build() { + return new AutoEncryptionSettings(this); + } + + private Builder() { + } + } + + /** + * Convenience method to create a Builder. + * + * @return a builder + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Gets the key vault settings. + * + *
+     * <p>
+     * The key vault collection is assumed to reside on the same MongoDB cluster as the encrypted collections, but the optional
+     * keyVaultMongoClientSettings can be used to route data key queries to a separate MongoDB cluster, or to the same cluster but
+     * using a different credential.
+     * </p>
+ * + * @return the key vault settings, which may be null to indicate that the same {@code MongoClient} should be used to access the key + * vault collection as is used for the rest of the application. + */ + @Nullable + public MongoClientSettings getKeyVaultMongoClientSettings() { + return keyVaultMongoClientSettings; + } + + /** + * Gets the key vault namespace. + * + *
+     * <p>
+     * The key vault namespace refers to a collection that contains all data keys used for encryption and decryption (aka the key vault
+     * collection). Data keys are stored as documents in a special MongoDB collection. Data keys are protected with encryption by a KMS
+     * provider (AWS, Azure, GCP KMS or a local master key).
+     * </p>
+ * + * @return the key vault namespace, which may not be null + */ + public String getKeyVaultNamespace() { + return keyVaultNamespace; + } + + /** + * Gets the map of KMS provider properties. + * + *
+     * <p>Multiple KMS providers can be specified within this map. Each KMS provider is identified by a unique key.
+     * Keys are formatted as either {@code "KMS provider type"} or {@code "KMS provider type:KMS provider name"} (e.g., "aws" or
+     * "aws:myname"). The KMS provider name must only contain alphanumeric characters (a-z, A-Z, 0-9), underscores (_), and must
+     * not be empty.
+     * </p>
+     * <p>
+     * Supported KMS provider types include "aws", "azure", "gcp", and "local". The provider name is optional and allows
+     * for the configuration of multiple providers of the same type under different names (e.g., "aws:name1" and
+     * "aws:name2" could represent different AWS accounts).
+     * </p>
+     * <p>
+     * The kmsProviders map values differ by provider type. The following properties are supported for each provider type:
+     * </p>
+     * <p>
+     * For "aws", the properties are:
+     * </p>
+     * <ul>
+     *     <li>accessKeyId: a String, the AWS access key identifier</li>
+     *     <li>secretAccessKey: a String, the AWS secret access key</li>
+     *     <li>sessionToken: an optional String, the AWS session token</li>
+     * </ul>
+     * <p>
+     * For "azure", the properties are:
+     * </p>
+     * <ul>
+     *     <li>tenantId: a String, the tenantId that identifies the organization for the account.</li>
+     *     <li>clientId: a String, the clientId to authenticate a registered application.</li>
+     *     <li>clientSecret: a String, the client secret to authenticate a registered application.</li>
+     *     <li>identityPlatformEndpoint: optional String, a host with optional port. e.g. "example.com" or "example.com:443".
+     *     Generally used for private Azure instances.</li>
+     * </ul>
+     * <p>
+     * For "gcp", the properties are:
+     * </p>
+     * <ul>
+     *     <li>email: a String, the service account email to authenticate.</li>
+     *     <li>privateKey: a String or byte[], the encoded PKCS#8 encrypted key</li>
+     *     <li>endpoint: optional String, a host with optional port. e.g. "example.com" or "example.com:443".</li>
+     * </ul>
+     * <p>
+     * For "kmip", the properties are:
+     * </p>
+     * <ul>
+     *     <li>endpoint: a String, the endpoint as a host with required port. e.g. "example.com:443".</li>
+     * </ul>
+     * <p>
+     * For "local", the properties are:
+     * </p>
+     * <ul>
+     *     <li>key: byte[] of length 96, the local key</li>
+     * </ul>
+     * <p>
+     * It is also permitted for the value of a kms provider to be an empty map, in which case the driver will first
+     * </p>
+     * <ul>
+     *     <li>use the {@link Supplier} configured in {@link #getKmsProviderPropertySuppliers()} to obtain a non-empty map</li>
+     *     <li>attempt to obtain the properties from the environment</li>
+     * </ul>
+     * However, KMS providers containing a name (e.g., "aws:myname") do not support dynamically obtaining KMS properties from the
+     * {@link Supplier} or environment.
+     *
+     * @return map of KMS provider properties
+     * @see #getKmsProviderPropertySuppliers()
+     */
+    public Map<String, Map<String, Object>> getKmsProviders() {
+        return unmodifiableMap(kmsProviders);
+    }
+
+    /**
+     * This method is similar to {@link #getKmsProviders()}, but instead of getting properties for KMS providers,
+     * it gets {@link Supplier}s of properties.
+     * <p>If {@link #getKmsProviders()} returns empty properties for a KMS provider,
+     * the driver will use a {@link Supplier} of properties configured for the KMS provider to obtain non-empty properties.</p>
+     *
+     * @return A {@link Map} where keys identify KMS providers, and values specify {@link Supplier}s of properties for the KMS providers.
+     * @since 4.6
+     */
+    public Map<String, Supplier<Map<String, Object>>> getKmsProviderPropertySuppliers() {
+        return unmodifiableMap(kmsProviderPropertySuppliers);
+    }
+
+    /**
+     * Gets the KMS provider to SSLContext map.
+     *
+     * <p>
+     * If a KMS provider is mapped to a non-null {@link SSLContext}, the context will be used to establish a TLS connection to the KMS.
+     * Otherwise, the default context will be used.
+     * </p>
+     *
+     * @return the KMS provider to SSLContext map
+     * @since 4.4
+     */
+    public Map<String, SSLContext> getKmsProviderSslContextMap() {
+        return unmodifiableMap(kmsProviderSslContextMap);
+    }
+
+    /**
+     * Gets the map of namespace to local JSON schema.
+     * <p>
+     * Automatic encryption is configured with an "encrypt" field in a collection's JSONSchema. By default, a collection's JSONSchema is
+     * periodically polled with the listCollections command. But a JSONSchema may be specified locally with the schemaMap option.
+     * </p>
+     * <p>
+     * The key into the map is the full namespace of the collection, which is {@code <database name>.<collection name>}. For
+     * example, if the database name is {@code "test"} and the collection name is {@code "users"}, then the namespace is
+     * {@code "test.users"}.
+     * </p>
+     * <p>
+     * Supplying a schemaMap provides more security than relying on JSON Schemas obtained from the server. It protects against a
+     * malicious server advertising a false JSON Schema, which could trick the client into sending unencrypted data that should be
+     * encrypted.
+     * </p>
+     * <p>
+     * Schemas supplied in the schemaMap only apply to configuring automatic encryption for client-side encryption. Other validation
+     * rules in the JSON schema will not be enforced by the driver and will result in an error.
+     * </p>
+     *
+     * @return map of namespace to local JSON schema
+     */
+    public Map<String, BsonDocument> getSchemaMap() {
+        return schemaMap;
+    }
+
+    /**
+     * Gets the extra options that control the behavior of auto-encryption components.
+     * <p>
+     * The extraOptions currently only relate to the mongocryptd process. The following option keys are supported:
+     * </p>
+     * <ul>
+     *     <li>mongocryptdURI: a String which defaults to "mongodb://%2Fvar%2Fmongocryptd.sock" if domain sockets are available or
+     *     "mongodb://localhost:27020" otherwise.</li>
+     *     <li>mongocryptdBypassSpawn: a boolean which defaults to false. If true, the driver will not attempt to automatically spawn a
+     *     mongocryptd process</li>
+     *     <li>mongocryptdSpawnPath: specifies the full path to the mongocryptd executable. By default the driver spawns mongocryptd from
+     *     the system path.</li>
+     *     <li>mongocryptdSpawnArgs: Used to control the behavior of mongocryptd when the driver spawns it. By default, the driver spawns
+     *     mongocryptd with the single command line argument {@code "--idleShutdownTimeoutSecs=60"}</li>
+     *     <li>cryptSharedLibPath: Optional, override the path used to load the crypt shared library. Note: All MongoClient objects in the
+     *     same process should use the same setting for cryptSharedLibPath, as it is an error to load more than one crypt shared library
+     *     simultaneously in a single operating system process.</li>
+     *     <li>cryptSharedLibRequired: boolean, if 'true', refuse to continue encryption without a crypt shared library.</li>
+     * </ul>
+     *
+     * @return the extra options map
+     */
+    public Map<String, Object> getExtraOptions() {
+        return extraOptions;
+    }
+
+    /**
+     * Gets whether auto-encryption should be bypassed. Even when this option is true, auto-decryption is still enabled.
+     * <p>
+     * This option is useful for cases where the driver throws an exception because it is unable to prove that the command does not
+     * contain any fields that should be automatically encrypted, but the application is able to determine that it does not. For these
+     * cases, the application can construct a {@code MongoClient} with {@code AutoEncryptionSettings} with {@code bypassAutoEncryption}
+     * enabled.
+     * </p>
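+     * <p>
+     * For example, as an illustrative sketch (the kmsProviders map and key vault namespace are placeholders):
+     * </p>
+     * <pre>{@code
+     * AutoEncryptionSettings settings = AutoEncryptionSettings.builder()
+     *         .keyVaultNamespace("encryption.__keyVault")
+     *         .kmsProviders(kmsProviders)
+     *         .bypassAutoEncryption(true)
+     *         .build();
+     * }</pre>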
+ * + * @return true if auto-encryption should be bypassed + */ + public boolean isBypassAutoEncryption() { + return bypassAutoEncryption; + } + + /** + * Gets the mapping of a collection namespace to encryptedFields. + * + *
+     * <p>Note: only applies to Queryable Encryption.
+     * Automatic encryption in Queryable Encryption is configured with the encryptedFields.</p>
+     * <p>If a collection is present in both the {@code encryptedFieldsMap} and {@link #schemaMap}, the driver will error.</p>
+     * <p>If a collection is present on the {@code encryptedFieldsMap}, the behavior of {@code collection.createCollection()} and
+     * {@code collection.drop()} is altered.</p>
+     *
+     * <p>If a collection is not present on the {@code encryptedFieldsMap}, a server-side collection {@code encryptedFieldsMap} may be
+     * used by the driver.</p>
+     *
+     * @return the mapping of the collection namespaces to encryptedFields
+     * @since 4.7
+     * @mongodb.server.release 7.0
+     */
+    @Nullable
+    public Map<String, BsonDocument> getEncryptedFieldsMap() {
+        return encryptedFieldsMap;
+    }
+
+    /**
+     * Gets whether automatic analysis of outgoing commands is set.
+     *
+     * <p>Set bypassQueryAnalysis to true to use explicit encryption on indexed fields
+     * without the MongoDB Enterprise Advanced licensed crypt shared library.</p>
+ * + * @return true if query analysis should be bypassed + * @since 4.7 + * @mongodb.server.release 7.0 + */ + public boolean isBypassQueryAnalysis() { + return bypassQueryAnalysis; + } + + /** + * Returns the cache expiration time for data encryption keys. + * + *
+     * <p>Defaults to {@code null}, which defers to libmongocrypt's default (currently {@code 60000 ms}).
+     * Set to {@code 0} to disable key expiration.</p>
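+     * <p>
+     * For example, as a sketch (the {@code autoEncryptionSettings} variable is a placeholder):
+     * </p>
+     * <pre>{@code
+     * Long keyExpirationMillis = autoEncryptionSettings.getKeyExpiration(TimeUnit.MILLISECONDS);
+     * }</pre>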
+ * + * @param timeUnit the time unit, which must not be null + * @return the cache expiration time or null if not set. + * @since 5.5 + */ + @Nullable + public Long getKeyExpiration(final TimeUnit timeUnit) { + return keyExpirationMS == null ? null : timeUnit.convert(keyExpirationMS, TimeUnit.MILLISECONDS); + } + + private AutoEncryptionSettings(final Builder builder) { + this.keyVaultMongoClientSettings = builder.keyVaultMongoClientSettings; + this.keyVaultNamespace = notNull("keyVaultNamespace", builder.keyVaultNamespace); + this.kmsProviders = notNull("kmsProviders", builder.kmsProviders); + this.kmsProviderSslContextMap = notNull("kmsProviderSslContextMap", builder.kmsProviderSslContextMap); + this.kmsProviderPropertySuppliers = notNull("kmsProviderPropertySuppliers", builder.kmsProviderPropertySuppliers); + this.schemaMap = notNull("schemaMap", builder.schemaMap); + this.extraOptions = notNull("extraOptions", builder.extraOptions); + this.bypassAutoEncryption = builder.bypassAutoEncryption; + this.encryptedFieldsMap = builder.encryptedFieldsMap; + this.bypassQueryAnalysis = builder.bypassQueryAnalysis; + this.keyExpirationMS = builder.keyExpirationMS; + } + + @Override + public String toString() { + return "AutoEncryptionSettings{}"; + } +} diff --git a/driver-core/src/main/com/mongodb/AwsCredential.java b/driver-core/src/main/com/mongodb/AwsCredential.java new file mode 100644 index 00000000000..2fd6f8fb6f4 --- /dev/null +++ b/driver-core/src/main/com/mongodb/AwsCredential.java @@ -0,0 +1,78 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.lang.Nullable; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A representation of Amazon Web Services credentials for API authentication. + * + * @see MongoCredential#createAwsCredential(String, char[]) + * @see MongoCredential#AWS_CREDENTIAL_PROVIDER_KEY + * @since 4.4 + */ +@Beta(Reason.CLIENT) +public final class AwsCredential { + private final String accessKeyId; + private final String secretAccessKey; + private final String sessionToken; + + /** + * Construct a new instance. + * + * @param accessKeyId the non-null access key ID that identifies the temporary security credentials. + * @param secretAccessKey the non-null secret access key that can be used to sign requests + * @param sessionToken the session token, which may be null + */ + public AwsCredential(final String accessKeyId, final String secretAccessKey, @Nullable final String sessionToken) { + this.accessKeyId = notNull("accessKeyId", accessKeyId); + this.secretAccessKey = notNull("secretAccessKey", secretAccessKey); + this.sessionToken = sessionToken; + } + + /** + * Gets the access key ID that identifies the temporary security credentials. 
+     *
+     * @return the accessKeyId, which may not be null
+     */
+    public String getAccessKeyId() {
+        return accessKeyId;
+    }
+
+    /**
+     * Gets the secret access key that can be used to sign requests.
+     *
+     * @return the secretAccessKey, which may not be null
+     */
+    public String getSecretAccessKey() {
+        return secretAccessKey;
+    }
+
+    /**
+     * Gets the session token.
+     *
+     * @return the sessionToken, which may be null
+     */
+    @Nullable
+    public String getSessionToken() {
+        return sessionToken;
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/BSONTimestampCodec.java b/driver-core/src/main/com/mongodb/BSONTimestampCodec.java
new file mode 100644
index 00000000000..d5463d97df3
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/BSONTimestampCodec.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb;
+
+import org.bson.BsonReader;
+import org.bson.BsonTimestamp;
+import org.bson.BsonWriter;
+import org.bson.codecs.Codec;
+import org.bson.codecs.DecoderContext;
+import org.bson.codecs.EncoderContext;
+import org.bson.types.BSONTimestamp;
+
+/**
+ * Knows how to encode and decode BSON timestamps.
+ *
+ * @mongodb.driver.manual reference/bson-types/#timestamps Timestamps
+ * @since 3.0
+ */
+public class BSONTimestampCodec implements Codec<BSONTimestamp> {
+    @Override
+    public void encode(final BsonWriter writer, final BSONTimestamp value, final EncoderContext encoderContext) {
+        writer.writeTimestamp(new BsonTimestamp(value.getTime(), value.getInc()));
+    }
+
+    @Override
+    public BSONTimestamp decode(final BsonReader reader, final DecoderContext decoderContext) {
+        BsonTimestamp timestamp = reader.readTimestamp();
+        return new BSONTimestamp(timestamp.getTime(), timestamp.getInc());
+    }
+
+    @Override
+    public Class<BSONTimestamp> getEncoderClass() {
+        return BSONTimestamp.class;
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/BasicDBList.java b/driver-core/src/main/com/mongodb/BasicDBList.java
new file mode 100644
index 00000000000..88db1d0ae4e
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/BasicDBList.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// BasicDBList.java
+
+package com.mongodb;
+
+import org.bson.types.BasicBSONList;
+
+/**
+ * An implementation of List that reflects the way BSON lists work.
+ */ +public class BasicDBList extends BasicBSONList implements DBObject { + + private static final long serialVersionUID = -4415279469780082174L; + + @Override + public boolean isPartialObject() { + return _isPartialObject; + } + + @Override + public void markAsPartialObject() { + _isPartialObject = true; + } + + /** + * Copies this instance into a new Object. + * + * @return a new BasicDBList with the same values as this instance + */ + public Object copy() { + // copy field values into new object + BasicDBList newobj = new BasicDBList(); + // need to clone the sub obj + for (int i = 0; i < size(); ++i) { + Object val = get(i); + if (val instanceof BasicDBObject) { + val = ((BasicDBObject) val).copy(); + } else if (val instanceof BasicDBList) { + val = ((BasicDBList) val).copy(); + } + newobj.add(val); + } + return newobj; + } + + /** + * Whether the object is partial + */ + private boolean _isPartialObject; +} diff --git a/driver-core/src/main/com/mongodb/BasicDBObject.java b/driver-core/src/main/com/mongodb/BasicDBObject.java new file mode 100644 index 00000000000..0125f9b7a86 --- /dev/null +++ b/driver-core/src/main/com/mongodb/BasicDBObject.java @@ -0,0 +1,327 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import org.bson.BSONObject; +import org.bson.BasicBSONObject; +import org.bson.BsonBinaryWriter; +import org.bson.BsonDocument; +import org.bson.BsonDocumentWrapper; +import org.bson.UuidRepresentation; +import org.bson.codecs.Codec; +import org.bson.codecs.Decoder; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.Encoder; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; +import org.bson.io.BasicOutputBuffer; +import org.bson.io.OutputBuffer; +import org.bson.json.JsonMode; +import org.bson.json.JsonReader; +import org.bson.json.JsonWriter; +import org.bson.json.JsonWriterSettings; +import org.bson.types.BasicBSONList; + +import java.io.StringWriter; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.TreeSet; + +import static org.bson.codecs.configuration.CodecRegistries.withUuidRepresentation; + +/** + * A basic implementation of BSON object that is MongoDB specific. A {@code DBObject} can be created as follows, using this class: + *
+ * <pre>
+ * DBObject obj = new BasicDBObject();
+ * obj.put( "foo", "bar" );
+ * </pre>
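+ * <p>
+ * As a sketch, documents can also round-trip through MongoDB Extended JSON using the {@code parse} and {@code toJson}
+ * methods of this class:
+ * </p>
+ * <pre>
+ * BasicDBObject parsed = BasicDBObject.parse("{\"a\": 1}");
+ * String json = parsed.toJson();
+ * </pre>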
+ * + * @mongodb.driver.manual core/document/ MongoDB Documents + */ +@SuppressWarnings({"rawtypes"}) +public class BasicDBObject extends BasicBSONObject implements DBObject, Bson { + private static final long serialVersionUID = -4415279469780082174L; + + private static final Codec DEFAULT_CODEC = + withUuidRepresentation(DBObjectCodec.getDefaultRegistry(), UuidRepresentation.STANDARD) + .get(BasicDBObject.class); + + /** + * Whether the object is partial + */ + private boolean isPartialObject; + + /** + * Parses a string in MongoDB Extended JSON format to a {@code BasicDBObject}. + * + * @param json the JSON string + * @return a corresponding {@code BasicDBObject} object + * @see org.bson.json.JsonReader + * @mongodb.driver.manual reference/mongodb-extended-json/ MongoDB Extended JSON + */ + public static BasicDBObject parse(final String json) { + return parse(json, DEFAULT_CODEC); + } + + /** + * Parses a string in MongoDB Extended JSON format to a {@code BasicDBObject}. + * + * @param json the JSON string + * @param decoder the decoder to use to decode the BasicDBObject instance + * @return a corresponding {@code BasicDBObject} object + * @see org.bson.json.JsonReader + * @mongodb.driver.manual reference/mongodb-extended-json/ MongoDB Extended JSON + */ + public static BasicDBObject parse(final String json, final Decoder decoder) { + return decoder.decode(new JsonReader(json), DecoderContext.builder().build()); + } + + /** + * Creates an empty object. + */ + public BasicDBObject() { + } + + /** + * Creates an empty object + * + * @param size an estimate of number of fields that will be inserted + */ + public BasicDBObject(final int size) { + super(size); + } + + /** + * Creates an object with the given key/value + * + * @param key key under which to store + * @param value value to store + */ + public BasicDBObject(final String key, final Object value) { + super(key, value); + } + + /** + * Creates an object from a map. + * + * @param map map to convert + */ + public BasicDBObject(final Map map) { + super(map); + } + + /** + * Add a key/value pair to this object + * + * @param key the field name + * @param val the field value + * @return this BasicDBObject with the new values added + */ + @Override + public BasicDBObject append(final String key, final Object val) { + put(key, val); + return this; + } + + /** + * Whether {@link #markAsPartialObject} was ever called only matters if you are going to upsert and do not want to risk losing fields. + * + * @return true if this has been marked as a partial object + */ + @Override + public boolean isPartialObject() { + return isPartialObject; + } + + /** + * Gets a JSON representation of this document using the {@link org.bson.json.JsonMode#RELAXED} output mode, and otherwise the default + * settings of {@link JsonWriterSettings.Builder} and {@link DBObjectCodec}. + * + * @return a JSON representation of this document + * @throws org.bson.codecs.configuration.CodecConfigurationException if the document contains types not in the default registry + * @see #toJson(JsonWriterSettings) + * @see JsonWriterSettings + */ + public String toJson() { + return toJson(JsonWriterSettings.builder().outputMode(JsonMode.RELAXED).build()); + } + + /** + * Gets a JSON representation of this document + * + *
+     * <p>With the default {@link DBObjectCodec}.</p>
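+     * <p>
+     * For example, as a sketch (the {@code doc} variable is a placeholder):
+     * </p>
+     * <pre>
+     * String json = doc.toJson(JsonWriterSettings.builder().outputMode(JsonMode.RELAXED).build());
+     * </pre>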
+ * + * @param writerSettings the json writer settings to use when encoding + * @return a JSON representation of this document + * @throws org.bson.codecs.configuration.CodecConfigurationException if the document contains types not in the default registry + */ + public String toJson(final JsonWriterSettings writerSettings) { + return toJson(writerSettings, DEFAULT_CODEC); + } + + /** + * Gets a JSON representation of this document + * + *
+     * <p>With the default {@link JsonWriterSettings}.</p>
+ * + * @param encoder the BasicDBObject codec instance to encode the document with + * @return a JSON representation of this document + * @throws org.bson.codecs.configuration.CodecConfigurationException if the registry does not contain a codec for the document values. + */ + public String toJson(final Encoder encoder) { + return toJson(JsonWriterSettings.builder().outputMode(JsonMode.RELAXED).build(), encoder); + } + + /** + * Gets a JSON representation of this document + * + * @param writerSettings the json writer settings to use when encoding + * @param encoder the BasicDBObject codec instance to encode the document with + * @return a JSON representation of this document + * @throws org.bson.codecs.configuration.CodecConfigurationException if the registry does not contain a codec for the document values. + */ + public String toJson(final JsonWriterSettings writerSettings, final Encoder encoder) { + JsonWriter writer = new JsonWriter(new StringWriter(), writerSettings); + encoder.encode(writer, this, EncoderContext.builder().build()); + return writer.getWriter().toString(); + } + + @Override + public boolean equals(final Object o) { + if (o == this) { + return true; + } + + if (!(o instanceof BSONObject)) { + return false; + } + + BSONObject other = (BSONObject) o; + + if (!keySet().equals(other.keySet())) { + return false; + } + + return Arrays.equals(toBson(canonicalizeBSONObject(this)), toBson(canonicalizeBSONObject(other))); + } + + @Override + public int hashCode() { + return Arrays.hashCode(toBson(canonicalizeBSONObject(this))); + } + + /** + * Convert the object to its BSON representation, using the {@code STANDARD} representation for UUID. This is safe to do in the context + * of this class because currently this method is only used for equality and hash code, and is not passed to any other parts of the + * library. + */ + private static byte[] toBson(final BasicDBObject dbObject) { + OutputBuffer outputBuffer = new BasicOutputBuffer(); + DEFAULT_CODEC.encode(new BsonBinaryWriter(outputBuffer), dbObject, EncoderContext.builder().build()); + return outputBuffer.toByteArray(); + } + + /** + *
+     * <p>Returns a JSON serialization of this object.</p>
+     *
+     * <p>The output will look like: {@code {"a":1, "b":["x","y","z"]} }</p>
+ * + * @return JSON serialization + */ + public String toString() { + return toJson(); + } + + /** + * If this object was retrieved with only some fields (using a field filter) this method will be called to mark it as such. + */ + @Override + public void markAsPartialObject() { + isPartialObject = true; + } + + /** + * Creates a new instance which is a copy of this BasicDBObject. + * + * @return a BasicDBObject with exactly the same values as this instance. + */ + public Object copy() { + // copy field values into new object + BasicDBObject newCopy = new BasicDBObject(this.toMap()); + // need to clone the sub obj + for (final String field : keySet()) { + Object val = get(field); + if (val instanceof BasicDBObject) { + newCopy.put(field, ((BasicDBObject) val).copy()); + } else if (val instanceof BasicDBList) { + newCopy.put(field, ((BasicDBList) val).copy()); + } + } + return newCopy; + } + + @Override + public BsonDocument toBsonDocument(final Class documentClass, final CodecRegistry codecRegistry) { + return new BsonDocumentWrapper<>(this, codecRegistry.get(BasicDBObject.class)); + } + + // create a copy of "from", but with keys ordered alphabetically + @SuppressWarnings("unchecked") + private static Object canonicalize(final Object from) { + if (from instanceof BSONObject && !(from instanceof BasicBSONList)) { + return canonicalizeBSONObject((BSONObject) from); + } else if (from instanceof List) { + return canonicalizeList((List) from); + } else if (from instanceof Map) { + return canonicalizeMap((Map) from); + } else { + return from; + } + } + + private static Map canonicalizeMap(final Map from) { + Map canonicalized = new LinkedHashMap<>(from.size()); + TreeSet keysInOrder = new TreeSet<>(from.keySet()); + for (String key : keysInOrder) { + Object val = from.get(key); + canonicalized.put(key, canonicalize(val)); + } + return canonicalized; + } + + private static BasicDBObject canonicalizeBSONObject(final BSONObject from) { + BasicDBObject canonicalized = new BasicDBObject(); + TreeSet keysInOrder = new TreeSet<>(from.keySet()); + for (String key : keysInOrder) { + Object val = from.get(key); + canonicalized.put(key, canonicalize(val)); + } + return canonicalized; + } + + private static List canonicalizeList(final List list) { + List canonicalized = new ArrayList<>(list.size()); + for (Object cur : list) { + canonicalized.add(canonicalize(cur)); + } + return canonicalized; + } +} diff --git a/driver-core/src/main/com/mongodb/BasicDBObjectBuilder.java b/driver-core/src/main/com/mongodb/BasicDBObjectBuilder.java new file mode 100644 index 00000000000..d7b8bd65f99 --- /dev/null +++ b/driver-core/src/main/com/mongodb/BasicDBObjectBuilder.java @@ -0,0 +1,151 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import java.util.Iterator; +import java.util.LinkedList; +import java.util.Map; + +/** + *
+ * <p>Utility for building complex objects. For example:</p>
+ * <pre>
+ *   {@code BasicDBObjectBuilder.start().add( "name" , "eliot").add("number" , 17).get()}
+ * </pre>
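+ * <p>
+ * As a sketch, nested documents can be built with {@code push} and {@code pop}:
+ * </p>
+ * <pre>
+ *   {@code BasicDBObjectBuilder.start().add("name", "eliot").push("address").add("city", "NYC").pop().get()}
+ * </pre>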
+ */ +@SuppressWarnings("rawtypes") +public class BasicDBObjectBuilder { + + /** + * Creates a builder intialized with an empty document. + */ + public BasicDBObjectBuilder() { + _stack = new LinkedList<>(); + _stack.add(new BasicDBObject()); + } + + /** + * Creates a builder intialized with an empty document. + * + * @return The new empty builder + */ + public static BasicDBObjectBuilder start() { + return new BasicDBObjectBuilder(); + } + + /** + * Creates a builder initialized with the given key/value. + * + * @param key The field name + * @param val The value + * @return the new builder + */ + public static BasicDBObjectBuilder start(final String key, final Object val) { + return (new BasicDBObjectBuilder()).add(key, val); + } + + /** + * Creates an object builder from an existing map of key value pairs. + * + * @param documentAsMap a document in Map form. + * @return the new builder + */ + @SuppressWarnings("unchecked") + public static BasicDBObjectBuilder start(final Map documentAsMap) { + BasicDBObjectBuilder builder = new BasicDBObjectBuilder(); + Iterator i = documentAsMap.entrySet().iterator(); + while (i.hasNext()) { + Map.Entry entry = i.next(); + builder.add(entry.getKey().toString(), entry.getValue()); + } + return builder; + } + + /** + * Appends the key/value to the active object + * + * @param key the field name + * @param val the value of the field + * @return {@code this} so calls can be chained + */ + public BasicDBObjectBuilder append(final String key, final Object val) { + _cur().put(key, val); + return this; + } + + /** + * Same as append + * + * @param key the field name + * @param val the value of the field + * @return {@code this} so calls can be chained + * @see #append(String, Object) + */ + public BasicDBObjectBuilder add(final String key, final Object val) { + return append(key, val); + } + + /** + * Creates a new empty object and inserts it into the current object with the given key. The new child object becomes the active one. + * + * @param key the field name + * @return {@code this} so calls can be chained + */ + public BasicDBObjectBuilder push(final String key) { + BasicDBObject o = new BasicDBObject(); + _cur().put(key, o); + _stack.addLast(o); + return this; + } + + /** + * Pops the active object, which means that the parent object becomes active + * + * @return {@code this} so calls can be chained + */ + public BasicDBObjectBuilder pop() { + if (_stack.size() <= 1) { + throw new IllegalArgumentException("can't pop last element"); + } + _stack.removeLast(); + return this; + } + + /** + * Gets the top level document. + * + * @return The base object + */ + public DBObject get() { + return _stack.getFirst(); + } + + /** + * Returns true if no key/value was inserted into the top level document. + * + * @return true if empty + */ + public boolean isEmpty() { + return ((BasicDBObject) _stack.getFirst()).size() == 0; + } + + private DBObject _cur() { + return _stack.getLast(); + } + + private final LinkedList _stack; + +} diff --git a/driver-core/src/main/com/mongodb/BasicDBObjectFactory.java b/driver-core/src/main/com/mongodb/BasicDBObjectFactory.java new file mode 100644 index 00000000000..c15794e2ae6 --- /dev/null +++ b/driver-core/src/main/com/mongodb/BasicDBObjectFactory.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb;
+
+import java.util.List;
+
+class BasicDBObjectFactory implements DBObjectFactory {
+    @Override
+    public DBObject getInstance() {
+        return new BasicDBObject();
+    }
+
+    @Override
+    public DBObject getInstance(final List<String> path) {
+        return new BasicDBObject();
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/Block.java b/driver-core/src/main/com/mongodb/Block.java
new file mode 100644
index 00000000000..ec094e44c1d
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/Block.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb;
+
+/**
+ * An interface for applying some logic against the given parameter.
+ *
+ * @param <T> the value type
+ * @since 3.0
+ */
+public interface Block<T> {
+    /**
+     * Apply some logic to the value.
+     *
+     * @param t the value to apply to
+     */
+    void apply(T t);
+}
diff --git a/driver-core/src/main/com/mongodb/ClientBulkWriteException.java b/driver-core/src/main/com/mongodb/ClientBulkWriteException.java
new file mode 100644
index 00000000000..89a3eebabce
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/ClientBulkWriteException.java
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb;
+
+import com.mongodb.bulk.WriteConcernError;
+import com.mongodb.client.model.bulk.ClientBulkWriteResult;
+import com.mongodb.client.model.bulk.ClientNamespacedWriteModel;
+import com.mongodb.lang.Nullable;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+
+import static com.mongodb.assertions.Assertions.isTrueArgument;
+import static com.mongodb.assertions.Assertions.notNull;
+import static com.mongodb.internal.operation.ClientBulkWriteOperation.Exceptions.serverAddressFromException;
+import static java.util.Collections.emptyList;
+import static java.util.Collections.emptyMap;
+import static java.util.Collections.unmodifiableList;
+import static java.util.Collections.unmodifiableMap;
+import static java.util.Optional.ofNullable;
+
+/**
+ * The result of an unsuccessful or partially unsuccessful client-level bulk write operation.
+ * Note that the {@linkplain #getCode() code} and {@linkplain #getErrorLabels() labels} from this exception are not useful.
+ * An application should use those from the {@linkplain #getCause() top-level error}.
+ *
+ * @see ClientBulkWriteResult
+ * @since 5.3
+ * @serial exclude
+ */
+public final class ClientBulkWriteException extends MongoServerException {
+    private static final long serialVersionUID = 1;
+
+    private final List<WriteConcernError> writeConcernErrors;
+    private final Map<Integer, WriteError> writeErrors;
+    @Nullable
+    private final ClientBulkWriteResult partialResult;
+
+    /**
+     * Constructs a new instance.
+     *
+     * @param error The {@linkplain #getCause() top-level error}.
+     * @param writeConcernErrors The {@linkplain #getWriteConcernErrors() write concern errors}.
+     * @param writeErrors The {@linkplain #getWriteErrors() write errors}.
+     * @param partialResult The {@linkplain #getPartialResult() partial result}.
+     * @param serverAddress The {@linkplain MongoServerException#getServerAddress() server address}.
+     * If {@code error} is a {@link MongoServerException} or a {@link MongoSocketException}, then {@code serverAddress}
+     * must be equal to the {@link ServerAddress} they bear.
+     */
+    public ClientBulkWriteException(
+            @Nullable final MongoException error,
+            @Nullable final List<WriteConcernError> writeConcernErrors,
+            @Nullable final Map<Integer, WriteError> writeErrors,
+            @Nullable final ClientBulkWriteResult partialResult,
+            final ServerAddress serverAddress) {
+        super(
+                message(
+                        error, writeConcernErrors, writeErrors, partialResult,
+                        notNull("serverAddress", serverAddress)),
+                validateServerAddress(error, serverAddress));
+        initCause(error);
+        isTrueArgument("At least one of `writeConcernErrors`, `writeErrors`, `partialResult` must be non-null or non-empty",
+                !(writeConcernErrors == null || writeConcernErrors.isEmpty())
+                        || !(writeErrors == null || writeErrors.isEmpty())
+                        || partialResult != null);
+        this.writeConcernErrors = writeConcernErrors == null ? emptyList() : unmodifiableList(writeConcernErrors);
+        this.writeErrors = writeErrors == null ? emptyMap() : unmodifiableMap(writeErrors);
+        this.partialResult = partialResult;
+    }
+
+    private static String message(
+            @Nullable final MongoException error,
+            @Nullable final List<WriteConcernError> writeConcernErrors,
+            @Nullable final Map<Integer, WriteError> writeErrors,
+            @Nullable final ClientBulkWriteResult partialResult,
+            final ServerAddress serverAddress) {
+        return "Client-level bulk write operation error on MongoDB server " + serverAddress + "."
+                + (error == null ? "" : " Top-level error: " + error + ".")
+                + (writeErrors == null || writeErrors.isEmpty() ?
"" : " Write errors: " + writeErrors + ".") + + (writeConcernErrors == null || writeConcernErrors.isEmpty() ? "" : " Write concern errors: " + writeConcernErrors + ".") + + (partialResult == null ? "" : " Partial result: " + partialResult + "."); + } + + private static ServerAddress validateServerAddress(@Nullable final MongoException error, final ServerAddress serverAddress) { + serverAddressFromException(error).ifPresent(serverAddressFromError -> + isTrueArgument("`serverAddress` must be equal to that of the `error`", serverAddressFromError.equals(serverAddress))); + return error instanceof MongoServerException + ? ((MongoServerException) error).getServerAddress() + : serverAddress; + } + + /** + * The top-level error. That is an error that is neither a {@linkplain #getWriteConcernErrors() write concern error}, + * nor is an {@linkplain #getWriteErrors() error of an individual write operation}. + * + * @return The top-level error. Non-{@code null} only if a top-level error occurred. + */ + @Override + @Nullable + public MongoException getCause() { + return (MongoException) super.getCause(); + } + + /** + * The {@link WriteConcernError}s that occurred while executing the client-level bulk write operation. + *
+     * <p>
+     * There are no guarantees on mutability of the {@link List} returned.</p>
+     *
+     * @return The {@link WriteConcernError}s.
+     */
+    public List<WriteConcernError> getWriteConcernErrors() {
+        return writeConcernErrors;
+    }
+
+    /**
+     * The indexed {@link WriteError}s.
+     * The {@linkplain Map#keySet() keys} are the indexes of the corresponding {@link ClientNamespacedWriteModel}s
+     * in the corresponding client-level bulk write operation.
+     *
+     * <p>
+     * There are no guarantees on mutability or iteration order of the {@link Map} returned.</p>
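+     * <p>
+     * A minimal sketch of inspecting the indexed errors, assuming hypothetical {@code client} and {@code models} variables:
+     * <pre>
+     * {@code
+     * try {
+     *     client.bulkWrite(models);
+     * } catch (ClientBulkWriteException e) {
+     *     e.getWriteErrors().forEach((index, writeError) -> System.err.println(index + ": " + writeError.getMessage()));
+     * }
+     * }
+     * </pre>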
+     *
+     * @return The indexed {@link WriteError}s.
+     * @see ClientBulkWriteResult.VerboseResults#getInsertResults()
+     * @see ClientBulkWriteResult.VerboseResults#getUpdateResults()
+     * @see ClientBulkWriteResult.VerboseResults#getDeleteResults()
+     */
+    public Map<Integer, WriteError> getWriteErrors() {
+        return writeErrors;
+    }
+
+    /**
+     * The result of the part of a client-level bulk write operation that is known to be successful.
+     *
+     * @return The successful partial result. {@linkplain Optional#isPresent() Present} only if the client received a response indicating success
+     * of at least one {@linkplain ClientNamespacedWriteModel individual write operation}.
+     */
+    public Optional<ClientBulkWriteResult> getPartialResult() {
+        return ofNullable(partialResult);
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/ClientEncryptionSettings.java b/driver-core/src/main/com/mongodb/ClientEncryptionSettings.java
new file mode 100644
index 00000000000..6f0411d749c
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/ClientEncryptionSettings.java
@@ -0,0 +1,391 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb;
+
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.NotThreadSafe;
+import com.mongodb.annotations.Reason;
+import com.mongodb.lang.Nullable;
+
+import javax.net.ssl.SSLContext;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Supplier;
+
+import static com.mongodb.assertions.Assertions.assertTrue;
+import static com.mongodb.assertions.Assertions.notNull;
+import static com.mongodb.internal.TimeoutSettings.convertAndValidateTimeout;
+import static java.util.Collections.unmodifiableMap;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+
+/**
+ * The client-side settings for data key creation and explicit encryption.
+ *
+ * <p>
+ * Explicit encryption/decryption is a community feature, enabled with the new {@code com.mongodb.client.vault.ClientEncryption} type,
+ * for which this is the settings.
+ * </p>
+ *
+ * @since 3.11
+ */
+public final class ClientEncryptionSettings {
+    private final MongoClientSettings keyVaultMongoClientSettings;
+    private final String keyVaultNamespace;
+    private final Map<String, Map<String, Object>> kmsProviders;
+    private final Map<String, Supplier<Map<String, Object>>> kmsProviderPropertySuppliers;
+    private final Map<String, SSLContext> kmsProviderSslContextMap;
+    @Nullable
+    private final Long timeoutMS;
+    @Nullable
+    private final Long keyExpirationMS;
+
+    /**
+     * A builder for {@code ClientEncryptionSettings} so that {@code ClientEncryptionSettings} can be immutable, and to support easier
+     * construction through chaining.
+     */
+    @NotThreadSafe
+    public static final class Builder {
+        private MongoClientSettings keyVaultMongoClientSettings;
+        private String keyVaultNamespace;
+        private Map<String, Map<String, Object>> kmsProviders;
+        private Map<String, Supplier<Map<String, Object>>> kmsProviderPropertySuppliers = new HashMap<>();
+        private Map<String, SSLContext> kmsProviderSslContextMap = new HashMap<>();
+        @Nullable
+        private Long timeoutMS;
+        @Nullable
+        private Long keyExpirationMS;
+
+        /**
+         * Sets the {@link MongoClientSettings} that will be used to access the key vault.
+         *
+         * @param keyVaultMongoClientSettings the key vault mongo client settings, which may not be null.
+         * @return this
+         * @see #getKeyVaultMongoClientSettings()
+         */
+        public Builder keyVaultMongoClientSettings(final MongoClientSettings keyVaultMongoClientSettings) {
+            this.keyVaultMongoClientSettings = notNull("keyVaultMongoClientSettings", keyVaultMongoClientSettings);
+            return this;
+        }
+
+        /**
+         * Sets the key vault namespace.
+         *
+         * @param keyVaultNamespace the key vault namespace, which may not be null
+         * @return this
+         * @see #getKeyVaultNamespace()
+         */
+        public Builder keyVaultNamespace(final String keyVaultNamespace) {
+            this.keyVaultNamespace = notNull("keyVaultNamespace", keyVaultNamespace);
+            return this;
+        }
+
+        /**
+         * Sets the KMS providers map.
+         *
+         * @param kmsProviders the KMS providers map, which may not be null
+         * @return this
+         * @see #kmsProviderPropertySuppliers(Map)
+         * @see #getKmsProviders()
+         */
+        public Builder kmsProviders(final Map<String, Map<String, Object>> kmsProviders) {
+            this.kmsProviders = notNull("kmsProviders", kmsProviders);
+            return this;
+        }
+
+        /**
+         * This method is similar to {@link #kmsProviders(Map)}, but instead of setting properties for KMS providers,
+         * it sets {@link Supplier}s of properties.
+         *
+         * @param kmsProviderPropertySuppliers A {@link Map} where keys identify KMS providers,
+         * and values specify {@link Supplier}s of properties for the KMS providers.
+         * Must not be null. Each {@link Supplier} must return non-empty properties.
+         * @return this
+         * @see #getKmsProviderPropertySuppliers()
+         * @since 4.6
+         */
+        public Builder kmsProviderPropertySuppliers(final Map<String, Supplier<Map<String, Object>>> kmsProviderPropertySuppliers) {
+            this.kmsProviderPropertySuppliers = notNull("kmsProviderPropertySuppliers", kmsProviderPropertySuppliers);
+            return this;
+        }
+
+        /**
+         * Sets the KMS provider to SSLContext map.
+         *
+         * @param kmsProviderSslContextMap the KMS provider to SSLContext map, which may not be null
+         * @return this
+         * @see #getKmsProviderSslContextMap()
+         * @since 4.4
+         */
+        public Builder kmsProviderSslContextMap(final Map<String, SSLContext> kmsProviderSslContextMap) {
+            this.kmsProviderSslContextMap = notNull("kmsProviderSslContextMap", kmsProviderSslContextMap);
+            return this;
+        }
+
+        /**
+         * The cache expiration time for data encryption keys.
+         *
+         * <p>
+         * Defaults to {@code null}, which defers to libmongocrypt's default (currently 60000 ms). Set to 0 to disable key expiration.
+         * </p>
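+         * <p>
+         * A minimal sketch, assuming a {@code builder} obtained from {@link ClientEncryptionSettings#builder()}:
+         * <pre>
+         *   {@code builder.keyExpiration(0L, TimeUnit.MILLISECONDS); // disable key expiration}
+         * </pre>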
+ * + * @param keyExpiration the cache expiration time in milliseconds or null to use libmongocrypt's default. + * @param timeUnit the time unit + * @return this + * @see #getKeyExpiration(TimeUnit) + * @since 5.5 + */ + public Builder keyExpiration(@Nullable final Long keyExpiration, final TimeUnit timeUnit) { + assertTrue(keyExpiration == null || keyExpiration >= 0, "keyExpiration must be >= 0 or null"); + this.keyExpirationMS = keyExpiration == null ? null : TimeUnit.MILLISECONDS.convert(keyExpiration, timeUnit); + return this; + } + + /** + * Sets the time limit for the full execution of an operation. + * + *
+         * <ul>
+         *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+         *     <ul>
+         *       <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+         *       available</li>
+         *       <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+         *       <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+         *       <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+         *       See: cursor.maxTimeMS.</li>
+         *       <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+         *       See: {@link TransactionOptions#getMaxCommitTime}.</li>
+         *     </ul>
+         *   </li>
+         *   <li>{@code 0} means infinite timeout.</li>
+         *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+         * </ul>
+         *
+         * <p>
+         * Note: The timeout set through this method overrides the timeout defined in the key vault client settings
+         * specified in {@link #keyVaultMongoClientSettings(MongoClientSettings)}.
+         * Essentially, for operations that require accessing the key vault, the remaining timeout from the initial operation
+         * determines the duration allowed for key vault access.</p>
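+         * <p>
+         * A minimal sketch, assuming a {@code builder} obtained from {@link ClientEncryptionSettings#builder()}:
+         * <pre>
+         *   {@code builder.timeout(30, TimeUnit.SECONDS);}
+         * </pre>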
+         *
+         * @param timeout the timeout
+         * @param timeUnit the time unit
+         * @return this
+         * @since 5.2
+         * @see #getTimeout
+         */
+        @Alpha(Reason.CLIENT)
+        public ClientEncryptionSettings.Builder timeout(final long timeout, final TimeUnit timeUnit) {
+            this.timeoutMS = convertAndValidateTimeout(timeout, timeUnit);
+            return this;
+        }
+
+        /**
+         * Build an instance of {@code ClientEncryptionSettings}.
+         *
+         * @return the settings from this builder
+         */
+        public ClientEncryptionSettings build() {
+            return new ClientEncryptionSettings(this);
+        }
+
+        private Builder() {
+        }
+    }
+
+    /**
+     * Convenience method to create a Builder.
+     *
+     * @return a builder
+     */
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    /**
+     * Gets the {@link MongoClientSettings} that will be used to access the key vault.
+     *
+     * @return the key vault settings, which may not be null
+     */
+    public MongoClientSettings getKeyVaultMongoClientSettings() {
+        return keyVaultMongoClientSettings;
+    }
+
+    /**
+     * Gets the key vault namespace.
+     *
+     * <p>
+     * The key vault namespace refers to a collection that contains all data keys used for encryption and decryption (aka the key vault
+     * collection). Data keys are stored as documents in a special MongoDB collection. Data keys are protected with encryption by a KMS
+     * provider (AWS, Azure, GCP KMS or a local master key).
+     * </p>
+ * + * @return the key vault namespace, which may not be null + */ + + public String getKeyVaultNamespace() { + return keyVaultNamespace; + } + + /** + * Gets the map of KMS provider properties. + * + *
+     * <p>
+     * Multiple KMS providers can be specified within this map. Each KMS provider is identified by a unique key.
+     * Keys are formatted as either {@code "KMS provider type"} or {@code "KMS provider type:KMS provider name"} (e.g., "aws" or "aws:myname").
+     * The KMS provider name must only contain alphanumeric characters (a-z, A-Z, 0-9), underscores (_), and must not be empty.
+     * </p>
+     * <p>
+     * Supported KMS provider types include "aws", "azure", "gcp", and "local". The provider name is optional and allows
+     * for the configuration of multiple providers of the same type under different names (e.g., "aws:name1" and
+     * "aws:name2" could represent different AWS accounts).
+     * </p>
+     * <p>
+     * The kmsProviders map values differ by provider type. The following properties are supported for each provider type:
+     * </p>
+     * <p>
+     * For "aws", the properties are:
+     * </p>
+     * <ul>
+     *   <li>accessKeyId: a String, the AWS access key identifier</li>
+     *   <li>secretAccessKey: a String, the AWS secret access key</li>
+     *   <li>sessionToken: an optional String, the AWS session token</li>
+     * </ul>
+     * <p>
+     * For "azure", the properties are:
+     * </p>
+     * <ul>
+     *   <li>tenantId: a String, the tenantId that identifies the organization for the account.</li>
+     *   <li>clientId: a String, the clientId to authenticate a registered application.</li>
+     *   <li>clientSecret: a String, the client secret to authenticate a registered application.</li>
+     *   <li>identityPlatformEndpoint: optional String, a host with optional port. e.g. "example.com" or "example.com:443".
+     *   Generally used for private Azure instances.</li>
+     * </ul>
+     * <p>
+     * For "gcp", the properties are:
+     * </p>
+     * <ul>
+     *   <li>email: a String, the service account email to authenticate.</li>
+     *   <li>privateKey: a String or byte[], the encoded PKCS#8 encrypted key</li>
+     *   <li>endpoint: optional String, a host with optional port. e.g. "example.com" or "example.com:443".</li>
+     * </ul>
+     * <p>
+     * For "kmip", the properties are:
+     * </p>
+     * <ul>
+     *   <li>endpoint: a String, the endpoint as a host with required port. e.g. "example.com:443".</li>
+     * </ul>
+     * <p>
+     * For "local", the properties are:
+     * </p>
+     * <ul>
+     *   <li>key: byte[] of length 96, the local key</li>
+     * </ul>
+     *
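+     * <p>
+     * A minimal sketch configuring a single "local" provider; the master key bytes are illustrative and would normally be
+     * loaded from secure storage:
+     * <pre>
+     * {@code
+     * byte[] localMasterKey = new byte[96];
+     * Map<String, Map<String, Object>> kmsProviders = new HashMap<>();
+     * kmsProviders.put("local", new HashMap<>(Collections.singletonMap("key", localMasterKey)));
+     * }
+     * </pre>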
+ * It is also permitted for the value of a kms provider to be an empty map, in which case the driver will first + *
+     * <ul>
+     *   <li>use the {@link Supplier} configured in {@link #getKmsProviderPropertySuppliers()} to obtain a non-empty map</li>
+     *   <li>attempt to obtain the properties from the environment</li>
+     * </ul>
+     * However, KMS providers containing a name (e.g., "aws:myname") do not support dynamically obtaining KMS properties from the
+     * {@link Supplier} or environment.
+     *
+     * @return map of KMS provider properties
+     * @see #getKmsProviderPropertySuppliers()
+     */
+    public Map<String, Map<String, Object>> getKmsProviders() {
+        return unmodifiableMap(kmsProviders);
+    }
+
+    /**
+     * This method is similar to {@link #getKmsProviders()}, but instead of getting properties for KMS providers,
+     * it gets {@link Supplier}s of properties.
+     *
+     * <p>
+     * If {@link #getKmsProviders()} returns empty properties for a KMS provider,
+     * the driver will use a {@link Supplier} of properties configured for the KMS provider to obtain non-empty properties.</p>
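+     * <p>
+     * A minimal sketch, assuming a hypothetical {@code loadLocalMasterKey()} helper that reads the key from secure storage:
+     * <pre>
+     * {@code
+     * Map<String, Supplier<Map<String, Object>>> suppliers = new HashMap<>();
+     * suppliers.put("local", () -> Collections.singletonMap("key", loadLocalMasterKey()));
+     * }
+     * </pre>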
+     *
+     * @return A {@link Map} where keys identify KMS providers, and values specify {@link Supplier}s of properties for the KMS providers.
+     * @since 4.6
+     */
+    public Map<String, Supplier<Map<String, Object>>> getKmsProviderPropertySuppliers() {
+        return unmodifiableMap(kmsProviderPropertySuppliers);
+    }
+
+    /**
+     * Gets the KMS provider to SSLContext map.
+     *
+     * <p>
+     * If a KMS provider is mapped to a non-null {@link SSLContext}, the context will be used to establish a TLS connection to the KMS.
+     * Otherwise, the default context will be used.
+     * </p>
+     *
+     * @return the KMS provider to SSLContext map
+     * @since 4.4
+     */
+    public Map<String, SSLContext> getKmsProviderSslContextMap() {
+        return unmodifiableMap(kmsProviderSslContextMap);
+    }
+
+    /**
+     * Returns the cache expiration time for data encryption keys.
+     *
+     * <p>
+     * Defaults to {@code null}, which defers to libmongocrypt's default (currently {@code 60000 ms}).
+     * Set to {@code 0} to disable key expiration.</p>
+ * + * @param timeUnit the time unit, which may not be null + * @return the cache expiration time or null if not set. + * @since 5.5 + */ + @Nullable + public Long getKeyExpiration(final TimeUnit timeUnit) { + return keyExpirationMS == null ? null : timeUnit.convert(keyExpirationMS, TimeUnit.MILLISECONDS); + } + + /** + * The time limit for the full execution of an operation. + * + *
+     * <p>
+     * If set, the following deprecated options will be ignored:
+     * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}.</p>
+ * + *
+     * <ul>
+     *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+     *     <ul>
+     *       <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+     *       available</li>
+     *       <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+     *       <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+     *       <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+     *       See: cursor.maxTimeMS.</li>
+     *       <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+     *       See: {@link TransactionOptions#getMaxCommitTime}.</li>
+     *     </ul>
+     *   </li>
+     *   <li>{@code 0} means infinite timeout.</li>
+     *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
+     *
+     * @param timeUnit the time unit
+     * @return the timeout in the given time unit
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    @Nullable
+    public Long getTimeout(final TimeUnit timeUnit) {
+        return timeoutMS == null ? null : timeUnit.convert(timeoutMS, MILLISECONDS);
+    }
+
+    private ClientEncryptionSettings(final Builder builder) {
+        this.keyVaultMongoClientSettings = notNull("keyVaultMongoClientSettings", builder.keyVaultMongoClientSettings);
+        this.keyVaultNamespace = notNull("keyVaultNamespace", builder.keyVaultNamespace);
+        this.kmsProviders = notNull("kmsProviders", builder.kmsProviders);
+        this.kmsProviderPropertySuppliers = notNull("kmsProviderPropertySuppliers", builder.kmsProviderPropertySuppliers);
+        this.kmsProviderSslContextMap = notNull("kmsProviderSslContextMap", builder.kmsProviderSslContextMap);
+        this.timeoutMS = builder.timeoutMS;
+        this.keyExpirationMS = builder.keyExpirationMS;
+    }
+
+}
diff --git a/driver-core/src/main/com/mongodb/ClientSessionOptions.java b/driver-core/src/main/com/mongodb/ClientSessionOptions.java
new file mode 100644
index 00000000000..160d16c3486
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/ClientSessionOptions.java
@@ -0,0 +1,259 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb;
+
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Immutable;
+import com.mongodb.annotations.NotThreadSafe;
+import com.mongodb.annotations.Reason;
+import com.mongodb.lang.Nullable;
+import com.mongodb.session.ClientSession;
+
+import java.util.Objects;
+import java.util.concurrent.TimeUnit;
+
+import static com.mongodb.assertions.Assertions.notNull;
+import static com.mongodb.internal.TimeoutSettings.convertAndValidateTimeout;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+
+/**
+ * The options to apply to a {@code ClientSession}.
+ *
+ * @mongodb.server.release 3.6
+ * @since 3.6
+ * @see ClientSession
+ * @mongodb.driver.dochub core/causal-consistency Causal Consistency
+ */
+@Immutable
+public final class ClientSessionOptions {
+
+    private final Boolean causallyConsistent;
+    private final Boolean snapshot;
+    private final Long defaultTimeoutMS;
+    private final TransactionOptions defaultTransactionOptions;
+
+    /**
+     * Whether operations using the session should be causally consistent with each other.
+     *
+     * @return whether operations using the session should be causally consistent. A null value indicates to use the global default,
+     * which is currently true.
+     * @mongodb.driver.dochub core/causal-consistency Causal Consistency
+     */
+    @Nullable
+    public Boolean isCausallyConsistent() {
+        return causallyConsistent;
+    }
+
+    /**
+     * Whether read operations using this session should all share the same snapshot.
+     *
+     * @return whether read operations using this session should all share the same snapshot. A null value indicates to use the global
+     * default, which is false.
+ * @since 4.3 + * @mongodb.server.release 5.0 + * @mongodb.driver.manual reference/read-concern-snapshot/#read-concern-and-atclustertime Snapshot reads + */ + @Nullable + public Boolean isSnapshot() { + return snapshot; + } + + /** + * Gets the default transaction options for the session. + * + * @return the default transaction options for the session + * @since 3.8 + * @mongodb.server.release 4.0 + */ + public TransactionOptions getDefaultTransactionOptions() { + return defaultTransactionOptions; + } + + /** + * Gets the default time limit for the following operations executed on the session: + * + *
+     * <ul>
+     *   <li>{@code commitTransaction}</li>
+     *   <li>{@code abortTransaction}</li>
+     *   <li>{@code withTransaction}</li>
+     *   <li>{@code close}</li>
+     * </ul>
+     * @param timeUnit the time unit
+     * @return the default timeout
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    @Nullable
+    public Long getDefaultTimeout(final TimeUnit timeUnit) {
+        return defaultTimeoutMS == null ? null : timeUnit.convert(defaultTimeoutMS, MILLISECONDS);
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        final ClientSessionOptions that = (ClientSessionOptions) o;
+        return Objects.equals(causallyConsistent, that.causallyConsistent)
+                && Objects.equals(snapshot, that.snapshot)
+                && Objects.equals(defaultTimeoutMS, that.defaultTimeoutMS)
+                && Objects.equals(defaultTransactionOptions, that.defaultTransactionOptions);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(causallyConsistent, snapshot, defaultTimeoutMS, defaultTransactionOptions);
+    }
+
+    @Override
+    public String toString() {
+        return "ClientSessionOptions{"
+                + "causallyConsistent=" + causallyConsistent
+                + ", snapshot=" + snapshot
+                + ", defaultTimeoutMS=" + defaultTimeoutMS
+                + ", defaultTransactionOptions=" + defaultTransactionOptions
+                + '}';
+    }
+
+    /**
+     * Gets an instance of a builder.
+     *
+     * @return a builder instance
+     */
+    public static Builder builder() {
+        return new Builder();
+    }
+
+    /**
+     * Gets an instance of a builder initialized with the given options.
+     *
+     * @param options the options with which to initialize the builder
+     * @return a builder instance
+     * @since 3.8
+     */
+    public static Builder builder(final ClientSessionOptions options) {
+        notNull("options", options);
+        Builder builder = new Builder();
+        builder.causallyConsistent = options.isCausallyConsistent();
+        builder.snapshot = options.isSnapshot();
+        builder.defaultTransactionOptions = options.getDefaultTransactionOptions();
+        builder.defaultTimeoutMS = options.defaultTimeoutMS;
+        return builder;
+    }
+
+    /**
+     * A builder for instances of {@code ClientSessionOptions}.
+     */
+    @NotThreadSafe
+    public static final class Builder {
+        private Boolean causallyConsistent;
+        private Boolean snapshot;
+        private Long defaultTimeoutMS;
+        private TransactionOptions defaultTransactionOptions = TransactionOptions.builder().build();

+        /**
+         * Sets whether operations using the session should be causally consistent with each other.
+         *
+         * @param causallyConsistent whether operations using the session should be causally consistent
+         * @return this
+         * @mongodb.driver.dochub core/causal-consistency Causal Consistency
+         */
+        public Builder causallyConsistent(final boolean causallyConsistent) {
+            this.causallyConsistent = causallyConsistent;
+            return this;
+        }
+
+        /**
+         * Sets whether read operations using the session should share the same snapshot.
+         *
+         * <p>
+         * The default value is unset, in which case the driver will use the global default value, which is currently false.
+         * </p>
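+         * <p>
+         * A minimal sketch:
+         * <pre>
+         *   {@code ClientSessionOptions options = ClientSessionOptions.builder().snapshot(true).build();}
+         * </pre>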
+         *
+         * @param snapshot true for snapshot reads, false otherwise
+         * @return this
+         * @since 4.3
+         * @mongodb.server.release 5.0
+         * @mongodb.driver.manual reference/read-concern-snapshot/#read-concern-and-atclustertime Snapshot reads
+         */
+        public Builder snapshot(final boolean snapshot) {
+            this.snapshot = snapshot;
+            return this;
+        }
+
+        /**
+         * Sets the default transaction options to use for all transactions started on this session.
+         *
+         * @param defaultTransactionOptions the default transaction options to use for all transactions on this session
+         * @return this
+         * @since 3.8
+         * @mongodb.server.release 4.0
+         */
+        public Builder defaultTransactionOptions(final TransactionOptions defaultTransactionOptions) {
+            this.defaultTransactionOptions = notNull("defaultTransactionOptions", defaultTransactionOptions);
+            return this;
+        }
+
+        /**
+         * Sets the default time limit for the following operations executed on the session:
+         *
+         * <ul>
+         *   <li>{@code commitTransaction}</li>
+         *   <li>{@code abortTransaction}</li>
+         *   <li>{@code withTransaction}</li>
+         *   <li>{@code close}</li>
+         * </ul>
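+         * <p>
+         * A minimal sketch:
+         * <pre>
+         *   {@code ClientSessionOptions.builder().defaultTimeout(60, TimeUnit.SECONDS).build()}
+         * </pre>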
+ * @param defaultTimeout the timeout + * @param timeUnit the time unit + * @return this + * @since 5.2 + * @see #getDefaultTimeout + */ + @Alpha(Reason.CLIENT) + public Builder defaultTimeout(final long defaultTimeout, final TimeUnit timeUnit) { + this.defaultTimeoutMS = convertAndValidateTimeout(defaultTimeout, timeUnit, "defaultTimeout"); + return this; + } + + /** + * Build the session options instance. + * + * @return The {@code ClientSessionOptions} + */ + public ClientSessionOptions build() { + return new ClientSessionOptions(this); + } + + private Builder() { + } + } + + private ClientSessionOptions(final Builder builder) { + if (builder.causallyConsistent != null && builder.causallyConsistent && builder.snapshot != null && builder.snapshot) { + throw new IllegalArgumentException("A session can not be both a snapshot and causally consistent"); + } + this.causallyConsistent = builder.causallyConsistent != null || builder.snapshot == null + ? builder.causallyConsistent + : Boolean.valueOf(!builder.snapshot); + this.snapshot = builder.snapshot; + this.defaultTransactionOptions = builder.defaultTransactionOptions; + this.defaultTimeoutMS = builder.defaultTimeoutMS; + } +} diff --git a/driver-core/src/main/com/mongodb/ConnectionString.java b/driver-core/src/main/com/mongodb/ConnectionString.java new file mode 100644 index 00000000000..659e8fd02aa --- /dev/null +++ b/driver-core/src/main/com/mongodb/ConnectionString.java @@ -0,0 +1,1835 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb; + +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.connection.ClusterSettings; +import com.mongodb.connection.ConnectionPoolSettings; +import com.mongodb.connection.ServerMonitoringMode; +import com.mongodb.connection.ServerSettings; +import com.mongodb.connection.SocketSettings; +import com.mongodb.event.ConnectionCheckOutStartedEvent; +import com.mongodb.event.ConnectionCheckedInEvent; +import com.mongodb.event.ConnectionCheckedOutEvent; +import com.mongodb.event.ConnectionCreatedEvent; +import com.mongodb.event.ConnectionReadyEvent; +import com.mongodb.internal.connection.ServerMonitoringModeUtil; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.internal.dns.DefaultDnsResolver; +import com.mongodb.lang.Nullable; +import com.mongodb.spi.dns.DnsClient; +import org.bson.UuidRepresentation; + +import java.io.UnsupportedEncodingException; +import java.net.URLDecoder; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static com.mongodb.MongoCredential.ALLOWED_HOSTS_KEY; +import static com.mongodb.internal.connection.OidcAuthenticator.OidcValidator.validateCreateOidcCredential; +import static java.lang.String.format; +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static java.util.Collections.unmodifiableList; + + +/** + *
+ * <p>
+ * Represents a Connection String.
+ * The Connection String describes the hosts to be used and options.
+ * </p>
+ * <p>
+ * The format of the Connection String is:
+ * </p>
+ * <pre>
+ *   mongodb://[username:password@]host1[:port1][,host2[:port2],...[,hostN[:portN]]][/[database.collection][?options]]
+ * </pre>
+ *
+ * <ul>
+ *   <li>{@code mongodb://} is a required prefix to identify that this is a string in the standard connection format.</li>
+ *   <li>{@code username:password@} are optional. If given, the driver will attempt to login to a database after
+ *   connecting to a database server. For some authentication mechanisms, only the username is specified and the password is not,
+ *   in which case the ":" after the username is left off as well.</li>
+ *   <li>{@code host1} is the only required part of the connection string. It identifies a server address to connect to.
+ *   Support for Unix domain sockets was added in 3.7. Note: The path must be urlencoded, e.g. {@code mongodb://%2Ftmp%2Fmongodb-27017.sock},
+ *   and the {@code jnr.unixsocket} library installed.</li>
+ *   <li>{@code :portX} is optional and defaults to :27017 if not provided.</li>
+ *   <li>{@code /database} is the name of the database to login to and thus is only relevant if the
+ *   {@code username:password@} syntax is used. If not specified, the "admin" database will be used by default.</li>
+ *   <li>{@code ?options} are connection options. Options are name=value pairs and the pairs
+ *   are separated by "&amp;". For backwards compatibility, ";" is accepted as a separator in addition to "&amp;",
+ *   but should be considered deprecated.</li>
+ * </ul>
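+ * <p>
+ * For example, a minimal sketch with illustrative credentials, hosts and database names (not a working deployment):
+ * <pre>
+ *   {@code ConnectionString connectionString = new ConnectionString("mongodb://user:pass@host1:27017,host2:27017/mydb?replicaSet=rs0");}
+ * </pre>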

An alternative format, using the mongodb+srv protocol, is: + *

+ *   mongodb+srv://[username:password@]host[/[database][?options]]
+ * 
+ *
    + *
  • {@code mongodb+srv://} is a required prefix for this format.
  • + *
  • {@code username:password@} are optional. If given, the driver will attempt to login to a database after + * connecting to a database server. For some authentication mechanisms, only the username is specified and the password is not, + * in which case the ":" after the username is left off as well
  • + *
  • {@code host} is the only required part of the URI. It identifies a single host name for which SRV records are looked up + * from a Domain Name Server after prefixing the host name with, by default, {@code "_mongodb._tcp"} ({@code "mongodb"} is the default SRV + * service name, but can be replaced via the {@code srvServiceName} query parameter), The host/port for each SRV record becomes the + * seed list used to connect, as if each one were provided as host/port pair in a URI using the normal mongodb protocol.
  • + *
  • {@code /database} is the name of the database to login to and thus is only relevant if the + * {@code username:password@} syntax is used. If not specified the "admin" database will be used by default.
  • + *
  • {@code ?options} are connection options. Options are name=value pairs and the pairs + * are separated by "&". For backwards compatibility, ";" is accepted as a separator in addition to "&", + * but should be considered as deprecated. Additionally with the mongodb+srv protocol, TXT records are looked up from a Domain Name + * Server for the given host, and the text value of each one is prepended to any options on the URI itself. Because the last specified + * value for any option wins, that means that options provided on the URI will override any that are provided via TXT records.
  • + *
+ *

The following options are supported (case insensitive):

+ * + *

Server Selection Configuration:

+ *
    + *
  • {@code serverSelectionTimeoutMS=ms}: How long the driver will wait for server selection to succeed before throwing an exception.
  • + *
  • {@code localThresholdMS=ms}: When choosing among multiple MongoDB servers to send a request, the driver will only + * send that request to a server whose ping time is less than or equal to the server with the fastest ping time plus the local + * threshold.
  • + *
+ *

Server Monitoring Configuration:

+ *
    + *
  • {@code heartbeatFrequencyMS=ms}: The frequency that the driver will attempt to determine the current state of each server in the + * cluster.
  • + *
  • {@code serverMonitoringMode=enum}: The server monitoring mode, which defines the monitoring protocol to use. Enumerated values: + *
      + *
    • {@code stream};
    • + *
    • {@code poll};
    • + *
    • {@code auto} - the default.
    • + *
    + *
  • + *
+ *

Replica set configuration:

+ *
    + *
  • {@code replicaSet=name}: Implies that the hosts given are a seed list, and the driver will attempt to find + * all members of the set.
  • + *
+ *

Connection Configuration:

+ *
    + *
  • {@code ssl=true|false}: Whether to connect using TLS.
  • + *
  • {@code tls=true|false}: Whether to connect using TLS. Supersedes the ssl option
  • + *
• {@code tlsInsecure=true|false}: If connecting with TLS, this option enables insecure TLS connections. Currently this has the
+ * same effect as setting tlsAllowInvalidHostnames to true. Other mechanisms for relaxing TLS security constraints must be handled in
+ * the application by customizing the {@link javax.net.ssl.SSLContext}
  • + *
  • {@code sslInvalidHostNameAllowed=true|false}: Whether to allow invalid host names for TLS connections.
  • + *
  • {@code tlsAllowInvalidHostnames=true|false}: Whether to allow invalid host names for TLS connections. Supersedes the + * sslInvalidHostNameAllowed option
  • + *
  • {@code timeoutMS=ms}: Time limit for the full execution of an operation. Note: This parameter is part of an {@linkplain Alpha Alpha API} and may be + * subject to changes or even removal in future releases.
  • + *
  • {@code connectTimeoutMS=ms}: How long a connection can take to be opened before timing out.
  • + *
  • {@code socketTimeoutMS=ms}: How long a receive on a socket can take before timing out. + * This option is the same as {@link SocketSettings#getReadTimeout(TimeUnit)}. + * Deprecated, use {@code timeoutMS} instead.
  • + *
  • {@code maxIdleTimeMS=ms}: Maximum idle time of a pooled connection. A connection that exceeds this limit will be closed
  • + *
  • {@code maxLifeTimeMS=ms}: Maximum life time of a pooled connection. A connection that exceeds this limit will be closed
  • + *
+ *

Proxy Configuration:

+ *
    + *
  • {@code proxyHost=string}: The SOCKS5 proxy host to establish a connection through. + * It can be provided as a valid IPv4 address, IPv6 address, or a domain name. Required if either proxyPassword, proxyUsername or + * proxyPort are specified
  • + *
  • {@code proxyPort=n}: The port number for the SOCKS5 proxy server. Must be a non-negative integer.
  • + *
  • {@code proxyUsername=string}: Username for authenticating with the proxy server. Required if proxyPassword is specified.
  • + *
  • {@code proxyPassword=string}: Password for authenticating with the proxy server. Required if proxyUsername is specified.
  • + *
+ *

Connection pool configuration:

+ *
    + *
  • {@code maxPoolSize=n}: The maximum number of connections in the connection pool.
  • + *
  • {@code minPoolSize=n}: The minimum number of connections in the connection pool.
  • + *
• {@code waitQueueTimeoutMS=ms}: The maximum duration to wait until either:
+ * an {@linkplain ConnectionCheckedOutEvent in-use connection} becomes {@linkplain ConnectionCheckedInEvent available},
+ * or a {@linkplain ConnectionCreatedEvent connection is created} and begins to be {@linkplain ConnectionReadyEvent established}.
+ * See {@link #getMaxWaitTime()} for more details. Deprecated, use {@code timeoutMS} instead.
  • + *
  • {@code maxConnecting=n}: The maximum number of connections a pool may be establishing concurrently.
  • + *
+ *

Write concern configuration:

+ *
    + *
  • {@code safe=true|false} + *
      + *
    • {@code true}: the driver ensures that all writes are acknowledged by the MongoDB server, or else throws an exception. + * (see also {@code w} and {@code wtimeoutMS}).
    • + *
    • {@code false}: the driver does not ensure that all writes are acknowledged by the MongoDB server.
    • + *
    + *
  • + *
  • {@code journal=true|false} + *
      + *
    • {@code true}: the driver waits for the server to group commit to the journal file on disk.
    • + *
    • {@code false}: the driver does not wait for the server to group commit to the journal file on disk.
    • + *
    + *
  • + *
  • {@code w=wValue} + *
      + *
    • The driver adds { w : wValue } to all write commands. Implies {@code safe=true}.
    • + *
    • wValue is typically a number, but can be any string in order to allow for specifications like + * {@code "majority"}
    • + *
    + *
  • + *
  • {@code wtimeoutMS=ms} + *
      + *
    • The driver adds { wtimeout : ms } to all write commands. Implies {@code safe=true}.
    • + *
    • Used in combination with {@code w}. Deprecated, use {@code timeoutMS} instead
    • + *
    + *
  • + *
+ *

Read preference configuration:

+ *
    + *
  • {@code readPreference=enum}: The read preference for this connection. + *
      + *
    • Enumerated values: + *
        + *
      • {@code primary}
      • + *
      • {@code primaryPreferred}
      • + *
      • {@code secondary}
      • + *
      • {@code secondaryPreferred}
      • + *
      • {@code nearest}
      • + *
      + *
    • + *
    + *
  • + *
• {@code readPreferenceTags=string}. A representation of a tag set as a comma-separated list of colon-separated
+ * key-value pairs, e.g. {@code "dc:ny,rack:1"}. Spaces are stripped from beginning and end of all keys and values.
+ * To specify a list of tag sets, use multiple readPreferenceTags,
+ * e.g. {@code readPreferenceTags=dc:ny,rack:1;readPreferenceTags=dc:ny;readPreferenceTags=}
+ *
      + *
    • Note the empty value for the last one, which means match any secondary as a last resort.
    • + *
    • Order matters when using multiple readPreferenceTags.
    • + *
    + *
  • + *
  • {@code maxStalenessSeconds=seconds}. The maximum staleness in seconds. For use with any non-primary read preference, the driver + * estimates the staleness of each secondary, based on lastWriteDate values provided in server hello responses, and selects only those + * secondaries whose staleness is less than or equal to maxStalenessSeconds. Not providing the parameter or explicitly setting it to -1 + * indicates that there should be no max staleness check. The maximum staleness feature is designed to prevent badly-lagging servers from + * being selected. The staleness estimate is imprecise and shouldn't be used to try to select "up-to-date" secondaries. The minimum value + * is either 90 seconds, or the heartbeat frequency plus 10 seconds, whichever is greatest. + *
  • + *
+ *

Authentication configuration:

+ *
    + *
• {@code authMechanism=MONGO-CR|GSSAPI|PLAIN|MONGODB-X509|MONGODB-OIDC}: The authentication mechanism to use if a credential was supplied.
+ * The default is unspecified, in which case the client will pick the most secure mechanism available based on the server version. For the
+ * GSSAPI, MONGODB-X509, and MONGODB-OIDC mechanisms, no password is accepted, only the username.
+ *
  • + *
• {@code authSource=string}: The source of the authentication credentials. This is typically the database in which
+ * the credentials have been created. The value defaults to the database specified in the path portion of the connection string.
+ * If the database is specified in neither place, the default value is "admin". This option is only respected when using the MONGO-CR
+ * mechanism (the default).
+ *
  • + *
  • {@code authMechanismProperties=PROPERTY_NAME:PROPERTY_VALUE,PROPERTY_NAME2:PROPERTY_VALUE2}: This option allows authentication + * mechanism properties to be set on the connection string. + *
  • + *
  • {@code gssapiServiceName=string}: This option only applies to the GSSAPI mechanism and is used to alter the service name. + * Deprecated, please use {@code authMechanismProperties=SERVICE_NAME:string} instead. + *
  • + *
+ *

Server Handshake configuration:

+ *
    + *
  • {@code appName=string}: Sets the logical name of the application. The application name may be used by the client to identify + * the application to the server, for use in server logs, slow query logs, and profile collection.
  • + *
+ *

Compressor configuration:

+ *
    + *
  • {@code compressors=string}: A comma-separated list of compressors to request from the server. The supported compressors + * currently are 'zlib', 'snappy' and 'zstd'.
  • + *
  • {@code zlibCompressionLevel=integer}: Integer value from -1 to 9 representing the zlib compression level. Lower values will make + * compression faster, while higher values will make compression better.
  • + *
+ *

SRV configuration:

+ *
    + *
  • {@code srvServiceName=string}: The SRV service name. See {@link ClusterSettings#getSrvServiceName()} for details.
  • + *
  • {@code srvMaxHosts=number}: The maximum number of hosts from the SRV record to connect to.
  • + *
+ *

General configuration:

+ *
    + *
  • {@code retryWrites=true|false}. If true the driver will retry supported write operations if they fail due to a network error. + * Defaults to true.
  • + *
  • {@code retryReads=true|false}. If true the driver will retry supported read operations if they fail due to a network error. + * Defaults to true.
  • + *
  • {@code uuidRepresentation=unspecified|standard|javaLegacy|csharpLegacy|pythonLegacy}. See + * {@link MongoClientSettings#getUuidRepresentation()} for documentation of semantics of this parameter. Defaults to "javaLegacy", but + * will change to "unspecified" in the next major release.
  • + *
  • {@code directConnection=true|false}. If true the driver will set the connection to be a direct connection to the host.
  • + *
  • {@code loadBalanced=true|false}. If true the driver will assume that it's connecting to MongoDB through a load balancer.
  • + *
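+ * <p>
+ * For example, a minimal sketch of the mongodb+srv form with illustrative values:
+ * <pre>
+ *   {@code ConnectionString cs = new ConnectionString("mongodb+srv://user:pass@cluster0.example.net/mydb?retryWrites=true&maxPoolSize=50");}
+ * </pre>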
+ * + * @mongodb.driver.manual reference/connection-string Connection String Format + * @since 3.0.0 + */ +public class ConnectionString { + + private static final String MONGODB_PREFIX = "mongodb://"; + private static final String MONGODB_SRV_PREFIX = "mongodb+srv://"; + private static final Set ALLOWED_OPTIONS_IN_TXT_RECORD = + new HashSet<>(asList("authsource", "replicaset", "loadbalanced")); + private static final Logger LOGGER = Loggers.getLogger("uri"); + private static final List MECHANISM_KEYS_DISALLOWED_IN_CONNECTION_STRING = Stream.of(ALLOWED_HOSTS_KEY) + .map(k -> k.toLowerCase()) + .collect(Collectors.toList()); + + private final MongoCredential credential; + private final boolean isSrvProtocol; + private final List hosts; + private final String database; + private final String collection; + private final String connectionString; + + private Integer srvMaxHosts; + private String srvServiceName; + private Boolean directConnection; + private Boolean loadBalanced; + private ReadPreference readPreference; + private WriteConcern writeConcern; + private Boolean retryWrites; + private Boolean retryReads; + private ReadConcern readConcern; + + private Integer minConnectionPoolSize; + private Integer maxConnectionPoolSize; + private Integer maxWaitTime; + private Integer maxConnectionIdleTime; + private Integer maxConnectionLifeTime; + private Integer maxConnecting; + private Integer connectTimeout; + private Long timeout; + private Integer socketTimeout; + private Boolean sslEnabled; + private Boolean sslInvalidHostnameAllowed; + private String proxyHost; + private Integer proxyPort; + private String proxyUsername; + private String proxyPassword; + private String requiredReplicaSetName; + private Integer serverSelectionTimeout; + private Integer localThreshold; + private Integer heartbeatFrequency; + private ServerMonitoringMode serverMonitoringMode; + private String applicationName; + private List compressorList; + private UuidRepresentation uuidRepresentation; + + /** + * Creates a ConnectionString from the given string. + * + * @param connectionString the connection string + * @since 3.0 + */ + public ConnectionString(final String connectionString) { + this(connectionString, null); + } + + /** + * Creates a ConnectionString from the given string with the given {@link DnsClient}. + * + *

If setting {@link MongoClientSettings#getDnsClient()} explicitly, care should be taken to call this constructor with the same + * {@link DnsClient}. + * + * @param connectionString the connection string + * @param dnsClient the DNS client with which to resolve TXT record for the mongodb+srv protocol + * @since 4.10 + * @see MongoClientSettings#getDnsClient() + */ + public ConnectionString(final String connectionString, @Nullable final DnsClient dnsClient) { + this.connectionString = connectionString; + boolean isMongoDBProtocol = connectionString.startsWith(MONGODB_PREFIX); + isSrvProtocol = connectionString.startsWith(MONGODB_SRV_PREFIX); + if (!isMongoDBProtocol && !isSrvProtocol) { + throw new IllegalArgumentException(format("The connection string is invalid. " + + "Connection strings must start with either '%s' or '%s", MONGODB_PREFIX, MONGODB_SRV_PREFIX)); + } + + String unprocessedConnectionString; + if (isMongoDBProtocol) { + unprocessedConnectionString = connectionString.substring(MONGODB_PREFIX.length()); + } else { + unprocessedConnectionString = connectionString.substring(MONGODB_SRV_PREFIX.length()); + } + + // Split out the user and host information + String userAndHostInformation; + int firstForwardSlashIdx = unprocessedConnectionString.indexOf("/"); + int firstQuestionMarkIdx = unprocessedConnectionString.indexOf("?"); + if (firstQuestionMarkIdx == -1 && firstForwardSlashIdx == -1) { + userAndHostInformation = unprocessedConnectionString; + unprocessedConnectionString = ""; + } else if (firstQuestionMarkIdx != -1 && (firstForwardSlashIdx == -1 || firstQuestionMarkIdx < firstForwardSlashIdx)) { + // there is a question mark, and there is no slash or the question mark comes before any slash + userAndHostInformation = unprocessedConnectionString.substring(0, firstQuestionMarkIdx); + unprocessedConnectionString = unprocessedConnectionString.substring(firstQuestionMarkIdx); + } else { + userAndHostInformation = unprocessedConnectionString.substring(0, firstForwardSlashIdx); + unprocessedConnectionString = unprocessedConnectionString.substring(firstForwardSlashIdx + 1); + } + + // Split the user and host information + String userInfo; + String hostIdentifier; + String userName = null; + char[] password = null; + int idx = userAndHostInformation.lastIndexOf("@"); + if (idx > 0) { + userInfo = userAndHostInformation.substring(0, idx).replace("+", "%2B"); + hostIdentifier = userAndHostInformation.substring(idx + 1); + int colonCount = countOccurrences(userInfo, ":"); + if (userInfo.contains("@") || colonCount > 1) { + throw new IllegalArgumentException("The connection string contains invalid user information. 
" + + "If the username or password contains a colon (:) or an at-sign (@) then it must be urlencoded"); + } + if (colonCount == 0) { + userName = urldecode(userInfo); + } else { + idx = userInfo.indexOf(":"); + if (idx == 0) { + throw new IllegalArgumentException("No username is provided in the connection string"); + } + userName = urldecode(userInfo.substring(0, idx)); + password = urldecode(userInfo.substring(idx + 1), true).toCharArray(); + } + } else if (idx == 0) { + throw new IllegalArgumentException("The connection string contains an at-sign (@) without a user name"); + } else { + hostIdentifier = userAndHostInformation; + } + + // Validate the hosts + List unresolvedHosts = unmodifiableList(parseHosts(asList(hostIdentifier.split(",")))); + if (isSrvProtocol) { + if (unresolvedHosts.size() > 1) { + throw new IllegalArgumentException("Only one host allowed when using mongodb+srv protocol"); + } + if (unresolvedHosts.get(0).contains(":")) { + throw new IllegalArgumentException("Host for when using mongodb+srv protocol can not contain a port"); + } + } + this.hosts = unresolvedHosts; + + // Process the authDB section + String nsPart; + idx = unprocessedConnectionString.indexOf("?"); + if (idx == -1) { + nsPart = unprocessedConnectionString; + unprocessedConnectionString = ""; + } else { + nsPart = unprocessedConnectionString.substring(0, idx); + unprocessedConnectionString = unprocessedConnectionString.substring(idx + 1); + } + if (nsPart.length() > 0) { + nsPart = urldecode(nsPart); + idx = nsPart.indexOf("."); + if (idx < 0) { + database = nsPart; + collection = null; + } else { + database = nsPart.substring(0, idx); + collection = nsPart.substring(idx + 1); + } + MongoNamespace.checkDatabaseNameValidity(database); + } else { + database = null; + collection = null; + } + + String txtRecordsQueryParameters = isSrvProtocol + ? 
new DefaultDnsResolver(dnsClient).resolveAdditionalQueryParametersFromTxtRecords(unresolvedHosts.get(0)) : ""; + String connectionStringQueryParameters = unprocessedConnectionString; + + Map> connectionStringOptionsMap = parseOptions(connectionStringQueryParameters); + Map> txtRecordsOptionsMap = parseOptions(txtRecordsQueryParameters); + if (!ALLOWED_OPTIONS_IN_TXT_RECORD.containsAll(txtRecordsOptionsMap.keySet())) { + throw new MongoConfigurationException(format("A TXT record is only permitted to contain the keys %s, but the TXT record for " + + "'%s' contains the keys %s", ALLOWED_OPTIONS_IN_TXT_RECORD, unresolvedHosts.get(0), txtRecordsOptionsMap.keySet())); + } + Map> combinedOptionsMaps = combineOptionsMaps(txtRecordsOptionsMap, connectionStringOptionsMap); + if (isSrvProtocol && !(combinedOptionsMaps.containsKey("tls") || combinedOptionsMaps.containsKey("ssl"))) { + combinedOptionsMaps.put("tls", singletonList("true")); + } + translateOptions(combinedOptionsMaps); + + if (!isSrvProtocol && srvMaxHosts != null) { + throw new IllegalArgumentException("srvMaxHosts can only be specified with mongodb+srv protocol"); + } + + if (!isSrvProtocol && srvServiceName != null) { + throw new IllegalArgumentException("srvServiceName can only be specified with mongodb+srv protocol"); + } + + if (directConnection != null && directConnection) { + if (isSrvProtocol) { + throw new IllegalArgumentException("Direct connections are not supported when using mongodb+srv protocol"); + } else if (hosts.size() > 1) { + throw new IllegalArgumentException("Direct connections are not supported when using multiple hosts"); + } + } + + if (loadBalanced != null && loadBalanced) { + if (directConnection != null && directConnection) { + throw new IllegalArgumentException("directConnection=true can not be specified with loadBalanced=true"); + } + if (requiredReplicaSetName != null) { + throw new IllegalArgumentException("replicaSet can not be specified with loadBalanced=true"); + } + if (hosts.size() > 1) { + throw new IllegalArgumentException("Only one host can be specified with loadBalanced=true"); + } + if (srvMaxHosts != null && srvMaxHosts > 0) { + throw new IllegalArgumentException("srvMaxHosts can not be specified with loadBalanced=true"); + } + } + + if (requiredReplicaSetName != null && srvMaxHosts != null && srvMaxHosts > 0) { + throw new IllegalArgumentException("srvMaxHosts can not be specified with replica set name"); + } + + validateProxyParameters(combinedOptionsMaps); + + credential = createCredentials(combinedOptionsMaps, userName, password); + warnOnUnsupportedOptions(combinedOptionsMaps); + } + + private static final Set GENERAL_OPTIONS_KEYS = new LinkedHashSet<>(); + private static final Set AUTH_KEYS = new HashSet<>(); + private static final Set READ_PREFERENCE_KEYS = new HashSet<>(); + private static final Set WRITE_CONCERN_KEYS = new HashSet<>(); + private static final Set COMPRESSOR_KEYS = new HashSet<>(); + private static final Set ALL_KEYS = new HashSet<>(); + + static { + GENERAL_OPTIONS_KEYS.add("minpoolsize"); + GENERAL_OPTIONS_KEYS.add("maxpoolsize"); + GENERAL_OPTIONS_KEYS.add("timeoutms"); + GENERAL_OPTIONS_KEYS.add("sockettimeoutms"); + GENERAL_OPTIONS_KEYS.add("waitqueuetimeoutms"); + GENERAL_OPTIONS_KEYS.add("connecttimeoutms"); + GENERAL_OPTIONS_KEYS.add("maxidletimems"); + GENERAL_OPTIONS_KEYS.add("maxlifetimems"); + GENERAL_OPTIONS_KEYS.add("maxconnecting"); + + // Order matters here: Having tls after ssl means than the tls option will supersede the ssl option when both are set + 
GENERAL_OPTIONS_KEYS.add("ssl"); + GENERAL_OPTIONS_KEYS.add("tls"); + + // Order matters here: Having tlsinsecure before sslinvalidhostnameallowed and tlsallowinvalidhostnames means that those options + // will supersede this one when both are set. + GENERAL_OPTIONS_KEYS.add("tlsinsecure"); + + // Order matters here: Having tlsallowinvalidhostnames after sslinvalidhostnameallowed means that the tlsallowinvalidhostnames + // option will supersede the sslinvalidhostnameallowed option when both are set + GENERAL_OPTIONS_KEYS.add("sslinvalidhostnameallowed"); + GENERAL_OPTIONS_KEYS.add("tlsallowinvalidhostnames"); + + // SOCKS5 proxy settings + GENERAL_OPTIONS_KEYS.add("proxyhost"); + GENERAL_OPTIONS_KEYS.add("proxyport"); + GENERAL_OPTIONS_KEYS.add("proxyusername"); + GENERAL_OPTIONS_KEYS.add("proxypassword"); + + GENERAL_OPTIONS_KEYS.add("replicaset"); + GENERAL_OPTIONS_KEYS.add("readconcernlevel"); + + GENERAL_OPTIONS_KEYS.add("serverselectiontimeoutms"); + GENERAL_OPTIONS_KEYS.add("localthresholdms"); + GENERAL_OPTIONS_KEYS.add("heartbeatfrequencyms"); + GENERAL_OPTIONS_KEYS.add("servermonitoringmode"); + GENERAL_OPTIONS_KEYS.add("retrywrites"); + GENERAL_OPTIONS_KEYS.add("retryreads"); + + GENERAL_OPTIONS_KEYS.add("appname"); + + GENERAL_OPTIONS_KEYS.add("uuidrepresentation"); + + GENERAL_OPTIONS_KEYS.add("directconnection"); + GENERAL_OPTIONS_KEYS.add("loadbalanced"); + + GENERAL_OPTIONS_KEYS.add("srvmaxhosts"); + GENERAL_OPTIONS_KEYS.add("srvservicename"); + + COMPRESSOR_KEYS.add("compressors"); + COMPRESSOR_KEYS.add("zlibcompressionlevel"); + + READ_PREFERENCE_KEYS.add("readpreference"); + READ_PREFERENCE_KEYS.add("readpreferencetags"); + READ_PREFERENCE_KEYS.add("maxstalenessseconds"); + + WRITE_CONCERN_KEYS.add("safe"); + WRITE_CONCERN_KEYS.add("w"); + WRITE_CONCERN_KEYS.add("wtimeoutms"); + WRITE_CONCERN_KEYS.add("journal"); + + AUTH_KEYS.add("authmechanism"); + AUTH_KEYS.add("authsource"); + AUTH_KEYS.add("gssapiservicename"); + AUTH_KEYS.add("authmechanismproperties"); + + ALL_KEYS.addAll(GENERAL_OPTIONS_KEYS); + ALL_KEYS.addAll(AUTH_KEYS); + ALL_KEYS.addAll(READ_PREFERENCE_KEYS); + ALL_KEYS.addAll(WRITE_CONCERN_KEYS); + ALL_KEYS.addAll(COMPRESSOR_KEYS); + } + + // Any options contained in the connection string completely replace the corresponding options specified in TXT records, + // even for options with multiple values, e.g. 
readPreferenceTags + private Map> combineOptionsMaps(final Map> txtRecordsOptionsMap, + final Map> connectionStringOptionsMap) { + Map> combinedOptionsMaps = new HashMap<>(txtRecordsOptionsMap); + combinedOptionsMaps.putAll(connectionStringOptionsMap); + return combinedOptionsMaps; + } + + + private void warnOnUnsupportedOptions(final Map> optionsMap) { + if (LOGGER.isWarnEnabled()) { + optionsMap.keySet() + .stream() + .filter(k -> !ALL_KEYS.contains(k)) + .forEach(k -> LOGGER.warn(format("Connection string contains unsupported option '%s'.", k))); + } + } + + private void translateOptions(final Map> optionsMap) { + boolean tlsInsecureSet = false; + boolean tlsAllowInvalidHostnamesSet = false; + + for (final String key : GENERAL_OPTIONS_KEYS) { + String value = getLastValue(optionsMap, key); + if (value == null) { + continue; + } + switch (key) { + case "maxpoolsize": + maxConnectionPoolSize = parseInteger(value, "maxpoolsize"); + break; + case "minpoolsize": + minConnectionPoolSize = parseInteger(value, "minpoolsize"); + break; + case "maxidletimems": + maxConnectionIdleTime = parseInteger(value, "maxidletimems"); + break; + case "maxlifetimems": + maxConnectionLifeTime = parseInteger(value, "maxlifetimems"); + break; + case "maxconnecting": + maxConnecting = parseInteger(value, "maxConnecting"); + break; + case "waitqueuetimeoutms": + maxWaitTime = parseInteger(value, "waitqueuetimeoutms"); + break; + case "connecttimeoutms": + connectTimeout = parseInteger(value, "connecttimeoutms"); + break; + case "sockettimeoutms": + socketTimeout = parseInteger(value, "sockettimeoutms"); + break; + case "timeoutms": + timeout = parseLong(value, "timeoutms"); + break; + case "proxyhost": + proxyHost = value; + break; + case "proxyport": + proxyPort = parseInteger(value, "proxyPort"); + break; + case "proxyusername": + proxyUsername = value; + break; + case "proxypassword": + proxyPassword = value; + break; + case "tlsallowinvalidhostnames": + sslInvalidHostnameAllowed = parseBoolean(value, "tlsAllowInvalidHostnames"); + tlsAllowInvalidHostnamesSet = true; + break; + case "sslinvalidhostnameallowed": + sslInvalidHostnameAllowed = parseBoolean(value, "sslinvalidhostnameallowed"); + tlsAllowInvalidHostnamesSet = true; + break; + case "tlsinsecure": + sslInvalidHostnameAllowed = parseBoolean(value, "tlsinsecure"); + tlsInsecureSet = true; + break; + case "ssl": + initializeSslEnabled("ssl", value); + break; + case "tls": + initializeSslEnabled("tls", value); + break; + case "replicaset": + requiredReplicaSetName = value; + break; + case "readconcernlevel": + readConcern = new ReadConcern(ReadConcernLevel.fromString(value)); + break; + case "serverselectiontimeoutms": + serverSelectionTimeout = parseInteger(value, "serverselectiontimeoutms"); + break; + case "localthresholdms": + localThreshold = parseInteger(value, "localthresholdms"); + break; + case "heartbeatfrequencyms": + heartbeatFrequency = parseInteger(value, "heartbeatfrequencyms"); + break; + case "servermonitoringmode": + serverMonitoringMode = ServerMonitoringModeUtil.fromString(value); + break; + case "appname": + applicationName = value; + break; + case "retrywrites": + retryWrites = parseBoolean(value, "retrywrites"); + break; + case "retryreads": + retryReads = parseBoolean(value, "retryreads"); + break; + case "uuidrepresentation": + uuidRepresentation = createUuidRepresentation(value); + break; + case "directconnection": + directConnection = parseBoolean(value, "directconnection"); + break; + case "loadbalanced": + loadBalanced = 
parseBoolean(value, "loadbalanced"); + break; + case "srvmaxhosts": + srvMaxHosts = parseInteger(value, "srvmaxhosts"); + if (srvMaxHosts < 0) { + throw new IllegalArgumentException("srvMaxHosts must be >= 0"); + } + break; + case "srvservicename": + srvServiceName = value; + break; + default: + break; + } + } + + if (tlsInsecureSet && tlsAllowInvalidHostnamesSet) { + throw new IllegalArgumentException("tlsAllowInvalidHostnames or sslInvalidHostnameAllowed set along with tlsInsecure " + + "is not allowed"); + } + + writeConcern = createWriteConcern(optionsMap); + readPreference = createReadPreference(optionsMap); + compressorList = createCompressors(optionsMap); + } + + private void initializeSslEnabled(final String key, final String value) { + Boolean booleanValue = parseBoolean(value, key); + if (sslEnabled != null && !sslEnabled.equals(booleanValue)) { + throw new IllegalArgumentException("Conflicting tls and ssl parameter values are not allowed"); + } + sslEnabled = booleanValue; + } + + private List createCompressors(final Map> optionsMap) { + String compressors = ""; + Integer zlibCompressionLevel = null; + + for (final String key : COMPRESSOR_KEYS) { + String value = getLastValue(optionsMap, key); + if (value == null) { + continue; + } + + if (key.equals("compressors")) { + compressors = value; + } else if (key.equals("zlibcompressionlevel")) { + zlibCompressionLevel = Integer.parseInt(value); + } + } + return buildCompressors(compressors, zlibCompressionLevel); + } + + private List buildCompressors(final String compressors, @Nullable final Integer zlibCompressionLevel) { + List compressorsList = new ArrayList<>(); + + for (String cur : compressors.split(",")) { + if (cur.equals("zlib")) { + MongoCompressor zlibCompressor = MongoCompressor.createZlibCompressor(); + if (zlibCompressionLevel != null) { + zlibCompressor = zlibCompressor.withProperty(MongoCompressor.LEVEL, zlibCompressionLevel); + } + compressorsList.add(zlibCompressor); + } else if (cur.equals("snappy")) { + compressorsList.add(MongoCompressor.createSnappyCompressor()); + } else if (cur.equals("zstd")) { + compressorsList.add(MongoCompressor.createZstdCompressor()); + } else if (!cur.isEmpty()) { + throw new IllegalArgumentException("Unsupported compressor '" + cur + "'"); + } + } + + return unmodifiableList(compressorsList); + } + + @Nullable + private WriteConcern createWriteConcern(final Map> optionsMap) { + String w = null; + Integer wTimeout = null; + Boolean safe = null; + Boolean journal = null; + + for (final String key : WRITE_CONCERN_KEYS) { + String value = getLastValue(optionsMap, key); + if (value == null) { + continue; + } + + switch (key) { + case "safe": + safe = parseBoolean(value, "safe"); + break; + case "w": + w = value; + break; + case "wtimeoutms": + wTimeout = Integer.parseInt(value); + break; + case "journal": + journal = parseBoolean(value, "journal"); + break; + default: + break; + } + } + return buildWriteConcern(safe, w, wTimeout, journal); + } + + @Nullable + private ReadPreference createReadPreference(final Map> optionsMap) { + String readPreferenceType = null; + List tagSetList = new ArrayList<>(); + long maxStalenessSeconds = -1; + + for (final String key : READ_PREFERENCE_KEYS) { + String value = getLastValue(optionsMap, key); + if (value == null) { + continue; + } + + switch (key) { + case "readpreference": + readPreferenceType = value; + break; + case "maxstalenessseconds": + maxStalenessSeconds = parseInteger(value, "maxstalenessseconds"); + break; + case "readpreferencetags": + for 
(final String cur : optionsMap.get(key)) { + TagSet tagSet = getTags(cur.trim()); + tagSetList.add(tagSet); + } + break; + default: + break; + } + } + return buildReadPreference(readPreferenceType, tagSetList, maxStalenessSeconds); + } + + private UuidRepresentation createUuidRepresentation(final String value) { + if (value.equalsIgnoreCase("unspecified")) { + return UuidRepresentation.UNSPECIFIED; + } + if (value.equalsIgnoreCase("javaLegacy")) { + return UuidRepresentation.JAVA_LEGACY; + } + if (value.equalsIgnoreCase("csharpLegacy")) { + return UuidRepresentation.C_SHARP_LEGACY; + } + if (value.equalsIgnoreCase("pythonLegacy")) { + return UuidRepresentation.PYTHON_LEGACY; + } + if (value.equalsIgnoreCase("standard")) { + return UuidRepresentation.STANDARD; + } + throw new IllegalArgumentException("Unknown uuid representation: " + value); + } + + @Nullable + private MongoCredential createCredentials(final Map<String, List<String>> optionsMap, @Nullable final String userName, + @Nullable final char[] password) { + AuthenticationMechanism mechanism = null; + String authSource = null; + String gssapiServiceName = null; + String authMechanismProperties = null; + + for (final String key : AUTH_KEYS) { + String value = getLastValue(optionsMap, key); + + if (value == null) { + continue; + } + + switch (key) { + case "authmechanism": + if (value.equals("MONGODB-CR")) { + if (userName == null) { + throw new IllegalArgumentException("username can not be null"); + } + LOGGER.warn("Deprecated MONGODB-CR authentication mechanism used in connection string"); + } else { + mechanism = AuthenticationMechanism.fromMechanismName(value); + } + break; + case "authsource": + if (value.equals("")) { + throw new IllegalArgumentException("authSource can not be an empty string"); + } + authSource = value; + break; + case "gssapiservicename": + gssapiServiceName = value; + break; + case "authmechanismproperties": + authMechanismProperties = value; + break; + default: + break; + } + } + + MongoCredential credential = null; + if (mechanism != null) { + credential = createMongoCredentialWithMechanism(mechanism, userName, password, authSource, gssapiServiceName); + } else if (userName != null) { + credential = MongoCredential.createCredential(userName, + getAuthSourceOrDefault(authSource, database != null ? database : "admin"), password); + } + + if (credential != null && authMechanismProperties != null) { + for (String part : authMechanismProperties.split(",")) { + String[] mechanismPropertyKeyValue = part.split(":", 2); + if (mechanismPropertyKeyValue.length != 2) { + throw new IllegalArgumentException(format("The connection string contains invalid authentication properties. " + + "'%s' is not a key value pair", part)); + } + String key = mechanismPropertyKeyValue[0].trim().toLowerCase(); + String value = mechanismPropertyKeyValue[1].trim(); + if (MECHANISM_KEYS_DISALLOWED_IN_CONNECTION_STRING.contains(key)) { + throw new IllegalArgumentException(format("The connection string contains disallowed mechanism properties. 
" + + "'%s' must be set on the credential programmatically.", key)); + } + + if (key.equals("canonicalize_host_name")) { + credential = credential.withMechanismProperty(key, Boolean.valueOf(value)); + } else { + credential = credential.withMechanismProperty(key, value); + } + } + } + return credential; + } + + private MongoCredential createMongoCredentialWithMechanism(final AuthenticationMechanism mechanism, final String userName, + @Nullable final char[] password, + @Nullable final String authSource, + @Nullable final String gssapiServiceName) { + MongoCredential credential; + String mechanismAuthSource; + switch (mechanism) { + case PLAIN: + mechanismAuthSource = getAuthSourceOrDefault(authSource, database != null ? database : "$external"); + break; + case GSSAPI: + case MONGODB_X509: + mechanismAuthSource = getAuthSourceOrDefault(authSource, "$external"); + if (!mechanismAuthSource.equals("$external")) { + throw new IllegalArgumentException(format("Invalid authSource for %s, it must be '$external'", mechanism)); + } + break; + default: + mechanismAuthSource = getAuthSourceOrDefault(authSource, database != null ? database : "admin"); + } + + switch (mechanism) { + case GSSAPI: + credential = MongoCredential.createGSSAPICredential(userName); + if (gssapiServiceName != null) { + credential = credential.withMechanismProperty("SERVICE_NAME", gssapiServiceName); + } + if (password != null && LOGGER.isWarnEnabled()) { + LOGGER.warn("Password in connection string not used with GSSAPI authentication mechanism."); + } + break; + case PLAIN: + credential = MongoCredential.createPlainCredential(userName, mechanismAuthSource, password); + break; + case MONGODB_X509: + if (password != null) { + throw new IllegalArgumentException("Invalid mechanism, MONGODB-X509 does not support passwords"); + } + credential = MongoCredential.createMongoX509Credential(userName); + break; + case SCRAM_SHA_1: + credential = MongoCredential.createScramSha1Credential(userName, mechanismAuthSource, password); + break; + case SCRAM_SHA_256: + credential = MongoCredential.createScramSha256Credential(userName, mechanismAuthSource, password); + break; + case MONGODB_AWS: + credential = MongoCredential.createAwsCredential(userName, password); + break; + case MONGODB_OIDC: + validateCreateOidcCredential(password); + credential = MongoCredential.createOidcCredential(userName); + break; + default: + throw new UnsupportedOperationException(format("The connection string contains an invalid authentication mechanism. 
" + + "'%s' is not a supported authentication mechanism", + mechanism)); + } + return credential; + } + + private String getAuthSourceOrDefault(@Nullable final String authSource, final String defaultAuthSource) { + if (authSource != null) { + return authSource; + } else { + return defaultAuthSource; + } + } + + @Nullable + private String getLastValue(final Map> optionsMap, final String key) { + List valueList = optionsMap.get(key); + if (valueList == null) { + return null; + } + return valueList.get(valueList.size() - 1); + } + + private Map> parseOptions(final String optionsPart) { + Map> optionsMap = new HashMap<>(); + if (optionsPart.isEmpty()) { + return optionsMap; + } + + for (final String part : optionsPart.split("&|;")) { + if (part.isEmpty()) { + continue; + } + int idx = part.indexOf("="); + if (idx >= 0) { + String key = part.substring(0, idx).toLowerCase(); + String value = part.substring(idx + 1); + List valueList = optionsMap.get(key); + if (valueList == null) { + valueList = new ArrayList<>(1); + } + valueList.add(urldecode(value)); + optionsMap.put(key, valueList); + } else { + throw new IllegalArgumentException(format("The connection string contains an invalid option '%s'. " + + "'%s' is missing the value delimiter eg '%s=value'", optionsPart, part, part)); + } + } + + // handle legacy wtimeout settings + if (optionsMap.containsKey("wtimeout") && !optionsMap.containsKey("wtimeoutms")) { + optionsMap.put("wtimeoutms", optionsMap.remove("wtimeout")); + if (LOGGER.isWarnEnabled()) { + LOGGER.warn("Uri option 'wtimeout' has been deprecated, use 'wtimeoutms' instead."); + } + } + // handle legacy j settings + if (optionsMap.containsKey("j") && !optionsMap.containsKey("journal")) { + optionsMap.put("journal", optionsMap.remove("j")); + if (LOGGER.isWarnEnabled()) { + LOGGER.warn("Uri option 'j' has been deprecated, use 'journal' instead."); + } + } + + return optionsMap; + } + + @Nullable + private ReadPreference buildReadPreference(@Nullable final String readPreferenceType, + final List tagSetList, final long maxStalenessSeconds) { + if (readPreferenceType != null) { + if (tagSetList.isEmpty() && maxStalenessSeconds == -1) { + return ReadPreference.valueOf(readPreferenceType); + } else if (maxStalenessSeconds == -1) { + return ReadPreference.valueOf(readPreferenceType, tagSetList); + } else { + return ReadPreference.valueOf(readPreferenceType, tagSetList, maxStalenessSeconds, TimeUnit.SECONDS); + } + } else if (!(tagSetList.isEmpty() && maxStalenessSeconds == -1)) { + throw new IllegalArgumentException("Read preference mode must be specified if " + + "either read preference tags or max staleness is specified"); + } + return null; + } + + @Nullable + private WriteConcern buildWriteConcern(@Nullable final Boolean safe, @Nullable final String w, + @Nullable final Integer wTimeout, + @Nullable final Boolean journal) { + WriteConcern retVal = null; + if (w != null || wTimeout != null || journal != null) { + if (w == null) { + retVal = WriteConcern.ACKNOWLEDGED; + } else { + try { + retVal = new WriteConcern(Integer.parseInt(w)); + } catch (NumberFormatException e) { + retVal = new WriteConcern(w); + } + } + if (wTimeout != null) { + retVal = retVal.withWTimeout(wTimeout, TimeUnit.MILLISECONDS); + } + if (journal != null) { + retVal = retVal.withJournal(journal); + } + return retVal; + } else if (safe != null) { + if (safe) { + retVal = WriteConcern.ACKNOWLEDGED; + } else { + retVal = WriteConcern.UNACKNOWLEDGED; + } + } + return retVal; + } + + private TagSet getTags(final String 
tagSetString) { + List<Tag> tagList = new ArrayList<>(); + if (tagSetString.length() > 0) { + for (final String tag : tagSetString.split(",")) { + String[] tagKeyValuePair = tag.split(":"); + if (tagKeyValuePair.length != 2) { + throw new IllegalArgumentException(format("The connection string contains an invalid read preference tag. " + + "'%s' is not a key value pair", tagSetString)); + } + tagList.add(new Tag(tagKeyValuePair[0].trim(), tagKeyValuePair[1].trim())); + } + } + return new TagSet(tagList); + } + + private static final Set<String> TRUE_VALUES = new HashSet<>(asList("true", "yes", "1")); + private static final Set<String> FALSE_VALUES = new HashSet<>(asList("false", "no", "0")); + + @Nullable + private Boolean parseBoolean(final String input, final String key) { + String trimmedInput = input.trim().toLowerCase(); + + if (TRUE_VALUES.contains(trimmedInput)) { + if (!trimmedInput.equals("true")) { + LOGGER.warn(format("Deprecated boolean value '%s' in the connection string for '%s'. Replace with 'true'", + trimmedInput, key)); + } + return true; + } else if (FALSE_VALUES.contains(trimmedInput)) { + if (!trimmedInput.equals("false")) { + LOGGER.warn(format("Deprecated boolean value '%s' in the connection string for '%s'. Replace with 'false'", + trimmedInput, key)); + } + return false; + } else { + LOGGER.warn(format("Ignoring unrecognized boolean value '%s' in the connection string for '%s'. " + + "Replace with either 'true' or 'false'", trimmedInput, key)); + return null; + } + } + + private int parseInteger(final String input, final String key) { + try { + return Integer.parseInt(input); + } catch (NumberFormatException e) { + throw new IllegalArgumentException(format("The connection string contains an invalid value for '%s'. " + + "'%s' is not a valid integer", key, input)); + } + } + + private long parseLong(final String input, final String key) { + try { + return Long.parseLong(input); + } catch (NumberFormatException e) { + throw new IllegalArgumentException(format("The connection string contains an invalid value for '%s'. " + + "'%s' is not a valid long", key, input)); + } + } + + private List<String> parseHosts(final List<String> rawHosts) { + if (rawHosts.size() == 0) { + throw new IllegalArgumentException("The connection string must contain at least one host"); + } + List<String> hosts = new ArrayList<>(); + for (String host : rawHosts) { + if (host.length() == 0) { + throw new IllegalArgumentException(format("The connection string contains an empty host '%s'. ", rawHosts)); + } else if (host.endsWith(".sock")) { + host = urldecode(host); + } else if (host.startsWith("[")) { + if (!host.contains("]")) { + throw new IllegalArgumentException(format("The connection string contains an invalid host '%s'. " + + "IPv6 address literals must be enclosed in '[' and ']' according to RFC 2732", host)); + } + int idx = host.indexOf("]:"); + if (idx != -1) { + validatePort(host.substring(idx + 2)); + } + } else { + int colonCount = countOccurrences(host, ":"); + if (colonCount > 1) { + throw new IllegalArgumentException(format("The connection string contains an invalid host '%s'. " + + "Reserved characters such as ':' must be escaped according to RFC 2396. 
" + + "Any IPv6 address literal must be enclosed in '[' and ']' according to RFC 2732.", host)); + } else if (colonCount == 1) { + validatePort(host.substring(host.indexOf(":") + 1)); + } + } + hosts.add(host); + } + Collections.sort(hosts); + return hosts; + } + + private void validatePort(final String port) { + try { + int portInt = Integer.parseInt(port); + if (portInt <= 0 || portInt > 65535) { + throw new IllegalArgumentException("The connection string contains an invalid host and port. " + + "The port must be an integer between 1 and 65535."); + } + } catch (NumberFormatException e) { + throw new IllegalArgumentException("The connection string contains an invalid host and port. " + + "The port contains non-digit characters, it must be an integer between 1 and 65535. " + + "Hint: username and password must be escaped according to RFC 3986."); + } + } + + private void validateProxyParameters(final Map<String, List<String>> optionsMap) { + if (proxyHost == null) { + if (proxyPort != null) { + throw new IllegalArgumentException("proxyPort can only be specified with proxyHost"); + } else if (proxyUsername != null) { + throw new IllegalArgumentException("proxyUsername can only be specified with proxyHost"); + } else if (proxyPassword != null) { + throw new IllegalArgumentException("proxyPassword can only be specified with proxyHost"); + } + } + if (proxyPort != null && (proxyPort < 0 || proxyPort > 65535)) { + throw new IllegalArgumentException("proxyPort should be within the valid range (0 to 65535)"); + } + if (proxyUsername != null) { + if (proxyUsername.isEmpty()) { + throw new IllegalArgumentException("proxyUsername cannot be empty"); + } + if (proxyUsername.getBytes(StandardCharsets.UTF_8).length >= 255) { + throw new IllegalArgumentException("proxyUsername's length in bytes must be less than 255"); + } + } + if (proxyPassword != null) { + if (proxyPassword.isEmpty()) { + throw new IllegalArgumentException("proxyPassword cannot be empty"); + } + if (proxyPassword.getBytes(StandardCharsets.UTF_8).length >= 255) { + throw new IllegalArgumentException("proxyPassword's length in bytes must be less than 255"); + } + } + if (proxyUsername == null ^ proxyPassword == null) { + throw new IllegalArgumentException( + "Both proxyUsername and proxyPassword must be set together. 
They cannot be set individually"); + } + + if (containsDuplicatedOptions("proxyhost", optionsMap)) { + throw new IllegalArgumentException("Duplicated values for proxyHost: " + optionsMap.get("proxyhost")); + } + if (containsDuplicatedOptions("proxyport", optionsMap)) { + throw new IllegalArgumentException("Duplicated values for proxyPort: " + optionsMap.get("proxyport")); + } + if (containsDuplicatedOptions("proxypassword", optionsMap)) { + throw new IllegalArgumentException("Duplicated values for proxyPassword: " + optionsMap.get("proxypassword")); + } + if (containsDuplicatedOptions("proxyusername", optionsMap)) { + throw new IllegalArgumentException("Duplicated values for proxyUsername: " + optionsMap.get("proxyusername")); + } + } + + private static boolean containsDuplicatedOptions(final String optionName, final Map> optionsMap) { + return optionsMap.getOrDefault(optionName, emptyList()).size() > 1; + } + + private int countOccurrences(final String haystack, final String needle) { + return haystack.length() - haystack.replace(needle, "").length(); + } + + private String urldecode(final String input) { + return urldecode(input, false); + } + + private String urldecode(final String input, final boolean password) { + try { + return URLDecoder.decode(input, StandardCharsets.UTF_8.name()); + } catch (UnsupportedEncodingException e) { + if (password) { + throw new IllegalArgumentException("The connection string contained unsupported characters in the password."); + } else { + throw new IllegalArgumentException(format("The connection string contained unsupported characters: '%s'." + + "Decoding produced the following error: %s", input, e.getMessage())); + } + } + } + + // --------------------------------- + + /** + * Gets the username + * + * @return the username + */ + @Nullable + public String getUsername() { + return credential != null ? credential.getUserName() : null; + } + + /** + * Gets the password + * + * @return the password + */ + @Nullable + public char[] getPassword() { + return credential != null ? credential.getPassword() : null; + } + + /** + * Returns true if the connection string requires SRV protocol to resolve the host lists from the configured host. + * + * @return true if SRV protocol is required to resolve hosts. + */ + public boolean isSrvProtocol() { + return isSrvProtocol; + } + + /** + * Gets the maximum number of hosts to connect to when using SRV protocol. + * + * @return the maximum number of hosts to connect to when using SRV protocol. Defaults to null. + * @since 4.4 + */ + @Nullable + public Integer getSrvMaxHosts() { + return srvMaxHosts; + } + + /** + * Gets the SRV service name. + * + * @return the SRV service name. Defaults to null in the connection string, but defaults to {@code "mongodb"} in + * {@link ClusterSettings}. 
+ * @since 4.5 + * @see ClusterSettings#getSrvServiceName() + */ + @Nullable + public String getSrvServiceName() { + return srvServiceName; + } + + /** + * Gets the list of hosts + * + * @return the host list + */ + public List getHosts() { + return hosts; + } + + /** + * Gets the database name + * + * @return the database name + */ + @Nullable + public String getDatabase() { + return database; + } + + /** + * Gets the collection name + * + * @return the collection name + */ + @Nullable + public String getCollection() { + return collection; + } + + /** + * Indicates if the connection should be a direct connection + * + * @return true if a direct connection + * @since 4.1 + */ + @Nullable + public Boolean isDirectConnection() { + return directConnection; + } + + /** + * Indicates if the connection is through a load balancer. + * + * @return true if a load-balanced connection + * @since 4.3 + */ + @Nullable + public Boolean isLoadBalanced() { + return loadBalanced; + } + + /** + * Get the unparsed connection string. + * + * @return the connection string + * @since 3.1 + */ + public String getConnectionString() { + return connectionString; + } + + /** + * Gets the credential or null if no credentials were specified in the connection string. + * + * @return the credentials in an immutable list + * @since 3.6 + */ + @Nullable + public MongoCredential getCredential() { + return credential; + } + + /** + * Gets the read preference specified in the connection string. + * @return the read preference + */ + @Nullable + public ReadPreference getReadPreference() { + return readPreference; + } + + /** + * Gets the read concern specified in the connection string. + * @return the read concern + */ + @Nullable + public ReadConcern getReadConcern() { + return readConcern; + } + + /** + * Gets the write concern specified in the connection string. + * @return the write concern + */ + @Nullable + public WriteConcern getWriteConcern() { + return writeConcern; + } + + /** + *
<p>Gets whether writes should be retried if they fail due to a network error</p>
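+ * <p>A sketch of reading this flag (hypothetical URI):</p>
+ * <pre>{@code
+ * new ConnectionString("mongodb://host1/?retryWrites=false").getRetryWritesValue(); // Boolean.FALSE
+ * new ConnectionString("mongodb://host1").getRetryWritesValue();                    // null (unset)
+ * }</pre>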
+ * + * The name of this method differs from others in this class so as not to conflict with the now removed + * getRetryWrites() method, which returned a primitive {@code boolean} value, and didn't allow callers to differentiate + * between a false value and an unset value. + * + * @return the retryWrites value, or null if unset + * @since 3.9 + * @mongodb.server.release 3.6 + */ + @Nullable + public Boolean getRetryWritesValue() { + return retryWrites; + } + + /** + *
<p>Gets whether reads should be retried if they fail due to a network error</p>
+ * + * @return the retryReads value + * @since 3.11 + * @mongodb.server.release 3.6 + */ + @Nullable + public Boolean getRetryReads() { + return retryReads; + } + + /** + * Gets the minimum connection pool size specified in the connection string. + * @return the minimum connection pool size + */ + @Nullable + public Integer getMinConnectionPoolSize() { + return minConnectionPoolSize; + } + + /** + * Gets the maximum connection pool size specified in the connection string. + * @return the maximum connection pool size + * @see ConnectionPoolSettings#getMaxSize() + */ + @Nullable + public Integer getMaxConnectionPoolSize() { + return maxConnectionPoolSize; + } + + /** + * The maximum duration to wait until either: + *
<ul>
+ *     <li>
+ *         an {@linkplain ConnectionCheckedOutEvent in-use connection} becomes {@linkplain ConnectionCheckedInEvent available}; or
+ *     </li>
+ *     <li>
+ *         a {@linkplain ConnectionCreatedEvent connection is created} and begins to be {@linkplain ConnectionReadyEvent established}.
+ *         The time between {@linkplain ConnectionCheckOutStartedEvent requesting} a connection
+ *         and it being created is limited by this maximum duration.
+ *         The maximum time between it being created and {@linkplain ConnectionCheckedOutEvent successfully checked out},
+ *         which includes the time to {@linkplain ConnectionReadyEvent establish} the created connection,
+ *         is affected by {@link SocketSettings#getConnectTimeout(TimeUnit)}, {@link SocketSettings#getReadTimeout(TimeUnit)}
+ *         among others, and is not affected by this maximum duration.
+ *     </li>
+ * </ul>
+ * The reasons it is not always possible to create and start establishing a connection
+ * whenever there is no available connection:
+ * <ul>
+ *     <li>
+ *         the number of connections per pool is limited by {@link #getMaxConnectionPoolSize()};
+ *     </li>
+ *     <li>
+ *         the number of connections a pool may be establishing concurrently is limited by {@link #getMaxConnecting()}.
+ *     </li>
+ * </ul>
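+ * <p>For illustration (hypothetical URI), the cap comes straight from the connection string:</p>
+ * <pre>{@code
+ * new ConnectionString("mongodb://host1/?waitQueueTimeoutMS=500&maxPoolSize=10").getMaxWaitTime(); // 500
+ * }</pre>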
+ * + * @return The value of the {@code waitQueueTimeoutMS} option, if specified. + * @see ConnectionPoolSettings#getMaxWaitTime(TimeUnit) + */ + @Nullable + public Integer getMaxWaitTime() { + return maxWaitTime; + } + + /** + * Gets the maximum connection idle time specified in the connection string. + * @return the maximum connection idle time + */ + @Nullable + public Integer getMaxConnectionIdleTime() { + return maxConnectionIdleTime; + } + + /** + * Gets the maximum connection lifetime specified in the connection string. + * + * @return the maximum connection lifetime + */ + @Nullable + public Integer getMaxConnectionLifeTime() { + return maxConnectionLifeTime; + } + + /** + * Gets the maximum number of connections a pool may be establishing concurrently specified in the connection string. + * @return The maximum number of connections a pool may be establishing concurrently + * if the {@code maxConnecting} option is specified in the connection string, or {@code null} otherwise. + * @see ConnectionPoolSettings#getMaxConnecting() + * @since 4.4 + */ + @Nullable + public Integer getMaxConnecting() { + return maxConnecting; + } + + /** + * The time limit for the full execution of an operation in milliseconds. + * + *
<p>If set the following deprecated options will be ignored:
+ * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p>
+ * + *
<ul>
+ *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+ *    <ul>
+ *        <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+ *        available</li>
+ *        <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+ *        <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+ *        <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+ *        See: cursor.maxTimeMS.</li>
+ *        <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+ *        See: {@link TransactionOptions#getMaxCommitTime}.</li>
+ *    </ul>
+ *   </li>
+ *   <li>{@code 0} means infinite timeout.</li>
+ *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+ * </ul>
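+ * <p>A sketch (hypothetical URI):</p>
+ * <pre>{@code
+ * // timeoutMS wins; the deprecated options listed above are then ignored
+ * new ConnectionString("mongodb://host1/?timeoutMS=5000&socketTimeoutMS=1000").getTimeout(); // 5000
+ * }</pre>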
+ * + * @return the time limit for the full execution of an operation in milliseconds or null. + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + @Nullable + public Long getTimeout() { + return timeout; + } + + /** + * Gets the socket connect timeout specified in the connection string. + * @return the socket connect timeout + */ + @Nullable + public Integer getConnectTimeout() { + return connectTimeout; + } + + /** + * Gets the socket timeout specified in the connection string. + * @return the socket timeout + */ + @Nullable + public Integer getSocketTimeout() { + return socketTimeout; + } + + /** + * Gets the SSL enabled value specified in the connection string. + * @return the SSL enabled value + */ + @Nullable + public Boolean getSslEnabled() { + return sslEnabled; + } + + /** + * Gets the SOCKS5 proxy host specified in the connection string. + * + * @return the proxy host value. + * @since 4.11 + */ + @Nullable + public String getProxyHost() { + return proxyHost; + } + + /** + * Gets the SOCKS5 proxy port specified in the connection string. + * + * @return the proxy port value. + * @since 4.11 + */ + @Nullable + public Integer getProxyPort() { + return proxyPort; + } + + /** + * Gets the SOCKS5 proxy username specified in the connection string. + * + * @return the proxy username value. + * @since 4.11 + */ + @Nullable + public String getProxyUsername() { + return proxyUsername; + } + + /** + * Gets the SOCKS5 proxy password specified in the connection string. + * + * @return the proxy password value. + * @since 4.11 + */ + @Nullable + public String getProxyPassword() { + return proxyPassword; + } + /** + * Gets the SSL invalidHostnameAllowed value specified in the connection string. + * + * @return the SSL invalidHostnameAllowed value + * @since 3.3 + */ + @Nullable + public Boolean getSslInvalidHostnameAllowed() { + return sslInvalidHostnameAllowed; + } + + /** + * Gets the required replica set name specified in the connection string. + * @return the required replica set name + */ + @Nullable + public String getRequiredReplicaSetName() { + return requiredReplicaSetName; + } + + /** + * + * @return the server selection timeout (in milliseconds), or null if unset + * @since 3.3 + */ + @Nullable + public Integer getServerSelectionTimeout() { + return serverSelectionTimeout; + } + + /** + * + * @return the local threshold (in milliseconds), or null if unset + * since 3.3 + */ + @Nullable + public Integer getLocalThreshold() { + return localThreshold; + } + + /** + * + * @return the heartbeat frequency (in milliseconds), or null if unset + * since 3.3 + */ + @Nullable + public Integer getHeartbeatFrequency() { + return heartbeatFrequency; + } + + /** + * The server monitoring mode, which defines the monitoring protocol to use. + *
<p>
+ * Default is {@link ServerMonitoringMode#AUTO}.</p>
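+ * <p>For illustration (hypothetical URI; the mode names are those accepted by {@code ServerMonitoringModeUtil.fromString}):</p>
+ * <pre>{@code
+ * new ConnectionString("mongodb://host1/?serverMonitoringMode=poll").getServerMonitoringMode(); // ServerMonitoringMode.POLL
+ * }</pre>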
+ * + * @return The {@link ServerMonitoringMode}, or {@code null} if unset and the default is to be used. + * @see ServerSettings#getServerMonitoringMode() + * @since 5.1 + */ + @Nullable + public ServerMonitoringMode getServerMonitoringMode() { + return serverMonitoringMode; + } + + /** + * Gets the logical name of the application. The application name may be used by the client to identify the application to the server, + * for use in server logs, slow query logs, and profile collection. + * + *
<p>Default is null.</p>
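+ * <p>For illustration (hypothetical URI; option keys are matched case-insensitively):</p>
+ * <pre>{@code
+ * new ConnectionString("mongodb://host1/?appName=reportingService").getApplicationName(); // "reportingService"
+ * }</pre>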
+ * + * @return the application name, which may be null + * @since 3.4 + * @mongodb.server.release 3.4 + */ + @Nullable + public String getApplicationName() { + return applicationName; + } + + /** + * Gets the list of compressors. + * + * @return the non-null list of compressors + * @since 3.6 + */ + public List getCompressorList() { + return compressorList; + } + + /** + * Gets the UUID representation. + * + *
<p>Default is null.</p>
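+ * <p>A sketch (hypothetical URI; the accepted names are those handled by {@code createUuidRepresentation}):</p>
+ * <pre>{@code
+ * new ConnectionString("mongodb://host1/?uuidRepresentation=standard").getUuidRepresentation(); // UuidRepresentation.STANDARD
+ * }</pre>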
+ * + * @return the UUID representation, which may be null if it was unspecified + * @since 3.12 + */ + @Nullable + public UuidRepresentation getUuidRepresentation() { + return uuidRepresentation; + } + + @Override + public String toString() { + return connectionString; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ConnectionString that = (ConnectionString) o; + return isSrvProtocol == that.isSrvProtocol + && Objects.equals(directConnection, that.directConnection) + && Objects.equals(credential, that.credential) + && Objects.equals(hosts, that.hosts) + && Objects.equals(database, that.database) + && Objects.equals(collection, that.collection) + && Objects.equals(readPreference, that.readPreference) + && Objects.equals(writeConcern, that.writeConcern) + && Objects.equals(retryWrites, that.retryWrites) + && Objects.equals(retryReads, that.retryReads) + && Objects.equals(readConcern, that.readConcern) + && Objects.equals(minConnectionPoolSize, that.minConnectionPoolSize) + && Objects.equals(maxConnectionPoolSize, that.maxConnectionPoolSize) + && Objects.equals(maxWaitTime, that.maxWaitTime) + && Objects.equals(maxConnectionIdleTime, that.maxConnectionIdleTime) + && Objects.equals(maxConnectionLifeTime, that.maxConnectionLifeTime) + && Objects.equals(maxConnecting, that.maxConnecting) + && Objects.equals(connectTimeout, that.connectTimeout) + && Objects.equals(timeout, that.timeout) + && Objects.equals(socketTimeout, that.socketTimeout) + && Objects.equals(proxyHost, that.proxyHost) + && Objects.equals(proxyPort, that.proxyPort) + && Objects.equals(proxyUsername, that.proxyUsername) + && Objects.equals(proxyPassword, that.proxyPassword) + && Objects.equals(sslEnabled, that.sslEnabled) + && Objects.equals(sslInvalidHostnameAllowed, that.sslInvalidHostnameAllowed) + && Objects.equals(requiredReplicaSetName, that.requiredReplicaSetName) + && Objects.equals(serverSelectionTimeout, that.serverSelectionTimeout) + && Objects.equals(localThreshold, that.localThreshold) + && Objects.equals(heartbeatFrequency, that.heartbeatFrequency) + && Objects.equals(serverMonitoringMode, that.serverMonitoringMode) + && Objects.equals(applicationName, that.applicationName) + && Objects.equals(compressorList, that.compressorList) + && Objects.equals(uuidRepresentation, that.uuidRepresentation) + && Objects.equals(srvServiceName, that.srvServiceName) + && Objects.equals(srvMaxHosts, that.srvMaxHosts); + } + + @Override + public int hashCode() { + return Objects.hash(credential, isSrvProtocol, hosts, database, collection, directConnection, readPreference, + writeConcern, retryWrites, retryReads, readConcern, minConnectionPoolSize, maxConnectionPoolSize, maxWaitTime, + maxConnectionIdleTime, maxConnectionLifeTime, maxConnecting, connectTimeout, timeout, socketTimeout, sslEnabled, + sslInvalidHostnameAllowed, requiredReplicaSetName, serverSelectionTimeout, localThreshold, heartbeatFrequency, + serverMonitoringMode, applicationName, compressorList, uuidRepresentation, srvServiceName, srvMaxHosts, proxyHost, + proxyPort, proxyUsername, proxyPassword); + } +} diff --git a/driver-core/src/main/com/mongodb/ContextProvider.java b/driver-core/src/main/com/mongodb/ContextProvider.java new file mode 100644 index 00000000000..40914fabb57 --- /dev/null +++ b/driver-core/src/main/com/mongodb/ContextProvider.java @@ -0,0 +1,30 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.annotations.ThreadSafe; + +/** + * A marker interface for providers of {@code RequestContext}. Sub-interfaces in higher-level modules define methods that actually + * return instances of {@code RequestContext}, depending on whether the client is synchronous or reactive. + * @see RequestContext + * @see MongoClientSettings#getContextProvider() + * @since 4.4 + */ +@ThreadSafe +public interface ContextProvider { +} diff --git a/driver-core/src/main/com/mongodb/CreateIndexCommitQuorum.java b/driver-core/src/main/com/mongodb/CreateIndexCommitQuorum.java new file mode 100644 index 00000000000..cba79536323 --- /dev/null +++ b/driver-core/src/main/com/mongodb/CreateIndexCommitQuorum.java @@ -0,0 +1,163 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.BsonValue; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A commit quorum specifies how many data-bearing members of a replica set, including the primary, must + * complete the index builds successfully before the primary marks the indexes as ready. + * + * @mongodb.driver.manual reference/command/createIndexes/ Create indexes + * @mongodb.server.release 4.4 + * @since 4.1 + */ +public abstract class CreateIndexCommitQuorum { + + /** + * A create index commit quorum of majority. + */ + public static final CreateIndexCommitQuorum MAJORITY = new CreateIndexCommitQuorumWithMode("majority"); + + /** + * A create index commit quorum of voting members. + */ + public static final CreateIndexCommitQuorum VOTING_MEMBERS = new CreateIndexCommitQuorumWithMode("votingMembers"); + + /** + * Create a create index commit quorum with a mode value. + * + * @param mode the mode value + * @return a create index commit quorum of the specified mode + */ + public static CreateIndexCommitQuorum create(final String mode) { + return new CreateIndexCommitQuorumWithMode(mode); + } + + /** + * Create a create index commit quorum with a w value. + * + * @param w the w value + * @return a create index commit quorum with the specified w value + */ + public static CreateIndexCommitQuorum create(final int w) { + return new CreateIndexCommitQuorumWithW(w); + } + + /** + * Converts the create index commit quorum to a Bson value. 
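+ * <p>For example, {@code CreateIndexCommitQuorum.MAJORITY.toBsonValue()} yields {@code BsonString("majority")},
+ * while {@code CreateIndexCommitQuorum.create(2).toBsonValue()} yields {@code BsonInt32(2)}.</p>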
+ * + * @return the BsonValue that represents the create index commit quorum + */ + public abstract BsonValue toBsonValue(); + + private CreateIndexCommitQuorum() { + } + + private static final class CreateIndexCommitQuorumWithMode extends CreateIndexCommitQuorum { + private final String mode; + + private CreateIndexCommitQuorumWithMode(final String mode) { + notNull("mode", mode); + this.mode = mode; + } + + public String getMode() { + return mode; + } + + @Override + public BsonValue toBsonValue() { + return new BsonString(mode); + } + + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CreateIndexCommitQuorumWithMode that = (CreateIndexCommitQuorumWithMode) o; + return mode.equals(that.mode); + } + + @Override + public int hashCode() { + return mode.hashCode(); + } + + @Override + public String toString() { + return "CreateIndexCommitQuorum{" + + "mode=" + mode + + '}'; + } + } + + private static final class CreateIndexCommitQuorumWithW extends CreateIndexCommitQuorum { + private final int w; + + private CreateIndexCommitQuorumWithW(final int w) { + if (w < 0) { + throw new IllegalArgumentException("w cannot be less than zero"); + } + this.w = w; + } + + public int getW() { + return w; + } + + @Override + public BsonValue toBsonValue() { + return new BsonInt32(w); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CreateIndexCommitQuorumWithW that = (CreateIndexCommitQuorumWithW) o; + return w == that.w; + } + + @Override + public int hashCode() { + return w; + } + + @Override + public String toString() { + return "CreateIndexCommitQuorum{" + + "w=" + w + + '}'; + } + } +} diff --git a/driver-core/src/main/com/mongodb/CursorType.java b/driver-core/src/main/com/mongodb/CursorType.java new file mode 100644 index 00000000000..8511a77f73e --- /dev/null +++ b/driver-core/src/main/com/mongodb/CursorType.java @@ -0,0 +1,65 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +/** + * An enumeration of cursor types. + * + * @since 3.0 + * @mongodb.driver.manual ../meta-driver/latest/legacy/mongodb-wire-protocol/#op-query OP_QUERY + */ +public enum CursorType { + /** + * A non-tailable cursor. This is sufficient for a vast majority of uses. + */ + NonTailable { + @Override + public boolean isTailable() { + return false; + } + }, + + /** + * Tailable means the cursor is not closed when the last data is retrieved. Rather, the cursor marks the final object's position. You + * can resume using the cursor later, from where it was located, if more data were received. Like any "latent cursor", + * the cursor may become invalid at some point - for example if the final object it references were deleted. 
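+ * <p>For example, {@code CursorType.Tailable.isTailable()} returns {@code true}, whereas
+ * {@code CursorType.NonTailable.isTailable()} returns {@code false}.</p>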
+ */ + Tailable { + @Override + public boolean isTailable() { + return true; + } + }, + + /** + * A tailable cursor with a built-in server sleep before returning an empty batch. In most cases this is preferred type of tailable + * cursor, as it is less resource intensive. + */ + TailableAwait { + @Override + public boolean isTailable() { + return true; + } + }; + + /** + * True if the cursor type is tailable. + * + * @return true if the cursor type is tailable + */ + public abstract boolean isTailable(); +} diff --git a/driver-core/src/main/com/mongodb/DBObject.java b/driver-core/src/main/com/mongodb/DBObject.java new file mode 100644 index 00000000000..0986e9464d7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/DBObject.java @@ -0,0 +1,38 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import org.bson.BSONObject; + +/** + * This interface adds some specific behaviour to {@link org.bson.BSONObject} for MongoDB documents. + * + * @mongodb.driver.manual core/document/ Documents + */ +public interface DBObject extends BSONObject { + /** + * If this object was retrieved with only some fields (using a field filter) this method will be called to mark it as such. + */ + void markAsPartialObject(); + + /** + * Whether {@link #markAsPartialObject} was ever called only matters if you are going to upsert and do not want to risk losing fields. + * + * @return true if this has been marked as a partial object + */ + boolean isPartialObject(); +} diff --git a/driver-core/src/main/com/mongodb/DBObjectCodec.java b/driver-core/src/main/com/mongodb/DBObjectCodec.java new file mode 100644 index 00000000000..262971a27a9 --- /dev/null +++ b/driver-core/src/main/com/mongodb/DBObjectCodec.java @@ -0,0 +1,421 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb; + +import com.mongodb.lang.Nullable; +import org.bson.BSONObject; +import org.bson.BsonBinary; +import org.bson.BsonBinarySubType; +import org.bson.BsonDbPointer; +import org.bson.BsonDocument; +import org.bson.BsonDocumentWriter; +import org.bson.BsonReader; +import org.bson.BsonType; +import org.bson.BsonValue; +import org.bson.BsonWriter; +import org.bson.UuidRepresentation; +import org.bson.codecs.BsonTypeClassMap; +import org.bson.codecs.BsonTypeCodecMap; +import org.bson.codecs.BsonValueCodecProvider; +import org.bson.codecs.Codec; +import org.bson.codecs.CollectibleCodec; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.IdGenerator; +import org.bson.codecs.ObjectIdGenerator; +import org.bson.codecs.OverridableUuidRepresentationCodec; +import org.bson.codecs.ValueCodecProvider; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.types.BSONTimestamp; +import org.bson.types.Binary; +import org.bson.types.CodeWScope; +import org.bson.types.Symbol; + +import java.lang.reflect.Array; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.regex.Pattern; + +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.Arrays.asList; +import static org.bson.BsonBinarySubType.BINARY; +import static org.bson.BsonBinarySubType.OLD_BINARY; +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; + +/** + * A collectible codec for a DBObject. + * + * @since 3.0 + */ +@SuppressWarnings({"rawtypes"}) +public class DBObjectCodec implements CollectibleCodec, OverridableUuidRepresentationCodec { + private static final BsonTypeClassMap DEFAULT_BSON_TYPE_CLASS_MAP = createDefaultBsonTypeClassMap(); + private static final CodecRegistry DEFAULT_REGISTRY = + fromProviders(asList(new ValueCodecProvider(), new BsonValueCodecProvider(), new DBObjectCodecProvider())); + + private static final String ID_FIELD_NAME = "_id"; + + private final CodecRegistry codecRegistry; + private final BsonTypeCodecMap bsonTypeCodecMap; + private final DBObjectFactory objectFactory; + private final IdGenerator idGenerator = new ObjectIdGenerator(); + private final UuidRepresentation uuidRepresentation; + + private static BsonTypeClassMap createDefaultBsonTypeClassMap() { + Map> replacements = new HashMap<>(); + replacements.put(BsonType.REGULAR_EXPRESSION, Pattern.class); + replacements.put(BsonType.SYMBOL, String.class); + replacements.put(BsonType.TIMESTAMP, BSONTimestamp.class); + replacements.put(BsonType.JAVASCRIPT_WITH_SCOPE, null); + replacements.put(BsonType.DOCUMENT, null); + + return new BsonTypeClassMap(replacements); + } + + static BsonTypeClassMap getDefaultBsonTypeClassMap() { + return DEFAULT_BSON_TYPE_CLASS_MAP; + } + + static CodecRegistry getDefaultRegistry() { + return DEFAULT_REGISTRY; + } + + /** + * Construct an instance with the default codec registry + * + * @since 3.7 + */ + public DBObjectCodec() { + this(DEFAULT_REGISTRY); + } + + /** + * Construct an instance with the given codec registry. + * + * @param codecRegistry the non-null codec registry + */ + public DBObjectCodec(final CodecRegistry codecRegistry) { + this(codecRegistry, DEFAULT_BSON_TYPE_CLASS_MAP); + } + + /** + * Construct an instance. 
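+ * <p>A sketch ({@code registry} is a stand-in for any configured {@code CodecRegistry}):</p>
+ * <pre>{@code
+ * DBObjectCodec codec = new DBObjectCodec(registry, new BsonTypeClassMap());
+ * }</pre>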
+ * + * @param codecRegistry the codec registry + * @param bsonTypeClassMap the non-null BsonTypeClassMap + */ + public DBObjectCodec(final CodecRegistry codecRegistry, final BsonTypeClassMap bsonTypeClassMap) { + this(codecRegistry, bsonTypeClassMap, new BasicDBObjectFactory()); + } + + /** + * Construct an instance. + * + * @param codecRegistry the non-null codec registry + * @param bsonTypeClassMap the non-null BsonTypeClassMap + * @param objectFactory the non-null object factory used to create empty DBObject instances when decoding + */ + public DBObjectCodec(final CodecRegistry codecRegistry, final BsonTypeClassMap bsonTypeClassMap, final DBObjectFactory objectFactory) { + this(codecRegistry, new BsonTypeCodecMap(notNull("bsonTypeClassMap", bsonTypeClassMap), codecRegistry), objectFactory, + UuidRepresentation.UNSPECIFIED); + } + + private DBObjectCodec(final CodecRegistry codecRegistry, final BsonTypeCodecMap bsonTypeCodecMap, final DBObjectFactory objectFactory, + final UuidRepresentation uuidRepresentation) { + this.objectFactory = notNull("objectFactory", objectFactory); + this.codecRegistry = notNull("codecRegistry", codecRegistry); + this.uuidRepresentation = notNull("uuidRepresentation", uuidRepresentation); + this.bsonTypeCodecMap = bsonTypeCodecMap; + } + + @Override + public void encode(final BsonWriter writer, final DBObject document, final EncoderContext encoderContext) { + writer.writeStartDocument(); + + beforeFields(writer, encoderContext, document); + + for (final String key : document.keySet()) { + if (skipField(encoderContext, key)) { + continue; + } + writer.writeName(key); + writeValue(writer, encoderContext, document.get(key)); + } + writer.writeEndDocument(); + } + + @Override + public DBObject decode(final BsonReader reader, final DecoderContext decoderContext) { + List path = new ArrayList<>(10); + return readDocument(reader, decoderContext, path); + } + + @Override + public Class getEncoderClass() { + return DBObject.class; + } + + @Override + public boolean documentHasId(final DBObject document) { + return document.containsField(ID_FIELD_NAME); + } + + @Override + public BsonValue getDocumentId(final DBObject document) { + if (!documentHasId(document)) { + throw new IllegalStateException("The document does not contain an _id"); + } + + Object id = document.get(ID_FIELD_NAME); + if (id instanceof BsonValue) { + return (BsonValue) id; + } + + BsonDocument idHoldingDocument = new BsonDocument(); + BsonWriter writer = new BsonDocumentWriter(idHoldingDocument); + writer.writeStartDocument(); + writer.writeName(ID_FIELD_NAME); + writeValue(writer, EncoderContext.builder().build(), id); + writer.writeEndDocument(); + return idHoldingDocument.get(ID_FIELD_NAME); + } + + @Override + public DBObject generateIdIfAbsentFromDocument(final DBObject document) { + if (!documentHasId(document)) { + document.put(ID_FIELD_NAME, idGenerator.generate()); + } + return document; + } + + @Override + public Codec withUuidRepresentation(final UuidRepresentation uuidRepresentation) { + if (this.uuidRepresentation.equals(uuidRepresentation)) { + return this; + } + return new DBObjectCodec(codecRegistry, bsonTypeCodecMap, objectFactory, uuidRepresentation); + } + + private void beforeFields(final BsonWriter bsonWriter, final EncoderContext encoderContext, final DBObject document) { + if (encoderContext.isEncodingCollectibleDocument() && document.containsField(ID_FIELD_NAME)) { + bsonWriter.writeName(ID_FIELD_NAME); + writeValue(bsonWriter, encoderContext, document.get(ID_FIELD_NAME)); + 
+        }
+    }
+
+    private boolean skipField(final EncoderContext encoderContext, final String key) {
+        return encoderContext.isEncodingCollectibleDocument() && key.equals(ID_FIELD_NAME);
+    }
+
+    @SuppressWarnings("unchecked")
+    private void writeValue(final BsonWriter bsonWriter, final EncoderContext encoderContext, @Nullable final Object value) {
+        if (value == null) {
+            bsonWriter.writeNull();
+        } else if (value instanceof DBRef) {
+            encodeDBRef(bsonWriter, (DBRef) value, encoderContext);
+        } else if (value instanceof Map) {
+            encodeMap(bsonWriter, (Map<String, Object>) value, encoderContext);
+        } else if (value instanceof Iterable) {
+            encodeIterable(bsonWriter, (Iterable) value, encoderContext);
+        } else if (value instanceof BSONObject) {
+            encodeBsonObject(bsonWriter, (BSONObject) value, encoderContext);
+        } else if (value instanceof CodeWScope) {
+            encodeCodeWScope(bsonWriter, (CodeWScope) value, encoderContext);
+        } else if (value instanceof byte[]) {
+            encodeByteArray(bsonWriter, (byte[]) value);
+        } else if (value.getClass().isArray()) {
+            encodeArray(bsonWriter, value, encoderContext);
+        } else if (value instanceof Symbol) {
+            bsonWriter.writeSymbol(((Symbol) value).getSymbol());
+        } else {
+            Codec codec = codecRegistry.get(value.getClass());
+            encoderContext.encodeWithChildContext(codec, bsonWriter, value);
+        }
+    }
+
+    private void encodeMap(final BsonWriter bsonWriter, final Map<String, Object> document, final EncoderContext encoderContext) {
+        bsonWriter.writeStartDocument();
+
+        for (final Map.Entry<String, Object> entry : document.entrySet()) {
+            bsonWriter.writeName(entry.getKey());
+            writeValue(bsonWriter, encoderContext.getChildContext(), entry.getValue());
+        }
+        bsonWriter.writeEndDocument();
+    }
+
+    private void encodeBsonObject(final BsonWriter bsonWriter, final BSONObject document, final EncoderContext encoderContext) {
+        bsonWriter.writeStartDocument();
+
+        for (String key : document.keySet()) {
+            bsonWriter.writeName(key);
+            writeValue(bsonWriter, encoderContext.getChildContext(), document.get(key));
+        }
+        bsonWriter.writeEndDocument();
+    }
+
+    private void encodeByteArray(final BsonWriter bsonWriter, final byte[] value) {
+        bsonWriter.writeBinaryData(new BsonBinary(value));
+    }
+
+    private void encodeArray(final BsonWriter bsonWriter, final Object value, final EncoderContext encoderContext) {
+        bsonWriter.writeStartArray();
+
+        int size = Array.getLength(value);
+        for (int i = 0; i < size; i++) {
+            writeValue(bsonWriter, encoderContext.getChildContext(), Array.get(value, i));
+        }
+
+        bsonWriter.writeEndArray();
+    }
+
+    private void encodeDBRef(final BsonWriter bsonWriter, final DBRef dbRef, final EncoderContext encoderContext) {
+        bsonWriter.writeStartDocument();
+
+        bsonWriter.writeString("$ref", dbRef.getCollectionName());
+        bsonWriter.writeName("$id");
+        writeValue(bsonWriter, encoderContext.getChildContext(), dbRef.getId());
+        if (dbRef.getDatabaseName() != null) {
+            bsonWriter.writeString("$db", dbRef.getDatabaseName());
+        }
+        bsonWriter.writeEndDocument();
+    }
+
+    private void encodeCodeWScope(final BsonWriter bsonWriter, final CodeWScope value, final EncoderContext encoderContext) {
+        bsonWriter.writeJavaScriptWithScope(value.getCode());
+        encodeBsonObject(bsonWriter, value.getScope(), encoderContext.getChildContext());
+    }
+
+    private void encodeIterable(final BsonWriter bsonWriter, final Iterable iterable, final EncoderContext encoderContext) {
+        bsonWriter.writeStartArray();
+        for (final Object cur : iterable) {
+            writeValue(bsonWriter, encoderContext.getChildContext(), cur);
+        }
+        bsonWriter.writeEndArray();
+    }
+
+    @Nullable
+    private Object readValue(final BsonReader reader, final DecoderContext decoderContext, @Nullable final String fieldName,
+                             final List<String> path) {
+        Object initialRetVal;
+        BsonType bsonType = reader.getCurrentBsonType();
+
+        if (bsonType.isContainer() && fieldName != null) {
+            //if we got into some new context like nested document or array
+            path.add(fieldName);
+        }
+
+        switch (bsonType) {
+            case DOCUMENT:
+                initialRetVal = verifyForDBRef(readDocument(reader, decoderContext, path));
+                break;
+            case ARRAY:
+                initialRetVal = readArray(reader, decoderContext, path);
+                break;
+            case JAVASCRIPT_WITH_SCOPE: //custom for driver-compat types
+                initialRetVal = readCodeWScope(reader, decoderContext, path);
+                break;
+            case DB_POINTER: //custom for driver-compat types
+                BsonDbPointer dbPointer = reader.readDBPointer();
+                initialRetVal = new DBRef(dbPointer.getNamespace(), dbPointer.getId());
+                break;
+            case BINARY:
+                initialRetVal = readBinary(reader, decoderContext);
+                break;
+            case NULL:
+                reader.readNull();
+                initialRetVal = null;
+                break;
+            default:
+                initialRetVal = bsonTypeCodecMap.get(bsonType).decode(reader, decoderContext);
+        }
+
+        if (bsonType.isContainer() && fieldName != null) {
+            //step out of current context to a parent
+            path.remove(fieldName);
+        }
+
+        return initialRetVal;
+    }
+
+    private Object readBinary(final BsonReader reader, final DecoderContext decoderContext) {
+        byte bsonBinarySubType = reader.peekBinarySubType();
+        Codec<?> codec;
+
+        if (BsonBinarySubType.isUuid(bsonBinarySubType) && reader.peekBinarySize() == 16) {
+            codec = codecRegistry.get(Binary.class);
+            switch (bsonBinarySubType) {
+                case 3:
+                    if (uuidRepresentation == UuidRepresentation.JAVA_LEGACY
+                            || uuidRepresentation == UuidRepresentation.C_SHARP_LEGACY
+                            || uuidRepresentation == UuidRepresentation.PYTHON_LEGACY) {
+                        codec = codecRegistry.get(UUID.class);
+                    }
+                    break;
+                case 4:
+                    if (uuidRepresentation == UuidRepresentation.STANDARD) {
+                        codec = codecRegistry.get(UUID.class);
+                    }
+                    break;
+                default:
+                    throw new UnsupportedOperationException("Unknown UUID binary subtype " + bsonBinarySubType);
+            }
+        } else if (bsonBinarySubType == BINARY.getValue() || bsonBinarySubType == OLD_BINARY.getValue()) {
+            codec = codecRegistry.get(byte[].class);
+        } else {
+            codec = codecRegistry.get(Binary.class);
+        }
+        return codec.decode(reader, decoderContext);
+    }
+
+    private List<Object> readArray(final BsonReader reader, final DecoderContext decoderContext, final List<String> path) {
+        reader.readStartArray();
+        BasicDBList list = new BasicDBList();
+        while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+            list.add(readValue(reader, decoderContext, null, path));
+        }
+        reader.readEndArray();
+        return list;
+    }
+
+    private DBObject readDocument(final BsonReader reader, final DecoderContext decoderContext, final List<String> path) {
+        DBObject document = objectFactory.getInstance(path);
+
+        reader.readStartDocument();
+        while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+            String fieldName = reader.readName();
+            document.put(fieldName, readValue(reader, decoderContext, fieldName, path));
+        }
+
+        reader.readEndDocument();
+        return document;
+    }
+
+    private CodeWScope readCodeWScope(final BsonReader reader, final DecoderContext decoderContext, final List<String> path) {
+        return new CodeWScope(reader.readJavaScriptWithScope(), readDocument(reader, decoderContext, path));
+    }
+
+    private Object verifyForDBRef(final DBObject document) {
+        if (document.containsField("$ref") && document.containsField("$id")) {
+            return new DBRef((String) document.get("$db"), (String) document.get("$ref"), document.get("$id"));
+        } else {
+            return document;
+        }
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/DBObjectCodecProvider.java b/driver-core/src/main/com/mongodb/DBObjectCodecProvider.java
new file mode 100644
index 00000000000..417bba64e48
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/DBObjectCodecProvider.java
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb;
+
+import org.bson.codecs.BsonTypeClassMap;
+import org.bson.codecs.Codec;
+import org.bson.codecs.DateCodec;
+import org.bson.codecs.configuration.CodecProvider;
+import org.bson.codecs.configuration.CodecRegistry;
+import org.bson.types.BSONTimestamp;
+
+import java.util.Date;
+import java.util.List;
+
+import static com.mongodb.assertions.Assertions.notNull;
+
+/**
+ * A provider for a DBObjectCodec.
+ *
+ * @since 3.0
+ */
+public class DBObjectCodecProvider implements CodecProvider {
+    private final BsonTypeClassMap bsonTypeClassMap;
+
+    /**
+     * Construct an instance using the default {@code BsonTypeClassMap}.
+     *
+     * @see DBObjectCodec#getDefaultBsonTypeClassMap()
+     */
+    public DBObjectCodecProvider() {
+        this(DBObjectCodec.getDefaultBsonTypeClassMap());
+    }
+
+    /**
+     * Construct an instance with the given {@code BsonTypeClassMap}.
+     *
+     * @param bsonTypeClassMap the BsonTypeClassMap
+     */
+    public DBObjectCodecProvider(final BsonTypeClassMap bsonTypeClassMap) {
+        this.bsonTypeClassMap = notNull("bsonTypeClassMap", bsonTypeClassMap);
+    }
+
+    @Override
+    @SuppressWarnings("unchecked")
+    public <T> Codec<T> get(final Class<T> clazz, final CodecRegistry registry) {
+        if (clazz == BSONTimestamp.class) {
+            return (Codec<T>) new BSONTimestampCodec();
+        }
+
+        if (DBObject.class.isAssignableFrom(clazz) && !List.class.isAssignableFrom(clazz)) {
+            return (Codec<T>) new DBObjectCodec(registry, bsonTypeClassMap);
+        }
+
+        if (Date.class.isAssignableFrom(clazz)) {
+            return (Codec<T>) new DateCodec();
+        }
+
+        return null;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        return getClass().hashCode();
+    }
+
+    @Override
+    public String toString() {
+        return "DBObjectCodecProvider{}";
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/DBObjectFactory.java b/driver-core/src/main/com/mongodb/DBObjectFactory.java
new file mode 100644
index 00000000000..4798eb6f040
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/DBObjectFactory.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb;
+
+import java.util.List;
+
+interface DBObjectFactory {
+    DBObject getInstance();
+
+    DBObject getInstance(List<String> path);
+}
diff --git a/driver-core/src/main/com/mongodb/DBRef.java b/driver-core/src/main/com/mongodb/DBRef.java
new file mode 100644
index 00000000000..2e3ca1d2db8
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/DBRef.java
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// DBRef.java
+
+package com.mongodb;
+
+import com.mongodb.lang.Nullable;
+
+import java.io.Serializable;
+import java.util.Objects;
+
+import static com.mongodb.assertions.Assertions.notNull;
+
+/**
+ * A representation of a database reference.
+ *
+ * @mongodb.driver.manual reference/database-references/ Database References
+ */
+public class DBRef implements Serializable {
+
+    private static final long serialVersionUID = -849581217713362618L;
+
+    /**
+     * The id.
+     */
+    private final Object id;
+    /**
+     * The collection name.
+     */
+    private final String collectionName;
+    /**
+     * The database name, which may be null.
+     */
+    private final String databaseName;
+
+    /**
+     * Construct an instance.
+     *
+     * @param collectionName the name of the collection where the document is stored
+     * @param id the object id
+     */
+    public DBRef(final String collectionName, final Object id) {
+        this(null, collectionName, id);
+    }
+
+    /**
+     * Construct an instance.
+     *
+     * @param databaseName the name of the database where the document is stored
+     * @param collectionName the name of the collection where the document is stored
+     * @param id the object id
+     * @since 3.3
+     */
+    public DBRef(@Nullable final String databaseName, final String collectionName, final Object id) {
+        this.id = notNull("id", id);
+        this.collectionName = notNull("collectionName", collectionName);
+        this.databaseName = databaseName;
+    }
+
+    /**
+     * Gets the _id of the referenced document.
+     *
+     * @return the _id of the referenced document
+     */
+    public Object getId() {
+        return id;
+    }
+
+    /**
+     * Gets the name of the collection in which the referenced document is stored.
+     *
+     * @return the name of the collection in which the referenced document is stored
+     */
+    public String getCollectionName() {
+        return collectionName;
+    }
+
+    /**
+     * Gets the name of the database in which the referenced document is stored. A null value implies that the referenced document is
+     * stored in the same database as the referring document.
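+     *
+     * <p>For example (an illustrative sketch, not part of the original patch):</p>
+     * <pre>{@code
+     * DBRef sameDatabase = new DBRef("people", 42);
+     * DBRef otherDatabase = new DBRef("archive", "people", 42);
+     * otherDatabase.toString(); // { "$ref" : "people", "$id" : "42", "$db" : "archive" }
+     * }</pre>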
+     *
+     * @return the possibly-null database name
+     * @since 3.3
+     */
+    @Nullable
+    public String getDatabaseName() {
+        return databaseName;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        DBRef dbRef = (DBRef) o;
+
+        if (!id.equals(dbRef.id)) {
+            return false;
+        }
+        if (!collectionName.equals(dbRef.collectionName)) {
+            return false;
+        }
+        if (!Objects.equals(databaseName, dbRef.databaseName)) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = id.hashCode();
+        result = 31 * result + collectionName.hashCode();
+        result = 31 * result + (databaseName != null ? databaseName.hashCode() : 0);
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "{ "
+                + "\"$ref\" : \"" + collectionName + "\", \"$id\" : \"" + id + "\""
+                + (databaseName == null ? "" : ", \"$db\" : \"" + databaseName + "\"")
+                + " }";
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/DBRefCodec.java b/driver-core/src/main/com/mongodb/DBRefCodec.java
new file mode 100644
index 00000000000..b0091a8ff7e
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/DBRefCodec.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb;
+
+import org.bson.BsonReader;
+import org.bson.BsonWriter;
+import org.bson.codecs.Codec;
+import org.bson.codecs.DecoderContext;
+import org.bson.codecs.EncoderContext;
+import org.bson.codecs.configuration.CodecRegistry;
+
+import static com.mongodb.assertions.Assertions.notNull;
+
+/**
+ * A Codec for DBRef instances.
+ *
+ * @since 3.0
+ */
+public class DBRefCodec implements Codec<DBRef> {
+    private final CodecRegistry registry;
+
+    /**
+     * Construct an instance with the given registry, which is used to encode the id of the referenced document.
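+     *
+     * <p>For example (an illustrative sketch):</p>
+     * <pre>{@code
+     * CodecRegistry registry = MongoClientSettings.getDefaultCodecRegistry();
+     * DBRefCodec codec = new DBRefCodec(registry);
+     * }</pre>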
+     *
+     * @param registry the non-null codec registry
+     */
+    public DBRefCodec(final CodecRegistry registry) {
+        this.registry = notNull("registry", registry);
+    }
+
+    @Override
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    public void encode(final BsonWriter writer, final DBRef value, final EncoderContext encoderContext) {
+        writer.writeStartDocument();
+        writer.writeString("$ref", value.getCollectionName());
+        writer.writeName("$id");
+        Codec codec = registry.get(value.getId().getClass());
+        codec.encode(writer, value.getId(), encoderContext);
+        if (value.getDatabaseName() != null) {
+            writer.writeString("$db", value.getDatabaseName());
+        }
+        writer.writeEndDocument();
+    }
+
+    @Override
+    public Class<DBRef> getEncoderClass() {
+        return DBRef.class;
+    }
+
+    @Override
+    public DBRef decode(final BsonReader reader, final DecoderContext decoderContext) {
+        throw new UnsupportedOperationException("DBRefCodec does not support decoding");
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/DBRefCodecProvider.java b/driver-core/src/main/com/mongodb/DBRefCodecProvider.java
new file mode 100644
index 00000000000..83a04fae206
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/DBRefCodecProvider.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb;
+
+import org.bson.codecs.Codec;
+import org.bson.codecs.configuration.CodecProvider;
+import org.bson.codecs.configuration.CodecRegistry;
+
+/**
+ * A codec provider for DBRef.
+ *
+ * @since 3.0
+ */
+public class DBRefCodecProvider implements CodecProvider {
+
+    @Override
+    @SuppressWarnings("unchecked")
+    public <T> Codec<T> get(final Class<T> clazz, final CodecRegistry registry) {
+        if (clazz == DBRef.class) {
+            return (Codec<T>) new DBRefCodec(registry);
+        }
+
+        return null;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        return 0;
+    }
+
+    @Override
+    public String toString() {
+        return "DBRefCodecProvider{}";
+    }
}
diff --git a/driver-core/src/main/com/mongodb/DocumentToDBRefTransformer.java b/driver-core/src/main/com/mongodb/DocumentToDBRefTransformer.java
new file mode 100644
index 00000000000..d17c9a3a46f
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/DocumentToDBRefTransformer.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import org.bson.Document; +import org.bson.Transformer; + +/** + * A Document to DBRef Transformer. + *
+ * <p>
+ * Can be used with any {@link org.bson.codecs.Codec} that takes a {@link Transformer}. + * + * @since 3.5 + */ +public final class DocumentToDBRefTransformer implements Transformer { + @Override + public Object transform(final Object value) { + if (value instanceof Document) { + Document document = (Document) value; + if (document.containsKey("$id") && document.containsKey("$ref")) { + return new DBRef((String) document.get("$db"), (String) document.get("$ref"), document.get("$id")); + } + } + return value; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } +} diff --git a/driver-core/src/main/com/mongodb/DuplicateKeyException.java b/driver-core/src/main/com/mongodb/DuplicateKeyException.java new file mode 100644 index 00000000000..a079c430b32 --- /dev/null +++ b/driver-core/src/main/com/mongodb/DuplicateKeyException.java @@ -0,0 +1,58 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import org.bson.BsonDocument; + +/** + * The legacy {@link WriteConcernException}, thrown when trying to insert or update a document containing a duplicate key. + * + *
+ * <p>Only thrown when using the legacy deprecated API, which is accessed via {@code com.mongodb.MongoClient.getDB}.</p>
+ *
+ * <p>For application using the {@code MongoCollection}-based API, duplicate key exceptions can be determined via:</p>
+ * <ul>
+ *     <li>Single document inserts or updates: a {@link MongoWriteException} is thrown with a {@link WriteError} in the category
+ *     {@link ErrorCategory#DUPLICATE_KEY}.</li>
+ *     <li>Bulk document inserts or updates: A {@link MongoBulkWriteException} is thrown where one or more of the {@link WriteError}'s in
+ *     the list of errors is in the category {@link ErrorCategory#DUPLICATE_KEY}.</li>
+ * </ul>
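+ *
+ * <p>For example, with the {@code MongoCollection}-based API (an illustrative sketch; {@code collection} and {@code document}
+ * are assumed to exist):</p>
+ * <pre>{@code
+ * try {
+ *     collection.insertOne(document);
+ * } catch (MongoWriteException e) {
+ *     if (e.getError().getCategory() == ErrorCategory.DUPLICATE_KEY) {
+ *         // the insert attempted to violate a unique index
+ *     }
+ * }
+ * }</pre>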
+ *
+ * @since 2.12
+ * @see MongoWriteException
+ * @see MongoBulkWriteException
+ * @see WriteError
+ * @see ErrorCategory#DUPLICATE_KEY
+ */
+public class DuplicateKeyException extends WriteConcernException {
+
+    private static final long serialVersionUID = -4415279469780082174L;
+
+    /**
+     * Construct an instance.
+     *
+     * @param response the response from the server
+     * @param address the server address
+     * @param writeConcernResult the write concern result
+     */
+    public DuplicateKeyException(final BsonDocument response, final ServerAddress address, final WriteConcernResult writeConcernResult) {
+        super(response, address, writeConcernResult);
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/ErrorCategory.java b/driver-core/src/main/com/mongodb/ErrorCategory.java
new file mode 100644
index 00000000000..b7766a10a0a
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/ErrorCategory.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb;
+
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * A categorization of errors returned by a MongoDB server command.
+ *
+ * @since 3.0
+ */
+public enum ErrorCategory {
+    /**
+     * An uncategorized error
+     */
+    UNCATEGORIZED,
+
+    /**
+     * A duplicate key error
+     *
+     * @mongodb.driver.manual core/index-unique/ Unique Indexes
+     */
+    DUPLICATE_KEY,
+
+    /**
+     * An execution timeout error
+     *
+     * @mongodb.driver.manual reference/operator/meta/maxTimeMS/ maxTimeMS
+     */
+    EXECUTION_TIMEOUT;
+
+    private static final List<Integer> DUPLICATE_KEY_ERROR_CODES = Arrays.asList(11000, 11001, 12582);
+    private static final List<Integer> EXECUTION_TIMEOUT_ERROR_CODES = Arrays.asList(50);
+
+    /**
+     * Translate an error code into an error category
+     *
+     * @param code the error code
+     * @return the error category for the given code
+     */
+    public static ErrorCategory fromErrorCode(final int code) {
+        if (DUPLICATE_KEY_ERROR_CODES.contains(code)) {
+            return DUPLICATE_KEY;
+        } else if (EXECUTION_TIMEOUT_ERROR_CODES.contains(code)) {
+            return EXECUTION_TIMEOUT;
+        } else {
+            return UNCATEGORIZED;
+        }
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/ExplainVerbosity.java b/driver-core/src/main/com/mongodb/ExplainVerbosity.java
new file mode 100644
index 00000000000..bea9ef75800
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/ExplainVerbosity.java
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb;
+
+/**
+ * An enumeration of the verbosity levels available for explaining query execution.
+ *
+ * @since 3.0
+ * @mongodb.server.release 3.0
+ */
+public enum ExplainVerbosity {
+    /**
+     * Runs the query planner and chooses the winning plan, but does not actually execute it. The use case for this verbosity level is
+     * "Which plan will MongoDB choose to run my query."
+     */
+    QUERY_PLANNER,
+
+    /**
+     * Runs the query optimizer, and then runs the winning plan to completion. In addition to the planner information, this makes execution
+     * stats available. The use case for this verbosity level is "Is my query performing well."
+     */
+    EXECUTION_STATS,
+
+    /**
+     * Runs the query optimizer and chooses the winning plan, but then runs all generated plans to completion. This makes execution
+     * stats available for all of the query plans. The use case for this verbosity level is "I have a problem with this query,
+     * and I want as much information as possible in order to diagnose why it might be slow."
+     */
+    ALL_PLANS_EXECUTIONS
+}
diff --git a/driver-core/src/main/com/mongodb/Function.java b/driver-core/src/main/com/mongodb/Function.java
new file mode 100644
index 00000000000..7595687cfbd
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/Function.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb;
+
+/**
+ * Apply a function to the input object yielding an appropriate result object. A function may variously provide a mapping between types,
+ * object instances or keys and values or any other form of transformation upon the input.
+ *
+ * @param <T> the type of input objects to the {@code apply} operation
+ * @param <R> the type of result objects from the {@code apply} operation. May be the same type as {@code T}.
+ */
+public interface Function<T, R> {
+
+    /**
+     * Yield an appropriate result object for the input object.
+     *
+     * @param t the input object
+     * @return the function result
+     */
+    R apply(T t);
+}
diff --git a/driver-core/src/main/com/mongodb/Jep395RecordCodecProvider.java b/driver-core/src/main/com/mongodb/Jep395RecordCodecProvider.java
new file mode 100644
index 00000000000..b53d8595b6f
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/Jep395RecordCodecProvider.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb;
+
+import com.mongodb.internal.VisibleForTesting;
+import com.mongodb.lang.Nullable;
+import org.bson.codecs.Codec;
+import org.bson.codecs.configuration.CodecProvider;
+import org.bson.codecs.configuration.CodecRegistry;
+import org.bson.codecs.record.RecordCodecProvider;
+
+import java.lang.reflect.Type;
+import java.util.Collections;
+import java.util.List;
+
+import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE;
+
+
+/**
+ * A CodecProvider for Java Records.
+ * Delegates to {@code org.bson.codecs.record.RecordCodecProvider}.
+ * If the runtime does not support {@code java.lang.Record} (introduced in Java SE 17), or if
+ * {@code org.bson.codecs.record.RecordCodecProvider} is not available,
+ * {@linkplain CodecProvider#get(Class, CodecRegistry) provides} {@code null}.
+ *
+ * @since 4.6
+ */
+public class Jep395RecordCodecProvider implements CodecProvider {
+
+    @Nullable
+    private static final CodecProvider RECORD_CODEC_PROVIDER;
+    static {
+
+        CodecProvider possibleCodecProvider;
+        try {
+            Class.forName("java.lang.Record"); // JEP-395 support canary test.
+            Class.forName("org.bson.codecs.record.RecordCodecProvider"); // bson-record-codec availability canary test
+            possibleCodecProvider = new RecordCodecProvider();
+        } catch (ClassNotFoundException | UnsupportedClassVersionError e) {
+            // No JEP-395 support
+            possibleCodecProvider = null;
+        }
+        RECORD_CODEC_PROVIDER = possibleCodecProvider;
+    }
+
+    @Override
+    @Nullable
+    public <T> Codec<T> get(final Class<T> clazz, final CodecRegistry registry) {
+        return get(clazz, Collections.emptyList(), registry);
+    }
+
+    @Override
+    @Nullable
+    public <T> Codec<T> get(final Class<T> clazz, final List<Type> typeArguments, final CodecRegistry registry) {
+        return RECORD_CODEC_PROVIDER != null ? RECORD_CODEC_PROVIDER.get(clazz, typeArguments, registry) : null;
+    }
+
+    /**
+     * This method is not part of the public API and may be removed or changed at any time.
+     *
+     * @return true if records are supported
+     */
+    @VisibleForTesting(otherwise = PRIVATE)
+    public boolean hasRecordSupport() {
+        return RECORD_CODEC_PROVIDER != null;
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/KerberosSubjectProvider.java b/driver-core/src/main/com/mongodb/KerberosSubjectProvider.java
new file mode 100644
index 00000000000..af480d713ca
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/KerberosSubjectProvider.java
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.mongodb; + +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.lang.NonNull; +import com.mongodb.lang.Nullable; + +import javax.security.auth.Subject; +import javax.security.auth.kerberos.KerberosTicket; +import javax.security.auth.login.LoginContext; +import javax.security.auth.login.LoginException; +import java.util.concurrent.locks.ReentrantLock; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.Locks.checkedWithInterruptibleLock; +import static java.lang.String.format; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.MINUTES; + +/** + * An implementation of {@link SubjectProvider} suitable for use as the value of the {@link MongoCredential#JAVA_SUBJECT_PROVIDER_KEY} + * mechanism property for Kerberos credentials, created via {@link MongoCredential#createGSSAPICredential(String)}. + *
+ * <p>
+ * An instance of this class will cache a Kerberos {@link Subject} until its TGT is close to expiration, at which point it will replace + * the {@code Subject} with a new one. + *
+ * </p>
+ * <p>
+ * {@code Subject} instances are created by first constructing a {@link LoginContext} with the specified name, then calling its + * {@link LoginContext#login()} method, and finally acquiring the {@code Subject} via a call to {@link LoginContext#getSubject()}. + *
+ * </p>
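+ *
+ * <p>For example (an illustrative sketch; {@code userName} is an assumed Kerberos principal):</p>
+ * <pre>{@code
+ * MongoCredential credential = MongoCredential.createGSSAPICredential(userName)
+ *         .withMechanismProperty(MongoCredential.JAVA_SUBJECT_PROVIDER_KEY, new KerberosSubjectProvider());
+ * }</pre>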
+ * + * @see LoginContext + * @see Subject + * @see KerberosTicket + * @since 4.2 + */ +@ThreadSafe +public class KerberosSubjectProvider implements SubjectProvider { + private static final Logger LOGGER = Loggers.getLogger("authenticator"); + private static final String TGT_PREFIX = "krbtgt/"; + + private final ReentrantLock lock = new ReentrantLock(); + private String loginContextName; + private String fallbackLoginContextName; + private Subject subject; + + /** + * Construct an instance with the default login context name {@code "com.sun.security.jgss.krb5.initiate"}. + *
+     * <p>
+     * For compatibility, falls back to {@code "com.sun.security.jgss.initiate"}
+     * </p>
+     */
+    public KerberosSubjectProvider() {
+        this("com.sun.security.jgss.krb5.initiate", "com.sun.security.jgss.initiate");
+    }
+
+    /**
+     * Construct an instance with the specified login context name.
+     *
+     * @param loginContextName the login context name
+     */
+    public KerberosSubjectProvider(final String loginContextName) {
+        this(loginContextName, null);
+    }
+
+    private KerberosSubjectProvider(final String loginContextName, @Nullable final String fallbackLoginContextName) {
+        this.loginContextName = notNull("loginContextName", loginContextName);
+        this.fallbackLoginContextName = fallbackLoginContextName;
+    }
+
+    /**
+     * Gets a {@code Subject} instance associated with a {@link LoginContext} after it has been logged in.
+     *
+     * @return the non-null {@code Subject} instance
+     * @throws LoginException any exception resulting from a call to {@link LoginContext#login()}
+     */
+    @NonNull
+    public Subject getSubject() throws LoginException {
+        return checkedWithInterruptibleLock(lock, () -> {
+            if (subject == null || needNewSubject(subject)) {
+                subject = createNewSubject();
+            }
+            return subject;
+        });
+    }
+
+    private Subject createNewSubject() throws LoginException {
+        LoginContext loginContext;
+        try {
+            LOGGER.debug(format("Creating LoginContext with name '%s'", loginContextName));
+            loginContext = new LoginContext(loginContextName);
+        } catch (LoginException e) {
+            if (fallbackLoginContextName == null) {
+                throw e;
+            }
+            LOGGER.debug(format("Creating LoginContext with fallback name '%s'", fallbackLoginContextName));
+            loginContext = new LoginContext(fallbackLoginContextName);
+            loginContextName = fallbackLoginContextName;
+            fallbackLoginContextName = null;
+        }
+
+        loginContext.login();
+        LOGGER.debug("Login successful");
+        return loginContext.getSubject();
+    }
+
+    private static boolean needNewSubject(final Subject subject) {
+        for (KerberosTicket cur : subject.getPrivateCredentials(KerberosTicket.class)) {
+            if (cur.getServer().getName().startsWith(TGT_PREFIX)) {
+                if (System.currentTimeMillis() > cur.getEndTime().getTime() - MILLISECONDS.convert(5, MINUTES)) {
+                    LOGGER.info("The TGT is close to expiring. Time to reacquire.");
+                    return true;
+                }
+                break;
+            }
+        }
+        return false;
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/KotlinCodecProvider.java b/driver-core/src/main/com/mongodb/KotlinCodecProvider.java
new file mode 100644
index 00000000000..d3bc5fc5604
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/KotlinCodecProvider.java
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb;
+
+import com.mongodb.lang.Nullable;
+import org.bson.codecs.Codec;
+import org.bson.codecs.configuration.CodecProvider;
+import org.bson.codecs.configuration.CodecRegistry;
+import org.bson.codecs.kotlin.ArrayCodecProvider;
+import org.bson.codecs.kotlin.DataClassCodecProvider;
+import org.bson.codecs.kotlinx.KotlinSerializerCodecProvider;
+
+import java.lang.reflect.Type;
+import java.util.Collections;
+import java.util.List;
+
+
+import static org.bson.codecs.configuration.CodecRegistries.fromProviders;
+
+/**
+ * A CodecProvider for Kotlin data classes.
+ * Delegates to {@code org.bson.codecs.kotlinx.KotlinSerializerCodecProvider}
+ * and falls back to {@code org.bson.codecs.kotlin.DataClassCodecProvider}.
+ * If neither the bson-kotlin package nor the bson-kotlinx package is available,
+ * {@linkplain CodecProvider#get(Class, CodecRegistry) provides} {@code null}.
+ *
+ * @since 4.10
+ */
+public class KotlinCodecProvider implements CodecProvider {
+
+    @Nullable
+    private static final CodecProvider KOTLIN_SERIALIZABLE_CODEC_PROVIDER;
+    @Nullable
+    private static final CodecProvider DATA_CLASS_CODEC_PROVIDER;
+
+    static {
+        CodecProvider possibleCodecProvider = null;
+
+        try {
+            Class.forName("org.bson.codecs.kotlinx.KotlinSerializerCodecProvider"); // Kotlinx bson canary test
+            possibleCodecProvider = new KotlinSerializerCodecProvider();
+        } catch (ClassNotFoundException e) {
+            // No kotlinx support
+        }
+        KOTLIN_SERIALIZABLE_CODEC_PROVIDER = possibleCodecProvider;
+
+        possibleCodecProvider = null;
+        try {
+            Class.forName("org.bson.codecs.kotlin.DataClassCodecProvider"); // Kotlin bson canary test
+            possibleCodecProvider = fromProviders(new ArrayCodecProvider(), new DataClassCodecProvider());
+        } catch (ClassNotFoundException e) {
+            // No kotlin data class support
+        }
+        DATA_CLASS_CODEC_PROVIDER = possibleCodecProvider;
+    }
+
+    @Override
+    @Nullable
+    public <T> Codec<T> get(final Class<T> clazz, final CodecRegistry registry) {
+        return get(clazz, Collections.emptyList(), registry);
+    }
+
+    @Override
+    @Nullable
+    public <T> Codec<T> get(final Class<T> clazz, final List<Type> typeArguments, final CodecRegistry registry) {
+        Codec<T> codec = null;
+        if (KOTLIN_SERIALIZABLE_CODEC_PROVIDER != null) {
+            codec = KOTLIN_SERIALIZABLE_CODEC_PROVIDER.get(clazz, typeArguments, registry);
+        }
+
+        if (codec == null && DATA_CLASS_CODEC_PROVIDER != null) {
+            codec = DATA_CLASS_CODEC_PROVIDER.get(clazz, typeArguments, registry);
+        }
+        return codec;
+    }
+
+}
diff --git a/driver-core/src/main/com/mongodb/LoggerSettings.java b/driver-core/src/main/com/mongodb/LoggerSettings.java
new file mode 100644
index 00000000000..fe1fb19ae33
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/LoggerSettings.java
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.mongodb; + +import com.mongodb.annotations.Immutable; + +import java.util.Objects; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * An immutable class representing settings for logging. + * + *
+ * <p>
+ * The driver logs using the SLF4J 1.0 API with a root logger of {@code org.mongodb.driver}. See + * Logging Fundamentals + * for additional information. + *
+ * </p>
+ * + * @since 4.9 + */ +@Immutable +public final class LoggerSettings { + private final int maxDocumentLength; + /** + * Gets a builder for an instance of {@code LoggerSettings}. + * @return the builder + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Creates a builder instance. + * + * @param loggerSettings existing LoggerSettings to default the builder settings on. + * @return a builder + */ + public static Builder builder(final LoggerSettings loggerSettings) { + return builder().applySettings(loggerSettings); + } + + /** + * A builder for an instance of {@code LoggerSettings}. + */ + public static final class Builder { + private int maxDocumentLength = 1000; + private Builder() { + } + + /** + * Applies the loggerSettings to the builder + * + *
+     * <p>Note: Overwrites all existing settings</p>
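+     *
+     * <p>For example (an illustrative sketch; {@code existing} is a previously built {@code LoggerSettings}):</p>
+     * <pre>{@code
+     * LoggerSettings copy = LoggerSettings.builder().applySettings(existing).build();
+     * }</pre>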
+ * + * @param loggerSettings the loggerSettings + * @return this + */ + public Builder applySettings(final LoggerSettings loggerSettings) { + notNull("loggerSettings", loggerSettings); + maxDocumentLength = loggerSettings.maxDocumentLength; + return this; + } + + /** + * Sets the max document length. + * + * @param maxDocumentLength the max document length + * @return this + * @see #getMaxDocumentLength() + */ + public Builder maxDocumentLength(final int maxDocumentLength) { + this.maxDocumentLength = maxDocumentLength; + return this; + } + + /** + * Build an instance of {@code LoggerSettings}. + * @return the logger settings for this builder + */ + public LoggerSettings build() { + return new LoggerSettings(this); + } + } + + /** + * Gets the max length of the extended JSON representation of a BSON document within a log message. + * + *
+     * <p>
+     * For example, when the driver logs a command or its reply via the {@code org.mongodb.driver.protocol.command} SLF4J logger, it
+     * truncates its JSON representation to the maximum length defined by this setting.
+     * </p>
+     *
+     * <p>
+ * Defaults to 1000 characters. + *
+     * </p>
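+     *
+     * <p>For example (an illustrative sketch):</p>
+     * <pre>{@code
+     * LoggerSettings settings = LoggerSettings.builder()
+     *         .maxDocumentLength(5000)
+     *         .build();
+     * }</pre>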
+     *
+     * @return the max document length
+     */
+    public int getMaxDocumentLength() {
+        return maxDocumentLength;
+    }
+
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        LoggerSettings that = (LoggerSettings) o;
+        return maxDocumentLength == that.maxDocumentLength;
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(maxDocumentLength);
+    }
+
+    @Override
+    public String toString() {
+        return "LoggerSettings{"
+                + "maxDocumentLength=" + maxDocumentLength
+                + '}';
+    }
+
+    private LoggerSettings(final Builder builder) {
+        maxDocumentLength = builder.maxDocumentLength;
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/MongoBulkWriteException.java b/driver-core/src/main/com/mongodb/MongoBulkWriteException.java
new file mode 100644
index 00000000000..1d84aa9fa89
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/MongoBulkWriteException.java
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb;
+
+import com.mongodb.bulk.BulkWriteError;
+import com.mongodb.bulk.BulkWriteResult;
+import com.mongodb.bulk.WriteConcernError;
+import com.mongodb.lang.Nullable;
+
+import java.util.List;
+import java.util.Objects;
+import java.util.Set;
+
+/**
+ * An exception that represents all errors associated with a bulk write operation.
+ *
+ * @since 3.0
+ * @serial exclude
+ */
+public class MongoBulkWriteException extends MongoServerException {
+
+    private static final long serialVersionUID = -4345399805987210275L;
+
+    private final BulkWriteResult writeResult;
+    private final List<BulkWriteError> errors;
+    private final ServerAddress serverAddress;
+    private final WriteConcernError writeConcernError;
+
+    /**
+     * Constructs a new instance.
+     *
+     * @param writeResult the write result
+     * @param writeErrors the list of errors
+     * @param writeConcernError the write concern error
+     * @param serverAddress the server address
+     * @param errorLabels any server errorLabels
+     * @since 4.1
+     */
+    public MongoBulkWriteException(final BulkWriteResult writeResult, final List<BulkWriteError> writeErrors,
+                                   @Nullable final WriteConcernError writeConcernError, final ServerAddress serverAddress,
+                                   final Set<String> errorLabels) {
+        super("Bulk write operation error on MongoDB server " + serverAddress + ". "
+                + (writeErrors.isEmpty() ? "" : "Write errors: " + writeErrors + ". ")
+                + (writeConcernError == null ? "" : "Write concern error: " + writeConcernError + ". "), serverAddress);
+        this.writeResult = writeResult;
+        this.errors = writeErrors;
+        this.writeConcernError = writeConcernError;
+        this.serverAddress = serverAddress;
+
+        addLabels(errorLabels);
+    }
+
+    /**
+     * The result of all successfully processed write operations. This will never be null.
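+     *
+     * <p>For example (an illustrative sketch; {@code collection} and {@code requests} are assumed to exist):</p>
+     * <pre>{@code
+     * try {
+     *     collection.bulkWrite(requests);
+     * } catch (MongoBulkWriteException e) {
+     *     BulkWriteResult partialResult = e.getWriteResult();
+     * }
+     * }</pre>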
+     *
+     * @return the bulk write result
+     */
+    public BulkWriteResult getWriteResult() {
+        return writeResult;
+    }
+
+    /**
+     * The list of errors, which will not be null, but may be empty (if the write concern error is not null).
+     *
+     * @return the list of errors
+     */
+    public List<BulkWriteError> getWriteErrors() {
+        return errors;
+    }
+
+
+    /**
+     * The write concern error, which may be null (in which case the list of errors will not be empty).
+     *
+     * @return the write concern error
+     */
+    @Nullable
+    public WriteConcernError getWriteConcernError() {
+        return writeConcernError;
+    }
+
+    /**
+     * The address of the server which performed the bulk write operation.
+     *
+     * @return the address
+     */
+    public ServerAddress getServerAddress() {
+        return serverAddress;
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        MongoBulkWriteException that = (MongoBulkWriteException) o;
+
+        if (!errors.equals(that.errors)) {
+            return false;
+        }
+        if (!serverAddress.equals(that.serverAddress)) {
+            return false;
+        }
+        if (!Objects.equals(writeConcernError, that.writeConcernError)) {
+            return false;
+        }
+        if (!writeResult.equals(that.writeResult)) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = writeResult.hashCode();
+        result = 31 * result + errors.hashCode();
+        result = 31 * result + serverAddress.hashCode();
+        result = 31 * result + (writeConcernError != null ? writeConcernError.hashCode() : 0);
+        return result;
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/MongoChangeStreamException.java b/driver-core/src/main/com/mongodb/MongoChangeStreamException.java
new file mode 100644
index 00000000000..b3a363d2797
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/MongoChangeStreamException.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb;
+
+/**
+ * An exception indicating that a failure occurred when running a {@code $changeStream}.
+ *
+ * @mongodb.driver.dochub core/changestreams Change Streams
+ * @since 3.6
+ */
+public class MongoChangeStreamException extends MongoException {
+    private static final long serialVersionUID = 3621370414132219001L;
+
+    /**
+     * Constructs a new instance.
+     *
+     * @param message the message
+     */
+    public MongoChangeStreamException(final String message) {
+        super(message);
+    }
+
+}
diff --git a/driver-core/src/main/com/mongodb/MongoClientException.java b/driver-core/src/main/com/mongodb/MongoClientException.java
new file mode 100644
index 00000000000..ab19432636c
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/MongoClientException.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.lang.Nullable; + +/** + * A base class for exceptions indicating a failure condition with the MongoClient. + * + * @since 2.12 + */ +public class MongoClientException extends MongoException { + + private static final long serialVersionUID = -5127414714432646066L; + + /** + * Constructs a new instance. + * + * @param message the message + */ + public MongoClientException(final String message) { + super(message); + } + + /** + * Constructs a new instance. + * + * @param message the message + * @param cause the cause + */ + public MongoClientException(final String message, @Nullable final Throwable cause) { + super(message, cause); + } +} diff --git a/driver-core/src/main/com/mongodb/MongoClientSettings.java b/driver-core/src/main/com/mongodb/MongoClientSettings.java new file mode 100644 index 00000000000..1c9ffc5b04c --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoClientSettings.java @@ -0,0 +1,1160 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb; + +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Immutable; +import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.annotations.Reason; +import com.mongodb.client.gridfs.codecs.GridFSFileCodecProvider; +import com.mongodb.client.model.geojson.codecs.GeoJsonCodecProvider; +import com.mongodb.client.model.mql.ExpressionCodecProvider; +import com.mongodb.connection.ClusterSettings; +import com.mongodb.connection.ConnectionPoolSettings; +import com.mongodb.connection.ServerSettings; +import com.mongodb.connection.SocketSettings; +import com.mongodb.connection.SslSettings; +import com.mongodb.connection.TransportSettings; +import com.mongodb.event.CommandListener; +import com.mongodb.lang.Nullable; +import com.mongodb.spi.dns.DnsClient; +import com.mongodb.spi.dns.InetAddressResolver; +import org.bson.UuidRepresentation; +import org.bson.codecs.BsonCodecProvider; +import org.bson.codecs.BsonValueCodecProvider; +import org.bson.codecs.CollectionCodecProvider; +import org.bson.codecs.DocumentCodecProvider; +import org.bson.codecs.EnumCodecProvider; +import org.bson.codecs.IterableCodecProvider; +import org.bson.codecs.JsonObjectCodecProvider; +import org.bson.codecs.MapCodecProvider; +import org.bson.codecs.ValueCodecProvider; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.codecs.jsr310.Jsr310CodecProvider; + +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.TimeoutSettings.convertAndValidateTimeout; +import static java.util.Arrays.asList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; + + +/** + * Various settings to control the behavior of a {@code MongoClient}. 
+ *
+ * @since 3.7
+ */
+@Immutable
+public final class MongoClientSettings {
+    private static final CodecRegistry DEFAULT_CODEC_REGISTRY =
+            fromProviders(asList(new ValueCodecProvider(),
+                    new BsonValueCodecProvider(),
+                    new DBRefCodecProvider(),
+                    new DBObjectCodecProvider(),
+                    new DocumentCodecProvider(new DocumentToDBRefTransformer()),
+                    new CollectionCodecProvider(new DocumentToDBRefTransformer()),
+                    new IterableCodecProvider(new DocumentToDBRefTransformer()),
+                    new MapCodecProvider(new DocumentToDBRefTransformer()),
+                    new GeoJsonCodecProvider(),
+                    new GridFSFileCodecProvider(),
+                    new Jsr310CodecProvider(),
+                    new JsonObjectCodecProvider(),
+                    new BsonCodecProvider(),
+                    new ExpressionCodecProvider(),
+                    new Jep395RecordCodecProvider(),
+                    new KotlinCodecProvider(),
+                    new EnumCodecProvider()));
+
+    private final ReadPreference readPreference;
+    private final WriteConcern writeConcern;
+    private final boolean retryWrites;
+    private final boolean retryReads;
+    private final ReadConcern readConcern;
+    private final MongoCredential credential;
+    private final TransportSettings transportSettings;
+    private final List<CommandListener> commandListeners;
+    private final CodecRegistry codecRegistry;
+    private final LoggerSettings loggerSettings;
+    private final ClusterSettings clusterSettings;
+    private final SocketSettings socketSettings;
+    private final SocketSettings heartbeatSocketSettings;
+    private final ConnectionPoolSettings connectionPoolSettings;
+    private final ServerSettings serverSettings;
+    private final SslSettings sslSettings;
+    private final String applicationName;
+    private final List<MongoCompressor> compressorList;
+    private final UuidRepresentation uuidRepresentation;
+    private final ServerApi serverApi;
+
+    private final AutoEncryptionSettings autoEncryptionSettings;
+    private final boolean heartbeatSocketTimeoutSetExplicitly;
+    private final boolean heartbeatConnectTimeoutSetExplicitly;
+
+    private final ContextProvider contextProvider;
+    private final DnsClient dnsClient;
+    private final InetAddressResolver inetAddressResolver;
+    @Nullable
+    private final Long timeoutMS;
+
+    /**
+     * Gets the default codec registry. It includes the following providers:
+     *
+     * <ul>
+     *     <li>{@link org.bson.codecs.ValueCodecProvider}</li>
+     *     <li>{@link org.bson.codecs.BsonValueCodecProvider}</li>
+     *     <li>{@link com.mongodb.DBRefCodecProvider}</li>
+     *     <li>{@link com.mongodb.DBObjectCodecProvider}</li>
+     *     <li>{@link org.bson.codecs.DocumentCodecProvider}</li>
+     *     <li>{@link org.bson.codecs.CollectionCodecProvider}</li>
+     *     <li>{@link org.bson.codecs.IterableCodecProvider}</li>
+     *     <li>{@link org.bson.codecs.MapCodecProvider}</li>
+     *     <li>{@link com.mongodb.client.model.geojson.codecs.GeoJsonCodecProvider}</li>
+     *     <li>{@link com.mongodb.client.gridfs.codecs.GridFSFileCodecProvider}</li>
+     *     <li>{@link org.bson.codecs.jsr310.Jsr310CodecProvider}</li>
+     *     <li>{@link org.bson.codecs.JsonObjectCodecProvider}</li>
+     *     <li>{@link org.bson.codecs.BsonCodecProvider}</li>
+     *     <li>{@link org.bson.codecs.EnumCodecProvider}</li>
+     *     <li>{@link ExpressionCodecProvider}</li>
+     *     <li>{@link com.mongodb.Jep395RecordCodecProvider}</li>
+     *     <li>{@link com.mongodb.KotlinCodecProvider}</li>
+     * </ul>
+     *

+ * Additional providers may be added in a future release. + *

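+     * <p>
+     * For example, a sketch of combining this registry with application codecs ({@code MyValueCodec} is a hypothetical codec):
+     * </p>
+     * <pre>{@code
+     * CodecRegistry registry = CodecRegistries.fromRegistries(
+     *         MongoClientSettings.getDefaultCodecRegistry(),
+     *         CodecRegistries.fromCodecs(new MyValueCodec()));
+     * }</pre>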
+ * + * @return the default codec registry + */ + public static CodecRegistry getDefaultCodecRegistry() { + return DEFAULT_CODEC_REGISTRY; + } + + /** + * Convenience method to create a Builder. + * + * @return a builder + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Convenience method to create a from an existing {@code MongoClientSettings}. + * + * @param settings create a builder from existing settings + * @return a builder + */ + public static Builder builder(final MongoClientSettings settings) { + return new Builder(settings); + } + + /** + * Gets the {@link DnsClient} to use for resolving DNS queries. + * + *
+     * <p>
+     * If set, it will be used to resolve SRV and TXT records for mongodb+srv connections. Otherwise,
+     * implementations of {@link com.mongodb.spi.dns.DnsClientProvider} will be discovered via {@link java.util.ServiceLoader}.
+     * If no implementations are discovered, then {@code com.sun.jndi.dns.DnsContextFactory} will be used to resolve these records.
+     * </p>
+     * <p>
+     * If applying a connection string to these settings, care must be taken to also pass the same {@link DnsClient} as an argument to
+     * the {@link ConnectionString} constructor.
+     * </p>
+     *
+     * @return the DNS client
+     * @since 4.10
+     * @see ConnectionString#ConnectionString(String, DnsClient)
+     */
+    @Nullable
+    public DnsClient getDnsClient() {
+        return dnsClient;
+    }
+
+    /**
+     * Gets the explicitly set {@link InetAddressResolver} to use for looking up the {@link java.net.InetAddress} instances for each host.
+     *
+     * @return the {@link java.net.InetAddress} resolver
+     * @see Builder#inetAddressResolver(InetAddressResolver)
+     * @since 4.10
+     */
+    @Nullable
+    public InetAddressResolver getInetAddressResolver() {
+        return inetAddressResolver;
+    }
+
+    /**
+     * A builder for {@code MongoClientSettings} so that {@code MongoClientSettings} can be immutable, and to support easier construction
+     * through chaining.
+     */
+    @NotThreadSafe
+    public static final class Builder {
+        private ReadPreference readPreference = ReadPreference.primary();
+        private WriteConcern writeConcern = WriteConcern.ACKNOWLEDGED;
+        private boolean retryWrites = true;
+        private boolean retryReads = true;
+        private ReadConcern readConcern = ReadConcern.DEFAULT;
+        private CodecRegistry codecRegistry = MongoClientSettings.getDefaultCodecRegistry();
+        private TransportSettings transportSettings;
+        private List<CommandListener> commandListeners = new ArrayList<>();
+
+        private final LoggerSettings.Builder loggerSettingsBuilder = LoggerSettings.builder();
+        private final ClusterSettings.Builder clusterSettingsBuilder = ClusterSettings.builder();
+        private final SocketSettings.Builder socketSettingsBuilder = SocketSettings.builder();
+        private final ConnectionPoolSettings.Builder connectionPoolSettingsBuilder = ConnectionPoolSettings.builder();
+        private final ServerSettings.Builder serverSettingsBuilder = ServerSettings.builder();
+        private final SslSettings.Builder sslSettingsBuilder = SslSettings.builder();
+        private MongoCredential credential;
+        private String applicationName;
+        private List<MongoCompressor> compressorList = Collections.emptyList();
+        private UuidRepresentation uuidRepresentation = UuidRepresentation.UNSPECIFIED;
+        private ServerApi serverApi;
+
+        private AutoEncryptionSettings autoEncryptionSettings;
+
+        private int heartbeatConnectTimeoutMS;
+        private int heartbeatSocketTimeoutMS;
+        private Long timeoutMS;
+
+        private ContextProvider contextProvider;
+        private DnsClient dnsClient;
+        private InetAddressResolver inetAddressResolver;
+
+        private Builder() {
+        }
+
+        private Builder(final MongoClientSettings settings) {
+            notNull("settings", settings);
+            applicationName = settings.getApplicationName();
+            commandListeners = new ArrayList<>(settings.getCommandListeners());
+            compressorList = new ArrayList<>(settings.getCompressorList());
+            codecRegistry = settings.getCodecRegistry();
+            readPreference = settings.getReadPreference();
+            writeConcern = settings.getWriteConcern();
+            retryWrites = settings.getRetryWrites();
+            retryReads = settings.getRetryReads();
+            readConcern = settings.getReadConcern();
+            credential = settings.getCredential();
+            uuidRepresentation = settings.getUuidRepresentation();
+            serverApi = settings.getServerApi();
+            dnsClient = settings.getDnsClient();
+            timeoutMS = settings.getTimeout(MILLISECONDS);
+            inetAddressResolver = settings.getInetAddressResolver();
+            transportSettings = settings.getTransportSettings();
+            autoEncryptionSettings = settings.getAutoEncryptionSettings();
+            contextProvider = settings.getContextProvider();
+
loggerSettingsBuilder.applySettings(settings.getLoggerSettings()); + clusterSettingsBuilder.applySettings(settings.getClusterSettings()); + serverSettingsBuilder.applySettings(settings.getServerSettings()); + socketSettingsBuilder.applySettings(settings.getSocketSettings()); + connectionPoolSettingsBuilder.applySettings(settings.getConnectionPoolSettings()); + sslSettingsBuilder.applySettings(settings.getSslSettings()); + + if (settings.heartbeatConnectTimeoutSetExplicitly) { + heartbeatConnectTimeoutMS = settings.heartbeatSocketSettings.getConnectTimeout(MILLISECONDS); + } + if (settings.heartbeatSocketTimeoutSetExplicitly) { + heartbeatSocketTimeoutMS = settings.heartbeatSocketSettings.getReadTimeout(MILLISECONDS); + } + } + + /** + * Takes the settings from the given {@code ConnectionString} and applies them to the builder + * + * @param connectionString the connection string containing details of how to connect to MongoDB + * @return this + */ + public Builder applyConnectionString(final ConnectionString connectionString) { + if (connectionString.getApplicationName() != null) { + applicationName = connectionString.getApplicationName(); + } + clusterSettingsBuilder.applyConnectionString(connectionString); + if (!connectionString.getCompressorList().isEmpty()) { + compressorList = connectionString.getCompressorList(); + } + connectionPoolSettingsBuilder.applyConnectionString(connectionString); + if (connectionString.getCredential() != null) { + credential = connectionString.getCredential(); + } + if (connectionString.getReadConcern() != null) { + readConcern = connectionString.getReadConcern(); + } + if (connectionString.getReadPreference() != null) { + readPreference = connectionString.getReadPreference(); + } + + Boolean retryWritesValue = connectionString.getRetryWritesValue(); + if (retryWritesValue != null) { + retryWrites = retryWritesValue; + } + Boolean retryReadsValue = connectionString.getRetryReads(); + if (retryReadsValue != null) { + retryReads = retryReadsValue; + } + if (connectionString.getUuidRepresentation() != null) { + uuidRepresentation = connectionString.getUuidRepresentation(); + } + + serverSettingsBuilder.applyConnectionString(connectionString); + socketSettingsBuilder.applyConnectionString(connectionString); + sslSettingsBuilder.applyConnectionString(connectionString); + if (connectionString.getWriteConcern() != null) { + writeConcern = connectionString.getWriteConcern(); + } + if (connectionString.getTimeout() != null) { + timeoutMS = connectionString.getTimeout(); + } + return this; + } + + /** + * Applies the {@link LoggerSettings.Builder} block and then sets the loggerSettings. + * + * @param block the block to apply to the LoggerSettings. + * @return this + * @see MongoClientSettings#getLoggerSettings() + * @since 4.9 + */ + public Builder applyToLoggerSettings(final Block block) { + notNull("block", block).apply(loggerSettingsBuilder); + return this; + } + + /** + * Applies the {@link ClusterSettings.Builder} block and then sets the clusterSettings. + * + * @param block the block to apply to the ClusterSettings. + * @return this + * @see MongoClientSettings#getClusterSettings() + */ + public Builder applyToClusterSettings(final Block block) { + notNull("block", block).apply(clusterSettingsBuilder); + return this; + } + + /** + * Applies the {@link SocketSettings.Builder} block and then sets the socketSettings. + * + * @param block the block to apply to the SocketSettings. 
+ * @return this + * @see MongoClientSettings#getSocketSettings() + */ + public Builder applyToSocketSettings(final Block block) { + notNull("block", block).apply(socketSettingsBuilder); + return this; + } + + /** + * Applies the {@link ConnectionPoolSettings.Builder} block and then sets the connectionPoolSettings. + * + * @param block the block to apply to the ConnectionPoolSettings. + * @return this + * @see MongoClientSettings#getConnectionPoolSettings() + */ + public Builder applyToConnectionPoolSettings(final Block block) { + notNull("block", block).apply(connectionPoolSettingsBuilder); + return this; + } + + /** + * Applies the {@link ServerSettings.Builder} block and then sets the serverSettings. + * + * @param block the block to apply to the ServerSettings. + * @return this + * @see MongoClientSettings#getServerSettings() + */ + public Builder applyToServerSettings(final Block block) { + notNull("block", block).apply(serverSettingsBuilder); + return this; + } + + /** + * Applies the {@link SslSettings.Builder} block and then sets the sslSettings. + * + * @param block the block to apply to the SslSettings. + * @return this + * @see MongoClientSettings#getSslSettings() + */ + public Builder applyToSslSettings(final Block block) { + notNull("block", block).apply(sslSettingsBuilder); + return this; + } + + /** + * Sets the read preference. + * + * @param readPreference read preference + * @return this + * @see MongoClientSettings#getReadPreference() + */ + public Builder readPreference(final ReadPreference readPreference) { + this.readPreference = notNull("readPreference", readPreference); + return this; + } + + /** + * Sets the write concern. + * + * @param writeConcern the write concern + * @return this + * @see MongoClientSettings#getWriteConcern() + */ + public Builder writeConcern(final WriteConcern writeConcern) { + this.writeConcern = notNull("writeConcern", writeConcern); + return this; + } + + /** + * Sets whether writes should be retried if they fail due to a network error. + * + *
+         * <p>
+         * Starting with the 3.11.0 release, the default value is true.
+         * </p>
+ * + * @param retryWrites sets if writes should be retried if they fail due to a network error. + * @return this + * @see #getRetryWrites() + * @mongodb.server.release 3.6 + */ + public Builder retryWrites(final boolean retryWrites) { + this.retryWrites = retryWrites; + return this; + } + + /** + * Sets whether reads should be retried if they fail due to a network error. + * + * @param retryReads sets if reads should be retried if they fail due to a network error. + * @return this + * @see #getRetryReads() + * @since 3.11 + * @mongodb.server.release 3.6 + */ + public Builder retryReads(final boolean retryReads) { + this.retryReads = retryReads; + return this; + } + + /** + * Sets the read concern. + * + * @param readConcern the read concern + * @return this + * @mongodb.server.release 3.2 + * @mongodb.driver.manual reference/readConcern/ Read Concern + */ + public Builder readConcern(final ReadConcern readConcern) { + this.readConcern = notNull("readConcern", readConcern); + return this; + } + + /** + * Sets the credential. + * + * @param credential the credential + * @return this + */ + public Builder credential(final MongoCredential credential) { + this.credential = notNull("credential", credential); + return this; + } + + /** + * Sets the codec registry + * + *
+         * <p>
+         * The {@link CodecRegistry} configured by this method is effectively treated by the driver as an instance of
+         * {@link org.bson.codecs.configuration.CodecProvider}, which {@link CodecRegistry} extends. So there is no benefit to defining
+         * a class that implements {@link CodecRegistry}. Rather, an application should always create {@link CodecRegistry} instances
+         * using the factory methods in {@link org.bson.codecs.configuration.CodecRegistries}.
+         * </p>
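+         * <p>
+         * For example, a sketch registering POJO support (assuming the optional {@code org.bson.codecs.pojo} package is on the
+         * classpath):
+         * </p>
+         * <pre>{@code
+         * MongoClientSettings settings = MongoClientSettings.builder()
+         *         .codecRegistry(CodecRegistries.fromRegistries(
+         *                 MongoClientSettings.getDefaultCodecRegistry(),
+         *                 CodecRegistries.fromProviders(PojoCodecProvider.builder().automatic(true).build())))
+         *         .build();
+         * }</pre>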
+ * + * @param codecRegistry the codec registry + * @return this + * @see MongoClientSettings#getCodecRegistry() + * @see org.bson.codecs.configuration.CodecRegistries + */ + public Builder codecRegistry(final CodecRegistry codecRegistry) { + this.codecRegistry = notNull("codecRegistry", codecRegistry); + return this; + } + + /** + * Sets the {@link TransportSettings} to apply. + * + * @param transportSettings the transport settings + * @return this + * @see #getTransportSettings() + */ + public Builder transportSettings(final TransportSettings transportSettings) { + this.transportSettings = notNull("transportSettings", transportSettings); + return this; + } + + /** + * Adds the given command listener. + * + * @param commandListener the command listener + * @return this + */ + public Builder addCommandListener(final CommandListener commandListener) { + notNull("commandListener", commandListener); + commandListeners.add(commandListener); + return this; + } + + /** + * Sets the command listeners + * + * @param commandListeners the list of command listeners + * @return this + */ + public Builder commandListenerList(final List commandListeners) { + notNull("commandListeners", commandListeners); + this.commandListeners = new ArrayList<>(commandListeners); + return this; + } + + /** + * Sets the logical name of the application using this MongoClient. The application name may be used by the client to identify + * the application to the server, for use in server logs, slow query logs, and profile collection. + * + * @param applicationName the logical name of the application using this MongoClient. It may be null. + * The UTF-8 encoding may not exceed 128 bytes. + * @return this + * @see #getApplicationName() + * @mongodb.server.release 3.4 + */ + public Builder applicationName(@Nullable final String applicationName) { + if (applicationName != null) { + isTrueArgument("applicationName UTF-8 encoding length <= 128", + applicationName.getBytes(StandardCharsets.UTF_8).length <= 128); + } + this.applicationName = applicationName; + return this; + } + + /** + * Sets the compressors to use for compressing messages to the server. The driver will use the first compressor in the list + * that the server is configured to support. + * + * @param compressorList the list of compressors to request + * @return this + * @see #getCompressorList() + * @mongodb.server.release 3.4 + */ + public Builder compressorList(final List compressorList) { + notNull("compressorList", compressorList); + this.compressorList = new ArrayList<>(compressorList); + return this; + } + + /** + * Sets the UUID representation to use when encoding instances of {@link java.util.UUID} and when decoding BSON binary values with + * subtype of 3. + * + *
+         * <p>
+         * See {@link #getUuidRepresentation()} for recommendations on setting this value.
+         * </p>
+ * + * @param uuidRepresentation the UUID representation, which may not be null + * @return this + * @since 3.12 + */ + public Builder uuidRepresentation(final UuidRepresentation uuidRepresentation) { + this.uuidRepresentation = notNull("uuidRepresentation", uuidRepresentation); + return this; + } + + /** + * Sets the server API to use when sending commands to the server. + *
+         * <p>
+         * This is required for some MongoDB deployments.
+         * </p>
+ * + * @param serverApi the server API, which may not be null + * @return this + * @since 4.3 + */ + public Builder serverApi(final ServerApi serverApi) { + this.serverApi = notNull("serverApi", serverApi); + return this; + } + + /** + * Sets the auto-encryption settings + *
+         * <p>
+         * A separate, internal {@code MongoClient} is created if any of the following are true:
+         * </p>
+         * <ul>
+         *     <li>{@code AutoEncryptionSettings.keyVaultClient} is not passed</li>
+         *     <li>{@code AutoEncryptionSettings.bypassAutomaticEncryption} is {@code false}</li>
+         * </ul>
+ * + * If an internal {@code MongoClient} is created, it is configured with the same + * options as the parent {@code MongoClient} except {@code minPoolSize} is set to {@code 0} + * and {@code AutoEncryptionSettings} is omitted. + * + * @param autoEncryptionSettings the auto-encryption settings + * @return this + * @since 3.11 + * @see #getAutoEncryptionSettings() + */ + public Builder autoEncryptionSettings(@Nullable final AutoEncryptionSettings autoEncryptionSettings) { + this.autoEncryptionSettings = autoEncryptionSettings; + return this; + } + + /** + * Sets the context provider + * + *
+         * <p>
+         * When used with the synchronous driver, this must be an instance of {@code com.mongodb.client.SynchronousContextProvider}.
+         * When used with the reactive streams driver, this must be an instance of
+         * {@code com.mongodb.reactivestreams.client.ReactiveContextProvider}.
+         * </p>
+ * + * @param contextProvider the context provider + * @return this + * @since 4.4 + */ + public Builder contextProvider(@Nullable final ContextProvider contextProvider) { + this.contextProvider = contextProvider; + return this; + } + + /** + * Sets the {@link DnsClient} to use for resolving DNS queries. + * + *
+         * <p>
+         * If set, it will be used to resolve SRV and TXT records for mongodb+srv connections. Otherwise, an
+         * implementation of {@link com.mongodb.spi.dns.DnsClientProvider} will be discovered via {@link java.util.ServiceLoader}
+         * and used to create an instance of {@link DnsClient}. If no implementation is discovered, then
+         * {@code com.sun.jndi.dns.DnsContextFactory} will be used to resolve these records.
+         * </p>
+         * <p>
+         * If {@linkplain #applyConnectionString(ConnectionString) applying a connection string to these settings}, care must be
+         * taken to also pass the same {@link DnsClient} as an argument to the {@link ConnectionString} constructor.
+         * </p>
+         *
+         * @param dnsClient the DNS client
+         * @return this
+         * @since 4.10
+         * @see ConnectionString#ConnectionString(String, DnsClient)
+         */
+        public Builder dnsClient(@Nullable final DnsClient dnsClient) {
+            this.dnsClient = dnsClient;
+            return this;
+        }
+
+        /**
+         * Sets the {@link InetAddressResolver} to use for looking up the {@link java.net.InetAddress} instances for each host.
+         *
+         * <p>
+         * If set, it will be used to look up the {@link java.net.InetAddress} for each host, via
+         * {@link InetAddressResolver#lookupByName(String)}. Otherwise,
+         * an implementation of {@link com.mongodb.spi.dns.InetAddressResolverProvider} will be discovered via
+         * {@link java.util.ServiceLoader} and used to create an instance of {@link InetAddressResolver}. If no implementation is
+         * discovered, {@link java.net.InetAddress#getAllByName(String)} will be used to look up the {@link java.net.InetAddress}
+         * instances for a host.
+         * </p>
+         *
+         * @param inetAddressResolver the {@link java.net.InetAddress} resolver
+         * @return this
+         * @see #getInetAddressResolver()
+         * @since 4.10
+         */
+        public Builder inetAddressResolver(@Nullable final InetAddressResolver inetAddressResolver) {
+            this.inetAddressResolver = inetAddressResolver;
+            return this;
+        }
+
+        /**
+         * Sets the time limit for the full execution of an operation.
+         *
+         * <ul>
+         *     <li>{@code null} means that the timeout mechanism for operations will defer to using:
+         *         <ul>
+         *             <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to
+         *             become available</li>
+         *             <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+         *             <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+         *             <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+         *             See: cursor.maxTimeMS.</li>
+         *             <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to
+         *             execute. See: {@link TransactionOptions#getMaxCommitTime}.</li>
+         *         </ul>
+         *     </li>
+         *     <li>{@code 0} means infinite timeout.</li>
+         *     <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+         * </ul>
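+         * <p>
+         * For example, a sketch that caps each operation at two seconds (an illustrative value):
+         * </p>
+         * <pre>{@code
+         * MongoClientSettings settings = MongoClientSettings.builder()
+         *         .timeout(2, TimeUnit.SECONDS)
+         *         .build();
+         * }</pre>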
+ * + * @param timeout the timeout + * @param timeUnit the time unit + * @return this + * @since 5.2 + * @see #getTimeout + */ + @Alpha(Reason.CLIENT) + public Builder timeout(final long timeout, final TimeUnit timeUnit) { + this.timeoutMS = convertAndValidateTimeout(timeout, timeUnit); + return this; + } + + // Package-private to provide interop with MongoClientOptions + Builder heartbeatConnectTimeoutMS(final int heartbeatConnectTimeoutMS) { + this.heartbeatConnectTimeoutMS = heartbeatConnectTimeoutMS; + return this; + } + + // Package-private to provide interop with MongoClientOptions + Builder heartbeatSocketTimeoutMS(final int heartbeatSocketTimeoutMS) { + this.heartbeatSocketTimeoutMS = heartbeatSocketTimeoutMS; + return this; + } + + /** + * Build an instance of {@code MongoClientSettings}. + * + * @return the settings from this builder + */ + public MongoClientSettings build() { + return new MongoClientSettings(this); + } + } + + /** + * The read preference to use for queries, map-reduce, aggregation, and count. + * + *
+     * <p>
+     * Default is {@code ReadPreference.primary()}.
+     * </p>
+ * + * @return the read preference + * @see ReadPreference#primary() + */ + public ReadPreference getReadPreference() { + return readPreference; + } + + /** + * Gets the credential. + * + * @return the credential, which may be null + */ + @Nullable + public MongoCredential getCredential() { + return credential; + } + + /** + * The write concern to use. + * + *
+     * <p>
+     * Default is {@code WriteConcern.ACKNOWLEDGED}.
+     * </p>
+ * + * @return the write concern + * @see Builder#writeConcern(WriteConcern) + */ + public WriteConcern getWriteConcern() { + return writeConcern; + } + + /** + * Returns true if writes should be retried if they fail due to a network error or other retryable error. + * + *
+     * <p>
+     * Starting with the 3.11.0 release, the default value is true.
+     * </p>
+ * + * @return the retryWrites value + * @mongodb.server.release 3.6 + */ + public boolean getRetryWrites() { + return retryWrites; + } + + /** + * Returns true if reads should be retried if they fail due to a network error or other retryable error. The default value is true. + * + * @return the retryReads value + * @since 3.11 + * @mongodb.server.release 3.6 + */ + public boolean getRetryReads() { + return retryReads; + } + + /** + * The read concern to use. + * + * @return the read concern + * @mongodb.server.release 3.2 + * @mongodb.driver.manual reference/readConcern/ Read Concern + */ + public ReadConcern getReadConcern() { + return readConcern; + } + + /** + * The codec registry to use, or null if not set. + * + * @return the codec registry + */ + public CodecRegistry getCodecRegistry() { + return codecRegistry; + } + + /** + * Gets the settings for the underlying transport implementation + * + * @return the settings for the underlying transport implementation + * + * @since 4.11 + * @see Builder#transportSettings(TransportSettings) + */ + @Nullable + public TransportSettings getTransportSettings() { + return transportSettings; + } + + /** + * Gets the list of added {@code CommandListener}. + * + *
+     * <p>
+     * The default is an empty list.
+     * </p>
+ * + * @return the unmodifiable list of command listeners + */ + public List getCommandListeners() { + return Collections.unmodifiableList(commandListeners); + } + + /** + * Gets the logical name of the application using this MongoClient. The application name may be used by the client to identify + * the application to the server, for use in server logs, slow query logs, and profile collection. + * + *
+     * <p>
+     * Default is null.
+     * </p>
+ * + * @return the application name, which may be null + * @see Builder#applicationName(String) + * @mongodb.server.release 3.4 + */ + @Nullable + public String getApplicationName() { + return applicationName; + } + + /** + * Gets the compressors to use for compressing messages to the server. The driver will use the first compressor in the list + * that the server is configured to support. + * + *
+     * <p>
+     * Default is the empty list.
+     * </p>
+ * + * @return the compressors + * @mongodb.server.release 3.4 + */ + public List getCompressorList() { + return Collections.unmodifiableList(compressorList); + } + + /** + * Gets the UUID representation to use when encoding instances of {@link java.util.UUID} and when decoding BSON binary values with + * subtype of 3. + * + *
+     * <p>
+     * The default is {@link UuidRepresentation#UNSPECIFIED}. If your application stores UUID values in MongoDB, you must set this
+     * value to the desired representation. New applications should prefer {@link UuidRepresentation#STANDARD}, while existing Java
+     * applications should prefer {@link UuidRepresentation#JAVA_LEGACY}. Applications wishing to interoperate with existing Python or
+     * .NET applications should prefer {@link UuidRepresentation#PYTHON_LEGACY} or {@link UuidRepresentation#C_SHARP_LEGACY},
+     * respectively. Applications that do not store UUID values in MongoDB don't need to set this value.
+     * </p>
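+     * <p>
+     * For example, a new application would typically configure:
+     * </p>
+     * <pre>{@code
+     * MongoClientSettings settings = MongoClientSettings.builder()
+     *         .uuidRepresentation(UuidRepresentation.STANDARD)
+     *         .build();
+     * }</pre>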
+ * + * @return the UUID representation, which may not be null + * @since 3.12 + */ + public UuidRepresentation getUuidRepresentation() { + return uuidRepresentation; + } + + /** + * Gets the server API to use when sending commands to the server. + * + * @return the server API, which may be null + * @since 4.3 + */ + @Nullable + public ServerApi getServerApi() { + return serverApi; + } + + /** + * The time limit for the full execution of an operation. + * + *
+     * <p>
+     * If set, the following deprecated options will be ignored:
+     * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}.
+     * </p>
+     *
+     * <ul>
+     *     <li>{@code null} means that the timeout mechanism for operations will defer to using:
+     *         <ul>
+     *             <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to
+     *             become available</li>
+     *             <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+     *             <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+     *             <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+     *             See: cursor.maxTimeMS.</li>
+     *             <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to
+     *             execute. See: {@link TransactionOptions#getMaxCommitTime}.</li>
+     *         </ul>
+     *     </li>
+     *     <li>{@code 0} means infinite timeout.</li>
+     *     <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
+ * + * @param timeUnit the time unit + * @return the timeout in the given time unit + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + @Nullable + public Long getTimeout(final TimeUnit timeUnit) { + return timeoutMS == null ? null : timeUnit.convert(timeoutMS, MILLISECONDS); + } + + /** + * Gets the auto-encryption settings. + *
+     * <p>
+     * In-use encryption enables an application to specify what fields in a collection must be
+     * encrypted, and the driver automatically encrypts commands and decrypts results.
+     * </p>
+     * <p>
+     * Automatic encryption is an enterprise-only feature that only applies to operations on a collection. Automatic encryption is not
+     * supported for operations on a database or view and will result in an error. To bypass automatic encryption,
+     * set bypassAutoEncryption=true in ClientSideEncryptionOptions.
+     * </p>
+     * <p>
+     * Explicit encryption/decryption and automatic decryption is a community feature, enabled with the new
+     * {@code com.mongodb.client.vault.ClientEncryption} type. A MongoClient configured with bypassAutoEncryption=true will still
+     * automatically decrypt.
+     * </p>
+     * <p>
+     * Automatic encryption requires the authenticated user to have the listCollections privilege action.
+     * </p>
+     * <p>
+     * Supplying an {@code encryptedFieldsMap} provides more security than relying on an encryptedFields obtained from the server.
+     * It protects against a malicious server advertising false encryptedFields.
+     * </p>
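+     * <p>
+     * For example, a sketch of a local-KMS configuration ({@code localMasterKey} is an assumed, externally supplied 96-byte key):
+     * </p>
+     * <pre>{@code
+     * Map<String, Map<String, Object>> kmsProviders = new HashMap<>();
+     * kmsProviders.put("local", Collections.singletonMap("key", localMasterKey));
+     * AutoEncryptionSettings autoEncryptionSettings = AutoEncryptionSettings.builder()
+     *         .keyVaultNamespace("encryption.__keyVault")
+     *         .kmsProviders(kmsProviders)
+     *         .build();
+     * }</pre>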
+ * + * @return the auto-encryption settings, which may be null + * @since 3.11 + */ + @Nullable + public AutoEncryptionSettings getAutoEncryptionSettings() { + return autoEncryptionSettings; + } + + /** + * Gets the logger settings. + * + * @return the logger settings + * @since 4.9 + */ + public LoggerSettings getLoggerSettings() { + return loggerSettings; + } + + /** + * Gets the cluster settings. + * + * @return the cluster settings + */ + public ClusterSettings getClusterSettings() { + return clusterSettings; + } + + /** + * Gets the SSL settings. + * + * @return the SSL settings + */ + public SslSettings getSslSettings() { + return sslSettings; + } + + /** + * Gets the connection-specific settings wrapped in a settings object. This settings object uses the values for connectTimeout + * and socketTimeout. + * + * @return a SocketSettings object populated with the connection settings from this {@code MongoClientSettings} instance. + * @see SocketSettings + */ + public SocketSettings getSocketSettings() { + return socketSettings; + } + + /** + * Gets the connection settings for the heartbeat thread (the background task that checks the state of the cluster) wrapped in a + * settings object. + * + * @return the SocketSettings for the heartbeat thread + * @see SocketSettings + */ + public SocketSettings getHeartbeatSocketSettings() { + return heartbeatSocketSettings; + } + + /** + * Gets the settings for the connection provider in a settings object. This settings object wraps the values for minConnectionPoolSize, + * maxConnectionPoolSize, maxWaitTime, maxConnectionIdleTime and maxConnectionLifeTime. + * + * @return a ConnectionPoolSettings populated with the settings from this {@code MongoClientSettings} instance that relate to the + * connection provider. + * @see Builder#applyToConnectionPoolSettings(Block) + */ + public ConnectionPoolSettings getConnectionPoolSettings() { + return connectionPoolSettings; + } + + /** + * Gets the server-specific settings wrapped in a settings object. This settings object uses the heartbeatFrequency and + * minHeartbeatFrequency values from this {@code MongoClientSettings} instance. 
+ * + * @return a ServerSettings + * @see ServerSettings + */ + public ServerSettings getServerSettings() { + return serverSettings; + } + + /** + * Get the context provider + * + * @return the context provider + * @since 4.4 + */ + @Nullable + public ContextProvider getContextProvider() { + return contextProvider; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + MongoClientSettings that = (MongoClientSettings) o; + return retryWrites == that.retryWrites + && retryReads == that.retryReads + && heartbeatSocketTimeoutSetExplicitly == that.heartbeatSocketTimeoutSetExplicitly + && heartbeatConnectTimeoutSetExplicitly == that.heartbeatConnectTimeoutSetExplicitly + && Objects.equals(readPreference, that.readPreference) + && Objects.equals(writeConcern, that.writeConcern) + && Objects.equals(readConcern, that.readConcern) + && Objects.equals(credential, that.credential) + && Objects.equals(transportSettings, that.transportSettings) + && Objects.equals(commandListeners, that.commandListeners) + && Objects.equals(codecRegistry, that.codecRegistry) + && Objects.equals(loggerSettings, that.loggerSettings) + && Objects.equals(clusterSettings, that.clusterSettings) + && Objects.equals(socketSettings, that.socketSettings) + && Objects.equals(heartbeatSocketSettings, that.heartbeatSocketSettings) + && Objects.equals(connectionPoolSettings, that.connectionPoolSettings) + && Objects.equals(serverSettings, that.serverSettings) + && Objects.equals(sslSettings, that.sslSettings) + && Objects.equals(applicationName, that.applicationName) + && Objects.equals(compressorList, that.compressorList) + && uuidRepresentation == that.uuidRepresentation + && Objects.equals(serverApi, that.serverApi) + && Objects.equals(autoEncryptionSettings, that.autoEncryptionSettings) + && Objects.equals(dnsClient, that.dnsClient) + && Objects.equals(inetAddressResolver, that.inetAddressResolver) + && Objects.equals(contextProvider, that.contextProvider) + && Objects.equals(timeoutMS, that.timeoutMS); + } + + @Override + public int hashCode() { + return Objects.hash(readPreference, writeConcern, retryWrites, retryReads, readConcern, credential, transportSettings, + commandListeners, codecRegistry, loggerSettings, clusterSettings, socketSettings, + heartbeatSocketSettings, connectionPoolSettings, serverSettings, sslSettings, applicationName, compressorList, + uuidRepresentation, serverApi, autoEncryptionSettings, heartbeatSocketTimeoutSetExplicitly, + heartbeatConnectTimeoutSetExplicitly, dnsClient, inetAddressResolver, contextProvider, timeoutMS); + + } + + @Override + public String toString() { + return "MongoClientSettings{" + + "readPreference=" + readPreference + + ", writeConcern=" + writeConcern + + ", retryWrites=" + retryWrites + + ", retryReads=" + retryReads + + ", readConcern=" + readConcern + + ", credential=" + credential + + ", transportSettings=" + transportSettings + + ", commandListeners=" + commandListeners + + ", codecRegistry=" + codecRegistry + + ", loggerSettings=" + loggerSettings + + ", clusterSettings=" + clusterSettings + + ", socketSettings=" + socketSettings + + ", heartbeatSocketSettings=" + heartbeatSocketSettings + + ", connectionPoolSettings=" + connectionPoolSettings + + ", serverSettings=" + serverSettings + + ", sslSettings=" + sslSettings + + ", applicationName='" + applicationName + '\'' + + ", compressorList=" + compressorList + + ", uuidRepresentation=" + uuidRepresentation + + 
", serverApi=" + serverApi + + ", autoEncryptionSettings=" + autoEncryptionSettings + + ", dnsClient=" + dnsClient + + ", inetAddressResolver=" + inetAddressResolver + + ", contextProvider=" + contextProvider + + ", timeoutMS=" + timeoutMS + + '}'; + } + + private MongoClientSettings(final Builder builder) { + isTrue("timeoutMS > 0 ", builder.timeoutMS == null || builder.timeoutMS >= 0); + readPreference = builder.readPreference; + writeConcern = builder.writeConcern; + retryWrites = builder.retryWrites; + retryReads = builder.retryReads; + readConcern = builder.readConcern; + credential = builder.credential; + transportSettings = builder.transportSettings; + codecRegistry = builder.codecRegistry; + commandListeners = builder.commandListeners; + applicationName = builder.applicationName; + loggerSettings = builder.loggerSettingsBuilder.build(); + clusterSettings = builder.clusterSettingsBuilder.build(); + serverSettings = builder.serverSettingsBuilder.build(); + socketSettings = builder.socketSettingsBuilder.build(); + connectionPoolSettings = builder.connectionPoolSettingsBuilder.build(); + sslSettings = builder.sslSettingsBuilder.build(); + compressorList = builder.compressorList; + uuidRepresentation = builder.uuidRepresentation; + serverApi = builder.serverApi; + dnsClient = builder.dnsClient; + inetAddressResolver = builder.inetAddressResolver; + autoEncryptionSettings = builder.autoEncryptionSettings; + heartbeatSocketSettings = SocketSettings.builder() + .readTimeout(builder.heartbeatSocketTimeoutMS == 0 + ? socketSettings.getConnectTimeout(MILLISECONDS) : builder.heartbeatSocketTimeoutMS, + MILLISECONDS) + .connectTimeout(builder.heartbeatConnectTimeoutMS == 0 + ? socketSettings.getConnectTimeout(MILLISECONDS) : builder.heartbeatConnectTimeoutMS, + MILLISECONDS) + .applyToProxySettings(proxyBuilder -> proxyBuilder.applySettings(socketSettings.getProxySettings())) + .build(); + heartbeatSocketTimeoutSetExplicitly = builder.heartbeatSocketTimeoutMS != 0; + heartbeatConnectTimeoutSetExplicitly = builder.heartbeatConnectTimeoutMS != 0; + contextProvider = builder.contextProvider; + timeoutMS = builder.timeoutMS; + } +} diff --git a/driver-core/src/main/com/mongodb/MongoCommandException.java b/driver-core/src/main/com/mongodb/MongoCommandException.java new file mode 100644 index 00000000000..ac61f2cafe0 --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoCommandException.java @@ -0,0 +1,121 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb; + +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.EncoderContext; +import org.bson.json.JsonWriter; + +import java.io.StringWriter; + +import static com.mongodb.internal.ExceptionUtils.MongoCommandExceptionUtils.extractErrorCode; +import static com.mongodb.internal.ExceptionUtils.MongoCommandExceptionUtils.extractErrorCodeName; +import static com.mongodb.internal.ExceptionUtils.MongoCommandExceptionUtils.extractErrorLabelsAsBson; +import static java.lang.String.format; + +/** + * An exception indicating that a command sent to a MongoDB server returned a failure. + * + * @since 2.13 + * @serial exclude + */ +public class MongoCommandException extends MongoServerException { + private static final long serialVersionUID = 8160676451944215078L; + + private final BsonDocument response; + + /** + * Construct a new instance with the CommandResult from a failed command + * + * @param response the command response + * @param address the address of the server that generated the response + */ + public MongoCommandException(final BsonDocument response, final ServerAddress address) { + super(extractErrorCode(response), extractErrorCodeName(response), + format("Command execution failed on MongoDB server with error %s: '%s' on server %s. The full response is %s", extractErrorCodeAndName(response), + extractErrorMessage(response), address, getResponseAsJson(response)), address); + this.response = response; + addLabels(extractErrorLabelsAsBson(response)); + } + + /** + * Gets the error code associated with the command failure. + * + * @return the error code + */ + public int getErrorCode() { + return getCode(); + } + + /** + * Gets the name associated with the error code. + * + * @return the error code name, which may be the empty string + * @since 3.8 + * @mongodb.server.release 3.4 + */ + public String getErrorCodeName() { + return super.getErrorCodeName(); + } + + /** + * Gets the error message associated with the command failure. + * + * @return the error message + */ + public String getErrorMessage() { + return extractErrorMessage(response); + } + + /** + * Gets the full server response document describing the error. + * + * @return the full response to the command failure. 
+ * @since 4.8 + */ + public BsonDocument getResponse() { + return response; + } + + private static String getResponseAsJson(final BsonDocument commandResponse) { + StringWriter writer = new StringWriter(); + JsonWriter jsonWriter = new JsonWriter(writer); + new BsonDocumentCodec().encode(jsonWriter, commandResponse, EncoderContext.builder().build()); + return writer.toString(); + } + + private static String extractErrorCodeAndName(final BsonDocument response) { + int errorCode = extractErrorCode(response); + String errorCodeName = extractErrorCodeName(response); + if (errorCodeName.isEmpty()) { + return Integer.toString(errorCode); + } else { + return format("%d (%s)", errorCode, errorCodeName); + } + } + + private static String extractErrorMessage(final BsonDocument response) { + String errorMessage = response.getString("errmsg", new BsonString("")).getValue(); + // Satisfy nullability checker + if (errorMessage == null) { + throw new MongoInternalException("This value should not be null"); + } + return errorMessage; + } +} diff --git a/driver-core/src/main/com/mongodb/MongoCompressor.java b/driver-core/src/main/com/mongodb/MongoCompressor.java new file mode 100644 index 00000000000..57f88716d89 --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoCompressor.java @@ -0,0 +1,175 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.lang.Nullable; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * Metadata describing a compressor to use for sending and receiving messages to a MongoDB server. + * + * @since 3.6 + * @mongodb.server.release 3.4 + */ +public final class MongoCompressor { + + /** + * The property key for defining the compression level. + */ + public static final String LEVEL = "LEVEL"; + + private final String name; + private final Map properties; + + + /** + * Create an instance for snappy compression. + * + * @return A compressor based on the snappy compression algorithm + * @mongodb.server.release 3.4 + */ + public static MongoCompressor createSnappyCompressor() { + return new MongoCompressor("snappy"); + } + + /** + * Create an instance for zlib compression. + * + * @return A compressor based on the zlib compression algorithm + * @mongodb.server.release 3.6 + */ + public static MongoCompressor createZlibCompressor() { + return new MongoCompressor("zlib"); + } + + /** + * Create an instance for zstd compression. + * + * @return A compressor based on the zstd compression algorithm + * @mongodb.server.release 4.2 + */ + public static MongoCompressor createZstdCompressor() { + return new MongoCompressor("zstd"); + } + + /** + * Gets the name of the compressor. + * + * @return the non-null compressor name + */ + public String getName() { + return name; + } + + /** + * Gets the property with the given key. 
+     *
+     * @param key          the key
+     * @param defaultValue the default value
+     * @param <T>          the property value type
+     * @return the property value, or the default value if the property is not defined
+     */
+    @SuppressWarnings("unchecked")
+    @Nullable
+    public <T> T getProperty(final String key, final T defaultValue) {
+        notNull("key", key);
+
+        T value = (T) properties.get(key.toLowerCase());
+        // keys are stored lower-cased, so the defined-check must use the normalized key as well
+        return (value == null && !properties.containsKey(key.toLowerCase())) ? defaultValue : value;
+    }
+
+    /**
+     * Gets the property with the given key.
+     *
+     * @param key          the key
+     * @param defaultValue the default value
+     * @param <T>          the property value type
+     * @return the property value, or the default value if the property is not defined
+     * @throws IllegalArgumentException if the value and default value are null
+     * @since 3.7
+     */
+    public <T> T getPropertyNonNull(final String key, final T defaultValue) {
+        T value = getProperty(key, defaultValue);
+        if (value == null) {
+            throw new IllegalArgumentException();
+        }
+        return value;
+    }
+
+    /**
+     * Creates a new compressor from this compressor with the given property added to it.
+     *
+     * @param key   the property key
+     * @param value the property value
+     * @param <T>   the property value type
+     * @return the new compressor
+     */
+    public <T> MongoCompressor withProperty(final String key, final T value) {
+        return new MongoCompressor(this, key, value);
+    }
+
+    private MongoCompressor(final String name) {
+        this.name = name;
+        properties = Collections.emptyMap();
+    }
+
+    private <T> MongoCompressor(final MongoCompressor from, final String propertyKey, final T propertyValue) {
+        notNull("propertyKey", propertyKey);
+
+        this.name = from.name;
+        this.properties = new HashMap<>(from.properties);
+        this.properties.put(propertyKey.toLowerCase(), propertyValue);
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        MongoCompressor that = (MongoCompressor) o;
+
+        if (!name.equals(that.name)) {
+            return false;
+        }
+        return properties.equals(that.properties);
+    }
+
+    @Override
+    public int hashCode() {
+        int result = name.hashCode();
+        result = 31 * result + properties.hashCode();
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return "MongoCompressor{"
+                + "name='" + name + '\''
+                + ", properties=" + properties
+                + '}';
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/MongoConfigurationException.java b/driver-core/src/main/com/mongodb/MongoConfigurationException.java
new file mode 100644
index 00000000000..af714131219
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/MongoConfigurationException.java
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb;
+
+/**
+ * An exception indicating a configuration error in the client.
+ * + * @since 3.4 + */ +public class MongoConfigurationException extends MongoClientException { + private static final long serialVersionUID = -2343119787572079323L; + + /** + * Construct an instance with the given message. + * + * @param message the message + */ + public MongoConfigurationException(final String message) { + super(message); + } + + /** + * Construct an instance with the given message and cause. + * + * @param message the message + * @param cause the cause + * + * @since 3.6 + */ + public MongoConfigurationException(final String message, final Throwable cause) { + super(message, cause); + } +} diff --git a/driver-core/src/main/com/mongodb/MongoConnectionPoolClearedException.java b/driver-core/src/main/com/mongodb/MongoConnectionPoolClearedException.java new file mode 100644 index 00000000000..5096d56f0e0 --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoConnectionPoolClearedException.java @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb; + +import com.mongodb.connection.ServerId; +import com.mongodb.lang.Nullable; + +import static com.mongodb.assertions.Assertions.assertNotNull; + +/* This exception is our way to deal with a race condition existing due to threads concurrently + * checking out connections from ConnectionPool and invalidating it.*/ +/** + * An exception that may usually happen as a result of another thread clearing a connection pool. + * Such clearing usually itself happens as a result of an exception, + * in which case it may be specified via the {@link #getCause()} method. + *
+ * <p>
+ * It is always safe to retry an operation that failed with this exception. + */ +public final class MongoConnectionPoolClearedException extends MongoClientException { + private static final long serialVersionUID = 1; + + /** + * Not part of the public API. + * + * @param connectionPoolServerId A {@link ServerId} specifying the server used by the connection pool that creates a new exception. + * @param cause The {@linkplain #getCause() cause}. + */ + public MongoConnectionPoolClearedException(final ServerId connectionPoolServerId, @Nullable final Throwable cause) { + super("Connection pool for " + assertNotNull(connectionPoolServerId) + " is paused" + + (cause == null ? "" : " because another operation failed"), cause); + } +} diff --git a/driver-core/src/main/com/mongodb/MongoCredential.java b/driver-core/src/main/com/mongodb/MongoCredential.java new file mode 100644 index 00000000000..6e83e54a3cf --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoCredential.java @@ -0,0 +1,823 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Evolving; +import com.mongodb.annotations.Immutable; +import com.mongodb.annotations.Reason; +import com.mongodb.lang.Nullable; + +import java.time.Duration; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static com.mongodb.AuthenticationMechanism.GSSAPI; +import static com.mongodb.AuthenticationMechanism.MONGODB_AWS; +import static com.mongodb.AuthenticationMechanism.MONGODB_OIDC; +import static com.mongodb.AuthenticationMechanism.MONGODB_X509; +import static com.mongodb.AuthenticationMechanism.PLAIN; +import static com.mongodb.AuthenticationMechanism.SCRAM_SHA_1; +import static com.mongodb.AuthenticationMechanism.SCRAM_SHA_256; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.connection.OidcAuthenticator.OidcValidator.validateCreateOidcCredential; +import static com.mongodb.internal.connection.OidcAuthenticator.OidcValidator.validateOidcCredentialConstruction; + +/** + * Represents credentials to authenticate to a mongo server,as well as the source of the credentials and the authentication mechanism to + * use. + * + * @since 2.11 + */ +@Immutable +public final class MongoCredential { + + private final AuthenticationMechanism mechanism; + private final String userName; + private final String source; + private final char[] password; + private final Map mechanismProperties; + + /** + * The GSSAPI mechanism. See the RFC. + * + * @mongodb.driver.manual core/authentication/#kerberos-authentication GSSAPI + */ + public static final String GSSAPI_MECHANISM = GSSAPI.getMechanismName(); + + /** + * The PLAIN mechanism. See the RFC. 
+ * + * @since 2.12 + * @mongodb.driver.manual core/authentication/#ldap-proxy-authority-authentication PLAIN + */ + public static final String PLAIN_MECHANISM = PLAIN.getMechanismName(); + + /** + * The MongoDB X.509 + * + * @since 2.12 + * @mongodb.driver.manual core/authentication/#x-509-certificate-authentication X-509 + */ + public static final String MONGODB_X509_MECHANISM = MONGODB_X509.getMechanismName(); + + /** + * The SCRAM-SHA-1 Mechanism. + * + * @since 2.13 + * @mongodb.server.release 3.0 + * @mongodb.driver.manual core/authentication/#authentication-scram-sha-1 SCRAM-SHA-1 + */ + public static final String SCRAM_SHA_1_MECHANISM = SCRAM_SHA_1.getMechanismName(); + + /** + * The SCRAM-SHA-256 Mechanism. + * + * @since 3.8 + * @mongodb.server.release 4.0 + * @mongodb.driver.manual core/authentication/#authentication-scram-sha-256 SCRAM-SHA-256 + */ + public static final String SCRAM_SHA_256_MECHANISM = SCRAM_SHA_256.getMechanismName(); + + /** + * Mechanism property key for overriding the service name for GSSAPI authentication. + * + * @see #createGSSAPICredential(String) + * @see #withMechanismProperty(String, Object) + * @since 3.3 + */ + public static final String SERVICE_NAME_KEY = "SERVICE_NAME"; + + /** + * Mechanism property key for specifying whether to canonicalize the host name for GSSAPI authentication. + * + * @see #createGSSAPICredential(String) + * @see #withMechanismProperty(String, Object) + * @since 3.3 + */ + public static final String CANONICALIZE_HOST_NAME_KEY = "CANONICALIZE_HOST_NAME"; + + /** + * Mechanism property key for overriding the SaslClient properties for GSSAPI authentication. + *
+     * <p>
+     * The value of this property must be a {@code Map<String, Object>}. In most cases there is no need to set this mechanism property.
+     * But if an application does:
+     * </p>
+     * <ul>
+     *     <li>Generally it must set the {@link javax.security.sasl.Sasl#CREDENTIALS} property to an instance of
+     *     {@link org.ietf.jgss.GSSCredential}.</li>
+     *     <li>It's recommended that it set the {@link javax.security.sasl.Sasl#MAX_BUFFER} property to "0" to ensure compatibility with
+     *     all versions of MongoDB.</li>
+     * </ul>
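+     * <p>
+     * For example, a sketch (the principal name is an illustrative assumption):
+     * </p>
+     * <pre>{@code
+     * Map<String, Object> saslClientProperties = new HashMap<>();
+     * saslClientProperties.put(Sasl.MAX_BUFFER, "0");
+     * MongoCredential credential = MongoCredential.createGSSAPICredential("user@EXAMPLE.COM")
+     *         .withMechanismProperty(MongoCredential.JAVA_SASL_CLIENT_PROPERTIES_KEY, saslClientProperties);
+     * }</pre>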
+ * + * @see #createGSSAPICredential(String) + * @see #withMechanismProperty(String, Object) + * @see javax.security.sasl.Sasl + * @see javax.security.sasl.Sasl#CREDENTIALS + * @see javax.security.sasl.Sasl#MAX_BUFFER + * @since 3.3 + */ + public static final String JAVA_SASL_CLIENT_PROPERTIES_KEY = "JAVA_SASL_CLIENT_PROPERTIES"; + + /** + * Mechanism property key for controlling the {@link javax.security.auth.Subject} under which GSSAPI authentication executes. + *
+     * <p>
+     * See the {@link SubjectProvider} documentation for a description of how this mechanism property is used.
+     * </p>
+     * <p>
+     * This property is ignored if the {@link #JAVA_SUBJECT_KEY} property is set.
+     * </p>
+     *
+ * @see SubjectProvider + * @see #createGSSAPICredential(String) + * @see #withMechanismProperty(String, Object) + * @since 4.2 + */ + public static final String JAVA_SUBJECT_PROVIDER_KEY = "JAVA_SUBJECT_PROVIDER"; + + /** + * Mechanism property key for overriding the {@link javax.security.auth.Subject} under which GSSAPI authentication executes. + * + * @see #createGSSAPICredential(String) + * @see #withMechanismProperty(String, Object) + * @since 3.3 + */ + public static final String JAVA_SUBJECT_KEY = "JAVA_SUBJECT"; + + /** + * Mechanism property key for specifying the AWS session token. The type of the value must be {@link String}. + * + * @see #createAwsCredential(String, char[]) + * @since 4.4 + */ + public static final String AWS_SESSION_TOKEN_KEY = "AWS_SESSION_TOKEN"; + + /** + * Mechanism property key for specifying a provider for an AWS credential, useful for refreshing a credential that could expire + * during the lifetime of the {@code MongoClient} with which it is associated. The type of the value must be a + * {@code java.util.function.Supplier} + * + *
+     * <p>
+     * If this key is added to an AWS MongoCredential, the userName (i.e. accessKeyId), password (i.e. secretAccessKey), and
+     * {@link MongoCredential#AWS_SESSION_TOKEN_KEY} value must all be null.
+     * </p>
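+     * <p>
+     * For example, a sketch of a refreshing supplier ({@code fetchFreshAwsCredential} is a hypothetical application method):
+     * </p>
+     * <pre>{@code
+     * Supplier<AwsCredential> awsCredentialSupplier = () -> fetchFreshAwsCredential();
+     * MongoCredential credential = MongoCredential.createAwsCredential(null, null)
+     *         .withMechanismProperty(MongoCredential.AWS_CREDENTIAL_PROVIDER_KEY, awsCredentialSupplier);
+     * }</pre>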
+ * + * @see #createAwsCredential(String, char[]) + * @see java.util.function.Supplier + * @see AwsCredential + * @since 4.4 + */ + @Beta(Reason.CLIENT) + public static final String AWS_CREDENTIAL_PROVIDER_KEY = "AWS_CREDENTIAL_PROVIDER"; + + /** + * Mechanism property key for specifying the environment for OIDC, which is + * the name of a built-in OIDC application environment integration to use + * to obtain credentials. The value must be either "k8s", "gcp", or "azure". + * This is an alternative to supplying a callback. + *
+ * <p>
+ * The "gcp" and "azure" environments require
+ * {@link MongoCredential#TOKEN_RESOURCE_KEY} to be specified.
+ * <p>
+ * If this is provided,
+ * {@link MongoCredential#OIDC_CALLBACK_KEY} and
+ * {@link MongoCredential#OIDC_HUMAN_CALLBACK_KEY}
+ * must not be provided.
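+ * <p>
+ * For example, to use the "azure" environment (an illustrative sketch; the token resource value is hypothetical):
+ * <pre>{@code
+ * MongoCredential credential = MongoCredential.createOidcCredential(null)
+ *         .withMechanismProperty(MongoCredential.ENVIRONMENT_KEY, "azure")
+ *         .withMechanismProperty(MongoCredential.TOKEN_RESOURCE_KEY, "api://example-resource");
+ * }</pre>
+ * <p>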
+ * The "k8s" environment will check the env vars + * {@code AZURE_FEDERATED_TOKEN_FILE}, and then {@code AWS_WEB_IDENTITY_TOKEN_FILE}, + * for the token file path, and if neither is set will then use the path + * {@code /var/run/secrets/kubernetes.io/serviceaccount/token}. + * + * @see #createOidcCredential(String) + * @see MongoCredential#TOKEN_RESOURCE_KEY + * @since 5.1 + */ + public static final String ENVIRONMENT_KEY = "ENVIRONMENT"; + + /** + * Mechanism property key for the OIDC callback. + * This callback is invoked when the OIDC-based authenticator requests + * a token. The type of the value must be {@link OidcCallback}. + * {@link IdpInfo} will not be supplied to the callback, + * and a {@linkplain com.mongodb.MongoCredential.OidcCallbackResult#getRefreshToken() refresh token} + * must not be returned by the callback. + *
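+ * <p>
+ * For example (a minimal sketch; {@code readAccessTokenFromFile} is a hypothetical application-supplied helper):
+ * <pre>{@code
+ * OidcCallback callback = context -> {
+ *     String accessToken = readAccessTokenFromFile(); // application-specific token acquisition
+ *     return new OidcCallbackResult(accessToken);
+ * };
+ * MongoCredential credential = MongoCredential.createOidcCredential(null)
+ *         .withMechanismProperty(MongoCredential.OIDC_CALLBACK_KEY, callback);
+ * }</pre>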
+ * <p>
+ * If this is provided, {@link MongoCredential#ENVIRONMENT_KEY} + * and {@link MongoCredential#OIDC_HUMAN_CALLBACK_KEY} + * must not be provided. + * + * @see #createOidcCredential(String) + * @since 5.1 + */ + public static final String OIDC_CALLBACK_KEY = "OIDC_CALLBACK"; + + /** + * Mechanism property key for the OIDC human callback. + * This callback is invoked when the OIDC-based authenticator requests + * a token from the identity provider (IDP) using the IDP information + * from the MongoDB server. The type of the value must be + * {@link OidcCallback}. + *
+ * <p>
+ * If this is provided, {@link MongoCredential#ENVIRONMENT_KEY}
+ * and {@link MongoCredential#OIDC_CALLBACK_KEY}
+ * must not be provided.
+ *
+ * @see #createOidcCredential(String)
+ * @since 5.1
+ */
+ public static final String OIDC_HUMAN_CALLBACK_KEY = "OIDC_HUMAN_CALLBACK";
+
+ /**
+ * Mechanism property key for a list of allowed hostnames or IP addresses for MongoDB connections. Ports must be excluded.
+ * The hostnames may include a leading "*." wildcard, which allows for matching (potentially nested) subdomains.
+ * When MONGODB-OIDC authentication is attempted against a hostname that does not match any of the list of allowed hosts,
+ * the driver will raise an error. The type of the value must be {@code List<String>}.
+ *
+ * @see MongoCredential#DEFAULT_ALLOWED_HOSTS
+ * @see #createOidcCredential(String)
+ * @since 5.1
+ */
+ public static final String ALLOWED_HOSTS_KEY = "ALLOWED_HOSTS";
+
+ /**
+ * The list of allowed hosts that will be used if no
+ * {@link MongoCredential#ALLOWED_HOSTS_KEY} value is supplied.
+ * The default allowed hosts are:
+ * {@code "*.mongodb.net", "*.mongodb-qa.net", "*.mongodb-dev.net", "*.mongodbgov.net", "localhost", "127.0.0.1", "::1"}
+ *
+ * @see #createOidcCredential(String)
+ * @since 5.1
+ */
+ public static final List<String> DEFAULT_ALLOWED_HOSTS = Collections.unmodifiableList(Arrays.asList(
+ "*.mongodb.net", "*.mongodb-qa.net", "*.mongodb-dev.net", "*.mongodbgov.net", "localhost", "127.0.0.1", "::1"));
+
+ /**
+ * Mechanism property key for specifying the URI of the target resource (sometimes called the audience),
+ * used in some OIDC environments.
+ * <p>
+ * A TOKEN_RESOURCE with a comma character must be given as a {@code MongoClient} configuration and not as
+ * part of the connection string. The TOKEN_RESOURCE value can contain a colon character.
+ *
+ * @see MongoCredential#ENVIRONMENT_KEY
+ * @see #createOidcCredential(String)
+ * @since 5.1
+ */
+ public static final String TOKEN_RESOURCE_KEY = "TOKEN_RESOURCE";
+
+ /**
+ * Creates a MongoCredential instance with an unspecified mechanism. The client will negotiate the best mechanism based on the
+ * version of the server that the client is authenticating to.
+ * <p>
+ * If the server version is 4.0 or higher, the driver will negotiate with the server preferring the SCRAM-SHA-256 mechanism. 3.x
+ * servers will authenticate using SCRAM-SHA-1, older servers will authenticate using the MONGODB-CR mechanism.
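+ * <p>
+ * For example (illustrative user, database, and password):
+ * <pre>{@code
+ * MongoCredential credential = MongoCredential.createCredential("appUser", "admin", "secret".toCharArray());
+ * }</pre>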
+ * + * @param userName the user name + * @param database the database where the user is defined + * @param password the user's password + * @return the credential + * + * @since 2.13 + * @mongodb.driver.manual core/authentication/#authentication-scram-sha-256 SCRAM-SHA-256 + * @mongodb.driver.manual core/authentication/#authentication-scram-sha-1 SCRAM-SHA-1 + * @mongodb.driver.manual core/authentication/#mongodb-cr-authentication MONGODB-CR + */ + public static MongoCredential createCredential(final String userName, final String database, final char[] password) { + return new MongoCredential(null, userName, database, password); + } + + /** + * Creates a MongoCredential instance for the SCRAM-SHA-1 SASL mechanism. Use this method only if you want to ensure that + * the driver uses the SCRAM-SHA-1 mechanism regardless of whether the server you are connecting to supports the + * authentication mechanism. Otherwise use the {@link #createCredential(String, String, char[])} method to allow the driver to + * negotiate the best mechanism based on the server version. + * + * @param userName the non-null user name + * @param source the source where the user is defined. + * @param password the non-null user password + * @return the credential + * @see #createCredential(String, String, char[]) + * + * @since 2.13 + * @mongodb.server.release 3.0 + * @mongodb.driver.manual core/authentication/#authentication-scram-sha-1 SCRAM-SHA-1 + */ + public static MongoCredential createScramSha1Credential(final String userName, final String source, final char[] password) { + return new MongoCredential(SCRAM_SHA_1, userName, source, password); + } + + /** + * Creates a MongoCredential instance for the SCRAM-SHA-256 SASL mechanism. + * + * @param userName the non-null user name + * @param source the source where the user is defined. + * @param password the non-null user password + * @return the credential + * @see #createCredential(String, String, char[]) + * + * @since 3.8 + * @mongodb.server.release 4.0 + * @mongodb.driver.manual core/authentication/#authentication-scram-sha-256 SCRAM-SHA-256 + */ + public static MongoCredential createScramSha256Credential(final String userName, final String source, final char[] password) { + return new MongoCredential(SCRAM_SHA_256, userName, source, password); + } + + /** + * Creates a MongoCredential instance for the MongoDB X.509 protocol. + * + * @param userName the user name + * @return the credential + * + * @since 2.12 + * @mongodb.driver.manual core/authentication/#x-509-certificate-authentication X-509 + */ + public static MongoCredential createMongoX509Credential(final String userName) { + return new MongoCredential(MONGODB_X509, userName, "$external", null); + } + + /** + * Creates a MongoCredential instance for the MongoDB X.509 protocol where the distinguished subject name of the client certificate + * acts as the userName. + *
+ * <p>
+ * Available on MongoDB server versions &gt;= 3.4.
+ *
+ * @return the credential + * + * @since 3.4 + * @mongodb.server.release 3.4 + * @mongodb.driver.manual core/authentication/#x-509-certificate-authentication X-509 + */ + public static MongoCredential createMongoX509Credential() { + return new MongoCredential(MONGODB_X509, null, "$external", null); + } + + /** + * Creates a MongoCredential instance for the PLAIN SASL mechanism. + * + * @param userName the non-null user name + * @param source the source where the user is defined. This can be either {@code "$external"} or the name of a database. + * @param password the non-null user password + * @return the credential + * + * @since 2.12 + * @mongodb.driver.manual core/authentication/#ldap-proxy-authority-authentication PLAIN + */ + public static MongoCredential createPlainCredential(final String userName, final String source, final char[] password) { + return new MongoCredential(PLAIN, userName, source, password); + } + + /** + * Creates a MongoCredential instance for the GSSAPI SASL mechanism. + *
+ * <p>
+ * To override the default service name of {@code "mongodb"}, add a mechanism property with the name {@code "SERVICE_NAME"}.
+ * <p>
+ * To force canonicalization of the host name prior to authentication, add a mechanism property with the name
+ * {@code "CANONICALIZE_HOST_NAME"} with the value {@code true}.
+ * <p>
+ * To override the {@link javax.security.auth.Subject} with which the authentication executes, add a mechanism property with the name
+ * {@code "JAVA_SUBJECT"} with the value of a {@code Subject} instance.
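+ * <p>
+ * For example, to override the service name (an illustrative sketch; the service name shown is hypothetical):
+ * <pre>{@code
+ * MongoCredential credential = MongoCredential.createGSSAPICredential("user@EXAMPLE.COM")
+ *         .withMechanismProperty(MongoCredential.SERVICE_NAME_KEY, "myServiceName");
+ * }</pre>
+ * <p>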
+ * To override the properties of the {@link javax.security.sasl.SaslClient} with which the authentication executes, add a mechanism + * property with the name {@code "JAVA_SASL_CLIENT_PROPERTIES"} with the value of a {@code Map} instance containing the + * necessary properties. This can be useful if the application is customizing the default + * {@link javax.security.sasl.SaslClientFactory}. + * + * @param userName the non-null user name + * @return the credential + * @mongodb.server.release 2.4 + * @mongodb.driver.manual core/authentication/#kerberos-authentication GSSAPI + * @see #withMechanismProperty(String, Object) + * @see #SERVICE_NAME_KEY + * @see #CANONICALIZE_HOST_NAME_KEY + * @see #JAVA_SUBJECT_KEY + * @see #JAVA_SASL_CLIENT_PROPERTIES_KEY + */ + public static MongoCredential createGSSAPICredential(final String userName) { + return new MongoCredential(GSSAPI, userName, "$external", null); + } + + /** + * Creates a MongoCredential instance for the MONGODB-AWS mechanism. + * + * @param userName the user name, which may be null. This maps to the AWS accessKeyId + * @param password the user password, which may be null if the userName is also null. This maps to the AWS secretAccessKey. + * @return the credential + * @since 4.1 + * @see #withMechanismProperty(String, Object) + * @see #AWS_SESSION_TOKEN_KEY + * @see #AWS_CREDENTIAL_PROVIDER_KEY + * @mongodb.server.release 4.4 + */ + public static MongoCredential createAwsCredential(@Nullable final String userName, @Nullable final char[] password) { + return new MongoCredential(MONGODB_AWS, userName, "$external", password); + } + + /** + * Creates a MongoCredential instance for the MONGODB-OIDC mechanism. + * + * @param userName the user name, which may be null. This is the OIDC principal name. + * @return the credential + * @since 5.1 + * @see #withMechanismProperty(String, Object) + * @see #ENVIRONMENT_KEY + * @see #TOKEN_RESOURCE_KEY + * @see #OIDC_CALLBACK_KEY + * @see #OIDC_HUMAN_CALLBACK_KEY + * @see #ALLOWED_HOSTS_KEY + * @mongodb.server.release 7.0 + */ + public static MongoCredential createOidcCredential(@Nullable final String userName) { + return new MongoCredential(MONGODB_OIDC, userName, "$external", null); + } + + /** + * Creates a new MongoCredential as a copy of this instance, with the specified mechanism property added. + * + * @param key the key to the property, which is treated as case-insensitive + * @param value the value of the property + * @param the property type + * @return the credential + * @since 2.12 + */ + public MongoCredential withMechanismProperty(final String key, final T value) { + return new MongoCredential(this, key, value); + } + + /** + * Creates a new MongoCredential with the set mechanism. The existing mechanism must be null. 
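+ * <p>
+ * For example (illustrative; the credential is created without a mechanism and then pinned to SCRAM-SHA-256):
+ * <pre>{@code
+ * MongoCredential credential = MongoCredential
+ *         .createCredential("appUser", "admin", "secret".toCharArray())
+ *         .withMechanism(AuthenticationMechanism.SCRAM_SHA_256);
+ * }</pre>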
+ * + * @param mechanism the mechanism to set + * @return the credential + * @since 3.8 + */ + public MongoCredential withMechanism(final AuthenticationMechanism mechanism) { + if (this.mechanism != null) { + throw new IllegalArgumentException("Mechanism already set"); + } + return new MongoCredential(mechanism, userName, source, password, mechanismProperties); + } + + /** + * Constructs a new instance using the given mechanism, userName, source, and password + * + * @param mechanism the authentication mechanism + * @param userName the user name + * @param source the source of the user name, typically a database name + * @param password the password + */ + MongoCredential(@Nullable final AuthenticationMechanism mechanism, @Nullable final String userName, final String source, + @Nullable final char[] password) { + this(mechanism, userName, source, password, Collections.emptyMap()); + } + + MongoCredential(@Nullable final AuthenticationMechanism mechanism, @Nullable final String userName, final String source, + @Nullable final char[] password, final Map mechanismProperties) { + + if (mechanism == MONGODB_OIDC) { + validateOidcCredentialConstruction(source, mechanismProperties); + validateCreateOidcCredential(password); + } + + if (userName == null && !Arrays.asList(MONGODB_X509, MONGODB_AWS, MONGODB_OIDC).contains(mechanism)) { + throw new IllegalArgumentException("username can not be null"); + } + + if (mechanism == null && password == null) { + throw new IllegalArgumentException("Password can not be null when the authentication mechanism is unspecified"); + } + + if (mechanismRequiresPassword(mechanism) && password == null) { + throw new IllegalArgumentException("Password can not be null for " + mechanism + " mechanism"); + } + + if ((mechanism == GSSAPI || mechanism == MONGODB_X509) && password != null) { + throw new IllegalArgumentException("Password must be null for the " + mechanism + " mechanism"); + } + + if (mechanism == MONGODB_AWS && userName != null && password == null) { + throw new IllegalArgumentException("Password can not be null when username is provided for " + mechanism + " mechanism"); + } + + this.mechanism = mechanism; + this.userName = userName; + this.source = notNull("source", source); + + this.password = password != null ? password.clone() : null; + this.mechanismProperties = new HashMap<>(mechanismProperties); + } + + private boolean mechanismRequiresPassword(@Nullable final AuthenticationMechanism mechanism) { + return mechanism == PLAIN || mechanism == SCRAM_SHA_1 || mechanism == SCRAM_SHA_256; + } + + /** + * Constructs a new instance using the given credential plus an additional mechanism property. + * + * @param from the credential to copy from + * @param mechanismPropertyKey the new mechanism property key + * @param mechanismPropertyValue the new mechanism property value + * @param the mechanism property type + */ + MongoCredential(final MongoCredential from, final String mechanismPropertyKey, final T mechanismPropertyValue) { + this(from.mechanism, from.userName, from.source, from.password, mapWith(from.mechanismProperties, notNull( + "mechanismPropertyKey", mechanismPropertyKey).toLowerCase(), mechanismPropertyValue)); + } + + private static Map mapWith(final Map map, final String key, final T value) { + HashMap result = new HashMap<>(map); + result.put(key, value); + return result; + } + + /** + * Gets the mechanism + * + * @return the mechanism. + */ + @Nullable + public String getMechanism() { + return mechanism == null ? 
null : mechanism.getMechanismName(); + } + + /** + * Gets the mechanism + * + * @return the mechanism. + * @since 3.0 + */ + @Nullable + public AuthenticationMechanism getAuthenticationMechanism() { + return mechanism; + } + + /** + * Gets the user name + * + * @return the user name. + */ + @Nullable + public String getUserName() { + return userName; + } + + /** + * Gets the source of the user name, typically the name of the database where the user is defined. + * + * @return the source of the user name. Can never be null. + */ + public String getSource() { + return source; + } + + /** + * Gets the password. + * + * @return the password. Can be null for some mechanisms. + */ + @Nullable + public char[] getPassword() { + if (password == null) { + return null; + } + return password.clone(); + } + + /** + * Get the value of the given key to a mechanism property, or defaultValue if there is no mapping. + * + * @param key the mechanism property key, which is treated as case-insensitive + * @param defaultValue the default value, if no mapping exists + * @param the value type + * @return the mechanism property value + * @since 2.12 + */ + @SuppressWarnings("unchecked") + @Nullable + public T getMechanismProperty(final String key, @Nullable final T defaultValue) { + notNull("key", key); + + T value = (T) mechanismProperties.get(key.toLowerCase()); + return (value == null) ? defaultValue : value; + + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + MongoCredential that = (MongoCredential) o; + + if (mechanism != that.mechanism) { + return false; + } + if (!Arrays.equals(password, that.password)) { + return false; + } + if (!source.equals(that.source)) { + return false; + } + if (!Objects.equals(userName, that.userName)) { + return false; + } + if (!mechanismProperties.equals(that.mechanismProperties)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = mechanism != null ? mechanism.hashCode() : 0; + result = 31 * result + (userName != null ? userName.hashCode() : 0); + result = 31 * result + source.hashCode(); + result = 31 * result + (password != null ? Arrays.hashCode(password) : 0); + result = 31 * result + mechanismProperties.hashCode(); + return result; + } + + @Override + public String toString() { + return "MongoCredential{" + + "mechanism=" + mechanism + + ", userName='" + userName + '\'' + + ", source='" + source + '\'' + + ", password=" + + ", mechanismProperties=" + + '}'; + } + + /** + * The context for the {@link OidcCallback#onRequest(OidcCallbackContext) OIDC request callback}. + * + * @since 5.1 + */ + @Evolving + public interface OidcCallbackContext { + /** + * @return Convenience method to obtain the {@linkplain MongoCredential#getUserName() username}. + */ + @Nullable + String getUserName(); + + /** + * @return The timeout that this callback must complete within. + */ + Duration getTimeout(); + + /** + * @return The OIDC callback API version. Currently, version 1. 
+ */ + int getVersion(); + + /** + * @return The OIDC Identity Provider's configuration that can be used + * to acquire an Access Token, or null if not using a + * {@linkplain MongoCredential#OIDC_HUMAN_CALLBACK_KEY human callback.} + */ + @Nullable + IdpInfo getIdpInfo(); + + /** + * @return The OIDC Refresh token supplied by a prior callback invocation, + * or null if no token was supplied, or if not using a + * {@linkplain MongoCredential#OIDC_HUMAN_CALLBACK_KEY human callback.} + */ + @Nullable + String getRefreshToken(); + } + + /** + * This callback is invoked when the OIDC-based authenticator requests + * tokens from the identity provider. + *

+ * It does not have to be thread-safe, unless it is provided to multiple + * MongoClients. + * + * @since 5.1 + */ + public interface OidcCallback { + /** + * @param context The context. + * @return The response produced by an OIDC Identity Provider + */ + OidcCallbackResult onRequest(OidcCallbackContext context); + } + + /** + * The OIDC Identity Provider's configuration that can be used to acquire an Access Token. + * + * @since 5.1 + */ + @Evolving + public interface IdpInfo { + /** + * @return URL which describes the Authorization Server. This identifier is the + * iss of provided access tokens, and is viable for RFC8414 metadata + * discovery and RFC9207 identification. + */ + String getIssuer(); + + /** + * @return Unique client ID for this OIDC client. + */ + @Nullable + String getClientId(); + + /** + * @return Additional scopes to request from Identity Provider. Immutable. + */ + List getRequestScopes(); + } + + /** + * The OIDC credential information. + * + * @since 5.1 + */ + public static final class OidcCallbackResult { + + private final String accessToken; + + private final Duration expiresIn; + + @Nullable + private final String refreshToken; + + + /** + * An access token that does not expire. + * @param accessToken The OIDC access token. + */ + public OidcCallbackResult(final String accessToken) { + this(accessToken, Duration.ZERO, null); + } + + /** + * @param accessToken The OIDC access token. + * @param expiresIn Time until the access token expires. + * A {@linkplain Duration#isZero() zero-length} duration + * means that the access token does not expire. + */ + public OidcCallbackResult(final String accessToken, final Duration expiresIn) { + this(accessToken, expiresIn, null); + } + + /** + * @param accessToken The OIDC access token. + * @param expiresIn Time until the access token expires. + * A {@linkplain Duration#isZero() zero-length} duration + * means that the access token does not expire. + * @param refreshToken The refresh token. If null, refresh will not be attempted. + */ + public OidcCallbackResult(final String accessToken, final Duration expiresIn, + @Nullable final String refreshToken) { + notNull("accessToken", accessToken); + notNull("expiresIn", expiresIn); + if (expiresIn.isNegative()) { + throw new IllegalArgumentException("expiresIn must not be a negative value"); + } + this.accessToken = accessToken; + this.expiresIn = expiresIn; + this.refreshToken = refreshToken; + } + + /** + * @return The OIDC access token. + */ + public String getAccessToken() { + return accessToken; + } + + /** + * @return The OIDC refresh token. If null, refresh will not be attempted. + */ + @Nullable + public String getRefreshToken() { + return refreshToken; + } + } +} diff --git a/driver-core/src/main/com/mongodb/MongoCursorNotFoundException.java b/driver-core/src/main/com/mongodb/MongoCursorNotFoundException.java new file mode 100644 index 00000000000..77492b8a6e2 --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoCursorNotFoundException.java @@ -0,0 +1,54 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import org.bson.BsonDocument; + +/** + * Subclass of {@link MongoException} representing a cursor-not-found exception. + * + * @since 2.12 + * @serial exclude + */ +public class MongoCursorNotFoundException extends MongoQueryException { + + private static final long serialVersionUID = -4415279469780082174L; + + private final long cursorId; + + /** + * Construct an instance. + * + * @param cursorId cursor identifier + * @param response the server response document + * @param serverAddress the server address + * @since 4.8 + */ + public MongoCursorNotFoundException(final long cursorId, final BsonDocument response, final ServerAddress serverAddress) { + super(response, serverAddress); + this.cursorId = cursorId; + } + + /** + * Get the cursor id that wasn't found. + * + * @return the ID of the cursor + */ + public long getCursorId() { + return cursorId; + } +} diff --git a/driver-core/src/main/com/mongodb/MongoDriverInformation.java b/driver-core/src/main/com/mongodb/MongoDriverInformation.java new file mode 100644 index 00000000000..a3b28b62fad --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoDriverInformation.java @@ -0,0 +1,180 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.annotations.Internal; +import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.internal.client.DriverInformation; +import com.mongodb.internal.client.DriverInformationHelper; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * The MongoDriverInformation class allows driver and library authors to add extra information about their library. This information is + * then available in the MongoD/MongoS logs. + * + *
+ * <p>
+ * The following metadata can be included when creating a {@code MongoClient}.
+ * <ul>
+ *     <li>The driver name. Eg: {@code mongo-scala-driver}</li>
+ *     <li>The driver version. Eg: {@code 1.2.0}</li>
+ *     <li>Extra platform information. Eg: {@code Scala 2.11}</li>
+ * </ul>
+ * <p>
+ * Note: Library authors are responsible for accepting {@code MongoDriverInformation} from external libraries using their library. + * Also all the meta data is limited to 512 bytes and any excess data will be truncated. + *
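+ * <p>
+ * For example (illustrative values, taken from the list above):
+ * <pre>{@code
+ * MongoDriverInformation driverInfo = MongoDriverInformation.builder()
+ *         .driverName("mongo-scala-driver")
+ *         .driverVersion("1.2.0")
+ *         .driverPlatform("Scala 2.11")
+ *         .build();
+ * }</pre>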
+ * + * @since 3.4 + * @mongodb.server.release 3.4 + */ +public final class MongoDriverInformation { + private final List driverInformationList; + + /** + * Convenience method to create a Builder. + * + * @return a builder + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Convenience method to create a Builder. + * + * @param mongoDriverInformation the mongoDriverInformation to extend + * @return a builder + */ + public static Builder builder(final MongoDriverInformation mongoDriverInformation) { + return new Builder(mongoDriverInformation); + } + + /** + * Returns the driverNames + * + * @return the driverNames + */ + public List getDriverNames() { + return DriverInformationHelper.getNames(driverInformationList); + } + + /** + * Returns the driverVersions + * + * @return the driverVersions + */ + public List getDriverVersions() { + return DriverInformationHelper.getVersions(driverInformationList); + } + + /** + * Returns the driverPlatforms + * + * @return the driverPlatforms + */ + public List getDriverPlatforms() { + return DriverInformationHelper.getPlatforms(driverInformationList); + } + + /** + * For internal use only + */ + @Internal + public List getDriverInformationList() { + return driverInformationList; + } + + /** + * + */ + @NotThreadSafe + public static final class Builder { + private final MongoDriverInformation mongoDriverInformation; + private String driverName; + private String driverVersion; + private String driverPlatform; + + /** + * Sets the name + * + * @param driverName the name + * @return this + */ + public Builder driverName(final String driverName) { + this.driverName = notNull("driverName", driverName); + return this; + } + + /** + * Sets the version + * + *
+ * <p>
+ * Note: You must also set a driver name if setting a driver version.
+ * + * @param driverVersion the version + * @return this + */ + public Builder driverVersion(final String driverVersion) { + this.driverVersion = notNull("driverVersion", driverVersion); + return this; + } + + /** + * Sets the platform + * + * @param driverPlatform the platform + * @return this + */ + public Builder driverPlatform(final String driverPlatform) { + this.driverPlatform = notNull("driverPlatform", driverPlatform); + return this; + } + + /** + * @return the driver information + */ + public MongoDriverInformation build() { + DriverInformation driverInformation = new DriverInformation(driverName, driverVersion, driverPlatform); + if (mongoDriverInformation.driverInformationList.contains(driverInformation)) { + return mongoDriverInformation; + } + + List driverInformationList = new ArrayList<>(mongoDriverInformation.driverInformationList); + driverInformationList.add(driverInformation); + return new MongoDriverInformation(Collections.unmodifiableList(driverInformationList)); + } + + private Builder() { + mongoDriverInformation = new MongoDriverInformation(Collections.emptyList()); + } + + private Builder(final MongoDriverInformation driverInformation) { + this.mongoDriverInformation = notNull("driverInformation", driverInformation); + } + } + + private MongoDriverInformation(final List driverInformation) { + this.driverInformationList = notNull("driverInformation", driverInformation); + } +} diff --git a/driver-core/src/main/com/mongodb/MongoException.java b/driver-core/src/main/com/mongodb/MongoException.java new file mode 100644 index 00000000000..a668dd344b7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoException.java @@ -0,0 +1,215 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonValue; + +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * Top level Exception for all Exceptions, server-side or client-side, that come from the driver. + * @serial exclude + */ +public class MongoException extends RuntimeException { + + /** + * An error label indicating that the exception can be treated as a transient transaction error. + * + * @see #hasErrorLabel(String) + * @since 3.8 + */ + public static final String TRANSIENT_TRANSACTION_ERROR_LABEL = "TransientTransactionError"; + + /** + * An error label indicating that the exception can be treated as an unknown transaction commit result. 
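+ * <p>
+ * For example (an illustrative sketch of checking this label after a failed commit; {@code clientSession} is assumed to exist):
+ * <pre>{@code
+ * try {
+ *     clientSession.commitTransaction();
+ * } catch (MongoException e) {
+ *     if (e.hasErrorLabel(MongoException.UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL)) {
+ *         // it is unknown whether the commit succeeded; the application may retry it
+ *     }
+ * }
+ * }</pre>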
+ * + * @see #hasErrorLabel(String) + * @since 3.8 + */ + public static final String UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL = "UnknownTransactionCommitResult"; + + private static final long serialVersionUID = -4415279469780082174L; + + private final int code; + private final Set errorLabels = new HashSet<>(); + + /** + * Static helper to create or cast a MongoException from a throwable + * + * @param t a throwable, which may be null + * @return a MongoException + */ + @Nullable + public static MongoException fromThrowable(@Nullable final Throwable t) { + if (t == null) { + return null; + } else { + return fromThrowableNonNull(t); + } + } + + /** + * Static helper to create or cast a MongoException from a throwable + * + * @param t a throwable, which may not be null + * @return a MongoException + * @since 3.7 + */ + public static MongoException fromThrowableNonNull(final Throwable t) { + if (t instanceof MongoException) { + return (MongoException) t; + } else { + return new MongoException(t.getMessage(), t); + } + } + + /** + * @param msg the message + */ + public MongoException(final String msg) { + super(msg); + code = -3; + } + + /** + * @param code the error code + * @param msg the message + */ + public MongoException(final int code, final String msg) { + super(msg); + this.code = code; + } + + /** + * @param msg the message + * @param t the throwable cause + */ + public MongoException(@Nullable final String msg, @Nullable final Throwable t) { + super(msg, t); + code = -4; + } + + /** + * @param code the error code + * @param msg the message + * @param t the throwable cause + */ + public MongoException(final int code, final String msg, final Throwable t) { + super(msg, t); + this.code = code; + if (t instanceof MongoException) { + addLabels(((MongoException) t).getErrorLabels()); + } + } + + /** + * @param code the error code + * @param msg the message + * @param response the response + * @since 4.1 + */ + public MongoException(final int code, final String msg, final BsonDocument response) { + super(msg); + this.code = code; + addLabels(response.getArray("errorLabels", new BsonArray())); + } + + /** + * Gets the exception code + * + * @return the error code. + */ + public int getCode() { + return code; + } + + /** + * Adds the given error label to the exception. + * + * @param errorLabel the non-null error label to add to the exception + * + * @since 3.8 + */ + public void addLabel(final String errorLabel) { + notNull("errorLabel", errorLabel); + errorLabels.add(errorLabel); + } + + /** + * Removes the given error label from the exception. + * + * @param errorLabel the non-null error label to remove from the exception + * + * @since 3.8 + */ + public void removeLabel(final String errorLabel) { + notNull("errorLabel", errorLabel); + errorLabels.remove(errorLabel); + } + + /** + * Gets the set of error labels associated with this exception. + * + * @return the error labels, which may not be null but may be empty + * @since 3.8 + */ + public Set getErrorLabels() { + return Collections.unmodifiableSet(errorLabels); + } + + /** + * Return true if the exception is labelled with the given error label, and false otherwise. + * + * @param errorLabel the non-null error label + * @return true if the exception is labelled with the given error label + * @since 3.8 + */ + public boolean hasErrorLabel(final String errorLabel) { + notNull("errorLabel", errorLabel); + return errorLabels.contains(errorLabel); + } + + /** + * Add labels. 
+ * + * @param labels the labels + */ + protected void addLabels(final BsonArray labels) { + for (final BsonValue errorLabel : labels) { + addLabel(errorLabel.asString().getValue()); + } + } + + /** + * Add labels. + * + * @param labels the labels + */ + protected void addLabels(final Collection labels) { + for (final String errorLabel : labels) { + addLabel(errorLabel); + } + } +} diff --git a/driver-core/src/main/com/mongodb/MongoExecutionTimeoutException.java b/driver-core/src/main/com/mongodb/MongoExecutionTimeoutException.java new file mode 100644 index 00000000000..e257991ccda --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoExecutionTimeoutException.java @@ -0,0 +1,66 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import org.bson.BsonDocument; + +/** + * Exception indicating that the execution of the current operation timed out as a result of the maximum operation time being exceeded. + * + * @since 2.12 + */ +public class MongoExecutionTimeoutException extends MongoException { + private static final long serialVersionUID = 5955669123800274594L; + + /** + * Construct a new instance. + * + * @param message the error message + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public MongoExecutionTimeoutException(final String message) { + super(message); + + } + + /** + * Construct a new instance. + * + * @param code the error code + * @param message the error message + */ + public MongoExecutionTimeoutException(final int code, final String message) { + super(code, message); + + } + + /** + * Construct a new instance. + * + * @param code the error code + * @param message the error message + * @param response the response + * @since 4.1 + */ + public MongoExecutionTimeoutException(final int code, final String message, final BsonDocument response) { + super(code, message, response); + + } +} diff --git a/driver-core/src/main/com/mongodb/MongoGridFSException.java b/driver-core/src/main/com/mongodb/MongoGridFSException.java new file mode 100644 index 00000000000..78d2956fe41 --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoGridFSException.java @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +/** + * An exception indicating that a failure occurred in GridFS. 
+ * + * @since 3.1 + */ +public class MongoGridFSException extends MongoException { + private static final long serialVersionUID = -3894346172927543978L; + + /** + * Constructs a new instance. + * + * @param message the message + */ + public MongoGridFSException(final String message) { + super(message); + } + + /** + * Constructs a new instance. + * + * @param message the message + * @param t the throwable cause + */ + public MongoGridFSException(final String message, final Throwable t) { + super(message, t); + } +} diff --git a/driver-core/src/main/com/mongodb/MongoIncompatibleDriverException.java b/driver-core/src/main/com/mongodb/MongoIncompatibleDriverException.java new file mode 100644 index 00000000000..84dc959bfcc --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoIncompatibleDriverException.java @@ -0,0 +1,52 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.connection.ClusterDescription; + +/** + * An exception indicating that this version of the driver is not compatible with at least one of the servers that it is currently + * connected to. + * + * @since 2.12.0 + * @serial exclude + */ +public class MongoIncompatibleDriverException extends MongoException { + private static final long serialVersionUID = -5213381354402601890L; + private final ClusterDescription clusterDescription; + + /** + * Construct a new instance. + * + * @param message the error message + * @param clusterDescription the cluster description + */ + public MongoIncompatibleDriverException(final String message, final ClusterDescription clusterDescription) { + super(message); + this.clusterDescription = clusterDescription; + } + + /** + * The cluster description which was determined to be incompatible. + * + * @return the cluster description + */ + public ClusterDescription getClusterDescription() { + return clusterDescription; + } + +} diff --git a/driver-core/src/main/com/mongodb/MongoInternalException.java b/driver-core/src/main/com/mongodb/MongoInternalException.java new file mode 100644 index 00000000000..a6c3b33dc33 --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoInternalException.java @@ -0,0 +1,42 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// MongoInternalException.java + +package com.mongodb; + +/** + * A Mongo exception internal to the driver, not carrying any error code. 
+ */ +public class MongoInternalException extends MongoException { + private static final long serialVersionUID = -4415279469780082174L; + + /** + * @param msg the description of the problem + */ + public MongoInternalException(final String msg) { + super(msg); + } + + /** + * @param msg the description of the problem + * @param t the Throwable root cause + */ + public MongoInternalException(final String msg, final Throwable t) { + super(msg, t); + } +} + diff --git a/driver-core/src/main/com/mongodb/MongoInterruptedException.java b/driver-core/src/main/com/mongodb/MongoInterruptedException.java new file mode 100644 index 00000000000..e0adce7978c --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoInterruptedException.java @@ -0,0 +1,60 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.lang.Nullable; + +import java.io.InputStream; +import java.io.InterruptedIOException; +import java.io.OutputStream; +import java.net.Socket; +import java.net.SocketAddress; +import java.net.SocketException; +import java.nio.channels.ClosedByInterruptException; +import java.nio.channels.InterruptibleChannel; + +/** + * A driver-specific non-checked counterpart to {@link InterruptedException}. + * Before this exception is thrown, the {@linkplain Thread#isInterrupted() interrupt status} of the thread will have been set + * unless the {@linkplain #getCause() cause} is {@link InterruptedIOException}, in which case the driver leaves the status as is. + *
+ * <p>
+ * The Java SE API uses exceptions different from {@link InterruptedException} to communicate the same information:
+ * <ul>
+ *     <li>{@link InterruptibleChannel} uses {@link ClosedByInterruptException}.</li>
+ *     <li>{@link Socket#connect(SocketAddress)},
+ *     {@linkplain InputStream}/{@link OutputStream} obtained via {@link Socket#getInputStream()}/{@link Socket#getOutputStream()}
+ *     use either {@link ClosedByInterruptException} or {@link SocketException}.</li>
+ *     <li>There is also {@link InterruptedIOException}, which is documented to an extent as an IO-specific counterpart to
+ *     {@link InterruptedException}.</li>
+ * </ul>
+ * The driver strives to wrap those in {@link MongoInterruptedException} where relevant. + * + * @see Thread#interrupt() + */ +public class MongoInterruptedException extends MongoException { + private static final long serialVersionUID = -4110417867718417860L; + + /** + * Construct a new instance. + * + * @param message the message + * @param e the cause + */ + public MongoInterruptedException(@Nullable final String message, @Nullable final Exception e) { + super(message, e); + } +} diff --git a/driver-core/src/main/com/mongodb/MongoNamespace.java b/driver-core/src/main/com/mongodb/MongoNamespace.java new file mode 100644 index 00000000000..2395eaab80f --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoNamespace.java @@ -0,0 +1,203 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.annotations.Immutable; +import org.bson.codecs.pojo.annotations.BsonCreator; +import org.bson.codecs.pojo.annotations.BsonIgnore; +import org.bson.codecs.pojo.annotations.BsonProperty; + +import java.util.HashSet; +import java.util.Set; + +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.Arrays.asList; + +/** + * A MongoDB namespace, which includes a database name and collection name. + * + * @since 3.0 + */ +@Immutable +public final class MongoNamespace { + /** + * The collection name in which to execute a command. + * @deprecated there is no replacement for this constant, as it is only needed for the OP_QUERY wire protocol message, which has + * been replaced by OP_MSG + */ + @Deprecated + public static final String COMMAND_COLLECTION_NAME = "$cmd"; + + private static final Set PROHIBITED_CHARACTERS_IN_DATABASE_NAME = + new HashSet<>(asList('\0', '/', '\\', ' ', '"', '.')); + + private final String databaseName; + private final String collectionName; + @BsonIgnore + private final String fullName; // cache to avoid repeated string building + + /** + * Check the validity of the given database name. A valid database name is non-null, non-empty, and does not contain any of the + * following characters: {@code '\0', '/', '\\', ' ', '"', '.'}. The server may impose additional restrictions on database names. 
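+ * <p>
+ * For example (illustrative):
+ * <pre>{@code
+ * MongoNamespace.checkDatabaseNameValidity("test");   // passes
+ * MongoNamespace.checkDatabaseNameValidity("my db");  // throws IllegalArgumentException (contains a space)
+ * }</pre>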
+ * + * @param databaseName the database name + * @throws IllegalArgumentException if the database name is invalid + * @since 3.4 + * @mongodb.driver.manual reference/limits/#naming-restrictions Naming Restrictions + */ + public static void checkDatabaseNameValidity(final String databaseName) { + notNull("databaseName", databaseName); + isTrueArgument("databaseName is not empty", !databaseName.isEmpty()); + for (int i = 0; i < databaseName.length(); i++) { + if (PROHIBITED_CHARACTERS_IN_DATABASE_NAME.contains(databaseName.charAt(i))) { + throw new IllegalArgumentException("state should be: databaseName does not contain '" + databaseName.charAt(i) + "'"); + } + } + } + + /** + * Check the validity of the given collection name. A valid collection name is non-null and non-empty. The server may impose + * additional restrictions on collection names. + * + * @param collectionName the collection name + * @throws IllegalArgumentException if the collection name is invalid + * @since 3.4 + * @mongodb.driver.manual reference/limits/#naming-restrictions Naming Restrictions + */ + public static void checkCollectionNameValidity(final String collectionName) { + notNull("collectionName", collectionName); + isTrueArgument("collectionName is not empty", !collectionName.isEmpty()); + } + + /** + * Construct an instance for the given full name. The database name is the string preceding the first {@code "."} character. + * + * @param fullName the non-null full namespace + * @see #checkDatabaseNameValidity(String) + * @see #checkCollectionNameValidity(String) + */ + public MongoNamespace(final String fullName) { + notNull("fullName", fullName); + this.fullName = fullName; + this.databaseName = getDatatabaseNameFromFullName(fullName); + this.collectionName = getCollectionNameFullName(fullName); + checkDatabaseNameValidity(databaseName); + checkCollectionNameValidity(collectionName); + } + + /** + * Construct an instance from the given database name and collection name. + * + * @param databaseName the valid database name + * @param collectionName the valid collection name + * @see #checkDatabaseNameValidity(String) + * @see #checkCollectionNameValidity(String) + */ + @BsonCreator + public MongoNamespace(@BsonProperty("db") final String databaseName, + @BsonProperty("coll") final String collectionName) { + checkDatabaseNameValidity(databaseName); + checkCollectionNameValidity(collectionName); + this.databaseName = databaseName; + this.collectionName = collectionName; + this.fullName = databaseName + '.' + collectionName; + } + + /** + * Gets the database name. + * + * @return the database name + */ + @BsonProperty("db") + public String getDatabaseName() { + return databaseName; + } + + /** + * Gets the collection name. + * + * @return the collection name + */ + @BsonProperty("coll") + public String getCollectionName() { + return collectionName; + } + + /** + * Gets the full name, which is the database name and the collection name, separated by a period. 
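+ * <p>
+ * For example (illustrative):
+ * <pre>{@code
+ * MongoNamespace namespace = new MongoNamespace("db", "coll");
+ * namespace.getFullName();       // "db.coll"
+ * namespace.getDatabaseName();   // "db"
+ * namespace.getCollectionName(); // "coll"
+ * }</pre>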
+ * + * @return the full name + */ + public String getFullName() { + return fullName; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + MongoNamespace that = (MongoNamespace) o; + + if (!collectionName.equals(that.collectionName)) { + return false; + } + if (!databaseName.equals(that.databaseName)) { + return false; + } + + return true; + } + + /** + * Returns the standard MongoDB representation of a namespace, which is {@code <database>.<collection>}. + * + * @return string representation of the namespace. + */ + @Override + public String toString() { + return fullName; + } + + @Override + public int hashCode() { + int result = databaseName.hashCode(); + result = 31 * result + (collectionName.hashCode()); + return result; + } + + private static String getCollectionNameFullName(final String namespace) { + int firstDot = namespace.indexOf('.'); + if (firstDot == -1) { + return namespace; + } + return namespace.substring(firstDot + 1); + } + + private static String getDatatabaseNameFromFullName(final String namespace) { + int firstDot = namespace.indexOf('.'); + if (firstDot == -1) { + return ""; + } + return namespace.substring(0, firstDot); + } +} diff --git a/driver-core/src/main/com/mongodb/MongoNodeIsRecoveringException.java b/driver-core/src/main/com/mongodb/MongoNodeIsRecoveringException.java new file mode 100644 index 00000000000..ebaa5e2276f --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoNodeIsRecoveringException.java @@ -0,0 +1,40 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import org.bson.BsonDocument; + +/** + * An exception indicating that the server is a member of a replica set but is in recovery mode, and therefore refused to execute + * the operation. This can happen when a server is starting up and trying to join the replica set. + * + * @since 3.0 + */ +public class MongoNodeIsRecoveringException extends MongoCommandException { + private static final long serialVersionUID = 6062524147327071635L; + + /** + * Construct an instance. + * + * @param response the full response from the server + * @param serverAddress the address of the server + * @since 3.8 + */ + public MongoNodeIsRecoveringException(final BsonDocument response, final ServerAddress serverAddress) { + super(response, serverAddress); + } +} diff --git a/driver-core/src/main/com/mongodb/MongoNotPrimaryException.java b/driver-core/src/main/com/mongodb/MongoNotPrimaryException.java new file mode 100644 index 00000000000..a12fd66b79e --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoNotPrimaryException.java @@ -0,0 +1,40 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import org.bson.BsonDocument; + +/** + * An exception indicating that the server is a member of a replica set but is not the primary, and therefore refused to execute either a + * write operation or a read operation that required a primary. This can happen during a replica set election. + * + * @since 3.0 + */ +public class MongoNotPrimaryException extends MongoCommandException { + private static final long serialVersionUID = 694876345217027108L; + + /** + * Construct an instance. + * + * @param response the full response from the server + * @param serverAddress the address of the server + * @since 3.8 + */ + public MongoNotPrimaryException(final BsonDocument response, final ServerAddress serverAddress) { + super(response, serverAddress); + } +} diff --git a/driver-core/src/main/com/mongodb/MongoOperationTimeoutException.java b/driver-core/src/main/com/mongodb/MongoOperationTimeoutException.java new file mode 100644 index 00000000000..50006339167 --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoOperationTimeoutException.java @@ -0,0 +1,63 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb; + +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.lang.Nullable; + +import java.util.concurrent.TimeUnit; + +/** + * Exception thrown to indicate that a MongoDB operation has exceeded the specified timeout for + * the full execution of operation. + * + *
+ * <p>
+ * The {@code MongoOperationTimeoutException} might provide information about the underlying
+ * cause of the timeout, if available. For example, if retries are attempted due to transient failures,
+ * and a timeout occurs in any of the attempts, the exception from one of the retries may be appended
+ * as the cause to this {@code MongoOperationTimeoutException}.
+ * <p>
The key difference between {@code MongoOperationTimeoutException} and {@code MongoExecutionTimeoutException} + * lies in the nature of these exceptions. {@code MongoExecutionTimeoutException} indicates a server-side timeout + * capped by a user-specified number. These server errors are transformed into the new {@code MongoOperationTimeoutException}. + * On the other hand, {@code MongoOperationExecutionException} denotes a timeout during the execution of the entire operation. + * + * @see MongoClientSettings.Builder#timeout(long, TimeUnit) + * @see MongoClientSettings#getTimeout(TimeUnit) + * @since 5.2 + */ +@Alpha(Reason.CLIENT) +public final class MongoOperationTimeoutException extends MongoTimeoutException { + private static final long serialVersionUID = 1L; + + /** + * Construct a new instance. + * + * @param message the message + */ + public MongoOperationTimeoutException(final String message) { + super(message); + } + + /** + * Construct a new instance + * @param message the message + * @param cause the cause + */ + public MongoOperationTimeoutException(final String message, @Nullable final Throwable cause) { + super(message, cause); + } +} diff --git a/driver-core/src/main/com/mongodb/MongoQueryException.java b/driver-core/src/main/com/mongodb/MongoQueryException.java new file mode 100644 index 00000000000..eb9909a4806 --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoQueryException.java @@ -0,0 +1,40 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import org.bson.BsonDocument; + +/** + * An exception indicating that a query operation failed on the server. + * + * @since 3.0 + * @serial exclude + */ +public class MongoQueryException extends MongoCommandException { + private static final long serialVersionUID = -5113350133297015801L; + + /** + * Construct an instance. + * + * @param response the server response document + * @param serverAddress the server address + * @since 4.8 + */ + public MongoQueryException(final BsonDocument response, final ServerAddress serverAddress) { + super(response, serverAddress); + } +} diff --git a/driver-core/src/main/com/mongodb/MongoSecurityException.java b/driver-core/src/main/com/mongodb/MongoSecurityException.java new file mode 100644 index 00000000000..ec090bc28e6 --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoSecurityException.java @@ -0,0 +1,61 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +/** + * This exception is thrown when there is an error reported by the underlying client authentication mechanism. + * + * @since 3.0 + * @serial exclude + */ +public class MongoSecurityException extends MongoClientException { + private static final long serialVersionUID = -7044790409935567275L; + + private final MongoCredential credential; + + /** + * Construct an instance + * + * @param credential the credential + * @param message the message + * @param cause the cause + */ + public MongoSecurityException(final MongoCredential credential, final String message, final Throwable cause) { + super(message, cause); + this.credential = credential; + } + + /** + * Construct an instance + * + * @param credential the credential + * @param message the message + */ + public MongoSecurityException(final MongoCredential credential, final String message) { + super(message); + this.credential = credential; + } + + /** + * The credential being authenticated. + * + * @return the credential + */ + public MongoCredential getCredential() { + return credential; + } +} diff --git a/driver-core/src/main/com/mongodb/MongoServerException.java b/driver-core/src/main/com/mongodb/MongoServerException.java new file mode 100644 index 00000000000..a981dc1c923 --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoServerException.java @@ -0,0 +1,94 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.lang.Nullable; + +/** + * An exception indicating that some error has been raised by a MongoDB server in response to an operation. + * + * @since 2.13 + * @serial exclude + */ +public abstract class MongoServerException extends MongoException { + private static final long serialVersionUID = -5213859742051776206L; + @Nullable + private final String errorCodeName; + private final ServerAddress serverAddress; + + /** + * Construct a new instance. + * + * @param message the message from the server + * @param serverAddress the address of the server + */ + public MongoServerException(final String message, final ServerAddress serverAddress) { + super(message); + this.serverAddress = serverAddress; + this.errorCodeName = null; + } + + /** + * Construct a new instance. + * + * @param code the error code from the server + * @param message the message from the server + * @param serverAddress the address of the server + */ + public MongoServerException(final int code, final String message, final ServerAddress serverAddress) { + super(code, message); + this.serverAddress = serverAddress; + this.errorCodeName = null; + } + + /** + * Construct a new instance. 
+ * + * @param code the error code from the server + * @param errorCodeName the error code name from the server + * @param message the message from the server + * @param serverAddress the address of the server + * @since 4.6 + */ + public MongoServerException(final int code, @Nullable final String errorCodeName, final String message, + final ServerAddress serverAddress) { + super(code, message); + this.errorCodeName = errorCodeName; + this.serverAddress = serverAddress; + } + + /** + * Gets the address of the server. + * + * @return the address + */ + public ServerAddress getServerAddress() { + return serverAddress; + } + + /** + * Gets the error code name, which may be null. + * + * @return the error code name + * @mongodb.server.release 3.4 + * @since 4.6 + */ + @Nullable + public String getErrorCodeName() { + return errorCodeName; + } +} diff --git a/driver-core/src/main/com/mongodb/MongoServerUnavailableException.java b/driver-core/src/main/com/mongodb/MongoServerUnavailableException.java new file mode 100644 index 00000000000..cadf046ecae --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoServerUnavailableException.java @@ -0,0 +1,40 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +/** + * An exception indicating that the server on which an operation was selected to run is no longer available to execute operations. + * + *
<p>
+ * An example is when a replica set is reconfigured to hide a member on which there is an open cursor, and the application attempts to + * get more cursor results. + *
</p>
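+ * <p>
+ * A minimal sketch (the {@code collection} variable and the recovery strategy are assumptions, not part of this API) of
+ * reacting to this condition by re-issuing the query, which selects from the currently available servers:
+ * </p>
+ * <pre>{@code
+ * try (MongoCursor<Document> cursor = collection.find().iterator()) {
+ *     while (cursor.hasNext()) {
+ *         System.out.println(cursor.next());
+ *     }
+ * } catch (MongoServerUnavailableException e) {
+ *     // the member serving the cursor is gone; re-run the query to select another server
+ * }
+ * }</pre>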
+ * + * @since 4.4 + */ +public final class MongoServerUnavailableException extends MongoClientException { + private static final long serialVersionUID = 5465094535584085700L; + + /** + * Construct a new instance. + * + * @param message the message + */ + public MongoServerUnavailableException(final String message) { + super(message); + } +} diff --git a/driver-core/src/main/com/mongodb/MongoSocketClosedException.java b/driver-core/src/main/com/mongodb/MongoSocketClosedException.java new file mode 100644 index 00000000000..d6f4337ca70 --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoSocketClosedException.java @@ -0,0 +1,37 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +/** + * This exception is thrown when trying to read from or write to a closed socket. + * + * @since 3.0 + */ +public class MongoSocketClosedException extends MongoSocketException { + + private static final long serialVersionUID = -6855036625330867705L; + + /** + * Construct an instance. + * + * @param message the message + * @param address the server address + */ + public MongoSocketClosedException(final String message, final ServerAddress address) { + super(message, address); + } +} diff --git a/driver-core/src/main/com/mongodb/MongoSocketException.java b/driver-core/src/main/com/mongodb/MongoSocketException.java new file mode 100644 index 00000000000..820c2cb769f --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoSocketException.java @@ -0,0 +1,60 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +/** + * Subclass of {@link MongoException} representing a network-related exception. + * + * @since 2.12 + * @serial exclude + */ +public class MongoSocketException extends MongoException { + + private static final long serialVersionUID = -4415279469780082174L; + + private final ServerAddress serverAddress; + + /** + * Construct a new instance. + * + * @param msg the message + * @param serverAddress the address + * @param e the cause + */ + public MongoSocketException(final String msg, final ServerAddress serverAddress, final Throwable e) { + super(-2, msg, e); + this.serverAddress = serverAddress; + } + + /** + * Construct a new instance.
+ * + * @param message the message + * @param serverAddress the address + */ + public MongoSocketException(final String message, final ServerAddress serverAddress) { + super(-2, message); + this.serverAddress = serverAddress; + } + + /** + * Gets the server address for this exception. + * + * @return the address + */ + public ServerAddress getServerAddress() { + return serverAddress; + } +} diff --git a/driver-core/src/main/com/mongodb/MongoSocketOpenException.java b/driver-core/src/main/com/mongodb/MongoSocketOpenException.java new file mode 100644 index 00000000000..822c592cb0a --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoSocketOpenException.java @@ -0,0 +1,48 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +/** + * This exception is thrown when there is an exception opening a Socket. + * + * @since 3.0 + */ +public class MongoSocketOpenException extends MongoSocketException { + private static final long serialVersionUID = 4176754100200191238L; + + /** + * Construct an instance. + * + * @param message the message + * @param address the server address + * @param cause the cause + */ + public MongoSocketOpenException(final String message, final ServerAddress address, final Throwable cause) { + super(message, address, cause); + } + + /** + * Construct an instance. + * + * @param message the message + * @param address the server address + * @since 3.10 + */ + public MongoSocketOpenException(final String message, final ServerAddress address) { + super(message, address); + } +} diff --git a/driver-core/src/main/com/mongodb/MongoSocketReadException.java b/driver-core/src/main/com/mongodb/MongoSocketReadException.java new file mode 100644 index 00000000000..942429bf049 --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoSocketReadException.java @@ -0,0 +1,47 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +/** + * This exception is thrown when there is an exception reading a response from a Socket. + * + * @since 3.0 + */ +public class MongoSocketReadException extends MongoSocketException { + private static final long serialVersionUID = -1142547119966956531L; + + /** + * Construct a new instance. 
+ * + * @param message the message + * @param address the address + */ + public MongoSocketReadException(final String message, final ServerAddress address) { + super(message, address); + } + + /** + * Construct a new instance. + * + * @param message the message + * @param address the address + * @param cause the cause + */ + public MongoSocketReadException(final String message, final ServerAddress address, final Throwable cause) { + super(message, address, cause); + } +} diff --git a/driver-core/src/main/com/mongodb/MongoSocketReadTimeoutException.java b/driver-core/src/main/com/mongodb/MongoSocketReadTimeoutException.java new file mode 100644 index 00000000000..4bb658a9061 --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoSocketReadTimeoutException.java @@ -0,0 +1,39 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +/** + * This exception is thrown when there is a timeout reading a response from the socket. + * + * @since 3.0 + */ +public class MongoSocketReadTimeoutException extends MongoSocketException { + + private static final long serialVersionUID = -7237059971254608960L; + + /** + * Construct a new instance. + * + * @param message the message + * @param address the address + * @param cause the cause + */ + public MongoSocketReadTimeoutException(final String message, final ServerAddress address, final Throwable cause) { + super(message, address, cause); + } + +} diff --git a/driver-core/src/main/com/mongodb/MongoSocketWriteException.java b/driver-core/src/main/com/mongodb/MongoSocketWriteException.java new file mode 100644 index 00000000000..aeab09b463d --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoSocketWriteException.java @@ -0,0 +1,37 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +/** + * This exception is thrown when there is an exception writing to a Socket. + * + * @since 3.0 + */ +public class MongoSocketWriteException extends MongoSocketException { + private static final long serialVersionUID = 5088061954415484493L; + + /** + * Construct a new instance.
+ * + * @param message the message + * @param address the address + * @param cause the cause + */ + public MongoSocketWriteException(final String message, final ServerAddress address, final Throwable cause) { + super(message, address, cause); + } +} diff --git a/driver-core/src/main/com/mongodb/MongoSocketWriteTimeoutException.java b/driver-core/src/main/com/mongodb/MongoSocketWriteTimeoutException.java new file mode 100644 index 00000000000..bd95430e595 --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoSocketWriteTimeoutException.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; + +/** + * This exception is thrown when there is a timeout writing to the socket. + * + * @since 5.2 + */ +@Alpha(Reason.CLIENT) +public class MongoSocketWriteTimeoutException extends MongoSocketException { + + private static final long serialVersionUID = 1L; + + /** + * Construct a new instance. + * + * @param message the message + * @param address the address + * @param cause the cause + */ + public MongoSocketWriteTimeoutException(final String message, final ServerAddress address, final Throwable cause) { + super(message, address, cause); + } + +} diff --git a/driver-core/src/main/com/mongodb/MongoStalePrimaryException.java b/driver-core/src/main/com/mongodb/MongoStalePrimaryException.java new file mode 100644 index 00000000000..7654253a8c6 --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoStalePrimaryException.java @@ -0,0 +1,36 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +/** + * Exception thrown when a replica set primary is identified as a stale primary during Server Discovery and Monitoring (SDAM). + * This occurs when a new primary is discovered, causing the previously known primary to be marked stale, typically during network + * partitions or elections. + * + * @since 5.6 + */ +public class MongoStalePrimaryException extends MongoException { + + /** + * Construct an instance. + * + * @param message the exception message.
+ */ + public MongoStalePrimaryException(final String message) { + super(message); + } +} diff --git a/driver-core/src/main/com/mongodb/MongoTimeoutException.java b/driver-core/src/main/com/mongodb/MongoTimeoutException.java new file mode 100644 index 00000000000..ded287ea516 --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoTimeoutException.java @@ -0,0 +1,49 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.lang.Nullable; + +/** + * An exception indicating that the driver has timed out waiting for either a server or a connection to become available. + */ +public class MongoTimeoutException extends MongoClientException { + + private static final long serialVersionUID = -3016560214331826577L; + + /** + * Construct a new instance. + * + * @param message the message + */ + public MongoTimeoutException(final String message) { + super(message); + } + + /** + * Construct a new instance + * @param message the message + * @param cause the cause + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public MongoTimeoutException(final String message, @Nullable final Throwable cause) { + super(message, cause); + } +} diff --git a/driver-core/src/main/com/mongodb/MongoUpdatedEncryptedFieldsException.java b/driver-core/src/main/com/mongodb/MongoUpdatedEncryptedFieldsException.java new file mode 100644 index 00000000000..6c4b10ac0bc --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoUpdatedEncryptedFieldsException.java @@ -0,0 +1,68 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb; + +import org.bson.BsonDocument; + +import static com.mongodb.assertions.Assertions.assertNotNull; + +/** + * An exception thrown by methods that may automatically create data encryption keys + * where needed based on the {@code encryptedFields} configuration. + * + * @since 4.9 + */ +public final class MongoUpdatedEncryptedFieldsException extends MongoClientException { + private static final long serialVersionUID = 1; + + /** + * The encrypted fields + */ + private final BsonDocument encryptedFields; + + /** + * Not part of the public API. 
+ * + * @param encryptedFields The (partially) updated {@code encryptedFields} document, + * which allows users to infer which data keys are known to be created before the exception happened + * (see {@link #getEncryptedFields()} for more details). + * Reporting this back to a user may be helpful because creation of a data key includes persisting it in the key vault. + * @param msg The message. + * @param cause The cause. + */ + public MongoUpdatedEncryptedFieldsException(final BsonDocument encryptedFields, final String msg, final Throwable cause) { + super(msg, assertNotNull(cause)); + this.encryptedFields = assertNotNull(encryptedFields); + } + + /** + * The {@code encryptedFields} document that allows inferring which data keys are known to be created + * before {@code this} exception happened by comparing this document with the original {@code encryptedFields} configuration. + * Creation of a data key includes persisting it in the key vault. + *
<p>
+ * Note that the returned {@code encryptedFields} document is not guaranteed to contain information about all the data keys that + * may be created, only about those that the driver is certain about. For example, if persisting a data key times out, + * the driver does not know whether it can be considered created or not, and does not include information about that key in + * the {@code encryptedFields} document. You can analyze whether the {@linkplain #getCause() cause} is a definite or indefinite + * error, and rely on the returned {@code encryptedFields} to contain information about all created keys + * only if the error is definite.
</p>
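+ * <p>
+ * An illustrative sketch (the throwing method is a hypothetical stand-in; consult the documentation of methods that declare
+ * this exception) of comparing the reported document with the original configuration:
+ * </p>
+ * <pre>{@code
+ * try {
+ *     createCollectionWithEncryptedFields(); // hypothetical caller of a key-creating method
+ * } catch (MongoUpdatedEncryptedFieldsException e) {
+ *     BsonDocument updated = e.getEncryptedFields();
+ *     // data keys present here but absent from the original encryptedFields are known to be created
+ * }
+ * }</pre>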
+ * + * @return The updated {@code encryptedFields} document. + */ + public BsonDocument getEncryptedFields() { + return encryptedFields; + } +} diff --git a/driver-core/src/main/com/mongodb/MongoWriteConcernException.java b/driver-core/src/main/com/mongodb/MongoWriteConcernException.java new file mode 100644 index 00000000000..77aca03e02a --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoWriteConcernException.java @@ -0,0 +1,105 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.bulk.WriteConcernError; +import com.mongodb.lang.Nullable; + +import java.util.Collection; +import java.util.Collections; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * An exception indicating a failure to apply the write concern to the requested write operation. + * + * @see com.mongodb.WriteConcern + * + * @since 3.0 + * @serial exclude + */ +public class MongoWriteConcernException extends MongoServerException { + private static final long serialVersionUID = 4577579466973523211L; + + private final WriteConcernError writeConcernError; + private final WriteConcernResult writeConcernResult; + + /** + * Construct an instance. + * + * @param writeConcernError the non-null write concern error + * @param serverAddress the non-null server address + * @deprecated Prefer {@link #MongoWriteConcernException(WriteConcernError, WriteConcernResult, ServerAddress, Collection)} + */ + @Deprecated + public MongoWriteConcernException(final WriteConcernError writeConcernError, final ServerAddress serverAddress) { + this(writeConcernError, null, serverAddress, Collections.emptySet()); + } + + /** + * Construct an instance. + * + * @param writeConcernError the non-null write concern error + * @param writeConcernResult the write result + * @param serverAddress the non-null server address + * @since 3.2 + * @deprecated Prefer {@link #MongoWriteConcernException(WriteConcernError, WriteConcernResult, ServerAddress, Collection)} + */ + @Deprecated + public MongoWriteConcernException(final WriteConcernError writeConcernError, @Nullable final WriteConcernResult writeConcernResult, + final ServerAddress serverAddress) { + this(writeConcernError, writeConcernResult, serverAddress, Collections.emptySet()); + } + + /** + * Construct an instance.
+ * + * @param writeConcernError the non-null write concern error + * @param writeConcernResult the write result + * @param serverAddress the non-null server address + * @param errorLabels the server errorLabels + * @since 5.0 + */ + public MongoWriteConcernException(final WriteConcernError writeConcernError, @Nullable final WriteConcernResult writeConcernResult, + final ServerAddress serverAddress, final Collection<String> errorLabels) { + super(writeConcernError.getCode(), writeConcernError.getMessage(), serverAddress); + this.writeConcernResult = writeConcernResult; + this.writeConcernError = notNull("writeConcernError", writeConcernError); + addLabels(errorLabels); + } + + + /** + * Gets the write concern error. + * + * @return the write concern error, which is never null + */ + public WriteConcernError getWriteConcernError() { + return writeConcernError; + } + + /** + * Gets the write result. + * + * @return the write result + * + * @since 3.2 + */ + public WriteConcernResult getWriteResult() { + return writeConcernResult; + } +} diff --git a/driver-core/src/main/com/mongodb/MongoWriteException.java b/driver-core/src/main/com/mongodb/MongoWriteException.java new file mode 100644 index 00000000000..f54ca334640 --- /dev/null +++ b/driver-core/src/main/com/mongodb/MongoWriteException.java @@ -0,0 +1,66 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import java.util.Collection; +import java.util.Collections; + +/** + * An exception indicating the failure of a write operation. + * + * @since 3.0 + * @serial exclude + */ +public class MongoWriteException extends MongoServerException { + + private static final long serialVersionUID = -1906795074458258147L; + + private final WriteError error; + + /** + * Construct an instance. + * + * @param error the error + * @param serverAddress the server address + * @deprecated Prefer {@link #MongoWriteException(WriteError, ServerAddress, Collection)} + */ + @Deprecated + public MongoWriteException(final WriteError error, final ServerAddress serverAddress) { + this(error, serverAddress, Collections.emptySet()); + } + + /** + * Construct an instance. + * + * @param error the error + * @param serverAddress the server address + * @param errorLabels the server errorLabels + * @since 5.0 + */ + public MongoWriteException(final WriteError error, final ServerAddress serverAddress, final Collection<String> errorLabels) { + super(error.getCode(), "Write operation error on MongoDB server " + serverAddress + ". Write error: " + error + ".", serverAddress); + this.error = error; + addLabels(errorLabels); + } + + /** + * Gets the error.
+ * + * @return the error + */ + public WriteError getError() { + return error; + } +} diff --git a/driver-core/src/main/com/mongodb/ReadConcern.java b/driver-core/src/main/com/mongodb/ReadConcern.java new file mode 100644 index 00000000000..395e9255a9d --- /dev/null +++ b/driver-core/src/main/com/mongodb/ReadConcern.java @@ -0,0 +1,148 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonString; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A read concern allows clients to choose a level of isolation for their reads. + * + * @mongodb.server.release 3.2 + * @mongodb.driver.manual reference/readConcern/ Read Concern + * @since 3.2 + */ +public final class ReadConcern { + private final ReadConcernLevel level; + + /** + * Construct a new read concern. + * + * @param level the read concern level + */ + public ReadConcern(final ReadConcernLevel level) { + this.level = notNull("level", level); + } + + /** + * Use the server's default read concern. + */ + public static final ReadConcern DEFAULT = new ReadConcern(); + + /** + * The local read concern. + */ + public static final ReadConcern LOCAL = new ReadConcern(ReadConcernLevel.LOCAL); + + /** + * The majority read concern. + */ + public static final ReadConcern MAJORITY = new ReadConcern(ReadConcernLevel.MAJORITY); + + /** + * The linearizable read concern. + * + *
<p>
+ * This read concern is only compatible with {@link ReadPreference#primary()}. + *
</p>
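+ * <p>
+ * For example (the {@code collection} variable is arbitrary), pairing the two explicitly:
+ * </p>
+ * <pre>{@code
+ * collection.withReadConcern(ReadConcern.LINEARIZABLE)
+ *           .withReadPreference(ReadPreference.primary())
+ *           .find()
+ *           .first();
+ * }</pre>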
+ * + * @since 3.4 + * @mongodb.server.release 3.4 + */ + public static final ReadConcern LINEARIZABLE = new ReadConcern(ReadConcernLevel.LINEARIZABLE); + + /** + * The snapshot read concern. + * + * @since 3.8 + * @mongodb.server.release 4.0 + */ + public static final ReadConcern SNAPSHOT = new ReadConcern(ReadConcernLevel.SNAPSHOT); + + /** + * The available read concern. + * + * @since 3.9 + * @mongodb.server.release 3.6 + */ + public static final ReadConcern AVAILABLE = new ReadConcern(ReadConcernLevel.AVAILABLE); + + /** + * Gets the read concern level. + * + * @return the read concern level, which may be null (which indicates to use the server's default level) + * @since 3.6 + */ + @Nullable + public ReadConcernLevel getLevel() { + return level; + } + + /** + * @return true if this is the server default read concern + */ + public boolean isServerDefault() { + return level == null; + } + + /** + * Gets this read concern as a document. + * + * @return The read concern as a BsonDocument + */ + public BsonDocument asDocument() { + BsonDocument readConcern = new BsonDocument(); + if (level != null) { + readConcern.put("level", new BsonString(level.getValue())); + } + return readConcern; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ReadConcern that = (ReadConcern) o; + + return level == that.level; + } + + @Override + public int hashCode() { + return level != null ? level.hashCode() : 0; + } + + + @Override + public String toString() { + return "ReadConcern{" + + "level=" + level + + '}'; + } + + private ReadConcern() { + this.level = null; + } +} diff --git a/driver-core/src/main/com/mongodb/ReadConcernLevel.java b/driver-core/src/main/com/mongodb/ReadConcernLevel.java new file mode 100644 index 00000000000..c959804a8c5 --- /dev/null +++ b/driver-core/src/main/com/mongodb/ReadConcernLevel.java @@ -0,0 +1,97 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import static com.mongodb.assertions.Assertions.notNull; +import static java.lang.String.format; + +/** + * A read concern level allows clients to choose a level of isolation for their reads. + * + * @mongodb.server.release 3.2 + * @mongodb.driver.manual reference/read-concern/#read-concern-levels Read Concern Levels + * @since 3.2 + */ +public enum ReadConcernLevel { + + /** + * The local read concern level. + */ + LOCAL("local"), + + /** + * The majority read concern level. + */ + MAJORITY("majority"), + + /** + * The linearizable read concern level. + * + *
<p>
+ * This read concern level is only compatible with {@link ReadPreference#primary()}. + *
</p>
+ * + * @since 3.4 + * @mongodb.server.release 3.4 + */ + LINEARIZABLE("linearizable"), + + /** + * The snapshot read concern level. + * + * @since 3.8 + * @mongodb.server.release 4.0 + */ + SNAPSHOT("snapshot"), + + /** + * The available read concern level. + * + * @since 3.9 + * @mongodb.server.release 3.6 + */ + AVAILABLE("available"); + + private final String value; + + ReadConcernLevel(final String readConcernLevel) { + this.value = readConcernLevel; + } + + /** + * @return the String representation of the read concern level that the MongoDB server understands + */ + public String getValue() { + return value; + } + + /** + * Returns the ReadConcernLevel from the string read concern level. + * + * @param readConcernLevel the read concern level string. + * @return the read concern level + */ + public static ReadConcernLevel fromString(final String readConcernLevel) { + notNull("readConcernLevel", readConcernLevel); + for (ReadConcernLevel level : ReadConcernLevel.values()) { + if (readConcernLevel.equalsIgnoreCase(level.value)) { + return level; + } + } + throw new IllegalArgumentException(format("'%s' is not a valid readConcernLevel", readConcernLevel)); + } +} diff --git a/driver-core/src/main/com/mongodb/ReadPreference.java b/driver-core/src/main/com/mongodb/ReadPreference.java new file mode 100644 index 00000000000..036a4de5df5 --- /dev/null +++ b/driver-core/src/main/com/mongodb/ReadPreference.java @@ -0,0 +1,746 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.TaggableReadPreference.NearestReadPreference; +import com.mongodb.TaggableReadPreference.PrimaryPreferredReadPreference; +import com.mongodb.TaggableReadPreference.SecondaryPreferredReadPreference; +import com.mongodb.TaggableReadPreference.SecondaryReadPreference; +import com.mongodb.annotations.Immutable; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonString; + +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.connection.ClusterDescriptionHelper.getAny; +import static com.mongodb.internal.connection.ClusterDescriptionHelper.getPrimaries; +import static java.util.Collections.singletonList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +/** + * A class that represents preferred replica set members to which a query or command can be sent. + * + * @mongodb.driver.manual core/read-preference Read Preference + */ +@Immutable +public abstract class ReadPreference { + + ReadPreference() { + } + + /** + * Create a new ReadPreference instance with a new tag set. + *
<p>
+ * Note: this method is not supported for a primary read preference. + *
</p>
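+ * <p>
+ * A sketch (the tag name and value are arbitrary):
+ * </p>
+ * <pre>{@code
+ * ReadPreference preference = ReadPreference.secondary()
+ *         .withTagSet(new TagSet(new Tag("dc", "east")));
+ * }</pre>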
+ * + * @param tagSet the new tag set + * @return a new ReadPreference instance with a new tag set + * @since 4.1 + */ + public abstract ReadPreference withTagSet(TagSet tagSet); + + /** + * Create a new ReadPreference instance with a new tag set list. + *
<p>
+ * Note: this method is not supported for a primary read preference. + *
</p>
+ * + * @param tagSet the new tag set list + * @return a new ReadPreference instance with a new tag set list + * @since 4.1 + */ + public abstract ReadPreference withTagSetList(List<TagSet> tagSet); + + /** + * Create a new ReadPreference instance with the maximum acceptable staleness of a secondary in order to be considered for + * read operations. + *
<p>
+ * Note: this method is not supported for a primary read preference. + *
</p>
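+ * <p>
+ * A sketch (the two-minute value is arbitrary, subject to the minimum described below):
+ * </p>
+ * <pre>{@code
+ * ReadPreference preference = ReadPreference.secondaryPreferred()
+ *         .withMaxStalenessMS(120L, TimeUnit.SECONDS);
+ * }</pre>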
+ * + * @param maxStalenessMS the max allowable staleness of secondaries. The minimum value is either 90 seconds, or the heartbeat frequency + * plus 10 seconds, whichever is greatest. + * @param timeUnit the time unit of maxStaleness + * @return a new ReadPreference instance with a new maximum allowable staleness + * @since 4.1 + * @mongodb.server.release 3.4 + */ + public abstract ReadPreference withMaxStalenessMS(Long maxStalenessMS, TimeUnit timeUnit); + + /** + * Create a new ReadPreference instance with hedge options. + *
<p>
+ * Note: this method is not supported for a primary read preference. + *
</p>
+ * + * @param hedgeOptions the hedge options + * @return a new ReadPreference instance with hedge options + * @since 4.1 + * @mongodb.server.release 4.4 + * @deprecated As of MongoDB 8.1, the server ignores the option and periodically logs a warning + */ + @Deprecated + public abstract ReadPreference withHedgeOptions(ReadPreferenceHedgeOptions hedgeOptions); + + /** + * True if this read preference allows reading from a secondary member of a replica set. + * + * @return if reading from a secondary is ok + * @since 4.4 + */ + public abstract boolean isSecondaryOk(); + + /** + * Gets the name of this read preference. + * + * @return the name + */ + public abstract String getName(); + + /** + * Gets a document representing this read preference in the wire protocol. + * + * @return the document + */ + public abstract BsonDocument toDocument(); + + /** + * Chooses the servers from the given cluster that match this read preference. + * + * @param clusterDescription the cluster description + * @return a list of matching server descriptions, which may be empty but will not be null + */ + public final List<ServerDescription> choose(final ClusterDescription clusterDescription) { + switch (clusterDescription.getType()) { + case REPLICA_SET: + return chooseForReplicaSet(clusterDescription); + case SHARDED: + case STANDALONE: + return chooseForNonReplicaSet(clusterDescription); + case LOAD_BALANCED: + return clusterDescription.getServerDescriptions(); + case UNKNOWN: + return Collections.emptyList(); + default: + throw new UnsupportedOperationException("Unsupported cluster type: " + clusterDescription.getType()); + } + } + + /** + * Choose for non-replica sets. + * + * @param clusterDescription the cluster description + * @return the list of matching server descriptions + */ + protected abstract List<ServerDescription> chooseForNonReplicaSet(ClusterDescription clusterDescription); + + /** + * Choose for replica sets. + * + * @param clusterDescription the cluster description + * @return the list of matching server descriptions + */ + protected abstract List<ServerDescription> chooseForReplicaSet(ClusterDescription clusterDescription); + + /** + * Gets a read preference that forces reads to the primary. + * + * @return ReadPreference which reads from primary only + */ + public static ReadPreference primary() { + return PRIMARY; + } + + /** + * Gets a read preference that forces reads to the primary if available, otherwise to a secondary. + * + * @return ReadPreference which reads primary if available. + */ + public static ReadPreference primaryPreferred() { + return PRIMARY_PREFERRED; + } + + /** + * Gets a read preference that forces reads to a secondary. + * + * @return ReadPreference which reads secondary. + */ + public static ReadPreference secondary() { + return SECONDARY; + } + + /** + * Gets a read preference that forces reads to a secondary if one is available, otherwise to the primary. + * + * @return ReadPreference which reads secondary if available, otherwise from primary. + */ + public static ReadPreference secondaryPreferred() { + return SECONDARY_PREFERRED; + } + + /** + * Gets a read preference that forces reads to a primary or a secondary. + * + * @return ReadPreference which reads nearest + */ + public static ReadPreference nearest() { + return NEAREST; + } + + /** + * Gets a read preference that forces reads to the primary if available, otherwise to a secondary. + * + * @param maxStaleness the max allowable staleness of secondaries. The minimum value is either 90 seconds, or the heartbeat frequency + * plus 10 seconds, whichever is greatest.
+ * @param timeUnit the time unit of maxStaleness + * @return ReadPreference which reads primary if available. + * @since 3.4 + * @see TaggableReadPreference#getMaxStaleness(TimeUnit) + */ + public static ReadPreference primaryPreferred(final long maxStaleness, final TimeUnit timeUnit) { + return new PrimaryPreferredReadPreference(Collections.emptyList(), maxStaleness, timeUnit); + } + + /** + * Gets a read preference that forces reads to a secondary that is less stale than the given maximum. + * + *
<p>
+ * The driver estimates the staleness of each secondary, based on lastWriteDate values provided in server hello responses, + * and selects only those secondaries whose staleness is less than or equal to maxStaleness. + *
</p>
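+ * <p>
+ * For example (90 seconds satisfies the documented minimum under the default heartbeat frequency):
+ * </p>
+ * <pre>{@code
+ * ReadPreference preference = ReadPreference.secondary(90, TimeUnit.SECONDS);
+ * }</pre>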
+ * + * @param maxStaleness the max allowable staleness of secondaries. The minimum value is either 90 seconds, or the heartbeat frequency + * plus 10 seconds, whichever is greatest. + * @param timeUnit the time unit of maxStaleness + * @return ReadPreference which reads secondary. + * @since 3.4 + * @see TaggableReadPreference#getMaxStaleness(TimeUnit) + */ + public static ReadPreference secondary(final long maxStaleness, final TimeUnit timeUnit) { + return new SecondaryReadPreference(Collections.emptyList(), maxStaleness, timeUnit); + } + + /** + * Gets a read preference that forces reads to a secondary that is less stale than the given maximum if one is available, + * otherwise to the primary. + * + *
<p>
+ * The driver estimates the staleness of each secondary, based on lastWriteDate values provided in server hello responses, + * and selects only those secondaries whose staleness is less than or equal to maxStaleness. + *
</p> + * + * @param maxStaleness the max allowable staleness of secondaries. The minimum value is either 90 seconds, or the heartbeat frequency + * plus 10 seconds, whichever is greatest. + * @param timeUnit the time unit of maxStaleness + * @return ReadPreference which reads secondary if available, otherwise from primary. + * @since 3.4 + * @see TaggableReadPreference#getMaxStaleness(TimeUnit) + */ + public static ReadPreference secondaryPreferred(final long maxStaleness, final TimeUnit timeUnit) { + return new SecondaryPreferredReadPreference(Collections.emptyList(), maxStaleness, timeUnit); + } + + /** + * Gets a read preference that forces reads to a primary or a secondary that is less stale than the given maximum. + * + *
<p>
+ * The driver estimates the staleness of each secondary, based on lastWriteDate values provided in server hello responses, + * and selects only those secondaries whose staleness is less than or equal to maxStaleness. + *
</p>
+ * + * @param maxStaleness the max allowable staleness of secondaries. The minimum value is either 90 seconds, or the heartbeat frequency + * plus 10 seconds, whichever is greatest. + * @param timeUnit the time unit of maxStaleness + * @return ReadPreference which reads nearest + * @since 3.4 + * @see TaggableReadPreference#getMaxStaleness(TimeUnit) + */ + public static ReadPreference nearest(final long maxStaleness, final TimeUnit timeUnit) { + return new NearestReadPreference(Collections.emptyList(), maxStaleness, timeUnit); + } + + /** + * Gets a read preference that forces reads to the primary if available, otherwise to a secondary with the given set of tags. + * + * @param tagSet the set of tags to limit the list of secondaries to. + * @return ReadPreference which reads primary if available, otherwise a secondary respective of tags. + * @since 2.13 + */ + public static TaggableReadPreference primaryPreferred(final TagSet tagSet) { + return new PrimaryPreferredReadPreference(singletonList(tagSet), null, MILLISECONDS); + } + + /** + * Gets a read preference that forces reads to a secondary with the given set of tags. + * + * @param tagSet the set of tags to limit the list of secondaries to + * @return ReadPreference which reads secondary respective of tags. + * @since 2.13 + */ + public static TaggableReadPreference secondary(final TagSet tagSet) { + return new SecondaryReadPreference(singletonList(tagSet), null, MILLISECONDS); + } + + /** + * Gets a read preference that forces reads to a secondary with the given set of tags, or the primary if none are available. + * + * @param tagSet the set of tags to limit the list of secondaries to + * @return ReadPreference which reads secondary if available respective of tags, otherwise from primary irrespective of tags. + * @since 2.13 + */ + public static TaggableReadPreference secondaryPreferred(final TagSet tagSet) { + return new SecondaryPreferredReadPreference(singletonList(tagSet), null, MILLISECONDS); + } + + /** + * Gets a read preference that forces reads to the primary or a secondary with the given set of tags. + * + * @param tagSet the set of tags to limit the list of secondaries to + * @return ReadPreference which reads nearest node respective of tags. + * @since 2.13 + */ + public static TaggableReadPreference nearest(final TagSet tagSet) { + return new NearestReadPreference(singletonList(tagSet), null, MILLISECONDS); + } + + /** + * Gets a read preference that forces reads to the primary if available, otherwise to a secondary with the given set of tags + * that is less stale than the given maximum. + * + *
<p>
+ * The driver estimates the staleness of each secondary, based on lastWriteDate values provided in server hello responses, + * and selects only those secondaries whose staleness is less than or equal to maxStaleness. + *
</p>
+ * + * @param tagSet the set of tags to limit the list of secondaries to. + * @param maxStaleness the max allowable staleness of secondaries. The minimum value is either 90 seconds, or the heartbeat frequency + * plus 10 seconds, whichever is greatest. + * @param timeUnit the time unit of maxStaleness + * @return ReadPreference which reads primary if available, otherwise a secondary respective of tags. + * @since 3.4 + * @see TaggableReadPreference#getMaxStaleness(TimeUnit) + */ + public static TaggableReadPreference primaryPreferred(final TagSet tagSet, + final long maxStaleness, final TimeUnit timeUnit) { + return new PrimaryPreferredReadPreference(singletonList(tagSet), maxStaleness, timeUnit); + } + + /** + * Gets a read preference that forces reads to a secondary with the given set of tags that is less stale than the given maximum. + * + *
<p>
+ * The driver estimates the staleness of each secondary, based on lastWriteDate values provided in server hello responses, + * and selects only those secondaries whose staleness is less than or equal to maxStaleness. + *
</p>
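+ * <p>
+ * A sketch combining tags and staleness (the tag names and the two-minute value are arbitrary):
+ * </p>
+ * <pre>{@code
+ * TagSet tags = new TagSet(Arrays.asList(new Tag("dc", "east"), new Tag("use", "reporting")));
+ * ReadPreference preference = ReadPreference.secondary(tags, 2, TimeUnit.MINUTES);
+ * }</pre>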
+ * + * @param tagSet the set of tags to limit the list of secondaries to + * @param maxStaleness the max allowable staleness of secondaries. The minimum value is either 90 seconds, or the heartbeat frequency + * plus 10 seconds, whichever is greatest. + * @param timeUnit the time unit of maxStaleness + * @return ReadPreference which reads secondary respective of tags. + * @since 3.4 + * @see TaggableReadPreference#getMaxStaleness(TimeUnit) + */ + public static TaggableReadPreference secondary(final TagSet tagSet, + final long maxStaleness, final TimeUnit timeUnit) { + return new SecondaryReadPreference(singletonList(tagSet), maxStaleness, timeUnit); + } + + /** + * Gets a read preference that forces reads to a secondary with the given set of tags that is less stale than the given maximum, + * or the primary if none are available. + * + *
<p>
+ * The driver estimates the staleness of each secondary, based on lastWriteDate values provided in server hello responses, + * and selects only those secondaries whose staleness is less than or equal to maxStaleness. + *
</p> + * + * @param tagSet the set of tags to limit the list of secondaries to + * @param maxStaleness the max allowable staleness of secondaries. The minimum value is either 90 seconds, or the heartbeat frequency + * plus 10 seconds, whichever is greatest. + * @param timeUnit the time unit of maxStaleness + * @return ReadPreference which reads secondary if available respective of tags, otherwise from primary irrespective of tags. + * @since 3.4 + * @see TaggableReadPreference#getMaxStaleness(TimeUnit) + */ + public static TaggableReadPreference secondaryPreferred(final TagSet tagSet, + final long maxStaleness, final TimeUnit timeUnit) { + return new SecondaryPreferredReadPreference(singletonList(tagSet), maxStaleness, timeUnit); + } + + /** + * Gets a read preference that forces reads to the primary or a secondary with the given set of tags that is less stale than the + * given maximum. + * + *
<p>
+ * The driver estimates the staleness of each secondary, based on lastWriteDate values provided in server hello responses, + * and selects only those secondaries whose staleness is less than or equal to maxStaleness. + *
</p>
+ * + * @param tagSet the set of tags to limit the list of secondaries to + * @param maxStaleness the max allowable staleness of secondaries. The minimum value is either 90 seconds, or the heartbeat frequency + * plus 10 seconds, whichever is greatest. + * @param timeUnit the time unit of maxStaleness + * @return ReadPreference which reads nearest node respective of tags. + * @since 3.4 + * @see TaggableReadPreference#getMaxStaleness(TimeUnit) + */ + public static TaggableReadPreference nearest(final TagSet tagSet, + final long maxStaleness, final TimeUnit timeUnit) { + return new NearestReadPreference(singletonList(tagSet), maxStaleness, timeUnit); + } + + + /** + * Gets a read preference that forces reads to the primary if available, otherwise to a secondary with one of the given sets of tags. + * The driver will look for a secondary with each tag set in the given list, stopping after one is found, + * or failing if no secondary can be found that matches any of the tag sets in the list. + * + * @param tagSetList the list of tag sets to limit the list of secondaries to + * @return ReadPreference which reads primary if available, otherwise a secondary respective of tags. + * @since 2.13 + */ + public static TaggableReadPreference primaryPreferred(final List<TagSet> tagSetList) { + return new PrimaryPreferredReadPreference(tagSetList, null, MILLISECONDS); + } + + /** + * Gets a read preference that forces reads to a secondary with one of the given sets of tags. + * The driver will look for a secondary with each tag set in the given list, stopping after one is found, + * or failing if no secondary can be found that matches any of the tag sets in the list. + * + * @param tagSetList the list of tag sets to limit the list of secondaries to + * @return ReadPreference which reads secondary respective of tags. + * @since 2.13 + */ + public static TaggableReadPreference secondary(final List<TagSet> tagSetList) { + return new SecondaryReadPreference(tagSetList, null, MILLISECONDS); + } + + /** + * Gets a read preference that forces reads to a secondary with one of the given sets of tags. + * The driver will look for a secondary with each tag set in the given list, stopping after one is found, + * or the primary if none are available. + * + * @param tagSetList the list of tag sets to limit the list of secondaries to + * @return ReadPreference which reads secondary if available respective of tags, otherwise from primary irrespective of tags. + * @since 2.13 + */ + public static TaggableReadPreference secondaryPreferred(final List<TagSet> tagSetList) { + return new SecondaryPreferredReadPreference(tagSetList, null, MILLISECONDS); + } + + /** + * Gets a read preference that forces reads to the primary or a secondary with one of the given sets of tags. + * The driver will look for a secondary with each tag set in the given list, stopping after one is found, + * or the primary if none are available. + * + * @param tagSetList the list of tag sets to limit the list of secondaries to + * @return ReadPreference which reads nearest node respective of tags. + * @since 2.13 + */ + public static TaggableReadPreference nearest(final List<TagSet> tagSetList) { + return new NearestReadPreference(tagSetList, null, MILLISECONDS); + } + + /** + * Gets a read preference that forces reads to the primary if available, otherwise to a secondary with one of the given sets of tags + * that is less stale than the given maximum. + * + *
<p>
+ * The driver will look for a secondary with each tag set in the given list, stopping after one is found, + * or failing if no secondary can be found that matches any of the tag sets in the list. + *
</p>
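+ * <p>
+ * For example (the data-center tags and staleness value are arbitrary), preferring the primary, then an east member,
+ * then a west member:
+ * </p>
+ * <pre>{@code
+ * List<TagSet> tagSetList = Arrays.asList(
+ *         new TagSet(new Tag("dc", "east")),
+ *         new TagSet(new Tag("dc", "west")));
+ * ReadPreference preference = ReadPreference.primaryPreferred(tagSetList, 120, TimeUnit.SECONDS);
+ * }</pre>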
+ * + *
<p>
+ * The driver estimates the staleness of each secondary, based on lastWriteDate values provided in server hello responses, + * and selects only those secondaries whose staleness is less than or equal to maxStaleness. + *
</p>
+ * + * @param tagSetList the list of tag sets to limit the list of secondaries to + * @param maxStaleness the max allowable staleness of secondaries. The minimum value is either 90 seconds, or the heartbeat frequency + * plus 10 seconds, whichever is greatest. + * @param timeUnit the time unit of maxStaleness + * @return ReadPreference which reads primary if available, otherwise a secondary respective of tags. + * @since 3.4 + * @see TaggableReadPreference#getMaxStaleness(TimeUnit) + */ + public static TaggableReadPreference primaryPreferred(final List<TagSet> tagSetList, + final long maxStaleness, final TimeUnit timeUnit) { + return new PrimaryPreferredReadPreference(tagSetList, maxStaleness, timeUnit); + } + + /** + * Gets a read preference that forces reads to a secondary with one of the given sets of tags that is less stale than + * the given maximum. + * + *
<p>
+ * The driver will look for a secondary with each tag set in the given list, stopping after one is found, + * or failing if no secondary can be found that matches any of the tag sets in the list. + *
</p> + * + * <p>
+ * The driver estimates the staleness of each secondary, based on lastWriteDate values provided in server hello responses, + * and selects only those secondaries whose staleness is less than or equal to maxStaleness. + *
</p>
+ * + * @param tagSetList the list of tag sets to limit the list of secondaries to + * @param maxStaleness the max allowable staleness of secondaries. The minimum value is either 90 seconds, or the heartbeat frequency + * plus 10 seconds, whichever is greatest. + * @param timeUnit the time unit of maxStaleness + * @return ReadPreference which reads secondary respective of tags. + * @since 3.4 + * @see TaggableReadPreference#getMaxStaleness(TimeUnit) + */ + public static TaggableReadPreference secondary(final List<TagSet> tagSetList, + final long maxStaleness, final TimeUnit timeUnit) { + return new SecondaryReadPreference(tagSetList, maxStaleness, timeUnit); + } + + /** + * Gets a read preference that forces reads to a secondary with one of the given sets of tags that is less stale than + * the given maximum. + * + *
<p>
+ * The driver will look for a secondary with each tag set in the given list, stopping after one is found, + * or the primary if none are available. + *
</p> + * + * <p>
+ * The driver estimates the staleness of each secondary, based on lastWriteDate values provided in server hello responses, + * and selects only those secondaries whose staleness is less than or equal to maxStaleness. + *
</p>
+ * + * @param tagSetList the list of tag sets to limit the list of secondaries to + * @param maxStaleness the max allowable staleness of secondaries. The minimum value is either 90 seconds, or the heartbeat frequency + * plus 10 seconds, whichever is greatest. + * @param timeUnit the time unit of maxStaleness + * @return ReadPreference which reads secondary if available respective of tags, otherwise from primary irrespective of tags. + * @since 3.4 + * @see TaggableReadPreference#getMaxStaleness(TimeUnit) + */ + public static TaggableReadPreference secondaryPreferred(final List<TagSet> tagSetList, + final long maxStaleness, final TimeUnit timeUnit) { + return new SecondaryPreferredReadPreference(tagSetList, maxStaleness, timeUnit); + } + + /** + * Gets a read preference that forces reads to the primary or a secondary with one of the given sets of tags that is less stale than + * the given maximum. + * + *
<p>
+ * The driver will look for a secondary with each tag set in the given list, stopping after one is found, + * or the primary if none are available. + *
</p> + * + * <p>
+ * The driver estimates the staleness of each secondary, based on lastWriteDate values provided in server hello responses, + * and selects only those secondaries whose staleness is less than or equal to maxStaleness. + *
</p>
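For reference, such a preference serializes via `toDocument()` (defined in `TaggableReadPreference` later in this diff) roughly as sketched below; the exact JSON rendering shown in the comment assumes the default relaxed output mode:

```java
import com.mongodb.ReadPreference;
import com.mongodb.Tag;
import com.mongodb.TagSet;

import java.util.concurrent.TimeUnit;

import static java.util.Collections.singletonList;

public final class NearestDocumentFormExample {
    public static void main(final String[] args) {
        // A "nearest" preference restricted to rack=r1, allowing 90 seconds of staleness.
        System.out.println(ReadPreference.nearest(
                        singletonList(new TagSet(new Tag("rack", "r1"))), 90, TimeUnit.SECONDS)
                .toDocument().toJson());
        // Expected shape: {"mode": "nearest", "tags": [{"rack": "r1"}], "maxStalenessSeconds": 90}
    }
}
```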
+ * + * @param tagSetList the list of tag sets to limit the list of secondaries to + * @param maxStaleness the max allowable staleness of secondaries. The minimum value is either 90 seconds, or the heartbeat frequency + * plus 10 seconds, whichever is greatest. + * @param timeUnit the time unit of maxStaleness + * @return ReadPreference which reads nearest node respective of tags. + * @since 3.4 + * @see TaggableReadPreference#getMaxStaleness(TimeUnit) + */ + public static TaggableReadPreference nearest(final List<TagSet> tagSetList, + final long maxStaleness, final TimeUnit timeUnit) { + return new NearestReadPreference(tagSetList, maxStaleness, timeUnit); + } + + /** + * Creates a read preference from the given read preference name. + * + * @param name the name of the read preference + * @return the read preference + */ + public static ReadPreference valueOf(final String name) { + notNull("name", name); + + String nameToCheck = name.toLowerCase(); + + if (nameToCheck.equals(PRIMARY.getName().toLowerCase())) { + return PRIMARY; + } + if (nameToCheck.equals(SECONDARY.getName().toLowerCase())) { + return SECONDARY; + } + if (nameToCheck.equals(SECONDARY_PREFERRED.getName().toLowerCase())) { + return SECONDARY_PREFERRED; + } + if (nameToCheck.equals(PRIMARY_PREFERRED.getName().toLowerCase())) { + return PRIMARY_PREFERRED; + } + if (nameToCheck.equals(NEAREST.getName().toLowerCase())) { + return NEAREST; + } + + throw new IllegalArgumentException("No match for read preference of " + name); + } + + /** + * Creates a taggable read preference from the given read preference name and list of tag sets. + * + * @param name the name of the read preference + * @param tagSetList the list of tag sets + * @return the taggable read preference + * @since 2.13 + */ + public static TaggableReadPreference valueOf(final String name, final List<TagSet> tagSetList) { + return valueOf(name, tagSetList, null, MILLISECONDS); + } + + /** + * Creates a taggable read preference from the given read preference name, list of tag sets, and max allowable staleness of secondaries. + * + *
<p>
+ * The driver estimates the staleness of each secondary, based on lastWriteDate values provided in server hello responses, + * and selects only those secondaries whose staleness is less than or equal to maxStaleness. + *
</p>
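As an illustration of these `valueOf` overloads, both plain and tagged lookups resolve case-insensitively (a minimal sketch; the tag values are made up):

```java
import com.mongodb.ReadPreference;
import com.mongodb.Tag;
import com.mongodb.TagSet;

import java.util.concurrent.TimeUnit;

import static java.util.Collections.singletonList;

public final class ReadPreferenceValueOfExample {
    public static void main(final String[] args) {
        // Case-insensitive mode lookup.
        ReadPreference primary = ReadPreference.valueOf("Primary");

        // Tagged lookup with a 100-second staleness bound.
        ReadPreference tagged = ReadPreference.valueOf("secondaryPreferred",
                singletonList(new TagSet(new Tag("dc", "east"))), 100, TimeUnit.SECONDS);

        // Combining "primary" with tag sets or max staleness throws IllegalArgumentException.
        System.out.println(primary.getName() + " / " + tagged.getName());
    }
}
```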
+ * + * @param name the name of the read preference + * @param tagSetList the list of tag sets + * @param maxStaleness the max allowable staleness of secondaries. The minimum value is either 90 seconds, or the heartbeat frequency + * plus 10 seconds, whichever is greatest. + * @param timeUnit the time unit of maxStaleness + * @return the taggable read preference + * @since 3.4 + * @see TaggableReadPreference#getMaxStaleness(TimeUnit) + */ + public static TaggableReadPreference valueOf(final String name, final List<TagSet> tagSetList, final long maxStaleness, + final TimeUnit timeUnit) { + return valueOf(name, tagSetList, (Long) maxStaleness, timeUnit); + } + + private static TaggableReadPreference valueOf(final String name, final List<TagSet> tagSetList, @Nullable final Long maxStaleness, + final TimeUnit timeUnit) { + notNull("name", name); + notNull("tagSetList", tagSetList); + notNull("timeUnit", timeUnit); + + String nameToCheck = name.toLowerCase(); + + if (nameToCheck.equals(PRIMARY.getName().toLowerCase())) { + throw new IllegalArgumentException("Primary read preference can not also specify tag sets, max staleness or hedge"); + } + + if (nameToCheck.equals(SECONDARY.getName().toLowerCase())) { + return new SecondaryReadPreference(tagSetList, maxStaleness, timeUnit); + } + if (nameToCheck.equals(SECONDARY_PREFERRED.getName().toLowerCase())) { + return new SecondaryPreferredReadPreference(tagSetList, maxStaleness, timeUnit); + } + if (nameToCheck.equals(PRIMARY_PREFERRED.getName().toLowerCase())) { + return new PrimaryPreferredReadPreference(tagSetList, maxStaleness, timeUnit); + } + if (nameToCheck.equals(NEAREST.getName().toLowerCase())) { + return new NearestReadPreference(tagSetList, maxStaleness, timeUnit); + } + + throw new IllegalArgumentException("No match for read preference of " + name); + } + + /** + * Preference to read from primary only. Cannot be combined with tags.
+ */ + private static final class PrimaryReadPreference extends ReadPreference { + private PrimaryReadPreference() { + } + + @Override + public ReadPreference withTagSet(final TagSet tagSet) { + throw new UnsupportedOperationException("Primary read preference can not also specify tag sets"); + } + + @Override + public TaggableReadPreference withTagSetList(final List tagSet) { + throw new UnsupportedOperationException("Primary read preference can not also specify tag sets"); + } + + @Override + public TaggableReadPreference withMaxStalenessMS(final Long maxStalenessMS, final TimeUnit timeUnit) { + throw new UnsupportedOperationException("Primary read preference can not also specify max staleness"); + } + + @Deprecated + @Override + public TaggableReadPreference withHedgeOptions(final ReadPreferenceHedgeOptions hedgeOptions) { + throw new UnsupportedOperationException("Primary read preference can not also specify hedge"); + } + + @Override + public boolean isSecondaryOk() { + return false; + } + + @Override + public String toString() { + return getName(); + } + + @Override + public boolean equals(final Object o) { + return o != null && getClass() == o.getClass(); + } + + @Override + public int hashCode() { + return getName().hashCode(); + } + + public BsonDocument toDocument() { + return new BsonDocument("mode", new BsonString(getName())); + } + + @Override + protected List chooseForReplicaSet(final ClusterDescription clusterDescription) { + return getPrimaries(clusterDescription); + } + + @Override + protected List chooseForNonReplicaSet(final ClusterDescription clusterDescription) { + return getAny(clusterDescription); + } + + @Override + public String getName() { + return "primary"; + } + } + + private static final ReadPreference PRIMARY; + private static final ReadPreference SECONDARY; + private static final ReadPreference SECONDARY_PREFERRED; + private static final ReadPreference PRIMARY_PREFERRED; + private static final ReadPreference NEAREST; + + static { + PRIMARY = new PrimaryReadPreference(); + SECONDARY = new SecondaryReadPreference(); + SECONDARY_PREFERRED = new SecondaryPreferredReadPreference(); + PRIMARY_PREFERRED = new PrimaryPreferredReadPreference(); + NEAREST = new NearestReadPreference(); + } +} diff --git a/driver-core/src/main/com/mongodb/ReadPreferenceHedgeOptions.java b/driver-core/src/main/com/mongodb/ReadPreferenceHedgeOptions.java new file mode 100644 index 00000000000..655f747c808 --- /dev/null +++ b/driver-core/src/main/com/mongodb/ReadPreferenceHedgeOptions.java @@ -0,0 +1,122 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.annotations.Immutable; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; + +/** + * Options to apply to hedged reads in the server. 
+ * + * @since 4.1 + * @mongodb.server.release 4.4 + * @deprecated As of MongoDB 8.1, the server ignores the option and periodically logs a warning + */ +@Deprecated +@Immutable +public final class ReadPreferenceHedgeOptions { + private final boolean enabled; + + /** + * Gets whether hedged reads are enabled in the server. + * + * @return true if hedged reads are enabled in the server + */ + public boolean isEnabled() { + return enabled; + } + + /** + * Gets an instance of a builder + * + * @return a builder instance + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Convert the hedge options to a BsonDocument. + * + * @return a BsonDocument containing the hedge options + */ + public BsonDocument toBsonDocument() { + return new BsonDocument("enabled", new BsonBoolean(enabled)); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ReadPreferenceHedgeOptions that = (ReadPreferenceHedgeOptions) o; + + return enabled == that.enabled; + } + + @Override + public int hashCode() { + return enabled ? 1 : 0; + } + + @Override + public String toString() { + return "ReadPreferenceHedgeOptions{" + + "enabled=" + enabled + + '}'; + } + + /** + * The builder for read preference hedge options + */ + public static final class Builder { + private boolean enabled; + + /** + * Sets whether hedged reads are enabled in the server. + * + * @param enabled true if hedged reads are enabled + * @return this + */ + public Builder enabled(final boolean enabled) { + this.enabled = enabled; + return this; + } + + /** + * Build the transaction options instance. + * + * @return The {@code TransactionOptions} + */ + public ReadPreferenceHedgeOptions build() { + return new ReadPreferenceHedgeOptions(this); + } + + private Builder() { + } + } + + + private ReadPreferenceHedgeOptions(final Builder builder) { + enabled = builder.enabled; + } +} diff --git a/driver-core/src/main/com/mongodb/RequestContext.java b/driver-core/src/main/com/mongodb/RequestContext.java new file mode 100644 index 00000000000..889edb8ce78 --- /dev/null +++ b/driver-core/src/main/com/mongodb/RequestContext.java @@ -0,0 +1,163 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.lang.Nullable; + +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Optional; +import java.util.stream.Stream; + +/** + * The request context, useful for implementing distributed tracing. + * + * @see MongoClientSettings#getContextProvider() + * @since 4.4 + */ +public interface RequestContext { + /** + * Resolve a value given a key that exists within the {@link RequestContext}, or throw + * a {@link NoSuchElementException} if the key is not present. 
+ * + * @param key a lookup key to resolve the value within the context + * @param an unchecked casted generic for fluent typing convenience + * @return the value resolved for this key (throws if key not found) + * @throws NoSuchElementException when the given key is not present + * @see #getOrDefault(Object, Object) + * @see #getOrEmpty(Object) + * @see #hasKey(Object) + */ + T get(Object key); + + /** + * Resolve a value given a type key within the {@link RequestContext}. + * + * @param key a type key to resolve the value within the context + * @param an unchecked casted generic for fluent typing convenience + * @return the value resolved for this type key (throws if key not found) + * @throws NoSuchElementException when the given type key is not present + * @see #getOrDefault(Object, Object) + * @see #getOrEmpty(Object) + */ + default T get(Class key) { + T v = get((Object) key); + if (key.isInstance(v)) { + return v; + } + throw new NoSuchElementException("Context does not contain a value of type " + key + .getName()); + } + + /** + * Resolve a value given a key within the {@link RequestContext}. If unresolved return the + * passed default value. + * + * @param an unchecked casted generic for fluent typing convenience + * @param key a lookup key to resolve the value within the context + * @param defaultValue a fallback value if key doesn't resolve + * @return the value resolved for this key, or the given default if not present + */ + @Nullable + default T getOrDefault(Object key, @Nullable T defaultValue) { + if (!hasKey(key)) { + return defaultValue; + } + return get(key); + } + + /** + * Resolve a value given a key within the {@link RequestContext}. + * + * @param an unchecked casted generic for fluent typing convenience + * @param key a lookup key to resolve the value within the context + * @return an {@link Optional} of the value for that key. + */ + default Optional getOrEmpty(Object key) { + if (hasKey(key)) { + return Optional.of(get(key)); + } + return Optional.empty(); + } + + /** + * Return true if a particular key resolves to a value within the {@link RequestContext}. + * + * @param key a lookup key to test for + * @return true if this context contains the given key + */ + boolean hasKey(Object key); + + /** + * Return true if the {@link RequestContext} is empty. + * + * @return true if the {@link RequestContext} is empty. + */ + boolean isEmpty(); + + /** + * Modifies this instance with the given key and value. If that key existed in the current {@link RequestContext}, its associated + * value is replaced. + * + * @param key the key to add/update + * @param value the value to associate to the key + * @throws NullPointerException if either the key or value are null + */ + void put(Object key, Object value); + + /** + * Modifies this instance with the given key and value only if the value is not {@literal null}. If that key existed + * in the current Context, its associated value is replaced in the resulting {@link RequestContext}. + * + * @param key the key to add/update + * @param valueOrNull the value to associate to the key, null to ignore the operation + * @throws NullPointerException if the key is null + */ + default void putNonNull(Object key, @Nullable Object valueOrNull) { + if (valueOrNull != null) { + put(key, valueOrNull); + } + } + + /** + * Delete the given key and its associated value from the RequestContext. + * + * @param key the key to remove. 
+ */ + void delete(Object key); + + /** + * Return the size of this {@link RequestContext}, the number of key/value pairs stored inside it. + * + * @return the size of the {@link RequestContext} + */ + int size(); + + /** + * Stream key/value pairs from this {@link RequestContext} + * + *
<p>
+ * It is not specified whether modification of a {@code Map.Entry} instance in the {@code Stream} results in a modification of the + * state of the {@code RequestContext}, or whether the {@code Map.Entry} instances are modifiable. That is considered an + * implementation detail, so users of this method should not rely on the behavior one way or the other unless the implementing class + * has documented it. + *
</p>
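The diff defines only this interface; for illustration, a minimal map-backed implementation might look like the following sketch. The class name is hypothetical, and the generic signatures (`<T> T get(Object)`, `Stream<Map.Entry<Object, Object>> stream()`) are assumed from the surrounding Javadoc:

```java
import com.mongodb.RequestContext;

import java.util.Map;
import java.util.NoSuchElementException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Stream;

// Hypothetical helper, not part of the driver: backs RequestContext with a map.
public final class MapRequestContext implements RequestContext {
    private final Map<Object, Object> values = new ConcurrentHashMap<>();

    @Override
    @SuppressWarnings("unchecked")
    public <T> T get(final Object key) {
        Object value = values.get(key);
        if (value == null) {
            throw new NoSuchElementException("Context does not contain key " + key);
        }
        return (T) value;
    }

    @Override
    public boolean hasKey(final Object key) {
        return values.containsKey(key);
    }

    @Override
    public boolean isEmpty() {
        return values.isEmpty();
    }

    @Override
    public void put(final Object key, final Object value) {
        values.put(key, value);
    }

    @Override
    public void delete(final Object key) {
        values.remove(key);
    }

    @Override
    public int size() {
        return values.size();
    }

    @Override
    public Stream<Map.Entry<Object, Object>> stream() {
        return values.entrySet().stream();
    }
}
```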
+ * + * @return a {@link Stream} of key/value pairs held by this context + */ + Stream> stream(); +} diff --git a/driver-core/src/main/com/mongodb/ServerAddress.java b/driver-core/src/main/com/mongodb/ServerAddress.java new file mode 100644 index 00000000000..f9c31180dd9 --- /dev/null +++ b/driver-core/src/main/com/mongodb/ServerAddress.java @@ -0,0 +1,206 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.annotations.Immutable; +import com.mongodb.lang.Nullable; + +import java.io.Serializable; +import java.net.InetAddress; +import java.net.InetSocketAddress; + +/** + * Represents the location of a Mongo server - i.e. server name and port number + */ +@Immutable +public class ServerAddress implements Serializable { + private static final long serialVersionUID = 4027873363095395504L; + + /** + * The host. + */ + private final String host; + /** + * The port. + */ + private final int port; + + /** + * Creates a ServerAddress with default host and port + */ + public ServerAddress() { + this(defaultHost(), defaultPort()); + } + + /** + * Creates a ServerAddress with default port + * + * @param host hostname + */ + public ServerAddress(@Nullable final String host) { + this(host, defaultPort()); + } + + /** + * Creates a ServerAddress with default port + * + * @param inetAddress host address + */ + public ServerAddress(final InetAddress inetAddress) { + this(inetAddress.getHostName(), defaultPort()); + } + + /** + * Creates a ServerAddress + * + * @param inetAddress host address + * @param port mongod port + */ + public ServerAddress(final InetAddress inetAddress, final int port) { + this(inetAddress.getHostName(), port); + } + + /** + * Creates a ServerAddress + * + * @param inetSocketAddress inet socket address containing hostname and port + */ + public ServerAddress(final InetSocketAddress inetSocketAddress) { + this(inetSocketAddress.getAddress(), inetSocketAddress.getPort()); + } + + /** + * Creates a ServerAddress + * + * @param host hostname + * @param port mongod port + */ + public ServerAddress(@Nullable final String host, final int port) { + String hostToUse = host; + if (hostToUse == null) { + hostToUse = defaultHost(); + } + hostToUse = hostToUse.trim(); + if (hostToUse.length() == 0) { + hostToUse = defaultHost(); + } + int portToUse = port; + + if (hostToUse.startsWith("[")) { + int idx = host.indexOf("]"); + if (idx == -1) { + throw new IllegalArgumentException("an IPV6 address must be enclosed with '[' and ']'" + + " according to RFC 2732."); + } + + int portIdx = host.indexOf("]:"); + if (portIdx != -1) { + if (port != defaultPort()) { + throw new IllegalArgumentException("can't specify port in construct and via host"); + } + portToUse = Integer.parseInt(host.substring(portIdx + 2)); + } + hostToUse = host.substring(1, idx); + } else { + int idx = hostToUse.indexOf(":"); + int lastIdx = hostToUse.lastIndexOf(":"); + if (idx == lastIdx && idx > 0) { + if (port != defaultPort()) 
{ + throw new IllegalArgumentException("can't specify port in construct and via host"); + } + try { + portToUse = Integer.parseInt(hostToUse.substring(idx + 1)); + } catch (NumberFormatException e) { + throw new MongoException("host and port should be specified in host:port format"); + } + hostToUse = hostToUse.substring(0, idx).trim(); + } + } + this.host = hostToUse.toLowerCase(); + this.port = portToUse; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ServerAddress that = (ServerAddress) o; + + if (port != that.port) { + return false; + } + + if (!host.equals(that.host)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = host.hashCode(); + result = 31 * result + port; + return result; + } + + /** + * Gets the hostname + * + * @return hostname + */ + public String getHost() { + return host; + } + + /** + * Gets the port number + * + * @return port + */ + public int getPort() { + return port; + } + + @Override + public String toString() { + return host + ":" + port; + } + + /** + * Returns the default database host: "127.0.0.1" + * + * @return IP address of default host. + */ + public static String defaultHost() { + return "127.0.0.1"; // NOPMD + } + + /** + * Returns the default database port: 27017 + * + * @return the default port + */ + public static int defaultPort() { + return 27017; + } +} diff --git a/driver-core/src/main/com/mongodb/ServerApi.java b/driver-core/src/main/com/mongodb/ServerApi.java new file mode 100644 index 00000000000..590e3332663 --- /dev/null +++ b/driver-core/src/main/com/mongodb/ServerApi.java @@ -0,0 +1,193 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.lang.Nullable; + +import java.util.Objects; +import java.util.Optional; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A specification of the server API on which the application relies. + * + * @since 4.3 + */ +public final class ServerApi { + + private final ServerApiVersion version; + private final Boolean deprecationErrors; + private final Boolean strict; + + private ServerApi(final ServerApiVersion version, @Nullable final Boolean strict, @Nullable final Boolean deprecationErrors) { + this.version = notNull("version", version); + this.deprecationErrors = deprecationErrors; + this.strict = strict; + } + + /** + * Gets the server API version + * + * @return the server API version + */ + public ServerApiVersion getVersion() { + return version; + } + + /** + * Gets whether the application requires strict server API version enforcement. + * + *
<p>
+ * The default is false. + *
</p>
+ * + * @return whether the application requires strict server API version enforcement + */ + public Optional getStrict() { + return Optional.ofNullable(strict); + } + + /** + * Gets whether the application requires use of deprecated server APIs to be reported as errors. + * + *
<p>
+ * The default is false. + *
</p>
+ * + * @return whether the application requires use of deprecated server APIs to be reported as errors + */ + public Optional getDeprecationErrors() { + return Optional.ofNullable(deprecationErrors); + } + + /** + * Gets a {@code Builder} for instances of this class. + * + * @return the builder + */ + public static Builder builder() { + return new Builder(); + } + + @Override + public String toString() { + return "ServerApi{" + + "version=" + version + + ", deprecationErrors=" + deprecationErrors + + ", strict=" + strict + + '}'; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ServerApi serverApi = (ServerApi) o; + + if (version != serverApi.version) { + return false; + } + if (!Objects.equals(deprecationErrors, serverApi.deprecationErrors)) { + return false; + } + if (!Objects.equals(strict, serverApi.strict)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = version.hashCode(); + result = 31 * result + (deprecationErrors != null ? deprecationErrors.hashCode() : 0); + result = 31 * result + (strict != null ? strict.hashCode() : 0); + return result; + } + + /** + * A builder for {@code ServerApi} so that {@code ServerApi} can be immutable, and to support easier construction + * through chaining. + */ + @NotThreadSafe + public static final class Builder { + + private ServerApiVersion version; + private Boolean deprecationErrors; + private Boolean strict; + + private Builder() { + } + + /** + * Gets the server API version + * + * @param version the server API version + * @return the server API version + */ + public Builder version(final ServerApiVersion version) { + this.version = version; + return this; + } + + /** + * Sets whether the application requires use of deprecated server APIs to be reported as errors. + * + *
<p>
+ * The default is false. + *
</p>
+ * + * @param deprecationErrors whether the application requires use of deprecated server APIs to be reported as errors + * @return this + */ + public Builder deprecationErrors(final boolean deprecationErrors) { + this.deprecationErrors = deprecationErrors; + return this; + } + + /** + * Sets whether the application requires strict server API version enforcement. + * + *
<p>
+ * The default is false. + *
</p>
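Typical builder usage looks like the following sketch; wiring the resulting `ServerApi` into client settings is outside this diff:

```java
import com.mongodb.ServerApi;
import com.mongodb.ServerApiVersion;

public final class ServerApiExample {
    public static void main(final String[] args) {
        // Pin the application to server API version 1, rejecting commands
        // outside that API and reporting deprecated API usage as errors.
        ServerApi serverApi = ServerApi.builder()
                .version(ServerApiVersion.V1)
                .strict(true)
                .deprecationErrors(true)
                .build();

        System.out.println(serverApi.getStrict()); // Optional[true]
    }
}
```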
+ * + * @param strict whether the application requires strict server API version enforcement + * @return this + */ + public Builder strict(final boolean strict) { + this.strict = strict; + return this; + } + + /** + * Build an instance of {@code ServerApi}. + * + * @return the settings from this builder + */ + public ServerApi build() { + return new ServerApi(version, strict, deprecationErrors); + } + } +} diff --git a/driver-core/src/main/com/mongodb/ServerApiVersion.java b/driver-core/src/main/com/mongodb/ServerApiVersion.java new file mode 100644 index 00000000000..eb6552cc7c0 --- /dev/null +++ b/driver-core/src/main/com/mongodb/ServerApiVersion.java @@ -0,0 +1,62 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +/** + * The Server API version. + * + * @since 4.3 + */ +public enum ServerApiVersion { + /** + * Server API version 1 + */ + V1("1"); + + private final String versionString; + + ServerApiVersion(final String versionString) { + this.versionString = versionString; + } + + /** + * Gets the version as a string. + * + * @return the version string + */ + public String getValue() { + return versionString; + } + + /** + * Gets the {@code ServerApiVersion} that corresponds to the given value. + * + * @param value the String value of the desired server API version + * @return the corresponding {@code ServerApiVersion} + * @throws MongoClientException if no matching enumeration exists + */ + public static ServerApiVersion findByValue(final String value) { + //noinspection SwitchStatementWithTooFewBranches + switch (value) { + case "1": + return V1; + default: + throw new MongoClientException("Unsupported server API version: " + value); + + } + } +} diff --git a/driver-core/src/main/com/mongodb/ServerCursor.java b/driver-core/src/main/com/mongodb/ServerCursor.java new file mode 100644 index 00000000000..dead3676643 --- /dev/null +++ b/driver-core/src/main/com/mongodb/ServerCursor.java @@ -0,0 +1,107 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.annotations.Immutable; + +import java.io.Serializable; + +/** + * A class representing a cursor id associated with a server address (host/port) Since cursor ids are only useful in the context of a single + * MongoDB server process, you need both values to do a getMore on the cursor. 
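For illustration, pairing the two values might look like this sketch (the cursor id, host, and port are made-up values):

```java
import com.mongodb.ServerAddress;
import com.mongodb.ServerCursor;

public final class ServerCursorExample {
    public static void main(final String[] args) {
        // A cursor id of 0 is rejected by the constructor.
        ServerCursor cursor = new ServerCursor(123456789L,
                new ServerAddress("db1.example.com", 27017));
        System.out.println(cursor); // ServerCursor{getId=123456789, address=db1.example.com:27017}
    }
}
```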
+ * + * @since 3.0 + */ +@Immutable +public final class ServerCursor implements Serializable { + + private static final long serialVersionUID = -7013636754565190109L; + + /** + * The cursor id. + */ + private final long id; + /** + * The server address. + */ + private final ServerAddress address; + + /** + * Construct an instance. + * + * @param id the non-null cursor id + * @param address the non-null server address that has the cursor + */ + public ServerCursor(final long id, final ServerAddress address) { + if (id == 0) { + throw new IllegalArgumentException(); + } + this.id = id; + this.address = address; + } + + /** + * Gets the cursor id that the server uses to uniquely identify the cursor. + * + * @return the cursor id + */ + public long getId() { + return id; + } + + /** + * Gets the server address. + * + * @return the server address + */ + public ServerAddress getAddress() { + return address; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ServerCursor that = (ServerCursor) o; + + if (id != that.id) { + return false; + } + if (!address.equals(that.address)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = (int) (id ^ (id >>> 32)); + result = 31 * result + (address.hashCode()); + return result; + } + + @Override + public String toString() { + return "ServerCursor{getId=" + id + ", address=" + address + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/SubjectProvider.java b/driver-core/src/main/com/mongodb/SubjectProvider.java new file mode 100644 index 00000000000..ce029810a53 --- /dev/null +++ b/driver-core/src/main/com/mongodb/SubjectProvider.java @@ -0,0 +1,60 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.lang.Nullable; + +import javax.security.auth.Subject; +import javax.security.auth.login.LoginException; +import java.security.PrivilegedAction; + +/** + * This interface enables applications to take full control of the lifecycle of the {@link Subject} with which authentication requests + * are executed. For each authentication request, the driver will call the {@link #getSubject()} method and execute the SASL + * conversation via a call to {@link Subject#doAs(Subject, PrivilegedAction)}. + *
<p>
+ * Implementations of this interface will typically cache a {@code Subject} instance for some period of time before replacing it with a + * different instance, say, after the expiration time of a ticket has passed. + *
</p> + * <p>
+ * Applications should register an instance of a class implementing this interface as a mechanism property of a {@link MongoCredential} + * via a call to {@link MongoCredential#withMechanismProperty(String, Object)} using the key + * {@link MongoCredential#JAVA_SUBJECT_PROVIDER_KEY}. + *
</p> + * <p>
+ * If use of the same {@code Subject} for the lifetime of the application is sufficient, an application can simply create a single + * {@code Subject} and associate it with a {@code MongoCredential} as a mechanism property using the key + * {@link MongoCredential#JAVA_SUBJECT_KEY}. + * + *
</p>
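For illustration, a caching provider along the lines described above might look like this sketch; the JAAS login configuration name and the eight-hour ticket lifetime are assumptions, not driver requirements:

```java
import com.mongodb.SubjectProvider;

import javax.security.auth.Subject;
import javax.security.auth.login.LoginContext;
import javax.security.auth.login.LoginException;

// Hypothetical provider: re-login via JAAS once the cached Subject is older
// than the assumed ticket lifetime. Synchronized because the interface is
// annotated @ThreadSafe.
public final class RefreshingSubjectProvider implements SubjectProvider {
    private static final long TICKET_LIFETIME_MS = 8 * 60 * 60 * 1000; // assumed 8-hour tickets

    private Subject cachedSubject;
    private long loginTimeMillis;

    @Override
    public synchronized Subject getSubject() throws LoginException {
        long now = System.currentTimeMillis();
        if (cachedSubject == null || now - loginTimeMillis > TICKET_LIFETIME_MS) {
            // The configuration name is an assumption; use whatever JAAS entry applies.
            LoginContext loginContext = new LoginContext("com.sun.security.jgss.krb5.initiate");
            loginContext.login();
            cachedSubject = loginContext.getSubject();
            loginTimeMillis = now;
        }
        return cachedSubject;
    }
}
```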
+ * @see MongoCredential + * @see MongoCredential#JAVA_SUBJECT_PROVIDER_KEY + * @since 4.2 + */ +@ThreadSafe +public interface SubjectProvider { + + /** + * Gets the Subject to use for an authentication request. + * + * @return the {@code Subject}, which may be null + * @throws LoginException a login exception + */ + @Nullable + Subject getSubject() throws LoginException; +} diff --git a/driver-core/src/main/com/mongodb/Tag.java b/driver-core/src/main/com/mongodb/Tag.java new file mode 100644 index 00000000000..bb4c6ac1bcb --- /dev/null +++ b/driver-core/src/main/com/mongodb/Tag.java @@ -0,0 +1,96 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.annotations.Immutable; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A replica set tag. + * + * @since 2.13 + * @mongodb.driver.manual tutorial/configure-replica-set-tag-sets Tag Sets + */ +@Immutable +public final class Tag { + private final String name; + private final String value; + + /** + * Construct a new instance. + * + * @param name the tag name + * @param value the value of the tag + */ + public Tag(final String name, final String value) { + this.name = notNull("name", name); + this.value = notNull("value", value); + } + + /** + * Gets the name of the replica set tag. + * @return the name + */ + public String getName() { + return name; + } + + /** + * Gets the value of the replica set tag. + * @return the value + */ + public String getValue() { + return value; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Tag that = (Tag) o; + + if (!name.equals(that.name)) { + return false; + } + if (!value.equals(that.value)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = name.hashCode(); + result = 31 * result + value.hashCode(); + return result; + } + + @Override + public String toString() { + return "Tag{" + + "name='" + name + '\'' + + ", value='" + value + '\'' + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/TagSet.java b/driver-core/src/main/com/mongodb/TagSet.java new file mode 100644 index 00000000000..8c11da5ef8f --- /dev/null +++ b/driver-core/src/main/com/mongodb/TagSet.java @@ -0,0 +1,125 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb; + +import com.mongodb.annotations.Immutable; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Set; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * An immutable set of tags, used to select members of a replica set to use for read operations. + * + * @mongodb.driver.manual tutorial/configure-replica-set-tag-sets Tag Sets + * @see com.mongodb.ReadPreference + * @since 2.13 + */ +@Immutable +public final class TagSet implements Iterable { + private final List wrapped; + + /** + * An empty set of tags. + */ + public TagSet() { + wrapped = Collections.emptyList(); + } + + /** + * A set of tags contain the single given tag + * + * @param tag the tag + */ + public TagSet(final Tag tag) { + notNull("tag", tag); + wrapped = Collections.singletonList(tag); + } + + /** + * A set of tags containing the given list of tags. + * + * @param tagList the list of tags + */ + public TagSet(final List tagList) { + notNull("tagList", tagList); + + // Ensure no duplicates + Set tagNames = new HashSet<>(); + for (Tag tag : tagList) { + if (tag == null) { + throw new IllegalArgumentException("Null tags are not allowed"); + } + if (!tagNames.add(tag.getName())) { + throw new IllegalArgumentException("Duplicate tag names not allowed in a tag set: " + tag.getName()); + } + } + ArrayList copy = new ArrayList<>(tagList); + Collections.sort(copy, (o1, o2) -> o1.getName().compareTo(o2.getName())); + this.wrapped = Collections.unmodifiableList(copy); + } + + @Override + public Iterator iterator() { + return wrapped.iterator(); + } + + /** + * Returns {@code true} if this tag set contains all of the elements of the specified tag set. + * + * @param tagSet tag set to be checked for containment in this tag set + * @return {@code true} if this tag set contains all of the elements of the specified tag set + */ + public boolean containsAll(final TagSet tagSet) { + return wrapped.containsAll(tagSet.wrapped); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + TagSet tags = (TagSet) o; + + if (!wrapped.equals(tags.wrapped)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return wrapped.hashCode(); + } + + @Override + public String toString() { + return "TagSet{" + + wrapped + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/TaggableReadPreference.java b/driver-core/src/main/com/mongodb/TaggableReadPreference.java new file mode 100644 index 00000000000..426dc69e966 --- /dev/null +++ b/driver-core/src/main/com/mongodb/TaggableReadPreference.java @@ -0,0 +1,579 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb; + +import com.mongodb.annotations.Immutable; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ClusterType; +import com.mongodb.connection.ServerDescription; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonInt64; +import org.bson.BsonString; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.connection.ClusterDescriptionHelper.getAny; +import static com.mongodb.internal.connection.ClusterDescriptionHelper.getAnyPrimaryOrSecondary; +import static com.mongodb.internal.connection.ClusterDescriptionHelper.getPrimaries; +import static com.mongodb.internal.connection.ClusterDescriptionHelper.getSecondaries; +import static java.lang.String.format; +import static java.util.Collections.singletonList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.SECONDS; + +/** + * Abstract class for all preference which can be combined with tags + */ +@Immutable +public abstract class TaggableReadPreference extends ReadPreference { + private static final int SMALLEST_MAX_STALENESS_MS = 90000; + private static final int IDLE_WRITE_PERIOD_MS = 10000; + + private final List tagSetList = new ArrayList<>(); + private final Long maxStalenessMS; + @SuppressWarnings("deprecation") + private final ReadPreferenceHedgeOptions hedgeOptions; + + TaggableReadPreference() { + this.maxStalenessMS = null; + this.hedgeOptions = null; + } + + @SuppressWarnings("deprecation") + TaggableReadPreference(final List tagSetList, @Nullable final Long maxStaleness, final TimeUnit timeUnit, + @Nullable final ReadPreferenceHedgeOptions hedgeOptions) { + notNull("tagSetList", tagSetList); + isTrueArgument("maxStaleness is null or >= 0", maxStaleness == null || maxStaleness >= 0); + this.maxStalenessMS = maxStaleness == null ? null : MILLISECONDS.convert(maxStaleness, timeUnit); + + this.tagSetList.addAll(tagSetList); + this.hedgeOptions = hedgeOptions; + } + + @Override + public abstract TaggableReadPreference withTagSet(TagSet tagSet); + + @Override + public abstract TaggableReadPreference withTagSetList(List tagSet); + + @Override + public abstract TaggableReadPreference withMaxStalenessMS(Long maxStalenessMS, TimeUnit timeUnit); + + @Deprecated + @Override + public abstract TaggableReadPreference withHedgeOptions(ReadPreferenceHedgeOptions hedgeOptions); + + @Override + public boolean isSecondaryOk() { + return true; + } + + @Override + public BsonDocument toDocument() { + BsonDocument readPrefObject = new BsonDocument("mode", new BsonString(getName())); + + if (!tagSetList.isEmpty()) { + readPrefObject.put("tags", tagsListToBsonArray()); + } + + if (maxStalenessMS != null) { + readPrefObject.put("maxStalenessSeconds", new BsonInt64(MILLISECONDS.toSeconds(maxStalenessMS))); + } + + if (hedgeOptions != null) { + readPrefObject.put("hedge", hedgeOptions.toBsonDocument()); + } + return readPrefObject; + } + + /** + * Gets the list of tag sets as a list of {@code TagSet} instances. 
+ * + * @return the list of tag sets + * @since 2.13 + */ + public List getTagSetList() { + return Collections.unmodifiableList(tagSetList); + } + + /** + * Gets the maximum acceptable staleness of a secondary in order to be considered for read operations. + *
<p>
+ * The maximum staleness feature is designed to prevent badly-lagging servers from being selected. The staleness estimate is imprecise + * and shouldn't be used to try to select "up-to-date" secondaries. + *
</p> + * <p>
+ * The driver estimates the staleness of each secondary, based on lastWriteDate values provided in server hello responses, + * and selects only those secondaries whose staleness is less than or equal to maxStaleness. + *
</p>
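As a quick check of the floor enforced in `selectFreshServers` (defined later in this class): with an assumed 100-second heartbeat frequency, the effective minimum is 110 seconds rather than 90:

```java
import java.util.concurrent.TimeUnit;

public final class MaxStalenessBounds {
    public static void main(final String[] args) {
        long smallestMaxStalenessMs = 90_000;   // SMALLEST_MAX_STALENESS_MS
        long idleWritePeriodMs = 10_000;        // IDLE_WRITE_PERIOD_MS
        long heartbeatFrequencyMs = TimeUnit.SECONDS.toMillis(100); // assumed setting

        // maxStaleness must be at least max(90s, heartbeat frequency + idle write period).
        long minimumMs = Math.max(smallestMaxStalenessMs, heartbeatFrequencyMs + idleWritePeriodMs);
        System.out.println(minimumMs); // 110000
    }
}
```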
+ * @param timeUnit the time unit in which to return the value + * @return the maximum acceptable staleness in the given time unit, or null if the value is not set + * @mongodb.server.release 3.4 + * @since 3.4 + */ + @Nullable + public Long getMaxStaleness(final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + if (maxStalenessMS == null) { + return null; + } + return timeUnit.convert(maxStalenessMS, MILLISECONDS); + } + + /** + * Gets the hedge options. + * + * @return the hedge options + * @mongodb.server.release 4.4 + * @since 4.1 + * @deprecated As of MongoDB 8.1, the server ignores the option and periodically logs a warning + */ + @Deprecated + @Nullable + public ReadPreferenceHedgeOptions getHedgeOptions() { + return hedgeOptions; + } + + @Override + public String toString() { + return "ReadPreference{" + + "name=" + getName() + + (tagSetList.isEmpty() ? "" : ", tagSetList=" + tagSetList) + + (maxStalenessMS == null ? "" : ", maxStalenessMS=" + maxStalenessMS) + + ", hedgeOptions=" + hedgeOptions + + '}'; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + TaggableReadPreference that = (TaggableReadPreference) o; + + if (!Objects.equals(maxStalenessMS, that.maxStalenessMS)) { + return false; + } + if (!tagSetList.equals(that.tagSetList)) { + return false; + } + if (!Objects.equals(hedgeOptions, that.hedgeOptions)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = tagSetList.hashCode(); + result = 31 * result + getName().hashCode(); + result = 31 * result + (maxStalenessMS != null ? maxStalenessMS.hashCode() : 0); + result = 31 * result + (hedgeOptions != null ? hedgeOptions.hashCode() : 0); + return result; + } + + @Override + protected List chooseForNonReplicaSet(final ClusterDescription clusterDescription) { + return selectFreshServers(clusterDescription, getAny(clusterDescription)); + } + + static ClusterDescription copyClusterDescription(final ClusterDescription clusterDescription, + final List selectedServers) { + return new ClusterDescription(clusterDescription.getConnectionMode(), + clusterDescription.getType(), + selectedServers, + clusterDescription.getClusterSettings(), + clusterDescription.getServerSettings()); + } + + List selectFreshServers(final ClusterDescription clusterDescription, + final List servers) { + Long maxStaleness = getMaxStaleness(MILLISECONDS); + if (maxStaleness == null) { + return servers; + } + + if (clusterDescription.getServerSettings() == null) { + throw new MongoConfigurationException("heartbeat frequency must be provided in cluster description"); + } + + if (!serversAreAllThreeDotFour(clusterDescription)) { + throw new MongoConfigurationException("Servers must all be at least version 3.4 when max staleness is configured"); + } + + if (clusterDescription.getType() != ClusterType.REPLICA_SET) { + return servers; + } + + long heartbeatFrequencyMS = clusterDescription.getServerSettings().getHeartbeatFrequency(MILLISECONDS); + + if (maxStaleness < Math.max(SMALLEST_MAX_STALENESS_MS, heartbeatFrequencyMS + IDLE_WRITE_PERIOD_MS)) { + if (SMALLEST_MAX_STALENESS_MS > heartbeatFrequencyMS + IDLE_WRITE_PERIOD_MS){ + throw new MongoConfigurationException(format("Max staleness (%d sec) must be at least 90 seconds", + getMaxStaleness(SECONDS))); + } else { + throw new MongoConfigurationException(format("Max staleness (%d ms) must be at least the heartbeat period (%d ms) " + + "plus the idle 
write period (%d ms)", + maxStaleness, heartbeatFrequencyMS, IDLE_WRITE_PERIOD_MS)); + } + } + List freshServers = new ArrayList<>(servers.size()); + + ServerDescription primary = findPrimary(clusterDescription); + + if (primary != null) { + for (ServerDescription cur : servers) { + if (cur.isPrimary()) { + freshServers.add(cur); + } else { + if (getStalenessOfSecondaryRelativeToPrimary(primary, cur, heartbeatFrequencyMS) <= maxStaleness) { + freshServers.add(cur); + } + } + } + } else { + ServerDescription mostUpToDateSecondary = findMostUpToDateSecondary(clusterDescription); + if (mostUpToDateSecondary != null) { + for (ServerDescription cur : servers) { + if (getLastWriteDateNonNull(mostUpToDateSecondary).getTime() - getLastWriteDateNonNull(cur).getTime() + + heartbeatFrequencyMS <= maxStaleness) { + freshServers.add(cur); + } + } + } + } + + return freshServers; + } + + private long getStalenessOfSecondaryRelativeToPrimary(final ServerDescription primary, final ServerDescription serverDescription, + final long heartbeatFrequencyMS) { + return getLastWriteDateNonNull(primary).getTime() + + (serverDescription.getLastUpdateTime(MILLISECONDS) - primary.getLastUpdateTime(MILLISECONDS)) + - getLastWriteDateNonNull(serverDescription).getTime() + heartbeatFrequencyMS; + } + + @Nullable + private ServerDescription findPrimary(final ClusterDescription clusterDescription) { + for (ServerDescription cur : clusterDescription.getServerDescriptions()) { + if (cur.isPrimary()) { + return cur; + } + } + return null; + } + + private ServerDescription findMostUpToDateSecondary(final ClusterDescription clusterDescription) { + ServerDescription mostUpdateToDateSecondary = null; + for (ServerDescription cur : clusterDescription.getServerDescriptions()) { + if (cur.isSecondary()) { + if (mostUpdateToDateSecondary == null + || getLastWriteDateNonNull(cur).getTime() > getLastWriteDateNonNull(mostUpdateToDateSecondary).getTime()) { + mostUpdateToDateSecondary = cur; + } + } + } + return mostUpdateToDateSecondary; + } + + private Date getLastWriteDateNonNull(final ServerDescription serverDescription) { + Date lastWriteDate = serverDescription.getLastWriteDate(); + if (lastWriteDate == null) { + throw new MongoClientException("lastWriteDate should not be null in " + serverDescription); + } + return lastWriteDate; + } + + private boolean serversAreAllThreeDotFour(final ClusterDescription clusterDescription) { + for (ServerDescription cur : clusterDescription.getServerDescriptions()) { + if (cur.isOk() && cur.getMaxWireVersion() < 5) { + return false; + } + } + return true; + } + + /** + * Read from secondary + */ + static class SecondaryReadPreference extends TaggableReadPreference { + SecondaryReadPreference() { + } + + SecondaryReadPreference(final List tagSetList, @Nullable final Long maxStaleness, final TimeUnit timeUnit) { + this(tagSetList, maxStaleness, timeUnit, null); + } + + @SuppressWarnings("deprecation") + SecondaryReadPreference(final List tagSetList, @Nullable final Long maxStaleness, final TimeUnit timeUnit, + @Nullable final ReadPreferenceHedgeOptions hedgeOptions) { + super(tagSetList, maxStaleness, timeUnit, hedgeOptions); + } + + @Override + public TaggableReadPreference withTagSet(final TagSet tagSet) { + return withTagSetList(singletonList(tagSet)); + } + + @Override + public TaggableReadPreference withTagSetList(final List tagSetList) { + notNull("tagSetList", tagSetList); + return new SecondaryReadPreference(tagSetList, getMaxStaleness(MILLISECONDS), MILLISECONDS, getHedgeOptions()); + } + 
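To make the estimate computed by `getStalenessOfSecondaryRelativeToPrimary` above concrete, here is a worked example with illustrative timestamps (all values in milliseconds):

```java
public final class StalenessEstimateExample {
    public static void main(final String[] args) {
        // Illustrative values only:
        long primaryLastWriteDate = 1_000_000;
        long secondaryLastWriteDate = 935_000;   // secondary's last replicated write is ~65s older
        long primaryLastUpdateTime = 1_000_500;  // when each server description was last refreshed
        long secondaryLastUpdateTime = 1_002_500;
        long heartbeatFrequencyMs = 10_000;

        // Mirrors the formula above: P.lastWriteDate + (S.lastUpdateTime - P.lastUpdateTime)
        //                            - S.lastWriteDate + heartbeatFrequencyMS
        long stalenessMs = primaryLastWriteDate
                + (secondaryLastUpdateTime - primaryLastUpdateTime)
                - secondaryLastWriteDate
                + heartbeatFrequencyMs;
        System.out.println(stalenessMs); // 77000: selectable only if maxStaleness >= 77 seconds
    }
}
```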
+ @Override + public TaggableReadPreference withMaxStalenessMS(@Nullable final Long maxStaleness, final TimeUnit timeUnit) { + isTrueArgument("maxStaleness is null or >= 0", maxStaleness == null || maxStaleness >= 0); + return new SecondaryReadPreference(getTagSetList(), maxStaleness, timeUnit, getHedgeOptions()); + } + + @Deprecated + @Override + public TaggableReadPreference withHedgeOptions(final ReadPreferenceHedgeOptions hedgeOptions) { + return new SecondaryReadPreference(getTagSetList(), getMaxStaleness(MILLISECONDS), MILLISECONDS, hedgeOptions); + } + + @Override + public String getName() { + return "secondary"; + } + + @Override + protected List chooseForReplicaSet(final ClusterDescription clusterDescription) { + List selectedServers = selectFreshServers(clusterDescription, getSecondaries(clusterDescription)); + if (!getTagSetList().isEmpty()) { + ClusterDescription nonStaleClusterDescription = copyClusterDescription(clusterDescription, selectedServers); + selectedServers = Collections.emptyList(); + for (final TagSet tagSet : getTagSetList()) { + List servers = getSecondaries(nonStaleClusterDescription, tagSet); + if (!servers.isEmpty()) { + selectedServers = servers; + break; + } + } + } + return selectedServers; + } + } + + /** + * Read from secondary if available, otherwise from primary, irrespective of tags. + */ + static class SecondaryPreferredReadPreference extends SecondaryReadPreference { + SecondaryPreferredReadPreference() { + } + + SecondaryPreferredReadPreference(final List tagSetList, @Nullable final Long maxStaleness, final TimeUnit timeUnit) { + this(tagSetList, maxStaleness, timeUnit, null); + } + + @SuppressWarnings("deprecation") + SecondaryPreferredReadPreference(final List tagSetList, @Nullable final Long maxStaleness, final TimeUnit timeUnit, + @Nullable final ReadPreferenceHedgeOptions hedgeOptions) { + super(tagSetList, maxStaleness, timeUnit, hedgeOptions); + } + + @Override + public TaggableReadPreference withTagSet(final TagSet tagSet) { + return withTagSetList(singletonList(tagSet)); + } + + @Override + public TaggableReadPreference withTagSetList(final List tagSetList) { + notNull("tagSetList", tagSetList); + return new SecondaryPreferredReadPreference(tagSetList, getMaxStaleness(MILLISECONDS), MILLISECONDS, getHedgeOptions()); + } + + @Override + public TaggableReadPreference withMaxStalenessMS(@Nullable final Long maxStaleness, final TimeUnit timeUnit) { + isTrueArgument("maxStaleness is null or >= 0", maxStaleness == null || maxStaleness >= 0); + return new SecondaryPreferredReadPreference(getTagSetList(), maxStaleness, timeUnit, getHedgeOptions()); + } + + @Deprecated + @Override + public TaggableReadPreference withHedgeOptions(final ReadPreferenceHedgeOptions hedgeOptions) { + return new SecondaryPreferredReadPreference(getTagSetList(), getMaxStaleness(MILLISECONDS), MILLISECONDS, hedgeOptions); + } + + @Override + public String getName() { + return "secondaryPreferred"; + } + + @Override + protected List chooseForReplicaSet(final ClusterDescription clusterDescription) { + List selectedServers = super.chooseForReplicaSet(clusterDescription); + if (selectedServers.isEmpty()) { + selectedServers = getPrimaries(clusterDescription); + } + return selectedServers; + } + } + + /** + * Read from nearest node respective of tags. 
+ */ + static class NearestReadPreference extends TaggableReadPreference { + NearestReadPreference() { + } + + NearestReadPreference(final List tagSetList, @Nullable final Long maxStaleness, final TimeUnit timeUnit) { + this(tagSetList, maxStaleness, timeUnit, null); + } + + @SuppressWarnings("deprecation") + NearestReadPreference(final List tagSetList, @Nullable final Long maxStaleness, final TimeUnit timeUnit, + @Nullable final ReadPreferenceHedgeOptions hedgeOptions) { + super(tagSetList, maxStaleness, timeUnit, hedgeOptions); + } + + @Override + public TaggableReadPreference withTagSet(final TagSet tagSet) { + return withTagSetList(singletonList(tagSet)); + } + + @Override + public TaggableReadPreference withTagSetList(final List tagSetList) { + notNull("tagSetList", tagSetList); + return new NearestReadPreference(tagSetList, getMaxStaleness(MILLISECONDS), MILLISECONDS, getHedgeOptions()); + } + + @Override + public TaggableReadPreference withMaxStalenessMS(@Nullable final Long maxStaleness, final TimeUnit timeUnit) { + isTrueArgument("maxStaleness is null or >= 0", maxStaleness == null || maxStaleness >= 0); + return new NearestReadPreference(getTagSetList(), maxStaleness, timeUnit, getHedgeOptions()); + } + + @Deprecated + @Override + public TaggableReadPreference withHedgeOptions(final ReadPreferenceHedgeOptions hedgeOptions) { + return new NearestReadPreference(getTagSetList(), getMaxStaleness(MILLISECONDS), MILLISECONDS, hedgeOptions); + } + + @Override + public String getName() { + return "nearest"; + } + + + @Override + public List chooseForReplicaSet(final ClusterDescription clusterDescription) { + List selectedServers = selectFreshServers(clusterDescription, getAnyPrimaryOrSecondary(clusterDescription)); + if (!getTagSetList().isEmpty()) { + ClusterDescription nonStaleClusterDescription = copyClusterDescription(clusterDescription, selectedServers); + selectedServers = Collections.emptyList(); + for (final TagSet tagSet : getTagSetList()) { + List servers = getAnyPrimaryOrSecondary(nonStaleClusterDescription, tagSet); + if (!servers.isEmpty()) { + selectedServers = servers; + break; + } + } + } + return selectedServers; + } + } + + /** + * Read from primary if available, otherwise a secondary. 
+ */ + static class PrimaryPreferredReadPreference extends SecondaryReadPreference { + PrimaryPreferredReadPreference() { + } + + PrimaryPreferredReadPreference(final List tagSetList, @Nullable final Long maxStaleness, final TimeUnit timeUnit) { + this(tagSetList, maxStaleness, timeUnit, null); + } + + @SuppressWarnings("deprecation") + PrimaryPreferredReadPreference(final List tagSetList, @Nullable final Long maxStaleness, final TimeUnit timeUnit, + @Nullable final ReadPreferenceHedgeOptions hedgeOptions) { + super(tagSetList, maxStaleness, timeUnit, hedgeOptions); + } + + @Override + public TaggableReadPreference withTagSet(final TagSet tagSet) { + return withTagSetList(singletonList(tagSet)); + } + + @Override + public TaggableReadPreference withTagSetList(final List tagSetList) { + notNull("tagSetList", tagSetList); + return new PrimaryPreferredReadPreference(tagSetList, getMaxStaleness(MILLISECONDS), MILLISECONDS, getHedgeOptions()); + } + + @Override + public TaggableReadPreference withMaxStalenessMS(@Nullable final Long maxStaleness, final TimeUnit timeUnit) { + isTrueArgument("maxStaleness is null or >= 0", maxStaleness == null || maxStaleness >= 0); + return new PrimaryPreferredReadPreference(getTagSetList(), maxStaleness, timeUnit, getHedgeOptions()); + } + + @Deprecated + @Override + public TaggableReadPreference withHedgeOptions(final ReadPreferenceHedgeOptions hedgeOptions) { + return new PrimaryPreferredReadPreference(getTagSetList(), getMaxStaleness(MILLISECONDS), MILLISECONDS, hedgeOptions); + } + + @Override + public String getName() { + return "primaryPreferred"; + } + + @Override + protected List chooseForReplicaSet(final ClusterDescription clusterDescription) { + List selectedServers = selectFreshServers(clusterDescription, getPrimaries(clusterDescription)); + if (selectedServers.isEmpty()) { + selectedServers = super.chooseForReplicaSet(clusterDescription); + } + return selectedServers; + } + } + + private BsonArray tagsListToBsonArray() { + BsonArray bsonArray = new BsonArray(tagSetList.size()); + for (TagSet tagSet : tagSetList) { + bsonArray.add(toDocument(tagSet)); + } + return bsonArray; + } + + private BsonDocument toDocument(final TagSet tagSet) { + BsonDocument document = new BsonDocument(); + + for (Tag tag : tagSet) { + document.put(tag.getName(), new BsonString(tag.getValue())); + } + + return document; + } + +} diff --git a/driver-core/src/main/com/mongodb/TransactionOptions.java b/driver-core/src/main/com/mongodb/TransactionOptions.java new file mode 100644 index 00000000000..e5f22c22def --- /dev/null +++ b/driver-core/src/main/com/mongodb/TransactionOptions.java @@ -0,0 +1,327 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb; + +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Immutable; +import com.mongodb.annotations.Reason; +import com.mongodb.lang.Nullable; + +import java.util.Objects; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.TimeoutSettings.convertAndValidateTimeoutNullable; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +/** + * Options to apply to transactions. The default values for the options depend on context. For options specified per-transaction, the + * default values come from the default transaction options. For the default transaction options themselves, the default values come from + * the MongoClient on which the session was started. + * + * @see com.mongodb.session.ClientSession + * @see ClientSessionOptions + * @since 3.8 + * @mongodb.server.release 4.0 + */ +@Immutable +public final class TransactionOptions { + private final ReadConcern readConcern; + private final WriteConcern writeConcern; + private final ReadPreference readPreference; + private final Long maxCommitTimeMS; + private final Long timeoutMS; + + /** + * Gets the read concern. + * + * @return the read concern + */ + @Nullable + public ReadConcern getReadConcern() { + return readConcern; + } + + /** + * Gets the write concern. + * + * @return the write concern + */ + @Nullable + public WriteConcern getWriteConcern() { + return writeConcern; + } + + /** + * Gets the read preference. + * + * @return the write concern + */ + @Nullable + public ReadPreference getReadPreference() { + return readPreference; + } + + /** + * Gets the maximum amount of time to allow a single commitTransaction command to execute. The default is null, which places no + * limit on the execution time. + * + * @param timeUnit the time unit to return the result in + * @return the maximum execution time in the given time unit + * @mongodb.server.release 4.2 + * @since 3.11 + */ + @Nullable + public Long getMaxCommitTime(final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + if (maxCommitTimeMS == null) { + return null; + } + return timeUnit.convert(maxCommitTimeMS, MILLISECONDS); + } + + /** + * The time limit for the full execution of the transaction. + * + *
<p>If set the following deprecated options will be ignored: + * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p> + * + * <ul> + *   <li>{@code null} means that the timeout mechanism for operations will defer to using + *   {@link ClientSessionOptions#getDefaultTimeout(TimeUnit)} or {@link MongoClientSettings#getTimeout(TimeUnit)}</li> + *   <li>{@code 0} means infinite timeout.</li> + *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li> + * </ul>
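+ * + * <p>A short sketch of the unit conversion this getter performs (values are illustrative):</p> + * <pre>{@code + * TransactionOptions options = TransactionOptions.builder() + *         .timeout(5L, TimeUnit.SECONDS) + *         .build(); + * options.getTimeout(TimeUnit.MILLISECONDS); // 5000 + * }</pre>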
+ * + * @param timeUnit the time unit + * @return the timeout in the given time unit + * @since 5.2 + */ + @Nullable + @Alpha(Reason.CLIENT) + public Long getTimeout(final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + if (timeoutMS == null) { + return null; + } + return timeUnit.convert(timeoutMS, MILLISECONDS); + } + + /** + * Gets an instance of a builder + * + * @return a builder instance + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Merge the two provided transaction options, with the first taking precedence over the second. + * + * @param options the transaction options, which take precedence for any property that is non-null + * @param defaultOptions the default transaction options + * @return the merged transaction options + */ + public static TransactionOptions merge(final TransactionOptions options, final TransactionOptions defaultOptions) { + notNull("options", options); + notNull("defaultOptions", defaultOptions); + return TransactionOptions.builder() + .writeConcern(options.getWriteConcern() == null + ? defaultOptions.getWriteConcern() : options.getWriteConcern()) + .readConcern(options.getReadConcern() == null + ? defaultOptions.getReadConcern() : options.getReadConcern()) + .readPreference(options.getReadPreference() == null + ? defaultOptions.getReadPreference() : options.getReadPreference()) + .maxCommitTime(options.getMaxCommitTime(MILLISECONDS) == null + ? defaultOptions.getMaxCommitTime(MILLISECONDS) : options.getMaxCommitTime(MILLISECONDS), + MILLISECONDS) + .timeout(options.getTimeout(MILLISECONDS) == null + ? defaultOptions.getTimeout(MILLISECONDS) : options.getTimeout(MILLISECONDS), + MILLISECONDS) + .build(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + TransactionOptions that = (TransactionOptions) o; + + if (!Objects.equals(timeoutMS, that.timeoutMS)) { + return false; + } + if (!Objects.equals(maxCommitTimeMS, that.maxCommitTimeMS)) { + return false; + } + if (!Objects.equals(readConcern, that.readConcern)) { + return false; + } + if (!Objects.equals(writeConcern, that.writeConcern)) { + return false; + } + if (!Objects.equals(readPreference, that.readPreference)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = readConcern != null ? readConcern.hashCode() : 0; + result = 31 * result + (writeConcern != null ? writeConcern.hashCode() : 0); + result = 31 * result + (readPreference != null ? readPreference.hashCode() : 0); + result = 31 * result + (maxCommitTimeMS != null ? maxCommitTimeMS.hashCode() : 0); + result = 31 * result + (timeoutMS != null ? timeoutMS.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "TransactionOptions{" + + "readConcern=" + readConcern + + ", writeConcern=" + writeConcern + + ", readPreference=" + readPreference + + ", maxCommitTimeMS=" + maxCommitTimeMS + + ", timeoutMS=" + timeoutMS + + '}'; + } + + /** + * The builder for transaction options + */ + public static final class Builder { + private ReadConcern readConcern; + private WriteConcern writeConcern; + private ReadPreference readPreference; + private Long maxCommitTimeMS; + @Nullable + private Long timeoutMS; + + /** + * Sets the read concern. 
+ * + * @param readConcern the read concern + * @return this + */ + public Builder readConcern(@Nullable final ReadConcern readConcern) { + this.readConcern = readConcern; + return this; + } + + /** + * Sets the write concern. + * + * @param writeConcern the write concern, which must be acknowledged + * @return this + */ + public Builder writeConcern(@Nullable final WriteConcern writeConcern) { + this.writeConcern = writeConcern; + return this; + } + + /** + * Sets the read preference. + * + * @param readPreference the read preference, which currently must be primary. This restriction may be relaxed in future versions. + * @return this + */ + public Builder readPreference(@Nullable final ReadPreference readPreference) { + this.readPreference = readPreference; + return this; + } + + /** + * Sets the maximum execution time on the server for the commitTransaction operation. + * + * @param maxCommitTime the max commit time, which must be either null or greater than zero, in the given time unit + * @param timeUnit the time unit, which may not be null + * @return this + * @mongodb.server.release 4.2 + * @since 3.11 + */ + public Builder maxCommitTime(@Nullable final Long maxCommitTime, final TimeUnit timeUnit) { + if (maxCommitTime == null) { + this.maxCommitTimeMS = null; + } else { + notNull("timeUnit", timeUnit); + isTrueArgument("maxCommitTime > 0", maxCommitTime > 0); + this.maxCommitTimeMS = MILLISECONDS.convert(maxCommitTime, timeUnit); + } + return this; + } + + /** + * Sets the time limit for the full execution of the operations for this transaction. + * + *
<ul> + *   <li>{@code null} means that the timeout mechanism for operations will defer to using: + *    <ul> + *        <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become + *        available</li> + *        <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li> + *        <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li> + *        <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor. + *        See: cursor.maxTimeMS.</li> + *        <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.</li> + *    </ul> + *   </li> + *   <li>{@code 0} means infinite timeout.</li> + *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li> + * </ul>
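+ * + * <p>Sketch of how a per-transaction timeout interacts with defaults through {@code TransactionOptions.merge} (values are illustrative):</p> + * <pre>{@code + * TransactionOptions defaults = TransactionOptions.builder().timeout(60L, TimeUnit.SECONDS).build(); + * TransactionOptions perTransaction = TransactionOptions.builder().timeout(5L, TimeUnit.SECONDS).build(); + * // the non-null per-transaction value takes precedence + * TransactionOptions.merge(perTransaction, defaults).getTimeout(TimeUnit.SECONDS); // 5 + * }</pre>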
+ * + * @param timeout the timeout + * @param timeUnit the time unit + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public Builder timeout(@Nullable final Long timeout, final TimeUnit timeUnit) { + this.timeoutMS = convertAndValidateTimeoutNullable(timeout, timeUnit); + return this; + } + + /** + * Build the transaction options instance. + * + * @return The {@code TransactionOptions} + */ + public TransactionOptions build() { + return new TransactionOptions(this); + } + + private Builder() { + } + } + + + private TransactionOptions(final Builder builder) { + readConcern = builder.readConcern; + writeConcern = builder.writeConcern; + readPreference = builder.readPreference; + maxCommitTimeMS = builder.maxCommitTimeMS; + timeoutMS = builder.timeoutMS; + } +} diff --git a/driver-core/src/main/com/mongodb/UnixServerAddress.java b/driver-core/src/main/com/mongodb/UnixServerAddress.java new file mode 100644 index 00000000000..8bd42052004 --- /dev/null +++ b/driver-core/src/main/com/mongodb/UnixServerAddress.java @@ -0,0 +1,57 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.annotations.Immutable; +import com.mongodb.internal.graalvm.substitution.UnixServerAddressSubstitution; + +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; + +/** + * Represents the location of a MongoD unix domain socket. + * It is {@linkplain UnixServerAddressSubstitution not supported in GraalVM native image}. + * + *
<p>Requires the 'jnr.unixsocket' library.</p>
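+ * + * <p>Illustrative example (the socket path shown is hypothetical; it must end in {@code .sock}):</p> + * <pre>{@code + * UnixServerAddress address = new UnixServerAddress("/tmp/mongodb-27017.sock"); + * }</pre>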
+ * @since 3.7 + */ +@Immutable +public final class UnixServerAddress extends ServerAddress { + private static final long serialVersionUID = 154466643544866543L; + + /** + * Creates a new instance + * @param path the path of the MongoD unix domain socket. + * @throws UnsupportedOperationException If {@linkplain UnixServerAddressSubstitution called in a GraalVM native image}. + */ + public UnixServerAddress(final String path) { + super(notNull("The path cannot be null", path)); + isTrueArgument("The path must end in .sock", path.endsWith(".sock")); + checkNotInGraalVmNativeImage(); + } + + /** + * @throws UnsupportedOperationException If {@linkplain UnixServerAddressSubstitution called in a GraalVM native image}. + */ + private static void checkNotInGraalVmNativeImage() { + } + + @Override + public String toString() { + return getHost(); + } +} diff --git a/driver-core/src/main/com/mongodb/WriteConcern.java b/driver-core/src/main/com/mongodb/WriteConcern.java new file mode 100644 index 00000000000..61c8e510b90 --- /dev/null +++ b/driver-core/src/main/com/mongodb/WriteConcern.java @@ -0,0 +1,429 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// WriteConcern.java + +package com.mongodb; + +import com.mongodb.annotations.Immutable; +import com.mongodb.lang.Nullable; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; + +import java.io.Serializable; +import java.lang.reflect.Field; +import java.lang.reflect.Modifier; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; + +/** + *
<p>Controls the acknowledgment of write operations with various options.</p> + * + * <p>{@code w}</p> + * <ul> + *   <li>0: Don't wait for acknowledgement from the server</li> + *   <li>1: Wait for acknowledgement, but don't wait for secondaries to replicate</li> + *   <li>&gt;=2: Wait for one or more secondaries to also acknowledge</li> + *   <li>"majority": Wait for a majority of data bearing nodes to acknowledge</li> + *   <li>"&lt;tag set name&gt;": Wait for one or more secondaries to also acknowledge based on a tag set name</li> + * </ul> + * <p>{@code wtimeout} - how long to wait for secondaries to acknowledge before failing</p> + * <ul> + *   <li>0: indefinite</li> + *   <li>&gt;0: time to wait in milliseconds</li> + * </ul> + * + * <p>Other options:</p> + * <ul> + *   <li>{@code journal}: If true block until write operations have been committed to the journal. Cannot be used in combination with + *   {@code fsync}. Write operations will fail with an exception if this option is used when the server is running without journaling.</li> + * </ul>
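+ * + * <p>A few illustrative combinations built from the constants and {@code with*} methods declared below:</p> + * <pre>{@code + * WriteConcern twoMembers = new WriteConcern(2);                 // two members must acknowledge + * WriteConcern majorityJournaled = WriteConcern.MAJORITY.withJournal(true); + * WriteConcern boundedMajority = WriteConcern.MAJORITY.withWTimeout(100, TimeUnit.MILLISECONDS); + * }</pre>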
+ * + * @mongodb.driver.manual core/write-concern Write Concern + * @mongodb.driver.manual reference/write-concern/ Write Concern Reference + */ +@Immutable +public class WriteConcern implements Serializable { + + private static final long serialVersionUID = 1884671104750417011L; + + // map of the constants from above for use by fromString + private static final Map NAMED_CONCERNS; + + /** + * The w value. + */ + private final Object w; + + /** + * The w timeout value. + */ + private final Integer wTimeoutMS; + + /** + * The journal value. + */ + private final Boolean journal; + + /** + * Write operations that use this write concern will wait for acknowledgement, using the default write concern configured on the server. + * + * @since 2.10.0 + * @mongodb.driver.manual core/write-concern/#write-concern-acknowledged Acknowledged + */ + public static final WriteConcern ACKNOWLEDGED = new WriteConcern(null, null, null); + + /** + * Write operations that use this write concern will wait for acknowledgement from a single member. + * + * @since 3.2 + * @mongodb.driver.manual reference/write-concern/#w-option w option + */ + public static final WriteConcern W1 = new WriteConcern(1); + + /** + * Write operations that use this write concern will wait for acknowledgement from two members. + * + * @since 3.2 + * @mongodb.driver.manual reference/write-concern/#w-option w option + */ + public static final WriteConcern W2 = new WriteConcern(2); + + /** + * Write operations that use this write concern will wait for acknowledgement from three members. + * + * @since 3.2 + * @mongodb.driver.manual reference/write-concern/#w-option w option + */ + public static final WriteConcern W3 = new WriteConcern(3); + + + /** + * Write operations that use this write concern will return as soon as the message is written to the socket. Exceptions are raised for + * network issues, but not server errors. + * + * @since 2.10.0 + * @mongodb.driver.manual core/write-concern/#unacknowledged Unacknowledged + */ + public static final WriteConcern UNACKNOWLEDGED = new WriteConcern(0); + + /** + * Write operations wait for the server to group commit to the journal file on disk. + * + * @mongodb.driver.manual core/write-concern/#journaled Journaled + */ + public static final WriteConcern JOURNALED = ACKNOWLEDGED.withJournal(true); + + /** + * Exceptions are raised for network issues, and server errors; waits on a majority of servers for the write operation. + */ + public static final WriteConcern MAJORITY = new WriteConcern("majority"); + + /** + * Construct an instance with the given integer-based value for w. + * + * @param w number of servers to ensure write propagation to before acknowledgment, which must be {@code >= 0} + * @mongodb.driver.manual reference/write-concern/#w-option w option + */ + public WriteConcern(final int w) { + this(w, null, null); + } + + /** + * Construct an instance with the given tag set-based value for w. + * + * @param w tag set name, or "majority", representing the servers to ensure write propagation to before acknowledgment. Do not use + * string representation of integer values for w + * @mongodb.driver.manual tutorial/configure-replica-set-tag-sets/#replica-set-configuration-tag-sets Tag Sets + * @mongodb.driver.manual reference/write-concern/#w-option w option + */ + public WriteConcern(final String w) { + this(w, null, null); + notNull("w", w); + } + + /** + * Constructs an instance with the given integer-based value for w and the given value for wTimeoutMS. 
+ * + * @param w the w value, which must be >= 0 + * @param wTimeoutMS the wTimeout in milliseconds, which must be >= 0 + * @mongodb.driver.manual reference/write-concern/#w-option w option + * @mongodb.driver.manual reference/write-concern/#wtimeout wtimeout option + */ + public WriteConcern(final int w, final int wTimeoutMS) { + this(w, wTimeoutMS, null); + } + + // Private constructor for creating the "default" unacknowledged write concern. Necessary because there already a no-args + // constructor that means something else. + private WriteConcern(@Nullable final Object w, @Nullable final Integer wTimeoutMS, @Nullable final Boolean journal) { + if (w instanceof Integer) { + isTrueArgument("w >= 0", ((Integer) w) >= 0); + if ((Integer) w == 0) { + isTrueArgument("journal is false when w is 0", journal == null || !journal); + } + } else if (w != null) { + isTrueArgument("w must be String or int", w instanceof String); + } + isTrueArgument("wtimeout >= 0", wTimeoutMS == null || wTimeoutMS >= 0); + this.w = w; + this.wTimeoutMS = wTimeoutMS; + this.journal = journal; + } + + /** + * Gets the w value. + * + * @return w, either an instance of Integer or String or null + */ + @Nullable + public Object getWObject() { + return w; + } + + /** + * Gets the w value as an integer. + * + * @return w as an int + * @throws IllegalStateException if w is null or not an integer + */ + public int getW() { + isTrue("w is an Integer", w != null && w instanceof Integer); + return (Integer) w; + } + + /** + * Gets the w parameter as a String. + * + * @return w as a String + * @throws IllegalStateException if w is null or not a String + */ + public String getWString() { + isTrue("w is a String", w != null && w instanceof String); + return (String) w; + } + + /** + * Gets the wTimeout in the given time unit. + * + * @param timeUnit the non-null time unit for the result + * @return the WTimeout, which may be null if a wTimeout has not been specified + * @see #withWTimeout(long, TimeUnit) + * @since 3.2 + * @mongodb.driver.manual core/write-concern/#timeouts wTimeout + */ + @Nullable + public Integer getWTimeout(final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + return wTimeoutMS == null ? null : (int) timeUnit.convert(wTimeoutMS, TimeUnit.MILLISECONDS); + } + + /** + * Gets the journal property. The default value is null. + * + * @return whether journal syncing is enabled, or null if unspecified. + * @since 3.2 + * @mongodb.driver.manual core/write-concern/#journaled Journaled + */ + @Nullable + public Boolean getJournal() { + return journal; + } + + /** + * Gets whether this write concern indicates that the server's default write concern will be used. + * + * @return true if this write concern indicates that the server's default write concern will be used + * @mongodb.driver.manual /reference/replica-configuration/#local.system.replset.settings.getLastErrorDefaults getLastErrorDefaults + */ + public boolean isServerDefault() { + return equals(ACKNOWLEDGED); + } + + /** + * Gets this write concern as a document. + * + * @return The write concern as a BsonDocument, even if {@code w <= 0} + */ + public BsonDocument asDocument() { + BsonDocument document = new BsonDocument(); + + addW(document); + addWTimeout(document); + addJ(document); + + return document; + } + + /** + * Returns true if this write concern indicates that write operations must be acknowledged. 
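+ * + * <p>For example, both of the following follow directly from the rules above:</p> + * <pre>{@code + * WriteConcern.UNACKNOWLEDGED.isAcknowledged(); // false: w == 0 and journaling was not requested + * WriteConcern.MAJORITY.isAcknowledged();       // true: w is the string "majority" + * }</pre>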
+ * + * @return true w != null or w > 0 or journal is true or fsync is true + * @mongodb.driver.manual core/write-concern/#acknowledged Acknowledged + */ + public boolean isAcknowledged() { + if (w instanceof Integer) { + return (Integer) w > 0 || (journal != null && journal); + } + return true; + } + + /** + * Gets the WriteConcern constants by name (matching is done case insensitively). + * + * @param name the name of the WriteConcern + * @return the {@code WriteConcern instance} + */ + public static WriteConcern valueOf(final String name) { + return NAMED_CONCERNS.get(name.toLowerCase()); + } + + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + WriteConcern that = (WriteConcern) o; + + if (!Objects.equals(w, that.w)) { + return false; + } + if (!Objects.equals(wTimeoutMS, that.wTimeoutMS)) { + return false; + } + if (!Objects.equals(journal, that.journal)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = w != null ? w.hashCode() : 0; + result = 31 * result + (wTimeoutMS != null ? wTimeoutMS.hashCode() : 0); + result = 31 * result + (journal != null ? journal.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "WriteConcern{w=" + w + ", wTimeout=" + wTimeoutMS + " ms, journal=" + journal + "}"; + + } + + /** + * Constructs a new WriteConcern from the current one and the specified integer-based value for w + * + * @param w number of servers to ensure write propagation to before acknowledgment, which must be {@code >= 0} + * @return the new WriteConcern + * @mongodb.driver.manual core/write-concern/#replica-acknowledged Replica Acknowledged + */ + public WriteConcern withW(final int w) { + return new WriteConcern(Integer.valueOf(w), wTimeoutMS, journal); + } + + /** + * Constructs a new WriteConcern from the current one and the specified tag-set based value for w + * + * @param w tag set, or "majority", representing the servers to ensure write propagation to before acknowledgment. Do not use string + * representation of integer values for w + * @return the new WriteConcern + * @see #withW(int) + * @mongodb.driver.manual tutorial/configure-replica-set-tag-sets/#replica-set-configuration-tag-sets Tag Sets + */ + public WriteConcern withW(final String w) { + notNull("w", w); + return new WriteConcern(w, wTimeoutMS, journal); + } + + /** + * Constructs a new WriteConcern from the current one and the specified journal value + * + * @param journal true if journalling is required for acknowledgement, false if not, or null if unspecified + * @return the new WriteConcern + * @since 3.2 + * @mongodb.driver.manual reference/write-concern/#j-option j option + */ + public WriteConcern withJournal(@Nullable final Boolean journal) { + return new WriteConcern(w, wTimeoutMS, journal); + } + + /** + * Constructs a new WriteConcern from the current one and the specified wTimeout in the given time unit. 
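+ * + * <p>For example:</p> + * <pre>{@code + * // an acknowledged write that fails if the write concern is not satisfied within half a second + * WriteConcern timeBounded = WriteConcern.W2.withWTimeout(500, TimeUnit.MILLISECONDS); + * }</pre>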
+ * + * @param wTimeout the wTimeout, which must be >= 0 and <= Integer.MAX_VALUE after conversion to milliseconds + * @param timeUnit the non-null time unit to apply to wTimeout + * @return the WriteConcern with the given wTimeout + * @see #getWTimeout(TimeUnit) + * @since 3.2 + * @mongodb.driver.manual reference/write-concern/#wtimeout wtimeout option + */ + public WriteConcern withWTimeout(final long wTimeout, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + long newWTimeOutMS = TimeUnit.MILLISECONDS.convert(wTimeout, timeUnit); + isTrueArgument("wTimeout >= 0", wTimeout >= 0); + isTrueArgument("wTimeout <= " + Integer.MAX_VALUE + " ms", newWTimeOutMS <= Integer.MAX_VALUE); + return new WriteConcern(w, (int) newWTimeOutMS, journal); + } + + private void addW(final BsonDocument document) { + if (w instanceof String) { + document.put("w", new BsonString((String) w)); + } else if (w instanceof Integer){ + document.put("w", new BsonInt32((Integer) w)); + } + } + + private void addJ(final BsonDocument document) { + if (journal != null) { + document.put("j", BsonBoolean.valueOf(journal)); + } + } + + private void addWTimeout(final BsonDocument document) { + if (wTimeoutMS != null) { + document.put("wtimeout", new BsonInt32(wTimeoutMS)); + } + } + + static { + NAMED_CONCERNS = new HashMap<>(); + for (final Field f : WriteConcern.class.getFields()) { + if (Modifier.isStatic(f.getModifiers()) && f.getType().equals(WriteConcern.class)) { + String key = f.getName().toLowerCase(); + try { + NAMED_CONCERNS.put(key, (WriteConcern) f.get(null)); + } catch (IllegalAccessException e) { + throw new RuntimeException(e); + } + } + } + } +} diff --git a/driver-core/src/main/com/mongodb/WriteConcernException.java b/driver-core/src/main/com/mongodb/WriteConcernException.java new file mode 100644 index 00000000000..f1636378809 --- /dev/null +++ b/driver-core/src/main/com/mongodb/WriteConcernException.java @@ -0,0 +1,148 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonValue; + +import static java.lang.String.format; + +/** + * An exception representing an error reported due to a write failure. + * + *
<p>Only thrown when using the legacy deprecated API, which is accessed via {@code com.mongodb.MongoClient.getDB}.</p> + * + * <p>For applications using the {@code MongoCollection}-based API, write failures can be determined via:</p> + * <ul> + *   <li>Single document writes: a {@link MongoWriteException} is thrown.</li> + *   <li>Bulk document writes: a {@link MongoBulkWriteException} is thrown.</li> + * </ul>
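+ * + * <p>A minimal handling sketch for the legacy API ({@code legacyCollection} is a hypothetical {@code DBCollection}):</p> + * <pre>{@code + * try { + *     legacyCollection.insert(document); + * } catch (WriteConcernException e) { + *     int code = e.getErrorCode(); // e.g. 11000 for a duplicate key error + * } + * }</pre>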
+ * @see MongoWriteConcernException + * @see MongoBulkWriteException + * @serial exclude + */ +public class WriteConcernException extends MongoServerException { + + private static final long serialVersionUID = -1100801000476719450L; + + private final WriteConcernResult writeConcernResult; + private final BsonDocument response; + + /** + * Construct a new instance. + * + * @param response the response to the write operation + * @param address the address of the server that executed the operation + * @param writeConcernResult the write concern result + */ + public WriteConcernException(final BsonDocument response, final ServerAddress address, + final WriteConcernResult writeConcernResult) { + super(extractErrorCode(response), + format("Write failed with error code %d and error message '%s'", extractErrorCode(response), extractErrorMessage(response)), + address); + this.response = response; + this.writeConcernResult = writeConcernResult; + } + + /** + * For internal use only: extract the error code from the response to a write command. + * @param response the response + * @return the code, or -1 if there is none + */ + public static int extractErrorCode(final BsonDocument response) { + // mongos may set an err field containing duplicate key error information + String errorMessage = extractErrorMessage(response); + if (errorMessage != null) { + if (response.containsKey("err")) { + if (errorMessage.contains("E11000 duplicate key error")) { + return 11000; + } + } + + // mongos may return a list of documents representing write command responses from each shard. Return the one with a matching + // "err" field, so that it can be used to get the error code + if (!response.containsKey("code") && response.containsKey("errObjects")) { + for (BsonValue curErrorDocument : response.getArray("errObjects")) { + if (errorMessage.equals(extractErrorMessage(curErrorDocument.asDocument()))) { + return curErrorDocument.asDocument().getNumber("code").intValue(); + } + } + } + } + return response.getNumber("code", new BsonInt32(-1)).intValue(); + } + + /** + * For internal use only: extract the error message from the response to a write command. + * + * @param response the response + * @return the error message + */ + @Nullable + public static String extractErrorMessage(final BsonDocument response) { + if (response.isString("err")) { + return response.getString("err").getValue(); + } else if (response.isString("errmsg")) { + return response.getString("errmsg").getValue(); + } else { + return null; + } + } + + /** + * Gets the write result. + * + * @return the write result + */ + public WriteConcernResult getWriteConcernResult() { + return writeConcernResult; + } + + /** + * Gets the error code associated with the write concern failure. + * + * @return the error code + */ + public int getErrorCode() { + return extractErrorCode(response); + } + + /** + * Gets the error message associated with the write concern failure. + * + * @return the error message + */ + @Nullable + public String getErrorMessage() { + return extractErrorMessage(response); + } + + /** + * Gets the response to the write operation. 
+ * + * @return the response to the write operation + */ + public BsonDocument getResponse() { + return response; + } +} diff --git a/driver-core/src/main/com/mongodb/WriteConcernResult.java b/driver-core/src/main/com/mongodb/WriteConcernResult.java new file mode 100644 index 00000000000..11be4780080 --- /dev/null +++ b/driver-core/src/main/com/mongodb/WriteConcernResult.java @@ -0,0 +1,195 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.lang.Nullable; +import org.bson.BsonValue; + +/** + * The result of a successful write operation. If the write was unacknowledged, then {@code wasAcknowledged} will return false and all + * other methods will throw {@code MongoUnacknowledgedWriteException}. + * + * @see com.mongodb.WriteConcern#UNACKNOWLEDGED + * @since 3.0 + */ +public abstract class WriteConcernResult { + + /** + * Returns true if the write was acknowledged. + * + * @return true if the write was acknowledged + */ + public abstract boolean wasAcknowledged(); + + /** + * Returns the number of documents affected by the write operation. + * + * @return the number of documents affected by the write operation + * @throws UnsupportedOperationException if the write was unacknowledged. + */ + public abstract int getCount(); + + /** + * Returns true if the write was an update of an existing document. + * + * @return true if the write was an update of an existing document + * @throws UnsupportedOperationException if the write was unacknowledged. + */ + public abstract boolean isUpdateOfExisting(); + + /** + * Returns the value of _id if this write resulted in an upsert. + * + * @return the value of _id if this write resulted in an upsert. + * @throws UnsupportedOperationException if the write was unacknowledged. + */ + @Nullable + public abstract BsonValue getUpsertedId(); + + /** + * Create an acknowledged WriteConcernResult + * + * @param count the count of matched documents + * @param isUpdateOfExisting whether an existing document was updated + * @param upsertedId if an upsert resulted in an inserted document, this is the _id of that document. 
This may be null + * @return an acknowledged WriteConcernResult + */ + public static WriteConcernResult acknowledged(final int count, final boolean isUpdateOfExisting, @Nullable final BsonValue upsertedId) { + return new WriteConcernResult() { + @Override + public boolean wasAcknowledged() { + return true; + } + + @Override + public int getCount() { + return count; + } + + @Override + public boolean isUpdateOfExisting() { + return isUpdateOfExisting; + } + + @Override + @Nullable + public BsonValue getUpsertedId() { + return upsertedId; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + WriteConcernResult that = (WriteConcernResult) o; + if (!that.wasAcknowledged()) { + return false; + } + + if (count != that.getCount()) { + return false; + } + if (isUpdateOfExisting != that.isUpdateOfExisting()) { + return false; + } + if (upsertedId != null ? !upsertedId.equals(that.getUpsertedId()) : that.getUpsertedId() != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = count; + result = 31 * result + (isUpdateOfExisting ? 1 : 0); + result = 31 * result + (upsertedId != null ? upsertedId.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "AcknowledgedWriteResult{" + + "count=" + count + + ", isUpdateOfExisting=" + isUpdateOfExisting + + ", upsertedId=" + upsertedId + + '}'; + } + }; + } + + /** + * Create an unacknowledged WriteConcernResult + * + * @return an unacknowledged WriteConcernResult + */ + public static WriteConcernResult unacknowledged() { + return new WriteConcernResult() { + @Override + public boolean wasAcknowledged() { + return false; + } + + @Override + public int getCount() { + throw getUnacknowledgedWriteException(); + } + + @Override + public boolean isUpdateOfExisting() { + throw getUnacknowledgedWriteException(); + } + + @Override + public BsonValue getUpsertedId() { + throw getUnacknowledgedWriteException(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + WriteConcernResult that = (WriteConcernResult) o; + return !that.wasAcknowledged(); + } + + @Override + public int hashCode() { + return 1; + } + + @Override + public String toString() { + return "UnacknowledgedWriteResult{}"; + } + + private UnsupportedOperationException getUnacknowledgedWriteException() { + return new UnsupportedOperationException("Cannot get information about an unacknowledged write"); + } + }; + } +} diff --git a/driver-core/src/main/com/mongodb/WriteError.java b/driver-core/src/main/com/mongodb/WriteError.java new file mode 100644 index 00000000000..65cb6370671 --- /dev/null +++ b/driver-core/src/main/com/mongodb/WriteError.java @@ -0,0 +1,133 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb; + +import org.bson.BsonDocument; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * Represents the details of a write error , e.g. a duplicate key error + * + * @since 3.0 + */ +public class WriteError { + private final int code; + private final String message; + private final BsonDocument details; + + /** + * Constructs a new instance. + * + * @param code the error code + * @param message the error message + * @param details details about the error + */ + public WriteError(final int code, final String message, final BsonDocument details) { + this.code = code; + this.message = notNull("message", message); + this.details = notNull("details", details); + } + + /** + * Construct an instance that is a shallow copy of the given instance. + * + * @param writeError the write error to copy + */ + public WriteError(final WriteError writeError) { + this.code = writeError.code; + this.message = writeError.message; + this.details = writeError.details; + } + + /** + * Gets the category of this error. + * + * @return the category of this write error + */ + public ErrorCategory getCategory() { + return ErrorCategory.fromErrorCode(code); + } + + /** + * Gets the code associated with this error. + * + * @return the code + */ + public int getCode() { + return code; + } + + /** + * Gets the message associated with this error. + * + * @return the message + */ + public String getMessage() { + return message; + } + + /** + * Gets the details associated with this error. This document will not be null, but may be empty. + * + * @return the details + */ + public BsonDocument getDetails() { + return details; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + WriteError that = (WriteError) o; + + if (code != that.code) { + return false; + } + if (!details.equals(that.details)) { + return false; + } + if (!message.equals(that.message)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = code; + result = 31 * result + message.hashCode(); + result = 31 * result + details.hashCode(); + return result; + } + + @Override + public String toString() { + return "WriteError{" + + "code=" + code + + ", message='" + message + '\'' + + ", details=" + details + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/annotations/Alpha.java b/driver-core/src/main/com/mongodb/annotations/Alpha.java new file mode 100644 index 00000000000..3698c7ac860 --- /dev/null +++ b/driver-core/src/main/com/mongodb/annotations/Alpha.java @@ -0,0 +1,51 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * Copyright 2010 The Guava Authors + * Copyright 2011 The Guava Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.annotations; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Signifies that a public API element is in the early stages of development, subject to + * incompatible changes, or even removal, in a future release and may lack some intended features. + * An API bearing this annotation may contain known issues affecting functionality, performance, + * and stability. It is also exempt from any compatibility guarantees made by its containing library. + * + *
<p>It is inadvisable for applications to use Alpha APIs in production environments or + * for libraries (which get included on users' CLASSPATHs, outside the library developers' + * control) to depend on these APIs. Alpha APIs are intended for experimental purposes only.</p>
+ */ +@Retention(RetentionPolicy.CLASS) +@Target({ + ElementType.ANNOTATION_TYPE, + ElementType.CONSTRUCTOR, + ElementType.FIELD, + ElementType.METHOD, + ElementType.PACKAGE, + ElementType.TYPE }) +@Documented +@Beta(Reason.CLIENT) +public @interface Alpha { + /** + * @return The reason an API element is marked with {@link Alpha}. + */ + Reason[] value(); +} diff --git a/driver-core/src/main/com/mongodb/annotations/Beta.java b/driver-core/src/main/com/mongodb/annotations/Beta.java new file mode 100644 index 00000000000..55753ddc051 --- /dev/null +++ b/driver-core/src/main/com/mongodb/annotations/Beta.java @@ -0,0 +1,56 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * Copyright 2010 The Guava Authors + * Copyright 2011 The Guava Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.annotations; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Signifies that a public API (public class, method or field) is subject to + * incompatible changes, or even removal, in a future release. An API bearing + * this annotation is exempt from any compatibility guarantees made by its + * containing library. Note that the presence of this annotation implies nothing + * about the quality or performance of the API in question, only the fact that + * it is not "API-frozen." + * + *
<p>
It is generally safe for applications to depend on beta APIs, at + * the cost of some extra work during upgrades. However it is generally + * inadvisable for libraries (which get included on users' CLASSPATHs, + * outside the library developers' control) to do so. + * + **/ +@Retention(RetentionPolicy.CLASS) +@Target({ + ElementType.ANNOTATION_TYPE, + ElementType.CONSTRUCTOR, + ElementType.FIELD, + ElementType.METHOD, + ElementType.PACKAGE, + ElementType.TYPE }) +@Documented +@Beta(Reason.CLIENT) +public @interface Beta { + /** + * @return The reason an API element is marked with {@link Beta}. + */ + Reason[] value(); +} diff --git a/driver-core/src/main/com/mongodb/annotations/Evolving.java b/driver-core/src/main/com/mongodb/annotations/Evolving.java new file mode 100644 index 00000000000..ba9d8825dc6 --- /dev/null +++ b/driver-core/src/main/com/mongodb/annotations/Evolving.java @@ -0,0 +1,77 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * Copyright 2010 The Guava Authors + * Copyright 2011 The Guava Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.annotations; + +import org.bson.codecs.Codec; +import org.bson.conversions.Bson; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Signifies that the annotated program element is subject to incompatible changes by means of adding abstract methods. + * This, in turn, means that implementing interfaces or extending classes annotated with {@link Evolving} bears the risk + * of doing extra work during upgrades. + * Using such program elements is no different from using ordinary unannotated program elements. + * Note that the presence of this annotation implies nothing about the quality or performance of the API in question. + *
<p> + * Unless we currently want to allow users to extend/implement API program elements, we must annotate them with + * {@code @}{@link Sealed} rather than {@code @}{@link Evolving}. Replacing {@code @}{@link Sealed} with {@code @}{@link Evolving} + * is a backward-compatible change, while the opposite is not.</p> + * <table> + *     <caption>Reasons we may allow users to extend/implement an API program element</caption> + *     <tr> + *         <th>Reason</th> + *         <th>Example</th> + *         <th>Applicability of {@code @}{@link Evolving}</th> + *     </tr> + *     <tr> + *         <td>Doing so allows/simplifies integrating user code with the API.</td> + *         <td>{@link Bson}</td> + *         <td>Not applicable.</td> + *     </tr> + *     <tr> + *         <td>Doing so allows customizing API behavior.</td> + *         <td>{@link Codec}</td> + *         <td>Not applicable.</td> + *     </tr> + *     <tr> + *         <td>Doing so facilitates writing application unit tests by creating a fake implementation.</td> + *         <td>{@code com.mongodb.client.MongoClient}</td> + *         <td>Applicable.</td> + *     </tr> + *     <tr> + *         <td>The program element was introduced before {@code @}{@link Evolving}.</td> + *         <td>{@code com.mongodb.client.MongoClient}</td> + *         <td>Applicable.</td> + *     </tr> + * </table>
+ * + * @see Sealed + */ +@Retention(RetentionPolicy.CLASS) +@Target(ElementType.TYPE) +@Documented +@Evolving +public @interface Evolving { +} diff --git a/driver-core/src/main/com/mongodb/annotations/Immutable.java b/driver-core/src/main/com/mongodb/annotations/Immutable.java new file mode 100644 index 00000000000..684fd589bac --- /dev/null +++ b/driver-core/src/main/com/mongodb/annotations/Immutable.java @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2005 Brian Goetz and Tim Peierls + * Released under the Creative Commons Attribution License + * (http://creativecommons.org/licenses/by/2.5) + * Official home: http://www.jcip.net + * + * Any republication or derived work distributed in source code form + * must include this copyright and license notice. + */ + +package com.mongodb.annotations; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + *
<p>The class to which this annotation is applied is immutable. This means that its state cannot be seen to change by callers, which + * implies that</p> + * <ul> + *     <li>all public fields are final,</li> + *     <li>all public final reference fields refer to other immutable objects, and</li> + *     <li>constructors and methods do not publish references to any internal state which is potentially mutable by the + *     implementation.</li> + * </ul> + * <p>Immutable objects may still have internal mutable state for purposes of performance optimization; some state + * variables may be lazily computed, so long as they are computed from immutable state and that callers cannot tell the difference.</p> + * + * <p>Immutable objects are inherently thread-safe; they may be passed between threads or published without synchronization.</p>
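+ * + * <p>A minimal example satisfying the rules above (a hypothetical class annotated with {@code @Immutable}):</p> + * <pre>{@code + * public final class Point { + *     private final int x; + *     private final int y; + * + *     public Point(final int x, final int y) { this.x = x; this.y = y; } + * + *     public int getX() { return x; } + * + *     public int getY() { return y; } + * } + * }</pre>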
+ */ +@Documented +@Target(ElementType.TYPE) +@Retention(RetentionPolicy.RUNTIME) +public @interface Immutable { +} diff --git a/driver-core/src/main/com/mongodb/annotations/Internal.java b/driver-core/src/main/com/mongodb/annotations/Internal.java new file mode 100644 index 00000000000..97108736717 --- /dev/null +++ b/driver-core/src/main/com/mongodb/annotations/Internal.java @@ -0,0 +1,40 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.annotations; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Signifies that a public API element is intended for internal use only. + * + *
<p>It is inadvisable for applications to use Internal APIs as they are intended for internal library purposes only.</p>
+ */ +@Retention(RetentionPolicy.CLASS) +@Target({ + ElementType.ANNOTATION_TYPE, + ElementType.CONSTRUCTOR, + ElementType.FIELD, + ElementType.METHOD, + ElementType.PACKAGE, + ElementType.TYPE }) +@Documented +@Alpha(Reason.CLIENT) +public @interface Internal { +} diff --git a/driver-core/src/main/com/mongodb/annotations/NotThreadSafe.java b/driver-core/src/main/com/mongodb/annotations/NotThreadSafe.java new file mode 100644 index 00000000000..c0d49acc49e --- /dev/null +++ b/driver-core/src/main/com/mongodb/annotations/NotThreadSafe.java @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2005 Brian Goetz and Tim Peierls + * Released under the Creative Commons Attribution License + * (http://creativecommons.org/licenses/by/2.5) + * Official home: http://www.jcip.net + * + * Any republication or derived work distributed in source code form + * must include this copyright and license notice. + */ + +package com.mongodb.annotations; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * The class to which this annotation is applied is not thread-safe. This annotation primarily exists for clarifying the non-thread-safety + * of a class that might otherwise be assumed to be thread-safe, despite the fact that it is a bad idea to assume a class is thread-safe + * without good reason. + * + * @see ThreadSafe + */ +@Documented +@Target(ElementType.TYPE) +@Retention(RetentionPolicy.RUNTIME) +public @interface NotThreadSafe { +} diff --git a/driver-core/src/main/com/mongodb/annotations/Reason.java b/driver-core/src/main/com/mongodb/annotations/Reason.java new file mode 100644 index 00000000000..af72098a9de --- /dev/null +++ b/driver-core/src/main/com/mongodb/annotations/Reason.java @@ -0,0 +1,34 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.annotations; + +/** + * Enumerates the reasons an API element might be marked with annotations like {@link Alpha} or {@link Beta}. + */ +@Beta(Reason.CLIENT) +public enum Reason { + /** + * Indicates that the status of the driver API is the reason for the annotation. + */ + CLIENT, + + /** + * The driver API relies on the server API. + * This dependency is the reason for the annotation and suggests that changes in the server API could impact the driver API. + */ + SERVER +} diff --git a/driver-core/src/main/com/mongodb/annotations/Sealed.java b/driver-core/src/main/com/mongodb/annotations/Sealed.java new file mode 100644 index 00000000000..0a091fa7cc2 --- /dev/null +++ b/driver-core/src/main/com/mongodb/annotations/Sealed.java @@ -0,0 +1,44 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * Copyright 2010 The Guava Authors + * Copyright 2011 The Guava Authors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.annotations; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Signifies that the annotated class or interface should be treated as sealed: + * it must not be extended or implemented by consumers of the library. + * + *
<p>Using such classes and interfaces is no different from using ordinary + * unannotated classes and interfaces. + * + * <p>
This annotation does not imply that the API is experimental or + * {@link Beta}, or that the quality or performance of the API is inferior. + * + * @see Evolving + */ +@Retention(RetentionPolicy.CLASS) +@Target(ElementType.TYPE) +@Documented +@Sealed +public @interface Sealed { +} diff --git a/driver-core/src/main/com/mongodb/annotations/ThreadSafe.java b/driver-core/src/main/com/mongodb/annotations/ThreadSafe.java new file mode 100644 index 00000000000..e07f8dc8c1e --- /dev/null +++ b/driver-core/src/main/com/mongodb/annotations/ThreadSafe.java @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2005 Brian Goetz and Tim Peierls + * Released under the Creative Commons Attribution License + * (http://creativecommons.org/licenses/by/2.5) + * Official home: http://www.jcip.net + * + * Any republication or derived work distributed in source code form + * must include this copyright and license notice. + */ + +package com.mongodb.annotations; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + + +/** + * The class to which this annotation is applied is thread-safe. This means that no sequences of accesses (reads and writes to public + * fields, calls to public methods) may put the object into an invalid state, regardless of the interleaving of those actions by the + * runtime, and without requiring any additional synchronization or coordination on the part of the caller. + */ +@Documented +@Target(ElementType.TYPE) +@Retention(RetentionPolicy.RUNTIME) +public @interface ThreadSafe { +} diff --git a/driver-core/src/main/com/mongodb/annotations/package-info.java b/driver-core/src/main/com/mongodb/annotations/package-info.java new file mode 100644 index 00000000000..361a63fa423 --- /dev/null +++ b/driver-core/src/main/com/mongodb/annotations/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Contains annotations that can apply to any part of the driver code. + */ +package com.mongodb.annotations; diff --git a/driver-core/src/main/com/mongodb/assertions/Assertions.java b/driver-core/src/main/com/mongodb/assertions/Assertions.java new file mode 100644 index 00000000000..bf38638dc6d --- /dev/null +++ b/driver-core/src/main/com/mongodb/assertions/Assertions.java @@ -0,0 +1,241 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * Copyright (c) 2008-2014 Atlassian Pty Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
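For contrast, a sketch of @Sealed and @ThreadSafe together on a public API type; the interface is invented for illustration:

    import com.mongodb.annotations.Sealed;
    import com.mongodb.annotations.ThreadSafe;

    // Consumers may call this interface but must not implement it themselves,
    // so the library remains free to add methods without breaking anyone.
    @Sealed
    @ThreadSafe
    public interface ExampleServerMonitor {
        void requestImmediateCheck();
    }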
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.assertions; + +import com.mongodb.lang.Nullable; + +import java.util.Collection; +import java.util.function.Function; +import java.util.function.Supplier; + +/** + *
<p>Design by contract assertions.</p>
+ * All {@code assert...} methods throw {@link AssertionError} and should be used to check conditions which may be violated if and only if + * the driver code is incorrect. The intended usage of these methods is the same as that of the + * Java {@code assert} statement. The reason + * for not using the {@code assert} statements is that they are not always enabled. We prefer having internal checks always done at the + * cost of our code doing a relatively small amount of additional work in production. + * The {@code assert...} methods return values so that they can be used fluently. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ + public final class Assertions { + /** + * Throw IllegalArgumentException if the value is null. + * + * @param name the parameter name + * @param value the value that should not be null + * @param <T> the value type + * @return the value + * @throws java.lang.IllegalArgumentException if value is null + */ + public static <T> T notNull(final String name, final T value) { + if (value == null) { + throw new IllegalArgumentException(name + " can not be null"); + } + return value; + } + + /** + * Throw IllegalArgumentException if {@code values} is null or contains a null element. + * + *
<p>Note: If performance is a concern, consider deferring the integrity validation + * to the point of actual data iteration to avoid incurring additional reference chasing for collections of complex objects. + * However, if performance considerations are low and it is acceptable to iterate over the data twice, + * this method can still be used for validation purposes. + * + * @param name the parameter name. + * @param values the values that should not contain null elements. + * @param <T> the type of elements in the collection. + * @return the input collection if it passes the null element validation. + * @throws java.lang.IllegalArgumentException if the input collection is null or contains null elements. + */ + public static <T> Iterable<T> notNullElements(final String name, final Iterable<T> values) { + if (values == null) { + throw new IllegalArgumentException(name + " can not be null"); + } + + for (T value : values) { + if (value == null) { + throw new IllegalArgumentException(name + " can not contain null"); + } + } + + return values; + } + + /** + * Throw IllegalStateException if the condition is false. + * + * @param name the name of the state that is being checked + * @param condition the condition about the parameter to check + * @throws java.lang.IllegalStateException if the condition is false + */ + public static void isTrue(final String name, final boolean condition) { + if (!condition) { + throw new IllegalStateException("state should be: " + name); + } + } + + /** + * Throw IllegalArgumentException if the condition is false. + * + * @param name the name of the state that is being checked + * @param condition the condition about the parameter to check + * @throws java.lang.IllegalArgumentException if the condition is false + */ + public static void isTrueArgument(final String name, final boolean condition) { + if (!condition) { + throw new IllegalArgumentException("state should be: " + name); + } + } + + /** + * Throw IllegalArgumentException if the condition returns false. + * + * @param msg the error message if the condition returns false + * @param supplier the supplier of the value + * @param condition the condition function + * @return the supplied value if it meets the condition + * @param <T> the type of the supplied value + */ + public static <T> T isTrueArgument(final String msg, final Supplier<T> supplier, final Function<T, Boolean> condition) { + T value = doesNotThrow(supplier); + if (!condition.apply(value)) { + throw new IllegalArgumentException(msg); + } + + return value; + } + + /** + * Throw IllegalArgumentException if the collection contains a null value. + * + * @param name the name of the collection + * @param collection the collection + * @throws java.lang.IllegalArgumentException if the collection contains a null value + */ + public static void doesNotContainNull(final String name, final Collection<?> collection) { + // Use a loop instead of the contains method, as some implementations of that method will throw an exception if passed null as a + // parameter (in particular, lists returned by List.of methods) + for (Object o : collection) { + if (o == null) { + throw new IllegalArgumentException(name + " can not contain a null value"); + } + } + } + + /** + * @param value A value to check. + * @param <T> The type of {@code value}. + * @return {@code null}. + * @throws AssertionError If {@code value} is not {@code null}.
+ */ + @Nullable + public static <T> T assertNull(@Nullable final T value) throws AssertionError { + if (value != null) { + throw new AssertionError(value.toString()); + } + return null; + } + + /** + * @param value A value to check. + * @param <T> The type of {@code value}. + * @return {@code value} + * @throws AssertionError If {@code value} is {@code null}. + */ + public static <T> T assertNotNull(@Nullable final T value) throws AssertionError { + if (value == null) { + throw new AssertionError(); + } + return value; + } + + /** + * @param value A value to check. + * @return {@code true}. + * @throws AssertionError If {@code value} is {@code false}. + */ + public static boolean assertTrue(final boolean value) throws AssertionError { + if (!value) { + throw new AssertionError(); + } + return true; + } + + /** + * @param value A value to check. + * @param message The message. + * @return {@code true}. + * @throws AssertionError If {@code value} is {@code false}. + */ + public static boolean assertTrue(final boolean value, final String message) throws AssertionError { + if (!value) { + throw new AssertionError(message); + } + return true; + } + + /** + * @param value A value to check. + * @return {@code false}. + * @throws AssertionError If {@code value} is {@code true}. + */ + public static boolean assertFalse(final boolean value) throws AssertionError { + if (value) { + throw new AssertionError(); + } + return false; + } + + /** + * @throws AssertionError Always + * @return Never completes normally. The return type is {@link AssertionError} to allow writing {@code throw fail()}. + * This may be helpful in non-{@code void} methods. + */ + public static AssertionError fail() throws AssertionError { + throw new AssertionError(); + } + + /** + * @param msg The failure message. + * @throws AssertionError Always + * @return Never completes normally. The return type is {@link AssertionError} to allow writing {@code throw fail("failure message")}. + * This may be helpful in non-{@code void} methods. + */ + public static AssertionError fail(final String msg) throws AssertionError { + throw new AssertionError(assertNotNull(msg)); + } + + /** + * @param supplier the supplier to check + * @return {@code supplier.get()} + * @throws AssertionError If {@code supplier.get()} throws an exception + */ + public static <T> T doesNotThrow(final Supplier<T> supplier) throws AssertionError { + try { + return supplier.get(); + } catch (Exception e) { + throw new AssertionError(e.getMessage(), e); + } + } + + private Assertions() { + } + } diff --git a/driver-core/src/main/com/mongodb/assertions/package-info.java b/driver-core/src/main/com/mongodb/assertions/package-info.java new file mode 100644 index 00000000000..42b39ec55e0 --- /dev/null +++ b/driver-core/src/main/com/mongodb/assertions/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
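A sketch of the intended division of labor, with a hypothetical class for illustration: notNull and isTrueArgument reject bad caller input with IllegalArgumentException, while assertNotNull guards internal invariants with AssertionError:

    import static com.mongodb.assertions.Assertions.assertNotNull;
    import static com.mongodb.assertions.Assertions.isTrueArgument;
    import static com.mongodb.assertions.Assertions.notNull;

    final class ExampleBatchCursor {
        private final String collectionName;
        private Object currentBatch;

        ExampleBatchCursor(final String collectionName, final int batchSize) {
            // Caller-facing checks: bad input is the caller's bug.
            this.collectionName = notNull("collectionName", collectionName);
            isTrueArgument("batchSize is positive", batchSize > 0);
            this.currentBatch = new Object();
        }

        Object currentBatch() {
            // Internal check: if this throws, the driver itself is broken.
            return assertNotNull(currentBatch);
        }
    }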
+ */ + +/** + * Contains design by contract assertions + */ +package com.mongodb.assertions; diff --git a/driver-core/src/main/com/mongodb/bulk/BulkWriteError.java b/driver-core/src/main/com/mongodb/bulk/BulkWriteError.java new file mode 100644 index 00000000000..cf6a307166e --- /dev/null +++ b/driver-core/src/main/com/mongodb/bulk/BulkWriteError.java @@ -0,0 +1,86 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.bulk; + +import com.mongodb.WriteError; +import org.bson.BsonDocument; + +/** + * Represents an error for an item included in a bulk write operation, e.g. a duplicate key error + * + * @since 3.0 + */ +public class BulkWriteError extends WriteError { + private final int index; + + /** + * Constructs a new instance. + * + * @param code the error code + * @param message the error message + * @param details details about the error + * @param index the index of the item in the bulk write operation that had this error + */ + public BulkWriteError(final int code, final String message, final BsonDocument details, final int index) { + super(code, message, details); + this.index = index; + } + + /** + * The index of the item in the bulk write operation with this error. + * + * @return the index + */ + public int getIndex() { + return index; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BulkWriteError that = (BulkWriteError) o; + + if (index != that.index) { + return false; + } + + return super.equals(that); + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + index; + return result; + } + + @Override + public String toString() { + return "BulkWriteError{" + + "index=" + index + + ", code=" + getCode() + + ", message='" + getMessage() + '\'' + + ", details=" + getDetails() + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/bulk/BulkWriteInsert.java b/driver-core/src/main/com/mongodb/bulk/BulkWriteInsert.java new file mode 100644 index 00000000000..7b2fff7f871 --- /dev/null +++ b/driver-core/src/main/com/mongodb/bulk/BulkWriteInsert.java @@ -0,0 +1,85 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.bulk; + +import org.bson.BsonValue; + +import java.util.Objects; + +/** + * Represents an item in the bulk write that was inserted. 
+ * + * @since 4.0 + */ +public class BulkWriteInsert { + private final int index; + private final BsonValue id; + + /** + * Construct an instance. + * + * @param index the index in the list of bulk write requests that the insert occurred in + * @param id the id of the document that was inserted as the result of the insert + */ + public BulkWriteInsert(final int index, final BsonValue id) { + this.index = index; + this.id = id; + } + + /** + * Gets the index of the inserted item based on the order it was added to the bulk write operation. + * + * @return the index + */ + public int getIndex() { + return index; + } + + /** + * Gets the id of the inserted item. + * + * @return the id + */ + public BsonValue getId() { + return id; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + BulkWriteInsert that = (BulkWriteInsert) o; + return index == that.index && Objects.equals(id, that.id); + } + + @Override + public int hashCode() { + return Objects.hash(index, id); + } + + @Override + public String toString() { + return "BulkWriteInsert{" + + "index=" + index + + ", id=" + id + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/bulk/BulkWriteResult.java b/driver-core/src/main/com/mongodb/bulk/BulkWriteResult.java new file mode 100644 index 00000000000..d42c0252a8c --- /dev/null +++ b/driver-core/src/main/com/mongodb/bulk/BulkWriteResult.java @@ -0,0 +1,301 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.bulk; + +import com.mongodb.internal.bulk.WriteRequest; + +import java.util.List; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static java.util.Collections.unmodifiableList; + +/** + * The result of a successful bulk write operation. + * + * @since 3.0 + */ +public abstract class BulkWriteResult { + + /** + * Returns true if the write was acknowledged. + * + * @return true if the write was acknowledged + * @see com.mongodb.WriteConcern#UNACKNOWLEDGED + */ + public abstract boolean wasAcknowledged(); + + /** + * Returns the number of documents inserted by the write operation. + * + * @return the number of documents inserted by the write operation + * @throws java.lang.UnsupportedOperationException if the write was unacknowledged. + * @see com.mongodb.WriteConcern#UNACKNOWLEDGED + */ + public abstract int getInsertedCount(); + + /** + * Returns the number of documents matched by updates or replacements in the write operation. This will include documents that matched + * the query but where the modification didn't result in any actual change to the document; for example, if you set the value of some + * field, and the field already has that value, that will still count as an update. + * + * @return the number of documents matched by updates in the write operation + * @throws java.lang.UnsupportedOperationException if the write was unacknowledged. 
+ * @see com.mongodb.WriteConcern#UNACKNOWLEDGED + */ + public abstract int getMatchedCount(); + + /** + * Returns the number of documents deleted by the write operation. + * + * @return the number of documents deleted by the write operation + * @throws java.lang.UnsupportedOperationException if the write was unacknowledged. + * @see com.mongodb.WriteConcern#UNACKNOWLEDGED + */ + public abstract int getDeletedCount(); + + /** + * Returns the number of documents modified by the write operation. This only applies to updates or replacements, and will only count + * documents that were actually changed; for example, if you set the value of some field , and the field already has that value, that + * will not count as a modification. + * + * @return the number of documents modified by the write operation + * @see com.mongodb.WriteConcern#UNACKNOWLEDGED + */ + public abstract int getModifiedCount(); + + /** + * Gets an unmodifiable list of inserted items, or the empty list if there were none. + * + * @return a list of inserted items, or the empty list if there were none. + * @throws java.lang.UnsupportedOperationException if the write was unacknowledged. + * @see com.mongodb.WriteConcern#UNACKNOWLEDGED + * @since 4.0 + */ + public abstract List getInserts(); + + /** + * Gets an unmodifiable list of upserted items, or the empty list if there were none. + * + * @return a list of upserted items, or the empty list if there were none. + * @throws java.lang.UnsupportedOperationException if the write was unacknowledged. + * @see com.mongodb.WriteConcern#UNACKNOWLEDGED + */ + public abstract List getUpserts(); + + /** + * Create an acknowledged BulkWriteResult + * + * @param type the type of the write + * @param count the number of documents matched + * @param modifiedCount the number of documents modified, which may be null if the server was not able to provide the count + * @param upserts the list of upserts + * @param inserts the list of inserts + * @return an acknowledged BulkWriteResult + * @since 4.0 + */ + public static BulkWriteResult acknowledged(final WriteRequest.Type type, final int count, final Integer modifiedCount, + final List upserts, final List inserts) { + return acknowledged(type == WriteRequest.Type.INSERT ? count : 0, + (type == WriteRequest.Type.UPDATE || type == WriteRequest.Type.REPLACE) ? count : 0, + type == WriteRequest.Type.DELETE ? 
count : 0, + modifiedCount, upserts, inserts); + } + + /** + * Create an acknowledged BulkWriteResult + * + * @param insertedCount the number of documents inserted by the write operation + * @param matchedCount the number of documents matched by the write operation + * @param removedCount the number of documents removed by the write operation + * @param modifiedCount the number of documents modified, which may not be null + * @param upserts the list of upserts + * @param inserts the list of inserts + * @return an acknowledged BulkWriteResult + * @since 4.0 + */ + public static BulkWriteResult acknowledged(final int insertedCount, final int matchedCount, final int removedCount, + final Integer modifiedCount, final List upserts, + final List inserts) { + return new BulkWriteResult() { + @Override + public boolean wasAcknowledged() { + return true; + } + + @Override + public int getInsertedCount() { + return insertedCount; + } + + @Override + public int getMatchedCount() { + return matchedCount; + } + + @Override + public int getDeletedCount() { + return removedCount; + } + + @Override + public int getModifiedCount() { + return assertNotNull(modifiedCount); + } + + @Override + public List getInserts() { + return unmodifiableList(inserts); + } + + @Override + public List getUpserts() { + return unmodifiableList(upserts); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BulkWriteResult that = (BulkWriteResult) o; + + if (!that.wasAcknowledged()) { + return false; + } + if (insertedCount != that.getInsertedCount()) { + return false; + } + if (!modifiedCount.equals(that.getModifiedCount())) { + return false; + } + if (removedCount != that.getDeletedCount()) { + return false; + } + if (matchedCount != that.getMatchedCount()) { + return false; + } + if (!upserts.equals(that.getUpserts())) { + return false; + } + if (!inserts.equals(that.getInserts())) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = upserts.hashCode(); + result = 31 * result + inserts.hashCode(); + result = 31 * result + insertedCount; + result = 31 * result + matchedCount; + result = 31 * result + removedCount; + result = 31 * result + modifiedCount.hashCode(); + return result; + } + + @Override + public String toString() { + return "AcknowledgedBulkWriteResult{" + + "insertedCount=" + insertedCount + + ", matchedCount=" + matchedCount + + ", removedCount=" + removedCount + + ", modifiedCount=" + modifiedCount + + ", upserts=" + upserts + + ", inserts=" + inserts + + '}'; + } + }; + } + + /** + * Create an unacknowledged BulkWriteResult + * + * @return an unacknowledged BulkWriteResult + */ + public static BulkWriteResult unacknowledged() { + return new BulkWriteResult() { + @Override + public boolean wasAcknowledged() { + return false; + } + + @Override + public int getInsertedCount() { + throw getUnacknowledgedWriteException(); + } + + @Override + public int getMatchedCount() { + throw getUnacknowledgedWriteException(); + } + + @Override + public int getDeletedCount() { + throw getUnacknowledgedWriteException(); + } + + @Override + public int getModifiedCount() { + throw getUnacknowledgedWriteException(); + } + + @Override + public List getInserts() { + throw getUnacknowledgedWriteException(); + } + + @Override + public List getUpserts() { + throw getUnacknowledgedWriteException(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + 
return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BulkWriteResult that = (BulkWriteResult) o; + return !that.wasAcknowledged(); + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public String toString() { + return "UnacknowledgedBulkWriteResult{}"; + } + + private UnsupportedOperationException getUnacknowledgedWriteException() { + return new UnsupportedOperationException("Cannot get information about an unacknowledged write"); + } + }; + } + +} diff --git a/driver-core/src/main/com/mongodb/bulk/BulkWriteUpsert.java b/driver-core/src/main/com/mongodb/bulk/BulkWriteUpsert.java new file mode 100644 index 00000000000..a2f876b465a --- /dev/null +++ b/driver-core/src/main/com/mongodb/bulk/BulkWriteUpsert.java @@ -0,0 +1,94 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.bulk; + +import org.bson.BsonValue; + +/** + * Represents an item in the bulk write that was upserted. + * + * @since 3.0 + */ +public class BulkWriteUpsert { + private final int index; + private final BsonValue id; + + /** + * Construct an instance. + * + * @param index the index in the list of bulk write requests that the upsert occurred in + * @param id the id of the document that was inserted as the result of the upsert + */ + public BulkWriteUpsert(final int index, final BsonValue id) { + this.index = index; + this.id = id; + } + + /** + * Gets the index of the upserted item based on the order it was added to the bulk write operation. + * + * @return the index + */ + public int getIndex() { + return index; + } + + /** + * Gets the id of the upserted item. + * + * @return the id + */ + public BsonValue getId() { + return id; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BulkWriteUpsert that = (BulkWriteUpsert) o; + + if (index != that.index) { + return false; + } + if (!id.equals(that.id)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = index; + result = 31 * result + id.hashCode(); + return result; + } + + @Override + public String toString() { + return "BulkWriteUpsert{" + + "index=" + index + + ", id=" + id + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/bulk/WriteConcernError.java b/driver-core/src/main/com/mongodb/bulk/WriteConcernError.java new file mode 100644 index 00000000000..682922430bd --- /dev/null +++ b/driver-core/src/main/com/mongodb/bulk/WriteConcernError.java @@ -0,0 +1,133 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
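A short sketch of the two factories above: the acknowledged result exposes its counts, while every accessor of the unacknowledged result throws. WriteRequest.Type comes from the driver's internal package and is used here only to exercise the factory:

    import com.mongodb.bulk.BulkWriteResult;
    import com.mongodb.internal.bulk.WriteRequest;

    import java.util.Collections;

    final class ExampleBulkWriteResults {
        public static void main(final String[] args) {
            BulkWriteResult acknowledged = BulkWriteResult.acknowledged(
                    WriteRequest.Type.UPDATE, 3, 2, Collections.emptyList(), Collections.emptyList());
            System.out.println(acknowledged.getMatchedCount());  // 3
            System.out.println(acknowledged.getModifiedCount()); // 2

            BulkWriteResult unacknowledged = BulkWriteResult.unacknowledged();
            System.out.println(unacknowledged.wasAcknowledged()); // false
            unacknowledged.getInsertedCount(); // throws UnsupportedOperationException
        }
    }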
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.bulk; + +import org.bson.BsonDocument; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * An error representing a failure by the server to apply the requested write concern to the bulk operation. + * + * @mongodb.driver.manual core/write-concern/ Write Concern + * @since 3.0 + */ +public class WriteConcernError { + private final int code; + private final String codeName; + private final String message; + private final BsonDocument details; + + /** + * Constructs a new instance. + * + * @param code the error code + * @param codeName the error code name + * @param message the error message + * @param details any details + */ + public WriteConcernError(final int code, final String codeName, final String message, final BsonDocument details) { + this.code = code; + this.codeName = notNull("codeName", codeName); + this.message = notNull("message", message); + this.details = notNull("details", details); + } + + /** + * Gets the code associated with this error. + * + * @return the code + */ + public int getCode() { + return code; + } + + /** + * Gets the name associated with the error code. + * + * @return the error code name, which may be the empty string + * @since 3.8 + * @mongodb.server.release 3.4 + */ + public String getCodeName() { + return codeName; + } + + /** + * Gets the message associated with this error. + * + * @return the message + */ + public String getMessage() { + return message; + } + + /** + * Gets the details associated with this error. This document will not be null, but may be empty. + * + * @return the details + */ + public BsonDocument getDetails() { + return details; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + WriteConcernError that = (WriteConcernError) o; + + if (code != that.code) { + return false; + } + if (!codeName.equals(that.codeName)) { + return false; + } + if (!details.equals(that.details)) { + return false; + } + if (!message.equals(that.message)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = code; + result = 31 * result + codeName.hashCode(); + result = 31 * result + message.hashCode(); + result = 31 * result + details.hashCode(); + return result; + } + + @Override + public String toString() { + return "WriteConcernError{" + + "code=" + code + + ", codeName='" + codeName + '\'' + + ", message='" + message + '\'' + + ", details=" + details + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/bulk/package-info.java b/driver-core/src/main/com/mongodb/bulk/package-info.java new file mode 100644 index 00000000000..2c741bdaccc --- /dev/null +++ b/driver-core/src/main/com/mongodb/bulk/package-info.java @@ -0,0 +1,23 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
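Application code does not construct a WriteConcernError; it typically surfaces on a MongoBulkWriteException raised by the sync driver, roughly as in this sketch (assuming the collection uses a write concern the server cannot satisfy):

    import com.mongodb.MongoBulkWriteException;
    import com.mongodb.bulk.WriteConcernError;
    import com.mongodb.client.MongoCollection;
    import org.bson.Document;

    import java.util.Collections;

    final class ExampleWriteConcernErrorHandling {
        static void insertAndReport(final MongoCollection<Document> collection) {
            try {
                collection.insertMany(Collections.singletonList(new Document("x", 1)));
            } catch (MongoBulkWriteException e) {
                WriteConcernError error = e.getWriteConcernError(); // null if none occurred
                if (error != null) {
                    System.out.println(error.getCode() + ": " + error.getMessage());
                }
            }
        }
    }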
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Contains classes for representing the result of a bulk write operation. + */ +@NonNullApi +package com.mongodb.bulk; + +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/client/cursor/TimeoutMode.java b/driver-core/src/main/com/mongodb/client/cursor/TimeoutMode.java new file mode 100644 index 00000000000..cdaa92d4923 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/cursor/TimeoutMode.java @@ -0,0 +1,44 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.cursor; + +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; + +import java.util.concurrent.TimeUnit; + +/** + * The timeout mode for a cursor + * + *
<p> + * For operations that create cursors, {@code timeoutMS} can either cap the lifetime of the cursor or be applied separately to the + * original operation and all next calls. + * </p>
+ * @see com.mongodb.MongoClientSettings#getTimeout(TimeUnit) + * @since 5.2 + */ +@Alpha(Reason.CLIENT) +public enum TimeoutMode { + + /** + * The timeout lasts for the lifetime of the cursor + */ + CURSOR_LIFETIME, + + /** + * The timeout is reset for each batch iteration of the cursor + */ + ITERATION +} diff --git a/driver-core/src/main/com/mongodb/client/cursor/package-info.java b/driver-core/src/main/com/mongodb/client/cursor/package-info.java new file mode 100644 index 00000000000..ea907688087 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/cursor/package-info.java @@ -0,0 +1,23 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains models and options that help describe MongoCollection operations + */ +@NonNullApi +package com.mongodb.client.cursor; + +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/client/gridfs/codecs/GridFSFileCodec.java b/driver-core/src/main/com/mongodb/client/gridfs/codecs/GridFSFileCodec.java new file mode 100644 index 00000000000..38e70ddd00f --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/gridfs/codecs/GridFSFileCodec.java @@ -0,0 +1,107 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
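A sketch of the difference in practice, assuming the sync driver's find builder, which exposes a timeoutMode option as of 5.2 (treat the setter as illustrative):

    import com.mongodb.client.MongoCollection;
    import com.mongodb.client.cursor.TimeoutMode;
    import org.bson.Document;

    final class ExampleTimeoutMode {
        static void iterate(final MongoCollection<Document> collection) {
            // ITERATION: timeoutMS applies afresh to the initial command and to each
            // getMore, rather than capping the whole lifetime of the cursor.
            for (Document doc : collection.find().timeoutMode(TimeoutMode.ITERATION)) {
                System.out.println(doc.toJson());
            }
        }
    }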
+ */ + + package com.mongodb.client.gridfs.codecs; + + import com.mongodb.client.gridfs.model.GridFSFile; + import com.mongodb.lang.Nullable; + import org.bson.BsonDateTime; + import org.bson.BsonDocument; + import org.bson.BsonDocumentReader; + import org.bson.BsonDocumentWrapper; + import org.bson.BsonInt32; + import org.bson.BsonInt64; + import org.bson.BsonReader; + import org.bson.BsonString; + import org.bson.BsonValue; + import org.bson.BsonWriter; + import org.bson.Document; + import org.bson.codecs.Codec; + import org.bson.codecs.DecoderContext; + import org.bson.codecs.EncoderContext; + import org.bson.codecs.configuration.CodecRegistry; + + import java.util.Date; + + import static com.mongodb.assertions.Assertions.notNull; + + + /** + * A codec for GridFS Files + * + * @since 3.3 + */ + public final class GridFSFileCodec implements Codec<GridFSFile> { + private final Codec<Document> documentCodec; + private final Codec<BsonDocument> bsonDocumentCodec; + + /** + * Create a new instance + * + * @param registry the codec registry + */ + public GridFSFileCodec(final CodecRegistry registry) { + this.documentCodec = notNull("DocumentCodec", notNull("registry", registry).get(Document.class)); + this.bsonDocumentCodec = notNull("BsonDocumentCodec", registry.get(BsonDocument.class)); + } + + @Override + public GridFSFile decode(final BsonReader reader, final DecoderContext decoderContext) { + BsonDocument bsonDocument = bsonDocumentCodec.decode(reader, decoderContext); + + BsonValue id = bsonDocument.get("_id"); + String filename = bsonDocument.get("filename", new BsonString("")).asString().getValue(); + long length = bsonDocument.getNumber("length").longValue(); + int chunkSize = bsonDocument.getNumber("chunkSize").intValue(); + Date uploadDate = new Date(bsonDocument.getDateTime("uploadDate").getValue()); + BsonDocument metadataBsonDocument = bsonDocument.getDocument("metadata", new BsonDocument()); + + Document optionalMetadata = asDocumentOrNull(metadataBsonDocument); + + return new GridFSFile(id, filename, length, chunkSize, uploadDate, optionalMetadata); + } + + @Override + public void encode(final BsonWriter writer, final GridFSFile value, final EncoderContext encoderContext) { + BsonDocument bsonDocument = new BsonDocument(); + bsonDocument.put("_id", value.getId()); + bsonDocument.put("filename", new BsonString(value.getFilename())); + bsonDocument.put("length", new BsonInt64(value.getLength())); + bsonDocument.put("chunkSize", new BsonInt32(value.getChunkSize())); + bsonDocument.put("uploadDate", new BsonDateTime(value.getUploadDate().getTime())); + + Document metadata = value.getMetadata(); + if (metadata != null) { + bsonDocument.put("metadata", new BsonDocumentWrapper<>(metadata, documentCodec)); + } + bsonDocumentCodec.encode(writer, bsonDocument, encoderContext); + } + + @Override + public Class<GridFSFile> getEncoderClass() { + return GridFSFile.class; + } + + @Nullable + private Document asDocumentOrNull(final BsonDocument bsonDocument) { + if (bsonDocument.isEmpty()) { + return null; + } else { + BsonDocumentReader reader = new BsonDocumentReader(bsonDocument); + return documentCodec.decode(reader, DecoderContext.builder().build()); + } + } + } diff --git a/driver-core/src/main/com/mongodb/client/gridfs/codecs/GridFSFileCodecProvider.java b/driver-core/src/main/com/mongodb/client/gridfs/codecs/GridFSFileCodecProvider.java new file mode 100644 index 00000000000..1d65b2621d6 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/gridfs/codecs/GridFSFileCodecProvider.java @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc.
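The codec only asks its registry for the Document and BsonDocument codecs, so the driver's default registry is enough to build one by hand; a minimal sketch:

    import com.mongodb.MongoClientSettings;
    import com.mongodb.client.gridfs.codecs.GridFSFileCodec;
    import org.bson.codecs.configuration.CodecRegistry;

    final class ExampleGridFSFileCodec {
        public static void main(final String[] args) {
            // The default registry already provides Codec<Document> and Codec<BsonDocument>.
            CodecRegistry registry = MongoClientSettings.getDefaultCodecRegistry();
            GridFSFileCodec codec = new GridFSFileCodec(registry);
            System.out.println(codec.getEncoderClass()); // class com.mongodb.client.gridfs.model.GridFSFile
        }
    }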
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + package com.mongodb.client.gridfs.codecs; + + import com.mongodb.client.gridfs.model.GridFSFile; + import org.bson.codecs.Codec; + import org.bson.codecs.configuration.CodecProvider; + import org.bson.codecs.configuration.CodecRegistry; + + /** + * A provider of codecs for {@link GridFSFile}s. + * + * @since 3.3 + */ + public final class GridFSFileCodecProvider implements CodecProvider { + + @Override + @SuppressWarnings("unchecked") + public <T> Codec<T> get(final Class<T> clazz, final CodecRegistry registry) { + if (clazz.equals(GridFSFile.class)) { + return (Codec<T>) new GridFSFileCodec(registry); + } else { + return null; + } + } + + @Override + public String toString() { + return "GridFSFileCodecProvider{}"; + } + } diff --git a/driver-core/src/main/com/mongodb/client/gridfs/codecs/package-info.java b/driver-core/src/main/com/mongodb/client/gridfs/codecs/package-info.java new file mode 100644 index 00000000000..379c6a5d889 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/gridfs/codecs/package-info.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + /** + * This package contains codecs for use with GridFS + * + * @since 3.3 + */ + @NonNullApi + package com.mongodb.client.gridfs.codecs; + + import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/client/gridfs/model/GridFSDownloadOptions.java b/driver-core/src/main/com/mongodb/client/gridfs/model/GridFSDownloadOptions.java new file mode 100644 index 00000000000..eace8fa732f --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/gridfs/model/GridFSDownloadOptions.java @@ -0,0 +1,68 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + package com.mongodb.client.gridfs.model; + + /** + * The GridFS download by name options + * + *
<p>Controls the selection of the revision to download</p>
+ * + * @since 3.3 + */ +public final class GridFSDownloadOptions { + private int revision; + + /** + * Download the most recent version of the file. + * + *
<p>Defaults to the most recent revision.</p>
+ */ + public GridFSDownloadOptions() { + revision = -1; + } + + /** + * Set the revision of the file to retrieve. + * + *
<p>Revision numbers are defined as follows:</p> + * <ul> + * <li>0 = the original stored file</li> + * <li>1 = the first revision</li> + * <li>2 = the second revision</li> + * <li>etc..</li> + * <li>-2 = the second most recent revision</li> + * <li>-1 = the most recent revision</li> + * </ul>
+ * + * + * @param revision the file revision to download + * @return this + */ + public GridFSDownloadOptions revision(final int revision) { + this.revision = revision; + return this; + } + + /** + * Gets the revision to download identifier + * + * @return the revision to download identifier + */ + public int getRevision() { + return revision; + } +} diff --git a/driver-core/src/main/com/mongodb/client/gridfs/model/GridFSFile.java b/driver-core/src/main/com/mongodb/client/gridfs/model/GridFSFile.java new file mode 100644 index 00000000000..e98ced50377 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/gridfs/model/GridFSFile.java @@ -0,0 +1,188 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.gridfs.model; + +import com.mongodb.MongoGridFSException; +import com.mongodb.lang.Nullable; +import org.bson.BsonValue; +import org.bson.Document; +import org.bson.types.ObjectId; + +import java.util.Date; +import java.util.Objects; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * The GridFSFile + * + * @since 3.1 + */ +public final class GridFSFile { + private final BsonValue id; + private final String filename; + private final long length; + private final int chunkSize; + private final Date uploadDate; + + // Optional values + private final Document metadata; + + /** + * Creates a new GridFSFile + * + * @param id the id of the file + * @param filename the filename + * @param length the length, in bytes of the file + * @param chunkSize the chunkSize, in bytes of the file + * @param uploadDate the upload date of the file + * @param metadata the optional metadata for the file + */ + public GridFSFile(final BsonValue id, final String filename, final long length, final int chunkSize, final Date uploadDate, + @Nullable final Document metadata) { + this.id = notNull("id", id); + this.filename = notNull("filename", filename); + this.length = notNull("length", length); + this.chunkSize = notNull("chunkSize", chunkSize); + this.uploadDate = notNull("uploadDate", uploadDate); + this.metadata = metadata != null && metadata.isEmpty() ? null : metadata; + } + + /** + * The {@link ObjectId} for this file. + *
<p>
+ * Throws a MongoGridFSException if the file id is not an ObjectId. + * + * @return the id for this file. + */ + public ObjectId getObjectId() { + if (!id.isObjectId()) { + throw new MongoGridFSException("Custom id type used for this GridFS file"); + } + return id.asObjectId().getValue(); + } + + /** + * The {@link BsonValue} id for this file. + * + * @return the id for this file + */ + public BsonValue getId() { + return id; + } + + /** + * The filename + * + * @return the filename + */ + public String getFilename() { + return filename; + } + + /** + * The length, in bytes of this file + * + * @return the length, in bytes of this file + */ + public long getLength() { + return length; + } + + /** + * The size, in bytes, of each data chunk of this file + * + * @return the size, in bytes, of each data chunk of this file + */ + public int getChunkSize() { + return chunkSize; + } + + /** + * The date and time this file was added to GridFS + * + * @return the date and time this file was added to GridFS + */ + public Date getUploadDate() { + return uploadDate; + } + + /** + * Any additional metadata stored along with the file + * + * @return the metadata document or null + */ + @Nullable + public Document getMetadata() { + return metadata; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + GridFSFile that = (GridFSFile) o; + + if (!Objects.equals(id, that.id)) { + return false; + } + if (!filename.equals(that.filename)) { + return false; + } + if (length != that.length) { + return false; + } + if (chunkSize != that.chunkSize) { + return false; + } + if (!uploadDate.equals(that.uploadDate)) { + return false; + } + if (!Objects.equals(metadata, that.metadata)) { + return false; + } + return true; + } + + @Override + public int hashCode() { + int result = id != null ? id.hashCode() : 0; + result = 31 * result + filename.hashCode(); + result = 31 * result + (int) (length ^ (length >>> 32)); + result = 31 * result + chunkSize; + result = 31 * result + uploadDate.hashCode(); + result = 31 * result + (metadata != null ? metadata.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "GridFSFile{" + + "id=" + id + + ", filename='" + filename + '\'' + + ", length=" + length + + ", chunkSize=" + chunkSize + + ", uploadDate=" + uploadDate + + ", metadata=" + metadata + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/gridfs/model/GridFSUploadOptions.java b/driver-core/src/main/com/mongodb/client/gridfs/model/GridFSUploadOptions.java new file mode 100644 index 00000000000..7478c20ecfa --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/gridfs/model/GridFSUploadOptions.java @@ -0,0 +1,83 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
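Putting the GridFS model classes together: a sketch that downloads the second most recent revision by name through the sync driver's GridFSBucket, then inspects the matching GridFSFile with the guard that getObjectId() requires:

    import com.mongodb.client.gridfs.GridFSBucket;
    import com.mongodb.client.gridfs.model.GridFSDownloadOptions;
    import com.mongodb.client.gridfs.model.GridFSFile;
    import org.bson.Document;

    import java.io.ByteArrayOutputStream;

    final class ExampleGridFSDownload {
        static void downloadSecondMostRecent(final GridFSBucket bucket, final String filename) {
            // -2 selects the second most recent revision of the file.
            GridFSDownloadOptions options = new GridFSDownloadOptions().revision(-2);
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            bucket.downloadToStream(filename, out, options);

            GridFSFile file = bucket.find(new Document("filename", filename)).first();
            if (file != null && file.getId().isObjectId()) {
                System.out.println(file.getObjectId()); // safe: the id is an ObjectId
            }
        }
    }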
+ */ + +package com.mongodb.client.gridfs.model; + +import com.mongodb.lang.Nullable; +import org.bson.Document; + +/** + * GridFS upload options + *
<p>
+ * Customizable options used when uploading files into GridFS + * + * @since 3.1 + */ +public final class GridFSUploadOptions { + private Integer chunkSizeBytes; + private Document metadata; + + /** + * Construct a new instance. + */ + public GridFSUploadOptions() { + } + + /** + * The number of bytes per chunk of this file. + * + *
<p>If no value has been set, then the chunkSizeBytes from the GridFSBucket will be used.</p>
+ * + * @return number of bytes per chunk if set or null + */ + @Nullable + public Integer getChunkSizeBytes() { + return chunkSizeBytes; + } + + /** + * Sets the chunk size in bytes. + * + * @param chunkSizeBytes the number of bytes per chunk for the uploaded file + * @return this + */ + public GridFSUploadOptions chunkSizeBytes(@Nullable final Integer chunkSizeBytes) { + this.chunkSizeBytes = chunkSizeBytes; + return this; + } + + /** + * Returns any user provided data for the 'metadata' field of the files collection document. + * + * @return the user provided metadata for the file if set or null + */ + @Nullable + public Document getMetadata() { + return metadata; + } + + /** + * Sets metadata to be stored alongside the filename in the files collection + * + * @param metadata the metadata to be stored + * @return this + */ + public GridFSUploadOptions metadata(@Nullable final Document metadata) { + this.metadata = metadata; + return this; + } + + } diff --git a/driver-core/src/main/com/mongodb/client/gridfs/model/package-info.java b/driver-core/src/main/com/mongodb/client/gridfs/model/package-info.java new file mode 100644 index 00000000000..4c6da8813cc --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/gridfs/model/package-info.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + /** + * This package contains models for use with GridFS + * + * @since 3.1 + */ + @NonNullApi + package com.mongodb.client.gridfs.model; + + import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/client/model/Accumulators.java b/driver-core/src/main/com/mongodb/client/model/Accumulators.java new file mode 100644 index 00000000000..e6752976613 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/Accumulators.java @@ -0,0 +1,571 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + package com.mongodb.client.model; + + import com.mongodb.lang.Nullable; + import org.bson.BsonArray; + import org.bson.BsonDocument; + import org.bson.BsonString; + import org.bson.Document; + import org.bson.conversions.Bson; + + import java.util.List; + + import static java.util.stream.Collectors.toList; + import static org.bson.assertions.Assertions.notNull; + + /** + * Builders for accumulators used in the group pipeline stage of an aggregation pipeline.
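The upload-side counterpart, again through the sync driver's GridFSBucket; the chunk size and metadata values are arbitrary:

    import com.mongodb.client.gridfs.GridFSBucket;
    import com.mongodb.client.gridfs.model.GridFSUploadOptions;
    import org.bson.Document;
    import org.bson.types.ObjectId;

    import java.io.ByteArrayInputStream;
    import java.nio.charset.StandardCharsets;

    final class ExampleGridFSUpload {
        static ObjectId upload(final GridFSBucket bucket) {
            GridFSUploadOptions options = new GridFSUploadOptions()
                    .chunkSizeBytes(358400) // 350 KB, overriding the bucket default
                    .metadata(new Document("contentType", "text/plain"));
            return bucket.uploadFromStream("notes.txt",
                    new ByteArrayInputStream("hello".getBytes(StandardCharsets.UTF_8)), options);
        }
    }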
+ * + * @mongodb.driver.manual core/aggregation-pipeline/ Aggregation pipeline + * @mongodb.driver.manual reference/operator/aggregation/group/#accumulator-operator Accumulators + * @mongodb.driver.manual meta/aggregation-quick-reference/#aggregation-expressions Expressions + * @mongodb.server.release 2.2 + * @since 3.1 + */ + public final class Accumulators { + + /** + * Gets a field name for a $group operation representing the sum of the values of the given expression when applied to all members of + * the group. + * + * @param fieldName the field name + * @param expression the expression + * @param <TExpression> the expression type + * @return the field + * @mongodb.driver.manual reference/operator/aggregation/sum/ $sum + */ + public static <TExpression> BsonField sum(final String fieldName, final TExpression expression) { + return accumulatorOperator("$sum", fieldName, expression); + } + + /** + * Gets a field name for a $group operation representing the average of the values of the given expression when applied to all + * members of the group. + * + * @param fieldName the field name + * @param expression the expression + * @param <TExpression> the expression type + * @return the field + * @mongodb.driver.manual reference/operator/aggregation/avg/ $avg + */ + public static <TExpression> BsonField avg(final String fieldName, final TExpression expression) { + return accumulatorOperator("$avg", fieldName, expression); + } + + /** + * Returns a combination of a computed field and an accumulator that generates a BSON {@link org.bson.BsonType#ARRAY Array} + * containing computed values from the given {@code inExpression} based on the provided {@code pExpression}, which represents an array + * of percentiles of interest within a group, where each element is a numeric value between 0.0 and 1.0 (inclusive). + * + * @param fieldName The field computed by the accumulator. + * @param inExpression The input expression. + * @param pExpression The expression representing the percentiles of interest. + * @param method The method to be used for computing the percentiles. + * @param <InExpression> The type of the input expression. + * @param <PExpression> The type of the percentile expression. + * @return The requested {@link BsonField}. + * @mongodb.driver.manual reference/operator/aggregation/percentile/ $percentile + * @since 4.10 + * @mongodb.server.release 7.0 + */ + public static <InExpression, PExpression> BsonField percentile(final String fieldName, final InExpression inExpression, + final PExpression pExpression, final QuantileMethod method) { + notNull("fieldName", fieldName); + notNull("inExpression", inExpression); + notNull("pExpression", pExpression); + notNull("method", method); + return quantileAccumulator("$percentile", fieldName, inExpression, pExpression, method); + } + + /** + * Returns a combination of a computed field and an accumulator that generates a BSON {@link org.bson.BsonType#DOUBLE Double} + * representing the median value computed from the given {@code inExpression} within a group. + * + * @param fieldName The field computed by the accumulator. + * @param inExpression The input expression. + * @param method The method to be used for computing the median. + * @param <InExpression> The type of the input expression. + * @return The requested {@link BsonField}.
+ * @mongodb.driver.manual reference/operator/aggregation/median/ $median + * @since 4.10 + * @mongodb.server.release 7.0 + */ + public static <InExpression> BsonField median(final String fieldName, final InExpression inExpression, final QuantileMethod method) { + notNull("fieldName", fieldName); + notNull("inExpression", inExpression); + notNull("method", method); + return quantileAccumulator("$median", fieldName, inExpression, null, method); + } + + /** + * Gets a field name for a $group operation representing the value of the given expression when applied to the first member of + * the group. + * + * @param fieldName the field name + * @param expression the expression + * @param <TExpression> the expression type + * @return the field + * @mongodb.driver.manual reference/operator/aggregation/first/ $first + */ + public static <TExpression> BsonField first(final String fieldName, final TExpression expression) { + return accumulatorOperator("$first", fieldName, expression); + } + + /** + * Returns a combination of a computed field and an accumulator that produces a BSON {@link org.bson.BsonType#ARRAY Array} + * of values of the given {@code inExpression} computed for the first {@code N} elements within a presorted group, + * where {@code N} is the positive integral value of the {@code nExpression}. + * + * @param fieldName The field computed by the accumulator. + * @param inExpression The input expression. + * @param nExpression The expression limiting the number of produced values. + * @param <InExpression> The type of the input expression. + * @param <NExpression> The type of the limiting expression. + * @return The requested {@link BsonField}. + * @mongodb.driver.manual reference/operator/aggregation/firstN/ $firstN + * @since 4.7 + * @mongodb.server.release 5.2 + */ + public static <InExpression, NExpression> BsonField firstN( + final String fieldName, final InExpression inExpression, final NExpression nExpression) { + return pickNAccumulator(notNull("fieldName", fieldName), "$firstN", + notNull("inExpression", inExpression), notNull("nExpression", nExpression)); + } + + /** + * Returns a combination of a computed field and an accumulator that produces + * a value of the given {@code outExpression} computed for the top element within a group + * sorted according to the provided {@code sortBy} specification. + * + * @param fieldName The field computed by the accumulator. + * @param sortBy The {@linkplain Sorts sort specification}. The syntax is identical to the one expected by {@link Aggregates#sort(Bson)}. + * @param outExpression The output expression. + * @param <OutExpression> The type of the output expression. + * @return The requested {@link BsonField}. + * @mongodb.driver.manual reference/operator/aggregation/top/ $top + * @since 4.7 + * @mongodb.server.release 5.2 + */ + public static <OutExpression> BsonField top(final String fieldName, final Bson sortBy, final OutExpression outExpression) { + return sortingPickAccumulator(notNull("fieldName", fieldName), "$top", + notNull("sortBy", sortBy), notNull("outExpression", outExpression)); + } + + /** + * Returns a combination of a computed field and an accumulator that produces a BSON {@link org.bson.BsonType#ARRAY Array} + * of values of the given {@code outExpression} computed for the top {@code N} elements within a group + * sorted according to the provided {@code sortBy} specification, + * where {@code N} is the positive integral value of the {@code nExpression}. + * + * @param fieldName The field computed by the accumulator. + * @param sortBy The {@linkplain Sorts sort specification}.
The syntax is identical to the one expected by {@link Aggregates#sort(Bson)}. + * @param outExpression The output expression. + * @param nExpression The expression limiting the number of produced values. + * @param <OutExpression> The type of the output expression. + * @param <NExpression> The type of the limiting expression. + * @return The requested {@link BsonField}. + * @mongodb.driver.manual reference/operator/aggregation/topN/ $topN + * @since 4.7 + * @mongodb.server.release 5.2 + */ + public static <OutExpression, NExpression> BsonField topN( + final String fieldName, final Bson sortBy, final OutExpression outExpression, final NExpression nExpression) { + return sortingPickNAccumulator(notNull("fieldName", fieldName), "$topN", + notNull("sortBy", sortBy), notNull("outExpression", outExpression), notNull("nExpression", nExpression)); + } + + /** + * Gets a field name for a $group operation representing the value of the given expression when applied to the last member of + * the group. + * + * @param fieldName the field name + * @param expression the expression + * @param <TExpression> the expression type + * @return the field + * @mongodb.driver.manual reference/operator/aggregation/last/ $last + */ + public static <TExpression> BsonField last(final String fieldName, final TExpression expression) { + return accumulatorOperator("$last", fieldName, expression); + } + + /** + * Returns a combination of a computed field and an accumulator that produces a BSON {@link org.bson.BsonType#ARRAY Array} + * of values of the given {@code inExpression} computed for the last {@code N} elements within a presorted group, + * where {@code N} is the positive integral value of the {@code nExpression}. + * + * @param fieldName The field computed by the accumulator. + * @param inExpression The input expression. + * @param nExpression The expression limiting the number of produced values. + * @param <InExpression> The type of the input expression. + * @param <NExpression> The type of the limiting expression. + * @return The requested {@link BsonField}. + * @mongodb.driver.manual reference/operator/aggregation/lastN/ $lastN + * @since 4.7 + * @mongodb.server.release 5.2 + */ + public static <InExpression, NExpression> BsonField lastN( + final String fieldName, final InExpression inExpression, final NExpression nExpression) { + return pickNAccumulator(notNull("fieldName", fieldName), "$lastN", + notNull("inExpression", inExpression), notNull("nExpression", nExpression)); + } + + /** + * Returns a combination of a computed field and an accumulator that produces + * a value of the given {@code outExpression} computed for the bottom element within a group + * sorted according to the provided {@code sortBy} specification. + * + * @param fieldName The field computed by the accumulator. + * @param sortBy The {@linkplain Sorts sort specification}. The syntax is identical to the one expected by {@link Aggregates#sort(Bson)}. + * @param outExpression The output expression. + * @param <OutExpression> The type of the output expression. + * @return The requested {@link BsonField}.
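+ * <p>A minimal sketch with illustrative field names: for each game, keep the id and score of the lowest-scoring player.</p>
+ * <pre>{@code
+ * Aggregates.group("$gameId",
+ *         Accumulators.bottom("lowestScoring", Sorts.descending("score"), Arrays.asList("$playerId", "$score")));
+ * }</pre>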
+ * @mongodb.driver.manual reference/operator/aggregation/bottom/ $bottom + * @since 4.7 + * @mongodb.server.release 5.2 + */ + public static <OutExpression> BsonField bottom(final String fieldName, final Bson sortBy, final OutExpression outExpression) { + return sortingPickAccumulator(notNull("fieldName", fieldName), "$bottom", + notNull("sortBy", sortBy), notNull("outExpression", outExpression)); + } + + /** + * Returns a combination of a computed field and an accumulator that produces a BSON {@link org.bson.BsonType#ARRAY Array} + * of values of the given {@code outExpression} computed for the bottom {@code N} elements within a group + * sorted according to the provided {@code sortBy} specification, + * where {@code N} is the positive integral value of the {@code nExpression}. + * + * @param fieldName The field computed by the accumulator. + * @param sortBy The {@linkplain Sorts sort specification}. The syntax is identical to the one expected by {@link Aggregates#sort(Bson)}. + * @param outExpression The output expression. + * @param nExpression The expression limiting the number of produced values. + * @param <OutExpression> The type of the output expression. + * @param <NExpression> The type of the limiting expression. + * @return The requested {@link BsonField}. + * @mongodb.driver.manual reference/operator/aggregation/bottomN/ $bottomN + * @since 4.7 + * @mongodb.server.release 5.2 + */ + public static <OutExpression, NExpression> BsonField bottomN( + final String fieldName, final Bson sortBy, final OutExpression outExpression, final NExpression nExpression) { + return sortingPickNAccumulator(notNull("fieldName", fieldName), "$bottomN", + notNull("sortBy", sortBy), notNull("outExpression", outExpression), notNull("nExpression", nExpression)); + } + + /** + * Gets a field name for a $group operation representing the maximum of the values of the given expression when applied to all + * members of the group. + * + * @param fieldName the field name + * @param expression the expression + * @param <TExpression> the expression type + * @return the field + * @mongodb.driver.manual reference/operator/aggregation/max/ $max + */ + public static <TExpression> BsonField max(final String fieldName, final TExpression expression) { + return accumulatorOperator("$max", fieldName, expression); + } + + /** + * Returns a combination of a computed field and an accumulator that produces a BSON {@link org.bson.BsonType#ARRAY Array} + * of {@code N} largest values of the given {@code inExpression}, + * where {@code N} is the positive integral value of the {@code nExpression}. + * + * @param fieldName The field computed by the accumulator. + * @param inExpression The input expression. + * @param nExpression The expression limiting the number of produced values. + * @param <InExpression> The type of the input expression. + * @param <NExpression> The type of the limiting expression. + * @return The requested {@link BsonField}. + * @mongodb.driver.manual reference/operator/aggregation/maxN/ $maxN + * @since 4.7 + * @mongodb.server.release 5.2 + */ + public static <InExpression, NExpression> BsonField maxN( + final String fieldName, final InExpression inExpression, final NExpression nExpression) { + return pickNAccumulator(notNull("fieldName", fieldName), "$maxN", + notNull("inExpression", inExpression), notNull("nExpression", nExpression)); + } + + /** + * Gets a field name for a $group operation representing the minimum of the values of the given expression when applied to all + * members of the group.
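+ * <p>A minimal sketch with illustrative names: the smallest quantity seen per item.</p>
+ * <pre>{@code
+ * Aggregates.group("$item", Accumulators.min("minQuantity", "$quantity"));
+ * }</pre>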
+ * + * @param fieldName the field name + * @param expression the expression + * @param <TExpression> the expression type + * @return the field + * @mongodb.driver.manual reference/operator/aggregation/min/ $min + */ + public static <TExpression> BsonField min(final String fieldName, final TExpression expression) { + return accumulatorOperator("$min", fieldName, expression); + } + + /** + * Returns a combination of a computed field and an accumulator that produces a BSON {@link org.bson.BsonType#ARRAY Array} + * of {@code N} smallest values of the given {@code inExpression}, + * where {@code N} is the positive integral value of the {@code nExpression}. + * + * @param fieldName The field computed by the accumulator. + * @param inExpression The input expression. + * @param nExpression The expression limiting the number of produced values. + * @param <InExpression> The type of the input expression. + * @param <NExpression> The type of the limiting expression. + * @return The requested {@link BsonField}. + * @mongodb.driver.manual reference/operator/aggregation/minN/ $minN + * @since 4.7 + * @mongodb.server.release 5.2 + */ + public static <InExpression, NExpression> BsonField minN( + final String fieldName, final InExpression inExpression, final NExpression nExpression) { + return pickNAccumulator(notNull("fieldName", fieldName), "$minN", + notNull("inExpression", inExpression), notNull("nExpression", nExpression)); + } + + /** + * Gets a field name for a $group operation representing an array of all values that results from applying an expression to each + * document in a group of documents that share the same group by key. + * + * @param fieldName the field name + * @param expression the expression + * @param <TExpression> the expression type + * @return the field + * @mongodb.driver.manual reference/operator/aggregation/push/ $push + */ + public static <TExpression> BsonField push(final String fieldName, final TExpression expression) { + return accumulatorOperator("$push", fieldName, expression); + } + + /** + * Gets a field name for a $group operation representing all unique values that results from applying the given expression to each + * document in a group of documents that share the same group by key. + * + * @param fieldName the field name + * @param expression the expression + * @param <TExpression> the expression type + * @return the field + * @mongodb.driver.manual reference/operator/aggregation/addToSet/ $addToSet + */ + public static <TExpression> BsonField addToSet(final String fieldName, final TExpression expression) { + return accumulatorOperator("$addToSet", fieldName, expression); + } + + + /** + * Gets a field name for a $group operation representing the result of merging the fields of the documents. + * If documents to merge include the same field name, the field, in the resulting document, has the value from the last document + * merged for the field. + * + * @param fieldName the field name + * @param expression the expression + * @param <TExpression> the expression type + * @return the field + * @since 4.4 + * @mongodb.driver.manual reference/operator/aggregation/mergeObjects/ $mergeObjects + */ + public static <TExpression> BsonField mergeObjects(final String fieldName, final TExpression expression) { + return accumulatorOperator("$mergeObjects", fieldName, expression); + } + + /** + * Gets a field name for a $group operation representing the population standard deviation of the values of the given expression + * when applied to all members of the group. + * + *
<p>Use if the values encompass the entire population of data you want to represent and do not wish to generalize about + * a larger population.</p>
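+ * <p>A minimal sketch with illustrative names:</p>
+ * <pre>{@code
+ * Aggregates.group(null, Accumulators.stdDevPop("scoreStdDev", "$score"));
+ * }</pre>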
+ * + * @param fieldName the field name + * @param expression the expression + * @param <TExpression> the expression type + * @return the field + * @mongodb.driver.manual reference/operator/aggregation/stdDevPop/ $stdDevPop + * @mongodb.server.release 3.2 + * @since 3.2 + */ + public static <TExpression> BsonField stdDevPop(final String fieldName, final TExpression expression) { + return accumulatorOperator("$stdDevPop", fieldName, expression); + } + + /** + * Gets a field name for a $group operation representing the sample standard deviation of the values of the given expression + * when applied to all members of the group. + * + *
<p>Use if the values encompass a sample of a population of data from which to generalize about the population.</p>
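+ * <p>A minimal sketch with illustrative names:</p>
+ * <pre>{@code
+ * Aggregates.group(null, Accumulators.stdDevSamp("sampledScoreStdDev", "$score"));
+ * }</pre>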
+ * + * @param fieldName the field name + * @param expression the expression + * @param <TExpression> the expression type + * @return the field + * @mongodb.driver.manual reference/operator/aggregation/stdDevSamp/ $stdDevSamp + * @mongodb.server.release 3.2 + * @since 3.2 + */ + public static <TExpression> BsonField stdDevSamp(final String fieldName, final TExpression expression) { + return accumulatorOperator("$stdDevSamp", fieldName, expression); + } + + /** + * Creates an $accumulator pipeline stage + * + * @param fieldName the field name + * @param initFunction a function used to initialize the state + * @param accumulateFunction a function used to accumulate documents + * @param mergeFunction a function used to merge two internal states, e.g. accumulated on different shards or threads. It + * returns the resulting state of the accumulator. + * @return the $accumulator pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/accumulator/ $accumulator + * @mongodb.server.release 4.4 + * @since 4.1 + */ + public static BsonField accumulator(final String fieldName, final String initFunction, final String accumulateFunction, + final String mergeFunction) { + return accumulator(fieldName, initFunction, null, accumulateFunction, null, mergeFunction, null, "js"); + } + + /** + * Creates an $accumulator pipeline stage + * + * @param fieldName the field name + * @param initFunction a function used to initialize the state + * @param accumulateFunction a function used to accumulate documents + * @param mergeFunction a function used to merge two internal states, e.g. accumulated on different shards or threads. It + * returns the resulting state of the accumulator. + * @param finalizeFunction a function used to finalize the state and return the result (may be null) + * @return the $accumulator pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/accumulator/ $accumulator + * @mongodb.server.release 4.4 + * @since 4.1 + */ + public static BsonField accumulator(final String fieldName, final String initFunction, final String accumulateFunction, + final String mergeFunction, @Nullable final String finalizeFunction) { + return accumulator(fieldName, initFunction, null, accumulateFunction, null, mergeFunction, finalizeFunction, "js"); + } + + /** + * Creates an $accumulator pipeline stage + * + * @param fieldName the field name + * @param initFunction a function used to initialize the state + * @param initArgs init function’s arguments (may be null) + * @param accumulateFunction a function used to accumulate documents + * @param accumulateArgs additional accumulate function’s arguments (may be null). The first argument to the function + * is ‘state’. + * @param mergeFunction a function used to merge two internal states, e.g. accumulated on different shards or threads. It + * returns the resulting state of the accumulator.
+ * @param finalizeFunction a function used to finalize the state and return the result (may be null) + * @return the $accumulator pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/accumulator/ $accumulator + * @mongodb.server.release 4.4 + * @since 4.1 + */ + public static BsonField accumulator(final String fieldName, final String initFunction, @Nullable final List<String> initArgs, + final String accumulateFunction, @Nullable final List<String> accumulateArgs, + final String mergeFunction, @Nullable final String finalizeFunction) { + return accumulator(fieldName, initFunction, initArgs, accumulateFunction, accumulateArgs, mergeFunction, finalizeFunction, "js"); + } + + /** + * Creates an $accumulator pipeline stage + * + * @param fieldName the field name + * @param initFunction a function used to initialize the state + * @param accumulateFunction a function used to accumulate documents + * @param mergeFunction a function used to merge two internal states, e.g. accumulated on different shards or threads. It + * returns the resulting state of the accumulator. + * @param finalizeFunction a function used to finalize the state and return the result (may be null) + * @param lang a language specifier + * @return the $accumulator pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/accumulator/ $accumulator + * @mongodb.server.release 4.4 + * @since 4.1 + */ + public static BsonField accumulator(final String fieldName, final String initFunction, final String accumulateFunction, + final String mergeFunction, @Nullable final String finalizeFunction, final String lang) { + return accumulator(fieldName, initFunction, null, accumulateFunction, null, mergeFunction, finalizeFunction, lang); + } + + /** + * Creates an $accumulator pipeline stage + * + * @param fieldName the field name + * @param initFunction a function used to initialize the state + * @param initArgs init function’s arguments (may be null) + * @param accumulateFunction a function used to accumulate documents + * @param accumulateArgs additional accumulate function’s arguments (may be null). The first argument to the function + * is ‘state’. + * @param mergeFunction a function used to merge two internal states, e.g. accumulated on different shards or threads. It + * returns the resulting state of the accumulator. + * @param finalizeFunction a function used to finalize the state and return the result (may be null) + * @param lang a language specifier + * @return the $accumulator pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/accumulator/ $accumulator + * @mongodb.server.release 4.4 + * @since 4.1 + */ + public static BsonField accumulator(final String fieldName, final String initFunction, @Nullable final List<String> initArgs, + final String accumulateFunction, @Nullable final List<String> accumulateArgs, + final String mergeFunction, @Nullable final String finalizeFunction, final String lang) { + BsonDocument accumulatorStage = new BsonDocument("init", new BsonString(initFunction)) + .append("initArgs", initArgs != null ? new BsonArray(initArgs.stream().map(initArg -> + new BsonString(initArg)).collect(toList())) : new BsonArray()) + .append("accumulate", new BsonString(accumulateFunction)) + .append("accumulateArgs", accumulateArgs != null ?
new BsonArray(accumulateArgs.stream().map(accumulateArg -> + new BsonString(accumulateArg)).collect(toList())) : new BsonArray()) + .append("merge", new BsonString(mergeFunction)) + .append("lang", new BsonString(lang)); + if (finalizeFunction != null) { + accumulatorStage.append("finalize", new BsonString(finalizeFunction)); + } + return accumulatorOperator("$accumulator", fieldName, accumulatorStage); + } + + private static <TExpression> BsonField accumulatorOperator(final String name, final String fieldName, final TExpression expression) { + return new BsonField(fieldName, new SimpleExpression<>(name, expression)); + } + + private static <InExpression, NExpression> BsonField pickNAccumulator( + final String fieldName, final String accumulatorName, final InExpression inExpression, final NExpression nExpression) { + return new BsonField(fieldName, new Document(accumulatorName, new Document("input", inExpression).append("n", nExpression))); + } + + private static <OutExpression> BsonField sortingPickAccumulator( + final String fieldName, final String accumulatorName, final Bson sort, final OutExpression outExpression) { + return new BsonField(fieldName, new Document(accumulatorName, new Document("sortBy", sort).append("output", outExpression))); + } + + private static <OutExpression, NExpression> BsonField sortingPickNAccumulator( + final String fieldName, final String accumulatorName, + final Bson sort, final OutExpression outExpression, final NExpression nExpression) { + return new BsonField(fieldName, new Document(accumulatorName, new Document("sortBy", sort) + .append("output", outExpression) + .append("n", nExpression))); + } + + private static <InExpression, PExpression> BsonField quantileAccumulator(final String quantileAccumulatorName, + final String fieldName, final InExpression inExpression, + @Nullable final PExpression pExpression, final QuantileMethod method) { + Document document = new Document("input", inExpression) + .append("method", method.toBsonValue()); + if (pExpression != null) { + document.append("p", pExpression); + } + return accumulatorOperator(quantileAccumulatorName, fieldName, document); + } + + private Accumulators() { + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/Aggregates.java b/driver-core/src/main/com/mongodb/client/model/Aggregates.java new file mode 100644 index 00000000000..44283ccba04 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/Aggregates.java @@ -0,0 +1,2199 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.client.model; + +import com.mongodb.MongoNamespace; +import com.mongodb.client.model.densify.DensifyOptions; +import com.mongodb.client.model.densify.DensifyRange; +import com.mongodb.client.model.fill.FillOptions; +import com.mongodb.client.model.fill.FillOutputField; +import com.mongodb.client.model.geojson.Point; +import com.mongodb.client.model.search.FieldSearchPath; +import com.mongodb.client.model.search.SearchCollector; +import com.mongodb.client.model.search.SearchOperator; +import com.mongodb.client.model.search.SearchOptions; +import com.mongodb.client.model.search.VectorSearchOptions; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonDocumentWriter; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.BsonType; +import org.bson.BsonValue; +import org.bson.Document; +import org.bson.BinaryVector; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.client.model.GeoNearOptions.geoNearOptions; +import static com.mongodb.client.model.densify.DensifyOptions.densifyOptions; +import static com.mongodb.client.model.search.SearchOptions.searchOptions; +import static com.mongodb.internal.Iterables.concat; +import static com.mongodb.internal.client.model.Util.sizeAtLeast; +import static java.util.Arrays.asList; + +/** + * Builders for aggregation pipeline stages. + * + * @mongodb.driver.manual core/aggregation-pipeline/ Aggregation pipeline + * @mongodb.server.release 2.2 + * @since 3.1 + */ +@SuppressWarnings("overloads") +public final class Aggregates { + + /** + * Creates an $addFields pipeline stage + * + * @param fields the fields to add + * @return the $addFields pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/addFields/ $addFields + * @mongodb.server.release 3.4 + * @since 3.4 + */ + public static Bson addFields(final Field... fields) { + return addFields(asList(fields)); + } + + /** + * Creates an $addFields pipeline stage + * + * @param fields the fields to add + * @return the $addFields pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/addFields/ $addFields + * @mongodb.server.release 3.4 + * @since 3.4 + */ + public static Bson addFields(final List> fields) { + return new FieldsStage("$addFields", fields); + } + + /** + * Creates a $set pipeline stage for the specified projection + * + * @param fields the fields to add + * @return the $set pipeline stage + * @see Projections + * @since 4.3 + * @mongodb.server.release 4.2 + * @mongodb.driver.manual reference/operator/aggregation/set/ $set + */ + public static Bson set(final Field... 
fields) { + return set(asList(fields)); + } + + /** + * Creates a $set pipeline stage for the specified projection + * + * @param fields the fields to add + * @return the $set pipeline stage + * @see Projections + * @since 4.3 + * @mongodb.server.release 4.2 + * @mongodb.driver.manual reference/operator/aggregation/set/ $set + */ + public static Bson set(final List> fields) { + return new FieldsStage("$set", fields); + } + + + /** + * Creates a $bucket pipeline stage + * + * @param the groupBy expression type + * @param the boundary type + * @param groupBy the criteria to group By + * @param boundaries the boundaries of the buckets + * @return the $bucket pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/bucket/ $bucket + * @mongodb.server.release 3.4 + * @since 3.4 + */ + public static Bson bucket(final TExpression groupBy, final List boundaries) { + return bucket(groupBy, boundaries, new BucketOptions()); + } + + /** + * Creates a $bucket pipeline stage + * + * @param the groupBy expression type + * @param the boundary type + * @param groupBy the criteria to group By + * @param boundaries the boundaries of the buckets + * @param options the optional values for the $bucket stage + * @return the $bucket pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/bucket/ $bucket + * @mongodb.server.release 3.4 + * @since 3.4 + */ + public static Bson bucket(final TExpression groupBy, final List boundaries, + final BucketOptions options) { + return new BucketStage<>(groupBy, boundaries, options); + } + + /** + * Creates a $bucketAuto pipeline stage + * + * @param the groupBy expression type + * @param groupBy the criteria to group By + * @param buckets the number of the buckets + * @return the $bucketAuto pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/bucketAuto/ $bucketAuto + * @mongodb.server.release 3.4 + * @since 3.4 + */ + public static Bson bucketAuto(final TExpression groupBy, final int buckets) { + return bucketAuto(groupBy, buckets, new BucketAutoOptions()); + } + + /** + * Creates a $bucketAuto pipeline stage + * + * @param the groupBy expression type + * @param groupBy the criteria to group By + * @param buckets the number of the buckets + * @param options the optional values for the $bucketAuto stage + * @return the $bucketAuto pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/bucketAuto/ $bucketAuto + * @mongodb.server.release 3.4 + * @since 3.4 + */ + public static Bson bucketAuto(final TExpression groupBy, final int buckets, final BucketAutoOptions options) { + return new BucketAutoStage<>(groupBy, buckets, options); + } + + /** + * Creates a $count pipeline stage using the field name "count" to store the result + * + * @return the $count pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/count/ $count + * @mongodb.server.release 3.4 + * @since 3.4 + */ + public static Bson count() { + return count("count"); + } + + /** + * Creates a $count pipeline stage using the named field to store the result + * + * @param field the field in which to store the count + * @return the $count pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/count/ $count + * @mongodb.server.release 3.4 + * @since 3.4 + */ + public static Bson count(final String field) { + return new BsonDocument("$count", new BsonString(field)); + } + + /** + * Creates a $match pipeline stage for the specified filter + * + * @param filter the filter to match + * @return the $match pipeline stage + 
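+ * <p>A minimal sketch with illustrative names:</p>
+ * <pre>{@code
+ * collection.aggregate(Arrays.asList(
+ *         Aggregates.match(Filters.and(Filters.eq("status", "A"), Filters.gt("qty", 25)))));
+ * }</pre>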
* @see Filters + * @mongodb.driver.manual reference/operator/aggregation/match/ $match + */ + public static Bson match(final Bson filter) { + return new SimplePipelineStage("$match", filter); + } + + /** + * Creates a $project pipeline stage for the specified projection + * + * @param projection the projection + * @return the $project pipeline stage + * @see Projections + * @mongodb.driver.manual reference/operator/aggregation/project/ $project + */ + public static Bson project(final Bson projection) { + return new SimplePipelineStage("$project", projection); + } + + /** + * Creates a $sort pipeline stage for the specified sort specification + * + * @param sort the sort specification + * @return the $sort pipeline stage + * @see Sorts + * @mongodb.driver.manual reference/operator/aggregation/sort/#sort-aggregation $sort + */ + public static Bson sort(final Bson sort) { + return new SimplePipelineStage("$sort", sort); + } + + /** + * Creates a $sortByCount pipeline stage for the specified filter + * + * @param <TExpression> the expression type + * @param filter the filter specification + * @return the $sortByCount pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/sortByCount/ $sortByCount + * @mongodb.server.release 3.4 + * @since 3.4 + */ + public static <TExpression> Bson sortByCount(final TExpression filter) { + return new SortByCountStage<>(filter); + } + + /** + * Creates a $skip pipeline stage + * + * @param skip the number of documents to skip + * @return the $skip pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/skip/ $skip + */ + public static Bson skip(final int skip) { + return new BsonDocument("$skip", new BsonInt32(skip)); + } + + /** + * Creates a $limit pipeline stage for the specified filter + * + * @param limit the limit + * @return the $limit pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/limit/ $limit + */ + public static Bson limit(final int limit) { + return new BsonDocument("$limit", new BsonInt32(limit)); + } + + /** + * Creates a $lookup pipeline stage, joining the current collection with the one specified in from + * using equality match between the local field and the foreign field + * + * @param from the name of the collection in the same database to perform the join with. + * @param localField the field from the local collection to match values against. + * @param foreignField the field in the from collection to match values against. + * @param as the name of the new array field to add to the input documents. + * @return the $lookup pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/lookup/ $lookup + * @mongodb.server.release 3.2 + * @since 3.2 + */ + public static Bson lookup(final String from, final String localField, final String foreignField, final String as) { + return new BsonDocument("$lookup", new BsonDocument("from", new BsonString(from)) + .append("localField", new BsonString(localField)) + .append("foreignField", new BsonString(foreignField)) + .append("as", new BsonString(as))); + } + + /** + * Creates a $lookup pipeline stage, joining the current collection with the + * one specified in from using the given pipeline. If the first stage in the + * pipeline is a {@link Aggregates#documents(List) $documents} stage, then + * the {@code from} collection is ignored. + * + * @param from the name of the collection in the same database to + * perform the join with. Must be {@code null} if the + * first pipeline stage is $documents. + * @param pipeline the pipeline to run on the joined collection. + * @param as the name of the new array field to add to the input documents. + * @return the $lookup pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/lookup/ $lookup + * @mongodb.server.release 3.6 + * @since 3.7 + */ + public static Bson lookup(@Nullable final String from, final List<? extends Bson> pipeline, final String as) { + return lookup(from, null, pipeline, as); + }
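+ // A hypothetical sketch of the pipeline form of $lookup (collection and field names are illustrative):
+ // Bson stage = Aggregates.lookup("inventory",
+ //         Arrays.asList(Aggregates.match(Filters.eq("inStock", true))),
+ //         "stockData");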
+ + /** + * Creates a $lookup pipeline stage, joining the current collection with the + * one specified in from using the given pipeline. If the first stage in the + * pipeline is a {@link Aggregates#documents(List) $documents} stage, then + * the {@code from} collection is ignored. + * + * @param <TExpression> the Variable value expression type + * @param from the name of the collection in the same database to + * perform the join with. Must be {@code null} if the + * first pipeline stage is $documents. + * @param let the variables to use in the pipeline field stages. + * @param pipeline the pipeline to run on the joined collection. + * @param as the name of the new array field to add to the input documents. + * @return the $lookup pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/lookup/ $lookup + * @mongodb.server.release 3.6 + * @since 3.7 + */ + public static <TExpression> Bson lookup(@Nullable final String from, @Nullable final List<Variable<TExpression>> let, + final List<? extends Bson> pipeline, final String as) { + return new LookupStage<>(from, let, pipeline, as); + } + + /** + * Creates a facet pipeline stage + * + * @param facets the facets to use + * @return the new pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/facet/ $facet + * @mongodb.server.release 3.4 + * @since 3.4 + */ + public static Bson facet(final List<Facet> facets) { + return new FacetStage(facets); + } + + /** + * Creates a facet pipeline stage + * + * @param facets the facets to use + * @return the new pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/facet/ $facet + * @mongodb.server.release 3.4 + * @since 3.4 + */ + public static Bson facet(final Facet...
facets) { + return new FacetStage(asList(facets)); + } + + /** + * Creates a graphLookup pipeline stage for the specified filter + * + * @param the expression type + * @param from the collection to query + * @param startWith the expression to start the graph lookup with + * @param connectFromField the from field + * @param connectToField the to field + * @param as name of field in output document + * @return the $graphLookup pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/graphLookup/ $graphLookup + * @mongodb.server.release 3.4 + * @since 3.4 + */ + public static Bson graphLookup(final String from, final TExpression startWith, final String connectFromField, + final String connectToField, final String as) { + return graphLookup(from, startWith, connectFromField, connectToField, as, new GraphLookupOptions()); + } + + /** + * Creates a graphLookup pipeline stage for the specified filter + * + * @param the expression type + * @param from the collection to query + * @param startWith the expression to start the graph lookup with + * @param connectFromField the from field + * @param connectToField the to field + * @param as name of field in output document + * @param options optional values for the graphLookup + * @return the $graphLookup pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/graphLookup/ $graphLookup + * @mongodb.server.release 3.4 + * @since 3.4 + */ + public static Bson graphLookup(final String from, final TExpression startWith, final String connectFromField, + final String connectToField, final String as, final GraphLookupOptions options) { + notNull("options", options); + return new GraphLookupStage<>(from, startWith, connectFromField, connectToField, as, options); + } + + /** + * Creates a $group pipeline stage for the specified filter + * + * @param the expression type + * @param id the id expression for the group, which may be null + * @param fieldAccumulators zero or more field accumulator pairs + * @return the $group pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/group/ $group + * @mongodb.driver.manual meta/aggregation-quick-reference/#aggregation-expressions Expressions + */ + public static Bson group(@Nullable final TExpression id, final BsonField... fieldAccumulators) { + return group(id, asList(fieldAccumulators)); + } + + /** + * Creates a $group pipeline stage for the specified filter + * + * @param the expression type + * @param id the id expression for the group, which may be null + * @param fieldAccumulators zero or more field accumulator pairs + * @return the $group pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/group/ $group + * @mongodb.driver.manual meta/aggregation-quick-reference/#aggregation-expressions Expressions + */ + public static Bson group(@Nullable final TExpression id, final List fieldAccumulators) { + return new GroupStage<>(id, fieldAccumulators); + } + + /** + * Creates a $unionWith pipeline stage. + * + * @param collection the name of the collection in the same database to perform the union with. + * @param pipeline the pipeline to run on the union. 
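+ * <p>A minimal sketch (collection and field names are illustrative):</p>
+ * <pre>{@code
+ * Aggregates.unionWith("archivedOrders", Arrays.asList(Aggregates.match(Filters.eq("status", "A"))));
+ * }</pre>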
+ * @return the $unionWith pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/unionWith/ $unionWith + * @mongodb.server.release 4.4 + * @since 4.1 + */ + public static Bson unionWith(final String collection, final List pipeline) { + return new UnionWithStage(collection, pipeline); + } + + /** + * Creates a $unwind pipeline stage for the specified field name, which must be prefixed by a {@code '$'} sign. + * + * @param fieldName the field name, prefixed by a {@code '$' sign} + * @return the $unwind pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/unwind/ $unwind + */ + public static Bson unwind(final String fieldName) { + return new BsonDocument("$unwind", new BsonString(fieldName)); + } + + /** + * Creates a $unwind pipeline stage for the specified field name, which must be prefixed by a {@code '$'} sign. + * + * @param fieldName the field name, prefixed by a {@code '$' sign} + * @param unwindOptions options for the unwind pipeline stage + * @return the $unwind pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/unwind/ $unwind + * @mongodb.server.release 3.2 + * @since 3.2 + */ + public static Bson unwind(final String fieldName, final UnwindOptions unwindOptions) { + notNull("unwindOptions", unwindOptions); + BsonDocument options = new BsonDocument("path", new BsonString(fieldName)); + Boolean preserveNullAndEmptyArrays = unwindOptions.isPreserveNullAndEmptyArrays(); + if (preserveNullAndEmptyArrays != null) { + options.append("preserveNullAndEmptyArrays", BsonBoolean.valueOf(preserveNullAndEmptyArrays)); + } + String includeArrayIndex = unwindOptions.getIncludeArrayIndex(); + if (includeArrayIndex != null) { + options.append("includeArrayIndex", new BsonString(includeArrayIndex)); + } + return new BsonDocument("$unwind", options); + } + + /** + * Creates a $out pipeline stage that writes into the specified collection + * + * @param collectionName the collection name + * @return the $out pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/out/ $out + */ + public static Bson out(final String collectionName) { + return new BsonDocument("$out", new BsonString(collectionName)); + } + + /** + * Creates a $out pipeline stage that supports outputting to a different database. 
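+ * <p>For example (database and collection names are illustrative):</p>
+ * <pre>{@code
+ * Aggregates.out("reporting", "monthlyTotals");
+ * }</pre>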
+ * + * @param databaseName the database name + * @param collectionName the collection name + * @return the $out pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/out/ $out + * @mongodb.server.release 4.4 + * @since 4.1 + */ + public static Bson out(final String databaseName, final String collectionName) { + return new BsonDocument("$out", new BsonDocument("db", new BsonString(databaseName)) + .append("coll", new BsonString(collectionName))); + } + + /** + * Creates a $out pipeline stage that writes out to the specified destination + * + * @param destination the destination details + * @return the $out pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/out/ $out + * @since 4.1 + */ + public static Bson out(final Bson destination) { + return new SimplePipelineStage("$out", destination); + } + + /** + * Creates a $merge pipeline stage that merges into the specified collection + * + * @param collectionName the name of the collection to merge into + * @return the $merge pipeline stage + * @since 3.11 + * @mongodb.driver.manual reference/operator/aggregation/merge/ $merge + * @mongodb.server.release 4.2 + */ + public static Bson merge(final String collectionName) { + return merge(collectionName, new MergeOptions()); + } + + /** + * Creates a $merge pipeline stage that merges into the specified namespace + * + * @param namespace the namespace to merge into + * @return the $merge pipeline stage + * @since 3.11 + * @mongodb.driver.manual reference/operator/aggregation/merge/ $merge + * @mongodb.server.release 4.2 + */ + public static Bson merge(final MongoNamespace namespace) { + return merge(namespace, new MergeOptions()); + } + + /** + * Creates a $merge pipeline stage that merges into the specified collection using the specified options. + * + * @param collectionName the name of the collection to merge into + * @param options the merge options + * @return the $merge pipeline stage + * @since 3.11 + * @mongodb.driver.manual reference/operator/aggregation/merge/ $merge + * @mongodb.server.release 4.2 + */ + public static Bson merge(final String collectionName, final MergeOptions options) { + return new MergeStage(new BsonString(collectionName), options); + } + + /** + * Creates a $merge pipeline stage that merges into the specified namespace using the specified options. + * + * @param namespace the namespace to merge into + * @param options the merge options + * @return the $merge pipeline stage + * @since 3.11 + * @mongodb.driver.manual reference/operator/aggregation/merge/ $merge + * @mongodb.server.release 4.2 + */ + public static Bson merge(final MongoNamespace namespace, final MergeOptions options) { + return new MergeStage(new BsonDocument("db", new BsonString(namespace.getDatabaseName())) + .append("coll", new BsonString(namespace.getCollectionName())), options); + } + + /** + * Creates a $replaceRoot pipeline stage + * + * @param the new root type + * @param value the new root value + * @return the $replaceRoot pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/replaceRoot/ $replaceRoot + * @mongodb.server.release 3.4 + * @since 3.4 + */ + public static Bson replaceRoot(final TExpression value) { + return new ReplaceStage<>(value); + } + + /** + * Creates a $replaceRoot pipeline stage + * + *
<p>With $replaceWith, you can promote an embedded document to the top-level. + * You can also specify a new document as the replacement.</p> + * + * <p>The $replaceWith is an alias for {@link #replaceRoot(Object)}.</p>
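+ * <p>For example, promoting a hypothetical embedded {@code name} document to the top level:</p>
+ * <pre>{@code
+ * Aggregates.replaceWith("$name");
+ * }</pre>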
+ * + * @param the new root type + * @param value the new root value + * @return the $replaceRoot pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/replaceWith/ $replaceWith + * @mongodb.server.release 4.2 + * @since 3.11 + */ + public static Bson replaceWith(final TExpression value) { + return new ReplaceStage<>(value, true); + } + + /** + * Creates a $sample pipeline stage with the specified sample size + * + * @param size the sample size + * @return the $sample pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/sample/ $sample + * @mongodb.server.release 3.2 + * @since 3.2 + */ + public static Bson sample(final int size) { + return new BsonDocument("$sample", new BsonDocument("size", new BsonInt32(size))); + } + + /** + * Creates a {@code $setWindowFields} pipeline stage, which allows using window operators. + * This stage partitions the input documents similarly to the {@link #group(Object, List) $group} pipeline stage, + * optionally sorts them, computes fields in the documents by computing window functions over {@linkplain Window windows} specified per + * function, and outputs the documents. The important difference from the {@code $group} pipeline stage is that + * documents belonging to the same partition or window are not folded into a single document. + * + * @param partitionBy Optional partitioning of data specified like {@code id} in {@link #group(Object, List)}. + * If {@code null}, then all documents belong to the same partition. + * @param sortBy Fields to sort by. The syntax is identical to {@code sort} in {@link #sort(Bson)} (see {@link Sorts}). + * Sorting is required by certain functions and may be required by some windows (see {@link Windows} for more details). + * Sorting is used only for the purpose of computing window functions and does not guarantee ordering + * of the output documents. + * @param output A {@linkplain WindowOutputField windowed computation}. + * @param moreOutput More {@linkplain WindowOutputField windowed computations}. + * @param The {@code partitionBy} expression type. + * @return The {@code $setWindowFields} pipeline stage. + * @mongodb.driver.dochub core/window-functions-set-window-fields $setWindowFields + * @mongodb.server.release 5.0 + * @since 4.3 + */ + public static Bson setWindowFields(@Nullable final TExpression partitionBy, @Nullable final Bson sortBy, + final WindowOutputField output, final WindowOutputField... moreOutput) { + return setWindowFields(partitionBy, sortBy, concat(notNull("output", output), moreOutput)); + } + + /** + * Creates a {@code $setWindowFields} pipeline stage, which allows using window operators. + * This stage partitions the input documents similarly to the {@link #group(Object, List) $group} pipeline stage, + * optionally sorts them, computes fields in the documents by computing window functions over {@linkplain Window windows} specified per + * function, and outputs the documents. The important difference from the {@code $group} pipeline stage is that + * documents belonging to the same partition or window are not folded into a single document. + * + * @param partitionBy Optional partitioning of data specified like {@code id} in {@link #group(Object, List)}. + * If {@code null}, then all documents belong to the same partition. + * @param sortBy Fields to sort by. The syntax is identical to {@code sort} in {@link #sort(Bson)} (see {@link Sorts}). + * Sorting is required by certain functions and may be required by some windows (see {@link Windows} for more details). 
+ * Sorting is used only for the purpose of computing window functions and does not guarantee ordering + * of the output documents. + * @param output A list of {@linkplain WindowOutputField windowed computations}. + * Specifying an empty list is not an error, but the resulting stage does not do anything useful. + * @param The {@code partitionBy} expression type. + * @return The {@code $setWindowFields} pipeline stage. + * @mongodb.driver.dochub core/window-functions-set-window-fields $setWindowFields + * @mongodb.server.release 5.0 + * @since 4.3 + */ + public static Bson setWindowFields(@Nullable final TExpression partitionBy, @Nullable final Bson sortBy, + final Iterable output) { + notNull("output", output); + return new SetWindowFieldsStage<>(partitionBy, sortBy, output); + } + + /** + * Creates a {@code $densify} pipeline stage, which adds documents to a sequence of documents + * where certain values in the {@code field} are missing. + * + * @param field The field to densify. + * @param range The range. + * @return The requested pipeline stage. + * @mongodb.driver.manual reference/operator/aggregation/densify/ $densify + * @mongodb.driver.manual core/document/#dot-notation Dot notation + * @mongodb.server.release 5.1 + * @since 4.7 + */ + public static Bson densify(final String field, final DensifyRange range) { + return densify(notNull("field", field), notNull("range", range), densifyOptions()); + } + + /** + * Creates a {@code $densify} pipeline stage, which adds documents to a sequence of documents + * where certain values in the {@code field} are missing. + * + * @param field The field to densify. + * @param range The range. + * @param options The densify options. + * Specifying {@link DensifyOptions#densifyOptions()} is equivalent to calling {@link #densify(String, DensifyRange)}. + * @return The requested pipeline stage. + * @mongodb.driver.manual reference/operator/aggregation/densify/ $densify + * @mongodb.driver.manual core/document/#dot-notation Dot notation + * @mongodb.server.release 5.1 + * @since 4.7 + */ + public static Bson densify(final String field, final DensifyRange range, final DensifyOptions options) { + notNull("field", field); + notNull("range", range); + notNull("options", options); + return new Bson() { + @Override + public BsonDocument toBsonDocument(final Class documentClass, final CodecRegistry codecRegistry) { + BsonDocument densifySpecificationDoc = new BsonDocument("field", new BsonString(field)); + densifySpecificationDoc.append("range", range.toBsonDocument(documentClass, codecRegistry)); + densifySpecificationDoc.putAll(options.toBsonDocument(documentClass, codecRegistry)); + return new BsonDocument("$densify", densifySpecificationDoc); + } + + @Override + public String toString() { + return "Stage{name='$densify'" + + ", field=" + field + + ", range=" + range + + ", options=" + options + + '}'; + } + }; + } + + /** + * Creates a {@code $fill} pipeline stage, which assigns values to fields when they are {@link BsonType#NULL Null} or missing. + * + * @param options The fill options. + * @param output The {@link FillOutputField}. + * @param moreOutput More {@link FillOutputField}s. + * @return The requested pipeline stage. + * @mongodb.driver.manual reference/operator/aggregation/fill/ $fill + * @mongodb.server.release 5.3 + * @since 4.7 + */ + public static Bson fill(final FillOptions options, final FillOutputField output, final FillOutputField... 
moreOutput) { + return fill(options, concat(notNull("output", output), moreOutput)); + } + + /** + * Creates a {@code $fill} pipeline stage, which assigns values to fields when they are {@link BsonType#NULL Null} or missing. + * + * @param options The fill options. + * @param output The non-empty {@link FillOutputField}s. + * @return The requested pipeline stage. + * @mongodb.driver.manual reference/operator/aggregation/fill/ $fill + * @mongodb.server.release 5.3 + * @since 4.7 + */ + public static Bson fill(final FillOptions options, final Iterable output) { + notNull("options", options); + notNull("output", output); + isTrueArgument("output must not be empty", sizeAtLeast(output, 1)); + return new Bson() { + @Override + public BsonDocument toBsonDocument(final Class documentClass, final CodecRegistry codecRegistry) { + BsonDocument fillSpecificationDoc = new BsonDocument(); + fillSpecificationDoc.putAll(options.toBsonDocument(documentClass, codecRegistry)); + BsonDocument outputDoc = new BsonDocument(); + for (final FillOutputField computation : output) { + BsonDocument computationDoc = computation.toBsonDocument(documentClass, codecRegistry); + assertTrue(computationDoc.size() == 1); + outputDoc.putAll(computationDoc); + } + fillSpecificationDoc.append("output", outputDoc); + return new BsonDocument("$fill", fillSpecificationDoc); + } + + @Override + public String toString() { + return "Stage{name='$fill'" + + ", options=" + options + + ", output=" + output + + '}'; + } + }; + } + + /** + * Creates a {@code $search} pipeline stage supported by MongoDB Atlas. + * You may use the {@code $meta: "searchScore"} expression, e.g., via {@link Projections#metaSearchScore(String)}, + * to extract the relevance score assigned to each found document. + *
<p>{@link Filters#text(String, TextSearchOptions)} is a legacy text search alternative.</p>
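+ * <p>A minimal sketch, assuming an Atlas Search index exists and using illustrative names; the
+ * {@code SearchOperator.text} factory shown here is one way to build an operator:</p>
+ * <pre>{@code
+ * collection.aggregate(Arrays.asList(
+ *         Aggregates.search(SearchOperator.text(SearchPath.fieldPath("title"), "salad")),
+ *         Aggregates.limit(5)));
+ * }</pre>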
+ * + * @param operator A search operator. + * @return The {@code $search} pipeline stage. + * + * @mongodb.atlas.manual atlas-search/query-syntax/#-search $search + * @mongodb.atlas.manual atlas-search/operators-and-collectors/#operators Search operators + * @mongodb.atlas.manual atlas-search/scoring/ Scoring + * @since 4.7 + */ + public static Bson search(final SearchOperator operator) { + return search(operator, searchOptions()); + } + + /** + * Creates a {@code $search} pipeline stage supported by MongoDB Atlas. + * You may use the {@code $meta: "searchScore"} expression, e.g., via {@link Projections#metaSearchScore(String)}, + * to extract the relevance score assigned to each found document. + *
<p>{@link Filters#text(String, TextSearchOptions)} is a legacy text search alternative.</p>
+ * + * @param operator A search operator. + * @param options Optional {@code $search} pipeline stage fields. + * Specifying {@link SearchOptions#searchOptions()} is equivalent to calling {@link #search(SearchOperator)}. + * @return The {@code $search} pipeline stage. + * + * @mongodb.atlas.manual atlas-search/query-syntax/#-search $search + * @mongodb.atlas.manual atlas-search/operators-and-collectors/#operators Search operators + * @mongodb.atlas.manual atlas-search/scoring/ Scoring + * @since 4.7 + */ + public static Bson search(final SearchOperator operator, final SearchOptions options) { + return new SearchStage("$search", notNull("operator", operator), notNull("options", options)); + } + + /** + * Creates a {@code $search} pipeline stage supported by MongoDB Atlas. + * You may use {@code $meta: "searchScore"}, e.g., via {@link Projections#metaSearchScore(String)}, + * to extract the relevance score assigned to each found document. + * + * @param collector A search collector. + * @return The {@code $search} pipeline stage. + * + * @mongodb.atlas.manual atlas-search/query-syntax/#-search $search + * @mongodb.atlas.manual atlas-search/operators-and-collectors/#collectors Search collectors + * @mongodb.atlas.manual atlas-search/scoring/ Scoring + * @since 4.7 + */ + public static Bson search(final SearchCollector collector) { + return search(collector, searchOptions()); + } + + /** + * Creates a {@code $search} pipeline stage supported by MongoDB Atlas. + * You may use {@code $meta: "searchScore"}, e.g., via {@link Projections#metaSearchScore(String)}, + * to extract the relevance score assigned to each found document. + * + * @param collector A search collector. + * @param options Optional {@code $search} pipeline stage fields. + * Specifying {@link SearchOptions#searchOptions()} is equivalent to calling {@link #search(SearchCollector)}. + * @return The {@code $search} pipeline stage. + * + * @mongodb.atlas.manual atlas-search/query-syntax/#-search $search + * @mongodb.atlas.manual atlas-search/operators-and-collectors/#collectors Search collectors + * @mongodb.atlas.manual atlas-search/scoring/ Scoring + * @since 4.7 + */ + public static Bson search(final SearchCollector collector, final SearchOptions options) { + return new SearchStage("$search", notNull("collector", collector), notNull("options", options)); + } + + /** + * Creates a {@code $searchMeta} pipeline stage supported by MongoDB Atlas. + * Unlike {@link #search(SearchOperator) $search}, it does not return found documents, + * instead it returns metadata, which in case of using the {@code $search} stage + * may be extracted by using {@code $$SEARCH_META} variable, e.g., via {@link Projections#computedSearchMeta(String)}. + * + * @param operator A search operator. + * @return The {@code $searchMeta} pipeline stage. + * + * @mongodb.atlas.manual atlas-search/query-syntax/#-searchmeta $searchMeta + * @mongodb.atlas.manual atlas-search/operators-and-collectors/#operators Search operators + * @since 4.7 + */ + public static Bson searchMeta(final SearchOperator operator) { + return searchMeta(operator, searchOptions()); + } + + /** + * Creates a {@code $searchMeta} pipeline stage supported by MongoDB Atlas. + * Unlike {@link #search(SearchOperator, SearchOptions) $search}, it does not return found documents, + * instead it returns metadata, which in case of using the {@code $search} stage + * may be extracted by using {@code $$SEARCH_META} variable, e.g., via {@link Projections#computedSearchMeta(String)}. 
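+ * <p>A minimal sketch with illustrative names; the resulting documents contain the metadata directly:</p>
+ * <pre>{@code
+ * collection.aggregate(Arrays.asList(
+ *         Aggregates.searchMeta(SearchOperator.text(SearchPath.fieldPath("title"), "salad"))));
+ * }</pre>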
+
+    /**
+     * Creates a {@code $searchMeta} pipeline stage supported by MongoDB Atlas.
+     * Unlike {@link #search(SearchOperator, SearchOptions) $search}, it does not return found documents;
+     * instead it returns metadata, which, when the {@code $search} stage is used,
+     * may be extracted via the {@code $$SEARCH_META} variable, e.g., via {@link Projections#computedSearchMeta(String)}.
+     *
+     * @param operator A search operator.
+     * @param options Optional {@code $searchMeta} pipeline stage fields.
+     * Specifying {@link SearchOptions#searchOptions()} is equivalent to calling {@link #searchMeta(SearchOperator)}.
+     * @return The {@code $searchMeta} pipeline stage.
+     *
+     * @mongodb.atlas.manual atlas-search/query-syntax/#-searchmeta $searchMeta
+     * @mongodb.atlas.manual atlas-search/operators-and-collectors/#operators Search operators
+     * @since 4.7
+     */
+    public static Bson searchMeta(final SearchOperator operator, final SearchOptions options) {
+        return new SearchStage("$searchMeta", notNull("operator", operator), notNull("options", options));
+    }
+
+    /**
+     * Creates a {@code $searchMeta} pipeline stage supported by MongoDB Atlas.
+     * Unlike {@link #search(SearchCollector) $search}, it does not return found documents;
+     * instead it returns metadata, which, when the {@code $search} stage is used,
+     * may be extracted via the {@code $$SEARCH_META} variable, e.g., via {@link Projections#computedSearchMeta(String)}.
+     *
+     * @param collector A search collector.
+     * @return The {@code $searchMeta} pipeline stage.
+     *
+     * @mongodb.atlas.manual atlas-search/query-syntax/#-searchmeta $searchMeta
+     * @mongodb.atlas.manual atlas-search/operators-and-collectors/#collectors Search collectors
+     * @since 4.7
+     */
+    public static Bson searchMeta(final SearchCollector collector) {
+        return searchMeta(collector, searchOptions());
+    }
+
+    /**
+     * Creates a {@code $searchMeta} pipeline stage supported by MongoDB Atlas.
+     * Unlike {@link #search(SearchCollector, SearchOptions) $search}, it does not return found documents;
+     * instead it returns metadata, which, when the {@code $search} stage is used,
+     * may be extracted via the {@code $$SEARCH_META} variable, e.g., via {@link Projections#computedSearchMeta(String)}.
+     *
+     * @param collector A search collector.
+     * @param options Optional {@code $searchMeta} pipeline stage fields.
+     * Specifying {@link SearchOptions#searchOptions()} is equivalent to calling {@link #searchMeta(SearchCollector)}.
+     * @return The {@code $searchMeta} pipeline stage.
+     *
+     * @mongodb.atlas.manual atlas-search/query-syntax/#-searchmeta $searchMeta
+     * @mongodb.atlas.manual atlas-search/operators-and-collectors/#collectors Search collectors
+     * @since 4.7
+     */
+    public static Bson searchMeta(final SearchCollector collector, final SearchOptions options) {
+        return new SearchStage("$searchMeta", notNull("collector", collector), notNull("options", options));
+    }
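Illustrative usage of {@code $searchMeta} (a minimal sketch, not part of this patch): the collection, the index name "default", and the numeric "year" field are assumptions; the stage yields metadata (here, a total count) rather than documents.

    import com.mongodb.client.MongoCollection;
    import org.bson.Document;
    import org.bson.conversions.Bson;

    import static com.mongodb.client.model.Aggregates.searchMeta;
    import static com.mongodb.client.model.search.SearchCount.total;
    import static com.mongodb.client.model.search.SearchOperator.numberRange;
    import static com.mongodb.client.model.search.SearchOptions.searchOptions;
    import static com.mongodb.client.model.search.SearchPath.fieldPath;
    import static java.util.Collections.singletonList;

    class SearchMetaExample {
        static void run(final MongoCollection<Document> collection) {
            // Count documents whose "year" falls in [1998, 1999) without returning them.
            Bson metaStage = searchMeta(
                    numberRange(fieldPath("year")).gteLt(1998, 1999),
                    searchOptions().index("default").count(total()));
            Document meta = collection.aggregate(singletonList(metaStage)).first();
            System.out.println(meta == null ? "no metadata" : meta.toJson());
        }
    }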
+
+    /**
+     * Creates a {@code $vectorSearch} pipeline stage supported by MongoDB Atlas.
+     * You may use the {@code $meta: "vectorSearchScore"} expression, e.g., via {@link Projections#metaVectorSearchScore(String)},
+     * to extract the relevance score assigned to each found document.
+     *
+     * @param queryVector The query vector. The number of dimensions must match that of the {@code index}.
+     * @param path The field to be searched.
+     * @param index The name of the index to use.
+     * @param limit The limit on the number of documents produced by the pipeline stage.
+     * @param options Optional {@code $vectorSearch} pipeline stage fields.
+     * @return The {@code $vectorSearch} pipeline stage.
+     *
+     * @mongodb.atlas.manual atlas-vector-search/vector-search-stage/ $vectorSearch
+     * @mongodb.atlas.manual atlas-search/scoring/ Scoring
+     * @mongodb.server.release 6.0.11
+     * @since 4.11
+     */
+    public static Bson vectorSearch(
+            final FieldSearchPath path,
+            final Iterable<Double> queryVector,
+            final String index,
+            final long limit,
+            final VectorSearchOptions options) {
+        notNull("path", path);
+        notNull("queryVector", queryVector);
+        notNull("index", index);
+        notNull("options", options);
+        return new VectorSearchBson(path, queryVector, index, limit, options);
+    }
+
+    /**
+     * Creates a {@code $vectorSearch} pipeline stage supported by MongoDB Atlas.
+     * You may use the {@code $meta: "vectorSearchScore"} expression, e.g., via {@link Projections#metaVectorSearchScore(String)},
+     * to extract the relevance score assigned to each found document.
+     *
+     * @param queryVector The {@linkplain BinaryVector query vector}. The number of dimensions must match that of the {@code index}.
+     * @param path The field to be searched.
+     * @param index The name of the index to use.
+     * @param limit The limit on the number of documents produced by the pipeline stage.
+     * @param options Optional {@code $vectorSearch} pipeline stage fields.
+     * @return The {@code $vectorSearch} pipeline stage.
+     * @mongodb.atlas.manual atlas-vector-search/vector-search-stage/ $vectorSearch
+     * @mongodb.atlas.manual atlas-search/scoring/ Scoring
+     * @mongodb.server.release 6.0.11
+     * @see BinaryVector
+     * @since 5.3
+     */
+    public static Bson vectorSearch(
+            final FieldSearchPath path,
+            final BinaryVector queryVector,
+            final String index,
+            final long limit,
+            final VectorSearchOptions options) {
+        notNull("path", path);
+        notNull("queryVector", queryVector);
+        notNull("index", index);
+        notNull("options", options);
+        return new VectorSearchBson(path, queryVector, index, limit, options);
+    }
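Illustrative usage of the {@code Iterable<Double>} overload of {@code $vectorSearch} (a minimal sketch, not part of this patch): the collection, the "vector_index" index, the "plotEmbedding" field, and the toy 3-dimensional vector are assumptions; {@code approximateVectorSearchOptions} assumes a driver version (5.2+) where that factory exists.

    import java.util.Arrays;

    import com.mongodb.client.MongoCollection;
    import org.bson.Document;
    import org.bson.conversions.Bson;

    import static com.mongodb.client.model.Aggregates.vectorSearch;
    import static com.mongodb.client.model.search.SearchPath.fieldPath;
    import static com.mongodb.client.model.search.VectorSearchOptions.approximateVectorSearchOptions;
    import static java.util.Collections.singletonList;

    class VectorSearchExample {
        static void run(final MongoCollection<Document> collection) {
            // ANN search: numCandidates (50) must be at least the limit (5),
            // and the vector dimensionality must match the index definition.
            Bson vectorStage = vectorSearch(
                    fieldPath("plotEmbedding"),
                    Arrays.asList(0.1d, 0.2d, 0.3d),
                    "vector_index",
                    5,
                    approximateVectorSearchOptions(50));
            collection.aggregate(singletonList(vectorStage))
                    .forEach(doc -> System.out.println(doc.toJson()));
        }
    }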
+
+    /**
+     * Creates an $unset pipeline stage that removes/excludes fields from documents.
+     *
+     * @param fields the fields to exclude. May use dot notation.
+     * @return the $unset pipeline stage
+     * @mongodb.driver.manual reference/operator/aggregation/unset/ $unset
+     * @mongodb.server.release 4.2
+     * @since 4.8
+     */
+    public static Bson unset(final String... fields) {
+        return unset(asList(fields));
+    }
+
+    /**
+     * Creates an $unset pipeline stage that removes/excludes fields from documents.
+     *
+     * @param fields the fields to exclude. May use dot notation.
+     * @return the $unset pipeline stage
+     * @mongodb.driver.manual reference/operator/aggregation/unset/ $unset
+     * @mongodb.server.release 4.2
+     * @since 4.8
+     */
+    public static Bson unset(final List<String> fields) {
+        if (fields.size() == 1) {
+            return new BsonDocument("$unset", new BsonString(fields.get(0)));
+        }
+        BsonArray array = new BsonArray();
+        fields.stream().map(BsonString::new).forEach(array::add);
+        return new BsonDocument().append("$unset", array);
+    }
+
+    /**
+     * Creates a $geoNear pipeline stage that outputs documents in order of nearest to farthest from a specified point.
+     *
+     * @param near The point for which to find the closest documents.
+     * @param distanceField The output field that contains the calculated distance.
+     *                      To specify a field within an embedded document, use dot notation.
+     * @param options {@link GeoNearOptions}
+     * @return the $geoNear pipeline stage
+     * @mongodb.driver.manual reference/operator/aggregation/geoNear/ $geoNear
+     * @since 4.8
+     */
+    public static Bson geoNear(
+            final Point near,
+            final String distanceField,
+            final GeoNearOptions options) {
+        notNull("near", near);
+        notNull("distanceField", distanceField);
+        notNull("options", options);
+        return new Bson() {
+            @Override
+            public <TDocument> BsonDocument toBsonDocument(final Class<TDocument> documentClass, final CodecRegistry codecRegistry) {
+                BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument());
+                writer.writeStartDocument();
+                writer.writeStartDocument("$geoNear");
+
+                writer.writeName("near");
+                BuildersHelper.encodeValue(writer, near, codecRegistry);
+                writer.writeName("distanceField");
+                BuildersHelper.encodeValue(writer, distanceField, codecRegistry);
+
+                options.toBsonDocument(documentClass, codecRegistry).forEach((optionName, optionValue) -> {
+                    writer.writeName(optionName);
+                    BuildersHelper.encodeValue(writer, optionValue, codecRegistry);
+                });
+
+                writer.writeEndDocument();
+                writer.writeEndDocument();
+                return writer.getDocument();
+            }
+
+            @Override
+            public String toString() {
+                return "Stage{name='$geoNear'"
+                        + ", near=" + near
+                        + ", distanceField=" + distanceField
+                        + ", options=" + options
+                        + '}';
+            }
+        };
+    }
+
+    /**
+     * Creates a $geoNear pipeline stage that outputs documents in order of nearest to farthest from a specified point.
+     *
+     * @param near The point for which to find the closest documents.
+     * @param distanceField The output field that contains the calculated distance.
+     *                      To specify a field within an embedded document, use dot notation.
+     * @return the $geoNear pipeline stage
+     * @mongodb.driver.manual reference/operator/aggregation/geoNear/ $geoNear
+     * @since 4.8
+     */
+    public static Bson geoNear(
+            final Point near,
+            final String distanceField) {
+        return geoNear(near, distanceField, geoNearOptions());
+    }
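Illustrative usage of {@code $geoNear} (a minimal sketch, not part of this patch): the collection, a 2dsphere-indexed location field, the coordinates, and the "category" filter are assumptions for illustration.

    import com.mongodb.client.MongoCollection;
    import com.mongodb.client.model.geojson.Point;
    import com.mongodb.client.model.geojson.Position;
    import org.bson.Document;
    import org.bson.conversions.Bson;

    import static com.mongodb.client.model.Aggregates.geoNear;
    import static com.mongodb.client.model.GeoNearOptions.geoNearOptions;
    import static java.util.Collections.singletonList;

    class GeoNearExample {
        static void run(final MongoCollection<Document> collection) {
            // Documents come back nearest-first; the computed distance (metres for
            // GeoJSON points) is written to the "dist.calculated" output field.
            Bson geoNearStage = geoNear(
                    new Point(new Position(-73.99279, 40.719296)),
                    "dist.calculated",
                    geoNearOptions().maxDistance(2000d).query(new Document("category", "Parks")));
            collection.aggregate(singletonList(geoNearStage))
                    .forEach(doc -> System.out.println(doc.toJson()));
        }
    }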
+
+    /**
+     * Creates a $documents pipeline stage.
+     *
+     * @param documents the documents.
+     * @return the $documents pipeline stage.
+     * @mongodb.driver.manual reference/operator/aggregation/documents/ $documents
+     * @mongodb.server.release 5.1
+     * @since 4.9
+     */
+    public static Bson documents(final List<? extends Bson> documents) {
+        notNull("documents", documents);
+        return new Bson() {
+            @Override
+            public <TDocument> BsonDocument toBsonDocument(final Class<TDocument> documentClass, final CodecRegistry codecRegistry) {
+                BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument());
+                writer.writeStartDocument();
+                writer.writeStartArray("$documents");
+                for (Bson bson : documents) {
+                    BuildersHelper.encodeValue(writer, bson, codecRegistry);
+                }
+                writer.writeEndArray();
+                writer.writeEndDocument();
+                return writer.getDocument();
+            }
+        };
+    }
+
+    static void writeBucketOutput(final CodecRegistry codecRegistry, final BsonDocumentWriter writer,
+                                  @Nullable final List<BsonField> output) {
+        if (output != null) {
+            writer.writeName("output");
+            writer.writeStartDocument();
+            for (BsonField field : output) {
+                writer.writeName(field.getName());
+                BuildersHelper.encodeValue(writer, field.getValue(), codecRegistry);
+            }
+            writer.writeEndDocument();
+        }
+    }
+
+    private static class SimplePipelineStage implements Bson {
+        private final String name;
+        private final Bson value;
+
+        SimplePipelineStage(final String name, final Bson value) {
+            this.name = name;
+            this.value = value;
+        }
+
+        @Override
+        public <TDocument> BsonDocument toBsonDocument(final Class<TDocument> documentClass, final CodecRegistry codecRegistry) {
+            return new BsonDocument(name, value.toBsonDocument(documentClass, codecRegistry));
+        }
+
+        @Override
+        public boolean equals(final Object o) {
+            if (this == o) {
+                return true;
+            }
+            if (o == null || getClass() != o.getClass()) {
+                return false;
+            }
+
+            SimplePipelineStage that = (SimplePipelineStage) o;
+
+            if (!Objects.equals(name, that.name)) {
+                return false;
+            }
+            return Objects.equals(value, that.value);
+        }
+
+        @Override
+        public int hashCode() {
+            int result = name != null ? name.hashCode() : 0;
+            result = 31 * result + (value != null ?
value.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "Stage{" + + "name='" + name + '\'' + + ", value=" + value + + '}'; + } + } + + private static final class BucketStage implements Bson { + + private final TExpression groupBy; + private final List boundaries; + private final BucketOptions options; + + BucketStage(final TExpression groupBy, final List boundaries, final BucketOptions options) { + notNull("options", options); + this.groupBy = groupBy; + this.boundaries = boundaries; + this.options = options; + } + + @Override + public BsonDocument toBsonDocument(final Class tDocumentClass, final CodecRegistry codecRegistry) { + BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument()); + + writer.writeStartDocument(); + + writer.writeStartDocument("$bucket"); + + writer.writeName("groupBy"); + BuildersHelper.encodeValue(writer, groupBy, codecRegistry); + + writer.writeStartArray("boundaries"); + for (TBoundary boundary : boundaries) { + BuildersHelper.encodeValue(writer, boundary, codecRegistry); + } + writer.writeEndArray(); + Object defaultBucket = options.getDefaultBucket(); + if (defaultBucket != null) { + writer.writeName("default"); + BuildersHelper.encodeValue(writer, defaultBucket, codecRegistry); + } + writeBucketOutput(codecRegistry, writer, options.getOutput()); + writer.writeEndDocument(); + + return writer.getDocument(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BucketStage that = (BucketStage) o; + + if (!Objects.equals(groupBy, that.groupBy)) { + return false; + } + if (!Objects.equals(boundaries, that.boundaries)) { + return false; + } + return options.equals(that.options); + } + + @Override + public int hashCode() { + int result = groupBy != null ? groupBy.hashCode() : 0; + result = 31 * result + (boundaries != null ? 
boundaries.hashCode() : 0); + result = 31 * result + options.hashCode(); + return result; + } + + @Override + public String toString() { + return "Stage{" + + "name='$bucket'" + + ", boundaries=" + boundaries + + ", groupBy=" + groupBy + + ", options=" + options + + '}'; + } + } + + private static final class BucketAutoStage implements Bson { + + private final TExpression groupBy; + private final int buckets; + private final BucketAutoOptions options; + + BucketAutoStage(final TExpression groupBy, final int buckets, final BucketAutoOptions options) { + notNull("options", options); + this.groupBy = groupBy; + this.buckets = buckets; + this.options = options; + } + + @Override + public BsonDocument toBsonDocument(final Class tDocumentClass, final CodecRegistry codecRegistry) { + BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument()); + + writer.writeStartDocument(); + + writer.writeStartDocument("$bucketAuto"); + + writer.writeName("groupBy"); + BuildersHelper.encodeValue(writer, groupBy, codecRegistry); + + writer.writeInt32("buckets", buckets); + + writeBucketOutput(codecRegistry, writer, options.getOutput()); + + BucketGranularity granularity = options.getGranularity(); + if (granularity != null) { + writer.writeString("granularity", granularity.getValue()); + } + writer.writeEndDocument(); + + return writer.getDocument(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BucketAutoStage that = (BucketAutoStage) o; + + if (buckets != that.buckets) { + return false; + } + if (!Objects.equals(groupBy, that.groupBy)) { + return false; + } + return options.equals(that.options); + } + + @Override + public int hashCode() { + int result = groupBy != null ? 
groupBy.hashCode() : 0; + result = 31 * result + buckets; + result = 31 * result + options.hashCode(); + return result; + } + + @Override + public String toString() { + return "Stage{" + + "name='$bucketAuto'" + + ", buckets=" + buckets + + ", groupBy=" + groupBy + + ", options=" + options + + '}'; + } + } + + private static final class LookupStage implements Bson { + private final String from; + private final List> let; + private final List pipeline; + private final String as; + + private LookupStage( + @Nullable final String from, + @Nullable final List> let, + final List pipeline, + final String as) { + this.from = from; + this.let = let; + this.pipeline = pipeline; + this.as = as; + } + + @Override + public BsonDocument toBsonDocument(final Class tDocumentClass, final CodecRegistry codecRegistry) { + BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument()); + + writer.writeStartDocument(); + + writer.writeStartDocument("$lookup"); + + if (from != null) { + writer.writeString("from", from); + } + + if (let != null) { + writer.writeStartDocument("let"); + + for (Variable variable : let) { + writer.writeName(variable.getName()); + BuildersHelper.encodeValue(writer, variable.getValue(), codecRegistry); + } + + writer.writeEndDocument(); + } + + writer.writeName("pipeline"); + writer.writeStartArray(); + for (Bson stage : pipeline) { + BuildersHelper.encodeValue(writer, stage, codecRegistry); + } + writer.writeEndArray(); + + writer.writeString("as", as); + + writer.writeEndDocument(); + + return writer.getDocument(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + LookupStage that = (LookupStage) o; + + if (!Objects.equals(from, that.from)) { + return false; + } + if (!Objects.equals(let, that.let)) { + return false; + } + if (!Objects.equals(pipeline, that.pipeline)) { + return false; + } + return Objects.equals(as, that.as); + } + + @Override + public int hashCode() { + int result = from != null ? from.hashCode() : 0; + result = 31 * result + (let != null ? let.hashCode() : 0); + result = 31 * result + (pipeline != null ? pipeline.hashCode() : 0); + result = 31 * result + (as != null ? 
as.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "Stage{" + + "name='$lookup'" + + ", from='" + from + '\'' + + ", let=" + let + + ", pipeline=" + pipeline + + ", as='" + as + '\'' + + '}'; + } + } + + private static final class GraphLookupStage implements Bson { + private final String from; + private final TExpression startWith; + private final String connectFromField; + private final String connectToField; + private final String as; + private final GraphLookupOptions options; + + private GraphLookupStage(final String from, final TExpression startWith, final String connectFromField, final String connectToField, + final String as, final GraphLookupOptions options) { + this.from = from; + this.startWith = startWith; + this.connectFromField = connectFromField; + this.connectToField = connectToField; + this.as = as; + this.options = options; + } + + @Override + public BsonDocument toBsonDocument(final Class tDocumentClass, final CodecRegistry codecRegistry) { + BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument()); + + writer.writeStartDocument(); + + writer.writeStartDocument("$graphLookup"); + + writer.writeString("from", from); + writer.writeName("startWith"); + BuildersHelper.encodeValue(writer, startWith, codecRegistry); + + writer.writeString("connectFromField", connectFromField); + writer.writeString("connectToField", connectToField); + writer.writeString("as", as); + Integer maxDepth = options.getMaxDepth(); + if (maxDepth != null) { + writer.writeInt32("maxDepth", maxDepth); + } + String depthField = options.getDepthField(); + if (depthField != null) { + writer.writeString("depthField", depthField); + } + Bson restrictSearchWithMatch = options.getRestrictSearchWithMatch(); + if (restrictSearchWithMatch != null) { + writer.writeName("restrictSearchWithMatch"); + BuildersHelper.encodeValue(writer, restrictSearchWithMatch, codecRegistry); + } + + writer.writeEndDocument(); + + return writer.getDocument(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + GraphLookupStage that = (GraphLookupStage) o; + + if (!Objects.equals(from, that.from)) { + return false; + } + if (!Objects.equals(startWith, that.startWith)) { + return false; + } + if (!Objects.equals(connectFromField, that.connectFromField)) { + return false; + } + if (!Objects.equals(connectToField, that.connectToField)) { + return false; + } + if (!Objects.equals(as, that.as)) { + return false; + } + return Objects.equals(options, that.options); + } + + @Override + public int hashCode() { + int result = from != null ? from.hashCode() : 0; + result = 31 * result + (startWith != null ? startWith.hashCode() : 0); + result = 31 * result + (connectFromField != null ? connectFromField.hashCode() : 0); + result = 31 * result + (connectToField != null ? connectToField.hashCode() : 0); + result = 31 * result + (as != null ? as.hashCode() : 0); + result = 31 * result + (options != null ? 
options.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "Stage{" + + "name='$graphLookup'" + + ", as='" + as + '\'' + + ", connectFromField='" + connectFromField + '\'' + + ", connectToField='" + connectToField + '\'' + + ", from='" + from + '\'' + + ", options=" + options + + ", startWith=" + startWith + + '}'; + } + } + + private static class GroupStage implements Bson { + private final TExpression id; + private final List fieldAccumulators; + + GroupStage(final TExpression id, final List fieldAccumulators) { + this.id = id; + this.fieldAccumulators = fieldAccumulators; + } + + @Override + public BsonDocument toBsonDocument(final Class tDocumentClass, final CodecRegistry codecRegistry) { + BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument()); + + writer.writeStartDocument(); + + writer.writeStartDocument("$group"); + + writer.writeName("_id"); + BuildersHelper.encodeValue(writer, id, codecRegistry); + + for (BsonField fieldAccumulator : fieldAccumulators) { + writer.writeName(fieldAccumulator.getName()); + BuildersHelper.encodeValue(writer, fieldAccumulator.getValue(), codecRegistry); + } + + writer.writeEndDocument(); + writer.writeEndDocument(); + + return writer.getDocument(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + GroupStage that = (GroupStage) o; + + if (!Objects.equals(id, that.id)) { + return false; + } + return Objects.equals(fieldAccumulators, that.fieldAccumulators); + } + + @Override + public int hashCode() { + int result = id != null ? id.hashCode() : 0; + result = 31 * result + (fieldAccumulators != null ? fieldAccumulators.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "Stage{" + + "name='$group'" + + ", id=" + id + + ", fieldAccumulators=" + fieldAccumulators + + '}'; + } + } + + private static class SortByCountStage implements Bson { + private final TExpression filter; + + SortByCountStage(final TExpression filter) { + this.filter = filter; + } + + @Override + public BsonDocument toBsonDocument(final Class tDocumentClass, final CodecRegistry codecRegistry) { + BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument()); + + writer.writeStartDocument(); + + writer.writeName("$sortByCount"); + BuildersHelper.encodeValue(writer, filter, codecRegistry); + + writer.writeEndDocument(); + + return writer.getDocument(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + SortByCountStage that = (SortByCountStage) o; + + return Objects.equals(filter, that.filter); + } + + @Override + public int hashCode() { + return filter != null ? 
filter.hashCode() : 0; + } + + @Override + public String toString() { + return "Stage{" + + "name='$sortByCount'" + + ", id=" + filter + + '}'; + } + } + + private static class FacetStage implements Bson { + + private final List facets; + FacetStage(final List facets) { + this.facets = facets; + } + + @Override + public BsonDocument toBsonDocument(final Class tDocumentClass, final CodecRegistry codecRegistry) { + BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument()); + writer.writeStartDocument(); + writer.writeName("$facet"); + writer.writeStartDocument(); + for (Facet facet : facets) { + writer.writeName(facet.getName()); + writer.writeStartArray(); + for (Bson bson : facet.getPipeline()) { + BuildersHelper.encodeValue(writer, bson, codecRegistry); + } + writer.writeEndArray(); + } + writer.writeEndDocument(); + writer.writeEndDocument(); + + return writer.getDocument(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + FacetStage that = (FacetStage) o; + + return Objects.equals(facets, that.facets); + } + + @Override + public int hashCode() { + return facets != null ? facets.hashCode() : 0; + } + + @Override + public String toString() { + return "Stage{" + + "name='$facet', " + + "facets=" + facets + '}'; + } + + } + + private static class FieldsStage implements Bson { + private final List> fields; + private final String stageName; //one of $addFields or $set + + FieldsStage(final String stageName, final List> fields) { + this.stageName = stageName; + this.fields = notNull("fields", fields); + } + + @Override + public BsonDocument toBsonDocument(final Class tDocumentClass, final CodecRegistry codecRegistry) { + BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument()); + writer.writeStartDocument(); + writer.writeName(stageName); + writer.writeStartDocument(); + for (Field field : fields) { + writer.writeName(field.getName()); + BuildersHelper.encodeValue(writer, field.getValue(), codecRegistry); + } + writer.writeEndDocument(); + writer.writeEndDocument(); + + return writer.getDocument(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + FieldsStage that = (FieldsStage) o; + + if (!fields.equals(that.fields)) { + return false; + } + return stageName.equals(that.stageName); + } + + @Override + public int hashCode() { + int result = fields.hashCode(); + result = 31 * result + stageName.hashCode(); + return result; + } + + @Override + public String toString() { + return "Stage{" + + "name='" + stageName + "', " + + "fields=" + fields + + '}'; + } + } + + private static class ReplaceStage implements Bson { + private final TExpression value; + private final boolean replaceWith; + + ReplaceStage(final TExpression value) { + this(value, false); + } + + ReplaceStage(final TExpression value, final boolean replaceWith) { + this.value = value; + this.replaceWith = replaceWith; + } + + @Override + public BsonDocument toBsonDocument(final Class tDocumentClass, final CodecRegistry codecRegistry) { + BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument()); + writer.writeStartDocument(); + + if (replaceWith) { + writer.writeName("$replaceWith"); + BuildersHelper.encodeValue(writer, value, codecRegistry); + } else { + writer.writeName("$replaceRoot"); + writer.writeStartDocument(); + writer.writeName("newRoot"); + 
BuildersHelper.encodeValue(writer, value, codecRegistry); + writer.writeEndDocument(); + } + writer.writeEndDocument(); + + return writer.getDocument(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ReplaceStage that = (ReplaceStage) o; + + return Objects.equals(value, that.value); + } + + @Override + public int hashCode() { + return value != null ? value.hashCode() : 0; + } + + @Override + public String toString() { + return "Stage{" + + "name='$replaceRoot', " + + "value=" + value + + '}'; + } + } + + private static class MergeStage implements Bson { + private final BsonValue intoValue; + private final MergeOptions options; + + MergeStage(final BsonValue intoValue, final MergeOptions options) { + this.intoValue = intoValue; + this.options = options; + } + + @Override + public BsonDocument toBsonDocument(final Class documentClass, final CodecRegistry codecRegistry) { + BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument()); + writer.writeStartDocument(); + writer.writeStartDocument("$merge"); + writer.writeName("into"); + if (intoValue.isString()) { + writer.writeString(intoValue.asString().getValue()); + } else { + writer.writeStartDocument(); + writer.writeString("db", intoValue.asDocument().getString("db").getValue()); + writer.writeString("coll", intoValue.asDocument().getString("coll").getValue()); + writer.writeEndDocument(); + } + if (options.getUniqueIdentifier() != null) { + if (options.getUniqueIdentifier().size() == 1) { + writer.writeString("on", options.getUniqueIdentifier().get(0)); + } else { + writer.writeStartArray("on"); + for (String cur : options.getUniqueIdentifier()) { + writer.writeString(cur); + } + writer.writeEndArray(); + } + } + if (options.getVariables() != null) { + writer.writeStartDocument("let"); + + for (Variable variable : options.getVariables()) { + writer.writeName(variable.getName()); + BuildersHelper.encodeValue(writer, variable.getValue(), codecRegistry); + } + + writer.writeEndDocument(); + } + + if (options.getWhenMatched() != null) { + writer.writeName("whenMatched"); + switch (options.getWhenMatched()) { + case REPLACE: + writer.writeString("replace"); + break; + case KEEP_EXISTING: + writer.writeString("keepExisting"); + break; + case MERGE: + writer.writeString("merge"); + break; + case PIPELINE: + writer.writeStartArray(); + for (Bson curStage : options.getWhenMatchedPipeline()) { + BuildersHelper.encodeValue(writer, curStage, codecRegistry); + } + writer.writeEndArray(); + break; + case FAIL: + writer.writeString("fail"); + break; + default: + throw new UnsupportedOperationException("Unexpected value: " + options.getWhenMatched()); + } + } + if (options.getWhenNotMatched() != null) { + writer.writeName("whenNotMatched"); + switch (options.getWhenNotMatched()) { + case INSERT: + writer.writeString("insert"); + break; + case DISCARD: + writer.writeString("discard"); + break; + case FAIL: + writer.writeString("fail"); + break; + default: + throw new UnsupportedOperationException("Unexpected value: " + options.getWhenNotMatched()); + } + } + writer.writeEndDocument(); + writer.writeEndDocument(); + return writer.getDocument(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + MergeStage that = (MergeStage) o; + + if (!intoValue.equals(that.intoValue)) { + return false; + } + if 
(!options.equals(that.options)) {
+                return false;
+            }
+
+            return true;
+        }
+
+        @Override
+        public int hashCode() {
+            int result = intoValue.hashCode();
+            result = 31 * result + options.hashCode();
+            return result;
+        }
+
+        @Override
+        public String toString() {
+            return "Stage{"
+                    + "name='$merge'"
+                    + ", into=" + intoValue
+                    + ", options=" + options
+                    + '}';
+        }
+    }
+
+    private static final class UnionWithStage implements Bson {
+        private final String collection;
+        private final List<? extends Bson> pipeline;
+
+        private UnionWithStage(final String collection, final List<? extends Bson> pipeline) {
+            this.collection = collection;
+            this.pipeline = pipeline;
+        }
+
+        @Override
+        public <TDocument> BsonDocument toBsonDocument(final Class<TDocument> tDocumentClass, final CodecRegistry codecRegistry) {
+            BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument());
+
+            writer.writeStartDocument();
+
+            writer.writeStartDocument("$unionWith");
+            writer.writeString("coll", collection);
+
+            writer.writeName("pipeline");
+            writer.writeStartArray();
+            for (Bson stage : pipeline) {
+                BuildersHelper.encodeValue(writer, stage, codecRegistry);
+            }
+            writer.writeEndArray();
+
+            writer.writeEndDocument();
+
+            return writer.getDocument();
+        }
+
+        @Override
+        public boolean equals(final Object o) {
+            if (this == o) {
+                return true;
+            }
+            if (o == null || getClass() != o.getClass()) {
+                return false;
+            }
+
+            UnionWithStage that = (UnionWithStage) o;
+
+            if (!collection.equals(that.collection)) {
+                return false;
+            }
+            return Objects.equals(pipeline, that.pipeline);
+        }
+
+        @Override
+        public int hashCode() {
+            int result = collection.hashCode();
+            result = 31 * result + (pipeline != null ? pipeline.hashCode() : 0);
+            return result;
+        }
+
+        @Override
+        public String toString() {
+            return "Stage{"
+                    + "name='$unionWith'"
+                    + ", collection='" + collection + '\''
+                    + ", pipeline=" + pipeline
+                    + '}';
+        }
+    }
+
+    private static final class SetWindowFieldsStage<TExpression> implements Bson {
+        @Nullable
+        private final TExpression partitionBy;
+        @Nullable
+        private final Bson sortBy;
+        private final Iterable<? extends WindowOutputField> output;
+
+        SetWindowFieldsStage(
+                @Nullable final TExpression partitionBy,
+                @Nullable final Bson sortBy,
+                final Iterable<? extends WindowOutputField> output) {
+            this.partitionBy = partitionBy;
+            this.sortBy = sortBy;
+            this.output = output;
+        }
+
+        @Override
+        public <TDocument> BsonDocument toBsonDocument(final Class<TDocument> tDocumentClass, final CodecRegistry codecRegistry) {
+            BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument());
+            writer.writeStartDocument();
+            writer.writeStartDocument("$setWindowFields");
+            if (partitionBy != null) {
+                writer.writeName("partitionBy");
+                BuildersHelper.encodeValue(writer, partitionBy, codecRegistry);
+            }
+            if (sortBy != null) {
+                writer.writeName("sortBy");
+                BuildersHelper.encodeValue(writer, sortBy, codecRegistry);
+            }
+            writer.writeStartDocument("output");
+            for (WindowOutputField windowOutputField : output) {
+                BsonField field = windowOutputField.toBsonField();
+                writer.writeName(field.getName());
+                BuildersHelper.encodeValue(writer, field.getValue(), codecRegistry);
+            }
+            writer.writeEndDocument(); // end output
+            writer.writeEndDocument(); // end $setWindowFields
+            writer.writeEndDocument();
+            return writer.getDocument();
+        }
+
+        @Override
+        public boolean equals(final Object o) {
+            if (this == o) {
+                return true;
+            }
+            if (o == null || getClass() != o.getClass()) {
+                return false;
+            }
+            SetWindowFieldsStage<?> that = (SetWindowFieldsStage<?>) o;
+            return Objects.equals(partitionBy, that.partitionBy) && Objects.equals(sortBy, that.sortBy) &&
output.equals(that.output); + } + + @Override + public int hashCode() { + return Objects.hash(partitionBy, sortBy, output); + } + + @Override + public String toString() { + return "Stage{" + + "name='$setWindowFields'" + + ", partitionBy=" + partitionBy + + ", sortBy=" + sortBy + + ", output=" + output + + '}'; + } + } + + private static final class SearchStage implements Bson { + private final String name; + private final Bson operatorOrCollector; + @Nullable + private final SearchOptions options; + + SearchStage(final String name, final Bson operatorOrCollector, @Nullable final SearchOptions options) { + this.name = name; + this.operatorOrCollector = operatorOrCollector; + this.options = options; + } + + @Override + public BsonDocument toBsonDocument(final Class documentClass, final CodecRegistry codecRegistry) { + BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument()); + writer.writeStartDocument(); + writer.writeStartDocument(name); + BsonDocument operatorOrCollectorDoc = operatorOrCollector.toBsonDocument(documentClass, codecRegistry); + assertTrue(operatorOrCollectorDoc.size() == 1); + Map.Entry operatorOrCollectorEntry = operatorOrCollectorDoc.entrySet().iterator().next(); + writer.writeName(operatorOrCollectorEntry.getKey()); + BuildersHelper.encodeValue(writer, operatorOrCollectorEntry.getValue(), codecRegistry); + if (options != null) { + options.toBsonDocument(documentClass, codecRegistry).forEach((optionName, optionValue) -> { + writer.writeName(optionName); + BuildersHelper.encodeValue(writer, optionValue, codecRegistry); + }); + } + // end `name` + writer.writeEndDocument(); + writer.writeEndDocument(); + return writer.getDocument(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + SearchStage that = (SearchStage) o; + return name.equals(that.name) + && operatorOrCollector.equals(that.operatorOrCollector) + && Objects.equals(options, that.options); + } + + @Override + public int hashCode() { + return Objects.hash(name, operatorOrCollector, options); + } + + @Override + public String toString() { + return "Stage{" + + "name='" + name + "'" + + ", operatorOrCollector=" + operatorOrCollector + + ", options=" + options + + '}'; + } + } + + private static class VectorSearchBson implements Bson { + private final FieldSearchPath path; + private final Object queryVector; + private final String index; + private final long limit; + private final VectorSearchOptions options; + + VectorSearchBson(final FieldSearchPath path, final Object queryVector, + final String index, final long limit, + final VectorSearchOptions options) { + this.path = path; + this.queryVector = queryVector; + this.index = index; + this.limit = limit; + this.options = options; + } + + @Override + public BsonDocument toBsonDocument(final Class documentClass, final CodecRegistry codecRegistry) { + Document specificationDoc = new Document("path", path.toValue()) + .append("queryVector", queryVector) + .append("index", index) + .append("limit", limit); + specificationDoc.putAll(options.toBsonDocument(documentClass, codecRegistry)); + return new Document("$vectorSearch", specificationDoc).toBsonDocument(documentClass, codecRegistry); + } + + @Override + public String toString() { + return "Stage{name=$vectorSearch" + + ", path=" + path + + ", queryVector=" + queryVector + + ", index=" + index + + ", limit=" + limit + + ", options=" + options + + '}'; + } + } + + private Aggregates() { + 
} +} diff --git a/driver-core/src/main/com/mongodb/client/model/ApproximateQuantileMethod.java b/driver-core/src/main/com/mongodb/client/model/ApproximateQuantileMethod.java new file mode 100644 index 00000000000..9d525b151b0 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/ApproximateQuantileMethod.java @@ -0,0 +1,29 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model; + +import com.mongodb.annotations.Sealed; + + +/** + * @see QuantileMethod#approximate() + * @since 4.10 + * @mongodb.server.release 7.0 + */ +@Sealed +public interface ApproximateQuantileMethod extends QuantileMethod { +} + diff --git a/driver-core/src/main/com/mongodb/client/model/BsonField.java b/driver-core/src/main/com/mongodb/client/model/BsonField.java new file mode 100644 index 00000000000..02a17ef4605 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/BsonField.java @@ -0,0 +1,91 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import org.bson.conversions.Bson; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A representation of a BSON document field whose value is another BSON document. + * + * @since 3.1 + * @see Aggregates#group(Object, BsonField...) 
+ */ +public final class BsonField { + private final String name; + private final Bson value; + + /** + * Construct an instance + * + * @param name the field name + * @param value the field value + */ + public BsonField(final String name, final Bson value) { + this.name = notNull("name", name); + this.value = notNull("value", value); + } + + /** + * Gets the field name + * @return the field name + */ + public String getName() { + return name; + } + + /** + * Gets the field value + * @return the field value + */ + public Bson getValue() { + return value; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BsonField bsonField = (BsonField) o; + + if (!name.equals(bsonField.name)) { + return false; + } + return value.equals(bsonField.value); + } + + @Override + public int hashCode() { + int result = name.hashCode(); + result = 31 * result + value.hashCode(); + return result; + } + + @Override + public String toString() { + return "BsonField{" + + "name='" + name + '\'' + + ", value=" + value + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/BucketAutoOptions.java b/driver-core/src/main/com/mongodb/client/model/BucketAutoOptions.java new file mode 100644 index 00000000000..b31c4913395 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/BucketAutoOptions.java @@ -0,0 +1,120 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.lang.Nullable; + +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +import static java.util.Arrays.asList; + +/** + * The options for a $bucketAuto aggregation pipeline stage + * + * @mongodb.driver.manual reference/operator/aggregation/bucketAuto/ $bucketAuto + * @mongodb.server.release 3.4 + * @since 3.4 + */ +public class BucketAutoOptions { + private List output; + private BucketGranularity granularity; + + /** + * @return the granularity of the bucket definitions + */ + @Nullable + public BucketGranularity getGranularity() { + return granularity; + } + + /** + * @return the output document definition + */ + @Nullable + public List getOutput() { + return output == null ? null : new ArrayList<>(output); + } + + /** + * Specifies the granularity of the bucket definitions. + * + * @param granularity the granularity of the bucket definitions + * @return this + * @see Preferred numbers + * @see BucketGranularity + */ + public BucketAutoOptions granularity(@Nullable final BucketGranularity granularity) { + this.granularity = granularity; + return this; + } + + /** + * The definition of the output document in each bucket + * + * @param output the output document definition + * @return this + */ + public BucketAutoOptions output(final BsonField... 
output) { + this.output = asList(output); + return this; + } + + /** + * The definition of the output document in each bucket + * + * @param output the output document definition + * @return this + */ + public BucketAutoOptions output(@Nullable final List output) { + this.output = output; + return this; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BucketAutoOptions that = (BucketAutoOptions) o; + + if (!Objects.equals(output, that.output)) { + return false; + } + return granularity == that.granularity; + } + + @Override + public int hashCode() { + int result = output != null ? output.hashCode() : 0; + result = 31 * result + (granularity != null ? granularity.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "BucketAutoOptions{" + + "output=" + output + + ", granularity=" + granularity + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/BucketGranularity.java b/driver-core/src/main/com/mongodb/client/model/BucketGranularity.java new file mode 100644 index 00000000000..489b4324807 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/BucketGranularity.java @@ -0,0 +1,114 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +/** + * Granularity values for automatic bucketing. 
+ * + * @mongodb.driver.manual reference/operator/aggregation/bucketAuto/ $bucketAuto + * @mongodb.server.release 3.4 + * @see Preferred numbers + * @since 3.4 + */ +public enum BucketGranularity { + /** + * R5 + */ + R5, + /** + * R10 + */ + R10, + /** + * R20 + */ + R20, + /** + * R40 + */ + R40, + /** + * R80 + */ + R80, + /** + * SERIES_125 + */ + SERIES_125("1-2-5"), + /** + * E6 + */ + E6, + /** + * E12 + */ + E12, + /** + * E24 + */ + E24, + /** + * E48 + */ + E48, + /** + * E96 + */ + E96, + /** + * E192 + */ + E192, + /** + * POWERSOF2 + */ + POWERSOF2; + + private final String value; + + BucketGranularity() { + value = name(); + } + + BucketGranularity(final String name) { + value = name; + } + + /** + * Tries find the enum instance for the given value + * + * @param value the value to search for + * @return the enum instance + */ + public static BucketGranularity fromString(final String value) { + for (BucketGranularity granularity : BucketGranularity.values()) { + if (granularity.getValue().equals(value)) { + return granularity; + } + } + throw new IllegalArgumentException("No Granularity exists for the value " + value); + } + + /** + * Returns the display as defined in the preferred number article + * + * @return the display name + */ + public String getValue() { + return value; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/BucketOptions.java b/driver-core/src/main/com/mongodb/client/model/BucketOptions.java new file mode 100644 index 00000000000..77bfbba4350 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/BucketOptions.java @@ -0,0 +1,118 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.lang.Nullable; + +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +import static java.util.Arrays.asList; + +/** + * The options for a $bucket aggregation pipeline stage + * + * @mongodb.driver.manual reference/operator/aggregation/bucketAuto/ $bucket + * @mongodb.server.release 3.4 + * @since 3.4 + */ +public class BucketOptions { + private Object defaultBucket; + private List output; + + /** + * The name of the default bucket for values outside the defined buckets + * + * @param name the bucket value + * @return this + */ + public BucketOptions defaultBucket(@Nullable final Object name) { + defaultBucket = name; + return this; + } + + /** + * @return the default bucket value + */ + @Nullable + public Object getDefaultBucket() { + return defaultBucket; + } + + /** + * @return the output document definition + */ + @Nullable + public List getOutput() { + return output == null ? null : new ArrayList<>(output); + } + + /** + * The definition of the output document in each bucket + * + * @param output the output document definition + * @return this + */ + public BucketOptions output(final BsonField... 
output) { + this.output = asList(output); + return this; + } + + /** + * The definition of the output document in each bucket + * + * @param output the output document definition + * @return this + */ + public BucketOptions output(final List output) { + this.output = output; + return this; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BucketOptions that = (BucketOptions) o; + + if (!Objects.equals(defaultBucket, that.defaultBucket)) { + return false; + } + return Objects.equals(output, that.output); + } + + @Override + public int hashCode() { + int result = defaultBucket != null ? defaultBucket.hashCode() : 0; + result = 31 * result + (output != null ? output.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "BucketOptions{" + + "defaultBucket=" + defaultBucket + + ", output=" + output + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/BuildersHelper.java b/driver-core/src/main/com/mongodb/client/model/BuildersHelper.java new file mode 100644 index 00000000000..e67cb40a622 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/BuildersHelper.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import org.bson.BsonDocument; +import org.bson.BsonDocumentWriter; +import org.bson.codecs.Encoder; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +final class BuildersHelper { + + @SuppressWarnings("unchecked") + static void encodeValue(final BsonDocumentWriter writer, final TItem value, final CodecRegistry codecRegistry) { + if (value == null) { + writer.writeNull(); + } else if (value instanceof Bson) { + codecRegistry.get(BsonDocument.class).encode(writer, + ((Bson) value).toBsonDocument(BsonDocument.class, codecRegistry), + EncoderContext.builder().build()); + } else { + ((Encoder) codecRegistry.get(value.getClass())).encode(writer, value, EncoderContext.builder().build()); + } + } + + private BuildersHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/BulkWriteOptions.java b/driver-core/src/main/com/mongodb/client/model/BulkWriteOptions.java new file mode 100644 index 00000000000..12b6c990d47 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/BulkWriteOptions.java @@ -0,0 +1,159 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.lang.Nullable; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.conversions.Bson; + +/** + * The options to apply to a bulk write. + * + * @since 3.0 + */ +public final class BulkWriteOptions { + private boolean ordered = true; + private Boolean bypassDocumentValidation; + private BsonValue comment; + private Bson variables; + + /** + * If true, then when a write fails, return without performing the remaining + * writes. If false, then when a write fails, continue with the remaining writes, if any. + * Defaults to true. + * + * @return true if the writes are ordered + */ + public boolean isOrdered() { + return ordered; + } + + /** + * If true, then when a write fails, return without performing the remaining + * writes. If false, then when a write fails, continue with the remaining writes, if any. + * Defaults to true. + * + * @param ordered true if the writes should be ordered + * @return this + */ + public BulkWriteOptions ordered(final boolean ordered) { + this.ordered = ordered; + return this; + } + + /** + * Gets the bypass document level validation flag + * + * @return the bypass document level validation flag + * @since 3.2 + * @mongodb.server.release 3.2 + */ + @Nullable + public Boolean getBypassDocumentValidation() { + return bypassDocumentValidation; + } + + /** + * Sets the bypass document level validation flag. + * + * @param bypassDocumentValidation If true, allows the write to opt-out of document level validation. + * @return this + * @since 3.2 + * @mongodb.server.release 3.2 + */ + public BulkWriteOptions bypassDocumentValidation(@Nullable final Boolean bypassDocumentValidation) { + this.bypassDocumentValidation = bypassDocumentValidation; + return this; + } + + /** + * Returns the comment to send with the query. The default is not to include a comment with the query. + * + * @return the comment + * @since 4.6 + * @mongodb.server.release 4.4 + */ + @Nullable + public BsonValue getComment() { + return comment; + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + public BulkWriteOptions comment(@Nullable final String comment) { + this.comment = comment != null ? new BsonString(comment) : null; + return this; + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + public BulkWriteOptions comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + /** + * Add top-level variables to the operation + * + * @return the top level variables if set or null. + * @mongodb.server.release 5.0 + * @since 4.6 + */ + @Nullable + public Bson getLet() { + return variables; + } + + /** + * Add top-level variables for the operation + * + *

Allows for improved command readability by separating the variables from the query text. + * The value of let will be passed to all update and delete, but not insert, commands. + * + * @param variables for the operation or null + * @return this + * @mongodb.server.release 5.0 + * @since 4.6 + */ + public BulkWriteOptions let(@Nullable final Bson variables) { + this.variables = variables; + return this; + } + + @Override + public String toString() { + return "BulkWriteOptions{" + + "ordered=" + ordered + + ", bypassDocumentValidation=" + bypassDocumentValidation + + ", comment=" + comment + + ", let=" + variables + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/ChangeStreamPreAndPostImagesOptions.java b/driver-core/src/main/com/mongodb/client/model/ChangeStreamPreAndPostImagesOptions.java new file mode 100644 index 00000000000..99c5887c809 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/ChangeStreamPreAndPostImagesOptions.java @@ -0,0 +1,53 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +/** + * Options for change stream pre- and post- images. + * + * @see CreateCollectionOptions + * @since 4.7 + * @mongodb.driver.manual reference/command/create/ Create Collection + */ +public class ChangeStreamPreAndPostImagesOptions { + private final boolean enabled; + + /** + * Construct an instance + * + * @param enabled whether change stream pre- and post- images are enabled for the collection + */ + public ChangeStreamPreAndPostImagesOptions(final boolean enabled) { + this.enabled = enabled; + } + + /** + * Gets whether change stream pre- and post- images are enabled for the collection. + * + * @return whether change stream pre- and post- images are enabled for the collection + */ + public boolean isEnabled() { + return enabled; + } + + @Override + public String toString() { + return "ChangeStreamPreAndPostImagesOptions{" + + "enabled=" + enabled + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/ClusteredIndexOptions.java b/driver-core/src/main/com/mongodb/client/model/ClusteredIndexOptions.java new file mode 100644 index 00000000000..3ff4eacaaf8 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/ClusteredIndexOptions.java @@ -0,0 +1,92 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model; + +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * Options for cluster index on a collection. + * + * @see CreateCollectionOptions + * @since 4.7 + * @mongodb.server.release 5.3 + */ +public class ClusteredIndexOptions { + private final Bson key; + private final boolean unique; + private String name; + + /** + * Construct an instance with the required options. + * + * @param key the index key, which currently must be {@code {_id: 1}} + * @param unique whether the index entries must be unique, which currently must be true + */ + public ClusteredIndexOptions(final Bson key, final boolean unique) { + this.key = notNull("key", key); + this.unique = unique; + } + + /** + * Gets the index key. + * + * @return the index key + */ + public Bson getKey() { + return key; + } + + /** + * Gets whether the index entries must be unique + * @return whether the index entries must be unique + */ + public boolean isUnique() { + return unique; + } + + /** + * Gets the index name + * + * @return the index name + */ + @Nullable + public String getName() { + return name; + } + + /** + * Sets the index name + * @param name the index name + * @return this + */ + public ClusteredIndexOptions name(final String name) { + this.name = name; + return this; + } + + @Override + public String toString() { + return "ClusteredIndexOptions{" + + "key=" + key + + ", unique=" + unique + + ", name='" + name + '\'' + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/Collation.java b/driver-core/src/main/com/mongodb/client/model/Collation.java new file mode 100644 index 00000000000..c3c388e88a6 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/Collation.java @@ -0,0 +1,422 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.lang.Nullable; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; + +/** + * The options regarding collation support in MongoDB 3.4+ + * + * @mongodb.driver.manual reference/method/db.createCollection/ Create Collection + * @mongodb.driver.manual reference/command/createIndexes Index options + * @since 3.4 + * @mongodb.server.release 3.4 + */ +public final class Collation { + private final String locale; + private final Boolean caseLevel; + private final CollationCaseFirst caseFirst; + private final CollationStrength strength; + private final Boolean numericOrdering; + private final CollationAlternate alternate; + private final CollationMaxVariable maxVariable; + private final Boolean normalization; + private final Boolean backwards; + + /** + * Convenience method to create a Builder. 
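+ * <p>For illustration, a minimal usage sketch (the locale and option values are chosen arbitrarily):</p>
+ * <pre>{@code
+ * Collation collation = Collation.builder()
+ *         .locale("fr")
+ *         .caseLevel(true)
+ *         .collationStrength(CollationStrength.SECONDARY)
+ *         .build();
+ * }</pre>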
+ * + * @return a builder + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Convenience method to create a from an existing {@code Collation}. + * + * @param options create a builder from existing options + * @return a builder + */ + public static Builder builder(final Collation options) { + return new Builder(options); + } + + /** + * A Collation builder. + */ + @NotThreadSafe + public static final class Builder { + private String locale; + private Boolean caseLevel; + private CollationCaseFirst caseFirst; + private CollationStrength strength; + private Boolean numericOrdering; + private CollationAlternate alternate; + private CollationMaxVariable maxVariable; + private Boolean normalization; + private Boolean backwards; + + private Builder() { + } + + private Builder(final Collation options) { + this.locale = options.getLocale(); + this.caseLevel = options.getCaseLevel(); + this.caseFirst = options.getCaseFirst(); + this.strength = options.getStrength(); + this.numericOrdering = options.getNumericOrdering(); + this.alternate = options.getAlternate(); + this.maxVariable = options.getMaxVariable(); + this.normalization = options.getNormalization(); + this.backwards = options.getBackwards(); + } + + /** + * Sets the locale + * + * @param locale the locale + * @see ICU User Guide - Locale + * @return this + */ + public Builder locale(@Nullable final String locale) { + this.locale = locale; + return this; + } + + /** + * Sets the case level value + * + *
<p>Turns on case sensitivity</p>
+ * @param caseLevel the case level value + * @return this + */ + public Builder caseLevel(@Nullable final Boolean caseLevel) { + this.caseLevel = caseLevel; + return this; + } + + /** + * Sets the collation case first value + * + *
<p>Determines whether uppercase or lowercase values should come first</p>
+ * @param caseFirst the collation case first value + * @return this + */ + public Builder collationCaseFirst(@Nullable final CollationCaseFirst caseFirst) { + this.caseFirst = caseFirst; + return this; + } + + /** + * Sets the collation strength + * + * @param strength the strength + * @return this + */ + public Builder collationStrength(@Nullable final CollationStrength strength) { + this.strength = strength; + return this; + } + + /** + * Sets the numeric ordering + * + * @param numericOrdering if true will order numbers based on numerical order and not collation order + * @return this + */ + public Builder numericOrdering(@Nullable final Boolean numericOrdering) { + this.numericOrdering = numericOrdering; + return this; + } + + /** + * Sets the alternate + * + *
<p>Controls whether spaces and punctuation are considered base characters</p>
+ * + * @param alternate the alternate + * @return this + */ + public Builder collationAlternate(@Nullable final CollationAlternate alternate) { + this.alternate = alternate; + return this; + } + + /** + * Sets the maxVariable + * + * @param maxVariable the maxVariable + * @return this + */ + public Builder collationMaxVariable(@Nullable final CollationMaxVariable maxVariable) { + this.maxVariable = maxVariable; + return this; + } + + /** + * Sets the normalization value + * + *
<p>If true, normalizes text into Unicode NFD.</p>
+ * @param normalization the normalization value + * @return this + */ + public Builder normalization(@Nullable final Boolean normalization) { + this.normalization = normalization; + return this; + } + + /** + * Sets the backwards value + * + *
<p>Causes secondary differences to be considered in reverse order, as is done in the French language</p>
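+ * <p>For example, a French-style comparison (sketch only):</p>
+ * <pre>{@code
+ * Collation collation = Collation.builder().locale("fr").backwards(true).build();
+ * }</pre>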
+ * + * @param backwards the backwards value + * @return this + */ + public Builder backwards(@Nullable final Boolean backwards) { + this.backwards = backwards; + return this; + } + + /** + * Creates a new Collation object with the settings initialised on this builder. + * + * @return a new Collation object + */ + public Collation build() { + return new Collation(this); + } + } + + /** + * Returns the locale + * + * @see ICU User Guide - Locale + * @return the locale + */ + @Nullable + public String getLocale() { + return locale; + } + + /** + * Returns the case level value + * + * @return the case level value + */ + @Nullable + public Boolean getCaseLevel() { + return caseLevel; + } + + /** + * Returns the collation case first value + * + * @return the collation case first value + */ + @Nullable + public CollationCaseFirst getCaseFirst() { + return caseFirst; + } + + /** + * Returns the collation strength + * + * @return the collation strength + */ + @Nullable + public CollationStrength getStrength() { + return strength; + } + + /** + * Returns the numeric ordering, if true will order numbers based on numerical order and not collation order. + * + * @return the numeric ordering + */ + @Nullable + public Boolean getNumericOrdering() { + return numericOrdering; + } + + /** + * Returns the collation alternate + * + * @return the alternate + */ + @Nullable + public CollationAlternate getAlternate() { + return alternate; + } + + /** + * Returns the maxVariable + * + *
<p>Controls which characters are affected by the collation alternate {@link CollationAlternate#SHIFTED}.</p>
+ * @return the maxVariable + */ + @Nullable + public CollationMaxVariable getMaxVariable() { + return maxVariable; + } + + /** + * Returns the normalization value + * + *
<p>If true, normalizes text into Unicode NFD.</p>
+ * @return the normalization + */ + @Nullable + public Boolean getNormalization() { + return normalization; + } + + /** + * Returns the backwards value + * + * @return the backwards value + */ + @Nullable + public Boolean getBackwards() { + return backwards; + } + + /** + * Gets this collation options as a document. + * + * @return The collation options as a BsonDocument + */ + public BsonDocument asDocument() { + BsonDocument collation = new BsonDocument(); + if (locale != null) { + collation.put("locale", new BsonString(locale)); + } + if (caseLevel != null) { + collation.put("caseLevel", new BsonBoolean(caseLevel)); + } + if (caseFirst != null) { + collation.put("caseFirst", new BsonString(caseFirst.getValue())); + } + if (strength != null) { + collation.put("strength", new BsonInt32(strength.getIntRepresentation())); + } + if (numericOrdering != null) { + collation.put("numericOrdering", new BsonBoolean(numericOrdering)); + } + if (alternate != null) { + collation.put("alternate", new BsonString(alternate.getValue())); + } + if (maxVariable != null) { + collation.put("maxVariable", new BsonString(maxVariable.getValue())); + } + if (normalization != null) { + collation.put("normalization", new BsonBoolean(normalization)); + } + if (backwards != null) { + collation.put("backwards", new BsonBoolean(backwards)); + } + return collation; + } + + @Override + public boolean equals(final Object o) { + if (this == o){ + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + Collation that = (Collation) o; + + if (locale != null ? !locale.equals(that.getLocale()) : that.getLocale() != null) { + return false; + } + if (caseLevel != null ? !caseLevel.equals(that.getCaseLevel()) : that.getCaseLevel() != null) { + return false; + } + if (getCaseFirst() != that.getCaseFirst()) { + return false; + } + if (getStrength() != that.getStrength()) { + return false; + } + if (numericOrdering != null ? !numericOrdering.equals(that.getNumericOrdering()) : that.getNumericOrdering() != null) { + return false; + } + if (getAlternate() != that.getAlternate()) { + return false; + } + if (getMaxVariable() != that.getMaxVariable()) { + return false; + } + if (normalization != null ? !normalization.equals(that.getNormalization()) : that.getNormalization() != null) { + return false; + } + if (backwards != null ? !backwards.equals(that.getBackwards()) : that.getBackwards() != null) { + return false; + } + return true; + } + + @Override + public int hashCode() { + int result = locale != null ? locale.hashCode() : 0; + result = 31 * result + (caseLevel != null ? caseLevel.hashCode() : 0); + result = 31 * result + (caseFirst != null ? caseFirst.hashCode() : 0); + result = 31 * result + (strength != null ? strength.hashCode() : 0); + result = 31 * result + (numericOrdering != null ? numericOrdering.hashCode() : 0); + result = 31 * result + (alternate != null ? alternate.hashCode() : 0); + result = 31 * result + (maxVariable != null ? maxVariable.hashCode() : 0); + result = 31 * result + (normalization != null ? normalization.hashCode() : 0); + result = 31 * result + (backwards != null ? 
backwards.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "Collation{" + + "locale='" + locale + "'" + + ", caseLevel=" + caseLevel + + ", caseFirst=" + caseFirst + + ", strength=" + strength + + ", numericOrdering=" + numericOrdering + + ", alternate=" + alternate + + ", maxVariable=" + maxVariable + + ", normalization=" + normalization + + ", backwards=" + backwards + + "}"; + } + + + private Collation(final Builder builder) { + this.locale = builder.locale; + this.caseLevel = builder.caseLevel; + this.caseFirst = builder.caseFirst; + this.strength = builder.strength; + this.numericOrdering = builder.numericOrdering; + this.alternate = builder.alternate; + this.maxVariable = builder.maxVariable; + this.normalization = builder.normalization; + this.backwards = builder.backwards; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/CollationAlternate.java b/driver-core/src/main/com/mongodb/client/model/CollationAlternate.java new file mode 100644 index 00000000000..e3df3aa7a3e --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/CollationAlternate.java @@ -0,0 +1,71 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import static java.lang.String.format; + +/** + * Collation support allows the specific configuration of whether or not spaces and punctuation are considered base characters. + * + * @since 3.4 + * @mongodb.server.release 3.4 + */ +public enum CollationAlternate { + /** + * Non-ignorable + * + *
<p>Spaces and punctuation are considered base characters</p>
+ */ + NON_IGNORABLE("non-ignorable"), + + /** + * Shifted + * + *
<p>Spaces and punctuation are not considered base characters, and are only distinguished when the collation strength is > 3</p>
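+ * <p>For illustration, a sketch of a collation that treats only spaces as ignorable at the first three strength levels:</p>
+ * <pre>{@code
+ * Collation collation = Collation.builder()
+ *         .locale("en")
+ *         .collationAlternate(CollationAlternate.SHIFTED)
+ *         .collationMaxVariable(CollationMaxVariable.SPACE)
+ *         .build();
+ * }</pre>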
+ * @see CollationMaxVariable + */ + SHIFTED("shifted"); + + private final String value; + CollationAlternate(final String caseFirst) { + this.value = caseFirst; + } + + /** + * @return the String representation of the collation case first value + */ + public String getValue() { + return value; + } + + /** + * Returns the CollationAlternate from the string value. + * + * @param collationAlternate the string value. + * @return the read concern + */ + public static CollationAlternate fromString(final String collationAlternate) { + if (collationAlternate != null) { + for (CollationAlternate alternate : CollationAlternate.values()) { + if (collationAlternate.equals(alternate.value)) { + return alternate; + } + } + } + throw new IllegalArgumentException(format("'%s' is not a valid collationAlternate", collationAlternate)); + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/CollationCaseFirst.java b/driver-core/src/main/com/mongodb/client/model/CollationCaseFirst.java new file mode 100644 index 00000000000..16d666593ca --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/CollationCaseFirst.java @@ -0,0 +1,72 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import static java.lang.String.format; + +/** + * Collation support allows the specific configuration of how character cases are handled. + * + * @since 3.4 + * @mongodb.server.release 3.4 + */ +public enum CollationCaseFirst { + + /** + * Uppercase first + */ + UPPER("upper"), + + /** + * Lowercase first + */ + LOWER("lower"), + + /** + * Off + */ + OFF("off"); + + private final String value; + CollationCaseFirst(final String caseFirst) { + this.value = caseFirst; + } + + /** + * @return the String representation of the collation case first value + */ + public String getValue() { + return value; + } + + /** + * Returns the CollationCaseFirst from the string value. + * + * @param collationCaseFirst the string value. + * @return the read concern + */ + public static CollationCaseFirst fromString(final String collationCaseFirst) { + if (collationCaseFirst != null) { + for (CollationCaseFirst caseFirst : CollationCaseFirst.values()) { + if (collationCaseFirst.equals(caseFirst.value)) { + return caseFirst; + } + } + } + throw new IllegalArgumentException(format("'%s' is not a valid collationCaseFirst", collationCaseFirst)); + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/CollationMaxVariable.java b/driver-core/src/main/com/mongodb/client/model/CollationMaxVariable.java new file mode 100644 index 00000000000..3c1ede0dc35 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/CollationMaxVariable.java @@ -0,0 +1,73 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import static java.lang.String.format; + +/** + * Collation support allows the specific configuration of whether or not spaces and punctuation are considered base characters. + * + *
<p>{@code CollationMaxVariable} controls which characters are affected by {@link CollationAlternate#SHIFTED}.</p>
+ * + * @see CollationAlternate#SHIFTED + * @since 3.4 + * @mongodb.server.release 3.4 + */ +public enum CollationMaxVariable { + /** + * Punct + * + *
<p>Both punctuation and spaces are affected.</p>
+ */ + PUNCT("punct"), + + /** + * Shifted + * + *
<p>Only spaces are affected.</p>
+ */ + SPACE("space"); + + private final String value; + CollationMaxVariable(final String caseFirst) { + this.value = caseFirst; + } + + /** + * @return the String representation of the collation case first value + */ + public String getValue() { + return value; + } + + /** + * Returns the CollationMaxVariable from the string value. + * + * @param collationMaxVariable the string value. + * @return the read concern + */ + public static CollationMaxVariable fromString(final String collationMaxVariable) { + if (collationMaxVariable != null) { + for (CollationMaxVariable maxVariable : CollationMaxVariable.values()) { + if (collationMaxVariable.equals(maxVariable.value)) { + return maxVariable; + } + } + } + throw new IllegalArgumentException(format("'%s' is not a valid collationMaxVariable", collationMaxVariable)); + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/CollationStrength.java b/driver-core/src/main/com/mongodb/client/model/CollationStrength.java new file mode 100644 index 00000000000..7f8ea347d7f --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/CollationStrength.java @@ -0,0 +1,91 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +/** + * Collation support allows the specific configuration of how differences between characters are handled. + * + * @since 3.4 + * @mongodb.server.release 3.4 + */ +public enum CollationStrength { + + /** + * Strongest level, denote difference between base characters + */ + PRIMARY(1), + + /** + * Accents in characters are considered secondary differences + */ + SECONDARY(2), + + /** + * Upper and lower case differences in characters are distinguished at the tertiary level. The server default. + */ + TERTIARY(3), + + /** + * When punctuation is ignored at level 1-3, an additional level can be used to distinguish words with and without punctuation. + */ + QUATERNARY(4), + + /** + * When all other levels are equal, the identical level is used as a tiebreaker. + * The Unicode code point values of the NFD form of each string are compared at this level, just in case there is no difference at + * levels 1-4 + */ + IDENTICAL(5); + + private final int intRepresentation; + + CollationStrength(final int intRepresentation) { + this.intRepresentation = intRepresentation; + } + + /** + * The integer representation of the collation strength. + * + * @return the integer representation + */ + public int getIntRepresentation() { + return intRepresentation; + } + + /** + * Gets the order from the given integer representation. 
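+ * <p>For example:</p>
+ * <pre>{@code
+ * CollationStrength strength = CollationStrength.fromInt(3); // TERTIARY, the server default
+ * }</pre>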
+ * + * @param intRepresentation the integer representation + * @return the order + */ + public static CollationStrength fromInt(final int intRepresentation) { + switch (intRepresentation) { + case 1: + return PRIMARY; + case 2: + return SECONDARY; + case 3: + return TERTIARY; + case 4: + return QUATERNARY; + case 5: + return IDENTICAL; + default: + throw new IllegalArgumentException(intRepresentation + " is not a valid collation strength"); + } + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/CountOptions.java b/driver-core/src/main/com/mongodb/client/model/CountOptions.java new file mode 100644 index 00000000000..17888908f8d --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/CountOptions.java @@ -0,0 +1,229 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.lang.Nullable; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.conversions.Bson; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * The options for a count operation. + * + * @since 3.0 + * @mongodb.driver.manual reference/command/count/ Count + */ +public class CountOptions { + private Bson hint; + private String hintString; + private int limit; + private int skip; + private long maxTimeMS; + private Collation collation; + private BsonValue comment; + + /** + * Gets the hint to apply. + * + * @return the hint, which should describe an existing + */ + @Nullable + public Bson getHint() { + return hint; + } + + /** + * Gets the hint string to apply. + * + * @return the hint string, which should be the name of an existing index + */ + @Nullable + public String getHintString() { + return hintString; + } + + /** + * Sets the hint to apply. + * + * @param hint a document describing the index which should be used for this operation. + * @return this + */ + public CountOptions hint(@Nullable final Bson hint) { + this.hint = hint; + return this; + } + + /** + * Sets the hint to apply. + * + *
<p>Note: If {@link CountOptions#hint(Bson)} is set, that will be used instead of any hint string.</p>
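+ * <p>For illustration (the index name below is invented):</p>
+ * <pre>{@code
+ * CountOptions options = new CountOptions().hintString("age_1");
+ * }</pre>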
+ * + * @param hint the name of the index which should be used for the operation + * @return this + */ + public CountOptions hintString(@Nullable final String hint) { + this.hintString = hint; + return this; + } + + /** + * Gets the limit to apply. The default is 0, which means there is no limit. + * + * @return the limit + * @mongodb.driver.manual reference/method/cursor.limit/#cursor.limit Limit + */ + public int getLimit() { + return limit; + } + + /** + * Sets the limit to apply. + * + * @param limit the limit + * @return this + * @mongodb.driver.manual reference/method/cursor.limit/#cursor.limit Limit + */ + public CountOptions limit(final int limit) { + this.limit = limit; + return this; + } + + /** + * Gets the number of documents to skip. The default is 0. + * + * @return the number of documents to skip + * @mongodb.driver.manual reference/method/cursor.skip/#cursor.skip Skip + */ + public int getSkip() { + return skip; + } + + /** + * Sets the number of documents to skip. + * + * @param skip the number of documents to skip + * @return this + * @mongodb.driver.manual reference/method/cursor.skip/#cursor.skip Skip + */ + public CountOptions skip(final int skip) { + this.skip = skip; + return this; + } + + /** + * Gets the maximum execution time on the server for this operation. The default is 0, which places no limit on the execution time. + * + * @param timeUnit the time unit to return the result in + * @return the maximum execution time in the given time unit + */ + public long getMaxTime(final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS); + } + + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time + * @param timeUnit the time unit, which may not be null + * @return this + */ + public CountOptions maxTime(final long maxTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); + return this; + } + + /** + * Returns the collation options + * + * @return the collation options + * @since 3.4 + * @mongodb.server.release 3.4 + */ + @Nullable + public Collation getCollation() { + return collation; + } + + /** + * Sets the collation options + * + *
<p>A null value represents the server default.</p>
+ * @param collation the collation options to use + * @return this + * @since 3.4 + * @mongodb.server.release 3.4 + */ + public CountOptions collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + /** + * @return the comment for this operation. A null value means no comment is set. + * @since 4.6 + * @mongodb.server.release 4.4 + */ + @Nullable + public BsonValue getComment() { + return comment; + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + public CountOptions comment(@Nullable final String comment) { + this.comment = comment == null ? null : new BsonString(comment); + return this; + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + public CountOptions comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + @Override + public String toString() { + return "CountOptions{" + + "hint=" + hint + + ", hintString='" + hintString + '\'' + + ", limit=" + limit + + ", skip=" + skip + + ", maxTimeMS=" + maxTimeMS + + ", collation=" + collation + + ", comment=" + comment + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/CreateCollectionOptions.java b/driver-core/src/main/com/mongodb/client/model/CreateCollectionOptions.java new file mode 100644 index 00000000000..f0ea455607d --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/CreateCollectionOptions.java @@ -0,0 +1,392 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * Options for creating a collection + * + * @mongodb.driver.manual reference/command/create/ Create Collection + * @mongodb.driver.manual core/timeseries-collections/ Time-series collections + * @since 3.0 + */ +public class CreateCollectionOptions { + private long maxDocuments; + private boolean capped; + private long sizeInBytes; + private Bson storageEngineOptions; + private IndexOptionDefaults indexOptionDefaults = new IndexOptionDefaults(); + private ValidationOptions validationOptions = new ValidationOptions(); + private Collation collation; + private long expireAfterSeconds; + private TimeSeriesOptions timeSeriesOptions; + private ChangeStreamPreAndPostImagesOptions changeStreamPreAndPostImagesOptions; + private ClusteredIndexOptions clusteredIndexOptions; + private Bson encryptedFields; + + /** + * Construct a new instance + */ + public CreateCollectionOptions() { + } + + /** + * A shallow copy constructor. + * + * @param options The options to copy. 
+ * + * @since 4.9 + */ + public CreateCollectionOptions(final CreateCollectionOptions options) { + notNull("options", options); + maxDocuments = options.maxDocuments; + capped = options.capped; + sizeInBytes = options.sizeInBytes; + storageEngineOptions = options.storageEngineOptions; + indexOptionDefaults = options.indexOptionDefaults; + validationOptions = options.validationOptions; + collation = options.collation; + expireAfterSeconds = options.expireAfterSeconds; + timeSeriesOptions = options.timeSeriesOptions; + changeStreamPreAndPostImagesOptions = options.changeStreamPreAndPostImagesOptions; + clusteredIndexOptions = options.clusteredIndexOptions; + encryptedFields = options.encryptedFields; + } + + /** + * Gets the maximum number of documents allowed in a capped collection. + * + * @return max number of documents in a capped collection + */ + public long getMaxDocuments() { + return maxDocuments; + } + + /** + * Sets the maximum number of documents allowed in a capped collection. + * + * @param maxDocuments the maximum number of documents allowed in capped collection + * @return this + */ + public CreateCollectionOptions maxDocuments(final long maxDocuments) { + this.maxDocuments = maxDocuments; + return this; + } + + /** + * Gets whether the collection is capped. + * + * @return whether the collection is capped + */ + public boolean isCapped() { + return capped; + } + + + /** + * sets whether the collection is capped. + * + * @param capped whether the collection is capped + * @return this + */ + public CreateCollectionOptions capped(final boolean capped) { + this.capped = capped; + return this; + } + + /** + * Gets the maximum size in bytes of a capped collection. + * + * @return the maximum size of a capped collection. + */ + public long getSizeInBytes() { + return sizeInBytes; + } + + /** + * Gets the maximum size of in bytes of a capped collection. + * + * @param sizeInBytes the maximum size of a capped collection. + * @return this + */ + public CreateCollectionOptions sizeInBytes(final long sizeInBytes) { + this.sizeInBytes = sizeInBytes; + return this; + } + + /** + * Gets the storage engine options document for the collection. + * + * @return the storage engine options + * @mongodb.server.release 3.0 + */ + @Nullable + public Bson getStorageEngineOptions() { + return storageEngineOptions; + } + + /** + * Sets the storage engine options document defaults for the collection + * + * @param storageEngineOptions the storage engine options + * @return this + * @mongodb.server.release 3.0 + */ + public CreateCollectionOptions storageEngineOptions(@Nullable final Bson storageEngineOptions) { + this.storageEngineOptions = storageEngineOptions; + return this; + } + + /** + * Gets the index option defaults for the collection. + * + * @return the index option defaults + * @since 3.2 + * @mongodb.server.release 3.2 + */ + public IndexOptionDefaults getIndexOptionDefaults() { + return indexOptionDefaults; + } + + /** + * Sets the index option defaults for the collection. 
+ * + * @param indexOptionDefaults the index option defaults + * @return this + * @since 3.2 + * @mongodb.server.release 3.2 + */ + public CreateCollectionOptions indexOptionDefaults(final IndexOptionDefaults indexOptionDefaults) { + this.indexOptionDefaults = notNull("indexOptionDefaults", indexOptionDefaults); + return this; + } + + /** + * Gets the validation options for documents being inserted or updated in a collection + * + * @return the validation options + * @since 3.2 + * @mongodb.server.release 3.2 + */ + public ValidationOptions getValidationOptions() { + return validationOptions; + } + + /** + * Sets the validation options for documents being inserted or updated in a collection + * + * @param validationOptions the validation options + * @return this + * @since 3.2 + * @mongodb.server.release 3.2 + */ + public CreateCollectionOptions validationOptions(final ValidationOptions validationOptions) { + this.validationOptions = notNull("validationOptions", validationOptions); + return this; + } + + /** + * Returns the collation options + * + * @return the collation options + * @since 3.4 + * @mongodb.server.release 3.4 + */ + @Nullable + public Collation getCollation() { + return collation; + } + + /** + * Sets the collation options + * + *
<p>A null value represents the server default.</p>
+ * @param collation the collation options to use + * @return this + * @since 3.4 + * @mongodb.server.release 3.4 + */ + public CreateCollectionOptions collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + /** + * Returns the expire-after option. The default value is 0, which indicates no expiration. + * + * @param timeUnit the time unit + * @return the expire-after option, which may be null. + * @since 4.3 + * @mongodb.driver.manual core/timeseries-collections/ Time-series collections + */ + public long getExpireAfter(final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + return timeUnit.convert(expireAfterSeconds, TimeUnit.SECONDS); + } + + /** + * Sets the expire-after option. + * + *
<p>
+ * A duration indicating after how long old time-series data should be deleted. Partial seconds are ignored.
+ * </p>
+ * <p>
+ * Currently applies only to time-series collections, so if this value is set then the time-series options must be set as well.
+ * </p>
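+ * <p>A hypothetical sketch; the time field name is invented, and {@code TimeSeriesOptions} is assumed to accept it as its constructor argument:</p>
+ * <pre>{@code
+ * CreateCollectionOptions options = new CreateCollectionOptions()
+ *         .timeSeriesOptions(new TimeSeriesOptions("timestamp"))
+ *         .expireAfter(30, TimeUnit.DAYS);
+ * }</pre>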
+ * @param expireAfter the expire-after duration. After conversion to seconds using + * {@link TimeUnit#convert(long, java.util.concurrent.TimeUnit)}, the value must be >= 0. A value of 0 indicates no expiration. + * @param timeUnit the time unit + * @return this + * @since 4.3 + * @see #timeSeriesOptions(TimeSeriesOptions) + * @mongodb.driver.manual core/timeseries-collections/ Time-series collections + */ + public CreateCollectionOptions expireAfter(final long expireAfter, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + long asSeconds = TimeUnit.SECONDS.convert(expireAfter, timeUnit); + if (asSeconds < 0) { + throw new IllegalArgumentException("expireAfter, after conversion to seconds, must be >= 0"); + } + this.expireAfterSeconds = asSeconds; + return this; + } + + /** + * Gets the time series collection options. + * + * @return the options for a time-series collection + * @since 4.3 + * @mongodb.driver.manual core/timeseries-collections/ Time-series collections + */ + @Nullable + public TimeSeriesOptions getTimeSeriesOptions() { + return timeSeriesOptions; + } + + /** + * Sets the time-series collection options. + * + * @param timeSeriesOptions the time-series options + * @return this + * @since 4.3 + * @mongodb.driver.manual core/timeseries-collections/ Time-series collections + */ + public CreateCollectionOptions timeSeriesOptions(final TimeSeriesOptions timeSeriesOptions) { + this.timeSeriesOptions = timeSeriesOptions; + return this; + } + + /** + * Gets the clustered index collection options. + * + * @return the options for a clustered index + * @since 4.7 + */ + @Nullable + public ClusteredIndexOptions getClusteredIndexOptions() { + return clusteredIndexOptions; + } + + /** + * Sets the clustered index collection options. + * + * @param clusteredIndexOptions the clustered index options + * @return this + * @since 4.7 + */ + public CreateCollectionOptions clusteredIndexOptions(final ClusteredIndexOptions clusteredIndexOptions) { + this.clusteredIndexOptions = clusteredIndexOptions; + return this; + } + + /** + * Gets change stream pre- and post- images options. + * + * @return the options for change stream pre- and post- images + * @since 4.7 + */ + @Nullable + public ChangeStreamPreAndPostImagesOptions getChangeStreamPreAndPostImagesOptions() { + return changeStreamPreAndPostImagesOptions; + } + + /** + * Sets the change stream pre- and post- images options. + * + * @param changeStreamPreAndPostImagesOptions the change stream pre- and post- images options + * @return this + * @since 4.7 + */ + public CreateCollectionOptions changeStreamPreAndPostImagesOptions( + final ChangeStreamPreAndPostImagesOptions changeStreamPreAndPostImagesOptions) { + this.changeStreamPreAndPostImagesOptions = changeStreamPreAndPostImagesOptions; + return this; + } + + /** + * Gets any explicitly set encrypted fields. + * + *
<p>Note: If not set, the driver will look up the namespace in {@link AutoEncryptionSettings#getEncryptedFieldsMap()}.</p>
+ * @return the encrypted fields document + * @since 4.7 + * @mongodb.server.release 7.0 + */ + @Nullable + public Bson getEncryptedFields() { + return encryptedFields; + } + + /** + * Sets the encrypted fields + * + *
<p>Explicitly set encrypted fields.</p>
+ *
<p>Note: If not set, the driver will look up the namespace in {@link AutoEncryptionSettings#getEncryptedFieldsMap()}.</p>
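+ * <p>A schematic sketch only; the field path and type below are invented, and a real document must follow the server's encryptedFields schema:</p>
+ * <pre>{@code
+ * BsonDocument encryptedFields = BsonDocument.parse(
+ *         "{fields: [{path: 'ssn', keyId: null, bsonType: 'string'}]}");
+ * CreateCollectionOptions options = new CreateCollectionOptions()
+ *         .encryptedFields(encryptedFields);
+ * }</pre>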
+ * @param encryptedFields the encrypted fields document + * @return this + * @since 4.7 + * @mongodb.driver.manual core/security-client-side-encryption/ In-use encryption + * @mongodb.server.release 7.0 + */ + public CreateCollectionOptions encryptedFields(@Nullable final Bson encryptedFields) { + this.encryptedFields = encryptedFields; + return this; + } + + @Override + public String toString() { + return "CreateCollectionOptions{" + + ", maxDocuments=" + maxDocuments + + ", capped=" + capped + + ", sizeInBytes=" + sizeInBytes + + ", storageEngineOptions=" + storageEngineOptions + + ", indexOptionDefaults=" + indexOptionDefaults + + ", validationOptions=" + validationOptions + + ", collation=" + collation + + ", expireAfterSeconds=" + expireAfterSeconds + + ", timeSeriesOptions=" + timeSeriesOptions + + ", changeStreamPreAndPostImagesOptions=" + changeStreamPreAndPostImagesOptions + + ", encryptedFields=" + encryptedFields + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/CreateEncryptedCollectionParams.java b/driver-core/src/main/com/mongodb/client/model/CreateEncryptedCollectionParams.java new file mode 100644 index 00000000000..8df26cad913 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/CreateEncryptedCollectionParams.java @@ -0,0 +1,81 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.client.model.vault.DataKeyOptions; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * Auxiliary parameters for creating an encrypted collection. + * + * @since 4.9 + */ +public final class CreateEncryptedCollectionParams { + private final String kmsProvider; + @Nullable + private BsonDocument masterKey; + + /** + * @param kmsProvider The name of the KMS provider. + */ + public CreateEncryptedCollectionParams(final String kmsProvider) { + this.kmsProvider = notNull("kmsProvider", kmsProvider); + masterKey = null; + } + + /** + * The name of the KMS provider. + * + * @return The name of the KMS provider. + */ + public String getKmsProvider() { + return kmsProvider; + } + + /** + * Sets the {@linkplain DataKeyOptions#getMasterKey() master key} for creating a data key. + * + * @param masterKey The master key for creating a data key. + * @return {@code this}. + */ + public CreateEncryptedCollectionParams masterKey(@Nullable final BsonDocument masterKey) { + this.masterKey = masterKey; + return this; + } + + /** + * The {@linkplain DataKeyOptions#getMasterKey() master key} for creating a data key. + * The default is {@code null}. + * + * @return The master key for creating a data key. 
+ */ + @Nullable + public BsonDocument getMasterKey() { + return masterKey; + } + + @Override + public String toString() { + return "CreateEncryptedCollectionParams{" + + ", kmsProvider=" + kmsProvider + + ", masterKey=" + masterKey + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/CreateIndexOptions.java b/driver-core/src/main/com/mongodb/client/model/CreateIndexOptions.java new file mode 100644 index 00000000000..e93da219fb1 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/CreateIndexOptions.java @@ -0,0 +1,93 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + + +import com.mongodb.CreateIndexCommitQuorum; +import com.mongodb.lang.Nullable; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * The options to apply to the command when creating indexes. + * + * @mongodb.driver.manual reference/command/createIndexes Index options + * @since 3.6 + */ +public class CreateIndexOptions { + private long maxTimeMS; + private CreateIndexCommitQuorum commitQuorum; + + /** + * Gets the maximum execution time on the server for this operation. The default is 0, which places no limit on the execution time. + * + * @param timeUnit the time unit to return the result in + * @return the maximum execution time in the given time unit + */ + public long getMaxTime(final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS); + } + + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time + * @param timeUnit the time unit, which may not be null + * @return this + */ + public CreateIndexOptions maxTime(final long maxTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); + return this; + } + + /** + * Gets the create index commit quorum for this operation. + * + * @return the create index commit quorum + * @mongodb.server.release 4.4 + * @since 4.1 + */ + @Nullable + public CreateIndexCommitQuorum getCommitQuorum() { + return commitQuorum; + } + + /** + * Sets the create index commit quorum for this operation. 
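+ * <p>A brief sketch; {@code CreateIndexCommitQuorum.MAJORITY} is assumed to be one of the provided quorum constants:</p>
+ * <pre>{@code
+ * CreateIndexOptions options = new CreateIndexOptions()
+ *         .commitQuorum(CreateIndexCommitQuorum.MAJORITY);
+ * }</pre>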
+ * + * @param commitQuorum the create index commit quorum + * @return this + * @mongodb.server.release 4.4 + * @since 4.1 + */ + public CreateIndexOptions commitQuorum(final CreateIndexCommitQuorum commitQuorum) { + this.commitQuorum = commitQuorum; + return this; + } + + @Override + public String toString() { + return "CreateIndexOptions{" + + "maxTimeMS=" + maxTimeMS + + ", commitQuorum=" + commitQuorum + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/CreateViewOptions.java b/driver-core/src/main/com/mongodb/client/model/CreateViewOptions.java new file mode 100644 index 00000000000..72e3ccef650 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/CreateViewOptions.java @@ -0,0 +1,59 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.lang.Nullable; + +/** + * Options for creating a view + * + * @since 3.4 + * @mongodb.server.release 3.4 + * @mongodb.driver.manual reference/command/create Create Command + */ +public class CreateViewOptions { + private Collation collation; + + /** + * Returns the collation options + * + * @return the collation options + */ + @Nullable + public Collation getCollation() { + return collation; + } + + /** + * Sets the collation options + * + *
<p>A null value represents the server default.</p>
+ * @param collation the collation options to use + * @return this + */ + public CreateViewOptions collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + @Override + public String toString() { + return "CreateViewOptions{" + + "collation=" + collation + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/DeleteManyModel.java b/driver-core/src/main/com/mongodb/client/model/DeleteManyModel.java new file mode 100644 index 00000000000..24fc81c3c0a --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/DeleteManyModel.java @@ -0,0 +1,83 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import org.bson.conversions.Bson; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A model describing the removal of all documents matching the query filter. + * + * @param the type of document to update. In practice this doesn't actually apply to updates but is here for consistency with the + * other write models + * @since 3.0 + * @mongodb.driver.manual tutorial/remove-documents/ Remove + */ +public final class DeleteManyModel extends WriteModel { + private final Bson filter; + private final DeleteOptions options; + + /** + * Construct a new instance. + * + * @param filter a document describing the query filter, which may not be null. + */ + public DeleteManyModel(final Bson filter) { + this(filter, new DeleteOptions()); + } + + /** + * Construct a new instance. + * + * @param filter a document describing the query filter, which may not be null. + * @param options the options to apply + * @since 3.4 + * @mongodb.server.release 3.4 + */ + public DeleteManyModel(final Bson filter, final DeleteOptions options) { + this.filter = notNull("filter", filter); + this.options = notNull("options", options); + } + + /** + * Gets the query filter. + * + * @return the query filter + */ + public Bson getFilter() { + return filter; + } + + /** + * Gets the options to apply. + * + * @return the options + * @since 3.4 + */ + public DeleteOptions getOptions() { + return options; + } + + @Override + public String toString() { + return "DeleteManyModel{" + + "filter=" + filter + + ", options=" + options + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/DeleteOneModel.java b/driver-core/src/main/com/mongodb/client/model/DeleteOneModel.java new file mode 100644 index 00000000000..5c49bae292b --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/DeleteOneModel.java @@ -0,0 +1,83 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import org.bson.conversions.Bson; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A model describing the removal of at most one document matching the query filter. + * + * @param the type of document to update. In practice this doesn't actually apply to updates but is here for consistency with the + * other write models + * @since 3.0 + * @mongodb.driver.manual tutorial/remove-documents/ Remove + */ +public class DeleteOneModel extends WriteModel { + private final Bson filter; + private final DeleteOptions options; + + /** + * Construct a new instance. + * + * @param filter a document describing the query filter, which may not be null. + */ + public DeleteOneModel(final Bson filter) { + this(filter, new DeleteOptions()); + } + + /** + * Construct a new instance. + * + * @param filter a document describing the query filter, which may not be null. + * @param options the options to apply + * @since 3.4 + * @mongodb.server.release 3.4 + */ + public DeleteOneModel(final Bson filter, final DeleteOptions options) { + this.filter = notNull("filter", filter); + this.options = notNull("options", options); + } + + /** + * Gets the query filter. + * + * @return the query filter + */ + public Bson getFilter() { + return filter; + } + + /** + * Gets the options to apply. + * + * @return the options + * @since 3.4 + */ + public DeleteOptions getOptions() { + return options; + } + + @Override + public String toString() { + return "DeleteOneModel{" + + "filter=" + filter + + ", options=" + options + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/DeleteOptions.java b/driver-core/src/main/com/mongodb/client/model/DeleteOptions.java new file mode 100644 index 00000000000..2574ccde6fb --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/DeleteOptions.java @@ -0,0 +1,195 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.lang.Nullable; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.conversions.Bson; + +/** + * The options to apply when deleting documents. 
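+ * <p>For illustration, a hypothetical delete that applies these options (the filter values and the {@code collection} and {@code collation} variables are assumed):</p>
+ * <pre>{@code
+ * collection.deleteMany(Filters.eq("status", "archived"),
+ *         new DeleteOptions().collation(collation));
+ * }</pre>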
+ * + * @since 3.4 + * @mongodb.driver.manual tutorial/remove-documents/ Remove documents + * @mongodb.driver.manual reference/command/delete/ Delete Command + */ +public class DeleteOptions { + private Bson hint; + private String hintString; + private Collation collation; + private BsonValue comment; + private Bson variables; + + /** + * Returns the collation options + * + * @return the collation options + * @mongodb.server.release 3.4 + */ + @Nullable + public Collation getCollation() { + return collation; + } + + /** + * Sets the collation options + * + *
<p>A null value represents the server default.</p>
+ * @param collation the collation options to use + * @return this + * @mongodb.server.release 3.4 + */ + public DeleteOptions collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + /** + * Gets the hint to apply. + * + * @return the hint, which should describe an existing index + * @since 4.1 + * @mongodb.server.release 4.4 + */ + @Nullable + public Bson getHint() { + return hint; + } + + /** + * Gets the hint string to apply. + * + * @return the hint string, which should be the name of an existing index + * @since 4.1 + * @mongodb.server.release 4.4 + */ + @Nullable + public String getHintString() { + return hintString; + } + + /** + * Sets the hint to apply. + * + * @param hint a document describing the index which should be used for this operation. + * @return this + * @since 4.1 + * @mongodb.server.release 4.4 + */ + public DeleteOptions hint(@Nullable final Bson hint) { + this.hint = hint; + return this; + } + + /** + * Sets the hint to apply. + * + *
<p>Note: If {@link DeleteOptions#hint(Bson)} is set, that will be used instead of any hint string.</p>
+ * + * @param hint the name of the index which should be used for the operation + * @return this + * @since 4.1 + * @mongodb.server.release 4.4 + */ + public DeleteOptions hintString(@Nullable final String hint) { + this.hintString = hint; + return this; + } + + /** + * @return the comment for this operation. A null value means no comment is set. + * @since 4.6 + * @mongodb.server.release 4.4 + */ + @Nullable + public BsonValue getComment() { + return comment; + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + *
<p>For bulk operations use: {@link BulkWriteOptions#comment(String)}</p>
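+ * <p>For example:</p>
+ * <pre>{@code
+ * DeleteOptions options = new DeleteOptions().comment("nightly cleanup job");
+ * }</pre>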
+ * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + public DeleteOptions comment(@Nullable final String comment) { + this.comment = comment != null ? new BsonString(comment) : null; + return this; + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + *
<p>For bulk operations use: {@link BulkWriteOptions#comment(BsonValue)}</p>
+ * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + public DeleteOptions comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + /** + * Add top-level variables to the operation + * + *
<p>
The value of let will be passed to all update and delete, but not insert, commands. + * + * @return the top level variables if set or null. + * @mongodb.server.release 5.0 + * @since 4.6 + */ + @Nullable + public Bson getLet() { + return variables; + } + + /** + * Add top-level variables for the operation + * + *

+     * <p>Allows for improved command readability by separating the variables from the query text.</p>
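+     * <p>A minimal illustrative sketch; the variable name {@code targetStatus} is hypothetical:</p>
+     * <pre>
+     *    new DeleteOptions().let(new Document("targetStatus", "inactive"))
+     * </pre>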

For bulk operations use: {@link BulkWriteOptions#let(Bson)} + * + * @param variables for the operation or null + * @return this + * @mongodb.server.release 5.0 + * @since 4.6 + */ + public DeleteOptions let(final Bson variables) { + this.variables = variables; + return this; + } + + @Override + public String toString() { + return "DeleteOptions{" + + "collation=" + collation + + ", hint=" + hint + + ", hintString='" + hintString + '\'' + + ", comment=" + comment + + ", let=" + variables + + '}'; + } +} + diff --git a/driver-core/src/main/com/mongodb/client/model/DropCollectionOptions.java b/driver-core/src/main/com/mongodb/client/model/DropCollectionOptions.java new file mode 100644 index 00000000000..5ae247547b8 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/DropCollectionOptions.java @@ -0,0 +1,68 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * Options for dropping a collection + * + * @mongodb.driver.manual reference/command/drop/ Drop Collection + * @mongodb.driver.manual core/security-client-side-encryption/ In-use encryption + * @since 4.7 + */ +public class DropCollectionOptions { + private Bson encryptedFields; + + /** + * Gets any explicitly set encrypted fields. + * + *

+     * <p>Note: If not set the driver will look up the namespace in {@link AutoEncryptionSettings#getEncryptedFieldsMap()}</p>

+ * @return the encrypted fields document + * @since 4.7 + * @mongodb.server.release 7.0 + */ + @Nullable + public Bson getEncryptedFields() { + return encryptedFields; + } + + /** + * Sets the encrypted fields document + * + *

+     * <p>Explicitly set encrypted fields.</p>

+ *

+     * <p>Note: If not set the driver will look up the namespace in {@link AutoEncryptionSettings#getEncryptedFieldsMap()}</p>
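+     * <p>A minimal illustrative sketch; the field path and type below are hypothetical:</p>
+     * <pre>
+     *    new DropCollectionOptions().encryptedFields(
+     *            BsonDocument.parse("{fields: [{path: 'ssn', keyId: null, bsonType: 'string'}]}"))
+     * </pre>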

+ * @param encryptedFields the encrypted fields document + * @return this + * @since 4.7 + * @mongodb.server.release 7.0 + * @mongodb.driver.manual core/security-client-side-encryption/ In-use encryption + */ + public DropCollectionOptions encryptedFields(@Nullable final Bson encryptedFields) { + this.encryptedFields = encryptedFields; + return this; + } + + @Override + public String toString() { + return "DropCollectionOptions{" + + ", encryptedFields=" + encryptedFields + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/DropIndexOptions.java b/driver-core/src/main/com/mongodb/client/model/DropIndexOptions.java new file mode 100644 index 00000000000..982a0253a3d --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/DropIndexOptions.java @@ -0,0 +1,63 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * The options to apply to the command when dropping indexes. + * + * @mongodb.driver.manual reference/command/dropIndexes/ Drop indexes + * @since 3.6 + */ +public class DropIndexOptions { + private long maxTimeMS; + + /** + * Gets the maximum execution time on the server for this operation. The default is 0, which places no limit on the execution time. + * + * @param timeUnit the time unit to return the result in + * @return the maximum execution time in the given time unit + */ + public long getMaxTime(final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS); + } + + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time + * @param timeUnit the time unit, which may not be null + * @return this + */ + public DropIndexOptions maxTime(final long maxTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); + return this; + } + + @Override + public String toString() { + return "DropIndexOptions{" + + "maxTimeMS=" + maxTimeMS + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/EstimatedDocumentCountOptions.java b/driver-core/src/main/com/mongodb/client/model/EstimatedDocumentCountOptions.java new file mode 100644 index 00000000000..a69e9e58a5d --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/EstimatedDocumentCountOptions.java @@ -0,0 +1,104 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.lang.Nullable; +import org.bson.BsonString; +import org.bson.BsonValue; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * The options an estimated count operation. + * + * @since 3.8 + * @mongodb.driver.manual reference/command/count/ Count + */ +public class EstimatedDocumentCountOptions { + private long maxTimeMS; + private BsonValue comment; + + /** + * Gets the maximum execution time on the server for this operation. The default is 0, which places no limit on the execution time. + * + * @param timeUnit the time unit to return the result in + * @return the maximum execution time in the given time unit + */ + public long getMaxTime(final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS); + } + + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time + * @param timeUnit the time unit, which may not be null + * @return this + */ + public EstimatedDocumentCountOptions maxTime(final long maxTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); + return this; + } + + /** + * @return the comment for this operation. A null value means no comment is set. + * @since 4.7 + * @mongodb.server.release 4.4 + */ + @Nullable + public BsonValue getComment() { + return comment; + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.7 + * @mongodb.server.release 4.4 + */ + public EstimatedDocumentCountOptions comment(@Nullable final String comment) { + this.comment = comment == null ? null : new BsonString(comment); + return this; + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.7 + * @mongodb.server.release 4.4 + */ + public EstimatedDocumentCountOptions comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + @Override + public String toString() { + return "EstimatedCountOptions{" + + ", maxTimeMS=" + maxTimeMS + + ", comment=" + comment + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/Facet.java b/driver-core/src/main/com/mongodb/client/model/Facet.java new file mode 100644 index 00000000000..2e53cf71085 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/Facet.java @@ -0,0 +1,99 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import org.bson.conversions.Bson; + +import java.util.List; +import java.util.Objects; + +import static java.util.Arrays.asList; + +/** + * Defines a Facet for use in $facet pipeline stages. + * + * @mongodb.driver.manual reference/operator/aggregation/facet/ $facet + * @mongodb.server.release 3.4 + * @since 3.4 + */ +public class Facet { + private final String name; + private final List pipeline; + + /** + * @param name the name of this facet + * @param pipeline the facet definition pipeline + */ + public Facet(final String name, final List pipeline) { + this.name = name; + this.pipeline = pipeline; + } + + /** + * @param name the name of this facet + * @param pipeline the facet definition pipeline + */ + public Facet(final String name, final Bson... pipeline) { + this(name, asList(pipeline)); + } + + /** + * @return the facet name + */ + public String getName() { + return name; + } + + /** + * @return the pipeline definition + */ + public List getPipeline() { + return pipeline; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Facet facet = (Facet) o; + + if (!Objects.equals(name, facet.name)) { + return false; + } + return Objects.equals(pipeline, facet.pipeline); + } + + @Override + public int hashCode() { + int result = name != null ? name.hashCode() : 0; + result = 31 * result + (pipeline != null ? pipeline.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "Facet{" + + "name='" + name + '\'' + + ", pipeline=" + pipeline + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/Field.java b/driver-core/src/main/com/mongodb/client/model/Field.java new file mode 100644 index 00000000000..b8e9ab9e265 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/Field.java @@ -0,0 +1,93 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model; + +import java.util.Objects; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * Helps define new fields for the $addFields pipeline stage + * + * @param the type of the value for the new field + * @mongodb.driver.manual reference/operator/aggregation/addFields/ $addFields + * @mongodb.server.release 3.4 + * @since 3.4 + */ +public class Field { + private final String name; + private final TExpression value; + + /** + * Creates a new field definition for use in $addFields pipeline stages + * + * @param name the name of the new field + * @param value the value of the new field + * @mongodb.driver.manual reference/operator/aggregation/addFields/ $addFields + */ + public Field(final String name, final TExpression value) { + this.name = notNull("name", name); + this.value = value; + } + + /** + * @return the name of the new field + */ + public String getName() { + return name; + } + + /** + * @return the value of the new field + */ + public TExpression getValue() { + return value; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (!(o instanceof Field)) { + return false; + } + + Field field = (Field) o; + + if (!name.equals(field.name)) { + return false; + } + return Objects.equals(value, field.value); + + } + + @Override + public int hashCode() { + int result = name.hashCode(); + result = 31 * result + (value != null ? value.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "Field{" + + "name='" + name + '\'' + + ", value=" + value + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/Filters.java b/driver-core/src/main/com/mongodb/client/model/Filters.java new file mode 100644 index 00000000000..c516fe28930 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/Filters.java @@ -0,0 +1,1458 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model; + +import com.mongodb.client.model.geojson.Geometry; +import com.mongodb.client.model.geojson.Point; +import com.mongodb.client.model.search.SearchCollector; +import com.mongodb.client.model.search.SearchOperator; +import com.mongodb.client.model.search.SearchOptions; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonDocumentWriter; +import org.bson.BsonDouble; +import org.bson.BsonInt32; +import org.bson.BsonInt64; +import org.bson.BsonRegularExpression; +import org.bson.BsonString; +import org.bson.BsonType; +import org.bson.BsonValue; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.regex.Pattern; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.client.model.BuildersHelper.encodeValue; +import static java.util.Arrays.asList; +import static java.util.Collections.unmodifiableSet; + +/** + * A factory for query filters. A convenient way to use this class is to statically import all of its methods, which allows usage like: + *
+ * <pre>
+ *    collection.find(and(eq("x", 1), lt("y", 3)));
+ * </pre>
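+ * <p>Filters compose freely; for example (an illustrative query):</p>
+ * <pre>
+ *    collection.find(or(eq("status", "A"), gt("qty", 30)));
+ * </pre>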
+ * @since 3.0 + */ +public final class Filters { + + private Filters() { + } + + /** + * Creates a filter that matches all documents where the value of _id field equals the specified value. Note that this doesn't + * actually generate a $eq operator, as the query language doesn't require it. + * + * @param value the value, which may be null + * @param the value type + * @return the filter + * @mongodb.driver.manual reference/operator/query/eq $eq + * + * @since 3.4 + */ + public static Bson eq(@Nullable final TItem value) { + return eq("_id", value); + } + + /** + * Creates a filter that matches all documents where the value of the field name equals the specified value. Note that this doesn't + * actually generate a $eq operator, as the query language doesn't require it. + * + * @param fieldName the field name + * @param value the value, which may be null + * @param the value type + * @return the filter + * @mongodb.driver.manual reference/operator/query/eq $eq + */ + public static Bson eq(final String fieldName, @Nullable final TItem value) { + return new SimpleEncodingFilter<>(fieldName, value); + } + + /** + * Creates a filter that matches all documents where the value of the field name does not equal the specified value. + * + * @param fieldName the field name + * @param value the value, which may be null + * @param the value type + * @return the filter + * @mongodb.driver.manual reference/operator/query/ne $ne + */ + public static Bson ne(final String fieldName, @Nullable final TItem value) { + return new OperatorFilter<>("$ne", fieldName, value); + } + + /** + * Creates a filter that matches all documents where the value of the given field is greater than the specified value. + * + * @param fieldName the field name + * @param value the value + * @param the value type + * @return the filter + * @mongodb.driver.manual reference/operator/query/gt $gt + */ + public static Bson gt(final String fieldName, final TItem value) { + return new OperatorFilter<>("$gt", fieldName, value); + } + + /** + * Creates a filter that matches all documents where the value of the given field is less than the specified value. + * + * @param fieldName the field name + * @param value the value + * @param the value type + * @return the filter + * @mongodb.driver.manual reference/operator/query/lt $lt + */ + public static Bson lt(final String fieldName, final TItem value) { + return new OperatorFilter<>("$lt", fieldName, value); + } + + /** + * Creates a filter that matches all documents where the value of the given field is greater than or equal to the specified value. + * + * @param fieldName the field name + * @param value the value + * @param the value type + * @return the filter + * @mongodb.driver.manual reference/operator/query/gte $gte + */ + public static Bson gte(final String fieldName, final TItem value) { + return new OperatorFilter<>("$gte", fieldName, value); + } + + /** + * Creates a filter that matches all documents where the value of the given field is less than or equal to the specified value. + * + * @param fieldName the field name + * @param value the value + * @param the value type + * @return the filter + * @mongodb.driver.manual reference/operator/query/lte $lte + */ + public static Bson lte(final String fieldName, final TItem value) { + return new OperatorFilter<>("$lte", fieldName, value); + } + + /** + * Creates a filter that matches all documents where the value of a field equals any value in the list of specified values. 
+ * + * @param fieldName the field name + * @param values the list of values + * @param the value type + * @return the filter + * @mongodb.driver.manual reference/operator/query/in $in + */ + @SafeVarargs + @SuppressWarnings("varargs") + public static Bson in(final String fieldName, final TItem... values) { + return in(fieldName, asList(values)); + } + + /** + * Creates a filter that matches all documents where the value of a field equals any value in the list of specified values. + * + * @param fieldName the field name + * @param values the list of values + * @param the value type + * @return the filter + * @mongodb.driver.manual reference/operator/query/in $in + */ + public static Bson in(final String fieldName, final Iterable values) { + return new IterableOperatorFilter<>(fieldName, "$in", values); + } + + /** + * Creates a filter that matches all documents where the value of a field does not equal any of the specified values or does not exist. + * + * @param fieldName the field name + * @param values the list of values + * @param the value type + * @return the filter + * @mongodb.driver.manual reference/operator/query/nin $nin + */ + @SafeVarargs + @SuppressWarnings("varargs") + public static Bson nin(final String fieldName, final TItem... values) { + return nin(fieldName, asList(values)); + } + + /** + * Creates a filter that matches all documents where the value of a field does not equal any of the specified values or does not exist. + * + * @param fieldName the field name + * @param values the list of values + * @param the value type + * @return the filter + * @mongodb.driver.manual reference/operator/query/nin $nin + */ + public static Bson nin(final String fieldName, final Iterable values) { + return new IterableOperatorFilter<>(fieldName, "$nin", values); + } + + /** + * Creates a filter that performs a logical AND of the provided list of filters. + * + *
+     * <pre>
+     *    and(eq("x", 1), lt("y", 3))
+     * </pre>
+     *
+     * will generate a MongoDB query like:
+     * <pre>
+     *    { $and: [{x : 1}, {y : {$lt : 3}}]}
+     * </pre>
+     *
+     * @param filters the list of filters to and together
+     * @return the filter
+     * @mongodb.driver.manual reference/operator/query/and $and
+     */
+    public static Bson and(final Iterable<Bson> filters) {
+        return new AndFilter(filters);
+    }
+
+    /**
+     * Creates a filter that performs a logical AND of the provided list of filters.
+     *
+     * <pre>
+     *    and(eq("x", 1), lt("y", 3))
+     * </pre>
+     *
+     * will generate a MongoDB query like:
+     * <pre>
+     *    { $and: [{x : 1}, {y : {$lt : 3}}]}
+     * </pre>
+     *
+     * @param filters the list of filters to and together
+     * @return the filter
+     * @mongodb.driver.manual reference/operator/query/and $and
+     */
+    public static Bson and(final Bson... filters) {
+        return and(asList(filters));
+    }
+
+    /**
+     * Creates a filter that performs a logical OR of the provided list of filters.
+     *
+     * @param filters the list of filters to or together
+     * @return the filter
+     * @mongodb.driver.manual reference/operator/query/or $or
+     */
+    public static Bson or(final Iterable<Bson> filters) {
+        return new OrNorFilter(OrNorFilter.Operator.OR, filters);
+    }
+
+    /**
+     * Creates a filter that performs a logical OR of the provided list of filters.
+     *
+     * @param filters the list of filters to or together
+     * @return the filter
+     * @mongodb.driver.manual reference/operator/query/or $or
+     */
+    public static Bson or(final Bson... filters) {
+        return or(asList(filters));
+    }
+
+    /**
+     * Creates a filter that matches all documents that do not match the passed-in filter.
+     * Requires the field name to be passed as part of the value passed in and lifts it to create a valid "$not" query:
+     *
+     * <pre>
+     *    not(eq("x", 1))
+     * </pre>
+     *
+     * will generate a MongoDB query like:
+     * <pre>
+     *    {x : $not: {$eq : 1}}
+     * </pre>
+ * + * @param filter the value + * @return the filter + * @mongodb.driver.manual reference/operator/query/not $not + */ + public static Bson not(final Bson filter) { + return new NotFilter(filter); + } + + /** + * Creates a filter that performs a logical NOR operation on all the specified filters. + * + * @param filters the list of values + * @return the filter + * @mongodb.driver.manual reference/operator/query/nor $nor + */ + public static Bson nor(final Bson... filters) { + return nor(asList(filters)); + } + + /** + * Creates a filter that performs a logical NOR operation on all the specified filters. + * + * @param filters the list of values + * @return the filter + * @mongodb.driver.manual reference/operator/query/nor $nor + */ + public static Bson nor(final Iterable filters) { + return new OrNorFilter(OrNorFilter.Operator.NOR, filters); + } + + /** + * Creates a filter that matches all documents that contain the given field. + * + * @param fieldName the field name + * @return the filter + * @mongodb.driver.manual reference/operator/query/exists $exists + */ + public static Bson exists(final String fieldName) { + return exists(fieldName, true); + } + + /** + * Creates a filter that matches all documents that either contain or do not contain the given field, depending on the value of the + * exists parameter. + * + * @param fieldName the field name + * @param exists true to check for existence, false to check for absence + * @return the filter + * @mongodb.driver.manual reference/operator/query/exists $exists + */ + + public static Bson exists(final String fieldName, final boolean exists) { + return new OperatorFilter<>("$exists", fieldName, BsonBoolean.valueOf(exists)); + } + + /** + * Creates a filter that matches all documents where the value of the field is of the specified BSON type. + * + * @param fieldName the field name + * @param type the BSON type + * @return the filter + * @mongodb.driver.manual reference/operator/query/type $type + */ + public static Bson type(final String fieldName, final BsonType type) { + return new OperatorFilter<>("$type", fieldName, new BsonInt32(type.getValue())); + } + + /** + * Creates a filter that matches all documents where the value of the field is of the specified BSON type. + * + * @param fieldName the field name + * @param type the string representation of the BSON type + * @return the filter + * @mongodb.driver.manual reference/operator/query/type $type + */ + public static Bson type(final String fieldName, final String type) { + return new OperatorFilter<>("$type", fieldName, new BsonString(type)); + } + + /** + * Creates a filter that matches all documents where the value of a field divided by a divisor has the specified remainder (i.e. perform + * a modulo operation to select documents). + * + * @param fieldName the field name + * @param divisor the modulus + * @param remainder the remainder + * @return the filter + * @mongodb.driver.manual reference/operator/query/mod $mod + */ + public static Bson mod(final String fieldName, final long divisor, final long remainder) { + return new OperatorFilter<>("$mod", fieldName, new BsonArray(asList(new BsonInt64(divisor), new BsonInt64(remainder)))); + } + + /** + * Creates a filter that matches all documents where the value of the field matches the given regular expression pattern. 
+ * + * @param fieldName the field name + * @param pattern the pattern + * @return the filter + * @mongodb.driver.manual reference/operator/query/regex $regex + */ + public static Bson regex(final String fieldName, final String pattern) { + return regex(fieldName, pattern, null); + } + + /** + * Creates a filter that matches all documents where the value of the field matches the given regular expression pattern with the given + * options applied. + * + * @param fieldName the field name + * @param pattern the pattern + * @param options the options + * @return the filter + * @mongodb.driver.manual reference/operator/query/regex $regex + */ + public static Bson regex(final String fieldName, final String pattern, @Nullable final String options) { + notNull("pattern", pattern); + return new SimpleFilter(fieldName, new BsonRegularExpression(pattern, options)); + } + + /** + * Creates a filter that matches all documents where the value of the field matches the given regular expression pattern. + * + * @param fieldName the field name + * @param pattern the pattern + * @return the filter + * @mongodb.driver.manual reference/operator/query/regex $regex + */ + public static Bson regex(final String fieldName, final Pattern pattern) { + notNull("pattern", pattern); + return new SimpleEncodingFilter<>(fieldName, pattern); + } + + /** + * Creates a filter that matches all documents matching the given search term. + * You may use {@link Projections#metaTextScore(String)} to extract the relevance score assigned to each matched document. + *

+     * <p>{@link Aggregates#search(SearchOperator, SearchOptions)} / {@link Aggregates#search(SearchCollector, SearchOptions)}
+     * is a more powerful full-text search alternative.</p>
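+     * <p>For example (an illustrative query):</p>
+     * <pre>
+     *    collection.find(text("mongodb"))
+     * </pre>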

+     *
+     * @param search the search term
+     * @return the filter
+     * @mongodb.driver.manual reference/operator/query/text $text
+     */
+    public static Bson text(final String search) {
+        notNull("search", search);
+        return text(search, new TextSearchOptions());
+    }
+
+    /**
+     * Creates a filter that matches all documents matching the given search term with the given text search options.
+     * You may use {@link Projections#metaTextScore(String)} to extract the relevance score assigned to each matched document.
+     *

+     * <p>{@link Aggregates#search(SearchOperator, SearchOptions)} / {@link Aggregates#search(SearchCollector, SearchOptions)}
+     * is a more powerful full-text search alternative.</p>
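+     * <p>For example, an illustrative case-sensitive search (assuming a {@code caseSensitive} builder method on
+     * {@code TextSearchOptions}):</p>
+     * <pre>
+     *    collection.find(text("mongodb", new TextSearchOptions().caseSensitive(true)))
+     * </pre>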

+ * + * @param search the search term + * @param textSearchOptions the text search options to use + * @return the filter + * @mongodb.driver.manual reference/operator/query/text $text + * @since 3.2 + */ + public static Bson text(final String search, final TextSearchOptions textSearchOptions) { + notNull("search", search); + notNull("textSearchOptions", textSearchOptions); + return new TextFilter(search, textSearchOptions); + } + + /** + * Creates a filter that matches all documents for which the given expression is true. + * + * @param javaScriptExpression the JavaScript expression + * @return the filter + * @mongodb.driver.manual reference/operator/query/where $where + */ + public static Bson where(final String javaScriptExpression) { + notNull("javaScriptExpression", javaScriptExpression); + return new BsonDocument("$where", new BsonString(javaScriptExpression)); + } + + /** + * Allows the use of aggregation expressions within the query language. + * + * @param expression the aggregation expression + * @param the expression type + * @return the filter + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual reference/operator/query/expr/ $expr + */ + public static Bson expr(final TExpression expression) { + return new SimpleEncodingFilter<>("$expr", expression); + } + + /** + * Creates a filter that matches all documents where the value of a field is an array that contains all the specified values. + * + * @param fieldName the field name + * @param values the list of values + * @param the value type + * @return the filter + * @mongodb.driver.manual reference/operator/query/all $all + */ + @SafeVarargs + @SuppressWarnings("varargs") + public static Bson all(final String fieldName, final TItem... values) { + return all(fieldName, asList(values)); + } + + /** + * Creates a filter that matches all documents where the value of a field is an array that contains all the specified values. + * + * @param fieldName the field name + * @param values the list of values + * @param the value type + * @return the filter + * @mongodb.driver.manual reference/operator/query/all $all + */ + public static Bson all(final String fieldName, final Iterable values) { + return new IterableOperatorFilter<>(fieldName, "$all", values); + } + + /** + * Creates a filter that matches all documents containing a field that is an array where at least one member of the array matches the + * given filter. + * + * @param fieldName the field name + * @param filter the filter to apply to each element + * @return the filter + * @mongodb.driver.manual reference/operator/query/elemMatch $elemMatch + */ + public static Bson elemMatch(final String fieldName, final Bson filter) { + return new Bson() { + @Override + public BsonDocument toBsonDocument(final Class documentClass, final CodecRegistry codecRegistry) { + return new BsonDocument(fieldName, new BsonDocument("$elemMatch", filter.toBsonDocument(documentClass, codecRegistry))); + } + }; + } + + /** + * Creates a filter that matches all documents where the value of a field is an array of the specified size. + * + * @param fieldName the field name + * @param size the size of the array + * @return the filter + * @mongodb.driver.manual reference/operator/query/size $size + */ + public static Bson size(final String fieldName, final int size) { + return new OperatorFilter<>("$size", fieldName, size); + } + + /** + * Creates a filter that matches all documents where all of the bit positions are clear in the field. 
+ * + * @param fieldName the field name + * @param bitmask the bitmask + * @return the filter + * @mongodb.server.release 3.2 + * @mongodb.driver.manual reference/operator/query/bitsAllClear $bitsAllClear + * @since 3.2 + */ + public static Bson bitsAllClear(final String fieldName, final long bitmask) { + return new OperatorFilter<>("$bitsAllClear", fieldName, bitmask); + } + + /** + * Creates a filter that matches all documents where all of the bit positions are set in the field. + * + * @param fieldName the field name + * @param bitmask the bitmask + * @return the filter + * @mongodb.server.release 3.2 + * @mongodb.driver.manual reference/operator/query/bitsAllSet $bitsAllSet + * @since 3.2 + */ + public static Bson bitsAllSet(final String fieldName, final long bitmask) { + return new OperatorFilter<>("$bitsAllSet", fieldName, bitmask); + } + + /** + * Creates a filter that matches all documents where any of the bit positions are clear in the field. + * + * @param fieldName the field name + * @param bitmask the bitmask + * @return the filter + * @mongodb.server.release 3.2 + * @mongodb.driver.manual reference/operator/query/bitsAllClear $bitsAllClear + * @since 3.2 + */ + public static Bson bitsAnyClear(final String fieldName, final long bitmask) { + return new OperatorFilter<>("$bitsAnyClear", fieldName, bitmask); + } + + /** + * Creates a filter that matches all documents where any of the bit positions are set in the field. + * + * @param fieldName the field name + * @param bitmask the bitmask + * @return the filter + * @mongodb.server.release 3.2 + * @mongodb.driver.manual reference/operator/query/bitsAnySet $bitsAnySet + * @since 3.2 + */ + public static Bson bitsAnySet(final String fieldName, final long bitmask) { + return new OperatorFilter<>("$bitsAnySet", fieldName, bitmask); + } + + /** + * Creates a filter that matches all documents containing a field with geospatial data that exists entirely within the specified shape. + * + * @param fieldName the field name + * @param geometry the bounding GeoJSON geometry object + * @return the filter + * @since 3.1 + * @mongodb.driver.manual reference/operator/query/geoWithin/ $geoWithin + * @mongodb.server.release 2.4 + */ + public static Bson geoWithin(final String fieldName, final Geometry geometry) { + return new GeometryOperatorFilter<>("$geoWithin", fieldName, geometry); + } + + /** + * Creates a filter that matches all documents containing a field with geospatial data that exists entirely within the specified shape. + * + * @param fieldName the field name + * @param geometry the bounding GeoJSON geometry object + * @return the filter + * @since 3.1 + * @mongodb.driver.manual reference/operator/query/geoWithin/ $geoWithin + * @mongodb.server.release 2.4 + */ + public static Bson geoWithin(final String fieldName, final Bson geometry) { + return new GeometryOperatorFilter<>("$geoWithin", fieldName, geometry); + } + + /** + * Creates a filter that matches all documents containing a field with grid coordinates data that exist entirely within the specified + * box. 
+ * + * @param fieldName the field name + * @param lowerLeftX the lower left x coordinate of the box + * @param lowerLeftY the lower left y coordinate of the box + * @param upperRightX the upper left x coordinate of the box + * @param upperRightY the upper left y coordinate of the box + * @return the filter + * @mongodb.driver.manual reference/operator/query/geoWithin/ $geoWithin + * @mongodb.driver.manual reference/operator/query/box/#op._S_box $box + * @mongodb.server.release 2.4 + * @since 3.1 + */ + public static Bson geoWithinBox(final String fieldName, final double lowerLeftX, final double lowerLeftY, final double upperRightX, + final double upperRightY) { + BsonDocument box = new BsonDocument("$box", + new BsonArray(asList(new BsonArray(asList(new BsonDouble(lowerLeftX), + new BsonDouble(lowerLeftY))), + new BsonArray(asList(new BsonDouble(upperRightX), + new BsonDouble(upperRightY)))))); + return new OperatorFilter<>("$geoWithin", fieldName, box); + } + + /** + * Creates a filter that matches all documents containing a field with grid coordinates data that exist entirely within the specified + * polygon. + * + * @param fieldName the field name + * @param points a list of pairs of x, y coordinates. Any extra dimensions are ignored + * @return the filter + * @mongodb.driver.manual reference/operator/query/geoWithin/ $geoWithin + * @mongodb.driver.manual reference/operator/query/polygon/#op._S_polygon $polygon + * @mongodb.server.release 2.4 + * @since 3.1 + */ + public static Bson geoWithinPolygon(final String fieldName, final List> points) { + BsonArray pointsArray = new BsonArray(points.size()); + for (List point : points) { + pointsArray.add(new BsonArray(asList(new BsonDouble(point.get(0)), new BsonDouble(point.get(1))))); + } + BsonDocument polygon = new BsonDocument("$polygon", pointsArray); + return new OperatorFilter<>("$geoWithin", fieldName, polygon); + } + + /** + * Creates a filter that matches all documents containing a field with grid coordinates data that exist entirely within the specified + * circle. + * + * @param fieldName the field name + * @param x the x coordinate of the circle + * @param y the y coordinate of the circle + * @param radius the radius of the circle, as measured in the units used by the coordinate system + * @return the filter + * @mongodb.driver.manual reference/operator/query/geoWithin/ $geoWithin + * @mongodb.driver.manual reference/operator/query/center/#op._S_center $center + * @mongodb.server.release 2.4 + * @since 3.1 + */ + public static Bson geoWithinCenter(final String fieldName, final double x, final double y, final double radius) { + BsonDocument center = new BsonDocument("$center", + new BsonArray(asList(new BsonArray(asList(new BsonDouble(x), + new BsonDouble(y))), + new BsonDouble(radius)))); + return new OperatorFilter<>("$geoWithin", fieldName, center); + } + + /** + * Creates a filter that matches all documents containing a field with geospatial data (GeoJSON or legacy coordinate pairs) that exist + * entirely within the specified circle, using spherical geometry. If using longitude and latitude, specify longitude first. 
+ * + * @param fieldName the field name + * @param x the x coordinate of the circle + * @param y the y coordinate of the circle + * @param radius the radius of the circle, in radians + * @return the filter + * @mongodb.driver.manual reference/operator/query/geoWithin/ $geoWithin + * @mongodb.driver.manual reference/operator/query/centerSphere/#op._S_centerSphere $centerSphere + * @mongodb.server.release 2.4 + * @since 3.1 + */ + public static Bson geoWithinCenterSphere(final String fieldName, final double x, final double y, final double radius) { + BsonDocument centerSphere = new BsonDocument("$centerSphere", + new BsonArray(asList(new BsonArray(asList(new BsonDouble(x), + new BsonDouble(y))), + new BsonDouble(radius)))); + return new OperatorFilter<>("$geoWithin", fieldName, centerSphere); + } + + /** + * Creates a filter that matches all documents containing a field with geospatial data that intersects with the specified shape. + * + * @param fieldName the field name + * @param geometry the bounding GeoJSON geometry object + * @return the filter + * @since 3.1 + * @mongodb.driver.manual reference/operator/query/geoIntersects/ $geoIntersects + * @mongodb.server.release 2.4 + */ + public static Bson geoIntersects(final String fieldName, final Bson geometry) { + return new GeometryOperatorFilter<>("$geoIntersects", fieldName, geometry); + } + + /** + * Creates a filter that matches all documents containing a field with geospatial data that intersects with the specified shape. + * + * @param fieldName the field name + * @param geometry the bounding GeoJSON geometry object + * @return the filter + * @since 3.1 + * @mongodb.driver.manual reference/operator/query/geoIntersects/ $geoIntersects + * @mongodb.server.release 2.4 + */ + public static Bson geoIntersects(final String fieldName, final Geometry geometry) { + return new GeometryOperatorFilter<>("$geoIntersects", fieldName, geometry); + } + + /** + * Creates a filter that matches all documents containing a field with geospatial data that is near the specified GeoJSON point. + * + * @param fieldName the field name + * @param geometry the bounding GeoJSON geometry object + * @param maxDistance the maximum distance from the point, in meters. It may be null. + * @param minDistance the minimum distance from the point, in meters. It may be null. + * @return the filter + * @since 3.1 + * @mongodb.driver.manual reference/operator/query/near/ $near + * @mongodb.server.release 2.4 + */ + public static Bson near(final String fieldName, final Point geometry, @Nullable final Double maxDistance, + @Nullable final Double minDistance) { + return new GeometryOperatorFilter<>("$near", fieldName, geometry, maxDistance, minDistance); + } + + /** + * Creates a filter that matches all documents containing a field with geospatial data that is near the specified GeoJSON point. + * + * @param fieldName the field name + * @param geometry the bounding GeoJSON geometry object + * @param maxDistance the maximum distance from the point, in meters. It may be null. + * @param minDistance the minimum distance from the point, in meters. It may be null. 
+ * @return the filter + * @since 3.1 + * @mongodb.driver.manual reference/operator/query/near/ $near + * @mongodb.server.release 2.4 + */ + public static Bson near(final String fieldName, final Bson geometry, @Nullable final Double maxDistance, + @Nullable final Double minDistance) { + return new GeometryOperatorFilter<>("$near", fieldName, geometry, maxDistance, minDistance); + } + + /** + * Creates a filter that matches all documents containing a field with geospatial data that is near the specified point. + * + * @param fieldName the field name + * @param x the x coordinate + * @param y the y coordinate + * @param maxDistance the maximum distance from the point, in radians. It may be null. + * @param minDistance the minimum distance from the point, in radians. It may be null. + * @return the filter + * @since 3.1 + * @mongodb.driver.manual reference/operator/query/near/ $near + * @mongodb.server.release 2.4 + */ + public static Bson near(final String fieldName, final double x, final double y, @Nullable final Double maxDistance, + @Nullable final Double minDistance) { + return createNearFilterDocument(fieldName, x, y, maxDistance, minDistance, "$near"); + } + + /** + * Creates a filter that matches all documents containing a field with geospatial data that is near the specified GeoJSON point using + * spherical geometry. + * + * @param fieldName the field name + * @param geometry the bounding GeoJSON geometry object + * @param maxDistance the maximum distance from the point, in meters. It may be null. + * @param minDistance the minimum distance from the point, in meters. It may be null. + * @return the filter + * @since 3.1 + * @mongodb.driver.manual reference/operator/query/near/ $near + * @mongodb.server.release 2.4 + */ + public static Bson nearSphere(final String fieldName, final Point geometry, @Nullable final Double maxDistance, + @Nullable final Double minDistance) { + return new GeometryOperatorFilter<>("$nearSphere", fieldName, geometry, maxDistance, minDistance); + } + + /** + * Creates a filter that matches all documents containing a field with geospatial data that is near the specified GeoJSON point using + * spherical geometry. + * + * @param fieldName the field name + * @param geometry the bounding GeoJSON geometry object + * @param maxDistance the maximum distance from the point, in meters. It may be null. + * @param minDistance the minimum distance from the point, in meters. It may be null. + * @return the filter + * @since 3.1 + * @mongodb.driver.manual reference/operator/query/near/ $near + * @mongodb.server.release 2.4 + */ + public static Bson nearSphere(final String fieldName, final Bson geometry, @Nullable final Double maxDistance, + @Nullable final Double minDistance) { + return new GeometryOperatorFilter<>("$nearSphere", fieldName, geometry, maxDistance, minDistance); + } + + /** + * Creates a filter that matches all documents containing a field with geospatial data that is near the specified point using + * spherical geometry. + * + * @param fieldName the field name + * @param x the x coordinate + * @param y the y coordinate + * @param maxDistance the maximum distance from the point, in radians. It may be null. + * @param minDistance the minimum distance from the point, in radians. It may be null. 
+ * @return the filter + * @since 3.1 + * @mongodb.driver.manual reference/operator/query/near/ $near + * @mongodb.server.release 2.4 + */ + public static Bson nearSphere(final String fieldName, final double x, final double y, @Nullable final Double maxDistance, + @Nullable final Double minDistance) { + return createNearFilterDocument(fieldName, x, y, maxDistance, minDistance, "$nearSphere"); + } + + /** + * Creates a filter that matches all documents that validate against the given JSON schema document. + * + * @param schema the JSON schema to validate against + * @return the filter + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual reference/operator/query/jsonSchema/ $jsonSchema + */ + public static Bson jsonSchema(final Bson schema) { + return new SimpleEncodingFilter<>("$jsonSchema", schema); + } + + /** + * Creates an empty filter that will match all documents. + * + * @return the filter + * @since 4.2 + */ + public static Bson empty() { + return new BsonDocument(); + } + + + private static Bson createNearFilterDocument(final String fieldName, final double x, final double y, @Nullable final Double maxDistance, + @Nullable final Double minDistance, final String operator) { + BsonDocument nearFilter = new BsonDocument(operator, new BsonArray(asList(new BsonDouble(x), new BsonDouble(y)))); + if (maxDistance != null) { + nearFilter.append("$maxDistance", new BsonDouble(maxDistance)); + } + if (minDistance != null) { + nearFilter.append("$minDistance", new BsonDouble(minDistance)); + } + return new BsonDocument(fieldName, nearFilter); + } + + private static String operatorFilterToString(final String fieldName, final String operator, final Object value) { + return "Operator Filter{" + + "fieldName='" + fieldName + '\'' + + ", operator='" + operator + '\'' + + ", value=" + value + + '}'; + } + + private static final class SimpleFilter implements Bson { + private final String fieldName; + private final BsonValue value; + + private SimpleFilter(final String fieldName, final BsonValue value) { + this.fieldName = notNull("fieldName", fieldName); + this.value = notNull("value", value); + } + + @Override + public BsonDocument toBsonDocument(final Class documentClass, final CodecRegistry codecRegistry) { + return new BsonDocument(fieldName, value); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + SimpleFilter that = (SimpleFilter) o; + + if (!fieldName.equals(that.fieldName)) { + return false; + } + return value.equals(that.value); + } + + @Override + public int hashCode() { + int result = fieldName.hashCode(); + result = 31 * result + value.hashCode(); + return result; + } + + @Override + public String toString() { + return operatorFilterToString(fieldName, "$eq", value); + } + } + + private static final class OperatorFilter implements Bson { + private final String operatorName; + private final String fieldName; + private final TItem value; + + OperatorFilter(final String operatorName, final String fieldName, @Nullable final TItem value) { + this.operatorName = notNull("operatorName", operatorName); + this.fieldName = notNull("fieldName", fieldName); + this.value = value; + } + + @Override + public BsonDocument toBsonDocument(final Class documentClass, final CodecRegistry codecRegistry) { + BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument()); + + writer.writeStartDocument(); + writer.writeName(fieldName); + writer.writeStartDocument(); 
+ writer.writeName(operatorName); + encodeValue(writer, value, codecRegistry); + writer.writeEndDocument(); + writer.writeEndDocument(); + + return writer.getDocument(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + OperatorFilter that = (OperatorFilter) o; + + if (!operatorName.equals(that.operatorName)) { + return false; + } + if (!fieldName.equals(that.fieldName)) { + return false; + } + return Objects.equals(value, that.value); + } + + @Override + public int hashCode() { + int result = operatorName.hashCode(); + result = 31 * result + fieldName.hashCode(); + result = 31 * result + (value != null ? value.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return operatorFilterToString(fieldName, operatorName, value); + } + } + + private static class AndFilter implements Bson { + private final Iterable filters; + + AndFilter(final Iterable filters) { + this.filters = notNull("filters", filters); + } + + @Override + public BsonDocument toBsonDocument(final Class documentClass, final CodecRegistry codecRegistry) { + BsonArray clauses = new BsonArray(); + for (Bson filter : filters) { + clauses.add(filter.toBsonDocument(documentClass, codecRegistry)); + } + + return new BsonDocument("$and", clauses); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + AndFilter andFilter = (AndFilter) o; + + return filters.equals(andFilter.filters); + } + + @Override + public int hashCode() { + return filters.hashCode(); + } + + @Override + public String toString() { + return "And Filter{" + + "filters=" + filters + + '}'; + } + } + + private static class OrNorFilter implements Bson { + private enum Operator { + OR("$or", "Or"), + NOR("$nor", "Nor"); + + private final String name; + private final String toStringName; + + Operator(final String name, final String toStringName) { + this.name = name; + this.toStringName = toStringName; + } + } + + private final Operator operator; + private final Iterable filters; + + OrNorFilter(final Operator operator, final Iterable filters) { + this.operator = notNull("operator", operator); + this.filters = notNull("filters", filters); + } + + @Override + public BsonDocument toBsonDocument(final Class documentClass, final CodecRegistry codecRegistry) { + BsonDocument orRenderable = new BsonDocument(); + + BsonArray filtersArray = new BsonArray(); + for (Bson filter : filters) { + filtersArray.add(filter.toBsonDocument(documentClass, codecRegistry)); + } + + orRenderable.put(operator.name, filtersArray); + + return orRenderable; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + OrNorFilter that = (OrNorFilter) o; + + if (operator != that.operator) { + return false; + } + return filters.equals(that.filters); + } + + @Override + public int hashCode() { + int result = operator.hashCode(); + result = 31 * result + filters.hashCode(); + return result; + } + + @Override + public String toString() { + return operator.toStringName + " Filter{" + + "filters=" + filters + + '}'; + } + } + + private static class IterableOperatorFilter implements Bson { + private final String fieldName; + private final String operatorName; + private final Iterable values; + + IterableOperatorFilter(final String 
fieldName, final String operatorName, final Iterable values) { + this.fieldName = notNull("fieldName", fieldName); + this.operatorName = notNull("operatorName", operatorName); + this.values = notNull("values", values); + } + + @Override + public BsonDocument toBsonDocument(final Class documentClass, final CodecRegistry codecRegistry) { + BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument()); + + writer.writeStartDocument(); + writer.writeName(fieldName); + + writer.writeStartDocument(); + writer.writeName(operatorName); + writer.writeStartArray(); + for (TItem value : values) { + encodeValue(writer, value, codecRegistry); + } + writer.writeEndArray(); + writer.writeEndDocument(); + + writer.writeEndDocument(); + + return writer.getDocument(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + IterableOperatorFilter that = (IterableOperatorFilter) o; + + if (!fieldName.equals(that.fieldName)) { + return false; + } + if (!operatorName.equals(that.operatorName)) { + return false; + } + return values.equals(that.values); + } + + @Override + public int hashCode() { + int result = fieldName.hashCode(); + result = 31 * result + operatorName.hashCode(); + result = 31 * result + values.hashCode(); + return result; + } + + @Override + public String toString() { + return operatorFilterToString(fieldName, operatorName, values); + } + } + + private static class SimpleEncodingFilter implements Bson { + private final String fieldName; + private final TItem value; + + SimpleEncodingFilter(final String fieldName, @Nullable final TItem value) { + this.fieldName = notNull("fieldName", fieldName); + this.value = value; + } + + @Override + public BsonDocument toBsonDocument(final Class documentClass, final CodecRegistry codecRegistry) { + BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument()); + + writer.writeStartDocument(); + writer.writeName(fieldName); + encodeValue(writer, value, codecRegistry); + writer.writeEndDocument(); + + return writer.getDocument(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + SimpleEncodingFilter that = (SimpleEncodingFilter) o; + + if (!fieldName.equals(that.fieldName)) { + return false; + } + return Objects.equals(value, that.value); + } + + @Override + public int hashCode() { + int result = fieldName.hashCode(); + result = 31 * result + (value != null ? 
value.hashCode() : 0);
+            return result;
+        }
+
+        @Override
+        public String toString() {
+            return "Filter{"
+                    + "fieldName='" + fieldName + '\''
+                    + ", value=" + value
+                    + '}';
+        }
+    }
+
+    private static class NotFilter implements Bson {
+        private static final Set<String> DBREF_KEYS = unmodifiableSet(new HashSet<>(asList("$ref", "$id")));
+        private static final Set<String> DBREF_KEYS_WITH_DB = unmodifiableSet(new HashSet<>(asList("$ref", "$id", "$db")));
+        private final Bson filter;
+
+        NotFilter(final Bson filter) {
+            this.filter = notNull("filter", filter);
+        }
+
+        @Override
+        public <TDocument> BsonDocument toBsonDocument(final Class<TDocument> documentClass, final CodecRegistry codecRegistry) {
+            BsonDocument filterDocument = filter.toBsonDocument(documentClass, codecRegistry);
+            if (filterDocument.size() == 1) {
+                Map.Entry<String, BsonValue> entry = filterDocument.entrySet().iterator().next();
+                return createFilter(entry.getKey(), entry.getValue());
+            } else {
+                BsonArray values = new BsonArray(filterDocument.size());
+                for (Map.Entry<String, BsonValue> docs : filterDocument.entrySet()) {
+                    values.add(new BsonDocument(docs.getKey(), docs.getValue()));
+                }
+                return createFilter("$and", values);
+            }
+        }
+
+        private boolean containsOperator(final BsonDocument value) {
+            Set<String> keys = value.keySet();
+            if (keys.equals(DBREF_KEYS) || keys.equals(DBREF_KEYS_WITH_DB)) {
+                return false;
+            }
+
+            for (String key : keys) {
+                if (key.startsWith("$")) {
+                    return true;
+                }
+            }
+
+            return false;
+        }
+
+        private BsonDocument createFilter(final String fieldName, final BsonValue value) {
+            if (fieldName.startsWith("$")) {
+                return new BsonDocument("$not", new BsonDocument(fieldName, value));
+            } else if ((value.isDocument() && containsOperator(value.asDocument())) || value.isRegularExpression()) {
+                return new BsonDocument(fieldName, new BsonDocument("$not", value));
+            }
+            return new BsonDocument(fieldName, new BsonDocument("$not", new BsonDocument("$eq", value)));
+        }
+
+        @Override
+        public boolean equals(final Object o) {
+            if (this == o) {
+                return true;
+            }
+            if (o == null || getClass() != o.getClass()) {
+                return false;
+            }
+
+            NotFilter notFilter = (NotFilter) o;
+
+            return filter.equals(notFilter.filter);
+        }
+
+        @Override
+        public int hashCode() {
+            return filter.hashCode();
+        }
+
+        @Override
+        public String toString() {
+            return "Not Filter{"
+                    + "filter=" + filter
+                    + '}';
+        }
+    }
+
+    private static class GeometryOperatorFilter<TItem> implements Bson {
+        private final String operatorName;
+        private final String fieldName;
+        private final TItem geometry;
+        private final Double maxDistance;
+        private final Double minDistance;
+
+        GeometryOperatorFilter(final String operatorName, final String fieldName, final TItem geometry) {
+            this(operatorName, fieldName, geometry, null, null);
+        }
+
+        GeometryOperatorFilter(final String operatorName, final String fieldName, final TItem geometry,
+                               @Nullable final Double maxDistance, @Nullable final Double minDistance) {
+            this.operatorName = operatorName;
+            this.fieldName = notNull("fieldName", fieldName);
+            this.geometry = notNull("geometry", geometry);
+            this.maxDistance = maxDistance;
+            this.minDistance = minDistance;
+        }
+
+        @Override
+        public <TDocument> BsonDocument toBsonDocument(final Class<TDocument> documentClass, final CodecRegistry codecRegistry) {
+            BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument());
+            writer.writeStartDocument();
+            writer.writeName(fieldName);
+            writer.writeStartDocument();
+            writer.writeName(operatorName);
+            writer.writeStartDocument();
+            writer.writeName("$geometry");
+            encodeValue(writer, geometry, codecRegistry);
+            if (maxDistance != null) {
+                writer.writeDouble("$maxDistance", maxDistance);
+            }
+            if (minDistance != null) {
+                writer.writeDouble("$minDistance", minDistance);
+            }
+            writer.writeEndDocument();
+            writer.writeEndDocument();
+            writer.writeEndDocument();
+
+            return writer.getDocument();
+        }
+
+        @Override
+        public boolean equals(final Object o) {
+            if (this == o) {
+                return true;
+            }
+            if (o == null || getClass() != o.getClass()) {
+                return false;
+            }
+
+            GeometryOperatorFilter<?> that = (GeometryOperatorFilter<?>) o;
+
+            if (!Objects.equals(operatorName, that.operatorName)) {
+                return false;
+            }
+            if (!fieldName.equals(that.fieldName)) {
+                return false;
+            }
+            if (!geometry.equals(that.geometry)) {
+                return false;
+            }
+            if (!Objects.equals(maxDistance, that.maxDistance)) {
+                return false;
+            }
+            return Objects.equals(minDistance, that.minDistance);
+        }
+
+        @Override
+        public int hashCode() {
+            int result = operatorName != null ? operatorName.hashCode() : 0;
+            result = 31 * result + fieldName.hashCode();
+            result = 31 * result + geometry.hashCode();
+            result = 31 * result + (maxDistance != null ? maxDistance.hashCode() : 0);
+            result = 31 * result + (minDistance != null ? minDistance.hashCode() : 0);
+            return result;
+        }
+
+        @Override
+        public String toString() {
+            return "Geometry Operator Filter{"
+                    + "fieldName='" + fieldName + '\''
+                    + ", operator='" + operatorName + '\''
+                    + ", geometry=" + geometry
+                    + ", maxDistance=" + maxDistance
+                    + ", minDistance=" + minDistance
+                    + '}';
+        }
+    }
+
+    private static class TextFilter implements Bson {
+        private final String search;
+        private final TextSearchOptions textSearchOptions;
+
+        TextFilter(final String search, final TextSearchOptions textSearchOptions) {
+            this.search = search;
+            this.textSearchOptions = textSearchOptions;
+        }
+
+        @Override
+        public <TDocument> BsonDocument toBsonDocument(final Class<TDocument> documentClass, final CodecRegistry codecRegistry) {
+            BsonDocument searchDocument = new BsonDocument("$search", new BsonString(search));
+
+            String language = textSearchOptions.getLanguage();
+            if (language != null) {
+                searchDocument.put("$language", new BsonString(language));
+            }
+
+            Boolean caseSensitive = textSearchOptions.getCaseSensitive();
+            if (caseSensitive != null) {
+                searchDocument.put("$caseSensitive", BsonBoolean.valueOf(caseSensitive));
+            }
+
+            Boolean diacriticSensitive = textSearchOptions.getDiacriticSensitive();
+            if (diacriticSensitive != null) {
+                searchDocument.put("$diacriticSensitive", BsonBoolean.valueOf(diacriticSensitive));
+            }
+            return new BsonDocument("$text", searchDocument);
+        }
+
+        @Override
+        public boolean equals(final Object o) {
+            if (this == o) {
+                return true;
+            }
+            if (o == null || getClass() != o.getClass()) {
+                return false;
+            }
+
+            TextFilter that = (TextFilter) o;
+
+            if (!Objects.equals(search, that.search)) {
+                return false;
+            }
+            return Objects.equals(textSearchOptions, that.textSearchOptions);
+        }
+
+        @Override
+        public int hashCode() {
+            int result = search != null ? search.hashCode() : 0;
+            result = 31 * result + (textSearchOptions != null ?
textSearchOptions.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "Text Filter{" + + "search='" + search + '\'' + + ", textSearchOptions=" + textSearchOptions + + '}'; + } + } + +} diff --git a/driver-core/src/main/com/mongodb/client/model/FindOneAndDeleteOptions.java b/driver-core/src/main/com/mongodb/client/model/FindOneAndDeleteOptions.java new file mode 100644 index 00000000000..3b25cb69692 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/FindOneAndDeleteOptions.java @@ -0,0 +1,271 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.lang.Nullable; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.conversions.Bson; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +/** + * The options to apply to an operation that atomically finds a document and deletes it. + * + * @since 3.0 + * @mongodb.driver.manual reference/command/findAndModify/ + */ +public class FindOneAndDeleteOptions { + private Bson projection; + private Bson sort; + private long maxTimeMS; + private Collation collation; + private Bson hint; + private String hintString; + private BsonValue comment; + private Bson variables; + + /** + * Gets a document describing the fields to return for all matching documents. + * + * @return the project document, which may be null + * @mongodb.driver.manual tutorial/project-fields-from-query-results Projection + */ + @Nullable + public Bson getProjection() { + return projection; + } + + /** + * Sets a document describing the fields to return for all matching documents. + * + * @param projection the project document, which may be null. + * @return this + * @mongodb.driver.manual tutorial/project-fields-from-query-results Projection + * @see Projections + */ + public FindOneAndDeleteOptions projection(@Nullable final Bson projection) { + this.projection = projection; + return this; + } + + /** + * Gets the sort criteria to apply to the query. The default is null, which means that the documents will be returned in an undefined + * order. + * + * @return a document describing the sort criteria + * @mongodb.driver.manual reference/method/cursor.sort/ Sort + */ + @Nullable + public Bson getSort() { + return sort; + } + + /** + * Sets the sort criteria to apply to the query. + * + * @param sort the sort criteria, which may be null. + * @return this + * @mongodb.driver.manual reference/method/cursor.sort/ Sort + */ + public FindOneAndDeleteOptions sort(@Nullable final Bson sort) { + this.sort = sort; + return this; + } + + /** + * Sets the maximum execution time on the server for this operation. 
+ * + * @param maxTime the max time + * @param timeUnit the time unit, which may not be null + * @return this + */ + public FindOneAndDeleteOptions maxTime(final long maxTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + this.maxTimeMS = MILLISECONDS.convert(maxTime, timeUnit); + return this; + } + + /** + * Gets the maximum execution time for the find one and delete operation. + * + * @param timeUnit the time unit for the result + * @return the max time + */ + public long getMaxTime(final TimeUnit timeUnit) { + return timeUnit.convert(maxTimeMS, MILLISECONDS); + } + + /** + * Returns the collation options + * + * @return the collation options + * @since 3.4 + * @mongodb.server.release 3.4 + */ + @Nullable + public Collation getCollation() { + return collation; + } + + /** + * Sets the collation options + * + *
<p>A null value represents the server default.</p>
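Usage sketch (editorial aside, not part of the diff; `collection`, the field names, and the locale are assumed for illustration):

    // Atomically remove the newest "stale" document, comparing strings per a French collation.
    FindOneAndDeleteOptions options = new FindOneAndDeleteOptions()
            .sort(Sorts.descending("createdAt"))
            .collation(Collation.builder().locale("fr").build());
    Document deleted = collection.findOneAndDelete(Filters.eq("status", "stale"), options);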
+ * @param collation the collation options to use + * @return this + * @since 3.4 + * @mongodb.server.release 3.4 + */ + public FindOneAndDeleteOptions collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + /** + * Gets the hint to apply. + * + * @return the hint, which should describe an existing index + * @since 4.1 + * @mongodb.server.release 4.4 + */ + @Nullable + public Bson getHint() { + return hint; + } + + /** + * Gets the hint string to apply. + * + * @return the hint string, which should be the name of an existing index + * @since 4.1 + * @mongodb.server.release 4.4 + */ + @Nullable + public String getHintString() { + return hintString; + } + + /** + * Sets the hint to apply. + * + * @param hint a document describing the index which should be used for this operation. + * @return this + * @since 4.1 + * @mongodb.server.release 4.4 + */ + public FindOneAndDeleteOptions hint(@Nullable final Bson hint) { + this.hint = hint; + return this; + } + + /** + * Sets the hint to apply. + * + *
<p>Note: If {@link FindOneAndDeleteOptions#hint(Bson)} is set, that will be used instead of any hint string.</p>
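A sketch of the precedence rule in the note above (editorial; the index and field names are hypothetical):

    // When both are set, hint(Bson) wins and the hint string is ignored.
    FindOneAndDeleteOptions options = new FindOneAndDeleteOptions()
            .hint(Indexes.ascending("userId"))  // used
            .hintString("userId_1");            // ignored because hint(Bson) is set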
+ * + * @param hint the name of the index which should be used for the operation + * @return this + * @since 4.1 + * @mongodb.server.release 4.4 + */ + public FindOneAndDeleteOptions hintString(@Nullable final String hint) { + this.hintString = hint; + return this; + } + + + /** + * @return the comment for this operation. A null value means no comment is set. + * @since 4.6 + * @mongodb.server.release 4.4 + */ + @Nullable + public BsonValue getComment() { + return comment; + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + public FindOneAndDeleteOptions comment(@Nullable final String comment) { + this.comment = comment != null ? new BsonString(comment) : null; + return this; + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + public FindOneAndDeleteOptions comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + /** + * Add top-level variables to the operation + * + * @return the top level variables if set or null. + * @mongodb.server.release 5.0 + * @since 4.6 + */ + @Nullable + public Bson getLet() { + return variables; + } + + /** + * Add top-level variables for the operation + * + *
<p>
Allows for improved command readability by separating the variables from the query text. + * + * @param variables for the operation or null + * @return this + * @mongodb.server.release 5.0 + * @since 4.6 + */ + public FindOneAndDeleteOptions let(final Bson variables) { + this.variables = variables; + return this; + } + + @Override + public String toString() { + return "FindOneAndDeleteOptions{" + + "projection=" + projection + + ", sort=" + sort + + ", maxTimeMS=" + maxTimeMS + + ", collation=" + collation + + ", hint=" + hint + + ", hintString='" + hintString + '\'' + + ", comment=" + comment + + ", let=" + variables + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/FindOneAndReplaceOptions.java b/driver-core/src/main/com/mongodb/client/model/FindOneAndReplaceOptions.java new file mode 100644 index 00000000000..fe17d4f24bd --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/FindOneAndReplaceOptions.java @@ -0,0 +1,337 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.lang.Nullable; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.conversions.Bson; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +/** + * The options to apply to an operation that atomically finds a document and replaces it. + * + * @mongodb.driver.manual reference/command/findAndModify/ + * @since 3.0 + */ +public class FindOneAndReplaceOptions { + private Bson projection; + private Bson sort; + private boolean upsert; + private ReturnDocument returnDocument = ReturnDocument.BEFORE; + private long maxTimeMS; + private Boolean bypassDocumentValidation; + private Collation collation; + private Bson hint; + private String hintString; + private BsonValue comment; + private Bson variables; + + /** + * Gets a document describing the fields to return for all matching documents. + * + * @return the project document, which may be null + * @mongodb.driver.manual tutorial/project-fields-from-query-results Projection + */ + @Nullable + public Bson getProjection() { + return projection; + } + + /** + * Sets a document describing the fields to return for all matching documents. + * + * @param projection the project document, which may be null. + * @return this + * @mongodb.driver.manual tutorial/project-fields-from-query-results Projection + * @see Projections + */ + public FindOneAndReplaceOptions projection(@Nullable final Bson projection) { + this.projection = projection; + return this; + } + + /** + * Gets the sort criteria to apply to the query. The default is null, which means that the documents will be returned in an undefined + * order. 
+ * + * @return a document describing the sort criteria + * @mongodb.driver.manual reference/method/cursor.sort/ Sort + */ + @Nullable + public Bson getSort() { + return sort; + } + + /** + * Sets the sort criteria to apply to the query. + * + * @param sort the sort criteria, which may be null. + * @return this + * @mongodb.driver.manual reference/method/cursor.sort/ Sort + */ + public FindOneAndReplaceOptions sort(@Nullable final Bson sort) { + this.sort = sort; + return this; + } + + /** + * Returns true if a new document should be inserted if there are no matches to the query filter. The default is false. + * + * @return true if a new document should be inserted if there are no matches to the query filter + */ + public boolean isUpsert() { + return upsert; + } + + /** + * Set to true if a new document should be inserted if there are no matches to the query filter. + * + * @param upsert true if a new document should be inserted if there are no matches to the query filter + * @return this + */ + public FindOneAndReplaceOptions upsert(final boolean upsert) { + this.upsert = upsert; + return this; + } + + /** + * Gets the {@link ReturnDocument} value indicating whether to return the document before it was replaced or after + * + * @return {@link ReturnDocument#BEFORE} if returning the document before it was replaced otherwise return {@link ReturnDocument#AFTER} + */ + public ReturnDocument getReturnDocument() { + return returnDocument; + } + + /** + * Set whether to return the document before it was replaced or after + * + * @param returnDocument set whether to return the document before it was replaced or after + * @return this + */ + public FindOneAndReplaceOptions returnDocument(final ReturnDocument returnDocument) { + this.returnDocument = notNull("returnDocument", returnDocument); + return this; + } + + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time + * @param timeUnit the time unit, which may not be null + * @return this + */ + public FindOneAndReplaceOptions maxTime(final long maxTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + this.maxTimeMS = MILLISECONDS.convert(maxTime, timeUnit); + return this; + } + + /** + * Gets the maximum execution time for the find one and replace operation. + * + * @param timeUnit the time unit for the result + * @return the max time + */ + public long getMaxTime(final TimeUnit timeUnit) { + return timeUnit.convert(maxTimeMS, MILLISECONDS); + } + + /** + * Gets the bypass document level validation flag + * + * @return the bypass document level validation flag + * @since 3.2 + * @mongodb.server.release 3.2 + */ + @Nullable + public Boolean getBypassDocumentValidation() { + return bypassDocumentValidation; + } + + /** + * Sets the bypass document level validation flag. + * + * @param bypassDocumentValidation If true, allows the write to opt-out of document level validation. + * @return this + * @since 3.2 + * @mongodb.server.release 3.2 + */ + public FindOneAndReplaceOptions bypassDocumentValidation(@Nullable final Boolean bypassDocumentValidation) { + this.bypassDocumentValidation = bypassDocumentValidation; + return this; + } + + /** + * Returns the collation options + * + * @return the collation options + * @since 3.4 + * @mongodb.server.release 3.4 + */ + @Nullable + public Collation getCollation() { + return collation; + } + + /** + * Sets the collation options + * + *
<p>A null value represents the server default.</p>
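Usage sketch (editorial; `collection`, `id`, and `replacement` are assumed):

    // Replace-or-insert, returning the post-image rather than the default pre-image.
    FindOneAndReplaceOptions options = new FindOneAndReplaceOptions()
            .upsert(true)
            .returnDocument(ReturnDocument.AFTER);
    Document current = collection.findOneAndReplace(Filters.eq("_id", id), replacement, options);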
+ * @param collation the collation options to use + * @return this + * @since 3.4 + * @mongodb.server.release 3.4 + */ + public FindOneAndReplaceOptions collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + /** + * Returns the hint for which index to use. The default is not to set a hint. + * + * @return the hint + * @since 4.1 + */ + @Nullable + public Bson getHint() { + return hint; + } + + /** + * Sets the hint for which index to use. A null value means no hint is set. + * + * @param hint the hint + * @return this + * @since 4.1 + */ + public FindOneAndReplaceOptions hint(@Nullable final Bson hint) { + this.hint = hint; + return this; + } + + /** + * Gets the hint string to apply. + * + * @return the hint string, which should be the name of an existing index + * @since 4.1 + */ + @Nullable + public String getHintString() { + return hintString; + } + + /** + * Sets the hint to apply. + * + * @param hint the name of the index which should be used for the operation + * @return this + * @since 4.1 + */ + public FindOneAndReplaceOptions hintString(@Nullable final String hint) { + this.hintString = hint; + return this; + } + + + /** + * @return the comment for this operation. A null value means no comment is set. + * @since 4.6 + * @mongodb.server.release 4.4 + */ + @Nullable + public BsonValue getComment() { + return comment; + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + public FindOneAndReplaceOptions comment(@Nullable final String comment) { + this.comment = comment != null ? new BsonString(comment) : null; + return this; + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + public FindOneAndReplaceOptions comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + /** + * Add top-level variables to the operation + * + * @return the top level variables if set or null. + * @mongodb.server.release 5.0 + * @since 4.6 + */ + @Nullable + public Bson getLet() { + return variables; + } + + /** + * Add top-level variables for the operation + * + *
<p>Allows for improved command readability by separating the variables from the query text.
+     *
+     * @param variables for the operation or null
+     * @return this
+     * @mongodb.server.release 5.0
+     * @since 4.6
+     */
+    public FindOneAndReplaceOptions let(final Bson variables) {
+        this.variables = variables;
+        return this;
+    }
+
+    @Override
+    public String toString() {
+        return "FindOneAndReplaceOptions{"
+                + "projection=" + projection
+                + ", sort=" + sort
+                + ", upsert=" + upsert
+                + ", returnDocument=" + returnDocument
+                + ", maxTimeMS=" + maxTimeMS
+                + ", bypassDocumentValidation=" + bypassDocumentValidation
+                + ", collation=" + collation
+                + ", hint=" + hint
+                + ", hintString=" + hintString
+                + ", comment=" + comment
+                + ", let=" + variables
+                + '}';
+    }
+}
+
diff --git a/driver-core/src/main/com/mongodb/client/model/FindOneAndUpdateOptions.java b/driver-core/src/main/com/mongodb/client/model/FindOneAndUpdateOptions.java
new file mode 100644
index 00000000000..a850bdcc0f2
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/FindOneAndUpdateOptions.java
@@ -0,0 +1,363 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model;
+
+import com.mongodb.lang.Nullable;
+import org.bson.BsonString;
+import org.bson.BsonValue;
+import org.bson.conversions.Bson;
+
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import static com.mongodb.assertions.Assertions.notNull;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+
+/**
+ * The options to apply to an operation that atomically finds a document and updates it.
+ *
+ * @since 3.0
+ * @mongodb.driver.manual reference/command/findAndModify/
+ */
+public class FindOneAndUpdateOptions {
+    private Bson projection;
+    private Bson sort;
+    private boolean upsert;
+    private ReturnDocument returnDocument = ReturnDocument.BEFORE;
+    private long maxTimeMS;
+    private Boolean bypassDocumentValidation;
+    private Collation collation;
+    private List<? extends Bson> arrayFilters;
+    private Bson hint;
+    private String hintString;
+    private BsonValue comment;
+    private Bson variables;
+
+    /**
+     * Gets a document describing the fields to return for all matching documents.
+     *
+     * @return the project document, which may be null
+     * @mongodb.driver.manual tutorial/project-fields-from-query-results Projection
+     */
+    @Nullable
+    public Bson getProjection() {
+        return projection;
+    }
+
+    /**
+     * Sets a document describing the fields to return for all matching documents.
+     *
+     * @param projection the project document, which may be null.
+     * @return this
+     * @mongodb.driver.manual tutorial/project-fields-from-query-results Projection
+     * @see Projections
+     */
+    public FindOneAndUpdateOptions projection(@Nullable final Bson projection) {
+        this.projection = projection;
+        return this;
+    }
+
+    /**
+     * Gets the sort criteria to apply to the query. The default is null, which means that the documents will be returned in an undefined
+     * order.
+ * + * @return a document describing the sort criteria + * @mongodb.driver.manual reference/method/cursor.sort/ Sort + */ + @Nullable + public Bson getSort() { + return sort; + } + + /** + * Sets the sort criteria to apply to the query. + * + * @param sort the sort criteria, which may be null. + * @return this + * @mongodb.driver.manual reference/method/cursor.sort/ Sort + */ + public FindOneAndUpdateOptions sort(@Nullable final Bson sort) { + this.sort = sort; + return this; + } + + /** + * Returns true if a new document should be inserted if there are no matches to the query filter. The default is false. + * + * @return true if a new document should be inserted if there are no matches to the query filter + */ + public boolean isUpsert() { + return upsert; + } + + /** + * Set to true if a new document should be inserted if there are no matches to the query filter. + * + * @param upsert true if a new document should be inserted if there are no matches to the query filter + * @return this + */ + public FindOneAndUpdateOptions upsert(final boolean upsert) { + this.upsert = upsert; + return this; + } + + /** + * Gets the {@link ReturnDocument} value indicating whether to return the document before it was updated / inserted or after + * + * @return {@link ReturnDocument#BEFORE} if returning the document before it was updated or inserted otherwise + * returns {@link ReturnDocument#AFTER} + */ + public ReturnDocument getReturnDocument() { + return returnDocument; + } + + /** + * Set whether to return the document before it was updated / inserted or after + * + * @param returnDocument set whether to return the document before it was updated / inserted or after + * @return this + */ + public FindOneAndUpdateOptions returnDocument(final ReturnDocument returnDocument) { + this.returnDocument = notNull("returnDocument", returnDocument); + return this; + } + + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time + * @param timeUnit the time unit, which may not be null + * @return this + */ + public FindOneAndUpdateOptions maxTime(final long maxTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + this.maxTimeMS = MILLISECONDS.convert(maxTime, timeUnit); + return this; + } + + /** + * Gets the maximum execution time for the find one and update operation. + * + * @param timeUnit the time unit for the result + * @return the max time + */ + public long getMaxTime(final TimeUnit timeUnit) { + return timeUnit.convert(maxTimeMS, MILLISECONDS); + } + + /** + * Gets the bypass document level validation flag + * + * @return the bypass document level validation flag + * @since 3.2 + * @mongodb.server.release 3.2 + */ + @Nullable + public Boolean getBypassDocumentValidation() { + return bypassDocumentValidation; + } + + /** + * Sets the bypass document level validation flag. + * + * @param bypassDocumentValidation If true, allows the write to opt-out of document level validation. + * @return this + * @since 3.2 + * @mongodb.server.release 3.2 + */ + public FindOneAndUpdateOptions bypassDocumentValidation(@Nullable final Boolean bypassDocumentValidation) { + this.bypassDocumentValidation = bypassDocumentValidation; + return this; + } + + /** + * Returns the collation options + * + * @return the collation options + * @since 3.4 + * @mongodb.server.release 3.4 + */ + @Nullable + public Collation getCollation() { + return collation; + } + + /** + * Sets the collation options + * + *
<p>A null value represents the server default.</p>
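Usage sketch combining several of these options (editorial; `collection`, `id`, and the field names are assumed; `arrayFilters` is defined just below):

    // Flag low-stock array elements matched by the "elem" identifier and return the post-image.
    Document updated = collection.findOneAndUpdate(
            Filters.eq("_id", id),
            Updates.set("items.$[elem].restock", true),
            new FindOneAndUpdateOptions()
                    .arrayFilters(Collections.singletonList(Filters.lt("elem.qty", 10)))
                    .returnDocument(ReturnDocument.AFTER));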
+     * @param collation the collation options to use
+     * @return this
+     * @since 3.4
+     * @mongodb.server.release 3.4
+     */
+    public FindOneAndUpdateOptions collation(@Nullable final Collation collation) {
+        this.collation = collation;
+        return this;
+    }
+    /**
+     * Sets the array filters option
+     *
+     * @param arrayFilters the array filters, which may be null
+     * @return this
+     * @since 3.6
+     * @mongodb.server.release 3.6
+     */
+    public FindOneAndUpdateOptions arrayFilters(@Nullable final List<? extends Bson> arrayFilters) {
+        this.arrayFilters = arrayFilters;
+        return this;
+    }
+
+    /**
+     * Returns the array filters option
+     *
+     * @return the array filters, which may be null
+     * @since 3.6
+     * @mongodb.server.release 3.6
+     */
+    @Nullable
+    public List<? extends Bson> getArrayFilters() {
+        return arrayFilters;
+    }
+
+    /**
+     * Returns the hint for which index to use. The default is not to set a hint.
+     *
+     * @return the hint
+     * @since 4.1
+     */
+    @Nullable
+    public Bson getHint() {
+        return hint;
+    }
+
+    /**
+     * Sets the hint for which index to use. A null value means no hint is set.
+     *
+     * @param hint the hint
+     * @return this
+     * @since 4.1
+     */
+    public FindOneAndUpdateOptions hint(@Nullable final Bson hint) {
+        this.hint = hint;
+        return this;
+    }
+
+    /**
+     * Gets the hint string to apply.
+     *
+     * @return the hint string, which should be the name of an existing index
+     * @since 4.1
+     */
+    @Nullable
+    public String getHintString() {
+        return hintString;
+    }
+
+    /**
+     * Sets the hint to apply.
+     *
+     * @param hint the name of the index which should be used for the operation
+     * @return this
+     * @since 4.1
+     */
+    public FindOneAndUpdateOptions hintString(@Nullable final String hint) {
+        this.hintString = hint;
+        return this;
+    }
+
+    /**
+     * @return the comment for this operation. A null value means no comment is set.
+     * @since 4.6
+     * @mongodb.server.release 4.4
+     */
+    @Nullable
+    public BsonValue getComment() {
+        return comment;
+    }
+
+    /**
+     * Sets the comment for this operation. A null value means no comment is set.
+     *
+     * @param comment the comment
+     * @return this
+     * @since 4.6
+     * @mongodb.server.release 4.4
+     */
+    public FindOneAndUpdateOptions comment(@Nullable final String comment) {
+        this.comment = comment != null ? new BsonString(comment) : null;
+        return this;
+    }
+
+    /**
+     * Sets the comment for this operation. A null value means no comment is set.
+     *
+     * @param comment the comment
+     * @return this
+     * @since 4.6
+     * @mongodb.server.release 4.4
+     */
+    public FindOneAndUpdateOptions comment(@Nullable final BsonValue comment) {
+        this.comment = comment;
+        return this;
+    }
+
+    /**
+     * Add top-level variables to the operation
+     *
+     * @return the top level variables if set or null.
+     * @mongodb.server.release 5.0
+     * @since 4.6
+     */
+    @Nullable
+    public Bson getLet() {
+        return variables;
+    }
+
+    /**
+     * Add top-level variables for the operation
+     *
+     * <p>Allows for improved command readability by separating the variables from the query text.
+     *
+     * @param variables for the operation or null
+     * @return this
+     * @mongodb.server.release 5.0
+     * @since 4.6
+     */
+    public FindOneAndUpdateOptions let(final Bson variables) {
+        this.variables = variables;
+        return this;
+    }
+
+    @Override
+    public String toString() {
+        return "FindOneAndUpdateOptions{"
+                + "projection=" + projection
+                + ", sort=" + sort
+                + ", upsert=" + upsert
+                + ", returnDocument=" + returnDocument
+                + ", maxTimeMS=" + maxTimeMS
+                + ", bypassDocumentValidation=" + bypassDocumentValidation
+                + ", collation=" + collation
+                + ", arrayFilters=" + arrayFilters
+                + ", hint=" + hint
+                + ", hintString=" + hintString
+                + ", comment=" + comment
+                + ", let=" + variables
+                + '}';
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/client/model/GeoNearConstructibleBson.java b/driver-core/src/main/com/mongodb/client/model/GeoNearConstructibleBson.java
new file mode 100644
index 00000000000..1f1fc5ab9b9
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/GeoNearConstructibleBson.java
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model;
+
+import com.mongodb.annotations.Immutable;
+import com.mongodb.internal.client.model.AbstractConstructibleBson;
+import org.bson.BsonDocument;
+import org.bson.Document;
+import org.bson.conversions.Bson;
+
+final class GeoNearConstructibleBson extends AbstractConstructibleBson<GeoNearConstructibleBson> implements GeoNearOptions {
+    /**
+     * An {@linkplain Immutable immutable} {@link BsonDocument#isEmpty() empty} instance.
+     */
+    static final GeoNearOptions EMPTY_IMMUTABLE = new GeoNearConstructibleBson(AbstractConstructibleBson.EMPTY_IMMUTABLE);
+
+    private GeoNearConstructibleBson(final Bson base) {
+        super(base);
+    }
+
+    private GeoNearConstructibleBson(final Bson base, final Document appended) {
+        super(base, appended);
+    }
+
+    private GeoNearOptions setOption(final String key, final Object value) {
+        return newAppended(key, value);
+    }
+
+    @Override
+    public GeoNearOptions distanceMultiplier(final Number distanceMultiplier) {
+        return setOption("distanceMultiplier", distanceMultiplier);
+    }
+
+    @Override
+    public GeoNearOptions includeLocs(final String includeLocs) {
+        return setOption("includeLocs", includeLocs);
+    }
+
+    @Override
+    public GeoNearOptions key(final String key) {
+        return setOption("key", key);
+    }
+
+    @Override
+    public GeoNearOptions minDistance(final Number minDistance) {
+        return setOption("minDistance", minDistance);
+    }
+
+    @Override
+    public GeoNearOptions maxDistance(final Number maxDistance) {
+        return setOption("maxDistance", maxDistance);
+    }
+
+    @Override
+    public GeoNearOptions query(final Document query) {
+        return setOption("query", query);
+    }
+
+    @Override
+    public GeoNearOptions spherical() {
+        return setOption("spherical", true);
+    }
+
+    @Override
+    protected GeoNearConstructibleBson newSelf(final Bson base, final Document appended) {
+        return new GeoNearConstructibleBson(base, appended);
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/client/model/GeoNearOptions.java b/driver-core/src/main/com/mongodb/client/model/GeoNearOptions.java
new file mode 100644
index 00000000000..7f3ccaaec51
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/GeoNearOptions.java
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model;
+
+import org.bson.Document;
+import org.bson.conversions.Bson;
+
+/**
+ * The options for a {@link Aggregates#geoNear} pipeline stage.
+ *
+ * @mongodb.driver.manual reference/operator/aggregation/geoNear/ $geoNear
+ * @since 4.8
+ */
+public interface GeoNearOptions extends Bson {
+    /**
+     * Returns {@link GeoNearOptions} that represents server defaults.
+     *
+     * @return {@link GeoNearOptions} that represents server defaults.
+     */
+    static GeoNearOptions geoNearOptions() {
+        return GeoNearConstructibleBson.EMPTY_IMMUTABLE;
+    }
+
+    /**
+     * @param distanceMultiplier The factor to multiply all distances returned by the query.
+     * @return a new {@link GeoNearOptions} with the provided option set
+     * @since 4.8
+     */
+    GeoNearOptions distanceMultiplier(Number distanceMultiplier);
+
+    /**
+     * This specifies the output field that identifies the location used to calculate the distance.
+     * This option is useful when a location field contains multiple locations.
+     * To specify a field within an embedded document, use dot notation.
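Usage sketch for these options (editorial; assumes the `Aggregates.geoNear(Point, String, GeoNearOptions)` overload and the GeoJSON types from `com.mongodb.client.model.geojson`):

    // $geoNear stage: nearest documents within 500 meters, distances written to "dist.calculated".
    Bson geoNearStage = Aggregates.geoNear(
            new Point(new Position(-73.99279, 40.719296)),
            "dist.calculated",
            GeoNearOptions.geoNearOptions().maxDistance(500).spherical());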
+ * + * @param includeLocs the output field + * @return a new {@link GeoNearOptions} with the provided option set + * @since 4.8 + */ + GeoNearOptions includeLocs(String includeLocs); + + /** + * Specify the geospatial indexed field to use when calculating the distance. + * + * @param key the geospatial indexed field. + * @return a new {@link GeoNearOptions} with the provided option set + * @since 4.8 + */ + GeoNearOptions key(String key); + + /** + * The minimum distance from the center point that the documents can be. + * MongoDB limits the results to those documents that fall outside the specified distance from the center point. + * + * @param minDistance the distance in meters for GeoJSON data. + * @return a new {@link GeoNearOptions} with the provided option set + * @since 4.8 + */ + GeoNearOptions minDistance(Number minDistance); + + /** + * The maximum distance from the center point that the documents can be. + * MongoDB limits the results to those documents that fall within the specified distance from the center point. + * + * @param maxDistance the distance in meters for GeoJSON data. + * @return a new {@link GeoNearOptions} with the provided option set + * @since 4.8 + */ + GeoNearOptions maxDistance(Number maxDistance); + + /** + * Limits the results to the documents that match the query. + * The query syntax is the usual MongoDB read operation query syntax. + * + * @param query the query + * @return a new {@link GeoNearOptions} with the provided option set + * @since 4.8 + */ + GeoNearOptions query(Document query); + + /** + * Determines how MongoDB calculates the distance between two points. + * By default, when this option is not provided, MongoDB uses $near semantics: + * spherical geometry for 2dsphere indexes and planar geometry for 2d indexes. + * When provided, MongoDB uses $nearSphere semantics and calculates distances + * using spherical geometry. + * + * @return a new {@link GeoNearOptions} with the provided option set + * @since 4.8 + */ + GeoNearOptions spherical(); +} diff --git a/driver-core/src/main/com/mongodb/client/model/GraphLookupOptions.java b/driver-core/src/main/com/mongodb/client/model/GraphLookupOptions.java new file mode 100644 index 00000000000..31812c39b19 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/GraphLookupOptions.java @@ -0,0 +1,153 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model; + +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +import java.util.Objects; + +/** + * The options for a graphLookup aggregation pipeline stage + * + * @mongodb.driver.manual reference/operator/aggregation/graphLookup/ graphLookup + * @mongodb.server.release 3.4 + * @since 3.4 + */ +public final class GraphLookupOptions { + private Integer maxDepth; + private String depthField; + private Bson restrictSearchWithMatch; + + /** + * The name of the field in which to store the depth value + * + * @param field the field name + * @return this + */ + public GraphLookupOptions depthField(@Nullable final String field) { + depthField = field; + return this; + } + + /** + * @return the field name + */ + @Nullable + public String getDepthField() { + return depthField; + } + + /** + * Specifies a maximum recursive depth for the $graphLookup. This number must be non-negative. + * + * @param max the maximum depth + * @return this + */ + public GraphLookupOptions maxDepth(@Nullable final Integer max) { + maxDepth = max; + return this; + } + + /** + * @return the maximum depth + */ + @Nullable + public Integer getMaxDepth() { + return maxDepth; + } + + /** + * A document specifying additional conditions for the recursive search + * + * @param filter the filter expression + * @return this + * @since 3.6 + */ + public GraphLookupOptions restrictSearchWithMatch(@Nullable final Bson filter) { + restrictSearchWithMatch = filter; + return this; + } + + /** + * @return the filter expression + * @since 3.6 + */ + @Nullable + public Bson getRestrictSearchWithMatch() { + return restrictSearchWithMatch; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + GraphLookupOptions that = (GraphLookupOptions) o; + + if (!Objects.equals(maxDepth, that.maxDepth)) { + return false; + } + if (!Objects.equals(depthField, that.depthField)) { + return false; + } + return Objects.equals(restrictSearchWithMatch, that.restrictSearchWithMatch); + } + + @Override + public int hashCode() { + int result = maxDepth != null ? maxDepth.hashCode() : 0; + result = 31 * result + (depthField != null ? depthField.hashCode() : 0); + result = 31 * result + (restrictSearchWithMatch != null ? restrictSearchWithMatch.hashCode() : 0); + return result; + } + + @Override + public String toString() { + StringBuilder stringBuilder = new StringBuilder() + .append("GraphLookupOptions{"); + if (depthField != null) { + stringBuilder.append("depthField='") + .append(depthField) + .append('\''); + if (maxDepth != null) { + stringBuilder.append(", "); + } + } + if (maxDepth != null) { + stringBuilder + .append("maxDepth=") + .append(maxDepth); + if (restrictSearchWithMatch != null) { + stringBuilder.append(", "); + } + } + if (restrictSearchWithMatch != null) { + stringBuilder + .append("restrictSearchWithMatch=") + .append(restrictSearchWithMatch); + } + + return stringBuilder + .append('}') + .toString(); + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/IndexModel.java b/driver-core/src/main/com/mongodb/client/model/IndexModel.java new file mode 100644 index 00000000000..65998691810 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/IndexModel.java @@ -0,0 +1,78 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import org.bson.conversions.Bson; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A model describing the creation of a single index. + * + * @mongodb.driver.manual reference/command/createIndexes Index options + * @since 3.0 + */ +public class IndexModel { + private final Bson keys; + private final IndexOptions options; + + /** + * Construct an instance with the given keys. + * + * @param keys the index keys + */ + public IndexModel(final Bson keys) { + this(keys, new IndexOptions()); + } + + /** + * Construct an instance with the given keys and options. + * + * @param keys the index keys + * @param options the index options + */ + public IndexModel(final Bson keys, final IndexOptions options) { + this.keys = notNull("keys", keys); + this.options = notNull("options", options); + } + + /** + * Gets the index keys. + * + * @return the index keys + */ + public Bson getKeys() { + return keys; + } + + /** + * Gets the index options. + * + * @return the index options + */ + public IndexOptions getOptions() { + return options; + } + + @Override + public String toString() { + return "IndexModel{" + + "keys=" + keys + + ", options=" + options + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/IndexOptionDefaults.java b/driver-core/src/main/com/mongodb/client/model/IndexOptionDefaults.java new file mode 100644 index 00000000000..30dafd004db --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/IndexOptionDefaults.java @@ -0,0 +1,60 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * The default options for a collection to apply on the creation of indexes. + * + * @since 3.2 + * @mongodb.driver.manual reference/method/db.createCollection/ Create Collection + * @mongodb.driver.manual reference/command/createIndexes Index options + * @mongodb.server.release 3.2 + */ +public final class IndexOptionDefaults { + private Bson storageEngine; + + /** + * Gets the default storage engine options document for indexes. + * + * @return the storage engine options + */ + @Nullable + public Bson getStorageEngine() { + return storageEngine; + } + + /** + * Sets the default storage engine options document for indexes. 
+ * + * @param storageEngine the storage engine options + * @return this + */ + public IndexOptionDefaults storageEngine(@Nullable final Bson storageEngine) { + this.storageEngine = storageEngine; + return this; + } + + @Override + public String toString() { + return "IndexOptionDefaults{" + + "storageEngine=" + storageEngine + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/IndexOptions.java b/driver-core/src/main/com/mongodb/client/model/IndexOptions.java new file mode 100644 index 00000000000..f3cf45b5a3f --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/IndexOptions.java @@ -0,0 +1,519 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +import java.util.concurrent.TimeUnit; + +/** + * The options to apply to the creation of an index. + * + * @mongodb.driver.manual reference/command/createIndexes Index options + * @since 3.0 + */ +public class IndexOptions { + private boolean background; + private boolean unique; + private String name; + private boolean sparse; + private Long expireAfterSeconds; + private Integer version; + private Bson weights; + private String defaultLanguage; + private String languageOverride; + private Integer textVersion; + private Integer sphereVersion; + private Integer bits; + private Double min; + private Double max; + private Bson storageEngine; + private Bson partialFilterExpression; + private Collation collation; + private Bson wildcardProjection; + private boolean hidden; + + /** + * Create the index in the background + * + * @return true if should create the index in the background + */ + public boolean isBackground() { + return background; + } + + /** + * Should the index should be created in the background + * + * @param background true if should create the index in the background + * @return this + */ + public IndexOptions background(final boolean background) { + this.background = background; + return this; + } + + /** + * Gets if the index should be unique. + * + * @return true if the index should be unique + */ + public boolean isUnique() { + return unique; + } + + /** + * Should the index should be unique. + * + * @param unique if the index should be unique + * @return this + */ + public IndexOptions unique(final boolean unique) { + this.unique = unique; + return this; + } + + /** + * Gets the name of the index. + * + * @return the name of the index + */ + @Nullable + public String getName() { + return name; + } + + /** + * Sets the name of the index. 
+ * + * @param name of the index + * @return this + */ + public IndexOptions name(@Nullable final String name) { + this.name = name; + return this; + } + + /** + * If true, the index only references documents with the specified field + * + * @return if the index should only reference documents with the specified field + */ + public boolean isSparse() { + return sparse; + } + + /** + * Should the index only references documents with the specified field + * + * @param sparse if true, the index only references documents with the specified field + * @return this + */ + public IndexOptions sparse(final boolean sparse) { + this.sparse = sparse; + return this; + } + + /** + * Gets the time to live for documents in the collection + * + * @return the time to live for documents in the collection + * @param timeUnit the time unit + * @mongodb.driver.manual tutorial/expire-data TTL + */ + @Nullable + public Long getExpireAfter(final TimeUnit timeUnit) { + if (expireAfterSeconds == null) { + return null; + } + return timeUnit.convert(expireAfterSeconds, TimeUnit.SECONDS); + } + + /** + * Sets the time to live for documents in the collection + * + * @param expireAfter the time to live for documents in the collection + * @param timeUnit the time unit for expireAfter + * @return this + * @mongodb.driver.manual tutorial/expire-data TTL + */ + public IndexOptions expireAfter(@Nullable final Long expireAfter, final TimeUnit timeUnit) { + if (expireAfter == null) { + this.expireAfterSeconds = null; + } else { + this.expireAfterSeconds = TimeUnit.SECONDS.convert(expireAfter, timeUnit); + } + return this; + } + + /** + * Gets the index version number. + * + * @return the index version number + */ + @Nullable + public Integer getVersion() { + return this.version; + } + + /** + * Sets the index version number. + * + * @param version the index version number + * @return this + */ + public IndexOptions version(@Nullable final Integer version) { + this.version = version; + return this; + } + + /** + * Gets the weighting object for use with a text index + * + *
<p>A document that represents field and weight pairs. The weight is an integer ranging from 1 to 99,999 and denotes the significance
+ * of the field relative to the other indexed fields in terms of the score.</p>
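Usage sketch (editorial; `collection` and the field names are assumed):

    // Matches in "title" score ten times higher than matches in "body".
    collection.createIndex(
            Indexes.compoundIndex(Indexes.text("title"), Indexes.text("body")),
            new IndexOptions().weights(new Document("title", 10).append("body", 1)));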
+ * + * @return the weighting object + * @mongodb.driver.manual tutorial/control-results-of-text-search Control Search Results with Weights + */ + @Nullable + public Bson getWeights() { + return weights; + } + + /** + * Sets the weighting object for use with a text index. + * + *
<p>A document that represents field and weight pairs. The weight is an integer ranging from 1 to 99,999 and denotes the significance
+ * of the field relative to the other indexed fields in terms of the score.</p>
+ * + * @param weights the weighting object + * @return this + * @mongodb.driver.manual tutorial/control-results-of-text-search Control Search Results with Weights + */ + public IndexOptions weights(@Nullable final Bson weights) { + this.weights = weights; + return this; + } + + /** + * Gets the language for a text index. + * + *
<p>The language that determines the list of stop words and the rules for the stemmer and tokenizer.</p>
+ * + * @return the language for a text index. + * @mongodb.driver.manual reference/text-search-languages Text Search languages + */ + @Nullable + public String getDefaultLanguage() { + return defaultLanguage; + } + + /** + * Sets the language for the text index. + * + *
<p>The language that determines the list of stop words and the rules for the stemmer and tokenizer.</p>
+ * + * @param defaultLanguage the language for the text index. + * @return this + * @mongodb.driver.manual reference/text-search-languages Text Search languages + */ + public IndexOptions defaultLanguage(@Nullable final String defaultLanguage) { + this.defaultLanguage = defaultLanguage; + return this; + } + + /** + * Gets the name of the field that contains the language string. + * + *
<p>For text indexes, the name of the field, in the collection's documents, that contains the override language for the document.</p>
+ * + * @return the name of the field that contains the language string. + * @mongodb.driver.manual tutorial/specify-language-for-text-index/#specify-language-field-text-index-example Language override + */ + @Nullable + public String getLanguageOverride() { + return languageOverride; + } + + /** + * Sets the name of the field that contains the language string. + * + *
<p>For text indexes, the name of the field, in the collection's documents, that contains the override language for the document.</p>
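Usage sketch tying the two language options together (editorial; `collection` and the field names are assumed):

    // Tokenize and stem in Spanish by default, unless a document carries its own "lang" value.
    collection.createIndex(
            Indexes.text("description"),
            new IndexOptions().defaultLanguage("spanish").languageOverride("lang"));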
+ * + * @param languageOverride the name of the field that contains the language string. + * @return this + * @mongodb.driver.manual tutorial/specify-language-for-text-index/#specify-language-field-text-index-example Language override + */ + public IndexOptions languageOverride(@Nullable final String languageOverride) { + this.languageOverride = languageOverride; + return this; + } + + /** + * The text index version number. + * + * @return the text index version number. + */ + @Nullable + public Integer getTextVersion() { + return textVersion; + } + + /** + * Set the text index version number. + * + * @param textVersion the text index version number. + * @return this + */ + public IndexOptions textVersion(@Nullable final Integer textVersion) { + this.textVersion = textVersion; + return this; + } + + /** + * Gets the 2dsphere index version number. + * + * @return the 2dsphere index version number + */ + @Nullable + public Integer getSphereVersion() { + return sphereVersion; + } + + /** + * Sets the 2dsphere index version number. + * + * @param sphereVersion the 2dsphere index version number. + * @return this + */ + public IndexOptions sphereVersion(@Nullable final Integer sphereVersion) { + this.sphereVersion = sphereVersion; + return this; + } + + /** + * Gets the number of precision of the stored geohash value of the location data in 2d indexes. + * + * @return the number of precision of the stored geohash value + */ + @Nullable + public Integer getBits() { + return bits; + } + + /** + * Sets the number of precision of the stored geohash value of the location data in 2d indexes. + * + * @param bits the number of precision of the stored geohash value + * @return this + */ + public IndexOptions bits(@Nullable final Integer bits) { + this.bits = bits; + return this; + } + + /** + * Gets the lower inclusive boundary for the longitude and latitude values for 2d indexes.. + * + * @return the lower inclusive boundary for the longitude and latitude values. + */ + @Nullable + public Double getMin() { + return min; + } + + /** + * Sets the lower inclusive boundary for the longitude and latitude values for 2d indexes.. + * + * @param min the lower inclusive boundary for the longitude and latitude values + * @return this + */ + public IndexOptions min(@Nullable final Double min) { + this.min = min; + return this; + } + + /** + * Gets the upper inclusive boundary for the longitude and latitude values for 2d indexes.. + * + * @return the upper inclusive boundary for the longitude and latitude values. + */ + @Nullable + public Double getMax() { + return max; + } + + /** + * Sets the upper inclusive boundary for the longitude and latitude values for 2d indexes.. + * + * @param max the upper inclusive boundary for the longitude and latitude values + * @return this + */ + public IndexOptions max(@Nullable final Double max) { + this.max = max; + return this; + } + + /** + * Gets the storage engine options document for this index. + * + * @return the storage engine options + * @mongodb.server.release 3.0 + */ + @Nullable + public Bson getStorageEngine() { + return storageEngine; + } + + /** + * Sets the storage engine options document for this index. 
+ * + * @param storageEngine the storage engine options + * @return this + * @mongodb.server.release 3.0 + */ + public IndexOptions storageEngine(@Nullable final Bson storageEngine) { + this.storageEngine = storageEngine; + return this; + } + + /** + * Get the filter expression for the documents to be included in the index or null if not set + * + * @return the filter expression for the documents to be included in the index or null if not set + * @mongodb.server.release 3.2 + * @since 3.2 + */ + @Nullable + public Bson getPartialFilterExpression() { + return partialFilterExpression; + } + + /** + * Sets the filter expression for the documents to be included in the index + * + * @param partialFilterExpression the filter expression for the documents to be included in the index + * @return this + * @mongodb.server.release 3.2 + * @since 3.2 + */ + public IndexOptions partialFilterExpression(@Nullable final Bson partialFilterExpression) { + this.partialFilterExpression = partialFilterExpression; + return this; + } + + /** + * Returns the collation options + * + * @return the collation options + * @since 3.4 + * @mongodb.server.release 3.4 + */ + @Nullable + public Collation getCollation() { + return collation; + } + + /** + * Sets the collation options + * + *
<p>A null value represents the server default.</p>
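Usage sketch for a few of the options above (editorial; `collection` and the field names are assumed):

    // Unique index restricted to documents that actually have an "email" field.
    collection.createIndex(Indexes.ascending("email"),
            new IndexOptions().unique(true).partialFilterExpression(Filters.exists("email")));

    // TTL index: the server deletes session documents one hour after "lastSeen".
    collection.createIndex(Indexes.ascending("lastSeen"),
            new IndexOptions().expireAfter(1L, TimeUnit.HOURS));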
+ * @param collation the collation options to use + * @return this + * @since 3.4 + * @mongodb.server.release 3.4 + */ + public IndexOptions collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + /** + * Gets the wildcard projection of a wildcard index + * + * @return the wildcard projection + * @mongodb.server.release 4.2 + * @since 3.10 + */ + public Bson getWildcardProjection() { + return wildcardProjection; + } + + /** + * Sets the wildcard projection of a wildcard index + * + * @param wildcardProjection the wildcard projection + * @return this + * @mongodb.server.release 4.2 + * @since 3.10 + */ + public IndexOptions wildcardProjection(final Bson wildcardProjection) { + this.wildcardProjection = wildcardProjection; + return this; + } + + /** + * Gets whether the index should not be used by the query planner when executing operations. + * + * @return true if the index should not be used by the query planner when executing operations. + * @mongodb.server.release 4.4 + * @since 4.1 + */ + public boolean isHidden() { + return hidden; + } + + /** + * Should the index not be used by the query planner when executing operations. + * + * @param hidden true if the index should be hidden + * @return this + * @mongodb.server.release 4.4 + * @since 4.1 + */ + public IndexOptions hidden(final boolean hidden) { + this.hidden = hidden; + return this; + } + + @Override + public String toString() { + return "IndexOptions{" + + "background=" + background + + ", unique=" + unique + + ", name='" + name + '\'' + + ", sparse=" + sparse + + ", expireAfterSeconds=" + expireAfterSeconds + + ", version=" + version + + ", weights=" + weights + + ", defaultLanguage='" + defaultLanguage + '\'' + + ", languageOverride='" + languageOverride + '\'' + + ", textVersion=" + textVersion + + ", sphereVersion=" + sphereVersion + + ", bits=" + bits + + ", min=" + min + + ", max=" + max + + ", storageEngine=" + storageEngine + + ", partialFilterExpression=" + partialFilterExpression + + ", collation=" + collation + + ", wildcardProjection=" + wildcardProjection + + ", hidden=" + hidden + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/Indexes.java b/driver-core/src/main/com/mongodb/client/model/Indexes.java new file mode 100644 index 00000000000..e310e4bbcb7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/Indexes.java @@ -0,0 +1,234 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.List; + +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.Arrays.asList; + +/** + * A factory for defining index keys. 
A convenient way to use this class is to statically import all of its methods, which allows usage
+ * like:
+ * <blockquote><pre>
+ *    collection.createIndex(compoundIndex(ascending("x"), descending("y")));
+ * </pre></blockquote>
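A slightly fuller usage sketch (illustrative, not part of this patch): it assumes a MongoCollection<Document> named collection, static imports from com.mongodb.client.model.Indexes, and made-up field names.

    // Compound key {x: 1, y: -1}; if a field name repeats across the parts, the last one wins.
    collection.createIndex(compoundIndex(ascending("x"), descending("y")));

    // The same call also accepts IndexOptions, e.g. for a named unique index.
    collection.createIndex(ascending("email"), new IndexOptions().name("email_unique").unique(true));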
+ * @since 3.1 + */ +public final class Indexes { + + private Indexes() { + } + + /** + * Create an index key for an ascending index on the given fields. + * + * @param fieldNames the field names, which must contain at least one + * @return the index specification + * @mongodb.driver.manual core/indexes indexes + */ + public static Bson ascending(final String... fieldNames) { + return ascending(asList(fieldNames)); + } + + /** + * Create an index key for an ascending index on the given fields. + * + * @param fieldNames the field names, which must contain at least one + * @return the index specification + * @mongodb.driver.manual core/indexes indexes + */ + public static Bson ascending(final List fieldNames) { + notNull("fieldNames", fieldNames); + return compoundIndex(fieldNames, new BsonInt32(1)); + } + + /** + * Create an index key for an descending index on the given fields. + * + * @param fieldNames the field names, which must contain at least one + * @return the index specification + * @mongodb.driver.manual core/indexes indexes + */ + public static Bson descending(final String... fieldNames) { + return descending(asList(fieldNames)); + } + + /** + * Create an index key for an descending index on the given fields. + * + * @param fieldNames the field names, which must contain at least one + * @return the index specification + * @mongodb.driver.manual core/indexes indexes + */ + public static Bson descending(final List fieldNames) { + notNull("fieldNames", fieldNames); + return compoundIndex(fieldNames, new BsonInt32(-1)); + } + + /** + * Create an index key for an 2dsphere index on the given fields. + * + * @param fieldNames the field names, which must contain at least one + * @return the index specification + * @mongodb.driver.manual core/2dsphere 2dsphere Index + */ + public static Bson geo2dsphere(final String... fieldNames) { + return geo2dsphere(asList(fieldNames)); + } + + /** + * Create an index key for an 2dsphere index on the given fields. + * + * @param fieldNames the field names, which must contain at least one + * @return the index specification + * @mongodb.driver.manual core/indexes indexes + */ + public static Bson geo2dsphere(final List fieldNames) { + notNull("fieldNames", fieldNames); + return compoundIndex(fieldNames, new BsonString("2dsphere")); + } + + /** + * Create an index key for a 2d index on the given field. + * + *

+ * <p>Note: A 2d index is for data stored as points on a two-dimensional plane.
+ * The 2d index is intended for legacy coordinate pairs used in MongoDB 2.2 and earlier.</p>
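A sketch contrasting the two geo index kinds, under the same assumptions as above (an existing collection; illustrative field names):

    collection.createIndex(Indexes.geo2d("legacyLoc"));      // legacy [x, y] coordinate pairs
    collection.createIndex(Indexes.geo2dsphere("location")); // GeoJSON geometries on a sphere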
+ * + * @param fieldName the field to create a 2d index on + * @return the index specification + * @mongodb.driver.manual core/2d 2d index + */ + public static Bson geo2d(final String fieldName) { + notNull("fieldName", fieldName); + return new BsonDocument(fieldName, new BsonString("2d")); + } + + /** + * Create an index key for a text index on the given field. + * + * @param fieldName the field to create a text index on + * @return the index specification + * @mongodb.driver.manual core/text text index + */ + public static Bson text(final String fieldName) { + notNull("fieldName", fieldName); + return new BsonDocument(fieldName, new BsonString("text")); + } + + /** + * Create an index key for a text index on every field that contains string data. + * + * @return the index specification + * @mongodb.driver.manual core/text text index + */ + public static Bson text() { + return text("$**"); + } + + /** + * Create an index key for a hashed index on the given field. + * + * @param fieldName the field to create a hashed index on + * @return the index specification + * @mongodb.driver.manual core/hashed hashed index + */ + public static Bson hashed(final String fieldName) { + notNull("fieldName", fieldName); + return new BsonDocument(fieldName, new BsonString("hashed")); + } + + /** + * create a compound index specifications. If any field names are repeated, the last one takes precedence. + * + * @param indexes the index specifications + * @return the compound index specification + * @mongodb.driver.manual core/index-compound compoundIndex + */ + public static Bson compoundIndex(final Bson... indexes) { + return compoundIndex(asList(indexes)); + } + + /** + * compound multiple index specifications. If any field names are repeated, the last one takes precedence. 
+ * + * @param indexes the index specifications + * @return the compound index specification + * @mongodb.driver.manual core/index-compound compoundIndex + */ + public static Bson compoundIndex(final List indexes) { + return new CompoundIndex(indexes); + } + + private static Bson compoundIndex(final List fieldNames, final BsonValue value) { + BsonDocument document = new BsonDocument(); + for (String fieldName : fieldNames) { + document.append(fieldName, value); + } + return document; + } + + private static class CompoundIndex implements Bson { + private final List indexes; + + CompoundIndex(final List indexes) { + notNull("indexes", indexes); + this.indexes = indexes; + } + + @Override + public BsonDocument toBsonDocument(final Class documentClass, final CodecRegistry codecRegistry) { + BsonDocument compoundIndex = new BsonDocument(); + for (Bson index : indexes) { + BsonDocument indexDocument = index.toBsonDocument(documentClass, codecRegistry); + for (String key : indexDocument.keySet()) { + compoundIndex.append(key, indexDocument.get(key)); + } + } + return compoundIndex; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CompoundIndex that = (CompoundIndex) o; + + return indexes.equals(that.indexes); + } + + @Override + public int hashCode() { + return indexes.hashCode(); + } + } +} + diff --git a/driver-core/src/main/com/mongodb/client/model/InsertManyOptions.java b/driver-core/src/main/com/mongodb/client/model/InsertManyOptions.java new file mode 100644 index 00000000000..631d35e62ef --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/InsertManyOptions.java @@ -0,0 +1,131 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.lang.Nullable; +import org.bson.BsonString; +import org.bson.BsonValue; + +/** + * The options to apply to an operation that inserts multiple documents into a collection. + * + * @since 3.0 + * @mongodb.driver.manual tutorial/insert-documents/ Insert Tutorial + * @mongodb.driver.manual reference/command/insert/ Insert Command + */ +public final class InsertManyOptions { + private boolean ordered = true; + private Boolean bypassDocumentValidation; + private BsonValue comment; + + /** + * Gets whether the documents should be inserted in the order provided, stopping on the first failed insertion. The default is true. + * If false, the server will attempt to insert all the documents regardless of an failures. + * + * @return whether the documents should be inserted in order + */ + public boolean isOrdered() { + return ordered; + } + + /** + * Sets whether the server should insert the documents in the order provided. 
+ * + * @param ordered true if documents should be inserted in order + * @return this + */ + public InsertManyOptions ordered(final boolean ordered) { + this.ordered = ordered; + return this; + } + + /** + * Gets the bypass document level validation flag + * + * @return the bypass document level validation flag + * @since 3.2 + * @mongodb.server.release 3.2 + */ + @Nullable + public Boolean getBypassDocumentValidation() { + return bypassDocumentValidation; + } + + /** + * Sets the bypass document level validation flag. + * + *

+ * <p>For bulk operations use: {@link BulkWriteOptions#bypassDocumentValidation(Boolean)}</p>
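A hedged sketch combining these options on one insertMany call (not part of this patch); collection and docs are assumed to exist, and the comment text is invented.

    collection.insertMany(docs, new InsertManyOptions()
            .ordered(false)                  // try every document even if some fail
            .bypassDocumentValidation(true)  // MongoDB 3.2+
            .comment("nightly-import"));     // MongoDB 4.4+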
+ * + * @param bypassDocumentValidation If true, allows the write to opt-out of document level validation. + * @return this + * @since 3.2 + * @mongodb.server.release 3.2 + */ + public InsertManyOptions bypassDocumentValidation(@Nullable final Boolean bypassDocumentValidation) { + this.bypassDocumentValidation = bypassDocumentValidation; + return this; + } + + /** + * @return the comment for this operation. A null value means no comment is set. + * @since 4.6 + * @mongodb.server.release 4.4 + */ + @Nullable + public BsonValue getComment() { + return comment; + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + *

+ * <p>For bulk operations use: {@link BulkWriteOptions#comment(String)}</p>
+ * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + public InsertManyOptions comment(@Nullable final String comment) { + this.comment = comment != null ? new BsonString(comment) : null; + return this; + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + *

+ * <p>For bulk operations use: {@link BulkWriteOptions#comment(BsonValue)}</p>
+ * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + public InsertManyOptions comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + @Override + public String toString() { + return "InsertManyOptions{" + + "ordered=" + ordered + + ", bypassDocumentValidation=" + bypassDocumentValidation + + ", comment=" + comment + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/InsertOneModel.java b/driver-core/src/main/com/mongodb/client/model/InsertOneModel.java new file mode 100644 index 00000000000..0f2dcd993b4 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/InsertOneModel.java @@ -0,0 +1,55 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A model describing an insert of a single document. + * + * @since 3.0 + * @mongodb.driver.manual tutorial/insert-documents/ Insert + * @param the type of document to insert. This can be of any type for which a {@code Codec} is registered + */ +public final class InsertOneModel extends WriteModel { + private final T document; + + /** + * Construct a new instance. + * + * @param document the document to insert, which may not be null. + */ + public InsertOneModel(final T document) { + this.document = notNull("document", document); + } + + /** + * Gets the document to insert. + * + * @return the document to insert + */ + public T getDocument() { + return document; + } + + @Override + public String toString() { + return "InsertOneModel{" + + "document=" + document + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/InsertOneOptions.java b/driver-core/src/main/com/mongodb/client/model/InsertOneOptions.java new file mode 100644 index 00000000000..d3dcc25fbf1 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/InsertOneOptions.java @@ -0,0 +1,105 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.lang.Nullable; +import org.bson.BsonString; +import org.bson.BsonValue; + +/** + * The options to apply to an operation that inserts a single document into a collection. 
+ * + * @since 3.2 + * @mongodb.server.release 3.2 + * @mongodb.driver.manual tutorial/insert-documents/ Insert Tutorial + * @mongodb.driver.manual reference/command/insert/ Insert Command + */ +public final class InsertOneOptions { + private Boolean bypassDocumentValidation; + private BsonValue comment; + + /** + * Gets the bypass document level validation flag + * + * @return the bypass document level validation flag + */ + @Nullable + public Boolean getBypassDocumentValidation() { + return bypassDocumentValidation; + } + + /** + * Sets the bypass document level validation flag. + * + *

+ * <p>For bulk operations use: {@link BulkWriteOptions#bypassDocumentValidation(Boolean)}</p>
+ * + * @param bypassDocumentValidation If true, allows the write to opt-out of document level validation. + * @return this + */ + public InsertOneOptions bypassDocumentValidation(@Nullable final Boolean bypassDocumentValidation) { + this.bypassDocumentValidation = bypassDocumentValidation; + return this; + } + + /** + * @return the comment for this operation. A null value means no comment is set. + * @since 4.6 + * @mongodb.server.release 4.4 + */ + @Nullable + public BsonValue getComment() { + return comment; + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + *

+ * <p>For bulk operations use: {@link BulkWriteOptions#comment(String)}</p>
+ * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + public InsertOneOptions comment(@Nullable final String comment) { + this.comment = comment != null ? new BsonString(comment) : null; + return this; + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + *

+ * <p>For bulk operations use: {@link BulkWriteOptions#comment(BsonValue)}</p>
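A one-line sketch of the BsonValue overload, equivalent to the String overload above; collection and doc are assumed.

    collection.insertOne(doc, new InsertOneOptions().comment(new BsonString("signup-flow")));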
+ * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + public InsertOneOptions comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + @Override + public String toString() { + return "InsertOneOptions{" + + "bypassDocumentValidation=" + bypassDocumentValidation + + ", comment=" + comment + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/MapReduceAction.java b/driver-core/src/main/com/mongodb/client/model/MapReduceAction.java new file mode 100644 index 00000000000..4919733ea0d --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/MapReduceAction.java @@ -0,0 +1,64 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + + + +/** + * The map reduce to collection actions. + * + *

+ * <p>These actions are only available when passing out a collection that already exists. This option is not available on secondary
+ * members of replica sets. The Enum values dictate what to do with the output collection if it already exists when the map reduce is run.</p>
+ * + * @since 3.0 + * @mongodb.driver.manual reference/command/mapReduce/ mapReduce Command + * @mongodb.driver.manual core/map-reduce/ mapReduce Overview + * @deprecated Superseded by aggregate + */ +@Deprecated +public enum MapReduceAction { + /** + * Replace the contents of the {@code collectionName} if the collection with the {@code collectionName} exists. + */ + REPLACE("replace"), + + /** + * Merge the new result with the existing result if the output collection already exists. If an existing document has the same key + * as the new result, overwrite that existing document. + */ + MERGE("merge"), + + /** + * Merge the new result with the existing result if the output collection already exists. If an existing document has the same key + * as the new result, apply the reduce function to both the new and the existing documents and overwrite the existing document with + * the result. + */ + REDUCE("reduce"); + + private final String value; + + MapReduceAction(final String value) { + this.value = value; + } + + /** + * @return the String representation of this Action that the MongoDB server understands + */ + public String getValue() { + return value; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/MergeOptions.java b/driver-core/src/main/com/mongodb/client/model/MergeOptions.java new file mode 100644 index 00000000000..ab63abe664f --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/MergeOptions.java @@ -0,0 +1,262 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import org.bson.conversions.Bson; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +/** + * Options to control the behavior of the $merge aggregation stage + * + * @mongodb.driver.manual reference/operator/aggregation/merge/ $merge stage + * @mongodb.server.release 4.2 + * @see Aggregates#merge(String, MergeOptions) + * @see Aggregates#merge(com.mongodb.MongoNamespace, MergeOptions) + * @since 3.11 + */ +public final class MergeOptions { + + /** + * The behavior of $merge if a result document and an existing document in the collection have the same value for the specified on + * field(s). + */ + public enum WhenMatched { + /** + * Replace the existing document in the output collection with the matching results document. + */ + REPLACE, + + /** + * Keep the existing document in the output collection. + */ + KEEP_EXISTING, + + /** + * Merge the matching documents + */ + MERGE, + + /** + * An aggregation pipeline to update the document in the collection. + * + * @see #whenMatchedPipeline(List) + */ + PIPELINE, + + /** + * Stop and fail the aggregation operation. Any changes to the output collection from previous documents are not reverted. + */ + FAIL, + } + + /** + * The behavior of $merge if a result document does not match an existing document in the out collection. + */ + public enum WhenNotMatched { + /** + * Insert the document into the output collection. 
+ */ + INSERT, + + /** + * Discard the document; i.e. $merge does not insert the document into the output collection. + */ + DISCARD, + + /** + * Stop and fail the aggregation operation. Any changes to the output collection from previous documents are not reverted. + */ + FAIL + } + + private List uniqueIdentifier; + private WhenMatched whenMatched; + private List> variables; + private List whenMatchedPipeline; + private WhenNotMatched whenNotMatched; + + /** + * Gets the fields that act as a unique identifier for a document. The identifier determine if a results document matches an + * already existing document in the output collection. + * + * @return the unique identifier + */ + public List getUniqueIdentifier() { + return uniqueIdentifier; + } + + /** + * Sets the field that act as a unique identifier for a document. The identifier determine if a results document matches an + * already existing document in the output collection. + * + * @param uniqueIdentifier the unique identifier + * @return this + */ + public MergeOptions uniqueIdentifier(final String uniqueIdentifier) { + this.uniqueIdentifier = Collections.singletonList(uniqueIdentifier); + return this; + } + + /** + * Sets the field that act as a unique identifier for a document. The identifier determine if a results document matches an + * already existing document in the output collection. + * + * @param uniqueIdentifier the unique identifier + * @return this + */ + public MergeOptions uniqueIdentifier(final List uniqueIdentifier) { + this.uniqueIdentifier = uniqueIdentifier; + return this; + } + + /** + * Gets the behavior of $merge if a result document and an existing document in the collection have the same value for the specified + * on field(s). + * + * @return when matched + */ + public WhenMatched getWhenMatched() { + return whenMatched; + } + + /** + * Sets the behavior of $merge if a result document and an existing document in the collection have the same value for the specified + * on field(s). + * + * @param whenMatched when matched + * @return this + */ + public MergeOptions whenMatched(final WhenMatched whenMatched) { + this.whenMatched = whenMatched; + return this; + } + + /** + * Gets the variables accessible for use in the whenMatched pipeline + * @return the variables + */ + public List> getVariables() { + return variables; + } + + /** + * Sets the variables accessible for use in the whenMatched pipeline. + * + * @param variables the variables + * @return this + */ + public MergeOptions variables(final List> variables) { + this.variables = variables; + return this; + } + + /** + * Gets aggregation pipeline to update the document in the collection. + * + * @return when matched pipeline + * @see WhenMatched#PIPELINE + */ + public List getWhenMatchedPipeline() { + return whenMatchedPipeline; + } + + /** + * Sets aggregation pipeline to update the document in the collection. + * + * @param whenMatchedPipeline when matched pipeline + * @return this + * @see WhenMatched#PIPELINE + */ + public MergeOptions whenMatchedPipeline(final List whenMatchedPipeline) { + this.whenMatchedPipeline = whenMatchedPipeline; + return this; + } + + /** + * Gets the behavior of $merge if a result document does not match an existing document in the out collection. + * + * @return when not matched + */ + public WhenNotMatched getWhenNotMatched() { + return whenNotMatched; + } + + /** + * Sets the behavior of $merge if a result document does not match an existing document in the out collection. 
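A sketch of these options driving Aggregates.merge (MongoDB 4.2+), not part of this patch; the source collection, the target collection name, and the "ticker" field are assumptions.

    source.aggregate(Arrays.asList(
            Aggregates.merge("quarterlyReport",
                    new MergeOptions()
                            .uniqueIdentifier("ticker")                           // the "on" field(s)
                            .whenMatched(MergeOptions.WhenMatched.KEEP_EXISTING)
                            .whenNotMatched(MergeOptions.WhenNotMatched.INSERT))))
            .toCollection(); // runs the pipeline; $merge writes the results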
+ * + * @param whenNotMatched when not matched + * @return this + */ + public MergeOptions whenNotMatched(final WhenNotMatched whenNotMatched) { + this.whenNotMatched = whenNotMatched; + return this; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + MergeOptions that = (MergeOptions) o; + + if (!Objects.equals(uniqueIdentifier, that.uniqueIdentifier)) { + return false; + } + if (whenMatched != that.whenMatched) { + return false; + } + if (!Objects.equals(variables, that.variables)) { + return false; + } + if (!Objects.equals(whenMatchedPipeline, that.whenMatchedPipeline)) { + return false; + } + if (whenNotMatched != that.whenNotMatched) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = uniqueIdentifier != null ? uniqueIdentifier.hashCode() : 0; + result = 31 * result + (whenMatched != null ? whenMatched.hashCode() : 0); + result = 31 * result + (variables != null ? variables.hashCode() : 0); + result = 31 * result + (whenMatchedPipeline != null ? whenMatchedPipeline.hashCode() : 0); + result = 31 * result + (whenNotMatched != null ? whenNotMatched.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "MergeOptions{" + + "uniqueIdentifier=" + uniqueIdentifier + + ", whenMatched=" + whenMatched + + ", variables=" + variables + + ", whenMatchedPipeline=" + whenMatchedPipeline + + ", whenNotMatched=" + whenNotMatched + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/MongoTimeUnit.java b/driver-core/src/main/com/mongodb/client/model/MongoTimeUnit.java new file mode 100644 index 00000000000..26e5c7dbac4 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/MongoTimeUnit.java @@ -0,0 +1,93 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model; + +import org.bson.conversions.Bson; +import com.mongodb.client.model.densify.DensifyRange; + +/** + * Units for specifying time-based values. 
+ * + * @see Windows + * @see WindowOutputFields + * @see DensifyRange + * @mongodb.server.release 5.0 + * @since 4.3 + */ +public enum MongoTimeUnit { + /** + * {@linkplain #value() "year"} + */ + YEAR("year", false), + /** + * {@linkplain #value() "quarter"} + */ + QUARTER("quarter", false), + /** + * {@linkplain #value() "month"} + */ + MONTH("month", false), + /** + * {@linkplain #value() "week"} + */ + WEEK("week", true), + /** + * {@linkplain #value() "day"} + */ + DAY("day", true), + /** + * {@linkplain #value() "hour"} + */ + HOUR("hour", true), + /** + * {@linkplain #value() "minute"} + */ + MINUTE("minute", true), + /** + * {@linkplain #value() "second"} + */ + SECOND("second", true), + /** + * {@linkplain #value() "millisecond"} + */ + MILLISECOND("millisecond", true); + + private final String value; + private final boolean fixed; + + MongoTimeUnit(final String value, final boolean fixed) { + this.value = value; + this.fixed = fixed; + } + + /** + * Returns a {@link String} representation of the unit, which may be useful when using methods like + * {@link Windows#of(Bson)}, {@link DensifyRange#of(Bson)}. + * + * @return A {@link String} representation of the unit. + */ + public String value() { + return value; + } + + /** + * Returns {@code true} iff the unit represents a fixed duration. + * E.g., a minute is a fixed duration equal to 60_000 milliseconds, while the duration of a month varies. + */ + boolean fixed() { + return fixed; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/Projections.java b/driver-core/src/main/com/mongodb/client/model/Projections.java new file mode 100644 index 00000000000..470c3cb7e4a --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/Projections.java @@ -0,0 +1,395 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.client.model.search.FieldSearchPath; +import com.mongodb.client.model.search.SearchCollector; +import com.mongodb.client.model.search.SearchCount; +import com.mongodb.client.model.search.SearchOperator; +import com.mongodb.client.model.search.SearchOptions; +import com.mongodb.client.model.search.VectorSearchOptions; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.List; +import java.util.Objects; + +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.Arrays.asList; + +/** + * A factory for projections. A convenient way to use this class is to statically import all of its methods, which allows usage like: + * + *
+ * <blockquote><pre>
+ *    collection.find().projection(fields(include("x", "y"), excludeId()))
+ * </pre></blockquote>
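The snippet above, sketched in a fuller (assumed) context with a MongoCollection<Document> named collection and static imports from Projections:

    Document first = collection.find()
            .projection(fields(include("x", "y"), excludeId()))
            .first(); // only x and y come back, without _id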
+ * + * @mongodb.driver.manual tutorial/project-fields-from-query-results/#limit-fields-to-return-from-a-query Projection + * @since 3.0 + */ +public final class Projections { + private Projections() { + } + + /** + * Creates a projection of a field whose value is computed from the given expression. Projection with an expression can be used in the + * following contexts: + *
    + *
+ * <ul>
+ *  <li>$project aggregation pipeline stage.</li>
+ *  <li>Starting from MongoDB 4.4, it's also accepted in various find-related methods within the
+ *  {@code MongoCollection}-based API where projection is supported, for example:
+ *  <ul>
+ *   <li>{@code find()}</li>
+ *   <li>{@code findOneAndReplace()}</li>
+ *   <li>{@code findOneAndUpdate()}</li>
+ *   <li>{@code findOneAndDelete()}</li>
+ *  </ul>
+ *  </li>
+ * </ul>
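A sketch of a computed projection used directly in find(), which the list above says is accepted starting with MongoDB 4.4; the field names and the $multiply expression are illustrative, and collection is assumed.

    collection.find()
            .projection(Projections.fields(
                    Projections.include("name"),
                    Projections.computed("area",
                            new Document("$multiply", Arrays.asList("$width", "$height")))))
            .first();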
+ * + * @param fieldName the field name + * @param expression the expression + * @param the expression type + * @return the projection + * @see #computedSearchMeta(String) + * @see Aggregates#project(Bson) + */ + public static Bson computed(final String fieldName, final TExpression expression) { + return new SimpleExpression<>(fieldName, expression); + } + + /** + * Creates a projection of a field whose value is equal to the {@code $$SEARCH_META} variable, + * for use with {@link Aggregates#search(SearchOperator, SearchOptions)} / {@link Aggregates#search(SearchCollector, SearchOptions)}. + * Calling this method is equivalent to calling {@link #computed(String, Object)} with {@code "$$SEARCH_META"} as the second argument. + * + * @param fieldName the field name + * @return the projection + * @see SearchCount + * @see SearchCollector + * @since 4.7 + */ + public static Bson computedSearchMeta(final String fieldName) { + return computed(fieldName, "$$SEARCH_META"); + } + + /** + * Creates a projection that includes all of the given fields. + * + * @param fieldNames the field names + * @return the projection + */ + public static Bson include(final String... fieldNames) { + return include(asList(fieldNames)); + } + + /** + * Creates a projection that includes all of the given fields. + * + * @param fieldNames the field names + * @return the projection + */ + public static Bson include(final List fieldNames) { + return combine(fieldNames, new BsonInt32(1)); + } + + /** + * Creates a projection that excludes all of the given fields. + * + * @param fieldNames the field names + * @return the projection + */ + public static Bson exclude(final String... fieldNames) { + return exclude(asList(fieldNames)); + } + + /** + * Creates a projection that excludes all of the given fields. + * + * @param fieldNames the field names + * @return the projection + */ + public static Bson exclude(final List fieldNames) { + return combine(fieldNames, new BsonInt32(0)); + } + + /** + * Creates a projection that excludes the _id field. This suppresses the automatic inclusion of _id that is the default, even when + * other fields are explicitly included. + * + * @return the projection + */ + public static Bson excludeId() { + return new BsonDocument("_id", new BsonInt32(0)); + } + + /** + * Creates a projection that includes for the given field only the first element of an array that matches the query filter. This is + * referred to as the positional $ operator. + * + * @param fieldName the field name whose value is the array + * @return the projection + * @mongodb.driver.manual reference/operator/projection/positional/#projection Project the first matching element ($ operator) + */ + public static Bson elemMatch(final String fieldName) { + return new BsonDocument(fieldName + ".$", new BsonInt32(1)); + } + + /** + * Creates a projection that includes for the given field only the first element of the array value of that field that matches the given + * query filter. + * + * @param fieldName the field name + * @param filter the filter to apply + * @return the projection + * @mongodb.driver.manual reference/operator/projection/elemMatch elemMatch + */ + public static Bson elemMatch(final String fieldName, final Bson filter) { + return new ElemMatchFilterProjection(fieldName, filter); + } + + /** + * Creates a $meta projection to the given field name for the given meta field name. 
+ * + * @param fieldName the field name + * @param metaFieldName the meta field name + * @return the projection + * @mongodb.driver.manual reference/operator/aggregation/meta/ + * @since 4.1 + * @see #metaTextScore(String) + * @see #metaSearchScore(String) + * @see #metaVectorSearchScore(String) + * @see #metaSearchHighlights(String) + */ + public static Bson meta(final String fieldName, final String metaFieldName) { + return new BsonDocument(fieldName, new BsonDocument("$meta", new BsonString(metaFieldName))); + } + + /** + * Creates a projection to the given field name of the textScore, for use with text queries. + * Calling this method is equivalent to calling {@link #meta(String, String)} with {@code "textScore"} as the second argument. + * + * @param fieldName the field name + * @return the projection + * @see Filters#text(String, TextSearchOptions) + * @mongodb.driver.manual reference/operator/aggregation/meta/#text-score-metadata--meta---textscore- textScore + */ + public static Bson metaTextScore(final String fieldName) { + return meta(fieldName, "textScore"); + } + + /** + * Creates a projection to the given field name of the searchScore, + * for use with {@link Aggregates#search(SearchOperator, SearchOptions)} / {@link Aggregates#search(SearchCollector, SearchOptions)}. + * Calling this method is equivalent to calling {@link #meta(String, String)} with {@code "searchScore"} as the second argument. + * + * @param fieldName the field name + * @return the projection + * @mongodb.atlas.manual atlas-search/scoring/ Scoring + * @since 4.7 + */ + public static Bson metaSearchScore(final String fieldName) { + return meta(fieldName, "searchScore"); + } + + /** + * Creates a projection to the given field name of the vectorSearchScore, + * for use with {@link Aggregates#vectorSearch(FieldSearchPath, Iterable, String, long, VectorSearchOptions)} . + * Calling this method is equivalent to calling {@link #meta(String, String)} with {@code "vectorSearchScore"} as the second argument. + * + * @param fieldName the field name + * @return the projection + * @mongodb.atlas.manual atlas-search/scoring/ Scoring + * @mongodb.server.release 6.0.10 + * @since 4.11 + */ + public static Bson metaVectorSearchScore(final String fieldName) { + return meta(fieldName, "vectorSearchScore"); + } + + /** + * Creates a projection to the given field name of the searchHighlights, + * for use with {@link Aggregates#search(SearchOperator, SearchOptions)} / {@link Aggregates#search(SearchCollector, SearchOptions)}. + * Calling this method is equivalent to calling {@link #meta(String, String)} with {@code "searchHighlights"} as the second argument. + * + * @param fieldName the field name + * @return the projection + * @see com.mongodb.client.model.search.SearchHighlight + * @mongodb.atlas.manual atlas-search/highlighting/ Highlighting + * @since 4.7 + */ + public static Bson metaSearchHighlights(final String fieldName) { + return meta(fieldName, "searchHighlights"); + } + + /** + * Creates a projection to the given field name of a slice of the array value of that field. + * + * @param fieldName the field name + * @param limit the number of elements to project. + * @return the projection + * @mongodb.driver.manual reference/operator/projection/slice Slice + */ + public static Bson slice(final String fieldName, final int limit) { + return new BsonDocument(fieldName, new BsonDocument("$slice", new BsonInt32(limit))); + } + + /** + * Creates a projection to the given field name of a slice of the array value of that field. 
+ * + * @param fieldName the field name + * @param skip the number of elements to skip before applying the limit + * @param limit the number of elements to project + * @return the projection + * @mongodb.driver.manual reference/operator/projection/slice Slice + */ + public static Bson slice(final String fieldName, final int skip, final int limit) { + return new BsonDocument(fieldName, new BsonDocument("$slice", new BsonArray(asList(new BsonInt32(skip), new BsonInt32(limit))))); + } + + /** + * Creates a projection that combines the list of projections into a single one. If there are duplicate keys, the last one takes + * precedence. + * + * @param projections the list of projections to combine + * @return the combined projection + */ + public static Bson fields(final Bson... projections) { + return fields(asList(projections)); + } + + /** + * Creates a projection that combines the list of projections into a single one. If there are duplicate keys, the last one takes + * precedence. + * + * @param projections the list of projections to combine + * @return the combined projection + */ + public static Bson fields(final List projections) { + notNull("projections", projections); + return new FieldsProjection(projections); + } + + private static class FieldsProjection implements Bson { + private final List projections; + + FieldsProjection(final List projections) { + this.projections = projections; + } + + @Override + public BsonDocument toBsonDocument(final Class documentClass, final CodecRegistry codecRegistry) { + BsonDocument combinedDocument = new BsonDocument(); + for (Bson sort : projections) { + BsonDocument sortDocument = sort.toBsonDocument(documentClass, codecRegistry); + for (String key : sortDocument.keySet()) { + combinedDocument.remove(key); + combinedDocument.append(key, sortDocument.get(key)); + } + } + return combinedDocument; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + FieldsProjection that = (FieldsProjection) o; + + return Objects.equals(projections, that.projections); + } + + @Override + public int hashCode() { + return projections != null ? projections.hashCode() : 0; + } + + @Override + public String toString() { + return "Projections{" + + "projections=" + projections + + '}'; + } + } + + + private static class ElemMatchFilterProjection implements Bson { + private final String fieldName; + private final Bson filter; + + ElemMatchFilterProjection(final String fieldName, final Bson filter) { + this.fieldName = fieldName; + this.filter = filter; + } + + @Override + public BsonDocument toBsonDocument(final Class documentClass, final CodecRegistry codecRegistry) { + return new BsonDocument(fieldName, new BsonDocument("$elemMatch", filter.toBsonDocument(documentClass, codecRegistry))); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ElemMatchFilterProjection that = (ElemMatchFilterProjection) o; + + if (!Objects.equals(fieldName, that.fieldName)) { + return false; + } + return Objects.equals(filter, that.filter); + } + + @Override + public int hashCode() { + int result = fieldName != null ? fieldName.hashCode() : 0; + result = 31 * result + (filter != null ? 
filter.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "ElemMatch Projection{" + + "fieldName='" + fieldName + '\'' + + ", filter=" + filter + + '}'; + } + } + + private static Bson combine(final List fieldNames, final BsonValue value) { + BsonDocument document = new BsonDocument(); + for (String fieldName : fieldNames) { + document.remove(fieldName); + document.append(fieldName, value); + } + return document; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/PushOptions.java b/driver-core/src/main/com/mongodb/client/model/PushOptions.java new file mode 100644 index 00000000000..17a876af522 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/PushOptions.java @@ -0,0 +1,180 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +import java.util.Objects; + +/** + * The options to apply to a $push update operator. + * + * @mongodb.driver.manual reference/operator/update/push/ $push + * @see Updates#pushEach(String, java.util.List, PushOptions) + * @since 3.1 + */ +public class PushOptions { + private Integer position; + private Integer slice; + private Integer sort; + private Bson sortDocument; + + /** + * Gets the position at which to add the pushed values in the array. + * + * @return the position, which may be null + * @mongodb.driver.manual reference/operator/update/position/ $position + */ + @Nullable + public Integer getPosition() { + return position; + } + + /** + * Sets the position at which to add the pushed values in the array. + * + * @param position the position + * @return this + * @mongodb.driver.manual reference/operator/update/position/ $position + */ + public PushOptions position(@Nullable final Integer position) { + this.position = position; + return this; + } + + /** + * Gets the slice value, which is the limit on the number of array elements allowed. + * + * @return the slice value representing the limit on the number of array elements allowed + * @mongodb.driver.manual reference/operator/update/slice/ $slice + */ + @Nullable + public Integer getSlice() { + return slice; + } + + /** + * Sets the limit on the number of array elements allowed. + * + * @param slice the limit + * @return this + * @mongodb.driver.manual reference/operator/update/slice/ $slice + */ + public PushOptions slice(@Nullable final Integer slice) { + this.slice = slice; + return this; + } + + /** + * Gets the sort direction for sorting array elements that are not documents. + * + * @return the sort direction + * @mongodb.driver.manual reference/operator/update/sort/ $sort + * @mongodb.driver.manual reference/operator/update/sort/#sort-array-elements-that-are-not-documents + */ + @Nullable + public Integer getSort() { + return sort; + } + + /** + * Sets the sort direction for sorting array elements that are not documents. 
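A sketch of PushOptions applied through Updates.pushEach (not part of this patch); collection and the _id value are assumed. Note that sort and sortDocument are mutually exclusive, as the setters below enforce.

    collection.updateOne(Filters.eq("_id", 1),
            Updates.pushEach("scores", Arrays.asList(89, 94),
                    new PushOptions().sort(-1).slice(5))); // sort descending, keep at most 5 elements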
+ * + * @param sort the sort direction + * @return this + * @throws IllegalStateException if sortDocument property is already set + * @mongodb.driver.manual reference/operator/update/sort/ $sort + * @mongodb.driver.manual reference/operator/update/sort/#sort-array-elements-that-are-not-documents + */ + public PushOptions sort(@Nullable final Integer sort) { + if (sortDocument != null) { + throw new IllegalStateException("sort can not be set if sortDocument already is"); + } + this.sort = sort; + return this; + } + + /** + * Gets the sort direction for sorting array elements that are documents. + * + * @return the sort document + * @mongodb.driver.manual reference/operator/update/sort/ $sort + */ + @Nullable + public Bson getSortDocument() { + return sortDocument; + } + + /** + * Sets the sort direction for sorting array elements that are documents. + * + * @param sortDocument the sort document + * @return this + * @throws IllegalStateException if sort property is already set + * @mongodb.driver.manual reference/operator/update/sort/ $sort + */ + public PushOptions sortDocument(@Nullable final Bson sortDocument) { + if (sort != null) { + throw new IllegalStateException("sortDocument can not be set if sort already is"); + } + this.sortDocument = sortDocument; + return this; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + PushOptions that = (PushOptions) o; + + if (!Objects.equals(position, that.position)) { + return false; + } + if (!Objects.equals(slice, that.slice)) { + return false; + } + if (!Objects.equals(sort, that.sort)) { + return false; + } + return Objects.equals(sortDocument, that.sortDocument); + } + + @Override + public int hashCode() { + int result = position != null ? position.hashCode() : 0; + result = 31 * result + (slice != null ? slice.hashCode() : 0); + result = 31 * result + (sort != null ? sort.hashCode() : 0); + result = 31 * result + (sortDocument != null ? sortDocument.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "Push Options{" + + "position=" + position + + ", slice=" + slice + + ((sort == null) ? "" : ", sort=" + sort) + + ((sortDocument == null) ? "" : ", sortDocument=" + sortDocument) + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/QuantileMethod.java b/driver-core/src/main/com/mongodb/client/model/QuantileMethod.java new file mode 100644 index 00000000000..190b6b0ba0c --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/QuantileMethod.java @@ -0,0 +1,76 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.client.model; + +import com.mongodb.annotations.Sealed; +import org.bson.BsonString; +import org.bson.BsonValue; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * This interface represents a quantile method used in quantile accumulators of the {@code $group} and + * {@code $setWindowFields} stages. + *

+ * <p>It provides methods for creating and converting quantile methods to {@link BsonValue}.</p>
+ * + * @see Accumulators#percentile(String, Object, Object, QuantileMethod) + * @see Accumulators#median(String, Object, QuantileMethod) + * @see WindowOutputFields#percentile(String, Object, Object, QuantileMethod, Window) + * @see WindowOutputFields#median(String, Object, QuantileMethod, Window) + * @since 4.10 + * @mongodb.server.release 7.0 + */ +@Sealed +public interface QuantileMethod { + /** + * Returns a {@link QuantileMethod} instance representing the "approximate" quantile method. + * + * @return The requested {@link QuantileMethod}. + */ + static ApproximateQuantileMethod approximate() { + return new QuantileMethodBson(new BsonString("approximate")); + } + + /** + * Creates a {@link QuantileMethod} from a {@link BsonValue} in situations when there is no builder method + * that better satisfies your needs. + * This method cannot be used to validate the syntax. + *

+ * <p>
+ * <b>Example</b><br>
+ * The following code creates two functionally equivalent {@link QuantileMethod}s,
+ * though they may not be {@linkplain Object#equals(Object) equal}.
+ * <pre>{@code
+     *  QuantileMethod method1 = QuantileMethod.approximate();
+     *  QuantileMethod method2 = QuantileMethod.of(new BsonString("approximate"));
+     * }</pre>
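A sketch of the approximate method feeding a $percentile accumulator (MongoDB 7.0+), not part of this patch; collection and the $score field are assumptions.

    collection.aggregate(Arrays.asList(
            Aggregates.group(null,
                    Accumulators.percentile("p", "$score",
                            Arrays.asList(0.5, 0.95), QuantileMethod.approximate()))))
            .first(); // one document whose "p" holds the median and 95th percentile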
+ * + * @param method A {@link BsonValue} representing the required {@link QuantileMethod}. + * @return The requested {@link QuantileMethod}. + */ + static QuantileMethod of(final BsonValue method) { + notNull("method", method); + return new QuantileMethodBson(method); + } + + /** + * Converts this object to {@link BsonValue}. + * + * @return A {@link BsonValue} representing this {@link QuantileMethod}. + */ + BsonValue toBsonValue(); +} diff --git a/driver-core/src/main/com/mongodb/client/model/QuantileMethodBson.java b/driver-core/src/main/com/mongodb/client/model/QuantileMethodBson.java new file mode 100644 index 00000000000..2aef1b4a930 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/QuantileMethodBson.java @@ -0,0 +1,50 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model; + +import org.bson.BsonValue; + +import java.util.Objects; + +final class QuantileMethodBson implements ApproximateQuantileMethod { + private final BsonValue bsonValue; + + QuantileMethodBson(final BsonValue bsonValue) { + this.bsonValue = bsonValue; + } + + @Override + public BsonValue toBsonValue() { + return bsonValue; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + QuantileMethodBson that = (QuantileMethodBson) o; + return Objects.equals(bsonValue, that.bsonValue); + } + + @Override + public int hashCode() { + return Objects.hash(bsonValue); + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/RenameCollectionOptions.java b/driver-core/src/main/com/mongodb/client/model/RenameCollectionOptions.java new file mode 100644 index 00000000000..363910df8b9 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/RenameCollectionOptions.java @@ -0,0 +1,54 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +/** + * The options to apply when renaming a collection. + * + * @mongodb.driver.manual reference/command/renameCollection renameCollection + * @since 3.0 + */ +public class RenameCollectionOptions { + private boolean dropTarget; + + /** + * Gets if mongod should drop the target of renameCollection prior to renaming the collection. + * + * @return true if mongod should drop the target of renameCollection prior to renaming the collection. 
+ */ + public boolean isDropTarget() { + return dropTarget; + } + + /** + * Sets if mongod should drop the target of renameCollection prior to renaming the collection. + * + * @param dropTarget true if mongod should drop the target of renameCollection prior to renaming the collection. + * @return this + */ + public RenameCollectionOptions dropTarget(final boolean dropTarget) { + this.dropTarget = dropTarget; + return this; + } + + @Override + public String toString() { + return "RenameCollectionOptions{" + + "dropTarget=" + dropTarget + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/ReplaceOneModel.java b/driver-core/src/main/com/mongodb/client/model/ReplaceOneModel.java new file mode 100644 index 00000000000..6a517a2bb0a --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/ReplaceOneModel.java @@ -0,0 +1,95 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import org.bson.conversions.Bson; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A model describing the replacement of at most one document that matches the query filter. + * + * @param the type of document to replace. This can be of any type for which a {@code Codec} is registered + * @since 3.0 + * @mongodb.driver.manual tutorial/modify-documents/#replace-the-document Replace + */ +public final class ReplaceOneModel extends WriteModel { + private final Bson filter; + private final T replacement; + private final ReplaceOptions options; + + /** + * Construct a new instance. + * + * @param filter a document describing the query filter, which may not be null. + * @param replacement the replacement document + */ + public ReplaceOneModel(final Bson filter, final T replacement) { + this(filter, replacement, new ReplaceOptions()); + } + + /** + * Construct a new instance. + * + * @param filter a document describing the query filter, which may not be null. + * @param replacement the replacement document + * @param options the options to apply + * @since 3.7 + */ + public ReplaceOneModel(final Bson filter, final T replacement, final ReplaceOptions options) { + this.filter = notNull("filter", filter); + this.options = notNull("options", options); + this.replacement = notNull("replacement", replacement); + } + + /** + * Gets the query filter. + * + * @return the query filter + */ + public Bson getFilter() { + return filter; + } + + /** + * Gets the document which will replace the document matching the query filter. + * + * @return the replacement document + */ + public T getReplacement() { + return replacement; + } + + /** + * Gets the ReplaceOptions to apply. 
+ * + * @return the replace options + * @since 3.7 + */ + public ReplaceOptions getReplaceOptions() { + return options; + } + + @Override + public String toString() { + return "ReplaceOneModel{" + + "filter=" + filter + + ", replacement=" + replacement + + ", options=" + options + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/ReplaceOptions.java b/driver-core/src/main/com/mongodb/client/model/ReplaceOptions.java new file mode 100644 index 00000000000..7a26e0997ba --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/ReplaceOptions.java @@ -0,0 +1,275 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.lang.Nullable; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.conversions.Bson; + +/** + * The options to apply when replacing documents. + * + * @since 3.7 + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + * @mongodb.driver.manual reference/command/update/ Update Command + */ +public class ReplaceOptions { + private boolean upsert; + private Boolean bypassDocumentValidation; + private Collation collation; + private Bson hint; + private String hintString; + private BsonValue comment; + private Bson variables; + private Bson sort; + + /** + * Returns true if a new document should be inserted if there are no matches to the query filter. The default is false. + * + * @return true if a new document should be inserted if there are no matches to the query filter + */ + public boolean isUpsert() { + return upsert; + } + + /** + * Set to true if a new document should be inserted if there are no matches to the query filter. + * + * @param upsert true if a new document should be inserted if there are no matches to the query filter + * @return this + */ + public ReplaceOptions upsert(final boolean upsert) { + this.upsert = upsert; + return this; + } + + /** + * Gets the bypass document level validation flag + * + * @return the bypass document level validation flag + * @mongodb.server.release 3.2 + */ + @Nullable + public Boolean getBypassDocumentValidation() { + return bypassDocumentValidation; + } + + /** + * Sets the bypass document level validation flag. + * + *
<p>For bulk operations use: {@link BulkWriteOptions#bypassDocumentValidation(Boolean)}</p>
+ * + * @param bypassDocumentValidation If true, allows the write to opt-out of document level validation. + * @return this + * @mongodb.server.release 3.2 + */ + public ReplaceOptions bypassDocumentValidation(@Nullable final Boolean bypassDocumentValidation) { + this.bypassDocumentValidation = bypassDocumentValidation; + return this; + } + + /** + * Returns the collation options + * + * @return the collation options + * @mongodb.server.release 3.4 + */ + @Nullable + public Collation getCollation() { + return collation; + } + + /** + * Sets the collation options + * + *
<p>A null value represents the server default.</p>
+ * @param collation the collation options to use + * @return this + * @mongodb.server.release 3.4 + */ + public ReplaceOptions collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + /** + * Returns the hint for which index to use. The default is not to set a hint. + * + * @return the hint + * @since 4.1 + */ + @Nullable + public Bson getHint() { + return hint; + } + + /** + * Sets the hint for which index to use. A null value means no hint is set. + * + * @param hint the hint + * @return this + * @since 4.1 + */ + public ReplaceOptions hint(@Nullable final Bson hint) { + this.hint = hint; + return this; + } + + /** + * Gets the hint string to apply. + * + * @return the hint string, which should be the name of an existing index + * @since 4.1 + */ + @Nullable + public String getHintString() { + return hintString; + } + + /** + * Sets the hint to apply. + * + * @param hint the name of the index which should be used for the operation + * @return this + * @since 4.1 + */ + public ReplaceOptions hintString(@Nullable final String hint) { + this.hintString = hint; + return this; + } + /** + * @return the comment for this operation. A null value means no comment is set. + * @since 4.6 + * @mongodb.server.release 4.4 + */ + @Nullable + public BsonValue getComment() { + return comment; + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + *
<p>For bulk operations use: {@link BulkWriteOptions#comment(String)}</p>
+ * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + public ReplaceOptions comment(@Nullable final String comment) { + this.comment = comment != null ? new BsonString(comment) : null; + return this; + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + *
<p>For bulk operations use: {@link BulkWriteOptions#comment(BsonValue)}</p>
+ * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + public ReplaceOptions comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + /** + * Add top-level variables to the operation + * + *
<p>
The value of let will be passed to all update and delete, but not insert, commands. + * + * @return the top level variables if set or null. + * @mongodb.server.release 5.0 + * @since 4.6 + */ + @Nullable + public Bson getLet() { + return variables; + } + + /** + * Add top-level variables for the operation + * + *
<p>
Allows for improved command readability by separating the variables from the query text. + *
<p>
For bulk operations use: {@link BulkWriteOptions#let(Bson)} + * + * @param variables for the operation or null + * @return this + * @mongodb.server.release 5.0 + * @since 4.6 + */ + public ReplaceOptions let(final Bson variables) { + this.variables = variables; + return this; + } + + /** + * Gets the sort criteria to apply to the operation. + * + *
<p>
+ * The sort criteria determines which document the operation replaces if the query matches multiple documents. + * The first document matched by the sort criteria will be replaced. + * The default is null, which means no specific sort criteria is applied. + * + * @return a document describing the sort criteria, or null if no sort is specified. + * @mongodb.driver.manual reference/method/db.collection.replaceOne/ Sort + * @mongodb.server.release 8.0 + * @since 5.3 + * @see #sort(Bson) + */ + @Nullable + public Bson getSort() { + return sort; + } + + /** + * Sets the sort criteria to apply to the operation. A null value means no sort criteria is set. + * + *
<p>
+ * The sort criteria determines which document the operation replaces if the query matches multiple documents. + * The first document matched by the specified sort criteria will be replaced. + * + * @param sort the sort criteria, which may be null. + * @return this + * @mongodb.driver.manual reference/method/db.collection.replaceOne/ Sort + * @mongodb.server.release 8.0 + * @since 5.3 + */ + public ReplaceOptions sort(@Nullable final Bson sort) { + this.sort = sort; + return this; + } + + @Override + public String toString() { + return "ReplaceOptions{" + + "upsert=" + upsert + + ", bypassDocumentValidation=" + bypassDocumentValidation + + ", collation=" + collation + + ", hint=" + hint + + ", hintString=" + hintString + + ", comment=" + comment + + ", let=" + variables + + ", sort=" + sort + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/ReturnDocument.java b/driver-core/src/main/com/mongodb/client/model/ReturnDocument.java new file mode 100644 index 00000000000..e480e142c7d --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/ReturnDocument.java @@ -0,0 +1,34 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +/** + * Indicates which document to return, the original document before change or the document after the change + * + * @mongodb.driver.manual reference/command/findAndModify findAndModify + * @since 3.0 + */ +public enum ReturnDocument { + /** + * Indicates to return the document before the update, replacement, or insert occurred. + */ + BEFORE, + /** + * Indicates to return the document after the update, replacement, or insert occurred. + */ + AFTER +} diff --git a/driver-core/src/main/com/mongodb/client/model/SearchIndexModel.java b/driver-core/src/main/com/mongodb/client/model/SearchIndexModel.java new file mode 100644 index 00000000000..2a229e1a579 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/SearchIndexModel.java @@ -0,0 +1,112 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A model describing the creation of a single Atlas Search index. 
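Editor's aside: a minimal usage sketch of the ReplaceOptions builder above, not part of the patch itself; the collection, filter, and replacement values are illustrative assumptions.

import com.mongodb.client.MongoCollection;
import com.mongodb.client.model.ReplaceOptions;
import org.bson.Document;

final class ReplaceOptionsExample {
    // Replaces the matching document, inserting the replacement when nothing matches (upsert).
    static void replaceOrInsert(final MongoCollection<Document> collection) {
        ReplaceOptions options = new ReplaceOptions()
                .upsert(true)                 // insert if no document matches the filter
                .comment("example-replace");  // requires MongoDB 4.4+
        collection.replaceOne(new Document("_id", 1),
                new Document("_id", 1).append("qty", 5), options);
    }
}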
+ * + * @since 4.11 + * @mongodb.server.release 6.0 + */ +public final class SearchIndexModel { + @Nullable + private final String name; + private final Bson definition; + @Nullable + private final SearchIndexType type; + + /** + * Construct an instance with the given Atlas Search index mapping definition. + * + *
<p>After calling this constructor, the {@code name} field will be {@code null}. In that case, when passing this + * {@code SearchIndexModel} to the {@code createSearchIndexes} method, the default search index name 'default' + * will be used to create the search index.</p>
+ * + * @param definition the search index mapping definition. + */ + public SearchIndexModel(final Bson definition) { + this(null, definition, null); + } + + /** + * Construct an instance with the given Atlas Search name and index definition. + * + * @param name the search index name. + * @param definition the search index mapping definition. + */ + public SearchIndexModel(final String name, final Bson definition) { + this(name, definition, null); + } + + /** + * Construct an instance with the given Atlas Search name, index definition, and type. + * + * @param name the search index name. + * @param definition the search index mapping definition. + * @param type the search index type. + * @since 5.2 + */ + public SearchIndexModel(@Nullable final String name, final Bson definition, @Nullable final SearchIndexType type) { + this.definition = notNull("definition", definition); + this.name = name; + this.type = type; + } + + /** + * Get the Atlas Search index mapping definition. + * + * @return the index definition. + */ + public Bson getDefinition() { + return definition; + } + + /** + * Get the Atlas Search index name. + * + * @return the search index name. + */ + @Nullable + public String getName() { + return name; + } + + /** + * Get the Atlas Search index type. + * + * @return the search index type. + * @since 5.2 + */ + @Nullable + public SearchIndexType getType() { + return type; + } + + @Override + public String toString() { + return "SearchIndexModel{" + + "name=" + name + + ", definition=" + definition + + ", type=" + (type == null ? "null" : type.toBsonValue()) + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/SearchIndexType.java b/driver-core/src/main/com/mongodb/client/model/SearchIndexType.java new file mode 100644 index 00000000000..5ed73461a05 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/SearchIndexType.java @@ -0,0 +1,83 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.annotations.Sealed; +import org.bson.BsonString; +import org.bson.BsonValue; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * This interface represents an Atlas Search Index type, which is utilized for creating specific types of indexes. + *
<p>
+ * It provides methods for creating and converting Atlas Search Index types to {@link BsonValue}.
+ * </p>
+ * + * @mongodb.server.release 6.0 + * @see SearchIndexModel The model class that utilizes this index type. + * @since 5.2 + */ +@Sealed +public interface SearchIndexType { + + /** + * Returns a {@link SearchIndexType} instance representing the "search" index type. + * + * @return The requested {@link SearchIndexType}. + */ + static SearchIndexType search() { + return new SearchIndexTypeBson(new BsonString("search")); + } + + /** + * Returns a {@link SearchIndexType} instance representing the "vectorSearch" index type. + * + * @return The requested {@link SearchIndexType}. + */ + static SearchIndexType vectorSearch() { + return new SearchIndexTypeBson(new BsonString("vectorSearch")); + } + + /** + * Creates a {@link SearchIndexType} from a {@link BsonValue} in situations when there is no builder method + * that better satisfies your needs. + * This method cannot be used to validate the syntax. + *
<p>
+ * <em>Example</em><br>
+ * The following code creates two functionally equivalent {@link SearchIndexType}s, + * though they may not be {@linkplain Object#equals(Object) equal}. + * <pre>{@code
+     *  SearchIndexType type1 = SearchIndexType.vectorSearch();
+     *  SearchIndexType type2 = SearchIndexType.of(new BsonString("vectorSearch"));
+     * }</pre>
+ * + * @param indexType A {@link BsonValue} representing the required {@link SearchIndexType}. + * @return The requested {@link SearchIndexType}. + */ + static SearchIndexType of(final BsonValue indexType) { + notNull("indexType", indexType); + return new SearchIndexTypeBson(indexType); + } + + /** + * Converts this object to {@link BsonValue}. + * + * @return A {@link BsonValue} representing this {@link SearchIndexType}. + */ + BsonValue toBsonValue(); +} diff --git a/driver-core/src/main/com/mongodb/client/model/SearchIndexTypeBson.java b/driver-core/src/main/com/mongodb/client/model/SearchIndexTypeBson.java new file mode 100644 index 00000000000..75e8788e681 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/SearchIndexTypeBson.java @@ -0,0 +1,52 @@ +package com.mongodb.client.model; + +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import org.bson.BsonValue; + +import java.util.Objects; + +final class SearchIndexTypeBson implements SearchIndexType { + private final BsonValue bsonValue; + + SearchIndexTypeBson(final BsonValue bsonValue) { + this.bsonValue = bsonValue; + } + + @Override + public BsonValue toBsonValue() { + return bsonValue; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + SearchIndexTypeBson that = (SearchIndexTypeBson) o; + return Objects.equals(bsonValue, that.bsonValue); + } + + @Override + public int hashCode() { + return Objects.hash(bsonValue); + } +} + diff --git a/driver-core/src/main/com/mongodb/client/model/SimpleExpression.java b/driver-core/src/main/com/mongodb/client/model/SimpleExpression.java new file mode 100644 index 00000000000..40fadda8aa1 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/SimpleExpression.java @@ -0,0 +1,78 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
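Editor's aside: a hedged sketch, not part of the patch, showing SearchIndexModel and SearchIndexType together; the index name, field path, and dimension count are illustrative assumptions.

import com.mongodb.client.MongoCollection;
import com.mongodb.client.model.SearchIndexModel;
import com.mongodb.client.model.SearchIndexType;
import org.bson.Document;

import java.util.Collections;

final class SearchIndexExample {
    // Creates one Atlas vector-search index over an "embedding" field.
    static void createVectorIndex(final MongoCollection<Document> collection) {
        Document definition = new Document("fields", Collections.singletonList(
                new Document("type", "vector")
                        .append("path", "embedding")
                        .append("numDimensions", 1536)
                        .append("similarity", "cosine")));
        collection.createSearchIndexes(Collections.singletonList(
                new SearchIndexModel("vector_index", definition, SearchIndexType.vectorSearch())));
    }
}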
+ */ + +package com.mongodb.client.model; + +import org.bson.BsonDocument; +import org.bson.BsonDocumentWriter; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.Objects; + +class SimpleExpression implements Bson { + private final String name; + private final TExpression expression; + + SimpleExpression(final String name, final TExpression expression) { + this.name = name; + this.expression = expression; + } + + @Override + public BsonDocument toBsonDocument(final Class documentClass, final CodecRegistry codecRegistry) { + BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument()); + + writer.writeStartDocument(); + writer.writeName(name); + BuildersHelper.encodeValue(writer, expression, codecRegistry); + writer.writeEndDocument(); + + return writer.getDocument(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + SimpleExpression that = (SimpleExpression) o; + + if (!Objects.equals(name, that.name)) { + return false; + } + return Objects.equals(expression, that.expression); + } + + @Override + public int hashCode() { + int result = name != null ? name.hashCode() : 0; + result = 31 * result + (expression != null ? expression.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "Expression{" + + "name='" + name + '\'' + + ", expression=" + expression + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/Sorts.java b/driver-core/src/main/com/mongodb/client/model/Sorts.java new file mode 100644 index 00000000000..7d8a72ac7a2 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/Sorts.java @@ -0,0 +1,179 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.List; +import java.util.Objects; + +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.Arrays.asList; + +/** + * A factory for sort specifications. A convenient way to use this class is to statically import all of its methods, which allows + * usage like: + * + *
<blockquote><pre>
+ *    collection.find().sort(orderBy(ascending("x", "y"), descending("z")))
+ * </pre></blockquote>
+ * + * @since 3.0 + * @mongodb.driver.manual reference/operator/meta/orderby Sort + */ +public final class Sorts { + private Sorts() { + } + + /** + * Create a sort specification for an ascending sort on the given fields. + * + * @param fieldNames the field names, which must contain at least one + * @return the sort specification + * @mongodb.driver.manual reference/operator/meta/orderby Sort + */ + public static Bson ascending(final String... fieldNames) { + return ascending(asList(fieldNames)); + } + + /** + * Create a sort specification for an ascending sort on the given fields. + * + * @param fieldNames the field names, which must contain at least one + * @return the sort specification + * @mongodb.driver.manual reference/operator/meta/orderby Sort + */ + public static Bson ascending(final List fieldNames) { + notNull("fieldNames", fieldNames); + return orderBy(fieldNames, new BsonInt32(1)); + } + + /** + * Create a sort specification for a descending sort on the given fields. + * + * @param fieldNames the field names, which must contain at least one + * @return the sort specification + * @mongodb.driver.manual reference/operator/meta/orderby Sort + */ + public static Bson descending(final String... fieldNames) { + return descending(asList(fieldNames)); + } + + /** + * Create a sort specification for a descending sort on the given fields. + * + * @param fieldNames the field names, which must contain at least one + * @return the sort specification + * @mongodb.driver.manual reference/operator/meta/orderby Sort + */ + public static Bson descending(final List fieldNames) { + notNull("fieldNames", fieldNames); + return orderBy(fieldNames, new BsonInt32(-1)); + } + + /** + * Create a sort specification for the text score meta projection on the given field. + * + * @param fieldName the field name + * @return the sort specification + * @see Filters#text(String, TextSearchOptions) + * @mongodb.driver.manual reference/operator/aggregation/meta/#text-score-metadata--meta---textscore- textScore + */ + public static Bson metaTextScore(final String fieldName) { + return new BsonDocument(fieldName, new BsonDocument("$meta", new BsonString("textScore"))); + } + + /** + * Combine multiple sort specifications. If any field names are repeated, the last one takes precedence. + * + * @param sorts the sort specifications + * @return the combined sort specification + */ + public static Bson orderBy(final Bson... sorts) { + return orderBy(asList(sorts)); + } + + /** + * Combine multiple sort specifications. If any field names are repeated, the last one takes precedence. 
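Editor's aside: a small sketch, not part of the patch, of the Sorts factory methods defined above; the field names are illustrative assumptions.

import org.bson.conversions.Bson;

import static com.mongodb.client.model.Sorts.ascending;
import static com.mongodb.client.model.Sorts.descending;
import static com.mongodb.client.model.Sorts.orderBy;

final class SortsExample {
    // Renders as {"age": 1, "name": -1}; with repeated field names the last specification wins.
    static Bson byAgeThenName() {
        return orderBy(ascending("age"), descending("name"));
    }
}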
+ * + * @param sorts the sort specifications + * @return the combined sort specification + */ + public static Bson orderBy(final List sorts) { + notNull("sorts", sorts); + return new CompoundSort(sorts); + } + + private static Bson orderBy(final List fieldNames, final BsonValue value) { + BsonDocument document = new BsonDocument(); + for (String fieldName : fieldNames) { + document.append(fieldName, value); + } + return document; + } + + private static final class CompoundSort implements Bson { + private final List sorts; + + private CompoundSort(final List sorts) { + this.sorts = sorts; + } + + @Override + public BsonDocument toBsonDocument(final Class documentClass, final CodecRegistry codecRegistry) { + BsonDocument combinedDocument = new BsonDocument(); + for (Bson sort : sorts) { + BsonDocument sortDocument = sort.toBsonDocument(documentClass, codecRegistry); + for (String key : sortDocument.keySet()) { + combinedDocument.append(key, sortDocument.get(key)); + } + } + return combinedDocument; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CompoundSort that = (CompoundSort) o; + + return Objects.equals(sorts, that.sorts); + } + + @Override + public int hashCode() { + return sorts != null ? sorts.hashCode() : 0; + } + + @Override + public String toString() { + return "Compound Sort{" + + "sorts=" + sorts + + '}'; + } + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/TextSearchOptions.java b/driver-core/src/main/com/mongodb/client/model/TextSearchOptions.java new file mode 100644 index 00000000000..4654666b4be --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/TextSearchOptions.java @@ -0,0 +1,138 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model; + +import com.mongodb.lang.Nullable; + +import java.util.Objects; + +/** + * Text search options for the {@link Filters#text(String, TextSearchOptions)} helper + * + * @mongodb.driver.manual reference/operator/query/text $text + * @since 3.2 + */ +public final class TextSearchOptions { + + private String language; + private Boolean caseSensitive; + private Boolean diacriticSensitive; + + /** + * Returns the language to be used with the text search + * + * @return the language to use for the text search if set or null + */ + @Nullable + public String getLanguage() { + return language; + } + + /** + * Set the language for the text search + * + * @param language the language to use for the text search + * @return this + */ + public TextSearchOptions language(@Nullable final String language) { + this.language = language; + return this; + } + + /** + * Returns the case-sensitive flag to use with the text search + * + * @return the case-sensitive flag if set or null + * @mongodb.server.release 3.2 + */ + @Nullable + public Boolean getCaseSensitive() { + return caseSensitive; + } + + /** + * Set the case-sensitive flag for the text search + * + * @param caseSensitive the case-sensitive flag for the text search + * @return this + * @mongodb.server.release 3.2 + */ + public TextSearchOptions caseSensitive(@Nullable final Boolean caseSensitive) { + this.caseSensitive = caseSensitive; + return this; + } + + /** + * Returns the diacritic-sensitive flag to use with the text search + * + * @return the diacritic-sensitive flag if set or null + * @mongodb.server.release 3.2 + */ + @Nullable + public Boolean getDiacriticSensitive() { + return diacriticSensitive; + } + + /** + * Set the diacritic-sensitive flag for the text search + * + * @param diacriticSensitive the diacritic-sensitive flag for the text search + * @return this + * @mongodb.server.release 3.2 + */ + public TextSearchOptions diacriticSensitive(@Nullable final Boolean diacriticSensitive) { + this.diacriticSensitive = diacriticSensitive; + return this; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + TextSearchOptions that = (TextSearchOptions) o; + + if (!Objects.equals(language, that.language)) { + return false; + } + if (!Objects.equals(caseSensitive, that.caseSensitive)) { + return false; + } + return Objects.equals(diacriticSensitive, that.diacriticSensitive); + } + + @Override + public int hashCode() { + int result = language != null ? language.hashCode() : 0; + result = 31 * result + (caseSensitive != null ? caseSensitive.hashCode() : 0); + result = 31 * result + (diacriticSensitive != null ? diacriticSensitive.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "Text Search Options{" + + "language='" + language + '\'' + + ", caseSensitive=" + caseSensitive + + ", diacriticSensitive=" + diacriticSensitive + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/TimeSeriesGranularity.java b/driver-core/src/main/com/mongodb/client/model/TimeSeriesGranularity.java new file mode 100644 index 00000000000..266ef8489a0 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/TimeSeriesGranularity.java @@ -0,0 +1,46 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
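Editor's aside: a brief sketch, not part of the patch, pairing TextSearchOptions with Filters.text; the search terms and language are illustrative assumptions.

import com.mongodb.client.model.Filters;
import com.mongodb.client.model.TextSearchOptions;
import org.bson.conversions.Bson;

final class TextSearchExample {
    // Builds a $text filter; the collection must have a text index for the query to run.
    static Bson caseSensitiveSearch(final String terms) {
        return Filters.text(terms, new TextSearchOptions().language("english").caseSensitive(true));
    }
}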
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +/** + * An enumeration of time-series data granularity. + *
<p>
+ * It describes the units one would use to describe the expected interval between subsequent measurements for a time-series.
+ * </p>
+ * @since 4.3 + * @see TimeSeriesOptions + * @see CreateCollectionOptions + */ +public enum TimeSeriesGranularity { + /** + * Seconds-level granularity. + *
<p>
+ * This is the default value.
+ * </p>
+ */ + SECONDS, + + /** + * Minutes-level granularity. + */ + MINUTES, + + /** + * Hours-level granularity. + */ + HOURS +} diff --git a/driver-core/src/main/com/mongodb/client/model/TimeSeriesOptions.java b/driver-core/src/main/com/mongodb/client/model/TimeSeriesOptions.java new file mode 100644 index 00000000000..6844e13848a --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/TimeSeriesOptions.java @@ -0,0 +1,218 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.lang.Nullable; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; + +/** + * Options related to the creation of time-series collections. + * + * @since 4.3 + * @see CreateCollectionOptions + * @mongodb.driver.manual core/timeseries-collections/ Time-series collections + */ +public final class TimeSeriesOptions { + private final String timeField; + private String metaField; + private TimeSeriesGranularity granularity; + private Long bucketMaxSpanSeconds; + private Long bucketRoundingSeconds; + + /** + * Construct a new instance. + * + * @param timeField the name of the top-level field to be used for time. Inserted documents must have this field, and the field must be + * of the BSON datetime type. + */ + public TimeSeriesOptions(final String timeField) { + this.timeField = notNull("timeField", timeField); + } + + /** + * Gets the name of the field holding the time value. + * + * @return the name of the field holding the time value. + */ + public String getTimeField() { + return timeField; + } + + /** + * Gets the name of the meta field. + * + * @return the name of the meta field + * @see #metaField(String) + */ + @Nullable + public String getMetaField() { + return metaField; + } + + /** + * Sets the name of the meta field. + *
<p>
+ * The name of the field which contains metadata in each time series document. The metadata in the specified field should be data + * that is used to label a unique series of documents. The metadata should rarely, if ever, change. This field is used to group + * related data and may be of any BSON type, except for array. This name may not be the same as the {@code timeField} or "_id". + *
</p>
+ * @param metaField the name of the meta field + * @return this + * @see #getMetaField() + */ + public TimeSeriesOptions metaField(@Nullable final String metaField) { + this.metaField = metaField; + return this; + } + + /** + * Gets the granularity of the time-series data. + * + * @return the time-series granularity + * @see #granularity(TimeSeriesGranularity) + */ + @Nullable + public TimeSeriesGranularity getGranularity() { + return granularity; + } + + /** + * Sets the granularity of the time-series data. + *
<p>
+ * The default value is {@link TimeSeriesGranularity#SECONDS} if neither {@link #bucketMaxSpan(Long, TimeUnit)} nor + * {@link #bucketRounding(Long, TimeUnit)} is set. If any of these bucketing options are set, the granularity parameter cannot be set. + *
</p>
+ * + * @param granularity the time-series granularity + * @return this + * @see #getGranularity() + */ + public TimeSeriesOptions granularity(@Nullable final TimeSeriesGranularity granularity) { + isTrue("granularity is not allowed when bucketMaxSpan is set", bucketMaxSpanSeconds == null); + isTrue("granularity is not allowed when bucketRounding is set", bucketRoundingSeconds == null); + this.granularity = granularity; + return this; + } + + /** + * Returns the maximum time span between measurements in a bucket. + * + * @param timeUnit the time unit. + * @return time span between measurements, or {@code null} if not set. + * @since 4.10 + * @mongodb.server.release 6.3 + * @see #bucketMaxSpan(Long, TimeUnit) + */ + @Nullable + public Long getBucketMaxSpan(final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + if (bucketMaxSpanSeconds == null) { + return null; + } + return timeUnit.convert(bucketMaxSpanSeconds, TimeUnit.SECONDS); + } + + /** + * Sets the maximum time span between measurements in a bucket. + *
<p>
+ * The value of {@code bucketMaxSpan} must be the same as {@link #bucketRounding(Long, TimeUnit)}, which also means that the options + * must either be both set or both unset. If you set the {@code bucketMaxSpan} parameter, you can't set the granularity parameter. + *
</p>
+ * + * @param bucketMaxSpan time span between measurements. After conversion to seconds using {@link TimeUnit#convert(long, java.util.concurrent.TimeUnit)}, + * the value must be >= 1. {@code null} can be provided to unset any previously set value. + * @param timeUnit the time unit. + * @return this + * @since 4.10 + * @mongodb.server.release 6.3 + * @see #getBucketMaxSpan(TimeUnit) + */ + public TimeSeriesOptions bucketMaxSpan(@Nullable final Long bucketMaxSpan, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + if (bucketMaxSpan == null) { + this.bucketMaxSpanSeconds = null; + } else { + isTrue("bucketMaxSpan is not allowed when granularity is set", granularity == null); + long seconds = TimeUnit.SECONDS.convert(bucketMaxSpan, timeUnit); + isTrueArgument("bucketMaxSpan, after conversion to seconds, must be >= 1", seconds > 0); + this.bucketMaxSpanSeconds = seconds; + } + return this; + } + + /** + * Returns the time interval that determines the starting timestamp for a new bucket. + * + * @param timeUnit the time unit. + * @return the time interval, or {@code null} if not set. + * @since 4.10 + * @mongodb.server.release 6.3 + * @see #bucketRounding(Long, TimeUnit) + */ + @Nullable + public Long getBucketRounding(final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + if (bucketRoundingSeconds == null) { + return null; + } + return timeUnit.convert(bucketRoundingSeconds, TimeUnit.SECONDS); + } + + /** + * Specifies the time interval that determines the starting timestamp for a new bucket. + *
<p>
+ * The value of {@code bucketRounding} must be the same as {@link #bucketMaxSpan(Long, TimeUnit)}, which also means that the options + * must either be both set or both unset. If you set the {@code bucketRounding} parameter, you can't set the granularity parameter. + *
</p>
+ * + * @param bucketRounding time interval. After conversion to seconds using {@link TimeUnit#convert(long, java.util.concurrent.TimeUnit)}, + * the value must be >= 1. {@code null} can be provided to unset any previously set value. + * @param timeUnit the time unit. + * @return this + * @since 4.10 + * @mongodb.server.release 6.3 + * @see #getBucketRounding(TimeUnit) + */ + public TimeSeriesOptions bucketRounding(@Nullable final Long bucketRounding, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + if (bucketRounding == null) { + this.bucketRoundingSeconds = null; + } else { + isTrue("bucketRounding is not allowed when granularity is set", granularity == null); + long seconds = TimeUnit.SECONDS.convert(bucketRounding, timeUnit); + isTrueArgument("bucketRounding, after conversion to seconds, must be >= 1", seconds > 0); + this.bucketRoundingSeconds = seconds; + } + return this; + } + + @Override + public String toString() { + return "TimeSeriesOptions{" + + "timeField='" + timeField + '\'' + + ", metaField='" + metaField + '\'' + + ", granularity=" + granularity + + ", bucketMaxSpanSeconds=" + bucketMaxSpanSeconds + + ", bucketRoundingSeconds=" + bucketRoundingSeconds + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/UnwindOptions.java b/driver-core/src/main/com/mongodb/client/model/UnwindOptions.java new file mode 100644 index 00000000000..c15e62e4b09 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/UnwindOptions.java @@ -0,0 +1,104 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
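Editor's aside: a hedged sketch, not part of the patch, wiring TimeSeriesOptions into collection creation; the collection and field names are illustrative assumptions.

import com.mongodb.client.MongoDatabase;
import com.mongodb.client.model.CreateCollectionOptions;
import com.mongodb.client.model.TimeSeriesGranularity;
import com.mongodb.client.model.TimeSeriesOptions;

final class TimeSeriesExample {
    static void createWeatherCollection(final MongoDatabase database) {
        TimeSeriesOptions tsOptions = new TimeSeriesOptions("timestamp")
                .metaField("sensorId")                       // labels each series of measurements
                .granularity(TimeSeriesGranularity.MINUTES); // mutually exclusive with the bucketing options
        database.createCollection("weather", new CreateCollectionOptions().timeSeriesOptions(tsOptions));
    }
}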
+ */ + +package com.mongodb.client.model; + +import com.mongodb.lang.Nullable; + +import java.util.Objects; + +/** + * The options for an unwind aggregation pipeline stage + * + * @mongodb.driver.manual reference/operator/aggregation/unwind/ $unwind + * @mongodb.server.release 3.2 + * @since 3.2 + */ +public final class UnwindOptions { + + private Boolean preserveNullAndEmptyArrays; + private String includeArrayIndex; + + /** + * If true the unwind stage will include documents that have null values or empty arrays + * + * @return the preserve null values and empty arrays value or null + */ + @Nullable + public Boolean isPreserveNullAndEmptyArrays() { + return preserveNullAndEmptyArrays; + } + + /** + * Sets true if the unwind stage should include documents that have null values or empty arrays + * + * @param preserveNullAndEmptyArrays flag depicting if the unwind stage should include documents that have null values or empty arrays + * @return this + */ + public UnwindOptions preserveNullAndEmptyArrays(@Nullable final Boolean preserveNullAndEmptyArrays) { + this.preserveNullAndEmptyArrays = preserveNullAndEmptyArrays; + return this; + } + + /** + * Gets the includeArrayIndex field if set or null + * + * @return the includeArrayIndex field if set or null + */ + @Nullable + public String getIncludeArrayIndex() { + return includeArrayIndex; + } + + /** + * Sets the field to be used to store the array index of the unwound item + * + * @param arrayIndexFieldName the field to be used to store the array index of the unwound item + * @return this + */ + public UnwindOptions includeArrayIndex(@Nullable final String arrayIndexFieldName) { + this.includeArrayIndex = arrayIndexFieldName; + return this; + } + + @Override + public String toString() { + return "UnwindOptions{" + + "preserveNullAndEmptyArrays=" + preserveNullAndEmptyArrays + + ", includeArrayIndex='" + includeArrayIndex + '\'' + + '}'; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + UnwindOptions that = (UnwindOptions) o; + return Objects.equals(preserveNullAndEmptyArrays, that.preserveNullAndEmptyArrays) && Objects.equals(includeArrayIndex, that.includeArrayIndex); + } + + @Override + public int hashCode() { + int result = Objects.hashCode(preserveNullAndEmptyArrays); + result = 31 * result + Objects.hashCode(includeArrayIndex); + return result; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/UpdateManyModel.java b/driver-core/src/main/com/mongodb/client/model/UpdateManyModel.java new file mode 100644 index 00000000000..1c18a7f1645 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/UpdateManyModel.java @@ -0,0 +1,146 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model; + +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +import java.util.List; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A model describing an update to all documents that matches the query filter. The update to apply must include only update + * operators. + * + * @param the type of document to update. In practice this doesn't actually apply to updates but is here for consistency with the + * other write models + * @since 3.0 + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + */ +public final class UpdateManyModel extends WriteModel { + private final Bson filter; + private final Bson update; + private final List updatePipeline; + private final UpdateOptions options; + + /** + * Construct a new instance. + * + * @param filter a document describing the query filter, which may not be null. + * @param update a document describing the update, which may not be null. The update to apply must include only update + * operators. + */ + public UpdateManyModel(final Bson filter, final Bson update) { + this(filter, update, new UpdateOptions()); + } + + /** + * Construct a new instance. + * + * @param filter a document describing the query filter, which may not be null. + * @param update a document describing the update, which may not be null. The update to apply must include only update + * operators. + * @param options the options to apply + */ + public UpdateManyModel(final Bson filter, final Bson update, final UpdateOptions options) { + this.filter = notNull("filter", filter); + this.update = notNull("update", update); + this.updatePipeline = null; + this.options = notNull("options", options); + } + + /** + * Construct a new instance. + * + * @param filter a document describing the query filter, which may not be null. + * @param update a pipeline describing the update, which may not be null. + * @since 3.11 + * @mongodb.server.release 4.2 + */ + public UpdateManyModel(final Bson filter, final List update) { + this(filter, update, new UpdateOptions()); + } + + /** + * Construct a new instance. + * + * @param filter a document describing the query filter, which may not be null. + * @param update a pipeline describing the update, which may not be null. + * @param options the options to apply + * @since 3.11 + * @mongodb.server.release 4.2 + */ + public UpdateManyModel(final Bson filter, final List update, final UpdateOptions options) { + this.filter = notNull("filter", filter); + this.update = null; + this.updatePipeline = update; + this.options = notNull("options", options); + } + + /** + * Gets the query filter. + * + * @return the query filter + */ + public Bson getFilter() { + return filter; + } + + /** + * Gets the document specifying the updates to apply to the matching document. The update to apply must include only update + * operators. + * + * @return the document specifying the updates to apply + */ + @Nullable + public Bson getUpdate() { + return update; + } + + /** + * Gets the pipeline specifying the updates to apply to the matching document. The update to apply must include only update + * operators. + * + * @return the pipeline specifying the updates to apply + * @since 3.11 + * @mongodb.server.release 4.2 + */ + @Nullable + public List getUpdatePipeline() { + return updatePipeline; + } + + /** + * Gets the options to apply. 
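Editor's aside: a short sketch, not part of the patch, of UnwindOptions inside an aggregation stage; the "items" array field and "itemIndex" output field are illustrative assumptions.

import com.mongodb.client.model.Aggregates;
import com.mongodb.client.model.UnwindOptions;
import org.bson.conversions.Bson;

final class UnwindExample {
    // $unwind that keeps documents with a null, missing, or empty "items" array
    // and records each element's original array position in "itemIndex".
    static Bson unwindItems() {
        return Aggregates.unwind("$items",
                new UnwindOptions().preserveNullAndEmptyArrays(true).includeArrayIndex("itemIndex"));
    }
}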
+ * + * @return the options + */ + public UpdateOptions getOptions() { + return options; + } + + @Override + public String toString() { + return "UpdateManyModel{" + + "filter=" + filter + + ", update=" + (update != null ? update : updatePipeline) + + ", options=" + options + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/UpdateOneModel.java b/driver-core/src/main/com/mongodb/client/model/UpdateOneModel.java new file mode 100644 index 00000000000..43d1e75616c --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/UpdateOneModel.java @@ -0,0 +1,142 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +import java.util.List; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A model describing an update to at most one document that matches the query filter. The update to apply must include only update + * operators. + * + * @param the type of document to update. In practice this doesn't actually apply to updates but is here for consistency with the other + * write models + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + * @since 3.0 + */ +public final class UpdateOneModel extends WriteModel { + private final Bson filter; + private final Bson update; + private final List updatePipeline; + private final UpdateOptions options; + + /** + * Construct a new instance. + * + * @param filter a document describing the query filter, which may not be null. + * @param update a document describing the update, which may not be null. The update to apply must include only update operators. + */ + public UpdateOneModel(final Bson filter, final Bson update) { + this(filter, update, new UpdateOptions()); + } + + /** + * Construct a new instance. + * + * @param filter a document describing the query filter, which may not be null. + * @param update a document describing the update, which may not be null. The update to apply must include only update operators. + * @param options the options to apply + */ + public UpdateOneModel(final Bson filter, final Bson update, final UpdateOptions options) { + this.filter = notNull("filter", filter); + this.update = notNull("update", update); + this.updatePipeline = null; + this.options = notNull("options", options); + } + + /** + * Construct a new instance. + * + * @param filter a document describing the query filter, which may not be null. + * @param update a pipeline describing the update, which may not be null. + * @since 3.11 + * @mongodb.server.release 4.2 + */ + public UpdateOneModel(final Bson filter, final List update) { + this(filter, update, new UpdateOptions()); + } + + /** + * Construct a new instance. + * + * @param filter a document describing the query filter, which may not be null. + * @param update a pipeline describing the update, which may not be null. 
+ * @param options the options to apply + * @since 3.11 + * @mongodb.server.release 4.2 + */ + public UpdateOneModel(final Bson filter, final List update, final UpdateOptions options) { + this.filter = notNull("filter", filter); + this.update = null; + this.updatePipeline = update; + this.options = notNull("options", options); + } + + /** + * Gets the query filter. + * + * @return the query filter + */ + public Bson getFilter() { + return filter; + } + + /** + * Gets the document specifying the updates to apply to the matching document. The update to apply must include only update operators. + * + * @return the document specifying the updates to apply + */ + @Nullable + public Bson getUpdate() { + return update; + } + + /** + * Gets the pipeline specifying the updates to apply to the matching document. The update to apply must include only update operators. + * + * @return the pipeline specifying the updates to apply + * @since 3.11 + * @mongodb.server.release 4.2 + */ + @Nullable + public List getUpdatePipeline() { + return updatePipeline; + } + + /** + * Gets the options to apply. + * + * @return the options + */ + public UpdateOptions getOptions() { + return options; + } + + @Override + public String toString() { + return "UpdateOneModel{" + + "filter=" + filter + + ", update=" + (update != null ? update : updatePipeline) + + ", options=" + options + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/UpdateOptions.java b/driver-core/src/main/com/mongodb/client/model/UpdateOptions.java new file mode 100644 index 00000000000..88eb3cb6acb --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/UpdateOptions.java @@ -0,0 +1,311 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.lang.Nullable; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.conversions.Bson; + +import java.util.List; + +/** + * The options to apply when updating documents. + * + * @since 3.0 + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + * @mongodb.driver.manual reference/command/update/ Update Command + */ +public class UpdateOptions { + private boolean upsert; + private Boolean bypassDocumentValidation; + private Collation collation; + private List arrayFilters; + private Bson hint; + private String hintString; + private BsonValue comment; + private Bson variables; + private Bson sort; + + /** + * Returns true if a new document should be inserted if there are no matches to the query filter. The default is false. + * + * @return true if a new document should be inserted if there are no matches to the query filter + */ + public boolean isUpsert() { + return upsert; + } + + /** + * Set to true if a new document should be inserted if there are no matches to the query filter. 
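Editor's aside: a compact sketch, not part of the patch, combining the write models above in one bulk call; the filters, field names, and update values are illustrative assumptions.

import com.mongodb.client.MongoCollection;
import com.mongodb.client.model.UpdateManyModel;
import com.mongodb.client.model.UpdateOneModel;
import org.bson.Document;

import java.util.Arrays;

import static com.mongodb.client.model.Filters.eq;
import static com.mongodb.client.model.Updates.inc;
import static com.mongodb.client.model.Updates.set;

final class WriteModelExample {
    // Each model carries its own filter and update; bulkWrite sends them in one round trip.
    static void bulkUpdate(final MongoCollection<Document> collection) {
        collection.bulkWrite(Arrays.asList(
                new UpdateOneModel<Document>(eq("_id", 1), set("status", "shipped")),
                new UpdateManyModel<Document>(eq("status", "pending"), inc("retries", 1))));
    }
}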
+ * + * @param upsert true if a new document should be inserted if there are no matches to the query filter + * @return this + */ + public UpdateOptions upsert(final boolean upsert) { + this.upsert = upsert; + return this; + } + + /** + * Gets the bypass document level validation flag + * + * @return the bypass document level validation flag + * @since 3.2 + * @mongodb.server.release 3.2 + */ + @Nullable + public Boolean getBypassDocumentValidation() { + return bypassDocumentValidation; + } + + /** + * Sets the bypass document level validation flag. + * + *
<p>For bulk operations use: {@link BulkWriteOptions#bypassDocumentValidation(Boolean)}</p>
+ * + * @param bypassDocumentValidation If true, allows the write to opt-out of document level validation. + * @return this + * @since 3.2 + * @mongodb.server.release 3.2 + */ + public UpdateOptions bypassDocumentValidation(@Nullable final Boolean bypassDocumentValidation) { + this.bypassDocumentValidation = bypassDocumentValidation; + return this; + } + + /** + * Returns the collation options + * + * @return the collation options + * @since 3.4 + * @mongodb.server.release 3.4 + */ + @Nullable + public Collation getCollation() { + return collation; + } + + /** + * Sets the collation options + * + *
<p>A null value represents the server default.</p>
+ * @param collation the collation options to use + * @return this + * @since 3.4 + * @mongodb.server.release 3.4 + */ + public UpdateOptions collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + /** + * Sets the array filters option + * + * @param arrayFilters the array filters, which may be null + * @return this + * @since 3.6 + * @mongodb.server.release 3.6 + */ + public UpdateOptions arrayFilters(@Nullable final List arrayFilters) { + this.arrayFilters = arrayFilters; + return this; + } + + /** + * Returns the array filters option + * + * @return the array filters, which may be null + * @since 3.6 + * @mongodb.server.release 3.6 + */ + @Nullable + public List getArrayFilters() { + return arrayFilters; + } + + /** + * Returns the hint for which index to use. The default is not to set a hint. + * + * @return the hint + * @since 4.1 + */ + @Nullable + public Bson getHint() { + return hint; + } + + /** + * Sets the hint for which index to use. A null value means no hint is set. + * + * @param hint the hint + * @return this + * @since 4.1 + */ + public UpdateOptions hint(@Nullable final Bson hint) { + this.hint = hint; + return this; + } + + /** + * Gets the hint string to apply. + * + * @return the hint string, which should be the name of an existing index + * @since 4.1 + */ + @Nullable + public String getHintString() { + return hintString; + } + + /** + * Sets the hint to apply. + * + * @param hint the name of the index which should be used for the operation + * @return this + * @since 4.1 + */ + public UpdateOptions hintString(@Nullable final String hint) { + this.hintString = hint; + return this; + } + + + /** + * @return the comment for this operation. A null value means no comment is set. + * @since 4.6 + * @mongodb.server.release 4.4 + */ + @Nullable + public BsonValue getComment() { + return comment; + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + *
<p>For bulk operations use: {@link BulkWriteOptions#comment(String)}</p>
+ * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + public UpdateOptions comment(@Nullable final String comment) { + this.comment = comment != null ? new BsonString(comment) : null; + return this; + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + *
<p>For bulk operations use: {@link BulkWriteOptions#comment(BsonValue)}</p>
+ * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + public UpdateOptions comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + /** + * Add top-level variables to the operation + * + *
<p>
The value of let will be passed to all update and delete, but not insert, commands. + * + * @return the top level variables if set or null. + * @mongodb.server.release 5.0 + * @since 4.6 + */ + @Nullable + public Bson getLet() { + return variables; + } + + /** + * Add top-level variables for the operation + * + *
<p>
Allows for improved command readability by separating the variables from the query text. + *
<p>
For bulk operations use: {@link BulkWriteOptions#let(Bson)} + * + * @param variables for the operation or null + * @return this + * @mongodb.server.release 5.0 + * @since 4.6 + */ + public UpdateOptions let(final Bson variables) { + this.variables = variables; + return this; + } + + /** + * Gets the sort criteria to apply to the operation. + * + *
<p>
+ * The sort criteria determines which document the operation updates if the query matches multiple documents. + * The first document matched by the sort criteria will be updated. + * The default is null, which means no specific sort criteria is applied. + * + * @return a document describing the sort criteria, or null if no sort is specified. + * @mongodb.driver.manual reference/method/db.collection.updateOne/ Sort + * @mongodb.server.release 8.0 + * @since 5.3 + * @see #sort(Bson) + */ + @Nullable + public Bson getSort() { + return sort; + } + + /** + * Sets the sort criteria to apply to the operation. A null value means no sort criteria is set. + * + *
<p>
+ * The sort criteria determines which document the operation updates if the query matches multiple documents. + * The first document matched by the specified sort criteria will be updated. + * + * @param sort the sort criteria, which may be null. + * @return this + * @mongodb.driver.manual reference/method/db.collection.updateOne/ Sort + * @mongodb.server.release 8.0 + * @since 5.3 + */ + public UpdateOptions sort(@Nullable final Bson sort) { + this.sort = sort; + return this; + } + + @Override + public String toString() { + return "UpdateOptions{" + + "upsert=" + upsert + + ", bypassDocumentValidation=" + bypassDocumentValidation + + ", collation=" + collation + + ", arrayFilters=" + arrayFilters + + ", hint=" + hint + + ", hintString=" + hintString + + ", comment=" + comment + + ", let=" + variables + + ", sort=" + sort + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/Updates.java b/driver-core/src/main/com/mongodb/client/model/Updates.java new file mode 100644 index 00000000000..64b403cb6d9 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/Updates.java @@ -0,0 +1,843 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonDocumentWriter; +import org.bson.BsonInt32; +import org.bson.BsonInt64; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.List; +import java.util.Map; +import java.util.Objects; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.client.model.BuildersHelper.encodeValue; +import static java.util.Arrays.asList; + +/** + * A factory for document updates. A convenient way to use this class is to statically import all of its methods, which allows usage like: + *
<blockquote><pre>
+ *    collection.updateOne(eq("x", 1), set("x", 2));
+ * </pre></blockquote>
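+ * <p>
+ * As a further sketch, several operators can be combined into a single update; the field names here are illustrative,
+ * not part of the API:
+ * <blockquote><pre>
+ *    collection.updateOne(eq("x", 1), combine(set("x", 2), inc("updateCount", 1)));
+ * </pre></blockquote>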
+ * + * @since 3.1 + * @mongodb.driver.manual reference/operator/update/ Update Operators + */ +public final class Updates { + + /** + * Combine a list of updates into a single update. + * + * @param updates the list of updates + * @return a combined update + */ + public static Bson combine(final Bson... updates) { + return combine(asList(updates)); + } + + /** + * Combine a list of updates into a single update. + * + * @param updates the list of updates + * @return a combined update + */ + public static Bson combine(final List updates) { + notNull("updates", updates); + return new CompositeUpdate(updates); + } + + /** + * Creates an update that sets the values for the document. + * + * @param value the value + * @return the update + * @mongodb.driver.manual reference/operator/update/set/ $set + */ + public static Bson set(final Bson value) { + return new SimpleBsonKeyValue("$set", value); + } + + /** + * Creates an update that sets the value of the field with the given name to the given value. + * + * @param fieldName the non-null field name + * @param value the value, which may be null + * @param the value type + * @return the update + * @mongodb.driver.manual reference/operator/update/set/ $set + */ + public static Bson set(final String fieldName, @Nullable final TItem value) { + return new SimpleUpdate<>(fieldName, value, "$set"); + } + + /** + * Creates an update that deletes the field with the given name. + * + * @param fieldName the non-null field name + * @return the update + * @mongodb.driver.manual reference/operator/update/unset/ $unset + */ + public static Bson unset(final String fieldName) { + return new SimpleUpdate<>(fieldName, "", "$unset"); + } + + /** + * Creates an update that sets the values for the document, but only if the update is an upsert that results in an insert of a document. + * + * @param value the value + * @return the update + * @mongodb.driver.manual reference/operator/update/setOnInsert/ $setOnInsert + * @since 3.10.0 + * @see UpdateOptions#upsert(boolean) + */ + public static Bson setOnInsert(final Bson value) { + return new SimpleBsonKeyValue("$setOnInsert", value); + } + + /** + * Creates an update that sets the value of the field with the given name to the given value, but only if the update is an upsert that + * results in an insert of a document. + * + * @param fieldName the non-null field name + * @param value the value, which may be null + * @param the value type + * @return the update + * @mongodb.driver.manual reference/operator/update/setOnInsert/ $setOnInsert + * @see UpdateOptions#upsert(boolean) + */ + public static Bson setOnInsert(final String fieldName, @Nullable final TItem value) { + return new SimpleUpdate<>(fieldName, value, "$setOnInsert"); + } + + /** + * Creates an update that renames a field. + * + * @param fieldName the non-null field name + * @param newFieldName the non-null new field name + * @return the update + * @mongodb.driver.manual reference/operator/update/rename/ $rename + */ + public static Bson rename(final String fieldName, final String newFieldName) { + notNull("newFieldName", newFieldName); + return new SimpleUpdate<>(fieldName, newFieldName, "$rename"); + } + + /** + * Creates an update that increments the value of the field with the given name by the given value. 
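+ * <p>
+ * For example, a sketch that increments an illustrative {@code "qty"} field by 5 (assuming a {@code collection} variable):
+ * <blockquote><pre>
+ *    collection.updateOne(eq("_id", 1), inc("qty", 5));
+ * </pre></blockquote>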
+ * + * @param fieldName the non-null field name + * @param number the value + * @return the update + * @mongodb.driver.manual reference/operator/update/inc/ $inc + */ + public static Bson inc(final String fieldName, final Number number) { + notNull("number", number); + return new SimpleUpdate<>(fieldName, number, "$inc"); + } + + /** + * Creates an update that multiplies the value of the field with the given name by the given number. + * + * @param fieldName the non-null field name + * @param number the non-null number + * @return the update + * @mongodb.driver.manual reference/operator/update/mul/ $mul + */ + public static Bson mul(final String fieldName, final Number number) { + notNull("number", number); + return new SimpleUpdate<>(fieldName, number, "$mul"); + } + + + /** + * Creates an update that sets the value of the field to the given value if the given value is less than the current value of the + * field. + * + * @param fieldName the non-null field name + * @param value the value + * @param the value type + * @return the update + * @mongodb.driver.manual reference/operator/update/min/ $min + */ + public static Bson min(final String fieldName, final TItem value) { + return new SimpleUpdate<>(fieldName, value, "$min"); + } + + /** + * Creates an update that sets the value of the field to the given value if the given value is greater than the current value of the + * field. + * + * @param fieldName the non-null field name + * @param value the value + * @param the value type + * @return the update + * @mongodb.driver.manual reference/operator/update/max/ $max + */ + public static Bson max(final String fieldName, final TItem value) { + return new SimpleUpdate<>(fieldName, value, "$max"); + } + + /** + * Creates an update that sets the value of the field to the current date as a BSON date. + * + * @param fieldName the non-null field name + * @return the update + * @mongodb.driver.manual reference/operator/update/currentDate/ $currentDate + * @mongodb.driver.manual reference/bson-types/#date Date + */ + public static Bson currentDate(final String fieldName) { + return new SimpleUpdate<>(fieldName, true, "$currentDate"); + } + + /** + * Creates an update that sets the value of the field to the current date as a BSON timestamp.
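+ * <p>
+ * A sketch, using an illustrative {@code "lastModified"} field and assuming a {@code collection} variable:
+ * <blockquote><pre>
+ *    collection.updateOne(eq("_id", 1), currentTimestamp("lastModified"));
+ * </pre></blockquote>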
+ * + * @param fieldName the non-null field name + * @return the update + * @mongodb.driver.manual reference/operator/update/currentDate/ $currentDate + * @mongodb.driver.manual reference/bson-types/#document-bson-type-timestamp Timestamp + */ + public static Bson currentTimestamp(final String fieldName) { + return new SimpleUpdate<>(fieldName, new BsonDocument("$type", new BsonString("timestamp")), "$currentDate"); + } + + /** + * Creates an update that adds the given value to the array value of the field with the given name, unless the value is + * already present, in which case it does nothing + * + * @param fieldName the non-null field name + * @param value the value, which may be null + * @param the value type + * @return the update + * @mongodb.driver.manual reference/operator/update/addToSet/ $addToSet + */ + public static Bson addToSet(final String fieldName, @Nullable final TItem value) { + return new SimpleUpdate<>(fieldName, value, "$addToSet"); + } + + /** + * Creates an update that adds each of the given values to the array value of the field with the given name, unless the value is + * already present, in which case it does nothing + * + * @param fieldName the non-null field name + * @param values the values + * @param the value type + * @return the update + * @mongodb.driver.manual reference/operator/update/addToSet/ $addToSet + */ + public static Bson addEachToSet(final String fieldName, final List values) { + return new WithEachUpdate<>(fieldName, values, "$addToSet"); + } + + /** + * Creates an update that adds the given value to the array value of the field with the given name. + * + * @param fieldName the non-null field name + * @param value the value, which may be null + * @param the value type + * @return the update + * @mongodb.driver.manual reference/operator/update/push/ $push + */ + public static Bson push(final String fieldName, @Nullable final TItem value) { + return new SimpleUpdate<>(fieldName, value, "$push"); + } + + /** + * Creates an update that adds each of the given values to the array value of the field with the given name. + * + * @param fieldName the non-null field name + * @param values the values + * @param the value type + * @return the update + * @mongodb.driver.manual reference/operator/update/push/ $push + */ + public static Bson pushEach(final String fieldName, final List values) { + return new PushUpdate<>(fieldName, values, new PushOptions()); + } + + /** + * Creates an update that adds each of the given values to the array value of the field with the given name, applying the given + * options for positioning the pushed values, and then slicing and/or sorting the array. + * + * @param fieldName the non-null field name + * @param values the values + * @param options the non-null push options + * @param the value type + * @return the update + * @mongodb.driver.manual reference/operator/update/push/ $push + */ + public static Bson pushEach(final String fieldName, final List values, final PushOptions options) { + return new PushUpdate<>(fieldName, values, options); + } + + /** + * Creates an update that removes all instances of the given value from the array value of the field with the given name. 
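+ * <p>
+ * For example, a sketch that removes every occurrence of {@code "red"} from an illustrative array field {@code "colors"}:
+ * <blockquote><pre>
+ *    collection.updateOne(eq("_id", 1), pull("colors", "red"));
+ * </pre></blockquote>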
+ * + * @param fieldName the non-null field name + * @param value the value, which may be null + * @param the value type + * @return the update + * @mongodb.driver.manual reference/operator/update/pull/ $pull + */ + public static Bson pull(final String fieldName, @Nullable final TItem value) { + return new SimpleUpdate<>(fieldName, value, "$pull"); + } + + /** + * Creates an update that removes from an array all elements that match the given filter. + * + * @param filter the query filter + * @return the update + * @mongodb.driver.manual reference/operator/update/pull/ $pull + */ + public static Bson pullByFilter(final Bson filter) { + return new Bson() { + @Override + public BsonDocument toBsonDocument(final Class tDocumentClass, final CodecRegistry codecRegistry) { + BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument()); + + writer.writeStartDocument(); + writer.writeName("$pull"); + + encodeValue(writer, filter, codecRegistry); + + writer.writeEndDocument(); + + return writer.getDocument(); + } + }; + } + + /** + * Creates an update that removes all instances of the given values from the array value of the field with the given name. + * + * @param fieldName the non-null field name + * @param values the values + * @param the value type + * @return the update + * @mongodb.driver.manual reference/operator/update/pull/ $pull + */ + public static Bson pullAll(final String fieldName, final List values) { + return new PullAllUpdate<>(fieldName, values); + } + + /** + * Creates an update that pops the first element of an array that is the value of the field with the given name. + * + * @param fieldName the non-null field name + * @return the update + * @mongodb.driver.manual reference/operator/update/pop/ $pop + */ + public static Bson popFirst(final String fieldName) { + return new SimpleUpdate<>(fieldName, -1, "$pop"); + } + + /** + * Creates an update that pops the last element of an array that is the value of the field with the given name. + * + * @param fieldName the non-null field name + * @return the update + * @mongodb.driver.manual reference/operator/update/pop/ $pop + */ + public static Bson popLast(final String fieldName) { + return new SimpleUpdate<>(fieldName, 1, "$pop"); + } + + /** + * Creates an update that performs a bitwise and between the given integer value and the integral value of the field with the given + * name. + * + * @param fieldName the field name + * @param value the value + * @return the update + */ + public static Bson bitwiseAnd(final String fieldName, final int value) { + return createBitUpdateDocument(fieldName, "and", value); + } + + /** + * Creates an update that performs a bitwise and between the given long value and the integral value of the field with the given name. + * + * @param fieldName the field name + * @param value the value + * @return the update + * @mongodb.driver.manual reference/operator/update/bit/ $bit + */ + public static Bson bitwiseAnd(final String fieldName, final long value) { + return createBitUpdateDocument(fieldName, "and", value); + } + + /** + * Creates an update that performs a bitwise or between the given integer value and the integral value of the field with the given + * name. 
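+ * <p>
+ * For example, a sketch that sets a single flag bit in an illustrative integral {@code "permissions"} field:
+ * <blockquote><pre>
+ *    collection.updateOne(eq("_id", 1), bitwiseOr("permissions", 0x4));
+ * </pre></blockquote>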
+ * + * @param fieldName the field name + * @param value the value + * @return the update + * @mongodb.driver.manual reference/operator/update/bit/ $bit + */ + public static Bson bitwiseOr(final String fieldName, final int value) { + return createBitUpdateDocument(fieldName, "or", value); + } + + /** + * Creates an update that performs a bitwise or between the given long value and the integral value of the field with the given name. + * + * @param fieldName the field name + * @param value the value + * @return the update + * @mongodb.driver.manual reference/operator/update/bit/ $bit + */ + public static Bson bitwiseOr(final String fieldName, final long value) { + return createBitUpdateDocument(fieldName, "or", value); + } + + /** + * Creates an update that performs a bitwise xor between the given integer value and the integral value of the field with the given + * name. + * + * @param fieldName the field name + * @param value the value + * @return the update + */ + public static Bson bitwiseXor(final String fieldName, final int value) { + return createBitUpdateDocument(fieldName, "xor", value); + } + + /** + * Creates an update that performs a bitwise xor between the given long value and the integral value of the field with the given name. + * + * @param fieldName the field name + * @param value the value + * @return the update + */ + public static Bson bitwiseXor(final String fieldName, final long value) { + return createBitUpdateDocument(fieldName, "xor", value); + } + + private static Bson createBitUpdateDocument(final String fieldName, final String bitwiseOperator, final int value) { + return createBitUpdateDocument(fieldName, bitwiseOperator, new BsonInt32(value)); + } + + private static Bson createBitUpdateDocument(final String fieldName, final String bitwiseOperator, final long value) { + return createBitUpdateDocument(fieldName, bitwiseOperator, new BsonInt64(value)); + } + + private static Bson createBitUpdateDocument(final String fieldName, final String bitwiseOperator, final BsonValue value) { + return new BsonDocument("$bit", new BsonDocument(fieldName, new BsonDocument(bitwiseOperator, value))); + } + + private static class SimpleBsonKeyValue implements Bson { + private final String fieldName; + private final Bson value; + + SimpleBsonKeyValue(final String fieldName, final Bson value) { + this.fieldName = notNull("fieldName", fieldName); + this.value = notNull("value", value); + } + + @Override + public BsonDocument toBsonDocument(final Class tDocumentClass, final CodecRegistry codecRegistry) { + BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument()); + writer.writeStartDocument(); + writer.writeName(fieldName); + encodeValue(writer, value, codecRegistry); + writer.writeEndDocument(); + + return writer.getDocument(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + SimpleBsonKeyValue that = (SimpleBsonKeyValue) o; + + if (!fieldName.equals(that.fieldName)) { + return false; + } + return value.equals(that.value); + } + + @Override + public int hashCode() { + int result = fieldName.hashCode(); + result = 31 * result + value.hashCode(); + return result; + } + + @Override + public String toString() { + return "SimpleBsonKeyValue{" + + "fieldName='" + fieldName + '\'' + + ", value=" + value + + '}'; + } + } + + private static class SimpleUpdate implements Bson { + private final String fieldName; + private final TItem value; + private final String 
operator; + + SimpleUpdate(final String fieldName, final TItem value, final String operator) { + this.fieldName = notNull("fieldName", fieldName); + this.value = value; + this.operator = operator; + } + + @Override + public BsonDocument toBsonDocument(final Class tDocumentClass, final CodecRegistry codecRegistry) { + BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument()); + + writer.writeStartDocument(); + writer.writeName(operator); + + writer.writeStartDocument(); + writer.writeName(fieldName); + encodeValue(writer, value, codecRegistry); + writer.writeEndDocument(); + + writer.writeEndDocument(); + + return writer.getDocument(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + SimpleUpdate that = (SimpleUpdate) o; + + if (!fieldName.equals(that.fieldName)) { + return false; + } + if (!Objects.equals(value, that.value)) { + return false; + } + return Objects.equals(operator, that.operator); + } + + @Override + public int hashCode() { + int result = fieldName.hashCode(); + result = 31 * result + (value != null ? value.hashCode() : 0); + result = 31 * result + (operator != null ? operator.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "Update{" + + "fieldName='" + fieldName + '\'' + + ", operator='" + operator + '\'' + + ", value=" + value + + '}'; + } + } + + private static class WithEachUpdate implements Bson { + private final String fieldName; + private final List values; + private final String operator; + + WithEachUpdate(final String fieldName, final List values, final String operator) { + this.fieldName = notNull("fieldName", fieldName); + this.values = notNull("values", values); + this.operator = operator; + } + + @Override + public BsonDocument toBsonDocument(final Class tDocumentClass, final CodecRegistry codecRegistry) { + BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument()); + + writer.writeStartDocument(); + writer.writeName(operator); + + writer.writeStartDocument(); + writer.writeName(fieldName); + writer.writeStartDocument(); + + writer.writeStartArray("$each"); + for (TItem value : values) { + encodeValue(writer, value, codecRegistry); + } + writer.writeEndArray(); + + writeAdditionalFields(writer, tDocumentClass, codecRegistry); + + writer.writeEndDocument(); + + writer.writeEndDocument(); + + writer.writeEndDocument(); + + return writer.getDocument(); + } + + protected void writeAdditionalFields(final BsonDocumentWriter writer, final Class tDocumentClass, + final CodecRegistry codecRegistry) { + } + + + protected String additionalFieldsToString() { + return ""; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + WithEachUpdate that = (WithEachUpdate) o; + + if (!fieldName.equals(that.fieldName)) { + return false; + } + if (!values.equals(that.values)) { + return false; + } + return Objects.equals(operator, that.operator); + } + + @Override + public int hashCode() { + int result = fieldName.hashCode(); + result = 31 * result + values.hashCode(); + result = 31 * result + (operator != null ? 
operator.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "Each Update{" + + "fieldName='" + fieldName + '\'' + + ", operator='" + operator + '\'' + + ", values=" + values + + additionalFieldsToString() + + '}'; + } + } + + private static class PushUpdate extends WithEachUpdate { + + private final PushOptions options; + + PushUpdate(final String fieldName, final List values, final PushOptions options) { + super(fieldName, values, "$push"); + this.options = notNull("options", options); + } + + @Override + protected void writeAdditionalFields(final BsonDocumentWriter writer, final Class tDocumentClass, + final CodecRegistry codecRegistry) { + Integer position = options.getPosition(); + if (position != null) { + writer.writeInt32("$position", position); + } + Integer slice = options.getSlice(); + if (slice != null) { + writer.writeInt32("$slice", slice); + } + Integer sort = options.getSort(); + if (sort != null) { + writer.writeInt32("$sort", sort); + } else { + Bson sortDocument = options.getSortDocument(); + if (sortDocument != null) { + writer.writeName("$sort"); + encodeValue(writer, sortDocument, codecRegistry); + } + } + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + PushUpdate that = (PushUpdate) o; + + return options.equals(that.options); + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + options.hashCode(); + return result; + } + + @Override + protected String additionalFieldsToString() { + return ", options=" + options; + } + } + + private static class PullAllUpdate implements Bson { + private final String fieldName; + private final List values; + + PullAllUpdate(final String fieldName, final List values) { + this.fieldName = notNull("fieldName", fieldName); + this.values = notNull("values", values); + } + + @Override + public BsonDocument toBsonDocument(final Class tDocumentClass, final CodecRegistry codecRegistry) { + BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument()); + + writer.writeStartDocument(); + writer.writeName("$pullAll"); + + writer.writeStartDocument(); + writer.writeName(fieldName); + + writer.writeStartArray(); + for (TItem value : values) { + encodeValue(writer, value, codecRegistry); + } + writer.writeEndArray(); + + writer.writeEndDocument(); + + writer.writeEndDocument(); + + return writer.getDocument(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + PullAllUpdate that = (PullAllUpdate) o; + + if (!fieldName.equals(that.fieldName)) { + return false; + } + return values.equals(that.values); + } + + @Override + public int hashCode() { + int result = fieldName.hashCode(); + result = 31 * result + values.hashCode(); + return result; + } + + @Override + public String toString() { + return "Update{" + + "fieldName='" + fieldName + '\'' + + ", operator='$pullAll'" + + ", value=" + values + + '}'; + } + } + + private static class CompositeUpdate implements Bson { + private final List updates; + + CompositeUpdate(final List updates) { + this.updates = updates; + } + + @Override + public BsonDocument toBsonDocument(final Class tDocumentClass, final CodecRegistry codecRegistry) { + BsonDocument document = new BsonDocument(); + + for (Bson update : updates) { + BsonDocument 
rendered = update.toBsonDocument(tDocumentClass, codecRegistry); + for (Map.Entry element : rendered.entrySet()) { + if (document.containsKey(element.getKey())) { + BsonDocument currentOperatorDocument = (BsonDocument) element.getValue(); + BsonDocument existingOperatorDocument = document.getDocument(element.getKey()); + for (Map.Entry currentOperationDocumentElements : currentOperatorDocument.entrySet()) { + existingOperatorDocument.append(currentOperationDocumentElements.getKey(), + currentOperationDocumentElements.getValue()); + } + } else { + document.append(element.getKey(), element.getValue()); + } + } + } + + return document; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CompositeUpdate that = (CompositeUpdate) o; + + return Objects.equals(updates, that.updates); + } + + @Override + public int hashCode() { + return updates != null ? updates.hashCode() : 0; + } + + @Override + public String toString() { + return "Updates{" + + "updates=" + updates + + '}'; + } + } + + private Updates() { + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/ValidationAction.java b/driver-core/src/main/com/mongodb/client/model/ValidationAction.java new file mode 100644 index 00000000000..a6749adfaf4 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/ValidationAction.java @@ -0,0 +1,70 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import static com.mongodb.assertions.Assertions.notNull; +import static java.lang.String.format; + +/** + * Determines whether to error on invalid documents or just warn about the violations but allow invalid documents. + * + * @since 3.2 + * @mongodb.server.release 3.2 + * @mongodb.driver.manual reference/method/db.createCollection/ Create Collection + */ +public enum ValidationAction { + + /** + * Documents must pass validation before the write occurs. Otherwise, the write operation fails. + */ + ERROR("error"), + + /** + * Documents do not have to pass validation. If the document fails validation, the write operation logs the validation failure to + * the mongod logs. + */ + WARN("warn"); + + + private final String value; + ValidationAction(final String value) { + this.value = value; + } + + /** + * @return the String representation of the validation level that the MongoDB server understands + */ + public String getValue() { + return value; + } + + /** + * Returns the validationAction from the string representation of a validation action. + * + * @param validationAction the string representation of the validation action. 
+ * @return the validation action + */ + public static ValidationAction fromString(final String validationAction) { + notNull("validationAction", validationAction); + for (ValidationAction action : ValidationAction.values()) { + if (validationAction.equalsIgnoreCase(action.value)) { + return action; + } + } + throw new IllegalArgumentException(format("'%s' is not a valid validationAction", validationAction)); + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/ValidationLevel.java b/driver-core/src/main/com/mongodb/client/model/ValidationLevel.java new file mode 100644 index 00000000000..4ad4fdd5bf1 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/ValidationLevel.java @@ -0,0 +1,75 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import static com.mongodb.assertions.Assertions.notNull; +import static java.lang.String.format; + +/** + * Determines how strictly MongoDB applies the validation rules to existing documents during an insert or update. + * + * @since 3.2 + * @mongodb.server.release 3.2 + * @mongodb.driver.manual reference/method/db.createCollection/ Create Collection + */ +public enum ValidationLevel { + + /** + * No validation for inserts or updates. + */ + OFF("off"), + + /** + * Apply validation rules to all inserts and all updates. + */ + STRICT("strict"), + + /** + * Applies validation rules to inserts and to updates on existing valid documents. + * + *
<p>Does not apply rules to updates on existing invalid documents.</p>
+ */ + MODERATE("moderate"); + + private final String value; + ValidationLevel(final String value) { + this.value = value; + } + + /** + * @return the String representation of the validation level that the MongoDB server understands + */ + public String getValue() { + return value; + } + + /** + * Returns the ValidationLevel from the string representation of the validation level. + * + * @param validationLevel the string representation of the validation level. + * @return the validation level + */ + public static ValidationLevel fromString(final String validationLevel) { + notNull("ValidationLevel", validationLevel); + for (ValidationLevel action : ValidationLevel.values()) { + if (validationLevel.equalsIgnoreCase(action.value)) { + return action; + } + } + throw new IllegalArgumentException(format("'%s' is not a valid ValidationLevel", validationLevel)); + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/ValidationOptions.java b/driver-core/src/main/com/mongodb/client/model/ValidationOptions.java new file mode 100644 index 00000000000..8f514aa2a91 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/ValidationOptions.java @@ -0,0 +1,108 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * Validation options for documents being inserted or updated in a collection + * + * @since 3.2 + * @mongodb.server.release 3.2 + * @mongodb.driver.manual reference/method/db.createCollection/ Create Collection + */ +public final class ValidationOptions { + private Bson validator; + private ValidationLevel validationLevel; + private ValidationAction validationAction; + + /** + * Gets the validation rules if set or null. + * + * @return the validation rules if set or null + */ + @Nullable + public Bson getValidator() { + return validator; + } + + /** + * Sets the validation rules for all + * + * @param validator the validation rules + * @return this + */ + public ValidationOptions validator(@Nullable final Bson validator) { + this.validator = validator; + return this; + } + + /** + * Gets the {@link ValidationLevel} that determines how strictly MongoDB applies the validation rules to existing documents during an + * insert or update. + * + * @return the ValidationLevel. + */ + @Nullable + public ValidationLevel getValidationLevel() { + return validationLevel; + } + + /** + * Sets the validation level that determines how strictly MongoDB applies the validation rules to existing documents during an insert + * or update. + * + * @param validationLevel the validation level + * @return this + */ + public ValidationOptions validationLevel(@Nullable final ValidationLevel validationLevel) { + this.validationLevel = validationLevel; + return this; + } + + /** + * Gets the {@link ValidationAction}. + * + * @return the ValidationAction. 
+ */ + @Nullable + public ValidationAction getValidationAction() { + return validationAction; + } + + /** + * Sets the {@link ValidationAction} that determines whether to error on invalid documents or just warn about the violations but allow + * invalid documents. + * + * @param validationAction the validation action + * @return this + */ + public ValidationOptions validationAction(@Nullable final ValidationAction validationAction) { + this.validationAction = validationAction; + return this; + } + + @Override + public String toString() { + return "ValidationOptions{" + + "validator=" + validator + + ", validationLevel=" + validationLevel + + ", validationAction=" + validationAction + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/Variable.java b/driver-core/src/main/com/mongodb/client/model/Variable.java new file mode 100644 index 00000000000..cbf7da219f8 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/Variable.java @@ -0,0 +1,93 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import java.util.Objects; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * Helps define new variable for the $lookup pipeline stage + * + * @param the type of the value for the new variable + * @mongodb.driver.manual reference/operator/aggregation/lookup/ $lookup + * @mongodb.server.release 3.6 + * @since 3.7 + */ +public class Variable { + private final String name; + private final TExpression value; + + /** + * Creates a new variable definition for use in $lookup pipeline stages + * + * @param name the name of the new variable + * @param value the value of the new variable + * @mongodb.driver.manual reference/operator/aggregation/lookup/ $lookup + */ + public Variable(final String name, final TExpression value) { + this.name = notNull("name", name); + this.value = value; + } + + /** + * @return the name of the new variable + */ + public String getName() { + return name; + } + + /** + * @return the value of the new variable + */ + public TExpression getValue() { + return value; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (!(o instanceof Variable)) { + return false; + } + + Variable variable = (Variable) o; + + if (!name.equals(variable.name)) { + return false; + } + return Objects.equals(value, variable.value); + + } + + @Override + public int hashCode() { + int result = name.hashCode(); + result = 31 * result + (value != null ? 
value.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "Variable{" + + "name='" + name + '\'' + + ", value=" + value + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/Window.java b/driver-core/src/main/com/mongodb/client/model/Window.java new file mode 100644 index 00000000000..d689dbe4a98 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/Window.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model; + +import org.bson.conversions.Bson; + +/** + * A subset of documents within a partition in the {@link Aggregates#setWindowFields(Object, Bson, Iterable) $setWindowFields} pipeline stage + * of an aggregation pipeline (see {@code partitionBy} in {@link Aggregates#setWindowFields(Object, Bson, Iterable)}). + * + * @see Windows + * @since 4.3 + */ +public interface Window extends Bson { +} diff --git a/driver-core/src/main/com/mongodb/client/model/WindowOutputField.java b/driver-core/src/main/com/mongodb/client/model/WindowOutputField.java new file mode 100644 index 00000000000..f4d35f272db --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/WindowOutputField.java @@ -0,0 +1,34 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model; + +import org.bson.conversions.Bson; + +/** + * The core part of the {@link Aggregates#setWindowFields(Object, Bson, Iterable) $setWindowFields} pipeline stage of an aggregation pipeline. + * A triple of a window function, a {@linkplain Window window} and a path to a field to be computed by the window function over the window. + * + * @see WindowOutputFields + * @since 4.3 + */ +public interface WindowOutputField { + /** + * Render into {@link BsonField}. + * + * @return A {@link BsonField} representation. + */ + BsonField toBsonField(); +} diff --git a/driver-core/src/main/com/mongodb/client/model/WindowOutputFields.java b/driver-core/src/main/com/mongodb/client/model/WindowOutputFields.java new file mode 100644 index 00000000000..1e4ead71d83 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/WindowOutputFields.java @@ -0,0 +1,1091 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model; + +import com.mongodb.client.model.Windows.Bound; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonDocumentWriter; +import org.bson.BsonType; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static org.bson.assertions.Assertions.notNull; + +/** + * Builders for {@linkplain WindowOutputField window output fields} used in the + * {@link Aggregates#setWindowFields(Object, Bson, Iterable) $setWindowFields} pipeline stage + * of an aggregation pipeline. Each windowed computation is a triple: + *
<ul>
+ *     <li>A window function. Some functions require documents in a window to be sorted
+ *     (see {@code sortBy} in {@link Aggregates#setWindowFields(Object, Bson, Iterable)}).</li>
+ *     <li>An optional {@linkplain Window window}, a.k.a. frame.
+ *     Specifying {@code null} window is equivalent to specifying an unbounded window,
+ *     i.e., a window with both ends specified as {@link Bound#UNBOUNDED}.
+ *     Some window functions, e.g., {@link #derivative(String, Object, Window)},
+ *     require an explicit unbounded window instead of {@code null}.</li>
+ *     <li>A path to an output field to be computed by the window function over the window.</li>
+ * </ul>
+ * A window output field is similar to an {@linkplain Accumulators accumulator} but does not result in folding documents constituting + * the window into a single document. + * + * @mongodb.driver.manual meta/aggregation-quick-reference/#field-paths Field paths + * @since 4.3 + * @mongodb.server.release 5.0 + */ +public final class WindowOutputFields { + /** + * Creates a windowed output field from a document field in situations when there is no builder method that better satisfies your needs. + * This method cannot be used to validate the document field syntax. + *
<p>
+ * Example<br>
+ * The following code creates two functionally equivalent window output fields, + * though they may not be {@linkplain #equals(Object) equal}.
+ * <pre>{@code
+     *  Window pastWeek = Windows.timeRange(-1, MongoTimeUnit.WEEK, Windows.Bound.CURRENT);
+     *  WindowOutputField pastWeekExpenses1 = WindowOutputFields.sum("pastWeekExpenses", "$expenses", pastWeek);
+     *  WindowOutputField pastWeekExpenses2 = WindowOutputFields.of(
+     *          new BsonField("pastWeekExpenses", new Document("$sum", "$expenses")
+     *                  .append("window", pastWeek.toBsonDocument())));
+     * }</pre>
+ * + * @param windowOutputField A document field representing the required windowed output field. + * @return The constructed windowed output field. + */ + public static WindowOutputField of(final BsonField windowOutputField) { + return new BsonFieldWindowOutputField(notNull("windowOutputField", windowOutputField)); + } + + /** + * Builds a window output field of the sum of the evaluation results of the {@code expression} over the {@code window}. + * + * @param path The output field path. + * @param expression The expression. + * @param window The window. + * @param The expression type. + * @return The constructed windowed output field. + * @mongodb.driver.dochub core/window-functions-sum $sum + */ + public static WindowOutputField sum(final String path, final TExpression expression, @Nullable final Window window) { + notNull("path", path); + notNull("expression", expression); + return simpleParameterWindowFunction(path, "$sum", expression, window); + } + + /** + * Builds a window output field of the average of the evaluation results of the {@code expression} over the {@code window}. + * + * @param path The output field path. + * @param expression The expression. + * @param window The window. + * @param The expression type. + * @return The constructed windowed output field. + * @mongodb.driver.dochub core/window-functions-avg $avg + */ + public static WindowOutputField avg(final String path, final TExpression expression, @Nullable final Window window) { + notNull("path", path); + notNull("expression", expression); + return simpleParameterWindowFunction(path, "$avg", expression, window); + } + + /** + * Builds a window output field of percentiles of the evaluation results of the {@code inExpression} + * over documents in the specified {@code window}. The {@code pExpression} parameter represents an array of + * percentiles of interest, with each element being a numeric value between 0.0 and 1.0 (inclusive). + * + * @param path The output field path. + * @param inExpression The input expression. + * @param pExpression The expression representing a percentiles of interest. + * @param method The method to be used for computing the percentiles. + * @param window The window. + * @param The type of the input expression. + * @param The type of the percentile expression. + * @return The constructed windowed output field. + * @mongodb.driver.manual reference/operator/aggregation/percentile/ $percentile + * @since 4.10 + * @mongodb.server.release 7.0 + */ + public static WindowOutputField percentile(final String path, final InExpression inExpression, + final PExpression pExpression, final QuantileMethod method, + @Nullable final Window window) { + notNull("path", path); + notNull("inExpression", inExpression); + notNull("pExpression", pExpression); + notNull("method", method); + Map args = new LinkedHashMap<>(3); + args.put(ParamName.INPUT, inExpression); + args.put(ParamName.P_LOWERCASE, pExpression); + args.put(ParamName.METHOD, method.toBsonValue()); + return compoundParameterWindowFunction(path, "$percentile", args, window); + } + + /** + * Builds a window output field representing the median value of the evaluation results of the {@code inExpression} + * over documents in the specified {@code window}. + * + * @param path The output field path. + * @param inExpression The input expression. + * @param method The method to be used for computing the median. + * @param window The window. + * @param The type of the input expression. + * @return The constructed windowed output field. 
+ * @mongodb.driver.manual reference/operator/aggregation/median/ $median + * @since 4.10 + * @mongodb.server.release 7.0 + */ + public static WindowOutputField median(final String path, final InExpression inExpression, + final QuantileMethod method, + @Nullable final Window window) { + notNull("path", path); + notNull("inExpression", inExpression); + notNull("method", method); + Map args = new LinkedHashMap<>(2); + args.put(ParamName.INPUT, inExpression); + args.put(ParamName.METHOD, method.toBsonValue()); + return compoundParameterWindowFunction(path, "$median", args, window); + } + + /** + * Builds a window output field of the sample standard deviation of the evaluation results of the {@code expression} over the + * {@code window}. + * + * @param path The output field path. + * @param expression The expression. + * @param window The window. + * @param The expression type. + * @return The constructed windowed output field. + * @mongodb.driver.dochub core/window-functions-std-dev-samp $stdDevSamp + */ + public static WindowOutputField stdDevSamp(final String path, final TExpression expression, + @Nullable final Window window) { + notNull("path", path); + notNull("expression", expression); + return simpleParameterWindowFunction(path, "$stdDevSamp", expression, window); + } + + /** + * Builds a window output field of the population standard deviation of the evaluation results of the {@code expression} + * over the {@code window}. + * + * @param path The output field path. + * @param expression The expression. + * @param window The window. + * @param The expression type. + * @return The constructed windowed output field. + * @mongodb.driver.dochub core/window-functions-std-dev-pop $stdDevPop + */ + public static WindowOutputField stdDevPop(final String path, final TExpression expression, + @Nullable final Window window) { + notNull("path", path); + notNull("expression", expression); + return simpleParameterWindowFunction(path, "$stdDevPop", expression, window); + } + + /** + * Builds a window output field of the smallest of the evaluation results of the {@code expression} over the {@code window}. + * + * @param path The output field path. + * @param expression The expression. + * @param window The window. + * @param The expression type. + * @return The constructed windowed output field. + * @mongodb.driver.manual reference/operator/aggregation/min/ $min + */ + public static WindowOutputField min(final String path, final TExpression expression, @Nullable final Window window) { + notNull("path", path); + notNull("expression", expression); + return simpleParameterWindowFunction(path, "$min", expression, window); + } + + /** + * Builds a window output field of a BSON {@link org.bson.BsonType#ARRAY Array} + * of {@code N} smallest evaluation results of the {@code inExpression} over the {@code window}, + * where {@code N} is the positive integral value of the {@code nExpression}. + * + * @param path The output field path. + * @param inExpression The input expression. + * @param nExpression The expression limiting the number of produced values. + * @param window The window. + * @param The type of the input expression. + * @param The type of the limiting expression. + * @return The constructed windowed output field. 
+ * @mongodb.driver.manual reference/operator/aggregation/minN/ $minN + * @since 4.7 + * @mongodb.server.release 5.2 + */ + public static WindowOutputField minN( + final String path, final InExpression inExpression, final NExpression nExpression, @Nullable final Window window) { + notNull("path", path); + notNull("inExpression", inExpression); + notNull("nExpression", nExpression); + Map args = new LinkedHashMap<>(3); + args.put(ParamName.INPUT, inExpression); + args.put(ParamName.N_LOWERCASE, nExpression); + return compoundParameterWindowFunction(path, "$minN", args, window); + } + + /** + * Builds a window output field of the largest of the evaluation results of the {@code expression} over the {@code window}. + * + * @param path The output field path. + * @param expression The expression. + * @param window The window. + * @param The expression type. + * @return The constructed windowed output field. + * @mongodb.driver.manual reference/operator/aggregation/max/ $max + */ + public static WindowOutputField max(final String path, final TExpression expression, @Nullable final Window window) { + notNull("path", path); + notNull("expression", expression); + return simpleParameterWindowFunction(path, "$max", expression, window); + } + + /** + * Builds a window output field of a BSON {@link org.bson.BsonType#ARRAY Array} + * of {@code N} largest evaluation results of the {@code inExpression} over the {@code window}, + * where {@code N} is the positive integral value of the {@code nExpression}. + * + * @param path The output field path. + * @param inExpression The input expression. + * @param nExpression The expression limiting the number of produced values. + * @param window The window. + * @param The type of the input expression. + * @param The type of the limiting expression. + * @return The constructed windowed output field. + * @mongodb.driver.manual reference/operator/aggregation/maxN/ $maxN + * @since 4.7 + * @mongodb.server.release 5.2 + */ + public static WindowOutputField maxN( + final String path, final InExpression inExpression, final NExpression nExpression, @Nullable final Window window) { + notNull("path", path); + notNull("inExpression", inExpression); + notNull("nExpression", nExpression); + Map args = new LinkedHashMap<>(3); + args.put(ParamName.INPUT, inExpression); + args.put(ParamName.N_LOWERCASE, nExpression); + return compoundParameterWindowFunction(path, "$maxN", args, window); + } + + /** + * Builds a window output field of the number of documents in the {@code window}. + * + * @param path The output field path. + * @param window The window. + * @return The constructed windowed output field. + * @mongodb.driver.dochub core/window-functions-count $count + */ + public static WindowOutputField count(final String path, @Nullable final Window window) { + notNull("path", path); + return simpleParameterWindowFunction(path, "$count", null, window); + } + + /** + * Builds a window output field of the time derivative by subtracting the evaluation result of the {@code expression} against the last document + * and the first document in the {@code window} and dividing it by the difference in the values of the + * {@link Aggregates#setWindowFields(Object, Bson, Iterable) sortBy} field of the respective documents. + * Other documents in the {@code window} have no effect on the computation. + *
<p>
+ * {@linkplain Aggregates#setWindowFields(Object, Bson, Iterable) Sorting} is required.</p>
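+ * <p>
+ * A sketch with illustrative names, using an explicit unbounded window:
+ * <blockquote><pre>
+ *    WindowOutputField rate = WindowOutputFields.derivative("rate", "$amount",
+ *            Windows.documents(Windows.Bound.UNBOUNDED, Windows.Bound.UNBOUNDED));
+ * </pre></blockquote>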
+ * + * @param path The output field path. + * @param expression The expression. + * @param window The window. + * @param The expression type. + * @return The constructed windowed output field. + * @mongodb.driver.dochub core/window-functions-derivative $derivative + */ + public static WindowOutputField derivative(final String path, final TExpression expression, + final Window window) { + notNull("path", path); + notNull("expression", expression); + notNull("window", window); + Map args = new HashMap<>(1); + args.put(ParamName.INPUT, expression); + return compoundParameterWindowFunction(path, "$derivative", args, window); + } + + /** + * Builds a window output field of the time derivative by subtracting the evaluation result of the {@code expression} against the last + * document and the first document in the {@code window} and dividing it by the difference in the BSON {@link BsonType#DATE_TIME Date} + * values of the {@link Aggregates#setWindowFields(Object, Bson, Iterable) sortBy} field of the respective documents. + * Other documents in the {@code window} have no effect on the computation. + *
<p>
+ * {@linkplain Aggregates#setWindowFields(Object, Bson, Iterable) Sorting} is required.</p>
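+ * <p>
+ * A sketch with illustrative names, assuming the {@code sortBy} field holds BSON Date values:
+ * <blockquote><pre>
+ *    WindowOutputField perHour = WindowOutputFields.timeDerivative("perHour", "$amount",
+ *            Windows.documents(Windows.Bound.UNBOUNDED, Windows.Bound.UNBOUNDED), MongoTimeUnit.HOUR);
+ * </pre></blockquote>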
+ * + * @param path The output field path. + * @param expression The expression. + * @param window The window. + * @param unit The desired time unit for the divisor. Allowed values are: + * {@link MongoTimeUnit#WEEK}, {@link MongoTimeUnit#DAY}, {@link MongoTimeUnit#HOUR}, {@link MongoTimeUnit#MINUTE}, + * {@link MongoTimeUnit#SECOND}, {@link MongoTimeUnit#MILLISECOND}. + * @param The expression type. + * @return The constructed windowed output field. + * @mongodb.driver.dochub core/window-functions-derivative $derivative + */ + public static WindowOutputField timeDerivative(final String path, final TExpression expression, final Window window, + final MongoTimeUnit unit) { + notNull("path", path); + notNull("expression", expression); + notNull("window", window); + notNull("unit", unit); + isTrueArgument("unit must be either of WEEK, DAY, HOUR, MINUTE, SECOND, MILLISECOND", unit.fixed()); + Map args = new LinkedHashMap<>(2); + args.put(ParamName.INPUT, expression); + args.put(ParamName.UNIT, unit.value()); + return compoundParameterWindowFunction(path, "$derivative", args, window); + } + + /** + * Builds a window output field of the approximate integral of a function that maps values of + * the {@link Aggregates#setWindowFields(Object, Bson, Iterable) sortBy} field to evaluation results of the {@code expression} + * against the same document. The limits of integration match the {@code window} bounds. + * The approximation is done by using the + * + * trapezoidal rule. + *
<p>
+ * {@linkplain Aggregates#setWindowFields(Object, Bson, Iterable) Sorting} is required.</p>
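+ * <p>
+ * A sketch with illustrative names, integrating {@code "$y"} over an explicit unbounded window:
+ * <blockquote><pre>
+ *    WindowOutputField area = WindowOutputFields.integral("area", "$y",
+ *            Windows.documents(Windows.Bound.UNBOUNDED, Windows.Bound.UNBOUNDED));
+ * </pre></blockquote>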
+ * + * @param path The output field path. + * @param expression The expression. + * @param window The window. + * @param The expression type. + * @return The constructed windowed output field. + * @mongodb.driver.dochub core/window-functions-integral $integral + */ + public static WindowOutputField integral(final String path, final TExpression expression, final Window window) { + notNull("path", path); + notNull("expression", expression); + notNull("window", window); + Map args = new HashMap<>(1); + args.put(ParamName.INPUT, expression); + return compoundParameterWindowFunction(path, "$integral", args, window); + } + + /** + * Builds a window output field of the approximate integral of a function that maps BSON {@link BsonType#DATE_TIME Date} values of + * the {@link Aggregates#setWindowFields(Object, Bson, Iterable) sortBy} field to evaluation results of the {@code expression} + * against the same document. The limits of integration match the {@code window} bounds. + * The approximation is done by using the trapezoidal rule. + *
<p>
+ * {@linkplain Aggregates#setWindowFields(Object, Bson, Iterable) Sorting} is required.</p>
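+ * <p>
+ * A sketch with illustrative names, assuming the {@code sortBy} field holds BSON Date values:
+ * <blockquote><pre>
+ *    WindowOutputField kilowattHours = WindowOutputFields.timeIntegral("kilowattHours", "$kilowatts",
+ *            Windows.documents(Windows.Bound.UNBOUNDED, Windows.Bound.UNBOUNDED), MongoTimeUnit.HOUR);
+ * </pre></blockquote>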
+ * + * @param path The output field path. + * @param expression The expression. + * @param window The window. + * @param unit The desired time unit for the divisor. Allowed values are: + * {@link MongoTimeUnit#WEEK}, {@link MongoTimeUnit#DAY}, {@link MongoTimeUnit#HOUR}, {@link MongoTimeUnit#MINUTE}, + * {@link MongoTimeUnit#SECOND}, {@link MongoTimeUnit#MILLISECOND}. + * @param The expression type. + * @return The constructed windowed output field. + * @mongodb.driver.dochub core/window-functions-integral $integral + */ + public static WindowOutputField timeIntegral(final String path, final TExpression expression, final Window window, + final MongoTimeUnit unit) { + notNull("path", path); + notNull("expression", expression); + notNull("window", window); + notNull("unit", unit); + isTrueArgument("unit must be either of WEEK, DAY, HOUR, MINUTE, SECOND, MILLISECOND", unit.fixed()); + Map args = new LinkedHashMap<>(2); + args.put(ParamName.INPUT, expression); + args.put(ParamName.UNIT, unit.value()); + return compoundParameterWindowFunction(path, "$integral", args, window); + } + + /** + * Builds a window output field of the sample covariance between the evaluation results of the two expressions over the {@code window}. + * + * @param path The output field path. + * @param expression1 The first expression. + * @param expression2 The second expression. + * @param window The window. + * @param The expression type. + * @return The constructed windowed output field. + * @mongodb.driver.dochub core/window-functions-covariance-samp $covarianceSamp + */ + public static WindowOutputField covarianceSamp(final String path, final TExpression expression1, + final TExpression expression2, @Nullable final Window window) { + notNull("path", path); + notNull("expression1", expression1); + notNull("expression2", expression2); + List expressions = new ArrayList<>(2); + expressions.add(expression1); + expressions.add(expression2); + return simpleParameterWindowFunction(path, "$covarianceSamp", expressions, window); + } + + /** + * Builds a window output field of the population covariance between the evaluation results of the two expressions over the {@code window}. + * + * @param path The output field path. + * @param expression1 The first expression. + * @param expression2 The second expression. + * @param window The window. + * @param The expression type. + * @return The constructed windowed output field. + * @mongodb.driver.dochub core/window-functions-covariance-pop $covariancePop + */ + public static WindowOutputField covariancePop(final String path, final TExpression expression1, + final TExpression expression2, @Nullable final Window window) { + notNull("path", path); + notNull("expression1", expression1); + notNull("expression2", expression2); + List expressions = new ArrayList<>(2); + expressions.add(expression1); + expressions.add(expression2); + return simpleParameterWindowFunction(path, "$covariancePop", expressions, window); + } + + /** + * Builds a window output field of the exponential moving average of the evaluation results of the {@code expression} over a window + * that includes {@code n} - 1 documents preceding the current document and the current document, with more weight on documents + * closer to the current one. + *
+ * <p>
+ * {@linkplain Aggregates#setWindowFields(Object, Bson, Iterable) Sorting} is required.</p>
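+ * <p>
+ * A minimal sketch, assuming illustrative {@code "ts"} and {@code "price"} fields:</p>
+ * <pre>{@code
+ *  Aggregates.setWindowFields(null, Sorts.ascending("ts"),
+ *          Arrays.asList(WindowOutputFields.expMovingAvg("smoothedPrice", "$price", 5)));
+ * }</pre>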
+ * + * @param path The output field path. + * @param expression The expression. + * @param n Must be positive. + * @param The expression type. + * @return The constructed windowed output field. + * @mongodb.driver.dochub core/window-functions-exp-moving-avg $expMovingAvg + */ + public static WindowOutputField expMovingAvg(final String path, final TExpression expression, final int n) { + notNull("path", path); + notNull("expression", expression); + isTrueArgument("n > 0", n > 0); + Map args = new LinkedHashMap<>(2); + args.put(ParamName.INPUT, expression); + args.put(ParamName.N_UPPERCASE, n); + return compoundParameterWindowFunction(path, "$expMovingAvg", args, null); + } + + /** + * Builds a window output field of the exponential moving average of the evaluation results of the {@code expression} over the half-bounded + * window [{@link Bound#UNBOUNDED}, {@link Bound#CURRENT}], with {@code alpha} representing the degree of weighting decrease. + *
+ * <p>
+ * {@linkplain Aggregates#setWindowFields(Object, Bson, Iterable) Sorting} is required.</p>
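+ * <p>
+ * For intuition, the server documentation describes the computation as the following recurrence
+ * (pseudocode, not part of this API):</p>
+ * <pre>{@code
+ *  result = alpha * currentValue + (1 - alpha) * previousResult;
+ * }</pre>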
+ * + * @param path The output field path. + * @param expression The expression. + * @param alpha A parameter specifying how fast weighting decrease happens. A higher {@code alpha} discounts older observations faster. + * Must belong to the interval (0, 1). + * @param The expression type. + * @return The constructed windowed output field. + * @mongodb.driver.dochub core/window-functions-exp-moving-avg $expMovingAvg + */ + public static WindowOutputField expMovingAvg(final String path, final TExpression expression, final double alpha) { + notNull("path", path); + notNull("expression", expression); + isTrueArgument("alpha > 0", alpha > 0); + isTrueArgument("alpha < 1", alpha < 1); + Map args = new LinkedHashMap<>(2); + args.put(ParamName.INPUT, expression); + args.put(ParamName.ALPHA, alpha); + return compoundParameterWindowFunction(path, "$expMovingAvg", args, null); + } + + /** + * Builds a window output field that adds the evaluation results of the {@code expression} over the {@code window} + * to a BSON {@link org.bson.BsonType#ARRAY Array}. + * Order within the array is guaranteed if {@link Aggregates#setWindowFields(Object, Bson, Iterable) sortBy} is specified. + * + * @param path The output field path. + * @param expression The expression. + * @param window The window. + * @param The expression type. + * @return The constructed windowed output field. + * @mongodb.driver.dochub core/window-functions-push $push + */ + public static WindowOutputField push(final String path, final TExpression expression, @Nullable final Window window) { + notNull("path", path); + notNull("expression", expression); + return simpleParameterWindowFunction(path, "$push", expression, window); + } + + /** + * Builds a window output field that adds the evaluation results of the {@code expression} over the {@code window} + * to a BSON {@link org.bson.BsonType#ARRAY Array} and excludes duplicates. + * Order within the array is not specified. + * + * @param path The output field path. + * @param expression The expression. + * @param window The window. + * @param The expression type. + * @return The constructed windowed output field. + * @mongodb.driver.dochub core/window-functions-add-to-set $addToSet + */ + public static WindowOutputField addToSet(final String path, final TExpression expression, + @Nullable final Window window) { + notNull("path", path); + notNull("expression", expression); + return simpleParameterWindowFunction(path, "$addToSet", expression, window); + } + + /** + * Builds a window output field of the evaluation result of the {@code expression} against the first document in the {@code window}. + *
+ * <p>
+ * {@linkplain Aggregates#setWindowFields(Object, Bson, Iterable) Sorting} is required.</p>
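+ * <p>
+ * A hypothetical sketch, assuming illustrative {@code "$symbol"}, {@code "ts"}, and {@code "price"} names:</p>
+ * <pre>{@code
+ *  Aggregates.setWindowFields("$symbol", Sorts.ascending("ts"),
+ *          Arrays.asList(WindowOutputFields.first("openingPrice", "$price",
+ *                  Windows.documents(Windows.Bound.UNBOUNDED, Windows.Bound.CURRENT))));
+ * }</pre>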
+ * + * @param path The output field path. + * @param expression The expression. + * @param window The window. + * @param The expression type. + * @return The constructed windowed output field. + * @mongodb.driver.manual reference/operator/aggregation/first/ $first + */ + public static WindowOutputField first(final String path, final TExpression expression, @Nullable final Window window) { + notNull("path", path); + notNull("expression", expression); + return simpleParameterWindowFunction(path, "$first", expression, window); + } + + /** + * Builds a window output field of a BSON {@link org.bson.BsonType#ARRAY Array} + * of evaluation results of the {@code inExpression} against the first {@code N} documents in the {@code window}, + * where {@code N} is the positive integral value of the {@code nExpression}. + *
+ * <p>
+ * {@linkplain Aggregates#setWindowFields(Object, Bson, Iterable) Sorting} is required.</p>
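+ * <p>
+ * A hypothetical sketch collecting the three smallest {@code "score"} values per partition
+ * (all names are illustrative):</p>
+ * <pre>{@code
+ *  Aggregates.setWindowFields("$studentId", Sorts.ascending("score"),
+ *          Arrays.asList(WindowOutputFields.firstN("lowestThree", "$score", 3, null)));
+ * }</pre>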
+ *
+ * @param path The output field path.
+ * @param inExpression The input expression.
+ * @param nExpression The expression limiting the number of produced values.
+ * @param window The window.
+ * @param <InExpression> The type of the input expression.
+ * @param <NExpression> The type of the limiting expression.
+ * @return The constructed windowed output field.
+ * @mongodb.driver.manual reference/operator/aggregation/firstN/ $firstN
+ * @since 4.7
+ * @mongodb.server.release 5.2
+ */
+ public static <InExpression, NExpression> WindowOutputField firstN(
+         final String path, final InExpression inExpression, final NExpression nExpression, @Nullable final Window window) {
+     notNull("path", path);
+     notNull("inExpression", inExpression);
+     notNull("nExpression", nExpression);
+     Map<ParamName, Object> args = new LinkedHashMap<>(3);
+     args.put(ParamName.INPUT, inExpression);
+     args.put(ParamName.N_LOWERCASE, nExpression);
+     return compoundParameterWindowFunction(path, "$firstN", args, window);
+ }
+
+ /**
+ * Builds a window output field of the evaluation result of the {@code outExpression} against the top document in the {@code window}
+ * sorted according to the provided {@code sortBy} specification.
+ *
+ * @param path The output field path.
+ * @param sortBy The {@linkplain Sorts sortBy specification}. The syntax is identical to the one expected by {@link Aggregates#sort(Bson)}.
+ * @param outExpression The output expression.
+ * @param window The window.
+ * @param <OutExpression> The type of the output expression.
+ * @return The constructed windowed output field.
+ * @mongodb.driver.manual reference/operator/aggregation/top/ $top
+ * @since 4.7
+ * @mongodb.server.release 5.2
+ */
+ public static <OutExpression> WindowOutputField top(
+         final String path, final Bson sortBy, final OutExpression outExpression, @Nullable final Window window) {
+     notNull("path", path);
+     notNull("sortBy", sortBy);
+     notNull("outExpression", outExpression);
+     Map<ParamName, Object> args = new LinkedHashMap<>(3);
+     args.put(ParamName.SORT_BY, sortBy);
+     args.put(ParamName.OUTPUT, outExpression);
+     return compoundParameterWindowFunction(path, "$top", args, window);
+ }
+
+ /**
+ * Builds a window output field of a BSON {@link org.bson.BsonType#ARRAY Array}
+ * of evaluation results of the {@code outExpression} against the top {@code N} documents in the {@code window}
+ * sorted according to the provided {@code sortBy} specification,
+ * where {@code N} is the positive integral value of the {@code nExpression}.
+ *
+ * @param path The output field path.
+ * @param sortBy The {@linkplain Sorts sortBy specification}. The syntax is identical to the one expected by {@link Aggregates#sort(Bson)}.
+ * @param outExpression The output expression.
+ * @param nExpression The expression limiting the number of produced values.
+ * @param window The window.
+ * @param <OutExpression> The type of the output expression.
+ * @param <NExpression> The type of the limiting expression.
+ * @return The constructed windowed output field.
+ * @mongodb.driver.manual reference/operator/aggregation/topN/ $topN + * @since 4.7 + * @mongodb.server.release 5.2 + */ + public static WindowOutputField topN( + final String path, final Bson sortBy, final OutExpression outExpression, final NExpression nExpression, @Nullable final Window window) { + notNull("path", path); + notNull("sortBy", sortBy); + notNull("outExpression", outExpression); + notNull("nExpression", nExpression); + Map args = new LinkedHashMap<>(3); + args.put(ParamName.SORT_BY, sortBy); + args.put(ParamName.OUTPUT, outExpression); + args.put(ParamName.N_LOWERCASE, nExpression); + return compoundParameterWindowFunction(path, "$topN", args, window); + } + + /** + * Builds a window output field of the evaluation result of the {@code expression} against the last document in the {@code window}. + *
+ * <p>
+ * {@linkplain Aggregates#setWindowFields(Object, Bson, Iterable) Sorting} is required.</p>
+ * + * @param path The output field path. + * @param expression The expression. + * @param window The window. + * @param The expression type. + * @return The constructed windowed output field. + * @mongodb.driver.manual reference/operator/aggregation/last/ $last + */ + public static WindowOutputField last(final String path, final TExpression expression, @Nullable final Window window) { + notNull("path", path); + notNull("expression", expression); + return simpleParameterWindowFunction(path, "$last", expression, window); + } + + /** + * Builds a window output field of a BSON {@link org.bson.BsonType#ARRAY Array} + * of evaluation results of the {@code inExpression} against the last {@code N} documents in the {@code window}, + * where {@code N} is the positive integral value of the {@code nExpression}. + *
+ * <p>
+ * {@linkplain Aggregates#setWindowFields(Object, Bson, Iterable) Sorting} is required.</p>
+ *
+ * @param path The output field path.
+ * @param inExpression The input expression.
+ * @param nExpression The expression limiting the number of produced values.
+ * @param window The window.
+ * @param <InExpression> The type of the input expression.
+ * @param <NExpression> The type of the limiting expression.
+ * @return The constructed windowed output field.
+ * @mongodb.driver.manual reference/operator/aggregation/lastN/ $lastN
+ * @since 4.7
+ * @mongodb.server.release 5.2
+ */
+ public static <InExpression, NExpression> WindowOutputField lastN(
+         final String path, final InExpression inExpression, final NExpression nExpression, @Nullable final Window window) {
+     notNull("path", path);
+     notNull("inExpression", inExpression);
+     notNull("nExpression", nExpression);
+     Map<ParamName, Object> args = new LinkedHashMap<>(3);
+     args.put(ParamName.INPUT, inExpression);
+     args.put(ParamName.N_LOWERCASE, nExpression);
+     return compoundParameterWindowFunction(path, "$lastN", args, window);
+ }
+
+ /**
+ * Builds a window output field of the evaluation result of the {@code outExpression} against the bottom document in the {@code window}
+ * sorted according to the provided {@code sortBy} specification.
+ *
+ * @param path The output field path.
+ * @param sortBy The {@linkplain Sorts sortBy specification}. The syntax is identical to the one expected by {@link Aggregates#sort(Bson)}.
+ * @param outExpression The output expression.
+ * @param window The window.
+ * @param <OutExpression> The type of the output expression.
+ * @return The constructed windowed output field.
+ * @mongodb.driver.manual reference/operator/aggregation/bottom/ $bottom
+ * @since 4.7
+ * @mongodb.server.release 5.2
+ */
+ public static <OutExpression> WindowOutputField bottom(
+         final String path, final Bson sortBy, final OutExpression outExpression, @Nullable final Window window) {
+     notNull("path", path);
+     notNull("sortBy", sortBy);
+     notNull("outExpression", outExpression);
+     Map<ParamName, Object> args = new LinkedHashMap<>(3);
+     args.put(ParamName.SORT_BY, sortBy);
+     args.put(ParamName.OUTPUT, outExpression);
+     return compoundParameterWindowFunction(path, "$bottom", args, window);
+ }
+
+ /**
+ * Builds a window output field of a BSON {@link org.bson.BsonType#ARRAY Array}
+ * of evaluation results of the {@code outExpression} against the bottom {@code N} documents in the {@code window}
+ * sorted according to the provided {@code sortBy} specification,
+ * where {@code N} is the positive integral value of the {@code nExpression}.
+ *
+ * @param path The output field path.
+ * @param sortBy The {@linkplain Sorts sortBy specification}. The syntax is identical to the one expected by {@link Aggregates#sort(Bson)}.
+ * @param outExpression The output expression.
+ * @param nExpression The expression limiting the number of produced values.
+ * @param window The window.
+ * @param <OutExpression> The type of the output expression.
+ * @param <NExpression> The type of the limiting expression.
+ * @return The constructed windowed output field.
+ * @mongodb.driver.manual reference/operator/aggregation/bottomN/ $bottomN + * @since 4.7 + * @mongodb.server.release 5.2 + */ + public static WindowOutputField bottomN( + final String path, final Bson sortBy, final OutExpression outExpression, final NExpression nExpression, @Nullable final Window window) { + notNull("path", path); + notNull("sortBy", sortBy); + notNull("outExpression", outExpression); + notNull("nExpression", nExpression); + Map args = new LinkedHashMap<>(3); + args.put(ParamName.SORT_BY, sortBy); + args.put(ParamName.OUTPUT, outExpression); + args.put(ParamName.N_LOWERCASE, nExpression); + return compoundParameterWindowFunction(path, "$bottomN", args, window); + } + + /** + * Builds a window output field of the evaluation result of the {@code expression} for the document whose position is shifted by the given + * amount relative to the current document. If the shifted document is outside of the + * {@linkplain Aggregates#setWindowFields(Object, Bson, Iterable) partition} containing the current document, + * then the {@code defaultExpression} is used instead of the {@code expression}. + *
+ * <p>
+ * {@linkplain Aggregates#setWindowFields(Object, Bson, Iterable) Sorting} is required.</p>
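+ * <p>
+ * A hypothetical sketch exposing each document's previous {@code "price"} within its partition
+ * (all names are illustrative):</p>
+ * <pre>{@code
+ *  Aggregates.setWindowFields("$symbol", Sorts.ascending("ts"),
+ *          Arrays.asList(WindowOutputFields.shift("previousPrice", "$price", null, -1)));
+ * }</pre>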
+ * + * @param path The output field path. + * @param expression The expression. + * @param defaultExpression The default expression. + * If {@code null}, then the default expression is evaluated to BSON {@link org.bson.BsonNull null}. + * Must evaluate to a constant value. + * @param by The shift specified similarly to {@linkplain Windows rules for window bounds}: + *
+ *           <ul>
+ *              <li>0 means the current document;</li>
+ *              <li>a negative value refers to the document preceding the current one;</li>
+ *              <li>a positive value refers to the document following the current one.</li>
+ *           </ul>
+ * @param The expression type. + * @return The constructed windowed output field. + * @mongodb.driver.dochub core/window-functions-shift $shift + */ + public static WindowOutputField shift(final String path, final TExpression expression, + @Nullable final TExpression defaultExpression, final int by) { + notNull("path", path); + notNull("expression", expression); + Map args = new LinkedHashMap<>(3); + args.put(ParamName.OUTPUT, expression); + args.put(ParamName.BY, by); + if (defaultExpression != null) { + args.put(ParamName.DEFAULT, defaultExpression); + } + return compoundParameterWindowFunction(path, "$shift", args, null); + } + + /** + * Builds a window output field of the order number of each document in its + * {@linkplain Aggregates#setWindowFields(Object, Bson, Iterable) partition}. + *
+ * <p>
+ * {@linkplain Aggregates#setWindowFields(Object, Bson, Iterable) Sorting} is required.</p>
+ * + * @param path The output field path. + * @return The constructed windowed output field. + * @mongodb.driver.dochub core/window-functions-document-number $documentNumber + */ + public static WindowOutputField documentNumber(final String path) { + notNull("path", path); + return simpleParameterWindowFunction(path, "$documentNumber", null, null); + } + + /** + * Builds a window output field of the rank of each document in its + * {@linkplain Aggregates#setWindowFields(Object, Bson, Iterable) partition}. + * Documents with the same value(s) of the {@linkplain Aggregates#setWindowFields(Object, Bson, Iterable) sortBy} fields result in + * the same ranking and result in gaps in the returned ranks. + * For example, a partition with the sequence [1, 3, 3, 5] representing the values of the single {@code sortBy} field + * produces the following sequence of rank values: [1, 2, 2, 4]. + *
+ * <p>
+ * {@linkplain Aggregates#setWindowFields(Object, Bson, Iterable) Sorting} is required.</p>
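+ * <p>
+ * A hypothetical sketch ranking documents by descending {@code "score"} within each {@code "$gameId"} partition
+ * (names are illustrative):</p>
+ * <pre>{@code
+ *  Aggregates.setWindowFields("$gameId", Sorts.descending("score"),
+ *          Arrays.asList(WindowOutputFields.rank("scoreRank")));
+ * }</pre>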
+ * + * @param path The output field path. + * @return The constructed windowed output field. + * @mongodb.driver.dochub core/window-functions-rank $rank + */ + public static WindowOutputField rank(final String path) { + notNull("path", path); + return simpleParameterWindowFunction(path, "$rank", null, null); + } + + /** + * Builds a window output field of the dense rank of each document in its + * {@linkplain Aggregates#setWindowFields(Object, Bson, Iterable) partition}. + * Documents with the same value(s) of the {@linkplain Aggregates#setWindowFields(Object, Bson, Iterable) sortBy} fields result in + * the same ranking but do not result in gaps in the returned ranks. + * For example, a partition with the sequence [1, 3, 3, 5] representing the values of the single {@code sortBy} field + * produces the following sequence of rank values: [1, 2, 2, 3]. + *
+ * <p>
+ * {@linkplain Aggregates#setWindowFields(Object, Bson, Iterable) Sorting} is required.</p>
+ * + * @param path The output field path. + * @return The constructed windowed output field. + * @mongodb.driver.dochub core/window-functions-dense-rank $denseRank + */ + public static WindowOutputField denseRank(final String path) { + notNull("path", path); + return simpleParameterWindowFunction(path, "$denseRank", null, null); + } + + /** + * Builds a window output field of the last observed non-{@link BsonType#NULL Null} evaluation result of the {@code expression}. + * + * @param path The output field path. + * @param expression The expression. + * @param The expression type. + * @return The constructed windowed output field. + * @mongodb.driver.manual reference/operator/aggregation/locf/ $locf + * @since 4.7 + * @mongodb.server.release 5.2 + */ + public static WindowOutputField locf(final String path, final TExpression expression) { + notNull("path", path); + notNull("expression", expression); + return simpleParameterWindowFunction(path, "$locf", expression, null); + } + + /** + * Builds a window output field of a value that is equal to the evaluation result of the {@code expression} when it is non-{@link BsonType#NULL Null}, + * or to the linear interpolation of surrounding evaluation results of the {@code expression} when the result is {@link BsonType#NULL Null}. + *
+ * <p>
+ * {@linkplain Aggregates#setWindowFields(Object, Bson, Iterable) Sorting} is required.</p>
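+ * <p>
+ * A hypothetical sketch interpolating missing {@code "temperature"} readings over an illustrative
+ * {@code "ts"} sort field:</p>
+ * <pre>{@code
+ *  Aggregates.setWindowFields(null, Sorts.ascending("ts"),
+ *          Arrays.asList(WindowOutputFields.linearFill("temperature", "$temperature")));
+ * }</pre>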
+ * + * @param path The output field path. + * @param expression The expression. + * @param The expression type. + * @return The constructed windowed output field. + * @mongodb.driver.manual reference/operator/aggregation/linearFill/ $linearFill + * @since 4.7 + * @mongodb.server.release 5.3 + */ + public static WindowOutputField linearFill(final String path, final TExpression expression) { + notNull("path", path); + notNull("expression", expression); + return simpleParameterWindowFunction(path, "$linearFill", expression, null); + } + + private static WindowOutputField simpleParameterWindowFunction(final String path, final String functionName, + @Nullable final Object expression, + @Nullable final Window window) { + return new BsonFieldWindowOutputField(new BsonField(path, + new SimpleParameterFunctionAndWindow(functionName, expression, window))); + } + + private static WindowOutputField compoundParameterWindowFunction(final String path, final String functionName, + final Map args, + @Nullable final Window window) { + return new BsonFieldWindowOutputField(new BsonField(path, + new CompoundParameterFunctionAndWindow(functionName, args, window))); + } + + private WindowOutputFields() { + throw new UnsupportedOperationException(); + } + + private static final class BsonFieldWindowOutputField implements WindowOutputField { + private final BsonField wrapped; + + BsonFieldWindowOutputField(final BsonField field) { + wrapped = assertNotNull(field); + } + + @Override + public BsonField toBsonField() { + return wrapped; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + BsonFieldWindowOutputField that = (BsonFieldWindowOutputField) o; + return wrapped.equals(that.wrapped); + } + + @Override + public int hashCode() { + return wrapped.hashCode(); + } + + @Override + public String toString() { + return wrapped.toString(); + } + } + + /** + * A combination of a window function and its window. 
+ */ + private abstract static class AbstractFunctionAndWindow implements Bson { + private final String functionName; + @Nullable + private final Window window; + + AbstractFunctionAndWindow(final String functionName, @Nullable final Window window) { + this.functionName = functionName; + this.window = window; + } + + final void writeWindow(final CodecRegistry codecRegistry, final BsonDocumentWriter writer) { + if (window != null) { + writer.writeName("window"); + BuildersHelper.encodeValue(writer, window, codecRegistry); + } + } + + final String functionName() { + return functionName; + } + + final Optional window() { + return Optional.ofNullable(window); + } + } + + private static final class SimpleParameterFunctionAndWindow extends AbstractFunctionAndWindow { + @Nullable + private final Object expression; + + SimpleParameterFunctionAndWindow(final String functionName, @Nullable final Object expression, @Nullable final Window window) { + super(functionName, window); + this.expression = expression; + } + + @Override + public BsonDocument toBsonDocument(final Class tDocumentClass, final CodecRegistry codecRegistry) { + BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument()); + writer.writeStartDocument(); + writer.writeName(functionName()); + if (expression == null) { + writer.writeStartDocument(); + writer.writeEndDocument(); + } else { + BuildersHelper.encodeValue(writer, expression, codecRegistry); + } + writeWindow(codecRegistry, writer); + writer.writeEndDocument(); + return writer.getDocument(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + SimpleParameterFunctionAndWindow that = (SimpleParameterFunctionAndWindow) o; + return functionName().equals(that.functionName()) && Objects.equals(expression, that.expression) + && window().equals(that.window()); + } + + @Override + public int hashCode() { + return Objects.hash(functionName(), expression, window()); + } + + @Override + public String toString() { + return "WindowFunction{" + + "name='" + functionName() + '\'' + + ", expression=" + expression + + ", window=" + window() + + '}'; + } + } + + private static final class CompoundParameterFunctionAndWindow extends AbstractFunctionAndWindow { + private final Map args; + + CompoundParameterFunctionAndWindow(final String functionName, final Map args, + @Nullable final Window window) { + super(functionName, window); + this.args = args; + } + + @Override + public BsonDocument toBsonDocument(final Class tDocumentClass, final CodecRegistry codecRegistry) { + BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument()); + writer.writeStartDocument(); + writer.writeName(functionName()); + writer.writeStartDocument(); + args.forEach((paramName, paramValue) -> { + writer.writeName(paramName.value()); + BuildersHelper.encodeValue(writer, paramValue, codecRegistry); + }); + writer.writeEndDocument(); + writeWindow(codecRegistry, writer); + writer.writeEndDocument(); + return writer.getDocument(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + CompoundParameterFunctionAndWindow that = (CompoundParameterFunctionAndWindow) o; + return functionName().equals(that.functionName()) && Objects.equals(args, that.args) && window().equals(that.window()); + } + + @Override + public int hashCode() { + return Objects.hash(functionName(), args, 
window()); + } + + @Override + public String toString() { + return "WindowFunction{" + + "name='" + functionName() + '\'' + + ", args=" + args + + ", window=" + window() + + '}'; + } + } + + private enum ParamName { + INPUT("input"), + UNIT("unit"), + N_UPPERCASE("N"), + N_LOWERCASE("n"), + P_LOWERCASE("p"), + ALPHA("alpha"), + OUTPUT("output"), + BY("by"), + DEFAULT("default"), + SORT_BY("sortBy"), + METHOD("method"); + + private final String value; + + ParamName(final String value) { + this.value = value; + } + + String value() { + return value; + } + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/Windows.java b/driver-core/src/main/com/mongodb/client/model/Windows.java new file mode 100644 index 00000000000..9506bb9b8af --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/Windows.java @@ -0,0 +1,455 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model; + +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonDocumentWriter; +import org.bson.BsonType; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; +import org.bson.types.Decimal128; + +import java.util.Objects; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static org.bson.assertions.Assertions.notNull; + +/** + * Builders for {@linkplain Window windows} used when expressing {@linkplain WindowOutputField windowed computations}. + * There are two types of windows: {@linkplain #documents(int, int) documents} and {@linkplain #range(long, long) range}. + *
+ * <p>
+ * Bounded and half-bounded windows require sorting.
+ * Window bounds are inclusive and the lower bound must always be less than or equal to the upper bound.
+ * The following type-specific rules are applied to windows:</p>
+ * <ul>
+ *     <li>documents
+ *     <ul>
+ *         <li>bounds
+ *         <ul>
+ *             <li>0 refers to the current document and is functionally equivalent to {@link Bound#CURRENT};</li>
+ *             <li>a negative value refers to documents preceding the current one;</li>
+ *             <li>a positive value refers to documents following the current one;</li>
+ *         </ul>
+ *         </li>
+ *     </ul>
+ *     </li>
+ *     <li>range
+ *     <ul>
+ *         <li>{@link Aggregates#setWindowFields(Object, Bson, Iterable) sortBy}
+ *         <ul>
+ *             <li>must contain exactly one field;</li>
+ *             <li>must specify the ascending sort order;</li>
+ *             <li>the {@code sortBy} field must be of either a numeric BSON type
+ *             (see the {@code $isNumber} aggregation pipeline stage)
+ *             or the BSON {@link BsonType#DATE_TIME Date} type if {@linkplain #timeRange(long, long, MongoTimeUnit) time}
+ *             bounds are used;</li>
+ *         </ul>
+ *         </li>
+ *         <li>bounds
+ *         <ul>
+ *             <li>if numeric, i.e., not {@link Bound}, then the bound is calculated by adding
+ *             the value to the value of the {@code sortBy} field in the current document;</li>
+ *             <li>if {@link Bound#CURRENT}, then the bound is determined by the current document
+ *             and not the current value of the {@code sortBy} field;</li>
+ *             <li>time bounds require specifying a {@linkplain MongoTimeUnit time unit} and are added as per the
+ *             {@code $dateAdd}/{@code $dateSubtract} aggregation pipeline stage specification.</li>
+ *         </ul>
+ *         </li>
+ *     </ul>
+ *     </li>
+ * </ul>
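+ * <p>
+ * For illustration, two hypothetical windows (bounds are arbitrary):</p>
+ * <pre>{@code
+ *  Window lastThreeDocs = Windows.documents(-2, 0);
+ *  Window plusMinusTen = Windows.range(-10, 10);
+ * }</pre>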
+ * + * @see WindowOutputField + * @mongodb.driver.manual reference/operator/aggregation/isNumber/ $isNumber aggregation pipeline stage + * @mongodb.driver.manual reference/bson-types/#date BSON Date type + * @mongodb.driver.manual reference/operator/aggregation/dateAdd/ $dateAdd aggregation pipeline stage + * @mongodb.driver.manual reference/operator/aggregation/dateSubtract/ $dateSubtract aggregation pipeline stage + * @mongodb.server.release 5.0 + * @since 4.3 + */ +public final class Windows { + /** + * Creates a window from {@link Bson} in situations when there is no builder method that better satisfies your needs. + * This method cannot be used to validate the syntax. + *
+ * <p>
+ * Example<br>
+ * The following code creates two functionally equivalent windows, though they may not be {@linkplain #equals(Object) equal}.</p>
+ * <pre>{@code
+ *  Window pastWeek1 = Windows.timeRange(-1, MongoTimeUnit.WEEK, Windows.Bound.CURRENT);
+ *  Window pastWeek2 = Windows.of(
+ *          new Document("range", Arrays.asList(-1, "current"))
+ *                  .append("unit", MongoTimeUnit.WEEK.value()));
+ * }</pre>
+ * + * @param window A {@link Bson} representing the required window. + * @return The constructed window. + */ + public static Window of(final Bson window) { + return new BsonWindow(notNull("window", window)); + } + + /** + * Creates a documents window whose bounds are determined by a number of documents before and after the current document. + * + * @param lower A value based on which the lower bound of the window is calculated. + * @param upper A value based on which the upper bound of the window is calculated. + * @return The constructed documents window. + */ + public static Window documents(final int lower, final int upper) { + isTrueArgument("lower <= upper", lower <= upper); + return new SimpleWindow<>(SimpleWindow.TYPE_POSITION_BASED, lower, upper, null); + } + + /** + * Creates a documents window whose bounds are determined by a number of documents before and after the current document. + * + * @param lower A value based on which the lower bound of the window is calculated. + * @param upper A value based on which the upper bound of the window is calculated. + * @return The constructed documents window. + */ + public static Window documents(final Bound lower, final int upper) { + notNull("lower", lower); + if (lower == Bound.CURRENT) { + isTrueArgument("lower <= upper", upper >= 0); + } + return new SimpleWindow<>(SimpleWindow.TYPE_POSITION_BASED, lower.value(), upper, null); + } + + /** + * Creates a documents window whose bounds are determined by a number of documents before and after the current document. + * + * @param lower A value based on which the lower bound of the window is calculated. + * @param upper A value based on which the upper bound of the window is calculated. + * @return The constructed documents window. + */ + public static Window documents(final int lower, final Bound upper) { + notNull("upper", upper); + if (upper == Bound.CURRENT) { + isTrueArgument("lower <= upper", lower <= 0); + } + return new SimpleWindow<>(SimpleWindow.TYPE_POSITION_BASED, lower, upper.value(), null); + } + + /** + * Creates a documents window whose bounds are determined by a number of documents before and after the current document. + * + * @param lower A value based on which the lower bound of the window is calculated. + * @param upper A value based on which the upper bound of the window is calculated. + * @return The constructed documents window. + */ + public static Window documents(final Bound lower, final Bound upper) { + notNull("lower", lower); + notNull("upper", upper); + return new SimpleWindow<>(SimpleWindow.TYPE_POSITION_BASED, lower.value(), upper.value(), null); + } + + /** + * Creates a dynamically-sized range window whose bounds are determined by a range of possible values around + * the value of the {@link Aggregates#setWindowFields(Object, Bson, Iterable) sortBy} field in the current document. + * + * @param lower A value based on which the lower bound of the window is calculated. + * @param upper A value based on which the upper bound of the window is calculated. + * @return The constructed range window. + */ + public static Window range(final long lower, final long upper) { + isTrueArgument("lower <= upper", lower <= upper); + return new SimpleWindow<>(SimpleWindow.TYPE_RANGE_BASED, lower, upper, null); + } + + /** + * Creates a dynamically-sized range window whose bounds are determined by a range of possible values around + * the value of the {@link Aggregates#setWindowFields(Object, Bson, Iterable) sortBy} field in the current document. 
+ * + * @param lower A value based on which the lower bound of the window is calculated. + * @param upper A value based on which the upper bound of the window is calculated. + * @return The constructed range window. + */ + public static Window range(final double lower, final double upper) { + isTrueArgument("lower <= upper", lower <= upper); + return new SimpleWindow<>(SimpleWindow.TYPE_RANGE_BASED, lower, upper, null); + } + + /** + * Creates a dynamically-sized range window whose bounds are determined by a range of possible values around + * the value of the {@link Aggregates#setWindowFields(Object, Bson, Iterable) sortBy} field in the current document. + * + * @param lower A value based on which the lower bound of the window is calculated. + * @param upper A value based on which the upper bound of the window is calculated. + * @return The constructed range window. + */ + public static Window range(final Decimal128 lower, final Decimal128 upper) { + notNull("lower", lower); + notNull("upper", upper); + isTrueArgument("lower <= upper", lower.compareTo(upper) <= 0); + return new SimpleWindow<>(SimpleWindow.TYPE_RANGE_BASED, lower, upper, null); + } + + /** + * Creates a dynamically-sized range window whose bounds are determined by a range of possible values around + * the value of the {@link Aggregates#setWindowFields(Object, Bson, Iterable) sortBy} field in the current document. + * + * @param lower A value based on which the lower bound of the window is calculated. + * @param upper A value based on which the upper bound of the window is calculated. + * @return The constructed range window. + */ + public static Window range(final Bound lower, final long upper) { + notNull("lower", lower); + return new SimpleWindow<>(SimpleWindow.TYPE_RANGE_BASED, lower.value(), upper, null); + } + + /** + * Creates a dynamically-sized range window whose bounds are determined by a range of possible values around + * the value of the {@link Aggregates#setWindowFields(Object, Bson, Iterable) sortBy} field in the current document. + * + * @param lower A value based on which the lower bound of the window is calculated. + * @param upper A value based on which the upper bound of the window is calculated. + * @return The constructed range window. + */ + public static Window range(final Bound lower, final double upper) { + notNull("lower", lower); + return new SimpleWindow<>(SimpleWindow.TYPE_RANGE_BASED, lower.value(), upper, null); + } + + /** + * Creates a dynamically-sized range window whose bounds are determined by a range of possible values around + * the value of the {@link Aggregates#setWindowFields(Object, Bson, Iterable) sortBy} field in the current document. + * + * @param lower A value based on which the lower bound of the window is calculated. + * @param upper A value based on which the upper bound of the window is calculated. + * @return The constructed range window. + */ + public static Window range(final Bound lower, final Decimal128 upper) { + notNull("lower", lower); + notNull("upper", upper); + return new SimpleWindow<>(SimpleWindow.TYPE_RANGE_BASED, lower.value(), upper, null); + } + + /** + * Creates a dynamically-sized range window whose bounds are determined by a range of possible values around + * the value of the {@link Aggregates#setWindowFields(Object, Bson, Iterable) sortBy} field in the current document. + * + * @param lower A value based on which the lower bound of the window is calculated. + * @param upper A value based on which the upper bound of the window is calculated. 
+ * @return The constructed range window. + */ + public static Window range(final long lower, final Bound upper) { + notNull("upper", upper); + return new SimpleWindow<>(SimpleWindow.TYPE_RANGE_BASED, lower, upper.value(), null); + } + + /** + * Creates a dynamically-sized range window whose bounds are determined by a range of possible values around + * the value of the {@link Aggregates#setWindowFields(Object, Bson, Iterable) sortBy} field in the current document. + * + * @param lower A value based on which the lower bound of the window is calculated. + * @param upper A value based on which the upper bound of the window is calculated. + * @return The constructed range window. + */ + public static Window range(final double lower, final Bound upper) { + notNull("upper", upper); + return new SimpleWindow<>(SimpleWindow.TYPE_RANGE_BASED, lower, upper.value(), null); + } + + /** + * Creates a dynamically-sized range window whose bounds are determined by a range of possible values around + * the value of the {@link Aggregates#setWindowFields(Object, Bson, Iterable) sortBy} field in the current document. + * + * @param lower A value based on which the lower bound of the window is calculated. + * @param upper A value based on which the upper bound of the window is calculated. + * @return The constructed range window. + */ + public static Window range(final Decimal128 lower, final Bound upper) { + notNull("lower", lower); + notNull("upper", upper); + return new SimpleWindow<>(SimpleWindow.TYPE_RANGE_BASED, lower, upper.value(), null); + } + + /** + * Creates a dynamically-sized range window whose bounds are determined by a range of possible values around + * the BSON {@link BsonType#DATE_TIME Date} value of the {@link Aggregates#setWindowFields(Object, Bson, Iterable) sortBy} + * field in the current document. + * + * @param lower A value based on which the lower bound of the window is calculated. + * @param upper A value based on which the upper bound of the window is calculated. + * @param unit A time unit in which {@code lower} and {@code upper} are specified. + * @return The constructed range window. + */ + public static Window timeRange(final long lower, final long upper, final MongoTimeUnit unit) { + notNull("unit", unit); + isTrueArgument("lower <= upper", lower <= upper); + return new SimpleWindow<>(SimpleWindow.TYPE_RANGE_BASED, lower, upper, unit); + } + + /** + * Creates a dynamically-sized range window whose bounds are determined by a range of possible values around + * the BSON {@link BsonType#DATE_TIME Date} value of the {@link Aggregates#setWindowFields(Object, Bson, Iterable) sortBy} + * field in the current document. + * + * @param lower A value based on which the lower bound of the window is calculated. + * @param upper A value based on which the upper bound of the window is calculated. + * @param unit A time unit in which {@code upper} is specified. + * @return The constructed range window. + */ + public static Window timeRange(final Bound lower, final long upper, final MongoTimeUnit unit) { + notNull("lower", lower); + notNull("unit", unit); + return new SimpleWindow<>(SimpleWindow.TYPE_RANGE_BASED, lower.value(), upper, unit); + } + + /** + * Creates a dynamically-sized range window whose bounds are determined by a range of possible values around + * the BSON {@link BsonType#DATE_TIME Date} value of the {@link Aggregates#setWindowFields(Object, Bson, Iterable) sortBy} + * field in the current document. 
+ * + * @param lower A value based on which the lower bound of the window is calculated. + * @param unit A time unit in which {@code lower} is specified. + * @param upper A value based on which the upper bound of the window is calculated. + * @return The constructed range window. + */ + public static Window timeRange(final long lower, final MongoTimeUnit unit, final Bound upper) { + notNull("unit", unit); + notNull("upper", upper); + return new SimpleWindow<>(SimpleWindow.TYPE_RANGE_BASED, lower, upper.value(), unit); + } + + private Windows() { + throw new UnsupportedOperationException(); + } + + /** + * Special values that may be used when specifying the bounds of a {@linkplain Window window}. + * + * @mongodb.server.release 5.0 + * @since 4.3 + */ + public enum Bound { + /** + * The {@linkplain Window window} bound is determined by the current document and is inclusive. + */ + CURRENT("current"), + /** + * The {@linkplain Window window} bound is the same as the corresponding bound of the partition encompassing it. + */ + UNBOUNDED("unbounded"); + + private final String value; + + Bound(final String value) { + this.value = value; + } + + String value() { + return value; + } + } + + private static class SimpleWindow implements Window { + static final String TYPE_POSITION_BASED = "documents"; + static final String TYPE_RANGE_BASED = "range"; + + private final String type; + private final L lower; + private final U upper; + @Nullable + private final MongoTimeUnit unit; + + SimpleWindow(final String type, final L lower, final U upper, @Nullable final MongoTimeUnit unit) { + this.lower = lower; + this.upper = upper; + this.type = type; + this.unit = unit; + } + + @Override + public BsonDocument toBsonDocument(final Class tDocumentClass, final CodecRegistry codecRegistry) { + BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument()); + writer.writeStartDocument(); + writer.writeStartArray(type); + BuildersHelper.encodeValue(writer, lower, codecRegistry); + BuildersHelper.encodeValue(writer, upper, codecRegistry); + writer.writeEndArray(); + if (unit != null) { + writer.writeString("unit", unit.value()); + } + writer.writeEndDocument(); + return writer.getDocument(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + SimpleWindow that = (SimpleWindow) o; + return type.equals(that.type) && lower.equals(that.lower) && upper.equals(that.upper) && unit == that.unit; + } + + @Override + public int hashCode() { + return Objects.hash(type, lower, upper, unit); + } + + @Override + public String toString() { + return "Window{" + + "type=" + type + + ", lower=" + lower + + ", upper=" + upper + + ", unit=" + unit + + '}'; + } + } + + private static final class BsonWindow implements Window { + private final Bson wrapped; + + BsonWindow(final Bson document) { + wrapped = assertNotNull(document); + } + + @Override + public BsonDocument toBsonDocument(final Class tDocumentClass, final CodecRegistry codecRegistry) { + return wrapped.toBsonDocument(tDocumentClass, codecRegistry); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + BsonWindow that = (BsonWindow) o; + return wrapped.equals(that.wrapped); + } + + @Override + public int hashCode() { + return Objects.hash(wrapped); + } + + @Override + public String toString() { + return wrapped.toString(); + } + } +} diff 
--git a/driver-core/src/main/com/mongodb/client/model/WriteModel.java b/driver-core/src/main/com/mongodb/client/model/WriteModel.java new file mode 100644 index 00000000000..c0fb2b6bae9 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/WriteModel.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +/** + * A base class for models that can be used in a bulk write operations. + * + * @param the document type for storage + * @since 3.0 + */ +public abstract class WriteModel { + WriteModel() { + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/BaseClientDeleteOptions.java b/driver-core/src/main/com/mongodb/client/model/bulk/BaseClientDeleteOptions.java new file mode 100644 index 00000000000..8c0a74406ef --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/BaseClientDeleteOptions.java @@ -0,0 +1,23 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.bulk; + +/** + * The methods declared in this interface are part of the public API of subclasses or sub-interfaces. + */ +interface BaseClientDeleteOptions extends BaseClientWriteModelOptions { +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/BaseClientUpdateOptions.java b/driver-core/src/main/com/mongodb/client/model/bulk/BaseClientUpdateOptions.java new file mode 100644 index 00000000000..10b97e2f570 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/BaseClientUpdateOptions.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.bulk; + +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * The methods declared in this interface are part of the public API of subclasses or sub-interfaces. 
+ */ +interface BaseClientUpdateOptions extends BaseClientWriteModelOptions, BaseClientUpsertableWriteModelOptions { + + BaseClientUpdateOptions arrayFilters(@Nullable Iterable arrayFilters); +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/BaseClientUpsertableWriteModelOptions.java b/driver-core/src/main/com/mongodb/client/model/bulk/BaseClientUpsertableWriteModelOptions.java new file mode 100644 index 00000000000..d26a96e1ba5 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/BaseClientUpsertableWriteModelOptions.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.bulk; + +import com.mongodb.lang.Nullable; + +/** + * The methods declared in this interface are part of the public API of subclasses or sub-interfaces. + */ +interface BaseClientUpsertableWriteModelOptions { + BaseClientUpsertableWriteModelOptions upsert(@Nullable Boolean upsert); +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/BaseClientWriteModelOptions.java b/driver-core/src/main/com/mongodb/client/model/bulk/BaseClientWriteModelOptions.java new file mode 100644 index 00000000000..f7cd4e7a491 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/BaseClientWriteModelOptions.java @@ -0,0 +1,32 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.bulk; + +import com.mongodb.client.model.Collation; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * The methods declared in this interface are part of the public API of subclasses or sub-interfaces. + */ +interface BaseClientWriteModelOptions { + BaseClientWriteModelOptions collation(@Nullable Collation collation); + + BaseClientWriteModelOptions hint(@Nullable Bson hint); + + BaseClientWriteModelOptions hintString(@Nullable String hintString); +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientBulkWriteOptions.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientBulkWriteOptions.java new file mode 100644 index 00000000000..942a37c43df --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientBulkWriteOptions.java @@ -0,0 +1,90 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.bulk; + +import com.mongodb.annotations.Sealed; +import com.mongodb.client.model.Filters; +import com.mongodb.internal.client.model.bulk.ConcreteClientBulkWriteOptions; +import com.mongodb.lang.Nullable; +import org.bson.BsonValue; +import org.bson.conversions.Bson; + +/** + * The options to apply when executing a client-level bulk write operation. + * + * @since 5.3 + */ +@Sealed +public interface ClientBulkWriteOptions { + /** + * Creates the default options. + * + * @return The default options. + */ + static ClientBulkWriteOptions clientBulkWriteOptions() { + return new ConcreteClientBulkWriteOptions(); + } + + /** + * Enables or disables ordered execution of {@linkplain ClientNamespacedWriteModel individual write operations}. + * In an ordered execution a failure of an individual operation prevents the rest of them + * from being executed. + * In an unordered execution failures of individual operations do not prevent the rest of them + * from being executed. + * + * @param ordered The ordered flag. If {@code null}, the client defaults to {@code true}. + * @return {@code this}. + */ + ClientBulkWriteOptions ordered(@Nullable Boolean ordered); + + /** + * Disables or enables checking against document validation rules, a.k.a., schema validation. + * + * @param bypassDocumentValidation The flag specifying whether to bypass the document validation rules. + * {@code null} represents the server default. + * @return {@code this}. + */ + ClientBulkWriteOptions bypassDocumentValidation(@Nullable Boolean bypassDocumentValidation); + + /** + * Sets variables that can be referenced from {@linkplain ClientNamespacedWriteModel individual write operations} + * with the {@code "$$"} syntax, which in turn requires using {@link Filters#expr(Object)} when specifying filters. + * Values must be constants or expressions that do not reference fields. + * + * @param let The variables. {@code null} represents the server default. + * @return {@code this}. + * @mongodb.driver.manual reference/aggregation-variables/ Variables in Aggregation Expressions + */ + ClientBulkWriteOptions let(@Nullable Bson let); + + /** + * Sets the comment to attach to the {@code bulkWrite} administration command. + * + * @param comment The comment. {@code null} represents the server default. + * @return {@code this}. + */ + ClientBulkWriteOptions comment(@Nullable BsonValue comment); + + /** + * Enables or disables requesting {@linkplain ClientBulkWriteResult#getVerboseResults() verbose results}. + * + * @param verboseResults The flag specifying whether to request verbose results. + * If {@code null}, the client defaults to {@code false}. + * This value corresponds inversely to the {@code errorsOnly} field of the {@code bulkWrite} administration command. + * @return {@code this}. 
+ */ + ClientBulkWriteOptions verboseResults(@Nullable Boolean verboseResults); +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientBulkWriteResult.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientBulkWriteResult.java new file mode 100644 index 00000000000..04257cb8460 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientBulkWriteResult.java @@ -0,0 +1,136 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.bulk; + +import com.mongodb.ClientBulkWriteException; +import com.mongodb.WriteConcern; +import com.mongodb.annotations.Evolving; + +import java.util.Map; +import java.util.Optional; + +/** + * The result of a successful or partially successful client-level bulk write operation. + * Note that if a client-level bulk write operation fails while some of the + * {@linkplain ClientNamespacedWriteModel individual write operations} are known to be successful, + * then the successful partial result is still accessible via {@link ClientBulkWriteException#getPartialResult()}. + * + * @see ClientBulkWriteException + * @since 5.3 + */ +@Evolving +public interface ClientBulkWriteResult { + /** + * Indicates whether this result was {@linkplain WriteConcern#isAcknowledged() acknowledged}. + * If not, then all other methods throw {@link UnsupportedOperationException}. + * + * @return Whether this result was acknowledged. + */ + boolean isAcknowledged(); + + /** + * The number of documents that were inserted across all insert operations. + * + * @return The number of documents that were inserted. + * @throws UnsupportedOperationException If this result is not {@linkplain #isAcknowledged() acknowledged}. + */ + long getInsertedCount(); + + /** + * The number of documents that were upserted across all update and replace operations. + * + * @return The number of documents that were upserted. + * @throws UnsupportedOperationException If this result is not {@linkplain #isAcknowledged() acknowledged}. + */ + long getUpsertedCount(); + + /** + * The number of documents that matched the filters across all operations with filters. + * + * @return The number of documents that were matched. + * @throws UnsupportedOperationException If this result is not {@linkplain #isAcknowledged() acknowledged}. + */ + long getMatchedCount(); + + /** + * The number of documents that were modified across all update and replace operations. + * + * @return The number of documents that were modified. + * @throws UnsupportedOperationException If this result is not {@linkplain #isAcknowledged() acknowledged}. + */ + long getModifiedCount(); + + /** + * The number of documents that were deleted across all delete operations. + * + * @return The number of documents that were deleted. + * @throws UnsupportedOperationException If this result is not {@linkplain #isAcknowledged() acknowledged}. + */ + long getDeletedCount(); + + /** + * The verbose results of individual operations. 
+ * + * @return {@link Optional} verbose results of individual operations. + * @throws UnsupportedOperationException If this result is not {@linkplain #isAcknowledged() acknowledged}. + * @see ClientBulkWriteOptions#verboseResults(Boolean) + */ + Optional<VerboseResults> getVerboseResults(); + + /** + * The {@linkplain ClientBulkWriteResult#getVerboseResults() verbose results} of individual operations. + * + * @since 5.3 + */ + @Evolving + interface VerboseResults { + /** + * The indexed {@link ClientInsertOneResult}s. + * The {@linkplain Map#keySet() keys} are the indexes of the corresponding {@link ClientNamespacedWriteModel}s + * in the client-level bulk write operation. + *
<p> + * There are no guarantees on mutability or iteration order of the {@link Map} returned.</p>
+ * + * @return The indexed {@link ClientInsertOneResult}s. + * @see ClientBulkWriteException#getWriteErrors() + */ + Map<Integer, ClientInsertOneResult> getInsertResults(); + + /** + * The indexed {@link ClientUpdateResult}s. + * The {@linkplain Map#keySet() keys} are the indexes of the corresponding {@link ClientNamespacedWriteModel}s + * in the client-level bulk write operation. + *
<p> + * There are no guarantees on mutability or iteration order of the {@link Map} returned.</p>
+ * + * @return The indexed {@link ClientUpdateResult}s. + * @see ClientBulkWriteException#getWriteErrors() + */ + Map<Integer, ClientUpdateResult> getUpdateResults(); + + /** + * The indexed {@link ClientDeleteResult}s. + * The {@linkplain Map#keySet() keys} are the indexes of the corresponding {@link ClientNamespacedWriteModel}s + * in the client-level bulk write operation. + *
<p> + * There are no guarantees on mutability or iteration order of the {@link Map} returned.</p>
+ * + * @return The indexed {@link ClientDeleteResult}s. + * @see ClientBulkWriteException#getWriteErrors() + */ + Map<Integer, ClientDeleteResult> getDeleteResults(); + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientDeleteManyOptions.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientDeleteManyOptions.java new file mode 100644 index 00000000000..f899c5244c3 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientDeleteManyOptions.java @@ -0,0 +1,68 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
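Tying the result types above together, a sketch of reading the indexed insert results; it assumes a ClientBulkWriteResult obtained from a client-level bulkWrite entry point, which is outside this diff.

    import com.mongodb.client.model.bulk.ClientBulkWriteResult;

    public final class VerboseResultsSketch {
        // Prints the generated _id of each successful insert. getVerboseResults() is
        // empty unless verboseResults(true) was set on the options, and getInsertedId()
        // may be empty, as documented on ClientInsertOneResult below.
        static void printInsertedIds(final ClientBulkWriteResult result) {
            result.getVerboseResults().ifPresent(verbose ->
                    verbose.getInsertResults().forEach((index, insertResult) ->
                            insertResult.getInsertedId().ifPresent(id ->
                                    System.out.println("model #" + index + " inserted _id " + id))));
        }
    }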
+ */ +package com.mongodb.client.model.bulk; + +import com.mongodb.annotations.Sealed; +import com.mongodb.client.model.Collation; +import com.mongodb.internal.client.model.bulk.ConcreteClientDeleteOneOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * The options to apply when deleting a document. + * + * @since 5.3 + */ +@Sealed +public interface ClientDeleteOneOptions extends BaseClientDeleteOptions { + /** + * Creates the default options. + * + * @return The default options. + */ + static ClientDeleteOneOptions clientDeleteOneOptions() { + return new ConcreteClientDeleteOneOptions(); + } + + /** + * Sets the collation. + * + * @param collation The collation. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientDeleteOneOptions collation(@Nullable Collation collation); + + /** + * Sets the index specification, + * {@code null}-ifies {@linkplain #hintString(String) hint string}. + * + * @param hint The index specification. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientDeleteOneOptions hint(@Nullable Bson hint); + + /** + * Sets the index name, + * {@code null}-ifies {@linkplain #hint(Bson) hint}. + * + * @param hintString The index name. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientDeleteOneOptions hintString(@Nullable String hintString); +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientDeleteResult.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientDeleteResult.java new file mode 100644 index 00000000000..fcf66488114 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientDeleteResult.java @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.bulk; + +import com.mongodb.annotations.Evolving; +import com.mongodb.bulk.WriteConcernError; + +/** + * The result of a successful {@linkplain ClientNamespacedWriteModel individual delete operation}. + * Note that {@link WriteConcernError}s are not considered as making individual operations unsuccessful. + * + * @since 5.3 + */ +@Evolving +public interface ClientDeleteResult { + /** + * The number of documents that were deleted. + * + * @return The number of documents that were deleted. + */ + long getDeletedCount(); +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientInsertOneResult.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientInsertOneResult.java new file mode 100644 index 00000000000..960078c6be5 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientInsertOneResult.java @@ -0,0 +1,42 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
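A sketch of the hint interplay documented in the delete options above: each setter null-ifies the other form, so the last call wins. The index key pattern and the name "status_1" are illustrative.

    import com.mongodb.client.model.bulk.ClientDeleteManyOptions;
    import org.bson.BsonDocument;
    import org.bson.BsonInt32;

    import static com.mongodb.client.model.bulk.ClientDeleteManyOptions.clientDeleteManyOptions;

    public final class DeleteOptionsSketch {
        public static void main(final String[] args) {
            // hintString(...) null-ifies the previously set hint(Bson), so the server
            // receives only the index name "status_1".
            final ClientDeleteManyOptions options = clientDeleteManyOptions()
                    .hint(new BsonDocument("status", new BsonInt32(1)))
                    .hintString("status_1");
            System.out.println(options);
        }
    }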
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.bulk; + +import com.mongodb.annotations.Evolving; +import com.mongodb.bulk.WriteConcernError; +import org.bson.BsonValue; +import org.bson.RawBsonDocument; + +import java.util.Optional; + +/** + * The result of a successful {@linkplain ClientNamespacedWriteModel individual insert one operation}. + * Note that {@link WriteConcernError}s are not considered as making individual operations unsuccessful. + * + * @since 5.3 + */ +@Evolving +public interface ClientInsertOneResult { + /** + * The {@code "_id"} of the inserted document. + * + * @return The {@code "_id"} of the inserted document. + * {@linkplain Optional#isPresent() Present} unless a {@link RawBsonDocument} is inserted, + * because the driver neither generates the missing {@code "_id"} field for a {@link RawBsonDocument}, + * nor does it read the {@code "_id"} field from a {@link RawBsonDocument} when inserting it. + */ + Optional getInsertedId(); +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedDeleteManyModel.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedDeleteManyModel.java new file mode 100644 index 00000000000..a4e445e5e86 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedDeleteManyModel.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.bulk; + +import com.mongodb.annotations.Sealed; + +/** + * A model for deleting all documents matching a filter. + * + * @since 5.3 + */ +@Sealed +public interface ClientNamespacedDeleteManyModel extends ClientNamespacedWriteModel { +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedDeleteOneModel.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedDeleteOneModel.java new file mode 100644 index 00000000000..0ba508007a6 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedDeleteOneModel.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.bulk; + +import com.mongodb.annotations.Sealed; + +/** + * A model for deleting at most one document matching a filter. + * + * @since 5.3 + */ +@Sealed +public interface ClientNamespacedDeleteOneModel extends ClientNamespacedWriteModel { +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedInsertOneModel.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedInsertOneModel.java new file mode 100644 index 00000000000..66d9f39c74d --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedInsertOneModel.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.bulk; + +import com.mongodb.annotations.Sealed; + +/** + * A model for inserting a document. + * + * @since 5.3 + */ +@Sealed +public interface ClientNamespacedInsertOneModel extends ClientNamespacedWriteModel { +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedReplaceOneModel.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedReplaceOneModel.java new file mode 100644 index 00000000000..a4edf9b716a --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedReplaceOneModel.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.bulk; + +import com.mongodb.annotations.Sealed; + +/** + * A model for replacing at most one document matching a filter. + * + * @since 5.3 + */ +@Sealed +public interface ClientNamespacedReplaceOneModel extends ClientNamespacedWriteModel { +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedUpdateManyModel.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedUpdateManyModel.java new file mode 100644 index 00000000000..3900c8779f7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedUpdateManyModel.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.bulk; + +import com.mongodb.annotations.Sealed; + +/** + * A model for updating all documents matching a filter. + * + * @since 5.3 + */ +@Sealed +public interface ClientNamespacedUpdateManyModel extends ClientNamespacedWriteModel { +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedUpdateOneModel.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedUpdateOneModel.java new file mode 100644 index 00000000000..3d9e785004f --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedUpdateOneModel.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.bulk; + +import com.mongodb.annotations.Sealed; + +/** + * A model for updating at most one document matching a filter. + * + * @since 5.3 + */ +@Sealed +public interface ClientNamespacedUpdateOneModel extends ClientNamespacedWriteModel { +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedWriteModel.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedWriteModel.java new file mode 100644 index 00000000000..3673c35a9de --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientNamespacedWriteModel.java @@ -0,0 +1,325 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model.bulk; + +import com.mongodb.MongoNamespace; +import com.mongodb.annotations.Sealed; +import com.mongodb.client.model.Aggregates; +import com.mongodb.client.model.Filters; +import com.mongodb.client.model.Updates; +import com.mongodb.internal.client.model.bulk.ConcreteClientDeleteManyModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientDeleteOneModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientInsertOneModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientNamespacedDeleteManyModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientNamespacedDeleteOneModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientNamespacedInsertOneModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientNamespacedReplaceOneModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientNamespacedUpdateManyModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientNamespacedUpdateOneModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientReplaceOneModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientUpdateManyModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientUpdateOneModel; +import org.bson.Document; +import org.bson.conversions.Bson; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A combination of an individual write operation and a {@linkplain MongoNamespace namespace} + * the operation is targeted at. + * + * @since 5.3 + */ +@Sealed +public interface ClientNamespacedWriteModel { + /** + * Creates a model for inserting the {@code document} into the {@code namespace}. + * + * @param namespace The namespace. + * @param document The document. + * @return The requested {@link ClientNamespacedInsertOneModel}. + * @param The document type, for example {@link Document}. + */ + static ClientNamespacedInsertOneModel insertOne(final MongoNamespace namespace, final TDocument document) { + notNull("namespace", namespace); + notNull("document", document); + return new ConcreteClientNamespacedInsertOneModel(namespace, new ConcreteClientInsertOneModel(document)); + } + + /** + * Creates a model for updating at most one document in the {@code namespace} matching the {@code filter}. + * This method is functionally equivalent to {@link #updateOne(MongoNamespace, Bson, Bson, ClientUpdateOneOptions)} + * with the {@linkplain ClientUpdateOneOptions#clientUpdateOneOptions() default options}. + * + * @param namespace The namespace. + * @param filter The filter. + * @param update The update. + * @return The requested {@link ClientNamespacedUpdateOneModel}. + * @see Filters + * @see Updates + */ + static ClientNamespacedUpdateOneModel updateOne(final MongoNamespace namespace, final Bson filter, final Bson update) { + notNull("namespace", namespace); + notNull("filter", filter); + notNull("update", update); + return new ConcreteClientNamespacedUpdateOneModel(namespace, new ConcreteClientUpdateOneModel(filter, update, null, null)); + } + + /** + * Creates a model for updating at most one document in the {@code namespace} matching the {@code filter}. + * + * @param namespace The namespace. + * @param filter The filter. + * @param update The update. + * @param options The options. + * @return The requested {@link ClientNamespacedUpdateOneModel}. 
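A sketch combining the factory methods shown so far; the namespace and field names are illustrative.

    import com.mongodb.MongoNamespace;
    import com.mongodb.client.model.Filters;
    import com.mongodb.client.model.Updates;
    import com.mongodb.client.model.bulk.ClientNamespacedWriteModel;
    import org.bson.Document;

    public final class WriteModelSketch {
        public static void main(final String[] args) {
            final MongoNamespace orders = new MongoNamespace("shop", "orders");
            // Insert a document and update at most one matching document in the same namespace.
            final ClientNamespacedWriteModel insert =
                    ClientNamespacedWriteModel.insertOne(orders, new Document("status", "new"));
            final ClientNamespacedWriteModel update =
                    ClientNamespacedWriteModel.updateOne(orders, Filters.eq("status", "new"),
                            Updates.set("status", "processed"));
            System.out.println(insert + " " + update);
        }
    }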
+ * @see Filters + * @see Updates + */ + static ClientNamespacedUpdateOneModel updateOne( + final MongoNamespace namespace, final Bson filter, final Bson update, final ClientUpdateOneOptions options) { + notNull("namespace", namespace); + notNull("filter", filter); + notNull("update", update); + notNull("options", options); + return new ConcreteClientNamespacedUpdateOneModel(namespace, new ConcreteClientUpdateOneModel(filter, update, null, options)); + } + + /** + * Creates a model for updating at most one document in the {@code namespace} matching the {@code filter}. + * This method is functionally equivalent to {@link #updateOne(MongoNamespace, Bson, Iterable, ClientUpdateOneOptions)} + * with the {@linkplain ClientUpdateOneOptions#clientUpdateOneOptions() default options}. + * + * @param namespace The namespace. + * @param filter The filter. + * @param updatePipeline The update pipeline. + * @return The requested {@link ClientNamespacedUpdateOneModel}. + * @see Filters + * @see Aggregates + */ + static ClientNamespacedUpdateOneModel updateOne( + final MongoNamespace namespace, final Bson filter, final Iterable updatePipeline) { + notNull("namespace", namespace); + notNull("filter", filter); + notNull("updatePipeline", updatePipeline); + return new ConcreteClientNamespacedUpdateOneModel(namespace, new ConcreteClientUpdateOneModel(filter, null, updatePipeline, null)); + } + + /** + * Creates a model for updating at most one document in the {@code namespace} matching the {@code filter}. + * + * @param namespace The namespace. + * @param filter The filter. + * @param updatePipeline The update pipeline. + * @param options The options. + * @return The requested {@link ClientNamespacedUpdateOneModel}. + * @see Filters + * @see Aggregates + */ + static ClientNamespacedUpdateOneModel updateOne( + final MongoNamespace namespace, final Bson filter, final Iterable updatePipeline, final ClientUpdateOneOptions options) { + notNull("namespace", namespace); + notNull("filter", filter); + notNull("updatePipeline", updatePipeline); + notNull("options", options); + return new ConcreteClientNamespacedUpdateOneModel(namespace, new ConcreteClientUpdateOneModel(filter, null, updatePipeline, options)); + } + + /** + * Creates a model for updating all documents in the {@code namespace} matching the {@code filter}. + * This method is functionally equivalent to {@link #updateMany(MongoNamespace, Bson, Bson, ClientUpdateManyOptions)} + * with the {@linkplain ClientUpdateManyOptions#clientUpdateManyOptions() default}. + * + * @param namespace The namespace. + * @param filter The filter. + * @param update The update. + * @return The requested {@link ClientNamespacedUpdateManyModel}. + * @see Filters + * @see Updates + */ + static ClientNamespacedUpdateManyModel updateMany(final MongoNamespace namespace, final Bson filter, final Bson update) { + notNull("namespace", namespace); + notNull("filter", filter); + notNull("update", update); + return new ConcreteClientNamespacedUpdateManyModel(namespace, new ConcreteClientUpdateManyModel(filter, update, null, null)); + } + + /** + * Creates a model for updating all documents in the {@code namespace} matching the {@code filter}. + * + * @param namespace The namespace. + * @param filter The filter. + * @param update The update. + * @param options The options. + * @return The requested {@link ClientNamespacedUpdateManyModel}. 
+ * @see Filters + * @see Updates + */ + static ClientNamespacedUpdateManyModel updateMany( + final MongoNamespace namespace, final Bson filter, final Bson update, final ClientUpdateManyOptions options) { + notNull("namespace", namespace); + notNull("filter", filter); + notNull("update", update); + notNull("options", options); + return new ConcreteClientNamespacedUpdateManyModel(namespace, new ConcreteClientUpdateManyModel(filter, update, null, options)); + } + + /** + * Creates a model for updating all documents in the {@code namespace} matching the {@code filter}. + * This method is functionally equivalent to {@link #updateMany(MongoNamespace, Bson, Iterable, ClientUpdateManyOptions)} + * with the {@linkplain ClientUpdateManyOptions#clientUpdateManyOptions() default options}. + * + * @param namespace The namespace. + * @param filter The filter. + * @param updatePipeline The update pipeline. + * @return The requested {@link ClientNamespacedUpdateManyModel}. + * @see Filters + * @see Aggregates + */ + static ClientNamespacedUpdateManyModel updateMany( + final MongoNamespace namespace, final Bson filter, final Iterable updatePipeline) { + notNull("namespace", namespace); + notNull("filter", filter); + notNull("updatePipeline", updatePipeline); + return new ConcreteClientNamespacedUpdateManyModel(namespace, new ConcreteClientUpdateManyModel(filter, null, updatePipeline, null)); + } + + /** + * Creates a model for updating all documents in the {@code namespace} matching the {@code filter}. + * + * @param namespace The namespace. + * @param filter The filter. + * @param updatePipeline The update pipeline. + * @param options The options. + * @return The requested {@link ClientNamespacedUpdateManyModel}. + * @see Filters + * @see Aggregates + */ + static ClientNamespacedUpdateManyModel updateMany( + final MongoNamespace namespace, final Bson filter, final Iterable updatePipeline, final ClientUpdateManyOptions options) { + notNull("namespace", namespace); + notNull("filter", filter); + notNull("updatePipeline", updatePipeline); + notNull("options", options); + return new ConcreteClientNamespacedUpdateManyModel(namespace, new ConcreteClientUpdateManyModel(filter, null, updatePipeline, options)); + } + + /** + * Creates a model for replacing at most one document in the {@code namespace} matching the {@code filter}. + * This method is functionally equivalent to {@link #replaceOne(MongoNamespace, Bson, Object, ClientReplaceOneOptions)} + * with the {@linkplain ClientReplaceOneOptions#clientReplaceOneOptions() default options}. + * + * @param namespace The namespace. + * @param filter The filter. + * @param replacement The replacement. + * The keys of this document must not start with {@code $}, unless they express a {@linkplain com.mongodb.DBRef database reference}. + * @return The requested {@link ClientNamespacedReplaceOneModel}. + * @param The document type, for example {@link Document}. + * @see Filters + */ + static ClientNamespacedReplaceOneModel replaceOne(final MongoNamespace namespace, final Bson filter, final TDocument replacement) { + notNull("namespace", namespace); + notNull("filter", filter); + notNull("replacement", replacement); + return new ConcreteClientNamespacedReplaceOneModel(namespace, new ConcreteClientReplaceOneModel(filter, replacement, null)); + } + + /** + * Creates a model for replacing at most one document in the {@code namespace} matching the {@code filter}. + * + * @param namespace The namespace. + * @param filter The filter. + * @param replacement The replacement. 
+ * The keys of this document must not start with {@code $}, unless they express a {@linkplain com.mongodb.DBRef database reference}. + * @param options The options. + * @return The requested {@link ClientNamespacedReplaceOneModel}. + * @param The document type, for example {@link Document}. + * @see Filters + */ + static ClientNamespacedReplaceOneModel replaceOne( + final MongoNamespace namespace, final Bson filter, final TDocument replacement, final ClientReplaceOneOptions options) { + notNull("namespace", namespace); + notNull("filter", filter); + notNull("replacement", replacement); + notNull("options", options); + return new ConcreteClientNamespacedReplaceOneModel(namespace, new ConcreteClientReplaceOneModel(filter, replacement, options)); + } + + /** + * Creates a model for deleting at most one document from the {@code namespace} matching the {@code filter}. + * This method is functionally equivalent to {@link #deleteOne(MongoNamespace, Bson, ClientDeleteOneOptions)} + * with the {@linkplain ClientDeleteOneOptions#clientDeleteOneOptions() default options}. + * + * @param namespace The namespace. + * @param filter The filter. + * @return The requested {@link ClientNamespacedDeleteOneModel}. + * @see Filters + */ + static ClientNamespacedDeleteOneModel deleteOne(final MongoNamespace namespace, final Bson filter) { + notNull("namespace", namespace); + notNull("filter", filter); + return new ConcreteClientNamespacedDeleteOneModel(namespace, new ConcreteClientDeleteOneModel(filter, null)); + } + + /** + * Creates a model for deleting at most one document from the {@code namespace} matching the {@code filter}. + * + * @param namespace The namespace. + * @param filter The filter. + * @param options The options. + * @return The requested {@link ClientNamespacedDeleteOneModel}. + * @see Filters + */ + static ClientNamespacedDeleteOneModel deleteOne(final MongoNamespace namespace, final Bson filter, final ClientDeleteOneOptions options) { + notNull("namespace", namespace); + notNull("filter", filter); + notNull("options", options); + return new ConcreteClientNamespacedDeleteOneModel(namespace, new ConcreteClientDeleteOneModel(filter, options)); + } + + /** + * Creates a model for deleting all documents from the {@code namespace} matching the {@code filter}. + * This method is functionally equivalent to {@link #deleteMany(MongoNamespace, Bson, ClientDeleteManyOptions)} + * with the {@linkplain ClientDeleteManyOptions#clientDeleteManyOptions() default options}. + * + * @param namespace The namespace. + * @param filter The filter. + * @return The requested {@link ClientNamespacedDeleteManyModel}. + * @see Filters + */ + static ClientNamespacedDeleteManyModel deleteMany(final MongoNamespace namespace, final Bson filter) { + notNull("namespace", namespace); + notNull("filter", filter); + return new ConcreteClientNamespacedDeleteManyModel(namespace, new ConcreteClientDeleteManyModel(filter, null)); + } + + /** + * Creates a model for deleting all documents from the {@code namespace} matching the {@code filter}. + * + * @param namespace The namespace. + * @param filter The filter. + * @param options The options. + * @return The requested {@link ClientNamespacedDeleteManyModel}. 
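Continuing the sketch, models targeting different namespaces can be collected into one list and handed to a single client-level bulk write; the entry point that accepts the list is outside this diff, and all names are illustrative.

    import com.mongodb.MongoNamespace;
    import com.mongodb.client.model.Filters;
    import com.mongodb.client.model.bulk.ClientNamespacedWriteModel;
    import org.bson.Document;

    import java.util.Arrays;
    import java.util.List;

    public final class MixedBatchSketch {
        public static void main(final String[] args) {
            // One batch may mix operation types and target several collections.
            final List<ClientNamespacedWriteModel> models = Arrays.asList(
                    ClientNamespacedWriteModel.insertOne(
                            new MongoNamespace("shop", "orders"), new Document("status", "new")),
                    ClientNamespacedWriteModel.deleteOne(
                            new MongoNamespace("shop", "carts"), Filters.eq("abandoned", true)),
                    ClientNamespacedWriteModel.deleteMany(
                            new MongoNamespace("shop", "sessions"), Filters.lt("lastSeen", 0)));
            System.out.println(models.size() + " models in one client-level batch");
        }
    }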
+ * @see Filters + */ + static ClientNamespacedDeleteManyModel deleteMany(final MongoNamespace namespace, final Bson filter, final ClientDeleteManyOptions options) { + notNull("namespace", namespace); + notNull("filter", filter); + notNull("options", options); + return new ConcreteClientNamespacedDeleteManyModel(namespace, new ConcreteClientDeleteManyModel(filter, options)); + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientReplaceOneOptions.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientReplaceOneOptions.java new file mode 100644 index 00000000000..4de01a94843 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientReplaceOneOptions.java @@ -0,0 +1,92 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.bulk; + +import com.mongodb.annotations.Sealed; +import com.mongodb.client.model.Collation; +import com.mongodb.internal.client.model.bulk.ConcreteClientReplaceOneOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * The options to apply when replacing a document. + * + * @since 5.3 + */ +@Sealed +public interface ClientReplaceOneOptions extends BaseClientWriteModelOptions, BaseClientUpsertableWriteModelOptions { + /** + * Creates the default options. + * + * @return The default options. + */ + static ClientReplaceOneOptions clientReplaceOneOptions() { + return new ConcreteClientReplaceOneOptions(); + } + + /** + * Sets the collation. + * + * @param collation The collation. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientReplaceOneOptions collation(@Nullable Collation collation); + + /** + * Sets the index specification, + * {@code null}-ifies {@linkplain #hintString(String) hint string}. + * + * @param hint The index specification. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientReplaceOneOptions hint(@Nullable Bson hint); + + /** + * Sets the index name, + * {@code null}-ifies {@linkplain #hint(Bson) hint}. + * + * @param hintString The index name. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientReplaceOneOptions hintString(@Nullable String hintString); + + /** + * Enables or disables creation of a document if no documents match the filter. + * + * @param upsert The upsert flag. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientReplaceOneOptions upsert(@Nullable Boolean upsert); + + /** + * Sets the sort criteria to apply to the operation. A null value means no sort criteria is set. + * + *
<p>
+ * The sort criteria determines which document the operation replaces if the query matches multiple documents. + * The first document matched by the specified sort criteria will be replaced. + * + * @param sort The sort criteria. {@code null} represents the server default. + * @return this + * @mongodb.driver.manual reference/method/db.collection.replaceOne/ Sort + * @mongodb.server.release 8.0 + * @since 5.4 + */ + ClientReplaceOneOptions sort(@Nullable Bson sort); +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientUpdateManyOptions.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientUpdateManyOptions.java new file mode 100644 index 00000000000..fd0b0d12f08 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientUpdateManyOptions.java @@ -0,0 +1,88 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.bulk; + +import com.mongodb.annotations.Sealed; +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.Filters; +import com.mongodb.internal.client.model.bulk.ConcreteClientUpdateManyOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * The options to apply when updating documents. + * + * @since 5.3 + */ +@Sealed +public interface ClientUpdateManyOptions extends BaseClientUpdateOptions { + /** + * Creates the default options. + * + * @return The default options. + */ + static ClientUpdateManyOptions clientUpdateManyOptions() { + return new ConcreteClientUpdateManyOptions(); + } + + /** + * Sets the filters specifying to which array elements an update should apply. + * + * @param arrayFilters The array filters. {@code null} represents the server default. + * @return {@code this}. + * @see Filters + */ + @Override + ClientUpdateManyOptions arrayFilters(@Nullable Iterable arrayFilters); + + /** + * Sets the collation. + * + * @param collation The collation. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientUpdateManyOptions collation(@Nullable Collation collation); + + /** + * Sets the index specification, + * {@code null}-ifies {@linkplain #hintString(String) hint string}. + * + * @param hint The index specification. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientUpdateManyOptions hint(@Nullable Bson hint); + + /** + * Sets the index name, + * {@code null}-ifies {@linkplain #hint(Bson) hint}. + * + * @param hintString The index name. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientUpdateManyOptions hintString(@Nullable String hintString); + + /** + * Enables or disables creation of a document if no documents match the filter. + * + * @param upsert The upsert flag. {@code null} represents the server default. + * @return {@code this}. 
+ */ + @Override + ClientUpdateManyOptions upsert(@Nullable Boolean upsert); +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientUpdateOneOptions.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientUpdateOneOptions.java new file mode 100644 index 00000000000..c5abea43b2a --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientUpdateOneOptions.java @@ -0,0 +1,103 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.bulk; + +import com.mongodb.annotations.Sealed; +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.Filters; +import com.mongodb.internal.client.model.bulk.ConcreteClientUpdateOneOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * The options to apply when updating a document. + * + * @since 5.3 + */ +@Sealed +public interface ClientUpdateOneOptions extends BaseClientUpdateOptions { + /** + * Creates the default options. + * + * @return The default options. + */ + static ClientUpdateOneOptions clientUpdateOneOptions() { + return new ConcreteClientUpdateOneOptions(); + } + + /** + * Sets the filters specifying to which array elements an update should apply. + * + * @param arrayFilters The array filters. {@code null} represents the server default. + * @return {@code this}. + * @see Filters + */ + @Override + ClientUpdateOneOptions arrayFilters(@Nullable Iterable arrayFilters); + + /** + * Sets the collation. + * + * @param collation The collation. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientUpdateOneOptions collation(@Nullable Collation collation); + + /** + * Sets the index specification, + * {@code null}-ifies {@linkplain #hintString(String) hint string}. + * + * @param hint The index specification. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientUpdateOneOptions hint(@Nullable Bson hint); + + /** + * Sets the index name, + * {@code null}-ifies {@linkplain #hint(Bson) hint}. + * + * @param hintString The index name. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientUpdateOneOptions hintString(@Nullable String hintString); + + /** + * Enables or disables creation of a document if no documents match the filter. + * + * @param upsert The upsert flag. {@code null} represents the server default. + * @return {@code this}. + */ + @Override + ClientUpdateOneOptions upsert(@Nullable Boolean upsert); + + /** + * Sets the sort criteria to apply to the operation. A null value means no sort criteria is set. + * + *
<p>
+ * The sort criteria determines which document the operation updates if the query matches multiple documents. + * The first document matched by the specified sort criteria will be updated. + * + * @param sort The sort criteria. {@code null} represents the server default. + * @return this + * @mongodb.driver.manual reference/method/db.collection.updateOne/ Sort + * @mongodb.server.release 8.0 + * @since 5.4 + */ + ClientUpdateOneOptions sort(@Nullable Bson sort); +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/ClientUpdateResult.java b/driver-core/src/main/com/mongodb/client/model/bulk/ClientUpdateResult.java new file mode 100644 index 00000000000..c667db97c9e --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/ClientUpdateResult.java @@ -0,0 +1,53 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.bulk; + +import com.mongodb.annotations.Evolving; +import com.mongodb.bulk.WriteConcernError; +import org.bson.BsonValue; + +import java.util.Optional; + +/** + * The result of a successful {@linkplain ClientNamespacedWriteModel individual update or replace operation}. + * Note that {@link WriteConcernError}s are not considered as making individual operations unsuccessful. + * + * @since 5.3 + */ +@Evolving +public interface ClientUpdateResult { + /** + * The number of documents that matched the filter. + * + * @return The number of documents that matched the filter. + */ + long getMatchedCount(); + + /** + * The number of documents that were modified. + * + * @return The number of documents that were modified. + */ + long getModifiedCount(); + + /** + * The {@code "_id"} of the upserted document if and only if an upsert occurred. + * + * @return The {@code "_id"} of the upserted. + * {@linkplain Optional#isPresent() Present} if and only if an upsert occurred. + */ + Optional getUpsertedId(); +} diff --git a/driver-core/src/main/com/mongodb/client/model/bulk/package-info.java b/driver-core/src/main/com/mongodb/client/model/bulk/package-info.java new file mode 100644 index 00000000000..b9cb98f41a7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/bulk/package-info.java @@ -0,0 +1,23 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Models, options, results for the client-level bulk write operation. 
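A sketch of the update-one options defined above, including the server 8.0 sort criterion; field names are illustrative.

    import com.mongodb.client.model.Filters;
    import com.mongodb.client.model.Sorts;
    import com.mongodb.client.model.bulk.ClientUpdateOneOptions;

    import java.util.Collections;

    import static com.mongodb.client.model.bulk.ClientUpdateOneOptions.clientUpdateOneOptions;

    public final class UpdateOneOptionsSketch {
        public static void main(final String[] args) {
            // When several documents match, sort(...) picks which single document is
            // updated; this requires MongoDB 8.0+ as noted in the javadoc above.
            final ClientUpdateOneOptions options = clientUpdateOneOptions()
                    .arrayFilters(Collections.singletonList(Filters.eq("elem.qty", 0)))
                    .upsert(true)
                    .sort(Sorts.descending("priority"));
            System.out.println(options);
        }
    }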
+ */ +@NonNullApi +package com.mongodb.client.model.bulk; + +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/client/model/changestream/ChangeStreamDocument.java b/driver-core/src/main/com/mongodb/client/model/changestream/ChangeStreamDocument.java new file mode 100644 index 00000000000..ad71ca794ff --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/changestream/ChangeStreamDocument.java @@ -0,0 +1,527 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.changestream; + +import com.mongodb.MongoNamespace; +import com.mongodb.lang.Nullable; +import org.bson.BsonDateTime; +import org.bson.BsonDocument; +import org.bson.BsonInt64; +import org.bson.BsonTimestamp; +import org.bson.codecs.Codec; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.codecs.pojo.annotations.BsonCreator; +import org.bson.codecs.pojo.annotations.BsonExtraElements; +import org.bson.codecs.pojo.annotations.BsonId; +import org.bson.codecs.pojo.annotations.BsonIgnore; +import org.bson.codecs.pojo.annotations.BsonProperty; + +import java.util.Objects; + +/** + * Represents the {@code $changeStream} aggregation output document. + * + *
<p>Note: this class will not be applicable for all change stream outputs. If using custom pipelines that radically change the + * change stream result, then an alternative document format should be used.</p>
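As a usage sketch for the class being introduced here: consuming typed change events through the long-standing synchronous driver API (MongoClients, MongoCollection#watch), which sits outside this diff; the connection string and names are illustrative.

    import com.mongodb.client.MongoClient;
    import com.mongodb.client.MongoClients;
    import com.mongodb.client.MongoCollection;
    import org.bson.Document;

    public final class ChangeStreamSketch {
        public static void main(final String[] args) {
            try (MongoClient client = MongoClients.create("mongodb://localhost")) {
                final MongoCollection<Document> orders =
                        client.getDatabase("shop").getCollection("orders");
                // Each event is a ChangeStreamDocument<Document>; fullDocument may be null
                // unless a fullDocument option such as UPDATE_LOOKUP was requested.
                orders.watch().forEach(change ->
                        System.out.println(change.getOperationType() + " in " + change.getNamespace()));
            }
        }
    }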
+ * + * @param <TDocument> The type that this collection will encode the {@code fullDocument} field into. + * @since 3.6 + */ +public final class ChangeStreamDocument<TDocument> { + + @BsonId() + private final BsonDocument resumeToken; + private final BsonDocument namespaceDocument; + + @BsonProperty("nsType") + private final String namespaceTypeString; + @BsonIgnore + private final NamespaceType namespaceType; + private final BsonDocument destinationNamespaceDocument; + private final TDocument fullDocument; + private final TDocument fullDocumentBeforeChange; + private final BsonDocument documentKey; + private final BsonTimestamp clusterTime; + @BsonProperty("operationType") + private final String operationTypeString; + @BsonIgnore + private final OperationType operationType; + private final UpdateDescription updateDescription; + private final BsonInt64 txnNumber; + private final BsonDocument lsid; + private final BsonDateTime wallTime; + private final SplitEvent splitEvent; + @BsonExtraElements + private final BsonDocument extraElements; + + /** + * Creates a new instance + * + * @param operationType the operation type + * @param resumeToken the resume token + * @param namespaceDocument the BsonDocument representing the namespace + * @param namespaceType the namespace type + * @param destinationNamespaceDocument the BsonDocument representing the destination namespace + * @param fullDocument the full document + * @param fullDocumentBeforeChange the full document before change + * @param documentKey a document containing the _id of the changed document + * @param clusterTime the cluster time at which the change occurred + * @param updateDescription the update description + * @param txnNumber the transaction number + * @param lsid the identifier for the session associated with the transaction + * @param wallTime the wall time of the server at the moment the change occurred + * @param splitEvent the split event + * @param extraElements any extra elements that are part of the change stream document but not otherwise mapped to fields + * + * @since 4.11 + */ + @BsonCreator + public ChangeStreamDocument( + @Nullable @BsonProperty("operationType") final String operationType, + @BsonProperty("resumeToken") final BsonDocument resumeToken, + @Nullable @BsonProperty("ns") final BsonDocument namespaceDocument, + @Nullable @BsonProperty("nsType") final String namespaceType, + @Nullable @BsonProperty("to") final BsonDocument destinationNamespaceDocument, + @Nullable @BsonProperty("fullDocument") final TDocument fullDocument, + @Nullable @BsonProperty("fullDocumentBeforeChange") final TDocument fullDocumentBeforeChange, + @Nullable @BsonProperty("documentKey") final BsonDocument documentKey, + @Nullable @BsonProperty("clusterTime") final BsonTimestamp clusterTime, + @Nullable @BsonProperty("updateDescription") final UpdateDescription updateDescription, + @Nullable @BsonProperty("txnNumber") final BsonInt64 txnNumber, + @Nullable @BsonProperty("lsid") final BsonDocument lsid, + @Nullable @BsonProperty("wallTime") final BsonDateTime wallTime, + @Nullable @BsonProperty("splitEvent") final SplitEvent splitEvent, + @Nullable @BsonProperty final BsonDocument extraElements) { + this.resumeToken = resumeToken; + this.namespaceDocument = namespaceDocument; + this.namespaceTypeString = namespaceType; + this.namespaceType = namespaceTypeString == null ?
null : NamespaceType.fromString(namespaceType); + this.destinationNamespaceDocument = destinationNamespaceDocument; + this.fullDocumentBeforeChange = fullDocumentBeforeChange; + this.documentKey = documentKey; + this.fullDocument = fullDocument; + this.clusterTime = clusterTime; + this.operationTypeString = operationType; + this.operationType = operationTypeString == null ? null : OperationType.fromString(operationTypeString); + this.updateDescription = updateDescription; + this.txnNumber = txnNumber; + this.lsid = lsid; + this.wallTime = wallTime; + this.splitEvent = splitEvent; + this.extraElements = extraElements; + } + + /** + * Returns the resumeToken + * + * @return the resumeToken + */ + public BsonDocument getResumeToken() { + return resumeToken; + } + + /** + * Returns the namespace, derived from the "ns" field in a change stream document. + *
<p>
+ * The invalidate operation type does include a MongoNamespace in the ChangeStreamDocument response. The + * dropDatabase operation type includes a MongoNamespace, but does not include a collection name as part + * of the namespace. + * + * @return the namespace. If the namespaceDocument is null or if it is missing either the 'db' or 'coll' keys, + * then this will return null. + * @see #getNamespaceType() + * @see #getNamespaceTypeString() + */ + @BsonIgnore + @Nullable + public MongoNamespace getNamespace() { + if (namespaceDocument == null) { + return null; + } + if (!namespaceDocument.containsKey("db") || !namespaceDocument.containsKey("coll")) { + return null; + } + + return new MongoNamespace(namespaceDocument.getString("db").getValue(), namespaceDocument.getString("coll").getValue()); + } + + /** + * Returns the namespace document, derived from the "ns" field in a change stream document. + *
<p>
+ * The namespace document is a BsonDocument containing the values associated with a MongoNamespace. The + * 'db' key refers to the database name and the 'coll' key refers to the collection name. + * + * @return the namespaceDocument + * @since 3.8 + * @see #getNamespaceType() + * @see #getNamespaceTypeString() + */ + @BsonProperty("ns") + @Nullable + public BsonDocument getNamespaceDocument() { + return namespaceDocument; + } + + /** + * Returns the type of the newly created namespace object as a String, derived from the "nsType" field in a change stream document. + *
<p>
+ * This method is useful when using a driver release that has not yet been updated to include a newer namespace type in the + * {@link NamespaceType} enum. In that case, {@link #getNamespaceType()} will return {@link NamespaceType#OTHER} and this method can + * be used to retrieve the actual namespace type as a string value. + *
<p>
+ * May return null only if $changeStreamSplitLargeEvent is used. + * + * @return the namespace type as a string + * @since 5.6 + * @mongodb.server.release 8.1 + * @see #getNamespaceType() + * @see #getNamespaceDocument() + */ + @Nullable + public String getNamespaceTypeString() { + return namespaceTypeString; + } + + /** + * Returns the type of the newly created namespace object, derived from the "nsType" field in a change stream document. + * + * @return the namespace type. + * @since 5.6 + * @mongodb.server.release 8.1 + * @see #getNamespaceTypeString() + * @see #getNamespaceDocument() + */ + @Nullable + public NamespaceType getNamespaceType() { + return namespaceType; + } + + /** + * Returns the destination namespace, derived from the "to" field in a change stream document. + * + *
<p> + * The destination namespace is used to indicate the destination of a collection rename event. + * </p>
+ * + * @return the namespace. If the "to" document is null or absent, then this will return null. + * @see OperationType#RENAME + * @since 3.11 + */ + @BsonIgnore + @Nullable + public MongoNamespace getDestinationNamespace() { + if (destinationNamespaceDocument == null) { + return null; + } + + return new MongoNamespace(destinationNamespaceDocument.getString("db").getValue(), + destinationNamespaceDocument.getString("coll").getValue()); + } + + /** + * Returns the destination namespace document, derived from the "to" field in a change stream document. + * + *
<p> + * The destination namespace document is a BsonDocument containing the values associated with a MongoNamespace. The + * 'db' key refers to the database name and the 'coll' key refers to the collection name. + * </p>
+ * @return the destinationNamespaceDocument + * @since 3.11 + */ + @BsonProperty("to") + @Nullable + public BsonDocument getDestinationNamespaceDocument() { + return destinationNamespaceDocument; + } + + /** + * Returns the database name + * + * @return the databaseName. If the namespaceDocument is null or if it is missing the 'db' key, then this will + * return null. + * @since 3.8 + */ + @BsonIgnore + @Nullable + public String getDatabaseName() { + if (namespaceDocument == null) { + return null; + } + if (!namespaceDocument.containsKey("db")) { + return null; + } + return namespaceDocument.getString("db").getValue(); + } + + /** + * Returns the fullDocument. + * + *
<p> + * Always present for operations of type {@link OperationType#INSERT} and {@link OperationType#REPLACE}. Also present for operations + * of type {@link OperationType#UPDATE} if the user has specified {@link FullDocument#UPDATE_LOOKUP} for the {@code fullDocument} + * option when creating the change stream. + * </p>
+ * + *
<p> + * For operations of type {@link OperationType#INSERT} and {@link OperationType#REPLACE}, the value will contain the document being + * inserted or the new version of the document that is replacing the existing document, respectively. + * </p>
+ * + *
<p> + * For operations of type {@link OperationType#UPDATE}, the value will contain a copy of the full version of the document from some + * point after the update occurred. If the document was deleted since the update happened, the value may be null. + * </p>
+ * + *

+ * Contains the point-in-time post-image of the modified document if the post-image is available and either + * {@link FullDocument#REQUIRED} or {@link FullDocument#WHEN_AVAILABLE} was specified for the {@code fullDocument} option when + * creating the change stream. A post-image is always available for {@link OperationType#INSERT} and {@link OperationType#REPLACE} + * events. + *
</p>
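A hedged sketch of the {@code fullDocument} option described above; `collection` (a `MongoCollection<Document>`) is assumed from the surrounding application code.

```java
collection.watch()
        .fullDocument(FullDocument.UPDATE_LOOKUP)
        .forEach(event -> {
            Document full = event.getFullDocument(); // may still be null if the document was deleted in the meantime
            System.out.println(event.getOperationType() + ": " + full);
        });
```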
+ * + * @return the fullDocument + */ + @Nullable + public TDocument getFullDocument() { + return fullDocument; + } + + /** + * Returns the fullDocument before change + * + *
<p>
+ * Contains the pre-image of the modified or deleted document if the pre-image is available for the change event and either + * {@link FullDocumentBeforeChange#REQUIRED} or {@link FullDocumentBeforeChange#WHEN_AVAILABLE} was specified for the + * {@code fullDocumentBeforeChange} option when creating the change stream. If {@link FullDocumentBeforeChange#WHEN_AVAILABLE} was + * specified but the pre-image is unavailable, the value will be null. + *
</p>
+     *
+     * @return the fullDocument before change
+     * @since 4.7
+     * @mongodb.server.release 6.0
+     */
+    @Nullable
+    public TDocument getFullDocumentBeforeChange() {
+        return fullDocumentBeforeChange;
+    }
+
+    /**
+     * Returns a document containing just the _id of the changed document.
+     *
<p>
+     * For unsharded collections this contains a single field, _id, with the
+     * value of the _id of the document updated. For sharded collections,
+     * this will contain all the components of the shard key in order,
+     * followed by the _id if the _id isn't part of the shard key.
+     *
</p>
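A minimal sketch of using the document key to re-read the current state of the changed document; `event` and `collection` are assumed from context.

```java
BsonDocument key = event.getDocumentKey(); // _id, plus the shard key fields on sharded collections
if (key != null) {
    Document current = collection.find(key).first(); // BsonDocument implements Bson, so it works as a filter
}
```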
+ * + * @return the document key, or null if the event is not associated with a single document (e.g. a collection rename event) + */ + @Nullable + public BsonDocument getDocumentKey() { + return documentKey; + } + + /** + * Gets the cluster time at which the change occurred. + * + * @return the cluster time at which the change occurred + * @since 3.8 + * @mongodb.server.release 4.0 + */ + @Nullable + public BsonTimestamp getClusterTime() { + return clusterTime; + } + + + /** + * Returns the operation type as a string. + *
<p>
+ * This method is useful when using a driver release that has not yet been updated to include a newer operation type in the + * {@link OperationType} enum. In that case, {@link #getOperationType()} will return {@link OperationType#OTHER} and this method can + * be used to retrieve the actual operation type as a string value. + *
</p>
+ * May return null only if $changeStreamSplitLargeEvent is used. + * + * @return the operation type as a string + * @since 4.6 + * @see #getOperationType() + */ + @Nullable + public String getOperationTypeString() { + return operationTypeString; + } + + /** + * Returns the operationType. + *
<p>
+     * May return null only if $changeStreamSplitLargeEvent is used.
+     *
+     * @return the operationType
+     */
+    @Nullable
+    public OperationType getOperationType() {
+        return operationType;
+    }
+
+    /**
+     * Returns the updateDescription
+     *
+     * @return the updateDescription, or null if the event is not associated with a single document (e.g. a collection rename event)
+     */
+    @Nullable
+    public UpdateDescription getUpdateDescription() {
+        return updateDescription;
+    }
+
+    /**
+     * Returns the transaction number
+     *
+     * @return the transaction number, or null if not part of a multi-document transaction
+     * @since 3.11
+     * @mongodb.server.release 4.0
+     */
+    @Nullable
+    public BsonInt64 getTxnNumber() {
+        return txnNumber;
+    }
+
+    /**
+     * Returns the identifier for the session associated with the transaction
+     *
+     * @return the lsid, or null if not part of a multi-document transaction
+     * @since 3.11
+     * @mongodb.server.release 4.0
+     */
+    @Nullable
+    public BsonDocument getLsid() {
+        return lsid;
+    }
+
+    /**
+     * The wall time of the server at the moment the change occurred.
+     *
+     * @return The wall time of the server at the moment the change occurred.
+     * @since 4.7
+     * @mongodb.server.release 6.0
+     */
+    @Nullable
+    public BsonDateTime getWallTime() {
+        return wallTime;
+    }
+
+    /**
+     * The split event.
+     *
+     * @return the split event
+     * @since 4.11
+     * @mongodb.server.release 6.0.9
+     */
+    @Nullable
+    public SplitEvent getSplitEvent() {
+        return splitEvent;
+    }
+
+    /**
+     * Any extra elements that are part of the change stream document but not otherwise mapped to fields.
+     *
+     * @return Any extra elements that are part of the change stream document but not otherwise mapped to fields.
+     * @since 4.7
+     */
+    @Nullable
+    public BsonDocument getExtraElements() {
+        return extraElements;
+    }
+
+    /**
+     * Creates the codec for the specific ChangeStreamOutput type
+     *
+     * @param fullDocumentClass the class to use to represent the fullDocument
+     * @param codecRegistry the codec registry
+     * @param <TFullDocument> the fullDocument type
+     * @return the codec
+     */
+    public static <TFullDocument> Codec<ChangeStreamDocument<TFullDocument>> createCodec(final Class<TFullDocument> fullDocumentClass,
+            final CodecRegistry codecRegistry) {
+        return new ChangeStreamDocumentCodec<>(fullDocumentClass, codecRegistry);
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        ChangeStreamDocument<?> that = (ChangeStreamDocument<?>) o;
+        return Objects.equals(resumeToken, that.resumeToken)
+                && Objects.equals(namespaceDocument, that.namespaceDocument)
+                && Objects.equals(destinationNamespaceDocument, that.destinationNamespaceDocument)
+                && Objects.equals(fullDocument, that.fullDocument)
+                && Objects.equals(fullDocumentBeforeChange, that.fullDocumentBeforeChange)
+                && Objects.equals(documentKey, that.documentKey)
+                && Objects.equals(clusterTime, that.clusterTime)
+                && Objects.equals(operationTypeString, that.operationTypeString)
+                // operationType covered by operationTypeString
+                && Objects.equals(updateDescription, that.updateDescription)
+                && Objects.equals(txnNumber, that.txnNumber)
+                && Objects.equals(lsid, that.lsid)
+                && Objects.equals(wallTime, that.wallTime)
+                && Objects.equals(splitEvent, that.splitEvent)
+                && Objects.equals(extraElements, that.extraElements);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(
+                resumeToken,
+                namespaceDocument,
+                destinationNamespaceDocument,
+                fullDocument,
+                fullDocumentBeforeChange,
+                documentKey,
+                clusterTime,
+                operationTypeString,
+                // operationType covered by operationTypeString
+                updateDescription,
+                txnNumber,
+                lsid,
+                wallTime,
+                splitEvent,
+                extraElements);
+    }
+
+    @Override
+    public String toString() {
+        return "ChangeStreamDocument{"
+                + " operationType=" + operationTypeString
+                + ", resumeToken=" + resumeToken
+                + ", namespace=" + getNamespace()
+                + ", destinationNamespace=" + getDestinationNamespace()
+                + ", fullDocument=" + fullDocument
+                + ", fullDocumentBeforeChange=" + fullDocumentBeforeChange
+                + ", documentKey=" + documentKey
+                + ", clusterTime=" + clusterTime
+                + ", updateDescription=" + updateDescription
+                + ", txnNumber=" + txnNumber
+                + ", lsid=" + lsid
+                + ", splitEvent=" + splitEvent
+                + ", wallTime=" + wallTime
+                + "}";
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/client/model/changestream/ChangeStreamDocumentCodec.java b/driver-core/src/main/com/mongodb/client/model/changestream/ChangeStreamDocumentCodec.java
new file mode 100644
index 00000000000..7889a2dd4bb
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/changestream/ChangeStreamDocumentCodec.java
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model.changestream;
+
+import com.mongodb.MongoNamespace;
+import org.bson.BsonReader;
+import org.bson.BsonWriter;
+import org.bson.codecs.BsonValueCodecProvider;
+import org.bson.codecs.Codec;
+import org.bson.codecs.DecoderContext;
+import org.bson.codecs.EncoderContext;
+import org.bson.codecs.configuration.CodecRegistry;
+import org.bson.codecs.pojo.ClassModel;
+import org.bson.codecs.pojo.ClassModelBuilder;
+import org.bson.codecs.pojo.PojoCodecProvider;
+import org.bson.codecs.pojo.PropertyModelBuilder;
+
+import static org.bson.codecs.configuration.CodecRegistries.fromProviders;
+import static org.bson.codecs.configuration.CodecRegistries.fromRegistries;
+
+@SuppressWarnings({"unchecked", "rawtypes"})
+final class ChangeStreamDocumentCodec<TResult> implements Codec<ChangeStreamDocument<TResult>> {
+
+    private final Codec<ChangeStreamDocument<TResult>> codec;
+
+    ChangeStreamDocumentCodec(final Class<TResult> fullDocumentClass, final CodecRegistry codecRegistry) {
+
+        ClassModelBuilder<ChangeStreamDocument> classModelBuilder = ClassModel.builder(ChangeStreamDocument.class);
+        ((PropertyModelBuilder<TResult>) classModelBuilder.getProperty("fullDocument")).codec(codecRegistry.get(fullDocumentClass));
+        ((PropertyModelBuilder<TResult>) classModelBuilder.getProperty("fullDocumentBeforeChange"))
+                .codec(codecRegistry.get(fullDocumentClass));
+        ClassModel<ChangeStreamDocument> changeStreamDocumentClassModel = classModelBuilder.build();
+
+        PojoCodecProvider provider = PojoCodecProvider.builder()
+                .register(MongoNamespace.class)
+                .register(UpdateDescription.class)
+                .register(SplitEvent.class)
+                .register(TruncatedArray.class)
+                .register(changeStreamDocumentClassModel)
+                .build();
+
+        CodecRegistry registry = fromRegistries(fromProviders(provider, new BsonValueCodecProvider()), codecRegistry);
+        this.codec = (Codec<ChangeStreamDocument<TResult>>) (Codec<?>)
+                registry.get(ChangeStreamDocument.class);
+    }
+
+    @Override
+    public ChangeStreamDocument<TResult> decode(final BsonReader reader, final DecoderContext decoderContext) {
+        return codec.decode(reader, decoderContext);
+    }
+
+    @Override
+    public void encode(final BsonWriter writer, final ChangeStreamDocument<TResult> value, final EncoderContext encoderContext) {
+        codec.encode(writer, value, encoderContext);
+    }
+
+    @Override
+    public Class<ChangeStreamDocument<TResult>> getEncoderClass() {
+        return (Class<ChangeStreamDocument<TResult>>) (Class<?>) ChangeStreamDocument.class;
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/client/model/changestream/FullDocument.java b/driver-core/src/main/com/mongodb/client/model/changestream/FullDocument.java
new file mode 100644
index 00000000000..5d06b2ff17f
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/changestream/FullDocument.java
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model.changestream;
+
+import static java.lang.String.format;
+
+/**
+ *
+ * Change Stream fullDocument configuration.
+ *
+ *
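An illustrative sketch of the codec plumbing above (not part of the diff): decoding a previously captured change event. `rawEvent` is a hypothetical `BsonDocument` holding one change stream document.

```java
Codec<ChangeStreamDocument<Document>> codec =
        ChangeStreamDocument.createCodec(Document.class, MongoClientSettings.getDefaultCodecRegistry());
ChangeStreamDocument<Document> event =
        codec.decode(new BsonDocumentReader(rawEvent), DecoderContext.builder().build());
```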
<p>
Determines what to return for update operations when using a Change Stream. Defaults to {@link FullDocument#DEFAULT}. + * When set to {@link FullDocument#UPDATE_LOOKUP}, the change stream for partial updates will include both a delta describing the + * changes to the document as well as a copy of the entire document that was changed from some time after the change occurred.
</p>
+ * + * @since 3.6 + * @mongodb.server.release 3.6 + */ +public enum FullDocument { + + /** + * Default + * + *
<p>
Returns the server's default value in the {@code fullDocument} field.
</p>
+ */ + DEFAULT("default"), + + /** + * Lookup + * + *
<p>
The change stream for partial updates will include both a delta describing the changes to the document as well as a copy of the + * entire document that was changed from some time after the change occurred.
</p>
+     */
+    UPDATE_LOOKUP("updateLookup"),
+
+    /**
+     * Configures the change stream to return the post-image of the modified document for replace and update change events, if it
+     * is available.
+     *
+     * @since 4.7
+     * @mongodb.server.release 6.0
+     */
+    WHEN_AVAILABLE("whenAvailable"),
+
+    /**
+     * The same behavior as {@link #WHEN_AVAILABLE} except that an error is raised if the post-image is not available.
+     *
+     * @since 4.7
+     * @mongodb.server.release 6.0
+     */
+    REQUIRED("required");
+
+
+    private final String value;
+    FullDocument(final String value) {
+        this.value = value;
+    }
+
+    /**
+     * @return the String representation of the fullDocument value
+     */
+    public String getValue() {
+        return value;
+    }
+
+    /**
+     * Returns the FullDocument from the string value.
+     *
+     * @param changeStreamFullDocument the string value.
+     * @return the FullDocument
+     */
+    public static FullDocument fromString(final String changeStreamFullDocument) {
+        if (changeStreamFullDocument != null) {
+            for (FullDocument fullDocument : FullDocument.values()) {
+                if (changeStreamFullDocument.equals(fullDocument.value)) {
+                    return fullDocument;
+                }
+            }
+        }
+        throw new IllegalArgumentException(format("'%s' is not a valid ChangeStreamFullDocument", changeStreamFullDocument));
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/client/model/changestream/FullDocumentBeforeChange.java b/driver-core/src/main/com/mongodb/client/model/changestream/FullDocumentBeforeChange.java
new file mode 100644
index 00000000000..946ae184866
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/changestream/FullDocumentBeforeChange.java
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model.changestream;
+
+import static com.mongodb.assertions.Assertions.assertNotNull;
+import static java.lang.String.format;
+
+/**
+ * Change Stream fullDocumentBeforeChange configuration.
+ *
+ *
<p>
+ * Determines what to return for update operations when using a Change Stream. Defaults to {@link FullDocumentBeforeChange#DEFAULT}. + *
</p>
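A hedged sketch of requesting pre-images via this option; `collection` is assumed, and the collection must additionally have `changeStreamPreAndPostImages` enabled server-side.

```java
collection.watch()
        .fullDocumentBeforeChange(FullDocumentBeforeChange.WHEN_AVAILABLE)
        .forEach(event -> {
            Document before = event.getFullDocumentBeforeChange(); // null when no pre-image was recorded
        });
```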
+ * + * @since 4.7 + * @mongodb.server.release 6.0 + */ +public enum FullDocumentBeforeChange { + /** + * The default value + */ + DEFAULT("default"), + + /** + * Configures the change stream to not include the pre-image of the modified document. + */ + OFF("off"), + + /** + * Configures the change stream to return the pre-image of the modified document for replace, update, and delete change events if it + * is available. + */ + WHEN_AVAILABLE("whenAvailable"), + + /** + * The same behavior as {@link #WHEN_AVAILABLE} except that an error is raised by the server if the pre-image is not available. + */ + REQUIRED("required"); + + + private final String value; + + /** + * The string value. + * + * @return the string value + */ + public String getValue() { + return value; + } + + FullDocumentBeforeChange(final String value) { + this.value = value; + } + + /** + * Returns the FullDocumentBeforeChange from the string value. + * + * @param value the string value. + * @return the full document before change + */ + public static FullDocumentBeforeChange fromString(final String value) { + assertNotNull(value); + for (FullDocumentBeforeChange fullDocumentBeforeChange : FullDocumentBeforeChange.values()) { + if (value.equals(fullDocumentBeforeChange.value)) { + return fullDocumentBeforeChange; + } + } + throw new IllegalArgumentException(format("'%s' is not a valid FullDocumentBeforeChange", value)); + }} diff --git a/driver-core/src/main/com/mongodb/client/model/changestream/NamespaceType.java b/driver-core/src/main/com/mongodb/client/model/changestream/NamespaceType.java new file mode 100644 index 00000000000..5a963ebf62b --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/changestream/NamespaceType.java @@ -0,0 +1,87 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.changestream; + +import com.mongodb.lang.Nullable; + +/** + * Represents the type of the newly created namespace object in change stream events. + *
<p>
+ * Only present for operations of type {@code create} and when the {@code showExpandedEvents} + * change stream option is enabled. + *
</p>
+ * + * @since 5.6 + * @mongodb.server.release 8.1 + */ +public enum NamespaceType { + /** + * The collection namespace type. + */ + COLLECTION("collection"), + /** + * The timeseries namespace type. + */ + TIMESERIES("timeseries"), + /** + * The view namespace type. + */ + VIEW("view"), + /** + * The other namespace type. + * + *
<p>
A placeholder for newer namespace types issued by the server. + * Users encountering OTHER namespace types are advised to update the driver to get the actual namespace type.
</p>
+ */ + OTHER("other"); + + private final String value; + NamespaceType(final String namespaceTypeName) { + this.value = namespaceTypeName; + } + + /** + * @return the String representation of the namespace type + */ + public String getValue() { + return value; + } + + /** + * Returns the ChangeStreamNamespaceType from the string value. + * + * @param namespaceTypeName the string value. + * @return the namespace type. + */ + public static NamespaceType fromString(@Nullable final String namespaceTypeName) { + if (namespaceTypeName != null) { + for (NamespaceType namespaceType : NamespaceType.values()) { + if (namespaceTypeName.equals(namespaceType.value)) { + return namespaceType; + } + } + } + return OTHER; + } + + @Override + public String toString() { + return "NamespaceType{" + + "value='" + value + "'" + + "}"; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/changestream/OperationType.java b/driver-core/src/main/com/mongodb/client/model/changestream/OperationType.java new file mode 100644 index 00000000000..3c04973fa18 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/changestream/OperationType.java @@ -0,0 +1,119 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.changestream; + +import com.mongodb.lang.Nullable; + +/** + * The {@code $changeStream} operation type. + * + * @since 3.6 + */ +public enum OperationType { + + /** + * The insert operation type + */ + INSERT("insert"), + + /** + * The update operation type + */ + UPDATE("update"), + + /** + * The replace operation type + */ + REPLACE("replace"), + + /** + * The delete operation type + */ + DELETE("delete"), + + /** + * The invalidate operation type + */ + INVALIDATE("invalidate"), + + /** + * The drop operation type + * + * @since 3.8.2 + */ + DROP("drop"), + + /** + * The dropDatabase operation type + * + * @since 3.8.2 + */ + DROP_DATABASE("dropDatabase"), + + /** + * The rename operation type for renaming collections + * + * @since 3.8.2 + */ + RENAME("rename"), + + /** + * The other operation type. + * + *
<p>
A placeholder for newer operation types issued by the server. + * Users encountering OTHER operation types are advised to update the driver to get the actual operation type.
</p>
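A minimal sketch of the forward-compatibility pattern described above; `event` is assumed from a surrounding change stream loop.

```java
OperationType type = event.getOperationType();
if (type == OperationType.OTHER) {
    // A newer server emitted a type this driver release does not know yet;
    // the raw string form is still available.
    System.out.println("unrecognized operation type: " + event.getOperationTypeString());
}
```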
+ * + * @since 3.8.2 + */ + OTHER("other"); + + private final String value; + OperationType(final String operationTypeName) { + this.value = operationTypeName; + } + + /** + * @return the String representation of the operation type + */ + public String getValue() { + return value; + } + + /** + * Returns the ChangeStreamOperationType from the string value. + * + * @param operationTypeName the string value. + * @return the operation type. + */ + public static OperationType fromString(@Nullable final String operationTypeName) { + if (operationTypeName != null) { + for (OperationType operationType : OperationType.values()) { + if (operationTypeName.equals(operationType.value)) { + return operationType; + } + } + } + return OTHER; + } + + @Override + public String toString() { + return "OperationType{" + + "value='" + value + "'" + + "}"; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/changestream/SplitEvent.java b/driver-core/src/main/com/mongodb/client/model/changestream/SplitEvent.java new file mode 100644 index 00000000000..362b36c2587 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/changestream/SplitEvent.java @@ -0,0 +1,90 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.changestream; + +import org.bson.codecs.pojo.annotations.BsonCreator; +import org.bson.codecs.pojo.annotations.BsonProperty; + +import java.util.Objects; + +/** + * The current fragment, out of the total number of fragments. + * When the change stream's backing aggregation pipeline contains the + * $changeStreamSplitLargeEvent stage, events larger than 16MB + * will be split into multiple events. + * + * @since 4.11 + * @mongodb.server.release 6.0.9 + * @mongodb.driver.manual reference/operator/aggregation/changeStreamSplitLargeEvent/ $changeStreamSplitLargeEvent + */ +public final class SplitEvent { + private final int fragment; + private final int of; + + /** + * @param fragment the fragment number + * @param of the total number of fragments + */ + @BsonCreator + public SplitEvent( + @BsonProperty("fragment") final int fragment, + @BsonProperty("of") final int of) { + this.fragment = fragment; + this.of = of; + } + + /** + * Which 1-based fragment this is, out of the total number of fragments. + * @return the fragment number + */ + public int getFragment() { + return fragment; + } + + /** + * The total number of fragments. + * @return the total number of fragments. 
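As an illustration of SplitEvent (not part of the diff): enabling event splitting through the aggregation stage and observing the fragment counters; `collection` is assumed.

```java
List<Bson> pipeline = Collections.singletonList(
        new Document("$changeStreamSplitLargeEvent", new Document()));
collection.watch(pipeline).forEach(event -> {
    SplitEvent split = event.getSplitEvent();
    if (split != null) {
        System.out.println("fragment " + split.getFragment() + " of " + split.getOf());
    }
});
```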
+ */ + public int getOf() { + return of; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + SplitEvent that = (SplitEvent) o; + return fragment == that.fragment && of == that.of; + } + + @Override + public int hashCode() { + return Objects.hash(fragment, of); + } + + @Override + public String toString() { + return "SplitEvent{" + + "fragment=" + fragment + + ", of=" + of + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/changestream/TruncatedArray.java b/driver-core/src/main/com/mongodb/client/model/changestream/TruncatedArray.java new file mode 100644 index 00000000000..cf8570aae6b --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/changestream/TruncatedArray.java @@ -0,0 +1,90 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.changestream; + +import org.bson.codecs.pojo.annotations.BsonCreator; +import org.bson.codecs.pojo.annotations.BsonProperty; + +import java.util.Objects; + +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A part of an {@link UpdateDescription} object specifying a change to a field of the {@linkplain org.bson.BsonType#ARRAY array} type + * when the change is reported as truncation. + * + * @since 4.3 + */ +public final class TruncatedArray { + private final String field; + private final int newSize; + + /** + * @param field The name of the field that was truncated. + * @param newSize The size of the new field value. + */ + @BsonCreator + public TruncatedArray(@BsonProperty("field") final String field, @BsonProperty("newSize") final int newSize) { + this.field = notNull("field", field); + isTrueArgument("newSize >= 0", newSize >= 0); + this.newSize = newSize; + } + + /** + * Returns the name of the truncated field. + * + * @return {@code field}. + */ + public String getField() { + return field; + } + + /** + * Returns the size of the new {@linkplain #getField() field} value. + * + * @return {@code newSize}. 
+ */ + public int getNewSize() { + return newSize; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + TruncatedArray that = (TruncatedArray) o; + return newSize == that.newSize && field.equals(that.field); + } + + @Override + public int hashCode() { + return Objects.hash(field, newSize); + } + + @Override + public String toString() { + return "TruncatedArray{" + + "field=" + field + + ", newSize=" + newSize + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/changestream/UpdateDescription.java b/driver-core/src/main/com/mongodb/client/model/changestream/UpdateDescription.java new file mode 100644 index 00000000000..632bad99c53 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/changestream/UpdateDescription.java @@ -0,0 +1,216 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.changestream; + +import com.mongodb.lang.NonNull; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.codecs.pojo.annotations.BsonCreator; +import org.bson.codecs.pojo.annotations.BsonProperty; + +import java.util.List; +import java.util.Objects; + +import static java.util.Collections.emptyList; + +/** + * The update description for changed fields in a {@code $changeStream} operation. + * + * @since 3.6 + */ +public final class UpdateDescription { + private final List removedFields; + private final BsonDocument updatedFields; + private final List truncatedArrays; + private final BsonDocument disambiguatedPaths; + + /** + * Creates a new instance + * + * @param removedFields See {@link #UpdateDescription(List, BsonDocument, List)}. + * @param updatedFields See {@link #UpdateDescription(List, BsonDocument, List)}. + * @see #UpdateDescription(List, BsonDocument, List) + */ + public UpdateDescription(@Nullable final List removedFields, + @Nullable final BsonDocument updatedFields) { + this(removedFields, updatedFields, null); + } + + /** + * @param removedFields Names of the fields that were removed. + * @param updatedFields Information about the updated fields. + * @param truncatedArrays Information about the updated fields of the {@linkplain org.bson.BsonType#ARRAY array} type + * when the changes are reported as truncations. + * If {@code null}, then {@link #getTruncatedArrays()} returns an {@linkplain List#isEmpty() empty} {@link List}. + * @since 4.3 + */ + public UpdateDescription( + @Nullable final List removedFields, + @Nullable final BsonDocument updatedFields, + @Nullable final List truncatedArrays) { + this(removedFields, updatedFields, truncatedArrays, null); + } + + /** + * @param removedFields Names of the fields that were removed. + * @param updatedFields Information about the updated fields. 
+ * @param truncatedArrays Information about the updated fields of the {@linkplain org.bson.BsonType#ARRAY array} type + * when the changes are reported as truncations. If {@code null}, then {@link #getTruncatedArrays()} returns + * an {@linkplain List#isEmpty() empty} {@link List}. + * @param disambiguatedPaths a document containing a map that associates an update path to an array containing the path components + * used in the update document. + * @since 4.8 + */ + @BsonCreator + public UpdateDescription( + @Nullable @BsonProperty("removedFields") final List removedFields, + @Nullable @BsonProperty("updatedFields") final BsonDocument updatedFields, + @Nullable @BsonProperty("truncatedArrays") final List truncatedArrays, + @Nullable @BsonProperty("disambiguatedPaths") final BsonDocument disambiguatedPaths) { + this.removedFields = removedFields; + this.updatedFields = updatedFields; + this.truncatedArrays = truncatedArrays == null ? emptyList() : truncatedArrays; + this.disambiguatedPaths = disambiguatedPaths; + } + + /** + * Returns the removedFields + * + * @return the removedFields + */ + @Nullable + public List getRemovedFields() { + return removedFields; + } + + /** + * Returns information about the updated fields excluding the fields reported via {@link #getTruncatedArrays()}. + *
<p>
+     * Despite {@linkplain org.bson.BsonType#ARRAY array} fields reported via {@link #getTruncatedArrays()} being excluded from the
+     * information returned by this method, changes to fields of the elements of the array values may be reported via this method.
+     * For example, given the original field {@code "arrayField": ["foo", {"a": "bar"}, 1, 2, 3]}
+     * and the updated field {@code "arrayField": ["foo", {"a": "bar", "b": 3}]}, the following is how such a change may be reported:
+     * <table>
+     *     <caption>An example showing how the aforementioned change may be reported</caption>
+     *     <tr>
+     *         <th>Method</th>
+     *         <th>Result</th>
+     *     </tr>
+     *     <tr>
+     *         <td>{@link #getUpdatedFields()}</td>
+     *         <td>{@code {"arrayField.1.b": 3}}</td>
+     *     </tr>
+     *     <tr>
+     *         <td>{@link #getTruncatedArrays()}</td>
+     *         <td>{@code {"field": "arrayField", "newSize": 2}}</td>
+     *     </tr>
+     * </table>
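A hedged sketch of consuming an UpdateDescription: naively folding an update event into a cached `BsonDocument` copy (`event` and `cached` are assumed; real code must resolve dotted paths rather than treat them as top-level keys).

```java
UpdateDescription description = event.getUpdateDescription();
if (description != null) {
    BsonDocument updated = description.getUpdatedFields();
    if (updated != null) {
        cached.putAll(updated); // caution: dotted keys denote paths, not top-level fields
    }
    if (description.getRemovedFields() != null) {
        description.getRemovedFields().forEach(cached::remove);
    }
}
```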
+ * + * @return {@code updatedFields}. + * @see #getTruncatedArrays() + */ + @Nullable + public BsonDocument getUpdatedFields() { + return updatedFields; + } + + /** + * Returns information about the updated fields of the {@linkplain org.bson.BsonType#ARRAY array} type + * when the changes are reported as truncations. + * + * @return {@code truncatedArrays}. + * There are no guarantees on the mutability of the {@code List} returned. + * @see #getUpdatedFields() + * @since 4.3 + */ + @NonNull + public List getTruncatedArrays() { + return truncatedArrays; + } + + /** + * A document containing a map that associates an update path to an array containing the path components used in the update document. + * + *
<p>
+     * This data can be used in combination with the other fields in an {@code UpdateDescription} to determine the actual path in the
+     * document that was updated. This is necessary in cases where a key contains dot-separated strings (i.e., {@code {"a.b": "c"}})
+     * or a document contains a numeric literal string key (i.e., {@code { "a": { "0": "a" } }}). Note that in this scenario the
+     * numeric key cannot be the top-level key, because {@code { "0": "a" }} is not ambiguous - update paths would simply be '0',
+     * which is unambiguous because BSON documents cannot have arrays at the top level.
</p>
+ *
<p>
+     * Each entry in the document maps an update path to an array which contains the actual path used when the document was updated. For
+     * example, given a document with the following shape {@code { "a": { "0": 0 } }} and an update of
+     * {@code { $inc: { "a.0": 1 } }}, disambiguatedPaths would look like the following:
+     * {@code { "a.0": ["a", "0"] }}.
</p>
+ *
<p>
+ * In each array, all elements will be returned as strings, except for array indices, which will be returned as 32-bit integers. + *
</p>
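A sketch of telling an array index apart from a literal numeric key; `description` is assumed, and "a.0" is a hypothetical update path.

```java
BsonDocument paths = description.getDisambiguatedPaths();
if (paths != null && paths.containsKey("a.0")) {
    for (BsonValue component : paths.getArray("a.0")) {
        boolean isArrayIndex = component.isInt32(); // string components are document keys
    }
}
```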
+ * + * @return the disambiguated paths as a BSON document, which may be null + * @since 4.8 + * @mongodb.server.release 6.1 + */ + @Nullable + public BsonDocument getDisambiguatedPaths() { + return disambiguatedPaths; + } + + /** + * @return {@code true} if and only if all of the following is true for the compared objects + *
    + *
+     * <ul>
+     *     <li>{@linkplain #getClass()} results are the same</li>
+     *     <li>{@linkplain #getRemovedFields()} results are {@linkplain Objects#equals(Object, Object) equal}</li>
+     *     <li>{@linkplain #getUpdatedFields()} results are {@linkplain Objects#equals(Object, Object) equal}</li>
+     *     <li>
+     *         {@linkplain #getTruncatedArrays()} results are {@linkplain Objects#equals(Object, Object) equal}
+     *         or both contain no data ({@code null} or {@linkplain List#isEmpty() empty}).
+     *     </li>
+     * </ul>
+     */
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        UpdateDescription that = (UpdateDescription) o;
+        return Objects.equals(removedFields, that.removedFields)
+                && Objects.equals(updatedFields, that.updatedFields)
+                && Objects.equals(truncatedArrays, that.truncatedArrays)
+                && Objects.equals(disambiguatedPaths, that.disambiguatedPaths);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(removedFields, updatedFields, truncatedArrays, disambiguatedPaths);
+    }
+
+    @Override
+    public String toString() {
+        return "UpdateDescription{"
+                + "removedFields=" + removedFields
+                + ", updatedFields=" + updatedFields
+                + ", truncatedArrays=" + truncatedArrays
+                + ", disambiguatedPaths=" + disambiguatedPaths
+                + "}";
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/client/model/changestream/package-info.java b/driver-core/src/main/com/mongodb/client/model/changestream/package-info.java
new file mode 100644
index 00000000000..cde58eb608f
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/changestream/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package contains classes for the change stream API
+ */
+@NonNullApi
+package com.mongodb.client.model.changestream;
+
+import com.mongodb.lang.NonNullApi;
diff --git a/driver-core/src/main/com/mongodb/client/model/densify/DateDensifyRange.java b/driver-core/src/main/com/mongodb/client/model/densify/DateDensifyRange.java
new file mode 100644
index 00000000000..50879c02026
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/densify/DateDensifyRange.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.mongodb.client.model.densify; + +import com.mongodb.annotations.Evolving; +import com.mongodb.client.model.MongoTimeUnit; + +import java.time.Instant; + +/** + * @see DensifyRange#fullRangeWithStep(long, MongoTimeUnit) + * @see DensifyRange#partitionRangeWithStep(long, MongoTimeUnit) + * @see DensifyRange#rangeWithStep(Instant, Instant, long, MongoTimeUnit) + * @mongodb.server.release 5.1 + * @since 4.7 + */ +@Evolving +public interface DateDensifyRange extends DensifyRange { +} diff --git a/driver-core/src/main/com/mongodb/client/model/densify/DensifyConstructibleBson.java b/driver-core/src/main/com/mongodb/client/model/densify/DensifyConstructibleBson.java new file mode 100644 index 00000000000..4fd2212dfc1 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/densify/DensifyConstructibleBson.java @@ -0,0 +1,59 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.densify; + +import com.mongodb.internal.client.model.AbstractConstructibleBson; +import org.bson.Document; +import org.bson.conversions.Bson; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.client.model.Util.sizeAtLeast; + +final class DensifyConstructibleBson extends AbstractConstructibleBson implements + NumberDensifyRange, DateDensifyRange, + DensifyOptions { + static final DensifyConstructibleBson EMPTY_IMMUTABLE = new DensifyConstructibleBson(AbstractConstructibleBson.EMPTY_IMMUTABLE); + + DensifyConstructibleBson(final Bson base) { + super(base); + } + + private DensifyConstructibleBson(final Bson base, final Document appended) { + super(base, appended); + } + + @Override + protected DensifyConstructibleBson newSelf(final Bson base, final Document appended) { + return new DensifyConstructibleBson(base, appended); + } + + @Override + public DensifyOptions partitionByFields(final Iterable fields) { + notNull("partitionByFields", fields); + return newMutated(doc -> { + if (sizeAtLeast(fields, 1)) { + doc.append("partitionByFields", fields); + } else { + doc.remove("partitionByFields"); + } + }); + } + + @Override + public DensifyOptions option(final String name, final Object value) { + return newAppended(notNull("name", name), notNull("value", value)); + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/densify/DensifyOptions.java b/driver-core/src/main/com/mongodb/client/model/densify/DensifyOptions.java new file mode 100644 index 00000000000..06524c74fe2 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/densify/DensifyOptions.java @@ -0,0 +1,87 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.densify; + +import com.mongodb.annotations.Evolving; +import com.mongodb.client.model.Aggregates; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; + +/** + * Represents optional fields of the {@code $densify} pipeline stage of an aggregation pipeline. + * + * @see Aggregates#densify(String, DensifyRange, DensifyOptions) + * @see Aggregates#densify(String, DensifyRange) + * @mongodb.server.release 5.1 + * @since 4.7 + */ +@Evolving +public interface DensifyOptions extends Bson { + /** + * Creates a new {@link DensifyOptions} with the specified {@code fields} to partition by. + * + * @param fields The fields to partition by. + * If no fields are specified, then the whole sequence is considered to be a single partition. + * @return A new {@link DensifyOptions}. + * @mongodb.driver.manual core/document/#dot-notation Dot notation + */ + default DensifyOptions partitionByFields(@Nullable final String... fields) { + return partitionByFields(fields == null ? emptyList() : asList(fields)); + } + + /** + * Creates a new {@link DensifyOptions} with the specified {@code fields} to partition by. + * + * @param fields The fields to partition by. + * If no fields are specified, then the whole sequence is considered to be a single partition. + * @return A new {@link DensifyOptions}. + * @mongodb.driver.manual core/document/#dot-notation Dot notation + */ + DensifyOptions partitionByFields(Iterable fields); + + /** + * Creates a new {@link DensifyOptions} with the specified option in situations when there is no builder method + * that better satisfies your needs. + * This method cannot be used to validate the syntax. + *
<p>
+     * <b>Example</b><br>
+ * The following code creates two functionally equivalent {@link DensifyOptions} objects, + * though they may not be {@linkplain Object#equals(Object) equal}. + *
<pre>
{@code
+     *  DensifyOptions options1 = DensifyOptions.densifyOptions()
+     *          .partitionByFields("fieldName");
+     *  DensifyOptions options2 = DensifyOptions.densifyOptions()
+     *          .option("partitionByFields", Collections.singleton("fieldName"));
+     * }</pre>
+ * + * @param name The option name. + * @param value The option value. + * @return A new {@link DensifyOptions}. + */ + DensifyOptions option(String name, Object value); + + /** + * Returns {@link DensifyOptions} that represents server defaults. + * + * @return {@link DensifyOptions} that represents server defaults. + */ + static DensifyOptions densifyOptions() { + return DensifyConstructibleBson.EMPTY_IMMUTABLE; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/densify/DensifyRange.java b/driver-core/src/main/com/mongodb/client/model/densify/DensifyRange.java new file mode 100644 index 00000000000..45c66a4be9f --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/densify/DensifyRange.java @@ -0,0 +1,164 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.densify; + +import com.mongodb.annotations.Evolving; +import com.mongodb.client.model.Aggregates; +import com.mongodb.client.model.MongoTimeUnit; +import org.bson.BsonDocument; +import org.bson.BsonInt64; +import org.bson.BsonString; +import org.bson.BsonType; +import org.bson.Document; +import org.bson.conversions.Bson; + +import java.time.Instant; + +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.Arrays.asList; + +/** + * A specification of how to compute the missing {@linkplain Aggregates#densify(String, DensifyRange, DensifyOptions) field} values + * for which new documents must be added. It specifies a half-closed interval of values with the lower bound being inclusive, and a step. + * The first potentially missing value within each interval is its lower bound, other values are computed by adding the step + * multiple times, until the result is out of the interval. Each time the step is added, the result is a potentially missing value for + * which a new document must be added if the sequence of documents that is being densified does not have a document + * with equal value of the field. + * + * @see Aggregates#densify(String, DensifyRange, DensifyOptions) + * @see Aggregates#densify(String, DensifyRange) + * @mongodb.server.release 5.1 + * @since 4.7 + */ +@Evolving +public interface DensifyRange extends Bson { + /** + * Returns a {@link DensifyRange} that represents an interval with the smallest + * BSON {@link BsonType#INT32 32-bit integer} / {@link BsonType#INT64 64-bit integer} / {@link BsonType#DOUBLE Double} / + * {@link BsonType#DECIMAL128 Decimal128} value of the {@linkplain Aggregates#densify(String, DensifyRange, DensifyOptions) field} + * in the sequence of documents being its lower bound, and the largest value being the upper bound. + * + * @param step The step. + * @return The requested {@link DensifyRange}. 
+ */ + static NumberDensifyRange fullRangeWithStep(final Number step) { + return new DensifyConstructibleBson(new Document("bounds", "full") + .append("step", notNull("step", step))); + } + + /** + * Returns a {@link DensifyRange} that represents an interval with the smallest + * BSON {@link BsonType#INT32 32-bit integer} / {@link BsonType#INT64 64-bit integer} / {@link BsonType#DOUBLE Double} / + * {@link BsonType#DECIMAL128 Decimal128} value of the {@linkplain Aggregates#densify(String, DensifyRange, DensifyOptions) field} + * in the {@linkplain DensifyOptions#partitionByFields(Iterable) partition} of documents being its lower bound, + * and the largest value being the upper bound. + * + * @param step The step. + * @return The requested {@link DensifyRange}. + */ + static NumberDensifyRange partitionRangeWithStep(final Number step) { + return new DensifyConstructibleBson(new Document("bounds", "partition") + .append("step", notNull("step", step))); + } + + /** + * Returns a {@link DensifyRange} that represents a single interval [l, u). + * + * @param l The lower bound. + * @param u The upper bound. + * @param step The step. + * @return The requested {@link DensifyRange}. + */ + static NumberDensifyRange rangeWithStep(final Number l, final Number u, final Number step) { + notNull("l", l); + notNull("u", u); + notNull("step", step); + return new DensifyConstructibleBson(new Document("bounds", asList(l, u)) + .append("step", notNull("step", step))); + } + + /** + * Returns a {@link DensifyRange} that represents an interval with the smallest BSON {@link BsonType#DATE_TIME Date} value + * of the {@linkplain Aggregates#densify(String, DensifyRange, DensifyOptions) field} + * in the sequence of documents being its lower bound, and the largest value being the upper bound. + * + * @param step The step. + * @param unit The unit in which the {@code step} is specified. + * @return The requested {@link DensifyRange}. + */ + static DateDensifyRange fullRangeWithStep(final long step, final MongoTimeUnit unit) { + notNull("unit", unit); + return new DensifyConstructibleBson(new BsonDocument("bounds", new BsonString("full")) + .append("step", new BsonInt64(step)) + .append("unit", new BsonString(unit.value()))); + } + + /** + * Returns a {@link DensifyRange} that represents an interval with the smallest BSON {@link BsonType#DATE_TIME Date} value + * of the {@linkplain Aggregates#densify(String, DensifyRange, DensifyOptions) field} + * in the {@linkplain DensifyOptions#partitionByFields(Iterable) partition} of documents being its lower bound, + * and the largest value being the upper bound. + * + * @param step The step. + * @param unit The unit in which the {@code step} is specified. + * @return The requested {@link DensifyRange}. + */ + static DateDensifyRange partitionRangeWithStep(final long step, final MongoTimeUnit unit) { + notNull("unit", unit); + return new DensifyConstructibleBson(new BsonDocument("bounds", new BsonString("partition")) + .append("step", new BsonInt64(step)) + .append("unit", new BsonString(unit.value()))); + } + + /** + * Returns a {@link DensifyRange} that represents a single interval [l, u). + * + * @param l The lower bound. + * @param u The upper bound. + * @param step The step. + * @param unit The unit in which the {@code step} is specified. + * @return The requested {@link DensifyRange}. 
+ */ + static DateDensifyRange rangeWithStep(final Instant l, final Instant u, final long step, final MongoTimeUnit unit) { + notNull("l", l); + notNull("u", u); + notNull("unit", unit); + return new DensifyConstructibleBson(new Document("bounds", asList(l, u)) + .append("step", step) + .append("unit", unit.value())); + } + + /** + * Creates a {@link DensifyRange} from a {@link Bson} in situations when there is no builder method that better satisfies your needs. + * This method cannot be used to validate the syntax. + *
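As a pipeline-level sketch of how these ranges combine with the {@code $densify} stage (an editorial illustration; the "timestamp" and "sensorId" field names and `collection` are assumed):

```java
List<Bson> pipeline = Arrays.asList(
        Aggregates.densify(
                "timestamp",
                DensifyRange.fullRangeWithStep(1, MongoTimeUnit.MINUTE),
                DensifyOptions.densifyOptions().partitionByFields("sensorId")));
collection.aggregate(pipeline).forEach(doc -> System.out.println(doc));
```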
<p>
+     * <b>Example</b><br>
+ * The following code creates two functionally equivalent {@link DensifyRange}s, + * though they may not be {@linkplain Object#equals(Object) equal}. + *
<pre>
{@code
+     *  DensifyRange range1 = DensifyRange.partitionRangeWithStep(
+     *          1, MongoTimeUnit.MINUTE);
+     *  DensifyRange range2 = DensifyRange.of(new Document("bounds", "partition")
+     *          .append("step", 1).append("unit", MongoTimeUnit.MINUTE.value()));
+     * }</pre>
+     *
+     * @param range A {@link Bson} representing the required {@link DensifyRange}.
+     * @return The requested {@link DensifyRange}.
+     */
+    static DensifyRange of(final Bson range) {
+        return new DensifyConstructibleBson(notNull("range", range));
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/client/model/densify/NumberDensifyRange.java b/driver-core/src/main/com/mongodb/client/model/densify/NumberDensifyRange.java
new file mode 100644
index 00000000000..bad0273adaf
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/densify/NumberDensifyRange.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.client.model.densify;
+
+import com.mongodb.annotations.Evolving;
+
+/**
+ * @see DensifyRange#fullRangeWithStep(Number)
+ * @see DensifyRange#partitionRangeWithStep(Number)
+ * @see DensifyRange#rangeWithStep(Number, Number, Number)
+ * @mongodb.server.release 5.1
+ * @since 4.7
+ */
+@Evolving
+public interface NumberDensifyRange extends DensifyRange {
+}
diff --git a/driver-core/src/main/com/mongodb/client/model/densify/package-info.java b/driver-core/src/main/com/mongodb/client/model/densify/package-info.java
new file mode 100644
index 00000000000..42cac44e432
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/densify/package-info.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @see com.mongodb.client.model.Aggregates#densify(java.lang.String, com.mongodb.client.model.densify.DensifyRange, com.mongodb.client.model.densify.DensifyOptions)
+ * @see com.mongodb.client.model.Aggregates#densify(java.lang.String, com.mongodb.client.model.densify.DensifyRange)
+ * @mongodb.server.release 5.1
+ * @since 4.7
+ */
+@NonNullApi
+package com.mongodb.client.model.densify;
+
+import com.mongodb.lang.NonNullApi;
diff --git a/driver-core/src/main/com/mongodb/client/model/fill/FillConstructibleBson.java b/driver-core/src/main/com/mongodb/client/model/fill/FillConstructibleBson.java
new file mode 100644
index 00000000000..b801b2749a1
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/fill/FillConstructibleBson.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.fill; + +import com.mongodb.internal.client.model.AbstractConstructibleBson; +import org.bson.Document; +import org.bson.conversions.Bson; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.client.model.Util.sizeAtLeast; + +final class FillConstructibleBson extends AbstractConstructibleBson implements FillOptions { + static final FillConstructibleBson EMPTY_IMMUTABLE = new FillConstructibleBson(AbstractConstructibleBson.EMPTY_IMMUTABLE); + + FillConstructibleBson(final Bson base) { + super(base); + } + + private FillConstructibleBson(final Bson base, final Document appended) { + super(base, appended); + } + + @Override + protected FillConstructibleBson newSelf(final Bson base, final Document appended) { + return new FillConstructibleBson(base, appended); + } + + @Override + public FillOptions partitionBy(final TExpression expression) { + notNull("expression", expression); + return newMutated(doc -> { + doc.remove("partitionByFields"); + doc.append("partitionBy", expression); + }); + } + + @Override + public FillOptions partitionByFields(final Iterable fields) { + notNull("fields", fields); + return newMutated(doc -> { + doc.remove("partitionBy"); + if (sizeAtLeast(fields, 1)) { + doc.append("partitionByFields", fields); + } else { + doc.remove("partitionByFields"); + } + }); + } + + @Override + public FillOptions sortBy(final Bson sortBy) { + return newAppended("sortBy", notNull("sortBy", sortBy)); + } + + @Override + public FillOptions option(final String name, final Object value) { + return newAppended(notNull("name", name), notNull("value", value)); + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/fill/FillConstructibleBsonElement.java b/driver-core/src/main/com/mongodb/client/model/fill/FillConstructibleBsonElement.java new file mode 100644 index 00000000000..c7d8f526bf1 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/fill/FillConstructibleBsonElement.java @@ -0,0 +1,39 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.client.model.fill; + +import com.mongodb.internal.client.model.AbstractConstructibleBsonElement; +import org.bson.conversions.Bson; + +final class FillConstructibleBsonElement extends AbstractConstructibleBsonElement implements + ValueFillOutputField, LocfFillOutputField, LinearFillOutputField { + FillConstructibleBsonElement(final String name, final Bson value) { + super(name, value); + } + + FillConstructibleBsonElement(final Bson baseElement) { + super(baseElement); + } + + private FillConstructibleBsonElement(final Bson baseElement, final Bson appendedElementValue) { + super(baseElement, appendedElementValue); + } + + @Override + protected FillConstructibleBsonElement newSelf(final Bson baseElement, final Bson appendedElementValue) { + return new FillConstructibleBsonElement(baseElement, appendedElementValue); + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/fill/FillOptions.java b/driver-core/src/main/com/mongodb/client/model/fill/FillOptions.java new file mode 100644 index 00000000000..b6b82252637 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/fill/FillOptions.java @@ -0,0 +1,107 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.fill; + +import com.mongodb.annotations.Evolving; +import com.mongodb.client.model.Aggregates; +import com.mongodb.client.model.Sorts; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +import java.util.List; + +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; + +/** + * Represents optional fields of the {@code $fill} pipeline stage of an aggregation pipeline. + * + * @see Aggregates#fill(FillOptions, Iterable) + * @see Aggregates#fill(FillOptions, FillOutputField, FillOutputField...) + * @mongodb.server.release 5.3 + * @since 4.7 + */ +@Evolving +public interface FillOptions extends Bson { + /** + * Creates a new {@link FillOptions} with the specified partitioning. + * Overrides {@link #partitionByFields(Iterable)}. + * + * @param expression The expression specifying how to partition the data. + * The syntax is the same as the syntax for {@code id} in {@link Aggregates#group(Object, List)}. + * @param The type of the {@code expression} expression. + * @return A new {@link FillOptions}. + */ + FillOptions partitionBy(TExpression expression); + + /** + * Creates a new {@link FillOptions} with the specified partitioning. + * Overrides {@link #partitionBy(Object)}. + * + * @param fields The fields to partition by. + * @return A new {@link FillOptions}. + * @mongodb.driver.manual core/document/#dot-notation + */ + default FillOptions partitionByFields(@Nullable final String... fields) { + return partitionByFields(fields == null ? emptyList() : asList(fields)); + } + + /** + * Creates a new {@link FillOptions} with the specified partitioning. + * Overrides {@link #partitionBy(Object)}. + * + * @param fields The fields to partition by. 
+ * @return A new {@link FillOptions}. + * @mongodb.driver.manual core/document/#dot-notation + */ + FillOptions partitionByFields(Iterable fields); + + /** + * Creates a new {@link FillOptions} with the specified sorting. + * + * @param sortBy The sort specification, which may be constructed via {@link Sorts}. + * @return A new {@link FillOptions}. + */ + FillOptions sortBy(Bson sortBy); + + /** + * Creates a new {@link FillOptions} with the specified option in situations when there is no builder method + * that better satisfies your needs. + * This method cannot be used to validate the syntax. + *
+     * <p>
+     * <i>Example</i><br>
+     * The following code creates two functionally equivalent {@link FillOptions} objects,
+     * though they may not be {@linkplain Object#equals(Object) equal}.
+     * <pre>{@code
+     *  FillOptions options1 = FillOptions.fillOptions().partitionByFields("fieldName");
+     *  FillOptions options2 = FillOptions.fillOptions().option("partitionByFields", Collections.singleton("fieldName"));
+     * }</pre>
+ * + * @param name The option name. + * @param value The option value. + * @return A new {@link FillOptions}. + */ + FillOptions option(String name, Object value); + + /** + * Returns {@link FillOptions} that represents server defaults. + * + * @return {@link FillOptions} that represents server defaults. + */ + static FillOptions fillOptions() { + return FillConstructibleBson.EMPTY_IMMUTABLE; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/fill/FillOutputField.java b/driver-core/src/main/com/mongodb/client/model/fill/FillOutputField.java new file mode 100644 index 00000000000..5bcc38325a1 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/fill/FillOutputField.java @@ -0,0 +1,96 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.fill; + +import com.mongodb.annotations.Evolving; +import com.mongodb.client.model.Aggregates; +import com.mongodb.client.model.WindowOutputFields; +import org.bson.Document; +import org.bson.conversions.Bson; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * The core part of the {@code $fill} pipeline stage of an aggregation pipeline. + * A pair of an expression/method and a path to a field to be filled with evaluation results of the expression/method. + * + * @see Aggregates#fill(FillOptions, Iterable) + * @see Aggregates#fill(FillOptions, FillOutputField, FillOutputField...) + * @mongodb.server.release 5.3 + * @since 4.7 + */ +@Evolving +public interface FillOutputField extends Bson { + /** + * Returns a {@link FillOutputField} that uses the specified {@code expression}. + * + * @param field The field to fill. + * @param expression The expression. + * @param The {@code expression} type. + * @return The requested {@link FillOutputField}. + * @mongodb.driver.manual core/document/#dot-notation Dot notation + */ + static ValueFillOutputField value(final String field, TExpression expression) { + return new FillConstructibleBsonElement(notNull("field", field), + new Document("value", (notNull("expression", expression)))); + } + + /** + * Returns a {@link FillOutputField} that uses the {@link WindowOutputFields#locf(String, Object) locf} method. + * + * @param field The field to fill. + * @return The requested {@link FillOutputField}. + * @mongodb.driver.manual core/document/#dot-notation Dot notation + */ + static LocfFillOutputField locf(final String field) { + return new FillConstructibleBsonElement(notNull("field", field), + new Document("method", "locf")); + } + + /** + * Returns a {@link FillOutputField} that uses the {@link WindowOutputFields#linearFill(String, Object) linear} method. + *
+     * <p>
+     * {@linkplain FillOptions#sortBy(Bson) Sorting} is required.</p>
+ * + * @param field The field to fill. + * @return The requested {@link FillOutputField}. + * @mongodb.driver.manual core/document/#dot-notation Dot notation + */ + static LinearFillOutputField linear(final String field) { + return new FillConstructibleBsonElement(notNull("field", field), + new Document("method", "linear")); + } + + /** + * Creates a {@link FillOutputField} from a {@link Bson} in situations when there is no builder method + * that better satisfies your needs. + * This method cannot be used to validate the syntax. + *
+     * <p>
+     * <i>Example</i><br>
+     * The following code creates two functionally equivalent {@link FillOutputField}s,
+     * though they may not be {@linkplain Object#equals(Object) equal}.
+     * <pre>{@code
+     *  FillOutputField field1 = FillOutputField.locf("fieldName");
+     *  FillOutputField field2 = FillOutputField.of(new Document("fieldName", new Document("method", "locf")));
+     * }</pre>
+ * + * @param fill A {@link Bson} representing the required {@link FillOutputField}. + * @return The requested {@link FillOutputField}. + */ + static FillOutputField of(final Bson fill) { + return new FillConstructibleBsonElement(notNull("fill", fill)); + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/fill/LinearFillOutputField.java b/driver-core/src/main/com/mongodb/client/model/fill/LinearFillOutputField.java new file mode 100644 index 00000000000..1a05429c363 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/fill/LinearFillOutputField.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.fill; + +import com.mongodb.annotations.Evolving; + +/** + * @see FillOutputField#linear(String) + * @mongodb.server.release 5.3 + * @since 4.7 + */ +@Evolving +public interface LinearFillOutputField extends FillOutputField { +} diff --git a/driver-core/src/main/com/mongodb/client/model/fill/LocfFillOutputField.java b/driver-core/src/main/com/mongodb/client/model/fill/LocfFillOutputField.java new file mode 100644 index 00000000000..a634671013d --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/fill/LocfFillOutputField.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.fill; + +import com.mongodb.annotations.Evolving; + +/** + * @see FillOutputField#locf(String) + * @mongodb.server.release 5.3 + * @since 4.7 + */ +@Evolving +public interface LocfFillOutputField extends FillOutputField { +} diff --git a/driver-core/src/main/com/mongodb/client/model/fill/ValueFillOutputField.java b/driver-core/src/main/com/mongodb/client/model/fill/ValueFillOutputField.java new file mode 100644 index 00000000000..8b73127e475 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/fill/ValueFillOutputField.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
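Taken together, the output-field factories and FillOptions compose into a complete $fill stage via the Aggregates#fill overloads referenced above. A sketch, honoring the documented requirement that linear(...) needs a sort order (field names are illustrative):

    import com.mongodb.client.model.Aggregates;
    import com.mongodb.client.model.Sorts;
    import com.mongodb.client.model.fill.FillOptions;
    import com.mongodb.client.model.fill.FillOutputField;
    import org.bson.conversions.Bson;

    // linear interpolation requires ordered input; locf carries the last
    // observed value forward into missing fields
    Bson fillStage = Aggregates.fill(
            FillOptions.fillOptions().sortBy(Sorts.ascending("time")),
            FillOutputField.linear("temperature"),
            FillOutputField.locf("status"));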
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.fill; + +import com.mongodb.annotations.Evolving; + +/** + * @see FillOutputField#value(String, Object) + * @mongodb.server.release 5.3 + * @since 4.7 + */ +@Evolving +public interface ValueFillOutputField extends FillOutputField { +} diff --git a/driver-core/src/main/com/mongodb/client/model/fill/package-info.java b/driver-core/src/main/com/mongodb/client/model/fill/package-info.java new file mode 100644 index 00000000000..0dc393c196b --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/fill/package-info.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @see com.mongodb.client.model.Aggregates#fill(com.mongodb.client.model.fill.FillOptions, java.lang.Iterable) + * @see com.mongodb.client.model.Aggregates#fill( + * com.mongodb.client.model.fill.FillOptions, FillOutputField, FillOutputField...) + * @mongodb.server.release 5.3 + * @since 4.7 + */ +@NonNullApi +package com.mongodb.client.model.fill; + +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/client/model/geojson/CoordinateReferenceSystem.java b/driver-core/src/main/com/mongodb/client/model/geojson/CoordinateReferenceSystem.java new file mode 100644 index 00000000000..3e03bd1761c --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/geojson/CoordinateReferenceSystem.java @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.geojson; + +import com.mongodb.annotations.Immutable; + +/** + * A GeoJSON Coordinate Reference System (CRS). + * + * @since 3.1 + */ +@Immutable +public abstract class CoordinateReferenceSystem { + + /** + * Gets the type of this Coordinate Reference System. + * + * @return the type + */ + public abstract CoordinateReferenceSystemType getType(); +} diff --git a/driver-core/src/main/com/mongodb/client/model/geojson/CoordinateReferenceSystemType.java b/driver-core/src/main/com/mongodb/client/model/geojson/CoordinateReferenceSystemType.java new file mode 100644 index 00000000000..29ab2d8f2d1 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/geojson/CoordinateReferenceSystemType.java @@ -0,0 +1,49 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model.geojson;
+
+/**
+ * An enumeration of the GeoJSON coordinate reference system types.
+ *
+ * @since 3.1
+ */
+public enum CoordinateReferenceSystemType {
+    /**
+     * A coordinate reference system that is specified by name
+     */
+    NAME("name"),
+
+    /**
+     * A coordinate reference system that is specified by a dereferenceable URI
+     */
+    LINK("link");
+
+    /**
+     * Gets the GeoJSON-defined name for the type.
+     *
+     * @return the GeoJSON-defined type name
+     */
+    public String getTypeName() {
+        return typeName;
+    }
+
+    private final String typeName;
+
+    CoordinateReferenceSystemType(final String typeName) {
+        this.typeName = typeName;
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/client/model/geojson/GeoJsonObjectType.java b/driver-core/src/main/com/mongodb/client/model/geojson/GeoJsonObjectType.java
new file mode 100644
index 00000000000..d7392f9116d
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/geojson/GeoJsonObjectType.java
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.geojson; + +import com.mongodb.client.model.geojson.codecs.GeoJsonCodecProvider; +import com.mongodb.lang.Nullable; +import org.bson.codecs.Codec; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.configuration.CodecRegistries; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.json.JsonWriter; +import org.bson.json.JsonWriterSettings; + +import java.io.StringWriter; +import java.util.Objects; + +/** + * An abstract class for representations of GeoJSON geometry objects. + * + * @since 3.1 + */ +public abstract class Geometry { + + private static final CodecRegistry REGISTRY = CodecRegistries.fromProviders(new GeoJsonCodecProvider()); + + private final CoordinateReferenceSystem coordinateReferenceSystem; + + /** + * Construct an instance with no specified coordinate reference system. + * + */ + protected Geometry() { + this(null); + } + + /** + * Construct an instance with the specified coordinate reference system. + * + * @param coordinateReferenceSystem the coordinate reference system + */ + protected Geometry(@Nullable final CoordinateReferenceSystem coordinateReferenceSystem) { + this.coordinateReferenceSystem = coordinateReferenceSystem; + } + + /** + * Gets the GeoJSON object type. + * + * @return the type + */ + public abstract GeoJsonObjectType getType(); + + /** + * Converts to GeoJSON representation + * + * @return the GeoJSON representation + */ + @SuppressWarnings({"unchecked", "rawtypes"}) + public String toJson() { + StringWriter stringWriter = new StringWriter(); + JsonWriter writer = new JsonWriter(stringWriter, JsonWriterSettings.builder().build()); + Codec codec = getRegistry().get(getClass()); + codec.encode(writer, this, EncoderContext.builder().build()); + return stringWriter.toString(); + } + + static CodecRegistry getRegistry() { + return REGISTRY; + } + + /** + * Gets the coordinate reference system, which may be null + * + * @return the possibly-null coordinate reference system + */ + @Nullable + public CoordinateReferenceSystem getCoordinateReferenceSystem() { + return coordinateReferenceSystem; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Geometry geometry = (Geometry) o; + + if (!Objects.equals(coordinateReferenceSystem, geometry.coordinateReferenceSystem)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return coordinateReferenceSystem != null ? coordinateReferenceSystem.hashCode() : 0; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/geojson/GeometryCollection.java b/driver-core/src/main/com/mongodb/client/model/geojson/GeometryCollection.java new file mode 100644 index 00000000000..ac1be2a86b3 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/geojson/GeometryCollection.java @@ -0,0 +1,108 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
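Geometry#toJson above encodes any concrete geometry through the private GeoJSON codec registry. A small sketch (the coordinates are illustrative, and the exact whitespace of the emitted JSON is not guaranteed here):

    import com.mongodb.client.model.geojson.Point;
    import com.mongodb.client.model.geojson.Position;

    Point point = new Point(new Position(-73.98, 40.47));
    // yields GeoJSON along the lines of {"type": "Point", "coordinates": [-73.98, 40.47]}
    String json = point.toJson();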
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.geojson; + +import com.mongodb.lang.Nullable; + +import java.util.Collections; +import java.util.List; + +import static com.mongodb.assertions.Assertions.doesNotContainNull; +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A representation of a GeoJSON GeometryCollection. + * + * @since 3.1 + */ +public final class GeometryCollection extends Geometry { + private final List geometries; + + /** + * Construct an instance with the given list of Geometry objects + * + * @param geometries the list of Geometry objects + */ + public GeometryCollection(final List geometries) { + this(null, geometries); + } + + /** + * Construct an instance with the given list of Geometry objects + * + * @param coordinateReferenceSystem the coordinate reference system + * @param geometries the list of Geometry objects + */ + public GeometryCollection(@Nullable final CoordinateReferenceSystem coordinateReferenceSystem, + final List geometries) { + super(coordinateReferenceSystem); + notNull("geometries", geometries); + doesNotContainNull("geometries", geometries); + this.geometries = Collections.unmodifiableList(geometries); + } + + @Override + public GeoJsonObjectType getType() { + return GeoJsonObjectType.GEOMETRY_COLLECTION; + } + + /** + * Gets the list of Geometry objects in this collection. + * + * @return the list + */ + public List getGeometries() { + return geometries; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + GeometryCollection that = (GeometryCollection) o; + + if (!geometries.equals(that.geometries)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + geometries.hashCode(); + return result; + } + + @Override + public String toString() { + CoordinateReferenceSystem coordinateReferenceSystem = getCoordinateReferenceSystem(); + return "GeometryCollection{" + + "geometries=" + geometries + + ((coordinateReferenceSystem == null) ? "" : ", coordinateReferenceSystem=" + coordinateReferenceSystem) + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/geojson/LineString.java b/driver-core/src/main/com/mongodb/client/model/geojson/LineString.java new file mode 100644 index 00000000000..9545b2a83a3 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/geojson/LineString.java @@ -0,0 +1,111 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
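Constructing the GeometryCollection shown above from heterogeneous geometries; the explicit type witness on Arrays.asList keeps the element type at Geometry (the shapes are illustrative):

    import java.util.Arrays;
    import com.mongodb.client.model.geojson.Geometry;
    import com.mongodb.client.model.geojson.GeometryCollection;
    import com.mongodb.client.model.geojson.LineString;
    import com.mongodb.client.model.geojson.Point;
    import com.mongodb.client.model.geojson.Position;

    GeometryCollection collection = new GeometryCollection(Arrays.<Geometry>asList(
            new Point(new Position(0d, 0d)),
            new LineString(Arrays.asList(new Position(0d, 0d), new Position(1d, 1d)))));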
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.geojson; + +import com.mongodb.lang.Nullable; + +import java.util.Collections; +import java.util.List; + +import static com.mongodb.assertions.Assertions.doesNotContainNull; +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A representation of a GeoJSON LineString. + * + * @since 3.1 + */ +public final class LineString extends Geometry { + + private final List coordinates; + + /** + * Construct an instance with the given coordinates. + * + * @param coordinates the coordinates + */ + public LineString(final List coordinates) { + this(null, coordinates); + } + + /** + * Construct an instance with the given coordinates and coordinate reference system. + * + * @param coordinateReferenceSystem the coordinate reference system + * @param coordinates the coordinates + */ + public LineString(@Nullable final CoordinateReferenceSystem coordinateReferenceSystem, + final List coordinates) { + super(coordinateReferenceSystem); + notNull("coordinates", coordinates); + isTrueArgument("coordinates must contain at least two positions", coordinates.size() >= 2); + doesNotContainNull("coordinates", coordinates); + + this.coordinates = Collections.unmodifiableList(coordinates); + } + + @Override + public GeoJsonObjectType getType() { + return GeoJsonObjectType.LINE_STRING; + } + + /** + * Gets the GeoJSON coordinates of this LineString. + * + * @return the coordinates + */ + public List getCoordinates() { + return coordinates; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + if (!super.equals(o)) { + return false; + } + + LineString lineString = (LineString) o; + + if (!coordinates.equals(lineString.coordinates)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = super.hashCode(); + return 31 * result + coordinates.hashCode(); + } + + @Override + public String toString() { + return "LineString{" + + "coordinates=" + coordinates + + ((getCoordinateReferenceSystem() == null) ? "" : ", coordinateReferenceSystem=" + getCoordinateReferenceSystem()) + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/geojson/MultiLineString.java b/driver-core/src/main/com/mongodb/client/model/geojson/MultiLineString.java new file mode 100644 index 00000000000..4c4c1cb0287 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/geojson/MultiLineString.java @@ -0,0 +1,114 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
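The LineString constructor above enforces the GeoJSON requirement of at least two positions; fewer fails the isTrueArgument check with an IllegalArgumentException:

    import java.util.Arrays;
    import com.mongodb.client.model.geojson.LineString;
    import com.mongodb.client.model.geojson.Position;

    LineString valid = new LineString(Arrays.asList(new Position(0d, 0d), new Position(3d, 4d)));

    // a single position fails validation with an IllegalArgumentException:
    // new LineString(Arrays.asList(new Position(0d, 0d)));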
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.geojson; + +import com.mongodb.lang.Nullable; + +import java.util.Collections; +import java.util.List; + +import static com.mongodb.assertions.Assertions.doesNotContainNull; +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A representation of a GeoJSON MultiLineString. + * + * @since 3.1 + */ +public final class MultiLineString extends Geometry { + + private final List> coordinates; + + /** + * Construct an instance with the given coordinates. + * + * @param coordinates the coordinates of each line + */ + public MultiLineString(final List> coordinates) { + this(null, coordinates); + } + + /** + * Construct an instance with the given coordinates and coordinate reference system. + * + * @param coordinateReferenceSystem the coordinate reference system + * @param coordinates the coordinates of each line + */ + public MultiLineString(@Nullable final CoordinateReferenceSystem coordinateReferenceSystem, final List> coordinates) { + super(coordinateReferenceSystem); + + notNull("coordinates", coordinates); + + for (List line : coordinates) { + notNull("line", line); + doesNotContainNull("line", line); + } + + this.coordinates = Collections.unmodifiableList(coordinates); + } + + @Override + public GeoJsonObjectType getType() { + return GeoJsonObjectType.MULTI_LINE_STRING; + } + + /** + * Gets the GeoJSON coordinates of this MultiLineString + * + * @return the coordinates + */ + public List> getCoordinates() { + return coordinates; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + if (!super.equals(o)) { + return false; + } + + MultiLineString polygon = (MultiLineString) o; + + if (!coordinates.equals(polygon.coordinates)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + coordinates.hashCode(); + return result; + } + + @Override + public String toString() { + return "MultiLineString{" + + "coordinates=" + coordinates + + ((getCoordinateReferenceSystem() == null) ? "" : ", coordinateReferenceSystem=" + getCoordinateReferenceSystem()) + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/geojson/MultiPoint.java b/driver-core/src/main/com/mongodb/client/model/geojson/MultiPoint.java new file mode 100644 index 00000000000..dca5b73c8a9 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/geojson/MultiPoint.java @@ -0,0 +1,108 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
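MultiLineString's coordinates above are a list of lines, each itself a list of positions, hence the nested-list constructor argument (coordinates are illustrative):

    import java.util.Arrays;
    import com.mongodb.client.model.geojson.MultiLineString;
    import com.mongodb.client.model.geojson.Position;

    // two lines, each given as its own list of positions
    MultiLineString lines = new MultiLineString(Arrays.asList(
            Arrays.asList(new Position(0d, 0d), new Position(1d, 1d)),
            Arrays.asList(new Position(2d, 2d), new Position(3d, 3d))));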
+ */ + +package com.mongodb.client.model.geojson; + +import com.mongodb.lang.Nullable; + +import java.util.Collections; +import java.util.List; + +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A representation of a GeoJSON MultiPoint. + * + * @since 3.1 + */ +public final class MultiPoint extends Geometry { + + private final List coordinates; + + /** + * Construct an instance with the given coordinates. + * + * @param coordinates the coordinates + */ + public MultiPoint(final List coordinates) { + this(null, coordinates); + } + + /** + * Construct an instance with the given coordinates and coordinate reference system. + * + * @param coordinateReferenceSystem the coordinate reference system + * @param coordinates the coordinates + */ + public MultiPoint(@Nullable final CoordinateReferenceSystem coordinateReferenceSystem, final List coordinates) { + super(coordinateReferenceSystem); + notNull("coordinates", coordinates); + isTrueArgument("coordinates contains only non-null positions", !coordinates.contains(null)); + + this.coordinates = Collections.unmodifiableList(coordinates); + } + + @Override + public GeoJsonObjectType getType() { + return GeoJsonObjectType.MULTI_POINT; + } + + /** + * Gets the GeoJSON coordinates of this MultiPoint. + * + * @return the coordinates + */ + public List getCoordinates() { + return coordinates; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + if (!super.equals(o)) { + return false; + } + + MultiPoint multiPoint = (MultiPoint) o; + + if (!coordinates.equals(multiPoint.coordinates)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = super.hashCode(); + return 31 * result + coordinates.hashCode(); + } + + @Override + public String toString() { + return "MultiPoint{" + + "coordinates=" + coordinates + + ((getCoordinateReferenceSystem() == null) ? "" : ", coordinateReferenceSystem=" + getCoordinateReferenceSystem()) + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/geojson/MultiPolygon.java b/driver-core/src/main/com/mongodb/client/model/geojson/MultiPolygon.java new file mode 100644 index 00000000000..75f50141501 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/geojson/MultiPolygon.java @@ -0,0 +1,107 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.geojson; + +import com.mongodb.lang.Nullable; + +import java.util.Collections; +import java.util.List; + +import static com.mongodb.assertions.Assertions.doesNotContainNull; +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A representation of a GeoJSON MultiPolygon. + * + * @since 3.1 + */ +public final class MultiPolygon extends Geometry { + + private final List coordinates; + + /** + * Construct an instance. 
+ * + * @param coordinates the coordinates + */ + public MultiPolygon(final List coordinates) { + this(null, coordinates); + } + + /** + * Construct an instance. + * + * @param coordinateReferenceSystem the coordinate reference system + * @param coordinates the coordinates + */ + public MultiPolygon(@Nullable final CoordinateReferenceSystem coordinateReferenceSystem, final List coordinates) { + super(coordinateReferenceSystem); + notNull("coordinates", coordinates); + doesNotContainNull("coordinates", coordinates); + this.coordinates = Collections.unmodifiableList(coordinates); + } + + @Override + public GeoJsonObjectType getType() { + return GeoJsonObjectType.MULTI_POLYGON; + } + + /** + * Gets the coordinates. + * + * @return the coordinates + */ + public List getCoordinates() { + return coordinates; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + + MultiPolygon that = (MultiPolygon) o; + + if (!coordinates.equals(that.coordinates)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + coordinates.hashCode(); + return result; + } + + @Override + public String toString() { + return "MultiPolygon{" + + "coordinates=" + coordinates + + ((getCoordinateReferenceSystem() == null) ? "" : ", coordinateReferenceSystem=" + getCoordinateReferenceSystem()) + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/geojson/NamedCoordinateReferenceSystem.java b/driver-core/src/main/com/mongodb/client/model/geojson/NamedCoordinateReferenceSystem.java new file mode 100644 index 00000000000..041a409a629 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/geojson/NamedCoordinateReferenceSystem.java @@ -0,0 +1,106 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.geojson; + +import com.mongodb.annotations.Immutable; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A GeoJSON named Coordinate Reference System. + * + * @since 3.1 + */ +@Immutable +public final class NamedCoordinateReferenceSystem extends CoordinateReferenceSystem { + + /** + * The EPSG:4326 Coordinate Reference System. + */ + public static final NamedCoordinateReferenceSystem EPSG_4326 = + new NamedCoordinateReferenceSystem("EPSG:4326"); + + /** + * The urn:ogc:def:crs:OGC:1.3:CRS84 Coordinate Reference System + */ + public static final NamedCoordinateReferenceSystem CRS_84 = + new NamedCoordinateReferenceSystem("urn:ogc:def:crs:OGC:1.3:CRS84"); + + /** + * A custom MongoDB EPSG:4326 Coordinate Reference System that uses a strict counter-clockwise winding order. 
+ * + * @mongodb.driver.manual reference/operator/query/geometry/ Strict Winding + */ + public static final NamedCoordinateReferenceSystem EPSG_4326_STRICT_WINDING = + new NamedCoordinateReferenceSystem("urn:x-mongodb:crs:strictwinding:EPSG:4326"); + + private final String name; + + /** + * Construct an instance + * + * @param name the name + */ + public NamedCoordinateReferenceSystem(final String name) { + this.name = notNull("name", name); + + } + + @Override + public CoordinateReferenceSystemType getType() { + return CoordinateReferenceSystemType.NAME; + } + + /** + * Gets the name of this Coordinate Reference System. + * + * @return the name + */ + public String getName() { + return name; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + NamedCoordinateReferenceSystem that = (NamedCoordinateReferenceSystem) o; + + if (!name.equals(that.name)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return name.hashCode(); + } + + @Override + public String toString() { + return "NamedCoordinateReferenceSystem{" + + "name='" + name + '\'' + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/geojson/Point.java b/driver-core/src/main/com/mongodb/client/model/geojson/Point.java new file mode 100644 index 00000000000..8d3e196d18a --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/geojson/Point.java @@ -0,0 +1,109 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.geojson; + +import com.mongodb.lang.Nullable; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A representation of a GeoJSON Point. + * + * @since 3.1 + */ +public final class Point extends Geometry { + private final Position coordinate; + + /** + * Construct an instance with the given coordinate. + * + * @param coordinate the non-null coordinate of the point + */ + public Point(final Position coordinate) { + this(null, coordinate); + } + + /** + * Construct an instance with the given coordinate and coordinate reference system. + * + * @param coordinateReferenceSystem the coordinate reference system + * @param coordinate the non-null coordinate of the point + */ + public Point(@Nullable final CoordinateReferenceSystem coordinateReferenceSystem, final Position coordinate) { + super(coordinateReferenceSystem); + this.coordinate = notNull("coordinates", coordinate); + } + + @Override + public GeoJsonObjectType getType() { + return GeoJsonObjectType.POINT; + } + + /** + * Gets the GeoJSON coordinates of this point. + * + * @return the coordinates + */ + public Position getCoordinates() { + return coordinate; + } + + /** + * Gets the position of this point. 
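The named CRS constants combine with the Point constructors above as follows; EPSG_4326_STRICT_WINDING is the MongoDB-specific variant documented to use strict counter-clockwise winding (coordinates are illustrative):

    import com.mongodb.client.model.geojson.NamedCoordinateReferenceSystem;
    import com.mongodb.client.model.geojson.Point;
    import com.mongodb.client.model.geojson.Position;

    // a point that opts into the strict-winding EPSG:4326 CRS
    Point point = new Point(NamedCoordinateReferenceSystem.EPSG_4326_STRICT_WINDING,
            new Position(-73.98, 40.47));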
+ * + * @return the position + */ + public Position getPosition(){ + return coordinate; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + if (!super.equals(o)) { + return false; + } + + Point point = (Point) o; + + if (!coordinate.equals(point.coordinate)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = super.hashCode(); + return 31 * result + coordinate.hashCode(); + } + + @Override + public String toString() { + return "Point{" + + "coordinate=" + coordinate + + ((getCoordinateReferenceSystem() == null) ? "" : ", coordinateReferenceSystem=" + getCoordinateReferenceSystem()) + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/geojson/Polygon.java b/driver-core/src/main/com/mongodb/client/model/geojson/Polygon.java new file mode 100644 index 00000000000..58b48a64168 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/geojson/Polygon.java @@ -0,0 +1,145 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.geojson; + +import com.mongodb.lang.Nullable; + +import java.util.List; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A representation of a GeoJSON Polygon. + * + * @since 3.1 + */ +public final class Polygon extends Geometry { + + private final PolygonCoordinates coordinates; + + /** + * Construct an instance with the given coordinates. + * + * @param exterior the exterior ring of the polygon + * @param holes optional interior rings of the polygon + */ + @SafeVarargs + public Polygon(final List exterior, final List... holes) { + this(new PolygonCoordinates(exterior, holes)); + } + + /** + * Construct an instance with the given coordinates. + * + * @param exterior the exterior ring of the polygon + * @param holes optional interior rings of the polygon + * @since 4.3 + */ + public Polygon(final List exterior, final List> holes) { + this(new PolygonCoordinates(exterior, holes)); + } + + /** + * Construct an instance with the given coordinates. + * + * @param coordinates the coordinates + */ + public Polygon(final PolygonCoordinates coordinates) { + this(null, coordinates); + } + + /** + * Construct an instance with the given coordinates and coordinate reference system. 
+ * + * @param coordinateReferenceSystem the coordinate reference system + * @param coordinates the coordinates + */ + public Polygon(@Nullable final CoordinateReferenceSystem coordinateReferenceSystem, final PolygonCoordinates coordinates) { + super(coordinateReferenceSystem); + this.coordinates = notNull("coordinates", coordinates); + } + + @Override + public GeoJsonObjectType getType() { + return GeoJsonObjectType.POLYGON; + } + + /** + * Gets the GeoJSON coordinates of the polygon + * + * @return the coordinates, which must have at least one element + */ + public PolygonCoordinates getCoordinates() { + return coordinates; + } + + /** + * Gets the exterior coordinates. + * + * @return the exterior coordinates + */ + public List getExterior() { + return coordinates.getExterior(); + } + + /** + * Get the holes in this polygon. + * + * @return the possibly-empty list of holes + */ + public List> getHoles() { + return coordinates.getHoles(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + if (!super.equals(o)) { + return false; + } + + Polygon polygon = (Polygon) o; + + if (!coordinates.equals(polygon.coordinates)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + coordinates.hashCode(); + return result; + } + + @Override + public String toString() { + return "Polygon{" + + "exterior=" + coordinates.getExterior() + + (coordinates.getHoles().isEmpty() ? "" : ", holes=" + coordinates.getHoles()) + + ((getCoordinateReferenceSystem() == null) ? "" : ", coordinateReferenceSystem=" + getCoordinateReferenceSystem()) + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/geojson/PolygonCoordinates.java b/driver-core/src/main/com/mongodb/client/model/geojson/PolygonCoordinates.java new file mode 100644 index 00000000000..d032d25761c --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/geojson/PolygonCoordinates.java @@ -0,0 +1,129 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.geojson; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import static com.mongodb.assertions.Assertions.doesNotContainNull; +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; + +/** + * Coordinates for a GeoJSON Polygon. + * + * @since 3.1 + */ +public final class PolygonCoordinates { + private final List exterior; + private final List> holes; + + /** + * Construct an instance. + * + * @param exterior the exterior ring of the polygon + * @param holes optional interior rings of the polygon + */ + @SafeVarargs + @SuppressWarnings("varargs") + public PolygonCoordinates(final List exterior, final List... 
holes) { + this(exterior, Arrays.asList(holes)); + } + + /** + * Construct an instance. + * + * @param exterior the exterior ring of the polygon + * @param holes optional interior rings of the polygon + * @since 4.3 + */ + public PolygonCoordinates(final List exterior, final List> holes) { + notNull("exteriorRing", exterior); + doesNotContainNull("exterior", exterior); + isTrueArgument("ring must contain at least four positions", exterior.size() >= 4); + isTrueArgument("first and last position must be the same", exterior.get(0).equals(exterior.get(exterior.size() - 1))); + + this.exterior = Collections.unmodifiableList(exterior); + + List> holesList = new ArrayList<>(holes.size()); + for (List hole : holes) { + notNull("interiorRing", hole); + doesNotContainNull("hole", hole); + isTrueArgument("ring must contain at least four positions", hole.size() >= 4); + isTrueArgument("first and last position must be the same", hole.get(0).equals(hole.get(hole.size() - 1))); + holesList.add(Collections.unmodifiableList(hole)); + } + + this.holes = Collections.unmodifiableList(holesList); + } + + /** + * Gets the exterior of the polygon. + * + * @return the exterior of the polygon + */ + public List getExterior() { + return exterior; + } + + /** + * Gets the holes in the polygon. + * + * @return the holes in the polygon, which will not be null but may be empty + */ + public List> getHoles() { + return holes; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + PolygonCoordinates that = (PolygonCoordinates) o; + + if (!exterior.equals(that.exterior)) { + return false; + } + if (!holes.equals(that.holes)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = exterior.hashCode(); + result = 31 * result + holes.hashCode(); + return result; + } + + @Override + public String toString() { + return "PolygonCoordinates{" + + "exterior=" + exterior + + (holes.isEmpty() ? "" : ", holes=" + holes) + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/geojson/Position.java b/driver-core/src/main/com/mongodb/client/model/geojson/Position.java new file mode 100644 index 00000000000..f7ee469674d --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/geojson/Position.java @@ -0,0 +1,104 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.geojson; + +import com.mongodb.annotations.Immutable; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static com.mongodb.assertions.Assertions.doesNotContainNull; +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A representation of a GeoJSON Position. 
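PolygonCoordinates above validates every ring: no nulls, at least four positions, and a first position equal to the last, i.e. a closed ring. A minimal valid polygon (a triangle, with the closing position repeated):

    import java.util.Arrays;
    import com.mongodb.client.model.geojson.Polygon;
    import com.mongodb.client.model.geojson.Position;

    // four positions with first == last, satisfying the closed-ring checks
    Polygon triangle = new Polygon(Arrays.asList(
            new Position(0d, 0d), new Position(4d, 0d),
            new Position(4d, 4d), new Position(0d, 0d)));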
+ * + * @since 3.1 + */ +@Immutable +public final class Position { + private final List values; + + /** + * Construct an instance. + * + * @param values the non-null values + */ + public Position(final List values) { + notNull("values", values); + doesNotContainNull("values", values); + isTrueArgument("value must contain at least two elements", values.size() >= 2); + this.values = Collections.unmodifiableList(values); + } + + /** + * Construct an instance. + * + * @param first the first value + * @param second the second value + * @param remaining the remaining values + */ + public Position(final double first, final double second, final double... remaining) { + List values = new ArrayList<>(); + values.add(first); + values.add(second); + for (double cur : remaining) { + values.add(cur); + } + this.values = Collections.unmodifiableList(values); + } + + /** + * Gets the values of this position + * @return the values of the position + */ + public List getValues() { + return values; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Position that = (Position) o; + + if (!values.equals(that.values)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return values.hashCode(); + } + + @Override + public String toString() { + return "Position{" + + "values=" + values + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/geojson/codecs/AbstractGeometryCodec.java b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/AbstractGeometryCodec.java new file mode 100644 index 00000000000..553393e2776 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/AbstractGeometryCodec.java @@ -0,0 +1,53 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model.geojson.codecs; + +import com.mongodb.client.model.geojson.Geometry; +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.codecs.Codec; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.configuration.CodecRegistry; + +import static com.mongodb.client.model.geojson.codecs.GeometryDecoderHelper.decodeGeometry; +import static com.mongodb.client.model.geojson.codecs.GeometryEncoderHelper.encodeGeometry; + +abstract class AbstractGeometryCodec implements Codec { + private final CodecRegistry registry; + private final Class encoderClass; + + AbstractGeometryCodec(final CodecRegistry registry, final Class encoderClass) { + this.registry = registry; + this.encoderClass = encoderClass; + } + + @Override + public void encode(final BsonWriter writer, final T value, final EncoderContext encoderContext) { + encodeGeometry(writer, value, encoderContext, registry); + } + + @Override + public T decode(final BsonReader reader, final DecoderContext decoderContext) { + return decodeGeometry(reader, getEncoderClass()); + } + + @Override + public Class getEncoderClass() { + return encoderClass; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/geojson/codecs/GeoJsonCodecProvider.java b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/GeoJsonCodecProvider.java new file mode 100644 index 00000000000..1c993a86ceb --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/GeoJsonCodecProvider.java @@ -0,0 +1,68 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.geojson.codecs; + +import com.mongodb.client.model.geojson.Geometry; +import com.mongodb.client.model.geojson.GeometryCollection; +import com.mongodb.client.model.geojson.LineString; +import com.mongodb.client.model.geojson.MultiLineString; +import com.mongodb.client.model.geojson.MultiPoint; +import com.mongodb.client.model.geojson.MultiPolygon; +import com.mongodb.client.model.geojson.NamedCoordinateReferenceSystem; +import com.mongodb.client.model.geojson.Point; +import com.mongodb.client.model.geojson.Polygon; +import org.bson.codecs.Codec; +import org.bson.codecs.configuration.CodecProvider; +import org.bson.codecs.configuration.CodecRegistry; + +/** + * A provider of codecs for GeoJSON objects. 
diff --git a/driver-core/src/main/com/mongodb/client/model/geojson/codecs/GeoJsonCodecProvider.java b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/GeoJsonCodecProvider.java
new file mode 100644
index 00000000000..1c993a86ceb
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/GeoJsonCodecProvider.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model.geojson.codecs;
+
+import com.mongodb.client.model.geojson.Geometry;
+import com.mongodb.client.model.geojson.GeometryCollection;
+import com.mongodb.client.model.geojson.LineString;
+import com.mongodb.client.model.geojson.MultiLineString;
+import com.mongodb.client.model.geojson.MultiPoint;
+import com.mongodb.client.model.geojson.MultiPolygon;
+import com.mongodb.client.model.geojson.NamedCoordinateReferenceSystem;
+import com.mongodb.client.model.geojson.Point;
+import com.mongodb.client.model.geojson.Polygon;
+import org.bson.codecs.Codec;
+import org.bson.codecs.configuration.CodecProvider;
+import org.bson.codecs.configuration.CodecRegistry;
+
+/**
+ * A provider of codecs for GeoJSON objects.
+ *
+ * @since 3.1
+ */
+public class GeoJsonCodecProvider implements CodecProvider {
+    @Override
+    @SuppressWarnings("unchecked")
+    public <T> Codec<T> get(final Class<T> clazz, final CodecRegistry registry) {
+        if (clazz.equals(Polygon.class)) {
+            return (Codec<T>) new PolygonCodec(registry);
+        } else if (clazz.equals(Point.class)) {
+            return (Codec<T>) new PointCodec(registry);
+        } else if (clazz.equals(LineString.class)) {
+            return (Codec<T>) new LineStringCodec(registry);
+        } else if (clazz.equals(MultiPoint.class)) {
+            return (Codec<T>) new MultiPointCodec(registry);
+        } else if (clazz.equals(MultiLineString.class)) {
+            return (Codec<T>) new MultiLineStringCodec(registry);
+        } else if (clazz.equals(MultiPolygon.class)) {
+            return (Codec<T>) new MultiPolygonCodec(registry);
+        } else if (clazz.equals(GeometryCollection.class)) {
+            return (Codec<T>) new GeometryCollectionCodec(registry);
+        } else if (clazz.equals(NamedCoordinateReferenceSystem.class)) {
+            return (Codec<T>) new NamedCoordinateReferenceSystemCodec();
+        } else if (clazz.equals(Geometry.class)) {
+            return (Codec<T>) new GeometryCodec(registry);
+        }
+
+        return null;
+    }
+
+    @Override
+    public String toString() {
+        return "GeoJsonCodecProvider{}";
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/client/model/geojson/codecs/GeometryCodec.java b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/GeometryCodec.java
new file mode 100644
index 00000000000..b2808bcad63
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/GeometryCodec.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model.geojson.codecs;
+
+import com.mongodb.client.model.geojson.Geometry;
+import org.bson.codecs.configuration.CodecRegistry;
+
+/**
+ * A Codec for a GeoJSON Geometry.
+ *
+ * @since 3.5
+ */
+public final class GeometryCodec extends AbstractGeometryCodec<Geometry> {
+
+    /**
+     * Construct a new instance.
+     *
+     * @param registry the CodecRegistry
+     */
+    public GeometryCodec(final CodecRegistry registry) {
+        super(registry, Geometry.class);
+    }
+}
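// Illustrative usage only, not part of this patch: the provider is the usual way to
// expose these codecs through a registry. CodecRegistries is the standard BSON helper.
import com.mongodb.client.model.geojson.Point;
import com.mongodb.client.model.geojson.Polygon;
import org.bson.codecs.Codec;
import org.bson.codecs.configuration.CodecRegistries;
import org.bson.codecs.configuration.CodecRegistry;

class ProviderExample {
    public static void main(final String[] args) {
        CodecRegistry registry = CodecRegistries.fromProviders(new GeoJsonCodecProvider());
        Codec<Point> pointCodec = registry.get(Point.class);       // dispatched by GeoJsonCodecProvider.get
        Codec<Polygon> polygonCodec = registry.get(Polygon.class);
        System.out.println(pointCodec.getEncoderClass());          // class com.mongodb.client.model.geojson.Point
        System.out.println(polygonCodec.getEncoderClass());        // class com.mongodb.client.model.geojson.Polygon
    }
}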
diff --git a/driver-core/src/main/com/mongodb/client/model/geojson/codecs/GeometryCollectionCodec.java b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/GeometryCollectionCodec.java
new file mode 100644
index 00000000000..4ad4fa54aba
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/GeometryCollectionCodec.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model.geojson.codecs;
+
+import com.mongodb.client.model.geojson.GeometryCollection;
+import org.bson.codecs.configuration.CodecRegistry;
+
+/**
+ * A Codec for a GeoJSON GeometryCollection.
+ *
+ * @since 3.1
+ */
+public class GeometryCollectionCodec extends AbstractGeometryCodec<GeometryCollection> {
+
+    /**
+     * Constructs an instance.
+     *
+     * @param registry the registry
+     */
+    public GeometryCollectionCodec(final CodecRegistry registry) {
+        super(registry, GeometryCollection.class);
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/client/model/geojson/codecs/GeometryDecoderHelper.java b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/GeometryDecoderHelper.java
new file mode 100644
index 00000000000..e68245b058e
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/GeometryDecoderHelper.java
@@ -0,0 +1,480 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model.geojson.codecs;
+
+import com.mongodb.client.model.geojson.CoordinateReferenceSystem;
+import com.mongodb.client.model.geojson.Geometry;
+import com.mongodb.client.model.geojson.GeometryCollection;
+import com.mongodb.client.model.geojson.LineString;
+import com.mongodb.client.model.geojson.MultiLineString;
+import com.mongodb.client.model.geojson.MultiPoint;
+import com.mongodb.client.model.geojson.MultiPolygon;
+import com.mongodb.client.model.geojson.NamedCoordinateReferenceSystem;
+import com.mongodb.client.model.geojson.Point;
+import com.mongodb.client.model.geojson.Polygon;
+import com.mongodb.client.model.geojson.PolygonCoordinates;
+import com.mongodb.client.model.geojson.Position;
+import com.mongodb.lang.Nullable;
+import org.bson.BsonReader;
+import org.bson.BsonReaderMark;
+import org.bson.BsonType;
+import org.bson.codecs.configuration.CodecConfigurationException;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static java.lang.String.format;
+
+final class GeometryDecoderHelper {
+
+    @SuppressWarnings("unchecked")
+    static <T extends Geometry> T decodeGeometry(final BsonReader reader, final Class<T> clazz) {
+        if (clazz.equals(Point.class)) {
+            return (T) decodePoint(reader);
+        } else if (clazz.equals(MultiPoint.class)) {
+            return (T) decodeMultiPoint(reader);
+        } else if (clazz.equals(Polygon.class)) {
+            return (T) decodePolygon(reader);
+        } else if (clazz.equals(MultiPolygon.class)) {
+            return (T) decodeMultiPolygon(reader);
+        } else if (clazz.equals(LineString.class)) {
+            return (T) decodeLineString(reader);
+        } else if (clazz.equals(MultiLineString.class)) {
+            return (T) decodeMultiLineString(reader);
+        } else if (clazz.equals(GeometryCollection.class)) {
+            return (T) decodeGeometryCollection(reader);
+        } else if (clazz.equals(Geometry.class)) {
+            return (T) decodeGeometry(reader);
+        }
+
+        throw new CodecConfigurationException(format("Unsupported Geometry: %s", clazz));
+    }
+
+    private static Point decodePoint(final BsonReader reader) {
+        String type = null;
+        Position position = null;
+        CoordinateReferenceSystem crs = null;
+        reader.readStartDocument();
+        while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+            String key = reader.readName();
+            if (key.equals("type")) {
+                type = reader.readString();
+            } else if (key.equals("coordinates")) {
+                position = decodePosition(reader);
+            } else if (key.equals("crs")) {
+                crs = decodeCoordinateReferenceSystem(reader);
+            } else {
+                throw new CodecConfigurationException(format("Unexpected key '%s' found when decoding a GeoJSON Point", key));
+            }
+        }
+        reader.readEndDocument();
+
+        if (type == null) {
+            throw new CodecConfigurationException("Invalid Point, document contained no type information.");
+        } else if (!type.equals("Point")) {
+            throw new CodecConfigurationException(format("Invalid Point, found type '%s'.", type));
+        } else if (position == null) {
+            throw new CodecConfigurationException("Invalid Point, missing position coordinates.");
+        }
+        return crs != null ? new Point(crs, position) : new Point(position);
+    }
+
+    private static MultiPoint decodeMultiPoint(final BsonReader reader) {
+        String type = null;
+        List<Position> coordinates = null;
+        CoordinateReferenceSystem crs = null;
+        reader.readStartDocument();
+        while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+            String key = reader.readName();
+            if (key.equals("type")) {
+                type = reader.readString();
+            } else if (key.equals("coordinates")) {
+                coordinates = decodeCoordinates(reader);
+            } else if (key.equals("crs")) {
+                crs = decodeCoordinateReferenceSystem(reader);
+            } else {
+                throw new CodecConfigurationException(format("Unexpected key '%s' found when decoding a GeoJSON MultiPoint", key));
+            }
+        }
+        reader.readEndDocument();
+
+        if (type == null) {
+            throw new CodecConfigurationException("Invalid MultiPoint, document contained no type information.");
+        } else if (!type.equals("MultiPoint")) {
+            throw new CodecConfigurationException(format("Invalid MultiPoint, found type '%s'.", type));
+        } else if (coordinates == null) {
+            throw new CodecConfigurationException("Invalid MultiPoint, missing position coordinates.");
+        }
+        return crs != null ? new MultiPoint(crs, coordinates) : new MultiPoint(coordinates);
+    }
+
+    private static Polygon decodePolygon(final BsonReader reader) {
+        String type = null;
+        PolygonCoordinates coordinates = null;
+        CoordinateReferenceSystem crs = null;
+
+        reader.readStartDocument();
+        while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+            String key = reader.readName();
+            if (key.equals("type")) {
+                type = reader.readString();
+            } else if (key.equals("coordinates")) {
+                coordinates = decodePolygonCoordinates(reader);
+            } else if (key.equals("crs")) {
+                crs = decodeCoordinateReferenceSystem(reader);
+            } else {
+                throw new CodecConfigurationException(format("Unexpected key '%s' found when decoding a GeoJSON Polygon", key));
+            }
+        }
+        reader.readEndDocument();
+
+        if (type == null) {
+            throw new CodecConfigurationException("Invalid Polygon, document contained no type information.");
+        } else if (!type.equals("Polygon")) {
+            throw new CodecConfigurationException(format("Invalid Polygon, found type '%s'.", type));
+        } else if (coordinates == null) {
+            throw new CodecConfigurationException("Invalid Polygon, missing coordinates.");
+        }
+        return crs != null ? new Polygon(crs, coordinates) : new Polygon(coordinates);
+    }
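// Illustrative usage only, not part of this patch: decoding a GeoJSON Polygon with one
// hole from its JSON form. JsonReader and DecoderContext are standard BSON classes;
// PolygonCodec and the registry wiring are defined in this diff.
import com.mongodb.client.model.geojson.Polygon;
import org.bson.codecs.DecoderContext;
import org.bson.codecs.configuration.CodecRegistries;
import org.bson.json.JsonReader;

class DecodePolygonExample {
    public static void main(final String[] args) {
        String json = "{type: 'Polygon', coordinates: ["
                + "[[0.0, 0.0], [4.0, 0.0], [4.0, 4.0], [0.0, 4.0], [0.0, 0.0]],"   // exterior ring
                + "[[1.0, 1.0], [2.0, 1.0], [2.0, 2.0], [1.0, 2.0], [1.0, 1.0]]]}"; // hole
        PolygonCodec codec = new PolygonCodec(CodecRegistries.fromProviders(new GeoJsonCodecProvider()));
        Polygon polygon = codec.decode(new JsonReader(json), DecoderContext.builder().build());
        System.out.println(polygon.getExterior().size()); // 5 positions in the exterior ring
    }
}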
+
+    private static MultiPolygon decodeMultiPolygon(final BsonReader reader) {
+        String type = null;
+        List<PolygonCoordinates> coordinates = null;
+        CoordinateReferenceSystem crs = null;
+
+        reader.readStartDocument();
+        while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+            String key = reader.readName();
+            if (key.equals("type")) {
+                type = reader.readString();
+            } else if (key.equals("coordinates")) {
+                coordinates = decodeMultiPolygonCoordinates(reader);
+            } else if (key.equals("crs")) {
+                crs = decodeCoordinateReferenceSystem(reader);
+            } else {
+                throw new CodecConfigurationException(format("Unexpected key '%s' found when decoding a GeoJSON MultiPolygon", key));
+            }
+        }
+        reader.readEndDocument();
+
+        if (type == null) {
+            throw new CodecConfigurationException("Invalid MultiPolygon, document contained no type information.");
+        } else if (!type.equals("MultiPolygon")) {
+            throw new CodecConfigurationException(format("Invalid MultiPolygon, found type '%s'.", type));
+        } else if (coordinates == null) {
+            throw new CodecConfigurationException("Invalid MultiPolygon, missing coordinates.");
+        }
+        return crs != null ? new MultiPolygon(crs, coordinates) : new MultiPolygon(coordinates);
+    }
+
+    private static LineString decodeLineString(final BsonReader reader) {
+        String type = null;
+        List<Position> coordinates = null;
+        CoordinateReferenceSystem crs = null;
+
+        reader.readStartDocument();
+        while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+            String key = reader.readName();
+            if (key.equals("type")) {
+                type = reader.readString();
+            } else if (key.equals("coordinates")) {
+                coordinates = decodeCoordinates(reader);
+            } else if (key.equals("crs")) {
+                crs = decodeCoordinateReferenceSystem(reader);
+            } else {
+                throw new CodecConfigurationException(format("Unexpected key '%s' found when decoding a GeoJSON LineString", key));
+            }
+        }
+        reader.readEndDocument();
+
+        if (type == null) {
+            throw new CodecConfigurationException("Invalid LineString, document contained no type information.");
+        } else if (!type.equals("LineString")) {
+            throw new CodecConfigurationException(format("Invalid LineString, found type '%s'.", type));
+        } else if (coordinates == null) {
+            throw new CodecConfigurationException("Invalid LineString, missing coordinates.");
+        }
+        return crs != null ? new LineString(crs, coordinates) : new LineString(coordinates);
+    }
+
+    private static MultiLineString decodeMultiLineString(final BsonReader reader) {
+        String type = null;
+        List<List<Position>> coordinates = null;
+        CoordinateReferenceSystem crs = null;
+
+        reader.readStartDocument();
+        while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+            String key = reader.readName();
+            if (key.equals("type")) {
+                type = reader.readString();
+            } else if (key.equals("coordinates")) {
+                coordinates = decodeMultiCoordinates(reader);
+            } else if (key.equals("crs")) {
+                crs = decodeCoordinateReferenceSystem(reader);
+            } else {
+                throw new CodecConfigurationException(format("Unexpected key '%s' found when decoding a GeoJSON MultiLineString", key));
+            }
+        }
+        reader.readEndDocument();
+
+        if (type == null) {
+            throw new CodecConfigurationException("Invalid MultiLineString, document contained no type information.");
+        } else if (!type.equals("MultiLineString")) {
+            throw new CodecConfigurationException(format("Invalid MultiLineString, found type '%s'.", type));
+        } else if (coordinates == null) {
+            throw new CodecConfigurationException("Invalid MultiLineString, missing coordinates.");
+        }
+        return crs != null ? new MultiLineString(crs, coordinates) : new MultiLineString(coordinates);
+    }
+
+    private static GeometryCollection decodeGeometryCollection(final BsonReader reader) {
+        String type = null;
+        List<Geometry> geometries = null;
+        CoordinateReferenceSystem crs = null;
+
+        reader.readStartDocument();
+        while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+            String key = reader.readName();
+            if (key.equals("type")) {
+                type = reader.readString();
+            } else if (key.equals("geometries")) {
+                geometries = decodeGeometries(reader);
+            } else if (key.equals("crs")) {
+                crs = decodeCoordinateReferenceSystem(reader);
+            } else {
+                throw new CodecConfigurationException(format("Unexpected key '%s' found when decoding a GeoJSON GeometryCollection", key));
+            }
+        }
+        reader.readEndDocument();
+
+        if (type == null) {
+            throw new CodecConfigurationException("Invalid GeometryCollection, document contained no type information.");
+        } else if (!type.equals("GeometryCollection")) {
+            throw new CodecConfigurationException(format("Invalid GeometryCollection, found type '%s'.", type));
+        } else if (geometries == null) {
+            throw new CodecConfigurationException("Invalid GeometryCollection, missing geometries.");
+        }
+        return crs != null ? new GeometryCollection(crs, geometries) : new GeometryCollection(geometries);
+    }
+
+    private static List<Geometry> decodeGeometries(final BsonReader reader) {
+        validateIsArray(reader);
+        reader.readStartArray();
+        List<Geometry> values = new ArrayList<>();
+        while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+            Geometry geometry = decodeGeometry(reader);
+            values.add(geometry);
+        }
+        reader.readEndArray();
+
+        return values;
+    }
+
+    private static Geometry decodeGeometry(final BsonReader reader) {
+        String type = null;
+        BsonReaderMark mark = reader.getMark();
+        validateIsDocument(reader);
+        reader.readStartDocument();
+        while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+            String key = reader.readName();
+            if (key.equals("type")) {
+                type = reader.readString();
+                break;
+            } else {
+                reader.skipValue();
+            }
+        }
+        mark.reset();
+
+        if (type == null) {
+            throw new CodecConfigurationException("Invalid Geometry item, document contained no type information.");
+        }
+        Geometry geometry = null;
+        if (type.equals("Point")) {
+            geometry = decodePoint(reader);
+        } else if (type.equals("MultiPoint")) {
+            geometry = decodeMultiPoint(reader);
+        } else if (type.equals("Polygon")) {
+            geometry = decodePolygon(reader);
+        } else if (type.equals("MultiPolygon")) {
+            geometry = decodeMultiPolygon(reader);
+        } else if (type.equals("LineString")) {
+            geometry = decodeLineString(reader);
+        } else if (type.equals("MultiLineString")) {
+            geometry = decodeMultiLineString(reader);
+        } else if (type.equals("GeometryCollection")) {
+            geometry = decodeGeometryCollection(reader);
+        } else {
+            throw new CodecConfigurationException(format("Invalid Geometry item, found type '%s'.", type));
+        }
+        return geometry;
+    }
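// Illustrative only, not part of this patch: decodeGeometry above peeks at the "type"
// field and then rewinds, so the type-specific decoder re-reads the whole document.
// The same mark/reset pattern works with any BsonReader:
import org.bson.BsonDocument;
import org.bson.BsonDocumentReader;
import org.bson.BsonReader;
import org.bson.BsonReaderMark;
import org.bson.BsonType;

class PeekTypeExample {
    static String peekType(final BsonReader reader) {
        BsonReaderMark mark = reader.getMark();   // remember the current position
        reader.readStartDocument();
        String type = null;
        while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {
            if (reader.readName().equals("type")) {
                type = reader.readString();
                break;
            }
            reader.skipValue();
        }
        mark.reset();                             // rewind: nothing has been consumed
        return type;
    }

    public static void main(final String[] args) {
        BsonReader reader = new BsonDocumentReader(BsonDocument.parse("{type: 'Point', coordinates: [40.0, 18.0]}"));
        System.out.println(peekType(reader));     // Point
    }
}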
+
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    private static PolygonCoordinates decodePolygonCoordinates(final BsonReader reader) {
+        validateIsArray(reader);
+        reader.readStartArray();
+        List<List<Position>> values = new ArrayList<>();
+        while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+            values.add(decodeCoordinates(reader));
+        }
+        reader.readEndArray();
+
+        if (values.isEmpty()) {
+            throw new CodecConfigurationException("Invalid Polygon, no coordinates.");
+        }
+
+        List<Position> exterior = values.remove(0);
+        ArrayList[] holes = values.toArray(new ArrayList[values.size()]);
+
+        try {
+            return new PolygonCoordinates(exterior, holes);
+        } catch (IllegalArgumentException e) {
+            throw new CodecConfigurationException(format("Invalid Polygon: %s", e.getMessage()));
+        }
+    }
+
+    private static List<PolygonCoordinates> decodeMultiPolygonCoordinates(final BsonReader reader) {
+        validateIsArray(reader);
+        reader.readStartArray();
+        List<PolygonCoordinates> values = new ArrayList<>();
+        while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+            values.add(decodePolygonCoordinates(reader));
+        }
+        reader.readEndArray();
+
+        if (values.isEmpty()) {
+            throw new CodecConfigurationException("Invalid MultiPolygon, no coordinates.");
+        }
+        return values;
+    }
+
+    private static List<Position> decodeCoordinates(final BsonReader reader) {
+        validateIsArray(reader);
+        reader.readStartArray();
+        List<Position> values = new ArrayList<>();
+        while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+            values.add(decodePosition(reader));
+        }
+        reader.readEndArray();
+        return values;
+    }
+
+    private static List<List<Position>> decodeMultiCoordinates(final BsonReader reader) {
+        validateIsArray(reader);
+        reader.readStartArray();
+        List<List<Position>> values = new ArrayList<>();
+        while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+            values.add(decodeCoordinates(reader));
+        }
+        reader.readEndArray();
+        return values;
+    }
+
+    private static Position decodePosition(final BsonReader reader) {
+        validateIsArray(reader);
+        reader.readStartArray();
+        List<Double> values = new ArrayList<>();
+        while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+            values.add(readAsDouble(reader));
+        }
+        reader.readEndArray();
+
+        try {
+            return new Position(values);
+        } catch (IllegalArgumentException e) {
+            throw new CodecConfigurationException(format("Invalid Position: %s", e.getMessage()));
+        }
+    }
+
+    private static double readAsDouble(final BsonReader reader) {
+        if (reader.getCurrentBsonType() == BsonType.DOUBLE) {
+            return reader.readDouble();
+        } else if (reader.getCurrentBsonType() == BsonType.INT32) {
+            return reader.readInt32();
+        } else if (reader.getCurrentBsonType() == BsonType.INT64) {
+            return reader.readInt64();
+        }
+
+        throw new CodecConfigurationException("A GeoJSON position value must be a numerical type, but the value is of type "
+                + reader.getCurrentBsonType());
+    }
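// Illustrative only, not part of this patch: readAsDouble above lets positions be
// stored as int32/int64 as well as double, so integer coordinates decode to the same
// Position as their double equivalents.
import com.mongodb.client.model.geojson.Point;
import org.bson.codecs.DecoderContext;
import org.bson.codecs.configuration.CodecRegistries;
import org.bson.json.JsonReader;

class NumericCoercionExample {
    public static void main(final String[] args) {
        PointCodec codec = new PointCodec(CodecRegistries.fromProviders(new GeoJsonCodecProvider()));
        Point fromInts = codec.decode(new JsonReader("{type: 'Point', coordinates: [40, 18]}"),
                DecoderContext.builder().build());
        System.out.println(fromInts.getPosition().getValues()); // [40.0, 18.0]
    }
}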
+
+    @Nullable
+    static CoordinateReferenceSystem decodeCoordinateReferenceSystem(final BsonReader reader) {
+        String crsName = null;
+        validateIsDocument(reader);
+        reader.readStartDocument();
+        while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+            String name = reader.readName();
+            if (name.equals("type")) {
+                String type = reader.readString();
+                if (!type.equals("name")) {
+                    throw new CodecConfigurationException(format("Unsupported CoordinateReferenceSystem '%s'.", type));
+                }
+            } else if (name.equals("properties")) {
+                crsName = decodeCoordinateReferenceSystemProperties(reader);
+            } else {
+                throw new CodecConfigurationException(format("Found invalid key '%s' in the CoordinateReferenceSystem.", name));
+            }
+        }
+        reader.readEndDocument();
+
+        return crsName != null ? new NamedCoordinateReferenceSystem(crsName) : null;
+    }
+
+    private static String decodeCoordinateReferenceSystemProperties(final BsonReader reader) {
+        String crsName = null;
+        validateIsDocument(reader);
+        reader.readStartDocument();
+        while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {
+            String name = reader.readName();
+            if (name.equals("name")) {
+                crsName = reader.readString();
+            } else {
+                throw new CodecConfigurationException(format("Found invalid key '%s' in the CoordinateReferenceSystem.", name));
+            }
+        }
+        reader.readEndDocument();
+
+        if (crsName == null) {
+            throw new CodecConfigurationException("Found invalid properties in the CoordinateReferenceSystem.");
+        }
+        return crsName;
+    }
+
+    private static void validateIsDocument(final BsonReader reader) {
+        BsonType currentType = reader.getCurrentBsonType();
+        if (currentType == null) {
+            currentType = reader.readBsonType();
+        }
+        if (!currentType.equals(BsonType.DOCUMENT)) {
+            throw new CodecConfigurationException("Invalid BsonType expecting a Document");
+        }
+    }
+
+    private static void validateIsArray(final BsonReader reader) {
+        if (reader.getCurrentBsonType() != BsonType.ARRAY) {
+            throw new CodecConfigurationException("Invalid BsonType expecting an Array");
+        }
+    }
+
+    private GeometryDecoderHelper() {
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/client/model/geojson/codecs/GeometryEncoderHelper.java b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/GeometryEncoderHelper.java
new file mode 100644
index 00000000000..55e6a595a97
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/GeometryEncoderHelper.java
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model.geojson.codecs;
+
+import com.mongodb.client.model.geojson.CoordinateReferenceSystem;
+import com.mongodb.client.model.geojson.Geometry;
+import com.mongodb.client.model.geojson.GeometryCollection;
+import com.mongodb.client.model.geojson.LineString;
+import com.mongodb.client.model.geojson.MultiLineString;
+import com.mongodb.client.model.geojson.MultiPoint;
+import com.mongodb.client.model.geojson.MultiPolygon;
+import com.mongodb.client.model.geojson.Point;
+import com.mongodb.client.model.geojson.Polygon;
+import com.mongodb.client.model.geojson.PolygonCoordinates;
+import com.mongodb.client.model.geojson.Position;
+import org.bson.BsonWriter;
+import org.bson.codecs.Codec;
+import org.bson.codecs.EncoderContext;
+import org.bson.codecs.configuration.CodecConfigurationException;
+import org.bson.codecs.configuration.CodecRegistry;
+
+import java.util.List;
+
+import static java.lang.String.format;
+
+final class GeometryEncoderHelper {
+
+    static void encodeGeometry(final BsonWriter writer, final Geometry value, final EncoderContext encoderContext,
+                               final CodecRegistry registry) {
+
+        writer.writeStartDocument();
+        writer.writeString("type", value.getType().getTypeName());
+
+        if (value instanceof GeometryCollection) {
+            writer.writeName("geometries");
+            encodeGeometryCollection(writer, (GeometryCollection) value, encoderContext, registry);
+        } else {
+            writer.writeName("coordinates");
+            if (value instanceof Point) {
+                encodePoint(writer, (Point) value);
+            } else if (value instanceof MultiPoint) {
+                encodeMultiPoint(writer, (MultiPoint) value);
+            } else if (value instanceof Polygon) {
+                encodePolygon(writer, (Polygon) value);
+            } else if (value instanceof MultiPolygon) {
+                encodeMultiPolygon(writer, (MultiPolygon) value);
+            } else if (value instanceof LineString) {
+                encodeLineString(writer, (LineString) value);
+            } else if (value instanceof MultiLineString) {
+                encodeMultiLineString(writer, (MultiLineString) value);
+            } else {
+                throw new CodecConfigurationException(format("Unsupported Geometry: %s", value));
+            }
+        }
+
+        encodeCoordinateReferenceSystem(writer, value, encoderContext, registry);
+        writer.writeEndDocument();
+    }
+
+    private static void encodePoint(final BsonWriter writer, final Point value) {
+        encodePosition(writer, value.getPosition());
+    }
+
+    private static void encodeMultiPoint(final BsonWriter writer, final MultiPoint value) {
+        writer.writeStartArray();
+        for (Position position : value.getCoordinates()) {
+            encodePosition(writer, position);
+        }
+        writer.writeEndArray();
+    }
+
+    private static void encodePolygon(final BsonWriter writer, final Polygon value) {
+        encodePolygonCoordinates(writer, value.getCoordinates());
+    }
+
+    private static void encodeMultiPolygon(final BsonWriter writer, final MultiPolygon value) {
+        writer.writeStartArray();
+        for (PolygonCoordinates polygonCoordinates : value.getCoordinates()) {
+            encodePolygonCoordinates(writer, polygonCoordinates);
+        }
+        writer.writeEndArray();
+    }
+
+    private static void encodeLineString(final BsonWriter writer, final LineString value) {
+        writer.writeStartArray();
+        for (Position position : value.getCoordinates()) {
+            encodePosition(writer, position);
+        }
+        writer.writeEndArray();
+    }
+
+    private static void encodeMultiLineString(final BsonWriter writer, final MultiLineString value) {
+        writer.writeStartArray();
+        for (List<Position> ring : value.getCoordinates()) {
+            writer.writeStartArray();
+            for (Position position : ring) {
+                encodePosition(writer, position);
+            }
+            writer.writeEndArray();
+        }
+        writer.writeEndArray();
+    }
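// Illustrative only, not part of this patch: what encodeGeometry produces for a Point.
// BsonDocumentWriter and EncoderContext are standard BSON classes.
import com.mongodb.client.model.geojson.Point;
import com.mongodb.client.model.geojson.Position;
import org.bson.BsonDocument;
import org.bson.BsonDocumentWriter;
import org.bson.codecs.EncoderContext;
import org.bson.codecs.configuration.CodecRegistries;

class EncodePointExample {
    public static void main(final String[] args) {
        BsonDocument target = new BsonDocument();
        new PointCodec(CodecRegistries.fromProviders(new GeoJsonCodecProvider()))
                .encode(new BsonDocumentWriter(target), new Point(new Position(40.0, 18.0)),
                        EncoderContext.builder().build());
        System.out.println(target.toJson()); // {"type": "Point", "coordinates": [40.0, 18.0]}
    }
}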
+
+    private static void encodeGeometryCollection(final BsonWriter writer, final GeometryCollection value,
+                                                 final EncoderContext encoderContext, final CodecRegistry registry) {
+        writer.writeStartArray();
+        for (Geometry geometry : value.getGeometries()) {
+            encodeGeometry(writer, geometry, encoderContext, registry);
+        }
+        writer.writeEndArray();
+    }
+
+    @SuppressWarnings({"unchecked", "rawtypes"})
+    static void encodeCoordinateReferenceSystem(final BsonWriter writer, final Geometry geometry,
+                                                final EncoderContext encoderContext, final CodecRegistry registry) {
+        CoordinateReferenceSystem coordinateReferenceSystem = geometry.getCoordinateReferenceSystem();
+        if (coordinateReferenceSystem != null) {
+            writer.writeName("crs");
+            Codec codec = registry.get(coordinateReferenceSystem.getClass());
+            encoderContext.encodeWithChildContext(codec, writer, coordinateReferenceSystem);
+        }
+    }
+
+    static void encodePolygonCoordinates(final BsonWriter writer, final PolygonCoordinates polygonCoordinates) {
+        writer.writeStartArray();
+        encodeLinearRing(polygonCoordinates.getExterior(), writer);
+        for (List<Position> ring : polygonCoordinates.getHoles()) {
+            encodeLinearRing(ring, writer);
+        }
+        writer.writeEndArray();
+    }
+
+    private static void encodeLinearRing(final List<Position> ring, final BsonWriter writer) {
+        writer.writeStartArray();
+        for (Position position : ring) {
+            encodePosition(writer, position);
+        }
+        writer.writeEndArray();
+    }
+
+    static void encodePosition(final BsonWriter writer, final Position value) {
+        writer.writeStartArray();
+
+        for (double number : value.getValues()) {
+            writer.writeDouble(number);
+        }
+
+        writer.writeEndArray();
+    }
+
+    private GeometryEncoderHelper() {
+    }
+}
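// Illustrative only, not part of this patch: when a geometry carries a coordinate
// reference system, encodeCoordinateReferenceSystem adds a "crs" subdocument via the
// registry. EPSG_4326 is assumed to be a constant on NamedCoordinateReferenceSystem;
// new NamedCoordinateReferenceSystem("EPSG:4326") would be equivalent.
import com.mongodb.client.model.geojson.NamedCoordinateReferenceSystem;
import com.mongodb.client.model.geojson.Point;
import com.mongodb.client.model.geojson.Position;
import org.bson.BsonDocument;
import org.bson.BsonDocumentWriter;
import org.bson.codecs.EncoderContext;
import org.bson.codecs.configuration.CodecRegistries;

class EncodeCrsExample {
    public static void main(final String[] args) {
        Point point = new Point(NamedCoordinateReferenceSystem.EPSG_4326, new Position(40.0, 18.0));
        BsonDocument target = new BsonDocument();
        new PointCodec(CodecRegistries.fromProviders(new GeoJsonCodecProvider()))
                .encode(new BsonDocumentWriter(target), point, EncoderContext.builder().build());
        // {"type": "Point", "coordinates": [40.0, 18.0],
        //  "crs": {"type": "name", "properties": {"name": "EPSG:4326"}}}
        System.out.println(target.toJson());
    }
}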
diff --git a/driver-core/src/main/com/mongodb/client/model/geojson/codecs/LineStringCodec.java b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/LineStringCodec.java
new file mode 100644
index 00000000000..e7a9dbb5ae6
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/LineStringCodec.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model.geojson.codecs;
+
+import com.mongodb.client.model.geojson.LineString;
+import org.bson.codecs.configuration.CodecRegistry;
+
+/**
+ * A Codec for a GeoJSON LineString.
+ *
+ * @since 3.1
+ */
+public class LineStringCodec extends AbstractGeometryCodec<LineString> {
+
+    /**
+     * Construct a new instance.
+     *
+     * @param registry the CodecRegistry
+     */
+    public LineStringCodec(final CodecRegistry registry) {
+        super(registry, LineString.class);
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/client/model/geojson/codecs/MultiLineStringCodec.java b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/MultiLineStringCodec.java
new file mode 100644
index 00000000000..140783a9410
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/MultiLineStringCodec.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model.geojson.codecs;
+
+import com.mongodb.client.model.geojson.MultiLineString;
+import org.bson.codecs.configuration.CodecRegistry;
+
+/**
+ * A Codec for a GeoJSON MultiLineString.
+ *
+ * @since 3.1
+ */
+public class MultiLineStringCodec extends AbstractGeometryCodec<MultiLineString> {
+
+    /**
+     * Constructs an instance.
+     *
+     * @param registry the registry
+     */
+    public MultiLineStringCodec(final CodecRegistry registry) {
+        super(registry, MultiLineString.class);
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/client/model/geojson/codecs/MultiPointCodec.java b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/MultiPointCodec.java
new file mode 100644
index 00000000000..2bd24d43e75
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/MultiPointCodec.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model.geojson.codecs;
+
+import com.mongodb.client.model.geojson.MultiPoint;
+import org.bson.codecs.configuration.CodecRegistry;
+
+/**
+ * A Codec for a GeoJSON MultiPoint.
+ *
+ * @since 3.1
+ */
+public class MultiPointCodec extends AbstractGeometryCodec<MultiPoint> {
+
+    /**
+     * Constructs an instance.
+     *
+     * @param registry the registry
+     */
+    public MultiPointCodec(final CodecRegistry registry) {
+        super(registry, MultiPoint.class);
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/client/model/geojson/codecs/MultiPolygonCodec.java b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/MultiPolygonCodec.java
new file mode 100644
index 00000000000..c43a4938421
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/MultiPolygonCodec.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model.geojson.codecs;
+
+import com.mongodb.client.model.geojson.MultiPolygon;
+import org.bson.codecs.configuration.CodecRegistry;
+
+/**
+ * A Codec for a GeoJSON MultiPolygon.
+ *
+ * @since 3.1
+ */
+public class MultiPolygonCodec extends AbstractGeometryCodec<MultiPolygon> {
+
+    /**
+     * Constructs an instance.
+     *
+     * @param registry the registry
+     */
+    public MultiPolygonCodec(final CodecRegistry registry) {
+        super(registry, MultiPolygon.class);
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/client/model/geojson/codecs/NamedCoordinateReferenceSystemCodec.java b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/NamedCoordinateReferenceSystemCodec.java
new file mode 100644
index 00000000000..e3c8d387385
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/NamedCoordinateReferenceSystemCodec.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model.geojson.codecs;
+
+import com.mongodb.client.model.geojson.CoordinateReferenceSystem;
+import com.mongodb.client.model.geojson.NamedCoordinateReferenceSystem;
+import org.bson.BsonReader;
+import org.bson.BsonWriter;
+import org.bson.codecs.Codec;
+import org.bson.codecs.DecoderContext;
+import org.bson.codecs.EncoderContext;
+import org.bson.codecs.configuration.CodecConfigurationException;
+
+import static com.mongodb.client.model.geojson.codecs.GeometryDecoderHelper.decodeCoordinateReferenceSystem;
+
+/**
+ * Codec for a GeoJson Coordinate Reference System of type name.
+ *
+ * @since 3.1
+ */
+public class NamedCoordinateReferenceSystemCodec implements Codec<NamedCoordinateReferenceSystem> {
+    @Override
+    public void encode(final BsonWriter writer, final NamedCoordinateReferenceSystem value, final EncoderContext encoderContext) {
+        writer.writeStartDocument();
+
+        writer.writeString("type", value.getType().getTypeName());
+
+        writer.writeStartDocument("properties");
+        writer.writeString("name", value.getName());
+        writer.writeEndDocument();
+
+        writer.writeEndDocument();
+    }
+
+    @Override
+    public Class<NamedCoordinateReferenceSystem> getEncoderClass() {
+        return NamedCoordinateReferenceSystem.class;
+    }
+
+    @Override
+    public NamedCoordinateReferenceSystem decode(final BsonReader reader, final DecoderContext decoderContext) {
+        CoordinateReferenceSystem crs = decodeCoordinateReferenceSystem(reader);
+        if (crs == null || !(crs instanceof NamedCoordinateReferenceSystem)) {
+            throw new CodecConfigurationException("Invalid NamedCoordinateReferenceSystem.");
+        }
+        return (NamedCoordinateReferenceSystem) crs;
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/client/model/geojson/codecs/PointCodec.java b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/PointCodec.java
new file mode 100644
index 00000000000..4fe9ec345b6
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/PointCodec.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model.geojson.codecs;
+
+import com.mongodb.client.model.geojson.Point;
+import org.bson.codecs.configuration.CodecRegistry;
+
+/**
+ * A Codec for a GeoJSON point.
+ *
+ * @since 3.1
+ */
+public class PointCodec extends AbstractGeometryCodec<Point> {
+
+    /**
+     * Constructs an instance.
+     *
+     * @param registry the registry
+     */
+    public PointCodec(final CodecRegistry registry) {
+        super(registry, Point.class);
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/client/model/geojson/codecs/PolygonCodec.java b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/PolygonCodec.java
new file mode 100644
index 00000000000..a0b98d086cc
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/PolygonCodec.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model.geojson.codecs;
+
+import com.mongodb.client.model.geojson.Polygon;
+import org.bson.codecs.configuration.CodecRegistry;
+
+/**
+ * A Codec for a GeoJSON polygon.
+ *
+ * @since 3.1
+ */
+public class PolygonCodec extends AbstractGeometryCodec<Polygon> {
+
+    /**
+     * Constructs an instance.
+     *
+     * @param registry the registry
+     */
+    public PolygonCodec(final CodecRegistry registry) {
+        super(registry, Polygon.class);
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/client/model/geojson/codecs/package-info.java b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/package-info.java
new file mode 100644
index 00000000000..b640ba2d9eb
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/geojson/codecs/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package contains classes that encode and decode GeoJSON objects.
+ */
+@NonNullApi
+package com.mongodb.client.model.geojson.codecs;
+
+import com.mongodb.lang.NonNullApi;
diff --git a/driver-core/src/main/com/mongodb/client/model/geojson/package-info.java b/driver-core/src/main/com/mongodb/client/model/geojson/package-info.java
new file mode 100644
index 00000000000..7fa17603e82
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/geojson/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package contains classes that represent GeoJSON objects.
+ */
+@NonNullApi
+package com.mongodb.client.model.geojson;
+
+import com.mongodb.lang.NonNullApi;
diff --git a/driver-core/src/main/com/mongodb/client/model/mql/Branches.java b/driver-core/src/main/com/mongodb/client/model/mql/Branches.java
new file mode 100644
index 00000000000..c6b414de213
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/mql/Branches.java
@@ -0,0 +1,269 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model.mql;
+
+import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
+import com.mongodb.assertions.Assertions;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Function;
+
+import static com.mongodb.client.model.mql.MqlUnchecked.Unchecked.TYPE_ARGUMENT;
+
+/**
+ * Branches are used in {@linkplain MqlValue#switchOn}, and
+ * define a sequence of checks that will be performed. The first check
+ * to succeed will produce the value that it specifies. If no check succeeds,
+ * then the operation
+ * {@linkplain BranchesIntermediary#defaults(Function) defaults} to a default
+ * value, or if none is specified, the operation will cause an error.
+ *
+ * @param <T> the type of the values that may be checked.
+ * @since 4.9.0
+ */
+@Beta(Reason.CLIENT)
+public final class Branches<T extends MqlValue> {
+
+    Branches() {
+    }
+
+    private static <T extends MqlValue, R extends MqlValue> BranchesIntermediary<T, R> with(final Function<T, SwitchCase<R>> switchCase) {
+        List<Function<T, SwitchCase<R>>> v = new ArrayList<>();
+        v.add(switchCase);
+        return new BranchesIntermediary<>(v);
+    }
+
+    private static <T extends MqlValue> MqlExpression<?> mqlEx(final T value) {
+        return (MqlExpression<?>) value;
+    }
+
+    // is fn
+
+    /**
+     * A successful check for the specified {@code predicate}
+     * produces a value specified by the {@code mapping}.
+     *
+     * @param predicate the predicate.
+     * @param mapping the mapping.
+     * @param <R> the type of the produced value.
+     * @return the appended sequence of checks.
+     */
+    public <R extends MqlValue> BranchesIntermediary<T, R> is(
+            final Function<? super T, MqlBoolean> predicate, final Function<? super T, ? extends R> mapping) {
+        Assertions.notNull("predicate", predicate);
+        Assertions.notNull("mapping", mapping);
+        return with(value -> new SwitchCase<>(predicate.apply(value), mapping.apply(value)));
+    }
+
+    // eq lt lte
+
+    /**
+     * A successful check for {@linkplain MqlValue#eq equality}
+     * produces a value specified by the {@code mapping}.
+     *
+     * @param v the value to check against.
+     * @param mapping the mapping.
+     * @param <R> the type of the produced value.
+     * @return the appended sequence of checks.
+     */
+    public <R extends MqlValue> BranchesIntermediary<T, R> eq(final T v, final Function<? super T, ? extends R> mapping) {
+        Assertions.notNull("v", v);
+        Assertions.notNull("mapping", mapping);
+        return is(value -> value.eq(v), mapping);
+    }
+
+    /**
+     * A successful check for being
+     * {@linkplain MqlValue#lt less than}
+     * the provided value {@code v}
+     * produces a value specified by the {@code mapping}.
+     *
+     * @param v the value to check against.
+     * @param mapping the mapping.
+     * @param <R> the type of the produced value.
+     * @return the appended sequence of checks.
+     */
+    public <R extends MqlValue> BranchesIntermediary<T, R> lt(final T v, final Function<? super T, ? extends R> mapping) {
+        Assertions.notNull("v", v);
+        Assertions.notNull("mapping", mapping);
+        return is(value -> value.lt(v), mapping);
+    }
+
+    /**
+     * A successful check for being
+     * {@linkplain MqlValue#lte less than or equal to}
+     * the provided value {@code v}
+     * produces a value specified by the {@code mapping}.
+     *
+     * @param v the value to check against.
+     * @param mapping the mapping.
+     * @param <R> the type of the produced value.
+     * @return the appended sequence of checks.
+     */
+    public <R extends MqlValue> BranchesIntermediary<T, R> lte(final T v, final Function<? super T, ? extends R> mapping) {
+        Assertions.notNull("v", v);
+        Assertions.notNull("mapping", mapping);
+        return is(value -> value.lte(v), mapping);
+    }
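// Illustrative only, not part of this patch: a typical use of Branches through
// MqlValue.switchOn. current(), of(...) and getInteger(...) are assumed to come from
// MqlValues and MqlDocument, as elsewhere in this API.
import static com.mongodb.client.model.mql.MqlValues.current;
import static com.mongodb.client.model.mql.MqlValues.of;

import com.mongodb.client.model.mql.MqlString;

class SwitchOnExample {
    static MqlString sizeLabel() {
        // the first matching branch wins; defaults covers everything else
        return current().getInteger("size").switchOn(on -> on
                .eq(of(0), v -> of("empty"))
                .lt(of(10), v -> of("small"))
                .defaults(v -> of("large")));
    }
}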
+
+    // is type
+
+    /**
+     * A successful check for
+     * {@linkplain MqlValue#isBooleanOr(MqlBoolean) being a boolean}
+     * produces a value specified by the {@code mapping}.
+     *
+     * @param mapping the mapping.
+     * @return the appended sequence of checks.
+     * @param <R> the type of the produced value.
+     */
+    public <R extends MqlValue> BranchesIntermediary<T, R> isBoolean(final Function<? super MqlBoolean, ? extends R> mapping) {
+        Assertions.notNull("mapping", mapping);
+        return is(v -> mqlEx(v).isBoolean(), v -> mapping.apply((MqlBoolean) v));
+    }
+
+    /**
+     * A successful check for
+     * {@linkplain MqlValue#isNumberOr(MqlNumber) being a number}
+     * produces a value specified by the {@code mapping}.
+     *
+     * @mongodb.server.release 4.4
+     * @param mapping the mapping.
+     * @return the appended sequence of checks.
+     * @param <R> the type of the produced value.
+     */
+    public <R extends MqlValue> BranchesIntermediary<T, R> isNumber(final Function<? super MqlNumber, ? extends R> mapping) {
+        Assertions.notNull("mapping", mapping);
+        return is(v -> mqlEx(v).isNumber(), v -> mapping.apply((MqlNumber) v));
+    }
+
+    /**
+     * A successful check for
+     * {@linkplain MqlValue#isIntegerOr(MqlInteger) being an integer}
+     * produces a value specified by the {@code mapping}.
+     *
+     * @mongodb.server.release 4.4
+     * @param mapping the mapping.
+     * @return the appended sequence of checks.
+     * @param <R> the type of the produced value.
+     */
+    public <R extends MqlValue> BranchesIntermediary<T, R> isInteger(final Function<? super MqlInteger, ? extends R> mapping) {
+        Assertions.notNull("mapping", mapping);
+        return is(v -> mqlEx(v).isInteger(), v -> mapping.apply((MqlInteger) v));
+    }
+
+    /**
+     * A successful check for
+     * {@linkplain MqlValue#isStringOr(MqlString) being a string}
+     * produces a value specified by the {@code mapping}.
+     *
+     * @param mapping the mapping.
+     * @return the appended sequence of checks.
+     * @param <R> the type of the produced value.
+     */
+    public <R extends MqlValue> BranchesIntermediary<T, R> isString(final Function<? super MqlString, ? extends R> mapping) {
+        Assertions.notNull("mapping", mapping);
+        return is(v -> mqlEx(v).isString(), v -> mapping.apply((MqlString) v));
+    }
+
+    /**
+     * A successful check for
+     * {@linkplain MqlValue#isDateOr(MqlDate) being a date}
+     * produces a value specified by the {@code mapping}.
+     *
+     * @param mapping the mapping.
+     * @return the appended sequence of checks.
+     * @param <R> the type of the produced value.
+     */
+    public <R extends MqlValue> BranchesIntermediary<T, R> isDate(final Function<? super MqlDate, ? extends R> mapping) {
+        Assertions.notNull("mapping", mapping);
+        return is(v -> mqlEx(v).isDate(), v -> mapping.apply((MqlDate) v));
+    }
+
+    /**
+     * A successful check for
+     * {@linkplain MqlValue#isArrayOr(MqlArray) being an array}
+     * produces a value specified by the {@code mapping}.
+     *
+     * <p>Warning: The type argument of the array is not
+     * enforced by the API. The use of this method is an
+     * unchecked assertion that the type argument is correct.
+     *
+     * @param mapping the mapping.
+     * @return the appended sequence of checks.
+     * @param <R> the type of the produced value.
+     * @param <Q> the type of the array.
+     */
+    @SuppressWarnings("unchecked")
+    public <R extends MqlValue, Q extends MqlValue> BranchesIntermediary<T, R> isArray(
+            final Function<? super MqlArray<@MqlUnchecked(TYPE_ARGUMENT) Q>, ? extends R> mapping) {
+        Assertions.notNull("mapping", mapping);
+        return is(v -> mqlEx(v).isArray(), v -> mapping.apply((MqlArray<Q>) v));
+    }
+
+    /**
+     * A successful check for
+     * {@linkplain MqlValue#isDocumentOr(MqlDocument) being a document}
+     * (or document-like value, see
+     * {@link MqlMap} and {@link MqlEntry})
+     * produces a value specified by the {@code mapping}.
+     *
+     * @param mapping the mapping.
+     * @return the appended sequence of checks.
+     * @param <R> the type of the produced value.
+     */
+    public <R extends MqlValue> BranchesIntermediary<T, R> isDocument(final Function<? super MqlDocument, ? extends R> mapping) {
+        Assertions.notNull("mapping", mapping);
+        return is(v -> mqlEx(v).isDocumentOrMap(), v -> mapping.apply((MqlDocument) v));
+    }
+
+    /**
+     * A successful check for
+     * {@linkplain MqlValue#isMapOr(MqlMap) being a map}
+     * (or map-like value, see
+     * {@link MqlDocument} and {@link MqlEntry})
+     * produces a value specified by the {@code mapping}.
+     *
+     * <p>Warning: The type argument of the map is not
+     * enforced by the API. The use of this method is an
+     * unchecked assertion that the type argument is correct.
+     *
+     * @param mapping the mapping.
+     * @return the appended sequence of checks.
+     * @param <R> the type of the produced value.
+     * @param <Q> the type of the map.
+     */
+    @SuppressWarnings("unchecked")
+    public <R extends MqlValue, Q extends MqlValue> BranchesIntermediary<T, R> isMap(
+            final Function<? super MqlMap<@MqlUnchecked(TYPE_ARGUMENT) Q>, ? extends R> mapping) {
+        Assertions.notNull("mapping", mapping);
+        return is(v -> mqlEx(v).isDocumentOrMap(), v -> mapping.apply((MqlMap<Q>) v));
+    }
+
+    /**
+     * A successful check for
+     * {@linkplain MqlValues#ofNull() being the null value}
+     * produces a value specified by the {@code mapping}.
+     *
+     * @param mapping the mapping.
+     * @return the appended sequence of checks.
+     * @param <R> the type of the produced value.
+     */
+    public <R extends MqlValue> BranchesIntermediary<T, R> isNull(final Function<? super MqlValue, ? extends R> mapping) {
+        Assertions.notNull("mapping", mapping);
+        return is(v -> mqlEx(v).isNull(), v -> mapping.apply(v));
+    }
+}
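// Illustrative only, not part of this patch: chaining type checks; the value of the
// first successful branch is used, and defaults(...) handles everything else. As with
// the previous sketch, current() and of(...) are assumed to come from MqlValues.
import static com.mongodb.client.model.mql.MqlValues.current;
import static com.mongodb.client.model.mql.MqlValues.of;

import com.mongodb.client.model.mql.MqlString;

class TypeDispatchExample {
    static MqlString describeField() {
        return current().getField("payload").switchOn(on -> on
                .isString(s -> of("a string"))
                .isNumber(n -> of("a number"))
                .isArray(a -> of("an array"))     // unchecked type argument, see the Warning above
                .defaults(v -> of("something else")));
    }
}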
diff --git a/driver-core/src/main/com/mongodb/client/model/mql/BranchesIntermediary.java b/driver-core/src/main/com/mongodb/client/model/mql/BranchesIntermediary.java
new file mode 100644
index 00000000000..b068c118ad3
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/mql/BranchesIntermediary.java
@@ -0,0 +1,265 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model.mql;
+
+import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
+import com.mongodb.assertions.Assertions;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Function;
+
+import static com.mongodb.client.model.mql.MqlUnchecked.Unchecked.TYPE_ARGUMENT;
+
+/**
+ * See {@link Branches}.
+ *
+ * @param <T> the type of the values that may be checked.
+ * @param <R> the type of the value produced.
+ * @since 4.9.0
+ */
+@Beta(Reason.CLIENT)
+public final class BranchesIntermediary<T extends MqlValue, R extends MqlValue> extends BranchesTerminal<T, R> {
+    BranchesIntermediary(final List<Function<T, SwitchCase<R>>> branches) {
+        super(branches, null);
+    }
+
+    private BranchesIntermediary<T, R> with(final Function<T, SwitchCase<R>> switchCase) {
+        List<Function<T, SwitchCase<R>>> v = new ArrayList<>(this.getBranches());
+        v.add(switchCase);
+        return new BranchesIntermediary<>(v);
+    }
+
+    private static <T extends MqlValue> MqlExpression<?> mqlEx(final T value) {
+        return (MqlExpression<?>) value;
+    }
+
+    // is fn
+
+    /**
+     * A successful check for the specified {@code predicate}
+     * produces a value specified by the {@code mapping}.
+     *
+     * @param predicate the predicate.
+     * @param mapping the mapping.
+     * @return the appended sequence of checks.
+     */
+    public BranchesIntermediary<T, R> is(final Function<? super T, MqlBoolean> predicate, final Function<? super T, ? extends R> mapping) {
+        Assertions.notNull("predicate", predicate);
+        Assertions.notNull("mapping", mapping);
+        return this.with(value -> new SwitchCase<>(predicate.apply(value), mapping.apply(value)));
+    }
+
+    // eq lt lte
+
+    /**
+     * A successful check for {@linkplain MqlValue#eq equality}
+     * produces a value specified by the {@code mapping}.
+     *
+     * @param v the value to check against.
+     * @param mapping the mapping.
+     * @return the appended sequence of checks.
+     */
+    public BranchesIntermediary<T, R> eq(final T v, final Function<? super T, ? extends R> mapping) {
+        Assertions.notNull("v", v);
+        Assertions.notNull("mapping", mapping);
+        return is(value -> value.eq(v), mapping);
+    }
+
+    /**
+     * A successful check for being
+     * {@linkplain MqlValue#lt less than}
+     * the provided value {@code v}
+     * produces a value specified by the {@code mapping}.
+     *
+     * @param v the value to check against.
+     * @param mapping the mapping.
+     * @return the appended sequence of checks.
+     */
+    public BranchesIntermediary<T, R> lt(final T v, final Function<? super T, ? extends R> mapping) {
+        Assertions.notNull("v", v);
+        Assertions.notNull("mapping", mapping);
+        return is(value -> value.lt(v), mapping);
+    }
+
+    /**
+     * A successful check for being
+     * {@linkplain MqlValue#lte less than or equal to}
+     * the provided value {@code v}
+     * produces a value specified by the {@code mapping}.
+     *
+     * @param v the value to check against.
+     * @param mapping the mapping.
+     * @return the appended sequence of checks.
+     */
+    public BranchesIntermediary<T, R> lte(final T v, final Function<? super T, ? extends R> mapping) {
+        Assertions.notNull("v", v);
+        Assertions.notNull("mapping", mapping);
+        return is(value -> value.lte(v), mapping);
+    }
+
+    // is type
+
+    /**
+     * A successful check for
+     * {@linkplain MqlValue#isBooleanOr(MqlBoolean) being a boolean}
+     * produces a value specified by the {@code mapping}.
+     *
+     * @param mapping the mapping.
+     * @return the appended sequence of checks.
+     */
+    public BranchesIntermediary<T, R> isBoolean(final Function<? super MqlBoolean, ? extends R> mapping) {
+        Assertions.notNull("mapping", mapping);
+        return is(v -> mqlEx(v).isBoolean(), v -> mapping.apply((MqlBoolean) v));
+    }
+
+    /**
+     * A successful check for
+     * {@linkplain MqlValue#isNumberOr(MqlNumber) being a number}
+     * produces a value specified by the {@code mapping}.
+     *
+     * @mongodb.server.release 4.4
+     * @param mapping the mapping.
+     * @return the appended sequence of checks.
+     */
+    public BranchesIntermediary<T, R> isNumber(final Function<? super MqlNumber, ? extends R> mapping) {
+        Assertions.notNull("mapping", mapping);
+        return is(v -> mqlEx(v).isNumber(), v -> mapping.apply((MqlNumber) v));
+    }
+
+    /**
+     * A successful check for
+     * {@linkplain MqlValue#isIntegerOr(MqlInteger) being an integer}
+     * produces a value specified by the {@code mapping}.
+     *
+     * @mongodb.server.release 4.4
+     * @param mapping the mapping.
+     * @return the appended sequence of checks.
+     */
+    public BranchesIntermediary<T, R> isInteger(final Function<? super MqlInteger, ? extends R> mapping) {
+        Assertions.notNull("mapping", mapping);
+        return is(v -> mqlEx(v).isInteger(), v -> mapping.apply((MqlInteger) v));
+    }
+
+    /**
+     * A successful check for
+     * {@linkplain MqlValue#isStringOr(MqlString) being a string}
+     * produces a value specified by the {@code mapping}.
+     *
+     * @param mapping the mapping.
+     * @return the appended sequence of checks.
+     */
+    public BranchesIntermediary<T, R> isString(final Function<? super MqlString, ? extends R> mapping) {
+        Assertions.notNull("mapping", mapping);
+        return is(v -> mqlEx(v).isString(), v -> mapping.apply((MqlString) v));
+    }
+
+    /**
+     * A successful check for
+     * {@linkplain MqlValue#isDateOr(MqlDate) being a date}
+     * produces a value specified by the {@code mapping}.
+     *
+     * @param mapping the mapping.
+     * @return the appended sequence of checks.
+     */
+    public BranchesIntermediary<T, R> isDate(final Function<? super MqlDate, ? extends R> mapping) {
+        Assertions.notNull("mapping", mapping);
+        return is(v -> mqlEx(v).isDate(), v -> mapping.apply((MqlDate) v));
+    }
Warning: The type argument of the array is not + * enforced by the API. The use of this method is an + * unchecked assertion that the type argument is correct. + * + * @param mapping the mapping. + * @return the appended sequence of checks. + * @param the type of the elements of the resulting array. + */ + @SuppressWarnings("unchecked") + public BranchesIntermediary isArray(final Function, ? extends R> mapping) { + Assertions.notNull("mapping", mapping); + return is(v -> mqlEx(v).isArray(), v -> mapping.apply((MqlArray) v)); + } + + /** + * A successful check for + * {@linkplain MqlValue#isDocumentOr(MqlDocument) being a document} + * (or document-like value, see + * {@link MqlMap} and {@link MqlEntry}) + * produces a value specified by the {@code mapping}. + * + * @param mapping the mapping. + * @return the appended sequence of checks. + */ + public BranchesIntermediary isDocument(final Function mapping) { + Assertions.notNull("mapping", mapping); + return is(v -> mqlEx(v).isDocumentOrMap(), v -> mapping.apply((MqlDocument) v)); + } + + /** + * A successful check for + * {@linkplain MqlValue#isMapOr(MqlMap) being a map} + * (or map-like value, see + * {@link MqlDocument} and {@link MqlEntry}) + * produces a value specified by the {@code mapping}. + * + *

Warning: The type argument of the map is not + * enforced by the API. The use of this method is an + * unchecked assertion that the type argument is correct. + * + * @param mapping the mapping. + * @return the appended sequence of checks. + * @param the type of the values of the map. + */ + @SuppressWarnings("unchecked") + public BranchesIntermediary isMap(final Function, ? extends R> mapping) { + Assertions.notNull("mapping", mapping); + return is(v -> mqlEx(v).isDocumentOrMap(), v -> mapping.apply((MqlMap) v)); + } + + /** + * A successful check for + * {@linkplain MqlValues#ofNull() being the null value} + * produces a value specified by the {@code mapping}. + * + * @param mapping the mapping. + * @return the appended sequence of checks. + */ + public BranchesIntermediary isNull(final Function mapping) { + Assertions.notNull("mapping", mapping); + return is(v -> mqlEx(v).isNull(), v -> mapping.apply(v)); + } + + /** + * If no other check succeeds, + * produces a value specified by the {@code mapping}. + * + * @param mapping the mapping. + * @return the appended sequence of checks. + */ + public BranchesTerminal defaults(final Function mapping) { + Assertions.notNull("mapping", mapping); + return this.withDefault(value -> mapping.apply(value)); + } + +} diff --git a/driver-core/src/main/com/mongodb/client/model/mql/BranchesTerminal.java b/driver-core/src/main/com/mongodb/client/model/mql/BranchesTerminal.java new file mode 100644 index 00000000000..299942ebdbf --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/mql/BranchesTerminal.java @@ -0,0 +1,58 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.mql; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.lang.Nullable; + +import java.util.List; +import java.util.function.Function; + +/** + * See {@link Branches}. This is the terminal branch, to which no additional + * checks may be added, since the default value has been specified. + * + * @param the type of the values that may be checked. + * @param the type of the value produced.
+ * @since 4.9.0 + */ +@Beta(Reason.CLIENT) +public class BranchesTerminal { + + private final List>> branches; + + private final Function defaults; + + BranchesTerminal(final List>> branches, @Nullable final Function defaults) { + this.branches = branches; + this.defaults = defaults; + } + + BranchesTerminal withDefault(final Function defaults) { + return new BranchesTerminal<>(branches, defaults); + } + + List>> getBranches() { + return branches; + } + + @Nullable + Function getDefaults() { + return defaults; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/mql/ExpressionCodecProvider.java b/driver-core/src/main/com/mongodb/client/model/mql/ExpressionCodecProvider.java new file mode 100644 index 00000000000..893c57c5c86 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/mql/ExpressionCodecProvider.java @@ -0,0 +1,51 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.mql; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Immutable; +import com.mongodb.annotations.Reason; +import com.mongodb.lang.Nullable; +import org.bson.codecs.Codec; +import org.bson.codecs.configuration.CodecProvider; +import org.bson.codecs.configuration.CodecRegistry; + +/** + * Provides Codec instances for the {@link MqlValue MQL API}. + * + *
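+ * For illustration, a registry capable of encoding expressions might be + * assembled roughly as follows (a sketch; which additional providers are + * needed depends on the application): + * {@code CodecRegistries.fromProviders(new ExpressionCodecProvider(), new BsonValueCodecProvider())} + *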

Responsible for converting values and computations expressed using the + * driver's implementation of the {@link MqlValue MQL API} into the corresponding + * values and computations expressed in MQL BSON. Booleans are converted to BSON + * booleans, documents to BSON documents, and so on. The specific structure + * representing numbers is preserved where possible (that is, number literals + * specified as Java longs are converted into BSON int64, and so on). + * + * @since 4.9.0 + */ +@Beta(Reason.CLIENT) +@Immutable +public final class ExpressionCodecProvider implements CodecProvider { + @Override + @SuppressWarnings("unchecked") + @Nullable + public Codec get(final Class clazz, final CodecRegistry registry) { + if (MqlExpression.class.equals(clazz)) { + return (Codec) new MqlExpressionCodec(registry); + } + return null; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlArray.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlArray.java new file mode 100644 index 00000000000..e979b4687e7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlArray.java @@ -0,0 +1,360 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.mql; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +import java.util.function.Function; + +import static com.mongodb.client.model.mql.MqlValues.of; +import static com.mongodb.client.model.mql.MqlUnchecked.Unchecked.PRESENT; + +/** + * An array {@link MqlValue value} in the context of the MongoDB Query + * Language (MQL). An array is a finite, ordered collection of elements of a + * certain type. It is also known as a finite mathematical sequence. + * + * @param the type of the elements + * @since 4.9.0 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface MqlArray extends MqlValue { + + /** + * An array consisting of only those elements in {@code this} array that + * match the provided predicate. + * + * @param predicate the predicate to apply to each element to determine if + * it should be included. + * @return the resulting array. + */ + MqlArray filter(Function predicate); + + /** + * An array consisting of the results of applying the provided function to + * the elements of {@code this} array. + * + * @param in the function to apply to each element. + * @return the resulting array. + * @param the type of the elements of the resulting array. + */ + MqlArray map(Function in); + + /** + * The size of {@code this} array. + * + * @return the size. + */ + MqlInteger size(); + + /** + * Whether any value in {@code this} array satisfies the predicate. + * + * @param predicate the predicate. + * @return the resulting value. + */ + MqlBoolean any(Function predicate); + + /** + * Whether all values in {@code this} array satisfy the predicate. + * + * @param predicate the predicate. + * @return the resulting value. 
+ */ + MqlBoolean all(Function predicate); + + /** + * The sum of adding together all the values of {@code this} array, + * via the provided {@code mapper}. Returns 0 if the array is empty. + * + *
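+ * For example, assuming an {@code MqlArray<MqlDocument>} named {@code orders} + * whose elements carry a numeric {@code total} field (illustrative names only): + * {@code orders.sum(o -> o.getNumber("total", 0))} + *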

The mapper may be used to transform the values of {@code this} array + * into {@linkplain MqlNumber numbers}. If no transformation is + * necessary, then the identity function {@code array.sum(v -> v)} should + * be used. + * + * @param mapper the mapper function. + * @return the resulting value. + */ + MqlNumber sum(Function mapper); + + /** + * The product of multiplying together all the values of {@code this} array, + * via the provided {@code mapper}. Returns 1 if the array is empty. + * + *

The mapper may be used to transform the values of {@code this} array + * into {@linkplain MqlNumber numbers}. If no transformation is + * necessary, then the identity function {@code array.multiply(v -> v)} + * should be used. + * + * @param mapper the mapper function. + * @return the resulting value. + */ + MqlNumber multiply(Function mapper); + + /** + * The {@linkplain #gt(MqlValue) largest} of all the values of + * {@code this} array, or the {@code other} value if this array is empty. + * + * @mongodb.server.release 5.2 + * @param other the other value. + * @return the resulting value. + */ + T max(T other); + + /** + * The {@linkplain #lt(MqlValue) smallest} of all the values of + * {@code this} array, or the {@code other} value if this array is empty. + * + * @mongodb.server.release 5.2 + * @param other the other value. + * @return the resulting value. + */ + T min(T other); + + /** + * The {@linkplain #gt(MqlValue) largest} {@code n} elements of + * {@code this} array, or all elements if the array contains fewer than + * {@code n} elements. + * + * @mongodb.server.release 5.2 + * @param n the number of elements. + * @return the resulting value. + */ + MqlArray maxN(MqlInteger n); + + /** + * The {@linkplain #lt(MqlValue) smallest} {@code n} elements of + * {@code this} array, or all elements if the array contains fewer than + * {@code n} elements. + * + * @mongodb.server.release 5.2 + * @param n the number of elements. + * @return the resulting value. + */ + MqlArray minN(MqlInteger n); + + /** + * The string-concatenation of all the values of {@code this} array, + * via the provided {@code mapper}. Returns the empty string if the array + * is empty. + * + *

The mapper may be used to transform the values of {@code this} array + * into {@linkplain MqlString strings}. If no transformation is + * necessary, then the identity function {@code array.joinStrings(v -> v)} should + * be used. + * + * @param mapper the mapper function. + * @return the resulting value. + */ + MqlString joinStrings(Function mapper); + + /** + * The {@linkplain #concat(MqlArray) array-concatenation} + * of all the array values of {@code this} array, + * via the provided {@code mapper}. Returns the empty array if the array + * is empty. + * + *

The mapper may be used to transform the values of {@code this} array + * into {@linkplain MqlArray arrays}. If no transformation is + * necessary, then the identity function {@code array.concatArrays(v -> v)} should + * be used. + * + * @param mapper the mapper function. + * @return the resulting value. + * @param the type of the elements of the array. + */ + MqlArray concatArrays(Function> mapper); + + /** + * The {@linkplain #union(MqlArray) set-union} + * of all the array values of {@code this} array, + * via the provided {@code mapper}. Returns the empty array if the array + * is empty. + * + *

The mapper may be used to transform the values of {@code this} array + * into {@linkplain MqlArray arrays}. If no transformation is + * necessary, then the identity function {@code array.unionArrays(v -> v)} should + * be used. + * + * @param mapper the mapper function. + * @return the resulting value. + * @param the type of the elements of the array. + */ + MqlArray unionArrays(Function> mapper); + + /** + * The {@linkplain MqlMap map} value corresponding to the + * {@linkplain MqlEntry entry} values of {@code this} array, + * via the provided {@code mapper}. Returns the empty map if the array + * is empty. + * + *

The mapper may be used to transform the values of {@code this} array + * into {@linkplain MqlEntry entries}. If no transformation is + * necessary, then the identity function {@code array.asMap(v -> v)} should + * be used. + * + * @see MqlMap#entries() + * @param mapper the mapper function. + * @return the resulting value. + * @param the type of the resulting map's values. + */ + MqlMap asMap(Function> mapper); + + /** + * Returns the element at the provided index {@code i} for + * {@code this} array. + * + *
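+ * For example, {@code a.elementAt(a.size().subtract(1))} is a sketch of + * retrieving the final element of an array {@code a} that the caller knows + * to be non-empty (equivalent to {@link #last()}). + *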

Warning: The use of this method is an assertion that + * the index {@code i} is in bounds for the array. + * If the index is out of bounds for this array, then + * the behaviour of the API is not specified. + * + * @param i the index. + * @return the resulting value. + */ + @MqlUnchecked(PRESENT) + T elementAt(MqlInteger i); + + /** + * Returns the element at the provided index {@code i} for + * {@code this} array. + * + *

Warning: The use of this method is an assertion that + * the index {@code i} is in bounds for the array. + * If the index is out of bounds for this array, then + * the behaviour of the API is not specified. + * + * @param i the index. + * @return the resulting value. + */ + @MqlUnchecked(PRESENT) + default T elementAt(final int i) { + return this.elementAt(of(i)); + } + + /** + * Returns the first element of {@code this} array. + * + *

Warning: The use of this method is an assertion that + * the array is not empty. + * If the array is empty then the behaviour of the API is not specified. + * + * @mongodb.server.release 4.4 + * @return the resulting value. + */ + @MqlUnchecked(PRESENT) + T first(); + + /** + * Returns the last element of {@code this} array. + * + *
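+ * Where emptiness is possible, a fallback can be expressed by hand; as a + * sketch, for an {@code MqlArray<MqlInteger>} {@code a}, with {@code of} + * from {@link MqlValues}: + * {@code a.size().gt(of(0)).cond(a.last(), of(-1))} + *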

Warning: The use of this method is an assertion that + * the array is not empty. + * If the array is empty then the behaviour of the API is not specified. + * + * @mongodb.server.release 4.4 + * @return the resulting value. + */ + @MqlUnchecked(PRESENT) + T last(); + + /** + * Whether {@code this} array contains a value that is + * {@linkplain #eq equal} to the provided {@code value}. + * + * @param value the value. + * @return the resulting value. + */ + MqlBoolean contains(T value); + + /** + * The result of concatenating {@code this} array first with + * the {@code other} array ensuing. + * + * @param other the other array. + * @return the resulting array. + */ + MqlArray concat(MqlArray other); + + /** + * The subarray of {@code this} array, from the {@code start} index + * inclusive, and continuing for the specified {@code length}, up to + * the end of the array. + * + * @param start start index + * @param length length + * @return the resulting value + */ + MqlArray slice(MqlInteger start, MqlInteger length); + + /** + * The subarray of {@code this} array, from the {@code start} index + * inclusive, and continuing for the specified {@code length}, or + * to the end of the array. + * + * @param start start index + * @param length length + * @return the resulting value + */ + default MqlArray slice(final int start, final int length) { + return this.slice(of(start), of(length)); + } + + /** + * The set-union of {@code this} array and the {@code other} array ensuing, + * containing only the distinct values of both. + * No guarantee is made regarding order. + * + * @param other the other array. + * @return the resulting array. + */ + MqlArray union(MqlArray other); + + + /** + * An array containing only the distinct values of {@code this} array. + * No guarantee is made regarding order. + * + * @return the resulting value + */ + MqlArray distinct(); + + /** + * The result of passing {@code this} value to the provided function. + * Equivalent to {@code f.apply(this)}, and allows lambdas and static, + * user-defined functions to use the chaining syntax. + * + * @see MqlValue#passTo + * @param f the function to apply. + * @return the resulting value. + * @param the type of the resulting value. + */ + R passArrayTo(Function, ? extends R> f); + + /** + * The result of applying the provided switch mapping to {@code this} value. + * + * @see MqlValue#switchOn + * @param mapping the switch mapping. + * @return the resulting value. + * @param the type of the resulting value. + */ + R switchArrayOn(Function>, ? extends BranchesTerminal, ? extends R>> mapping); +} diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlBoolean.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlBoolean.java new file mode 100644 index 00000000000..28290cf25f4 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlBoolean.java @@ -0,0 +1,90 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model.mql; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +import java.util.function.Function; + +/** + * A boolean {@linkplain MqlValue value} in the context of the + * MongoDB Query Language (MQL). + * + * @since 4.9.0 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface MqlBoolean extends MqlValue { + + /** + * The logical negation of {@code this} value. + * + * @return the resulting value. + */ + MqlBoolean not(); + + /** + * The logical disjunction of {@code this} and the {@code other} value. + * + * @param other the other boolean value. + * @return the resulting value. + */ + MqlBoolean or(MqlBoolean other); + + /** + * The logical conjunction of {@code this} and the {@code other} value. + * + * @param other the other boolean value. + * @return the resulting value. + */ + MqlBoolean and(MqlBoolean other); + + /** + * The {@code ifTrue} value when {@code this} is true, + * and the {@code ifFalse} value otherwise. + * + * @param ifTrue the ifTrue value. + * @param ifFalse the ifFalse value. + * @return the resulting value. + * @param The type of the resulting value. + */ + T cond(T ifTrue, T ifFalse); + + /** + * The result of passing {@code this} value to the provided function. + * Equivalent to {@code f.apply(this)}, and allows lambdas and static, + * user-defined functions to use the chaining syntax. + * + * @see MqlValue#passTo + * @param f the function to apply. + * @return the resulting value. + * @param the type of the resulting value. + */ + R passBooleanTo(Function f); + + /** + * The result of applying the provided switch mapping to {@code this} value. + * + * @see MqlValue#switchOn + * @param mapping the switch mapping. + * @return the resulting value. + * @param the type of the resulting value. + */ + R switchBooleanOn(Function, ? extends BranchesTerminal> mapping); +} diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlDate.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlDate.java new file mode 100644 index 00000000000..b6600aaf689 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlDate.java @@ -0,0 +1,162 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.mql; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +import java.util.function.Function; + +/** + * A UTC date-time {@linkplain MqlValue value} in the context + * of the MongoDB Query Language (MQL). Tracks the number of + * milliseconds since the Unix epoch, and does not track the timezone.
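+ * As an illustrative sketch (the zone and format are hypothetical inputs), + * a date might be rendered with + * {@code d.asString(of("America/New_York"), of("%Y-%m-%d"))}, + * where {@code of} is the factory from {@link MqlValues}.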
+ * + * @mongodb.driver.manual reference/operator/aggregation/dateToString/ Format Specifiers, UTC Offset, and Olson Timezone Identifier + * @since 4.9.0 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface MqlDate extends MqlValue { + + /** + * The year of {@code this} date as determined by the provided + * {@code timezone}. + * + * @param timezone the UTC Offset or Olson Timezone Identifier. + * @return the resulting value. + */ + MqlInteger year(MqlString timezone); + + /** + * The month of {@code this} date as determined by the provided + * {@code timezone}, as an integer between 1 and 12. + * + * @param timezone the UTC Offset or Olson Timezone Identifier. + * @return the resulting value. + */ + MqlInteger month(MqlString timezone); + + /** + * The day of the month of {@code this} date as determined by the provided + * {@code timezone}, as an integer between 1 and 31. + * + * @param timezone the UTC Offset or Olson Timezone Identifier. + * @return the resulting value. + */ + MqlInteger dayOfMonth(MqlString timezone); + + /** + * The day of the week of {@code this} date as determined by the provided + * {@code timezone}, as an integer between 1 (Sunday) and 7 (Saturday). + * + * @param timezone the UTC Offset or Olson Timezone Identifier. + * @return the resulting value. + */ + MqlInteger dayOfWeek(MqlString timezone); + + /** + * The day of the year of {@code this} date as determined by the provided + * {@code timezone}, as an integer between 1 and 366. + * + * @param timezone the UTC Offset or Olson Timezone Identifier. + * @return the resulting value. + */ + MqlInteger dayOfYear(MqlString timezone); + + /** + * The hour of {@code this} date as determined by the provided + * {@code timezone}, as an integer between 0 and 23. + * + * @param timezone the UTC Offset or Olson Timezone Identifier. + * @return the resulting value. + */ + MqlInteger hour(MqlString timezone); + + /** + * The minute of {@code this} date as determined by the provided + * {@code timezone}, as an integer between 0 and 59. + * + * @param timezone the UTC Offset or Olson Timezone Identifier. + * @return the resulting value. + */ + MqlInteger minute(MqlString timezone); + + /** + * The second of {@code this} date as determined by the provided + * {@code timezone}, as an integer between 0 and 59, and 60 in the case + * of a leap second. + * + * @param timezone the UTC Offset or Olson Timezone Identifier. + * @return the resulting value. + */ + MqlInteger second(MqlString timezone); + + /** + * The week of the year of {@code this} date as determined by the provided + * {@code timezone}, as an integer between 0 and 53. + * + *

Weeks begin on Sundays, and week 1 begins with the first Sunday of the + * year. Days preceding the first Sunday of the year are in week 0. + * + * @param timezone the UTC Offset or Olson Timezone Identifier. + * @return the resulting value. + */ + MqlInteger week(MqlString timezone); + + /** + * The millisecond part of {@code this} date as determined by the provided + * {@code timezone}, as an integer between 0 and 999. + * + * @param timezone the UTC Offset or Olson Timezone Identifier. + * @return the resulting value. + */ + MqlInteger millisecond(MqlString timezone); + + /** + * The string representation of {@code this} date as determined by the + * provided {@code timezone}, and formatted according to the {@code format}. + * + * @param timezone the UTC Offset or Olson Timezone Identifier. + * @param format the format specifier. + * @return the resulting value. + */ + MqlString asString(MqlString timezone, MqlString format); + + /** + * The result of passing {@code this} value to the provided function. + * Equivalent to {@code f.apply(this)}, and allows lambdas and static, + * user-defined functions to use the chaining syntax. + * + * @see MqlValue#passTo + * @param f the function to apply. + * @return the resulting value. + * @param the type of the resulting value. + */ + R passDateTo(Function f); + + /** + * The result of applying the provided switch mapping to {@code this} value. + * + * @see MqlValue#switchOn + * @param mapping the switch mapping. + * @return the resulting value. + * @param the type of the resulting value. + */ + R switchDateOn(Function, ? extends BranchesTerminal> mapping); +} diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlDocument.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlDocument.java new file mode 100644 index 00000000000..c60fde8f82a --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlDocument.java @@ -0,0 +1,548 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.mql; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; +import com.mongodb.assertions.Assertions; +import org.bson.conversions.Bson; + +import java.time.Instant; +import java.util.function.Function; + +import static com.mongodb.client.model.mql.MqlValues.of; +import static com.mongodb.client.model.mql.MqlValues.ofMap; +import static com.mongodb.client.model.mql.MqlUnchecked.Unchecked.PRESENT; +import static com.mongodb.client.model.mql.MqlUnchecked.Unchecked.TYPE; +import static com.mongodb.client.model.mql.MqlUnchecked.Unchecked.TYPE_ARGUMENT; + +/** + * A document {@link MqlValue value} in the context of the MongoDB Query + * Language (MQL). A document is a finite set of fields, where the field + * name is a string, together with a value of any other + * {@linkplain MqlValue type in the type hierarchy}. + * No field name is repeated. 
+ * + * @since 4.9.0 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface MqlDocument extends MqlValue { + + /** + * Whether {@code this} document has a field with the provided + * {@code fieldName} (if a field is set to null, it is present). + * + * @mongodb.server.release 5.0 + * @param fieldName the name of the field. + * @return the resulting value. + */ + MqlBoolean hasField(String fieldName); + + /** + * Returns a document with the same fields as {@code this} document, but + * with the {@code fieldName} field set to the specified {@code value}. + * + *

This does not affect the original document. + * + *
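+ * As a sketch (field names illustrative), updates may be chained, each + * producing a new document value: + * {@code doc.setField("price", of(100)).setField("active", of(true))} + *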

Warning: Users should take care to assign values, such that the types + * of those values correspond to the types of ensuing {@code get...} + * invocations, since this API has no way of verifying this correspondence. + * + * @mongodb.server.release 5.0 + * @param fieldName the name of the field. + * @param value the value. + * @return the resulting document. + */ + MqlDocument setField(String fieldName, MqlValue value); + + /** + * Returns a document with the same fields as {@code this} document, but + * excluding the field with the specified {@code fieldName}. + * + *

This does not affect the original document. + * + * @mongodb.server.release 5.0 + * @param fieldName the name of the field. + * @return the resulting document. + */ + MqlDocument unsetField(String fieldName); + + /** + * Returns the {@linkplain MqlValue} value of the field + * with the provided {@code fieldName}. + * + *

Warning: Use of this method is an assertion that the document + * {@linkplain #hasField(String) has} the named field. + * + * @mongodb.server.release 5.0 + * @param fieldName the name of the field. + * @return the resulting value. + */ + @MqlUnchecked(PRESENT) + MqlValue getField(String fieldName); + + /** + * Returns the {@linkplain MqlBoolean boolean} value of the field + * with the provided {@code fieldName}. + * + *
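+ * Where the field might be absent or of another type, prefer the overload + * that takes a fallback, for example the hypothetical + * {@code doc.getBoolean("active", false)}. + *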

Warning: The type and presence of the resulting value is not + * enforced by the API. The use of this method is an + * unchecked assertion that the document + * {@linkplain #hasField(String) has} the named field and + * the field value is of the specified type. + * + * @mongodb.server.release 5.0 + * @param fieldName the name of the field. + * @return the resulting value. + */ + @MqlUnchecked({PRESENT, TYPE}) + MqlBoolean getBoolean(String fieldName); + + /** + * Returns the {@linkplain MqlBoolean boolean} value of the field + * with the provided {@code fieldName}, + * or the {@code other} value if the field is not a boolean + * or if the document {@linkplain #hasField} no such field. + * + * @mongodb.server.release 5.0 + * @param fieldName the name of the field. + * @param other the other value. + * @return the resulting value. + */ + MqlBoolean getBoolean(String fieldName, MqlBoolean other); + + /** + * Returns the {@linkplain MqlBoolean boolean} value of the field + * with the provided {@code fieldName}, + * or the {@code other} value if the field is not a boolean + * or if the document {@linkplain #hasField} no such field. + * + * @mongodb.server.release 5.0 + * @param fieldName the name of the field. + * @param other the other value. + * @return the resulting value. + */ + default MqlBoolean getBoolean(final String fieldName, final boolean other) { + Assertions.notNull("fieldName", fieldName); + return getBoolean(fieldName, of(other)); + } + + /** + * Returns the {@linkplain MqlNumber number} value of the field + * with the provided {@code fieldName}. + * + *

Warning: The type and presence of the resulting value is not + * enforced by the API. The use of this method is an + * unchecked assertion that the document + * {@linkplain #hasField(String) has} the named field and + * the field value is of the specified type. + * + * @mongodb.server.release 5.0 + * @param fieldName the name of the field. + * @return the resulting value. + */ + @MqlUnchecked({PRESENT, TYPE}) + MqlNumber getNumber(String fieldName); + + /** + * Returns the {@linkplain MqlNumber number} value of the field + * with the provided {@code fieldName}, + * or the {@code other} value if the field is not a number + * or if the document {@linkplain #hasField} no such field. + * + * @mongodb.server.release 5.0 + * @param fieldName the name of the field. + * @param other the other value. + * @return the resulting value. + */ + MqlNumber getNumber(String fieldName, MqlNumber other); + + /** + * Returns the {@linkplain MqlNumber number} value of the field + * with the provided {@code fieldName}, + * or the {@code other} value if the field is not a number + * or if the document {@linkplain #hasField} no such field. + * + * @mongodb.server.release 5.0 + * @param fieldName the name of the field. + * @param other the other value. + * @return the resulting value. + */ + default MqlNumber getNumber(final String fieldName, final Number other) { + Assertions.notNull("fieldName", fieldName); + Assertions.notNull("other", other); + return getNumber(fieldName, MqlValues.numberToMqlNumber(other)); + } + + /** + * Returns the {@linkplain MqlInteger integer} value of the field + * with the provided {@code fieldName}. + * + *

Warning: The type and presence of the resulting value is not + * enforced by the API. The use of this method is an + * unchecked assertion that the document + * {@linkplain #hasField(String) has} the named field and + * the field value is of the specified type. + * + * @mongodb.server.release 5.0 + * @param fieldName the name of the field. + * @return the resulting value. + */ + @MqlUnchecked({PRESENT, TYPE}) + MqlInteger getInteger(String fieldName); + + /** + * Returns the {@linkplain MqlInteger integer} value of the field + * with the provided {@code fieldName}, + * or the {@code other} value if the field is not an integer + * or if the document {@linkplain #hasField} no such field. + * + * @mongodb.server.release 5.0 + * @param fieldName the name of the field. + * @param other the other value. + * @return the resulting value. + */ + MqlInteger getInteger(String fieldName, MqlInteger other); + + /** + * Returns the {@linkplain MqlInteger integer} value of the field + * with the provided {@code fieldName}, + * or the {@code other} value if the field is not an integer + * or if the document {@linkplain #hasField} no such field. + * + * @mongodb.server.release 5.0 + * @param fieldName the name of the field. + * @param other the other value. + * @return the resulting value. + */ + default MqlInteger getInteger(final String fieldName, final int other) { + Assertions.notNull("fieldName", fieldName); + return getInteger(fieldName, of(other)); + } + + /** + * Returns the {@linkplain MqlInteger integer} value of the field + * with the provided {@code fieldName}, + * or the {@code other} value if the field is not an integer + * or if the document {@linkplain #hasField} no such field. + * + * @mongodb.server.release 5.0 + * @param fieldName the name of the field. + * @param other the other value. + * @return the resulting value. + */ + default MqlInteger getInteger(final String fieldName, final long other) { + Assertions.notNull("fieldName", fieldName); + return getInteger(fieldName, of(other)); + } + + /** + * Returns the {@linkplain MqlString string} value of the field + * with the provided {@code fieldName}. + * + *

Warning: The type and presence of the resulting value is not + * enforced by the API. The use of this method is an + * unchecked assertion that the document + * {@linkplain #hasField(String) has} the named field and + * the field value is of the specified type. + * + * @mongodb.server.release 5.0 + * @param fieldName the name of the field. + * @return the resulting value. + */ + @MqlUnchecked({PRESENT, TYPE}) + MqlString getString(String fieldName); + + /** + * Returns the {@linkplain MqlString string} value of the field + * with the provided {@code fieldName}, + * or the {@code other} value if the field is not a string + * or if the document {@linkplain #hasField} no such field. + * + * @mongodb.server.release 5.0 + * @param fieldName the name of the field. + * @param other the other value. + * @return the resulting value. + */ + MqlString getString(String fieldName, MqlString other); + + /** + * Returns the {@linkplain MqlString string} value of the field + * with the provided {@code fieldName}, + * or the {@code other} value if the field is not a string + * or if the document {@linkplain #hasField} no such field. + * + * @mongodb.server.release 5.0 + * @param fieldName the name of the field. + * @param other the other value. + * @return the resulting value. + */ + default MqlString getString(final String fieldName, final String other) { + Assertions.notNull("fieldName", fieldName); + Assertions.notNull("other", other); + return getString(fieldName, of(other)); + } + + /** + * Returns the {@linkplain MqlDate date} value of the field + * with the provided {@code fieldName}. + * + *

Warning: The type and presence of the resulting value is not + * enforced by the API. The use of this method is an + * unchecked assertion that the document + * {@linkplain #hasField(String) has} the named field and + * the field value is of the specified type. + * + * @mongodb.server.release 5.0 + * @param fieldName the name of the field. + * @return the resulting value. + */ + @MqlUnchecked({PRESENT, TYPE}) + MqlDate getDate(String fieldName); + + /** + * Returns the {@linkplain MqlDate date} value of the field + * with the provided {@code fieldName}, + * or the {@code other} value if the field is not a date + * or if the document {@linkplain #hasField} no such field. + * + * @mongodb.server.release 5.0 + * @param fieldName the name of the field. + * @param other the other value. + * @return the resulting value. + */ + MqlDate getDate(String fieldName, MqlDate other); + + /** + * Returns the {@linkplain MqlDate date} value of the field + * with the provided {@code fieldName}, + * or the {@code other} value if the field is not a date + * or if the document {@linkplain #hasField} no such field. + * + * @mongodb.server.release 5.0 + * @param fieldName the name of the field. + * @param other the other value. + * @return the resulting value. + */ + default MqlDate getDate(final String fieldName, final Instant other) { + Assertions.notNull("fieldName", fieldName); + Assertions.notNull("other", other); + return getDate(fieldName, of(other)); + } + + /** + * Returns the {@linkplain MqlDocument document} value of the field + * with the provided {@code fieldName}. + * + *

Warning: The type and presence of the resulting value is not + * enforced by the API. The use of this method is an + * unchecked assertion that the document + * {@linkplain #hasField(String) has} the named field and + * the field value is of the specified type. + * + * @mongodb.server.release 5.0 + * @param fieldName the name of the field. + * @return the resulting value. + */ + @MqlUnchecked({PRESENT, TYPE}) + MqlDocument getDocument(String fieldName); + + /** + * Returns the {@linkplain MqlDocument document} value of the field + * with the provided {@code fieldName}, + * or the {@code other} value + * if the document {@linkplain #hasField} no such field, + * or if the specified field is not a (child) document + * (or other {@linkplain MqlValue#isDocumentOr document-like value}). + * + * @mongodb.server.release 5.0 + * @param fieldName the name of the field. + * @param other the other value. + * @return the resulting value. + */ + MqlDocument getDocument(String fieldName, MqlDocument other); + + /** + * Returns the {@linkplain MqlDocument document} value of the field + * with the provided {@code fieldName}, + * or the {@code other} value + * if the document {@linkplain #hasField} no such field, + * or if the specified field is not a (child) document + * (or other {@linkplain MqlValue#isDocumentOr document-like value}). + * + * @mongodb.server.release 5.0 + * @param fieldName the name of the field. + * @param other the other value. + * @return the resulting value. + */ + default MqlDocument getDocument(final String fieldName, final Bson other) { + Assertions.notNull("fieldName", fieldName); + Assertions.notNull("other", other); + return getDocument(fieldName, of(other)); + } + + /** + * Returns the {@linkplain MqlMap map} value of the field + * with the provided {@code fieldName}. + * + *
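+ * Because the type argument cannot be verified, it is typically supplied + * explicitly at the call site, as in this hypothetical sketch: + * {@code doc.<MqlInteger>getMap("scores")} + *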

Warning: The type and presence of the resulting value is not + * enforced by the API. The use of this method is an + * unchecked assertion that the document + * {@linkplain #hasField(String) has} the named field, + * and the field value is of the specified raw type, + * and the field value's type has the specified type argument. + * + * @mongodb.server.release 5.0 + * @param fieldName the name of the field. + * @return the resulting value. + * @param the type. + */ + @MqlUnchecked({PRESENT, TYPE}) + MqlMap<@MqlUnchecked(TYPE_ARGUMENT) T> getMap(String fieldName); + + + /** + * Returns the {@linkplain MqlMap map} value of the field + * with the provided {@code fieldName}, + * or the {@code other} value + * if the document {@linkplain #hasField} no such field, + * or if the specified field is not a map + * (or other {@linkplain MqlValue#isMapOr map-like value}). + * + *

Warning: The type argument of the resulting value is not + * enforced by the API. The use of this method is an + * unchecked assertion that the type argument is correct. + * + * @mongodb.server.release 5.0 + * @param fieldName the name of the field. + * @param other the other value. + * @return the resulting value. + * @param the type. + */ + MqlMap getMap(String fieldName, MqlMap<@MqlUnchecked(TYPE_ARGUMENT) ? extends T> other); + + /** + * Returns the {@linkplain MqlMap map} value of the field + * with the provided {@code fieldName}, + * or the {@code other} value + * if the document {@linkplain #hasField} no such field, + * or if the specified field is not a map + * (or other {@linkplain MqlValue#isMapOr map-like value}). + * + *

Warning: The type argument of the resulting value is not + * enforced by the API. The use of this method is an + * unchecked assertion that the type argument is correct. + * + * @mongodb.server.release 5.0 + * @param fieldName the name of the field. + * @param other the other value. + * @return the resulting value. + * @param the type. + */ + default MqlMap<@MqlUnchecked(TYPE_ARGUMENT) T> getMap(final String fieldName, final Bson other) { + Assertions.notNull("fieldName", fieldName); + Assertions.notNull("other", other); + return getMap(fieldName, ofMap(other)); + } + + /** + * Returns the {@linkplain MqlArray array} value of the field + * with the provided {@code fieldName}. + * + *
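+ * As with the map accessors, the element type is typically given explicitly + * at the call site, as in the hypothetical {@code doc.<MqlString>getArray("tags")}. + *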

Warning: The type and presence of the resulting value is not + * enforced by the API. The use of this method is an + * unchecked assertion that the document + * {@linkplain #hasField(String) has} the named field, + * and the field value is of the specified raw type, + * and the field value's type has the specified type argument. + * + * @mongodb.server.release 5.0 + * @param fieldName the name of the field. + * @return the resulting value. + * @param the type. + */ + @MqlUnchecked({PRESENT, TYPE}) + MqlArray<@MqlUnchecked(TYPE_ARGUMENT) T> getArray(String fieldName); + + /** + * Returns the {@linkplain MqlArray array} value of the field + * with the provided {@code fieldName}, + * or the {@code other} value if the field is not an array + * or if the document {@linkplain #hasField} no such field. + * + *

Warning: The type argument of the resulting value is not + * enforced by the API. The use of this method is an + * unchecked assertion that the type argument is correct. + * + * @mongodb.server.release 5.0 + * @param fieldName the name of the field. + * @param other the other value. + * @return the resulting value. + * @param the type. + */ + MqlArray<@MqlUnchecked(TYPE_ARGUMENT) T> getArray(String fieldName, MqlArray other); + + /** + * Returns a document with the same fields as {@code this} document, but + * with any fields present in the {@code other} document overwritten with + * the fields of that other document. That is, fields from both this and the + * other document are merged, with the other document having priority. + * + *
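+ * As an illustrative sketch, {@code doc.merge(of(new Document("status", "archived")))} + * yields a document equal to {@code doc} except that {@code status} is set or + * overwritten; here {@code of} accepts any {@code Bson} document, such as + * {@code org.bson.Document}. + *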

This does not affect the original document. + * + * @param other the other document. + * @return the resulting value. + */ + MqlDocument merge(MqlDocument other); + + /** + * {@code this} document as a {@linkplain MqlMap map}. + * + *

Warning: The type argument of the resulting value is not + * enforced by the API. The use of this method is an + * unchecked assertion that the type argument is correct. + * + * @return the resulting value. + * @param the type. + */ + + MqlMap<@MqlUnchecked(TYPE_ARGUMENT) T> asMap(); + + /** + * The result of passing {@code this} value to the provided function. + * Equivalent to {@code f.apply(this)}, and allows lambdas and static, + * user-defined functions to use the chaining syntax. + * + * @see MqlValue#passTo + * @param f the function to apply. + * @return the resulting value. + * @param the type of the resulting value. + */ + R passDocumentTo(Function f); + + /** + * The result of applying the provided switch mapping to {@code this} value. + * + * @see MqlValue#switchOn + * @param mapping the switch mapping. + * @return the resulting value. + * @param the type of the resulting value. + */ + R switchDocumentOn(Function, ? extends BranchesTerminal> mapping); +} diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlEntry.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlEntry.java new file mode 100644 index 00000000000..dffa35405f1 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlEntry.java @@ -0,0 +1,76 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.mql; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * A map entry {@linkplain MqlValue value} in the context + * of the MongoDB Query Language (MQL). An entry has a + * {@linkplain MqlString string} key and some + * {@linkplain MqlValue value}. Entries are used with + * {@linkplain MqlMap maps}. + * + *
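+ * As a sketch, entries typically arise from {@link MqlMap#entries()} and may + * be rewritten and reassembled, e.g. for a hypothetical + * {@code MqlMap<MqlInteger>} named {@code m}: + * {@code m.entries().map(e -> e.setValue(e.getValue().multiply(2))).asMap(e -> e)} + *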

Entries are {@linkplain MqlValue#isDocumentOr document-like} and + * {@linkplain MqlValue#isMapOr map-like}, unless the method returning the + * entry specifies otherwise. + * + * @param The type of the value + * @since 4.9.0 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface MqlEntry extends MqlValue { + + /** + * The key of {@code this} entry. + * + * @mongodb.server.release 5.0 + * @return the key. + */ + MqlString getKey(); + + /** + * The value of {@code this} entry. + * + * @mongodb.server.release 5.0 + * @return the value. + */ + T getValue(); + + /** + * An entry with the same key as {@code this} entry, and the + * specified {@code value}. + * + * @mongodb.server.release 5.0 + * @param value the value. + * @return the resulting entry. + */ + MqlEntry setValue(T value); + + /** + * An entry with the same value as {@code this} entry, and the + * specified {@code key}. + * + * @mongodb.server.release 5.0 + * @param key the key. + * @return the resulting entry. + */ + MqlEntry setKey(MqlString key); +} diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlExpression.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlExpression.java new file mode 100644 index 00000000000..d4169c6028c --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlExpression.java @@ -0,0 +1,1117 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.mql; + +import com.mongodb.assertions.Assertions; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.codecs.configuration.CodecRegistry; + +import java.util.Collections; +import java.util.function.BinaryOperator; +import java.util.function.Function; + +import static com.mongodb.assertions.Assertions.fail; +import static com.mongodb.client.model.mql.MqlValues.of; +import static com.mongodb.client.model.mql.MqlValues.ofNull; +import static com.mongodb.client.model.mql.MqlValues.ofStringArray; + +final class MqlExpression + implements MqlValue, MqlBoolean, MqlInteger, MqlNumber, + MqlString, MqlDate, MqlDocument, MqlArray, MqlMap, MqlEntry { + + private final Function fn; + + MqlExpression(final Function fn) { + this.fn = fn; + } + + /** + * Exposes the evaluated BsonValue so that this mql expression may be used + * in aggregations. Non-public, as it is intended to be used only by the + * {@link MqlExpressionCodec}. 
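+ * For instance, {@code of(1).eq(of(2))} renders here to the BSON fragment + * { "$eq" : [1, 2] } (a sketch; the exact form is an internal detail and + * subject to change).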
+ */ + BsonValue toBsonValue(final CodecRegistry codecRegistry) { + return fn.apply(codecRegistry).bsonValue; + } + + private AstPlaceholder astDoc(final String name, final BsonDocument value) { + return new AstPlaceholder(new BsonDocument(name, value)); + } + + @Override + public MqlString getKey() { + return new MqlExpression<>(getFieldInternal("k")); + } + + @Override + public T getValue() { + return newMqlExpression(getFieldInternal("v")); + } + + @Override + public MqlEntry setValue(final T value) { + Assertions.notNull("value", value); + return setFieldInternal("v", value); + } + + @Override + public MqlEntry setKey(final MqlString key) { + Assertions.notNull("key", key); + return setFieldInternal("k", key); + } + + static final class AstPlaceholder { + private final BsonValue bsonValue; + + AstPlaceholder(final BsonValue bsonValue) { + this.bsonValue = bsonValue; + } + } + + private Function ast(final String name) { + return (cr) -> new AstPlaceholder(new BsonDocument(name, this.toBsonValue(cr))); + } + + // in cases where we must wrap the first argument in an array + private Function astWrapped(final String name) { + return (cr) -> new AstPlaceholder(new BsonDocument(name, + new BsonArray(Collections.singletonList(this.toBsonValue(cr))))); + } + + private Function ast(final String name, final MqlValue param1) { + return (cr) -> { + BsonArray value = new BsonArray(); + value.add(this.toBsonValue(cr)); + value.add(toBsonValue(cr, param1)); + return new AstPlaceholder(new BsonDocument(name, value)); + }; + } + + private Function ast(final String name, final MqlValue param1, final MqlValue param2) { + return (cr) -> { + BsonArray value = new BsonArray(); + value.add(this.toBsonValue(cr)); + value.add(toBsonValue(cr, param1)); + value.add(toBsonValue(cr, param2)); + return new AstPlaceholder(new BsonDocument(name, value)); + }; + } + + /** + * Takes an expression and converts it to a BsonValue. MqlExpression will be + * the only implementation of Expression and all subclasses, so this will + * not mis-cast an expression as anything else. + */ + static BsonValue toBsonValue(final CodecRegistry cr, final MqlValue mqlValue) { + return ((MqlExpression) mqlValue).toBsonValue(cr); + } + + /** + * Converts an MqlExpression to any subtype of Expression. Users must not + * extend Expression or its subtypes, so MqlExpression will implement any R. + */ + @SuppressWarnings("unchecked") + R assertImplementsAllExpressions() { + return (R) this; + } + + private static R newMqlExpression(final Function ast) { + return new MqlExpression<>(ast).assertImplementsAllExpressions(); + } + + private R variable(final String variable) { + return newMqlExpression((cr) -> new AstPlaceholder(new BsonString(variable))); + } + + /** @see MqlBoolean */ + + @Override + public MqlBoolean not() { + return new MqlExpression<>(ast("$not")); + } + + @Override + public MqlBoolean or(final MqlBoolean other) { + Assertions.notNull("other", other); + return new MqlExpression<>(ast("$or", other)); + } + + @Override + public MqlBoolean and(final MqlBoolean other) { + Assertions.notNull("other", other); + return new MqlExpression<>(ast("$and", other)); + } + + @Override + public R cond(final R ifTrue, final R ifFalse) { + Assertions.notNull("ifTrue", ifTrue); + Assertions.notNull("ifFalse", ifFalse); + return newMqlExpression(ast("$cond", ifTrue, ifFalse)); + } + + /** @see MqlDocument */ + + private Function getFieldInternal(final String fieldName) { + return (cr) -> { + BsonValue value = fieldName.startsWith("$") + ? 
new BsonDocument("$literal", new BsonString(fieldName)) + : new BsonString(fieldName); + return astDoc("$getField", new BsonDocument() + .append("input", this.fn.apply(cr).bsonValue) + .append("field", value)); + }; + } + + @Override + public MqlValue getField(final String fieldName) { + Assertions.notNull("fieldName", fieldName); + return new MqlExpression<>(getFieldInternal(fieldName)); + } + + @Override + public MqlBoolean getBoolean(final String fieldName) { + Assertions.notNull("fieldName", fieldName); + return new MqlExpression<>(getFieldInternal(fieldName)); + } + + @Override + public MqlBoolean getBoolean(final String fieldName, final MqlBoolean other) { + Assertions.notNull("fieldName", fieldName); + Assertions.notNull("other", other); + return getBoolean(fieldName).isBooleanOr(other); + } + + @Override + public MqlNumber getNumber(final String fieldName) { + Assertions.notNull("fieldName", fieldName); + return new MqlExpression<>(getFieldInternal(fieldName)); + } + + @Override + public MqlNumber getNumber(final String fieldName, final MqlNumber other) { + Assertions.notNull("fieldName", fieldName); + Assertions.notNull("other", other); + return getNumber(fieldName).isNumberOr(other); + } + + @Override + public MqlInteger getInteger(final String fieldName) { + Assertions.notNull("fieldName", fieldName); + return new MqlExpression<>(getFieldInternal(fieldName)); + } + + @Override + public MqlInteger getInteger(final String fieldName, final MqlInteger other) { + Assertions.notNull("fieldName", fieldName); + Assertions.notNull("other", other); + return getInteger(fieldName).isIntegerOr(other); + } + + @Override + public MqlString getString(final String fieldName) { + Assertions.notNull("fieldName", fieldName); + return new MqlExpression<>(getFieldInternal(fieldName)); + } + + @Override + public MqlString getString(final String fieldName, final MqlString other) { + Assertions.notNull("fieldName", fieldName); + Assertions.notNull("other", other); + return getString(fieldName).isStringOr(other); + } + + @Override + public MqlDate getDate(final String fieldName) { + Assertions.notNull("fieldName", fieldName); + return new MqlExpression<>(getFieldInternal(fieldName)); + } + + @Override + public MqlDate getDate(final String fieldName, final MqlDate other) { + Assertions.notNull("fieldName", fieldName); + Assertions.notNull("other", other); + return getDate(fieldName).isDateOr(other); + } + + @Override + public MqlDocument getDocument(final String fieldName) { + Assertions.notNull("fieldName", fieldName); + return new MqlExpression<>(getFieldInternal(fieldName)); + } + + @Override + public MqlMap getMap(final String fieldName) { + Assertions.notNull("fieldName", fieldName); + return new MqlExpression<>(getFieldInternal(fieldName)); + } + + @Override + public MqlMap getMap(final String fieldName, final MqlMap other) { + Assertions.notNull("fieldName", fieldName); + Assertions.notNull("other", other); + return getMap(fieldName).isMapOr(other); + } + + @Override + public MqlDocument getDocument(final String fieldName, final MqlDocument other) { + Assertions.notNull("fieldName", fieldName); + Assertions.notNull("other", other); + return getDocument(fieldName).isDocumentOr(other); + } + + @Override + public MqlArray getArray(final String fieldName) { + Assertions.notNull("fieldName", fieldName); + return new MqlExpression<>(getFieldInternal(fieldName)); + } + + @Override + public MqlArray getArray(final String fieldName, final MqlArray other) { + Assertions.notNull("fieldName", fieldName); + 
Assertions.notNull("other", other); + return getArray(fieldName).isArrayOr(other); + } + + @Override + public MqlDocument merge(final MqlDocument other) { + Assertions.notNull("other", other); + return new MqlExpression<>(ast("$mergeObjects", other)); + } + + @Override + public MqlDocument setField(final String fieldName, final MqlValue value) { + Assertions.notNull("fieldName", fieldName); + Assertions.notNull("value", value); + return setFieldInternal(fieldName, value); + } + + private MqlExpression setFieldInternal(final String fieldName, final MqlValue value) { + Assertions.notNull("fieldName", fieldName); + return newMqlExpression((cr) -> astDoc("$setField", new BsonDocument() + .append("field", new BsonString(fieldName)) + .append("input", this.toBsonValue(cr)) + .append("value", toBsonValue(cr, value)))); + } + + @Override + public MqlDocument unsetField(final String fieldName) { + Assertions.notNull("fieldName", fieldName); + return newMqlExpression((cr) -> astDoc("$unsetField", new BsonDocument() + .append("field", new BsonString(fieldName)) + .append("input", this.toBsonValue(cr)))); + } + + /** @see MqlValue */ + + @Override + public R passTo(final Function f) { + Assertions.notNull("f", f); + return f.apply(this.assertImplementsAllExpressions()); + } + + @Override + public R switchOn(final Function, ? extends BranchesTerminal> mapping) { + Assertions.notNull("mapping", mapping); + return switchMapInternal(this.assertImplementsAllExpressions(), mapping.apply(new Branches<>())); + } + + @Override + public R passBooleanTo(final Function f) { + Assertions.notNull("f", f); + return f.apply(this.assertImplementsAllExpressions()); + } + + @Override + public R switchBooleanOn(final Function, ? extends BranchesTerminal> mapping) { + Assertions.notNull("mapping", mapping); + return switchMapInternal(this.assertImplementsAllExpressions(), mapping.apply(new Branches<>())); + } + + @Override + public R passIntegerTo(final Function f) { + Assertions.notNull("f", f); + return f.apply(this.assertImplementsAllExpressions()); + } + + @Override + public R switchIntegerOn(final Function, ? extends BranchesTerminal> mapping) { + Assertions.notNull("mapping", mapping); + return switchMapInternal(this.assertImplementsAllExpressions(), mapping.apply(new Branches<>())); + } + + @Override + public R passNumberTo(final Function f) { + Assertions.notNull("f", f); + return f.apply(this.assertImplementsAllExpressions()); + } + + @Override + public R switchNumberOn(final Function, ? extends BranchesTerminal> mapping) { + Assertions.notNull("mapping", mapping); + return switchMapInternal(this.assertImplementsAllExpressions(), mapping.apply(new Branches<>())); + } + + @Override + public R passStringTo(final Function f) { + Assertions.notNull("f", f); + return f.apply(this.assertImplementsAllExpressions()); + } + + @Override + public R switchStringOn(final Function, ? extends BranchesTerminal> mapping) { + Assertions.notNull("mapping", mapping); + return switchMapInternal(this.assertImplementsAllExpressions(), mapping.apply(new Branches<>())); + } + + @Override + public R passDateTo(final Function f) { + Assertions.notNull("f", f); + return f.apply(this.assertImplementsAllExpressions()); + } + + @Override + public R switchDateOn(final Function, ? extends BranchesTerminal> mapping) { + Assertions.notNull("mapping", mapping); + return switchMapInternal(this.assertImplementsAllExpressions(), mapping.apply(new Branches<>())); + } + + @Override + public R passArrayTo(final Function, ? 
extends R> f) { + Assertions.notNull("f", f); + return f.apply(this.assertImplementsAllExpressions()); + } + + @Override + public R switchArrayOn(final Function>, ? extends BranchesTerminal, ? extends R>> mapping) { + Assertions.notNull("mapping", mapping); + return switchMapInternal(this.assertImplementsAllExpressions(), mapping.apply(new Branches<>())); + } + + @Override + public R passMapTo(final Function, ? extends R> f) { + Assertions.notNull("f", f); + return f.apply(this.assertImplementsAllExpressions()); + } + + @Override + public R switchMapOn(final Function>, ? extends BranchesTerminal, ? extends R>> mapping) { + Assertions.notNull("mapping", mapping); + return switchMapInternal(this.assertImplementsAllExpressions(), mapping.apply(new Branches<>())); + } + + @Override + public R passDocumentTo(final Function f) { + Assertions.notNull("f", f); + return f.apply(this.assertImplementsAllExpressions()); + } + + @Override + public R switchDocumentOn(final Function, ? extends BranchesTerminal> mapping) { + Assertions.notNull("mapping", mapping); + return switchMapInternal(this.assertImplementsAllExpressions(), mapping.apply(new Branches<>())); + } + + private R0 switchMapInternal( + final T0 value, final BranchesTerminal construct) { + return newMqlExpression((cr) -> { + BsonArray branches = new BsonArray(); + for (Function> fn : construct.getBranches()) { + SwitchCase result = fn.apply(value); + branches.add(new BsonDocument() + .append("case", toBsonValue(cr, result.getCaseValue())) + .append("then", toBsonValue(cr, result.getThenValue()))); + } + BsonDocument switchBson = new BsonDocument().append("branches", branches); + if (construct.getDefaults() != null) { + switchBson = switchBson.append("default", toBsonValue(cr, construct.getDefaults().apply(value))); + } + return astDoc("$switch", switchBson); + }); + } + + @Override + public MqlBoolean eq(final MqlValue other) { + Assertions.notNull("other", other); + return new MqlExpression<>(ast("$eq", other)); + } + + @Override + public MqlBoolean ne(final MqlValue other) { + Assertions.notNull("other", other); + return new MqlExpression<>(ast("$ne", other)); + } + + @Override + public MqlBoolean gt(final MqlValue other) { + Assertions.notNull("other", other); + return new MqlExpression<>(ast("$gt", other)); + } + + @Override + public MqlBoolean gte(final MqlValue other) { + Assertions.notNull("other", other); + return new MqlExpression<>(ast("$gte", other)); + } + + @Override + public MqlBoolean lt(final MqlValue other) { + Assertions.notNull("other", other); + return new MqlExpression<>(ast("$lt", other)); + } + + @Override + public MqlBoolean lte(final MqlValue other) { + Assertions.notNull("other", other); + return new MqlExpression<>(ast("$lte", other)); + } + + MqlBoolean isBoolean() { + return new MqlExpression<>(astWrapped("$type")).eq(of("bool")); + } + + @Override + public MqlBoolean isBooleanOr(final MqlBoolean other) { + Assertions.notNull("other", other); + return this.isBoolean().cond(this, other); + } + + MqlBoolean isNumber() { + return new MqlExpression<>(astWrapped("$isNumber")); + } + + @Override + public MqlNumber isNumberOr(final MqlNumber other) { + Assertions.notNull("other", other); + return this.isNumber().cond(this, other); + } + + MqlBoolean isInteger() { + return switchOn(on -> on + .isNumber(v -> v.round().eq(v)) + .defaults(v -> of(false))); + } + + @Override + public MqlInteger isIntegerOr(final MqlInteger other) { + Assertions.notNull("other", other); + /* + The server does not evaluate both branches of 
and/or/cond unless needed. + However, the server has a pipeline optimization stage prior to + evaluation that does attempt to optimize both branches, and fails with + "Failed to optimize pipeline" when there is a problem arising from the + use of literals and typed expressions. Using "switch" avoids this, + otherwise we could just use: + this.isNumber().and(this.eq(this.round())) + */ + return this.switchOn(on -> on + .isNumber(v -> (MqlInteger) v.round().eq(v).cond(v, other)) + .defaults(v -> other)); + } + + MqlBoolean isString() { + return new MqlExpression<>(astWrapped("$type")).eq(of("string")); + } + + @Override + public MqlString isStringOr(final MqlString other) { + Assertions.notNull("other", other); + return this.isString().cond(this, other); + } + + MqlBoolean isDate() { + return ofStringArray("date").contains(new MqlExpression<>(astWrapped("$type"))); + } + + @Override + public MqlDate isDateOr(final MqlDate other) { + Assertions.notNull("other", other); + return this.isDate().cond(this, other); + } + + MqlBoolean isArray() { + return new MqlExpression<>(astWrapped("$isArray")); + } + + /** + * checks if array (but cannot check type) + * user asserts array is of type R + * + * @param other + * @return + * @param + */ + @SuppressWarnings("unchecked") + @Override + public MqlArray isArrayOr(final MqlArray other) { + Assertions.notNull("other", other); + return (MqlArray) this.isArray().cond(this.assertImplementsAllExpressions(), other); + } + + MqlBoolean isDocumentOrMap() { + return new MqlExpression<>(astWrapped("$type")).eq(of("object")); + } + + @Override + public R isDocumentOr(final R other) { + Assertions.notNull("other", other); + return this.isDocumentOrMap().cond(this.assertImplementsAllExpressions(), other); + } + + @Override + public MqlMap isMapOr(final MqlMap other) { + Assertions.notNull("other", other); + MqlExpression isMap = (MqlExpression) this.isDocumentOrMap(); + return newMqlExpression(isMap.ast("$cond", this.assertImplementsAllExpressions(), other)); + } + + MqlBoolean isNull() { + return this.eq(ofNull()); + } + + @Override + public MqlString asString() { + return new MqlExpression<>(astWrapped("$toString")); + } + + private Function convertInternal(final String to, final MqlValue other) { + return (cr) -> astDoc("$convert", new BsonDocument() + .append("input", this.fn.apply(cr).bsonValue) + .append("onError", toBsonValue(cr, other)) + .append("to", new BsonString(to))); + } + + @Override + public MqlInteger parseInteger() { + MqlValue asLong = new MqlExpression<>(ast("$toLong")); + return new MqlExpression<>(convertInternal("int", asLong)); + } + + /** @see MqlArray */ + + @Override + public MqlArray map(final Function in) { + Assertions.notNull("in", in); + T varThis = variable("$$this"); + return new MqlExpression<>((cr) -> astDoc("$map", new BsonDocument() + .append("input", this.toBsonValue(cr)) + .append("in", toBsonValue(cr, in.apply(varThis))))); + } + + @Override + public MqlArray filter(final Function predicate) { + Assertions.notNull("predicate", predicate); + T varThis = variable("$$this"); + return new MqlExpression<>((cr) -> astDoc("$filter", new BsonDocument() + .append("input", this.toBsonValue(cr)) + .append("cond", toBsonValue(cr, predicate.apply(varThis))))); + } + + MqlArray sort() { + return new MqlExpression<>((cr) -> astDoc("$sortArray", new BsonDocument() + .append("input", this.toBsonValue(cr)) + .append("sortBy", new BsonInt32(1)))); + } + + private T reduce(final T initialValue, final BinaryOperator in) { + T varThis = 
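+        /* Editorial sketch (illustrative): map and filter above bind the
+           server-side $$this variable, so a chain such as
+               current().getArray("scores")
+                       .filter(v -> v.gt(of(60)))
+                       .map(v -> v.isNumberOr(of(0)).multiply(2))
+           is expected to render as nested $filter and $map documents
+           over "$$this". */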
variable("$$this"); + T varValue = variable("$$value"); + return newMqlExpression((cr) -> astDoc("$reduce", new BsonDocument() + .append("input", this.toBsonValue(cr)) + .append("initialValue", toBsonValue(cr, initialValue)) + .append("in", toBsonValue(cr, in.apply(varValue, varThis))))); + } + + @Override + public MqlBoolean any(final Function predicate) { + Assertions.notNull("predicate", predicate); + MqlExpression array = (MqlExpression) this.map(predicate); + return array.reduce(of(false), (a, b) -> a.or(b)); + } + + @Override + public MqlBoolean all(final Function predicate) { + Assertions.notNull("predicate", predicate); + MqlExpression array = (MqlExpression) this.map(predicate); + return array.reduce(of(true), (a, b) -> a.and(b)); + } + + @SuppressWarnings("unchecked") + @Override + public MqlNumber sum(final Function mapper) { + Assertions.notNull("mapper", mapper); + // no sum that returns IntegerExpression, both have same erasure + MqlExpression array = (MqlExpression) this.map(mapper); + return array.reduce(of(0), (a, b) -> a.add(b)); + } + + @SuppressWarnings("unchecked") + @Override + public MqlNumber multiply(final Function mapper) { + Assertions.notNull("mapper", mapper); + MqlExpression array = (MqlExpression) this.map(mapper); + return array.reduce(of(1), (MqlNumber a, MqlNumber b) -> a.multiply(b)); + } + + @Override + public T max(final T other) { + Assertions.notNull("other", other); + return this.size().eq(of(0)).cond(other, this.maxN(of(1)).first()); + } + + @Override + public T min(final T other) { + Assertions.notNull("other", other); + return this.size().eq(of(0)).cond(other, this.minN(of(1)).first()); + } + + @Override + public MqlArray maxN(final MqlInteger n) { + Assertions.notNull("n", n); + return newMqlExpression((CodecRegistry cr) -> astDoc("$maxN", new BsonDocument() + .append("input", toBsonValue(cr, this)) + .append("n", toBsonValue(cr, n)))); + } + + @Override + public MqlArray minN(final MqlInteger n) { + Assertions.notNull("n", n); + return newMqlExpression((CodecRegistry cr) -> astDoc("$minN", new BsonDocument() + .append("input", toBsonValue(cr, this)) + .append("n", toBsonValue(cr, n)))); + } + + @Override + public MqlString joinStrings(final Function mapper) { + Assertions.notNull("mapper", mapper); + MqlExpression array = (MqlExpression) this.map(mapper); + return array.reduce(of(""), (a, b) -> a.append(b)); + } + + @SuppressWarnings("unchecked") + @Override + public MqlArray concatArrays(final Function> mapper) { + Assertions.notNull("mapper", mapper); + MqlExpression> array = (MqlExpression>) this.map(mapper); + return array.reduce(MqlValues.ofArray(), (a, b) -> a.concat(b)); + } + + @SuppressWarnings("unchecked") + @Override + public MqlArray unionArrays(final Function> mapper) { + Assertions.notNull("mapper", mapper); + Assertions.notNull("mapper", mapper); + MqlExpression> array = (MqlExpression>) this.map(mapper); + return array.reduce(MqlValues.ofArray(), (a, b) -> a.union(b)); + } + + @Override + public MqlInteger size() { + return new MqlExpression<>(astWrapped("$size")); + } + + @Override + public T elementAt(final MqlInteger i) { + Assertions.notNull("i", i); + return new MqlExpression<>(ast("$arrayElemAt", i)) + .assertImplementsAllExpressions(); + } + + @Override + public T first() { + return new MqlExpression<>(astWrapped("$first")) + .assertImplementsAllExpressions(); + } + + @Override + public T last() { + return new MqlExpression<>(astWrapped("$last")) + .assertImplementsAllExpressions(); + } + + @Override + public MqlBoolean 
contains(final T value) { + Assertions.notNull("value", value); + String name = "$in"; + return new MqlExpression<>((cr) -> { + BsonArray array = new BsonArray(); + array.add(toBsonValue(cr, value)); + array.add(this.toBsonValue(cr)); + return new AstPlaceholder(new BsonDocument(name, array)); + }).assertImplementsAllExpressions(); + } + + @Override + public MqlArray concat(final MqlArray other) { + Assertions.notNull("other", other); + return new MqlExpression<>(ast("$concatArrays", other)) + .assertImplementsAllExpressions(); + } + + @Override + public MqlArray slice(final MqlInteger start, final MqlInteger length) { + Assertions.notNull("start", start); + Assertions.notNull("length", length); + return new MqlExpression<>(ast("$slice", start, length)) + .assertImplementsAllExpressions(); + } + + @Override + public MqlArray union(final MqlArray other) { + Assertions.notNull("other", other); + return new MqlExpression<>(ast("$setUnion", other)) + .assertImplementsAllExpressions(); + } + + @Override + public MqlArray distinct() { + return new MqlExpression<>(astWrapped("$setUnion")); + } + + + /** @see MqlInteger + * @see MqlNumber */ + + @Override + public MqlInteger multiply(final MqlNumber other) { + Assertions.notNull("other", other); + return newMqlExpression(ast("$multiply", other)); + } + + @Override + public MqlNumber add(final MqlNumber other) { + Assertions.notNull("other", other); + return new MqlExpression<>(ast("$add", other)); + } + + @Override + public MqlNumber divide(final MqlNumber other) { + Assertions.notNull("other", other); + return new MqlExpression<>(ast("$divide", other)); + } + + @Override + public MqlNumber max(final MqlNumber other) { + Assertions.notNull("other", other); + return new MqlExpression<>(ast("$max", other)); + } + + @Override + public MqlNumber min(final MqlNumber other) { + Assertions.notNull("other", other); + return new MqlExpression<>(ast("$min", other)); + } + + @Override + public MqlInteger round() { + return new MqlExpression<>(ast("$round")); + } + + @Override + public MqlNumber round(final MqlInteger place) { + Assertions.notNull("place", place); + return new MqlExpression<>(ast("$round", place)); + } + + @Override + public MqlInteger multiply(final MqlInteger other) { + Assertions.notNull("other", other); + return new MqlExpression<>(ast("$multiply", other)); + } + + @Override + public MqlInteger abs() { + return newMqlExpression(ast("$abs")); + } + + @Override + public MqlDate millisecondsAsDate() { + return newMqlExpression(ast("$toDate")); + } + + @Override + public MqlNumber subtract(final MqlNumber other) { + Assertions.notNull("other", other); + return new MqlExpression<>(ast("$subtract", other)); + } + + @Override + public MqlInteger add(final MqlInteger other) { + Assertions.notNull("other", other); + return new MqlExpression<>(ast("$add", other)); + } + + @Override + public MqlInteger subtract(final MqlInteger other) { + Assertions.notNull("other", other); + return new MqlExpression<>(ast("$subtract", other)); + } + + @Override + public MqlInteger max(final MqlInteger other) { + Assertions.notNull("other", other); + return new MqlExpression<>(ast("$max", other)); + } + + @Override + public MqlInteger min(final MqlInteger other) { + Assertions.notNull("other", other); + return new MqlExpression<>(ast("$min", other)); + } + + /** @see MqlDate */ + + private MqlExpression usingTimezone(final String name, final MqlString timezone) { + return new MqlExpression<>((cr) -> astDoc(name, new BsonDocument() + .append("date", 
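+                /* Editorial sketch (illustrative): the arithmetic above
+                   composes left to right, so of(1).add(2).multiply(3) renders
+                   roughly as {"$multiply": [{"$add": [1, 2]}, 3]}, i.e. 9. */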
this.toBsonValue(cr)) + .append("timezone", toBsonValue(cr, timezone)))); + } + + @Override + public MqlInteger year(final MqlString timezone) { + Assertions.notNull("timezone", timezone); + return usingTimezone("$year", timezone); + } + + @Override + public MqlInteger month(final MqlString timezone) { + Assertions.notNull("timezone", timezone); + return usingTimezone("$month", timezone); + } + + @Override + public MqlInteger dayOfMonth(final MqlString timezone) { + Assertions.notNull("timezone", timezone); + return usingTimezone("$dayOfMonth", timezone); + } + + @Override + public MqlInteger dayOfWeek(final MqlString timezone) { + Assertions.notNull("timezone", timezone); + return usingTimezone("$dayOfWeek", timezone); + } + + @Override + public MqlInteger dayOfYear(final MqlString timezone) { + Assertions.notNull("timezone", timezone); + return usingTimezone("$dayOfYear", timezone); + } + + @Override + public MqlInteger hour(final MqlString timezone) { + Assertions.notNull("timezone", timezone); + return usingTimezone("$hour", timezone); + } + + @Override + public MqlInteger minute(final MqlString timezone) { + Assertions.notNull("timezone", timezone); + return usingTimezone("$minute", timezone); + } + + @Override + public MqlInteger second(final MqlString timezone) { + Assertions.notNull("timezone", timezone); + return usingTimezone("$second", timezone); + } + + @Override + public MqlInteger week(final MqlString timezone) { + Assertions.notNull("timezone", timezone); + return usingTimezone("$week", timezone); + } + + @Override + public MqlInteger millisecond(final MqlString timezone) { + Assertions.notNull("timezone", timezone); + return usingTimezone("$millisecond", timezone); + } + + @Override + public MqlString asString(final MqlString timezone, final MqlString format) { + Assertions.notNull("timezone", timezone); + Assertions.notNull("format", format); + return newMqlExpression((cr) -> astDoc("$dateToString", new BsonDocument() + .append("date", this.toBsonValue(cr)) + .append("format", toBsonValue(cr, format)) + .append("timezone", toBsonValue(cr, timezone)))); + } + + public MqlString asString(final MqlString timezone) { + // Given that server versions < 7.1 return a wrong format, not implementing this method helps prevent users + // from encountering the bug, described in DRIVERS-2620, by avoiding the method that requires a format as a parameter. 
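+        // Callers can still format explicitly via the two-argument overload,
+        // e.g. (illustrative): date.asString(of("UTC"), of("%Y-%m-%dT%H:%M:%S.%LZ")),
+        // where date is a hypothetical MqlDate.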
+ throw fail(); + } + + @Override + public MqlDate parseDate(final MqlString timezone, final MqlString format) { + Assertions.notNull("timezone", timezone); + Assertions.notNull("format", format); + return newMqlExpression((cr) -> astDoc("$dateFromString", new BsonDocument() + .append("dateString", this.toBsonValue(cr)) + .append("format", toBsonValue(cr, format)) + .append("timezone", toBsonValue(cr, timezone)))); + } + + @Override + public MqlDate parseDate(final MqlString format) { + Assertions.notNull("format", format); + return newMqlExpression((cr) -> astDoc("$dateFromString", new BsonDocument() + .append("dateString", this.toBsonValue(cr)) + .append("format", toBsonValue(cr, format)))); + } + + @Override + public MqlDate parseDate() { + return newMqlExpression((cr) -> astDoc("$dateFromString", new BsonDocument() + .append("dateString", this.toBsonValue(cr)))); + } + + /** @see MqlString */ + + @Override + public MqlString toLower() { + return new MqlExpression<>(ast("$toLower")); + } + + @Override + public MqlString toUpper() { + return new MqlExpression<>(ast("$toUpper")); + } + + @Override + public MqlString append(final MqlString other) { + Assertions.notNull("other", other); + return new MqlExpression<>(ast("$concat", other)); + } + + @Override + public MqlInteger length() { + return new MqlExpression<>(ast("$strLenCP")); + } + + @Override + public MqlInteger lengthBytes() { + return new MqlExpression<>(ast("$strLenBytes")); + } + + @Override + public MqlString substr(final MqlInteger start, final MqlInteger length) { + Assertions.notNull("start", start); + Assertions.notNull("length", length); + return new MqlExpression<>(ast("$substrCP", start, length)); + } + + @Override + public MqlString substrBytes(final MqlInteger start, final MqlInteger length) { + Assertions.notNull("start", start); + Assertions.notNull("length", length); + return new MqlExpression<>(ast("$substrBytes", start, length)); + } + + @Override + public MqlBoolean has(final MqlString key) { + Assertions.notNull("key", key); + return get(key).ne(ofRem()); + } + + + @Override + public MqlBoolean hasField(final String fieldName) { + Assertions.notNull("fieldName", fieldName); + return this.has(of(fieldName)); + } + + static R ofRem() { + // $$REMOVE is intentionally not exposed to users + return new MqlExpression<>((cr) -> new MqlExpression.AstPlaceholder(new BsonString("$$REMOVE"))) + .assertImplementsAllExpressions(); + } + + /** @see MqlMap + * @see MqlEntry */ + + @Override + public T get(final MqlString key) { + Assertions.notNull("key", key); + return newMqlExpression((cr) -> astDoc("$getField", new BsonDocument() + .append("input", this.fn.apply(cr).bsonValue) + .append("field", toBsonValue(cr, key)))); + } + + @SuppressWarnings("unchecked") + @Override + public T get(final MqlString key, final T other) { + Assertions.notNull("key", key); + Assertions.notNull("other", other); + MqlExpression mqlExpression = (MqlExpression) get(key); + return (T) mqlExpression.eq(ofRem()).cond(other, mqlExpression); + } + + @Override + public MqlMap set(final MqlString key, final T value) { + Assertions.notNull("key", key); + Assertions.notNull("value", value); + return newMqlExpression((cr) -> astDoc("$setField", new BsonDocument() + .append("field", toBsonValue(cr, key)) + .append("input", this.toBsonValue(cr)) + .append("value", toBsonValue(cr, value)))); + } + + @Override + public MqlMap unset(final MqlString key) { + Assertions.notNull("key", key); + return newMqlExpression((cr) -> astDoc("$unsetField", new 
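+        /* Editorial sketch (illustrative): map updates are non-mutating, e.g.
+               MqlMap<MqlInteger> updated = someMap.set(of("a"), of(1)).unset(of("b"));
+           yields a new map rendered as nested $setField and $unsetField
+           documents; someMap is a hypothetical MqlMap<MqlInteger>. */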
BsonDocument() + .append("field", toBsonValue(cr, key)) + .append("input", this.toBsonValue(cr)))); + } + + @Override + public MqlMap merge(final MqlMap other) { + Assertions.notNull("other", other); + return new MqlExpression<>(ast("$mergeObjects", other)); + } + + @Override + public MqlArray> entries() { + return newMqlExpression(ast("$objectToArray")); + } + + @Override + public MqlMap asMap( + final Function> mapper) { + Assertions.notNull("mapper", mapper); + @SuppressWarnings("unchecked") + MqlExpression> array = (MqlExpression>) this.map(mapper); + return newMqlExpression(array.astWrapped("$arrayToObject")); + } + + @SuppressWarnings("unchecked") + @Override + public MqlMap asMap() { + return (MqlMap) this; + } + + @SuppressWarnings("unchecked") + @Override + public R asDocument() { + return (R) this; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlExpressionCodec.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlExpressionCodec.java new file mode 100644 index 00000000000..70f4329b6d0 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlExpressionCodec.java @@ -0,0 +1,52 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.mql; + +import org.bson.BsonReader; +import org.bson.BsonValue; +import org.bson.BsonWriter; +import org.bson.codecs.Codec; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.configuration.CodecRegistry; + +@SuppressWarnings("rawtypes") +final class MqlExpressionCodec implements Codec { + private final CodecRegistry codecRegistry; + + MqlExpressionCodec(final CodecRegistry codecRegistry) { + this.codecRegistry = codecRegistry; + } + + @Override + public MqlExpression decode(final BsonReader reader, final DecoderContext decoderContext) { + throw new UnsupportedOperationException("Decoding to an MqlExpression is not supported"); + } + + @Override + @SuppressWarnings({"unchecked"}) + public void encode(final BsonWriter writer, final MqlExpression value, final EncoderContext encoderContext) { + BsonValue bsonValue = value.toBsonValue(codecRegistry); + Codec codec = codecRegistry.get(bsonValue.getClass()); + codec.encode(writer, bsonValue, encoderContext); + } + + @Override + public Class getEncoderClass() { + return MqlExpression.class; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlInteger.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlInteger.java new file mode 100644 index 00000000000..46380b57773 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlInteger.java @@ -0,0 +1,146 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.mql; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +import java.util.function.Function; + +/** + * An integer {@linkplain MqlValue value} in the context of the MongoDB Query + * Language (MQL). Integers are a subset of {@linkplain MqlNumber numbers}, + * and so, for example, the integer 0 and the number 0 are + * {@linkplain #eq(MqlValue) equal}. + * + * @since 4.9.0 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface MqlInteger extends MqlNumber { + + /** + * The product of multiplying {@code this} and the {@code other} value. + * + * @param other the other value. + * @return the resulting value. + */ + MqlInteger multiply(MqlInteger other); + + /** + * The product of multiplying {@code this} and the {@code other} value. + * + * @param other the other value. + * @return the resulting value. + */ + default MqlInteger multiply(final int other) { + return this.multiply(MqlValues.of(other)); + } + + /** + * The sum of adding {@code this} and the {@code other} value. + * + * @param other the other value. + * @return the resulting value. + */ + MqlInteger add(MqlInteger other); + + /** + * The sum of adding {@code this} and the {@code other} value. + * + * @param other the other value. + * @return the resulting value. + */ + default MqlInteger add(final int other) { + return this.add(MqlValues.of(other)); + } + + /** + * The difference of subtracting the {@code other} value from {@code this}. + * + * @param other the other value. + * @return the resulting value. + */ + MqlInteger subtract(MqlInteger other); + + /** + * The difference of subtracting the {@code other} value from {@code this}. + * + * @param other the other value. + * @return the resulting value. + */ + default MqlInteger subtract(final int other) { + return this.subtract(MqlValues.of(other)); + } + + /** + * The {@linkplain #gt(MqlValue) larger} value of {@code this} + * and the {@code other} value. + * + * @param other the other value. + * @return the resulting value. + */ + MqlInteger max(MqlInteger other); + + /** + * The {@linkplain #lt(MqlValue) smaller} value of {@code this} + * and the {@code other} value. + * + * @param other the other value. + * @return the resulting value. + */ + MqlInteger min(MqlInteger other); + + /** + * The absolute value of {@code this} value. + * + * @return the resulting value. + */ + MqlInteger abs(); + + /** + * The {@linkplain MqlDate date} corresponding to {@code this} value + * when taken to be the number of milliseconds since the Unix epoch. + * + * @mongodb.server.release 4.0 + * @return the resulting value. + */ + MqlDate millisecondsAsDate(); + + /** + * The result of passing {@code this} value to the provided function. + * Equivalent to {@code f.apply(this)}, and allows lambdas and static, + * user-defined functions to use the chaining syntax. + * + * @see MqlValue#passTo + * @param f the function to apply. + * @return the resulting value. + * @param the type of the resulting value. 
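+     * <p>For example (an illustrative sketch):
+     * {@code of(10).passIntegerTo(v -> v.multiply(v))} squares the value.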
+ */ + R passIntegerTo(Function f); + + /** + * The result of applying the provided switch mapping to {@code this} value. + * + * @see MqlValue#switchOn + * @param mapping the switch mapping. + * @return the resulting value. + * @param the type of the resulting value. + */ + R switchIntegerOn(Function, ? extends BranchesTerminal> mapping); +} diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlMap.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlMap.java new file mode 100644 index 00000000000..58a279c89c7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlMap.java @@ -0,0 +1,224 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.mql; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; +import com.mongodb.assertions.Assertions; + +import java.util.function.Function; + +import static com.mongodb.client.model.mql.MqlValues.of; +import static com.mongodb.client.model.mql.MqlUnchecked.Unchecked.PRESENT; + +/** + * A map {@link MqlValue value} in the context of the MongoDB Query + * Language (MQL). A map is a finite set of + * {@link MqlEntry entries} of a certain type. + * No entry key is repeated. It is a mapping from keys to values. + * + * @param the type of the entry values + * @since 4.9.0 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface MqlMap extends MqlValue { + + /** + * Whether {@code this} map has a value (including null) for + * the provided key. + * + * @param key the key. + * @return the resulting value. + */ + MqlBoolean has(MqlString key); + + /** + * Whether {@code this} map has a value (including null) for + * the provided key. + * + * @param key the key. + * @return the resulting value. + */ + default MqlBoolean has(final String key) { + Assertions.notNull("key", key); + return has(of(key)); + } + + /** + * The value corresponding to the provided key. + * + *
<p>
Warning: The use of this method is an unchecked assertion that + * the key is present (which may be confirmed via {@link #has}). See + * {@link #get(MqlString, MqlValue)} for a typesafe variant. + * + * @param key the key. + * @return the value. + */ + @MqlUnchecked(PRESENT) + T get(MqlString key); + + /** + * The value corresponding to the provided key. + * + *
<p>
Warning: The use of this method is an unchecked assertion that + * the key is present (which may be confirmed via {@link #has}). See + * {@link #get(MqlString, MqlValue)} for a typesafe variant. + * + * @param key the key. + * @return the value. + */ + @MqlUnchecked(PRESENT) + default T get(final String key) { + Assertions.notNull("key", key); + return get(of(key)); + } + + /** + * The value corresponding to the provided {@code key}, or the + * {@code other} value if an entry for the key is not + * {@linkplain #has(MqlString) present}. + * + * @param key the key. + * @param other the other value. + * @return the resulting value. + */ + T get(MqlString key, T other); + + /** + * The value corresponding to the provided {@code key}, or the + * {@code other} value if an entry for the key is not + * {@linkplain #has(MqlString) present}. + * + * @param key the key. + * @param other the other value. + * @return the resulting value. + */ + default T get(final String key, final T other) { + Assertions.notNull("key", key); + Assertions.notNull("other", other); + return get(of(key), other); + } + + /** + * Returns a map with the same entries as {@code this} map, but with + * the specified {@code key} set to the specified {@code value}. + * + *
<p>
This does not affect the original map. + * + * @param key the key. + * @param value the value. + * @return the resulting value. + */ + MqlMap set(MqlString key, T value); + + /** + * Returns a map with the same entries as {@code this} map, but with + * the specified {@code key} set to the specified {@code value}. + * + *
<p>
This does not affect the original map. + * + * @param key the key. + * @param value the value. + * @return the resulting value. + */ + default MqlMap set(final String key, final T value) { + Assertions.notNull("key", key); + Assertions.notNull("value", value); + return set(of(key), value); + } + + /** + * Returns a map with the same entries as {@code this} map, but which + * {@linkplain #has(MqlString) has} no entry with the specified + * {@code key}. + * + *
<p>
This does not affect the original map. + * + * @param key the key. + * @return the resulting value. + */ + MqlMap unset(MqlString key); + + /** + * Returns a map with the same entries as {@code this} map, but which + * {@linkplain #has(MqlString) has} no entry with the specified + * {@code key}. + * + *
<p>
This does not affect the original map. + * + * @param key the key. + * @return the resulting value. + */ + default MqlMap unset(final String key) { + Assertions.notNull("key", key); + return unset(of(key)); + } + + /** + * Returns a map with the same entries as {@code this} map, but with + * any keys present in the {@code other} map overwritten with the + * values of that other map. That is, entries from both this and the + * other map are merged, with the other map having priority. + * + *
<p>
This does not affect the original map. + * + * @param other the other map. + * @return the resulting value. + */ + MqlMap merge(MqlMap other); + + /** + * The {@linkplain MqlEntry entries} of this map as an array. + * No guarantee is made regarding order. + * + * @see MqlArray#asMap + * @return the resulting value. + */ + MqlArray> entries(); + + /** + * {@code this} map as a {@linkplain MqlDocument document}. + * + * @return the resulting value. + * @param the resulting type. + */ + R asDocument(); + + /** + * The result of passing {@code this} value to the provided function. + * Equivalent to {@code f.apply(this)}, and allows lambdas and static, + * user-defined functions to use the chaining syntax. + * + * @see MqlValue#passTo + * @param f the function to apply. + * @return the resulting value. + * @param the type of the resulting value. + */ + R passMapTo(Function, ? extends R> f); + + /** + * The result of applying the provided switch mapping to {@code this} value. + * + * @see MqlValue#switchOn + * @param mapping the switch mapping. + * @return the resulting value. + * @param the type of the resulting value. + */ + R switchMapOn(Function>, ? extends BranchesTerminal, ? extends R>> mapping); +} diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlNumber.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlNumber.java new file mode 100644 index 00000000000..7b6590b7624 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlNumber.java @@ -0,0 +1,183 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.mql; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; +import com.mongodb.assertions.Assertions; + +import java.util.function.Function; + +/** + * A number {@linkplain MqlValue value} in the context of the MongoDB Query + * Language (MQL). {@linkplain MqlInteger Integers} are a subset of + * numbers, and so, for example, the integer 0 and the number 0 are + * {@linkplain #eq(MqlValue) equal}. + * + * @since 4.9.0 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface MqlNumber extends MqlValue { + + /** + * The product of multiplying {@code this} and the {@code other} value. + * + * @param other the other value. + * @return the resulting value. + */ + MqlNumber multiply(MqlNumber other); + + /** + * The product of multiplying {@code this} and the {@code other} value. + * + * @param other the other value. + * @return the resulting value. + */ + default MqlNumber multiply(final Number other) { + Assertions.notNull("other", other); + return this.multiply(MqlValues.numberToMqlNumber(other)); + } + + /** + * The quotient of dividing {@code this} value by the {@code other} value. + * This is not integer division: dividing {@code 1} by {@code 2} will + * always yield {@code 0.5}. + * + * @param other the other value. + * @return the resulting value. 
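+     * <p>For example (an illustrative sketch): {@code of(1).divide(of(2))}
+     * yields {@code 0.5}.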
+ */ + MqlNumber divide(MqlNumber other); + + /** + * The quotient of dividing {@code this} value by the {@code other} value. + * This is not integer division: dividing {@code 1} by {@code 2} will + * always yield {@code 0.5}. + * + * @param other the other value. + * @return the resulting value. + */ + default MqlNumber divide(final Number other) { + Assertions.notNull("other", other); + return this.divide(MqlValues.numberToMqlNumber(other)); + } + + /** + * The sum of adding {@code this} and the {@code other} value. + * + * @param other the other value. + * @return the resulting value. + */ + MqlNumber add(MqlNumber other); + + /** + * The sum of adding {@code this} and the {@code other} value. + * + * @param other the other value. + * @return the resulting value. + */ + default MqlNumber add(final Number other) { + Assertions.notNull("other", other); + return this.add(MqlValues.numberToMqlNumber(other)); + } + + /** + * The difference of subtracting the {@code other} value from {@code this}. + * + * @param other the other value. + * @return the resulting value. + */ + MqlNumber subtract(MqlNumber other); + + /** + * The difference of subtracting the {@code other} value from {@code this}. + * + * @param other the other value. + * @return the resulting value. + */ + default MqlNumber subtract(final Number other) { + Assertions.notNull("other", other); + return this.subtract(MqlValues.numberToMqlNumber(other)); + } + + /** + * The {@linkplain #gt(MqlValue) larger} value of {@code this} + * and the {@code other} value. + * + * @param other the other value. + * @return the resulting value. + */ + MqlNumber max(MqlNumber other); + + /** + * The {@linkplain #lt(MqlValue) smaller} value of {@code this} + * and the {@code other} value. + * + * @param other the other value. + * @return the resulting value. + */ + MqlNumber min(MqlNumber other); + + /** + * The integer result of rounding {@code this} to the nearest even value. + * + * @mongodb.server.release 4.2 + * @return the resulting value. + */ + MqlInteger round(); + + /** + * The result of rounding {@code this} to {@code place} decimal places + * using the "half to even" approach. + * + * @param place the decimal place to round to, from -20 to 100, exclusive. + * Positive values specify the place to the right of the + * decimal point, while negative values, to the left. + * @return the resulting value. + */ + MqlNumber round(MqlInteger place); + + /** + * The absolute value of {@code this} value. + * + * @return the resulting value. + */ + MqlNumber abs(); + + /** + * The result of passing {@code this} value to the provided function. + * Equivalent to {@code f.apply(this)}, and allows lambdas and static, + * user-defined functions to use the chaining syntax. + * + * @see MqlValue#passTo + * @param f the function to apply. + * @return the resulting value. + * @param the type of the resulting value. + */ + R passNumberTo(Function f); + + /** + * The result of applying the provided switch mapping to {@code this} value. + * + * @see MqlValue#switchOn + * @param mapping the switch mapping. + * @return the resulting value. + * @param the type of the resulting value. + */ + R switchNumberOn(Function, ? 
extends BranchesTerminal> mapping); +} diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlString.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlString.java new file mode 100644 index 00000000000..e5b6e8fa8bc --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlString.java @@ -0,0 +1,225 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.mql; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +import java.util.function.Function; + +import static com.mongodb.client.model.mql.MqlValues.of; + +/** + * A string {@linkplain MqlValue value} in the context of the MongoDB Query + * Language (MQL). + * + * @since 4.9.0 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface MqlString extends MqlValue { + + /** + * Converts {@code this} string to lowercase. + * + * @return the resulting value. + */ + MqlString toLower(); + + /** + * Converts {@code this} string to uppercase. + * + * @return the resulting value. + */ + MqlString toUpper(); + + /** + * The result of appending the {@code other} string to the end of + * {@code this} string (strict concatenation). + * + * @param other the other value. + * @return the resulting value. + */ + MqlString append(MqlString other); + + /** + * The number of Unicode code points in {@code this} string. + * + * @return the resulting value. + */ + MqlInteger length(); + + /** + * The number of UTF-8 encoded bytes in {@code this} string. + * + * @return the resulting value. + */ + MqlInteger lengthBytes(); + + /** + * The substring of {@code this} string, from the {@code start} index + * inclusive, and including the specified {@code length}, up to + * the end of the string. + * + *
<p>
Warning: the index position is in Unicode code points, not in + * UTF-8 encoded bytes. + * + * @param start the start index in Unicode code points. + * @param length the length in Unicode code points. + * @return the resulting value. + */ + MqlString substr(MqlInteger start, MqlInteger length); + + /** + * The substring of {@code this} string, from the {@code start} index + * inclusive, and including the specified {@code length}, up to + * the end of the string. + * + *
<p>
Warning: the index position is in Unicode code points, not in + * UTF-8 encoded bytes. + * + * @param start the start index in Unicode code points. + * @param length the length in Unicode code points. + * @return the resulting value. + */ + default MqlString substr(final int start, final int length) { + return this.substr(of(start), of(length)); + } + + /** + * The substring of {@code this} string, from the {@code start} index + * inclusive, and including the specified {@code length}, up to + * the end of the string. + * + *
<p>
The index position is in UTF-8 encoded bytes, not in + * Unicode code points. + * + * @param start the start index in UTF-8 encoded bytes. + * @param length the length in UTF-8 encoded bytes. + * @return the resulting value. + */ + MqlString substrBytes(MqlInteger start, MqlInteger length); + + /** + * The substring of {@code this} string, from the {@code start} index + * inclusive, and including the specified {@code length}, up to + * the end of the string. + * + *
<p>
The index position is in UTF-8 encoded bytes, not in + * Unicode code points. + * + * @param start the start index in UTF-8 encoded bytes. + * @param length the length in UTF-8 encoded bytes. + * @return the resulting value. + */ + default MqlString substrBytes(final int start, final int length) { + return this.substrBytes(of(start), of(length)); + } + + /** + * Converts {@code this} string to an {@linkplain MqlInteger integer}. + * + *
<p>
This will cause an error if this string does not represent an integer. + * + * @mongodb.server.release 4.0 + * @return the resulting value. + */ + MqlInteger parseInteger(); + + /** + * Converts {@code this} string to a {@linkplain MqlDate date}. + * + *
<p>
This method behaves like {@link #parseDate(MqlString)}, + * with the default format, which is {@code "%Y-%m-%dT%H:%M:%S.%LZ"}. + * + *
<p>
Will cause an error if this string does not represent a valid + * date string (such as "2018-03-20", "2018-03-20T12:00:00Z", or + * "2018-03-20T12:00:00+0500"). + * + * @see MqlDate#asString() + * @see MqlDate#asString(MqlString, MqlString) + * @return the resulting value. + */ + MqlDate parseDate(); + + /** + * Converts {@code this} string to a {@linkplain MqlDate date}, + * using the specified {@code format}. UTC is assumed if the timezone + * offset element is not specified in the format. + * + *
<p>
Will cause an error if {@code this} string does not match the + * specified {@code format}. + * Will cause an error if an element is specified that is finer-grained + * than an element that is not specified, with year being coarsest + * (for example, minute is specified, but hour is not). + * Omitted finer-grained elements will be parsed to 0. + * + * @see MqlDate#asString() + * @see MqlDate#asString(MqlString, MqlString) + * @mongodb.server.release 4.0 + * @mongodb.driver.manual reference/operator/aggregation/dateFromString/ Format Specifiers, UTC Offset, and Olson Timezone Identifier + * @param format the format. + * @return the resulting value. + */ + MqlDate parseDate(MqlString format); + + /** + * Converts {@code this} string to a {@linkplain MqlDate date}, + * using the specified {@code timezone} and {@code format}. + * + + *
<p>
Will cause an error if {@code this} string does not match the + * specified {@code format}. + * Will cause an error if an element is specified that is finer-grained + * than an element that is not specified, with year being coarsest + * (for example, minute is specified, but hour is not). + * Omitted finer-grained elements will be parsed to 0. + * Will cause an error if the format includes an offset or + * timezone, even if it matches the supplied {@code timezone}. + * + * @see MqlDate#asString() + * @see MqlDate#asString(MqlString, MqlString) + * @mongodb.driver.manual reference/operator/aggregation/dateFromString/ Format Specifiers, UTC Offset, and Olson Timezone Identifier + * @param format the format. + * @param timezone the UTC Offset or Olson Timezone Identifier. + * @return the resulting value. + */ + MqlDate parseDate(MqlString timezone, MqlString format); + + /** + * The result of passing {@code this} value to the provided function. + * Equivalent to {@code f.apply(this)}, and allows lambdas and static, + * user-defined functions to use the chaining syntax. + * + * @see MqlValue#passTo + * @param f the function to apply. + * @return the resulting value. + * @param the type of the resulting value. + */ + R passStringTo(Function f); + + /** + * The result of applying the provided switch mapping to {@code this} value. + * + * @see MqlValue#switchOn + * @param mapping the switch mapping. + * @return the resulting value. + * @param the type of the resulting value. + */ + R switchStringOn(Function, ? extends BranchesTerminal> mapping); +} diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlUnchecked.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlUnchecked.java new file mode 100644 index 00000000000..ec53a927b4e --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlUnchecked.java @@ -0,0 +1,80 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.mql; + +import com.mongodb.annotations.Sealed; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Documents places where the API relies on a user asserting + * something that is not checked at run-time. + * If the assertion turns out to be false, the API behavior is unspecified. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +@Documented +@Retention(RetentionPolicy.SOURCE) +@Target({ElementType.METHOD, ElementType.TYPE_USE}) +@Sealed +public @interface MqlUnchecked { + /** + * @return A hint on the user assertion the API relies on. + */ + Unchecked[] value(); + + /** + * @see MqlUnchecked#value() + */ + enum Unchecked { + /** + * The API relies on the values it encounters being of the + * (raw or non-parameterized) type + * implied, specified by, or inferred from the user code. + * + *
<p>
For example, {@link MqlDocument#getBoolean(String)} + * relies on the values of the document field being of the + * {@linkplain MqlBoolean boolean} type. + */ + TYPE, + /** + * The API checks the raw type, but relies on the type argument + * implied, specified by, or inferred from user code. + * + *
<p>
For example, {@link MqlValue#isArrayOr(MqlArray)} + * checks that the value is of the + * {@linkplain MqlArray array} raw type, + * but relies on the elements of the array being of + * the type derived from the user code. + */ + TYPE_ARGUMENT, + /** + * The presence of the specified value is not checked by the API. + * The use of the annotated method is an unchecked assertion that the + * specified (whether by index, name, key, position, or otherwise) + * element is present in the structure involved. + * + *
<p>
For example, {@link MqlDocument#getField(String)} relies + * on the field being present, and {@link MqlArray#first} relies + * on the array being non-empty. + */ + PRESENT, + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlValue.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlValue.java new file mode 100644 index 00000000000..8cb50885584 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlValue.java @@ -0,0 +1,335 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.mql; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +import java.util.function.Function; + +import static com.mongodb.client.model.mql.MqlUnchecked.Unchecked.TYPE_ARGUMENT; + +/** + * A value in the context of the MongoDB Query Language (MQL). + * + *
<p>
The API provided by this base type and its subtypes is the Java-native + * variant of MQL. It is used to query the MongoDB server, to perform remote + * computations, to store and retrieve data, or to otherwise work with data on + * a MongoDB server or compatible execution context. Though the methods exposed + * through this API generally correspond to MQL operations, this correspondence + * is not exact. + * + *
<p>
The following is an example of usage within an aggregation pipeline. Here, + * the current document value is obtained and its "numberArray" field is + * filtered and summed, in a style similar to that of the Java Stream API: + * + *
<pre>{@code
+ * import static com.mongodb.client.model.mql.MqlValues.current;
+ * MongoCollection<BsonDocument> col = ...;
+ * AggregateIterable<BsonDocument> result = col.aggregate(Arrays.asList(
+ *     addFields(new Field<>("result", current()
+ *         .getArray("numberArray")
+ *         .filter(v -> v.gt(of(0)))
+ *         .sum(v -> v)))));
+ * }</pre>
+ * + *
<p>
Values are typically initially obtained via the current document and its + * fields, or specified via statically-imported methods on the + * {@link MqlValues} class. + * + *
<p>
As with the Java Stream API's terminal operations, corresponding Java + * values are not directly available, but must be obtained indirectly via + * {@code MongoCollection.aggregate} or {@code MongoCollection.find}. + * Certain methods may cause an error, which will be produced + * through these "terminal operations". + * + *
<p>
The null value is not part of, and cannot be used as if it were part + * of, any explicit type (except the root type {@link MqlValue} itself). + * See {@link MqlValues#ofNull} for more details. + * + *
<p>
This API specifies no "missing" or "undefined" value. Users may use + * {@link MqlMap#has} to check whether a value is present. + * + *
<p>
This type hierarchy differs from the {@linkplain org.bson} types in that + * they provide computational operations, the numeric types are less granular, + * and it offers multiple abstractions of certain types (document, map, entry). + * It differs from the corresponding Java types (such as {@code int}, + * {@link String}, {@link java.util.Map}) in that the operations + * available differ, and in that an implementation of this API may be used to + * produce MQL in the form of BSON. (This API makes no guarantee regarding the + * BSON output produced by its implementation, which in any case may vary due + * to optimization or other factors.) + * + *

+ * Some methods within the API constitute an assertion by the user that the
+ * data is of a certain type. For example, {@link MqlDocument#getArray}
+ * requires that the underlying field is both an array, and an array of some
+ * certain type. If the field is not an array in the underlying data, behaviour
+ * is undefined by this API (though the execution context may define behaviours,
+ * users are strongly discouraged from relying on any behaviour that is not
+ * part of this API).
+ *
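+ * For example (an illustrative sketch):
+ *
+ * {@code
+ * // asserts that "scores" is an array, and that its elements are numbers;
+ * // if the underlying data disagrees, behaviour is undefined by this API:
+ * MqlArray<MqlNumber> scores = current().getArray("scores");
+ * }
+ *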

This API should be treated as sealed: + * it must not be extended or implemented (unless explicitly allowed). + * + * @see MqlValues + * @since 4.9.0 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface MqlValue { + + /** + * The method {@link MqlValue#eq} should be used to compare values for + * equality. This method checks reference equality. + */ + @Override + boolean equals(Object other); + + /** + * Whether {@code this} value is equal to the {@code other} value. + * + *

The result does not correlate with {@link MqlValue#equals(Object)}. + * + * @param other the other value. + * @return the resulting value. + */ + MqlBoolean eq(MqlValue other); + + /** + * Whether {@code this} value is not equal to the {@code other} value. + * + *

+     * The result does not correlate with {@link MqlValue#equals(Object)}.
+     *
+     * @param other the other value.
+     * @return the resulting value.
+     */
+    MqlBoolean ne(MqlValue other);
+
+    /**
+     * Whether {@code this} value is greater than the {@code other} value.
+     *
+     * @param other the other value.
+     * @return the resulting value.
+     */
+    MqlBoolean gt(MqlValue other);
+
+    /**
+     * Whether {@code this} value is greater than or equal to the {@code other}
+     * value.
+     *
+     * @param other the other value.
+     * @return the resulting value.
+     */
+    MqlBoolean gte(MqlValue other);
+
+    /**
+     * Whether {@code this} value is less than the {@code other} value.
+     *
+     * @param other the other value.
+     * @return the resulting value.
+     */
+    MqlBoolean lt(MqlValue other);
+
+    /**
+     * Whether {@code this} value is less than or equal to the {@code other}
+     * value.
+     *
+     * @param other the other value.
+     * @return the resulting value.
+     */
+    MqlBoolean lte(MqlValue other);
+
+    /**
+     * {@code this} value as a {@linkplain MqlBoolean boolean} if
+     * {@code this} is a boolean, or the {@code other} boolean value if
+     * {@code this} is null, or is missing, or is of any other non-boolean type.
+     *
+     * @param other the other value.
+     * @return the resulting value.
+     */
+    MqlBoolean isBooleanOr(MqlBoolean other);
+
+    /**
+     * {@code this} value as a {@linkplain MqlNumber number} if
+     * {@code this} is a number, or the {@code other} number value if
+     * {@code this} is null, or is missing, or is of any other non-number type.
+     *
+     * @mongodb.server.release 4.4
+     * @param other the other value.
+     * @return the resulting value.
+     */
+    MqlNumber isNumberOr(MqlNumber other);
+
+    /**
+     * {@code this} value as an {@linkplain MqlInteger integer} if
+     * {@code this} is an integer, or the {@code other} integer value if
+     * {@code this} is null, or is missing, or is of any other non-integer type.
+     *
+     * @mongodb.server.release 5.2
+     * @param other the other value.
+     * @return the resulting value.
+     */
+    MqlInteger isIntegerOr(MqlInteger other);
+
+    /**
+     * {@code this} value as a {@linkplain MqlString string} if
+     * {@code this} is a string, or the {@code other} string value if
+     * {@code this} is null, or is missing, or is of any other non-string type.
+     *
+     * @param other the other value.
+     * @return the resulting value.
+     */
+    MqlString isStringOr(MqlString other);
+
+    /**
+     * {@code this} value as a {@linkplain MqlDate date} if
+     * {@code this} is a date, or the {@code other} date value if
+     * {@code this} is null, or is missing, or is of any other non-date type.
+     *
+     * @param other the other value.
+     * @return the resulting value.
+     */
+    MqlDate isDateOr(MqlDate other);
+
+    /**
+     * {@code this} value as a {@linkplain MqlArray array} if
+     * {@code this} is an array, or the {@code other} array value if
+     * {@code this} is null, or is missing, or is of any other non-array type.
+     *

+     * Warning: The type of the elements of the resulting array is not
+     * enforced by the API. The specification of a type by the user is an
+     * unchecked assertion that all elements are of that type.
+     * If the array contains multiple types (such as both nulls and integers)
+     * then a super-type encompassing all types must be chosen, and
+     * if necessary the elements should be individually type-checked when used.
+     *
+     * @param other the other value.
+     * @return the resulting value.
+     * @param the type of the elements of the resulting array.
+     */
+    MqlArray<@MqlUnchecked(TYPE_ARGUMENT) T> isArrayOr(MqlArray other);
+
+    /**
+     * {@code this} value as a {@linkplain MqlDocument document} if
+     * {@code this} is a document (or document-like value, see
+     * {@link MqlMap} and {@link MqlEntry})
+     * or the {@code other} document value if {@code this} is null,
+     * or is missing, or is of any other non-document type.
+     *
+     * @param other the other value.
+     * @return the resulting value.
+     * @param the type.
+     */
+    T isDocumentOr(T other);
+
+    /**
+     * {@code this} value as a {@linkplain MqlMap map} if
+     * {@code this} is a map (or map-like value, see
+     * {@link MqlDocument} and {@link MqlEntry})
+     * or the {@code other} map value if {@code this} is null,
+     * or is missing, or is of any other non-map type.
+     *

+     * Warning: The type of the values of the resulting map is not
+     * enforced by the API. The specification of a type by the user is an
+     * unchecked assertion that all map values are of that type.
+     * If the map contains multiple types (such as both nulls and integers)
+     * then a super-type encompassing all types must be chosen, and
+     * if necessary the values should be individually type-checked when used.
+     *
+     * @param other the other value.
+     * @return the resulting value.
+     * @param the type of the values of the resulting map.
+     */
+    MqlMap<@MqlUnchecked(TYPE_ARGUMENT) T> isMapOr(MqlMap other);
+
+    /**
+     * The {@linkplain MqlString string} representation of {@code this} value.
+     *

This will cause an error if the type cannot be converted + * to a {@linkplain MqlString string}, as is the case with + * {@linkplain MqlArray arrays}, + * {@linkplain MqlDocument documents}, + * {@linkplain MqlMap maps}, + * {@linkplain MqlEntry entries}, and the + * {@linkplain MqlValues#ofNull() null value}. + * + * @mongodb.server.release 4.0 + * @see MqlString#parseDate() + * @see MqlString#parseInteger() + * @return the resulting value. + */ + MqlString asString(); + + /** + * The result of passing {@code this} value to the provided function. + * Equivalent to {@code f.apply(this)}, and allows lambdas and static, + * user-defined functions to use the chaining syntax. + * + *
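+     * For example (an illustrative sketch):
+     *
+     * {@code
+     * // equivalent to of(2).asString(), expressed via the chaining syntax:
+     * MqlString s = of(2).passTo(v -> v.asString());
+     * }
+     *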

The appropriate type-based variant should be used when the type + * of {@code this} is known. + * + * @see MqlBoolean#passBooleanTo + * @see MqlInteger#passIntegerTo + * @see MqlNumber#passNumberTo + * @see MqlString#passStringTo + * @see MqlDate#passDateTo + * @see MqlArray#passArrayTo + * @see MqlMap#passMapTo + * @see MqlDocument#passDocumentTo + * + * @param f the function to apply. + * @return the resulting value. + * @param the type of the resulting value. + */ + R passTo(Function f); + + /** + * The result of applying the provided switch mapping to {@code this} value. + * + *

Can be used to perform pattern matching on the type of {@code this} + * value, or to perform comparisons, or to perform any arbitrary check on + * {@code this} value. + * + *

The suggested convention is to use "{@code on}" as the name of the + * {@code mapping} parameter, for example: + * + *

{@code
+     * myValue.switchOn(on -> on
+     *     .isInteger(...)
+     *     ...
+     *     .defaults(...))
+     * }
+ * + *
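+     * A fuller sketch (illustrative; it assumes that {@link Branches} offers
+     * {@code isInteger}, {@code isNull} and {@code defaults} branches, as
+     * suggested by the example above and by {@link Branches#isNull}):
+     *
+     * {@code
+     * myValue.switchOn(on -> on
+     *     .isInteger(v -> v.multiply(of(2)))
+     *     .isNull(v -> of(0))
+     *     .defaults(v -> of(-1)))
+     * }
+     *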

+     * The appropriate type-based variant should be used when the type
+     * of {@code this} is known.
+     *
+     * @see MqlBoolean#switchBooleanOn
+     * @see MqlInteger#switchIntegerOn
+     * @see MqlNumber#switchNumberOn
+     * @see MqlString#switchStringOn
+     * @see MqlDate#switchDateOn
+     * @see MqlArray#switchArrayOn
+     * @see MqlMap#switchMapOn
+     * @see MqlDocument#switchDocumentOn
+     *
+     * @param mapping the switch mapping.
+     * @return the resulting value.
+     * @param the type of the resulting value.
+     */
+    <R extends MqlValue> R switchOn(Function<Branches<MqlValue>, ? extends BranchesTerminal<MqlValue, ? extends R>> mapping);
+}
diff --git a/driver-core/src/main/com/mongodb/client/model/mql/MqlValues.java b/driver-core/src/main/com/mongodb/client/model/mql/MqlValues.java
new file mode 100644
index 00000000000..e3e2bbd56a2
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/mql/MqlValues.java
@@ -0,0 +1,422 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model.mql;
+
+import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
+import com.mongodb.assertions.Assertions;
+import org.bson.BsonArray;
+import org.bson.BsonBoolean;
+import org.bson.BsonDateTime;
+import org.bson.BsonDecimal128;
+import org.bson.BsonDocument;
+import org.bson.BsonDouble;
+import org.bson.BsonInt32;
+import org.bson.BsonInt64;
+import org.bson.BsonNull;
+import org.bson.BsonString;
+import org.bson.BsonValue;
+import org.bson.conversions.Bson;
+import org.bson.types.Decimal128;
+
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.List;
+
+import static com.mongodb.client.model.mql.MqlExpression.AstPlaceholder;
+import static com.mongodb.client.model.mql.MqlExpression.toBsonValue;
+import static com.mongodb.client.model.mql.MqlUnchecked.Unchecked.TYPE_ARGUMENT;
+
+/**
+ * Convenience methods related to {@link MqlValue}, used primarily to
+ * produce values in the context of the MongoDB Query Language (MQL).
+ *
+ * @since 4.9.0
+ */
+@Beta(Reason.CLIENT)
+public final class MqlValues {
+
+    private MqlValues() {}
+
+    /**
+     * Returns a {@linkplain MqlBoolean boolean} value corresponding to
+     * the provided {@code boolean} primitive.
+     *
+     * @param of the {@code boolean} primitive.
+     * @return the resulting value.
+     */
+    public static MqlBoolean of(final boolean of) {
+        // we intentionally disallow ofBoolean(null)
+        return new MqlExpression<>((codecRegistry) -> new AstPlaceholder(new BsonBoolean(of)));
+    }
+
+    /**
+     * Returns an {@linkplain MqlArray array} of
+     * {@linkplain MqlBoolean booleans} corresponding to
+     * the provided {@code boolean} primitives.
+     *
+     * @param array the array.
+     * @return the resulting value.
+     */
+    public static MqlArray ofBooleanArray(final boolean...
array) { + Assertions.notNull("array", array); + BsonArray bsonArray = new BsonArray(); + for (boolean b : array) { + bsonArray.add(new BsonBoolean(b)); + } + return new MqlExpression<>((cr) -> new AstPlaceholder(bsonArray)); + } + + /** + * Returns an {@linkplain MqlInteger integer} value corresponding to + * the provided {@code int} primitive. + * + * @param of the {@code int} primitive. + * @return the resulting value. + */ + public static MqlInteger of(final int of) { + return new MqlExpression<>((codecRegistry) -> new AstPlaceholder(new BsonInt32(of))); + } + + /** + * Returns an {@linkplain MqlArray array} of + * {@linkplain MqlInteger integers} corresponding to + * the provided {@code int} primitives. + * + * @param array the array. + * @return the resulting value. + */ + public static MqlArray ofIntegerArray(final int... array) { + Assertions.notNull("array", array); + BsonArray bsonArray = new BsonArray(); + for (int i : array) { + bsonArray.add(new BsonInt32(i)); + } + return new MqlExpression<>((cr) -> new AstPlaceholder(bsonArray)); + } + + /** + * Returns an {@linkplain MqlInteger integer} value corresponding to + * the provided {@code long} primitive. + * + * @param of the {@code long} primitive. + * @return the resulting value. + */ + public static MqlInteger of(final long of) { + return new MqlExpression<>((codecRegistry) -> new AstPlaceholder(new BsonInt64(of))); + } + + /** + * Returns an {@linkplain MqlArray array} of + * {@linkplain MqlInteger integers} corresponding to + * the provided {@code long} primitives. + * + * @param array the array. + * @return the resulting value. + */ + public static MqlArray ofIntegerArray(final long... array) { + Assertions.notNull("array", array); + BsonArray bsonArray = new BsonArray(); + for (long i : array) { + bsonArray.add(new BsonInt64(i)); + } + return new MqlExpression<>((cr) -> new AstPlaceholder(bsonArray)); + } + + /** + * Returns a {@linkplain MqlNumber number} value corresponding to + * the provided {@code double} primitive. + * + * @param of the {@code double} primitive. + * @return the resulting value. + */ + public static MqlNumber of(final double of) { + return new MqlExpression<>((codecRegistry) -> new AstPlaceholder(new BsonDouble(of))); + } + + /** + * Returns an {@linkplain MqlArray array} of + * {@linkplain MqlNumber numbers} corresponding to + * the provided {@code double} primitives. + * + * @param array the array. + * @return the resulting value. + */ + public static MqlArray ofNumberArray(final double... array) { + Assertions.notNull("array", array); + BsonArray bsonArray = new BsonArray(); + for (double n : array) { + bsonArray.add(new BsonDouble(n)); + } + return new MqlExpression<>((cr) -> new AstPlaceholder(bsonArray)); + } + + /** + * Returns a {@linkplain MqlNumber number} value corresponding to + * the provided {@link Decimal128}. + * + * @param of the {@link Decimal128}. + * @return the resulting value. + */ + public static MqlNumber of(final Decimal128 of) { + Assertions.notNull("Decimal128", of); + return new MqlExpression<>((codecRegistry) -> new AstPlaceholder(new BsonDecimal128(of))); + } + + /** + * Returns an {@linkplain MqlArray array} of + * {@linkplain MqlNumber numbers} corresponding to + * the provided {@link Decimal128}s. + * + * @param array the array. + * @return the resulting value. + */ + public static MqlArray ofNumberArray(final Decimal128... 
array) {
+        Assertions.notNull("array", array);
+        List result = new ArrayList<>();
+        for (Decimal128 e : array) {
+            Assertions.notNull("elements of array", e);
+            result.add(new BsonDecimal128(e));
+        }
+        return new MqlExpression<>((cr) -> new AstPlaceholder(new BsonArray(result)));
+    }
+
+    /**
+     * Returns a {@linkplain MqlDate date and time} value corresponding to
+     * the provided {@link Instant}.
+     *
+     * @param of the {@link Instant}.
+     * @return the resulting value.
+     */
+    public static MqlDate of(final Instant of) {
+        Assertions.notNull("Instant", of);
+        return new MqlExpression<>((codecRegistry) -> new AstPlaceholder(new BsonDateTime(of.toEpochMilli())));
+    }
+
+    /**
+     * Returns an {@linkplain MqlArray array} of
+     * {@linkplain MqlDate dates} corresponding to
+     * the provided {@link Instant}s.
+     *
+     * @param array the array.
+     * @return the resulting value.
+     */
+    public static MqlArray ofDateArray(final Instant... array) {
+        Assertions.notNull("array", array);
+        List result = new ArrayList<>();
+        for (Instant e : array) {
+            Assertions.notNull("elements of array", e);
+            result.add(new BsonDateTime(e.toEpochMilli()));
+        }
+        return new MqlExpression<>((cr) -> new AstPlaceholder(new BsonArray(result)));
+    }
+
+    /**
+     * Returns a {@linkplain MqlString string} value corresponding to
+     * the provided {@link String}.
+     *
+     * @param of the {@link String}.
+     * @return the resulting value.
+     */
+    public static MqlString of(final String of) {
+        Assertions.notNull("String", of);
+        return new MqlExpression<>((codecRegistry) -> new AstPlaceholder(wrapString(of)));
+    }
+
+    /**
+     * Returns an {@linkplain MqlArray array} of
+     * {@linkplain MqlString strings} corresponding to
+     * the provided {@link String}s.
+     *
+     * @param array the array.
+     * @return the resulting value.
+     */
+    public static MqlArray ofStringArray(final String... array) {
+        Assertions.notNull("array", array);
+        List result = new ArrayList<>();
+        for (String e : array) {
+            Assertions.notNull("elements of array", e);
+            result.add(wrapString(e));
+        }
+        return new MqlExpression<>((cr) -> new AstPlaceholder(new BsonArray(result)));
+    }
+
+    private static BsonValue wrapString(final String s) {
+        BsonString bson = new BsonString(s);
+        if (s.contains("$")) {
+            return new BsonDocument("$literal", bson);
+        } else {
+            return bson;
+        }
+    }
+
+    /**
+     * Returns a reference to the "current"
+     * {@linkplain MqlDocument document} value.
+     * The "current" value is the top-level document currently being processed
+     * in the aggregation pipeline stage.
+     *
+     * @return a reference to the current value
+     */
+    public static MqlDocument current() {
+        return new MqlExpression<>((cr) -> new AstPlaceholder(new BsonString("$$CURRENT")))
+                .assertImplementsAllExpressions();
+    }
+
+    /**
+     * Returns a reference to the "current"
+     * value as a {@linkplain MqlMap map} value.
+     * The "current" value is the top-level document currently being processed
+     * in the aggregation pipeline stage.
+     *
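+     * For example (an illustrative sketch):
+     *
+     * {@code
+     * // the current document, viewed as a map whose values are asserted to be integers:
+     * MqlMap<MqlInteger> m = currentAsMap();
+     * }
+     *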

+     * Warning: The type of the values of the resulting map is not
+     * enforced by the API. The specification of a type by the user is an
+     * unchecked assertion that all map values are of that type.
+     * If the map contains multiple types (such as both nulls and integers)
+     * then a super-type encompassing all types must be chosen, and
+     * if necessary the values should be individually type-checked when used.
+     *
+     * @return a reference to the current value as a map.
+     * @param the type of the map's values.
+     */
+    public static MqlMap<@MqlUnchecked(TYPE_ARGUMENT) R> currentAsMap() {
+        return new MqlExpression<>((cr) -> new AstPlaceholder(new BsonString("$$CURRENT")))
+                .assertImplementsAllExpressions();
+    }
+
+    /**
+     * Returns an {@linkplain MqlArray array} value, containing the
+     * {@linkplain MqlValue values} provided.
+     *
+     * @param array the {@linkplain MqlValue values}.
+     * @return the resulting value.
+     * @param the type of the array elements.
+     */
+    @SafeVarargs // nothing is stored in the array
+    public static MqlArray ofArray(final T... array) {
+        Assertions.notNull("array", array);
+        return new MqlExpression<>((cr) -> {
+            BsonArray bsonArray = new BsonArray();
+            for (T v : array) {
+                Assertions.notNull("elements of array", v);
+                bsonArray.add(((MqlExpression) v).toBsonValue(cr));
+            }
+            return new AstPlaceholder(bsonArray);
+        });
+    }
+
+    /**
+     * Returns an {@linkplain MqlEntry entry} value.
+     *
+     * @param k the key.
+     * @param v the value.
+     * @return the resulting value.
+     * @param the type of the value.
+     */
+    public static MqlEntry ofEntry(final MqlString k, final T v) {
+        Assertions.notNull("k", k);
+        Assertions.notNull("v", v);
+        return new MqlExpression<>((cr) -> {
+            BsonDocument document = new BsonDocument();
+            document.put("k", toBsonValue(cr, k));
+            document.put("v", toBsonValue(cr, v));
+            return new AstPlaceholder(document);
+        });
+    }
+
+    /**
+     * Returns an empty {@linkplain MqlMap map} value.
+     *
+     * @param the type of the resulting map's values.
+     * @return the resulting map value.
+     */
+    public static MqlMap ofMap() {
+        return ofMap(new BsonDocument());
+    }
+
+    /**
+     * Returns a {@linkplain MqlMap map} value corresponding to the
+     * provided {@link Bson Bson document}.
+     *

+     * Warning: The type of the values of the resulting map is not
+     * enforced by the API. The specification of a type by the user is an
+     * unchecked assertion that all map values are of that type.
+     * If the map contains multiple types (such as both nulls and integers)
+     * then a super-type encompassing all types must be chosen, and
+     * if necessary the values should be individually type-checked when used.
+     *
+     * @param map the map as a {@link Bson Bson document}.
+     * @param the type of the resulting map's values.
+     * @return the resulting map value.
+     */
+    public static MqlMap<@MqlUnchecked(TYPE_ARGUMENT) T> ofMap(final Bson map) {
+        Assertions.notNull("map", map);
+        return new MqlExpression<>((cr) -> new AstPlaceholder(new BsonDocument("$literal",
+                map.toBsonDocument(BsonDocument.class, cr))));
+    }
+
+    /**
+     * Returns a {@linkplain MqlDocument document} value corresponding to the
+     * provided {@link Bson Bson document}.
+     *
+     * @param document the {@linkplain Bson BSON document}.
+     * @return the resulting value.
+     */
+    public static MqlDocument of(final Bson document) {
+        Assertions.notNull("document", document);
+        // All documents are wrapped in a $literal; this is the least brittle approach.
+        return new MqlExpression<>((cr) -> new AstPlaceholder(new BsonDocument("$literal",
+                document.toBsonDocument(BsonDocument.class, cr))));
+    }
+
+    /**
+     * The null value in the context of the MongoDB Query Language (MQL).
+     *

The null value is not part of, and cannot be used as if it were part + * of, any explicit type (except the root type {@link MqlValue} itself). + * It has no explicit type of its own. + * + *

Instead of checking that a value is null, users should generally + * check that a value is of their expected type, via methods such as + * {@link MqlValue#isNumberOr(MqlNumber)}. Where the null value + * must be checked explicitly, users may use {@link Branches#isNull} within + * {@link MqlValue#switchOn}. + * + * @return the null value + */ + public static MqlValue ofNull() { + // There is no specific mql type corresponding to Null, + // and Null is not a value in any other mql type. + return new MqlExpression<>((cr) -> new AstPlaceholder(new BsonNull())) + .assertImplementsAllExpressions(); + } + + static MqlNumber numberToMqlNumber(final Number number) { + Assertions.notNull("number", number); + if (number instanceof Integer) { + return of((int) number); + } else if (number instanceof Long) { + return of((long) number); + } else if (number instanceof Double) { + return of((double) number); + } else if (number instanceof Decimal128) { + return of((Decimal128) number); + } else { + throw new IllegalArgumentException("Number must be one of: Integer, Long, Double, Decimal128"); + } + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/mql/SwitchCase.java b/driver-core/src/main/com/mongodb/client/model/mql/SwitchCase.java new file mode 100644 index 00000000000..3210a9bfdf2 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/mql/SwitchCase.java @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.mql; + +final class SwitchCase { + private final MqlBoolean caseValue; + private final R thenValue; + + SwitchCase(final MqlBoolean caseValue, final R thenValue) { + this.caseValue = caseValue; + this.thenValue = thenValue; + } + + MqlBoolean getCaseValue() { + return caseValue; + } + + R getThenValue() { + return thenValue; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/mql/package-info.java b/driver-core/src/main/com/mongodb/client/model/mql/package-info.java new file mode 100644 index 00000000000..caef0925787 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/mql/package-info.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * @see com.mongodb.client.model.mql.MqlValue + * @see com.mongodb.client.model.mql.MqlValues + * @since 4.9.0 + */ +@Beta(Reason.CLIENT) +@NonNullApi +package com.mongodb.client.model.mql; +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/client/model/package-info.java b/driver-core/src/main/com/mongodb/client/model/package-info.java new file mode 100644 index 00000000000..5434fa8d4b9 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/package-info.java @@ -0,0 +1,23 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains models and options that help describe MongoCollection operations + */ +@NonNullApi +package com.mongodb.client.model; + +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/client/model/search/AddSearchScoreExpression.java b/driver-core/src/main/com/mongodb/client/model/search/AddSearchScoreExpression.java new file mode 100644 index 00000000000..d8a2fe5e908 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/AddSearchScoreExpression.java @@ -0,0 +1,29 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * @see SearchScoreExpression#addExpression(Iterable) + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface AddSearchScoreExpression extends SearchScoreExpression { +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/ApproximateVectorSearchOptions.java b/driver-core/src/main/com/mongodb/client/model/search/ApproximateVectorSearchOptions.java new file mode 100644 index 00000000000..d8e920990f9 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/ApproximateVectorSearchOptions.java @@ -0,0 +1,34 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Sealed; +import com.mongodb.client.model.Aggregates; + +/** + * Represents optional fields of the {@code $vectorSearch} pipeline stage of an aggregation pipeline. + *

+ * Configures approximate vector search for Atlas Vector Search to enable searches that may not return the exact closest vectors. + * + * @see Aggregates#vectorSearch(FieldSearchPath, Iterable, String, long, VectorSearchOptions) + * @mongodb.atlas.manual atlas-vector-search/vector-search-stage/ $vectorSearch + * @mongodb.server.release 6.0.11, 7.0.2 + * @since 5.2 + */ +@Sealed +public interface ApproximateVectorSearchOptions extends VectorSearchOptions { +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/AutocompleteSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/AutocompleteSearchOperator.java new file mode 100644 index 00000000000..447de8168cd --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/AutocompleteSearchOperator.java @@ -0,0 +1,64 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * @see SearchOperator#autocomplete(FieldSearchPath, String, String...) + * @see SearchOperator#autocomplete(FieldSearchPath, Iterable) + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface AutocompleteSearchOperator extends SearchOperator { + @Override + AutocompleteSearchOperator score(SearchScore modifier); + + /** + * Creates a new {@link AutocompleteSearchOperator} that uses fuzzy search. + * + * @return A new {@link AutocompleteSearchOperator}. + */ + AutocompleteSearchOperator fuzzy(); + + /** + * Creates a new {@link AutocompleteSearchOperator} that uses fuzzy search. + * + * @param options The fuzzy search options. + * Specifying {@link FuzzySearchOptions#fuzzySearchOptions()} is equivalent to calling {@link #fuzzy()}. + * @return A new {@link AutocompleteSearchOperator}. + */ + AutocompleteSearchOperator fuzzy(FuzzySearchOptions options); + + /** + * Creates a new {@link AutocompleteSearchOperator} that does not require tokens to appear in the same order as they are specified. + * + * @return A new {@link AutocompleteSearchOperator}. + * @see #sequentialTokenOrder() + */ + AutocompleteSearchOperator anyTokenOrder(); + + /** + * Creates a new {@link AutocompleteSearchOperator} that requires tokens to appear in the same order as they are specified. + * + * @return A new {@link AutocompleteSearchOperator}. + * @see #anyTokenOrder() + */ + AutocompleteSearchOperator sequentialTokenOrder(); +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/CompoundSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/CompoundSearchOperator.java new file mode 100644 index 00000000000..b12a86ae78a --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/CompoundSearchOperator.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * @see SearchOperator#compound() + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface CompoundSearchOperator extends CompoundSearchOperatorBase, SearchOperator { + @Override + CompoundSearchOperator score(SearchScore modifier); +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/CompoundSearchOperatorBase.java b/driver-core/src/main/com/mongodb/client/model/search/CompoundSearchOperatorBase.java new file mode 100644 index 00000000000..2834199a4e0 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/CompoundSearchOperatorBase.java @@ -0,0 +1,72 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * A base for a {@link CompoundSearchOperator} which allows creating instances of this operator. + * This interface is a technicality and does not represent a meaningful element of the full-text search query syntax. + * + * @see SearchOperator#compound() + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface CompoundSearchOperatorBase { + /** + * Creates a new {@link CompoundSearchOperator} by adding to it {@code clauses} that must all be satisfied. + *

+ * This method may be called multiple times.
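+     * For example (an illustrative sketch; {@code asList} is {@code java.util.Arrays.asList}):
+     *
+     * {@code
+     * SearchOperator.compound()
+     *     .must(asList(SearchOperator.exists(SearchPath.fieldPath("title"))))
+     *     .must(asList(SearchOperator.exists(SearchPath.fieldPath("year"))))
+     * }
+     *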

+     *
+     * @param clauses The non-empty clauses to add.
+     * @return A new {@link CompoundSearchOperator}.
+     */
+    MustCompoundSearchOperator must(Iterable clauses);
+
+    /**
+     * Creates a new {@link CompoundSearchOperator} by adding to it {@code clauses}, none of which may be satisfied.
+     *

+ * This method may be called multiple times.

+ * + * @param clauses The non-empty clauses to add. + * @return A new {@link CompoundSearchOperator}. + */ + MustNotCompoundSearchOperator mustNot(Iterable clauses); + + /** + * Creates a new {@link CompoundSearchOperator} by adding to it {@code clauses} that are preferred to be satisfied. + *

+ * This method may be called multiple times.

+ * + * @param clauses The non-empty clauses to add. + * @return A new {@link CompoundSearchOperator}. + */ + ShouldCompoundSearchOperator should(Iterable clauses); + + /** + * Creates a new {@link CompoundSearchOperator} by adding to it {@code clauses} that, similarly to {@link #must(Iterable)}, + * must all be satisfied. The difference is that this method does not affect the relevance score. + *

+ * This method may be called multiple times.

+ * + * @param clauses The non-empty clauses to add. + * @return A new {@link CompoundSearchOperator}. + */ + FilterCompoundSearchOperator filter(Iterable clauses); +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/ConstantSearchScore.java b/driver-core/src/main/com/mongodb/client/model/search/ConstantSearchScore.java new file mode 100644 index 00000000000..463df7634e3 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/ConstantSearchScore.java @@ -0,0 +1,29 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * @see SearchScore#constant(float) + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface ConstantSearchScore extends SearchScore { +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/ConstantSearchScoreExpression.java b/driver-core/src/main/com/mongodb/client/model/search/ConstantSearchScoreExpression.java new file mode 100644 index 00000000000..691ee643572 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/ConstantSearchScoreExpression.java @@ -0,0 +1,29 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * @see SearchScoreExpression#constantExpression(float) + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface ConstantSearchScoreExpression extends SearchScoreExpression { +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/DateNearSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/DateNearSearchOperator.java new file mode 100644 index 00000000000..8421d058eeb --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/DateNearSearchOperator.java @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +import java.time.Duration; +import java.time.Instant; + +/** + * @see SearchOperator#near(Instant, Duration, FieldSearchPath, FieldSearchPath...) + * @see SearchOperator#near(Instant, Duration, Iterable) + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface DateNearSearchOperator extends SearchOperator { + @Override + DateNearSearchOperator score(SearchScore modifier); +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/DateRangeConstructibleBsonElement.java b/driver-core/src/main/com/mongodb/client/model/search/DateRangeConstructibleBsonElement.java new file mode 100644 index 00000000000..03d20befe73 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/DateRangeConstructibleBsonElement.java @@ -0,0 +1,76 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.client.model.search; + +import org.bson.conversions.Bson; + +import java.time.Instant; + +final class DateRangeConstructibleBsonElement extends RangeConstructibleBsonElement + implements DateRangeSearchOperator { + DateRangeConstructibleBsonElement(final String name, final Bson value) { + super(name, value); + } + + private DateRangeConstructibleBsonElement(final Bson baseElement, final Bson appendedElementValue) { + super(baseElement, appendedElementValue); + } + + @Override + protected DateRangeConstructibleBsonElement newSelf(final Bson baseElement, final Bson appendedElementValue) { + return new DateRangeConstructibleBsonElement(baseElement, appendedElementValue); + } + + @Override + public DateRangeSearchOperator gt(final Instant l) { + return internalGt(l); + } + + @Override + public DateRangeSearchOperator lt(final Instant u) { + return internalLt(u); + } + + @Override + public DateRangeSearchOperator gte(final Instant l) { + return internalGte(l); + } + + @Override + public DateRangeSearchOperator lte(final Instant u) { + return internalLte(u); + } + + @Override + public DateRangeSearchOperator gtLt(final Instant l, final Instant u) { + return internalGtLt(l, u); + } + + @Override + public DateRangeSearchOperator gteLte(final Instant l, final Instant u) { + return internalGteLte(l, u); + } + + @Override + public DateRangeSearchOperator gtLte(final Instant l, final Instant u) { + return internalGtLte(l, u); + } + + @Override + public DateRangeSearchOperator gteLt(final Instant l, final Instant u) { + return internalGteLt(l, u); + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/DateRangeSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/DateRangeSearchOperator.java new file mode 100644 index 00000000000..f8c654cae1d --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/DateRangeSearchOperator.java @@ -0,0 +1,32 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * @see SearchOperator#dateRange(FieldSearchPath, FieldSearchPath...) + * @see SearchOperator#dateRange(Iterable) + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface DateRangeSearchOperator extends DateRangeSearchOperatorBase, SearchOperator { + @Override + DateRangeSearchOperator score(SearchScore modifier); +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/DateRangeSearchOperatorBase.java b/driver-core/src/main/com/mongodb/client/model/search/DateRangeSearchOperatorBase.java new file mode 100644 index 00000000000..df8fbaa93d8 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/DateRangeSearchOperatorBase.java @@ -0,0 +1,102 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.client.model.search;
+
+import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
+import com.mongodb.annotations.Sealed;
+
+import java.time.Instant;
+
+/**
+ * A base for a {@link DateRangeSearchOperator} which allows creating instances of this operator.
+ * This interface is a technicality and does not represent a meaningful element of the full-text search query syntax.
+ *
+ * @see SearchOperator#dateRange(FieldSearchPath, FieldSearchPath...)
+ * @see SearchOperator#dateRange(Iterable)
+ * @since 4.7
+ */
+@Sealed
+@Beta(Reason.CLIENT)
+public interface DateRangeSearchOperatorBase {
+    /**
+     * Creates a new {@link DateRangeSearchOperator} that tests if values are within (l; ∞).
+     *
+     * @param l The lower bound.
+     * @return A new {@link DateRangeSearchOperator}.
+     */
+    DateRangeSearchOperator gt(Instant l);
+
+    /**
+     * Creates a new {@link DateRangeSearchOperator} that tests if values are within (-∞; u).
+     *
+     * @param u The upper bound.
+     * @return A new {@link DateRangeSearchOperator}.
+     */
+    DateRangeSearchOperator lt(Instant u);
+
+    /**
+     * Creates a new {@link DateRangeSearchOperator} that tests if values are within [l; ∞).
+     *
+     * @param l The lower bound.
+     * @return A new {@link DateRangeSearchOperator}.
+     */
+    DateRangeSearchOperator gte(Instant l);
+
+    /**
+     * Creates a new {@link DateRangeSearchOperator} that tests if values are within (-∞; u].
+     *
+     * @param u The upper bound.
+     * @return A new {@link DateRangeSearchOperator}.
+     */
+    DateRangeSearchOperator lte(Instant u);
+
+    /**
+     * Creates a new {@link DateRangeSearchOperator} that tests if values are within (l; u).
+     *
+     * @param l The lower bound.
+     * @param u The upper bound.
+     * @return A new {@link DateRangeSearchOperator}.
+     */
+    DateRangeSearchOperator gtLt(Instant l, Instant u);
+
+    /**
+     * Creates a new {@link DateRangeSearchOperator} that tests if values are within [l; u].
+     *
+     * @param l The lower bound.
+     * @param u The upper bound.
+     * @return A new {@link DateRangeSearchOperator}.
+     */
+    DateRangeSearchOperator gteLte(Instant l, Instant u);
+
+    /**
+     * Creates a new {@link DateRangeSearchOperator} that tests if values are within (l; u].
+     *
+     * @param l The lower bound.
+     * @param u The upper bound.
+     * @return A new {@link DateRangeSearchOperator}.
+     */
+    DateRangeSearchOperator gtLte(Instant l, Instant u);
+
+    /**
+     * Creates a new {@link DateRangeSearchOperator} that tests if values are within [l; u).
+     *
+     * @param l The lower bound.
+     * @param u The upper bound.
+     * @return A new {@link DateRangeSearchOperator}.
+ */ + DateRangeSearchOperator gteLt(Instant l, Instant u); +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/DateSearchFacet.java b/driver-core/src/main/com/mongodb/client/model/search/DateSearchFacet.java new file mode 100644 index 00000000000..39d8bb2ddf0 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/DateSearchFacet.java @@ -0,0 +1,36 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * @see SearchFacet#dateFacet(String, FieldSearchPath, Iterable) + * @since 4.7 + */ +@Sealed +@Beta({Reason.CLIENT, Reason.SERVER}) +public interface DateSearchFacet extends SearchFacet { + /** + * Creates a new {@link DateSearchFacet} with the default bucket specified. + * + * @param name The name of the bucket for documents that do not fall within the specified boundaries. + * @return A new {@link DateSearchFacet}. + */ + DateSearchFacet defaultBucket(String name); +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/EqualsSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/EqualsSearchOperator.java new file mode 100644 index 00000000000..b3aa4c278ea --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/EqualsSearchOperator.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +import java.util.UUID; + +import java.time.Instant; + +import org.bson.types.ObjectId; + +/** + * @see SearchOperator#equals(FieldSearchPath, boolean) + * @see SearchOperator#equals(FieldSearchPath, ObjectId) + * @see SearchOperator#equals(FieldSearchPath, Number) + * @see SearchOperator#equals(FieldSearchPath, Instant) + * @see SearchOperator#equals(FieldSearchPath, String) + * @see SearchOperator#equals(FieldSearchPath, UUID) + * @see SearchOperator#equalsNull(FieldSearchPath) + * @since 5.3 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface EqualsSearchOperator extends SearchOperator { + @Override + EqualsSearchOperator score(SearchScore modifier); +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/ExactVectorSearchOptions.java b/driver-core/src/main/com/mongodb/client/model/search/ExactVectorSearchOptions.java new file mode 100644 index 00000000000..d58b69e5a37 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/ExactVectorSearchOptions.java @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Sealed; +import com.mongodb.client.model.Aggregates; + +/** + * Represents optional fields of the {@code $vectorSearch} pipeline stage of an aggregation pipeline. + *
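+ * An illustrative usage sketch (it assumes a factory method
+ * {@code VectorSearchOptions.exactVectorSearchOptions()} and a prepared
+ * {@code queryVector}):
+ *
+ * {@code
+ * Aggregates.vectorSearch(
+ *     SearchPath.fieldPath("embedding"), queryVector, "vector_index", 10,
+ *     VectorSearchOptions.exactVectorSearchOptions())
+ * }
+ *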

+ * Configures exact vector search for Atlas Vector Search to enable precise matching, ensuring that + * results are the closest vectors to a given query vector. + * + * @see Aggregates#vectorSearch(FieldSearchPath, Iterable, String, long, VectorSearchOptions) + * @mongodb.atlas.manual atlas-vector-search/vector-search-stage/ $vectorSearch + * @mongodb.server.release 6.0.16, 7.0.10, 7.3.2 + * @since 5.2 + */ +@Sealed +public interface ExactVectorSearchOptions extends VectorSearchOptions { +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/ExistsSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/ExistsSearchOperator.java new file mode 100644 index 00000000000..847070dc3bc --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/ExistsSearchOperator.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * @see SearchOperator#exists(FieldSearchPath) + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface ExistsSearchOperator extends SearchOperator { + @Override + ExistsSearchOperator score(SearchScore modifier); +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/FacetSearchCollector.java b/driver-core/src/main/com/mongodb/client/model/search/FacetSearchCollector.java new file mode 100644 index 00000000000..01190216633 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/FacetSearchCollector.java @@ -0,0 +1,29 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * @see SearchCollector#facet(SearchOperator, Iterable) + * @since 4.7 + */ +@Sealed +@Beta({Reason.CLIENT, Reason.SERVER}) +public interface FacetSearchCollector extends SearchCollector { +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/FieldSearchPath.java b/driver-core/src/main/com/mongodb/client/model/search/FieldSearchPath.java new file mode 100644 index 00000000000..2be4cdecb90 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/FieldSearchPath.java @@ -0,0 +1,54 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; +import org.bson.conversions.Bson; + +import static com.mongodb.internal.client.model.Util.SEARCH_PATH_VALUE_KEY; + +/** + * @see SearchPath#fieldPath(String) + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface FieldSearchPath extends SearchPath { + /** + * Creates a new {@link FieldSearchPath} with the name of the alternate analyzer specified. + * + * @param analyzerName The name of the alternate analyzer. + * @return A new {@link FieldSearchPath}. + */ + FieldSearchPath multi(String analyzerName); + + /** + * Returns the name of the field represented by this path. + *

+     * <p>
+     * This method may be useful when using the {@code of} methods, e.g., {@link SearchScore#of(Bson)}.
+     * Depending on the syntax of the document being constructed,
+     * it may be required to use the method {@link SearchPath#toBsonValue()} instead.</p>

+ * + * @return A {@link String} {@linkplain String#equals(Object) equal} to the one used to {@linkplain SearchPath#fieldPath(String) create} + * this path. + * @see SearchPath#toBsonValue() + */ + default String toValue() { + return toBsonDocument().getString(SEARCH_PATH_VALUE_KEY).getValue(); + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/FilterCompoundSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/FilterCompoundSearchOperator.java new file mode 100644 index 00000000000..df23133d1a8 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/FilterCompoundSearchOperator.java @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * A representation of a {@link CompoundSearchOperator} that allows changing + * {@link CompoundSearchOperator#filter(Iterable) filter}-specific options, if any. + * This interface is a technicality and does not represent a meaningful element of the full-text search query syntax. + * + * @see CompoundSearchOperatorBase#filter(Iterable) + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface FilterCompoundSearchOperator extends CompoundSearchOperator { + @Override + FilterCompoundSearchOperator score(SearchScore modifier); +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/FunctionSearchScore.java b/driver-core/src/main/com/mongodb/client/model/search/FunctionSearchScore.java new file mode 100644 index 00000000000..e2bf09bf1a5 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/FunctionSearchScore.java @@ -0,0 +1,29 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
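A short sketch of how the two {@code FieldSearchPath} refinements above combine; the field and analyzer names are illustrative:

    // multi(...) selects an alternate analyzer; toValue() recovers the raw field name,
    // which is handy when embedding the path into a manually built document.
    FieldSearchPath titlePath = SearchPath.fieldPath("title").multi("keywordAnalyzer");
    String rawName = SearchPath.fieldPath("title").toValue(); // "title"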
+ */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * @see SearchScore#function(SearchScoreExpression) + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface FunctionSearchScore extends SearchScore { +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/FuzzySearchOptions.java b/driver-core/src/main/com/mongodb/client/model/search/FuzzySearchOptions.java new file mode 100644 index 00000000000..2acbb244537 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/FuzzySearchOptions.java @@ -0,0 +1,86 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; +import org.bson.conversions.Bson; + +/** + * Fuzzy search options that may be used with some {@link SearchOperator}s. + * + * @mongodb.atlas.manual atlas-search/autocomplete/ autocomplete operator + * @mongodb.atlas.manual atlas-search/text/ text operator + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface FuzzySearchOptions extends Bson { + /** + * Creates a new {@link FuzzySearchOptions} with the maximum + * number of single-character edits + * required to match a search term. + * + * @param maxEdits The maximum number of single-character edits required to match a search term. + * @return A new {@link FuzzySearchOptions}. + */ + FuzzySearchOptions maxEdits(int maxEdits); + + /** + * Creates a new {@link FuzzySearchOptions} with the number of characters at the beginning of a search term that must exactly match. + * + * @param prefixLength The number of characters at the beginning of a search term that must exactly match. + * @return A new {@link FuzzySearchOptions}. + */ + FuzzySearchOptions prefixLength(int prefixLength); + + /** + * Creates a new {@link FuzzySearchOptions} with the maximum number of variations to generate and consider to match a search term. + * + * @param maxExpansions The maximum number of variations to generate and consider to match a search term. + * @return A new {@link FuzzySearchOptions}. + */ + FuzzySearchOptions maxExpansions(int maxExpansions); + + /** + * Creates a new {@link FuzzySearchOptions} with the specified option in situations when there is no builder method + * that better satisfies your needs. + * This method cannot be used to validate the syntax. + *

+     * <p>
+     * <i>Example</i><br>
+     * The following code creates two functionally equivalent {@link FuzzySearchOptions} objects,
+     * though they may not be {@linkplain Object#equals(Object) equal}.</p>
+     * <pre>{@code
+     *  FuzzySearchOptions options1 = FuzzySearchOptions.fuzzySearchOptions().maxEdits(1);
+     *  FuzzySearchOptions options2 = FuzzySearchOptions.fuzzySearchOptions().option("maxEdits", 1);
+     * }</pre>
+ * + * @param name The option name. + * @param value The option value. + * @return A new {@link FuzzySearchOptions}. + */ + FuzzySearchOptions option(String name, Object value); + + /** + * Returns {@link FuzzySearchOptions} that represents server defaults. + * + * @return {@link FuzzySearchOptions} that represents server defaults. + */ + static FuzzySearchOptions fuzzySearchOptions() { + return SearchConstructibleBson.EMPTY_IMMUTABLE; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/GaussSearchScoreExpression.java b/driver-core/src/main/com/mongodb/client/model/search/GaussSearchScoreExpression.java new file mode 100644 index 00000000000..b3ac5fadedb --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/GaussSearchScoreExpression.java @@ -0,0 +1,48 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * @see SearchScoreExpression#gaussExpression(double, PathSearchScoreExpression, double) + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface GaussSearchScoreExpression extends SearchScoreExpression { + /** + * Creates a new {@link GaussSearchScoreExpression} which does not decay, i.e., its output stays 1, if the value of the + * {@link SearchScoreExpression#gaussExpression(double, PathSearchScoreExpression, double) path} expression is within the interval + * [{@link SearchScoreExpression#gaussExpression(double, PathSearchScoreExpression, double) origin} - {@code offset}; + * {@code origin} + {@code offset}]. + * + * @param offset The offset from the origin where no decay happens. + * @return A new {@link GaussSearchScoreExpression}. + */ + GaussSearchScoreExpression offset(double offset); + + /** + * Creates a new {@link GaussSearchScoreExpression} with the factor by which the output of the Gaussian function must decay at distance + * {@link SearchScoreExpression#gaussExpression(double, PathSearchScoreExpression, double) scale}. + * + * @param decay The decay. + * @return A new {@link GaussSearchScoreExpression}. + */ + GaussSearchScoreExpression decay(double decay); +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/GeoNearSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/GeoNearSearchOperator.java new file mode 100644 index 00000000000..1501bbd819e --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/GeoNearSearchOperator.java @@ -0,0 +1,33 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
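As an illustration of the two builders just introduced (field names and numbers are made up):

    // Fuzzy matching on a text operator: up to 2 single-character edits,
    // first character of the term must match exactly.
    SearchOperator fuzzyText = SearchOperator.text(SearchPath.fieldPath("title"), "mongo")
            .fuzzy(FuzzySearchOptions.fuzzySearchOptions()
                    .maxEdits(2)
                    .prefixLength(1));

    // Gaussian decay scoring: full score within origin ± offset, decaying by the
    // given factor at distance scale from the origin.
    SearchScoreExpression gauss = SearchScoreExpression
            .gaussExpression(100, SearchScoreExpression.pathExpression(SearchPath.fieldPath("rating")).undefined(50), 10)
            .offset(5)
            .decay(0.5);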
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; +import com.mongodb.client.model.geojson.Point; + +/** + * @see SearchOperator#near(Point, Number, FieldSearchPath, FieldSearchPath...) + * @see SearchOperator#near(Point, Number, Iterable) + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface GeoNearSearchOperator extends SearchOperator { + @Override + GeoNearSearchOperator score(SearchScore modifier); +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/InSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/InSearchOperator.java new file mode 100644 index 00000000000..4719d1b0bc6 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/InSearchOperator.java @@ -0,0 +1,41 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; +import org.bson.types.ObjectId; + +import java.time.Instant; +import java.util.UUID; + +/** + * @see SearchOperator#in(FieldSearchPath, boolean, boolean...) + * @see SearchOperator#in(FieldSearchPath, ObjectId, ObjectId...) + * @see SearchOperator#in(FieldSearchPath, Number, Number...) + * @see SearchOperator#in(FieldSearchPath, Instant, Instant...) + * @see SearchOperator#in(FieldSearchPath, UUID, UUID...) + * @see SearchOperator#in(FieldSearchPath, String, String...) + * @see SearchOperator#in(FieldSearchPath, Iterable) + * @since 5.3 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface InSearchOperator extends SearchOperator { + @Override + InSearchOperator score(SearchScore modifier); +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/Log1pSearchScoreExpression.java b/driver-core/src/main/com/mongodb/client/model/search/Log1pSearchScoreExpression.java new file mode 100644 index 00000000000..40ad061cbcb --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/Log1pSearchScoreExpression.java @@ -0,0 +1,29 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
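A sketch of the near operator for geographic points; "location" and the coordinates are illustrative, and Point/Position come from com.mongodb.client.model.geojson:

    // Documents score higher the closer "location" is to the origin; for GeoJSON
    // points the pivot (here 1000) is the distance in meters at which the score is halved.
    SearchOperator nearPoint = SearchOperator.near(
            new Point(new Position(-73.9857, 40.7484)),
            1000,
            SearchPath.fieldPath("location"));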
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * @see SearchScoreExpression#log1pExpression(SearchScoreExpression) + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface Log1pSearchScoreExpression extends SearchScoreExpression { +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/LogSearchScoreExpression.java b/driver-core/src/main/com/mongodb/client/model/search/LogSearchScoreExpression.java new file mode 100644 index 00000000000..ae4e5fa8725 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/LogSearchScoreExpression.java @@ -0,0 +1,29 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * @see SearchScoreExpression#logExpression(SearchScoreExpression) + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface LogSearchScoreExpression extends SearchScoreExpression { +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/LowerBoundSearchCount.java b/driver-core/src/main/com/mongodb/client/model/search/LowerBoundSearchCount.java new file mode 100644 index 00000000000..15576d4a5b6 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/LowerBoundSearchCount.java @@ -0,0 +1,37 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
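A sketch of the in operator introduced above; the field and values are illustrative:

    // Matches documents whose "genre" equals any of the listed values.
    SearchOperator inGenres = SearchOperator.in(
            SearchPath.fieldPath("genre"), "drama", "comedy", "thriller");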
+ */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * @see SearchCount#lowerBound() + * @since 4.7 + */ +@Sealed +@Beta({Reason.CLIENT, Reason.SERVER}) +public interface LowerBoundSearchCount extends SearchCount { + /** + * Creates a new {@link LowerBoundSearchCount} that instructs to count documents up to the {@code threshold} exactly, + * then to count roughly. + * + * @param threshold The number of documents to include in the exact count. + * @return A new {@link LowerBoundSearchCount}. + */ + LowerBoundSearchCount threshold(int threshold); +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/MoreLikeThisSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/MoreLikeThisSearchOperator.java new file mode 100644 index 00000000000..b9f9826d858 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/MoreLikeThisSearchOperator.java @@ -0,0 +1,33 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; +import org.bson.BsonDocument; + +/** + * @see SearchOperator#moreLikeThis(BsonDocument) + * @see SearchOperator#moreLikeThis(Iterable) + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface MoreLikeThisSearchOperator extends SearchOperator { + @Override + TextSearchOperator score(SearchScore modifier); +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/MultiplySearchScoreExpression.java b/driver-core/src/main/com/mongodb/client/model/search/MultiplySearchScoreExpression.java new file mode 100644 index 00000000000..e6ab2332bfe --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/MultiplySearchScoreExpression.java @@ -0,0 +1,29 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
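Two short sketches for the interfaces above; the threshold, field, and title are illustrative, and BsonDocument/BsonString come from org.bson:

    // Count matches exactly up to 1_000 documents, then report only a lower bound.
    SearchOptions countingOptions = SearchOptions.searchOptions()
            .count(SearchCount.lowerBound().threshold(1_000));

    // moreLikeThis with one example document to find similar ones.
    SearchOperator similar = SearchOperator.moreLikeThis(
            new BsonDocument("title", new BsonString("The Godfather")));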
+ */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * @see SearchScoreExpression#multiplyExpression(Iterable) + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface MultiplySearchScoreExpression extends SearchScoreExpression { +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/MustCompoundSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/MustCompoundSearchOperator.java new file mode 100644 index 00000000000..d9db7f7e34b --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/MustCompoundSearchOperator.java @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * A representation of a {@link CompoundSearchOperator} that allows changing + * {@link CompoundSearchOperator#must(Iterable) must}-specific options, if any. + * This interface is a technicality and does not represent a meaningful element of the full-text search query syntax. + * + * @see CompoundSearchOperatorBase#must(Iterable) + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface MustCompoundSearchOperator extends CompoundSearchOperator { + @Override + MustCompoundSearchOperator score(SearchScore modifier); +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/MustNotCompoundSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/MustNotCompoundSearchOperator.java new file mode 100644 index 00000000000..5bdcc56009d --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/MustNotCompoundSearchOperator.java @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * A representation of a {@link CompoundSearchOperator} that allows changing + * {@link CompoundSearchOperator#mustNot(Iterable) mustNot}-specific options, if any. + * This interface is a technicality and does not represent a meaningful element of the full-text search query syntax. 
+ * + * @see CompoundSearchOperatorBase#mustNot(Iterable) + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface MustNotCompoundSearchOperator extends CompoundSearchOperator { + @Override + MustNotCompoundSearchOperator score(SearchScore modifier); +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/NumberNearSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/NumberNearSearchOperator.java new file mode 100644 index 00000000000..65d6ec4969e --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/NumberNearSearchOperator.java @@ -0,0 +1,32 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * @see SearchOperator#near(Number, Number, FieldSearchPath, FieldSearchPath...) + * @see SearchOperator#near(Number, Number, Iterable) + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface NumberNearSearchOperator extends SearchOperator { + @Override + NumberNearSearchOperator score(SearchScore modifier); +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/NumberRangeConstructibleBsonElement.java b/driver-core/src/main/com/mongodb/client/model/search/NumberRangeConstructibleBsonElement.java new file mode 100644 index 00000000000..4a1fb6a4e7a --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/NumberRangeConstructibleBsonElement.java @@ -0,0 +1,74 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
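For context, a compound sketch combining the clause types above, plus the numeric flavor of near (fields are illustrative; the clauses chain because each clause method returns a refinement of CompoundSearchOperator):

    CompoundSearchOperator compound = SearchOperator.compound()
            .must(Arrays.asList(SearchOperator.exists(SearchPath.fieldPath("title"))))
            .mustNot(Arrays.asList(SearchOperator.text(SearchPath.fieldPath("status"), "archived")));

    // Numeric near: scores by closeness of "year" to 2000, with pivot 5.
    SearchOperator nearYear = SearchOperator.near(2000, 5, SearchPath.fieldPath("year"));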
+ */
+package com.mongodb.client.model.search;
+
+import org.bson.conversions.Bson;
+
+final class NumberRangeConstructibleBsonElement extends RangeConstructibleBsonElement<Number, NumberRangeConstructibleBsonElement>
+        implements NumberRangeSearchOperator {
+    NumberRangeConstructibleBsonElement(final String name, final Bson value) {
+        super(name, value);
+    }
+
+    private NumberRangeConstructibleBsonElement(final Bson baseElement, final Bson appendedElementValue) {
+        super(baseElement, appendedElementValue);
+    }
+
+    @Override
+    protected NumberRangeConstructibleBsonElement newSelf(final Bson baseElement, final Bson appendedElementValue) {
+        return new NumberRangeConstructibleBsonElement(baseElement, appendedElementValue);
+    }
+
+    @Override
+    public NumberRangeSearchOperator gt(final Number l) {
+        return internalGt(l);
+    }
+
+    @Override
+    public NumberRangeSearchOperator lt(final Number u) {
+        return internalLt(u);
+    }
+
+    @Override
+    public NumberRangeSearchOperator gte(final Number l) {
+        return internalGte(l);
+    }
+
+    @Override
+    public NumberRangeSearchOperator lte(final Number u) {
+        return internalLte(u);
+    }
+
+    @Override
+    public NumberRangeSearchOperator gtLt(final Number l, final Number u) {
+        return internalGtLt(l, u);
+    }
+
+    @Override
+    public NumberRangeSearchOperator gteLte(final Number l, final Number u) {
+        return internalGteLte(l, u);
+    }
+
+    @Override
+    public NumberRangeSearchOperator gtLte(final Number l, final Number u) {
+        return internalGtLte(l, u);
+    }
+
+    @Override
+    public NumberRangeSearchOperator gteLt(final Number l, final Number u) {
+        return internalGteLt(l, u);
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/client/model/search/NumberRangeSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/NumberRangeSearchOperator.java
new file mode 100644
index 00000000000..fe5d37bdc41
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/search/NumberRangeSearchOperator.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.client.model.search;
+
+import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
+import com.mongodb.annotations.Sealed;
+
+/**
+ * @see SearchOperator#numberRange(FieldSearchPath, FieldSearchPath...)
+ * @see SearchOperator#numberRange(Iterable)
+ * @since 4.7
+ */
+@Sealed
+@Beta(Reason.CLIENT)
+public interface NumberRangeSearchOperator extends NumberRangeSearchOperatorBase, SearchOperator {
+    @Override
+    NumberRangeSearchOperator score(SearchScore modifier);
+}
diff --git a/driver-core/src/main/com/mongodb/client/model/search/NumberRangeSearchOperatorBase.java b/driver-core/src/main/com/mongodb/client/model/search/NumberRangeSearchOperatorBase.java
new file mode 100644
index 00000000000..daa31d48656
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/search/NumberRangeSearchOperatorBase.java
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.client.model.search;
+
+import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
+import com.mongodb.annotations.Sealed;
+
+/**
+ * A base for a {@link NumberRangeSearchOperator} which allows creating instances of this operator.
+ * This interface is a technicality and does not represent a meaningful element of the full-text search query syntax.
+ *
+ * @see SearchOperator#numberRange(FieldSearchPath, FieldSearchPath...)
+ * @see SearchOperator#numberRange(Iterable)
+ * @since 4.7
+ */
+@Sealed
+@Beta(Reason.CLIENT)
+public interface NumberRangeSearchOperatorBase {
+    /**
+     * Creates a new {@link NumberRangeSearchOperator} that tests if values are within (l; ∞).
+     *
+     * @param l The lower bound.
+     * @return A new {@link NumberRangeSearchOperator}.
+     */
+    NumberRangeSearchOperator gt(Number l);
+
+    /**
+     * Creates a new {@link NumberRangeSearchOperator} that tests if values are within (-∞; u).
+     *
+     * @param u The upper bound.
+     * @return A new {@link NumberRangeSearchOperator}.
+     */
+    NumberRangeSearchOperator lt(Number u);
+
+    /**
+     * Creates a new {@link NumberRangeSearchOperator} that tests if values are within [l; ∞).
+     *
+     * @param l The lower bound.
+     * @return A new {@link NumberRangeSearchOperator}.
+     */
+    NumberRangeSearchOperator gte(Number l);
+
+    /**
+     * Creates a new {@link NumberRangeSearchOperator} that tests if values are within (-∞; u].
+     *
+     * @param u The upper bound.
+     * @return A new {@link NumberRangeSearchOperator}.
+     */
+    NumberRangeSearchOperator lte(Number u);
+
+    /**
+     * Creates a new {@link NumberRangeSearchOperator} that tests if values are within (l; u).
+     *
+     * @param l The lower bound.
+     * @param u The upper bound.
+     * @return A new {@link NumberRangeSearchOperator}.
+     */
+    NumberRangeSearchOperator gtLt(Number l, Number u);
+
+    /**
+     * Creates a new {@link NumberRangeSearchOperator} that tests if values are within [l; u].
+     *
+     * @param l The lower bound.
+     * @param u The upper bound.
+     * @return A new {@link NumberRangeSearchOperator}.
+     */
+    NumberRangeSearchOperator gteLte(Number l, Number u);
+
+    /**
+     * Creates a new {@link NumberRangeSearchOperator} that tests if values are within (l; u].
+     *
+     * @param l The lower bound.
+     * @param u The upper bound.
+     * @return A new {@link NumberRangeSearchOperator}.
+     */
+    NumberRangeSearchOperator gtLte(Number l, Number u);
+
+    /**
+     * Creates a new {@link NumberRangeSearchOperator} that tests if values are within [l; u).
+     *
+     * @param l The lower bound.
+     * @param u The upper bound.
+     * @return A new {@link NumberRangeSearchOperator}.
+ */ + NumberRangeSearchOperator gteLt(Number l, Number u); +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/NumberSearchFacet.java b/driver-core/src/main/com/mongodb/client/model/search/NumberSearchFacet.java new file mode 100644 index 00000000000..4587f688097 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/NumberSearchFacet.java @@ -0,0 +1,36 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * @see SearchFacet#numberFacet(String, FieldSearchPath, Iterable) + * @since 4.7 + */ +@Sealed +@Beta({Reason.CLIENT, Reason.SERVER}) +public interface NumberSearchFacet extends SearchFacet { + /** + * Creates a new {@link NumberSearchFacet} with the default bucket specified. + * + * @param name The name of the bucket for documents that do not fall within the specified boundaries. + * @return A new {@link NumberSearchFacet}. + */ + NumberSearchFacet defaultBucket(String name); +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/PathBoostSearchScore.java b/driver-core/src/main/com/mongodb/client/model/search/PathBoostSearchScore.java new file mode 100644 index 00000000000..40459fa1724 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/PathBoostSearchScore.java @@ -0,0 +1,37 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * @see SearchScore#boost(FieldSearchPath) + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface PathBoostSearchScore extends SearchScore { + /** + * Creates a new {@link PathBoostSearchScore} with the value to fall back to + * if the field specified via {@link SearchScore#boost(FieldSearchPath)} is not found in a document. + * + * @param fallback The fallback value. Unlike {@link SearchScore#constant(float)}, does not have constraints. + * @return A new {@link PathBoostSearchScore}. 
+ */ + PathBoostSearchScore undefined(float fallback); +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/PathSearchScoreExpression.java b/driver-core/src/main/com/mongodb/client/model/search/PathSearchScoreExpression.java new file mode 100644 index 00000000000..b3c14025f4e --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/PathSearchScoreExpression.java @@ -0,0 +1,37 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * @see SearchScoreExpression#pathExpression(FieldSearchPath) + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface PathSearchScoreExpression extends SearchScoreExpression { + /** + * Creates a new {@link PathSearchScoreExpression} with the value to fall back to + * if the field specified via {@link SearchScoreExpression#pathExpression(FieldSearchPath)} is not found in a document. + * + * @param fallback The fallback value. + * @return A new {@link PathSearchScoreExpression}. + */ + PathSearchScoreExpression undefined(float fallback); +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/PhraseConstructibleBsonElement.java b/driver-core/src/main/com/mongodb/client/model/search/PhraseConstructibleBsonElement.java new file mode 100644 index 00000000000..0f18e2db7a9 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/PhraseConstructibleBsonElement.java @@ -0,0 +1,53 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
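A sketch tying the pieces above together: a number range whose score is boosted by another field, falling back to a constant when that field is missing (names and bounds are illustrative):

    SearchOperator recent = SearchOperator.numberRange(SearchPath.fieldPath("year"))
            .gteLte(2010, 2020) // inclusive bounds [2010; 2020]
            .score(SearchScore.boost(SearchPath.fieldPath("popularity")).undefined(1));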
+ */
+package com.mongodb.client.model.search;
+
+import com.mongodb.internal.client.model.AbstractConstructibleBsonElement;
+
+import org.bson.conversions.Bson;
+
+import static com.mongodb.assertions.Assertions.notNull;
+
+final class PhraseConstructibleBsonElement extends AbstractConstructibleBsonElement<PhraseConstructibleBsonElement> implements
+        PhraseSearchOperator {
+    PhraseConstructibleBsonElement(final String name, final Bson value) {
+        super(name, value);
+    }
+
+    private PhraseConstructibleBsonElement(final Bson baseElement, final Bson appendedElementValue) {
+        super(baseElement, appendedElementValue);
+    }
+
+    @Override
+    protected PhraseConstructibleBsonElement newSelf(final Bson baseElement, final Bson appendedElementValue) {
+        return new PhraseConstructibleBsonElement(baseElement, appendedElementValue);
+    }
+
+    @Override
+    public PhraseSearchOperator synonyms(final String name) {
+        return newWithAppendedValue("synonyms", notNull("name", name));
+    }
+
+    @Override
+    public PhraseSearchOperator slop(final int slop) {
+        return newWithAppendedValue("slop", slop);
+    }
+
+    @Override
+    public PhraseConstructibleBsonElement score(final SearchScore modifier) {
+        return newWithAppendedValue("score", notNull("modifier", modifier));
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/client/model/search/PhraseSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/PhraseSearchOperator.java
new file mode 100644
index 00000000000..3ac2abe05ad
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/search/PhraseSearchOperator.java
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.client.model.search;
+
+import com.mongodb.annotations.Beta;
+import com.mongodb.annotations.Reason;
+import com.mongodb.annotations.Sealed;
+
+/**
+ * @see SearchOperator#phrase(SearchPath, String)
+ * @see SearchOperator#phrase(Iterable, Iterable)
+ * @since 5.3
+ */
+
+@Sealed
+@Beta(Reason.CLIENT)
+public interface PhraseSearchOperator extends SearchOperator {
+    @Override
+    PhraseSearchOperator score(SearchScore modifier);
+
+    /**
+     * Creates a new {@link PhraseSearchOperator} that uses slop. The default value is 0.
+     *
+     * @param slop The allowable distance between words in the query phrase.
+     * @return A new {@link PhraseSearchOperator}.
+     */
+    PhraseSearchOperator slop(int slop);
+
+    /**
+     * Creates a new {@link PhraseSearchOperator} that uses synonyms.
+     *
+     * @param name The name of the synonym mapping.
+     * @return A new {@link PhraseSearchOperator}.
+ * + * @mongodb.atlas.manual atlas-search/synonyms/ Synonym mappings + */ + PhraseSearchOperator synonyms(String name); +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/QueryStringSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/QueryStringSearchOperator.java new file mode 100644 index 00000000000..eb32ee8c733 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/QueryStringSearchOperator.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * @see SearchOperator#queryString(FieldSearchPath, String) + * @since 5.3 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface QueryStringSearchOperator extends SearchOperator { + @Override + QueryStringSearchOperator score(SearchScore modifier); +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/RangeConstructibleBsonElement.java b/driver-core/src/main/com/mongodb/client/model/search/RangeConstructibleBsonElement.java new file mode 100644 index 00000000000..5d0afaf5a20 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/RangeConstructibleBsonElement.java @@ -0,0 +1,98 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
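Sketches of the phrase and queryString operators defined above, and of the regex operator that follows (queries and field names are illustrative):

    // Up to two extra words may separate "new" and "york".
    SearchOperator phrase = SearchOperator.phrase(SearchPath.fieldPath("plot"), "new york")
            .slop(2);

    // Lucene-style query string evaluated against a default path.
    SearchOperator byQuery = SearchOperator.queryString(
            SearchPath.fieldPath("title"), "inception OR interstellar");

    // Lucene regular expression over the whole field value.
    SearchOperator byRegex = SearchOperator.regex(SearchPath.fieldPath("title"), "[Ii]ncep.*");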
+ */
+package com.mongodb.client.model.search;
+
+import com.mongodb.internal.client.model.AbstractConstructibleBsonElement;
+import com.mongodb.lang.Nullable;
+import org.bson.conversions.Bson;
+
+import static com.mongodb.assertions.Assertions.notNull;
+
+abstract class RangeConstructibleBsonElement<T, S extends RangeConstructibleBsonElement<T, S>> extends AbstractConstructibleBsonElement<S> {
+    RangeConstructibleBsonElement(final String name, final Bson value) {
+        super(name, value);
+    }
+
+    RangeConstructibleBsonElement(final Bson baseElement, final Bson appendedElementValue) {
+        super(baseElement, appendedElementValue);
+    }
+
+    public final S score(final SearchScore modifier) {
+        return newWithAppendedValue("score", notNull("modifier", modifier));
+    }
+
+    final S internalGt(final T l) {
+        return newWithMutatedValue(l, false, null, false);
+    }
+
+    final S internalLt(final T u) {
+        return newWithMutatedValue(null, false, u, false);
+    }
+
+    final S internalGte(final T l) {
+        return newWithMutatedValue(l, true, null, false);
+    }
+
+    final S internalLte(final T u) {
+        return newWithMutatedValue(null, false, u, true);
+    }
+
+    final S internalGtLt(final T l, final T u) {
+        return newWithMutatedValue(l, false, u, false);
+    }
+
+    final S internalGteLte(final T l, final T u) {
+        return newWithMutatedValue(l, true, u, true);
+    }
+
+    final S internalGtLte(final T l, final T u) {
+        return newWithMutatedValue(l, false, u, true);
+    }
+
+    final S internalGteLt(final T l, final T u) {
+        return newWithMutatedValue(l, true, u, false);
+    }
+
+    private S newWithMutatedValue(
+            @Nullable final T l, final boolean includeL,
+            @Nullable final T u, final boolean includeU) {
+        if (l == null) {
+            notNull("u", u);
+        } else if (u == null) {
+            notNull("l", l);
+        }
+        return newWithMutatedValue(doc -> {
+            doc.remove("gte");
+            doc.remove("gt");
+            doc.remove("lte");
+            doc.remove("lt");
+            if (l != null) {
+                if (includeL) {
+                    doc.append("gte", l);
+                } else {
+                    doc.append("gt", l);
+                }
+            }
+            if (u != null) {
+                if (includeU) {
+                    doc.append("lte", u);
+                } else {
+                    doc.append("lt", u);
+                }
+            }
+        });
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/client/model/search/RegexSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/RegexSearchOperator.java
new file mode 100644
index 00000000000..c0286079714
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/client/model/search/RegexSearchOperator.java
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * @see SearchOperator#regex(SearchPath, String) + * @see SearchOperator#regex(Iterable, Iterable) + * @since 5.3 + */ + +@Sealed +@Beta(Reason.CLIENT) +public interface RegexSearchOperator extends SearchOperator { + @Override + RegexSearchOperator score(SearchScore modifier); +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/RelevanceSearchScoreExpression.java b/driver-core/src/main/com/mongodb/client/model/search/RelevanceSearchScoreExpression.java new file mode 100644 index 00000000000..2a36a679ad5 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/RelevanceSearchScoreExpression.java @@ -0,0 +1,29 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * @see SearchScoreExpression#relevanceExpression() + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface RelevanceSearchScoreExpression extends SearchScoreExpression { +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/SearchCollector.java b/driver-core/src/main/com/mongodb/client/model/search/SearchCollector.java new file mode 100644 index 00000000000..6f2c45b4961 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/SearchCollector.java @@ -0,0 +1,95 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; +import com.mongodb.client.model.Aggregates; +import com.mongodb.client.model.Projections; +import org.bson.Document; +import org.bson.conversions.Bson; + +import static com.mongodb.client.model.search.SearchFacet.combineToBson; +import static com.mongodb.assertions.Assertions.notNull; + +/** + * The core part of the {@link Aggregates#search(SearchCollector, SearchOptions) $search} pipeline stage of an aggregation pipeline. + * {@link SearchCollector}s allow returning metadata together with the search results. 
+ * You may use the {@code $$SEARCH_META} variable, e.g., via {@link Projections#computedSearchMeta(String)}, to extract this metadata. + * + * @mongodb.atlas.manual atlas-search/operators-and-collectors/#collectors Search collectors + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface SearchCollector extends Bson { + /** + * Returns a {@link SearchCollector} that groups results by values or ranges in the specified faceted fields and returns the count + * for each of those groups. + * + * @param operator The search operator to use. + * @param facets The non-empty facet definitions. + * @return The requested {@link SearchCollector}. + * @mongodb.atlas.manual atlas-search/facet/ facet collector + */ + @Beta({Reason.CLIENT, Reason.SERVER}) + static FacetSearchCollector facet(final SearchOperator operator, final Iterable facets) { + notNull("operator", operator); + notNull("facets", facets); + return new SearchConstructibleBsonElement("facet", new Document("operator", operator) + .append("facets", combineToBson(facets))); + } + + /** + * Creates a {@link SearchCollector} from a {@link Bson} in situations when there is no builder method that better satisfies your needs. + * This method cannot be used to validate the syntax. + *

+     * <p>
+     * <i>Example</i><br>
+     * The following code creates two functionally equivalent {@link SearchCollector}s,
+     * though they may not be {@linkplain Object#equals(Object) equal}.</p>
+     * <pre>{@code
+     *  SearchCollector collector1 = SearchCollector.facet(
+     *          SearchOperator.exists(
+     *                  SearchPath.fieldPath("fieldName")),
+     *          Arrays.asList(
+     *                  SearchFacet.stringFacet(
+     *                          "stringFacetName",
+     *                          SearchPath.fieldPath("stringFieldName")),
+     *                  SearchFacet.numberFacet(
+     *                          "numberFacetName",
+     *                          SearchPath.fieldPath("numberFieldName"),
+     *                          Arrays.asList(10, 20, 30))));
+     *  SearchCollector collector2 = SearchCollector.of(new Document("facet",
+     *          new Document("operator", SearchOperator.exists(
+     *                  SearchPath.fieldPath("fieldName")))
+     *                  .append("facets", SearchFacet.combineToBson(Arrays.asList(
+     *                          SearchFacet.stringFacet(
+     *                                  "stringFacetName",
+     *                                  SearchPath.fieldPath("stringFieldName")),
+     *                          SearchFacet.numberFacet(
+     *                                  "numberFacetName",
+     *                                  SearchPath.fieldPath("numberFieldName"),
+     *                                  Arrays.asList(10, 20, 30)))))));
+     * }</pre>
+ * + * @param collector A {@link Bson} representing the required {@link SearchCollector}. + * @return The requested {@link SearchCollector}. + */ + static SearchCollector of(final Bson collector) { + return new SearchConstructibleBsonElement(notNull("collector", collector)); + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/SearchConstructibleBson.java b/driver-core/src/main/com/mongodb/client/model/search/SearchConstructibleBson.java new file mode 100644 index 00000000000..dae84a54457 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/SearchConstructibleBson.java @@ -0,0 +1,114 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Immutable; +import com.mongodb.internal.client.model.AbstractConstructibleBson; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.Document; +import org.bson.conversions.Bson; + +import static com.mongodb.assertions.Assertions.notNull; + +final class SearchConstructibleBson extends AbstractConstructibleBson implements + RelevanceSearchScoreExpression, ConstantSearchScoreExpression, LogSearchScoreExpression, Log1pSearchScoreExpression, + AddSearchScoreExpression, MultiplySearchScoreExpression, + SearchOptions, + SearchHighlight, + TotalSearchCount, LowerBoundSearchCount, + FuzzySearchOptions, + FieldSearchPath, WildcardSearchPath { + /** + * An {@linkplain Immutable immutable} {@link BsonDocument#isEmpty() empty} instance. 
+ */ + static final SearchConstructibleBson EMPTY_IMMUTABLE = new SearchConstructibleBson(AbstractConstructibleBson.EMPTY_IMMUTABLE); + + SearchConstructibleBson(final Bson base) { + super(base); + } + + private SearchConstructibleBson(final Bson base, final Document appended) { + super(base, appended); + } + + @Override + protected SearchConstructibleBson newSelf(final Bson base, final Document appended) { + return new SearchConstructibleBson(base, appended); + } + + @Override + public SearchOptions index(final String name) { + return newAppended("index", new BsonString(notNull("name", name))); + } + + @Override + public SearchOptions highlight(final SearchHighlight option) { + return newAppended("highlight", notNull("option", option)); + } + + @Override + public SearchOptions count(final SearchCount option) { + return newAppended("count", notNull("option", option)); + } + + @Override + public SearchOptions returnStoredSource(final boolean returnStoredSource) { + return newAppended("returnStoredSource", new BsonBoolean(returnStoredSource)); + } + + @Override + public SearchConstructibleBson option(final String name, final Object value) { + return newAppended(notNull("name", name), notNull("value", value)); + } + + @Override + public SearchHighlight maxCharsToExamine(final int maxCharsToExamine) { + return newAppended("maxCharsToExamine", new BsonInt32(maxCharsToExamine)); + } + + @Override + public SearchHighlight maxNumPassages(final int maxNumPassages) { + return newAppended("maxNumPassages", new BsonInt32(maxNumPassages)); + } + + @Override + public LowerBoundSearchCount threshold(final int threshold) { + return newAppended("threshold", new BsonInt32(threshold)); + } + + @Override + public FuzzySearchOptions maxEdits(final int maxEdits) { + return newAppended("maxEdits", maxEdits); + } + + @Override + public FuzzySearchOptions prefixLength(final int prefixLength) { + return newAppended("prefixLength", prefixLength); + } + + @Override + public FuzzySearchOptions maxExpansions(final int maxExpansions) { + return newAppended("maxExpansions", maxExpansions); + } + + @Override + public FieldSearchPath multi(final String analyzerName) { + return newAppended("multi", new BsonString(notNull("analyzerName", analyzerName))); + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/SearchConstructibleBsonElement.java b/driver-core/src/main/com/mongodb/client/model/search/SearchConstructibleBsonElement.java new file mode 100644 index 00000000000..c1a37597c1a --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/SearchConstructibleBsonElement.java @@ -0,0 +1,164 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
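Finally, a sketch of a complete $search stage built from the collector above (index and field names are illustrative):

    Bson searchStage = Aggregates.search(
            SearchCollector.facet(
                    SearchOperator.exists(SearchPath.fieldPath("genre")),
                    Arrays.asList(
                            SearchFacet.stringFacet("genres", SearchPath.fieldPath("genre")))),
            SearchOptions.searchOptions()
                    .index("default")
                    .count(SearchCount.total()));
    // The facet counts travel in the $$SEARCH_META variable and can be projected
    // with Projections.computedSearchMeta(...).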
+ */ +package com.mongodb.client.model.search; + +import com.mongodb.internal.client.model.AbstractConstructibleBsonElement; +import org.bson.BsonInt32; +import org.bson.conversions.Bson; + +import java.util.stream.Collectors; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; + +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.client.model.search.FuzzySearchOptions.fuzzySearchOptions; +import static com.mongodb.internal.client.model.Util.sizeAtLeast; + +final class SearchConstructibleBsonElement extends AbstractConstructibleBsonElement implements + MustCompoundSearchOperator, MustNotCompoundSearchOperator, ShouldCompoundSearchOperator, FilterCompoundSearchOperator, + ExistsSearchOperator, TextSearchOperator, AutocompleteSearchOperator, + NumberNearSearchOperator, DateNearSearchOperator, GeoNearSearchOperator, + EqualsSearchOperator, InSearchOperator, MoreLikeThisSearchOperator, + RegexSearchOperator, QueryStringSearchOperator, WildcardSearchOperator, + ValueBoostSearchScore, PathBoostSearchScore, ConstantSearchScore, FunctionSearchScore, + GaussSearchScoreExpression, PathSearchScoreExpression, + FacetSearchCollector, + StringSearchFacet, NumberSearchFacet, DateSearchFacet { + SearchConstructibleBsonElement(final String name) { + super(name); + } + + SearchConstructibleBsonElement(final String name, final Bson value) { + super(name, value); + } + + SearchConstructibleBsonElement(final Bson baseElement) { + super(baseElement); + } + + private SearchConstructibleBsonElement(final Bson baseElement, final Bson appendedElementValue) { + super(baseElement, appendedElementValue); + } + + @Override + protected SearchConstructibleBsonElement newSelf(final Bson baseElement, final Bson appendedElementValue) { + return new SearchConstructibleBsonElement(baseElement, appendedElementValue); + } + + @Override + public StringSearchFacet numBuckets(final int max) { + return newWithAppendedValue("numBuckets", max); + } + + @Override + public SearchConstructibleBsonElement defaultBucket(final String name) { + return newWithAppendedValue("default", notNull("name", name)); + } + + @Override + public SearchConstructibleBsonElement fuzzy() { + return fuzzy(fuzzySearchOptions()); + } + + @Override + public SearchConstructibleBsonElement fuzzy(final FuzzySearchOptions options) { + return newWithMutatedValue(doc -> { + doc.remove("synonyms"); + doc.append("fuzzy", notNull("options", options)); + }); + } + + @Override + public TextSearchOperator synonyms(final String name) { + return newWithMutatedValue(doc -> { + doc.remove("fuzzy"); + doc.append("synonyms", notNull("name", name)); + }); + } + + @Override + public AutocompleteSearchOperator anyTokenOrder() { + return newWithAppendedValue("tokenOrder", "any"); + } + + @Override + public AutocompleteSearchOperator sequentialTokenOrder() { + return newWithAppendedValue("tokenOrder", "sequential"); + } + + @Override + public MustCompoundSearchOperator must(final Iterable clauses) { + return newCombined("must", clauses); + } + + @Override + public MustNotCompoundSearchOperator mustNot(final Iterable clauses) { + return newCombined("mustNot", clauses); + } + + @Override + public ShouldCompoundSearchOperator should(final Iterable clauses) { + return newCombined("should", clauses); + } + + @Override + public FilterCompoundSearchOperator filter(final Iterable clauses) { + return newCombined("filter", clauses); + } + + private 
SearchConstructibleBsonElement newCombined(final String ruleName, final Iterable clauses) { + notNull("clauses", clauses); + isTrueArgument("clauses must not be empty", sizeAtLeast(clauses, 1)); + return newWithMutatedValue(doc -> { + Iterable existingClauses = doc.get(ruleName, Iterable.class); + Iterable newClauses; + if (existingClauses == null) { + newClauses = clauses; + } else { + newClauses = Stream.concat( + StreamSupport.stream(existingClauses.spliterator(), false), + StreamSupport.stream(clauses.spliterator(), false)).collect(Collectors.toList()); + } + doc.append(ruleName, newClauses); + }); + } + + @Override + public ShouldCompoundSearchOperator minimumShouldMatch(final int minimumShouldMatch) { + return newWithAppendedValue("minimumShouldMatch", new BsonInt32(minimumShouldMatch)); + } + + @Override + public SearchConstructibleBsonElement score(final SearchScore modifier) { + return newWithAppendedValue("score", notNull("modifier", modifier)); + } + + @Override + public SearchConstructibleBsonElement undefined(final float fallback) { + return newWithAppendedValue("undefined", fallback); + } + + @Override + public GaussSearchScoreExpression offset(final double offset) { + return newWithAppendedValue("offset", offset); + } + + @Override + public GaussSearchScoreExpression decay(final double decay) { + return newWithAppendedValue("decay", decay); + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/SearchCount.java b/driver-core/src/main/com/mongodb/client/model/search/SearchCount.java new file mode 100644 index 00000000000..f9a5917582b --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/SearchCount.java @@ -0,0 +1,76 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; +import com.mongodb.client.model.Projections; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.conversions.Bson; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * Counting options. + * You may use the {@code $$SEARCH_META} variable, e.g., via {@link Projections#computedSearchMeta(String)}, + * to extract the results of counting. + * + * @mongodb.atlas.manual atlas-search/counting/ Counting + * @since 4.7 + */ +@Sealed +@Beta({Reason.CLIENT, Reason.SERVER}) +public interface SearchCount extends Bson { + /** + * Returns a {@link SearchCount} that instructs to count documents exactly. + * + * @return The requested {@link SearchCount}. + */ + static TotalSearchCount total() { + return new SearchConstructibleBson(new BsonDocument("type", new BsonString("total"))); + } + + /** + * Returns a {@link SearchCount} that instructs to count documents exactly only up to a + * {@linkplain LowerBoundSearchCount#threshold(int) threshold}. + * + * @return The requested {@link SearchCount}. 
+ */ + static LowerBoundSearchCount lowerBound() { + return new SearchConstructibleBson(new BsonDocument("type", new BsonString("lowerBound"))); + } + + /** + * Creates a {@link SearchCount} from a {@link Bson} in situations when there is no builder method that better satisfies your needs. + * This method cannot be used to validate the syntax. + *

+     * <i>Example</i><br>
+     * The following code creates two functionally equivalent {@link SearchCount}s,
+     * though they may not be {@linkplain Object#equals(Object) equal}.
+     * <pre>{@code
+     *  SearchCount count1 = SearchCount.lowerBound();
+     *  SearchCount count2 = SearchCount.of(new Document("type", "lowerBound"));
+     * }</pre>
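+     * <p>
+     * A similar sketch for the exact-count variant, built from the {@code total()} factory method above:
+     * <pre>{@code
+     *  SearchCount count3 = SearchCount.total();
+     *  SearchCount count4 = SearchCount.of(new Document("type", "total"));
+     * }</pre>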
+ * + * @param count A {@link Bson} representing the required {@link SearchCount}. + * @return The requested {@link SearchCount}. + */ + static SearchCount of(final Bson count) { + return new SearchConstructibleBson(notNull("count", count)); + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/SearchFacet.java b/driver-core/src/main/com/mongodb/client/model/search/SearchFacet.java new file mode 100644 index 00000000000..4aac0fef089 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/SearchFacet.java @@ -0,0 +1,156 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; +import org.bson.BsonDocument; +import org.bson.BsonType; +import org.bson.BsonValue; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.time.Instant; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.internal.client.model.Util.sizeAtLeast; +import static java.lang.String.format; +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A facet definition for {@link FacetSearchCollector}. + * + * @mongodb.atlas.manual atlas-search/facet/#facet-definition Facet definition + * @since 4.7 + */ +@Sealed +@Beta({Reason.CLIENT, Reason.SERVER}) +public interface SearchFacet extends Bson { + /** + * Returns a {@link SearchFacet} that allows narrowing down search results based on the most frequent + * BSON {@link BsonType#STRING String} values of the specified field. + * + * @param name The facet name. + * @param path The field to facet on. + * @return The requested {@link SearchFacet}. + * @mongodb.atlas.manual atlas-search/facet/#string-facets String facet definition + */ + static StringSearchFacet stringFacet(final String name, final FieldSearchPath path) { + return new SearchConstructibleBsonElement(notNull("name", name), + new Document("type", "string") + .append("path", notNull("path", path).toValue())); + } + + /** + * Returns a {@link SearchFacet} that allows determining the frequency of + * BSON {@link BsonType#INT32 32-bit integer} / {@link BsonType#INT64 64-bit integer} / {@link BsonType#DOUBLE Double} values + * in the search results by breaking the results into separate ranges. + * + * @param name The facet name. + * @param path The path to facet on. + * @param boundaries Bucket boundaries in ascending order. Must contain at least two boundaries. + * @return The requested {@link SearchFacet}. 
+ * @mongodb.atlas.manual atlas-search/facet/#numeric-facets Numeric facet definition + */ + static NumberSearchFacet numberFacet(final String name, final FieldSearchPath path, final Iterable boundaries) { + isTrueArgument("boundaries must contain at least 2 elements", sizeAtLeast(boundaries, 2)); + return new SearchConstructibleBsonElement(notNull("name", name), + new Document("type", "number") + .append("path", notNull("path", path).toValue()) + .append("boundaries", notNull("boundaries", boundaries))); + } + + /** + * Returns a {@link SearchFacet} that allows determining the frequency of BSON {@link BsonType#DATE_TIME Date} values + * in the search results by breaking the results into separate ranges. + * + * @param name The facet name. + * @param path The path to facet on. + * @param boundaries Bucket boundaries in ascending order. Must contain at least two boundaries. + * @return The requested {@link SearchFacet}. + * @mongodb.atlas.manual atlas-search/facet/#date-facets Date facet definition + * @see org.bson.codecs.jsr310.InstantCodec + */ + static DateSearchFacet dateFacet(final String name, final FieldSearchPath path, final Iterable boundaries) { + isTrueArgument("boundaries must contain at least 2 elements", sizeAtLeast(boundaries, 2)); + return new SearchConstructibleBsonElement(notNull("name", name), + new Document("type", "date") + .append("path", notNull("path", path).toValue()) + .append("boundaries", notNull("boundaries", boundaries))); + } + + /** + * Creates a {@link SearchFacet} from a {@link Bson} in situations when there is no builder method that better satisfies your needs. + * This method cannot be used to validate the syntax. + *

+     * <i>Example</i><br>
+     * The following code creates two functionally equivalent {@link SearchFacet}s,
+     * though they may not be {@linkplain Object#equals(Object) equal}.
+     * <pre>{@code
+     *  SearchFacet facet1 = SearchFacet.stringFacet("facetName",
+     *          SearchPath.fieldPath("fieldName"));
+     *  SearchFacet facet2 = SearchFacet.of(new Document("facetName", new Document("type", "string")
+     *          .append("path", SearchPath.fieldPath("fieldName").toValue())));
+     * }</pre>
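+     * <p>
+     * A similar sketch for a number facet; the facet name, field name and boundaries are illustrative:
+     * <pre>{@code
+     *  SearchFacet facet3 = SearchFacet.numberFacet("facetName",
+     *          SearchPath.fieldPath("fieldName"), Arrays.asList(0, 100, 200));
+     *  SearchFacet facet4 = SearchFacet.of(new Document("facetName", new Document("type", "number")
+     *          .append("path", SearchPath.fieldPath("fieldName").toValue())
+     *          .append("boundaries", Arrays.asList(0, 100, 200))));
+     * }</pre>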
+ * + * @param facet A {@link Bson} representing the required {@link SearchFacet}. + * @return The requested {@link SearchFacet}. + */ + static SearchFacet of(final Bson facet) { + return new SearchConstructibleBsonElement(notNull("facet", facet)); + } + + /** + * Combines {@link SearchFacet}s into a {@link Bson}. + *

+ * This method may be useful when using {@link SearchCollector#of(Bson)}.
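+     * <p>
+     * A minimal sketch, with illustrative facet and field names:
+     * <pre>{@code
+     *  Bson combinedFacets = SearchFacet.combineToBson(Arrays.asList(
+     *          SearchFacet.stringFacet("stringFacetName", SearchPath.fieldPath("fieldName1")),
+     *          SearchFacet.numberFacet("numberFacetName", SearchPath.fieldPath("fieldName2"),
+     *                  Arrays.asList(0, 100))));
+     * }</pre>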

+ * + * @param facets The non-empty facet definitions to combine. + * @return A {@link Bson} representing combined {@code facets}. + */ + static Bson combineToBson(final Iterable facets) { + notNull("facets", facets); + isTrueArgument("facets must not be empty", sizeAtLeast(facets, 1)); + return new Bson() { + @Override + public BsonDocument toBsonDocument(final Class documentClass, final CodecRegistry codecRegistry) { + Set names = new HashSet<>(); + BsonDocument result = new BsonDocument(); + for (final SearchFacet facet : facets) { + BsonDocument doc = facet.toBsonDocument(documentClass, codecRegistry); + assertTrue(doc.size() == 1); + Map.Entry entry = doc.entrySet().iterator().next(); + String name = entry.getKey(); + isTrue(format("facet names must be unique. '%s' is used at least twice in %s", names, facets), names.add(name)); + result.append(name, entry.getValue()); + } + return result; + } + + @Override + public String toString() { + return facets.toString(); + } + }; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/SearchHighlight.java b/driver-core/src/main/com/mongodb/client/model/search/SearchHighlight.java new file mode 100644 index 00000000000..6610c57590f --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/SearchHighlight.java @@ -0,0 +1,106 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; +import com.mongodb.client.model.Projections; +import org.bson.BsonDocument; +import org.bson.conversions.Bson; + +import java.util.Iterator; + +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.Iterables.concat; +import static com.mongodb.internal.client.model.Util.combineToBsonValue; + +/** + * Highlighting options. + * You may use the {@code $meta: "searchHighlights"} expression, e.g., via {@link Projections#metaSearchHighlights(String)}, + * to extract the results of highlighting. + * + * @mongodb.atlas.manual atlas-search/highlighting/ Highlighting + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface SearchHighlight extends Bson { + /** + * Creates a new {@link SearchHighlight} with the maximum number of characters to examine on a document + * when performing highlighting for a field. + * + * @param maxCharsToExamine The maximum number of characters to examine. + * @return A new {@link SearchHighlight}. + */ + SearchHighlight maxCharsToExamine(int maxCharsToExamine); + + /** + * Creates a new {@link SearchHighlight} with the maximum number of high-scoring passages to return per document + * in the {@code "highlights"} results for each field. + * + * @param maxNumPassages The maximum number of high-scoring passages. + * @return A new {@link SearchHighlight}. 
+ */ + SearchHighlight maxNumPassages(int maxNumPassages); + + /** + * Returns a {@link SearchHighlight} for the given {@code paths}. + * + * @param path The field to be searched. + * @param paths More fields to be searched. + * @return The requested {@link SearchHighlight}. + */ + static SearchHighlight paths(final SearchPath path, final SearchPath... paths) { + return paths(concat(notNull("path", path), paths)); + } + + /** + * Returns a {@link SearchHighlight} for the given {@code paths}. + * + * @param paths The non-empty fields to be searched. + * @return The requested {@link SearchHighlight}. + */ + static SearchHighlight paths(final Iterable paths) { + Iterator pathIterator = notNull("paths", paths).iterator(); + isTrueArgument("paths must not be empty", pathIterator.hasNext()); + return new SearchConstructibleBson(new BsonDocument("path", combineToBsonValue(pathIterator, false))); + } + + /** + * Creates a {@link SearchHighlight} from a {@link Bson} in situations when there is no builder method that better satisfies your needs. + * This method cannot be used to validate the syntax. + *

+     * <i>Example</i><br>
+     * The following code creates two functionally equivalent {@link SearchHighlight}s,
+     * though they may not be {@linkplain Object#equals(Object) equal}.
+     * <pre>{@code
+     *  SearchHighlight highlight1 = SearchHighlight.paths(
+     *          SearchPath.fieldPath("fieldName"),
+     *          SearchPath.wildcardPath("wildc*rd"));
+     *  SearchHighlight highlight2 = SearchHighlight.of(new Document("path", Arrays.asList(
+     *          SearchPath.fieldPath("fieldName").toBsonValue(),
+     *          SearchPath.wildcardPath("wildc*rd").toBsonValue())));
+     * }</pre>
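+     * <p>
+     * Highlighting options may be chained onto the result; a minimal sketch, with illustrative limits:
+     * <pre>{@code
+     *  SearchHighlight highlight3 = SearchHighlight.paths(SearchPath.fieldPath("fieldName"))
+     *          .maxCharsToExamine(500000)
+     *          .maxNumPassages(5);
+     * }</pre>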
+ * + * @param highlight A {@link Bson} representing the required {@link SearchHighlight}. + * @return The requested {@link SearchHighlight}. + */ + static SearchHighlight of(final Bson highlight) { + return new SearchConstructibleBson(notNull("highlight", highlight)); + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/SearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/SearchOperator.java new file mode 100644 index 00000000000..aa8b01b29d4 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/SearchOperator.java @@ -0,0 +1,656 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; +import com.mongodb.client.model.Aggregates; +import com.mongodb.client.model.geojson.Point; +import org.bson.BsonArray; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.BsonNull; +import org.bson.BsonBoolean; +import org.bson.BsonType; +import org.bson.Document; +import org.bson.conversions.Bson; +import org.bson.types.ObjectId; + +import java.time.Duration; +import java.time.Instant; +import java.util.Iterator; +import java.util.UUID; + +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.Iterables.concat; +import static com.mongodb.internal.client.model.Util.combineToBsonValue; +import static java.util.Collections.singleton; + +/** + * The core part of the {@link Aggregates#search(SearchOperator, SearchOptions) $search} pipeline stage of an aggregation pipeline. + * + * @mongodb.atlas.manual atlas-search/operators-and-collectors/#operators Search operators + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface SearchOperator extends Bson { + /** + * Creates a new {@link SearchOperator} with the scoring modifier specified. + * + * @param modifier The scoring modifier. + * @return A new {@link SearchOperator}. + */ + SearchOperator score(SearchScore modifier); + + /** + * Returns a base for a {@link SearchOperator} that may combine multiple {@link SearchOperator}s. + * Combining {@link SearchOperator}s affects calculation of the relevance score. + * + * @return A base for a {@link CompoundSearchOperator}. + * @mongodb.atlas.manual atlas-search/compound/ compound operator + */ + static CompoundSearchOperatorBase compound() { + return new SearchConstructibleBsonElement("compound"); + } + + /** + * Returns a {@link SearchOperator} that tests if the {@code path} exists in a document. + * + * @param path The path to test. + * @return The requested {@link SearchOperator}. 
+ * @mongodb.atlas.manual atlas-search/exists/ exists operator + */ + static ExistsSearchOperator exists(final FieldSearchPath path) { + return new SearchConstructibleBsonElement("exists", new Document("path", notNull("path", path).toValue())); + } + + /** + * Returns a {@link SearchOperator} that performs a full-text search. + * + * @param path The field to be searched. + * @param query The string to search for. + * @return The requested {@link SearchOperator}. + * @mongodb.atlas.manual atlas-search/text/ text operator + */ + static TextSearchOperator text(final SearchPath path, final String query) { + return text(singleton(notNull("path", path)), singleton(notNull("query", query))); + } + + /** + * Returns a {@link SearchOperator} that performs a full-text search. + * + * @param paths The non-empty fields to be searched. + * @param queries The non-empty strings to search for. + * @return The requested {@link SearchOperator}. + * @mongodb.atlas.manual atlas-search/text/ text operator + */ + static TextSearchOperator text(final Iterable paths, final Iterable queries) { + Iterator queryIterator = notNull("queries", queries).iterator(); + isTrueArgument("queries must not be empty", queryIterator.hasNext()); + String firstQuery = queryIterator.next(); + Iterator pathIterator = notNull("paths", paths).iterator(); + isTrueArgument("paths must not be empty", pathIterator.hasNext()); + return new SearchConstructibleBsonElement("text", new Document("query", queryIterator.hasNext() ? queries : firstQuery) + .append("path", combineToBsonValue(pathIterator, false))); + } + + /** + * Returns a {@link SearchOperator} that may be used to implement search-as-you-type functionality. + * + * @param path The field to be searched. + * @param query The string to search for. + * @param queries More strings to search for. + * @return The requested {@link SearchOperator}. + * @mongodb.atlas.manual atlas-search/autocomplete/ autocomplete operator + */ + static AutocompleteSearchOperator autocomplete(final FieldSearchPath path, final String query, final String... queries) { + return autocomplete(path, concat(notNull("query", query), queries)); + } + + /** + * Returns a {@link SearchOperator} that may be used to implement search-as-you-type functionality. + * + * @param path The field to be searched. + * @param queries The non-empty strings to search for. + * @return The requested {@link SearchOperator}. + * @mongodb.atlas.manual atlas-search/autocomplete/ autocomplete operator + */ + static AutocompleteSearchOperator autocomplete(final FieldSearchPath path, final Iterable queries) { + Iterator queryIterator = notNull("queries", queries).iterator(); + isTrueArgument("queries must not be empty", queryIterator.hasNext()); + String firstQuery = queryIterator.next(); + return new SearchConstructibleBsonElement("autocomplete", new Document("query", queryIterator.hasNext() ? queries : firstQuery) + .append("path", notNull("path", path).toValue())); + } + + /** + * Returns a base for a {@link SearchOperator} that tests if the + * BSON {@link BsonType#INT32 32-bit integer} / {@link BsonType#INT64 64-bit integer} / {@link BsonType#DOUBLE Double} values + * of the specified fields are within an interval. + * + * @param path The field to be searched. + * @param paths More fields to be searched. + * @return A base for a {@link NumberRangeSearchOperator}. + * @mongodb.atlas.manual atlas-search/range/ range operator + */ + static NumberRangeSearchOperatorBase numberRange(final FieldSearchPath path, final FieldSearchPath... 
paths) { + return numberRange(concat(notNull("path", path), paths)); + } + + /** + * Returns a base for a {@link SearchOperator} that tests if the + * BSON {@link BsonType#INT32 32-bit integer} / {@link BsonType#INT64 64-bit integer} / {@link BsonType#DOUBLE Double} values + * of the specified fields are within an interval. + * + * @param paths The non-empty fields to be searched. + * @return A base for a {@link NumberRangeSearchOperator}. + * @mongodb.atlas.manual atlas-search/range/ range operator + */ + static NumberRangeSearchOperatorBase numberRange(final Iterable paths) { + Iterator pathIterator = notNull("paths", paths).iterator(); + isTrueArgument("paths must not be empty", pathIterator.hasNext()); + return new NumberRangeConstructibleBsonElement("range", new Document("path", combineToBsonValue(pathIterator, true))); + } + + /** + * Returns a base for a {@link SearchOperator} that tests if the + * BSON {@link BsonType#DATE_TIME Date} values of the specified fields are within an interval. + * + * @param path The field to be searched. + * @param paths More fields to be searched. + * @return A base for a {@link DateRangeSearchOperator}. + * @mongodb.atlas.manual atlas-search/range/ range operator + */ + static DateRangeSearchOperatorBase dateRange(final FieldSearchPath path, final FieldSearchPath... paths) { + return dateRange(concat(notNull("path", path), paths)); + } + + /** + * Returns a base for a {@link SearchOperator} that tests if the + * BSON {@link BsonType#DATE_TIME Date} values of the specified fields are within an interval. + * + * @param paths The non-empty fields to be searched. + * @return A base for a {@link DateRangeSearchOperator}. + * @mongodb.atlas.manual atlas-search/range/ range operator + */ + static DateRangeSearchOperatorBase dateRange(final Iterable paths) { + Iterator pathIterator = notNull("paths", paths).iterator(); + isTrueArgument("paths must not be empty", pathIterator.hasNext()); + return new DateRangeConstructibleBsonElement("range", new Document("path", combineToBsonValue(pathIterator, true))); + } + + /** + * Returns a {@link SearchOperator} that allows finding results that are near the specified {@code origin}. + * + * @param origin The origin from which the proximity of the results is measured. + * The relevance score is 1 if the values of the fields are {@code origin}. + * @param pivot The distance from the {@code origin} at which the relevance score drops in half. + * @param path The field to be searched. + * @param paths More fields to be searched. + * @return The requested {@link SearchOperator}. + * @mongodb.atlas.manual atlas-search/near/ near operator + */ + static NumberNearSearchOperator near(final Number origin, final Number pivot, final FieldSearchPath path, final FieldSearchPath... paths) { + return near(origin, pivot, concat(notNull("path", path), paths)); + } + + /** + * Returns a {@link SearchOperator} that allows finding results that are near the specified {@code origin}. + * + * @param origin The origin from which the proximity of the results is measured. + * The relevance score is 1 if the values of the fields are {@code origin}. + * @param pivot The distance from the {@code origin} at which the relevance score drops in half. + * @param paths The non-empty fields to be searched. + * @return The requested {@link SearchOperator}. 
+ * @mongodb.atlas.manual atlas-search/near/ near operator + */ + static NumberNearSearchOperator near(final Number origin, final Number pivot, final Iterable paths) { + Iterator pathIterator = notNull("paths", paths).iterator(); + isTrueArgument("paths must not be empty", pathIterator.hasNext()); + return new SearchConstructibleBsonElement("near", new Document("origin", notNull("origin", origin)) + .append("path", combineToBsonValue(pathIterator, true)) + .append("pivot", notNull("pivot", pivot))); + } + + /** + * Returns a {@link SearchOperator} that allows finding results that are near the specified {@code origin}. + * + * @param origin The origin from which the proximity of the results is measured. + * The relevance score is 1 if the values of the fields are {@code origin}. + * @param pivot The distance from the {@code origin} at which the relevance score drops in half. + * Data is extracted via {@link Duration#toMillis()}. + * @param path The field to be searched. + * @param paths More fields to be searched. + * @return The requested {@link SearchOperator}. + * @mongodb.atlas.manual atlas-search/near/ near operator + * @see org.bson.codecs.jsr310.InstantCodec + */ + static DateNearSearchOperator near(final Instant origin, final Duration pivot, final FieldSearchPath path, final FieldSearchPath... paths) { + return near(origin, pivot, concat(notNull("path", path), paths)); + } + + /** + * Returns a {@link SearchOperator} that allows finding results that are near the specified {@code origin}. + * + * @param origin The origin from which the proximity of the results is measured. + * The relevance score is 1 if the values of the fields are {@code origin}. + * @param pivot The distance from the {@code origin} at which the relevance score drops in half. + * Data is extracted via {@link Duration#toMillis()}. + * @param paths The non-empty fields to be searched. + * @return The requested {@link SearchOperator}. + * @mongodb.atlas.manual atlas-search/near/ near operator + * @see org.bson.codecs.jsr310.InstantCodec + */ + static DateNearSearchOperator near(final Instant origin, final Duration pivot, final Iterable paths) { + Iterator pathIterator = notNull("paths", paths).iterator(); + isTrueArgument("paths must not be empty", pathIterator.hasNext()); + notNull("pivot", pivot); + return new SearchConstructibleBsonElement("near", new Document("origin", notNull("origin", origin)) + .append("path", combineToBsonValue(pathIterator, true)) + .append("pivot", pivot.toMillis())); + } + + /** + * Returns a {@link SearchOperator} that allows finding results that are near the specified {@code origin}. + * + * @param origin The origin from which the proximity of the results is measured. + * The relevance score is 1 if the values of the fields are {@code origin}. + * @param pivot The distance in meters from the {@code origin} at which the relevance score drops in half. + * @param path The field to be searched. + * @param paths More fields to be searched. + * @return The requested {@link SearchOperator}. + * @mongodb.atlas.manual atlas-search/near/ near operator + */ + static GeoNearSearchOperator near(final Point origin, final Number pivot, final FieldSearchPath path, final FieldSearchPath... paths) { + return near(origin, pivot, concat(notNull("path", path), paths)); + } + + /** + * Returns a {@link SearchOperator} that allows finding results that are near the specified {@code origin}. + * + * @param origin The origin from which the proximity of the results is measured. 
+ * The relevance score is 1 if the values of the fields are {@code origin}. + * @param pivot The distance in meters from the {@code origin} at which the relevance score drops in half. + * @param paths The non-empty fields to be searched. + * @return The requested {@link SearchOperator}. + * @mongodb.atlas.manual atlas-search/near/ near operator + */ + static GeoNearSearchOperator near(final Point origin, final Number pivot, final Iterable paths) { + Iterator pathIterator = notNull("paths", paths).iterator(); + isTrueArgument("paths must not be empty", pathIterator.hasNext()); + return new SearchConstructibleBsonElement("near", new Document("origin", notNull("origin", origin)) + .append("path", combineToBsonValue(pathIterator, true)) + .append("pivot", notNull("pivot", pivot))); + } + + /** + * Returns a {@link SearchOperator} that searches for documents where the + * value or array of values at a given path contains any of the specified values + * + * @param path The indexed field to be searched. + * @param value The boolean value to search for. + * @param values More fields to be searched. + * @return The requested {@link SearchOperator}. + * @mongodb.atlas.manual atlas-search/in/ in operator + */ + static InSearchOperator in(final FieldSearchPath path, final boolean value, final boolean... values) { + notNull("values", values); + BsonArray bsonArray = new BsonArray(); + bsonArray.add(new BsonBoolean(value)); + for (boolean v : values) { + bsonArray.add(new BsonBoolean(v)); + } + return in(notNull("path", path), bsonArray); + } + + /** + * Returns a {@link SearchOperator} that searches for documents where the + * value or array of values at a given path contains any of the specified values + * + * @param path The indexed field to be searched. + * @param value The objectId value to search for. + * @param values More fields to be searched. + * @return The requested {@link SearchOperator}. + * @mongodb.atlas.manual atlas-search/in/ in operator + */ + static InSearchOperator in(final FieldSearchPath path, final ObjectId value, final ObjectId... values) { + return in(notNull("path", path), concat(notNull("value", value), values)); + } + + /** + * Returns a {@link SearchOperator} that searches for documents where the + * value or array of values at a given path contains any of the specified values + * + * @param path The indexed field to be searched. + * @param value The number value to search for. + * @param values More fields to be searched. + * @return The requested {@link SearchOperator}. + * @mongodb.atlas.manual atlas-search/in/ in operator + */ + static InSearchOperator in(final FieldSearchPath path, final Number value, final Number... values) { + return in(notNull("path", path), concat(notNull("value", value), values)); + } + + /** + * Returns a {@link SearchOperator} that searches for documents where the + * value or array of values at a given path contains any of the specified values + * + * @param path The indexed field to be searched. + * @param value The instant date value to search for. + * @param values More fields to be searched. + * @return The requested {@link SearchOperator}. + * @mongodb.atlas.manual atlas-search/in/ in operator + */ + static InSearchOperator in(final FieldSearchPath path, final Instant value, final Instant... 
values) { + return in(notNull("path", path), concat(notNull("value", value), values)); + } + + /** + * Returns a {@link SearchOperator} that searches for documents where the + * value or array of values at a given path contains any of the specified values + * + * @param path The indexed field to be searched. + * @param value The uuid value to search for. + * @param values More fields to be searched. + * @return The requested {@link SearchOperator}. + * @mongodb.atlas.manual atlas-search/in/ in operator + */ + static InSearchOperator in(final FieldSearchPath path, final UUID value, final UUID... values) { + return in(notNull("path", path), concat(notNull("value", value), values)); + } + + /** + * Returns a {@link SearchOperator} that searches for documents where the + * value or array of values at a given path contains any of the specified values + * + * @param path The indexed field to be searched. + * @param value The string value to search for. + * @param values More fields to be searched. + * @return The requested {@link SearchOperator}. + * @mongodb.atlas.manual atlas-search/in/ in operator + */ + static InSearchOperator in(final FieldSearchPath path, final String value, final String... values) { + return in(notNull("path", path), concat(notNull("value", value), values)); + } + + /** + * Returns a {@link SearchOperator} that searches for documents where the + * value or array of values at a given path contains any of the specified values + * + * @param path The indexed field to be searched. + * @param values The non-empty values to search for. Value can be either a single value or an array of values of only one of the supported BSON types and can't be a mix of different types. + * @param the type of elements in {@code values}. + * @return The requested {@link SearchOperator}. + * @mongodb.atlas.manual atlas-search/in/ in operator + */ + static InSearchOperator in(final FieldSearchPath path, final Iterable values) { + notNull("path", path); + Iterator valueIterator = notNull("values", values).iterator(); + isTrueArgument("values must not be empty", valueIterator.hasNext()); + T firstValue = valueIterator.next(); + boolean hasMore = valueIterator.hasNext(); + return new SearchConstructibleBsonElement("in", new Document() + .append("path", path.toValue()) + .append("value", hasMore ? values : firstValue)); + } + + /** + * Returns a {@link SearchOperator} that searches for documents where a field matches the specified value. + * + * @param path The indexed field to be searched. + * @param value The boolean value to query for. + * @return The requested {@link SearchOperator}. + * @mongodb.atlas.manual atlas-search/equals/ equals operator + */ + static EqualsSearchOperator equals(final FieldSearchPath path, final boolean value) { + return new SearchConstructibleBsonElement("equals", new Document("path", notNull("path", path).toValue()) + .append("value", value)); + } + + /** + * Returns a {@link SearchOperator} that searches for documents where a field matches the specified value. + * + * @param path The indexed field to be searched. + * @param value The object id value to query for. + * @return The requested {@link SearchOperator}. 
+ * @mongodb.atlas.manual atlas-search/equals/ equals operator + */ + static EqualsSearchOperator equals(final FieldSearchPath path, final ObjectId value) { + return new SearchConstructibleBsonElement("equals", new Document("path", notNull("path", path).toValue()) + .append("value", notNull("value", value))); + } + + /** + * Returns a {@link SearchOperator} that searches for documents where a field matches the specified value. + * + * @param path The indexed field to be searched. + * @param value The number value to query for. + * @return The requested {@link SearchOperator}. + * @mongodb.atlas.manual atlas-search/equals/ equals operator + */ + static EqualsSearchOperator equals(final FieldSearchPath path, final Number value) { + return new SearchConstructibleBsonElement("equals", new Document("path", notNull("path", path).toValue()) + .append("value", notNull("value", value))); + } + + /** + * Returns a {@link SearchOperator} that searches for documents where a field matches the specified value. + * + * @param path The indexed field to be searched. + * @param value The instant date value to query for. + * @return The requested {@link SearchOperator}. + * @mongodb.atlas.manual atlas-search/equals/ equals operator + */ + static EqualsSearchOperator equals(final FieldSearchPath path, final Instant value) { + return new SearchConstructibleBsonElement("equals", new Document("path", notNull("path", path).toValue()) + .append("value", notNull("value", value))); + } + + /** + * Returns a {@link SearchOperator} that searches for documents where a field matches the specified value. + * + * @param path The indexed field to be searched. + * @param value The string value to query for. + * @return The requested {@link SearchOperator}. + * @mongodb.atlas.manual atlas-search/equals/ equals operator + */ + static EqualsSearchOperator equals(final FieldSearchPath path, final String value) { + return new SearchConstructibleBsonElement("equals", new Document("path", notNull("path", path).toValue()) + .append("value", notNull("value", value))); + } + + /** + * Returns a {@link SearchOperator} that searches for documents where a field matches the specified value. + * + * @param path The indexed field to be searched. + * @param value The uuid value to query for. + * @return The requested {@link SearchOperator}. + * @mongodb.atlas.manual atlas-search/equals/ equals operator + */ + static EqualsSearchOperator equals(final FieldSearchPath path, final UUID value) { + return new SearchConstructibleBsonElement("equals", new Document("path", notNull("path", path).toValue()) + .append("value", notNull("value", new BsonBinary(value)))); + } + + /** + * Returns a {@link SearchOperator} that searches for documents where a field matches null. + * + * @param path The indexed field to be searched. + * @return The requested {@link SearchOperator}. + * @mongodb.atlas.manual atlas-search/equals/ equals operator + */ + static EqualsSearchOperator equalsNull(final FieldSearchPath path) { + return new SearchConstructibleBsonElement("equals", new Document("path", notNull("path", path).toValue()) + .append("value", BsonNull.VALUE)); + } + + /** + * Returns a {@link SearchOperator} that returns documents similar to input document. + * + * @param like The BSON document that is used to extract representative terms to query for. + * @return The requested {@link SearchOperator}. 
+ * @mongodb.atlas.manual atlas-search/morelikethis/ moreLikeThis operator + */ + static MoreLikeThisSearchOperator moreLikeThis(final BsonDocument like) { + return moreLikeThis(singleton(notNull("like", like))); + } + + /** + * Returns a {@link SearchOperator} that returns documents similar to input documents. + * + * @param likes The BSON documents that are used to extract representative terms to query for. + * @return The requested {@link SearchOperator}. + * @mongodb.atlas.manual atlas-search/morelikethis/ moreLikeThis operator + */ + static MoreLikeThisSearchOperator moreLikeThis(final Iterable likes) { + Iterator likesIterator = notNull("likes", likes).iterator(); + isTrueArgument("likes must not be empty", likesIterator.hasNext()); + BsonDocument firstLike = likesIterator.next(); + return new SearchConstructibleBsonElement("moreLikeThis", new Document("like", likesIterator.hasNext() ? likes : firstLike)); + } + + /** + * Returns a {@link SearchOperator} that supports querying a combination of indexed fields and values. + * + * @param defaultPath The field to be searched by default. + * @param query One or more indexed fields and values to search. + * @return The requested {@link SearchOperator}. + * @mongodb.atlas.manual atlas-search/queryString/ queryString operator + */ + static QueryStringSearchOperator queryString(final FieldSearchPath defaultPath, final String query) { + isTrueArgument("path must not be empty", defaultPath != null); + isTrueArgument("query must not be empty", query != null); + + return new SearchConstructibleBsonElement("queryString", + new Document("defaultPath", defaultPath.toBsonValue()) + .append("query", query)); + } + + /** + * Returns a {@link SearchOperator} that performs a search for documents containing an ordered sequence of terms. + * + * @param path The field to be searched. + * @param query The string to search for. + * @return The requested {@link SearchOperator}. + * @mongodb.atlas.manual atlas-search/phrase/ phrase operator + */ + static PhraseSearchOperator phrase(final SearchPath path, final String query) { + return phrase(singleton(notNull("path", path)), singleton(notNull("query", query))); + } + + /** + * Returns a {@link SearchOperator} that performs a search for documents containing an ordered sequence of terms. + * + * @param paths The non-empty fields to be searched. + * @param queries The non-empty strings to search for. + * @return The requested {@link SearchOperator}. + * @mongodb.atlas.manual atlas-search/phrase/ phrase operator + */ + static PhraseSearchOperator phrase(final Iterable paths, final Iterable queries) { + Iterator pathIterator = notNull("paths", paths).iterator(); + isTrueArgument("paths must not be empty", pathIterator.hasNext()); + Iterator queryIterator = notNull("queries", queries).iterator(); + isTrueArgument("queries must not be empty", queryIterator.hasNext()); + String firstQuery = queryIterator.next(); + return new PhraseConstructibleBsonElement("phrase", new Document("path", combineToBsonValue(pathIterator, false)) + .append("query", queryIterator.hasNext() ? queries : firstQuery)); + } + + /** + * Returns a {@link SearchOperator} that performs a search using a special + * characters in the search string that can match any character. + * + * @param path The indexed field to be searched. + * @param query The string to search for. + * @return The requested {@link SearchOperator}. 
+ * @mongodb.atlas.manual atlas-search/wildcard/ wildcard operator + */ + static WildcardSearchOperator wildcard(final SearchPath path, final String query) { + return wildcard(singleton(notNull("query", query)), singleton(notNull("path", path))); + } + + /** + * Returns a {@link SearchOperator} that performs a search using a special characters in the search string that can match any character. + * + * @param queries The non-empty strings to search for. + * @param paths The non-empty index fields to be searched. + * @return The requested {@link SearchOperator}. + * @mongodb.atlas.manual atlas-search/wildcard/ wildcard operator + */ + static WildcardSearchOperator wildcard(final Iterable queries, final Iterable paths) { + Iterator queryIterator = notNull("queries", queries).iterator(); + isTrueArgument("queries must not be empty", queryIterator.hasNext()); + String firstQuery = queryIterator.next(); + Iterator pathIterator = notNull("paths", paths).iterator(); + isTrueArgument("paths must not be empty", pathIterator.hasNext()); + return new SearchConstructibleBsonElement("wildcard", new Document("query", queryIterator.hasNext() ? queries : firstQuery) + .append("path", combineToBsonValue(pathIterator, false))); + } + + /** + * Returns a {@link SearchOperator} that performs a search using a regular expression. + * + * @param path The field to be searched. + * @param query The string to search for. + * @return The requested {@link SearchOperator}. + * @mongodb.atlas.manual atlas-search/regex/ regex operator + */ + static RegexSearchOperator regex(final SearchPath path, final String query) { + return regex(singleton(notNull("path", path)), singleton(notNull("query", query))); + } + + /** + * Returns a {@link SearchOperator} that performs a search using a regular expression. + * + * @param paths The non-empty fields to be searched. + * @param queries The non-empty strings to search for. + * @return The requested {@link SearchOperator}. + * @mongodb.atlas.manual atlas-search/regex/ regex operator + */ + static RegexSearchOperator regex(final Iterable paths, final Iterable queries) { + Iterator pathIterator = notNull("paths", paths).iterator(); + isTrueArgument("paths must not be empty", pathIterator.hasNext()); + Iterator queryIterator = notNull("queries", queries).iterator(); + isTrueArgument("queries must not be empty", queryIterator.hasNext()); + String firstQuery = queryIterator.next(); + return new SearchConstructibleBsonElement("regex", new Document("path", combineToBsonValue(pathIterator, false)) + .append("query", queryIterator.hasNext() ? queries : firstQuery)); + } + + /** + * Creates a {@link SearchOperator} from a {@link Bson} in situations when there is no builder method that better satisfies your needs. + * This method cannot be used to validate the syntax. + *

+     * <i>Example</i><br>
+     * The following code creates two functionally equivalent {@link SearchOperator}s,
+     * though they may not be {@linkplain Object#equals(Object) equal}.
+     * <pre>{@code
+     *  SearchOperator operator1 = SearchOperator.exists(
+     *          SearchPath.fieldPath("fieldName"));
+     *  SearchOperator operator2 = SearchOperator.of(new Document("exists",
+     *          new Document("path", SearchPath.fieldPath("fieldName").toValue())));
+     * }</pre>
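+     * <p>
+     * A sketch of a compound operator, assuming the clause methods can be chained as the
+     * {@link CompoundSearchOperatorBase} interfaces in this patch suggest; the field names are illustrative:
+     * <pre>{@code
+     *  SearchOperator operator3 = SearchOperator.compound()
+     *          .must(Arrays.asList(SearchOperator.exists(SearchPath.fieldPath("fieldName1"))))
+     *          .should(Arrays.asList(SearchOperator.exists(SearchPath.fieldPath("fieldName2"))))
+     *          .minimumShouldMatch(1);
+     * }</pre>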
+ * + * @param operator A {@link Bson} representing the required {@link SearchOperator}. + * @return The requested {@link SearchOperator}. + */ + static SearchOperator of(final Bson operator) { + return new SearchConstructibleBsonElement(notNull("operator", operator)); + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/SearchOptions.java b/driver-core/src/main/com/mongodb/client/model/search/SearchOptions.java new file mode 100644 index 00000000000..f5cd0261e8f --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/SearchOptions.java @@ -0,0 +1,97 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; +import com.mongodb.client.model.Aggregates; +import org.bson.conversions.Bson; + +/** + * Represents optional fields of the {@code $search} pipeline stage of an aggregation pipeline. + * + * @see Aggregates#search(SearchOperator, SearchOptions) + * @see Aggregates#search(SearchCollector, SearchOptions) + * @mongodb.atlas.manual atlas-search/query-syntax/#-search $search syntax + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface SearchOptions extends Bson { + /** + * Creates a new {@link SearchOptions} with the index name specified. + * + * @param name The name of the index to use. + * @return A new {@link SearchOptions}. + */ + SearchOptions index(String name); + + /** + * Creates a new {@link SearchOptions} with the highlighting options specified. + * + * @param option The highlighting option. + * @return A new {@link SearchOptions}. + */ + SearchOptions highlight(SearchHighlight option); + + /** + * Creates a new {@link SearchOptions} with the counting options specified. + * + * @param option The counting option. + * @return A new {@link SearchOptions}. + */ + @Beta({Reason.CLIENT, Reason.SERVER}) + SearchOptions count(SearchCount option); + + /** + * Creates a new {@link SearchOptions} that instruct to return only stored source fields. + * + * @param returnStoredSource The option to return only stored source fields. + * @return A new {@link SearchOptions}. + * @mongodb.atlas.manual atlas-search/return-stored-source/ Return stored source fields + */ + @Beta({Reason.CLIENT, Reason.SERVER}) + SearchOptions returnStoredSource(boolean returnStoredSource); + + /** + * Creates a new {@link SearchOptions} with the specified option in situations when there is no builder method + * that better satisfies your needs. + * This method cannot be used to validate the syntax. + *

+     * <i>Example</i><br>
+     * The following code creates two functionally equivalent {@link SearchOptions} objects,
+     * though they may not be {@linkplain Object#equals(Object) equal}.
+     * <pre>{@code
+     *  SearchOptions options1 = SearchOptions.searchOptions().index("indexName");
+     *  SearchOptions options2 = SearchOptions.searchOptions().option("index", "indexName");
+     * }</pre>
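+     * <p>
+     * Options are typically combined with an operator via {@link Aggregates#search(SearchOperator, SearchOptions)};
+     * a minimal sketch, with an illustrative index name:
+     * <pre>{@code
+     *  Bson searchStage = Aggregates.search(
+     *          SearchOperator.exists(SearchPath.fieldPath("fieldName")),
+     *          SearchOptions.searchOptions().index("indexName"));
+     * }</pre>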
+ * + * @param name The option name. + * @param value The option value. + * @return A new {@link SearchOptions}. + */ + SearchOptions option(String name, Object value); + + /** + * Returns {@link SearchOptions} that represents server defaults. + * + * @return {@link SearchOptions} that represents server defaults. + */ + static SearchOptions searchOptions() { + return SearchConstructibleBson.EMPTY_IMMUTABLE; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/SearchPath.java b/driver-core/src/main/com/mongodb/client/model/search/SearchPath.java new file mode 100644 index 00000000000..7213f3f894b --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/SearchPath.java @@ -0,0 +1,98 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Sealed; +import com.mongodb.annotations.Reason; +import com.mongodb.internal.client.model.Util; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.conversions.Bson; + +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.client.model.Util.SEARCH_PATH_VALUE_KEY; + +/** + * A specification of fields to be searched. + *

+ * Depending on the context, one of the following methods may be used to get a representation of a {@link SearchPath}
+ * with the correct syntax: {@link #toBsonDocument()}, {@link #toBsonValue()}, {@link FieldSearchPath#toValue()}.
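+ * <p>
+ * A minimal sketch of constructing paths; the field names are illustrative:
+ * <pre>{@code
+ *  FieldSearchPath path1 = SearchPath.fieldPath("fieldName");
+ *  WildcardSearchPath path2 = SearchPath.wildcardPath("wildc*rd");
+ * }</pre>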

+ * + * @mongodb.atlas.manual atlas-search/path-construction/ Path + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface SearchPath extends Bson { + /** + * Returns a {@link SearchPath} for the given {@code path}. + * + * @param path The name of the field. Must not contain {@linkplain #wildcardPath(String) wildcard} characters. + * @return The requested {@link SearchPath}. + * @mongodb.driver.manual core/document/#dot-notation Dot notation + */ + static FieldSearchPath fieldPath(final String path) { + notNull("path", path); + isTrueArgument("path must not contain '*'", !path.contains("*")); + return new SearchConstructibleBson(new BsonDocument(SEARCH_PATH_VALUE_KEY, new BsonString(path))); + } + + /** + * Returns a {@link SearchPath} for the given {@code wildcardPath}. + * + * @param wildcardPath The specification of the fields that contains wildcard ({@code '*'}) characters. + * Must not contain {@code '**'}. + * @return The requested {@link SearchPath}. + * @mongodb.driver.manual core/document/#dot-notation Dot notation + */ + static WildcardSearchPath wildcardPath(final String wildcardPath) { + notNull("wildcardPath", wildcardPath); + isTrueArgument("wildcardPath must contain '*'", wildcardPath.contains("*")); + isTrueArgument("wildcardPath must not contain '**'", !wildcardPath.contains("**")); + return new SearchConstructibleBson(new BsonDocument("wildcard", new BsonString(wildcardPath))); + } + + /** + * Converts this object to {@link BsonValue}. + * If {@link #toBsonDocument()} contains only the {@value Util#SEARCH_PATH_VALUE_KEY} key, + * then returns {@link BsonString} representing the value of this key, + * otherwise returns {@link #toBsonDocument()}. + *

+     * This method may be useful when using the {@code of} methods, e.g., {@link SearchHighlight#of(Bson)}.
+     * Depending on the syntax of the document being constructed,
+     * it may be required to use the method {@link FieldSearchPath#toValue()} instead.
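+     * <p>
+     * A minimal sketch of the distinction; the field and analyzer names are illustrative:
+     * <pre>{@code
+     *  // renders as the BsonString "fieldName", because only the path value is present
+     *  BsonValue value1 = SearchPath.fieldPath("fieldName").toBsonValue();
+     *  // renders as a BsonDocument, because multi("analyzerName") appends a second key
+     *  BsonValue value2 = SearchPath.fieldPath("fieldName").multi("analyzerName").toBsonValue();
+     * }</pre>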

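+ * <p>
+ * For example (the field name below is hypothetical; a sketch of the difference between the two renderings):
+ * <pre>{@code
+ *  // a plain field path carries only the path value, so it is rendered as a BsonString
+ *  BsonValue fieldValue = SearchPath.fieldPath("description").toBsonValue();
+ *  // a wildcard path carries the "wildcard" key, so it is rendered as a BsonDocument
+ *  BsonValue wildcardValue = SearchPath.wildcardPath("description.*").toBsonValue();
+ * }</pre>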
+ * + * @return A {@link BsonValue} representing this {@link SearchPath}. + * @see FieldSearchPath#toValue() + */ + default BsonValue toBsonValue() { + BsonDocument doc = toBsonDocument(); + if (doc.size() > 1) { + return doc; + } else { + BsonString value = doc.getString(SEARCH_PATH_VALUE_KEY, null); + if (value != null) { + // paths that contain only `SEARCH_PATH_VALUE_KEY` must be represented as a `BsonString` + return value; + } else { + return doc; + } + } + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/SearchScore.java b/driver-core/src/main/com/mongodb/client/model/search/SearchScore.java new file mode 100644 index 00000000000..825264cf7f5 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/SearchScore.java @@ -0,0 +1,106 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; +import com.mongodb.client.model.Projections; +import org.bson.BsonDocument; +import org.bson.BsonDouble; +import org.bson.Document; +import org.bson.conversions.Bson; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A modifier of the relevance score. + * You may use the {@code $meta: "searchScore"} expression, e.g., via {@link Projections#metaSearchScore(String)}, + * to extract the relevance score assigned to each found document. + * + * @mongodb.atlas.manual atlas-search/scoring/ Scoring + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface SearchScore extends Bson { + /** + * Returns a {@link SearchScore} that instructs to multiply the score by the specified {@code value}. + * + * @param value The value to multiply the score by. + * @return The requested {@link SearchScore}. + * @mongodb.atlas.manual atlas-search/scoring/#boost boost score modifier + */ + static ValueBoostSearchScore boost(final float value) { + return new SearchConstructibleBsonElement("boost", new BsonDocument("value", new BsonDouble(value))); + } + + /** + * Returns a {@link SearchScore} that instructs to multiply the score by the value of the specified field. + * + * @param path The numeric field whose value to multiply the score by. + * @return The requested {@link SearchScore}. + * @mongodb.atlas.manual atlas-search/scoring/#boost boost score modifier + * @see SearchScoreExpression#pathExpression(FieldSearchPath) + */ + static PathBoostSearchScore boost(final FieldSearchPath path) { + return new SearchConstructibleBsonElement("boost", new Document("path", notNull("value", path).toValue())); + } + + /** + * Returns a {@link SearchScore} that instructs to replace the score with the specified {@code value}. + * + * @param value The value to replace the score with. + * @return The requested {@link SearchScore}. 
+ * @mongodb.atlas.manual atlas-search/scoring/#constant constant score modifier + * @see SearchScoreExpression#constantExpression(float) + */ + static ConstantSearchScore constant(final float value) { + return new SearchConstructibleBsonElement("constant", new BsonDocument("value", new BsonDouble(value))); + } + + /** + * Returns a {@link SearchScore} that instructs to compute the score using the specified {@code expression}. + * + * @param expression The expression to use when calculating the score. + * @return The requested {@link SearchScore}. + * @mongodb.atlas.manual atlas-search/scoring/#function function score modifier + */ + static FunctionSearchScore function(final SearchScoreExpression expression) { + return new SearchConstructibleBsonElement("function", notNull("expression", expression)); + } + + /** + * Creates a {@link SearchScore} from a {@link Bson} in situations when there is no builder method that better satisfies your needs. + * This method cannot be used to validate the syntax. + *
<p>
+     * <i>Example</i><br>
+     * The following code creates two functionally equivalent {@link SearchScore}s,
+     * though they may not be {@linkplain Object#equals(Object) equal}.
+     * <pre>{@code
+     *  SearchScore score1 = SearchScore.boost(
+     *      SearchPath.fieldPath("fieldName"));
+     *  SearchScore score2 = SearchScore.of(new Document("boost",
+     *      new Document("path", SearchPath.fieldPath("fieldName").toValue())));
+     * }</pre>
+ * + * @param score A {@link Bson} representing the required {@link SearchScore}. + * @return The requested {@link SearchScore}. + */ + static SearchScore of(final Bson score) { + return new SearchConstructibleBsonElement(notNull("score", score)); + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/SearchScoreExpression.java b/driver-core/src/main/com/mongodb/client/model/search/SearchScoreExpression.java new file mode 100644 index 00000000000..268786c3344 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/SearchScoreExpression.java @@ -0,0 +1,183 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; +import org.bson.BsonDocument; +import org.bson.BsonDouble; +import org.bson.BsonString; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.internal.client.model.Util.SEARCH_PATH_VALUE_KEY; +import static com.mongodb.internal.client.model.Util.sizeAtLeast; +import static com.mongodb.assertions.Assertions.notNull; + +/** + * @see SearchScore#function(SearchScoreExpression) + * @mongodb.atlas.manual atlas-search/scoring/#expressions Expressions for the function score modifier + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface SearchScoreExpression extends Bson { + /** + * Returns a {@link SearchScoreExpression} that evaluates into the relevance score of a document. + * + * @return The requested {@link SearchScoreExpression}. + */ + static RelevanceSearchScoreExpression relevanceExpression() { + return new SearchConstructibleBson(new BsonDocument("score", new BsonString("relevance"))); + } + + /** + * Returns a {@link SearchScoreExpression} that evaluates into the value of the specified field. + * + * @param path The numeric field whose value to use as the result of the expression. + * @return The requested {@link SearchScoreExpression}. + * @see SearchScore#boost(FieldSearchPath) + */ + static PathSearchScoreExpression pathExpression(final FieldSearchPath path) { + return new SearchConstructibleBsonElement("path", new Document(SEARCH_PATH_VALUE_KEY, notNull("path", path).toValue())); + } + + /** + * Returns a {@link SearchScoreExpression} that evaluates into the specified {@code value}. + * + * @param value The value to use as the result of the expression. Unlike {@link SearchScore#constant(float)}, does not have constraints. + * @return The requested {@link SearchScoreExpression}. 
+ * @see SearchScore#constant(float) + */ + static ConstantSearchScoreExpression constantExpression(final float value) { + return new SearchConstructibleBson(new BsonDocument("constant", new BsonDouble(value))); + } + + /** + * Returns a {@link SearchScoreExpression} that represents a Gaussian function whose output is within the interval [0, 1]. + * Roughly speaking, the further the value of the {@code path} expression is from the {@code origin}, + * the smaller the output of the function. + *
<p>
+ * The {@code scale} and {@link GaussSearchScoreExpression#decay(double) decay} are parameters of the Gaussian function;
+ * they define the rate at which the function decays.
+ * The input of the Gaussian function is the output of another function:
+ * max(0, abs({@code pathValue} - {@code origin}) - {@link GaussSearchScoreExpression#offset(double) offset}),
+ * where {@code pathValue} is the value of the {@code path} expression.</p>
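+ * <p>
+ * As a sketch of the decay described above (derived from the stated semantics of {@code scale} and
+ * {@link GaussSearchScoreExpression#decay(double) decay}, not from a server-side reference):
+ * <pre>{@code
+ *  // d = max(0, abs(pathValue - origin) - offset) is the input of the Gaussian function;
+ *  // the output decay^((d / scale)^2) is 1 at d == 0 and equals decay at d == scale
+ *  double d = Math.max(0, Math.abs(pathValue - origin) - offset);
+ *  double output = Math.pow(decay, (d / scale) * (d / scale));
+ * }</pre>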
+ * + * @param origin The point of origin, see {@link GaussSearchScoreExpression#offset(double)}. + * The value of the Gaussian function is 1 if the value of the {@code path} expression is {@code origin}. + * @param path The expression whose value is used to calculate the input of the Gaussian function. + * @param scale The distance from the points {@code origin} ± {@link GaussSearchScoreExpression#offset(double) offset} + * at which the output of the Gaussian function must decay by the factor of {@link GaussSearchScoreExpression#decay(double) decay}. + * @return The requested {@link SearchScoreExpression}. + */ + static GaussSearchScoreExpression gaussExpression(final double origin, final PathSearchScoreExpression path, final double scale) { + notNull("path", path); + Bson value = new Bson() { + @Override + public BsonDocument toBsonDocument(final Class documentClass, final CodecRegistry codecRegistry) { + BsonDocument pathDoc = path.toBsonDocument(documentClass, codecRegistry); + assertTrue(pathDoc.size() == 1); + return new BsonDocument("origin", new BsonDouble(origin)) + .append("path", pathDoc.values().iterator().next()) + .append("scale", new BsonDouble(scale)); + } + + @Override + public String toString() { + return new Document("origin", origin) + .append("path", path) + .append("scale", scale) + .toString(); + } + }; + return new SearchConstructibleBsonElement("gauss", value); + } + + /** + * Returns a {@link SearchScoreExpression} that evaluates into log10({@code expressionValue}), + * where {@code expressionValue} is the value of the {@code expression}. + * + * @param expression The expression whose value is the input of the log10 function. + * @return The requested {@link SearchScoreExpression}. + */ + static LogSearchScoreExpression logExpression(final SearchScoreExpression expression) { + return new SearchConstructibleBson(new Document("log", notNull("expression", expression))); + } + + /** + * Returns a {@link SearchScoreExpression} that evaluates into log10({@code expressionValue} + 1), + * where {@code expressionValue} is the value of the {@code expression}. + * + * @param expression The expression whose value is used to calculate the input of the log10 function. + * @return The requested {@link SearchScoreExpression}. + */ + static Log1pSearchScoreExpression log1pExpression(final SearchScoreExpression expression) { + return new SearchConstructibleBson(new Document("log1p", notNull("expression", expression))); + } + + /** + * Returns a {@link SearchScoreExpression} that evaluates into the sum of the values of the specified {@code expressions}. + * + * @param expressions The expressions whose values to add. Must contain at least two expressions. + * @return The requested {@link SearchScoreExpression}. + */ + static AddSearchScoreExpression addExpression(final Iterable expressions) { + notNull("expressions", expressions); + isTrueArgument("expressions must contain at least 2 elements", sizeAtLeast(expressions, 2)); + return new SearchConstructibleBson(new Document("add", expressions)); + } + + /** + * Returns a {@link SearchScoreExpression} that evaluates into the product of the values of the specified {@code expressions}. + * + * @param expressions The expressions whose values to multiply. Must contain at least two expressions. + * @return The requested {@link SearchScoreExpression}. 
+ */ + static MultiplySearchScoreExpression multiplyExpression(final Iterable expressions) { + notNull("expressions", expressions); + isTrueArgument("expressions must contain at least 2 elements", sizeAtLeast(expressions, 2)); + return new SearchConstructibleBson(new Document("multiply", expressions)); + } + + /** + * Creates a {@link SearchScoreExpression} from a {@link Bson} in situations when there is no builder method + * that better satisfies your needs. + * This method cannot be used to validate the syntax. + *
<p>
+     * <i>Example</i><br>
+     * The following code creates two functionally equivalent {@link SearchScoreExpression}s,
+     * though they may not be {@linkplain Object#equals(Object) equal}.
+     * <pre>{@code
+     *  SearchScoreExpression expression1 = SearchScoreExpression.pathExpression(
+     *          SearchPath.fieldPath("fieldName"))
+     *          .undefined(-1.5f);
+     *  SearchScoreExpression expression2 = SearchScoreExpression.of(new Document("path",
+     *          new Document("value", SearchPath.fieldPath("fieldName").toValue())
+     *                  .append("undefined", -1.5)));
+     * }</pre>
+ * + * @param expression A {@link Bson} representing the required {@link SearchScoreExpression}. + * @return The requested {@link SearchScoreExpression}. + */ + static SearchScoreExpression of(final Bson expression) { + return new SearchConstructibleBson(notNull("expression", expression)); + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/ShouldCompoundSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/ShouldCompoundSearchOperator.java new file mode 100644 index 00000000000..a6bda94e206 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/ShouldCompoundSearchOperator.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * A representation of a {@link CompoundSearchOperator} that allows changing + * {@link CompoundSearchOperator#should(Iterable) should}-specific options, if any. + * This interface is a technicality and does not represent a meaningful element of the full-text search query syntax. + * + * @see CompoundSearchOperatorBase#should(Iterable) + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface ShouldCompoundSearchOperator extends CompoundSearchOperator { + @Override + ShouldCompoundSearchOperator score(SearchScore modifier); + /** + * Creates a new {@link ShouldCompoundSearchOperator} that requires at least the requested number of clauses of those specified via + * {@link CompoundSearchOperatorBase#should(Iterable)} to be satisfied. + * + * @param minimumShouldMatch The minimum number of clauses that must be satisfied. + * @return A new {@link ShouldCompoundSearchOperator}. + */ + ShouldCompoundSearchOperator minimumShouldMatch(int minimumShouldMatch); +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/StringSearchFacet.java b/driver-core/src/main/com/mongodb/client/model/search/StringSearchFacet.java new file mode 100644 index 00000000000..209eaf9ff47 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/StringSearchFacet.java @@ -0,0 +1,36 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * @see SearchFacet#stringFacet(String, FieldSearchPath) + * @since 4.7 + */ +@Sealed +@Beta({Reason.CLIENT, Reason.SERVER}) +public interface StringSearchFacet extends SearchFacet { + /** + * Creates a new {@link StringSearchFacet} that explicitly limits the number of facet categories. + * + * @param max The maximum number of facet categories to return in the results. + * @return A new {@link StringSearchFacet}. + */ + StringSearchFacet numBuckets(int max); +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/TextSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/TextSearchOperator.java new file mode 100644 index 00000000000..241639f3a47 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/TextSearchOperator.java @@ -0,0 +1,61 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * @see SearchOperator#text(SearchPath, String) + * @see SearchOperator#text(Iterable, Iterable) + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface TextSearchOperator extends SearchOperator { + @Override + TextSearchOperator score(SearchScore modifier); + + /** + * Creates a new {@link TextSearchOperator} that uses fuzzy search + * and does not use {@linkplain #synonyms(String) synonyms}. + * + * @return A new {@link TextSearchOperator}. + */ + TextSearchOperator fuzzy(); + + /** + * Creates a new {@link TextSearchOperator} that uses fuzzy search + * and does not use {@linkplain #synonyms(String) synonyms}. + * + * @param options The fuzzy search options. + * Specifying {@link FuzzySearchOptions#fuzzySearchOptions()} is equivalent to calling {@link #fuzzy()}. + * @return A new {@link TextSearchOperator}. + */ + TextSearchOperator fuzzy(FuzzySearchOptions options); + + /** + * Creates a new {@link TextSearchOperator} that uses synonyms + * and does not use {@linkplain #fuzzy(FuzzySearchOptions) fuzzy search}. + * + * @param name The name of the synonym mapping. + * @return A new {@link TextSearchOperator}. + * + * @mongodb.atlas.manual atlas-search/synonyms/ Synonym mappings + */ + TextSearchOperator synonyms(String name); +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/TotalSearchCount.java b/driver-core/src/main/com/mongodb/client/model/search/TotalSearchCount.java new file mode 100644 index 00000000000..2bcbde468f3 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/TotalSearchCount.java @@ -0,0 +1,29 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * @see SearchCount#total() + * @since 4.7 + */ +@Sealed +@Beta({Reason.CLIENT, Reason.SERVER}) +public interface TotalSearchCount extends SearchCount { +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/ValueBoostSearchScore.java b/driver-core/src/main/com/mongodb/client/model/search/ValueBoostSearchScore.java new file mode 100644 index 00000000000..d760bd60d52 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/ValueBoostSearchScore.java @@ -0,0 +1,29 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * @see SearchScore#boost(float) + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface ValueBoostSearchScore extends SearchScore { +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/VectorSearchConstructibleBson.java b/driver-core/src/main/com/mongodb/client/model/search/VectorSearchConstructibleBson.java new file mode 100644 index 00000000000..3e281890822 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/VectorSearchConstructibleBson.java @@ -0,0 +1,55 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Immutable; +import com.mongodb.internal.client.model.AbstractConstructibleBson; +import org.bson.BsonDocument; +import org.bson.Document; +import org.bson.conversions.Bson; + +import static com.mongodb.assertions.Assertions.notNull; + +final class VectorSearchConstructibleBson extends AbstractConstructibleBson + implements ApproximateVectorSearchOptions, ExactVectorSearchOptions { + /** + * An {@linkplain Immutable immutable} {@link BsonDocument#isEmpty() empty} instance. + */ + static final VectorSearchConstructibleBson EMPTY_IMMUTABLE = new VectorSearchConstructibleBson(AbstractConstructibleBson.EMPTY_IMMUTABLE); + + VectorSearchConstructibleBson(final Bson base) { + super(base); + } + + private VectorSearchConstructibleBson(final Bson base, final Document appended) { + super(base, appended); + } + + @Override + protected VectorSearchConstructibleBson newSelf(final Bson base, final Document appended) { + return new VectorSearchConstructibleBson(base, appended); + } + + @Override + public VectorSearchOptions filter(final Bson filter) { + return newAppended("filter", notNull("name", filter)); + } + + @Override + public VectorSearchOptions option(final String name, final Object value) { + return newAppended(notNull("name", name), notNull("value", value)); + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/VectorSearchOptions.java b/driver-core/src/main/com/mongodb/client/model/search/VectorSearchOptions.java new file mode 100644 index 00000000000..073c05b2371 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/VectorSearchOptions.java @@ -0,0 +1,86 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Sealed; +import com.mongodb.client.model.Aggregates; +import com.mongodb.client.model.Filters; +import org.bson.conversions.Bson; + +/** + * Represents optional fields of the {@code $vectorSearch} pipeline stage of an aggregation pipeline. + * + * @see Aggregates#vectorSearch(FieldSearchPath, Iterable, String, long, VectorSearchOptions) + * @mongodb.atlas.manual atlas-vector-search/vector-search-stage/ $vectorSearch + * @mongodb.server.release 6.0.11 + * @since 4.11 + */ +@Sealed +public interface VectorSearchOptions extends Bson { + /** + * Creates a new {@link VectorSearchOptions} with the filter specified. + * + * @param filter A filter that is applied before applying the + * {@link Aggregates#vectorSearch(FieldSearchPath, Iterable, String, long, VectorSearchOptions) queryVector} + * One may use {@link Filters} to create this filter, though not all filters may be supported. + * See the MongoDB documentation for the list of supported filters. + * @return A new {@link VectorSearchOptions}. 
+ */ + VectorSearchOptions filter(Bson filter); + + /** + * Creates a new {@link VectorSearchOptions} with the specified option in situations when there is no builder method + * that better satisfies your needs. + * This method cannot be used to validate the syntax. + *
<p>
+     * <i>Example</i><br>
+     * The following code creates two functionally equivalent {@link VectorSearchOptions} objects,
+     * though they may not be {@linkplain Object#equals(Object) equal}.
+     * <pre>{@code
+     *  VectorSearchOptions options1 = VectorSearchOptions.approximateVectorSearchOptions(1)
+     *      .filter(Filters.lt("fieldName", 1));
+     *  VectorSearchOptions options2 = VectorSearchOptions.approximateVectorSearchOptions(1)
+     *      .option("filter", Filters.lt("fieldName", 1));
+     * }</pre>
+ * + * @param name The option name. + * @param value The option value. + * @return A new {@link VectorSearchOptions}. + */ + VectorSearchOptions option(String name, Object value); + + /** + * Returns {@link ApproximateVectorSearchOptions} that represents server defaults. + * + * @param numCandidates The number of candidates. + * @return {@link ApproximateVectorSearchOptions} that represents server defaults. + * @since 5.2 + */ + static ApproximateVectorSearchOptions approximateVectorSearchOptions(long numCandidates) { + return (ApproximateVectorSearchOptions) VectorSearchConstructibleBson.EMPTY_IMMUTABLE.option("numCandidates", numCandidates); + } + + /** + * Returns {@link ExactVectorSearchOptions} that represents server defaults with the {@code exact} option set to true. + * + * @return {@link ExactVectorSearchOptions} that represents server defaults. + * @since 5.2 + */ + static ExactVectorSearchOptions exactVectorSearchOptions() { + return (ExactVectorSearchOptions) VectorSearchConstructibleBson.EMPTY_IMMUTABLE + .option("exact", true); + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/WildcardSearchOperator.java b/driver-core/src/main/com/mongodb/client/model/search/WildcardSearchOperator.java new file mode 100644 index 00000000000..95d4a5caad5 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/WildcardSearchOperator.java @@ -0,0 +1,32 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * @see SearchOperator#wildcard(SearchPath, String) + * @see SearchOperator#wildcard(Iterable, Iterable) + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface WildcardSearchOperator extends SearchOperator { + @Override + WildcardSearchOperator score(SearchScore modifier); +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/WildcardSearchPath.java b/driver-core/src/main/com/mongodb/client/model/search/WildcardSearchPath.java new file mode 100644 index 00000000000..2fceaaaad7a --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/WildcardSearchPath.java @@ -0,0 +1,29 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.Sealed; + +/** + * @see SearchPath#wildcardPath(String) + * @since 4.7 + */ +@Sealed +@Beta(Reason.CLIENT) +public interface WildcardSearchPath extends SearchPath { +} diff --git a/driver-core/src/main/com/mongodb/client/model/search/package-info.java b/driver-core/src/main/com/mongodb/client/model/search/package-info.java new file mode 100644 index 00000000000..e04257df29c --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/search/package-info.java @@ -0,0 +1,40 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Query building API for MongoDB Atlas full-text search. + *
<p>
+ * While all the building blocks of this API, such as
+ * {@link com.mongodb.client.model.search.SearchOptions}, {@link com.mongodb.client.model.search.SearchHighlight}, etc.,
+ * are not necessarily {@link com.mongodb.annotations.Immutable immutable}, they are unmodifiable because methods like
+ * {@link com.mongodb.client.model.search.SearchHighlight#maxCharsToExamine(int)} return new instances instead of modifying the instance
+ * on which they are called. This allows storing and using such instances as templates.</p>
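+ * <p>
+ * For example (the option names below are illustrative):
+ * <pre>{@code
+ *  SearchOptions template = SearchOptions.searchOptions().option("returnStoredSource", true);
+ *  // option returns a new instance, so the template itself stays unchanged
+ *  SearchOptions perQuery = template.option("scoreDetails", true);
+ * }</pre>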
+ * + * @see com.mongodb.client.model.Aggregates#search(SearchOperator, SearchOptions) + * @see com.mongodb.client.model.Aggregates#search(SearchCollector, SearchOptions) + * @see com.mongodb.client.model.Aggregates#vectorSearch(com.mongodb.client.model.search.FieldSearchPath, java.lang.Iterable, java.lang.String, + * long, com.mongodb.client.model.search.VectorSearchOptions) + * @mongodb.atlas.manual atlas-search/ Atlas Search + * @mongodb.atlas.manual atlas-search/query-syntax/ Atlas Search aggregation pipeline stages + * @since 4.7 + */ +@NonNullApi +@Beta(Reason.CLIENT) +package com.mongodb.client.model.search; + +import com.mongodb.annotations.Beta; +import com.mongodb.annotations.Reason; +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/client/model/vault/DataKeyOptions.java b/driver-core/src/main/com/mongodb/client/model/vault/DataKeyOptions.java new file mode 100644 index 00000000000..14a52a39904 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/vault/DataKeyOptions.java @@ -0,0 +1,168 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.vault; + +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; + +import java.util.Arrays; +import java.util.List; + +/** + * The options for creating a data key. + * + * @since 3.11 + */ +public class DataKeyOptions { + private List keyAltNames; + private BsonDocument masterKey; + private byte[] keyMaterial; + + /** + * Set the alternate key names. + * + * @param keyAltNames a list of alternate key names + * @return this + * @see #getKeyAltNames() + */ + public DataKeyOptions keyAltNames(final List keyAltNames) { + this.keyAltNames = keyAltNames; + return this; + } + + /** + * Sets the master key document. + * + * @param masterKey the master key document + * @return this + * @see #getMasterKey() + */ + public DataKeyOptions masterKey(final BsonDocument masterKey) { + this.masterKey = masterKey; + return this; + } + + /** + * Sets the key material + * + *
<p>An optional BinData of 96 bytes to use as custom key material for the data key being created.
+ * If set, the custom key material is used for encrypting and decrypting data. Otherwise, the key material for the new data key is
+ * generated from a cryptographically secure random device.</p>
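+ * <p>
+ * For example, a sketch of supplying 96 random bytes of custom key material:
+ * <pre>{@code
+ *  byte[] keyMaterial = new byte[96];
+ *  new SecureRandom().nextBytes(keyMaterial);  // java.security.SecureRandom
+ *  DataKeyOptions options = new DataKeyOptions().keyMaterial(keyMaterial);
+ * }</pre>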
+ * + * @param keyMaterial the optional custom key material for the data key + * @return this + * @since 4.7 + * @see #getKeyMaterial() + */ + public DataKeyOptions keyMaterial(final byte[] keyMaterial) { + this.keyMaterial = keyMaterial; + return this; + } + + /** + * Gets the alternate key names. + * + *
<p>
+ * An optional list of alternate names used to reference a key. If a key is created with alternate names, then encryption may refer
+ * to the key by the unique alternate name instead of by _id.
+ * </p>
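+ * <p>
+ * For example (the alternate names are placeholders):
+ * <pre>{@code
+ *  DataKeyOptions options = new DataKeyOptions()
+ *          .keyAltNames(Arrays.asList("altName1", "altName2"));
+ * }</pre>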
+ * + * @return the list of alternate key names + */ + @Nullable + public List getKeyAltNames() { + return keyAltNames; + } + + /** + * Gets the master key document + * + *
<p>
+ * The masterKey identifies a KMS-specific key used to encrypt the new data key.
+ * </p>
+ * <p>
+ * If the kmsProvider type is "aws" the master key is required and must contain the following fields:
+ * </p>
+ * <ul>
+ * <li>region: a String containing the AWS region in which to locate the master key</li>
+ * <li>key: a String containing the Amazon Resource Name (ARN) to the AWS customer master key</li>
+ * </ul>
+ * <p>
+ * If the kmsProvider type is "azure" the master key is required and must contain the following fields:
+ * </p>
+ * <ul>
+ * <li>keyVaultEndpoint: a String with the host name and an optional port. Example: "example.vault.azure.net".</li>
+ * <li>keyName: a String</li>
+ * <li>keyVersion: an optional String, the specific version of the named key, defaults to using the key's primary version.</li>
+ * </ul>
+ * <p>
+ * If the kmsProvider type is "gcp" the master key is required and must contain the following fields:
+ * </p>
+ * <ul>
+ * <li>projectId: a String</li>
+ * <li>location: String</li>
+ * <li>keyRing: String</li>
+ * <li>keyName: String</li>
+ * <li>keyVersion: an optional String, the specific version of the named key, defaults to using the key's primary version.</li>
+ * <li>endpoint: an optional String, with the host with optional port. Defaults to "cloudkms.googleapis.com".</li>
+ * </ul>
+ * <p>
+ * If the kmsProvider type is "kmip" the master key is required and must contain the following fields:
+ * </p>
+ * <ul>
+ * <li>keyId: optional String, keyId is the KMIP Unique Identifier to a 96 byte KMIP Secret Data managed object. If keyId is
+ * omitted, the driver creates a random 96 byte KMIP Secret Data managed object.</li>
+ * <li>endpoint: a String, the endpoint as a host with required port. e.g. "example.com:443". If endpoint is not provided, it
+ * defaults to the required endpoint from the KMS providers map.</li>
+ * <li>delegated: If true (recommended), the KMIP server will perform encryption and decryption. If delegated is not provided,
+ * defaults to false.</li>
+ * </ul>
+ * <p>
+ * If the kmsProvider type is "local" the masterKey is not applicable.
+ * </p>
+ *
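+ * <p>
+ * For example, a sketch of an "aws" master key document (the region and key ARN are placeholders):
+ * <pre>{@code
+ *  DataKeyOptions options = new DataKeyOptions().masterKey(
+ *          new BsonDocument("region", new BsonString("us-east-1"))
+ *                  .append("key", new BsonString("<key ARN>")));
+ * }</pre>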
+ * @return the master key document + */ + @Nullable + public BsonDocument getMasterKey() { + return masterKey; + } + + /** + * Gets the custom key material if set. + * + *
<p>The optional BinData of 96 bytes to use as custom key material for the data key being created.
+ * If set, the custom key material is used for encrypting and decrypting data. Otherwise, the key material for the new data key is
+ * generated from a cryptographically secure random device.</p>
+ + * @return the custom key material for the data key or null + * @since 4.7 + */ + @Nullable + public byte[] getKeyMaterial() { + return keyMaterial; + } + + @Override + public String toString() { + return "DataKeyOptions{" + + "keyAltNames=" + keyAltNames + + ", masterKey=" + masterKey + + ", keyMaterial=" + Arrays.toString(keyMaterial) + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/vault/EncryptOptions.java b/driver-core/src/main/com/mongodb/client/model/vault/EncryptOptions.java new file mode 100644 index 00000000000..f509f8b3ea3 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/vault/EncryptOptions.java @@ -0,0 +1,246 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.vault; + +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.lang.Nullable; +import org.bson.BsonBinary; + +/** + * The options for explicit encryption. + * + * @since 3.11 + */ +public class EncryptOptions { + private BsonBinary keyId; + private String keyAltName; + private final String algorithm; + private Long contentionFactor; + private String queryType; + private RangeOptions rangeOptions; + private TextOptions textOptions; + + /** + * Construct an instance with the given algorithm. + * + * @param algorithm the encryption algorithm + * @see #getAlgorithm() + */ + public EncryptOptions(final String algorithm) { + this.algorithm = algorithm; + } + + /** + * Gets the encryption algorithm, which must be either: + * + *
<ul>
+ * <li>AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic</li>
+ * <li>AEAD_AES_256_CBC_HMAC_SHA_512-Random</li>
+ * <li>Indexed</li>
+ * <li>Unindexed</li>
+ * <li>Range</li>
+ * <li>TextPreview</li>
+ * </ul>
+ *
+ * <p>The "TextPreview" algorithm is in preview and should be used for experimental workloads only.
+ * These features are unstable and their security is not guaranteed until released as Generally Available (GA).
+ * The GA version of these features may not be backwards compatible with the preview version.</p>
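+ * <p>
+ * For example (the key alternate name is a placeholder):
+ * <pre>{@code
+ *  EncryptOptions options = new EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic")
+ *          .keyAltName("myDataKey");
+ * }</pre>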
+ * + * @return the encryption algorithm + */ + public String getAlgorithm() { + return algorithm; + } + + /** + * Gets the key identifier. + * + *
<p>
+ * Identifies the data key by its _id value. The value is a UUID (binary subtype 4).
+ * </p>
+ * + * @return the key identifier + */ + @Nullable + public BsonBinary getKeyId() { + return keyId; + } + + /** + * Gets the alternate name with which to look up the key. + * + *
<p>
+ * Identifies the alternate key name to look up the key by.
+ * </p>
+ * + * @return the alternate name + */ + @Nullable + public String getKeyAltName() { + return keyAltName; + } + + /** + * Sets the key identifier + * + * @param keyId the key identifier + * @return this + * @see #getKeyId() + */ + public EncryptOptions keyId(final BsonBinary keyId) { + this.keyId = keyId; + return this; + } + + /** + * Sets the alternate key name + * + * @param keyAltName the alternate key name + * @return this + * @see #getKeyAltName() + */ + public EncryptOptions keyAltName(final String keyAltName) { + this.keyAltName = keyAltName; + return this; + } + + /** + * The contention factor. + * + *
<p>
It is an error to set contentionFactor when algorithm is not "Indexed" or "Range". + * @param contentionFactor the contention factor, which must be {@code >= 0} or null. + * @return this + * @since 4.7 + * @mongodb.server.release 7.0 + */ + public EncryptOptions contentionFactor(@Nullable final Long contentionFactor) { + this.contentionFactor = contentionFactor; + return this; + } + + /** + * Gets the contention factor. + * + * @see #contentionFactor(Long) + * @return the contention factor + * @since 4.7 + * @mongodb.server.release 7.0 + */ + @Nullable + public Long getContentionFactor() { + return contentionFactor; + } + + /** + * The QueryType. + * + *
<p>Currently, we support only "equality", "range", "prefixPreview", "suffixPreview" or "substringPreview" queryType.</p>
+ *
+ * <p>It is an error to set queryType when the algorithm is not "Indexed", "Range" or "TextPreview".</p>
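+ * <p>
+ * For example, a sketch pairing the "Indexed" algorithm with the "equality" query type (the contention factor is arbitrary):
+ * <pre>{@code
+ *  EncryptOptions options = new EncryptOptions("Indexed")
+ *          .contentionFactor(8L)
+ *          .queryType("equality");
+ * }</pre>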
+ * @param queryType the query type + * @return this + * @since 4.7 + * @mongodb.server.release 7.0 + */ + public EncryptOptions queryType(@Nullable final String queryType) { + this.queryType = queryType; + return this; + } + + /** + * Gets the QueryType. + * + *
<p>Currently, we support only "equality", "range", "prefixPreview", "suffixPreview" or "substringPreview" queryType.</p>
+ * @see #queryType(String) + * @return the queryType or null + * @since 4.7 + * @mongodb.server.release 7.0 + */ + @Nullable + public String getQueryType() { + return queryType; + } + + /** + * The RangeOptions + * + *
<p>
It is an error to set RangeOptions when the algorithm is not "Range". + * @param rangeOptions the range options + * @return this + * @since 4.9 + * @mongodb.server.release 8.0 + * @mongodb.driver.manual /core/queryable-encryption/ queryable encryption + */ + public EncryptOptions rangeOptions(@Nullable final RangeOptions rangeOptions) { + this.rangeOptions = rangeOptions; + return this; + } + + /** + * Gets the RangeOptions + * @return the range options or null if not set + * @since 4.9 + * @mongodb.server.release 8.0 + * @mongodb.driver.manual /core/queryable-encryption/ queryable encryption + */ + @Nullable + public RangeOptions getRangeOptions() { + return rangeOptions; + } + + /** + * The TextOptions + * + *
<p>
It is an error to set TextOptions when the algorithm is not "TextPreview". + * @param textOptions the text options + * @return this + * @since 5.6 + * @mongodb.server.release 8.2 + * @mongodb.driver.manual /core/queryable-encryption/ queryable encryption + */ + @Alpha(Reason.SERVER) + public EncryptOptions textOptions(@Nullable final TextOptions textOptions) { + this.textOptions = textOptions; + return this; + } + + /** + * Gets the TextOptions + * @see #textOptions(TextOptions) + * @return the text options or null if not set + * @since 5.6 + * @mongodb.server.release 8.2 + * @mongodb.driver.manual /core/queryable-encryption/ queryable encryption + */ + @Alpha(Reason.SERVER) + @Nullable + public TextOptions getTextOptions() { + return textOptions; + } + + @Override + public String toString() { + return "EncryptOptions{" + + "keyId=" + keyId + + ", keyAltName='" + keyAltName + '\'' + + ", algorithm='" + algorithm + '\'' + + ", contentionFactor=" + contentionFactor + + ", queryType='" + queryType + '\'' + + ", rangeOptions=" + rangeOptions + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/vault/RangeOptions.java b/driver-core/src/main/com/mongodb/client/model/vault/RangeOptions.java new file mode 100644 index 00000000000..495f06a0650 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/vault/RangeOptions.java @@ -0,0 +1,156 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.vault; + +import com.mongodb.lang.Nullable; +import org.bson.BsonValue; + +/** + * Range options specifies index options for a Queryable Encryption field supporting "range" queries. + * + *
<p>{@code min}, {@code max}, {@code sparsity}, and {@code precision} must match the values set in the {@code encryptedFields}
+ * of the destination collection.
+ *
+ * <p>
For {@code double} and {@code decimal128}, {@code min}/{@code max}/{@code precision} must all be set, or all be unset. + * + * @since 4.9 + * @mongodb.server.release 6.2 + * @mongodb.driver.manual /core/queryable-encryption/ queryable encryption + */ +public class RangeOptions { + + private BsonValue min; + private BsonValue max; + private Integer trimFactor; + private Long sparsity; + private Integer precision; + + /** + * Construct a new instance + */ + public RangeOptions() { + } + + /** + * Set the minimum value set in the encryptedFields of the destination collection. + * @param min the minimum value + * @return this + */ + public RangeOptions min(@Nullable final BsonValue min) { + this.min = min; + return this; + } + + /** + * @return the minimum value if set + */ + @Nullable + public BsonValue getMin() { + return min; + } + + /** + * Set the maximum value set in the encryptedFields of the destination collection. + * @param max the maximum value + * @return this + */ + public RangeOptions max(@Nullable final BsonValue max) { + this.max = max; + return this; + } + + /** + * @return the trim factor value if set + * @since 5.2 + */ + @Nullable + public Integer getTrimFactor() { + return trimFactor; + } + + /** + * Set the number of top-level edges stored per record. + *
<p>
+ * The trim factor may be used to tune performance. + * + * @param trimFactor the trim factor + * @return this + * @since 5.2 + */ + public RangeOptions trimFactor(@Nullable final Integer trimFactor) { + this.trimFactor = trimFactor; + return this; + } + + /** + * @return the maximum value if set + */ + @Nullable + public BsonValue getMax() { + return max; + } + + /** + * Set the Queryable Encryption range hypergraph sparsity factor. + *
<p>
+ * Sparsity may be used to tune performance. + * + * @param sparsity the sparsity + * @return this + */ + public RangeOptions sparsity(@Nullable final Long sparsity) { + this.sparsity = sparsity; + return this; + } + + /** + * @return the sparsity value if set + */ + @Nullable + public Long getSparsity() { + return sparsity; + } + + /** + * Set the precision of double or decimal128 values in the encryptedFields of the destination collection. + * @param precision the precision + * @return this + */ + public RangeOptions precision(@Nullable final Integer precision) { + this.precision = precision; + return this; + } + + /** + * @return the precision value if set + */ + @Nullable + public Integer getPrecision() { + return precision; + } + + @Override + public String toString() { + return "RangeOptions{" + + "min=" + min + + ", max=" + max + + ", trimFactor=" + trimFactor + + ", sparsity=" + sparsity + + ", precision=" + precision + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/vault/RewrapManyDataKeyOptions.java b/driver-core/src/main/com/mongodb/client/model/vault/RewrapManyDataKeyOptions.java new file mode 100644 index 00000000000..da0279a8f8e --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/vault/RewrapManyDataKeyOptions.java @@ -0,0 +1,125 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.vault; + +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; + +/** + * The rewrap many data key options + * + *
<p>
+ * The {@link #getMasterKey()} document MUST have the fields corresponding to the given provider as specified in masterKey.
+ * </p>
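+ * <p>
+ * For example, a sketch for the "aws" provider (the master key fields are placeholders):
+ * <pre>{@code
+ *  RewrapManyDataKeyOptions options = new RewrapManyDataKeyOptions()
+ *          .provider("aws")
+ *          .masterKey(new BsonDocument("region", new BsonString("us-east-1"))
+ *                  .append("key", new BsonString("<key ARN>")));
+ * }</pre>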
+ * + * @since 4.7 + */ +public final class RewrapManyDataKeyOptions { + + private String provider; + private BsonDocument masterKey; + + /** + * Construct a new instance + */ + public RewrapManyDataKeyOptions() { + } + + /** + * Sets the provider name + * + * @param provider the provider name + * @return this + * @see #getProvider() + */ + public RewrapManyDataKeyOptions provider(final String provider) { + this.provider = provider; + return this; + } + + /** + * @return the provider name + */ + @Nullable + public String getProvider() { + return provider; + } + + /** + * Sets the optional master key document. + * + * @param masterKey the master key document + * @return this + * @see #getMasterKey() + */ + public RewrapManyDataKeyOptions masterKey(final BsonDocument masterKey) { + this.masterKey = masterKey; + return this; + } + + /** + * Gets the master key document + * + *
<p>
+ * The masterKey identifies a KMS-specific key used to encrypt the new data key.
+ * </p>
+ * <p>
+ * If the kmsProvider type is "aws" the master key is required and must contain the following fields:
+ * </p>
+ * <ul>
+ * <li>region: a String containing the AWS region in which to locate the master key</li>
+ * <li>key: a String containing the Amazon Resource Name (ARN) to the AWS customer master key</li>
+ * </ul>
+ * <p>
+ * If the kmsProvider type is "azure" the master key is required and must contain the following fields:
+ * </p>
+ * <ul>
+ * <li>keyVaultEndpoint: a String with the host name and an optional port. Example: "example.vault.azure.net".</li>
+ * <li>keyName: a String</li>
+ * <li>keyVersion: an optional String, the specific version of the named key, defaults to using the key's primary version.</li>
+ * </ul>
+ * <p>
+ * If the kmsProvider type is "gcp" the master key is required and must contain the following fields:
+ * </p>
+ * <ul>
+ * <li>projectId: a String</li>
+ * <li>location: String</li>
+ * <li>keyRing: String</li>
+ * <li>keyName: String</li>
+ * <li>keyVersion: an optional String, the specific version of the named key, defaults to using the key's primary version.</li>
+ * <li>endpoint: an optional String, with the host with optional port. Defaults to "cloudkms.googleapis.com".</li>
+ * </ul>
+ * <p>
+ * If the kmsProvider type is "kmip" the master key is required and must contain the following fields:
+ * </p>
+ * <ul>
+ * <li>keyId: optional String, keyId is the KMIP Unique Identifier to a 96 byte KMIP Secret Data managed object. If keyId is
+ * omitted, the driver creates a random 96 byte KMIP Secret Data managed object.</li>
+ * <li>endpoint: a String, the endpoint as a host with required port. e.g. "example.com:443". If endpoint is not provided, it
+ * defaults to the required endpoint from the KMS providers map.</li>
+ * </ul>
+ * <p>
+ * If the kmsProvider type is "local" the masterKey is not applicable.
+ * </p>
+ *
+ * @return the master key document + */ + @Nullable + public BsonDocument getMasterKey() { + return masterKey; + } + +} diff --git a/driver-core/src/main/com/mongodb/client/model/vault/RewrapManyDataKeyResult.java b/driver-core/src/main/com/mongodb/client/model/vault/RewrapManyDataKeyResult.java new file mode 100644 index 00000000000..de7665a75b9 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/vault/RewrapManyDataKeyResult.java @@ -0,0 +1,52 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.vault; + +import com.mongodb.bulk.BulkWriteResult; +import com.mongodb.lang.Nullable; + +/** + * The result of the rewrapping of data keys + * + * @since 4.7 + */ +public final class RewrapManyDataKeyResult { + private final BulkWriteResult bulkWriteResult; + + /** + * Construct a new instance with no bulk write result + */ + public RewrapManyDataKeyResult() { + this.bulkWriteResult = null; + } + + /** + * Construct a new instance + * @param bulkWriteResult the bulk write result of the rewrapping data keys + */ + public RewrapManyDataKeyResult(final BulkWriteResult bulkWriteResult) { + this.bulkWriteResult = bulkWriteResult; + } + + /** + * @return the bulk write result of the rewrapping data keys or null if there was no bulk operation + */ + @Nullable + public BulkWriteResult getBulkWriteResult() { + return bulkWriteResult; + } +} diff --git a/driver-core/src/main/com/mongodb/client/model/vault/TextOptions.java b/driver-core/src/main/com/mongodb/client/model/vault/TextOptions.java new file mode 100644 index 00000000000..34dcd0d806d --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/vault/TextOptions.java @@ -0,0 +1,187 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.vault; + +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; + +/** + * Text options for a Queryable Encryption field that supports text queries. + * + *
+ * <p>
Note: TextOptions is in Alpha and subject to backwards breaking changes. + * + * @since 5.6 + * @mongodb.server.release 8.2 + * @mongodb.driver.manual /core/queryable-encryption/ queryable encryption + */ +@Alpha(Reason.SERVER) +public class TextOptions { + private Boolean caseSensitive; + private Boolean diacriticSensitive; + @Nullable + private BsonDocument prefixOptions; + @Nullable + private BsonDocument suffixOptions; + @Nullable + private BsonDocument substringOptions; + + /** + * Construct a new instance + */ + public TextOptions() { + } + + /** + * @return true if text indexes for this field are case sensitive. + */ + public boolean getCaseSensitive() { + return caseSensitive; + } + + /** + * Set case sensitivity + * + * @param caseSensitive true if text indexes are case sensitive + * @return this + */ + public TextOptions caseSensitive(final boolean caseSensitive) { + this.caseSensitive = caseSensitive; + return this; + } + + /** + * @return true if text indexes are diacritic sensitive + */ + public boolean getDiacriticSensitive() { + return diacriticSensitive; + } + + /** + * Set diacritic sensitivity + * + * @param diacriticSensitive true if text indexes are diacritic sensitive + * @return this + */ + public TextOptions diacriticSensitive(final boolean diacriticSensitive) { + this.diacriticSensitive = diacriticSensitive; + return this; + } + + /** + * Set the prefix options. + * + *
+     * <p>Expected to be a {@link BsonDocument} in the format of:</p>
+     *
+     * <pre>
+     * {@code
+     *   {
+     *    // strMinQueryLength is the minimum allowed query length. Querying with a shorter string will error.
+     *    strMinQueryLength: BsonInt32,
+     *    // strMaxQueryLength is the maximum allowed query length. Querying with a longer string will error.
+     *    strMaxQueryLength: BsonInt32
+     *   }
+     * }
+     * </pre>
+ * + * @param prefixOptions the prefix options or null + * @return this + */ + public TextOptions prefixOptions(@Nullable final BsonDocument prefixOptions) { + this.prefixOptions = prefixOptions; + return this; + } + + /** + * @see #prefixOptions(BsonDocument) + * @return the prefix options document or null + */ + @Nullable + public BsonDocument getPrefixOptions() { + return prefixOptions; + } + + /** + * Set the suffix options. + * + *
+     * <p>Expected to be a {@link BsonDocument} in the format of:</p>
+     *
+     * <pre>
+     * {@code
+     *   {
+     *    // strMinQueryLength is the minimum allowed query length. Querying with a shorter string will error.
+     *    strMinQueryLength: BsonInt32,
+     *    // strMaxQueryLength is the maximum allowed query length. Querying with a longer string will error.
+     *    strMaxQueryLength: BsonInt32
+     *   }
+     * }
+     * </pre>
+ * + * @param suffixOptions the suffix options or null + * @return this + */ + public TextOptions suffixOptions(@Nullable final BsonDocument suffixOptions) { + this.suffixOptions = suffixOptions; + return this; + } + + /** + * @see #suffixOptions(BsonDocument) + * @return the suffix options document or null + */ + @Nullable + public BsonDocument getSuffixOptions() { + return suffixOptions; + } + + /** + * Set the substring options. + * + *
+     * <p>Expected to be a {@link BsonDocument} in the format of:</p>
+     *
+     * <pre>
+     * {@code
+     *   {
+     *    // strMaxLength is the maximum allowed length to insert. Inserting longer strings will error.
+     *    strMaxLength: BsonInt32,
+     *    // strMinQueryLength is the minimum allowed query length. Querying with a shorter string will error.
+     *    strMinQueryLength: BsonInt32,
+     *    // strMaxQueryLength is the maximum allowed query length. Querying with a longer string will error.
+     *    strMaxQueryLength: BsonInt32
+     *   }
+     * }
+     * </pre>
+ * + * @param substringOptions the substring options or null + * @return this + */ + public TextOptions substringOptions(@Nullable final BsonDocument substringOptions) { + this.substringOptions = substringOptions; + return this; + } + + /** + * @see #substringOptions(BsonDocument) + * @return the substring options document or null + */ + @Nullable + public BsonDocument getSubstringOptions() { + return substringOptions; + } + +} diff --git a/driver-core/src/main/com/mongodb/client/model/vault/package-info.java b/driver-core/src/main/com/mongodb/client/model/vault/package-info.java new file mode 100644 index 00000000000..9bc4adc14e8 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/model/vault/package-info.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains options classes for the key vault API + * + * @since 3.11 + */ +@NonNullApi +package com.mongodb.client.model.vault; + +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/client/result/DeleteResult.java b/driver-core/src/main/com/mongodb/client/result/DeleteResult.java new file mode 100644 index 00000000000..103d624a8b7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/result/DeleteResult.java @@ -0,0 +1,143 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.result; + +/** + * The result of a delete operation. If the delete was unacknowledged, then {@code wasAcknowledged} will return false and all other methods + * will throw {@code UnsupportedOperationException}. + * + * @see com.mongodb.WriteConcern#UNACKNOWLEDGED + * @since 3.0 + */ +public abstract class DeleteResult { + + /** + * Returns true if the write was acknowledged. + * + * @return true if the write was acknowledged + */ + public abstract boolean wasAcknowledged(); + + /** + * Gets the number of documents deleted. 
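Putting the three option documents together, a sketch of configuring a field with the fluent API defined in this file; the length bounds are illustrative values, not recommendations:

    import com.mongodb.client.model.vault.TextOptions;
    import org.bson.BsonDocument;
    import org.bson.BsonInt32;

    TextOptions textOptions = new TextOptions()
            .caseSensitive(false)
            .diacriticSensitive(false)
            .prefixOptions(new BsonDocument("strMinQueryLength", new BsonInt32(2))
                    .append("strMaxQueryLength", new BsonInt32(10)))
            .substringOptions(new BsonDocument("strMaxLength", new BsonInt32(60))
                    .append("strMinQueryLength", new BsonInt32(3))
                    .append("strMaxQueryLength", new BsonInt32(10)));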
+ * + * @return the number of documents deleted + */ + public abstract long getDeletedCount(); + + + /** + * Create an acknowledged DeleteResult + * + * @param deletedCount the number of documents deleted + * @return an acknowledged DeleteResult + */ + public static DeleteResult acknowledged(final long deletedCount) { + return new AcknowledgedDeleteResult(deletedCount); + } + + /** + * Create an unacknowledged DeleteResult + * + * @return an unacknowledged DeleteResult + */ + public static DeleteResult unacknowledged() { + return new UnacknowledgedDeleteResult(); + } + + private static class AcknowledgedDeleteResult extends DeleteResult { + private final long deletedCount; + + AcknowledgedDeleteResult(final long deletedCount) { + this.deletedCount = deletedCount; + } + + @Override + public boolean wasAcknowledged() { + return true; + } + + @Override + public long getDeletedCount() { + return deletedCount; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + AcknowledgedDeleteResult that = (AcknowledgedDeleteResult) o; + + if (deletedCount != that.deletedCount) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return (int) (deletedCount ^ (deletedCount >>> 32)); + } + + @Override + public String toString() { + return "AcknowledgedDeleteResult{" + + "deletedCount=" + deletedCount + + '}'; + } + } + + private static class UnacknowledgedDeleteResult extends DeleteResult { + @Override + public boolean wasAcknowledged() { + return false; + } + + @Override + public long getDeletedCount() { + throw new UnsupportedOperationException("Cannot get information about an unacknowledged delete"); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public String toString() { + return "UnacknowledgedDeleteResult{}"; + } + } +} diff --git a/driver-core/src/main/com/mongodb/client/result/InsertManyResult.java b/driver-core/src/main/com/mongodb/client/result/InsertManyResult.java new file mode 100644 index 00000000000..4ba49911da5 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/result/InsertManyResult.java @@ -0,0 +1,146 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.result; + +import org.bson.BsonValue; + +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +import static java.util.Collections.unmodifiableMap; + +/** + * The result of an insert many operation. If the insert many was unacknowledged, then {@code wasAcknowledged} will + * return false and all other methods will throw {@code UnsupportedOperationException}. 
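The DeleteResult contract above calls for a wasAcknowledged() guard before reading the count; a sketch, with collection and Filters assumed from the synchronous driver API:

    DeleteResult result = collection.deleteMany(Filters.eq("status", "stale"));
    if (result.wasAcknowledged()) {
        System.out.println("deleted " + result.getDeletedCount() + " documents");
    } else {
        // getDeletedCount() would throw UnsupportedOperationException here
        System.out.println("delete sent with an unacknowledged write concern");
    }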
+ * + * @see com.mongodb.WriteConcern#UNACKNOWLEDGED + * @since 4.0 + */ +public abstract class InsertManyResult { + + /** + * Returns true if the write was acknowledged. + * + * @return true if the write was acknowledged + */ + public abstract boolean wasAcknowledged(); + + /** + * An unmodifiable map of the index of the inserted document to the id of the inserted document. + * + *
+     * <p>Note: Inserting RawBsonDocuments does not generate an _id value, and its corresponding value will be null.</p>
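A sketch of consuming the index-to-_id map with the RawBsonDocument caveat above in mind; insertManyResult is assumed to come from an acknowledged collection.insertMany call:

    Map<Integer, BsonValue> insertedIds = insertManyResult.getInsertedIds();
    insertedIds.forEach((index, id) ->
            // id is null when the document at this index was a RawBsonDocument
            System.out.println("document " + index + " -> _id " + id));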
+ * + * @return A map of the index of the inserted document to the id of the inserted document. + */ + public abstract Map getInsertedIds(); + + /** + * Create an acknowledged InsertManyResult + * + * @param insertedIds the map of the index of the inserted document to the id of the inserted document. + * @return an acknowledged InsertManyResult + */ + public static InsertManyResult acknowledged(final Map insertedIds) { + return new AcknowledgedInsertManyResult(insertedIds); + } + + /** + * Create an unacknowledged InsertManyResult + * + * @return an unacknowledged InsertManyResult + */ + public static InsertManyResult unacknowledged() { + return new UnacknowledgedInsertManyResult(); + } + + private static class AcknowledgedInsertManyResult extends InsertManyResult { + private final Map insertedIds; + + AcknowledgedInsertManyResult(final Map insertedIds) { + this.insertedIds = unmodifiableMap(new HashMap<>(insertedIds)); + } + + @Override + public boolean wasAcknowledged() { + return true; + } + + @Override + public Map getInsertedIds() { + return insertedIds; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + AcknowledgedInsertManyResult that = (AcknowledgedInsertManyResult) o; + return Objects.equals(insertedIds, that.insertedIds); + } + + @Override + public int hashCode() { + return Objects.hash(insertedIds); + } + + @Override + public String toString() { + return "AcknowledgedInsertManyResult{" + + "insertedIds=" + insertedIds + + '}'; + } + } + + private static class UnacknowledgedInsertManyResult extends InsertManyResult { + @Override + public boolean wasAcknowledged() { + return false; + } + + @Override + public Map getInsertedIds() { + throw new UnsupportedOperationException("Cannot get information about an unacknowledged insert many"); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public String toString() { + return "UnacknowledgedInsertManyResult{}"; + } + } +} diff --git a/driver-core/src/main/com/mongodb/client/result/InsertOneResult.java b/driver-core/src/main/com/mongodb/client/result/InsertOneResult.java new file mode 100644 index 00000000000..e1ecb2aaf24 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/result/InsertOneResult.java @@ -0,0 +1,146 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.result; + +import com.mongodb.lang.Nullable; +import org.bson.BsonValue; + +import java.util.Objects; + +/** + * The result of an insert one operation. If the insert one was unacknowledged, then {@code wasAcknowledged} will + * return false and all other methods will throw {@code UnsupportedOperationException}. 
+ * + * @see com.mongodb.WriteConcern#UNACKNOWLEDGED + * @since 4.0 + */ +public abstract class InsertOneResult { + + /** + * Returns true if the write was acknowledged. + * + * @return true if the write was acknowledged + */ + public abstract boolean wasAcknowledged(); + + /** + * If the _id of the inserted document is available, otherwise null + * + *
+     * <p>Note: Inserting RawBsonDocuments does not generate an _id value.</p>
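Correspondingly, the single-document result warrants a null check; a sketch assuming a synchronous collection:

    InsertOneResult result = collection.insertOne(new Document("name", "alice"));
    BsonValue insertedId = result.getInsertedId(); // null if a RawBsonDocument was inserted
    if (insertedId != null) {
        System.out.println("inserted _id: " + insertedId);
    }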
+ * + * @return if _id of the inserted document is available, otherwise null + */ + @Nullable + public abstract BsonValue getInsertedId(); + + /** + * Create an acknowledged InsertOneResult + * + * @param insertId the id of the inserted document + * @return an acknowledged InsertOneResult + */ + public static InsertOneResult acknowledged(@Nullable final BsonValue insertId) { + return new AcknowledgedInsertOneResult(insertId); + } + + /** + * Create an unacknowledged InsertOneResult + * + * @return an unacknowledged InsertOneResult + */ + public static InsertOneResult unacknowledged() { + return new UnacknowledgedInsertOneResult(); + } + + private static class AcknowledgedInsertOneResult extends InsertOneResult { + private final BsonValue insertedId; + + AcknowledgedInsertOneResult(@Nullable final BsonValue insertId) { + this.insertedId = insertId; + } + + @Override + public boolean wasAcknowledged() { + return true; + } + + @Override + @Nullable + public BsonValue getInsertedId() { + return insertedId; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + AcknowledgedInsertOneResult that = (AcknowledgedInsertOneResult) o; + return Objects.equals(insertedId, that.insertedId); + } + + @Override + public int hashCode() { + return Objects.hash(insertedId); + } + + @Override + public String toString() { + return "AcknowledgedInsertOneResult{" + + "insertedId=" + insertedId + + '}'; + } + } + + private static class UnacknowledgedInsertOneResult extends InsertOneResult { + @Override + public boolean wasAcknowledged() { + return false; + } + + @Override + @Nullable + public BsonValue getInsertedId() { + throw new UnsupportedOperationException("Cannot get information about an unacknowledged insert"); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public String toString() { + return "UnacknowledgedInsertOneResult{}"; + } + } +} diff --git a/driver-core/src/main/com/mongodb/client/result/UpdateResult.java b/driver-core/src/main/com/mongodb/client/result/UpdateResult.java new file mode 100644 index 00000000000..b96d93287f0 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/result/UpdateResult.java @@ -0,0 +1,206 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.result; + +import com.mongodb.lang.Nullable; +import org.bson.BsonValue; + +import java.util.Objects; + +/** + * The result of an update operation. If the update was unacknowledged, then {@code wasAcknowledged} will return false and all other + * methods will throw {@code UnsupportedOperationException}. 
+ * + * @see com.mongodb.WriteConcern#UNACKNOWLEDGED + * @since 3.0 + */ +public abstract class UpdateResult { + + /** + * Returns true if the write was acknowledged. + * + * @return true if the write was acknowledged + */ + public abstract boolean wasAcknowledged(); + + /** + * Gets the number of documents matched by the query. + * + * @return the number of documents matched + */ + public abstract long getMatchedCount(); + + /** + * Gets the number of documents modified by the update. + * + * @return the number of documents modified + */ + public abstract long getModifiedCount(); + + /** + * If the replace resulted in an inserted document, gets the _id of the inserted document, otherwise null. + * + * @return if the replace resulted in an inserted document, the _id of the inserted document, otherwise null + */ + @Nullable + public abstract BsonValue getUpsertedId(); + + /** + * Create an acknowledged UpdateResult + * + * @param matchedCount the number of documents matched + * @param modifiedCount the number of documents modified + * @param upsertedId if the replace resulted in an inserted document, the id of the inserted document + * @return an acknowledged UpdateResult + */ + public static UpdateResult acknowledged(final long matchedCount, @Nullable final Long modifiedCount, + @Nullable final BsonValue upsertedId) { + return new AcknowledgedUpdateResult(matchedCount, modifiedCount, upsertedId); + } + + /** + * Create an unacknowledged UpdateResult + * + * @return an unacknowledged UpdateResult + */ + public static UpdateResult unacknowledged() { + return new UnacknowledgedUpdateResult(); + } + + private static class AcknowledgedUpdateResult extends UpdateResult { + private final long matchedCount; + private final Long modifiedCount; + private final BsonValue upsertedId; + + AcknowledgedUpdateResult(final long matchedCount, final Long modifiedCount, @Nullable final BsonValue upsertedId) { + this.matchedCount = matchedCount; + this.modifiedCount = modifiedCount; + this.upsertedId = upsertedId; + } + + @Override + public boolean wasAcknowledged() { + return true; + } + + @Override + public long getMatchedCount() { + return matchedCount; + } + + @Override + public long getModifiedCount() { + return modifiedCount; + } + + @Override + @Nullable + public BsonValue getUpsertedId() { + return upsertedId; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + AcknowledgedUpdateResult that = (AcknowledgedUpdateResult) o; + + if (matchedCount != that.matchedCount) { + return false; + } + if (!Objects.equals(modifiedCount, that.modifiedCount)) { + return false; + } + if (!Objects.equals(upsertedId, that.upsertedId)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = (int) (matchedCount ^ (matchedCount >>> 32)); + result = 31 * result + (modifiedCount != null ? modifiedCount.hashCode() : 0); + result = 31 * result + (upsertedId != null ? 
upsertedId.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "AcknowledgedUpdateResult{" + + "matchedCount=" + matchedCount + + ", modifiedCount=" + modifiedCount + + ", upsertedId=" + upsertedId + + '}'; + } + } + + private static class UnacknowledgedUpdateResult extends UpdateResult { + @Override + public boolean wasAcknowledged() { + return false; + } + + @Override + public long getMatchedCount() { + throw getUnacknowledgedWriteException(); + } + + @Override + public long getModifiedCount() { + throw getUnacknowledgedWriteException(); + } + + @Override + @Nullable + public BsonValue getUpsertedId() { + throw getUnacknowledgedWriteException(); + } + + private UnsupportedOperationException getUnacknowledgedWriteException() { + return new UnsupportedOperationException("Cannot get information about an unacknowledged update"); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public String toString() { + return "UnacknowledgedUpdateResult{}"; + } + } +} diff --git a/driver-core/src/main/com/mongodb/client/result/package-info.java b/driver-core/src/main/com/mongodb/client/result/package-info.java new file mode 100644 index 00000000000..899324b0f07 --- /dev/null +++ b/driver-core/src/main/com/mongodb/client/result/package-info.java @@ -0,0 +1,23 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains classes representing operation results + */ +@NonNullApi +package com.mongodb.client.result; + +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/connection/AsyncCompletionHandler.java b/driver-core/src/main/com/mongodb/connection/AsyncCompletionHandler.java new file mode 100644 index 00000000000..a286f346427 --- /dev/null +++ b/driver-core/src/main/com/mongodb/connection/AsyncCompletionHandler.java @@ -0,0 +1,55 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.connection; + +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.lang.Nullable; + +/** + * Completion handler for asynchronous I/O. 
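A sketch of implementing this handler and bridging it to the driver's internal callback shape via the asCallback() default method declared just below; the Void type argument is an illustrative choice:

    AsyncCompletionHandler<Void> handler = new AsyncCompletionHandler<Void>() {
        @Override
        public void completed(final Void result) {
            System.out.println("I/O completed");
        }

        @Override
        public void failed(final Throwable t) {
            t.printStackTrace();
        }
    };
    SingleResultCallback<Void> callback = handler.asCallback(); // errors route to failed(), results to completed()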
+ * + * @since 3.0 + * @param the type of a successful completion + */ +public interface AsyncCompletionHandler { + /** + * Invoked when an operation has completed. + * + * @param t the result of the completed operation + */ + void completed(@Nullable T t); + + /** + * Invoked when an operation fails. + * + * @param t the exception that describes the failure + */ + void failed(Throwable t); + + /** + * @return this handler as a callback + */ + default SingleResultCallback asCallback() { + return (r, t) -> { + if (t != null) { + failed(t); + } else { + completed(r); + } + }; + } +} diff --git a/driver-core/src/main/com/mongodb/connection/AsyncTransportSettings.java b/driver-core/src/main/com/mongodb/connection/AsyncTransportSettings.java new file mode 100644 index 00000000000..8e259392313 --- /dev/null +++ b/driver-core/src/main/com/mongodb/connection/AsyncTransportSettings.java @@ -0,0 +1,98 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * + */ +package com.mongodb.connection; + +import com.mongodb.lang.Nullable; + +import java.util.concurrent.ExecutorService; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * {@link TransportSettings} for a non-Netty-based async transport implementation. + * Shallowly immutable. + * + * @since 5.2 + */ +public final class AsyncTransportSettings extends TransportSettings { + + private final ExecutorService executorService; + + private AsyncTransportSettings(final Builder builder) { + this.executorService = builder.executorService; + } + + static Builder builder() { + return new Builder(); + } + + /** + * A builder for an instance of {@link AsyncTransportSettings} + */ + public static final class Builder { + + private ExecutorService executorService; + + private Builder() { + } + + /** + * The executor service, intended to be used exclusively by the mongo + * client. Closing the mongo client will result in {@linkplain ExecutorService#shutdown() orderly shutdown} + * of the executor service. + * + *
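A sketch of handing a client-owned executor to these settings, assuming the public TransportSettings.asyncBuilder() entry point and MongoClientSettings.Builder#transportSettings, neither of which appears in this hunk:

    ExecutorService executor = Executors.newFixedThreadPool(8); // pool size is illustrative
    MongoClientSettings settings = MongoClientSettings.builder()
            .transportSettings(TransportSettings.asyncBuilder()
                    .executorService(executor) // shut down automatically when the client is closed
                    .build())
            .build();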

When {@linkplain SslSettings#isEnabled() TLS is not enabled}, see + * {@link java.nio.channels.AsynchronousChannelGroup#withThreadPool(ExecutorService)} + * for additional requirements for the executor service. + * + * @param executorService the executor service + * @return this + * @see #getExecutorService() + */ + public Builder executorService(final ExecutorService executorService) { + this.executorService = notNull("executorService", executorService); + return this; + } + + /** + * Build an instance of {@link AsyncTransportSettings} + * @return an instance of {@link AsyncTransportSettings} + */ + public AsyncTransportSettings build() { + return new AsyncTransportSettings(this); + } + } + + /** + * Gets the executor service + * + * @return the executor service + * @see Builder#executorService(ExecutorService) + */ + @Nullable + public ExecutorService getExecutorService() { + return executorService; + } + + @Override + public String toString() { + return "AsyncTransportSettings{" + + "executorService=" + executorService + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/connection/ClusterConnectionMode.java b/driver-core/src/main/com/mongodb/connection/ClusterConnectionMode.java new file mode 100644 index 00000000000..6642f768f8e --- /dev/null +++ b/driver-core/src/main/com/mongodb/connection/ClusterConnectionMode.java @@ -0,0 +1,41 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.connection; + +/** + * The cluster connection mode. + * + * @since 3.0 + */ +public enum ClusterConnectionMode { + /** + * Connect directly to a server, regardless of the type of cluster it is a part of. + */ + SINGLE, + + /** + * Connect to multiple servers in a cluster (either a replica set or multiple mongos servers) + */ + MULTIPLE, + + /** + * Connect to one or more mongos servers via a load balancer + * + * @since 4.3 + */ + LOAD_BALANCED +} diff --git a/driver-core/src/main/com/mongodb/connection/ClusterDescription.java b/driver-core/src/main/com/mongodb/connection/ClusterDescription.java new file mode 100644 index 00000000000..f80fd00b48d --- /dev/null +++ b/driver-core/src/main/com/mongodb/connection/ClusterDescription.java @@ -0,0 +1,344 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.connection; + +import com.mongodb.MongoException; +import com.mongodb.ReadPreference; +import com.mongodb.annotations.Immutable; +import com.mongodb.internal.selector.ReadPreferenceServerSelector; +import com.mongodb.internal.selector.WritableServerSelector; +import com.mongodb.lang.Nullable; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Objects; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.connection.ClusterDescriptionHelper.getServersByPredicate; +import static java.lang.String.format; + +/** + * Immutable snapshot state of a cluster. + * + * @since 3.0 + */ +@Immutable +public class ClusterDescription { + private final ClusterConnectionMode connectionMode; + private final ClusterType type; + private final List serverDescriptions; + private final ClusterSettings clusterSettings; + private final ServerSettings serverSettings; + private final MongoException srvResolutionException; + private final Integer logicalSessionTimeoutMinutes; + + /** + * Creates a new ClusterDescription. + * + * @param connectionMode whether to connect directly to a single server or to multiple servers + * @param type what sort of cluster this is + * @param serverDescriptions the descriptions of all the servers currently in this cluster + */ + public ClusterDescription(final ClusterConnectionMode connectionMode, final ClusterType type, + final List serverDescriptions) { + this(connectionMode, type, serverDescriptions, null, null); + } + + /** + * Creates a new ClusterDescription. + * + * @param connectionMode whether to connect directly to a single server or to multiple servers + * @param type what sort of cluster this is + * @param serverDescriptions the descriptions of all the servers currently in this cluster + * @param clusterSettings the cluster settings + * @param serverSettings the server settings + * @since 3.4 + */ + public ClusterDescription(final ClusterConnectionMode connectionMode, final ClusterType type, + final List serverDescriptions, + @Nullable final ClusterSettings clusterSettings, + @Nullable final ServerSettings serverSettings) { + this(connectionMode, type, null, serverDescriptions, clusterSettings, serverSettings); + } + + /** + * Creates a new ClusterDescription. + * + * @param connectionMode whether to connect directly to a single server or to multiple servers + * @param type what sort of cluster this is + * @param srvResolutionException an exception resolving the SRV record + * @param serverDescriptions the descriptions of all the servers currently in this cluster + * @param clusterSettings the cluster settings + * @param serverSettings the server settings + * @since 3.10 + */ + public ClusterDescription(final ClusterConnectionMode connectionMode, final ClusterType type, + @Nullable final MongoException srvResolutionException, + final List serverDescriptions, + @Nullable final ClusterSettings clusterSettings, + @Nullable final ServerSettings serverSettings) { + notNull("all", serverDescriptions); + this.connectionMode = notNull("connectionMode", connectionMode); + this.type = notNull("type", type); + this.srvResolutionException = srvResolutionException; + this.serverDescriptions = new ArrayList<>(serverDescriptions); + this.clusterSettings = clusterSettings; + this.serverSettings = serverSettings; + this.logicalSessionTimeoutMinutes = calculateLogicalSessionTimeoutMinutes(); + } + + /** + * Gets the cluster settings, which may be null if not provided. 
+ * + * @return the cluster settings + * @since 3.4 + */ + public ClusterSettings getClusterSettings() { + return clusterSettings; + } + + /** + * Gets the server settings, which may be null if not provided. + * + * @return the server settings + * @since 3.4 + */ + public ServerSettings getServerSettings() { + return serverSettings; + } + + /** + * Return whether all servers in the cluster are compatible with the driver. + * + * @return true if all servers in the cluster are compatible with the driver + */ + public boolean isCompatibleWithDriver() { + for (ServerDescription cur : serverDescriptions) { + if (!cur.isCompatibleWithDriver()) { + return false; + } + } + return true; + } + + /** + * Return a server in the cluster that is incompatibly older than the driver. + * + * @return a server in the cluster that is incompatibly older than the driver, or null if there are none + * @since 3.6 + */ + @Nullable + public ServerDescription findServerIncompatiblyOlderThanDriver() { + for (ServerDescription cur : serverDescriptions) { + if (cur.isIncompatiblyOlderThanDriver()) { + return cur; + } + } + return null; + } + + /** + * Return a server in the cluster that is incompatibly newer than the driver. + * + * @return a server in the cluster that is incompatibly newer than the driver, or null if there are none + * @since 3.6 + */ + @Nullable + public ServerDescription findServerIncompatiblyNewerThanDriver() { + for (ServerDescription cur : serverDescriptions) { + if (cur.isIncompatiblyNewerThanDriver()) { + return cur; + } + } + return null; + } + + /** + * Returns true if this cluster has at least one server that satisfies the given read preference. + * + * @param readPreference the non-null read preference + * @return whether this cluster has at least one server that satisfies the given read preference + * @since 3.3 + */ + public boolean hasReadableServer(final ReadPreference readPreference) { + notNull("readPreference", readPreference); + return !new ReadPreferenceServerSelector(readPreference).select(this).isEmpty(); + } + + + /** + * Returns true if this cluster has at least one server that can be used for write operations. + * + * @return true if this cluster has at least one server that can be used for write operations + * @since 3.3 + */ + public boolean hasWritableServer() { + return !new WritableServerSelector().select(this).isEmpty(); + } + + + /** + * Gets whether this cluster is connecting to a single server or multiple servers. + * + * @return the ClusterConnectionMode for this cluster + */ + public ClusterConnectionMode getConnectionMode() { + return connectionMode; + } + + /** + * Gets the specific type of this cluster + * + * @return a ClusterType enum representing the type of this cluster + */ + public ClusterType getType() { + return type; + } + + /** + * Gets any exception encountered while resolving the SRV record for the initial host. + * + * @return any exception encountered while resolving the SRV record for the initial host, or null if none + * @since 3.10 + */ + @Nullable + public MongoException getSrvResolutionException() { + return srvResolutionException; + } + + /** + * Returns an unmodifiable list of the server descriptions in this cluster description. 
+ * + * @return an unmodifiable list of the server descriptions in this cluster description + * @since 3.3 + */ + public List getServerDescriptions() { + return Collections.unmodifiableList(serverDescriptions); + } + + /** + * Gets the logical session timeout in minutes, or null if at least one of the known servers does not support logical sessions. + * + * @return the logical session timeout in minutes, which may be null + * @mongodb.server.release 3.6 + * @since 3.6 + */ + @Nullable + public Integer getLogicalSessionTimeoutMinutes() { + return logicalSessionTimeoutMinutes; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ClusterDescription that = (ClusterDescription) o; + + if (connectionMode != that.connectionMode) { + return false; + } + if (type != that.type) { + return false; + } + if (serverDescriptions.size() != that.serverDescriptions.size()) { + return false; + } + + if (!serverDescriptions.containsAll(that.serverDescriptions)) { + return false; + } + + // Compare class equality and message as exceptions rarely override equals + Class thisExceptionClass = srvResolutionException != null ? srvResolutionException.getClass() : null; + Class thatExceptionClass = that.srvResolutionException != null ? that.srvResolutionException.getClass() : null; + if (!Objects.equals(thisExceptionClass, thatExceptionClass)) { + return false; + } + + String thisExceptionMessage = srvResolutionException != null ? srvResolutionException.getMessage() : null; + String thatExceptionMessage = that.srvResolutionException != null ? that.srvResolutionException.getMessage() : null; + if (!Objects.equals(thisExceptionMessage, thatExceptionMessage)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = connectionMode.hashCode(); + result = 31 * result + type.hashCode(); + result = 31 * result + (srvResolutionException == null ? 0 : srvResolutionException.hashCode()); + result = 31 * result + serverDescriptions.hashCode(); + return result; + } + + @Override + public String toString() { + return "ClusterDescription{" + + "type=" + getType() + + (srvResolutionException == null ? "" : ", srvResolutionException=" + srvResolutionException) + + ", connectionMode=" + connectionMode + + ", serverDescriptions=" + serverDescriptions + + '}'; + } + + /** + * Returns a short, pretty description for this ClusterDescription. + * + * @return a String describing this cluster. 
+ */ + public String getShortDescription() { + StringBuilder serverDescriptions = new StringBuilder(); + String delimiter = ""; + for (final ServerDescription cur : this.serverDescriptions) { + serverDescriptions.append(delimiter).append(cur.getShortDescription()); + delimiter = ", "; + } + if (srvResolutionException == null) { + return format("{type=%s, servers=[%s]", type, serverDescriptions); + } else { + return format("{type=%s, srvResolutionException=%s, servers=[%s]", type, srvResolutionException, serverDescriptions); + } + } + + @Nullable + private Integer calculateLogicalSessionTimeoutMinutes() { + Integer retVal = null; + + for (ServerDescription cur : getServersByPredicate(this, serverDescription -> + serverDescription.isPrimary() || serverDescription.isSecondary())) { + + Integer logicalSessionTimeoutMinutes = cur.getLogicalSessionTimeoutMinutes(); + if (logicalSessionTimeoutMinutes == null) { + return null; + } + if (retVal == null) { + retVal = logicalSessionTimeoutMinutes; + } else { + retVal = Math.min(retVal, logicalSessionTimeoutMinutes); + } + } + return retVal; + } +} diff --git a/driver-core/src/main/com/mongodb/connection/ClusterId.java b/driver-core/src/main/com/mongodb/connection/ClusterId.java new file mode 100644 index 00000000000..654f1bccecc --- /dev/null +++ b/driver-core/src/main/com/mongodb/connection/ClusterId.java @@ -0,0 +1,116 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.connection; + +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.lang.Nullable; +import org.bson.types.ObjectId; + +import java.util.Objects; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; + +/** + * A client-generated identifier that uniquely identifies a connection to a MongoDB cluster, which could be sharded, replica set, + * or standalone. + * + * @since 3.0 + */ +public final class ClusterId { + private final String value; + private final String description; + + /** + * Construct an instance. + * + */ + public ClusterId() { + this(null); + } + + /** + * Construct an instance. + * + * @param description the user defined description of the MongoClient + */ + public ClusterId(@Nullable final String description) { + this.value = new ObjectId().toHexString(); + this.description = description; + } + + @VisibleForTesting(otherwise = PRIVATE) + ClusterId(final String value, final String description) { + this.value = notNull("value", value); + this.description = description; + } + + /** + * Gets the value of the identifier. + * + * @return the value + */ + public String getValue() { + return value; + } + + /** + * Gets the user defined description of the MongoClient. 
+ * + * @return the user defined description of the MongoClient + */ + @Nullable + public String getDescription() { + return description; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ClusterId clusterId = (ClusterId) o; + + if (!value.equals(clusterId.value)) { + return false; + } + if (!Objects.equals(description, clusterId.description)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = value.hashCode(); + result = 31 * result + (description != null ? description.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "ClusterId{" + + "value='" + value + '\'' + + ", description='" + description + '\'' + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/connection/ClusterSettings.java b/driver-core/src/main/com/mongodb/connection/ClusterSettings.java new file mode 100644 index 00000000000..01e5c140441 --- /dev/null +++ b/driver-core/src/main/com/mongodb/connection/ClusterSettings.java @@ -0,0 +1,663 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.connection; + +import com.mongodb.ConnectionString; +import com.mongodb.ServerAddress; +import com.mongodb.annotations.Immutable; +import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.event.ClusterListener; +import com.mongodb.internal.connection.ServerAddressHelper; +import com.mongodb.lang.Nullable; +import com.mongodb.selector.ServerSelector; + +import java.util.ArrayList; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.connection.ServerAddressHelper.createServerAddress; +import static java.util.Collections.singletonList; +import static java.util.Collections.unmodifiableList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +/** + * Settings for the cluster. + * + * @since 3.0 + */ +@Immutable +public final class ClusterSettings { + private final String srvHost; + private final Integer srvMaxHosts; + private final String srvServiceName; + private final List hosts; + private final ClusterConnectionMode mode; + private final ClusterType requiredClusterType; + private final String requiredReplicaSetName; + private final ServerSelector serverSelector; + private final long localThresholdMS; + private final long serverSelectionTimeoutMS; + private final List clusterListeners; + + /** + * Get a builder for this class. + * + * @return a new Builder for creating ClusterSettings. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Creates a builder instance. 
+ * + * @param clusterSettings existing ClusterSettings to default the builder settings on. + * @return a builder + * @since 3.5 + */ + public static Builder builder(final ClusterSettings clusterSettings) { + return builder().applySettings(clusterSettings); + } + + /** + * A builder for the cluster settings. + */ + @NotThreadSafe + public static final class Builder { + private static final List DEFAULT_HOSTS = singletonList(new ServerAddress()); + private String srvHost; + private Integer srvMaxHosts; + private String srvServiceName = "mongodb"; + private List hosts = DEFAULT_HOSTS; + private ClusterConnectionMode mode; + private ClusterType requiredClusterType = ClusterType.UNKNOWN; + private String requiredReplicaSetName; + private ServerSelector serverSelector; + private long serverSelectionTimeoutMS = MILLISECONDS.convert(30, TimeUnit.SECONDS); + private long localThresholdMS = MILLISECONDS.convert(15, MILLISECONDS); + private List clusterListeners = new ArrayList<>(); + + private Builder() { + } + + /** + * Applies the clusterSettings to the builder + * + *
+         * <p>Note: Overwrites all existing settings</p>
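Since builder(ClusterSettings) shown earlier delegates to this method, a copy-then-override flow looks like the following sketch; the host names are placeholders:

    ClusterSettings base = ClusterSettings.builder()
            .hosts(Arrays.asList(new ServerAddress("db1.example.com"), new ServerAddress("db2.example.com")))
            .build();
    // Every field of base is copied first, then selectively overridden.
    ClusterSettings tuned = ClusterSettings.builder(base)
            .localThreshold(25, TimeUnit.MILLISECONDS)
            .build();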
+ * + * @param clusterSettings the clusterSettings + * @return this + * @since 3.7 + */ + public Builder applySettings(final ClusterSettings clusterSettings) { + notNull("clusterSettings", clusterSettings); + srvHost = clusterSettings.srvHost; + srvServiceName = clusterSettings.srvServiceName; + srvMaxHosts = clusterSettings.srvMaxHosts; + hosts = clusterSettings.hosts; + mode = clusterSettings.mode; + requiredReplicaSetName = clusterSettings.requiredReplicaSetName; + requiredClusterType = clusterSettings.requiredClusterType; + localThresholdMS = clusterSettings.localThresholdMS; + serverSelectionTimeoutMS = clusterSettings.serverSelectionTimeoutMS; + clusterListeners = new ArrayList<>(clusterSettings.clusterListeners); + serverSelector = clusterSettings.serverSelector; + return this; + } + + /** + * Sets the host name to use in order to look up an SRV DNS record to find the MongoDB hosts. + * + *
+         * <p>
+         * Note that when setting srvHost via {@code ClusterSettings.Builder}, the driver will NOT process any TXT records
+         * associated with the host. In order to enable the processing of TXT records while still using {@code MongoClientSettings},
+         * specify the SRV host via connection string and apply the connection string to the settings, e.g.
+         * {@code MongoClientSettings.builder().applyConnectionString(new ConnectionString("mongodb+srv://host1.acme.com"))}.
+         * </p>
+ * + * @param srvHost the SRV host name + * @return this + * @see com.mongodb.MongoClientSettings.Builder#applyConnectionString(ConnectionString) + * @see ClusterSettings.Builder#applyConnectionString(ConnectionString) + */ + public Builder srvHost(final String srvHost) { + if (this.hosts != DEFAULT_HOSTS) { + throw new IllegalArgumentException("Can not set both hosts and srvHost"); + } + this.srvHost = srvHost; + return this; + } + + /** + * Sets the maximum number of hosts to connect to when using SRV protocol. + * This setting is not used if {@link #getMode()} is {@link ClusterConnectionMode#LOAD_BALANCED}. + * + * @param srvMaxHosts the maximum number of hosts to connect to when using SRV protocol + * @return this + * @since 4.4 + * @see #getSrvMaxHosts() + */ + public Builder srvMaxHosts(final Integer srvMaxHosts) { + this.srvMaxHosts = srvMaxHosts; + return this; + } + + /** + * Sets the SRV service name. + * + *
+         * <p>
+         * The SRV resource record (RFC 2782) service name, which is limited to 15 characters
+         * (RFC 6335 section 5.1). It is combined with the host name specified by {@link #getSrvHost()} as follows:
+         * {@code _srvServiceName._tcp.hostName}. The combined string is an SRV resource record name
+         * (RFC 1035 section 2.3.1), which is limited to 255 characters (RFC 1035 section 2.3.4).
+         * </p>
+ * + * @param srvServiceName the SRV service name + * @return this + * @since 4.5 + * @see #getSrvServiceName() + */ + public Builder srvServiceName(final String srvServiceName) { + this.srvServiceName = notNull("srvServiceName", srvServiceName); + return this; + } + + /** + * Sets the hosts for the cluster. Any duplicate server addresses are removed from the list. + * + * @param hosts the seed list of hosts + * @return this + */ + public Builder hosts(final List hosts) { + notNull("hosts", hosts); + if (hosts.isEmpty()) { + throw new IllegalArgumentException("hosts list may not be empty"); + } + if (srvHost != null) { + throw new IllegalArgumentException("srvHost must be null"); + } + Set hostsSet = new LinkedHashSet<>(hosts.size()); + for (ServerAddress serverAddress : hosts) { + notNull("serverAddress", serverAddress); + hostsSet.add(createServerAddress(serverAddress.getHost(), serverAddress.getPort())); + } + this.hosts = unmodifiableList(new ArrayList<>(hostsSet)); + return this; + } + + /** + * Sets the mode for this cluster. + * + * @param mode the cluster connection mode + * @return this; + */ + public Builder mode(final ClusterConnectionMode mode) { + this.mode = notNull("mode", mode); + return this; + } + + /** + * Sets the required replica set name for the cluster. + * This setting is not used if {@link #getMode()} is {@link ClusterConnectionMode#LOAD_BALANCED}. + * + * @param requiredReplicaSetName the required replica set name. + * @return this + * @see #getRequiredReplicaSetName() + */ + public Builder requiredReplicaSetName(@Nullable final String requiredReplicaSetName) { + this.requiredReplicaSetName = requiredReplicaSetName; + return this; + } + + /** + * Sets the required cluster type for the cluster. + * This setting is not used if {@link #getMode()} is {@link ClusterConnectionMode#LOAD_BALANCED}. + * + * @param requiredClusterType the required cluster type + * @return this + * @see #getRequiredClusterType() + */ + public Builder requiredClusterType(final ClusterType requiredClusterType) { + this.requiredClusterType = notNull("requiredClusterType", requiredClusterType); + return this; + } + + /** + * Sets the local threshold. + * + * @param localThreshold the acceptable latency difference, in milliseconds, which must be >= 0 + * @param timeUnit the time unit + * @throws IllegalArgumentException if {@code localThreshold < 0} + * @return this + * @since 3.7 + */ + public Builder localThreshold(final long localThreshold, final TimeUnit timeUnit) { + isTrueArgument("localThreshold must be >= 0", localThreshold >= 0); + this.localThresholdMS = MILLISECONDS.convert(localThreshold, timeUnit); + return this; + } + + /** + * Adds a server selector for the cluster to apply before selecting a server. + * + * @param serverSelector the server selector to apply as selector. + * @return this + * @see #getServerSelector() + */ + public Builder serverSelector(final ServerSelector serverSelector) { + this.serverSelector = serverSelector; + return this; + } + + /** + * Sets the timeout to apply when selecting a server. If the timeout expires before a server is found to handle a request, a + * {@link com.mongodb.MongoTimeoutException} will be thrown. The default value is 30 seconds. + * + *
+         * <p>A value of 0 means that it will time out immediately if no server is available. A negative value means to wait
+         * indefinitely.</p>
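For example, a fail-fast configuration under the semantics just described; the five-second value is illustrative:

    ClusterSettings settings = ClusterSettings.builder()
            .serverSelectionTimeout(5, TimeUnit.SECONDS) // rather than the 30 second default
            .build();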
+ * + * @param serverSelectionTimeout the timeout + * @param timeUnit the time unit + * @return this + */ + public Builder serverSelectionTimeout(final long serverSelectionTimeout, final TimeUnit timeUnit) { + this.serverSelectionTimeoutMS = MILLISECONDS.convert(serverSelectionTimeout, timeUnit); + return this; + } + + /** + * Adds a cluster listener. + * + * @param clusterListener the non-null cluster listener + * @return this + * @since 3.3 + */ + public Builder addClusterListener(final ClusterListener clusterListener) { + notNull("clusterListener", clusterListener); + clusterListeners.add(clusterListener); + return this; + } + + /** + * Sets the cluster listeners. + * + * @param clusterListeners list of cluster listeners + * @return this + * @since 4.5 + */ + public Builder clusterListenerList(final List clusterListeners) { + notNull("clusterListeners", clusterListeners); + this.clusterListeners = new ArrayList<>(clusterListeners); + return this; + } + + /** + * Takes the settings from the given {@code ConnectionString} and applies them to the builder + * + * @param connectionString the connection string containing details of how to connect to MongoDB + * @return this + */ + public Builder applyConnectionString(final ConnectionString connectionString) { + Boolean directConnection = connectionString.isDirectConnection(); + Boolean loadBalanced = connectionString.isLoadBalanced(); + + if (loadBalanced != null && loadBalanced) { + mode(ClusterConnectionMode.LOAD_BALANCED); + if (connectionString.isSrvProtocol()) { + srvHost(connectionString.getHosts().get(0)); + } else { + hosts(singletonList(createServerAddress(connectionString.getHosts().get(0)))); + } + } else if (connectionString.isSrvProtocol()) { + mode(ClusterConnectionMode.MULTIPLE); + srvHost(connectionString.getHosts().get(0)); + Integer srvMaxHosts = connectionString.getSrvMaxHosts(); + if (srvMaxHosts != null) { + srvMaxHosts(srvMaxHosts); + } + String srvServiceName = connectionString.getSrvServiceName(); + if (srvServiceName != null) { + srvServiceName(srvServiceName); + } + } else if (directConnection != null) { + mode(directConnection ? ClusterConnectionMode.SINGLE : ClusterConnectionMode.MULTIPLE); + List hosts = directConnection ? singletonList(connectionString.getHosts().get(0)) : connectionString.getHosts(); + hosts(hosts.stream().map(ServerAddressHelper::createServerAddress).collect(Collectors.toList())); + } else { + mode = null; + List seedList = connectionString.getHosts().stream() + .map(ServerAddressHelper::createServerAddress) + .collect(Collectors.toList()); + hosts(seedList); + } + requiredReplicaSetName(connectionString.getRequiredReplicaSetName()); + + Integer serverSelectionTimeout = connectionString.getServerSelectionTimeout(); + if (serverSelectionTimeout != null) { + serverSelectionTimeout(serverSelectionTimeout, MILLISECONDS); + } + + Integer localThreshold = connectionString.getLocalThreshold(); + if (localThreshold != null) { + localThreshold(localThreshold, MILLISECONDS); + } + return this; + } + + /** + * Build the settings from the builder. + * + * @return the cluster settings + */ + public ClusterSettings build() { + return new ClusterSettings(this); + } + } + + /** + * Gets the host name from which to lookup SRV record for the seed list + * @return the SRV host, or null if none specified + * @since 3.10 + */ + @Nullable + public String getSrvHost() { + return srvHost; + } + + /** + * Gets the maximum number of hosts to connect to when using SRV protocol. 
+ + /** + * Gets the maximum number of hosts to connect to when using SRV protocol. + * This setting is not used if {@link #getMode()} is {@link ClusterConnectionMode#LOAD_BALANCED}. + * + * @return the maximum number of hosts to connect to when using SRV protocol. Defaults to null. + * @since 4.4 + * @see Builder#srvMaxHosts(Integer) + */ + @Nullable + public Integer getSrvMaxHosts() { + return srvMaxHosts; + }
+ + /** + * Gets the SRV service name. + *

+ * <p>The SRV resource record (RFC 2782) service name, which is limited to 15 characters + * (RFC 6335 section 5.1). It is combined with the host name specified by + * {@link #getSrvHost()} as follows: {@code _srvServiceName._tcp.hostName}. The combined string is an SRV resource record + * name (RFC 1035 section 2.3.1), which is limited to 255 characters (RFC 1035 section 2.3.4).</p>

+ * + * @return the SRV service name, which defaults to {@code "mongodb"} + * @since 4.5 + * @see Builder#srvServiceName(String) + */ + public String getSrvServiceName() { + return srvServiceName; + }
+ + /** + * Gets the seed list of hosts for the cluster. + * + * @return the seed list of hosts + */ + public List<ServerAddress> getHosts() { + return hosts; + }
+ + /** + * Gets the mode. + * + * @return the mode + */ + public ClusterConnectionMode getMode() { + return mode; + }
+ + /** + * Gets the required cluster type. + * This setting is not used if {@link #getMode()} is {@link ClusterConnectionMode#LOAD_BALANCED}. + * + * @return the required cluster type + * @see Builder#requiredClusterType(ClusterType) + */ + public ClusterType getRequiredClusterType() { + return requiredClusterType; + }
+ + /** + * Gets the required replica set name. + * This setting is not used if {@link #getMode()} is {@link ClusterConnectionMode#LOAD_BALANCED}. + * + * @return the required replica set name + * @see Builder#requiredReplicaSetName(String) + */ + @Nullable + public String getRequiredReplicaSetName() { + return requiredReplicaSetName; + }
+ + /** + * Gets the server selector. + *

<p>The server selector augments the normal server selection rules applied by the driver when determining + * which server to send an operation to. At the point that it's called by the driver, the + * {@link ClusterDescription} which is passed to it {@linkplain ClusterDescription#getServerDescriptions() contains} a list of + * {@link ServerDescription} instances which satisfy either the configured {@link com.mongodb.ReadPreference} + * for any read operation or ones that can take writes (e.g. a standalone, mongos, or replica set primary), + * barring those corresponding to servers that the driver considers unavailable or potentially problematic.</p>

+ *

<p>The server selector can then filter the {@code ServerDescription} list using whatever criteria the + * application requires.</p>

+ *

<p>After this selector executes, three additional selectors are applied by the driver:</p>
+ * <ul>
+ * <li>select from within the latency window</li>
+ * <li>select at most two random servers from those remaining</li>
+ * <li>select the one with fewer outstanding concurrent operations</li>
+ * </ul>

<p>To skip the latency window selector, an application can:</p>
+ * <ul>
+ * <li>configure the local threshold to a sufficiently high value so that it doesn't exclude any servers</li>
+ * <li>return a list containing a single server from this selector (which will also make the random member selector a no-op)</li>
+ * </ul>
+ * + * @return the server selector, which may be null + * @see Builder#serverSelector(ServerSelector) + */ + @Nullable + public ServerSelector getServerSelector() { + return serverSelector; + }
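As an illustrative sketch of a custom selector (the "dc" tag and the fallback policy are assumptions, not part of the change): the selector only narrows the candidate list, and the driver's latency-window, random-member, and in-use selectors still run afterwards.

    ServerSelector dataCenterSelector = new ServerSelector() {
        @Override
        public List<ServerDescription> select(final ClusterDescription clusterDescription) {
            TagSet requiredTags = new TagSet(new Tag("dc", "east"));
            // Prefer servers tagged dc=east; fall back to the unfiltered list so selection can still succeed.
            List<ServerDescription> preferred = clusterDescription.getServerDescriptions().stream()
                    .filter(server -> server.getTagSet().containsAll(requiredTags))
                    .collect(Collectors.toList());
            return preferred.isEmpty() ? clusterDescription.getServerDescriptions() : preferred;
        }
    };

    ClusterSettings settings = ClusterSettings.builder()
            .hosts(Arrays.asList(new ServerAddress("host1"), new ServerAddress("host2")))
            .serverSelector(dataCenterSelector)
            .build();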

+ + /** + * Gets the timeout to apply when selecting a server. If the timeout expires before a server is found to + * handle a request, a {@link com.mongodb.MongoTimeoutException} will be thrown. The default value is 30 seconds. + * + * <p>A value of 0 means that it will timeout immediately if no server is available. A negative value means to wait + * indefinitely.</p>
+ * + * @param timeUnit the time unit + * @return the timeout in the given time unit + */ + public long getServerSelectionTimeout(final TimeUnit timeUnit) { + return timeUnit.convert(serverSelectionTimeoutMS, MILLISECONDS); + } + + /** + * Gets the local threshold. When choosing among multiple MongoDB servers to send a request, the MongoClient will only + * send that request to a server whose ping time is less than or equal to the server with the fastest ping time plus the local + * threshold. + * + *

<p>For example, let's say that the client is choosing a server to send a query when the read preference is + * {@code ReadPreference.secondary()}, and that there are three secondaries, server1, server2, and server3, whose ping times are + * 10, 15, and 16 milliseconds, respectively. With a local threshold of 5 milliseconds, the client will send the query to either + * server1 or server2 (randomly selecting between the two).</p>
+ * + * <p>Default is 15 milliseconds.</p>
+ * + * @param timeUnit the time unit + * @return the local threshold in the given time unit + * @since 3.7 + * @mongodb.driver.manual reference/program/mongos/#cmdoption--localThreshold Local Threshold + */ + public long getLocalThreshold(final TimeUnit timeUnit) { + return timeUnit.convert(localThresholdMS, MILLISECONDS); + }
+ + /** + * Gets the cluster listeners. The default value is an empty list. + * + * @return the cluster listeners + * @since 3.3 + */ + public List<ClusterListener> getClusterListeners() { + return clusterListeners; + }
+ + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ClusterSettings that = (ClusterSettings) o; + return localThresholdMS == that.localThresholdMS + && serverSelectionTimeoutMS == that.serverSelectionTimeoutMS + && Objects.equals(srvHost, that.srvHost) + && Objects.equals(srvMaxHosts, that.srvMaxHosts) + && srvServiceName.equals(that.srvServiceName) + && hosts.equals(that.hosts) + && mode == that.mode + && requiredClusterType == that.requiredClusterType + && Objects.equals(requiredReplicaSetName, that.requiredReplicaSetName) + && Objects.equals(serverSelector, that.serverSelector) + && clusterListeners.equals(that.clusterListeners); + }
+ + @Override + public int hashCode() { + return Objects.hash(srvHost, srvMaxHosts, srvServiceName, hosts, mode, requiredClusterType, requiredReplicaSetName, serverSelector, + localThresholdMS, serverSelectionTimeoutMS, clusterListeners); + }
+ + @Override + public String toString() { + return "{" + + (hosts.isEmpty() ? "" : "hosts=" + hosts) + + (srvHost == null ? "" : ", srvHost=" + srvHost) + + (srvServiceName == null ? "" : ", srvServiceName=" + srvServiceName) + + (srvMaxHosts == null ? "" : ", srvMaxHosts=" + srvMaxHosts) + + ", mode=" + mode + + ", requiredClusterType=" + requiredClusterType + + ", requiredReplicaSetName='" + requiredReplicaSetName + '\'' + + ", serverSelector='" + serverSelector + '\'' + + ", clusterListeners='" + clusterListeners + '\'' + + ", serverSelectionTimeout='" + serverSelectionTimeoutMS + " ms" + '\'' + + ", localThreshold='" + localThresholdMS + " ms" + '\'' + + '}'; + }
+ + /** + * Returns a short, pretty description for these ClusterSettings. + * + * @return a String description of the relevant settings. + */ + public String getShortDescription() { + return "{" + + (hosts.isEmpty() ? "" : "hosts=" + hosts) + + (srvHost == null ? "" : ", srvHost=" + srvHost) + + ", mode=" + mode + + ", requiredClusterType=" + requiredClusterType + + ", serverSelectionTimeout='" + serverSelectionTimeoutMS + " ms" + '\'' + + (requiredReplicaSetName == null ?
"" : ", requiredReplicaSetName='" + requiredReplicaSetName + '\'') + + '}'; + } + + private ClusterSettings(final Builder builder) { + if (builder.srvHost != null) { + if (builder.srvHost.contains(":")) { + throw new IllegalArgumentException("The srvHost can not contain a host name that specifies a port"); + } + } + + if (builder.hosts.size() > 1 && builder.requiredClusterType == ClusterType.STANDALONE) { + throw new IllegalArgumentException("Multiple hosts cannot be specified when using ClusterType.STANDALONE."); + } + + if (builder.requiredReplicaSetName != null) { + if (builder.requiredClusterType == ClusterType.UNKNOWN) { + builder.requiredClusterType = ClusterType.REPLICA_SET; + } else if (builder.requiredClusterType != ClusterType.REPLICA_SET) { + throw new IllegalArgumentException("When specifying a replica set name, only ClusterType.UNKNOWN and " + + "ClusterType.REPLICA_SET are valid."); + } + } + + srvHost = builder.srvHost; + srvMaxHosts = builder.srvMaxHosts; + srvServiceName = builder.srvServiceName; + hosts = builder.hosts; + requiredReplicaSetName = builder.requiredReplicaSetName; + if (builder.mode != null) { + switch (builder.mode) { + case SINGLE: { + if (srvHost != null) { + throw new IllegalArgumentException("An SRV host name was provided but the connection mode is not MULTIPLE"); + } else if (builder.hosts.size() > 1) { + throw new IllegalArgumentException("Can not directly connect to more than one server"); + } + break; + } + case LOAD_BALANCED: { + if (builder.srvHost == null && builder.hosts.size() != 1) { + throw new IllegalArgumentException("Multiple hosts cannot be specified when in load balancing mode"); + } + break; + } + default: + } + mode = builder.mode; + } else { + if (srvHost != null) { + mode = ClusterConnectionMode.MULTIPLE; + } else { + mode = hosts.size() == 1 && requiredReplicaSetName == null + ? ClusterConnectionMode.SINGLE + : ClusterConnectionMode.MULTIPLE; + } + } + requiredClusterType = builder.requiredClusterType; + localThresholdMS = builder.localThresholdMS; + serverSelector = builder.serverSelector; + serverSelectionTimeoutMS = builder.serverSelectionTimeoutMS; + clusterListeners = unmodifiableList(builder.clusterListeners); + } +} diff --git a/driver-core/src/main/com/mongodb/connection/ClusterType.java b/driver-core/src/main/com/mongodb/connection/ClusterType.java new file mode 100644 index 00000000000..6bc05bbd224 --- /dev/null +++ b/driver-core/src/main/com/mongodb/connection/ClusterType.java @@ -0,0 +1,50 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.connection; + +/** + * An enumeration of all possible cluster types. + * + * @since 3.0 + */ +public enum ClusterType { + /** + * A standalone mongod server. A cluster of one. + */ + STANDALONE, + + /** + * A replicas set cluster. + */ + REPLICA_SET, + + /** + * A sharded cluster, connected via one or more mongos servers. 
+ */ + SHARDED,
+ + /** + * A load-balanced cluster, connected via a single load balancer. + * + * @since 4.3 + */ + LOAD_BALANCED,
+ + /** + * The cluster type is not yet known. + */ + UNKNOWN +}
diff --git a/driver-core/src/main/com/mongodb/connection/ConnectionDescription.java b/driver-core/src/main/com/mongodb/connection/ConnectionDescription.java new file mode 100644 index 00000000000..c8c213398b7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/connection/ConnectionDescription.java @@ -0,0 +1,386 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */
+ +package com.mongodb.connection;
+ +import com.mongodb.ServerAddress; +import com.mongodb.annotations.Immutable; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.types.ObjectId;
+ +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Objects;
+ +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.connection.ServerDescription.getDefaultMaxDocumentSize; +import static com.mongodb.internal.operation.ServerVersionHelper.UNKNOWN_WIRE_VERSION;
+ +/** + * A description of a connection to a MongoDB server. + * + * @since 3.0 + */ +@Immutable +public class ConnectionDescription { + @Nullable private final ObjectId serviceId; + private final ConnectionId connectionId; + private final int maxWireVersion; + private final ServerType serverType; + private final int maxBatchCount; + private final int maxDocumentSize; + private final int maxMessageSize; + private final List<String> compressors; + private final BsonArray saslSupportedMechanisms; + private final Integer logicalSessionTimeoutMinutes;
+ + private static final int DEFAULT_MAX_MESSAGE_SIZE = 0x2000000; // 32MB + private static final int DEFAULT_MAX_WRITE_BATCH_SIZE = 512;
+ + /** + * Construct a defaulted connection description instance. + * + * @param serverId the server address + */ + public ConnectionDescription(final ServerId serverId) { + this(new ConnectionId(serverId), UNKNOWN_WIRE_VERSION, ServerType.UNKNOWN, DEFAULT_MAX_WRITE_BATCH_SIZE, + getDefaultMaxDocumentSize(), DEFAULT_MAX_MESSAGE_SIZE, Collections.emptyList()); + }
+ + /** + * Construct an instance. + * + * @param connectionId the connection id + * @param maxWireVersion the max wire version + * @param serverType the server type + * @param maxBatchCount the max batch count + * @param maxDocumentSize the max document size in bytes + * @param maxMessageSize the max message size in bytes + * @param compressors the available compressors on the connection + * @since 3.10 + */ + public ConnectionDescription(final ConnectionId connectionId, final int maxWireVersion, + final ServerType serverType, final int maxBatchCount, final int maxDocumentSize, + final int maxMessageSize, final List<String> compressors) { + this(connectionId, maxWireVersion, serverType, maxBatchCount, maxDocumentSize, maxMessageSize, compressors, null); + }
+ + /** + * Construct an instance.
+ * + * @param connectionId the connection id + * @param maxWireVersion the max wire version + * @param serverType the server type + * @param maxBatchCount the max batch count + * @param maxDocumentSize the max document size in bytes + * @param maxMessageSize the max message size in bytes + * @param compressors the available compressors on the connection + * @param saslSupportedMechanisms the supported SASL mechanisms + * @since 4.1 + */ + public ConnectionDescription(final ConnectionId connectionId, final int maxWireVersion, + final ServerType serverType, final int maxBatchCount, final int maxDocumentSize, + final int maxMessageSize, final List<String> compressors, + @Nullable final BsonArray saslSupportedMechanisms) { + this(null, connectionId, maxWireVersion, serverType, maxBatchCount, maxDocumentSize, maxMessageSize, compressors, + saslSupportedMechanisms); + }
+ + /** + * Construct an instance. + * + * @param connectionId the connection id + * @param maxWireVersion the max wire version + * @param serverType the server type + * @param maxBatchCount the max batch count + * @param maxDocumentSize the max document size in bytes + * @param maxMessageSize the max message size in bytes + * @param compressors the available compressors on the connection + * @param saslSupportedMechanisms the supported SASL mechanisms + * @param logicalSessionTimeoutMinutes the logical session timeout, in minutes + * @since 4.10 + */ + public ConnectionDescription(final ConnectionId connectionId, final int maxWireVersion, + final ServerType serverType, final int maxBatchCount, final int maxDocumentSize, + final int maxMessageSize, final List<String> compressors, + @Nullable final BsonArray saslSupportedMechanisms, + @Nullable final Integer logicalSessionTimeoutMinutes) { + this(null, connectionId, maxWireVersion, serverType, maxBatchCount, maxDocumentSize, maxMessageSize, compressors, + saslSupportedMechanisms, logicalSessionTimeoutMinutes); + }
+ + /** + * Construct an instance.
+ * + * @param serviceId the service id, which may be null + * @param connectionId the connection id + * @param maxWireVersion the max wire version + * @param serverType the server type + * @param maxBatchCount the max batch count + * @param maxDocumentSize the max document size in bytes + * @param maxMessageSize the max message size in bytes + * @param compressors the available compressors on the connection + * @param saslSupportedMechanisms the supported SASL mechanisms + * @since 4.3 + */ + public ConnectionDescription(@Nullable final ObjectId serviceId, final ConnectionId connectionId, final int maxWireVersion, + final ServerType serverType, final int maxBatchCount, final int maxDocumentSize, + final int maxMessageSize, final List<String> compressors, + @Nullable final BsonArray saslSupportedMechanisms) { + this(serviceId, connectionId, maxWireVersion, serverType, maxBatchCount, maxDocumentSize, maxMessageSize, compressors, + saslSupportedMechanisms, null); + }
+ + private ConnectionDescription(@Nullable final ObjectId serviceId, final ConnectionId connectionId, final int maxWireVersion, + final ServerType serverType, final int maxBatchCount, final int maxDocumentSize, + final int maxMessageSize, final List<String> compressors, + @Nullable final BsonArray saslSupportedMechanisms, @Nullable final Integer logicalSessionTimeoutMinutes) { + this.serviceId = serviceId; + this.connectionId = connectionId; + this.serverType = serverType; + this.maxBatchCount = maxBatchCount; + this.maxDocumentSize = maxDocumentSize; + this.maxMessageSize = maxMessageSize; + this.maxWireVersion = maxWireVersion; + this.compressors = notNull("compressors", Collections.unmodifiableList(new ArrayList<>(compressors))); + this.saslSupportedMechanisms = saslSupportedMechanisms; + this.logicalSessionTimeoutMinutes = logicalSessionTimeoutMinutes; + }
+ + /** + * Creates a new connection description with the set connection id + * + * @param connectionId the connection id + * @return the new connection description + * @since 3.8 + */ + public ConnectionDescription withConnectionId(final ConnectionId connectionId) { + notNull("connectionId", connectionId); + return new ConnectionDescription(serviceId, connectionId, maxWireVersion, serverType, maxBatchCount, maxDocumentSize, + maxMessageSize, compressors, saslSupportedMechanisms, logicalSessionTimeoutMinutes); + }
+ + /** + * Creates a new connection description with the given service id + * + * @param serviceId the service id + * @return the new connection description + * @since 4.3 + */ + public ConnectionDescription withServiceId(final ObjectId serviceId) { + notNull("serviceId", serviceId); + return new ConnectionDescription(serviceId, connectionId, maxWireVersion, serverType, maxBatchCount, maxDocumentSize, + maxMessageSize, compressors, saslSupportedMechanisms, logicalSessionTimeoutMinutes); + }
+ + /** + * Gets the server address. + * + * @return the server address + */ + public ServerAddress getServerAddress() { + return connectionId.getServerId().getAddress(); + }
+ + /** + * Gets the id of the connection. If possible, this id will correlate with the connection id that the server puts in its log messages.
+ * + * @return the connection id + */ + public ConnectionId getConnectionId() { + return connectionId; + }
+ + /** + * Gets the id of the service this connection is to + * + * @return the service id, which may be null + * @since 4.3 + */ + @Nullable + public ObjectId getServiceId() { + return serviceId; + }
+ + /** + * The latest version of the wire protocol that this MongoDB server is capable of using to communicate with clients. + * + * @return the maximum protocol version supported by this server + * @since 3.10 + */ + public int getMaxWireVersion() { + return maxWireVersion; + }
+ + /** + * Gets the server type. + * + * @return the server type + */ + public ServerType getServerType() { + return serverType; + }
+ + /** + * Gets the max batch count for bulk write operations. + * + * @return the max batch count + */ + public int getMaxBatchCount() { + return maxBatchCount; + }
+ + /** + * Gets the max document size in bytes for documents to be stored in collections. + * + * @return the max document size in bytes + */ + public int getMaxDocumentSize() { + return maxDocumentSize; + }
+ + /** + * Gets the max message size in bytes for wire protocol messages to be sent to the server. + * + * @return the max message size in bytes. + */ + public int getMaxMessageSize() { + return maxMessageSize; + }
+ + /** + * Gets the compressors supported by this connection. + * + * @return the non-null list of compressors supported by this connection + */ + public List<String> getCompressors() { + return compressors; + }
+ + /** + * Get the supported SASL mechanisms. + * + * @return the supported SASL mechanisms. + * @since 4.1 + */ + @Nullable + public BsonArray getSaslSupportedMechanisms() { + return saslSupportedMechanisms; + }
+ + /** + * Gets the session timeout in minutes. + * + * @return the session timeout in minutes, or null if sessions are not supported by this connection + * @mongodb.server.release 3.6 + * @since 4.10 + */ + @Nullable + public Integer getLogicalSessionTimeoutMinutes() { + return logicalSessionTimeoutMinutes; + }
+ + /** + * Get the default maximum message size. + * + * @return the default maximum message size. + */ + public static int getDefaultMaxMessageSize() { + return DEFAULT_MAX_MESSAGE_SIZE; + }
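As a usage sketch (not part of the diff): the description is surfaced through command monitoring events, so a listener can log connection metadata at runtime. The listener class below and its log format are illustrative assumptions.

    public final class ConnectionMetadataLogger implements CommandListener {
        @Override
        public void commandStarted(final CommandStartedEvent event) {
            ConnectionDescription description = event.getConnectionDescription();
            System.out.printf("command=%s address=%s serverType=%s maxWireVersion=%d%n",
                    event.getCommandName(), description.getServerAddress(),
                    description.getServerType(), description.getMaxWireVersion());
        }

        @Override
        public void commandSucceeded(final CommandSucceededEvent event) {
            // no-op
        }

        @Override
        public void commandFailed(final CommandFailedEvent event) {
            // no-op
        }
    }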
+ + /** + * Get the default maximum write batch size. + * + * @return the default maximum write batch size. + */ + public static int getDefaultMaxWriteBatchSize() { + return DEFAULT_MAX_WRITE_BATCH_SIZE; + }
+ + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + }
+ + ConnectionDescription that = (ConnectionDescription) o;
+ + if (maxWireVersion != that.maxWireVersion) { + return false; + } + if (maxBatchCount != that.maxBatchCount) { + return false; + } + if (maxDocumentSize != that.maxDocumentSize) { + return false; + } + if (maxMessageSize != that.maxMessageSize) { + return false; + } + if (!Objects.equals(serviceId, that.serviceId)) { + return false; + } + if (!connectionId.equals(that.connectionId)) { + return false; + } + if (serverType != that.serverType) { + return false; + } + if (!compressors.equals(that.compressors)) { + return false; + } + if (!Objects.equals(logicalSessionTimeoutMinutes, that.logicalSessionTimeoutMinutes)) { + return false; + } + return Objects.equals(saslSupportedMechanisms, that.saslSupportedMechanisms); + }
+ + @Override + public int hashCode() { + int result = connectionId.hashCode(); + result = 31 * result + maxWireVersion; + result = 31 * result + serverType.hashCode(); + result = 31 * result + maxBatchCount; + result = 31 * result + maxDocumentSize; + result = 31 * result + maxMessageSize; + result = 31 * result + compressors.hashCode(); + result = 31 * result + (serviceId != null ? serviceId.hashCode() : 0); + result = 31 * result + (saslSupportedMechanisms != null ? saslSupportedMechanisms.hashCode() : 0); + result = 31 * result + (logicalSessionTimeoutMinutes != null ? logicalSessionTimeoutMinutes.hashCode() : 0); + return result; + }
+ + @Override + public String toString() { + return "ConnectionDescription{" + + "connectionId=" + connectionId + + ", maxWireVersion=" + maxWireVersion + + ", serverType=" + serverType + + ", maxBatchCount=" + maxBatchCount + + ", maxDocumentSize=" + maxDocumentSize + + ", maxMessageSize=" + maxMessageSize + + ", compressors=" + compressors + + ", logicalSessionTimeoutMinutes=" + logicalSessionTimeoutMinutes + + ", serviceId=" + serviceId + + '}'; + } +}
diff --git a/driver-core/src/main/com/mongodb/connection/ConnectionId.java b/driver-core/src/main/com/mongodb/connection/ConnectionId.java new file mode 100644 index 00000000000..f634f2ab2e4 --- /dev/null +++ b/driver-core/src/main/com/mongodb/connection/ConnectionId.java @@ -0,0 +1,153 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */
+ +package com.mongodb.connection;
+ +import com.mongodb.annotations.Immutable; +import com.mongodb.lang.Nullable;
+ +import java.util.Objects; +import java.util.concurrent.atomic.AtomicLong;
+ +import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.assertions.Assertions.notNull; +import static java.lang.String.format;
+ +/** + * An immutable connection identifier of a connection to a MongoDB server. + *

<p>Contains a locally created id and, if available, the MongoDB server created connection id</p>

+ * + * @since 3.0 + */ +@Immutable +public final class ConnectionId { + private static final AtomicLong INCREMENTING_ID = new AtomicLong(); + + private final ServerId serverId; + private final long localValue; + @Nullable + private final Long serverValue; + private final String stringValue; + + /** + * Construct an instance with the given server id. + * + * @param serverId the server id + * @since 3.8 + */ + public ConnectionId(final ServerId serverId) { + this(serverId, INCREMENTING_ID.incrementAndGet(), null); + } + + /** + * Construct an instance with the given serverId, localValue, and serverValue. + * + *

+ * <p>Useful for testing, but generally prefer {@link #withServerValue(long)}</p>

+ * + * @param serverId the server id + * @param localValue the local value + * @param serverValue the server value, which may be null + * @see #withServerValue(long) + * @since 3.11 + */ + public ConnectionId(final ServerId serverId, final long localValue, @Nullable final Long serverValue) { + this.serverId = notNull("serverId", serverId); + this.localValue = localValue; + this.serverValue = serverValue; + if (serverValue == null) { + stringValue = format("connectionId{localValue:%s}", localValue); + } else { + stringValue = format("connectionId{localValue:%s, serverValue:%s}", localValue, serverValue); + } + } + + /** + * Creates a new connectionId with the set server value + * + * @param serverValue the server value + * @return the new connection id + * @since 3.8 + */ + public ConnectionId withServerValue(final long serverValue) { + isTrue("server value is null", this.serverValue == null); + return new ConnectionId(serverId, localValue, serverValue); + } + + /** + * Gets the server id. + * + * @return the server id + */ + public ServerId getServerId() { + return serverId; + } + + /** + * Gets the locally created id value for the connection + * + * @return the locally created id value for the connection + */ + public long getLocalValue() { + return localValue; + } + + /** + * Gets the server generated id value for the connection or null if not set. + * + * @return the server generated id value for the connection or null if not set. + */ + @Nullable + public Long getServerValue() { + return serverValue; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ConnectionId that = (ConnectionId) o; + + if (localValue != that.localValue) { + return false; + } + if (!serverId.equals(that.serverId)) { + return false; + } + if (!Objects.equals(serverValue, that.serverValue)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return Objects.hash(serverId, localValue, serverValue); + } + + @Override + public String toString() { + return stringValue; + } +} diff --git a/driver-core/src/main/com/mongodb/connection/ConnectionPoolSettings.java b/driver-core/src/main/com/mongodb/connection/ConnectionPoolSettings.java new file mode 100644 index 00000000000..4b85893640f --- /dev/null +++ b/driver-core/src/main/com/mongodb/connection/ConnectionPoolSettings.java @@ -0,0 +1,549 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+ +package com.mongodb.connection;
+ +import com.mongodb.ConnectionString; +import com.mongodb.annotations.Immutable; +import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.event.ConnectionCheckOutStartedEvent; +import com.mongodb.event.ConnectionCheckedInEvent; +import com.mongodb.event.ConnectionCheckedOutEvent; +import com.mongodb.event.ConnectionCreatedEvent; +import com.mongodb.event.ConnectionPoolListener; +import com.mongodb.event.ConnectionReadyEvent;
+ +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit;
+ +import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.Collections.unmodifiableList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.MINUTES;
+ +/** + * All settings that relate to the pool of connections to a MongoDB server. + * + * @since 3.0 + */ +@Immutable +public class ConnectionPoolSettings { + private final List<ConnectionPoolListener> connectionPoolListeners; + private final int maxSize; + private final int minSize; + private final long maxWaitTimeMS; + private final long maxConnectionLifeTimeMS; + private final long maxConnectionIdleTimeMS; + private final long maintenanceInitialDelayMS; + private final long maintenanceFrequencyMS; + private final int maxConnecting;
+ + /** + * Gets a Builder for creating a new ConnectionPoolSettings instance. + * + * @return a new Builder for ConnectionPoolSettings. + */ + public static Builder builder() { + return new Builder(); + }
+ + /** + * Gets a Builder for creating a new ConnectionPoolSettings instance. + * + * @param connectionPoolSettings the existing connection pool settings to configure the builder with + * @return a new Builder for ConnectionPoolSettings + * @since 3.5 + */ + public static Builder builder(final ConnectionPoolSettings connectionPoolSettings) { + return builder().applySettings(connectionPoolSettings); + }
+ + /** + * A builder for creating ConnectionPoolSettings. + */ + @NotThreadSafe + public static final class Builder { + private List<ConnectionPoolListener> connectionPoolListeners = new ArrayList<>(); + private int maxSize = 100; + private int minSize; + private long maxWaitTimeMS = 1000 * 60 * 2; + private long maxConnectionLifeTimeMS; + private long maxConnectionIdleTimeMS; + private long maintenanceInitialDelayMS; + private long maintenanceFrequencyMS = MILLISECONDS.convert(1, MINUTES); + private int maxConnecting = 2;
+ + Builder() { + }
+ + /** + * Applies the connectionPoolSettings to the builder + *

<p>Note: Overwrites all existing settings</p>

+ * + * @param connectionPoolSettings the connectionPoolSettings + * @return this + * @since 3.7 + */ + public Builder applySettings(final ConnectionPoolSettings connectionPoolSettings) { + notNull("connectionPoolSettings", connectionPoolSettings); + connectionPoolListeners = new ArrayList<>(connectionPoolSettings.connectionPoolListeners); + maxSize = connectionPoolSettings.maxSize; + minSize = connectionPoolSettings.minSize; + maxWaitTimeMS = connectionPoolSettings.maxWaitTimeMS; + maxConnectionLifeTimeMS = connectionPoolSettings.maxConnectionLifeTimeMS; + maxConnectionIdleTimeMS = connectionPoolSettings.maxConnectionIdleTimeMS; + maintenanceInitialDelayMS = connectionPoolSettings.maintenanceInitialDelayMS; + maintenanceFrequencyMS = connectionPoolSettings.maintenanceFrequencyMS; + maxConnecting = connectionPoolSettings.maxConnecting; + return this; + } + + /** + *

<p>The maximum number of connections allowed. Those connections will be kept in the pool when idle. Once the pool is exhausted, + * any operation requiring a connection will block waiting for an available connection.</p>

+ * + *

<p>Default is 100.</p>

+ * + * @param maxSize the maximum number of connections in the pool; if 0, then there is no limit. + * @return this + * @see #getMaxSize() + * @see #getMaxWaitTime(TimeUnit) + */ + public Builder maxSize(final int maxSize) { + this.maxSize = maxSize; + return this; + } + + /** + *

<p>The minimum number of connections. Those connections will be kept in the pool when idle, and the pool will ensure that it + * contains at least this minimum number.</p>

+ * + *

<p>Default is 0.</p>

+ * + * @param minSize the minimum number of connections to have in the pool at all times. + * @return this + */ + public Builder minSize(final int minSize) { + this.minSize = minSize; + return this; + } + + /** + * The maximum duration to wait until either: + *
+ * <ul>
+ * <li>an {@linkplain ConnectionCheckedOutEvent in-use connection} becomes {@linkplain ConnectionCheckedInEvent available}; or</li>
+ * <li>a {@linkplain ConnectionCreatedEvent connection is created} and begins to be {@linkplain ConnectionReadyEvent established}.
+ * The time between {@linkplain ConnectionCheckOutStartedEvent requesting} a connection
+ * and it being created is limited by this maximum duration.
+ * The maximum time between it being created and {@linkplain ConnectionCheckedOutEvent successfully checked out},
+ * which includes the time to {@linkplain ConnectionReadyEvent establish} the created connection,
+ * is affected by {@link SocketSettings#getConnectTimeout(TimeUnit)}, {@link SocketSettings#getReadTimeout(TimeUnit)}
+ * among others, and is not affected by this maximum duration.</li>
+ * </ul>
+ * The reasons it is not always possible to create and start establishing a connection
+ * whenever there is no available connection:
+ * <ul>
+ * <li>the number of connections per pool is limited by {@link #getMaxSize()};</li>
+ * <li>the number of connections a pool may be establishing concurrently is limited by {@link #getMaxConnecting()}.</li>
+ * </ul>
+ * + *

<p>Default is 2 minutes. A value of 0 means that it will not wait. A negative value means it will wait indefinitely.</p>

+ * + * @param maxWaitTime the maximum amount of time to wait + * @param timeUnit the TimeUnit for this wait period + * @return this + * @see #getMaxWaitTime(TimeUnit) + */ + public Builder maxWaitTime(final long maxWaitTime, final TimeUnit timeUnit) { + this.maxWaitTimeMS = MILLISECONDS.convert(maxWaitTime, timeUnit); + return this; + }
+ + /** + * The maximum time a pooled connection can live for. A zero value indicates no limit to the life time. A pooled connection that + * has exceeded its life time will be closed and replaced when necessary by a new connection. + * + * @param maxConnectionLifeTime the maximum length of time a connection can live + * @param timeUnit the TimeUnit for this time period + * @return this + */ + public Builder maxConnectionLifeTime(final long maxConnectionLifeTime, final TimeUnit timeUnit) { + this.maxConnectionLifeTimeMS = MILLISECONDS.convert(maxConnectionLifeTime, timeUnit); + return this; + }
+ + /** + * The maximum idle time of a pooled connection. A zero value indicates no limit to the idle time. A pooled connection that has + * exceeded its idle time will be closed and replaced when necessary by a new connection. + * + * @param maxConnectionIdleTime the maximum time a connection can be unused + * @param timeUnit the TimeUnit for this time period + * @return this + */ + public Builder maxConnectionIdleTime(final long maxConnectionIdleTime, final TimeUnit timeUnit) { + this.maxConnectionIdleTimeMS = MILLISECONDS.convert(maxConnectionIdleTime, timeUnit); + return this; + }
+ + /** + * The period of time to wait before running the first maintenance job on the connection pool. + * + * @param maintenanceInitialDelay the time period to wait + * @param timeUnit the TimeUnit for this time period + * @return this + */ + public Builder maintenanceInitialDelay(final long maintenanceInitialDelay, final TimeUnit timeUnit) { + this.maintenanceInitialDelayMS = MILLISECONDS.convert(maintenanceInitialDelay, timeUnit); + return this; + }
+ + /** + * The time period between runs of the maintenance job. + * + * @param maintenanceFrequency the time period between runs of the maintenance job + * @param timeUnit the TimeUnit for this time period + * @return this + */ + public Builder maintenanceFrequency(final long maintenanceFrequency, final TimeUnit timeUnit) { + this.maintenanceFrequencyMS = MILLISECONDS.convert(maintenanceFrequency, timeUnit); + return this; + }
+ + /** + * Adds the given connection pool listener. + * + * @param connectionPoolListener the non-null connection pool listener + * @return this + * @since 3.5 + */ + public Builder addConnectionPoolListener(final ConnectionPoolListener connectionPoolListener) { + connectionPoolListeners.add(notNull("connectionPoolListener", connectionPoolListener)); + return this; + }
+ + /** + * Sets the connection pool listeners. + * + * @param connectionPoolListeners list of connection pool listeners + * @return this + * @since 4.5 + */ + public Builder connectionPoolListenerList(final List<ConnectionPoolListener> connectionPoolListeners) { + notNull("connectionPoolListeners", connectionPoolListeners); + this.connectionPoolListeners = new ArrayList<>(connectionPoolListeners); + return this; + }
+ + /** + * The maximum number of connections a pool may be establishing concurrently. + * + * @param maxConnecting The maximum number of connections a pool may be establishing concurrently. Must be positive. + * @return {@code this}.
+ * @see ConnectionPoolSettings#getMaxConnecting() + * @see #getMaxWaitTime(TimeUnit) + * @since 4.4 + */ + public Builder maxConnecting(final int maxConnecting) { + this.maxConnecting = maxConnecting; + return this; + }
+ + /** + * Creates a new ConnectionPoolSettings object with the settings initialised on this builder. + * + * @return a new ConnectionPoolSettings object + */ + public ConnectionPoolSettings build() { + return new ConnectionPoolSettings(this); + }
+ + /** + * Takes the settings from the given {@code ConnectionString} and applies them to the builder + * + * @param connectionString the connection string containing details of how to connect to MongoDB + * @return this + */ + public Builder applyConnectionString(final ConnectionString connectionString) { + Integer maxConnectionPoolSize = connectionString.getMaxConnectionPoolSize(); + if (maxConnectionPoolSize != null) { + maxSize(maxConnectionPoolSize); + }
+ + Integer minConnectionPoolSize = connectionString.getMinConnectionPoolSize(); + if (minConnectionPoolSize != null) { + minSize(minConnectionPoolSize); + }
+ + Integer maxWaitTime = connectionString.getMaxWaitTime(); + if (maxWaitTime != null) { + maxWaitTime(maxWaitTime, MILLISECONDS); + }
+ + Integer maxConnectionIdleTime = connectionString.getMaxConnectionIdleTime(); + if (maxConnectionIdleTime != null) { + maxConnectionIdleTime(maxConnectionIdleTime, MILLISECONDS); + }
+ + Integer maxConnectionLifeTime = connectionString.getMaxConnectionLifeTime(); + if (maxConnectionLifeTime != null) { + maxConnectionLifeTime(maxConnectionLifeTime, MILLISECONDS); + }
+ + Integer maxConnecting = connectionString.getMaxConnecting(); + if (maxConnecting != null) { + maxConnecting(maxConnecting); + }
+ + return this; + } + }
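As a usage sketch of the builder above (the sizes are arbitrary, illustrative values): the wait time bounds check-out, while maxSize and maxConnecting bound how many connections may exist or be established at once.

    ConnectionPoolSettings poolSettings = ConnectionPoolSettings.builder()
            .maxSize(50)                                  // at most 50 pooled connections
            .minSize(5)                                   // keep at least 5 connections warm
            .maxWaitTime(30, TimeUnit.SECONDS)            // fail check-out after 30 seconds
            .maxConnectionIdleTime(10, TimeUnit.MINUTES)  // recycle connections idle for 10 minutes
            .maxConnecting(4)                             // establish at most 4 connections concurrently
            .build();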

+ + /** + * <p>The maximum number of connections allowed. Those connections will be kept in the pool when idle. Once the pool is exhausted, any + * operation requiring a connection will block waiting for an available connection.</p>

+ * + *

<p>Default is 100.</p>

+ * + * @return the maximum number of connections in the pool; if 0, then there is no limit. + * @see Builder#maxSize(int) + * @see ConnectionString#getMaxConnectionPoolSize() + * @see #getMaxWaitTime(TimeUnit) + */ + public int getMaxSize() { + return maxSize; + } + + /** + *

<p>The minimum number of connections. Those connections will be kept in the pool when idle, and the pool will ensure that it contains + * at least this minimum number.</p>

+ * + *

<p>Default is 0.</p>

+ * + * @return the minimum number of connections to have in the pool at all times. + */ + public int getMinSize() { + return minSize; + } + + /** + * The maximum duration to wait until either: + *
+ * <ul>
+ * <li>an {@linkplain ConnectionCheckedOutEvent in-use connection} becomes {@linkplain ConnectionCheckedInEvent available}; or</li>
+ * <li>a {@linkplain ConnectionCreatedEvent connection is created} and begins to be {@linkplain ConnectionReadyEvent established}.
+ * The time between {@linkplain ConnectionCheckOutStartedEvent requesting} a connection
+ * and it being created is limited by this maximum duration.
+ * The maximum time between it being created and {@linkplain ConnectionCheckedOutEvent successfully checked out},
+ * which includes the time to {@linkplain ConnectionReadyEvent establish} the created connection,
+ * is affected by {@link SocketSettings#getConnectTimeout(TimeUnit)}, {@link SocketSettings#getReadTimeout(TimeUnit)}
+ * among others, and is not affected by this maximum duration.</li>
+ * </ul>
+ * The reasons it is not always possible to create and start establishing a connection
+ * whenever there is no available connection:
+ * <ul>
+ * <li>the number of connections per pool is limited by {@link #getMaxSize()};</li>
+ * <li>the number of connections a pool may be establishing concurrently is limited by {@link #getMaxConnecting()}.</li>
+ * </ul>
+ * + *

<p>Default is 2 minutes. A value of 0 means that it will not wait. A negative value means it will wait indefinitely.</p>

+ * + * @param timeUnit the TimeUnit for this wait period + * @return the maximum amount of time to wait in the given TimeUnits + * @see Builder#maxWaitTime(long, TimeUnit) + * @see ConnectionString#getMaxWaitTime() + */ + public long getMaxWaitTime(final TimeUnit timeUnit) { + return timeUnit.convert(maxWaitTimeMS, MILLISECONDS); + }
+ + /** + * The maximum time a pooled connection can live for. A zero value indicates no limit to the life time. A pooled connection that has + * exceeded its life time will be closed and replaced when necessary by a new connection. + * + * @param timeUnit the TimeUnit to use for this time period + * @return the maximum length of time a connection can live in the given TimeUnits + */ + public long getMaxConnectionLifeTime(final TimeUnit timeUnit) { + return timeUnit.convert(maxConnectionLifeTimeMS, MILLISECONDS); + }
+ + /** + * Returns the maximum idle time of a pooled connection. A zero value indicates no limit to the idle time. A pooled connection that + * has exceeded its idle time will be closed and replaced when necessary by a new connection. + * + * @param timeUnit the TimeUnit to use for this time period + * @return the maximum time a connection can be unused, in the given TimeUnits + */ + public long getMaxConnectionIdleTime(final TimeUnit timeUnit) { + return timeUnit.convert(maxConnectionIdleTimeMS, MILLISECONDS); + }
+ + /** + * Returns the period of time to wait before running the first maintenance job on the connection pool. + * + * @param timeUnit the TimeUnit to use for this time period + * @return the time period to wait in the given units + */ + public long getMaintenanceInitialDelay(final TimeUnit timeUnit) { + return timeUnit.convert(maintenanceInitialDelayMS, MILLISECONDS); + }
+ + /** + * Returns the time period between runs of the maintenance job. + * + * @param timeUnit the TimeUnit to use for this time period + * @return the time period between runs of the maintenance job in the given units + */ + public long getMaintenanceFrequency(final TimeUnit timeUnit) { + return timeUnit.convert(maintenanceFrequencyMS, MILLISECONDS); + }
+ + /** + * Gets the list of added {@code ConnectionPoolListener}. The default is an empty list. + * + * @return the unmodifiable list of connection pool listeners + * @since 3.5 + */ + public List<ConnectionPoolListener> getConnectionPoolListeners() { + return connectionPoolListeners; + }
+ + /** + * The maximum number of connections a pool may be establishing concurrently. + * Establishment of a connection is a part of its life cycle + * starting after a {@link ConnectionCreatedEvent} and ending before a {@link ConnectionReadyEvent}. + *

+ * <p>Default is 2.</p>

+ * + * @return The maximum number of connections a pool may be establishing concurrently. + * @see Builder#maxConnecting(int) + * @see ConnectionString#getMaxConnecting() + * @see #getMaxWaitTime(TimeUnit) + * @since 4.4 + */ + public int getMaxConnecting() { + return maxConnecting; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ConnectionPoolSettings that = (ConnectionPoolSettings) o; + + if (maxConnectionIdleTimeMS != that.maxConnectionIdleTimeMS) { + return false; + } + if (maxConnectionLifeTimeMS != that.maxConnectionLifeTimeMS) { + return false; + } + if (maxSize != that.maxSize) { + return false; + } + if (minSize != that.minSize) { + return false; + } + if (maintenanceInitialDelayMS != that.maintenanceInitialDelayMS) { + return false; + } + if (maintenanceFrequencyMS != that.maintenanceFrequencyMS) { + return false; + } + if (maxWaitTimeMS != that.maxWaitTimeMS) { + return false; + } + if (!connectionPoolListeners.equals(that.connectionPoolListeners)) { + return false; + } + if (maxConnecting != that.maxConnecting) { + return false; + } + return true; + } + + @Override + public int hashCode() { + int result = maxSize; + result = 31 * result + minSize; + result = 31 * result + (int) (maxWaitTimeMS ^ (maxWaitTimeMS >>> 32)); + result = 31 * result + (int) (maxConnectionLifeTimeMS ^ (maxConnectionLifeTimeMS >>> 32)); + result = 31 * result + (int) (maxConnectionIdleTimeMS ^ (maxConnectionIdleTimeMS >>> 32)); + result = 31 * result + (int) (maintenanceInitialDelayMS ^ (maintenanceInitialDelayMS >>> 32)); + result = 31 * result + (int) (maintenanceFrequencyMS ^ (maintenanceFrequencyMS >>> 32)); + result = 31 * result + connectionPoolListeners.hashCode(); + result = 31 * result + maxConnecting; + return result; + } + + @Override + public String toString() { + return "ConnectionPoolSettings{" + + "maxSize=" + maxSize + + ", minSize=" + minSize + + ", maxWaitTimeMS=" + maxWaitTimeMS + + ", maxConnectionLifeTimeMS=" + maxConnectionLifeTimeMS + + ", maxConnectionIdleTimeMS=" + maxConnectionIdleTimeMS + + ", maintenanceInitialDelayMS=" + maintenanceInitialDelayMS + + ", maintenanceFrequencyMS=" + maintenanceFrequencyMS + + ", connectionPoolListeners=" + connectionPoolListeners + + ", maxConnecting=" + maxConnecting + + '}'; + } + + ConnectionPoolSettings(final Builder builder) { + isTrue("maxSize >= 0", builder.maxSize >= 0); + isTrue("minSize >= 0", builder.minSize >= 0); + isTrue("maintenanceInitialDelayMS >= 0", builder.maintenanceInitialDelayMS >= 0); + isTrue("maxConnectionLifeTime >= 0", builder.maxConnectionLifeTimeMS >= 0); + isTrue("maxConnectionIdleTime >= 0", builder.maxConnectionIdleTimeMS >= 0); + isTrue("sizeMaintenanceFrequency > 0", builder.maintenanceFrequencyMS > 0); + isTrue("maxSize >= minSize", builder.maxSize >= builder.minSize); + isTrue("maxConnecting > 0", builder.maxConnecting > 0); + + maxSize = builder.maxSize; + minSize = builder.minSize; + maxWaitTimeMS = builder.maxWaitTimeMS; + maxConnectionLifeTimeMS = builder.maxConnectionLifeTimeMS; + maxConnectionIdleTimeMS = builder.maxConnectionIdleTimeMS; + maintenanceInitialDelayMS = builder.maintenanceInitialDelayMS; + maintenanceFrequencyMS = builder.maintenanceFrequencyMS; + connectionPoolListeners = unmodifiableList(builder.connectionPoolListeners); + maxConnecting = builder.maxConnecting; + } +} diff --git a/driver-core/src/main/com/mongodb/connection/NettyTransportSettings.java 
b/driver-core/src/main/com/mongodb/connection/NettyTransportSettings.java new file mode 100644 index 00000000000..cb3a7c7c090 --- /dev/null +++ b/driver-core/src/main/com/mongodb/connection/NettyTransportSettings.java @@ -0,0 +1,206 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */
+ +package com.mongodb.connection;
+ +import com.mongodb.lang.Nullable; +import io.netty.buffer.ByteBufAllocator; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.socket.SocketChannel; +import io.netty.handler.ssl.ReferenceCountedOpenSslClientContext; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslContextBuilder; +import io.netty.handler.ssl.SslProvider;
+ +import java.security.Security;
+ +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull;
+ +/** + * {@code TransportSettings} for a Netty-based transport implementation. + * Shallowly immutable. + * + * @since 4.11 + */ +public final class NettyTransportSettings extends TransportSettings {
+ + private final EventLoopGroup eventLoopGroup; + private final Class<? extends SocketChannel> socketChannelClass; + private final ByteBufAllocator allocator; + private final SslContext sslContext;
+ + static Builder builder() { + return new Builder(); + }
+ + /** + * A builder for an instance of {@link NettyTransportSettings}. + */ + public static final class Builder { + private ByteBufAllocator allocator; + private Class<? extends SocketChannel> socketChannelClass; + private EventLoopGroup eventLoopGroup; + private SslContext sslContext;
+ + private Builder() { + }
+ + /** + * Sets the allocator. + * + * @param allocator the allocator to use for ByteBuf instances + * @return this + * @see #getAllocator() + */ + public Builder allocator(final ByteBufAllocator allocator) { + this.allocator = notNull("allocator", allocator); + return this; + }
+ + /** + * Sets the socket channel class + * + * @param socketChannelClass the socket channel class + * @return this + * @see #getSocketChannelClass() + */ + public Builder socketChannelClass(final Class<? extends SocketChannel> socketChannelClass) { + this.socketChannelClass = notNull("socketChannelClass", socketChannelClass); + return this; + }
+ + /** + * Sets the event loop group. + *

<p>The application is responsible for shutting down the provided {@code eventLoopGroup}</p>

+ * + * @param eventLoopGroup the event loop group that all channels created by this factory will be a part of + * @return this + * @see #getEventLoopGroup() + */ + public Builder eventLoopGroup(final EventLoopGroup eventLoopGroup) { + this.eventLoopGroup = notNull("eventLoopGroup", eventLoopGroup); + return this; + } + + /** + * Sets a {@linkplain SslContextBuilder#forClient() client-side} {@link SslContext io.netty.handler.ssl.SslContext}, + * which overrides the standard {@link SslSettings#getContext()}. + * By default, it is {@code null} and {@link SslSettings#getContext()} is at play. + *

+ * <p>This option may be used as a convenient way to utilize OpenSSL as an alternative + * to the TLS/SSL protocol implementation in a JDK. To achieve this, specify {@link SslProvider#OPENSSL} TLS/SSL protocol provider via + * {@link SslContextBuilder#sslProvider(SslProvider)}. Note that doing so adds a runtime dependency on + * netty-tcnative, which you must satisfy.</p>

+ * <p>Notes:</p>
+ * <ul>
+ * <li>Netty {@link SslContext} may not examine some {@linkplain Security security}/{@linkplain System#getProperties() system}
+ * properties that are used to customize JSSE. Therefore, instead of using them you may have to apply the equivalent
+ * configuration programmatically, if both the {@link SslContextBuilder} and the TLS/SSL protocol provider of choice support it.</li>
+ * <li>Only {@link SslProvider#JDK} and {@link SslProvider#OPENSSL} TLS/SSL protocol providers are supported.</li>
+ * </ul>
+ * + * @param sslContext The Netty {@link SslContext}, which must be created via {@linkplain SslContextBuilder#forClient()}. + * @return {@code this}. + * @see #getSslContext() + */ + public Builder sslContext(final SslContext sslContext) { + this.sslContext = notNull("sslContext", sslContext); + isTrueArgument("sslContext must be client-side", sslContext.isClient()); + isTrueArgument("sslContext must use either SslProvider.JDK or SslProvider.OPENSSL TLS/SSL protocol provider", + !(sslContext instanceof ReferenceCountedOpenSslClientContext));
+ + return this; + }
+ + /** + * Build an instance of {@code NettyTransportSettings}. + * + * @return an instance of {@code NettyTransportSettings} + */ + public NettyTransportSettings build() { + return new NettyTransportSettings(this); + } + }
+ + /** + * Gets the event loop group. + * + * @return the event loop group + * @see Builder#eventLoopGroup(EventLoopGroup) + */ + @Nullable + public EventLoopGroup getEventLoopGroup() { + return eventLoopGroup; + }
+ + /** + * Gets the socket channel class. + * + * @return the socket channel class + * @see Builder#socketChannelClass(Class) + */ + @Nullable + public Class<? extends SocketChannel> getSocketChannelClass() { + return socketChannelClass; + }
+ + /** + * Gets the allocator. + * + * @return the allocator + * @see Builder#allocator(ByteBufAllocator) + */ + @Nullable + public ByteBufAllocator getAllocator() { + return allocator; + }
+ + /** + * Gets the SSL Context. + * + * @return the SSL context + * @see Builder#sslContext(SslContext) + */ + @Nullable + public SslContext getSslContext() { + return sslContext; + }
+ + @Override + public String toString() { + return "NettyTransportSettings{" + + "eventLoopGroup=" + eventLoopGroup + + ", socketChannelClass=" + socketChannelClass + + ", allocator=" + allocator + + ", sslContext=" + sslContext + + '}'; + }
+ + private NettyTransportSettings(final Builder builder) { + allocator = builder.allocator; + socketChannelClass = builder.socketChannelClass; + eventLoopGroup = builder.eventLoopGroup; + sslContext = builder.sslContext; + } +}
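As a usage sketch (not part of the diff): these settings are reached through the public TransportSettings.nettyBuilder() factory and MongoClientSettings.Builder#transportSettings(TransportSettings) from the same release; the URI below is a placeholder, and shutting down the event loop group remains the application's responsibility, per the Javadoc above.

    EventLoopGroup eventLoopGroup = new NioEventLoopGroup();
    MongoClientSettings settings = MongoClientSettings.builder()
            .applyConnectionString(new ConnectionString("mongodb://localhost"))
            .transportSettings(TransportSettings.nettyBuilder()
                    .eventLoopGroup(eventLoopGroup)
                    .build())
            .build();
    // ... use the client, then shut down the group the application owns:
    // eventLoopGroup.shutdownGracefully();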
diff --git a/driver-core/src/main/com/mongodb/connection/ProxySettings.java b/driver-core/src/main/com/mongodb/connection/ProxySettings.java new file mode 100644 index 00000000000..494060c0f93 --- /dev/null +++ b/driver-core/src/main/com/mongodb/connection/ProxySettings.java @@ -0,0 +1,349 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.connection;
+ + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.ConnectionString; +import com.mongodb.MongoClientSettings; +import com.mongodb.annotations.Immutable; +import com.mongodb.lang.Nullable;
+ +import java.nio.charset.StandardCharsets; +import java.util.Objects;
+ +import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull;
+ +/** + * This setting is only applicable when communicating with a MongoDB server using the synchronous variant of {@code MongoClient}. + *
+ * <p>
+ * This setting is furthermore ignored if:
+ * <ul>
+ *     <li>the communication is via {@linkplain com.mongodb.UnixServerAddress Unix domain socket}.</li>
+ *     <li>a {@link TransportSettings} is {@linkplain MongoClientSettings.Builder#transportSettings(TransportSettings)
+ *     configured}.</li>
+ * </ul>
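+ * <p>
+ * Illustrative usage only (a hedged sketch; the proxy address and credentials below are placeholders):
+ * <pre>{@code
+ * ProxySettings proxySettings = ProxySettings.builder()
+ *         .host("proxy.example.com")
+ *         .port(1080)
+ *         .username("user")
+ *         .password("pass")
+ *         .build();
+ * }</pre>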
+ * + * @see SocketSettings#getProxySettings() + * @see ClientEncryptionSettings#getKeyVaultMongoClientSettings() + * @see AutoEncryptionSettings#getKeyVaultMongoClientSettings() + * @since 4.11 + */ +@Immutable +public final class ProxySettings { + + private static final int DEFAULT_PORT = 1080; + @Nullable + private final String host; + + @Nullable + private final Integer port; + + @Nullable + private final String username; + @Nullable + private final String password; + + /** + * Creates a {@link Builder} for creating a new {@link ProxySettings} instance. + * + * @return a new {@link Builder} for {@link ProxySettings}. + */ + public static ProxySettings.Builder builder() { + return new ProxySettings.Builder(); + } + + /** + * Creates a {@link Builder} for creating a new {@link ProxySettings} instance. + * + * @param proxySettings existing {@link ProxySettings} to default the builder settings on. + * @return a new {@link Builder} for {@link ProxySettings}. + */ + public static ProxySettings.Builder builder(final ProxySettings proxySettings) { + return builder().applySettings(proxySettings); + } + + /** + * A builder for an instance of {@code ProxySettings}. + */ + public static final class Builder { + private String host; + private Integer port; + private String username; + private String password; + + private Builder() { + } + + /** + * Applies the provided {@link ProxySettings} to this builder instance. + * + *
+ * <p>
+ * Note: This method overwrites all existing proxy settings previously configured in this builder. + * + * @param proxySettings The {@link ProxySettings} instance containing the proxy configuration to apply. + * @return This {@link ProxySettings.Builder} instance with the updated proxy settings applied. + * @throws IllegalArgumentException If the provided {@link ProxySettings} instance is null. + */ + public ProxySettings.Builder applySettings(final ProxySettings proxySettings) { + notNull("ProxySettings", proxySettings); + this.host = proxySettings.host; + this.port = proxySettings.port; + this.username = proxySettings.username; + this.password = proxySettings.password; + return this; + } + + /** + * Sets the SOCKS5 proxy host to establish a connection through. + * + *
+ * <p>The host can be specified as an IPv4 address (e.g., "192.168.1.1"),
+ * an IPv6 address (e.g., "2001:0db8:85a3:0000:0000:8a2e:0370:7334"),
+ * or a domain name (e.g., "proxy.example.com").</p>
+ * + * @param host The SOCKS5 proxy host to set. + * @return This ProxySettings.Builder instance, configured with the specified proxy host. + * @throws IllegalArgumentException If the provided host is null or empty after trimming. + * @see ProxySettings.Builder#port(int) + * @see #getHost() + */ + public ProxySettings.Builder host(final String host) { + notNull("proxyHost", host); + isTrueArgument("proxyHost is not empty", host.trim().length() > 0); + this.host = host; + return this; + } + + /** + * Sets the port number for the SOCKS5 proxy server. The port should be a non-negative integer + * representing the port through which the SOCKS5 proxy connection will be established. + *
+ * <p>
+ * If a port is specified via this method, a corresponding host must be provided using the {@link #host(String)} method. + *
+ * <p>
+ * If no port is provided, the default port 1080 will be used. + * + * @param port The port number to set for the SOCKS5 proxy server. + * @return This ProxySettings.Builder instance, configured with the specified proxy port. + * @throws IllegalArgumentException If the provided port is negative. + * @see ProxySettings.Builder#host(String) + * @see #getPort() + */ + public ProxySettings.Builder port(final int port) { + isTrueArgument("proxyPort is within the valid range (0 to 65535)", port >= 0 && port <= 65535); + this.port = port; + return this; + } + + /** + * Sets the username for authenticating with the SOCKS5 proxy server. + * The provided username should not be empty or null. + *
+ * <p>
+ * If a username is specified, the corresponding password and proxy host must also be specified using the + * {@link #password(String)} and {@link #host(String)} methods, respectively. + * + * @param username The username to set for proxy authentication. + * @return This ProxySettings.Builder instance, configured with the specified username. + * @throws IllegalArgumentException If the provided username is empty or null. + * @see ProxySettings.Builder#password(String) + * @see ProxySettings.Builder#host(String) + * @see #getUsername() + */ + public ProxySettings.Builder username(final String username) { + notNull("username", username); + isTrueArgument("username is not empty", !username.isEmpty()); + isTrueArgument("username's length in bytes is not greater than 255", + username.getBytes(StandardCharsets.UTF_8).length <= 255); + this.username = username; + return this; + } + + /** + * Sets the password for authenticating with the SOCKS5 proxy server. + * The provided password should not be empty or null. + *
+ * <p>
+ * If a password is specified, the corresponding username and proxy host must also be specified using the + * {@link #username(String)} and {@link #host(String)} methods, respectively. + * + * @param password The password to set for proxy authentication. + * @return This ProxySettings.Builder instance, configured with the specified password. + * @throws IllegalArgumentException If the provided password is empty or null. + * @see ProxySettings.Builder#username(String) + * @see ProxySettings.Builder#host(String) + * @see #getPassword() + */ + public ProxySettings.Builder password(final String password) { + notNull("password", password); + isTrueArgument("password is not empty", !password.isEmpty()); + isTrueArgument("password's length in bytes is not greater than 255", + password.getBytes(StandardCharsets.UTF_8).length <= 255); + this.password = password; + return this; + } + + + /** + * Takes the proxy settings from the given {@code ConnectionString} and applies them to the {@link Builder}. + * + * @param connectionString the connection string containing details of how to connect to proxy server. + * @return this. + * @see ConnectionString#getProxyHost() + * @see ConnectionString#getProxyPort() + * @see ConnectionString#getProxyUsername() + * @see ConnectionString#getProxyPassword() + */ + public ProxySettings.Builder applyConnectionString(final ConnectionString connectionString) { + String proxyHost = connectionString.getProxyHost(); + if (proxyHost != null) { + this.host(proxyHost); + } + + Integer proxyPort = connectionString.getProxyPort(); + if (proxyPort != null) { + this.port(proxyPort); + } + + String proxyUsername = connectionString.getProxyUsername(); + if (proxyUsername != null) { + this.username(proxyUsername); + } + + String proxyPassword = connectionString.getProxyPassword(); + if (proxyPassword != null) { + this.password(proxyPassword); + } + + return this; + } + + /** + * Build an instance of {@code ProxySettings}. + * + * @return the {@link ProxySettings}. + */ + public ProxySettings build() { + return new ProxySettings(this); + } + } + + /** + * Gets the SOCKS5 proxy host. + * + * @return the proxy host value. {@code null} if and only if the {@linkplain #isProxyEnabled() proxy functionality is not enabled}. + * @see Builder#host(String) + */ + @Nullable + public String getHost() { + return host; + } + + /** + * Gets the SOCKS5 proxy port. + * + * @return The port number of the SOCKS5 proxy. If a custom port has been set using {@link Builder#port(int)}, + * that custom port value is returned. Otherwise, the default SOCKS5 port {@value #DEFAULT_PORT} is returned. + * @see Builder#port(int) + */ + public int getPort() { + if (port != null) { + return port; + } + return DEFAULT_PORT; + } + + /** + * Gets the SOCKS5 proxy username. + * + * @return the proxy username value. + * @see Builder#username(String) + */ + @Nullable + public String getUsername() { + return username; + } + + /** + * Gets the SOCKS5 proxy password. + * + * @return the proxy password value. + * @see Builder#password(String) + */ + @Nullable + public String getPassword() { + return password; + } + + /** + * Checks if the SOCKS5 proxy is enabled. + * + * @return {@code true} if the proxy is enabled, {@code false} otherwise. 
+ * @see Builder#host(String) + */ + public boolean isProxyEnabled() { + return host != null; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final ProxySettings that = (ProxySettings) o; + return Objects.equals(host, that.host) + && Objects.equals(port, that.port) + && Objects.equals(username, that.username) + && Objects.equals(password, that.password); + } + + @Override + public int hashCode() { + return Objects.hash(host, port, username, password); + } + + @Override + public String toString() { + return "ProxySettings{" + + "host=" + host + + ", port=" + port + + ", username=" + username + + ", password=" + password + + '}'; + } + + private ProxySettings(final ProxySettings.Builder builder) { + if (builder.host == null) { + isTrue("proxyPort can only be specified with proxyHost", + builder.port == null); + isTrue("proxyPassword can only be specified with proxyHost", + builder.password == null); + isTrue("proxyUsername can only be specified with proxyHost", + builder.username == null); + } + isTrue("Both proxyUsername and proxyPassword must be set together. They cannot be set individually", + (builder.username == null) == (builder.password == null)); + + this.host = builder.host; + this.port = builder.port; + this.username = builder.username; + this.password = builder.password; + } +} + diff --git a/driver-core/src/main/com/mongodb/connection/ServerConnectionState.java b/driver-core/src/main/com/mongodb/connection/ServerConnectionState.java new file mode 100644 index 00000000000..c626b7d4c6a --- /dev/null +++ b/driver-core/src/main/com/mongodb/connection/ServerConnectionState.java @@ -0,0 +1,34 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.connection; + +/** + * Enum of the current state of attempting to connect to a server. + * + * @since 2.12 + */ +public enum ServerConnectionState { + /** + * The application is actively attempting to connect to the remote server. + */ + CONNECTING, + + /** + * The application is connected to the remote server. + */ + CONNECTED +} diff --git a/driver-core/src/main/com/mongodb/connection/ServerDescription.java b/driver-core/src/main/com/mongodb/connection/ServerDescription.java new file mode 100644 index 00000000000..2d675bce217 --- /dev/null +++ b/driver-core/src/main/com/mongodb/connection/ServerDescription.java @@ -0,0 +1,1108 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.connection; + +import com.mongodb.ServerAddress; +import com.mongodb.TagSet; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Immutable; +import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.annotations.Reason; +import com.mongodb.internal.connection.DecimalFormatHelper; +import com.mongodb.internal.connection.Time; +import com.mongodb.lang.Nullable; +import org.bson.types.ObjectId; + +import java.util.Collections; +import java.util.Date; +import java.util.HashSet; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.connection.ServerConnectionState.CONNECTED; +import static com.mongodb.connection.ServerType.LOAD_BALANCER; +import static com.mongodb.connection.ServerType.REPLICA_SET_PRIMARY; +import static com.mongodb.connection.ServerType.REPLICA_SET_SECONDARY; +import static com.mongodb.connection.ServerType.SHARD_ROUTER; +import static com.mongodb.connection.ServerType.STANDALONE; +import static com.mongodb.connection.ServerType.UNKNOWN; + +/** + * Immutable snapshot state of a server. + * + * @since 3.0 + */ +@Immutable +public class ServerDescription { + + /** + * The minimum supported driver server version + * @since 3.8 + */ + public static final String MIN_DRIVER_SERVER_VERSION = "4.2"; + /** + * The minimum supported driver wire version + * @since 3.8 + */ + public static final int MIN_DRIVER_WIRE_VERSION = 8; + /** + * The maximum supported driver wire version + * @since 3.8 + */ + public static final int MAX_DRIVER_WIRE_VERSION = 25; + + private static final int DEFAULT_MAX_DOCUMENT_SIZE = 0x1000000; // 16MB + + private final ServerAddress address; + + private final ServerType type; + /** + * Identifies whether the server is a mongocryptd. + */ + private final boolean cryptd; + private final String canonicalAddress; + private final Set hosts; + private final Set passives; + private final Set arbiters; + private final String primary; + private final int maxDocumentSize; + private final TagSet tagSet; + private final String setName; + private final long roundTripTimeNanos; + private final long minRoundTripTimeNanos; + private final boolean ok; + private final ServerConnectionState state; + + private final int minWireVersion; + private final int maxWireVersion; + + private final ObjectId electionId; + private final Integer setVersion; + private final TopologyVersion topologyVersion; + + private final Date lastWriteDate; + private final long lastUpdateTimeNanos; + + private final Integer logicalSessionTimeoutMinutes; + + private final Throwable exception; + private final boolean helloOk; + + /** + * Gets a Builder for creating a new ServerDescription instance. + * + * @return a new Builder for ServerDescription. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Creates a Builder for a ServerDescription instance based on a previous serverDescription. + * + * @param serverDescription the ServerDescription to base the builder from + * @return a new Builder for ServerDescription. + * @since 4.1 + */ + public static Builder builder(final ServerDescription serverDescription) { + return new Builder(serverDescription); + } + + /** + * Gets the string representing the host name and port that this member of a replica set was configured with, + * e.g. 
{@code "somehost:27019"}. This is typically derived from the "me" field from the "hello" command response. + * + * @return the host name and port that this replica set member is configured with. + */ + @Nullable + public String getCanonicalAddress() { + return canonicalAddress; + } + + /** + * Gets the session timeout in minutes. + * + * @return the session timeout in minutes, or null if sessions are not supported by this server + * @mongodb.server.release 3.6 + * @since 3.6 + */ + @Nullable + public Integer getLogicalSessionTimeoutMinutes() { + return logicalSessionTimeoutMinutes; + } + + + /** + * Gets whether this server supports the "hello" command. The default is {@code false}. + * + * @return true if this server supports the "hello" command. + * @mongodb.server.release 5.0 + * @since 4.3 + */ + public boolean isHelloOk() { + return helloOk; + } + + /** + * A builder for creating ServerDescription. + */ + @NotThreadSafe + public static class Builder { + private ServerAddress address; + private ServerType type = UNKNOWN; + private boolean cryptd = false; + private String canonicalAddress; + private Set hosts = Collections.emptySet(); + private Set passives = Collections.emptySet(); + private Set arbiters = Collections.emptySet(); + private String primary; + private int maxDocumentSize = DEFAULT_MAX_DOCUMENT_SIZE; + private TagSet tagSet = new TagSet(); + private String setName; + private long roundTripTimeNanos; + private long minRoundTripTimeNanos; + private boolean ok; + private ServerConnectionState state; + private int minWireVersion = 0; + private int maxWireVersion = 0; + private ObjectId electionId; + private Integer setVersion; + private TopologyVersion topologyVersion; + private Date lastWriteDate; + private long lastUpdateTimeNanos = Time.nanoTime(); + private Integer logicalSessionTimeoutMinutes; + private boolean helloOk; + + private Throwable exception; + + Builder() { + } + + Builder(final ServerDescription serverDescription) { + this.address = serverDescription.address; + this.type = serverDescription.type; + this.cryptd = serverDescription.cryptd; + this.canonicalAddress = serverDescription.canonicalAddress; + this.hosts = serverDescription.hosts; + this.passives = serverDescription.passives; + this.arbiters = serverDescription.arbiters; + this.primary = serverDescription.primary; + this.maxDocumentSize = serverDescription.maxDocumentSize; + this.tagSet = serverDescription.tagSet; + this.setName = serverDescription.setName; + this.roundTripTimeNanos = serverDescription.roundTripTimeNanos; + this.ok = serverDescription.ok; + this.state = serverDescription.state; + this.minWireVersion = serverDescription.minWireVersion; + this.maxWireVersion = serverDescription.maxWireVersion; + this.electionId = serverDescription.electionId; + this.setVersion = serverDescription.setVersion; + this.topologyVersion = serverDescription.topologyVersion; + this.lastWriteDate = serverDescription.lastWriteDate; + this.lastUpdateTimeNanos = serverDescription.lastUpdateTimeNanos; + this.logicalSessionTimeoutMinutes = serverDescription.logicalSessionTimeoutMinutes; + this.exception = serverDescription.exception; + } + + /** + * Sets the address of the server. + * + * @param address the address of the server + * @return this + */ + public Builder address(final ServerAddress address) { + this.address = address; + return this; + } + + /** + * Sets the canonical host name and port of this server. This is typically derived from the "me" field contained in the "hello" + * command. response. 
+ * + * @param canonicalAddress the host name and port as a string + * + * @return this + */ + public Builder canonicalAddress(@Nullable final String canonicalAddress) { + this.canonicalAddress = canonicalAddress; + return this; + } + + /** + * Sets the type of the server, for example whether it's a standalone or in a replica set. + * + * @param type the Server type + * @return this + */ + public Builder type(final ServerType type) { + this.type = notNull("type", type); + return this; + } + + /** + * Sets whether this server is a mongocryptd. + * + * @param cryptd true if this server is a mongocryptd. + * @return this + */ + public Builder cryptd(final boolean cryptd) { + this.cryptd = cryptd; + return this; + } + + /** + * Sets all members of the replica set that are neither hidden, passive, nor arbiters. + * + * @param hosts A Set of strings in the format of "[hostname]:[port]" that contains all members of the replica set that are neither + * hidden, passive, nor arbiters. + * @return this + */ + public Builder hosts(@Nullable final Set hosts) { + this.hosts = hosts == null ? Collections.emptySet() : Collections.unmodifiableSet(new HashSet<>(hosts)); + return this; + } + + /** + * Sets the passive members of the replica set. + * + * @param passives A Set of strings in the format of "[hostname]:[port]" listing all members of the replica set which have a + * priority of 0. + * @return this + */ + public Builder passives(@Nullable final Set passives) { + this.passives = passives == null ? Collections.emptySet() : Collections.unmodifiableSet(new HashSet<>(passives)); + return this; + } + + /** + * Sets the arbiters in the replica set + * + * @param arbiters A Set of strings in the format of "[hostname]:[port]" containing all members of the replica set that are + * arbiters. + * @return this + */ + public Builder arbiters(@Nullable final Set arbiters) { + this.arbiters = arbiters == null ? Collections.emptySet() : Collections.unmodifiableSet(new HashSet<>(arbiters)); + return this; + } + + /** + * Sets the address of the current primary in the replica set + * + * @param primary A string in the format of "[hostname]:[port]" listing the current primary member of the replica set. + * @return this + */ + public Builder primary(@Nullable final String primary) { + this.primary = primary; + return this; + } + + /** + * The maximum permitted size of a BSON object in bytes for this mongod process. Defaults to 16MB. + * + * @param maxDocumentSize the maximum size a document can be + * @return this + */ + public Builder maxDocumentSize(final int maxDocumentSize) { + this.maxDocumentSize = maxDocumentSize; + return this; + } + + /** + * A set of any tags assigned to this member. + * + * @param tagSet a TagSet with all the tags for this server. + * @return this + */ + public Builder tagSet(@Nullable final TagSet tagSet) { + this.tagSet = tagSet == null ? 
new TagSet() : tagSet; + return this; + } + + /** + * Set the weighted average time it took to make the round trip for requesting this information from the server + * + * @param roundTripTime the time taken + * @param timeUnit the units of the time taken + * @return this + */ + public Builder roundTripTime(final long roundTripTime, final TimeUnit timeUnit) { + this.roundTripTimeNanos = timeUnit.toNanos(roundTripTime); + return this; + } + + + /** + * Set the recent min time it took to make the round trip for requesting this information from the server + * + * @param minRoundTripTime the minimum time taken + * @param timeUnit the units of the time taken + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public Builder minRoundTripTime(final long minRoundTripTime, final TimeUnit timeUnit) { + this.minRoundTripTimeNanos = timeUnit.toNanos(minRoundTripTime); + return this; + } + + /** + * Sets the name of the replica set + * + * @param setName the name of the replica set + * @return this + */ + public Builder setName(@Nullable final String setName) { + this.setName = setName; + return this; + } + + /** + * The isOK() result from requesting this information from MongoDB + * + * @param ok true if the request executed correctly + * @return this + */ + public Builder ok(final boolean ok) { + this.ok = ok; + return this; + } + + /** + * The current state of the connection to the server. + * + * @param state ServerConnectionState representing whether the server has been successfully connected to + * @return this + */ + public Builder state(final ServerConnectionState state) { + this.state = state; + return this; + } + + /** + * The earliest version of the wire protocol that this MongoDB server is capable of using to communicate with clients. + * + * @param minWireVersion the minimum protocol version supported by this server + * @return this + */ + public Builder minWireVersion(final int minWireVersion) { + this.minWireVersion = minWireVersion; + return this; + } + + /** + * The latest version of the wire protocol that this MongoDB server is capable of using to communicate with clients. + * + * @param maxWireVersion the maximum protocol version supported by this server + * @return this + */ + public Builder maxWireVersion(final int maxWireVersion) { + this.maxWireVersion = maxWireVersion; + return this; + } + + /** + * Sets the electionId reported by this server. + * + * @param electionId the electionId + * @return this + */ + public Builder electionId(@Nullable final ObjectId electionId) { + this.electionId = electionId; + return this; + } + + /** + * Sets the setVersion reported by this server. + * + * @param setVersion the set version + * @return this + */ + public Builder setVersion(@Nullable final Integer setVersion) { + this.setVersion = setVersion; + return this; + } + + /** + * Sets the topologyVersion reported by this server. 
+ * + * @param topologyVersion the topology version + * @return this + * @since 4.1 + * @mongodb.server.release 4.4 + */ + public Builder topologyVersion(@Nullable final TopologyVersion topologyVersion) { + this.topologyVersion = topologyVersion; + return this; + } + + /** + * Sets the lastWriteDate reported by this server + * + * @param lastWriteDate the last write date, which may be null for servers prior to 3.4 + * @return this + * + * @since 3.4 + * @mongodb.server.release 3.4 + */ + public Builder lastWriteDate(@Nullable final Date lastWriteDate) { + this.lastWriteDate = lastWriteDate; + return this; + } + + /** + * Sets the last update time for this description, which is simply the time that the server description was created. + * A monotonic clock such as {@link System#nanoTime()} should be used to initialize this value. + * + * @param lastUpdateTimeNanos the last update time of this server description + * @return this + * + * @since 3.4 + */ + public Builder lastUpdateTimeNanos(final long lastUpdateTimeNanos) { + this.lastUpdateTimeNanos = lastUpdateTimeNanos; + return this; + } + + /** + * Sets the session timeout in minutes. + * + * @param logicalSessionTimeoutMinutes the session timeout in minutes, or null if sessions are not supported by this server + * @return this + * @mongodb.server.release 3.6 + * @since 3.6 + */ + public Builder logicalSessionTimeoutMinutes(@Nullable final Integer logicalSessionTimeoutMinutes) { + this.logicalSessionTimeoutMinutes = logicalSessionTimeoutMinutes; + return this; + } + + /** + * Sets whether this server supports the "hello" command. The default is {@code false}. + * + * @param helloOk helloOk + * @return this + * @mongodb.server.release 5.0 + * @since 4.3 + */ + public Builder helloOk(final boolean helloOk) { + this.helloOk = helloOk; + return this; + } + + /** + * Sets the exception thrown while attempting to determine the server description. + * + * @param exception the exception + * @return this + */ + public Builder exception(final Throwable exception) { + this.exception = exception; + return this; + } + + /** + * Create a new ServerDescription from the settings in this builder. + * + * @return a new server description + */ + public ServerDescription build() { + return new ServerDescription(this); + } + } + + /** + * Return whether the server is compatible with the driver. An incompatible server is one that has a min wire version greater that the + * driver's max wire version or a max wire version less than the driver's min wire version. + * + *
+ * <p>
+ * A load balancer is always deemed to be compatible.
+ * </p>
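+ * <p>
+ * Illustrative check (a hedged sketch; how the description is obtained is elided):
+ * <pre>{@code
+ * ServerDescription description = ...; // e.g. from a ClusterDescription
+ * if (!description.isCompatibleWithDriver()) {
+ *     // report the incompatibility instead of issuing commands
+ * }
+ * }</pre>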
+ *
+ * @return true if the server is compatible with the driver.
+ */
+ public boolean isCompatibleWithDriver() {
+     if (type == LOAD_BALANCER) {
+         return true;
+     }
+
+     if (isIncompatiblyOlderThanDriver()) {
+         return false;
+     }
+
+     if (isIncompatiblyNewerThanDriver()) {
+         return false;
+     }
+
+     return true;
+ }
+
+ /**
+ * Return whether the server is incompatibly newer than the driver, that is, whether its min wire version is greater than the
+ * driver's max wire version.
+ *
+ * <p>
+ * A load balancer is always deemed to be compatible.
+ * </p>
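+ * <p>
+ * Hedged illustration using the constants above: with {@code MAX_DRIVER_WIRE_VERSION} at 25, a server reporting a
+ * {@code minWireVersion} of 30 is incompatibly newer than this driver.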
+ *
+ * @return true if the server is incompatibly newer than the driver
+ * @since 3.6
+ */
+ public boolean isIncompatiblyNewerThanDriver() {
+     return ok && type != LOAD_BALANCER && minWireVersion > MAX_DRIVER_WIRE_VERSION;
+ }
+
+ /**
+ * Return whether the server is incompatibly older than the driver, that is, whether its max wire version is less than the
+ * driver's min wire version.
+ *
+ * <p>
+ * A load balancer is always deemed to be compatible.
+ * </p>
+ *
+ * @return true if the server is incompatibly older than the driver
+ * @since 3.6
+ */
+ public boolean isIncompatiblyOlderThanDriver() {
+     return ok && type != LOAD_BALANCER && maxWireVersion < MIN_DRIVER_WIRE_VERSION;
+ }
+
+ /**
+ * Get the default maximum document size.
+ *
+ * @return the default maximum document size
+ */
+ public static int getDefaultMaxDocumentSize() {
+     return DEFAULT_MAX_DOCUMENT_SIZE;
+ }
+
+ /**
+ * Get the default minimum wire version.
+ *
+ * @return the default minimum wire version
+ */
+ public static int getDefaultMinWireVersion() {
+     return 0;
+ }
+
+ /**
+ * Get the default maximum wire version.
+ *
+ * @return the default maximum wire version
+ */
+ public static int getDefaultMaxWireVersion() {
+     return 0;
+ }
+
+ /**
+ * Gets the address of this server.
+ *
+ * @return a ServerAddress containing the details of the address of this server.
+ */
+ public ServerAddress getAddress() {
+     return address;
+ }
+
+ /**
+ * Gets whether this server is a replica set member.
+ *
+ * @return true if this server is part of a replica set
+ */
+ public boolean isReplicaSetMember() {
+     return type.getClusterType() == ClusterType.REPLICA_SET;
+ }
+
+ /**
+ * Gets whether this is a server that is the entry point to a sharded instance of MongoDB.
+ *
+ * @return true if this server is a mongos instance
+ */
+ public boolean isShardRouter() {
+     return type == SHARD_ROUTER;
+ }
+
+ /**
+ * Gets whether this is part of a replica set/sharded system, or is a single server.
+ *
+ * @return true if this is a single server
+ */
+ public boolean isStandAlone() {
+     return type == STANDALONE;
+ }
+
+ /**
+ * Returns whether this can be treated as a primary server.
+ *
+ * @return true if this server is the primary in a replica set, is a mongos, a load balancer, or is a single standalone server
+ */
+ public boolean isPrimary() {
+     return ok && (type == REPLICA_SET_PRIMARY || type == SHARD_ROUTER || type == STANDALONE || type == LOAD_BALANCER);
+ }
+
+ /**
+ * Returns whether this can be treated as a secondary server.
+ *
+ * @return true if this server is a secondary in a replica set, is a mongos, a load balancer, or is a single standalone server
+ */
+ public boolean isSecondary() {
+     return ok && (type == REPLICA_SET_SECONDARY || type == SHARD_ROUTER || type == STANDALONE || type == LOAD_BALANCER);
+ }
+
+ /**
+ * Returns whether this server is mongocryptd.
+ *
+ * @return true if this server is a mongocryptd.
+ */
+ public boolean isCryptd() {
+     return cryptd;
+ }
+
+ /**
+ * Get a Set of strings in the format of "[hostname]:[port]" that contains all members of the replica set that are neither hidden,
+ * passive, nor arbiters.
+ *
+ * @return all members of the replica set that are neither hidden, passive, nor arbiters.
+ */
+ public Set<String> getHosts() {
+     return hosts;
+ }
+
+ /**
+ * Gets the passive members of the replica set.
+ *
+ * @return A set of strings in the format of "[hostname]:[port]" listing all members of the replica set which have a priority of 0.
+ */
+ public Set<String> getPassives() {
+     return passives;
+ }
+
+ /**
+ * Gets the arbiters in the replica set.
+ *
+ * @return A Set of strings in the format of "[hostname]:[port]" containing all members of the replica set that are arbiters.
+ */
+ public Set<String> getArbiters() {
+     return arbiters;
+ }
+
+ /**
+ * Gets the address of the current primary in the replica set.
+ *
+ * @return A string in the format of "[hostname]:[port]" listing the current primary member of the replica set.
+ */ + @Nullable + public String getPrimary() { + return primary; + } + + /** + * The maximum permitted size of a BSON object in bytes for this mongod process. Defaults to 16MB. + * + * @return the maximum size a document can be + */ + public int getMaxDocumentSize() { + return maxDocumentSize; + } + + /** + * A set of all tags assigned to this member. + * + * @return a TagSet with all the tags for this server. + */ + public TagSet getTagSet() { + return tagSet; + } + + /** + * The earliest version of the wire protocol that this MongoDB server is capable of using to communicate with clients. + * + * @return the minimum protocol version supported by this server + */ + public int getMinWireVersion() { + return minWireVersion; + } + + /** + * The latest version of the wire protocol that this MongoDB server is capable of using to communicate with clients. + * + * @return the maximum protocol version supported by this server + */ + public int getMaxWireVersion() { + return maxWireVersion; + } + + /** + * The replica set electionid reported by this MongoDB server. + * + * @return the electionId, which may be null + */ + @Nullable + public ObjectId getElectionId() { + return electionId; + } + + /** + * The replica set setVersion reported by this MongoDB server. + * + * @return the setVersion, which may be null + */ + @Nullable + public Integer getSetVersion() { + return setVersion; + } + + /** + * The topologyVersion reported by this MongoDB server. + * + * @return the topologyVersion, which may be null + * @since 4.1 + * @mongodb.server.release 4.4 + */ + @Nullable + public TopologyVersion getTopologyVersion() { + return topologyVersion; + } + + /** + * Gets the last write date. + * @return the last write date, which may be null + * @since 3.4 + * @mongodb.server.release 3.4 + */ + @Nullable + public Date getLastWriteDate() { + return lastWriteDate; + } + + /** + * Gets the time that this server description was created, using a monotonic clock like {@link System#nanoTime()}. + * + * @param timeUnit the time unit + * @return the last update time in the given unit + * + * @since 3.4 + */ + public long getLastUpdateTime(final TimeUnit timeUnit) { + return timeUnit.convert(lastUpdateTimeNanos, TimeUnit.NANOSECONDS); + } + + /** + * Returns true if the server has the given tags. A server of either type {@code ServerType.STANDALONE} or {@code + * ServerType.SHARD_ROUTER} is considered to have all tags, so this method will always return true for instances of either of those + * types. + * + * @param desiredTags the tags + * @return true if this server has the given tags + */ + public boolean hasTags(final TagSet desiredTags) { + if (!ok) { + return false; + } + + if (type == STANDALONE || type == SHARD_ROUTER) { + return true; + } + + return tagSet.containsAll(desiredTags); + } + + /** + * Gets the name of the replica set + * + * @return the name of the replica set + */ + @Nullable + public String getSetName() { + return setName; + } + + /** + * The isOK() result from requesting this information from the server + * + * @return true if the request executed correctly + */ + public boolean isOk() { + return ok; + } + + /** + * Gets the current state of the connection to the server. + * + * @return ServerConnectionState representing whether the server has been successfully connected to + */ + public ServerConnectionState getState() { + return state; + } + + /** + * Gets the type of the server, for example whether it's a standalone or in a replica set. 
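+ * <p>
+ * Illustrative (hedged) dispatch on the type:
+ * <pre>{@code
+ * if (description.getType() == ServerType.REPLICA_SET_PRIMARY) {
+ *     // e.g. route writes to this member
+ * }
+ * }</pre>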
+ * + * @return the server type + */ + public ServerType getType() { + return type; + } + + /** + * Gets the type of the cluster this server is in (for example, replica set). + * + * @return a ClusterType representing the type of the cluster this server is in + */ + public ClusterType getClusterType() { + return type.getClusterType(); + } + + /** + * Get the weighted average time it took to make the round trip for requesting this information from the server in nanoseconds. + * + * @return the time taken to request the information, in nano seconds + */ + public long getRoundTripTimeNanos() { + return roundTripTimeNanos; + } + + /** + * Get the recent min time it took to make the round trip for requesting this information from the server in nanoseconds. + * + * @return the recent min time taken to request the information, in nano seconds + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public long getMinRoundTripTimeNanos() { + return minRoundTripTimeNanos; + } + + /** + * Gets the exception thrown while attempting to determine the server description. This is useful for diagnostic purposed when + * determining the root cause of a connectivity failure. + * + * @return the exception, which may be null + */ + @Nullable + public Throwable getException() { + return exception; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + ServerDescription that = (ServerDescription) o; + + if (maxDocumentSize != that.maxDocumentSize) { + return false; + } + if (ok != that.ok) { + return false; + } + if (!address.equals(that.address)) { + return false; + } + if (!arbiters.equals(that.arbiters)) { + return false; + } + if (!Objects.equals(canonicalAddress, that.canonicalAddress)) { + return false; + } + if (!hosts.equals(that.hosts)) { + return false; + } + if (!passives.equals(that.passives)) { + return false; + } + if (!Objects.equals(primary, that.primary)) { + return false; + } + if (!Objects.equals(setName, that.setName)) { + return false; + } + if (state != that.state) { + return false; + } + if (!tagSet.equals(that.tagSet)) { + return false; + } + if (type != that.type) { + return false; + } + if (minWireVersion != that.minWireVersion) { + return false; + } + if (maxWireVersion != that.maxWireVersion) { + return false; + } + if (!Objects.equals(electionId, that.electionId)) { + return false; + } + if (!Objects.equals(setVersion, that.setVersion)) { + return false; + } + if (!Objects.equals(topologyVersion, that.topologyVersion)) { + return false; + } + + if (!Objects.equals(lastWriteDate, that.lastWriteDate)) { + return false; + } + + if (lastUpdateTimeNanos != that.lastUpdateTimeNanos) { + return false; + } + + if (!Objects.equals(logicalSessionTimeoutMinutes, that.logicalSessionTimeoutMinutes)) { + return false; + } + + if (helloOk != that.helloOk) { + return false; + } + + if (cryptd != that.cryptd) { + return false; + } + + // Compare class equality and message as exceptions rarely override equals + Class thisExceptionClass = exception != null ? exception.getClass() : null; + Class thatExceptionClass = that.exception != null ? that.exception.getClass() : null; + if (!Objects.equals(thisExceptionClass, thatExceptionClass)) { + return false; + } + + String thisExceptionMessage = exception != null ? exception.getMessage() : null; + String thatExceptionMessage = that.exception != null ? 
that.exception.getMessage() : null; + if (!Objects.equals(thisExceptionMessage, thatExceptionMessage)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return Objects.hash(address, type, cryptd, canonicalAddress, hosts, passives, arbiters, primary, maxDocumentSize, tagSet, setName, + roundTripTimeNanos, minRoundTripTimeNanos, ok, state, minWireVersion, maxWireVersion, electionId, setVersion, + topologyVersion, lastWriteDate, lastUpdateTimeNanos, logicalSessionTimeoutMinutes, exception, helloOk); + } + + @Override + public String toString() { + return "ServerDescription{" + + "address=" + address + + ", type=" + type + + ", cryptd=" + cryptd + + ", state=" + state + + (state == CONNECTED + ? + ", ok=" + ok + + ", minWireVersion=" + minWireVersion + + ", maxWireVersion=" + maxWireVersion + + ", maxDocumentSize=" + maxDocumentSize + + ", logicalSessionTimeoutMinutes=" + logicalSessionTimeoutMinutes + + ", roundTripTimeNanos=" + roundTripTimeNanos + + ", minRoundTripTimeNanos=" + minRoundTripTimeNanos + : "") + + (isReplicaSetMember() + ? + ", setName='" + setName + '\'' + + ", canonicalAddress=" + canonicalAddress + + ", hosts=" + hosts + + ", passives=" + passives + + ", arbiters=" + arbiters + + ", primary='" + primary + '\'' + + ", tagSet=" + tagSet + + ", electionId=" + electionId + + ", setVersion=" + setVersion + + ", topologyVersion=" + topologyVersion + + ", lastWriteDate=" + lastWriteDate + + ", lastUpdateTimeNanos=" + lastUpdateTimeNanos + : "") + + (exception == null ? "" : ", exception=" + translateExceptionToString()) + + '}'; + } + + /** + * Returns a short, pretty description for this ServerDescription. + * + * @return a String containing the most pertinent information about this ServerDescription + */ + public String getShortDescription() { + return "{" + + "address=" + address + + ", type=" + type + + (!tagSet.iterator().hasNext() ? "" : ", " + tagSet) + + (state == CONNECTED ? (", roundTripTime=" + getRoundTripFormattedInMilliseconds() + " ms") : "") + + ", state=" + state + + (exception == null ? 
"" : ", exception=" + translateExceptionToString()) + + '}'; + } + + private String translateExceptionToString() { + StringBuilder builder = new StringBuilder(); + builder.append("{"); + builder.append(exception); + builder.append("}"); + Throwable cur = exception.getCause(); + while (cur != null) { + builder.append(", caused by "); + builder.append("{"); + builder.append(cur); + builder.append("}"); + cur = cur.getCause(); + } + + return builder.toString(); + } + + + private String getRoundTripFormattedInMilliseconds() { + return DecimalFormatHelper.format("#0.0", roundTripTimeNanos / 1000.0 / 1000.0); + } + + ServerDescription(final Builder builder) { + address = notNull("address", builder.address); + type = notNull("type", builder.type); + cryptd = builder.cryptd; + state = notNull("state", builder.state); + canonicalAddress = builder.canonicalAddress; + hosts = builder.hosts; + passives = builder.passives; + arbiters = builder.arbiters; + primary = builder.primary; + maxDocumentSize = builder.maxDocumentSize; + tagSet = builder.tagSet; + setName = builder.setName; + roundTripTimeNanos = builder.roundTripTimeNanos; + minRoundTripTimeNanos = builder.minRoundTripTimeNanos; + ok = builder.ok; + minWireVersion = builder.minWireVersion; + maxWireVersion = builder.maxWireVersion; + electionId = builder.electionId; + setVersion = builder.setVersion; + topologyVersion = builder.topologyVersion; + lastWriteDate = builder.lastWriteDate; + lastUpdateTimeNanos = builder.lastUpdateTimeNanos; + logicalSessionTimeoutMinutes = builder.logicalSessionTimeoutMinutes; + helloOk = builder.helloOk; + exception = builder.exception; + } +} diff --git a/driver-core/src/main/com/mongodb/connection/ServerId.java b/driver-core/src/main/com/mongodb/connection/ServerId.java new file mode 100644 index 00000000000..b1b0df4e618 --- /dev/null +++ b/driver-core/src/main/com/mongodb/connection/ServerId.java @@ -0,0 +1,97 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.connection; + +import com.mongodb.ServerAddress; +import com.mongodb.annotations.Immutable; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A client-generated identifier that uniquely identifies a MongoDB server. + * + * @since 3.0 + */ +@Immutable +public final class ServerId { + private final ClusterId clusterId; + private final ServerAddress address; + + /** + * Construct an instance. + * + * @param clusterId the client-generated cluster identifier + * @param address the server address + */ + public ServerId(final ClusterId clusterId, final ServerAddress address) { + this.clusterId = notNull("clusterId", clusterId); + this.address = notNull("address", address); + } + + /** + * Gets the cluster identifier. + * + * @return the cluster identifier + */ + public ClusterId getClusterId() { + return clusterId; + } + + /** + * Gets the server address. 
+ * @return the server address + */ + public ServerAddress getAddress() { + return address; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ServerId serverId = (ServerId) o; + + if (!address.equals(serverId.address)) { + return false; + } + if (!clusterId.equals(serverId.clusterId)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = clusterId.hashCode(); + result = 31 * result + address.hashCode(); + return result; + } + + @Override + public String toString() { + return "ServerId{" + + "clusterId=" + clusterId + + ", address=" + address + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/connection/ServerMonitoringMode.java b/driver-core/src/main/com/mongodb/connection/ServerMonitoringMode.java new file mode 100644 index 00000000000..cf54afef4bb --- /dev/null +++ b/driver-core/src/main/com/mongodb/connection/ServerMonitoringMode.java @@ -0,0 +1,52 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.connection; + +import com.mongodb.event.ClusterListener; +import com.mongodb.event.ServerHeartbeatFailedEvent; +import com.mongodb.event.ServerHeartbeatStartedEvent; +import com.mongodb.event.ServerHeartbeatSucceededEvent; +import com.mongodb.event.ServerListener; + +/** + * The server monitoring mode, which defines the monitoring protocol to use. + * + * @see + * server discovery and monitoring (SDAM) + * @since 5.1 + */ +public enum ServerMonitoringMode { + /** + * Use the streaming protocol when the server supports it or fall back to the polling protocol otherwise. + * When the streaming protocol comes into play, + * {@link ServerHeartbeatStartedEvent#isAwaited()}, {@link ServerHeartbeatSucceededEvent#isAwaited()}, + * {@link ServerHeartbeatFailedEvent#isAwaited()} return {@code true} for new events. + *
+ * <p>
+ * The streaming protocol uses long polling for server monitoring, and is intended to reduce the delay between a server change
+ * that warrants a new event for {@link ServerListener}/{@link ClusterListener},
+ * and that event being emitted, as well as the related housekeeping work being done.</p>
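+ * <p>
+ * Illustrative opt-in (a hedged sketch using the {@link ServerSettings} builder added later in this patch):
+ * <pre>{@code
+ * ServerSettings.builder()
+ *         .serverMonitoringMode(ServerMonitoringMode.STREAM)
+ *         .build();
+ * }</pre>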
+ */ + STREAM(), + /** + * Use the polling protocol. + */ + POLL(), + /** + * Behave the same as {@link #POLL} if running in a FaaS environment, otherwise behave as {@link #STREAM}. + * This is the default. + */ + AUTO() +} diff --git a/driver-core/src/main/com/mongodb/connection/ServerSettings.java b/driver-core/src/main/com/mongodb/connection/ServerSettings.java new file mode 100644 index 00000000000..b4394d0dc79 --- /dev/null +++ b/driver-core/src/main/com/mongodb/connection/ServerSettings.java @@ -0,0 +1,317 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.connection; + +import com.mongodb.ConnectionString; +import com.mongodb.annotations.Immutable; +import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.event.ServerListener; +import com.mongodb.event.ServerMonitorListener; + +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.Collections.unmodifiableList; + +/** + * Settings relating to monitoring of each server. + * + * @since 3.0 + */ +@Immutable +public class ServerSettings { + private final long heartbeatFrequencyMS; + private final long minHeartbeatFrequencyMS; + private final ServerMonitoringMode serverMonitoringMode; + private final List serverListeners; + private final List serverMonitorListeners; + + /** + * Creates a builder for ServerSettings. + * + * @return a new Builder for creating ServerSettings. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Creates a builder instance. + * + * @param serverSettings existing ServerSettings to default the builder settings on. + * @return a builder + * @since 3.5 + */ + public static Builder builder(final ServerSettings serverSettings) { + return builder().applySettings(serverSettings); + } + + /** + * A builder for the settings. + */ + @NotThreadSafe + public static final class Builder { + private long heartbeatFrequencyMS = 10000; + private long minHeartbeatFrequencyMS = 500; + private ServerMonitoringMode serverMonitoringMode = ServerMonitoringMode.AUTO; + private List serverListeners = new ArrayList<>(); + private List serverMonitorListeners = new ArrayList<>(); + + private Builder() { + } + + /** + * Applies the serverSettings to the builder + * + *
+ * <p>Note: Overwrites all existing settings</p>
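+ * <p>
+ * Illustrative builder usage (a hedged sketch; the values are arbitrary):
+ * <pre>{@code
+ * ServerSettings settings = ServerSettings.builder()
+ *         .heartbeatFrequency(10, TimeUnit.SECONDS)
+ *         .minHeartbeatFrequency(500, TimeUnit.MILLISECONDS)
+ *         .build();
+ * }</pre>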
+ * + * @param serverSettings the serverSettings + * @return this + * @since 3.7 + */ + public Builder applySettings(final ServerSettings serverSettings) { + notNull("serverSettings", serverSettings); + heartbeatFrequencyMS = serverSettings.heartbeatFrequencyMS; + minHeartbeatFrequencyMS = serverSettings.minHeartbeatFrequencyMS; + serverMonitoringMode = serverSettings.serverMonitoringMode; + serverListeners = new ArrayList<>(serverSettings.serverListeners); + serverMonitorListeners = new ArrayList<>(serverSettings.serverMonitorListeners); + return this; + } + + /** + * Sets the frequency that the cluster monitor attempts to reach each server. The default value is 10 seconds. + * + * @param heartbeatFrequency the heartbeat frequency + * @param timeUnit the time unit + * @return this + */ + public Builder heartbeatFrequency(final long heartbeatFrequency, final TimeUnit timeUnit) { + this.heartbeatFrequencyMS = TimeUnit.MILLISECONDS.convert(heartbeatFrequency, timeUnit); + return this; + } + + /** + * Sets the minimum heartbeat frequency. In the event that the driver has to frequently re-check a server's availability, it will + * wait at least this long since the previous check to avoid wasted effort. The default value is 500 milliseconds. + * + * @param minHeartbeatFrequency the minimum heartbeat frequency + * @param timeUnit the time unit + * @return this + */ + public Builder minHeartbeatFrequency(final long minHeartbeatFrequency, final TimeUnit timeUnit) { + this.minHeartbeatFrequencyMS = TimeUnit.MILLISECONDS.convert(minHeartbeatFrequency, timeUnit); + return this; + } + + /** + * Sets the server monitoring mode, which defines the monitoring protocol to use. + * The default value is {@link ServerMonitoringMode#AUTO}. + * + * @param serverMonitoringMode The {@link ServerMonitoringMode}. + * @return {@code this}. + * @see #getServerMonitoringMode() + * @since 5.1 + */ + public Builder serverMonitoringMode(final ServerMonitoringMode serverMonitoringMode) { + this.serverMonitoringMode = notNull("serverMonitoringMode", serverMonitoringMode); + return this; + } + + /** + * Add a server listener. + * + * @param serverListener the non-null server listener + * @return this + * @since 3.3 + */ + public Builder addServerListener(final ServerListener serverListener) { + notNull("serverListener", serverListener); + serverListeners.add(serverListener); + return this; + } + + /** + * Sets the server listeners. + * + * @param serverListeners list of server listeners + * @return this + * @since 4.5 + */ + public Builder serverListenerList(final List serverListeners) { + notNull("serverListeners", serverListeners); + this.serverListeners = new ArrayList<>(serverListeners); + return this; + } + + /** + * Adds a server monitor listener. + * + * @param serverMonitorListener the non-null server monitor listener + * @return this + * @since 3.3 + */ + public Builder addServerMonitorListener(final ServerMonitorListener serverMonitorListener) { + notNull("serverMonitorListener", serverMonitorListener); + serverMonitorListeners.add(serverMonitorListener); + return this; + } + + /** + * Sets the server monitor listeners. 
+ * + * @param serverMonitorListeners list of server monitor listeners + * @return this + * @since 4.5 + */ + public Builder serverMonitorListenerList(final List serverMonitorListeners) { + notNull("serverMonitorListeners", serverMonitorListeners); + this.serverMonitorListeners = new ArrayList<>(serverMonitorListeners); + return this; + } + + /** + * Takes the settings from the given {@code ConnectionString} and applies them to the builder + * + * @param connectionString the connection string containing details of how to connect to MongoDB + * @return this + * @since 3.3 + */ + public Builder applyConnectionString(final ConnectionString connectionString) { + Integer heartbeatFrequency = connectionString.getHeartbeatFrequency(); + if (heartbeatFrequency != null) { + heartbeatFrequencyMS = heartbeatFrequency; + } + ServerMonitoringMode serverMonitoringMode = connectionString.getServerMonitoringMode(); + if (serverMonitoringMode != null) { + this.serverMonitoringMode = serverMonitoringMode; + } + return this; + } + + /** + * Create a new ServerSettings from the settings applied to this builder. + * + * @return a ServerSettings with the given settings. + */ + public ServerSettings build() { + return new ServerSettings(this); + } + } + + /** + * Gets the frequency that the cluster monitor attempts to reach each server. The default value is 10 seconds. + * + * @param timeUnit the time unit + * @return the heartbeat frequency + */ + public long getHeartbeatFrequency(final TimeUnit timeUnit) { + return timeUnit.convert(heartbeatFrequencyMS, TimeUnit.MILLISECONDS); + } + + /** + * Gets the minimum heartbeat frequency. In the event that the driver has to frequently re-check a server's availability, it will wait + * at least this long since the previous check to avoid wasted effort. The default value is 500 milliseconds. + * + * @param timeUnit the time unit + * @return the heartbeat reconnect retry frequency + */ + public long getMinHeartbeatFrequency(final TimeUnit timeUnit) { + return timeUnit.convert(minHeartbeatFrequencyMS, TimeUnit.MILLISECONDS); + } + + /** + * Gets the server monitoring mode, which defines the monitoring protocol to use. + * The default value is {@link ServerMonitoringMode#AUTO}. + * + * @return The {@link ServerMonitoringMode}. + * @see Builder#serverMonitoringMode(ServerMonitoringMode) + * @see ConnectionString#getServerMonitoringMode() + * @since 5.1 + */ + public ServerMonitoringMode getServerMonitoringMode() { + return serverMonitoringMode; + } + + /** + * Gets the server listeners. The default value is an empty list. + * + * @return the server listeners + * @since 3.3 + */ + public List getServerListeners() { + return serverListeners; + } + + /** + * Gets the server monitor listeners. The default value is an empty list. 
+ * + * @return the server monitor listeners + * @since 3.3 + */ + public List getServerMonitorListeners() { + return serverMonitorListeners; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final ServerSettings that = (ServerSettings) o; + return heartbeatFrequencyMS == that.heartbeatFrequencyMS + && minHeartbeatFrequencyMS == that.minHeartbeatFrequencyMS + && serverMonitoringMode == that.serverMonitoringMode + && Objects.equals(serverListeners, that.serverListeners) + && Objects.equals(serverMonitorListeners, that.serverMonitorListeners); + } + + @Override + public int hashCode() { + return Objects.hash( + heartbeatFrequencyMS, + minHeartbeatFrequencyMS, + serverMonitoringMode, + serverListeners, + serverMonitorListeners); + } + + @Override + public String toString() { + return "ServerSettings{" + + "heartbeatFrequencyMS=" + heartbeatFrequencyMS + + ", minHeartbeatFrequencyMS=" + minHeartbeatFrequencyMS + + ", serverMonitoringMode=" + serverMonitoringMode + + ", serverListeners='" + serverListeners + '\'' + + ", serverMonitorListeners='" + serverMonitorListeners + '\'' + + '}'; + } + + ServerSettings(final Builder builder) { + heartbeatFrequencyMS = builder.heartbeatFrequencyMS; + minHeartbeatFrequencyMS = builder.minHeartbeatFrequencyMS; + serverMonitoringMode = builder.serverMonitoringMode; + serverListeners = unmodifiableList(builder.serverListeners); + serverMonitorListeners = unmodifiableList(builder.serverMonitorListeners); + } +} diff --git a/driver-core/src/main/com/mongodb/connection/ServerType.java b/driver-core/src/main/com/mongodb/connection/ServerType.java new file mode 100644 index 00000000000..9997394049e --- /dev/null +++ b/driver-core/src/main/com/mongodb/connection/ServerType.java @@ -0,0 +1,121 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.connection; + +/** + * The type of the server. + * + * @since 3.0 + */ +public enum ServerType { + /** + * A standalone mongod server. + */ + STANDALONE { + @Override + public ClusterType getClusterType() { + return ClusterType.STANDALONE; + } + }, + + /** + * A replica set primary. + */ + REPLICA_SET_PRIMARY { + @Override + public ClusterType getClusterType() { + return ClusterType.REPLICA_SET; + } + }, + + /** + * A replica set secondary. + */ + REPLICA_SET_SECONDARY { + @Override + public ClusterType getClusterType() { + return ClusterType.REPLICA_SET; + } + }, + + /** + * A replica set arbiter. + */ + REPLICA_SET_ARBITER { + @Override + public ClusterType getClusterType() { + return ClusterType.REPLICA_SET; + } + }, + + /** + * A replica set member that is none of the other types (a passive, for example). 
+ */ + REPLICA_SET_OTHER { + @Override + public ClusterType getClusterType() { + return ClusterType.REPLICA_SET; + } + }, + + /** + * A replica set member that does not report a set name or a hosts list + */ + REPLICA_SET_GHOST { + @Override + public ClusterType getClusterType() { + return ClusterType.REPLICA_SET; + } + }, + + /** + * A router to a sharded cluster, i.e. a mongos server. + */ + SHARD_ROUTER { + @Override + public ClusterType getClusterType() { + return ClusterType.SHARDED; + } + }, + + /** + * A load balancer. + */ + LOAD_BALANCER { + @Override + public ClusterType getClusterType() { + return ClusterType.LOAD_BALANCED; + } + }, + + /** + * The server type is not yet known. + */ + UNKNOWN { + @Override + public ClusterType getClusterType() { + return ClusterType.UNKNOWN; + } + }; + + /** + * The type of the cluster to which this server belongs + * + * @return the cluster type + */ + public abstract ClusterType getClusterType(); +} diff --git a/driver-core/src/main/com/mongodb/connection/ServerVersion.java b/driver-core/src/main/com/mongodb/connection/ServerVersion.java new file mode 100644 index 00000000000..f35988d64c7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/connection/ServerVersion.java @@ -0,0 +1,114 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.connection; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.Arrays.asList; + +/** + * Holds an array of three integers representing the server version, e.g. [3, 4, 1]. + * + * @since 3.0 + */ +public class ServerVersion implements Comparable<ServerVersion> { + private final List<Integer> versionList; + + /** + * Creates a server version which will compare as less than all other valid versions + */ + public ServerVersion() { + this.versionList = Collections.unmodifiableList(asList(0, 0, 0)); + } + + /** + * Constructs a new instance with the given version list of integers. + * + * @param versionList a non-null, three-item list of integers + */ + public ServerVersion(final List<Integer> versionList) { + notNull("versionList", versionList); + isTrue("version array has three elements", versionList.size() == 3); + this.versionList = Collections.unmodifiableList(new ArrayList<>(versionList)); + } + + /** + * Constructs a new instance with the given major and minor versions and a patch version of 0. + * + * @param majorVersion the major version + * @param minorVersion the minor version + */ + public ServerVersion(final int majorVersion, final int minorVersion) { + this(asList(majorVersion, minorVersion, 0)); + } + + /** + * Gets the version list.
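+ * <p>For example (a sketch): {@code new ServerVersion(asList(4, 2, 0)).getVersionList()} returns the unmodifiable list {@code [4, 2, 0]}.</p>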
+ * + * @return an unmodifiable list of three integers + */ + public List getVersionList() { + return versionList; + } + + @Override + public int compareTo(final ServerVersion o) { + int retVal = 0; + for (int i = 0; i < versionList.size(); i++) { + retVal = versionList.get(i).compareTo(o.versionList.get(i)); + if (retVal != 0) { + break; + } + } + return retVal; + + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + ServerVersion that = (ServerVersion) o; + + if (!versionList.equals(that.versionList)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return versionList.hashCode(); + } + + @Override + public String toString() { + return "ServerVersion{" + + "versionList=" + versionList + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/connection/SocketSettings.java b/driver-core/src/main/com/mongodb/connection/SocketSettings.java new file mode 100644 index 00000000000..4e6890e785c --- /dev/null +++ b/driver-core/src/main/com/mongodb/connection/SocketSettings.java @@ -0,0 +1,296 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.connection; + +import com.mongodb.Block; +import com.mongodb.ConnectionString; +import com.mongodb.annotations.Immutable; + +import java.util.Objects; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.notNull; +import static java.lang.Math.toIntExact; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +/** + * An immutable class representing socket settings used for connections to a MongoDB server. + * + * @since 3.0 + */ +@Immutable +public final class SocketSettings { + private final int connectTimeoutMS; + private final int readTimeoutMS; + private final int receiveBufferSize; + private final int sendBufferSize; + private final ProxySettings proxySettings; + + /** + * Gets a builder for an instance of {@code SocketSettings}. + * + * @return the builder + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Creates a builder instance. + * + * @param socketSettings existing SocketSettings to default the builder settings on. + * @return a builder + * @since 3.7 + */ + public static Builder builder(final SocketSettings socketSettings) { + return builder().applySettings(socketSettings); + } + + /** + * A builder for an instance of {@code SocketSettings}. + */ + public static final class Builder { + private int connectTimeoutMS = 10000; + private int readTimeoutMS; + private int receiveBufferSize; + private int sendBufferSize; + private ProxySettings.Builder proxySettingsBuilder = ProxySettings.builder(); + + private Builder() { + } + + /** + * Applies the socketSettings to the builder + * + *
<p>Note: Overwrites all existing settings</p>
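+ * <p>A usage sketch: start a new builder from an existing settings object, then override a single field.
+ * The method names are those defined on this builder; the timeout values are illustrative.</p>
+ * <pre>{@code
+ * SocketSettings base = SocketSettings.builder()
+ *         .connectTimeout(5, TimeUnit.SECONDS)
+ *         .build();
+ * SocketSettings derived = SocketSettings.builder()
+ *         .applySettings(base)                  // copies every field from base
+ *         .readTimeout(10, TimeUnit.SECONDS)    // then overrides just the read timeout
+ *         .build();
+ * }</pre>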
+ * + * @param socketSettings the socketSettings + * @return this + * @since 3.7 + */ + public Builder applySettings(final SocketSettings socketSettings) { + notNull("socketSettings", socketSettings); + connectTimeoutMS = socketSettings.connectTimeoutMS; + readTimeoutMS = socketSettings.readTimeoutMS; + receiveBufferSize = socketSettings.receiveBufferSize; + sendBufferSize = socketSettings.sendBufferSize; + proxySettingsBuilder.applySettings(socketSettings.getProxySettings()); + return this; + } + + /** + * Sets the socket connect timeout. + * + * @param connectTimeout the connect timeout. + * The timeout converted to milliseconds must not be greater than {@link Integer#MAX_VALUE}. + * @param timeUnit the time unit + * @return this + */ + public Builder connectTimeout(final long connectTimeout, final TimeUnit timeUnit) { + this.connectTimeoutMS = timeoutArgumentToMillis(connectTimeout, timeUnit); + return this; + } + + /** + * Sets the socket read timeout. + * + * @param readTimeout the read timeout. + * The timeout converted to milliseconds must not be greater than {@link Integer#MAX_VALUE}. + * @param timeUnit the time unit + * @return this + * @see #getReadTimeout(TimeUnit) + */ + public Builder readTimeout(final long readTimeout, final TimeUnit timeUnit) { + this.readTimeoutMS = timeoutArgumentToMillis(readTimeout, timeUnit); + return this; + } + + /** + * Sets the receive buffer size. + * + * @param receiveBufferSize the receive buffer size + * @return this + */ + public Builder receiveBufferSize(final int receiveBufferSize) { + this.receiveBufferSize = receiveBufferSize; + return this; + } + + /** + * Sets the send buffer size. + * + * @param sendBufferSize the send buffer size + * @return this + */ + public Builder sendBufferSize(final int sendBufferSize) { + this.sendBufferSize = sendBufferSize; + return this; + } + + /** + * Applies the {@link ProxySettings.Builder} block and then sets the {@link SocketSettings#proxySettings}. + * + * @param block the block to apply to the {@link ProxySettings}. + * @return this + * @see SocketSettings#getProxySettings() + */ + public SocketSettings.Builder applyToProxySettings(final Block block) { + notNull("block", block).apply(proxySettingsBuilder); + return this; + } + + /** + * Takes the settings from the given {@code ConnectionString} and applies them to the builder + * + * @param connectionString the connection string containing details of how to connect to MongoDB + * @return this + * @see com.mongodb.ConnectionString#getConnectTimeout() + * @see com.mongodb.ConnectionString#getSocketTimeout() + */ + public Builder applyConnectionString(final ConnectionString connectionString) { + Integer connectTimeout = connectionString.getConnectTimeout(); + if (connectTimeout != null) { + this.connectTimeout(connectTimeout, MILLISECONDS); + } + + Integer socketTimeout = connectionString.getSocketTimeout(); + if (socketTimeout != null) { + this.readTimeout(socketTimeout, MILLISECONDS); + } + + proxySettingsBuilder.applyConnectionString(connectionString); + + return this; + } + + /** + * Build an instance of {@code SocketSettings}. + * @return the socket settings for this builder + */ + public SocketSettings build() { + return new SocketSettings(this); + } + } + + /** + * Gets the timeout for socket connect. Defaults to 10 seconds. + * + * @param timeUnit the time unit to get the timeout in + * @return the connect timeout in the requested time unit. 
+ */ + public int getConnectTimeout(final TimeUnit timeUnit) { + return (int) timeUnit.convert(connectTimeoutMS, MILLISECONDS); + } + + /** + * Gets the timeout for socket reads. Defaults to 0, which indicates no timeout + * + * @param timeUnit the time unit to get the timeout in + * @return the read timeout in the requested time unit, or 0 if there is no timeout + * @see Builder#readTimeout(long, TimeUnit) + */ + public int getReadTimeout(final TimeUnit timeUnit) { + return (int) timeUnit.convert(readTimeoutMS, MILLISECONDS); + } + + /** + * Gets the proxy settings used for connecting to MongoDB via a SOCKS5 proxy server. + * + * @return The {@link ProxySettings} instance containing the SOCKS5 proxy configuration. + * @see Builder#applyToProxySettings(Block) + * @since 4.11 + */ + public ProxySettings getProxySettings() { + return proxySettings; + } + + /** + * Gets the receive buffer size. Defaults to the operating system default. + * + * @return the receive buffer size + */ + public int getReceiveBufferSize() { + return receiveBufferSize; + } + + /** + * Gets the send buffer size. Defaults to the operating system default. + * + * @return the send buffer size + */ + public int getSendBufferSize() { + return sendBufferSize; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + SocketSettings that = (SocketSettings) o; + + if (connectTimeoutMS != that.connectTimeoutMS) { + return false; + } + if (readTimeoutMS != that.readTimeoutMS) { + return false; + } + if (receiveBufferSize != that.receiveBufferSize) { + return false; + } + if (sendBufferSize != that.sendBufferSize) { + return false; + } + return proxySettings.equals(that.proxySettings); + } + + @Override + public int hashCode() { + return Objects.hash(connectTimeoutMS, readTimeoutMS, receiveBufferSize, sendBufferSize, proxySettings); + } + + @Override + public String toString() { + return "SocketSettings{" + + "connectTimeoutMS=" + connectTimeoutMS + + ", readTimeoutMS=" + readTimeoutMS + + ", receiveBufferSize=" + receiveBufferSize + + ", proxySettings=" + proxySettings + + '}'; + } + + private SocketSettings(final Builder builder) { + connectTimeoutMS = builder.connectTimeoutMS; + readTimeoutMS = builder.readTimeoutMS; + receiveBufferSize = builder.receiveBufferSize; + sendBufferSize = builder.sendBufferSize; + proxySettings = builder.proxySettingsBuilder.build(); + } + + private static int timeoutArgumentToMillis(final long timeout, final TimeUnit timeUnit) throws IllegalArgumentException { + try { + return toIntExact(MILLISECONDS.convert(timeout, timeUnit)); + } catch (ArithmeticException e) { + throw new IllegalArgumentException( + "The timeout converted to milliseconds must not be greater than `Integer.MAX_VALUE`", e); + } + } +} diff --git a/driver-core/src/main/com/mongodb/connection/SslSettings.java b/driver-core/src/main/com/mongodb/connection/SslSettings.java new file mode 100644 index 00000000000..58e2937a61b --- /dev/null +++ b/driver-core/src/main/com/mongodb/connection/SslSettings.java @@ -0,0 +1,229 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.connection; + +import com.mongodb.ConnectionString; +import com.mongodb.annotations.Immutable; +import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.lang.Nullable; + +import javax.net.ssl.SSLContext; +import java.util.Objects; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * Settings for connecting to MongoDB via SSL. + * + * @since 3.0 + */ +@Immutable +public class SslSettings { + private final boolean enabled; + private final boolean invalidHostNameAllowed; + private final SSLContext context; + + /** + * Gets a Builder for creating a new SSLSettings instance. + * + * @return a new Builder for SSLSettings. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Creates a builder instance. + * + * @param sslSettings existing SslSettings to default the builder settings on. + * @return a builder + * @since 3.7 + */ + public static Builder builder(final SslSettings sslSettings) { + return builder().applySettings(sslSettings); + } + + /** + * A builder for creating SSLSettings. + */ + @NotThreadSafe + public static final class Builder { + private boolean enabled; + private boolean invalidHostNameAllowed; + private SSLContext context; + + private Builder(){ + } + + /** + * Applies the sslSettings to the builder + * + *
<p>Note: Overwrites all existing settings</p>
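+ * <p>A similar sketch, assuming {@code existing} is a previously built {@code SslSettings} instance:</p>
+ * <pre>{@code
+ * SslSettings overridden = SslSettings.builder()
+ *         .applySettings(existing)        // start from the existing settings
+ *         .invalidHostNameAllowed(false)  // then override a single property
+ *         .build();
+ * }</pre>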
+ * + * @param sslSettings the sslSettings + * @return this + * @since 3.7 + */ + public Builder applySettings(final SslSettings sslSettings) { + notNull("sslSettings", sslSettings); + enabled = sslSettings.enabled; + invalidHostNameAllowed = sslSettings.invalidHostNameAllowed; + context = sslSettings.context; + return this; + } + + /** + * Define whether SSL should be enabled. + * + * @param enabled should be true if SSL is to be enabled. + * @return this + */ + public Builder enabled(final boolean enabled) { + this.enabled = enabled; + return this; + } + + /** + * Define whether invalid host names should be allowed. Defaults to false. Take care before setting this to true, as it makes + * the application susceptible to man-in-the-middle attacks. + * + * @param invalidHostNameAllowed whether invalid host names are allowed. + * @return this + */ + public Builder invalidHostNameAllowed(final boolean invalidHostNameAllowed) { + this.invalidHostNameAllowed = invalidHostNameAllowed; + return this; + } + + /** + * Sets the SSLContext for use when SSL is enabled. + * + * @param context the SSLContext to use for connections. Ignored if TLS/SSL is not {@linkplain #enabled(boolean) enabled}, or if + * overridden by {@link NettyTransportSettings#getSslContext()}. + * @return this + * @since 3.5 + */ + public Builder context(final SSLContext context) { + this.context = context; + return this; + } + + /** + * Takes the settings from the given {@code ConnectionString} and applies them to the builder + * + * @param connectionString the connection string containing details of how to connect to MongoDB + * @return this + */ + public Builder applyConnectionString(final ConnectionString connectionString) { + Boolean sslEnabled = connectionString.getSslEnabled(); + if (sslEnabled != null) { + this.enabled = sslEnabled; + } + + Boolean sslInvalidHostnameAllowed = connectionString.getSslInvalidHostnameAllowed(); + if (sslInvalidHostnameAllowed != null) { + this.invalidHostNameAllowed = sslInvalidHostnameAllowed; + } + + return this; + } + + /** + * Create a new SSLSettings from the settings in this builder. + * + * @return a new SSL settings + */ + public SslSettings build() { + return new SslSettings(this); + } + } + + /** + * Returns whether SSL is enabled. + * + * @return true if SSL is enabled. + */ + public boolean isEnabled() { + return enabled; + } + + /** + * Returns whether invalid host names should be allowed. Defaults to false. Take care before setting this to true, as it makes + * the application susceptible to man-in-the-middle attacks. + * + * @return true if invalid host names are allowed. + */ + public boolean isInvalidHostNameAllowed() { + return invalidHostNameAllowed; + } + + /** + * Gets the SSLContext configured for use with SSL connections. + * + * @return the SSLContext, which defaults to null if not configured. In that case {@code SSLContext.getDefault()} will be used if SSL + * is enabled. 
+ * @since 3.5 + * @see SSLContext#getDefault() + */ + @Nullable + public SSLContext getContext() { + return context; + } + + SslSettings(final Builder builder) { + enabled = builder.enabled; + invalidHostNameAllowed = builder.invalidHostNameAllowed; + context = builder.context; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + SslSettings that = (SslSettings) o; + + if (enabled != that.enabled) { + return false; + } + if (invalidHostNameAllowed != that.invalidHostNameAllowed) { + return false; + } + return Objects.equals(context, that.context); + } + + @Override + public int hashCode() { + int result = (enabled ? 1 : 0); + result = 31 * result + (invalidHostNameAllowed ? 1 : 0); + result = 31 * result + (context != null ? context.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "SslSettings{" + + "enabled=" + enabled + + ", invalidHostNameAllowed=" + invalidHostNameAllowed + + ", context=" + context + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/connection/TopologyVersion.java b/driver-core/src/main/com/mongodb/connection/TopologyVersion.java new file mode 100644 index 00000000000..b6d8f3bf9ee --- /dev/null +++ b/driver-core/src/main/com/mongodb/connection/TopologyVersion.java @@ -0,0 +1,117 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.connection; + +import com.mongodb.annotations.ThreadSafe; +import org.bson.BsonDocument; +import org.bson.BsonInt64; +import org.bson.BsonObjectId; +import org.bson.types.ObjectId; + +/** + * The topology version of a cluster. 
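+ * <p>For example (a sketch), {@code new TopologyVersion(new ObjectId(), 42L).asDocument()} yields a document along the lines of {@code {"processId": {"$oid": "..."}, "counter": 42}}.</p>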
+ * + * @since 4.1 + * @mongodb.server.release 4.4 + */ +@ThreadSafe +public final class TopologyVersion { + private final ObjectId processId; + private final long counter; + + /** + * Construct a new instance from a document description + * + * @param topologyVersionDocument a document description of the topology version + */ + public TopologyVersion(final BsonDocument topologyVersionDocument) { + processId = topologyVersionDocument.getObjectId("processId").getValue(); + counter = topologyVersionDocument.getInt64("counter").getValue(); + } + + /** + * Construct a new instance from process identifier and counter + * + * @param processId the process identifer + * @param counter the counter + */ + public TopologyVersion(final ObjectId processId, final long counter) { + this.processId = processId; + this.counter = counter; + } + + /** + * Get the process identifier + * + * @return the process identifier + */ + public ObjectId getProcessId() { + return processId; + } + + /** + * Get the counter + * + * @return the counter + */ + public long getCounter() { + return counter; + } + + /** + * Get the document representation of the topology version + * + * @return the document representation of the topology version + */ + public BsonDocument asDocument() { + return new BsonDocument("processId", new BsonObjectId(processId)) + .append("counter", new BsonInt64(counter)); + + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + TopologyVersion that = (TopologyVersion) o; + + if (counter != that.counter) { + return false; + } + return processId.equals(that.processId); + } + + @Override + public int hashCode() { + int result = processId.hashCode(); + result = 31 * result + (int) (counter ^ (counter >>> 32)); + return result; + } + + @Override + public String toString() { + return "TopologyVersion{" + + "processId=" + processId + + ", counter=" + counter + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/connection/TransportSettings.java b/driver-core/src/main/com/mongodb/connection/TransportSettings.java new file mode 100644 index 00000000000..50797f541f5 --- /dev/null +++ b/driver-core/src/main/com/mongodb/connection/TransportSettings.java @@ -0,0 +1,48 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.connection; + +import com.mongodb.annotations.Immutable; +import com.mongodb.annotations.Sealed; + +/** + * Transport settings for the driver. + * + * @since 4.11 + */ +@Sealed +@Immutable +public abstract class TransportSettings { + /** + * A builder for {@link NettyTransportSettings}. + * + * @return a builder for {@link NettyTransportSettings} + */ + public static NettyTransportSettings.Builder nettyBuilder() { + return NettyTransportSettings.builder(); + } + + /** + * A builder for {@link AsyncTransportSettings}. 
+ * + * @return a builder for {@link AsyncTransportSettings} + * @since 5.2 + */ + public static AsyncTransportSettings.Builder asyncBuilder() { + return AsyncTransportSettings.builder(); + } +} diff --git a/driver-core/src/main/com/mongodb/connection/package-info.java b/driver-core/src/main/com/mongodb/connection/package-info.java new file mode 100644 index 00000000000..4b33825bbcc --- /dev/null +++ b/driver-core/src/main/com/mongodb/connection/package-info.java @@ -0,0 +1,23 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Contains classes that manage connecting to MongoDB servers. + */ +@NonNullApi +package com.mongodb.connection; + +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/event/ClusterClosedEvent.java b/driver-core/src/main/com/mongodb/event/ClusterClosedEvent.java new file mode 100644 index 00000000000..564d91509ec --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/ClusterClosedEvent.java @@ -0,0 +1,59 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.event; + +import com.mongodb.connection.ClusterId; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A cluster closed event. + * + *
<p>This event is synonymous with TopologyClosedEvent</p>
+ * + * @since 3.3 + */ +public final class ClusterClosedEvent { + + private final ClusterId clusterId; + + /** + * Constructs a new instance of the event. + * + * @param clusterId the cluster id + */ + public ClusterClosedEvent(final ClusterId clusterId) { + this.clusterId = notNull("clusterId", clusterId); + } + + /** + * Gets the cluster id associated with this event. + * + * @return the cluster id + */ + public ClusterId getClusterId() { + return clusterId; + } + + @Override + public String toString() { + return "ClusterClosedEvent{" + + "clusterId=" + clusterId + + '}'; + } +} + diff --git a/driver-core/src/main/com/mongodb/event/ClusterDescriptionChangedEvent.java b/driver-core/src/main/com/mongodb/event/ClusterDescriptionChangedEvent.java new file mode 100644 index 00000000000..c92dc864826 --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/ClusterDescriptionChangedEvent.java @@ -0,0 +1,85 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.event; + +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ClusterId; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * An event signifying that the cluster description has changed. + * + *
<p>This event is synonymous with TopologyDescriptionChangedEvent</p>
+ * + * @since 3.3 + */ +public final class ClusterDescriptionChangedEvent { + private final ClusterId clusterId; + private final ClusterDescription newDescription; + private final ClusterDescription previousDescription; + + /** + * Constructs a new instance of the event. + * + * @param clusterId the non-null cluster id + * @param newDescription the non-null new cluster description + * @param previousDescription the non-null previous cluster description + */ + public ClusterDescriptionChangedEvent(final ClusterId clusterId, final ClusterDescription newDescription, + final ClusterDescription previousDescription) { + this.clusterId = notNull("clusterId", clusterId); + this.newDescription = notNull("newDescription", newDescription); + this.previousDescription = notNull("previousDescription", previousDescription); + } + + /** + * Gets the cluster id associated with this event. + * + * @return the cluster id + */ + public ClusterId getClusterId() { + return clusterId; + } + + /** + * Gets the new cluster description. + * + * @return the cluster description + */ + public ClusterDescription getNewDescription() { + return newDescription; + } + + /** + * Gets the previous cluster description. + * + * @return the previous cluster description + */ + public ClusterDescription getPreviousDescription() { + return previousDescription; + } + + @Override + public String toString() { + return "ClusterDescriptionChangedEvent{" + + "clusterId=" + clusterId + + ", newDescription=" + newDescription + + ", previousDescription=" + previousDescription + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/event/ClusterListener.java b/driver-core/src/main/com/mongodb/event/ClusterListener.java new file mode 100644 index 00000000000..8524e06344c --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/ClusterListener.java @@ -0,0 +1,60 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.event; + +import java.util.EventListener; + +/** + * A listener for cluster-related events. + *
<p>
+ * All events received by {@link ClusterListener}, {@link ServerListener}, + * {@link ServerMonitorListener} are totally ordered (and the event order implies the happens-before order), provided that the listeners + * are not shared by different {@code MongoClient}s. This guarantee holds even if you have a single class implementing + * all of {@link ClusterListener}, {@link ServerListener}, {@link ServerMonitorListener}. However, this guarantee does not mean that + * implementations automatically do not need to synchronize memory accesses to prevent data races. + * For example, if data that the listener collects in memory is accessed outside of the normal execution of the listener + * by the {@code MongoClient}, then reading and writing actions must be synchronized. + *
<p>
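+ * For example, a minimal listener sketch that reacts only to description changes (registering it with the
+ * driver, e.g. through the cluster settings builder, is assumed and not shown here):
+ * <pre>{@code
+ * class LoggingClusterListener implements ClusterListener {
+ *     public void clusterDescriptionChanged(ClusterDescriptionChangedEvent event) {
+ *         System.out.println("topology changed: " + event.getNewDescription());
+ *     }
+ * }
+ * }</pre>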
+ * @see ServerListener + * @see ServerMonitorListener + * @since 3.3 + */ +public interface ClusterListener extends EventListener { + /** + * Invoked when a cluster is opened. + * + * @param event the event + */ + default void clusterOpening(ClusterOpeningEvent event) { + } + + /** + * Invoked when a cluster is closed. + * + * @param event the event + */ + default void clusterClosed(ClusterClosedEvent event) { + } + + /** + * Invoked when a cluster description changes. + * + * @param event the event + */ + default void clusterDescriptionChanged(ClusterDescriptionChangedEvent event) { + } +} diff --git a/driver-core/src/main/com/mongodb/event/ClusterOpeningEvent.java b/driver-core/src/main/com/mongodb/event/ClusterOpeningEvent.java new file mode 100644 index 00000000000..55c9b4ee82e --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/ClusterOpeningEvent.java @@ -0,0 +1,60 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.event; + +import com.mongodb.connection.ClusterId; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A cluster opening event. + * + *
<p>This event is synonymous with TopologyOpeningEvent</p>
+ * + * @since 3.3 + */ +public final class ClusterOpeningEvent { + + private final ClusterId clusterId; + + /** + * Constructs a new instance of the event. + * + * @param clusterId the cluster id + */ + public ClusterOpeningEvent(final ClusterId clusterId) { + this.clusterId = notNull("clusterId", clusterId); + } + + + /*** + * Gets the cluster id. + * + * @return the cluster id + */ + public ClusterId getClusterId() { + return clusterId; + } + + @Override + public String toString() { + return "ClusterOpeningEvent{" + + "clusterId=" + clusterId + + '}'; + } +} + diff --git a/driver-core/src/main/com/mongodb/event/CommandEvent.java b/driver-core/src/main/com/mongodb/event/CommandEvent.java new file mode 100644 index 00000000000..9a5bd87b54b --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/CommandEvent.java @@ -0,0 +1,118 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.event; + +import com.mongodb.RequestContext; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.lang.Nullable; + +/** + * An event representing a MongoDB database command. + * + * @since 3.1 + */ +public abstract class CommandEvent { + @Nullable + private final RequestContext requestContext; + private final int requestId; + private final ConnectionDescription connectionDescription; + private final String commandName; + private final String databaseName; + + private final long operationId; + + /** + * Construct an instance. + * + * @param requestContext the request context + * @param operationId the operation id + * @param requestId the request id + * @param connectionDescription the connection description + * @param databaseName the database name + * @param commandName the command name + * @since 4.11 + */ + public CommandEvent(@Nullable final RequestContext requestContext, final long operationId, final int requestId, + final ConnectionDescription connectionDescription, final String databaseName, final String commandName) { + this.requestContext = requestContext; + this.requestId = requestId; + this.connectionDescription = connectionDescription; + this.commandName = commandName; + this.databaseName = databaseName; + this.operationId = operationId; + } + + /** + * Gets the operation identifier + * + * @return the operation identifier + * @since 4.10 + */ + public long getOperationId() { + return operationId; + } + + /** + * Gets the request identifier + * + * @return the request identifier + */ + public int getRequestId() { + return requestId; + } + + /** + * Gets the description of the connection to which the operation will be sent. + * + * @return the connection description + */ + public ConnectionDescription getConnectionDescription() { + return connectionDescription; + } + + /** + * Gets the name of the command. + * + * @return the command name + */ + public String getCommandName() { + return commandName; + } + + /** + * Gets the database on which the operation will be executed. 
+ * + * @return the database name + * @since 4.11 + */ + public String getDatabaseName() { + return databaseName; + } + + /** + * Gets the request context associated with this event. + * + * @return the request context + * @since 4.4 + */ + @Nullable + public RequestContext getRequestContext() { + return requestContext; + } +} + + diff --git a/driver-core/src/main/com/mongodb/event/CommandFailedEvent.java b/driver-core/src/main/com/mongodb/event/CommandFailedEvent.java new file mode 100644 index 00000000000..43dfe666fe0 --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/CommandFailedEvent.java @@ -0,0 +1,77 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.event; + +import com.mongodb.RequestContext; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.lang.Nullable; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.isTrueArgument; + +/** + * An event representing the failure of a MongoDB database command. + * + * @since 3.1 + */ +public final class CommandFailedEvent extends CommandEvent { + + private final long elapsedTimeNanos; + private final Throwable throwable; + + /** + * Construct an instance. + * + * @param requestContext the request context + * @param operationId the operation id + * @param requestId the request id + * @param connectionDescription the connection description + * @param databaseName the database name + * @param commandName the command name + * @param elapsedTimeNanos the non-negative elapsed time in nanoseconds for the operation to complete + * @param throwable the throwable cause of the failure + * @since 4.11 + */ + public CommandFailedEvent(@Nullable final RequestContext requestContext, final long operationId, final int requestId, + final ConnectionDescription connectionDescription, final String databaseName, final String commandName, + final long elapsedTimeNanos, final Throwable throwable) { + super(requestContext, operationId, requestId, connectionDescription, databaseName, commandName); + isTrueArgument("elapsed time is not negative", elapsedTimeNanos >= 0); + this.elapsedTimeNanos = elapsedTimeNanos; + this.throwable = throwable; + } + + /** + * Gets the elapsed time in the given unit of time. 
+ * + * @param timeUnit the time unit in which to get the elapsed time + * @return the elapsed time + */ + public long getElapsedTime(final TimeUnit timeUnit) { + return timeUnit.convert(elapsedTimeNanos, TimeUnit.NANOSECONDS); + } + + /** + * Gets the throwable cause of the failure + * + * @return the throwable cause of the failure + */ + public Throwable getThrowable() { + return throwable; + } +} diff --git a/driver-core/src/main/com/mongodb/event/CommandListener.java b/driver-core/src/main/com/mongodb/event/CommandListener.java new file mode 100644 index 00000000000..0f3a95fa758 --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/CommandListener.java @@ -0,0 +1,48 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.event; + +/** + * A listener for command events + * + * @since 3.1 + */ +public interface CommandListener { + /** + * Listener for command started events. + * + * @param event the event + */ + default void commandStarted(CommandStartedEvent event) { + } + + /** + * Listener for command completed events + * + * @param event the event + */ + default void commandSucceeded(CommandSucceededEvent event) { + } + + /** + * Listener for command failure events + * + * @param event the event + */ + default void commandFailed(CommandFailedEvent event) { + } +} diff --git a/driver-core/src/main/com/mongodb/event/CommandStartedEvent.java b/driver-core/src/main/com/mongodb/event/CommandStartedEvent.java new file mode 100644 index 00000000000..bab0015e56a --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/CommandStartedEvent.java @@ -0,0 +1,60 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.event; + +import com.mongodb.RequestContext; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; + +/** + * An event representing the start of a command execution. + * + * @since 3.1 + */ +public final class CommandStartedEvent extends CommandEvent { + private final BsonDocument command; + + /** + * Construct an instance. 
+ * + * @param requestContext the request context + * @param operationId the operation id + * @param requestId the request id + * @param connectionDescription the connection description + * @param databaseName the database name + * @param commandName the command name + * @param command the command as a BSON document + * @since 4.10 + */ + public CommandStartedEvent(@Nullable final RequestContext requestContext, final long operationId, final int requestId, + final ConnectionDescription connectionDescription, final String databaseName, final String commandName, + final BsonDocument command) { + super(requestContext, operationId, requestId, connectionDescription, databaseName, commandName); + this.command = command; + } + + /** + * Gets the command document. The document is only usable within the method that delivered the event. If it's needed for longer, it + * must be cloned via {@link Object#clone()}. + * + * @return the command document + */ + public BsonDocument getCommand() { + return command; + } +} diff --git a/driver-core/src/main/com/mongodb/event/CommandSucceededEvent.java b/driver-core/src/main/com/mongodb/event/CommandSucceededEvent.java new file mode 100644 index 00000000000..0daa83897a9 --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/CommandSucceededEvent.java @@ -0,0 +1,78 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.event; + +import com.mongodb.RequestContext; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.isTrueArgument; + +/** + * An event representing the completion of a MongoDB database command. + * + * @since 3.1 + */ +public final class CommandSucceededEvent extends CommandEvent { + private final BsonDocument response; + private final long elapsedTimeNanos; + + /** + * Construct an instance. + * + * @param requestContext the request context + * @param operationId the operation id + * @param requestId the request id + * @param connectionDescription the connection description + * @param databaseName the database name + * @param commandName the command name + * @param response the command response + * @param elapsedTimeNanos the non-negative elapsed time in nanoseconds for the operation to complete + * @since 4.11 + */ + public CommandSucceededEvent(@Nullable final RequestContext requestContext, final long operationId, final int requestId, + final ConnectionDescription connectionDescription, final String databaseName, final String commandName, + final BsonDocument response, final long elapsedTimeNanos) { + super(requestContext, operationId, requestId, connectionDescription, databaseName, commandName); + this.response = response; + isTrueArgument("elapsed time is not negative", elapsedTimeNanos >= 0); + this.elapsedTimeNanos = elapsedTimeNanos; + } + + /** + * Gets the elapsed time in the given unit of time. 
+ * + * @param timeUnit the time unit in which to get the elapsed time + * @return the elapsed time + */ + public long getElapsedTime(final TimeUnit timeUnit) { + return timeUnit.convert(elapsedTimeNanos, TimeUnit.NANOSECONDS); + } + + /** + * Gets the response document. The document is only usable within the method that delivered the event. If it's needed for longer, it + * must be cloned via {@link Object#clone()}. + * + * @return the response document + */ + public BsonDocument getResponse() { + return response; + } +} diff --git a/driver-core/src/main/com/mongodb/event/ConnectionCheckOutFailedEvent.java b/driver-core/src/main/com/mongodb/event/ConnectionCheckOutFailedEvent.java new file mode 100644 index 00000000000..1907df63c41 --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/ConnectionCheckOutFailedEvent.java @@ -0,0 +1,139 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.event; + +import com.mongodb.connection.ConnectionPoolSettings; +import com.mongodb.connection.ServerId; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; + +/** + * An event for when checking out a connection fails. + * + * @since 4.0 + */ +public final class ConnectionCheckOutFailedEvent { + + /** + * An enumeration of the reasons checking out a connection failed + */ + public enum Reason { + /** + * The pool was previously closed and cannot provide new connections + */ + POOL_CLOSED, + + /** + * The connection check out attempt exceeded the specific timeout + */ + TIMEOUT, + + /** + * The connection check out attempt experienced an error while setting up a new connection + */ + CONNECTION_ERROR, + + /** + * Reason unknown + */ + UNKNOWN, + } + + private final ServerId serverId; + private final long operationId; + private final Reason reason; + private final long elapsedTimeNanos; + + /** + * Constructs an instance. + * + * @param serverId The server ID. See {@link #getServerId()}. + * @param operationId The operation ID. See {@link #getOperationId()}. + * @param reason The reason the connection check out failed. See {@link #getReason()}. + * @param elapsedTimeNanos The time it took while trying to check out the connection. See {@link #getElapsedTime(TimeUnit)}. 
+ * @since 4.11 + */ + public ConnectionCheckOutFailedEvent(final ServerId serverId, final long operationId, final Reason reason, final long elapsedTimeNanos) { + this.serverId = notNull("serverId", serverId); + this.operationId = operationId; + this.reason = notNull("reason", reason); + isTrueArgument("waited time is not negative", elapsedTimeNanos >= 0); + this.elapsedTimeNanos = elapsedTimeNanos; + } + + /** + * Gets the server id + * + * @return the server id + */ + public ServerId getServerId() { + return serverId; + } + + /** + * Gets the operation identifier + * + * @return the operation identifier + * @since 4.10 + */ + public long getOperationId() { + return operationId; + } + + /** + * Gets the reason for the check out failure. + * + * @return the reason + * @since 4.3 + */ + public Reason getReason() { + return reason; + } + + /** + * The time it took to check out the connection. + * More specifically, the time elapsed between emitting a {@link ConnectionCheckOutStartedEvent} + * and emitting this event as part of the same checking out. + *
<p>
+ * Naturally, if a new connection was not {@linkplain ConnectionCreatedEvent created} + * and {@linkplain ConnectionReadyEvent established} as part of checking out, + * this duration is usually not greater than {@link ConnectionPoolSettings#getMaxWaitTime(TimeUnit)}, + * but may occasionally be greater than that, because the driver does not provide hard real-time guarantees.
</p>
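+ * <p>For example, calling {@code getElapsedTime(TimeUnit.MILLISECONDS)} on this event reports how long the failed check-out attempt took, in milliseconds.</p>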
+ * + * @param timeUnit The time unit of the result. + * {@link TimeUnit#convert(long, TimeUnit)} specifies how the conversion from nanoseconds to {@code timeUnit} is done. + * @return The time it took to establish the connection. + * @since 4.11 + */ + public long getElapsedTime(final TimeUnit timeUnit) { + return timeUnit.convert(elapsedTimeNanos, TimeUnit.NANOSECONDS); + } + + @Override + public String toString() { + return "ConnectionCheckOutFailedEvent{" + + "server=" + serverId.getAddress() + + ", clusterId=" + serverId.getClusterId() + + ", operationId=" + operationId + + ", reason=" + reason + + ", elapsedTimeNanos=" + elapsedTimeNanos + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/event/ConnectionCheckOutStartedEvent.java b/driver-core/src/main/com/mongodb/event/ConnectionCheckOutStartedEvent.java new file mode 100644 index 00000000000..b01a0577550 --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/ConnectionCheckOutStartedEvent.java @@ -0,0 +1,71 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.event; + +import com.mongodb.connection.ServerId; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * An event for when the driver starts to check out a connection. + * + * @since 4.0 + */ +public final class ConnectionCheckOutStartedEvent { + private final ServerId serverId; + private final long operationId; + + /** + * Construct an instance + * + * @param serverId the server id + * @param operationId the operation id + * @since 4.10 + */ + public ConnectionCheckOutStartedEvent(final ServerId serverId, final long operationId) { + this.serverId = notNull("serverId", serverId); + this.operationId = operationId; + } + + /** + * Gets the server id + * + * @return the server id + */ + public ServerId getServerId() { + return serverId; + } + + /** + * Gets the operation identifier + * + * @return the operation identifier + * @since 4.10 + */ + public long getOperationId() { + return operationId; + } + + @Override + public String toString() { + return "ConnectionCheckOutStartedEvent{" + + "server=" + serverId.getAddress() + + ", clusterId=" + serverId.getClusterId() + + ", operationId=" + operationId + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/event/ConnectionCheckedInEvent.java b/driver-core/src/main/com/mongodb/event/ConnectionCheckedInEvent.java new file mode 100644 index 00000000000..8f6b7ccff2d --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/ConnectionCheckedInEvent.java @@ -0,0 +1,75 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.event; + +import com.mongodb.connection.ConnectionId; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * An event for checking in a connection to the pool. + * Such a connection is considered available until it becomes {@linkplain ConnectionCheckedOutEvent in use} + * or {@linkplain ConnectionClosedEvent closed}. + * + * @since 3.5 + */ +public final class ConnectionCheckedInEvent { + private final ConnectionId connectionId; + private final long operationId; + + + /** + * Construct an instance + * + * @param connectionId the connectionId + * @param operationId the operation id + * @since 4.10 + */ + public ConnectionCheckedInEvent(final ConnectionId connectionId, final long operationId) { + this.connectionId = notNull("connectionId", connectionId); + this.operationId = operationId; + } + + /** + * Gets the connection id + * + * @return the connection id + */ + public ConnectionId getConnectionId() { + return connectionId; + } + + /** + * Gets the operation identifier + * + * @return the operation identifier + * @since 4.10 + */ + public long getOperationId() { + return operationId; + } + + @Override + public String toString() { + return "ConnectionCheckedInEvent{" + + "connectionId=" + connectionId + + ", server=" + connectionId.getServerId().getAddress() + + ", clusterId=" + connectionId.getServerId().getClusterId() + + ", operationId=" + operationId + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/event/ConnectionCheckedOutEvent.java b/driver-core/src/main/com/mongodb/event/ConnectionCheckedOutEvent.java new file mode 100644 index 00000000000..150ae8459b1 --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/ConnectionCheckedOutEvent.java @@ -0,0 +1,101 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.event; + +import com.mongodb.connection.ConnectionId; +import com.mongodb.connection.ConnectionPoolSettings; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; + +/** + * An event for checking out a connection from the pool. + * Such a connection is considered in use until it becomes {@linkplain ConnectionCheckedInEvent available}. + * + * @since 3.5 + */ +public final class ConnectionCheckedOutEvent { + private final ConnectionId connectionId; + private final long operationId; + private final long elapsedTimeNanos; + + /** + * Constructs an instance. + * + * @param connectionId The connection ID. 
See {@link #getConnectionId()}. + * @param operationId The operation ID. See {@link #getOperationId()}. + * @param elapsedTimeNanos The time it took to check out the connection. See {@link #getElapsedTime(TimeUnit)}. + * @since 4.11 + */ + public ConnectionCheckedOutEvent(final ConnectionId connectionId, final long operationId, final long elapsedTimeNanos) { + this.connectionId = notNull("connectionId", connectionId); + this.operationId = operationId; + isTrueArgument("waited time is not negative", elapsedTimeNanos >= 0); + this.elapsedTimeNanos = elapsedTimeNanos; + } + + /** + * Gets the connection id + * + * @return the connection id + */ + public ConnectionId getConnectionId() { + return connectionId; + } + + /** + * Gets the operation identifier + * + * @return the operation identifier + * @since 4.10 + */ + public long getOperationId() { + return operationId; + } + + /** + * The time it took to check out the connection. + * More specifically, the time elapsed between emitting a {@link ConnectionCheckOutStartedEvent} + * and emitting this event as part of the same checking out. + *
<p> + * Naturally, if a new connection was not {@linkplain ConnectionCreatedEvent created} + * and {@linkplain ConnectionReadyEvent established} as part of checking out, + * this duration is usually not greater than {@link ConnectionPoolSettings#getMaxWaitTime(TimeUnit)}, + * but may occasionally be greater than that, because the driver does not provide hard real-time guarantees.</p>
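As a usage sketch of the elapsed-time API above (editor's illustration: the listener class name and the 50 ms threshold are invented, while `getElapsedTime`, `getOperationId`, `applyToConnectionPoolSettings`, and `addConnectionPoolListener` are existing driver APIs), a `ConnectionPoolListener` can flag slow check-outs and be registered through `MongoClientSettings`:

```java
import com.mongodb.MongoClientSettings;
import com.mongodb.event.ConnectionCheckedOutEvent;
import com.mongodb.event.ConnectionPoolListener;

import java.util.concurrent.TimeUnit;

public final class CheckOutLatencyListener implements ConnectionPoolListener {
    @Override
    public void connectionCheckedOut(final ConnectionCheckedOutEvent event) {
        long elapsedMs = event.getElapsedTime(TimeUnit.MILLISECONDS);
        if (elapsedMs > 50) { // illustrative threshold, not a driver default
            System.out.printf("slow check-out: %d ms (operationId=%d)%n",
                    elapsedMs, event.getOperationId());
        }
    }

    public static MongoClientSettings settings() {
        return MongoClientSettings.builder()
                .applyToConnectionPoolSettings(builder ->
                        builder.addConnectionPoolListener(new CheckOutLatencyListener()))
                .build();
    }
}
```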
+ * + * @param timeUnit The time unit of the result. + * {@link TimeUnit#convert(long, TimeUnit)} specifies how the conversion from nanoseconds to {@code timeUnit} is done. + * @return The time it took to establish the connection. + * @since 4.11 + */ + public long getElapsedTime(final TimeUnit timeUnit) { + return timeUnit.convert(elapsedTimeNanos, TimeUnit.NANOSECONDS); + } + + @Override + public String toString() { + return "ConnectionCheckedOutEvent{" + + "connectionId=" + connectionId + + ", server=" + connectionId.getServerId().getAddress() + + ", clusterId=" + connectionId.getServerId().getClusterId() + + ", operationId=" + operationId + + ", elapsedTimeNanos=" + elapsedTimeNanos + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/event/ConnectionClosedEvent.java b/driver-core/src/main/com/mongodb/event/ConnectionClosedEvent.java new file mode 100644 index 00000000000..f1888177fbb --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/ConnectionClosedEvent.java @@ -0,0 +1,98 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.event; + +import com.mongodb.connection.ConnectionId; +import com.mongodb.connection.ConnectionPoolSettings; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * An event for when a connection pool closes a connection. + * Such a connection stops being counted towards {@link ConnectionPoolSettings#getMaxSize()}. + * + * @since 4.0 + */ +public final class ConnectionClosedEvent { + + /** + * An enumeration of the reasons a connection could be closed + */ + public enum Reason { + /** + * The connection is no longer valid because the pool has been cleared + */ + STALE, + + /** + * The connection became stale by being idle for too long + */ + IDLE, + + /** + * The connection experienced an error, making it no longer valid + */ + ERROR, + + /** + * The pool was closed, making the connection no longer valid + */ + POOL_CLOSED, + } + + private final ConnectionId connectionId; + private final Reason reason; + + /** + * Construct an instance + * + * @param connectionId the connection id + * @param reason the reason the connection was closed + */ + public ConnectionClosedEvent(final ConnectionId connectionId, final Reason reason) { + this.connectionId = notNull("connectionId", connectionId); + this.reason = notNull("reason", reason); + } + + /** + * Gets the connection id + * + * @return the connection id + */ + public ConnectionId getConnectionId() { + return connectionId; + } + + /** + * Get the reason the connection was removed. 
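The `Reason` enum above lets a listener distinguish expected lifecycle closes from problematic ones; a minimal sketch (the class name is invented, the event APIs are from this patch):

```java
import com.mongodb.event.ConnectionClosedEvent;
import com.mongodb.event.ConnectionPoolListener;

public final class CloseReasonListener implements ConnectionPoolListener {
    @Override
    public void connectionClosed(final ConnectionClosedEvent event) {
        switch (event.getReason()) {
            case STALE:       // pool was cleared
            case POOL_CLOSED: // pool was shut down
                break;        // expected lifecycle closes, nothing to do
            case IDLE:
                System.out.println("closed idle connection: " + event.getConnectionId());
                break;
            case ERROR:
                System.err.println("closed errored connection: " + event.getConnectionId());
                break;
        }
    }
}
```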
+ * + * @return the reason + */ + public Reason getReason() { + return reason; + } + + @Override + public String toString() { + return "ConnectionClosedEvent{" + + "connectionId=" + connectionId + + ", server=" + connectionId.getServerId().getAddress() + + ", clusterId=" + connectionId.getServerId().getClusterId() + + ", reason=" + reason + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/event/ConnectionCreatedEvent.java b/driver-core/src/main/com/mongodb/event/ConnectionCreatedEvent.java new file mode 100644 index 00000000000..b7d0eb06acf --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/ConnectionCreatedEvent.java @@ -0,0 +1,58 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.event; + +import com.mongodb.connection.ConnectionId; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * An event for creating a connection in the pool. + * Such a connection is considered pending until it is {@linkplain ConnectionReadyEvent finished being established and becomes available}. + * + * @since 4.0 + */ +public final class ConnectionCreatedEvent { + private final ConnectionId connectionId; + + /** + * Construct an instance + * + * @param connectionId the connection id + */ + public ConnectionCreatedEvent(final ConnectionId connectionId) { + this.connectionId = notNull("connectionId", connectionId); + } + + /** + * Gets the connection id + * + * @return the connection id + */ + public ConnectionId getConnectionId() { + return connectionId; + } + + @Override + public String toString() { + return "ConnectionCreatedEvent{" + + "connectionId=" + connectionId + + ", server=" + connectionId.getServerId().getAddress() + + ", clusterId=" + connectionId.getServerId().getClusterId() + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/event/ConnectionPoolClearedEvent.java b/driver-core/src/main/com/mongodb/event/ConnectionPoolClearedEvent.java new file mode 100644 index 00000000000..27458815ed4 --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/ConnectionPoolClearedEvent.java @@ -0,0 +1,87 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.event; + +import com.mongodb.connection.ServerId; +import com.mongodb.lang.Nullable; +import org.bson.types.ObjectId; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * An event signifying when a connection pool is cleared and paused. 
+ * + * @since 4.0 + */ +public final class ConnectionPoolClearedEvent { + private final ServerId serverId; + @Nullable private final ObjectId serviceId; + + /** + * Constructs a new instance of the event. + * + * @param serverId the server id + */ + public ConnectionPoolClearedEvent(final ServerId serverId) { + this(serverId, null); + } + + /** + * Constructs a new instance of the event. + * + * @param serverId the server id + * @param serviceId the service id, which may be null + * @since 4.3 + */ + public ConnectionPoolClearedEvent(final ServerId serverId, @Nullable final ObjectId serviceId) { + this.serverId = notNull("serverId", serverId); + this.serviceId = serviceId; + } + + /** + * Gets the server id + * + * @return the server id + */ + public ServerId getServerId() { + return serverId; + } + + /** + * Gets the service id. + * + *
<p> + * When connected to a load balancer, in some cases the driver clears only a subset of connections in the pool, based on the + * service id reported on the connection on which the error occurred. + * </p>
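Since `getServiceId()` is non-null only in that load-balanced case, a listener can tell a scoped clear from a full pool clear; a minimal sketch (class name invented, event APIs from this patch):

```java
import com.mongodb.event.ConnectionPoolClearedEvent;
import com.mongodb.event.ConnectionPoolListener;

public final class PoolClearListener implements ConnectionPoolListener {
    @Override
    public void connectionPoolCleared(final ConnectionPoolClearedEvent event) {
        if (event.getServiceId() != null) {
            // load-balanced mode: only connections for this service were invalidated
            System.out.println("cleared service " + event.getServiceId() + " on " + event.getServerId());
        } else {
            System.out.println("cleared pool for " + event.getServerId());
        }
    }
}
```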
+ * + * @return the service id, which may be null + * @since 4.3 + */ + @Nullable + public ObjectId getServiceId() { + return serviceId; + } + + @Override + public String toString() { + return "ConnectionPoolClearedEvent{" + + "serverId=" + serverId + + ", serviceId=" + serviceId + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/event/ConnectionPoolClosedEvent.java b/driver-core/src/main/com/mongodb/event/ConnectionPoolClosedEvent.java new file mode 100644 index 00000000000..d7322e40a01 --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/ConnectionPoolClosedEvent.java @@ -0,0 +1,55 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.event; + +import com.mongodb.connection.ServerId; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * An event signifying the closing of a connection pool. + * + * @since 3.5 + */ +public final class ConnectionPoolClosedEvent { + private final ServerId serverId; + + /** + * Constructs a new instance of the event. + * + * @param serverId the server id + */ + public ConnectionPoolClosedEvent(final ServerId serverId) { + this.serverId = notNull("serverId", serverId); + } + + /** + * Gets the server id + * + * @return the server id + */ + public ServerId getServerId() { + return serverId; + } + + @Override + public String toString() { + return "ConnectionPoolClosedEvent{" + + "serverId=" + serverId + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/event/ConnectionPoolCreatedEvent.java b/driver-core/src/main/com/mongodb/event/ConnectionPoolCreatedEvent.java new file mode 100644 index 00000000000..e2941ace103 --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/ConnectionPoolCreatedEvent.java @@ -0,0 +1,69 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.event; + +import com.mongodb.connection.ConnectionPoolSettings; +import com.mongodb.connection.ServerId; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * An event signifying that a connection pool was created. + * + * @since 4.0 + */ +public final class ConnectionPoolCreatedEvent { + private final ServerId serverId; + private final ConnectionPoolSettings settings; + + /** + * Constructs a new instance of the event. 
+ * + * @param serverId the server id + * @param settings the connection pool settings + */ + public ConnectionPoolCreatedEvent(final ServerId serverId, final ConnectionPoolSettings settings) { + this.serverId = notNull("serverId", serverId); + this.settings = notNull("settings", settings); + } + + /** + * Gets the server id + * + * @return the server id + */ + public ServerId getServerId() { + return serverId; + } + + /** + * Gets the connection pool settings. + * + * @return the connection pool setttings. + */ + public ConnectionPoolSettings getSettings() { + return settings; + } + + @Override + public String toString() { + return "ConnectionPoolCreatedEvent{" + + "serverId=" + serverId + + " settings=" + settings + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/event/ConnectionPoolListener.java b/driver-core/src/main/com/mongodb/event/ConnectionPoolListener.java new file mode 100644 index 00000000000..5cc2db467e7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/ConnectionPoolListener.java @@ -0,0 +1,122 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.event; + +import java.util.EventListener; + +/** + * A listener for connection pool-related events. + * + * @since 3.5 + */ +public interface ConnectionPoolListener extends EventListener { + /** + * Invoked when a connection pool is created. The default implementation does nothing. + * + * @param event the event + * @since 4.0 + */ + default void connectionPoolCreated(ConnectionPoolCreatedEvent event) { + } + + /** + * Invoked when a connection pool is cleared and paused. The default implementation does nothing. + * + * @param event the event + * @since 4.0 + */ + default void connectionPoolCleared(ConnectionPoolClearedEvent event) { + } + + /** + * Invoked when a connection pool is ready. The default implementation does nothing. + * + * @param event the event + * @since 4.3 + */ + default void connectionPoolReady(ConnectionPoolReadyEvent event) { + } + + /** + * Invoked when a connection pool is closed. The default implementation does nothing. + * + * @param event the event + */ + default void connectionPoolClosed(ConnectionPoolClosedEvent event) { + } + + /** + * Invoked when attempting to check out a connection from a pool. The default implementation does nothing. + * + * @param event the event + * @since 4.0 + */ + default void connectionCheckOutStarted(ConnectionCheckOutStartedEvent event) { + } + + /** + * Invoked when a connection is checked out of a pool. The default implementation does nothing. + * + * @param event the event + */ + default void connectionCheckedOut(ConnectionCheckedOutEvent event) { + } + + /** + * Invoked when an attempt to check out a connection from a pool fails. The default implementation does nothing. + * + * @param event the event + * @since 4.0 + */ + default void connectionCheckOutFailed(ConnectionCheckOutFailedEvent event) { + } + + /** + * Invoked when a connection is checked in to a pool. 
The default implementation does nothing. + * + * @param event the event + */ + default void connectionCheckedIn(ConnectionCheckedInEvent event) { + } + + /** + * Invoked when a connection is created. The default implementation does nothing. + * + * @param event the event + * @since 4.0 + */ + default void connectionCreated(ConnectionCreatedEvent event) { + } + + /** + * Invoked when a connection is ready for use. The default implementation does nothing. + * + * @param event the event + * @since 4.0 + */ + default void connectionReady(ConnectionReadyEvent event) { + } + + /** + * Invoked when a connection is removed from a pool. The default implementation does nothing. + * + * @param event the event + * @since 4.0 + */ + default void connectionClosed(ConnectionClosedEvent event) { + } +} diff --git a/driver-core/src/main/com/mongodb/event/ConnectionPoolReadyEvent.java b/driver-core/src/main/com/mongodb/event/ConnectionPoolReadyEvent.java new file mode 100644 index 00000000000..944d70583c5 --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/ConnectionPoolReadyEvent.java @@ -0,0 +1,55 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.event; + +import com.mongodb.connection.ServerId; + +import static com.mongodb.assertions.Assertions.assertNotNull; + +/** + * An event signifying when a connection pool is ready. + * + * @since 4.3 + */ +public final class ConnectionPoolReadyEvent { + private final ServerId serverId; + + /** + * Constructs a new instance of the event. + * + * @param serverId the server id + */ + public ConnectionPoolReadyEvent(final ServerId serverId) { + this.serverId = assertNotNull(serverId); + } + + /** + * Gets the server id + * + * @return the server id + */ + public ServerId getServerId() { + return serverId; + } + + @Override + public String toString() { + return "ConnectionPoolReadyEvent{" + + "serverId=" + serverId + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/event/ConnectionReadyEvent.java b/driver-core/src/main/com/mongodb/event/ConnectionReadyEvent.java new file mode 100644 index 00000000000..e2c2f38ed45 --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/ConnectionReadyEvent.java @@ -0,0 +1,86 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.event; + +import com.mongodb.connection.ConnectionId; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; + +/** + * An event for when a connection in the pool has finished being established. + * Such a connection is considered available until it becomes {@linkplain ConnectionCheckedOutEvent in use} + * or {@linkplain ConnectionClosedEvent closed}. + * + * @since 4.0 + */ +public final class ConnectionReadyEvent { + private final ConnectionId connectionId; + private final long elapsedTimeNanos; + + /** + * Constructs an instance. + * + * @param connectionId The connection ID. See {@link #getConnectionId()}. + * @param elapsedTimeNanos The time it took to establish the connection. See {@link #getElapsedTime(TimeUnit)}. + * @since 4.11 + */ + public ConnectionReadyEvent(final ConnectionId connectionId, final long elapsedTimeNanos) { + this.connectionId = notNull("connectionId", connectionId); + isTrueArgument("elapsed time is not negative", elapsedTimeNanos >= 0); + this.elapsedTimeNanos = elapsedTimeNanos; + } + + /** + * Gets the connection id + * + * @return the connection id + */ + public ConnectionId getConnectionId() { + return connectionId; + } + + /** + * The time it took to establish the connection. + * More specifically, the time elapsed between emitting a {@link ConnectionCreatedEvent} + * and emitting this event as part of the same checking out. + *
<p> + * Naturally, when establishing a connection is part of checking out, + * this duration is not greater than + * {@link ConnectionCheckedOutEvent#getElapsedTime(TimeUnit)}/{@link ConnectionCheckOutFailedEvent#getElapsedTime(TimeUnit)}.</p>
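A short sketch of observing this establishment (handshake/authentication) time (class name invented; `connectionReady` and `getElapsedTime` are from this patch):

```java
import com.mongodb.event.ConnectionPoolListener;
import com.mongodb.event.ConnectionReadyEvent;

import java.util.concurrent.TimeUnit;

public final class HandshakeTimingListener implements ConnectionPoolListener {
    @Override
    public void connectionReady(final ConnectionReadyEvent event) {
        // elapsed time covers ConnectionCreatedEvent -> ConnectionReadyEvent
        System.out.printf("connection %s established in %d ms%n",
                event.getConnectionId(), event.getElapsedTime(TimeUnit.MILLISECONDS));
    }
}
```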
+ * + * @param timeUnit The time unit of the result. + * {@link TimeUnit#convert(long, TimeUnit)} specifies how the conversion from nanoseconds to {@code timeUnit} is done. + * @return The time it took to establish the connection. + * @since 4.11 + */ + public long getElapsedTime(final TimeUnit timeUnit) { + return timeUnit.convert(elapsedTimeNanos, TimeUnit.NANOSECONDS); + } + + @Override + public String toString() { + return "ConnectionReadyEvent{" + + "connectionId=" + connectionId + + ", server=" + connectionId.getServerId().getAddress() + + ", clusterId=" + connectionId.getServerId().getClusterId() + + ", elapsedTimeNanos=" + elapsedTimeNanos + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/event/ServerClosedEvent.java b/driver-core/src/main/com/mongodb/event/ServerClosedEvent.java new file mode 100644 index 00000000000..d3eac35c002 --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/ServerClosedEvent.java @@ -0,0 +1,55 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.event; + +import com.mongodb.connection.ServerId; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A server opening event. + * + * @since 3.3 + */ +public final class ServerClosedEvent { + private final ServerId serverId; + + /** + * Construct an instance. + * + * @param serverId the non-null serverId + */ + public ServerClosedEvent(final ServerId serverId) { + this.serverId = notNull("serverId", serverId); + } + + /** + * Gets the serverId. + * + * @return the serverId + */ + public ServerId getServerId() { + return serverId; + } + + @Override + public String toString() { + return "ServerClosedEvent{" + + "serverId=" + serverId + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/event/ServerDescriptionChangedEvent.java b/driver-core/src/main/com/mongodb/event/ServerDescriptionChangedEvent.java new file mode 100644 index 00000000000..14685bdb420 --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/ServerDescriptionChangedEvent.java @@ -0,0 +1,85 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.event; + +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerId; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * An event for changes to the description of a server. 
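One common use of `ServerDescriptionChangedEvent` is detecting elections by comparing the previous and new descriptions; a sketch assuming `ServerDescription.isPrimary()` (an existing driver method) and an invented listener class name:

```java
import com.mongodb.connection.ServerDescription;
import com.mongodb.event.ServerDescriptionChangedEvent;
import com.mongodb.event.ServerListener;

public final class PrimaryChangeListener implements ServerListener {
    @Override
    public void serverDescriptionChanged(final ServerDescriptionChangedEvent event) {
        ServerDescription previous = event.getPreviousDescription();
        ServerDescription current = event.getNewDescription();
        if (!previous.isPrimary() && current.isPrimary()) {
            System.out.println(event.getServerId().getAddress() + " became primary");
        }
    }
}
```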
+ * + * @since 3.3 + */ +public final class ServerDescriptionChangedEvent { + + private final ServerId serverId; + private final ServerDescription newDescription; + private final ServerDescription previousDescription; + + /** + * Construct an instance. + * + * @param serverId the non-null serverId + * @param newDescription the non-null new description + * @param previousDescription the non-null previous description + */ + public ServerDescriptionChangedEvent(final ServerId serverId, final ServerDescription newDescription, + final ServerDescription previousDescription) { + this.serverId = notNull("serverId", serverId); + this.newDescription = notNull("newDescription", newDescription); + this.previousDescription = notNull("previousDescription", previousDescription); + } + + + /** + * Gets the serverId. + * + * @return the serverId + */ + public ServerId getServerId() { + return serverId; + } + + /** + * Gets the new server description. + * + * @return the new server description + */ + public ServerDescription getNewDescription() { + return newDescription; + } + + /** + * Gets the previous server description. + * + * @return the previous server description + */ + public ServerDescription getPreviousDescription() { + return previousDescription; + } + + @Override + public String toString() { + return "ServerDescriptionChangedEvent{" + + "serverId=" + serverId + + ", newDescription=" + newDescription + + ", previousDescription=" + previousDescription + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/event/ServerHeartbeatFailedEvent.java b/driver-core/src/main/com/mongodb/event/ServerHeartbeatFailedEvent.java new file mode 100644 index 00000000000..a8468effe4f --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/ServerHeartbeatFailedEvent.java @@ -0,0 +1,109 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.event; + +import com.mongodb.connection.ConnectionId; +import com.mongodb.connection.ServerMonitoringMode; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; + +/** + * An event for server heartbeat failures. + * + * @since 3.3 + */ +public final class ServerHeartbeatFailedEvent { + private final ConnectionId connectionId; + private final long elapsedTimeNanos; + private final boolean awaited; + private final Throwable throwable; + + /** + * Construct an instance. 
+ * + * @param connectionId the non-null connectionId + * @param elapsedTimeNanos the non-negative elapsed time in nanoseconds + * @param awaited true if the response was awaited + * @param throwable the non-null exception that caused the failure + * @since 4.1 + */ + public ServerHeartbeatFailedEvent(final ConnectionId connectionId, final long elapsedTimeNanos, final boolean awaited, + final Throwable throwable) { + this.connectionId = notNull("connectionId", connectionId); + this.awaited = awaited; + isTrueArgument("elapsed time is not negative", elapsedTimeNanos >= 0); + this.elapsedTimeNanos = elapsedTimeNanos; + this.throwable = notNull("throwable", throwable); + } + + /** + * Gets the connectionId. + * + * @return the connectionId + */ + public ConnectionId getConnectionId() { + return connectionId; + } + + /** + * Gets the elapsed time in the given time unit. + * + * @param timeUnit the non-null timeUnit + * + * @return the elapsed time in the given time unit + */ + public long getElapsedTime(final TimeUnit timeUnit) { + return timeUnit.convert(elapsedTimeNanos, TimeUnit.NANOSECONDS); + } + + /** + * Gets whether the heartbeat was awaited. If true, then {@link #getElapsedTime(TimeUnit)} reflects the sum of the round trip time + * to the server and the time that the server waited before sending a response. + * + * @return whether the response was awaited + * @see ServerMonitoringMode#STREAM + * @since 4.1 + * @mongodb.server.release 4.4 + */ + public boolean isAwaited() { + return awaited; + } + + /** + * Gets the exceptions that caused the failure + * + * @return the exception + */ + public Throwable getThrowable() { + return throwable; + } + + @Override + public String toString() { + return "ServerHeartbeatFailedEvent{" + + "connectionId=" + connectionId + + ", server=" + connectionId.getServerId().getAddress() + + ", clusterId=" + connectionId.getServerId().getClusterId() + + ", elapsedTimeNanos=" + elapsedTimeNanos + + ", awaited=" + awaited + + ", throwable=" + throwable + + "} " + super.toString(); + } +} diff --git a/driver-core/src/main/com/mongodb/event/ServerHeartbeatStartedEvent.java b/driver-core/src/main/com/mongodb/event/ServerHeartbeatStartedEvent.java new file mode 100644 index 00000000000..f83dc5ef005 --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/ServerHeartbeatStartedEvent.java @@ -0,0 +1,86 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.event; + +import com.mongodb.connection.ConnectionId; +import com.mongodb.connection.ServerMonitoringMode; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * An event for the start of a server heartbeat. + * + * @since 3.3 + */ +public final class ServerHeartbeatStartedEvent { + private final ConnectionId connectionId; + private final boolean awaited; + + /** + * Construct an instance. 
+ * + * @param connectionId the non-null connnectionId + * @param awaited {@code true} if and only if the heartbeat is for an awaitable `hello` / legacy hello. + * @since 5.1 + */ + public ServerHeartbeatStartedEvent(final ConnectionId connectionId, final boolean awaited) { + this.connectionId = notNull("connectionId", connectionId); + this.awaited = awaited; + } + + /** + * Construct an instance. + * + * @param connectionId the non-null connnectionId + * @deprecated Prefer {@link #ServerHeartbeatStartedEvent(ConnectionId, boolean)}. + * If this constructor is used then {@link #isAwaited()} is {@code false}. + */ + @Deprecated + public ServerHeartbeatStartedEvent(final ConnectionId connectionId) { + this(connectionId, false); + } + + /** + * Gets the connectionId. + * + * @return the connectionId + */ + public ConnectionId getConnectionId() { + return connectionId; + } + + /** + * Gets whether the heartbeat is for an awaitable `hello` / legacy hello. + * + * @return {@code true} if and only if the heartbeat is for an awaitable `hello` / legacy hello. + * @see ServerMonitoringMode#STREAM + * @since 5.1 + */ + public boolean isAwaited() { + return awaited; + } + + @Override + public String toString() { + return "ServerHeartbeatStartedEvent{" + + "connectionId=" + connectionId + + ", server=" + connectionId.getServerId().getAddress() + + ", clusterId=" + connectionId.getServerId().getClusterId() + + ", awaited=" + awaited + + "} " + super.toString(); + } +} diff --git a/driver-core/src/main/com/mongodb/event/ServerHeartbeatSucceededEvent.java b/driver-core/src/main/com/mongodb/event/ServerHeartbeatSucceededEvent.java new file mode 100644 index 00000000000..20e9741275e --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/ServerHeartbeatSucceededEvent.java @@ -0,0 +1,110 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.event; + +import com.mongodb.connection.ConnectionId; +import com.mongodb.connection.ServerMonitoringMode; +import org.bson.BsonDocument; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; + +/** + * An event for successful completion of a server heartbeat. + * + * @since 3.3 + */ +public final class ServerHeartbeatSucceededEvent { + private final ConnectionId connectionId; + private final BsonDocument reply; + private final long elapsedTimeNanos; + private final boolean awaited; + + /** + * Construct an instance. 
+ * + * @param connectionId the non-null connectionId + * @param reply the non-null reply to an hello command + * @param elapsedTimeNanos the non-negative elapsed time in nanoseconds + * @param awaited true if the response was awaited + * @since 4.1 + */ + public ServerHeartbeatSucceededEvent(final ConnectionId connectionId, final BsonDocument reply, final long elapsedTimeNanos, + final boolean awaited) { + this.connectionId = notNull("connectionId", connectionId); + this.reply = notNull("reply", reply); + isTrueArgument("elapsed time is not negative", elapsedTimeNanos >= 0); + this.elapsedTimeNanos = elapsedTimeNanos; + this.awaited = awaited; + } + + /** + * Gets the connectionId. + * + * @return the connectionId + */ + public ConnectionId getConnectionId() { + return connectionId; + } + + /** + * Gets the reply to the hello command executed for this heartbeat. + * + * @return the reply + */ + public BsonDocument getReply() { + return reply; + } + + /** + * Gets the elapsed time in the given time unit. + * + * @param timeUnit the non-null timeUnit + * + * @return the elapsed time in the given time unit + */ + public long getElapsedTime(final TimeUnit timeUnit) { + return timeUnit.convert(elapsedTimeNanos, TimeUnit.NANOSECONDS); + } + + /** + * Gets whether the heartbeat was awaited. If true, then {@link #getElapsedTime(TimeUnit)} reflects the sum of the round trip time + * to the server and the time that the server waited before sending a response. + * + * @return whether the response was awaited + * @see ServerMonitoringMode#STREAM + * @since 4.1 + * @mongodb.server.release 4.4 + */ + public boolean isAwaited() { + return awaited; + } + + @Override + public String toString() { + return "ServerHeartbeatSucceededEvent{" + + "connectionId=" + connectionId + + ", server=" + connectionId.getServerId().getAddress() + + ", clusterId=" + connectionId.getServerId().getClusterId() + + ", reply=" + reply + + ", elapsedTimeNanos=" + elapsedTimeNanos + + ", awaited=" + awaited + + "} "; + } +} diff --git a/driver-core/src/main/com/mongodb/event/ServerListener.java b/driver-core/src/main/com/mongodb/event/ServerListener.java new file mode 100644 index 00000000000..ead6f4e1b51 --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/ServerListener.java @@ -0,0 +1,55 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.event; + +import java.util.EventListener; + +/** + * A listener for server-related events + *
<p> + * See {@link ClusterListener} for the details regarding the order of events and memory synchronization. + * </p>
+ * @see ClusterListener + * @see ServerMonitorListener + * @since 3.3 + */ +public interface ServerListener extends EventListener { + + /** + * Listener for server opening events. + * + * @param event the server opening event + */ + default void serverOpening(ServerOpeningEvent event) { + } + + /** + * Listener for server closed events. + * + * @param event the server closed event + */ + default void serverClosed(ServerClosedEvent event) { + } + + /** + * Listener for server description changed events. + * + * @param event the server description changed event + */ + default void serverDescriptionChanged(ServerDescriptionChangedEvent event) { + } +} diff --git a/driver-core/src/main/com/mongodb/event/ServerMonitorListener.java b/driver-core/src/main/com/mongodb/event/ServerMonitorListener.java new file mode 100644 index 00000000000..1c73ac30510 --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/ServerMonitorListener.java @@ -0,0 +1,57 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +package com.mongodb.event; + + +import java.util.EventListener; + +/** + * A listener for server monitor-related events + *
<p> + * See {@link ClusterListener} for the details regarding the order of events and memory synchronization. + * </p>
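A minimal monitor-listener sketch (class name invented) that logs heartbeat round-trip time, skipping awaited heartbeats because, as documented above, their elapsed time includes server-side wait:

```java
import com.mongodb.event.ServerHeartbeatSucceededEvent;
import com.mongodb.event.ServerMonitorListener;

import java.util.concurrent.TimeUnit;

public final class HeartbeatRttListener implements ServerMonitorListener {
    @Override
    public void serverHeartbeatSucceeded(final ServerHeartbeatSucceededEvent event) {
        if (!event.isAwaited()) { // awaited heartbeats include server-side wait time
            System.out.printf("heartbeat RTT to %s: %d ms%n",
                    event.getConnectionId().getServerId().getAddress(),
                    event.getElapsedTime(TimeUnit.MILLISECONDS));
        }
    }
}
```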
+ * @see ClusterListener + * @see ServerListener + * @since 3.3 + */ +public interface ServerMonitorListener extends EventListener { + + /** + * Listener for server heartbeat started events. + * + * @param event the server heartbeat started event + */ + default void serverHearbeatStarted(ServerHeartbeatStartedEvent event) { + } + + /** + * Listener for server heartbeat succeeded events. + * + * @param event the server heartbeat succeeded event + */ + default void serverHeartbeatSucceeded(ServerHeartbeatSucceededEvent event) { + } + + /** + * Listener for server heartbeat failed events. + * + * @param event the server heartbeat failed event + */ + default void serverHeartbeatFailed(ServerHeartbeatFailedEvent event) { + } +} diff --git a/driver-core/src/main/com/mongodb/event/ServerOpeningEvent.java b/driver-core/src/main/com/mongodb/event/ServerOpeningEvent.java new file mode 100644 index 00000000000..2d13f83cc18 --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/ServerOpeningEvent.java @@ -0,0 +1,55 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.event; + +import com.mongodb.connection.ServerId; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A server opening event. + * + * @since 3.3 + */ +public final class ServerOpeningEvent { + private final ServerId serverId; + + /** + * Construct an instance. + * + * @param serverId the non-null serverId + */ + public ServerOpeningEvent(final ServerId serverId) { + this.serverId = notNull("serverId", serverId); + } + + /** + * Gets the serverId. + * + * @return the serverId + */ + public ServerId getServerId() { + return serverId; + } + + @Override + public String toString() { + return "ServerOpeningEvent{" + + "serverId=" + serverId + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/event/package-info.java b/driver-core/src/main/com/mongodb/event/package-info.java new file mode 100644 index 00000000000..4676b49eb5a --- /dev/null +++ b/driver-core/src/main/com/mongodb/event/package-info.java @@ -0,0 +1,23 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * This package contains cluster and connection event related classes + */ +@NonNullApi +package com.mongodb.event; + +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/ExceptionUtils.java b/driver-core/src/main/com/mongodb/internal/ExceptionUtils.java new file mode 100644 index 00000000000..9ccb5ef0c8b --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/ExceptionUtils.java @@ -0,0 +1,120 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal; + +import com.mongodb.MongoCommandException; +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.MongoSocketException; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonNumber; +import org.bson.BsonString; +import org.bson.BsonValue; + +import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
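A hypothetical caller of the redaction helper defined below (internal API; the wrapper class and method here are invented for illustration):

```java
import com.mongodb.MongoCommandException;
import com.mongodb.internal.ExceptionUtils.MongoCommandExceptionUtils;

final class SafeCommandLogging { // invented wrapper for illustration
    static void logSafely(final MongoCommandException original) {
        // redacted(...) keeps only "code", "codeName" and "errorLabels" from the response
        MongoCommandException safe = MongoCommandExceptionUtils.redacted(original);
        System.err.println(safe.getMessage());
    }
}
```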
+ */ +public final class ExceptionUtils { + + public static boolean isMongoSocketException(final Throwable e) { + return e instanceof MongoSocketException; + } + + public static boolean isOperationTimeoutFromSocketException(final Throwable e) { + return e instanceof MongoOperationTimeoutException && e.getCause() instanceof MongoSocketException; + } + + public static final class MongoCommandExceptionUtils { + public static int extractErrorCode(final BsonDocument response) { + return extractErrorCodeAsBson(response).intValue(); + } + + public static String extractErrorCodeName(final BsonDocument response) { + return extractErrorCodeNameAsBson(response).getValue(); + } + + public static BsonArray extractErrorLabelsAsBson(final BsonDocument response) { + return response.getArray("errorLabels", new BsonArray()); + } + + private static BsonNumber extractErrorCodeAsBson(final BsonDocument response) { + return response.getNumber("code", new BsonInt32(-1)); + } + + private static BsonString extractErrorCodeNameAsBson(final BsonDocument response) { + return response.getString("codeName", new BsonString("")); + } + + /** + * Constructs a {@link MongoCommandException} with the data from the {@code original} redacted for security purposes. + */ + public static MongoCommandException redacted(final MongoCommandException original) { + BsonDocument originalResponse = original.getResponse(); + BsonDocument redactedResponse = new BsonDocument(); + for (SecurityInsensitiveResponseField field : SecurityInsensitiveResponseField.values()) { + redactedResponse.append(field.fieldName(), field.fieldValue(originalResponse)); + } + MongoCommandException result = new MongoCommandException(redactedResponse, original.getServerAddress()); + result.setStackTrace(original.getStackTrace()); + return result; + } + + @VisibleForTesting(otherwise = PRIVATE) + public enum SecurityInsensitiveResponseField { + CODE("code", MongoCommandExceptionUtils::extractErrorCodeAsBson), + CODE_NAME("codeName", MongoCommandExceptionUtils::extractErrorCodeNameAsBson), + ERROR_LABELS("errorLabels", MongoCommandExceptionUtils::extractErrorLabelsAsBson); + + private final String fieldName; + private final Function fieldValueExtractor; + + SecurityInsensitiveResponseField(final String fieldName, final Function fieldValueExtractor) { + this.fieldName = fieldName; + this.fieldValueExtractor = fieldValueExtractor; + } + + String fieldName() { + return fieldName; + } + + BsonValue fieldValue(final BsonDocument response) { + return fieldValueExtractor.apply(response); + } + + @VisibleForTesting(otherwise = PRIVATE) + public static Set fieldNames() { + return Stream.of(SecurityInsensitiveResponseField.values()) + .map(SecurityInsensitiveResponseField::fieldName) + .collect(Collectors.toSet()); + } + } + + private MongoCommandExceptionUtils() { + } + } + + private ExceptionUtils() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/ExpirableValue.java b/driver-core/src/main/com/mongodb/internal/ExpirableValue.java new file mode 100644 index 00000000000..06fc16a61ac --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/ExpirableValue.java @@ -0,0 +1,71 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal; + +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.lang.Nullable; + +import java.time.Duration; +import java.util.Optional; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; + +/** + * A value associated with a lifetime. + * + *
<p>Instances are shallowly immutable.</p> + * <p>This class is not part of the public API and may be removed or changed at any time</p>
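A usage sketch for the class defined just below (the demo class is invented):

```java
import com.mongodb.internal.ExpirableValue;

import java.time.Duration;

final class ExpirableValueDemo { // invented demo class
    public static void main(final String[] args) {
        ExpirableValue<String> cached = ExpirableValue.expirable("token", Duration.ofMinutes(5));
        // present until the five-minute lifetime elapses, then Optional.empty()
        System.out.println(cached.getValue().orElse("expired"));
    }
}
```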
+ */ +@ThreadSafe +public final class ExpirableValue { + private final T value; + private final long deadline; + + public static ExpirableValue expired() { + return new ExpirableValue<>(null, Duration.ZERO, System.nanoTime()); + } + + public static ExpirableValue expirable(final T value, final Duration lifetime) { + return expirable(value, lifetime, System.nanoTime()); + } + + public static ExpirableValue expirable(final T value, final Duration lifetime, final long startNanoTime) { + return new ExpirableValue<>(assertNotNull(value), assertNotNull(lifetime), startNanoTime); + } + + private ExpirableValue(@Nullable final T value, final Duration lifetime, final long currentNanoTime) { + this.value = value; + deadline = currentNanoTime + lifetime.toNanos(); + } + + /** + * Returns {@link Optional#empty()} if the value is expired. Otherwise, returns an {@link Optional} describing the value. + */ + public Optional getValue() { + return getValue(System.nanoTime()); + } + + @VisibleForTesting(otherwise = PRIVATE) + Optional getValue(final long currentNanoTime) { + if (currentNanoTime - deadline >= 0) { + return Optional.empty(); + } else { + return Optional.of(value); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/HexUtils.java b/driver-core/src/main/com/mongodb/internal/HexUtils.java new file mode 100644 index 00000000000..a95e9019229 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/HexUtils.java @@ -0,0 +1,82 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal; + +import java.nio.ByteBuffer; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
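A usage sketch for the utility defined just below (the demo class is invented):

```java
import com.mongodb.internal.HexUtils;

import java.nio.charset.StandardCharsets;

final class HexUtilsDemo { // invented demo class
    public static void main(final String[] args) {
        byte[] data = "abc".getBytes(StandardCharsets.UTF_8);
        System.out.println(HexUtils.toHex(data));  // prints 616263
        System.out.println(HexUtils.hexMD5(data)); // hex-encoded MD5 digest of the bytes
    }
}
```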
+ */ +public final class HexUtils { + /** + * Converts the given byte buffer to a hexadecimal string using {@link java.lang.Integer#toHexString(int)}. + * + * @param bytes the bytes to convert to hex + * @return a String containing the hex representation of the given bytes. + */ + public static String toHex(final byte[] bytes) { + StringBuilder sb = new StringBuilder(); + for (final byte b : bytes) { + String s = Integer.toHexString(0xff & b); + + if (s.length() < 2) { + sb.append("0"); + } + sb.append(s); + } + return sb.toString(); + } + + /** + * Produce hex representation of the MD5 digest of a byte array. + * + * @param data bytes to digest + * @return hex string of the MD5 digest + */ + public static String hexMD5(final byte[] data) { + try { + MessageDigest md5 = MessageDigest.getInstance("MD5"); + + md5.reset(); + md5.update(data); + byte[] digest = md5.digest(); + + return toHex(digest); + } catch (NoSuchAlgorithmException e) { + throw new RuntimeException("Error - this implementation of Java doesn't support MD5."); + } + } + + /** + * Produce hex representation of the MD5 digest of a byte array. + * + * @param buf byte buffer containing the bytes to digest + * @param offset the position to start reading bytes from + * @param len the number of bytes to read from the buffer + * @return hex string of the MD5 digest + */ + public static String hexMD5(final ByteBuffer buf, final int offset, final int len) { + byte[] b = new byte[len]; + for (int i = 0; i < len; i++) { + b[i] = buf.get(offset + i); + } + + return hexMD5(b); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/IgnorableRequestContext.java b/driver-core/src/main/com/mongodb/internal/IgnorableRequestContext.java new file mode 100644 index 00000000000..f387bc62f08 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/IgnorableRequestContext.java @@ -0,0 +1,71 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal; + +import com.mongodb.RequestContext; + +import java.util.Map; +import java.util.stream.Stream; + +/** + * A {@link RequestContext} that can be ignored by the driver. Useful to ensure that we always + * have a non-null {@link RequestContext} to pass around the driver. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class IgnorableRequestContext implements RequestContext { + + public static final IgnorableRequestContext INSTANCE = new IgnorableRequestContext(); + + private IgnorableRequestContext() { + } + + @Override + public T get(final Object key) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean hasKey(final Object key) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isEmpty() { + throw new UnsupportedOperationException(); + } + + @Override + public void put(final Object key, final Object value) { + throw new UnsupportedOperationException(); + } + + @Override + public void delete(final Object key) { + throw new UnsupportedOperationException(); + } + + @Override + public int size() { + throw new UnsupportedOperationException(); + } + + @Override + public Stream> stream() { + throw new UnsupportedOperationException(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/Iterables.java b/driver-core/src/main/com/mongodb/internal/Iterables.java new file mode 100644 index 00000000000..571b1bcb155 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/Iterables.java @@ -0,0 +1,106 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal; + +import com.mongodb.lang.Nullable; + +import java.util.Iterator; +import java.util.Objects; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import java.util.stream.StreamSupport; + +import static com.mongodb.assertions.Assertions.fail; +import static java.util.Arrays.asList; +import static java.util.Collections.singleton; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
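A usage sketch for `Iterables.concat`, defined just below (the demo class is invented):

```java
import com.mongodb.internal.Iterables;

final class IterablesDemo { // invented demo class
    public static void main(final String[] args) {
        // lazily prepends a first element to the rest; prints [a, b, c]
        Iterable<String> all = Iterables.concat("a", "b", "c");
        System.out.println(all);
    }
}
```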
+ */ +public final class Iterables { + @SafeVarargs + @SuppressWarnings("varargs") + public static Iterable concat(@Nullable final T first, @Nullable final T... more) { + return more == null ? singleton(first) : concat(first, asList(more)); + } + + public static Iterable concat(@Nullable final T first, final Iterable more) { + return new Iterable() { + @Override + public Iterator iterator() { + return new ConcatIterator<>(first, more); + } + + @Override + public String toString() { + return '[' + + Stream.concat(Stream.of(first), StreamSupport.stream(more.spliterator(), false)) + .map(Objects::toString) + .collect(Collectors.joining(", ")) + + ']'; + } + }; + } + + private Iterables() { + throw fail(); + } + + private static class ConcatIterator implements Iterator { + private static final Object NONE = new Object(); + + @Nullable + private T first; + @Nullable + private Iterator moreIterator; + private final Iterable more; + + ConcatIterator(@Nullable final T first, final Iterable more) { + this.first = first; + this.more = more; + } + + @Override + public boolean hasNext() { + return firstNotConsumed() || moreIterator().hasNext(); + } + + @Override + @Nullable + public T next() { + return firstNotConsumed() ? consumeFirst() : moreIterator().next(); + } + + private boolean firstNotConsumed() { + return first != NONE; + } + + @SuppressWarnings("unchecked") + @Nullable + private T consumeFirst() { + T result = first; + first = (T) NONE; + return result; + } + + private Iterator moreIterator() { + if (moreIterator == null) { + moreIterator = more.iterator(); + } + return moreIterator; + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/Locks.java b/driver-core/src/main/com/mongodb/internal/Locks.java new file mode 100644 index 00000000000..042fc9fd69f --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/Locks.java @@ -0,0 +1,98 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal; + +import com.mongodb.MongoInterruptedException; +import com.mongodb.internal.function.CheckedSupplier; + +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.StampedLock; +import java.util.function.Supplier; + +import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException; + +/** + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
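+ *
+ * <p>A hedged usage sketch (the lock and the supplied computation are illustrative):
+ *
+ * <pre>
+ * Lock lock = new ReentrantLock();
+ * int answer = Locks.withLock(lock, () -> 42);
+ * </pre>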
+ */
+public final class Locks {
+    public static void withLock(final Lock lock, final Runnable action) {
+        withLock(lock, () -> {
+            action.run();
+            return null;
+        });
+    }
+
+    public static void withInterruptibleLock(final StampedLock lock, final Runnable runnable) throws MongoInterruptedException {
+        long stamp;
+        try {
+            stamp = lock.writeLockInterruptibly();
+        } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+            throw new MongoInterruptedException("Interrupted waiting for lock", e);
+        }
+        try {
+            runnable.run();
+        } finally {
+            lock.unlockWrite(stamp);
+        }
+    }
+
+    public static <V> V withLock(final Lock lock, final Supplier<V> supplier) {
+        return checkedWithLock(lock, supplier::get);
+    }
+
+    public static <V, E extends Exception> V checkedWithLock(final Lock lock, final CheckedSupplier<V, E> supplier) throws E {
+        lock.lock();
+        try {
+            return supplier.get();
+        } finally {
+            lock.unlock();
+        }
+    }
+
+    public static void withInterruptibleLock(final Lock lock, final Runnable action) throws MongoInterruptedException {
+        withInterruptibleLock(lock, () -> {
+            action.run();
+            return null;
+        });
+    }
+
+    public static <V> V withInterruptibleLock(final Lock lock, final Supplier<V> supplier) throws MongoInterruptedException {
+        return checkedWithInterruptibleLock(lock, supplier::get);
+    }
+
+    public static <V, E extends Exception> V checkedWithInterruptibleLock(final Lock lock, final CheckedSupplier<V, E> supplier)
+            throws MongoInterruptedException, E {
+        lockInterruptibly(lock);
+        try {
+            return supplier.get();
+        } finally {
+            lock.unlock();
+        }
+    }
+
+    public static void lockInterruptibly(final Lock lock) throws MongoInterruptedException {
+        try {
+            lock.lockInterruptibly();
+        } catch (InterruptedException e) {
+            throw interruptAndCreateMongoInterruptedException("Interrupted waiting for lock", e);
+        }
+    }
+
+    private Locks() {
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/internal/ResourceUtil.java b/driver-core/src/main/com/mongodb/internal/ResourceUtil.java
new file mode 100644
index 00000000000..6c9c161d3ce
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/ResourceUtil.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal;
+
+import org.bson.ByteBuf;
+
+/**
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
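+ *
+ * <p>A hedged usage sketch ({@code buffers} is an illustrative collection of
+ * {@code ByteBuf} instances):
+ *
+ * <pre>
+ * ResourceUtil.release(buffers); // releases each buffer; null elements are skipped
+ * </pre>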
+ */
+public final class ResourceUtil {
+    public static void release(final Iterable<? extends ByteBuf> buffers) {
+        // we assume `ByteBuf::release` does not complete abruptly
+        buffers.forEach(buffer -> {
+            if (buffer != null) {
+                buffer.release();
+            }
+        });
+    }
+
+    private ResourceUtil() {
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/internal/TimeoutContext.java b/driver-core/src/main/com/mongodb/internal/TimeoutContext.java
new file mode 100644
index 00000000000..16b4a1a87a7
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/TimeoutContext.java
@@ -0,0 +1,472 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.internal;
+
+import com.mongodb.MongoClientException;
+import com.mongodb.MongoOperationTimeoutException;
+import com.mongodb.internal.async.AsyncRunnable;
+import com.mongodb.internal.async.SingleResultCallback;
+import com.mongodb.internal.connection.CommandMessage;
+import com.mongodb.internal.time.StartTime;
+import com.mongodb.internal.time.Timeout;
+import com.mongodb.lang.Nullable;
+import com.mongodb.session.ClientSession;
+
+import java.util.Objects;
+import java.util.Optional;
+import java.util.function.LongConsumer;
+
+import static com.mongodb.assertions.Assertions.assertNotNull;
+import static com.mongodb.assertions.Assertions.assertNull;
+import static com.mongodb.assertions.Assertions.isTrue;
+import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE;
+import static com.mongodb.internal.async.AsyncRunnable.beginAsync;
+import static com.mongodb.internal.time.Timeout.ZeroSemantics.ZERO_DURATION_MEANS_INFINITE;
+import static java.util.Optional.empty;
+import static java.util.Optional.ofNullable;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+
+/**
+ * Timeout Context.
+ *
+ * <p>The context for handling timeouts in relation to the Client Side Operation Timeout specification.</p>
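+ *
+ * <p>A hedged construction sketch (the settings values are illustrative milliseconds):
+ *
+ * <pre>
+ * TimeoutSettings settings = new TimeoutSettings(30_000, 10_000, 0, 5_000L, 120_000);
+ * TimeoutContext context = new TimeoutContext(settings);
+ * boolean csot = context.hasTimeoutMS(); // true, since a timeoutMS was supplied
+ * </pre>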
+ */
+public class TimeoutContext {
+
+    private final boolean isMaintenanceContext;
+    private final TimeoutSettings timeoutSettings;
+
+    @Nullable
+    private Timeout timeout;
+    @Nullable
+    private Timeout computedServerSelectionTimeout;
+    private long minRoundTripTimeMS = 0;
+
+    @Nullable
+    private MaxTimeSupplier maxTimeSupplier = null;
+
+    public static MongoOperationTimeoutException createMongoRoundTripTimeoutException() {
+        return createMongoTimeoutException("Remaining timeoutMS is less than or equal to the server's minimum round trip time.");
+    }
+
+    public static MongoOperationTimeoutException createMongoTimeoutException(final String message) {
+        return new MongoOperationTimeoutException(message);
+    }
+
+    public static <T> T throwMongoTimeoutException(final String message) {
+        throw new MongoOperationTimeoutException(message);
+    }
+    public static <T> T throwMongoTimeoutException() {
+        throw new MongoOperationTimeoutException("The operation exceeded the timeout limit.");
+    }
+
+    public static MongoOperationTimeoutException createMongoTimeoutException(final Throwable cause) {
+        return createMongoTimeoutException("Operation exceeded the timeout limit: " + cause.getMessage(), cause);
+    }
+
+    public static MongoOperationTimeoutException createMongoTimeoutException(final String message, @Nullable final Throwable cause) {
+        if (cause instanceof MongoOperationTimeoutException) {
+            return (MongoOperationTimeoutException) cause;
+        }
+        return new MongoOperationTimeoutException(message, cause);
+    }
+
+    public static TimeoutContext createMaintenanceTimeoutContext(final TimeoutSettings timeoutSettings) {
+        return new TimeoutContext(true, timeoutSettings, startTimeout(timeoutSettings.getTimeoutMS()));
+    }
+
+    public static TimeoutContext createTimeoutContext(final ClientSession session, final TimeoutSettings timeoutSettings) {
+        TimeoutContext sessionTimeoutContext = session.getTimeoutContext();
+
+        if (sessionTimeoutContext != null) {
+            TimeoutSettings sessionTimeoutSettings = sessionTimeoutContext.timeoutSettings;
+            if (timeoutSettings.getGenerationId() > sessionTimeoutSettings.getGenerationId()) {
+                throw new MongoClientException("Cannot change the timeoutMS during a transaction.");
+            }
+
+            // Check for any legacy operation timeouts
+            if (sessionTimeoutSettings.getTimeoutMS() == null) {
+                if (timeoutSettings.getMaxTimeMS() != 0) {
+                    sessionTimeoutSettings = sessionTimeoutSettings.withMaxTimeMS(timeoutSettings.getMaxTimeMS());
+                }
+                if (timeoutSettings.getMaxAwaitTimeMS() != 0) {
+                    sessionTimeoutSettings = sessionTimeoutSettings.withMaxAwaitTimeMS(timeoutSettings.getMaxAwaitTimeMS());
+                }
+                if (timeoutSettings.getMaxCommitTimeMS() != null) {
+                    sessionTimeoutSettings = sessionTimeoutSettings.withMaxCommitMS(timeoutSettings.getMaxCommitTimeMS());
+                }
+                return new TimeoutContext(sessionTimeoutSettings);
+            }
+            return sessionTimeoutContext;
+        }
+        return new TimeoutContext(timeoutSettings);
+    }
+
+    // Creates a copy of the timeout context that can be reset without resetting the original.
+    public TimeoutContext copyTimeoutContext() {
+        return new TimeoutContext(getTimeoutSettings(), getTimeout());
+    }
+
+    public TimeoutContext(final TimeoutSettings timeoutSettings) {
+        this(false, timeoutSettings, startTimeout(timeoutSettings.getTimeoutMS()));
+    }
+
+    private TimeoutContext(final TimeoutSettings timeoutSettings, @Nullable final Timeout timeout) {
+        this(false, timeoutSettings, timeout);
+    }
+
+    private TimeoutContext(final boolean isMaintenanceContext, final TimeoutSettings timeoutSettings, @Nullable final Timeout timeout) {
+        this.isMaintenanceContext = isMaintenanceContext;
+        this.timeoutSettings = timeoutSettings;
+        this.timeout = timeout;
+    }
+
+    /**
+     * Allows for the differentiation between users explicitly setting a global operation timeout via {@code timeoutMS}.
+     *
+     * @return true if a timeout has been set.
+     */
+    public boolean hasTimeoutMS() {
+        return timeoutSettings.getTimeoutMS() != null;
+    }
+
+    /**
+     * Runs the runnable if the timeout is expired.
+     * @param onExpired the runnable to run
+     */
+    public void onExpired(final Runnable onExpired) {
+        Timeout.nullAsInfinite(timeout).onExpired(onExpired);
+    }
+
+    /**
+     * Sets the recent min round trip time
+     * @param minRoundTripTimeMS the min round trip time
+     * @return this
+     */
+    public TimeoutContext minRoundTripTimeMS(final long minRoundTripTimeMS) {
+        isTrue("'minRoundTripTimeMS' must be a positive number", minRoundTripTimeMS >= 0);
+        this.minRoundTripTimeMS = minRoundTripTimeMS;
+        return this;
+    }
+
+    @Nullable
+    public Timeout timeoutIncludingRoundTrip() {
+        return timeout == null ? null : timeout.shortenBy(minRoundTripTimeMS, MILLISECONDS);
+    }
+
+    /**
+     * Returns the remaining {@code timeoutMS} if set or the {@code alternativeTimeoutMS}.
+     *
+     * @param alternativeTimeoutMS the alternative timeout.
+     * @return timeout to use.
+     */
+    public long timeoutOrAlternative(final long alternativeTimeoutMS) {
+        if (timeout == null) {
+            return alternativeTimeoutMS;
+        } else {
+            return timeout.call(MILLISECONDS,
+                    () -> 0L,   // an infinite timeoutMS is reported as 0, which conventionally means "no limit"
+                    (ms) -> ms, // the remaining time in milliseconds
+                    () -> throwMongoTimeoutException("The operation exceeded the timeout limit."));
+        }
+    }
+
+    public TimeoutSettings getTimeoutSettings() {
+        return timeoutSettings;
+    }
+
+    public long getMaxAwaitTimeMS() {
+        return timeoutSettings.getMaxAwaitTimeMS();
+    }
+
+    public void runMaxTimeMS(final LongConsumer onRemaining) {
+        if (maxTimeSupplier != null) {
+            long maxTimeMS = maxTimeSupplier.get();
+            if (maxTimeMS > 0) {
+                runMinTimeout(onRemaining, maxTimeMS);
+            }
+            return;
+        }
+        if (timeout == null) {
+            runWithFixedTimeout(timeoutSettings.getMaxTimeMS(), onRemaining);
+            return;
+        }
+        assertNotNull(timeoutIncludingRoundTrip())
+                .run(MILLISECONDS,
+                        () -> {},
+                        onRemaining,
+                        () -> {
+                            throw createMongoRoundTripTimeoutException();
+                        });
+
+    }
+
+    private void runMinTimeout(final LongConsumer onRemaining, final long fixedMs) {
+        Timeout timeout = timeoutIncludingRoundTrip();
+        if (timeout != null) {
+            timeout.run(MILLISECONDS, () -> {
+                onRemaining.accept(fixedMs);
+            },
+            (remainingMs) -> {
+                onRemaining.accept(Math.min(remainingMs, fixedMs));
+            }, () -> {
+                throwMongoTimeoutException("The operation exceeded the timeout limit.");
+            });
+        } else {
+            onRemaining.accept(fixedMs);
+        }
+    }
+
+    private static void runWithFixedTimeout(final long ms, final LongConsumer onRemaining) {
+        if (ms != 0) {
+            onRemaining.accept(ms);
+        }
+    }
+
+    public void resetToDefaultMaxTime() {
+        this.maxTimeSupplier = null;
+    }
+
+    /**
+     * The override will be provided as the remaining value in
+     * {@link #runMaxTimeMS}, where 0 is ignored. This is useful for setting timeout
+     * in {@link CommandMessage} as an extra element before we send it to the server.
+     *
+     * <p>
+     * NOTE: Suitable for static user-defined values only (i.e. MaxAwaitTimeMS),
+     * not for running timeouts that adjust dynamically (CSOT).
+     *
+     * If the remaining CSOT timeout is less than this static timeout, then the CSOT timeout will be used.
+     *
+     */
+    public void setMaxTimeOverride(final long maxTimeMS) {
+        this.maxTimeSupplier = () -> maxTimeMS;
+    }
+
+    /**
+     * Disable the maxTimeMS override. This way the maxTimeMS will not
+     * be appended to the command in the {@link CommandMessage}.
+     */
+    public void disableMaxTimeOverride() {
+        this.maxTimeSupplier = () -> 0;
+    }
+
+    /**
+     * The override will be provided as the remaining value in
+     * {@link #runMaxTimeMS}, where 0 is ignored.
+     */
+    public void setMaxTimeOverrideToMaxCommitTime() {
+        this.maxTimeSupplier = () -> getMaxCommitTimeMS();
+    }
+
+    @VisibleForTesting(otherwise = PRIVATE)
+    public long getMaxCommitTimeMS() {
+        Long maxCommitTimeMS = timeoutSettings.getMaxCommitTimeMS();
+        return timeoutOrAlternative(maxCommitTimeMS != null ? maxCommitTimeMS : 0);
+    }
+
+    public long getReadTimeoutMS() {
+        return timeoutOrAlternative(timeoutSettings.getReadTimeoutMS());
+    }
+
+    public long getWriteTimeoutMS() {
+        return timeoutOrAlternative(0);
+    }
+
+    public int getConnectTimeoutMs() {
+        final long connectTimeoutMS = getTimeoutSettings().getConnectTimeoutMS();
+        if (isMaintenanceContext) {
+            return (int) connectTimeoutMS;
+        }
+
+        return Math.toIntExact(Timeout.nullAsInfinite(timeout).call(MILLISECONDS,
+                () -> connectTimeoutMS,
+                (ms) -> connectTimeoutMS == 0 ? ms : Math.min(ms, connectTimeoutMS),
+                () -> throwMongoTimeoutException("The operation exceeded the timeout limit.")));
+    }
+
+    /**
+     * @see #hasTimeoutMS()
+     * @see #doWithResetTimeout(Runnable)
+     * @see #doWithResetTimeout(AsyncRunnable, SingleResultCallback)
+     */
+    public void resetTimeoutIfPresent() {
+        getAndResetTimeoutIfPresent();
+    }
+
+    /**
+     * @see #hasTimeoutMS()
+     * @return A {@linkplain Optional#isPresent() non-empty} previous {@linkplain Timeout} iff {@link #hasTimeoutMS()},
+     * i.e., iff it was reset.
+     */
+    private Optional<Timeout> getAndResetTimeoutIfPresent() {
+        Timeout result = timeout;
+        if (hasTimeoutMS()) {
+            timeout = startTimeout(timeoutSettings.getTimeoutMS());
+            return ofNullable(result);
+        }
+        return empty();
+    }
+
+    /**
+     * @see #resetTimeoutIfPresent()
+     */
+    public void doWithResetTimeout(final Runnable action) {
+        Optional<Timeout> originalTimeout = getAndResetTimeoutIfPresent();
+        try {
+            action.run();
+        } finally {
+            originalTimeout.ifPresent(original -> timeout = original);
+        }
+    }
+
+    /**
+     * @see #resetTimeoutIfPresent()
+     */
+    public void doWithResetTimeout(final AsyncRunnable action, final SingleResultCallback<Void> callback) {
+        beginAsync().thenRun(c -> {
+            Optional<Timeout> originalTimeout = getAndResetTimeoutIfPresent();
+            beginAsync().thenRun(c2 -> {
+                action.finish(c2);
+            }).thenAlwaysRunAndFinish(() -> {
+                originalTimeout.ifPresent(original -> timeout = original);
+            }, c);
+        }).finish(callback);
+    }
+
+    /**
+     * Resets the timeout if this timeout context is being used by pool maintenance
+     */
+    public void resetMaintenanceTimeout() {
+        if (!isMaintenanceContext) {
+            return;
+        }
+        timeout = Timeout.nullAsInfinite(timeout).call(NANOSECONDS,
+                () -> timeout,
+                (ms) -> startTimeout(timeoutSettings.getTimeoutMS()),
+                () -> startTimeout(timeoutSettings.getTimeoutMS()));
+    }
+
+    public TimeoutContext withAdditionalReadTimeout(final int additionalReadTimeout) {
+        // Only used outside timeoutMS usage
+        assertNull(timeout);
+
+        // Check existing read timeout is infinite
+        if (timeoutSettings.getReadTimeoutMS() == 0) {
+            return this;
+        }
+
+        long newReadTimeout = getReadTimeoutMS() + additionalReadTimeout;
+        return new TimeoutContext(timeoutSettings.withReadTimeoutMS(newReadTimeout > 0 ? newReadTimeout : Long.MAX_VALUE));
+    }
+
+    @Override
+    public String toString() {
+        return "TimeoutContext{"
+                + "isMaintenanceContext=" + isMaintenanceContext
+                + ", timeoutSettings=" + timeoutSettings
+                + ", timeout=" + timeout
+                + ", minRoundTripTimeMS=" + minRoundTripTimeMS
+                + '}';
+    }
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        final TimeoutContext that = (TimeoutContext) o;
+        return isMaintenanceContext == that.isMaintenanceContext
+                && minRoundTripTimeMS == that.minRoundTripTimeMS
+                && Objects.equals(timeoutSettings, that.timeoutSettings)
+                && Objects.equals(timeout, that.timeout);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(isMaintenanceContext, timeoutSettings, timeout, minRoundTripTimeMS);
+    }
+
+    @Nullable
+    public static Timeout startTimeout(@Nullable final Long timeoutMS) {
+        if (timeoutMS != null) {
+            return Timeout.expiresIn(timeoutMS, MILLISECONDS, ZERO_DURATION_MEANS_INFINITE);
+        }
+        return null;
+    }
+
+    /**
+     * Returns the computed server selection timeout
+     *
+     * <p>Caches the computed server selection timeout if:
+     * <ul>
+     *     <li>not in a maintenance context</li>
+     *     <li>there is a timeoutMS, so to keep the same legacy behavior.</li>
+     *     <li>the server selection timeout is less than the remaining overall timeout.</li>
+     * </ul>
+ * + * @return the timeout context + */ + public Timeout computeServerSelectionTimeout() { + Timeout serverSelectionTimeout = StartTime.now() + .timeoutAfterOrInfiniteIfNegative(getTimeoutSettings().getServerSelectionTimeoutMS(), MILLISECONDS); + + + if (isMaintenanceContext || !hasTimeoutMS()) { + return serverSelectionTimeout; + } + + if (timeout != null && Timeout.earliest(serverSelectionTimeout, timeout) == timeout) { + return timeout; + } + + computedServerSelectionTimeout = serverSelectionTimeout; + return computedServerSelectionTimeout; + } + + /** + * Returns the timeout context to use for the handshake process + * + * @return a new timeout context with the cached computed server selection timeout if available or this + */ + public TimeoutContext withComputedServerSelectionTimeoutContext() { + if (this.hasTimeoutMS() && computedServerSelectionTimeout != null) { + return new TimeoutContext(false, timeoutSettings, computedServerSelectionTimeout); + } + return this; + } + + public Timeout startMaxWaitTimeout(final StartTime checkoutStart) { + if (hasTimeoutMS()) { + return assertNotNull(timeout); + } + final long ms = getTimeoutSettings().getMaxWaitTimeMS(); + return checkoutStart.timeoutAfterOrInfiniteIfNegative(ms, MILLISECONDS); + } + + @Nullable + public Timeout getTimeout() { + return timeout; + } + + public interface MaxTimeSupplier { + long get(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/TimeoutSettings.java b/driver-core/src/main/com/mongodb/internal/TimeoutSettings.java new file mode 100644 index 00000000000..e1f0bc0b795 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/TimeoutSettings.java @@ -0,0 +1,270 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal; + +import com.mongodb.MongoClientSettings; +import com.mongodb.lang.Nullable; + +import java.util.Objects; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; + +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +/** + * Timeout Settings. + * + *
<p>Includes all client based timeouts</p>
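+ *
+ * <p>A hedged sketch of building settings with the wither methods (values are
+ * illustrative milliseconds):
+ *
+ * <pre>
+ * TimeoutSettings settings = new TimeoutSettings(30_000, 10_000, 0, 5_000L, 120_000)
+ *         .withMaxAwaitTimeMS(1_000);
+ * </pre>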
+ */ +public class TimeoutSettings { + private static final AtomicLong NEXT_ID = new AtomicLong(0); + private final long generationId; + private final long serverSelectionTimeoutMS; + private final long connectTimeoutMS; + @Nullable + private final Long timeoutMS; + + // Deprecated configuration timeout options + private final long readTimeoutMS; // aka socketTimeoutMS + private final long maxWaitTimeMS; // aka waitQueueTimeoutMS + @Nullable + private final Long wTimeoutMS; + + // Deprecated options for CRUD methods + private final long maxTimeMS; + private final long maxAwaitTimeMS; + @Nullable + private final Long maxCommitTimeMS; + + public static final TimeoutSettings DEFAULT = create(MongoClientSettings.builder().build()); + + @Nullable + public static Long convertAndValidateTimeoutNullable(@Nullable final Long timeout, final TimeUnit timeUnit) { + return timeout == null ? null : convertAndValidateTimeout(timeout, timeUnit, "timeout"); + } + + public static long convertAndValidateTimeout(final long timeout, final TimeUnit timeUnit) { + return convertAndValidateTimeout(timeout, timeUnit, "timeout"); + } + + public static long convertAndValidateTimeout(final long timeout, final TimeUnit timeUnit, final String fieldName) { + return isTrueArgument(fieldName + " was too small. After conversion it was rounded to 0 milliseconds, " + + " which would result in an unintended infinite timeout.", + () -> MILLISECONDS.convert(timeout, timeUnit), + (timeoutMS) -> timeout == 0 && timeoutMS == 0 || timeoutMS > 0); + } + + @SuppressWarnings("deprecation") + public static TimeoutSettings create(final MongoClientSettings settings) { + return new TimeoutSettings( + settings.getClusterSettings().getServerSelectionTimeout(TimeUnit.MILLISECONDS), + settings.getSocketSettings().getConnectTimeout(TimeUnit.MILLISECONDS), + settings.getSocketSettings().getReadTimeout(TimeUnit.MILLISECONDS), + settings.getTimeout(TimeUnit.MILLISECONDS), + settings.getConnectionPoolSettings().getMaxWaitTime(TimeUnit.MILLISECONDS)); + } + + public static TimeoutSettings createHeartbeatSettings(final MongoClientSettings settings) { + return new TimeoutSettings( + settings.getClusterSettings().getServerSelectionTimeout(TimeUnit.MILLISECONDS), + settings.getHeartbeatSocketSettings().getConnectTimeout(TimeUnit.MILLISECONDS), + settings.getHeartbeatSocketSettings().getReadTimeout(TimeUnit.MILLISECONDS), + settings.getTimeout(TimeUnit.MILLISECONDS), + settings.getConnectionPoolSettings().getMaxWaitTime(TimeUnit.MILLISECONDS)); + } + + public TimeoutSettings(final long serverSelectionTimeoutMS, final long connectTimeoutMS, final long readTimeoutMS, + @Nullable final Long timeoutMS, final long maxWaitTimeMS) { + this(-1, timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, 0, 0, null, null, maxWaitTimeMS); + } + + TimeoutSettings(@Nullable final Long timeoutMS, final long serverSelectionTimeoutMS, final long connectTimeoutMS, + final long readTimeoutMS, final long maxAwaitTimeMS, final long maxTimeMS, @Nullable final Long maxCommitTimeMS, + @Nullable final Long wTimeoutMS, final long maxWaitTimeMS) { + this(timeoutMS != null ? 
NEXT_ID.incrementAndGet() : -1, timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, + maxAwaitTimeMS, maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS); + } + + private TimeoutSettings(final long generationId, @Nullable final Long timeoutMS, final long serverSelectionTimeoutMS, + final long connectTimeoutMS, final long readTimeoutMS, final long maxAwaitTimeMS, final long maxTimeMS, + @Nullable final Long maxCommitTimeMS, @Nullable final Long wTimeoutMS, final long maxWaitTimeMS) { + + isTrueArgument("timeoutMS must be >= 0", timeoutMS == null || timeoutMS >= 0); + isTrueArgument("maxAwaitTimeMS must be >= 0", maxAwaitTimeMS >= 0); + isTrueArgument("maxTimeMS must be >= 0", maxTimeMS >= 0); + isTrueArgument("timeoutMS must be greater than maxAwaitTimeMS", timeoutMS == null || timeoutMS == 0 + || timeoutMS > maxAwaitTimeMS); + isTrueArgument("maxCommitTimeMS must be >= 0", maxCommitTimeMS == null || maxCommitTimeMS >= 0); + + this.generationId = generationId; + this.serverSelectionTimeoutMS = serverSelectionTimeoutMS; + this.connectTimeoutMS = connectTimeoutMS; + this.timeoutMS = timeoutMS; + this.maxAwaitTimeMS = maxAwaitTimeMS; + this.readTimeoutMS = readTimeoutMS; + this.maxTimeMS = maxTimeMS; + this.maxCommitTimeMS = maxCommitTimeMS; + this.wTimeoutMS = wTimeoutMS; + this.maxWaitTimeMS = maxWaitTimeMS; + } + + public TimeoutSettings connectionOnly() { + return new TimeoutSettings(serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, null, maxWaitTimeMS); + } + + public TimeoutSettings withTimeout(@Nullable final Long timeout, final TimeUnit timeUnit) { + return withTimeoutMS(convertAndValidateTimeoutNullable(timeout, timeUnit)); + } + + TimeoutSettings withTimeoutMS(@Nullable final Long timeoutMS) { + return new TimeoutSettings(timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, maxAwaitTimeMS, + maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS); + } + + public TimeoutSettings withMaxTimeMS(final long maxTimeMS) { + return new TimeoutSettings(generationId, timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, maxAwaitTimeMS, + maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS); + } + + public TimeoutSettings withMaxAwaitTimeMS(final long maxAwaitTimeMS) { + return new TimeoutSettings(generationId, timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, maxAwaitTimeMS, + maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS); + } + + public TimeoutSettings withMaxTimeAndMaxAwaitTimeMS(final long maxTimeMS, final long maxAwaitTimeMS) { + return new TimeoutSettings(generationId, timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, maxAwaitTimeMS, + maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS); + } + + public TimeoutSettings withMaxCommitMS(@Nullable final Long maxCommitTimeMS) { + return new TimeoutSettings(generationId, timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, maxAwaitTimeMS, + maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS); + } + + public TimeoutSettings withWTimeoutMS(@Nullable final Long wTimeoutMS) { + return new TimeoutSettings(timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, maxAwaitTimeMS, + maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS); + } + + public TimeoutSettings withReadTimeoutMS(final long readTimeoutMS) { + return new TimeoutSettings(generationId, timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, maxAwaitTimeMS, + maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS); + } + + 
public TimeoutSettings withConnectTimeoutMS(final long connectTimeoutMS) { + return new TimeoutSettings(generationId, timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, maxAwaitTimeMS, + maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS); + } + + public TimeoutSettings withServerSelectionTimeoutMS(final long serverSelectionTimeoutMS) { + return new TimeoutSettings(timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, maxAwaitTimeMS, + maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS); + } + + public TimeoutSettings withMaxWaitTimeMS(final long maxWaitTimeMS) { + return new TimeoutSettings(timeoutMS, serverSelectionTimeoutMS, connectTimeoutMS, readTimeoutMS, maxAwaitTimeMS, + maxTimeMS, maxCommitTimeMS, wTimeoutMS, maxWaitTimeMS); + } + + public long getServerSelectionTimeoutMS() { + return serverSelectionTimeoutMS; + } + + public long getConnectTimeoutMS() { + return connectTimeoutMS; + } + + @Nullable + public Long getTimeoutMS() { + return timeoutMS; + } + + public long getMaxAwaitTimeMS() { + return maxAwaitTimeMS; + } + + public long getReadTimeoutMS() { + return readTimeoutMS; + } + + public long getMaxTimeMS() { + return maxTimeMS; + } + + @Nullable + public Long getWTimeoutMS() { + return wTimeoutMS; + } + + public long getMaxWaitTimeMS() { + return maxWaitTimeMS; + } + + @Nullable + public Long getMaxCommitTimeMS() { + return maxCommitTimeMS; + } + + /** + * The generation id represents a creation counter for {@code TimeoutSettings} that contain a {@code timeoutMS} value. + * + *
<p>This is used to determine if a new set of {@code TimeoutSettings} has been created within a {@code withTransaction}
+     * block, so that a client side error can be issued.</p>
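+     *
+     * <p>A hedged sketch (the values are illustrative):
+     *
+     * <pre>
+     * TimeoutSettings base = TimeoutSettings.DEFAULT.withTimeout(1_000L, TimeUnit.MILLISECONDS); // fresh generation id
+     * TimeoutSettings derived = base.withMaxTimeMS(2_000); // keeps base's generation id
+     * </pre>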
+ * + * @return the generation id or -1 if no timeout MS is set. + */ + public long getGenerationId() { + return generationId; + } + + @Override + public String toString() { + return "TimeoutSettings{" + + "generationId=" + generationId + + ", timeoutMS=" + timeoutMS + + ", serverSelectionTimeoutMS=" + serverSelectionTimeoutMS + + ", connectTimeoutMS=" + connectTimeoutMS + + ", readTimeoutMS=" + readTimeoutMS + + ", maxWaitTimeMS=" + maxWaitTimeMS + + ", wTimeoutMS=" + wTimeoutMS + + ", maxTimeMS=" + maxTimeMS + + ", maxAwaitTimeMS=" + maxAwaitTimeMS + + ", maxCommitTimeMS=" + maxCommitTimeMS + + '}'; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final TimeoutSettings that = (TimeoutSettings) o; + return serverSelectionTimeoutMS == that.serverSelectionTimeoutMS && connectTimeoutMS == that.connectTimeoutMS + && readTimeoutMS == that.readTimeoutMS && maxWaitTimeMS == that.maxWaitTimeMS && maxTimeMS == that.maxTimeMS + && maxAwaitTimeMS == that.maxAwaitTimeMS && Objects.equals(timeoutMS, that.timeoutMS) + && Objects.equals(wTimeoutMS, that.wTimeoutMS) && Objects.equals(maxCommitTimeMS, that.maxCommitTimeMS); + } + + @Override + public int hashCode() { + return Objects.hash(generationId, serverSelectionTimeoutMS, connectTimeoutMS, timeoutMS, readTimeoutMS, maxWaitTimeMS, wTimeoutMS, maxTimeMS, + maxAwaitTimeMS, maxCommitTimeMS); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/VisibleForTesting.java b/driver-core/src/main/com/mongodb/internal/VisibleForTesting.java new file mode 100644 index 00000000000..8badfce0cdb --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/VisibleForTesting.java @@ -0,0 +1,49 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal; + +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Denotes that the annotated program element is made more accessible than otherwise necessary for the purpose of testing. + * The annotated program element must be used as if it had the {@linkplain #otherwise() intended} access modifier + * for any purpose other than testing. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
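+ *
+ * <p>A hedged usage sketch (the annotated member is hypothetical):
+ *
+ * <pre>
+ * {@literal @}VisibleForTesting(otherwise = AccessModifier.PRIVATE)
+ * static int parseVersion(final String version) {
+ *     return Integer.parseInt(version.trim());
+ * }
+ * </pre>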
+ */ +@Documented +@Retention(RetentionPolicy.SOURCE) +@Target({ElementType.TYPE, ElementType.CONSTRUCTOR, ElementType.METHOD, ElementType.FIELD}) +public @interface VisibleForTesting { + /** + * The intended {@link AccessModifier}. + */ + AccessModifier otherwise(); + + /** + * A subset of access modifiers + * that includes only values relevant to be used as the {@linkplain #otherwise() intended} access modifier. + */ + enum AccessModifier { + PRIVATE, + PACKAGE, + PROTECTED + } +} diff --git a/driver-core/src/main/com/mongodb/internal/async/AsyncAggregateResponseBatchCursor.java b/driver-core/src/main/com/mongodb/internal/async/AsyncAggregateResponseBatchCursor.java new file mode 100644 index 00000000000..ccfc9f7a956 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/async/AsyncAggregateResponseBatchCursor.java @@ -0,0 +1,38 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.async; + +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonTimestamp; + +/** + * Extends the async batch cursor interface to include information included in an aggregate or getMore response. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */
+public interface AsyncAggregateResponseBatchCursor<T> extends AsyncBatchCursor<T> {
+    @Nullable
+    BsonDocument getPostBatchResumeToken();
+
+    @Nullable
+    BsonTimestamp getOperationTime();
+
+    boolean isFirstBatchEmpty();
+
+    int getMaxWireVersion();
+}
diff --git a/driver-core/src/main/com/mongodb/internal/async/AsyncBatchCursor.java b/driver-core/src/main/com/mongodb/internal/async/AsyncBatchCursor.java
new file mode 100644
index 00000000000..bd8d6c64a3f
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/async/AsyncBatchCursor.java
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.async;
+
+import com.mongodb.internal.operation.BatchCursor;
+
+import java.io.Closeable;
+import java.util.ArrayList;
+import java.util.List;
+
+import static com.mongodb.internal.async.AsyncRunnable.beginAsync;
+
+/**
+ * MongoDB returns query results as batches, and this interface provides an asynchronous iterator over those batches. The first call to
+ * the {@code next} method will return the first batch, and subsequent calls will trigger an asynchronous request to get the next batch
+ * of results. Clients can control the batch size by setting the {@code batchSize} property between calls to {@code next}.
+ *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
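+ *
+ * <p>A hedged usage sketch ({@code cursor} and the handling code are illustrative):
+ *
+ * <pre>
+ * cursor.setBatchSize(100);
+ * cursor.next((batch, t) -> {
+ *     if (t != null) {
+ *         // handle the failure
+ *     } else if (!cursor.isClosed()) {
+ *         // consume the batch, then request the next one
+ *     }
+ * });
+ * </pre>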
+ */
+public interface AsyncBatchCursor<T> extends Closeable {
+    /**
+     * Returns the next batch of results. A tailable cursor will block until another batch exists.
+     * Unlike the {@link BatchCursor} this method will automatically mark the cursor as closed when there are no more expected results.
+     * Care should be taken to check {@link #isClosed()} between calls.
+     *
+     * @param callback callback to receive the next batch of results
+     * @throws java.util.NoSuchElementException if no next batch exists
+     */
+    void next(SingleResultCallback<List<T>> callback);
+
+    /**
+     * Sets the batch size to use when requesting the next batch. This is the number of documents to request in the next batch.
+     *
+     * @param batchSize the non-negative batch size. 0 means to use the server default.
+     */
+    void setBatchSize(int batchSize);
+
+    /**
+     * Gets the batch size to use when requesting the next batch. This is the number of documents to request in the next batch.
+     *
+     * @return the non-negative batch size. 0 means to use the server default.
+     */
+    int getBatchSize();
+
+    /**
+     * Implementations of {@link AsyncBatchCursor} are allowed to close themselves, see {@link #close()} for more details.
+     *
+     * @return {@code true} if {@code this} has been closed or has closed itself.
+     */
+    boolean isClosed();
+
+    /**
+     * Implementations of {@link AsyncBatchCursor} are allowed to close themselves synchronously via methods
+     * {@link #next(SingleResultCallback)}.
+     * Self-closing behavior is discouraged because it introduces an additional burden on code that uses {@link AsyncBatchCursor}.
+     * To help making such code simpler, this method is required to be idempotent.
+     * <p>
+     * Another quirk is that this method is allowed to release resources "eventually",
+     * i.e., not before (in the happens-before order) returning.
+     * Nevertheless, {@link #isClosed()} called after (in the happens-before order) {@link #close()} must return {@code true}.
+     */
+    @Override
+    void close();
+
+    default void exhaust(final SingleResultCallback<List<List<T>>> finalCallback) {
+        List<List<T>> results = new ArrayList<>();
+
+        beginAsync().thenRunDoWhileLoop(iterationCallback -> {
+            beginAsync().<List<T>>thenSupply(c -> {
+                next(c);
+            }).thenConsume((batch, c) -> {
+                if (!batch.isEmpty()) {
+                    results.add(batch);
+                }
+                c.complete(c);
+            }).finish(iterationCallback);
+        }, () -> !this.isClosed()
+        ).<List<List<T>>>thenSupply(c -> {
+            c.complete(results);
+        }).finish(finalCallback);
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/internal/async/AsyncConsumer.java b/driver-core/src/main/com/mongodb/internal/async/AsyncConsumer.java
new file mode 100644
index 00000000000..93a10c9cd2d
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/async/AsyncConsumer.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.async;
+
+/**
+ * See {@link AsyncRunnable}.
+ * <p>
+ * This class is not part of the public API and may be removed or changed at any time
+ */
+@FunctionalInterface
+public interface AsyncConsumer<T> extends AsyncFunction<T, Void> {
+}
diff --git a/driver-core/src/main/com/mongodb/internal/async/AsyncFunction.java b/driver-core/src/main/com/mongodb/internal/async/AsyncFunction.java
new file mode 100644
index 00000000000..7203d3a4945
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/async/AsyncFunction.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.async;
+
+import com.mongodb.lang.Nullable;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+/**
+ * See {@link AsyncRunnable}
+ * <p>
+ * This class is not part of the public API and may be removed or changed at any time
+ */
+@FunctionalInterface
+public interface AsyncFunction<T, R> {
+    /**
+     * This should not be called externally, but should be implemented as a
+     * lambda. To "finish" an async chain, use one of the "finish" methods.
+     *
+     * @param value A {@code @}{@link Nullable} argument of the asynchronous function.
+     * @param callback the callback
+     */
+    void unsafeFinish(T value, SingleResultCallback<R> callback);
+
+    /**
+     * Must be invoked at end of async chain or when executing a callback handler supplied by the caller.
+     *
+     * @param callback the callback provided by the method the chain is used in.
+     */
+    default void finish(final T value, final SingleResultCallback<R> callback) {
+        final AtomicBoolean callbackInvoked = new AtomicBoolean(false);
+        try {
+            this.unsafeFinish(value, (v, e) -> {
+                if (!callbackInvoked.compareAndSet(false, true)) {
+                    throw new AssertionError(String.format("Callback has been already completed. It could happen "
+                            + "if code throws an exception after invoking an async method. Value: %s", v), e);
+                }
+                callback.onResult(v, e);
+            });
+        } catch (Throwable t) {
+            if (!callbackInvoked.compareAndSet(false, true)) {
+                throw t;
+            } else {
+                callback.completeExceptionally(t);
+            }
+        }
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/internal/async/AsyncRunnable.java b/driver-core/src/main/com/mongodb/internal/async/AsyncRunnable.java
new file mode 100644
index 00000000000..e404e2b8152
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/async/AsyncRunnable.java
@@ -0,0 +1,274 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.async;
+
+import com.mongodb.internal.TimeoutContext;
+import com.mongodb.internal.async.function.AsyncCallbackLoop;
+import com.mongodb.internal.async.function.LoopState;
+import com.mongodb.internal.async.function.RetryState;
+import com.mongodb.internal.async.function.RetryingAsyncCallbackSupplier;
+
+import java.util.function.BooleanSupplier;
+import java.util.function.Predicate;
+import java.util.function.Supplier;
+
+/**
+ * <p>See the test code (AsyncFunctionsTest) for API usage.
+ *
+ * <p>This API is used to write "Async" methods. These must exhibit the
+ * same behaviour as their sync counterparts, except asynchronously,
+ * and will make use of a {@link SingleResultCallback} parameter.
+ *
+ * <p>This API makes it easy to compare and verify async code against
+ * corresponding sync code, since the "shape" and ordering of the
+ * async code matches that of the sync code. For example, given the
+ * following "sync" method:
+ *
+ * <pre>
+ * public T myMethod() {
+ *     method1();
+ *     method2();
+ * }
+ * </pre>
+ *
+ * <p>The async counterpart would be:
+ *
+ * <pre>
+ * public void myMethodAsync(SingleResultCallback&lt;T&gt; callback) {
+ *     beginAsync().thenRun(c -> {
+ *         method1Async(c);
+ *     }).thenRun(c -> {
+ *         method2Async(c);
+ *     }).finish(callback);
+ * }
+ * </pre>
+ *
+ * <p>The usage of this API is defined in its tests (AsyncFunctionsTest).
+ * Each test specifies the Async API code that must be used to formally
+ * replace a particular pattern of sync code. These tests, in a sense,
+ * define formal rules of replacement.
+ *
+ * <p>Requirements and conventions:
+ *
+ * <p>Each async method SHOULD start with {@link #beginAsync()}, which begins
+ * a chain of lambdas. Each lambda provides a callback "c" that MUST be passed
+ * or completed at the lambda's end of execution. The async method's "callback"
+ * parameter MUST be passed to {@link #finish(SingleResultCallback)}, and MUST
+ * NOT be used otherwise.
+ *
+ * <p>Consider refactoring corresponding sync code to reduce nesting or to
+ * otherwise improve clarity, since minor issues will often be amplified in
+ * the async code.
+ *
+ * <p>Each async lambda MUST invoke its async method with "c", and MUST return
+ * immediately after invoking that method. It MUST NOT, for example, have
+ * a catch or finally (including close on try-with-resources) after the
+ * invocation of the async method.
+ *
+ * <p>In cases where the async method has "mixed" returns (some of which are
+ * plain sync, some async), the "c" callback MUST be completed on the
+ * plain sync path, using {@link SingleResultCallback#complete(Object)} or
+ * {@link SingleResultCallback#complete(SingleResultCallback)}, followed by a
+ * return or end of method, as in the sketch below.
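+ *
+ * <p>For example, a hedged sketch of a mixed return (the names "cachedValue",
+ * "computeValueAsync", and "getValueAsync" are illustrative, not part of this API):
+ *
+ * <pre>
+ * public void getValueAsync(SingleResultCallback&lt;T&gt; callback) {
+ *     beginAsync().&lt;T&gt;thenSupply(c -> {
+ *         if (cachedValue != null) {
+ *             c.complete(cachedValue); // plain sync path, then return
+ *             return;
+ *         }
+ *         computeValueAsync(c);
+ *     }).finish(callback);
+ * }
+ * </pre>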
+ *
+ * <p>Chains starting with {@link #beginAsync()} correspond roughly to code
+ * blocks. This includes method bodies, blocks used in if/try/catch/while/etc.
+ * statements, and places where anonymous code blocks might be used. For
+ * clarity, such nested/indented chains might be omitted (where possible,
+ * as demonstrated in tests).
+ *
+ * <p>Plain sync code MAY throw exceptions, and SHOULD NOT attempt to handle
+ * them asynchronously. The exceptions will be caught and handled by the API.
+ *
+ * <p>All code, including "plain" code (parameter checks) SHOULD be placed
+ * within the API's async lambdas. This ensures that exceptions are handled,
+ * and facilitates comparison/review. This excludes code that must be
+ * "shared", such as lambda and variable declarations.
+ *
+ * <p>For consistency, and ease of comparison/review, async chains SHOULD be
+ * formatted as in the tests; that is, with line-breaks at the curly-braces of
+ * lambda bodies, with no linebreak before the "." of any Async API method.
+ *
+ * <p>Code review checklist, for common mistakes:
+ *
+ * <ol>
+ *   <li>Is everything (that can be) inside the async lambdas?</li>
+ *   <li>Is "callback" supplied to "finish"?</li>
+ *   <li>In each block and nested block, is that same block's "c" always
+ *   passed/completed at the end of execution?</li>
+ *   <li>Is every c.complete followed by a return, to end execution?</li>
+ *   <li>Have all sync method calls been converted to async, where needed?</li>
+ * </ol>
+ *
+ * <p>This class is not part of the public API and may be removed or changed
+ * at any time
+ */
+@FunctionalInterface
+public interface AsyncRunnable extends AsyncSupplier<Void>, AsyncConsumer<Void> {
+
+    static AsyncRunnable beginAsync() {
+        return (c) -> c.complete(c);
+    }
+
+    /**
+     * @param runnable The async runnable to run after this runnable
+     * @return the composition of this runnable and the runnable, a runnable
+     */
+    default AsyncRunnable thenRun(final AsyncRunnable runnable) {
+        return (c) -> {
+            this.unsafeFinish((r, e) -> {
+                if (e == null) {
+                    /* If 'runnable' is executed on a different thread from the one that executed the initial 'finish()',
+                       then invoking 'finish()' within 'runnable' will catch and propagate any exceptions to 'c' (the callback). */
+                    runnable.finish(c);
+                } else {
+                    c.completeExceptionally(e);
+                }
+            });
+        };
+    }
+
+    /**
+     * The error check checks if the exception is an instance of the provided class.
+     * @see #thenRunTryCatchAsyncBlocks(AsyncRunnable, java.util.function.Predicate, AsyncFunction)
+     */
+    default <T extends Throwable> AsyncRunnable thenRunTryCatchAsyncBlocks(
+            final AsyncRunnable runnable,
+            final Class<T> exceptionClass,
+            final AsyncFunction<Throwable, Void> errorFunction) {
+        return thenRunTryCatchAsyncBlocks(runnable, e -> exceptionClass.isInstance(e), errorFunction);
+    }
+
+    /**
+     * Convenience method corresponding to a try-catch block in sync code.
+     * This MUST be used to properly handle cases where there is code above
+     * the block, whose errors must not be caught by an ensuing
+     * {@link #onErrorIf(java.util.function.Predicate, AsyncFunction)}.
+     *
+     * @param runnable corresponds to the contents of the try block
+     * @param errorCheck for matching on an error (or, a more complex condition)
+     * @param errorFunction corresponds to the contents of the catch block
+     * @return the composition of this runnable, a runnable that runs the
+     * provided runnable, followed by (composed with) the error function, which
+     * is conditional on there being an exception meeting the error check.
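+     *
+     * <p>A hedged sketch of the correspondence (the methods "checkArgs",
+     * "riskyAsync", and "recoverAsync" are illustrative):
+     *
+     * <pre>
+     * // sync: checkArgs(); try { risky(); } catch (MongoException e) { recover(e); }
+     * beginAsync().thenRun(c -> {
+     *     checkArgs();
+     *     c.complete(c);
+     * }).thenRunTryCatchAsyncBlocks(c -> {
+     *     riskyAsync(c);
+     * }, e -> e instanceof MongoException, (e, c) -> {
+     *     recoverAsync(e, c);
+     * }).finish(callback);
+     * </pre>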
+     */
+    default AsyncRunnable thenRunTryCatchAsyncBlocks(
+            final AsyncRunnable runnable,
+            final Predicate<Throwable> errorCheck,
+            final AsyncFunction<Throwable, Void> errorFunction) {
+        return this.thenRun(c -> {
+            beginAsync()
+                    .thenRun(runnable)
+                    .onErrorIf(errorCheck, errorFunction)
+                    .finish(c);
+        });
+    }
+
+    /**
+     * @param condition the condition to check
+     * @param runnable The async runnable to run after this runnable,
+     * if and only if the condition is met
+     * @return the composition of this runnable and the runnable, a runnable
+     */
+    default AsyncRunnable thenRunIf(final Supplier<Boolean> condition, final AsyncRunnable runnable) {
+        return (callback) -> {
+            this.unsafeFinish((r, e) -> {
+                if (e != null) {
+                    callback.completeExceptionally(e);
+                    return;
+                }
+                boolean matched;
+                try {
+                    matched = condition.get();
+                } catch (Throwable t) {
+                    callback.completeExceptionally(t);
+                    return;
+                }
+                if (matched) {
+                    runnable.finish(callback);
+                } else {
+                    callback.complete(callback);
+                }
+            });
+        };
+    }
+
+    /**
+     * @param supplier The supplier to supply using after this runnable
+     * @return the composition of this runnable and the supplier, a supplier
+     * @param <R> The return type of the resulting supplier
+     */
+    default <R> AsyncSupplier<R> thenSupply(final AsyncSupplier<R> supplier) {
+        return (c) -> {
+            this.unsafeFinish((r, e) -> {
+                if (e == null) {
+                    supplier.finish(c);
+                } else {
+                    c.completeExceptionally(e);
+                }
+            });
+        };
+    }
+
+    /**
+     * @param runnable the runnable to loop
+     * @param shouldRetry condition under which to retry
+     * @return the composition of this, and the looping branch
+     * @see RetryingAsyncCallbackSupplier
+     */
+    default AsyncRunnable thenRunRetryingWhile(
+            final TimeoutContext timeoutContext, final AsyncRunnable runnable, final Predicate<Throwable> shouldRetry) {
+        return thenRun(callback -> {
+            new RetryingAsyncCallbackSupplier<Void>(
+                    new RetryState(timeoutContext),
+                    (rs, lastAttemptFailure) -> shouldRetry.test(lastAttemptFailure),
+                    // `finish` is required here instead of `unsafeFinish`
+                    // because only `finish` meets the contract of
+                    // `AsyncCallbackSupplier.get`, which we implement here
+                    cb -> runnable.finish(cb)
+            ).get(callback);
+        });
+    }
+
+    /**
+     * This method is equivalent to a do-while loop, where the loop body is executed first and
+     * then the condition is checked to determine whether the loop should continue.
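+     *
+     * <p>A hedged sketch (the methods "readChunkAsync" and "hasMoreChunks" are
+     * illustrative):
+     *
+     * <pre>
+     * beginAsync().thenRunDoWhileLoop(c -> {
+     *     readChunkAsync(c);
+     * }, () -> hasMoreChunks()
+     * ).finish(callback);
+     * </pre>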
+ * + * @param loopBodyRunnable the asynchronous task to be executed in each iteration of the loop + * @param whileCheck a condition to check after each iteration; the loop continues as long as this condition returns true + * @return the composition of this and the looping branch + * @see AsyncCallbackLoop + */ + default AsyncRunnable thenRunDoWhileLoop(final AsyncRunnable loopBodyRunnable, final BooleanSupplier whileCheck) { + return thenRun(finalCallback -> { + LoopState loopState = new LoopState(); + new AsyncCallbackLoop(loopState, iterationCallback -> { + + loopBodyRunnable.finish((result, t) -> { + if (t != null) { + iterationCallback.completeExceptionally(t); + return; + } + if (loopState.breakAndCompleteIf(() -> !whileCheck.getAsBoolean(), iterationCallback)) { + return; + } + iterationCallback.complete(iterationCallback); + }); + + }).run(finalCallback); + }); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/async/AsyncSupplier.java b/driver-core/src/main/com/mongodb/internal/async/AsyncSupplier.java new file mode 100644 index 00000000000..6dd89e4d9b0 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/async/AsyncSupplier.java @@ -0,0 +1,192 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.async; + +import com.mongodb.lang.Nullable; + +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Predicate; + + +/** + * See {@link AsyncRunnable} + *
<p>
+ * This class is not part of the public API and may be removed or changed at any time
+ */
+@FunctionalInterface
+public interface AsyncSupplier<T> extends AsyncFunction<Void, T> {
+    /**
+     * This should not be called externally to this API. It should be
+     * implemented as a lambda. To "finish" an async chain, use one of
+     * the "finish" methods.
+     *
+     * @see #finish(SingleResultCallback)
+     */
+    void unsafeFinish(SingleResultCallback<T> callback);
+
+    /**
+     * This is the async variant of a supplier's get method.
+     * This method must only be used when this AsyncSupplier corresponds
+     * to a {@link java.util.function.Supplier} (and is therefore being
+     * used within an async chain method lambda).
+     * @param callback the callback
+     */
+    default void getAsync(final SingleResultCallback<T> callback) {
+        finish(callback);
+    }
+
+    @Override
+    default void unsafeFinish(@Nullable final Void value, final SingleResultCallback<T> callback) {
+        unsafeFinish(callback);
+    }
+
+    /**
+     * Must be invoked at end of async chain or when executing a callback handler supplied by the caller.
+     *
+     * @see #thenApply(AsyncFunction)
+     * @see #thenConsume(AsyncConsumer)
+     * @see #onErrorIf(Predicate, AsyncFunction)
+     * @param callback the callback provided by the method the chain is used in
+     */
+    default void finish(final SingleResultCallback<T> callback) {
+        final AtomicBoolean callbackInvoked = new AtomicBoolean(false);
+        try {
+            this.unsafeFinish((v, e) -> {
+                if (!callbackInvoked.compareAndSet(false, true)) {
+                    throw new AssertionError(String.format("Callback has been already completed. It could happen "
+                            + "if code throws an exception after invoking an async method. Value: %s", v), e);
+                }
+                callback.onResult(v, e);
+            });
+        } catch (Throwable t) {
+            if (!callbackInvoked.compareAndSet(false, true)) {
+                throw t;
+            } else {
+                callback.completeExceptionally(t);
+            }
+        }
+    }
+
+    /**
+     * Must be invoked at end of async chain
+     * @param runnable the sync code to invoke (under non-exceptional flow)
+     * prior to the callback
+     * @param callback the callback provided by the method the chain is used in
+     */
+    default void thenRunAndFinish(final Runnable runnable, final SingleResultCallback<T> callback) {
+        this.finish((r, e) -> {
+            if (e != null) {
+                callback.completeExceptionally(e);
+                return;
+            }
+            try {
+                runnable.run();
+            } catch (Throwable t) {
+                callback.completeExceptionally(t);
+                return;
+            }
+            callback.onResult(r, null);
+        });
+    }
+
+    /**
+     * See {@link #thenRunAndFinish(Runnable, SingleResultCallback)}, but the runnable
+     * will always be executed, including on the exceptional path.
+     * @param runnable the runnable
+     * @param callback the callback
+     */
+    default void thenAlwaysRunAndFinish(final Runnable runnable, final SingleResultCallback<T> callback) {
+        this.finish((r, e) -> {
+            try {
+                runnable.run();
+            } catch (Throwable t) {
+                if (e != null) {
+                    t.addSuppressed(e);
+                }
+                callback.completeExceptionally(t);
+                return;
+            }
+            callback.onResult(r, e);
+        });
+    }
+
+    /**
+     * @param function The async function to run after this supplier
+     * @return the composition of this supplier and the function, a supplier
+     * @param <R> The return type of the resulting supplier
+     */
+    default <R> AsyncSupplier<R> thenApply(final AsyncFunction<T, R> function) {
+        return (c) -> {
+            this.finish((v, e) -> {
+                if (e == null) {
+                    function.finish(v, c);
+                } else {
+                    c.completeExceptionally(e);
+                }
+            });
+        };
+    }
+
+
+    /**
+     * @param consumer The async consumer to run after this supplier
+     * @return the composition of this supplier and the consumer, a runnable
+     */
+    default AsyncRunnable thenConsume(final AsyncConsumer<T> consumer) {
+        return (c) -> {
+            this.unsafeFinish((v, e) -> {
+                if (e == null) {
+                    consumer.finish(v, c);
+                } else {
+                    c.completeExceptionally(e);
+                }
+            });
+        };
+    }
+
+    /**
+     * @param errorCheck A check, comparable to a catch-if/otherwise-rethrow
+     * @param errorFunction The branch to execute if the error matches
+     * @return The composition of this, and the conditional branch
+     */
+    default AsyncSupplier<T> onErrorIf(
+            final Predicate<Throwable> errorCheck,
+            final AsyncFunction<Throwable, T> errorFunction) {
+        // finish is used here instead of unsafeFinish to ensure that
+        // exceptions thrown from the callback are properly handled
+        return (callback) -> this.finish((r, e) -> {
+            if (e == null) {
+                callback.complete(r);
+                return;
+            }
+            boolean errorMatched;
+            try {
+                errorMatched = errorCheck.test(e);
+            } catch (Throwable t) {
+                t.addSuppressed(e);
+                callback.completeExceptionally(t);
+                return;
+            }
+            if (errorMatched) {
+                errorFunction.finish(e, callback);
+            } else {
+                callback.completeExceptionally(e);
+            }
+        });
+    }
+
+}
diff --git a/driver-core/src/main/com/mongodb/internal/async/ErrorHandlingResultCallback.java b/driver-core/src/main/com/mongodb/internal/async/ErrorHandlingResultCallback.java
new file mode 100644
index 00000000000..da5a09a10b8
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/async/ErrorHandlingResultCallback.java
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.async;
+
+import com.mongodb.internal.diagnostics.logging.Logger;
+import com.mongodb.lang.Nullable;
+
+import static com.mongodb.assertions.Assertions.notNull;
+
+/**
+ * <p>This class is not part of the public API and may be removed or changed at any time.</p>
+ */ +public class ErrorHandlingResultCallback implements SingleResultCallback { + private final SingleResultCallback wrapped; + private final Logger logger; + + public static SingleResultCallback errorHandlingCallback(final SingleResultCallback callback, final Logger logger) { + if (callback instanceof ErrorHandlingResultCallback) { + return callback; + } else { + return new ErrorHandlingResultCallback<>(callback, logger); + } + } + + ErrorHandlingResultCallback(final SingleResultCallback wrapped, final Logger logger) { + this.wrapped = notNull("wrapped", wrapped); + this.logger = notNull("logger", logger); + } + + @Override + public void onResult(@Nullable final T result, @Nullable final Throwable t) { + try { + wrapped.onResult(result, t); + } catch (Throwable e) { + logger.error("Callback onResult call produced an error", e); + } + } + +} diff --git a/driver-core/src/main/com/mongodb/internal/async/MutableValue.java b/driver-core/src/main/com/mongodb/internal/async/MutableValue.java new file mode 100644 index 00000000000..0ee793788ea --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/async/MutableValue.java @@ -0,0 +1,47 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.async; + +import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.lang.Nullable; + +import static com.mongodb.assertions.Assertions.assertNotNull; + +@NotThreadSafe +public final class MutableValue { + private T value; + + public MutableValue(@Nullable final T value) { + this.value = value; + } + + public MutableValue() { + this(null); + } + + public T get() { + return assertNotNull(value); + } + + @Nullable + public T getNullable() { + return value; + } + + public void set(@Nullable final T value) { + this.value = value; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/async/SingleResultCallback.java b/driver-core/src/main/com/mongodb/internal/async/SingleResultCallback.java new file mode 100644 index 00000000000..632e453d0c0 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/async/SingleResultCallback.java @@ -0,0 +1,73 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.async; + +import com.mongodb.assertions.Assertions; +import com.mongodb.connection.AsyncCompletionHandler; +import com.mongodb.internal.async.function.AsyncCallbackFunction; +import com.mongodb.lang.Nullable; + +import static com.mongodb.assertions.Assertions.assertNotNull; + +/** + * An interface to describe the completion of an asynchronous function, which may be represented as {@link AsyncCallbackFunction}. + * + *
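As a usage sketch (not part of the diff), a callback adapting this contract to a CompletableFuture; the future is a hypothetical target, and the point is that failure is reported through the throwable argument rather than thrown:

    CompletableFuture<String> future = new CompletableFuture<>(); // hypothetical target
    SingleResultCallback<String> callback = (result, t) -> {
        if (t != null) {
            future.completeExceptionally(t); // failed completion
        } else {
            future.complete(result);         // successful completion; result may be null
        }
    };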
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public interface SingleResultCallback { + /** + * Called when the function completes. This method must not complete abruptly, see {@link AsyncCallbackFunction} for more details. + * + * @param result the result, which may be null. Always null if e is not null. + * @param t the throwable, or null if the operation completed normally + * @throws RuntimeException Never. + * @throws Error Never, on the best effort basis. + */ + void onResult(@Nullable T result, @Nullable Throwable t); + + /** + * @return this callback as a handler + */ + default AsyncCompletionHandler asHandler() { + return new AsyncCompletionHandler() { + @Override + public void completed(@Nullable final T result) { + onResult(result, null); + } + @Override + public void failed(final Throwable t) { + completeExceptionally(t); + } + }; + } + + default void complete(final SingleResultCallback callback) { + // takes a void callback (itself) to help ensure that this method + // is not accidentally used when "complete(T)" should have been used + // instead, since results are not marked nullable. + Assertions.assertTrue(callback == this); + this.onResult(null, null); + } + + default void complete(@Nullable final T result) { + this.onResult(result, null); + } + + default void completeExceptionally(final Throwable t) { + this.onResult(null, assertNotNull(t)); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/async/function/AsyncCallbackBiFunction.java b/driver-core/src/main/com/mongodb/internal/async/function/AsyncCallbackBiFunction.java new file mode 100644 index 00000000000..bc54b15edb2 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/async/function/AsyncCallbackBiFunction.java @@ -0,0 +1,42 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.async.function; + +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.lang.Nullable; + +import java.util.function.BiFunction; + +/** + * An {@linkplain AsyncCallbackFunction asynchronous callback-based function} of two parameters. + * This class is a callback-based counterpart of {@link BiFunction}. + * + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ * + * @param The type of the first parameter to the function. + * @param The type of the second parameter to the function. + * @param See {@link AsyncCallbackFunction} + * @see AsyncCallbackFunction + */ +@FunctionalInterface +public interface AsyncCallbackBiFunction { + /** + * @param p1 The first {@code @}{@link Nullable} argument of the asynchronous function. + * @param p2 The second {@code @}{@link Nullable} argument of the asynchronous function. + * @see AsyncCallbackFunction#apply(Object, SingleResultCallback) + */ + void apply(P1 p1, P2 p2, SingleResultCallback callback); +} diff --git a/driver-core/src/main/com/mongodb/internal/async/function/AsyncCallbackFunction.java b/driver-core/src/main/com/mongodb/internal/async/function/AsyncCallbackFunction.java new file mode 100644 index 00000000000..cf2fedbc1ef --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/async/function/AsyncCallbackFunction.java @@ -0,0 +1,57 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.async.function; + +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.lang.Nullable; + +import java.util.function.Function; + +/** + * An asynchronous callback-based function, its synchronous counterpart is {@link Function}. + *
+ * <p>
+ * An asynchronous function provides no guarantee that it completes before + * (in the happens-before order) the method {@link #apply(Object, SingleResultCallback)} completes, + * and produces either a successful or a failed result by passing it to a {@link SingleResultCallback} after + * (in the happens-before order) the function completes. That is, a callback is used to emulate both normal and abrupt completion of a + * Java method, which is why the {@link #apply(Object, SingleResultCallback)} method is not allowed to complete abruptly. + * If it completes abruptly, then the behavior is not defined, unless otherwise explicitly specified, e.g., as in + * {@link AsyncCallbackSupplier#whenComplete(Runnable)}. + *
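A sketch of a function honoring this contract (hypothetical parsing example; the generic parameters are written out although this rendering strips them):

    AsyncCallbackFunction<String, Integer> parse = (input, callback) -> {
        int parsed;
        try {
            parsed = Integer.parseInt(input);
        } catch (Throwable t) {
            callback.onResult(null, t); // relay the failure; never complete abruptly
            return;
        }
        callback.onResult(parsed, null); // successful result
    };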
+ * <p>
+ * When talking about an asynchronous function, the terms + * "normal" and "abrupt completion" + * are used as they are defined by the Java Language Specification, while the terms "successful" and "failed completion" are used to refer to a + * situation when the function produces either a successful or a failed result respectively. + * + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ *
+ * @param <P>
The type of the first parameter to the function. + * @param The type of successful result. A failed result is of the {@link Throwable} type + * as defined by {@link SingleResultCallback#onResult(Object, Throwable)}. + */ +@FunctionalInterface +public interface AsyncCallbackFunction { + /** + * Initiates execution of the asynchronous function. + * + * @param a A {@code @}{@link Nullable} argument of the asynchronous function. + * @param callback A consumer of a result, {@link SingleResultCallback#onResult(Object, Throwable) completed} after + * (in the happens-before order) the asynchronous function completes. + * @throws RuntimeException Never. Exceptions must be relayed to the {@code callback}. + * @throws Error Never, on the best effort basis. Errors should be relayed to the {@code callback}. + */ + void apply(P a, SingleResultCallback callback); +} diff --git a/driver-core/src/main/com/mongodb/internal/async/function/AsyncCallbackLoop.java b/driver-core/src/main/com/mongodb/internal/async/function/AsyncCallbackLoop.java new file mode 100644 index 00000000000..a347a2a7e47 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/async/function/AsyncCallbackLoop.java @@ -0,0 +1,90 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.async.function; + +import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.lang.Nullable; + +import java.util.function.Supplier; + +/** + * A decorator that implements automatic repeating of an {@link AsyncCallbackRunnable}. + * {@link AsyncCallbackLoop} may execute the original asynchronous function multiple times sequentially, + * while guaranteeing that the callback passed to {@link #run(SingleResultCallback)} is completed at most once. + * This class emulates the {@code while(true)} + * statement. + *
+ * <p>
+ * The original function may additionally observe or control looping via {@link LoopState}. + * Looping continues until either of the following happens: + *
+ * <ul>
+ *     <li>the original function fails as specified by {@link AsyncCallbackFunction};</li>
+ *     <li>the original function calls {@link LoopState#breakAndCompleteIf(Supplier, SingleResultCallback)}.</li>
+ * </ul>
+ * + *
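A sketch of a loop built from these pieces, assuming a hypothetical asynchronous step tryOnceAsync that reports via its callback whether the work is done:

    LoopState state = new LoopState();
    new AsyncCallbackLoop(state, iterationCallback ->
            tryOnceAsync((done, t) -> {
                if (t != null) {
                    iterationCallback.onResult(null, t); // a failure breaks the loop
                } else if (!state.breakAndCompleteIf(() -> done, iterationCallback)) {
                    iterationCallback.onResult(null, null); // advance to the next iteration
                }
            })
    ).run((result, t) -> { /* completed at most once, after the loop ends */ });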
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +@NotThreadSafe +public final class AsyncCallbackLoop implements AsyncCallbackRunnable { + private final LoopState state; + private final AsyncCallbackRunnable body; + + /** + * @param state The {@link LoopState} to be deemed as initial for the purpose of the new {@link AsyncCallbackLoop}. + * @param body The body of the loop. + */ + public AsyncCallbackLoop(final LoopState state, final AsyncCallbackRunnable body) { + this.state = state; + this.body = body; + } + + @Override + public void run(final SingleResultCallback callback) { + body.run(new LoopingCallback(callback)); + } + + /** + * This callback is allowed to be completed more than once. + */ + @NotThreadSafe + private class LoopingCallback implements SingleResultCallback { + private final SingleResultCallback wrapped; + + LoopingCallback(final SingleResultCallback callback) { + wrapped = callback; + } + + @Override + public void onResult(@Nullable final Void result, @Nullable final Throwable t) { + if (t != null) { + wrapped.onResult(null, t); + } else { + boolean continueLooping; + try { + continueLooping = state.advance(); + } catch (Throwable e) { + wrapped.onResult(null, e); + return; + } + if (continueLooping) { + body.run(this); + } else { + wrapped.onResult(result, null); + } + } + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/async/function/AsyncCallbackRunnable.java b/driver-core/src/main/com/mongodb/internal/async/function/AsyncCallbackRunnable.java new file mode 100644 index 00000000000..02fdbdf9699 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/async/function/AsyncCallbackRunnable.java @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.async.function; + +import com.mongodb.internal.async.SingleResultCallback; + +/** + * An {@linkplain AsyncCallbackFunction asynchronous callback-based function} of no parameters and no successful result. + * This class is a callback-based counterpart of {@link Runnable}. + * + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ * + * @see AsyncCallbackFunction + */ +@FunctionalInterface +public interface AsyncCallbackRunnable { + /** + * @see AsyncCallbackFunction#apply(Object, SingleResultCallback) + */ + void run(SingleResultCallback callback); + +} diff --git a/driver-core/src/main/com/mongodb/internal/async/function/AsyncCallbackSupplier.java b/driver-core/src/main/com/mongodb/internal/async/function/AsyncCallbackSupplier.java new file mode 100644 index 00000000000..1d98fb91a83 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/async/function/AsyncCallbackSupplier.java @@ -0,0 +1,112 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.async.function; + +import com.mongodb.internal.async.MutableValue; +import com.mongodb.internal.async.SingleResultCallback; + +import java.util.function.Supplier; + +/** + * An {@linkplain AsyncCallbackFunction asynchronous callback-based function} of no parameters. + * This class is a callback-based counterpart of {@link Supplier}. + * Any asynchronous callback function with parameters may be represented this way by partially applying the function to its parameters + * until no parameters are left unapplied, and only a callback is left to be consumed. + * + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ * + * @param See {@link AsyncCallbackFunction}. + * @see AsyncCallbackFunction + */ +@FunctionalInterface +public interface AsyncCallbackSupplier { + /** + * @see AsyncCallbackFunction#apply(Object, SingleResultCallback) + */ + void get(SingleResultCallback callback); + + /** + * Returns a composed asynchronous function that provides a guarantee of executing the {@code after} action similar to that of the + * {@code finally} block. + * The returned function that executes this {@link AsyncCallbackSupplier} always(1) followed + * (in the happens-before order) by the synchronous {@code after} action, which is then followed (in the happens-before order) + * by completing the callback of the composed asynchronous function. + * + * @param after The synchronous action to execute after this {@link AsyncCallbackSupplier}. + * If {@code after} completes abruptly, then its exception is used as the failed result of the composed asynchronous function, + * i.e., it is relayed to its callback. If this {@link AsyncCallbackSupplier} fails and {@code after} + * completes abruptly, then the {@code after} exception is {@linkplain Throwable#addSuppressed(Throwable) suppressed} + * by the failed result of this {@link AsyncCallbackSupplier}. + *
+ * <p>
+ * The {@code after} action is executed even if
+ * <ul>
+ *     <li>this {@link AsyncCallbackSupplier} fails;</li>
+ *     <li>the method {@link AsyncCallbackSupplier#get(SingleResultCallback)} of this {@link AsyncCallbackSupplier}
+ *     completes abruptly, thus violating its contract;</li>
+ * </ul>
+ * <sup>(1)</sup>but is not executed if
+ * <ul>
+ *     <li>the method {@link AsyncCallbackSupplier#get(SingleResultCallback)} of this {@link AsyncCallbackSupplier} neither completes
+ *     abruptly, nor completes its callback, i.e., violates its contract in the worst possible way.</li>
+ * </ul>
+ * In situations when {@code after} is executed despite + * {@link AsyncCallbackSupplier#get(SingleResultCallback)} violating its + * contract by completing abruptly, the {@code after} action is executed synchronously by the {@link #whenComplete(Runnable)} method. + * This is a price we have to pay to provide a guarantee similar to that of the {@code finally} block. + */ + default AsyncCallbackSupplier whenComplete(final Runnable after) { + MutableValue afterExecuted = new MutableValue<>(false); + Runnable trackableAfter = () -> { + try { + after.run(); + } finally { + afterExecuted.set(true); + } + }; + return callback -> { + SingleResultCallback callbackThatCallsAfter = (result, t) -> { + Throwable primaryException = t; + try { + trackableAfter.run(); + } catch (Throwable afterException) { + if (primaryException == null) { + primaryException = afterException; + } else { + primaryException.addSuppressed(afterException); + } + callback.onResult(null, primaryException); + return; + } + callback.onResult(result, primaryException); + }; + Throwable primaryUnexpectedException = null; + try { + get(callbackThatCallsAfter); + } catch (Throwable unexpectedException){ + primaryUnexpectedException = unexpectedException; + throw unexpectedException; + } finally { + if (primaryUnexpectedException != null && !afterExecuted.get()) { + try { + trackableAfter.run(); + } catch (Throwable afterException) { + primaryUnexpectedException.addSuppressed(afterException); + } + } + } + }; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/async/function/LoopState.java b/driver-core/src/main/com/mongodb/internal/async/function/LoopState.java new file mode 100644 index 00000000000..f3b19fecde7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/async/function/LoopState.java @@ -0,0 +1,215 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.async.function; + +import com.mongodb.annotations.Immutable; +import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.lang.Nullable; + +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.function.Supplier; + +import static com.mongodb.assertions.Assertions.assertFalse; +import static com.mongodb.assertions.Assertions.assertNotNull; + +/** + * Represents both the state associated with a loop and a handle that can be used to affect looping, e.g., + * to {@linkplain #breakAndCompleteIf(Supplier, SingleResultCallback) break} it. + * {@linkplain #attachment(AttachmentKey) Attachments} may be used by the associated loop + * to preserve a state between iterations. + * + *
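The try/finally-like guarantee of whenComplete described above might be used as follows (loadAsync and connection are hypothetical):

    AsyncCallbackSupplier<String> load = callback -> loadAsync(callback); // hypothetical async work
    load.whenComplete(connection::close) // runs after success, failure, or abrupt completion
            .get((result, t) -> {
                // by this point connection.close() has already run (happens-before)
            });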
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ * + * @see AsyncCallbackLoop + */ +@NotThreadSafe +public final class LoopState { + private int iteration; + private boolean lastIteration; + @Nullable + private Map, AttachmentValueContainer> attachments; + + public LoopState() { + iteration = 0; + } + + /** + * Advances this {@link LoopState} such that it represents the state of a new iteration. + * Must not be called before the {@linkplain #isFirstIteration() first iteration}, must be called before each subsequent iteration. + * + * @return {@code true} if the next iteration must be executed; {@code false} iff the loop was {@link #isLastIteration() broken}. + */ + boolean advance() { + if (lastIteration) { + return false; + } else { + iteration++; + removeAutoRemovableAttachments(); + return true; + } + } + + /** + * Returns {@code true} iff the current iteration is the first one. + * + * @see #iteration() + */ + public boolean isFirstIteration() { + return iteration == 0; + } + + /** + * Returns {@code true} iff {@link #breakAndCompleteIf(Supplier, SingleResultCallback)} / {@link #markAsLastIteration()} was called. + */ + boolean isLastIteration() { + return lastIteration; + } + + /** + * A 0-based iteration number. + */ + public int iteration() { + return iteration; + } + + /** + * This method emulates executing the + * {@code break} statement. Must not be called more than once per {@link LoopState}. + * + * @param predicate {@code true} iff the associated loop needs to be broken. + * @return {@code true} iff the {@code callback} was completed, which happens iff any of the following is true: + *
+ * <ul>
+ *     <li>the {@code predicate} completed abruptly, in which case the exception thrown is relayed to the {@code callback};</li>
+ *     <li>this method broke the associated loop.</li>
+ * </ul>
+ * If {@code true} is returned, the caller must complete the ongoing attempt. + * @see #isLastIteration() + */ + public boolean breakAndCompleteIf(final Supplier predicate, final SingleResultCallback callback) { + assertFalse(lastIteration); + try { + lastIteration = predicate.get(); + } catch (Throwable t) { + callback.onResult(null, t); + return true; + } + if (lastIteration) { + callback.onResult(null, null); + return true; + } else { + return false; + } + } + + /** + * This method is similar to {@link #breakAndCompleteIf(Supplier, SingleResultCallback)}. + * The difference is that it allows the current iteration to continue, yet no more iterations will happen. + * + * @see #isLastIteration() + */ + void markAsLastIteration() { + assertFalse(lastIteration); + lastIteration = true; + } + + /** + * The associated loop may use this method to preserve a state between iterations. + * + * @param autoRemove Specifies whether the attachment must be automatically removed before (in the happens-before order) the next + * {@linkplain #iteration() iteration} as if this removal were the very first action of the iteration. + * Note that there is no guarantee that the attachment is removed after the {@linkplain #isLastIteration() last iteration}. + * @return {@code this}. + * @see #attachment(AttachmentKey) + */ + public LoopState attach(final AttachmentKey key, final V value, final boolean autoRemove) { + attachments().put(assertNotNull(key), new AttachmentValueContainer(assertNotNull(value), autoRemove)); + return this; + } + + /** + * @see #attach(AttachmentKey, Object, boolean) + */ + public Optional attachment(final AttachmentKey key) { + AttachmentValueContainer valueContainer = attachments().get(assertNotNull(key)); + @SuppressWarnings("unchecked") V value = valueContainer == null ? null : (V) valueContainer.value(); + return Optional.ofNullable(value); + } + + private Map, AttachmentValueContainer> attachments() { + if (attachments == null) { + attachments = new HashMap<>(); + } + return attachments; + } + + private void removeAutoRemovableAttachments() { + if (attachments == null) { + return; + } + attachments.entrySet().removeIf(entry -> entry.getValue().autoRemove()); + } + + @Override + public String toString() { + return "LoopState{" + + "iteration=" + iteration + + ", attachments=" + attachments + + '}'; + } + + /** + * A value-based + * identifier of an attachment. + * + * @param The type of the corresponding attachment value. 
+ */ + @Immutable + // the type parameter V is of the essence even though it is not used in the interface itself + @SuppressWarnings("unused") + public interface AttachmentKey { + } + + private static final class AttachmentValueContainer { + @Nullable + private final Object value; + private final boolean autoRemove; + + AttachmentValueContainer(@Nullable final Object value, final boolean autoRemove) { + this.value = value; + this.autoRemove = autoRemove; + } + + @Nullable + Object value() { + return value; + } + + boolean autoRemove() { + return autoRemove; + } + + @Override + public String toString() { + return "AttachmentValueContainer{" + + "value=" + value + + ", autoRemove=" + autoRemove + + '}'; + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/async/function/RetryState.java b/driver-core/src/main/com/mongodb/internal/async/function/RetryState.java new file mode 100644 index 00000000000..e1cecf721fc --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/async/function/RetryState.java @@ -0,0 +1,449 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.async.function; + +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.async.function.LoopState.AttachmentKey; +import com.mongodb.lang.NonNull; +import com.mongodb.lang.Nullable; + +import java.util.Optional; +import java.util.function.BiPredicate; +import java.util.function.BinaryOperator; +import java.util.function.Supplier; + +import static com.mongodb.assertions.Assertions.assertFalse; +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.internal.TimeoutContext.createMongoTimeoutException; + +/** + * Represents both the state associated with a retryable activity and a handle that can be used to affect retrying, e.g., + * to {@linkplain #breakAndThrowIfRetryAnd(Supplier) break} it. + * {@linkplain #attachment(AttachmentKey) Attachments} may be used by the associated retryable activity either + * to preserve a state between attempts. + * + *
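Returning briefly to the attachment mechanism defined just above: a sketch with a hypothetical key, where autoRemove=false preserves the value across iterations:

    // Hypothetical key; the anonymous class gives the key identity semantics.
    static final LoopState.AttachmentKey<Integer> BATCH_SIZE = new LoopState.AttachmentKey<Integer>() { };

    LoopState state = new LoopState();
    state.attach(BATCH_SIZE, 100, false);                    // survives advance() because autoRemove is false
    int batchSize = state.attachment(BATCH_SIZE).orElse(0);  // read back in a later iteration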
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ * + * @see RetryingSyncSupplier + * @see RetryingAsyncCallbackSupplier + */ +@NotThreadSafe +public final class RetryState { + public static final int RETRIES = 1; + private static final int INFINITE_ATTEMPTS = Integer.MAX_VALUE; + + private final LoopState loopState; + private final int attempts; + private final boolean retryUntilTimeoutThrowsException; + @Nullable + private Throwable previouslyChosenException; + + /** + * Creates a {@code RetryState} with a positive number of allowed retries. {@link Integer#MAX_VALUE} is a special value interpreted as + * being unlimited. + *
+ * <p>
+ * If a timeout is not specified via {@link TimeoutContext#hasTimeoutMS()}, the specified {@code retries} param acts as a fallback
+ * bound. Otherwise, retries are unbounded until the timeout is reached.
+ * <p>
+ * It is possible to provide an additional {@code retryPredicate} in the {@link #doAdvanceOrThrow} method,
+ * which can be used to stop retrying based on a custom condition in addition to {@code retries} and {@link TimeoutContext}.
+ * + * @param retries A positive number of allowed retries. {@link Integer#MAX_VALUE} is a special value interpreted as being unlimited. + * @param timeoutContext A timeout context that will be used to determine if the operation has timed out. + * @see #attempts() + */ + public static RetryState withRetryableState(final int retries, final TimeoutContext timeoutContext) { + assertTrue(retries > 0); + if (timeoutContext.hasTimeoutMS()){ + return new RetryState(INFINITE_ATTEMPTS, timeoutContext); + } + return new RetryState(retries, null); + } + + public static RetryState withNonRetryableState() { + return new RetryState(0, null); + } + + /** + * Creates a {@link RetryState} that does not limit the number of retries. + * The number of attempts is limited iff {@link TimeoutContext#hasTimeoutMS()} is true and timeout has expired. + *
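A sketch of choosing between the factories above (operationIsRetryable and timeoutContext are assumed to exist):

    RetryState retryState = operationIsRetryable
            // bounded by RETRIES unless timeoutMS is set, in which case retries
            // are unbounded until the timeout is reached
            ? RetryState.withRetryableState(RetryState.RETRIES, timeoutContext)
            // a single attempt, no retries
            : RetryState.withNonRetryableState();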
+ * <p>
+ * It is possible to provide an additional {@code retryPredicate} in the {@link #doAdvanceOrThrow} method,
+ * which can be used to stop retrying based on a custom condition in addition to {@code retries} and {@link TimeoutContext}.
+ * + * @param timeoutContext A timeout context that will be used to determine if the operation has timed out. + * @see #attempts() + */ + public RetryState(final TimeoutContext timeoutContext) { + this(INFINITE_ATTEMPTS, timeoutContext); + } + + /** + * @param retries A non-negative number of allowed retries. {@link Integer#MAX_VALUE} is a special value interpreted as being unlimited. + * @param timeoutContext A timeout context that will be used to determine if the operation has timed out. + * @see #attempts() + */ + private RetryState(final int retries, @Nullable final TimeoutContext timeoutContext) { + assertTrue(retries >= 0); + loopState = new LoopState(); + attempts = retries == INFINITE_ATTEMPTS ? INFINITE_ATTEMPTS : retries + 1; + this.retryUntilTimeoutThrowsException = timeoutContext != null && timeoutContext.hasTimeoutMS(); + } + + /** + * Advances this {@link RetryState} such that it represents the state of a new attempt. + * If there is at least one more {@linkplain #attempts() attempt} left, it is consumed by this method. + * Must not be called before the {@linkplain #isFirstAttempt() first attempt}, must be called before each subsequent attempt. + *
+ * <p>
+ * This method is intended to be used by code that generally does not handle {@link Error}s explicitly, + * which is usually synchronous code. + * + * @param attemptException The exception produced by the most recent attempt. + * It is passed to the {@code retryPredicate} and to the {@code onAttemptFailureOperator}. + * @param onAttemptFailureOperator The action that is called once per failed attempt before (in the happens-before order) the + * {@code retryPredicate}, regardless of whether the {@code retryPredicate} is called. + * This action is allowed to have side effects. + *
+ * <p>
+ * It also has to choose which exception to preserve as a prospective failed result of the associated retryable activity. + * The {@code onAttemptFailureOperator} may mutate its arguments, choose from the arguments, or return a different exception, + * but it must return a {@code @}{@link NonNull} value. + * The choice is between
+ * <ul>
+ *     <li>the previously chosen exception or {@code null} if none has been chosen
+ *     (the first argument of the {@code onAttemptFailureOperator})</li>
+ *     <li>and the exception from the most recent attempt (the second argument of the {@code onAttemptFailureOperator}).</li>
+ * </ul>
+ * The result of the {@code onAttemptFailureOperator} does not affect the exception passed to the {@code retryPredicate}. + * @param retryPredicate {@code true} iff another attempt needs to be made. The {@code retryPredicate} is called not more than once + * per attempt and only if all the following is true: + *
+ * <ul>
+ *     <li>{@code onAttemptFailureOperator} completed normally;</li>
+ *     <li>the most recent attempt is not the {@linkplain #isLastAttempt() last} one.</li>
+ * </ul>
+ * The {@code retryPredicate} accepts this {@link RetryState} and the exception from the most recent attempt, + * and may mutate the exception. The {@linkplain RetryState} advances to represent the state of a new attempt + * after (in the happens-before order) testing the {@code retryPredicate}, and only if the predicate completes normally. + * @throws RuntimeException Iff any of the following is true: + *
+ * <ul>
+ *     <li>the {@code onAttemptFailureOperator} completed abruptly;</li>
+ *     <li>the most recent attempt is the {@linkplain #isLastAttempt() last} one;</li>
+ *     <li>the {@code retryPredicate} completed abruptly;</li>
+ *     <li>the {@code retryPredicate} is {@code false}.</li>
+ * </ul>
+ * The exception thrown represents the failed result of the associated retryable activity, + * i.e., the caller must not do any more attempts. + * @see #advanceOrThrow(Throwable, BinaryOperator, BiPredicate) + */ + void advanceOrThrow(final RuntimeException attemptException, final BinaryOperator onAttemptFailureOperator, + final BiPredicate retryPredicate) throws RuntimeException { + try { + doAdvanceOrThrow(attemptException, onAttemptFailureOperator, retryPredicate, true); + } catch (RuntimeException | Error unchecked) { + throw unchecked; + } catch (Throwable checked) { + throw new AssertionError(checked); + } + } + + /** + * This method is intended to be used by code that generally handles all {@link Throwable} types explicitly, + * which is usually asynchronous code. + * + * @see #advanceOrThrow(RuntimeException, BinaryOperator, BiPredicate) + */ + void advanceOrThrow(final Throwable attemptException, final BinaryOperator onAttemptFailureOperator, + final BiPredicate retryPredicate) throws Throwable { + doAdvanceOrThrow(attemptException, onAttemptFailureOperator, retryPredicate, false); + } + + /** + * @param onlyRuntimeExceptions {@code true} iff the method must expect {@link #previouslyChosenException} and {@code attemptException} to be + * {@link RuntimeException}s and must not explicitly handle other {@link Throwable} types, of which only {@link Error} is possible + * as {@link RetryState} does not have any source of {@link Exception}s. + * @param onAttemptFailureOperator See {@link #advanceOrThrow(RuntimeException, BinaryOperator, BiPredicate)}. + */ + private void doAdvanceOrThrow(final Throwable attemptException, + final BinaryOperator onAttemptFailureOperator, + final BiPredicate retryPredicate, + final boolean onlyRuntimeExceptions) throws Throwable { + assertTrue(attempt() < attempts); + assertNotNull(attemptException); + if (onlyRuntimeExceptions) { + assertTrue(isRuntime(attemptException)); + } + assertTrue(!isFirstAttempt() || previouslyChosenException == null); + Throwable newlyChosenException = callOnAttemptFailureOperator(previouslyChosenException, attemptException, onlyRuntimeExceptions, onAttemptFailureOperator); + + /* + * A MongoOperationTimeoutException indicates that the operation timed out, either during command execution or server selection. + * The timeout for server selection is determined by the computedServerSelectionMS = min(serverSelectionTimeoutMS, timeoutMS). + * + * It is important to check if the exception is an instance of MongoOperationTimeoutException to detect a timeout. + */ + if (isLastAttempt() || attemptException instanceof MongoOperationTimeoutException) { + previouslyChosenException = newlyChosenException; + /* + * The function of isLastIteration() is to indicate if retrying has + * been explicitly halted. Such a stop is not interpreted as + * a timeout exception but as a deliberate cessation of retry attempts. 
+ */ + if (retryUntilTimeoutThrowsException && !loopState.isLastIteration()) { + previouslyChosenException = createMongoTimeoutException( + "Retry attempt exceeded the timeout limit.", + previouslyChosenException); + } + throw previouslyChosenException; + } else { + // note that we must not update the state, e.g, `previouslyChosenException`, `loopState`, before calling `retryPredicate` + boolean retry = shouldRetry(this, attemptException, newlyChosenException, onlyRuntimeExceptions, retryPredicate); + previouslyChosenException = newlyChosenException; + if (retry) { + assertTrue(loopState.advance()); + } else { + throw previouslyChosenException; + } + } + } + + /** + * @param onlyRuntimeExceptions See {@link #doAdvanceOrThrow(Throwable, BinaryOperator, BiPredicate, boolean)}. + * @param onAttemptFailureOperator See {@link #advanceOrThrow(RuntimeException, BinaryOperator, BiPredicate)}. + */ + private static Throwable callOnAttemptFailureOperator( + @Nullable final Throwable previouslyChosenException, + final Throwable attemptException, + final boolean onlyRuntimeExceptions, + final BinaryOperator onAttemptFailureOperator) { + if (onlyRuntimeExceptions && previouslyChosenException != null) { + assertTrue(isRuntime(previouslyChosenException)); + } + Throwable result; + try { + result = assertNotNull(onAttemptFailureOperator.apply(previouslyChosenException, attemptException)); + if (onlyRuntimeExceptions) { + assertTrue(isRuntime(result)); + } + } catch (Throwable onAttemptFailureOperatorException) { + if (onlyRuntimeExceptions && !isRuntime(onAttemptFailureOperatorException)) { + throw onAttemptFailureOperatorException; + } + if (previouslyChosenException != null) { + onAttemptFailureOperatorException.addSuppressed(previouslyChosenException); + } + onAttemptFailureOperatorException.addSuppressed(attemptException); + throw onAttemptFailureOperatorException; + } + return result; + } + + /** + * @param readOnlyRetryState Must not be mutated by this method. + * @param onlyRuntimeExceptions See {@link #doAdvanceOrThrow(Throwable, BinaryOperator, BiPredicate, boolean)}. + */ + private boolean shouldRetry(final RetryState readOnlyRetryState, final Throwable attemptException, final Throwable newlyChosenException, + final boolean onlyRuntimeExceptions, final BiPredicate retryPredicate) { + try { + return retryPredicate.test(readOnlyRetryState, attemptException); + } catch (Throwable retryPredicateException) { + if (onlyRuntimeExceptions && !isRuntime(retryPredicateException)) { + throw retryPredicateException; + } + retryPredicateException.addSuppressed(newlyChosenException); + throw retryPredicateException; + } + } + + private static boolean isRuntime(@Nullable final Throwable exception) { + return exception instanceof RuntimeException; + } + + /** + * This method is similar to the semantics of the + * {@code break} statement, with the difference + * that breaking results in throwing an exception because the retry loop has more than one iteration only if the first iteration fails. + * Does nothing and completes normally if called during the {@linkplain #isFirstAttempt() first attempt}. + * This method is useful when the associated retryable activity detects that a retry attempt should not happen + * despite having been started. Must not be called more than once per {@link RetryState}. + *
+ * <p>
+ * If the {@code predicate} completes abruptly, this method also completes abruptly with the same exception but does not break retrying; + * if the {@code predicate} is {@code true}, then the method breaks retrying and completes abruptly by throwing the exception that is + * currently deemed to be a prospective failed result of the associated retryable activity. The thrown exception must also be used + * by the caller to complete the ongoing attempt. + *
+ * <p>
+ * If this method is called from + * {@linkplain RetryingSyncSupplier#RetryingSyncSupplier(RetryState, BinaryOperator, BiPredicate, Supplier) + * retry predicate / failed result transformer}, the behavior is unspecified. + * + * @param predicate {@code true} iff retrying needs to be broken. + * The {@code predicate} is not called during the {@linkplain #isFirstAttempt() first attempt}. + * @throws RuntimeException Iff any of the following is true: + *
+ * <ul>
+ *     <li>the {@code predicate} completed abruptly;</li>
+ *     <li>this method broke retrying.</li>
+ * </ul>
+ * The exception thrown represents the failed result of the associated retryable activity. + * @see #breakAndCompleteIfRetryAnd(Supplier, SingleResultCallback) + */ + public void breakAndThrowIfRetryAnd(final Supplier predicate) throws RuntimeException { + assertFalse(loopState.isLastIteration()); + if (!isFirstAttempt()) { + assertNotNull(previouslyChosenException); + assertTrue(previouslyChosenException instanceof RuntimeException); + RuntimeException localException = (RuntimeException) previouslyChosenException; + try { + if (predicate.get()) { + loopState.markAsLastIteration(); + } + } catch (Exception predicateException) { + predicateException.addSuppressed(localException); + throw predicateException; + } + if (loopState.isLastIteration()) { + throw localException; + } + } + } + + /** + * This method is intended to be used by callback-based code. It is similar to {@link #breakAndThrowIfRetryAnd(Supplier)}, + * but instead of throwing an exception, it relays it to the {@code callback}. + *
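A sketch of the synchronous break described above, with a hypothetical predicate and attempt body; when the break fires, the previously chosen exception is rethrown and ends the retry loop:

    void attemptOperation(final RetryState retryState) {
        // Does nothing on the first attempt; on a retry, rethrows the previously
        // chosen failure if the (hypothetical) predicate forbids retrying.
        retryState.breakAndThrowIfRetryAnd(() -> session.hasActiveTransaction());
        executeAttempt(); // hypothetical
    }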
+ * <p>
+ * If this method is called from + * {@linkplain RetryingAsyncCallbackSupplier#RetryingAsyncCallbackSupplier(RetryState, BinaryOperator, BiPredicate, AsyncCallbackSupplier) + * retry predicate / failed result transformer}, the behavior is unspecified. + * + * @return {@code true} iff the {@code callback} was completed, which happens in the same situations in which + * {@link #breakAndThrowIfRetryAnd(Supplier)} throws an exception. If {@code true} is returned, the caller must complete + * the ongoing attempt. + * @see #breakAndThrowIfRetryAnd(Supplier) + */ + public boolean breakAndCompleteIfRetryAnd(final Supplier predicate, final SingleResultCallback callback) { + try { + breakAndThrowIfRetryAnd(predicate); + return false; + } catch (Throwable t) { + callback.onResult(null, t); + return true; + } + } + + /** + * This method is similar to + * {@link RetryState#breakAndThrowIfRetryAnd(Supplier)} / {@link RetryState#breakAndCompleteIfRetryAnd(Supplier, SingleResultCallback)}. + * The difference is that it allows the current attempt to continue, yet no more attempts will happen. Also, unlike the aforementioned + * methods, this method has effect even if called during the {@linkplain #isFirstAttempt() first attempt}. + */ + public void markAsLastAttempt() { + loopState.markAsLastIteration(); + } + + /** + * Returns {@code true} iff the current attempt is the first one, i.e., no retries have been made. + * + * @see #attempts() + */ + public boolean isFirstAttempt() { + return loopState.isFirstIteration(); + } + + /** + * Returns {@code true} iff the current attempt is known to be the last one, i.e., it is known that no more retries will be made. + * An attempt is known to be the last one iff any of the following applies: + *
+ * <ul>
+ *     <li>{@link #breakAndThrowIfRetryAnd(Supplier)} / {@link #breakAndCompleteIfRetryAnd(Supplier, SingleResultCallback)} /
+ *     {@link #markAsLastAttempt()} was called.</li>
+ *     <li>A timeout is set and has been reached.</li>
+ *     <li>No timeout is set, and the number of {@linkplain #attempts() attempts} is limited, and the current attempt is the last one.</li>
+ * </ul>
+ * + * @see #attempts() + */ + public boolean isLastAttempt() { + if (loopState.isLastIteration()){ + return true; + } + if (retryUntilTimeoutThrowsException) { + return false; + } + return attempt() == attempts - 1; + } + + /** + * A 0-based attempt number. + * + * @see #attempts() + */ + public int attempt() { + return loopState.iteration(); + } + + /** + * Returns a positive maximum number of attempts: + *
+ * <ul>
+ *     <li>0 if the number of retries is {@linkplain #RetryState(TimeoutContext) unlimited};</li>
+ *     <li>1 if no retries are allowed;</li>
+ *     <li>{@link #RetryState(int, TimeoutContext) retries} + 1 otherwise.</li>
+ * </ul>
+ * + * @see #attempt() + * @see #isFirstAttempt() + * @see #isLastAttempt() + */ + public int attempts() { + return attempts == INFINITE_ATTEMPTS ? 0 : attempts; + } + + /** + * Returns the exception that is currently deemed to be a prospective failed result of the associated retryable activity. + * Note that this exception is not necessarily the one from the most recent failed attempt. + * Returns an {@linkplain Optional#isEmpty() empty} {@link Optional} iff called during the {@linkplain #isFirstAttempt() first attempt}. + *
+ * <p>
+ * In synchronous code the returned exception is of the type {@link RuntimeException}. + */ + public Optional exception() { + assertTrue(previouslyChosenException == null || !isFirstAttempt()); + return Optional.ofNullable(previouslyChosenException); + } + + /** + * @see LoopState#attach(AttachmentKey, Object, boolean) + */ + public RetryState attach(final AttachmentKey key, final V value, final boolean autoRemove) { + loopState.attach(key, value, autoRemove); + return this; + } + + /** + * @see LoopState#attachment(AttachmentKey) + */ + public Optional attachment(final AttachmentKey key) { + return loopState.attachment(key); + } + + @Override + public String toString() { + return "RetryState{" + + "loopState=" + loopState + + ", attempts=" + (attempts == INFINITE_ATTEMPTS ? "infinite" : attempts) + + ", exception=" + previouslyChosenException + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/async/function/RetryingAsyncCallbackSupplier.java b/driver-core/src/main/com/mongodb/internal/async/function/RetryingAsyncCallbackSupplier.java new file mode 100644 index 00000000000..16f6f2e7086 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/async/function/RetryingAsyncCallbackSupplier.java @@ -0,0 +1,130 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.async.function; + +import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.lang.NonNull; +import com.mongodb.lang.Nullable; + +import java.util.function.BiPredicate; +import java.util.function.BinaryOperator; +import java.util.function.Supplier; + +/** + * A decorator that implements automatic retrying of failed executions of an {@link AsyncCallbackSupplier}. + * {@link RetryingAsyncCallbackSupplier} may execute the original retryable asynchronous function multiple times sequentially, + * while guaranteeing that the callback passed to {@link #get(SingleResultCallback)} is completed at most once. + *
+ * <p>
+ * The original function may additionally observe or control retrying via {@link RetryState}. + * For example, the {@link RetryState#breakAndCompleteIfRetryAnd(Supplier, SingleResultCallback)} method may be used to + * break retrying if the original function decides so. + * + *
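A sketch of decorating an asynchronous read with this class (readDocumentAsync, the retry rule, and timeoutContext are hypothetical):

    new RetryingAsyncCallbackSupplier<String>(
            new RetryState(timeoutContext),
            (previous, latest) -> latest,        // keep the most recent failure
            (rs, t) -> rs.attempt() < 3,         // hypothetical retry rule
            callback -> readDocumentAsync(callback))
            .get((result, t) -> { /* final outcome, completed at most once */ });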
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ * + * @see RetryingSyncSupplier + */ +@NotThreadSafe +public final class RetryingAsyncCallbackSupplier implements AsyncCallbackSupplier { + private final RetryState state; + private final BiPredicate retryPredicate; + private final BinaryOperator onAttemptFailureOperator; + private final AsyncCallbackSupplier asyncFunction; + + /** + * @param state The {@link RetryState} to be deemed as initial for the purpose of the new {@link RetryingAsyncCallbackSupplier}. + * @param onAttemptFailureOperator The action that is called once per failed attempt before (in the happens-before order) the + * {@code retryPredicate}, regardless of whether the {@code retryPredicate} is called. + * This action is allowed to have side effects. + *
+ * <p>
+ * It also has to choose which exception to preserve as a prospective failed result of this {@link RetryingAsyncCallbackSupplier}. + * The {@code onAttemptFailureOperator} may mutate its arguments, choose from the arguments, or return a different exception, + * but it must return a {@code @}{@link NonNull} value. + * The choice is between
+ * <ul>
+ *     <li>the previously chosen failed result or {@code null} if none has been chosen
+ *     (the first argument of the {@code onAttemptFailureOperator})</li>
+ *     <li>and the failed result from the most recent attempt (the second argument of the {@code onAttemptFailureOperator}).</li>
+ * </ul>
+ * The result of the {@code onAttemptFailureOperator} does not affect the exception passed to the {@code retryPredicate}. + *
+ * <p>
+ * If {@code onAttemptFailureOperator} completes abruptly, then the {@code asyncFunction} cannot be retried and the exception thrown by + * the {@code onAttemptFailureOperator} is used as a failed result of this {@link RetryingAsyncCallbackSupplier}.
+ *
+ * @param retryPredicate {@code true} iff another attempt needs to be made. If it completes abruptly, + * then the {@code asyncFunction} cannot be retried and the exception thrown by the {@code retryPredicate} + * is used as a failed result of this {@link RetryingAsyncCallbackSupplier}. The {@code retryPredicate} is called not more than once + * per attempt and only if all the following is true: + *
+ * <ul>
+ *     <li>{@code onAttemptFailureOperator} completed normally;</li>
+ *     <li>the most recent attempt is not the {@linkplain RetryState#isLastAttempt() last} one.</li>
+ * </ul>
+ * The {@code retryPredicate} accepts this {@link RetryState} and the exception from the most recent attempt, + * and may mutate the exception. The {@linkplain RetryState} advances to represent the state of a new attempt + * after (in the happens-before order) testing the {@code retryPredicate}, and only if the predicate completes normally. + * @param asyncFunction The retryable {@link AsyncCallbackSupplier} to be decorated. + */ + public RetryingAsyncCallbackSupplier( + final RetryState state, + final BinaryOperator onAttemptFailureOperator, + final BiPredicate retryPredicate, + final AsyncCallbackSupplier asyncFunction) { + this.state = state; + this.retryPredicate = retryPredicate; + this.onAttemptFailureOperator = onAttemptFailureOperator; + this.asyncFunction = asyncFunction; + } + + public RetryingAsyncCallbackSupplier( + final RetryState state, + final BiPredicate retryPredicate, + final AsyncCallbackSupplier asyncFunction) { + this(state, (previouslyChosenFailure, lastAttemptFailure) -> lastAttemptFailure, retryPredicate, asyncFunction); + } + + @Override + public void get(final SingleResultCallback callback) { + /* `asyncFunction` and `callback` are the only externally provided pieces of code for which we do not need to care about + * them throwing exceptions. If they do, that violates their contract and there is nothing we should do about it. */ + asyncFunction.get(new RetryingCallback(callback)); + } + + /** + * This callback is allowed to be completed more than once. + */ + @NotThreadSafe + private class RetryingCallback implements SingleResultCallback { + private final SingleResultCallback wrapped; + + RetryingCallback(final SingleResultCallback callback) { + wrapped = callback; + } + + @Override + public void onResult(@Nullable final R result, @Nullable final Throwable t) { + if (t != null) { + try { + state.advanceOrThrow(t, onAttemptFailureOperator, retryPredicate); + } catch (Throwable failedResult) { + wrapped.onResult(null, failedResult); + return; + } + asyncFunction.get(this); + } else { + wrapped.onResult(result, null); + } + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/async/function/RetryingSyncSupplier.java b/driver-core/src/main/com/mongodb/internal/async/function/RetryingSyncSupplier.java new file mode 100644 index 00000000000..ad3e4b2b807 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/async/function/RetryingSyncSupplier.java @@ -0,0 +1,76 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.async.function; + +import com.mongodb.annotations.NotThreadSafe; + +import java.util.function.BiPredicate; +import java.util.function.BinaryOperator; +import java.util.function.Supplier; + +/** + * A decorator that implements automatic retrying of failed executions of a {@link Supplier}. + * {@link RetryingSyncSupplier} may execute the original retryable function multiple times sequentially. + *
+ * <p>
+ * The original function may additionally observe or control retrying via {@link RetryState}. + * For example, the {@link RetryState#breakAndThrowIfRetryAnd(Supplier)} method may be used to + * break retrying if the original function decides so. + * + *
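Its use mirrors the asynchronous decorator, but get() blocks and rethrows the chosen failure once retrying stops (readDocument and isTransient are hypothetical):

    String result = new RetryingSyncSupplier<String>(
            RetryState.withRetryableState(RetryState.RETRIES, timeoutContext),
            (previous, latest) -> latest, // keep the most recent failure
            (rs, t) -> isTransient(t),    // hypothetical retry rule
            () -> readDocument())         // hypothetical attempt
            .get();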
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ * + * @see RetryingAsyncCallbackSupplier + */ +@NotThreadSafe +public final class RetryingSyncSupplier implements Supplier { + private final RetryState state; + private final BiPredicate retryPredicate; + private final BinaryOperator onAttemptFailureOperator; + private final Supplier syncFunction; + + /** + * See {@link RetryingAsyncCallbackSupplier#RetryingAsyncCallbackSupplier(RetryState, BinaryOperator, BiPredicate, AsyncCallbackSupplier)} + * for the documentation of the parameters. + * + * @param onAttemptFailureOperator Even though the {@code onAttemptFailureOperator} accepts {@link Throwable}, + * only {@link RuntimeException}s are passed to it. + * @param retryPredicate Even though the {@code retryPredicate} accepts {@link Throwable}, + * only {@link RuntimeException}s are passed to it. + */ + public RetryingSyncSupplier( + final RetryState state, + final BinaryOperator onAttemptFailureOperator, + final BiPredicate retryPredicate, + final Supplier syncFunction) { + this.state = state; + this.retryPredicate = retryPredicate; + this.onAttemptFailureOperator = onAttemptFailureOperator; + this.syncFunction = syncFunction; + } + + @Override + public R get() { + while (true) { + try { + return syncFunction.get(); + } catch (RuntimeException attemptException) { + state.advanceOrThrow(attemptException, onAttemptFailureOperator, retryPredicate); + } catch (Exception attemptException) { + // wrap potential sneaky / Kotlin exceptions + state.advanceOrThrow(new RuntimeException(attemptException), onAttemptFailureOperator, retryPredicate); + } + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/async/function/package-info.java b/driver-core/src/main/com/mongodb/internal/async/function/package-info.java new file mode 100644 index 00000000000..2a89dc73a54 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/async/function/package-info.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. + */ + +@Internal +@NonNullApi +package com.mongodb.internal.async.function; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/async/package-info.java b/driver-core/src/main/com/mongodb/internal/async/package-info.java new file mode 100644 index 00000000000..c6c62967258 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/async/package-info.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. + */ + +@Internal +@NonNullApi +package com.mongodb.internal.async; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/authentication/AwsCredentialHelper.java b/driver-core/src/main/com/mongodb/internal/authentication/AwsCredentialHelper.java new file mode 100644 index 00000000000..42f8fbfffdb --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/authentication/AwsCredentialHelper.java @@ -0,0 +1,102 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.authentication; + +import com.mongodb.AwsCredential; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.lang.Nullable; + +import java.util.function.Supplier; + +import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; + +/** + * Utility class for working with AWS authentication. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
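A caller-side sketch for the helper below; the AwsCredential accessor names are assumptions based on the suppliers in this diff, and the null check reflects the @Nullable return:

```java
// The supplier (AWS SDK v2, SDK v1, or built-in) is chosen once, at class
// initialization, based on which classes are present on the classpath.
AwsCredential credential = AwsCredentialHelper.obtainFromEnvironment();
if (credential != null) {
    String accessKeyId = credential.getAccessKeyId();   // assumed accessor
    String sessionToken = credential.getSessionToken(); // null for static credentials
}
```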
+ */ +public final class AwsCredentialHelper { + public static final Logger LOGGER = Loggers.getLogger("authenticator"); + + private static volatile Supplier awsCredentialSupplier; + + static { + if (isClassAvailable("software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider")) { + awsCredentialSupplier = new AwsSdkV2CredentialSupplier(); + LOGGER.info("Using DefaultCredentialsProvider from AWS SDK v2 to retrieve AWS credentials. This is the recommended " + + "configuration"); + } else if (isClassAvailable("com.amazonaws.auth.DefaultAWSCredentialsProviderChain")) { + awsCredentialSupplier = new AwsSdkV1CredentialSupplier(); + LOGGER.info("Using DefaultAWSCredentialsProviderChain from AWS SDK v1 to retrieve AWS credentials. Consider adding a " + + "dependency to AWS SDK v2's software.amazon.awssdk:auth artifact to get access to additional AWS authentication " + + "functionality."); + } else { + awsCredentialSupplier = new BuiltInAwsCredentialSupplier(); + LOGGER.info("Using built-in driver implementation to retrieve AWS credentials. Consider adding a dependency to AWS SDK " + + "v2's software.amazon.awssdk:auth artifact to get access to additional AWS authentication functionality."); + } + } + + private static boolean isClassAvailable(final String className) { + try { + Class.forName(className); + return true; + } catch (ClassNotFoundException e) { + return false; + } + } + + /** + * This method is visible to allow tests to require the built-in provider rather than rely on the fixed checks for classes on the + * classpath. It allows us to easily write tests of the built-in implementation without resorting to runtime classpath shenanigans. + */ + @VisibleForTesting(otherwise = PRIVATE) + public static void requireBuiltInProvider() { + LOGGER.info("Using built-in driver implementation to retrieve AWS credentials"); + awsCredentialSupplier = new BuiltInAwsCredentialSupplier(); + } + + /** + * This method is visible to allow tests to require the AWS SDK v1 provider rather than rely on the fixed checks for classes on the + * classpath. It allows us to easily write tests of the AWS SDK v1 implementation without resorting to runtime classpath shenanigans. + */ + @VisibleForTesting(otherwise = PRIVATE) + public static void requireAwsSdkV1Provider() { + LOGGER.info("Using AWS SDK v1 to retrieve AWS credentials"); + awsCredentialSupplier = new AwsSdkV1CredentialSupplier(); + } + + /** + * This method is visible to allow tests to require the AWS SDK v2 provider rather than rely on the fixed checks for classes on the + * classpath. It allows us to easily write tests of the AWS SDK v2 implementation without resorting to runtime classpath shenanigans. + */ + @VisibleForTesting(otherwise = PRIVATE) + public static void requireAwsSdkV2Provider() { + LOGGER.info("Using AWS SDK v2 to retrieve AWS credentials"); + awsCredentialSupplier = new AwsSdkV2CredentialSupplier(); + } + + @Nullable + public static AwsCredential obtainFromEnvironment() { + return awsCredentialSupplier.get(); + } + + private AwsCredentialHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/authentication/AwsSdkV1CredentialSupplier.java b/driver-core/src/main/com/mongodb/internal/authentication/AwsSdkV1CredentialSupplier.java new file mode 100644 index 00000000000..103e61369c6 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/authentication/AwsSdkV1CredentialSupplier.java @@ -0,0 +1,42 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.authentication; + +import com.amazonaws.auth.AWSCredentials; +import com.amazonaws.auth.AWSCredentialsProvider; +import com.amazonaws.auth.AWSSessionCredentials; +import com.amazonaws.auth.DefaultAWSCredentialsProviderChain; +import com.mongodb.AwsCredential; + +import java.util.function.Supplier; + +final class AwsSdkV1CredentialSupplier implements Supplier { + + private final AWSCredentialsProvider provider = DefaultAWSCredentialsProviderChain.getInstance(); + + @Override + public AwsCredential get() { + AWSCredentials credentials = provider.getCredentials(); + if (credentials instanceof AWSSessionCredentials) { + AWSSessionCredentials sessionCredentials = (AWSSessionCredentials) credentials; + return new AwsCredential(sessionCredentials.getAWSAccessKeyId(), sessionCredentials.getAWSSecretKey(), + sessionCredentials.getSessionToken()); + } else { + return new AwsCredential(credentials.getAWSAccessKeyId(), credentials.getAWSSecretKey(), null); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/authentication/AwsSdkV2CredentialSupplier.java b/driver-core/src/main/com/mongodb/internal/authentication/AwsSdkV2CredentialSupplier.java new file mode 100644 index 00000000000..97d5e3ed60d --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/authentication/AwsSdkV2CredentialSupplier.java @@ -0,0 +1,42 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.authentication; + +import com.mongodb.AwsCredential; +import software.amazon.awssdk.auth.credentials.AwsCredentials; +import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; +import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; +import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; + +import java.util.function.Supplier; + +final class AwsSdkV2CredentialSupplier implements Supplier { + + private final AwsCredentialsProvider provider = DefaultCredentialsProvider.create(); + + @Override + public AwsCredential get() { + AwsCredentials credentials = provider.resolveCredentials(); + if (credentials instanceof AwsSessionCredentials) { + AwsSessionCredentials sessionCredentials = (AwsSessionCredentials) credentials; + return new AwsCredential(sessionCredentials.accessKeyId(), sessionCredentials.secretAccessKey(), + sessionCredentials.sessionToken()); + } else { + return new AwsCredential(credentials.accessKeyId(), credentials.secretAccessKey(), null); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/authentication/AzureCredentialHelper.java b/driver-core/src/main/com/mongodb/internal/authentication/AzureCredentialHelper.java new file mode 100644 index 00000000000..2a48b8b6fc3 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/authentication/AzureCredentialHelper.java @@ -0,0 +1,114 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.authentication; + +import com.mongodb.MongoClientException; +import com.mongodb.internal.ExpirableValue; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.json.JsonParseException; + +import java.io.UnsupportedEncodingException; +import java.net.URLEncoder; +import java.time.Duration; +import java.util.HashMap; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +import static com.mongodb.internal.Locks.lockInterruptibly; +import static com.mongodb.internal.authentication.HttpHelper.getHttpContents; + +/** + * Utility class for working with Azure authentication. + * + *
<p>This class should not be considered a part of the public API.</p>
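As a usage sketch for the class below, assuming the standard BsonDocument accessors; the helper caches the IMDS token and, per the code that follows, renews it one minute before its reported expiry:

```java
// Hypothetical caller: a cache hit returns immediately; a miss fetches the
// token from the Azure IMDS endpoint under a lock and caches it.
BsonDocument response = AzureCredentialHelper.obtainFromEnvironment();
String accessToken = response.getString("accessToken").getValue();
```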
+ */ +public final class AzureCredentialHelper { + private static final String ACCESS_TOKEN_FIELD = "access_token"; + private static final String EXPIRES_IN_FIELD = "expires_in"; + private static final Lock CACHED_ACCESS_TOKEN_LOCK = new ReentrantLock(); + private static volatile ExpirableValue cachedAccessToken = ExpirableValue.expired(); + + public static BsonDocument obtainFromEnvironment() { + String accessToken; + Optional cachedValue = cachedAccessToken.getValue(); + if (cachedValue.isPresent()) { + accessToken = cachedValue.get(); + } else { + lockInterruptibly(CACHED_ACCESS_TOKEN_LOCK); + try { + cachedValue = cachedAccessToken.getValue(); + if (cachedValue.isPresent()) { + accessToken = cachedValue.get(); + } else { + long startNanoTime = System.nanoTime(); + CredentialInfo response = fetchAzureCredentialInfo("https://vault.azure.net", null); + accessToken = response.getAccessToken(); + Duration duration = response.getExpiresIn().minus(Duration.ofMinutes(1)); + cachedAccessToken = ExpirableValue.expirable(accessToken, duration, startNanoTime); + } + } finally { + CACHED_ACCESS_TOKEN_LOCK.unlock(); + } + } + return new BsonDocument("accessToken", new BsonString(accessToken)); + } + + public static CredentialInfo fetchAzureCredentialInfo(final String resource, @Nullable final String clientId) { + String endpoint = "http://169.254.169.254:80" + + "/metadata/identity/oauth2/token?api-version=2018-02-01" + + "&resource=" + getEncoded(resource) + + (clientId == null ? "" : "&client_id=" + getEncoded(clientId)); + + Map headers = new HashMap<>(); + headers.put("Metadata", "true"); + headers.put("Accept", "application/json"); + + BsonDocument responseDocument; + try { + responseDocument = BsonDocument.parse(getHttpContents("GET", endpoint, headers)); + } catch (JsonParseException e) { + throw new MongoClientException("Exception parsing JSON from Azure IMDS metadata response.", e); + } + + if (!responseDocument.isString(ACCESS_TOKEN_FIELD)) { + throw new MongoClientException(String.format( + "The %s field from Azure IMDS metadata response is missing or is not a string", ACCESS_TOKEN_FIELD)); + } + if (!responseDocument.isString(EXPIRES_IN_FIELD)) { + throw new MongoClientException(String.format( + "The %s field from Azure IMDS metadata response is missing or is not a string", EXPIRES_IN_FIELD)); + } + String accessToken = responseDocument.getString(ACCESS_TOKEN_FIELD).getValue(); + int expiresInSeconds = Integer.parseInt(responseDocument.getString(EXPIRES_IN_FIELD).getValue()); + return new CredentialInfo(accessToken, Duration.ofSeconds(expiresInSeconds)); + } + + static String getEncoded(final String resource) { + try { + return URLEncoder.encode(resource, "UTF-8"); + } catch (UnsupportedEncodingException e) { + throw new RuntimeException(e); + } + } + + private AzureCredentialHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/authentication/BuiltInAwsCredentialSupplier.java b/driver-core/src/main/com/mongodb/internal/authentication/BuiltInAwsCredentialSupplier.java new file mode 100644 index 00000000000..87a5cfc37b1 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/authentication/BuiltInAwsCredentialSupplier.java @@ -0,0 +1,73 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.authentication; + +import com.mongodb.AwsCredential; +import org.bson.BsonDocument; + +import java.util.HashMap; +import java.util.Map; +import java.util.function.Supplier; + +import static com.mongodb.internal.authentication.HttpHelper.getHttpContents; + +class BuiltInAwsCredentialSupplier implements Supplier { + + @Override + public AwsCredential get() { + if (System.getenv("AWS_ACCESS_KEY_ID") != null) { + return obtainFromEnvironmentVariables(); + } else { + return obtainFromEc2OrEcsResponse(); + } + } + + private static AwsCredential obtainFromEnvironmentVariables() { + return new AwsCredential( + System.getenv("AWS_ACCESS_KEY_ID"), + System.getenv("AWS_SECRET_ACCESS_KEY"), + System.getenv("AWS_SESSION_TOKEN")); + } + + private static AwsCredential obtainFromEc2OrEcsResponse() { + String path = System.getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"); + BsonDocument ec2OrEcsResponse = path == null ? BsonDocument.parse(getEc2Response()) : BsonDocument.parse(getEcsResponse(path)); + + return new AwsCredential( + ec2OrEcsResponse.getString("AccessKeyId").getValue(), + ec2OrEcsResponse.getString("SecretAccessKey").getValue(), + ec2OrEcsResponse.getString("Token").getValue()); + } + + private static String getEcsResponse(final String path) { + return getHttpContents("GET", "http://169.254.170.2" + path, null); + } + + private static String getEc2Response() { + final String endpoint = "http://169.254.169.254"; + final String path = "/latest/meta-data/iam/security-credentials/"; + + Map header = new HashMap<>(); + header.put("X-aws-ec2-metadata-token-ttl-seconds", "30"); + String token = getHttpContents("PUT", endpoint + "/latest/api/token", header); + + header.clear(); + header.put("X-aws-ec2-metadata-token", token); + String role = getHttpContents("GET", endpoint + path, header); + return getHttpContents("GET", endpoint + path + role, header); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/authentication/CredentialInfo.java b/driver-core/src/main/com/mongodb/internal/authentication/CredentialInfo.java new file mode 100644 index 00000000000..8b1e601b13a --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/authentication/CredentialInfo.java @@ -0,0 +1,44 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.authentication; + +import java.time.Duration; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class CredentialInfo { + private final String accessToken; + private final Duration expiresIn; + + /** + * @param expiresIn The meaning of {@linkplain Duration#isZero() zero-length} duration is the same as in + * {@link com.mongodb.MongoCredential.OidcCallbackResult#OidcCallbackResult(String, Duration)}. + */ + public CredentialInfo(final String accessToken, final Duration expiresIn) { + this.accessToken = accessToken; + this.expiresIn = expiresIn; + } + + public String getAccessToken() { + return accessToken; + } + + public Duration getExpiresIn() { + return expiresIn; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/authentication/GcpCredentialHelper.java b/driver-core/src/main/com/mongodb/internal/authentication/GcpCredentialHelper.java new file mode 100644 index 00000000000..3f0272da48c --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/authentication/GcpCredentialHelper.java @@ -0,0 +1,62 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.authentication; + +import com.mongodb.MongoClientException; +import org.bson.BsonDocument; + +import java.time.Duration; +import java.util.HashMap; +import java.util.Map; + +import static com.mongodb.internal.authentication.AzureCredentialHelper.getEncoded; +import static com.mongodb.internal.authentication.HttpHelper.getHttpContents; + +/** + * Utility class for working with GCP authentication. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
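A short usage sketch for the class below; the audience URL is a placeholder, and the Duration.ZERO on the identity-token result follows the zero-means-no-expiry convention noted on CredentialInfo:

```java
// Both calls go to the GCE metadata server; the helper adds the required
// "Metadata-Flavor: Google" header itself.
BsonDocument token = GcpCredentialHelper.obtainFromEnvironment();
String accessToken = token.getString("accessToken").getValue();

CredentialInfo identity =
        GcpCredentialHelper.fetchGcpCredentialInfo("https://example.com/audience"); // placeholder audience
```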
+ */ +public final class GcpCredentialHelper { + public static BsonDocument obtainFromEnvironment() { + String endpoint = "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token"; + + Map header = new HashMap<>(); + header.put("Metadata-Flavor", "Google"); + String response = getHttpContents("GET", endpoint, header); + BsonDocument responseDocument = BsonDocument.parse(response); + if (responseDocument.containsKey("access_token")) { + return new BsonDocument("accessToken", responseDocument.get("access_token")); + } else { + throw new MongoClientException("access_token is missing from GCE metadata response. Full response is ''" + response); + } + } + + public static CredentialInfo fetchGcpCredentialInfo(final String audience) { + String endpoint = "http://metadata/computeMetadata/v1/instance/service-accounts/default/identity?audience=" + + getEncoded(audience); + Map header = new HashMap<>(); + header.put("Metadata-Flavor", "Google"); + String response = getHttpContents("GET", endpoint, header); + return new CredentialInfo( + response, + Duration.ZERO); + } + + private GcpCredentialHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/authentication/HttpHelper.java b/driver-core/src/main/com/mongodb/internal/authentication/HttpHelper.java new file mode 100644 index 00000000000..c7658bb9e0d --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/authentication/HttpHelper.java @@ -0,0 +1,74 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.authentication; + +import com.mongodb.MongoClientException; +import com.mongodb.lang.NonNull; +import com.mongodb.lang.Nullable; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStreamReader; +import java.net.HttpURLConnection; +import java.net.URL; +import java.nio.charset.StandardCharsets; +import java.util.Map; + +/** + * Utility class for working with HTTP servers. 
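Since the class below is package-private, this sketch assumes a caller inside com.mongodb.internal.authentication; getHttpContents applies 10-second connect and read timeouts and throws MongoClientException on any non-200 status or I/O failure:

```java
// Hypothetical in-package call against the EC2 metadata service.
Map<String, String> headers = new HashMap<>();
headers.put("X-aws-ec2-metadata-token-ttl-seconds", "30");
String token = HttpHelper.getHttpContents(
        "PUT", "http://169.254.169.254/latest/api/token", headers);
```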
+ */ +final class HttpHelper { + + private HttpHelper() { + } + + @NonNull + public static String getHttpContents(final String method, final String endpoint, @Nullable final Map headers) { + StringBuilder content = new StringBuilder(); + HttpURLConnection conn = null; + try { + conn = (HttpURLConnection) new URL(endpoint).openConnection(); + conn.setConnectTimeout(10000); + conn.setReadTimeout(10000); + conn.setRequestMethod(method); + if (headers != null) { + for (Map.Entry kvp : headers.entrySet()) { + conn.setRequestProperty(kvp.getKey(), kvp.getValue()); + } + } + + int status = conn.getResponseCode(); + if (status != HttpURLConnection.HTTP_OK) { + throw new IOException(String.format("%d %s", status, conn.getResponseMessage())); + } + + try (BufferedReader in = new BufferedReader(new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) { + String inputLine; + while ((inputLine = in.readLine()) != null) { + content.append(inputLine); + } + } + } catch (IOException e) { + throw new MongoClientException("Unexpected IOException from endpoint " + endpoint + ".", e); + } finally { + if (conn != null) { + conn.disconnect(); + } + } + return content.toString(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/authentication/NativeAuthenticationHelper.java b/driver-core/src/main/com/mongodb/internal/authentication/NativeAuthenticationHelper.java new file mode 100644 index 00000000000..06bfbf59261 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/authentication/NativeAuthenticationHelper.java @@ -0,0 +1,80 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.authentication; + +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.charset.StandardCharsets; + +import static com.mongodb.internal.HexUtils.hexMD5; + +/** + * Utility class for working with MongoDB native authentication. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
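The hash described below is plain MD5 over UTF-8 bytes, so it can be reproduced with the JDK alone; a self-contained sketch using MessageDigest in place of the internal hexMD5 helper, with hypothetical credentials:

```java
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;

public final class AuthHashExample {
    public static void main(String[] args) throws Exception {
        // hex(MD5(userName + ":mongo:" + password)) with hypothetical values
        MessageDigest md5 = MessageDigest.getInstance("MD5");
        byte[] digest = md5.digest("alice:mongo:secret".getBytes(StandardCharsets.UTF_8));
        StringBuilder hex = new StringBuilder();
        for (byte b : digest) {
            hex.append(String.format("%02x", b));
        }
        // Matches NativeAuthenticationHelper.createAuthenticationHash("alice", "secret".toCharArray())
        System.out.println(hex);
    }
}
```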
+ */ +public final class NativeAuthenticationHelper { + + /** + * Creates a hash of the given user name and password, which is the hex encoding of + * {@code MD5( + ":mongo:" + )}. + * + * @param userName the user name + * @param password the password + * @return the hash as a string + * @mongodb.driver.manual ../meta-driver/latest/legacy/implement-authentication-in-driver/ Authentication + */ + public static String createAuthenticationHash(final String userName, final char[] password) { + ByteArrayOutputStream bout = new ByteArrayOutputStream(userName.length() + 20 + password.length); + try { + bout.write(userName.getBytes(StandardCharsets.UTF_8)); + bout.write(":mongo:".getBytes(StandardCharsets.UTF_8)); + bout.write(new String(password).getBytes(StandardCharsets.UTF_8)); + } catch (IOException ioe) { + throw new RuntimeException("impossible", ioe); + } + return hexMD5(bout.toByteArray()); + } + + public static BsonDocument getAuthCommand(final String userName, final char[] password, final String nonce) { + return getAuthCommand(userName, createAuthenticationHash(userName, password), nonce); + } + + public static BsonDocument getAuthCommand(final String userName, final String authHash, final String nonce) { + String key = nonce + userName + authHash; + + BsonDocument cmd = new BsonDocument(); + + cmd.put("authenticate", new BsonInt32(1)); + cmd.put("user", new BsonString(userName)); + cmd.put("nonce", new BsonString(nonce)); + cmd.put("key", new BsonString(hexMD5(key.getBytes(StandardCharsets.UTF_8)))); + + return cmd; + } + + public static BsonDocument getNonceCommand() { + return new BsonDocument("getnonce", new BsonInt32(1)); + } + + private NativeAuthenticationHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/authentication/SaslPrep.java b/driver-core/src/main/com/mongodb/internal/authentication/SaslPrep.java new file mode 100644 index 00000000000..14a99f90f6f --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/authentication/SaslPrep.java @@ -0,0 +1,294 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * Copyright 2017 Tom Bentley + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.authentication; + +import java.nio.CharBuffer; +import java.text.Normalizer; + +/** + * Utility class for Sasl string preparation. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
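The worked examples from RFC 4013, Section 3 show what the two entry points below do; these inputs and outputs come straight from the RFC:

```java
// SOFT HYPHEN (U+00AD) is "mapped to nothing"; U+2168 is NFKC-normalized;
// prohibited characters such as U+0007 raise IllegalArgumentException.
String a = SaslPrep.saslPrepStored("I\u00ADX"); // -> "IX"
String b = SaslPrep.saslPrepStored("user");     // -> "user" (unchanged)
String c = SaslPrep.saslPrepStored("\u2168");   // -> "IX"
```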
+ */ +public final class SaslPrep { + + /** + * Return the {@code SASLPrep}-canonicalised version of the given {@code str} for use as a query string. + * This implements the {@code SASLPrep} algorithm defined in RFC 4013. + * + * @param str The string to canonicalise. + * @return The canonicalised string. + * @throws IllegalArgumentException if the string contained prohibited codepoints, or broke the requirements for bidirectional + * character handling. + * @see RFC 3454, Section 7 for discussion of what a query string is. + */ + public static String saslPrepQuery(final String str) { + return saslPrep(str, true); + } + + /** + * Return the {@code SASLPrep}-canonicalised version of the given {@code str} for use as a stored string. + * This implements the {@code SASLPrep} algorithm defined in RFC 4013. + * + * @param str The string to canonicalise. + * @return The canonicalised string. + * @throws IllegalArgumentException if the string contained prohibited codepoints, or broke the requirements for bidirectional + * character handling. + * @see RFC 3454, Section 7 for discussion of what a stored string is. + */ + public static String saslPrepStored(final String str) { + return saslPrep(str, false); + } + + + private static String saslPrep(final String str, final boolean allowUnassigned) { + char[] chars = str.toCharArray(); + + // 1. Map + + // non-ASCII space chars mapped to space + for (int i = 0; i < str.length(); i++) { + char ch = str.charAt(i); + if (nonAsciiSpace(ch)) { + chars[i] = ' '; + } + } + + int length = 0; + for (int i = 0; i < str.length(); i++) { + char ch = chars[i]; + if (!mappedToNothing(ch)) { + chars[length++] = ch; + } + } + + // 2. Normalize + String normalized = Normalizer.normalize(CharBuffer.wrap(chars, 0, length), Normalizer.Form.NFKC); + + boolean containsRandALCat = false; + boolean containsLCat = false; + boolean initialRandALCat = false; + for (int i = 0; i < normalized.length();) { + int codepoint = normalized.codePointAt(i); + // 3. Prohibit + if (prohibited(codepoint)) { + throw new IllegalArgumentException("Prohibited character at position " + i); + } + + // 4. Check bidi + byte directionality = Character.getDirectionality(codepoint); + boolean isRandALcat = directionality == Character.DIRECTIONALITY_RIGHT_TO_LEFT + || directionality == Character.DIRECTIONALITY_RIGHT_TO_LEFT_ARABIC; + containsRandALCat |= isRandALcat; + containsLCat |= directionality == Character.DIRECTIONALITY_LEFT_TO_RIGHT; + + initialRandALCat |= i == 0 && isRandALcat; + if (!allowUnassigned && !Character.isDefined(codepoint)) { + throw new IllegalArgumentException("Character at position " + i + " is unassigned"); + } + i += Character.charCount(codepoint); + + if (initialRandALCat && i >= normalized.length() && !isRandALcat) { + throw new IllegalArgumentException("First character is RandALCat, but last character is not"); + } + + } + if (containsRandALCat && containsLCat) { + throw new IllegalArgumentException("Contains both RandALCat characters and LCat characters"); + } + return normalized; + } + + /** + * Return true if the given {@code codepoint} is a prohibited character as defined by + * RFC 4013, Section 2.3. 
+ */ + static boolean prohibited(final int codepoint) { + return nonAsciiSpace((char) codepoint) + || asciiControl((char) codepoint) + || nonAsciiControl(codepoint) + || privateUse(codepoint) + || nonCharacterCodePoint(codepoint) + || surrogateCodePoint(codepoint) + || inappropriateForPlainText(codepoint) + || inappropriateForCanonical(codepoint) + || changeDisplayProperties(codepoint) + || tagging(codepoint); + } + + /** + * Return true if the given {@code codepoint} is a tagging character as defined by + * RFC 3454, Appendix C.9. + */ + private static boolean tagging(final int codepoint) { + return codepoint == 0xE0001 + || 0xE0020 <= codepoint && codepoint <= 0xE007F; + } + + /** + * Return true if the given {@code codepoint} is change display properties or deprecated characters as defined by + * RFC 3454, Appendix C.8. + */ + private static boolean changeDisplayProperties(final int codepoint) { + return codepoint == 0x0340 + || codepoint == 0x0341 + || codepoint == 0x200E + || codepoint == 0x200F + || codepoint == 0x202A + || codepoint == 0x202B + || codepoint == 0x202C + || codepoint == 0x202D + || codepoint == 0x202E + || codepoint == 0x206A + || codepoint == 0x206B + || codepoint == 0x206C + || codepoint == 0x206D + || codepoint == 0x206E + || codepoint == 0x206F; + } + + /** + * Return true if the given {@code codepoint} is inappropriate for canonical representation characters as defined by + * RFC 3454, Appendix C.7. + */ + private static boolean inappropriateForCanonical(final int codepoint) { + return 0x2FF0 <= codepoint && codepoint <= 0x2FFB; + } + + /** + * Return true if the given {@code codepoint} is inappropriate for plain text characters as defined by + * RFC 3454, Appendix C.6. + */ + private static boolean inappropriateForPlainText(final int codepoint) { + return codepoint == 0xFFF9 + || codepoint == 0xFFFA + || codepoint == 0xFFFB + || codepoint == 0xFFFC + || codepoint == 0xFFFD; + } + + /** + * Return true if the given {@code codepoint} is a surrogate code point as defined by + * RFC 3454, Appendix C.5. + */ + private static boolean surrogateCodePoint(final int codepoint) { + return 0xD800 <= codepoint && codepoint <= 0xDFFF; + } + + /** + * Return true if the given {@code codepoint} is a non-character code point as defined by + * RFC 3454, Appendix C.4. + */ + private static boolean nonCharacterCodePoint(final int codepoint) { + return 0xFDD0 <= codepoint && codepoint <= 0xFDEF + || 0xFFFE <= codepoint && codepoint <= 0xFFFF + || 0x1FFFE <= codepoint && codepoint <= 0x1FFFF + || 0x2FFFE <= codepoint && codepoint <= 0x2FFFF + || 0x3FFFE <= codepoint && codepoint <= 0x3FFFF + || 0x4FFFE <= codepoint && codepoint <= 0x4FFFF + || 0x5FFFE <= codepoint && codepoint <= 0x5FFFF + || 0x6FFFE <= codepoint && codepoint <= 0x6FFFF + || 0x7FFFE <= codepoint && codepoint <= 0x7FFFF + || 0x8FFFE <= codepoint && codepoint <= 0x8FFFF + || 0x9FFFE <= codepoint && codepoint <= 0x9FFFF + || 0xAFFFE <= codepoint && codepoint <= 0xAFFFF + || 0xBFFFE <= codepoint && codepoint <= 0xBFFFF + || 0xCFFFE <= codepoint && codepoint <= 0xCFFFF + || 0xDFFFE <= codepoint && codepoint <= 0xDFFFF + || 0xEFFFE <= codepoint && codepoint <= 0xEFFFF + || 0xFFFFE <= codepoint && codepoint <= 0xFFFFF + || 0x10FFFE <= codepoint && codepoint <= 0x10FFFF; + } + + /** + * Return true if the given {@code codepoint} is a private use character as defined by + * RFC 3454, Appendix C.3. 
+ */ + private static boolean privateUse(final int codepoint) { + return 0xE000 <= codepoint && codepoint <= 0xF8FF + || 0xF000 <= codepoint && codepoint <= 0xFFFFD + || 0x100000 <= codepoint && codepoint <= 0x10FFFD; + } + + /** + * Return true if the given {@code ch} is a non-ASCII control character as defined by + * RFC 3454, Appendix C.2.2. + */ + private static boolean nonAsciiControl(final int codepoint) { + return 0x0080 <= codepoint && codepoint <= 0x009F + || codepoint == 0x06DD + || codepoint == 0x070F + || codepoint == 0x180E + || codepoint == 0x200C + || codepoint == 0x200D + || codepoint == 0x2028 + || codepoint == 0x2029 + || codepoint == 0x2060 + || codepoint == 0x2061 + || codepoint == 0x2062 + || codepoint == 0x2063 + || 0x206A <= codepoint && codepoint <= 0x206F + || codepoint == 0xFEFF + || 0xFFF9 <= codepoint && codepoint <= 0xFFFC + || 0x1D173 <= codepoint && codepoint <= 0x1D17A; + } + + /** + * Return true if the given {@code ch} is an ASCII control character as defined by + * RFC 3454, Appendix C.2.1. + */ + private static boolean asciiControl(final char ch) { + return ch <= '\u001F' || ch == '\u007F'; + } + + /** + * Return true if the given {@code ch} is a non-ASCII space character as defined by + * RFC 3454, Appendix C.1.2. + */ + private static boolean nonAsciiSpace(final char ch) { + return ch == '\u00A0' + || ch == '\u1680' + || '\u2000' <= ch && ch <= '\u200B' + || ch == '\u202F' + || ch == '\u205F' + || ch == '\u3000'; + } + + /** + * Return true if the given {@code ch} is a "commonly mapped to nothing" character as defined by + * RFC 3454, + * Appendix B.1. + */ + private static boolean mappedToNothing(final char ch) { + return ch == '\u00AD' + || ch == '\u034F' + || ch == '\u1806' + || ch == '\u180B' + || ch == '\u180C' + || ch == '\u180D' + || ch == '\u200B' + || ch == '\u200C' + || ch == '\u200D' + || ch == '\u2060' + || '\uFE00' <= ch && ch <= '\uFE0F' + || ch == '\uFEFF'; + } + + private SaslPrep() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/authentication/package-info.java b/driver-core/src/main/com/mongodb/internal/authentication/package-info.java new file mode 100644 index 00000000000..5a0915e39a3 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/authentication/package-info.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. 
+ */ + + +@Internal +@NonNullApi +package com.mongodb.internal.authentication; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/binding/AbstractReferenceCounted.java b/driver-core/src/main/com/mongodb/internal/binding/AbstractReferenceCounted.java new file mode 100644 index 00000000000..3d9284a1956 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/binding/AbstractReferenceCounted.java @@ -0,0 +1,47 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.binding; + +import java.util.concurrent.atomic.AtomicInteger; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
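A sketch of the counting contract implemented below: the count starts at 1 for the creator, each additional user pairs retain() with release(), and whoever observes release() returning 0 may free the underlying resources (makeBinding is a hypothetical factory):

```java
AbstractReferenceCounted resource = makeBinding(); // hypothetical factory; count == 1
resource.retain();                                 // second user, count == 2
resource.release();                                // second user done, count == 1
if (resource.release() == 0) {                     // creator done, count == 0
    // safe to free the underlying resources here
}
```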
+ */ +public abstract class AbstractReferenceCounted implements ReferenceCounted { + private final AtomicInteger referenceCount = new AtomicInteger(1); + @Override + public int getCount() { + return referenceCount.get(); + } + + @Override + public ReferenceCounted retain() { + if (referenceCount.incrementAndGet() == 1) { + throw new IllegalStateException("Attempted to increment the reference count when it is already 0"); + } + return this; + } + + @Override + public int release() { + int decrementedValue = referenceCount.decrementAndGet(); + if (decrementedValue < 0) { + throw new IllegalStateException("Attempted to decrement the reference count below 0"); + } + return decrementedValue; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/binding/AsyncClusterAwareReadWriteBinding.java b/driver-core/src/main/com/mongodb/internal/binding/AsyncClusterAwareReadWriteBinding.java new file mode 100644 index 00000000000..c66dc321513 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/binding/AsyncClusterAwareReadWriteBinding.java @@ -0,0 +1,37 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.binding; + +import com.mongodb.ServerAddress; +import com.mongodb.internal.async.SingleResultCallback; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public interface AsyncClusterAwareReadWriteBinding extends AsyncReadWriteBinding { + + /** + * Returns a connection source to the specified server + * + * @param serverAddress the server address + * @param callback the to be passed the connection source + */ + void getConnectionSource(ServerAddress serverAddress, SingleResultCallback callback); + + @Override + AsyncClusterAwareReadWriteBinding retain(); +} diff --git a/driver-core/src/main/com/mongodb/internal/binding/AsyncClusterBinding.java b/driver-core/src/main/com/mongodb/internal/binding/AsyncClusterBinding.java new file mode 100644 index 00000000000..fd46261a6df --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/binding/AsyncClusterBinding.java @@ -0,0 +1,177 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.binding; + +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.connection.AsyncConnection; +import com.mongodb.internal.connection.Cluster; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.connection.Server; +import com.mongodb.internal.selector.ReadPreferenceServerSelector; +import com.mongodb.internal.selector.ReadPreferenceWithFallbackServerSelector; +import com.mongodb.internal.selector.ServerAddressSelector; +import com.mongodb.internal.selector.WritableServerSelector; +import com.mongodb.selector.ServerSelector; + +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.concurrent.TimeUnit.NANOSECONDS; + +/** + * A simple ReadWriteBinding implementation that supplies write connection sources bound to a possibly different primary each time, and a + * read connection source bound to a possible different server each time. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
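A hypothetical wiring of the class below; cluster, readConcern, and operationContext are assumed to come from the driver's internals, and SingleResultCallback is used as a lambda exactly as elsewhere in this diff:

```java
AsyncClusterBinding binding = new AsyncClusterBinding(
        cluster, ReadPreference.secondaryPreferred(), readConcern, operationContext);
binding.getReadConnectionSource((source, t) -> {
    if (t != null) {
        // server selection failed
    } else {
        source.getConnection((connection, t2) -> {
            // run the operation, then release the connection and the source
        });
    }
});
```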
+ */ +public class AsyncClusterBinding extends AbstractReferenceCounted implements AsyncClusterAwareReadWriteBinding { + private final Cluster cluster; + private final ReadPreference readPreference; + private final ReadConcern readConcern; + private final OperationContext operationContext; + + /** + * Creates an instance. + * + * @param cluster a non-null Cluster which will be used to select a server to bind to + * @param readPreference a non-null ReadPreference for read operations + * @param readConcern a non-null read concern + * @param operationContext the operation context + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ + public AsyncClusterBinding(final Cluster cluster, final ReadPreference readPreference, final ReadConcern readConcern, + final OperationContext operationContext) { + this.cluster = notNull("cluster", cluster); + this.readPreference = notNull("readPreference", readPreference); + this.readConcern = notNull("readConcern", readConcern); + this.operationContext = notNull("operationContext", operationContext); + } + + @Override + public AsyncClusterAwareReadWriteBinding retain() { + super.retain(); + return this; + } + + @Override + public ReadPreference getReadPreference() { + return readPreference; + } + + @Override + public OperationContext getOperationContext() { + return operationContext; + } + + @Override + public void getReadConnectionSource(final SingleResultCallback callback) { + getAsyncClusterBindingConnectionSource(new ReadPreferenceServerSelector(readPreference), callback); + } + + @Override + public void getReadConnectionSource(final int minWireVersion, final ReadPreference fallbackReadPreference, + final SingleResultCallback callback) { + // Assume 5.0+ for load-balanced mode + if (cluster.getSettings().getMode() == ClusterConnectionMode.LOAD_BALANCED) { + getReadConnectionSource(callback); + } else { + ReadPreferenceWithFallbackServerSelector readPreferenceWithFallbackServerSelector + = new ReadPreferenceWithFallbackServerSelector(readPreference, minWireVersion, fallbackReadPreference); + cluster.selectServerAsync(readPreferenceWithFallbackServerSelector, operationContext, (result, t) -> { + if (t != null) { + callback.onResult(null, t); + } else { + callback.onResult(new AsyncClusterBindingConnectionSource(result.getServer(), result.getServerDescription(), + readPreferenceWithFallbackServerSelector.getAppliedReadPreference()), null); + } + }); + } + } + + @Override + public void getWriteConnectionSource(final SingleResultCallback callback) { + getAsyncClusterBindingConnectionSource(new WritableServerSelector(), callback); + } + + @Override + public void getConnectionSource(final ServerAddress serverAddress, final SingleResultCallback callback) { + getAsyncClusterBindingConnectionSource(new ServerAddressSelector(serverAddress), callback); + } + + private void getAsyncClusterBindingConnectionSource(final ServerSelector serverSelector, + final SingleResultCallback callback) { + cluster.selectServerAsync(serverSelector, operationContext, (result, t) -> { + if (t != null) { + callback.onResult(null, t); + } else { + callback.onResult(new AsyncClusterBindingConnectionSource(result.getServer(), result.getServerDescription(), + readPreference), null); + } + }); + } + + private final class AsyncClusterBindingConnectionSource extends AbstractReferenceCounted implements AsyncConnectionSource { + private final Server server; + private final ServerDescription serverDescription; + private final ReadPreference appliedReadPreference; + + private AsyncClusterBindingConnectionSource(final Server server, final ServerDescription serverDescription, + final ReadPreference appliedReadPreference) { + this.server = server; + this.serverDescription = serverDescription; + this.appliedReadPreference = appliedReadPreference; + operationContext.getTimeoutContext().minRoundTripTimeMS(NANOSECONDS.toMillis(serverDescription.getMinRoundTripTimeNanos())); + AsyncClusterBinding.this.retain(); + } + + @Override + public ServerDescription getServerDescription() { + return serverDescription; + } + + @Override + public OperationContext getOperationContext() { + return operationContext; + } + + @Override + public 
ReadPreference getReadPreference() { + return appliedReadPreference; + } + + @Override + public void getConnection(final SingleResultCallback callback) { + server.getConnectionAsync(operationContext, callback); + } + + public AsyncConnectionSource retain() { + super.retain(); + AsyncClusterBinding.this.retain(); + return this; + } + + @Override + public int release() { + int count = super.release(); + AsyncClusterBinding.this.release(); + return count; + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/binding/AsyncConnectionSource.java b/driver-core/src/main/com/mongodb/internal/binding/AsyncConnectionSource.java new file mode 100644 index 00000000000..5d70faf598e --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/binding/AsyncConnectionSource.java @@ -0,0 +1,54 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.binding; + +import com.mongodb.ReadPreference; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.connection.AsyncConnection; + +/** + * A source of connections to a single MongoDB server. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public interface AsyncConnectionSource extends BindingContext, ReferenceCounted { + + /** + * Gets the current description of this source. + * + * @return the current details of the server state. + */ + ServerDescription getServerDescription(); + + /** + * Gets the read preference that was applied when selecting this source. + * + * @see AsyncReadBinding#getReadConnectionSource(int, ReadPreference, SingleResultCallback) + */ + ReadPreference getReadPreference(); + + /** + * Gets a connection from this source. + * + * @param callback the to be passed the connection + */ + void getConnection(SingleResultCallback callback); + + @Override + AsyncConnectionSource retain(); +} diff --git a/driver-core/src/main/com/mongodb/internal/binding/AsyncReadBinding.java b/driver-core/src/main/com/mongodb/internal/binding/AsyncReadBinding.java new file mode 100644 index 00000000000..633091b3efb --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/binding/AsyncReadBinding.java @@ -0,0 +1,55 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.binding; + +import com.mongodb.ReadPreference; +import com.mongodb.internal.async.SingleResultCallback; + +/** + * An asynchronous factory of connection sources to servers that can be read from and that satisfy the specified read preference. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
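To illustrate the wire-version fallback documented on the second overload below, a hypothetical call that prefers a secondary on new-enough clusters and falls back to the primary otherwise (13 is assumed here as the MongoDB 5.0 wire version):

```java
readBinding.getReadConnectionSource(
        13,                        // minimum wire version required to use readPreference
        ReadPreference.primary(),  // applied instead on older clusters
        (source, t) -> {
            if (t == null) {
                ReadPreference applied = source.getReadPreference(); // what was actually applied
            }
        });
```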
+ */ +public interface AsyncReadBinding extends BindingContext, ReferenceCounted { + /** + * The read preference that all connection sources returned by this instance will satisfy. + * @return the non-null read preference + */ + ReadPreference getReadPreference(); + + /** + * Returns a connection source to a server that satisfies the read preference with which this instance is configured. + * @param callback the to be passed the connection source + */ + void getReadConnectionSource(SingleResultCallback callback); + + /** + * Return a connection source that satisfies the read preference with which this instance is configured, if all connected servers have + * a maxWireVersion >= the given minWireVersion. Otherwise, return a connection source that satisfied the given + * fallbackReadPreference. + *
<p>
+ * This is useful for operations that are able to execute on a secondary on later server versions, but must execute on the primary on + * earlier server versions. + * + * @see com.mongodb.internal.operation.AggregateToCollectionOperation + */ + void getReadConnectionSource(int minWireVersion, ReadPreference fallbackReadPreference, + SingleResultCallback callback); + + @Override + AsyncReadBinding retain(); +} diff --git a/driver-core/src/main/com/mongodb/internal/binding/AsyncReadWriteBinding.java b/driver-core/src/main/com/mongodb/internal/binding/AsyncReadWriteBinding.java new file mode 100644 index 00000000000..46bcc749fff --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/binding/AsyncReadWriteBinding.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.binding; + + +/** + * An asynchronous factory of connection sources to servers that can be read from or written to. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public interface AsyncReadWriteBinding extends AsyncReadBinding, AsyncWriteBinding { + @Override + AsyncReadWriteBinding retain(); +} diff --git a/driver-core/src/main/com/mongodb/internal/binding/AsyncWriteBinding.java b/driver-core/src/main/com/mongodb/internal/binding/AsyncWriteBinding.java new file mode 100644 index 00000000000..39bdf4729c2 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/binding/AsyncWriteBinding.java @@ -0,0 +1,37 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.binding; + +import com.mongodb.internal.async.SingleResultCallback; + +/** + * An asynchronous factory of connection sources to servers that can be written to, e.g, a standalone, a mongos, or a replica set primary. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public interface AsyncWriteBinding extends BindingContext, ReferenceCounted { + + /** + * Supply a connection source to a server that can be written to + * + * @param callback the to be passed the connection source + */ + void getWriteConnectionSource(SingleResultCallback callback); + + @Override + AsyncWriteBinding retain(); +} diff --git a/driver-core/src/main/com/mongodb/internal/binding/BindingContext.java b/driver-core/src/main/com/mongodb/internal/binding/BindingContext.java new file mode 100644 index 00000000000..c10f0fb16ac --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/binding/BindingContext.java @@ -0,0 +1,33 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.binding; + +import com.mongodb.internal.connection.OperationContext; + + +/** + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public interface BindingContext { + + /** + * Note: Will return the same operation context if called multiple times. + * + * @return the operation context for the binding context. + */ + OperationContext getOperationContext(); +} diff --git a/driver-core/src/main/com/mongodb/internal/binding/ClusterAwareReadWriteBinding.java b/driver-core/src/main/com/mongodb/internal/binding/ClusterAwareReadWriteBinding.java new file mode 100644 index 00000000000..8f7552341a7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/binding/ClusterAwareReadWriteBinding.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.binding; + +import com.mongodb.ServerAddress; + +/** + * This interface is not part of the public API and may be removed or changed at any time. + */ +public interface ClusterAwareReadWriteBinding extends ReadWriteBinding { + + /** + * Returns a connection source to the specified server address. + * @return the connection source + */ + ConnectionSource getConnectionSource(ServerAddress serverAddress); +} diff --git a/driver-core/src/main/com/mongodb/internal/binding/ClusterBinding.java b/driver-core/src/main/com/mongodb/internal/binding/ClusterBinding.java new file mode 100644 index 00000000000..cd3f8473bbb --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/binding/ClusterBinding.java @@ -0,0 +1,154 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.binding; + +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.connection.Cluster; +import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.connection.Server; +import com.mongodb.internal.connection.ServerTuple; +import com.mongodb.internal.selector.ReadPreferenceServerSelector; +import com.mongodb.internal.selector.ReadPreferenceWithFallbackServerSelector; +import com.mongodb.internal.selector.ServerAddressSelector; +import com.mongodb.internal.selector.WritableServerSelector; + +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.concurrent.TimeUnit.NANOSECONDS; + +/** + * A simple ReadWriteBinding implementation that supplies write connection sources bound to a possibly different primary each time, and a + * read connection source bound to a possible different server each time. + * + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
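A sketch of the binding's intended life cycle, inferred from the API it implements; the cluster and operationContext values are assumed to come from the surrounding connection layer:

    ClusterBinding binding = new ClusterBinding(cluster, ReadPreference.secondaryPreferred(),
            ReadConcern.LOCAL, operationContext);
    try {
        ConnectionSource source = binding.getReadConnectionSource();
        try {
            Connection connection = source.getConnection();
            try {
                // run the read command on 'connection'
            } finally {
                connection.release();
            }
        } finally {
            source.release(); // also drops the source's reference to the binding
        }
    } finally {
        binding.release();
    }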
+ */ +public class ClusterBinding extends AbstractReferenceCounted implements ClusterAwareReadWriteBinding { + private final Cluster cluster; + private final ReadPreference readPreference; + private final ReadConcern readConcern; + private final OperationContext operationContext; + + /** + * Creates an instance. + * @param cluster a non-null Cluster which will be used to select a server to bind to + * @param readPreference a non-null ReadPreference for read operations + * @param readConcern a non-null read concern + * @param operationContext the operation context + */ + public ClusterBinding(final Cluster cluster, final ReadPreference readPreference, final ReadConcern readConcern, + final OperationContext operationContext) { + this.cluster = notNull("cluster", cluster); + this.readPreference = notNull("readPreference", readPreference); + this.readConcern = notNull("readConcern", readConcern); + this.operationContext = notNull("operationContext", operationContext); + } + + @Override + public ReadWriteBinding retain() { + super.retain(); + return this; + } + + @Override + public ReadPreference getReadPreference() { + return readPreference; + } + + @Override + public OperationContext getOperationContext() { + return operationContext; + } + + @Override + public ConnectionSource getReadConnectionSource() { + return new ClusterBindingConnectionSource(cluster.selectServer(new ReadPreferenceServerSelector(readPreference), operationContext), readPreference); + } + + @Override + public ConnectionSource getReadConnectionSource(final int minWireVersion, final ReadPreference fallbackReadPreference) { + // Assume 5.0+ for load-balanced mode + if (cluster.getSettings().getMode() == ClusterConnectionMode.LOAD_BALANCED) { + return getReadConnectionSource(); + } else { + ReadPreferenceWithFallbackServerSelector readPreferenceWithFallbackServerSelector + = new ReadPreferenceWithFallbackServerSelector(readPreference, minWireVersion, fallbackReadPreference); + ServerTuple serverTuple = cluster.selectServer(readPreferenceWithFallbackServerSelector, operationContext); + return new ClusterBindingConnectionSource(serverTuple, readPreferenceWithFallbackServerSelector.getAppliedReadPreference()); + } + } + + @Override + public ConnectionSource getWriteConnectionSource() { + return new ClusterBindingConnectionSource(cluster.selectServer(new WritableServerSelector(), operationContext), readPreference); + } + + @Override + public ConnectionSource getConnectionSource(final ServerAddress serverAddress) { + return new ClusterBindingConnectionSource(cluster.selectServer(new ServerAddressSelector(serverAddress), operationContext), readPreference); + } + + private final class ClusterBindingConnectionSource extends AbstractReferenceCounted implements ConnectionSource { + private final Server server; + private final ServerDescription serverDescription; + private final ReadPreference appliedReadPreference; + + private ClusterBindingConnectionSource(final ServerTuple serverTuple, final ReadPreference appliedReadPreference) { + this.server = serverTuple.getServer(); + this.serverDescription = serverTuple.getServerDescription(); + this.appliedReadPreference = appliedReadPreference; + operationContext.getTimeoutContext().minRoundTripTimeMS(NANOSECONDS.toMillis(serverDescription.getMinRoundTripTimeNanos())); + ClusterBinding.this.retain(); + } + + @Override + public ServerDescription getServerDescription() { + return serverDescription; + } + + @Override + public OperationContext getOperationContext() { + return operationContext; + } 
+ + @Override + public ReadPreference getReadPreference() { + return appliedReadPreference; + } + + @Override + public Connection getConnection() { + return server.getConnection(operationContext); + } + + public ConnectionSource retain() { + super.retain(); + ClusterBinding.this.retain(); + return this; + } + + @Override + public int release() { + int count = super.release(); + ClusterBinding.this.release(); + return count; + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/binding/ConnectionSource.java b/driver-core/src/main/com/mongodb/internal/binding/ConnectionSource.java new file mode 100644 index 00000000000..90c8b85cf16 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/binding/ConnectionSource.java @@ -0,0 +1,38 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.binding; + +import com.mongodb.ReadPreference; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.connection.Connection; + +/** + * A source of connections to a single MongoDB server. + * + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
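To make the counting relationships concrete, a short hedged sketch (the binding is assumed to be held by the caller); each connection is released independently of the source that produced it:

    ConnectionSource source = binding.getReadConnectionSource();
    Connection first = source.getConnection();
    Connection second = source.getConnection(); // a source may hand out several connections
    first.release();
    second.release();
    source.release(); // the source's own reference to its binding is released with it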
+ */ +public interface ConnectionSource extends BindingContext, ReferenceCounted { + + ServerDescription getServerDescription(); + + ReadPreference getReadPreference(); + + Connection getConnection(); + + @Override + ConnectionSource retain(); +} diff --git a/driver-core/src/main/com/mongodb/internal/binding/ReadBinding.java b/driver-core/src/main/com/mongodb/internal/binding/ReadBinding.java new file mode 100644 index 00000000000..ffdde848382 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/binding/ReadBinding.java @@ -0,0 +1,49 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.binding; + +import com.mongodb.ReadPreference; + +/** + * A factory of connection sources to servers that can be read from and that satisfy the specified read preference. + * + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
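A sketch of the two-argument overload documented below; the wire-version constant is hypothetical, and the returned source reports which preference was actually applied:

    ConnectionSource source = readBinding.getReadConnectionSource(
            MIN_WIRE_VERSION_FOR_SECONDARY_AGGREGATE, // hypothetical constant
            ReadPreference.primary());
    try {
        // the configured preference on new enough clusters, the primary fallback otherwise
        ReadPreference applied = source.getReadPreference();
        Connection connection = source.getConnection();
        try {
            // run the command using 'applied'
        } finally {
            connection.release();
        }
    } finally {
        source.release();
    }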
+ */ +public interface ReadBinding extends BindingContext, ReferenceCounted { + ReadPreference getReadPreference(); + + /** + * Returns a connection source to a server that satisfies the read preference with which this instance is configured. + * @return the connection source + */ + ConnectionSource getReadConnectionSource(); + + /** + * Return a connection source that satisfies the read preference with which this instance is configured, if all connected servers have + * a maxWireVersion >= the given minWireVersion. Otherwise, return a connection source that satisfied the given + * fallbackReadPreference. + *
+ * <p>
+ * This is useful for operations that are able to execute on a secondary on later server versions, but must execute on the primary on + * earlier server versions. + * + * @see com.mongodb.internal.operation.AggregateToCollectionOperation + */ + ConnectionSource getReadConnectionSource(int minWireVersion, ReadPreference fallbackReadPreference); + + @Override + ReadBinding retain(); +} diff --git a/driver-core/src/main/com/mongodb/internal/binding/ReadWriteBinding.java b/driver-core/src/main/com/mongodb/internal/binding/ReadWriteBinding.java new file mode 100644 index 00000000000..6e8d559f843 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/binding/ReadWriteBinding.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.binding; + + +/** + * A factory of connection sources to servers that can be read from or written to. + * + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public interface ReadWriteBinding extends ReadBinding, WriteBinding { + @Override + ReadWriteBinding retain(); +} diff --git a/driver-core/src/main/com/mongodb/internal/binding/ReferenceCounted.java b/driver-core/src/main/com/mongodb/internal/binding/ReferenceCounted.java new file mode 100644 index 00000000000..bf545fb2786 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/binding/ReferenceCounted.java @@ -0,0 +1,69 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.binding; + +import com.mongodb.internal.VisibleForTesting; + +import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; + +/** + * An interface for reference-counted objects. + *
+ * <p>
+ * The recommended usage pattern: + *
+ * <pre>{@code
+ * ReferenceCounted resource = new ...;
+ * //there is no need to call resource.retain() as ReferenceCounted objects are created as retained (the getCount method returns 1)
+ * try {
+ *     //Use the resource.
+ *     //If the resource is passed as a method argument,
+ *     //it is the responsibility of the receiver to call the retain and the corresponding release methods,
+ *     //if the receiver stores the resource for later use.
+ * } finally {
+ *     resource.release();
+ * }
+ * }</pre>
+ *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
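The pattern above covers the caller; the receiver side of the same contract, as the bindings in this patch implement it, looks roughly like this (the CachedSource class is hypothetical):

    final class CachedSource {
        private final ConnectionSource source;

        CachedSource(final ConnectionSource source) {
            this.source = source.retain(); // stored for later use, so the receiver retains
        }

        void close() {
            source.release(); // paired with the retain in the constructor
        }
    }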
+ */ +public interface ReferenceCounted { + /** + * Gets the current reference count. + * + *
+ * <p>
+ * This method should only be used for testing. Production code should prefer using the count returned from {@link #release()}
+ * </p>
+ * + * @return the current count, which must be greater than or equal to 0. + * Returns 1 for a newly created object. + */ + @VisibleForTesting(otherwise = PRIVATE) + int getCount(); + + /** + * Retain an additional reference to this object. All retained references must be released, or there will be a leak. + * + * @return this + */ + ReferenceCounted retain(); + + /** + * Release a reference to this object. + * @throws java.lang.IllegalStateException if the reference count is already 0 + * @return the reference count after the release + */ + int release(); +} diff --git a/driver-core/src/main/com/mongodb/internal/binding/SingleServerBinding.java b/driver-core/src/main/com/mongodb/internal/binding/SingleServerBinding.java new file mode 100644 index 00000000000..7d7e948c344 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/binding/SingleServerBinding.java @@ -0,0 +1,130 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.binding; + +import com.mongodb.ReadPreference; +import com.mongodb.ServerAddress; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.connection.Cluster; +import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.connection.ServerTuple; +import com.mongodb.internal.selector.ServerAddressSelector; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A simple binding where all connection sources are bound to the server specified in the constructor. + * + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class SingleServerBinding extends AbstractReferenceCounted implements ReadWriteBinding { + private final Cluster cluster; + private final ServerAddress serverAddress; + private final OperationContext operationContext; + + /** + * Creates an instance, defaulting to {@link com.mongodb.ReadPreference#primary()} for reads. + * @param cluster a non-null Cluster which will be used to select a server to bind to + * @param serverAddress a non-null address of the server to bind to + * @param operationContext the operation context + */ + public SingleServerBinding(final Cluster cluster, final ServerAddress serverAddress, final OperationContext operationContext) { + this.cluster = notNull("cluster", cluster); + this.serverAddress = notNull("serverAddress", serverAddress); + this.operationContext = notNull("operationContext", operationContext); + } + + @Override + public ConnectionSource getWriteConnectionSource() { + return new SingleServerBindingConnectionSource(); + } + + @Override + public ReadPreference getReadPreference() { + return ReadPreference.primary(); + } + + @Override + public ConnectionSource getReadConnectionSource() { + return new SingleServerBindingConnectionSource(); + } + + @Override + public ConnectionSource getReadConnectionSource(final int minWireVersion, final ReadPreference fallbackReadPreference) { + throw new UnsupportedOperationException(); + } + + @Override + public OperationContext getOperationContext() { + return operationContext; + } + + @Override + public SingleServerBinding retain() { + super.retain(); + return this; + } + + private final class SingleServerBindingConnectionSource extends AbstractReferenceCounted implements ConnectionSource { + private final ServerDescription serverDescription; + + private SingleServerBindingConnectionSource() { + SingleServerBinding.this.retain(); + ServerTuple serverTuple = cluster.selectServer(new ServerAddressSelector(serverAddress), operationContext); + serverDescription = serverTuple.getServerDescription(); + } + + @Override + public ServerDescription getServerDescription() { + return serverDescription; + } + + @Override + public OperationContext getOperationContext() { + return operationContext; + } + + @Override + public ReadPreference getReadPreference() { + return ReadPreference.primary(); + } + + @Override + public Connection getConnection() { + return cluster + .selectServer(new ServerAddressSelector(serverAddress), operationContext) + .getServer() + .getConnection(operationContext); + } + + @Override + public ConnectionSource retain() { + super.retain(); + return this; + } + + @Override + public int release() { + int count = super.release(); + if (count == 0) { + SingleServerBinding.this.release(); + } + return count; + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/binding/TransactionContext.java b/driver-core/src/main/com/mongodb/internal/binding/TransactionContext.java new file mode 100644 index 00000000000..11e3ef7edb7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/binding/TransactionContext.java @@ -0,0 +1,69 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.binding; + +import com.mongodb.connection.ClusterType; +import com.mongodb.internal.connection.Connection; +import com.mongodb.lang.Nullable; +import com.mongodb.session.ClientSession; + +import java.util.function.BiConsumer; + +import static com.mongodb.connection.ClusterType.LOAD_BALANCED; + +/** + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
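A hedged sketch of the pinning sequence for load-balanced transactions, based on the class defined just below; the session and connection values are assumed, and Connection::markAsPinned stands in for whatever pin-marking operation the caller supplies:

    TransactionContext<Connection> transactionContext = TransactionContext.get(session);
    if (transactionContext != null
            && transactionContext.isConnectionPinningRequired()
            && transactionContext.getPinnedConnection() == null) {
        // retains 'connection' and records it for the remainder of the transaction
        transactionContext.pinConnection(connection, Connection::markAsPinned);
    }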
+ */
+public final class TransactionContext<C extends ReferenceCounted> extends AbstractReferenceCounted {
+    private final ClusterType clusterType;
+    private C pinnedConnection;
+
+    public TransactionContext(final ClusterType clusterType) {
+        this.clusterType = clusterType;
+    }
+
+    @Nullable
+    public C getPinnedConnection() {
+        return pinnedConnection;
+    }
+
+    @SuppressWarnings("unchecked")
+    public void pinConnection(final C connection, final BiConsumer<C, Connection.PinningMode> markAsPinnedOperation) {
+        this.pinnedConnection = (C) connection.retain(); // safe because of the `retain` method contract
+        markAsPinnedOperation.accept(connection, Connection.PinningMode.TRANSACTION);
+    }
+
+    public boolean isConnectionPinningRequired() {
+        return clusterType == LOAD_BALANCED;
+    }
+
+    @Override
+    public int release() {
+        int count = super.release();
+        if (count == 0) {
+            if (pinnedConnection != null) {
+                pinnedConnection.release();
+            }
+        }
+        return count;
+    }
+
+    @SuppressWarnings("unchecked")
+    @Nullable
+    public static <C extends ReferenceCounted> C get(final ClientSession session) {
+        return (C) session.getTransactionContext();
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/internal/binding/WriteBinding.java b/driver-core/src/main/com/mongodb/internal/binding/WriteBinding.java
new file mode 100644
index 00000000000..b0ac674489c
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/binding/WriteBinding.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.binding;
+
+/**
+ * A factory of connection sources to servers that can be written to, e.g., a standalone, a mongos, or a replica set primary.
+ *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public interface WriteBinding extends BindingContext, ReferenceCounted { + /** + * Supply a connection source to a server that can be written to + * + * @return a connection source + */ + ConnectionSource getWriteConnectionSource(); + + @Override + WriteBinding retain(); +} diff --git a/driver-core/src/main/com/mongodb/internal/binding/package-info.java b/driver-core/src/main/com/mongodb/internal/binding/package-info.java new file mode 100644 index 00000000000..d514628e1b4 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/binding/package-info.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. + */ + +@Internal +@NonNullApi +package com.mongodb.internal.binding; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/bulk/DeleteRequest.java b/driver-core/src/main/com/mongodb/internal/bulk/DeleteRequest.java new file mode 100644 index 00000000000..bcbf43142c1 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/bulk/DeleteRequest.java @@ -0,0 +1,88 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.bulk; + +import com.mongodb.client.model.Collation; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A representation of a delete. + * + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
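Since the request types in this package are fluent builders, a brief hedged sketch of constructing one (the filter value and index name are illustrative):

    DeleteRequest deleteOne = new DeleteRequest(new BsonDocument("status", new BsonString("expired")))
            .multi(false)            // delete at most one matching document
            .hintString("status_1"); // hypothetical index name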
+ */ +public final class DeleteRequest extends WriteRequest { + private final BsonDocument filter; + private boolean isMulti = true; + private Collation collation; + private BsonDocument hint; + private String hintString; + + public DeleteRequest(final BsonDocument filter) { + this.filter = notNull("filter", filter); + } + + public BsonDocument getFilter() { + return filter; + } + + public DeleteRequest multi(final boolean isMulti) { + this.isMulti = isMulti; + return this; + } + + public boolean isMulti() { + return isMulti; + } + + @Nullable + public Collation getCollation() { + return collation; + } + + public DeleteRequest collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + @Nullable + public BsonDocument getHint() { + return hint; + } + + public DeleteRequest hint(@Nullable final BsonDocument hint) { + this.hint = hint; + return this; + } + + @Nullable + public String getHintString() { + return hintString; + } + + public DeleteRequest hintString(@Nullable final String hint) { + this.hintString = hint; + return this; + } + + @Override + public Type getType() { + return Type.DELETE; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/bulk/IndexRequest.java b/driver-core/src/main/com/mongodb/internal/bulk/IndexRequest.java new file mode 100644 index 00000000000..ce515a1e598 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/bulk/IndexRequest.java @@ -0,0 +1,275 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.bulk; + +import com.mongodb.client.model.Collation; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.Arrays.asList; + +/** + * The settings to apply to the creation of an index. + * + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
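A hedged sketch of a TTL index request built with the class below, showing that expireAfter is normalized to seconds internally:

    IndexRequest ttlIndex = new IndexRequest(new BsonDocument("createdAt", new BsonInt32(1)))
            .name("createdAt_ttl")
            .expireAfter(30L, TimeUnit.DAYS) // stored as expireAfterSeconds = 2592000
            .sparse(true);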
+ */ +public class IndexRequest { + private final BsonDocument keys; + private static final List VALID_TEXT_INDEX_VERSIONS = asList(1, 2, 3); + private static final List VALID_SPHERE_INDEX_VERSIONS = asList(1, 2, 3); + private boolean background; + private boolean unique; + private String name; + private boolean sparse; + private Long expireAfterSeconds; + private Integer version; + private BsonDocument weights; + private String defaultLanguage; + private String languageOverride; + private Integer textVersion; + private Integer sphereVersion; + private Integer bits; + private Double min; + private Double max; + private boolean dropDups; + private BsonDocument storageEngine; + private BsonDocument partialFilterExpression; + private Collation collation; + private BsonDocument wildcardProjection; + private boolean hidden; + + public IndexRequest(final BsonDocument keys) { + this.keys = notNull("keys", keys); + } + + public BsonDocument getKeys() { + return keys; + } + + public boolean isBackground() { + return background; + } + + public IndexRequest background(final boolean background) { + this.background = background; + return this; + } + + public boolean isUnique() { + return unique; + } + + public IndexRequest unique(final boolean unique) { + this.unique = unique; + return this; + } + + @Nullable + public String getName() { + return name; + } + + public IndexRequest name(@Nullable final String name) { + this.name = name; + return this; + } + + public boolean isSparse() { + return sparse; + } + + public IndexRequest sparse(final boolean sparse) { + this.sparse = sparse; + return this; + } + + @Nullable + public Long getExpireAfter(final TimeUnit timeUnit) { + if (expireAfterSeconds == null) { + return null; + } + return timeUnit.convert(expireAfterSeconds, TimeUnit.SECONDS); + } + + public IndexRequest expireAfter(@Nullable final Long expireAfter, final TimeUnit timeUnit) { + if (expireAfter == null) { + this.expireAfterSeconds = null; + } else { + this.expireAfterSeconds = TimeUnit.SECONDS.convert(expireAfter, timeUnit); + } + return this; + } + + @Nullable + public Integer getVersion() { + return this.version; + } + + public IndexRequest version(@Nullable final Integer version) { + this.version = version; + return this; + } + + @Nullable + public BsonDocument getWeights() { + return weights; + } + + public IndexRequest weights(@Nullable final BsonDocument weights) { + this.weights = weights; + return this; + } + + @Nullable + public String getDefaultLanguage() { + return defaultLanguage; + } + + public IndexRequest defaultLanguage(@Nullable final String defaultLanguage) { + this.defaultLanguage = defaultLanguage; + return this; + } + + @Nullable + public String getLanguageOverride() { + return languageOverride; + } + + public IndexRequest languageOverride(@Nullable final String languageOverride) { + this.languageOverride = languageOverride; + return this; + } + + @Nullable + public Integer getTextVersion() { + return textVersion; + } + + public IndexRequest textVersion(@Nullable final Integer textVersion) { + if (textVersion != null) { + isTrueArgument("textVersion must be 1, 2 or 3", VALID_TEXT_INDEX_VERSIONS.contains(textVersion)); + } + this.textVersion = textVersion; + return this; + } + + @Nullable + public Integer getSphereVersion() { + return sphereVersion; + } + + public IndexRequest sphereVersion(@Nullable final Integer sphereVersion) { + if (sphereVersion != null) { + isTrueArgument("sphereIndexVersion must be 1, 2 or 3", VALID_SPHERE_INDEX_VERSIONS.contains(sphereVersion)); + } 
+ this.sphereVersion = sphereVersion; + return this; + } + + @Nullable + public Integer getBits() { + return bits; + } + + public IndexRequest bits(@Nullable final Integer bits) { + this.bits = bits; + return this; + } + + @Nullable + public Double getMin() { + return min; + } + + public IndexRequest min(@Nullable final Double min) { + this.min = min; + return this; + } + + @Nullable + public Double getMax() { + return max; + } + + public IndexRequest max(@Nullable final Double max) { + this.max = max; + return this; + } + + public boolean getDropDups() { + return dropDups; + } + + public IndexRequest dropDups(final boolean dropDups) { + this.dropDups = dropDups; + return this; + } + + @Nullable + public BsonDocument getStorageEngine() { + return storageEngine; + } + + public IndexRequest storageEngine(@Nullable final BsonDocument storageEngineOptions) { + this.storageEngine = storageEngineOptions; + return this; + } + + @Nullable + public BsonDocument getPartialFilterExpression() { + return partialFilterExpression; + } + + public IndexRequest partialFilterExpression(@Nullable final BsonDocument partialFilterExpression) { + this.partialFilterExpression = partialFilterExpression; + return this; + } + + @Nullable + public Collation getCollation() { + return collation; + } + + public IndexRequest collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + @Nullable + public BsonDocument getWildcardProjection() { + return wildcardProjection; + } + + public IndexRequest wildcardProjection(@Nullable final BsonDocument wildcardProjection) { + this.wildcardProjection = wildcardProjection; + return this; + } + + public boolean isHidden() { + return hidden; + } + + public IndexRequest hidden(final boolean hidden) { + this.hidden = hidden; + return this; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/bulk/InsertRequest.java b/driver-core/src/main/com/mongodb/internal/bulk/InsertRequest.java new file mode 100644 index 00000000000..1d6d2fdbf38 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/bulk/InsertRequest.java @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.bulk; + +import org.bson.BsonDocument; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A representation of a document to insert. + * + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class InsertRequest extends WriteRequest { + private final BsonDocument document; + + public InsertRequest(final BsonDocument document) { + this.document = notNull("document", document); + } + + public BsonDocument getDocument() { + return document; + } + + @Override + public Type getType() { + return Type.INSERT; + } + +} + diff --git a/driver-core/src/main/com/mongodb/internal/bulk/UpdateRequest.java b/driver-core/src/main/com/mongodb/internal/bulk/UpdateRequest.java new file mode 100644 index 00000000000..e9d0b13c3cd --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/bulk/UpdateRequest.java @@ -0,0 +1,143 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.bulk; + +import com.mongodb.client.model.Collation; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonValue; + +import java.util.List; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * An update to one or more documents. + * + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
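A hedged sketch showing the constructor validation below in action: Type.UPDATE defaults to multi and accepts update documents or pipelines, while Type.REPLACE rejects multi(true):

    UpdateRequest update = new UpdateRequest(
            new BsonDocument("qty", new BsonDocument("$lt", new BsonInt32(10))),
            new BsonDocument("$set", new BsonDocument("reorder", BsonBoolean.TRUE)),
            WriteRequest.Type.UPDATE) // isMulti defaults to true for UPDATE
            .upsert(true);

    // new UpdateRequest(filter, replacement, WriteRequest.Type.REPLACE).multi(true)
    // throws IllegalArgumentException("Replacements can not be multi")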
+ */ +public final class UpdateRequest extends WriteRequest { + private final BsonValue update; + private final Type updateType; + private final BsonDocument filter; + private boolean isMulti; + private boolean isUpsert = false; + private Collation collation; + private List arrayFilters; + @Nullable private BsonDocument hint; + @Nullable private String hintString; + @Nullable private BsonDocument sort; + + public UpdateRequest(final BsonDocument filter, @Nullable final BsonValue update, final Type updateType) { + if (updateType != Type.UPDATE && updateType != Type.REPLACE) { + throw new IllegalArgumentException("Update type must be UPDATE or REPLACE"); + } + if (update != null && !update.isDocument() && !update.isArray()) { + throw new IllegalArgumentException("Update operation type must be a document or a pipeline"); + } + + this.filter = notNull("filter", filter); + this.update = notNull("update", update); + this.updateType = updateType; + this.isMulti = updateType == Type.UPDATE; + } + + @Override + public Type getType() { + return updateType; + } + + public BsonDocument getFilter() { + return filter; + } + + public BsonValue getUpdateValue() { + return update; + } + + public boolean isMulti() { + return isMulti; + } + + public UpdateRequest multi(final boolean isMulti) { + if (isMulti && updateType == Type.REPLACE) { + throw new IllegalArgumentException("Replacements can not be multi"); + } + this.isMulti = isMulti; + return this; + } + + public boolean isUpsert() { + return isUpsert; + } + + public UpdateRequest upsert(final boolean isUpsert) { + this.isUpsert = isUpsert; + return this; + } + + @Nullable + public Collation getCollation() { + return collation; + } + + public UpdateRequest collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + public UpdateRequest arrayFilters(@Nullable final List arrayFilters) { + this.arrayFilters = arrayFilters; + return this; + } + + @Nullable + public List getArrayFilters() { + return arrayFilters; + } + + @Nullable + public BsonDocument getHint() { + return hint; + } + + public UpdateRequest hint(@Nullable final BsonDocument hint) { + this.hint = hint; + return this; + } + + @Nullable + public String getHintString() { + return hintString; + } + + public UpdateRequest hintString(@Nullable final String hint) { + this.hintString = hint; + return this; + } + + @Nullable + public BsonDocument getSort() { + return sort; + } + + public UpdateRequest sort(@Nullable final BsonDocument sort) { + this.sort = sort; + return this; + } +} + diff --git a/driver-core/src/main/com/mongodb/internal/bulk/WriteRequest.java b/driver-core/src/main/com/mongodb/internal/bulk/WriteRequest.java new file mode 100644 index 00000000000..97b0e8c0f02 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/bulk/WriteRequest.java @@ -0,0 +1,37 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.bulk; + +/** + * An abstract base class for a write request. + * + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public abstract class WriteRequest { + + public enum Type { + INSERT, + UPDATE, + REPLACE, + DELETE + } + + WriteRequest() { + } + + public abstract Type getType(); +} diff --git a/driver-core/src/main/com/mongodb/internal/bulk/WriteRequestWithIndex.java b/driver-core/src/main/com/mongodb/internal/bulk/WriteRequestWithIndex.java new file mode 100644 index 00000000000..ed0689cab3a --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/bulk/WriteRequestWithIndex.java @@ -0,0 +1,42 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.bulk; + +/** + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class WriteRequestWithIndex { + private final int index; + private final WriteRequest writeRequest; + + public WriteRequestWithIndex(final WriteRequest writeRequest, final int index) { + this.writeRequest = writeRequest; + this.index = index; + } + + public int getIndex() { + return index; + } + + public WriteRequest getWriteRequest() { + return writeRequest; + } + + public WriteRequest.Type getType() { + return writeRequest.getType(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/bulk/package-info.java b/driver-core/src/main/com/mongodb/internal/bulk/package-info.java new file mode 100644 index 00000000000..40daa3eec72 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/bulk/package-info.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. + */ + +@Internal +@NonNullApi +package com.mongodb.internal.bulk; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/capi/MongoCryptHelper.java b/driver-core/src/main/com/mongodb/internal/capi/MongoCryptHelper.java new file mode 100644 index 00000000000..240f5051c92 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/capi/MongoCryptHelper.java @@ -0,0 +1,209 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.capi; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.AwsCredential; +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.ConnectionString; +import com.mongodb.MongoClientException; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoConfigurationException; +import com.mongodb.client.model.vault.RewrapManyDataKeyOptions; +import com.mongodb.internal.authentication.AwsCredentialHelper; +import com.mongodb.internal.authentication.AzureCredentialHelper; +import com.mongodb.internal.authentication.GcpCredentialHelper; +import com.mongodb.internal.crypt.capi.MongoCryptOptions; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonDocumentWrapper; +import org.bson.BsonString; +import org.bson.Document; +import org.bson.codecs.DocumentCodec; + +import java.io.File; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; + +import static java.lang.String.format; +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonList; + +/** + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
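To illustrate the spawning defaults established below: with no extra options the helper produces a command line that lets an idle mongocryptd shut itself down. A hedged sketch of the expected result:

    List<String> args = MongoCryptHelper.createMongocryptdSpawnArgs(Collections.emptyMap());
    // args: ["mongocryptd", "--idleShutdownTimeoutSecs", "60"]
    // "mongocryptdSpawnPath" overrides the executable, "mongocryptdSpawnArgs" appends
    // caller-supplied flags, and the idle-shutdown default is added only when absent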
+ */ +public final class MongoCryptHelper { + + public static MongoCryptOptions createMongoCryptOptions(final ClientEncryptionSettings settings) { + return createMongoCryptOptions(settings.getKmsProviders(), false, emptyList(), emptyMap(), null, null, + settings.getKeyExpiration(TimeUnit.MILLISECONDS)); + } + + public static MongoCryptOptions createMongoCryptOptions(final AutoEncryptionSettings settings) { + return createMongoCryptOptions( + settings.getKmsProviders(), + settings.isBypassQueryAnalysis(), + settings.isBypassAutoEncryption() ? emptyList() : singletonList("$SYSTEM"), + settings.getExtraOptions(), + settings.getSchemaMap(), + settings.getEncryptedFieldsMap(), + settings.getKeyExpiration(TimeUnit.MILLISECONDS)); + } + + public static void validateRewrapManyDataKeyOptions(final RewrapManyDataKeyOptions options) { + if (options.getMasterKey() != null && options.getProvider() == null) { + throw new MongoClientException("Missing the provider but supplied a master key in the RewrapManyDataKeyOptions"); + } + } + + private static MongoCryptOptions createMongoCryptOptions( + final Map> kmsProviders, + final boolean bypassQueryAnalysis, + final List searchPaths, + @Nullable final Map extraOptions, + @Nullable final Map localSchemaMap, + @Nullable final Map encryptedFieldsMap, + @Nullable final Long keyExpirationMS) { + MongoCryptOptions.Builder mongoCryptOptionsBuilder = MongoCryptOptions.builder(); + mongoCryptOptionsBuilder.kmsProviderOptions(getKmsProvidersAsBsonDocument(kmsProviders)); + mongoCryptOptionsBuilder.bypassQueryAnalysis(bypassQueryAnalysis); + mongoCryptOptionsBuilder.searchPaths(searchPaths); + mongoCryptOptionsBuilder.extraOptions(toBsonDocument(extraOptions)); + mongoCryptOptionsBuilder.localSchemaMap(localSchemaMap); + mongoCryptOptionsBuilder.encryptedFieldsMap(encryptedFieldsMap); + mongoCryptOptionsBuilder.needsKmsCredentialsStateEnabled(true); + mongoCryptOptionsBuilder.keyExpirationMS(keyExpirationMS); + return mongoCryptOptionsBuilder.build(); + } + public static BsonDocument fetchCredentials(final Map> kmsProviders, + final Map>> kmsProviderPropertySuppliers) { + BsonDocument kmsProvidersDocument = MongoCryptHelper.getKmsProvidersAsBsonDocument(kmsProviders); + for (Map.Entry>> entry : kmsProviderPropertySuppliers.entrySet()) { + String kmsProviderName = entry.getKey(); + if (!kmsProvidersDocument.get(kmsProviderName).asDocument().isEmpty()) { + continue; + } + Map kmsProviderCredential; + try { + kmsProviderCredential = entry.getValue().get(); + } catch (Exception e) { + throw new MongoConfigurationException(format("Exception getting credential for kms provider %s from configured Supplier.", + kmsProviderName), e); + } + if (kmsProviderCredential == null || kmsProviderCredential.isEmpty()) { + throw new MongoConfigurationException(format("Exception getting credential for kms provider %s from configured Supplier." + + " The returned value is %s.", + kmsProviderName, kmsProviderCredential == null ? 
"null" : "empty")); + } + kmsProvidersDocument.put(kmsProviderName, toBsonDocument(kmsProviderCredential)); + } + if (kmsProvidersDocument.containsKey("aws") && kmsProvidersDocument.get("aws").asDocument().isEmpty()) { + AwsCredential awsCredential = AwsCredentialHelper.obtainFromEnvironment(); + if (awsCredential != null) { + BsonDocument awsCredentialDocument = new BsonDocument(); + awsCredentialDocument.put("accessKeyId", new BsonString(awsCredential.getAccessKeyId())); + awsCredentialDocument.put("secretAccessKey", new BsonString(awsCredential.getSecretAccessKey())); + if (awsCredential.getSessionToken() != null) { + awsCredentialDocument.put("sessionToken", new BsonString(awsCredential.getSessionToken())); + } + kmsProvidersDocument.put("aws", awsCredentialDocument); + } + } + if (kmsProvidersDocument.containsKey("gcp") && kmsProvidersDocument.get("gcp").asDocument().isEmpty()) { + kmsProvidersDocument.put("gcp", GcpCredentialHelper.obtainFromEnvironment()); + } + if (kmsProvidersDocument.containsKey("azure") && kmsProvidersDocument.get("azure").asDocument().isEmpty()) { + kmsProvidersDocument.put("azure", AzureCredentialHelper.obtainFromEnvironment()); + } + + return kmsProvidersDocument; + } + + private static BsonDocument getKmsProvidersAsBsonDocument(final Map> kmsProviders) { + BsonDocument bsonKmsProviders = new BsonDocument(); + kmsProviders.forEach((k, v) -> bsonKmsProviders.put(k, toBsonDocument(v))); + return bsonKmsProviders; + } + + private static BsonDocument toBsonDocument(@Nullable final Map optionsMap) { + if (optionsMap == null) { + return new BsonDocument(); + } + return new BsonDocumentWrapper<>(new Document(optionsMap), new DocumentCodec()); + } + + public static boolean isMongocryptdSpawningDisabled(@Nullable final String cryptSharedLibVersion, + final AutoEncryptionSettings settings) { + boolean cryptSharedLibIsAvailable = cryptSharedLibVersion != null && !cryptSharedLibVersion.isEmpty(); + boolean cryptSharedLibRequired = (boolean) settings.getExtraOptions().getOrDefault("cryptSharedLibRequired", false); + return settings.isBypassAutoEncryption() || settings.isBypassQueryAnalysis() || cryptSharedLibRequired || cryptSharedLibIsAvailable; + } + + @SuppressWarnings("unchecked") + public static List createMongocryptdSpawnArgs(final Map options) { + List spawnArgs = new ArrayList<>(); + + String path = options.containsKey("mongocryptdSpawnPath") + ? (String) options.get("mongocryptdSpawnPath") + : "mongocryptd"; + + spawnArgs.add(path); + if (options.containsKey("mongocryptdSpawnArgs")) { + spawnArgs.addAll((List) options.get("mongocryptdSpawnArgs")); + } + + if (!spawnArgs.contains("--idleShutdownTimeoutSecs")) { + spawnArgs.add("--idleShutdownTimeoutSecs"); + spawnArgs.add("60"); + } + return spawnArgs; + } + + public static MongoClientSettings createMongocryptdClientSettings(@Nullable final String connectionString) { + + return MongoClientSettings.builder() + .applyToClusterSettings(builder -> builder.serverSelectionTimeout(10, TimeUnit.SECONDS)) + .applyToSocketSettings(builder -> { + builder.readTimeout(10, TimeUnit.SECONDS); + builder.connectTimeout(10, TimeUnit.SECONDS); + }) + .applyConnectionString(new ConnectionString((connectionString != null) + ? 
connectionString : "mongodb://localhost:27020")) + .build(); + } + + public static ProcessBuilder createProcessBuilder(final Map options) { + return new ProcessBuilder(createMongocryptdSpawnArgs(options)); + } + + public static void startProcess(final ProcessBuilder processBuilder) { + try { + processBuilder.redirectErrorStream(true); + processBuilder.redirectOutput(new File(System.getProperty("os.name").startsWith("Windows") ? "NUL" : "/dev/null")); + processBuilder.start(); + } catch (Throwable t) { + throw new MongoClientException("Exception starting mongocryptd process. Is `mongocryptd` on the system path?", t); + } + } + + private MongoCryptHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/capi/package-info.java b/driver-core/src/main/com/mongodb/internal/capi/package-info.java new file mode 100644 index 00000000000..d903b56c422 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/capi/package-info.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. + */ + + +@Internal +@NonNullApi +package com.mongodb.internal.capi; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/client/DriverInformation.java b/driver-core/src/main/com/mongodb/internal/client/DriverInformation.java new file mode 100644 index 00000000000..2eed2ff682e --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/DriverInformation.java @@ -0,0 +1,81 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client; + +import com.mongodb.lang.Nullable; + +import java.util.Objects; + +public final class DriverInformation { + @Nullable + private final String driverName; + @Nullable + private final String driverVersion; + @Nullable + private final String driverPlatform; + + public DriverInformation(@Nullable final String driverName, + @Nullable final String driverVersion, + @Nullable final String driverPlatform) { + this.driverName = driverName == null || driverName.isEmpty() ? null : driverName; + this.driverVersion = driverVersion == null || driverVersion.isEmpty() ? null : driverVersion; + this.driverPlatform = driverPlatform == null || driverPlatform.isEmpty() ? 
null : driverPlatform; + } + + @Nullable + public String getDriverName() { + return driverName; + } + + @Nullable + public String getDriverVersion() { + return driverVersion; + } + + @Nullable + public String getDriverPlatform() { + return driverPlatform; + } + + @Override + public boolean equals(final Object o) { + if (o == null || getClass() != o.getClass()) { + return false; + } + + final DriverInformation that = (DriverInformation) o; + return Objects.equals(driverName, that.driverName) + && Objects.equals(driverVersion, that.driverVersion) + && Objects.equals(driverPlatform, that.driverPlatform); + } + + @Override + public int hashCode() { + int result = Objects.hashCode(driverName); + result = 31 * result + Objects.hashCode(driverVersion); + result = 31 * result + Objects.hashCode(driverPlatform); + return result; + } + + @Override + public String toString() { + return "DriverInformation{" + + "driverName='" + driverName + '\'' + + ", driverVersion='" + driverVersion + '\'' + + ", driverPlatform='" + driverPlatform + '\'' + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/DriverInformationHelper.java b/driver-core/src/main/com/mongodb/internal/client/DriverInformationHelper.java new file mode 100644 index 00000000000..76149f1a83b --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/DriverInformationHelper.java @@ -0,0 +1,58 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.internal.client; + +import com.mongodb.internal.build.MongoDriverVersion; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static java.lang.String.format; +import static java.lang.System.getProperty; + +public final class DriverInformationHelper { + + public static final DriverInformation INITIAL_DRIVER_INFORMATION = + new DriverInformation(MongoDriverVersion.NAME, MongoDriverVersion.VERSION, + format("Java/%s/%s", getProperty("java.vendor", "unknown-vendor"), + getProperty("java.runtime.version", "unknown-version"))); + + public static List getNames(final List driverInformation) { + return getDriverField(DriverInformation::getDriverName, driverInformation); + } + + public static List getVersions(final List driverInformation) { + return getDriverField(DriverInformation::getDriverVersion, driverInformation); + } + + public static List getPlatforms(final List driverInformation) { + return getDriverField(DriverInformation::getDriverPlatform, driverInformation); + } + + private static List getDriverField(final Function fieldSupplier, + final List driverInformation) { + return Collections.unmodifiableList(driverInformation.stream() + .map(fieldSupplier) + .filter(Objects::nonNull) + .collect(Collectors.toList())); + } + + private DriverInformationHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/AbstractConstructibleBson.java b/driver-core/src/main/com/mongodb/internal/client/model/AbstractConstructibleBson.java new file mode 100644 index 00000000000..278f7e273be --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/AbstractConstructibleBson.java @@ -0,0 +1,165 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model; + +import com.mongodb.annotations.Immutable; +import org.bson.BsonDocument; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.function.Consumer; + +import static org.bson.internal.BsonUtil.mutableDeepCopy; + +/** + * A {@link Bson} that allows constructing new instances via {@link #newAppended(String, Object)} instead of mutating {@code this}. + * See {@link #AbstractConstructibleBson(Bson, Document)} for the note on mutability. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
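As a quick illustration of the construct-by-copy pattern, here is a minimal sketch of a hypothetical subclass (`ExampleOptions` and its `limit` method are illustrative assumptions, not part of this changeset):

```java
import org.bson.Document;
import org.bson.conversions.Bson;

// Hypothetical subclass: each fluent method returns a new instance via
// newAppended, so `this` is never mutated.
final class ExampleOptions extends AbstractConstructibleBson<ExampleOptions> {
    ExampleOptions(final Bson base, final Document appended) {
        super(base, appended);
    }

    @Override
    protected ExampleOptions newSelf(final Bson base, final Document appended) {
        return new ExampleOptions(base, appended);
    }

    ExampleOptions limit(final int limit) {
        // newMutated shallow-copies `appended` before the mapping is added
        return newAppended("limit", limit);
    }
}
```

Because `options.limit(10)` returns a copy and leaves `options` untouched, shared instances such as `EMPTY_IMMUTABLE` stay safe to reuse.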
+ * + * @param A type introduced by the concrete class that extends this abstract class. + * @see AbstractConstructibleBsonElement + */ +public abstract class AbstractConstructibleBson> implements Bson, ToMap { + private static final Document EMPTY_DOC = new Document(); + /** + * An {@linkplain Immutable immutable} {@link BsonDocument#isEmpty() empty} instance. + */ + public static final AbstractConstructibleBson EMPTY_IMMUTABLE = AbstractConstructibleBson.of(EMPTY_DOC); + + private final Bson base; + private final Document appended; + + /** + * This constructor is equivalent to {@link #AbstractConstructibleBson(Bson, Document)} with + * {@link #EMPTY_IMMUTABLE} being the second argument. + */ + protected AbstractConstructibleBson(final Bson base) { + this(base, EMPTY_DOC); + } + + /** + * If both {@code base} and {@code appended} are {@link BsonDocument#isEmpty() empty}, + * then the created instance is {@linkplain Immutable immutable} provided that these constructor arguments are not mutated. + * Otherwise, the created instance while being unmodifiable, + * may be mutated by mutating the result of {@link #toBsonDocument(Class, CodecRegistry)}. + */ + protected AbstractConstructibleBson(final Bson base, final Document appended) { + this.base = base; + this.appended = appended; + } + + protected abstract S newSelf(Bson base, Document appended); + + @Override + public final BsonDocument toBsonDocument(final Class documentClass, final CodecRegistry codecRegistry) { + BsonDocument baseDoc = base.toBsonDocument(documentClass, codecRegistry); + return baseDoc.isEmpty() && appended.isEmpty() + // eliminate the possibility of exposing internal state when it is empty to enforce immutability of empty objects + ? new BsonDocument() + : appended.isEmpty() ? baseDoc : newMerged(baseDoc, appended.toBsonDocument(documentClass, codecRegistry)); + } + + /** + * {@linkplain Document#append(String, Object) Appends} the specified mapping via {@link #newMutated(Consumer)}. + * + * @return A new instance. + */ + protected final S newAppended(final String name, final Object value) { + return newMutated(doc -> doc.append(name, value)); + } + + /** + * Creates a {@link Document#Document(java.util.Map) shallow copy} of {@code this} and mutates it via the specified {@code mutator}. + * + * @return A new instance. + */ + protected final S newMutated(final Consumer mutator) { + Document newAppended = new Document(appended); + mutator.accept(newAppended); + return newSelf(base, newAppended); + } + + @Override + public Optional> tryToMap() { + return ToMap.tryToMap(base) + .map(baseMap -> { + Map result = new LinkedHashMap<>(baseMap); + result.putAll(appended); + return result; + }); + } + + public static AbstractConstructibleBson of(final Bson doc) { + return doc instanceof AbstractConstructibleBson + // prevent double wrapping + ? 
(AbstractConstructibleBson) doc + : new ConstructibleBson(doc); + } + + @Override + public final boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + AbstractConstructibleBson that = (AbstractConstructibleBson) o; + return Objects.equals(base, that.base) && Objects.equals(appended, that.appended); + } + + @Override + public final int hashCode() { + return Objects.hash(base, appended); + } + + @Override + public String toString() { + return tryToMap() + .map(Document::new) + .map(Document::toString) + .orElseGet(() -> "ConstructibleBson{base=" + base + + ", appended=" + appended + + '}'); + } + + static BsonDocument newMerged(final BsonDocument base, final BsonDocument appended) { + BsonDocument result = mutableDeepCopy(base); + result.putAll(appended); + return result; + } + + private static final class ConstructibleBson extends AbstractConstructibleBson { + private ConstructibleBson(final Bson base) { + super(base); + } + + private ConstructibleBson(final Bson base, final Document appended) { + super(base, appended); + } + + @Override + protected ConstructibleBson newSelf(final Bson base, final Document appended) { + return new ConstructibleBson(base, appended); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/AbstractConstructibleBsonElement.java b/driver-core/src/main/com/mongodb/internal/client/model/AbstractConstructibleBsonElement.java new file mode 100644 index 00000000000..b6ef3391430 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/AbstractConstructibleBsonElement.java @@ -0,0 +1,176 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model; + +import org.bson.BsonDocument; +import org.bson.BsonValue; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.function.Consumer; + +import static com.mongodb.internal.client.model.AbstractConstructibleBson.EMPTY_IMMUTABLE; +import static com.mongodb.internal.client.model.AbstractConstructibleBson.newMerged; +import static java.lang.String.format; + +/** + * A {@link Bson} that contains exactly one name/value pair + * and allows constructing new instances via {@link #newWithAppendedValue(String, Object)} instead of mutating {@code this}. + * The value must itself be a {@code Bson}. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
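A minimal sketch of a hypothetical operator built on this class (`ExampleOperator`, the `$exampleOp` name, and the `score` field are illustrative assumptions): appended mappings land inside the single element's value document, not at the top level:

```java
import org.bson.conversions.Bson;

// Renders as {"$exampleOp": {<base value fields>, "score": ...}}.
final class ExampleOperator extends AbstractConstructibleBsonElement<ExampleOperator> {
    ExampleOperator(final Bson baseElement, final Bson appendedElementValue) {
        super(baseElement, appendedElementValue);
    }

    @Override
    protected ExampleOperator newSelf(final Bson baseElement, final Bson appendedElementValue) {
        return new ExampleOperator(baseElement, appendedElementValue);
    }

    ExampleOperator score(final double score) {
        // merged into the value of the single "$exampleOp" element
        return newWithAppendedValue("score", score);
    }
}
```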
+ * + * @param A type introduced by the concrete class that extends this abstract class. + * @see AbstractConstructibleBson + */ +public abstract class AbstractConstructibleBsonElement> implements Bson, ToMap { + private final Bson baseElement; + private final AbstractConstructibleBson appendedElementValue; + + protected AbstractConstructibleBsonElement(final String name) { + this(name, EMPTY_IMMUTABLE); + } + + protected AbstractConstructibleBsonElement(final String name, final Bson value) { + this(new Document(name, value)); + } + + protected AbstractConstructibleBsonElement(final Bson baseElement) { + this(baseElement, EMPTY_IMMUTABLE); + } + + protected AbstractConstructibleBsonElement(final Bson baseElement, final Bson appendedElementValue) { + this.baseElement = baseElement; + this.appendedElementValue = AbstractConstructibleBson.of(appendedElementValue); + } + + protected abstract S newSelf(Bson baseElement, Bson appendedElementValue); + + /** + * {@linkplain Document#append(String, Object) Appends} the specified mapping to the value via {@link #newWithMutatedValue(Consumer)}. + * + * @return A new instance. + */ + protected final S newWithAppendedValue(final String name, final Object value) { + return newWithMutatedValue(doc -> doc.append(name, value)); + } + + /** + * Creates a copy of {@code this} with a value that is + * a {@linkplain AbstractConstructibleBson#newMutated(Consumer) shallow copy} of this value mutated via the specified {@code mutator}. + * + * @return A new instance. + * @see AbstractConstructibleBson#newMutated(Consumer) + */ + protected final S newWithMutatedValue(final Consumer mutator) { + return newSelf(baseElement, appendedElementValue.newMutated(mutator)); + } + + @Override + public final BsonDocument toBsonDocument(final Class documentClass, final CodecRegistry codecRegistry) { + BsonDocument baseElementDoc = baseElement.toBsonDocument(documentClass, codecRegistry); + if (baseElementDoc.size() != 1) { + throw new IllegalStateException(format("baseElement must contain exactly one element, but contains %s", baseElementDoc.size())); + } + Map.Entry baseElementEntry = baseElementDoc.entrySet().iterator().next(); + String baseElementName = baseElementEntry.getKey(); + BsonValue baseElementValue = baseElementEntry.getValue(); + if (!baseElementValue.isDocument()) { + throw new IllegalStateException(format("baseElement value must be a document, but it is %s", baseElementValue.getBsonType())); + } + BsonDocument baseElementValueDoc = baseElementValue.asDocument(); + BsonDocument appendedElementValueDoc = appendedElementValue.toBsonDocument(documentClass, codecRegistry); + return appendedElementValueDoc.isEmpty() + ? baseElementDoc + : new BsonDocument(baseElementName, newMerged(baseElementValueDoc, appendedElementValueDoc)); + } + + @Override + public Optional> tryToMap() { + Map baseElementMap = ToMap.tryToMap(baseElement).orElse(null); + Map appendedElementValueMap = baseElementMap == null ? 
null : appendedElementValue.tryToMap().orElse(null); + if (baseElementMap != null && appendedElementValueMap != null) { + if (baseElementMap.size() != 1) { + throw new IllegalStateException(format("baseElement must contain exactly one element, but contains %s", baseElementMap.size())); + } + Map.Entry baseEntry = baseElementMap.entrySet().iterator().next(); + String elementName = baseEntry.getKey(); + return ToMap.tryToMap(baseEntry.getValue()) + .map(elementValueMap -> { + Map result = new LinkedHashMap<>(elementValueMap); + result.putAll(appendedElementValueMap); + return result; + }) + .map(mergedElementValueMap -> { + Map result = new LinkedHashMap<>(); + result.put(elementName, new Document(mergedElementValueMap)); + return result; + }); + } else { + return Optional.empty(); + } + } + + public static AbstractConstructibleBsonElement of(final Bson baseElement) { + return baseElement instanceof AbstractConstructibleBsonElement + // prevent double wrapping + ? (AbstractConstructibleBsonElement) baseElement + : new ConstructibleBsonElement(baseElement, EMPTY_IMMUTABLE); + } + + @Override + public final boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + AbstractConstructibleBsonElement that = (AbstractConstructibleBsonElement) o; + return baseElement.equals(that.baseElement) && appendedElementValue.equals(that.appendedElementValue); + } + + @Override + public final int hashCode() { + return Objects.hash(baseElement, appendedElementValue); + } + + @Override + public String toString() { + return tryToMap() + .map(Document::new) + .map(Document::toString) + .orElseGet(() -> "ConstructibleBsonElement{baseElement=" + baseElement + + ", appendedElementValue=" + appendedElementValue + + '}'); + } + + private static final class ConstructibleBsonElement extends AbstractConstructibleBsonElement { + private ConstructibleBsonElement(final Bson baseElement, final Bson appendedElementValue) { + super(baseElement, appendedElementValue); + } + + @Override + protected ConstructibleBsonElement newSelf(final Bson baseElement, final Bson appendedElementValue) { + return new ConstructibleBsonElement(baseElement, appendedElementValue); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/AggregationLevel.java b/driver-core/src/main/com/mongodb/internal/client/model/AggregationLevel.java new file mode 100644 index 00000000000..77ef862ea29 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/AggregationLevel.java @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.client.model; + +/** + * The aggregation level where an aggregation command should take place. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
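The two levels correspond to the two aggregate entry points in the public driver API; a hedged sketch (pipelines chosen only for illustration):

```java
import com.mongodb.client.MongoClient;
import org.bson.Document;

import static java.util.Collections.singletonList;

final class AggregationLevelExample {
    static void run(final MongoClient client) {
        // COLLECTION: the pipeline reads one collection's documents.
        client.getDatabase("db").getCollection("coll")
                .aggregate(singletonList(new Document("$match", new Document("x", 1))))
                .first();

        // DATABASE: the pipeline targets the database itself,
        // e.g. stages such as $listLocalSessions.
        client.getDatabase("admin")
                .aggregate(singletonList(new Document("$listLocalSessions", new Document())))
                .first();
    }
}
```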
+ */ +public enum AggregationLevel { + + /** + * Database level aggregation + */ + DATABASE, + + /** + * Collection level aggregation + */ + COLLECTION +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/CountStrategy.java b/driver-core/src/main/com/mongodb/internal/client/model/CountStrategy.java new file mode 100644 index 00000000000..21961239510 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/CountStrategy.java @@ -0,0 +1,33 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.client.model; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
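Conceptually, the AGGREGATE strategy expresses a filtered count as a pipeline; the exact pipeline the driver builds is not part of this hunk, so the shape below is an assumption for illustration only:

```java
import static com.mongodb.client.model.Accumulators.sum;
import static com.mongodb.client.model.Aggregates.group;
import static com.mongodb.client.model.Aggregates.match;
import static com.mongodb.client.model.Filters.eq;

import java.util.Arrays;
import java.util.List;

import org.bson.conversions.Bson;

final class CountStrategyExample {
    // COMMAND sends a count command; AGGREGATE can compute the same result
    // with roughly this pipeline shape:
    static List<Bson> aggregateCountPipeline() {
        return Arrays.asList(
                match(eq("status", "active")),
                group(null, sum("n", 1)));
    }
}
```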
+ */ +public enum CountStrategy { + + /** + * Use the count command + */ + COMMAND, + + /** + * Use the Aggregate command + */ + AGGREGATE +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/FindOptions.java b/driver-core/src/main/com/mongodb/internal/client/model/FindOptions.java new file mode 100644 index 00000000000..1c7f3ef9858 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/FindOptions.java @@ -0,0 +1,605 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.client.model; + +import com.mongodb.CursorType; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.Collation; +import com.mongodb.lang.Nullable; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.conversions.Bson; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; + +/** + * The options to apply to a find operation (also commonly referred to as a query). + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class FindOptions { + private int batchSize; + private int limit; + private Bson projection; + private long maxTimeMS; + private long maxAwaitTimeMS; + private int skip; + private Bson sort; + private CursorType cursorType = CursorType.NonTailable; + private boolean noCursorTimeout; + private boolean partial; + private Collation collation; + private BsonValue comment; + private Bson hint; + private String hintString; + private Bson variables; + private Bson max; + private Bson min; + private boolean returnKey; + private boolean showRecordId; + private Boolean allowDiskUse; + private TimeoutMode timeoutMode; + + /** + * Construct a new instance. + */ + public FindOptions() { + } + + //CHECKSTYLE:OFF + FindOptions( + final int batchSize, final int limit, final Bson projection, final long maxTimeMS, final long maxAwaitTimeMS, final int skip, + final Bson sort, final CursorType cursorType, final boolean noCursorTimeout, final boolean partial, + final Collation collation, final BsonValue comment, final Bson hint, final String hintString, final Bson variables, + final Bson max, final Bson min, final boolean returnKey, final boolean showRecordId, final Boolean allowDiskUse, + final TimeoutMode timeoutMode) { + this.batchSize = batchSize; + this.limit = limit; + this.projection = projection; + this.maxTimeMS = maxTimeMS; + this.maxAwaitTimeMS = maxAwaitTimeMS; + this.skip = skip; + this.sort = sort; + this.cursorType = cursorType; + this.noCursorTimeout = noCursorTimeout; + this.partial = partial; + this.collation = collation; + this.comment = comment; + this.hint = hint; + this.hintString = hintString; + this.variables = variables; + this.max = max; + this.min = min; + this.returnKey = returnKey; + this.showRecordId = showRecordId; + this.allowDiskUse = allowDiskUse; + this.timeoutMode = timeoutMode; + } + //CHECKSTYLE:ON + + public FindOptions withBatchSize(final int batchSize) { + return new FindOptions(batchSize, limit, projection, maxTimeMS, maxAwaitTimeMS, skip, sort, cursorType, noCursorTimeout, + partial, collation, comment, hint, hintString, variables, max, min, returnKey, showRecordId, allowDiskUse, timeoutMode); + } + + /** + * Gets the limit to apply. The default is null. + * + * @return the limit + * @mongodb.driver.manual reference/method/cursor.limit/#cursor.limit Limit + */ + public int getLimit() { + return limit; + } + + /** + * Sets the limit to apply. + * + * @param limit the limit, which may be null + * @return this + * @mongodb.driver.manual reference/method/cursor.limit/#cursor.limit Limit + */ + public FindOptions limit(final int limit) { + this.limit = limit; + return this; + } + + /** + * Gets the number of documents to skip. The default is 0. + * + * @return the number of documents to skip + * @mongodb.driver.manual reference/method/cursor.skip/#cursor.skip Skip + */ + public int getSkip() { + return skip; + } + + /** + * Sets the number of documents to skip. + * + * @param skip the number of documents to skip + * @return this + * @mongodb.driver.manual reference/method/cursor.skip/#cursor.skip Skip + */ + public FindOptions skip(final int skip) { + this.skip = skip; + return this; + } + + /** + * Gets the maximum execution time on the server for this operation. The default is 0, which places no limit on the execution time. 
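Since the value is stored internally in milliseconds and converted on read, a short usage sketch (values chosen for illustration):

```java
import java.util.concurrent.TimeUnit;

final class MaxTimeExample {
    static void run() {
        FindOptions options = new FindOptions().maxTime(2, TimeUnit.SECONDS);
        long ms = options.getMaxTime(TimeUnit.MILLISECONDS); // 2000
        long seconds = options.getMaxTime(TimeUnit.SECONDS); // 2
    }
}
```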
+ * + * @param timeUnit the time unit to return the result in + * @return the maximum execution time in the given time unit + * @mongodb.driver.manual reference/method/cursor.maxTimeMS/#cursor.maxTimeMS Max Time + */ + public long getMaxTime(final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS); + } + + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time + * @param timeUnit the time unit, which may not be null + * @return this + * @mongodb.driver.manual reference/method/cursor.maxTimeMS/#cursor.maxTimeMS Max Time + */ + public FindOptions maxTime(final long maxTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + isTrueArgument("maxTime > = 0", maxTime >= 0); + this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); + return this; + } + + /** + * The maximum amount of time for the server to wait on new documents to satisfy a tailable cursor + * query. This only applies to a TAILABLE_AWAIT cursor. When the cursor is not a TAILABLE_AWAIT cursor, + * this option is ignored. + *
<p>
+ * On servers >= 3.2, this option will be specified on the getMore command as "maxTimeMS". The default + * is no value: no "maxTimeMS" is sent to the server with the getMore command. + *
<p>
+ * On servers < 3.2, this option is ignored, and indicates that the driver should respect the server's default value + *
<p>
+ * A zero value will be ignored. + * + * @param timeUnit the time unit to return the result in + * @return the maximum await execution time in the given time unit + */ + public long getMaxAwaitTime(final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + return timeUnit.convert(maxAwaitTimeMS, TimeUnit.MILLISECONDS); + } + + /** + * Sets the maximum await execution time on the server for this operation. + * + * @param maxAwaitTime the max await time. A zero value will be ignored, and indicates that the driver should respect the server's + * default value + * @param timeUnit the time unit, which may not be null + * @return this + */ + public FindOptions maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + isTrueArgument("maxAwaitTime > = 0", maxAwaitTime >= 0); + this.maxAwaitTimeMS = TimeUnit.MILLISECONDS.convert(maxAwaitTime, timeUnit); + return this; + } + + /** + * Gets the number of documents to return per batch. Default to 0, which indicates that the server chooses an appropriate batch + * size. + * + * @return the batch size + * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size + */ + public int getBatchSize() { + return batchSize; + } + + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size + * @return this + * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size + */ + public FindOptions batchSize(final int batchSize) { + this.batchSize = batchSize; + return this; + } + + /** + * Sets the timeoutMode for the cursor. + * + *
<p>
+ * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+ * via {@code MongoDatabase} or via {@code MongoCollection}
+ * </p>
+ * <p>
+ * If the {@code timeout} is set then:
+ * <ul>
+ *  <li>For non-tailable cursors, the default value of timeoutMode is {@link TimeoutMode#CURSOR_LIFETIME}</li>
+ *  <li>For tailable cursors, the default value of timeoutMode is {@link TimeoutMode#ITERATION} and it is an error
+ *  to configure it as: {@link TimeoutMode#CURSOR_LIFETIME}</li>
+ * </ul>
+ * </p>
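A usage sketch of the rule above for a tailable-await cursor (configuration chosen for illustration):

```java
import com.mongodb.CursorType;
import com.mongodb.client.cursor.TimeoutMode;

final class TimeoutModeExample {
    static FindOptions tailable() {
        // For tailable cursors the timeout applies per iteration;
        // TimeoutMode.CURSOR_LIFETIME would be an error here.
        return new FindOptions()
                .cursorType(CursorType.TailableAwait)
                .timeoutMode(TimeoutMode.ITERATION);
    }
}
```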
+ * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public FindOptions timeoutMode(final TimeoutMode timeoutMode) { + this.timeoutMode = timeoutMode; + return this; + } + + /** + * @see #timeoutMode(TimeoutMode) + * @return timeout mode + */ + @Alpha(Reason.CLIENT) + @Nullable + public TimeoutMode getTimeoutMode() { + return timeoutMode; + } + + /** + * Gets a document describing the fields to return for all matching documents. + * + * @return the project document, which may be null + * @mongodb.driver.manual reference/method/db.collection.find/ Projection + */ + @Nullable + public Bson getProjection() { + return projection; + } + + /** + * Sets a document describing the fields to return for all matching documents. + * + * @param projection the project document, which may be null. + * @return this + * @mongodb.driver.manual reference/method/db.collection.find/ Projection + */ + public FindOptions projection(@Nullable final Bson projection) { + this.projection = projection; + return this; + } + + /** + * Gets the sort criteria to apply to the query. The default is null, which means that the documents will be returned in an undefined + * order. + * + * @return a document describing the sort criteria + * @mongodb.driver.manual reference/method/cursor.sort/ Sort + */ + @Nullable + public Bson getSort() { + return sort; + } + + /** + * Sets the sort criteria to apply to the query. + * + * @param sort the sort criteria, which may be null. + * @return this + * @mongodb.driver.manual reference/method/cursor.sort/ Sort + */ + public FindOptions sort(@Nullable final Bson sort) { + this.sort = sort; + return this; + } + + /** + * The server normally times out idle cursors after an inactivity period (10 minutes) + * to prevent excess memory use. If true, that timeout is disabled. + * + * @return true if cursor timeout is disabled + */ + public boolean isNoCursorTimeout() { + return noCursorTimeout; + } + + /** + * The server normally times out idle cursors after an inactivity period (10 minutes) + * to prevent excess memory use. Set this option to prevent that. + * + * @param noCursorTimeout true if cursor timeout is disabled + * @return this + */ + public FindOptions noCursorTimeout(final boolean noCursorTimeout) { + this.noCursorTimeout = noCursorTimeout; + return this; + } + + /** + * Get partial results from a sharded cluster if one or more shards are unreachable (instead of throwing an error). + * + * @return if partial results for sharded clusters is enabled + */ + public boolean isPartial() { + return partial; + } + + /** + * Get partial results from a sharded cluster if one or more shards are unreachable (instead of throwing an error). + * + * @param partial if partial results for sharded clusters is enabled + * @return this + */ + public FindOptions partial(final boolean partial) { + this.partial = partial; + return this; + } + + /** + * Get the cursor type. + * + * @return the cursor type + */ + public CursorType getCursorType() { + return cursorType; + } + + /** + * Sets the cursor type. + * + * @param cursorType the cursor type + * @return this + */ + public FindOptions cursorType(final CursorType cursorType) { + this.cursorType = notNull("cursorType", cursorType); + return this; + } + + /** + * Returns the collation options + * + * @return the collation options + */ + @Nullable + public Collation getCollation() { + return collation; + } + + /** + * Sets the collation options + * + *
<p>A null value represents the server default.</p>
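A sketch of supplying a non-default collation, using the public `Collation` builder (locale and strength chosen for illustration):

```java
import com.mongodb.client.model.Collation;
import com.mongodb.client.model.CollationStrength;

final class CollationExample {
    static FindOptions caseInsensitive() {
        // Leaving collation null falls back to the server default.
        return new FindOptions().collation(Collation.builder()
                .locale("en")
                .collationStrength(CollationStrength.SECONDARY) // case-insensitive matching
                .build());
    }
}
```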
+ * @param collation the collation options to use + * @return this + */ + public FindOptions collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + /** + * Returns the comment to send with the query. The default is not to include a comment with the query. + * + * @return the comment + */ + @Nullable + public BsonValue getComment() { + return comment; + } + + /** + * Sets the comment to the query. A null value means no comment is set. + * + * @param comment the comment + * @return this + */ + public FindOptions comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + /** + * Sets the comment to the query. A null value means no comment is set. + * + * @param comment the comment + * @return this + */ + public FindOptions comment(@Nullable final String comment) { + this.comment = comment != null ? new BsonString(comment) : null; + return this; + } + + /** + * Returns the hint for which index to use. The default is not to set a hint. + * + * @return the hint + */ + @Nullable + public Bson getHint() { + return hint; + } + + /** + * Sets the hint for which index to use. A null value means no hint is set. + * + * @param hint the hint + * @return this + */ + public FindOptions hint(@Nullable final Bson hint) { + this.hint = hint; + return this; + } + + /** + * Gets the hint string to apply. + * + * @return the hint string, which should be the name of an existing index + */ + @Nullable + public String getHintString() { + return hintString; + } + + /** + * Sets the hint to apply. + * + * @param hint the name of the index which should be used for the operation + * @return this + */ + public FindOptions hintString(@Nullable final String hint) { + this.hintString = hint; + return this; + } + + /** + * Add top-level variables to the operation + * + * @return the top level variables if set or null. + */ + @Nullable + public Bson getLet() { + return variables; + } + + /** + * Add top-level variables to the operation + * + *
<p>Allows for improved command readability by separating the variables from the query text.</p>
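A sketch of pairing `let` variables with a filter that references them through `$$` (the `targetQty` variable is an illustrative assumption):

```java
import org.bson.Document;

final class LetExample {
    static FindOptions withVariables() {
        // A filter such as {$expr: {$lt: ["$qty", "$$targetQty"]}} can then
        // reference the variable instead of repeating an inline literal.
        return new FindOptions().let(new Document("targetQty", 500));
    }
}
```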
+ * + * @param variables for find operation or null + * @return this + */ + public FindOptions let(@Nullable final Bson variables) { + this.variables = variables; + return this; + } + + /** + * Returns the exclusive upper bound for a specific index. By default there is no max bound. + * + * @return the max + */ + @Nullable + public Bson getMax() { + return max; + } + + /** + * Sets the exclusive upper bound for a specific index. A null value means no max is set. + * + * @param max the max + * @return this + */ + public FindOptions max(@Nullable final Bson max) { + this.max = max; + return this; + } + + /** + * Returns the minimum inclusive lower bound for a specific index. By default there is no min bound. + * + * @return the min + */ + @Nullable + public Bson getMin() { + return min; + } + + /** + * Sets the minimum inclusive lower bound for a specific index. A null value means no min is set. + * + * @param min the min + * @return this + */ + public FindOptions min(@Nullable final Bson min) { + this.min = min; + return this; + } + + /** + * Returns the returnKey. If true the find operation will return only the index keys in the resulting documents. + *
<p>
+ * Default value is false. If returnKey is true and the find command does not use an index, the returned documents will be empty. + * + * @return the returnKey + */ + public boolean isReturnKey() { + return returnKey; + } + + /** + * Sets the returnKey. If true the find operation will return only the index keys in the resulting documents. + * + * @param returnKey the returnKey + * @return this + */ + public FindOptions returnKey(final boolean returnKey) { + this.returnKey = returnKey; + return this; + } + + /** + * Returns the showRecordId. + *
<p>
+ * Determines whether to return the record identifier for each document. If true, adds a field $recordId to the returned documents. + * The default is false. + * + * @return the showRecordId + */ + public boolean isShowRecordId() { + return showRecordId; + } + + /** + * Sets the showRecordId. Set to true to add a field {@code $recordId} to the returned documents. + * + * @param showRecordId the showRecordId + * @return this + */ + public FindOptions showRecordId(final boolean showRecordId) { + this.showRecordId = showRecordId; + return this; + } + + /** + * Returns the allowDiskUse value + * + * @return the allowDiskUse value + */ + public Boolean isAllowDiskUse() { + return allowDiskUse; + } + + /** + * Enables writing to temporary files on the server. When set to true, the server + * can write temporary data to disk while executing the find operation. + * + *
<p>This option is sent only if the caller explicitly sets it to true.</p>
+ * + * @param allowDiskUse the allowDiskUse + * @return this + */ + public FindOptions allowDiskUse(@Nullable final Boolean allowDiskUse) { + this.allowDiskUse = allowDiskUse; + return this; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/ToMap.java b/driver-core/src/main/com/mongodb/internal/client/model/ToMap.java new file mode 100644 index 00000000000..3c03001d928 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/ToMap.java @@ -0,0 +1,38 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model; + +import org.bson.BsonDocument; +import org.bson.Document; + +import java.util.Map; +import java.util.Optional; + +interface ToMap { + Optional> tryToMap(); + + static Optional> tryToMap(final Object o) { + if (o instanceof ToMap) { + return ((ToMap) o).tryToMap(); + } else if (o instanceof Document || o instanceof BsonDocument) { + @SuppressWarnings("unchecked") + Map map = (Map) o; + return Optional.of(map); + } else { + return Optional.empty(); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/Util.java b/driver-core/src/main/com/mongodb/internal/client/model/Util.java new file mode 100644 index 00000000000..55f6acebc69 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/Util.java @@ -0,0 +1,80 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model; + +import com.mongodb.Function; +import com.mongodb.client.model.search.FieldSearchPath; +import com.mongodb.client.model.search.SearchPath; +import org.bson.BsonArray; +import org.bson.BsonString; +import org.bson.BsonValue; + +import java.util.Iterator; + +import static com.mongodb.assertions.Assertions.fail; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
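A usage sketch of the two utilities declared just below, with paths built via the public `SearchPath` API:

```java
import static com.mongodb.client.model.search.SearchPath.fieldPath;

import java.util.Arrays;
import java.util.List;

import com.mongodb.client.model.search.SearchPath;
import org.bson.BsonValue;

final class UtilExample {
    static void run() {
        List<SearchPath> paths = Arrays.asList(fieldPath("title"), fieldPath("plot"));
        // Two paths, so the combined result is a BsonArray of their BSON forms.
        BsonValue combined = Util.combineToBsonValue(paths.iterator(), false);
        // Checks the bound without walking the whole Iterable.
        boolean atLeastTwo = Util.sizeAtLeast(paths, 2); // true
    }
}
```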
+ */ +public final class Util { + public static final String SEARCH_PATH_VALUE_KEY = "value"; + + /** + * If {@code nonEmptyPaths} has exactly one element, then returns the result of {@link SearchPath#toBsonValue()}, + * otherwise returns a {@link BsonArray} of such results. + * + * @param nonEmptyPaths One or more {@link SearchPath} to convert. + * @param valueOnly If {@code true}, then {@link FieldSearchPath#toValue()} is used when possible; + * if {@code false}, then {@link SearchPath#toBsonValue()} is used. + * @return A single {@link BsonValue} representing the specified paths. + */ + public static BsonValue combineToBsonValue(final Iterator nonEmptyPaths, final boolean valueOnly) { + Function toBsonValueFunc = valueOnly + ? path -> { + if (path instanceof FieldSearchPath) { + return new BsonString(((FieldSearchPath) path).toValue()); + } else { + return path.toBsonValue(); + } + } + : SearchPath::toBsonValue; + BsonValue firstPath = toBsonValueFunc.apply(nonEmptyPaths.next()); + if (nonEmptyPaths.hasNext()) { + BsonArray bsonArray = new BsonArray(); + bsonArray.add(firstPath); + while (nonEmptyPaths.hasNext()) { + bsonArray.add(toBsonValueFunc.apply(nonEmptyPaths.next())); + } + return bsonArray; + } else { + return firstPath; + } + } + + public static boolean sizeAtLeast(final Iterable iterable, final int minInclusive) { + Iterator iter = iterable.iterator(); + int size = 0; + while (size < minInclusive && iter.hasNext()) { + iter.next(); + size++; + } + return size >= minInclusive; + } + + private Util() { + throw fail(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientDeleteModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientDeleteModel.java new file mode 100644 index 00000000000..f7cc0dd4e66 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientDeleteModel.java @@ -0,0 +1,39 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import org.bson.conversions.Bson; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public abstract class AbstractClientDeleteModel implements ClientWriteModel { + private final Bson filter; + private final O options; + + AbstractClientDeleteModel(final Bson filter, final O options) { + this.filter = filter; + this.options = options; + } + + public final Bson getFilter() { + return filter; + } + + public final O getOptions() { + return options; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientDeleteOptions.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientDeleteOptions.java new file mode 100644 index 00000000000..fdacf540073 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientDeleteOptions.java @@ -0,0 +1,82 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.bulk.ClientDeleteManyOptions; +import com.mongodb.client.model.bulk.ClientDeleteOneOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +import java.util.Optional; + +import static java.util.Optional.ofNullable; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public abstract class AbstractClientDeleteOptions { + @Nullable + private Collation collation; + @Nullable + private Bson hint; + @Nullable + private String hintString; + + AbstractClientDeleteOptions() { + } + + public AbstractClientDeleteOptions collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + /** + * @see ClientDeleteOneOptions#collation(Collation) + * @see ClientDeleteManyOptions#collation(Collation) + */ + public Optional getCollation() { + return ofNullable(collation); + } + + public AbstractClientDeleteOptions hint(@Nullable final Bson hint) { + this.hint = hint; + this.hintString = null; + return this; + } + + /** + * @see ClientDeleteOneOptions#hint(Bson) + * @see ClientDeleteManyOptions#hint(Bson) + */ + public Optional getHint() { + return ofNullable(hint); + } + + public AbstractClientDeleteOptions hintString(@Nullable final String hintString) { + this.hintString = hintString; + this.hint = null; + return this; + } + + /** + * @see ClientDeleteOneOptions#hintString(String) + * @see ClientDeleteManyOptions#hintString(String) + */ + public Optional getHintString() { + return ofNullable(hintString); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientNamespacedWriteModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientNamespacedWriteModel.java new file mode 100644 index 00000000000..25daa0bea15 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientNamespacedWriteModel.java @@ -0,0 +1,48 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.MongoNamespace; + +/** + * This class is not part of the public API and may be removed or changed at any time. 
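Note how `hint(Bson)` and `hintString(String)` in `AbstractClientDeleteOptions` above clear each other, so at most one form of the hint is ever sent; a sketch (the anonymous subclass exists only to make the abstract class instantiable and assumes same-package access):

```java
import org.bson.Document;

final class DeleteOptionsExample {
    static void run() {
        AbstractClientDeleteOptions options = new AbstractClientDeleteOptions() {};
        options.hint(new Document("a", 1));
        options.hintString("a_1"); // clears the Bson hint set above
        assert !options.getHint().isPresent();
        assert "a_1".equals(options.getHintString().orElse(null));
    }
}
```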
+ */ +public abstract class AbstractClientNamespacedWriteModel { + private final MongoNamespace namespace; + private final ClientWriteModel model; + + AbstractClientNamespacedWriteModel(final MongoNamespace namespace, final ClientWriteModel model) { + this.namespace = namespace; + this.model = model; + } + + public final MongoNamespace getNamespace() { + return namespace; + } + + public final ClientWriteModel getModel() { + return model; + } + + @Override + public final String toString() { + return "ClientNamespacedWriteModel{" + + "namespace=" + namespace + + ", model=" + model + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientUpdateModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientUpdateModel.java new file mode 100644 index 00000000000..c55ddfc2def --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientUpdateModel.java @@ -0,0 +1,65 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +import java.util.Optional; + +import static com.mongodb.assertions.Assertions.assertTrue; +import static java.util.Optional.ofNullable; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public abstract class AbstractClientUpdateModel implements ClientWriteModel { + private final Bson filter; + @Nullable + private final Bson update; + @Nullable + private final Iterable updatePipeline; + private final O options; + + AbstractClientUpdateModel( + final Bson filter, + @Nullable + final Bson update, + @Nullable final Iterable updatePipeline, + final O options) { + this.filter = filter; + assertTrue(update == null ^ updatePipeline == null); + this.update = update; + this.updatePipeline = updatePipeline; + this.options = options; + } + + public final Bson getFilter() { + return filter; + } + + public final Optional getUpdate() { + return ofNullable(update); + } + + public final Optional> getUpdatePipeline() { + return ofNullable(updatePipeline); + } + + public final O getOptions() { + return options; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientUpdateOptions.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientUpdateOptions.java new file mode 100644 index 00000000000..508330bd8b7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/AbstractClientUpdateOptions.java @@ -0,0 +1,112 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.bulk.ClientUpdateManyOptions; +import com.mongodb.client.model.bulk.ClientUpdateOneOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +import java.util.Optional; + +import static java.util.Optional.ofNullable; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public abstract class AbstractClientUpdateOptions { + @Nullable + private Iterable arrayFilters; + @Nullable + private Collation collation; + @Nullable + private Bson hint; + @Nullable + private String hintString; + @Nullable + private Boolean upsert; + + AbstractClientUpdateOptions() { + } + + public AbstractClientUpdateOptions arrayFilters(@Nullable final Iterable arrayFilters) { + this.arrayFilters = arrayFilters; + return this; + } + + /** + * @see ClientUpdateOneOptions#arrayFilters(Iterable) + * @see ClientUpdateManyOptions#arrayFilters(Iterable) + */ + public Optional> getArrayFilters() { + return ofNullable(arrayFilters); + } + + public AbstractClientUpdateOptions collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + /** + * @see ClientUpdateOneOptions#collation(Collation) + * @see ClientUpdateManyOptions#collation(Collation) + */ + public Optional getCollation() { + return ofNullable(collation); + } + + public AbstractClientUpdateOptions hint(@Nullable final Bson hint) { + this.hint = hint; + this.hintString = null; + return this; + } + + /** + * @see ClientUpdateOneOptions#hint(Bson) + * @see ClientUpdateManyOptions#hint(Bson) + */ + public Optional getHint() { + return ofNullable(hint); + } + + public AbstractClientUpdateOptions hintString(@Nullable final String hintString) { + this.hintString = hintString; + this.hint = null; + return this; + } + + /** + * @see ClientUpdateOneOptions#hintString(String) + * @see ClientUpdateManyOptions#hintString(String) + */ + public Optional getHintString() { + return ofNullable(hintString); + } + + public AbstractClientUpdateOptions upsert(@Nullable final Boolean upsert) { + this.upsert = upsert; + return this; + } + + /** + * @see ClientUpdateOneOptions#upsert(Boolean) + * @see ClientUpdateManyOptions#upsert(Boolean) + */ + public Optional isUpsert() { + return ofNullable(upsert); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/AcknowledgedSummaryClientBulkWriteResult.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/AcknowledgedSummaryClientBulkWriteResult.java new file mode 100644 index 00000000000..fb088c662ae --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/AcknowledgedSummaryClientBulkWriteResult.java @@ -0,0 +1,114 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.bulk.ClientBulkWriteResult; + +import java.util.Objects; +import java.util.Optional; + +import static java.util.Optional.empty; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class AcknowledgedSummaryClientBulkWriteResult implements ClientBulkWriteResult { + private final long insertedCount; + private final long upsertedCount; + private final long matchedCount; + private final long modifiedCount; + private final long deletedCount; + + public AcknowledgedSummaryClientBulkWriteResult( + final long insertedCount, + final long upsertedCount, + final long matchedCount, + final long modifiedCount, + final long deletedCount) { + this.insertedCount = insertedCount; + this.upsertedCount = upsertedCount; + this.matchedCount = matchedCount; + this.modifiedCount = modifiedCount; + this.deletedCount = deletedCount; + } + + @Override + public boolean isAcknowledged() { + return true; + } + + @Override + public long getInsertedCount() { + return insertedCount; + } + + @Override + public long getUpsertedCount() { + return upsertedCount; + } + + @Override + public long getMatchedCount() { + return matchedCount; + } + + @Override + public long getModifiedCount() { + return modifiedCount; + } + + @Override + public long getDeletedCount() { + return deletedCount; + } + + @Override + public Optional getVerboseResults() { + return empty(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final AcknowledgedSummaryClientBulkWriteResult that = (AcknowledgedSummaryClientBulkWriteResult) o; + return insertedCount == that.insertedCount + && upsertedCount == that.upsertedCount + && matchedCount == that.matchedCount + && modifiedCount == that.modifiedCount + && deletedCount == that.deletedCount; + } + + @Override + public int hashCode() { + return Objects.hash(insertedCount, upsertedCount, matchedCount, modifiedCount, deletedCount); + } + + @Override + public String toString() { + return "AcknowledgedSummaryClientBulkWriteResult{" + + "insertedCount=" + insertedCount + + ", upsertedCount=" + upsertedCount + + ", matchedCount=" + matchedCount + + ", modifiedCount=" + modifiedCount + + ", deletedCount=" + deletedCount + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/AcknowledgedVerboseClientBulkWriteResult.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/AcknowledgedVerboseClientBulkWriteResult.java new file mode 100644 index 00000000000..14e9b016d07 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/AcknowledgedVerboseClientBulkWriteResult.java @@ -0,0 +1,170 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.bulk.ClientBulkWriteResult; +import com.mongodb.client.model.bulk.ClientDeleteResult; +import com.mongodb.client.model.bulk.ClientInsertOneResult; +import com.mongodb.client.model.bulk.ClientUpdateResult; + +import java.util.Map; +import java.util.Objects; +import java.util.Optional; + +import static java.util.Optional.of; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class AcknowledgedVerboseClientBulkWriteResult implements ClientBulkWriteResult { + private final AcknowledgedSummaryClientBulkWriteResult summaryResults; + private final AcknowledgedVerboseClientBulkWriteResult.VerboseResults verboseResults; + + public AcknowledgedVerboseClientBulkWriteResult( + final AcknowledgedSummaryClientBulkWriteResult summaryResults, + final Map insertResults, + final Map updateResults, + final Map deleteResults) { + this.summaryResults = summaryResults; + this.verboseResults = new AcknowledgedVerboseClientBulkWriteResult.VerboseResults(insertResults, updateResults, deleteResults); + } + + @Override + public boolean isAcknowledged() { + return true; + } + + @Override + public long getInsertedCount() { + return summaryResults.getInsertedCount(); + } + + @Override + public long getUpsertedCount() { + return summaryResults.getUpsertedCount(); + } + + @Override + public long getMatchedCount() { + return summaryResults.getMatchedCount(); + } + + @Override + public long getModifiedCount() { + return summaryResults.getModifiedCount(); + } + + @Override + public long getDeletedCount() { + return summaryResults.getDeletedCount(); + } + + @Override + public Optional getVerboseResults() { + return of(verboseResults); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final AcknowledgedVerboseClientBulkWriteResult that = (AcknowledgedVerboseClientBulkWriteResult) o; + return Objects.equals(summaryResults, that.summaryResults) + && Objects.equals(verboseResults, that.verboseResults); + } + + @Override + public int hashCode() { + return Objects.hash(summaryResults, verboseResults); + } + + @Override + public String toString() { + return "AcknowledgedVerboseClientBulkWriteResult{" + + "insertedCount=" + summaryResults.getInsertedCount() + + ", upsertedCount=" + summaryResults.getUpsertedCount() + + ", matchedCount=" + summaryResults.getMatchedCount() + + ", modifiedCount=" + summaryResults.getModifiedCount() + + ", deletedCount=" + summaryResults.getDeletedCount() + + ", insertResults=" + verboseResults.insertResults + + ", updateResults=" + verboseResults.updateResults + + ", deleteResults=" + verboseResults.deleteResults + + '}'; + } + + private static final class VerboseResults implements ClientBulkWriteResult.VerboseResults { + private final Map insertResults; + private final Map updateResults; + private final Map deleteResults; + + VerboseResults( + final Map insertResults, + final Map updateResults, + final Map 
deleteResults) { + this.insertResults = insertResults; + this.updateResults = updateResults; + this.deleteResults = deleteResults; + } + + @Override + public Map<Integer, ClientInsertOneResult> getInsertResults() { + return insertResults; + } + + @Override + public Map<Integer, ClientUpdateResult> getUpdateResults() { + return updateResults; + } + + @Override + public Map<Integer, ClientDeleteResult> getDeleteResults() { + return deleteResults; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final AcknowledgedVerboseClientBulkWriteResult.VerboseResults verbose = + (AcknowledgedVerboseClientBulkWriteResult.VerboseResults) o; + return Objects.equals(insertResults, verbose.insertResults) + && Objects.equals(updateResults, verbose.updateResults) + && Objects.equals(deleteResults, verbose.deleteResults); + } + + @Override + public int hashCode() { + return Objects.hash(insertResults, updateResults, deleteResults); + } + + @Override + public String toString() { + return "AcknowledgedVerboseClientBulkWriteResult.VerboseResults{" + + "insertResults=" + insertResults + + ", updateResults=" + updateResults + + ", deleteResults=" + deleteResults + + '}'; + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ClientWriteModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ClientWriteModel.java new file mode 100644 index 00000000000..56d431ec0e8 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ClientWriteModel.java @@ -0,0 +1,24 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +/** + * An individual write operation to be executed as part of a client-level bulk write operation. + * <p> + * This class is not part of the public API and may be removed or changed at any time.
+ */ +public interface ClientWriteModel { +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientBulkWriteOptions.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientBulkWriteOptions.java new file mode 100644 index 00000000000..9599e1750bf --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientBulkWriteOptions.java @@ -0,0 +1,123 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.lang.Nullable; +import org.bson.BsonValue; +import org.bson.conversions.Bson; + +import java.util.Optional; + +import static java.util.Optional.ofNullable; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientBulkWriteOptions implements ClientBulkWriteOptions { + private static final Boolean CLIENT_DEFAULT_ORDERED = true; + private static final Boolean CLIENT_DEFAULT_VERBOSE_RESULTS = false; + + @Nullable + private Boolean ordered; + @Nullable + private Boolean bypassDocumentValidation; + @Nullable + private Bson let; + @Nullable + private BsonValue comment; + @Nullable + private Boolean verboseResults; + + public ConcreteClientBulkWriteOptions() { + } + + @Override + public ClientBulkWriteOptions ordered(@Nullable final Boolean ordered) { + this.ordered = ordered; + return this; + } + + /** + * @see #ordered(Boolean) + */ + public boolean isOrdered() { + return ordered == null ? CLIENT_DEFAULT_ORDERED : ordered; + } + + @Override + public ClientBulkWriteOptions bypassDocumentValidation(@Nullable final Boolean bypassDocumentValidation) { + this.bypassDocumentValidation = bypassDocumentValidation; + return this; + } + + /** + * @see #bypassDocumentValidation(Boolean) + */ + public Optional<Boolean> isBypassDocumentValidation() { + return ofNullable(bypassDocumentValidation); + } + + @Override + public ClientBulkWriteOptions let(@Nullable final Bson let) { + this.let = let; + return this; + } + + /** + * @see #let(Bson) + */ + public Optional<Bson> getLet() { + return ofNullable(let); + } + + @Override + public ClientBulkWriteOptions comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + /** + * @see #comment(BsonValue) + */ + public Optional<BsonValue> getComment() { + return ofNullable(comment); + } + + @Override + public ClientBulkWriteOptions verboseResults(@Nullable final Boolean verboseResults) { + this.verboseResults = verboseResults; + return this; + } + + /** + * @see #verboseResults(Boolean) + */ + public boolean isVerboseResults() { + return verboseResults == null ?
CLIENT_DEFAULT_VERBOSE_RESULTS : verboseResults; + } + + @Override + public String toString() { + return "ClientBulkWriteOptions{" + + "ordered=" + ordered + + ", bypassDocumentValidation=" + bypassDocumentValidation + + ", let=" + let + + ", comment=" + comment + + ", verboseResults=" + verboseResults + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteManyModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteManyModel.java new file mode 100644 index 00000000000..7db1a47d053 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteManyModel.java @@ -0,0 +1,37 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.bulk.ClientDeleteManyOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientDeleteManyModel extends AbstractClientDeleteModel implements ClientWriteModel { + public ConcreteClientDeleteManyModel(final Bson filter, @Nullable final ClientDeleteManyOptions options) { + super(filter, options == null ? ConcreteClientDeleteManyOptions.MUTABLE_EMPTY : (ConcreteClientDeleteManyOptions) options); + } + + @Override + public String toString() { + return "ClientDeleteManyModel{" + + "filter=" + getFilter() + + ", options=" + getOptions() + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteManyOptions.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteManyOptions.java new file mode 100644 index 00000000000..381cd84fa50 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteManyOptions.java @@ -0,0 +1,55 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.bulk.ClientDeleteManyOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * This class is not part of the public API and may be removed or changed at any time. 
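A minimal usage sketch of how the defaults above resolve (illustrative only; it assumes nothing beyond the class itself):

    // Unset options fall back to the client-level defaults.
    ConcreteClientBulkWriteOptions options = new ConcreteClientBulkWriteOptions();
    options.isOrdered();        // true: CLIENT_DEFAULT_ORDERED applies while ordered == null
    options.isVerboseResults(); // false: CLIENT_DEFAULT_VERBOSE_RESULTS applies while verboseResults == null
    options.ordered(false);     // an explicit value overrides the default
    options.isOrdered();        // now false

Keeping the fields null until a caller sets them lets the getters distinguish "explicitly set to the default value" from "never set", which is why the defaults live in constants rather than in field initializers.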
+ */ +public final class ConcreteClientDeleteManyOptions extends AbstractClientDeleteOptions implements ClientDeleteManyOptions { + static final ConcreteClientDeleteManyOptions MUTABLE_EMPTY = new ConcreteClientDeleteManyOptions(); + + public ConcreteClientDeleteManyOptions() { + } + + @Override + public ConcreteClientDeleteManyOptions collation(@Nullable final Collation collation) { + return (ConcreteClientDeleteManyOptions) super.collation(collation); + } + + @Override + public ConcreteClientDeleteManyOptions hint(@Nullable final Bson hint) { + return (ConcreteClientDeleteManyOptions) super.hint(hint); + } + + @Override + public ConcreteClientDeleteManyOptions hintString(@Nullable final String hintString) { + return (ConcreteClientDeleteManyOptions) super.hintString(hintString); + } + + @Override + public String toString() { + return "ClientDeleteManyOptions{" + + "collation=" + getCollation().orElse(null) + + ", hint=" + getHint().orElse(null) + + ", hintString=" + getHintString().map(s -> '\'' + s + '\'') .orElse(null) + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteOneModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteOneModel.java new file mode 100644 index 00000000000..9e969ba9eeb --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteOneModel.java @@ -0,0 +1,37 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.bulk.ClientDeleteOneOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientDeleteOneModel extends AbstractClientDeleteModel implements ClientWriteModel { + public ConcreteClientDeleteOneModel(final Bson filter, @Nullable final ClientDeleteOneOptions options) { + super(filter, options == null ? ConcreteClientDeleteOneOptions.MUTABLE_EMPTY : (ConcreteClientDeleteOneOptions) options); + } + + @Override + public String toString() { + return "ClientDeleteOneModel{" + + "filter=" + getFilter() + + ", options=" + getOptions() + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteOneOptions.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteOneOptions.java new file mode 100644 index 00000000000..3126903cfc5 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteOneOptions.java @@ -0,0 +1,55 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.bulk.ClientDeleteOneOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientDeleteOneOptions extends AbstractClientDeleteOptions implements ClientDeleteOneOptions { + static final ConcreteClientDeleteOneOptions MUTABLE_EMPTY = new ConcreteClientDeleteOneOptions(); + + public ConcreteClientDeleteOneOptions() { + } + + @Override + public ConcreteClientDeleteOneOptions collation(@Nullable final Collation collation) { + return (ConcreteClientDeleteOneOptions) super.collation(collation); + } + + @Override + public ConcreteClientDeleteOneOptions hint(@Nullable final Bson hint) { + return (ConcreteClientDeleteOneOptions) super.hint(hint); + } + + @Override + public ConcreteClientDeleteOneOptions hintString(@Nullable final String hintString) { + return (ConcreteClientDeleteOneOptions) super.hintString(hintString); + } + + @Override + public String toString() { + return "ClientDeleteOneOptions{" + + "collation=" + getCollation().orElse(null) + + ", hint=" + getHint().orElse(null) + + ", hintString=" + getHintString().map(s -> '\'' + s + '\'') .orElse(null) + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteResult.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteResult.java new file mode 100644 index 00000000000..a82b8ee8b62 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientDeleteResult.java @@ -0,0 +1,58 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.bulk.ClientDeleteResult; + +/** + * This class is not part of the public API and may be removed or changed at any time. 
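The delete models and options above share one pattern: a null options argument resolves to the static MUTABLE_EMPTY instance instead of a fresh allocation. A brief sketch (the filter document is made up):

    // Passing null options resolves to the shared ConcreteClientDeleteOneOptions.MUTABLE_EMPTY.
    ConcreteClientDeleteOneModel model =
            new ConcreteClientDeleteOneModel(new org.bson.Document("_id", 1), null);
    // The model now shares MUTABLE_EMPTY with every other model built without options,
    // so such a model's options must never be mutated after construction.

As the name warns, MUTABLE_EMPTY is not immutable; the pattern trades one allocation per model for an obligation never to write through the shared instance.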
+ */ +public final class ConcreteClientDeleteResult implements ClientDeleteResult { + private final long deletedCount; + + public ConcreteClientDeleteResult(final long deletedCount) { + this.deletedCount = deletedCount; + } + + @Override + public long getDeletedCount() { + return deletedCount; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final ConcreteClientDeleteResult that = (ConcreteClientDeleteResult) o; + return deletedCount == that.deletedCount; + } + + @Override + public int hashCode() { + return Long.hashCode(deletedCount); + } + + @Override + public String toString() { + return "ClientDeleteResult{" + + "deletedCount=" + deletedCount + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientInsertOneModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientInsertOneModel.java new file mode 100644 index 00000000000..660944fc202 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientInsertOneModel.java @@ -0,0 +1,38 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientInsertOneModel implements ClientWriteModel { + private final Object document; + + public ConcreteClientInsertOneModel(final Object document) { + this.document = document; + } + + public Object getDocument() { + return document; + } + + @Override + public String toString() { + return "ClientInsertOneModel{" + + "document=" + document + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientInsertOneResult.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientInsertOneResult.java new file mode 100644 index 00000000000..cc755e2c62d --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientInsertOneResult.java @@ -0,0 +1,66 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.bulk.ClientInsertOneResult; +import com.mongodb.lang.Nullable; +import org.bson.BsonValue; + +import java.util.Objects; +import java.util.Optional; + +import static java.util.Optional.ofNullable; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientInsertOneResult implements ClientInsertOneResult { + @Nullable + private final BsonValue insertedId; + + public ConcreteClientInsertOneResult(@Nullable final BsonValue insertedId) { + this.insertedId = insertedId; + } + + @Override + public Optional<BsonValue> getInsertedId() { + return ofNullable(insertedId); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final ConcreteClientInsertOneResult that = (ConcreteClientInsertOneResult) o; + return Objects.equals(insertedId, that.insertedId); + } + + @Override + public int hashCode() { + return Objects.hashCode(insertedId); + } + + @Override + public String toString() { + return "ClientInsertOneResult{" + + "insertedId=" + insertedId + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedDeleteManyModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedDeleteManyModel.java new file mode 100644 index 00000000000..4deb566cff1 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedDeleteManyModel.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.MongoNamespace; +import com.mongodb.client.model.bulk.ClientNamespacedDeleteManyModel; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientNamespacedDeleteManyModel extends AbstractClientNamespacedWriteModel implements ClientNamespacedDeleteManyModel { + public ConcreteClientNamespacedDeleteManyModel(final MongoNamespace namespace, final ClientWriteModel model) { + super(namespace, model); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedDeleteOneModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedDeleteOneModel.java new file mode 100644 index 00000000000..db8a7ad9fde --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedDeleteOneModel.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
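ConcreteClientInsertOneResult represents an unknown inserted id as an absent Optional rather than a null reference. A short illustrative sketch (the BsonObjectId is made up):

    ClientInsertOneResult withId = new ConcreteClientInsertOneResult(new org.bson.BsonObjectId());
    ClientInsertOneResult withoutId = new ConcreteClientInsertOneResult(null);
    withId.getInsertedId().isPresent();    // true
    withoutId.getInsertedId().isPresent(); // false: no id was reported for this operation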
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.MongoNamespace; +import com.mongodb.client.model.bulk.ClientNamespacedDeleteOneModel; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientNamespacedDeleteOneModel extends AbstractClientNamespacedWriteModel implements ClientNamespacedDeleteOneModel { + public ConcreteClientNamespacedDeleteOneModel(final MongoNamespace namespace, final ClientWriteModel model) { + super(namespace, model); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedInsertOneModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedInsertOneModel.java new file mode 100644 index 00000000000..e80861b947e --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedInsertOneModel.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.MongoNamespace; +import com.mongodb.client.model.bulk.ClientNamespacedInsertOneModel; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientNamespacedInsertOneModel extends AbstractClientNamespacedWriteModel implements ClientNamespacedInsertOneModel { + public ConcreteClientNamespacedInsertOneModel(final MongoNamespace namespace, final ClientWriteModel model) { + super(namespace, model); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedReplaceOneModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedReplaceOneModel.java new file mode 100644 index 00000000000..96ea786169e --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedReplaceOneModel.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.MongoNamespace; +import com.mongodb.client.model.bulk.ClientNamespacedReplaceOneModel; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientNamespacedReplaceOneModel extends AbstractClientNamespacedWriteModel implements ClientNamespacedReplaceOneModel { + public ConcreteClientNamespacedReplaceOneModel(final MongoNamespace namespace, final ClientWriteModel model) { + super(namespace, model); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedUpdateManyModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedUpdateManyModel.java new file mode 100644 index 00000000000..28f281287e0 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedUpdateManyModel.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.MongoNamespace; +import com.mongodb.client.model.bulk.ClientNamespacedUpdateManyModel; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientNamespacedUpdateManyModel extends AbstractClientNamespacedWriteModel implements ClientNamespacedUpdateManyModel { + public ConcreteClientNamespacedUpdateManyModel(final MongoNamespace namespace, final ClientWriteModel model) { + super(namespace, model); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedUpdateOneModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedUpdateOneModel.java new file mode 100644 index 00000000000..ad3aa0853ab --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientNamespacedUpdateOneModel.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.MongoNamespace; +import com.mongodb.client.model.bulk.ClientNamespacedUpdateOneModel; + +/** + * This class is not part of the public API and may be removed or changed at any time. 
+ */ +public final class ConcreteClientNamespacedUpdateOneModel extends AbstractClientNamespacedWriteModel implements ClientNamespacedUpdateOneModel { + public ConcreteClientNamespacedUpdateOneModel(final MongoNamespace namespace, final ClientWriteModel model) { + super(namespace, model); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientReplaceOneModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientReplaceOneModel.java new file mode 100644 index 00000000000..7102fe6257d --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientReplaceOneModel.java @@ -0,0 +1,56 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.bulk.ClientReplaceOneOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientReplaceOneModel implements ClientWriteModel { + private final Bson filter; + private final Object replacement; + private final ConcreteClientReplaceOneOptions options; + + public ConcreteClientReplaceOneModel(final Bson filter, final Object replacement, @Nullable final ClientReplaceOneOptions options) { + this.filter = filter; + this.replacement = replacement; + this.options = options == null ? ConcreteClientReplaceOneOptions.MUTABLE_EMPTY : (ConcreteClientReplaceOneOptions) options; + } + + public Bson getFilter() { + return filter; + } + + public Object getReplacement() { + return replacement; + } + + public ConcreteClientReplaceOneOptions getOptions() { + return options; + } + + @Override + public String toString() { + return "ClientReplaceOneModel{" + + "filter=" + filter + + ", replacement=" + replacement + + ", options=" + options + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientReplaceOneOptions.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientReplaceOneOptions.java new file mode 100644 index 00000000000..f7172488bfc --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientReplaceOneOptions.java @@ -0,0 +1,126 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.bulk.ClientReplaceOneOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +import java.util.Optional; + +import static java.util.Optional.ofNullable; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientReplaceOneOptions implements ClientReplaceOneOptions { + static final ConcreteClientReplaceOneOptions MUTABLE_EMPTY = new ConcreteClientReplaceOneOptions(); + + @Nullable + private Collation collation; + @Nullable + private Bson hint; + @Nullable + private String hintString; + @Nullable + private Boolean upsert; + @Nullable + private Bson sort; + + public ConcreteClientReplaceOneOptions() { + } + + @Override + public ClientReplaceOneOptions collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + /** + * @see #collation(Collation) + */ + public Optional<Collation> getCollation() { + return ofNullable(collation); + } + + @Override + public ClientReplaceOneOptions hint(@Nullable final Bson hint) { + this.hint = hint; + this.hintString = null; + return this; + } + + /** + * @see #hint(Bson) + */ + public Optional<Bson> getHint() { + return ofNullable(hint); + } + + @Override + public ClientReplaceOneOptions hintString(@Nullable final String hintString) { + this.hintString = hintString; + this.hint = null; + return this; + } + + /** + * @see #hintString(String) + */ + public Optional<String> getHintString() { + return ofNullable(hintString); + } + + @Override + public ClientReplaceOneOptions upsert(@Nullable final Boolean upsert) { + this.upsert = upsert; + return this; + } + + /** + * @see ClientReplaceOneOptions#sort(Bson) + */ + public ClientReplaceOneOptions sort(final Bson sort) { + this.sort = sort; + return this; + } + + /** + * @see ClientReplaceOneOptions#sort(Bson) + */ + public Optional<Bson> getSort() { + return ofNullable(sort); + } + + /** + * @see #upsert(Boolean) + */ + public Optional<Boolean> isUpsert() { + return ofNullable(upsert); + } + + @Override + public String toString() { + return "ClientReplaceOneOptions{" + + "collation=" + collation + + ", hint=" + hint + + ", hintString='" + hintString + '\'' + + ", upsert=" + upsert + + ", sort=" + sort + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateManyModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateManyModel.java new file mode 100644 index 00000000000..83d72e937f6 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateManyModel.java @@ -0,0 +1,47 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
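Note how hint(Bson) and hintString(String) in ConcreteClientReplaceOneOptions each null out the other field, so the two representations of a hint can never be set at once. A sketch (the index key and name are made up):

    ConcreteClientReplaceOneOptions opts = new ConcreteClientReplaceOneOptions();
    opts.hint(new org.bson.Document("a", 1)); // sets the Bson hint, clears hintString
    opts.hintString("a_1");                   // sets the string hint, clears the Bson hint
    opts.getHint();                           // Optional.empty(): the most recent setter wins
    opts.getHintString();                     // Optional.of("a_1")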
+ */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.assertions.Assertions; +import com.mongodb.client.model.bulk.ClientUpdateManyOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientUpdateManyModel extends AbstractClientUpdateModel implements ClientWriteModel { + public ConcreteClientUpdateManyModel( + final Bson filter, + @Nullable + final Bson update, + @Nullable + final Iterable<? extends Bson> updatePipeline, + @Nullable final ClientUpdateManyOptions options) { + super(filter, update, updatePipeline, + options == null ? ConcreteClientUpdateManyOptions.MUTABLE_EMPTY : (ConcreteClientUpdateManyOptions) options); + } + + @Override + public String toString() { + return "ClientUpdateManyModel{" + + "filter=" + getFilter() + + ", update=" + getUpdate().map(Object::toString).orElseGet(() -> + getUpdatePipeline().map(Object::toString).orElseThrow(Assertions::fail)) + + ", options=" + getOptions() + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateManyOptions.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateManyOptions.java new file mode 100644 index 00000000000..755b6fb56d7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateManyOptions.java @@ -0,0 +1,67 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.bulk.ClientUpdateManyOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * This class is not part of the public API and may be removed or changed at any time.
+ */ +public final class ConcreteClientUpdateManyOptions extends AbstractClientUpdateOptions implements ClientUpdateManyOptions { + static final ConcreteClientUpdateManyOptions MUTABLE_EMPTY = new ConcreteClientUpdateManyOptions(); + + public ConcreteClientUpdateManyOptions() { + } + + @Override + public ConcreteClientUpdateManyOptions arrayFilters(@Nullable final Iterable<? extends Bson> arrayFilters) { + return (ConcreteClientUpdateManyOptions) super.arrayFilters(arrayFilters); + } + + @Override + public ConcreteClientUpdateManyOptions collation(@Nullable final Collation collation) { + return (ConcreteClientUpdateManyOptions) super.collation(collation); + } + + @Override + public ConcreteClientUpdateManyOptions hint(@Nullable final Bson hint) { + return (ConcreteClientUpdateManyOptions) super.hint(hint); + } + + @Override + public ConcreteClientUpdateManyOptions hintString(@Nullable final String hintString) { + return (ConcreteClientUpdateManyOptions) super.hintString(hintString); + } + + @Override + public ConcreteClientUpdateManyOptions upsert(@Nullable final Boolean upsert) { + return (ConcreteClientUpdateManyOptions) super.upsert(upsert); + } + + @Override + public String toString() { + return "ClientUpdateManyOptions{" + + "arrayFilters=" + getArrayFilters().orElse(null) + + ", collation=" + getCollation().orElse(null) + + ", hint=" + getHint().orElse(null) + + ", hintString=" + getHintString().map(s -> '\'' + s + '\'').orElse(null) + + ", upsert=" + isUpsert().orElse(null) + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateOneModel.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateOneModel.java new file mode 100644 index 00000000000..83d02669514 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateOneModel.java @@ -0,0 +1,47 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.assertions.Assertions; +import com.mongodb.client.model.bulk.ClientUpdateOneOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ConcreteClientUpdateOneModel extends AbstractClientUpdateModel implements ClientWriteModel { + public ConcreteClientUpdateOneModel( + final Bson filter, + @Nullable + final Bson update, + @Nullable + final Iterable<? extends Bson> updatePipeline, + @Nullable final ClientUpdateOneOptions options) { + super(filter, update, updatePipeline, + options == null ?
ConcreteClientUpdateOneOptions.MUTABLE_EMPTY : (ConcreteClientUpdateOneOptions) options); + } + + @Override + public String toString() { + return "ClientUpdateOneModel{" + + "filter=" + getFilter() + + ", update=" + getUpdate().map(Object::toString).orElseGet(() -> + getUpdatePipeline().map(Object::toString).orElseThrow(Assertions::fail)) + + ", options=" + getOptions() + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateOneOptions.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateOneOptions.java new file mode 100644 index 00000000000..3bd5f1451d7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateOneOptions.java @@ -0,0 +1,90 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.bulk.ClientUpdateOneOptions; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +import java.util.Optional; + +import static java.util.Optional.ofNullable; + +/** + * This class is not part of the public API and may be removed or changed at any time. 
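ConcreteClientUpdateOneModel, like its updateMany counterpart above, expects exactly one of update and updatePipeline to be non-null; toString() relies on that invariant via Assertions::fail. An illustrative construction (the filter and update documents are made up):

    // Document-style update: the pipeline argument is null.
    ConcreteClientUpdateOneModel byDocument = new ConcreteClientUpdateOneModel(
            new org.bson.Document("status", "old"),
            new org.bson.Document("$set", new org.bson.Document("status", "new")),
            null,  // no aggregation pipeline
            null); // null options resolve to ConcreteClientUpdateOneOptions.MUTABLE_EMPTY

    // Pipeline-style update: the update document is null.
    ConcreteClientUpdateOneModel byPipeline = new ConcreteClientUpdateOneModel(
            new org.bson.Document("status", "old"),
            null,
            java.util.Collections.singletonList(
                    new org.bson.Document("$set", new org.bson.Document("status", "new"))),
            null);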
+ */ +public final class ConcreteClientUpdateOneOptions extends AbstractClientUpdateOptions implements ClientUpdateOneOptions { + static final ConcreteClientUpdateOneOptions MUTABLE_EMPTY = new ConcreteClientUpdateOneOptions(); + + @Nullable + private Bson sort; + + public ConcreteClientUpdateOneOptions() { + } + + @Override + public ConcreteClientUpdateOneOptions arrayFilters(@Nullable final Iterable<? extends Bson> arrayFilters) { + return (ConcreteClientUpdateOneOptions) super.arrayFilters(arrayFilters); + } + + @Override + public ConcreteClientUpdateOneOptions collation(@Nullable final Collation collation) { + return (ConcreteClientUpdateOneOptions) super.collation(collation); + } + + @Override + public ConcreteClientUpdateOneOptions hint(@Nullable final Bson hint) { + return (ConcreteClientUpdateOneOptions) super.hint(hint); + } + + @Override + public ConcreteClientUpdateOneOptions hintString(@Nullable final String hintString) { + return (ConcreteClientUpdateOneOptions) super.hintString(hintString); + } + + @Override + public ConcreteClientUpdateOneOptions upsert(@Nullable final Boolean upsert) { + return (ConcreteClientUpdateOneOptions) super.upsert(upsert); + } + + /** + * @see ClientUpdateOneOptions#sort(Bson) + */ + public ConcreteClientUpdateOneOptions sort(final Bson sort) { + this.sort = sort; + return this; + } + + /** + * @see ClientUpdateOneOptions#sort(Bson) + */ + public Optional<Bson> getSort() { + return ofNullable(sort); + } + + @Override + public String toString() { + return "ClientUpdateOneOptions{" + + "arrayFilters=" + getArrayFilters().orElse(null) + + ", collation=" + getCollation().orElse(null) + + ", hint=" + getHint().orElse(null) + + ", hintString=" + getHintString().map(s -> '\'' + s + '\'').orElse(null) + + ", upsert=" + isUpsert().orElse(null) + + ", sort=" + getSort().orElse(null) + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateResult.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateResult.java new file mode 100644 index 00000000000..54075b792f1 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/ConcreteClientUpdateResult.java @@ -0,0 +1,87 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.client.model.bulk.ClientUpdateResult; +import com.mongodb.lang.Nullable; +import org.bson.BsonValue; + +import java.util.Objects; +import java.util.Optional; + +import static java.util.Optional.ofNullable; + +/** + * This class is not part of the public API and may be removed or changed at any time.
+ */ +public final class ConcreteClientUpdateResult implements ClientUpdateResult { + private final long matchedCount; + private final long modifiedCount; + @Nullable + private final BsonValue upsertedId; + + public ConcreteClientUpdateResult( + final long matchedCount, + final long modifiedCount, + @Nullable final BsonValue upsertedId) { + this.matchedCount = matchedCount; + this.modifiedCount = modifiedCount; + this.upsertedId = upsertedId; + } + + @Override + public long getMatchedCount() { + return matchedCount; + } + + @Override + public long getModifiedCount() { + return modifiedCount; + } + + @Override + public Optional<BsonValue> getUpsertedId() { + return ofNullable(upsertedId); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final ConcreteClientUpdateResult that = (ConcreteClientUpdateResult) o; + return matchedCount == that.matchedCount + && modifiedCount == that.modifiedCount + && Objects.equals(upsertedId, that.upsertedId); + } + + @Override + public int hashCode() { + return Objects.hash(matchedCount, modifiedCount, upsertedId); + } + + @Override + public String toString() { + return "ClientUpdateResult{" + + "matchedCount=" + matchedCount + + ", modifiedCount=" + modifiedCount + + ", upsertedId=" + upsertedId + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/UnacknowledgedClientBulkWriteResult.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/UnacknowledgedClientBulkWriteResult.java new file mode 100644 index 00000000000..cdd649b3389 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/UnacknowledgedClientBulkWriteResult.java @@ -0,0 +1,76 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.annotations.Immutable; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; + +import java.util.Optional; + +/** + * This class is not part of the public API and may be removed or changed at any time.
+ */ +@Immutable +public final class UnacknowledgedClientBulkWriteResult implements ClientBulkWriteResult { + public static final UnacknowledgedClientBulkWriteResult INSTANCE = new UnacknowledgedClientBulkWriteResult(); + + private UnacknowledgedClientBulkWriteResult() { + } + + @Override + public boolean isAcknowledged() { + return false; + } + + @Override + public long getInsertedCount() throws UnsupportedOperationException { + throw createUnacknowledgedResultsException(); + } + + @Override + public long getUpsertedCount() throws UnsupportedOperationException { + throw createUnacknowledgedResultsException(); + } + + @Override + public long getMatchedCount() throws UnsupportedOperationException { + throw createUnacknowledgedResultsException(); + } + + @Override + public long getModifiedCount() throws UnsupportedOperationException { + throw createUnacknowledgedResultsException(); + } + + @Override + public long getDeletedCount() throws UnsupportedOperationException { + throw createUnacknowledgedResultsException(); + } + + @Override + public Optional<ClientBulkWriteResult.VerboseResults> getVerboseResults() throws UnsupportedOperationException { + throw createUnacknowledgedResultsException(); + } + + private static UnsupportedOperationException createUnacknowledgedResultsException() { + return new UnsupportedOperationException("Cannot get information about an unacknowledged write"); + } + + @Override + public String toString() { + return "UnacknowledgedClientBulkWriteResult{}"; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/bulk/package-info.java b/driver-core/src/main/com/mongodb/internal/client/model/bulk/package-info.java new file mode 100644 index 00000000000..c344a50368d --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/bulk/package-info.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. + */ + +@Internal +@NonNullApi +package com.mongodb.internal.client.model.bulk; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/client/model/changestream/ChangeStreamLevel.java b/driver-core/src/main/com/mongodb/internal/client/model/changestream/ChangeStreamLevel.java new file mode 100644 index 00000000000..ccf38ac6921 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/changestream/ChangeStreamLevel.java @@ -0,0 +1,39 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
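A sketch of the unacknowledged contract above: isAcknowledged() is the only meaningful query on the shared INSTANCE, and every other accessor throws:

    ClientBulkWriteResult result = UnacknowledgedClientBulkWriteResult.INSTANCE;
    if (result.isAcknowledged()) {
        long inserted = result.getInsertedCount(); // reachable only for acknowledged results
    }
    // Here result.getInsertedCount() would throw UnsupportedOperationException
    // ("Cannot get information about an unacknowledged write").

A single shared INSTANCE suffices because the class carries no per-write state.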
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.client.model.changestream; + +/** + * The level at which the change stream operation operates. + * + * <p>This class is not part of the public API and may be removed or changed at any time.</p>
+ */ +public enum ChangeStreamLevel { + /** + * Observing all changes on the Client + */ + CLIENT, + + /** + * Observing all changes on a specific database + */ + DATABASE, + + /** + * Observing all changes on a specific collection + */ + COLLECTION +} diff --git a/driver-core/src/main/com/mongodb/internal/client/model/changestream/package-info.java b/driver-core/src/main/com/mongodb/internal/client/model/changestream/package-info.java new file mode 100644 index 00000000000..35e0c4b5176 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/changestream/package-info.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. + */ + +@Internal +@NonNullApi +package com.mongodb.internal.client.model.changestream; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/client/model/package-info.java b/driver-core/src/main/com/mongodb/internal/client/model/package-info.java new file mode 100644 index 00000000000..f37e6c84515 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/model/package-info.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. + */ + +@Internal +@NonNullApi +package com.mongodb.internal.client.model; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/client/package-info.java b/driver-core/src/main/com/mongodb/internal/client/package-info.java new file mode 100644 index 00000000000..347b327f571 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/package-info.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. + */ + +@Internal +@NonNullApi +package com.mongodb.internal.client; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/client/vault/EncryptOptionsHelper.java b/driver-core/src/main/com/mongodb/internal/client/vault/EncryptOptionsHelper.java new file mode 100644 index 00000000000..2b472668d98 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/vault/EncryptOptionsHelper.java @@ -0,0 +1,103 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.client.vault; + +import com.mongodb.client.model.vault.EncryptOptions; +import com.mongodb.client.model.vault.RangeOptions; +import com.mongodb.client.model.vault.TextOptions; +import com.mongodb.internal.crypt.capi.MongoExplicitEncryptOptions; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonInt64; +import org.bson.BsonValue; + +public final class EncryptOptionsHelper { + + public static MongoExplicitEncryptOptions asMongoExplicitEncryptOptions(final EncryptOptions options) { + MongoExplicitEncryptOptions.Builder encryptOptionsBuilder = MongoExplicitEncryptOptions.builder() + .algorithm(options.getAlgorithm()); + + if (options.getKeyId() != null) { + encryptOptionsBuilder.keyId(options.getKeyId()); + } + + if (options.getKeyAltName() != null) { + encryptOptionsBuilder.keyAltName(options.getKeyAltName()); + } + + if (options.getContentionFactor() != null) { + encryptOptionsBuilder.contentionFactor(options.getContentionFactor()); + } + + if (options.getQueryType() != null) { + encryptOptionsBuilder.queryType(options.getQueryType()); + } + + RangeOptions rangeOptions = options.getRangeOptions(); + if (rangeOptions != null) { + BsonDocument rangeOptionsBsonDocument = new BsonDocument(); + BsonValue min = rangeOptions.getMin(); + if (min != null) { + rangeOptionsBsonDocument.put("min", min); + } + BsonValue max = rangeOptions.getMax(); + if (max != null) { + rangeOptionsBsonDocument.put("max", max); + } + Long sparsity = rangeOptions.getSparsity(); + if (sparsity != null) { + rangeOptionsBsonDocument.put("sparsity", new BsonInt64(sparsity)); + } + Integer trimFactor = rangeOptions.getTrimFactor(); + if (trimFactor != null) { + rangeOptionsBsonDocument.put("trimFactor", new BsonInt32(trimFactor)); + } + Integer precision = rangeOptions.getPrecision(); + if (precision != null) { + rangeOptionsBsonDocument.put("precision", new BsonInt32(precision)); + } + encryptOptionsBuilder.rangeOptions(rangeOptionsBsonDocument); + } + + TextOptions textOptions = options.getTextOptions(); + if (textOptions != null) { + BsonDocument textOptionsDocument = new BsonDocument(); + textOptionsDocument.put("caseSensitive", 
BsonBoolean.valueOf(textOptions.getCaseSensitive())); + textOptionsDocument.put("diacriticSensitive", BsonBoolean.valueOf(textOptions.getDiacriticSensitive())); + + BsonDocument substringOptions = textOptions.getSubstringOptions(); + if (substringOptions != null) { + textOptionsDocument.put("substring", substringOptions); + } + + BsonDocument prefixOptions = textOptions.getPrefixOptions(); + if (prefixOptions != null) { + textOptionsDocument.put("prefix", prefixOptions); + } + + BsonDocument suffixOptions = textOptions.getSuffixOptions(); + if (suffixOptions != null) { + textOptionsDocument.put("suffix", suffixOptions); + } + encryptOptionsBuilder.textOptions(textOptionsDocument); + } + + return encryptOptionsBuilder.build(); + } + private EncryptOptionsHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/client/vault/package-info.java b/driver-core/src/main/com/mongodb/internal/client/vault/package-info.java new file mode 100644 index 00000000000..83322719b00 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/client/vault/package-info.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. + */ + +@Internal +@NonNullApi +package com.mongodb.internal.client.vault; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/connection/AbstractMultiServerCluster.java b/driver-core/src/main/com/mongodb/internal/connection/AbstractMultiServerCluster.java new file mode 100644 index 00000000000..acaf1a40e14 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/AbstractMultiServerCluster.java @@ -0,0 +1,464 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
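To see the flattening that `EncryptOptionsHelper` performs end to end, here is a hedged sketch (not part of the patch) that builds public `EncryptOptions` with range options and hands them to the new helper. The "Range" algorithm string, the query type, and the field values are illustrative assumptions for a recent server/driver pairing:

```java
import com.mongodb.client.model.vault.EncryptOptions;
import com.mongodb.client.model.vault.RangeOptions;
import com.mongodb.internal.client.vault.EncryptOptionsHelper;
import com.mongodb.internal.crypt.capi.MongoExplicitEncryptOptions;
import org.bson.BsonInt32;

public class EncryptOptionsMappingExample {
    public static void main(final String[] args) {
        EncryptOptions options = new EncryptOptions("Range")
                .keyAltName("demo-key")          // illustrative key alternate name
                .contentionFactor(8L)
                .queryType("range")
                .rangeOptions(new RangeOptions()
                        .min(new BsonInt32(0))
                        .max(new BsonInt32(200))
                        .sparsity(1L));
        // The helper flattens the nested RangeOptions into the BsonDocument shape
        // that the libmongocrypt wrapper expects ({min, max, sparsity, ...}).
        MongoExplicitEncryptOptions cryptOptions = EncryptOptionsHelper.asMongoExplicitEncryptOptions(options);
        System.out.println(cryptOptions);
    }
}
```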
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoException; +import com.mongodb.MongoStalePrimaryException; +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ClusterSettings; +import com.mongodb.connection.ClusterType; +import com.mongodb.connection.ServerDescription; +import com.mongodb.event.ServerDescriptionChangedEvent; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import org.bson.types.ObjectId; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE; +import static com.mongodb.connection.ClusterType.UNKNOWN; +import static com.mongodb.connection.ServerConnectionState.CONNECTING; +import static com.mongodb.connection.ServerType.REPLICA_SET_GHOST; +import static com.mongodb.connection.ServerType.SHARD_ROUTER; +import static com.mongodb.connection.ServerType.STANDALONE; +import static com.mongodb.internal.operation.ServerVersionHelper.SIX_DOT_ZERO_WIRE_VERSION; +import static java.lang.String.format; + +/** + *

<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public abstract class AbstractMultiServerCluster extends BaseCluster { + private static final Logger LOGGER = Loggers.getLogger("cluster"); + + private ClusterType clusterType; + private String replicaSetName; + private ObjectId maxElectionId; + private Integer maxSetVersion; + + private final ConcurrentMap addressToServerTupleMap = + new ConcurrentHashMap<>(); + + private static final class ServerTuple { + private final ClusterableServer server; + private ServerDescription description; + + private ServerTuple(final ClusterableServer server, final ServerDescription description) { + this.server = server; + this.description = description; + } + } + + AbstractMultiServerCluster(final ClusterId clusterId, + final ClusterSettings settings, + final ClusterableServerFactory serverFactory, + final ClientMetadata clientMetadata) { + super(clusterId, settings, serverFactory, clientMetadata); + isTrue("connection mode is multiple", settings.getMode() == MULTIPLE); + clusterType = settings.getRequiredClusterType(); + replicaSetName = settings.getRequiredReplicaSetName(); + } + + ClusterType getClusterType() { + return clusterType; + } + + @Nullable + MongoException getSrvResolutionException() { + return null; + } + + protected void initialize(final Collection serverAddresses) { + ClusterDescription currentDescription = getCurrentDescription(); + + // synchronizing this code because addServer registers a callback which is re-entrant to this instance. + // In other words, we are leaking a reference to "this" from the constructor. + withLock(() -> { + for (final ServerAddress serverAddress : serverAddresses) { + addServer(serverAddress); + } + ClusterDescription newDescription = updateDescription(); + fireChangeEvent(newDescription, currentDescription); + }); + } + + @Override + protected void connect() { + for (ServerTuple cur : addressToServerTupleMap.values()) { + cur.server.connect(); + } + } + + @Override + public void close() { + withLock(() -> { + if (!isClosed()) { + for (final ServerTuple serverTuple : addressToServerTupleMap.values()) { + serverTuple.server.close(); + } + } + super.close(); + }); + } + + @Override + public ServersSnapshot getServersSnapshot(final Timeout serverSelectionTimeout, + final TimeoutContext timeoutContext) { + isTrue("is open", !isClosed()); + Map nonAtomicSnapshot = new HashMap<>(addressToServerTupleMap); + return serverAddress -> { + ServerTuple serverTuple = nonAtomicSnapshot.get(serverAddress); + return serverTuple == null ? 
null : serverTuple.server; + }; + } + + void onChange(final Collection newHosts) { + withLock(() -> { + if (isClosed()) { + return; + } + + for (ServerAddress cur : newHosts) { + addServer(cur); + } + + for (Iterator iterator = addressToServerTupleMap.values().iterator(); iterator.hasNext();) { + ServerTuple cur = iterator.next(); + if (!newHosts.contains(cur.description.getAddress())) { + if (LOGGER.isInfoEnabled()) { + LOGGER.info(format("Removing %s from client view of cluster.", cur.description.getAddress())); + } + iterator.remove(); + cur.server.close(); + } + } + + ClusterDescription oldClusterDescription = getCurrentDescription(); + ClusterDescription newClusterDescription = updateDescription(); + + fireChangeEvent(newClusterDescription, oldClusterDescription); + }); + } + + @Override + public void onChange(final ServerDescriptionChangedEvent event) { + withLock(() -> { + if (isClosed()) { + return; + } + + ServerDescription newDescription = event.getNewDescription(); + + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(format("Handling description changed event for server %s with description %s", + newDescription.getAddress(), newDescription)); + } + + ServerTuple serverTuple = addressToServerTupleMap.get(newDescription.getAddress()); + if (serverTuple == null) { + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(format("Ignoring description changed event for removed server %s", + newDescription.getAddress())); + } + return; + } + + boolean shouldUpdateDescription = true; + if (newDescription.isOk()) { + if (clusterType == UNKNOWN && newDescription.getType() != REPLICA_SET_GHOST) { + clusterType = newDescription.getClusterType(); + if (LOGGER.isInfoEnabled()) { + LOGGER.info(format("Discovered cluster type of %s", clusterType)); + } + } + + switch (clusterType) { + case REPLICA_SET: + shouldUpdateDescription = handleReplicaSetMemberChanged(newDescription); + break; + case SHARDED: + shouldUpdateDescription = handleShardRouterChanged(newDescription); + break; + case STANDALONE: + shouldUpdateDescription = handleStandAloneChanged(newDescription); + break; + default: + break; + } + } + + ClusterDescription oldClusterDescription = null; + ClusterDescription newClusterDescription = null; + if (shouldUpdateDescription) { + serverTuple.description = newDescription; + oldClusterDescription = getCurrentDescription(); + newClusterDescription = updateDescription(); + } + if (shouldUpdateDescription) { + fireChangeEvent(newClusterDescription, oldClusterDescription); + } + }); + } + + private boolean handleReplicaSetMemberChanged(final ServerDescription newDescription) { + if (!newDescription.isReplicaSetMember()) { + LOGGER.error(format("Expecting replica set member, but found a %s. Removing %s from client view of cluster.", + newDescription.getType(), newDescription.getAddress())); + removeServer(newDescription.getAddress()); + return true; + } + + if (newDescription.getType() == REPLICA_SET_GHOST) { + LOGGER.info(format("Server %s does not appear to be a member of an initiated replica set.", newDescription.getAddress())); + return true; + } + + if (replicaSetName == null) { + replicaSetName = assertNotNull(newDescription.getSetName()); + } + + if (!replicaSetName.equals(newDescription.getSetName())) { + LOGGER.error(format("Expecting replica set member from set '%s', but found one from set '%s'. 
" + + "Removing %s from client view of cluster.", + replicaSetName, newDescription.getSetName(), newDescription.getAddress())); + removeServer(newDescription.getAddress()); + return true; + } + + ensureServers(newDescription); + + if (newDescription.getCanonicalAddress() != null + && !newDescription.getAddress().equals(new ServerAddress(newDescription.getCanonicalAddress())) + && !newDescription.isPrimary()) { + LOGGER.info(format("Canonical address %s does not match server address. Removing %s from client view of cluster", + newDescription.getCanonicalAddress(), newDescription.getAddress())); + removeServer(newDescription.getAddress()); + return true; + } + + if (!newDescription.isPrimary()) { + return true; + } + + if (isStalePrimary(newDescription)) { + invalidatePotentialPrimary(newDescription, new MongoStalePrimaryException("Primary marked stale due to electionId/setVersion mismatch")); + return false; + } + + maxElectionId = nullSafeMax(newDescription.getElectionId(), maxElectionId); + maxSetVersion = nullSafeMax(newDescription.getSetVersion(), maxSetVersion); + + invalidateOldPrimaries(newDescription.getAddress()); + + if (isNotAlreadyPrimary(newDescription.getAddress())) { + LOGGER.info(format("Discovered replica set primary %s with max election id %s and max set version %d", + newDescription.getAddress(), newDescription.getElectionId(), newDescription.getSetVersion())); + } + + return true; + } + + private boolean isStalePrimary(final ServerDescription description) { + ObjectId electionId = description.getElectionId(); + Integer setVersion = description.getSetVersion(); + if (description.getMaxWireVersion() >= SIX_DOT_ZERO_WIRE_VERSION) { + return nullSafeCompareTo(electionId, maxElectionId) < 0 + || (nullSafeCompareTo(electionId, maxElectionId) == 0 && nullSafeCompareTo(setVersion, maxSetVersion) < 0); + } else { + return setVersion != null && electionId != null + && (nullSafeCompareTo(setVersion, maxSetVersion) < 0 + || (nullSafeCompareTo(setVersion, maxSetVersion) == 0 + && nullSafeCompareTo(electionId, maxElectionId) < 0)); + } + } + + private void invalidatePotentialPrimary(final ServerDescription newDescription, final MongoStalePrimaryException cause) { + LOGGER.info(format("Invalidating potential primary %s whose (set version, election id) tuple of (%d, %s) " + + "is less than one already seen of (%d, %s)", + newDescription.getAddress(), newDescription.getSetVersion(), newDescription.getElectionId(), + maxSetVersion, maxElectionId)); + + addressToServerTupleMap.get(newDescription.getAddress()).server.resetToConnecting(cause); + } + + /** + * Implements the same contract as {@link Comparable#compareTo(Object)}, except that a null value is always considers less-than any + * other value (except null, which it considers as equal-to). + */ + private static > int nullSafeCompareTo(@Nullable final T first, @Nullable final T second) { + if (first == null) { + return second == null ? 0 : -1; + } + if (second == null) { + return 1; + } + return first.compareTo(second); + } + + @Nullable + private static > T nullSafeMax(@Nullable final T first, @Nullable final T second) { + if (first == null) { + return second; + } + if (second == null) { + return first; + } + return first.compareTo(second) >= 0 ? 
first : second; + } + + private boolean isNotAlreadyPrimary(final ServerAddress address) { + ServerTuple serverTuple = addressToServerTupleMap.get(address); + return serverTuple == null || !serverTuple.description.isPrimary(); + } + + private boolean handleShardRouterChanged(final ServerDescription newDescription) { + if (!newDescription.isShardRouter()) { + LOGGER.error(format("Expecting a %s, but found a %s. Removing %s from client view of cluster.", + SHARD_ROUTER, newDescription.getType(), newDescription.getAddress())); + removeServer(newDescription.getAddress()); + } + return true; + } + + private boolean handleStandAloneChanged(final ServerDescription newDescription) { + if (getSettings().getHosts().size() > 1) { + LOGGER.error(format("Expecting a single %s, but found more than one. Removing %s from client view of cluster.", + STANDALONE, newDescription.getAddress())); + clusterType = UNKNOWN; + removeServer(newDescription.getAddress()); + } + return true; + } + + private void addServer(final ServerAddress serverAddress) { + if (!addressToServerTupleMap.containsKey(serverAddress)) { + if (LOGGER.isInfoEnabled()) { + LOGGER.info(format("Adding discovered server %s to client view of cluster", serverAddress)); + } + ClusterableServer server = createServer(serverAddress); + addressToServerTupleMap.put(serverAddress, new ServerTuple(server, getConnectingServerDescription(serverAddress))); + } + } + + private void removeServer(final ServerAddress serverAddress) { + ServerTuple removed = addressToServerTupleMap.remove(serverAddress); + if (removed != null) { + removed.server.close(); + } + } + + private void invalidateOldPrimaries(final ServerAddress newPrimary) { + for (final ServerTuple serverTuple : addressToServerTupleMap.values()) { + if (!serverTuple.description.getAddress().equals(newPrimary) && serverTuple.description.isPrimary()) { + if (LOGGER.isInfoEnabled()) { + LOGGER.info(format("Rediscovering type of existing primary %s", serverTuple.description.getAddress())); + } + serverTuple.server.invalidate(new MongoStalePrimaryException("Primary marked stale due to discovery of newer primary")); + } + } + } + + private ServerDescription getConnectingServerDescription(final ServerAddress serverAddress) { + return ServerDescription.builder().state(CONNECTING).address(serverAddress).build(); + } + + private ClusterDescription updateDescription() { + ClusterDescription newDescription = new ClusterDescription(MULTIPLE, clusterType, getSrvResolutionException(), + getNewServerDescriptionList(), getSettings(), getServerFactory().getSettings()); + updateDescription(newDescription); + return newDescription; + } + + private List getNewServerDescriptionList() { + List serverDescriptions = new ArrayList<>(); + for (final ServerTuple cur : addressToServerTupleMap.values()) { + serverDescriptions.add(cur.description); + } + return serverDescriptions; + } + + private void ensureServers(final ServerDescription description) { + if (description.isPrimary() || !hasPrimary()) { + addNewHosts(description.getHosts()); + addNewHosts(description.getPassives()); + addNewHosts(description.getArbiters()); + } + + if (description.isPrimary()) { + removeExtraHosts(description); + } + } + + private boolean hasPrimary() { + for (ServerTuple serverTuple : addressToServerTupleMap.values()) { + if (serverTuple.description.isPrimary()) { + return true; + } + } + return false; + } + + private void addNewHosts(final Set hosts) { + for (final String cur : hosts) { + addServer(new ServerAddress(cur)); + } + } + + private 
void removeExtraHosts(final ServerDescription serverDescription) { + Set allServerAddresses = getAllServerAddresses(serverDescription); + for (Iterator iterator = addressToServerTupleMap.values().iterator(); iterator.hasNext();) { + ServerTuple cur = iterator.next(); + if (!allServerAddresses.contains(cur.description.getAddress())) { + if (LOGGER.isInfoEnabled()) { + LOGGER.info(format("Server %s is no longer a member of the replica set. Removing from client view of cluster.", + cur.description.getAddress())); + } + iterator.remove(); + cur.server.close(); + } + } + } + + private Set getAllServerAddresses(final ServerDescription serverDescription) { + Set retVal = new HashSet<>(); + addHostsToSet(serverDescription.getHosts(), retVal); + addHostsToSet(serverDescription.getPassives(), retVal); + addHostsToSet(serverDescription.getArbiters(), retVal); + return retVal; + } + + private void addHostsToSet(final Set hosts, final Set retVal) { + for (final String host : hosts) { + retVal.add(new ServerAddress(host)); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/AbstractProtocolExecutor.java b/driver-core/src/main/com/mongodb/internal/connection/AbstractProtocolExecutor.java new file mode 100644 index 00000000000..ba200933860 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/AbstractProtocolExecutor.java @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.internal.session.SessionContext; + +import static com.mongodb.internal.ExceptionUtils.isMongoSocketException; +import static com.mongodb.internal.ExceptionUtils.isOperationTimeoutFromSocketException; + +/** + *

<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public abstract class AbstractProtocolExecutor implements ProtocolExecutor { + + protected boolean shouldMarkSessionDirty(final Throwable e, final SessionContext sessionContext) { + if (!sessionContext.hasSession()) { + return false; + } + return isMongoSocketException(e) || isOperationTimeoutFromSocketException(e); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/AbstractReferenceCounted.java b/driver-core/src/main/com/mongodb/internal/connection/AbstractReferenceCounted.java new file mode 100644 index 00000000000..a2411f410c7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/AbstractReferenceCounted.java @@ -0,0 +1,46 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.internal.binding.ReferenceCounted; + +import java.util.concurrent.atomic.AtomicInteger; + +abstract class AbstractReferenceCounted implements ReferenceCounted { + private final AtomicInteger referenceCount = new AtomicInteger(1); + @Override + public int getCount() { + return referenceCount.get(); + } + + @Override + public ReferenceCounted retain() { + if (referenceCount.incrementAndGet() == 1) { + throw new IllegalStateException("Attempted to increment the reference count when it is already 0"); + } + return this; + } + + @Override + public int release() { + int decrementedValue = referenceCount.decrementAndGet(); + if (decrementedValue < 0) { + throw new IllegalStateException("Attempted to decrement the reference count below 0"); + } + return decrementedValue; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/AsyncConnection.java b/driver-core/src/main/com/mongodb/internal/connection/AsyncConnection.java new file mode 100644 index 00000000000..befc0d9aac2 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/AsyncConnection.java @@ -0,0 +1,56 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
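The retain/release contract in `AbstractReferenceCounted` above is easy to misuse, so a self-contained restatement may help: the count starts at 1 on behalf of the creator, `retain()` after the count has already reached 0 is an error, and the final `release()` is where cleanup belongs. The class below is an illustrative analogue, not driver code:

```java
import java.util.concurrent.atomic.AtomicInteger;

public class RefCountedResource {
    private final AtomicInteger count = new AtomicInteger(1); // creator holds the first reference

    public RefCountedResource retain() {
        if (count.incrementAndGet() == 1) {
            // going 0 -> 1 means a released object was resurrected
            throw new IllegalStateException("Attempted to increment the reference count when it is already 0");
        }
        return this;
    }

    public int release() {
        int value = count.decrementAndGet();
        if (value < 0) {
            throw new IllegalStateException("Attempted to decrement the reference count below 0");
        }
        if (value == 0) {
            free(); // last reference gone; reclaim exactly once
        }
        return value;
    }

    private void free() {
        // return pooled buffers, close sockets, etc.
    }

    public static void main(final String[] args) {
        RefCountedResource resource = new RefCountedResource();
        resource.retain();  // count: 2
        resource.release(); // count: 1
        resource.release(); // count: 0 -> free()
    }
}
```

Note that the driver's `release()` only returns the new count; callers decide what reclamation means when it reaches 0.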
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.ReadPreference; +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.ReferenceCounted; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.FieldNameValidator; +import org.bson.codecs.Decoder; + +/** + * An asynchronous connection to a MongoDB server with non-blocking operations. + * + *

<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +@ThreadSafe +public interface AsyncConnection extends ReferenceCounted { + + @Override + AsyncConnection retain(); + + /** + * Gets the description of the connection. + * + * @return the connection description + */ + ConnectionDescription getDescription(); + + void commandAsync(String database, BsonDocument command, FieldNameValidator fieldNameValidator, + @Nullable ReadPreference readPreference, Decoder commandResultDecoder, OperationContext operationContext, + SingleResultCallback callback); + + void commandAsync(String database, BsonDocument command, FieldNameValidator commandFieldNameValidator, + @Nullable ReadPreference readPreference, Decoder commandResultDecoder, + OperationContext operationContext, boolean responseExpected, MessageSequences sequences, SingleResultCallback callback); + + void markAsPinned(Connection.PinningMode pinningMode); +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/AsynchronousChannelStream.java b/driver-core/src/main/com/mongodb/internal/connection/AsynchronousChannelStream.java new file mode 100644 index 00000000000..89396dae5d3 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/AsynchronousChannelStream.java @@ -0,0 +1,332 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoException; +import com.mongodb.MongoInternalException; +import com.mongodb.MongoSocketReadException; +import com.mongodb.MongoSocketReadTimeoutException; +import com.mongodb.MongoSocketWriteTimeoutException; +import com.mongodb.ServerAddress; +import com.mongodb.connection.AsyncCompletionHandler; +import com.mongodb.connection.SocketSettings; +import com.mongodb.lang.Nullable; +import org.bson.ByteBuf; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.CompletionHandler; +import java.nio.channels.InterruptedByTimeoutException; +import java.util.Iterator; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; + +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.internal.async.AsyncRunnable.beginAsync; +import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +/** + *

<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public abstract class AsynchronousChannelStream implements Stream { + private final ServerAddress serverAddress; + private final SocketSettings settings; + private final PowerOfTwoBufferPool bufferProvider; + // we use `AtomicReference` to guarantee that we do not call `ExtendedAsynchronousByteChannel.close` concurrently with itself + private final AtomicReference channel; + private volatile boolean isClosed; + + public AsynchronousChannelStream(final ServerAddress serverAddress, final SocketSettings settings, + final PowerOfTwoBufferPool bufferProvider) { + this.serverAddress = serverAddress; + this.settings = settings; + this.bufferProvider = bufferProvider; + channel = new AtomicReference<>(); + } + + public ServerAddress getServerAddress() { + return serverAddress; + } + + public SocketSettings getSettings() { + return settings; + } + + public PowerOfTwoBufferPool getBufferProvider() { + return bufferProvider; + } + + public ExtendedAsynchronousByteChannel getChannel() { + return channel.get(); + } + + protected void setChannel(final ExtendedAsynchronousByteChannel channel) { + if (isClosed) { + closeChannel(channel); + } else { + assertTrue(this.channel.compareAndSet(null, channel)); + if (isClosed) { + closeChannel(this.channel.getAndSet(null)); + } + } + } + + @Override + public void writeAsync(final List buffers, final OperationContext operationContext, + final AsyncCompletionHandler handler) { + AsyncWritableByteChannelAdapter byteChannel = new AsyncWritableByteChannelAdapter(); + Iterator iter = buffers.iterator(); + pipeOneBuffer(byteChannel, iter.next(), operationContext, new AsyncCompletionHandler() { + @Override + public void completed(@Nullable final Void t) { + if (iter.hasNext()) { + pipeOneBuffer(byteChannel, iter.next(), operationContext, this); + } else { + handler.completed(null); + } + } + + @Override + public void failed(final Throwable t) { + handler.failed(t); + } + }); + } + + @Override + public void readAsync(final int numBytes, final OperationContext operationContext, final AsyncCompletionHandler handler) { + ByteBuf buffer = bufferProvider.getBuffer(numBytes); + + long timeout = operationContext.getTimeoutContext().getReadTimeoutMS(); + getChannel().read(buffer.asNIO(), timeout, MILLISECONDS, null, new BasicCompletionHandler(buffer, operationContext, handler)); + } + + @Override + public void open(final OperationContext operationContext) throws IOException { + FutureAsyncCompletionHandler handler = new FutureAsyncCompletionHandler<>(); + openAsync(operationContext, handler); + handler.getOpen(); + } + + @Override + public void write(final List buffers, final OperationContext operationContext) throws IOException { + FutureAsyncCompletionHandler handler = new FutureAsyncCompletionHandler<>(); + writeAsync(buffers, operationContext, handler); + handler.getWrite(); + } + + @Override + public ByteBuf read(final int numBytes, final OperationContext operationContext) throws IOException { + FutureAsyncCompletionHandler handler = new FutureAsyncCompletionHandler<>(); + readAsync(numBytes, operationContext, handler); + return handler.getRead(); + } + + @Override + public ServerAddress getAddress() { + return serverAddress; + } + + @Override + public void close() { + isClosed = true; + closeChannel(this.channel.getAndSet(null)); + } + + private void closeChannel(@Nullable final ExtendedAsynchronousByteChannel channel) { + try { + if (channel != null) { + channel.close(); + } + } catch (IOException e) { + // ignore + } + } + + @Override + public boolean isClosed() { 
+ return isClosed; + } + + @Override + public ByteBuf getBuffer(final int size) { + return bufferProvider.getBuffer(size); + } + + private void pipeOneBuffer(final AsyncWritableByteChannelAdapter byteChannel, final ByteBuf byteBuffer, + final OperationContext operationContext, final AsyncCompletionHandler outerHandler) { + byteChannel.write(byteBuffer.asNIO(), operationContext, new AsyncCompletionHandler() { + @Override + public void completed(@Nullable final Void t) { + if (byteBuffer.hasRemaining()) { + byteChannel.write(byteBuffer.asNIO(), operationContext, this); + } else { + outerHandler.completed(null); + } + } + + @Override + public void failed(final Throwable t) { + outerHandler.failed(t); + } + }); + } + + private class AsyncWritableByteChannelAdapter { + void write(final ByteBuffer src, final OperationContext operationContext, final AsyncCompletionHandler handler) { + beginAsync().thenRun((c) -> { + long writeTimeoutMS = operationContext.getTimeoutContext().getWriteTimeoutMS(); + getChannel().write(src, writeTimeoutMS, MILLISECONDS, null, + new AsyncWritableByteChannelAdapter.WriteCompletionHandler(c.asHandler())); + }).finish(handler.asCallback()); + } + + private class WriteCompletionHandler extends BaseCompletionHandler { + + WriteCompletionHandler(final AsyncCompletionHandler handler) { + super(handler); + } + + @Override + public void completed(final Integer result, final Object attachment) { + AsyncCompletionHandler localHandler = getHandlerAndClear(); + localHandler.completed(null); + } + + @Override + public void failed(final Throwable t, final Object attachment) { + AsyncCompletionHandler localHandler = getHandlerAndClear(); + if (t instanceof InterruptedByTimeoutException) { + localHandler.failed(new MongoSocketWriteTimeoutException("Timeout while writing message", serverAddress, t)); + } else { + localHandler.failed(t); + } + } + } + } + + private final class BasicCompletionHandler extends BaseCompletionHandler { + private final AtomicReference byteBufReference; + private final OperationContext operationContext; + + private BasicCompletionHandler(final ByteBuf dst, final OperationContext operationContext, + final AsyncCompletionHandler handler) { + super(handler); + this.byteBufReference = new AtomicReference<>(dst); + this.operationContext = operationContext; + } + + @Override + public void completed(final Integer result, final Void attachment) { + AsyncCompletionHandler localHandler = getHandlerAndClear(); + beginAsync().thenSupply((c) -> { + ByteBuf localByteBuf = byteBufReference.getAndSet(null); + if (result == -1) { + localByteBuf.release(); + throw new MongoSocketReadException("Prematurely reached end of stream", serverAddress); + } else if (!localByteBuf.hasRemaining()) { + localByteBuf.flip(); + c.complete(localByteBuf); + } else { + long readTimeoutMS = operationContext.getTimeoutContext().getReadTimeoutMS(); + getChannel().read(localByteBuf.asNIO(), readTimeoutMS, MILLISECONDS, null, + new BasicCompletionHandler(localByteBuf, operationContext, c.asHandler())); + } + }).finish(localHandler.asCallback()); + } + + @Override + public void failed(final Throwable t, final Void attachment) { + AsyncCompletionHandler localHandler = getHandlerAndClear(); + ByteBuf localByteBuf = byteBufReference.getAndSet(null); + localByteBuf.release(); + if (t instanceof InterruptedByTimeoutException) { + localHandler.failed(new MongoSocketReadTimeoutException("Timeout while receiving message", serverAddress, t)); + } else { + localHandler.failed(t); + } + } + } + + // Private base 
class for all CompletionHandler implementors that ensures the upstream handler is + // set to null before it is used. This is to work around an observed issue with implementations of + // AsynchronousSocketChannel that fail to clear references to handlers stored in instance fields of + // the class. + private abstract static class BaseCompletionHandler implements CompletionHandler { + private final AtomicReference> handlerReference; + + BaseCompletionHandler(final AsyncCompletionHandler handler) { + this.handlerReference = new AtomicReference<>(handler); + } + + AsyncCompletionHandler getHandlerAndClear() { + return handlerReference.getAndSet(null); + } + } + + static class FutureAsyncCompletionHandler implements AsyncCompletionHandler { + private final CountDownLatch latch = new CountDownLatch(1); + private volatile T result; + private volatile Throwable error; + + @Override + public void completed(@Nullable final T result) { + this.result = result; + latch.countDown(); + } + + @Override + public void failed(final Throwable t) { + this.error = t; + latch.countDown(); + } + + void getOpen() throws IOException { + get("Opening"); + } + + void getWrite() throws IOException { + get("Writing to"); + } + + T getRead() throws IOException { + return get("Reading from"); + } + + private T get(final String prefix) throws IOException { + try { + latch.await(); + } catch (InterruptedException e) { + throw interruptAndCreateMongoInterruptedException(prefix + " the AsynchronousSocketChannelStream failed", e); + + } + if (error != null) { + if (error instanceof IOException) { + throw (IOException) error; + } else if (error instanceof MongoException) { + throw (MongoException) error; + } else { + throw new MongoInternalException(prefix + " the TlsChannelStream failed", error); + } + } + return result; + } + + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/AsynchronousClusterEventListener.java b/driver-core/src/main/com/mongodb/internal/connection/AsynchronousClusterEventListener.java new file mode 100644 index 00000000000..77f827f01fa --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/AsynchronousClusterEventListener.java @@ -0,0 +1,186 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
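`FutureAsyncCompletionHandler` just above is what lets the blocking `open`/`write`/`read` methods reuse the asynchronous code path: the callback counts down a latch, and the caller blocks on it. The idea in isolation, as a minimal sketch; the driver's version additionally maps interrupts to `MongoInterruptedException` and rethrows `MongoException` subtypes as-is:

```java
import java.io.IOException;
import java.util.concurrent.CountDownLatch;

public class BlockingBridge<T> {
    private final CountDownLatch latch = new CountDownLatch(1);
    private volatile T result;
    private volatile Throwable error;

    // invoked on the async I/O thread
    public void completed(final T result) {
        this.result = result;
        latch.countDown();
    }

    public void failed(final Throwable t) {
        this.error = t;
        latch.countDown();
    }

    // invoked on the caller's thread; turns the callback into a return value
    public T get() throws IOException, InterruptedException {
        latch.await();
        if (error instanceof IOException) {
            throw (IOException) error;
        }
        if (error != null) {
            throw new RuntimeException(error);
        }
        return result;
    }
}
```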
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.connection.ClusterId; +import com.mongodb.event.ClusterClosedEvent; +import com.mongodb.event.ClusterDescriptionChangedEvent; +import com.mongodb.event.ClusterListener; +import com.mongodb.event.ClusterOpeningEvent; +import com.mongodb.event.ServerClosedEvent; +import com.mongodb.event.ServerDescriptionChangedEvent; +import com.mongodb.event.ServerHeartbeatFailedEvent; +import com.mongodb.event.ServerHeartbeatStartedEvent; +import com.mongodb.event.ServerHeartbeatSucceededEvent; +import com.mongodb.event.ServerListener; +import com.mongodb.event.ServerMonitorListener; +import com.mongodb.event.ServerOpeningEvent; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; + +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.function.Supplier; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; + +/** + * An implementation of a listener for all cluster-related events. Its purpose is the following: + *
<p>
+ * 1. To ensure that cluster-related events are delivered one at a time, with happens-before semantics + * 2. To ensure that application-provided event listener methods do not execute within critical sections of the driver + *
<p>
+ * This is done by adding all events to an unbounded blocking queue, and then publishing them from a dedicated thread by taking + * them off the queue one at a time. + *
<p>
+ * There is an assumption that the last event that should be published is the {@link ClusterClosedEvent}. Once that event is published, + * the publishing thread is allowed to die. + */ +@ThreadSafe +final class AsynchronousClusterEventListener implements ClusterListener, ServerListener, ServerMonitorListener { + private static final Logger LOGGER = Loggers.getLogger("cluster"); + + private final BlockingQueue> eventPublishers = new LinkedBlockingQueue<>(); + private final ClusterListener clusterListener; + private final ServerListener serverListener; + private final ServerMonitorListener serverMonitorListener; + + private final Thread publishingThread; + + @FunctionalInterface + private interface VoidFunction { + void apply(T t); + } + + static AsynchronousClusterEventListener startNew(final ClusterId clusterId, final ClusterListener clusterListener, + final ServerListener serverListener, final ServerMonitorListener serverMonitorListener) { + AsynchronousClusterEventListener result = new AsynchronousClusterEventListener(clusterId, clusterListener, serverListener, + serverMonitorListener); + result.publishingThread.start(); + return result; + } + + private AsynchronousClusterEventListener(final ClusterId clusterId, final ClusterListener clusterListener, + final ServerListener serverListener, final ServerMonitorListener serverMonitorListener) { + this.clusterListener = notNull("clusterListener", clusterListener); + this.serverListener = notNull("serverListener", serverListener); + this.serverMonitorListener = notNull("serverMonitorListener", serverMonitorListener); + publishingThread = new Thread(this::publishEvents, "cluster-event-publisher-" + clusterId.getValue()); + publishingThread.setDaemon(true); + } + + @VisibleForTesting(otherwise = PRIVATE) + Thread getPublishingThread() { + return publishingThread; + } + + @Override + public void clusterOpening(final ClusterOpeningEvent event) { + addClusterEventInvocation(clusterListener -> clusterListener.clusterOpening(event), false); + } + + @Override + public void clusterClosed(final ClusterClosedEvent event) { + addClusterEventInvocation(clusterListener -> clusterListener.clusterClosed(event), true); + } + + @Override + public void clusterDescriptionChanged(final ClusterDescriptionChangedEvent event) { + addClusterEventInvocation(clusterListener -> clusterListener.clusterDescriptionChanged(event), false); + } + + @Override + public void serverOpening(final ServerOpeningEvent event) { + addServerEventInvocation(serverListener -> serverListener.serverOpening(event)); + } + + @Override + public void serverClosed(final ServerClosedEvent event) { + addServerEventInvocation(serverListener -> serverListener.serverClosed(event)); + } + + @Override + public void serverDescriptionChanged(final ServerDescriptionChangedEvent event) { + addServerEventInvocation(serverListener -> serverListener.serverDescriptionChanged(event)); + } + + @Override + public void serverHearbeatStarted(final ServerHeartbeatStartedEvent event) { + addServerMonitorEventInvocation(serverMonitorListener -> serverMonitorListener.serverHearbeatStarted(event)); + } + + @Override + public void serverHeartbeatSucceeded(final ServerHeartbeatSucceededEvent event) { + addServerMonitorEventInvocation(serverMonitorListener -> serverMonitorListener.serverHeartbeatSucceeded(event)); + } + + @Override + public void serverHeartbeatFailed(final ServerHeartbeatFailedEvent event) { + addServerMonitorEventInvocation(serverMonitorListener -> serverMonitorListener.serverHeartbeatFailed(event)); + } 
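The mechanism the class javadoc describes reduces to a small, general pattern: producers enqueue listener invocations, a single daemon thread drains them in order, and a supplier returning `true` marks the terminal event so the thread can exit. A standalone sketch of that pattern (class and method names here are mine, not the driver's):

```java
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.function.Supplier;

public class SerializedEventPublisher {
    private final BlockingQueue<Supplier<Boolean>> queue = new LinkedBlockingQueue<>();
    private final Thread publisher = new Thread(this::drain, "event-publisher");

    public SerializedEventPublisher() {
        publisher.setDaemon(true); // must not keep the JVM alive
        publisher.start();
    }

    public void publish(final Runnable listenerInvocation, final boolean isLast) {
        if (!publisher.isAlive()) {
            return; // guard against rogue events after the terminal one
        }
        queue.add(() -> {
            try {
                listenerInvocation.run();
            } catch (Exception ignored) {
                // a listener bug must not kill the publishing thread
            }
            return isLast;
        });
    }

    private void drain() {
        try {
            while (true) {
                if (queue.take().get()) {
                    break; // terminal event published; let the thread die
                }
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
}
```

Because a single thread performs every invocation, each listener call happens-before the next, which is exactly the ordering guarantee the javadoc promises.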
+ + private void addClusterEventInvocation(final VoidFunction eventPublisher, final boolean isLastEvent) { + addEvent(() -> { + eventPublisher.apply(clusterListener); + return isLastEvent; + }); + } + + private void addServerEventInvocation(final VoidFunction eventPublisher) { + addEvent(() -> { + eventPublisher.apply(serverListener); + return false; + }); + } + + private void addServerMonitorEventInvocation(final VoidFunction eventPublisher) { + addEvent(() -> { + eventPublisher.apply(serverMonitorListener); + return false; + }); + } + + private void addEvent(final Supplier supplier) { + // protect against rogue publishers + if (!publishingThread.isAlive()) { + return; + } + eventPublishers.add(supplier); + } + + private void publishEvents() { + try { + while (true) { + try { + Supplier eventPublisher = eventPublishers.take(); + boolean isLastEvent = eventPublisher.get(); + if (isLastEvent) { + break; + } + } catch (Exception e) { + // ignore exceptions thrown from listeners, also ignore interrupts that user code may cause + } + } + } catch (Throwable t) { + LOGGER.error(this + " stopped working. You may want to recreate the MongoClient", t); + throw t; + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/AsynchronousSocketChannelStream.java b/driver-core/src/main/com/mongodb/internal/connection/AsynchronousSocketChannelStream.java new file mode 100644 index 00000000000..c60981c115e --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/AsynchronousSocketChannelStream.java @@ -0,0 +1,210 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoSocketException; +import com.mongodb.MongoSocketOpenException; +import com.mongodb.ServerAddress; +import com.mongodb.connection.AsyncCompletionHandler; +import com.mongodb.connection.SocketSettings; +import com.mongodb.lang.Nullable; +import com.mongodb.spi.dns.InetAddressResolver; + +import java.io.IOException; +import java.net.SocketAddress; +import java.net.StandardSocketOptions; +import java.nio.ByteBuffer; +import java.nio.channels.AsynchronousChannelGroup; +import java.nio.channels.AsynchronousSocketChannel; +import java.nio.channels.CompletionHandler; +import java.util.LinkedList; +import java.util.Queue; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.internal.connection.ServerAddressHelper.getSocketAddresses; + +/** + *

<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class AsynchronousSocketChannelStream extends AsynchronousChannelStream { + private final ServerAddress serverAddress; + private final InetAddressResolver inetAddressResolver; + private final SocketSettings settings; + @Nullable + private final AsynchronousChannelGroup group; + + AsynchronousSocketChannelStream( + final ServerAddress serverAddress, final InetAddressResolver inetAddressResolver, + final SocketSettings settings, final PowerOfTwoBufferPool bufferProvider) { + this(serverAddress, inetAddressResolver, settings, bufferProvider, null); + } + + public AsynchronousSocketChannelStream( + final ServerAddress serverAddress, final InetAddressResolver inetAddressResolver, + final SocketSettings settings, final PowerOfTwoBufferPool bufferProvider, + @Nullable final AsynchronousChannelGroup group) { + super(serverAddress, settings, bufferProvider); + this.serverAddress = serverAddress; + this.inetAddressResolver = inetAddressResolver; + this.settings = settings; + this.group = group; + } + + @Override + public void openAsync(final OperationContext operationContext, final AsyncCompletionHandler handler) { + isTrue("unopened", getChannel() == null); + Queue socketAddressQueue; + + try { + socketAddressQueue = new LinkedList<>(getSocketAddresses(serverAddress, inetAddressResolver)); + } catch (Throwable t) { + handler.failed(t); + return; + } + + initializeSocketChannel(handler, socketAddressQueue); + } + + private void initializeSocketChannel(final AsyncCompletionHandler handler, final Queue socketAddressQueue) { + if (socketAddressQueue.isEmpty()) { + handler.failed(new MongoSocketException("Exception opening socket", serverAddress)); + } else { + SocketAddress socketAddress = socketAddressQueue.poll(); + + try { + AsynchronousSocketChannel attemptConnectionChannel; + attemptConnectionChannel = group == null + ? 
AsynchronousSocketChannel.open() + : AsynchronousSocketChannel.open(group); + attemptConnectionChannel.setOption(StandardSocketOptions.TCP_NODELAY, true); + attemptConnectionChannel.setOption(StandardSocketOptions.SO_KEEPALIVE, true); + if (settings.getReceiveBufferSize() > 0) { + attemptConnectionChannel.setOption(StandardSocketOptions.SO_RCVBUF, settings.getReceiveBufferSize()); + } + if (settings.getSendBufferSize() > 0) { + attemptConnectionChannel.setOption(StandardSocketOptions.SO_SNDBUF, settings.getSendBufferSize()); + } + + attemptConnectionChannel.connect(socketAddress, null, + new OpenCompletionHandler(handler, socketAddressQueue, attemptConnectionChannel)); + } catch (IOException e) { + handler.failed(new MongoSocketOpenException("Exception opening socket", serverAddress, e)); + } catch (Throwable t) { + handler.failed(t); + } + } + } + + private class OpenCompletionHandler implements CompletionHandler { + private final AtomicReference> handlerReference; + private final Queue socketAddressQueue; + private final AsynchronousSocketChannel attemptConnectionChannel; + + OpenCompletionHandler(final AsyncCompletionHandler handler, final Queue socketAddressQueue, + final AsynchronousSocketChannel attemptConnectionChannel) { + this.handlerReference = new AtomicReference<>(handler); + this.socketAddressQueue = socketAddressQueue; + this.attemptConnectionChannel = attemptConnectionChannel; + } + + @Override + public void completed(final Void result, final Object attachment) { + setChannel(new AsynchronousSocketChannelAdapter(attemptConnectionChannel)); + handlerReference.getAndSet(null).completed(null); + } + + @Override + public void failed(final Throwable exc, final Object attachment) { + AsyncCompletionHandler localHandler = handlerReference.getAndSet(null); + + if (socketAddressQueue.isEmpty()) { + if (exc instanceof IOException) { + localHandler.failed(new MongoSocketOpenException("Exception opening socket", getAddress(), exc)); + } else { + localHandler.failed(exc); + } + } else { + initializeSocketChannel(localHandler, socketAddressQueue); + } + } + } + + private static final class AsynchronousSocketChannelAdapter implements ExtendedAsynchronousByteChannel { + private final AsynchronousSocketChannel channel; + + private AsynchronousSocketChannelAdapter(final AsynchronousSocketChannel channel) { + this.channel = channel; + } + + @Override + public void read(final ByteBuffer dst, final long timeout, final TimeUnit unit, @Nullable final A attach, + final CompletionHandler handler) { + channel.read(dst, timeout, unit, attach, handler); + } + + @Override + public void read(final ByteBuffer[] dsts, final int offset, final int length, final long timeout, final TimeUnit unit, + @Nullable final A attach, final CompletionHandler handler) { + channel.read(dsts, offset, length, timeout, unit, attach, handler); + } + + @Override + public void write(final ByteBuffer src, final long timeout, final TimeUnit unit, final A attach, + final CompletionHandler handler) { + channel.write(src, timeout, unit, attach, handler); + } + + @Override + public void write(final ByteBuffer[] srcs, final int offset, final int length, final long timeout, final TimeUnit unit, + final A attach, final CompletionHandler handler) { + channel.write(srcs, offset, length, timeout, unit, attach, handler); + } + + @Override + public void read(final ByteBuffer dst, final A attachment, final CompletionHandler handler) { + channel.read(dst, attachment, handler); + } + + @Override + public Future read(final ByteBuffer dst) { + 
return channel.read(dst); + } + + @Override + public void write(final ByteBuffer src, final A attachment, final CompletionHandler handler) { + channel.write(src, attachment, handler); + } + + @Override + public Future write(final ByteBuffer src) { + return channel.write(src); + } + + @Override + public boolean isOpen() { + return channel.isOpen(); + } + + @Override + public void close() throws IOException { + channel.close(); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/AsynchronousSocketChannelStreamFactory.java b/driver-core/src/main/com/mongodb/internal/connection/AsynchronousSocketChannelStreamFactory.java new file mode 100644 index 00000000000..1ea15abe59d --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/AsynchronousSocketChannelStreamFactory.java @@ -0,0 +1,67 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.ServerAddress; +import com.mongodb.connection.SocketSettings; +import com.mongodb.connection.SslSettings; +import com.mongodb.lang.Nullable; +import com.mongodb.spi.dns.InetAddressResolver; + +import java.nio.channels.AsynchronousChannelGroup; + +import static com.mongodb.assertions.Assertions.assertFalse; +import static com.mongodb.assertions.Assertions.notNull; + +/** + * Factory to create a Stream that's an AsynchronousSocketChannelStream. Throws an exception if SSL is enabled. + */ +public class AsynchronousSocketChannelStreamFactory implements StreamFactory { + private final PowerOfTwoBufferPool bufferProvider = PowerOfTwoBufferPool.DEFAULT; + private final SocketSettings settings; + private final InetAddressResolver inetAddressResolver; + @Nullable + private final AsynchronousChannelGroup group; + + /** + * Create a new factory with the default {@code BufferProvider} and {@code AsynchronousChannelGroup}. 
+ * + * @param settings the settings for the connection to a MongoDB server + * @param sslSettings the settings for connecting via SSL + */ + public AsynchronousSocketChannelStreamFactory( + final InetAddressResolver inetAddressResolver, final SocketSettings settings, + final SslSettings sslSettings) { + this(inetAddressResolver, settings, sslSettings, null); + } + + AsynchronousSocketChannelStreamFactory( + final InetAddressResolver inetAddressResolver, final SocketSettings settings, + final SslSettings sslSettings, @Nullable final AsynchronousChannelGroup group) { + assertFalse(sslSettings.isEnabled()); + this.inetAddressResolver = inetAddressResolver; + this.settings = notNull("settings", settings); + this.group = group; + } + + @Override + public Stream create(final ServerAddress serverAddress) { + return new AsynchronousSocketChannelStream( + serverAddress, inetAddressResolver, settings, bufferProvider, group); + } + +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/AsynchronousSocketChannelStreamFactoryFactory.java b/driver-core/src/main/com/mongodb/internal/connection/AsynchronousSocketChannelStreamFactoryFactory.java new file mode 100644 index 00000000000..8c5a8f654c5 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/AsynchronousSocketChannelStreamFactoryFactory.java @@ -0,0 +1,59 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.connection.SocketSettings; +import com.mongodb.connection.SslSettings; +import com.mongodb.lang.Nullable; +import com.mongodb.spi.dns.InetAddressResolver; + +import java.nio.channels.AsynchronousChannelGroup; + +/** + * A {@code StreamFactoryFactory} implementation for AsynchronousSocketChannel-based streams. 
+ * + * @see java.nio.channels.AsynchronousSocketChannel + */ +public final class AsynchronousSocketChannelStreamFactoryFactory implements StreamFactoryFactory { + private final InetAddressResolver inetAddressResolver; + @Nullable + private final AsynchronousChannelGroup group; + + public AsynchronousSocketChannelStreamFactoryFactory(final InetAddressResolver inetAddressResolver) { + this(inetAddressResolver, null); + } + + AsynchronousSocketChannelStreamFactoryFactory( + final InetAddressResolver inetAddressResolver, + @Nullable final AsynchronousChannelGroup group) { + this.inetAddressResolver = inetAddressResolver; + this.group = group; + } + + @Override + public StreamFactory create(final SocketSettings socketSettings, final SslSettings sslSettings) { + return new AsynchronousSocketChannelStreamFactory( + inetAddressResolver, socketSettings, sslSettings, group); + } + + @Override + public void close() { + if (group != null) { + group.shutdown(); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/Authenticator.java b/driver-core/src/main/com/mongodb/internal/connection/Authenticator.java new file mode 100644 index 00000000000..2889a938709 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/Authenticator.java @@ -0,0 +1,120 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoCredential; +import com.mongodb.MongoInternalException; +import com.mongodb.ServerApi; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ServerType; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.lang.NonNull; +import com.mongodb.lang.Nullable; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.async.AsyncRunnable.beginAsync; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public abstract class Authenticator { + private final MongoCredentialWithCache credential; + private final ClusterConnectionMode clusterConnectionMode; + private final ServerApi serverApi; + + Authenticator(@NonNull final MongoCredentialWithCache credential, final ClusterConnectionMode clusterConnectionMode, + @Nullable final ServerApi serverApi) { + this.credential = credential; + this.clusterConnectionMode = notNull("clusterConnectionMode", clusterConnectionMode); + this.serverApi = serverApi; + } + + public static boolean shouldAuthenticate(@Nullable final Authenticator authenticator, + final ConnectionDescription connectionDescription) { + return authenticator != null && connectionDescription.getServerType() != ServerType.REPLICA_SET_ARBITER; + } + + @NonNull + MongoCredentialWithCache getMongoCredentialWithCache() { + return credential; + } + + @NonNull + MongoCredential getMongoCredential() { + return credential.getCredential(); + } + + ClusterConnectionMode getClusterConnectionMode() { + return clusterConnectionMode; + } + + @Nullable + ServerApi getServerApi() { + return serverApi; + } + + @NonNull + String getUserNameNonNull() { + String userName = credential.getCredential().getUserName(); + if (userName == null) { + throw new MongoInternalException("User name can not be null"); + } + return userName; + } + + @NonNull + char[] getPasswordNonNull() { + char[] password = credential.getCredential().getPassword(); + if (password == null) { + throw new MongoInternalException("Password can not be null"); + } + return password; + } + + @NonNull + T getNonNullMechanismProperty(final String key, @Nullable final T defaultValue) { + T mechanismProperty = credential.getCredential().getMechanismProperty(key, defaultValue); + if (mechanismProperty == null) { + throw new MongoInternalException("Mechanism property can not be null"); + } + return mechanismProperty; + + } + + abstract void authenticate(InternalConnection connection, ConnectionDescription connectionDescription, + OperationContext operationContext); + + abstract void authenticateAsync(InternalConnection connection, ConnectionDescription connectionDescription, + OperationContext operationContext, SingleResultCallback callback); + + public void reauthenticate(final InternalConnection connection, final OperationContext operationContext) { + authenticate(connection, connection.getDescription(), operationContextWithoutSession(operationContext)); + } + + public void reauthenticateAsync(final InternalConnection connection, final OperationContext operationContext, + final SingleResultCallback callback) { + beginAsync().thenRun((c) -> { + authenticateAsync(connection, connection.getDescription(), operationContextWithoutSession(operationContext), c); + }).finish(callback); + } + + static OperationContext operationContextWithoutSession(final OperationContext operationContext) { + return operationContext.withSessionContext( + new ReadConcernAwareNoOpSessionContext(operationContext.getSessionContext().getReadConcern())); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/AuthorizationHeader.java b/driver-core/src/main/com/mongodb/internal/connection/AuthorizationHeader.java new file mode 100644 index 00000000000..7718bc06760 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/AuthorizationHeader.java @@ -0,0 +1,242 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
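To make the `Authenticator` contract concrete, here is a hypothetical no-op subclass (invented purely for illustration; real implementations such as `AwsAuthenticator` below run an actual authentication conversation in these two methods). It must live in `com.mongodb.internal.connection`, since the constructor and the abstract methods are package-private:

```java
package com.mongodb.internal.connection;

import com.mongodb.ServerApi;
import com.mongodb.connection.ClusterConnectionMode;
import com.mongodb.connection.ConnectionDescription;
import com.mongodb.internal.async.SingleResultCallback;
import com.mongodb.lang.Nullable;

// Hypothetical subclass, shown only to illustrate the sync/async contract.
final class NoOpAuthenticator extends Authenticator {

    NoOpAuthenticator(final MongoCredentialWithCache credential,
                      final ClusterConnectionMode clusterConnectionMode,
                      @Nullable final ServerApi serverApi) {
        super(credential, clusterConnectionMode, serverApi);
    }

    @Override
    void authenticate(final InternalConnection connection,
                      final ConnectionDescription connectionDescription,
                      final OperationContext operationContext) {
        // A real implementation would exchange messages on `connection` here,
        // typically using getUserNameNonNull() / getPasswordNonNull().
    }

    @Override
    void authenticateAsync(final InternalConnection connection,
                           final ConnectionDescription connectionDescription,
                           final OperationContext operationContext,
                           final SingleResultCallback<Void> callback) {
        callback.onResult(null, null); // complete the callback exactly once
    }
}
```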
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.lang.Nullable; + +import javax.crypto.Mac; +import javax.crypto.spec.SecretKeySpec; +import javax.security.sasl.SaslException; +import java.nio.charset.StandardCharsets; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.util.Arrays; +import java.util.Base64; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +final class AuthorizationHeader { + private static final String AWS4_HMAC_SHA256 = "AWS4-HMAC-SHA256"; + private static final String SERVICE = "sts"; + + private final String host; + private final String timestamp; + private final String signature; + private final String sessionToken; + private final String authorizationHeader; + private final byte[] nonce; + private final Map requestHeaders; + private final String body; + + private AuthorizationHeader(final Builder builder) throws SaslException { + this.sessionToken = builder.sessionToken; + this.host = builder.host; + this.timestamp = builder.timestamp; + this.nonce = builder.nonce; + this.body = "Action=GetCallerIdentity&Version=2011-06-15"; + this.requestHeaders = getRequestHeaders(); + + String canonicalRequest = createCanonicalRequest("POST", "", body, requestHeaders); + String toSign = createStringToSign(hash(canonicalRequest), getTimestamp(), getCredentialScope()); + this.signature = calculateSignature(toSign, builder.secretKey, getDate(), getRegion(host), SERVICE); + + this.authorizationHeader = String.format("%s Credential=%s/%s, SignedHeaders=%s, Signature=%s", AWS4_HMAC_SHA256, + builder.accessKeyID, getCredentialScope(), getSignedHeaders(this.requestHeaders), getSignature()); + } + + static String createCanonicalRequest(final String method, final String query, final String body, + final Map requestHeaders) throws SaslException { + String headers = getCanonicalHeaders(requestHeaders); + String signedHeaders = getSignedHeaders(requestHeaders); + + List request = Arrays.asList(method, "/", query, headers, signedHeaders, hash(body)); + return String.join("\n", request); + } + + static String createStringToSign(final String hash, final String timestamp, final String credentialScope) { + List toSign = Arrays.asList(AWS4_HMAC_SHA256, timestamp, credentialScope, hash); + return String.join("\n", toSign); + } + + static String calculateSignature(final String toSign, final String secret, final String date, final String region, + final String service) throws SaslException { + + byte[] kDate = hmac(decodeUTF8("AWS4" + secret), decodeUTF8(date)); + byte[] kRegion = hmac(kDate, decodeUTF8(region)); + byte[] kService = hmac(kRegion, decodeUTF8(service)); + byte[] kSigning = hmac(kService, decodeUTF8("aws4_request")); + + return hexEncode(hmac(kSigning, decodeUTF8(toSign))); + } + + private Map getRequestHeaders() { + if (this.requestHeaders != null) { + return this.requestHeaders; + } + + Map 
requestHeaders = new HashMap<>(); + requestHeaders.put("Content-Type", "application/x-www-form-urlencoded"); + requestHeaders.put("Content-Length", String.valueOf(this.body.length())); + requestHeaders.put("Host", this.host); + requestHeaders.put("X-Amz-Date", this.timestamp); + requestHeaders.put("X-MongoDB-Server-Nonce", Base64.getEncoder().encodeToString(this.nonce)); + requestHeaders.put("X-MongoDB-GS2-CB-Flag", "n"); + if (this.sessionToken != null) { + requestHeaders.put("X-Amz-Security-Token", this.sessionToken); + } + return requestHeaders; + } + + private String getCredentialScope() throws SaslException { + return String.format("%s/%s/%s/aws4_request", getDate(), getRegion(this.host), SERVICE); + } + + static String getSignedHeaders(final Map requestHeaders) { + return requestHeaders.keySet().stream() + .map(String::toLowerCase) + .sorted() + .collect(Collectors.joining(";")); + } + + static String getCanonicalHeaders(final Map requestHeaders) { + return requestHeaders.entrySet().stream() + .map(kvp -> String.format("%s:%s\n", kvp.getKey().toLowerCase(), kvp.getValue().trim().replaceAll(" +", " "))) + .sorted() + .collect(Collectors.joining("")); + } + + static String getRegion(final String host) throws SaslException { + String word = "(\\w)+(-\\w)*"; + if (host.equals("sts.amazonaws.com") || host.matches(String.format("%s", word))) { + return "us-east-1"; + } + + if (host.matches(String.format("%s(.%s)+", word, word))) { + return host.split("\\.")[1]; + } + + throw new SaslException("Invalid host"); + } + + String getSignature() { + return this.signature; + } + + String getTimestamp() { + return this.timestamp; + } + + private String getDate() { + return getTimestamp().substring(0, "YYYYMMDD".length()); + } + + static String hash(final String str) throws SaslException { + return hexEncode(sha256(str)).toLowerCase(); + } + + private static String hexEncode(final byte[] bytes) { + StringBuilder sb = new StringBuilder(); + for (byte b : bytes) { + sb.append(String.format("%02x", b)); + } + return sb.toString(); + } + + + private static byte[] decodeUTF8(final String str) { + return str.getBytes(StandardCharsets.UTF_8); + } + + private static byte[] hmac(final byte[] secret, final byte[] message) throws SaslException { + byte[] hmacSha256; + try { + Mac mac = Mac.getInstance("HmacSHA256"); + SecretKeySpec spec = new SecretKeySpec(secret, "HmacSHA256"); + mac.init(spec); + hmacSha256 = mac.doFinal(message); + } catch (Exception e) { + throw new SaslException(e.getMessage()); + } + return hmacSha256; + } + + private static byte[] sha256(final String payload) throws SaslException { + MessageDigest md; + try { + md = MessageDigest.getInstance("SHA-256"); + } catch (NoSuchAlgorithmException e) { + throw new SaslException(e.getMessage()); + } + return md.digest(payload.getBytes(StandardCharsets.UTF_8)); + } + + @Override + public String toString() { + return this.authorizationHeader; + } + + public static AuthorizationHeader.Builder builder() { + return new AuthorizationHeader.Builder(); + } + + static final class Builder { + private String accessKeyID; + private String secretKey; + private String sessionToken; + private String host; + private String timestamp; + private byte[] nonce; + + private Builder() {} + + Builder setAccessKeyID(final String accessKeyID) { + this.accessKeyID = accessKeyID; + return this; + } + + Builder setSecretKey(final String secretKey) { + this.secretKey = secretKey; + return this; + } + + Builder setSessionToken(@Nullable final String sessionToken) { + 
this.sessionToken = sessionToken; + return this; + } + + Builder setHost(final String host) { + this.host = host; + return this; + } + + Builder setTimestamp(final String timestamp) { + this.timestamp = timestamp; + return this; + } + + Builder setNonce(final byte[] nonce) { + this.nonce = nonce; + return this; + } + + AuthorizationHeader build() throws SaslException { + return new AuthorizationHeader(this); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/AwsAuthenticator.java b/driver-core/src/main/com/mongodb/internal/connection/AwsAuthenticator.java new file mode 100644 index 00000000000..294e88b81ea --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/AwsAuthenticator.java @@ -0,0 +1,171 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.AwsCredential; +import com.mongodb.MongoClientException; +import com.mongodb.MongoCredential; +import com.mongodb.MongoException; +import com.mongodb.ServerAddress; +import com.mongodb.ServerApi; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.internal.authentication.AwsCredentialHelper; +import com.mongodb.lang.Nullable; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.RawBsonDocument; + +import javax.security.sasl.SaslClient; +import javax.security.sasl.SaslException; +import java.security.SecureRandom; +import java.time.Instant; +import java.time.ZoneId; +import java.time.format.DateTimeFormatter; +import java.util.Arrays; +import java.util.function.Supplier; + +import static com.mongodb.AuthenticationMechanism.MONGODB_AWS; +import static com.mongodb.MongoCredential.AWS_CREDENTIAL_PROVIDER_KEY; +import static com.mongodb.MongoCredential.AWS_SESSION_TOKEN_KEY; +import static com.mongodb.assertions.Assertions.assertNotNull; +import static java.lang.String.format; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class AwsAuthenticator extends SaslAuthenticator { + private static final String MONGODB_AWS_MECHANISM_NAME = "MONGODB-AWS"; + private static final int RANDOM_LENGTH = 32; + + public AwsAuthenticator(final MongoCredentialWithCache credential, final ClusterConnectionMode clusterConnectionMode, + @Nullable final ServerApi serverApi) { + super(credential, clusterConnectionMode, serverApi); + + if (getMongoCredential().getAuthenticationMechanism() != MONGODB_AWS) { + throw new MongoException("Incorrect mechanism: " + getMongoCredential().getMechanism()); + } + } + + @Override + public String getMechanismName() { + return MONGODB_AWS_MECHANISM_NAME; + } + + @Override + protected SaslClient createSaslClient(final ServerAddress serverAddress, final OperationContext operationContext) { + return new AwsSaslClient(getMongoCredential()); + } + + private static class AwsSaslClient extends SaslClientImpl { + private final byte[] clientNonce = new byte[RANDOM_LENGTH]; + private int step = -1; + + AwsSaslClient(final MongoCredential credential) { + super(credential); + } + + @Override + public byte[] evaluateChallenge(final byte[] challenge) throws SaslException { + step++; + if (step == 0) { + return computeClientFirstMessage(); + } else if (step == 1) { + return computeClientFinalMessage(challenge); + } else { + throw new SaslException(format("Too many steps involved in the %s negotiation.", getMechanismName())); + } + } + + @Override + public boolean isComplete() { + return step == 1; + } + + private byte[] computeClientFirstMessage() { + new SecureRandom().nextBytes(this.clientNonce); + + BsonDocument document = new BsonDocument() + .append("r", new BsonBinary(this.clientNonce)) + .append("p", new BsonInt32('n')); + return toBson(document); + } + + private byte[] computeClientFinalMessage(final byte[] serverFirst) throws SaslException { + BsonDocument document = new RawBsonDocument(serverFirst); + String host = document.getString("h").getValue(); + + byte[] serverNonce = document.getBinary("s").getData(); + if (serverNonce.length != (2 * RANDOM_LENGTH)) { + throw new SaslException(format("Server nonce must be %d bytes", 2 * RANDOM_LENGTH)); + } else if (!Arrays.equals(Arrays.copyOf(serverNonce, RANDOM_LENGTH), this.clientNonce)) { + throw new SaslException(format("The first %d bytes of the server nonce must be the client nonce", RANDOM_LENGTH)); + } + + String timestamp = DateTimeFormatter.ofPattern("yyyyMMdd'T'HHmmss'Z'") + .withZone(ZoneId.of("UTC")) + .format(Instant.now()); + + AwsCredential awsCredential = createAwsCredential(); + String sessionToken = awsCredential.getSessionToken(); + AuthorizationHeader authorizationHeader = AuthorizationHeader.builder() + .setAccessKeyID(awsCredential.getAccessKeyId()) + .setSecretKey(awsCredential.getSecretAccessKey()) + .setSessionToken(sessionToken) + .setHost(host) + .setNonce(serverNonce) + .setTimestamp(timestamp) + .build(); + + BsonDocument ret = new BsonDocument() + .append("a", new BsonString(authorizationHeader.toString())) + .append("d", new BsonString(authorizationHeader.getTimestamp())); + if (sessionToken != null) { + ret.append("t", new BsonString(sessionToken)); + } + + return toBson(ret); + } + + private AwsCredential createAwsCredential() { + AwsCredential awsCredential; + MongoCredential credential = getCredential(); + if (credential.getUserName() != null) { + if (credential.getPassword() == null) { + throw new MongoClientException("secretAccessKey is required for AWS credential"); + } + awsCredential = new 
AwsCredential(assertNotNull(credential.getUserName()), + new String(assertNotNull(credential.getPassword())), + credential.getMechanismProperty(AWS_SESSION_TOKEN_KEY, null)); + } else if (credential.getMechanismProperty(AWS_CREDENTIAL_PROVIDER_KEY, null) != null) { + Supplier awsCredentialSupplier = assertNotNull( + credential.getMechanismProperty(AWS_CREDENTIAL_PROVIDER_KEY, null)); + awsCredential = awsCredentialSupplier.get(); + if (awsCredential == null) { + throw new MongoClientException("AWS_CREDENTIAL_PROVIDER_KEY must return an AwsCredential instance"); + } + } else { + awsCredential = AwsCredentialHelper.obtainFromEnvironment(); + if (awsCredential == null) { + throw new MongoClientException("Unable to obtain AWS credential from the environment"); + } + } + return awsCredential; + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/BaseCluster.java b/driver-core/src/main/com/mongodb/internal/connection/BaseCluster.java new file mode 100644 index 00000000000..4146d06c22e --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/BaseCluster.java @@ -0,0 +1,666 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoClientException; +import com.mongodb.MongoException; +import com.mongodb.MongoIncompatibleDriverException; +import com.mongodb.MongoInterruptedException; +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.MongoTimeoutException; +import com.mongodb.ServerAddress; +import com.mongodb.UnixServerAddress; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ClusterSettings; +import com.mongodb.connection.ServerDescription; +import com.mongodb.event.ClusterClosedEvent; +import com.mongodb.event.ClusterDescriptionChangedEvent; +import com.mongodb.event.ClusterListener; +import com.mongodb.event.ClusterOpeningEvent; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.connection.OperationContext.ServerDeprioritization; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.internal.logging.LogMessage; +import com.mongodb.internal.logging.LogMessage.Entry; +import com.mongodb.internal.logging.StructuredLogger; +import com.mongodb.internal.selector.AtMostTwoRandomServerSelector; +import com.mongodb.internal.selector.LatencyMinimizingServerSelector; +import com.mongodb.internal.selector.MinimumOperationCountServerSelector; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import com.mongodb.selector.CompositeServerSelector; +import com.mongodb.selector.ServerSelector; + +import java.util.Deque; +import java.util.Iterator; +import java.util.List; +import java.util.Objects; +import 
java.util.concurrent.ConcurrentLinkedDeque; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.ReentrantLock; +import java.util.stream.Stream; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.connection.ClusterType.UNKNOWN; +import static com.mongodb.connection.ServerDescription.MAX_DRIVER_WIRE_VERSION; +import static com.mongodb.connection.ServerDescription.MIN_DRIVER_SERVER_VERSION; +import static com.mongodb.connection.ServerDescription.MIN_DRIVER_WIRE_VERSION; +import static com.mongodb.internal.Locks.withInterruptibleLock; +import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; +import static com.mongodb.internal.connection.EventHelper.wouldDescriptionsGenerateEquivalentEvents; +import static com.mongodb.internal.event.EventListenerHelper.singleClusterListener; +import static com.mongodb.internal.logging.LogMessage.Component.SERVER_SELECTION; +import static com.mongodb.internal.logging.LogMessage.Component.TOPOLOGY; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.FAILURE; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.OPERATION; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.OPERATION_ID; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.REMAINING_TIME_MS; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.SELECTOR; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.SERVER_HOST; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.SERVER_PORT; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.TOPOLOGY_DESCRIPTION; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.TOPOLOGY_ID; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.TOPOLOGY_NEW_DESCRIPTION; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.TOPOLOGY_PREVIOUS_DESCRIPTION; +import static com.mongodb.internal.logging.LogMessage.Level.DEBUG; +import static com.mongodb.internal.logging.LogMessage.Level.INFO; +import static com.mongodb.internal.time.Timeout.ZeroSemantics.ZERO_DURATION_MEANS_EXPIRED; +import static java.lang.String.format; +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.NANOSECONDS; +import static java.util.stream.Collectors.toList; + +abstract class BaseCluster implements Cluster { + private static final Logger LOGGER = Loggers.getLogger("cluster"); + private static final StructuredLogger STRUCTURED_LOGGER = new StructuredLogger("cluster"); + + private final ReentrantLock lock = new ReentrantLock(); + private final AtomicReference phase = new AtomicReference<>(new CountDownLatch(1)); + private final ClusterableServerFactory serverFactory; + private final ClusterId clusterId; + private final ClusterSettings settings; + private final ClusterListener clusterListener; + private final Deque waitQueue = new ConcurrentLinkedDeque<>(); + private final ClusterClock clusterClock = new ClusterClock(); + private final ClientMetadata clientMetadata; + private Thread waitQueueHandler; + + private volatile boolean isClosed; + private volatile ClusterDescription description; + + BaseCluster(final 
ClusterId clusterId, + final ClusterSettings settings, + final ClusterableServerFactory serverFactory, + final ClientMetadata clientMetadata) { + this.clusterId = notNull("clusterId", clusterId); + this.settings = notNull("settings", settings); + this.serverFactory = notNull("serverFactory", serverFactory); + this.clusterListener = singleClusterListener(settings); + this.description = new ClusterDescription(settings.getMode(), UNKNOWN, emptyList(), + settings, serverFactory.getSettings()); + this.clientMetadata = clientMetadata; + logTopologyMonitoringStarting(clusterId); + ClusterOpeningEvent clusterOpeningEvent = new ClusterOpeningEvent(clusterId); + clusterListener.clusterOpening(clusterOpeningEvent); + } + + @Override + public ClusterClock getClock() { + return clusterClock; + } + + @Override + public ClientMetadata getClientMetadata() { + return clientMetadata; + } + + @Override + public ServerTuple selectServer(final ServerSelector serverSelector, final OperationContext operationContext) { + isTrue("open", !isClosed()); + + ServerDeprioritization serverDeprioritization = operationContext.getServerDeprioritization(); + boolean selectionWaitingLogged = false; + Timeout computedServerSelectionTimeout = operationContext.getTimeoutContext().computeServerSelectionTimeout(); + logServerSelectionStarted(operationContext, clusterId, serverSelector, description); + while (true) { + CountDownLatch currentPhaseLatch = phase.get(); + ClusterDescription currentDescription = description; + ServerTuple serverTuple = createCompleteSelectorAndSelectServer( + serverSelector, currentDescription, serverDeprioritization, + computedServerSelectionTimeout, operationContext.getTimeoutContext()); + + if (!currentDescription.isCompatibleWithDriver()) { + logAndThrowIncompatibleException(operationContext, serverSelector, currentDescription); + } + if (serverTuple != null) { + ServerAddress serverAddress = serverTuple.getServerDescription().getAddress(); + logServerSelectionSucceeded(operationContext, clusterId, serverAddress, serverSelector, currentDescription); + serverDeprioritization.updateCandidate(serverAddress); + return serverTuple; + } + computedServerSelectionTimeout.onExpired(() -> + logAndThrowTimeoutException(operationContext, serverSelector, currentDescription)); + + if (!selectionWaitingLogged) { + logServerSelectionWaiting(operationContext, clusterId, computedServerSelectionTimeout, serverSelector, currentDescription); + selectionWaitingLogged = true; + } + connect(); + + Timeout heartbeatLimitedTimeout = Timeout.earliest( + computedServerSelectionTimeout, + startMinWaitHeartbeatTimeout()); + + heartbeatLimitedTimeout.awaitOn(currentPhaseLatch, + () -> format("waiting for a server that matches %s", serverSelector)); + } + } + + @Override + public void selectServerAsync(final ServerSelector serverSelector, final OperationContext operationContext, + final SingleResultCallback callback) { + if (isClosed()) { + callback.onResult(null, new MongoClientException("Cluster was closed during server selection.")); + } + + Timeout computedServerSelectionTimeout = operationContext.getTimeoutContext().computeServerSelectionTimeout(); + ServerSelectionRequest request = new ServerSelectionRequest( + serverSelector, operationContext, computedServerSelectionTimeout, callback); + + CountDownLatch currentPhase = phase.get(); + ClusterDescription currentDescription = description; + + logServerSelectionStarted(operationContext, clusterId, serverSelector, currentDescription); + + if 
(!handleServerSelectionRequest(request, currentPhase, currentDescription)) { + notifyWaitQueueHandler(request); + } + } + + public ClusterId getClusterId() { + return clusterId; + } + + public ClusterSettings getSettings() { + return settings; + } + + public ClusterableServerFactory getServerFactory() { + return serverFactory; + } + + protected abstract void connect(); + + @Override + public void close() { + if (!isClosed()) { + isClosed = true; + phase.get().countDown(); + fireChangeEvent(new ClusterDescription(settings.getMode(), UNKNOWN, emptyList(), settings, serverFactory.getSettings()), + description); + logTopologyMonitoringStopping(clusterId); + ClusterClosedEvent clusterClosedEvent = new ClusterClosedEvent(clusterId); + clusterListener.clusterClosed(clusterClosedEvent); + stopWaitQueueHandler(); + } + } + + @Override + public boolean isClosed() { + return isClosed; + } + + protected void updateDescription(final ClusterDescription newDescription) { + withLock(() -> { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(format("Updating cluster description to %s", newDescription.getShortDescription())); + } + + description = newDescription; + updatePhase(); + }); + } + + /** + * Subclasses must ensure that this method is called in a way that events are delivered in a predictable order. + * Typically, this means calling it while holding a lock that includes both updates to the cluster state and firing the event. + */ + protected void fireChangeEvent(final ClusterDescription newDescription, final ClusterDescription previousDescription) { + if (!wouldDescriptionsGenerateEquivalentEvents(newDescription, previousDescription)) { + ClusterDescriptionChangedEvent changedEvent = new ClusterDescriptionChangedEvent(getClusterId(), newDescription, previousDescription); + logTopologyDescriptionChanged(getClusterId(), changedEvent); + clusterListener.clusterDescriptionChanged(changedEvent); + } + } + + @Override + public ClusterDescription getCurrentDescription() { + return description; + } + + @Override + public void withLock(final Runnable action) { + withInterruptibleLock(lock, action); + } + + private void updatePhase() { + withLock(() -> phase.getAndSet(new CountDownLatch(1)).countDown()); + } + + private Timeout startMinWaitHeartbeatTimeout() { + long minHeartbeatFrequency = serverFactory.getSettings().getMinHeartbeatFrequency(NANOSECONDS); + minHeartbeatFrequency = Math.max(0, minHeartbeatFrequency); + return Timeout.expiresIn(minHeartbeatFrequency, NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED); + } + + private boolean handleServerSelectionRequest( + final ServerSelectionRequest request, final CountDownLatch currentPhase, + final ClusterDescription description) { + + try { + OperationContext operationContext = request.getOperationContext(); + if (currentPhase != request.phase) { + CountDownLatch prevPhase = request.phase; + request.phase = currentPhase; + if (!description.isCompatibleWithDriver()) { + logAndThrowIncompatibleException(operationContext, request.originalSelector, description); + } + + + ServerDeprioritization serverDeprioritization = request.operationContext.getServerDeprioritization(); + ServerTuple serverTuple = createCompleteSelectorAndSelectServer( + request.originalSelector, + description, + serverDeprioritization, + request.getTimeout(), + operationContext.getTimeoutContext()); + + if (serverTuple != null) { + ServerAddress serverAddress = serverTuple.getServerDescription().getAddress(); + logServerSelectionSucceeded(operationContext, clusterId, serverAddress, 
request.originalSelector, description); + serverDeprioritization.updateCandidate(serverAddress); + request.onResult(serverTuple, null); + return true; + } + if (prevPhase == null) { + logServerSelectionWaiting(operationContext, clusterId, request.getTimeout(), request.originalSelector, description); + } + } + + Timeout.onExistsAndExpired(request.getTimeout(), () -> { + logAndThrowTimeoutException(operationContext, request.originalSelector, description); + }); + return false; + } catch (Exception e) { + request.onResult(null, e); + return true; + } + } + + @Nullable + private ServerTuple createCompleteSelectorAndSelectServer( + final ServerSelector serverSelector, + final ClusterDescription clusterDescription, + final ServerDeprioritization serverDeprioritization, + final Timeout serverSelectionTimeout, + final TimeoutContext timeoutContext) { + return createCompleteSelectorAndSelectServer( + serverSelector, + clusterDescription, + getServersSnapshot(serverSelectionTimeout, timeoutContext), + serverDeprioritization, + settings); + } + + @Nullable + @VisibleForTesting(otherwise = PRIVATE) + static ServerTuple createCompleteSelectorAndSelectServer( + final ServerSelector serverSelector, + final ClusterDescription clusterDescription, + final ServersSnapshot serversSnapshot, + final ServerDeprioritization serverDeprioritization, + final ClusterSettings settings) { + ServerSelector completeServerSelector = getCompleteServerSelector(serverSelector, serverDeprioritization, serversSnapshot, settings); + return completeServerSelector.select(clusterDescription) + .stream() + .map(serverDescription -> new ServerTuple( + assertNotNull(serversSnapshot.getServer(serverDescription.getAddress())), + serverDescription)) + .findAny() + .orElse(null); + } + + private static ServerSelector getCompleteServerSelector( + final ServerSelector serverSelector, + final ServerDeprioritization serverDeprioritization, + final ServersSnapshot serversSnapshot, + final ClusterSettings settings) { + List selectors = Stream.of( + getRaceConditionPreFilteringSelector(serversSnapshot), + serverSelector, + serverDeprioritization.getServerSelector(), + settings.getServerSelector(), // may be null + new LatencyMinimizingServerSelector(settings.getLocalThreshold(MILLISECONDS), MILLISECONDS), + AtMostTwoRandomServerSelector.instance(), + new MinimumOperationCountServerSelector(serversSnapshot) + ).filter(Objects::nonNull).collect(toList()); + return new CompositeServerSelector(selectors); + } + + private static ServerSelector getRaceConditionPreFilteringSelector(final ServersSnapshot serversSnapshot) { + // The set of `Server`s maintained by the `Cluster` is updated concurrently with `clusterDescription` being read. + // Additionally, that set of servers continues to be concurrently updated while `serverSelector` selects. + // This race condition means that we are not guaranteed to observe all the servers from `clusterDescription` + // among the `Server`s maintained by the `Cluster`. + // To deal with this race condition, we take `serversSnapshot` of that set of `Server`s + // (the snapshot itself does not have to be atomic) non-atomically with reading `clusterDescription` + // (this means, `serversSnapshot` and `clusterDescription` are not guaranteed to be consistent with each other), + // and do pre-filtering to make sure that the only `ServerDescription`s we may select, + // are of those `Server`s that are known to both `clusterDescription` and `serversSnapshot`. 
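+ // For example: if `clusterDescription` still lists servers A, B, and C, but `serversSnapshot`
+ // currently contains only A and B (C has just been removed, or has not been added yet),
+ // then only the descriptions of A and B survive this pre-filtering and remain selectable.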
+ return clusterDescription -> clusterDescription.getServerDescriptions() + .stream() + .filter(serverDescription -> serversSnapshot.containsServer(serverDescription.getAddress())) + .collect(toList()); + } + + protected ClusterableServer createServer(final ServerAddress serverAddress) { + return serverFactory.create(this, serverAddress); + } + + private void logAndThrowIncompatibleException( + final OperationContext operationContext, + final ServerSelector serverSelector, + final ClusterDescription clusterDescription) { + MongoIncompatibleDriverException exception = createIncompatibleException(clusterDescription); + logServerSelectionFailed(operationContext, clusterId, exception, serverSelector, clusterDescription); + throw exception; + } + + private MongoIncompatibleDriverException createIncompatibleException(final ClusterDescription curDescription) { + String message; + ServerDescription incompatibleServer = curDescription.findServerIncompatiblyOlderThanDriver(); + if (incompatibleServer != null) { + message = format("Server at %s reports wire version %d, but this version of the driver requires at least %d (MongoDB %s).", + incompatibleServer.getAddress(), incompatibleServer.getMaxWireVersion(), + MIN_DRIVER_WIRE_VERSION, MIN_DRIVER_SERVER_VERSION); + } else { + incompatibleServer = curDescription.findServerIncompatiblyNewerThanDriver(); + if (incompatibleServer != null) { + message = format("Server at %s requires wire version %d, but this version of the driver only supports up to %d.", + incompatibleServer.getAddress(), incompatibleServer.getMinWireVersion(), MAX_DRIVER_WIRE_VERSION); + } else { + throw new IllegalStateException("Server can't be both older than the driver and newer."); + } + } + return new MongoIncompatibleDriverException(message, curDescription); + } + + private void logAndThrowTimeoutException( + final OperationContext operationContext, + final ServerSelector serverSelector, + final ClusterDescription clusterDescription) { + String message = format( + "Timed out while waiting for a server that matches %s. Client view of cluster state is %s", + serverSelector, clusterDescription.getShortDescription()); + + MongoTimeoutException exception = operationContext.getTimeoutContext().hasTimeoutMS() + ? 
new MongoOperationTimeoutException(message) : new MongoTimeoutException(message); + + logServerSelectionFailed(operationContext, clusterId, exception, serverSelector, clusterDescription); + throw exception; + } + + private static final class ServerSelectionRequest { + private final ServerSelector originalSelector; + private final SingleResultCallback callback; + private final OperationContext operationContext; + private final Timeout timeout; + private CountDownLatch phase; + + ServerSelectionRequest( + final ServerSelector serverSelector, + final OperationContext operationContext, + final Timeout timeout, + final SingleResultCallback callback) { + this.originalSelector = serverSelector; + this.operationContext = operationContext; + this.timeout = timeout; + this.callback = callback; + } + + void onResult(@Nullable final ServerTuple serverTuple, @Nullable final Throwable t) { + try { + callback.onResult(serverTuple, t); + } catch (Throwable tr) { + // ignore + } + } + + Timeout getTimeout() { + return timeout; + } + + public OperationContext getOperationContext() { + return operationContext; + } + } + + private void notifyWaitQueueHandler(final ServerSelectionRequest request) { + withLock(() -> { + if (isClosed) { + return; + } + + waitQueue.add(request); + + if (waitQueueHandler == null) { + waitQueueHandler = new Thread(new WaitQueueHandler(), "cluster-" + clusterId.getValue()); + waitQueueHandler.setDaemon(true); + waitQueueHandler.start(); + } else { + updatePhase(); + } + }); + } + + private void stopWaitQueueHandler() { + withLock(() -> { + if (waitQueueHandler != null) { + waitQueueHandler.interrupt(); + } + }); + } + + private final class WaitQueueHandler implements Runnable { + + WaitQueueHandler() { + } + + public void run() { + try { + while (!isClosed) { + CountDownLatch currentPhase = phase.get(); + ClusterDescription curDescription = description; + + Timeout timeout = Timeout.infinite(); + boolean someWaitersNotSatisfied = false; + for (Iterator iter = waitQueue.iterator(); iter.hasNext();) { + ServerSelectionRequest currentRequest = iter.next(); + if (handleServerSelectionRequest(currentRequest, currentPhase, curDescription)) { + iter.remove(); + } else { + someWaitersNotSatisfied = true; + timeout = Timeout.earliest( + timeout, + currentRequest.getTimeout(), + startMinWaitHeartbeatTimeout()); + } + } + + if (someWaitersNotSatisfied) { + connect(); + } + + try { + timeout.awaitOn(currentPhase, () -> "ignored"); + } catch (MongoInterruptedException closed) { + // The cluster has been closed and the while loop will exit. + } + } + // Notify all remaining waiters that a shutdown is in progress + for (Iterator iter = waitQueue.iterator(); iter.hasNext();) { + iter.next().onResult(null, new MongoClientException("Shutdown in progress")); + iter.remove(); + } + } catch (Throwable t) { + LOGGER.error(this + " stopped working. 
You may want to recreate the MongoClient", t); + throw t; + } + } + } + + static void logServerSelectionStarted( + final OperationContext operationContext, + final ClusterId clusterId, + final ServerSelector serverSelector, + final ClusterDescription clusterDescription) { + if (STRUCTURED_LOGGER.isRequired(DEBUG, clusterId)) { + STRUCTURED_LOGGER.log(new LogMessage( + SERVER_SELECTION, DEBUG, "Server selection started", clusterId, + asList( + new Entry(OPERATION, operationContext.getOperationName()), + new Entry(OPERATION_ID, operationContext.getId()), + new Entry(SELECTOR, serverSelector.toString()), + new Entry(TOPOLOGY_DESCRIPTION, clusterDescription.getShortDescription())), + "Server selection started for operation[ {}] with ID {}. Selector: {}, topology description: {}")); + } + } + + private static void logServerSelectionWaiting( + final OperationContext operationContext, + final ClusterId clusterId, + final Timeout timeout, + final ServerSelector serverSelector, + final ClusterDescription clusterDescription) { + if (STRUCTURED_LOGGER.isRequired(INFO, clusterId)) { + STRUCTURED_LOGGER.log(new LogMessage( + SERVER_SELECTION, INFO, "Waiting for suitable server to become available", clusterId, + asList( + new Entry(OPERATION, operationContext.getOperationName()), + new Entry(OPERATION_ID, operationContext.getId()), + timeout.call(MILLISECONDS, + () -> new Entry(REMAINING_TIME_MS, "infinite"), + (ms) -> new Entry(REMAINING_TIME_MS, ms), + () -> new Entry(REMAINING_TIME_MS, 0L)), + new Entry(SELECTOR, serverSelector.toString()), + new Entry(TOPOLOGY_DESCRIPTION, clusterDescription.getShortDescription())), + "Waiting for server to become available for operation[ {}] with ID {}.[ Remaining time: {} ms.]" + + " Selector: {}, topology description: {}.")); + } + } + + private static void logServerSelectionFailed( + final OperationContext operationContext, + final ClusterId clusterId, + final MongoException failure, + final ServerSelector serverSelector, + final ClusterDescription clusterDescription) { + if (STRUCTURED_LOGGER.isRequired(DEBUG, clusterId)) { + String failureDescription = failure instanceof MongoTimeoutException + // This hardcoded message guarantees that the `FAILURE` entry for `MongoTimeoutException` does not include + // any information that is specified via other entries, e.g., `SELECTOR` and `TOPOLOGY_DESCRIPTION`. + // The logging spec requires us to avoid such duplication of information. + ? MongoTimeoutException.class.getName() + ": Timed out while waiting for a suitable server" + : failure.toString(); + STRUCTURED_LOGGER.log(new LogMessage( + SERVER_SELECTION, DEBUG, "Server selection failed", clusterId, + asList( + new Entry(OPERATION, operationContext.getOperationName()), + new Entry(OPERATION_ID, operationContext.getId()), + new Entry(FAILURE, failureDescription), + new Entry(SELECTOR, serverSelector.toString()), + new Entry(TOPOLOGY_DESCRIPTION, clusterDescription.getShortDescription())), + "Server selection failed for operation[ {}] with ID {}. Failure: {}. 
Selector: {}, topology description: {}")); + } + } + + static void logServerSelectionSucceeded( + final OperationContext operationContext, + final ClusterId clusterId, + final ServerAddress serverAddress, + final ServerSelector serverSelector, + final ClusterDescription clusterDescription) { + if (STRUCTURED_LOGGER.isRequired(DEBUG, clusterId)) { + STRUCTURED_LOGGER.log(new LogMessage( + SERVER_SELECTION, DEBUG, "Server selection succeeded", clusterId, + asList( + new Entry(OPERATION, operationContext.getOperationName()), + new Entry(OPERATION_ID, operationContext.getId()), + new Entry(SERVER_HOST, serverAddress.getHost()), + new Entry(SERVER_PORT, serverAddress instanceof UnixServerAddress ? null : serverAddress.getPort()), + new Entry(SELECTOR, serverSelector.toString()), + new Entry(TOPOLOGY_DESCRIPTION, clusterDescription.getShortDescription())), + "Server selection succeeded for operation[ {}] with ID {}. Selected server: {}[:{}]." + + " Selector: {}, topology description: {}")); + } + } + + static void logTopologyMonitoringStarting(final ClusterId clusterId) { + if (STRUCTURED_LOGGER.isRequired(DEBUG, clusterId)) { + STRUCTURED_LOGGER.log(new LogMessage( + TOPOLOGY, DEBUG, "Starting topology monitoring", clusterId, + singletonList(new Entry(TOPOLOGY_ID, clusterId)), + "Starting monitoring for topology with ID {}")); + } + } + + static void logTopologyDescriptionChanged( + final ClusterId clusterId, + final ClusterDescriptionChangedEvent clusterDescriptionChangedEvent) { + if (STRUCTURED_LOGGER.isRequired(DEBUG, clusterId)) { + STRUCTURED_LOGGER.log(new LogMessage( + TOPOLOGY, DEBUG, "Topology description changed", clusterId, + asList( + new Entry(TOPOLOGY_ID, clusterId), + new Entry(TOPOLOGY_PREVIOUS_DESCRIPTION, + clusterDescriptionChangedEvent.getPreviousDescription().getShortDescription()), + new Entry(TOPOLOGY_NEW_DESCRIPTION, + clusterDescriptionChangedEvent.getNewDescription().getShortDescription())), + "Description changed for topology with ID {}. Previous description: {}. New description: {}")); + } + } + + static void logTopologyMonitoringStopping(final ClusterId clusterId) { + if (STRUCTURED_LOGGER.isRequired(DEBUG, clusterId)) { + STRUCTURED_LOGGER.log(new LogMessage( + TOPOLOGY, DEBUG, "Stopped topology monitoring", clusterId, + singletonList(new Entry(TOPOLOGY_ID, clusterId)), + "Stopped monitoring for topology with ID {}")); + } + } + +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/BsonWriterDecorator.java b/driver-core/src/main/com/mongodb/internal/connection/BsonWriterDecorator.java new file mode 100644 index 00000000000..0ac9f51610d --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/BsonWriterDecorator.java @@ -0,0 +1,278 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
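The tail of the selector chain built in `getCompleteServerSelector` above, `AtMostTwoRandomServerSelector` followed by `MinimumOperationCountServerSelector`, is in effect the classic "power of two choices" load-balancing idea: sample two candidates at random and keep the less loaded one. A self-contained sketch of that idea, with invented names and loads, assuming nothing from the driver:

```java
import java.util.Arrays;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.Random;
import java.util.function.ToIntFunction;

// "Power of two choices": pick two distinct candidates uniformly at random,
// then keep the one with the lower in-progress operation count. This captures
// most of the benefit of least-loaded selection without ranking every server.
public final class PowerOfTwoChoicesSketch {
    static <T> T pick(final List<T> candidates, final ToIntFunction<T> load, final Random random) {
        if (candidates.isEmpty()) {
            throw new NoSuchElementException("no candidates to pick from");
        }
        if (candidates.size() == 1) {
            return candidates.get(0);
        }
        int i = random.nextInt(candidates.size());
        int j = random.nextInt(candidates.size() - 1);
        if (j >= i) {
            j++; // shift to guarantee two distinct indices
        }
        T a = candidates.get(i);
        T b = candidates.get(j);
        return load.applyAsInt(a) <= load.applyAsInt(b) ? a : b;
    }

    public static void main(final String[] args) {
        List<String> servers = Arrays.asList("a:27017", "b:27017", "c:27017");
        // Loads are hardcoded here purely for demonstration.
        String chosen = pick(servers, s -> s.equals("b:27017") ? 1 : 5, new Random(42));
        System.out.println(chosen);
    }
}
```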
+ */ + +package com.mongodb.internal.connection; + +import org.bson.BsonBinary; +import org.bson.BsonDbPointer; +import org.bson.BsonReader; +import org.bson.BsonRegularExpression; +import org.bson.BsonTimestamp; +import org.bson.BsonWriter; +import org.bson.types.Decimal128; +import org.bson.types.ObjectId; + +import static org.bson.assertions.Assertions.notNull; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class BsonWriterDecorator implements BsonWriter { + private final BsonWriter bsonWriter; + + BsonWriterDecorator(final BsonWriter bsonWriter) { + this.bsonWriter = notNull("bsonWriter", bsonWriter); + } + + BsonWriter getBsonWriter() { + return bsonWriter; + } + + @Override + public void writeStartDocument(final String name) { + bsonWriter.writeStartDocument(name); + } + + @Override + public void writeStartDocument() { + bsonWriter.writeStartDocument(); + } + + @Override + public void writeEndDocument() { + bsonWriter.writeEndDocument(); + } + + @Override + public void writeStartArray(final String name) { + bsonWriter.writeStartArray(name); + } + + @Override + public void writeStartArray() { + bsonWriter.writeStartArray(); + } + + @Override + public void writeEndArray() { + bsonWriter.writeEndArray(); + } + + @Override + public void writeBinaryData(final String name, final BsonBinary binary) { + bsonWriter.writeBinaryData(name, binary); + } + + @Override + public void writeBinaryData(final BsonBinary binary) { + bsonWriter.writeBinaryData(binary); + } + + @Override + public void writeBoolean(final String name, final boolean value) { + bsonWriter.writeBoolean(name, value); + } + + @Override + public void writeBoolean(final boolean value) { + bsonWriter.writeBoolean(value); + } + + @Override + public void writeDateTime(final String name, final long value) { + bsonWriter.writeDateTime(name, value); + } + + @Override + public void writeDateTime(final long value) { + bsonWriter.writeDateTime(value); + } + + @Override + public void writeDBPointer(final String name, final BsonDbPointer value) { + bsonWriter.writeDBPointer(name, value); + } + + @Override + public void writeDBPointer(final BsonDbPointer value) { + bsonWriter.writeDBPointer(value); + } + + @Override + public void writeDouble(final String name, final double value) { + bsonWriter.writeDouble(name, value); + } + + @Override + public void writeDouble(final double value) { + bsonWriter.writeDouble(value); + } + + @Override + public void writeInt32(final String name, final int value) { + bsonWriter.writeInt32(name, value); + } + + @Override + public void writeInt32(final int value) { + bsonWriter.writeInt32(value); + } + + @Override + public void writeInt64(final String name, final long value) { + bsonWriter.writeInt64(name, value); + } + + @Override + public void writeInt64(final long value) { + bsonWriter.writeInt64(value); + } + + @Override + public void writeDecimal128(final Decimal128 value) { + bsonWriter.writeDecimal128(value); + } + + @Override + public void writeDecimal128(final String name, final Decimal128 value) { + bsonWriter.writeDecimal128(name, value); + } + + @Override + public void writeJavaScript(final String name, final String code) { + bsonWriter.writeJavaScript(name, code); + } + + @Override + public void writeJavaScript(final String code) { + bsonWriter.writeJavaScript(code); + } + + @Override + public void writeJavaScriptWithScope(final String name, final String code) { + bsonWriter.writeJavaScriptWithScope(name, code); + } + + @Override + public void writeJavaScriptWithScope(final String code) { + bsonWriter.writeJavaScriptWithScope(code); + } + + @Override + public void writeMaxKey(final String name) { + bsonWriter.writeMaxKey(name); + } + + @Override + public void writeMaxKey() { + bsonWriter.writeMaxKey(); + } + + @Override + public void writeMinKey(final String name) { + bsonWriter.writeMinKey(name); + } + + @Override + public void writeMinKey() { + bsonWriter.writeMinKey(); + } + + @Override + 
public void writeName(final String name) { + bsonWriter.writeName(name); + } + + @Override + public void writeNull(final String name) { + bsonWriter.writeNull(name); + } + + @Override + public void writeNull() { + bsonWriter.writeNull(); + } + + @Override + public void writeObjectId(final String name, final ObjectId objectId) { + bsonWriter.writeObjectId(name, objectId); + } + + @Override + public void writeObjectId(final ObjectId objectId) { + bsonWriter.writeObjectId(objectId); + } + + @Override + public void writeRegularExpression(final String name, final BsonRegularExpression regularExpression) { + bsonWriter.writeRegularExpression(name, regularExpression); + } + + @Override + public void writeRegularExpression(final BsonRegularExpression regularExpression) { + bsonWriter.writeRegularExpression(regularExpression); + } + + @Override + public void writeString(final String name, final String value) { + bsonWriter.writeString(name, value); + } + + @Override + public void writeString(final String value) { + bsonWriter.writeString(value); + } + + @Override + public void writeSymbol(final String name, final String value) { + bsonWriter.writeSymbol(name, value); + } + + @Override + public void writeSymbol(final String value) { + bsonWriter.writeSymbol(value); + } + + @Override + public void writeTimestamp(final String name, final BsonTimestamp value) { + bsonWriter.writeTimestamp(name, value); + } + + @Override + public void writeTimestamp(final BsonTimestamp value) { + bsonWriter.writeTimestamp(value); + } + + @Override + public void writeUndefined(final String name) { + bsonWriter.writeUndefined(name); + } + + @Override + public void writeUndefined() { + bsonWriter.writeUndefined(); + } + + @Override + public void pipe(final BsonReader reader) { + bsonWriter.pipe(reader); + } + + @Override + public void flush() { + bsonWriter.flush(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/BsonWriterHelper.java b/driver-core/src/main/com/mongodb/internal/connection/BsonWriterHelper.java new file mode 100644 index 00000000000..63ccbf62a04 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/BsonWriterHelper.java @@ -0,0 +1,271 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
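One reason for a pure delegating decorator like `BsonWriterDecorator`: a subclass can override just the hooks it cares about and inherit delegation for everything else. The subclass below is hypothetical, shown only to illustrate the pattern (it would need to live in the same package, since the constructor is package-private):

```java
package com.mongodb.internal.connection;

import org.bson.BsonWriter;

// Hypothetical decorator subclass: rejects field names starting with '$' and
// delegates every other write to the wrapped writer via BsonWriterDecorator.
final class DollarRejectingBsonWriter extends BsonWriterDecorator {
    DollarRejectingBsonWriter(final BsonWriter wrapped) {
        super(wrapped);
    }

    @Override
    public void writeName(final String name) {
        if (name.startsWith("$")) {
            throw new IllegalArgumentException("field name must not start with '$': " + name);
        }
        super.writeName(name);
    }
}
```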
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.internal.connection.DualMessageSequences.EncodeDocumentsResult; +import com.mongodb.internal.connection.DualMessageSequences.WritersProviderAndLimitsChecker; +import com.mongodb.lang.Nullable; +import org.bson.BsonBinaryWriter; +import org.bson.BsonBinaryWriterSettings; +import org.bson.BsonContextType; +import org.bson.BsonDocument; +import org.bson.BsonElement; +import org.bson.BsonMaximumSizeExceededException; +import org.bson.BsonValue; +import org.bson.BsonWriter; +import org.bson.BsonWriterSettings; +import org.bson.FieldNameValidator; +import org.bson.codecs.BsonValueCodecProvider; +import org.bson.codecs.Encoder; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.io.BsonOutput; + +import java.util.List; + +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.internal.connection.DualMessageSequences.WritersProviderAndLimitsChecker.WriteResult.FAIL_LIMIT_EXCEEDED; +import static com.mongodb.internal.connection.DualMessageSequences.WritersProviderAndLimitsChecker.WriteResult.OK_LIMIT_NOT_REACHED; +import static com.mongodb.internal.connection.DualMessageSequences.WritersProviderAndLimitsChecker.WriteResult.OK_LIMIT_REACHED; +import static com.mongodb.internal.connection.MessageSettings.DOCUMENT_HEADROOM_SIZE; +import static java.lang.String.format; +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class BsonWriterHelper { + private static final CodecRegistry REGISTRY = fromProviders(new BsonValueCodecProvider()); + private static final EncoderContext ENCODER_CONTEXT = EncoderContext.builder().build(); + + static void appendElementsToDocument( + final BsonOutput bsonOutputWithDocument, + final int documentStartPosition, + @Nullable final List bsonElements) { + if ((bsonElements == null) || bsonElements.isEmpty()) { + return; + } + try (AppendingBsonWriter writer = new AppendingBsonWriter(bsonOutputWithDocument, documentStartPosition)) { + for (BsonElement element : bsonElements) { + String name = element.getName(); + BsonValue value = element.getValue(); + writer.writeName(name); + encodeUsingRegistry(writer, value); + } + } + } + + static void writePayloadArray(final BsonWriter writer, final BsonOutput bsonOutput, final MessageSettings settings, + final int messageStartPosition, final SplittablePayload payload, final int maxSplittableDocumentSize) { + writer.writeStartArray(payload.getPayloadName()); + writePayload(writer, bsonOutput, getDocumentMessageSettings(settings), messageStartPosition, payload, maxSplittableDocumentSize); + writer.writeEndArray(); + } + + static void writePayload(final BsonWriter writer, final BsonOutput bsonOutput, final MessageSettings settings, + final int messageStartPosition, final SplittablePayload payload, final int maxSplittableDocumentSize) { + MessageSettings payloadSettings = getPayloadMessageSettings(payload.getPayloadType(), settings); + List payloadDocuments = payload.getPayload(); + for (int i = 0; i < payloadDocuments.size(); i++) { + if (writeDocument(writer, bsonOutput, payloadSettings, payloadDocuments.get(i), messageStartPosition, i + 1, + maxSplittableDocumentSize)) { + payload.setPosition(i + 1); + } else { + break; + } + } + + if (payload.getPosition() == 0) { + throw 
createBsonMaximumSizeExceededException(payloadSettings.getMaxDocumentSize()); + } + } + + /** + * @return See {@link DualMessageSequences#encodeDocuments(WritersProviderAndLimitsChecker)}. + */ + static EncodeDocumentsResult writeDocumentsOfDualMessageSequences( + final DualMessageSequences dualMessageSequences, + final int commandDocumentSizeInBytes, + final BsonOutput firstOutput, + final BsonOutput secondOutput, + final MessageSettings messageSettings) { + BsonBinaryWriter firstWriter = createBsonBinaryWriter(firstOutput, dualMessageSequences.getFirstFieldNameValidator(), null); + BsonBinaryWriter secondWriter = createBsonBinaryWriter(secondOutput, dualMessageSequences.getSecondFieldNameValidator(), null); + // the size of operation-agnostic command fields (a.k.a. extra elements) is counted towards `messageOverheadInBytes` + int messageOverheadInBytes = 1000; + int maxSizeInBytes = messageSettings.getMaxMessageSize() - (messageOverheadInBytes + commandDocumentSizeInBytes); + int firstStart = firstOutput.getPosition(); + int secondStart = secondOutput.getPosition(); + int maxBatchCount = messageSettings.getMaxBatchCount(); + return dualMessageSequences.encodeDocuments(writeAction -> { + int firstBeforeWritePosition = firstOutput.getPosition(); + int secondBeforeWritePosition = secondOutput.getPosition(); + int batchCountAfterWrite = writeAction.doAndGetBatchCount(firstWriter, secondWriter); + assertTrue(batchCountAfterWrite <= maxBatchCount); + int writtenSizeInBytes = + firstOutput.getPosition() - firstStart + + secondOutput.getPosition() - secondStart; + if (writtenSizeInBytes < maxSizeInBytes && batchCountAfterWrite < maxBatchCount) { + return OK_LIMIT_NOT_REACHED; + } else if (writtenSizeInBytes > maxSizeInBytes) { + firstOutput.truncateToPosition(firstBeforeWritePosition); + secondOutput.truncateToPosition(secondBeforeWritePosition); + if (batchCountAfterWrite == 1) { + // we have failed to write a single document + throw createBsonMaximumSizeExceededException(messageSettings.getMaxDocumentSize()); + } + return FAIL_LIMIT_EXCEEDED; + } else { + return OK_LIMIT_REACHED; + } + }); + } + + /** + * @param messageSettings Non-{@code null} iff the document size limit must be validated. + */ + static BsonBinaryWriter createBsonBinaryWriter( + final BsonOutput out, + final FieldNameValidator validator, + @Nullable final MessageSettings messageSettings) { + return new BsonBinaryWriter( + new BsonWriterSettings(), + messageSettings == null + ? new BsonBinaryWriterSettings() + : new BsonBinaryWriterSettings(messageSettings.getMaxDocumentSize() + DOCUMENT_HEADROOM_SIZE), + out, + validator); + } + + /** + * Backpatches the document/message/sequence length into the beginning of the document/message/sequence. + * + * @param startPosition The start position of the document/message/sequence in {@code bsonOutput}. 
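+ * @param bsonOutput The output containing the document/message/sequence, whose current
+ * {@linkplain BsonOutput#getPosition() position} must be at its end.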
+ */ + static void backpatchLength(final int startPosition, final BsonOutput bsonOutput) { + int messageLength = bsonOutput.getPosition() - startPosition; + bsonOutput.writeInt32(startPosition, messageLength); + } + + private static BsonMaximumSizeExceededException createBsonMaximumSizeExceededException(final int maxSize) { + return new BsonMaximumSizeExceededException(format("Payload document size is larger than maximum of %d.", maxSize)); + } + + private static boolean writeDocument(final BsonWriter writer, final BsonOutput bsonOutput, final MessageSettings settings, + final BsonDocument document, final int messageStartPosition, final int batchItemCount, + final int maxSplittableDocumentSize) { + int currentPosition = bsonOutput.getPosition(); + encodeUsingRegistry(writer, document); + int messageSize = bsonOutput.getPosition() - messageStartPosition; + int documentSize = bsonOutput.getPosition() - currentPosition; + if (exceedsLimits(settings, messageSize, documentSize, batchItemCount) + || (batchItemCount > 1 && bsonOutput.getPosition() - messageStartPosition > maxSplittableDocumentSize)) { + bsonOutput.truncateToPosition(currentPosition); + return false; + } + return true; + } + + static void encodeUsingRegistry(final BsonWriter writer, final BsonValue value) { + @SuppressWarnings("unchecked") + Encoder encoder = (Encoder) REGISTRY.get(value.getClass()); + encoder.encode(writer, value, ENCODER_CONTEXT); + } + + private static MessageSettings getPayloadMessageSettings(final SplittablePayload.Type type, final MessageSettings settings) { + MessageSettings payloadMessageSettings = settings; + if (type != SplittablePayload.Type.INSERT) { + payloadMessageSettings = createMessageSettingsBuilder(settings) + .maxDocumentSize(settings.getMaxDocumentSize() + DOCUMENT_HEADROOM_SIZE) + .build(); + } + return payloadMessageSettings; + } + + private static MessageSettings getDocumentMessageSettings(final MessageSettings settings) { + return createMessageSettingsBuilder(settings) + .maxMessageSize(settings.getMaxDocumentSize() + DOCUMENT_HEADROOM_SIZE) + .build(); + } + + private static MessageSettings.Builder createMessageSettingsBuilder(final MessageSettings settings) { + return MessageSettings.builder() + .maxBatchCount(settings.getMaxBatchCount()) + .maxMessageSize(settings.getMaxMessageSize()) + .maxDocumentSize(settings.getMaxDocumentSize()) + .maxWireVersion(settings.getMaxWireVersion()); + } + + private static boolean exceedsLimits(final MessageSettings settings, final int messageSize, final int documentSize, + final int batchItemCount) { + if (batchItemCount > settings.getMaxBatchCount()) { + return true; + } else if (messageSize > settings.getMaxMessageSize()) { + return true; + } else if (documentSize > settings.getMaxDocumentSize()) { + return true; + } + return false; + } + + /** + * A {@link BsonWriter} that allows appending key/value pairs to a document that has been fully written to a {@link BsonOutput}. + */ + private static final class AppendingBsonWriter extends LevelCountingBsonWriter implements AutoCloseable { + private static final int INITIAL_LEVEL = DEFAULT_INITIAL_LEVEL + 1; + + /** + * @param bsonOutputWithDocument A {@link BsonOutput} {@linkplain BsonOutput#getPosition() positioned} + * immediately after the end of the document. + * @param documentStartPosition The {@linkplain BsonOutput#getPosition() position} of the start of the document + * in {@code bsonOutputWithDocument}. 
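A small, self-contained illustration of the two BsonOutput position tricks used above: backpatchLength patches the int32 length prefix in place via writeInt32(int position, int value), and the dual-sequence writer rolls back an oversized write via truncateToPosition(int). The demo class name is invented; the org.bson.io calls are the real public API:

    import org.bson.io.BasicOutputBuffer;
    import org.bson.io.BsonOutput;

    public final class LengthPrefixDemo {
        public static void main(final String[] args) {
            BsonOutput out = new BasicOutputBuffer();
            int start = out.getPosition();
            out.writeInt32(0);                                // length placeholder, patched below
            out.writeCString("field");                        // some body bytes
            out.writeByte(0);                                 // BSON document terminator
            out.writeInt32(start, out.getPosition() - start); // backpatch the real length, as in backpatchLength
            int mark = out.getPosition();
            out.writeCString("speculative");                  // a write that turns out to exceed the budget...
            out.truncateToPosition(mark);                     // ...rolled back, as on FAIL_LIMIT_EXCEEDED
            System.out.println("size = " + out.getSize());    // only the backpatched document remains
        }
    }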
+ */ + AppendingBsonWriter(final BsonOutput bsonOutputWithDocument, final int documentStartPosition) { + super( + new InternalAppendingBsonBinaryWriter(bsonOutputWithDocument, documentStartPosition), + INITIAL_LEVEL); + } + + @Override + public void writeEndDocument() { + assertTrue(getCurrentLevel() > INITIAL_LEVEL); + super.writeEndDocument(); + } + + @Override + public void close() { + try (InternalAppendingBsonBinaryWriter writer = (InternalAppendingBsonBinaryWriter) getBsonWriter()) { + writer.writeEndDocument(); + } + } + + private static final class InternalAppendingBsonBinaryWriter extends BsonBinaryWriter { + InternalAppendingBsonBinaryWriter(final BsonOutput bsonOutputWithDocument, final int documentStartPosition) { + super(bsonOutputWithDocument); + int documentEndPosition = bsonOutputWithDocument.getPosition(); + int bsonDocumentEndingSize = 1; + int appendFromPosition = documentEndPosition - bsonDocumentEndingSize; + bsonOutputWithDocument.truncateToPosition(appendFromPosition); + setState(State.NAME); + setContext(new Context(null, BsonContextType.DOCUMENT, documentStartPosition)); + } + } + } + + private BsonWriterHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/BufferProvider.java b/driver-core/src/main/com/mongodb/internal/connection/BufferProvider.java new file mode 100644 index 00000000000..6d21322cd1b --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/BufferProvider.java @@ -0,0 +1,34 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.annotations.ThreadSafe; +import org.bson.ByteBuf; + +/** + * A provider of instances of ByteBuf. + */ +@ThreadSafe +public interface BufferProvider { + /** + * Gets a buffer with the given capacity. + * + * @param size the size required for the buffer + * @return a ByteBuf with the given size, which is now owned by the caller and must be released. + */ + ByteBuf getBuffer(int size); +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/BulkWriteBatchCombiner.java b/driver-core/src/main/com/mongodb/internal/connection/BulkWriteBatchCombiner.java new file mode 100644 index 00000000000..b1e7d7f75bb --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/BulkWriteBatchCombiner.java @@ -0,0 +1,214 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
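For reference, the BufferProvider contract can be satisfied in a few lines over plain NIO heap buffers. This is only a sketch (the class name is invented and it assumes same-package visibility of BufferProvider); org.bson.ByteBufNIO is the real NIO-backed ByteBuf implementation:

    import java.nio.ByteBuffer;

    import org.bson.ByteBuf;
    import org.bson.ByteBufNIO;

    // Illustrative only: allocates a fresh heap buffer per request; no pooling, release() simply drops it.
    public final class HeapBufferProvider implements BufferProvider {
        @Override
        public ByteBuf getBuffer(final int size) {
            return new ByteBufNIO(ByteBuffer.allocate(size));
        }
    }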
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoBulkWriteException; +import com.mongodb.ServerAddress; +import com.mongodb.WriteConcern; +import com.mongodb.bulk.BulkWriteError; +import com.mongodb.bulk.BulkWriteInsert; +import com.mongodb.bulk.BulkWriteResult; +import com.mongodb.bulk.BulkWriteUpsert; +import com.mongodb.bulk.WriteConcernError; +import com.mongodb.lang.Nullable; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.TreeSet; + +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.Collections.singletonList; +import static java.util.Comparator.comparingInt; + +/** + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class BulkWriteBatchCombiner { + private final ServerAddress serverAddress; + private final boolean ordered; + private final WriteConcern writeConcern; + + private int insertedCount; + private int matchedCount; + private int deletedCount; + private int modifiedCount = 0; + private final Set writeUpserts = new TreeSet<>(comparingInt(BulkWriteUpsert::getIndex)); + private final Set writeInserts = new TreeSet<>(comparingInt(BulkWriteInsert::getIndex)); + private final Set writeErrors = new TreeSet<>(comparingInt(BulkWriteError::getIndex)); + private final Set errorLabels = new HashSet<>(); + private final List writeConcernErrors = new ArrayList<>(); + + /** + * Construct an instance. + * + * @param serverAddress the server address + * @param ordered ordered + * @param writeConcern the write concern + */ + public BulkWriteBatchCombiner(final ServerAddress serverAddress, final boolean ordered, final WriteConcern writeConcern) { + this.writeConcern = notNull("writeConcern", writeConcern); + this.ordered = ordered; + this.serverAddress = notNull("serverAddress", serverAddress); + } + + /** + * Add a result + * + * @param result the result + */ + public void addResult(final BulkWriteResult result) { + insertedCount += result.getInsertedCount(); + matchedCount += result.getMatchedCount(); + deletedCount += result.getDeletedCount(); + modifiedCount += result.getModifiedCount(); + writeUpserts.addAll(result.getUpserts()); + writeInserts.addAll(result.getInserts()); + } + + /** + * Add an error result + * + * @param exception the exception + * @param indexMap the index map + */ + public void addErrorResult(final MongoBulkWriteException exception, final IndexMap indexMap) { + addResult(exception.getWriteResult()); + errorLabels.addAll(exception.getErrorLabels()); + mergeWriteErrors(exception.getWriteErrors(), indexMap); + mergeWriteConcernError(exception.getWriteConcernError()); + } + + /** + * Add a write error result + * + * @param writeError the write error + * @param indexMap the index map + */ + public void addWriteErrorResult(final BulkWriteError writeError, final IndexMap indexMap) { + notNull("writeError", writeError); + mergeWriteErrors(singletonList(writeError), indexMap); + } + + /** + * Add a write concern error result + * + * @param writeConcernError the write concern error + */ + public void addWriteConcernErrorResult(final WriteConcernError writeConcernError) { + notNull("writeConcernError", writeConcernError); + mergeWriteConcernError(writeConcernError); + } + + /** + * Add a list of error results and a write concern error + * + * @param writeErrors the errors + * @param writeConcernError the write concern error + * @param indexMap the index map + */ + public void addErrorResult(final List writeErrors, + final WriteConcernError writeConcernError, final IndexMap indexMap) { + mergeWriteErrors(writeErrors, indexMap); + mergeWriteConcernError(writeConcernError); + } + + /** + * Gets the combined result. + * + * @return the result + */ + public BulkWriteResult getResult() { + throwOnError(); + return createResult(); + } + + /** + * True if ordered and has write errors. + * + * @return true if no more batches should be sent + */ + public boolean shouldStopSendingMoreBatches() { + return ordered && hasWriteErrors(); + } + + /** + * Gets whether there are errors in the combined result. 
+ * + * @return whether there are errors in the combined result + */ + public boolean hasErrors() { + return hasWriteErrors() || hasWriteConcernErrors(); + } + + /** + * Gets the combined errors as an exception + * @return the bulk write exception, or null if there were no errors + */ + @Nullable + public MongoBulkWriteException getError() { + if (!hasErrors()) { + return null; + } + return getErrorNonNullable(); + } + + private MongoBulkWriteException getErrorNonNullable() { + return new MongoBulkWriteException(createResult(), new ArrayList<>(writeErrors), + writeConcernErrors.isEmpty() ? null : writeConcernErrors.get(writeConcernErrors.size() - 1), + serverAddress, errorLabels); + } + + @SuppressWarnings("deprecation") + private void mergeWriteConcernError(@Nullable final WriteConcernError writeConcernError) { + if (writeConcernError != null) { + if (writeConcernErrors.isEmpty()) { + writeConcernErrors.add(writeConcernError); + } else if (!writeConcernError.equals(writeConcernErrors.get(writeConcernErrors.size() - 1))) { + writeConcernErrors.add(writeConcernError); + } + } + } + + private void mergeWriteErrors(final List newWriteErrors, final IndexMap indexMap) { + for (BulkWriteError cur : newWriteErrors) { + writeErrors.add(new BulkWriteError(cur.getCode(), cur.getMessage(), cur.getDetails(), indexMap.map(cur.getIndex()))); + } + } + + private void throwOnError() { + if (hasErrors()) { + throw getErrorNonNullable(); + } + } + + private BulkWriteResult createResult() { + return writeConcern.isAcknowledged() + ? BulkWriteResult.acknowledged(insertedCount, matchedCount, deletedCount, modifiedCount, + new ArrayList<>(writeUpserts), new ArrayList<>(writeInserts)) + : BulkWriteResult.unacknowledged(); + } + + private boolean hasWriteErrors() { + return !writeErrors.isEmpty(); + } + + private boolean hasWriteConcernErrors() { + return !writeConcernErrors.isEmpty(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ByteBufBsonArray.java b/driver-core/src/main/com/mongodb/internal/connection/ByteBufBsonArray.java new file mode 100644 index 00000000000..e02cee12629 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/ByteBufBsonArray.java @@ -0,0 +1,300 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
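To make the combiner's intended flow concrete, here is a hedged usage sketch with invented values: add one BulkWriteResult per batch, stop early once an ordered run records a write error, and let getResult() either return the merged totals or throw MongoBulkWriteException. The demo class is hypothetical; the com.mongodb.bulk API calls are real:

    import static java.util.Collections.emptyList;

    import com.mongodb.ServerAddress;
    import com.mongodb.WriteConcern;
    import com.mongodb.bulk.BulkWriteResult;

    public final class CombinerDemo {
        public static void main(final String[] args) {
            BulkWriteBatchCombiner combiner = new BulkWriteBatchCombiner(
                    new ServerAddress("localhost", 27017), true, WriteConcern.ACKNOWLEDGED);
            // First batch: 2 documents inserted, nothing matched/deleted/modified, no upsert or insert details.
            combiner.addResult(BulkWriteResult.acknowledged(2, 0, 0, 0, emptyList(), emptyList()));
            if (!combiner.shouldStopSendingMoreBatches()) {
                combiner.addResult(BulkWriteResult.acknowledged(3, 0, 0, 0, emptyList(), emptyList()));
            }
            // getResult() throws MongoBulkWriteException instead if any write or write concern error was merged in.
            System.out.println("inserted = " + combiner.getResult().getInsertedCount()); // 5
        }
    }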
+ */ + +package com.mongodb.internal.connection; + +import org.bson.BsonArray; +import org.bson.BsonBinaryReader; +import org.bson.BsonType; +import org.bson.BsonValue; +import org.bson.ByteBuf; +import org.bson.io.ByteBufferBsonInput; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.ListIterator; +import java.util.NoSuchElementException; +import java.util.Objects; + +import static com.mongodb.internal.connection.ByteBufBsonHelper.readBsonValue; + +final class ByteBufBsonArray extends BsonArray { + private final ByteBuf byteBuf; + + ByteBufBsonArray(final ByteBuf byteBuf) { + this.byteBuf = byteBuf; + } + + @Override + public Iterator iterator() { + return new ByteBufBsonArrayIterator(); + } + + @Override + public List getValues() { + List values = new ArrayList<>(); + for (BsonValue cur: this) { + //noinspection UseBulkOperation + values.add(cur); + } + return values; + } + + private static final String READ_ONLY_MESSAGE = "This BsonArray instance is read-only"; + + @Override + public int size() { + int size = 0; + for (BsonValue ignored : this) { + size++; + } + return size; + } + + @Override + public boolean isEmpty() { + return !iterator().hasNext(); + } + + @Override + public boolean equals(final Object o) { + if (o == this) { + return true; + } + if (!(o instanceof List)) { + return false; + } + Iterator e1 = iterator(); + Iterator e2 = ((List) o).iterator(); + while (e1.hasNext() && e2.hasNext()) { + if (!(Objects.equals(e1.next(), e2.next()))) { + return false; + } + } + return !(e1.hasNext() || e2.hasNext()); + } + + @Override + public int hashCode() { + int hashCode = 1; + for (BsonValue cur : this) { + hashCode = 31 * hashCode + (cur == null ? 0 : cur.hashCode()); + } + return hashCode; + } + + @Override + public boolean contains(final Object o) { + for (BsonValue cur : this) { + if (Objects.equals(o, cur)) { + return true; + } + } + + return false; + } + + @Override + public Object[] toArray() { + Object[] retVal = new Object[size()]; + Iterator it = iterator(); + for (int i = 0; i < retVal.length; i++) { + retVal[i] = it.next(); + } + return retVal; + } + + @Override + @SuppressWarnings("unchecked") + public T[] toArray(final T[] a) { + int size = size(); + T[] retVal = a.length >= size ? 
a : (T[]) java.lang.reflect.Array.newInstance(a.getClass().getComponentType(), size); + Iterator it = iterator(); + for (int i = 0; i < retVal.length; i++) { + retVal[i] = (T) it.next(); + } + return retVal; + } + + @Override + public boolean containsAll(final Collection c) { + for (Object e : c) { + if (!contains(e)) { + return false; + } + } + return true; + } + + @Override + public BsonValue get(final int index) { + if (index < 0) { + throw new IndexOutOfBoundsException("Index out of range: " + index); + } + + int i = 0; + for (BsonValue cur : this) { + if (i++ == index) { + return cur; + } + } + + throw new IndexOutOfBoundsException("Index out of range: " + index); + } + + @Override + public int indexOf(final Object o) { + int i = 0; + for (BsonValue cur : this) { + if (Objects.equals(o, cur)) { + return i; + } + i++; + } + + return -1; + } + + @Override + public int lastIndexOf(final Object o) { + ListIterator listIterator = listIterator(size()); + while (listIterator.hasPrevious()) { + if (Objects.equals(o, listIterator.previous())) { + return listIterator.nextIndex(); + } + } + return -1; + } + + @Override + public ListIterator listIterator() { + return listIterator(0); + } + + @Override + public ListIterator listIterator(final int index) { + // Not the most efficient way to do this, but unlikely anyone will notice in practice + return new ArrayList<>(this).listIterator(index); + } + + @Override + public List subList(final int fromIndex, final int toIndex) { + if (fromIndex < 0) { + throw new IndexOutOfBoundsException("fromIndex = " + fromIndex); + } + if (fromIndex > toIndex) { + throw new IllegalArgumentException("fromIndex(" + fromIndex + ") > toIndex(" + toIndex + ")"); + } + List subList = new ArrayList<>(); + int i = 0; + for (BsonValue cur: this) { + if (i == toIndex) { + break; + } + if (i >= fromIndex) { + subList.add(cur); + } + i++; + } + if (toIndex > i) { + throw new IndexOutOfBoundsException("toIndex = " + toIndex); + } + return subList; + } + + @Override + public boolean add(final BsonValue bsonValue) { + throw new UnsupportedOperationException(READ_ONLY_MESSAGE); + } + + @Override + public boolean remove(final Object o) { + throw new UnsupportedOperationException(READ_ONLY_MESSAGE); + } + + @Override + public boolean addAll(final Collection c) { + throw new UnsupportedOperationException(READ_ONLY_MESSAGE); + } + + @Override + public boolean addAll(final int index, final Collection c) { + throw new UnsupportedOperationException(READ_ONLY_MESSAGE); + } + + @Override + public boolean removeAll(final Collection c) { + throw new UnsupportedOperationException(READ_ONLY_MESSAGE); + } + + @Override + public boolean retainAll(final Collection c) { + throw new UnsupportedOperationException(READ_ONLY_MESSAGE); + } + + @Override + public void clear() { + throw new UnsupportedOperationException(READ_ONLY_MESSAGE); + } + + @Override + public BsonValue set(final int index, final BsonValue element) { + throw new UnsupportedOperationException(READ_ONLY_MESSAGE); + } + + @Override + public void add(final int index, final BsonValue element) { + throw new UnsupportedOperationException(READ_ONLY_MESSAGE); + } + + @Override + public BsonValue remove(final int index) { + throw new UnsupportedOperationException(READ_ONLY_MESSAGE); + } + + private class ByteBufBsonArrayIterator implements Iterator { + private final ByteBuf duplicatedByteBuf = byteBuf.duplicate(); + private final BsonBinaryReader bsonReader; + + { + bsonReader = new BsonBinaryReader(new 
ByteBufferBsonInput(duplicatedByteBuf)); + // While one might expect that this would be a call to BsonReader#readStartArray that doesn't work because BsonBinaryReader + // expects to be positioned at the start at the beginning of a document, not an array. Fortunately, a BSON array has exactly + // the same structure as a BSON document (the keys are just the array indices converted to a strings). So it works fine to + // call BsonReader#readStartDocument here, and just skip all the names via BsonReader#skipName below. + bsonReader.readStartDocument(); + bsonReader.readBsonType(); + } + + @Override + public boolean hasNext() { + return bsonReader.getCurrentBsonType() != BsonType.END_OF_DOCUMENT; + } + + @Override + public BsonValue next() { + if (!hasNext()) { + throw new NoSuchElementException(); + } + bsonReader.skipName(); + BsonValue value = readBsonValue(duplicatedByteBuf, bsonReader); + bsonReader.readBsonType(); + return value; + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ByteBufBsonDocument.java b/driver-core/src/main/com/mongodb/internal/connection/ByteBufBsonDocument.java new file mode 100644 index 00000000000..70ed10a75a8 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/ByteBufBsonDocument.java @@ -0,0 +1,429 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.lang.Nullable; +import org.bson.BsonBinaryReader; +import org.bson.BsonDocument; +import org.bson.BsonType; +import org.bson.BsonValue; +import org.bson.ByteBuf; +import org.bson.RawBsonDocument; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.DecoderContext; +import org.bson.io.ByteBufferBsonInput; +import org.bson.json.JsonMode; +import org.bson.json.JsonWriter; +import org.bson.json.JsonWriterSettings; + +import java.io.InvalidObjectException; +import java.io.ObjectInputStream; +import java.io.StringWriter; +import java.util.AbstractCollection; +import java.util.AbstractMap; +import java.util.AbstractSet; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.Set; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.connection.ByteBufBsonHelper.readBsonValue; + +final class ByteBufBsonDocument extends BsonDocument { + private static final long serialVersionUID = 2L; + + private final transient ByteBuf byteBuf; + + /** + * Create a list of ByteBufBsonDocument from a buffer positioned at the start of the first document of an OP_MSG Section + * of type Document Sequence (Kind 1). + *
+ * <p>
+ * The provided buffer will be positioned at the end of the section upon normal completion of the method + */ + static List createList(final ByteBuf outputByteBuf) { + List documents = new ArrayList<>(); + while (outputByteBuf.hasRemaining()) { + ByteBufBsonDocument curDocument = createOne(outputByteBuf); + documents.add(curDocument); + } + return documents; + } + + /** + * Create a ByteBufBsonDocument from a buffer positioned at the start of a BSON document. + * The provided buffer will be positioned at the end of the document upon normal completion of the method + */ + static ByteBufBsonDocument createOne(final ByteBuf outputByteBuf) { + int documentStart = outputByteBuf.position(); + int documentSizeInBytes = outputByteBuf.getInt(); + int documentEnd = documentStart + documentSizeInBytes; + ByteBuf slice = outputByteBuf.duplicate().position(documentStart).limit(documentEnd); + outputByteBuf.position(documentEnd); + return new ByteBufBsonDocument(slice); + } + + @Override + public String toJson() { + return toJson(JsonWriterSettings.builder().outputMode(JsonMode.RELAXED).build()); + } + + @Override + public String toJson(final JsonWriterSettings settings) { + StringWriter stringWriter = new StringWriter(); + JsonWriter jsonWriter = new JsonWriter(stringWriter, settings); + ByteBuf duplicate = byteBuf.duplicate(); + try (BsonBinaryReader reader = new BsonBinaryReader(new ByteBufferBsonInput(duplicate))) { + jsonWriter.pipe(reader); + return stringWriter.toString(); + } finally { + duplicate.release(); + } + } + + @Override + public BsonBinaryReader asBsonReader() { + return new BsonBinaryReader(new ByteBufferBsonInput(byteBuf.duplicate())); + } + + @SuppressWarnings("MethodDoesntCallSuperMethod") + @Override + public BsonDocument clone() { + byte[] clonedBytes = new byte[byteBuf.remaining()]; + byteBuf.get(byteBuf.position(), clonedBytes); + return new RawBsonDocument(clonedBytes); + } + + @Nullable + T findInDocument(final Finder finder) { + ByteBuf duplicateByteBuf = byteBuf.duplicate(); + try (BsonBinaryReader bsonReader = new BsonBinaryReader(new ByteBufferBsonInput(duplicateByteBuf))) { + bsonReader.readStartDocument(); + while (bsonReader.readBsonType() != BsonType.END_OF_DOCUMENT) { + T found = finder.find(duplicateByteBuf, bsonReader); + if (found != null) { + return found; + } + } + bsonReader.readEndDocument(); + } finally { + duplicateByteBuf.release(); + } + + return finder.notFound(); + } + + BsonDocument toBaseBsonDocument() { + ByteBuf duplicateByteBuf = byteBuf.duplicate(); + try (BsonBinaryReader bsonReader = new BsonBinaryReader(new ByteBufferBsonInput(duplicateByteBuf))) { + return new BsonDocumentCodec().decode(bsonReader, DecoderContext.builder().build()); + } finally { + duplicateByteBuf.release(); + } + } + + ByteBufBsonDocument(final ByteBuf byteBuf) { + this.byteBuf = byteBuf; + } + + @Override + public void clear() { + throw new UnsupportedOperationException("ByteBufBsonDocument instances are immutable"); + } + + @Override + public BsonValue put(final String key, final BsonValue value) { + throw new UnsupportedOperationException("ByteBufBsonDocument instances are immutable"); + } + + @Override + public BsonDocument append(final String key, final BsonValue value) { + throw new UnsupportedOperationException("ByteBufBsonDocument instances are immutable"); + } + + @Override + public void putAll(final Map m) { + throw new UnsupportedOperationException("ByteBufBsonDocument instances are immutable"); + } + + @Override + public BsonValue remove(final Object key) { + 
throw new UnsupportedOperationException("ByteBufBsonDocument instances are immutable"); + } + + @Override + public boolean isEmpty() { + return assertNotNull(findInDocument(new Finder() { + @Override + public Boolean find(final ByteBuf byteBuf, final BsonBinaryReader bsonReader) { + return false; + } + + @Override + public Boolean notFound() { + return true; + } + })); + } + + @Override + public int size() { + return assertNotNull(findInDocument(new Finder() { + private int size; + + @Override + @Nullable + public Integer find(final ByteBuf byteBuf, final BsonBinaryReader bsonReader) { + size++; + bsonReader.readName(); + bsonReader.skipValue(); + return null; + } + + @Override + public Integer notFound() { + return size; + } + })); + } + + @Override + public Set> entrySet() { + return new ByteBufBsonDocumentEntrySet(); + } + + @Override + public Collection values() { + return new ByteBufBsonDocumentValuesCollection(); + } + + @Override + public Set keySet() { + return new ByteBufBsonDocumentKeySet(); + } + + @Override + public boolean containsKey(final Object key) { + if (key == null) { + throw new IllegalArgumentException("key can not be null"); + } + + Boolean containsKey = findInDocument(new Finder() { + @Override + public Boolean find(final ByteBuf byteBuf, final BsonBinaryReader bsonReader) { + if (bsonReader.readName().equals(key)) { + return true; + } + bsonReader.skipValue(); + return null; + } + + @Override + public Boolean notFound() { + return false; + } + }); + return containsKey != null ? containsKey : false; + } + + @Override + public boolean containsValue(final Object value) { + Boolean containsValue = findInDocument(new Finder() { + @Override + public Boolean find(final ByteBuf byteBuf, final BsonBinaryReader bsonReader) { + bsonReader.skipName(); + if (readBsonValue(byteBuf, bsonReader).equals(value)) { + return true; + } + return null; + } + + @Override + public Boolean notFound() { + return false; + } + }); + return containsValue != null ? containsValue : false; + } + + @Nullable + @Override + public BsonValue get(final Object key) { + notNull("key", key); + return findInDocument(new Finder() { + @Override + public BsonValue find(final ByteBuf byteBuf, final BsonBinaryReader bsonReader) { + if (bsonReader.readName().equals(key)) { + return readBsonValue(byteBuf, bsonReader); + } + bsonReader.skipValue(); + return null; + } + + @Nullable + @Override + public BsonValue notFound() { + return null; + } + }); + } + + /** + * Gets the first key in this document. 
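The Finder callbacks above all reduce to one forward scan over the raw bytes, reading names and skipping values until a match. The same idea as a standalone helper (class and method names are hypothetical) over a plain byte array:

    import java.nio.ByteBuffer;

    import org.bson.BsonBinaryReader;
    import org.bson.BsonType;

    public final class SinglePassScan {
        // Reports whether the raw BSON document contains the key, without decoding any values.
        static boolean containsKey(final byte[] bson, final String key) {
            try (BsonBinaryReader reader = new BsonBinaryReader(ByteBuffer.wrap(bson))) {
                reader.readStartDocument();
                while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) {
                    if (reader.readName().equals(key)) {
                        return true;
                    }
                    reader.skipValue();
                }
                return false;
            }
        }
    }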
+ * + * @return the first key in this document + * @throws java.util.NoSuchElementException if the document is empty + */ + public String getFirstKey() { + return assertNotNull(findInDocument(new Finder() { + @Override + public String find(final ByteBuf byteBuf, final BsonBinaryReader bsonReader) { + return bsonReader.readName(); + } + + @Override + public String notFound() { + throw new NoSuchElementException(); + } + })); + } + + private interface Finder { + @Nullable + T find(ByteBuf byteBuf, BsonBinaryReader bsonReader); + @Nullable + T notFound(); + } + + // see https://docs.oracle.com/javase/6/docs/platform/serialization/spec/output.html + private Object writeReplace() { + return toBaseBsonDocument(); + } + + // see https://docs.oracle.com/javase/6/docs/platform/serialization/spec/input.html + private void readObject(final ObjectInputStream stream) throws InvalidObjectException { + throw new InvalidObjectException("Proxy required"); + } + + private class ByteBufBsonDocumentEntrySet extends AbstractSet> { + @Override + public Iterator> iterator() { + return new Iterator>() { + private final ByteBuf duplicatedByteBuf = byteBuf.duplicate(); + private final BsonBinaryReader bsonReader; + + { + bsonReader = new BsonBinaryReader(new ByteBufferBsonInput(duplicatedByteBuf)); + bsonReader.readStartDocument(); + bsonReader.readBsonType(); + } + + @Override + public boolean hasNext() { + return bsonReader.getCurrentBsonType() != BsonType.END_OF_DOCUMENT; + } + + @Override + public Entry next() { + if (!hasNext()) { + throw new NoSuchElementException(); + } + String key = bsonReader.readName(); + BsonValue value = readBsonValue(duplicatedByteBuf, bsonReader); + bsonReader.readBsonType(); + return new AbstractMap.SimpleEntry<>(key, value); + } + + }; + } + + @Override + public boolean isEmpty() { + return !iterator().hasNext(); + } + + @Override + public int size() { + return ByteBufBsonDocument.this.size(); + } + } + + private class ByteBufBsonDocumentKeySet extends AbstractSet { + @SuppressWarnings("MismatchedQueryAndUpdateOfCollection") + private final Set> entrySet = new ByteBufBsonDocumentEntrySet(); + + @Override + public Iterator iterator() { + final Iterator> entrySetIterator = entrySet.iterator(); + return new Iterator() { + @Override + public boolean hasNext() { + return entrySetIterator.hasNext(); + } + + @Override + public String next() { + return entrySetIterator.next().getKey(); + } + }; + } + + @Override + public boolean isEmpty() { + return entrySet.isEmpty(); + } + + @Override + public int size() { + return entrySet.size(); + } + } + + private class ByteBufBsonDocumentValuesCollection extends AbstractCollection { + @SuppressWarnings("MismatchedQueryAndUpdateOfCollection") + private final Set> entrySet = new ByteBufBsonDocumentEntrySet(); + + @Override + public Iterator iterator() { + final Iterator> entrySetIterator = entrySet.iterator(); + return new Iterator() { + @Override + public boolean hasNext() { + return entrySetIterator.hasNext(); + } + + @Override + public BsonValue next() { + return entrySetIterator.next().getValue(); + } + }; + } + + @Override + public boolean isEmpty() { + return entrySet.isEmpty(); + } + @Override + public int size() { + return entrySet.size(); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ByteBufBsonHelper.java b/driver-core/src/main/com/mongodb/internal/connection/ByteBufBsonHelper.java new file mode 100644 index 00000000000..55054112bf2 --- /dev/null +++ 
b/driver-core/src/main/com/mongodb/internal/connection/ByteBufBsonHelper.java @@ -0,0 +1,126 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import org.bson.BsonBinaryReader; +import org.bson.BsonBoolean; +import org.bson.BsonDateTime; +import org.bson.BsonDecimal128; +import org.bson.BsonDocument; +import org.bson.BsonDouble; +import org.bson.BsonInt32; +import org.bson.BsonInt64; +import org.bson.BsonJavaScript; +import org.bson.BsonJavaScriptWithScope; +import org.bson.BsonMaxKey; +import org.bson.BsonMinKey; +import org.bson.BsonNull; +import org.bson.BsonObjectId; +import org.bson.BsonString; +import org.bson.BsonSymbol; +import org.bson.BsonUndefined; +import org.bson.BsonValue; +import org.bson.ByteBuf; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.DecoderContext; + +final class ByteBufBsonHelper { + static BsonValue readBsonValue(final ByteBuf byteBuf, final BsonBinaryReader bsonReader) { + BsonValue value; + switch (bsonReader.getCurrentBsonType()) { + case DOCUMENT: + ByteBuf documentByteBuf = byteBuf.duplicate(); + value = new ByteBufBsonDocument(documentByteBuf); + bsonReader.skipValue(); + break; + case ARRAY: + ByteBuf arrayByteBuf = byteBuf.duplicate(); + value = new ByteBufBsonArray(arrayByteBuf); + bsonReader.skipValue(); + break; + case INT32: + value = new BsonInt32(bsonReader.readInt32()); + break; + case INT64: + value = new BsonInt64(bsonReader.readInt64()); + break; + case DOUBLE: + value = new BsonDouble(bsonReader.readDouble()); + break; + case DECIMAL128: + value = new BsonDecimal128(bsonReader.readDecimal128()); + break; + case DATE_TIME: + value = new BsonDateTime(bsonReader.readDateTime()); + break; + case TIMESTAMP: + value = bsonReader.readTimestamp(); + break; + case BOOLEAN: + value = new BsonBoolean(bsonReader.readBoolean()); + break; + case OBJECT_ID: + value = new BsonObjectId(bsonReader.readObjectId()); + break; + case STRING: + value = new BsonString(bsonReader.readString()); + break; + case BINARY: + value = bsonReader.readBinaryData(); + break; + case SYMBOL: + value = new BsonSymbol(bsonReader.readSymbol()); + break; + case UNDEFINED: + bsonReader.readUndefined(); + value = new BsonUndefined(); + break; + case REGULAR_EXPRESSION: + value = bsonReader.readRegularExpression(); + break; + case DB_POINTER: + value = bsonReader.readDBPointer(); + break; + case JAVASCRIPT: + value = new BsonJavaScript(bsonReader.readJavaScript()); + break; + case JAVASCRIPT_WITH_SCOPE: + String code = bsonReader.readJavaScriptWithScope(); + BsonDocument scope = new BsonDocumentCodec().decode(bsonReader, DecoderContext.builder().build()); + value = new BsonJavaScriptWithScope(code, scope); + break; + case MIN_KEY: + bsonReader.readMinKey(); + value = new BsonMinKey(); + break; + case MAX_KEY: + bsonReader.readMaxKey(); + value = new BsonMaxKey(); + break; + case NULL: + bsonReader.readNull(); + value = new BsonNull(); + break; + default: + throw 
new UnsupportedOperationException("Unexpected BSON type: " + bsonReader.getCurrentBsonType()); + } + return value; + } + + private ByteBufBsonHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ByteBufferBsonOutput.java b/driver-core/src/main/com/mongodb/internal/connection/ByteBufferBsonOutput.java new file mode 100644 index 00000000000..600145db48f --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/ByteBufferBsonOutput.java @@ -0,0 +1,552 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import org.bson.BsonSerializationException; +import org.bson.ByteBuf; +import org.bson.io.OutputBuffer; + +import java.io.IOException; +import java.io.OutputStream; +import java.nio.ByteOrder; +import java.util.ArrayList; +import java.util.List; + +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.assertions.Assertions.notNull; +import static java.lang.String.format; + +/** + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class ByteBufferBsonOutput extends OutputBuffer { + + private static final int MAX_SHIFT = 31; + private static final int INITIAL_SHIFT = 10; + public static final int INITIAL_BUFFER_SIZE = 1 << INITIAL_SHIFT; + public static final int MAX_BUFFER_SIZE = 1 << 24; + + private final BufferProvider bufferProvider; + private final List bufferList = new ArrayList<>(); + private int curBufferIndex = 0; + private int position = 0; + private boolean closed; + private ByteBuf currentByteBuffer; + + /** + * Construct an instance that uses the given buffer provider to allocate byte buffers as needed as it grows. + * + * @param bufferProvider the non-null buffer provider + */ + public ByteBufferBsonOutput(final BufferProvider bufferProvider) { + this.bufferProvider = notNull("bufferProvider", bufferProvider); + } + + /** + * Creates a new empty {@link ByteBufferBsonOutput.Branch}, + * which gets merged into this {@link ByteBufferBsonOutput} on {@link ByteBufferBsonOutput.Branch#close()} + * by appending its data without copying it. + * If multiple branches are created, they are merged in the order they are {@linkplain ByteBufferBsonOutput.Branch#close() closed}. + * {@linkplain #close() Closing} this {@link ByteBufferBsonOutput} does not {@linkplain ByteBufferBsonOutput.Branch#close() close} the branch. + * + * @return A new {@link ByteBufferBsonOutput.Branch}. + */ + public ByteBufferBsonOutput.Branch branch() { + return new ByteBufferBsonOutput.Branch(this); + } + + @Override + public void writeBytes(final byte[] bytes, final int offset, final int length) { + ensureOpen(); + + int currentOffset = offset; + int remainingLen = length; + while (remainingLen > 0) { + ByteBuf buf = getCurrentByteBuffer(); + int bytesToPutInCurrentBuffer = Math.min(buf.remaining(), remainingLen); + buf.put(bytes, currentOffset, bytesToPutInCurrentBuffer); + remainingLen -= bytesToPutInCurrentBuffer; + currentOffset += bytesToPutInCurrentBuffer; + } + position += length; + } + + @Override + public void writeInt32(final int value) { + ensureOpen(); + ByteBuf buf = getCurrentByteBuffer(); + if (buf.remaining() >= 4) { + buf.putInt(value); + position += 4; + } else { + // fallback for edge cases + super.writeInt32(value); + } + } + + + @Override + public void writeInt32(final int absolutePosition, final int value) { + ensureOpen(); + + if (absolutePosition < 0) { + throw new IllegalArgumentException(String.format("position must be >= 0 but was %d", absolutePosition)); + } + + if (absolutePosition + 3 > position - 1) { + throw new IllegalArgumentException(String.format("Cannot write 4 bytes starting at position %d: current size is %d bytes", + absolutePosition, + position)); + } + + BufferPositionPair bufferPositionPair = getBufferPositionPair(absolutePosition); + ByteBuf byteBuffer = getByteBufferAtIndex(bufferPositionPair.bufferIndex); + int capacity = byteBuffer.position() - bufferPositionPair.position; + + if (capacity >= 4) { + byteBuffer.putInt(bufferPositionPair.position, value); + } else { + // fallback for edge cases + int valueToWrite = value; + int pos = bufferPositionPair.position; + int bufferIndex = bufferPositionPair.bufferIndex; + + for (int i = 0; i < 4; i++) { + byteBuffer.put(pos++, (byte) valueToWrite); + valueToWrite = valueToWrite >> 8; + if (--capacity == 0) { + byteBuffer = getByteBufferAtIndex(++bufferIndex); + pos = 0; + capacity = byteBuffer.position(); + } + } + } + } + + @Override + public void writeDouble(final double value) { + ensureOpen(); + ByteBuf buf =
getCurrentByteBuffer(); + if (buf.remaining() >= 8) { + buf.putDouble(value); + position += 8; + } else { + // fallback for edge cases + writeInt64(Double.doubleToRawLongBits(value)); + } + } + + @Override + public void writeInt64(final long value) { + ensureOpen(); + ByteBuf buf = getCurrentByteBuffer(); + if (buf.remaining() >= 8) { + buf.putLong(value); + position += 8; + } else { + // fallback for edge cases + super.writeInt64(value); + } + } + + @Override + public void writeByte(final int value) { + ensureOpen(); + + getCurrentByteBuffer().put((byte) value); + position++; + } + + private ByteBuf getCurrentByteBuffer() { + if (currentByteBuffer == null) { + currentByteBuffer = getByteBufferAtIndex(curBufferIndex); + } + + if (currentByteBuffer.hasRemaining()) { + return currentByteBuffer; + } + + curBufferIndex++; + currentByteBuffer = getByteBufferAtIndex(curBufferIndex); + return currentByteBuffer; + } + + private ByteBuf getByteBufferAtIndex(final int index) { + if (bufferList.size() < index + 1) { + ByteBuf buffer = bufferProvider.getBuffer(index >= (MAX_SHIFT - INITIAL_SHIFT) + ? MAX_BUFFER_SIZE + : Math.min(INITIAL_BUFFER_SIZE << index, MAX_BUFFER_SIZE)); + bufferList.add(buffer); + } + return bufferList.get(index); + } + + @Override + public int getPosition() { + ensureOpen(); + return position; + } + + @Override + public int getSize() { + ensureOpen(); + return position; + } + + protected void write(final int absolutePosition, final int value) { + ensureOpen(); + + if (absolutePosition < 0) { + throw new IllegalArgumentException(String.format("position must be >= 0 but was %d", absolutePosition)); + } + if (absolutePosition > position - 1) { + throw new IllegalArgumentException(String.format("position must be <= %d but was %d", position - 1, absolutePosition)); + } + + BufferPositionPair bufferPositionPair = getBufferPositionPair(absolutePosition); + ByteBuf byteBuffer = getByteBufferAtIndex(bufferPositionPair.bufferIndex); + byteBuffer.put(bufferPositionPair.position++, (byte) value); + } + + @Override + public List getByteBuffers() { + ensureOpen(); + + List buffers = new ArrayList<>(bufferList.size()); + for (final ByteBuf cur : bufferList) { + buffers.add(cur.duplicate().order(ByteOrder.LITTLE_ENDIAN).flip()); + } + return buffers; + } + + public List getDuplicateByteBuffers() { + ensureOpen(); + + List buffers = new ArrayList<>(bufferList.size()); + for (final ByteBuf cur : bufferList) { + buffers.add(cur.duplicate().order(ByteOrder.LITTLE_ENDIAN)); + } + return buffers; + } + + + @Override + public int pipe(final OutputStream out) throws IOException { + ensureOpen(); + + byte[] tmp = new byte[INITIAL_BUFFER_SIZE]; + + int total = 0; + List byteBuffers = getByteBuffers(); + try { + for (final ByteBuf cur : byteBuffers) { + while (cur.hasRemaining()) { + int numBytesToCopy = Math.min(cur.remaining(), tmp.length); + cur.get(tmp, 0, numBytesToCopy); + out.write(tmp, 0, numBytesToCopy); + } + total += cur.limit(); + } + } finally { + byteBuffers.forEach(ByteBuf::release); + } + return total; + } + + @Override + public void truncateToPosition(final int newPosition) { + ensureOpen(); + if (newPosition == position) { + return; + } + if (newPosition > position || newPosition < 0) { + throw new IllegalArgumentException(); + } + + BufferPositionPair bufferPositionPair = getBufferPositionPair(newPosition); + + bufferList.get(bufferPositionPair.bufferIndex).position(bufferPositionPair.position); + + if (bufferPositionPair.bufferIndex + 1 < bufferList.size()) { + currentByteBuffer = 
null; + } + + while (bufferList.size() > bufferPositionPair.bufferIndex + 1) { + ByteBuf buffer = bufferList.remove(bufferList.size() - 1); + buffer.release(); + } + + curBufferIndex = bufferPositionPair.bufferIndex; + position = newPosition; + } + + /** + * The {@link #flush()} method of {@link ByteBufferBsonOutput} and of its subclasses does nothing.
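The buffer list above grows geometrically: buffer index n is sized INITIAL_BUFFER_SIZE << n, capped at MAX_BUFFER_SIZE, so even very large messages need only a roughly logarithmic number of allocations and no copying on growth. A toy printout of that schedule (demo class invented, constants mirrored from the fields above):

    public final class GrowthScheduleDemo {
        public static void main(final String[] args) {
            final int initialBufferSize = 1 << 10; // mirrors INITIAL_BUFFER_SIZE
            final int maxBufferSize = 1 << 24;     // mirrors MAX_BUFFER_SIZE
            for (int index = 0; index < 16; index++) {
                int size = Math.min(initialBufferSize << index, maxBufferSize);
                System.out.println("buffer " + index + " -> " + size + " bytes"); // 1024, 2048, 4096, ...
            }
        }
    }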
+ */ + @Override + public final void flush() throws IOException { + } + + /** + * {@inheritDoc} + *
+ * <p>
+ * Idempotent.
+ * </p>
+ */ + @Override + public void close() { + if (isOpen()) { + for (final ByteBuf cur : bufferList) { + cur.release(); + } + currentByteBuffer = null; + bufferList.clear(); + closed = true; + } + } + + private BufferPositionPair getBufferPositionPair(final int absolutePosition) { + int positionInBuffer = absolutePosition; + int bufferIndex = 0; + int bufferSize = bufferList.get(bufferIndex).position(); + int startPositionOfBuffer = 0; + while (startPositionOfBuffer + bufferSize <= absolutePosition) { + bufferIndex++; + startPositionOfBuffer += bufferSize; + positionInBuffer -= bufferSize; + bufferSize = bufferList.get(bufferIndex).position(); + } + + return new BufferPositionPair(bufferIndex, positionInBuffer); + } + + private void ensureOpen() { + if (!isOpen()) { + throw new IllegalStateException("The output is closed"); + } + } + + boolean isOpen() { + return !closed; + } + + /** + * @see #branch() + */ + private void merge(final ByteBufferBsonOutput branch) { + assertTrue(branch instanceof ByteBufferBsonOutput.Branch); + branch.bufferList.forEach(ByteBuf::retain); + bufferList.addAll(branch.bufferList); + curBufferIndex += branch.curBufferIndex + 1; + position += branch.position; + currentByteBuffer = null; + } + + public static final class Branch extends ByteBufferBsonOutput { + private final ByteBufferBsonOutput parent; + + private Branch(final ByteBufferBsonOutput parent) { + super(parent.bufferProvider); + this.parent = parent; + } + + /** + * @see #branch() + */ + @Override + public void close() { + if (isOpen()) { + try { + assertTrue(parent.isOpen()); + parent.merge(this); + } finally { + super.close(); + } + } + } + } + + private static final class BufferPositionPair { + private final int bufferIndex; + private int position; + + BufferPositionPair(final int bufferIndex, final int position) { + this.bufferIndex = bufferIndex; + this.position = position; + } + } + + protected int writeCharacters(final String str, final boolean checkNullTermination) { + int stringLength = str.length(); + int sp = 0; + int prevPos = position; + + ByteBuf curBuffer = getCurrentByteBuffer(); + int curBufferPos = curBuffer.position(); + int curBufferLimit = curBuffer.limit(); + int remaining = curBufferLimit - curBufferPos; + + if (curBuffer.isBackedByArray()) { + byte[] dst = curBuffer.array(); + int arrayOffset = curBuffer.arrayOffset(); + if (remaining >= str.length() + 1) { + // Write ASCII characters directly to the array until we hit a non-ASCII character. + sp = writeOnArrayAscii(str, dst, arrayOffset + curBufferPos, checkNullTermination); + curBufferPos += sp; + // If the whole string was written as ASCII, append the null terminator. + if (sp == stringLength) { + dst[arrayOffset + curBufferPos++] = 0; + position += sp + 1; + curBuffer.position(curBufferPos); + return sp + 1; + } + // Otherwise, update the position to reflect the partial write. + position += sp; + curBuffer.position(curBufferPos); + } + } + + // We get here, when the buffer is not backed by an array, or when the string contains at least one non-ASCII characters. 
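+ // Each non-ASCII code point is then emitted as two bytes (below 0x800), three bytes (below 0x10000),
+ // or four bytes (supplementary planes), matching the branches in writeOnBuffers below.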
+ return writeOnBuffers(str, + checkNullTermination, + sp, + stringLength, + curBufferLimit, + curBufferPos, + curBuffer, + prevPos); + } + + private int writeOnBuffers(final String str, + final boolean checkNullTermination, + final int stringPointer, + final int stringLength, + final int bufferLimit, + final int bufferPos, + final ByteBuf buffer, + final int prevPos) { + int remaining; + int sp = stringPointer; + int curBufferPos = bufferPos; + int curBufferLimit = bufferLimit; + ByteBuf curBuffer = buffer; + while (sp < stringLength) { + remaining = curBufferLimit - curBufferPos; + int c = str.charAt(sp); + + if (checkNullTermination && c == 0x0) { + throw new BsonSerializationException( + format("BSON cstring '%s' is not valid because it contains a null character " + "at index %d", str, sp)); + } + + if (c < 0x80) { + if (remaining == 0) { + curBuffer = getCurrentByteBuffer(); + curBufferPos = 0; + curBufferLimit = curBuffer.limit(); + } + curBuffer.put((byte) c); + curBufferPos++; + position++; + } else if (c < 0x800) { + if (remaining < 2) { + // Not enough space: use write() to handle buffer boundary + write((byte) (0xc0 + (c >> 6))); + write((byte) (0x80 + (c & 0x3f))); + + curBuffer = getCurrentByteBuffer(); + curBufferPos = curBuffer.position(); + curBufferLimit = curBuffer.limit(); + } else { + curBuffer.put((byte) (0xc0 + (c >> 6))); + curBuffer.put((byte) (0x80 + (c & 0x3f))); + curBufferPos += 2; + position += 2; + } + } else { + // Handle multibyte characters (may involve surrogate pairs). + c = Character.codePointAt(str, sp); + /* + Malformed surrogate pairs are encoded as-is (3 byte code unit) without substituting any code point. + This known deviation from the spec and current functionality remains for backward compatibility. + Ticket: JAVA-5575 + */ + if (c < 0x10000) { + if (remaining < 3) { + write((byte) (0xe0 + (c >> 12))); + write((byte) (0x80 + ((c >> 6) & 0x3f))); + write((byte) (0x80 + (c & 0x3f))); + + curBuffer = getCurrentByteBuffer(); + curBufferPos = curBuffer.position(); + curBufferLimit = curBuffer.limit(); + } else { + curBuffer.put((byte) (0xe0 + (c >> 12))); + curBuffer.put((byte) (0x80 + ((c >> 6) & 0x3f))); + curBuffer.put((byte) (0x80 + (c & 0x3f))); + curBufferPos += 3; + position += 3; + } + } else { + if (remaining < 4) { + write((byte) (0xf0 + (c >> 18))); + write((byte) (0x80 + ((c >> 12) & 0x3f))); + write((byte) (0x80 + ((c >> 6) & 0x3f))); + write((byte) (0x80 + (c & 0x3f))); + + curBuffer = getCurrentByteBuffer(); + curBufferPos = curBuffer.position(); + curBufferLimit = curBuffer.limit(); + } else { + curBuffer.put((byte) (0xf0 + (c >> 18))); + curBuffer.put((byte) (0x80 + ((c >> 12) & 0x3f))); + curBuffer.put((byte) (0x80 + ((c >> 6) & 0x3f))); + curBuffer.put((byte) (0x80 + (c & 0x3f))); + curBufferPos += 4; + position += 4; + } + } + } + sp += Character.charCount(c); + } + + getCurrentByteBuffer().put((byte) 0); + position++; + return position - prevPos; + } + + private static int writeOnArrayAscii(final String str, + final byte[] dst, + final int arrayPosition, + final boolean checkNullTermination) { + int pos = arrayPosition; + int sp = 0; + // Fast common path: This tight loop is JIT-friendly (simple, no calls, few branches), + // It might be unrolled for performance. 
+ for (; sp < str.length(); sp++, pos++) { + char c = str.charAt(sp); + if (checkNullTermination && c == 0) { + throw new BsonSerializationException( + format("BSON cstring '%s' is not valid because it contains a null character " + "at index %d", str, sp)); + } + if (c >= 0x80) { + break; + } + dst[pos] = (byte) c; + } + return sp; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ClientMetadata.java b/driver-core/src/main/com/mongodb/internal/connection/ClientMetadata.java new file mode 100644 index 00000000000..6c98a87a9bd --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/ClientMetadata.java @@ -0,0 +1,313 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoDriverInformation; +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.client.DriverInformation; +import com.mongodb.internal.client.DriverInformationHelper; +import com.mongodb.lang.Nullable; +import org.bson.BsonBinaryWriter; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.EncoderContext; +import org.bson.io.BasicOutputBuffer; + +import java.io.File; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.function.Consumer; + +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.internal.Locks.withLock; +import static com.mongodb.internal.client.DriverInformationHelper.INITIAL_DRIVER_INFORMATION; +import static com.mongodb.internal.connection.FaasEnvironment.getFaasEnvironment; +import static java.lang.System.getProperty; +import static java.nio.file.Paths.get; + +/** + * Represents metadata of the current MongoClient. + * + * Metadata is used to identify the client in the server logs and metrics. + * + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +@ThreadSafe +public class ClientMetadata { + private static final String SEPARATOR = "|"; + private static final int MAXIMUM_CLIENT_METADATA_ENCODED_SIZE = 512; + private final ReentrantReadWriteLock readWriteLock = new ReentrantReadWriteLock(); + private final String applicationName; + private final List driverInformationList; + private BsonDocument clientMetadataBsonDocument; + + public ClientMetadata(@Nullable final String applicationName, final MongoDriverInformation mongoDriverInformation) { + this.applicationName = applicationName; + this.driverInformationList = new ArrayList<>(); + withLock(readWriteLock.writeLock(), () -> { + driverInformationList.add(INITIAL_DRIVER_INFORMATION); + driverInformationList.addAll(mongoDriverInformation.getDriverInformationList()); + this.clientMetadataBsonDocument = createClientMetadataDocument(applicationName, driverInformationList); + }); + } + + /** + * Returns mutable BsonDocument that represents the client metadata. + */ + public BsonDocument getBsonDocument() { + return withLock(readWriteLock.readLock(), () -> clientMetadataBsonDocument); + } + + public void append(final MongoDriverInformation mongoDriverInformationToAppend) { + withLock(readWriteLock.writeLock(), () -> { + boolean hasAddedNewData = false; + for (DriverInformation driverInformation : mongoDriverInformationToAppend.getDriverInformationList()) { + if (!driverInformationList.contains(driverInformation)) { + hasAddedNewData = true; + driverInformationList.add(driverInformation); + } + } + if (hasAddedNewData) { + clientMetadataBsonDocument = createClientMetadataDocument(applicationName, driverInformationList); + } + }); + } + + private static BsonDocument createClientMetadataDocument(@Nullable final String applicationName, + final List driverInformationList) { + if (applicationName != null) { + isTrueArgument("applicationName UTF-8 encoding length <= 128", + applicationName.getBytes(StandardCharsets.UTF_8).length <= 128); + } + + // client fields are added in "preservation" order: + BsonDocument clientMetadata = new BsonDocument(); + tryWithLimit(clientMetadata, d -> putAtPath(d, "application.name", applicationName)); + + // required fields: + tryWithLimit(clientMetadata, d -> { + putAtPath(d, "driver.name", INITIAL_DRIVER_INFORMATION.getDriverName()); + putAtPath(d, "driver.version", INITIAL_DRIVER_INFORMATION.getDriverVersion()); + }); + tryWithLimit(clientMetadata, d -> putAtPath(d, "os.type", getOperatingSystemType(getOperatingSystemName()))); + // full driver information: + tryWithLimit(clientMetadata, d -> { + putAtPath(d, "driver.name", listToString(DriverInformationHelper.getNames(driverInformationList))); + putAtPath(d, "driver.version", listToString(DriverInformationHelper.getVersions(driverInformationList))); + }); + + // optional fields: + FaasEnvironment faasEnvironment = getFaasEnvironment(); + ClientMetadata.ContainerRuntime containerRuntime = ClientMetadata.ContainerRuntime.determineExecutionContainer(); + ClientMetadata.Orchestrator orchestrator = ClientMetadata.Orchestrator.determineExecutionOrchestrator(); + + tryWithLimit(clientMetadata, d -> putAtPath(d, "platform", INITIAL_DRIVER_INFORMATION.getDriverPlatform())); + tryWithLimit(clientMetadata, d -> putAtPath(d, "platform", listToString(DriverInformationHelper.getPlatforms(driverInformationList)))); + tryWithLimit(clientMetadata, d -> putAtPath(d, "os.name", getOperatingSystemName())); + tryWithLimit(clientMetadata, d -> putAtPath(d, "os.architecture", getProperty("os.arch", "unknown"))); + 
tryWithLimit(clientMetadata, d -> putAtPath(d, "os.version", getProperty("os.version", "unknown"))); + + tryWithLimit(clientMetadata, d -> putAtPath(d, "env.name", faasEnvironment.getName())); + tryWithLimit(clientMetadata, d -> putAtPath(d, "env.timeout_sec", faasEnvironment.getTimeoutSec())); + tryWithLimit(clientMetadata, d -> putAtPath(d, "env.memory_mb", faasEnvironment.getMemoryMb())); + tryWithLimit(clientMetadata, d -> putAtPath(d, "env.region", faasEnvironment.getRegion())); + + tryWithLimit(clientMetadata, d -> putAtPath(d, "env.container.runtime", containerRuntime.getName())); + tryWithLimit(clientMetadata, d -> putAtPath(d, "env.container.orchestrator", orchestrator.getName())); + return clientMetadata; + } + + private static void putAtPath(final BsonDocument d, final String path, @Nullable final String value) { + if (value == null) { + return; + } + putAtPath(d, path, new BsonString(value)); + } + + private static void putAtPath(final BsonDocument d, final String path, @Nullable final Integer value) { + if (value == null) { + return; + } + putAtPath(d, path, new BsonInt32(value)); + } + + /** + * Assumes valid documents (or not set) on path. No-op if value is null. + */ + private static void putAtPath(final BsonDocument d, final String path, @Nullable final BsonValue value) { + if (value == null) { + return; + } + String[] split = path.split("\\.", 2); + String first = split[0]; + if (split.length == 1) { + d.append(first, value); + } else { + BsonDocument child; + if (d.containsKey(first)) { + child = d.getDocument(first); + } else { + child = new BsonDocument(); + d.append(first, child); + } + String rest = split[1]; + putAtPath(child, rest, value); + } + } + + private static void tryWithLimit(final BsonDocument document, final Consumer modifier) { + try { + BsonDocument temp = document.clone(); + modifier.accept(temp); + if (!clientMetadataDocumentTooLarge(temp)) { + modifier.accept(document); + } + } catch (Exception e) { + // do nothing. This could be a SecurityException, or any other issue while building the document + } + } + + static boolean clientMetadataDocumentTooLarge(final BsonDocument document) { + BasicOutputBuffer buffer = new BasicOutputBuffer(MAXIMUM_CLIENT_METADATA_ENCODED_SIZE); + new BsonDocumentCodec().encode(new BsonBinaryWriter(buffer), document, EncoderContext.builder().build()); + return buffer.getPosition() > MAXIMUM_CLIENT_METADATA_ENCODED_SIZE; + } + + private enum ContainerRuntime { + DOCKER("docker") { + @Override + boolean isCurrentRuntimeContainer() { + try { + return Files.exists(get(File.separator + ".dockerenv")); + } catch (Exception e) { + return false; + // NOOP. This could be a SecurityException. 
+ } + } + }, + UNKNOWN(null); + + @Nullable + private final String name; + + ContainerRuntime(@Nullable final String name) { + this.name = name; + } + + @Nullable + public String getName() { + return name; + } + + boolean isCurrentRuntimeContainer() { + return false; + } + + static ClientMetadata.ContainerRuntime determineExecutionContainer() { + for (ClientMetadata.ContainerRuntime allegedContainer : ClientMetadata.ContainerRuntime.values()) { + if (allegedContainer.isCurrentRuntimeContainer()) { + return allegedContainer; + } + } + return UNKNOWN; + } + } + + private enum Orchestrator { + K8S("kubernetes") { + @Override + boolean isCurrentOrchestrator() { + return FaasEnvironment.getEnv("KUBERNETES_SERVICE_HOST") != null; + } + }, + UNKNOWN(null); + + @Nullable + private final String name; + + Orchestrator(@Nullable final String name) { + this.name = name; + } + + @Nullable + public String getName() { + return name; + } + + boolean isCurrentOrchestrator() { + return false; + } + + static ClientMetadata.Orchestrator determineExecutionOrchestrator() { + for (ClientMetadata.Orchestrator alledgedOrchestrator : ClientMetadata.Orchestrator.values()) { + if (alledgedOrchestrator.isCurrentOrchestrator()) { + return alledgedOrchestrator; + } + } + return UNKNOWN; + } + } + + private static String listToString(final List listOfStrings) { + StringBuilder stringBuilder = new StringBuilder(); + int i = 0; + for (String val : listOfStrings) { + if (i > 0) { + stringBuilder.append(SEPARATOR); + } + stringBuilder.append(val); + i++; + } + return stringBuilder.toString(); + } + + @VisibleForTesting(otherwise = VisibleForTesting.AccessModifier.PRIVATE) + public static String getOperatingSystemType(final String operatingSystemName) { + if (nameStartsWith(operatingSystemName, "linux")) { + return "Linux"; + } else if (nameStartsWith(operatingSystemName, "mac")) { + return "Darwin"; + } else if (nameStartsWith(operatingSystemName, "windows")) { + return "Windows"; + } else if (nameStartsWith(operatingSystemName, "hp-ux", "aix", "irix", "solaris", "sunos")) { + return "Unix"; + } else { + return "unknown"; + } + } + + private static String getOperatingSystemName() { + return getProperty("os.name", "unknown"); + } + + private static boolean nameStartsWith(final String name, final String... prefixes) { + for (String prefix : prefixes) { + if (name.toLowerCase().startsWith(prefix.toLowerCase())) { + return true; + } + } + return false; + } + +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/Cluster.java b/driver-core/src/main/com/mongodb/internal/connection/Cluster.java new file mode 100644 index 00000000000..ba154b48308 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/Cluster.java @@ -0,0 +1,105 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
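
Editor's note: the dotted-path helpers and the 512-byte cap in `ClientMetadata` above are the core of the metadata document construction. Below is a minimal, self-contained sketch of the same idea; the helper names mirror the private methods in `ClientMetadata` but are reimplemented here for illustration, not taken from the driver's API.

```java
import org.bson.BsonDocument;
import org.bson.BsonString;
import org.bson.BsonBinaryWriter;
import org.bson.codecs.BsonDocumentCodec;
import org.bson.codecs.EncoderContext;
import org.bson.io.BasicOutputBuffer;

public class MetadataSketch {
    static final int LIMIT = 512; // same cap as MAXIMUM_CLIENT_METADATA_ENCODED_SIZE

    // Mirrors putAtPath: "driver.name" becomes {driver: {name: ...}}
    static void putAtPath(BsonDocument d, String path, String value) {
        String[] split = path.split("\\.", 2);
        if (split.length == 1) {
            d.append(split[0], new BsonString(value));
        } else {
            BsonDocument child = d.containsKey(split[0]) ? d.getDocument(split[0]) : new BsonDocument();
            d.append(split[0], child);
            putAtPath(child, split[1], value);
        }
    }

    // Mirrors clientMetadataDocumentTooLarge: encode to BSON and compare against the cap
    static boolean tooLarge(BsonDocument document) {
        BasicOutputBuffer buffer = new BasicOutputBuffer(LIMIT);
        new BsonDocumentCodec().encode(new BsonBinaryWriter(buffer), document, EncoderContext.builder().build());
        return buffer.getPosition() > LIMIT;
    }

    public static void main(String[] args) {
        BsonDocument metadata = new BsonDocument();
        putAtPath(metadata, "driver.name", "mongo-java-driver");
        putAtPath(metadata, "driver.version", "5.x");
        putAtPath(metadata, "os.type", "Linux");
        System.out.println(metadata.toJson()); // {"driver": {"name": ..., "version": ...}, "os": {"type": "Linux"}}
        System.out.println(tooLarge(metadata)); // false for this small document
    }
}
```
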
+ */ + +package com.mongodb.internal.connection; + + +import com.mongodb.ServerAddress; +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ClusterSettings; +import com.mongodb.event.ServerDescriptionChangedEvent; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import com.mongodb.selector.ServerSelector; + +import java.io.Closeable; + +/** + * Represents a cluster of MongoDB servers. Implementations can define the behaviour depending upon the type of cluster. + * + *

<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public interface Cluster extends Closeable { + + ClusterSettings getSettings(); + + + ClusterId getClusterId(); + + ServersSnapshot getServersSnapshot(Timeout serverSelectionTimeout, TimeoutContext timeoutContext); + + /** + * Get the current description of this cluster. + * + * @return the current ClusterDescription representing the current state of the cluster. + */ + ClusterDescription getCurrentDescription(); + + /** + * Get the {@link ClusterClock} from which one may get the last seen cluster time. + */ + ClusterClock getClock(); + + ClientMetadata getClientMetadata(); + + ServerTuple selectServer(ServerSelector serverSelector, OperationContext operationContext); + + void selectServerAsync(ServerSelector serverSelector, OperationContext operationContext, + SingleResultCallback callback); + + /** + * Closes connections to the servers in the cluster. After this is called, this cluster instance can no longer be used. + */ + void close(); + + /** + * Whether all the servers in the cluster are closed or not. + * + * @return true if all the servers in this cluster have been closed + */ + boolean isClosed(); + + /** + * Does the supplied {@code action} while holding a reentrant cluster-wide lock. + * + * @param action The action to {@linkplain Runnable#run() do}. + */ + void withLock(Runnable action); + + /** + * This method allows {@link Server}s to notify the {@link Cluster} about changes in their state as per the + *
+ * Server Discovery And Monitoring specification. + */ + void onChange(ServerDescriptionChangedEvent event); + + /** + * A non-atomic snapshot of the servers in a {@link Cluster}. + */ + @ThreadSafe + interface ServersSnapshot { + @Nullable + Server getServer(ServerAddress serverAddress); + + default boolean containsServer(final ServerAddress serverAddress) { + return getServer(serverAddress) != null; + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ClusterClock.java b/driver-core/src/main/com/mongodb/internal/connection/ClusterClock.java new file mode 100644 index 00000000000..674e434b199 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/ClusterClock.java @@ -0,0 +1,58 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonTimestamp; + +import java.util.concurrent.locks.ReentrantLock; + +import static com.mongodb.internal.Locks.withInterruptibleLock; + +/** + *
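
Editor's note: `ServersSnapshot` in the `Cluster` interface above declares a single abstract method, so a map-backed snapshot can be expressed with a method reference, and `containsServer` then comes for free from the default method. A sketch, assuming access to this internal package and a hypothetical backing map:

```java
// `byAddress` is a hypothetical map of known servers, not part of the driver
Map<ServerAddress, Server> byAddress = new HashMap<>();
Cluster.ServersSnapshot snapshot = byAddress::get; // getServer(ServerAddress) via Map.get
// containsServer is the default method, implemented in terms of getServer
boolean known = snapshot.containsServer(new ServerAddress("localhost", 27017));
```
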

<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class ClusterClock { + private static final String CLUSTER_TIME_KEY = "clusterTime"; + private final ReentrantLock lock = new ReentrantLock(); + private BsonDocument clusterTime; + + public BsonDocument getCurrent() { + return withInterruptibleLock(lock, () -> clusterTime); + } + + public BsonTimestamp getClusterTime() { + return withInterruptibleLock(lock, () -> clusterTime != null ? clusterTime.getTimestamp(CLUSTER_TIME_KEY) : null); + } + + public void advance(@Nullable final BsonDocument other) { + withInterruptibleLock(lock, () -> this.clusterTime = greaterOf(other)); + } + + public BsonDocument greaterOf(@Nullable final BsonDocument other) { + return withInterruptibleLock(lock, () -> { + if (other == null) { + return clusterTime; + } else if (clusterTime == null) { + return other; + } else { + return other.getTimestamp(CLUSTER_TIME_KEY).compareTo(clusterTime.getTimestamp(CLUSTER_TIME_KEY)) > 0 ? other : clusterTime; + } + }); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ClusterClockAdvancingSessionContext.java b/driver-core/src/main/com/mongodb/internal/connection/ClusterClockAdvancingSessionContext.java new file mode 100644 index 00000000000..0b297e4b4e3 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/ClusterClockAdvancingSessionContext.java @@ -0,0 +1,139 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.ReadConcern; +import com.mongodb.internal.session.SessionContext; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonTimestamp; + +/** + *
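
Editor's note: `ClusterClock.greaterOf` above keeps whichever `$clusterTime` document carries the later timestamp. A runnable sketch of that comparison, using the `{"clusterTime": <timestamp>}` shape the clock assumes:

```java
import org.bson.BsonDocument;
import org.bson.BsonTimestamp;

public class ClusterTimeDemo {
    public static void main(String[] args) {
        BsonDocument older = new BsonDocument("clusterTime", new BsonTimestamp(1000, 1));
        BsonDocument newer = new BsonDocument("clusterTime", new BsonTimestamp(1000, 2));
        // Mirrors greaterOf: compare the embedded timestamps, keep the larger document
        BsonDocument kept = newer.getTimestamp("clusterTime")
                .compareTo(older.getTimestamp("clusterTime")) > 0 ? newer : older;
        System.out.println(kept == newer); // true
    }
}
```
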

<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class ClusterClockAdvancingSessionContext implements SessionContext { + + private final SessionContext wrapped; + private final ClusterClock clusterClock; + + public ClusterClockAdvancingSessionContext(final SessionContext wrapped, final ClusterClock clusterClock) { + this.wrapped = wrapped; + this.clusterClock = clusterClock; + } + + @Override + public boolean hasSession() { + return wrapped.hasSession(); + } + + @Override + public boolean isImplicitSession() { + return wrapped.isImplicitSession(); + } + + @Override + public BsonDocument getSessionId() { + return wrapped.getSessionId(); + } + + @Override + public boolean isCausallyConsistent() { + return wrapped.isCausallyConsistent(); + } + + @Override + public long getTransactionNumber() { + return wrapped.getTransactionNumber(); + } + + @Override + public long advanceTransactionNumber() { + return wrapped.advanceTransactionNumber(); + } + + @Override + public boolean notifyMessageSent() { + return wrapped.notifyMessageSent(); + } + + @Override + public BsonTimestamp getOperationTime() { + return wrapped.getOperationTime(); + } + + @Override + public void advanceOperationTime(@Nullable final BsonTimestamp operationTime) { + wrapped.advanceOperationTime(operationTime); + } + + @Override + public BsonDocument getClusterTime() { + return clusterClock.greaterOf(wrapped.getClusterTime()); + } + + @Override + public void advanceClusterTime(@Nullable final BsonDocument clusterTime) { + wrapped.advanceClusterTime(clusterTime); + clusterClock.advance(clusterTime); + } + + @Override + public boolean isSnapshot() { + return wrapped.isSnapshot(); + } + + @Override + public void setSnapshotTimestamp(@Nullable final BsonTimestamp snapshotTimestamp) { + wrapped.setSnapshotTimestamp(snapshotTimestamp); + } + + @Override + @Nullable + public BsonTimestamp getSnapshotTimestamp() { + return wrapped.getSnapshotTimestamp(); + } + + @Override + public boolean hasActiveTransaction() { + return wrapped.hasActiveTransaction(); + } + + @Override + public ReadConcern getReadConcern() { + return wrapped.getReadConcern(); + } + + @Override + public void setRecoveryToken(final BsonDocument recoveryToken) { + wrapped.setRecoveryToken(recoveryToken); + } + + @Override + public void clearTransactionContext() { + wrapped.clearTransactionContext(); + } + + @Override + public void markSessionDirty() { + wrapped.markSessionDirty(); + } + + @Override + public boolean isSessionMarkedDirty() { + return wrapped.isSessionMarkedDirty(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ClusterDescriptionHelper.java b/driver-core/src/main/com/mongodb/internal/connection/ClusterDescriptionHelper.java new file mode 100644 index 00000000000..f4cc96258a0 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/ClusterDescriptionHelper.java @@ -0,0 +1,143 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
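
Editor's note: the decorator above forwards every `SessionContext` call to the wrapped instance while tying cluster-time reads and writes to the shared clock. A usage sketch, where `sessionContext`, `clusterClock`, and `gossipedClusterTime` are assumed to already exist:

```java
SessionContext tracked = new ClusterClockAdvancingSessionContext(sessionContext, clusterClock);
tracked.advanceClusterTime(gossipedClusterTime); // advances both the session and the shared clock
BsonDocument effective = tracked.getClusterTime(); // the greater of the session's and the clock's time
```
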
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.ServerAddress; +import com.mongodb.TagSet; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.lang.Nullable; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.Set; +import java.util.TreeSet; + +/** + *

<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class ClusterDescriptionHelper { + + /** + * Returns the Set of all server descriptions in this cluster, sorted by the String value of the ServerAddress of each one. + * + * @return the set of server descriptions + */ + public static Set getAll(final ClusterDescription clusterDescription) { + Set serverDescriptionSet = new TreeSet<>(Comparator.comparing((ServerDescription o) -> + o.getAddress().getHost()).thenComparingInt(o -> o.getAddress().getPort())); + serverDescriptionSet.addAll(clusterDescription.getServerDescriptions()); + return Collections.unmodifiableSet(serverDescriptionSet); + } + + /** + * Returns the ServerDescription for the server at the given address + * + * @param serverAddress the ServerAddress for a server in this cluster + * @return the ServerDescription for this server + */ + @Nullable + public static ServerDescription getByServerAddress(final ClusterDescription clusterDescription, final ServerAddress serverAddress) { + for (final ServerDescription cur : clusterDescription.getServerDescriptions()) { + if (cur.isOk() && cur.getAddress().equals(serverAddress)) { + return cur; + } + } + return null; + } + + /** + * While it may seem counter-intuitive that a MongoDB cluster can have more than one primary, it can in the case where the client's view + * of the cluster is a set of mongos servers, any of which can serve as the primary. + * + * @return a list of servers that can act as primaries + */ + public static List getPrimaries(final ClusterDescription clusterDescription) { + return getServersByPredicate(clusterDescription, ServerDescription::isPrimary); + } + + /** + * Get a list of all the secondaries in this cluster + * + * @return a List of ServerDescriptions of all the secondaries this cluster is currently aware of + */ + public static List getSecondaries(final ClusterDescription clusterDescription) { + return getServersByPredicate(clusterDescription, ServerDescription::isSecondary); + } + + /** + * Get a list of all the secondaries in this cluster that match a given TagSet + * + * @param tagSet a Set of replica set tags + * @return a List of ServerDescriptions of all the secondaries this cluster that match all of the given tags + */ + public static List getSecondaries(final ClusterDescription clusterDescription, final TagSet tagSet) { + return getServersByPredicate(clusterDescription, serverDescription -> + serverDescription.isSecondary() && serverDescription.hasTags(tagSet)); + } + + /** + * Gets a list of ServerDescriptions for all the servers in this cluster which are currently accessible. + * + * @return a List of ServerDescriptions for all servers that have a status of OK + */ + public static List getAny(final ClusterDescription clusterDescription) { + return getServersByPredicate(clusterDescription, ServerDescription::isOk); + } + + /** + * Gets a list of all the primaries and secondaries in this cluster. + * + * @return a list of ServerDescriptions for all primary and secondary servers + */ + public static List getAnyPrimaryOrSecondary(final ClusterDescription clusterDescription) { + return getServersByPredicate(clusterDescription, serverDescription -> + serverDescription.isPrimary() || serverDescription.isSecondary()); + } + + /** + * Gets a list of all the primaries and secondaries in this cluster that match the given replica set tags. 
+ * + * @param tagSet a Set of replica set tags + * @return a list of ServerDescriptions for all primary and secondary servers that contain all of the given tags + */ + public static List getAnyPrimaryOrSecondary(final ClusterDescription clusterDescription, final TagSet tagSet) { + return getServersByPredicate(clusterDescription, serverDescription -> + (serverDescription.isPrimary() || serverDescription.isSecondary()) && serverDescription.hasTags(tagSet)); + } + + public interface Predicate { + boolean apply(ServerDescription serverDescription); + } + + public static List getServersByPredicate(final ClusterDescription clusterDescription, final Predicate predicate) { + List membersByTag = new ArrayList<>(); + + for (final ServerDescription cur : clusterDescription.getServerDescriptions()) { + if (predicate.apply(cur)) { + membersByTag.add(cur); + } + } + + return membersByTag; + } + + private ClusterDescriptionHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ClusterableServer.java b/driver-core/src/main/com/mongodb/internal/connection/ClusterableServer.java new file mode 100644 index 00000000000..ef3c383ab2f --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/ClusterableServer.java @@ -0,0 +1,61 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoException; + +import java.util.List; + +import static java.util.Arrays.asList; + +/** + * A logical connection to a MongoDB server that supports clustering along with other servers. + */ +interface ClusterableServer extends Server { + + List SHUTDOWN_CODES = asList(91, 11600); + + /** + * Reset server description to connecting state + */ + void resetToConnecting(MongoException cause); + + /** + * Invalidate the description of this server. Implementation of this method should not block, but rather trigger an asynchronous + * attempt to connect with the server in order to determine its current status. + */ + void invalidate(MongoException cause); + + /** + *
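
Editor's note: `getServersByPredicate` is the primitive behind all of the convenience filters in `ClusterDescriptionHelper` above. A usage sketch, assuming an existing `ClusterDescription` named `description`:

```java
List<ServerDescription> secondaries =
        ClusterDescriptionHelper.getServersByPredicate(description, ServerDescription::isSecondary);
// Equivalent to the convenience method:
List<ServerDescription> same = ClusterDescriptionHelper.getSecondaries(description);
```
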

<p>Closes the server. Instances that have been closed will no longer be available for use.</p>
+ * + *

<p>Implementations should ensure that this method can be called multiple times with no ill effects.</p>
+ */ + void close(); + + /** + * Returns true if the server is closed, false otherwise. + * + * @return whether the server is closed + */ + boolean isClosed(); + + /** + * Attempt to connect to the server. + */ + void connect(); +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ClusterableServerFactory.java b/driver-core/src/main/com/mongodb/internal/connection/ClusterableServerFactory.java new file mode 100644 index 00000000000..b5b1156cdd9 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/ClusterableServerFactory.java @@ -0,0 +1,29 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.ServerAddress; +import com.mongodb.connection.ServerSettings; + +/** + *

<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public interface ClusterableServerFactory { + ClusterableServer create(Cluster cluster, ServerAddress serverAddress); + + ServerSettings getSettings(); +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/CommandEventSender.java b/driver-core/src/main/com/mongodb/internal/connection/CommandEventSender.java new file mode 100644 index 00000000000..b9aea61cc7a --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/CommandEventSender.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +interface CommandEventSender { + void sendStartedEvent(); + + void sendFailedEvent(Throwable t); + + void sendSucceededEvent(ResponseBuffers responseBuffers); + + void sendSucceededEventForOneWayCommand(); +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/CommandHelper.java b/driver-core/src/main/com/mongodb/internal/connection/CommandHelper.java new file mode 100644 index 00000000000..fea3ddcd0e4 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/CommandHelper.java @@ -0,0 +1,136 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoServerException; +import com.mongodb.ServerApi; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.validator.NoOpFieldNameValidator; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonInt64; +import org.bson.BsonValue; +import org.bson.codecs.BsonDocumentCodec; + +import java.util.Locale; + +import static com.mongodb.ReadPreference.primary; +import static com.mongodb.assertions.Assertions.assertNotNull; + +/** + *

<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class CommandHelper { + + static final String HELLO = "hello"; + static final String LEGACY_HELLO = "isMaster"; + static final String LEGACY_HELLO_LOWER = LEGACY_HELLO.toLowerCase(Locale.ROOT); + + static BsonDocument executeCommand(final String database, final BsonDocument command, final ClusterConnectionMode clusterConnectionMode, + @Nullable final ServerApi serverApi, final InternalConnection internalConnection, final OperationContext operationContext) { + return sendAndReceive(database, command, clusterConnectionMode, serverApi, internalConnection, operationContext); + } + + static BsonDocument executeCommandWithoutCheckingForFailure(final String database, final BsonDocument command, + final ClusterConnectionMode clusterConnectionMode, @Nullable final ServerApi serverApi, + final InternalConnection internalConnection, final OperationContext operationContext) { + try { + return executeCommand(database, command, clusterConnectionMode, serverApi, internalConnection, operationContext); + } catch (MongoServerException e) { + return new BsonDocument(); + } + } + + static void executeCommandAsync(final String database, + final BsonDocument command, + final ClusterConnectionMode clusterConnectionMode, + @Nullable final ServerApi serverApi, + final InternalConnection internalConnection, + final OperationContext operationContext, + final SingleResultCallback callback) { + internalConnection.sendAndReceiveAsync( + getCommandMessage(database, command, internalConnection, clusterConnectionMode, serverApi), + new BsonDocumentCodec(), operationContext, (result, t) -> { + if (t != null) { + callback.onResult(null, t); + } else { + callback.onResult(result, null); + } + }); + } + + static boolean isCommandOk(final BsonDocument response) { + if (!response.containsKey("ok")) { + return false; + } + BsonValue okValue = response.get("ok"); + if (okValue.isBoolean()) { + return okValue.asBoolean().getValue(); + } else if (okValue.isNumber()) { + return okValue.asNumber().intValue() == 1; + } else { + return false; + } + } + + private static BsonDocument sendAndReceive(final String database, final BsonDocument command, + final ClusterConnectionMode clusterConnectionMode, + @Nullable final ServerApi serverApi, + final InternalConnection internalConnection, + final OperationContext operationContext) { + return assertNotNull( + internalConnection.sendAndReceive( + getCommandMessage(database, command, internalConnection, clusterConnectionMode, serverApi), + new BsonDocumentCodec(), operationContext) + ); + } + + private static CommandMessage getCommandMessage(final String database, final BsonDocument command, + final InternalConnection internalConnection, + final ClusterConnectionMode clusterConnectionMode, + @Nullable final ServerApi serverApi) { + return new CommandMessage(database, command, NoOpFieldNameValidator.INSTANCE, primary(), + MessageSettings + .builder() + // Note: server version will be 0.0 at this point when called from InternalConnectionInitializer, + // which means OP_MSG will not be used + .maxWireVersion(internalConnection.getDescription().getMaxWireVersion()) + .serverType(internalConnection.getDescription().getServerType()) + .cryptd(internalConnection.getInitialServerDescription().isCryptd()) + .build(), + clusterConnectionMode, serverApi); + } + + + /** + * Appends a user-defined maxTimeMS to the command if CSOT is not enabled. 
+ * This is necessary when maxTimeMS must be explicitly set on the command being explained, + * rather than appending it lazily to the explain command in the {@link CommandMessage} via {@link TimeoutContext#setMaxTimeOverride(long)}. + * This ensures backwards compatibility with pre-CSOT behavior. + */ + public static void applyMaxTimeMS(final TimeoutContext timeoutContext, final BsonDocument command) { + if (!timeoutContext.hasTimeoutMS()) { + command.append("maxTimeMS", new BsonInt64(timeoutContext.getTimeoutSettings().getMaxTimeMS())); + timeoutContext.disableMaxTimeOverride(); + } + } + + private CommandHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/CommandMessage.java b/driver-core/src/main/com/mongodb/internal/connection/CommandMessage.java new file mode 100644 index 00000000000..6439cf88a0d --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/CommandMessage.java @@ -0,0 +1,436 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoClientException; +import com.mongodb.MongoInternalException; +import com.mongodb.MongoNamespace; +import com.mongodb.ReadPreference; +import com.mongodb.ServerApi; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.connection.MessageSequences.EmptyMessageSequences; +import com.mongodb.internal.session.SessionContext; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonBinaryWriter; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonElement; +import org.bson.BsonInt64; +import org.bson.BsonString; +import org.bson.ByteBuf; +import org.bson.FieldNameValidator; +import org.bson.io.BsonOutput; + +import java.io.ByteArrayOutputStream; +import java.io.UnsupportedEncodingException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; + +import static com.mongodb.ReadPreference.primary; +import static com.mongodb.ReadPreference.primaryPreferred; +import static com.mongodb.assertions.Assertions.assertFalse; +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.assertions.Assertions.fail; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.connection.ClusterConnectionMode.LOAD_BALANCED; +import static com.mongodb.connection.ClusterConnectionMode.SINGLE; +import static com.mongodb.connection.ServerType.SHARD_ROUTER; +import static com.mongodb.connection.ServerType.STANDALONE; +import static com.mongodb.internal.connection.BsonWriterHelper.appendElementsToDocument; +import static com.mongodb.internal.connection.BsonWriterHelper.backpatchLength; +import static com.mongodb.internal.connection.BsonWriterHelper.createBsonBinaryWriter; +import static 
com.mongodb.internal.connection.BsonWriterHelper.encodeUsingRegistry; +import static com.mongodb.internal.connection.BsonWriterHelper.writeDocumentsOfDualMessageSequences; +import static com.mongodb.internal.connection.BsonWriterHelper.writePayload; +import static com.mongodb.internal.connection.ByteBufBsonDocument.createList; +import static com.mongodb.internal.connection.ByteBufBsonDocument.createOne; +import static com.mongodb.internal.connection.ReadConcernHelper.getReadConcernDocument; +import static com.mongodb.internal.operation.ServerVersionHelper.UNKNOWN_WIRE_VERSION; + +/** + * A command message that uses OP_MSG or OP_QUERY to send the command. + * + *
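
Editor's note: `isCommandOk` in `CommandHelper` above accepts both boolean and numeric forms of the server's `ok` field. A sketch of the cases it distinguishes (the method is package-private, so this assumes code in the same package):

```java
CommandHelper.isCommandOk(BsonDocument.parse("{ok: 1.0}"));  // true: numeric form, intValue() == 1
CommandHelper.isCommandOk(BsonDocument.parse("{ok: true}")); // true: boolean form
CommandHelper.isCommandOk(BsonDocument.parse("{ok: 0}"));    // false: numeric, but not 1
CommandHelper.isCommandOk(BsonDocument.parse("{}"));         // false: no "ok" field at all
```
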

<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class CommandMessage extends RequestMessage { + /** + * Specifies that the `OP_MSG` section payload is a BSON document. + */ + private static final byte PAYLOAD_TYPE_0_DOCUMENT = 0; + /** + * Specifies that the `OP_MSG` section payload is a sequence of BSON documents. + */ + private static final byte PAYLOAD_TYPE_1_DOCUMENT_SEQUENCE = 1; + + private static final int UNINITIALIZED_POSITION = -1; + + private final BsonDocument command; + private final FieldNameValidator commandFieldNameValidator; + private final ReadPreference readPreference; + private final boolean exhaustAllowed; + private final MessageSequences sequences; + private final boolean responseExpected; + private final String database; + private int firstDocumentPosition = UNINITIALIZED_POSITION; + + /** + * {@code null} iff either {@link #sequences} is not of the {@link DualMessageSequences} type, + * or it is of that type, but it has not been {@linkplain #encodeMessageBody(ByteBufferBsonOutput, OperationContext) encoded}. + */ + @Nullable + private Boolean dualMessageSequencesRequireResponse; + private final ClusterConnectionMode clusterConnectionMode; + private final ServerApi serverApi; + + CommandMessage(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, + final ReadPreference readPreference, final MessageSettings settings, final ClusterConnectionMode clusterConnectionMode, + @Nullable final ServerApi serverApi) { + this(database, command, commandFieldNameValidator, readPreference, settings, true, EmptyMessageSequences.INSTANCE, + clusterConnectionMode, serverApi); + } + + CommandMessage(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, + final ReadPreference readPreference, final MessageSettings settings, final boolean exhaustAllowed, + final ClusterConnectionMode clusterConnectionMode, @Nullable final ServerApi serverApi) { + this(database, command, commandFieldNameValidator, readPreference, settings, true, exhaustAllowed, EmptyMessageSequences.INSTANCE, + clusterConnectionMode, serverApi); + } + + CommandMessage(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, + final ReadPreference readPreference, final MessageSettings settings, final boolean responseExpected, + final MessageSequences sequences, + final ClusterConnectionMode clusterConnectionMode, @Nullable final ServerApi serverApi) { + this(database, command, commandFieldNameValidator, readPreference, settings, responseExpected, false, + sequences, clusterConnectionMode, serverApi); + } + + CommandMessage(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, + final ReadPreference readPreference, final MessageSettings settings, + final boolean responseExpected, final boolean exhaustAllowed, + final MessageSequences sequences, + final ClusterConnectionMode clusterConnectionMode, @Nullable final ServerApi serverApi) { + super(getOpCode(settings, clusterConnectionMode, serverApi), settings); + this.database = database; + this.command = command; + this.commandFieldNameValidator = commandFieldNameValidator; + this.readPreference = readPreference; + this.responseExpected = responseExpected; + this.dualMessageSequencesRequireResponse = null; + this.exhaustAllowed = exhaustAllowed; + this.sequences = sequences; + this.clusterConnectionMode = notNull("clusterConnectionMode", clusterConnectionMode); + this.serverApi = serverApi; + assertTrue(useOpMsg() || 
responseExpected); + } + + /** + * Create a BsonDocument representing the logical document encoded by an OP_MSG. + *
<p>
+ * The returned document will contain all the fields from the `PAYLOAD_TYPE_0_DOCUMENT` section, as well as all fields represented by + * `PAYLOAD_TYPE_1_DOCUMENT_SEQUENCE` sections. + */ + BsonDocument getCommandDocument(final ByteBufferBsonOutput bsonOutput) { + List byteBuffers = bsonOutput.getByteBuffers(); + try { + CompositeByteBuf byteBuf = new CompositeByteBuf(byteBuffers); + try { + byteBuf.position(firstDocumentPosition); + ByteBufBsonDocument byteBufBsonDocument = createOne(byteBuf); + + // If true, it means there is at least one `PAYLOAD_TYPE_1_DOCUMENT_SEQUENCE` section in the OP_MSG + if (byteBuf.hasRemaining()) { + BsonDocument commandBsonDocument = byteBufBsonDocument.toBaseBsonDocument(); + + // Each loop iteration processes one Document Sequence + // When there are no more bytes remaining, there are no more Document Sequences + while (byteBuf.hasRemaining()) { + // skip reading the payload type, we know it is `PAYLOAD_TYPE_1` + byteBuf.position(byteBuf.position() + 1); + int sequenceStart = byteBuf.position(); + int sequenceSizeInBytes = byteBuf.getInt(); + int sectionEnd = sequenceStart + sequenceSizeInBytes; + + String fieldName = getSequenceIdentifier(byteBuf); + // If this assertion fires, it means that the driver has started using document sequences for nested fields. If + // so, this method will need to change in order to append the value to the correct nested document. + assertFalse(fieldName.contains(".")); + + ByteBuf documentsByteBufSlice = byteBuf.duplicate().limit(sectionEnd); + try { + commandBsonDocument.append(fieldName, new BsonArray(createList(documentsByteBufSlice))); + } finally { + documentsByteBufSlice.release(); + } + byteBuf.position(sectionEnd); + } + return commandBsonDocument; + } else { + return byteBufBsonDocument; + } + } finally { + byteBuf.release(); + } + } finally { + byteBuffers.forEach(ByteBuf::release); + } + } + + /** + * Get the field name from a buffer positioned at the start of the document sequence identifier of an OP_MSG Section of type + * `PAYLOAD_TYPE_1_DOCUMENT_SEQUENCE`. + *
<p>
+ * Upon normal completion of the method, the buffer will be positioned at the start of the first BSON object in the sequence. + */ + private String getSequenceIdentifier(final ByteBuf byteBuf) { + ByteArrayOutputStream sequenceIdentifierBytes = new ByteArrayOutputStream(); + byte curByte = byteBuf.get(); + while (curByte != 0) { + sequenceIdentifierBytes.write(curByte); + curByte = byteBuf.get(); + } + try { + return sequenceIdentifierBytes.toString(StandardCharsets.UTF_8.name()); + } catch (UnsupportedEncodingException e) { + throw new MongoInternalException("Unexpected exception", e); + } + } + + boolean isResponseExpected() { + if (responseExpected) { + return true; + } else { + if (sequences instanceof SplittablePayload) { + SplittablePayload payload = (SplittablePayload) sequences; + return payload.isOrdered() && payload.hasAnotherSplit(); + } else if (sequences instanceof DualMessageSequences) { + return assertNotNull(dualMessageSequencesRequireResponse); + } else if (!(sequences instanceof EmptyMessageSequences)) { + fail(sequences.toString()); + } + return false; + } + } + + @Override + protected void encodeMessageBody(final ByteBufferBsonOutput bsonOutput, final OperationContext operationContext) { + this.firstDocumentPosition = useOpMsg() ? writeOpMsg(bsonOutput, operationContext) : writeOpQuery(bsonOutput); + } + + @SuppressWarnings("try") + private int writeOpMsg(final ByteBufferBsonOutput bsonOutput, final OperationContext operationContext) { + int messageStartPosition = bsonOutput.getPosition() - MESSAGE_PROLOGUE_LENGTH; + int flagPosition = bsonOutput.getPosition(); + bsonOutput.writeInt32(0); // flag bits + bsonOutput.writeByte(PAYLOAD_TYPE_0_DOCUMENT); + int commandStartPosition = bsonOutput.getPosition(); + List extraElements = getExtraElements(operationContext); + + int commandDocumentSizeInBytes = writeCommand(bsonOutput); + if (sequences instanceof SplittablePayload) { + appendElementsToDocument(bsonOutput, commandStartPosition, extraElements); + SplittablePayload payload = (SplittablePayload) sequences; + try (FinishOpMsgSectionWithPayloadType1 finishSection = startOpMsgSectionWithPayloadType1( + bsonOutput, payload.getPayloadName())) { + writePayload( + new BsonBinaryWriter(bsonOutput, payload.getFieldNameValidator()), + bsonOutput, getSettings(), messageStartPosition, payload, getSettings().getMaxDocumentSize()); + } + } else if (sequences instanceof DualMessageSequences) { + DualMessageSequences dualMessageSequences = (DualMessageSequences) sequences; + try (ByteBufferBsonOutput.Branch bsonOutputBranch2 = bsonOutput.branch(); + ByteBufferBsonOutput.Branch bsonOutputBranch1 = bsonOutput.branch()) { + DualMessageSequences.EncodeDocumentsResult encodeDocumentsResult; + try (FinishOpMsgSectionWithPayloadType1 finishSection1 = startOpMsgSectionWithPayloadType1( + bsonOutputBranch1, dualMessageSequences.getFirstSequenceId()); + FinishOpMsgSectionWithPayloadType1 finishSection2 = startOpMsgSectionWithPayloadType1( + bsonOutputBranch2, dualMessageSequences.getSecondSequenceId())) { + encodeDocumentsResult = writeDocumentsOfDualMessageSequences( + dualMessageSequences, commandDocumentSizeInBytes, bsonOutputBranch1, + bsonOutputBranch2, getSettings()); + } + dualMessageSequencesRequireResponse = encodeDocumentsResult.isServerResponseRequired(); + extraElements.addAll(encodeDocumentsResult.getExtraElements()); + appendElementsToDocument(bsonOutput, commandStartPosition, extraElements); + } + } else if (sequences instanceof EmptyMessageSequences) { + 
appendElementsToDocument(bsonOutput, commandStartPosition, extraElements); + } else { + fail(sequences.toString()); + } + + // Write the flag bits + bsonOutput.writeInt32(flagPosition, getOpMsgFlagBits()); + return commandStartPosition; + } + + private int writeOpQuery(final ByteBufferBsonOutput bsonOutput) { + bsonOutput.writeInt32(0); + bsonOutput.writeCString(new MongoNamespace(getDatabase(), "$cmd").getFullName()); + bsonOutput.writeInt32(0); + bsonOutput.writeInt32(-1); + + int commandStartPosition = bsonOutput.getPosition(); + + List elements = null; + if (serverApi != null) { + elements = new ArrayList<>(3); + addServerApiElements(elements); + } + writeCommand(bsonOutput); + appendElementsToDocument(bsonOutput, commandStartPosition, elements); + return commandStartPosition; + } + + private int getOpMsgFlagBits() { + int flagBits = 0; + if (!isResponseExpected()) { + flagBits = 1 << 1; + } + if (exhaustAllowed) { + flagBits |= 1 << 16; + } + return flagBits; + } + + private boolean isDirectConnectionToReplicaSetMember() { + return clusterConnectionMode == SINGLE + && getSettings().getServerType() != SHARD_ROUTER + && getSettings().getServerType() != STANDALONE; + } + + private boolean useOpMsg() { + return getOpCode().equals(OpCode.OP_MSG); + } + + private List getExtraElements(final OperationContext operationContext) { + SessionContext sessionContext = operationContext.getSessionContext(); + TimeoutContext timeoutContext = operationContext.getTimeoutContext(); + + ArrayList extraElements = new ArrayList<>(); + if (!getSettings().isCryptd()) { + timeoutContext.runMaxTimeMS(maxTimeMS -> + extraElements.add(new BsonElement("maxTimeMS", new BsonInt64(maxTimeMS))) + ); + } + extraElements.add(new BsonElement("$db", new BsonString(getDatabase()))); + if (sessionContext.getClusterTime() != null) { + extraElements.add(new BsonElement("$clusterTime", sessionContext.getClusterTime())); + } + if (sessionContext.hasSession()) { + if (!sessionContext.isImplicitSession() && !getSettings().isSessionSupported()) { + throw new MongoClientException("Attempting to use a ClientSession while connected to a server that doesn't support " + + "sessions"); + } + if (getSettings().isSessionSupported() && responseExpected) { + extraElements.add(new BsonElement("lsid", sessionContext.getSessionId())); + } + } + boolean firstMessageInTransaction = sessionContext.notifyMessageSent(); + + assertFalse(sessionContext.hasActiveTransaction() && sessionContext.isSnapshot()); + if (sessionContext.hasActiveTransaction()) { + extraElements.add(new BsonElement("txnNumber", new BsonInt64(sessionContext.getTransactionNumber()))); + if (firstMessageInTransaction) { + extraElements.add(new BsonElement("startTransaction", BsonBoolean.TRUE)); + addReadConcernDocument(extraElements, sessionContext); + } + extraElements.add(new BsonElement("autocommit", BsonBoolean.FALSE)); + } else if (sessionContext.isSnapshot()) { + addReadConcernDocument(extraElements, sessionContext); + } + + if (serverApi != null) { + addServerApiElements(extraElements); + } + + if (readPreference != null) { + if (!readPreference.equals(primary())) { + extraElements.add(new BsonElement("$readPreference", readPreference.toDocument())); + } else if (isDirectConnectionToReplicaSetMember()) { + extraElements.add(new BsonElement("$readPreference", primaryPreferred().toDocument())); + } + } + return extraElements; + } + + private void addServerApiElements(final List extraElements) { + extraElements.add(new BsonElement("apiVersion", new 
BsonString(serverApi.getVersion().getValue()))); + if (serverApi.getStrict().isPresent()) { + extraElements.add(new BsonElement("apiStrict", BsonBoolean.valueOf(serverApi.getStrict().get()))); + } + if (serverApi.getDeprecationErrors().isPresent()) { + extraElements.add(new BsonElement("apiDeprecationErrors", BsonBoolean.valueOf(serverApi.getDeprecationErrors().get()))); + } + } + + + private void addReadConcernDocument(final List extraElements, final SessionContext sessionContext) { + BsonDocument readConcernDocument = getReadConcernDocument(sessionContext, getSettings().getMaxWireVersion()); + if (!readConcernDocument.isEmpty()) { + extraElements.add(new BsonElement("readConcern", readConcernDocument)); + } + } + + /** + * @param sequenceId The identifier of the sequence contained in the {@code OP_MSG} section to be written. + * @see OP_MSG + */ + private FinishOpMsgSectionWithPayloadType1 startOpMsgSectionWithPayloadType1(final ByteBufferBsonOutput bsonOutput, final String sequenceId) { + bsonOutput.writeByte(PAYLOAD_TYPE_1_DOCUMENT_SEQUENCE); + int sequenceStart = bsonOutput.getPosition(); + // size to be patched back later + bsonOutput.writeInt32(0); + bsonOutput.writeCString(sequenceId); + return () -> backpatchLength(sequenceStart, bsonOutput); + } + + private static OpCode getOpCode(final MessageSettings settings, final ClusterConnectionMode clusterConnectionMode, + @Nullable final ServerApi serverApi) { + return isServerVersionKnown(settings) || clusterConnectionMode == LOAD_BALANCED || serverApi != null + ? OpCode.OP_MSG + : OpCode.OP_QUERY; + } + + private static boolean isServerVersionKnown(final MessageSettings settings) { + return settings.getMaxWireVersion() != UNKNOWN_WIRE_VERSION; + } + + /** + * Gets the database name + * + * @return the database name + */ + public String getDatabase() { + return database; + } + + private int writeCommand(final BsonOutput bsonOutput) { + BsonBinaryWriter writer = createBsonBinaryWriter(bsonOutput, commandFieldNameValidator, getSettings()); + int documentStart = bsonOutput.getPosition(); + encodeUsingRegistry(writer, command); + return bsonOutput.getPosition() - documentStart; + } + + @FunctionalInterface + private interface FinishOpMsgSectionWithPayloadType1 extends AutoCloseable { + void close(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/CommandProtocol.java b/driver-core/src/main/com/mongodb/internal/connection/CommandProtocol.java new file mode 100644 index 00000000000..2cc78497980 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/CommandProtocol.java @@ -0,0 +1,34 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.session.SessionContext; +import com.mongodb.lang.Nullable; + +/** + *
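
Editor's note: `getOpMsgFlagBits` above packs two OP_MSG wire-protocol flags into an int: moreToCome (bit 1) when no server reply is expected, and exhaustAllowed (bit 16). A sketch of the arithmetic:

```java
int flagBits = 0;
boolean responseExpected = false; // e.g. an unacknowledged write
boolean exhaustAllowed = true;    // e.g. an exhaust-enabled getMore

if (!responseExpected) {
    flagBits |= 1 << 1;  // 0x2: moreToCome, the server must not send a reply
}
if (exhaustAllowed) {
    flagBits |= 1 << 16; // 0x10000: the server may stream multiple replies
}
// flagBits == 0x10002
```
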

<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public interface CommandProtocol { + + @Nullable + T execute(InternalConnection connection); + + void executeAsync(InternalConnection connection, SingleResultCallback callback); + + CommandProtocol withSessionContext(SessionContext sessionContext); +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/CommandProtocolImpl.java b/driver-core/src/main/com/mongodb/internal/connection/CommandProtocolImpl.java new file mode 100644 index 00000000000..f0bdebdfd60 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/CommandProtocolImpl.java @@ -0,0 +1,91 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.ReadPreference; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.session.SessionContext; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.FieldNameValidator; +import org.bson.codecs.Decoder; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.connection.ProtocolHelper.getMessageSettings; + +class CommandProtocolImpl implements CommandProtocol { + private final String database; + private final BsonDocument command; + private final MessageSequences sequences; + private final ReadPreference readPreference; + private final FieldNameValidator commandFieldNameValidator; + private final Decoder commandResultDecoder; + private final boolean responseExpected; + private final ClusterConnectionMode clusterConnectionMode; + private final OperationContext operationContext; + + CommandProtocolImpl(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, + @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, final boolean responseExpected, + final MessageSequences sequences, final ClusterConnectionMode clusterConnectionMode, final OperationContext operationContext) { + notNull("database", database); + this.database = notNull("database", database); + this.command = notNull("command", command); + this.commandFieldNameValidator = notNull("commandFieldNameValidator", commandFieldNameValidator); + this.readPreference = readPreference; + this.commandResultDecoder = notNull("commandResultDecoder", commandResultDecoder); + this.responseExpected = responseExpected; + this.sequences = sequences; + this.clusterConnectionMode = notNull("clusterConnectionMode", clusterConnectionMode); + this.operationContext = operationContext; + } + + @Nullable + @Override + public T execute(final InternalConnection connection) { + return connection.sendAndReceive(getCommandMessage(connection), commandResultDecoder, operationContext); + } + + @Override + public void executeAsync(final InternalConnection connection, final SingleResultCallback callback) { + try { + connection.sendAndReceiveAsync(getCommandMessage(connection), 
commandResultDecoder, operationContext, + (result, t) -> { + if (t != null) { + callback.onResult(null, t); + } else { + callback.onResult(result, null); + } + }); + } catch (Throwable t) { + callback.onResult(null, t); + } + } + + @Override + public CommandProtocolImpl withSessionContext(final SessionContext sessionContext) { + return new CommandProtocolImpl<>(database, command, commandFieldNameValidator, readPreference, + commandResultDecoder, responseExpected, sequences, clusterConnectionMode, + operationContext.withSessionContext(sessionContext)); + } + + private CommandMessage getCommandMessage(final InternalConnection connection) { + return new CommandMessage(database, command, commandFieldNameValidator, readPreference, + getMessageSettings(connection.getDescription(), connection.getInitialServerDescription()), responseExpected, + sequences, clusterConnectionMode, operationContext.getServerApi()); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/CompositeByteBuf.java b/driver-core/src/main/com/mongodb/internal/connection/CompositeByteBuf.java new file mode 100644 index 00000000000..a3ce668040c --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/CompositeByteBuf.java @@ -0,0 +1,374 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
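
Editor's note: `executeAsync` above bridges the driver's internal `SingleResultCallback` convention, in which exactly one of (result, throwable) is non-null. A sketch of a callback written against that contract:

```java
SingleResultCallback<BsonDocument> callback = (result, t) -> {
    if (t != null) {
        t.printStackTrace();                 // failure: t carries the error, result is null
    } else {
        System.out.println(result.toJson()); // success: result is the decoded server response
    }
};
```
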
+ */ + +package com.mongodb.internal.connection; + +import org.bson.ByteBuf; + +import java.nio.Buffer; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; + +import static java.lang.String.format; +import static org.bson.assertions.Assertions.isTrueArgument; +import static org.bson.assertions.Assertions.notNull; + +class CompositeByteBuf implements ByteBuf { + private final List components; + private final AtomicInteger referenceCount = new AtomicInteger(1); + private int position; + private int limit; + + CompositeByteBuf(final List buffers) { + notNull("buffers", buffers); + isTrueArgument("buffer list not empty", !buffers.isEmpty()); + components = new ArrayList<>(buffers.size()); + + int offset = 0; + for (ByteBuf cur : buffers) { + Component component = new Component(cur.asReadOnly().order(ByteOrder.LITTLE_ENDIAN), offset); + components.add(component); + offset = component.endOffset; + } + limit = components.get(components.size() - 1).endOffset; + } + + CompositeByteBuf(final CompositeByteBuf from) { + components = from.components; + position = from.position(); + limit = from.limit(); + } + + @Override + public ByteBuf order(final ByteOrder byteOrder) { + if (byteOrder == ByteOrder.BIG_ENDIAN) { + throw new UnsupportedOperationException(format("Only %s is supported", ByteOrder.BIG_ENDIAN)); + } + return this; + } + + @Override + public int capacity() { + return components.get(components.size() - 1).endOffset; + } + + @Override + public int remaining() { + return limit() - position(); + } + + @Override + public boolean hasRemaining() { + return remaining() > 0; + } + + @Override + public int position() { + return position; + } + + @Override + public ByteBuf position(final int newPosition) { + if (newPosition < 0 || newPosition > limit) { + throw new IndexOutOfBoundsException(format("%d is out of bounds", newPosition)); + } + position = newPosition; + return this; + } + + @Override + public ByteBuf clear() { + position = 0; + limit = capacity(); + return this; + } + + @Override + public int limit() { + return limit; + } + + @Override + public byte get() { + checkIndex(position); + position += 1; + return get(position - 1); + } + + @Override + public byte get(final int index) { + checkIndex(index); + Component component = findComponent(index); + return component.buffer.get(index - component.offset); + } + + @Override + public ByteBuf get(final byte[] bytes) { + checkIndex(position, bytes.length); + position += bytes.length; + return get(position - bytes.length, bytes); + } + + @Override + public ByteBuf get(final int index, final byte[] bytes) { + return get(index, bytes, 0, bytes.length); + } + + @Override + public ByteBuf get(final byte[] bytes, final int offset, final int length) { + checkIndex(position, length); + position += length; + return get(position - length, bytes, offset, length); + } + + @Override + public ByteBuf get(final int index, final byte[] bytes, final int offset, final int length) { + checkDstIndex(index, length, offset, bytes.length); + + int i = findComponentIndex(index); + int curIndex = index; + int curOffset = offset; + int curLength = length; + while (curLength > 0) { + Component c = components.get(i); + int localLength = Math.min(curLength, c.buffer.capacity() - (curIndex - c.offset)); + c.buffer.get(curIndex - c.offset, bytes, curOffset, localLength); + curIndex += localLength; + curOffset += localLength; + curLength -= localLength; + i++; + } + + 
return this; + } + + @Override + public long getLong() { + position += 8; + return getLong(position - 8); + } + + @Override + public long getLong(final int index) { + checkIndex(index, 8); + Component component = findComponent(index); + if (index + 8 <= component.endOffset) { + return component.buffer.getLong(index - component.offset); + } else { + return getInt(index) & 0xFFFFFFFFL | (getInt(index + 4) & 0xFFFFFFFFL) << 32; + } + } + + @Override + public double getDouble() { + position += 8; + return getDouble(position - 8); + } + + @Override + public double getDouble(final int index) { + return Double.longBitsToDouble(getLong(index)); + } + + @Override + public int getInt() { + position += 4; + return getInt(position - 4); + } + + @Override + public int getInt(final int index) { + checkIndex(index, 4); + Component component = findComponent(index); + if (index + 4 <= component.endOffset) { + return component.buffer.getInt(index - component.offset); + } else { + return getShort(index) & 0xFFFF | (getShort(index + 2) & 0xFFFF) << 16; + } + } + + private int getShort(final int index) { + checkIndex(index, 2); + return (short) (get(index) & 0xff | (get(index + 1) & 0xff) << 8); + } + + @Override + public byte[] array() { + throw new UnsupportedOperationException("Not implemented yet!"); + } + + @Override + public boolean isBackedByArray() { + return false; + } + + @Override + public int arrayOffset() { + throw new UnsupportedOperationException("Not implemented yet!"); + } + + @Override + public ByteBuf limit(final int newLimit) { + if (newLimit < 0 || newLimit > capacity()) { + throw new IndexOutOfBoundsException(format("%d is out of bounds", newLimit)); + } + this.limit = newLimit; + return this; + } + + @Override + public ByteBuf put(final int index, final byte b) { + throw new UnsupportedOperationException(); + } + + @Override + public ByteBuf put(final byte[] src, final int offset, final int length) { + throw new UnsupportedOperationException(); + } + + @Override + public ByteBuf put(final byte b) { + throw new UnsupportedOperationException(); + } + + @Override + public ByteBuf putInt(final int b) { + throw new UnsupportedOperationException(); + } + + @Override + public ByteBuf putInt(final int index, final int b) { + throw new UnsupportedOperationException(); + } + + @Override + public ByteBuf putDouble(final double b) { + throw new UnsupportedOperationException(); + } + + @Override + public ByteBuf putLong(final long b) { + throw new UnsupportedOperationException(); + } + + @Override + public ByteBuf flip() { + throw new UnsupportedOperationException(); + } + + @Override + public ByteBuf asReadOnly() { + throw new UnsupportedOperationException(); + } + + @Override + public ByteBuf duplicate() { + return new CompositeByteBuf(this); + } + + @Override + public ByteBuffer asNIO() { + if (components.size() == 1) { + ByteBuffer byteBuffer = components.get(0).buffer.asNIO().duplicate(); + ((Buffer) byteBuffer).position(position).limit(limit); + return byteBuffer; + } else { + byte[] bytes = new byte[remaining()]; + get(position, bytes, 0, bytes.length); + return ByteBuffer.wrap(bytes); + } + } + + @Override + public int getReferenceCount() { + return referenceCount.get(); + } + + @Override + public ByteBuf retain() { + if (referenceCount.incrementAndGet() == 1) { + referenceCount.decrementAndGet(); + throw new IllegalStateException("Attempted to increment the reference count when it is already 0"); + } + return this; + } + + @Override + public void release() { + if 
(referenceCount.decrementAndGet() < 0) { + referenceCount.incrementAndGet(); + throw new IllegalStateException("Attempted to decrement the reference count below 0"); + } + } + + private Component findComponent(final int index) { + return components.get(findComponentIndex(index)); + } + + private int findComponentIndex(final int index) { + for (int i = components.size() - 1; i >= 0; i--) { + Component cur = components.get(i); + if (index >= cur.offset) { + return i; + } + } + throw new IndexOutOfBoundsException(format("%d is out of bounds", index)); + } + + private void checkIndex(final int index) { + ensureAccessible(); + if (index < 0 || index >= capacity()) { + throw new IndexOutOfBoundsException(format("index: %d (expected: range(0, %d))", index, capacity())); + } + } + + private void checkIndex(final int index, final int fieldLength) { + ensureAccessible(); + if (index < 0 || index > capacity() - fieldLength) { + throw new IndexOutOfBoundsException(format("index: %d, length: %d (expected: range(0, %d))", index, fieldLength, capacity())); + } + } + + private void checkDstIndex(final int index, final int length, final int dstIndex, final int dstCapacity) { + checkIndex(index, length); + if (dstIndex < 0 || dstIndex > dstCapacity - length) { + throw new IndexOutOfBoundsException(format("dstIndex: %d, length: %d (expected: range(0, %d))", dstIndex, length, dstCapacity)); + } + } + + private void ensureAccessible() { + if (referenceCount.get() == 0) { + throw new IllegalStateException("Reference count is 0"); + } + } + + private static final class Component { + private final ByteBuf buffer; + private final int length; + private final int offset; + private final int endOffset; + + Component(final ByteBuf buffer, final int offset) { + this.buffer = buffer; + length = buffer.limit() - buffer.position(); + this.offset = offset; + this.endOffset = offset + length; + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/CompressedHeader.java b/driver-core/src/main/com/mongodb/internal/connection/CompressedHeader.java new file mode 100644 index 00000000000..cb493b1f451 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/CompressedHeader.java @@ -0,0 +1,95 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoInternalException; +import org.bson.ByteBuf; + +import static com.mongodb.internal.connection.MessageHeader.MESSAGE_HEADER_LENGTH; +import static com.mongodb.internal.connection.OpCode.OP_COMPRESSED; +import static java.lang.String.format; + +// Contains the details of an OP_COMPRESSED reply from a MongoDB server. +class CompressedHeader { + /** + * The length of the reply header in the MongoDB wire protocol. 
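+     * It consists of the original opcode (an int32), the uncompressed size of the wrapped message (an int32) and the
+     * compressor id (a single byte), for a total of 9 bytes.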
+ */
+    public static final int COMPRESSED_HEADER_LENGTH = 9;
+
+    /**
+     * The length of the OP_COMPRESSED header plus the length of the standard message header.
+     */
+    public static final int TOTAL_COMPRESSED_HEADER_LENGTH = COMPRESSED_HEADER_LENGTH + MESSAGE_HEADER_LENGTH;
+
+    private final int originalOpcode;
+    private final int uncompressedSize;
+    private final byte compressorId;
+    private final MessageHeader messageHeader;
+
+    CompressedHeader(final ByteBuf header, final MessageHeader messageHeader) {
+        this.messageHeader = messageHeader;
+
+        if (messageHeader.getOpCode() != OP_COMPRESSED.getValue()) {
+            throw new MongoInternalException(format("The reply message opCode %d does not match the expected opCode %d",
+                    messageHeader.getOpCode(), OP_COMPRESSED.getValue()));
+        }
+
+        if (messageHeader.getMessageLength() < TOTAL_COMPRESSED_HEADER_LENGTH) {
+            throw new MongoInternalException(format("The reply message length %d is less than the minimum message length %d",
+                    messageHeader.getMessageLength(), TOTAL_COMPRESSED_HEADER_LENGTH));
+        }
+
+        originalOpcode = header.getInt();
+        uncompressedSize = header.getInt();
+        compressorId = header.get();
+    }
+
+    /**
+     * @return the original opcode
+     */
+    public int getOriginalOpcode() {
+        return originalOpcode;
+    }
+
+    /**
+     * @return the uncompressed size of the wrapped message
+     */
+    public int getUncompressedSize() {
+        return uncompressedSize;
+    }
+
+    /**
+     * @return the compressor identifier with which the message is compressed
+     */
+    public byte getCompressorId() {
+        return compressorId;
+    }
+
+    /**
+     * @return the size of the compressed message.
+     */
+    public int getCompressedSize() {
+        return messageHeader.getMessageLength() - COMPRESSED_HEADER_LENGTH - MESSAGE_HEADER_LENGTH;
+    }
+
+    public MessageHeader getMessageHeader() {
+        return messageHeader;
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/internal/connection/CompressedMessage.java b/driver-core/src/main/com/mongodb/internal/connection/CompressedMessage.java
new file mode 100644
index 00000000000..581d43e01ce
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/connection/CompressedMessage.java
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.connection;
+
+import org.bson.ByteBuf;
+
+import java.util.List;
+
+import static com.mongodb.internal.connection.MessageHeader.MESSAGE_HEADER_LENGTH;
+
+class CompressedMessage extends RequestMessage {
+    private final OpCode wrappedOpcode;
+    private final List<ByteBuf> wrappedMessageBuffers;
+    private final Compressor compressor;
+
+    CompressedMessage(final OpCode wrappedOpcode, final List<ByteBuf> wrappedMessageBuffers, final Compressor compressor,
+            final MessageSettings settings) {
+        super(OpCode.OP_COMPRESSED, getWrappedMessageRequestId(wrappedMessageBuffers), settings);
+        this.wrappedOpcode = wrappedOpcode;
+        this.wrappedMessageBuffers = wrappedMessageBuffers;
+        this.compressor = compressor;
+    }
+
+    @Override
+    protected void encodeMessageBody(final ByteBufferBsonOutput bsonOutput, final OperationContext operationContext) {
+        bsonOutput.writeInt32(wrappedOpcode.getValue());
+        bsonOutput.writeInt32(getWrappedMessageSize(wrappedMessageBuffers) - MESSAGE_HEADER_LENGTH);
+        bsonOutput.writeByte(compressor.getId());
+
+        getFirstWrappedMessageBuffer(wrappedMessageBuffers)
+                .position(getFirstWrappedMessageBuffer(wrappedMessageBuffers).position() + MESSAGE_HEADER_LENGTH);
+
+        compressor.compress(wrappedMessageBuffers, bsonOutput);
+    }
+
+    private static int getWrappedMessageSize(final List<ByteBuf> wrappedMessageBuffers) {
+        ByteBuf first = getFirstWrappedMessageBuffer(wrappedMessageBuffers);
+        return first.getInt(0);
+    }
+
+    private static int getWrappedMessageRequestId(final List<ByteBuf> wrappedMessageBuffers) {
+        ByteBuf first = getFirstWrappedMessageBuffer(wrappedMessageBuffers);
+        return first.getInt(4);
+    }
+
+    private static ByteBuf getFirstWrappedMessageBuffer(final List<ByteBuf> wrappedMessageBuffers) {
+        return wrappedMessageBuffers.get(0);
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/internal/connection/Compressor.java b/driver-core/src/main/com/mongodb/internal/connection/Compressor.java
new file mode 100644
index 00000000000..99005ed4ee0
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/connection/Compressor.java
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.connection;
+
+import com.mongodb.MongoInternalException;
+import org.bson.ByteBuf;
+import org.bson.io.BsonOutput;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.io.UnsupportedEncodingException;
+import java.util.List;
+
+abstract class Compressor {
+
+    static final int BUFFER_SIZE = 256;
+
+    abstract String getName();
+
+    abstract byte getId();
+
+    void compress(final List<ByteBuf> source, final BsonOutput target) {
+        BufferExposingByteArrayOutputStream baos = new BufferExposingByteArrayOutputStream(1024);
+        try (OutputStream outputStream = getOutputStream(baos)) {
+            byte[] scratch = new byte[BUFFER_SIZE];
+            for (ByteBuf cur : source) {
+                while (cur.hasRemaining()) {
+                    int numBytes = Math.min(cur.remaining(), scratch.length);
+                    cur.get(scratch, 0, numBytes);
+                    outputStream.write(scratch, 0, numBytes);
+                }
+            }
+        } catch (IOException e) {
+            throw new MongoInternalException("Unexpected IOException", e);
+        }
+        target.writeBytes(baos.getInternalBytes(), 0, baos.size());
+    }
+
+    void uncompress(final ByteBuf source, final ByteBuf target) {
+        try (InputStream inputStream = getInputStream(new ByteBufInputStream(source))) {
+            byte[] scratch = new byte[BUFFER_SIZE];
+            int numBytes = inputStream.read(scratch);
+            while (numBytes != -1) {
+                target.put(scratch, 0, numBytes);
+                numBytes = inputStream.read(scratch);
+            }
+        } catch (IOException e) {
+            throw new MongoInternalException("Unexpected IOException", e);
+        }
+    }
+
+    // override this if not overriding the compress method
+    OutputStream getOutputStream(final OutputStream source) throws IOException {
+        throw new UnsupportedEncodingException();
+    }
+
+    // override this if not overriding the uncompress method
+    InputStream getInputStream(final InputStream source) throws IOException {
+        throw new UnsupportedOperationException();
+    }
+
+    private static final class ByteBufInputStream extends InputStream {
+        private final ByteBuf source;
+
+        ByteBufInputStream(final ByteBuf source) {
+            this.source = source;
+        }
+
+        @Override
+        public int read(final byte[] bytes, final int offset, final int length) {
+            if (!source.hasRemaining()) {
+                return -1;
+            }
+
+            int bytesToRead = length > source.remaining() ? source.remaining() : length;
+            source.get(bytes, offset, bytesToRead);
+            return bytesToRead;
+        }
+
+        @Override
+        public int read() {
+            throw new UnsupportedOperationException();
+        }
+    }
+
+    // Just so we don't have to copy the buffer
+    private static final class BufferExposingByteArrayOutputStream extends ByteArrayOutputStream {
+        BufferExposingByteArrayOutputStream(final int size) {
+            super(size);
+        }
+
+        byte[] getInternalBytes() {
+            return buf;
+        }
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/internal/connection/ConcurrentPool.java b/driver-core/src/main/com/mongodb/internal/connection/ConcurrentPool.java
new file mode 100644
index 00000000000..26ba9c4f34d
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/connection/ConcurrentPool.java
@@ -0,0 +1,463 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoException; +import com.mongodb.MongoInternalException; +import com.mongodb.MongoInterruptedException; +import com.mongodb.MongoServerUnavailableException; +import com.mongodb.MongoTimeoutException; +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.time.StartTime; +import com.mongodb.lang.Nullable; + +import java.util.Deque; +import java.util.Iterator; +import java.util.concurrent.ConcurrentLinkedDeque; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Consumer; +import java.util.function.Supplier; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.Locks.lockInterruptibly; +import static com.mongodb.internal.Locks.withLock; +import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; +import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException; + +/** + * A concurrent pool implementation. + * + *

<p>This class is not part of the public API and may be removed or changed at any time</p>
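+ *
+ * <p>A minimal usage sketch (illustrative only; {@code MyConnection} and {@code connectionFactory} are hypothetical):</p>
+ * <pre>{@code
+ * ConcurrentPool<MyConnection> pool = new ConcurrentPool<>(10, connectionFactory);
+ * MyConnection connection = pool.get(5, TimeUnit.SECONDS); // throws MongoTimeoutException on timeout
+ * try {
+ *     // use the connection
+ * } finally {
+ *     pool.release(connection); // or release(connection, true) to close and discard it
+ * }
+ * }</pre>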
+ */
+public class ConcurrentPool<T> implements Pool<T> {
+    /**
+     * {@link Integer#MAX_VALUE}.
+     */
+    public static final int INFINITE_SIZE = Integer.MAX_VALUE;
+
+    private final int maxSize;
+    private final ItemFactory<T> itemFactory;
+
+    private final Deque<T> available = new ConcurrentLinkedDeque<>();
+    private final StateAndPermits stateAndPermits;
+    private final String poolClosedMessage;
+
+    /**
+     * Factory for creating and closing pooled items.
+     *
+     * @param <T> the type of the pooled items
+     */
+    public interface ItemFactory<T> {
+        T create();
+
+        void close(T t);
+
+        boolean shouldPrune(T t);
+    }
+
+    /**
+     * Initializes a new pool of objects.
+     *
+     * @param maxSize the maximum number of items to hold at any given time; must be positive
+     * @param itemFactory factory used to create and close items in the pool
+     */
+    public ConcurrentPool(final int maxSize, final ItemFactory<T> itemFactory) {
+        this(maxSize, itemFactory, "The pool is closed");
+    }
+
+    public ConcurrentPool(final int maxSize, final ItemFactory<T> itemFactory, final String poolClosedMessage) {
+        assertTrue(maxSize > 0);
+        this.maxSize = maxSize;
+        this.itemFactory = itemFactory;
+        stateAndPermits = new StateAndPermits(maxSize, this::poolClosedException);
+        this.poolClosedMessage = notNull("poolClosedMessage", poolClosedMessage);
+    }
+
+    /**
+     * Return an instance of T to the pool. This method simply calls {@code release(t, false)}.
+     * Must not throw {@link Exception}s.
+     *
+     * @param t item to return to the pool
+     */
+    @Override
+    public void release(final T t) {
+        release(t, false);
+    }
+
+    /**
+     * Returns an item to the pool: the item is put back in the pool if {@code prune} is false, and closed otherwise.
+     * Must not throw {@link Exception}s.
+     *
+     * @param t item to return to the pool
+     * @param prune true if the item should be closed, false if it should be put back in the pool
+     */
+    @Override
+    public void release(final T t, final boolean prune) {
+        if (t == null) {
+            throw new IllegalArgumentException("Can not return a null item to the pool");
+        }
+        if (stateAndPermits.closed()) {
+            close(t);
+            return;
+        }
+
+        if (prune) {
+            close(t);
+        } else {
+            available.addLast(t);
+        }
+
+        stateAndPermits.releasePermit();
+    }
+
+    /**
+     * Is equivalent to {@link #get(long, TimeUnit)} called with an infinite timeout.
+     *
+     * @return An object from the pool.
+     */
+    @Override
+    public T get() {
+        return get(-1, TimeUnit.MILLISECONDS);
+    }
+
+    /**
+     * Gets an object from the pool. Blocks until an object is available, or the specified {@code timeout} expires,
+     * or the pool is {@linkplain #close() closed}/{@linkplain #pause(Supplier) paused}.
+     *
+     * @param timeout See {@link StartTime#timeoutAfterOrInfiniteIfNegative(long, TimeUnit)}.
+     * @param timeUnit the time unit of the timeout
+     * @return An object from the pool.
+     * @throws MongoTimeoutException if the timeout has been exceeded
+     */
+    @Override
+    public T get(final long timeout, final TimeUnit timeUnit) {
+        if (!stateAndPermits.acquirePermit(timeout, timeUnit)) {
+            throw new MongoTimeoutException(String.format("Timeout waiting for a pooled item after %d %s", timeout, timeUnit));
+        }
+
+        T t = available.pollLast();
+        if (t == null) {
+            t = createNewAndReleasePermitIfFailure();
+        }
+
+        return t;
+    }
+
+    /**
+     * This method is similar to {@link #get(long, TimeUnit)} with 0 timeout.
+     * The difference is that it never creates a new element
+     * and returns {@code null} instead of throwing {@link MongoTimeoutException}.
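+     *
+     * @return an available pooled element, or {@code null} if none is immediately available or no permit can be acquired without waiting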
+ */ + @Nullable + T getImmediate() { + T element = null; + if (stateAndPermits.acquirePermitImmediate()) { + element = available.pollLast(); + if (element == null) { + stateAndPermits.releasePermit(); + } + } + return element; + } + + public void prune() { + // restrict number of iterations to the current size in order to avoid an infinite loop in the presence of concurrent releases + // back to the pool + int maxIterations = available.size(); + int numIterations = 0; + for (T cur : available) { + if (itemFactory.shouldPrune(cur) && available.remove(cur)) { + close(cur); + } + numIterations++; + if (numIterations == maxIterations) { + break; + } + } + } + + + /** + * Try to populate this pool with items so that {@link #getCount()} is not smaller than {@code minSize}. + * The {@code initAndRelease} action throwing an exception causes this method to stop and re-throw that exception. + * + * @param initAndRelease An action applied to non-{@code null} new items. + * If an exception is thrown by the action, the action must {@linkplain #release(Object, boolean) prune} the item. + * Otherwise, the action must {@linkplain #release(Object) release} the item. + */ + public void ensureMinSize(final int minSize, final Consumer initAndRelease) { + while (getCount() < minSize) { + if (!stateAndPermits.acquirePermit(0, TimeUnit.MILLISECONDS)) { + break; + } + initAndRelease.accept(createNewAndReleasePermitIfFailure()); + } + } + + private T createNewAndReleasePermitIfFailure() { + try { + T newMember = itemFactory.create(); + if (newMember == null) { + throw new MongoInternalException("The factory for the pool created a null item"); + } + return newMember; + } catch (Exception e) { + stateAndPermits.releasePermit(); + throw e; + } + } + + /** + * @param timeout See {@link StartTime#timeoutAfterOrInfiniteIfNegative(long, TimeUnit)}. + */ + @VisibleForTesting(otherwise = PRIVATE) + boolean acquirePermit(final long timeout, final TimeUnit timeUnit) { + return stateAndPermits.acquirePermit(timeout, timeUnit); + } + + /** + * Clears the pool of all objects. + * Must not throw {@link Exception}s. + */ + @Override + public void close() { + if (stateAndPermits.close()) { + Iterator iter = available.iterator(); + while (iter.hasNext()) { + T t = iter.next(); + close(t); + iter.remove(); + } + } + } + + int getMaxSize() { + return maxSize; + } + + public int getInUseCount() { + return maxSize - stateAndPermits.permits(); + } + + public int getAvailableCount() { + return available.size(); + } + + public int getCount() { + return getInUseCount() + getAvailableCount(); + } + + public String toString() { + return "pool: maxSize: " + sizeToString(maxSize) + + " availableCount " + getAvailableCount() + + " inUseCount " + getInUseCount(); + } + + /** + * Must not throw {@link Exception}s, so swallow exceptions from {@link ItemFactory#close(Object)}. 
+ */ + private void close(final T t) { + try { + itemFactory.close(t); + } catch (Exception e) { + // ItemFactory.close() really should not throw + } + } + + void ready() { + stateAndPermits.ready(); + } + + void pause(final Supplier causeSupplier) { + stateAndPermits.pause(causeSupplier); + } + + /** + * @see #isPoolClosedException(Throwable) + */ + MongoServerUnavailableException poolClosedException() { + return new MongoServerUnavailableException(poolClosedMessage); + } + + /** + * @see #poolClosedException() + */ + static boolean isPoolClosedException(final Throwable e) { + return e instanceof MongoServerUnavailableException; + } + + /** + * Package-access methods are thread-safe, + * and only they should be called outside of the {@link StateAndPermits}'s code. + */ + @ThreadSafe + private static final class StateAndPermits { + private final Supplier poolClosedExceptionSupplier; + private final ReentrantLock lock; + private final Condition permitAvailableOrClosedOrPausedCondition; + private volatile boolean paused; + private volatile boolean closed; + private final int maxPermits; + private volatile int permits; + @Nullable + private Supplier causeSupplier; + + StateAndPermits(final int maxPermits, final Supplier poolClosedExceptionSupplier) { + this.poolClosedExceptionSupplier = poolClosedExceptionSupplier; + lock = new ReentrantLock(); + permitAvailableOrClosedOrPausedCondition = lock.newCondition(); + paused = false; + closed = false; + this.maxPermits = maxPermits; + permits = maxPermits; + causeSupplier = null; + } + + int permits() { + return permits; + } + + boolean acquirePermitImmediate() { + return withLock(lock, () -> { + throwIfClosedOrPaused(); + if (permits > 0) { + //noinspection NonAtomicOperationOnVolatileField + permits--; + return true; + } else { + return false; + } + }); + } + + /** + * This method also emulates the eager {@link InterruptedException} behavior of + * {@link java.util.concurrent.Semaphore#tryAcquire(long, TimeUnit)}. + * + * @param timeout See {@link StartTime#timeoutAfterOrInfiniteIfNegative(long, TimeUnit)}. 
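+     * @return {@code true} if a permit was acquired, {@code false} if the timeout elapsed before a permit became available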
+ */ + boolean acquirePermit(final long timeout, final TimeUnit unit) throws MongoInterruptedException { + long remainingNanos = unit.toNanos(timeout); + lockInterruptibly(lock); + try { + while (permits == 0 + // the absence of short-circuiting is of importance + & !throwIfClosedOrPaused()) { + try { + if (timeout < 0 || remainingNanos == Long.MAX_VALUE) { + permitAvailableOrClosedOrPausedCondition.await(); + } else if (remainingNanos >= 0) { + remainingNanos = permitAvailableOrClosedOrPausedCondition.awaitNanos(remainingNanos); + } else { + return false; + } + } catch (InterruptedException e) { + throw interruptAndCreateMongoInterruptedException(null, e); + } + } + assertTrue(permits > 0); + //noinspection NonAtomicOperationOnVolatileField + permits--; + return true; + } finally { + lock.unlock(); + } + } + + void releasePermit() { + withLock(lock, () -> { + assertTrue(permits < maxPermits); + //noinspection NonAtomicOperationOnVolatileField + permits++; + permitAvailableOrClosedOrPausedCondition.signal(); + }); + } + + void pause(final Supplier causeSupplier) { + withLock(lock, () -> { + if (!paused) { + this.paused = true; + permitAvailableOrClosedOrPausedCondition.signalAll(); + } + this.causeSupplier = assertNotNull(causeSupplier); + }); + } + + void ready() { + if (paused) { + withLock(lock, () -> { + this.paused = false; + this.causeSupplier = null; + }); + } + } + + /** + * @return {@code true} if and only if the state changed as a result of the operation. + */ + boolean close() { + if (!closed) { + return withLock(lock, () -> { + if (!closed) { + closed = true; + permitAvailableOrClosedOrPausedCondition.signalAll(); + return true; + } + return false; + }); + } + return false; + } + + /** + * This method must be called by a {@link Thread} that holds the {@link #lock}. + * + * @return {@code false} which means that the method did not throw. + * The method returns to allow using it conveniently as part of a condition check when waiting on a {@link Condition}. + * Short-circuiting operators {@code &&} and {@code ||} must not be used with this method to ensure that it is called. + * @throws MongoServerUnavailableException If and only if {@linkplain #close() closed}. + * @throws MongoException If and only if {@linkplain #pause(Supplier) paused} + * and not {@linkplain #close() closed}. The exception is specified via the {@link #pause(Supplier)} method + * and may be a subtype of {@link MongoException}. + */ + boolean throwIfClosedOrPaused() { + if (closed) { + throw poolClosedExceptionSupplier.get(); + } + if (paused) { + throw assertNotNull(assertNotNull(causeSupplier).get()); + } + return false; + } + + boolean closed() { + return closed; + } + } + + /** + * @return {@link Integer#toString()} if {@code size} is not {@link #INFINITE_SIZE}, otherwise returns {@code "infinite"}. + */ + static String sizeToString(final int size) { + return size == INFINITE_SIZE ? "infinite" : Integer.toString(size); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/Connection.java b/driver-core/src/main/com/mongodb/internal/connection/Connection.java new file mode 100644 index 00000000000..219fb9ae6b9 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/Connection.java @@ -0,0 +1,67 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.ReadPreference; +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.binding.ReferenceCounted; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.FieldNameValidator; +import org.bson.codecs.Decoder; + + +/** + * A synchronous connection to a MongoDB server with blocking operations. + * + *

<p>This class is not part of the public API and may be removed or changed at any time</p>
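+ *
+ * <p>A minimal usage sketch (illustrative only; assumes a checked-out {@code connection}, an {@code operationContext}
+ * and a {@code fieldNameValidator} are in scope):</p>
+ * <pre>{@code
+ * BsonDocument reply = connection.command("admin",
+ *         new BsonDocument("ping", new BsonInt32(1)),
+ *         fieldNameValidator,
+ *         ReadPreference.primary(),
+ *         new BsonDocumentCodec(),
+ *         operationContext);
+ * }</pre>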
+ */
+@ThreadSafe
+public interface Connection extends ReferenceCounted {
+
+    @Override
+    Connection retain();
+
+    /**
+     * Gets the description of the connection.
+     *
+     * @return the connection description
+     */
+    ConnectionDescription getDescription();
+
+    @Nullable
+    <T> T command(String database, BsonDocument command, FieldNameValidator fieldNameValidator, @Nullable ReadPreference readPreference,
+            Decoder<T> commandResultDecoder, OperationContext operationContext);
+
+    @Nullable
+    <T> T command(String database, BsonDocument command, FieldNameValidator commandFieldNameValidator,
+            @Nullable ReadPreference readPreference, Decoder<T> commandResultDecoder, OperationContext operationContext,
+            boolean responseExpected, MessageSequences sequences);
+
+
+    enum PinningMode {
+        CURSOR,
+        TRANSACTION
+    }
+
+    /**
+     * Marks the connection as pinned. Used so that any pool timeout exceptions can include information about the pinned connections,
+     * and what they are pinned to.
+     */
+    void markAsPinned(PinningMode pinningMode);
+}
diff --git a/driver-core/src/main/com/mongodb/internal/connection/ConnectionFactory.java b/driver-core/src/main/com/mongodb/internal/connection/ConnectionFactory.java
new file mode 100644
index 00000000000..4df57216f7a
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/connection/ConnectionFactory.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.connection;
+
+import com.mongodb.connection.ClusterConnectionMode;
+
+interface ConnectionFactory {
+    Connection create(InternalConnection internalConnection, ProtocolExecutor executor, ClusterConnectionMode clusterConnectionMode);
+
+    AsyncConnection createAsync(InternalConnection internalConnection, ProtocolExecutor executor,
+            ClusterConnectionMode clusterConnectionMode);
+}
diff --git a/driver-core/src/main/com/mongodb/internal/connection/ConnectionGenerationSupplier.java b/driver-core/src/main/com/mongodb/internal/connection/ConnectionGenerationSupplier.java
new file mode 100644
index 00000000000..8f07ed506b1
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/connection/ConnectionGenerationSupplier.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.lang.NonNull; +import org.bson.types.ObjectId; + +@ThreadSafe +interface ConnectionGenerationSupplier { + int getGeneration(); + + int getGeneration(@NonNull ObjectId serviceId); +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ConnectionPool.java b/driver-core/src/main/com/mongodb/internal/connection/ConnectionPool.java new file mode 100644 index 00000000000..2129d42b941 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/ConnectionPool.java @@ -0,0 +1,80 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoConnectionPoolClearedException; +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.lang.Nullable; +import org.bson.types.ObjectId; + +import java.io.Closeable; + +/** + * An instance of an implementation must be created in the {@linkplain #invalidate(Throwable) paused} state. + */ +@ThreadSafe +interface ConnectionPool extends Closeable { + /** + * @param operationContext the operation context + * @throws MongoConnectionPoolClearedException If detects that the pool is {@linkplain #invalidate(Throwable) paused}. + */ + InternalConnection get(OperationContext operationContext) throws MongoConnectionPoolClearedException; + + /** + * Completes the {@code callback} with a {@link MongoConnectionPoolClearedException} + * if detects that the pool is {@linkplain #invalidate(Throwable) paused}. + */ + void getAsync(OperationContext operationContext, SingleResultCallback callback); + + /** + * Mark the pool as paused, unblock all threads waiting in {@link #get(OperationContext) get…} methods, unless they are blocked + * doing an IO operation, increment {@linkplain #getGeneration() generation} to lazily clear all connections managed by the pool + * (this is done via {@link #get(OperationContext) get…} and {@linkplain InternalConnection#close() check in} methods, and may also be done + * by a background task). In the paused state, connections can be created neither in the background + * nor via {@link #get(OperationContext) get…} methods. + * If the pool is paused, the method does nothing except for recording the specified {@code cause}. + * + * @see #ready() + */ + void invalidate(@Nullable Throwable cause); + + /** + * Unlike {@link #invalidate(Throwable)}, this method neither marks the pool as paused, + * nor affects the {@linkplain #getGeneration() generation}. + * + * @param generation The expected service-specific generation. + * If the expected {@code generation} does not match the current generation for the service + * identified by {@code serviceId}, the method does nothing. 
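+     * @param serviceId the id of the service whose per-service generation this invalidation applies to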
+ */ + void invalidate(ObjectId serviceId, int generation); + + /** + * Mark the pool as ready, allowing connections to be created in the background and via {@link #get(OperationContext) get…} methods. + * If the pool is ready, the method does nothing. + * + * @see #invalidate(Throwable) + */ + void ready(); + + /** + * Mark the pool as closed, release the underlying resources and render the pool unusable. + */ + void close(); + + int getGeneration(); +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/DecimalFormatHelper.java b/driver-core/src/main/com/mongodb/internal/connection/DecimalFormatHelper.java new file mode 100644 index 00000000000..472d0f692c4 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/DecimalFormatHelper.java @@ -0,0 +1,40 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.connection; + +import java.text.DecimalFormat; +import java.text.DecimalFormatSymbols; + +/** + *

<p>This class is not part of the public API and may be removed or changed at any time</p>
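+ *
+ * <p>For example, {@code DecimalFormatHelper.format("#0.00", 0.5)} yields {@code "0.50"}, using {@code '.'} as the
+ * decimal separator regardless of the default locale.</p>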
+ */ +public final class DecimalFormatHelper { + + private static final DecimalFormatSymbols DECIMAL_FORMAT_SYMBOLS = initializeDecimalFormatSymbols(); + + private DecimalFormatHelper(){ + } + + private static DecimalFormatSymbols initializeDecimalFormatSymbols() { + DecimalFormatSymbols decimalFormatSymbols = new DecimalFormatSymbols(); + decimalFormatSymbols.setDecimalSeparator('.'); + return decimalFormatSymbols; + } + + public static String format(final String pattern, final double number) { + return new DecimalFormat(pattern, DECIMAL_FORMAT_SYMBOLS).format(number); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultAuthenticator.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultAuthenticator.java new file mode 100644 index 00000000000..a9a3525a90a --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultAuthenticator.java @@ -0,0 +1,116 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.AuthenticationMechanism; +import com.mongodb.MongoException; +import com.mongodb.MongoSecurityException; +import com.mongodb.ServerApi; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonString; + +import static com.mongodb.AuthenticationMechanism.SCRAM_SHA_1; +import static com.mongodb.AuthenticationMechanism.SCRAM_SHA_256; +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static java.lang.String.format; + +class DefaultAuthenticator extends Authenticator implements SpeculativeAuthenticator { + static final int USER_NOT_FOUND_CODE = 11; + private static final BsonString DEFAULT_MECHANISM_NAME = new BsonString(SCRAM_SHA_256.getMechanismName()); + private Authenticator delegate; + + DefaultAuthenticator(final MongoCredentialWithCache credential, final ClusterConnectionMode clusterConnectionMode, + @Nullable final ServerApi serverApi) { + super(credential, clusterConnectionMode, serverApi); + isTrueArgument("unspecified authentication mechanism", credential.getAuthenticationMechanism() == null); + } + + @Override + void authenticate(final InternalConnection connection, final ConnectionDescription connectionDescription, + final OperationContext operationContext) { + try { + setDelegate(connectionDescription); + delegate.authenticate(connection, connectionDescription, operationContext); + } catch (Exception e) { + throw wrapException(e); + } + } + + @Override + void authenticateAsync(final InternalConnection connection, final ConnectionDescription connectionDescription, + final OperationContext operationContext, final SingleResultCallback callback) { + setDelegate(connectionDescription); + 
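+        // setDelegate chooses SCRAM-SHA-256 when the server advertises it via saslSupportedMechs, otherwise SCRAM-SHA-1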
delegate.authenticateAsync(connection, connectionDescription, operationContext, callback); + } + + @Override + public BsonDocument createSpeculativeAuthenticateCommand(final InternalConnection connection) { + delegate = getAuthenticatorForHello(); + return ((SpeculativeAuthenticator) delegate).createSpeculativeAuthenticateCommand(connection); + } + + @Nullable + @Override + public BsonDocument getSpeculativeAuthenticateResponse() { + if (delegate != null) { + return ((SpeculativeAuthenticator) delegate).getSpeculativeAuthenticateResponse(); + } + return null; + } + + @Override + public void setSpeculativeAuthenticateResponse(final BsonDocument response) { + ((SpeculativeAuthenticator) delegate).setSpeculativeAuthenticateResponse(response); + } + + Authenticator getAuthenticatorForHello() { + return new ScramShaAuthenticator(getMongoCredentialWithCache().withMechanism(SCRAM_SHA_256), getClusterConnectionMode(), + getServerApi()); + } + + private void setDelegate(final ConnectionDescription connectionDescription) { + if (delegate != null && ((SpeculativeAuthenticator) delegate).getSpeculativeAuthenticateResponse() != null) { + return; + } + + if (connectionDescription.getSaslSupportedMechanisms() != null) { + BsonArray saslSupportedMechs = connectionDescription.getSaslSupportedMechanisms(); + AuthenticationMechanism mechanism = saslSupportedMechs.contains(DEFAULT_MECHANISM_NAME) ? SCRAM_SHA_256 : SCRAM_SHA_1; + delegate = new ScramShaAuthenticator(getMongoCredentialWithCache().withMechanism(mechanism), getClusterConnectionMode(), + getServerApi()); + } else { + delegate = new ScramShaAuthenticator(getMongoCredentialWithCache().withMechanism(SCRAM_SHA_1), getClusterConnectionMode(), + getServerApi()); + } + } + + private MongoException wrapException(final Throwable t) { + if (t instanceof MongoSecurityException) { + return (MongoSecurityException) t; + } else if (t instanceof MongoException && ((MongoException) t).getCode() == USER_NOT_FOUND_CODE) { + return new MongoSecurityException(getMongoCredential(), format("Exception authenticating %s", getMongoCredential()), t); + } else { + return assertNotNull(MongoException.fromThrowable(t)); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultClusterFactory.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultClusterFactory.java new file mode 100644 index 00000000000..ac853cb002e --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultClusterFactory.java @@ -0,0 +1,222 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.LoggerSettings; +import com.mongodb.MongoCompressor; +import com.mongodb.MongoCredential; +import com.mongodb.MongoDriverInformation; +import com.mongodb.ServerAddress; +import com.mongodb.ServerApi; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ClusterSettings; +import com.mongodb.connection.ConnectionPoolSettings; +import com.mongodb.connection.ServerSettings; +import com.mongodb.event.ClusterListener; +import com.mongodb.event.CommandListener; +import com.mongodb.event.ServerListener; +import com.mongodb.event.ServerMonitorListener; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.lang.Nullable; +import com.mongodb.spi.dns.DnsClient; + +import java.util.List; + +import static com.mongodb.internal.connection.DefaultClusterFactory.ClusterEnvironment.detectCluster; +import static com.mongodb.internal.event.EventListenerHelper.NO_OP_CLUSTER_LISTENER; +import static com.mongodb.internal.event.EventListenerHelper.NO_OP_SERVER_LISTENER; +import static com.mongodb.internal.event.EventListenerHelper.NO_OP_SERVER_MONITOR_LISTENER; +import static com.mongodb.internal.event.EventListenerHelper.clusterListenerMulticaster; +import static com.mongodb.internal.event.EventListenerHelper.serverListenerMulticaster; +import static com.mongodb.internal.event.EventListenerHelper.serverMonitorListenerMulticaster; +import static java.lang.String.format; +import static java.util.Collections.singletonList; + +/** + * The default factory for cluster implementations. + * + *

<p>This class is not part of the public API and may be removed or changed at any time</p>
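+ *
+ * <p>Besides constructing the cluster, {@code createCluster} detects external environments from host name suffixes
+ * (for example {@code .cosmos.azure.com} or {@code .docdb.amazonaws.com}) and logs an informational message
+ * identifying them (see {@link ClusterEnvironment}).</p>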
+ */ +public final class DefaultClusterFactory { + private static final Logger LOGGER = Loggers.getLogger("client"); + + public Cluster createCluster(final ClusterSettings originalClusterSettings, final ServerSettings originalServerSettings, + final ConnectionPoolSettings connectionPoolSettings, + final InternalConnectionPoolSettings internalConnectionPoolSettings, + final TimeoutSettings clusterTimeoutSettings, + final StreamFactory streamFactory, + final TimeoutSettings heartbeatTimeoutSettings, + final StreamFactory heartbeatStreamFactory, + @Nullable final MongoCredential credential, + final LoggerSettings loggerSettings, + @Nullable final CommandListener commandListener, + @Nullable final String applicationName, + @Nullable final MongoDriverInformation mongoDriverInformation, + final List compressorList, @Nullable final ServerApi serverApi, + @Nullable final DnsClient dnsClient) { + + detectAndLogClusterEnvironment(originalClusterSettings); + + ClusterId clusterId = new ClusterId(applicationName); + ClusterSettings clusterSettings; + ServerSettings serverSettings; + + if (noClusterEventListeners(originalClusterSettings, originalServerSettings)) { + clusterSettings = ClusterSettings.builder(originalClusterSettings) + .clusterListenerList(singletonList(NO_OP_CLUSTER_LISTENER)) + .build(); + serverSettings = ServerSettings.builder(originalServerSettings) + .serverListenerList(singletonList(NO_OP_SERVER_LISTENER)) + .serverMonitorListenerList(singletonList(NO_OP_SERVER_MONITOR_LISTENER)) + .build(); + } else { + AsynchronousClusterEventListener clusterEventListener = + AsynchronousClusterEventListener.startNew(clusterId, getClusterListener(originalClusterSettings), + getServerListener(originalServerSettings), getServerMonitorListener(originalServerSettings)); + + clusterSettings = ClusterSettings.builder(originalClusterSettings) + .clusterListenerList(singletonList(clusterEventListener)) + .build(); + serverSettings = ServerSettings.builder(originalServerSettings) + .serverListenerList(singletonList(clusterEventListener)) + .serverMonitorListenerList(singletonList(clusterEventListener)) + .build(); + } + + DnsSrvRecordMonitorFactory dnsSrvRecordMonitorFactory = new DefaultDnsSrvRecordMonitorFactory(clusterId, serverSettings, dnsClient); + InternalOperationContextFactory clusterOperationContextFactory = + new InternalOperationContextFactory(clusterTimeoutSettings, serverApi); + InternalOperationContextFactory heartBeatOperationContextFactory = + new InternalOperationContextFactory(heartbeatTimeoutSettings, serverApi); + + ClientMetadata clientMetadata = new ClientMetadata( + applicationName, + mongoDriverInformation != null ? 
mongoDriverInformation : MongoDriverInformation.builder().build()); + + if (clusterSettings.getMode() == ClusterConnectionMode.LOAD_BALANCED) { + ClusterableServerFactory serverFactory = new LoadBalancedClusterableServerFactory(serverSettings, + connectionPoolSettings, internalConnectionPoolSettings, streamFactory, credential, loggerSettings, commandListener, + compressorList, serverApi, clusterOperationContextFactory); + return new LoadBalancedCluster(clusterId, clusterSettings, serverFactory, clientMetadata, dnsSrvRecordMonitorFactory); + } else { + ClusterableServerFactory serverFactory = new DefaultClusterableServerFactory(serverSettings, + connectionPoolSettings, internalConnectionPoolSettings, + clusterOperationContextFactory, streamFactory, heartBeatOperationContextFactory, heartbeatStreamFactory, credential, + loggerSettings, commandListener, compressorList, + serverApi, FaasEnvironment.getFaasEnvironment() != FaasEnvironment.UNKNOWN); + + if (clusterSettings.getMode() == ClusterConnectionMode.SINGLE) { + return new SingleServerCluster(clusterId, clusterSettings, serverFactory, clientMetadata); + } else if (clusterSettings.getMode() == ClusterConnectionMode.MULTIPLE) { + if (clusterSettings.getSrvHost() == null) { + return new MultiServerCluster(clusterId, clusterSettings, serverFactory, clientMetadata); + } else { + return new DnsMultiServerCluster(clusterId, clusterSettings, serverFactory, clientMetadata, dnsSrvRecordMonitorFactory); + } + } else { + throw new UnsupportedOperationException("Unsupported cluster mode: " + clusterSettings.getMode()); + } + } + } + + private boolean noClusterEventListeners(final ClusterSettings clusterSettings, final ServerSettings serverSettings) { + return clusterSettings.getClusterListeners().isEmpty() + && serverSettings.getServerListeners().isEmpty() + && serverSettings.getServerMonitorListeners().isEmpty(); + } + + private static ClusterListener getClusterListener(final ClusterSettings clusterSettings) { + return clusterSettings.getClusterListeners().size() == 0 + ? NO_OP_CLUSTER_LISTENER + : clusterListenerMulticaster(clusterSettings.getClusterListeners()); + } + + private static ServerListener getServerListener(final ServerSettings serverSettings) { + return serverSettings.getServerListeners().size() == 0 + ? NO_OP_SERVER_LISTENER + : serverListenerMulticaster(serverSettings.getServerListeners()); + } + + private static ServerMonitorListener getServerMonitorListener(final ServerSettings serverSettings) { + return serverSettings.getServerMonitorListeners().size() == 0 + ? NO_OP_SERVER_MONITOR_LISTENER + : serverMonitorListenerMulticaster(serverSettings.getServerMonitorListeners()); + } + + @VisibleForTesting(otherwise = VisibleForTesting.AccessModifier.PRIVATE) + public void detectAndLogClusterEnvironment(final ClusterSettings clusterSettings) { + String srvHost = clusterSettings.getSrvHost(); + ClusterEnvironment clusterEnvironment; + if (srvHost != null) { + clusterEnvironment = detectCluster(srvHost); + } else { + clusterEnvironment = detectCluster(clusterSettings.getHosts() + .stream() + .map(ServerAddress::getHost) + .toArray(String[]::new)); + } + + if (clusterEnvironment != null) { + LOGGER.info(format("You appear to be connected to a %s cluster. 
For more information regarding feature compatibility" + + " and support please visit %s", clusterEnvironment.clusterProductName, clusterEnvironment.documentationUrl)); + } + } + + enum ClusterEnvironment { + AZURE("https://www.mongodb.com/supportability/cosmosdb", + "CosmosDB", + ".cosmos.azure.com"), + AWS("https://www.mongodb.com/supportability/documentdb", + "DocumentDB", + ".docdb.amazonaws.com", ".docdb-elastic.amazonaws.com"); + + private final String documentationUrl; + private final String clusterProductName; + private final String[] hostSuffixes; + + ClusterEnvironment(final String url, final String name, final String... hostSuffixes) { + this.hostSuffixes = hostSuffixes; + this.documentationUrl = url; + this.clusterProductName = name; + } + @Nullable + public static ClusterEnvironment detectCluster(final String... hosts) { + for (String host : hosts) { + for (ClusterEnvironment clusterEnvironment : values()) { + if (clusterEnvironment.isExternalClusterProvider(host)) { + return clusterEnvironment; + } + } + } + return null; + } + + private boolean isExternalClusterProvider(final String host) { + for (String hostSuffix : hostSuffixes) { + String lowerCaseHost = host.toLowerCase(); + if (lowerCaseHost.endsWith(hostSuffix)) { + return true; + } + } + return false; + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultClusterableServerFactory.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultClusterableServerFactory.java new file mode 100644 index 00000000000..cb9830c4017 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultClusterableServerFactory.java @@ -0,0 +1,110 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.LoggerSettings; +import com.mongodb.MongoCompressor; +import com.mongodb.MongoCredential; +import com.mongodb.ServerAddress; +import com.mongodb.ServerApi; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ConnectionPoolSettings; +import com.mongodb.connection.ServerId; +import com.mongodb.connection.ServerSettings; +import com.mongodb.event.CommandListener; +import com.mongodb.event.ServerListener; +import com.mongodb.internal.inject.SameObjectProvider; +import com.mongodb.lang.Nullable; + +import java.util.List; + +import static com.mongodb.internal.event.EventListenerHelper.singleServerListener; +import static java.util.Collections.emptyList; + +/** + *

<p>This class is not part of the public API and may be removed or changed at any time</p>
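+ *
+ * <p>For each server address, {@link #create} wires together a server monitor, a connection pool and an SDAM server
+ * description manager, and starts the monitor before returning the server.</p>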
+ */ +public class DefaultClusterableServerFactory implements ClusterableServerFactory { + private final ServerSettings serverSettings; + private final ConnectionPoolSettings connectionPoolSettings; + private final InternalConnectionPoolSettings internalConnectionPoolSettings; + private final InternalOperationContextFactory clusterOperationContextFactory; + private final StreamFactory streamFactory; + private final InternalOperationContextFactory heartbeatOperationContextFactory; + private final StreamFactory heartbeatStreamFactory; + private final MongoCredentialWithCache credential; + private final LoggerSettings loggerSettings; + private final CommandListener commandListener; + private final List compressorList; + @Nullable + private final ServerApi serverApi; + private final boolean isFunctionAsAServiceEnvironment; + + public DefaultClusterableServerFactory( + final ServerSettings serverSettings, final ConnectionPoolSettings connectionPoolSettings, + final InternalConnectionPoolSettings internalConnectionPoolSettings, + final InternalOperationContextFactory clusterOperationContextFactory, final StreamFactory streamFactory, + final InternalOperationContextFactory heartbeatOperationContextFactory, final StreamFactory heartbeatStreamFactory, + @Nullable final MongoCredential credential, final LoggerSettings loggerSettings, + @Nullable final CommandListener commandListener, + final List compressorList, @Nullable final ServerApi serverApi, final boolean isFunctionAsAServiceEnvironment) { + this.serverSettings = serverSettings; + this.connectionPoolSettings = connectionPoolSettings; + this.internalConnectionPoolSettings = internalConnectionPoolSettings; + this.clusterOperationContextFactory = clusterOperationContextFactory; + this.streamFactory = streamFactory; + this.heartbeatOperationContextFactory = heartbeatOperationContextFactory; + this.heartbeatStreamFactory = heartbeatStreamFactory; + this.credential = credential == null ? 
null : new MongoCredentialWithCache(credential); + this.loggerSettings = loggerSettings; + this.commandListener = commandListener; + this.compressorList = compressorList; + this.serverApi = serverApi; + this.isFunctionAsAServiceEnvironment = isFunctionAsAServiceEnvironment; + } + + @Override + public ClusterableServer create(final Cluster cluster, final ServerAddress serverAddress) { + ServerId serverId = new ServerId(cluster.getClusterId(), serverAddress); + ClusterConnectionMode clusterMode = cluster.getSettings().getMode(); + SameObjectProvider sdamProvider = SameObjectProvider.uninitialized(); + ClientMetadata clientMetadata = cluster.getClientMetadata(); + + ServerMonitor serverMonitor = new DefaultServerMonitor(serverId, serverSettings, + // no credentials, compressor list, or command listener for the server monitor factory + new InternalStreamConnectionFactory(clusterMode, true, heartbeatStreamFactory, null, clientMetadata, + emptyList(), loggerSettings, null, serverApi), + clusterMode, serverApi, isFunctionAsAServiceEnvironment, sdamProvider, heartbeatOperationContextFactory); + + ConnectionPool connectionPool = new DefaultConnectionPool(serverId, + new InternalStreamConnectionFactory(clusterMode, streamFactory, credential, clientMetadata, + compressorList, loggerSettings, commandListener, serverApi), + connectionPoolSettings, internalConnectionPoolSettings, sdamProvider, clusterOperationContextFactory); + ServerListener serverListener = singleServerListener(serverSettings); + SdamServerDescriptionManager sdam = new DefaultSdamServerDescriptionManager(cluster, serverId, serverListener, serverMonitor, + connectionPool, clusterMode); + sdamProvider.initialize(sdam); + serverMonitor.start(); + return new DefaultServer(serverId, clusterMode, connectionPool, new DefaultConnectionFactory(), serverMonitor, + sdam, serverListener, commandListener, cluster.getClock(), true); + } + + @Override + public ServerSettings getSettings() { + return serverSettings; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultConnectionFactory.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultConnectionFactory.java new file mode 100644 index 00000000000..0297eafa571 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultConnectionFactory.java @@ -0,0 +1,33 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.connection.ClusterConnectionMode; + +class DefaultConnectionFactory implements ConnectionFactory { + @Override + public Connection create(final InternalConnection internalConnection, final ProtocolExecutor executor, + final ClusterConnectionMode clusterConnectionMode) { + return new DefaultServerConnection(internalConnection, executor, clusterConnectionMode); + } + + @Override + public AsyncConnection createAsync(final InternalConnection internalConnection, final ProtocolExecutor executor, + final ClusterConnectionMode clusterConnectionMode) { + return new DefaultServerConnection(internalConnection, executor, clusterConnectionMode); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultConnectionPool.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultConnectionPool.java new file mode 100644 index 00000000000..81a0e59e277 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultConnectionPool.java @@ -0,0 +1,1639 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoConnectionPoolClearedException; +import com.mongodb.MongoException; +import com.mongodb.MongoInterruptedException; +import com.mongodb.MongoServerUnavailableException; +import com.mongodb.MongoTimeoutException; +import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ConnectionId; +import com.mongodb.connection.ConnectionPoolSettings; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerId; +import com.mongodb.event.ConnectionCheckOutFailedEvent; +import com.mongodb.event.ConnectionCheckOutFailedEvent.Reason; +import com.mongodb.event.ConnectionCheckOutStartedEvent; +import com.mongodb.event.ConnectionCheckedInEvent; +import com.mongodb.event.ConnectionCheckedOutEvent; +import com.mongodb.event.ConnectionClosedEvent; +import com.mongodb.event.ConnectionCreatedEvent; +import com.mongodb.event.ConnectionPoolClearedEvent; +import com.mongodb.event.ConnectionPoolClosedEvent; +import com.mongodb.event.ConnectionPoolCreatedEvent; +import com.mongodb.event.ConnectionPoolListener; +import com.mongodb.event.ConnectionPoolReadyEvent; +import com.mongodb.event.ConnectionReadyEvent; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.connection.SdamServerDescriptionManager.SdamIssue; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.internal.event.EventReasonMessageResolver; +import com.mongodb.internal.inject.OptionalProvider; +import com.mongodb.internal.logging.LogMessage; +import 
com.mongodb.internal.logging.StructuredLogger; +import com.mongodb.internal.thread.DaemonThreadFactory; +import com.mongodb.internal.time.StartTime; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.NonNull; +import com.mongodb.lang.Nullable; +import org.bson.ByteBuf; +import org.bson.codecs.Decoder; +import org.bson.types.ObjectId; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.Deque; +import java.util.LinkedList; +import java.util.List; +import java.util.Optional; +import java.util.Queue; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.LongAdder; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantLock; +import java.util.concurrent.locks.StampedLock; +import java.util.function.Consumer; +import java.util.function.Predicate; +import java.util.function.Supplier; + +import static com.mongodb.assertions.Assertions.assertFalse; +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.assertNull; +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.assertions.Assertions.fail; +import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.event.ConnectionClosedEvent.Reason.ERROR; +import static com.mongodb.internal.Locks.lockInterruptibly; +import static com.mongodb.internal.Locks.withLock; +import static com.mongodb.internal.TimeoutContext.createMongoTimeoutException; +import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; +import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; +import static com.mongodb.internal.connection.ConcurrentPool.INFINITE_SIZE; +import static com.mongodb.internal.connection.ConcurrentPool.sizeToString; +import static com.mongodb.internal.event.EventListenerHelper.getConnectionPoolListener; +import static com.mongodb.internal.logging.LogMessage.Component.CONNECTION; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.DRIVER_CONNECTION_ID; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.DURATION_MS; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.ERROR_DESCRIPTION; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.MAX_CONNECTING; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.MAX_IDLE_TIME_MS; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.MAX_POOL_SIZE; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.MAX_WAIT_TIMEOUT_MS; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.MIN_POOL_SIZE; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.REASON_DESCRIPTION; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.SERVER_HOST; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.SERVER_PORT; +import static 
com.mongodb.internal.logging.LogMessage.Entry.Name.SERVICE_ID; +import static com.mongodb.internal.logging.LogMessage.Level.DEBUG; +import static java.lang.String.format; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.NANOSECONDS; + +@ThreadSafe +final class DefaultConnectionPool implements ConnectionPool { + private static final Logger LOGGER = Loggers.getLogger("connection"); + private static final StructuredLogger STRUCTURED_LOGGER = new StructuredLogger("connection"); + private final ConcurrentPool<UsageTrackingInternalConnection> pool; + private final ConnectionPoolSettings settings; + private final InternalOperationContextFactory operationContextFactory; + private final BackgroundMaintenanceManager backgroundMaintenance; + private final AsyncWorkManager asyncWorkManager; + private final ConnectionPoolListener connectionPoolListener; + private final ServerId serverId; + private final PinnedStatsManager pinnedStatsManager = new PinnedStatsManager(); + private final ServiceStateManager serviceStateManager = new ServiceStateManager(); + private final ConnectionGenerationSupplier connectionGenerationSupplier; + private final OpenConcurrencyLimiter openConcurrencyLimiter; + private final StateAndGeneration stateAndGeneration; + private final OptionalProvider<SdamServerDescriptionManager> sdamProvider; + + @VisibleForTesting(otherwise = PRIVATE) + DefaultConnectionPool(final ServerId serverId, final InternalConnectionFactory internalConnectionFactory, + final ConnectionPoolSettings settings, final OptionalProvider<SdamServerDescriptionManager> sdamProvider, + final InternalOperationContextFactory operationContextFactory) { + this(serverId, internalConnectionFactory, settings, InternalConnectionPoolSettings.builder().build(), sdamProvider, + operationContextFactory); + } + + /** + * @param sdamProvider For handling exceptions via the SDAM machinery as specified here. + * Must provide an {@linkplain Optional#isPresent() empty} {@link Optional} if created in load-balanced mode, + * otherwise must provide a non-empty {@link Optional}.
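
A note on this convention: further down the class, isLoadBalanced() is derived from exactly this emptiness, as !sdamProvider.optional().isPresent(). A minimal, self-contained sketch of the idea, with hypothetical names standing in for OptionalProvider and the SDAM manager:

    import java.util.Optional;

    final class LoadBalancedModeSketch {
        // Empty stands for "no SDAM manager", i.e. load-balanced mode;
        // a present value means the SDAM machinery handles server errors.
        static boolean isLoadBalanced(final Optional<Object> sdamProvider) {
            return !sdamProvider.isPresent();
        }

        public static void main(final String[] args) {
            System.out.println(isLoadBalanced(Optional.empty()));          // true
            System.out.println(isLoadBalanced(Optional.of(new Object()))); // false
        }
    }
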
+ */ + DefaultConnectionPool(final ServerId serverId, final InternalConnectionFactory internalConnectionFactory, + final ConnectionPoolSettings settings, final InternalConnectionPoolSettings internalSettings, + final OptionalProvider<SdamServerDescriptionManager> sdamProvider, + final InternalOperationContextFactory operationContextFactory) { + this.serverId = notNull("serverId", serverId); + this.settings = notNull("settings", settings); + UsageTrackingInternalConnectionItemFactory connectionItemFactory = + new UsageTrackingInternalConnectionItemFactory(internalConnectionFactory); + pool = new ConcurrentPool<>(maxSize(settings), connectionItemFactory, format("The server at %s is no longer available", + serverId.getAddress())); + this.operationContextFactory = assertNotNull(operationContextFactory); + this.sdamProvider = assertNotNull(sdamProvider); + this.connectionPoolListener = getConnectionPoolListener(settings); + backgroundMaintenance = new BackgroundMaintenanceManager(); + connectionPoolCreated(connectionPoolListener, serverId, settings); + openConcurrencyLimiter = new OpenConcurrencyLimiter(settings.getMaxConnecting()); + asyncWorkManager = new AsyncWorkManager(internalSettings.isPrestartAsyncWorkManager()); + stateAndGeneration = new StateAndGeneration(); + connectionGenerationSupplier = new ConnectionGenerationSupplier() { + @Override + public int getGeneration() { + return stateAndGeneration.generation(); + } + + @Override + public int getGeneration(@NonNull final ObjectId serviceId) { + return serviceStateManager.getGeneration(serviceId); + } + }; + } + + @Override + public InternalConnection get(final OperationContext operationContext) { + StartTime checkoutStart = connectionCheckoutStarted(operationContext); + Timeout maxWaitTimeout = operationContext.getTimeoutContext().startMaxWaitTimeout(checkoutStart); + try { + stateAndGeneration.throwIfClosedOrPaused(); + PooledConnection connection = getPooledConnection(maxWaitTimeout, checkoutStart, operationContext.getTimeoutContext()); + if (!connection.opened()) { + connection = openConcurrencyLimiter.openOrGetAvailable(operationContext, connection, maxWaitTimeout, checkoutStart); + } + connection.checkedOutForOperation(operationContext); + connectionCheckedOut(operationContext, connection, checkoutStart); + return connection; + } catch (Exception e) { + throw (RuntimeException) checkOutFailed(e, operationContext, checkoutStart); + } + } + + @Override + public void getAsync(final OperationContext operationContext, final SingleResultCallback<InternalConnection> callback) { + StartTime checkoutStart = connectionCheckoutStarted(operationContext); + Timeout maxWaitTimeout = operationContext.getTimeoutContext().startMaxWaitTimeout(checkoutStart); + SingleResultCallback<PooledConnection> eventSendingCallback = (connection, failure) -> { + SingleResultCallback<InternalConnection> errHandlingCallback = errorHandlingCallback(callback, LOGGER); + if (failure == null) { + assertNotNull(connection).checkedOutForOperation(operationContext); + connectionCheckedOut(operationContext, connection, checkoutStart); + errHandlingCallback.onResult(connection, null); + } else { + errHandlingCallback.onResult(null, checkOutFailed(failure, operationContext, checkoutStart)); + } + }; + try { + stateAndGeneration.throwIfClosedOrPaused(); + } catch (Exception e) { + eventSendingCallback.onResult(null, e); + return; + } + asyncWorkManager.enqueue(new Task(maxWaitTimeout, checkoutStart, operationContext.getTimeoutContext(), t -> { + if (t != null) { + eventSendingCallback.onResult(null, t); + } else { + PooledConnection connection; + try { +
connection = getPooledConnection(maxWaitTimeout, checkoutStart, operationContext.getTimeoutContext()); + } catch (Exception e) { + eventSendingCallback.onResult(null, e); + return; + } + if (connection.opened()) { + eventSendingCallback.onResult(connection, null); + } else { + openConcurrencyLimiter.openWithConcurrencyLimitAsync( + operationContext, connection, maxWaitTimeout, checkoutStart, eventSendingCallback); + } + } + })); + } + + /** + * Sends {@link ConnectionCheckOutFailedEvent} + * and returns {@code t} if it is not {@link MongoOpenConnectionInternalException}, + * or returns {@code t.}{@linkplain MongoOpenConnectionInternalException#getCause() getCause()} otherwise. + */ + private Throwable checkOutFailed(final Throwable t, final OperationContext operationContext, final StartTime checkoutStart) { + Throwable result = t; + Reason reason; + if (t instanceof MongoTimeoutException) { + reason = Reason.TIMEOUT; + } else if (t instanceof MongoOpenConnectionInternalException) { + reason = Reason.CONNECTION_ERROR; + result = t.getCause(); + } else if (t instanceof MongoConnectionPoolClearedException) { + reason = Reason.CONNECTION_ERROR; + } else if (ConcurrentPool.isPoolClosedException(t)) { + reason = Reason.POOL_CLOSED; + } else { + reason = Reason.UNKNOWN; + } + + Duration checkoutDuration = checkoutStart.elapsed(); + ClusterId clusterId = serverId.getClusterId(); + if (requiresLogging(clusterId)) { + String message = "Checkout failed for connection to {}:{}. Reason: {}.[ Error: {}.] Duration: {} ms"; + List entries = createBasicEntries(); + entries.add(new LogMessage.Entry(REASON_DESCRIPTION, EventReasonMessageResolver.getMessage(reason))); + entries.add(new LogMessage.Entry(ERROR_DESCRIPTION, reason == Reason.CONNECTION_ERROR ? result.toString() : null)); + entries.add(new LogMessage.Entry(DURATION_MS, checkoutDuration.toMillis())); + logMessage("Connection checkout failed", clusterId, message, entries); + } + connectionPoolListener.connectionCheckOutFailed( + new ConnectionCheckOutFailedEvent(serverId, operationContext.getId(), reason, checkoutDuration.toNanos())); + return result; + } + + @Override + public void invalidate(@Nullable final Throwable cause) { + assertFalse(isLoadBalanced()); + if (stateAndGeneration.pauseAndIncrementGeneration(cause)) { + openConcurrencyLimiter.signalClosedOrPaused(); + } + } + + @Override + public void ready() { + stateAndGeneration.ready(); + } + + public void invalidate(final ObjectId serviceId, final int generation) { + assertTrue(isLoadBalanced()); + if (generation == InternalConnection.NOT_INITIALIZED_GENERATION) { + return; + } + if (serviceStateManager.incrementGeneration(serviceId, generation)) { + ClusterId clusterId = serverId.getClusterId(); + if (requiresLogging(clusterId)) { + String message = "Connection pool for {}:{} cleared for serviceId {}"; + List entries = createBasicEntries(); + entries.add(new LogMessage.Entry(SERVICE_ID, serviceId.toHexString())); + logMessage("Connection pool cleared", clusterId, message, entries); + } + connectionPoolListener.connectionPoolCleared(new ConnectionPoolClearedEvent(this.serverId, serviceId)); + } + } + + @Override + public void close() { + if (stateAndGeneration.close()) { + pool.close(); + backgroundMaintenance.close(); + asyncWorkManager.close(); + openConcurrencyLimiter.signalClosedOrPaused(); + logEventMessage("Connection pool closed", "Connection pool closed for {}:{}"); + + connectionPoolListener.connectionPoolClosed(new ConnectionPoolClosedEvent(serverId)); + } + } + + @Override + 
public int getGeneration() { + return stateAndGeneration.generation(); + } + + private PooledConnection getPooledConnection(final Timeout maxWaitTimeout, + final StartTime startTime, + final TimeoutContext timeoutContext) throws MongoTimeoutException { + try { + UsageTrackingInternalConnection internalConnection = maxWaitTimeout.call(NANOSECONDS, + () -> pool.get(-1L, NANOSECONDS), + (ns) -> pool.get(ns, NANOSECONDS), + () -> pool.get(0L, NANOSECONDS)); + while (shouldPrune(internalConnection)) { + pool.release(internalConnection, true); + internalConnection = maxWaitTimeout.call(NANOSECONDS, + () -> pool.get(-1L, NANOSECONDS), + (ns) -> pool.get(ns, NANOSECONDS), + () -> pool.get(0L, NANOSECONDS)); + } + return new PooledConnection(internalConnection); + } catch (MongoTimeoutException e) { + throw createTimeoutException(startTime, e, timeoutContext); + } + } + + @Nullable + private PooledConnection getPooledConnectionImmediate() { + UsageTrackingInternalConnection internalConnection = pool.getImmediate(); + while (internalConnection != null && shouldPrune(internalConnection)) { + pool.release(internalConnection, true); + internalConnection = pool.getImmediate(); + } + return internalConnection == null ? null : new PooledConnection(internalConnection); + } + + private MongoTimeoutException createTimeoutException(final StartTime startTime, + @Nullable final MongoTimeoutException cause, + final TimeoutContext timeoutContext) { + long elapsedMs = startTime.elapsed().toMillis(); + int numPinnedToCursor = pinnedStatsManager.getNumPinnedToCursor(); + int numPinnedToTransaction = pinnedStatsManager.getNumPinnedToTransaction(); + String errorMessage; + + if (numPinnedToCursor == 0 && numPinnedToTransaction == 0) { + errorMessage = format("Timed out after %d ms while waiting for a connection to server %s.", + elapsedMs, serverId.getAddress()); + } else { + int maxSize = pool.getMaxSize(); + int numInUse = pool.getInUseCount(); + /* At this point in an execution we consider at least one of `numPinnedToCursor`, `numPinnedToTransaction` to be positive. + * `numPinnedToCursor`, `numPinnedToTransaction` and `numInUse` are not a snapshot view, + * but we still must maintain the following invariants: + * - numInUse > 0 + * we consider at least one of `numPinnedToCursor`, `numPinnedToTransaction` to be positive, + * so if we observe `numInUse` to be 0, we have to estimate it based on `numPinnedToCursor` and `numPinnedToTransaction`; + * - numInUse <= maxSize + * `numInUse` must not exceed the limit in situations when we estimate `numInUse`; + * - numPinnedToCursor + numPinnedToTransaction <= numInUse + * otherwise the numbers do not make sense. + */ + if (numInUse == 0) { + numInUse = Math.min( + numPinnedToCursor + numPinnedToTransaction, // must be at least a big as this sum but not bigger than `maxSize` + maxSize); + } + numPinnedToCursor = Math.min( + numPinnedToCursor, // prefer the observed value, but it must not be bigger than `numInUse` + numInUse); + numPinnedToTransaction = Math.min( + numPinnedToTransaction, // prefer the observed value, but it must not be bigger than `numInUse` - `numPinnedToCursor` + numInUse - numPinnedToCursor); + int numOtherInUse = numInUse - numPinnedToCursor - numPinnedToTransaction; + assertTrue(numOtherInUse >= 0); + assertTrue(numPinnedToCursor + numPinnedToTransaction + numOtherInUse <= maxSize); + errorMessage = format("Timed out after %d ms while waiting for a connection to server %s. 
Details: " + + "maxPoolSize: %s, connections in use by cursors: %d, connections in use by transactions: %d, " + + "connections in use by other operations: %d", + elapsedMs, serverId.getAddress(), + sizeToString(maxSize), numPinnedToCursor, numPinnedToTransaction, + numOtherInUse); + } + + return timeoutContext.hasTimeoutMS() ? createMongoTimeoutException(errorMessage, cause) : new MongoTimeoutException(errorMessage, cause); + } + + @VisibleForTesting(otherwise = PRIVATE) + ConcurrentPool getPool() { + return pool; + } + + /** + * Synchronously prune idle connections and ensure the minimum pool size. + */ + @VisibleForTesting(otherwise = PRIVATE) + void doMaintenance() { + Predicate silentlyComplete = e -> + e instanceof MongoInterruptedException || e instanceof MongoTimeoutException + || e instanceof MongoConnectionPoolClearedException || ConcurrentPool.isPoolClosedException(e); + try { + pool.prune(); + if (shouldEnsureMinSize()) { + pool.ensureMinSize(settings.getMinSize(), newConnection -> { + try { + OperationContext operationContext = operationContextFactory.createMaintenanceContext(); + openConcurrencyLimiter.openImmediatelyAndTryHandOverOrRelease(operationContext, new PooledConnection(newConnection)); + } catch (MongoException | MongoOpenConnectionInternalException e) { + RuntimeException actualException = e instanceof MongoOpenConnectionInternalException + ? (RuntimeException) e.getCause() + : e; + try { + sdamProvider.optional().ifPresent(sdam -> { + if (!silentlyComplete.test(actualException)) { + sdam.handleExceptionBeforeHandshake(SdamIssue.of(actualException, sdam.context(newConnection))); + } + }); + } catch (Exception suppressed) { + actualException.addSuppressed(suppressed); + } + throw actualException; + } + }); + } + } catch (Exception e) { + if (!silentlyComplete.test(e)) { + LOGGER.warn("Exception thrown during connection pool background maintenance task", e); + throw e; + } + } + } + + private boolean shouldEnsureMinSize() { + return settings.getMinSize() > 0; + } + + private boolean shouldPrune(final UsageTrackingInternalConnection connection) { + return fromPreviousGeneration(connection) || pastMaxLifeTime(connection) || pastMaxIdleTime(connection); + } + + private boolean pastMaxIdleTime(final UsageTrackingInternalConnection connection) { + return expired(connection.getLastUsedAt(), System.currentTimeMillis(), settings.getMaxConnectionIdleTime(MILLISECONDS)); + } + + private boolean pastMaxLifeTime(final UsageTrackingInternalConnection connection) { + return expired(connection.getOpenedAt(), System.currentTimeMillis(), settings.getMaxConnectionLifeTime(MILLISECONDS)); + } + + private boolean fromPreviousGeneration(final UsageTrackingInternalConnection connection) { + int generation = connection.getGeneration(); + if (generation == InternalConnection.NOT_INITIALIZED_GENERATION) { + return false; + } + ObjectId serviceId = connection.getDescription().getServiceId(); + if (serviceId != null) { + return serviceStateManager.getGeneration(serviceId) > generation; + } else { + return stateAndGeneration.generation() > generation; + } + } + + private boolean expired(final long startTime, final long curTime, final long maxTime) { + return maxTime != 0 && curTime - startTime > maxTime; + } + + /** + * Send both current and deprecated events in order to preserve backwards compatibility. + * Must not throw {@link Exception}s. 
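
The prune decision above reduces to three checks, stale generation, past max lifetime, and past max idle time, where the two time checks share the expired(...) helper. A runnable sketch of that helper's arithmetic, using hypothetical millisecond values; a configured limit of 0 disables the corresponding check:

    final class PruneRuleSketch {
        // Same arithmetic as expired(...) above: 0 means "no limit".
        static boolean expired(final long startTime, final long curTime, final long maxTime) {
            return maxTime != 0 && curTime - startTime > maxTime;
        }

        public static void main(final String[] args) {
            final long now = System.currentTimeMillis();
            System.out.println(expired(now - 5_000, now, 1_000)); // true: 5 s old, 1 s allowed
            System.out.println(expired(now - 5_000, now, 0));     // false: the check is disabled
        }
    }
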
+ */ + private void connectionPoolCreated(final ConnectionPoolListener connectionPoolListener, final ServerId serverId, + final ConnectionPoolSettings settings) { + ClusterId clusterId = serverId.getClusterId(); + if (requiresLogging(clusterId)) { + String message = "Connection pool created for {}:{} using options maxIdleTimeMS={}, minPoolSize={}, " + + "maxPoolSize={}, maxConnecting={}, waitQueueTimeoutMS={}"; + + List entries = createBasicEntries(); + entries.add(new LogMessage.Entry(MAX_IDLE_TIME_MS, settings.getMaxConnectionIdleTime(MILLISECONDS))); + entries.add(new LogMessage.Entry(MIN_POOL_SIZE, settings.getMinSize())); + entries.add(new LogMessage.Entry(MAX_POOL_SIZE, settings.getMaxSize())); + entries.add(new LogMessage.Entry(MAX_CONNECTING, settings.getMaxConnecting())); + entries.add(new LogMessage.Entry(MAX_WAIT_TIMEOUT_MS, settings.getMaxWaitTime(MILLISECONDS))); + + logMessage("Connection pool created", clusterId, message, entries); + } + connectionPoolListener.connectionPoolCreated(new ConnectionPoolCreatedEvent(serverId, settings)); + } + + /** + * Send both current and deprecated events in order to preserve backwards compatibility. + * Must not throw {@link Exception}s. + * + * @return A {@link StartTime} before executing {@link ConnectionPoolListener#connectionCreated(ConnectionCreatedEvent)} + * and logging the event. This order is required by + + * CMAP + * and {@link ConnectionReadyEvent#getElapsedTime(TimeUnit)}. + */ + private StartTime connectionCreated(final ConnectionPoolListener connectionPoolListener, final ConnectionId connectionId) { + StartTime openStart = StartTime.now(); + logEventMessage("Connection created", + "Connection created: address={}:{}, driver-generated ID={}", + connectionId.getLocalValue()); + + connectionPoolListener.connectionCreated(new ConnectionCreatedEvent(connectionId)); + return openStart; + } + + /** + * Send both current and deprecated events in order to preserve backwards compatibility. + * Must not throw {@link Exception}s. + */ + private void connectionClosed(final ConnectionPoolListener connectionPoolListener, final ConnectionId connectionId, + final ConnectionClosedEvent.Reason reason) { + ClusterId clusterId = serverId.getClusterId(); + if (requiresLogging(clusterId)) { + String errorReason = "There was a socket exception raised by this connection"; + + List entries = createBasicEntries(); + entries.add(new LogMessage.Entry(DRIVER_CONNECTION_ID, connectionId.getLocalValue())); + entries.add(new LogMessage.Entry(REASON_DESCRIPTION, EventReasonMessageResolver.getMessage(reason))); + entries.add(new LogMessage.Entry(ERROR_DESCRIPTION, reason == ERROR ? errorReason : null)); + + logMessage("Connection closed", + clusterId, + "Connection closed: address={}:{}, driver-generated ID={}. 
Reason: {}.[ Error: {}]", + entries); + } + connectionPoolListener.connectionClosed(new ConnectionClosedEvent(connectionId, reason)); + } + + private void connectionCheckedOut( + final OperationContext operationContext, + final PooledConnection connection, + final StartTime checkoutStart) { + Duration checkoutDuration = checkoutStart.elapsed(); + ConnectionId connectionId = getId(connection); + ClusterId clusterId = serverId.getClusterId(); + if (requiresLogging(clusterId)) { + List entries = createBasicEntries(); + entries.add(new LogMessage.Entry(DRIVER_CONNECTION_ID, connectionId.getLocalValue())); + entries.add(new LogMessage.Entry(DURATION_MS, checkoutDuration.toMillis())); + logMessage("Connection checked out", clusterId, + "Connection checked out: address={}:{}, driver-generated ID={}, duration={} ms", entries); + } + + connectionPoolListener.connectionCheckedOut( + new ConnectionCheckedOutEvent(connectionId, operationContext.getId(), checkoutDuration.toNanos())); + } + + /** + * @return A {@link StartTime} before executing + * {@link ConnectionPoolListener#connectionCheckOutStarted(ConnectionCheckOutStartedEvent)} and logging the event. + * This order is required by + * CMAP + * and {@link ConnectionCheckedOutEvent#getElapsedTime(TimeUnit)}, {@link ConnectionCheckOutFailedEvent#getElapsedTime(TimeUnit)}. + */ + private StartTime connectionCheckoutStarted(final OperationContext operationContext) { + StartTime checkoutStart = StartTime.now(); + logEventMessage("Connection checkout started", "Checkout started for connection to {}:{}"); + + connectionPoolListener.connectionCheckOutStarted(new ConnectionCheckOutStartedEvent(serverId, operationContext.getId())); + return checkoutStart; + + } + + /** + * Must not throw {@link Exception}s. + */ + private ConnectionId getId(final InternalConnection internalConnection) { + return internalConnection.getDescription().getConnectionId(); + } + + private boolean isLoadBalanced() { + return !sdamProvider.optional().isPresent(); + } + + /** + * @return {@link ConnectionPoolSettings#getMaxSize()} if it is not 0, otherwise returns {@link ConcurrentPool#INFINITE_SIZE}. + */ + private static int maxSize(final ConnectionPoolSettings settings) { + return settings.getMaxSize() == 0 ? 
INFINITE_SIZE : settings.getMaxSize(); + } + + private class PooledConnection implements InternalConnection { + private final UsageTrackingInternalConnection wrapped; + private final AtomicBoolean isClosed = new AtomicBoolean(); + private Connection.PinningMode pinningMode; + private long operationId; + + PooledConnection(final UsageTrackingInternalConnection wrapped) { + this.wrapped = notNull("wrapped", wrapped); + } + + @Override + public int getGeneration() { + return wrapped.getGeneration(); + } + + /** + * Associates this with the operation id and establishes the checked out start time + */ + public void checkedOutForOperation(final OperationContext operationContext) { + this.operationId = operationContext.getId(); + } + + @Override + public void open(final OperationContext operationContext) { + assertFalse(isClosed.get()); + StartTime openStart; + try { + openStart = connectionCreated(connectionPoolListener, wrapped.getDescription().getConnectionId()); + wrapped.open(operationContext); + } catch (Exception e) { + closeAndHandleOpenFailure(); + throw new MongoOpenConnectionInternalException(e); + } + handleOpenSuccess(openStart); + } + + @Override + public void openAsync(final OperationContext operationContext, final SingleResultCallback callback) { + assertFalse(isClosed.get()); + StartTime openStart = connectionCreated(connectionPoolListener, wrapped.getDescription().getConnectionId()); + wrapped.openAsync(operationContext, (nullResult, failure) -> { + if (failure != null) { + closeAndHandleOpenFailure(); + callback.onResult(null, new MongoOpenConnectionInternalException(failure)); + } else { + handleOpenSuccess(openStart); + callback.onResult(nullResult, null); + } + }); + } + + @Override + public void close() { + // All but the first call is a no-op + if (!isClosed.getAndSet(true)) { + unmarkAsPinned(); + connectionCheckedIn(); + if (wrapped.isClosed() || shouldPrune(wrapped)) { + pool.release(wrapped, true); + } else { + openConcurrencyLimiter.tryHandOverOrRelease(wrapped); + } + } + } + + private void connectionCheckedIn() { + ConnectionId connectionId = getId(wrapped); + logEventMessage("Connection checked in", + "Connection checked in: address={}:{}, driver-generated ID={}", + connectionId.getLocalValue()); + connectionPoolListener.connectionCheckedIn(new ConnectionCheckedInEvent(connectionId, operationId)); + } + + void release() { + if (!isClosed.getAndSet(true)) { + pool.release(wrapped); + } + } + + /** + * {@linkplain ConcurrentPool#release(Object, boolean) Prune} this connection without sending a {@link ConnectionClosedEvent}. + * This method must be used if and only if {@link ConnectionCreatedEvent} was not sent for the connection. + * Must not throw {@link Exception}s. + */ + void closeSilently() { + if (!isClosed.getAndSet(true)) { + wrapped.setCloseSilently(); + pool.release(wrapped, true); + } + } + + /** + * Must not throw {@link Exception}s. + */ + private void closeAndHandleOpenFailure() { + if (!isClosed.getAndSet(true)) { + if (wrapped.getDescription().getServiceId() != null) { + invalidate(assertNotNull(wrapped.getDescription().getServiceId()), wrapped.getGeneration()); + } + pool.release(wrapped, true); + } + } + + /** + * Must not throw {@link Exception}s. 
+ */ + private void handleOpenSuccess(final StartTime openStart) { + Duration openDuration = openStart.elapsed(); + ConnectionId connectionId = getId(this); + ClusterId clusterId = serverId.getClusterId(); + if (requiresLogging(clusterId)) { + List<LogMessage.Entry> entries = createBasicEntries(); + entries.add(new LogMessage.Entry(DRIVER_CONNECTION_ID, connectionId.getLocalValue())); + entries.add(new LogMessage.Entry(DURATION_MS, openDuration.toMillis())); + logMessage("Connection ready", clusterId, "Connection ready: address={}:{}, driver-generated ID={}, established in={} ms", entries); + } + connectionPoolListener.connectionReady(new ConnectionReadyEvent(connectionId, openDuration.toNanos())); + } + + @Override + public boolean opened() { + isTrue("open", !isClosed.get()); + return wrapped.opened(); + } + + @Override + public boolean isClosed() { + return isClosed.get() || wrapped.isClosed(); + } + + @Override + public ByteBuf getBuffer(final int capacity) { + return wrapped.getBuffer(capacity); + } + + @Override + public void sendMessage(final List<ByteBuf> byteBuffers, final int lastRequestId, final OperationContext operationContext) { + isTrue("open", !isClosed.get()); + wrapped.sendMessage(byteBuffers, lastRequestId, operationContext); + } + + @Override + public <T> T sendAndReceive(final CommandMessage message, final Decoder<T> decoder, final OperationContext operationContext) { + isTrue("open", !isClosed.get()); + return wrapped.sendAndReceive(message, decoder, operationContext); + } + + @Override + public <T> void send(final CommandMessage message, final Decoder<T> decoder, final OperationContext operationContext) { + isTrue("open", !isClosed.get()); + wrapped.send(message, decoder, operationContext); + } + + @Override + public <T> T receive(final Decoder<T> decoder, final OperationContext operationContext) { + isTrue("open", !isClosed.get()); + return wrapped.receive(decoder, operationContext); + } + + @Override + public boolean hasMoreToCome() { + isTrue("open", !isClosed.get()); + return wrapped.hasMoreToCome(); + } + + @Override + public <T> void sendAndReceiveAsync(final CommandMessage message, final Decoder<T> decoder, + final OperationContext operationContext, final SingleResultCallback<T> callback) { + isTrue("open", !isClosed.get()); + wrapped.sendAndReceiveAsync(message, decoder, operationContext, callback); + } + + @Override + public ResponseBuffers receiveMessage(final int responseTo, final OperationContext operationContext) { + isTrue("open", !isClosed.get()); + return wrapped.receiveMessage(responseTo, operationContext); + } + + @Override + public void sendMessageAsync(final List<ByteBuf> byteBuffers, final int lastRequestId, final OperationContext operationContext, + final SingleResultCallback<Void> callback) { + isTrue("open", !isClosed.get()); + wrapped.sendMessageAsync(byteBuffers, lastRequestId, operationContext, (result, t) -> callback.onResult(null, t)); + } + + @Override + public void receiveMessageAsync(final int responseTo, final OperationContext operationContext, + final SingleResultCallback<ResponseBuffers> callback) { + isTrue("open", !isClosed.get()); + wrapped.receiveMessageAsync(responseTo, operationContext, callback); + } + + @Override + public void markAsPinned(final Connection.PinningMode pinningMode) { + assertNotNull(pinningMode); + // if the connection is already pinned for some other mode, the additional mode can be ignored. + // The typical case is the connection is first pinned for a transaction, then pinned for a cursor within that transaction + // In this case, the cursor pinning is subsumed by the transaction pinning.
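+ // Because only the first pinning mode is recorded, PinnedStatsManager counts each connection + // at most once, so the cursor/transaction counters read by createTimeoutException never + // double-count a connection pinned for a cursor inside a transaction.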
+ if (this.pinningMode == null) { + this.pinningMode = pinningMode; + pinnedStatsManager.increment(pinningMode); + } + } + + void unmarkAsPinned() { + if (pinningMode != null) { + pinnedStatsManager.decrement(pinningMode); + } + } + + @Override + public ConnectionDescription getDescription() { + return wrapped.getDescription(); + } + + @Override + public ServerDescription getInitialServerDescription() { + isTrue("open", !isClosed.get()); + return wrapped.getInitialServerDescription(); + } + } + + /** + * This internal exception is used to express an exceptional situation encountered when opening a connection. + * It exists because it allows consolidating the code that sends events for exceptional situations in a + * {@linkplain #checkOutFailed(Throwable, OperationContext, StartTime) single place}, it must not be observable by an external code. + */ + private static final class MongoOpenConnectionInternalException extends RuntimeException { + private static final long serialVersionUID = 1; + + MongoOpenConnectionInternalException(@NonNull final Throwable cause) { + super(cause); + } + + @Override + @NonNull + public Throwable getCause() { + return assertNotNull(super.getCause()); + } + } + + private class UsageTrackingInternalConnectionItemFactory implements ConcurrentPool.ItemFactory { + private final InternalConnectionFactory internalConnectionFactory; + + UsageTrackingInternalConnectionItemFactory(final InternalConnectionFactory internalConnectionFactory) { + this.internalConnectionFactory = internalConnectionFactory; + } + + @Override + public UsageTrackingInternalConnection create() { + return new UsageTrackingInternalConnection(internalConnectionFactory.create(serverId, connectionGenerationSupplier), + serviceStateManager); + } + + @Override + public void close(final UsageTrackingInternalConnection connection) { + if (!connection.isCloseSilently()) { + connectionClosed(connectionPoolListener, getId(connection), getReasonForClosing(connection)); + } + connection.close(); + } + + private ConnectionClosedEvent.Reason getReasonForClosing(final UsageTrackingInternalConnection connection) { + ConnectionClosedEvent.Reason reason; + if (connection.isClosed()) { + reason = ConnectionClosedEvent.Reason.ERROR; + } else if (fromPreviousGeneration(connection)) { + reason = ConnectionClosedEvent.Reason.STALE; + } else if (pastMaxIdleTime(connection)) { + reason = ConnectionClosedEvent.Reason.IDLE; + } else { + reason = ConnectionClosedEvent.Reason.POOL_CLOSED; + } + return reason; + } + + @Override + public boolean shouldPrune(final UsageTrackingInternalConnection usageTrackingConnection) { + return DefaultConnectionPool.this.shouldPrune(usageTrackingConnection); + } + } + + /** + * Package-access methods are thread-safe, + * and only they should be called outside of the {@link OpenConcurrencyLimiter}'s code. 
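
At its core, the limiter below is a counting gate sized by ConnectionPoolSettings#getMaxConnecting(), combined with a hand-over queue and wake-ups on pause/close. A stripped-down sketch of just the permit part, using a plain Semaphore; the real class cannot use one because it must also hand opened connections over to waiters and wake them when the pool is paused or closed. Names and the hard-coded limit are illustrative only:

    import java.util.concurrent.Semaphore;

    final class MaxConnectingSketch {
        private final Semaphore permits = new Semaphore(2); // CMAP's default maxConnecting is 2

        void openConnection(final Runnable handshake) throws InterruptedException {
            permits.acquire();   // blocks while two handshakes are already in flight
            try {
                handshake.run(); // the expensive TLS + MongoDB handshake
            } finally {
                permits.release();
            }
        }
    }
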
+ */ + @ThreadSafe + private final class OpenConcurrencyLimiter { + private final ReentrantLock lock; + private final Condition permitAvailableOrHandedOverOrClosedOrPausedCondition; + private final int maxPermits; + private int permits; + private final Deque<MutableReference<PooledConnection>> desiredConnectionSlots; + + OpenConcurrencyLimiter(final int maxConnecting) { + lock = new ReentrantLock(false); + permitAvailableOrHandedOverOrClosedOrPausedCondition = lock.newCondition(); + maxPermits = maxConnecting; + permits = maxPermits; + desiredConnectionSlots = new LinkedList<>(); + } + + PooledConnection openOrGetAvailable(final OperationContext operationContext, final PooledConnection connection, + final Timeout maxWaitTimeout, final StartTime startTime) + throws MongoTimeoutException { + PooledConnection result = openWithConcurrencyLimit( + operationContext, connection, OpenWithConcurrencyLimitMode.TRY_GET_AVAILABLE, + maxWaitTimeout, startTime); + return assertNotNull(result); + } + + void openImmediatelyAndTryHandOverOrRelease(final OperationContext operationContext, + final PooledConnection connection) throws MongoTimeoutException { + StartTime startTime = StartTime.now(); + Timeout timeout = startTime.asTimeout(); + assertNull(openWithConcurrencyLimit( + operationContext, + connection, OpenWithConcurrencyLimitMode.TRY_HAND_OVER_OR_RELEASE, + timeout, startTime)); + } + + /** + * This method can be thought of as operating in two phases. In the first phase it tries to synchronously + * acquire a permit to open the {@code connection} or get a different + * {@linkplain PooledConnection#opened() opened} connection if {@code mode} is + * {@link OpenWithConcurrencyLimitMode#TRY_GET_AVAILABLE} and one becomes available while waiting for a permit. + * The first phase has one of the following outcomes: + *
<ol>
+ *     <li>A {@link MongoTimeoutException} or a different {@link Exception} is thrown,
+ *     and the specified {@code connection} is {@linkplain PooledConnection#closeSilently() silently closed}.</li>
+ *     <li>An opened connection different from the specified one is returned,
+ *     and the specified {@code connection} is {@linkplain PooledConnection#closeSilently() silently closed}.
+ *     This outcome is possible only if {@code mode} is {@link OpenWithConcurrencyLimitMode#TRY_GET_AVAILABLE}.</li>
+ *     <li>A permit is acquired, {@link #connectionCreated(ConnectionPoolListener, ConnectionId)} is reported
+ *     and an attempt to open the specified {@code connection} is made. This is the second phase in which
+ *     the {@code connection} is {@linkplain InternalConnection#open(OperationContext) opened synchronously}.
+ *     The attempt to open the {@code connection} has one of the following outcomes
+ *     combined with releasing the acquired permit:
+ *     <ol>
+ *         <li>An {@link Exception} is thrown
+ *         and the {@code connection} is {@linkplain PooledConnection#closeAndHandleOpenFailure() closed}.</li>
+ *         <li>Else if the specified {@code connection} is opened successfully and
+ *         {@code mode} is {@link OpenWithConcurrencyLimitMode#TRY_HAND_OVER_OR_RELEASE},
+ *         then {@link #tryHandOverOrRelease(UsageTrackingInternalConnection)} is called and {@code null} is returned.</li>
+ *         <li>Else the specified {@code connection}, which is now opened, is returned.</li>
+ *     </ol></li>
+ * </ol>
+ * + * @param operationContext the operation context + * @param maxWaitTimeout Applies only to the first phase. + * @return An {@linkplain PooledConnection#opened() opened} connection which is either the specified + * {@code connection}, or potentially a different one if {@code mode} is + * {@link OpenWithConcurrencyLimitMode#TRY_GET_AVAILABLE}, or {@code null} if {@code mode} is + * {@link OpenWithConcurrencyLimitMode#TRY_HAND_OVER_OR_RELEASE}. + * @throws MongoTimeoutException If the first phase timed out. + */ + @Nullable + private PooledConnection openWithConcurrencyLimit(final OperationContext operationContext, + final PooledConnection connection, final OpenWithConcurrencyLimitMode mode, + final Timeout maxWaitTimeout, final StartTime startTime) + throws MongoTimeoutException { + PooledConnection availableConnection; + try {//phase one + availableConnection = acquirePermitOrGetAvailableOpenedConnection( + mode == OpenWithConcurrencyLimitMode.TRY_GET_AVAILABLE, maxWaitTimeout, startTime, + operationContext.getTimeoutContext()); + } catch (Exception e) { + connection.closeSilently(); + throw e; + } + if (availableConnection != null) { + connection.closeSilently(); + return availableConnection; + } else {//acquired a permit, phase two + try { + connection.open(operationContext); + if (mode == OpenWithConcurrencyLimitMode.TRY_HAND_OVER_OR_RELEASE) { + tryHandOverOrRelease(connection.wrapped); + return null; + } else { + return connection; + } + } finally { + releasePermit(); + } + } + } + + /** + * This method is similar to {@link #openWithConcurrencyLimit(OperationContext, PooledConnection, OpenWithConcurrencyLimitMode, Timeout, StartTime)} + * with the following differences: + *
<ul>
+ *     <li>It does not have the {@code mode} parameter and acts as if this parameter were
+ *     {@link OpenWithConcurrencyLimitMode#TRY_GET_AVAILABLE}.</li>
+ *     <li>While the first phase is still synchronous, the {@code connection} is
+ *     {@linkplain InternalConnection#openAsync(OperationContext, SingleResultCallback) opened asynchronously} in the second phase.</li>
+ *     <li>Instead of returning a result or throwing an exception via Java {@code return}/{@code throw} statements,
+ *     it calls {@code callback.}{@link SingleResultCallback#onResult(Object, Throwable) onResult(result, failure)}
+ *     and passes either a {@link PooledConnection} or an {@link Exception}.</li>
+ * </ul>
+ */ + void openWithConcurrencyLimitAsync( + final OperationContext operationContext, final PooledConnection connection, + final Timeout maxWaitTimeout, final StartTime startTime, + final SingleResultCallback callback) { + PooledConnection availableConnection; + try {//phase one + availableConnection = + acquirePermitOrGetAvailableOpenedConnection(true, maxWaitTimeout, startTime, operationContext.getTimeoutContext()); + } catch (Exception e) { + connection.closeSilently(); + callback.onResult(null, e); + return; + } + if (availableConnection != null) { + connection.closeSilently(); + callback.onResult(availableConnection, null); + } else {//acquired a permit, phase two + connection.openAsync(operationContext, (nullResult, failure) -> { + releasePermit(); + if (failure != null) { + callback.onResult(null, failure); + } else { + callback.onResult(connection, null); + } + }); + } + } + + /** + * @return Either {@code null} if a permit has been acquired, or a {@link PooledConnection} + * if {@code tryGetAvailable} is {@code true} and an {@linkplain PooledConnection#opened() opened} one becomes available while + * waiting for a permit. + * @throws MongoTimeoutException If timed out. + * @throws MongoInterruptedException If the current thread has its {@linkplain Thread#interrupted() interrupted status} + * set on entry to this method or is interrupted while waiting to get an available opened connection. + */ + @Nullable + private PooledConnection acquirePermitOrGetAvailableOpenedConnection(final boolean tryGetAvailable, + final Timeout maxWaitTimeout, final StartTime startTime, + final TimeoutContext timeoutContext) + throws MongoTimeoutException, MongoInterruptedException { + PooledConnection availableConnection = null; + boolean expressedDesireToGetAvailableConnection = false; + lockInterruptibly(lock); + try { + if (tryGetAvailable) { + /* An attempt to get an available opened connection from the pool (must be done while holding the lock) + * happens here at most once to prevent the race condition in the following execution + * (actions are specified in the execution total order, + * which by definition exists if an execution is either sequentially consistent or linearizable): + * 1. Thread#1 starts checking out and gets a non-opened connection. + * 2. Thread#2 checks in a connection. Tries to hand it over, but there are no threads desiring to get one. + * 3. Thread#1 executes the current code. Expresses the desire to get a connection via the hand-over mechanism, + * but thread#2 has already tried handing over and released its connection to the pool. + * As a result, thread#1 is waiting for a permit to open a connection despite one being available in the pool. */ + availableConnection = getPooledConnectionImmediate(); + if (availableConnection != null) { + return availableConnection; + } + expressDesireToGetAvailableConnection(); + expressedDesireToGetAvailableConnection = true; + } + while (permits == 0 + // the absence of short-circuiting is of importance + & !stateAndGeneration.throwIfClosedOrPaused() + & (availableConnection = tryGetAvailable ? 
tryGetAvailableConnection() : null) == null) { + + Timeout.onExistsAndExpired(maxWaitTimeout, () -> { + throw createTimeoutException(startTime, null, timeoutContext); + }); + maxWaitTimeout.awaitOn(permitAvailableOrHandedOverOrClosedOrPausedCondition, + () -> "acquiring permit or getting available opened connection"); + } + if (availableConnection == null) { + assertTrue(permits > 0); + permits--; + } + return availableConnection; + } finally { + try { + if (expressedDesireToGetAvailableConnection && availableConnection == null) { + giveUpOnTryingToGetAvailableConnection(); + } + } finally { + lock.unlock(); + } + } + } + + private void releasePermit() { + withLock(lock, () -> { + assertTrue(permits < maxPermits); + permits++; + permitAvailableOrHandedOverOrClosedOrPausedCondition.signal(); + }); + } + + private void expressDesireToGetAvailableConnection() { + desiredConnectionSlots.addLast(new MutableReference<>()); + } + + @Nullable + private PooledConnection tryGetAvailableConnection() { + assertFalse(desiredConnectionSlots.isEmpty()); + PooledConnection result = desiredConnectionSlots.peekFirst().reference; + if (result != null) { + desiredConnectionSlots.removeFirst(); + assertTrue(result.opened()); + } + return result; + } + + private void giveUpOnTryingToGetAvailableConnection() { + assertFalse(desiredConnectionSlots.isEmpty()); + PooledConnection connection = desiredConnectionSlots.removeLast().reference; + if (connection != null) { + connection.release(); + } + } + + /** + * The hand-over mechanism is needed to prevent other threads doing checkout from stealing newly released connections + * from threads that are waiting for a permit to open a connection. + */ + void tryHandOverOrRelease(final UsageTrackingInternalConnection openConnection) { + boolean handedOver = withLock(lock, () -> { + for (//iterate from first (head) to last (tail) + MutableReference<PooledConnection> desiredConnectionSlot : desiredConnectionSlots) { + if (desiredConnectionSlot.reference == null) { + desiredConnectionSlot.reference = new PooledConnection(openConnection); + permitAvailableOrHandedOverOrClosedOrPausedCondition.signal(); + return true; + } + } + return false; + }); + if (!handedOver) { + pool.release(openConnection); + } + } + + void signalClosedOrPaused() { + withLock(lock, permitAvailableOrHandedOverOrClosedOrPausedCondition::signalAll); + } + } + + /** + * @see OpenConcurrencyLimiter#openWithConcurrencyLimit(OperationContext, PooledConnection, OpenWithConcurrencyLimitMode, Timeout, StartTime) + */ + private enum OpenWithConcurrencyLimitMode { + TRY_GET_AVAILABLE, + TRY_HAND_OVER_OR_RELEASE + } + + @NotThreadSafe + private static final class MutableReference<T> { + @Nullable + private T reference; + + private MutableReference() { + } + } + + @ThreadSafe + static final class ServiceStateManager { + private final ConcurrentHashMap<ObjectId, ServiceState> stateByServiceId = new ConcurrentHashMap<>(); + + void addConnection(final ObjectId serviceId) { + stateByServiceId.compute(serviceId, (k, v) -> { + if (v == null) { + v = new ServiceState(); + } + v.incrementConnectionCount(); + return v; + }); + } + + /** + * Removes the mapping from {@code serviceId} to a {@link ServiceState} if its connection count reaches 0. + * This is done to prevent memory leaks. + *
<p>
+ * This method must be called once for any connection for which {@link #addConnection(ObjectId)} was called. + */ + void removeConnection(final ObjectId serviceId) { + stateByServiceId.compute(serviceId, (k, v) -> { + assertNotNull(v); + return v.decrementAndGetConnectionCount() == 0 ? null : v; + }); + } + + /** + * In some cases we may increment the generation even for an unregistered serviceId, as when open fails on the only connection to + * a given serviceId. In this case this method does not track the generation increment but does return true. + * + * @return true if the generation was incremented + */ + boolean incrementGeneration(final ObjectId serviceId, final int expectedGeneration) { + ServiceState state = stateByServiceId.get(serviceId); + return state == null || state.incrementGeneration(expectedGeneration); + } + + int getGeneration(final ObjectId serviceId) { + ServiceState state = stateByServiceId.get(serviceId); + return state == null ? 0 : state.getGeneration(); + } + + private static final class ServiceState { + private final AtomicInteger generation = new AtomicInteger(); + private final AtomicInteger connectionCount = new AtomicInteger(); + + void incrementConnectionCount() { + connectionCount.incrementAndGet(); + } + + int decrementAndGetConnectionCount() { + return connectionCount.decrementAndGet(); + } + + boolean incrementGeneration(final int expectedGeneration) { + return generation.compareAndSet(expectedGeneration, expectedGeneration + 1); + } + + public int getGeneration() { + return generation.get(); + } + } + } + + private static final class PinnedStatsManager { + private final LongAdder numPinnedToCursor = new LongAdder(); + private final LongAdder numPinnedToTransaction = new LongAdder(); + + void increment(final Connection.PinningMode pinningMode) { + switch (pinningMode) { + case CURSOR: + numPinnedToCursor.increment(); + break; + case TRANSACTION: + numPinnedToTransaction.increment(); + break; + default: + fail(); + } + } + + void decrement(final Connection.PinningMode pinningMode) { + switch (pinningMode) { + case CURSOR: + numPinnedToCursor.decrement(); + break; + case TRANSACTION: + numPinnedToTransaction.decrement(); + break; + default: + fail(); + } + } + + int getNumPinnedToCursor() { + return numPinnedToCursor.intValue(); + } + + int getNumPinnedToTransaction() { + return numPinnedToTransaction.intValue(); + } + } + + /** + * This class maintains threads needed to perform {@link ConnectionPool#getAsync(OperationContext, SingleResultCallback)}. + */ + @ThreadSafe + private static class AsyncWorkManager implements AutoCloseable { + private volatile State state; + private volatile BlockingQueue tasks; + private final Lock lock; + @Nullable + private ExecutorService worker; + + AsyncWorkManager(final boolean prestart) { + state = State.NEW; + tasks = new LinkedBlockingQueue<>(); + lock = new StampedLock().asWriteLock(); + if (prestart) { + assertTrue(initUnlessClosed()); + } + } + + void enqueue(final Task task) { + boolean closed = withLock(lock, () -> { + if (initUnlessClosed()) { + tasks.add(task); + return false; + } + return true; + }); + if (closed) { + task.failAsClosed(); + } + } + + /** + * Invocations of this method must be guarded by {@link #lock}, unless done from the constructor. + * + * @return {@code false} iff the {@link #state} is {@link State#CLOSED}. 
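
The per-service generation bump above hinges on a single compare-and-set: of several threads that observed the same generation and invalidate concurrently, only one increments it, so the pool is cleared once per failure. A self-contained sketch of the idiom (class name hypothetical):

    import java.util.concurrent.atomic.AtomicInteger;

    final class GenerationSketch {
        private final AtomicInteger generation = new AtomicInteger();

        // Succeeds only for the first caller that saw expectedGeneration.
        boolean incrementGeneration(final int expectedGeneration) {
            return generation.compareAndSet(expectedGeneration, expectedGeneration + 1);
        }

        public static void main(final String[] args) {
            final GenerationSketch g = new GenerationSketch();
            System.out.println(g.incrementGeneration(0)); // true: 0 -> 1
            System.out.println(g.incrementGeneration(0)); // false: someone already bumped it
        }
    }
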
+ */ + private boolean initUnlessClosed() { + boolean result = true; + if (state == State.NEW) { + worker = Executors.newSingleThreadExecutor(new DaemonThreadFactory("AsyncGetter")); + worker.submit(() -> runAndLogUncaught(this::workerRun)); + state = State.INITIALIZED; + } else if (state == State.CLOSED) { + result = false; + } + return result; + } + + /** + * {@linkplain Thread#interrupt() Interrupts} all workers and causes queued tasks to + * {@linkplain Task#failAsClosed() fail} asynchronously. + */ + @Override + @SuppressWarnings("try") + public void close() { + withLock(lock, () -> { + if (state != State.CLOSED) { + state = State.CLOSED; + if (worker != null) { + worker.shutdownNow(); // at this point we interrupt `worker`s thread + } + } + }); + } + + private void workerRun() { + while (state != State.CLOSED) { + try { + Task task = tasks.take(); + + task.timeout().run(NANOSECONDS, + () -> task.execute(), + (ns) -> task.execute(), + () -> task.failAsTimedOut()); + } catch (InterruptedException closed) { + // fail the rest of the tasks and stop + } catch (Exception e) { + LOGGER.error(null, e); + } + } + failAllTasksAfterClosing(); + } + + private void failAllTasksAfterClosing() { + Queue localGets = withLock(lock, () -> { + assertTrue(state == State.CLOSED); + // at this point it is guaranteed that no thread enqueues a task + Queue result = tasks; + if (!tasks.isEmpty()) { + tasks = new LinkedBlockingQueue<>(); + } + return result; + }); + localGets.forEach(Task::failAsClosed); + localGets.clear(); + } + + private void runAndLogUncaught(final Runnable runnable) { + try { + runnable.run(); + } catch (Throwable t) { + LOGGER.error(this + " stopped working. You may want to recreate the MongoClient", t); + throw t; + } + } + + private enum State { + NEW, + INITIALIZED, + CLOSED + } + } + + /** + * An action that is allowed to be completed (failed or executed) at most once, and a timeout associated with it. + */ + @NotThreadSafe + final class Task { + private final Timeout timeout; + private final StartTime startTime; + private final Consumer action; + private final TimeoutContext timeoutContext; + private boolean completed; + + Task(final Timeout timeout, + final StartTime startTime, + final TimeoutContext timeoutContext, + final Consumer action) { + this.timeout = timeout; + this.timeoutContext = timeoutContext; + this.startTime = startTime; + this.action = action; + } + + void execute() { + doComplete(() -> null); + } + + void failAsClosed() { + doComplete(pool::poolClosedException); + } + + void failAsTimedOut() { + doComplete(() -> createTimeoutException(startTime, null, timeoutContext)); + } + + private void doComplete(final Supplier failureSupplier) { + assertFalse(completed); + completed = true; + action.accept(failureSupplier.get()); + } + + Timeout timeout() { + return timeout; + } + } + + /** + * Methods {@link #start()} and {@link #runOnceAndStop()} must be called sequentially. Each {@link #start()} must be followed by + * {@link #runOnceAndStop()} unless {@link BackgroundMaintenanceManager} is {@linkplain #close() closed}. + *

+     * <p>This class implements the CMAP background thread.
+     */
+    @NotThreadSafe
+    private final class BackgroundMaintenanceManager implements AutoCloseable {
+        @Nullable
+        private final ScheduledExecutorService maintainer;
+        @Nullable
+        private Future<?> cancellationHandle;
+        private boolean initialStart;
+
+        private BackgroundMaintenanceManager() {
+            maintainer = settings.getMaintenanceInitialDelay(NANOSECONDS) < Long.MAX_VALUE
+                    ? Executors.newSingleThreadScheduledExecutor(new DaemonThreadFactory("MaintenanceTimer"))
+                    : null;
+            cancellationHandle = null;
+            initialStart = true;
+        }
+
+        void start() {
+            if (maintainer != null) {
+                assertTrue(cancellationHandle == null);
+                cancellationHandle = ignoreRejectedExecution(() -> maintainer.scheduleAtFixedRate(
+                        DefaultConnectionPool.this::doMaintenance,
+                        initialStart ? settings.getMaintenanceInitialDelay(MILLISECONDS) : 0,
+                        settings.getMaintenanceFrequency(MILLISECONDS), MILLISECONDS));
+                initialStart = false;
+            }
+        }
+
+        void runOnceAndStop() {
+            if (maintainer != null) {
+                if (cancellationHandle != null) {
+                    cancellationHandle.cancel(false);
+                    cancellationHandle = null;
+                }
+                ignoreRejectedExecution(() -> maintainer.execute(DefaultConnectionPool.this::doMaintenance));
+            }
+        }
+
+        @Override
+        public void close() {
+            if (maintainer != null) {
+                maintainer.shutdownNow();
+            }
+        }
+
+        private void ignoreRejectedExecution(final Runnable action) {
+            ignoreRejectedExecution(() -> {
+                action.run();
+                return null;
+            });
+        }
+
+        @Nullable
+        private <T> T ignoreRejectedExecution(final Supplier<T> action) {
+            try {
+                return action.get();
+            } catch (RejectedExecutionException ignored) {
+                // `close` either completed or is in progress
+                return null;
+            }
+        }
+    }
+
+    @ThreadSafe
+    private final class StateAndGeneration {
+        private final ReadWriteLock lock;
+        private volatile boolean paused;
+        private final AtomicBoolean closed;
+        private volatile int generation;
+        @Nullable
+        private Throwable cause;
+
+        StateAndGeneration() {
+            lock = new StampedLock().asReadWriteLock();
+            paused = true;
+            closed = new AtomicBoolean();
+            generation = 0;
+            cause = null;
+        }
+
+        int generation() {
+            return generation;
+        }
+
+        /**
+         * @return {@code true} if and only if the state changed from ready to paused as a result of the operation.
+         * The generation is incremented regardless of the returned value.
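+         * <p>Illustrative use of the generation counter for staleness checks (a sketch, not actual driver code):</p>
+         * <pre>{@code
+         * int observedGeneration = stateAndGeneration.generation();
+         * // ... check out and use a connection ...
+         * boolean stale = stateAndGeneration.generation() != observedGeneration; // pool was cleared in between
+         * }</pre>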
+ */ + boolean pauseAndIncrementGeneration(@Nullable final Throwable cause) { + return withLock(lock.writeLock(), () -> { + boolean result = false; + if (!paused) { + paused = true; + pool.pause(() -> new MongoConnectionPoolClearedException(serverId, cause)); + result = true; + } + this.cause = cause; + //noinspection NonAtomicOperationOnVolatileField + generation++; + if (result) { + logEventMessage("Connection pool cleared", "Connection pool for {}:{} cleared"); + + connectionPoolListener.connectionPoolCleared(new ConnectionPoolClearedEvent(serverId)); + // one additional run is required to guarantee that a paused pool releases resources + backgroundMaintenance.runOnceAndStop(); + } + return result; + }); + } + + boolean ready() { + boolean result = false; + if (paused) { + result = withLock(lock.writeLock(), () -> { + if (paused) { + paused = false; + cause = null; + pool.ready(); + logEventMessage("Connection pool ready", "Connection pool ready for {}:{}"); + + connectionPoolListener.connectionPoolReady(new ConnectionPoolReadyEvent(serverId)); + backgroundMaintenance.start(); + return true; + } + return false; + }); + } + return result; + } + + /** + * @return {@code true} if and only if the state changed as a result of the operation. + */ + boolean close() { + return closed.compareAndSet(false, true); + } + + /** + * @return {@code false} which means that the method did not throw. + * The method returns to allow using it conveniently as part of a condition check when waiting on a {@link Condition}. + * Short-circuiting operators {@code &&} and {@code ||} must not be used with this method to ensure that it is called. + * @throws MongoServerUnavailableException If and only if {@linkplain #close() closed}. + * @throws MongoConnectionPoolClearedException If and only if {@linkplain #pauseAndIncrementGeneration(Throwable) paused} + * and not {@linkplain #close() closed}. 
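+         * <p>Illustrative wait loop; the non-short-circuiting {@code |} guarantees this method is evaluated,
+         * and hence throws, on every iteration ({@code tryAcquirePermit} and {@code condition} are hypothetical,
+         * not actual driver code):</p>
+         * <pre>{@code
+         * while (stateAndGeneration.throwIfClosedOrPaused() | !tryAcquirePermit()) {
+         *     condition.await();
+         * }
+         * }</pre>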
+         */
+        boolean throwIfClosedOrPaused() {
+            if (closed.get()) {
+                throw pool.poolClosedException();
+            }
+            if (paused) {
+                withLock(lock.readLock(), () -> {
+                    if (paused) {
+                        throw new MongoConnectionPoolClearedException(serverId, cause);
+                    }
+                });
+            }
+            return false;
+        }
+
+    }
+
+    private void logEventMessage(final String messageId, final String format, final long driverConnectionId) {
+        ClusterId clusterId = serverId.getClusterId();
+        if (requiresLogging(clusterId)) {
+            List<LogMessage.Entry> entries = createBasicEntries();
+            entries.add(new LogMessage.Entry(DRIVER_CONNECTION_ID, driverConnectionId));
+            logMessage(messageId, clusterId, format, entries);
+        }
+    }
+
+    private void logEventMessage(final String messageId, final String format) {
+        ClusterId clusterId = serverId.getClusterId();
+        if (requiresLogging(clusterId)) {
+            List<LogMessage.Entry> entries = createBasicEntries();
+            logMessage(messageId, clusterId, format, entries);
+        }
+    }
+
+    private List<LogMessage.Entry> createBasicEntries() {
+        List<LogMessage.Entry> entries = new ArrayList<>();
+        entries.add(new LogMessage.Entry(SERVER_HOST, serverId.getAddress().getHost()));
+        entries.add(new LogMessage.Entry(SERVER_PORT, serverId.getAddress().getPort()));
+        return entries;
+    }
+
+    private static void logMessage(final String messageId, final ClusterId clusterId, final String format,
+            final List<LogMessage.Entry> entries) {
+        STRUCTURED_LOGGER.log(new LogMessage(CONNECTION, DEBUG, messageId, clusterId, entries, format));
+    }
+
+    private static boolean requiresLogging(final ClusterId clusterId) {
+        return STRUCTURED_LOGGER.isRequired(DEBUG, clusterId);
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultDnsSrvRecordMonitor.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultDnsSrvRecordMonitor.java
new file mode 100644
index 00000000000..9a17f2eb4f9
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultDnsSrvRecordMonitor.java
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoException; +import com.mongodb.MongoInternalException; +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ClusterType; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.internal.dns.DnsResolver; + +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static com.mongodb.internal.connection.ServerAddressHelper.createServerAddress; +import static java.util.Collections.unmodifiableSet; + +class DefaultDnsSrvRecordMonitor implements DnsSrvRecordMonitor { + private static final Logger LOGGER = Loggers.getLogger("cluster"); + + private final String hostName; + private final String srvServiceName; + private final long rescanFrequencyMillis; + private final long noRecordsRescanFrequencyMillis; + private final DnsSrvRecordInitializer dnsSrvRecordInitializer; + private final DnsResolver dnsResolver; + private final Thread monitorThread; + private volatile boolean isClosed; + + DefaultDnsSrvRecordMonitor(final String hostName, final String srvServiceName, final long rescanFrequencyMillis, final long noRecordsRescanFrequencyMillis, + final DnsSrvRecordInitializer dnsSrvRecordInitializer, final ClusterId clusterId, + final DnsResolver dnsResolver) { + this.hostName = hostName; + this.srvServiceName = srvServiceName; + this.rescanFrequencyMillis = rescanFrequencyMillis; + this.noRecordsRescanFrequencyMillis = noRecordsRescanFrequencyMillis; + this.dnsSrvRecordInitializer = dnsSrvRecordInitializer; + this.dnsResolver = dnsResolver; + monitorThread = new Thread(new DnsSrvRecordMonitorRunnable(), "cluster-" + clusterId + "-srv-" + hostName); + monitorThread.setDaemon(true); + } + + @Override + public void start() { + monitorThread.start(); + } + + @Override + public void close() { + isClosed = true; + monitorThread.interrupt(); + } + + private class DnsSrvRecordMonitorRunnable implements Runnable { + private Set currentHosts = Collections.emptySet(); + private ClusterType clusterType = ClusterType.UNKNOWN; + + @Override + public void run() { + try { + while (!isClosed && shouldContinueMonitoring()) { + try { + List resolvedHostNames = dnsResolver.resolveHostFromSrvRecords(hostName, srvServiceName); + Set hosts = createServerAddressSet(resolvedHostNames); + + if (isClosed) { + return; + } + + if (!hosts.equals(currentHosts)) { + try { + dnsSrvRecordInitializer.initialize(unmodifiableSet(hosts)); + currentHosts = hosts; + } catch (Exception e) { + LOGGER.warn("Exception in monitor thread during notification of DNS resolution state change", e); + } + } + } catch (MongoException e) { + if (currentHosts.isEmpty()) { + dnsSrvRecordInitializer.initialize(e); + } + LOGGER.info("Exception while resolving SRV records", e); + } catch (Exception e) { + if (currentHosts.isEmpty()) { + dnsSrvRecordInitializer.initialize(new MongoInternalException("Unexpected runtime exception", e)); + } + LOGGER.info("Unexpected runtime exception while resolving SRV record", e); + } + + try { + Thread.sleep(getRescanFrequencyMillis()); + } catch (InterruptedException closed) { + // fall through + } + clusterType = dnsSrvRecordInitializer.getClusterType(); + } + } catch (Throwable t) { + LOGGER.error(this + " stopped working. 
You may want to recreate the MongoClient", t);
+                throw t;
+            }
+        }
+
+        private boolean shouldContinueMonitoring() {
+            return clusterType == ClusterType.UNKNOWN || clusterType == ClusterType.SHARDED;
+        }
+
+        private long getRescanFrequencyMillis() {
+            return currentHosts.isEmpty() ? noRecordsRescanFrequencyMillis : rescanFrequencyMillis;
+        }
+
+        private Set<ServerAddress> createServerAddressSet(final List<String> resolvedHostNames) {
+            Set<ServerAddress> hosts = new HashSet<>(resolvedHostNames.size());
+            for (String host : resolvedHostNames) {
+                hosts.add(createServerAddress(host));
+            }
+            return hosts;
+        }
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultDnsSrvRecordMonitorFactory.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultDnsSrvRecordMonitorFactory.java
new file mode 100644
index 00000000000..d303acdef99
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultDnsSrvRecordMonitorFactory.java
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.connection;
+
+import com.mongodb.connection.ClusterId;
+import com.mongodb.connection.ServerSettings;
+import com.mongodb.internal.dns.DefaultDnsResolver;
+import com.mongodb.lang.Nullable;
+import com.mongodb.spi.dns.DnsClient;
+
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+
+/**
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class DefaultDnsSrvRecordMonitorFactory implements DnsSrvRecordMonitorFactory { + + // JNDI doesn't provide the TTL for DNS records, so we have to hard-code it + private static final long DEFAULT_RESCAN_FREQUENCY_MILLIS = 60000; + + private final ClusterId clusterId; + private final long noRecordsRescanFrequency; + private final DnsClient dnsClient; + + public DefaultDnsSrvRecordMonitorFactory(final ClusterId clusterId, final ServerSettings serverSettings, @Nullable final DnsClient dnsClient) { + this.clusterId = clusterId; + this.noRecordsRescanFrequency = serverSettings.getHeartbeatFrequency(MILLISECONDS); + this.dnsClient = dnsClient; + } + + @Override + public DnsSrvRecordMonitor create(final String hostName, final String srvServiceName, final DnsSrvRecordInitializer dnsSrvRecordInitializer) { + return new DefaultDnsSrvRecordMonitor(hostName, srvServiceName, DEFAULT_RESCAN_FREQUENCY_MILLIS, noRecordsRescanFrequency, + dnsSrvRecordInitializer, clusterId, new DefaultDnsResolver(dnsClient)); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultInetAddressResolver.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultInetAddressResolver.java new file mode 100644 index 00000000000..0f8158cf7d2 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultInetAddressResolver.java @@ -0,0 +1,36 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.spi.dns.InetAddressResolver; + +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.List; + +import static java.util.Arrays.asList; + +/** + *

+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
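+ * <p>Illustrative usage (the host name is an example value; {@code lookupByName} may throw
+ * {@code UnknownHostException}):</p>
+ * <pre>{@code
+ * InetAddressResolver resolver = new DefaultInetAddressResolver();
+ * List<InetAddress> addresses = resolver.lookupByName("example.com");
+ * }</pre>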
+ */ +public class DefaultInetAddressResolver implements InetAddressResolver { + + @Override + public List lookupByName(final String host) throws UnknownHostException { + return asList(InetAddress.getAllByName(host)); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultSdamServerDescriptionManager.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultSdamServerDescriptionManager.java new file mode 100644 index 00000000000..af4acd8c031 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultSdamServerDescriptionManager.java @@ -0,0 +1,148 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerId; +import com.mongodb.connection.ServerType; +import com.mongodb.event.ServerDescriptionChangedEvent; +import com.mongodb.event.ServerListener; + +import static com.mongodb.assertions.Assertions.assertFalse; +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.connection.ServerType.UNKNOWN; +import static com.mongodb.internal.connection.EventHelper.wouldDescriptionsGenerateEquivalentEvents; +import static com.mongodb.internal.connection.ServerDescriptionHelper.unknownConnectingServerDescription; + +@ThreadSafe +final class DefaultSdamServerDescriptionManager implements SdamServerDescriptionManager { + private final Cluster cluster; + private final ServerId serverId; + private final ServerListener serverListener; + private final ServerMonitor serverMonitor; + private final ConnectionPool connectionPool; + private final ClusterConnectionMode connectionMode; + private volatile ServerDescription description; + + DefaultSdamServerDescriptionManager(final Cluster cluster, + final ServerId serverId, + final ServerListener serverListener, final ServerMonitor serverMonitor, + final ConnectionPool connectionPool, + final ClusterConnectionMode connectionMode) { + this.cluster = cluster; + this.serverId = assertNotNull(serverId); + this.serverListener = assertNotNull(serverListener); + this.serverMonitor = assertNotNull(serverMonitor); + this.connectionPool = assertNotNull(connectionPool); + this.connectionMode = assertNotNull(connectionMode); + description = unknownConnectingServerDescription(serverId, null); + } + + @Override + public void monitorUpdate(final ServerDescription candidateDescription) { + cluster.withLock(() -> { + if (TopologyVersionHelper.newer(description.getTopologyVersion(), candidateDescription.getTopologyVersion())) { + return; + } + ServerType newServerType = candidateDescription.getType(); + boolean markedPoolReady = false; + /* A paused pool should not be exposed and used. 
Calling `ready` before updating description and calling `invalidate` after + * facilitates achieving this. However, because once the pool is observed, it may be used concurrently with the pool being + * invalidated by either the current method or the `handleException` method, the pool still may be used in a paused state. + * For those cases `MongoConnectionPoolClearedException` was introduced. */ + if (ServerTypeHelper.isDataBearing(newServerType) + || (newServerType != UNKNOWN && connectionMode == ClusterConnectionMode.SINGLE)) { + connectionPool.ready(); + markedPoolReady = true; + } + updateDescription(candidateDescription); + Throwable candidateDescriptionException = candidateDescription.getException(); + if (candidateDescriptionException != null) { + assertTrue(newServerType == UNKNOWN); + assertFalse(markedPoolReady); + connectionPool.invalidate(candidateDescriptionException); + } + }); + } + + @Override + public void updateToUnknown(final ServerDescription candidateDescription) { + assertTrue(candidateDescription.getType() == UNKNOWN); + cluster.withLock(() -> { + if (TopologyVersionHelper.newer(description.getTopologyVersion(), candidateDescription.getTopologyVersion())) { + return; + } + + updateDescription(candidateDescription); + }); + } + + @Override + public void handleExceptionBeforeHandshake(final SdamIssue sdamIssue) { + cluster.withLock(() -> handleException(sdamIssue, true)); + } + + @Override + public void handleExceptionAfterHandshake(final SdamIssue sdamIssue) { + cluster.withLock(() -> handleException(sdamIssue, false)); + } + + @Override + public SdamIssue.Context context() { + return new SdamIssue.Context(serverId, connectionPool.getGeneration(), description.getMaxWireVersion()); + } + + @Override + public SdamIssue.Context context(final InternalConnection connection) { + return new SdamIssue.Context(serverId, connection.getGeneration(), connection.getDescription().getMaxWireVersion()); + } + + private void updateDescription(final ServerDescription newDescription) { + ServerDescription previousDescription = description; + description = newDescription; + ServerDescriptionChangedEvent serverDescriptionChangedEvent = new ServerDescriptionChangedEvent( + serverId, newDescription, previousDescription); + if (!wouldDescriptionsGenerateEquivalentEvents(newDescription, previousDescription)) { + serverListener.serverDescriptionChanged(serverDescriptionChangedEvent); + } + cluster.onChange(serverDescriptionChangedEvent); + } + + private void handleException(final SdamIssue sdamIssue, final boolean beforeHandshake) { + if (sdamIssue.stale(connectionPool, description)) { + return; + } + if (sdamIssue.relatedToStateChange()) { + updateDescription(sdamIssue.serverDescription()); + if (sdamIssue.relatedToShutdown()) { + connectionPool.invalidate(sdamIssue.exception().orElse(null)); + } + serverMonitor.connect(); + } else if (sdamIssue.relatedToNetworkNotTimeout() + || (beforeHandshake && (sdamIssue.relatedToNetworkTimeout() || sdamIssue.relatedToAuth()))) { + updateDescription(sdamIssue.serverDescription()); + connectionPool.invalidate(sdamIssue.exception().orElse(null)); + serverMonitor.cancelCurrentCheck(); + } else if (sdamIssue.relatedToWriteConcern() || sdamIssue.relatedToStalePrimary()) { + updateDescription(sdamIssue.serverDescription()); + serverMonitor.connect(); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultServer.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultServer.java new file mode 100644 index 
00000000000..80b1a5c0c27 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultServer.java @@ -0,0 +1,377 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoException; +import com.mongodb.MongoServerUnavailableException; +import com.mongodb.ReadPreference; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ServerId; +import com.mongodb.event.CommandListener; +import com.mongodb.event.ServerClosedEvent; +import com.mongodb.event.ServerListener; +import com.mongodb.event.ServerOpeningEvent; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.connection.SdamServerDescriptionManager.SdamIssue; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.internal.session.SessionContext; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.FieldNameValidator; +import org.bson.codecs.Decoder; + +import java.util.concurrent.atomic.AtomicInteger; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; +import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; +import static com.mongodb.internal.connection.ServerDescriptionHelper.unknownConnectingServerDescription; + +class DefaultServer implements ClusterableServer { + private static final Logger LOGGER = Loggers.getLogger("connection"); + private final ServerId serverId; + private final ConnectionPool connectionPool; + private final ClusterConnectionMode clusterConnectionMode; + private final ConnectionFactory connectionFactory; + private final ServerMonitor serverMonitor; + private final SdamServerDescriptionManager sdam; + private final ServerListener serverListener; + private final CommandListener commandListener; + private final ClusterClock clusterClock; + @Nullable + private final AtomicInteger operationCount; + private volatile boolean isClosed; + + DefaultServer(final ServerId serverId, final ClusterConnectionMode clusterConnectionMode, final ConnectionPool connectionPool, + final ConnectionFactory connectionFactory, final ServerMonitor serverMonitor, + final SdamServerDescriptionManager sdam, final ServerListener serverListener, + final CommandListener commandListener, final ClusterClock clusterClock, final boolean trackOperationCount) { + this.sdam = assertNotNull(sdam); + this.serverListener = notNull("serverListener", serverListener); + this.commandListener = commandListener; + this.clusterClock = notNull("clusterClock", clusterClock); + notNull("serverAddress", serverId); + 
this.clusterConnectionMode = notNull("clusterConnectionMode", clusterConnectionMode); + this.connectionFactory = notNull("connectionFactory", connectionFactory); + this.connectionPool = notNull("connectionPool", connectionPool); + + this.serverId = serverId; + serverListener.serverOpening(new ServerOpeningEvent(this.serverId)); + + this.serverMonitor = serverMonitor; + operationCount = trackOperationCount ? new AtomicInteger() : null; + } + + @Override + public Connection getConnection(final OperationContext operationContext) { + if (isClosed) { + throw new MongoServerUnavailableException(String.format("The server at %s is no longer available", serverId.getAddress())); + } + SdamIssue.Context exceptionContext = sdam.context(); + operationBegin(); + try { + return OperationCountTrackingConnection.decorate(this, + connectionFactory.create(connectionPool.get(operationContext), new DefaultServerProtocolExecutor(), clusterConnectionMode)); + } catch (Throwable e) { + try { + operationEnd(); + if (e instanceof MongoException) { + sdam.handleExceptionBeforeHandshake(SdamIssue.of(e, exceptionContext)); + } + } catch (Exception suppressed) { + e.addSuppressed(suppressed); + } + throw e; + } + } + + @Override + public void getConnectionAsync(final OperationContext operationContext, final SingleResultCallback callback) { + if (isClosed) { + callback.onResult(null, new MongoServerUnavailableException( + String.format("The server at %s is no longer available", serverId.getAddress()))); + return; + } + SdamIssue.Context exceptionContext = sdam.context(); + operationBegin(); + connectionPool.getAsync(operationContext, (result, t) -> { + if (t != null) { + try { + operationEnd(); + sdam.handleExceptionBeforeHandshake(SdamIssue.of(t, exceptionContext)); + } catch (Exception suppressed) { + t.addSuppressed(suppressed); + } finally { + callback.onResult(null, t); + } + } else { + callback.onResult(AsyncOperationCountTrackingConnection.decorate(DefaultServer.this, + connectionFactory.createAsync(assertNotNull(result), new DefaultServerProtocolExecutor(), clusterConnectionMode)), + null); + } + }); + } + + @Override + public int operationCount() { + return operationCount == null ? 
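+                // a null operationCount means tracking is disabled (trackOperationCount == false), signalled as -1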
-1 : operationCount.get();
+    }
+
+    private void operationBegin() {
+        if (operationCount != null) {
+            operationCount.incrementAndGet();
+        }
+    }
+
+    private void operationEnd() {
+        if (operationCount != null) {
+            assertTrue(operationCount.decrementAndGet() >= 0);
+        }
+    }
+
+    @Override
+    public void resetToConnecting(final MongoException cause) {
+        sdam.updateToUnknown(unknownConnectingServerDescription(serverId, cause));
+    }
+
+    @Override
+    public void invalidate(final MongoException cause) {
+        if (!isClosed()) {
+            sdam.handleExceptionAfterHandshake(SdamIssue.of(cause, sdam.context()));
+        }
+    }
+
+    @Override
+    public void close() {
+        if (!isClosed()) {
+            connectionPool.close();
+            serverMonitor.close();
+            isClosed = true;
+            serverListener.serverClosed(new ServerClosedEvent(serverId));
+        }
+    }
+
+    @Override
+    public boolean isClosed() {
+        return isClosed;
+    }
+
+    @Override
+    public void connect() {
+        serverMonitor.connect();
+    }
+
+    @VisibleForTesting(otherwise = PRIVATE)
+    ConnectionPool getConnectionPool() {
+        return connectionPool;
+    }
+
+    @VisibleForTesting(otherwise = PRIVATE)
+    SdamServerDescriptionManager sdamServerDescriptionManager() {
+        return sdam;
+    }
+
+    @VisibleForTesting(otherwise = PRIVATE)
+    ServerId serverId() {
+        return serverId;
+    }
+
+    private class DefaultServerProtocolExecutor extends AbstractProtocolExecutor {
+
+        @SuppressWarnings("unchecked")
+        @Override
+        public <T> T execute(final CommandProtocol<T> protocol, final InternalConnection connection,
+                final SessionContext sessionContext) {
+            try {
+                return protocol
+                        .withSessionContext(new ClusterClockAdvancingSessionContext(sessionContext, clusterClock))
+                        .execute(connection);
+            } catch (MongoException e) {
+                try {
+                    sdam.handleExceptionAfterHandshake(SdamIssue.of(e, sdam.context(connection)));
+                } catch (Exception suppressed) {
+                    e.addSuppressed(suppressed);
+                }
+                if (e instanceof MongoWriteConcernWithResponseException) {
+                    return (T) ((MongoWriteConcernWithResponseException) e).getResponse();
+                } else {
+                    if (shouldMarkSessionDirty(e, sessionContext)) {
+                        sessionContext.markSessionDirty();
+                    }
+                    throw e;
+                }
+            }
+        }
+
+        @SuppressWarnings("unchecked")
+        @Override
+        public <T> void executeAsync(final CommandProtocol<T> protocol, final InternalConnection connection,
+                final SessionContext sessionContext, final SingleResultCallback<T> callback) {
+            protocol.withSessionContext(new ClusterClockAdvancingSessionContext(sessionContext, clusterClock))
+                    .executeAsync(connection, errorHandlingCallback((result, t) -> {
+                        if (t != null) {
+                            try {
+                                sdam.handleExceptionAfterHandshake(SdamIssue.of(t, sdam.context(connection)));
+                            } catch (Exception suppressed) {
+                                t.addSuppressed(suppressed);
+                            } finally {
+                                if (t instanceof MongoWriteConcernWithResponseException) {
+                                    callback.onResult((T) ((MongoWriteConcernWithResponseException) t).getResponse(), null);
+                                } else {
+                                    if (shouldMarkSessionDirty(t, sessionContext)) {
+                                        sessionContext.markSessionDirty();
+                                    }
+                                    callback.onResult(null, t);
+                                }
+                            }
+                        } else {
+                            callback.onResult(result, null);
+                        }
+                    }, LOGGER));
+        }
+    }
+
+    private static final class OperationCountTrackingConnection implements Connection {
+        private final DefaultServer server;
+        private final Connection wrapped;
+
+        static Connection decorate(final DefaultServer server, final Connection connection) {
+            return server.operationCount() < 0
+                    ? connection
+                    : new OperationCountTrackingConnection(server, connection);
+        }
+
+        private OperationCountTrackingConnection(final DefaultServer server, final Connection connection) {
+            this.server = server;
+            wrapped = connection;
+        }
+
+        @Override
+        public int getCount() {
+            return wrapped.getCount();
+        }
+
+        @Override
+        public int release() {
+            int count = wrapped.release();
+            if (count == 0) {
+                server.operationEnd();
+            }
+            return count;
+        }
+
+        @Override
+        public Connection retain() {
+            wrapped.retain();
+            return this;
+        }
+
+        @Override
+        public ConnectionDescription getDescription() {
+            return wrapped.getDescription();
+        }
+
+        @Override
+        public <T> T command(final String database, final BsonDocument command, final FieldNameValidator fieldNameValidator,
+                @Nullable final ReadPreference readPreference, final Decoder<T> commandResultDecoder,
+                final OperationContext operationContext) {
+            return wrapped.command(database, command, fieldNameValidator, readPreference, commandResultDecoder, operationContext);
+        }
+
+        @Override
+        public <T> T command(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator,
+                @Nullable final ReadPreference readPreference, final Decoder<T> commandResultDecoder,
+                final OperationContext operationContext, final boolean responseExpected,
+                final MessageSequences sequences) {
+            return wrapped.command(database, command, commandFieldNameValidator, readPreference, commandResultDecoder, operationContext,
+                    responseExpected, sequences);
+        }
+
+        @Override
+        public void markAsPinned(final PinningMode pinningMode) {
+            wrapped.markAsPinned(pinningMode);
+        }
+    }
+
+    private static final class AsyncOperationCountTrackingConnection implements AsyncConnection {
+        private final DefaultServer server;
+        private final AsyncConnection wrapped;
+
+        static AsyncConnection decorate(final DefaultServer server, final AsyncConnection connection) {
+            return server.operationCount() < 0
+                    ?
connection + : new AsyncOperationCountTrackingConnection(server, connection); + } + + AsyncOperationCountTrackingConnection(final DefaultServer server, final AsyncConnection connection) { + this.server = server; + wrapped = connection; + } + + @Override + public int getCount() { + return wrapped.getCount(); + } + + @Override + public int release() { + int count = wrapped.release(); + if (count == 0) { + server.operationEnd(); + } + return count; + } + + @Override + public AsyncConnection retain() { + wrapped.retain(); + return this; + } + + @Override + public ConnectionDescription getDescription() { + return wrapped.getDescription(); + } + + @Override + public void commandAsync(final String database, final BsonDocument command, final FieldNameValidator fieldNameValidator, + @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, + final OperationContext operationContext, final SingleResultCallback callback) { + wrapped.commandAsync(database, command, fieldNameValidator, readPreference, commandResultDecoder, + operationContext, callback); + } + + @Override + public void commandAsync(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, + @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, + final OperationContext operationContext, final boolean responseExpected, final MessageSequences sequences, + final SingleResultCallback callback) { + wrapped.commandAsync(database, command, commandFieldNameValidator, readPreference, commandResultDecoder, + operationContext, responseExpected, sequences, callback); + } + + @Override + public void markAsPinned(final Connection.PinningMode pinningMode) { + wrapped.markAsPinned(pinningMode); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultServerConnection.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultServerConnection.java new file mode 100644 index 00000000000..143ef5b76ae --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultServerConnection.java @@ -0,0 +1,124 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.ReadPreference; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.connection.MessageSequences.EmptyMessageSequences; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.internal.session.SessionContext; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.FieldNameValidator; +import org.bson.codecs.Decoder; + +import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; + +/** + *

+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
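+ * <p>Illustrative use of {@code command} (the ping document is an example; the validator and decoder mirror
+ * what the driver already uses elsewhere in this package, and {@code operationContext} is assumed to be in scope):</p>
+ * <pre>{@code
+ * BsonDocument reply = serverConnection.command("admin", new BsonDocument("ping", new BsonInt32(1)),
+ *         NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), new BsonDocumentCodec(), operationContext);
+ * }</pre>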
+ */
+public class DefaultServerConnection extends AbstractReferenceCounted implements Connection, AsyncConnection {
+    private static final Logger LOGGER = Loggers.getLogger("connection");
+    private final InternalConnection wrapped;
+    private final ProtocolExecutor protocolExecutor;
+    private final ClusterConnectionMode clusterConnectionMode;
+
+    public DefaultServerConnection(final InternalConnection wrapped, final ProtocolExecutor protocolExecutor,
+            final ClusterConnectionMode clusterConnectionMode) {
+        this.wrapped = wrapped;
+        this.protocolExecutor = protocolExecutor;
+        this.clusterConnectionMode = clusterConnectionMode;
+    }
+
+    @Override
+    public DefaultServerConnection retain() {
+        super.retain();
+        return this;
+    }
+
+    @Override
+    public int release() {
+        int count = super.release();
+        if (count == 0) {
+            wrapped.close();
+        }
+        return count;
+    }
+
+    @Override
+    public ConnectionDescription getDescription() {
+        return wrapped.getDescription();
+    }
+
+    @Nullable
+    @Override
+    public <T> T command(final String database, final BsonDocument command, final FieldNameValidator fieldNameValidator,
+            @Nullable final ReadPreference readPreference, final Decoder<T> commandResultDecoder,
+            final OperationContext operationContext) {
+        return command(database, command, fieldNameValidator, readPreference, commandResultDecoder, operationContext, true,
+                EmptyMessageSequences.INSTANCE);
+    }
+
+    @Nullable
+    @Override
+    public <T> T command(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator,
+            @Nullable final ReadPreference readPreference, final Decoder<T> commandResultDecoder,
+            final OperationContext operationContext, final boolean responseExpected, final MessageSequences sequences) {
+        return executeProtocol(
+                new CommandProtocolImpl<>(database, command, commandFieldNameValidator, readPreference, commandResultDecoder,
+                        responseExpected, sequences, clusterConnectionMode, operationContext),
+                operationContext.getSessionContext());
+    }
+
+    @Override
+    public <T> void commandAsync(final String database, final BsonDocument command, final FieldNameValidator fieldNameValidator,
+            @Nullable final ReadPreference readPreference, final Decoder<T> commandResultDecoder, final OperationContext operationContext,
+            final SingleResultCallback<T> callback) {
+        commandAsync(database, command, fieldNameValidator, readPreference, commandResultDecoder,
+                operationContext, true, EmptyMessageSequences.INSTANCE, callback);
+    }
+
+    @Override
+    public <T> void commandAsync(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator,
+            @Nullable final ReadPreference readPreference, final Decoder<T> commandResultDecoder, final OperationContext operationContext,
+            final boolean responseExpected, final MessageSequences sequences, final SingleResultCallback<T> callback) {
+        executeProtocolAsync(new CommandProtocolImpl<>(database, command, commandFieldNameValidator, readPreference,
+                commandResultDecoder, responseExpected, sequences, clusterConnectionMode, operationContext),
+                operationContext.getSessionContext(), callback);
+    }
+
+    @Override
+    public void markAsPinned(final PinningMode pinningMode) {
+        wrapped.markAsPinned(pinningMode);
+    }
+
+    @Nullable
+    private <T> T executeProtocol(final CommandProtocol<T> protocol, final SessionContext sessionContext) {
+        return protocolExecutor.execute(protocol, this.wrapped, sessionContext);
+    }
+
+    private <T> void executeProtocolAsync(final CommandProtocol<T> protocol, final SessionContext sessionContext,
+            final SingleResultCallback<T> callback) {
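+        // Wrap the callback so a synchronous failure below is reported through the callback exactly once
+        // rather than being thrown to the caller.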
+ SingleResultCallback errHandlingCallback = errorHandlingCallback(callback, LOGGER); + try { + protocolExecutor.executeAsync(protocol, this.wrapped, sessionContext, errHandlingCallback); + } catch (Throwable t) { + errHandlingCallback.onResult(null, t); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/DefaultServerMonitor.java b/driver-core/src/main/com/mongodb/internal/connection/DefaultServerMonitor.java new file mode 100644 index 00000000000..bb97517d315 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/DefaultServerMonitor.java @@ -0,0 +1,669 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoInterruptedException; +import com.mongodb.MongoSocketException; +import com.mongodb.ServerApi; +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerId; +import com.mongodb.connection.ServerSettings; +import com.mongodb.event.ServerHeartbeatFailedEvent; +import com.mongodb.event.ServerHeartbeatStartedEvent; +import com.mongodb.event.ServerHeartbeatSucceededEvent; +import com.mongodb.event.ServerMonitorListener; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.internal.inject.Provider; +import com.mongodb.internal.logging.LogMessage; +import com.mongodb.internal.logging.StructuredLogger; +import com.mongodb.internal.validator.NoOpFieldNameValidator; +import com.mongodb.lang.Nullable; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonInt64; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.types.ObjectId; + +import java.util.Objects; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +import static com.mongodb.ReadPreference.primary; +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.fail; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.connection.ServerType.UNKNOWN; +import static com.mongodb.internal.Locks.checkedWithLock; +import static com.mongodb.internal.Locks.withLock; +import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; +import static com.mongodb.internal.connection.CommandHelper.HELLO; +import static com.mongodb.internal.connection.CommandHelper.LEGACY_HELLO; +import static com.mongodb.internal.connection.CommandHelper.executeCommand; +import static com.mongodb.internal.connection.DescriptionHelper.createServerDescription; +import static 
com.mongodb.internal.connection.ServerDescriptionHelper.unknownConnectingServerDescription; +import static com.mongodb.internal.event.EventListenerHelper.singleServerMonitorListener; +import static com.mongodb.internal.logging.LogMessage.Component.TOPOLOGY; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.AWAITED; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.DRIVER_CONNECTION_ID; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.DURATION_MS; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.FAILURE; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.REPLY; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.SERVER_CONNECTION_ID; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.SERVER_HOST; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.SERVER_PORT; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.TOPOLOGY_ID; +import static com.mongodb.internal.logging.LogMessage.Level.DEBUG; +import static java.lang.String.format; +import static java.util.Arrays.asList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.NANOSECONDS; + +@ThreadSafe +class DefaultServerMonitor implements ServerMonitor { + + private static final Logger LOGGER = Loggers.getLogger("cluster"); + private static final StructuredLogger STRUCTURED_LOGGER = new StructuredLogger("cluster"); + + private final ServerId serverId; + private final ServerMonitorListener serverMonitorListener; + private final Provider sdamProvider; + private final InternalOperationContextFactory operationContextFactory; + private final InternalConnectionFactory internalConnectionFactory; + private final ClusterConnectionMode clusterConnectionMode; + @Nullable + private final ServerApi serverApi; + private final boolean isFunctionAsAServiceEnvironment; + private final ServerSettings serverSettings; + private final ServerMonitor monitor; + /** + * Must be guarded by {@link #lock}. 
+ */ + @Nullable + private RoundTripTimeMonitor roundTripTimeMonitor; + private final RoundTripTimeSampler roundTripTimeSampler = new RoundTripTimeSampler(); + private final Lock lock = new ReentrantLock(); + private final Condition condition = lock.newCondition(); + private volatile boolean isClosed; + + DefaultServerMonitor(final ServerId serverId, final ServerSettings serverSettings, + final InternalConnectionFactory internalConnectionFactory, + final ClusterConnectionMode clusterConnectionMode, + @Nullable final ServerApi serverApi, + final boolean isFunctionAsAServiceEnvironment, + final Provider sdamProvider, + final InternalOperationContextFactory operationContextFactory) { + this.serverSettings = notNull("serverSettings", serverSettings); + this.serverId = notNull("serverId", serverId); + this.serverMonitorListener = singleServerMonitorListener(serverSettings); + this.internalConnectionFactory = notNull("internalConnectionFactory", internalConnectionFactory); + this.clusterConnectionMode = notNull("clusterConnectionMode", clusterConnectionMode); + this.operationContextFactory = assertNotNull(operationContextFactory); + this.serverApi = serverApi; + this.isFunctionAsAServiceEnvironment = isFunctionAsAServiceEnvironment; + this.sdamProvider = sdamProvider; + monitor = new ServerMonitor(); + roundTripTimeMonitor = null; + isClosed = false; + } + + @Override + public void start() { + logStartedServerMonitoring(serverId); + monitor.start(); + } + + private void ensureRoundTripTimeMonitorStarted() { + withLock(lock, () -> { + if (!isClosed && roundTripTimeMonitor == null) { + roundTripTimeMonitor = new RoundTripTimeMonitor(); + roundTripTimeMonitor.start(); + } + }); + } + + @Override + public void connect() { + withLock(lock, condition::signal); + } + + @Override + @SuppressWarnings("try") + public void close() { + withLock(lock, () -> { + if (!isClosed) { + logStoppedServerMonitoring(serverId); + } + isClosed = true; + //noinspection EmptyTryBlock + try (ServerMonitor ignoredAutoClosed = monitor; + RoundTripTimeMonitor ignoredAutoClose2 = roundTripTimeMonitor) { + // we are automatically closing resources here + } + }); + } + + @Override + public void cancelCurrentCheck() { + monitor.cancelCurrentCheck(); + } + + @VisibleForTesting(otherwise = PRIVATE) + ServerMonitor getServerMonitor() { + return monitor; + } + + class ServerMonitor extends Thread implements AutoCloseable { + private volatile InternalConnection connection = null; + private volatile boolean alreadyLoggedHeartBeatStarted = false; + private volatile boolean currentCheckCancelled; + private volatile long lookupStartTimeNanos; + + ServerMonitor() { + super("cluster-" + serverId.getClusterId() + "-" + serverId.getAddress()); + setDaemon(true); + } + + @Override + public void close() { + interrupt(); + InternalConnection connection = this.connection; + if (connection != null) { + connection.close(); + } + } + + @Override + public void run() { + ServerDescription currentServerDescription = unknownConnectingServerDescription(serverId, null); + try { + while (!isClosed) { + ServerDescription previousServerDescription = currentServerDescription; + currentServerDescription = lookupServerDescription(currentServerDescription); + boolean shouldStreamResponses = shouldStreamResponses(currentServerDescription); + if (shouldStreamResponses) { + ensureRoundTripTimeMonitorStarted(); + } + + if (isClosed) { + continue; + } + + if (currentCheckCancelled) { + waitForNext(); + currentCheckCancelled = false; + continue; + } + + 
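+                    // Publish the freshest description, then either re-check immediately (streaming mode,
+                    // exhaust replies pending, or a socket error on a previously known server) or wait.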
logStateChange(previousServerDescription, currentServerDescription); + sdamProvider.get().monitorUpdate(currentServerDescription); + + if ((shouldStreamResponses && currentServerDescription.getType() != UNKNOWN) + || (connection != null && connection.hasMoreToCome()) + || (currentServerDescription.getException() instanceof MongoSocketException + && previousServerDescription.getType() != UNKNOWN)) { + continue; + } + waitForNext(); + } + } catch (InterruptedException | MongoInterruptedException closed) { + // stop the monitor + } catch (Throwable t) { + LOGGER.error(format("%s for %s stopped working. You may want to recreate the MongoClient", this, serverId), t); + throw t; + } finally { + if (connection != null) { + connection.close(); + } + } + } + + private ServerDescription lookupServerDescription(final ServerDescription currentServerDescription) { + try { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(format("Checking status of %s", serverId.getAddress())); + } + + boolean shouldStreamResponses = shouldStreamResponses(currentServerDescription); + lookupStartTimeNanos = System.nanoTime(); + + // Handle connection setup + if (connection == null || connection.isClosed()) { + return setupNewConnectionAndGetInitialDescription(shouldStreamResponses); + } + + // Log heartbeat started if it hasn't been logged yet + if (!alreadyLoggedHeartBeatStarted) { + logAndNotifyHeartbeatStarted(shouldStreamResponses); + } + + // Get existing connection + return doHeartbeat(currentServerDescription, shouldStreamResponses); + } catch (Exception t) { + roundTripTimeSampler.reset(); + InternalConnection localConnection = withLock(lock, () -> { + InternalConnection result = connection; + connection = null; + return result; + }); + if (localConnection != null) { + localConnection.close(); + } + return unknownConnectingServerDescription(serverId, t); + } + } + + private ServerDescription setupNewConnectionAndGetInitialDescription(final boolean shouldStreamResponses) { + connection = internalConnectionFactory.create(serverId); + logAndNotifyHeartbeatStarted(shouldStreamResponses); + + try { + connection.open(operationContextFactory.create()); + roundTripTimeSampler.addSample(connection.getInitialServerDescription().getRoundTripTimeNanos()); + return connection.getInitialServerDescription(); + } catch (Exception e) { + logAndNotifyHeartbeatFailed(shouldStreamResponses, e); + throw e; + } + } + + /** + * Run hello command to get the server description. 
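+         * <p>For an awaitable (streaming) check, the command sent below has roughly this shape
+         * (field values are illustrative):</p>
+         * <pre>{@code
+         * { "hello": 1, "helloOk": true,
+         *   "topologyVersion": { "processId": ObjectId("..."), "counter": 3 },
+         *   "maxAwaitTimeMS": 10000 }
+         * }</pre>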
+ */ + private ServerDescription doHeartbeat(final ServerDescription currentServerDescription, + final boolean shouldStreamResponses) { + try { + OperationContext operationContext = operationContextFactory.create(); + if (!connection.hasMoreToCome()) { + BsonDocument helloDocument = new BsonDocument(getHandshakeCommandName(currentServerDescription), new BsonInt32(1)) + .append("helloOk", BsonBoolean.TRUE); + if (shouldStreamResponses) { + helloDocument.append("topologyVersion", assertNotNull(currentServerDescription.getTopologyVersion()).asDocument()); + helloDocument.append("maxAwaitTimeMS", new BsonInt64(serverSettings.getHeartbeatFrequency(MILLISECONDS))); + } + connection.send(createCommandMessage(helloDocument, connection, currentServerDescription), new BsonDocumentCodec(), + operationContext); + } + + BsonDocument helloResult; + if (shouldStreamResponses) { + helloResult = connection.receive(new BsonDocumentCodec(), operationContextWithAdditionalTimeout(operationContext)); + } else { + helloResult = connection.receive(new BsonDocumentCodec(), operationContext); + } + logAndNotifyHeartbeatSucceeded(shouldStreamResponses, helloResult); + return createServerDescription(serverId.getAddress(), helloResult, roundTripTimeSampler.getAverage(), + roundTripTimeSampler.getMin()); + } catch (Exception e) { + logAndNotifyHeartbeatFailed(shouldStreamResponses, e); + throw e; + } + } + + private void logAndNotifyHeartbeatStarted(final boolean shouldStreamResponses) { + alreadyLoggedHeartBeatStarted = true; + logHeartbeatStarted(serverId, connection.getDescription(), shouldStreamResponses); + serverMonitorListener.serverHearbeatStarted(new ServerHeartbeatStartedEvent( + connection.getDescription().getConnectionId(), shouldStreamResponses)); + } + + private void logAndNotifyHeartbeatSucceeded(final boolean shouldStreamResponses, final BsonDocument helloResult) { + alreadyLoggedHeartBeatStarted = false; + long elapsedTimeNanos = getElapsedTimeNanos(); + if (!shouldStreamResponses) { + roundTripTimeSampler.addSample(elapsedTimeNanos); + } + logHeartbeatSucceeded(serverId, connection.getDescription(), shouldStreamResponses, elapsedTimeNanos, helloResult); + serverMonitorListener.serverHeartbeatSucceeded( + new ServerHeartbeatSucceededEvent(connection.getDescription().getConnectionId(), helloResult, + elapsedTimeNanos, shouldStreamResponses)); + } + + private void logAndNotifyHeartbeatFailed(final boolean shouldStreamResponses, final Exception e) { + alreadyLoggedHeartBeatStarted = false; + long elapsedTimeNanos = getElapsedTimeNanos(); + logHeartbeatFailed(serverId, connection.getDescription(), shouldStreamResponses, elapsedTimeNanos, e); + serverMonitorListener.serverHeartbeatFailed( + new ServerHeartbeatFailedEvent(connection.getDescription().getConnectionId(), elapsedTimeNanos, + shouldStreamResponses, e)); + } + + private long getElapsedTimeNanos() { + return System.nanoTime() - lookupStartTimeNanos; + } + + private OperationContext operationContextWithAdditionalTimeout(final OperationContext originalOperationContext) { + TimeoutContext newTimeoutContext = originalOperationContext.getTimeoutContext() + .withAdditionalReadTimeout(Math.toIntExact(serverSettings.getHeartbeatFrequency(MILLISECONDS))); + return originalOperationContext.withTimeoutContext(newTimeoutContext); + } + + private boolean shouldStreamResponses(final ServerDescription currentServerDescription) { + boolean serverSupportsStreaming = currentServerDescription.getTopologyVersion() != null; + switch 
(serverSettings.getServerMonitoringMode()) { + case STREAM: { + return serverSupportsStreaming; + } + case POLL: { + return false; + } + case AUTO: { + return !isFunctionAsAServiceEnvironment && serverSupportsStreaming; + } + default: { + throw fail(); + } + } + } + + private CommandMessage createCommandMessage(final BsonDocument command, final InternalConnection connection, + final ServerDescription currentServerDescription) { + return new CommandMessage("admin", command, + NoOpFieldNameValidator.INSTANCE, primary(), + MessageSettings.builder() + .maxWireVersion(connection.getDescription().getMaxWireVersion()) + .build(), + shouldStreamResponses(currentServerDescription), clusterConnectionMode, serverApi); + } + + private void logStateChange(final ServerDescription previousServerDescription, + final ServerDescription currentServerDescription) { + if (shouldLogStageChange(previousServerDescription, currentServerDescription)) { + if (currentServerDescription.getException() != null) { + LOGGER.info(format("Exception in monitor thread while connecting to server %s", serverId.getAddress()), + assertNotNull(currentServerDescription.getException())); + } else { + LOGGER.info(format("Monitor thread successfully connected to server with description %s", currentServerDescription)); + } + } + } + + private void waitForNext() throws InterruptedException { + long timeRemaining = waitForSignalOrTimeout(); + if (timeRemaining > 0) { + long timeWaiting = serverSettings.getHeartbeatFrequency(NANOSECONDS) - timeRemaining; + long minimumNanosToWait = serverSettings.getMinHeartbeatFrequency(NANOSECONDS); + if (timeWaiting < minimumNanosToWait) { + long millisToSleep = MILLISECONDS.convert(minimumNanosToWait - timeWaiting, NANOSECONDS); + if (millisToSleep > 0) { + Thread.sleep(millisToSleep); + } + } + } + } + + private long waitForSignalOrTimeout() throws InterruptedException { + return checkedWithLock(lock, () -> condition.awaitNanos(serverSettings.getHeartbeatFrequency(NANOSECONDS))); + } + + public void cancelCurrentCheck() { + InternalConnection localConnection = withLock(lock, () -> { + if (connection != null && !currentCheckCancelled) { + InternalConnection result = connection; + currentCheckCancelled = true; + return result; + } + return null; + }); + if (localConnection != null) { + localConnection.close(); + } + } + } + + static boolean shouldLogStageChange(final ServerDescription previous, final ServerDescription current) { + + if (previous.isOk() != current.isOk()) { + return true; + } + if (!previous.getAddress().equals(current.getAddress())) { + return true; + } + String previousCanonicalAddress = previous.getCanonicalAddress(); + if (previousCanonicalAddress != null + ? !previousCanonicalAddress.equals(current.getCanonicalAddress()) : current.getCanonicalAddress() != null) { + return true; + } + if (!previous.getHosts().equals(current.getHosts())) { + return true; + } + if (!previous.getArbiters().equals(current.getArbiters())) { + return true; + } + if (!previous.getPassives().equals(current.getPassives())) { + return true; + } + String previousPrimary = previous.getPrimary(); + if (previousPrimary != null ? !previousPrimary.equals(current.getPrimary()) : current.getPrimary() != null) { + return true; + } + String previousSetName = previous.getSetName(); + if (previousSetName != null ? 
!previousSetName.equals(current.getSetName()) : current.getSetName() != null) { + return true; + } + if (previous.getState() != current.getState()) { + return true; + } + if (!previous.getTagSet().equals(current.getTagSet())) { + return true; + } + if (previous.getType() != current.getType()) { + return true; + } + if (previous.getMaxWireVersion() != current.getMaxWireVersion()) { + return true; + } + ObjectId previousElectionId = previous.getElectionId(); + if (previousElectionId != null + ? !previousElectionId.equals(current.getElectionId()) : current.getElectionId() != null) { + return true; + } + Integer setVersion = previous.getSetVersion(); + if (setVersion != null + ? !setVersion.equals(current.getSetVersion()) : current.getSetVersion() != null) { + return true; + } + + // Compare class equality and message as exceptions rarely override equals + Throwable previousException = previous.getException(); + Throwable currentException = current.getException(); + Class<?> thisExceptionClass = previousException != null ? previousException.getClass() : null; + Class<?> thatExceptionClass = currentException != null ? currentException.getClass() : null; + if (!Objects.equals(thisExceptionClass, thatExceptionClass)) { + return true; + } + + String thisExceptionMessage = previousException != null ? previousException.getMessage() : null; + String thatExceptionMessage = currentException != null ? currentException.getMessage() : null; + if (!Objects.equals(thisExceptionMessage, thatExceptionMessage)) { + return true; + } + + return false; + } + + + private class RoundTripTimeMonitor extends Thread implements AutoCloseable { + private volatile InternalConnection connection = null; + + RoundTripTimeMonitor() { + super("cluster-rtt-" + serverId.getClusterId() + "-" + serverId.getAddress()); + setDaemon(true); + } + + @Override + public void close() { + interrupt(); + InternalConnection connection = this.connection; + if (connection != null) { + connection.close(); + } + } + + @Override + public void run() { + try { + while (!isClosed) { + try { + if (connection == null) { + initialize(); + } else { + pingServer(connection); + } + } catch (Exception t) { + if (connection != null) { + connection.close(); + connection = null; + } + } + waitForNext(); + } + } catch (InterruptedException closed) { + // stop the monitor + } catch (Throwable t) { + LOGGER.error(format("%s for %s stopped working.
You may want to recreate the MongoClient", this, serverId), t); + throw t; + } finally { + if (connection != null) { + connection.close(); + } + } + } + + private void initialize() { + connection = null; + connection = internalConnectionFactory.create(serverId); + connection.open(operationContextFactory.create()); + roundTripTimeSampler.addSample(connection.getInitialServerDescription().getRoundTripTimeNanos()); + } + + private void pingServer(final InternalConnection connection) { + long start = System.nanoTime(); + OperationContext operationContext = operationContextFactory.create(); + executeCommand("admin", + new BsonDocument(getHandshakeCommandName(connection.getInitialServerDescription()), new BsonInt32(1)), + clusterConnectionMode, serverApi, connection, operationContext); + long elapsedTimeNanos = System.nanoTime() - start; + roundTripTimeSampler.addSample(elapsedTimeNanos); + } + } + + private void waitForNext() throws InterruptedException { + Thread.sleep(serverSettings.getHeartbeatFrequency(MILLISECONDS)); + } + + private String getHandshakeCommandName(final ServerDescription serverDescription) { + return serverDescription.isHelloOk() ? HELLO : LEGACY_HELLO; + } + + private static void logHeartbeatStarted( + final ServerId serverId, + final ConnectionDescription connectionDescription, + final boolean awaited) { + if (STRUCTURED_LOGGER.isRequired(DEBUG, serverId.getClusterId())) { + STRUCTURED_LOGGER.log(new LogMessage( + TOPOLOGY, DEBUG, "Server heartbeat started", serverId.getClusterId(), + asList( + new LogMessage.Entry(SERVER_HOST, serverId.getAddress().getHost()), + new LogMessage.Entry(SERVER_PORT, serverId.getAddress().getPort()), + new LogMessage.Entry(DRIVER_CONNECTION_ID, connectionDescription.getConnectionId().getLocalValue()), + new LogMessage.Entry(SERVER_CONNECTION_ID, connectionDescription.getConnectionId().getServerValue()), + new LogMessage.Entry(TOPOLOGY_ID, serverId.getClusterId()), + new LogMessage.Entry(AWAITED, awaited)), + "Heartbeat started for {}:{} on connection with driver-generated ID {} and server-generated ID {} " + + "in topology with ID {}. Awaited: {}")); + } + } + + private static void logHeartbeatSucceeded( + final ServerId serverId, + final ConnectionDescription connectionDescription, + final boolean awaited, + final long elapsedTimeNanos, + final BsonDocument reply) { + if (STRUCTURED_LOGGER.isRequired(DEBUG, serverId.getClusterId())) { + STRUCTURED_LOGGER.log(new LogMessage( + TOPOLOGY, DEBUG, "Server heartbeat succeeded", serverId.getClusterId(), + asList( + new LogMessage.Entry(DURATION_MS, MILLISECONDS.convert(elapsedTimeNanos, NANOSECONDS)), + new LogMessage.Entry(SERVER_HOST, serverId.getAddress().getHost()), + new LogMessage.Entry(SERVER_PORT, serverId.getAddress().getPort()), + new LogMessage.Entry(DRIVER_CONNECTION_ID, connectionDescription.getConnectionId().getLocalValue()), + new LogMessage.Entry(SERVER_CONNECTION_ID, connectionDescription.getConnectionId().getServerValue()), + new LogMessage.Entry(TOPOLOGY_ID, serverId.getClusterId()), + new LogMessage.Entry(AWAITED, awaited), + new LogMessage.Entry(REPLY, reply.toJson())), + "Heartbeat succeeded in {} ms for {}:{} on connection with driver-generated ID {} and server-generated ID {} " + + "in topology with ID {}. Awaited: {}. 
Reply: {}")); + } + } + + private static void logHeartbeatFailed( + final ServerId serverId, + final ConnectionDescription connectionDescription, + final boolean awaited, + final long elapsedTimeNanos, + final Exception failure) { + if (STRUCTURED_LOGGER.isRequired(DEBUG, serverId.getClusterId())) { + STRUCTURED_LOGGER.log(new LogMessage( + TOPOLOGY, DEBUG, "Server heartbeat failed", serverId.getClusterId(), + asList( + new LogMessage.Entry(DURATION_MS, MILLISECONDS.convert(elapsedTimeNanos, NANOSECONDS)), + new LogMessage.Entry(SERVER_HOST, serverId.getAddress().getHost()), + new LogMessage.Entry(SERVER_PORT, serverId.getAddress().getPort()), + new LogMessage.Entry(DRIVER_CONNECTION_ID, connectionDescription.getConnectionId().getLocalValue()), + new LogMessage.Entry(SERVER_CONNECTION_ID, connectionDescription.getConnectionId().getServerValue()), + new LogMessage.Entry(TOPOLOGY_ID, serverId.getClusterId()), + new LogMessage.Entry(AWAITED, awaited), + new LogMessage.Entry(FAILURE, failure.getMessage())), + "Heartbeat failed in {} ms for {}:{} on connection with driver-generated ID {} and server-generated ID {} " + + "in topology with ID {}. Awaited: {}. Failure: {}")); + } + } + + + private static void logStartedServerMonitoring(final ServerId serverId) { + if (STRUCTURED_LOGGER.isRequired(DEBUG, serverId.getClusterId())) { + STRUCTURED_LOGGER.log(new LogMessage( + TOPOLOGY, DEBUG, "Starting server monitoring", serverId.getClusterId(), + asList( + new LogMessage.Entry(SERVER_HOST, serverId.getAddress().getHost()), + new LogMessage.Entry(SERVER_PORT, serverId.getAddress().getPort()), + new LogMessage.Entry(TOPOLOGY_ID, serverId.getClusterId())), + "Starting monitoring for server {}:{} in topology with ID {}")); + } + } + + private static void logStoppedServerMonitoring(final ServerId serverId) { + if (STRUCTURED_LOGGER.isRequired(DEBUG, serverId.getClusterId())) { + STRUCTURED_LOGGER.log(new LogMessage( + TOPOLOGY, DEBUG, "Stopped server monitoring", serverId.getClusterId(), + asList( + new LogMessage.Entry(SERVER_HOST, serverId.getAddress().getHost()), + new LogMessage.Entry(SERVER_PORT, serverId.getAddress().getPort()), + new LogMessage.Entry(TOPOLOGY_ID, serverId.getClusterId())), + "Stopped monitoring for server {}:{} in topology with ID {}")); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/DescriptionHelper.java b/driver-core/src/main/com/mongodb/internal/connection/DescriptionHelper.java new file mode 100644 index 00000000000..26f73bcee9c --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/DescriptionHelper.java @@ -0,0 +1,258 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoClientException; +import com.mongodb.ServerAddress; +import com.mongodb.Tag; +import com.mongodb.TagSet; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ConnectionId; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerType; +import com.mongodb.connection.TopologyVersion; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.types.ObjectId; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Date; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static com.mongodb.connection.ConnectionDescription.getDefaultMaxMessageSize; +import static com.mongodb.connection.ConnectionDescription.getDefaultMaxWriteBatchSize; +import static com.mongodb.connection.ServerConnectionState.CONNECTED; +import static com.mongodb.connection.ServerDescription.getDefaultMaxDocumentSize; +import static com.mongodb.connection.ServerDescription.getDefaultMaxWireVersion; +import static com.mongodb.connection.ServerDescription.getDefaultMinWireVersion; +import static com.mongodb.connection.ServerType.REPLICA_SET_ARBITER; +import static com.mongodb.connection.ServerType.REPLICA_SET_OTHER; +import static com.mongodb.connection.ServerType.REPLICA_SET_PRIMARY; +import static com.mongodb.connection.ServerType.REPLICA_SET_SECONDARY; +import static com.mongodb.connection.ServerType.SHARD_ROUTER; +import static com.mongodb.connection.ServerType.STANDALONE; +import static com.mongodb.connection.ServerType.UNKNOWN; +import static com.mongodb.internal.connection.CommandHelper.LEGACY_HELLO_LOWER; +import static java.util.concurrent.TimeUnit.NANOSECONDS; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class DescriptionHelper { + static ConnectionDescription createConnectionDescription(final ClusterConnectionMode clusterConnectionMode, + final ConnectionId connectionId, final BsonDocument helloResult) { + ConnectionDescription connectionDescription = new ConnectionDescription(connectionId, + getMaxWireVersion(helloResult), getServerType(helloResult), getMaxWriteBatchSize(helloResult), + getMaxBsonObjectSize(helloResult), getMaxMessageSizeBytes(helloResult), getCompressors(helloResult), + helloResult.getArray("saslSupportedMechs", null), getLogicalSessionTimeoutMinutes(helloResult)); + if (helloResult.containsKey("connectionId")) { + ConnectionId newConnectionId = + connectionDescription.getConnectionId().withServerValue(helloResult.getNumber("connectionId").longValue()); + connectionDescription = connectionDescription.withConnectionId(newConnectionId); + } + if (clusterConnectionMode == ClusterConnectionMode.LOAD_BALANCED) { + ObjectId serviceId = getServiceId(helloResult); + if (serviceId != null) { + connectionDescription = connectionDescription.withServiceId(serviceId); + } else { + throw new MongoClientException("Driver attempted to initialize in load balancing mode, but the server does not support " + + "this mode"); + } + } + return connectionDescription; + } + + public static ServerDescription createServerDescription(final ServerAddress serverAddress, final BsonDocument helloResult, + final long roundTripTime, final long minRoundTripTime) { + return ServerDescription.builder() + .state(CONNECTED) + .address(serverAddress) + .type(getServerType(helloResult)) + .cryptd(helloResult.getBoolean("iscryptd", BsonBoolean.FALSE).getValue()) + .canonicalAddress(helloResult.containsKey("me") ? helloResult.getString("me").getValue() : null) + .hosts(listToSet(helloResult.getArray("hosts", new BsonArray()))) + .passives(listToSet(helloResult.getArray("passives", new BsonArray()))) + .arbiters(listToSet(helloResult.getArray("arbiters", new BsonArray()))) + .primary(getString(helloResult, "primary")) + .maxDocumentSize(getMaxBsonObjectSize(helloResult)) + .tagSet(getTagSetFromDocument(helloResult.getDocument("tags", new BsonDocument()))) + .setName(getString(helloResult, "setName")) + .minWireVersion(getMinWireVersion(helloResult)) + .maxWireVersion(getMaxWireVersion(helloResult)) + .electionId(getElectionId(helloResult)) + .setVersion(getSetVersion(helloResult)) + .topologyVersion(getTopologyVersion(helloResult)) + .lastWriteDate(getLastWriteDate(helloResult)) + .roundTripTime(roundTripTime, NANOSECONDS) + .minRoundTripTime(minRoundTripTime, NANOSECONDS) + .logicalSessionTimeoutMinutes(getLogicalSessionTimeoutMinutes(helloResult)) + .helloOk(helloResult.getBoolean("helloOk", BsonBoolean.FALSE).getValue()) + .ok(CommandHelper.isCommandOk(helloResult)).build(); + } + + private static int getMinWireVersion(final BsonDocument helloResult) { + return helloResult.getInt32("minWireVersion", new BsonInt32(getDefaultMinWireVersion())).getValue(); + } + + private static int getMaxWireVersion(final BsonDocument helloResult) { + return helloResult.getInt32("maxWireVersion", new BsonInt32(getDefaultMaxWireVersion())).getValue(); + } + + @Nullable + private static Date getLastWriteDate(final BsonDocument helloResult) { + if (!helloResult.containsKey("lastWrite")) { + return null; + } + return new Date(helloResult.getDocument("lastWrite").getDateTime("lastWriteDate").getValue()); + } + + @Nullable + private static ObjectId getElectionId(final BsonDocument helloResult) { + return 
helloResult.containsKey("electionId") ? helloResult.getObjectId("electionId").getValue() : null; + } + + @Nullable + private static Integer getSetVersion(final BsonDocument helloResult) { + return helloResult.containsKey("setVersion") ? helloResult.getNumber("setVersion").intValue() : null; + } + + @Nullable + private static TopologyVersion getTopologyVersion(final BsonDocument helloResult) { + return helloResult.containsKey("topologyVersion") && helloResult.get("topologyVersion").isDocument() + ? new TopologyVersion(helloResult.getDocument("topologyVersion")) : null; + } + + @Nullable + private static ObjectId getServiceId(final BsonDocument helloResult) { + return helloResult.containsKey("serviceId") && helloResult.get("serviceId").isObjectId() + ? helloResult.getObjectId("serviceId").getValue() : null; + } + + private static int getMaxMessageSizeBytes(final BsonDocument helloResult) { + return helloResult.getInt32("maxMessageSizeBytes", new BsonInt32(getDefaultMaxMessageSize())).getValue(); + } + + private static int getMaxBsonObjectSize(final BsonDocument helloResult) { + return helloResult.getInt32("maxBsonObjectSize", new BsonInt32(getDefaultMaxDocumentSize())).getValue(); + } + + private static int getMaxWriteBatchSize(final BsonDocument helloResult) { + return helloResult.getInt32("maxWriteBatchSize", new BsonInt32(getDefaultMaxWriteBatchSize())).getValue(); + } + + @Nullable + private static Integer getLogicalSessionTimeoutMinutes(final BsonDocument helloResult) { + return helloResult.isNumber("logicalSessionTimeoutMinutes") + ? helloResult.getNumber("logicalSessionTimeoutMinutes").intValue() : null; + } + + @Nullable + private static String getString(final BsonDocument response, final String key) { + if (response.containsKey(key)) { + return response.getString(key).getValue(); + } else { + return null; + } + } + + private static Set listToSet(@Nullable final BsonArray array) { + if (array == null || array.isEmpty()) { + return Collections.emptySet(); + } else { + Set set = new HashSet<>(); + for (BsonValue value : array) { + set.add(value.asString().getValue()); + } + return set; + } + } + + private static ServerType getServerType(final BsonDocument helloResult) { + + if (!CommandHelper.isCommandOk(helloResult)) { + return UNKNOWN; + } + + if (isReplicaSetMember(helloResult)) { + + if (helloResult.getBoolean("hidden", BsonBoolean.FALSE).getValue()) { + return REPLICA_SET_OTHER; + } + + if (helloResult.getBoolean("isWritablePrimary", BsonBoolean.FALSE).getValue()) { + return REPLICA_SET_PRIMARY; + } + + if (helloResult.getBoolean(LEGACY_HELLO_LOWER, BsonBoolean.FALSE).getValue()) { + return REPLICA_SET_PRIMARY; + } + + if (helloResult.getBoolean("secondary", BsonBoolean.FALSE).getValue()) { + return REPLICA_SET_SECONDARY; + } + + if (helloResult.getBoolean("arbiterOnly", BsonBoolean.FALSE).getValue()) { + return REPLICA_SET_ARBITER; + } + + if (helloResult.containsKey("setName") && helloResult.containsKey("hosts")) { + return REPLICA_SET_OTHER; + } + + return ServerType.REPLICA_SET_GHOST; + } + + if (helloResult.containsKey("msg") && helloResult.get("msg").equals(new BsonString("isdbgrid"))) { + return SHARD_ROUTER; + } + + return STANDALONE; + } + + private static boolean isReplicaSetMember(final BsonDocument helloResult) { + return helloResult.containsKey("setName") || helloResult.getBoolean("isreplicaset", BsonBoolean.FALSE).getValue(); + } + + private static TagSet getTagSetFromDocument(final BsonDocument tagsDocuments) { + List tagList = new ArrayList<>(); + for (final 
Map.Entry<String, BsonValue> curEntry : tagsDocuments.entrySet()) { + tagList.add(new Tag(curEntry.getKey(), curEntry.getValue().asString().getValue())); + } + return new TagSet(tagList); + } + + private static List<String> getCompressors(final BsonDocument helloResult) { + List<String> compressorList = new ArrayList<>(); + for (BsonValue compressor : helloResult.getArray("compression", new BsonArray())) { + compressorList.add(compressor.asString().getValue()); + } + return compressorList; + } + + private DescriptionHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/DnsMultiServerCluster.java b/driver-core/src/main/com/mongodb/internal/connection/DnsMultiServerCluster.java new file mode 100644 index 00000000000..e165146dd29 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/DnsMultiServerCluster.java @@ -0,0 +1,121 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoException; +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ClusterSettings; +import com.mongodb.connection.ClusterType; +import com.mongodb.connection.ServerDescription; +import com.mongodb.lang.Nullable; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ThreadLocalRandom; +import java.util.stream.Collectors; + +import static com.mongodb.assertions.Assertions.assertNotNull; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class DnsMultiServerCluster extends AbstractMultiServerCluster { + private final DnsSrvRecordMonitor dnsSrvRecordMonitor; + private volatile MongoException srvResolutionException; + + public DnsMultiServerCluster(final ClusterId clusterId, final ClusterSettings settings, + final ClusterableServerFactory serverFactory, + final ClientMetadata clientMetadata, + final DnsSrvRecordMonitorFactory dnsSrvRecordMonitorFactory) { + super(clusterId, settings, serverFactory, clientMetadata); + dnsSrvRecordMonitor = dnsSrvRecordMonitorFactory.create(assertNotNull(settings.getSrvHost()), settings.getSrvServiceName(), + new DnsSrvRecordInitializer() { + private volatile boolean initialized; + + @Override + public void initialize(final Collection<ServerAddress> hosts) { + srvResolutionException = null; + if (!initialized) { + initialized = true; + DnsMultiServerCluster.this.initialize(applySrvMaxHosts(hosts)); + } else { + DnsMultiServerCluster.this.onChange(applySrvMaxHosts(hosts)); + } + } + + private Collection<ServerAddress> applySrvMaxHosts(final Collection<ServerAddress> latestSrvHosts) { + Integer srvMaxHosts = getSettings().getSrvMaxHosts(); + if (srvMaxHosts == null || srvMaxHosts <= 0 || latestSrvHosts.size() <= srvMaxHosts) { + return new ArrayList<>(latestSrvHosts); + } + List<ServerAddress> activeHosts = getActivePriorHosts(latestSrvHosts); + int numNewHostsToAdd = srvMaxHosts - activeHosts.size(); + activeHosts.addAll(addShuffledHosts(latestSrvHosts, activeHosts, numNewHostsToAdd)); + + return activeHosts; + } + + private List<ServerAddress> getActivePriorHosts(final Collection<ServerAddress> latestSrvHosts) { + List<ServerAddress> priorHosts = DnsMultiServerCluster.this.getCurrentDescription().getServerDescriptions().stream() + .map(ServerDescription::getAddress).collect(Collectors.toList()); + priorHosts.removeIf(host -> !latestSrvHosts.contains(host)); + + return priorHosts; + } + + private List<ServerAddress> addShuffledHosts(final Collection<ServerAddress> latestSrvHosts, + final List<ServerAddress> activePriorHosts, final int numNewHostsToAdd) { + List<ServerAddress> addedHosts = new ArrayList<>(latestSrvHosts); + addedHosts.removeAll(activePriorHosts); + Collections.shuffle(addedHosts, ThreadLocalRandom.current()); + + return addedHosts.subList(0, numNewHostsToAdd); + } + + @Override + public void initialize(final MongoException initializationException) { + if (!initialized) { + srvResolutionException = initializationException; + DnsMultiServerCluster.this.initialize(Collections.emptyList()); + } + } + + @Override + public ClusterType getClusterType() { + return DnsMultiServerCluster.this.getClusterType(); + } + }); + dnsSrvRecordMonitor.start(); + } + + @Nullable + @Override + protected MongoException getSrvResolutionException() { + return srvResolutionException; + } + + @Override + public void close() { + if (dnsSrvRecordMonitor != null) { + dnsSrvRecordMonitor.close(); + } + super.close(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/DnsSrvRecordInitializer.java b/driver-core/src/main/com/mongodb/internal/connection/DnsSrvRecordInitializer.java new file mode 100644 index 00000000000..deadf53aba0 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/DnsSrvRecordInitializer.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
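The applySrvMaxHosts logic above implements the srvMaxHosts rule: keep every active host that is still present in the latest SRV answer, then top the list up with randomly chosen newcomers. A standalone restatement of that rule follows; it is a sketch using hypothetical plain-string hosts rather than ServerAddress, and it assumes (as the driver does) that the active list is already capped at srvMaxHosts:

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;

final class SrvMaxHostsRule {
    // Keep still-advertised active hosts, then add shuffled newcomers up to srvMaxHosts.
    static List<String> select(final Collection<String> latestSrvHosts,
                               final List<String> activePriorHosts, final int srvMaxHosts) {
        if (srvMaxHosts <= 0 || latestSrvHosts.size() <= srvMaxHosts) {
            return new ArrayList<>(latestSrvHosts);
        }
        List<String> result = new ArrayList<>(activePriorHosts);
        result.retainAll(latestSrvHosts);   // drop hosts that left the SRV answer
        List<String> newcomers = new ArrayList<>(latestSrvHosts);
        newcomers.removeAll(result);        // only hosts we are not already using
        Collections.shuffle(newcomers, ThreadLocalRandom.current());
        result.addAll(newcomers.subList(0, srvMaxHosts - result.size()));
        return result;
    }
}

The shuffle matters: choosing newcomers at random spreads load across the advertised replica set members instead of always favoring the first records returned by DNS.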
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoException; +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterType; + +import java.util.Collection; + +interface DnsSrvRecordInitializer { + void initialize(Collection<ServerAddress> hosts); + + void initialize(MongoException initializationException); + + ClusterType getClusterType(); +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/DnsSrvRecordMonitor.java b/driver-core/src/main/com/mongodb/internal/connection/DnsSrvRecordMonitor.java new file mode 100644 index 00000000000..a552d06ad5c --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/DnsSrvRecordMonitor.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public interface DnsSrvRecordMonitor { + void start(); + + void close(); +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/DnsSrvRecordMonitorFactory.java b/driver-core/src/main/com/mongodb/internal/connection/DnsSrvRecordMonitorFactory.java new file mode 100644 index 00000000000..202b490c1e5 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/DnsSrvRecordMonitorFactory.java @@ -0,0 +1,24 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public interface DnsSrvRecordMonitorFactory { + DnsSrvRecordMonitor create(String hostName, String srvServiceName, DnsSrvRecordInitializer dnsSrvRecordInitializer); +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/DomainNameUtils.java b/driver-core/src/main/com/mongodb/internal/connection/DomainNameUtils.java new file mode 100644 index 00000000000..b64ea64ae24 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/DomainNameUtils.java @@ -0,0 +1,30 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.connection; + +import java.util.regex.Pattern; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class DomainNameUtils { + private static final Pattern DOMAIN_PATTERN = + Pattern.compile("^(?=.{1,255}$)((([a-zA-Z0-9]([a-zA-Z0-9\\-]{0,61}[a-zA-Z0-9])?\\.)+[a-zA-Z]{2,63}|localhost))$"); + + static boolean isDomainName(final String domainName) { + return DOMAIN_PATTERN.matcher(domainName).matches(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/DualMessageSequences.java b/driver-core/src/main/com/mongodb/internal/connection/DualMessageSequences.java new file mode 100644 index 00000000000..0c5a3430c22 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/DualMessageSequences.java @@ -0,0 +1,122 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import org.bson.BsonBinaryWriter; +import org.bson.BsonElement; +import org.bson.FieldNameValidator; + +import java.util.List; + +/** + * Two sequences that may either be coupled or independent. + *
<p>
+ * This class is not part of the public API and may be removed or changed at any time.</p>
+ */ +public abstract class DualMessageSequences extends MessageSequences { + + private final String firstSequenceId; + private final FieldNameValidator firstFieldNameValidator; + private final String secondSequenceId; + private final FieldNameValidator secondFieldNameValidator; + + protected DualMessageSequences( + final String firstSequenceId, + final FieldNameValidator firstFieldNameValidator, + final String secondSequenceId, + final FieldNameValidator secondFieldNameValidator) { + this.firstSequenceId = firstSequenceId; + this.firstFieldNameValidator = firstFieldNameValidator; + this.secondSequenceId = secondSequenceId; + this.secondFieldNameValidator = secondFieldNameValidator; + } + + FieldNameValidator getFirstFieldNameValidator() { + return firstFieldNameValidator; + } + + FieldNameValidator getSecondFieldNameValidator() { + return secondFieldNameValidator; + } + + String getFirstSequenceId() { + return firstSequenceId; + } + + String getSecondSequenceId() { + return secondSequenceId; + } + + protected abstract EncodeDocumentsResult encodeDocuments(WritersProviderAndLimitsChecker writersProviderAndLimitsChecker); + + /** + * @see #tryWrite(WriteAction) + */ + public interface WritersProviderAndLimitsChecker { + /** + * Provides writers to the specified {@link WriteAction}, + * {@linkplain WriteAction#doAndGetBatchCount(BsonBinaryWriter, BsonBinaryWriter) executes} it, + * checks the {@linkplain MessageSettings limits}. + *
<p>
+ * May be called multiple times per {@link #encodeDocuments(WritersProviderAndLimitsChecker)}.</p>
+ */ + WriteResult tryWrite(WriteAction write); + + /** + * @see #doAndGetBatchCount(BsonBinaryWriter, BsonBinaryWriter) + */ + interface WriteAction { + /** + * Writes documents to the sequences using the provided writers. + * + * @return The resulting batch count since the beginning of {@link #encodeDocuments(WritersProviderAndLimitsChecker)}. + * It is generally allowed to be greater than {@link MessageSettings#getMaxBatchCount()}. + */ + int doAndGetBatchCount(BsonBinaryWriter firstWriter, BsonBinaryWriter secondWriter); + } + + enum WriteResult { + FAIL_LIMIT_EXCEEDED, + OK_LIMIT_REACHED, + OK_LIMIT_NOT_REACHED + } + } + + public static final class EncodeDocumentsResult { + private final boolean serverResponseRequired; + private final List<BsonElement> extraElements; + + /** + * @param extraElements See {@link #getExtraElements()}. + */ + public EncodeDocumentsResult(final boolean serverResponseRequired, final List<BsonElement> extraElements) { + this.serverResponseRequired = serverResponseRequired; + this.extraElements = extraElements; + } + + boolean isServerResponseRequired() { + return serverResponseRequired; + } + + /** + * {@linkplain BsonElement Key/value pairs} to be added to the document contained in the {@code OP_MSG} section with payload type 0. + */ + List<BsonElement> getExtraElements() { + return extraElements; + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/EventHelper.java b/driver-core/src/main/com/mongodb/internal/connection/EventHelper.java new file mode 100644 index 00000000000..9faf67f2c10 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/EventHelper.java @@ -0,0 +1,138 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.lang.Nullable; + +import java.util.Objects; + +final class EventHelper { + + /** + * Determine whether the two cluster descriptions are effectively equivalent for the purpose of cluster event + * generation, according to the equality rules enumerated in the Server Discovery and Monitoring specification.
+ */ + static boolean wouldDescriptionsGenerateEquivalentEvents(final ClusterDescription current, + final ClusterDescription previous) { + if (!exceptionsEquals(current.getSrvResolutionException(), previous.getSrvResolutionException())) { + return false; + } + if (current.getServerDescriptions().size() != previous.getServerDescriptions().size()) { + return false; + } + for (ServerDescription curNew: current.getServerDescriptions()) { + ServerDescription matchingPrev = null; + for (ServerDescription curPrev: previous.getServerDescriptions()) { + if (curNew.getAddress().equals(curPrev.getAddress())) { + matchingPrev = curPrev; + break; + } + } + if (!wouldDescriptionsGenerateEquivalentEvents(curNew, matchingPrev)) { + return false; + } + } + return true; + } + + /** + * Determine whether the two server descriptions are effectively equivalent for the purpose of server event + * generation, according to the equality rules enumerated in the Server Discovery and Monitoring specification. + */ + static boolean wouldDescriptionsGenerateEquivalentEvents(@Nullable final ServerDescription current, + @Nullable final ServerDescription previous) { + if (current == previous) { + return true; + } + if (previous == null || current == null) { + return false; + } + if (current.isOk() != previous.isOk()) { + return false; + } + if (current.getState() != previous.getState()) { + return false; + } + if (current.getType() != previous.getType()) { + return false; + } + if (current.getMinWireVersion() != previous.getMinWireVersion()) { + return false; + } + if (current.getMaxWireVersion() != previous.getMaxWireVersion()) { + return false; + } + if (!Objects.equals(current.getCanonicalAddress(), previous.getCanonicalAddress())) { + return false; + } + if (!current.getHosts().equals(previous.getHosts())) { + return false; + } + if (!current.getPassives().equals(previous.getPassives())) { + return false; + } + if (!current.getArbiters().equals(previous.getArbiters())) { + return false; + } + if (!current.getTagSet().equals(previous.getTagSet())) { + return false; + } + if (!Objects.equals(current.getSetName(), previous.getSetName())) { + return false; + } + if (!Objects.equals(current.getSetVersion(), previous.getSetVersion())) { + return false; + } + if (!Objects.equals(current.getElectionId(), previous.getElectionId())) { + return false; + } + if (!Objects.equals(current.getPrimary(), previous.getPrimary())) { + return false; + } + if (!Objects.equals(current.getLogicalSessionTimeoutMinutes(), previous.getLogicalSessionTimeoutMinutes())) { + return false; + } + if (!Objects.equals(current.getTopologyVersion(), previous.getTopologyVersion())) { + return false; + } + if (!exceptionsEquals(current.getException(), previous.getException())) { + return false; + } + return true; + } + + @SuppressWarnings("BooleanMethodIsAlwaysInverted") + private static boolean exceptionsEquals(@Nullable final Throwable current, @Nullable final Throwable previous) { + if (current == null || previous == null) { + return current == previous; + } + // Compare class equality and message as exceptions rarely override equals + if (!Objects.equals(current.getClass(), previous.getClass())) { + return false; + } + + if (!Objects.equals(current.getMessage(), previous.getMessage())) { + return false; + } + return true; + } + + + private EventHelper() {} +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ExponentiallyWeightedMovingAverage.java 
b/driver-core/src/main/com/mongodb/internal/connection/ExponentiallyWeightedMovingAverage.java new file mode 100644 index 00000000000..b4e0f4c0283 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/ExponentiallyWeightedMovingAverage.java @@ -0,0 +1,52 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import java.util.concurrent.atomic.AtomicLong; + +import static com.mongodb.assertions.Assertions.isTrueArgument; + +class ExponentiallyWeightedMovingAverage { + private static final long EMPTY = -1; + + private final double alpha; + private final AtomicLong average; + + ExponentiallyWeightedMovingAverage(final double alpha) { + isTrueArgument("alpha >= 0.0 and <= 1.0", alpha >= 0.0 && alpha <= 1.0); + this.alpha = alpha; + average = new AtomicLong(EMPTY); + } + + void reset() { + average.set(EMPTY); + } + + long addSample(final long sample) { + return average.accumulateAndGet(sample, (average, givenSample) -> { + if (average == EMPTY) { + return givenSample; + } + return (long) (alpha * givenSample + (1 - alpha) * average); + }); + } + + long getAverage() { + long average = this.average.get(); + return average == EMPTY ? 0 : average; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ExtendedAsynchronousByteChannel.java b/driver-core/src/main/com/mongodb/internal/connection/ExtendedAsynchronousByteChannel.java new file mode 100644 index 00000000000..ed5e55b822a --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/ExtendedAsynchronousByteChannel.java @@ -0,0 +1,237 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Original Work: MIT License, Copyright (c) [2015-2018] all contributors + * https://github.com/marianobarrios/tls-channel + */ + +package com.mongodb.internal.connection; + +import com.mongodb.lang.Nullable; + +import java.nio.ByteBuffer; +import java.nio.channels.AsynchronousByteChannel; +import java.nio.channels.CompletionHandler; +import java.nio.channels.ReadPendingException; +import java.nio.channels.ShutdownChannelGroupException; +import java.nio.channels.WritePendingException; +import java.util.concurrent.TimeUnit; + +/** + * This interface extends {@link AsynchronousByteChannel} adding optional timeouts and scattering and gathering methods. 
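The ExponentiallyWeightedMovingAverage just above applies the usual recurrence next = alpha * sample + (1 - alpha) * previous, with the first sample seeding the average. A minimal trace with hypothetical values, assuming alpha = 0.2 for illustration (EwmaTrace is not driver code):

public final class EwmaTrace {
    public static void main(final String[] args) {
        final double alpha = 0.2;  // weight of the newest sample
        long average = -1;         // mirrors the EMPTY sentinel above
        for (long sample : new long[] {1000, 2000, 2000, 2000}) {
            average = average == -1 ? sample : (long) (alpha * sample + (1 - alpha) * average);
            System.out.println(average);  // prints 1000, 1200, 1360, 1488
        }
    }
}

A small alpha makes the average respond slowly to a single slow round trip, which is the desired behavior when the value feeds server selection.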
+ * These additions are analogous to the ones made by {@link java.nio.channels.AsynchronousSocketChannel}. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public interface ExtendedAsynchronousByteChannel extends AsynchronousByteChannel { + + /** + * Reads a sequence of bytes from this channel into the given buffer. + * + *
<p>
This method initiates an asynchronous read operation to read a + * sequence of bytes from this channel into the given buffer. The {@code + * handler} parameter is a completion handler that is invoked when the read + * operation completes (or fails). The result passed to the completion + * handler is the number of bytes read or {@code -1} if no bytes could be + * read because the channel has reached end-of-stream. + * + *
<p>
If a timeout is specified and the timeout elapses before the operation + * completes then the operation completes with the exception {@link + * java.nio.channels.InterruptedByTimeoutException}. Where a timeout occurs, and the + * implementation cannot guarantee that bytes have not been read, or will not + * be read from the channel into the given buffer, then further attempts to + * read from the channel will cause an unspecific runtime exception to be + * thrown. + * + *
<p> Otherwise this method works in the same manner as the {@link + * AsynchronousByteChannel#read(ByteBuffer, Object, CompletionHandler)} + * method. + * + * @param dst The buffer into which bytes are to be transferred + * @param timeout The maximum time for the I/O operation to complete + * @param unit The time unit of the {@code timeout} argument + * @param attach The object to attach to the I/O operation; can be {@code null} + * @param handler The handler for consuming the result + * @throws IllegalArgumentException If the buffer is read-only + * @throws ReadPendingException If a read operation is already in progress on this channel + * @throws ShutdownChannelGroupException If the channel group has terminated + */ + <A> void read( + ByteBuffer dst, + long timeout, TimeUnit unit, + @Nullable A attach, CompletionHandler<Integer, ? super A> handler); + + /** + * Reads a sequence of bytes from this channel into a subsequence of the + * given buffers. This operation, sometimes called a scattering read, + * is often useful when implementing network protocols that group data into + * segments consisting of one or more fixed-length headers followed by a + * variable-length body. The {@code handler} parameter is a completion + * handler that is invoked when the read operation completes (or fails). The + * result passed to the completion handler is the number of bytes read or + * {@code -1} if no bytes could be read because the channel has reached + * end-of-stream. + * + *
<p>
This method initiates a read of up to r bytes from this channel, + * where r is the total number of bytes remaining in the specified + * subsequence of the given buffer array, that is, + * + *
<blockquote><pre>
+     * dsts[offset].remaining()
+     *     + dsts[offset+1].remaining()
+     *     + ... + dsts[offset+length-1].remaining()
+ * </pre></blockquote>
+ * at the moment that the read is attempted. + * + *
<p>
Suppose that a byte sequence of length n is read, where + * 0 < n <= r. + * Up to the first dsts[offset].remaining() bytes of this sequence + * are transferred into buffer dsts[offset], up to the next + * dsts[offset+1].remaining() bytes are transferred into buffer + * dsts[offset+1], and so forth, until the entire byte sequence + * is transferred into the given buffers. As many bytes as possible are + * transferred into each buffer, hence the final position of each updated + * buffer, except the last updated buffer, is guaranteed to be equal to + * that buffer's limit. The underlying operating system may impose a limit + * on the number of buffers that may be used in an I/O operation. Where the + * number of buffers (with bytes remaining), exceeds this limit, then the + * I/O operation is performed with the maximum number of buffers allowed by + * the operating system. + * + *
<p> If a timeout is specified and the timeout elapses before the operation + * completes then it completes with the exception {@link + * java.nio.channels.InterruptedByTimeoutException}. Where a timeout occurs, and the + * implementation cannot guarantee that bytes have not been read, or will not + * be read from the channel into the given buffers, then further attempts to + * read from the channel will cause an unspecific runtime exception to be + * thrown. + * + * @param dsts The buffers into which bytes are to be transferred + * @param offset The offset within the buffer array of the first buffer into which + * bytes are to be transferred; must be non-negative and no larger than + * {@code dsts.length} + * @param length The maximum number of buffers to be accessed; must be non-negative + * and no larger than {@code dsts.length - offset} + * @param timeout The maximum time for the I/O operation to complete + * @param unit The time unit of the {@code timeout} argument + * @param attach The object to attach to the I/O operation; can be {@code null} + * @param handler The handler for consuming the result + * @throws IndexOutOfBoundsException If the pre-conditions for the {@code offset} and {@code length} + * parameters aren't met + * @throws IllegalArgumentException If the buffer is read-only + * @throws ReadPendingException If a read operation is already in progress on this channel + * @throws ShutdownChannelGroupException If the channel group has terminated + */ + <A> void read( + ByteBuffer[] dsts, int offset, int length, + long timeout, TimeUnit unit, + @Nullable A attach, CompletionHandler<Long, ? super A> handler); + + /** + * Writes a sequence of bytes to this channel from the given buffer. + * + *
<p>
This method initiates an asynchronous write operation to write a + * sequence of bytes to this channel from the given buffer. The {@code + * handler} parameter is a completion handler that is invoked when the write + * operation completes (or fails). The result passed to the completion + * handler is the number of bytes written. + * + *
<p>
If a timeout is specified and the timeout elapses before the operation + * completes then it completes with the exception {@link + * java.nio.channels.InterruptedByTimeoutException}. Where a timeout occurs, and the + * implementation cannot guarantee that bytes have not been written, or will + * not be written to the channel from the given buffer, then further attempts + * to write to the channel will cause an unspecific runtime exception to be + * thrown. + * + *
<p> Otherwise this method works in the same manner as the {@link + * AsynchronousByteChannel#write(ByteBuffer, Object, CompletionHandler)} + * method. + * + * @param src The buffer from which bytes are to be retrieved + * @param timeout The maximum time for the I/O operation to complete + * @param unit The time unit of the {@code timeout} argument + * @param attach The object to attach to the I/O operation; can be {@code null} + * @param handler The handler for consuming the result + * @throws WritePendingException If a write operation is already in progress on this channel + * @throws ShutdownChannelGroupException If the channel group has terminated + */ + <A> void write( + ByteBuffer src, + long timeout, TimeUnit unit, + @Nullable A attach, CompletionHandler<Integer, ? super A> handler); + + /** + * Writes a sequence of bytes to this channel from a subsequence of the given + * buffers. This operation, sometimes called a gathering write, is + * often useful when implementing network protocols that group data into + * segments consisting of one or more fixed-length headers followed by a + * variable-length body. The {@code handler} parameter is a completion + * handler that is invoked when the write operation completes (or fails). + * The result passed to the completion handler is the number of bytes written. + * + *
<p>
This method initiates a write of up to r bytes to this channel, + * where r is the total number of bytes remaining in the specified + * subsequence of the given buffer array, that is, + * + *
<blockquote><pre>
+     * srcs[offset].remaining()
+     *     + srcs[offset+1].remaining()
+     *     + ... + srcs[offset+length-1].remaining()
+ * </pre></blockquote>
+ * at the moment that the write is attempted. + * + *
<p>
Suppose that a byte sequence of length n is written, where + * 0 < n <= r. + * Up to the first srcs[offset].remaining() bytes of this sequence + * are written from buffer srcs[offset], up to the next + * srcs[offset+1].remaining() bytes are written from buffer + * srcs[offset+1], and so forth, until the entire byte sequence is + * written. As many bytes as possible are written from each buffer, hence + * the final position of each updated buffer, except the last updated + * buffer, is guaranteed to be equal to that buffer's limit. The underlying + * operating system may impose a limit on the number of buffers that may be + * used in an I/O operation. Where the number of buffers (with bytes + * remaining), exceeds this limit, then the I/O operation is performed with + * the maximum number of buffers allowed by the operating system. + * + *
<p> If a timeout is specified and the timeout elapses before the operation + * completes then it completes with the exception {@link + * java.nio.channels.InterruptedByTimeoutException}. Where a timeout occurs, and the + * implementation cannot guarantee that bytes have not been written, or will + * not be written to the channel from the given buffers, then further attempts + * to write to the channel will cause an unspecific runtime exception to be + * thrown. + * + * @param srcs The buffers from which bytes are to be retrieved + * @param offset The offset within the buffer array of the first buffer from which + * bytes are to be retrieved; must be non-negative and no larger + * than {@code srcs.length} + * @param length The maximum number of buffers to be accessed; must be non-negative + * and no larger than {@code srcs.length - offset} + * @param timeout The maximum time for the I/O operation to complete + * @param unit The time unit of the {@code timeout} argument + * @param attach The object to attach to the I/O operation; can be {@code null} + * @param handler The handler for consuming the result + * @throws IndexOutOfBoundsException If the pre-conditions for the {@code offset} and {@code length} + * parameters aren't met + * @throws WritePendingException If a write operation is already in progress on this channel + * @throws ShutdownChannelGroupException If the channel group has terminated + */ + <A> void write( + ByteBuffer[] srcs, int offset, int length, + long timeout, TimeUnit unit, + @Nullable A attach, CompletionHandler<Long, ? super A> handler); +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/FaasEnvironment.java b/driver-core/src/main/com/mongodb/internal/connection/FaasEnvironment.java new file mode 100644 index 00000000000..a54c1efb066 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/FaasEnvironment.java @@ -0,0 +1,129 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.lang.Nullable; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +enum FaasEnvironment { + AWS_LAMBDA("aws.lambda"), + AZURE_FUNC("azure.func"), + GCP_FUNC("gcp.func"), + VERCEL("vercel"), + UNKNOWN(null); + + static final Map<String, String> ENV_OVERRIDES_FOR_TESTING = new HashMap<>(); + + static FaasEnvironment getFaasEnvironment() { + List<FaasEnvironment> result = new ArrayList<>(); + String awsExecutionEnv = getEnv("AWS_EXECUTION_ENV"); + + if (getEnv("VERCEL") != null) { + result.add(FaasEnvironment.VERCEL); + } + if ((awsExecutionEnv != null && awsExecutionEnv.startsWith("AWS_Lambda_")) + || getEnv("AWS_LAMBDA_RUNTIME_API") != null) { + result.add(FaasEnvironment.AWS_LAMBDA); + } + if (getEnv("FUNCTIONS_WORKER_RUNTIME") != null) { + result.add(FaasEnvironment.AZURE_FUNC); + } + if (getEnv("K_SERVICE") != null || getEnv("FUNCTION_NAME") != null) { + result.add(FaasEnvironment.GCP_FUNC); + } + // vercel takes precedence over aws.lambda + if (result.equals(Arrays.asList(FaasEnvironment.VERCEL, FaasEnvironment.AWS_LAMBDA))) { + return FaasEnvironment.VERCEL; + } + if (result.size() != 1) { + return FaasEnvironment.UNKNOWN; + } + return result.get(0); + } + + @Nullable + public static String getEnv(final String key) { + if (ENV_OVERRIDES_FOR_TESTING.containsKey(key)) { + return ENV_OVERRIDES_FOR_TESTING.get(key); + } + return System.getenv(key); + } + + @Nullable + private final String name; + + FaasEnvironment(@Nullable final String name) { + this.name = name; + } + + @Nullable + public String getName() { + return name; + } + + @Nullable + public Integer getTimeoutSec() { + //noinspection SwitchStatementWithTooFewBranches + switch (this) { + case GCP_FUNC: + return getEnvInteger("FUNCTION_TIMEOUT_SEC"); + default: + return null; + } + } + + @Nullable + public Integer getMemoryMb() { + switch (this) { + case AWS_LAMBDA: + return getEnvInteger("AWS_LAMBDA_FUNCTION_MEMORY_SIZE"); + case GCP_FUNC: + return getEnvInteger("FUNCTION_MEMORY_MB"); + default: + return null; + } + } + + @Nullable + public String getRegion() { + switch (this) { + case AWS_LAMBDA: + return getEnv("AWS_REGION"); + case GCP_FUNC: + return getEnv("FUNCTION_REGION"); + case VERCEL: + return getEnv("VERCEL_REGION"); + default: + return null; + } + } + + @Nullable + private static Integer getEnvInteger(final String name) { + try { + String value = getEnv(name); + return Integer.parseInt(value); + } catch (NumberFormatException e) { + return null; + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/FieldTrackingBsonWriter.java b/driver-core/src/main/com/mongodb/internal/connection/FieldTrackingBsonWriter.java new file mode 100644 index 00000000000..82c3cf7b3e4 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/FieldTrackingBsonWriter.java @@ -0,0 +1,313 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
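getFaasEnvironment() resolves to a single environment only when exactly one detector fires, with one documented exception: Vercel outranks AWS Lambda when both match. A hedged sketch of that precedence follows; FaasPrecedenceDemo is hypothetical, and it must live in the same package because it drives the package-private ENV_OVERRIDES_FOR_TESTING map shown above:

package com.mongodb.internal.connection;

final class FaasPrecedenceDemo {
    public static void main(final String[] args) {
        // Both Vercel and AWS Lambda markers present: the documented precedence picks VERCEL.
        FaasEnvironment.ENV_OVERRIDES_FOR_TESTING.put("VERCEL", "1");
        FaasEnvironment.ENV_OVERRIDES_FOR_TESTING.put("AWS_LAMBDA_RUNTIME_API", "127.0.0.1:9001");
        System.out.println(FaasEnvironment.getFaasEnvironment()); // VERCEL

        // A third marker makes the detection ambiguous, so it falls back to UNKNOWN.
        FaasEnvironment.ENV_OVERRIDES_FOR_TESTING.put("FUNCTIONS_WORKER_RUNTIME", "java");
        System.out.println(FaasEnvironment.getFaasEnvironment()); // UNKNOWN

        FaasEnvironment.ENV_OVERRIDES_FOR_TESTING.clear();
    }
}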
+ */ + +package com.mongodb.internal.connection; + +import org.bson.BsonBinary; +import org.bson.BsonDbPointer; +import org.bson.BsonReader; +import org.bson.BsonRegularExpression; +import org.bson.BsonTimestamp; +import org.bson.BsonWriter; +import org.bson.types.Decimal128; +import org.bson.types.ObjectId; + +/** + * Helper class to help determine when an update document contains any fields. + * It's an imperfect check because we can't tell if the pipe method ended up writing any fields. + * For the purposes of the check, it's better to assume that pipe does end up writing a field, in order to avoid + * incorrectly reporting an error any time pipe is used. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class FieldTrackingBsonWriter extends BsonWriterDecorator { + + private boolean hasWrittenField; + private boolean topLevelDocumentWritten; + + public FieldTrackingBsonWriter(final BsonWriter bsonWriter) { + super(bsonWriter); + } + + public boolean hasWrittenField() { + return hasWrittenField; + } + + @Override + public void writeStartDocument(final String name) { + if (topLevelDocumentWritten) { + hasWrittenField = true; + } + super.writeStartDocument(name); + } + + @Override + public void writeStartDocument() { + if (topLevelDocumentWritten) { + hasWrittenField = true; + } + topLevelDocumentWritten = true; + super.writeStartDocument(); + } + + @Override + public void writeStartArray(final String name) { + hasWrittenField = true; + super.writeStartArray(name); + } + + @Override + public void writeStartArray() { + hasWrittenField = true; + super.writeStartArray(); + } + + @Override + public void writeBinaryData(final String name, final BsonBinary binary) { + hasWrittenField = true; + super.writeBinaryData(name, binary); + } + + @Override + public void writeBinaryData(final BsonBinary binary) { + hasWrittenField = true; + super.writeBinaryData(binary); + } + + @Override + public void writeBoolean(final String name, final boolean value) { + hasWrittenField = true; + super.writeBoolean(name, value); + } + + @Override + public void writeBoolean(final boolean value) { + hasWrittenField = true; + super.writeBoolean(value); + } + + @Override + public void writeDateTime(final String name, final long value) { + hasWrittenField = true; + super.writeDateTime(name, value); + } + + @Override + public void writeDateTime(final long value) { + hasWrittenField = true; + super.writeDateTime(value); + } + + @Override + public void writeDBPointer(final String name, final BsonDbPointer value) { + hasWrittenField = true; + super.writeDBPointer(name, value); + } + + @Override + public void writeDBPointer(final BsonDbPointer value) { + hasWrittenField = true; + super.writeDBPointer(value); + } + + @Override + public void writeDouble(final String name, final double value) { + hasWrittenField = true; + super.writeDouble(name, value); + } + + @Override + public void writeDouble(final double value) { + hasWrittenField = true; + super.writeDouble(value); + } + + @Override + public void writeInt32(final String name, final int value) { + hasWrittenField = true; + super.writeInt32(name, value); + } + + @Override + public void writeInt32(final int value) { + hasWrittenField = true; + super.writeInt32(value); + } + + @Override + public void writeInt64(final String name, final long value) { + super.writeInt64(name, value); + hasWrittenField = true; + } + + @Override + public void writeInt64(final long value) { + hasWrittenField = true; + super.writeInt64(value); + } + + @Override + public void writeDecimal128(final Decimal128 value) { + hasWrittenField = true; + super.writeDecimal128(value); + } + + @Override + public void writeDecimal128(final String name, final Decimal128 value) { + hasWrittenField = true; + super.writeDecimal128(name, value); + } + + @Override + public void writeJavaScript(final String name, final String code) { + hasWrittenField = true; + super.writeJavaScript(name, code); + } + + @Override + public void writeJavaScript(final String code) { + hasWrittenField = true; + super.writeJavaScript(code); + } + + @Override + public void writeJavaScriptWithScope(final String name, final String code) { + super.writeJavaScriptWithScope(name, code); + hasWrittenField = true; + } + + @Override + public 
void writeJavaScriptWithScope(final String code) {
+        hasWrittenField = true;
+        super.writeJavaScriptWithScope(code);
+    }
+
+    @Override
+    public void writeMaxKey(final String name) {
+        hasWrittenField = true;
+        super.writeMaxKey(name);
+    }
+
+    @Override
+    public void writeMaxKey() {
+        hasWrittenField = true;
+        super.writeMaxKey();
+    }
+
+    @Override
+    public void writeMinKey(final String name) {
+        hasWrittenField = true;
+        super.writeMinKey(name);
+    }
+
+    @Override
+    public void writeMinKey() {
+        hasWrittenField = true;
+        super.writeMinKey();
+    }
+
+    @Override
+    public void writeNull(final String name) {
+        hasWrittenField = true;
+        super.writeNull(name);
+    }
+
+    @Override
+    public void writeNull() {
+        hasWrittenField = true;
+        super.writeNull();
+    }
+
+    @Override
+    public void writeObjectId(final String name, final ObjectId objectId) {
+        hasWrittenField = true;
+        super.writeObjectId(name, objectId);
+    }
+
+    @Override
+    public void writeObjectId(final ObjectId objectId) {
+        hasWrittenField = true;
+        super.writeObjectId(objectId);
+    }
+
+    @Override
+    public void writeRegularExpression(final String name, final BsonRegularExpression regularExpression) {
+        hasWrittenField = true;
+        super.writeRegularExpression(name, regularExpression);
+    }
+
+    @Override
+    public void writeRegularExpression(final BsonRegularExpression regularExpression) {
+        hasWrittenField = true;
+        super.writeRegularExpression(regularExpression);
+    }
+
+    @Override
+    public void writeString(final String name, final String value) {
+        hasWrittenField = true;
+        super.writeString(name, value);
+    }
+
+    @Override
+    public void writeString(final String value) {
+        hasWrittenField = true;
+        super.writeString(value);
+    }
+
+    @Override
+    public void writeSymbol(final String name, final String value) {
+        hasWrittenField = true;
+        super.writeSymbol(name, value);
+    }
+
+    @Override
+    public void writeSymbol(final String value) {
+        hasWrittenField = true;
+        super.writeSymbol(value);
+    }
+
+    @Override
+    public void writeTimestamp(final String name, final BsonTimestamp value) {
+        hasWrittenField = true;
+        super.writeTimestamp(name, value);
+    }
+
+    @Override
+    public void writeTimestamp(final BsonTimestamp value) {
+        hasWrittenField = true;
+        super.writeTimestamp(value);
+    }
+
+    @Override
+    public void writeUndefined(final String name) {
+        hasWrittenField = true;
+        super.writeUndefined(name);
+    }
+
+    @Override
+    public void writeUndefined() {
+        hasWrittenField = true;
+        super.writeUndefined();
+    }
+
+    @Override
+    public void pipe(final BsonReader reader) {
+        // this is a faulty assumption, as we may end up piping an empty document. But if we don't set the flag here, we may
+        // under-report, which in this context is worse, since we would throw an exception when we should not.
+        hasWrittenField = true;
+        super.pipe(reader);
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/internal/connection/FutureAsyncCompletionHandler.java b/driver-core/src/main/com/mongodb/internal/connection/FutureAsyncCompletionHandler.java
new file mode 100644
index 00000000000..2a9cc5af9c3
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/connection/FutureAsyncCompletionHandler.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.connection;
+
+import com.mongodb.MongoException;
+import com.mongodb.MongoInternalException;
+import com.mongodb.connection.AsyncCompletionHandler;
+import com.mongodb.lang.Nullable;
+
+import java.io.IOException;
+import java.util.concurrent.CountDownLatch;
+
+import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException;
+
+class FutureAsyncCompletionHandler<T> implements AsyncCompletionHandler<T> {
+    private final CountDownLatch latch = new CountDownLatch(1);
+    private volatile T result;
+    private volatile Throwable error;
+
+    @Override
+    public void completed(@Nullable final T result) {
+        this.result = result;
+        latch.countDown();
+    }
+
+    @Override
+    public void failed(final Throwable t) {
+        this.error = t;
+        latch.countDown();
+    }
+
+    public void getOpen() throws IOException {
+        get("Opening");
+    }
+
+    public void getWrite() throws IOException {
+        get("Writing to");
+    }
+
+    public T getRead() throws IOException {
+        return get("Reading from");
+    }
+
+    private T get(final String prefix) throws IOException {
+        try {
+            latch.await();
+        } catch (InterruptedException e) {
+            throw interruptAndCreateMongoInterruptedException(prefix + " the AsynchronousSocketChannelStream failed", e);
+        }
+        if (error != null) {
+            if (error instanceof IOException) {
+                throw (IOException) error;
+            } else if (error instanceof MongoException) {
+                throw (MongoException) error;
+            } else {
+                throw new MongoInternalException(prefix + " the AsynchronousSocketChannelStream failed", error);
+            }
+        }
+        return result;
+    }
+
+}
diff --git a/driver-core/src/main/com/mongodb/internal/connection/GSSAPIAuthenticator.java b/driver-core/src/main/com/mongodb/internal/connection/GSSAPIAuthenticator.java
new file mode 100644
index 00000000000..c3902751ec5
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/connection/GSSAPIAuthenticator.java
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.connection;
+
+import com.mongodb.KerberosSubjectProvider;
+import com.mongodb.MongoCredential;
+import com.mongodb.MongoException;
+import com.mongodb.MongoSecurityException;
+import com.mongodb.ServerAddress;
+import com.mongodb.ServerApi;
+import com.mongodb.SubjectProvider;
+import com.mongodb.connection.ClusterConnectionMode;
+import com.mongodb.lang.NonNull;
+import com.mongodb.lang.Nullable;
+import org.ietf.jgss.GSSCredential;
+import org.ietf.jgss.GSSException;
+import org.ietf.jgss.GSSManager;
+import org.ietf.jgss.GSSName;
+import org.ietf.jgss.Oid;
+
+import javax.security.sasl.Sasl;
+import javax.security.sasl.SaslClient;
+import javax.security.sasl.SaslException;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.HashMap;
+import java.util.Map;
+
+import static com.mongodb.AuthenticationMechanism.GSSAPI;
+import static com.mongodb.MongoCredential.CANONICALIZE_HOST_NAME_KEY;
+import static com.mongodb.MongoCredential.JAVA_SASL_CLIENT_PROPERTIES_KEY;
+import static com.mongodb.MongoCredential.SERVICE_NAME_KEY;
+import static com.mongodb.assertions.Assertions.assertNotNull;
+
+class GSSAPIAuthenticator extends SaslAuthenticator {
+    private static final String GSSAPI_MECHANISM_NAME = "GSSAPI";
+    private static final String GSSAPI_OID = "1.2.840.113554.1.2.2";
+    private static final String SERVICE_NAME_DEFAULT_VALUE = "mongodb";
+    private static final Boolean CANONICALIZE_HOST_NAME_DEFAULT_VALUE = false;
+
+    GSSAPIAuthenticator(final MongoCredentialWithCache credential, final ClusterConnectionMode clusterConnectionMode,
+                        @Nullable final ServerApi serverApi) {
+        super(credential, clusterConnectionMode, serverApi);
+
+        if (getMongoCredential().getAuthenticationMechanism() != GSSAPI) {
+            throw new MongoException("Incorrect mechanism: " + getMongoCredential().getMechanism());
+        }
+    }
+
+    @Override
+    public String getMechanismName() {
+        return GSSAPI_MECHANISM_NAME;
+    }
+
+    @Override
+    protected SaslClient createSaslClient(final ServerAddress serverAddress, final OperationContext operationContext) {
+        MongoCredential credential = getMongoCredential();
+        try {
+            Map<String, Object> saslClientProperties = credential.getMechanismProperty(JAVA_SASL_CLIENT_PROPERTIES_KEY, null);
+            if (saslClientProperties == null) {
+                saslClientProperties = new HashMap<>();
+                saslClientProperties.put(Sasl.MAX_BUFFER, "0");
+                saslClientProperties.put(Sasl.CREDENTIALS, getGSSCredential(assertNotNull(credential.getUserName())));
+            }
+
+            SaslClient saslClient = Sasl.createSaslClient(new String[]{GSSAPI.getMechanismName()}, credential.getUserName(),
+                    credential.getMechanismProperty(SERVICE_NAME_KEY, SERVICE_NAME_DEFAULT_VALUE),
+                    getHostName(serverAddress), saslClientProperties, null);
+            if (saslClient == null) {
+                throw new MongoSecurityException(credential, String.format("No platform support for %s mechanism", GSSAPI));
+            }
+
+            return saslClient;
+        } catch (SaslException e) {
+            throw new MongoSecurityException(credential, "Exception initializing SASL client", e);
+        } catch (GSSException e) {
+            throw new MongoSecurityException(credential, "Exception initializing GSSAPI credentials", e);
+        } catch (UnknownHostException e) {
+            throw new MongoSecurityException(credential, "Unable to canonicalize host name " + serverAddress);
+        }
+    }
+
+    private GSSCredential getGSSCredential(final String userName) throws GSSException {
+        Oid krb5Mechanism = new Oid(GSSAPI_OID);
+        GSSManager manager = GSSManager.getInstance();
+        GSSName name = manager.createName(userName,
GSSName.NT_USER_NAME); + return manager.createCredential(name, GSSCredential.INDEFINITE_LIFETIME, krb5Mechanism, GSSCredential.INITIATE_ONLY); + } + + private String getHostName(final ServerAddress serverAddress) throws UnknownHostException { + return getNonNullMechanismProperty(CANONICALIZE_HOST_NAME_KEY, CANONICALIZE_HOST_NAME_DEFAULT_VALUE) + ? InetAddress.getByName(serverAddress.getHost()).getCanonicalHostName() + : serverAddress.getHost(); + } + + @NonNull + protected SubjectProvider getDefaultSubjectProvider() { + return new KerberosSubjectProvider(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/IdHoldingBsonWriter.java b/driver-core/src/main/com/mongodb/internal/connection/IdHoldingBsonWriter.java new file mode 100644 index 00000000000..c73a5d4fd86 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/IdHoldingBsonWriter.java @@ -0,0 +1,466 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.lang.Nullable; +import org.bson.BsonBinary; +import org.bson.BsonBinaryWriter; +import org.bson.BsonBoolean; +import org.bson.BsonDateTime; +import org.bson.BsonDbPointer; +import org.bson.BsonDecimal128; +import org.bson.BsonDocument; +import org.bson.BsonDouble; +import org.bson.BsonInt32; +import org.bson.BsonInt64; +import org.bson.BsonJavaScript; +import org.bson.BsonJavaScriptWithScope; +import org.bson.BsonMaxKey; +import org.bson.BsonMinKey; +import org.bson.BsonNull; +import org.bson.BsonObjectId; +import org.bson.BsonReader; +import org.bson.BsonRegularExpression; +import org.bson.BsonString; +import org.bson.BsonSymbol; +import org.bson.BsonTimestamp; +import org.bson.BsonUndefined; +import org.bson.BsonValue; +import org.bson.BsonWriter; +import org.bson.RawBsonDocument; +import org.bson.io.BasicOutputBuffer; +import org.bson.types.Decimal128; +import org.bson.types.ObjectId; + +import java.util.function.Supplier; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
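+ * <p>An illustrative sketch of the intended use (the wrapped writer here stands in for whatever {@link BsonWriter} the caller supplies):</p>
+ * <pre>{@code
+ * IdHoldingBsonWriter writer = new IdHoldingBsonWriter(wrappedWriter, null);
+ * writer.writeStartDocument();
+ * writer.writeString("name", "example");
+ * writer.writeEndDocument();
+ * BsonValue id = writer.getId(); // a generated BsonObjectId, since no "_id" was written
+ * }</pre>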
+ */ +public class IdHoldingBsonWriter extends LevelCountingBsonWriter { + + private static final String ID_FIELD_NAME = "_id"; + + private LevelCountingBsonWriter idBsonBinaryWriter; + private BasicOutputBuffer outputBuffer; + private String currentFieldName; + private final BsonValue fallbackId; + private BsonValue id; + private boolean idFieldIsAnArray = false; + + /** + * @param fallbackId The "_id" field value to use if the top-level document written via this {@link BsonWriter} + * does not have "_id". If {@code null}, then a new {@link BsonObjectId} is generated instead. + */ + public IdHoldingBsonWriter(final BsonWriter bsonWriter, @Nullable final BsonObjectId fallbackId) { + super(bsonWriter); + this.fallbackId = fallbackId; + } + + @Override + public void writeStartDocument(final String name) { + setCurrentFieldName(name); + + if (isWritingId()) { + getIdBsonWriter().writeStartDocument(name); + } + super.writeStartDocument(name); + } + + @Override + public void writeStartDocument() { + if (isWritingId()) { + getIdBsonWriter().writeStartDocument(); + } + super.writeStartDocument(); + } + + @Override + public void writeEndDocument() { + if (isWritingId()) { + if (getIdBsonWriterCurrentLevel() > DEFAULT_INITIAL_LEVEL) { + getIdBsonWriter().writeEndDocument(); + } + + if (getIdBsonWriterCurrentLevel() == DEFAULT_INITIAL_LEVEL) { + if (id != null && id.isJavaScriptWithScope()) { + id = new BsonJavaScriptWithScope(id.asJavaScriptWithScope().getCode(), new RawBsonDocument(getBytes())); + } else if (id == null) { + id = new RawBsonDocument(getBytes()); + } + } + } + + if (getCurrentLevel() == DEFAULT_INITIAL_LEVEL + 1 && id == null) { + id = fallbackId == null ? new BsonObjectId() : fallbackId; + writeObjectId(ID_FIELD_NAME, id.asObjectId().getValue()); + } + super.writeEndDocument(); + } + + @Override + public void writeStartArray() { + if (isWritingId()) { + if (getIdBsonWriterCurrentLevel() == DEFAULT_INITIAL_LEVEL) { + idFieldIsAnArray = true; + getIdBsonWriter().writeStartDocument(); + getIdBsonWriter().writeName(ID_FIELD_NAME); + } + getIdBsonWriter().writeStartArray(); + } + super.writeStartArray(); + } + + @Override + public void writeStartArray(final String name) { + setCurrentFieldName(name); + if (isWritingId()) { + if (getIdBsonWriterCurrentLevel() == DEFAULT_INITIAL_LEVEL) { + getIdBsonWriter().writeStartDocument(); + } + getIdBsonWriter().writeStartArray(name); + } + super.writeStartArray(name); + } + + @Override + public void writeEndArray() { + if (isWritingId()) { + getIdBsonWriter().writeEndArray(); + if (getIdBsonWriterCurrentLevel() == DEFAULT_INITIAL_LEVEL + 1 && idFieldIsAnArray) { + getIdBsonWriter().writeEndDocument(); + id = new RawBsonDocument(getBytes()).get(ID_FIELD_NAME); + } + } + super.writeEndArray(); + } + + @Override + public void writeBinaryData(final String name, final BsonBinary binary) { + setCurrentFieldName(name); + addBsonValue(() -> binary, () -> getIdBsonWriter().writeBinaryData(name, binary)); + super.writeBinaryData(name, binary); + } + + @Override + public void writeBinaryData(final BsonBinary binary) { + addBsonValue(() -> binary, () -> getIdBsonWriter().writeBinaryData(binary)); + super.writeBinaryData(binary); + } + + @Override + public void writeBoolean(final String name, final boolean value) { + setCurrentFieldName(name); + addBsonValue(() -> BsonBoolean.valueOf(value), () -> getIdBsonWriter().writeBoolean(name, value)); + super.writeBoolean(name, value); + } + + @Override + public void writeBoolean(final boolean value) { + 
addBsonValue(() -> BsonBoolean.valueOf(value), () -> getIdBsonWriter().writeBoolean(value)); + super.writeBoolean(value); + } + + @Override + public void writeDateTime(final String name, final long value) { + setCurrentFieldName(name); + addBsonValue(() -> new BsonDateTime(value), () -> getIdBsonWriter().writeDateTime(name, value)); + super.writeDateTime(name, value); + } + + @Override + public void writeDateTime(final long value) { + addBsonValue(() -> new BsonDateTime(value), () -> getIdBsonWriter().writeDateTime(value)); + super.writeDateTime(value); + } + + @Override + public void writeDBPointer(final String name, final BsonDbPointer value) { + setCurrentFieldName(name); + addBsonValue(() -> value, () -> getIdBsonWriter().writeDBPointer(name, value)); + super.writeDBPointer(name, value); + } + + @Override + public void writeDBPointer(final BsonDbPointer value) { + addBsonValue(() -> value, () -> getIdBsonWriter().writeDBPointer(value)); + super.writeDBPointer(value); + } + + @Override + public void writeDouble(final String name, final double value) { + setCurrentFieldName(name); + addBsonValue(() -> new BsonDouble(value), () -> getIdBsonWriter().writeDouble(name, value)); + super.writeDouble(name, value); + } + + @Override + public void writeDouble(final double value) { + addBsonValue(() -> new BsonDouble(value), () -> getIdBsonWriter().writeDouble(value)); + super.writeDouble(value); + } + + @Override + public void writeInt32(final String name, final int value) { + setCurrentFieldName(name); + addBsonValue(() -> new BsonInt32(value), () -> getIdBsonWriter().writeInt32(name, value)); + super.writeInt32(name, value); + } + + @Override + public void writeInt32(final int value) { + addBsonValue(() -> new BsonInt32(value), () -> getIdBsonWriter().writeInt32(value)); + super.writeInt32(value); + } + + @Override + public void writeInt64(final String name, final long value) { + setCurrentFieldName(name); + addBsonValue(() -> new BsonInt64(value), () -> getIdBsonWriter().writeInt64(name, value)); + super.writeInt64(name, value); + } + + @Override + public void writeInt64(final long value) { + addBsonValue(() -> new BsonInt64(value), () -> getIdBsonWriter().writeInt64(value)); + super.writeInt64(value); + } + + @Override + public void writeDecimal128(final String name, final Decimal128 value) { + setCurrentFieldName(name); + addBsonValue(() -> new BsonDecimal128(value), () -> getIdBsonWriter().writeDecimal128(name, value)); + super.writeDecimal128(name, value); + } + + @Override + public void writeDecimal128(final Decimal128 value) { + addBsonValue(() -> new BsonDecimal128(value), () -> getIdBsonWriter().writeDecimal128(value)); + super.writeDecimal128(value); + } + + @Override + public void writeJavaScript(final String name, final String code) { + setCurrentFieldName(name); + addBsonValue(() -> new BsonJavaScript(code), () -> getIdBsonWriter().writeJavaScript(name, code)); + super.writeJavaScript(name, code); + } + + @Override + public void writeJavaScript(final String code) { + addBsonValue(() -> new BsonJavaScript(code), () -> getIdBsonWriter().writeJavaScript(code)); + super.writeJavaScript(code); + } + + @Override + public void writeJavaScriptWithScope(final String name, final String code) { + addBsonValue(() -> new BsonJavaScriptWithScope(code, new BsonDocument()), + () -> getIdBsonWriter().writeJavaScriptWithScope(name, code)); + super.writeJavaScriptWithScope(name, code); + } + + @Override + public void writeJavaScriptWithScope(final String code) { + addBsonValue(() -> new 
BsonJavaScriptWithScope(code, new BsonDocument()), () -> getIdBsonWriter().writeJavaScriptWithScope(code)); + super.writeJavaScriptWithScope(code); + } + + @Override + public void writeMaxKey(final String name) { + setCurrentFieldName(name); + addBsonValue(BsonMaxKey::new, () -> getIdBsonWriter().writeMaxKey(name)); + super.writeMaxKey(name); + } + + @Override + public void writeMaxKey() { + addBsonValue(BsonMaxKey::new, getIdBsonWriter()::writeMaxKey); + super.writeMaxKey(); + } + + @Override + public void writeMinKey(final String name) { + setCurrentFieldName(name); + addBsonValue(BsonMinKey::new, () -> getIdBsonWriter().writeMinKey(name)); + super.writeMinKey(name); + } + + @Override + public void writeMinKey() { + addBsonValue(BsonMinKey::new, getIdBsonWriter()::writeMinKey); + super.writeMinKey(); + } + + @Override + public void writeName(final String name) { + setCurrentFieldName(name); + if (getIdBsonWriterCurrentLevel() > DEFAULT_INITIAL_LEVEL) { + getIdBsonWriter().writeName(name); + } + super.writeName(name); + } + + @Override + public void writeNull(final String name) { + setCurrentFieldName(name); + addBsonValue(BsonNull::new, () -> getIdBsonWriter().writeNull(name)); + super.writeNull(name); + } + + @Override + public void writeNull() { + addBsonValue(BsonNull::new, getIdBsonWriter()::writeNull); + super.writeNull(); + } + + @Override + public void writeObjectId(final String name, final ObjectId objectId) { + setCurrentFieldName(name); + addBsonValue(() -> new BsonObjectId(objectId), () -> getIdBsonWriter().writeObjectId(name, objectId)); + super.writeObjectId(name, objectId); + } + + @Override + public void writeObjectId(final ObjectId objectId) { + addBsonValue(() -> new BsonObjectId(objectId), () -> getIdBsonWriter().writeObjectId(objectId)); + super.writeObjectId(objectId); + } + + @Override + public void writeRegularExpression(final String name, final BsonRegularExpression regularExpression) { + setCurrentFieldName(name); + addBsonValue(() -> regularExpression, () -> getIdBsonWriter().writeRegularExpression(name, regularExpression)); + super.writeRegularExpression(name, regularExpression); + } + + @Override + public void writeRegularExpression(final BsonRegularExpression regularExpression) { + addBsonValue(() -> regularExpression, () -> getIdBsonWriter().writeRegularExpression(regularExpression)); + super.writeRegularExpression(regularExpression); + } + + @Override + public void writeString(final String name, final String value) { + setCurrentFieldName(name); + addBsonValue(() -> new BsonString(value), () -> getIdBsonWriter().writeString(name, value)); + super.writeString(name, value); + } + + @Override + public void writeString(final String value) { + addBsonValue(() -> new BsonString(value), () -> getIdBsonWriter().writeString(value)); + super.writeString(value); + } + + @Override + public void writeSymbol(final String name, final String value) { + setCurrentFieldName(name); + addBsonValue(() -> new BsonSymbol(value), () -> getIdBsonWriter().writeSymbol(name, value)); + super.writeSymbol(name, value); + } + + @Override + public void writeSymbol(final String value) { + addBsonValue(() -> new BsonSymbol(value), () -> getIdBsonWriter().writeSymbol(value)); + super.writeSymbol(value); + } + + @Override + public void writeTimestamp(final String name, final BsonTimestamp value) { + setCurrentFieldName(name); + addBsonValue(() -> value, () -> getIdBsonWriter().writeTimestamp(name, value)); + super.writeTimestamp(name, value); + } + + @Override + public void 
writeTimestamp(final BsonTimestamp value) { + addBsonValue(() -> value, () -> getIdBsonWriter().writeTimestamp(value)); + super.writeTimestamp(value); + } + + @Override + public void writeUndefined(final String name) { + setCurrentFieldName(name); + addBsonValue(BsonUndefined::new, () -> getIdBsonWriter().writeUndefined(name)); + super.writeUndefined(name); + } + + @Override + public void writeUndefined() { + addBsonValue(BsonUndefined::new, getIdBsonWriter()::writeUndefined); + super.writeUndefined(); + } + + @Override + public void pipe(final BsonReader reader) { + super.pipe(reader); + } + + @Override + public void flush() { + super.flush(); + } + + /** + * Returns either the value of the "_id" field from the top-level document written via this {@link BsonWriter}, + * provided that the document is not {@link RawBsonDocument}, + * or the generated {@link BsonObjectId}. + * If the document is {@link RawBsonDocument}, then returns {@code null}. + *
<p>
+ * {@linkplain #flush() Flushing} is not required before calling this method.</p>
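+ * <p>For example, when the caller writes "_id" explicitly, that value is what is returned:</p>
+ * <pre>{@code
+ * writer.writeStartDocument();
+ * writer.writeInt32("_id", 42);
+ * writer.writeEndDocument();
+ * writer.getId(); // BsonInt32(42)
+ * }</pre>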
+ */
+    @Nullable
+    public BsonValue getId() {
+        return id;
+    }
+
+    private void setCurrentFieldName(final String name) {
+        currentFieldName = name;
+    }
+
+    private boolean isWritingId() {
+        return getIdBsonWriterCurrentLevel() > DEFAULT_INITIAL_LEVEL || (getCurrentLevel() == DEFAULT_INITIAL_LEVEL + 1 && currentFieldName != null
+                && currentFieldName.equals(ID_FIELD_NAME));
+    }
+
+    private void addBsonValue(final Supplier<BsonValue> value, final Runnable writeValue) {
+        if (isWritingId()) {
+            if (getIdBsonWriterCurrentLevel() > DEFAULT_INITIAL_LEVEL) {
+                writeValue.run();
+            } else {
+                id = value.get();
+            }
+        }
+    }
+
+    private int getIdBsonWriterCurrentLevel() {
+        return idBsonBinaryWriter == null ? DEFAULT_INITIAL_LEVEL : idBsonBinaryWriter.getCurrentLevel();
+    }
+
+    private LevelCountingBsonWriter getIdBsonWriter() {
+        if (idBsonBinaryWriter == null) {
+            outputBuffer = new BasicOutputBuffer(128);
+            idBsonBinaryWriter = new LevelCountingBsonWriter(new BsonBinaryWriter(outputBuffer)) {};
+        }
+        return idBsonBinaryWriter;
+    }
+
+    private byte[] getBytes() {
+        return outputBuffer.getInternalBuffer();
+    }
+
+}
diff --git a/driver-core/src/main/com/mongodb/internal/connection/IndexMap.java b/driver-core/src/main/com/mongodb/internal/connection/IndexMap.java
new file mode 100644
index 00000000000..2d3be7e060d
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/connection/IndexMap.java
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.connection;
+
+import com.mongodb.MongoInternalException;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static com.mongodb.assertions.Assertions.isTrueArgument;
+
+/**
+ * <p>Efficiently maps each integer in a set to another integer in a set, useful for merging bulk write errors when a bulk write must be
+ * split into multiple batches. Has the ability to switch from a range-based to a hash-based map depending on the mappings that have
+ * been added.</p>
+ * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
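+ * <p>For example:</p>
+ * <pre>{@code
+ * IndexMap map = IndexMap.create(10, 3); // 0 -> 10, 1 -> 11, 2 -> 12 (range-based)
+ * map = map.add(3, 20);                  // non-contiguous mapping, so it switches to hash-based
+ * map.map(3);                            // 20
+ * }</pre>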
+ */
+public abstract class IndexMap {
+
+    /**
+     * Create an empty index map.
+     *
+     * @return a new index map
+     */
+    public static IndexMap create() {
+        return new RangeBased();
+    }
+
+    /**
+     * Create an index map that maps the integers 0..count - 1 to startIndex..startIndex + count - 1.
+     *
+     * @param startIndex the start index
+     * @param count the count
+     * @return an index map
+     */
+    public static IndexMap create(final int startIndex, final int count) {
+        return new RangeBased(startIndex, count);
+    }
+
+    /**
+     * Add a new index to the map
+     *
+     * @param index the index
+     * @param originalIndex the original index
+     * @return an index map with this index added to it
+     */
+    public abstract IndexMap add(int index, int originalIndex);
+
+    /**
+     * Return the index that the specified index is mapped to.
+     *
+     * @param index the index
+     * @return the index it's mapped to
+     */
+    public abstract int map(int index);
+
+    private static class HashBased extends IndexMap {
+        private final Map<Integer, Integer> indexMap = new HashMap<>();
+
+        HashBased(final int startIndex, final int count) {
+            for (int i = startIndex; i < startIndex + count; i++) {
+                indexMap.put(i - startIndex, i);
+            }
+        }
+
+        @Override
+        public IndexMap add(final int index, final int originalIndex) {
+            indexMap.put(index, originalIndex);
+            return this;
+        }
+
+        @Override
+        public int map(final int index) {
+            Integer originalIndex = indexMap.get(index);
+            if (originalIndex == null) {
+                throw new MongoInternalException("no mapping found for index " + index);
+            }
+            return originalIndex;
+        }
+    }
+
+    private static class RangeBased extends IndexMap {
+        private int startIndex;
+        private int count;
+
+        RangeBased() {
+        }
+
+        RangeBased(final int startIndex, final int count) {
+            isTrueArgument("startIndex", startIndex >= 0);
+            isTrueArgument("count", count > 0);
+            this.startIndex = startIndex;
+            this.count = count;
+        }
+
+        @Override
+        public IndexMap add(final int index, final int originalIndex) {
+            if (count == 0) {
+                startIndex = originalIndex;
+                count = 1;
+                return this;
+            } else if (originalIndex == startIndex + count) {
+                count += 1;
+                return this;
+            } else {
+                IndexMap hashBasedMap = new HashBased(startIndex, count);
+                hashBasedMap.add(index, originalIndex);
+                return hashBasedMap;
+            }
+        }
+
+        @Override
+        public int map(final int index) {
+            if (index < 0) {
+                throw new MongoInternalException("no mapping found for index " + index);
+            } else if (index >= count) {
+                throw new MongoInternalException("index should not be greater than or equal to count");
+            }
+            return startIndex + index;
+        }
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/internal/connection/InetAddressUtils.java b/driver-core/src/main/com/mongodb/internal/connection/InetAddressUtils.java
new file mode 100644
index 00000000000..9d82947671a
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/connection/InetAddressUtils.java
@@ -0,0 +1,322 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ * Copyright (C) 2008 The Guava Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
+ * in compliance with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.lang.Nullable; + +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.nio.ByteBuffer; + +/** + * Static utility methods pertaining to {@link InetAddress} instances. + * + *
<p>
Important note: Unlike {@link java.net.InetAddress#getByName(String)}, the methods of this class never + * cause DNS services to be accessed. For this reason, you should prefer these methods as much as + * possible over their JDK equivalents whenever you are expecting to handle only IP address string + * literals -- there is no blocking DNS penalty for a malformed string. + */ +final class InetAddressUtils { + private static final int IPV4_PART_COUNT = 4; + private static final int IPV6_PART_COUNT = 8; + private static final char IPV4_DELIMITER = '.'; + private static final char IPV6_DELIMITER = ':'; + + private InetAddressUtils() { + } + + /** + * Returns the {@link InetAddress} having the given string representation. + * + *
<p>
This deliberately avoids all nameservice lookups (e.g. no DNS). + * + *
<p>
Anything after a {@code %} in an IPv6 address is ignored (assumed to be a Scope ID). + * + *
<p>
This method accepts non-ASCII digits, for example {@code "１９２.１６８.０.１"} (those are fullwidth
+     * characters). That is consistent with {@link InetAddress}, but not with various RFCs.
+     *
+     * @param ipString {@code String} containing an IPv4 or IPv6 string literal, e.g. {@code
+     *     "192.168.0.1"} or {@code "2001:db8::1"}
+     * @return {@link InetAddress} representing the argument
+     * @throws IllegalArgumentException if the argument is not a valid IP string literal
+     */
+    static InetAddress forString(final String ipString) {
+        byte[] addr = ipStringToBytes(ipString);
+
+        // The argument was malformed, i.e. not an IP string literal.
+        if (addr == null) {
+            throw new IllegalArgumentException(ipString + " IP address is incorrect");
+        }
+
+        return bytesToInetAddress(addr);
+    }
+
+    /**
+     * Returns {@code true} if the supplied string is a valid IP string literal, {@code false}
+     * otherwise.
+     *
+     *
<p>
This method accepts non-ASCII digits, for example {@code "１９２.１６８.０.１"} (those are fullwidth
+     * characters). That is consistent with {@link InetAddress}, but not with various RFCs.
+     *
+     * @param ipString {@code String} to be evaluated as an IP string literal
+     * @return {@code true} if the argument is a valid IP string literal
+     */
+    static boolean isInetAddress(final String ipString) {
+        return ipStringToBytes(ipString) != null;
+    }
+
+    /**
+     * Returns {@code null} if unable to parse into a {@code byte[]}.
+     */
+    @Nullable
+    static byte[] ipStringToBytes(final String ipStringParam) {
+        String ipString = ipStringParam;
+        // Make a first pass to categorize the characters in this string.
+        boolean hasColon = false;
+        boolean hasDot = false;
+        int percentIndex = -1;
+        for (int i = 0; i < ipString.length(); i++) {
+            char c = ipString.charAt(i);
+            if (c == '.') {
+                hasDot = true;
+            } else if (c == ':') {
+                if (hasDot) {
+                    return null; // Colons must not appear after dots.
+                }
+                hasColon = true;
+            } else if (c == '%') {
+                percentIndex = i;
+                break; // everything after a '%' is ignored (it's a Scope ID): http://superuser.com/a/99753
+            } else if (Character.digit(c, 16) == -1) {
+                return null; // Everything else must be a decimal or hex digit.
+            }
+        }
+
+        // Now decide which address family to parse.
+        if (hasColon) {
+            if (hasDot) {
+                ipString = convertDottedQuadToHex(ipString);
+                if (ipString == null) {
+                    return null;
+                }
+            }
+            if (percentIndex != -1) {
+                ipString = ipString.substring(0, percentIndex);
+            }
+            return textToNumericFormatV6(ipString);
+        } else if (hasDot) {
+            if (percentIndex != -1) {
+                return null; // Scope IDs are not supported for IPV4
+            }
+            return textToNumericFormatV4(ipString);
+        }
+        return null;
+    }
+
+    private static boolean hasCorrectNumberOfOctets(final String sequence) {
+        int matches = 3;
+        int index = 0;
+        while (matches-- > 0) {
+            index = sequence.indexOf(IPV4_DELIMITER, index);
+            if (index == -1) {
+                return false;
+            }
+            index++;
+        }
+        return sequence.indexOf(IPV4_DELIMITER, index) == -1;
+    }
+
+    private static int countIn(final CharSequence sequence, final char character) {
+        int count = 0;
+        for (int i = 0; i < sequence.length(); i++) {
+            if (sequence.charAt(i) == character) {
+                count++;
+            }
+        }
+        return count;
+    }
+
+    @Nullable
+    private static byte[] textToNumericFormatV4(final String ipString) {
+        if (!hasCorrectNumberOfOctets(ipString)) {
+            return null; // Wrong number of parts
+        }
+
+        byte[] bytes = new byte[IPV4_PART_COUNT];
+        int start = 0;
+        // Iterate through the parts of the ip string.
+        // Invariant: start is always the beginning of an octet.
+        for (int i = 0; i < IPV4_PART_COUNT; i++) {
+            int end = ipString.indexOf(IPV4_DELIMITER, start);
+            if (end == -1) {
+                end = ipString.length();
+            }
+            try {
+                bytes[i] = parseOctet(ipString, start, end);
+            } catch (NumberFormatException ex) {
+                return null;
+            }
+            start = end + 1;
+        }
+
+        return bytes;
+    }
+
+    @Nullable
+    private static byte[] textToNumericFormatV6(final String ipString) {
+        // An address can have [2..8] colons.
+        int delimiterCount = countIn(ipString, IPV6_DELIMITER);
+        if (delimiterCount < 2 || delimiterCount > IPV6_PART_COUNT) {
+            return null;
+        }
+        int partsSkipped = IPV6_PART_COUNT - (delimiterCount + 1); // estimate; may be modified later
+        boolean hasSkip = false;
+        // Scan for the appearance of ::, to mark a skip-format IPV6 string and adjust the partsSkipped
+        // estimate.
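+        // For example, "1::3:4:5:6:7:8" has 7 delimiters, so partsSkipped starts at 0 here; the "::"
+        // found by the scan below then bumps it to 1, and the "::" expands to exactly one zero hextet.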
+ for (int i = 0; i < ipString.length() - 1; i++) { + if (ipString.charAt(i) == IPV6_DELIMITER && ipString.charAt(i + 1) == IPV6_DELIMITER) { + if (hasSkip) { + return null; // Can't have more than one :: + } + hasSkip = true; + partsSkipped++; // :: means we skipped an extra part in between the two delimiters. + if (i == 0) { + partsSkipped++; // Begins with ::, so we skipped the part preceding the first : + } + if (i == ipString.length() - 2) { + partsSkipped++; // Ends with ::, so we skipped the part after the last : + } + } + } + if (ipString.charAt(0) == IPV6_DELIMITER && ipString.charAt(1) != IPV6_DELIMITER) { + return null; // ^: requires ^:: + } + if (ipString.charAt(ipString.length() - 1) == IPV6_DELIMITER + && ipString.charAt(ipString.length() - 2) != IPV6_DELIMITER) { + return null; // :$ requires ::$ + } + if (hasSkip && partsSkipped <= 0) { + return null; // :: must expand to at least one '0' + } + if (!hasSkip && delimiterCount + 1 != IPV6_PART_COUNT) { + return null; // Incorrect number of parts + } + + ByteBuffer rawBytes = ByteBuffer.allocate(2 * IPV6_PART_COUNT); + try { + // Iterate through the parts of the ip string. + // Invariant: start is always the beginning of a hextet, or the second ':' of the skip + // sequence "::" + int start = 0; + if (ipString.charAt(0) == IPV6_DELIMITER) { + start = 1; + } + while (start < ipString.length()) { + int end = ipString.indexOf(IPV6_DELIMITER, start); + if (end == -1) { + end = ipString.length(); + } + if (ipString.charAt(start) == IPV6_DELIMITER) { + // expand zeroes + for (int i = 0; i < partsSkipped; i++) { + rawBytes.putShort((short) 0); + } + + } else { + rawBytes.putShort(parseHextet(ipString, start, end)); + } + start = end + 1; + } + } catch (NumberFormatException ex) { + return null; + } + return rawBytes.array(); + } + + @Nullable + private static String convertDottedQuadToHex(final String ipString) { + int lastColon = ipString.lastIndexOf(':'); + String initialPart = ipString.substring(0, lastColon + 1); + String dottedQuad = ipString.substring(lastColon + 1); + byte[] quad = textToNumericFormatV4(dottedQuad); + if (quad == null) { + return null; + } + String penultimate = Integer.toHexString(((quad[0] & 0xff) << 8) | (quad[1] & 0xff)); + String ultimate = Integer.toHexString(((quad[2] & 0xff) << 8) | (quad[3] & 0xff)); + return initialPart + penultimate + ":" + ultimate; + } + + private static byte parseOctet(final String ipString, final int start, final int end) { + // Note: we already verified that this string contains only hex digits, but the string may still + // contain non-decimal characters. + int length = end - start; + if (length <= 0 || length > 3) { + throw new NumberFormatException(); + } + // Disallow leading zeroes, because no clear standard exists on + // whether these should be interpreted as decimal or octal. + if (length > 1 && ipString.charAt(start) == '0') { + throw new NumberFormatException("IP address octal representation is not supported"); + } + int octet = 0; + for (int i = start; i < end; i++) { + octet *= 10; + int digit = Character.digit(ipString.charAt(i), 10); + if (digit < 0) { + throw new NumberFormatException(); + } + octet += digit; + } + if (octet > 255) { + throw new NumberFormatException(); + } + return (byte) octet; + } + + // Parse a hextet out of the ipString from start (inclusive) to end (exclusive) + private static short parseHextet(final String ipString, final int start, final int end) { + // Note: we already verified that this string contains only hex digits. 
+ int length = end - start; + if (length <= 0 || length > 4) { + throw new NumberFormatException(); + } + int hextet = 0; + for (int i = start; i < end; i++) { + hextet = hextet << 4; + hextet |= Character.digit(ipString.charAt(i), 16); + } + return (short) hextet; + } + + /** + * Convert a byte array into an InetAddress. + * + *
<p>
{@link InetAddress#getByAddress} is documented as throwing a checked exception "if IP + * address is of illegal length." We replace it with an unchecked exception, for use by callers + * who already know that addr is an array of length 4 or 16. + * + * @param addr the raw 4-byte or 16-byte IP address in big-endian order + * @return an InetAddress object created from the raw IP address + */ + private static InetAddress bytesToInetAddress(final byte[] addr) { + try { + return InetAddress.getByAddress(addr); + } catch (UnknownHostException e) { + throw new AssertionError(e); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/InternalConnection.java b/driver-core/src/main/com/mongodb/internal/connection/InternalConnection.java new file mode 100644 index 00000000000..792c33570b7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/InternalConnection.java @@ -0,0 +1,153 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.lang.Nullable; +import org.bson.ByteBuf; +import org.bson.codecs.Decoder; + +import java.util.List; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
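+ * <p>A rough sketch of the synchronous lifecycle (the commandMessage, decoder, and operationContext variables are illustrative):</p>
+ * <pre>{@code
+ * connection.open(operationContext);
+ * BsonDocument reply = connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), operationContext);
+ * connection.close();
+ * }</pre>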
+ */
+public interface InternalConnection extends BufferProvider {
+
+    int NOT_INITIALIZED_GENERATION = -1;
+
+    /**
+     * Gets the description of this connection.
+     *
+     * @return the connection description
+     */
+    ConnectionDescription getDescription();
+
+    /**
+     * Get the initial server description
+     *
+     * @return the initial server description
+     */
+    ServerDescription getInitialServerDescription();
+
+    /**
+     * Opens the connection so it's ready for use. Will perform a handshake.
+     *
+     * @param operationContext the operation context
+     */
+    void open(OperationContext operationContext);
+
+    /**
+     * Opens the connection so it's ready for use
+     *
+     * @param operationContext the operation context
+     * @param callback the callback to be called once the connection has been opened
+     */
+    void openAsync(OperationContext operationContext, SingleResultCallback<Void> callback);
+
+    /**
+     * Closes the connection.
+     */
+    void close();
+
+    /**
+     * Returns if the connection has been opened
+     *
+     * @return true if connection has been opened
+     */
+    boolean opened();
+
+    /**
+     * Returns the closed state of the connection
+     *
+     * @return true if connection is closed
+     */
+    boolean isClosed();
+
+    /**
+     * Gets the generation of this connection. This can be used by connection pools to track whether the connection is stale.
+     *
+     * @return the generation
+     */
+    int getGeneration();
+
+    /**
+     * Send a command message to the server.
+     *
+     * @param message the command message to send
+     * @param operationContext the operation context
+     */
+    @Nullable
+    <T> T sendAndReceive(CommandMessage message, Decoder<T> decoder, OperationContext operationContext);
+
+    <T> void send(CommandMessage message, Decoder<T> decoder, OperationContext operationContext);
+
+    <T> T receive(Decoder<T> decoder, OperationContext operationContext);
+
+    boolean hasMoreToCome();
+
+    /**
+     * Send a command message to the server.
+     *
+     * @param message the command message to send
+     * @param callback the callback
+     */
+    <T> void sendAndReceiveAsync(CommandMessage message, Decoder<T> decoder, OperationContext operationContext,
+            SingleResultCallback<T> callback);
+
+    /**
+     * Send a message to the server. The connection may not make any attempt to validate the integrity of the message.
+     *
+     * @param byteBuffers the list of byte buffers to send.
+     * @param lastRequestId the request id of the last message in byteBuffers
+     * @param operationContext the operation context
+     */
+    void sendMessage(List<ByteBuf> byteBuffers, int lastRequestId, OperationContext operationContext);
+
+    /**
+     * Receive a response to a sent message from the server.
+     *
+     * @param responseTo the request id that this message is a response to
+     * @param operationContext the operation context
+     * @return the response
+     */
+    ResponseBuffers receiveMessage(int responseTo, OperationContext operationContext);
+
+    /**
+     * Asynchronously send a message to the server. The connection may not make any attempt to validate the integrity of the message.
+     *
+     * @param byteBuffers the list of byte buffers to send
+     * @param lastRequestId the request id of the last message in byteBuffers
+     * @param operationContext the operation context
+     * @param callback the callback to invoke on completion
+     */
+    void sendMessageAsync(List<ByteBuf> byteBuffers, int lastRequestId, OperationContext operationContext,
+            SingleResultCallback<Void> callback);
+
+    /**
+     * Asynchronously receive a response to a sent message from the server.
+     *
+     * @param responseTo the request id that this message is a response to
+     * @param operationContext the operation context
+     * @param callback the callback to invoke on completion
+     */
+    void receiveMessageAsync(int responseTo, OperationContext operationContext, SingleResultCallback<ResponseBuffers> callback);
+
+    default void markAsPinned(Connection.PinningMode pinningMode) {
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/internal/connection/InternalConnectionFactory.java b/driver-core/src/main/com/mongodb/internal/connection/InternalConnectionFactory.java
new file mode 100644
index 00000000000..c788f4668e6
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/connection/InternalConnectionFactory.java
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.connection;
+
+import com.mongodb.annotations.ThreadSafe;
+import com.mongodb.connection.ServerId;
+import com.mongodb.lang.NonNull;
+import org.bson.types.ObjectId;
+
+@ThreadSafe
+interface InternalConnectionFactory {
+    default InternalConnection create(ServerId serverId) {
+        return create(serverId, new ConnectionGenerationSupplier() {
+            @Override
+            public int getGeneration() {
+                return 0;
+            }
+
+            @Override
+            public int getGeneration(@NonNull final ObjectId serviceId) {
+                return 0;
+            }
+        });
+    }
+
+    InternalConnection create(ServerId serverId, ConnectionGenerationSupplier connectionGenerationSupplier);
+}
diff --git a/driver-core/src/main/com/mongodb/internal/connection/InternalConnectionInitializationDescription.java b/driver-core/src/main/com/mongodb/internal/connection/InternalConnectionInitializationDescription.java
new file mode 100644
index 00000000000..0b9e55cb149
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/connection/InternalConnectionInitializationDescription.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.connection;
+
+import com.mongodb.annotations.Immutable;
+import com.mongodb.connection.ConnectionDescription;
+import com.mongodb.connection.ServerDescription;
+
+/**
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
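+ * <p>Instances are immutable; {@code withConnectionDescription} returns a copy with the connection description replaced, e.g.:</p>
+ * <pre>{@code
+ * InternalConnectionInitializationDescription updated =
+ *         description.withConnectionDescription(enrichedDescription); // argument name illustrative
+ * }</pre>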
+ */
+@Immutable
+public class InternalConnectionInitializationDescription {
+    private final ConnectionDescription connectionDescription;
+    private final ServerDescription serverDescription;
+
+    public InternalConnectionInitializationDescription(final ConnectionDescription connectionDescription,
+                                                       final ServerDescription serverDescription) {
+        this.connectionDescription = connectionDescription;
+        this.serverDescription = serverDescription;
+    }
+
+    public ConnectionDescription getConnectionDescription() {
+        return connectionDescription;
+    }
+
+    public ServerDescription getServerDescription() {
+        return serverDescription;
+    }
+
+    public InternalConnectionInitializationDescription withConnectionDescription(final ConnectionDescription connectionDescription) {
+        return new InternalConnectionInitializationDescription(connectionDescription, serverDescription);
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/internal/connection/InternalConnectionInitializer.java b/driver-core/src/main/com/mongodb/internal/connection/InternalConnectionInitializer.java
new file mode 100644
index 00000000000..077e2c68254
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/connection/InternalConnectionInitializer.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.connection;
+
+import com.mongodb.internal.async.SingleResultCallback;
+
+interface InternalConnectionInitializer {
+
+    InternalConnectionInitializationDescription startHandshake(InternalConnection internalConnection,
+            OperationContext operationContext);
+
+    InternalConnectionInitializationDescription finishHandshake(InternalConnection internalConnection,
+            InternalConnectionInitializationDescription description,
+            OperationContext operationContext);
+
+    void startHandshakeAsync(InternalConnection internalConnection,
+            OperationContext operationContext,
+            SingleResultCallback<InternalConnectionInitializationDescription> callback);
+
+    void finishHandshakeAsync(InternalConnection internalConnection,
+            InternalConnectionInitializationDescription description,
+            OperationContext operationContext,
+            SingleResultCallback<InternalConnectionInitializationDescription> callback);
+}
diff --git a/driver-core/src/main/com/mongodb/internal/connection/InternalConnectionPoolSettings.java b/driver-core/src/main/com/mongodb/internal/connection/InternalConnectionPoolSettings.java
new file mode 100644
index 00000000000..97733dab75c
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/connection/InternalConnectionPoolSettings.java
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.connection; + +import com.mongodb.annotations.Immutable; +import com.mongodb.annotations.NotThreadSafe; + +import java.util.Objects; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
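+ * <p>For example:</p>
+ * <pre>{@code
+ * InternalConnectionPoolSettings settings = InternalConnectionPoolSettings.builder()
+ *         .prestartAsyncWorkManager(true)
+ *         .build();
+ * }</pre>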
+ */ +@Immutable +public final class InternalConnectionPoolSettings { + private final boolean prestartAsyncWorkManager; + + private InternalConnectionPoolSettings(final Builder builder) { + prestartAsyncWorkManager = builder.prestartAsyncWorkManager; + } + + public static Builder builder() { + return new Builder(); + } + + /** + * Specifies whether to pre-start the asynchronous work manager of the pool. + *
<p>
+ * Default is {@code false}. + * + * @return {@code true} iff pool's asynchronous work manager must be pre-started. + * @see Builder#prestartAsyncWorkManager(boolean) + */ + public boolean isPrestartAsyncWorkManager() { + return prestartAsyncWorkManager; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + InternalConnectionPoolSettings that = (InternalConnectionPoolSettings) o; + return prestartAsyncWorkManager == that.prestartAsyncWorkManager; + } + + @Override + public int hashCode() { + return Objects.hash(prestartAsyncWorkManager); + } + + @Override + public String toString() { + return "InternalConnectionPoolSettings{" + + "prestartAsyncWorkManager=" + prestartAsyncWorkManager + + '}'; + } + + @NotThreadSafe + public static final class Builder { + private boolean prestartAsyncWorkManager = false; + + private Builder() { + } + + /** + * Allows to pre-start the asynchronous work manager of the pool. + * + * @param prestart {@code true} iff pool's asynchronous work manager must be pre-started. + * @return {@code this}. + * @see InternalConnectionPoolSettings#isPrestartAsyncWorkManager() + */ + public Builder prestartAsyncWorkManager(final boolean prestart) { + prestartAsyncWorkManager = prestart; + return this; + } + + public InternalConnectionPoolSettings build() { + return new InternalConnectionPoolSettings(this); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/InternalOperationContextFactory.java b/driver-core/src/main/com/mongodb/internal/connection/InternalOperationContextFactory.java new file mode 100644 index 00000000000..4653c90050b --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/InternalOperationContextFactory.java @@ -0,0 +1,50 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.internal.connection; + +import com.mongodb.ServerApi; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.lang.Nullable; + +import static com.mongodb.internal.connection.OperationContext.simpleOperationContext; + +public final class InternalOperationContextFactory { + + private final TimeoutSettings timeoutSettings; + @Nullable + private final ServerApi serverApi; + + public InternalOperationContextFactory(final TimeoutSettings timeoutSettings, @Nullable final ServerApi serverApi) { + this.timeoutSettings = timeoutSettings; + this.serverApi = serverApi; + } + + /** + * @return a simple operation context without timeoutMS + */ + OperationContext create() { + return simpleOperationContext(timeoutSettings.connectionOnly(), serverApi); + } + + /** + * @return a simple operation context with timeoutMS if set at the MongoClientSettings level + */ + + OperationContext createMaintenanceContext() { + return create().withTimeoutContext(TimeoutContext.createMaintenanceTimeoutContext(timeoutSettings)); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnection.java b/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnection.java new file mode 100644 index 00000000000..bf009aa1b07 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnection.java @@ -0,0 +1,970 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.LoggerSettings; +import com.mongodb.MongoClientException; +import com.mongodb.MongoCommandException; +import com.mongodb.MongoCompressor; +import com.mongodb.MongoException; +import com.mongodb.MongoInternalException; +import com.mongodb.MongoInterruptedException; +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.MongoSocketClosedException; +import com.mongodb.MongoSocketReadException; +import com.mongodb.MongoSocketReadTimeoutException; +import com.mongodb.MongoSocketWriteException; +import com.mongodb.MongoSocketWriteTimeoutException; +import com.mongodb.ServerAddress; +import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.connection.AsyncCompletionHandler; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ConnectionId; +import com.mongodb.connection.ServerConnectionState; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerId; +import com.mongodb.connection.ServerType; +import com.mongodb.event.CommandListener; +import com.mongodb.internal.ResourceUtil; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.async.AsyncSupplier; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.internal.logging.StructuredLogger; +import com.mongodb.internal.session.SessionContext; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import org.bson.BsonBinaryReader; +import org.bson.BsonDocument; +import org.bson.ByteBuf; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.Decoder; +import org.bson.io.ByteBufferBsonInput; + +import java.io.IOException; +import java.net.SocketTimeoutException; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Supplier; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.assertNull; +import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.TimeoutContext.createMongoTimeoutException; +import static com.mongodb.internal.async.AsyncRunnable.beginAsync; +import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; +import static com.mongodb.internal.connection.Authenticator.shouldAuthenticate; +import static com.mongodb.internal.connection.CommandHelper.HELLO; +import static com.mongodb.internal.connection.CommandHelper.LEGACY_HELLO; +import static com.mongodb.internal.connection.CommandHelper.LEGACY_HELLO_LOWER; +import static com.mongodb.internal.connection.MessageHeader.MESSAGE_HEADER_LENGTH; +import static com.mongodb.internal.connection.OpCode.OP_COMPRESSED; +import static com.mongodb.internal.connection.ProtocolHelper.createSpecialWriteConcernException; +import static com.mongodb.internal.connection.ProtocolHelper.getClusterTime; +import static com.mongodb.internal.connection.ProtocolHelper.getCommandFailureException; +import static 
com.mongodb.internal.connection.ProtocolHelper.getMessageSettings; +import static com.mongodb.internal.connection.ProtocolHelper.getOperationTime; +import static com.mongodb.internal.connection.ProtocolHelper.getRecoveryToken; +import static com.mongodb.internal.connection.ProtocolHelper.getSnapshotTimestamp; +import static com.mongodb.internal.connection.ProtocolHelper.isCommandOk; +import static com.mongodb.internal.logging.LogMessage.Level.DEBUG; +import static com.mongodb.internal.thread.InterruptionUtil.translateInterruptedException; +import static java.util.Arrays.asList; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */
+@NotThreadSafe
+public class InternalStreamConnection implements InternalConnection {
+
+    private static volatile boolean recordEverything = false;
+
+    /**
+     * Will attempt to record events to the command listener that are usually
+     * suppressed.
+     *
+     * @param recordEverything whether to attempt to record everything
+     */
+    @VisibleForTesting(otherwise = VisibleForTesting.AccessModifier.PRIVATE)
+    public static void setRecordEverything(final boolean recordEverything) {
+        InternalStreamConnection.recordEverything = recordEverything;
+    }
+
+    private static final Set<String> SECURITY_SENSITIVE_COMMANDS = new HashSet<>(asList(
+            "authenticate",
+            "saslStart",
+            "saslContinue",
+            "getnonce",
+            "createUser",
+            "updateUser",
+            "copydbgetnonce",
+            "copydbsaslstart",
+            "copydb"));
+
+    private static final Set<String> SECURITY_SENSITIVE_HELLO_COMMANDS = new HashSet<>(asList(
+            HELLO,
+            LEGACY_HELLO,
+            LEGACY_HELLO_LOWER));
+
+    private static final Logger LOGGER = Loggers.getLogger("connection");
+
+    private final ClusterConnectionMode clusterConnectionMode;
+    @Nullable
+    private final Authenticator authenticator;
+    private final boolean isMonitoringConnection;
+    private final ServerId serverId;
+    private final ConnectionGenerationSupplier connectionGenerationSupplier;
+    private final StreamFactory streamFactory;
+    private final InternalConnectionInitializer connectionInitializer;
+    private volatile ConnectionDescription description;
+    private volatile ServerDescription initialServerDescription;
+    private volatile Stream stream;
+
+    private final AtomicBoolean isClosed = new AtomicBoolean();
+    private final AtomicBoolean opened = new AtomicBoolean();
+    private final AtomicBoolean authenticated = new AtomicBoolean();
+
+    private final List<MongoCompressor> compressorList;
+    private final LoggerSettings loggerSettings;
+    private final CommandListener commandListener;
+    @Nullable private volatile Compressor sendCompressor;
+    private final Map<Byte, Compressor> compressorMap;
+    private volatile boolean hasMoreToCome;
+    private volatile int responseTo;
+    private int generation = NOT_INITIALIZED_GENERATION;
+
+    // Package-level access provided to avoid duplicating the list in test code
+    static Set<String> getSecuritySensitiveCommands() {
+        return Collections.unmodifiableSet(SECURITY_SENSITIVE_COMMANDS);
+    }
+
+    // Package-level access provided to avoid duplicating the list in test code
+    static Set<String> getSecuritySensitiveHelloCommands() {
+        return Collections.unmodifiableSet(SECURITY_SENSITIVE_HELLO_COMMANDS);
+    }
+
+    @VisibleForTesting(otherwise = VisibleForTesting.AccessModifier.PRIVATE)
+    public InternalStreamConnection(final ClusterConnectionMode clusterConnectionMode, final ServerId serverId,
+            final ConnectionGenerationSupplier connectionGenerationSupplier,
+            final StreamFactory streamFactory, final List<MongoCompressor> compressorList,
+            final CommandListener commandListener, final InternalConnectionInitializer connectionInitializer) {
+        this(clusterConnectionMode, null, false, serverId, connectionGenerationSupplier, streamFactory, compressorList,
+                LoggerSettings.builder().build(), commandListener, connectionInitializer);
+    }
+
+    public InternalStreamConnection(final ClusterConnectionMode clusterConnectionMode,
+            @Nullable final Authenticator authenticator,
+            final boolean isMonitoringConnection,
+            final ServerId serverId,
+            final ConnectionGenerationSupplier connectionGenerationSupplier,
+            final StreamFactory streamFactory, final List<MongoCompressor> compressorList,
+            final LoggerSettings loggerSettings,
+            final CommandListener commandListener, final InternalConnectionInitializer
connectionInitializer) {
+        this.clusterConnectionMode = clusterConnectionMode;
+        this.authenticator = authenticator;
+        this.isMonitoringConnection = isMonitoringConnection;
+        this.serverId = notNull("serverId", serverId);
+        this.connectionGenerationSupplier = notNull("connectionGeneration", connectionGenerationSupplier);
+        this.streamFactory = notNull("streamFactory", streamFactory);
+        this.compressorList = notNull("compressorList", compressorList);
+        this.compressorMap = createCompressorMap(compressorList);
+        this.loggerSettings = loggerSettings;
+        this.commandListener = commandListener;
+        this.connectionInitializer = notNull("connectionInitializer", connectionInitializer);
+        description = new ConnectionDescription(serverId);
+        initialServerDescription = ServerDescription.builder()
+                .address(serverId.getAddress())
+                .type(ServerType.UNKNOWN)
+                .state(ServerConnectionState.CONNECTING)
+                .build();
+        if (clusterConnectionMode != ClusterConnectionMode.LOAD_BALANCED) {
+            generation = connectionGenerationSupplier.getGeneration();
+        }
+    }
+
+    @Override
+    public ConnectionDescription getDescription() {
+        return description;
+    }
+
+    @Override
+    public ServerDescription getInitialServerDescription() {
+        return initialServerDescription;
+    }
+
+    @Override
+    public int getGeneration() {
+        return generation;
+    }
+
+    @Override
+    public void open(final OperationContext originalOperationContext) {
+        isTrue("Open already called", stream == null);
+        stream = streamFactory.create(serverId.getAddress());
+        try {
+            OperationContext operationContext = originalOperationContext
+                    .withTimeoutContext(originalOperationContext.getTimeoutContext().withComputedServerSelectionTimeoutContext());
+
+            stream.open(operationContext);
+
+            InternalConnectionInitializationDescription initializationDescription = connectionInitializer.startHandshake(this, operationContext);
+            initAfterHandshakeStart(initializationDescription);
+
+            initializationDescription = connectionInitializer.finishHandshake(this, initializationDescription, operationContext);
+            initAfterHandshakeFinish(initializationDescription);
+        } catch (Throwable t) {
+            close();
+            if (t instanceof MongoException) {
+                throw (MongoException) t;
+            } else {
+                throw new MongoException(t.toString(), t);
+            }
+        }
+    }
+
+    @Override
+    public void openAsync(final OperationContext originalOperationContext, final SingleResultCallback<Void> callback) {
+        assertNull(stream);
+        try {
+            OperationContext operationContext = originalOperationContext
+                    .withTimeoutContext(originalOperationContext.getTimeoutContext().withComputedServerSelectionTimeoutContext());
+
+            stream = streamFactory.create(serverId.getAddress());
+            stream.openAsync(operationContext, new AsyncCompletionHandler<Void>() {
+
+                @Override
+                public void completed(@Nullable final Void aVoid) {
+                    connectionInitializer.startHandshakeAsync(InternalStreamConnection.this, operationContext,
+                            (initialResult, initialException) -> {
+                                if (initialException != null) {
+                                    close();
+                                    callback.onResult(null, initialException);
+                                } else {
+                                    assertNotNull(initialResult);
+                                    initAfterHandshakeStart(initialResult);
+                                    connectionInitializer.finishHandshakeAsync(InternalStreamConnection.this,
+                                            initialResult, operationContext, (completedResult, completedException) -> {
+                                                if (completedException != null) {
+                                                    close();
+                                                    callback.onResult(null, completedException);
+                                                } else {
+                                                    assertNotNull(completedResult);
+                                                    initAfterHandshakeFinish(completedResult);
+                                                    callback.onResult(null, null);
+                                                }
+                                            });
+                                }
+                            });
+                }
+
+                @Override
+                public void failed(final Throwable t) {
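+                    // A failure at any stage of the async handshake closes the connection before the error is surfaced.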
+                    close();
+                    callback.onResult(null, t);
+                }
+            });
+        } catch (Throwable t) {
+            close();
+            callback.onResult(null, t);
+        }
+    }
+
+    private void initAfterHandshakeStart(final InternalConnectionInitializationDescription initializationDescription) {
+        description = initializationDescription.getConnectionDescription();
+        initialServerDescription = initializationDescription.getServerDescription();
+
+        if (clusterConnectionMode == ClusterConnectionMode.LOAD_BALANCED) {
+            generation = connectionGenerationSupplier.getGeneration(assertNotNull(description.getServiceId()));
+        }
+    }
+
+    private void initAfterHandshakeFinish(final InternalConnectionInitializationDescription initializationDescription) {
+        description = initializationDescription.getConnectionDescription();
+        initialServerDescription = initializationDescription.getServerDescription();
+        opened.set(true);
+        authenticated.set(true);
+        sendCompressor = findSendCompressor(description);
+    }
+
+    private Map<Byte, Compressor> createCompressorMap(final List<MongoCompressor> compressorList) {
+        Map<Byte, Compressor> compressorMap = new HashMap<>(this.compressorList.size());
+
+        for (MongoCompressor mongoCompressor : compressorList) {
+            Compressor compressor = createCompressor(mongoCompressor);
+            compressorMap.put(compressor.getId(), compressor);
+        }
+        return compressorMap;
+    }
+
+    @Nullable
+    private Compressor findSendCompressor(final ConnectionDescription description) {
+        if (description.getCompressors().isEmpty()) {
+            return null;
+        }
+
+        String firstCompressorName = description.getCompressors().get(0);
+
+        for (Compressor compressor : compressorMap.values()) {
+            if (compressor.getName().equals(firstCompressorName)) {
+                return compressor;
+            }
+        }
+
+        throw new MongoInternalException("Unexpected compressor negotiated: " + firstCompressorName);
+    }
+
+    private Compressor createCompressor(final MongoCompressor mongoCompressor) {
+        switch (mongoCompressor.getName()) {
+            case "zlib":
+                return new ZlibCompressor(mongoCompressor);
+            case "snappy":
+                return new SnappyCompressor();
+            case "zstd":
+                return new ZstdCompressor();
+            default:
+                throw new MongoClientException("Unsupported compressor " + mongoCompressor.getName());
+        }
+    }
+
+    @Override
+    public void close() {
+        // All but the first call is a no-op
+        if (!isClosed.getAndSet(true) && (stream != null)) {
+            stream.close();
+        }
+    }
+
+    @Override
+    public boolean opened() {
+        return opened.get();
+    }
+
+    @Override
+    public boolean isClosed() {
+        return isClosed.get();
+    }
+
+    @Nullable
+    @Override
+    public <T> T sendAndReceive(final CommandMessage message, final Decoder<T> decoder, final OperationContext operationContext) {
+        Supplier<T> sendAndReceiveInternal = () -> sendAndReceiveInternal(
+                message, decoder, operationContext);
+        try {
+            return sendAndReceiveInternal.get();
+        } catch (MongoCommandException e) {
+            if (reauthenticationIsTriggered(e)) {
+                return reauthenticateAndRetry(sendAndReceiveInternal, operationContext);
+            }
+            throw e;
+        }
+    }
+
+    @Override
+    public <T> void sendAndReceiveAsync(final CommandMessage message, final Decoder<T> decoder,
+            final OperationContext operationContext,
+            final SingleResultCallback<T> callback) {
+
+        AsyncSupplier<T> sendAndReceiveAsyncInternal = c -> sendAndReceiveAsyncInternal(
+                message, decoder, operationContext, c);
+        beginAsync().<T>thenSupply(c -> {
+            sendAndReceiveAsyncInternal.getAsync(c);
+        }).onErrorIf(e -> reauthenticationIsTriggered(e), (t, c) -> {
+            reauthenticateAndRetryAsync(sendAndReceiveAsyncInternal, operationContext, c);
+        }).finish(callback);
+    }
+
+    private <T> T reauthenticateAndRetry(final Supplier<T> operation,
final OperationContext operationContext) { + authenticated.set(false); + assertNotNull(authenticator).reauthenticate(this, operationContext); + authenticated.set(true); + return operation.get(); + } + + private void reauthenticateAndRetryAsync(final AsyncSupplier operation, + final OperationContext operationContext, + final SingleResultCallback callback) { + beginAsync().thenRun(c -> { + authenticated.set(false); + assertNotNull(authenticator).reauthenticateAsync(this, operationContext, c); + }).thenSupply((c) -> { + authenticated.set(true); + operation.getAsync(c); + }).finish(callback); + } + + public boolean reauthenticationIsTriggered(@Nullable final Throwable t) { + if (!shouldAuthenticate(authenticator, this.description)) { + return false; + } + if (t instanceof MongoCommandException) { + MongoCommandException e = (MongoCommandException) t; + return e.getErrorCode() == 391; + } + return false; + } + + @Nullable + private T sendAndReceiveInternal(final CommandMessage message, final Decoder decoder, + final OperationContext operationContext) { + CommandEventSender commandEventSender; + try (ByteBufferBsonOutput bsonOutput = new ByteBufferBsonOutput(this)) { + message.encode(bsonOutput, operationContext); + commandEventSender = createCommandEventSender(message, bsonOutput, operationContext); + commandEventSender.sendStartedEvent(); + try { + sendCommandMessage(message, bsonOutput, operationContext); + } catch (Exception e) { + commandEventSender.sendFailedEvent(e); + throw e; + } + } + + if (message.isResponseExpected()) { + return receiveCommandMessageResponse(decoder, commandEventSender, operationContext); + } else { + commandEventSender.sendSucceededEventForOneWayCommand(); + return null; + } + } + + @Override + public void send(final CommandMessage message, final Decoder decoder, final OperationContext operationContext) { + try (ByteBufferBsonOutput bsonOutput = new ByteBufferBsonOutput(this)) { + message.encode(bsonOutput, operationContext); + sendCommandMessage(message, bsonOutput, operationContext); + if (message.isResponseExpected()) { + hasMoreToCome = true; + } + } + } + + @Override + public T receive(final Decoder decoder, final OperationContext operationContext) { + isTrue("Response is expected", hasMoreToCome); + return receiveCommandMessageResponse(decoder, new NoOpCommandEventSender(), operationContext); + } + + @Override + public boolean hasMoreToCome() { + return hasMoreToCome; + } + + private void sendCommandMessage(final CommandMessage message, final ByteBufferBsonOutput bsonOutput, + final OperationContext operationContext) { + + Compressor localSendCompressor = sendCompressor; + if (localSendCompressor == null || SECURITY_SENSITIVE_COMMANDS.contains(message.getCommandDocument(bsonOutput).getFirstKey())) { + trySendMessage(message, bsonOutput, operationContext); + } else { + ByteBufferBsonOutput compressedBsonOutput; + List byteBuffers = bsonOutput.getByteBuffers(); + try { + CompressedMessage compressedMessage = new CompressedMessage(message.getOpCode(), byteBuffers, localSendCompressor, + getMessageSettings(description, initialServerDescription)); + compressedBsonOutput = new ByteBufferBsonOutput(this); + compressedMessage.encode(compressedBsonOutput, operationContext); + } finally { + ResourceUtil.release(byteBuffers); + bsonOutput.close(); + } + trySendMessage(message, compressedBsonOutput, operationContext); + } + responseTo = message.getId(); + } + + private void trySendMessage(final CommandMessage message, final ByteBufferBsonOutput bsonOutput, + final 
OperationContext operationContext) { + Timeout.onExistsAndExpired(operationContext.getTimeoutContext().timeoutIncludingRoundTrip(), () -> { + throw TimeoutContext.createMongoRoundTripTimeoutException(); + }); + List byteBuffers = bsonOutput.getByteBuffers(); + try { + sendMessage(byteBuffers, message.getId(), operationContext); + } finally { + ResourceUtil.release(byteBuffers); + bsonOutput.close(); + } + } + + private T receiveCommandMessageResponse(final Decoder decoder, final CommandEventSender commandEventSender, + final OperationContext operationContext) { + boolean commandSuccessful = false; + try (ResponseBuffers responseBuffers = receiveResponseBuffers(operationContext)) { + updateSessionContext(operationContext.getSessionContext(), responseBuffers); + if (!isCommandOk(responseBuffers)) { + throw getCommandFailureException(responseBuffers.getResponseDocument(responseTo, + new BsonDocumentCodec()), description.getServerAddress(), operationContext.getTimeoutContext()); + } + + commandSuccessful = true; + commandEventSender.sendSucceededEvent(responseBuffers); + + T commandResult = getCommandResult(decoder, responseBuffers, responseTo, operationContext.getTimeoutContext()); + hasMoreToCome = responseBuffers.getReplyHeader().hasMoreToCome(); + if (hasMoreToCome) { + responseTo = responseBuffers.getReplyHeader().getRequestId(); + } else { + responseTo = 0; + } + + return commandResult; + } catch (Exception e) { + if (!commandSuccessful) { + commandEventSender.sendFailedEvent(e); + } + throw e; + } + } + + private void sendAndReceiveAsyncInternal(final CommandMessage message, final Decoder decoder, + final OperationContext operationContext, final SingleResultCallback callback) { + if (isClosed()) { + callback.onResult(null, new MongoSocketClosedException("Can not read from a closed socket", getServerAddress())); + return; + } + + ByteBufferBsonOutput bsonOutput = new ByteBufferBsonOutput(this); + ByteBufferBsonOutput compressedBsonOutput = new ByteBufferBsonOutput(this); + + try { + message.encode(bsonOutput, operationContext); + CommandEventSender commandEventSender = createCommandEventSender(message, bsonOutput, operationContext); + commandEventSender.sendStartedEvent(); + Compressor localSendCompressor = sendCompressor; + if (localSendCompressor == null || SECURITY_SENSITIVE_COMMANDS.contains(message.getCommandDocument(bsonOutput).getFirstKey())) { + sendCommandMessageAsync(message.getId(), decoder, operationContext, callback, bsonOutput, commandEventSender, + message.isResponseExpected()); + } else { + List byteBuffers = bsonOutput.getByteBuffers(); + try { + CompressedMessage compressedMessage = new CompressedMessage(message.getOpCode(), byteBuffers, localSendCompressor, + getMessageSettings(description, initialServerDescription)); + compressedMessage.encode(compressedBsonOutput, operationContext); + } finally { + ResourceUtil.release(byteBuffers); + bsonOutput.close(); + } + sendCommandMessageAsync(message.getId(), decoder, operationContext, callback, compressedBsonOutput, commandEventSender, + message.isResponseExpected()); + } + } catch (Throwable t) { + bsonOutput.close(); + compressedBsonOutput.close(); + callback.onResult(null, t); + } + } + + private void sendCommandMessageAsync(final int messageId, final Decoder decoder, final OperationContext operationContext, + final SingleResultCallback callback, final ByteBufferBsonOutput bsonOutput, + final CommandEventSender commandEventSender, final boolean responseExpected) { + boolean[] shouldReturn = {false}; + 
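// The shouldReturn flag lets the expiry callback below request an early return,
+ // since a lambda body cannot return from the enclosing method directly.
+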
Timeout.onExistsAndExpired(operationContext.getTimeoutContext().timeoutIncludingRoundTrip(), () -> { + bsonOutput.close(); + MongoOperationTimeoutException operationTimeoutException = TimeoutContext.createMongoRoundTripTimeoutException(); + commandEventSender.sendFailedEvent(operationTimeoutException); + callback.onResult(null, operationTimeoutException); + shouldReturn[0] = true; + }); + if (shouldReturn[0]) { + return; + } + + List byteBuffers = bsonOutput.getByteBuffers(); + sendMessageAsync(byteBuffers, messageId, operationContext, (result, t) -> { + ResourceUtil.release(byteBuffers); + bsonOutput.close(); + if (t != null) { + commandEventSender.sendFailedEvent(t); + callback.onResult(null, t); + } else if (!responseExpected) { + commandEventSender.sendSucceededEventForOneWayCommand(); + callback.onResult(null, null); + } else { + readAsync(MESSAGE_HEADER_LENGTH, operationContext, new MessageHeaderCallback(operationContext, (responseBuffers, t1) -> { + if (t1 != null) { + commandEventSender.sendFailedEvent(t1); + callback.onResult(null, t1); + return; + } + assertNotNull(responseBuffers); + T commandResult; + try { + updateSessionContext(operationContext.getSessionContext(), responseBuffers); + boolean commandOk = + isCommandOk(new BsonBinaryReader(new ByteBufferBsonInput(responseBuffers.getBodyByteBuffer()))); + responseBuffers.reset(); + if (!commandOk) { + MongoException commandFailureException = getCommandFailureException( + responseBuffers.getResponseDocument(messageId, new BsonDocumentCodec()), + description.getServerAddress(), operationContext.getTimeoutContext()); + commandEventSender.sendFailedEvent(commandFailureException); + throw commandFailureException; + } + commandEventSender.sendSucceededEvent(responseBuffers); + + commandResult = getCommandResult(decoder, responseBuffers, messageId, operationContext.getTimeoutContext()); + } catch (Throwable localThrowable) { + callback.onResult(null, localThrowable); + return; + } finally { + responseBuffers.close(); + } + callback.onResult(commandResult, null); + })); + } + }); + } + + private T getCommandResult(final Decoder decoder, + final ResponseBuffers responseBuffers, + final int messageId, + final TimeoutContext timeoutContext) { + T result = new ReplyMessage<>(responseBuffers, decoder, messageId).getDocument(); + MongoException writeConcernBasedError = createSpecialWriteConcernException(responseBuffers, + description.getServerAddress(), + timeoutContext); + if (writeConcernBasedError instanceof MongoOperationTimeoutException) { + throw writeConcernBasedError; + } + if (writeConcernBasedError != null) { + throw new MongoWriteConcernWithResponseException(writeConcernBasedError, result); + } + return result; + } + + @Override + public void sendMessage(final List byteBuffers, final int lastRequestId, final OperationContext operationContext) { + notNull("stream is open", stream); + if (isClosed()) { + throw new MongoSocketClosedException("Cannot write to a closed stream", getServerAddress()); + } + try { + stream.write(byteBuffers, operationContext); + } catch (Exception e) { + close(); + throwTranslatedWriteException(e, operationContext); + } + } + + @Override + public void sendMessageAsync( + final List byteBuffers, + final int lastRequestId, + final OperationContext operationContext, + final SingleResultCallback callback) { + beginAsync().thenRun((c) -> { + notNull("stream is open", stream); + if (isClosed()) { + throw new MongoSocketClosedException("Cannot write to a closed stream", getServerAddress()); + } + c.complete(c); + 
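// The catch block below mirrors the synchronous path: close the connection and
+ // translate the write failure before completing the callback exceptionally.
+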
}).thenRunTryCatchAsyncBlocks(c -> { + stream.writeAsync(byteBuffers, operationContext, c.asHandler()); + }, Exception.class, (e, c) -> { + try { + close(); + throwTranslatedWriteException(e, operationContext); + } catch (Throwable translatedException) { + c.completeExceptionally(translatedException); + } + }).finish(errorHandlingCallback(callback, LOGGER)); + } + + @Override + public ResponseBuffers receiveMessage(final int responseTo, final OperationContext operationContext) { + assertNotNull(stream); + if (isClosed()) { + throw new MongoSocketClosedException("Cannot read from a closed stream", getServerAddress()); + } + + return receiveResponseBuffers(operationContext); + } + + @Override + public void receiveMessageAsync(final int responseTo, final OperationContext operationContext, + final SingleResultCallback callback) { + assertNotNull(stream); + + if (isClosed()) { + callback.onResult(null, new MongoSocketClosedException("Can not read from a closed socket", getServerAddress())); + return; + } + + readAsync(MESSAGE_HEADER_LENGTH, operationContext, new MessageHeaderCallback(operationContext, (result, t) -> { + if (t != null) { + close(); + callback.onResult(null, t); + } else { + callback.onResult(result, null); + } + })); + } + + private void readAsync(final int numBytes, final OperationContext operationContext, final SingleResultCallback callback) { + if (isClosed()) { + callback.onResult(null, new MongoSocketClosedException("Cannot read from a closed stream", getServerAddress())); + return; + } + + try { + stream.readAsync(numBytes, operationContext, new AsyncCompletionHandler() { + @Override + public void completed(@Nullable final ByteBuf buffer) { + callback.onResult(buffer, null); + } + + @Override + public void failed(final Throwable t) { + close(); + callback.onResult(null, translateReadException(t, operationContext)); + } + }); + } catch (Exception e) { + close(); + callback.onResult(null, translateReadException(e, operationContext)); + } + } + + private ConnectionId getId() { + return description.getConnectionId(); + } + + private ServerAddress getServerAddress() { + return description.getServerAddress(); + } + + private void updateSessionContext(final SessionContext sessionContext, final ResponseBuffers responseBuffers) { + sessionContext.advanceOperationTime(getOperationTime(responseBuffers)); + sessionContext.advanceClusterTime(getClusterTime(responseBuffers)); + sessionContext.setSnapshotTimestamp(getSnapshotTimestamp(responseBuffers)); + if (sessionContext.hasActiveTransaction()) { + BsonDocument recoveryToken = getRecoveryToken(responseBuffers); + if (recoveryToken != null) { + sessionContext.setRecoveryToken(recoveryToken); + } + } + } + + private void throwTranslatedWriteException(final Throwable e, final OperationContext operationContext) { + if (e instanceof MongoSocketWriteTimeoutException && operationContext.getTimeoutContext().hasTimeoutMS()) { + throw createMongoTimeoutException(e); + } + + if (e instanceof MongoException) { + throw (MongoException) e; + } + Optional interruptedException = translateInterruptedException(e, "Interrupted while sending message"); + if (interruptedException.isPresent()) { + throw interruptedException.get(); + } else if (e instanceof IOException) { + throw new MongoSocketWriteException("Exception sending message", getServerAddress(), e); + } else { + throw new MongoInternalException("Unexpected exception", e); + } + } + + private MongoException translateReadException(final Throwable e, final OperationContext operationContext) { + if 
(operationContext.getTimeoutContext().hasTimeoutMS()) { + if (e instanceof SocketTimeoutException) { + return createMongoTimeoutException(createReadTimeoutException((SocketTimeoutException) e)); + } else if (e instanceof MongoSocketReadTimeoutException) { + return createMongoTimeoutException((e)); + } + } + + if (e instanceof MongoException) { + return (MongoException) e; + } + Optional interruptedException = translateInterruptedException(e, "Interrupted while receiving message"); + if (interruptedException.isPresent()) { + return interruptedException.get(); + } else if (e instanceof SocketTimeoutException) { + return createReadTimeoutException((SocketTimeoutException) e); + } else if (e instanceof IOException) { + return new MongoSocketReadException("Exception receiving message", getServerAddress(), e); + } else if (e instanceof RuntimeException) { + return new MongoInternalException("Unexpected runtime exception", e); + } else { + return new MongoInternalException("Unexpected exception", e); + } + } + + private MongoSocketReadTimeoutException createReadTimeoutException(final SocketTimeoutException e) { + return new MongoSocketReadTimeoutException("Timeout while receiving message", + getServerAddress(), e); + } + + private ResponseBuffers receiveResponseBuffers(final OperationContext operationContext) { + try { + ByteBuf messageHeaderBuffer = stream.read(MESSAGE_HEADER_LENGTH, operationContext); + MessageHeader messageHeader; + try { + messageHeader = new MessageHeader(messageHeaderBuffer, description.getMaxMessageSize()); + } finally { + messageHeaderBuffer.release(); + } + + ByteBuf messageBuffer = stream.read(messageHeader.getMessageLength() - MESSAGE_HEADER_LENGTH, operationContext); + boolean releaseMessageBuffer = true; + try { + if (messageHeader.getOpCode() == OP_COMPRESSED.getValue()) { + CompressedHeader compressedHeader = new CompressedHeader(messageBuffer, messageHeader); + + Compressor compressor = getCompressor(compressedHeader); + + ByteBuf buffer = getBuffer(compressedHeader.getUncompressedSize()); + compressor.uncompress(messageBuffer, buffer); + + buffer.flip(); + return new ResponseBuffers(new ReplyHeader(buffer, compressedHeader), buffer); + } else { + ResponseBuffers responseBuffers = new ResponseBuffers(new ReplyHeader(messageBuffer, messageHeader), messageBuffer); + releaseMessageBuffer = false; + return responseBuffers; + } + } finally { + if (releaseMessageBuffer) { + messageBuffer.release(); + } + } + } catch (Throwable t) { + close(); + throw translateReadException(t, operationContext); + } + } + + private Compressor getCompressor(final CompressedHeader compressedHeader) { + Compressor compressor = compressorMap.get(compressedHeader.getCompressorId()); + if (compressor == null) { + throw new MongoClientException("Unsupported compressor with identifier " + compressedHeader.getCompressorId()); + } + return compressor; + } + + @Override + public ByteBuf getBuffer(final int size) { + notNull("open", stream); + return stream.getBuffer(size); + } + + private class MessageHeaderCallback implements SingleResultCallback { + private final OperationContext operationContext; + private final SingleResultCallback callback; + + MessageHeaderCallback(final OperationContext operationContext, final SingleResultCallback callback) { + this.operationContext = operationContext; + this.callback = callback; + } + + @Override + public void onResult(@Nullable final ByteBuf result, @Nullable final Throwable t) { + if (t != null) { + callback.onResult(null, t); + return; + } + try { + 
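// The fixed-length message header has been read; parse it, then asynchronously read
+ // the rest of the message, whose size is taken from the header.
+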
assertNotNull(result); + MessageHeader messageHeader = new MessageHeader(result, description.getMaxMessageSize()); + readAsync(messageHeader.getMessageLength() - MESSAGE_HEADER_LENGTH, operationContext, + new MessageCallback(messageHeader)); + } catch (Throwable localThrowable) { + callback.onResult(null, localThrowable); + } finally { + if (result != null) { + result.release(); + } + } + } + + private class MessageCallback implements SingleResultCallback { + private final MessageHeader messageHeader; + + MessageCallback(final MessageHeader messageHeader) { + this.messageHeader = messageHeader; + } + + @Override + public void onResult(@Nullable final ByteBuf result, @Nullable final Throwable t) { + if (t != null) { + callback.onResult(null, t); + return; + } + boolean releaseResult = true; + assertNotNull(result); + try { + ReplyHeader replyHeader; + ByteBuf responseBuffer; + if (messageHeader.getOpCode() == OP_COMPRESSED.getValue()) { + try { + CompressedHeader compressedHeader = new CompressedHeader(result, messageHeader); + Compressor compressor = getCompressor(compressedHeader); + ByteBuf buffer = getBuffer(compressedHeader.getUncompressedSize()); + compressor.uncompress(result, buffer); + + buffer.flip(); + replyHeader = new ReplyHeader(buffer, compressedHeader); + responseBuffer = buffer; + } finally { + releaseResult = false; + result.release(); + } + } else { + replyHeader = new ReplyHeader(result, messageHeader); + responseBuffer = result; + releaseResult = false; + } + callback.onResult(new ResponseBuffers(replyHeader, responseBuffer), null); + } catch (Throwable localThrowable) { + callback.onResult(null, localThrowable); + } finally { + if (releaseResult) { + result.release(); + } + } + } + } + } + + private static final StructuredLogger COMMAND_PROTOCOL_LOGGER = new StructuredLogger("protocol.command"); + + private CommandEventSender createCommandEventSender(final CommandMessage message, final ByteBufferBsonOutput bsonOutput, + final OperationContext operationContext) { + boolean listensOrLogs = commandListener != null || COMMAND_PROTOCOL_LOGGER.isRequired(DEBUG, getClusterId()); + if (!recordEverything && (isMonitoringConnection || !opened() || !authenticated.get() || !listensOrLogs)) { + return new NoOpCommandEventSender(); + } + return new LoggingCommandEventSender( + SECURITY_SENSITIVE_COMMANDS, SECURITY_SENSITIVE_HELLO_COMMANDS, description, commandListener, + operationContext, message, bsonOutput, + COMMAND_PROTOCOL_LOGGER, loggerSettings); + } + + private ClusterId getClusterId() { + return description.getConnectionId().getServerId().getClusterId(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnectionFactory.java b/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnectionFactory.java new file mode 100644 index 00000000000..252d62c35f8 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnectionFactory.java @@ -0,0 +1,107 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.AuthenticationMechanism; +import com.mongodb.LoggerSettings; +import com.mongodb.MongoCompressor; +import com.mongodb.ServerApi; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ServerId; +import com.mongodb.event.CommandListener; +import com.mongodb.lang.Nullable; + +import java.util.List; + +import static com.mongodb.assertions.Assertions.notNull; + +class InternalStreamConnectionFactory implements InternalConnectionFactory { + private final ClusterConnectionMode clusterConnectionMode; + private final boolean isMonitoringConnection; + private final StreamFactory streamFactory; + private final ClientMetadata clientMetadata; + private final List compressorList; + private final LoggerSettings loggerSettings; + private final CommandListener commandListener; + @Nullable + private final ServerApi serverApi; + private final MongoCredentialWithCache credential; + + InternalStreamConnectionFactory(final ClusterConnectionMode clusterConnectionMode, + final StreamFactory streamFactory, + @Nullable final MongoCredentialWithCache credential, + final ClientMetadata clientMetadata, + final List compressorList, + final LoggerSettings loggerSettings, @Nullable final CommandListener commandListener, + @Nullable final ServerApi serverApi) { + this(clusterConnectionMode, false, streamFactory, credential, clientMetadata, compressorList, + loggerSettings, commandListener, serverApi); + } + + InternalStreamConnectionFactory(final ClusterConnectionMode clusterConnectionMode, final boolean isMonitoringConnection, + final StreamFactory streamFactory, + @Nullable final MongoCredentialWithCache credential, + final ClientMetadata clientMetadata, + final List compressorList, + final LoggerSettings loggerSettings, @Nullable final CommandListener commandListener, @Nullable final ServerApi serverApi) { + this.clusterConnectionMode = clusterConnectionMode; + this.isMonitoringConnection = isMonitoringConnection; + this.streamFactory = notNull("streamFactory", streamFactory); + this.compressorList = notNull("compressorList", compressorList); + this.loggerSettings = loggerSettings; + this.commandListener = commandListener; + this.serverApi = serverApi; + this.clientMetadata = clientMetadata; + this.credential = credential; + } + + @Override + public InternalConnection create(final ServerId serverId, final ConnectionGenerationSupplier connectionGenerationSupplier) { + Authenticator authenticator = credential == null ? 
null : createAuthenticator(credential); + InternalStreamConnectionInitializer connectionInitializer = new InternalStreamConnectionInitializer( + clusterConnectionMode, authenticator, clientMetadata.getBsonDocument(), compressorList, serverApi); + return new InternalStreamConnection( + clusterConnectionMode, authenticator, + isMonitoringConnection, serverId, connectionGenerationSupplier, + streamFactory, compressorList, loggerSettings, commandListener, + connectionInitializer); + } + + private Authenticator createAuthenticator(final MongoCredentialWithCache credential) { + AuthenticationMechanism authenticationMechanism = credential.getAuthenticationMechanism(); + if (authenticationMechanism == null) { + return new DefaultAuthenticator(credential, clusterConnectionMode, serverApi); + } + switch (authenticationMechanism) { + case GSSAPI: + return new GSSAPIAuthenticator(credential, clusterConnectionMode, serverApi); + case PLAIN: + return new PlainAuthenticator(credential, clusterConnectionMode, serverApi); + case MONGODB_X509: + return new X509Authenticator(credential, clusterConnectionMode, serverApi); + case SCRAM_SHA_1: + case SCRAM_SHA_256: + return new ScramShaAuthenticator(credential, clusterConnectionMode, serverApi); + case MONGODB_AWS: + return new AwsAuthenticator(credential, clusterConnectionMode, serverApi); + case MONGODB_OIDC: + return new OidcAuthenticator(credential, clusterConnectionMode, serverApi); + default: + throw new IllegalArgumentException("Unsupported authentication mechanism " + authenticationMechanism); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnectionInitializer.java b/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnectionInitializer.java new file mode 100644 index 00000000000..79c21f33356 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/InternalStreamConnectionInitializer.java @@ -0,0 +1,233 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoCompressor; +import com.mongodb.MongoCredential; +import com.mongodb.MongoException; +import com.mongodb.MongoSecurityException; +import com.mongodb.ServerApi; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ConnectionId; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; + +import java.util.List; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.connection.CommandHelper.HELLO; +import static com.mongodb.internal.connection.CommandHelper.LEGACY_HELLO; +import static com.mongodb.internal.connection.CommandHelper.executeCommand; +import static com.mongodb.internal.connection.CommandHelper.executeCommandAsync; +import static com.mongodb.internal.connection.DefaultAuthenticator.USER_NOT_FOUND_CODE; +import static com.mongodb.internal.connection.DescriptionHelper.createConnectionDescription; +import static com.mongodb.internal.connection.DescriptionHelper.createServerDescription; +import static java.lang.String.format; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class InternalStreamConnectionInitializer implements InternalConnectionInitializer { + private static final int INITIAL_MIN_RTT = 0; + private final ClusterConnectionMode clusterConnectionMode; + private final Authenticator authenticator; + private final BsonDocument clientMetadataDocument; + private final List requestedCompressors; + private final boolean checkSaslSupportedMechs; + private final ServerApi serverApi; + + public InternalStreamConnectionInitializer(final ClusterConnectionMode clusterConnectionMode, + @Nullable final Authenticator authenticator, + @Nullable final BsonDocument clientMetadataDocument, + final List requestedCompressors, + @Nullable final ServerApi serverApi) { + this.clusterConnectionMode = clusterConnectionMode; + this.authenticator = authenticator; + this.clientMetadataDocument = clientMetadataDocument; + this.requestedCompressors = notNull("requestedCompressors", requestedCompressors); + this.checkSaslSupportedMechs = authenticator instanceof DefaultAuthenticator; + this.serverApi = serverApi; + } + + @Override + public InternalConnectionInitializationDescription startHandshake(final InternalConnection internalConnection, + final OperationContext operationContext) { + notNull("internalConnection", internalConnection); + + return initializeConnectionDescription(internalConnection, operationContext); + } + + public InternalConnectionInitializationDescription finishHandshake(final InternalConnection internalConnection, + final InternalConnectionInitializationDescription description, + final OperationContext operationContext) { + notNull("internalConnection", internalConnection); + notNull("description", description); + final ConnectionDescription connectionDescription = description.getConnectionDescription(); + if (Authenticator.shouldAuthenticate(authenticator, connectionDescription)) { + authenticator.authenticate(internalConnection, connectionDescription, operationContext); + } + + return description; + } + + @Override + public void startHandshakeAsync(final InternalConnection internalConnection, final OperationContext operationContext, + final SingleResultCallback callback) { + long startTime = System.nanoTime(); + executeCommandAsync("admin", createHelloCommand(authenticator, internalConnection), clusterConnectionMode, serverApi, + internalConnection, operationContext, (helloResult, t) -> { + if (t != null) { + callback.onResult(null, t instanceof MongoException ? 
mapHelloException((MongoException) t) : t); + } else { + setSpeculativeAuthenticateResponse(helloResult); + InternalConnectionInitializationDescription initializationDescription; + try { + initializationDescription = createInitializationDescription(helloResult, internalConnection, startTime); + } catch (Throwable localThrowable) { + callback.onResult(null, localThrowable); + return; + } + callback.onResult(initializationDescription, null); + } + }); + } + + @Override + public void finishHandshakeAsync(final InternalConnection internalConnection, + final InternalConnectionInitializationDescription description, + final OperationContext operationContext, + final SingleResultCallback callback) { + ConnectionDescription connectionDescription = description.getConnectionDescription(); + + if (!Authenticator.shouldAuthenticate(authenticator, connectionDescription)) { + callback.onResult(description, null); + } else { + authenticator.authenticateAsync(internalConnection, connectionDescription, operationContext, + (result1, t1) -> { + if (t1 != null) { + callback.onResult(null, t1); + } else { + callback.onResult(description, null); + } + }); + } + } + + private InternalConnectionInitializationDescription initializeConnectionDescription(final InternalConnection internalConnection, + final OperationContext operationContext) { + BsonDocument helloResult; + BsonDocument helloCommandDocument = createHelloCommand(authenticator, internalConnection); + + long start = System.nanoTime(); + try { + helloResult = executeCommand("admin", helloCommandDocument, clusterConnectionMode, serverApi, internalConnection, operationContext); + } catch (MongoException e) { + throw mapHelloException(e); + } finally { + operationContext.getTimeoutContext().resetMaintenanceTimeout(); + } + setSpeculativeAuthenticateResponse(helloResult); + return createInitializationDescription(helloResult, internalConnection, start); + } + + private MongoException mapHelloException(final MongoException e) { + if (checkSaslSupportedMechs && e.getCode() == USER_NOT_FOUND_CODE) { + MongoCredential credential = authenticator.getMongoCredential(); + return new MongoSecurityException(credential, format("Exception authenticating %s", credential), e); + } else { + return e; + } + } + + private InternalConnectionInitializationDescription createInitializationDescription(final BsonDocument helloResult, + final InternalConnection internalConnection, + final long startTime) { + ConnectionId connectionId = internalConnection.getDescription().getConnectionId(); + ConnectionDescription connectionDescription = createConnectionDescription(clusterConnectionMode, connectionId, + helloResult); + ServerDescription serverDescription = + createServerDescription(internalConnection.getDescription().getServerAddress(), helloResult, + System.nanoTime() - startTime, INITIAL_MIN_RTT); + return new InternalConnectionInitializationDescription(connectionDescription, serverDescription); + } + + private BsonDocument createHelloCommand(final Authenticator authenticator, final InternalConnection connection) { + BsonDocument helloCommandDocument = new BsonDocument(getHandshakeCommandName(), new BsonInt32(1)) + .append("helloOk", BsonBoolean.TRUE); + if (clientMetadataDocument != null) { + helloCommandDocument.append("client", clientMetadataDocument); + } + if (clusterConnectionMode == ClusterConnectionMode.LOAD_BALANCED) { + helloCommandDocument.append("loadBalanced", BsonBoolean.TRUE); + } + if (!requestedCompressors.isEmpty()) { + BsonArray compressors = new 
BsonArray(this.requestedCompressors.size()); + for (MongoCompressor cur : this.requestedCompressors) { + compressors.add(new BsonString(cur.getName())); + } + helloCommandDocument.append("compression", compressors); + } + if (checkSaslSupportedMechs) { + MongoCredential credential = authenticator.getMongoCredential(); + helloCommandDocument.append("saslSupportedMechs", + new BsonString(credential.getSource() + "." + credential.getUserName())); + } + if (authenticator instanceof SpeculativeAuthenticator) { + BsonDocument speculativeAuthenticateDocument = + ((SpeculativeAuthenticator) authenticator).createSpeculativeAuthenticateCommand(connection); + if (speculativeAuthenticateDocument != null) { + helloCommandDocument.append("speculativeAuthenticate", speculativeAuthenticateDocument); + } + } + return helloCommandDocument; + } + + private void setSpeculativeAuthenticateResponse(final BsonDocument helloResult) { + if (authenticator instanceof SpeculativeAuthenticator) { + ((SpeculativeAuthenticator) authenticator).setSpeculativeAuthenticateResponse( + helloResult.getDocument("speculativeAuthenticate", null)); + } + } + + private InternalConnectionInitializationDescription applyGetLastErrorResult( + final BsonDocument getLastErrorResult, + final InternalConnectionInitializationDescription description) { + + ConnectionDescription connectionDescription = description.getConnectionDescription(); + ConnectionId connectionId; + + if (getLastErrorResult.containsKey("connectionId")) { + connectionId = connectionDescription.getConnectionId() + .withServerValue(getLastErrorResult.getNumber("connectionId").longValue()); + } else { + connectionId = connectionDescription.getConnectionId(); + } + + return description.withConnectionDescription(connectionDescription.withConnectionId(connectionId)); + } + + private String getHandshakeCommandName() { + return serverApi == null ? LEGACY_HELLO : HELLO; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/LevelCountingBsonWriter.java b/driver-core/src/main/com/mongodb/internal/connection/LevelCountingBsonWriter.java new file mode 100644 index 00000000000..3e9d0324bd7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/LevelCountingBsonWriter.java @@ -0,0 +1,79 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import org.bson.BsonWriter; + + +abstract class LevelCountingBsonWriter extends BsonWriterDecorator { + static final int DEFAULT_INITIAL_LEVEL = -1; + + private int level; + + LevelCountingBsonWriter(final BsonWriter bsonWriter) { + this(bsonWriter, DEFAULT_INITIAL_LEVEL); + } + + /** + * @param initialLevel This parameter allows initializing the {@linkplain #getCurrentLevel() current level} + * with a value different from {@link #DEFAULT_INITIAL_LEVEL}. 
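+ * For example, with the default initial level of {@code -1}, the first
+ * {@code writeStartDocument()} call brings the current level to {@code 0}.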
+ */ + LevelCountingBsonWriter(final BsonWriter bsonWriter, final int initialLevel) { + super(bsonWriter); + level = initialLevel; + } + + int getCurrentLevel() { + return level; + } + + @Override + public void writeStartDocument(final String name) { + level++; + super.writeStartDocument(name); + } + + @Override + public void writeStartDocument() { + level++; + super.writeStartDocument(); + } + + @Override + public void writeEndDocument() { + level--; + super.writeEndDocument(); + } + + @Override + public void writeStartArray() { + level++; + super.writeStartArray(); + } + + @Override + public void writeStartArray(final String name) { + level++; + super.writeStartArray(name); + } + + @Override + public void writeEndArray() { + level--; + super.writeEndArray(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedCluster.java b/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedCluster.java new file mode 100644 index 00000000000..2401a9e014a --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedCluster.java @@ -0,0 +1,461 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoClientException; +import com.mongodb.MongoException; +import com.mongodb.MongoInterruptedException; +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.MongoTimeoutException; +import com.mongodb.ServerAddress; +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ClusterSettings; +import com.mongodb.connection.ClusterType; +import com.mongodb.connection.ServerConnectionState; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerType; +import com.mongodb.event.ClusterClosedEvent; +import com.mongodb.event.ClusterDescriptionChangedEvent; +import com.mongodb.event.ClusterListener; +import com.mongodb.event.ClusterOpeningEvent; +import com.mongodb.event.ServerDescriptionChangedEvent; +import com.mongodb.internal.Locks; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import com.mongodb.selector.ServerSelector; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.assertTrue; 
+import static com.mongodb.assertions.Assertions.fail; +import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.connection.ServerConnectionState.CONNECTING; +import static com.mongodb.internal.connection.BaseCluster.logServerSelectionStarted; +import static com.mongodb.internal.connection.BaseCluster.logServerSelectionSucceeded; +import static com.mongodb.internal.connection.BaseCluster.logTopologyMonitoringStopping; +import static com.mongodb.internal.event.EventListenerHelper.singleClusterListener; +import static java.lang.String.format; +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static java.util.concurrent.TimeUnit.NANOSECONDS; + +@ThreadSafe +final class LoadBalancedCluster implements Cluster { + private static final Logger LOGGER = Loggers.getLogger("cluster"); + + private final ClusterId clusterId; + private final ClusterSettings settings; + private final ClusterClock clusterClock = new ClusterClock(); + private final ClientMetadata clientMetadata; + private final ClusterListener clusterListener; + private ClusterDescription description; + @Nullable + private ClusterableServer server; + private final AtomicBoolean closed = new AtomicBoolean(); + private final DnsSrvRecordMonitor dnsSrvRecordMonitor; + private volatile MongoException srvResolutionException; + private boolean srvRecordResolvedToMultipleHosts; + private volatile boolean initializationCompleted; + private List waitQueue = new LinkedList<>(); + private Thread waitQueueHandler; + private final Lock lock = new ReentrantLock(true); + private final Condition condition = lock.newCondition(); + + LoadBalancedCluster(final ClusterId clusterId, final ClusterSettings settings, final ClusterableServerFactory serverFactory, + final ClientMetadata clientMetadata, + final DnsSrvRecordMonitorFactory dnsSrvRecordMonitorFactory) { + assertTrue(settings.getMode() == ClusterConnectionMode.LOAD_BALANCED); + LOGGER.info(format("Cluster created with id %s and settings %s", clusterId, settings.getShortDescription())); + + this.clusterId = clusterId; + this.settings = settings; + this.clusterListener = singleClusterListener(settings); + this.description = new ClusterDescription(settings.getMode(), ClusterType.UNKNOWN, emptyList(), settings, + serverFactory.getSettings()); + this.clientMetadata = clientMetadata; + + if (settings.getSrvHost() == null) { + dnsSrvRecordMonitor = null; + init(clusterId, serverFactory, settings.getHosts().get(0)); + initializationCompleted = true; + } else { + notNull("dnsSrvRecordMonitorFactory", dnsSrvRecordMonitorFactory); + dnsSrvRecordMonitor = dnsSrvRecordMonitorFactory.create(assertNotNull(settings.getSrvHost()), settings.getSrvServiceName(), + new DnsSrvRecordInitializer() { + + @Override + public void initialize(final Collection hosts) { + LOGGER.info("SRV resolution completed with hosts: " + hosts); + + List localWaitQueue; + lock.lock(); + try { + if (isClosed()) { + return; + } + srvResolutionException = null; + if (hosts.size() != 1) { + srvRecordResolvedToMultipleHosts = true; + } else { + init(clusterId, serverFactory, hosts.iterator().next()); + } + initializationCompleted = true; + localWaitQueue = waitQueue; + waitQueue = emptyList(); + condition.signalAll(); + } finally { + lock.unlock(); + } + localWaitQueue.forEach(request -> handleServerSelectionRequest(request)); + } + + @Override + public void initialize(final MongoException 
initializationException) { + srvResolutionException = initializationException; + } + + @Override + public ClusterType getClusterType() { + return initializationCompleted ? ClusterType.LOAD_BALANCED : ClusterType.UNKNOWN; + } + }); + dnsSrvRecordMonitor.start(); + } + } + + private void init(final ClusterId clusterId, final ClusterableServerFactory serverFactory, final ServerAddress host) { + clusterListener.clusterOpening(new ClusterOpeningEvent(clusterId)); + + ClusterDescription initialDescription = new ClusterDescription(settings.getMode(), ClusterType.LOAD_BALANCED, + singletonList(ServerDescription.builder().address(settings.getHosts().get(0)).state(CONNECTING).build()), + settings, serverFactory.getSettings()); + clusterListener.clusterDescriptionChanged(new ClusterDescriptionChangedEvent(clusterId, initialDescription, description)); + + description = new ClusterDescription(ClusterConnectionMode.LOAD_BALANCED, ClusterType.LOAD_BALANCED, + singletonList(ServerDescription.builder() + .ok(true) + .state(ServerConnectionState.CONNECTED) + .type(ServerType.LOAD_BALANCER) + .address(host) + .build()), + settings, serverFactory.getSettings()); + server = serverFactory.create(this, host); + + clusterListener.clusterDescriptionChanged(new ClusterDescriptionChangedEvent(clusterId, description, initialDescription)); + } + + @Override + public ClusterSettings getSettings() { + isTrue("open", !isClosed()); + return settings; + } + + @Override + public ClusterId getClusterId() { + return clusterId; + } + + @Override + public ServersSnapshot getServersSnapshot( + final Timeout serverSelectionTimeout, + final TimeoutContext timeoutContext) { + isTrue("open", !isClosed()); + waitForSrv(serverSelectionTimeout, timeoutContext); + ClusterableServer server = assertNotNull(this.server); + return serverAddress -> server; + } + + @Override + public ClusterDescription getCurrentDescription() { + isTrue("open", !isClosed()); + return description; + } + + @Override + public ClusterClock getClock() { + isTrue("open", !isClosed()); + return clusterClock; + } + + @Override + public ClientMetadata getClientMetadata() { + return clientMetadata; + } + + @Override + public ServerTuple selectServer(final ServerSelector serverSelector, final OperationContext operationContext) { + isTrue("open", !isClosed()); + Timeout computedServerSelectionTimeout = operationContext.getTimeoutContext().computeServerSelectionTimeout(); + waitForSrv(computedServerSelectionTimeout, operationContext.getTimeoutContext()); + if (srvRecordResolvedToMultipleHosts) { + throw createResolvedToMultipleHostsException(); + } + ClusterDescription curDescription = description; + logServerSelectionStarted(operationContext, clusterId, serverSelector, curDescription); + ServerTuple serverTuple = new ServerTuple(assertNotNull(server), curDescription.getServerDescriptions().get(0)); + logServerSelectionSucceeded(operationContext, clusterId, serverTuple.getServerDescription().getAddress(), + serverSelector, curDescription); + return serverTuple; + } + + private void waitForSrv(final Timeout serverSelectionTimeout, final TimeoutContext timeoutContext) { + if (initializationCompleted) { + return; + } + Locks.withLock(lock, () -> { + while (!initializationCompleted) { + if (isClosed()) { + throw createShutdownException(); + } + serverSelectionTimeout.onExpired(() -> { + throw createTimeoutException(timeoutContext); + }); + serverSelectionTimeout.awaitOn(condition, () -> format("resolving SRV records for %s", settings.getSrvHost())); + } + }); + } + + 
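+    // Illustrative sketch, not part of this patch: waitForSrv above is the standard
+    // guarded-wait idiom. A minimal equivalent using plain java.util.concurrent
+    // primitives (timeoutNanos is a hypothetical budget) looks like:
+    //
+    //     long remainingNanos = timeoutNanos;
+    //     lock.lock();
+    //     try {
+    //         while (!initializationCompleted) {
+    //             if (remainingNanos <= 0L) {
+    //                 throw new MongoTimeoutException("Timed out resolving SRV records");
+    //             }
+    //             remainingNanos = condition.awaitNanos(remainingNanos);  // may throw InterruptedException
+    //         }
+    //     } finally {
+    //         lock.unlock();
+    //     }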
@Override + public void selectServerAsync(final ServerSelector serverSelector, final OperationContext operationContext, + final SingleResultCallback callback) { + if (isClosed()) { + callback.onResult(null, createShutdownException()); + return; + } + Timeout computedServerSelectionTimeout = operationContext.getTimeoutContext().computeServerSelectionTimeout(); + ServerSelectionRequest serverSelectionRequest = new ServerSelectionRequest(serverSelector, operationContext, + computedServerSelectionTimeout, callback); + if (initializationCompleted) { + handleServerSelectionRequest(serverSelectionRequest); + } else { + notifyWaitQueueHandler(serverSelectionRequest); + } + } + + private MongoClientException createShutdownException() { + return new MongoClientException("Shutdown in progress"); + } + + @Override + public void close() { + if (!closed.getAndSet(true)) { + LOGGER.info(format("Cluster closed with id %s", clusterId)); + if (dnsSrvRecordMonitor != null) { + dnsSrvRecordMonitor.close(); + } + ClusterableServer localServer = Locks.withLock(lock, () -> { + condition.signalAll(); + return server; + }); + if (localServer != null) { + localServer.close(); + } + logTopologyMonitoringStopping(clusterId); + ClusterClosedEvent clusterClosedEvent = new ClusterClosedEvent(clusterId); + clusterListener.clusterClosed(clusterClosedEvent); + } + } + + @Override + public boolean isClosed() { + return closed.get(); + } + + @Override + public void withLock(final Runnable action) { + fail(); + } + + @Override + public void onChange(final ServerDescriptionChangedEvent event) { + fail(); + } + + private void handleServerSelectionRequest(final ServerSelectionRequest serverSelectionRequest) { + assertTrue(initializationCompleted); + if (srvRecordResolvedToMultipleHosts) { + serverSelectionRequest.onError(createResolvedToMultipleHostsException()); + } else { + ClusterDescription curDescription = description; + logServerSelectionStarted( + serverSelectionRequest.operationContext, clusterId, serverSelectionRequest.serverSelector, curDescription); + ServerTuple serverTuple = new ServerTuple(assertNotNull(server), curDescription.getServerDescriptions().get(0)); + logServerSelectionSucceeded(serverSelectionRequest.operationContext, clusterId, + serverTuple.getServerDescription().getAddress(), serverSelectionRequest.serverSelector, curDescription); + serverSelectionRequest.onSuccess(serverTuple); + } + } + + private MongoClientException createResolvedToMultipleHostsException() { + return new MongoClientException("In load balancing mode, the host must resolve to a single SRV record, but instead it resolved " + + "to multiple hosts"); + } + + private MongoTimeoutException createTimeoutException(final TimeoutContext timeoutContext) { + MongoException localSrvResolutionException = srvResolutionException; + String message; + if (localSrvResolutionException == null) { + message = format("Timed out while waiting to resolve SRV records for %s.", settings.getSrvHost()); + } else { + message = format("Timed out while waiting to resolve SRV records for %s. " + + "Resolution exception was '%s'", settings.getSrvHost(), localSrvResolutionException); + } + return createTimeoutException(timeoutContext, message); + } + + private static MongoTimeoutException createTimeoutException(final TimeoutContext timeoutContext, final String message) { + return timeoutContext.hasTimeoutMS() ? 
new MongoOperationTimeoutException(message) : new MongoTimeoutException(message); + } + + private void notifyWaitQueueHandler(final ServerSelectionRequest request) { + Locks.withLock(lock, () -> { + if (isClosed()) { + request.onError(createShutdownException()); + return; + } + if (initializationCompleted) { + handleServerSelectionRequest(request); + return; + } + + waitQueue.add(request); + + if (waitQueueHandler == null) { + waitQueueHandler = new Thread(new WaitQueueHandler(), "cluster-" + clusterId.getValue()); + waitQueueHandler.setDaemon(true); + waitQueueHandler.start(); + } else { + condition.signalAll(); + } + }); + } + + private final class WaitQueueHandler implements Runnable { + public void run() { + try { + List timeoutList = new ArrayList<>(); + while (!(isClosed() || initializationCompleted)) { + lock.lock(); + try { + if (isClosed() || initializationCompleted) { + break; + } + Timeout waitTimeNanos = Timeout.infinite(); + + for (Iterator iterator = waitQueue.iterator(); iterator.hasNext();) { + ServerSelectionRequest next = iterator.next(); + + Timeout nextTimeout = next.getTimeout(); + Timeout waitTimeNanosFinal = waitTimeNanos; + waitTimeNanos = nextTimeout.call(NANOSECONDS, + () -> Timeout.earliest(waitTimeNanosFinal, nextTimeout), + (ns) -> Timeout.earliest(waitTimeNanosFinal, nextTimeout), + () -> { + timeoutList.add(next); + iterator.remove(); + return waitTimeNanosFinal; + }); + } + if (timeoutList.isEmpty()) { + try { + waitTimeNanos.awaitOn(condition, () -> "ignored"); + } catch (MongoInterruptedException unexpected) { + fail(); + } + } + } finally { + lock.unlock(); + } + timeoutList.forEach(request -> request.onError(createTimeoutException(request + .getOperationContext() + .getTimeoutContext()))); + timeoutList.clear(); + } + + // This code is executed either after closing the LoadBalancedCluster or after initializing it. In the latter case, + // waitQueue is guaranteed to be empty (as DnsSrvRecordInitializer.initialize clears it and no thread adds new elements to + // it after that). So shutdownList is not empty iff LoadBalancedCluster is closed, in which case we need to complete the + // requests in it. + List shutdownList = Locks.withLock(lock, () -> { + ArrayList result = new ArrayList<>(waitQueue); + waitQueue.clear(); + return result; + }); + shutdownList.forEach(request -> request.onError(createShutdownException())); + } catch (Throwable t) { + LOGGER.error(this + " stopped working. 
You may want to recreate the MongoClient", t); + throw t; + } + } + } + + private static final class ServerSelectionRequest { + private final ServerSelector serverSelector; + private final SingleResultCallback callback; + private final Timeout timeout; + private final OperationContext operationContext; + + private ServerSelectionRequest(final ServerSelector serverSelector, final OperationContext operationContext, + final Timeout timeout, final SingleResultCallback callback) { + this.serverSelector = serverSelector; + this.timeout = timeout; + this.operationContext = operationContext; + this.callback = callback; + } + + Timeout getTimeout() { + return timeout; + } + + OperationContext getOperationContext() { + return operationContext; + } + + public void onSuccess(final ServerTuple serverTuple) { + try { + callback.onResult(serverTuple, null); + } catch (Exception e) { + LOGGER.warn("Unanticipated exception thrown from callback", e); + } + } + + public void onError(final Throwable exception) { + try { + callback.onResult(null, exception); + } catch (Exception e) { + LOGGER.warn("Unanticipated exception thrown from callback", e); + } + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedClusterableServerFactory.java b/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedClusterableServerFactory.java new file mode 100644 index 00000000000..296240cf39f --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedClusterableServerFactory.java @@ -0,0 +1,89 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.LoggerSettings; +import com.mongodb.MongoCompressor; +import com.mongodb.MongoCredential; +import com.mongodb.ServerAddress; +import com.mongodb.ServerApi; +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ConnectionPoolSettings; +import com.mongodb.connection.ServerId; +import com.mongodb.connection.ServerSettings; +import com.mongodb.event.CommandListener; +import com.mongodb.internal.inject.EmptyProvider; +import com.mongodb.lang.Nullable; + +import java.util.List; + +import static com.mongodb.internal.event.EventListenerHelper.singleServerListener; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
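+ * <p>An illustrative construction sketch; every lower-cased collaborator below is an assumed,
+ * hypothetical variable, and the settings shown are plain defaults, not recommendations:</p>
+ * <pre>{@code
+ * LoadBalancedClusterableServerFactory factory = new LoadBalancedClusterableServerFactory(
+ *         ServerSettings.builder().build(),
+ *         ConnectionPoolSettings.builder().build(),
+ *         internalConnectionPoolSettings, streamFactory, credential,
+ *         LoggerSettings.builder().build(), commandListener, compressorList,
+ *         serverApi, operationContextFactory);
+ * ClusterableServer server = factory.create(cluster, new ServerAddress("lb.example.com", 27017));
+ * }</pre>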
+ */ +@ThreadSafe +public class LoadBalancedClusterableServerFactory implements ClusterableServerFactory { + private final ServerSettings serverSettings; + private final ConnectionPoolSettings connectionPoolSettings; + private final InternalConnectionPoolSettings internalConnectionPoolSettings; + private final StreamFactory streamFactory; + private final MongoCredentialWithCache credential; + private final LoggerSettings loggerSettings; + private final CommandListener commandListener; + private final List compressorList; + private final ServerApi serverApi; + private final InternalOperationContextFactory operationContextFactory; + + public LoadBalancedClusterableServerFactory(final ServerSettings serverSettings, + final ConnectionPoolSettings connectionPoolSettings, + final InternalConnectionPoolSettings internalConnectionPoolSettings, + final StreamFactory streamFactory, @Nullable final MongoCredential credential, + final LoggerSettings loggerSettings, + @Nullable final CommandListener commandListener, + final List compressorList, @Nullable final ServerApi serverApi, + final InternalOperationContextFactory operationContextFactory) { + this.serverSettings = serverSettings; + this.connectionPoolSettings = connectionPoolSettings; + this.internalConnectionPoolSettings = internalConnectionPoolSettings; + this.streamFactory = streamFactory; + this.credential = credential == null ? null : new MongoCredentialWithCache(credential); + this.loggerSettings = loggerSettings; + this.commandListener = commandListener; + this.compressorList = compressorList; + this.serverApi = serverApi; + this.operationContextFactory = operationContextFactory; + } + + @Override + public ClusterableServer create(final Cluster cluster, final ServerAddress serverAddress) { + ConnectionPool connectionPool = new DefaultConnectionPool(new ServerId(cluster.getClusterId(), serverAddress), + new InternalStreamConnectionFactory(ClusterConnectionMode.LOAD_BALANCED, streamFactory, credential, cluster.getClientMetadata(), + compressorList, loggerSettings, commandListener, serverApi), + connectionPoolSettings, internalConnectionPoolSettings, EmptyProvider.instance(), operationContextFactory); + connectionPool.ready(); + + return new LoadBalancedServer(new ServerId(cluster.getClusterId(), serverAddress), connectionPool, new DefaultConnectionFactory(), + singleServerListener(serverSettings), cluster.getClock()); + } + + @Override + public ServerSettings getSettings() { + return serverSettings; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedServer.java b/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedServer.java new file mode 100644 index 00000000000..eda27db521c --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/LoadBalancedServer.java @@ -0,0 +1,199 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoCommandException; +import com.mongodb.MongoException; +import com.mongodb.MongoNodeIsRecoveringException; +import com.mongodb.MongoNotPrimaryException; +import com.mongodb.MongoSocketException; +import com.mongodb.MongoSocketReadTimeoutException; +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ServerConnectionState; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerId; +import com.mongodb.connection.ServerType; +import com.mongodb.event.ServerClosedEvent; +import com.mongodb.event.ServerDescriptionChangedEvent; +import com.mongodb.event.ServerListener; +import com.mongodb.event.ServerOpeningEvent; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.internal.session.SessionContext; +import com.mongodb.lang.Nullable; +import org.bson.types.ObjectId; + +import java.util.concurrent.atomic.AtomicBoolean; + +import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.connection.ServerConnectionState.CONNECTING; +import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; +import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
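+ * <p>An illustrative checkout/release cycle; the collaborators shown are assumed to exist:</p>
+ * <pre>{@code
+ * LoadBalancedServer server = new LoadBalancedServer(serverId, connectionPool,
+ *         new DefaultConnectionFactory(), serverListener, clusterClock);
+ * Connection connection = server.getConnection(operationContext);
+ * try {
+ *     // run command protocols over the load-balancer connection
+ * } finally {
+ *     connection.release();
+ * }
+ * }</pre>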
+ */ +@ThreadSafe +public class LoadBalancedServer implements ClusterableServer { + private static final Logger LOGGER = Loggers.getLogger("connection"); + private final AtomicBoolean closed = new AtomicBoolean(); + private final ServerId serverId; + private final ConnectionPool connectionPool; + private final ConnectionFactory connectionFactory; + private final ServerListener serverListener; + private final ClusterClock clusterClock; + + public LoadBalancedServer(final ServerId serverId, final ConnectionPool connectionPool, final ConnectionFactory connectionFactory, + final ServerListener serverListener, final ClusterClock clusterClock) { + this.serverId = serverId; + this.connectionPool = connectionPool; + this.connectionFactory = connectionFactory; + this.serverListener = serverListener; + this.clusterClock = clusterClock; + + serverListener.serverOpening(new ServerOpeningEvent(serverId)); + serverListener.serverDescriptionChanged(new ServerDescriptionChangedEvent(serverId, + ServerDescription.builder() + .ok(true) + .state(ServerConnectionState.CONNECTED) + .type(ServerType.LOAD_BALANCER) + .address(serverId.getAddress()) + .build(), + ServerDescription.builder().address(serverId.getAddress()).state(CONNECTING).build())); + } + + @Override + public void resetToConnecting(final MongoException cause) { + // no op + } + + @Override + public void invalidate(final MongoException cause) { + // no op + } + + + private void invalidate(final Throwable t, @Nullable final ObjectId serviceId, final int generation) { + if (!isClosed()) { + if (t instanceof MongoSocketException && !(t instanceof MongoSocketReadTimeoutException)) { + if (serviceId != null) { + connectionPool.invalidate(serviceId, generation); + } + } else if (t instanceof MongoNotPrimaryException || t instanceof MongoNodeIsRecoveringException) { + if (SHUTDOWN_CODES.contains(((MongoCommandException) t).getErrorCode())) { + if (serviceId != null) { + connectionPool.invalidate(serviceId, generation); + } + } + } + } + } + + @Override + public void close() { + if (!closed.getAndSet(true)) { + connectionPool.close(); + serverListener.serverClosed(new ServerClosedEvent(serverId)); + } + } + + @Override + public boolean isClosed() { + return closed.get(); + } + + @Override + public void connect() { + // no op + } + + @Override + public Connection getConnection(final OperationContext operationContext) { + isTrue("open", !isClosed()); + return connectionFactory.create(connectionPool.get(operationContext), new LoadBalancedServerProtocolExecutor(), + ClusterConnectionMode.LOAD_BALANCED); + } + + @Override + public void getConnectionAsync(final OperationContext operationContext, final SingleResultCallback callback) { + isTrue("open", !isClosed()); + connectionPool.getAsync(operationContext, (result, t) -> { + if (t != null) { + callback.onResult(null, t); + } else { + callback.onResult(connectionFactory.createAsync(result, new LoadBalancedServerProtocolExecutor(), + ClusterConnectionMode.LOAD_BALANCED), null); + } + }); + } + + @Override + public int operationCount() { + return -1; + } + + @VisibleForTesting(otherwise = PRIVATE) + ConnectionPool getConnectionPool() { + return connectionPool; + } + + private class LoadBalancedServerProtocolExecutor extends AbstractProtocolExecutor { + @SuppressWarnings("unchecked") + @Override + public T execute(final CommandProtocol protocol, final InternalConnection connection, final SessionContext sessionContext) { + try { + return protocol.withSessionContext(new 
ClusterClockAdvancingSessionContext(sessionContext, clusterClock)) + .execute(connection); + } catch (MongoWriteConcernWithResponseException e) { + return (T) e.getResponse(); + } catch (MongoException e) { + handleExecutionException(connection, sessionContext, e); + throw e; + } + } + + @SuppressWarnings("unchecked") + @Override + public void executeAsync(final CommandProtocol protocol, final InternalConnection connection, + final SessionContext sessionContext, final SingleResultCallback callback) { + protocol.withSessionContext(new ClusterClockAdvancingSessionContext(sessionContext, clusterClock)) + .executeAsync(connection, errorHandlingCallback((result, t) -> { + if (t != null) { + if (t instanceof MongoWriteConcernWithResponseException) { + callback.onResult((T) ((MongoWriteConcernWithResponseException) t).getResponse(), null); + } else { + handleExecutionException(connection, sessionContext, t); + callback.onResult(null, t); + } + } else { + callback.onResult(result, null); + } + }, LOGGER)); + } + + private void handleExecutionException(final InternalConnection connection, final SessionContext sessionContext, + final Throwable t) { + invalidate(t, connection.getDescription().getServiceId(), connection.getGeneration()); + if (shouldMarkSessionDirty(t, sessionContext)) { + sessionContext.markSessionDirty(); + } + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/LoggingCommandEventSender.java b/driver-core/src/main/com/mongodb/internal/connection/LoggingCommandEventSender.java new file mode 100644 index 00000000000..044a2113fd8 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/LoggingCommandEventSender.java @@ -0,0 +1,243 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.LoggerSettings; +import com.mongodb.MongoCommandException; +import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.event.CommandListener; +import com.mongodb.internal.ExceptionUtils.MongoCommandExceptionUtils; +import com.mongodb.internal.logging.LogMessage; +import com.mongodb.internal.logging.LogMessage.Entry; +import com.mongodb.internal.logging.StructuredLogger; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonReader; +import org.bson.codecs.RawBsonDocumentCodec; +import org.bson.json.JsonMode; +import org.bson.json.JsonWriter; +import org.bson.json.JsonWriterSettings; + +import java.io.StringWriter; +import java.util.ArrayList; +import java.util.List; +import java.util.Set; +import java.util.function.Consumer; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.internal.connection.ProtocolHelper.sendCommandFailedEvent; +import static com.mongodb.internal.connection.ProtocolHelper.sendCommandStartedEvent; +import static com.mongodb.internal.connection.ProtocolHelper.sendCommandSucceededEvent; +import static com.mongodb.internal.logging.LogMessage.Component.COMMAND; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.COMMAND_CONTENT; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.COMMAND_NAME; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.DATABASE_NAME; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.DRIVER_CONNECTION_ID; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.DURATION_MS; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.OPERATION_ID; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.REPLY; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.REQUEST_ID; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.SERVER_CONNECTION_ID; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.SERVER_HOST; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.SERVER_PORT; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.SERVICE_ID; +import static com.mongodb.internal.logging.LogMessage.Level.DEBUG; + +class LoggingCommandEventSender implements CommandEventSender { + private static final double NANOS_PER_MILLI = 1_000_000.0d; + private final ConnectionDescription description; + @Nullable + private final CommandListener commandListener; + private final OperationContext operationContext; + private final StructuredLogger logger; + private final LoggerSettings loggerSettings; + private final long startTimeNanos; + private final CommandMessage message; + private final String commandName; + private volatile BsonDocument commandDocument; + private final boolean redactionRequired; + + LoggingCommandEventSender(final Set securitySensitiveCommands, final Set securitySensitiveHelloCommands, + final ConnectionDescription description, + @Nullable final CommandListener commandListener, + final OperationContext operationContext, + final CommandMessage message, + final ByteBufferBsonOutput bsonOutput, + final StructuredLogger logger, + final LoggerSettings loggerSettings) { + this.description = description; + this.commandListener = commandListener; + this.operationContext = operationContext; + this.logger = logger; + this.loggerSettings = loggerSettings; + this.startTimeNanos = 
System.nanoTime(); + this.message = message; + this.commandDocument = message.getCommandDocument(bsonOutput); + this.commandName = commandDocument.getFirstKey(); + this.redactionRequired = securitySensitiveCommands.contains(commandName) + || (securitySensitiveHelloCommands.contains(commandName) && commandDocument.containsKey("speculativeAuthenticate")); + } + + @Override + public void sendStartedEvent() { + if (loggingRequired()) { + String messagePrefix = "Command \"{}\" started on database \"{}\""; + String command = redactionRequired ? "{}" : getTruncatedJsonCommand(commandDocument); + + logEventMessage(messagePrefix, "Command started", null, entries -> { + entries.add(new Entry(COMMAND_NAME, commandName)); + entries.add(new Entry(DATABASE_NAME, message.getDatabase())); + }, + entries -> entries.add(new Entry(COMMAND_CONTENT, command))); + } + + if (eventRequired()) { + BsonDocument commandDocumentForEvent = redactionRequired + ? new BsonDocument() : commandDocument; + + sendCommandStartedEvent(message, message.getDatabase(), commandName, commandDocumentForEvent, description, + assertNotNull(commandListener), operationContext); + } + // the buffer underlying the command document may be released after the started event, so set to null to ensure it's not used + // when sending the failed or succeeded event + commandDocument = null; + } + + + @Override + public void sendFailedEvent(final Throwable t) { + Throwable commandEventException = t; + if (t instanceof MongoCommandException && redactionRequired) { + commandEventException = MongoCommandExceptionUtils.redacted((MongoCommandException) t); + } + long elapsedTimeNanos = System.nanoTime() - startTimeNanos; + + if (loggingRequired()) { + String messagePrefix = "Command \"{}\" failed on database \"{}\" in {} ms"; + + logEventMessage(messagePrefix, "Command failed", commandEventException, + entries -> { + entries.add(new Entry(COMMAND_NAME, commandName)); + entries.add(new Entry(DATABASE_NAME, message.getDatabase())); + entries.add(new Entry(DURATION_MS, elapsedTimeNanos / NANOS_PER_MILLI)); + }, + entries -> entries.add(new Entry(COMMAND_CONTENT, null))); + } + + if (eventRequired()) { + sendCommandFailedEvent(message, commandName, message.getDatabase(), description, elapsedTimeNanos, + commandEventException, commandListener, operationContext); + } + } + + @Override + public void sendSucceededEvent(final ResponseBuffers responseBuffers) { + sendSucceededEvent(responseBuffers.getResponseDocument(message.getId(), new RawBsonDocumentCodec())); + } + + @Override + public void sendSucceededEventForOneWayCommand() { + sendSucceededEvent(new BsonDocument("ok", new BsonInt32(1))); + } + + private void sendSucceededEvent(final BsonDocument reply) { + long elapsedTimeNanos = System.nanoTime() - startTimeNanos; + + if (loggingRequired()) { + String format = "Command \"{}\" succeeded on database \"{}\" in {} ms using a connection with driver-generated ID {}" + + "[ and server-generated ID {}] to {}:{}[ with service ID {}]. The request ID is {}" + + " and the operation ID is {}. Command reply: {}"; + + BsonDocument responseDocumentForEvent = redactionRequired ? new BsonDocument() : reply; + String replyString = redactionRequired ? 
"{}" : getTruncatedJsonCommand(responseDocumentForEvent); + + logEventMessage("Command succeeded", null, + entries -> { + entries.add(new Entry(COMMAND_NAME, commandName)); + entries.add(new Entry(DATABASE_NAME, message.getDatabase())); + entries.add(new Entry(DURATION_MS, elapsedTimeNanos / NANOS_PER_MILLI)); + }, + entries -> entries.add(new Entry(REPLY, replyString)), format); + } + + if (eventRequired()) { + BsonDocument responseDocumentForEvent = redactionRequired ? new BsonDocument() : reply; + sendCommandSucceededEvent(message, commandName, message.getDatabase(), responseDocumentForEvent, + description, elapsedTimeNanos, commandListener, operationContext); + } + } + + private boolean loggingRequired() { + return logger.isRequired(DEBUG, getClusterId()); + } + + + private ClusterId getClusterId() { + return description.getConnectionId().getServerId().getClusterId(); + } + + private boolean eventRequired() { + return commandListener != null; + } + + private void logEventMessage(final String messagePrefix, final String messageId, @Nullable final Throwable exception, + final Consumer> prefixEntriesMutator, + final Consumer> suffixEntriesMutator) { + String format = messagePrefix + " using a connection with driver-generated ID {}" + + "[ and server-generated ID {}] to {}:{}[ with service ID {}]. The request ID is {}" + + " and the operation ID is {}.[ Command: {}]"; + logEventMessage(messageId, exception, prefixEntriesMutator, suffixEntriesMutator, format); + } + + private void logEventMessage(final String messageId, final @Nullable Throwable exception, + final Consumer> prefixEntriesMutator, + final Consumer> suffixEntriesMutator, + final String format) { + List entries = new ArrayList<>(); + prefixEntriesMutator.accept(entries); + entries.add(new Entry(DRIVER_CONNECTION_ID, description.getConnectionId().getLocalValue())); + entries.add(new Entry(SERVER_CONNECTION_ID, description.getConnectionId().getServerValue())); + entries.add(new Entry(SERVER_HOST, description.getServerAddress().getHost())); + entries.add(new Entry(SERVER_PORT, description.getServerAddress().getPort())); + entries.add(new Entry(SERVICE_ID, description.getServiceId())); + entries.add(new Entry(REQUEST_ID, message.getId())); + entries.add(new Entry(OPERATION_ID, operationContext.getId())); + suffixEntriesMutator.accept(entries); + logger.log(new LogMessage(COMMAND, DEBUG, messageId, getClusterId(), exception, entries, format)); + } + + private String getTruncatedJsonCommand(final BsonDocument commandDocument) { + StringWriter writer = new StringWriter(); + + try (BsonReader bsonReader = commandDocument.asBsonReader()) { + JsonWriter jsonWriter = new JsonWriter(writer, + JsonWriterSettings.builder().outputMode(JsonMode.RELAXED) + .maxLength(loggerSettings.getMaxDocumentLength()) + .build()); + + jsonWriter.pipe(bsonReader); + + if (jsonWriter.isTruncated()) { + writer.append(" ..."); + } + + return writer.toString(); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/MessageHeader.java b/driver-core/src/main/com/mongodb/internal/connection/MessageHeader.java new file mode 100644 index 00000000000..ab0f5c814cc --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/MessageHeader.java @@ -0,0 +1,84 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoInternalException; +import com.mongodb.annotations.Immutable; +import org.bson.ByteBuf; + +// Contains the details of an OP_COMPRESSED reply from a MongoDB server. +@Immutable +final class MessageHeader { + /** + * The length of the standard message header in the MongoDB wire protocol. + */ + public static final int MESSAGE_HEADER_LENGTH = 16; + + private final int messageLength; + private final int requestId; + private final int responseTo; + private final int opCode; + + MessageHeader(final ByteBuf header, final int maxMessageLength) { + messageLength = header.getInt(); + requestId = header.getInt(); + responseTo = header.getInt(); + opCode = header.getInt(); + + if (messageLength > maxMessageLength) { + throw new MongoInternalException(String.format("The reply message length %d is greater than the maximum message length %d", + messageLength, maxMessageLength)); + } + } + + /** + * Gets the total size of the message in bytes. This total includes the 4 bytes that holds the message length. + * + * @return the total message size, including all of the header + */ + public int getMessageLength() { + return messageLength; + } + + /** + * This is a client or database-generated identifier that uniquely identifies this message. Along with the {@code responseTo} field in + * responses, clients can use this to associate query responses with the originating query. + * + * @return the identifier for this message + */ + public int getRequestId() { + return requestId; + } + + /** + * Along with the requestID field in queries, clients can use this to associate query responses with the originating query. + * + * @return the request ID from the original request + */ + public int getResponseTo() { + return responseTo; + } + + /** + * Gets the opcode + * + * @return the opcode + */ + public int getOpCode() { + return opCode; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/MessageSequences.java b/driver-core/src/main/com/mongodb/internal/connection/MessageSequences.java new file mode 100644 index 00000000000..19600007404 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/MessageSequences.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.connection; + +/** + * Zero or more identifiable sequences contained in the {@code OP_MSG} section with payload type 1. + *
<p>This class is not part of the public API and may be removed or changed at any time.</p>
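+ * <p>Commands that carry no type-1 payload use the shared empty instance, e.g. (illustrative):</p>
+ * <pre>{@code
+ * MessageSequences sequences = MessageSequences.EmptyMessageSequences.INSTANCE;
+ * }</pre>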
+ * @see
OP_MSG + */ +public abstract class MessageSequences { + public static final class EmptyMessageSequences extends MessageSequences { + public static final EmptyMessageSequences INSTANCE = new EmptyMessageSequences(); + + private EmptyMessageSequences() { + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/MessageSettings.java b/driver-core/src/main/com/mongodb/internal/connection/MessageSettings.java new file mode 100644 index 00000000000..51587e8f91d --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/MessageSettings.java @@ -0,0 +1,206 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.annotations.Immutable; +import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.connection.ServerType; + +import static com.mongodb.internal.operation.ServerVersionHelper.UNKNOWN_WIRE_VERSION; + +/** + * The message settings + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
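+ * <p>An illustrative builder round-trip (the values are arbitrary, not driver defaults):</p>
+ * <pre>{@code
+ * MessageSettings settings = MessageSettings.builder()
+ *         .maxDocumentSize(16 * 1024 * 1024)   // 16 MB
+ *         .maxMessageSize(48 * 1000 * 1000)
+ *         .maxBatchCount(100_000)
+ *         .maxWireVersion(17)
+ *         .serverType(ServerType.LOAD_BALANCER)
+ *         .sessionSupported(true)
+ *         .build();
+ * assert settings.getMaxBatchCount() == 100_000;
+ * }</pre>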
+ */ +@Immutable +public final class MessageSettings { + /** + * + * {@code maxBsonObjectSize}. + */ + private static final int DEFAULT_MAX_DOCUMENT_SIZE = 0x1000000; // 16MB + /** + * + * {@code maxMessageSizeBytes}. + */ + private static final int DEFAULT_MAX_MESSAGE_SIZE = 0x2000000; // 32MB + /** + * + * {@code maxWriteBatchSize}. + */ + private static final int DEFAULT_MAX_BATCH_COUNT = 1000; + /** + * The headroom for documents that are not intended to be stored in a database. + * A command document is an example of such a document. + * This headroom allows a command document to specify a document that is intended to be stored in a database, + * even if the specified document is of the maximum size. + */ + static final int DOCUMENT_HEADROOM_SIZE = 16 * (1 << 10); + + private final int maxDocumentSize; + private final int maxMessageSize; + private final int maxBatchCount; + private final int maxWireVersion; + private final ServerType serverType; + private final boolean sessionSupported; + private final boolean cryptd; + + /** + * Gets the builder + * + * @return the builder + */ + public static Builder builder() { + return new Builder(); + } + + /** + * A MessageSettings builder. + */ + @NotThreadSafe + public static final class Builder { + private int maxDocumentSize = DEFAULT_MAX_DOCUMENT_SIZE; + private int maxMessageSize = DEFAULT_MAX_MESSAGE_SIZE; + private int maxBatchCount = DEFAULT_MAX_BATCH_COUNT; + private int maxWireVersion = UNKNOWN_WIRE_VERSION; + private ServerType serverType; + private boolean sessionSupported; + private boolean cryptd; + + /** + * Build it. + * + * @return the message settings + */ + public MessageSettings build() { + return new MessageSettings(this); + } + + /** + * Sets the maximum document size allowed. + * + * @param maxDocumentSize the maximum document size allowed + * @return this + */ + public Builder maxDocumentSize(final int maxDocumentSize) { + this.maxDocumentSize = maxDocumentSize; + return this; + } + + /** + * Sets the maximum message size allowed. + * + * @param maxMessageSize the maximum message size allowed + * @return this + */ + public Builder maxMessageSize(final int maxMessageSize) { + this.maxMessageSize = maxMessageSize; + return this; + } + + /** + * Sets the maximum number of items in a batch allowed. + * + * @param maxBatchCount the maximum number of items in a batch allowed + * @return this + */ + public Builder maxBatchCount(final int maxBatchCount) { + this.maxBatchCount = maxBatchCount; + return this; + } + + public Builder maxWireVersion(final int maxWireVersion) { + this.maxWireVersion = maxWireVersion; + return this; + } + + public Builder serverType(final ServerType serverType) { + this.serverType = serverType; + return this; + } + + public Builder sessionSupported(final boolean sessionSupported) { + this.sessionSupported = sessionSupported; + return this; + } + + /** + * Set whether the server is a mongocryptd. + * + * @param cryptd true if the server is a mongocryptd. + * @return this + */ + public Builder cryptd(final boolean cryptd) { + this.cryptd = cryptd; + return this; + } + } + + /** + * Gets the maximum document size allowed. + * + * @return the maximum document size allowed + */ + public int getMaxDocumentSize() { + return maxDocumentSize; + } + + /** + * Gets the maximum message size allowed. + * + * @return the maximum message size allowed + */ + public int getMaxMessageSize() { + return maxMessageSize; + } + + /** + * Gets the maximum number of items in a batch allowed. 
+ * + * @return the maximum number of items in a batch allowed + */ + public int getMaxBatchCount() { + return maxBatchCount; + } + + public int getMaxWireVersion() { + return maxWireVersion; + } + + public ServerType getServerType() { + return serverType; + } + public boolean isCryptd() { + return cryptd; + } + + public boolean isSessionSupported() { + return sessionSupported; + } + + + private MessageSettings(final Builder builder) { + this.maxDocumentSize = builder.maxDocumentSize; + this.maxMessageSize = builder.maxMessageSize; + this.maxBatchCount = builder.maxBatchCount; + this.maxWireVersion = builder.maxWireVersion; + this.serverType = builder.serverType; + this.sessionSupported = builder.sessionSupported; + this.cryptd = builder.cryptd; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/MongoCredentialWithCache.java b/driver-core/src/main/com/mongodb/internal/connection/MongoCredentialWithCache.java new file mode 100644 index 00000000000..682637bf9ed --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/MongoCredentialWithCache.java @@ -0,0 +1,112 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.AuthenticationMechanism; +import com.mongodb.MongoCredential; +import com.mongodb.lang.Nullable; + +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; +import java.util.concurrent.locks.StampedLock; + +import static com.mongodb.internal.Locks.withInterruptibleLock; +import static com.mongodb.internal.connection.OidcAuthenticator.OidcCacheEntry; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
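+ * <p>An illustrative cache round-trip; the key and value shown are hypothetical:</p>
+ * <pre>{@code
+ * MongoCredentialWithCache withCache = new MongoCredentialWithCache(
+ *         MongoCredential.createScramSha256Credential("user", "admin", "pwd".toCharArray()));
+ * withCache.putInCache("server-key", "cached-value");
+ * String hit = withCache.getFromCache("server-key", String.class);  // "cached-value"
+ * }</pre>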
+ */ +public class MongoCredentialWithCache { + private final MongoCredential credential; + private final Cache cache; + + public MongoCredentialWithCache(final MongoCredential credential) { + this(credential, new Cache()); + } + + private MongoCredentialWithCache(final MongoCredential credential, final Cache cache) { + this.credential = credential; + this.cache = cache; + } + + public MongoCredentialWithCache withMechanism(final AuthenticationMechanism mechanism) { + return new MongoCredentialWithCache(credential.withMechanism(mechanism), cache); + } + + @Nullable + public AuthenticationMechanism getAuthenticationMechanism() { + return credential.getAuthenticationMechanism(); + } + + public MongoCredential getCredential() { + return credential; + } + + @Nullable + public T getFromCache(final Object key, final Class clazz) { + return clazz.cast(cache.get(key)); + } + + public void putInCache(final Object key, final Object value) { + cache.set(key, value); + } + + OidcCacheEntry getOidcCacheEntry() { + return cache.oidcCacheEntry; + } + + void setOidcCacheEntry(final OidcCacheEntry oidcCacheEntry) { + this.cache.oidcCacheEntry = oidcCacheEntry; + } + + StampedLock getOidcLock() { + return cache.oidcLock; + } + + public Lock getLock() { + return cache.lock; + } + + /** + * Stores any state associated with the credential. + */ + static class Cache { + private final ReentrantLock lock = new ReentrantLock(); + private Object cacheKey; + private Object cacheValue; + + + private final StampedLock oidcLock = new StampedLock(); + private volatile OidcCacheEntry oidcCacheEntry = new OidcCacheEntry(); + + Object get(final Object key) { + return withInterruptibleLock(lock, () -> { + if (cacheKey != null && cacheKey.equals(key)) { + return cacheValue; + } + return null; + }); + } + + void set(final Object key, final Object value) { + withInterruptibleLock(lock, () -> { + cacheKey = key; + cacheValue = value; + }); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/MongoWriteConcernWithResponseException.java b/driver-core/src/main/com/mongodb/internal/connection/MongoWriteConcernWithResponseException.java new file mode 100644 index 00000000000..a9fc088691c --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/MongoWriteConcernWithResponseException.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoException; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
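+ * <p>Protocol executors unwrap the carried response to recover the server reply despite the
+ * write concern error, as in this sketch modeled on {@code LoadBalancedServerProtocolExecutor}:</p>
+ * <pre>{@code
+ * try {
+ *     return protocol.execute(connection);
+ * } catch (MongoWriteConcernWithResponseException e) {
+ *     return (T) e.getResponse();  // the response captured alongside the original error
+ * }
+ * }</pre>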
+ */ +public class MongoWriteConcernWithResponseException extends MongoException { + private static final long serialVersionUID = 1707360842648550287L; + private final MongoException cause; + private final Object response; + + public MongoWriteConcernWithResponseException(final MongoException exception, final Object response) { + super(exception.getCode(), exception.getMessage(), exception); + this.cause = exception; + this.response = response; + } + + @Override + public MongoException getCause() { + return cause; + } + + public Object getResponse() { + return response; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/MultiServerCluster.java b/driver-core/src/main/com/mongodb/internal/connection/MultiServerCluster.java new file mode 100644 index 00000000000..55a11a10228 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/MultiServerCluster.java @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ClusterSettings; + +import static com.mongodb.assertions.Assertions.isTrue; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
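+ * <p>An illustrative construction; the factory and client metadata are assumed collaborators,
+ * and the host names are placeholders:</p>
+ * <pre>{@code
+ * Cluster cluster = new MultiServerCluster(new ClusterId(),
+ *         ClusterSettings.builder()
+ *                 .hosts(Arrays.asList(new ServerAddress("host1"), new ServerAddress("host2")))
+ *                 .build(),
+ *         serverFactory, clientMetadata);
+ * }</pre>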
+ */ +public final class MultiServerCluster extends AbstractMultiServerCluster { + public MultiServerCluster(final ClusterId clusterId, final ClusterSettings settings, + final ClusterableServerFactory serverFactory, + final ClientMetadata clientMetadata) { + super(clusterId, settings, serverFactory, clientMetadata); + isTrue("srvHost is null", settings.getSrvHost() == null); + initialize(settings.getHosts()); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/NoOpCommandEventSender.java b/driver-core/src/main/com/mongodb/internal/connection/NoOpCommandEventSender.java new file mode 100644 index 00000000000..4a9864ca5a8 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/NoOpCommandEventSender.java @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +class NoOpCommandEventSender implements CommandEventSender { + @Override + public void sendStartedEvent() { + } + + @Override + public void sendFailedEvent(final Throwable t) { + } + + @Override + public void sendSucceededEvent(final ResponseBuffers responseBuffers) { + } + + @Override + public void sendSucceededEventForOneWayCommand() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/NoOpSessionContext.java b/driver-core/src/main/com/mongodb/internal/connection/NoOpSessionContext.java new file mode 100644 index 00000000000..2b5f7d8ef6c --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/NoOpSessionContext.java @@ -0,0 +1,136 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.ReadConcern; +import com.mongodb.internal.session.SessionContext; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonTimestamp; + +/** + * A SessionContext implementation that does nothing and reports that it has no session. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
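+ * <p>Callers use the shared singleton rather than constructing instances, e.g.:</p>
+ * <pre>{@code
+ * SessionContext sessionContext = NoOpSessionContext.INSTANCE;
+ * assert !sessionContext.hasSession();
+ * assert sessionContext.getReadConcern() == ReadConcern.DEFAULT;
+ * }</pre>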
+ */ +public class NoOpSessionContext implements SessionContext { + + /** + * A singleton instance of a NoOpSessionContext + */ + public static final NoOpSessionContext INSTANCE = new NoOpSessionContext(); + + @Override + public boolean hasSession() { + return false; + } + + @Override + public boolean isImplicitSession() { + throw new UnsupportedOperationException(); + } + + @Override + public BsonDocument getSessionId() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isCausallyConsistent() { + return false; + } + + @Override + public long getTransactionNumber() { + throw new UnsupportedOperationException(); + } + + @Override + public long advanceTransactionNumber() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean notifyMessageSent() { + return false; + } + + @Nullable + @Override + public BsonTimestamp getOperationTime() { + return null; + } + + @Override + public void advanceOperationTime(@Nullable final BsonTimestamp operationTime) { + } + + @Nullable + @Override + public BsonDocument getClusterTime() { + return null; + } + + @Override + public void advanceClusterTime(@Nullable final BsonDocument clusterTime) { + } + + @Override + public boolean isSnapshot() { + return false; + } + + @Override + public void setSnapshotTimestamp(@Nullable final BsonTimestamp snapshotTimestamp) { + } + + @Override + @Nullable + public BsonTimestamp getSnapshotTimestamp() { + return null; + } + + @Override + public boolean hasActiveTransaction() { + return false; + } + + @Override + public ReadConcern getReadConcern() { + return ReadConcern.DEFAULT; + } + + @Override + public void setRecoveryToken(final BsonDocument recoveryToken) { + throw new UnsupportedOperationException(); + } + + @Override + public void clearTransactionContext() { + throw new UnsupportedOperationException(); + } + + @Override + public void markSessionDirty() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isSessionMarkedDirty() { + return false; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/OidcAuthenticator.java b/driver-core/src/main/com/mongodb/internal/connection/OidcAuthenticator.java new file mode 100644 index 00000000000..87f48b3308b --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/OidcAuthenticator.java @@ -0,0 +1,814 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.AuthenticationMechanism; +import com.mongodb.MongoClientException; +import com.mongodb.MongoCommandException; +import com.mongodb.MongoConfigurationException; +import com.mongodb.MongoCredential; +import com.mongodb.MongoCredential.OidcCallbackResult; +import com.mongodb.MongoException; +import com.mongodb.MongoSecurityException; +import com.mongodb.ServerAddress; +import com.mongodb.ServerApi; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.Locks; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.authentication.AzureCredentialHelper; +import com.mongodb.internal.authentication.CredentialInfo; +import com.mongodb.internal.authentication.GcpCredentialHelper; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.RawBsonDocument; + +import javax.security.sasl.SaslClient; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.time.Duration; +import java.time.temporal.ChronoUnit; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import static com.mongodb.AuthenticationMechanism.MONGODB_OIDC; +import static com.mongodb.MongoCredential.ALLOWED_HOSTS_KEY; +import static com.mongodb.MongoCredential.DEFAULT_ALLOWED_HOSTS; +import static com.mongodb.MongoCredential.ENVIRONMENT_KEY; +import static com.mongodb.MongoCredential.IdpInfo; +import static com.mongodb.MongoCredential.OIDC_CALLBACK_KEY; +import static com.mongodb.MongoCredential.OIDC_HUMAN_CALLBACK_KEY; +import static com.mongodb.MongoCredential.OidcCallback; +import static com.mongodb.MongoCredential.OidcCallbackContext; +import static com.mongodb.MongoCredential.TOKEN_RESOURCE_KEY; +import static com.mongodb.assertions.Assertions.assertFalse; +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.internal.TimeoutContext.throwMongoTimeoutException; +import static com.mongodb.internal.async.AsyncRunnable.beginAsync; +import static com.mongodb.internal.connection.OidcAuthenticator.OidcValidator.validateBeforeUse; +import static java.lang.String.format; + +/** + * Created per connection, and exists until connection is closed. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class OidcAuthenticator extends SaslAuthenticator { + + private static final String TEST_ENVIRONMENT = "test"; + private static final String AZURE_ENVIRONMENT = "azure"; + private static final String GCP_ENVIRONMENT = "gcp"; + private static final String K8S_ENVIRONMENT = "k8s"; + private static final List<String> IMPLEMENTED_ENVIRONMENTS = Arrays.asList( + AZURE_ENVIRONMENT, GCP_ENVIRONMENT, K8S_ENVIRONMENT, TEST_ENVIRONMENT); + private static final List<String> USER_SUPPORTED_ENVIRONMENTS = Arrays.asList( + AZURE_ENVIRONMENT, GCP_ENVIRONMENT, K8S_ENVIRONMENT); + private static final List<String> REQUIRES_TOKEN_RESOURCE = Arrays.asList( + AZURE_ENVIRONMENT, GCP_ENVIRONMENT); + private static final List<String> ALLOWS_USERNAME = Arrays.asList( + AZURE_ENVIRONMENT); + + private static final Duration CALLBACK_TIMEOUT = Duration.ofMinutes(1); + private static final Duration HUMAN_CALLBACK_TIMEOUT = Duration.ofMinutes(5); + + public static final String OIDC_TOKEN_FILE = "OIDC_TOKEN_FILE"; + + private static final String K8S_FALLBACK_FILE = "/var/run/secrets/kubernetes.io/serviceaccount/token"; + private static final String K8S_AZURE_FILE = "AZURE_FEDERATED_TOKEN_FILE"; + private static final String K8S_AWS_FILE = "AWS_WEB_IDENTITY_TOKEN_FILE"; + + private static final int CALLBACK_API_VERSION_NUMBER = 1; + + @Nullable + private ServerAddress serverAddress; + + @Nullable + private String connectionLastAccessToken; + + private FallbackState fallbackState = FallbackState.INITIAL; + + @Nullable + private BsonDocument speculativeAuthenticateResponse; + + public OidcAuthenticator(final MongoCredentialWithCache credential, + final ClusterConnectionMode clusterConnectionMode, @Nullable final ServerApi serverApi) { + super(credential, clusterConnectionMode, serverApi); + validateBeforeUse(credential.getCredential()); + + if (getMongoCredential().getAuthenticationMechanism() != MONGODB_OIDC) { + throw new MongoException("Incorrect mechanism: " + getMongoCredential().getMechanism()); + } + } + + private Duration getCallbackTimeout(final TimeoutContext timeoutContext) { + if (isHumanCallback()) { + return HUMAN_CALLBACK_TIMEOUT; + } + + if (timeoutContext.hasTimeoutMS()) { + return assertNotNull(timeoutContext.getTimeout()).call(TimeUnit.MILLISECONDS, + () -> + // we can get here if server selection timeout was set to infinite.
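+ // in that case ChronoUnit.FOREVER serves as an effectively unbounded callback timeout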
+ ChronoUnit.FOREVER.getDuration(), + (renamingMs) -> Duration.ofMillis(renamingMs), + () -> throwMongoTimeoutException()); + + } + return CALLBACK_TIMEOUT; + } + + @Override + public String getMechanismName() { + return MONGODB_OIDC.getMechanismName(); + } + + @Override + protected SaslClient createSaslClient(final ServerAddress serverAddress, final OperationContext operationContext) { + this.serverAddress = assertNotNull(serverAddress); + MongoCredentialWithCache mongoCredentialWithCache = getMongoCredentialWithCache(); + return new OidcSaslClient(mongoCredentialWithCache, operationContext.getTimeoutContext()); + } + + @Override + @Nullable + public BsonDocument createSpeculativeAuthenticateCommand(final InternalConnection connection) { + try { + String cachedAccessToken = getMongoCredentialWithCache() + .getOidcCacheEntry() + .getCachedAccessToken(); + if (cachedAccessToken != null) { + return wrapInSpeculative(prepareTokenAsJwt(cachedAccessToken)); + } else { + // otherwise, skip speculative auth + return null; + } + } catch (Exception e) { + throw wrapException(e); + } + } + + private BsonDocument wrapInSpeculative(final byte[] outToken) { + BsonDocument startDocument = createSaslStartCommandDocument(outToken) + .append("db", new BsonString(getMongoCredential().getSource())); + appendSaslStartOptions(startDocument); + return startDocument; + } + + @Override + @Nullable + public BsonDocument getSpeculativeAuthenticateResponse() { + BsonDocument response = speculativeAuthenticateResponse; + // response should only be read once + this.speculativeAuthenticateResponse = null; + if (response == null) { + this.connectionLastAccessToken = null; + } + return response; + } + + @Override + public void setSpeculativeAuthenticateResponse(@Nullable final BsonDocument response) { + speculativeAuthenticateResponse = response; + } + + private boolean isHumanCallback() { + // built-in providers (aws, azure...) are considered machine callbacks + return getOidcCallbackMechanismProperty(OIDC_HUMAN_CALLBACK_KEY) != null; + } + + @Nullable + private OidcCallback getOidcCallbackMechanismProperty(final String key) { + return getMongoCredentialWithCache() + .getCredential() + .getMechanismProperty(key, null); + } + + private OidcCallback getRequestCallback() { + String environment = getMongoCredential().getMechanismProperty(ENVIRONMENT_KEY, null); + OidcCallback machine; + if (TEST_ENVIRONMENT.equals(environment)) { + machine = getTestCallback(); + } else if (AZURE_ENVIRONMENT.equals(environment)) { + machine = getAzureCallback(getMongoCredential()); + } else if (GCP_ENVIRONMENT.equals(environment)) { + machine = getGcpCallback(getMongoCredential()); + } else if (K8S_ENVIRONMENT.equals(environment)) { + machine = getK8sCallback(); + } else { + machine = getOidcCallbackMechanismProperty(OIDC_CALLBACK_KEY); + } + OidcCallback human = getOidcCallbackMechanismProperty(OIDC_HUMAN_CALLBACK_KEY); + return machine != null ? 
machine : assertNotNull(human); + } + + private static OidcCallback getTestCallback() { + return (context) -> { + String accessToken = readTokenFromFile(); + return new OidcCallbackResult(accessToken); + }; + } + + @VisibleForTesting(otherwise = VisibleForTesting.AccessModifier.PRIVATE) + static OidcCallback getK8sCallback() { + return (context) -> { + String azure = System.getenv(K8S_AZURE_FILE); + String aws = System.getenv(K8S_AWS_FILE); + String path; + if (azure != null) { + path = azure; + } else if (aws != null) { + path = aws; + } else { + path = K8S_FALLBACK_FILE; + } + String accessToken = readTokenFromFile(path); + return new OidcCallbackResult(accessToken); + }; + } + + @VisibleForTesting(otherwise = VisibleForTesting.AccessModifier.PRIVATE) + static OidcCallback getAzureCallback(final MongoCredential credential) { + return (context) -> { + String resource = assertNotNull(credential.getMechanismProperty(TOKEN_RESOURCE_KEY, null)); + String clientId = credential.getUserName(); + CredentialInfo response = AzureCredentialHelper.fetchAzureCredentialInfo(resource, clientId); + return new OidcCallbackResult(response.getAccessToken(), response.getExpiresIn()); + }; + } + + @VisibleForTesting(otherwise = VisibleForTesting.AccessModifier.PRIVATE) + static OidcCallback getGcpCallback(final MongoCredential credential) { + return (context) -> { + String resource = assertNotNull(credential.getMechanismProperty(TOKEN_RESOURCE_KEY, null)); + CredentialInfo response = GcpCredentialHelper.fetchGcpCredentialInfo(resource); + return new OidcCallbackResult(response.getAccessToken(), response.getExpiresIn()); + }; + } + + @Override + public void reauthenticate(final InternalConnection connection, final OperationContext operationContext) { + assertTrue(connection.opened()); + authenticationLoop(connection, connection.getDescription(), operationContextWithoutSession(operationContext)); + } + + @Override + public void reauthenticateAsync(final InternalConnection connection, + final OperationContext operationContext, + final SingleResultCallback callback) { + beginAsync().thenRun(c -> { + assertTrue(connection.opened()); + authenticationLoopAsync(connection, connection.getDescription(), operationContextWithoutSession(operationContext), c); + }).finish(callback); + } + + @Override + public void authenticate(final InternalConnection connection, final ConnectionDescription connectionDescription, + final OperationContext operationContext) { + assertFalse(connection.opened()); + authenticationLoop(connection, connectionDescription, operationContext); + } + + @Override + void authenticateAsync(final InternalConnection connection, final ConnectionDescription connectionDescription, + final OperationContext operationContext, + final SingleResultCallback callback) { + beginAsync().thenRun(c -> { + assertFalse(connection.opened()); + authenticationLoopAsync(connection, connectionDescription, operationContext, c); + }).finish(callback); + } + + private static boolean triggersRetry(@Nullable final Throwable t) { + if (t instanceof MongoSecurityException) { + MongoSecurityException e = (MongoSecurityException) t; + Throwable cause = e.getCause(); + if (cause instanceof MongoCommandException) { + MongoCommandException commandCause = (MongoCommandException) cause; + return commandCause.getErrorCode() == 18; + } + } + return false; + } + + private void authenticationLoop(final InternalConnection connection, final ConnectionDescription description, + final OperationContext operationContext) { + fallbackState = 
FallbackState.INITIAL; + while (true) { + try { + super.authenticate(connection, description, operationContext); + break; + } catch (Exception e) { + if (triggersRetry(e) && shouldRetryHandler()) { + continue; + } + throw e; + } + } + } + + private void authenticationLoopAsync(final InternalConnection connection, final ConnectionDescription description, + final OperationContext operationContext, + final SingleResultCallback callback) { + fallbackState = FallbackState.INITIAL; + beginAsync().thenRunRetryingWhile( + operationContext.getTimeoutContext(), + c -> super.authenticateAsync(connection, description, operationContext, c), + e -> triggersRetry(e) && shouldRetryHandler() + ).finish(callback); + } + + private byte[] evaluate(final byte[] challenge, final TimeoutContext timeoutContext) { + byte[][] jwt = new byte[1][]; + Locks.withInterruptibleLock(getMongoCredentialWithCache().getOidcLock(), () -> { + OidcCacheEntry oidcCacheEntry = getMongoCredentialWithCache().getOidcCacheEntry(); + String cachedRefreshToken = oidcCacheEntry.getRefreshToken(); + IdpInfo cachedIdpInfo = oidcCacheEntry.getIdpInfo(); + String cachedAccessToken = validatedCachedAccessToken(); + OidcCallback requestCallback = getRequestCallback(); + boolean isHuman = isHumanCallback(); + String userName = getMongoCredentialWithCache().getCredential().getUserName(); + + if (cachedAccessToken != null) { + fallbackState = FallbackState.PHASE_1_CACHED_TOKEN; + jwt[0] = prepareTokenAsJwt(cachedAccessToken); + } else if (cachedRefreshToken != null) { + // cached refresh token is only set when isHuman + // original IDP info will be present, if refresh token present + assertNotNull(cachedIdpInfo); + // Invoke Callback using cached Refresh Token + fallbackState = FallbackState.PHASE_2_REFRESH_CALLBACK_TOKEN; + OidcCallbackResult result = requestCallback.onRequest(new OidcCallbackContextImpl( + getCallbackTimeout(timeoutContext), cachedIdpInfo, cachedRefreshToken, userName)); + jwt[0] = populateCacheWithCallbackResultAndPrepareJwt(cachedIdpInfo, result); + } else { + // cache is empty + + if (!isHuman) { + // no principal request + fallbackState = FallbackState.PHASE_3B_CALLBACK_TOKEN; + OidcCallbackResult result = requestCallback.onRequest(new OidcCallbackContextImpl( + getCallbackTimeout(timeoutContext), userName)); + jwt[0] = populateCacheWithCallbackResultAndPrepareJwt(null, result); + if (result.getRefreshToken() != null) { + throw new MongoConfigurationException( + "Refresh token must only be provided in human workflow"); + } + } else { + /* + A check for present idp info short-circuits phase-3a. + If a challenge is present, it can only be a response to a + "principal-request", so the challenge must be the resulting + idp info. Such a request is made during speculative auth, + though the source is unimportant, as long as we detect and + use it here. + */ + boolean idpInfoNotPresent = challenge.length == 0; + /* + Checking that the fallback state is not phase-3a ensures that + this does not loop infinitely in the case of a bug. 
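+ For reference, the fallback order of attempts in this method is:
+ (1) cached access token; (2) callback invoked with the cached refresh
+ token; (3a) principal request to obtain IdP info (human flow only);
+ (3b) callback invoked without a refresh token.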
+ */ + boolean alreadyTriedPrincipal = fallbackState == FallbackState.PHASE_3A_PRINCIPAL; + if (!alreadyTriedPrincipal && idpInfoNotPresent) { + // request for idp info, only in the human workflow + fallbackState = FallbackState.PHASE_3A_PRINCIPAL; + jwt[0] = prepareUsername(userName); + } else { + IdpInfo idpInfo = toIdpInfo(challenge); + // there is no cached refresh token + fallbackState = FallbackState.PHASE_3B_CALLBACK_TOKEN; + OidcCallbackResult result = requestCallback.onRequest(new OidcCallbackContextImpl( + getCallbackTimeout(timeoutContext), idpInfo, null, userName)); + jwt[0] = populateCacheWithCallbackResultAndPrepareJwt(idpInfo, result); + } + } + } + }); + return jwt[0]; + } + + /** + * Must be guarded by {@link MongoCredentialWithCache#getOidcLock()}. + */ + @Nullable + private String validatedCachedAccessToken() { + MongoCredentialWithCache mongoCredentialWithCache = getMongoCredentialWithCache(); + OidcCacheEntry cacheEntry = mongoCredentialWithCache.getOidcCacheEntry(); + String cachedAccessToken = cacheEntry.getCachedAccessToken(); + String invalidConnectionAccessToken = connectionLastAccessToken; + + if (cachedAccessToken != null) { + boolean cachedTokenIsInvalid = cachedAccessToken.equals(invalidConnectionAccessToken); + if (cachedTokenIsInvalid) { + mongoCredentialWithCache.setOidcCacheEntry(cacheEntry.clearAccessToken()); + cachedAccessToken = null; + } + } + return cachedAccessToken; + } + + private boolean clientIsComplete() { + return fallbackState != FallbackState.PHASE_3A_PRINCIPAL; + } + + private boolean shouldRetryHandler() { + boolean[] result = new boolean[1]; + Locks.withInterruptibleLock(getMongoCredentialWithCache().getOidcLock(), () -> { + MongoCredentialWithCache mongoCredentialWithCache = getMongoCredentialWithCache(); + OidcCacheEntry cacheEntry = mongoCredentialWithCache.getOidcCacheEntry(); + if (fallbackState == FallbackState.PHASE_1_CACHED_TOKEN) { + // a cached access token failed + mongoCredentialWithCache.setOidcCacheEntry(cacheEntry + .clearAccessToken()); + result[0] = true; + } else if (fallbackState == FallbackState.PHASE_2_REFRESH_CALLBACK_TOKEN) { + // a refresh token failed + mongoCredentialWithCache.setOidcCacheEntry(cacheEntry + .clearAccessToken() + .clearRefreshToken()); + result[0] = true; + } else { + // a clean-restart failed + mongoCredentialWithCache.setOidcCacheEntry(cacheEntry + .clearAccessToken() + .clearRefreshToken()); + result[0] = false; + } + }); + return result[0]; + } + + static final class OidcCacheEntry { + @Nullable + private final String accessToken; + @Nullable + private final String refreshToken; + @Nullable + private final IdpInfo idpInfo; + + @Override + public String toString() { + return "OidcCacheEntry{" + + "\n accessToken=[omitted]" + + ",\n refreshToken=[omitted]" + + ",\n idpInfo=" + idpInfo + + '}'; + } + + OidcCacheEntry() { + this(null, null, null); + } + + private OidcCacheEntry(@Nullable final String accessToken, + @Nullable final String refreshToken, @Nullable final IdpInfo idpInfo) { + this.accessToken = accessToken; + this.refreshToken = refreshToken; + this.idpInfo = idpInfo; + } + + @Nullable + String getCachedAccessToken() { + return accessToken; + } + + @Nullable + String getRefreshToken() { + return refreshToken; + } + + @Nullable + IdpInfo getIdpInfo() { + return idpInfo; + } + + OidcCacheEntry clearAccessToken() { + return new OidcCacheEntry( + null, + this.refreshToken, + this.idpInfo); + } + + OidcCacheEntry clearRefreshToken() { + return new OidcCacheEntry( + this.accessToken, + null, 
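+ // the cached IdP info belongs to the refresh token being cleared, so it is dropped as well: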
+ null); + } + } + + private final class OidcSaslClient extends SaslClientImpl { + private final TimeoutContext timeoutContext; + + private OidcSaslClient(final MongoCredentialWithCache mongoCredentialWithCache, + final TimeoutContext timeoutContext) { + super(mongoCredentialWithCache.getCredential()); + + this.timeoutContext = timeoutContext; + } + + @Override + public byte[] evaluateChallenge(final byte[] challenge) { + return evaluate(challenge, timeoutContext); + } + + @Override + public boolean isComplete() { + return clientIsComplete(); + } + + } + + private static String readTokenFromFile() { + String path = System.getenv(OIDC_TOKEN_FILE); + if (path == null) { + throw new MongoClientException( + format("Environment variable must be specified: %s", OIDC_TOKEN_FILE)); + } + return readTokenFromFile(path); + } + + private static String readTokenFromFile(final String path) { + try { + return new String(Files.readAllBytes(Paths.get(path)), StandardCharsets.UTF_8); + } catch (IOException e) { + throw new MongoClientException(format( + "Could not read file specified by environment variable: %s at path: %s", + OIDC_TOKEN_FILE, path), e); + } + } + + private byte[] populateCacheWithCallbackResultAndPrepareJwt( + @Nullable final IdpInfo serverInfo, + @Nullable final OidcCallbackResult oidcCallbackResult) { + if (oidcCallbackResult == null) { + throw new MongoConfigurationException("Result of callback must not be null"); + } + OidcCacheEntry newEntry = new OidcCacheEntry(oidcCallbackResult.getAccessToken(), + oidcCallbackResult.getRefreshToken(), serverInfo); + getMongoCredentialWithCache().setOidcCacheEntry(newEntry); + return prepareTokenAsJwt(oidcCallbackResult.getAccessToken()); + } + + private static byte[] prepareUsername(@Nullable final String username) { + BsonDocument document = new BsonDocument(); + if (username != null) { + document = document.append("n", new BsonString(username)); + } + return toBson(document); + } + + private IdpInfo toIdpInfo(final byte[] challenge) { + // validate here to prevent creating IdpInfo for unauthorized hosts + validateAllowedHosts(getMongoCredential()); + BsonDocument c = new RawBsonDocument(challenge); + String issuer = c.getString("issuer").getValue(); + String clientId = !c.containsKey("clientId") ? 
null : c.getString("clientId").getValue(); + return new IdpInfoImpl( + issuer, + clientId, + getStringArray(c, "requestScopes")); + } + + @Nullable + private static List<String> getStringArray(final BsonDocument document, final String key) { + if (!document.isArray(key)) { + return null; + } + return document.getArray(key).stream() + // ignore non-string values from server, rather than error + .filter(v -> v.isString()) + .map(v -> v.asString().getValue()) + .collect(Collectors.toList()); + } + + private void validateAllowedHosts(final MongoCredential credential) { + List<String> allowedHosts = assertNotNull(credential.getMechanismProperty(ALLOWED_HOSTS_KEY, DEFAULT_ALLOWED_HOSTS)); + String host = assertNotNull(serverAddress).getHost(); + boolean permitted = allowedHosts.stream().anyMatch(allowedHost -> { + if (allowedHost.startsWith("*.")) { + String ending = allowedHost.substring(1); + return host.endsWith(ending); + } else if (allowedHost.contains("*")) { + throw new IllegalArgumentException( + "Allowed host " + allowedHost + " contains invalid wildcard"); + } else { + return host.equals(allowedHost); + } + }); + if (!permitted) { + throw new MongoSecurityException( + credential, "Host " + host + " not permitted by " + ALLOWED_HOSTS_KEY + + ", values: " + allowedHosts); + } + } + + private byte[] prepareTokenAsJwt(final String accessToken) { + connectionLastAccessToken = accessToken; + return toJwtDocument(accessToken); + } + + private static byte[] toJwtDocument(final String accessToken) { + return toBson(new BsonDocument().append("jwt", new BsonString(accessToken))); + } + + /** + * Contains all validation logic for OIDC in one location + */ + public static final class OidcValidator { + private OidcValidator() { + } + + public static void validateOidcCredentialConstruction( + final String source, + final Map<String, Object> mechanismProperties) { + + if (!"$external".equals(source)) { + throw new IllegalArgumentException("source must be '$external'"); + } + + Object environmentName = mechanismProperties.get(ENVIRONMENT_KEY.toLowerCase()); + if (environmentName != null) { + if (!(environmentName instanceof String) || !IMPLEMENTED_ENVIRONMENTS.contains(environmentName)) { + throw new IllegalArgumentException(ENVIRONMENT_KEY + " must be one of: " + USER_SUPPORTED_ENVIRONMENTS); + } + } + } + + public static void validateCreateOidcCredential(@Nullable final char[] password) { + if (password != null) { + throw new IllegalArgumentException("password must not be specified for " + + AuthenticationMechanism.MONGODB_OIDC); + } + } + + @VisibleForTesting(otherwise = VisibleForTesting.AccessModifier.PRIVATE) + public static void validateBeforeUse(final MongoCredential credential) { + String userName = credential.getUserName(); + Object environmentName = credential.getMechanismProperty(ENVIRONMENT_KEY, null); + Object machineCallback = credential.getMechanismProperty(OIDC_CALLBACK_KEY, null); + Object humanCallback = credential.getMechanismProperty(OIDC_HUMAN_CALLBACK_KEY, null); + boolean allowedHostsIsSet = credential.getMechanismProperty(ALLOWED_HOSTS_KEY, null) != null; + if (humanCallback == null && allowedHostsIsSet) { + throw new IllegalArgumentException(ALLOWED_HOSTS_KEY + " must be specified only when " + + OIDC_HUMAN_CALLBACK_KEY + " is specified"); + } + if (environmentName == null) { + // callback + if (machineCallback == null && humanCallback == null) { + throw new IllegalArgumentException("Either " + ENVIRONMENT_KEY + + " or " + OIDC_CALLBACK_KEY + + " or " + OIDC_HUMAN_CALLBACK_KEY + + " must be specified"); + }
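+ // For illustration only (not part of the original change): configurations that
+ // pass this validation, assuming the public MongoCredential OIDC API; the
+ // callback, client id, and audience values below are hypothetical placeholders:
+ //   MongoCredential.createOidcCredential(null)
+ //           .withMechanismProperty(OIDC_CALLBACK_KEY, myCallback);      // custom machine callback
+ //   MongoCredential.createOidcCredential(null)
+ //           .withMechanismProperty(ENVIRONMENT_KEY, "k8s");             // built-in environment
+ //   MongoCredential.createOidcCredential("<clientId>")                  // username allowed for azure
+ //           .withMechanismProperty(ENVIRONMENT_KEY, "azure")
+ //           .withMechanismProperty(TOKEN_RESOURCE_KEY, "<audience>");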
+ if (machineCallback != null && humanCallback != null) { + throw new IllegalArgumentException("Both " + OIDC_CALLBACK_KEY + + " and " + OIDC_HUMAN_CALLBACK_KEY + + " must not be specified"); + } + } else { + if (!(environmentName instanceof String)) { + throw new IllegalArgumentException(ENVIRONMENT_KEY + " must be a String"); + } + if (userName != null && !ALLOWS_USERNAME.contains(environmentName)) { + throw new IllegalArgumentException("user name must not be specified when " + ENVIRONMENT_KEY + " is specified"); + } + if (machineCallback != null) { + throw new IllegalArgumentException(OIDC_CALLBACK_KEY + " must not be specified when " + ENVIRONMENT_KEY + " is specified"); + } + if (humanCallback != null) { + throw new IllegalArgumentException(OIDC_HUMAN_CALLBACK_KEY + " must not be specified when " + ENVIRONMENT_KEY + " is specified"); + } + String tokenResource = credential.getMechanismProperty(TOKEN_RESOURCE_KEY, null); + boolean hasTokenResourceProperty = tokenResource != null; + boolean tokenResourceSupported = REQUIRES_TOKEN_RESOURCE.contains(environmentName); + if (hasTokenResourceProperty != tokenResourceSupported) { + throw new IllegalArgumentException(TOKEN_RESOURCE_KEY + + " must be provided if and only if " + ENVIRONMENT_KEY + + " " + environmentName + " " + + " is one of: " + REQUIRES_TOKEN_RESOURCE + + ". " + TOKEN_RESOURCE_KEY + ": " + tokenResource); + } + } + } + } + + @VisibleForTesting(otherwise = VisibleForTesting.AccessModifier.PRIVATE) + static class OidcCallbackContextImpl implements OidcCallbackContext { + private final Duration timeout; + @Nullable + private final IdpInfo idpInfo; + @Nullable + private final String refreshToken; + @Nullable + private final String userName; + + OidcCallbackContextImpl(final Duration timeout, @Nullable final String userName) { + this.timeout = assertNotNull(timeout); + this.idpInfo = null; + this.refreshToken = null; + this.userName = userName; + } + + OidcCallbackContextImpl(final Duration timeout, final IdpInfo idpInfo, + @Nullable final String refreshToken, @Nullable final String userName) { + this.timeout = assertNotNull(timeout); + this.idpInfo = assertNotNull(idpInfo); + this.refreshToken = refreshToken; + this.userName = userName; + } + + @Override + @Nullable + public IdpInfo getIdpInfo() { + return idpInfo; + } + + @Override + public Duration getTimeout() { + return timeout; + } + + @Override + public int getVersion() { + return CALLBACK_API_VERSION_NUMBER; + } + + @Override + @Nullable + public String getRefreshToken() { + return refreshToken; + } + + @Override + @Nullable + public String getUserName() { + return userName; + } + } + + @VisibleForTesting(otherwise = VisibleForTesting.AccessModifier.PRIVATE) + static final class IdpInfoImpl implements IdpInfo { + private final String issuer; + @Nullable + private final String clientId; + private final List<String> requestScopes; + + IdpInfoImpl(final String issuer, @Nullable final String clientId, @Nullable final List<String> requestScopes) { + this.issuer = assertNotNull(issuer); + this.clientId = clientId; + this.requestScopes = requestScopes == null + ? Collections.emptyList() + : Collections.unmodifiableList(requestScopes); + } + + @Override + public String getIssuer() { + return issuer; + } + + @Override + @Nullable + public String getClientId() { + return clientId; + } + + @Override + public List<String> getRequestScopes() { + return requestScopes; + } + } + + /** + * What was sent in the last request by this connection to the server.
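+ * Also used by {@code shouldRetryHandler()} to decide whether a failed attempt is retried and which cached tokens are cleared.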
+ */ + private enum FallbackState { + INITIAL, + PHASE_1_CACHED_TOKEN, + PHASE_2_REFRESH_CALLBACK_TOKEN, + PHASE_3A_PRINCIPAL, + PHASE_3B_CALLBACK_TOKEN + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/OpCode.java b/driver-core/src/main/com/mongodb/internal/connection/OpCode.java new file mode 100644 index 00000000000..74f271d9c39 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/OpCode.java @@ -0,0 +1,39 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +enum OpCode { + OP_REPLY(1), + OP_UPDATE(2001), + OP_INSERT(2002), + OP_QUERY(2004), + OP_GETMORE(2005), + OP_DELETE(2006), + OP_KILL_CURSORS(2007), + OP_COMPRESSED(2012), + OP_MSG(2013); + + OpCode(final int value) { + this.value = value; + } + + private final int value; + + public int getValue() { + return value; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/OperationContext.java b/driver-core/src/main/com/mongodb/internal/connection/OperationContext.java new file mode 100644 index 00000000000..7e0de92da1d --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/OperationContext.java @@ -0,0 +1,224 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.connection; + +import com.mongodb.MongoConnectionPoolClearedException; +import com.mongodb.RequestContext; +import com.mongodb.ServerAddress; +import com.mongodb.ServerApi; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ClusterType; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.IgnorableRequestContext; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.session.SessionContext; +import com.mongodb.lang.Nullable; +import com.mongodb.selector.ServerSelector; + +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.atomic.AtomicLong; + +import static java.util.stream.Collectors.toList; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class OperationContext { + private static final AtomicLong NEXT_ID = new AtomicLong(0); + private final long id; + private final ServerDeprioritization serverDeprioritization; + private final SessionContext sessionContext; + private final RequestContext requestContext; + private final TimeoutContext timeoutContext; + @Nullable + private final ServerApi serverApi; + @Nullable + private final String operationName; + + public OperationContext(final RequestContext requestContext, final SessionContext sessionContext, final TimeoutContext timeoutContext, + @Nullable final ServerApi serverApi) { + this(requestContext, sessionContext, timeoutContext, serverApi, null); + } + + public OperationContext(final RequestContext requestContext, final SessionContext sessionContext, final TimeoutContext timeoutContext, + @Nullable final ServerApi serverApi, @Nullable final String operationName) { + this(NEXT_ID.incrementAndGet(), requestContext, sessionContext, timeoutContext, new ServerDeprioritization(), serverApi, operationName); + } + + public static OperationContext simpleOperationContext( + final TimeoutSettings timeoutSettings, @Nullable final ServerApi serverApi) { + return new OperationContext( + IgnorableRequestContext.INSTANCE, + NoOpSessionContext.INSTANCE, + new TimeoutContext(timeoutSettings), + serverApi, + null); + } + + public static OperationContext simpleOperationContext(final TimeoutContext timeoutContext) { + return new OperationContext( + IgnorableRequestContext.INSTANCE, + NoOpSessionContext.INSTANCE, + timeoutContext, + null, + null); + } + + public OperationContext withSessionContext(final SessionContext sessionContext) { + return new OperationContext(id, requestContext, sessionContext, timeoutContext, serverDeprioritization, serverApi, operationName); + } + + public OperationContext withTimeoutContext(final TimeoutContext timeoutContext) { + return new OperationContext(id, requestContext, sessionContext, timeoutContext, serverDeprioritization, serverApi, operationName); + } + + public OperationContext withOperationName(final String operationName) { + return new OperationContext(id, requestContext, sessionContext, timeoutContext, serverDeprioritization, serverApi, operationName); + } + + public long getId() { + return id; + } + + public SessionContext getSessionContext() { + return sessionContext; + } + + public RequestContext getRequestContext() { + return requestContext; + } + + public TimeoutContext getTimeoutContext() { + return timeoutContext; + } + + @Nullable + public ServerApi getServerApi() { + return serverApi; + } + + @Nullable + public String getOperationName() { + return operationName; + } + + @VisibleForTesting(otherwise = VisibleForTesting.AccessModifier.PRIVATE) + public OperationContext(final long id, + final RequestContext requestContext, + final SessionContext sessionContext, + final TimeoutContext timeoutContext, + final ServerDeprioritization serverDeprioritization, + @Nullable final ServerApi serverApi, + @Nullable final String operationName) { + this.id = id; + this.serverDeprioritization = serverDeprioritization; + this.requestContext = requestContext; + this.sessionContext = sessionContext; + this.timeoutContext = timeoutContext; + this.serverApi = serverApi; + this.operationName = operationName; + } + + @VisibleForTesting(otherwise = VisibleForTesting.AccessModifier.PRIVATE) + public OperationContext(final long id, + final RequestContext requestContext, + final SessionContext sessionContext, + final TimeoutContext timeoutContext, + @Nullable 
final ServerApi serverApi, + @Nullable final String operationName) { + this.id = id; + this.serverDeprioritization = new ServerDeprioritization(); + this.requestContext = requestContext; + this.sessionContext = sessionContext; + this.timeoutContext = timeoutContext; + this.serverApi = serverApi; + this.operationName = operationName; + } + + + /** + * @return The same {@link ServerDeprioritization} if called on the same {@link OperationContext}. + */ + public ServerDeprioritization getServerDeprioritization() { + return serverDeprioritization; + } + + public static final class ServerDeprioritization { + @Nullable + private ServerAddress candidate; + private final Set<ServerAddress> deprioritized; + private final DeprioritizingSelector selector; + + private ServerDeprioritization() { + candidate = null; + deprioritized = new HashSet<>(); + selector = new DeprioritizingSelector(); + } + + /** + * The returned {@link ServerSelector} tries to {@linkplain ServerSelector#select(ClusterDescription) select} + * only the {@link ServerDescription}s that do not have deprioritized {@link ServerAddress}es. + * If no such {@link ServerDescription} can be selected, then it selects {@link ClusterDescription#getServerDescriptions()}. + */ + ServerSelector getServerSelector() { + return selector; + } + + void updateCandidate(final ServerAddress serverAddress) { + candidate = serverAddress; + } + + public void onAttemptFailure(final Throwable failure) { + if (candidate == null || failure instanceof MongoConnectionPoolClearedException) { + candidate = null; + return; + } + deprioritized.add(candidate); + } + + /** + * {@link ServerSelector} requires thread safety, but that is only because a user may specify + * {@link com.mongodb.connection.ClusterSettings.Builder#serverSelector(ServerSelector)}, + * which indeed may be used concurrently. {@link DeprioritizingSelector} does not need to be thread-safe. + */ + private final class DeprioritizingSelector implements ServerSelector { + private DeprioritizingSelector() { + } + + @Override + public List<ServerDescription> select(final ClusterDescription clusterDescription) { + List<ServerDescription> serverDescriptions = clusterDescription.getServerDescriptions(); + if (!isEnabled(clusterDescription.getType())) { + return serverDescriptions; + } + List<ServerDescription> nonDeprioritizedServerDescriptions = serverDescriptions + .stream() + .filter(serverDescription -> !deprioritized.contains(serverDescription.getAddress())) + .collect(toList()); + return nonDeprioritizedServerDescriptions.isEmpty() ? serverDescriptions : nonDeprioritizedServerDescriptions; + } + + private boolean isEnabled(final ClusterType clusterType) { + return clusterType == ClusterType.SHARDED; + } + } + } +} + diff --git a/driver-core/src/main/com/mongodb/internal/connection/PlainAuthenticator.java b/driver-core/src/main/com/mongodb/internal/connection/PlainAuthenticator.java new file mode 100644 index 00000000000..f075ab154f5 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/PlainAuthenticator.java @@ -0,0 +1,72 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoCredential; +import com.mongodb.MongoSecurityException; +import com.mongodb.ServerAddress; +import com.mongodb.ServerApi; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.lang.Nullable; + +import javax.security.auth.callback.Callback; +import javax.security.auth.callback.NameCallback; +import javax.security.auth.callback.PasswordCallback; +import javax.security.sasl.Sasl; +import javax.security.sasl.SaslClient; +import javax.security.sasl.SaslException; + +import static com.mongodb.AuthenticationMechanism.PLAIN; +import static com.mongodb.assertions.Assertions.isTrue; + +class PlainAuthenticator extends SaslAuthenticator { + private static final String DEFAULT_PROTOCOL = "mongodb"; + + PlainAuthenticator(final MongoCredentialWithCache credential, final ClusterConnectionMode clusterConnectionMode, + @Nullable final ServerApi serverApi) { + super(credential, clusterConnectionMode, serverApi); + } + + @Override + public String getMechanismName() { + return PLAIN.getMechanismName(); + } + + @Override + protected SaslClient createSaslClient(final ServerAddress serverAddress, final OperationContext operationContext) { + MongoCredential credential = getMongoCredential(); + isTrue("mechanism is PLAIN", credential.getAuthenticationMechanism() == PLAIN); + try { + return Sasl.createSaslClient(new String[]{PLAIN.getMechanismName()}, + credential.getUserName(), + DEFAULT_PROTOCOL, + serverAddress.getHost(), + null, + callbacks -> { + for (final Callback callback : callbacks) { + if (callback instanceof PasswordCallback) { + ((PasswordCallback) callback).setPassword(credential.getPassword()); + } else if (callback instanceof NameCallback) { + ((NameCallback) callback).setName(credential.getUserName()); + } + } + }); + } catch (SaslException e) { + throw new MongoSecurityException(credential, "Exception initializing SASL client", e); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/Pool.java b/driver-core/src/main/com/mongodb/internal/connection/Pool.java new file mode 100644 index 00000000000..2865fc9463c --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/Pool.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection; + +import java.util.concurrent.TimeUnit; + +interface Pool<T> { + T get(); + + T get(long timeout, TimeUnit timeUnit); + + void release(T t); + + void close(); + + void release(T t, boolean discard); +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/PowerOfTwoBufferPool.java b/driver-core/src/main/com/mongodb/internal/connection/PowerOfTwoBufferPool.java new file mode 100644 index 00000000000..a8c7f87a24e --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/PowerOfTwoBufferPool.java @@ -0,0 +1,208 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.internal.thread.DaemonThreadFactory; +import org.bson.ByteBuf; +import org.bson.ByteBufNIO; + +import java.nio.Buffer; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.ConcurrentLinkedDeque; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class PowerOfTwoBufferPool implements BufferProvider { + private static final Logger LOGGER = Loggers.getLogger("connection"); + + /** + * The global default pool. Pruning is enabled on this pool. Idle buffers are pruned after one minute. + */ + public static final PowerOfTwoBufferPool DEFAULT = new PowerOfTwoBufferPool().enablePruning(); + + private static final class IdleTrackingByteBuffer { + private final long lastUsedNanos; + private final ByteBuffer buffer; + + private IdleTrackingByteBuffer(final ByteBuffer buffer) { + this.lastUsedNanos = System.nanoTime(); + this.buffer = buffer; + } + + public long getLastUsedNanos() { + return lastUsedNanos; + } + + public ByteBuffer getBuffer() { + return buffer; + } + } + + private final Map powerOfTwoToPoolMap = new HashMap<>(); + private final long maxIdleTimeNanos; + private final ScheduledExecutorService pruner; + + /** + * Construct an instance with a highest power of two of 24. + */ + PowerOfTwoBufferPool() { + this(24); + } + + /** + * Construct an instance. + * + * @param highestPowerOfTwo the highest power of two buffer size that will be pooled + */ + PowerOfTwoBufferPool(final int highestPowerOfTwo) { + this(highestPowerOfTwo, 1, TimeUnit.MINUTES); + } + + /** + * Construct an instance. + * + * @param highestPowerOfTwo the highest power of two buffer size that will be pooled + * @param maxIdleTime max idle time when pruning is enabled + * @param timeUnit time unit of maxIdleTime + */ + PowerOfTwoBufferPool(final int highestPowerOfTwo, final long maxIdleTime, final TimeUnit timeUnit) { + int powerOfTwo = 1; + for (int i = 0; i <= highestPowerOfTwo; i++) { + int size = powerOfTwo; + powerOfTwoToPoolMap.put(i, new BufferPool(size)); + powerOfTwo = powerOfTwo << 1; + } + maxIdleTimeNanos = timeUnit.toNanos(maxIdleTime); + pruner = Executors.newSingleThreadScheduledExecutor(new DaemonThreadFactory("BufferPoolPruner")); + } + + /** + * Call this method at most once to enable a background thread that prunes idle buffers from the pool + */ + PowerOfTwoBufferPool enablePruning() { + pruner.scheduleAtFixedRate(this::prune, maxIdleTimeNanos, maxIdleTimeNanos / 2, TimeUnit.NANOSECONDS); + return this; + } + + void disablePruning() { + pruner.shutdownNow(); + } + + @Override + public ByteBuf getBuffer(final int size) { + return new PooledByteBufNIO(getByteBuffer(size)); + } + + public ByteBuffer getByteBuffer(final int size) { + BufferPool pool = powerOfTwoToPoolMap.get(log2(roundUpToNextHighestPowerOfTwo(size))); + ByteBuffer byteBuffer = (pool == null) ? createNew(size) : pool.get().getBuffer(); + + ((Buffer) byteBuffer).clear(); + ((Buffer) byteBuffer).limit(size); + return byteBuffer; + } + + private ByteBuffer createNew(final int size) { + ByteBuffer buf = ByteBuffer.allocate(size); + buf.order(ByteOrder.LITTLE_ENDIAN); + return buf; + } + + public void release(final ByteBuffer buffer) { + BufferPool pool = + powerOfTwoToPoolMap.get(log2(roundUpToNextHighestPowerOfTwo(buffer.capacity()))); + if (pool != null) { + pool.release(new IdleTrackingByteBuffer(buffer)); + } + } + + private void prune() { + try { + powerOfTwoToPoolMap.values().forEach(BufferPool::prune); + } catch (Throwable t) { + LOGGER.error(this + " stopped pruning idle buffer pools. 
You may want to recreate the MongoClient", t); + throw t; + } + } + + static int log2(final int powerOfTwo) { + return 31 - Integer.numberOfLeadingZeros(powerOfTwo); + } + + static int roundUpToNextHighestPowerOfTwo(final int size) { + int v = size; + v--; + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + v++; + return v; + } + + private class PooledByteBufNIO extends ByteBufNIO { + + PooledByteBufNIO(final ByteBuffer buf) { + super(buf); + } + + @Override + public void release() { + ByteBuffer wrapped = asNIO(); + super.release(); + if (getReferenceCount() == 0) { + PowerOfTwoBufferPool.this.release(wrapped); + } + } + } + + private final class BufferPool { + private final int bufferSize; + private final ConcurrentLinkedDeque available = new ConcurrentLinkedDeque<>(); + + BufferPool(final int bufferSize) { + this.bufferSize = bufferSize; + } + + IdleTrackingByteBuffer get() { + IdleTrackingByteBuffer buffer = available.pollLast(); + if (buffer != null) { + return buffer; + } + return new IdleTrackingByteBuffer(createNew(bufferSize)); + } + + void release(final IdleTrackingByteBuffer t) { + available.addLast(t); + } + + void prune() { + long now = System.nanoTime(); + available.removeIf(cur -> now - cur.getLastUsedNanos() >= maxIdleTimeNanos); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ProtocolExecutor.java b/driver-core/src/main/com/mongodb/internal/connection/ProtocolExecutor.java new file mode 100644 index 00000000000..6331e52640f --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/ProtocolExecutor.java @@ -0,0 +1,33 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.session.SessionContext; +import com.mongodb.lang.Nullable; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public interface ProtocolExecutor { + + @Nullable + <T> T execute(CommandProtocol<T> protocol, InternalConnection connection, SessionContext sessionContext); + + <T> void executeAsync(CommandProtocol<T> protocol, InternalConnection connection, SessionContext sessionContext, + SingleResultCallback<T> callback); +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ProtocolHelper.java b/driver-core/src/main/com/mongodb/internal/connection/ProtocolHelper.java new file mode 100644 index 00000000000..c6ad5f451a0 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/ProtocolHelper.java @@ -0,0 +1,344 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.ErrorCategory; +import com.mongodb.MongoCommandException; +import com.mongodb.MongoException; +import com.mongodb.MongoExecutionTimeoutException; +import com.mongodb.MongoNodeIsRecoveringException; +import com.mongodb.MongoNotPrimaryException; +import com.mongodb.MongoQueryException; +import com.mongodb.RequestContext; +import com.mongodb.ServerAddress; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.event.CommandFailedEvent; +import com.mongodb.event.CommandListener; +import com.mongodb.event.CommandStartedEvent; +import com.mongodb.event.CommandSucceededEvent; +import com.mongodb.internal.IgnorableRequestContext; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.lang.Nullable; +import org.bson.BsonBinaryReader; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonReader; +import org.bson.BsonString; +import org.bson.BsonTimestamp; +import org.bson.BsonType; +import org.bson.BsonValue; +import org.bson.codecs.BsonValueCodecProvider; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.io.ByteBufferBsonInput; + +import java.util.List; + +import static com.mongodb.assertions.Assertions.notNull; +import static java.lang.String.format; +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static org.bson.codecs.BsonValueCodecProvider.getClassForBsonType; +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class ProtocolHelper { + private static final Logger PROTOCOL_EVENT_LOGGER = Loggers.getLogger("protocol.event"); + private static final CodecRegistry REGISTRY = fromProviders(new BsonValueCodecProvider()); + private static final int NO_ERROR_CODE = -1; + + + static boolean isCommandOk(final BsonDocument response) { + BsonValue okValue = response.get("ok"); + return isCommandOk(okValue); + } + + static boolean isCommandOk(final BsonReader bsonReader) { + return isCommandOk(getField(bsonReader, "ok")); + } + + static boolean isCommandOk(final ResponseBuffers responseBuffers) { + try { + return isCommandOk(createBsonReader(responseBuffers)); + } finally { + responseBuffers.reset(); + } + } + + @Nullable + static MongoException createSpecialWriteConcernException(final ResponseBuffers responseBuffers, + final ServerAddress serverAddress, + final TimeoutContext timeoutContext) { + BsonValue writeConcernError = getField(createBsonReader(responseBuffers), "writeConcernError"); + if (writeConcernError == null) { + return null; + } else { + return createSpecialException(writeConcernError.asDocument(), serverAddress, "errmsg", timeoutContext); + } + } + + @Nullable + static BsonTimestamp getOperationTime(final ResponseBuffers responseBuffers) { + return getFieldValueAsTimestamp(responseBuffers, "operationTime"); + } + + @Nullable + static BsonDocument getClusterTime(final ResponseBuffers responseBuffers) { + return getFieldValueAsDocument(responseBuffers, "$clusterTime"); + } + + @Nullable + static BsonTimestamp getSnapshotTimestamp(final ResponseBuffers responseBuffers) { + BsonValue atClusterTimeValue = getNestedFieldValue(responseBuffers, "cursor", "atClusterTime"); + if (atClusterTimeValue == null) { + atClusterTimeValue = getFieldValue(responseBuffers, "atClusterTime"); + } + if (atClusterTimeValue != null && atClusterTimeValue.isTimestamp()) { + return atClusterTimeValue.asTimestamp(); + } + return null; + } + + @Nullable + static BsonDocument getRecoveryToken(final ResponseBuffers responseBuffers) { + return getFieldValueAsDocument(responseBuffers, "recoveryToken"); + } + + @SuppressWarnings("SameParameterValue") + @Nullable + private static BsonTimestamp getFieldValueAsTimestamp(final ResponseBuffers responseBuffers, final String fieldName) { + BsonValue value = getFieldValue(responseBuffers, fieldName); + if (value == null) { + return null; + } + return value.asTimestamp(); + } + + @Nullable + private static BsonDocument getFieldValueAsDocument(final ResponseBuffers responseBuffers, final String fieldName) { + BsonValue value = getFieldValue(responseBuffers, fieldName); + if (value == null) { + return null; + } + return value.asDocument(); + } + + @Nullable + private static BsonValue getFieldValue(final ResponseBuffers responseBuffers, final String fieldName) { + try { + return getField(createBsonReader(responseBuffers), fieldName); + } finally { + responseBuffers.reset(); + } + } + + private static BsonBinaryReader createBsonReader(final ResponseBuffers responseBuffers) { + return new BsonBinaryReader(new ByteBufferBsonInput(responseBuffers.getBodyByteBuffer())); + } + + @Nullable + private static BsonValue getField(final BsonReader bsonReader, final String fieldName) { + bsonReader.readStartDocument(); + while (bsonReader.readBsonType() != BsonType.END_OF_DOCUMENT) { + if (bsonReader.readName().equals(fieldName)) { + return REGISTRY.get(getClassForBsonType(bsonReader.getCurrentBsonType())).decode(bsonReader, + DecoderContext.builder().build()); + } + 
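+ // not the requested field: skip its value and examine the next field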
bsonReader.skipValue(); + } + bsonReader.readEndDocument(); + return null; + } + + @SuppressWarnings("SameParameterValue") + @Nullable + private static BsonValue getNestedFieldValue(final ResponseBuffers responseBuffers, final String topLevelFieldName, + final String nestedFieldName) { + try { + BsonReader bsonReader = createBsonReader(responseBuffers); + bsonReader.readStartDocument(); + while (bsonReader.readBsonType() != BsonType.END_OF_DOCUMENT) { + if (bsonReader.readName().equals(topLevelFieldName)) { + return getField(bsonReader, nestedFieldName); + } + bsonReader.skipValue(); + } + bsonReader.readEndDocument(); + return null; + } finally { + responseBuffers.reset(); + } + } + + private static boolean isCommandOk(@Nullable final BsonValue okValue) { + if (okValue == null) { + return false; + } else if (okValue.isBoolean()) { + return okValue.asBoolean().getValue(); + } else if (okValue.isNumber()) { + return okValue.asNumber().intValue() == 1; + } else { + return false; + } + } + + static MongoException getCommandFailureException(final BsonDocument response, final ServerAddress serverAddress, + final TimeoutContext timeoutContext) { + MongoException specialException = createSpecialException(response, serverAddress, "errmsg", timeoutContext); + if (specialException != null) { + return specialException; + } + return new MongoCommandException(response, serverAddress); + } + + static int getErrorCode(final BsonDocument response) { + return (response.getNumber("code", new BsonInt32(-1)).intValue()); + } + + static String getErrorMessage(final BsonDocument response, final String errorMessageFieldName) { + return response.getString(errorMessageFieldName, new BsonString("")).getValue(); + } + + static MongoException getQueryFailureException(final BsonDocument errorDocument, final ServerAddress serverAddress, + final TimeoutContext timeoutContext) { + MongoException specialException = createSpecialException(errorDocument, serverAddress, "$err", timeoutContext); + if (specialException != null) { + return specialException; + } + return new MongoQueryException(errorDocument, serverAddress); + } + + static MessageSettings getMessageSettings(final ConnectionDescription connectionDescription, final ServerDescription serverDescription) { + return MessageSettings.builder() + .maxDocumentSize(connectionDescription.getMaxDocumentSize()) + .maxMessageSize(connectionDescription.getMaxMessageSize()) + .maxBatchCount(connectionDescription.getMaxBatchCount()) + .maxWireVersion(connectionDescription.getMaxWireVersion()) + .serverType(connectionDescription.getServerType()) + .sessionSupported(connectionDescription.getLogicalSessionTimeoutMinutes() != null) + .cryptd(serverDescription.isCryptd()) + .build(); + } + + private static final List<Integer> NOT_PRIMARY_CODES = asList(10107, 13435, 10058); + private static final List<String> NOT_PRIMARY_MESSAGES = singletonList("not master"); + private static final List<Integer> RECOVERING_CODES = asList(11600, 11602, 13436, 189, 91); + private static final List<String> RECOVERING_MESSAGES = asList("not master or secondary", "node is recovering"); + + @Nullable + public static MongoException createSpecialException(@Nullable final BsonDocument response, + final ServerAddress serverAddress, + final String errorMessageFieldName, + final TimeoutContext timeoutContext) { + if (response == null) { + return null; + } + int errorCode = getErrorCode(response); + String errorMessage = getErrorMessage(response, errorMessageFieldName); + if (ErrorCategory.fromErrorCode(errorCode) ==
ErrorCategory.EXECUTION_TIMEOUT) { + MongoExecutionTimeoutException mongoExecutionTimeoutException = new MongoExecutionTimeoutException(errorCode, errorMessage, response); + if (timeoutContext.hasTimeoutMS()) { + return TimeoutContext.createMongoTimeoutException(mongoExecutionTimeoutException); + } + return mongoExecutionTimeoutException; + } else if (isNodeIsRecoveringError(errorCode, errorMessage)) { + return new MongoNodeIsRecoveringException(response, serverAddress); + } else if (isNotPrimaryError(errorCode, errorMessage)) { + return new MongoNotPrimaryException(response, serverAddress); + } else if (response.containsKey("writeConcernError")) { + MongoException writeConcernException = createSpecialException(response.getDocument("writeConcernError"), serverAddress, + "errmsg", timeoutContext); + if (writeConcernException != null && response.isArray("errorLabels")) { + for (BsonValue errorLabel : response.getArray("errorLabels")) { + writeConcernException.addLabel(errorLabel.asString().getValue()); + } + } + return writeConcernException; + } else { + return null; + } + } + + private static boolean isNotPrimaryError(final int errorCode, final String errorMessage) { + return NOT_PRIMARY_CODES.contains(errorCode) + || (errorCode == NO_ERROR_CODE && NOT_PRIMARY_MESSAGES.stream().anyMatch(errorMessage::contains)); + } + + private static boolean isNodeIsRecoveringError(final int errorCode, final String errorMessage) { + return RECOVERING_CODES.contains(errorCode) + || (errorCode == NO_ERROR_CODE && (RECOVERING_MESSAGES.stream().anyMatch(errorMessage::contains))); + } + + static void sendCommandStartedEvent(final RequestMessage message, final String databaseName, final String commandName, + final BsonDocument command, final ConnectionDescription connectionDescription, + final CommandListener commandListener, final OperationContext operationContext) { + notNull("operationContext", operationContext); + try { + commandListener.commandStarted(new CommandStartedEvent(getRequestContextForEvent(operationContext.getRequestContext()), + operationContext.getId(), message.getId(), connectionDescription, databaseName, commandName, command)); + } catch (Exception e) { + if (PROTOCOL_EVENT_LOGGER.isWarnEnabled()) { + PROTOCOL_EVENT_LOGGER.warn(format("Exception thrown raising command started event to listener %s", commandListener), e); + } + } + } + + static void sendCommandSucceededEvent(final RequestMessage message, final String commandName, final String databaseName, + final BsonDocument response, final ConnectionDescription connectionDescription, final long elapsedTimeNanos, + final CommandListener commandListener, final OperationContext operationContext) { + notNull("operationContext", operationContext); + try { + + commandListener.commandSucceeded(new CommandSucceededEvent(getRequestContextForEvent(operationContext.getRequestContext()), + operationContext.getId(), message.getId(), connectionDescription, databaseName, commandName, response, + elapsedTimeNanos)); + } catch (Exception e) { + if (PROTOCOL_EVENT_LOGGER.isWarnEnabled()) { + PROTOCOL_EVENT_LOGGER.warn(format("Exception thrown raising command succeeded event to listener %s", commandListener), e); + } + } + } + + static void sendCommandFailedEvent(final RequestMessage message, final String commandName, final String databaseName, + final ConnectionDescription connectionDescription, final long elapsedTimeNanos, + final Throwable throwable, final CommandListener commandListener, final OperationContext operationContext) { + 
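+ // listener exceptions are caught and logged below so they cannot propagate into the protocol layer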
notNull("operationContext", operationContext); + try { + commandListener.commandFailed(new CommandFailedEvent(getRequestContextForEvent(operationContext.getRequestContext()), + operationContext.getId(), message.getId(), connectionDescription, databaseName, commandName, elapsedTimeNanos, + throwable)); + + } catch (Exception e) { + if (PROTOCOL_EVENT_LOGGER.isWarnEnabled()) { + PROTOCOL_EVENT_LOGGER.warn(format("Exception thrown raising command failed event to listener %s", commandListener), e); + } + } + } + + @Nullable + private static RequestContext getRequestContextForEvent(final RequestContext requestContext) { + return requestContext == IgnorableRequestContext.INSTANCE ? null : requestContext; + } + + private ProtocolHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ReadConcernAwareNoOpSessionContext.java b/driver-core/src/main/com/mongodb/internal/connection/ReadConcernAwareNoOpSessionContext.java new file mode 100644 index 00000000000..554db5b2f20 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/ReadConcernAwareNoOpSessionContext.java @@ -0,0 +1,40 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.ReadConcern; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A SessionContext implementation that does nothing and reports that it has no session, but does track read concern. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class ReadConcernAwareNoOpSessionContext extends NoOpSessionContext { + + private final ReadConcern readConcern; + + public ReadConcernAwareNoOpSessionContext(final ReadConcern readConcern) { + this.readConcern = notNull("readConcern", readConcern); + } + + @Override + public ReadConcern getReadConcern() { + return readConcern; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ReadConcernHelper.java b/driver-core/src/main/com/mongodb/internal/connection/ReadConcernHelper.java new file mode 100644 index 00000000000..df1e1ed7549 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/ReadConcernHelper.java @@ -0,0 +1,66 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoClientException; +import com.mongodb.ReadConcernLevel; +import com.mongodb.internal.session.SessionContext; +import org.bson.BsonDocument; +import org.bson.BsonString; + +import static com.mongodb.assertions.Assertions.assertFalse; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.operation.ServerVersionHelper.FIVE_DOT_ZERO_WIRE_VERSION; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class ReadConcernHelper { + + public static BsonDocument getReadConcernDocument(final SessionContext sessionContext, final int maxWireVersion) { + notNull("sessionContext", sessionContext); + + BsonDocument readConcernDocument = new BsonDocument(); + + ReadConcernLevel level = sessionContext.getReadConcern().getLevel(); + if (level != null) { + readConcernDocument.append("level", new BsonString(level.getValue())); + } + + assertFalse(sessionContext.isSnapshot() && sessionContext.isCausallyConsistent()); + if (sessionContext.isSnapshot() && maxWireVersion < FIVE_DOT_ZERO_WIRE_VERSION) { + throw new MongoClientException("Snapshot reads require MongoDB 5.0 or later"); + } + if (shouldAddAfterClusterTime(sessionContext)) { + readConcernDocument.append("afterClusterTime", sessionContext.getOperationTime()); + } else if (shouldAddAtClusterTime(sessionContext)) { + readConcernDocument.append("atClusterTime", sessionContext.getSnapshotTimestamp()); + } + return readConcernDocument; + } + + private static boolean shouldAddAtClusterTime(final SessionContext sessionContext) { + return sessionContext.isSnapshot() && sessionContext.getSnapshotTimestamp() != null; + } + + private static boolean shouldAddAfterClusterTime(final SessionContext sessionContext) { + return sessionContext.isCausallyConsistent() && sessionContext.getOperationTime() != null; + } + + private ReadConcernHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ReplyHeader.java b/driver-core/src/main/com/mongodb/internal/connection/ReplyHeader.java new file mode 100644 index 00000000000..f1b723778a7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/ReplyHeader.java @@ -0,0 +1,118 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoInternalException; +import org.bson.ByteBuf; + +import static com.mongodb.internal.connection.MessageHeader.MESSAGE_HEADER_LENGTH; +import static com.mongodb.internal.connection.OpCode.OP_MSG; +import static com.mongodb.internal.connection.OpCode.OP_REPLY; +import static java.lang.String.format; + +/** + * Contains the details of a reply from a MongoDB server. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class ReplyHeader { + /** + * The length of the OP_REPLY header in the MongoDB wire protocol. + */ + public static final int REPLY_HEADER_LENGTH = 20; + + /** + * The length of the OP_REPLY header plus the length of the standard message header + */ + public static final int TOTAL_REPLY_HEADER_LENGTH = REPLY_HEADER_LENGTH + MESSAGE_HEADER_LENGTH; + + private final int messageLength; + private final int requestId; + private final int responseTo; + private final boolean hasMoreToCome; + + ReplyHeader(final ByteBuf header, final MessageHeader messageHeader) { + this(messageHeader.getMessageLength(), messageHeader.getOpCode(), messageHeader, header); + } + + ReplyHeader(final ByteBuf header, final CompressedHeader compressedHeader) { + this(compressedHeader.getUncompressedSize() + MESSAGE_HEADER_LENGTH, compressedHeader.getOriginalOpcode(), + compressedHeader.getMessageHeader(), header); + } + + private ReplyHeader(final int messageLength, final int opCode, final MessageHeader messageHeader, final ByteBuf header) { + this.messageLength = messageLength; + this.requestId = messageHeader.getRequestId(); + this.responseTo = messageHeader.getResponseTo(); + if (opCode == OP_MSG.getValue()) { + int flagBits = header.getInt(); + hasMoreToCome = (flagBits & (1 << 1)) != 0; + header.get(); // ignored payload type + } else if (opCode == OP_REPLY.getValue()) { + if (messageLength < TOTAL_REPLY_HEADER_LENGTH) { + throw new MongoInternalException(format("The reply message length %d is less than the minimum message length %d", + messageLength, TOTAL_REPLY_HEADER_LENGTH)); + } + hasMoreToCome = false; + + header.getInt(); // ignored responseFlags + header.getLong(); // ignored cursorId + header.getInt(); // ignored startingFrom + int numberReturned = header.getInt(); + + if (numberReturned != 1) { + throw new MongoInternalException(format("The reply message number of returned documents, %d, is expected to be 1", + numberReturned)); + } + } else { + throw new MongoInternalException(format("Unexpected reply message opCode %d", opCode)); + } + } + + + /** + * Gets the total size of the message in bytes. This total includes the 4 bytes that holds the message length. + * + * @return the total message size, including all of the header + */ + public int getMessageLength() { + return messageLength; + } + + /** + * This is a client or database-generated identifier that uniquely identifies this message. Along with the {@code responseTo} field in + * responses, clients can use this to associate query responses with the originating query. + * + * @return the identifier for this message + */ + public int getRequestId() { + return requestId; + } + + /** + * Along with the requestID field in queries, clients can use this to associate query responses with the originating query. + * + * @return the request ID from the original request + */ + public int getResponseTo() { + return responseTo; + } + + public boolean hasMoreToCome() { + return hasMoreToCome; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ReplyMessage.java b/driver-core/src/main/com/mongodb/internal/connection/ReplyMessage.java new file mode 100644 index 00000000000..68af818281e --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/ReplyMessage.java @@ -0,0 +1,55 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
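The hasMoreToCome assignment above reads bit 1 of the OP_MSG flag-bits word. As a rough standalone illustration (not part of this patch), the wire-protocol bit positions are checksumPresent (bit 0), moreToCome (bit 1), and exhaustAllowed (bit 16):

    final class OpMsgFlags {
        // Wire-protocol bit positions for the OP_MSG flagBits word.
        static final int CHECKSUM_PRESENT = 1;       // bit 0
        static final int MORE_TO_COME = 1 << 1;      // bit 1, the bit ReplyHeader tests
        static final int EXHAUST_ALLOWED = 1 << 16;  // bit 16

        static boolean hasMoreToCome(final int flagBits) {
            return (flagBits & MORE_TO_COME) != 0;
        }

        public static void main(final String[] args) {
            // A reply in an exhaust stream sets moreToCome, so the next reply
            // arrives without another request being sent.
            System.out.println(hasMoreToCome(MORE_TO_COME));     // true
            System.out.println(hasMoreToCome(CHECKSUM_PRESENT)); // false
        }
    }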
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoInternalException; +import org.bson.BsonBinaryReader; +import org.bson.codecs.Decoder; +import org.bson.codecs.DecoderContext; +import org.bson.io.BsonInput; +import org.bson.io.ByteBufferBsonInput; + +import static java.lang.String.format; + +/** + * An OP_REPLY message. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class ReplyMessage<T> { + + private final T document; + + public ReplyMessage(final ResponseBuffers responseBuffers, final Decoder<T> decoder, final long requestId) { + if (requestId != responseBuffers.getReplyHeader().getResponseTo()) { + throw new MongoInternalException(format("The responseTo (%d) in the response does not match the requestId (%d) in the " + + "request", responseBuffers.getReplyHeader().getResponseTo(), requestId)); + } + + try (BsonInput bsonInput = new ByteBufferBsonInput(responseBuffers.getBodyByteBuffer().duplicate())) { + try (BsonBinaryReader reader = new BsonBinaryReader(bsonInput)) { + document = decoder.decode(reader, DecoderContext.builder().build()); + } + } finally { + responseBuffers.reset(); + } + } + + public T getDocument() { + return document; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/RequestMessage.java b/driver-core/src/main/com/mongodb/internal/connection/RequestMessage.java new file mode 100644 index 00000000000..1771e293b0c --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/RequestMessage.java @@ -0,0 +1,118 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import org.bson.io.BsonOutput; + +import java.util.concurrent.atomic.AtomicInteger; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.connection.BsonWriterHelper.backpatchLength; + +/** + * Abstract base class for all MongoDB Wire Protocol request messages. + */ +abstract class RequestMessage { + + static final AtomicInteger REQUEST_ID = new AtomicInteger(1); + + static final int MESSAGE_PROLOGUE_LENGTH = 16; + + private final MessageSettings settings; + private final int id; + private final OpCode opCode; + + /** + * Gets the next available unique message identifier. + * + * @return the message identifier + */ + public static int getCurrentGlobalId() { + return REQUEST_ID.get(); + } + + RequestMessage(final OpCode opCode, final MessageSettings settings) { + this(opCode, REQUEST_ID.getAndIncrement(), settings); + } + + RequestMessage(final OpCode opCode, final int requestId, final MessageSettings settings) { + this.settings = settings; + id = requestId; + this.opCode = opCode; + } + + /** + * Gets the message id. + * + * @return the message id + */ + public int getId() { + return id; + } + + /** + * Gets the op code of the message. + * + * @return the op code + */ + public OpCode getOpCode() { + return opCode; + } + + /** + * Gets the message settings. + * + * @return the message settings + */ + public MessageSettings getSettings() { + return settings; + } + + /** + * Encodes the message to the given output.
+ * + * @param bsonOutput the output + * @param operationContext the operation context + */ + public void encode(final ByteBufferBsonOutput bsonOutput, final OperationContext operationContext) { + notNull("operationContext", operationContext); + int messageStartPosition = bsonOutput.getPosition(); + writeMessagePrologue(bsonOutput); + encodeMessageBody(bsonOutput, operationContext); + backpatchLength(messageStartPosition, bsonOutput); + } + + /** + * Writes the message prologue to the given output. + * + * @param bsonOutput the output + */ + protected void writeMessagePrologue(final BsonOutput bsonOutput) { + bsonOutput.writeInt32(0); // length: will set this later + bsonOutput.writeInt32(id); + bsonOutput.writeInt32(0); // response to + bsonOutput.writeInt32(opCode.getValue()); + } + + /** + * Encodes the message body to the given output. + * + * @param bsonOutput the output + * @param operationContext the operation context + */ + protected abstract void encodeMessageBody(ByteBufferBsonOutput bsonOutput, OperationContext operationContext); +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ResponseBuffers.java b/driver-core/src/main/com/mongodb/internal/connection/ResponseBuffers.java new file mode 100644 index 00000000000..6774b4a50af --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/ResponseBuffers.java @@ -0,0 +1,80 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import org.bson.BsonDocument; +import org.bson.ByteBuf; +import org.bson.codecs.Decoder; + +import java.io.Closeable; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class ResponseBuffers implements Closeable { + private final ReplyHeader replyHeader; + private final ByteBuf bodyByteBuffer; + private final int bodyByteBufferStartPosition; + private volatile boolean isClosed; + + ResponseBuffers(final ReplyHeader replyHeader, final ByteBuf bodyByteBuffer) { + this.replyHeader = replyHeader; + this.bodyByteBuffer = bodyByteBuffer; + this.bodyByteBufferStartPosition = bodyByteBuffer == null ? 0 : bodyByteBuffer.position(); + } + + /** + * Gets the reply header. + * + * @return the reply header + */ + public ReplyHeader getReplyHeader() { + return replyHeader; + } + + <T extends BsonDocument> T getResponseDocument(final int messageId, final Decoder<T> decoder) { + ReplyMessage<T> replyMessage = new ReplyMessage<>(this, decoder, messageId); + reset(); + return replyMessage.getDocument(); + } + + /** + * Returns a buffer containing the response body. Care should be taken to not use the returned buffer after this instance has + * been closed. + * + * NOTE: do not modify this buffer, it is being made writable for performance reasons to avoid redundant copying. + * + * @return a buffer containing the response body + */ + public ByteBuf getBodyByteBuffer() { + return bodyByteBuffer; + } + + public void reset() { + bodyByteBuffer.position(bodyByteBufferStartPosition); + } + + @Override + public void close() { + if (!isClosed) { + if (bodyByteBuffer != null) { + bodyByteBuffer.release(); + } + isClosed = true; + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/RoundTripTimeSampler.java b/driver-core/src/main/com/mongodb/internal/connection/RoundTripTimeSampler.java new file mode 100644 index 00000000000..ffba2caecc4 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/RoundTripTimeSampler.java @@ -0,0 +1,72 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
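RequestMessage.encode above writes a four-byte zero placeholder in writeMessagePrologue and backpatches it once the body is encoded. A minimal sketch of that pattern, with java.nio.ByteBuffer standing in for the driver's BsonOutput (the little-endian byte order and the OP_MSG opcode value 2013 come from the wire protocol; everything else is illustrative):

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    final class BackpatchSketch {
        public static void main(final String[] args) {
            // The wire protocol is little-endian; ByteBuffer stands in for BsonOutput.
            ByteBuffer out = ByteBuffer.allocate(64).order(ByteOrder.LITTLE_ENDIAN);
            int messageStartPosition = out.position();
            out.putInt(0);     // length: will set this later (writeMessagePrologue)
            out.putInt(42);    // requestId
            out.putInt(0);     // responseTo
            out.putInt(2013);  // opCode: OP_MSG
            out.put(new byte[]{0, 0, 0, 0, 0}); // stand-in for the encoded body
            // backpatchLength: overwrite the placeholder with the full message size.
            out.putInt(messageStartPosition, out.position() - messageStartPosition);
            System.out.println(out.getInt(messageStartPosition)); // 21
        }
    }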
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.annotations.ThreadSafe; + +import java.util.Deque; +import java.util.concurrent.ConcurrentLinkedDeque; + +final class RoundTripTimeSampler { + private final ExponentiallyWeightedMovingAverage averageRoundTripTime = new ExponentiallyWeightedMovingAverage(0.2); + private final RecentSamples recentSamples = new RecentSamples(); + + void reset() { + averageRoundTripTime.reset(); + recentSamples.reset(); + } + + void addSample(final long sample) { + recentSamples.add(sample); + averageRoundTripTime.addSample(sample); + } + + long getAverage() { + return averageRoundTripTime.getAverage(); + } + + long getMin() { + return recentSamples.min(); + } + + @ThreadSafe + private static final class RecentSamples { + + private static final int MAX_SIZE = 10; + private final Deque samples; + + RecentSamples() { + samples = new ConcurrentLinkedDeque<>(); + } + + void add(final long sample) { + if (samples.size() == MAX_SIZE) { + samples.removeFirst(); + } + samples.add(sample); + } + + void reset() { + samples.clear(); + } + + long min() { + // Clients MUST report the minimum RTT as 0 until at least 2 samples have been gathered + return samples.size() < 2 ? 0 : samples.stream().min(Long::compareTo).orElse(0L); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/SaslAuthenticator.java b/driver-core/src/main/com/mongodb/internal/connection/SaslAuthenticator.java new file mode 100644 index 00000000000..eeee3a31abd --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/SaslAuthenticator.java @@ -0,0 +1,422 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
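RoundTripTimeSampler above tracks two statistics per server: an exponentially weighted moving average of round-trip times (alpha = 0.2) and the minimum of the last ten samples, reported as 0 until two samples exist. A self-contained sketch of the arithmetic follows; the ExponentiallyWeightedMovingAverage class itself is not part of this diff, so the update rule below (avg = alpha * sample + (1 - alpha) * avg) is the conventional EWMA and should be read as an assumption:

    import java.util.ArrayDeque;
    import java.util.Deque;

    final class RttSketch {
        static final double ALPHA = 0.2;   // matches the 0.2 passed above
        static final int WINDOW = 10;      // matches RecentSamples.MAX_SIZE
        final Deque<Long> recent = new ArrayDeque<>();
        double average = -1;

        void addSample(final long sample) {
            // Conventional EWMA update; the first sample seeds the average.
            average = average < 0 ? sample : ALPHA * sample + (1 - ALPHA) * average;
            if (recent.size() == WINDOW) {
                recent.removeFirst();
            }
            recent.addLast(sample);
        }

        long min() {
            // As in RecentSamples.min(): 0 until at least two samples exist.
            return recent.size() < 2 ? 0 : recent.stream().min(Long::compareTo).orElse(0L);
        }

        public static void main(final String[] args) {
            RttSketch rtt = new RttSketch();
            rtt.addSample(100);
            System.out.println(rtt.min());     // 0 (only one sample yet)
            rtt.addSample(80);
            System.out.println(rtt.min());     // 80
            System.out.println(rtt.average);   // 96.0 = 0.2 * 80 + 0.8 * 100
        }
    }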
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.AuthenticationMechanism; +import com.mongodb.MongoCredential; +import com.mongodb.MongoException; +import com.mongodb.MongoInterruptedException; +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.MongoSecurityException; +import com.mongodb.ServerAddress; +import com.mongodb.ServerApi; +import com.mongodb.SubjectProvider; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.lang.NonNull; +import com.mongodb.lang.Nullable; +import org.bson.BsonBinary; +import org.bson.BsonBinaryWriter; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.EncoderContext; +import org.bson.io.BasicOutputBuffer; + +import javax.security.auth.Subject; +import javax.security.auth.login.LoginException; +import javax.security.sasl.SaslClient; +import javax.security.sasl.SaslException; + +import static com.mongodb.MongoCredential.JAVA_SUBJECT_KEY; +import static com.mongodb.MongoCredential.JAVA_SUBJECT_PROVIDER_KEY; +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.internal.Locks.withInterruptibleLock; +import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; +import static com.mongodb.internal.connection.CommandHelper.executeCommand; +import static com.mongodb.internal.connection.CommandHelper.executeCommandAsync; + +abstract class SaslAuthenticator extends Authenticator implements SpeculativeAuthenticator { + public static final Logger LOGGER = Loggers.getLogger("authenticator"); + private static final String SUBJECT_PROVIDER_CACHE_KEY = "SUBJECT_PROVIDER"; + SaslAuthenticator(final MongoCredentialWithCache credential, final ClusterConnectionMode clusterConnectionMode, + @Nullable final ServerApi serverApi) { + super(credential, clusterConnectionMode, serverApi); + } + + public void authenticate(final InternalConnection connection, final ConnectionDescription connectionDescription, + final OperationContext operationContext) { + doAsSubject(() -> { + SaslClient saslClient = createSaslClient(connection.getDescription().getServerAddress(), operationContext); + throwIfSaslClientIsNull(saslClient); + try { + BsonDocument responseDocument = getNextSaslResponse(saslClient, connection, operationContext); + BsonInt32 conversationId = responseDocument.getInt32("conversationId"); + + while (!(responseDocument.getBoolean("done")).getValue()) { + byte[] response = saslClient.evaluateChallenge((responseDocument.getBinary("payload")).getData()); + + if (response == null) { + throw new MongoSecurityException(getMongoCredential(), + "SASL protocol error: no client response to challenge for credential " + + getMongoCredential()); + } + + responseDocument = sendSaslContinue(conversationId, response, connection, operationContext); + operationContext.getTimeoutContext().resetMaintenanceTimeout(); + } + if (!saslClient.isComplete()) { + saslClient.evaluateChallenge((responseDocument.getBinary("payload")).getData()); + if (!saslClient.isComplete()) { + throw new MongoSecurityException(getMongoCredential(), + "SASL protocol error: server completed challenges before client completed responses " + + getMongoCredential()); + } + } + } 
catch (Exception e) { + throw wrapException(e); + } finally { + disposeOfSaslClient(saslClient); + } + return null; + }); + } + + @Override + void authenticateAsync(final InternalConnection connection, final ConnectionDescription connectionDescription, + final OperationContext operationContext, final SingleResultCallback callback) { + try { + doAsSubject(() -> { + SaslClient saslClient = createSaslClient(connection.getDescription().getServerAddress(), operationContext); + throwIfSaslClientIsNull(saslClient); + getNextSaslResponseAsync(saslClient, connection, operationContext, callback); + return null; + }); + } catch (Throwable t) { + callback.onResult(null, t); + } + } + + public abstract String getMechanismName(); + + protected abstract SaslClient createSaslClient(ServerAddress serverAddress, OperationContext operationContext); + + protected void appendSaslStartOptions(final BsonDocument saslStartCommand) { + } + + private void throwIfSaslClientIsNull(@Nullable final SaslClient saslClient) { + if (saslClient == null) { + throw new MongoSecurityException(getMongoCredential(), + String.format("This JDK does not support the %s SASL mechanism", getMechanismName())); + } + } + + private BsonDocument getNextSaslResponse(final SaslClient saslClient, final InternalConnection connection, + final OperationContext operationContext) { + BsonDocument response = connection.opened() ? null : getSpeculativeAuthenticateResponse(); + if (response != null) { + return response; + } + + try { + byte[] serverResponse = saslClient.hasInitialResponse() ? saslClient.evaluateChallenge(new byte[0]) : null; + return sendSaslStart(serverResponse, connection, operationContext); + } catch (Exception e) { + throw wrapException(e); + } + } + + private void getNextSaslResponseAsync(final SaslClient saslClient, final InternalConnection connection, + final OperationContext operationContext, final SingleResultCallback callback) { + SingleResultCallback errHandlingCallback = errorHandlingCallback(callback, LOGGER); + try { + BsonDocument response = connection.opened() ? null : getSpeculativeAuthenticateResponse(); + if (response == null) { + byte[] serverResponse = (saslClient.hasInitialResponse() ? 
saslClient.evaluateChallenge(new byte[0]) : null); + sendSaslStartAsync(serverResponse, connection, operationContext, (result, t) -> { + if (t != null) { + errHandlingCallback.onResult(null, wrapException(t)); + return; + } + assertNotNull(result); + if (result.getBoolean("done").getValue()) { + verifySaslClientComplete(saslClient, result, errHandlingCallback); + } else { + new Continuator(saslClient, result, connection, operationContext, errHandlingCallback).start(); + } + }); + } else if (response.getBoolean("done").getValue()) { + verifySaslClientComplete(saslClient, response, errHandlingCallback); + } else { + new Continuator(saslClient, response, connection, operationContext, errHandlingCallback).start(); + } + } catch (Exception e) { + callback.onResult(null, wrapException(e)); + } + } + + private void verifySaslClientComplete(final SaslClient saslClient, final BsonDocument result, + final SingleResultCallback callback) { + if (saslClient.isComplete()) { + callback.onResult(null, null); + } else { + try { + saslClient.evaluateChallenge(result.getBinary("payload").getData()); + if (saslClient.isComplete()) { + callback.onResult(null, null); + } else { + callback.onResult(null, new MongoSecurityException(getMongoCredential(), + "SASL protocol error: server completed challenges before client completed responses " + + getMongoCredential())); + } + } catch (SaslException e) { + callback.onResult(null, wrapException(e)); + } + } + } + + @Nullable + protected Subject getSubject() { + Subject subject = getMongoCredential().getMechanismProperty(JAVA_SUBJECT_KEY, null); + if (subject != null) { + return subject; + } + + try { + return getSubjectProvider().getSubject(); + } catch (LoginException e) { + throw new MongoSecurityException(getMongoCredential(), "Failed to login Subject", e); + } + } + + @NonNull + private SubjectProvider getSubjectProvider() { + return withInterruptibleLock(getMongoCredentialWithCache().getLock(), () -> { + SubjectProvider subjectProvider = + getMongoCredentialWithCache().getFromCache(SUBJECT_PROVIDER_CACHE_KEY, SubjectProvider.class); + if (subjectProvider == null) { + subjectProvider = getMongoCredential().getMechanismProperty(JAVA_SUBJECT_PROVIDER_KEY, null); + if (subjectProvider == null) { + subjectProvider = getDefaultSubjectProvider(); + } + getMongoCredentialWithCache().putInCache(SUBJECT_PROVIDER_CACHE_KEY, subjectProvider); + } + return subjectProvider; + }); + } + + @NonNull + protected SubjectProvider getDefaultSubjectProvider() { + return () -> null; + } + + private BsonDocument sendSaslStart(@Nullable final byte[] outToken, final InternalConnection connection, + final OperationContext operationContext) { + BsonDocument startDocument = createSaslStartCommandDocument(outToken); + appendSaslStartOptions(startDocument); + try { + return executeCommand(getMongoCredential().getSource(), startDocument, getClusterConnectionMode(), getServerApi(), connection, + operationContext); + } finally { + operationContext.getTimeoutContext().resetMaintenanceTimeout(); + } + } + + private BsonDocument sendSaslContinue(final BsonInt32 conversationId, final byte[] outToken, final InternalConnection connection, + final OperationContext operationContext) { + try { + return executeCommand(getMongoCredential().getSource(), createSaslContinueDocument(conversationId, outToken), + getClusterConnectionMode(), getServerApi(), connection, operationContext); + } finally { + operationContext.getTimeoutContext().resetMaintenanceTimeout(); + } + } + + private void 
sendSaslStartAsync(@Nullable final byte[] outToken, final InternalConnection connection, + final OperationContext operationContext, final SingleResultCallback callback) { + BsonDocument startDocument = createSaslStartCommandDocument(outToken); + appendSaslStartOptions(startDocument); + + executeCommandAsync(getMongoCredential().getSource(), startDocument, getClusterConnectionMode(), getServerApi(), connection, + operationContext, (r, t) -> { + operationContext.getTimeoutContext().resetMaintenanceTimeout(); + callback.onResult(r, t); + }); + } + + private void sendSaslContinueAsync(final BsonInt32 conversationId, final byte[] outToken, final InternalConnection connection, + final OperationContext operationContext, final SingleResultCallback callback) { + executeCommandAsync(getMongoCredential().getSource(), createSaslContinueDocument(conversationId, outToken), + getClusterConnectionMode(), getServerApi(), connection, operationContext, (r, t) -> { + operationContext.getTimeoutContext().resetMaintenanceTimeout(); + callback.onResult(r, t); + }); + } + + protected BsonDocument createSaslStartCommandDocument(@Nullable final byte[] outToken) { + return new BsonDocument("saslStart", new BsonInt32(1)).append("mechanism", new BsonString(getMechanismName())) + .append("payload", new BsonBinary(outToken != null ? outToken : new byte[0])); + } + + private BsonDocument createSaslContinueDocument(final BsonInt32 conversationId, final byte[] outToken) { + return new BsonDocument("saslContinue", new BsonInt32(1)).append("conversationId", conversationId) + .append("payload", new BsonBinary(outToken)); + } + + private void disposeOfSaslClient(final SaslClient saslClient) { + try { + saslClient.dispose(); + } catch (SaslException e) { // NOPMD + // ignore + } + } + + protected MongoException wrapException(final Throwable t) { + if (t instanceof MongoInterruptedException) { + return (MongoInterruptedException) t; + } else if (t instanceof MongoOperationTimeoutException) { + return (MongoOperationTimeoutException) t; + } else if (t instanceof MongoSecurityException) { + return (MongoSecurityException) t; + } else { + return new MongoSecurityException(getMongoCredential(), "Exception authenticating " + getMongoCredential(), t); + } + } + + void doAsSubject(final java.security.PrivilegedAction action) { + Subject subject = getSubject(); + if (subject == null) { + action.run(); + } else { + Subject.doAs(subject, action); + } + } + + static byte[] toBson(final BsonDocument document) { + byte[] bytes; + BasicOutputBuffer buffer = new BasicOutputBuffer(); + new BsonDocumentCodec().encode(new BsonBinaryWriter(buffer), document, EncoderContext.builder().build()); + bytes = new byte[buffer.size()]; + System.arraycopy(buffer.getInternalBuffer(), 0, bytes, 0, buffer.getSize()); + return bytes; + } + + private final class Continuator implements SingleResultCallback { + private final SaslClient saslClient; + private final BsonDocument saslStartDocument; + private final InternalConnection connection; + private final OperationContext operationContext; + private final SingleResultCallback callback; + + Continuator(final SaslClient saslClient, final BsonDocument saslStartDocument, final InternalConnection connection, + final OperationContext operationContext, final SingleResultCallback callback) { + this.saslClient = saslClient; + this.saslStartDocument = saslStartDocument; + this.connection = connection; + this.operationContext = operationContext; + this.callback = callback; + } + + @Override + public void onResult(@Nullable 
final BsonDocument result, @Nullable final Throwable t) { + if (t != null) { + callback.onResult(null, wrapException(t)); + disposeOfSaslClient(saslClient); + return; + } + assertNotNull(result); + if (result.getBoolean("done").getValue()) { + verifySaslClientComplete(saslClient, result, callback); + disposeOfSaslClient(saslClient); + } else { + continueConversation(result); + } + } + + public void start() { + continueConversation(saslStartDocument); + } + + private void continueConversation(final BsonDocument result) { + try { + doAsSubject(() -> { + try { + sendSaslContinueAsync(saslStartDocument.getInt32("conversationId"), + saslClient.evaluateChallenge((result.getBinary("payload")).getData()), connection, + operationContext, Continuator.this); + } catch (SaslException e) { + throw wrapException(e); + } + return null; + }); + } catch (Throwable t) { + callback.onResult(null, t); + disposeOfSaslClient(saslClient); + } + } + } + + protected abstract static class SaslClientImpl implements SaslClient { + private final MongoCredential credential; + + protected SaslClientImpl(final MongoCredential credential) { + this.credential = credential; + } + + @Override + public boolean hasInitialResponse() { + return true; + } + + @Override + public byte[] unwrap(final byte[] bytes, final int i, final int i1) { + throw new UnsupportedOperationException("Not implemented."); + } + + @Override + public byte[] wrap(final byte[] bytes, final int i, final int i1) { + throw new UnsupportedOperationException("Not implemented."); + } + + @Override + public Object getNegotiatedProperty(final String s) { + throw new UnsupportedOperationException("Not implemented."); + } + + @Override + public void dispose() { + // nothing to do + } + + @Override + public final String getMechanismName() { + AuthenticationMechanism authMechanism = getCredential().getAuthenticationMechanism(); + if (authMechanism == null) { + throw new IllegalArgumentException("Authentication mechanism cannot be null"); + } + return authMechanism.getMechanismName(); + } + + protected final MongoCredential getCredential() { + return credential; + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ScramShaAuthenticator.java b/driver-core/src/main/com/mongodb/internal/connection/ScramShaAuthenticator.java new file mode 100644 index 00000000000..b98b72b3be5 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/ScramShaAuthenticator.java @@ -0,0 +1,430 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
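ScramShaAuthenticator below drives the RFC 5802 exchange: the client sends client-first, the server answers with a salt and iteration count, and the client answers with a proof derived from the salted password. A compressed sketch of that derivation (SCRAM-SHA-256 flavor; the password, salt, and authMessage values are illustrative, not driver defaults):

    import javax.crypto.Mac;
    import javax.crypto.SecretKeyFactory;
    import javax.crypto.spec.PBEKeySpec;
    import javax.crypto.spec.SecretKeySpec;
    import java.nio.charset.StandardCharsets;
    import java.security.MessageDigest;
    import java.util.Base64;

    final class ScramProofSketch {
        public static void main(final String[] args) throws Exception {
            String password = "pencil";                                    // illustrative
            byte[] salt = Base64.getDecoder().decode("QSXCR+Q6sek8bf92"); // illustrative
            int iterations = 4096;               // the minimum the client accepts
            String authMessage = "n=user,r=..."; // stand-in for the real AuthMessage

            // SaltedPassword := Hi(Normalize(password), salt, i)
            byte[] saltedPassword = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA256")
                    .generateSecret(new PBEKeySpec(password.toCharArray(), salt, iterations, 256))
                    .getEncoded();
            // ClientKey := HMAC(SaltedPassword, "Client Key"); StoredKey := H(ClientKey)
            byte[] clientKey = hmac(saltedPassword, "Client Key");
            byte[] storedKey = MessageDigest.getInstance("SHA-256").digest(clientKey);
            // ClientProof := ClientKey XOR HMAC(StoredKey, AuthMessage)
            byte[] clientSignature = hmac(storedKey, authMessage);
            byte[] clientProof = new byte[clientKey.length];
            for (int i = 0; i < clientProof.length; i++) {
                clientProof[i] = (byte) (clientKey[i] ^ clientSignature[i]);
            }
            System.out.println(Base64.getEncoder().encodeToString(clientProof));
        }

        static byte[] hmac(final byte[] key, final String data) throws Exception {
            Mac mac = Mac.getInstance("HmacSHA256");
            mac.init(new SecretKeySpec(key, "HmacSHA256"));
            return mac.doFinal(data.getBytes(StandardCharsets.UTF_8));
        }
    }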
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.AuthenticationMechanism; +import com.mongodb.MongoCredential; +import com.mongodb.ServerAddress; +import com.mongodb.ServerApi; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.internal.authentication.SaslPrep; +import com.mongodb.lang.Nullable; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonString; + +import javax.crypto.Mac; +import javax.crypto.SecretKeyFactory; +import javax.crypto.spec.PBEKeySpec; +import javax.crypto.spec.SecretKeySpec; +import javax.security.sasl.SaslClient; +import javax.security.sasl.SaslException; +import java.nio.charset.StandardCharsets; +import java.security.InvalidKeyException; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.security.SecureRandom; +import java.security.spec.InvalidKeySpecException; +import java.util.Base64; +import java.util.HashMap; +import java.util.Random; + +import static com.mongodb.AuthenticationMechanism.SCRAM_SHA_1; +import static com.mongodb.AuthenticationMechanism.SCRAM_SHA_256; +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.internal.authentication.NativeAuthenticationHelper.createAuthenticationHash; +import static java.lang.String.format; + +class ScramShaAuthenticator extends SaslAuthenticator { + private final RandomStringGenerator randomStringGenerator; + private final AuthenticationHashGenerator authenticationHashGenerator; + private SaslClient speculativeSaslClient; + private BsonDocument speculativeAuthenticateResponse; + + private static final int MINIMUM_ITERATION_COUNT = 4096; + private static final String GS2_HEADER = "n,,"; + private static final int RANDOM_LENGTH = 24; + + ScramShaAuthenticator(final MongoCredentialWithCache credential, final ClusterConnectionMode clusterConnectionMode, + @Nullable final ServerApi serverApi) { + this(credential, new DefaultRandomStringGenerator(), + getAuthenicationHashGenerator(assertNotNull(credential.getAuthenticationMechanism())), + clusterConnectionMode, serverApi); + } + + ScramShaAuthenticator(final MongoCredentialWithCache credential, final RandomStringGenerator randomStringGenerator, + final AuthenticationHashGenerator authenticationHashGenerator, final ClusterConnectionMode clusterConnectionMode, + @Nullable final ServerApi serverApi) { + super(credential, clusterConnectionMode, serverApi); + this.randomStringGenerator = randomStringGenerator; + this.authenticationHashGenerator = authenticationHashGenerator; + } + + @Override + public String getMechanismName() { + AuthenticationMechanism authMechanism = getMongoCredential().getAuthenticationMechanism(); + if (authMechanism == null) { + throw new IllegalArgumentException("Authentication mechanism cannot be null"); + } + return authMechanism.getMechanismName(); + } + + @Override + protected void appendSaslStartOptions(final BsonDocument saslStartCommand) { + saslStartCommand.append("options", new BsonDocument("skipEmptyExchange", new BsonBoolean(true))); + } + + + @Override + protected SaslClient createSaslClient(final ServerAddress serverAddress, @Nullable final OperationContext operationContext) { + if (speculativeSaslClient != null) { + return speculativeSaslClient; + } + return new ScramShaSaslClient(getMongoCredentialWithCache().getCredential(), randomStringGenerator, authenticationHashGenerator); + } + + protected SaslClient createSaslClient(final ServerAddress serverAddress) { + return 
createSaslClient(serverAddress, null); + } + + @Override + public BsonDocument createSpeculativeAuthenticateCommand(final InternalConnection connection) { + try { + speculativeSaslClient = createSaslClient(connection.getDescription().getServerAddress()); + BsonDocument startDocument = createSaslStartCommandDocument(speculativeSaslClient.evaluateChallenge(new byte[0])) + .append("db", new BsonString(getMongoCredential().getSource())); + appendSaslStartOptions(startDocument); + return startDocument; + } catch (Exception e) { + throw wrapException(e); + } + } + + @Override + public BsonDocument getSpeculativeAuthenticateResponse() { + return speculativeAuthenticateResponse; + } + + @Override + public void setSpeculativeAuthenticateResponse(@Nullable final BsonDocument response) { + if (response == null) { + speculativeSaslClient = null; + } else { + speculativeAuthenticateResponse = response; + } + } + + class ScramShaSaslClient extends SaslClientImpl { + private final RandomStringGenerator randomStringGenerator; + private final AuthenticationHashGenerator authenticationHashGenerator; + private final String hAlgorithm; + private final String hmacAlgorithm; + private final String pbeAlgorithm; + private final int keyLength; + + private String clientFirstMessageBare; + private String clientNonce; + + private byte[] serverSignature; + private int step = -1; + + ScramShaSaslClient( + final MongoCredential credential, + final RandomStringGenerator randomStringGenerator, + final AuthenticationHashGenerator authenticationHashGenerator) { + super(credential); + this.randomStringGenerator = randomStringGenerator; + this.authenticationHashGenerator = authenticationHashGenerator; + if (assertNotNull(credential.getAuthenticationMechanism()).equals(SCRAM_SHA_1)) { + hAlgorithm = "SHA-1"; + hmacAlgorithm = "HmacSHA1"; + pbeAlgorithm = "PBKDF2WithHmacSHA1"; + keyLength = 160; + } else { + hAlgorithm = "SHA-256"; + hmacAlgorithm = "HmacSHA256"; + pbeAlgorithm = "PBKDF2WithHmacSHA256"; + keyLength = 256; + } + } + + public byte[] evaluateChallenge(final byte[] challenge) throws SaslException { + step++; + if (step == 0) { + return computeClientFirstMessage(); + } else if (step == 1) { + return computeClientFinalMessage(challenge); + } else if (step == 2) { + return validateServerSignature(challenge); + } else { + throw new SaslException(format("Too many steps involved in the %s negotiation.", + super.getMechanismName())); + } + } + + private byte[] validateServerSignature(final byte[] challenge) throws SaslException { + String serverResponse = new String(challenge, StandardCharsets.UTF_8); + HashMap map = parseServerResponse(serverResponse); + if (!MessageDigest.isEqual(Base64.getDecoder().decode(map.get("v")), serverSignature)) { + throw new SaslException("Server signature was invalid."); + } + return new byte[0]; + } + + public boolean isComplete() { + return step == 2; + } + + private byte[] computeClientFirstMessage() { + clientNonce = randomStringGenerator.generate(RANDOM_LENGTH); + String clientFirstMessage = "n=" + getUserName() + ",r=" + clientNonce; + clientFirstMessageBare = clientFirstMessage; + return (GS2_HEADER + clientFirstMessage).getBytes(StandardCharsets.UTF_8); + } + + private byte[] computeClientFinalMessage(final byte[] challenge) throws SaslException { + String serverFirstMessage = new String(challenge, StandardCharsets.UTF_8); + HashMap map = parseServerResponse(serverFirstMessage); + String serverNonce = map.get("r"); + if (!serverNonce.startsWith(clientNonce)) { + throw new 
SaslException("Server sent an invalid nonce."); + } + + String salt = map.get("s"); + int iterationCount = Integer.parseInt(map.get("i")); + if (iterationCount < MINIMUM_ITERATION_COUNT) { + throw new SaslException("Invalid iteration count."); + } + + String clientFinalMessageWithoutProof = "c=" + Base64.getEncoder().encodeToString(GS2_HEADER.getBytes(StandardCharsets.UTF_8)) + ",r=" + serverNonce; + String authMessage = clientFirstMessageBare + "," + serverFirstMessage + "," + clientFinalMessageWithoutProof; + String clientFinalMessage = clientFinalMessageWithoutProof + ",p=" + + getClientProof(getAuthenicationHash(), salt, iterationCount, authMessage); + return clientFinalMessage.getBytes(StandardCharsets.UTF_8); + } + + /** + * The client Proof: + *
<p>
+ * AuthMessage := client-first-message-bare + "," + server-first-message + "," + client-final-message-without-proof + * SaltedPassword := Hi(Normalize(password), salt, i) + * ClientKey := HMAC(SaltedPassword, "Client Key") + * ServerKey := HMAC(SaltedPassword, "Server Key") + * StoredKey := H(ClientKey) + * ClientSignature := HMAC(StoredKey, AuthMessage) + * ClientProof := ClientKey XOR ClientSignature + * ServerSignature := HMAC(ServerKey, AuthMessage) + */ + String getClientProof(final String password, final String salt, final int iterationCount, final String authMessage) + throws SaslException { + String hashedPasswordAndSalt = new String(h((password + salt).getBytes(StandardCharsets.UTF_8)), StandardCharsets.UTF_8); + + CacheKey cacheKey = new CacheKey(hashedPasswordAndSalt, salt, iterationCount); + CacheValue cachedKeys = getMongoCredentialWithCache().getFromCache(cacheKey, CacheValue.class); + if (cachedKeys == null) { + byte[] saltedPassword = hi(password, Base64.getDecoder().decode(salt), iterationCount); + byte[] clientKey = hmac(saltedPassword, "Client Key"); + byte[] serverKey = hmac(saltedPassword, "Server Key"); + cachedKeys = new CacheValue(clientKey, serverKey); + getMongoCredentialWithCache().putInCache(cacheKey, new CacheValue(clientKey, serverKey)); + } + serverSignature = hmac(cachedKeys.serverKey, authMessage); + + byte[] storedKey = h(cachedKeys.clientKey); + byte[] clientSignature = hmac(storedKey, authMessage); + byte[] clientProof = xor(cachedKeys.clientKey, clientSignature); + return Base64.getEncoder().encodeToString(clientProof); + } + + private byte[] h(final byte[] data) throws SaslException { + try { + return MessageDigest.getInstance(hAlgorithm).digest(data); + } catch (NoSuchAlgorithmException e) { + throw new SaslException(format("Algorithm for '%s' could not be found.", hAlgorithm), e); + } + } + + private byte[] hi(final String password, final byte[] salt, final int iterations) throws SaslException { + try { + SecretKeyFactory secretKeyFactory = SecretKeyFactory.getInstance(pbeAlgorithm); + PBEKeySpec spec = new PBEKeySpec(password.toCharArray(), salt, iterations, keyLength); + return secretKeyFactory.generateSecret(spec).getEncoded(); + } catch (NoSuchAlgorithmException e) { + throw new SaslException(format("Algorithm for '%s' could not be found.", pbeAlgorithm), e); + } catch (InvalidKeySpecException e) { + throw new SaslException(format("Invalid key specification for '%s'", pbeAlgorithm), e); + } + } + + private byte[] hmac(final byte[] bytes, final String key) throws SaslException { + try { + Mac mac = Mac.getInstance(hmacAlgorithm); + mac.init(new SecretKeySpec(bytes, hmacAlgorithm)); + return mac.doFinal(key.getBytes(StandardCharsets.UTF_8)); + } catch (NoSuchAlgorithmException e) { + throw new SaslException(format("Algorithm for '%s' could not be found.", hmacAlgorithm), e); + } catch (InvalidKeyException e) { + throw new SaslException("Could not initialize mac.", e); + } + } + + /** + * The server provides back key value pairs using an = sign and delimited + * by a command. All keys are also a single character. 
+ * For example: a=kg4io3,b=skljsfoiew,c=1203 + */ + private HashMap parseServerResponse(final String response) { + HashMap map = new HashMap<>(); + String[] pairs = response.split(","); + for (String pair : pairs) { + String[] parts = pair.split("=", 2); + map.put(parts[0], parts[1]); + } + return map; + } + + private String getUserName() { + String userName = getCredential().getUserName(); + if (userName == null) { + throw new IllegalArgumentException("Username can not be null"); + } + return userName.replace("=", "=3D").replace(",", "=2C"); + } + + private String getAuthenicationHash() { + String password = authenticationHashGenerator.generate(getCredential()); + if (getCredential().getAuthenticationMechanism() == SCRAM_SHA_256) { + password = SaslPrep.saslPrepStored(password); + } + return password; + } + + private byte[] xorInPlace(final byte[] a, final byte[] b) { + for (int i = 0; i < a.length; i++) { + a[i] ^= b[i]; + } + return a; + } + + private byte[] xor(final byte[] a, final byte[] b) { + byte[] result = new byte[a.length]; + System.arraycopy(a, 0, result, 0, a.length); + return xorInPlace(result, b); + } + + } + + public interface RandomStringGenerator { + String generate(int length); + } + + public interface AuthenticationHashGenerator { + String generate(MongoCredential credential); + } + + private static class DefaultRandomStringGenerator implements RandomStringGenerator { + public String generate(final int length) { + Random random = new SecureRandom(); + int comma = 44; + int low = 33; + int high = 126; + int range = high - low; + + char[] text = new char[length]; + for (int i = 0; i < length; i++) { + int next = random.nextInt(range) + low; + while (next == comma) { + next = random.nextInt(range) + low; + } + text[i] = (char) next; + } + return new String(text); + } + } + + private static final AuthenticationHashGenerator DEFAULT_AUTHENTICATION_HASH_GENERATOR = credential -> { + char[] password = credential.getPassword(); + if (password == null) { + throw new IllegalArgumentException("Password must not be null"); + } + return new String(password); + }; + + private static final AuthenticationHashGenerator LEGACY_AUTHENTICATION_HASH_GENERATOR = credential -> { + // Username and password must not be modified going into the hash. + String username = credential.getUserName(); + char[] password = credential.getPassword(); + if (username == null || password == null) { + throw new IllegalArgumentException("Username and password must not be null"); + } + return createAuthenticationHash(username, password); + }; + + private static AuthenticationHashGenerator getAuthenicationHashGenerator(final AuthenticationMechanism authenticationMechanism) { + return authenticationMechanism == SCRAM_SHA_1 ? 
LEGACY_AUTHENTICATION_HASH_GENERATOR : DEFAULT_AUTHENTICATION_HASH_GENERATOR; + } + + private static class CacheKey { + private final String hashedPasswordAndSalt; + private final String salt; + private final int iterationCount; + + CacheKey(final String hashedPasswordAndSalt, final String salt, final int iterationCount) { + this.hashedPasswordAndSalt = hashedPasswordAndSalt; + this.salt = salt; + this.iterationCount = iterationCount; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CacheKey that = (CacheKey) o; + + if (iterationCount != that.iterationCount) { + return false; + } + if (!hashedPasswordAndSalt.equals(that.hashedPasswordAndSalt)) { + return false; + } + return salt.equals(that.salt); + } + + @Override + public int hashCode() { + int result = hashedPasswordAndSalt.hashCode(); + result = 31 * result + salt.hashCode(); + result = 31 * result + iterationCount; + return result; + } + } + + private static class CacheValue { + private final byte[] clientKey; + private final byte[] serverKey; + + CacheValue(final byte[] clientKey, final byte[] serverKey) { + this.clientKey = clientKey; + this.serverKey = serverKey; + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/SdamServerDescriptionManager.java b/driver-core/src/main/com/mongodb/internal/connection/SdamServerDescriptionManager.java new file mode 100644 index 00000000000..7f014d7ede6 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/SdamServerDescriptionManager.java @@ -0,0 +1,197 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoCommandException; +import com.mongodb.MongoNodeIsRecoveringException; +import com.mongodb.MongoNotPrimaryException; +import com.mongodb.MongoSecurityException; +import com.mongodb.MongoSocketException; +import com.mongodb.MongoSocketReadTimeoutException; +import com.mongodb.MongoStalePrimaryException; +import com.mongodb.annotations.Immutable; +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerId; +import com.mongodb.connection.TopologyVersion; +import com.mongodb.lang.Nullable; + +import java.util.Optional; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.internal.connection.ClusterableServer.SHUTDOWN_CODES; +import static com.mongodb.internal.connection.ServerDescriptionHelper.unknownConnectingServerDescription; + +/** + * See the + * + * Server Discovery And Monitoring specification. + */ +@ThreadSafe +interface SdamServerDescriptionManager { + /** + * Receives candidate {@link ServerDescription} from the monitoring activity. 
+ * + * @param candidateDescription A {@link ServerDescription} that may or may not be applied + * {@linkplain TopologyVersionHelper#newer(TopologyVersion, TopologyVersion) depending on} + * its {@link ServerDescription#getTopologyVersion() topology version}. + */ + void monitorUpdate(ServerDescription candidateDescription); + + /** + * @param candidateDescription A {@link ServerDescription} that may or may not be applied + * {@linkplain TopologyVersionHelper#newer(TopologyVersion, TopologyVersion) depending on} + * its {@link ServerDescription#getTopologyVersion() topology version}. + */ + void updateToUnknown(ServerDescription candidateDescription); + + void handleExceptionBeforeHandshake(SdamIssue sdamIssue); + + void handleExceptionAfterHandshake(SdamIssue sdamIssue); + + /** + * Must be used if and only if there is no {@link InternalConnection} available, + * e.g., if an exception was encountered when checking out a connection, + * in which case it must be called before (in the happens-before order) checking out. + * @see #context(InternalConnection) + */ + SdamIssue.Context context(); + + /** + * Must be preferred to {@link #context()} if an {@link InternalConnection} is available. + * + * @see #context() + */ + SdamIssue.Context context(InternalConnection connection); + + /** + * Represents an {@linkplain #exception() exception} potentially related to using either a {@link ConnectionPool} + * or an {@link InternalConnection} from it, and the {@linkplain Context context} in which it occurred. + */ + @ThreadSafe + final class SdamIssue { + @Nullable + private final Throwable exception; + private final Context context; + + private SdamIssue(@Nullable final Throwable exception, final Context context) { + this.exception = exception; + this.context = assertNotNull(context); + } + + static SdamIssue of(final Throwable exception, final Context context) { + return new SdamIssue(assertNotNull(exception), assertNotNull(context)); + } + + /** + * @return An exception that caused this {@link SdamIssue}. + */ + Optional exception() { + return Optional.ofNullable(exception); + } + + ServerDescription serverDescription() { + return unknownConnectingServerDescription(context.serverId(), exception); + } + + boolean stale(final ConnectionPool connectionPool, final ServerDescription currentServerDescription) { + return context.stale(connectionPool) || stale(exception, currentServerDescription); + } + + /** + * @see #relatedToShutdown() + */ + boolean relatedToStateChange() { + return exception instanceof MongoNotPrimaryException || exception instanceof MongoNodeIsRecoveringException; + } + + boolean relatedToStalePrimary() { + return exception instanceof MongoStalePrimaryException; + } + + /** + * Represents a subset of {@link #relatedToStateChange()}. + * + * @see #relatedToStateChange() + */ + boolean relatedToShutdown() { + assertTrue(relatedToStateChange()); // if this is violated, then we also may need to change the code that uses this method + //noinspection ConstantConditions + if (exception instanceof MongoCommandException) { + return SHUTDOWN_CODES.contains(((MongoCommandException) exception).getErrorCode()); + } + return false; + } + + /** + * @see #relatedToNetworkNotTimeout() + */ + boolean relatedToNetworkTimeout() { + return exception instanceof MongoSocketReadTimeoutException; + } + + /** + * @return {@code true} if and only if this {@link SdamIssue} is related to + * network communications and is not {@link #relatedToNetworkTimeout()}. 
+ * @see #relatedToNetworkTimeout() + */ + boolean relatedToNetworkNotTimeout() { + return exception instanceof MongoSocketException && !relatedToNetworkTimeout(); + } + + boolean relatedToAuth() { + return exception instanceof MongoSecurityException; + } + + boolean relatedToWriteConcern() { + return exception instanceof MongoWriteConcernWithResponseException; + } + + private static boolean stale(@Nullable final Throwable t, final ServerDescription currentServerDescription) { + return TopologyVersionHelper.topologyVersion(t) + .map(candidateTopologyVersion -> TopologyVersionHelper.newerOrEqual( + currentServerDescription.getTopologyVersion(), candidateTopologyVersion)) + .orElse(false); + } + + /** + * A context in which an {@link SdamIssue} occurred. It is used to determine the freshness of the exception. + */ + @Immutable + static final class Context { + private final ServerId serverId; + private final int connectionPoolGeneration; + private final int serverMaxWireVersion; + + Context(final ServerId serverId, final int connectionPoolGeneration, final int serverMaxWireVersion) { + this.serverId = assertNotNull(serverId); + this.connectionPoolGeneration = connectionPoolGeneration; + this.serverMaxWireVersion = serverMaxWireVersion; + } + + private boolean stale(final ConnectionPool connectionPool) { + return connectionPoolGeneration < connectionPool.getGeneration(); + } + + private ServerId serverId() { + return serverId; + } + } + } + +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/Server.java b/driver-core/src/main/com/mongodb/internal/connection/Server.java new file mode 100644 index 00000000000..bc7d9eac7e3 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/Server.java @@ -0,0 +1,62 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.internal.async.SingleResultCallback; + +/** + * A logical connection to a MongoDB server. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +@ThreadSafe +public interface Server { + + /** + *
<p>Gets a connection to this server. The connection should be released after the caller is done with it.</p>
+ * + *
<p>Implementations of this method are allowed to block while waiting for a free connection from a pool of available connections.</p>
+ * + *
<p>Implementations of this method will likely pool the underlying connection, so the effect of closing the returned connection will + * be to return the connection to the pool.</p>
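+ *
+ * <p>Illustrative usage (added example, not part of the original javadoc; it assumes the returned
+ * {@code Connection} is reference-counted and handed back to the pool via {@code release()}):</p>
+ * <pre>{@code
+ * Connection connection = server.getConnection(operationContext);
+ * try {
+ *     // use the connection to run commands
+ * } finally {
+ *     connection.release(); // returns the underlying connection to the pool
+ * }
+ * }</pre>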
+ * + * @param operationContext operation context + * @return a connection to this server + */ + Connection getConnection(OperationContext operationContext); + + /** + *
<p>Gets a connection to this server asynchronously. The connection should be released after the caller is done with it.</p>
+ * + *
<p>Implementations of this method will likely pool the underlying connection, so the effect of closing the returned connection will + * be to return the connection to the pool.</p>
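+ *
+ * <p>Illustrative usage (added example, not part of the original javadoc; {@code SingleResultCallback}
+ * receives either the connection or the failure):</p>
+ * <pre>{@code
+ * server.getConnectionAsync(operationContext, (connection, t) -> {
+ *     if (t != null) {
+ *         // checkout failed
+ *     } else {
+ *         try {
+ *             // use the connection
+ *         } finally {
+ *             connection.release();
+ *         }
+ *     }
+ * });
+ * }</pre>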
+ * + * @param operationContext operation context + * @param callback the callback to execute when the connection is available or an error occurs + */ + void getConnectionAsync(OperationContext operationContext, SingleResultCallback callback); + + /** + * An approximation of the + * + * number of operations that this server is currently executing. + * + * @return A negative value iff the server does not track its operation count. + */ + int operationCount(); +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ServerAddressHelper.java b/driver-core/src/main/com/mongodb/internal/connection/ServerAddressHelper.java new file mode 100644 index 00000000000..5230c4d7c18 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/ServerAddressHelper.java @@ -0,0 +1,81 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + + +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoSocketException; +import com.mongodb.ServerAddress; +import com.mongodb.UnixServerAddress; +import com.mongodb.lang.Nullable; +import com.mongodb.spi.dns.InetAddressResolver; +import com.mongodb.spi.dns.InetAddressResolverProvider; + +import java.net.InetSocketAddress; +import java.net.UnknownHostException; +import java.util.List; +import java.util.ServiceLoader; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
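+ *
+ * <p>Illustrative behaviour of {@code createServerAddress} (added example with hypothetical hosts):</p>
+ * <pre>{@code
+ * createServerAddress("db.example.com", 27018);   // plain ServerAddress for db.example.com:27018
+ * createServerAddress("/tmp/mongodb-27017.sock"); // UnixServerAddress, because the host ends with ".sock"
+ * }</pre>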
+ */ +public final class ServerAddressHelper { + @Nullable + private static final InetAddressResolver LOCATED_INET_ADDRESS_RESOLVER = StreamSupport.stream( + ServiceLoader.load(InetAddressResolverProvider.class).spliterator(), false) + .findFirst() + .map(InetAddressResolverProvider::create) + .orElse(null); + + public static ServerAddress createServerAddress(final String host) { + return createServerAddress(host, ServerAddress.defaultPort()); + } + + public static ServerAddress createServerAddress(final String host, final int port) { + if (host != null && host.endsWith(".sock")) { + return new UnixServerAddress(host); + } else { + return new ServerAddress(host, port); + } + } + + public static InetAddressResolver getInetAddressResolver(final MongoClientSettings settings) { + InetAddressResolver explicitInetAddressResolver = settings.getInetAddressResolver(); + if (explicitInetAddressResolver != null) { + return explicitInetAddressResolver; + } else if (LOCATED_INET_ADDRESS_RESOLVER != null) { + return LOCATED_INET_ADDRESS_RESOLVER; + } else { + return new DefaultInetAddressResolver(); + } + } + + public static List getSocketAddresses(final ServerAddress serverAddress, final InetAddressResolver resolver) { + try { + return resolver.lookupByName(serverAddress.getHost()) + .stream() + .map(inetAddress -> new InetSocketAddress(inetAddress, serverAddress.getPort())).collect(Collectors.toList()); + } catch (UnknownHostException e) { + throw new MongoSocketException(e.getMessage(), serverAddress, e); + } + } + + private ServerAddressHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ServerDescriptionHelper.java b/driver-core/src/main/com/mongodb/internal/connection/ServerDescriptionHelper.java new file mode 100644 index 00000000000..2e03f375b60 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/ServerDescriptionHelper.java @@ -0,0 +1,42 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.internal.connection; + +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerId; +import com.mongodb.lang.Nullable; + +import static com.mongodb.connection.ServerConnectionState.CONNECTING; +import static com.mongodb.connection.ServerType.UNKNOWN; + +final class ServerDescriptionHelper { + static ServerDescription unknownConnectingServerDescription(final ServerId serverId, @Nullable final Throwable cause) { + ServerDescription.Builder result = ServerDescription.builder() + .type(UNKNOWN) + .state(CONNECTING) + .address(serverId.getAddress()); + TopologyVersionHelper.topologyVersion(cause) + .ifPresent(result::topologyVersion); + if (cause != null) { + result.exception(cause); + } + return result.build(); + } + + private ServerDescriptionHelper() { + throw new AssertionError(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ServerMonitor.java b/driver-core/src/main/com/mongodb/internal/connection/ServerMonitor.java new file mode 100644 index 00000000000..5ccb9c81ebe --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/ServerMonitor.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +interface ServerMonitor { + + void start(); + + void connect(); + + void close(); + + void cancelCurrentCheck(); +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ServerMonitoringModeUtil.java b/driver-core/src/main/com/mongodb/internal/connection/ServerMonitoringModeUtil.java new file mode 100644 index 00000000000..17629f38a58 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/ServerMonitoringModeUtil.java @@ -0,0 +1,55 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.connection; + +import com.mongodb.connection.ServerMonitoringMode; + +import static java.lang.String.format; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
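+ *
+ * <p>Round-trip example (added illustration; parsing is case-insensitive):</p>
+ * <pre>{@code
+ * String value = ServerMonitoringModeUtil.getValue(ServerMonitoringMode.POLL); // "poll"
+ * ServerMonitoringMode mode = ServerMonitoringModeUtil.fromString("Poll");     // POLL
+ * }</pre>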
+ */ +public final class ServerMonitoringModeUtil { + /** + * Returns the string value of the provided {@code serverMonitoringMode}. + * + * @return The string value. + * @see #fromString(String) + */ + public static String getValue(final ServerMonitoringMode serverMonitoringMode) { + return serverMonitoringMode.name().toLowerCase(); + } + + /** + * Parses a string into {@link ServerMonitoringMode}. + * + * @param serverMonitoringMode A server monitoring mode string. + * @return The corresponding {@link ServerMonitoringMode} value. + * @see #getValue(ServerMonitoringMode) + */ + public static ServerMonitoringMode fromString(final String serverMonitoringMode) { + for (ServerMonitoringMode mode : ServerMonitoringMode.values()) { + if (serverMonitoringMode.equalsIgnoreCase(mode.name())) { + return mode; + } + } + throw new IllegalArgumentException(format("'%s' is not a valid %s", + serverMonitoringMode, ServerMonitoringMode.class.getSimpleName())); + } + + private ServerMonitoringModeUtil() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ServerTuple.java b/driver-core/src/main/com/mongodb/internal/connection/ServerTuple.java new file mode 100644 index 00000000000..4829469e718 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/ServerTuple.java @@ -0,0 +1,40 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.connection.ServerDescription; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class ServerTuple { + private final Server server; + private final ServerDescription serverDescription; + + public ServerTuple(final Server server, final ServerDescription serverDescription) { + this.server = server; + this.serverDescription = serverDescription; + } + + public Server getServer() { + return server; + } + + public ServerDescription getServerDescription() { + return serverDescription; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ServerTypeHelper.java b/driver-core/src/main/com/mongodb/internal/connection/ServerTypeHelper.java new file mode 100644 index 00000000000..1f929dcb7cf --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/ServerTypeHelper.java @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.connection; + +import com.mongodb.connection.ServerType; + +final class ServerTypeHelper { + static boolean isDataBearing(final ServerType serverType) { + switch (serverType) { + case STANDALONE: + case REPLICA_SET_PRIMARY: + case REPLICA_SET_SECONDARY: + case SHARD_ROUTER: + case LOAD_BALANCER: { + return true; + } + case REPLICA_SET_ARBITER: + case REPLICA_SET_OTHER: + case REPLICA_SET_GHOST: + case UNKNOWN: { + return false; + } + default: { + throw new AssertionError(); + } + } + } + + private ServerTypeHelper() { + throw new AssertionError(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/SingleServerCluster.java b/driver-core/src/main/com/mongodb/internal/connection/SingleServerCluster.java new file mode 100644 index 00000000000..87b55e3f648 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/SingleServerCluster.java @@ -0,0 +1,135 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoConfigurationException; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ClusterSettings; +import com.mongodb.connection.ClusterType; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerType; +import com.mongodb.event.ServerDescriptionChangedEvent; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.internal.time.Timeout; + +import java.util.concurrent.atomic.AtomicReference; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.connection.ServerConnectionState.CONNECTING; +import static java.lang.String.format; +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; + +/** + * This class needs to be final because we are leaking a reference to "this" from the constructor + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class SingleServerCluster extends BaseCluster { + private static final Logger LOGGER = Loggers.getLogger("cluster"); + + private final AtomicReference server; + + public SingleServerCluster(final ClusterId clusterId, final ClusterSettings settings, final ClusterableServerFactory serverFactory, + final ClientMetadata clientMetadata) { + super(clusterId, settings, serverFactory, clientMetadata); + isTrue("one server in a direct cluster", settings.getHosts().size() == 1); + isTrue("connection mode is single", settings.getMode() == ClusterConnectionMode.SINGLE); + + server = new AtomicReference<>(); + // synchronized in the constructor because the change listener is re-entrant to this instance. + // In other words, we are leaking a reference to "this" from the constructor. + withLock(() -> { + publishDescription(ServerDescription.builder().state(CONNECTING).address(settings.getHosts().get(0)) + .build()); + server.set(createServer(settings.getHosts().get(0))); + }); + } + + @Override + protected void connect() { + assertNotNull(server.get()).connect(); + } + + @Override + public ServersSnapshot getServersSnapshot( + final Timeout serverSelectionTimeout, + final TimeoutContext timeoutContext) { + isTrue("open", !isClosed()); + ClusterableServer server = assertNotNull(this.server.get()); + return serverAddress -> server; + } + + @Override + public void close() { + if (!isClosed()) { + assertNotNull(server.get()).close(); + super.close(); + } + } + + @Override + public void onChange(final ServerDescriptionChangedEvent event) { + withLock(() -> { + ServerDescription newDescription = event.getNewDescription(); + if (newDescription.isOk()) { + if (getSettings().getRequiredClusterType() != ClusterType.UNKNOWN + && getSettings().getRequiredClusterType() != newDescription.getClusterType()) { + newDescription = null; + } else if (getSettings().getRequiredClusterType() == ClusterType.REPLICA_SET + && getSettings().getRequiredReplicaSetName() != null) { + if (!getSettings().getRequiredReplicaSetName().equals(newDescription.getSetName())) { + newDescription = ServerDescription.builder(newDescription) + .exception(new MongoConfigurationException( + format("Replica set name '%s' does not match required replica set name of '%s'", + newDescription.getSetName(), getSettings().getRequiredReplicaSetName()))) + .type(ServerType.UNKNOWN) + .setName(null) + .ok(false) + .build(); + publishDescription(ClusterType.UNKNOWN, newDescription); + return; + } + } + } + publishDescription(newDescription); + }); + } + + private void publishDescription(final ServerDescription serverDescription) { + ClusterType clusterType = getSettings().getRequiredClusterType(); + if (clusterType == ClusterType.UNKNOWN && serverDescription != null) { + clusterType = serverDescription.getClusterType(); + } + publishDescription(clusterType, serverDescription); + } + + private void publishDescription(final ClusterType clusterType, final ServerDescription serverDescription) { + ClusterDescription currentDescription = getCurrentDescription(); + ClusterDescription description = new ClusterDescription(ClusterConnectionMode.SINGLE, clusterType, + serverDescription == null ? 
emptyList() : singletonList(serverDescription), getSettings(), + getServerFactory().getSettings()); + + updateDescription(description); + fireChangeEvent(description, currentDescription); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/SnappyCompressor.java b/driver-core/src/main/com/mongodb/internal/connection/SnappyCompressor.java new file mode 100644 index 00000000000..f9aa1648494 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/SnappyCompressor.java @@ -0,0 +1,79 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoInternalException; +import org.bson.ByteBuf; +import org.bson.io.BsonOutput; +import org.xerial.snappy.Snappy; +import org.xerial.snappy.SnappyInputStream; + +import java.io.IOException; +import java.io.InputStream; +import java.util.List; + +class SnappyCompressor extends Compressor { + @Override + public String getName() { + return "snappy"; + } + + @Override + public byte getId() { + return 1; + } + + // the server does not support the framing format so SnappyFramedOutputStream can't be used. The entire source message must first + // be copied into a single byte array. For that reason the compress method defined in the base class can't be used. + @Override + public void compress(final List source, final BsonOutput target) { + int uncompressedSize = getUncompressedSize(source); + + byte[] singleByteArraySource = new byte[uncompressedSize]; + copy(source, singleByteArraySource); + + try { + byte[] out = new byte[Snappy.maxCompressedLength(uncompressedSize)]; + int compressedSize = Snappy.compress(singleByteArraySource, 0, singleByteArraySource.length, out, 0); + target.writeBytes(out, 0, compressedSize); + } catch (IOException e) { + throw new MongoInternalException("Unexpected IOException", e); + } + } + + private int getUncompressedSize(final List source) { + int uncompressedSize = 0; + for (ByteBuf cur : source) { + uncompressedSize += cur.remaining(); + } + return uncompressedSize; + } + + private void copy(final List source, final byte[] in) { + int offset = 0; + for (ByteBuf cur : source) { + int remaining = cur.remaining(); + cur.get(in, offset, remaining); + offset += remaining; + } + } + + @Override + InputStream getInputStream(final InputStream source) throws IOException { + return new SnappyInputStream(source); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/SocketStream.java b/driver-core/src/main/com/mongodb/internal/connection/SocketStream.java new file mode 100644 index 00000000000..a1c3ed0d914 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/SocketStream.java @@ -0,0 +1,248 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoSocketException; +import com.mongodb.MongoSocketOpenException; +import com.mongodb.MongoSocketReadException; +import com.mongodb.ServerAddress; +import com.mongodb.connection.AsyncCompletionHandler; +import com.mongodb.connection.ProxySettings; +import com.mongodb.connection.SocketSettings; +import com.mongodb.connection.SslSettings; +import com.mongodb.spi.dns.InetAddressResolver; +import org.bson.ByteBuf; + +import javax.net.SocketFactory; +import javax.net.ssl.SSLSocket; +import javax.net.ssl.SSLSocketFactory; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.net.SocketTimeoutException; +import java.util.Iterator; +import java.util.List; + +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.TimeoutContext.throwMongoTimeoutException; +import static com.mongodb.internal.connection.ServerAddressHelper.getSocketAddresses; +import static com.mongodb.internal.connection.SocketStreamHelper.configureSocket; +import static com.mongodb.internal.connection.SslHelper.configureSslSocket; +import static com.mongodb.internal.thread.InterruptionUtil.translateInterruptedException; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class SocketStream implements Stream { + private final ServerAddress address; + private final InetAddressResolver inetAddressResolver; + private final SocketSettings settings; + private final SslSettings sslSettings; + private final SocketFactory socketFactory; + private final BufferProvider bufferProvider; + private volatile Socket socket; + private volatile OutputStream outputStream; + private volatile InputStream inputStream; + private volatile boolean isClosed; + + public SocketStream(final ServerAddress address, final InetAddressResolver inetAddressResolver, + final SocketSettings settings, final SslSettings sslSettings, + final SocketFactory socketFactory, final BufferProvider bufferProvider) { + this.address = notNull("address", address); + this.settings = notNull("settings", settings); + this.sslSettings = notNull("sslSettings", sslSettings); + this.socketFactory = notNull("socketFactory", socketFactory); + this.bufferProvider = notNull("bufferProvider", bufferProvider); + this.inetAddressResolver = inetAddressResolver; + } + + @Override + public void open(final OperationContext operationContext) { + try { + socket = initializeSocket(operationContext); + outputStream = socket.getOutputStream(); + inputStream = socket.getInputStream(); + } catch (IOException e) { + close(); + throw translateInterruptedException(e, "Interrupted while connecting") + .orElseThrow(() -> new MongoSocketOpenException("Exception opening socket", getAddress(), e)); + } + } + + protected Socket initializeSocket(final OperationContext operationContext) throws IOException { + ProxySettings proxySettings = settings.getProxySettings(); + if (proxySettings.isProxyEnabled()) { + if (sslSettings.isEnabled()) { + assertTrue(socketFactory instanceof SSLSocketFactory); + SSLSocketFactory sslSocketFactory = (SSLSocketFactory) socketFactory; + return initializeSslSocketOverSocksProxy(operationContext, sslSocketFactory); + } + return initializeSocketOverSocksProxy(operationContext); + } + + Iterator inetSocketAddresses = getSocketAddresses(address, inetAddressResolver).iterator(); + while (inetSocketAddresses.hasNext()) { + Socket socket = socketFactory.createSocket(); + try { + SocketStreamHelper.initialize(operationContext, socket, inetSocketAddresses.next(), settings, sslSettings); + return socket; + } catch (SocketTimeoutException e) { + if (!inetSocketAddresses.hasNext()) { + throw e; + } + } + } + + throw new MongoSocketException("Exception opening socket", getAddress()); + } + + private SSLSocket initializeSslSocketOverSocksProxy(final OperationContext operationContext, + final SSLSocketFactory sslSocketFactory) throws IOException { + final String serverHost = address.getHost(); + final int serverPort = address.getPort(); + + SocksSocket socksProxy = new SocksSocket(settings.getProxySettings()); + configureSocket(socksProxy, operationContext, settings); + InetSocketAddress inetSocketAddress = toSocketAddress(serverHost, serverPort); + socksProxy.connect(inetSocketAddress, operationContext.getTimeoutContext().getConnectTimeoutMs()); + + SSLSocket sslSocket = (SSLSocket) sslSocketFactory.createSocket(socksProxy, serverHost, serverPort, true); + //Even though Socks proxy connection is already established, TLS handshake has not been performed yet. + //So it is possible to set SSL parameters before handshake is done. + configureSslSocket(sslSocket, sslSettings, inetSocketAddress); + return sslSocket; + } + + + /** + * Creates an unresolved {@link InetSocketAddress}. 
+ * This method is used to create an address that is meant to be resolved by a SOCKS proxy. + */ + private static InetSocketAddress toSocketAddress(final String serverHost, final int serverPort) { + return InetSocketAddress.createUnresolved(serverHost, serverPort); + } + + private Socket initializeSocketOverSocksProxy(final OperationContext operationContext) throws IOException { + Socket createdSocket = socketFactory.createSocket(); + configureSocket(createdSocket, operationContext, settings); + /* + Wrap the configured socket with SocksSocket to add extra functionality. + Reason for separate steps: We can't directly extend Java 11 methods within 'SocksSocket' + to configure itself. + */ + SocksSocket socksProxy = new SocksSocket(createdSocket, settings.getProxySettings()); + + socksProxy.connect(toSocketAddress(address.getHost(), address.getPort()), + operationContext.getTimeoutContext().getConnectTimeoutMs()); + return socksProxy; + } + + @Override + public ByteBuf getBuffer(final int size) { + return bufferProvider.getBuffer(size); + } + + @Override + public void write(final List buffers, final OperationContext operationContext) throws IOException { + for (final ByteBuf cur : buffers) { + outputStream.write(cur.array(), 0, cur.limit()); + operationContext.getTimeoutContext().onExpired(() -> { + throwMongoTimeoutException("Socket write exceeded the timeout limit."); + }); + } + } + + @Override + public ByteBuf read(final int numBytes, final OperationContext operationContext) throws IOException { + try { + ByteBuf buffer = bufferProvider.getBuffer(numBytes); + try { + int totalBytesRead = 0; + byte[] bytes = buffer.array(); + while (totalBytesRead < buffer.limit()) { + int readTimeoutMS = (int) operationContext.getTimeoutContext().getReadTimeoutMS(); + socket.setSoTimeout(readTimeoutMS); + int bytesRead = inputStream.read(bytes, totalBytesRead, buffer.limit() - totalBytesRead); + if (bytesRead == -1) { + throw new MongoSocketReadException("Prematurely reached end of stream", getAddress()); + } + totalBytesRead += bytesRead; + } + return buffer; + } catch (Exception e) { + buffer.release(); + throw e; + } + } finally { + if (!socket.isClosed()) { + // `socket` may be closed if the current thread is virtual, and it is interrupted while reading + socket.setSoTimeout(0); + } + } + } + + @Override + public void openAsync(final OperationContext operationContext, final AsyncCompletionHandler handler) { + throw new UnsupportedOperationException(getClass() + " does not support asynchronous operations."); + } + + @Override + public void writeAsync(final List buffers, final OperationContext operationContext, + final AsyncCompletionHandler handler) { + throw new UnsupportedOperationException(getClass() + " does not support asynchronous operations."); + } + + @Override + public void readAsync(final int numBytes, final OperationContext operationContext, + final AsyncCompletionHandler handler) { + throw new UnsupportedOperationException(getClass() + " does not support asynchronous operations."); + } + + @Override + public ServerAddress getAddress() { + return address; + } + + /** + * Get the settings for this socket. 
+ * + * @return the settings + */ + SocketSettings getSettings() { + return settings; + } + + @Override + public void close() { + try { + isClosed = true; + if (socket != null) { + socket.close(); + } + } catch (IOException | RuntimeException e) { + // ignore + } + } + + @Override + public boolean isClosed() { + return isClosed; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/SocketStreamFactory.java b/driver-core/src/main/com/mongodb/internal/connection/SocketStreamFactory.java new file mode 100644 index 00000000000..793fc8b3dc4 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/SocketStreamFactory.java @@ -0,0 +1,83 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoClientException; +import com.mongodb.ServerAddress; +import com.mongodb.UnixServerAddress; +import com.mongodb.connection.SocketSettings; +import com.mongodb.connection.SslSettings; +import com.mongodb.spi.dns.InetAddressResolver; + +import javax.net.SocketFactory; +import javax.net.ssl.SSLContext; +import java.security.NoSuchAlgorithmException; + +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.Optional.ofNullable; + +/** + * Factory for creating instances of {@code SocketStream}. + */ +public class SocketStreamFactory implements StreamFactory { + private final InetAddressResolver inetAddressResolver; + private final SocketSettings settings; + private final SslSettings sslSettings; + private final BufferProvider bufferProvider = PowerOfTwoBufferPool.DEFAULT; + + /** + * Creates a new factory with the given settings for connecting to servers and the given SSL settings + * + * @param inetAddressResolver resolver + * @param settings the SocketSettings for connecting to a MongoDB server + * @param sslSettings whether SSL is enabled. 
+ */ + public SocketStreamFactory(final InetAddressResolver inetAddressResolver, final SocketSettings settings, + final SslSettings sslSettings) { + this.inetAddressResolver = inetAddressResolver; + this.settings = notNull("settings", settings); + this.sslSettings = notNull("sslSettings", sslSettings); + } + + @Override + public Stream create(final ServerAddress serverAddress) { + Stream stream; + if (serverAddress instanceof UnixServerAddress) { + if (sslSettings.isEnabled()) { + throw new MongoClientException("Socket based connections do not support ssl"); + } + stream = new UnixSocketChannelStream((UnixServerAddress) serverAddress, settings, sslSettings, bufferProvider); + } else { + if (sslSettings.isEnabled()) { + stream = new SocketStream(serverAddress, inetAddressResolver, settings, sslSettings, getSslContext().getSocketFactory(), + bufferProvider); + } else { + stream = new SocketStream(serverAddress, inetAddressResolver, settings, sslSettings, SocketFactory.getDefault(), + bufferProvider); + } + } + return stream; + } + + private SSLContext getSslContext() { + try { + return ofNullable(sslSettings.getContext()).orElse(SSLContext.getDefault()); + } catch (NoSuchAlgorithmException e) { + throw new MongoClientException("Unable to create default SSLContext", e); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/SocketStreamHelper.java b/driver-core/src/main/com/mongodb/internal/connection/SocketStreamHelper.java new file mode 100644 index 00000000000..74098c4ede6 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/SocketStreamHelper.java @@ -0,0 +1,118 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.connection.SocketSettings; +import com.mongodb.connection.SslSettings; + +import java.io.IOException; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.net.SocketException; +import java.net.SocketOption; + +import static com.mongodb.internal.connection.SslHelper.configureSslSocket; + +@SuppressWarnings({"unchecked", "rawtypes"}) +final class SocketStreamHelper { + // Keep alive options and their values for Java 11+ + private static final String TCP_KEEPIDLE = "TCP_KEEPIDLE"; + private static final int TCP_KEEPIDLE_DURATION = 120; + private static final String TCP_KEEPCOUNT = "TCP_KEEPCOUNT"; + private static final int TCP_KEEPCOUNT_LIMIT = 9; + private static final String TCP_KEEPINTERVAL = "TCP_KEEPINTERVAL"; + private static final int TCP_KEEPINTERVAL_DURATION = 10; + + private static final SocketOption KEEP_COUNT_OPTION; + private static final SocketOption KEEP_IDLE_OPTION; + private static final SocketOption KEEP_INTERVAL_OPTION; + + private static final Method SET_OPTION_METHOD; + + static { + SocketOption keepCountOption = null; + SocketOption keepIdleOption = null; + SocketOption keepIntervalOption = null; + Method setOptionMethod = null; + + try { + setOptionMethod = Socket.class.getMethod("setOption", SocketOption.class, Object.class); + + Class extendedSocketOptionsClass = Class.forName("jdk.net.ExtendedSocketOptions"); + keepCountOption = (SocketOption) extendedSocketOptionsClass.getDeclaredField(TCP_KEEPCOUNT).get(null); + keepIdleOption = (SocketOption) extendedSocketOptionsClass.getDeclaredField(TCP_KEEPIDLE).get(null); + keepIntervalOption = (SocketOption) extendedSocketOptionsClass.getDeclaredField(TCP_KEEPINTERVAL).get(null); + } catch (ClassNotFoundException | NoSuchMethodException | NoSuchFieldException | IllegalAccessException e) { + // ignore: this is expected on JDKs < 11 and some deployments that don't include the jdk.net package + } + + KEEP_COUNT_OPTION = keepCountOption; + KEEP_IDLE_OPTION = keepIdleOption; + KEEP_INTERVAL_OPTION = keepIntervalOption; + SET_OPTION_METHOD = setOptionMethod; + } + + static void initialize(final OperationContext operationContext, final Socket socket, + final InetSocketAddress inetSocketAddress, final SocketSettings settings, + final SslSettings sslSettings) throws IOException { + configureSocket(socket, operationContext, settings); + configureSslSocket(socket, sslSettings, inetSocketAddress); + socket.connect(inetSocketAddress, operationContext.getTimeoutContext().getConnectTimeoutMs()); + } + + static void configureSocket(final Socket socket, final OperationContext operationContext, final SocketSettings settings) throws SocketException { + socket.setTcpNoDelay(true); + socket.setKeepAlive(true); + int readTimeoutMS = (int) operationContext.getTimeoutContext().getReadTimeoutMS(); + if (readTimeoutMS > 0) { + socket.setSoTimeout(readTimeoutMS); + } + + // Adding keep alive options for users of Java 11+. These options will be ignored for older Java versions. 
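+        // The options are applied reflectively (see setExtendedSocketOptions below): Socket#setOption and
+        // jdk.net.ExtendedSocketOptions are resolved once in the static initializer and skipped when absent.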
+ setExtendedSocketOptions(socket); + + if (settings.getReceiveBufferSize() > 0) { + socket.setReceiveBufferSize(settings.getReceiveBufferSize()); + } + if (settings.getSendBufferSize() > 0) { + socket.setSendBufferSize(settings.getSendBufferSize()); + } + } + + static void setExtendedSocketOptions(final Socket socket) { + try { + if (SET_OPTION_METHOD != null) { + if (KEEP_COUNT_OPTION != null) { + SET_OPTION_METHOD.invoke(socket, KEEP_COUNT_OPTION, TCP_KEEPCOUNT_LIMIT); + } + if (KEEP_IDLE_OPTION != null) { + SET_OPTION_METHOD.invoke(socket, KEEP_IDLE_OPTION, TCP_KEEPIDLE_DURATION); + } + if (KEEP_INTERVAL_OPTION != null) { + SET_OPTION_METHOD.invoke(socket, KEEP_INTERVAL_OPTION, TCP_KEEPINTERVAL_DURATION); + } + } + } catch (IllegalAccessException | InvocationTargetException e) { + // ignore failures, as this is best effort + } + } + + private SocketStreamHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/SocksSocket.java b/driver-core/src/main/com/mongodb/internal/connection/SocksSocket.java new file mode 100644 index 00000000000..2619a3c2c10 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/SocksSocket.java @@ -0,0 +1,783 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.connection; + +import com.mongodb.connection.ProxySettings; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.ConnectException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.net.SocketAddress; +import java.net.SocketException; +import java.net.SocketTimeoutException; +import java.nio.channels.SocketChannel; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; + +import static com.mongodb.assertions.Assertions.assertFalse; +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.assertions.Assertions.fail; +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.internal.connection.DomainNameUtils.isDomainName; +import static com.mongodb.internal.connection.SocksSocket.AddressType.DOMAIN_NAME; +import static com.mongodb.internal.connection.SocksSocket.AddressType.IP_V4; +import static com.mongodb.internal.connection.SocksSocket.AddressType.IP_V6; +import static com.mongodb.internal.connection.SocksSocket.ServerReply.REPLY_SUCCEEDED; +import static com.mongodb.internal.time.Timeout.ZeroSemantics.ZERO_DURATION_MEANS_INFINITE; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
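+ *
+ * <p>Illustrative usage (added example with a hypothetical {@code proxySettings}; the target address must be
+ * unresolved so that name resolution is delegated to the proxy):</p>
+ * <pre>{@code
+ * SocksSocket socket = new SocksSocket(proxySettings);
+ * socket.connect(InetSocketAddress.createUnresolved("db.example.com", 27017), 10_000);
+ * // Under the hood this runs the RFC 1928 exchange:
+ * //   greeting:  VER=5, NMETHODS, METHODS...  -> method selection: VER, METHOD
+ * //   optional RFC 1929 username/password sub-negotiation
+ * //   request:   VER=5, CMD=CONNECT, RSV, ATYP, DST.ADDR, DST.PORT
+ * //   reply:     VER, REP, RSV, ATYP, BND.ADDR, BND.PORT (REP=0x00 means succeeded)
+ * }</pre>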
+ */ +public final class SocksSocket extends Socket { + private static final byte SOCKS_VERSION = 0x05; + private static final byte RESERVED = 0x00; + private static final byte PORT_LENGTH = 2; + private static final byte AUTHENTICATION_SUCCEEDED_STATUS = 0x00; + public static final String IP_PARSING_ERROR_SUFFIX = " is not an IP string literal"; + private static final byte USER_PASSWORD_SUB_NEGOTIATION_VERSION = 0x01; + private InetSocketAddress remoteAddress; + private final ProxySettings proxySettings; + @Nullable + private final Socket socket; + + public SocksSocket(final ProxySettings proxySettings) { + this(null, proxySettings); + } + + public SocksSocket(@Nullable final Socket socket, final ProxySettings proxySettings) { + assertNotNull(proxySettings.getHost()); + /* Explanation for using Socket instead of SocketFactory: The process of initializing a socket for a SOCKS proxy follows a specific sequence. + First, a basic TCP socket is created using the socketFactory, and then it's customized with settings. + Subsequently, the socket is wrapped within a SocksSocket instance to provide additional functionality. + Due to limitations in extending methods within SocksSocket for Java 11, the configuration step must precede the wrapping stage. + As a result, passing SocketFactory directly into this constructor for socket creation is not feasible. + */ + if (socket != null) { + assertFalse(socket.isConnected()); + } + this.socket = socket; + this.proxySettings = proxySettings; + } + + @Override + public void connect(final SocketAddress endpoint, final int connectTimeoutMs) throws IOException { + // `Socket` requires `IllegalArgumentException` + isTrueArgument("connectTimeoutMs", connectTimeoutMs >= 0); + try { + Timeout timeout = Timeout.expiresIn(connectTimeoutMs, MILLISECONDS, ZERO_DURATION_MEANS_INFINITE); + InetSocketAddress unresolvedAddress = (InetSocketAddress) endpoint; + assertTrue(unresolvedAddress.isUnresolved()); + this.remoteAddress = unresolvedAddress; + + InetSocketAddress proxyAddress = new InetSocketAddress(assertNotNull(proxySettings.getHost()), proxySettings.getPort()); + + timeout.checkedRun(MILLISECONDS, + () -> socketConnect(proxyAddress, 0), + (ms) -> socketConnect(proxyAddress, Math.toIntExact(ms)), + () -> throwSocketConnectionTimeout()); + + SocksAuthenticationMethod authenticationMethod = performNegotiation(timeout); + authenticate(authenticationMethod, timeout); + sendConnect(timeout); + } catch (SocketException socketException) { + /* + * The 'close()' call here has two purposes: + * + * 1. Enforces self-closing under RFC 1928 if METHOD is X'FF'. + * 2. Handles all other errors during connection, distinct from external closures. 
+ */ + try { + close(); + } catch (Exception closeException) { + socketException.addSuppressed(closeException); + } + throw socketException; + } + } + + private void socketConnect(final InetSocketAddress proxyAddress, final int rem) throws IOException { + if (socket != null) { + socket.connect(proxyAddress, rem); + } else { + super.connect(proxyAddress, rem); + } + } + + private void sendConnect(final Timeout timeout) throws IOException { + final String host = remoteAddress.getHostName(); + final int port = remoteAddress.getPort(); + final byte[] bytesOfHost = host.getBytes(StandardCharsets.US_ASCII); + final int hostLength = bytesOfHost.length; + + AddressType addressType; + byte[] ipAddress = null; + if (isDomainName(host)) { + addressType = DOMAIN_NAME; + } else { + ipAddress = createByteArrayFromIpAddress(host); + addressType = determineAddressType(ipAddress); + } + byte[] bufferSent = createBuffer(addressType, hostLength); + bufferSent[0] = SOCKS_VERSION; + bufferSent[1] = SocksCommand.CONNECT.getCommandNumber(); + bufferSent[2] = RESERVED; + switch (addressType) { + case DOMAIN_NAME: + bufferSent[3] = DOMAIN_NAME.getAddressTypeNumber(); + bufferSent[4] = (byte) hostLength; + System.arraycopy(bytesOfHost, 0, bufferSent, 5, hostLength); + addPort(bufferSent, 5 + hostLength, port); + break; + case IP_V4: + bufferSent[3] = IP_V4.getAddressTypeNumber(); + System.arraycopy(ipAddress, 0, bufferSent, 4, ipAddress.length); + addPort(bufferSent, 4 + ipAddress.length, port); + break; + case IP_V6: + bufferSent[3] = DOMAIN_NAME.getAddressTypeNumber(); + System.arraycopy(ipAddress, 0, bufferSent, 4, ipAddress.length); + addPort(bufferSent, 4 + ipAddress.length, port); + break; + default: + fail(); + } + OutputStream outputStream = getOutputStream(); + outputStream.write(bufferSent); + outputStream.flush(); + checkServerReply(timeout); + } + + private static void addPort(final byte[] bufferSent, final int index, final int port) { + bufferSent[index] = (byte) (port >> 8); + bufferSent[index + 1] = (byte) port; + } + + private static byte[] createByteArrayFromIpAddress(final String host) throws SocketException { + byte[] bytes = InetAddressUtils.ipStringToBytes(host); + if (bytes == null) { + throw new SocketException(host + IP_PARSING_ERROR_SUFFIX); + } + return bytes; + } + + private AddressType determineAddressType(final byte[] ipAddress) { + if (ipAddress.length == IP_V4.getLength()) { + return IP_V4; + } else if (ipAddress.length == IP_V6.getLength()) { + return IP_V6; + } + throw fail(); + } + + private static byte[] createBuffer(final AddressType addressType, final int hostLength) { + switch (addressType) { + case DOMAIN_NAME: + return new byte[7 + hostLength]; + case IP_V4: + return new byte[6 + IP_V4.getLength()]; + case IP_V6: + return new byte[6 + IP_V6.getLength()]; + default: + throw fail(); + } + } + + private void checkServerReply(final Timeout timeout) throws IOException { + byte[] data = readSocksReply(4, timeout); + ServerReply reply = ServerReply.of(data[1]); + if (reply == REPLY_SUCCEEDED) { + switch (AddressType.of(data[3])) { + case DOMAIN_NAME: + byte hostNameLength = readSocksReply(1, timeout)[0]; + readSocksReply(hostNameLength + PORT_LENGTH, timeout); + break; + case IP_V4: + readSocksReply(IP_V4.getLength() + PORT_LENGTH, timeout); + break; + case IP_V6: + readSocksReply(IP_V6.getLength() + PORT_LENGTH, timeout); + break; + default: + throw fail(); + } + return; + } + throw new ConnectException(reply.getMessage()); + } + + private void authenticate(final 
SocksAuthenticationMethod authenticationMethod, final Timeout timeout) throws IOException { + if (authenticationMethod == SocksAuthenticationMethod.USERNAME_PASSWORD) { + final byte[] bytesOfUsername = assertNotNull(proxySettings.getUsername()).getBytes(StandardCharsets.UTF_8); + final byte[] bytesOfPassword = assertNotNull(proxySettings.getPassword()).getBytes(StandardCharsets.UTF_8); + final int usernameLength = bytesOfUsername.length; + final int passwordLength = bytesOfPassword.length; + final byte[] command = new byte[3 + usernameLength + passwordLength]; + + command[0] = USER_PASSWORD_SUB_NEGOTIATION_VERSION; + command[1] = (byte) usernameLength; + System.arraycopy(bytesOfUsername, 0, command, 2, usernameLength); + command[2 + usernameLength] = (byte) passwordLength; + System.arraycopy(bytesOfPassword, 0, command, 3 + usernameLength, + passwordLength); + + OutputStream outputStream = getOutputStream(); + outputStream.write(command); + outputStream.flush(); + + byte[] authResult = readSocksReply(2, timeout); + byte authStatus = authResult[1]; + + if (authStatus != AUTHENTICATION_SUCCEEDED_STATUS) { + throw new ConnectException("Authentication failed. Proxy server returned status: " + authStatus); + } + } + } + + private SocksAuthenticationMethod performNegotiation(final Timeout timeout) throws IOException { + SocksAuthenticationMethod[] authenticationMethods = getSocksAuthenticationMethods(); + + int methodsCount = authenticationMethods.length; + + byte[] bufferSent = new byte[2 + methodsCount]; + bufferSent[0] = SOCKS_VERSION; + bufferSent[1] = (byte) methodsCount; + for (int i = 0; i < methodsCount; i++) { + bufferSent[2 + i] = authenticationMethods[i].getMethodNumber(); + } + + OutputStream outputStream = getOutputStream(); + outputStream.write(bufferSent); + outputStream.flush(); + + byte[] handshakeReply = readSocksReply(2, timeout); + + if (handshakeReply[0] != SOCKS_VERSION) { + throw new ConnectException("Remote server doesn't support socks version 5" + + " Received version: " + handshakeReply[0]); + } + byte authMethodNumber = handshakeReply[1]; + if (authMethodNumber == (byte) 0xFF) { + throw new ConnectException("None of the authentication methods listed are acceptable. 
Attempted methods: " + + Arrays.toString(authenticationMethods)); + } + if (authMethodNumber == SocksAuthenticationMethod.NO_AUTH.getMethodNumber()) { + return SocksAuthenticationMethod.NO_AUTH; + } else if (authMethodNumber == SocksAuthenticationMethod.USERNAME_PASSWORD.getMethodNumber()) { + return SocksAuthenticationMethod.USERNAME_PASSWORD; + } + + throw new ConnectException("Proxy returned unsupported authentication method: " + authMethodNumber); + } + + private SocksAuthenticationMethod[] getSocksAuthenticationMethods() { + SocksAuthenticationMethod[] authMethods; + if (proxySettings.getUsername() != null) { + authMethods = new SocksAuthenticationMethod[]{ + SocksAuthenticationMethod.NO_AUTH, + SocksAuthenticationMethod.USERNAME_PASSWORD}; + } else { + authMethods = new SocksAuthenticationMethod[]{SocksAuthenticationMethod.NO_AUTH}; + } + return authMethods; + } + + private byte[] readSocksReply(final int length, final Timeout timeout) throws IOException { + InputStream inputStream = getInputStream(); + byte[] data = new byte[length]; + int received = 0; + int originalTimeout = getSoTimeout(); + try { + while (received < length) { + int count; + timeout.checkedRun(MILLISECONDS, () -> { + setSoTimeout(0); + }, (remainingMs) -> { + setSoTimeout(Math.toIntExact(remainingMs)); + }, () -> { + throwSocketConnectionTimeout(); + }); + + count = inputStream.read(data, received, length - received); + if (count < 0) { + throw new ConnectException("Malformed reply from SOCKS proxy server"); + } + received += count; + } + } finally { + setSoTimeout(originalTimeout); + } + return data; + } + + private static void throwSocketConnectionTimeout() throws SocketTimeoutException { + throw new SocketTimeoutException("Socket connection timed out"); + } + + enum SocksCommand { + + CONNECT(0x01); + + private final byte value; + + SocksCommand(final int value) { + this.value = (byte) value; + } + + public byte getCommandNumber() { + return value; + } + } + + private enum SocksAuthenticationMethod { + NO_AUTH(0x00), + USERNAME_PASSWORD(0x02); + + private final byte methodNumber; + + SocksAuthenticationMethod(final int methodNumber) { + this.methodNumber = (byte) methodNumber; + } + + public byte getMethodNumber() { + return methodNumber; + } + } + + enum AddressType { + IP_V4(0x01, 4), + IP_V6(0x04, 16), + DOMAIN_NAME(0x03, -1) { + public byte getLength() { + throw fail(); + } + }; + + private final byte length; + private final byte addressTypeNumber; + + AddressType(final int addressTypeNumber, final int length) { + this.addressTypeNumber = (byte) addressTypeNumber; + this.length = (byte) length; + } + + static AddressType of(final byte signedAddressType) throws ConnectException { + int addressTypeNumber = Byte.toUnsignedInt(signedAddressType); + for (AddressType addressType : AddressType.values()) { + if (addressTypeNumber == addressType.getAddressTypeNumber()) { + return addressType; + } + } + throw new ConnectException("Reply from SOCKS proxy server contains wrong address type" + + " Address type: " + addressTypeNumber); + } + + byte getLength() { + return length; + } + + byte getAddressTypeNumber() { + return addressTypeNumber; + } + + } + + enum ServerReply { + REPLY_SUCCEEDED(0x00, "Succeeded"), + GENERAL_FAILURE(0x01, "General SOCKS5 server failure"), + NOT_ALLOWED(0x02, "Connection is not allowed by ruleset"), + NET_UNREACHABLE(0x03, "Network is unreachable"), + HOST_UNREACHABLE(0x04, "Host is unreachable"), + CONN_REFUSED(0x05, "Connection has been refused"), + TTL_EXPIRED(0x06, "TTL is expired"), + 
CMD_NOT_SUPPORTED(0x07, "Command is not supported"), + ADDR_TYPE_NOT_SUP(0x08, "Address type is not supported"); + + private final int replyNumber; + private final String message; + + ServerReply(final int replyNumber, final String message) { + this.replyNumber = replyNumber; + this.message = message; + } + + static ServerReply of(final byte byteStatus) throws ConnectException { + int status = Byte.toUnsignedInt(byteStatus); + for (ServerReply serverReply : ServerReply.values()) { + if (status == serverReply.replyNumber) { + return serverReply; + } + } + + throw new ConnectException("Unknown reply field. Reply field: " + status); + } + + public String getMessage() { + return message; + } + } + + @Override + @SuppressWarnings("try") + public void close() throws IOException { + /* + If this.socket is not null, this class essentially acts as a wrapper and we neither bind nor connect in the superclass, + nor do we get input/output streams from the superclass. While it might seem reasonable to skip calling super.close() in this case, + the Java SE Socket documentation doesn't definitively clarify this. Therefore, it's safer to always call super.close(). + */ + try (Socket autoClosed = socket) { + super.close(); + } + } + + @Override + public void setSoTimeout(final int timeout) throws SocketException { + if (socket != null) { + socket.setSoTimeout(timeout); + } else { + super.setSoTimeout(timeout); + } + } + + @Override + public int getSoTimeout() throws SocketException { + if (socket != null) { + return socket.getSoTimeout(); + } else { + return super.getSoTimeout(); + } + } + + @Override + public void bind(final SocketAddress bindpoint) throws IOException { + if (socket != null) { + socket.bind(bindpoint); + } else { + super.bind(bindpoint); + } + } + + @Override + public InetAddress getInetAddress() { + if (socket != null) { + return socket.getInetAddress(); + } else { + return super.getInetAddress(); + } + } + + @Override + public InetAddress getLocalAddress() { + if (socket != null) { + return socket.getLocalAddress(); + } else { + return super.getLocalAddress(); + } + } + + @Override + public int getPort() { + if (socket != null) { + return socket.getPort(); + } else { + return super.getPort(); + } + } + + @Override + public int getLocalPort() { + if (socket != null) { + return socket.getLocalPort(); + } else { + return super.getLocalPort(); + } + } + + @Override + public SocketAddress getRemoteSocketAddress() { + if (socket != null) { + return socket.getRemoteSocketAddress(); + } else { + return super.getRemoteSocketAddress(); + } + } + + @Override + public SocketAddress getLocalSocketAddress() { + if (socket != null) { + return socket.getLocalSocketAddress(); + } else { + return super.getLocalSocketAddress(); + } + } + + @Override + public SocketChannel getChannel() { + if (socket != null) { + return socket.getChannel(); + } else { + return super.getChannel(); + } + } + + @Override + public void setTcpNoDelay(final boolean on) throws SocketException { + if (socket != null) { + socket.setTcpNoDelay(on); + } else { + super.setTcpNoDelay(on); + } + } + + @Override + public boolean getTcpNoDelay() throws SocketException { + if (socket != null) { + return socket.getTcpNoDelay(); + } else { + return super.getTcpNoDelay(); + } + } + + @Override + public void setSoLinger(final boolean on, final int linger) throws SocketException { + if (socket != null) { + socket.setSoLinger(on, linger); + } else { + super.setSoLinger(on, linger); + } + } + + @Override + public int getSoLinger() throws 
SocketException { + if (socket != null) { + return socket.getSoLinger(); + } else { + return super.getSoLinger(); + } + } + + @Override + public void sendUrgentData(final int data) throws IOException { + if (socket != null) { + socket.sendUrgentData(data); + } else { + super.sendUrgentData(data); + } + } + + @Override + public void setOOBInline(final boolean on) throws SocketException { + if (socket != null) { + socket.setOOBInline(on); + } else { + super.setOOBInline(on); + } + } + + @Override + public boolean getOOBInline() throws SocketException { + if (socket != null) { + return socket.getOOBInline(); + } else { + return super.getOOBInline(); + } + } + + @Override + public void setSendBufferSize(final int size) throws SocketException { + if (socket != null) { + socket.setSendBufferSize(size); + } else { + super.setSendBufferSize(size); + } + } + + @Override + public int getSendBufferSize() throws SocketException { + if (socket != null) { + return socket.getSendBufferSize(); + } else { + return super.getSendBufferSize(); + } + } + + @Override + public void setReceiveBufferSize(final int size) throws SocketException { + if (socket != null) { + socket.setReceiveBufferSize(size); + } else { + super.setReceiveBufferSize(size); + } + } + + @Override + public int getReceiveBufferSize() throws SocketException { + if (socket != null) { + return socket.getReceiveBufferSize(); + } else { + return super.getReceiveBufferSize(); + } + } + + @Override + public void setKeepAlive(final boolean on) throws SocketException { + if (socket != null) { + socket.setKeepAlive(on); + } else { + super.setKeepAlive(on); + } + } + + @Override + public boolean getKeepAlive() throws SocketException { + if (socket != null) { + return socket.getKeepAlive(); + } else { + return super.getKeepAlive(); + } + } + + @Override + public void setTrafficClass(final int tc) throws SocketException { + if (socket != null) { + socket.setTrafficClass(tc); + } else { + super.setTrafficClass(tc); + } + } + + @Override + public int getTrafficClass() throws SocketException { + if (socket != null) { + return socket.getTrafficClass(); + } else { + return super.getTrafficClass(); + } + } + + @Override + public void setReuseAddress(final boolean on) throws SocketException { + if (socket != null) { + socket.setReuseAddress(on); + } else { + super.setReuseAddress(on); + } + } + + @Override + public boolean getReuseAddress() throws SocketException { + if (socket != null) { + return socket.getReuseAddress(); + } else { + return super.getReuseAddress(); + } + } + + @Override + public void shutdownInput() throws IOException { + if (socket != null) { + socket.shutdownInput(); + } else { + super.shutdownInput(); + } + } + + @Override + public void shutdownOutput() throws IOException { + if (socket != null) { + socket.shutdownOutput(); + } else { + super.shutdownOutput(); + } + } + + @Override + public boolean isConnected() { + if (socket != null) { + return socket.isConnected(); + } else { + return super.isConnected(); + } + } + + @Override + public boolean isBound() { + if (socket != null) { + return socket.isBound(); + } else { + return super.isBound(); + } + } + + @Override + public boolean isClosed() { + if (socket != null) { + return socket.isClosed(); + } else { + return super.isClosed(); + } + } + + @Override + public boolean isInputShutdown() { + if (socket != null) { + return socket.isInputShutdown(); + } else { + return super.isInputShutdown(); + } + } + + @Override + public boolean isOutputShutdown() { + if (socket != null) { + return 
socket.isOutputShutdown(); + } else { + return super.isOutputShutdown(); + } + } + + @Override + public void setPerformancePreferences(final int connectionTime, final int latency, final int bandwidth) { + if (socket != null) { + socket.setPerformancePreferences(connectionTime, latency, bandwidth); + } else { + super.setPerformancePreferences(connectionTime, latency, bandwidth); + } + } + + @Override + public InputStream getInputStream() throws IOException { + if (socket != null) { + return socket.getInputStream(); + } + return super.getInputStream(); + } + + @Override + public OutputStream getOutputStream() throws IOException { + if (socket != null) { + return socket.getOutputStream(); + } + return super.getOutputStream(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/SpeculativeAuthenticator.java b/driver-core/src/main/com/mongodb/internal/connection/SpeculativeAuthenticator.java new file mode 100644 index 00000000000..5bcb4224ca7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/SpeculativeAuthenticator.java @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; + +interface SpeculativeAuthenticator { + + @Nullable + default BsonDocument createSpeculativeAuthenticateCommand(final InternalConnection connection) { + return null; + } + + @Nullable + default BsonDocument getSpeculativeAuthenticateResponse() { + return null; + } + + default void setSpeculativeAuthenticateResponse(final BsonDocument response) {} +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/SplittablePayload.java b/driver-core/src/main/com/mongodb/internal/connection/SplittablePayload.java new file mode 100644 index 00000000000..9e52894f720 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/SplittablePayload.java @@ -0,0 +1,324 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
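The delegating overrides above, together with the `close()` implementation earlier in this class, rely on try-with-resources to release both the wrapped socket and the superclass state even when one of the two `close()` calls throws. A minimal standalone sketch of that idiom (the `Wrapper` class here is hypothetical, not driver code):

```java
import java.io.Closeable;
import java.io.IOException;

// Hypothetical wrapper mirroring the close() idiom above: the delegate is
// auto-closed after the try body, so closeOwnState() runs first, and a failure
// in either close is preserved (one thrown, the other attached as suppressed).
// A null delegate is simply skipped by try-with-resources.
final class Wrapper implements Closeable {
    private final Closeable delegate;

    Wrapper(final Closeable delegate) {
        this.delegate = delegate;
    }

    @Override
    @SuppressWarnings("try")
    public void close() throws IOException {
        try (Closeable autoClosed = delegate) {
            closeOwnState();
        }
    }

    private void closeOwnState() throws IOException {
        // release resources owned by the wrapper itself
    }
}
```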
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.internal.bulk.DeleteRequest; +import com.mongodb.internal.bulk.InsertRequest; +import com.mongodb.internal.bulk.UpdateRequest; +import com.mongodb.internal.bulk.WriteRequest; +import com.mongodb.internal.bulk.WriteRequestWithIndex; +import org.bson.BsonDocument; +import org.bson.BsonDocumentWrapper; +import org.bson.BsonObjectId; +import org.bson.BsonValue; +import org.bson.BsonWriter; +import org.bson.FieldNameValidator; +import org.bson.codecs.BsonValueCodecProvider; +import org.bson.codecs.Codec; +import org.bson.codecs.Encoder; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.configuration.CodecRegistry; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.connection.SplittablePayload.Type.INSERT; +import static com.mongodb.internal.connection.SplittablePayload.Type.REPLACE; +import static com.mongodb.internal.connection.SplittablePayload.Type.UPDATE; +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; + +/** + * A Splittable payload for write commands. + * + *

<p>The command will consume as much of the payload as possible. The {@link #hasAnotherSplit()} method will return true if there is + * another split to consume, and the {@link #getNextSplit} method will return the next SplittablePayload.</p> + * + * <p>This class is not part of the public API and may be removed or changed at any time</p>

+ */ +public final class SplittablePayload extends MessageSequences { + private static final CodecRegistry REGISTRY = fromProviders(new BsonValueCodecProvider()); + private final FieldNameValidator fieldNameValidator; + private final WriteRequestEncoder writeRequestEncoder = new WriteRequestEncoder(); + private final Type payloadType; + private final List<WriteRequestWithIndex> writeRequestWithIndexes; + private final boolean ordered; + private final Map<Integer, BsonValue> insertedIds = new HashMap<>(); + private int position = 0; + + /** + * The type of the payload. + */ + public enum Type { + /** + * An insert. + */ + INSERT, + + /** + * An update that uses update operators. + */ + UPDATE, + + /** + * An update that replaces the existing document. + */ + REPLACE, + + /** + * A delete. + */ + DELETE + } + + /** + * Create a new instance + * + * @param payloadType the payload type + * @param writeRequestWithIndexes the writeRequests + */ + public SplittablePayload( + final Type payloadType, + final List<WriteRequestWithIndex> writeRequestWithIndexes, + final boolean ordered, + final FieldNameValidator fieldNameValidator) { + this.payloadType = notNull("batchType", payloadType); + this.writeRequestWithIndexes = notNull("writeRequests", writeRequestWithIndexes); + this.ordered = ordered; + this.fieldNameValidator = notNull("fieldNameValidator", fieldNameValidator); + } + + public FieldNameValidator getFieldNameValidator() { + return fieldNameValidator; + } + + /** + * @return the payload type + */ + public Type getPayloadType() { + return payloadType; + } + + /** + * @return the payload name + */ + public String getPayloadName() { + if (payloadType == INSERT) { + return "documents"; + } else if (payloadType == UPDATE || payloadType == REPLACE) { + return "updates"; + } else { + return "deletes"; + } + } + + boolean hasPayload() { + return !writeRequestWithIndexes.isEmpty(); + } + + public int size() { + return writeRequestWithIndexes.size(); + } + + public Map<Integer, BsonValue> getInsertedIds() { + return insertedIds; + } + + /** + * @return the payload + */ + public List<BsonDocument> getPayload() { + return writeRequestWithIndexes.stream().map(wri -> + new BsonDocumentWrapper<>(wri, writeRequestEncoder)) + .collect(Collectors.toList()); + } + + /** + * @return the current position in the payload + */ + public int getPosition() { + return position; + } + + /** + * Sets the current position in the payload + * @param position the position + */ + public void setPosition(final int position) { + this.position = position; + } + + /** + * @return true if there are more values after the current position + */ + public boolean hasAnotherSplit() { + // this method must not be called before this payload has been encoded + assertTrue(position > 0); + return writeRequestWithIndexes.size() > position; + } + + boolean isOrdered() { + return ordered; + } + + /** + * @return a new SplittablePayload containing only the values after the current position.
+ */ + public SplittablePayload getNextSplit() { + isTrue("hasAnotherSplit", hasAnotherSplit()); + List nextPayLoad = writeRequestWithIndexes.subList(position, writeRequestWithIndexes.size()); + return new SplittablePayload(payloadType, nextPayLoad, ordered, fieldNameValidator); + } + + /** + * @return true if the writeRequests list is empty + */ + public boolean isEmpty() { + return writeRequestWithIndexes.isEmpty(); + } + + class WriteRequestEncoder implements Encoder { + + WriteRequestEncoder() { + } + + @Override + public void encode(final BsonWriter writer, final WriteRequestWithIndex writeRequestWithIndex, + final EncoderContext encoderContext) { + if (writeRequestWithIndex.getType() == WriteRequest.Type.INSERT) { + InsertRequest insertRequest = (InsertRequest) writeRequestWithIndex.getWriteRequest(); + BsonDocument document = insertRequest.getDocument(); + + BsonValue documentId = insertedIds.compute( + writeRequestWithIndex.getIndex(), + (writeRequestIndex, writeRequestDocumentId) -> { + IdHoldingBsonWriter idHoldingBsonWriter = new IdHoldingBsonWriter( + writer, + // Reuse `writeRequestDocumentId` if it may have been generated + // by `IdHoldingBsonWriter` in a previous attempt. + // If its type is not `BsonObjectId`, which happens only if `_id` was specified by the application, + // we know it could not have been generated. + writeRequestDocumentId instanceof BsonObjectId ? writeRequestDocumentId.asObjectId() : null); + getCodec(document).encode(idHoldingBsonWriter, document, + EncoderContext.builder().isEncodingCollectibleDocument(true).build()); + return idHoldingBsonWriter.getId(); + }); + if (documentId == null) { + // we must add an entry anyway because we rely on all the indexes being present + insertedIds.put(writeRequestWithIndex.getIndex(), null); + } + } else if (writeRequestWithIndex.getType() == WriteRequest.Type.UPDATE + || writeRequestWithIndex.getType() == WriteRequest.Type.REPLACE) { + UpdateRequest update = (UpdateRequest) writeRequestWithIndex.getWriteRequest(); + writer.writeStartDocument(); + writer.writeName("q"); + getCodec(update.getFilter()).encode(writer, update.getFilter(), EncoderContext.builder().build()); + + BsonValue updateValue = update.getUpdateValue(); + if (!updateValue.isDocument() && !updateValue.isArray()) { + throw new IllegalArgumentException("Invalid BSON value for an update."); + } + if (updateValue.isArray() && updateValue.asArray().isEmpty()) { + throw new IllegalArgumentException("Invalid pipeline for an update. The pipeline may not be empty."); + } + + writer.writeName("u"); + if (updateValue.isDocument()) { + FieldTrackingBsonWriter fieldTrackingBsonWriter = new FieldTrackingBsonWriter(writer); + getCodec(updateValue.asDocument()).encode(fieldTrackingBsonWriter, updateValue.asDocument(), + EncoderContext.builder().build()); + if (writeRequestWithIndex.getType() == WriteRequest.Type.UPDATE && !fieldTrackingBsonWriter.hasWrittenField()) { + throw new IllegalArgumentException("Invalid BSON document for an update. 
The document may not be empty."); + } + } else if (update.getType() == WriteRequest.Type.UPDATE && updateValue.isArray()) { + writer.writeStartArray(); + for (BsonValue cur : updateValue.asArray()) { + getCodec(cur.asDocument()).encode(writer, cur.asDocument(), EncoderContext.builder().build()); + } + writer.writeEndArray(); + } + + if (update.isMulti()) { + writer.writeBoolean("multi", true); + } + if (update.isUpsert()) { + writer.writeBoolean("upsert", true); + } + if (update.getCollation() != null) { + writer.writeName("collation"); + BsonDocument collation = assertNotNull(update.getCollation()).asDocument(); + getCodec(collation).encode(writer, collation, EncoderContext.builder().build()); + } + if (update.getArrayFilters() != null) { + writer.writeStartArray("arrayFilters"); + for (BsonDocument cur: assertNotNull(update.getArrayFilters())) { + getCodec(cur).encode(writer, cur, EncoderContext.builder().build()); + } + writer.writeEndArray(); + } + if (update.getHint() != null) { + writer.writeName("hint"); + getCodec(assertNotNull(update.getHint())).encode(writer, assertNotNull(update.getHint()), + EncoderContext.builder().build()); + } else if (update.getHintString() != null) { + writer.writeString("hint", update.getHintString()); + } + if (update.getSort() != null) { + writer.writeName("sort"); + getCodec(assertNotNull(update.getSort())).encode(writer, assertNotNull(update.getSort()), + EncoderContext.builder().build()); + } + writer.writeEndDocument(); + } else { + DeleteRequest deleteRequest = (DeleteRequest) writeRequestWithIndex.getWriteRequest(); + writer.writeStartDocument(); + writer.writeName("q"); + getCodec(deleteRequest.getFilter()).encode(writer, deleteRequest.getFilter(), EncoderContext.builder().build()); + writer.writeInt32("limit", deleteRequest.isMulti() ? 0 : 1); + if (deleteRequest.getCollation() != null) { + writer.writeName("collation"); + BsonDocument collation = assertNotNull(deleteRequest.getCollation()).asDocument(); + getCodec(collation).encode(writer, collation, EncoderContext.builder().build()); + } + if (deleteRequest.getHint() != null) { + writer.writeName("hint"); + BsonDocument hint = assertNotNull(deleteRequest.getHint()); + getCodec(hint).encode(writer, hint, EncoderContext.builder().build()); + } else if (deleteRequest.getHintString() != null) { + writer.writeString("hint", deleteRequest.getHintString()); + } + writer.writeEndDocument(); + } + } + + @Override + public Class getEncoderClass() { + return WriteRequestWithIndex.class; + } + } + + @SuppressWarnings("unchecked") + private static Codec getCodec(final BsonDocument document) { + return (Codec) REGISTRY.get(document.getClass()); + } + +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/SplittablePayloadBsonWriter.java b/driver-core/src/main/com/mongodb/internal/connection/SplittablePayloadBsonWriter.java new file mode 100644 index 00000000000..e679a3b557c --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/SplittablePayloadBsonWriter.java @@ -0,0 +1,72 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
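Taken together, `hasAnotherSplit()` and `getNextSplit()` let a caller drain a payload batch by batch. A hedged sketch of that loop, where `encodeBatch` is a hypothetical stand-in for the command-encoding step that consumes part of the payload and advances its position:

```java
// Hypothetical drain loop: encodeBatch(...) represents whatever encodes one
// command's worth of write requests and calls payload.setPosition(...);
// it is not a real driver method.
static void drainPayload(final SplittablePayload initialPayload) {
    SplittablePayload payload = initialPayload;
    while (true) {
        encodeBatch(payload);             // consumes as much as fits in one command
        if (!payload.hasAnotherSplit()) { // only valid once position > 0
            break;
        }
        payload = payload.getNextSplit(); // the write requests left over
    }
}
```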
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import org.bson.BsonBinaryWriter; +import org.bson.BsonWriter; +import org.bson.io.BsonOutput; + +import static com.mongodb.internal.connection.BsonWriterHelper.writePayloadArray; + +/** + *

<p>This class is not part of the public API and may be removed or changed at any time</p>

+ */ +public class SplittablePayloadBsonWriter extends LevelCountingBsonWriter { + private final BsonWriter writer; + private final BsonOutput bsonOutput; + private final SplittablePayload payload; + private final int maxSplittableDocumentSize; + private final MessageSettings settings; + private final int messageStartPosition; + + public SplittablePayloadBsonWriter(final BsonBinaryWriter writer, final BsonOutput bsonOutput, + final MessageSettings settings, final SplittablePayload payload, + final int maxSplittableDocumentSize) { + this(writer, bsonOutput, 0, settings, payload, maxSplittableDocumentSize); + } + + public SplittablePayloadBsonWriter(final BsonBinaryWriter writer, final BsonOutput bsonOutput, final int messageStartPosition, + final MessageSettings settings, final SplittablePayload payload) { + this(writer, bsonOutput, messageStartPosition, settings, payload, settings.getMaxDocumentSize()); + } + + public SplittablePayloadBsonWriter(final BsonBinaryWriter writer, final BsonOutput bsonOutput, final int messageStartPosition, + final MessageSettings settings, final SplittablePayload payload, + final int maxSplittableDocumentSize) { + super(writer); + this.writer = writer; + this.bsonOutput = bsonOutput; + this.messageStartPosition = messageStartPosition; + this.settings = settings; + this.payload = payload; + this.maxSplittableDocumentSize = maxSplittableDocumentSize; + } + + @Override + public void writeStartDocument() { + super.writeStartDocument(); + } + + @Override + public void writeEndDocument() { + if (getCurrentLevel() == DEFAULT_INITIAL_LEVEL + 1 && payload.hasPayload()) { + writePayloadArray(writer, bsonOutput, settings, messageStartPosition, payload, maxSplittableDocumentSize); + } + super.writeEndDocument(); + } + +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/SslHelper.java b/driver-core/src/main/com/mongodb/internal/connection/SslHelper.java new file mode 100644 index 00000000000..6e360b35b3f --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/SslHelper.java @@ -0,0 +1,84 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoInternalException; +import com.mongodb.connection.SslSettings; + +import javax.net.ssl.SNIHostName; +import javax.net.ssl.SNIServerName; +import javax.net.ssl.SSLParameters; +import javax.net.ssl.SSLSocket; + +import java.net.InetSocketAddress; +import java.net.Socket; + +import static java.util.Collections.singletonList; + +/** + *

<p>This class is not part of the public API and may be removed or changed at any time</p>

+ */ +public final class SslHelper { + + /** + * Enable HTTP endpoint verification on the given SSL parameters. + * + * @param sslParameters The original SSL parameters + */ + public static void enableHostNameVerification(final SSLParameters sslParameters) { + sslParameters.setEndpointIdentificationAlgorithm("HTTPS"); + } + + /** + * Enable SNI. + * + * @param host the server host + * @param sslParameters the SSL parameters + */ + public static void enableSni(final String host, final SSLParameters sslParameters) { + try { + SNIServerName sniHostName = new SNIHostName(host); + sslParameters.setServerNames(singletonList(sniHostName)); + } catch (IllegalArgumentException e) { + // ignore because SNIHostName will throw this for some legit host names for connecting to MongoDB, e.g an IPV6 literal + } + } + + public static void configureSslSocket(final Socket socket, final SslSettings sslSettings, final InetSocketAddress inetSocketAddress) throws + MongoInternalException { + if (sslSettings.isEnabled() || socket instanceof SSLSocket) { + if (!(socket instanceof SSLSocket)) { + throw new MongoInternalException("SSL is enabled but the socket is not an instance of javax.net.ssl.SSLSocket"); + } + SSLSocket sslSocket = (SSLSocket) socket; + SSLParameters sslParameters = sslSocket.getSSLParameters(); + if (sslParameters == null) { + sslParameters = new SSLParameters(); + } + + enableSni(inetSocketAddress.getHostName(), sslParameters); + + if (!sslSettings.isInvalidHostNameAllowed()) { + enableHostNameVerification(sslParameters); + } + sslSocket.setSSLParameters(sslParameters); + } + } + + private SslHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/Stream.java b/driver-core/src/main/com/mongodb/internal/connection/Stream.java new file mode 100644 index 00000000000..317927f1715 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/Stream.java @@ -0,0 +1,106 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.ServerAddress; +import com.mongodb.connection.AsyncCompletionHandler; +import org.bson.ByteBuf; + +import java.io.IOException; +import java.util.List; + +/** + * A full duplex stream of bytes. + */ +public interface Stream extends BufferProvider { + + /** + * Open the stream. + * + * @param operationContext the operation context + * @throws IOException if an I/O error occurs + */ + void open(OperationContext operationContext) throws IOException; + + /** + * Open the stream asynchronously. + * + * @param operationContext the operation context + * @param handler the completion handler for opening the stream + */ + void openAsync(OperationContext operationContext, AsyncCompletionHandler handler); + + /** + * Write each buffer in the list to the stream in order, blocking until all are completely written. + * + * @param buffers the buffers to write. 
The operation must not {@linkplain ByteBuf#release() release} any buffer from {@code buffers}, + * unless the operation {@linkplain ByteBuf#retain() retains} it, and releasing is meant to compensate for that. + * @param operationContext the operation context + * @throws IOException if there are problems writing to the stream + */ + void write(List buffers, OperationContext operationContext) throws IOException; + + /** + * Read from the stream, blocking until the requested number of bytes have been read. + * + * @param numBytes The number of bytes to read into the returned byte buffer + * @param operationContext the operation context + * @return a byte buffer filled with number of bytes requested + * @throws IOException if there are problems reading from the stream + */ + ByteBuf read(int numBytes, OperationContext operationContext) throws IOException; + + /** + * Write each buffer in the list to the stream in order, asynchronously. This method should return immediately, and invoke the given + * callback on completion. + * + * @param buffers the buffers to write. The operation must not {@linkplain ByteBuf#release() release} any buffer from {@code buffers}, + * unless the operation {@linkplain ByteBuf#retain() retains} it, and releasing is meant to compensate for that. + * @param operationContext the operation context + * @param handler invoked when the write operation has completed + */ + void writeAsync(List buffers, OperationContext operationContext, AsyncCompletionHandler handler); + + /** + * Read from the stream, asynchronously. This method should return immediately, and invoke the given callback when the number of + * requested bytes have been read. + * + * @param numBytes the number of bytes + * @param operationContext the operation context + * @param handler invoked when the read operation has completed + */ + void readAsync(int numBytes, OperationContext operationContext, AsyncCompletionHandler handler); + + /** + * The address that this stream is connected to. + * + * @return the address + */ + ServerAddress getAddress(); + + /** + * Closes the connection. + */ + void close(); + + /** + * Returns the closed state of the connection + * + * @return true if connection is closed + */ + boolean isClosed(); +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/StreamFactory.java b/driver-core/src/main/com/mongodb/internal/connection/StreamFactory.java new file mode 100644 index 00000000000..120a4584862 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/StreamFactory.java @@ -0,0 +1,32 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.ServerAddress; + +/** + * A factory for streams. 
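For orientation, the blocking half of the `Stream` contract composes as below; `stream`, `messageBuffers`, and `operationContext` are assumed instances, and the 16-byte read reflects the fixed MongoDB wire-protocol header size:

```java
// Illustrative blocking round trip: open the stream, write a pre-encoded
// message, then block until the fixed-size reply header has been read.
stream.open(operationContext);
stream.write(messageBuffers, operationContext);          // messageBuffers: List<ByteBuf>
ByteBuf replyHeader = stream.read(16, operationContext); // wire-protocol header
```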
+ */ +public interface StreamFactory { + /** + * Create a Stream to the given address + * + * @param serverAddress the address + * @return the stream + */ + Stream create(ServerAddress serverAddress); +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/StreamFactoryFactory.java b/driver-core/src/main/com/mongodb/internal/connection/StreamFactoryFactory.java new file mode 100644 index 00000000000..6cbe620fd43 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/StreamFactoryFactory.java @@ -0,0 +1,38 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.connection.SocketSettings; +import com.mongodb.connection.SslSettings; + +/** + * A factory of {@code StreamFactory} instances. + */ +public interface StreamFactoryFactory extends AutoCloseable { + + /** + * Create a {@code StreamFactory} with the given settings. + * + * @param socketSettings the socket settings + * @param sslSettings the SSL settings + * @return a stream factory that will apply the given settings + */ + StreamFactory create(SocketSettings socketSettings, SslSettings sslSettings); + + @Override + void close(); +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/StreamFactoryHelper.java b/driver-core/src/main/com/mongodb/internal/connection/StreamFactoryHelper.java new file mode 100644 index 00000000000..7aeb65720b0 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/StreamFactoryHelper.java @@ -0,0 +1,100 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoClientException; +import com.mongodb.MongoClientSettings; +import com.mongodb.connection.AsyncTransportSettings; +import com.mongodb.connection.NettyTransportSettings; +import com.mongodb.connection.SocketSettings; +import com.mongodb.connection.SslSettings; +import com.mongodb.connection.TransportSettings; +import com.mongodb.internal.connection.netty.NettyStreamFactoryFactory; +import com.mongodb.lang.Nullable; +import com.mongodb.spi.dns.InetAddressResolver; + +import java.io.IOException; +import java.nio.channels.AsynchronousChannelGroup; +import java.util.concurrent.ExecutorService; + +/** + *

<p>This class is not part of the public API and may be removed or changed at any time</p>

+ */ +public final class StreamFactoryHelper { + + public static StreamFactoryFactory getSyncStreamFactoryFactory( + @Nullable final TransportSettings transportSettings, + final InetAddressResolver inetAddressResolver) { + + if (transportSettings == null) { + return new StreamFactoryFactory() { + @Override + public StreamFactory create(final SocketSettings socketSettings, final SslSettings sslSettings) { + return new SocketStreamFactory(inetAddressResolver, socketSettings, sslSettings); + } + + @Override + public void close() { + //NOP + } + }; + } else if (transportSettings instanceof AsyncTransportSettings) { + throw new MongoClientException("Unsupported transport settings in sync: " + transportSettings.getClass().getName()); + } else if (transportSettings instanceof NettyTransportSettings) { + return getNettyStreamFactoryFactory(inetAddressResolver, (NettyTransportSettings) transportSettings); + } else { + throw new MongoClientException("Unsupported transport settings: " + transportSettings.getClass().getName()); + } + } + + public static StreamFactoryFactory getAsyncStreamFactoryFactory(final MongoClientSettings settings, + final InetAddressResolver inetAddressResolver) { + TransportSettings transportSettings = settings.getTransportSettings(); + if (transportSettings == null || transportSettings instanceof AsyncTransportSettings) { + ExecutorService executorService = transportSettings == null + ? null + : ((AsyncTransportSettings) transportSettings).getExecutorService(); + if (settings.getSslSettings().isEnabled()) { + return new TlsChannelStreamFactoryFactory(inetAddressResolver, executorService); + } + AsynchronousChannelGroup group = null; + if (executorService != null) { + try { + group = AsynchronousChannelGroup.withThreadPool(executorService); + } catch (IOException e) { + throw new MongoClientException("Unable to create an asynchronous channel group", e); + } + } + return new AsynchronousSocketChannelStreamFactoryFactory(inetAddressResolver, group); + } else if (transportSettings instanceof NettyTransportSettings) { + return getNettyStreamFactoryFactory(inetAddressResolver, (NettyTransportSettings) transportSettings); + } else { + throw new MongoClientException("Unsupported transport settings: " + transportSettings.getClass().getName()); + } + } + + private static NettyStreamFactoryFactory getNettyStreamFactoryFactory(final InetAddressResolver inetAddressResolver, + final NettyTransportSettings transportSettings) { + return NettyStreamFactoryFactory.builder() + .applySettings(transportSettings) + .inetAddressResolver(inetAddressResolver) + .build(); + } + + private StreamFactoryHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/Time.java b/driver-core/src/main/com/mongodb/internal/connection/Time.java new file mode 100644 index 00000000000..e3940adf1de --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/Time.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
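In caller terms, `getSyncStreamFactoryFactory` falls back to plain-socket streams when no `TransportSettings` are supplied. A hedged wiring sketch (the `inetAddressResolver` instance is assumed):

```java
// Illustrative wiring: null transport settings select the SocketStreamFactory
// path; Netty settings would be forwarded to NettyStreamFactoryFactory instead,
// and async transport settings are rejected on the sync code path.
StreamFactoryFactory factoryFactory =
        StreamFactoryHelper.getSyncStreamFactoryFactory(null, inetAddressResolver);
StreamFactory streamFactory =
        factoryFactory.create(SocketSettings.builder().build(), SslSettings.builder().build());
```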
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +/** + * To enable unit testing of classes that rely on System.nanoTime + * + *

<p>This class is not part of the public API and may be removed or changed at any time</p>

+ */ +public final class Time { + static final long CONSTANT_TIME = 42; + + private static boolean isConstant; + + public static void makeTimeConstant() { + isConstant = true; + } + + public static void makeTimeMove() { + isConstant = false; + } + + public static long nanoTime() { + return isConstant ? CONSTANT_TIME : System.nanoTime(); + } + + private Time() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/TlsChannelStreamFactoryFactory.java b/driver-core/src/main/com/mongodb/internal/connection/TlsChannelStreamFactoryFactory.java new file mode 100644 index 00000000000..b0fae1d044d --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/TlsChannelStreamFactoryFactory.java @@ -0,0 +1,387 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoClientException; +import com.mongodb.MongoSocketOpenException; +import com.mongodb.ServerAddress; +import com.mongodb.connection.AsyncCompletionHandler; +import com.mongodb.connection.SocketSettings; +import com.mongodb.connection.SslSettings; +import com.mongodb.internal.connection.tlschannel.BufferAllocator; +import com.mongodb.internal.connection.tlschannel.ClientTlsChannel; +import com.mongodb.internal.connection.tlschannel.TlsChannel; +import com.mongodb.internal.connection.tlschannel.async.AsynchronousTlsChannel; +import com.mongodb.internal.connection.tlschannel.async.AsynchronousTlsChannelGroup; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.lang.Nullable; +import com.mongodb.spi.dns.InetAddressResolver; + +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLParameters; +import java.io.Closeable; +import java.io.IOException; +import java.net.StandardSocketOptions; +import java.nio.ByteBuffer; +import java.nio.channels.CompletionHandler; +import java.nio.channels.InterruptedByTimeoutException; +import java.nio.channels.SelectionKey; +import java.nio.channels.Selector; +import java.nio.channels.SocketChannel; +import java.security.NoSuchAlgorithmException; +import java.util.Iterator; +import java.util.concurrent.ConcurrentLinkedDeque; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.internal.connection.ServerAddressHelper.getSocketAddresses; +import static com.mongodb.internal.connection.SslHelper.enableHostNameVerification; +import static com.mongodb.internal.connection.SslHelper.enableSni; +import static java.util.Optional.ofNullable; + +/** + * A {@code StreamFactoryFactory} that supports TLS/SSL. The implementation supports asynchronous usage. 
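The `Time` switch above targets tests living in the same package; a hedged sketch of the intended pattern (JUnit-style `assertEquals` assumed):

```java
// Hypothetical test usage: freeze the clock, assert against the constant,
// and always restore the real clock so other tests are unaffected.
Time.makeTimeConstant();
try {
    assertEquals(Time.CONSTANT_TIME, Time.nanoTime());
} finally {
    Time.makeTimeMove();
}
```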
+ */ +public class TlsChannelStreamFactoryFactory implements StreamFactoryFactory { + + private static final Logger LOGGER = Loggers.getLogger("connection.tls"); + + private final SelectorMonitor selectorMonitor; + private final AsynchronousTlsChannelGroup group; + private final PowerOfTwoBufferPool bufferPool = PowerOfTwoBufferPool.DEFAULT; + private final InetAddressResolver inetAddressResolver; + + /** + * Construct a new instance + */ + TlsChannelStreamFactoryFactory(final InetAddressResolver inetAddressResolver, + @Nullable final ExecutorService executorService) { + this.inetAddressResolver = inetAddressResolver; + this.group = new AsynchronousTlsChannelGroup(executorService); + selectorMonitor = new SelectorMonitor(); + selectorMonitor.start(); + } + + public TlsChannelStreamFactoryFactory(final InetAddressResolver inetAddressResolver) { + this(inetAddressResolver, null); + } + + @Override + public StreamFactory create(final SocketSettings socketSettings, final SslSettings sslSettings) { + assertTrue(sslSettings.isEnabled()); + return serverAddress -> new TlsChannelStream(serverAddress, inetAddressResolver, socketSettings, sslSettings, bufferPool, group, + selectorMonitor); + } + + @Override + public void close() { + selectorMonitor.close(); + group.shutdown(); + } + + /** + * Monitors `OP_CONNECT` events for socket connections. + */ + private static class SelectorMonitor implements Closeable { + + static final class SocketRegistration { + private final SocketChannel socketChannel; + private final AtomicReference afterConnectAction; + + SocketRegistration(final SocketChannel socketChannel, final Runnable afterConnectAction) { + this.socketChannel = socketChannel; + this.afterConnectAction = new AtomicReference<>(afterConnectAction); + } + + boolean tryCancelPendingConnection() { + return tryTakeAction() != null; + } + + void runAfterConnectActionIfNotCanceled() { + Runnable afterConnectActionToExecute = tryTakeAction(); + if (afterConnectActionToExecute != null) { + afterConnectActionToExecute.run(); + } + } + + @Nullable + private Runnable tryTakeAction() { + return afterConnectAction.getAndSet(null); + } + } + + private final Selector selector; + private volatile boolean isClosed; + private final ConcurrentLinkedDeque pendingRegistrations = new ConcurrentLinkedDeque<>(); + + SelectorMonitor() { + try { + this.selector = Selector.open(); + } catch (IOException e) { + throw new MongoClientException("Exception opening Selector", e); + } + } + + void start() { + Thread selectorThread = new Thread(() -> { + try { + while (!isClosed) { + try { + selector.select(); + for (SelectionKey selectionKey : selector.selectedKeys()) { + selectionKey.cancel(); + ((SocketRegistration) selectionKey.attachment()).runAfterConnectActionIfNotCanceled(); + } + + for (Iterator iter = pendingRegistrations.iterator(); iter.hasNext();) { + SocketRegistration pendingRegistration = iter.next(); + pendingRegistration.socketChannel.register(selector, SelectionKey.OP_CONNECT, pendingRegistration); + iter.remove(); + } + } catch (Exception e) { + LOGGER.warn("Exception in selector loop", e); + } + } + } catch (Throwable t) { + LOGGER.error(this + " stopped working. 
You may want to recreate the MongoClient", t); + throw t; + } finally { + try { + selector.close(); + } catch (IOException e) { + // ignore + } + } + }); + selectorThread.setDaemon(true); + selectorThread.start(); + } + + void register(final SocketRegistration registration) { + pendingRegistrations.add(registration); + selector.wakeup(); + } + + @Override + public void close() { + isClosed = true; + selector.wakeup(); + } + } + + private static class TlsChannelStream extends AsynchronousChannelStream { + + private final AsynchronousTlsChannelGroup group; + private final SelectorMonitor selectorMonitor; + private final InetAddressResolver inetAddressResolver; + private final SslSettings sslSettings; + + TlsChannelStream(final ServerAddress serverAddress, final InetAddressResolver inetAddressResolver, + final SocketSettings settings, final SslSettings sslSettings, final PowerOfTwoBufferPool bufferProvider, + final AsynchronousTlsChannelGroup group, final SelectorMonitor selectorMonitor) { + super(serverAddress, settings, bufferProvider); + this.inetAddressResolver = inetAddressResolver; + this.sslSettings = sslSettings; + this.group = group; + this.selectorMonitor = selectorMonitor; + } + + @Override + public void openAsync(final OperationContext operationContext, final AsyncCompletionHandler handler) { + isTrue("unopened", getChannel() == null); + try { + SocketChannel socketChannel = SocketChannel.open(); + socketChannel.configureBlocking(false); + + socketChannel.setOption(StandardSocketOptions.TCP_NODELAY, true); + socketChannel.setOption(StandardSocketOptions.SO_KEEPALIVE, true); + if (getSettings().getReceiveBufferSize() > 0) { + socketChannel.setOption(StandardSocketOptions.SO_RCVBUF, getSettings().getReceiveBufferSize()); + } + if (getSettings().getSendBufferSize() > 0) { + socketChannel.setOption(StandardSocketOptions.SO_SNDBUF, getSettings().getSendBufferSize()); + } + //getConnectTimeoutMs MUST be called before connection attempt, as it might throw MongoOperationTimeout exception. + int connectTimeoutMs = operationContext.getTimeoutContext().getConnectTimeoutMs(); + socketChannel.connect(getSocketAddresses(getServerAddress(), inetAddressResolver).get(0)); + SelectorMonitor.SocketRegistration socketRegistration = new SelectorMonitor.SocketRegistration( + socketChannel, () -> initializeTslChannel(handler, socketChannel)); + + if (connectTimeoutMs > 0) { + scheduleTimeoutInterruption(handler, socketRegistration, connectTimeoutMs); + } + selectorMonitor.register(socketRegistration); + } catch (IOException e) { + handler.failed(new MongoSocketOpenException("Exception opening socket", getServerAddress(), e)); + } catch (Throwable t) { + handler.failed(t); + } + } + + private void scheduleTimeoutInterruption(final AsyncCompletionHandler handler, + final SelectorMonitor.SocketRegistration socketRegistration, + final int connectTimeoutMs) { + group.getTimeoutExecutor().schedule(() -> { + if (socketRegistration.tryCancelPendingConnection()) { + closeAndTimeout(handler, socketRegistration.socketChannel); + } + }, connectTimeoutMs, TimeUnit.MILLISECONDS); + } + + private void closeAndTimeout(final AsyncCompletionHandler handler, final SocketChannel socketChannel) { + // We check if this stream was closed before timeout exception. 
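+ // If the caller already closed the stream, the cancelled connection attempt is + // reported as normal completion rather than as a spurious timeout; any failure + // from socketChannel.close() is kept as a suppressed exception on the timeout.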
+ boolean streamClosed = isClosed(); + InterruptedByTimeoutException timeoutException = new InterruptedByTimeoutException(); + try { + socketChannel.close(); + } catch (Exception e) { + timeoutException.addSuppressed(e); + } + + if (streamClosed) { + handler.completed(null); + } else { + handler.failed(new MongoSocketOpenException("Exception opening socket", getAddress(), timeoutException)); + } + } + + private void initializeTslChannel(final AsyncCompletionHandler handler, final SocketChannel socketChannel) { + try { + if (!socketChannel.finishConnect()) { + throw new MongoSocketOpenException("Failed to finish connect", getServerAddress()); + } + + SSLEngine sslEngine = getSslContext().createSSLEngine(getServerAddress().getHost(), + getServerAddress().getPort()); + sslEngine.setUseClientMode(true); + + SSLParameters sslParameters = sslEngine.getSSLParameters(); + enableSni(getServerAddress().getHost(), sslParameters); + + if (!sslSettings.isInvalidHostNameAllowed()) { + enableHostNameVerification(sslParameters); + } + sslEngine.setSSLParameters(sslParameters); + + BufferAllocator bufferAllocator = new BufferProviderAllocator(); + + TlsChannel tlsChannel = ClientTlsChannel.newBuilder(socketChannel, sslEngine) + .withEncryptedBufferAllocator(bufferAllocator) + .withPlainBufferAllocator(bufferAllocator) + .build(); + + // build asynchronous channel, based in the TLS channel and associated with the global group. + setChannel(new AsynchronousTlsChannelAdapter(new AsynchronousTlsChannel(group, tlsChannel, socketChannel))); + + handler.completed(null); + } catch (IOException e) { + handler.failed(new MongoSocketOpenException("Exception opening socket", getServerAddress(), e)); + } catch (Throwable t) { + handler.failed(t); + } + } + + private SSLContext getSslContext() { + try { + return ofNullable(sslSettings.getContext()).orElse(SSLContext.getDefault()); + } catch (NoSuchAlgorithmException e) { + throw new MongoClientException("Unable to create default SSLContext", e); + } + } + + private class BufferProviderAllocator implements BufferAllocator { + @Override + public ByteBuffer allocate(final int size) { + return getBufferProvider().getByteBuffer(size); + } + + @Override + public void free(final ByteBuffer buffer) { + getBufferProvider().release(buffer); + } + } + + public static class AsynchronousTlsChannelAdapter implements ExtendedAsynchronousByteChannel { + private final AsynchronousTlsChannel wrapped; + + AsynchronousTlsChannelAdapter(final AsynchronousTlsChannel wrapped) { + this.wrapped = wrapped; + } + + @Override + public void read(final ByteBuffer dst, final A attach, final CompletionHandler handler) { + wrapped.read(dst, attach, handler); + } + + @Override + public void read(final ByteBuffer dst, final long timeout, final TimeUnit unit, @Nullable final A attach, + final CompletionHandler handler) { + wrapped.read(dst, timeout, unit, attach, handler); + } + + @Override + public void read(final ByteBuffer[] dsts, final int offset, final int length, final long timeout, final TimeUnit unit, + @Nullable final A attach, final CompletionHandler handler) { + wrapped.read(dsts, offset, length, timeout, unit, attach, handler); + } + + @Override + public Future read(final ByteBuffer dst) { + return wrapped.read(dst); + } + + @Override + public void write(final ByteBuffer src, final A attach, final CompletionHandler handler) { + wrapped.write(src, attach, handler); + } + + @Override + public void write(final ByteBuffer src, final long timeout, final TimeUnit unit, final A attach, + final 
CompletionHandler handler) { + wrapped.write(src, timeout, unit, attach, handler); + } + + @Override + public void write(final ByteBuffer[] srcs, final int offset, final int length, final long timeout, final TimeUnit unit, + final A attach, final CompletionHandler handler) { + wrapped.write(srcs, offset, length, timeout, unit, attach, handler); + } + + @Override + public Future write(final ByteBuffer src) { + return wrapped.write(src); + } + + @Override + public boolean isOpen() { + return wrapped.isOpen(); + } + + @Override + public void close() throws IOException { + wrapped.close(); + } + } + } +} + diff --git a/driver-core/src/main/com/mongodb/internal/connection/TopologyVersionHelper.java b/driver-core/src/main/com/mongodb/internal/connection/TopologyVersionHelper.java new file mode 100644 index 00000000000..93845490ab6 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/TopologyVersionHelper.java @@ -0,0 +1,105 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.connection; + +import com.mongodb.MongoCommandException; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.TopologyVersion; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; + +import java.util.Comparator; +import java.util.Optional; + +final class TopologyVersionHelper { + /** + * Defines a strict (irreflexive) + * partial order (vacuously antisymmetric, transitive) + * on a set of all {@link TopologyVersion}s, including {@code null}. + * Since this binary relation + * is not a total order, + * it cannot be expressed as a {@link Comparator}. + *

<p> + * There are two reasons we need this strict comparison in addition to the + * {@linkplain #newerOrEqual(TopologyVersion, TopologyVersion) non-strict one}: + * <ul> + *     <li>A candidate {@link ServerDescription} has information besides + * {@linkplain ServerDescription#getTopologyVersion() topology version}, and that information must be applied by the client even if + * the topology version has not changed.</li> + *     <li>The client may {@linkplain ConnectionPool#invalidate() pause} a {@link ConnectionPool} + * and then {@linkplain ConnectionPool#ready() mark it ready} based on receiving a new {@link ServerDescription} + * from a {@link ServerMonitor}, without the server for that pool changing its topology version. + * Consequently, a candidate {@link ServerDescription} cannot be rejected solely based on the fact that its + * {@linkplain ServerDescription#getTopologyVersion() topology version} is equal to the one that the client considers current.</li> + * </ul>
+ * + * @return {@code true} if and only if {@code current} is considered newer than {@code candidate}. + * @see #newerOrEqual(TopologyVersion, TopologyVersion) + */ + static boolean newer(@Nullable final TopologyVersion current, @Nullable final TopologyVersion candidate) { + return compare(current, candidate) > 0; + } + + /** + * Defines a quasi-reflexive, + * antisymmetric, transitive binary relation + * (neither a partial order + * nor a strict + * partial order + * because it is neither reflexive nor irreflexive) + * on a set of all {@link TopologyVersion}s, including {@code null}. + * Since this binary relation is not a total order, + * it cannot be expressed as a {@link Comparator}. + * + * @return {@code true} if and only if {@code current} is considered newer than or equal to {@code candidate}. + * @see #newer(TopologyVersion, TopologyVersion) + * @see #topologyVersion(Throwable) + */ + static boolean newerOrEqual(@Nullable final TopologyVersion current, @Nullable final TopologyVersion candidate) { + return compare(current, candidate) >= 0; + } + + /** + * @return A {@link TopologyVersion} that must be applied by the client unless it has already learned + * {@linkplain #newerOrEqual(TopologyVersion, TopologyVersion) it or a newer one}. + */ + static Optional topologyVersion(@Nullable final Throwable t) { + TopologyVersion result = null; + if (t instanceof MongoCommandException) { + BsonDocument rawTopologyVersion = ((MongoCommandException) t).getResponse() + .getDocument("topologyVersion", null); + if (rawTopologyVersion != null) { + result = new TopologyVersion(rawTopologyVersion); + } + } + return Optional.ofNullable(result); + } + + private static int compare(@Nullable final TopologyVersion o1, @Nullable final TopologyVersion o2) { + if (o1 == null || o2 == null) { + return -1; + } + if (o1.getProcessId().equals(o2.getProcessId())) { + return Long.compare(o1.getCounter(), o2.getCounter()); + } else { + return -1; + } + } + + private TopologyVersionHelper() { + throw new AssertionError(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/UnixSocketChannelStream.java b/driver-core/src/main/com/mongodb/internal/connection/UnixSocketChannelStream.java new file mode 100644 index 00000000000..de74b6c8d0f --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/UnixSocketChannelStream.java @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.UnixServerAddress; +import com.mongodb.connection.SocketSettings; +import com.mongodb.connection.SslSettings; +import jnr.unixsocket.UnixSocketAddress; +import jnr.unixsocket.UnixSocketChannel; + +import javax.net.SocketFactory; +import java.io.IOException; +import java.net.Socket; + +/** + *

<p>This class is not part of the public API and may be removed or changed at any time</p>

+ */ +public class UnixSocketChannelStream extends SocketStream { + private final UnixServerAddress address; + + public UnixSocketChannelStream(final UnixServerAddress address, final SocketSettings settings, final SslSettings sslSettings, + final BufferProvider bufferProvider) { + super(address, new DefaultInetAddressResolver(), settings, sslSettings, SocketFactory.getDefault(), bufferProvider); + this.address = address; + } + + @Override + protected Socket initializeSocket(final OperationContext operationContext) throws IOException { + return UnixSocketChannel.open(new UnixSocketAddress(address.getHost())).socket(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/UsageTrackingInternalConnection.java b/driver-core/src/main/com/mongodb/internal/connection/UsageTrackingInternalConnection.java new file mode 100644 index 00000000000..d0ec8a6ea51 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/UsageTrackingInternalConnection.java @@ -0,0 +1,222 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.event.ConnectionCreatedEvent; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import org.bson.ByteBuf; +import org.bson.codecs.Decoder; + +import java.util.List; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; + +/** + * A connection that tracks when it was opened and when it was last used. 
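`UnixSocketChannelStream` overrides only socket creation, so constructing one looks like any other `SocketStream`. A hedged sketch (jnr-unixsocket on the classpath; the socket path and `bufferProvider` instance are illustrative assumptions):

```java
// Hypothetical construction: the Unix domain socket path travels in the
// UnixServerAddress host component and is opened via jnr-unixsocket.
UnixServerAddress address = new UnixServerAddress("/tmp/mongodb-27017.sock");
Stream stream = new UnixSocketChannelStream(
        address,
        SocketSettings.builder().build(),
        SslSettings.builder().build(),
        bufferProvider); // assumed BufferProvider instance
```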
+ */ +class UsageTrackingInternalConnection implements InternalConnection { + private static final Logger LOGGER = Loggers.getLogger("connection"); + private volatile long openedAt; + private volatile long lastUsedAt; + private volatile boolean closeSilently; + private final InternalConnection wrapped; + private final DefaultConnectionPool.ServiceStateManager serviceStateManager; + + UsageTrackingInternalConnection(final InternalConnection wrapped, final DefaultConnectionPool.ServiceStateManager serviceStateManager) { + this.wrapped = wrapped; + this.serviceStateManager = serviceStateManager; + openedAt = Long.MAX_VALUE; + lastUsedAt = openedAt; + } + + @Override + public void open(final OperationContext operationContext) { + wrapped.open(operationContext); + openedAt = System.currentTimeMillis(); + lastUsedAt = openedAt; + if (getDescription().getServiceId() != null) { + serviceStateManager.addConnection(assertNotNull(getDescription().getServiceId())); + } + } + + @Override + public void openAsync(final OperationContext operationContext, final SingleResultCallback callback) { + wrapped.openAsync(operationContext, (result, t) -> { + if (t != null) { + callback.onResult(null, t); + } else { + openedAt = System.currentTimeMillis(); + lastUsedAt = openedAt; + if (getDescription().getServiceId() != null) { + serviceStateManager.addConnection(getDescription().getServiceId()); + } + callback.onResult(null, null); + } + }); + } + + @Override + public void close() { + try { + wrapped.close(); + } finally { + if (openedAt != Long.MAX_VALUE && getDescription().getServiceId() != null) { + serviceStateManager.removeConnection(assertNotNull(getDescription().getServiceId())); + } + } + } + + @Override + public boolean opened() { + return wrapped.opened(); + } + + @Override + public boolean isClosed() { + return wrapped.isClosed(); + } + + @Override + public ByteBuf getBuffer(final int size) { + return wrapped.getBuffer(size); + } + + @Override + public void sendMessage(final List byteBuffers, final int lastRequestId, final OperationContext operationContext) { + wrapped.sendMessage(byteBuffers, lastRequestId, operationContext); + lastUsedAt = System.currentTimeMillis(); + } + + @Override + public T sendAndReceive(final CommandMessage message, final Decoder decoder, final OperationContext operationContext) { + T result = wrapped.sendAndReceive(message, decoder, operationContext); + lastUsedAt = System.currentTimeMillis(); + return result; + } + + @Override + public void send(final CommandMessage message, final Decoder decoder, final OperationContext operationContext) { + wrapped.send(message, decoder, operationContext); + lastUsedAt = System.currentTimeMillis(); + } + + @Override + public T receive(final Decoder decoder, final OperationContext operationContext) { + T result = wrapped.receive(decoder, operationContext); + lastUsedAt = System.currentTimeMillis(); + return result; + } + + @Override + public boolean hasMoreToCome() { + return wrapped.hasMoreToCome(); + } + + @Override + public void sendAndReceiveAsync(final CommandMessage message, final Decoder decoder, final OperationContext operationContext, + final SingleResultCallback callback) { + SingleResultCallback errHandlingCallback = errorHandlingCallback((result, t) -> { + lastUsedAt = System.currentTimeMillis(); + callback.onResult(result, t); + }, LOGGER); + wrapped.sendAndReceiveAsync(message, decoder, operationContext, errHandlingCallback); + } + + @Override + public ResponseBuffers receiveMessage(final int responseTo, final OperationContext 
operationContext) { + ResponseBuffers responseBuffers = wrapped.receiveMessage(responseTo, operationContext); + lastUsedAt = System.currentTimeMillis(); + return responseBuffers; + } + + @Override + public void sendMessageAsync(final List byteBuffers, final int lastRequestId, final OperationContext operationContext, + final SingleResultCallback callback) { + SingleResultCallback errHandlingCallback = errorHandlingCallback((result, t) -> { + lastUsedAt = System.currentTimeMillis(); + callback.onResult(result, t); + }, LOGGER); + wrapped.sendMessageAsync(byteBuffers, lastRequestId, operationContext, errHandlingCallback); + } + + @Override + public void receiveMessageAsync(final int responseTo, final OperationContext operationContext, + final SingleResultCallback callback) { + SingleResultCallback errHandlingCallback = errorHandlingCallback((result, t) -> { + lastUsedAt = System.currentTimeMillis(); + callback.onResult(result, t); + }, LOGGER); + wrapped.receiveMessageAsync(responseTo, operationContext, errHandlingCallback); + } + + @Override + public ConnectionDescription getDescription() { + return wrapped.getDescription(); + } + + @Override + public ServerDescription getInitialServerDescription() { + return wrapped.getInitialServerDescription(); + } + + @Override + public int getGeneration() { + return wrapped.getGeneration(); + } + + /** + * Returns the time at which this connection was opened, or {@code Long.MAX_VALUE} if it has not yet been opened. + * + * @return the time when this connection was opened, in milliseconds since the epoch. + */ + long getOpenedAt() { + return openedAt; + } + + /** + * Returns the time at which this connection was last used, or {@code Long.MAX_VALUE} if it has not yet been used. + * + * @return the time when this connection was last used, in milliseconds since the epoch. + */ + long getLastUsedAt() { + return lastUsedAt; + } + + /** + * This method must be used if and only if {@link ConnectionCreatedEvent} was not sent for the connection. + * Must not throw {@link Exception}s. + * + * @see #isCloseSilently() + */ + void setCloseSilently() { + closeSilently = true; + } + + /** + * Must not throw {@link Exception}s. + * + * @see #setCloseSilently() + */ + boolean isCloseSilently() { + return closeSilently; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/X509Authenticator.java b/driver-core/src/main/com/mongodb/internal/connection/X509Authenticator.java new file mode 100644 index 00000000000..b5e2dd0512d --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/X509Authenticator.java @@ -0,0 +1,117 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.AuthenticationMechanism; +import com.mongodb.MongoCommandException; +import com.mongodb.MongoSecurityException; +import com.mongodb.ServerApi; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; + +import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; +import static com.mongodb.internal.connection.CommandHelper.executeCommand; +import static com.mongodb.internal.connection.CommandHelper.executeCommandAsync; + +class X509Authenticator extends Authenticator implements SpeculativeAuthenticator { + public static final Logger LOGGER = Loggers.getLogger("authenticator"); + private BsonDocument speculativeAuthenticateResponse; + + X509Authenticator(final MongoCredentialWithCache credential, final ClusterConnectionMode clusterConnectionMode, + @Nullable final ServerApi serverApi) { + super(credential, clusterConnectionMode, serverApi); + } + + @Override + void authenticate(final InternalConnection connection, final ConnectionDescription connectionDescription, + final OperationContext operationContext) { + if (this.speculativeAuthenticateResponse != null) { + return; + } + try { + BsonDocument authCommand = getAuthCommand(getMongoCredential().getUserName()); + executeCommand(getMongoCredential().getSource(), authCommand, getClusterConnectionMode(), getServerApi(), connection, operationContext); + } catch (MongoCommandException e) { + throw new MongoSecurityException(getMongoCredential(), "Exception authenticating", e); + } + } + + @Override + void authenticateAsync(final InternalConnection connection, final ConnectionDescription connectionDescription, + final OperationContext operationContext, final SingleResultCallback callback) { + if (speculativeAuthenticateResponse != null) { + callback.onResult(null, null); + } else { + SingleResultCallback errHandlingCallback = errorHandlingCallback(callback, LOGGER); + try { + executeCommandAsync(getMongoCredential().getSource(), getAuthCommand(getMongoCredential().getUserName()), + getClusterConnectionMode(), getServerApi(), connection, operationContext, + (nonceResult, t) -> { + if (t != null) { + errHandlingCallback.onResult(null, translateThrowable(t)); + } else { + errHandlingCallback.onResult(null, null); + } + }); + } catch (Throwable t) { + errHandlingCallback.onResult(null, t); + } + } + } + + @Override + public BsonDocument createSpeculativeAuthenticateCommand(final InternalConnection connection) { + return getAuthCommand(getMongoCredential().getUserName()).append("db", new BsonString("$external")); + } + + @Override + public void setSpeculativeAuthenticateResponse(final BsonDocument response) { + this.speculativeAuthenticateResponse = response; + } + + @Override + public BsonDocument getSpeculativeAuthenticateResponse() { + return speculativeAuthenticateResponse; + } + + private BsonDocument getAuthCommand(@Nullable final String userName) { + BsonDocument cmd = new BsonDocument(); + + cmd.put("authenticate", new BsonInt32(1)); + if (userName != null) { + cmd.put("user", new BsonString(userName)); + } + cmd.put("mechanism", new BsonString(AuthenticationMechanism.MONGODB_X509.getMechanismName())); 
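+        // Illustration with a hypothetical subject name: the command document now resembles
+        // {authenticate: 1, user: "CN=client,O=MongoDB", mechanism: "MONGODB-X509"}
+        // (the "user" field is omitted when userName is null, in which case the server derives it
+        // from the client certificate).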
+ + return cmd; + } + + private Throwable translateThrowable(final Throwable t) { + if (t instanceof MongoCommandException) { + return new MongoSecurityException(getMongoCredential(), "Exception authenticating", t); + } else { + return t; + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ZlibCompressor.java b/driver-core/src/main/com/mongodb/internal/connection/ZlibCompressor.java new file mode 100644 index 00000000000..e826b626a79 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/ZlibCompressor.java @@ -0,0 +1,63 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoCompressor; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.zip.Deflater; +import java.util.zip.DeflaterOutputStream; +import java.util.zip.InflaterInputStream; + +class ZlibCompressor extends Compressor { + private final int level; + + ZlibCompressor(final MongoCompressor mongoCompressor) { + this.level = mongoCompressor.getPropertyNonNull(MongoCompressor.LEVEL, Deflater.DEFAULT_COMPRESSION); + } + + @Override + public String getName() { + return "zlib"; + } + + @Override + public byte getId() { + return 2; + } + + @Override + InputStream getInputStream(final InputStream source) { + return new InflaterInputStream(source); + } + + @Override + OutputStream getOutputStream(final OutputStream source) { + return new DeflaterOutputStream(source, new Deflater(level)) { + @Override + public void close() throws IOException { + try { + super.close(); + } finally { + def.end(); + } + } + }; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/ZstdCompressor.java b/driver-core/src/main/com/mongodb/internal/connection/ZstdCompressor.java new file mode 100644 index 00000000000..10f99d05b0f --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/ZstdCompressor.java @@ -0,0 +1,77 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection; + +import com.github.luben.zstd.Zstd; +import com.github.luben.zstd.ZstdInputStream; +import com.mongodb.MongoInternalException; +import org.bson.ByteBuf; +import org.bson.io.BsonOutput; + +import java.io.IOException; +import java.io.InputStream; +import java.util.List; + +class ZstdCompressor extends Compressor { + @Override + public String getName() { + return "zstd"; + } + + @Override + public byte getId() { + return 3; + } + + @Override + public void compress(final List source, final BsonOutput target) { + int uncompressedSize = getUncompressedSize(source); + + byte[] singleByteArraySource = new byte[uncompressedSize]; + copy(source, singleByteArraySource); + + try { + byte[] out = new byte[(int) Zstd.compressBound(uncompressedSize)]; + int compressedSize = (int) Zstd.compress(out, singleByteArraySource, Zstd.defaultCompressionLevel()); + target.writeBytes(out, 0, compressedSize); + } catch (Exception e) { + throw new MongoInternalException("Unexpected exception", e); + } + } + + private int getUncompressedSize(final List source) { + int uncompressedSize = 0; + for (ByteBuf cur : source) { + uncompressedSize += cur.remaining(); + } + return uncompressedSize; + } + + private void copy(final List source, final byte[] in) { + int offset = 0; + for (ByteBuf cur : source) { + int remaining = cur.remaining(); + cur.get(in, offset, remaining); + offset += remaining; + } + } + + @Override + InputStream getInputStream(final InputStream source) throws IOException { + return new ZstdInputStream(source); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/netty/NettyByteBuf.java b/driver-core/src/main/com/mongodb/internal/connection/netty/NettyByteBuf.java new file mode 100644 index 00000000000..c81cc87dee6 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/netty/NettyByteBuf.java @@ -0,0 +1,287 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection.netty; + +import org.bson.ByteBuf; + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; + +/** + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
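+ * <p>Adapts a Netty {@code io.netty.buffer.ByteBuf} to the BSON {@link ByteBuf} interface: a single
+ * read/write mode flag stands in for NIO position/limit semantics, so {@code flip()} switches between
+ * exposing the Netty writer index and reader index (see {@code remaining()} and {@code position()} below).</p>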
+ */ +public final class NettyByteBuf implements ByteBuf { + + private io.netty.buffer.ByteBuf proxied; + private boolean isWriting = true; + + /** + * @param proxied This constructor stores a reference to {@code proxied} in the heap memory + * but does not {@linkplain io.netty.buffer.ByteBuf#retain() retain} {@code proxied}. + * A caller may have to do that depending on the + * reference counting approach he uses. + */ + @SuppressWarnings("deprecation") + public NettyByteBuf(final io.netty.buffer.ByteBuf proxied) { + this.proxied = proxied.order(ByteOrder.LITTLE_ENDIAN); + } + + /** + * @param proxied See {@link #NettyByteBuf(io.netty.buffer.ByteBuf)}. + */ + private NettyByteBuf(final io.netty.buffer.ByteBuf proxied, final boolean isWriting) { + this(proxied); + this.isWriting = isWriting; + } + + public io.netty.buffer.ByteBuf asByteBuf() { + return proxied; + } + + @Override + public int capacity() { + return proxied.capacity(); + } + + @Override + public ByteBuf put(final int index, final byte b) { + proxied.setByte(index, b); + return this; + } + + @Override + public int remaining() { + if (isWriting) { + return proxied.writableBytes(); + } else { + return proxied.readableBytes(); + } + } + + @Override + public ByteBuf put(final byte[] src, final int offset, final int length) { + proxied.writeBytes(src, offset, length); + return this; + } + + @Override + public boolean hasRemaining() { + return remaining() > 0; + } + + @Override + public ByteBuf put(final byte b) { + proxied.writeByte(b); + return this; + } + + @Override + public ByteBuf putInt(final int b) { + proxied.writeInt(b); + return this; + } + + @Override + public ByteBuf putInt(final int index, final int b) { + proxied.setInt(index, b); + return this; + } + + @Override + public ByteBuf putDouble(final double b) { + proxied.writeDouble(b); + return this; + } + + @Override + public ByteBuf putLong(final long b) { + proxied.writeLong(b); + return this; + } + + @Override + public ByteBuf flip() { + isWriting = !isWriting; + return this; + } + + @Override + public byte[] array() { + return proxied.array(); + } + + @Override + public boolean isBackedByArray() { + return proxied.hasArray(); + } + + @Override + public int arrayOffset() { + return proxied.arrayOffset(); + } + + @Override + public int limit() { + if (isWriting) { + return proxied.writerIndex() + remaining(); + } else { + return proxied.readerIndex() + remaining(); + } + } + + @Override + public ByteBuf position(final int newPosition) { + if (isWriting) { + proxied.writerIndex(newPosition); + } else { + proxied.readerIndex(newPosition); + } + return this; + } + + @Override + public ByteBuf clear() { + proxied.clear(); + return this; + } + + @Override + @SuppressWarnings("deprecation") + public ByteBuf order(final ByteOrder byteOrder) { + proxied = proxied.order(byteOrder); + return this; + } + + @Override + public byte get() { + return proxied.readByte(); + } + + @Override + public byte get(final int index) { + return proxied.getByte(index); + } + + @Override + public ByteBuf get(final byte[] bytes) { + proxied.readBytes(bytes); + return this; + } + + @Override + public ByteBuf get(final int index, final byte[] bytes) { + proxied.getBytes(index, bytes); + return this; + } + + @Override + public ByteBuf get(final byte[] bytes, final int offset, final int length) { + proxied.readBytes(bytes, offset, length); + return this; + } + + @Override + public ByteBuf get(final int index, final byte[] bytes, final int offset, final int length) { + proxied.getBytes(index, bytes, 
offset, length); + return this; + } + + @Override + public long getLong() { + return proxied.readLong(); + } + + @Override + public long getLong(final int index) { + return proxied.getLong(index); + } + + @Override + public double getDouble() { + return proxied.readDouble(); + } + + @Override + public double getDouble(final int index) { + return proxied.getDouble(index); + } + + @Override + public int getInt() { + return proxied.readInt(); + } + + @Override + public int getInt(final int index) { + return proxied.getInt(index); + } + + @Override + public int position() { + if (isWriting) { + return proxied.writerIndex(); + } else { + return proxied.readerIndex(); + } + } + + @Override + public ByteBuf limit(final int newLimit) { + if (isWriting) { + throw new UnsupportedOperationException("Can not set the limit while writing"); + } else { + proxied.writerIndex(newLimit); + } + return this; + } + + @Override + public ByteBuf asReadOnly() { + return this; // TODO: do we need this method really? Netty ByteBuf does not have this concept + } + + @Override + public ByteBuf duplicate() { + return new NettyByteBuf(proxied.retainedDuplicate(), isWriting); + } + + @Override + public ByteBuffer asNIO() { + if (isWriting) { + return proxied.nioBuffer(proxied.writerIndex(), proxied.writableBytes()); + } else { + return proxied.nioBuffer(proxied.readerIndex(), proxied.readableBytes()); + } + + } + + @Override + public int getReferenceCount() { + return proxied.refCnt(); + } + + @Override + public ByteBuf retain() { + proxied.retain(); + return this; + } + + @Override + public void release() { + proxied.release(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/netty/NettyStream.java b/driver-core/src/main/com/mongodb/internal/connection/netty/NettyStream.java new file mode 100644 index 00000000000..76e10653454 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/netty/NettyStream.java @@ -0,0 +1,588 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection.netty; + +import com.mongodb.MongoClientException; +import com.mongodb.MongoException; +import com.mongodb.MongoInternalException; +import com.mongodb.MongoSocketException; +import com.mongodb.MongoSocketOpenException; +import com.mongodb.MongoSocketReadTimeoutException; +import com.mongodb.ServerAddress; +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.connection.AsyncCompletionHandler; +import com.mongodb.connection.SocketSettings; +import com.mongodb.connection.SslSettings; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.connection.Stream; +import com.mongodb.lang.Nullable; +import com.mongodb.spi.dns.InetAddressResolver; +import io.netty.bootstrap.Bootstrap; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.CompositeByteBuf; +import io.netty.buffer.PooledByteBufAllocator; +import io.netty.channel.Channel; +import io.netty.channel.ChannelFuture; +import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.ChannelOption; +import io.netty.channel.ChannelPipeline; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.SimpleChannelInboundHandler; +import io.netty.channel.socket.SocketChannel; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslHandler; +import io.netty.handler.timeout.ReadTimeoutException; +import io.netty.handler.timeout.WriteTimeoutHandler; +import org.bson.ByteBuf; + +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLParameters; +import java.io.IOException; +import java.net.SocketAddress; +import java.security.NoSuchAlgorithmException; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Optional; +import java.util.Queue; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.internal.Locks.withLock; +import static com.mongodb.internal.connection.ServerAddressHelper.getSocketAddresses; +import static com.mongodb.internal.connection.SslHelper.enableHostNameVerification; +import static com.mongodb.internal.connection.SslHelper.enableSni; +import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException; +import static java.util.Optional.ofNullable; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +/** + * A Stream implementation based on Netty 4.0. + * Just like it is for the {@link java.nio.channels.AsynchronousSocketChannel}, + * concurrent pending1 readers + * (whether {@linkplain #read(int, OperationContext) synchronous} or + * {@linkplain #readAsync(int, OperationContext, AsyncCompletionHandler) asynchronous}) + * are not supported by {@link NettyStream}. + * However, this class does not have a fail-fast mechanism checking for such situations. + *
+ * <sup>1</sup>We cannot simply say that read methods are not allowed to be run concurrently because,
+ * strictly speaking, they are allowed, as explained below.
+ *
<pre>{@code
+ * NettyStream stream = ...;
+ * stream.readAsync(1, new AsyncCompletionHandler<ByteBuf>() {//inv1
+ *  @Override
+ *  public void completed(ByteBuf o) {
+ *      stream.readAsync(//inv2
+ *              1, ...);//ret2
+ *  }
+ *
+ *  @Override
+ *  public void failed(Throwable t) {
+ *  }
+ * });//ret1
+ * }</pre>
+ * Arrows on the diagram below represent happens-before relations.
+ * <pre>{@code
+ * inv1 -> inv2 -> ret2
+ *      \--------> ret1
+ * }</pre>
+ * As shown on the diagram, the method {@link #readAsync(int, OperationContext, AsyncCompletionHandler)} runs concurrently with + * itself in the example above. However, there are no concurrent pending readers because the second operation + * is invoked after the first operation has completed reading despite the method has not returned yet. + */ +final class NettyStream implements Stream { + private static final byte NO_SCHEDULE_TIME = 0; + private final ServerAddress address; + private final InetAddressResolver inetAddressResolver; + private final SocketSettings settings; + private final SslSettings sslSettings; + private final EventLoopGroup workerGroup; + private final Class socketChannelClass; + private final ByteBufAllocator allocator; + @Nullable + private final SslContext sslContext; + + private boolean isClosed; + private volatile Channel channel; + + private final LinkedList pendingInboundBuffers = new LinkedList<>(); + private final Lock lock = new ReentrantLock(); + // access to the fields `pendingReader`, `pendingException` is guarded by `lock` + private PendingReader pendingReader; + private Throwable pendingException; + /* The fields readTimeoutTask, readTimeoutMillis are each written only in the ChannelInitializer.initChannel method + * (in addition to the write of the default value and the write by variable initializers), + * and read only when NettyStream users read data, or Netty event loop handles incoming data. + * Since actions done by the ChannelInitializer.initChannel method + * are ordered (in the happens-before order) before user read actions and before event loop actions that handle incoming data, + * these fields can be plain.*/ + @Nullable + private ReadTimeoutTask readTimeoutTask; + + NettyStream(final ServerAddress address, final InetAddressResolver inetAddressResolver, final SocketSettings settings, + final SslSettings sslSettings, final EventLoopGroup workerGroup, + final Class socketChannelClass, final ByteBufAllocator allocator, + @Nullable final SslContext sslContext) { + this.address = address; + this.inetAddressResolver = inetAddressResolver; + this.settings = settings; + this.sslSettings = sslSettings; + this.workerGroup = workerGroup; + this.socketChannelClass = socketChannelClass; + this.allocator = allocator; + this.sslContext = sslContext; + } + + @Override + public ByteBuf getBuffer(final int size) { + return new NettyByteBuf(allocator.buffer(size, size)); + } + + @Override + public void open(final OperationContext operationContext) throws IOException { + FutureAsyncCompletionHandler handler = new FutureAsyncCompletionHandler<>(); + openAsync(operationContext, handler); + handler.get(); + } + + @Override + public void openAsync(final OperationContext operationContext, final AsyncCompletionHandler handler) { + Queue socketAddressQueue; + + try { + socketAddressQueue = new LinkedList<>(getSocketAddresses(address, inetAddressResolver)); + } catch (Throwable t) { + handler.failed(t); + return; + } + + initializeChannel(operationContext, handler, socketAddressQueue); + } + + private void initializeChannel(final OperationContext operationContext, final AsyncCompletionHandler handler, + final Queue socketAddressQueue) { + if (socketAddressQueue.isEmpty()) { + handler.failed(new MongoSocketException("Exception opening socket", getAddress())); + } else { + SocketAddress nextAddress = socketAddressQueue.poll(); + + Bootstrap bootstrap = new Bootstrap(); + bootstrap.group(workerGroup); + bootstrap.channel(socketChannelClass); + 
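+            // The connect timeout comes from the operation's timeout context; TCP_NODELAY and
+            // SO_KEEPALIVE are always enabled, and buffer sizes are applied from SocketSettings
+            // only when explicitly configured below.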
bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, + operationContext.getTimeoutContext().getConnectTimeoutMs()); + bootstrap.option(ChannelOption.TCP_NODELAY, true); + bootstrap.option(ChannelOption.SO_KEEPALIVE, true); + + if (settings.getReceiveBufferSize() > 0) { + bootstrap.option(ChannelOption.SO_RCVBUF, settings.getReceiveBufferSize()); + } + if (settings.getSendBufferSize() > 0) { + bootstrap.option(ChannelOption.SO_SNDBUF, settings.getSendBufferSize()); + } + bootstrap.option(ChannelOption.ALLOCATOR, allocator); + + bootstrap.handler(new ChannelInitializer() { + @Override + public void initChannel(final SocketChannel ch) { + ChannelPipeline pipeline = ch.pipeline(); + if (sslSettings.isEnabled()) { + addSslHandler(ch); + } + + /* We need at least one handler before (in the inbound evaluation order) the InboundBufferHandler, + * so that we can fire exception events (they are inbound events) using its context and the InboundBufferHandler + * receives them. SslHandler is not always present, so adding a NOOP handler.*/ + pipeline.addLast("ChannelInboundHandlerAdapter", new ChannelInboundHandlerAdapter()); + readTimeoutTask = new ReadTimeoutTask(pipeline.lastContext()); + pipeline.addLast("InboundBufferHandler", new InboundBufferHandler()); + } + }); + ChannelFuture channelFuture = bootstrap.connect(nextAddress); + channelFuture.addListener(new OpenChannelFutureListener(operationContext, socketAddressQueue, channelFuture, handler)); + } + } + + @Override + public void write(final List buffers, final OperationContext operationContext) throws IOException { + FutureAsyncCompletionHandler future = new FutureAsyncCompletionHandler<>(); + writeAsync(buffers, operationContext, future); + future.get(); + } + + @Override + public ByteBuf read(final int numBytes, final OperationContext operationContext) throws IOException { + FutureAsyncCompletionHandler future = new FutureAsyncCompletionHandler<>(); + readAsync(numBytes, future, operationContext.getTimeoutContext().getReadTimeoutMS()); + return future.get(); + } + + @Override + public void writeAsync(final List buffers, final OperationContext operationContext, + final AsyncCompletionHandler handler) { + CompositeByteBuf composite = PooledByteBufAllocator.DEFAULT.compositeBuffer(); + for (ByteBuf cur : buffers) { + // The Netty framework releases `CompositeByteBuf` after writing + // (see https://netty.io/wiki/reference-counted-objects.html#outbound-messages), + // which results in the buffer we pass to `CompositeByteBuf.addComponent` being released. + // However, `CompositeByteBuf.addComponent` does not retain this buffer, + // which means we must retain it to conform to the `Stream.writeAsync` contract. 
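+            // (The extra retain is balanced when Netty releases the composite after the write
+            // completes, which in turn releases each component added to it.)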
+ composite.addComponent(true, ((NettyByteBuf) cur).asByteBuf().retain()); + } + + long writeTimeoutMS = operationContext.getTimeoutContext().getWriteTimeoutMS(); + final Optional writeTimeoutHandler = addWriteTimeoutHandler(writeTimeoutMS); + channel.writeAndFlush(composite).addListener((ChannelFutureListener) future -> { + writeTimeoutHandler.map(w -> channel.pipeline().remove(w)); + if (!future.isSuccess()) { + handler.failed(future.cause()); + } else { + handler.completed(null); + } + }); + } + + private Optional addWriteTimeoutHandler(final long writeTimeoutMS) { + if (writeTimeoutMS != NO_SCHEDULE_TIME) { + WriteTimeoutHandler writeTimeoutHandler = new WriteTimeoutHandler(writeTimeoutMS, MILLISECONDS); + channel.pipeline().addBefore("ChannelInboundHandlerAdapter", "WriteTimeoutHandler", writeTimeoutHandler); + return Optional.of(writeTimeoutHandler); + } + return Optional.empty(); + } + + @Override + public void readAsync(final int numBytes, final OperationContext operationContext, final AsyncCompletionHandler handler) { + readAsync(numBytes, handler, operationContext.getTimeoutContext().getReadTimeoutMS()); + } + + /** + * @param numBytes Must be equal to {@link #pendingReader}{@code .numBytes} when called by a Netty channel handler. + * @param handler Must be equal to {@link #pendingReader}{@code .handler} when called by a Netty channel handler. + * @param readTimeoutMillis Must be equal to {@link #NO_SCHEDULE_TIME} when called by a Netty channel handler. + * Timeouts may be scheduled only by the public read methods. Taking into account that concurrent pending + * readers are not allowed, there must not be a situation when threads attempt to schedule a timeout + * before the previous one is either cancelled or completed. + */ + private void readAsync(final int numBytes, final AsyncCompletionHandler handler, final long readTimeoutMillis) { + ByteBuf buffer = null; + Throwable exceptionResult = null; + lock.lock(); + try { + exceptionResult = pendingException; + if (exceptionResult == null) { + if (!hasBytesAvailable(numBytes)) { + if (pendingReader == null) {//called by a public read method + pendingReader = new PendingReader(numBytes, handler, scheduleReadTimeout(readTimeoutTask, readTimeoutMillis)); + } + } else { + CompositeByteBuf composite = allocator.compositeBuffer(pendingInboundBuffers.size()); + int bytesNeeded = numBytes; + for (Iterator iter = pendingInboundBuffers.iterator(); iter.hasNext();) { + io.netty.buffer.ByteBuf next = iter.next(); + int bytesNeededFromCurrentBuffer = Math.min(next.readableBytes(), bytesNeeded); + if (bytesNeededFromCurrentBuffer == next.readableBytes()) { + composite.addComponent(next); + iter.remove(); + } else { + composite.addComponent(next.readRetainedSlice(bytesNeededFromCurrentBuffer)); + } + composite.writerIndex(composite.writerIndex() + bytesNeededFromCurrentBuffer); + bytesNeeded -= bytesNeededFromCurrentBuffer; + if (bytesNeeded == 0) { + break; + } + } + buffer = new NettyByteBuf(composite).flip(); + } + } + if (!(exceptionResult == null && buffer == null)//the read operation has completed + && pendingReader != null) {//we need to clear the pending reader + cancel(pendingReader.timeout); + this.pendingReader = null; + } + } finally { + lock.unlock(); + } + if (exceptionResult != null) { + handler.failed(exceptionResult); + } + if (buffer != null) { + handler.completed(buffer); + } + } + + private boolean hasBytesAvailable(final int numBytes) { + int bytesAvailable = 0; + for (io.netty.buffer.ByteBuf cur : pendingInboundBuffers) { + 
bytesAvailable += cur.readableBytes(); + if (bytesAvailable >= numBytes) { + return true; + } + } + return false; + } + + private void handleReadResponse(@Nullable final io.netty.buffer.ByteBuf buffer, @Nullable final Throwable t) { + PendingReader localPendingReader = withLock(lock, () -> { + if (buffer != null) { + if (isClosed) { + pendingException = new MongoSocketException("Received data after the stream was closed.", address); + } else { + pendingInboundBuffers.add(buffer.retain()); + } + } else { + pendingException = t; + } + return pendingReader; + }); + + if (localPendingReader != null) { + //timeouts may be scheduled only by the public read methods + readAsync(localPendingReader.numBytes, localPendingReader.handler, NO_SCHEDULE_TIME); + } + } + + @Override + public ServerAddress getAddress() { + return address; + } + + @Override + public void close() { + withLock(lock, () -> { + isClosed = true; + if (channel != null) { + channel.close(); + channel = null; + } + for (Iterator iterator = pendingInboundBuffers.iterator(); iterator.hasNext();) { + io.netty.buffer.ByteBuf nextByteBuf = iterator.next(); + iterator.remove(); + // Drops all retains to prevent silent leaks; assumes callers have already released + // ByteBuffers returned by that NettyStream before calling close. + nextByteBuf.release(nextByteBuf.refCnt()); + } + }); + } + + @Override + public boolean isClosed() { + return isClosed; + } + + public SocketSettings getSettings() { + return settings; + } + + public SslSettings getSslSettings() { + return sslSettings; + } + + public EventLoopGroup getWorkerGroup() { + return workerGroup; + } + + public Class getSocketChannelClass() { + return socketChannelClass; + } + + public ByteBufAllocator getAllocator() { + return allocator; + } + + private void addSslHandler(final SocketChannel channel) { + SSLEngine engine; + if (sslContext == null) { + SSLContext sslContext; + try { + sslContext = ofNullable(sslSettings.getContext()).orElse(SSLContext.getDefault()); + } catch (NoSuchAlgorithmException e) { + throw new MongoClientException("Unable to create standard SSLContext", e); + } + engine = sslContext.createSSLEngine(address.getHost(), address.getPort()); + } else { + engine = sslContext.newEngine(channel.alloc(), address.getHost(), address.getPort()); + } + engine.setUseClientMode(true); + SSLParameters sslParameters = engine.getSSLParameters(); + enableSni(address.getHost(), sslParameters); + if (!sslSettings.isInvalidHostNameAllowed()) { + enableHostNameVerification(sslParameters); + } + engine.setSSLParameters(sslParameters); + channel.pipeline().addFirst("ssl", new SslHandler(engine, false)); + } + + private class InboundBufferHandler extends SimpleChannelInboundHandler { + @Override + protected void channelRead0(final ChannelHandlerContext ctx, final io.netty.buffer.ByteBuf buffer) { + handleReadResponse(buffer, null); + } + + @Override + public void exceptionCaught(final ChannelHandlerContext ctx, final Throwable t) { + if (t instanceof ReadTimeoutException) { + handleReadResponse(null, new MongoSocketReadTimeoutException("Timeout while receiving message", address, t)); + } else { + handleReadResponse(null, t); + } + ctx.close(); + } + } + + private static final class PendingReader { + private final int numBytes; + private final AsyncCompletionHandler handler; + @Nullable + private final ScheduledFuture timeout; + + private PendingReader( + final int numBytes, final AsyncCompletionHandler handler, @Nullable final ScheduledFuture timeout) { + this.numBytes = numBytes; + 
this.handler = handler; + this.timeout = timeout; + } + } + + private static final class FutureAsyncCompletionHandler implements AsyncCompletionHandler { + private final CountDownLatch latch = new CountDownLatch(1); + private volatile T t; + private volatile Throwable throwable; + + FutureAsyncCompletionHandler() { + } + + @Override + public void completed(@Nullable final T t) { + this.t = t; + latch.countDown(); + } + + @Override + public void failed(final Throwable t) { + this.throwable = t; + latch.countDown(); + } + + public T get() throws IOException { + try { + latch.await(); + if (throwable != null) { + if (throwable instanceof IOException) { + throw (IOException) throwable; + } else if (throwable instanceof MongoException) { + throw (MongoException) throwable; + } else { + throw new MongoInternalException("Exception thrown from Netty Stream", throwable); + } + } + return t; + } catch (InterruptedException e) { + throw interruptAndCreateMongoInterruptedException("Interrupted", e); + } + } + } + + private class OpenChannelFutureListener implements ChannelFutureListener { + private final Queue socketAddressQueue; + private final ChannelFuture channelFuture; + private final AsyncCompletionHandler handler; + private final OperationContext operationContext; + + OpenChannelFutureListener(final OperationContext operationContext, + final Queue socketAddressQueue, final ChannelFuture channelFuture, + final AsyncCompletionHandler handler) { + this.operationContext = operationContext; + this.socketAddressQueue = socketAddressQueue; + this.channelFuture = channelFuture; + this.handler = handler; + } + + @Override + public void operationComplete(final ChannelFuture future) { + withLock(lock, () -> { + if (future.isSuccess()) { + if (isClosed) { + channelFuture.channel().close(); + } else { + channel = channelFuture.channel(); + channel.closeFuture().addListener((ChannelFutureListener) future1 -> handleReadResponse(null, new IOException("The connection to the server was closed"))); + } + handler.completed(null); + } else { + if (isClosed) { + handler.completed(null); + } else if (socketAddressQueue.isEmpty()) { + handler.failed(new MongoSocketOpenException("Exception opening socket", getAddress(), future.cause())); + } else { + initializeChannel(operationContext, handler, socketAddressQueue); + } + } + }); + } + } + + private static void cancel(@Nullable final Future f) { + if (f != null) { + f.cancel(false); + } + } + + @Nullable + private static ScheduledFuture scheduleReadTimeout(@Nullable final ReadTimeoutTask readTimeoutTask, final long timeoutMillis) { + if (timeoutMillis == NO_SCHEDULE_TIME) { + return null; + } else { + return assertNotNull(readTimeoutTask).schedule(timeoutMillis); + } + } + + @ThreadSafe + private static final class ReadTimeoutTask implements Runnable { + private final ChannelHandlerContext ctx; + + private ReadTimeoutTask(final ChannelHandlerContext timeoutChannelHandlerContext) { + ctx = timeoutChannelHandlerContext; + } + + @Override + public void run() { + try { + if (ctx.channel().isOpen()) { + ctx.fireExceptionCaught(ReadTimeoutException.INSTANCE); + ctx.close(); + } + } catch (Throwable t) { + ctx.fireExceptionCaught(t); + } + } + + @Nullable + private ScheduledFuture schedule(final long timeoutMillis) { + return timeoutMillis > 0 ? 
ctx.executor().schedule(this, timeoutMillis, MILLISECONDS) : null; + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/netty/NettyStreamFactory.java b/driver-core/src/main/com/mongodb/internal/connection/netty/NettyStreamFactory.java new file mode 100644 index 00000000000..ace80a347a2 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/netty/NettyStreamFactory.java @@ -0,0 +1,128 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection.netty; + +import com.mongodb.ServerAddress; +import com.mongodb.connection.SocketSettings; +import com.mongodb.connection.SslSettings; +import com.mongodb.internal.connection.DefaultInetAddressResolver; +import com.mongodb.internal.connection.Stream; +import com.mongodb.internal.connection.StreamFactory; +import com.mongodb.lang.Nullable; +import com.mongodb.spi.dns.InetAddressResolver; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.PooledByteBufAllocator; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioSocketChannel; +import io.netty.handler.ssl.SslContext; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A StreamFactory for Streams based on Netty 4.x. + */ +public class NettyStreamFactory implements StreamFactory { + private final InetAddressResolver inetAddressResolver; + private final SocketSettings settings; + private final SslSettings sslSettings; + private final EventLoopGroup eventLoopGroup; + private final Class socketChannelClass; + private final ByteBufAllocator allocator; + @Nullable + private final SslContext sslContext; + + /** + * Construct a new instance of the factory. + * + * @param settings the socket settings + * @param sslSettings the SSL settings + * @param eventLoopGroup the event loop group that all channels created by this factory will be a part of + * @param socketChannelClass the socket channel class + * @param allocator the allocator to use for ByteBuf instances + * @param sslContext the Netty {@link SslContext} + * as specified by {@link NettyStreamFactoryFactory.Builder#sslContext(SslContext)}. + */ + public NettyStreamFactory(final InetAddressResolver inetAddressResolver, final SocketSettings settings, + final SslSettings sslSettings, final EventLoopGroup eventLoopGroup, final Class socketChannelClass, + final ByteBufAllocator allocator, @Nullable final SslContext sslContext) { + this.inetAddressResolver = inetAddressResolver; + this.settings = notNull("settings", settings); + this.sslSettings = notNull("sslSettings", sslSettings); + this.eventLoopGroup = notNull("eventLoopGroup", eventLoopGroup); + this.socketChannelClass = notNull("socketChannelClass", socketChannelClass); + this.allocator = notNull("allocator", allocator); + this.sslContext = sslContext; + } + + /** + * Construct a new instance of the factory. 
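+ * <p>For illustration, the convenience constructors below ultimately delegate to this one; for example
+ * {@code new NettyStreamFactory(socketSettings, sslSettings)} is equivalent to passing a
+ * {@code DefaultInetAddressResolver}, a new {@code NioEventLoopGroup}, {@code NioSocketChannel.class},
+ * {@code PooledByteBufAllocator.DEFAULT} and a {@code null} {@link SslContext} here.</p>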
+ * + * @param settings the socket settings + * @param sslSettings the SSL settings + * @param eventLoopGroup the event loop group that all channels created by this factory will be a part of + * @param socketChannelClass the socket channel class + * @param allocator the allocator to use for ByteBuf instances + */ + public NettyStreamFactory(final SocketSettings settings, final SslSettings sslSettings, + final EventLoopGroup eventLoopGroup, final Class socketChannelClass, + final ByteBufAllocator allocator) { + this(new DefaultInetAddressResolver(), settings, sslSettings, eventLoopGroup, socketChannelClass, allocator, null); + } + + /** + * Construct a new instance of the factory. + * + * @param settings the socket settings + * @param sslSettings the SSL settings + * @param eventLoopGroup the event loop group that all channels created by this factory will be a part of + * @param allocator the allocator to use for ByteBuf instances + */ + public NettyStreamFactory(final SocketSettings settings, final SslSettings sslSettings, final EventLoopGroup eventLoopGroup, + final ByteBufAllocator allocator) { + this(settings, sslSettings, eventLoopGroup, NioSocketChannel.class, allocator); + } + + /** + * Construct a new instance of the factory. + * + * @param settings the socket settings + * @param sslSettings the SSL settings + * @param eventLoopGroup the event loop group that all channels created by this factory will be a part of + */ + public NettyStreamFactory(final SocketSettings settings, final SslSettings sslSettings, final EventLoopGroup eventLoopGroup) { + this(settings, sslSettings, eventLoopGroup, PooledByteBufAllocator.DEFAULT); + } + + /** + * Construct a new instance of the factory with a default allocator, nio event loop group and nio socket channel. + * + * @param settings the socket settings + * @param sslSettings the SSL settings + */ + public NettyStreamFactory(final SocketSettings settings, final SslSettings sslSettings) { + this(settings, sslSettings, new NioEventLoopGroup()); + } + + @Override + public Stream create(final ServerAddress serverAddress) { + return new NettyStream(serverAddress, inetAddressResolver, settings, sslSettings, eventLoopGroup, socketChannelClass, allocator, + sslContext); + } + +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/netty/NettyStreamFactoryFactory.java b/driver-core/src/main/com/mongodb/internal/connection/netty/NettyStreamFactoryFactory.java new file mode 100644 index 00000000000..7fe54defaa2 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/netty/NettyStreamFactoryFactory.java @@ -0,0 +1,242 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection.netty; + +import com.mongodb.connection.NettyTransportSettings; +import com.mongodb.connection.SocketSettings; +import com.mongodb.connection.SslSettings; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.connection.StreamFactory; +import com.mongodb.internal.connection.StreamFactoryFactory; +import com.mongodb.lang.Nullable; +import com.mongodb.spi.dns.InetAddressResolver; +import io.netty.buffer.ByteBufAllocator; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.socket.SocketChannel; +import io.netty.channel.socket.nio.NioSocketChannel; +import io.netty.handler.ssl.ReferenceCountedOpenSslClientContext; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslContextBuilder; +import io.netty.handler.ssl.SslProvider; + +import java.security.Security; +import java.util.Objects; + +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; + +/** + * A {@code StreamFactoryFactory} implementation for Netty-based streams. + */ +public final class NettyStreamFactoryFactory implements StreamFactoryFactory { + + private final EventLoopGroup eventLoopGroup; + private final boolean ownsEventLoopGroup; + private final Class socketChannelClass; + private final ByteBufAllocator allocator; + @Nullable + private final SslContext sslContext; + private final InetAddressResolver inetAddressResolver; + + /** + * Gets a builder for an instance of {@code NettyStreamFactoryFactory}. + * @return the builder + */ + public static Builder builder() { + return new Builder(); + } + + @VisibleForTesting(otherwise = PRIVATE) + EventLoopGroup getEventLoopGroup() { + return eventLoopGroup; + } + + @VisibleForTesting(otherwise = PRIVATE) + Class getSocketChannelClass() { + return socketChannelClass; + } + + @VisibleForTesting(otherwise = PRIVATE) + ByteBufAllocator getAllocator() { + return allocator; + } + + @VisibleForTesting(otherwise = PRIVATE) + @Nullable + SslContext getSslContext() { + return sslContext; + } + + /** + * A builder for an instance of {@code NettyStreamFactoryFactory}. + */ + public static final class Builder { + private ByteBufAllocator allocator; + private Class socketChannelClass; + private EventLoopGroup eventLoopGroup; + @Nullable + private SslContext sslContext; + private InetAddressResolver inetAddressResolver; + + private Builder() { + } + + /** + * Apply NettyTransportSettings + * + * @param settings the settings + * @return this + */ + public Builder applySettings(final NettyTransportSettings settings) { + this.allocator = settings.getAllocator(); + this.eventLoopGroup = settings.getEventLoopGroup(); + this.sslContext = settings.getSslContext(); + this.socketChannelClass = settings.getSocketChannelClass(); + return this; + } + + + /** + * Sets the allocator. 
+ * + * @param allocator the allocator to use for ByteBuf instances + * @return this + */ + public Builder allocator(final ByteBufAllocator allocator) { + this.allocator = notNull("allocator", allocator); + return this; + } + + /** + * Sets the socket channel class + * + * @param socketChannelClass the socket channel class + * @return this + */ + public Builder socketChannelClass(final Class socketChannelClass) { + this.socketChannelClass = notNull("socketChannelClass", socketChannelClass); + return this; + } + + /** + * Sets the event loop group. + * + *
+ * <p>It is highly recommended to supply your own event loop group and manage its shutdown. Otherwise, the event
+ * loop group created by default will not be shut down properly.</p>
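+ * <p>For example (illustrative), an application could hold a {@code NioEventLoopGroup}, pass it via this
+ * method, and call {@code group.shutdownGracefully()} during its own shutdown.</p>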
+ * + * @param eventLoopGroup the event loop group that all channels created by this factory will be a part of + * @return this + */ + public Builder eventLoopGroup(final EventLoopGroup eventLoopGroup) { + this.eventLoopGroup = notNull("eventLoopGroup", eventLoopGroup); + return this; + } + + /** + * Sets a {@linkplain SslContextBuilder#forClient() client-side} {@link SslContext io.netty.handler.ssl.SslContext}, + * which overrides the standard {@link SslSettings#getContext()}. + * By default it is {@code null} and {@link SslSettings#getContext()} is at play. + *
+ * <p>This option may be used as a convenient way to utilize OpenSSL as an alternative to the TLS/SSL protocol
+ * implementation in a JDK. To achieve this, specify the {@link SslProvider#OPENSSL} TLS/SSL protocol provider via
+ * {@link SslContextBuilder#sslProvider(SslProvider)}. Note that doing so adds a runtime dependency on
+ * netty-tcnative, which you must satisfy.</p>
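+ * <p>Sketch (assumes netty-tcnative is on the classpath):
+ * {@code builder.sslContext(SslContextBuilder.forClient().sslProvider(SslProvider.OPENSSL).build())}.</p>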
+ * <p>Notes:
+ * <ul>
+ *    <li>Netty {@link SslContext} may not examine some
+ *    {@linkplain Security security}/{@linkplain System#getProperties() system} properties that are used to
+ *    customize JSSE. Therefore, instead of using them you may have to apply the equivalent configuration
+ *    programmatically, if both the {@link SslContextBuilder} and the TLS/SSL protocol provider of choice support it.</li>
+ *    <li>Only {@link SslProvider#JDK} and {@link SslProvider#OPENSSL} TLS/SSL protocol providers are supported.</li>
+ * </ul>
+ * + * @param sslContext The Netty {@link SslContext}, which must be created via {@linkplain SslContextBuilder#forClient()}. + * @return {@code this}. + */ + public Builder sslContext(final SslContext sslContext) { + this.sslContext = notNull("sslContext", sslContext); + isTrueArgument("sslContext must be client-side", sslContext.isClient()); + isTrueArgument("sslContext must use either SslProvider.JDK or SslProvider.OPENSSL TLS/SSL protocol provider", + !(sslContext instanceof ReferenceCountedOpenSslClientContext)); + + return this; + } + + public Builder inetAddressResolver(final InetAddressResolver inetAddressResolver) { + this.inetAddressResolver = inetAddressResolver; + return this; + } + + /** + * Build an instance of {@code NettyStreamFactoryFactory}. + * @return factory of the netty stream factory + */ + public NettyStreamFactoryFactory build() { + return new NettyStreamFactoryFactory(this); + } + } + + @Override + public StreamFactory create(final SocketSettings socketSettings, final SslSettings sslSettings) { + return new NettyStreamFactory(inetAddressResolver, socketSettings, sslSettings, eventLoopGroup, socketChannelClass, allocator, + sslContext); + } + + @Override + public void close() { + if (ownsEventLoopGroup) { + // ignore the returned Future. This is in line with MongoClient behavior to not block waiting for connections to be returned + // to the pool + eventLoopGroup.shutdownGracefully(); + } + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + NettyStreamFactoryFactory that = (NettyStreamFactoryFactory) o; + return Objects.equals(eventLoopGroup, that.eventLoopGroup) && Objects.equals(socketChannelClass, that.socketChannelClass) + && Objects.equals(allocator, that.allocator) && Objects.equals(sslContext, that.sslContext) + && Objects.equals(inetAddressResolver, that.inetAddressResolver); + } + + @Override + public int hashCode() { + return Objects.hash(eventLoopGroup, socketChannelClass, allocator, sslContext, inetAddressResolver); + } + + private NettyStreamFactoryFactory(final Builder builder) { + allocator = builder.allocator == null ? ByteBufAllocator.DEFAULT : builder.allocator; + socketChannelClass = builder.socketChannelClass == null ? NioSocketChannel.class : builder.socketChannelClass; + eventLoopGroup = builder.eventLoopGroup == null ? new NioEventLoopGroup() : builder.eventLoopGroup; + ownsEventLoopGroup = builder.eventLoopGroup == null; + sslContext = builder.sslContext; + inetAddressResolver = builder.inetAddressResolver; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/netty/package-info.java b/driver-core/src/main/com/mongodb/internal/connection/netty/package-info.java new file mode 100644 index 00000000000..f8a61623538 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/netty/package-info.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. + */ + +@Internal +@NonNullApi +package com.mongodb.internal.connection.netty; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/connection/package-info.java b/driver-core/src/main/com/mongodb/internal/connection/package-info.java new file mode 100644 index 00000000000..f1b0349c963 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/package-info.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. + */ +@Internal +@NonNullApi +package com.mongodb.internal.connection; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/BufferAllocator.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/BufferAllocator.java new file mode 100644 index 00000000000..11bc769d7c2 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/BufferAllocator.java @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Original Work: MIT License, Copyright (c) [2015-2020] all contributors + * https://github.com/marianobarrios/tls-channel + */ + +package com.mongodb.internal.connection.tlschannel; + +import java.nio.ByteBuffer; + +/** + * A factory for {@link ByteBuffer}s. Implementations are free to return heap or direct buffers, or + * to do any kind of pooling. They are also expected to be thread-safe. + */ +public interface BufferAllocator { + + /** + * Allocate a {@link ByteBuffer} with the given initial capacity. + * + * @param size the size to allocate + * @return the newly created buffer + */ + ByteBuffer allocate(int size); + + /** + * Deallocate the given {@link ByteBuffer}. 
+ * + * @param buffer the buffer to deallocate, that should have been allocated using the same {@link + * BufferAllocator} instance + */ + void free(ByteBuffer buffer); +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/ClientTlsChannel.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/ClientTlsChannel.java new file mode 100644 index 00000000000..57d113d1f2a --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/ClientTlsChannel.java @@ -0,0 +1,231 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Original Work: MIT License, Copyright (c) [2015-2020] all contributors + * https://github.com/marianobarrios/tls-channel + */ + +package com.mongodb.internal.connection.tlschannel; + +import com.mongodb.internal.connection.tlschannel.impl.ByteBufferSet; +import com.mongodb.internal.connection.tlschannel.impl.TlsChannelImpl; + +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLSession; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ByteChannel; +import java.nio.channels.Channel; +import java.util.Optional; +import java.util.function.Consumer; +import java.util.function.Supplier; + +/** A client-side {@link TlsChannel}. */ +public class ClientTlsChannel implements TlsChannel { + + /** Builder of {@link ClientTlsChannel} */ + public static class Builder extends TlsChannelBuilder { + + private final Supplier sslEngineFactory; + + private Builder(ByteChannel underlying, SSLEngine sslEngine) { + super(underlying); + this.sslEngineFactory = () -> sslEngine; + } + + private Builder(ByteChannel underlying, SSLContext sslContext) { + super(underlying); + this.sslEngineFactory = () -> defaultSSLEngineFactory(sslContext); + } + + @Override + Builder getThis() { + return this; + } + + public ClientTlsChannel build() { + return new ClientTlsChannel( + underlying, + sslEngineFactory.get(), + sessionInitCallback, + runTasks, + plainBufferAllocator, + encryptedBufferAllocator, + releaseBuffers, + waitForCloseConfirmation); + } + } + + private static SSLEngine defaultSSLEngineFactory(SSLContext sslContext) { + SSLEngine engine = sslContext.createSSLEngine(); + engine.setUseClientMode(true); + return engine; + } + + /** + * Create a new {@link Builder}, configured with a underlying {@link Channel} and a fixed {@link + * SSLEngine}. + * + * @param underlying a reference to the underlying {@link ByteChannel} + * @param sslEngine the engine to use with this channel + * @return the new builder + */ + public static Builder newBuilder(ByteChannel underlying, SSLEngine sslEngine) { + return new Builder(underlying, sslEngine); + } + + /** + * Create a new {@link Builder}, configured with a underlying {@link Channel} and a {@link + * SSLContext}. 
+ * + * @param underlying a reference to the underlying {@link ByteChannel} + * @param sslContext a context to use with this channel, it will be used to create a client {@link + * SSLEngine}. + * @return the new builder + */ + public static Builder newBuilder(ByteChannel underlying, SSLContext sslContext) { + return new Builder(underlying, sslContext); + } + + private final ByteChannel underlying; + private final TlsChannelImpl impl; + + private ClientTlsChannel( + ByteChannel underlying, + SSLEngine engine, + Consumer sessionInitCallback, + boolean runTasks, + BufferAllocator plainBufAllocator, + BufferAllocator encryptedBufAllocator, + boolean releaseBuffers, + boolean waitForCloseNotifyOnClose) { + if (!engine.getUseClientMode()) + throw new IllegalArgumentException("SSLEngine must be in client mode"); + this.underlying = underlying; + TrackingAllocator trackingPlainBufAllocator = new TrackingAllocator(plainBufAllocator); + TrackingAllocator trackingEncryptedAllocator = new TrackingAllocator(encryptedBufAllocator); + impl = + new TlsChannelImpl( + underlying, + underlying, + engine, + Optional.empty(), + sessionInitCallback, + runTasks, + trackingPlainBufAllocator, + trackingEncryptedAllocator, + releaseBuffers, + waitForCloseNotifyOnClose); + } + + @Override + public ByteChannel getUnderlying() { + return underlying; + } + + @Override + public SSLEngine getSslEngine() { + return impl.engine(); + } + + @Override + public Consumer getSessionInitCallback() { + return impl.getSessionInitCallback(); + } + + @Override + public TrackingAllocator getPlainBufferAllocator() { + return impl.getPlainBufferAllocator(); + } + + @Override + public TrackingAllocator getEncryptedBufferAllocator() { + return impl.getEncryptedBufferAllocator(); + } + + @Override + public boolean getRunTasks() { + return impl.getRunTasks(); + } + + @Override + public long read(ByteBuffer[] dstBuffers, int offset, int length) throws IOException { + ByteBufferSet dest = new ByteBufferSet(dstBuffers, offset, length); + TlsChannelImpl.checkReadBuffer(dest); + return impl.read(dest); + } + + @Override + public long read(ByteBuffer[] dstBuffers) throws IOException { + return read(dstBuffers, 0, dstBuffers.length); + } + + @Override + public int read(ByteBuffer dstBuffer) throws IOException { + return (int) read(new ByteBuffer[] {dstBuffer}); + } + + @Override + public long write(ByteBuffer[] srcBuffers, int offset, int length) throws IOException { + ByteBufferSet source = new ByteBufferSet(srcBuffers, offset, length); + return impl.write(source); + } + + @Override + public long write(ByteBuffer[] outs) throws IOException { + return write(outs, 0, outs.length); + } + + @Override + public int write(ByteBuffer srcBuffer) throws IOException { + return (int) write(new ByteBuffer[] {srcBuffer}); + } + + @Override + public void renegotiate() throws IOException { + impl.renegotiate(); + } + + @Override + public void handshake() throws IOException { + impl.handshake(); + } + + @Override + public void close() throws IOException { + impl.close(); + } + + @Override + public boolean isOpen() { + return impl.isOpen(); + } + + @Override + public boolean shutdown() throws IOException { + return impl.shutdown(); + } + + @Override + public boolean shutdownReceived() { + return impl.shutdownReceived(); + } + + @Override + public boolean shutdownSent() { + return impl.shutdownSent(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/DirectBufferAllocator.java 
b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/DirectBufferAllocator.java new file mode 100644 index 00000000000..802a6e86f54 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/DirectBufferAllocator.java @@ -0,0 +1,49 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Original Work: MIT License, Copyright (c) [2015-2020] all contributors + * https://github.com/marianobarrios/tls-channel + */ + +package com.mongodb.internal.connection.tlschannel; + +import com.mongodb.internal.connection.tlschannel.util.DirectBufferDeallocator; + +import java.nio.ByteBuffer; + +/** + * Allocator that creates direct buffers. The {@link #free(ByteBuffer)} method, if called, + * deallocates the buffer immediately, without having to wait for GC (and the finalizer) to run. + * Calling {@link #free(ByteBuffer)} is actually optional, but should result in reduced memory + * consumption. + * + *
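As an illustration of the allocator contract described earlier (thread-safe allocate/free, pooling allowed), here is a minimal sketch; it is not part of this changeset, and the class name PoolingBufferAllocator is invented. A production pool would bound its size and bucket buffers by capacity.

```java
import java.nio.ByteBuffer;
import java.util.concurrent.ConcurrentLinkedQueue;

public final class PoolingBufferAllocator implements BufferAllocator {

    private final ConcurrentLinkedQueue<ByteBuffer> pool = new ConcurrentLinkedQueue<>();

    @Override
    public ByteBuffer allocate(int size) {
        ByteBuffer cached = pool.poll();
        if (cached != null && cached.capacity() >= size) {
            cached.clear(); // reset position/limit before handing it out again
            return cached;
        }
        // an undersized cached buffer is simply dropped and left to the GC
        return ByteBuffer.allocateDirect(size);
    }

    @Override
    public void free(ByteBuffer buffer) {
        pool.offer(buffer); // keep the buffer for reuse instead of discarding it
    }
}
```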
+ * <p>
Direct buffers are generally preferred for using with I/O, to avoid an extra user-space copy, + * or to reduce garbage collection overhead. + */ +public class DirectBufferAllocator implements BufferAllocator { + + private final DirectBufferDeallocator deallocator = new DirectBufferDeallocator(); + + @Override + public ByteBuffer allocate(int size) { + return ByteBuffer.allocateDirect(size); + } + + @Override + public void free(ByteBuffer buffer) { + // do not wait for GC (and finalizer) to run + deallocator.deallocate(buffer); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/HeapBufferAllocator.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/HeapBufferAllocator.java new file mode 100644 index 00000000000..af1c43a2739 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/HeapBufferAllocator.java @@ -0,0 +1,41 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Original Work: MIT License, Copyright (c) [2015-2020] all contributors + * https://github.com/marianobarrios/tls-channel + */ + +package com.mongodb.internal.connection.tlschannel; + +import java.nio.ByteBuffer; + +/** + * Allocator that creates heap buffers. The {@link #free(ByteBuffer)} method is a no-op, as heap + * buffers are handled completely by the garbage collector. + * + *
+ * <p>Heap buffers are generally used as a simple and generally good enough default solution. + */ +public class HeapBufferAllocator implements BufferAllocator { + + @Override + public ByteBuffer allocate(int size) { + return ByteBuffer.allocate(size); + } + + @Override + public void free(ByteBuffer buffer) { + // GC does it + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/NeedsReadException.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/NeedsReadException.java new file mode 100644 index 00000000000..e10614e1970 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/NeedsReadException.java @@ -0,0 +1,46 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Original Work: MIT License, Copyright (c) [2015-2020] all contributors + * https://github.com/marianobarrios/tls-channel + */ + +package com.mongodb.internal.connection.tlschannel; + +import java.nio.channels.ByteChannel; +import java.nio.channels.Selector; +import java.nio.channels.SocketChannel; + +/** + * This exception signals the caller that the operation cannot continue because bytes need + * to be read from the underlying {@link ByteChannel}, the channel is non-blocking and there are no + * bytes available. The caller should try the operation again, either with the channel in + * blocking mode or after ensuring that bytes are ready. + * + *
+ * <p>
For {@link SocketChannel}s, a {@link Selector} can be used to find out when the method should + * be retried. + * + *
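An illustrative retry helper for the pattern just described, assuming a TlsChannel over a non-blocking SocketChannel already registered with a Selector; the helper name and wiring are hypothetical, not part of this changeset.

```java
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;

final class NonBlockingIo {
    // Retry a read until it completes, parking on the selector whenever the
    // channel reports that the underlying socket must become ready first.
    static int readRetrying(TlsChannel tls, ByteBuffer dst, Selector selector, SelectionKey key)
            throws IOException {
        while (true) {
            try {
                return tls.read(dst);
            } catch (NeedsReadException e) {
                key.interestOps(SelectionKey.OP_READ);  // data (or handshake) needs a readable socket
                selector.select();
            } catch (NeedsWriteException e) {
                key.interestOps(SelectionKey.OP_WRITE); // handshake needs a writable socket
                selector.select();
            }
        }
    }
}
```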
+ * <p>
Caveat: Any {@link TlsChannel} I/O method can throw this exception. In particular, write + * may want to read data. This is because TLS handshakes may occur at any time (initiated by + * either the client or the server). + * + *
+ * <p>
This exception is akin to the SSL_ERROR_WANT_READ error code used by OpenSSL. + * + * @see OpenSSL error + * documentation + */ +public class NeedsReadException extends WouldBlockException { + private static final long serialVersionUID = 1419735639675146947L; +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/NeedsTaskException.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/NeedsTaskException.java new file mode 100644 index 00000000000..0f292665fda --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/NeedsTaskException.java @@ -0,0 +1,48 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Original Work: MIT License, Copyright (c) [2015-2020] all contributors + * https://github.com/marianobarrios/tls-channel + */ + +package com.mongodb.internal.connection.tlschannel; + +/** + * This exception signals the caller that the operation could not continue because a CPU-intensive + * operation (typically a TLS handshaking) needs to be executed and the {@link TlsChannel} is + * configured to not run tasks. This allows the application to run these tasks in some other + * threads, in order to not slow the selection loop. The method that threw the exception should be + * retried once the task supplied by {@link #getTask()} is executed and finished. + * + *
+ * <p>
This exception is akin to the SSL_ERROR_WANT_ASYNC error code used by OpenSSL (but note that + * in OpenSSL, the task is executed by the library, while with the {@link TlsChannel}, the calling + * code is responsible for the execution). + * + * @see OpenSSL error + * documentation + */ +public class NeedsTaskException extends TlsChannelFlowControlException { + + private static final long serialVersionUID = -936451835836926915L; + private final Runnable task; + + public NeedsTaskException(Runnable task) { + this.task = task; + } + + public Runnable getTask() { + return task; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/NeedsWriteException.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/NeedsWriteException.java new file mode 100644 index 00000000000..6d19b3947df --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/NeedsWriteException.java @@ -0,0 +1,46 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Original Work: MIT License, Copyright (c) [2015-2020] all contributors + * https://github.com/marianobarrios/tls-channel + */ + +package com.mongodb.internal.connection.tlschannel; + +import java.nio.channels.ByteChannel; +import java.nio.channels.Selector; +import java.nio.channels.SocketChannel; + +/** + * This exception signals the caller that the operation cannot continue because bytes need + * to be written to the underlying {@link ByteChannel}, the channel is non-blocking and there is no + * buffer space available. The caller should try the operation again, either with the channel in + * blocking mode or after ensuring that buffer space exists. + * + *
+ * <p>
For {@link SocketChannel}s, a {@link Selector} can be used to find out when the method should + * be retried. + * + *
+ * <p>
Caveat: Any {@link TlsChannel} I/O method can throw this exception. In particular, read + * may want to write data. This is because TLS handshakes may occur at any time (initiated + * by either the client or the server). + * + *
+ * <p>
This exception is akin to the SSL_ERROR_WANT_WRITE error code used by OpenSSL. + * + * @see OpenSSL error + * documentation + */ +public class NeedsWriteException extends WouldBlockException { + private static final long serialVersionUID = -3737940476382846413L; +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/ServerTlsChannel.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/ServerTlsChannel.java new file mode 100644 index 00000000000..dc2827b37c9 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/ServerTlsChannel.java @@ -0,0 +1,445 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Original Work: MIT License, Copyright (c) [2015-2020] all contributors + * https://github.com/marianobarrios/tls-channel + */ + +package com.mongodb.internal.connection.tlschannel; + +import com.mongodb.internal.connection.tlschannel.impl.BufferHolder; +import com.mongodb.internal.connection.tlschannel.impl.ByteBufferSet; +import com.mongodb.internal.connection.tlschannel.impl.TlsChannelImpl; +import com.mongodb.internal.connection.tlschannel.impl.TlsChannelImpl.EofException; +import com.mongodb.internal.connection.tlschannel.impl.TlsExplorer; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; + +import javax.net.ssl.SNIHostName; +import javax.net.ssl.SNIServerName; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLHandshakeException; +import javax.net.ssl.SSLSession; +import javax.net.ssl.StandardConstants; +import java.io.IOException; +import java.nio.Buffer; +import java.nio.ByteBuffer; +import java.nio.channels.ByteChannel; +import java.nio.channels.Channel; +import java.nio.channels.ClosedChannelException; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Consumer; +import java.util.function.Function; + +/** A server-side {@link TlsChannel}. 
*/ +public class ServerTlsChannel implements TlsChannel { + + private static final Logger LOGGER = Loggers.getLogger("connection.tls"); + + private interface SslContextStrategy { + + @FunctionalInterface + interface SniReader { + Optional readSni() throws IOException, EofException; + } + + SSLContext getSslContext(SniReader sniReader) throws IOException, EofException; + } + + private static class SniSslContextStrategy implements SslContextStrategy { + + private final SniSslContextFactory sniSslContextFactory; + + public SniSslContextStrategy(SniSslContextFactory sniSslContextFactory) { + this.sniSslContextFactory = sniSslContextFactory; + } + + @Override + public SSLContext getSslContext(SniReader sniReader) throws IOException, EofException { + // IO block + Optional nameOpt = sniReader.readSni(); + // call client code + Optional chosenContext; + try { + chosenContext = sniSslContextFactory.getSslContext(nameOpt); + } catch (Exception e) { + LOGGER.trace("client code threw exception during evaluation of server name indication", e); + throw new TlsChannelCallbackException("SNI callback failed", e); + } + return chosenContext.orElseThrow( + () -> new SSLHandshakeException("No ssl context available for received SNI: " + nameOpt)); + } + } + + private static class FixedSslContextStrategy implements SslContextStrategy { + + private final SSLContext sslContext; + + public FixedSslContextStrategy(SSLContext sslContext) { + this.sslContext = sslContext; + } + + @Override + public SSLContext getSslContext(SniReader sniReader) { + /* + * Avoid SNI parsing (using the supplied sniReader) when no decision + * would be made based on it. + */ + return sslContext; + } + } + + private static SSLEngine defaultSSLEngineFactory(SSLContext sslContext) { + SSLEngine engine = sslContext.createSSLEngine(); + engine.setUseClientMode(false); + return engine; + } + + /** Builder of {@link ServerTlsChannel} */ + public static class Builder extends TlsChannelBuilder { + + private final SslContextStrategy internalSslContextFactory; + private Function sslEngineFactory = + ServerTlsChannel::defaultSSLEngineFactory; + + private Builder(ByteChannel underlying, SSLContext sslContext) { + super(underlying); + this.internalSslContextFactory = new FixedSslContextStrategy(sslContext); + } + + private Builder(ByteChannel wrapped, SniSslContextFactory sslContextFactory) { + super(wrapped); + this.internalSslContextFactory = new SniSslContextStrategy(sslContextFactory); + } + + @Override + Builder getThis() { + return this; + } + + public Builder withEngineFactory(Function sslEngineFactory) { + this.sslEngineFactory = sslEngineFactory; + return this; + } + + public ServerTlsChannel build() { + return new ServerTlsChannel( + underlying, + internalSslContextFactory, + sslEngineFactory, + sessionInitCallback, + runTasks, + plainBufferAllocator, + encryptedBufferAllocator, + releaseBuffers, + waitForCloseConfirmation); + } + } + + /** + * Create a new {@link Builder}, configured with a underlying {@link Channel} and a fixed {@link + * SSLContext}, which will be used to create the {@link SSLEngine}. 
+ * + * @param underlying a reference to the underlying {@link ByteChannel} + * @param sslContext a fixed {@link SSLContext} to be used + * @return the new builder + */ + public static Builder newBuilder(ByteChannel underlying, SSLContext sslContext) { + return new Builder(underlying, sslContext); + } + + /** + * Create a new {@link Builder}, configured with an underlying {@link Channel} and a custom {@link + * SSLContext} factory, which will be used to create the context (in turn used to create the + * {@link SSLEngine}), as a function of the SNI received at the TLS connection start. + *
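A hypothetical sketch of this SNI-driven overload; rawChannel, contextsByHost and defaultContext are placeholders, and the generic types follow the SniSslContextFactory contract defined later in this patch.

```java
import javax.net.ssl.SNIHostName;
import javax.net.ssl.SSLContext;
import java.nio.channels.ByteChannel;
import java.util.Map;
import java.util.Optional;

final class SniServerExample {
    static ServerTlsChannel create(ByteChannel rawChannel,
                                   Map<String, SSLContext> contextsByHost,
                                   SSLContext defaultContext) {
        return ServerTlsChannel
                .newBuilder(rawChannel, sniOpt -> {
                    // pick a certificate/context per requested host name
                    if (sniOpt.isPresent() && sniOpt.get() instanceof SNIHostName) {
                        String host = ((SNIHostName) sniOpt.get()).getAsciiName();
                        return Optional.ofNullable(contextsByHost.get(host));
                    }
                    return Optional.of(defaultContext); // client sent no SNI
                })
                .build();
    }
}
```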
+ * <p><b>Implementation note:</b><br>
+ * Due to limitations of {@link SSLEngine}, configuring a {@link ServerTlsChannel} to select the + * {@link SSLContext} based on the SNI value implies parsing the first TLS frame (ClientHello) + * independently of the SSLEngine. + * + * @param underlying a reference to the underlying {@link ByteChannel} + * @param sslContextFactory a function from an optional SNI to the {@link SSLContext} to be used + * @return the new builder + * @see Server Name Indication + */ + public static Builder newBuilder(ByteChannel underlying, SniSslContextFactory sslContextFactory) { + return new Builder(underlying, sslContextFactory); + } + + private final ByteChannel underlying; + private final SslContextStrategy sslContextStrategy; + private final Function<SSLContext, SSLEngine> engineFactory; + private final Consumer<SSLSession> sessionInitCallback; + private final boolean runTasks; + private final TrackingAllocator plainBufAllocator; + private final TrackingAllocator encryptedBufAllocator; + private final boolean releaseBuffers; + private final boolean waitForCloseConfirmation; + + private final Lock initLock = new ReentrantLock(); + + private BufferHolder inEncrypted; + + private volatile boolean sniRead = false; + private SSLContext sslContext = null; + private TlsChannelImpl impl = null; + + // @formatter:off + private ServerTlsChannel( + ByteChannel underlying, + SslContextStrategy internalSslContextFactory, + Function<SSLContext, SSLEngine> engineFactory, + Consumer<SSLSession> sessionInitCallback, + boolean runTasks, + BufferAllocator plainBufAllocator, + BufferAllocator encryptedBufAllocator, + boolean releaseBuffers, + boolean waitForCloseConfirmation) { + this.underlying = underlying; + this.sslContextStrategy = internalSslContextFactory; + this.engineFactory = engineFactory; + this.sessionInitCallback = sessionInitCallback; + this.runTasks = runTasks; + this.plainBufAllocator = new TrackingAllocator(plainBufAllocator); + this.encryptedBufAllocator = new TrackingAllocator(encryptedBufAllocator); + this.releaseBuffers = releaseBuffers; + this.waitForCloseConfirmation = waitForCloseConfirmation; + inEncrypted = + new BufferHolder( + "inEncrypted", + Optional.empty(), + encryptedBufAllocator, + TlsChannelImpl.buffersInitialSize, + TlsChannelImpl.maxTlsPacketSize, + false /* plainData */, + releaseBuffers); + } + + // @formatter:on + + @Override + public ByteChannel getUnderlying() { + return underlying; + } + + /** + * Return the used {@link SSLContext}. + * + * @return the context if present, or null if the TLS connection has not been initialized, or the + * SNI not received yet. + */ + public SSLContext getSslContext() { + return sslContext; + } + + @Override + public SSLEngine getSslEngine() { + return impl == null ?
null : impl.engine(); + } + + @Override + public Consumer getSessionInitCallback() { + return sessionInitCallback; + } + + @Override + public boolean getRunTasks() { + return impl.getRunTasks(); + } + + @Override + public TrackingAllocator getPlainBufferAllocator() { + return plainBufAllocator; + } + + @Override + public TrackingAllocator getEncryptedBufferAllocator() { + return encryptedBufAllocator; + } + + @Override + public long read(ByteBuffer[] dstBuffers, int offset, int length) throws IOException { + ByteBufferSet dest = new ByteBufferSet(dstBuffers, offset, length); + TlsChannelImpl.checkReadBuffer(dest); + if (!sniRead) { + try { + initEngine(); + } catch (EofException e) { + return -1; + } + } + return impl.read(dest); + } + + @Override + public long read(ByteBuffer[] dstBuffers) throws IOException { + return read(dstBuffers, 0, dstBuffers.length); + } + + @Override + public int read(ByteBuffer dstBuffer) throws IOException { + return (int) read(new ByteBuffer[] {dstBuffer}); + } + + @Override + public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { + ByteBufferSet source = new ByteBufferSet(srcs, offset, length); + if (!sniRead) { + try { + initEngine(); + } catch (EofException e) { + throw new ClosedChannelException(); + } + } + return impl.write(source); + } + + @Override + public long write(ByteBuffer[] srcs) throws IOException { + return write(srcs, 0, srcs.length); + } + + @Override + public int write(ByteBuffer srcBuffer) throws IOException { + return (int) write(new ByteBuffer[] {srcBuffer}); + } + + @Override + public void renegotiate() throws IOException { + if (!sniRead) { + try { + initEngine(); + } catch (EofException e) { + throw new ClosedChannelException(); + } + } + impl.renegotiate(); + } + + @Override + public void handshake() throws IOException { + if (!sniRead) { + try { + initEngine(); + } catch (EofException e) { + throw new ClosedChannelException(); + } + } + impl.handshake(); + } + + @Override + public void close() throws IOException { + if (impl != null) impl.close(); + if (inEncrypted != null) inEncrypted.dispose(); + underlying.close(); + } + + @Override + public boolean isOpen() { + return underlying.isOpen(); + } + + private void initEngine() throws IOException, EofException { + initLock.lock(); + try { + if (!sniRead) { + sslContext = sslContextStrategy.getSslContext(this::getServerNameIndication); + // call client code + SSLEngine engine; + try { + engine = engineFactory.apply(sslContext); + } catch (Exception e) { + LOGGER.trace("client threw exception in SSLEngine factory", e); + throw new TlsChannelCallbackException("SSLEngine creation callback failed", e); + } + impl = + new TlsChannelImpl( + underlying, + underlying, + engine, + Optional.of(inEncrypted), + sessionInitCallback, + runTasks, + plainBufAllocator, + encryptedBufAllocator, + releaseBuffers, + waitForCloseConfirmation); + inEncrypted = null; + sniRead = true; + } + } finally { + initLock.unlock(); + } + } + + private Optional getServerNameIndication() throws IOException, EofException { + inEncrypted.prepare(); + try { + int recordHeaderSize = readRecordHeaderSize(); + while (inEncrypted.buffer.position() < recordHeaderSize) { + if (!inEncrypted.buffer.hasRemaining()) { + inEncrypted.enlarge(); + } + TlsChannelImpl.readFromChannel(underlying, inEncrypted.buffer); // IO block + } + ((Buffer) inEncrypted.buffer).flip(); + Map serverNames = TlsExplorer.explore(inEncrypted.buffer); + inEncrypted.buffer.compact(); + SNIServerName hostName = 
serverNames.get(StandardConstants.SNI_HOST_NAME); + if (hostName != null && hostName instanceof SNIHostName) { + SNIHostName sniHostName = (SNIHostName) hostName; + return Optional.of(sniHostName); + } else { + return Optional.empty(); + } + } finally { + inEncrypted.release(); + } + } + + private int readRecordHeaderSize() throws IOException, EofException { + while (inEncrypted.buffer.position() < TlsExplorer.RECORD_HEADER_SIZE) { + if (!inEncrypted.buffer.hasRemaining()) { + throw new IllegalStateException("inEncrypted too small"); + } + TlsChannelImpl.readFromChannel(underlying, inEncrypted.buffer); // IO block + } + ((Buffer) inEncrypted.buffer).flip(); + int recordHeaderSize = TlsExplorer.getRequiredSize(inEncrypted.buffer); + inEncrypted.buffer.compact(); + return recordHeaderSize; + } + + @Override + public boolean shutdown() throws IOException { + return impl != null && impl.shutdown(); + } + + @Override + public boolean shutdownReceived() { + return impl != null && impl.shutdownReceived(); + } + + @Override + public boolean shutdownSent() { + return impl != null && impl.shutdownSent(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/SniSslContextFactory.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/SniSslContextFactory.java new file mode 100644 index 00000000000..a97f0ba6d40 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/SniSslContextFactory.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Original Work: MIT License, Copyright (c) [2015-2020] all contributors + * https://github.com/marianobarrios/tls-channel + */ + +package com.mongodb.internal.connection.tlschannel; + +import javax.net.ssl.SNIServerName; +import javax.net.ssl.SSLContext; +import java.util.Optional; + +/** + * Factory for {@link SSLContext}s, based in an optional {@link SNIServerName}. Implementations of + * this interface are supplied to {@link ServerTlsChannel} instances, to select the correct context + * (and so the correct certificate) based on the server name provided by the client. + */ +@FunctionalInterface +public interface SniSslContextFactory { + + /** + * Return a proper {@link SSLContext}. + * + * @param sniServerName an optional {@link SNIServerName}; an empty value means that the client + * did not send and SNI value. + * @return the chosen context, or an empty value, indicating that no context is supplied and the + * connection should be aborted. + */ + Optional getSslContext(Optional sniServerName); +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/TlsChannel.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/TlsChannel.java new file mode 100644 index 00000000000..d0a57c04f0e --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/TlsChannel.java @@ -0,0 +1,468 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Original Work: MIT License, Copyright (c) [2015-2020] all contributors + * https://github.com/marianobarrios/tls-channel + */ + +package com.mongodb.internal.connection.tlschannel; + +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLException; +import javax.net.ssl.SSLSession; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ByteChannel; +import java.nio.channels.GatheringByteChannel; +import java.nio.channels.ScatteringByteChannel; +import java.nio.channels.Selector; +import java.nio.channels.SocketChannel; +import java.util.function.Consumer; + +/** + * A ByteChannel interface to a TLS (Transport Layer Security) connection. + * + *
+ * <p>
Instances that implement this interface delegate all cryptographic operations to the standard + * Java TLS implementation: SSLEngine; effectively hiding it behind an easy-to-use streaming API + * that allows programmers to secure JVM applications with minimal added complexity. + + *
+ * <p>
In other words, an interface that allows the programmer to have TLS using the same standard + * socket API used for plaintext, just like OpenSSL does for C, only for Java. + * + *
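A minimal sketch of that "plain socket" feel, assuming a blocking SocketChannel and the default SSLContext; host, port and payload are illustrative only.

```java
import javax.net.ssl.SSLContext;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.SocketChannel;
import java.nio.charset.StandardCharsets;

final class ClientExample {
    public static void main(String[] args) throws Exception {
        SocketChannel raw = SocketChannel.open(new InetSocketAddress("example.com", 443));
        TlsChannel tls = ClientTlsChannel.newBuilder(raw, SSLContext.getDefault()).build();
        tls.write(ByteBuffer.wrap("GET / HTTP/1.0\r\n\r\n".getBytes(StandardCharsets.US_ASCII)));
        ByteBuffer response = ByteBuffer.allocate(4096);
        tls.read(response); // the TLS handshake runs implicitly on first use
        tls.close();
    }
}
```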
+ * <p>
Note that this is an API adapter, not a cryptographic implementation: with the exception of a + * few bytes of parsing at the beginning of the connection, to look for the SNI, the whole + * protocol implementation is done by the SSLEngine. Both the SSLContext and SSLEngine are supplied + * by the client; these classes are the ones responsible for protocol configuration, including + * hostname validation, client-side authentication, etc. + + *
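Since the supplied engine carries the protocol policy, enabling hostname validation happens on the SSLEngine rather than on the channel. A hedged sketch, with invented helper names:

```java
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLEngine;
import javax.net.ssl.SSLParameters;
import java.nio.channels.ByteChannel;

final class EngineConfig {
    // The channel only transports; protocol policy lives in the supplied engine.
    static ClientTlsChannel secureClient(ByteChannel raw, SSLContext ctx, String host, int port) {
        SSLEngine engine = ctx.createSSLEngine(host, port); // peer info also enables session reuse
        engine.setUseClientMode(true);
        SSLParameters params = engine.getSSLParameters();
        params.setEndpointIdentificationAlgorithm("HTTPS"); // hostname validation
        engine.setSSLParameters(params);
        return ClientTlsChannel.newBuilder(raw, engine).build();
    }
}
```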
+ * <p>
A TLS channel is created by using one of its subclasses. They will take an existing {@link + * ByteChannel} (typically, but not necessarily, a {@link SocketChannel}) and a {@link SSLEngine}. + * + *
+ * <p>
It should be noted that this interface extends {@link ByteChannel} as a design compromise, but + * it does not follow its interface completely. In particular, in case of underlying non-blocking + * channels, when it is not possible to complete an operation, no zero is returned; instead, a {@link + * WouldBlockException} is thrown. This divergence from the base interface is needed because both a read + * and a write operation can run out of both bytes for reading and + * buffer space for writing, as a handshake (a bidirectional operation) can happen at any moment. + * The user would use a {@link Selector} to wait for the expected condition of the underlying + * channel, and should know which operation to register. + + *
+ * <p>
On top of that, operations can also fail to complete due to asynchronous tasks; this is + * communicated using a {@link NeedsTaskException}. This behavior is controlled by the {@link + * #getRunTasks()} attribute. This allows the user to execute CPU-intensive tasks out of the + * selector loop. + */ +public interface TlsChannel extends ByteChannel, GatheringByteChannel, ScatteringByteChannel { + + BufferAllocator defaultPlainBufferAllocator = new HeapBufferAllocator(); + BufferAllocator defaultEncryptedBufferAllocator = new DirectBufferAllocator(); + + /** + * Return a reference to the underlying {@link ByteChannel}. + * + * @return the underlying channel + */ + ByteChannel getUnderlying(); + + /** + * Return a reference to the {@link SSLEngine} used. + * + * @return the engine reference if present, or null if unknown (that can happen in + * server-side channels before the SNI is parsed). + */ + SSLEngine getSslEngine(); + + /** + * Return the callback function to be executed when the TLS session is established (or + * re-established). + * + * @return the callback function + * @see TlsChannelBuilder#withSessionInitCallback(Consumer) + */ + Consumer getSessionInitCallback(); + + /** + * Return the {@link BufferAllocator} to use for unencrypted data. Actually, a decorating subclass + * is returned, which contains allocation statistics for this channel. + * + * @return the buffer allocator + * @see TlsChannelBuilder#withPlainBufferAllocator(BufferAllocator) + * @see TrackingAllocator + */ + TrackingAllocator getPlainBufferAllocator(); + + /** + * Return the {@link BufferAllocator} to use for encrypted data. Actually, a decorating subclass + * is returned, which contains allocation statistics for this channel. + * + * @return the buffer allocator + * @see TlsChannelBuilder#withEncryptedBufferAllocator(BufferAllocator) + * @see TrackingAllocator + */ + TrackingAllocator getEncryptedBufferAllocator(); + + /** + * Return whether CPU-intensive tasks are run or not. + * + * @return whether tasks are run + * @see TlsChannelBuilder#withRunTasks(boolean) + */ + boolean getRunTasks(); + + /** + * Reads a sequence of bytesProduced from this channel into the given buffer. + * + *
+ * <p>
An attempt is made to read up to r bytes from the channel, where r is + * the number of bytes remaining in the buffer, that is, dst.remaining(), at + * the moment this method is invoked. + + *
+ * <p>
Suppose that a byte sequence of length n is read, where 0 <= n <= r. This byte sequence will be + * transferred into the buffer so that the first byte in the sequence is at index p and the + * last byte is at index p + n - 1, where p is the buffer's position at the moment this method is invoked. + * Upon return the buffer's position will be equal to p + n; + * its limit will not have changed. + + *
+ * <p>
A read operation might not fill the buffer, and in fact it might not read any bytes + * at all. Whether or not it does so depends upon the nature and state of the underlying channel. + * It is guaranteed, however, that if a channel is in blocking mode and there is at least one byte + * remaining in the buffer then this method will block until at least one byte is read. On the + * other hand, if the underlying channel is in non-blocking mode then a {@link + * WouldBlockException} may be thrown. Note that this also includes the possibility of a {@link + * NeedsWriteException}, due to the fact that, during a TLS handshake, bytes need to be + * written to the underlying channel. In any case, after a {@link WouldBlockException}, the + * operation should be retried when the underlying channel is ready (for reading or writing, + * depending on the subclass). + + *
+ * <p>
If the channel is configured to not run tasks and one is due to run, a {@link + * NeedsTaskException} will be thrown. In this case the operation should be retried after the task + * is run. + * + *
+ * <p>
This method may be invoked at any time. If another thread has already initiated a read or + * handshaking operation upon this channel, however, then an invocation of this method will block + * until the first operation is complete. + * + * @param dst The buffer into which bytes are to be transferred + * @return The number of bytes read, or -1 if the channel has reached + * end-of-stream; contrary to the behavior specified in {@link ByteChannel}, this method never + * returns 0, but throws {@link WouldBlockException} + * @throws WouldBlockException if the channel is in non-blocking mode and the IO operation cannot + * be completed immediately + * @throws NeedsTaskException if the channel is not configured to run tasks automatically and a + * task needs to be executed to complete the operation + * @throws SSLException if the {@link SSLEngine} throws an SSLException + * @throws IOException if the underlying channel throws an IOException + */ + int read(ByteBuffer dst) throws IOException; + + /** + * Writes a sequence of bytes to this channel from the given buffer. + * + *
+ * <p>
An attempt is made to write up to r bytes to the channel, where r is + * the number of bytes remaining in the buffer, that is, src.remaining(), at + * the moment this method is invoked. + + *
+ * <p>
Suppose that a byte sequence of length n is written, where 0 <= n <= r. This byte sequence will be + * transferred from the buffer starting at index p, where p is the buffer's position + * at the moment this method is invoked; the index of the last byte written will be p + n - 1. Upon return the buffer's + * position will be equal to p + n; its limit will not have + * changed. + + *
+ * <p>
If the underlying channel is in blocking mode, a write operation will return only after + * writing all of the r requested bytes. On the other hand, if it is in + * non-blocking mode, this operation may write only some of the bytes or possibly none at + * all, in which case a {@link WouldBlockException} will be thrown. Note that this also includes + * the possibility of a {@link NeedsReadException}, due to the fact that, during a TLS handshake, + * bytes need to be read from the underlying channel. In any case, after a {@link + * WouldBlockException}, the operation should be retried when the underlying channel is ready (for + * reading or writing, depending on the subclass). + + *
+ * <p>
If the channel is configured to not run tasks and one is due to run, a {@link + * NeedsTaskException} will be thrown. In this case the operation should be retried after the task + * is run. + * + *
+ * <p>
This method may be invoked at any time. If another thread has already initiated a write or + * handshaking operation upon this channel, however, then an invocation of this method will block + * until the first operation is complete. + * + * @param src The buffer from which bytes are to be retrieved + * @return The number of bytes written; contrary to the behavior specified in {@link + * ByteChannel}, this method never returns 0, but throws {@link WouldBlockException} + * @throws WouldBlockException if the channel is in non-blocking mode and the IO operation cannot + * be completed immediately + * @throws NeedsTaskException if the channel is not configured to run tasks automatically and a + * task needs to be executed to complete the operation + * @throws SSLException if the {@link SSLEngine} throws an SSLException + * @throws IOException if the underlying channel throws an IOException + */ + int write(ByteBuffer src) throws IOException; + + /** + * Initiates a handshake (initial or renegotiation) on this channel. This method is not needed for + * the initial handshake, as the read() and write() methods will + * implicitly do the initial handshake if needed. + * + *
+ * <p>
This method may block if the underlying channel is in blocking mode. + + *
+ * <p>
Note that renegotiation is a problematic feature of the TLS protocol, which should only be + * initiated at a quiet point of the protocol. + + *
+ * <p>
This method may block if the underlying channel is in blocking mode, otherwise a {@link + * WouldBlockException} can be thrown. In this case the operation should be retried when the + * underlying channel is ready (for reading or writing, depending on the subclass). + * + *
+ * <p>
If the channel is configured to not run tasks and one is due to run, a {@link + * NeedsTaskException} will be thrown, with a reference to the task. In this case the operation + * should be retried after the task is run. + * + *
+ * <p>
This method may be invoked at any time. If another thread has already initiated a read, + * write, or handshaking operation upon this channel, however, then an invocation of this method + * will block until the first operation is complete. + * + * @throws WouldBlockException if the channel is in non-blocking mode and the IO operation cannot + * be completed immediately + * @throws NeedsTaskException if the channel is not configured to run tasks automatically and a + * task needs to be executed to complete the operation + * @throws SSLException if the {@link SSLEngine} throws a SSLException + * @throws IOException if the underlying channel throws an IOException + */ + void renegotiate() throws IOException; + + /** + * Forces the initial TLS handshake. Calling this method is usually not needed, as a handshake + * will happen automatically when doing the first read() or write() + * operation. Calling this method after the initial handshake has been done has no effect. + * + *
+ * <p>
This method may block if the underlying channel is in blocking mode, otherwise a {@link + * WouldBlockException} can be thrown. In this case the operation should be retried when the + * underlying channel is ready (for reading or writing, depending on the subclass). + * + *
+ * <p>
If the channel is configured to not run tasks and one is due to run, a {@link + * NeedsTaskException} will be thrown, with a reference to the task. In this case the operation + * should be retried after the task is run. + * + *
+ * <p>
This method may be invoked at any time. If another thread has already initiated a read, + * write, or handshaking operation upon this channel, however, then an invocation of this method + * will block until the first operation is complete. + * + * @throws WouldBlockException if the channel is in non-blocking mode and the IO operation cannot + * be completed immediately + * @throws NeedsTaskException if the channel is not configured to run tasks automatically and a + * task needs to be executed to complete the operation + * @throws SSLException if the {@link SSLEngine} throws an SSLException + * @throws IOException if the underlying channel throws an IOException + */ + void handshake() throws IOException; + + /** + * Writes a sequence of bytes to this channel from a subsequence of the given buffers. + * + *
+ * <p>
See {@link GatheringByteChannel#write(ByteBuffer[], int, int)} for more details of the + * meaning of this signature. + * + *
+ * <p>
This method behaves slightly differently from the interface specification with respect to + * non-blocking responses; see {@link #write(ByteBuffer)} for more details. + * + * @param srcs The buffers from which bytes are to be retrieved + * @param offset The offset within the buffer array of the first buffer from which bytes + * are to be retrieved; must be non-negative and no larger than srcs.length + * @param length The maximum number of buffers to be accessed; must be non-negative and no larger + * than srcs.length - offset + * @return The number of bytes written; contrary to the behavior specified in {@link + * ByteChannel}, this method never returns 0, but throws {@link WouldBlockException} + * @throws IndexOutOfBoundsException If the preconditions on the offset and + * length parameters do not hold + * @throws WouldBlockException if the channel is in non-blocking mode and the IO operation cannot + * be completed immediately + * @throws NeedsTaskException if the channel is not configured to run tasks automatically and a + * task needs to be executed to complete the operation + * @throws SSLException if the {@link SSLEngine} throws an SSLException + * @throws IOException if the underlying channel throws an IOException + */ + long write(ByteBuffer[] srcs, int offset, int length) throws IOException; + + /** + * Writes a sequence of bytes to this channel from the given buffers. + * + *
+ * <p>
An invocation of this method of the form c.write(srcs) behaves in exactly the + * same manner as the invocation + * + *
+ * <blockquote>
+ *
+ * <pre>
+   * c.write(srcs, 0, srcs.length);
+   * </pre>
+ *
+ * </blockquote>
+ * + * This method behaves slightly differently from the interface specification with respect to + * non-blocking responses; see {@link #write(ByteBuffer)} for more details. + * + * @param srcs The buffers from which bytes are to be retrieved + * @return The number of bytes written; contrary to the behavior specified in {@link + * ByteChannel}, this method never returns 0, but throws {@link WouldBlockException} + * @throws IndexOutOfBoundsException If the preconditions on the offset and + * length parameters do not hold + * @throws WouldBlockException if the channel is in non-blocking mode and the IO operation cannot + * be completed immediately + * @throws NeedsTaskException if the channel is not configured to run tasks automatically and a + * task needs to be executed to complete the operation + * @throws SSLException if the {@link SSLEngine} throws an SSLException + * @throws IOException if the underlying channel throws an IOException + */ + long write(ByteBuffer[] srcs) throws IOException; + + /** + * Reads a sequence of bytes from this channel into a subsequence of the given buffers. + * + *
+ * <p>
See {@link ScatteringByteChannel#read(ByteBuffer[], int, int)} for more details of the + * meaning of this signature. + * + *
+ * <p>
This method behaves slightly differently from the interface specification with respect to + * non-blocking responses; see {@link #read(ByteBuffer)} for more details. + * + * @param dsts The buffers into which bytes are to be transferred + * @param offset The offset within the buffer array of the first buffer into which bytes + * are to be transferred; must be non-negative and no larger than dsts.length + * @param length The maximum number of buffers to be accessed; must be non-negative and no larger + * than dsts.length - offset + * @return The number of bytes read, or -1 if the channel has reached + * end-of-stream; contrary to the behavior specified in {@link ByteChannel}, this method never + * returns 0, but throws {@link WouldBlockException} + * @throws IndexOutOfBoundsException If the preconditions on the offset and + * length parameters do not hold + * @throws WouldBlockException if the channel is in non-blocking mode and the IO operation cannot + * be completed immediately + * @throws NeedsTaskException if the channel is not configured to run tasks automatically and a + * task needs to be executed to complete the operation + * @throws SSLException if the {@link SSLEngine} throws an SSLException + * @throws IOException if the underlying channel throws an IOException + */ + long read(ByteBuffer[] dsts, int offset, int length) throws IOException; + + /** + * Reads a sequence of bytes from this channel into the given buffers. + * + *
+ * <p>
An invocation of this method of the form c.read(dsts) behaves in exactly the + * same manner as the invocation + * + *
+ * <blockquote>
+ *
+ * <pre>
+   * c.read(dsts, 0, dsts.length);
+   * </pre>
+ *
+ * </blockquote>
+ * + *
+ * <p>
This method behaves slightly differently from the interface specification with respect to + * non-blocking responses; see {@link #read(ByteBuffer)} for more details. + * + * @param dsts The buffers into which bytes are to be transferred + * @return The number of bytes read, or -1 if the channel has reached + * end-of-stream; contrary to the behavior specified in {@link ByteChannel}, this method never + * returns 0, but throws {@link WouldBlockException} + * @throws IndexOutOfBoundsException If the preconditions on the offset and + * length parameters do not hold + * @throws WouldBlockException if the channel is in non-blocking mode and the IO operation cannot + * be completed immediately + * @throws NeedsTaskException if the channel is not configured to run tasks automatically and a + * task needs to be executed to complete the operation + * @throws SSLException if the {@link SSLEngine} throws an SSLException + * @throws IOException if the underlying channel throws an IOException + */ + long read(ByteBuffer[] dsts) throws IOException; + + /** + * Closes the underlying channel. This method first does some form of TLS close if not already + * done. The exact behavior can be configured using the {@link + * TlsChannelBuilder#withWaitForCloseConfirmation}. + * + *
+ * <p>
The default behavior mimics what happens in a normal (that is, non-layered) {@link + * javax.net.ssl.SSLSocket#close()}. + + *
+ * <p>
For finer control of the TLS close, use {@link #shutdown()}. + * + * @throws IOException if the underlying channel throws an IOException during close. Exceptions + * thrown during any previous TLS close are not propagated. + */ + void close() throws IOException; + + /** + * Shuts down the TLS connection. This method emulates the behavior of OpenSSL's SSL_shutdown(). + * + *

The shutdown procedure consists of two steps: the sending of the "close notify" shutdown
+   * alert and the reception of the peer's "close notify". According to the TLS standard, it is
+   * acceptable for an application to only send its shutdown alert and then close the underlying
+   * connection without waiting for the peer's response. If the underlying connection is to be
+   * used for further communication, however, the complete shutdown procedure (bidirectional
+   * "close notify" alerts) must be performed, so that the peers stay synchronized.
+   *

This class supports both unidirectional and bidirectional shutdown through the two-step
+   * behavior of this method.
+   *

When this side is the first party to send the "close notify" alert, this method will only
+   * send the alert, set the {@link #shutdownSent()} flag and return false. If a
+   * unidirectional shutdown is enough, this first call is sufficient. In order to complete the
+   * bidirectional shutdown handshake, this method must be called again. The second call will wait
+   * for the peer's "close notify" shutdown alert. On success, the second call will return
+   * true.
+   *

If the peer already sent the "close notify" alert and it was already processed implicitly
+   * inside a read operation, the {@link #shutdownReceived()} flag is already set. This method will
+   * then send the "close notify" alert, set the {@link #shutdownSent()} flag and immediately return
+   * true. It is therefore recommended to check the return value of this method and
+   * call it again if the bidirectional shutdown is not yet complete.
+   *
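+   * <p>For illustration, a sketch of a complete bidirectional shutdown over a blocking channel;
+   * the {@code channel} variable is an assumption, not part of this interface:
+   *
+   * <pre>{@code
+   * // the first call sends our "close notify"; it returns true only if the peer's
+   * // "close notify" was already received during a previous read
+   * if (!channel.shutdown()) {
+   *   channel.shutdown(); // the second call waits for the peer's "close notify"
+   * }
+   * }</pre>
+   *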

If the underlying channel is blocking, this method will only return once the handshake step
+   * has finished or an error has occurred.
+   *

If the underlying channel is non-blocking, this method may throw {@link WouldBlockException}
+   * if the underlying channel cannot immediately continue the handshake. The calling
+   * process must then repeat the call after taking appropriate action (like waiting in a selector
+   * in the case of a {@link SocketChannel}).
+   *
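+   * <p>A non-blocking sketch under these rules ({@code channel}, {@code rawChannel} and
+   * {@code selector} are assumptions, not part of this interface):
+   *
+   * <pre>{@code
+   * boolean done = false;
+   * while (!done) {
+   *   try {
+   *     done = channel.shutdown();
+   *   } catch (WouldBlockException e) {
+   *     // wait for readiness on the underlying channel, then retry
+   *     rawChannel.register(selector, SelectionKey.OP_READ | SelectionKey.OP_WRITE);
+   *     selector.select();
+   *   }
+   * }
+   * }</pre>
+   *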

Note that despite not being mandated by the specification, a proper TLS close is important
+   * to prevent truncation attacks, which consist, essentially, of an adversary introducing TCP FIN
+   * segments to trick one party into ignoring the final bytes of a secure stream. For more
+   * details, see the original paper.
+   *
+   * @return whether the closing is finished.
+   * @throws IOException if the underlying channel throws an IOException
+   * @throws WouldBlockException if the channel is in non-blocking mode and the IO operation cannot
+   *     be completed immediately
+   * @see TlsChannelBuilder#withWaitForCloseConfirmation(boolean)
+   */
+  boolean shutdown() throws IOException;
+
+  /**
+   * Return whether this side of the connection has already received the close notification.
+   *
+   * @see #shutdown()
+   * @return true if the close notification was received
+   */
+  boolean shutdownReceived();
+
+  /**
+   * Return whether this side of the connection has already sent the close notification.
+   *
+   * @see #shutdown()
+   * @return true if the close notification was sent
+   */
+  boolean shutdownSent();
+}
diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/TlsChannelBuilder.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/TlsChannelBuilder.java
new file mode 100644
index 00000000000..e55f5c349f5
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/TlsChannelBuilder.java
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Original Work: MIT License, Copyright (c) [2015-2020] all contributors
+ * https://github.com/marianobarrios/tls-channel
+ */
+
+package com.mongodb.internal.connection.tlschannel;
+
+import javax.net.ssl.SSLSession;
+import javax.net.ssl.SSLSocket;
+import java.io.IOException;
+import java.nio.channels.ByteChannel;
+import java.util.function.Consumer;
+
+/** Base class for builders of {@link TlsChannel}. */
+public abstract class TlsChannelBuilder<T extends TlsChannelBuilder<T>> {
+
+  final ByteChannel underlying;
+
+  // @formatter:off
+  Consumer<SSLSession> sessionInitCallback = session -> {};
+  // @formatter:on
+  boolean runTasks = true;
+  BufferAllocator plainBufferAllocator = TlsChannel.defaultPlainBufferAllocator;
+  BufferAllocator encryptedBufferAllocator = TlsChannel.defaultEncryptedBufferAllocator;
+  boolean releaseBuffers = true;
+  boolean waitForCloseConfirmation = false;
+
+  TlsChannelBuilder(ByteChannel underlying) {
+    this.underlying = underlying;
+  }
+
+  abstract T getThis();
+
+  /**
+   * Whether CPU-intensive tasks are run or not. Default is to run them. If this is set to
+   * false, the calling code should be prepared to handle {@link NeedsTaskException}.
+   *
+   * @param runTasks whether to run tasks
+   * @return this object
+   */
+  public T withRunTasks(boolean runTasks) {
+    this.runTasks = runTasks;
+    return getThis();
+  }
+
+  /**
+   * Set the {@link BufferAllocator} to use for unencrypted data.
By default a {@link HeapBufferAllocator} is used, as these buffers are used to supplement
+   * user-supplied ones when dealing with too big a TLS record, that is, they operate entirely
+   * inside the JVM.
+   *
+   * @param bufferAllocator the buffer allocator
+   * @return this object
+   */
+  public T withPlainBufferAllocator(BufferAllocator bufferAllocator) {
+    this.plainBufferAllocator = bufferAllocator;
+    return getThis();
+  }
+
+  /**
+   * Set the {@link BufferAllocator} to use for encrypted data. By default a
+   * {@link DirectBufferAllocator} is used, as this data is usually read from or written to native
+   * sockets.
+   *
+   * @param bufferAllocator the buffer allocator
+   * @return this object
+   */
+  public T withEncryptedBufferAllocator(BufferAllocator bufferAllocator) {
+    this.encryptedBufferAllocator = bufferAllocator;
+    return getThis();
+  }
+
+  /**
+   * Register a callback function to be executed when the TLS session is established (or
+   * re-established). The supplied function will run in the same thread as the rest of the
+   * handshake, so it should ideally run as fast as possible.
+   *
+   * @param sessionInitCallback the session initialization callback
+   * @return this object
+   */
+  public T withSessionInitCallback(Consumer<SSLSession> sessionInitCallback) {
+    this.sessionInitCallback = sessionInitCallback;
+    return getThis();
+  }
+
+  /**
+   * Whether to release unused buffers in the middle of connections. Equivalent to OpenSSL's
+   * SSL_MODE_RELEASE_BUFFERS.
+   *

Default is to release. Releasing unused buffers is especially effective in the case of
+   * idle long-lived connections, when the memory footprint can be reduced significantly. A
+   * potential reason for setting this value to false is performance, since more
+   * releases mean more allocations, which have a cost. This is effectively a memory-time
+   * trade-off. However, in most cases the default behavior makes sense.
+   *
+   * @param releaseBuffers whether to release buffers
+   * @return this object
+   */
+  public T withReleaseBuffers(boolean releaseBuffers) {
+    this.releaseBuffers = releaseBuffers;
+    return getThis();
+  }
+
+  /**
+   * Whether to wait for TLS close confirmation when executing a local {@link TlsChannel#close()}
+   * on the channel. If the underlying channel is blocking, setting this to true will
+   * block the close operation (potentially until it times out, or indefinitely) until the
+   * counterpart confirms the close on their side (sending a close_notify alert). If the underlying
+   * channel is non-blocking, setting this parameter to true is ineffective.
+   *

Setting this value to true emulates the behavior of {@link SSLSocket} when used + * in layered mode (and without autoClose). + * + *
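+   * <p>An illustrative sketch, assuming the accompanying {@code ClientTlsChannel} builder and
+   * existing {@code rawChannel} and {@code sslContext} variables:
+   *
+   * <pre>{@code
+   * TlsChannel channel = ClientTlsChannel.newBuilder(rawChannel, sslContext)
+   *     .withWaitForCloseConfirmation(true)
+   *     .build();
+   * channel.close(); // now also waits for the peer's close_notify
+   * }</pre>
+   *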

Even when this behavior is enabled, the close operation will not propagate any {@link
+   * IOException} thrown during the TLS close exchange; it will just proceed to close the
+   * underlying channel.
+   *

Default is to not wait and close immediately. The proper closing procedure can be initiated + * at any moment using {@link TlsChannel#shutdown()}. + * + * @param waitForCloseConfirmation whether to wait for close confirmation + * @return this object + * @see TlsChannel#shutdown() + */ + public T withWaitForCloseConfirmation(boolean waitForCloseConfirmation) { + this.waitForCloseConfirmation = waitForCloseConfirmation; + return getThis(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/TlsChannelCallbackException.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/TlsChannelCallbackException.java new file mode 100644 index 00000000000..fa45b1a557a --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/TlsChannelCallbackException.java @@ -0,0 +1,34 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Original Work: MIT License, Copyright (c) [2015-2020] all contributors + * https://github.com/marianobarrios/tls-channel + */ + +package com.mongodb.internal.connection.tlschannel; + +import javax.net.ssl.SSLException; + +/** + * Thrown during {@link TlsChannel} handshake to indicate that a user-supplied function threw an + * exception. + */ +public class TlsChannelCallbackException extends SSLException { + private static final long serialVersionUID = 8491908031320425318L; + + public TlsChannelCallbackException(String message, Throwable throwable) { + super(message, throwable); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/TlsChannelFlowControlException.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/TlsChannelFlowControlException.java new file mode 100644 index 00000000000..c0b68320f43 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/TlsChannelFlowControlException.java @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Original Work: MIT License, Copyright (c) [2015-2020] all contributors + * https://github.com/marianobarrios/tls-channel + */ + +package com.mongodb.internal.connection.tlschannel; + +import java.io.IOException; +import java.nio.channels.ByteChannel; + +/** + * Base class for exceptions used to control flow. + * + *

Because exceptions of this class are not used to signal errors, they don't contain stack + * traces, to improve efficiency. + * + *
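+ * <p>A sketch of how calling code typically reacts to the concrete subclasses (the
+ * {@code channel}, {@code buffer}, {@code rawChannel} and {@code selector} names are
+ * assumptions, not part of this class):
+ *
+ * <pre>{@code
+ * try {
+ *   channel.read(buffer);
+ * } catch (NeedsReadException e) {
+ *   rawChannel.register(selector, SelectionKey.OP_READ); // wait for readability, then retry
+ * } catch (NeedsWriteException e) {
+ *   rawChannel.register(selector, SelectionKey.OP_WRITE); // wait for writability, then retry
+ * }
+ * }</pre>
+ *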

This class inherits from {@link IOException} as a compromise to allow {@link TlsChannel} to + * throw it while still implementing the {@link ByteChannel} interface. + */ +public abstract class TlsChannelFlowControlException extends IOException { + private static final long serialVersionUID = -2394919487958591959L; + + public TlsChannelFlowControlException() { + } + + /** For efficiency, override this method to do nothing. */ + @Override + public Throwable fillInStackTrace() { + return this; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/TrackingAllocator.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/TrackingAllocator.java new file mode 100644 index 00000000000..6b516aa5f0c --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/TrackingAllocator.java @@ -0,0 +1,83 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Original Work: MIT License, Copyright (c) [2015-2020] all contributors + * https://github.com/marianobarrios/tls-channel + */ + +package com.mongodb.internal.connection.tlschannel; + +import java.nio.ByteBuffer; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.LongAccumulator; +import java.util.concurrent.atomic.LongAdder; + +/** A decorating {@link BufferAllocator} that keeps statistics. 
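+ *
+ * <p>Illustrative usage sketch (the wrapped allocator choice is an assumption):
+ *
+ * <pre>{@code
+ * TrackingAllocator tracking = new TrackingAllocator(new HeapBufferAllocator());
+ * ByteBuffer buffer = tracking.allocate(1024);
+ * tracking.free(buffer);
+ * long peak = tracking.maxAllocation(); // high-water mark of live allocation
+ * }</pre>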
*/ +public class TrackingAllocator implements BufferAllocator { + + private final BufferAllocator impl; + + private final LongAdder bytesAllocatedAdder = new LongAdder(); + private final LongAdder bytesDeallocatedAdder = new LongAdder(); + private final AtomicLong currentAllocationSize = new AtomicLong(); + private final LongAccumulator maxAllocationSizeAcc = new LongAccumulator(Math::max, 0); + + private final LongAdder buffersAllocatedAdder = new LongAdder(); + private final LongAdder buffersDeallocatedAdder = new LongAdder(); + + public TrackingAllocator(BufferAllocator impl) { + this.impl = impl; + } + + public ByteBuffer allocate(int size) { + bytesAllocatedAdder.add(size); + currentAllocationSize.addAndGet(size); + buffersAllocatedAdder.increment(); + return impl.allocate(size); + } + + public void free(ByteBuffer buffer) { + int size = buffer.capacity(); + bytesDeallocatedAdder.add(size); + maxAllocationSizeAcc.accumulate(currentAllocationSize.longValue()); + currentAllocationSize.addAndGet(-size); + buffersDeallocatedAdder.increment(); + impl.free(buffer); + } + + public long bytesAllocated() { + return bytesAllocatedAdder.longValue(); + } + + public long bytesDeallocated() { + return bytesDeallocatedAdder.longValue(); + } + + public long currentAllocation() { + return currentAllocationSize.longValue(); + } + + public long maxAllocation() { + return maxAllocationSizeAcc.longValue(); + } + + public long buffersAllocated() { + return buffersAllocatedAdder.longValue(); + } + + public long buffersDeallocated() { + return buffersDeallocatedAdder.longValue(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/WouldBlockException.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/WouldBlockException.java new file mode 100644 index 00000000000..4e95b599ccf --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/WouldBlockException.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Original Work: MIT License, Copyright (c) [2015-2020] all contributors + * https://github.com/marianobarrios/tls-channel + */ + +package com.mongodb.internal.connection.tlschannel; + +/** + * Signals that some IO operation cannot continue because the channel is in non-blocking mode and + * some blocking would otherwise happen. + */ +public class WouldBlockException extends TlsChannelFlowControlException { + private static final long serialVersionUID = -1881728208118024998L; +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/async/AsynchronousTlsChannel.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/async/AsynchronousTlsChannel.java new file mode 100644 index 00000000000..04114318f92 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/async/AsynchronousTlsChannel.java @@ -0,0 +1,284 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Original Work: MIT License, Copyright (c) [2015-2020] all contributors
+ * https://github.com/marianobarrios/tls-channel
+ */
+
+package com.mongodb.internal.connection.tlschannel.async;
+
+import com.mongodb.internal.connection.tlschannel.TlsChannel;
+import com.mongodb.internal.connection.tlschannel.async.AsynchronousTlsChannelGroup.RegisteredSocket;
+import com.mongodb.internal.connection.tlschannel.impl.ByteBufferSet;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.AsynchronousByteChannel;
+import java.nio.channels.ClosedChannelException;
+import java.nio.channels.CompletionHandler;
+import java.nio.channels.SocketChannel;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+import static com.mongodb.internal.connection.tlschannel.async.AsynchronousTlsChannelGroup.ReadOperation;
+import static com.mongodb.internal.connection.tlschannel.async.AsynchronousTlsChannelGroup.WriteOperation;
+
+/** An {@link AsynchronousByteChannel} that works using {@link TlsChannel}s. */
+public class AsynchronousTlsChannel implements ExtendedAsynchronousByteChannel {
+
+  private class FutureReadResult extends CompletableFuture<Integer> {
+    ReadOperation op;
+
+    @Override
+    public boolean cancel(boolean mayInterruptIfRunning) {
+      super.cancel(mayInterruptIfRunning);
+      return group.doCancelRead(registeredSocket, op);
+    }
+  }
+
+  private class FutureWriteResult extends CompletableFuture<Integer> {
+    WriteOperation op;
+
+    @Override
+    public boolean cancel(boolean mayInterruptIfRunning) {
+      super.cancel(mayInterruptIfRunning);
+      return group.doCancelWrite(registeredSocket, op);
+    }
+  }
+
+  private final AsynchronousTlsChannelGroup group;
+  private final TlsChannel tlsChannel;
+  private final RegisteredSocket registeredSocket;
+
+  /**
+   * Initializes a new instance of this class.
+   *
+   * @param channelGroup group to associate the new channel to
+   * @param tlsChannel existing TLS channel to be used asynchronously
+   * @param socketChannel underlying socket
+   * @throws ClosedChannelException if any of the underlying channels are closed.
+   * @throws IllegalArgumentException if the socket is in blocking mode
+   */
+  public AsynchronousTlsChannel(
+      AsynchronousTlsChannelGroup channelGroup, TlsChannel tlsChannel, SocketChannel socketChannel)
+      throws ClosedChannelException, IllegalArgumentException {
+    if (!tlsChannel.isOpen() || !socketChannel.isOpen()) {
+      throw new ClosedChannelException();
+    }
+    if (socketChannel.isBlocking()) {
+      throw new IllegalArgumentException("socket channel must be in non-blocking mode");
+    }
+    this.group = channelGroup;
+    this.tlsChannel = tlsChannel;
+    this.registeredSocket = channelGroup.registerSocket(tlsChannel, socketChannel);
+  }
+
+  @Override
+  public <A> void read(ByteBuffer dst, A attach, CompletionHandler<Integer, ? super A> handler) {
+    checkReadOnly(dst);
+    if (!dst.hasRemaining()) {
+      completeWithZeroInt(attach, handler);
+      return;
+    }
+    group.startRead(
+        registeredSocket,
+        new ByteBufferSet(dst),
+        0,
+        TimeUnit.MILLISECONDS,
+        c -> group.submit(() -> handler.completed((int) c, attach)),
+        e -> group.submit(() -> handler.failed(e, attach)));
+  }
+
+  @Override
+  public <A> void read(
+      ByteBuffer dst,
+      long timeout,
+      TimeUnit unit,
+      A attach,
+      CompletionHandler<Integer, ? super A> handler) {
+    checkReadOnly(dst);
+    if (!dst.hasRemaining()) {
+      completeWithZeroInt(attach, handler);
+      return;
+    }
+    group.startRead(
+        registeredSocket,
+        new ByteBufferSet(dst),
+        timeout,
+        unit,
+        c -> group.submit(() -> handler.completed((int) c, attach)),
+        e -> group.submit(() -> handler.failed(e, attach)));
+  }
+
+  @Override
+  public <A> void read(
+      ByteBuffer[] dsts,
+      int offset,
+      int length,
+      long timeout,
+      TimeUnit unit,
+      A attach,
+      CompletionHandler<Long, ? super A> handler) {
+    ByteBufferSet bufferSet = new ByteBufferSet(dsts, offset, length);
+    if (bufferSet.isReadOnly()) {
+      throw new IllegalArgumentException("buffer is read-only");
+    }
+    if (!bufferSet.hasRemaining()) {
+      completeWithZeroLong(attach, handler);
+      return;
+    }
+    group.startRead(
+        registeredSocket,
+        bufferSet,
+        timeout,
+        unit,
+        c -> group.submit(() -> handler.completed(c, attach)),
+        e -> group.submit(() -> handler.failed(e, attach)));
+  }
+
+  @Override
+  public Future<Integer> read(ByteBuffer dst) {
+    checkReadOnly(dst);
+    if (!dst.hasRemaining()) {
+      return CompletableFuture.completedFuture(0);
+    }
+    FutureReadResult future = new FutureReadResult();
+    ReadOperation op =
+        group.startRead(
+            registeredSocket,
+            new ByteBufferSet(dst),
+            0,
+            TimeUnit.MILLISECONDS,
+            c -> future.complete((int) c),
+            future::completeExceptionally);
+    future.op = op;
+    return future;
+  }
+
+  private void checkReadOnly(ByteBuffer dst) {
+    if (dst.isReadOnly()) {
+      throw new IllegalArgumentException("buffer is read-only");
+    }
+  }
+
+  @Override
+  public <A> void write(ByteBuffer src, A attach, CompletionHandler<Integer, ? super A> handler) {
+    if (!src.hasRemaining()) {
+      completeWithZeroInt(attach, handler);
+      return;
+    }
+    group.startWrite(
+        registeredSocket,
+        new ByteBufferSet(src),
+        0,
+        TimeUnit.MILLISECONDS,
+        c -> group.submit(() -> handler.completed((int) c, attach)),
+        e -> group.submit(() -> handler.failed(e, attach)));
+  }
+
+  @Override
+  public <A> void write(
+      ByteBuffer src,
+      long timeout,
+      TimeUnit unit,
+      A attach,
+      CompletionHandler<Integer, ? super A> handler) {
+    if (!src.hasRemaining()) {
+      completeWithZeroInt(attach, handler);
+      return;
+    }
+    group.startWrite(
+        registeredSocket,
+        new ByteBufferSet(src),
+        timeout,
+        unit,
+        c -> group.submit(() -> handler.completed((int) c, attach)),
+        e -> group.submit(() -> handler.failed(e, attach)));
+  }
+
+  @Override
+  public <A> void write(
+      ByteBuffer[] srcs,
+      int offset,
+      int length,
+      long timeout,
+      TimeUnit unit,
+      A attach,
+      CompletionHandler<Long, ? super A> handler) {
+    ByteBufferSet bufferSet = new ByteBufferSet(srcs, offset, length);
+    if (!bufferSet.hasRemaining()) {
+      completeWithZeroLong(attach, handler);
+      return;
+    }
+    group.startWrite(
+        registeredSocket,
+        bufferSet,
+        timeout,
+        unit,
+        c -> group.submit(() -> handler.completed(c, attach)),
+        e -> group.submit(() -> handler.failed(e, attach)));
+  }
+
+  @Override
+  public Future<Integer> write(ByteBuffer src) {
+    if (!src.hasRemaining()) {
+      return CompletableFuture.completedFuture(0);
+    }
+    FutureWriteResult future = new FutureWriteResult();
+    WriteOperation op =
+        group.startWrite(
+            registeredSocket,
+            new ByteBufferSet(src),
+            0,
+            TimeUnit.MILLISECONDS,
+            c -> future.complete((int) c),
+            future::completeExceptionally);
+    future.op = op;
+    return future;
+  }
+
+  private <A> void completeWithZeroInt(A attach, CompletionHandler<Integer, ? super A> handler) {
+    group.submit(() -> handler.completed(0, attach));
+  }
+
+  private <A> void completeWithZeroLong(A attach, CompletionHandler<Long, ? super A> handler) {
+    group.submit(() -> handler.completed(0L, attach));
+  }
+
+  /**
+   * Tells whether or not this channel is open.
+   *
+   * @return true if, and only if, this channel is open
+   */
+  @Override
+  public boolean isOpen() {
+    return tlsChannel.isOpen();
+  }
+
+  /**
+   * Closes this channel.
+   *

This method will close the underlying {@link TlsChannel} and also deregister it from its + * group. + * + * @throws IOException If an I/O error occurs + */ + @Override + public void close() throws IOException { + tlsChannel.close(); + registeredSocket.close(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/async/AsynchronousTlsChannelGroup.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/async/AsynchronousTlsChannelGroup.java new file mode 100644 index 00000000000..d9b1420a6e3 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/async/AsynchronousTlsChannelGroup.java @@ -0,0 +1,835 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Original Work: MIT License, Copyright (c) [2015-2020] all contributors + * https://github.com/marianobarrios/tls-channel + */ + +package com.mongodb.internal.connection.tlschannel.async; + +import com.mongodb.internal.connection.tlschannel.NeedsReadException; +import com.mongodb.internal.connection.tlschannel.NeedsTaskException; +import com.mongodb.internal.connection.tlschannel.NeedsWriteException; +import com.mongodb.internal.connection.tlschannel.TlsChannel; +import com.mongodb.internal.connection.tlschannel.impl.ByteBufferSet; +import com.mongodb.internal.connection.tlschannel.util.Util; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.lang.Nullable; + +import java.io.IOException; +import java.nio.channels.CancelledKeyException; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.InterruptedByTimeoutException; +import java.nio.channels.ReadPendingException; +import java.nio.channels.SelectionKey; +import java.nio.channels.Selector; +import java.nio.channels.ShutdownChannelGroupException; +import java.nio.channels.SocketChannel; +import java.nio.channels.WritePendingException; +import java.util.Iterator; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.LongAdder; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Consumer; +import java.util.function.LongConsumer; + +import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException; +import static java.lang.String.format; + +/** + * This class encapsulates the infrastructure for running {@link AsynchronousTlsChannel}s. 
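+ * <p>An illustrative construction sketch (the {@code tlsChannel} and non-blocking
+ * {@code socketChannel} variables are assumptions):
+ *
+ * <pre>{@code
+ * AsynchronousTlsChannelGroup group = new AsynchronousTlsChannelGroup(null); // default executor
+ * AsynchronousTlsChannel asyncChannel =
+ *     new AsynchronousTlsChannel(group, tlsChannel, socketChannel); // throws if either is closed
+ * }</pre>
+ *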
<p>Each instance of this class is a singleton-like object that manages a thread pool that
+ * makes it possible to run a group of asynchronous channels.
+ */
+public class AsynchronousTlsChannelGroup {
+
+  private static final Logger LOGGER = Loggers.getLogger("connection.tls");
+
+  /** The main executor of the group has a queue, whose size is a multiple of the number of CPUs. */
+  private static final int queueLengthMultiplier = 32;
+
+  private static final AtomicInteger globalGroupCount = new AtomicInteger();
+
+  class RegisteredSocket {
+
+    final TlsChannel tlsChannel;
+    final SocketChannel socketChannel;
+
+    /**
+     * Used to wait until the channel is effectively in the selector (which happens asynchronously
+     * to the initial registration).
+     */
+    final CountDownLatch registered = new CountDownLatch(1);
+
+    SelectionKey key;
+
+    /** Protects {@link #readOperation} reference and instance. */
+    final Lock readLock = new ReentrantLock();
+
+    /** Protects {@link #writeOperation} reference and instance. */
+    final Lock writeLock = new ReentrantLock();
+
+    /** Current read operation, if not null */
+    ReadOperation readOperation;
+
+    /** Current write operation, if not null */
+    WriteOperation writeOperation;
+
+    /** Bitwise union of pending operations to be registered in the selector */
+    final AtomicInteger pendingOps = new AtomicInteger();
+
+    RegisteredSocket(TlsChannel tlsChannel, SocketChannel socketChannel) {
+      this.tlsChannel = tlsChannel;
+      this.socketChannel = socketChannel;
+    }
+
+    public void close() {
+      if (key != null) {
+        key.cancel();
+      }
+      /*
+       * Actual de-registration from the selector will happen asynchronously.
+       */
+      selector.wakeup();
+    }
+  }
+
+  private abstract static class Operation {
+    final ByteBufferSet bufferSet;
+    final LongConsumer onSuccess;
+    final Consumer<Throwable> onFailure;
+    Future<?> timeoutFuture;
+
+    Operation(ByteBufferSet bufferSet, LongConsumer onSuccess, Consumer<Throwable> onFailure) {
+      this.bufferSet = bufferSet;
+      this.onSuccess = onSuccess;
+      this.onFailure = onFailure;
+    }
+  }
+
+  static final class ReadOperation extends Operation {
+    ReadOperation(ByteBufferSet bufferSet, LongConsumer onSuccess, Consumer<Throwable> onFailure) {
+      super(bufferSet, onSuccess, onFailure);
+    }
+  }
+
+  static final class WriteOperation extends Operation {
+
+    /**
+     * Because a write operation can flag a block (needs read/write) even after the source buffer
+     * was read from, we need to accumulate consumed bytes.
+     */
+    long consumesBytes = 0;
+
+    WriteOperation(ByteBufferSet bufferSet, LongConsumer onSuccess, Consumer<Throwable> onFailure) {
+      super(bufferSet, onSuccess, onFailure);
+    }
+  }
+
+  private final int id = globalGroupCount.getAndIncrement();
+
+  /**
+   * With the intention of being sparing with warnings, use this flag to ensure that we only log
+   * the warning about needed tasks once.
+ */ + private final AtomicBoolean loggedTaskWarning = new AtomicBoolean(); + + private final Selector selector; + + private final ExecutorService executor; + + private final ScheduledThreadPoolExecutor timeoutExecutor = + new ScheduledThreadPoolExecutor( + 1, + runnable -> + new Thread(runnable, format("async-channel-group-%d-timeout-thread", id))); + + private final Thread selectorThread = + new Thread(this::loop, format("async-channel-group-%d-selector", id)); + + private final ConcurrentLinkedQueue pendingRegistrations = + new ConcurrentLinkedQueue<>(); + + private enum Shutdown { + No, + Wait, + Immediate + } + + private volatile Shutdown shutdown = Shutdown.No; + + private final LongAdder selectionCount = new LongAdder(); + + private final LongAdder startedReads = new LongAdder(); + private final LongAdder startedWrites = new LongAdder(); + private final LongAdder successfulReads = new LongAdder(); + private final LongAdder successfulWrites = new LongAdder(); + private final LongAdder failedReads = new LongAdder(); + private final LongAdder failedWrites = new LongAdder(); + private final LongAdder cancelledReads = new LongAdder(); + private final LongAdder cancelledWrites = new LongAdder(); + + private final ConcurrentHashMap registrations = new ConcurrentHashMap<>(); + + private final LongAdder currentReads = new LongAdder(); + private final LongAdder currentWrites = new LongAdder(); + + /** + * Creates an instance of this class. + */ + public AsynchronousTlsChannelGroup(@Nullable final ExecutorService executorService) { + try { + selector = Selector.open(); + } catch (IOException e) { + throw new RuntimeException(e); + } + timeoutExecutor.setRemoveOnCancelPolicy(true); + if (executorService != null) { + this.executor = executorService; + } else { + int nThreads = Runtime.getRuntime().availableProcessors(); + this.executor = new ThreadPoolExecutor( + nThreads, + nThreads, + 0, + TimeUnit.MILLISECONDS, + new LinkedBlockingQueue<>(nThreads * queueLengthMultiplier), + runnable -> new Thread(runnable, format("async-channel-group-%d-handler-executor", id)), + new ThreadPoolExecutor.CallerRunsPolicy()); + } + selectorThread.start(); + } + + void submit(final Runnable r) { + executor.submit(r); + } + + RegisteredSocket registerSocket(TlsChannel reader, SocketChannel socketChannel) { + if (shutdown != Shutdown.No) { + throw new ShutdownChannelGroupException(); + } + RegisteredSocket socket = new RegisteredSocket(reader, socketChannel); + pendingRegistrations.add(socket); + selector.wakeup(); + return socket; + } + + boolean doCancelRead(RegisteredSocket socket, ReadOperation op) { + socket.readLock.lock(); + try { + if (op != socket.readOperation) { + return false; + } + socket.readOperation = null; + cancelledReads.increment(); + currentReads.decrement(); + return true; + } finally { + socket.readLock.unlock(); + } + } + + boolean doCancelWrite(RegisteredSocket socket, WriteOperation op) { + socket.writeLock.lock(); + try { + if (op != socket.writeOperation) { + return false; + } + socket.writeOperation = null; + cancelledWrites.increment(); + currentWrites.decrement(); + return true; + } finally { + socket.writeLock.unlock(); + } + } + + ReadOperation startRead( + RegisteredSocket socket, + ByteBufferSet buffer, + long timeout, + TimeUnit unit, + LongConsumer onSuccess, + Consumer onFailure) + throws ReadPendingException { + checkTerminated(); + Util.assertTrue(buffer.hasRemaining()); + waitForSocketRegistration(socket); + socket.readLock.lock(); + try { + if (socket.readOperation != 
null) { + throw new ReadPendingException(); + } + ReadOperation op = new ReadOperation(buffer, onSuccess, onFailure); + + startedReads.increment(); + currentReads.increment(); + + if (!registrations.containsKey(socket)) { + op.onFailure.accept(new ClosedChannelException()); + failedReads.increment(); + currentReads.decrement(); + return op; + } + + /* + * we do not try to outsmart the TLS state machine and register for both IO operations for each new socket + * operation + */ + socket.pendingOps.set(SelectionKey.OP_WRITE | SelectionKey.OP_READ); + if (timeout != 0) { + op.timeoutFuture = + timeoutExecutor.schedule( + () -> { + boolean success = doCancelRead(socket, op); + if (success) { + op.onFailure.accept(new InterruptedByTimeoutException()); + } + }, + timeout, + unit); + } + socket.readOperation = op; + } finally { + socket.readLock.unlock(); + } + selector.wakeup(); + return socket.readOperation; + } + + WriteOperation startWrite( + RegisteredSocket socket, + ByteBufferSet buffer, + long timeout, + TimeUnit unit, + LongConsumer onSuccess, + Consumer onFailure) + throws WritePendingException { + checkTerminated(); + Util.assertTrue(buffer.hasRemaining()); + waitForSocketRegistration(socket); + socket.writeLock.lock(); + try { + if (socket.writeOperation != null) { + throw new WritePendingException(); + } + WriteOperation op = new WriteOperation(buffer, onSuccess, onFailure); + + startedWrites.increment(); + currentWrites.increment(); + + if (!registrations.containsKey(socket)) { + op.onFailure.accept(new ClosedChannelException()); + failedWrites.increment(); + currentWrites.decrement(); + return op; + } + + /* + * we do not try to outsmart the TLS state machine and register for both IO operations for each new socket + * operation + */ + socket.pendingOps.set(SelectionKey.OP_WRITE | SelectionKey.OP_READ); + if (timeout != 0) { + op.timeoutFuture = + timeoutExecutor.schedule( + () -> { + boolean success = doCancelWrite(socket, op); + if (success) { + op.onFailure.accept(new InterruptedByTimeoutException()); + } + }, + timeout, + unit); + } + socket.writeOperation = op; + } finally { + socket.writeLock.unlock(); + } + selector.wakeup(); + return socket.writeOperation; + } + + private void checkTerminated() { + if (isTerminated()) { + throw new ShutdownChannelGroupException(); + } + } + + private void waitForSocketRegistration(RegisteredSocket socket) { + try { + socket.registered.await(); + } catch (InterruptedException e) { + throw interruptAndCreateMongoInterruptedException(null, e); + } + } + + private void loop() { + try { + while (shutdown == Shutdown.No + || shutdown == Shutdown.Wait + && (!pendingRegistrations.isEmpty() || !registrations.isEmpty())) { + // most state-changing operations will wake the selector up, however, asynchronous closings + // of the channels won't, so we have to timeout to allow checking those cases + int c = selector.select(100); // block + selectionCount.increment(); + // avoid unnecessary creation of iterator object + if (c > 0) { + Iterator it = selector.selectedKeys().iterator(); + while (it.hasNext()) { + SelectionKey key = it.next(); + it.remove(); + try { + key.interestOps(0); + } catch (CancelledKeyException e) { + // can happen when channels are closed with pending operations + continue; + } + RegisteredSocket socket = (RegisteredSocket) key.attachment(); + processRead(socket); + processWrite(socket); + } + } + registerPendingSockets(); + processPendingInterests(); + checkClosings(); + } + } catch (Throwable e) { + LOGGER.error("error in selector 
loop", e); + } finally { + executor.shutdown(); + // use shutdownNow to stop delayed tasks + timeoutExecutor.shutdownNow(); + try { + selector.close(); + } catch (IOException e) { + LOGGER.warn("error closing selector: " + e.getMessage()); + } + checkClosings(); + } + } + + private void processPendingInterests() { + for (SelectionKey key : selector.keys()) { + RegisteredSocket socket = (RegisteredSocket) key.attachment(); + int pending = socket.pendingOps.getAndSet(0); + if (pending != 0) { + try { + key.interestOps(key.interestOps() | pending); + } catch (CancelledKeyException e) { + // can happen when channels are closed with pending operations + } + } + } + } + + private void processWrite(RegisteredSocket socket) { + socket.writeLock.lock(); + try { + WriteOperation op = socket.writeOperation; + if (op != null) { + executor.execute( + () -> { + try { + doWrite(socket, op); + } catch (Throwable e) { + LOGGER.error("error in operation", e); + } + }); + } + } finally { + socket.writeLock.unlock(); + } + } + + private void processRead(RegisteredSocket socket) { + socket.readLock.lock(); + try { + ReadOperation op = socket.readOperation; + if (op != null) { + executor.execute( + () -> { + try { + doRead(socket, op); + } catch (Throwable e) { + LOGGER.error("error in operation", e); + } + }); + } + } finally { + socket.readLock.unlock(); + } + } + + private void doWrite(RegisteredSocket socket, WriteOperation op) { + socket.writeLock.lock(); + try { + if (socket.writeOperation != op) { + return; + } + try { + long before = op.bufferSet.remaining(); + try { + writeHandlingTasks(socket, op); + } finally { + long c = before - op.bufferSet.remaining(); + Util.assertTrue(c >= 0); + op.consumesBytes += c; + } + socket.writeOperation = null; + if (op.timeoutFuture != null) { + op.timeoutFuture.cancel(false); + } + op.onSuccess.accept(op.consumesBytes); + successfulWrites.increment(); + currentWrites.decrement(); + } catch (NeedsReadException e) { + socket.pendingOps.accumulateAndGet(SelectionKey.OP_READ, (a, b) -> a | b); + selector.wakeup(); + } catch (NeedsWriteException e) { + socket.pendingOps.accumulateAndGet(SelectionKey.OP_WRITE, (a, b) -> a | b); + selector.wakeup(); + } catch (IOException e) { + if (socket.writeOperation == op) { + socket.writeOperation = null; + } + if (op.timeoutFuture != null) { + op.timeoutFuture.cancel(false); + } + op.onFailure.accept(e); + failedWrites.increment(); + currentWrites.decrement(); + } + } finally { + socket.writeLock.unlock(); + } + } + + /** + * Intended use of the channel group is with sockets that run tasks internally, but out of + * tolerance, run tasks in thread in case the socket does not. 
+ */ + private void writeHandlingTasks(RegisteredSocket socket, WriteOperation op) throws IOException { + while (true) { + try { + socket.tlsChannel.write(op.bufferSet.array, op.bufferSet.offset, op.bufferSet.length); + return; + } catch (NeedsTaskException e) { + warnAboutNeedTask(); + e.getTask().run(); + } + } + } + + private void warnAboutNeedTask() { + if (!loggedTaskWarning.getAndSet(true)) { + LOGGER.warn(format( + "caught %s; channels used in asynchronous groups should run tasks themselves; " + + "although task is being dealt with anyway, consider configuring channels properly", + NeedsTaskException.class.getName())); + } + } + + private void doRead(RegisteredSocket socket, ReadOperation op) { + socket.readLock.lock(); + try { + if (socket.readOperation != op) { + return; + } + try { + Util.assertTrue(op.bufferSet.hasRemaining()); + long c = readHandlingTasks(socket, op); + Util.assertTrue(c > 0 || c == -1); + socket.readOperation = null; + if (op.timeoutFuture != null) { + op.timeoutFuture.cancel(false); + } + op.onSuccess.accept(c); + successfulReads.increment(); + currentReads.decrement(); + } catch (NeedsReadException e) { + socket.pendingOps.accumulateAndGet(SelectionKey.OP_READ, (a, b) -> a | b); + selector.wakeup(); + } catch (NeedsWriteException e) { + socket.pendingOps.accumulateAndGet(SelectionKey.OP_WRITE, (a, b) -> a | b); + selector.wakeup(); + } catch (IOException e) { + if (socket.readOperation == op) { + socket.readOperation = null; + } + if (op.timeoutFuture != null) { + op.timeoutFuture.cancel(false); + } + op.onFailure.accept(e); + failedReads.increment(); + currentReads.decrement(); + } + } finally { + socket.readLock.unlock(); + } + } + + /** @see #writeHandlingTasks */ + private long readHandlingTasks(RegisteredSocket socket, ReadOperation op) throws IOException { + while (true) { + try { + return socket.tlsChannel.read(op.bufferSet.array, op.bufferSet.offset, op.bufferSet.length); + } catch (NeedsTaskException e) { + warnAboutNeedTask(); + e.getTask().run(); + } + } + } + + private void registerPendingSockets() { + RegisteredSocket socket; + while ((socket = pendingRegistrations.poll()) != null) { + try { + socket.key = socket.socketChannel.register(selector, 0, socket); + registrations.put(socket, true); + } catch (ClosedChannelException e) { + // can happen when channels are closed right after creation + } finally { + // decrement the count of the latch even in case of exceptions, so the waiting thread + // is unlocked; it will have to check the result, though + socket.registered.countDown(); + } + } + } + + /** + * Channels that are closed asynchronously are silently removed from selectors. This method will + * check them using the internal catalog and do the proper cleanup. 
+ */ + private void checkClosings() { + for (RegisteredSocket socket : registrations.keySet()) { + if (!socket.key.isValid() || shutdown == Shutdown.Immediate) { + registrations.remove(socket); + failCurrentRead(socket); + failCurrentWrite(socket); + } + } + } + + private void failCurrentRead(RegisteredSocket socket) { + socket.readLock.lock(); + try { + if (socket.readOperation != null) { + socket.readOperation.onFailure.accept(new ClosedChannelException()); + if (socket.readOperation.timeoutFuture != null) { + socket.readOperation.timeoutFuture.cancel(false); + } + socket.readOperation = null; + failedReads.increment(); + currentReads.decrement(); + } + } finally { + socket.readLock.unlock(); + } + } + + private void failCurrentWrite(RegisteredSocket socket) { + socket.writeLock.lock(); + try { + if (socket.writeOperation != null) { + socket.writeOperation.onFailure.accept(new ClosedChannelException()); + if (socket.writeOperation.timeoutFuture != null) { + socket.writeOperation.timeoutFuture.cancel(false); + } + socket.writeOperation = null; + failedWrites.increment(); + currentWrites.decrement(); + } + } finally { + socket.writeLock.unlock(); + } + } + + /** + * Whether either {@link #shutdown()} or {@link #shutdownNow()} have been called. + * + * @return {@code true} if this group has initiated shutdown and {@code false} if the group is + * active + */ + public boolean isShutdown() { + return shutdown != Shutdown.No; + } + + /** + * Starts the shutdown process. New sockets cannot be registered, already registered one continue + * operating normally until they are closed. + */ + public void shutdown() { + shutdown = Shutdown.Wait; + selector.wakeup(); + } + + /** + * Shuts down this channel group immediately. All registered sockets are closed, pending + * operations may or may not finish. + */ + public void shutdownNow() { + shutdown = Shutdown.Immediate; + selector.wakeup(); + } + + /** + * Whether this channel group was shut down, and all pending tasks have drained. + * + * @return whether the channel is terminated + */ + public boolean isTerminated() { + return executor.isTerminated(); + } + + /** + * Blocks until all registers sockets are closed and pending tasks finished execution after a + * shutdown request, or the timeout occurs, or the current thread is interrupted, whichever + * happens first. + * + * @param timeout the maximum time to wait + * @param unit the time unit of the timeout argument + * @return {@code true} if this group terminated and {@code false} if the group elapsed before + * termination + * @throws InterruptedException if interrupted while waiting + */ + public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { + return executor.awaitTermination(timeout, unit); + } + + long getSelectionCount() { + return selectionCount.longValue(); + } + + /** + * Return the total number of read operations that were started. + * + * @return number of operations + */ + public long getStartedReadCount() { + return startedReads.longValue(); + } + + /** + * Return the total number of write operations that were started. + * + * @return number of operations + */ + public long getStartedWriteCount() { + return startedWrites.longValue(); + } + + /** + * Return the total number of read operations that succeeded. + * + * @return number of operations + */ + public long getSuccessfulReadCount() { + return successfulReads.longValue(); + } + + /** + * Return the total number of write operations that succeeded. 
+ * + * @return number of operations + */ + public long getSuccessfulWriteCount() { + return successfulWrites.longValue(); + } + + /** + * Return the total number of read operations that failed. + * + * @return number of operations + */ + public long getFailedReadCount() { + return failedReads.longValue(); + } + + /** + * Return the total number of write operations that failed. + * + * @return number of operations + */ + public long getFailedWriteCount() { + return failedWrites.longValue(); + } + + /** + * Return the total number of read operations that were cancelled. + * + * @return number of operations + */ + public long getCancelledReadCount() { + return cancelledReads.longValue(); + } + + /** + * Return the total number of write operations that were cancelled. + * + * @return number of operations + */ + public long getCancelledWriteCount() { + return cancelledWrites.longValue(); + } + + /** + * Returns the current number of active read operations. + * + * @return number of operations + */ + public long getCurrentReadCount() { + return currentReads.longValue(); + } + + /** + * Returns the current number of active write operations. + * + * @return number of operations + */ + public long getCurrentWriteCount() { + return currentWrites.longValue(); + } + + /** + * Returns the current number of registered sockets. + * + * @return number of sockets + */ + public long getCurrentRegistrationCount() { + return registrations.mappingCount(); + } + + /** + * Returns the timeout executor used by this channel group. + * + * @return the timeout executor + */ + public ScheduledThreadPoolExecutor getTimeoutExecutor() { + return timeoutExecutor; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/async/ExtendedAsynchronousByteChannel.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/async/ExtendedAsynchronousByteChannel.java new file mode 100644 index 00000000000..63abc8e66b8 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/async/ExtendedAsynchronousByteChannel.java @@ -0,0 +1,237 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Original Work: MIT License, Copyright (c) [2015-2020] all contributors + * https://github.com/marianobarrios/tls-channel + */ + +package com.mongodb.internal.connection.tlschannel.async; + +import java.nio.ByteBuffer; +import java.nio.channels.AsynchronousByteChannel; +import java.nio.channels.CompletionHandler; +import java.nio.channels.InterruptedByTimeoutException; +import java.nio.channels.ReadPendingException; +import java.nio.channels.ShutdownChannelGroupException; +import java.nio.channels.WritePendingException; +import java.util.concurrent.TimeUnit; + +/** + * This interface extends {@link AsynchronousByteChannel} adding optional timeouts and scattering + * and gathering methods. These additions are analogous to the ones made by {@link + * java.nio.channels.AsynchronousSocketChannel}. 
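+ *
+ * <p>An illustrative sketch of a timed read ({@code channel} and {@code buffer} are
+ * assumptions, not part of this interface):
+ *
+ * <pre>{@code
+ * channel.read(buffer, 5, TimeUnit.SECONDS, null, new CompletionHandler<Integer, Void>() {
+ *   public void completed(Integer count, Void attachment) {
+ *     // process "count" bytes now available in "buffer"
+ *   }
+ *   public void failed(Throwable exc, Void attachment) {
+ *     // exc may be an InterruptedByTimeoutException
+ *   }
+ * });
+ * }</pre>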
+ */ +public interface ExtendedAsynchronousByteChannel extends AsynchronousByteChannel { + + /** + * Reads a sequence of bytes from this channel into the given buffer. + * + *

This method initiates an asynchronous read operation to read a sequence of bytes from this + * channel into the given buffer. The {@code handler} parameter is a completion handler that is + * invoked when the read operation completes (or fails). The result passed to the completion + * handler is the number of bytes read or {@code -1} if no bytes could be read because the channel + * has reached end-of-stream. + * + *

If a timeout is specified and the timeout elapses before the operation completes then the + * operation completes with the exception {@link InterruptedByTimeoutException}. Where a timeout + * occurs, and the implementation cannot guarantee that bytes have not been read, or will not be + * read from the channel into the given buffer, then further attempts to read from the channel + * will cause an unspecific runtime exception to be thrown. + * + *

Otherwise this method works in the same manner as the {@link + * AsynchronousByteChannel#read(ByteBuffer,Object,CompletionHandler)} method. + * + * @param The type for the object to the attached to the operation + * @param dst The buffer into which bytes are to be transferred + * @param timeout The maximum time for the I/O operation to complete + * @param unit The time unit of the {@code timeout} argument + * @param attach The object to attach to the I/O operation; can be {@code null} + * @param handler The handler for consuming the result + * @throws IllegalArgumentException If the buffer is read-only + * @throws ReadPendingException If a read operation is already in progress on this channel + * @throws ShutdownChannelGroupException If the channel group has terminated + */ + void read( + ByteBuffer dst, + long timeout, + TimeUnit unit, + A attach, + CompletionHandler handler); + + /** + * Reads a sequence of bytes from this channel into a subsequence of the given buffers. This + * operation, sometimes called a scattering read, is often useful when implementing + * network protocols that group data into segments consisting of one or more fixed-length headers + * followed by a variable-length body. The {@code handler} parameter is a completion handler that + * is invoked when the read operation completes (or fails). The result passed to the completion + * handler is the number of bytes read or {@code -1} if no bytes could be read because the channel + * has reached end-of-stream. + * + *

This method initiates a read of up to r bytes from this channel, where r is + * the total number of bytes remaining in the specified subsequence of the given buffer array, + * that is, + * + *

+ * + *
+   * dsts[offset].remaining()
+   *     + dsts[offset+1].remaining()
+   *     + ... + dsts[offset+length-1].remaining()
+ * + *
+ * + * at the moment that the read is attempted. + * + *

Suppose that a byte sequence of length n is read, where 0  + * < n <= r. Up to the first + * dsts[offset].remaining() bytes of this sequence are transferred into buffer + * dsts[offset], up to the next dsts[offset+1].remaining() bytes are + * transferred into buffer dsts[offset+1], and so forth, until the entire byte + * sequence is transferred into the given buffers. As many bytes as possible are transferred into + * each buffer, hence the final position of each updated buffer, except the last updated buffer, + * is guaranteed to be equal to that buffer's limit. The underlying operating system may impose a + * limit on the number of buffers that may be used in an I/O operation. Where the number of + * buffers (with bytes remaining), exceeds this limit, then the I/O operation is performed with + * the maximum number of buffers allowed by the operating system. + * + *
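+   * <p>For illustration, a header/body scattering read with a timeout ({@code channel},
+   * {@code handler} and the buffer sizes are assumptions):
+   *
+   * <pre>{@code
+   * ByteBuffer header = ByteBuffer.allocate(16);
+   * ByteBuffer body = ByteBuffer.allocate(64 * 1024);
+   * channel.read(new ByteBuffer[] {header, body}, 0, 2, 30, TimeUnit.SECONDS, null, handler);
+   * }</pre>
+   *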

If a timeout is specified and the timeout elapses before the operation completes then it + * completes with the exception {@link InterruptedByTimeoutException}. Where a timeout occurs, and + * the implementation cannot guarantee that bytes have not been read, or will not be read from the + * channel into the given buffers, then further attempts to read from the channel will cause an + * unspecific runtime exception to be thrown. + * + * @param The type for the object to the attached to the operation + * @param dsts The buffers into which bytes are to be transferred + * @param offset The offset within the buffer array of the first buffer into which bytes are to be + * transferred; must be non-negative and no larger than {@code dsts.length} + * @param length The maximum number of buffers to be accessed; must be non-negative and no larger + * than {@code dsts.length - offset} + * @param timeout The maximum time for the I/O operation to complete + * @param unit The time unit of the {@code timeout} argument + * @param attach The object to attach to the I/O operation; can be {@code null} + * @param handler The handler for consuming the result + * @throws IndexOutOfBoundsException If the pre-conditions for the {@code offset} and {@code + * length} parameter aren't met + * @throws IllegalArgumentException If the buffer is read-only + * @throws ReadPendingException If a read operation is already in progress on this channel + * @throws ShutdownChannelGroupException If the channel group has terminated + */ + void read( + ByteBuffer[] dsts, + int offset, + int length, + long timeout, + TimeUnit unit, + A attach, + CompletionHandler handler); + + /** + * Writes a sequence of bytes to this channel from the given buffer. + * + *

This method initiates an asynchronous write operation to write a sequence of bytes to this + * channel from the given buffer. The {@code handler} parameter is a completion handler that is + * invoked when the write operation completes (or fails). The result passed to the completion + * handler is the number of bytes written. + * + *

<p>If a timeout is specified and the timeout elapses before the operation completes then it
+   * completes with the exception {@link InterruptedByTimeoutException}. Where a timeout occurs, and
+   * the implementation cannot guarantee that bytes have not been written, or will not be written to
+   * the channel from the given buffer, then further attempts to write to the channel will cause an
+   * unspecific runtime exception to be thrown.
+   *

<p>Otherwise this method works in the same manner as the {@link
+   * AsynchronousByteChannel#write(ByteBuffer,Object,CompletionHandler)} method.
+   *
+   * @param <A> The type of the object to be attached to the operation
+   * @param src The buffer from which bytes are to be retrieved
+   * @param timeout The maximum time for the I/O operation to complete
+   * @param unit The time unit of the {@code timeout} argument
+   * @param attach The object to attach to the I/O operation; can be {@code null}
+   * @param handler The handler for consuming the result
+   * @throws WritePendingException If a write operation is already in progress on this channel
+   * @throws ShutdownChannelGroupException If the channel group has terminated
+   */
+  <A> void write(
+      ByteBuffer src,
+      long timeout,
+      TimeUnit unit,
+      A attach,
+      CompletionHandler<Integer, ? super A> handler);
+
+  /**
+   * Writes a sequence of bytes to this channel from a subsequence of the given buffers. This
+   * operation, sometimes called a <i>gathering write</i>, is often useful when implementing
+   * network protocols that group data into segments consisting of one or more fixed-length headers
+   * followed by a variable-length body. The {@code handler} parameter is a completion handler that
+   * is invoked when the write operation completes (or fails). The result passed to the completion
+   * handler is the number of bytes written.
+   *

<p>This method initiates a write of up to <i>r</i> bytes to this channel, where <i>r</i> is the
+   * total number of bytes remaining in the specified subsequence of the given buffer array, that
+   * is,
+   *

+   * <blockquote>
+   *
+   * <pre>
+   * srcs[offset].remaining()
+   *     + srcs[offset+1].remaining()
+   *     + ... + srcs[offset+length-1].remaining()
+   * </pre>
+   *
+   * </blockquote>
+   *
+   * at the moment that the write is attempted.
+   *

<p>Suppose that a byte sequence of length <i>n</i> is written, where {@code 0 < n <= r}. Up to
+   * the first {@code srcs[offset].remaining()} bytes of this sequence are written from buffer
+   * {@code srcs[offset]}, up to the next {@code srcs[offset+1].remaining()} bytes are written
+   * from buffer {@code srcs[offset+1]}, and so forth, until the entire byte sequence is
+   * written. As many bytes as possible are written from each buffer, hence the final position of
+   * each updated buffer, except the last updated buffer, is guaranteed to be equal to that
+   * buffer's limit. The underlying operating system may impose a limit on the number of buffers
+   * that may be used in an I/O operation. Where the number of buffers (with bytes remaining)
+   * exceeds this limit, the I/O operation is performed with the maximum number of buffers allowed
+   * by the operating system.
+   *
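The mirror image for the gathering write, under the same assumptions (illustrative names; header and body are expected to be flipped and ready for draining):

```java
import java.nio.ByteBuffer;
import java.nio.channels.CompletionHandler;
import java.util.concurrent.TimeUnit;

class GatheringWriteExample {
  static void writeSegment(
      ExtendedAsynchronousByteChannel channel, ByteBuffer header, ByteBuffer body) {
    ByteBuffer[] srcs = {header, body};
    channel.write(srcs, 0, srcs.length, 5, TimeUnit.SECONDS, null,
        new CompletionHandler<Long, Void>() {
          @Override
          public void completed(Long bytesWritten, Void attachment) {
            // header is drained before body, mirroring the scattering read
          }

          @Override
          public void failed(Throwable t, Void attachment) {
            // timeout or I/O failure
          }
        });
  }
}
```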

<p>If a timeout is specified and the timeout elapses before the operation completes then it
+   * completes with the exception {@link InterruptedByTimeoutException}. Where a timeout occurs, and
+   * the implementation cannot guarantee that bytes have not been written, or will not be written to
+   * the channel from the given buffers, then further attempts to write to the channel will cause an
+   * unspecific runtime exception to be thrown.
+   *
+   * @param <A> The type of the object to be attached to the operation
+   * @param srcs The buffers from which bytes are to be retrieved
+   * @param offset The offset within the buffer array of the first buffer from which bytes are to be
+   *     retrieved; must be non-negative and no larger than {@code srcs.length}
+   * @param length The maximum number of buffers to be accessed; must be non-negative and no larger
+   *     than {@code srcs.length - offset}
+   * @param timeout The maximum time for the I/O operation to complete
+   * @param unit The time unit of the {@code timeout} argument
+   * @param attach The object to attach to the I/O operation; can be {@code null}
+   * @param handler The handler for consuming the result
+   * @throws IndexOutOfBoundsException If the preconditions for the {@code offset} and {@code
+   *     length} parameters aren't met
+   * @throws WritePendingException If a write operation is already in progress on this channel
+   * @throws ShutdownChannelGroupException If the channel group has terminated
+   */
+  <A> void write(
+      ByteBuffer[] srcs,
+      int offset,
+      int length,
+      long timeout,
+      TimeUnit unit,
+      A attach,
+      CompletionHandler<Long, ? super A> handler);
+}
diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/async/package-info.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/async/package-info.java
new file mode 100644
index 00000000000..d04052254d4
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/async/package-info.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Original Work: MIT License, Copyright (c) [2015-2020] all contributors
+ * https://github.com/marianobarrios/tls-channel
+ */
+
+/**
+ * This package enables the usage of TLS Channel as an {@link
+ * java.nio.channels.AsynchronousByteChannel}.
+ *

This package contains internal functionality that may change at any time. + */ +@Internal +package com.mongodb.internal.connection.tlschannel.async; + +import com.mongodb.annotations.Internal; diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/impl/BufferHolder.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/impl/BufferHolder.java new file mode 100644 index 00000000000..39e196dabbb --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/impl/BufferHolder.java @@ -0,0 +1,167 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Original Work: MIT License, Copyright (c) [2015-2020] all contributors + * https://github.com/marianobarrios/tls-channel + */ + +package com.mongodb.internal.connection.tlschannel.impl; + +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.internal.connection.tlschannel.BufferAllocator; + +import java.nio.Buffer; +import java.nio.ByteBuffer; +import java.util.Optional; + +import static java.lang.String.format; + +public class BufferHolder { + + private static final Logger LOGGER = Loggers.getLogger("connection.tls"); + private static final byte[] zeros = new byte[TlsChannelImpl.maxTlsPacketSize]; + + public final String name; + public final BufferAllocator allocator; + public final boolean plainData; + public final int maxSize; + public final boolean opportunisticDispose; + + public ByteBuffer buffer; + public int lastSize; + + public BufferHolder( + String name, + Optional buffer, + BufferAllocator allocator, + int initialSize, + int maxSize, + boolean plainData, + boolean opportunisticDispose) { + this.name = name; + this.allocator = allocator; + this.buffer = buffer.orElse(null); + this.maxSize = maxSize; + this.plainData = plainData; + this.opportunisticDispose = opportunisticDispose; + this.lastSize = buffer.map(b -> b.capacity()).orElse(initialSize); + } + + public void prepare() { + if (buffer == null) { + buffer = allocator.allocate(lastSize); + } + } + + public boolean release() { + if (opportunisticDispose && buffer.position() == 0) { + return dispose(); + } else { + return false; + } + } + + public boolean dispose() { + if (buffer != null) { + allocator.free(buffer); + buffer = null; + return true; + } else { + return false; + } + } + + public void resize(int newCapacity) { + if (newCapacity > maxSize) + throw new IllegalArgumentException( + format( + "new capacity (%s) bigger than absolute max size (%s)", newCapacity, maxSize)); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(format( + "resizing buffer %s, increasing from %s to %s (manual sizing)", + name, + buffer.capacity(), + newCapacity)); + } + resizeImpl(newCapacity); + } + + public void enlarge() { + if (buffer.capacity() >= maxSize) { + throw new IllegalStateException( + format( + "%s buffer insufficient despite having capacity of %d", name, buffer.capacity())); + } + 
int newCapacity = Math.min(buffer.capacity() * 2, maxSize);
+    if (LOGGER.isTraceEnabled()) {
+      LOGGER.trace(format(
+          "enlarging buffer %s, increasing from %s to %s (automatic enlarge)",
+          name,
+          buffer.capacity(),
+          newCapacity));
+    }
+    resizeImpl(newCapacity);
+  }
+
+  private void resizeImpl(int newCapacity) {
+    ByteBuffer newBuffer = allocator.allocate(newCapacity);
+    ((Buffer) buffer).flip();
+    newBuffer.put(buffer);
+    if (plainData) {
+      zero();
+    }
+    allocator.free(buffer);
+    buffer = newBuffer;
+    lastSize = newCapacity;
+  }
+
+  /**
+   * Fill the remainder of the supplied buffer with zeros. This method does not change the buffer
+   * position.
+   *

<p>Typically used for security reasons, with buffers that contain now-unused plaintext.
+   */
+  public void zeroRemaining() {
+    zero(buffer.position());
+  }
+
+  /**
+   * Fill the buffer with zeros. This method does not change the buffer position.
+   *

Typically used for security reasons, with buffers that contains now-unused plaintext. + */ + public void zero() { + zero(0); + } + + private void zero(final int position) { + ((Buffer) buffer).mark(); + ((Buffer) buffer).position(position); + int size = buffer.remaining(); + int length = Math.min(size, zeros.length); + int offset = 0; + while (length > 0) { + buffer.put(zeros, 0, length); + offset = offset + length; + length = Math.min(size - offset, zeros.length); + } + ((Buffer) buffer).reset(); + } + + public boolean nullOrEmpty() { + return buffer == null || buffer.position() == 0; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/impl/ByteBufferSet.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/impl/ByteBufferSet.java new file mode 100644 index 00000000000..cf95a90801b --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/impl/ByteBufferSet.java @@ -0,0 +1,142 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Original Work: MIT License, Copyright (c) [2015-2020] all contributors + * https://github.com/marianobarrios/tls-channel + */ + +package com.mongodb.internal.connection.tlschannel.impl; + +import java.nio.ByteBuffer; +import java.util.Arrays; + +public class ByteBufferSet { + + public final ByteBuffer[] array; + public final int offset; + public final int length; + + public ByteBufferSet(ByteBuffer[] array, int offset, int length) { + if (array == null) throw new NullPointerException(); + if (array.length < offset) throw new IndexOutOfBoundsException(); + if (array.length < offset + length) throw new IndexOutOfBoundsException(); + for (int i = offset; i < offset + length; i++) { + if (array[i] == null) throw new NullPointerException(); + } + this.array = array; + this.offset = offset; + this.length = length; + } + + public ByteBufferSet(ByteBuffer[] array) { + this(array, 0, array.length); + } + + public ByteBufferSet(ByteBuffer buffer) { + this(new ByteBuffer[] {buffer}); + } + + public long remaining() { + long ret = 0; + for (int i = offset; i < offset + length; i++) { + ret += array[i].remaining(); + } + return ret; + } + + public int putRemaining(ByteBuffer from) { + int totalBytes = 0; + for (int i = offset; i < offset + length; i++) { + if (!from.hasRemaining()) break; + ByteBuffer dstBuffer = array[i]; + int bytes = Math.min(from.remaining(), dstBuffer.remaining()); + ByteBufferUtil.copy(from, dstBuffer, bytes); + totalBytes += bytes; + } + return totalBytes; + } + + public ByteBufferSet put(ByteBuffer from, int length) { + if (from.remaining() < length) { + throw new IllegalArgumentException(); + } + if (remaining() < length) { + throw new IllegalArgumentException(); + } + int totalBytes = 0; + for (int i = offset; i < offset + this.length; i++) { + int pending = length - totalBytes; + if (pending == 0) break; + int bytes = Math.min(pending, (int) remaining()); + ByteBuffer dstBuffer = array[i]; + 
ByteBufferUtil.copy(from, dstBuffer, bytes); + totalBytes += bytes; + } + return this; + } + + public int getRemaining(ByteBuffer dst) { + int totalBytes = 0; + for (int i = offset; i < offset + length; i++) { + if (!dst.hasRemaining()) break; + ByteBuffer srcBuffer = array[i]; + int bytes = Math.min(dst.remaining(), srcBuffer.remaining()); + ByteBufferUtil.copy(srcBuffer, dst, bytes); + totalBytes += bytes; + } + return totalBytes; + } + + public ByteBufferSet get(ByteBuffer dst, int length) { + if (remaining() < length) { + throw new IllegalArgumentException(); + } + if (dst.remaining() < length) { + throw new IllegalArgumentException(); + } + int totalBytes = 0; + for (int i = offset; i < offset + this.length; i++) { + int pending = length - totalBytes; + if (pending == 0) break; + ByteBuffer srcBuffer = array[i]; + int bytes = Math.min(pending, srcBuffer.remaining()); + ByteBufferUtil.copy(srcBuffer, dst, bytes); + totalBytes += bytes; + } + return this; + } + + public boolean hasRemaining() { + return remaining() > 0; + } + + public boolean isReadOnly() { + for (int i = offset; i < offset + length; i++) { + if (array[i].isReadOnly()) return true; + } + return false; + } + + @Override + public String toString() { + return "ByteBufferSet[array=" + + Arrays.toString(array) + + ", offset=" + + offset + + ", length=" + + length + + "]"; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/impl/ByteBufferUtil.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/impl/ByteBufferUtil.java new file mode 100644 index 00000000000..d35f61307bc --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/impl/ByteBufferUtil.java @@ -0,0 +1,51 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Original Work: MIT License, Copyright (c) [2015-2020] all contributors + * https://github.com/marianobarrios/tls-channel + */ + +package com.mongodb.internal.connection.tlschannel.impl; + +import java.nio.Buffer; +import java.nio.ByteBuffer; + +public class ByteBufferUtil { + + public static void copy(ByteBuffer src, ByteBuffer dst, int length) { + if (length < 0) { + throw new IllegalArgumentException("negative length"); + } + if (src.remaining() < length) { + throw new IllegalArgumentException( + String.format( + "source buffer does not have enough remaining capacity (%d < %d)", + src.remaining(), length)); + } + if (dst.remaining() < length) { + throw new IllegalArgumentException( + String.format( + "destination buffer does not have enough remaining capacity (%d < %d)", + dst.remaining(), length)); + } + if (length == 0) { + return; + } + ByteBuffer tmp = src.duplicate(); + ((Buffer) tmp).limit(src.position() + length); + dst.put(tmp); + ((Buffer) src).position(src.position() + length); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/impl/TlsChannelImpl.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/impl/TlsChannelImpl.java new file mode 100644 index 00000000000..20bc69e81f0 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/impl/TlsChannelImpl.java @@ -0,0 +1,810 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Original Work: MIT License, Copyright (c) [2015-2020] all contributors + * https://github.com/marianobarrios/tls-channel + */ + +package com.mongodb.internal.connection.tlschannel.impl; + +import com.mongodb.internal.connection.tlschannel.NeedsReadException; +import com.mongodb.internal.connection.tlschannel.NeedsTaskException; +import com.mongodb.internal.connection.tlschannel.NeedsWriteException; +import com.mongodb.internal.connection.tlschannel.TlsChannelCallbackException; +import com.mongodb.internal.connection.tlschannel.TrackingAllocator; +import com.mongodb.internal.connection.tlschannel.WouldBlockException; +import com.mongodb.internal.connection.tlschannel.util.Util; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; + +import javax.net.ssl.SSLEngine; +import javax.net.ssl.SSLEngineResult; +import javax.net.ssl.SSLEngineResult.HandshakeStatus; +import javax.net.ssl.SSLEngineResult.Status; +import javax.net.ssl.SSLException; +import javax.net.ssl.SSLSession; +import java.io.IOException; +import java.nio.Buffer; +import java.nio.ByteBuffer; +import java.nio.channels.ByteChannel; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.ReadableByteChannel; +import java.nio.channels.WritableByteChannel; +import java.util.Optional; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Consumer; + +import static java.lang.String.format; +import static javax.net.ssl.SSLEngineResult.HandshakeStatus.NOT_HANDSHAKING; + +public class TlsChannelImpl implements ByteChannel { + + private static final Logger LOGGER = Loggers.getLogger("connection.tls"); + + public static final int buffersInitialSize = 4096; + + /** Official TLS max data size is 2^14 = 16k. Use 1024 more to account for the overhead */ + public static final int maxTlsPacketSize = 17 * 1024; + + private static class UnwrapResult { + public final int bytesProduced; + public final HandshakeStatus lastHandshakeStatus; + public final boolean wasClosed; + + public UnwrapResult(int bytesProduced, HandshakeStatus lastHandshakeStatus, boolean wasClosed) { + this.bytesProduced = bytesProduced; + this.lastHandshakeStatus = lastHandshakeStatus; + this.wasClosed = wasClosed; + } + } + + private static class WrapResult { + public final int bytesConsumed; + public final HandshakeStatus lastHandshakeStatus; + + public WrapResult(int bytesConsumed, HandshakeStatus lastHandshakeStatus) { + this.bytesConsumed = bytesConsumed; + this.lastHandshakeStatus = lastHandshakeStatus; + } + } + + /** Used to signal EOF conditions from the underlying channel */ + public static class EofException extends Exception { + private static final long serialVersionUID = -3859156713994602991L; + + /** For efficiency, override this method to do nothing. 
*/ + @Override + public Throwable fillInStackTrace() { + return this; + } + } + + private final ReadableByteChannel readChannel; + private final WritableByteChannel writeChannel; + private final SSLEngine engine; + private BufferHolder inEncrypted; + private final Consumer initSessionCallback; + + private final boolean runTasks; + private final TrackingAllocator encryptedBufAllocator; + private final TrackingAllocator plainBufAllocator; + private final boolean waitForCloseConfirmation; + + // @formatter:off + public TlsChannelImpl( + ReadableByteChannel readChannel, + WritableByteChannel writeChannel, + SSLEngine engine, + Optional inEncrypted, + Consumer initSessionCallback, + boolean runTasks, + TrackingAllocator plainBufAllocator, + TrackingAllocator encryptedBufAllocator, + boolean releaseBuffers, + boolean waitForCloseConfirmation) { + // @formatter:on + this.readChannel = readChannel; + this.writeChannel = writeChannel; + this.engine = engine; + this.inEncrypted = + inEncrypted.orElseGet( + () -> + new BufferHolder( + "inEncrypted", + Optional.empty(), + encryptedBufAllocator, + buffersInitialSize, + maxTlsPacketSize, + false /* plainData */, + releaseBuffers)); + this.initSessionCallback = initSessionCallback; + this.runTasks = runTasks; + this.plainBufAllocator = plainBufAllocator; + this.encryptedBufAllocator = encryptedBufAllocator; + this.waitForCloseConfirmation = waitForCloseConfirmation; + inPlain = + new BufferHolder( + "inPlain", + Optional.empty(), + plainBufAllocator, + buffersInitialSize, + maxTlsPacketSize, + true /* plainData */, + releaseBuffers); + outEncrypted = + new BufferHolder( + "outEncrypted", + Optional.empty(), + encryptedBufAllocator, + buffersInitialSize, + maxTlsPacketSize, + false /* plainData */, + releaseBuffers); + } + + private final Lock initLock = new ReentrantLock(); + private final Lock readLock = new ReentrantLock(); + private final Lock writeLock = new ReentrantLock(); + + private boolean handshakeStarted = false; + + private volatile boolean handshakeCompleted = false; + + /** + * Whether a IOException was received from the underlying channel or from the {@link SSLEngine}. + */ + private volatile boolean invalid = false; + + /** Whether a close_notify was already sent. */ + private volatile boolean shutdownSent = false; + + /** Whether a close_notify was already received. */ + private volatile boolean shutdownReceived = false; + + // decrypted data from inEncrypted + private BufferHolder inPlain; + + // contains data encrypted to send to the underlying channel + private BufferHolder outEncrypted; + + /** + * Handshake wrap() method calls need a buffer to read from, even when they actually do not read + * anything. + * + *

Note: standard SSLEngine is happy with no buffers, the empty buffer is here to make this + * work with Netty's OpenSSL's wrapper. + */ + private final ByteBufferSet dummyOut = + new ByteBufferSet(new ByteBuffer[] {ByteBuffer.allocate(0)}); + + public Consumer getSessionInitCallback() { + return initSessionCallback; + } + + public TrackingAllocator getPlainBufferAllocator() { + return plainBufAllocator; + } + + public TrackingAllocator getEncryptedBufferAllocator() { + return encryptedBufAllocator; + } + + // read + + public long read(ByteBufferSet dest) throws IOException { + checkReadBuffer(dest); + if (!dest.hasRemaining()) return 0; + handshake(); + readLock.lock(); + try { + if (invalid || shutdownSent) { + throw new ClosedChannelException(); + } + HandshakeStatus handshakeStatus = engine.getHandshakeStatus(); + int bytesToReturn = inPlain.nullOrEmpty() ? 0 :inPlain.buffer.position(); + while (true) { + if (bytesToReturn > 0) { + if (inPlain.nullOrEmpty()) { + return bytesToReturn; + } else { + return transferPendingPlain(dest); + } + } + if (shutdownReceived) { + return -1; + } + Util.assertTrue(inPlain.nullOrEmpty()); + switch (handshakeStatus) { + case NEED_UNWRAP: + case NEED_WRAP: + bytesToReturn = handshake(Optional.of(dest), Optional.of(handshakeStatus)); + handshakeStatus = NOT_HANDSHAKING; + break; + case NOT_HANDSHAKING: + case FINISHED: + UnwrapResult res = readAndUnwrap(Optional.of(dest)); + if (res.wasClosed) { + return -1; + } + bytesToReturn = res.bytesProduced; + handshakeStatus = res.lastHandshakeStatus; + break; + case NEED_TASK: + handleTask(); + handshakeStatus = engine.getHandshakeStatus(); + break; + default: + // Unsupported stage eg: NEED_UNWRAP_AGAIN + return -1; + } + } + } catch (EofException e) { + return -1; + } finally { + readLock.unlock(); + } + } + + private void handleTask() throws NeedsTaskException { + if (runTasks) { + engine.getDelegatedTask().run(); + } else { + throw new NeedsTaskException(engine.getDelegatedTask()); + } + } + + private int transferPendingPlain(ByteBufferSet dstBuffers) { + ((Buffer) inPlain.buffer).flip(); // will read + int bytes = dstBuffers.putRemaining(inPlain.buffer); + inPlain.buffer.compact(); // will write + boolean disposed = inPlain.release(); + if (!disposed) { + inPlain.zeroRemaining(); + } + return bytes; + } + + private UnwrapResult unwrapLoop(Optional dest, HandshakeStatus originalStatus) + throws SSLException { + ByteBufferSet effDest = + dest.orElseGet( + () -> { + inPlain.prepare(); + return new ByteBufferSet(inPlain.buffer); + }); + while (true) { + Util.assertTrue(inPlain.nullOrEmpty()); + SSLEngineResult result = callEngineUnwrap(effDest); + /* + * Note that data can be returned even in case of overflow, in that + * case, just return the data. + */ + if (result.bytesProduced() > 0 + || result.getStatus() == Status.BUFFER_UNDERFLOW + || result.getStatus() == Status.CLOSED + || result.getHandshakeStatus() != originalStatus) { + boolean wasClosed = result.getStatus() == Status.CLOSED; + return new UnwrapResult(result.bytesProduced(), result.getHandshakeStatus(), wasClosed); + } + if (result.getStatus() == Status.BUFFER_OVERFLOW) { + if (dest.isPresent() && effDest == dest.get()) { + /* + * The client-supplier buffer is not big enough. Use the + * internal inPlain buffer, also ensure that it is bigger + * than the too-small supplied one. 
+ */ + inPlain.prepare(); + ensureInPlainCapacity(Math.min(((int) dest.get().remaining()) * 2, maxTlsPacketSize)); + } else { + inPlain.enlarge(); + } + // inPlain changed, re-create the wrapper + effDest = new ByteBufferSet(inPlain.buffer); + } + } + } + + private SSLEngineResult callEngineUnwrap(ByteBufferSet dest) throws SSLException { + ((Buffer) inEncrypted.buffer).flip(); + try { + SSLEngineResult result = + engine.unwrap(inEncrypted.buffer, dest.array, dest.offset, dest.length); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(format( + "engine.unwrap() result [%s]. Engine status: %s; inEncrypted %s; inPlain: %s", + Util.resultToString(result), + result.getHandshakeStatus(), + inEncrypted, + dest)); + } + return result; + } catch (SSLException e) { + // something bad was received from the underlying channel, we cannot + // continue + invalid = true; + throw e; + } finally { + inEncrypted.buffer.compact(); + } + } + + private int readFromChannel() throws IOException, EofException { + try { + return readFromChannel(readChannel, inEncrypted.buffer); + } catch (WouldBlockException e) { + throw e; + } catch (IOException e) { + invalid = true; + throw e; + } + } + + public static int readFromChannel(ReadableByteChannel readChannel, ByteBuffer buffer) + throws IOException, EofException { + Util.assertTrue(buffer.hasRemaining()); + LOGGER.trace("Reading from channel"); + int c = readChannel.read(buffer); // IO block + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(format("Read from channel; response: %s, buffer: %s", c, buffer)); + } + if (c == -1) { + throw new EofException(); + } + if (c == 0) { + throw new NeedsReadException(); + } + return c; + } + + // write + + public long write(ByteBufferSet source) throws IOException { + /* + * Note that we should enter the write loop even in the case that the source buffer has no remaining bytes, + * as it could be the case, in non-blocking usage, that the user is forced to call write again after the + * underlying channel is available for writing, just to write pending encrypted bytes. 
+ */ + handshake(); + writeLock.lock(); + try { + if (invalid || shutdownSent) { + throw new ClosedChannelException(); + } + return wrapAndWrite(source); + } finally { + writeLock.unlock(); + } + } + + private long wrapAndWrite(ByteBufferSet source) throws IOException { + long bytesToConsume = source.remaining(); + long bytesConsumed = 0; + outEncrypted.prepare(); + try { + while (true) { + writeToChannel(); + if (bytesConsumed == bytesToConsume) return bytesToConsume; + WrapResult res = wrapLoop(source); + bytesConsumed += res.bytesConsumed; + } + } finally { + outEncrypted.release(); + } + } + + private WrapResult wrapLoop(ByteBufferSet source) throws SSLException { + while (true) { + SSLEngineResult result = callEngineWrap(source); + switch (result.getStatus()) { + case OK: + case CLOSED: + return new WrapResult(result.bytesConsumed(), result.getHandshakeStatus()); + case BUFFER_OVERFLOW: + Util.assertTrue(result.bytesConsumed() == 0); + outEncrypted.enlarge(); + break; + case BUFFER_UNDERFLOW: + throw new IllegalStateException(); + } + } + } + + private SSLEngineResult callEngineWrap(ByteBufferSet source) throws SSLException { + try { + SSLEngineResult result = + engine.wrap(source.array, source.offset, source.length, outEncrypted.buffer); + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(format( + "engine.wrap() result: [%s]; engine status: %s; srcBuffer: %s, outEncrypted: %s", + Util.resultToString(result), + result.getHandshakeStatus(), + source, + outEncrypted)); + } + return result; + } catch (SSLException e) { + invalid = true; + throw e; + } + } + + private void ensureInPlainCapacity(int newCapacity) { + if (inPlain.buffer.capacity() < newCapacity) { + if (LOGGER.isTraceEnabled()) { + LOGGER.trace(format( + "inPlain buffer too small, increasing from %s to %s", + inPlain.buffer.capacity(), + newCapacity)); + } + inPlain.resize(newCapacity); + } + } + + private void writeToChannel() throws IOException { + if (outEncrypted.buffer.position() == 0) { + return; + } + ((Buffer) outEncrypted.buffer).flip(); + try { + try { + writeToChannel(writeChannel, outEncrypted.buffer); + } catch (WouldBlockException e) { + throw e; + } catch (IOException e) { + invalid = true; + throw e; + } + } finally { + outEncrypted.buffer.compact(); + } + } + + private static void writeToChannel(WritableByteChannel channel, ByteBuffer src) + throws IOException { + while (src.hasRemaining()) { + if (LOGGER.isTraceEnabled()) { + LOGGER.trace("Writing to channel: " + src); + } + int c = channel.write(src); + if (c == 0) { + /* + * If no bytesProduced were written, it means that the socket is + * non-blocking and needs more buffer space, so stop the loop + */ + throw new NeedsWriteException(); + } + // blocking SocketChannels can write less than all the bytesProduced + // just before an error the loop forces the exception + } + } + + // handshake and close + + /** + * Force a new negotiation. + * + * @throws IOException if the underlying channel throws an IOException + */ + public void renegotiate() throws IOException { + /* + * Renegotiation was removed in TLS 1.3. We have to do the check at this level because SSLEngine will not + * check that, and just enter into undefined behavior. 
+ */ + // relying in hopefully-robust lexicographic ordering of protocol names + if (engine.getSession().getProtocol().compareTo("TLSv1.3") >= 0) { + throw new SSLException("renegotiation not supported in TLS 1.3 or latter"); + } + try { + doHandshake(true /* force */); + } catch (EofException e) { + throw new ClosedChannelException(); + } + } + + /** + * Do a negotiation if this connection is new and it hasn't been done already. + * + * @throws IOException if the underlying channel throws an IOException + */ + public void handshake() throws IOException { + try { + doHandshake(false /* force */); + } catch (EofException e) { + throw new ClosedChannelException(); + } + } + + private void doHandshake(boolean force) throws IOException, EofException { + if (!force && handshakeCompleted) { + return; + } + initLock.lock(); + try { + if (invalid || shutdownSent) throw new ClosedChannelException(); + if (force || !handshakeCompleted) { + + if (!handshakeStarted) { + engine.beginHandshake(); + LOGGER.trace("Called engine.beginHandshake()"); + + // Some engines that do not support renegotiations may be sensitive to calling + // SSLEngine.beginHandshake() more than once. This guard prevents that. + // See: https://github.com/marianobarrios/tls-channel/issues/197 + handshakeStarted = true; + } + + handshake(Optional.empty(), Optional.empty()); + + handshakeCompleted = true; + + // call client code + try { + initSessionCallback.accept(engine.getSession()); + } catch (Exception e) { + LOGGER.trace("client code threw exception in session initialization callback", e); + throw new TlsChannelCallbackException("session initialization callback failed", e); + } + } + } finally { + initLock.unlock(); + } + } + + private int handshake(Optional dest, Optional handshakeStatus) + throws IOException, EofException { + readLock.lock(); + try { + writeLock.lock(); + try { + if (invalid || shutdownSent) { + throw new ClosedChannelException(); + } + Util.assertTrue(inPlain.nullOrEmpty()); + outEncrypted.prepare(); + try { + writeToChannel(); // IO block + return handshakeLoop(dest, handshakeStatus); + } finally { + outEncrypted.release(); + } + } finally { + writeLock.unlock(); + } + } finally { + readLock.unlock(); + } + } + + private int handshakeLoop(Optional dest, Optional handshakeStatus) + throws IOException, EofException { + Util.assertTrue(inPlain.nullOrEmpty()); + HandshakeStatus status = handshakeStatus.orElseGet(() -> engine.getHandshakeStatus()); + while (true) { + switch (status) { + case NEED_WRAP: + Util.assertTrue(outEncrypted.nullOrEmpty()); + WrapResult wrapResult = wrapLoop(dummyOut); + status = wrapResult.lastHandshakeStatus; + writeToChannel(); // IO block + break; + case NEED_UNWRAP: + UnwrapResult res = readAndUnwrap(dest); + status = res.lastHandshakeStatus; + if (res.bytesProduced > 0) return res.bytesProduced; + break; + case NOT_HANDSHAKING: + /* + * This should not really happen using SSLEngine, because + * handshaking ends with a FINISHED status. However, we accept + * this value to permit the use of a pass-through stub engine + * with no encryption. 
+ */ + return 0; + case NEED_TASK: + handleTask(); + status = engine.getHandshakeStatus(); + break; + case FINISHED: + return 0; + default: + // Unsupported stage eg: NEED_UNWRAP_AGAIN + return 0; + } + } + } + + private UnwrapResult readAndUnwrap(Optional dest) + throws IOException, EofException { + // Save status before operation: use it to stop when status changes + HandshakeStatus orig = engine.getHandshakeStatus(); + inEncrypted.prepare(); + try { + while (true) { + Util.assertTrue(inPlain.nullOrEmpty()); + UnwrapResult res = unwrapLoop(dest, orig); + if (res.bytesProduced > 0 || res.lastHandshakeStatus != orig || res.wasClosed) { + if (res.wasClosed) { + shutdownReceived = true; + } + return res; + } + if (!inEncrypted.buffer.hasRemaining()) { + inEncrypted.enlarge(); + } + readFromChannel(); // IO block + } + } finally { + inEncrypted.release(); + } + } + + public void close() throws IOException { + tryShutdown(); + writeChannel.close(); + readChannel.close(); + /* + * After closing the underlying channels, locks should be taken fast. + */ + readLock.lock(); + try { + writeLock.lock(); + try { + freeBuffers(); + } finally { + writeLock.unlock(); + } + } finally { + readLock.unlock(); + } + } + + private void tryShutdown() { + if (!readLock.tryLock()) return; + try { + if (!writeLock.tryLock()) return; + try { + if (!shutdownSent) { + try { + boolean closed = shutdown(); + if (!closed && waitForCloseConfirmation) { + shutdown(); + } + } catch (Throwable e) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("error doing TLS shutdown on close(), continuing: " + e.getMessage()); + } + } + } + } finally { + writeLock.unlock(); + } + } finally { + readLock.unlock(); + } + } + + public boolean shutdown() throws IOException { + readLock.lock(); + try { + writeLock.lock(); + try { + if (invalid) { + throw new ClosedChannelException(); + } + if (!shutdownSent) { + shutdownSent = true; + outEncrypted.prepare(); + try { + writeToChannel(); // IO block + engine.closeOutbound(); + wrapLoop(dummyOut); + writeToChannel(); // IO block + } finally { + outEncrypted.release(); + } + /* + * If this side is the first to send close_notify, then, + * inbound is not done and false should be returned (so the + * client waits for the response). If this side is the + * second, then inbound was already done, and we can return + * true. + */ + if (shutdownReceived) { + freeBuffers(); + } + return shutdownReceived; + } + /* + * If we reach this point, then we just have to read the close + * notification from the client. Only try to do it if necessary, + * to make this method idempotent. 
+ */ + if (!shutdownReceived) { + try { + // IO block + readAndUnwrap(Optional.empty()); + Util.assertTrue(shutdownReceived); + } catch (EofException e) { + throw new ClosedChannelException(); + } + } + freeBuffers(); + return true; + } finally { + writeLock.unlock(); + } + } finally { + readLock.unlock(); + } + } + + private void freeBuffers() { + if (inEncrypted != null) { + inEncrypted.dispose(); + inEncrypted = null; + } + if (inPlain != null) { + inPlain.dispose(); + inPlain = null; + } + if (outEncrypted != null) { + outEncrypted.dispose(); + outEncrypted = null; + } + } + + public boolean isOpen() { + return !invalid && writeChannel.isOpen() && readChannel.isOpen(); + } + + public static void checkReadBuffer(ByteBufferSet dest) { + if (dest.isReadOnly()) throw new IllegalArgumentException(); + } + + public SSLEngine engine() { + return engine; + } + + public boolean getRunTasks() { + return runTasks; + } + + @Override + public int read(ByteBuffer dst) throws IOException { + return (int) read(new ByteBufferSet(dst)); + } + + @Override + public int write(ByteBuffer src) throws IOException { + return (int) write(new ByteBufferSet(src)); + } + + public boolean shutdownReceived() { + return shutdownReceived; + } + + public boolean shutdownSent() { + return shutdownSent; + } + + public ReadableByteChannel plainReadableChannel() { + return readChannel; + } + + public WritableByteChannel plainWritableChannel() { + return writeChannel; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/impl/TlsExplorer.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/impl/TlsExplorer.java new file mode 100644 index 00000000000..0f75c6b33f5 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/impl/TlsExplorer.java @@ -0,0 +1,270 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Original Work: MIT License, Copyright (c) [2015-2020] all contributors + * https://github.com/marianobarrios/tls-channel + */ + +package com.mongodb.internal.connection.tlschannel.impl; + +import javax.net.ssl.SNIHostName; +import javax.net.ssl.SNIServerName; +import javax.net.ssl.SSLProtocolException; +import javax.net.ssl.StandardConstants; +import java.nio.Buffer; +import java.nio.BufferUnderflowException; +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.Map; + +/* + * Implement basic TLS parsing, just to read the SNI (as this is not done by + * {@link SSLEngine}. + */ +public final class TlsExplorer { + + private TlsExplorer() {} + + /** The header size of TLS/SSL records. */ + public static final int RECORD_HEADER_SIZE = 5; + + /** + * Returns the required number of bytesProduced in the {@code source} {@link ByteBuffer} necessary + * to explore SSL/TLS connection. + * + *

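As a worked example of the length arithmetic implemented by getRequiredSize below: for a standard TLS record, bytes four and five of the header carry the fragment length, and the full record occupies that length plus the five header bytes. The sample bytes here are made up:

```java
import com.mongodb.internal.connection.tlschannel.impl.TlsExplorer;

import java.nio.ByteBuffer;

class RecordSizeExample {
  public static void main(String[] args) {
    // content type 22 (handshake), version 3.3, declared fragment length 0x0100 = 256
    ByteBuffer header = ByteBuffer.wrap(new byte[] {22, 3, 3, 0x01, 0x00});
    int required = TlsExplorer.getRequiredSize(header);
    System.out.println(required); // prints 261, i.e. 256 + RECORD_HEADER_SIZE
  }
}
```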
This method tries to parse as few bytesProduced as possible from {@code source} byte buffer + * to get the length of an SSL/TLS record. + * + * @param source source buffer + * @return the required size + */ + public static int getRequiredSize(ByteBuffer source) { + if (source.remaining() < RECORD_HEADER_SIZE) throw new BufferUnderflowException(); + ((Buffer) source).mark(); + try { + byte firstByte = source.get(); + source.get(); // second byte discarded + byte thirdByte = source.get(); + if ((firstByte & 0x80) != 0 && thirdByte == 0x01) { + // looks like a V2ClientHello + return RECORD_HEADER_SIZE; // Only need the header fields + } else { + return (((source.get() & 0xFF) << 8) | (source.get() & 0xFF)) + 5; + } + } finally { + ((Buffer) source).reset(); + } + } + + public static Map explore(ByteBuffer source) throws SSLProtocolException { + if (source.remaining() < RECORD_HEADER_SIZE) throw new BufferUnderflowException(); + ((Buffer) source).mark(); + try { + byte firstByte = source.get(); + ignore(source, 1); // ignore second byte + byte thirdByte = source.get(); + if ((firstByte & 0x80) != 0 && thirdByte == 0x01) { + // looks like a V2ClientHello + return new HashMap<>(); + } else if (firstByte == 22) { + // 22: handshake record + return exploreTLSRecord(source, firstByte); + } else { + throw new SSLProtocolException("Not handshake record"); + } + } finally { + ((Buffer) source).reset(); + } + } + + /* + * struct { uint8 major; uint8 minor; } ProtocolVersion; + * + * enum { change_cipher_spec(20), alert(21), handshake(22), + * application_data(23), (255) } ContentType; + * + * struct { ContentType type; ProtocolVersion version; uint16 length; opaque + * fragment[TLSPlaintext.length]; } TLSPlaintext; + */ + private static Map exploreTLSRecord(ByteBuffer input, byte firstByte) + throws SSLProtocolException { + // Is it a handshake message? + if (firstByte != 22) // 22: handshake record + throw new SSLProtocolException("Not handshake record"); + // Is there enough data for a full record? + int recordLength = getInt16(input); + if (recordLength > input.remaining()) throw new BufferUnderflowException(); + return exploreHandshake(input, recordLength); + } + + /* + * enum { hello_request(0), client_hello(1), server_hello(2), + * certificate(11), server_key_exchange (12), certificate_request(13), + * server_hello_done(14), certificate_verify(15), client_key_exchange(16), + * finished(20) (255) } HandshakeType; + * + * struct { HandshakeType msg_type; uint24 length; select (HandshakeType) { + * case hello_request: HelloRequest; case client_hello: ClientHello; case + * server_hello: ServerHello; case certificate: Certificate; case + * server_key_exchange: ServerKeyExchange; case certificate_request: + * CertificateRequest; case server_hello_done: ServerHelloDone; case + * certificate_verify: CertificateVerify; case client_key_exchange: + * ClientKeyExchange; case finished: Finished; } body; } Handshake; + */ + private static Map exploreHandshake(ByteBuffer input, int recordLength) + throws SSLProtocolException { + // What is the handshake type? + byte handshakeType = input.get(); + if (handshakeType != 0x01) // 0x01: client_hello message + throw new SSLProtocolException("Not initial handshaking"); + // What is the handshake body length? + int handshakeLength = getInt24(input); + // Theoretically, a single handshake message might span multiple + // records, but in practice this does not occur. 
+ if (handshakeLength > recordLength - 4) // 4: handshake header size + throw new SSLProtocolException("Handshake message spans multiple records"); + ((Buffer) input).limit(handshakeLength + input.position()); + return exploreClientHello(input); + } + + /* + * struct { uint32 gmt_unix_time; opaque random_bytes[28]; } Random; + * + * opaque SessionID<0..32>; + * + * uint8 CipherSuite[2]; + * + * enum { null(0), (255) } CompressionMethod; + * + * struct { ProtocolVersion client_version; Random random; SessionID + * session_id; CipherSuite cipher_suites<2..2^16-2>; CompressionMethod + * compression_methods<1..2^8-1>; select (extensions_present) { case false: + * struct {}; case true: Extension extensions<0..2^16-1>; }; } ClientHello; + */ + private static Map exploreClientHello(ByteBuffer input) + throws SSLProtocolException { + ignore(input, 2); // ignore version + ignore(input, 32); // ignore random; 32: the length of Random + ignoreByteVector8(input); // ignore session id + ignoreByteVector16(input); // ignore cipher_suites + ignoreByteVector8(input); // ignore compression methods + if (input.remaining() > 0) return exploreExtensions(input); + else return new HashMap<>(); + } + + /* + * struct { ExtensionType extension_type; opaque extension_data<0..2^16-1>; + * } Extension; + * + * enum { server_name(0), max_fragment_length(1), client_certificate_url(2), + * trusted_ca_keys(3), truncated_hmac(4), status_request(5), (65535) } + * ExtensionType; + */ + private static Map exploreExtensions(ByteBuffer input) + throws SSLProtocolException { + int length = getInt16(input); // length of extensions + while (length > 0) { + int extType = getInt16(input); // extension type + int extLen = getInt16(input); // length of extension data + if (extType == 0x00) { // 0x00: type of server name indication + return exploreSNIExt(input, extLen); + } else { // ignore other extensions + ignore(input, extLen); + } + length -= extLen + 4; + } + return new HashMap<>(); + } + + /* + * struct { NameType name_type; select (name_type) { case host_name: + * HostName; } name; } ServerName; + * + * enum { host_name(0), (255) } NameType; + * + * opaque HostName<1..2^16-1>; + * + * struct { ServerName server_name_list<1..2^16-1> } ServerNameList; + */ + private static Map exploreSNIExt(ByteBuffer input, int extLen) + throws SSLProtocolException { + Map sniMap = new HashMap<>(); + int remains = extLen; + if (extLen >= 2) { // "server_name" extension in ClientHello + int listLen = getInt16(input); // length of server_name_list + if (listLen == 0 || listLen + 2 != extLen) + throw new SSLProtocolException("Invalid server name indication extension"); + remains -= 2; // 2: the length field of server_name_list + while (remains > 0) { + int code = getInt8(input); // name_type + int snLen = getInt16(input); // length field of server name + if (snLen > remains) + throw new SSLProtocolException("Not enough data to fill declared vector size"); + byte[] encoded = new byte[snLen]; + input.get(encoded); + SNIServerName serverName; + if (code == StandardConstants.SNI_HOST_NAME) { + if (encoded.length == 0) + throw new SSLProtocolException("Empty HostName in server name indication"); + serverName = new SNIHostName(encoded); + } else { + serverName = new UnknownServerName(code, encoded); + } + // check for duplicated server name type + if (sniMap.put(serverName.getType(), serverName) != null) + throw new SSLProtocolException("Duplicated server name of type " + serverName.getType()); + remains -= encoded.length + 3; // NameType: 1 byte; 
HostName; + // length: 2 bytesProduced + } + } else if (extLen == 0) { // "server_name" extension in ServerHello + throw new SSLProtocolException("Not server name indication extension in client"); + } + if (remains != 0) throw new SSLProtocolException("Invalid server name indication extension"); + return sniMap; + } + + private static int getInt8(ByteBuffer input) { + return input.get(); + } + + private static int getInt16(ByteBuffer input) { + return ((input.get() & 0xFF) << 8) | (input.get() & 0xFF); + } + + private static int getInt24(ByteBuffer input) { + return ((input.get() & 0xFF) << 16) | ((input.get() & 0xFF) << 8) | (input.get() & 0xFF); + } + + private static void ignoreByteVector8(ByteBuffer input) { + ignore(input, getInt8(input)); + } + + private static void ignoreByteVector16(ByteBuffer input) { + ignore(input, getInt16(input)); + } + + private static void ignore(ByteBuffer input, int length) { + if (length != 0) { + ((Buffer) input).position(input.position() + length); + } + } + + // For some reason, SNIServerName is abstract + private static class UnknownServerName extends SNIServerName { + UnknownServerName(int code, byte[] encoded) { + super(code, encoded); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/package-info.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/package-info.java new file mode 100644 index 00000000000..d3b738d2593 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/package-info.java @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Original Work: MIT License, Copyright (c) [2015-2020] all contributors + * https://github.com/marianobarrios/tls-channel + */ + +/** + * TLS Channel is a library that implements a ByteChannel interface to a TLS (Transport Layer + * Security) connection. The library delegates all cryptographic operations to the standard Java TLS + * implementation: SSLEngine; effectively hiding it behind an easy-to-use streaming API, that allows + * to securitize JVM applications with minimal added complexity. + * + *

<p>In other words, a simple library that allows the programmer to have TLS using the same
+ * standard socket API used for plaintext, just like OpenSSL does for C, only for Java, filling an
+ * especially painful missing feature of the standard Java library.
+ *
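To illustrate that claim, here is a hedged sketch of fetching a page over TLS with plain ByteChannel calls. It assumes the upstream tls-channel ClientTlsChannel builder API, which this vendored, internal copy may not expose:

```java
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.SocketChannel;
import java.nio.charset.StandardCharsets;
import javax.net.ssl.SSLContext;

class PlainLookingTls {
  static void fetch() throws Exception {
    SSLContext ctx = SSLContext.getDefault();
    SocketChannel raw = SocketChannel.open(new InetSocketAddress("example.com", 443));
    // ClientTlsChannel.newBuilder(ByteChannel, SSLContext) is the upstream API, assumed here
    try (ClientTlsChannel tls = ClientTlsChannel.newBuilder(raw, ctx).build()) {
      tls.write(ByteBuffer.wrap("GET / HTTP/1.0\r\n\r\n".getBytes(StandardCharsets.US_ASCII)));
      ByteBuffer response = ByteBuffer.allocate(8192);
      tls.read(response); // plaintext in, plaintext out; TLS framing handled internally
    }
  }
}
```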

This package contains internal functionality that may change at any time. + */ +@Internal +package com.mongodb.internal.connection.tlschannel; + +import com.mongodb.annotations.Internal; diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/util/DirectBufferDeallocator.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/util/DirectBufferDeallocator.java new file mode 100644 index 00000000000..97863fe6cce --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/util/DirectBufferDeallocator.java @@ -0,0 +1,121 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Original Work: MIT License, Copyright (c) [2015-2020] all contributors + * https://github.com/marianobarrios/tls-channel + */ + +package com.mongodb.internal.connection.tlschannel.util; + +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; + +import java.lang.reflect.Field; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.nio.ByteBuffer; + +/** + * Access to NIO sun.misc.Cleaner, allowing caller to deterministically deallocate a given + * sun.nio.ch.DirectBuffer. + */ +public class DirectBufferDeallocator { + + private static final Logger LOGGER = Loggers.getLogger("connection.tls"); + + private interface Deallocator { + void free(ByteBuffer bb); + } + + private static class Java8Deallocator implements Deallocator { + + /* + * Getting instance of cleaner from buffer (sun.misc.Cleaner) + */ + + final Method cleanerAccessor; + final Method clean; + + Java8Deallocator() { + try { + cleanerAccessor = Class.forName("sun.nio.ch.DirectBuffer").getMethod("cleaner"); + clean = Class.forName("sun.misc.Cleaner").getMethod("clean"); + } catch (NoSuchMethodException | ClassNotFoundException t) { + throw new RuntimeException(t); + } + } + + @Override + public void free(ByteBuffer bb) { + try { + clean.invoke(cleanerAccessor.invoke(bb)); + } catch (IllegalAccessException | InvocationTargetException t) { + throw new RuntimeException(t); + } + } + } + + private static class Java9Deallocator implements Deallocator { + + /* + * Clean is of type jdk.internal.ref.Cleaner, but this type is not accessible, as it is not exported by default. + * Using workaround through sun.misc.Unsafe. 
+ */ + + final Object unsafe; + final Method invokeCleaner; + + Java9Deallocator() { + try { + Class unsafeClass = Class.forName("sun.misc.Unsafe"); + // avoiding getUnsafe methods, as it is explicitly filtered out from reflection API + Field theUnsafe = unsafeClass.getDeclaredField("theUnsafe"); + theUnsafe.setAccessible(true); + unsafe = theUnsafe.get(null); + invokeCleaner = unsafeClass.getMethod("invokeCleaner", ByteBuffer.class); + } catch (NoSuchMethodException + | ClassNotFoundException + | IllegalAccessException + | NoSuchFieldException t) { + throw new RuntimeException(t); + } + } + + @Override + public void free(ByteBuffer bb) { + try { + invokeCleaner.invoke(unsafe, bb); + } catch (IllegalAccessException | InvocationTargetException t) { + throw new RuntimeException(t); + } + } + } + + private final Deallocator deallocator; + + public DirectBufferDeallocator() { + if (Util.getJavaMajorVersion() >= 9) { + deallocator = new Java9Deallocator(); + LOGGER.debug("initialized direct buffer deallocator for java >= 9"); + } else { + deallocator = new Java8Deallocator(); + LOGGER.debug("initialized direct buffer deallocator for java < 9"); + } + } + + public void deallocate(ByteBuffer buffer) { + deallocator.free(buffer); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/connection/tlschannel/util/Util.java b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/util/Util.java new file mode 100644 index 00000000000..0a6e11d4cc4 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/connection/tlschannel/util/Util.java @@ -0,0 +1,71 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Original Work: MIT License, Copyright (c) [2015-2020] all contributors + * https://github.com/marianobarrios/tls-channel + */ + +package com.mongodb.internal.connection.tlschannel.util; + +import com.mongodb.internal.VisibleForTesting; + +import javax.net.ssl.SSLEngineResult; + +import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; + +public class Util { + + public static void assertTrue(boolean condition) { + if (!condition) throw new AssertionError(); + } + + /** + * Convert a {@link SSLEngineResult} into a {@link String}, this is needed because the supplied + * method includes a log-breaking newline. 
+ * + * @param result the SSLEngineResult + * @return the resulting string + */ + public static String resultToString(SSLEngineResult result) { + return String.format( + "status=%s,handshakeStatus=%s,bytesConsumed=%d,bytesConsumed=%d", + result.getStatus(), + result.getHandshakeStatus(), + result.bytesProduced(), + result.bytesConsumed()); + } + + public static int getJavaMajorVersion() { + return getJavaMajorVersion(System.getProperty("java.version")); + } + + @VisibleForTesting(otherwise = PRIVATE) + static int getJavaMajorVersion(final String javaVersion) { + String version = javaVersion; + if (version.startsWith("1.")) { + version = version.substring(2); + } + // Allow these formats: + // 1.8.0_72-ea + // 9-ea + // 9 + // 9.0.1 + // 17 + int dotPos = version.indexOf('.'); + int dashPos = version.indexOf('-'); + return Integer.parseInt( + version.substring(0, dotPos > -1 ? dotPos : dashPos > -1 ? dashPos : version.length())); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/diagnostics/logging/Logger.java b/driver-core/src/main/com/mongodb/internal/diagnostics/logging/Logger.java new file mode 100644 index 00000000000..e9907bb3953 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/diagnostics/logging/Logger.java @@ -0,0 +1,164 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.diagnostics.logging; + +/** + * This class is not part of the public API. It may be removed or changed at any time. + * + */ +public interface Logger { + /** + * Return the name of this Logger instance. + * + * @return name of this logger instance + */ + String getName(); + + /** + * Is the logger instance enabled for the TRACE level? + * + * @return True if this Logger is enabled for the TRACE level, false otherwise. + * @since 1.4 + */ + default boolean isTraceEnabled() { + return false; + } + + /** + * Log a message at the TRACE level. + * + * @param msg the message string to be logged + * @since 1.4 + */ + default void trace(String msg) { + } + + /** + * Log an exception (throwable) at the TRACE level with an accompanying message. + * + * @param msg the message accompanying the exception + * @param t the exception (throwable) to log + * @since 1.4 + */ + default void trace(String msg, Throwable t) { + } + + /** + * Is the logger instance enabled for the DEBUG level? + * + * @return True if this Logger is enabled for the DEBUG level, false otherwise. + */ + default boolean isDebugEnabled() { + return false; + } + + /** + * Log a message at the DEBUG level. + * + * @param msg the message string to be logged + */ + default void debug(String msg) { + } + + + /** + * Log an exception (throwable) at the DEBUG level with an accompanying message. + * + * @param msg the message accompanying the exception + * @param t the exception (throwable) to log + */ + default void debug(String msg, Throwable t) { + } + + /** + * Is the logger instance enabled for the INFO level? 
+ * + * @return True if this Logger is enabled for the INFO level, false otherwise. + */ + default boolean isInfoEnabled() { + return false; + } + + /** + * Log a message at the INFO level. + * + * @param msg the message string to be logged + */ + default void info(String msg) { + } + + /** + * Log an exception (throwable) at the INFO level with an accompanying message. + * + * @param msg the message accompanying the exception + * @param t the exception (throwable) to log + */ + default void info(String msg, Throwable t) { + } + + /** + * Is the logger instance enabled for the WARN level? + * + * @return True if this Logger is enabled for the WARN level, false otherwise. + */ + default boolean isWarnEnabled() { + return false; + } + + /** + * Log a message at the WARN level. + * + * @param msg the message string to be logged + */ + default void warn(String msg) { + } + + /** + * Log an exception (throwable) at the WARN level with an accompanying message. + * + * @param msg the message accompanying the exception + * @param t the exception (throwable) to log + */ + default void warn(String msg, Throwable t) { + } + + /** + * Is the logger instance enabled for the ERROR level? + * + * @return True if this Logger is enabled for the ERROR level, false otherwise. + */ + default boolean isErrorEnabled() { + return false; + } + + /** + * Log a message at the ERROR level. + * + * @param msg the message string to be logged + */ + default void error(String msg) { + } + + /** + * Log an exception (throwable) at the ERROR level with an accompanying message. + * + * @param msg the message accompanying the exception + * @param t the exception (throwable) to log + */ + default void error(String msg, Throwable t) { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/diagnostics/logging/Loggers.java b/driver-core/src/main/com/mongodb/internal/diagnostics/logging/Loggers.java new file mode 100644 index 00000000000..175201ee44a --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/diagnostics/logging/Loggers.java @@ -0,0 +1,69 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.diagnostics.logging; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * This class is not part of the public API. + * + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
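+ *
+ * <p>A minimal usage sketch (illustrative only): the supplied suffix is appended to the
+ * {@code org.mongodb.driver} prefix, and an SLF4J-backed logger is returned when SLF4J is
+ * on the classpath, a no-op logger otherwise.</p>
+ * <pre>{@code
+ * Logger logger = Loggers.getLogger("cluster");
+ * // logger.getName() -> "org.mongodb.driver.cluster"
+ * if (logger.isDebugEnabled()) {
+ *     logger.debug("Cluster created");
+ * }
+ * }</pre>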
+ */ +public final class Loggers { + /** + * The prefix for all logger names. + */ + private static final String PREFIX = "org.mongodb.driver"; + + private static final boolean USE_SLF4J = shouldUseSLF4J(); + + /** + * Gets a logger with the given suffix appended on to {@code PREFIX}, separated by a '.'. + * + * @param suffix the suffix for the logger + * @return the logger + * @see Loggers#PREFIX + */ + public static Logger getLogger(final String suffix) { + notNull("suffix", suffix); + if (suffix.startsWith(".") || suffix.endsWith(".")) { + throw new IllegalArgumentException("The suffix can not start or end with a '.'"); + } + + String name = PREFIX + "." + suffix; + + if (USE_SLF4J) { + return new SLF4JLogger(name); + } else { + return new NoOpLogger(name); + } + } + + private Loggers() { + } + + private static boolean shouldUseSLF4J() { + try { + Class.forName("org.slf4j.Logger"); + return true; + } catch (ClassNotFoundException e) { + java.util.logging.Logger.getLogger(PREFIX) + .warning(String.format("SLF4J not found on the classpath. Logging is disabled for the '%s' component", PREFIX)); + return false; + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/diagnostics/logging/NoOpLogger.java b/driver-core/src/main/com/mongodb/internal/diagnostics/logging/NoOpLogger.java new file mode 100644 index 00000000000..d04e071bc76 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/diagnostics/logging/NoOpLogger.java @@ -0,0 +1,33 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.diagnostics.logging; + +/** + * A logger that disables all levels and logs nothing + */ +class NoOpLogger implements Logger { + private final String name; + + NoOpLogger(final String name) { + this.name = name; + } + + @Override + public String getName() { + return name; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/diagnostics/logging/SLF4JLogger.java b/driver-core/src/main/com/mongodb/internal/diagnostics/logging/SLF4JLogger.java new file mode 100644 index 00000000000..a3abe49f8ef --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/diagnostics/logging/SLF4JLogger.java @@ -0,0 +1,108 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.diagnostics.logging; + +import org.slf4j.LoggerFactory; + +class SLF4JLogger implements Logger { + + private final org.slf4j.Logger delegate; + + SLF4JLogger(final String name) { + this.delegate = LoggerFactory.getLogger(name); + } + + @Override + public String getName() { + return delegate.getName(); + } + + @Override + public boolean isTraceEnabled() { + return delegate.isTraceEnabled(); + } + + @Override + public void trace(final String msg) { + delegate.trace(msg); + } + + @Override + public void trace(final String msg, final Throwable t) { + delegate.trace(msg, t); + } + + @Override + public boolean isDebugEnabled() { + return delegate.isDebugEnabled(); + } + + @Override + public void debug(final String msg) { + delegate.debug(msg); + } + + @Override + public void debug(final String msg, final Throwable t) { + delegate.debug(msg, t); + } + + @Override + public boolean isInfoEnabled() { + return delegate.isInfoEnabled(); + } + + @Override + public void info(final String msg) { + delegate.info(msg); + } + + @Override + public void info(final String msg, final Throwable t) { + delegate.info(msg, t); + } + + @Override + public boolean isWarnEnabled() { + return delegate.isWarnEnabled(); + } + + @Override + public void warn(final String msg) { + delegate.warn(msg); + } + + @Override + public void warn(final String msg, final Throwable t) { + delegate.warn(msg, t); + } + + @Override + public boolean isErrorEnabled() { + return delegate.isErrorEnabled(); + } + + @Override + public void error(final String msg) { + delegate.error(msg); + } + + @Override + public void error(final String msg, final Throwable t) { + delegate.error(msg, t); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/diagnostics/logging/package-info.java b/driver-core/src/main/com/mongodb/internal/diagnostics/logging/package-info.java new file mode 100644 index 00000000000..44488d7ecaa --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/diagnostics/logging/package-info.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. + */ +@Internal +@NonNullApi +package com.mongodb.internal.diagnostics.logging; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/dns/DefaultDnsResolver.java b/driver-core/src/main/com/mongodb/internal/dns/DefaultDnsResolver.java new file mode 100644 index 00000000000..f7b433b85bd --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/dns/DefaultDnsResolver.java @@ -0,0 +1,151 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.dns; + +import com.mongodb.MongoConfigurationException; +import com.mongodb.lang.Nullable; +import com.mongodb.spi.dns.DnsClient; +import com.mongodb.spi.dns.DnsClientProvider; +import com.mongodb.spi.dns.DnsWithResponseCodeException; + +import java.util.ArrayList; +import java.util.List; +import java.util.ServiceLoader; +import java.util.stream.StreamSupport; + +import static java.lang.String.format; +import static java.util.Arrays.asList; + +/** + * Utility class for resolving SRV and TXT records. + * + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
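+ *
+ * <p>A hypothetical usage sketch (host names and record data invented for illustration): the SRV
+ * query is issued for {@code "_" + srvServiceName + "._tcp." + srvHost}, and each answer is
+ * reduced to a {@code host:port} string.</p>
+ * <pre>{@code
+ * DnsResolver resolver = new DefaultDnsResolver();
+ * // queries _mongodb._tcp.cluster.example.com for SRV records
+ * List<String> hosts = resolver.resolveHostFromSrvRecords("cluster.example.com", "mongodb");
+ * // e.g. ["shard0.cluster.example.com:27017", "shard1.cluster.example.com:27017"]
+ * String extraParams = resolver.resolveAdditionalQueryParametersFromTxtRecords("cluster.example.com");
+ * // e.g. "replicaSet=rs0&authSource=admin", or "" when no TXT record exists
+ * }</pre>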
+ */ +public final class DefaultDnsResolver implements DnsResolver { + + private static final DnsClient DEFAULT_DNS_CLIENT; + + static { + DEFAULT_DNS_CLIENT = StreamSupport.stream(ServiceLoader.load(DnsClientProvider.class).spliterator(), false) + .findFirst() + .map(DnsClientProvider::create) + .orElse(new JndiDnsClient()); + } + + private final DnsClient dnsClient; + + public DefaultDnsResolver() { + this(DEFAULT_DNS_CLIENT); + } + + public DefaultDnsResolver(@Nullable final DnsClient dnsClient) { + this.dnsClient = dnsClient == null ? DEFAULT_DNS_CLIENT : dnsClient; + } + + /* + The format of SRV record is + priority weight port target. + e.g. + 0 5 5060 example.com. + + The priority and weight are ignored, and we just concatenate the host (after removing the ending '.') and port with a + ':' in between, as expected by ServerAddress. + */ + @Override + public List resolveHostFromSrvRecords(final String srvHost, final String srvServiceName) { + List srvHostParts = asList(srvHost.split("\\.")); + + String srvHostDomain; + boolean srvHasLessThanThreeParts = srvHostParts.size() < 3; + if (srvHasLessThanThreeParts) { + srvHostDomain = srvHost; + } else { + srvHostDomain = srvHost.substring(srvHost.indexOf('.') + 1); + } + + List srvHostDomainParts = asList(srvHostDomain.split("\\.")); + List hosts = new ArrayList<>(); + String resourceName = "_" + srvServiceName + "._tcp." + srvHost; + try { + List srvAttributeValues = dnsClient.getResourceRecordData(resourceName, "SRV"); + if (srvAttributeValues == null || srvAttributeValues.isEmpty()) { + throw new MongoConfigurationException(format("No SRV records available for '%s'.", resourceName)); + } + + for (String srvRecord : srvAttributeValues) { + String[] split = srvRecord.split(" "); + String resolvedHost = split[3].endsWith(".") ? split[3].substring(0, split[3].length() - 1) : split[3]; + String resolvedHostDomain = resolvedHost.substring(resolvedHost.indexOf('.') + 1); + List resolvedHostDomainParts = asList(resolvedHostDomain.split("\\.")); + if (!sameDomain(srvHostDomainParts, resolvedHostDomainParts)) { + throw new MongoConfigurationException( + format("The SRV host name '%s' resolved to a host '%s' that does not share domain name", + srvHost, resolvedHost)); + } + if (srvHasLessThanThreeParts && resolvedHostDomainParts.size() <= srvHostDomainParts.size()) { + throw new MongoConfigurationException( + format("The SRV host name '%s' resolved to a host '%s' that does not have at least one more domain level", + srvHost, resolvedHost)); + } + hosts.add(resolvedHost + ":" + split[2]); + } + + } catch (Exception e) { + throw new MongoConfigurationException(format("Failed looking up SRV record for '%s'.", resourceName), e); + } + return hosts; + } + + private static boolean sameDomain(final List srvHostDomainParts, final List resolvedHostDomainParts) { + if (srvHostDomainParts.size() > resolvedHostDomainParts.size()) { + return false; + } + return resolvedHostDomainParts.subList(resolvedHostDomainParts.size() - srvHostDomainParts.size(), resolvedHostDomainParts.size()) + .equals(srvHostDomainParts); + } + + /* + A TXT record is just a string + We require each to be one or more query parameters for a MongoDB connection string. 
+ Here we concatenate TXT records together with a '&' separator as required by connection strings + */ + @Override + public String resolveAdditionalQueryParametersFromTxtRecords(final String host) { + try { + List attributeValues = dnsClient.getResourceRecordData(host, "TXT"); + if (attributeValues == null || attributeValues.isEmpty()) { + return ""; + } + if (attributeValues.size() > 1) { + throw new MongoConfigurationException(format("Multiple TXT records found for host '%s'. Only one is permitted", + host)); + } + // Remove all space characters, as the DNS resolver for TXT records inserts a space character + // between each character-string in a single TXT record. That whitespace is spurious in + // this context and must be removed + return attributeValues.get(0).replaceAll("\\s", ""); + } catch (DnsWithResponseCodeException e) { + // ignore NXDomain error (error code 3, "Non-Existent Domain) + if (e.getResponseCode() != 3) { + throw new MongoConfigurationException("Failed looking up TXT record for host " + host, e); + } + return ""; + } catch (Exception e) { + throw new MongoConfigurationException("Failed looking up TXT record for host " + host, e); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/dns/DnsResolver.java b/driver-core/src/main/com/mongodb/internal/dns/DnsResolver.java new file mode 100644 index 00000000000..5ff12c33bc8 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/dns/DnsResolver.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.dns; + +import java.util.List; + +/** + * Utility interface for resolving SRV and TXT records. + * + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public interface DnsResolver { + + List resolveHostFromSrvRecords(String srvHost, String srvServiceName); + + String resolveAdditionalQueryParametersFromTxtRecords(String host); +} diff --git a/driver-core/src/main/com/mongodb/internal/dns/JndiDnsClient.java b/driver-core/src/main/com/mongodb/internal/dns/JndiDnsClient.java new file mode 100644 index 00000000000..71df713fb8b --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/dns/JndiDnsClient.java @@ -0,0 +1,88 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.dns; + +import com.mongodb.MongoClientException; +import com.mongodb.spi.dns.DnsClient; +import com.mongodb.spi.dns.DnsException; +import com.mongodb.spi.dns.DnsWithResponseCodeException; + +import javax.naming.Context; +import javax.naming.NameNotFoundException; +import javax.naming.NamingEnumeration; +import javax.naming.NamingException; +import javax.naming.directory.Attribute; +import javax.naming.directory.InitialDirContext; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Hashtable; +import java.util.List; + +/** + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
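+ *
+ * <p>A minimal usage sketch (record name invented for illustration): the raw record data is
+ * returned as strings, one per record.</p>
+ * <pre>{@code
+ * DnsClient client = new JndiDnsClient();
+ * List<String> records = client.getResourceRecordData("_mongodb._tcp.cluster.example.com", "SRV");
+ * // each entry has the form "priority weight port target", e.g. "0 5 27017 shard0.cluster.example.com."
+ * }</pre>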
+ */ +public final class JndiDnsClient implements DnsClient { + + @Override + public List getResourceRecordData(final String name, final String type) throws DnsException { + InitialDirContext dirContext = createDnsDirContext(); + try { + Attribute attribute = dirContext.getAttributes(name, new String[]{type}).get(type); + if (attribute == null) { + return Collections.emptyList(); + } + List attributeValues = new ArrayList<>(); + NamingEnumeration namingEnumeration = attribute.getAll(); + while (namingEnumeration.hasMore()) { + attributeValues.add((String) namingEnumeration.next()); + } + return attributeValues; + } catch (NameNotFoundException e) { + throw new DnsWithResponseCodeException(e.getMessage(), 3, e); + } catch (NamingException e) { + throw new DnsException(e.getMessage(), e); + } finally { + try { + dirContext.close(); + } catch (NamingException e) { + // ignore + } + } + } + + /* + It's unfortunate that we take a runtime dependency on com.sun.jndi.dns.DnsContextFactory. + This is not guaranteed to work on all JVMs but in practice is expected to work on most. + */ + private static InitialDirContext createDnsDirContext() { + Hashtable envProps = new Hashtable<>(); + envProps.put(Context.INITIAL_CONTEXT_FACTORY, "com.sun.jndi.dns.DnsContextFactory"); + + try { + return new InitialDirContext(envProps); + } catch (NamingException e) { + // Just in case the provider url default has been changed to a non-dns pseudo url, fallback to the JDK default + envProps.put(Context.PROVIDER_URL, "dns:"); + try { + return new InitialDirContext(envProps); + } catch (NamingException ex) { + throw new MongoClientException("Unable to support mongodb+srv// style connections as the 'com.sun.jndi.dns.DnsContextFactory' " + + "class is not available in this JRE. A JNDI context is required for resolving SRV records.", e); + } + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/dns/package-info.java b/driver-core/src/main/com/mongodb/internal/dns/package-info.java new file mode 100644 index 00000000000..e02c7f1ebe8 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/dns/package-info.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. + */ +@Internal +@NonNullApi +package com.mongodb.internal.dns; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/event/ClusterListenerMulticaster.java b/driver-core/src/main/com/mongodb/internal/event/ClusterListenerMulticaster.java new file mode 100644 index 00000000000..4b2698bd4f2 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/event/ClusterListenerMulticaster.java @@ -0,0 +1,82 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.event; + +import com.mongodb.event.ClusterClosedEvent; +import com.mongodb.event.ClusterDescriptionChangedEvent; +import com.mongodb.event.ClusterListener; +import com.mongodb.event.ClusterOpeningEvent; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; + +import java.util.ArrayList; +import java.util.List; + +import static com.mongodb.assertions.Assertions.isTrue; +import static java.lang.String.format; + + +final class ClusterListenerMulticaster implements ClusterListener { + private static final Logger LOGGER = Loggers.getLogger("cluster.event"); + + private final List clusterListeners; + + ClusterListenerMulticaster(final List clusterListeners) { + isTrue("All ClusterListener instances are non-null", !clusterListeners.contains(null)); + this.clusterListeners = new ArrayList<>(clusterListeners); + } + + @Override + public void clusterOpening(final ClusterOpeningEvent event) { + for (final ClusterListener cur : clusterListeners) { + try { + cur.clusterOpening(event); + } catch (Exception e) { + if (LOGGER.isWarnEnabled()) { + LOGGER.warn(format("Exception thrown raising cluster opening event to listener %s", cur), e); + } + } + } + } + + @Override + public void clusterClosed(final ClusterClosedEvent event) { + for (final ClusterListener cur : clusterListeners) { + try { + cur.clusterClosed(event); + } catch (Exception e) { + if (LOGGER.isWarnEnabled()) { + LOGGER.warn(format("Exception thrown raising cluster closed event to listener %s", cur), e); + } + + } + } + } + + @Override + public void clusterDescriptionChanged(final ClusterDescriptionChangedEvent event) { + for (final ClusterListener cur : clusterListeners) { + try { + cur.clusterDescriptionChanged(event); + } catch (Exception e) { + if (LOGGER.isWarnEnabled()) { + LOGGER.warn(format("Exception thrown raising cluster description changed event to listener %s", cur), e); + } + } + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/event/CommandListenerMulticaster.java b/driver-core/src/main/com/mongodb/internal/event/CommandListenerMulticaster.java new file mode 100644 index 00000000000..e18318f8cde --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/event/CommandListenerMulticaster.java @@ -0,0 +1,81 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.event; + +import com.mongodb.event.CommandFailedEvent; +import com.mongodb.event.CommandListener; +import com.mongodb.event.CommandStartedEvent; +import com.mongodb.event.CommandSucceededEvent; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; + +import java.util.ArrayList; +import java.util.List; + +import static com.mongodb.assertions.Assertions.isTrue; +import static java.lang.String.format; + + +final class CommandListenerMulticaster implements CommandListener { + private static final Logger LOGGER = Loggers.getLogger("protocol.event"); + + private final List commandListeners; + + CommandListenerMulticaster(final List commandListeners) { + isTrue("All CommandListener instances are non-null", !commandListeners.contains(null)); + this.commandListeners = new ArrayList<>(commandListeners); + } + + @Override + public void commandStarted(final CommandStartedEvent event) { + for (CommandListener cur : commandListeners) { + try { + cur.commandStarted(event); + } catch (Exception e) { + if (LOGGER.isWarnEnabled()) { + LOGGER.warn(format("Exception thrown raising command started event to listener %s", cur), e); + } + } + } + } + + @Override + public void commandSucceeded(final CommandSucceededEvent event) { + for (CommandListener cur : commandListeners) { + try { + cur.commandSucceeded(event); + } catch (Exception e) { + if (LOGGER.isWarnEnabled()) { + LOGGER.warn(format("Exception thrown raising command succeeded event to listener %s", cur), e); + } + } + } + } + + @Override + public void commandFailed(final CommandFailedEvent event) { + for (CommandListener cur : commandListeners) { + try { + cur.commandFailed(event); + } catch (Exception e) { + if (LOGGER.isWarnEnabled()) { + LOGGER.warn(format("Exception thrown raising command failed event to listener %s", cur), e); + } + } + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/event/ConnectionPoolListenerMulticaster.java b/driver-core/src/main/com/mongodb/internal/event/ConnectionPoolListenerMulticaster.java new file mode 100644 index 00000000000..5aa5d9fa305 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/event/ConnectionPoolListenerMulticaster.java @@ -0,0 +1,192 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.event; + +import com.mongodb.event.ConnectionCheckOutFailedEvent; +import com.mongodb.event.ConnectionCheckOutStartedEvent; +import com.mongodb.event.ConnectionCheckedInEvent; +import com.mongodb.event.ConnectionCheckedOutEvent; +import com.mongodb.event.ConnectionClosedEvent; +import com.mongodb.event.ConnectionCreatedEvent; +import com.mongodb.event.ConnectionPoolClearedEvent; +import com.mongodb.event.ConnectionPoolClosedEvent; +import com.mongodb.event.ConnectionPoolCreatedEvent; +import com.mongodb.event.ConnectionPoolListener; +import com.mongodb.event.ConnectionPoolReadyEvent; +import com.mongodb.event.ConnectionReadyEvent; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; + +import java.util.ArrayList; +import java.util.List; + +import static com.mongodb.assertions.Assertions.isTrue; +import static java.lang.String.format; + +final class ConnectionPoolListenerMulticaster implements ConnectionPoolListener { + private static final Logger LOGGER = Loggers.getLogger("protocol.event"); + + private final List connectionPoolListeners; + + ConnectionPoolListenerMulticaster(final List connectionPoolListeners) { + isTrue("All ConnectionPoolListener instances are non-null", !connectionPoolListeners.contains(null)); + this.connectionPoolListeners = new ArrayList<>(connectionPoolListeners); + } + + @Override + public void connectionPoolCreated(final ConnectionPoolCreatedEvent event) { + for (ConnectionPoolListener cur : connectionPoolListeners) { + try { + cur.connectionPoolCreated(event); + } catch (Exception e) { + if (LOGGER.isWarnEnabled()) { + LOGGER.warn(format("Exception thrown raising connection pool created event to listener %s", cur), e); + } + } + } + } + + @Override + public void connectionPoolCleared(final ConnectionPoolClearedEvent event) { + for (ConnectionPoolListener cur : connectionPoolListeners) { + try { + cur.connectionPoolCleared(event); + } catch (Exception e) { + if (LOGGER.isWarnEnabled()) { + LOGGER.warn(format("Exception thrown raising connection pool cleared event to listener %s", cur), e); + } + } + } + } + + @Override + public void connectionPoolReady(final ConnectionPoolReadyEvent event) { + for (ConnectionPoolListener cur : connectionPoolListeners) { + try { + cur.connectionPoolReady(event); + } catch (Exception e) { + if (LOGGER.isWarnEnabled()) { + LOGGER.warn(format("Exception thrown raising connection pool ready event to listener %s", cur), e); + } + } + } + } + + @Override + public void connectionPoolClosed(final ConnectionPoolClosedEvent event) { + for (ConnectionPoolListener cur : connectionPoolListeners) { + try { + cur.connectionPoolClosed(event); + } catch (Exception e) { + if (LOGGER.isWarnEnabled()) { + LOGGER.warn(format("Exception thrown raising connection pool closed event to listener %s", cur), e); + } + } + } + } + + @Override + public void connectionCheckOutStarted(final ConnectionCheckOutStartedEvent event) { + for (ConnectionPoolListener cur : connectionPoolListeners) { + try { + cur.connectionCheckOutStarted(event); + } catch (Exception e) { + if (LOGGER.isWarnEnabled()) { + LOGGER.warn(format("Exception thrown raising connection check out started event to listener %s", cur), e); + } + } + } + } + + @Override + public void connectionCheckedOut(final ConnectionCheckedOutEvent event) { + for (ConnectionPoolListener cur : connectionPoolListeners) { + try { + cur.connectionCheckedOut(event); + } catch (Exception e) { + if (LOGGER.isWarnEnabled()) 
{ + LOGGER.warn(format("Exception thrown raising connection pool checked out event to listener %s", cur), e); + } + } + } + } + + @Override + public void connectionCheckOutFailed(final ConnectionCheckOutFailedEvent event) { + for (ConnectionPoolListener cur : connectionPoolListeners) { + try { + cur.connectionCheckOutFailed(event); + } catch (Exception e) { + if (LOGGER.isWarnEnabled()) { + LOGGER.warn(format("Exception thrown raising connection pool check out failed event to listener %s", cur), e); + } + } + } + } + + @Override + public void connectionCheckedIn(final ConnectionCheckedInEvent event) { + for (ConnectionPoolListener cur : connectionPoolListeners) { + try { + cur.connectionCheckedIn(event); + } catch (Exception e) { + if (LOGGER.isWarnEnabled()) { + LOGGER.warn(format("Exception thrown raising connection pool checked in event to listener %s", cur), e); + } + } + } + } + + @Override + public void connectionCreated(final ConnectionCreatedEvent event) { + for (ConnectionPoolListener cur : connectionPoolListeners) { + try { + cur.connectionCreated(event); + } catch (Exception e) { + if (LOGGER.isWarnEnabled()) { + LOGGER.warn(format("Exception thrown raising connection pool connection created event to listener %s", cur), e); + } + } + } + } + + @Override + public void connectionReady(final ConnectionReadyEvent event) { + for (ConnectionPoolListener cur : connectionPoolListeners) { + try { + cur.connectionReady(event); + } catch (Exception e) { + if (LOGGER.isWarnEnabled()) { + LOGGER.warn(format("Exception thrown raising connection pool connection ready event to listener %s", cur), e); + } + } + } + } + + @Override + public void connectionClosed(final ConnectionClosedEvent event) { + for (ConnectionPoolListener cur : connectionPoolListeners) { + try { + cur.connectionClosed(event); + } catch (Exception e) { + if (LOGGER.isWarnEnabled()) { + LOGGER.warn(format("Exception thrown raising connection pool connection removed event to listener %s", cur), e); + } + } + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/event/EventListenerHelper.java b/driver-core/src/main/com/mongodb/internal/event/EventListenerHelper.java new file mode 100644 index 00000000000..b6f82651127 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/event/EventListenerHelper.java @@ -0,0 +1,120 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.event; + +import com.mongodb.connection.ClusterSettings; +import com.mongodb.connection.ConnectionPoolSettings; +import com.mongodb.connection.ServerSettings; +import com.mongodb.event.ClusterListener; +import com.mongodb.event.CommandListener; +import com.mongodb.event.ConnectionPoolListener; +import com.mongodb.event.ServerListener; +import com.mongodb.event.ServerMonitorListener; +import com.mongodb.lang.Nullable; + +import java.util.List; + +import static com.mongodb.assertions.Assertions.assertTrue; + +/** + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
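+ *
+ * <p>A minimal usage sketch (the listener instances are assumed to exist): zero command listeners
+ * yield {@code null}, a single listener is returned as-is, and two or more are wrapped in a
+ * multicaster that forwards every event to each of them.</p>
+ * <pre>{@code
+ * CommandListener listener = EventListenerHelper.getCommandListener(asList(metricsListener, auditListener));
+ * // a CommandListenerMulticaster: an exception thrown by one listener is logged, not propagated
+ * }</pre>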
+ */ +public final class EventListenerHelper { + + /** + * Returns a single listener. Asserts that the number configured is <= 1, and returns a no-op listener if 0 or the only one + * in the list if 1. + */ + public static ClusterListener singleClusterListener(final ClusterSettings clusterSettings) { + assertTrue(clusterSettings.getClusterListeners().size() <= 1); + return clusterSettings.getClusterListeners().isEmpty() + ? NO_OP_CLUSTER_LISTENER + : clusterSettings.getClusterListeners().get(0); + } + + /** + * Returns a single listener. Asserts that the number configured is <= 1, and returns a no-op listener if 0 or the only one + * in the list if 1. + */ + public static ServerListener singleServerListener(final ServerSettings serverSettings) { + assertTrue(serverSettings.getServerListeners().size() <= 1); + return serverSettings.getServerListeners().isEmpty() + ? NO_OP_SERVER_LISTENER + : serverSettings.getServerListeners().get(0); + } + + /** + * Returns a single listener. Asserts that the number configured is <= 1, and returns a no-op listener if 0 or the only one + * in the list if 1. + */ + public static ServerMonitorListener singleServerMonitorListener(final ServerSettings serverSettings) { + assertTrue(serverSettings.getServerMonitorListeners().size() <= 1); + return serverSettings.getServerMonitorListeners().isEmpty() + ? NO_OP_SERVER_MONITOR_LISTENER + : serverSettings.getServerMonitorListeners().get(0); + } + + public static ClusterListener clusterListenerMulticaster(final List clusterListeners) { + return new ClusterListenerMulticaster(clusterListeners); + } + + public static ServerListener serverListenerMulticaster(final List serverListeners) { + return new ServerListenerMulticaster(serverListeners); + } + + public static ServerMonitorListener serverMonitorListenerMulticaster(final List serverMonitorListeners) { + return new ServerMonitorListenerMulticaster(serverMonitorListeners); + } + + @Nullable + public static CommandListener getCommandListener(final List commandListeners) { + switch (commandListeners.size()) { + case 0: + return null; + case 1: + return commandListeners.get(0); + default: + return new CommandListenerMulticaster(commandListeners); + } + } + + public static ConnectionPoolListener getConnectionPoolListener(final ConnectionPoolSettings connectionPoolSettings) { + switch (connectionPoolSettings.getConnectionPoolListeners().size()) { + case 0: + return NO_OP_CONNECTION_POOL_LISTENER; + case 1: + return connectionPoolSettings.getConnectionPoolListeners().get(0); + default: + return new ConnectionPoolListenerMulticaster(connectionPoolSettings.getConnectionPoolListeners()); + } + } + + public static final ServerListener NO_OP_SERVER_LISTENER = new ServerListener() { + }; + + public static final ServerMonitorListener NO_OP_SERVER_MONITOR_LISTENER = new ServerMonitorListener() { + }; + + public static final ClusterListener NO_OP_CLUSTER_LISTENER = new ClusterListener() { + }; + + private static final ConnectionPoolListener NO_OP_CONNECTION_POOL_LISTENER = new ConnectionPoolListener() { + }; + + private EventListenerHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/event/EventReasonMessageResolver.java b/driver-core/src/main/com/mongodb/internal/event/EventReasonMessageResolver.java new file mode 100644 index 00000000000..f9cf4b9f9e8 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/event/EventReasonMessageResolver.java @@ -0,0 +1,60 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.event; + +import com.mongodb.event.ConnectionCheckOutFailedEvent; +import com.mongodb.event.ConnectionClosedEvent; + +/** + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
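+ *
+ * <p>A minimal usage sketch: each reason maps to a fixed human-readable message.</p>
+ * <pre>{@code
+ * String message = EventReasonMessageResolver.getMessage(ConnectionClosedEvent.Reason.STALE);
+ * // "Connection became stale because the pool was cleared"
+ * }</pre>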
+ */ +public final class EventReasonMessageResolver { + private static final String MESSAGE_CONNECTION_POOL_WAS_CLOSED = "Connection pool was closed"; + private static final String EMPTY_REASON = ""; + + public static String getMessage(final ConnectionClosedEvent.Reason reason) { + switch (reason) { + case STALE: + return "Connection became stale because the pool was cleared"; + case IDLE: + return "Connection has been available but unused for longer than the configured max idle time"; + case ERROR: + return "An error occurred while using the connection"; + case POOL_CLOSED: + return MESSAGE_CONNECTION_POOL_WAS_CLOSED; + default: + return EMPTY_REASON; + } + } + + public static String getMessage(final ConnectionCheckOutFailedEvent.Reason reason) { + switch (reason) { + case TIMEOUT: + return "Wait queue timeout elapsed without a connection becoming available"; + case CONNECTION_ERROR: + return "An error occurred while trying to establish a new connection"; + case POOL_CLOSED: + return MESSAGE_CONNECTION_POOL_WAS_CLOSED; + default: + return EMPTY_REASON; + } + } + + private EventReasonMessageResolver() { + //NOP + } +} diff --git a/driver-core/src/main/com/mongodb/internal/event/ServerListenerMulticaster.java b/driver-core/src/main/com/mongodb/internal/event/ServerListenerMulticaster.java new file mode 100644 index 00000000000..c0173ba1883 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/event/ServerListenerMulticaster.java @@ -0,0 +1,82 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.event; + +import com.mongodb.event.ServerClosedEvent; +import com.mongodb.event.ServerDescriptionChangedEvent; +import com.mongodb.event.ServerListener; +import com.mongodb.event.ServerOpeningEvent; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; + +import java.util.ArrayList; +import java.util.List; + +import static com.mongodb.assertions.Assertions.isTrue; +import static java.lang.String.format; + + +final class ServerListenerMulticaster implements ServerListener { + + private static final Logger LOGGER = Loggers.getLogger("cluster.event"); + + private final List serverListeners; + + ServerListenerMulticaster(final List serverListeners) { + isTrue("All ServerListener instances are non-null", !serverListeners.contains(null)); + this.serverListeners = new ArrayList<>(serverListeners); + } + + @Override + public void serverOpening(final ServerOpeningEvent event) { + for (ServerListener cur : serverListeners) { + try { + cur.serverOpening(event); + } catch (Exception e) { + if (LOGGER.isWarnEnabled()) { + LOGGER.warn(format("Exception thrown raising server opening event to listener %s", cur), e); + } + } + } + } + + @Override + public void serverClosed(final ServerClosedEvent event) { + for (ServerListener cur : serverListeners) { + try { + cur.serverClosed(event); + } catch (Exception e) { + if (LOGGER.isWarnEnabled()) { + LOGGER.warn(format("Exception thrown raising server opening event to listener %s", cur), e); + } + } + } + } + + @Override + public void serverDescriptionChanged(final ServerDescriptionChangedEvent event) { + for (ServerListener cur : serverListeners) { + try { + cur.serverDescriptionChanged(event); + } catch (Exception e) { + if (LOGGER.isWarnEnabled()) { + LOGGER.warn(format("Exception thrown raising server description changed event to listener %s", cur), e); + } + } + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/event/ServerMonitorListenerMulticaster.java b/driver-core/src/main/com/mongodb/internal/event/ServerMonitorListenerMulticaster.java new file mode 100644 index 00000000000..ed0ccb1f809 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/event/ServerMonitorListenerMulticaster.java @@ -0,0 +1,80 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.event; + +import com.mongodb.event.ServerHeartbeatFailedEvent; +import com.mongodb.event.ServerHeartbeatStartedEvent; +import com.mongodb.event.ServerHeartbeatSucceededEvent; +import com.mongodb.event.ServerMonitorListener; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; + +import java.util.ArrayList; +import java.util.List; + +import static com.mongodb.assertions.Assertions.isTrue; +import static java.lang.String.format; + +final class ServerMonitorListenerMulticaster implements ServerMonitorListener { + private static final Logger LOGGER = Loggers.getLogger("cluster.event"); + + private final List serverMonitorListeners; + + ServerMonitorListenerMulticaster(final List serverMonitorListeners) { + isTrue("All ServerMonitorListener instances are non-null", !serverMonitorListeners.contains(null)); + this.serverMonitorListeners = new ArrayList<>(serverMonitorListeners); + } + + @Override + public void serverHearbeatStarted(final ServerHeartbeatStartedEvent event) { + for (ServerMonitorListener cur : serverMonitorListeners) { + try { + cur.serverHearbeatStarted(event); + } catch (Exception e) { + if (LOGGER.isWarnEnabled()) { + LOGGER.warn(format("Exception thrown raising server heartbeat started event to listener %s", cur), e); + } + } + } + } + + @Override + public void serverHeartbeatSucceeded(final ServerHeartbeatSucceededEvent event) { + for (ServerMonitorListener cur : serverMonitorListeners) { + try { + cur.serverHeartbeatSucceeded(event); + } catch (Exception e) { + if (LOGGER.isWarnEnabled()) { + LOGGER.warn(format("Exception thrown raising server heartbeat succeeded event to listener %s", cur), e); + } + } + } + } + + @Override + public void serverHeartbeatFailed(final ServerHeartbeatFailedEvent event) { + for (ServerMonitorListener cur : serverMonitorListeners) { + try { + cur.serverHeartbeatFailed(event); + } catch (Exception e) { + if (LOGGER.isWarnEnabled()) { + LOGGER.warn(format("Exception thrown raising server heartbeat failed event to listener %s", cur), e); + } + } + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/event/package-info.java b/driver-core/src/main/com/mongodb/internal/event/package-info.java new file mode 100644 index 00000000000..f36c52a8298 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/event/package-info.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. 
+ */ +@Internal +@NonNullApi +package com.mongodb.internal.event; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/function/CheckedConsumer.java b/driver-core/src/main/com/mongodb/internal/function/CheckedConsumer.java new file mode 100644 index 00000000000..5c178f8ed33 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/function/CheckedConsumer.java @@ -0,0 +1,32 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.function; + +/** + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
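+ *
+ * <p>A minimal usage sketch ({@code stream} is assumed to exist): unlike
+ * {@link java.util.function.Consumer}, the body may throw the declared checked exception.</p>
+ * <pre>{@code
+ * CheckedConsumer<Closeable, IOException> closer = Closeable::close;
+ * closer.accept(stream); // may throw IOException
+ * }</pre>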
+ */ +@FunctionalInterface +public interface CheckedConsumer { + + /** + * Performs this operation on the given argument. + * + * @param t the input argument + * @throws E the checked exception to throw + */ + void accept(T t) throws E; +} diff --git a/driver-core/src/main/com/mongodb/internal/function/CheckedFunction.java b/driver-core/src/main/com/mongodb/internal/function/CheckedFunction.java new file mode 100644 index 00000000000..39b280aa561 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/function/CheckedFunction.java @@ -0,0 +1,33 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.function; + +/** + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
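+ *
+ * <p>A minimal usage sketch: a function whose body may throw the declared checked exception.</p>
+ * <pre>{@code
+ * CheckedFunction<String, byte[], IOException> readFile = name -> Files.readAllBytes(Paths.get(name));
+ * }</pre>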
+ */ +@FunctionalInterface +public interface CheckedFunction { + + /** + * Applies the function to the given argument. + * + * @param t the function argument + * @return the function result + * @throws E the checked exception to throw + */ + R apply(T t) throws E; +} diff --git a/driver-core/src/main/com/mongodb/internal/function/CheckedRunnable.java b/driver-core/src/main/com/mongodb/internal/function/CheckedRunnable.java new file mode 100644 index 00000000000..f5b24c28a72 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/function/CheckedRunnable.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.function; + +/** + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
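+ *
+ * <p>A minimal usage sketch: a runnable whose body may throw the declared checked exception.</p>
+ * <pre>{@code
+ * CheckedRunnable<InterruptedException> pause = () -> Thread.sleep(100);
+ * pause.run(); // may throw InterruptedException
+ * }</pre>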
+ */ +@FunctionalInterface +public interface CheckedRunnable { + + /** + * Checked run. + * + * @throws E the checked exception to throw + */ + void run() throws E; +} diff --git a/driver-core/src/main/com/mongodb/internal/function/CheckedSupplier.java b/driver-core/src/main/com/mongodb/internal/function/CheckedSupplier.java new file mode 100644 index 00000000000..ab39e5c824a --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/function/CheckedSupplier.java @@ -0,0 +1,32 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.function; + +/** + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
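+ *
+ * <p>A minimal usage sketch: a supplier whose body may throw the declared checked exception.</p>
+ * <pre>{@code
+ * CheckedSupplier<String, UnknownHostException> localHostName =
+ *         () -> InetAddress.getLocalHost().getHostName();
+ * }</pre>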
+ */ +@FunctionalInterface +public interface CheckedSupplier { + + /** + * Gets a result. + * + * @return a result + * @throws E the checked exception to throw + */ + T get() throws E; +} diff --git a/driver-core/src/main/com/mongodb/internal/function/package-info.java b/driver-core/src/main/com/mongodb/internal/function/package-info.java new file mode 100644 index 00000000000..50db2793b8a --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/function/package-info.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. + */ +@Internal +@NonNullApi +package com.mongodb.internal.function; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/graalvm/substitution/UnixServerAddressSubstitution.java b/driver-core/src/main/com/mongodb/internal/graalvm/substitution/UnixServerAddressSubstitution.java new file mode 100644 index 00000000000..9d52c730c1b --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/graalvm/substitution/UnixServerAddressSubstitution.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.graalvm.substitution; + +import com.mongodb.UnixServerAddress; +import com.oracle.svm.core.annotate.Substitute; +import com.oracle.svm.core.annotate.TargetClass; + +@TargetClass(UnixServerAddress.class) +public final class UnixServerAddressSubstitution { + @Substitute + private static void checkNotInGraalVmNativeImage() { + throw new UnsupportedOperationException("UnixServerAddress is not supported in GraalVM native image"); + } + + private UnixServerAddressSubstitution() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/inject/EmptyProvider.java b/driver-core/src/main/com/mongodb/internal/inject/EmptyProvider.java new file mode 100644 index 00000000000..3533daf9982 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/inject/EmptyProvider.java @@ -0,0 +1,44 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.inject; + +import com.mongodb.annotations.Immutable; + +import java.util.Optional; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +@Immutable +public final class EmptyProvider implements OptionalProvider { + private static final EmptyProvider INSTANCE = new EmptyProvider<>(); + + private EmptyProvider() { + } + + /** + * Returns {@link Optional#empty()}. + */ + @Override + public Optional optional() { + return Optional.empty(); + } + + @SuppressWarnings("unchecked") + public static EmptyProvider instance() { + return (EmptyProvider) INSTANCE; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/inject/OptionalProvider.java b/driver-core/src/main/com/mongodb/internal/inject/OptionalProvider.java new file mode 100644 index 00000000000..facbf542156 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/inject/OptionalProvider.java @@ -0,0 +1,38 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.inject; + +import com.mongodb.annotations.ThreadSafe; + +import java.util.Optional; + +/** + * If a constructor parameter is of type {@link OptionalProvider}, then the corresponding argument must not be {@code null}. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ * + * @param The type of provided objects. + * @see Provider + */ +@ThreadSafe +public interface OptionalProvider { + /** + * Provides either a fully constructed and injected object or an {@linkplain Optional#isPresent() empty} {@link Optional} + * to signify that the provider does not provide an object. This method may be called multiple times and must provide the same object. + * This method must not be called by a constructor that got this provider as its argument. + */ + Optional optional(); +} diff --git a/driver-core/src/main/com/mongodb/internal/inject/Provider.java b/driver-core/src/main/com/mongodb/internal/inject/Provider.java new file mode 100644 index 00000000000..0390fade8fb --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/inject/Provider.java @@ -0,0 +1,44 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.inject; + +import com.mongodb.annotations.ThreadSafe; + +import java.util.Optional; +import java.util.function.Supplier; + +/** + * If a constructor parameter is of type {@link Provider}, then the corresponding argument must not be {@code null}. + * + *
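To make the optional() contract concrete, a hypothetical consumer might look like the sketch below; MetricsReporter and the Clock dependency are invented for illustration, and OptionalProvider<T> assumes the type parameter that extraction stripped:

```java
import com.mongodb.internal.inject.OptionalProvider;

import java.time.Clock;

final class MetricsReporter {
    private final OptionalProvider<Clock> clockProvider;

    MetricsReporter(final OptionalProvider<Clock> clockProvider) {
        // Per the contract above: the provider argument itself is never null,
        // and optional() must not be called from inside this constructor.
        this.clockProvider = clockProvider;
    }

    long nowOrZero() {
        // An empty Optional means the provider deliberately provides nothing.
        return clockProvider.optional().map(Clock::millis).orElse(0L);
    }
}
```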
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ * + * @param The type of provided objects. + */ +@ThreadSafe +public interface Provider extends OptionalProvider, Supplier { + /** + * Provides a fully constructed and injected object. + * Calling this method is equivalent to calling {@link #optional() optional()}{@code .}{@link Optional#get() get()}. + */ + T get(); + + /** + * Provides a fully constructed and injected object, never returns an {@linkplain Optional#isPresent() empty} {@link Optional}. + * + * @see #get() + */ + Optional optional(); +} diff --git a/driver-core/src/main/com/mongodb/internal/inject/SameObjectProvider.java b/driver-core/src/main/com/mongodb/internal/inject/SameObjectProvider.java new file mode 100644 index 00000000000..3d521762800 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/inject/SameObjectProvider.java @@ -0,0 +1,62 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.inject; + +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.lang.Nullable; + +import java.util.Optional; +import java.util.concurrent.atomic.AtomicReference; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.assertTrue; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +@ThreadSafe +public final class SameObjectProvider implements Provider { + private final AtomicReference object; + + private SameObjectProvider(@Nullable final T o) { + object = new AtomicReference<>(); + if (o != null) { + initialize(o); + } + } + + @Override + public T get() { + return assertNotNull(object.get()); + } + + @Override + public Optional optional() { + return Optional.of(get()); + } + + public void initialize(final T o) { + assertTrue(object.compareAndSet(null, o)); + } + + public static SameObjectProvider initialized(final T o) { + return new SameObjectProvider<>(o); + } + + public static SameObjectProvider uninitialized() { + return new SameObjectProvider<>(null); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/inject/package-info.java b/driver-core/src/main/com/mongodb/internal/inject/package-info.java new file mode 100644 index 00000000000..34ad231d106 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/inject/package-info.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. + */ +@Internal +@NonNullApi +package com.mongodb.internal.inject; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/logging/LogMessage.java b/driver-core/src/main/com/mongodb/internal/logging/LogMessage.java new file mode 100644 index 00000000000..c23befc5e28 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/logging/LogMessage.java @@ -0,0 +1,257 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.logging; + +import com.mongodb.connection.ClusterId; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.lang.Nullable; + +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.NoSuchElementException; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; +import static java.util.function.Function.identity; + +/** + *
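The uninitialized()/initialize() pair above is what makes SameObjectProvider suitable for wiring mutually referencing objects. A sketch under invented names (Monitor, Server), assuming the stripped generics SameObjectProvider<T> and Provider<T>:

```java
import com.mongodb.internal.inject.Provider;
import com.mongodb.internal.inject.SameObjectProvider;

final class CircularWiringSketch {
    static final class Monitor {
        private final Provider<Server> serverProvider;

        Monitor(final Provider<Server> serverProvider) {
            this.serverProvider = serverProvider; // must not call get() here
        }

        void check() {
            serverProvider.get().ping();
        }
    }

    static final class Server {
        private final Monitor monitor;

        Server(final Monitor monitor) {
            this.monitor = monitor;
        }

        void ping() {
        }
    }

    static Server wire() {
        SameObjectProvider<Server> provider = SameObjectProvider.uninitialized();
        Monitor monitor = new Monitor(provider); // sees the provider, not the server
        Server server = new Server(monitor);
        provider.initialize(server);             // one-shot, enforced by compareAndSet
        return server;
    }
}
```

The compareAndSet inside initialize(...) turns accidental double initialization into an assertion failure rather than a silent overwrite.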
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class LogMessage { + + private final Component component; + private final Level level; + private final String messageId; + private final ClusterId clusterId; + private final Throwable exception; + private final Collection entries; + private final String format; + + public enum Component { + COMMAND("command"), + CONNECTION("connection"), + SERVER_SELECTION("serverSelection"), + TOPOLOGY("topology"); + + private static final Map INDEX; + + static { + INDEX = Stream.of(Component.values()).collect(Collectors.toMap(Component::getValue, identity())); + } + + private final String value; + + Component(final String value) { + this.value = value; + } + + @VisibleForTesting(otherwise = PRIVATE) + public String getValue() { + return value; + } + + @VisibleForTesting(otherwise = PRIVATE) + public static Component of(final String value) { + Component result = INDEX.get(value); + return assertNotNull(result); + } + } + + public enum Level { + INFO, + DEBUG + } + + public static final class Entry { + private final Name name; + private final Object value; + + public Entry(final Name name, final @Nullable Object value) { + this.name = name; + this.value = value; + } + + public String getName() { + return name.getValue(); + } + + @Nullable + public Object getValue() { + return value; + } + + public enum Name { + SERVER_HOST("serverHost"), + SERVER_PORT("serverPort"), + COMMAND_NAME("commandName"), + REQUEST_ID("requestId"), + OPERATION_ID("operationId"), + OPERATION("operation"), + AWAITED("awaited"), + SERVICE_ID("serviceId"), + SERVER_CONNECTION_ID("serverConnectionId"), + DRIVER_CONNECTION_ID("driverConnectionId"), + DURATION_MS("durationMS"), + DATABASE_NAME("databaseName"), + REPLY("reply"), + COMMAND_CONTENT("command"), + REASON_DESCRIPTION("reason"), + ERROR_DESCRIPTION("error"), + FAILURE("failure"), + MAX_IDLE_TIME_MS("maxIdleTimeMS"), + MIN_POOL_SIZE("minPoolSize"), + MAX_POOL_SIZE("maxPoolSize"), + MAX_CONNECTING("maxConnecting"), + MAX_WAIT_TIMEOUT_MS("waitQueueTimeoutMS"), + SELECTOR("selector"), + TOPOLOGY_DESCRIPTION("topologyDescription"), + REMAINING_TIME_MS("remainingTimeMS"), + TOPOLOGY_ID("topologyId"), + TOPOLOGY_PREVIOUS_DESCRIPTION("previousDescription"), + TOPOLOGY_NEW_DESCRIPTION("newDescription"); + + private final String value; + + public String getValue() { + return value; + } + + Name(final String value) { + this.value = value; + } + } + } + + public LogMessage(final Component component, final Level level, final String messageId, final ClusterId clusterId, + final List entries, final String format) { + this(component, level, messageId, clusterId, null, entries, format); + } + + public LogMessage(final Component component, final Level level, final String messageId, final ClusterId clusterId, + @Nullable final Throwable exception, final Collection entries, final String format) { + this.component = component; + this.level = level; + this.messageId = messageId; + this.clusterId = clusterId; + this.exception = exception; + this.entries = entries; + this.format = format; + } + + public ClusterId getClusterId() { + return clusterId; + } + + public LogMessage.Component getComponent() { + return component; + } + + public LogMessage.Level getLevel() { + return level; + } + + public String getMessageId() { + return messageId; + } + @Nullable + public Throwable getException() { + return exception; + } + + public Collection getEntries() { + return entries; + } + + public LogMessage.StructuredLogMessage toStructuredLogMessage() { + List nullableEntries = entries.stream() + 
.filter(entry -> entry.getValue() != null) + .collect(Collectors.toList()); + return new LogMessage.StructuredLogMessage(nullableEntries); + } + + public LogMessage.UnstructuredLogMessage toUnstructuredLogMessage() { + return new LogMessage.UnstructuredLogMessage(); + } + + public static final class StructuredLogMessage { + private final Collection entries; + + private StructuredLogMessage(final Collection entries) { + entries.forEach(entry -> assertNotNull(entry.getValue())); + this.entries = entries; + } + + public Collection getEntries() { + return entries; + } + } + + public final class UnstructuredLogMessage { + /** + * Interpolates the specified string format with the values in the entries collection. + * The format string can contain {} placeholders for values and [] placeholders for conditionals. + *
<p>
+ * For example, [ with service-id {}] will wrap the sentence with 'service-id {}' within the conditionals. + * If the corresponding {@link LogMessage.Entry#getValue()} for the placeholder is null, the entire sentence within the conditionals will be + * omitted. + *
<p>
+ * If the {@link LogMessage.Entry#getValue()} for the {} placeholder is null outside of conditionals, then null will be placed instead of + * the placeholder. + *
<p>
+ * The method will iterate through the values in the entries collection and fill the placeholders in the order specified. + * If the number of placeholders does not correspond to the number of entries in the collection, a NoSuchElementException will be thrown. + * + * @return the interpolated string with the values from the entries collection filled in the placeholders. + * @throws NoSuchElementException – if the iteration has no more elements. + */ + public String interpolate() { + Iterator iterator = entries.iterator(); + StringBuilder builder = new StringBuilder(); + int s = 0, i = 0; + while (i < format.length()) { + char curr = format.charAt(i); + if (curr == '[' || curr == '{') { + Object value = iterator.next().getValue(); + builder.append(format, s, i); + if (curr == '{') { + builder.append(value); + } else if (value == null) { + i = format.indexOf(']', i); + } else { + int openBrace = format.indexOf('{', i); + builder.append(format, i + 1, openBrace); + builder.append(value); + i = openBrace + 1; + } + s = i + 1; + } else if (curr == ']' || curr == '}') { + if (curr == ']') { + builder.append(format, s, i); + } + s = i + 1; + } + i++; + } + builder.append(format, s, format.length()); + return builder.toString(); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/logging/LoggingInterceptor.java b/driver-core/src/main/com/mongodb/internal/logging/LoggingInterceptor.java new file mode 100644 index 00000000000..f69b2be908a --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/logging/LoggingInterceptor.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.logging; + +import com.mongodb.annotations.ThreadSafe; + +/** + *
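Since the placeholder rules are easy to misread, here is a self-contained restatement of the interpolate() algorithm with a runnable example; the class is illustrative (plain Objects stand in for LogMessage.Entry values), but the control flow mirrors the implementation above:

```java
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

final class InterpolationDemo {
    static String interpolate(final String format, final List<?> values) {
        Iterator<?> iterator = values.iterator();
        StringBuilder builder = new StringBuilder();
        int s = 0;
        int i = 0;
        while (i < format.length()) {
            char curr = format.charAt(i);
            if (curr == '[' || curr == '{') {
                Object value = iterator.next();
                builder.append(format, s, i);
                if (curr == '{') {
                    builder.append(value);                    // unconditional placeholder
                } else if (value == null) {
                    i = format.indexOf(']', i);               // drop the whole [...] section
                } else {
                    int openBrace = format.indexOf('{', i);
                    builder.append(format, i + 1, openBrace); // keep the section text
                    builder.append(value);
                    i = openBrace + 1;
                }
                s = i + 1;
            } else if (curr == ']' || curr == '}') {
                if (curr == ']') {
                    builder.append(format, s, i);
                }
                s = i + 1;
            }
            i++;
        }
        builder.append(format, s, format.length());
        return builder.toString();
    }

    public static void main(final String[] args) {
        String format = "Connection checked out[ with service-id {}] for operation {}";
        // Non-null value: the bracketed section is kept.
        System.out.println(interpolate(format, Arrays.asList("svc-1", "find")));
        // Null value: the entire bracketed section is omitted.
        System.out.println(interpolate(format, Arrays.asList(null, "find")));
    }
}
```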
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +@FunctionalInterface +@ThreadSafe +public interface LoggingInterceptor { + void intercept(LogMessage message); +} diff --git a/driver-core/src/main/com/mongodb/internal/logging/StructuredLogger.java b/driver-core/src/main/com/mongodb/internal/logging/StructuredLogger.java new file mode 100644 index 00000000000..d65a80ef230 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/logging/StructuredLogger.java @@ -0,0 +1,117 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.logging; + +import com.mongodb.connection.ClusterId; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.internal.logging.LogMessage.Level; +import com.mongodb.lang.Nullable; + +import java.util.concurrent.ConcurrentHashMap; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.function.Supplier; + +import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class StructuredLogger { + + private static final ConcurrentHashMap INTERCEPTORS = new ConcurrentHashMap<>(); + + private final Logger logger; + + @VisibleForTesting(otherwise = PRIVATE) + public static void addInterceptor(final String clusterDescription, final LoggingInterceptor interceptor) { + INTERCEPTORS.put(clusterDescription, interceptor); + } + + @VisibleForTesting(otherwise = PRIVATE) + public static void removeInterceptor(final String clusterDescription) { + INTERCEPTORS.remove(clusterDescription); + } + + @Nullable + private static LoggingInterceptor getInterceptor(@Nullable final String clusterDescription) { + if (clusterDescription == null) { + return null; + } + return INTERCEPTORS.get(clusterDescription); + } + + public StructuredLogger(final String suffix) { + this(Loggers.getLogger(suffix)); + } + + @VisibleForTesting(otherwise = PRIVATE) + public StructuredLogger(final Logger logger) { + this.logger = logger; + } + + public boolean isRequired(final Level level, final ClusterId clusterId) { + if (getInterceptor(clusterId.getDescription()) != null) { + return true; + } + + switch (level) { + case DEBUG: + return logger.isDebugEnabled(); + case INFO: + return logger.isInfoEnabled(); + default: + throw new UnsupportedOperationException(); + } + } + + public void log(final LogMessage logMessage) { + LoggingInterceptor interceptor = getInterceptor(logMessage.getClusterId().getDescription()); + if (interceptor != null) { + interceptor.intercept(logMessage); + } + switch (logMessage.getLevel()) { + case DEBUG: + logUnstructured(logMessage, logger::isDebugEnabled, logger::debug, logger::debug); + break; + case INFO: + logUnstructured(logMessage, logger::isInfoEnabled, logger::info, logger::info); + break; + default: + throw new UnsupportedOperationException(); + } + } + + private static void logUnstructured( + final LogMessage logMessage, + final Supplier loggingEnabled, + final Consumer doLog, + final BiConsumer doLogWithException) { + if (loggingEnabled.get()) { + LogMessage.UnstructuredLogMessage unstructuredLogMessage = logMessage.toUnstructuredLogMessage(); + String message = unstructuredLogMessage.interpolate(); + Throwable exception = logMessage.getException(); + if (exception == null) { + doLog.accept(message); + } else { + doLogWithException.accept(message, exception); + } + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/logging/package-info.java b/driver-core/src/main/com/mongodb/internal/logging/package-info.java new file mode 100644 index 00000000000..74810176f88 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/logging/package-info.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. 
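The interceptor registry above is test-only plumbing: registering a LoggingInterceptor for a cluster description makes every LogMessage observable regardless of the underlying logger's level. A sketch of how a test might use it (the cluster description string and the printing are illustrative):

```java
import com.mongodb.internal.logging.StructuredLogger;

final class LogCaptureSketch {
    static void runWithCapture(final Runnable testBody) {
        // addInterceptor/removeInterceptor are the @VisibleForTesting hooks above.
        StructuredLogger.addInterceptor("my-cluster", message ->
                System.out.println(message.getComponent() + "/" + message.getLevel()
                        + ": " + message.getMessageId()));
        try {
            testBody.run();
        } finally {
            StructuredLogger.removeInterceptor("my-cluster");
        }
    }
}
```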
+ */ +@Internal +@NonNullApi +package com.mongodb.internal.logging; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/operation/AbortTransactionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/AbortTransactionOperation.java new file mode 100644 index 00000000000..bc7e6655bc7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/AbortTransactionOperation.java @@ -0,0 +1,66 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.Function; +import com.mongodb.WriteConcern; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; + +import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; +import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; + +/** + * An operation that aborts a transaction. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class AbortTransactionOperation extends TransactionOperation { + private static final String COMMAND_NAME = "abortTransaction"; + private BsonDocument recoveryToken; + + public AbortTransactionOperation(final WriteConcern writeConcern) { + super(writeConcern); + } + + public AbortTransactionOperation recoveryToken(@Nullable final BsonDocument recoveryToken) { + this.recoveryToken = recoveryToken; + return this; + } + + @Override + public String getCommandName() { + return COMMAND_NAME; + } + + @Override + CommandCreator getCommandCreator() { + return (operationContext, serverDescription, connectionDescription) -> { + operationContext.getTimeoutContext().resetToDefaultMaxTime(); + BsonDocument command = AbortTransactionOperation.super.getCommandCreator() + .create(operationContext, serverDescription, connectionDescription); + putIfNotNull(command, "recoveryToken", recoveryToken); + return command; + }; + } + + @Override + protected Function getRetryCommandModifier(final TimeoutContext timeoutContext) { + return cmd -> cmd; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/AbstractWriteSearchIndexOperation.java b/driver-core/src/main/com/mongodb/internal/operation/AbstractWriteSearchIndexOperation.java new file mode 100644 index 00000000000..e6643f3c7d2 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/AbstractWriteSearchIndexOperation.java @@ -0,0 +1,102 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + + +import com.mongodb.MongoCommandException; +import com.mongodb.MongoNamespace; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncWriteBinding; +import com.mongodb.internal.binding.WriteBinding; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; + +import static com.mongodb.internal.operation.AsyncOperationHelper.executeCommandAsync; +import static com.mongodb.internal.operation.AsyncOperationHelper.withAsyncSourceAndConnection; +import static com.mongodb.internal.operation.AsyncOperationHelper.writeConcernErrorTransformerAsync; +import static com.mongodb.internal.operation.SyncOperationHelper.executeCommand; +import static com.mongodb.internal.operation.SyncOperationHelper.withConnection; +import static com.mongodb.internal.operation.SyncOperationHelper.writeConcernErrorTransformer; + +/** + * An abstract class for defining operations for managing Atlas Search indexes. + * + *
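For orientation, the command creator above yields roughly the document sketched below when a recovery token is set; the base {abortTransaction: 1} shape is inferred from TransactionOperation's command creator and the token payload is invented, so treat this as an assumption rather than a guaranteed wire format:

```java
import org.bson.BsonDocument;

final class AbortCommandShapeSketch {
    // Inferred shape only; real recoveryToken contents are opaque server data.
    static final BsonDocument EXAMPLE =
            BsonDocument.parse("{abortTransaction: 1, recoveryToken: {data: 'opaque'}}");
}
```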
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +abstract class AbstractWriteSearchIndexOperation implements WriteOperation { + private final MongoNamespace namespace; + + AbstractWriteSearchIndexOperation(final MongoNamespace namespace) { + this.namespace = namespace; + } + + @Override + public Void execute(final WriteBinding binding) { + return withConnection(binding, connection -> { + try { + executeCommand(binding, namespace.getDatabaseName(), buildCommand(), + connection, + writeConcernErrorTransformer(binding.getOperationContext().getTimeoutContext())); + } catch (MongoCommandException mongoCommandException) { + swallowOrThrow(mongoCommandException); + } + return null; + }); + } + + @Override + public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback callback) { + withAsyncSourceAndConnection(binding::getWriteConnectionSource, false, callback, + (connectionSource, connection, cb) -> + executeCommandAsync(binding, namespace.getDatabaseName(), buildCommand(), connection, + writeConcernErrorTransformerAsync(binding.getOperationContext().getTimeoutContext()), (result, commandExecutionError) -> { + try { + swallowOrThrow(commandExecutionError); + cb.onResult(result, null); + } catch (Throwable mongoCommandException) { + cb.onResult(null, mongoCommandException); + } + } + ) + ); + } + + /** + * Handles the provided execution exception by either throwing it or ignoring it. This method is meant to be overridden + * by subclasses that need to handle exceptions differently based on their specific requirements. + * + *
<p>
+ * Note: While the method declaration allows throwing a checked exception to enhance readability, the implementation + * of this method must not throw a checked exception. + *
</p>
+ * + * @param The type of the execution exception. + * @param mongoExecutionException The execution exception to handle. If not null, it may be thrown or ignored. + * @throws E The execution exception, if it is not null (implementation-specific). + */ + void swallowOrThrow(@Nullable final E mongoExecutionException) throws E { + if (mongoExecutionException != null) { + throw mongoExecutionException; + } + } + + abstract BsonDocument buildCommand(); + + MongoNamespace getNamespace() { + return namespace; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/AggregateOperation.java b/driver-core/src/main/com/mongodb/internal/operation/AggregateOperation.java new file mode 100644 index 00000000000..1c9abfc68ca --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/AggregateOperation.java @@ -0,0 +1,170 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.ExplainVerbosity; +import com.mongodb.MongoNamespace; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.Collation; +import com.mongodb.internal.async.AsyncBatchCursor; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncReadBinding; +import com.mongodb.internal.binding.ReadBinding; +import com.mongodb.internal.client.model.AggregationLevel; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonValue; +import org.bson.codecs.Decoder; + +import java.util.List; + +import static com.mongodb.internal.connection.CommandHelper.applyMaxTimeMS; +import static com.mongodb.internal.operation.ExplainHelper.asExplainCommand; +import static com.mongodb.internal.operation.ServerVersionHelper.UNKNOWN_WIRE_VERSION; + +/** + * An operation that executes an aggregation query. + * + *
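The generic throws clause in swallowOrThrow is the interesting bit: callers propagate the error with its precise static type, while an overriding subclass can drop a specific benign failure. A standalone rendering of the idiom, assuming the stripped type parameter was <E extends Throwable> (names invented):

```java
import java.io.IOException;

final class SwallowOrThrowSketch {
    static <E extends Throwable> void swallowOrThrow(final E error) throws E {
        if (error != null) {
            throw error; // rethrown with its precise static type
        }
    }

    public static void main(final String[] args) {
        try {
            swallowOrThrow(new IOException("boom")); // compiler forces handling IOException
        } catch (IOException e) {
            System.out.println("caught: " + e.getMessage());
        }
        // A "swallowing" override would simply not rethrow for errors it
        // considers benign, e.g. an index that already exists.
    }
}
```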
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class AggregateOperation implements ReadOperationExplainable { + private final AggregateOperationImpl wrapped; + + public AggregateOperation(final MongoNamespace namespace, final List pipeline, final Decoder decoder) { + this(namespace, pipeline, decoder, AggregationLevel.COLLECTION); + } + + public AggregateOperation(final MongoNamespace namespace, final List pipeline, final Decoder decoder, + final AggregationLevel aggregationLevel) { + this.wrapped = new AggregateOperationImpl<>(namespace, pipeline, decoder, aggregationLevel); + } + + public List getPipeline() { + return wrapped.getPipeline(); + } + + public Boolean getAllowDiskUse() { + return wrapped.getAllowDiskUse(); + } + + public AggregateOperation allowDiskUse(@Nullable final Boolean allowDiskUse) { + wrapped.allowDiskUse(allowDiskUse); + return this; + } + + public Integer getBatchSize() { + return wrapped.getBatchSize(); + } + + public AggregateOperation batchSize(@Nullable final Integer batchSize) { + wrapped.batchSize(batchSize); + return this; + } + + public Collation getCollation() { + return wrapped.getCollation(); + } + + public AggregateOperation collation(@Nullable final Collation collation) { + wrapped.collation(collation); + return this; + } + + @Nullable + public BsonValue getComment() { + return wrapped.getComment(); + } + + public AggregateOperation comment(@Nullable final BsonValue comment) { + wrapped.comment(comment); + return this; + } + + public AggregateOperation let(@Nullable final BsonDocument variables) { + wrapped.let(variables); + return this; + } + + public AggregateOperation retryReads(final boolean retryReads) { + wrapped.retryReads(retryReads); + return this; + } + + public boolean getRetryReads() { + return wrapped.getRetryReads(); + } + + @Nullable + public BsonDocument getHint() { + BsonValue hint = wrapped.getHint(); + if (hint == null) { + return null; + } + if (!hint.isDocument()) { + throw new IllegalArgumentException("Hint is not a BsonDocument please use the #getHintBsonValue() method. 
"); + } + return hint.asDocument(); + } + + @Nullable + public BsonValue getHintBsonValue() { + return wrapped.getHint(); + } + + public AggregateOperation hint(@Nullable final BsonValue hint) { + wrapped.hint(hint); + return this; + } + + public AggregateOperation timeoutMode(@Nullable final TimeoutMode timeoutMode) { + wrapped.timeoutMode(timeoutMode); + return this; + } + + @Override + public String getCommandName() { + return wrapped.getCommandName(); + } + + @Override + public BatchCursor execute(final ReadBinding binding) { + return wrapped.execute(binding); + } + + @Override + public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { + wrapped.executeAsync(binding, callback); + } + + @Override + public ReadOperationSimple asExplainableOperation(@Nullable final ExplainVerbosity verbosity, final Decoder resultDecoder) { + return createExplainableOperation(verbosity, resultDecoder); + } + + CommandReadOperation createExplainableOperation(@Nullable final ExplainVerbosity verbosity, final Decoder resultDecoder) { + return new CommandReadOperation<>(getNamespace().getDatabaseName(), wrapped.getCommandName(), + (operationContext, serverDescription, connectionDescription) -> { + BsonDocument command = wrapped.getCommand(operationContext, UNKNOWN_WIRE_VERSION); + applyMaxTimeMS(operationContext.getTimeoutContext(), command); + return asExplainCommand(command, verbosity); + }, resultDecoder); + } + + MongoNamespace getNamespace() { + return wrapped.getNamespace(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/AggregateOperationImpl.java b/driver-core/src/main/com/mongodb/internal/operation/AggregateOperationImpl.java new file mode 100644 index 00000000000..4c9bc3828b7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/AggregateOperationImpl.java @@ -0,0 +1,293 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation; + +import com.mongodb.CursorType; +import com.mongodb.MongoNamespace; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.Collation; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.async.AsyncBatchCursor; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncReadBinding; +import com.mongodb.internal.binding.ReadBinding; +import com.mongodb.internal.client.model.AggregationLevel; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.codecs.Decoder; + +import java.util.Arrays; +import java.util.List; + +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; +import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync; +import static com.mongodb.internal.operation.AsyncOperationHelper.executeRetryableReadAsync; +import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; +import static com.mongodb.internal.operation.OperationHelper.LOGGER; +import static com.mongodb.internal.operation.OperationHelper.setNonTailableCursorMaxTimeSupplier; +import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand; +import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer; +import static com.mongodb.internal.operation.SyncOperationHelper.executeRetryableRead; + +class AggregateOperationImpl implements ReadOperationCursor { + private static final String COMMAND_NAME = "aggregate"; + private static final String RESULT = "result"; + private static final String CURSOR = "cursor"; + private static final String FIRST_BATCH = "firstBatch"; + private static final List FIELD_NAMES_WITH_RESULT = Arrays.asList(RESULT, FIRST_BATCH); + private final MongoNamespace namespace; + private final List pipeline; + private final Decoder decoder; + private final AggregateTarget aggregateTarget; + private final PipelineCreator pipelineCreator; + + private boolean retryReads; + private Boolean allowDiskUse; + private Integer batchSize; + private Collation collation; + private BsonValue comment; + private BsonValue hint; + private BsonDocument variables; + private TimeoutMode timeoutMode; + private CursorType cursorType; + + AggregateOperationImpl(final MongoNamespace namespace, + final List pipeline, final Decoder decoder, final AggregationLevel aggregationLevel) { + this(namespace, pipeline, decoder, + defaultAggregateTarget(notNull("aggregationLevel", aggregationLevel), + notNull("namespace", namespace).getCollectionName()), + defaultPipelineCreator(pipeline)); + } + + AggregateOperationImpl(final MongoNamespace namespace, + final List pipeline, final Decoder decoder, final AggregateTarget aggregateTarget, + final PipelineCreator pipelineCreator) { + this.namespace = notNull("namespace", namespace); + this.pipeline = notNull("pipeline", pipeline); + this.decoder = notNull("decoder", decoder); + this.aggregateTarget = notNull("aggregateTarget", aggregateTarget); + this.pipelineCreator = notNull("pipelineCreator", pipelineCreator); + } 
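Putting the fluent setters together, building the public wrapper might look like the sketch below; the namespace, pipeline, and option values are invented, and AggregateOperation<T>'s stripped type parameter is assumed. The comment shows the approximate command the impl assembles from these settings:

```java
import com.mongodb.MongoNamespace;
import com.mongodb.internal.operation.AggregateOperation;
import org.bson.BsonDocument;
import org.bson.codecs.BsonDocumentCodec;

import java.util.Arrays;
import java.util.List;

final class AggregateBuildSketch {
    static AggregateOperation<BsonDocument> build() {
        List<BsonDocument> pipeline = Arrays.asList(
                BsonDocument.parse("{$match: {cuisine: 'Italian'}}"),
                BsonDocument.parse("{$sort: {name: 1}}"));
        // Roughly yields: {aggregate: 'restaurants', pipeline: [...],
        //                  cursor: {batchSize: 100}, allowDiskUse: true}
        return new AggregateOperation<>(new MongoNamespace("test", "restaurants"),
                pipeline, new BsonDocumentCodec())
                .allowDiskUse(true)
                .batchSize(100);
    }
}
```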
+ + MongoNamespace getNamespace() { + return namespace; + } + + List getPipeline() { + return pipeline; + } + + Decoder getDecoder() { + return decoder; + } + + Boolean getAllowDiskUse() { + return allowDiskUse; + } + + AggregateOperationImpl allowDiskUse(@Nullable final Boolean allowDiskUse) { + this.allowDiskUse = allowDiskUse; + return this; + } + + Integer getBatchSize() { + return batchSize; + } + + AggregateOperationImpl batchSize(@Nullable final Integer batchSize) { + this.batchSize = batchSize; + return this; + } + + Collation getCollation() { + return collation; + } + + AggregateOperationImpl collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + @Nullable + BsonValue getComment() { + return comment; + } + + AggregateOperationImpl comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + AggregateOperationImpl let(@Nullable final BsonDocument variables) { + this.variables = variables; + return this; + } + + AggregateOperationImpl retryReads(final boolean retryReads) { + this.retryReads = retryReads; + return this; + } + + /** + * When {@link TimeoutContext#hasTimeoutMS()} then {@link TimeoutSettings#getMaxAwaitTimeMS()} usage in {@code getMore} commands + * depends on the type of cursor. For {@link CursorType#TailableAwait} it is used, for others it is not. + * {@link CursorType#TailableAwait} is used mainly for change streams in {@link AggregateOperationImpl}. + * + * @param cursorType + * @return this + */ + AggregateOperationImpl cursorType(final CursorType cursorType) { + this.cursorType = cursorType; + return this; + } + + boolean getRetryReads() { + return retryReads; + } + + @Nullable + BsonValue getHint() { + return hint; + } + + public AggregateOperationImpl timeoutMode(@Nullable final TimeoutMode timeoutMode) { + if (timeoutMode != null) { + this.timeoutMode = timeoutMode; + } + return this; + } + + AggregateOperationImpl hint(@Nullable final BsonValue hint) { + isTrueArgument("BsonString or BsonDocument", hint == null || hint.isDocument() || hint.isString()); + this.hint = hint; + return this; + } + + @Override + public String getCommandName() { + return COMMAND_NAME; + } + + @Override + public BatchCursor execute(final ReadBinding binding) { + return executeRetryableRead(binding, namespace.getDatabaseName(), + getCommandCreator(), CommandResultDocumentCodec.create(decoder, FIELD_NAMES_WITH_RESULT), + transformer(), retryReads); + } + + @Override + public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { + SingleResultCallback> errHandlingCallback = errorHandlingCallback(callback, LOGGER); + executeRetryableReadAsync(binding, namespace.getDatabaseName(), + getCommandCreator(), CommandResultDocumentCodec.create(decoder, FIELD_NAMES_WITH_RESULT), + asyncTransformer(), retryReads, + errHandlingCallback); + } + + private CommandCreator getCommandCreator() { + return (operationContext, serverDescription, connectionDescription) -> + getCommand(operationContext, connectionDescription.getMaxWireVersion()); + } + + BsonDocument getCommand(final OperationContext operationContext, final int maxWireVersion) { + BsonDocument commandDocument = new BsonDocument(getCommandName(), aggregateTarget.create()); + appendReadConcernToCommand(operationContext.getSessionContext(), maxWireVersion, commandDocument); + commandDocument.put("pipeline", pipelineCreator.create()); + setNonTailableCursorMaxTimeSupplier(timeoutMode, operationContext); + BsonDocument cursor = new 
BsonDocument(); + if (batchSize != null) { + cursor.put("batchSize", new BsonInt32(batchSize)); + } + commandDocument.put(CURSOR, cursor); + if (allowDiskUse != null) { + commandDocument.put("allowDiskUse", BsonBoolean.valueOf(allowDiskUse)); + } + if (collation != null) { + commandDocument.put("collation", collation.asDocument()); + } + if (comment != null) { + commandDocument.put("comment", comment); + } + if (hint != null) { + commandDocument.put("hint", hint); + } + if (variables != null) { + commandDocument.put("let", variables); + } + + return commandDocument; + } + + private CommandReadTransformer> transformer() { + return (result, source, connection) -> + new CommandBatchCursor<>(getTimeoutMode(), result, batchSize != null ? batchSize : 0, + getMaxTimeForCursor(source.getOperationContext().getTimeoutContext()), decoder, comment, source, connection); + } + + private CommandReadTransformerAsync> asyncTransformer() { + return (result, source, connection) -> + new AsyncCommandBatchCursor<>(getTimeoutMode(), result, batchSize != null ? batchSize : 0, + getMaxTimeForCursor(source.getOperationContext().getTimeoutContext()), decoder, comment, source, connection); + } + + private TimeoutMode getTimeoutMode() { + TimeoutMode localTimeoutMode = timeoutMode; + if (localTimeoutMode == null) { + localTimeoutMode = TimeoutMode.CURSOR_LIFETIME; + } + return localTimeoutMode; + } + + private long getMaxTimeForCursor(final TimeoutContext timeoutContext) { + long maxAwaitTimeMS = timeoutContext.getMaxAwaitTimeMS(); + if (timeoutContext.hasTimeoutMS()){ + return CursorType.TailableAwait == cursorType ? maxAwaitTimeMS : 0; + } + return maxAwaitTimeMS; + } + + interface AggregateTarget { + BsonValue create(); + } + + interface PipelineCreator { + BsonArray create(); + } + + private static AggregateTarget defaultAggregateTarget(final AggregationLevel aggregationLevel, final String collectionName) { + return () -> { + if (aggregationLevel == AggregationLevel.DATABASE) { + return new BsonInt32(1); + } else { + return new BsonString(collectionName); + } + }; + } + + private static PipelineCreator defaultPipelineCreator(final List pipeline) { + return () -> new BsonArray(pipeline); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/AggregateResponseBatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/AggregateResponseBatchCursor.java new file mode 100644 index 00000000000..e12a2249123 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/AggregateResponseBatchCursor.java @@ -0,0 +1,40 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonTimestamp; + +/** + * Extends the batch cursor interface to include information included in an aggregate or getMore response. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +@NotThreadSafe +public interface AggregateResponseBatchCursor extends BatchCursor { + @Nullable + BsonDocument getPostBatchResumeToken(); + + @Nullable + BsonTimestamp getOperationTime(); + + boolean isFirstBatchEmpty(); + + int getMaxWireVersion(); +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/AggregateToCollectionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/AggregateToCollectionOperation.java new file mode 100644 index 00000000000..16f33ad45e5 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/AggregateToCollectionOperation.java @@ -0,0 +1,223 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoNamespace; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.Collation; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncReadBinding; +import com.mongodb.internal.binding.ReadBinding; +import com.mongodb.internal.client.model.AggregationLevel; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.codecs.BsonDocumentCodec; + +import java.util.List; + +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.operation.AsyncOperationHelper.executeRetryableReadAsync; +import static com.mongodb.internal.operation.ServerVersionHelper.FIVE_DOT_ZERO_WIRE_VERSION; +import static com.mongodb.internal.operation.SyncOperationHelper.executeRetryableRead; +import static com.mongodb.internal.operation.WriteConcernHelper.appendWriteConcernToCommand; +import static com.mongodb.internal.operation.WriteConcernHelper.throwOnWriteConcernError; + +/** + * An operation that executes an aggregation that writes its results to a collection. + * + *
<p>Drivers are required to execute this operation on a secondary as of MongoDB 5.0, and otherwise execute it on a primary. That's why + * this is a ReadOperation, not a WriteOperation: because it now uses the read preference to select the server. + * </p>
+ * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class AggregateToCollectionOperation implements ReadOperationSimple { + private static final String COMMAND_NAME = "aggregate"; + private final MongoNamespace namespace; + private final List pipeline; + private final WriteConcern writeConcern; + private final ReadConcern readConcern; + private final AggregationLevel aggregationLevel; + + private Boolean allowDiskUse; + private Boolean bypassDocumentValidation; + private Collation collation; + private BsonValue comment; + private BsonValue hint; + private BsonDocument variables; + + public AggregateToCollectionOperation(final MongoNamespace namespace, final List pipeline, final ReadConcern readConcern, + final WriteConcern writeConcern) { + this(namespace, pipeline, readConcern, writeConcern, AggregationLevel.COLLECTION); + } + + public AggregateToCollectionOperation(final MongoNamespace namespace, final List pipeline, + @Nullable final ReadConcern readConcern, @Nullable final WriteConcern writeConcern, final AggregationLevel aggregationLevel) { + this.namespace = notNull("namespace", namespace); + this.pipeline = notNull("pipeline", pipeline); + this.writeConcern = writeConcern; + this.readConcern = readConcern; + this.aggregationLevel = notNull("aggregationLevel", aggregationLevel); + + isTrueArgument("pipeline is not empty", !pipeline.isEmpty()); + } + + public List getPipeline() { + return pipeline; + } + + public ReadConcern getReadConcern() { + return readConcern; + } + + public WriteConcern getWriteConcern() { + return writeConcern; + } + + public Boolean getAllowDiskUse() { + return allowDiskUse; + } + + public AggregateToCollectionOperation allowDiskUse(@Nullable final Boolean allowDiskUse) { + this.allowDiskUse = allowDiskUse; + return this; + } + + public Boolean getBypassDocumentValidation() { + return bypassDocumentValidation; + } + + public AggregateToCollectionOperation bypassDocumentValidation(@Nullable final Boolean bypassDocumentValidation) { + this.bypassDocumentValidation = bypassDocumentValidation; + return this; + } + + public Collation getCollation() { + return collation; + } + + public AggregateToCollectionOperation collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + public BsonValue getComment() { + return comment; + } + + public AggregateToCollectionOperation let(@Nullable final BsonDocument variables) { + this.variables = variables; + return this; + } + + public AggregateToCollectionOperation comment(final BsonValue comment) { + this.comment = comment; + return this; + } + + public BsonValue getHint() { + return hint; + } + + public AggregateToCollectionOperation hint(@Nullable final BsonValue hint) { + this.hint = hint; + return this; + } + + public AggregateToCollectionOperation timeoutMode(@Nullable final TimeoutMode timeoutMode) { + isTrueArgument("timeoutMode cannot be ITERATION.", timeoutMode == null || timeoutMode.equals(TimeoutMode.CURSOR_LIFETIME)); + return this; + } + + @Override + public String getCommandName() { + return COMMAND_NAME; + } + + @Override + public Void execute(final ReadBinding binding) { + return executeRetryableRead(binding, + () -> binding.getReadConnectionSource(FIVE_DOT_ZERO_WIRE_VERSION, ReadPreference.primary()), + namespace.getDatabaseName(), + getCommandCreator(), + new BsonDocumentCodec(), (result, source, connection) -> { + throwOnWriteConcernError(result, connection.getDescription().getServerAddress(), + connection.getDescription().getMaxWireVersion(), binding.getOperationContext().getTimeoutContext()); + return 
null; + }, false); + } + + @Override + public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback callback) { + executeRetryableReadAsync(binding, + (connectionSourceCallback) -> + binding.getReadConnectionSource(FIVE_DOT_ZERO_WIRE_VERSION, ReadPreference.primary(), connectionSourceCallback), + namespace.getDatabaseName(), + getCommandCreator(), + new BsonDocumentCodec(), (result, source, connection) -> { + throwOnWriteConcernError(result, connection.getDescription().getServerAddress(), + connection.getDescription().getMaxWireVersion(), binding.getOperationContext().getTimeoutContext()); + return null; + }, false, callback); + } + + private CommandOperationHelper.CommandCreator getCommandCreator() { + return (operationContext, serverDescription, connectionDescription) -> { + BsonValue aggregationTarget = (aggregationLevel == AggregationLevel.DATABASE) + ? new BsonInt32(1) : new BsonString(namespace.getCollectionName()); + + BsonDocument commandDocument = new BsonDocument(getCommandName(), aggregationTarget); + commandDocument.put("pipeline", new BsonArray(pipeline)); + if (allowDiskUse != null) { + commandDocument.put("allowDiskUse", BsonBoolean.valueOf(allowDiskUse)); + } + if (bypassDocumentValidation != null) { + commandDocument.put("bypassDocumentValidation", BsonBoolean.valueOf(bypassDocumentValidation)); + } + + commandDocument.put("cursor", new BsonDocument()); + + appendWriteConcernToCommand(writeConcern, commandDocument); + if (readConcern != null && !readConcern.isServerDefault()) { + commandDocument.put("readConcern", readConcern.asDocument()); + } + + if (collation != null) { + commandDocument.put("collation", collation.asDocument()); + } + if (comment != null) { + commandDocument.put("comment", comment); + } + if (hint != null) { + commandDocument.put("hint", hint); + } + if (variables != null) { + commandDocument.put("let", variables); + } + return commandDocument; + }; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/AsyncChangeStreamBatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/AsyncChangeStreamBatchCursor.java new file mode 100644 index 00000000000..a4cfbafedb6 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/AsyncChangeStreamBatchCursor.java @@ -0,0 +1,245 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
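A construction sketch for the $out case (all values invented), assuming the four-argument constructor shown above:

```java
import com.mongodb.MongoNamespace;
import com.mongodb.ReadConcern;
import com.mongodb.WriteConcern;
import com.mongodb.internal.operation.AggregateToCollectionOperation;
import org.bson.BsonDocument;

import java.util.Arrays;
import java.util.List;

final class AggregateToCollectionSketch {
    static AggregateToCollectionOperation build() {
        // The $out stage is what makes this aggregation write to a collection.
        List<BsonDocument> pipeline = Arrays.asList(
                BsonDocument.parse("{$match: {status: 'active'}}"),
                BsonDocument.parse("{$out: 'activeUsers'}"));
        return new AggregateToCollectionOperation(new MongoNamespace("test", "users"),
                pipeline, ReadConcern.MAJORITY, WriteConcern.MAJORITY);
    }
}
```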
+ */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoException; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.async.AsyncAggregateResponseBatchCursor; +import com.mongodb.internal.async.AsyncBatchCursor; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncReadBinding; +import com.mongodb.lang.NonNull; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonTimestamp; +import org.bson.RawBsonDocument; + +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.assertNull; +import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; +import static com.mongodb.internal.operation.AsyncOperationHelper.withAsyncReadConnectionSource; +import static com.mongodb.internal.operation.ChangeStreamBatchCursor.convertAndProduceLastId; +import static com.mongodb.internal.operation.ChangeStreamBatchCursorHelper.isResumableError; +import static com.mongodb.internal.operation.OperationHelper.LOGGER; +import static java.lang.String.format; + +final class AsyncChangeStreamBatchCursor implements AsyncAggregateResponseBatchCursor { + private final AsyncReadBinding binding; + private final TimeoutContext timeoutContext; + private final ChangeStreamOperation changeStreamOperation; + private final int maxWireVersion; + + private volatile BsonDocument resumeToken; + /** + * {@linkplain ChangeStreamBatchCursorHelper#isResumableError(Throwable, int) Retryable errors} can result in + * {@code wrapped} containing {@code null} and {@link #isClosed} being {@code false}. + * This represents a situation in which the wrapped object was closed by {@code this} but {@code this} remained open. 
+ */ + private final AtomicReference> wrapped; + private final AtomicBoolean isClosed; + + AsyncChangeStreamBatchCursor(final ChangeStreamOperation changeStreamOperation, + final AsyncCommandBatchCursor wrapped, + final AsyncReadBinding binding, + @Nullable final BsonDocument resumeToken, + final int maxWireVersion) { + this.changeStreamOperation = changeStreamOperation; + this.wrapped = new AtomicReference<>(assertNotNull(wrapped)); + this.binding = binding; + binding.retain(); + this.timeoutContext = binding.getOperationContext().getTimeoutContext(); + this.resumeToken = resumeToken; + this.maxWireVersion = maxWireVersion; + isClosed = new AtomicBoolean(); + } + + @NonNull + AsyncCommandBatchCursor getWrapped() { + return assertNotNull(wrapped.get()); + } + + @Override + public void next(final SingleResultCallback> callback) { + resumeableOperation(AsyncBatchCursor::next, callback, false); + } + + @Override + public void close() { + timeoutContext.resetTimeoutIfPresent(); + if (isClosed.compareAndSet(false, true)) { + try { + nullifyAndCloseWrapped(); + } finally { + binding.release(); + } + } + } + + @Override + public void setBatchSize(final int batchSize) { + getWrapped().setBatchSize(batchSize); + } + + @Override + public int getBatchSize() { + return getWrapped().getBatchSize(); + } + + @Override + public boolean isClosed() { + if (isClosed.get()) { + return true; + } else if (wrappedClosedItself()) { + close(); + return true; + } else { + return false; + } + } + + private boolean wrappedClosedItself() { + AsyncAggregateResponseBatchCursor observedWrapped = wrapped.get(); + return observedWrapped != null && observedWrapped.isClosed(); + } + + /** + * {@code null} is written to {@link #wrapped} before closing the wrapped object to maintain the following guarantee: + * if {@link #wrappedClosedItself()} observes a {@linkplain AsyncAggregateResponseBatchCursor#isClosed() closed} wrapped object, + * then it closed itself as opposed to being closed by {@code this}. + */ + private void nullifyAndCloseWrapped() { + AsyncAggregateResponseBatchCursor observedWrapped = wrapped.getAndSet(null); + if (observedWrapped != null) { + observedWrapped.close(); + } + } + + /** + * This method guarantees that the {@code newValue} argument is closed even if + * {@code setWrappedOrCloseIt(AsyncCommandBatchCursor)} is called concurrently with or after (in the happens-before order) + * the method {@link #close()}. 
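+ * The guarantee rests on re-checking {@link #isClosed()} after publishing {@code newValue}, and nullifying-and-closing it if a concurrent close is detected.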
+ */ + private void setWrappedOrCloseIt(final AsyncCommandBatchCursor newValue) { + if (isClosed()) { + assertNull(wrapped.get()); + newValue.close(); + } else { + assertNull(wrapped.getAndSet(newValue)); + if (isClosed()) { + nullifyAndCloseWrapped(); + } + } + } + + @Override + public BsonDocument getPostBatchResumeToken() { + return getWrapped().getPostBatchResumeToken(); + } + + @Override + public BsonTimestamp getOperationTime() { + return changeStreamOperation.getStartAtOperationTime(); + } + + @Override + public boolean isFirstBatchEmpty() { + return getWrapped().isFirstBatchEmpty(); + } + + @Override + public int getMaxWireVersion() { + return maxWireVersion; + } + + private void cachePostBatchResumeToken(final AsyncCommandBatchCursor cursor) { + BsonDocument resumeToken = cursor.getPostBatchResumeToken(); + if (resumeToken != null) { + this.resumeToken = resumeToken; + } + } + + private interface AsyncBlock { + void apply(AsyncAggregateResponseBatchCursor cursor, SingleResultCallback> callback); + } + + private void resumeableOperation(final AsyncBlock asyncBlock, final SingleResultCallback> callback, final boolean tryNext) { + timeoutContext.resetTimeoutIfPresent(); + SingleResultCallback> errHandlingCallback = errorHandlingCallback(callback, LOGGER); + if (isClosed()) { + errHandlingCallback.onResult(null, new MongoException(format("%s called after the cursor was closed.", + tryNext ? "tryNext()" : "next()"))); + return; + } + AsyncCommandBatchCursor wrappedCursor = getWrapped(); + asyncBlock.apply(wrappedCursor, (result, t) -> { + if (t == null) { + try { + List convertedResults; + try { + convertedResults = convertAndProduceLastId(assertNotNull(result), changeStreamOperation.getDecoder(), + lastId -> resumeToken = lastId); + } finally { + cachePostBatchResumeToken(wrappedCursor); + } + errHandlingCallback.onResult(convertedResults, null); + } catch (Exception e) { + errHandlingCallback.onResult(null, e); + } + } else { + cachePostBatchResumeToken(wrappedCursor); + if (isResumableError(t, maxWireVersion)) { + nullifyAndCloseWrapped(); + retryOperation(asyncBlock, errHandlingCallback, tryNext); + } else { + errHandlingCallback.onResult(null, t); + } + } + }); + } + + private void retryOperation(final AsyncBlock asyncBlock, final SingleResultCallback> callback, + final boolean tryNext) { + withAsyncReadConnectionSource(binding, (source, t) -> { + if (t != null) { + callback.onResult(null, t); + } else { + changeStreamOperation.setChangeStreamOptionsForResume(resumeToken, + assertNotNull(source).getServerDescription().getMaxWireVersion()); + source.release(); + changeStreamOperation.executeAsync(binding, (asyncBatchCursor, t1) -> { + if (t1 != null) { + callback.onResult(null, t1); + } else { + try { + setWrappedOrCloseIt(assertNotNull((AsyncChangeStreamBatchCursor) asyncBatchCursor).getWrapped()); + } finally { + try { + binding.release(); // release the new change stream batch cursor's reference to the binding + } finally { + resumeableOperation(asyncBlock, callback, tryNext); + } + } + } + }); + } + }); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/AsyncCommandBatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/AsyncCommandBatchCursor.java new file mode 100644 index 00000000000..792c10b4bb2 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/AsyncCommandBatchCursor.java @@ -0,0 +1,372 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoCommandException; +import com.mongodb.MongoException; +import com.mongodb.MongoNamespace; +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.MongoSocketException; +import com.mongodb.ReadPreference; +import com.mongodb.ServerAddress; +import com.mongodb.ServerCursor; +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ServerType; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.async.AsyncAggregateResponseBatchCursor; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.async.function.AsyncCallbackSupplier; +import com.mongodb.internal.binding.AsyncConnectionSource; +import com.mongodb.internal.connection.AsyncConnection; +import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.operation.AsyncOperationHelper.AsyncCallableConnectionWithCallback; +import com.mongodb.internal.validator.NoOpFieldNameValidator; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonTimestamp; +import org.bson.BsonValue; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.Decoder; + +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.assertions.Assertions.doesNotThrow; +import static com.mongodb.internal.async.AsyncRunnable.beginAsync; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.FIRST_BATCH; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.MESSAGE_IF_CLOSED_AS_CURSOR; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.NEXT_BATCH; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.getKillCursorsCommand; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.getMoreCommandDocument; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.logCommandCursorResult; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.translateCommandException; +import static com.mongodb.internal.operation.CommandCursorResult.withEmptyResults; +import static java.util.Collections.emptyList; + +class AsyncCommandBatchCursor implements AsyncAggregateResponseBatchCursor { + + private final MongoNamespace namespace; + private final Decoder decoder; + @Nullable + private final BsonValue comment; + private final int maxWireVersion; + private final boolean firstBatchEmpty; + private final ResourceManager resourceManager; + private final OperationContext operationContext; + private final TimeoutMode timeoutMode; + private final 
AtomicBoolean processedInitial = new AtomicBoolean(); + private int batchSize; + private volatile CommandCursorResult commandCursorResult; + private boolean resetTimeoutWhenClosing; + + AsyncCommandBatchCursor( + final TimeoutMode timeoutMode, + final BsonDocument commandCursorDocument, + final int batchSize, final long maxTimeMS, + final Decoder decoder, + @Nullable final BsonValue comment, + final AsyncConnectionSource connectionSource, + final AsyncConnection connection) { + ConnectionDescription connectionDescription = connection.getDescription(); + this.commandCursorResult = toCommandCursorResult(connectionDescription.getServerAddress(), FIRST_BATCH, commandCursorDocument); + this.namespace = commandCursorResult.getNamespace(); + this.batchSize = batchSize; + this.decoder = decoder; + this.comment = comment; + this.maxWireVersion = connectionDescription.getMaxWireVersion(); + this.firstBatchEmpty = commandCursorResult.getResults().isEmpty(); + operationContext = connectionSource.getOperationContext(); + this.timeoutMode = timeoutMode; + + operationContext.getTimeoutContext().setMaxTimeOverride(maxTimeMS); + + AsyncConnection connectionToPin = connectionSource.getServerDescription().getType() == ServerType.LOAD_BALANCER + ? connection : null; + resourceManager = new ResourceManager(namespace, connectionSource, connectionToPin, commandCursorResult.getServerCursor()); + resetTimeoutWhenClosing = true; + } + + @Override + public void next(final SingleResultCallback> callback) { + resourceManager.execute(funcCallback -> { + checkTimeoutModeAndResetTimeoutContextIfIteration(); + ServerCursor localServerCursor = resourceManager.getServerCursor(); + boolean serverCursorIsNull = localServerCursor == null; + List batchResults = emptyList(); + if (!processedInitial.getAndSet(true) && !firstBatchEmpty) { + batchResults = commandCursorResult.getResults(); + } + + if (serverCursorIsNull || !batchResults.isEmpty()) { + commandCursorResult = withEmptyResults(commandCursorResult); + funcCallback.onResult(batchResults, null); + } else { + getMore(localServerCursor, funcCallback); + } + }, callback); + } + + @Override + public boolean isClosed() { + return !resourceManager.operable(); + } + + @Override + public void setBatchSize(final int batchSize) { + this.batchSize = batchSize; + } + + @Override + public int getBatchSize() { + return batchSize; + } + + @Override + public void close() { + resourceManager.close(); + } + + @Nullable + @VisibleForTesting(otherwise = VisibleForTesting.AccessModifier.PRIVATE) + ServerCursor getServerCursor() { + if (!resourceManager.operable()) { + return null; + } + return resourceManager.getServerCursor(); + } + + @Override + public BsonDocument getPostBatchResumeToken() { + return commandCursorResult.getPostBatchResumeToken(); + } + + @Override + public BsonTimestamp getOperationTime() { + return commandCursorResult.getOperationTime(); + } + + @Override + public boolean isFirstBatchEmpty() { + return firstBatchEmpty; + } + + @Override + public int getMaxWireVersion() { + return maxWireVersion; + } + + void checkTimeoutModeAndResetTimeoutContextIfIteration() { + if (timeoutMode == TimeoutMode.ITERATION) { + operationContext.getTimeoutContext().resetTimeoutIfPresent(); + } + } + + private void getMore(final ServerCursor cursor, final SingleResultCallback> callback) { + resourceManager.executeWithConnection((connection, wrappedCallback) -> + getMoreLoop(assertNotNull(connection), cursor, wrappedCallback), callback); + } + + private void getMoreLoop(final AsyncConnection 
connection, final ServerCursor serverCursor, + final SingleResultCallback<List<T>> callback) { + connection.commandAsync(namespace.getDatabaseName(), + getMoreCommandDocument(serverCursor.getId(), connection.getDescription(), namespace, batchSize, comment), + NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), + CommandResultDocumentCodec.create(decoder, NEXT_BATCH), + assertNotNull(resourceManager.getConnectionSource()).getOperationContext(), + (commandResult, t) -> { + if (t != null) { + Throwable translatedException = + t instanceof MongoCommandException + ? translateCommandException((MongoCommandException) t, serverCursor) + : t; + callback.onResult(null, translatedException); + return; + } + commandCursorResult = toCommandCursorResult( + connection.getDescription().getServerAddress(), NEXT_BATCH, assertNotNull(commandResult)); + ServerCursor nextServerCursor = commandCursorResult.getServerCursor(); + resourceManager.setServerCursor(nextServerCursor); + List<T> nextBatch = commandCursorResult.getResults(); + if (nextServerCursor == null || !nextBatch.isEmpty()) { + commandCursorResult = withEmptyResults(commandCursorResult); + callback.onResult(nextBatch, null); + return; + } + + if (!resourceManager.operable()) { + callback.onResult(emptyList(), null); + return; + } + + getMoreLoop(connection, nextServerCursor, callback); + }); + } + + private CommandCursorResult<T> toCommandCursorResult(final ServerAddress serverAddress, final String fieldNameContainingBatch, + final BsonDocument commandCursorDocument) { + CommandCursorResult<T> commandCursorResult = new CommandCursorResult<>(serverAddress, fieldNameContainingBatch, + commandCursorDocument); + logCommandCursorResult(commandCursorResult); + return commandCursorResult; + } + + /** + * Configures the cursor to {@link #close()} + * without {@linkplain TimeoutContext#resetTimeoutIfPresent() resetting} its {@linkplain TimeoutContext#getTimeout() timeout}. + * This is useful when managing the {@link #close()} behavior externally. + */ + AsyncCommandBatchCursor<T> disableTimeoutResetWhenClosing() { + resetTimeoutWhenClosing = false; + return this; + } + + @ThreadSafe + private final class ResourceManager extends CursorResourceManager<AsyncConnectionSource, AsyncConnection> { + ResourceManager( + final MongoNamespace namespace, + final AsyncConnectionSource connectionSource, + @Nullable final AsyncConnection connectionToPin, + @Nullable final ServerCursor serverCursor) { + super(namespace, connectionSource, connectionToPin, serverCursor); + } + + /** + * Thread-safe. + * Executes {@code operation} within the {@link #tryStartOperation()}/{@link #endOperation()} bounds. + */ + <R> void execute(final AsyncCallbackSupplier<R> operation, final SingleResultCallback<R> callback) { + boolean canStartOperation = doesNotThrow(this::tryStartOperation); + if (!canStartOperation) { + callback.onResult(null, new IllegalStateException(MESSAGE_IF_CLOSED_AS_CURSOR)); + } else { + operation.whenComplete(() -> { + endOperation(); + if (super.getServerCursor() == null) { + // At this point all resources have been released, + // but `isClosed` may still be returning `false` if `close` has not been called. + // Self-close to update the state managed by `ResourceManager`, so that `isClosed` returns `true`.
+ close(); + } + }).get(callback); + } + } + + @Override + void markAsPinned(final AsyncConnection connectionToPin, final Connection.PinningMode pinningMode) { + connectionToPin.markAsPinned(pinningMode); + } + + @Override + void doClose() { + TimeoutContext timeoutContext = operationContext.getTimeoutContext(); + timeoutContext.resetToDefaultMaxTime(); + SingleResultCallback<Void> thenDoNothing = (r, t) -> {}; + if (resetTimeoutWhenClosing) { + timeoutContext.doWithResetTimeout(this::releaseResourcesAsync, thenDoNothing); + } else { + releaseResourcesAsync(thenDoNothing); + } + } + + private void releaseResourcesAsync(final SingleResultCallback<Void> callback) { + beginAsync().thenRunTryCatchAsyncBlocks(c -> { + if (isSkipReleasingServerResourcesOnClose()) { + unsetServerCursor(); + } + if (super.getServerCursor() != null) { + beginAsync().thenSupply(c2 -> { + getConnection(c2); + }).thenConsume((connection, c3) -> { + beginAsync().thenRun(c4 -> { + releaseServerResourcesAsync(connection, c4); + }).thenAlwaysRunAndFinish(() -> { + connection.release(); + }, c3); + }).finish(c); + } else { + c.complete(c); + } + }, MongoException.class, (e, c5) -> { + c5.complete(c5); // ignore exceptions when releasing server resources + }).thenAlwaysRunAndFinish(() -> { + // guarantee that regardless of exceptions, `serverCursor` is null and client resources are released + unsetServerCursor(); + releaseClientResources(); + }, callback); + } + + <R> void executeWithConnection(final AsyncCallableConnectionWithCallback<R> callable, final SingleResultCallback<R> callback) { + getConnection((connection, t) -> { + if (t != null) { + callback.onResult(null, t); + return; + } + callable.call(assertNotNull(connection), (result, t1) -> { + if (t1 != null) { + handleException(connection, t1); + } + connection.release(); + callback.onResult(result, t1); + }); + }); + } + + private void handleException(final AsyncConnection connection, final Throwable exception) { + if (exception instanceof MongoOperationTimeoutException && exception.getCause() instanceof MongoSocketException) { + onCorruptedConnection(connection, (MongoSocketException) exception.getCause()); + } else if (exception instanceof MongoSocketException) { + onCorruptedConnection(connection, (MongoSocketException) exception); + } + } + + private void getConnection(final SingleResultCallback<AsyncConnection> callback) { + assertTrue(getState() != State.IDLE); + AsyncConnection pinnedConnection = getPinnedConnection(); + if (pinnedConnection != null) { + callback.onResult(assertNotNull(pinnedConnection).retain(), null); + } else { + assertNotNull(getConnectionSource()).getConnection(callback); + } + } + + private void releaseServerResourcesAsync(final AsyncConnection connection, final SingleResultCallback<Void> callback) { + beginAsync().thenRun((c) -> { + ServerCursor localServerCursor = super.getServerCursor(); + if (localServerCursor != null) { + killServerCursorAsync(getNamespace(), localServerCursor, connection, c); // complete `c`, not the outer callback, so `thenAlwaysRunAndFinish` unsets the cursor exactly once + } else { + c.complete(c); + } + }).thenAlwaysRunAndFinish(() -> { + unsetServerCursor(); + }, callback); + } + + private void killServerCursorAsync(final MongoNamespace namespace, final ServerCursor localServerCursor, + final AsyncConnection localConnection, final SingleResultCallback<Void> callback) { + localConnection.commandAsync(namespace.getDatabaseName(), getKillCursorsCommand(namespace, localServerCursor), + NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), new BsonDocumentCodec(), + operationContext, (r, t) -> callback.onResult(null, null)); + } + } +} diff --git
a/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java b/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java new file mode 100644 index 00000000000..660e4b30417 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/AsyncOperationHelper.java @@ -0,0 +1,481 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.Function; +import com.mongodb.MongoException; +import com.mongodb.ReadPreference; +import com.mongodb.assertions.Assertions; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.async.AsyncBatchCursor; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.async.function.AsyncCallbackBiFunction; +import com.mongodb.internal.async.function.AsyncCallbackFunction; +import com.mongodb.internal.async.function.AsyncCallbackSupplier; +import com.mongodb.internal.async.function.RetryState; +import com.mongodb.internal.async.function.RetryingAsyncCallbackSupplier; +import com.mongodb.internal.binding.AsyncConnectionSource; +import com.mongodb.internal.binding.AsyncReadBinding; +import com.mongodb.internal.binding.AsyncWriteBinding; +import com.mongodb.internal.binding.ReferenceCounted; +import com.mongodb.internal.connection.AsyncConnection; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.operation.retry.AttachmentKeys; +import com.mongodb.internal.validator.NoOpFieldNameValidator; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonValue; +import org.bson.FieldNameValidator; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.Decoder; + +import java.util.Collections; +import java.util.List; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; +import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; +import static com.mongodb.internal.operation.CommandOperationHelper.addRetryableWriteErrorLabel; +import static com.mongodb.internal.operation.CommandOperationHelper.initialRetryState; +import static com.mongodb.internal.operation.CommandOperationHelper.isRetryWritesEnabled; +import static com.mongodb.internal.operation.CommandOperationHelper.logRetryExecute; +import static com.mongodb.internal.operation.CommandOperationHelper.onRetryableReadAttemptFailure; +import static com.mongodb.internal.operation.CommandOperationHelper.onRetryableWriteAttemptFailure; +import static com.mongodb.internal.operation.CommandOperationHelper.transformWriteException; +import static com.mongodb.internal.operation.WriteConcernHelper.throwOnWriteConcernError; + +final class AsyncOperationHelper { + + interface AsyncCallableWithConnection { + void call(@Nullable AsyncConnection connection, @Nullable Throwable t); + } + 
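+ // A hedged usage sketch of the callback-style interfaces in this helper (the `writeBinding` variable is hypothetical,
+ // not part of this patch): obtain a connection, use it, and release it when done.
+ //
+ //   withAsyncConnection(writeBinding, (connection, t) -> {
+ //       if (t != null) { /* surface the failure */ } else { /* use the connection, then connection.release() */ }
+ //   });
+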
+ interface AsyncCallableConnectionWithCallback { + void call(AsyncConnection connection, SingleResultCallback callback); + } + + interface AsyncCallableWithSource { + void call(@Nullable AsyncConnectionSource source, @Nullable Throwable t); + } + + interface CommandWriteTransformerAsync { + + /** + * Yield an appropriate result object for the input object. + * + * @param t the input object + * @return the function result + */ + @Nullable + R apply(T t, AsyncConnection connection); + } + + interface CommandReadTransformerAsync { + + /** + * Yield an appropriate result object for the input object. + * + * @param t the input object + * @return the function result + */ + @Nullable + R apply(T t, AsyncConnectionSource source, AsyncConnection connection); + } + + + static void withAsyncReadConnectionSource(final AsyncReadBinding binding, final AsyncCallableWithSource callable) { + binding.getReadConnectionSource(errorHandlingCallback(new AsyncCallableWithSourceCallback(callable), OperationHelper.LOGGER)); + } + + static void withAsyncConnection(final AsyncWriteBinding binding, final AsyncCallableWithConnection callable) { + binding.getWriteConnectionSource(errorHandlingCallback(new AsyncCallableWithConnectionCallback(callable), OperationHelper.LOGGER)); + } + + /** + * @see #withAsyncSuppliedResource(AsyncCallbackSupplier, boolean, SingleResultCallback, AsyncCallbackFunction) + */ + static void withAsyncSourceAndConnection(final AsyncCallbackSupplier sourceSupplier, + final boolean wrapConnectionSourceException, final SingleResultCallback callback, + final AsyncCallbackBiFunction asyncFunction) + throws OperationHelper.ResourceSupplierInternalException { + SingleResultCallback errorHandlingCallback = errorHandlingCallback(callback, OperationHelper.LOGGER); + withAsyncSuppliedResource(sourceSupplier, wrapConnectionSourceException, errorHandlingCallback, + (source, sourceReleasingCallback) -> + withAsyncSuppliedResource(source::getConnection, wrapConnectionSourceException, sourceReleasingCallback, + (connection, connectionAndSourceReleasingCallback) -> + asyncFunction.apply(source, connection, connectionAndSourceReleasingCallback))); + } + + + static void withAsyncSuppliedResource(final AsyncCallbackSupplier resourceSupplier, + final boolean wrapSourceConnectionException, final SingleResultCallback callback, + final AsyncCallbackFunction function) throws OperationHelper.ResourceSupplierInternalException { + SingleResultCallback errorHandlingCallback = errorHandlingCallback(callback, OperationHelper.LOGGER); + resourceSupplier.get((resource, supplierException) -> { + if (supplierException != null) { + if (wrapSourceConnectionException) { + supplierException = new OperationHelper.ResourceSupplierInternalException(supplierException); + } + errorHandlingCallback.onResult(null, supplierException); + } else { + Assertions.assertNotNull(resource); + try { + AsyncCallbackSupplier curriedFunction = c -> function.apply(resource, c); + curriedFunction.whenComplete(resource::release).get(errorHandlingCallback); + } catch (Exception e) { + if (resource.getCount() > 0) { + resource.release(); + } + errorHandlingCallback.onResult(null, e); + } + } + }); + } + + static void withAsyncConnectionSourceCallableConnection(final AsyncConnectionSource source, + final AsyncCallableWithConnection callable) { + source.getConnection((connection, t) -> { + source.release(); + if (t != null) { + callable.call(null, t); + } else { + callable.call(connection, null); + } + }); + } + + static void withAsyncConnectionSource(final 
AsyncConnectionSource source, final AsyncCallableWithSource callable) { + callable.call(source, null); + } + + static void executeRetryableReadAsync( + final AsyncReadBinding binding, + final String database, + final CommandCreator commandCreator, + final Decoder decoder, + final CommandReadTransformerAsync transformer, + final boolean retryReads, + final SingleResultCallback callback) { + executeRetryableReadAsync(binding, binding::getReadConnectionSource, database, commandCreator, + decoder, transformer, retryReads, callback); + } + + static void executeRetryableReadAsync( + final AsyncReadBinding binding, + final AsyncCallbackSupplier sourceAsyncSupplier, + final String database, + final CommandCreator commandCreator, + final Decoder decoder, + final CommandReadTransformerAsync transformer, + final boolean retryReads, + final SingleResultCallback callback) { + RetryState retryState = initialRetryState(retryReads, binding.getOperationContext().getTimeoutContext()); + binding.retain(); + OperationContext operationContext = binding.getOperationContext(); + AsyncCallbackSupplier asyncRead = decorateReadWithRetriesAsync(retryState, binding.getOperationContext(), + (AsyncCallbackSupplier) funcCallback -> + withAsyncSourceAndConnection(sourceAsyncSupplier, false, funcCallback, + (source, connection, releasingCallback) -> { + if (retryState.breakAndCompleteIfRetryAnd( + () -> !OperationHelper.canRetryRead(source.getServerDescription(), + operationContext), + releasingCallback)) { + return; + } + createReadCommandAndExecuteAsync(retryState, operationContext, source, database, + commandCreator, decoder, transformer, connection, releasingCallback); + }) + ).whenComplete(binding::release); + asyncRead.get(errorHandlingCallback(callback, OperationHelper.LOGGER)); + } + + static void executeCommandAsync( + final AsyncWriteBinding binding, + final String database, + final CommandCreator commandCreator, + final CommandWriteTransformerAsync transformer, + final SingleResultCallback callback) { + Assertions.notNull("binding", binding); + withAsyncSourceAndConnection(binding::getWriteConnectionSource, false, callback, + (source, connection, releasingCallback) -> + executeCommandAsync(binding, database, commandCreator.create( + binding.getOperationContext(), source.getServerDescription(), connection.getDescription()), + connection, transformer, releasingCallback) + ); + } + + static void executeCommandAsync(final AsyncWriteBinding binding, + final String database, + final BsonDocument command, + final AsyncConnection connection, + final CommandWriteTransformerAsync transformer, + final SingleResultCallback callback) { + Assertions.notNull("binding", binding); + SingleResultCallback addingRetryableLabelCallback = addingRetryableLabelCallback(callback, + connection.getDescription().getMaxWireVersion()); + connection.commandAsync(database, command, NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), new BsonDocumentCodec(), + binding.getOperationContext(), transformingWriteCallback(transformer, connection, addingRetryableLabelCallback)); + } + + static void executeRetryableWriteAsync( + final AsyncWriteBinding binding, + final String database, + @Nullable final ReadPreference readPreference, + final FieldNameValidator fieldNameValidator, + final Decoder commandResultDecoder, + final CommandCreator commandCreator, + final CommandWriteTransformerAsync transformer, + final Function retryCommandModifier, + final SingleResultCallback callback) { + + RetryState retryState = initialRetryState(true, 
binding.getOperationContext().getTimeoutContext()); + binding.retain(); + OperationContext operationContext = binding.getOperationContext(); + + AsyncCallbackSupplier asyncWrite = decorateWriteWithRetriesAsync(retryState, operationContext, + (AsyncCallbackSupplier) funcCallback -> { + boolean firstAttempt = retryState.isFirstAttempt(); + if (!firstAttempt && operationContext.getSessionContext().hasActiveTransaction()) { + operationContext.getSessionContext().clearTransactionContext(); + } + withAsyncSourceAndConnection(binding::getWriteConnectionSource, true, funcCallback, + (source, connection, releasingCallback) -> { + int maxWireVersion = connection.getDescription().getMaxWireVersion(); + SingleResultCallback addingRetryableLabelCallback = firstAttempt + ? releasingCallback + : addingRetryableLabelCallback(releasingCallback, maxWireVersion); + if (retryState.breakAndCompleteIfRetryAnd(() -> + !OperationHelper.canRetryWrite(connection.getDescription(), operationContext.getSessionContext()), + addingRetryableLabelCallback)) { + return; + } + BsonDocument command; + try { + command = retryState.attachment(AttachmentKeys.command()) + .map(previousAttemptCommand -> { + Assertions.assertFalse(firstAttempt); + return retryCommandModifier.apply(previousAttemptCommand); + }).orElseGet(() -> commandCreator.create( + operationContext, + source.getServerDescription(), + connection.getDescription())); + // attach `maxWireVersion`, `retryableCommandFlag` ASAP because they are used to check whether we should retry + retryState.attach(AttachmentKeys.maxWireVersion(), maxWireVersion, true) + .attach(AttachmentKeys.retryableCommandFlag(), isRetryWritesEnabled(command), true) + .attach(AttachmentKeys.commandDescriptionSupplier(), command::getFirstKey, false) + .attach(AttachmentKeys.command(), command, false); + } catch (Throwable t) { + addingRetryableLabelCallback.onResult(null, t); + return; + } + connection.commandAsync(database, command, fieldNameValidator, readPreference, commandResultDecoder, + operationContext, transformingWriteCallback(transformer, connection, addingRetryableLabelCallback)); + }); + }).whenComplete(binding::release); + + asyncWrite.get(exceptionTransformingCallback(errorHandlingCallback(callback, OperationHelper.LOGGER))); + } + + static void createReadCommandAndExecuteAsync( + final RetryState retryState, + final OperationContext operationContext, + final AsyncConnectionSource source, + final String database, + final CommandCreator commandCreator, + final Decoder decoder, + final CommandReadTransformerAsync transformer, + final AsyncConnection connection, + final SingleResultCallback callback) { + BsonDocument command; + try { + command = commandCreator.create(operationContext, source.getServerDescription(), connection.getDescription()); + retryState.attach(AttachmentKeys.commandDescriptionSupplier(), command::getFirstKey, false); + } catch (IllegalArgumentException e) { + callback.onResult(null, e); + return; + } + connection.commandAsync(database, command, NoOpFieldNameValidator.INSTANCE, source.getReadPreference(), decoder, + operationContext, transformingReadCallback(transformer, source, connection, callback)); + } + + static AsyncCallbackSupplier decorateReadWithRetriesAsync(final RetryState retryState, final OperationContext operationContext, + final AsyncCallbackSupplier asyncReadFunction) { + return new RetryingAsyncCallbackSupplier<>(retryState, onRetryableReadAttemptFailure(operationContext), + CommandOperationHelper::shouldAttemptToRetryRead, callback -> { + 
logRetryExecute(retryState, operationContext); + asyncReadFunction.get(callback); + }); + } + + static AsyncCallbackSupplier decorateWriteWithRetriesAsync(final RetryState retryState, final OperationContext operationContext, + final AsyncCallbackSupplier asyncWriteFunction) { + return new RetryingAsyncCallbackSupplier<>(retryState, onRetryableWriteAttemptFailure(operationContext), + CommandOperationHelper::loggingShouldAttemptToRetryWriteAndAddRetryableLabel, callback -> { + logRetryExecute(retryState, operationContext); + asyncWriteFunction.get(callback); + }); + } + + static CommandWriteTransformerAsync writeConcernErrorTransformerAsync(final TimeoutContext timeoutContext) { + return (result, connection) -> { + assertNotNull(result); + throwOnWriteConcernError(result, connection.getDescription().getServerAddress(), + connection.getDescription().getMaxWireVersion(), + timeoutContext); + return null; + }; + } + + static CommandReadTransformerAsync> asyncSingleBatchCursorTransformer(final String fieldName) { + return (result, source, connection) -> + new AsyncSingleBatchCursor<>(BsonDocumentWrapperHelper.toList(result, fieldName), 0); + } + + static AsyncBatchCursor cursorDocumentToAsyncBatchCursor(final TimeoutMode timeoutMode, final BsonDocument cursorDocument, + final int batchSize, final Decoder decoder, @Nullable final BsonValue comment, final AsyncConnectionSource source, + final AsyncConnection connection) { + return new AsyncCommandBatchCursor<>(timeoutMode, cursorDocument, batchSize, 0, decoder, comment, source, connection); + } + + static SingleResultCallback releasingCallback(final SingleResultCallback wrapped, final AsyncConnection connection) { + return new ReferenceCountedReleasingWrappedCallback<>(wrapped, Collections.singletonList(connection)); + } + + static SingleResultCallback exceptionTransformingCallback(final SingleResultCallback callback) { + return (result, t) -> { + if (t != null) { + if (t instanceof MongoException) { + callback.onResult(null, transformWriteException((MongoException) t)); + } else { + callback.onResult(null, t); + } + } else { + callback.onResult(result, null); + } + }; + } + + private static SingleResultCallback transformingWriteCallback(final CommandWriteTransformerAsync transformer, + final AsyncConnection connection, final SingleResultCallback callback) { + return (result, t) -> { + if (t != null) { + callback.onResult(null, t); + } else { + R transformedResult; + try { + transformedResult = transformer.apply(assertNotNull(result), connection); + } catch (Throwable e) { + callback.onResult(null, e); + return; + } + callback.onResult(transformedResult, null); + } + }; + } + + + private static class AsyncCallableWithConnectionCallback implements SingleResultCallback { + private final AsyncCallableWithConnection callable; + + AsyncCallableWithConnectionCallback(final AsyncCallableWithConnection callable) { + this.callable = callable; + } + + @Override + public void onResult(@Nullable final AsyncConnectionSource source, @Nullable final Throwable t) { + if (t != null) { + callable.call(null, t); + } else { + withAsyncConnectionSourceCallableConnection(Assertions.assertNotNull(source), callable); + } + } + } + + private static class AsyncCallableWithSourceCallback implements SingleResultCallback { + private final AsyncCallableWithSource callable; + + AsyncCallableWithSourceCallback(final AsyncCallableWithSource callable) { + this.callable = callable; + } + + @Override + public void onResult(@Nullable final AsyncConnectionSource source, @Nullable 
final Throwable t) { + if (t != null) { + callable.call(null, t); + } else { + withAsyncConnectionSource(Assertions.assertNotNull(source), callable); + } + } + } + + private static class ReferenceCountedReleasingWrappedCallback implements SingleResultCallback { + private final SingleResultCallback wrapped; + private final List referenceCounted; + + ReferenceCountedReleasingWrappedCallback(final SingleResultCallback wrapped, + final List referenceCounted) { + this.wrapped = wrapped; + this.referenceCounted = Assertions.notNull("referenceCounted", referenceCounted); + } + + @Override + public void onResult(@Nullable final T result, @Nullable final Throwable t) { + for (ReferenceCounted cur : referenceCounted) { + if (cur != null) { + cur.release(); + } + } + wrapped.onResult(result, t); + } + } + + private static SingleResultCallback addingRetryableLabelCallback(final SingleResultCallback callback, + final int maxWireVersion) { + return (result, t) -> { + if (t != null) { + if (t instanceof MongoException) { + addRetryableWriteErrorLabel((MongoException) t, maxWireVersion); + } + callback.onResult(null, t); + } else { + callback.onResult(result, null); + } + }; + } + + private static SingleResultCallback transformingReadCallback(final CommandReadTransformerAsync transformer, + final AsyncConnectionSource source, final AsyncConnection connection, final SingleResultCallback callback) { + return (result, t) -> { + if (t != null) { + callback.onResult(null, t); + } else { + R transformedResult; + try { + transformedResult = transformer.apply(assertNotNull(result), source, connection); + } catch (Throwable e) { + callback.onResult(null, e); + return; + } + callback.onResult(transformedResult, null); + } + }; + } + + private AsyncOperationHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/AsyncSingleBatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/AsyncSingleBatchCursor.java new file mode 100644 index 00000000000..57b20ff1711 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/AsyncSingleBatchCursor.java @@ -0,0 +1,77 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoException; +import com.mongodb.internal.async.AsyncBatchCursor; +import com.mongodb.internal.async.SingleResultCallback; + +import java.util.List; + +import static java.util.Collections.emptyList; + +class AsyncSingleBatchCursor implements AsyncBatchCursor { + + static AsyncSingleBatchCursor createEmptyAsyncSingleBatchCursor(final int batchSize) { + return new AsyncSingleBatchCursor<>(emptyList(), batchSize); + } + + private final List batch; + private final int batchSize; + + private volatile boolean hasNext = true; + private volatile boolean closed = false; + + AsyncSingleBatchCursor(final List batch, final int batchSize) { + this.batch = batch; + this.batchSize = batchSize; + } + + @Override + public void close() { + closed = true; + } + + @Override + public void next(final SingleResultCallback> callback) { + if (closed) { + callback.onResult(null, new MongoException("next() called after the cursor was closed.")); + } else if (hasNext && !batch.isEmpty()) { + hasNext = false; + callback.onResult(batch, null); + } else { + closed = true; + callback.onResult(emptyList(), null); + } + } + + @Override + public void setBatchSize(final int batchSize) { + // Noop + } + + @Override + public int getBatchSize() { + return batchSize; + } + + @Override + public boolean isClosed() { + return closed; + } + +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/BaseFindAndModifyOperation.java b/driver-core/src/main/com/mongodb/internal/operation/BaseFindAndModifyOperation.java new file mode 100644 index 00000000000..c5d56fda81c --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/BaseFindAndModifyOperation.java @@ -0,0 +1,229 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoNamespace; +import com.mongodb.WriteConcern; +import com.mongodb.client.model.Collation; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncWriteBinding; +import com.mongodb.internal.binding.WriteBinding; +import com.mongodb.internal.session.SessionContext; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonInt64; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.FieldNameValidator; +import org.bson.codecs.Decoder; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.operation.AsyncOperationHelper.executeRetryableWriteAsync; +import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; +import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; +import static com.mongodb.internal.operation.OperationHelper.isRetryableWrite; +import static com.mongodb.internal.operation.OperationHelper.validateHintForFindAndModify; +import static com.mongodb.internal.operation.SyncOperationHelper.executeRetryableWrite; + +/** + * Abstract base class for findAndModify-based operations + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
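<p>Concrete subclasses specialize the command document via {@code specializeCommand}, for example by adding the {@code update} or {@code remove} fields of the {@code findAndModify} command.</p>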
+ */ +public abstract class BaseFindAndModifyOperation implements WriteOperation { + private static final String COMMAND_NAME = "findAndModify"; + private final MongoNamespace namespace; + private final WriteConcern writeConcern; + private final boolean retryWrites; + private final Decoder decoder; + + private BsonDocument filter; + private BsonDocument projection; + private BsonDocument sort; + private Collation collation; + private BsonDocument hint; + private String hintString; + private BsonValue comment; + private BsonDocument variables; + + protected BaseFindAndModifyOperation(final MongoNamespace namespace, final WriteConcern writeConcern, final boolean retryWrites, + final Decoder decoder) { + this.namespace = notNull("namespace", namespace); + this.writeConcern = notNull("writeConcern", writeConcern); + this.retryWrites = retryWrites; + this.decoder = notNull("decoder", decoder); + } + + @Override + public String getCommandName() { + return COMMAND_NAME; + } + + + @Override + public T execute(final WriteBinding binding) { + return executeRetryableWrite(binding, getDatabaseName(), null, getFieldNameValidator(), + CommandResultDocumentCodec.create(getDecoder(), "value"), + getCommandCreator(), + FindAndModifyHelper.transformer(), + cmd -> cmd); + } + + @Override + public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback callback) { + executeRetryableWriteAsync(binding, getDatabaseName(), null, getFieldNameValidator(), + CommandResultDocumentCodec.create(getDecoder(), "value"), + getCommandCreator(), + FindAndModifyHelper.asyncTransformer(), cmd -> cmd, callback); + } + + public MongoNamespace getNamespace() { + return namespace; + } + + public WriteConcern getWriteConcern() { + return writeConcern; + } + + public Decoder getDecoder() { + return decoder; + } + + public boolean isRetryWrites() { + return retryWrites; + } + + public BsonDocument getFilter() { + return filter; + } + + public BaseFindAndModifyOperation filter(@Nullable final BsonDocument filter) { + this.filter = filter; + return this; + } + + public BsonDocument getProjection() { + return projection; + } + + public BaseFindAndModifyOperation projection(@Nullable final BsonDocument projection) { + this.projection = projection; + return this; + } + + public BsonDocument getSort() { + return sort; + } + + public BaseFindAndModifyOperation sort(@Nullable final BsonDocument sort) { + this.sort = sort; + return this; + } + + @Nullable + public Collation getCollation() { + return collation; + } + + @Nullable + public BsonDocument getHint() { + return hint; + } + + public BaseFindAndModifyOperation hint(@Nullable final BsonDocument hint) { + this.hint = hint; + return this; + } + + @Nullable + public String getHintString() { + return hintString; + } + + public BaseFindAndModifyOperation hintString(@Nullable final String hint) { + this.hintString = hint; + return this; + } + + public BaseFindAndModifyOperation collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + public BsonValue getComment() { + return comment; + } + + public BaseFindAndModifyOperation comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + public BsonDocument getLet() { + return variables; + } + + public BaseFindAndModifyOperation let(@Nullable final BsonDocument variables) { + this.variables = variables; + return this; + } + + protected abstract FieldNameValidator getFieldNameValidator(); + + protected abstract void specializeCommand(BsonDocument 
initialCommand, ConnectionDescription connectionDescription); + + private CommandCreator getCommandCreator() { + return (operationContext, serverDescription, connectionDescription) -> { + SessionContext sessionContext = operationContext.getSessionContext(); + + BsonDocument commandDocument = new BsonDocument(getCommandName(), new BsonString(getNamespace().getCollectionName())); + putIfNotNull(commandDocument, "query", getFilter()); + putIfNotNull(commandDocument, "fields", getProjection()); + putIfNotNull(commandDocument, "sort", getSort()); + + specializeCommand(commandDocument, connectionDescription); + + if (getWriteConcern().isAcknowledged() && !getWriteConcern().isServerDefault() + && !sessionContext.hasActiveTransaction()) { + commandDocument.put("writeConcern", getWriteConcern().asDocument()); + } + if (getCollation() != null) { + commandDocument.put("collation", getCollation().asDocument()); + } + if (getHint() != null || getHintString() != null) { + validateHintForFindAndModify(connectionDescription, getWriteConcern()); + if (getHint() != null) { + commandDocument.put("hint", getHint()); + } else { + commandDocument.put("hint", new BsonString(getHintString())); + } + } + putIfNotNull(commandDocument, "comment", getComment()); + putIfNotNull(commandDocument, "let", getLet()); + + if (isRetryableWrite(isRetryWrites(), getWriteConcern(), connectionDescription, sessionContext)) { + commandDocument.put("txnNumber", new BsonInt64(sessionContext.advanceTransactionNumber())); + } + return commandDocument; + }; + } + + private String getDatabaseName() { + return getNamespace().getDatabaseName(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/BatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/BatchCursor.java new file mode 100644 index 00000000000..1f280e040fd --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/BatchCursor.java @@ -0,0 +1,114 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.ServerAddress; +import com.mongodb.ServerCursor; +import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.lang.Nullable; + +import java.io.Closeable; +import java.util.Iterator; +import java.util.List; + +import static java.util.Spliterator.IMMUTABLE; +import static java.util.Spliterator.ORDERED; +import static java.util.Spliterators.spliteratorUnknownSize; +import static java.util.stream.Collectors.toList; +import static java.util.stream.StreamSupport.stream; + +/** + * MongoDB returns query results as batches, and this interface provides an iterator over those batches. The first call to + * the {@code next} method will return the first batch, and subsequent calls will trigger a request to get the next batch + * of results. Clients can control the batch size by setting the {@code batchSize} property between calls to {@code next}. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
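<p>A minimal iteration sketch (hypothetical usage; {@code executeQuery} and {@code process} are placeholders, not part of this interface):</p>
<pre>{@code
try (BatchCursor<Document> cursor = executeQuery()) {   // Closeable, so try-with-resources works
    while (cursor.hasNext()) {                          // may block for tailable cursors
        for (Document document : cursor.next()) {       // one batch per call
            process(document);
        }
    }
}
}</pre>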
+ */ +@NotThreadSafe +public interface BatchCursor<T> extends Iterator<List<T>>, Closeable { + /** + * Despite this interface being {@linkplain NotThreadSafe non-thread-safe}, + * {@link #close()} is allowed to be called concurrently with any method of the cursor, including itself. + * This is useful to cancel blocked {@link #hasNext()}, {@link #next()}. + * This method is idempotent. + *
<p>
+ * Another quirk is that this method is allowed to release resources "eventually", + * i.e., not before (in the happens-before order) returning. + */ + @Override + void close(); + + /** + * Returns true if another batch of results exists. A tailable cursor will block until another batch exists. + * + * @return true if another batch exists + */ + boolean hasNext(); + + /** + * Returns the next batch of results as a mutable list. Modifications to the list will not affect the cursor state. + * A tailable cursor will block until another batch exists. + * + * @return the next batch of results + * @throws java.util.NoSuchElementException if no next batch exists + */ + List<T> next(); + + /** + * Gets the number of results available locally without blocking, which may be 0, and is 0 when the cursor is exhausted or closed. + * + * @return the number of results available locally without blocking + */ + int available(); + + /** + * Sets the batch size to use when requesting the next batch. This is the number of documents to request in the next batch. + * + * @param batchSize the non-negative batch size. 0 means to use the server default. + */ + void setBatchSize(int batchSize); + + /** + * Gets the batch size to use when requesting the next batch. This is the number of documents to request in the next batch. + * + * @return the non-negative batch size. 0 means to use the server default. + */ + int getBatchSize(); + + /** + * A special {@code next()} case that returns the next batch as a mutable list if available or null. + * Modifications to the list will not affect the cursor state. + * + *
<p>Tailable cursors are an example where this is useful. A call to {@code tryNext()} may return null, but in the future calling {@code tryNext()} would return a new batch if a document had been added to the capped collection.</p>
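<p>A hypothetical polling sketch: {@code List<Document> batch = cursor.tryNext();} followed by a {@code null} check, since {@code null} means no batch was available yet.</p>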
+ * + * @return the next batch if available or null. + * @mongodb.driver.manual reference/glossary/#term-tailable-cursor Tailable Cursor + */ + @Nullable + List tryNext(); + + @Nullable + ServerCursor getServerCursor(); + + ServerAddress getServerAddress(); + + default List> exhaust() { + return stream(spliteratorUnknownSize(this, ORDERED | IMMUTABLE), false) + .collect(toList()); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/BsonArrayWrapper.java b/driver-core/src/main/com/mongodb/internal/operation/BsonArrayWrapper.java new file mode 100644 index 00000000000..35467ff9c5c --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/BsonArrayWrapper.java @@ -0,0 +1,197 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import org.bson.BsonArray; +import org.bson.BsonValue; + +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.ListIterator; + +import static org.bson.assertions.Assertions.notNull; + + +class BsonArrayWrapper extends BsonArray { + + private final List wrappedArray; + + BsonArrayWrapper(final List wrappedArray) { + this.wrappedArray = notNull("wrappedArray", wrappedArray); + } + + public List getWrappedArray() { + return wrappedArray; + } + + @Override + public List getValues() { + throw new UnsupportedOperationException(); + } + + @Override + public int size() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isEmpty() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean contains(final Object o) { + throw new UnsupportedOperationException(); + } + + @Override + public Iterator iterator() { + throw new UnsupportedOperationException(); + } + + @Override + public Object[] toArray() { + throw new UnsupportedOperationException(); + } + + @Override + public T[] toArray(final T[] a) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean add(final BsonValue bsonValue) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean remove(final Object o) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean containsAll(final Collection c) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean addAll(final Collection c) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean addAll(final int index, final Collection c) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean removeAll(final Collection c) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean retainAll(final Collection c) { + throw new UnsupportedOperationException(); + } + + @Override + public void clear() { + throw new UnsupportedOperationException(); + } + + @Override + public BsonValue get(final int index) { + throw new UnsupportedOperationException(); + } + + @Override + public BsonValue 
set(final int index, final BsonValue element) { + throw new UnsupportedOperationException(); + } + + @Override + public void add(final int index, final BsonValue element) { + throw new UnsupportedOperationException(); + } + + @Override + public BsonValue remove(final int index) { + throw new UnsupportedOperationException(); + } + + @Override + public int indexOf(final Object o) { + throw new UnsupportedOperationException(); + } + + @Override + public int lastIndexOf(final Object o) { + throw new UnsupportedOperationException(); + } + + @Override + public ListIterator listIterator() { + throw new UnsupportedOperationException(); + } + + @Override + public ListIterator listIterator(final int index) { + throw new UnsupportedOperationException(); + } + + @Override + public List subList(final int fromIndex, final int toIndex) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BsonArrayWrapper that = (BsonArrayWrapper) o; + if (!wrappedArray.equals(that.wrappedArray)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + wrappedArray.hashCode(); + return result; + } + + @Override + public String toString() { + return "BsonArrayWrapper{" + + "wrappedArray=" + wrappedArray + + '}'; + } + + @Override + public BsonArray clone() { + throw new UnsupportedOperationException("This should never be called on an instance of this type"); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/BsonDocumentWrapperHelper.java b/driver-core/src/main/com/mongodb/internal/operation/BsonDocumentWrapperHelper.java new file mode 100644 index 00000000000..5b0d45dfc65 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/BsonDocumentWrapperHelper.java @@ -0,0 +1,41 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation; + +import org.bson.BsonDocument; +import org.bson.BsonDocumentWrapper; + +import java.util.List; + +final class BsonDocumentWrapperHelper { + + @SuppressWarnings("unchecked") + static List toList(final BsonDocument result, final String fieldContainingWrappedArray) { + return ((BsonArrayWrapper) result.getArray(fieldContainingWrappedArray)).getWrappedArray(); + } + + @SuppressWarnings("unchecked") + static T toDocument(final BsonDocument document) { + if (document == null) { + return null; + } + return ((BsonDocumentWrapper) document).getWrappedDocument(); + } + + private BsonDocumentWrapperHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/BulkWriteBatch.java b/driver-core/src/main/com/mongodb/internal/operation/BulkWriteBatch.java new file mode 100644 index 00000000000..1064bee14d3 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/BulkWriteBatch.java @@ -0,0 +1,392 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoBulkWriteException; +import com.mongodb.MongoInternalException; +import com.mongodb.MongoNamespace; +import com.mongodb.WriteConcern; +import com.mongodb.bulk.BulkWriteError; +import com.mongodb.bulk.BulkWriteInsert; +import com.mongodb.bulk.BulkWriteResult; +import com.mongodb.bulk.BulkWriteUpsert; +import com.mongodb.bulk.WriteConcernError; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.bulk.DeleteRequest; +import com.mongodb.internal.bulk.UpdateRequest; +import com.mongodb.internal.bulk.WriteRequest; +import com.mongodb.internal.bulk.WriteRequestWithIndex; +import com.mongodb.internal.connection.BulkWriteBatchCombiner; +import com.mongodb.internal.connection.IndexMap; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.connection.SplittablePayload; +import com.mongodb.internal.session.SessionContext; +import com.mongodb.internal.validator.MappedFieldNameValidator; +import com.mongodb.internal.validator.NoOpFieldNameValidator; +import com.mongodb.internal.validator.ReplacingDocumentFieldNameValidator; +import com.mongodb.internal.validator.UpdateFieldNameValidator; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonInt64; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.FieldNameValidator; +import org.bson.codecs.BsonValueCodecProvider; +import org.bson.codecs.Decoder; +import org.bson.codecs.configuration.CodecRegistry; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.internal.bulk.WriteRequest.Type.DELETE; +import static 
com.mongodb.internal.bulk.WriteRequest.Type.INSERT; +import static com.mongodb.internal.bulk.WriteRequest.Type.REPLACE; +import static com.mongodb.internal.bulk.WriteRequest.Type.UPDATE; +import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; +import static com.mongodb.internal.operation.CommandOperationHelper.commandWriteConcern; +import static com.mongodb.internal.operation.OperationHelper.LOGGER; +import static com.mongodb.internal.operation.OperationHelper.isRetryableWrite; +import static com.mongodb.internal.operation.WriteConcernHelper.createWriteConcernError; +import static java.util.Collections.singletonMap; +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; + +/** + *

<p>This class is not part of the public API and may be removed or changed at any time</p>
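[Editor's note] For orientation, a hedged sketch of the loop an executor might drive over this class, pieced together from the methods
declared below; sendCommandWithPayload is a hypothetical stand-in for writing the command and its split payload to a connection.

    // Simplified driver loop: consume splits until the combiner stops further batches or none remain.
    static BulkWriteResult drive(final BulkWriteBatch first) {
        BulkWriteBatch batch = first;
        while (batch.shouldProcessBatch()) {
            BsonDocument result = sendCommandWithPayload(batch.getCommand(), batch.getPayload()); // hypothetical helper
            batch.addResult(result);
            if (!batch.hasAnotherBatch()) {
                break;
            }
            batch = batch.getNextBatch();
        }
        if (batch.hasErrors()) {
            throw batch.getError(); // MongoBulkWriteException accumulated by the combiner
        }
        return batch.getResult();
    }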
+ */ +public final class BulkWriteBatch { + private static final CodecRegistry REGISTRY = fromProviders(new BsonValueCodecProvider()); + private static final Decoder DECODER = REGISTRY.get(BsonDocument.class); + + private final MongoNamespace namespace; + private final ConnectionDescription connectionDescription; + private final boolean ordered; + private final WriteConcern writeConcern; + private final Boolean bypassDocumentValidation; + private final boolean retryWrites; + private final BulkWriteBatchCombiner bulkWriteBatchCombiner; + private final IndexMap indexMap; + private final WriteRequest.Type batchType; + private final BsonDocument command; + private final SplittablePayload payload; + private final List unprocessed; + private final OperationContext operationContext; + private final BsonValue comment; + private final BsonDocument variables; + + static BulkWriteBatch createBulkWriteBatch(final MongoNamespace namespace, + final ConnectionDescription connectionDescription, + final boolean ordered, final WriteConcern writeConcern, + final Boolean bypassDocumentValidation, final boolean retryWrites, + final List writeRequests, + final OperationContext operationContext, + @Nullable final BsonValue comment, @Nullable final BsonDocument variables) { + boolean canRetryWrites = isRetryableWrite(retryWrites, writeConcern, connectionDescription, operationContext.getSessionContext()); + List writeRequestsWithIndex = new ArrayList<>(); + boolean writeRequestsAreRetryable = true; + for (int i = 0; i < writeRequests.size(); i++) { + WriteRequest writeRequest = writeRequests.get(i); + writeRequestsAreRetryable = writeRequestsAreRetryable && isRetryable(writeRequest); + writeRequestsWithIndex.add(new WriteRequestWithIndex(writeRequest, i)); + } + if (canRetryWrites && !writeRequestsAreRetryable) { + canRetryWrites = false; + logWriteModelDoesNotSupportRetries(); + } + return new BulkWriteBatch(namespace, connectionDescription, ordered, writeConcern, bypassDocumentValidation, + canRetryWrites, new BulkWriteBatchCombiner(connectionDescription.getServerAddress(), ordered, writeConcern), + writeRequestsWithIndex, operationContext, comment, variables); + } + + private BulkWriteBatch(final MongoNamespace namespace, final ConnectionDescription connectionDescription, + final boolean ordered, final WriteConcern writeConcern, @Nullable final Boolean bypassDocumentValidation, + final boolean retryWrites, final BulkWriteBatchCombiner bulkWriteBatchCombiner, + final List writeRequestsWithIndices, final OperationContext operationContext, + @Nullable final BsonValue comment, @Nullable final BsonDocument variables) { + this.namespace = namespace; + this.connectionDescription = connectionDescription; + this.ordered = ordered; + this.writeConcern = writeConcern; + this.bypassDocumentValidation = bypassDocumentValidation; + this.bulkWriteBatchCombiner = bulkWriteBatchCombiner; + this.batchType = writeRequestsWithIndices.isEmpty() ? 
INSERT : writeRequestsWithIndices.get(0).getType(); + this.retryWrites = retryWrites; + + List payloadItems = new ArrayList<>(); + List unprocessedItems = new ArrayList<>(); + + IndexMap indexMap = IndexMap.create(); + for (int i = 0; i < writeRequestsWithIndices.size(); i++) { + WriteRequestWithIndex writeRequestWithIndex = writeRequestsWithIndices.get(i); + if (writeRequestWithIndex.getType() != batchType) { + if (ordered) { + unprocessedItems.addAll(writeRequestsWithIndices.subList(i, writeRequestsWithIndices.size())); + break; + } else { + unprocessedItems.add(writeRequestWithIndex); + continue; + } + } + + indexMap = indexMap.add(payloadItems.size(), writeRequestWithIndex.getIndex()); + payloadItems.add(writeRequestWithIndex); + } + + this.indexMap = indexMap; + this.unprocessed = unprocessedItems; + this.payload = new SplittablePayload(getPayloadType(batchType), payloadItems, ordered, getFieldNameValidator()); + this.operationContext = operationContext; + this.comment = comment; + this.variables = variables; + this.command = new BsonDocument(); + + SessionContext sessionContext = operationContext.getSessionContext(); + if (!payloadItems.isEmpty()) { + command.put(getCommandName(batchType), new BsonString(namespace.getCollectionName())); + command.put("ordered", new BsonBoolean(ordered)); + commandWriteConcern(writeConcern, sessionContext).ifPresent(value -> + command.put("writeConcern", value.asDocument())); + if (bypassDocumentValidation != null) { + command.put("bypassDocumentValidation", new BsonBoolean(bypassDocumentValidation)); + } + putIfNotNull(command, "comment", comment); + putIfNotNull(command, "let", variables); + if (retryWrites) { + command.put("txnNumber", new BsonInt64(sessionContext.advanceTransactionNumber())); + } + } + } + + private BulkWriteBatch(final MongoNamespace namespace, final ConnectionDescription connectionDescription, + final boolean ordered, final WriteConcern writeConcern, final Boolean bypassDocumentValidation, + final boolean retryWrites, final BulkWriteBatchCombiner bulkWriteBatchCombiner, final IndexMap indexMap, + final WriteRequest.Type batchType, final BsonDocument command, final SplittablePayload payload, + final List unprocessed, final OperationContext operationContext, + @Nullable final BsonValue comment, @Nullable final BsonDocument variables) { + this.namespace = namespace; + this.connectionDescription = connectionDescription; + this.ordered = ordered; + this.writeConcern = writeConcern; + this.bypassDocumentValidation = bypassDocumentValidation; + this.bulkWriteBatchCombiner = bulkWriteBatchCombiner; + this.indexMap = indexMap; + this.batchType = batchType; + this.payload = payload; + this.unprocessed = unprocessed; + this.retryWrites = retryWrites; + this.operationContext = operationContext; + this.comment = comment; + this.variables = variables; + if (retryWrites) { + command.put("txnNumber", new BsonInt64(operationContext.getSessionContext().advanceTransactionNumber())); + } + this.command = command; + } + + void addResult(@Nullable final BsonDocument result) { + if (writeConcern.isAcknowledged()) { + if (hasError(assertNotNull(result))) { + MongoBulkWriteException bulkWriteException = getBulkWriteException(result); + bulkWriteBatchCombiner.addErrorResult(bulkWriteException, indexMap); + } else { + bulkWriteBatchCombiner.addResult(getBulkWriteResult(result)); + } + } + } + + boolean getRetryWrites() { + return retryWrites; + } + + BsonDocument getCommand() { + return command; + } + + SplittablePayload getPayload() { + return payload; + 
} + + Decoder getDecoder() { + return DECODER; + } + + BulkWriteResult getResult() { + return bulkWriteBatchCombiner.getResult(); + } + + boolean hasErrors() { + return bulkWriteBatchCombiner.hasErrors(); + } + + @Nullable + MongoBulkWriteException getError() { + return bulkWriteBatchCombiner.getError(); + } + + boolean shouldProcessBatch() { + return !bulkWriteBatchCombiner.shouldStopSendingMoreBatches() && !payload.isEmpty(); + } + + boolean hasAnotherBatch() { + return !unprocessed.isEmpty(); + } + + BulkWriteBatch getNextBatch() { + if (payload.hasAnotherSplit()) { + IndexMap nextIndexMap = IndexMap.create(); + int newIndex = 0; + for (int i = payload.getPosition(); i < payload.size(); i++) { + nextIndexMap = nextIndexMap.add(newIndex, indexMap.map(i)); + newIndex++; + } + + + return new BulkWriteBatch(namespace, connectionDescription, ordered, writeConcern, bypassDocumentValidation, retryWrites, + bulkWriteBatchCombiner, nextIndexMap, batchType, command, payload.getNextSplit(), unprocessed, operationContext, + comment, variables); + } else { + return new BulkWriteBatch(namespace, connectionDescription, ordered, writeConcern, bypassDocumentValidation, retryWrites, + bulkWriteBatchCombiner, unprocessed, operationContext, comment, variables); + } + } + + private FieldNameValidator getFieldNameValidator() { + if (batchType == UPDATE || batchType == REPLACE) { + Map rootMap; + if (batchType == REPLACE) { + rootMap = singletonMap("u", ReplacingDocumentFieldNameValidator.INSTANCE); + } else { + rootMap = singletonMap("u", new UpdateFieldNameValidator()); + } + return new MappedFieldNameValidator(NoOpFieldNameValidator.INSTANCE, rootMap); + } else { + return NoOpFieldNameValidator.INSTANCE; + } + } + + private BulkWriteResult getBulkWriteResult(final BsonDocument result) { + int count = result.getNumber("n").intValue(); + List insertedItems = getInsertedItems(result); + List upsertedItems = getUpsertedItems(result); + return BulkWriteResult.acknowledged(batchType, count - upsertedItems.size(), getModifiedCount(result), upsertedItems, + insertedItems); + } + + private List getInsertedItems(final BsonDocument result) { + Set writeErrors = getWriteErrors(result).stream().map(BulkWriteError::getIndex).collect(Collectors.toSet()); + return payload.getInsertedIds().entrySet().stream() + .filter(entry -> !writeErrors.contains(entry.getKey())) + .map(entry -> new BulkWriteInsert(entry.getKey(), entry.getValue())) + .collect(Collectors.toList()); + } + + + private List getUpsertedItems(final BsonDocument result) { + BsonArray upsertedValue = result.getArray("upserted", new BsonArray()); + List bulkWriteUpsertList = new ArrayList<>(); + for (BsonValue upsertedItem : upsertedValue) { + BsonDocument upsertedItemDocument = (BsonDocument) upsertedItem; + bulkWriteUpsertList.add(new BulkWriteUpsert(indexMap.map(upsertedItemDocument.getNumber("index").intValue()), + upsertedItemDocument.get("_id"))); + } + return bulkWriteUpsertList; + } + + private int getModifiedCount(final BsonDocument result) { + return result.getNumber("nModified", new BsonInt32(0)).intValue(); + } + + private boolean hasError(final BsonDocument result) { + return result.get("writeErrors") != null || result.get("writeConcernError") != null; + } + + private MongoBulkWriteException getBulkWriteException(final BsonDocument result) { + if (!hasError(result)) { + throw new MongoInternalException("This method should not have been called"); + } + + return new MongoBulkWriteException(getBulkWriteResult(result), getWriteErrors(result), + 
getWriteConcernError(result), connectionDescription.getServerAddress(), + result.getArray("errorLabels", new BsonArray()).stream().map(i -> i.asString().getValue()).collect(Collectors.toSet())); + } + + private List getWriteErrors(final BsonDocument result) { + List writeErrors = new ArrayList<>(); + BsonArray writeErrorsDocuments = (BsonArray) result.get("writeErrors"); + if (writeErrorsDocuments != null) { + for (BsonValue cur : writeErrorsDocuments) { + BsonDocument curDocument = (BsonDocument) cur; + writeErrors.add(new BulkWriteError(curDocument.getNumber("code").intValue(), + curDocument.getString("errmsg").getValue(), + curDocument.getDocument("errInfo", new BsonDocument()), + curDocument.getNumber("index").intValue())); + } + } + return writeErrors; + } + + @Nullable + private WriteConcernError getWriteConcernError(final BsonDocument result) { + BsonDocument writeConcernErrorDocument = (BsonDocument) result.get("writeConcernError"); + if (writeConcernErrorDocument == null) { + return null; + } else { + return createWriteConcernError(writeConcernErrorDocument); + } + } + + private String getCommandName(final WriteRequest.Type batchType) { + if (batchType == INSERT) { + return "insert"; + } else if (batchType == UPDATE || batchType == REPLACE) { + return "update"; + } else { + return "delete"; + } + } + + private SplittablePayload.Type getPayloadType(final WriteRequest.Type batchType) { + if (batchType == INSERT) { + return SplittablePayload.Type.INSERT; + } else if (batchType == UPDATE) { + return SplittablePayload.Type.UPDATE; + } else if (batchType == REPLACE) { + return SplittablePayload.Type.REPLACE; + } else { + return SplittablePayload.Type.DELETE; + } + } + + private static boolean isRetryable(final WriteRequest writeRequest) { + if (writeRequest.getType() == UPDATE || writeRequest.getType() == REPLACE) { + return !((UpdateRequest) writeRequest).isMulti(); + } else if (writeRequest.getType() == DELETE) { + return !((DeleteRequest) writeRequest).isMulti(); + } + return true; + } + + static void logWriteModelDoesNotSupportRetries() { + LOGGER.debug("retryWrites set but one or more writeRequests do not support retryable writes"); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamBatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamBatchCursor.java new file mode 100644 index 00000000000..c4bd72a4775 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamBatchCursor.java @@ -0,0 +1,260 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoChangeStreamException; +import com.mongodb.MongoException; +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.ServerAddress; +import com.mongodb.ServerCursor; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.binding.ReadBinding; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonTimestamp; +import org.bson.RawBsonDocument; +import org.bson.codecs.Decoder; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Consumer; +import java.util.function.Function; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.internal.operation.ChangeStreamBatchCursorHelper.isResumableError; +import static com.mongodb.internal.operation.SyncOperationHelper.withReadConnectionSource; + +/** + * A change stream cursor that wraps {@link CommandBatchCursor} with automatic resumption capabilities in the event + * of timeouts or transient errors. + *

<p>
+ * Upon encountering a resumable error during a {@code hasNext()}, {@code next()}, or {@code tryNext()} call, the
+ * {@link ChangeStreamBatchCursor} attempts to establish a new change stream on the server.
+ * <p>
+ * If an error that occurs during any of these method calls is not resumable, it is immediately propagated to the caller, and the
+ * {@link ChangeStreamBatchCursor} is closed and invalidated on the server. Server errors that occur during this invalidation process
+ * are not propagated to the caller.
+ * <p>
+ * A {@link MongoOperationTimeoutException} does not invalidate the {@link ChangeStreamBatchCursor}, but is immediately propagated to
+ * the caller. A subsequent method call will attempt to resume the operation by establishing a new change stream on the server, without
+ * first issuing a {@code getMore} request.
+ * <p>
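[Editor's note] The same contract from the caller's perspective, as a hedged sketch: error handling is simplified, and the endless
drain loop and method name are illustrative, not part of the diff.

    static void drain(final ChangeStreamBatchCursor<Document> cursor) {
        while (true) {
            try {
                cursor.next().forEach(System.out::println); // may transparently resume internally
            } catch (MongoOperationTimeoutException e) {
                // the cursor is still valid: the next call re-establishes the change stream before reading
            } catch (MongoException e) {
                cursor.close(); // non-resumable: the cursor has been invalidated on the server
                throw e;
            }
        }
    }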
+ */ +final class ChangeStreamBatchCursor implements AggregateResponseBatchCursor { + private final ReadBinding binding; + private final ChangeStreamOperation changeStreamOperation; + private final int maxWireVersion; + private final TimeoutContext timeoutContext; + private CommandBatchCursor wrapped; + private BsonDocument resumeToken; + private final AtomicBoolean closed; + + /** + * This flag is used to manage change stream resumption logic after a timeout error. + * Indicates whether the last {@code hasNext()}, {@code next()}, or {@code tryNext()} call resulted in a {@link MongoOperationTimeoutException}. + * If {@code true}, indicates a timeout occurred, prompting an attempt to resume the change stream on the subsequent call. + */ + private boolean lastOperationTimedOut; + + ChangeStreamBatchCursor(final ChangeStreamOperation changeStreamOperation, + final CommandBatchCursor wrapped, + final ReadBinding binding, + @Nullable final BsonDocument resumeToken, + final int maxWireVersion) { + this.timeoutContext = binding.getOperationContext().getTimeoutContext(); + this.changeStreamOperation = changeStreamOperation; + this.binding = binding.retain(); + this.wrapped = wrapped; + this.resumeToken = resumeToken; + this.maxWireVersion = maxWireVersion; + closed = new AtomicBoolean(); + lastOperationTimedOut = false; + } + + CommandBatchCursor getWrapped() { + return wrapped; + } + + @Override + public boolean hasNext() { + return resumeableOperation(commandBatchCursor -> { + try { + return commandBatchCursor.hasNext(); + } finally { + cachePostBatchResumeToken(commandBatchCursor); + } + }); + } + + @Override + public List next() { + return resumeableOperation(commandBatchCursor -> { + try { + return convertAndProduceLastId(commandBatchCursor.next(), changeStreamOperation.getDecoder(), + lastId -> resumeToken = lastId); + } finally { + cachePostBatchResumeToken(commandBatchCursor); + } + }); + } + + @Override + public int available() { + return wrapped.available(); + } + + @Override + public List tryNext() { + return resumeableOperation(commandBatchCursor -> { + try { + List tryNext = commandBatchCursor.tryNext(); + return tryNext == null ? 
null + : convertAndProduceLastId(tryNext, changeStreamOperation.getDecoder(), lastId -> resumeToken = lastId); + } finally { + cachePostBatchResumeToken(commandBatchCursor); + } + }); + } + + @Override + public void close() { + if (!closed.getAndSet(true)) { + timeoutContext.resetTimeoutIfPresent(); + wrapped.close(); + binding.release(); + } + } + + @Override + public void setBatchSize(final int batchSize) { + wrapped.setBatchSize(batchSize); + } + + @Override + public int getBatchSize() { + return wrapped.getBatchSize(); + } + + @Override + public ServerCursor getServerCursor() { + return wrapped.getServerCursor(); + } + + @Override + public ServerAddress getServerAddress() { + return wrapped.getServerAddress(); + } + + @Override + public void remove() { + throw new UnsupportedOperationException("Not implemented!"); + } + + @Override + public BsonDocument getPostBatchResumeToken() { + return wrapped.getPostBatchResumeToken(); + } + + @Override + public BsonTimestamp getOperationTime() { + return changeStreamOperation.getStartAtOperationTime(); + } + + @Override + public boolean isFirstBatchEmpty() { + return wrapped.isFirstBatchEmpty(); + } + + @Override + public int getMaxWireVersion() { + return maxWireVersion; + } + + private void cachePostBatchResumeToken(final AggregateResponseBatchCursor commandBatchCursor) { + if (commandBatchCursor.getPostBatchResumeToken() != null) { + resumeToken = commandBatchCursor.getPostBatchResumeToken(); + } + } + + /** + * @param lastIdConsumer Is {@linkplain Consumer#accept(Object) called} iff {@code rawDocuments} is successfully converted + * and the returned {@link List} is neither {@code null} nor {@linkplain List#isEmpty() empty}. + */ + static List convertAndProduceLastId(final List rawDocuments, + final Decoder decoder, + final Consumer lastIdConsumer) { + List results = new ArrayList<>(); + for (RawBsonDocument rawDocument : assertNotNull(rawDocuments)) { + if (!rawDocument.containsKey("_id")) { + throw new MongoChangeStreamException("Cannot provide resume functionality when the resume token is missing."); + } + results.add(rawDocument.decode(decoder)); + } + if (!rawDocuments.isEmpty()) { + lastIdConsumer.accept(rawDocuments.get(rawDocuments.size() - 1).getDocument("_id")); + } + return results; + } + + R resumeableOperation(final Function, R> function) { + timeoutContext.resetTimeoutIfPresent(); + try { + R result = execute(function); + lastOperationTimedOut = false; + return result; + } catch (Throwable exception) { + lastOperationTimedOut = isTimeoutException(exception); + throw exception; + } + } + + private R execute(final Function, R> function) { + boolean shouldBeResumed = hasPreviousNextTimedOut(); + while (true) { + if (shouldBeResumed) { + resumeChangeStream(); + } + try { + return function.apply(wrapped); + } catch (Throwable t) { + if (!isResumableError(t, maxWireVersion)) { + throw MongoException.fromThrowableNonNull(t); + } + shouldBeResumed = true; + } + } + } + + private void resumeChangeStream() { + wrapped.close(); + + withReadConnectionSource(binding, source -> { + changeStreamOperation.setChangeStreamOptionsForResume(resumeToken, source.getServerDescription().getMaxWireVersion()); + return null; + }); + wrapped = ((ChangeStreamBatchCursor) changeStreamOperation.execute(binding)).getWrapped(); + binding.release(); // release the new change stream batch cursor's reference to the binding + } + + private boolean hasPreviousNextTimedOut() { + return lastOperationTimedOut && !closed.get(); + } + + private static boolean 
isTimeoutException(final Throwable exception) {
+        return exception instanceof MongoOperationTimeoutException;
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamBatchCursorHelper.java b/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamBatchCursorHelper.java
new file mode 100644
index 00000000000..1bed2ed2b0f
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamBatchCursorHelper.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.operation;
+
+import com.mongodb.MongoChangeStreamException;
+import com.mongodb.MongoClientException;
+import com.mongodb.MongoCursorNotFoundException;
+import com.mongodb.MongoException;
+import com.mongodb.MongoInterruptedException;
+import com.mongodb.MongoNotPrimaryException;
+import com.mongodb.MongoOperationTimeoutException;
+import com.mongodb.MongoSocketException;
+import com.mongodb.internal.VisibleForTesting;
+
+import java.util.List;
+
+import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE;
+import static com.mongodb.internal.operation.ServerVersionHelper.FOUR_DOT_FOUR_WIRE_VERSION;
+import static java.util.Arrays.asList;
+
+final class ChangeStreamBatchCursorHelper {
+    @VisibleForTesting(otherwise = PRIVATE)
+    static final List<Integer> RETRYABLE_SERVER_ERROR_CODES =
+            asList(6, 7, 63, 89, 91, 133, 134, 150, 189, 234, 262, 9001, 10107, 11600, 11602, 13388, 13435, 13436);
+    @VisibleForTesting(otherwise = PRIVATE)
+    static final String RESUMABLE_CHANGE_STREAM_ERROR_LABEL = "ResumableChangeStreamError";
+
+    static boolean isResumableError(final Throwable t, final int maxWireVersion) {
+        if (!(t instanceof MongoException) || (t instanceof MongoChangeStreamException) || (t instanceof MongoInterruptedException)
+                || (t instanceof MongoOperationTimeoutException)) {
+            return false;
+        } else if (t instanceof MongoNotPrimaryException || t instanceof MongoCursorNotFoundException
+                || t instanceof MongoSocketException || t instanceof MongoClientException) {
+            return true;
+        } else if (maxWireVersion >= FOUR_DOT_FOUR_WIRE_VERSION) {
+            return ((MongoException) t).getErrorLabels().contains(RESUMABLE_CHANGE_STREAM_ERROR_LABEL);
+        } else {
+            return RETRYABLE_SERVER_ERROR_CODES.contains(((MongoException) t).getCode());
+        }
+    }
+
+    private ChangeStreamBatchCursorHelper() {
+    }
+}
diff --git a/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamOperation.java
new file mode 100644
index 00000000000..f4c896ba6e9
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/operation/ChangeStreamOperation.java
@@ -0,0 +1,296 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.CursorType; +import com.mongodb.MongoNamespace; +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.changestream.FullDocument; +import com.mongodb.client.model.changestream.FullDocumentBeforeChange; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.async.AsyncBatchCursor; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncReadBinding; +import com.mongodb.internal.binding.ReadBinding; +import com.mongodb.internal.client.model.changestream.ChangeStreamLevel; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.BsonTimestamp; +import org.bson.BsonValue; +import org.bson.RawBsonDocument; +import org.bson.codecs.Decoder; +import org.bson.codecs.RawBsonDocumentCodec; + +import java.util.ArrayList; +import java.util.List; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.client.cursor.TimeoutMode.CURSOR_LIFETIME; + +/** + * An operation that executes an {@code $changeStream} aggregation. + * + *

<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class ChangeStreamOperation implements ReadOperationCursor { + private static final RawBsonDocumentCodec RAW_BSON_DOCUMENT_CODEC = new RawBsonDocumentCodec(); + private final AggregateOperationImpl wrapped; + private final FullDocument fullDocument; + private final FullDocumentBeforeChange fullDocumentBeforeChange; + private final Decoder decoder; + private final ChangeStreamLevel changeStreamLevel; + + private BsonDocument resumeAfter; + private BsonDocument startAfter; + private BsonTimestamp startAtOperationTime; + private boolean showExpandedEvents; + + + public ChangeStreamOperation(final MongoNamespace namespace, final FullDocument fullDocument, + final FullDocumentBeforeChange fullDocumentBeforeChange, final List pipeline, final Decoder decoder) { + this(namespace, fullDocument, fullDocumentBeforeChange, pipeline, decoder, ChangeStreamLevel.COLLECTION); + } + + public ChangeStreamOperation(final MongoNamespace namespace, final FullDocument fullDocument, + final FullDocumentBeforeChange fullDocumentBeforeChange, final List pipeline, final Decoder decoder, + final ChangeStreamLevel changeStreamLevel) { + this.wrapped = new AggregateOperationImpl<>(namespace, pipeline, RAW_BSON_DOCUMENT_CODEC, getAggregateTarget(), + getPipelineCreator()).cursorType(CursorType.TailableAwait); + this.fullDocument = notNull("fullDocument", fullDocument); + this.fullDocumentBeforeChange = notNull("fullDocumentBeforeChange", fullDocumentBeforeChange); + this.decoder = notNull("decoder", decoder); + this.changeStreamLevel = notNull("changeStreamLevel", changeStreamLevel); + } + + public MongoNamespace getNamespace() { + return wrapped.getNamespace(); + } + + public Decoder getDecoder() { + return decoder; + } + + public FullDocument getFullDocument() { + return fullDocument; + } + + public BsonDocument getResumeAfter() { + return resumeAfter; + } + + public ChangeStreamOperation resumeAfter(final BsonDocument resumeAfter) { + this.resumeAfter = resumeAfter; + return this; + } + + public BsonDocument getStartAfter() { + return startAfter; + } + + public ChangeStreamOperation startAfter(final BsonDocument startAfter) { + this.startAfter = startAfter; + return this; + } + + public List getPipeline() { + return wrapped.getPipeline(); + } + + public Integer getBatchSize() { + return wrapped.getBatchSize(); + } + + public ChangeStreamOperation batchSize(@Nullable final Integer batchSize) { + wrapped.batchSize(batchSize); + return this; + } + + public Collation getCollation() { + return wrapped.getCollation(); + } + + public ChangeStreamOperation collation(final Collation collation) { + wrapped.collation(collation); + return this; + } + + public ChangeStreamOperation startAtOperationTime(final BsonTimestamp startAtOperationTime) { + this.startAtOperationTime = startAtOperationTime; + return this; + } + + public BsonTimestamp getStartAtOperationTime() { + return startAtOperationTime; + } + + public ChangeStreamOperation retryReads(final boolean retryReads) { + wrapped.retryReads(retryReads); + return this; + } + + public boolean getRetryReads() { + return wrapped.getRetryReads(); + } + + @Nullable + public BsonValue getComment() { + return wrapped.getComment(); + } + + public ChangeStreamOperation comment(final BsonValue comment) { + wrapped.comment(comment); + return this; + } + + public boolean getShowExpandedEvents() { + return this.showExpandedEvents; + } + + public ChangeStreamOperation showExpandedEvents(final boolean showExpandedEvents) { + this.showExpandedEvents = showExpandedEvents; + return 
this; + } + + /** + * Gets an aggregate operation with consideration for timeout settings. + *

+ * Change streams act similarly to tailable awaitData cursors, with identical timeoutMS option behavior. + * Key distinctions include: + * - The timeoutMS option must be applied at the start of the aggregate operation for change streams. + * - Change streams support resumption on next() calls. The driver handles automatic resumption for transient errors. + *

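[Editor's note] Illustrative only: enabling a client-wide timeout is what makes {@code timeoutContext.hasTimeoutMS()} true below. The
builder method name follows the driver's client-side operation timeout (CSOT) API and should be treated as an assumption here.

import java.util.concurrent.TimeUnit;

import com.mongodb.ConnectionString;
import com.mongodb.MongoClientSettings;

final class TimeoutMsExample {
    // With timeoutMS configured, the change stream's underlying aggregate runs with TimeoutMode.CURSOR_LIFETIME,
    // so a single budget covers the cursor's whole lifetime rather than each getMore round-trip.
    static MongoClientSettings settingsWithTimeout() {
        return MongoClientSettings.builder()
                .applyConnectionString(new ConnectionString("mongodb://localhost:27017"))
                .timeout(5, TimeUnit.SECONDS) // enables timeoutMS for every operation on this client
                .build();
    }
}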
+ * + * As a result, when {@code timeoutContext.hasTimeoutMS()} the CURSOR_LIFETIME setting is utilized to manage the underlying cursor's + * lifespan in change streams. + * + * @param timeoutContext + * @return An AggregateOperationImpl + */ + private AggregateOperationImpl getAggregateOperation(final TimeoutContext timeoutContext) { + if (timeoutContext.hasTimeoutMS()) { + return wrapped.timeoutMode(CURSOR_LIFETIME); + } + return wrapped; + } + + @Override + public String getCommandName() { + return wrapped.getCommandName(); + } + + @Override + public BatchCursor execute(final ReadBinding binding) { + TimeoutContext timeoutContext = binding.getOperationContext().getTimeoutContext(); + CommandBatchCursor cursor = ((CommandBatchCursor) getAggregateOperation(timeoutContext).execute(binding)) + .disableTimeoutResetWhenClosing(); + + return new ChangeStreamBatchCursor<>(ChangeStreamOperation.this, cursor, binding, + setChangeStreamOptions(cursor.getPostBatchResumeToken(), cursor.getOperationTime(), + cursor.getMaxWireVersion(), cursor.isFirstBatchEmpty()), cursor.getMaxWireVersion()); + } + + @Override + public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { + TimeoutContext timeoutContext = binding.getOperationContext().getTimeoutContext(); + getAggregateOperation(timeoutContext).executeAsync(binding, (result, t) -> { + if (t != null) { + callback.onResult(null, t); + } else { + AsyncCommandBatchCursor cursor = ((AsyncCommandBatchCursor) assertNotNull(result)) + .disableTimeoutResetWhenClosing(); + + callback.onResult(new AsyncChangeStreamBatchCursor<>(ChangeStreamOperation.this, cursor, binding, + setChangeStreamOptions(cursor.getPostBatchResumeToken(), cursor.getOperationTime(), + cursor.getMaxWireVersion(), cursor.isFirstBatchEmpty()), cursor.getMaxWireVersion()), null); + } + }); + } + + @Nullable + private BsonDocument setChangeStreamOptions(@Nullable final BsonDocument postBatchResumeToken, + @Nullable final BsonTimestamp operationTime, final int maxWireVersion, final boolean firstBatchEmpty) { + BsonDocument resumeToken = null; + if (startAfter != null) { + resumeToken = startAfter; + } else if (resumeAfter != null) { + resumeToken = resumeAfter; + } else if (startAtOperationTime == null && postBatchResumeToken == null && firstBatchEmpty && maxWireVersion >= 7) { + startAtOperationTime = operationTime; + } + return resumeToken; + } + + public void setChangeStreamOptionsForResume(@Nullable final BsonDocument resumeToken, final int maxWireVersion) { + startAfter = null; + if (resumeToken != null) { + startAtOperationTime = null; + resumeAfter = resumeToken; + } else if (startAtOperationTime != null && maxWireVersion >= 7) { + resumeAfter = null; + } else { + resumeAfter = null; + startAtOperationTime = null; + } + } + + // Leave as anonymous class so as not to confuse CustomMatchers#compare + private AggregateOperationImpl.AggregateTarget getAggregateTarget() { + return () -> changeStreamLevel == ChangeStreamLevel.COLLECTION + ? 
new BsonString(getNamespace().getCollectionName()) : new BsonInt32(1); + } + + // Leave as anonymous class so as not to confuse CustomMatchers#compare + private AggregateOperationImpl.PipelineCreator getPipelineCreator() { + return () -> { + List changeStreamPipeline = new ArrayList<>(); + BsonDocument changeStream = new BsonDocument(); + if (fullDocument != FullDocument.DEFAULT) { + changeStream.append("fullDocument", new BsonString(fullDocument.getValue())); + } + if (fullDocumentBeforeChange != FullDocumentBeforeChange.DEFAULT) { + changeStream.append("fullDocumentBeforeChange", new BsonString(fullDocumentBeforeChange.getValue())); + } + + if (changeStreamLevel == ChangeStreamLevel.CLIENT) { + changeStream.append("allChangesForCluster", BsonBoolean.TRUE); + } + + if (showExpandedEvents) { + changeStream.append("showExpandedEvents", BsonBoolean.TRUE); + } + + if (resumeAfter != null) { + changeStream.append("resumeAfter", resumeAfter); + } + if (startAfter != null) { + changeStream.append("startAfter", startAfter); + } + if (startAtOperationTime != null) { + changeStream.append("startAtOperationTime", startAtOperationTime); + } + + changeStreamPipeline.add(new BsonDocument("$changeStream", changeStream)); + changeStreamPipeline.addAll(getPipeline()); + return new BsonArray(changeStreamPipeline); + }; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/ClientBulkWriteOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ClientBulkWriteOperation.java new file mode 100644 index 00000000000..2b9e79f6f06 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/ClientBulkWriteOperation.java @@ -0,0 +1,1355 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.internal.operation; + +import com.mongodb.ClientBulkWriteException; +import com.mongodb.MongoClientException; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoCommandException; +import com.mongodb.MongoException; +import com.mongodb.MongoNamespace; +import com.mongodb.MongoServerException; +import com.mongodb.MongoSocketException; +import com.mongodb.MongoWriteConcernException; +import com.mongodb.ServerAddress; +import com.mongodb.WriteConcern; +import com.mongodb.WriteError; +import com.mongodb.assertions.Assertions; +import com.mongodb.bulk.WriteConcernError; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; +import com.mongodb.client.model.bulk.ClientDeleteResult; +import com.mongodb.client.model.bulk.ClientInsertOneResult; +import com.mongodb.client.model.bulk.ClientNamespacedReplaceOneModel; +import com.mongodb.client.model.bulk.ClientNamespacedUpdateOneModel; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; +import com.mongodb.client.model.bulk.ClientUpdateResult; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.async.AsyncBatchCursor; +import com.mongodb.internal.async.AsyncSupplier; +import com.mongodb.internal.async.MutableValue; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.async.function.AsyncCallbackSupplier; +import com.mongodb.internal.async.function.RetryState; +import com.mongodb.internal.binding.AsyncConnectionSource; +import com.mongodb.internal.binding.AsyncWriteBinding; +import com.mongodb.internal.binding.ConnectionSource; +import com.mongodb.internal.binding.WriteBinding; +import com.mongodb.internal.client.model.bulk.AbstractClientDeleteModel; +import com.mongodb.internal.client.model.bulk.AbstractClientDeleteOptions; +import com.mongodb.internal.client.model.bulk.AbstractClientNamespacedWriteModel; +import com.mongodb.internal.client.model.bulk.AbstractClientUpdateModel; +import com.mongodb.internal.client.model.bulk.AbstractClientUpdateOptions; +import com.mongodb.internal.client.model.bulk.AcknowledgedSummaryClientBulkWriteResult; +import com.mongodb.internal.client.model.bulk.AcknowledgedVerboseClientBulkWriteResult; +import com.mongodb.internal.client.model.bulk.ClientWriteModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientBulkWriteOptions; +import com.mongodb.internal.client.model.bulk.ConcreteClientDeleteManyModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientDeleteOneModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientDeleteResult; +import com.mongodb.internal.client.model.bulk.ConcreteClientInsertOneModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientInsertOneResult; +import com.mongodb.internal.client.model.bulk.ConcreteClientNamespacedDeleteManyModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientNamespacedDeleteOneModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientNamespacedInsertOneModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientNamespacedReplaceOneModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientNamespacedUpdateManyModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientNamespacedUpdateOneModel; +import 
com.mongodb.internal.client.model.bulk.ConcreteClientReplaceOneModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientReplaceOneOptions; +import com.mongodb.internal.client.model.bulk.ConcreteClientUpdateManyModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientUpdateOneModel; +import com.mongodb.internal.client.model.bulk.ConcreteClientUpdateResult; +import com.mongodb.internal.client.model.bulk.UnacknowledgedClientBulkWriteResult; +import com.mongodb.internal.connection.AsyncConnection; +import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.connection.DualMessageSequences; +import com.mongodb.internal.connection.IdHoldingBsonWriter; +import com.mongodb.internal.connection.MongoWriteConcernWithResponseException; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.operation.retry.AttachmentKeys; +import com.mongodb.internal.session.SessionContext; +import com.mongodb.internal.validator.NoOpFieldNameValidator; +import com.mongodb.internal.validator.ReplacingDocumentFieldNameValidator; +import com.mongodb.internal.validator.UpdateFieldNameValidator; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonBinaryWriter; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonElement; +import org.bson.BsonInt32; +import org.bson.BsonInt64; +import org.bson.BsonObjectId; +import org.bson.BsonValue; +import org.bson.BsonWriter; +import org.bson.FieldNameValidator; +import org.bson.codecs.Encoder; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.configuration.CodecRegistry; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.function.Supplier; +import java.util.stream.Stream; + +import static com.mongodb.assertions.Assertions.assertFalse; +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.assertions.Assertions.fail; +import static com.mongodb.internal.VisibleForTesting.AccessModifier.PACKAGE; +import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; +import static com.mongodb.internal.async.AsyncRunnable.beginAsync; +import static com.mongodb.internal.connection.DualMessageSequences.WritersProviderAndLimitsChecker.WriteResult.FAIL_LIMIT_EXCEEDED; +import static com.mongodb.internal.connection.DualMessageSequences.WritersProviderAndLimitsChecker.WriteResult.OK_LIMIT_NOT_REACHED; +import static com.mongodb.internal.operation.AsyncOperationHelper.cursorDocumentToAsyncBatchCursor; +import static com.mongodb.internal.operation.AsyncOperationHelper.decorateWriteWithRetriesAsync; +import static com.mongodb.internal.operation.AsyncOperationHelper.withAsyncSourceAndConnection; +import static com.mongodb.internal.operation.BulkWriteBatch.logWriteModelDoesNotSupportRetries; +import static com.mongodb.internal.operation.CommandOperationHelper.commandWriteConcern; +import static com.mongodb.internal.operation.CommandOperationHelper.initialRetryState; +import static com.mongodb.internal.operation.CommandOperationHelper.shouldAttemptToRetryWriteAndAddRetryableLabel; +import static com.mongodb.internal.operation.CommandOperationHelper.transformWriteException; +import static com.mongodb.internal.operation.OperationHelper.isRetryableWrite; +import static 
com.mongodb.internal.operation.SyncOperationHelper.cursorDocumentToBatchCursor; +import static com.mongodb.internal.operation.SyncOperationHelper.decorateWriteWithRetries; +import static com.mongodb.internal.operation.SyncOperationHelper.withSourceAndConnection; +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonList; +import static java.util.Optional.ofNullable; +import static java.util.stream.Collectors.toList; +import static java.util.stream.Collectors.toSet; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class ClientBulkWriteOperation implements WriteOperation { + private static final ConcreteClientBulkWriteOptions EMPTY_OPTIONS = new ConcreteClientBulkWriteOptions(); + private static final String BULK_WRITE_COMMAND_NAME = "bulkWrite"; + private static final EncoderContext DEFAULT_ENCODER_CONTEXT = EncoderContext.builder().build(); + private static final EncoderContext COLLECTIBLE_DOCUMENT_ENCODER_CONTEXT = EncoderContext.builder() + .isEncodingCollectibleDocument(true).build(); + private static final int INITIAL_BATCH_MODEL_START_INDEX = 0; + private static final int SERVER_DEFAULT_CURSOR_BATCH_SIZE = 0; + + private final List models; + private final ConcreteClientBulkWriteOptions options; + private final WriteConcern writeConcernSetting; + private final boolean retryWritesSetting; + private final CodecRegistry codecRegistry; + + /** + * @param retryWritesSetting See {@link MongoClientSettings#getRetryWrites()}. + */ + public ClientBulkWriteOperation( + final List models, + @Nullable final ClientBulkWriteOptions options, + final WriteConcern writeConcernSetting, + final boolean retryWritesSetting, + final CodecRegistry codecRegistry) { + this.models = models; + this.options = options == null ? 
EMPTY_OPTIONS : (ConcreteClientBulkWriteOptions) options; + this.writeConcernSetting = writeConcernSetting; + this.retryWritesSetting = retryWritesSetting; + this.codecRegistry = codecRegistry; + } + + @Override + public String getCommandName() { + return "bulkWrite"; + } + + @Override + public ClientBulkWriteResult execute(final WriteBinding binding) throws ClientBulkWriteException { + WriteConcern effectiveWriteConcern = validateAndGetEffectiveWriteConcern(binding.getOperationContext().getSessionContext()); + ResultAccumulator resultAccumulator = new ResultAccumulator(); + MongoException transformedTopLevelError = null; + + try { + executeAllBatches(effectiveWriteConcern, binding, resultAccumulator); + } catch (MongoException topLevelError) { + transformedTopLevelError = transformWriteException(topLevelError); + } + return resultAccumulator.build(transformedTopLevelError, effectiveWriteConcern); + } + + + @Override + public void executeAsync(final AsyncWriteBinding binding, + final SingleResultCallback finalCallback) { + WriteConcern effectiveWriteConcern = validateAndGetEffectiveWriteConcern(binding.getOperationContext().getSessionContext()); + ResultAccumulator resultAccumulator = new ResultAccumulator(); + MutableValue transformedTopLevelError = new MutableValue<>(); + + beginAsync().thenSupply(c -> { + executeAllBatchesAsync(effectiveWriteConcern, binding, resultAccumulator, c); + }).onErrorIf(topLevelError -> topLevelError instanceof MongoException, (topLevelError, c) -> { + transformedTopLevelError.set(transformWriteException((MongoException) topLevelError)); + c.complete(c); + }).thenApply((ignored, c) -> { + c.complete(resultAccumulator.build(transformedTopLevelError.getNullable(), effectiveWriteConcern)); + }).finish(finalCallback); + } + + /** + * To execute a batch means: + *
<ul>
+ *     <li>execute a `bulkWrite` command, which creates a cursor;</li>
+ *     <li>consume the cursor, which may involve executing `getMore` commands.</li>
+ * </ul>
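[Editor's note] A hedged sketch of that exchange at the command level; field names follow the server's bulkWrite command shape, but
the concrete values are illustrative and do not come from this diff.

// request:  { bulkWrite: 1, ops: [ { insert: 0, document: { _id: 1 } } ], nsInfo: [ { ns: "db.coll" } ], ordered: true }
// response: { ok: 1, cursor: { id: 0, firstBatch: [ { ok: 1, idx: 0, n: 1 } ], ns: "admin.$cmd.bulkWrite" }, nErrors: 0 }
// In the driver, ops and nsInfo travel as attached document sequences rather than being embedded in the command body:
BsonDocument command = new BsonDocument("bulkWrite", new BsonInt32(1))
        .append("ordered", BsonBoolean.TRUE);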
+ * + * @throws MongoException When a {@linkplain ClientBulkWriteException#getCause() top-level error} happens. + */ + private void executeAllBatches( + final WriteConcern effectiveWriteConcern, + final WriteBinding binding, + final ResultAccumulator resultAccumulator) throws MongoException { + Integer nextBatchStartModelIndex = INITIAL_BATCH_MODEL_START_INDEX; + + do { + nextBatchStartModelIndex = executeBatch(nextBatchStartModelIndex, effectiveWriteConcern, binding, resultAccumulator); + } while (nextBatchStartModelIndex != null); + } + + /** + * @see #executeAllBatches(WriteConcern, WriteBinding, ResultAccumulator) + */ + private void executeAllBatchesAsync( + final WriteConcern effectiveWriteConcern, + final AsyncWriteBinding binding, + final ResultAccumulator resultAccumulator, + final SingleResultCallback finalCallback) { + MutableValue nextBatchStartModelIndex = new MutableValue<>(INITIAL_BATCH_MODEL_START_INDEX); + + beginAsync().thenRunDoWhileLoop(iterationCallback -> { + beginAsync().thenSupply(c -> { + executeBatchAsync(nextBatchStartModelIndex.get(), effectiveWriteConcern, binding, resultAccumulator, c); + }).thenApply((nextBatchStartModelIdx, c) -> { + nextBatchStartModelIndex.set(nextBatchStartModelIdx); + c.complete(c); + }).finish(iterationCallback); + }, () -> nextBatchStartModelIndex.getNullable() != null + ).finish(finalCallback); + } + + /** + * @return The start model index of the next batch, provided that the operation + * {@linkplain ExhaustiveClientBulkWriteCommandOkResponse#operationMayContinue(ConcreteClientBulkWriteOptions) may continue} + * and there are unexecuted {@linkplain ClientNamespacedWriteModel models} left. + */ + @Nullable + private Integer executeBatch( + final int batchStartModelIndex, + final WriteConcern effectiveWriteConcern, + final WriteBinding binding, + final ResultAccumulator resultAccumulator) { + List unexecutedModels = models.subList(batchStartModelIndex, models.size()); + assertFalse(unexecutedModels.isEmpty()); + OperationContext operationContext = binding.getOperationContext(); + SessionContext sessionContext = operationContext.getSessionContext(); + TimeoutContext timeoutContext = operationContext.getTimeoutContext(); + RetryState retryState = initialRetryState(retryWritesSetting, timeoutContext); + BatchEncoder batchEncoder = new BatchEncoder(); + + Supplier retryingBatchExecutor = decorateWriteWithRetries( + retryState, operationContext, + // Each batch re-selects a server and re-checks out a connection because this is simpler, + // and it is allowed by https://jira.mongodb.org/browse/DRIVERS-2502. + // If connection pinning is required, `binding` handles that, + // and `ClientSession`, `TransactionContext` are aware of that. 
+ () -> withSourceAndConnection(binding::getWriteConnectionSource, true, + (connectionSource, connection) -> { + ConnectionDescription connectionDescription = connection.getDescription(); + boolean effectiveRetryWrites = isRetryableWrite( + retryWritesSetting, effectiveWriteConcern, connectionDescription, sessionContext); + retryState.breakAndThrowIfRetryAnd(() -> !effectiveRetryWrites); + resultAccumulator.onNewServerAddress(connectionDescription.getServerAddress()); + retryState.attach(AttachmentKeys.maxWireVersion(), connectionDescription.getMaxWireVersion(), true) + .attach(AttachmentKeys.commandDescriptionSupplier(), () -> BULK_WRITE_COMMAND_NAME, false); + ClientBulkWriteCommand bulkWriteCommand = createBulkWriteCommand( + retryState, effectiveRetryWrites, effectiveWriteConcern, sessionContext, unexecutedModels, batchEncoder, + () -> retryState.attach(AttachmentKeys.retryableCommandFlag(), true, true)); + return executeBulkWriteCommandAndExhaustOkResponse( + retryState, connectionSource, connection, bulkWriteCommand, effectiveWriteConcern, operationContext); + }) + ); + + try { + ExhaustiveClientBulkWriteCommandOkResponse bulkWriteCommandOkResponse = retryingBatchExecutor.get(); + return resultAccumulator.onBulkWriteCommandOkResponseOrNoResponse( + batchStartModelIndex, bulkWriteCommandOkResponse, batchEncoder.intoEncodedBatchInfo()); + } catch (MongoWriteConcernWithResponseException mongoWriteConcernWithOkResponseException) { + return resultAccumulator.onBulkWriteCommandOkResponseWithWriteConcernError( + batchStartModelIndex, mongoWriteConcernWithOkResponseException, batchEncoder.intoEncodedBatchInfo()); + } catch (MongoCommandException bulkWriteCommandException) { + resultAccumulator.onBulkWriteCommandErrorResponse(bulkWriteCommandException); + throw bulkWriteCommandException; + } catch (MongoException mongoException) { + // The server does not have a chance to add "RetryableWriteError" label to `e`, + // and if it is the last attempt failure, `RetryingSyncSupplier` also may not have a chance + // to add the label. So we do that explicitly. + shouldAttemptToRetryWriteAndAddRetryableLabel(retryState, mongoException); + resultAccumulator.onBulkWriteCommandErrorWithoutResponse(mongoException); + throw mongoException; + } + } + + /** + * @see #executeBatch(int, WriteConcern, WriteBinding, ResultAccumulator) + */ + private void executeBatchAsync( + final int batchStartModelIndex, + final WriteConcern effectiveWriteConcern, + final AsyncWriteBinding binding, + final ResultAccumulator resultAccumulator, + final SingleResultCallback finalCallback) { + List unexecutedModels = models.subList(batchStartModelIndex, models.size()); + assertFalse(unexecutedModels.isEmpty()); + OperationContext operationContext = binding.getOperationContext(); + SessionContext sessionContext = operationContext.getSessionContext(); + TimeoutContext timeoutContext = operationContext.getTimeoutContext(); + RetryState retryState = initialRetryState(retryWritesSetting, timeoutContext); + BatchEncoder batchEncoder = new BatchEncoder(); + + AsyncCallbackSupplier retryingBatchExecutor = decorateWriteWithRetriesAsync( + retryState, operationContext, + // Each batch re-selects a server and re-checks out a connection because this is simpler, + // and it is allowed by https://jira.mongodb.org/browse/DRIVERS-2502. + // If connection pinning is required, `binding` handles that, + // and `ClientSession`, `TransactionContext` are aware of that. 
+ funcCallback -> withAsyncSourceAndConnection(binding::getWriteConnectionSource, true, funcCallback, + (connectionSource, connection, resultCallback) -> { + ConnectionDescription connectionDescription = connection.getDescription(); + boolean effectiveRetryWrites = isRetryableWrite( + retryWritesSetting, effectiveWriteConcern, connectionDescription, sessionContext); + retryState.breakAndThrowIfRetryAnd(() -> !effectiveRetryWrites); + resultAccumulator.onNewServerAddress(connectionDescription.getServerAddress()); + retryState.attach(AttachmentKeys.maxWireVersion(), connectionDescription.getMaxWireVersion(), true) + .attach(AttachmentKeys.commandDescriptionSupplier(), () -> BULK_WRITE_COMMAND_NAME, false); + ClientBulkWriteCommand bulkWriteCommand = createBulkWriteCommand( + retryState, effectiveRetryWrites, effectiveWriteConcern, sessionContext, unexecutedModels, batchEncoder, + () -> retryState.attach(AttachmentKeys.retryableCommandFlag(), true, true)); + executeBulkWriteCommandAndExhaustOkResponseAsync( + retryState, connectionSource, connection, bulkWriteCommand, effectiveWriteConcern, operationContext, resultCallback); + }) + ); + + beginAsync().thenSupply(callback -> { + retryingBatchExecutor.get(callback); + }).thenApply((bulkWriteCommandOkResponse, callback) -> { + callback.complete(resultAccumulator.onBulkWriteCommandOkResponseOrNoResponse( + batchStartModelIndex, bulkWriteCommandOkResponse, batchEncoder.intoEncodedBatchInfo())); + }).onErrorIf(throwable -> true, (t, callback) -> { + if (t instanceof MongoWriteConcernWithResponseException) { + MongoWriteConcernWithResponseException mongoWriteConcernWithOkResponseException = (MongoWriteConcernWithResponseException) t; + callback.complete(resultAccumulator.onBulkWriteCommandOkResponseWithWriteConcernError( + batchStartModelIndex, mongoWriteConcernWithOkResponseException, batchEncoder.intoEncodedBatchInfo())); + } else if (t instanceof MongoCommandException) { + MongoCommandException bulkWriteCommandException = (MongoCommandException) t; + resultAccumulator.onBulkWriteCommandErrorResponse(bulkWriteCommandException); + callback.completeExceptionally(t); + } else if (t instanceof MongoException) { + MongoException mongoException = (MongoException) t; + // The server does not have a chance to add "RetryableWriteError" label to `e`, + // and if it is the last attempt failure, `RetryingSyncSupplier` also may not have a chance + // to add the label. So we do that explicitly. + shouldAttemptToRetryWriteAndAddRetryableLabel(retryState, mongoException); + resultAccumulator.onBulkWriteCommandErrorWithoutResponse(mongoException); + callback.completeExceptionally(mongoException); + } else { + callback.completeExceptionally(t); + } + }).finish(finalCallback); + } + + /** + * @throws MongoWriteConcernWithResponseException This internal exception must be handled to avoid it being observed by an application. + * It {@linkplain MongoWriteConcernWithResponseException#getResponse() bears} the OK response to the {@code bulkWriteCommand}, + * which must be + * {@linkplain ResultAccumulator#onBulkWriteCommandOkResponseWithWriteConcernError(int, MongoWriteConcernWithResponseException, BatchEncoder.EncodedBatchInfo) accumulated} + * iff this exception is the failed result of retries. 
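+ * <p>
+ * Illustrative response shape that leads to this exception (example values only): an {@code ok: 1} response
+ * that nevertheless carries a write concern error,
+ * <pre>{@code
+ * {
+ *     "ok": 1,
+ *     "cursor": { ... },
+ *     "writeConcernError": { "code": 64, "errmsg": "waiting for replication timed out" },
+ *     "errorLabels": [ "RetryableWriteError" ]
+ * }
+ * }</pre>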
+ */ + @Nullable + private ExhaustiveClientBulkWriteCommandOkResponse executeBulkWriteCommandAndExhaustOkResponse( + final RetryState retryState, + final ConnectionSource connectionSource, + final Connection connection, + final ClientBulkWriteCommand bulkWriteCommand, + final WriteConcern effectiveWriteConcern, + final OperationContext operationContext) throws MongoWriteConcernWithResponseException { + BsonDocument bulkWriteCommandOkResponse = connection.command( + "admin", + bulkWriteCommand.getCommandDocument(), + NoOpFieldNameValidator.INSTANCE, + null, + CommandResultDocumentCodec.create(codecRegistry.get(BsonDocument.class), CommandBatchCursorHelper.FIRST_BATCH), + operationContext, + effectiveWriteConcern.isAcknowledged(), + bulkWriteCommand.getOpsAndNsInfo()); + if (bulkWriteCommandOkResponse == null) { + return null; + } + List> cursorExhaustBatches = doWithRetriesDisabledForCommand(retryState, "getMore", () -> + exhaustBulkWriteCommandOkResponseCursor(connectionSource, connection, bulkWriteCommandOkResponse)); + return createExhaustiveClientBulkWriteCommandOkResponse( + bulkWriteCommandOkResponse, + cursorExhaustBatches, + connection.getDescription()); + } + + /** + * @see #executeBulkWriteCommandAndExhaustOkResponse(RetryState, ConnectionSource, Connection, ClientBulkWriteCommand, WriteConcern, OperationContext) + */ + private void executeBulkWriteCommandAndExhaustOkResponseAsync( + final RetryState retryState, + final AsyncConnectionSource connectionSource, + final AsyncConnection connection, + final ClientBulkWriteCommand bulkWriteCommand, + final WriteConcern effectiveWriteConcern, + final OperationContext operationContext, + final SingleResultCallback finalCallback) { + beginAsync().thenSupply(callback -> { + connection.commandAsync( + "admin", + bulkWriteCommand.getCommandDocument(), + NoOpFieldNameValidator.INSTANCE, + null, + CommandResultDocumentCodec.create(codecRegistry.get(BsonDocument.class), CommandBatchCursorHelper.FIRST_BATCH), + operationContext, + effectiveWriteConcern.isAcknowledged(), + bulkWriteCommand.getOpsAndNsInfo(), callback); + }).thenApply((bulkWriteCommandOkResponse, callback) -> { + if (bulkWriteCommandOkResponse == null) { + callback.complete((ExhaustiveClientBulkWriteCommandOkResponse) null); + return; + } + beginAsync().>>thenSupply(c -> { + doWithRetriesDisabledForCommandAsync(retryState, "getMore", (c1) -> { + exhaustBulkWriteCommandOkResponseCursorAsync(connectionSource, connection, bulkWriteCommandOkResponse, c1); + }, c); + }).thenApply((cursorExhaustBatches, c) -> { + c.complete(createExhaustiveClientBulkWriteCommandOkResponse( + bulkWriteCommandOkResponse, + cursorExhaustBatches, + connection.getDescription())); + }).finish(callback); + }).finish(finalCallback); + } + + private static ExhaustiveClientBulkWriteCommandOkResponse createExhaustiveClientBulkWriteCommandOkResponse( + final BsonDocument bulkWriteCommandOkResponse, final List> cursorExhaustBatches, + final ConnectionDescription connectionDescription) { + ExhaustiveClientBulkWriteCommandOkResponse exhaustiveBulkWriteCommandOkResponse = + new ExhaustiveClientBulkWriteCommandOkResponse( + bulkWriteCommandOkResponse, cursorExhaustBatches); + + // `Connection.command` does not throw `MongoWriteConcernException`, so we have to construct it ourselves + MongoWriteConcernException writeConcernException = Exceptions.createWriteConcernException( + bulkWriteCommandOkResponse, connectionDescription.getServerAddress()); + if (writeConcernException != null) { + throw new 
MongoWriteConcernWithResponseException(writeConcernException, exhaustiveBulkWriteCommandOkResponse); + } + return exhaustiveBulkWriteCommandOkResponse; + } + + private R doWithRetriesDisabledForCommand( + final RetryState retryState, + final String commandDescription, + final Supplier actionWithCommand) { + Optional originalRetryableCommandFlag = retryState.attachment(AttachmentKeys.retryableCommandFlag()); + Supplier originalCommandDescriptionSupplier = retryState.attachment(AttachmentKeys.commandDescriptionSupplier()) + .orElseThrow(Assertions::fail); + + try { + retryState.attach(AttachmentKeys.retryableCommandFlag(), false, true) + .attach(AttachmentKeys.commandDescriptionSupplier(), () -> commandDescription, false); + return actionWithCommand.get(); + } finally { + originalRetryableCommandFlag.ifPresent(value -> retryState.attach(AttachmentKeys.retryableCommandFlag(), value, true)); + retryState.attach(AttachmentKeys.commandDescriptionSupplier(), originalCommandDescriptionSupplier, false); + } + } + + private void doWithRetriesDisabledForCommandAsync( + final RetryState retryState, + final String commandDescription, + final AsyncSupplier actionWithCommand, + final SingleResultCallback finalCallback) { + Optional originalRetryableCommandFlag = retryState.attachment(AttachmentKeys.retryableCommandFlag()); + Supplier originalCommandDescriptionSupplier = retryState.attachment(AttachmentKeys.commandDescriptionSupplier()) + .orElseThrow(Assertions::fail); + + beginAsync().thenSupply(c -> { + retryState.attach(AttachmentKeys.retryableCommandFlag(), false, true) + .attach(AttachmentKeys.commandDescriptionSupplier(), () -> commandDescription, false); + actionWithCommand.finish(c); + }).thenAlwaysRunAndFinish(() -> { + originalRetryableCommandFlag.ifPresent(value -> retryState.attach(AttachmentKeys.retryableCommandFlag(), value, true)); + retryState.attach(AttachmentKeys.commandDescriptionSupplier(), originalCommandDescriptionSupplier, false); + }, finalCallback); + } + + private List> exhaustBulkWriteCommandOkResponseCursor( + final ConnectionSource connectionSource, + final Connection connection, + final BsonDocument response) { + try (CommandBatchCursor cursor = cursorDocumentToBatchCursor( + TimeoutMode.CURSOR_LIFETIME, + response, + SERVER_DEFAULT_CURSOR_BATCH_SIZE, + codecRegistry.get(BsonDocument.class), + options.getComment().orElse(null), + connectionSource, + connection)) { + + return cursor.exhaust(); + } + } + + private void exhaustBulkWriteCommandOkResponseCursorAsync(final AsyncConnectionSource connectionSource, + final AsyncConnection connection, + final BsonDocument bulkWriteCommandOkResponse, + final SingleResultCallback>> finalCallback) { + AsyncBatchCursor cursor = cursorDocumentToAsyncBatchCursor( + TimeoutMode.CURSOR_LIFETIME, + bulkWriteCommandOkResponse, + SERVER_DEFAULT_CURSOR_BATCH_SIZE, + codecRegistry.get(BsonDocument.class), + options.getComment().orElse(null), + connectionSource, + connection); + + beginAsync().>>thenSupply(callback -> { + cursor.exhaust(callback); + }).thenAlwaysRunAndFinish(() -> { + cursor.close(); + }, finalCallback); + } + + + private ClientBulkWriteCommand createBulkWriteCommand( + final RetryState retryState, + final boolean effectiveRetryWrites, + final WriteConcern effectiveWriteConcern, + final SessionContext sessionContext, + final List unexecutedModels, + final BatchEncoder batchEncoder, + final Runnable retriesEnabler) { + BsonDocument commandDocument = new BsonDocument(BULK_WRITE_COMMAND_NAME, new BsonInt32(1)) + .append("errorsOnly", 
BsonBoolean.valueOf(!options.isVerboseResults())) + .append("ordered", BsonBoolean.valueOf(options.isOrdered())); + options.isBypassDocumentValidation().ifPresent(value -> + commandDocument.append("bypassDocumentValidation", BsonBoolean.valueOf(value))); + options.getComment().ifPresent(value -> + commandDocument.append("comment", value)); + options.getLet().ifPresent(let -> + commandDocument.append("let", let.toBsonDocument(BsonDocument.class, codecRegistry))); + commandWriteConcern(effectiveWriteConcern, sessionContext).ifPresent(value-> + commandDocument.append("writeConcern", value.asDocument())); + return new ClientBulkWriteCommand( + commandDocument, + new ClientBulkWriteCommand.OpsAndNsInfo( + effectiveRetryWrites, unexecutedModels, + batchEncoder, + options, + () -> { + retriesEnabler.run(); + return retryState.isFirstAttempt() + ? sessionContext.advanceTransactionNumber() + : sessionContext.getTransactionNumber(); + })); + } + + private WriteConcern validateAndGetEffectiveWriteConcern(final SessionContext sessionContext) { + WriteConcern effectiveWriteConcern = CommandOperationHelper.validateAndGetEffectiveWriteConcern(writeConcernSetting, sessionContext); + if (!effectiveWriteConcern.isAcknowledged()) { + if (options.isVerboseResults()) { + throw new MongoClientException("Cannot request unacknowledged write concern and verbose results"); + } + if (options.isOrdered()) { + throw new MongoClientException("Cannot request unacknowledged write concern and ordered writes"); + } + } + return effectiveWriteConcern; + } + + private void encodeUsingRegistry(final BsonWriter writer, final T value) { + encodeUsingRegistry(writer, value, DEFAULT_ENCODER_CONTEXT); + } + + private void encodeUsingRegistry(final BsonWriter writer, final T value, final EncoderContext encoderContext) { + @SuppressWarnings("unchecked") + Encoder encoder = (Encoder) codecRegistry.get(value.getClass()); + encoder.encode(writer, value, encoderContext); + } + + private static AbstractClientNamespacedWriteModel getNamespacedModel( + final List models, final int index) { + return (AbstractClientNamespacedWriteModel) models.get(index); + } + + public static final class Exceptions { + public static Optional serverAddressFromException(@Nullable final MongoException exception) { + ServerAddress serverAddress = null; + if (exception instanceof MongoServerException) { + serverAddress = ((MongoServerException) exception).getServerAddress(); + } else if (exception instanceof MongoSocketException) { + serverAddress = ((MongoSocketException) exception).getServerAddress(); + } + return ofNullable(serverAddress); + } + + @Nullable + private static MongoWriteConcernException createWriteConcernException( + final BsonDocument response, + final ServerAddress serverAddress) { + final String writeConcernErrorFieldName = "writeConcernError"; + if (!response.containsKey(writeConcernErrorFieldName)) { + return null; + } + BsonDocument writeConcernErrorDocument = response.getDocument(writeConcernErrorFieldName); + WriteConcernError writeConcernError = WriteConcernHelper.createWriteConcernError(writeConcernErrorDocument); + Set errorLabels = response.getArray("errorLabels", new BsonArray()).stream() + .map(i -> i.asString().getValue()) + .collect(toSet()); + return new MongoWriteConcernException(writeConcernError, null, serverAddress, errorLabels); + } + } + + private static final class ExhaustiveClientBulkWriteCommandOkResponse { + /** + * The number of unsuccessful individual write operations. 
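+ * <p>
+ * For context, this and the counters below are read from the top level of the {@code bulkWrite} response
+ * (illustrative shape, example values only):
+ * <pre>{@code
+ * { "ok": 1, "cursor": { ... }, "nErrors": 0, "nInserted": 2, "nUpserted": 0,
+ *   "nMatched": 1, "nModified": 1, "nDeleted": 0 }
+ * }</pre>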
+ */ + private final int nErrors; + private final int nInserted; + private final int nUpserted; + private final int nMatched; + private final int nModified; + private final int nDeleted; + private final List cursorExhaust; + + ExhaustiveClientBulkWriteCommandOkResponse( + final BsonDocument bulkWriteCommandOkResponse, + final List> cursorExhaustBatches) { + this.nErrors = bulkWriteCommandOkResponse.getInt32("nErrors").getValue(); + this.nInserted = bulkWriteCommandOkResponse.getInt32("nInserted").getValue(); + this.nUpserted = bulkWriteCommandOkResponse.getInt32("nUpserted").getValue(); + this.nMatched = bulkWriteCommandOkResponse.getInt32("nMatched").getValue(); + this.nModified = bulkWriteCommandOkResponse.getInt32("nModified").getValue(); + this.nDeleted = bulkWriteCommandOkResponse.getInt32("nDeleted").getValue(); + if (cursorExhaustBatches.isEmpty()) { + cursorExhaust = emptyList(); + } else if (cursorExhaustBatches.size() == 1) { + cursorExhaust = cursorExhaustBatches.get(0); + } else { + cursorExhaust = cursorExhaustBatches.stream().flatMap(Collection::stream).collect(toList()); + } + } + + boolean operationMayContinue(final ConcreteClientBulkWriteOptions options) { + return nErrors == 0 || !options.isOrdered(); + } + + int getNErrors() { + return nErrors; + } + + int getNInserted() { + return nInserted; + } + + int getNUpserted() { + return nUpserted; + } + + int getNMatched() { + return nMatched; + } + + int getNModified() { + return nModified; + } + + int getNDeleted() { + return nDeleted; + } + + List getCursorExhaust() { + return cursorExhaust; + } + } + + /** + * Accumulates results of the operation as it is being executed + * for {@linkplain #build(MongoException, WriteConcern) building} them when the operation completes. + */ + private final class ResultAccumulator { + @Nullable + private ServerAddress serverAddress; + private final ArrayList batchResults; + + ResultAccumulator() { + serverAddress = null; + batchResults = new ArrayList<>(); + } + + /** + *
+ * <ul>
+ * <li>Either builds and returns {@link ClientBulkWriteResult};</li>
+ * <li>or builds and throws {@link ClientBulkWriteException};</li>
+ * <li>or throws {@code topLevelError}.</li>
+ * </ul>
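+ * <p>
+ * A caller-side sketch (illustrative only; {@code operation.execute(binding)} stands in for however the
+ * operation is invoked):
+ * <pre>{@code
+ * try {
+ *     ClientBulkWriteResult result = operation.execute(binding); // every write succeeded
+ * } catch (ClientBulkWriteException e) {
+ *     // write and/or write concern errors; e.getCause() bears the top-level error, if any
+ * } catch (MongoException e) {
+ *     // a top-level error with no batch responses to report
+ * }
+ * }</pre>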
+ */ + ClientBulkWriteResult build(@Nullable final MongoException topLevelError, final WriteConcern effectiveWriteConcern) throws MongoException { + boolean verboseResultsSetting = options.isVerboseResults(); + boolean batchResultsHaveResponses = false; + boolean batchResultsHaveInfoAboutSuccessfulIndividualOperations = false; + long insertedCount = 0; + long upsertedCount = 0; + long matchedCount = 0; + long modifiedCount = 0; + long deletedCount = 0; + Map insertResults = verboseResultsSetting ? new HashMap<>() : emptyMap(); + Map updateResults = verboseResultsSetting ? new HashMap<>() : emptyMap(); + Map deleteResults = verboseResultsSetting ? new HashMap<>() : emptyMap(); + ArrayList writeConcernErrors = new ArrayList<>(); + Map writeErrors = new HashMap<>(); + for (BatchResult batchResult : batchResults) { + if (batchResult.hasResponse()) { + batchResultsHaveResponses = true; + MongoWriteConcernException writeConcernException = batchResult.getWriteConcernException(); + if (writeConcernException != null) { + writeConcernErrors.add(writeConcernException.getWriteConcernError()); + } + int batchStartModelIndex = batchResult.getBatchStartModelIndex(); + ExhaustiveClientBulkWriteCommandOkResponse response = batchResult.getResponse(); + boolean orderedSetting = options.isOrdered(); + int nErrors = response.getNErrors(); + batchResultsHaveInfoAboutSuccessfulIndividualOperations = batchResultsHaveInfoAboutSuccessfulIndividualOperations + || (orderedSetting && nErrors == 0) + || (!orderedSetting && nErrors < batchResult.getBatchModelsCount()); + insertedCount += response.getNInserted(); + upsertedCount += response.getNUpserted(); + matchedCount += response.getNMatched(); + modifiedCount += response.getNModified(); + deletedCount += response.getNDeleted(); + Map insertModelDocumentIds = batchResult.getInsertModelDocumentIds(); + for (BsonDocument individualOperationResponse : response.getCursorExhaust()) { + int individualOperationIndexInBatch = individualOperationResponse.getInt32("idx").getValue(); + int writeModelIndex = batchStartModelIndex + individualOperationIndexInBatch; + if (individualOperationResponse.getNumber("ok").intValue() == 1) { + assertTrue(verboseResultsSetting); + AbstractClientNamespacedWriteModel writeModel = getNamespacedModel(models, writeModelIndex); + if (writeModel instanceof ConcreteClientNamespacedInsertOneModel) { + insertResults.put( + writeModelIndex, + new ConcreteClientInsertOneResult(insertModelDocumentIds.get(individualOperationIndexInBatch))); + } else if (writeModel instanceof ConcreteClientNamespacedUpdateOneModel + || writeModel instanceof ConcreteClientNamespacedUpdateManyModel + || writeModel instanceof ConcreteClientNamespacedReplaceOneModel) { + BsonDocument upsertedIdDocument = individualOperationResponse.getDocument("upserted", null); + updateResults.put( + writeModelIndex, + new ConcreteClientUpdateResult( + individualOperationResponse.getInt32("n").getValue(), + individualOperationResponse.getInt32("nModified").getValue(), + upsertedIdDocument == null ? 
null : upsertedIdDocument.get("_id"))); + } else if (writeModel instanceof ConcreteClientNamespacedDeleteOneModel + || writeModel instanceof ConcreteClientNamespacedDeleteManyModel) { + deleteResults.put( + writeModelIndex, + new ConcreteClientDeleteResult(individualOperationResponse.getInt32("n").getValue())); + } else { + fail(writeModel.getClass().toString()); + } + } else { + batchResultsHaveInfoAboutSuccessfulIndividualOperations = batchResultsHaveInfoAboutSuccessfulIndividualOperations + || (orderedSetting && individualOperationIndexInBatch > 0); + WriteError individualOperationWriteError = new WriteError( + individualOperationResponse.getInt32("code").getValue(), + individualOperationResponse.getString("errmsg").getValue(), + individualOperationResponse.getDocument("errInfo", new BsonDocument())); + writeErrors.put(writeModelIndex, individualOperationWriteError); + } + } + } + } + if (topLevelError == null && writeConcernErrors.isEmpty() && writeErrors.isEmpty()) { + if (effectiveWriteConcern.isAcknowledged()) { + AcknowledgedSummaryClientBulkWriteResult summaryResult = new AcknowledgedSummaryClientBulkWriteResult( + insertedCount, upsertedCount, matchedCount, modifiedCount, deletedCount); + return verboseResultsSetting + ? new AcknowledgedVerboseClientBulkWriteResult(summaryResult, insertResults, updateResults, deleteResults) + : summaryResult; + } else { + return UnacknowledgedClientBulkWriteResult.INSTANCE; + } + } else if (batchResultsHaveResponses) { + AcknowledgedSummaryClientBulkWriteResult partialSummaryResult = batchResultsHaveInfoAboutSuccessfulIndividualOperations + ? new AcknowledgedSummaryClientBulkWriteResult(insertedCount, upsertedCount, matchedCount, modifiedCount, deletedCount) + : null; + throw new ClientBulkWriteException( + topLevelError, + writeConcernErrors, + writeErrors, + verboseResultsSetting && partialSummaryResult != null + ? new AcknowledgedVerboseClientBulkWriteResult(partialSummaryResult, insertResults, updateResults, deleteResults) + : partialSummaryResult, + assertNotNull(serverAddress)); + } else { + throw assertNotNull(topLevelError); + } + } + + void onNewServerAddress(final ServerAddress serverAddress) { + this.serverAddress = serverAddress; + } + + @Nullable + Integer onBulkWriteCommandOkResponseOrNoResponse( + final int batchStartModelIndex, + @Nullable + final ExhaustiveClientBulkWriteCommandOkResponse response, + final BatchEncoder.EncodedBatchInfo encodedBatchInfo) { + return onBulkWriteCommandOkResponseOrNoResponse(batchStartModelIndex, response, null, encodedBatchInfo); + } + + /** + * @return See {@link #executeBatch(int, WriteConcern, WriteBinding, ResultAccumulator)}. + */ + @Nullable + Integer onBulkWriteCommandOkResponseWithWriteConcernError( + final int batchStartModelIndex, + final MongoWriteConcernWithResponseException exception, + final BatchEncoder.EncodedBatchInfo encodedBatchInfo) { + MongoWriteConcernException writeConcernException = (MongoWriteConcernException) exception.getCause(); + onNewServerAddress(writeConcernException.getServerAddress()); + ExhaustiveClientBulkWriteCommandOkResponse response = (ExhaustiveClientBulkWriteCommandOkResponse) exception.getResponse(); + return onBulkWriteCommandOkResponseOrNoResponse(batchStartModelIndex, response, writeConcernException, encodedBatchInfo); + } + + /** + * @return See {@link #executeBatch(int, WriteConcern, WriteBinding, ResultAccumulator)}. 
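+ * <p>
+ * Worked example (illustrative): with {@code models.size() == 10}, a batch that started at model index 0 and
+ * encoded 6 models yields 6; a following batch that starts at 6 and encodes the remaining 4 yields {@code null}.
+ * An ordered operation whose response reports write errors also yields {@code null}, stopping the batching loop.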
+ */ + @Nullable + private Integer onBulkWriteCommandOkResponseOrNoResponse( + final int batchStartModelIndex, + @Nullable + final ExhaustiveClientBulkWriteCommandOkResponse response, + @Nullable + final MongoWriteConcernException writeConcernException, + final BatchEncoder.EncodedBatchInfo encodedBatchInfo) { + BatchResult batchResult = response == null + ? BatchResult.noResponse(batchStartModelIndex, encodedBatchInfo) + : BatchResult.okResponse(batchStartModelIndex, encodedBatchInfo, response, writeConcernException); + batchResults.add(batchResult); + int potentialNextBatchStartModelIndex = batchStartModelIndex + batchResult.getBatchModelsCount(); + return (response == null || response.operationMayContinue(options)) + ? potentialNextBatchStartModelIndex == models.size() ? null : potentialNextBatchStartModelIndex + : null; + } + + void onBulkWriteCommandErrorResponse(final MongoCommandException exception) { + onNewServerAddress(exception.getServerAddress()); + } + + void onBulkWriteCommandErrorWithoutResponse(final MongoException exception) { + Exceptions.serverAddressFromException(exception).ifPresent(this::onNewServerAddress); + } + } + + public static final class ClientBulkWriteCommand { + private final BsonDocument commandDocument; + private final OpsAndNsInfo opsAndNsInfo; + + ClientBulkWriteCommand( + final BsonDocument commandDocument, + final OpsAndNsInfo opsAndNsInfo) { + this.commandDocument = commandDocument; + this.opsAndNsInfo = opsAndNsInfo; + } + + BsonDocument getCommandDocument() { + return commandDocument; + } + + OpsAndNsInfo getOpsAndNsInfo() { + return opsAndNsInfo; + } + + public static final class OpsAndNsInfo extends DualMessageSequences { + private final boolean effectiveRetryWrites; + private final List models; + private final BatchEncoder batchEncoder; + private final ConcreteClientBulkWriteOptions options; + private final Supplier doIfCommandIsRetryableAndAdvanceGetTxnNumber; + + @VisibleForTesting(otherwise = PACKAGE) + public OpsAndNsInfo( + final boolean effectiveRetryWrites, + final List models, + final BatchEncoder batchEncoder, + final ConcreteClientBulkWriteOptions options, + final Supplier doIfCommandIsRetryableAndAdvanceGetTxnNumber) { + super("ops", new OpsFieldNameValidator(models), "nsInfo", NoOpFieldNameValidator.INSTANCE); + this.effectiveRetryWrites = effectiveRetryWrites; + this.models = models; + this.batchEncoder = batchEncoder; + this.options = options; + this.doIfCommandIsRetryableAndAdvanceGetTxnNumber = doIfCommandIsRetryableAndAdvanceGetTxnNumber; + } + + @Override + public EncodeDocumentsResult encodeDocuments(final WritersProviderAndLimitsChecker writersProviderAndLimitsChecker) { + // We must call `batchEncoder.reset` lazily, that is here, and not eagerly before a command retry attempt, + // because a retry attempt may fail before encoding, + // in which case we need the information gathered by `batchEncoder` at a previous attempt. 
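+ // Note that `reset()` clears only the models count: insert document ids captured on a previous
+ // attempt are retained so that generated `_id` values are reused if the batch is retried.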
+ batchEncoder.reset(); + LinkedHashMap indexedNamespaces = new LinkedHashMap<>(); + WritersProviderAndLimitsChecker.WriteResult writeResult = OK_LIMIT_NOT_REACHED; + boolean commandIsRetryable = effectiveRetryWrites; + int maxModelIndexInBatch = -1; + for (int modelIndexInBatch = 0; modelIndexInBatch < models.size() && writeResult == OK_LIMIT_NOT_REACHED; modelIndexInBatch++) { + AbstractClientNamespacedWriteModel namespacedModel = getNamespacedModel(models, modelIndexInBatch); + MongoNamespace namespace = namespacedModel.getNamespace(); + int indexedNamespacesSizeBeforeCompute = indexedNamespaces.size(); + int namespaceIndexInBatch = indexedNamespaces.computeIfAbsent(namespace, k -> indexedNamespacesSizeBeforeCompute); + boolean writeNewNamespace = indexedNamespaces.size() != indexedNamespacesSizeBeforeCompute; + int finalModelIndexInBatch = modelIndexInBatch; + writeResult = writersProviderAndLimitsChecker.tryWrite((opsWriter, nsInfoWriter) -> { + batchEncoder.encodeWriteModel(opsWriter, namespacedModel.getModel(), finalModelIndexInBatch, namespaceIndexInBatch); + if (writeNewNamespace) { + nsInfoWriter.writeStartDocument(); + nsInfoWriter.writeString("ns", namespace.getFullName()); + nsInfoWriter.writeEndDocument(); + } + return finalModelIndexInBatch + 1; + }); + if (writeResult == FAIL_LIMIT_EXCEEDED) { + batchEncoder.reset(finalModelIndexInBatch); + } else { + maxModelIndexInBatch = finalModelIndexInBatch; + if (commandIsRetryable && doesNotSupportRetries(namespacedModel)) { + commandIsRetryable = false; + logWriteModelDoesNotSupportRetries(); + } + } + } + return new EncodeDocumentsResult( + // we will execute more batches, so we must request a response to maintain the order of individual write operations + options.isOrdered() && maxModelIndexInBatch < models.size() - 1, + commandIsRetryable + ? singletonList(new BsonElement("txnNumber", new BsonInt64(doIfCommandIsRetryableAndAdvanceGetTxnNumber.get()))) + : emptyList()); + } + + private static boolean doesNotSupportRetries(final AbstractClientNamespacedWriteModel model) { + return model instanceof ConcreteClientNamespacedUpdateManyModel || model instanceof ConcreteClientNamespacedDeleteManyModel; + } + + /** + * The server supports only the {@code update} individual write operation in the {@code ops} array field, while the driver supports + * {@link ClientNamespacedUpdateOneModel}, {@link ClientNamespacedUpdateOneModel}, {@link ClientNamespacedReplaceOneModel}. + * The difference between updating and replacing is only in the document specified via the {@code updateMods} field: + *
+ * <ul>
+ * <li>if the name of the first field starts with {@code '$'}, then the document is interpreted as specifying update operators;</li>
+ * <li>if the name of the first field does not start with {@code '$'}, then the document is interpreted as a replacement.</li>
+ * </ul>
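+ * <p>
+ * For example (illustrative documents):
+ * <pre>{@code
+ * { "$set": { "x": 1 } }   // first field name starts with '$': update operators
+ * { "x": 1 }               // first field name does not start with '$': replacement document
+ * }</pre>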
+ *
+ * @see
+ * Update vs. replace document validation + */ + private static final class OpsFieldNameValidator implements FieldNameValidator { + private static final Set OPERATION_DISCRIMINATOR_FIELD_NAMES = Stream.of("insert", "update", "delete").collect(toSet()); + + private final List models; + private final ReplacingUpdateModsFieldValidator replacingValidator; + private final UpdatingUpdateModsFieldValidator updatingValidator; + private int currentIndividualOperationIndex; + + OpsFieldNameValidator(final List models) { + this.models = models; + replacingValidator = new ReplacingUpdateModsFieldValidator(); + updatingValidator = new UpdatingUpdateModsFieldValidator(); + currentIndividualOperationIndex = -1; + } + + @Override + public boolean validate(final String fieldName) { + if (OPERATION_DISCRIMINATOR_FIELD_NAMES.contains(fieldName)) { + currentIndividualOperationIndex++; + } + return true; + } + + @Override + public FieldNameValidator getValidatorForField(final String fieldName) { + if (fieldName.equals("updateMods")) { + return currentIndividualOperationIsReplace() ? replacingValidator.reset() : updatingValidator.reset(); + } + return NoOpFieldNameValidator.INSTANCE; + } + + private boolean currentIndividualOperationIsReplace() { + return getNamespacedModel(models, currentIndividualOperationIndex) instanceof ConcreteClientNamespacedReplaceOneModel; + } + + private static final class ReplacingUpdateModsFieldValidator implements FieldNameValidator { + private boolean firstFieldSinceLastReset; + + ReplacingUpdateModsFieldValidator() { + firstFieldSinceLastReset = true; + } + + @Override + public boolean validate(final String fieldName) { + if (firstFieldSinceLastReset) { + // we must validate only the first field, and leave the rest up to the server + firstFieldSinceLastReset = false; + return ReplacingDocumentFieldNameValidator.INSTANCE.validate(fieldName); + } + return true; + } + + @Override + public String getValidationErrorMessage(final String fieldName) { + return ReplacingDocumentFieldNameValidator.INSTANCE.getValidationErrorMessage(fieldName); + } + + @Override + public FieldNameValidator getValidatorForField(final String fieldName) { + return NoOpFieldNameValidator.INSTANCE; + } + + ReplacingUpdateModsFieldValidator reset() { + firstFieldSinceLastReset = true; + return this; + } + } + + private static final class UpdatingUpdateModsFieldValidator implements FieldNameValidator { + private final UpdateFieldNameValidator delegate; + private boolean firstFieldSinceLastReset; + + UpdatingUpdateModsFieldValidator() { + delegate = new UpdateFieldNameValidator(); + firstFieldSinceLastReset = true; + } + + @Override + public boolean validate(final String fieldName) { + if (firstFieldSinceLastReset) { + // we must validate only the first field, and leave the rest up to the server + firstFieldSinceLastReset = false; + return delegate.validate(fieldName); + } + return true; + } + + @Override + public String getValidationErrorMessage(final String fieldName) { + return delegate.getValidationErrorMessage(fieldName); + } + + @Override + public FieldNameValidator getValidatorForField(final String fieldName) { + return NoOpFieldNameValidator.INSTANCE; + } + + @Override + public void start() { + delegate.start(); + } + + @Override + public void end() { + delegate.end(); + } + + UpdatingUpdateModsFieldValidator reset() { + delegate.reset(); + firstFieldSinceLastReset = true; + return this; + } + } + } + } + } + + static final class BatchResult { + private final int batchStartModelIndex; + private final 
BatchEncoder.EncodedBatchInfo encodedBatchInfo; + @Nullable + private final ExhaustiveClientBulkWriteCommandOkResponse response; + @Nullable + private final MongoWriteConcernException writeConcernException; + + static BatchResult okResponse( + final int batchStartModelIndex, + final BatchEncoder.EncodedBatchInfo encodedBatchInfo, + final ExhaustiveClientBulkWriteCommandOkResponse response, + @Nullable final MongoWriteConcernException writeConcernException) { + return new BatchResult(batchStartModelIndex, encodedBatchInfo, assertNotNull(response), writeConcernException); + } + + static BatchResult noResponse(final int batchStartModelIndex, final BatchEncoder.EncodedBatchInfo encodedBatchInfo) { + return new BatchResult(batchStartModelIndex, encodedBatchInfo, null, null); + } + + private BatchResult( + final int batchStartModelIndex, + final BatchEncoder.EncodedBatchInfo encodedBatchInfo, + @Nullable final ExhaustiveClientBulkWriteCommandOkResponse response, + @Nullable final MongoWriteConcernException writeConcernException) { + this.batchStartModelIndex = batchStartModelIndex; + this.encodedBatchInfo = encodedBatchInfo; + this.response = response; + this.writeConcernException = writeConcernException; + } + + int getBatchStartModelIndex() { + return batchStartModelIndex; + } + + /** + * @see BatchEncoder.EncodedBatchInfo#getModelsCount() + */ + int getBatchModelsCount() { + return encodedBatchInfo.getModelsCount(); + } + + boolean hasResponse() { + return response != null; + } + + ExhaustiveClientBulkWriteCommandOkResponse getResponse() { + return assertNotNull(response); + } + + @Nullable + MongoWriteConcernException getWriteConcernException() { + assertTrue(hasResponse()); + return writeConcernException; + } + + /** + * @see BatchEncoder.EncodedBatchInfo#getInsertModelDocumentIds() + */ + Map getInsertModelDocumentIds() { + assertTrue(hasResponse()); + return encodedBatchInfo.getInsertModelDocumentIds(); + } + } + + /** + * Exactly one instance must be used per {@linkplain #executeBatch(int, WriteConcern, WriteBinding, ResultAccumulator) batch}. + */ + @VisibleForTesting(otherwise = PRIVATE) + public final class BatchEncoder { + private EncodedBatchInfo encodedBatchInfo; + + @VisibleForTesting(otherwise = PACKAGE) + public BatchEncoder() { + encodedBatchInfo = new EncodedBatchInfo(); + } + + /** + * Must be called at most once. + * Must not be called before calling + * {@link #encodeWriteModel(BsonBinaryWriter, ClientWriteModel, int, int)} at least once. + * Renders {@code this} unusable. 
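+ * <p>
+ * Illustrative lifecycle: call {@code encodeWriteModel(...)} one or more times, then
+ * {@code intoEncodedBatchInfo()} exactly once; any further use of this encoder fails an assertion,
+ * because its state has been handed off.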
+ */ + EncodedBatchInfo intoEncodedBatchInfo() { + EncodedBatchInfo result = assertNotNull(encodedBatchInfo); + encodedBatchInfo = null; + assertTrue(result.getModelsCount() > 0); + return result; + } + + void reset() { + // we must not reset anything but `modelsCount` + assertNotNull(encodedBatchInfo).modelsCount = 0; + } + + void reset(final int modelIndexInBatch) { + assertNotNull(encodedBatchInfo).modelsCount -= 1; + encodedBatchInfo.insertModelDocumentIds.remove(modelIndexInBatch); + } + + void encodeWriteModel( + final BsonBinaryWriter writer, + final ClientWriteModel model, + final int modelIndexInBatch, + final int namespaceIndexInBatch) { + assertNotNull(encodedBatchInfo).modelsCount++; + writer.writeStartDocument(); + if (model instanceof ConcreteClientInsertOneModel) { + writer.writeInt32("insert", namespaceIndexInBatch); + encodeWriteModelInternals(writer, (ConcreteClientInsertOneModel) model, modelIndexInBatch); + } else if (model instanceof ConcreteClientUpdateOneModel) { + writer.writeInt32("update", namespaceIndexInBatch); + writer.writeBoolean("multi", false); + encodeWriteModelInternals(writer, (ConcreteClientUpdateOneModel) model); + } else if (model instanceof ConcreteClientUpdateManyModel) { + writer.writeInt32("update", namespaceIndexInBatch); + writer.writeBoolean("multi", true); + encodeWriteModelInternals(writer, (ConcreteClientUpdateManyModel) model); + } else if (model instanceof ConcreteClientReplaceOneModel) { + writer.writeInt32("update", namespaceIndexInBatch); + encodeWriteModelInternals(writer, (ConcreteClientReplaceOneModel) model); + } else if (model instanceof ConcreteClientDeleteOneModel) { + writer.writeInt32("delete", namespaceIndexInBatch); + writer.writeBoolean("multi", false); + encodeWriteModelInternals(writer, (ConcreteClientDeleteOneModel) model); + } else if (model instanceof ConcreteClientDeleteManyModel) { + writer.writeInt32("delete", namespaceIndexInBatch); + writer.writeBoolean("multi", true); + encodeWriteModelInternals(writer, (ConcreteClientDeleteManyModel) model); + } else { + throw fail(model.getClass().toString()); + } + writer.writeEndDocument(); + } + + private void encodeWriteModelInternals( + final BsonBinaryWriter writer, + final ConcreteClientInsertOneModel model, + final int modelIndexInBatch) { + writer.writeName("document"); + Object document = model.getDocument(); + assertNotNull(encodedBatchInfo).insertModelDocumentIds.compute(modelIndexInBatch, (k, knownModelDocumentId) -> { + IdHoldingBsonWriter documentIdHoldingBsonWriter = new IdHoldingBsonWriter( + writer, + // Reuse `knownModelDocumentId` if it may have been generated by `IdHoldingBsonWriter` in a previous attempt. + // If its type is not `BsonObjectId`, which happens only if `_id` was specified by the application, + // we know it could not have been generated. + knownModelDocumentId instanceof BsonObjectId ? 
knownModelDocumentId.asObjectId() : null); + encodeUsingRegistry(documentIdHoldingBsonWriter, document, COLLECTIBLE_DOCUMENT_ENCODER_CONTEXT); + return documentIdHoldingBsonWriter.getId(); + }); + } + + private void encodeWriteModelInternals(final BsonWriter writer, final ConcreteClientUpdateOneModel model) { + encodeWriteModelInternals(writer, (AbstractClientUpdateModel) model); + model.getOptions().getSort().ifPresent(value -> { + writer.writeName("sort"); + encodeUsingRegistry(writer, value); + }); + } + + private void encodeWriteModelInternals(final BsonWriter writer, final AbstractClientUpdateModel model) { + writer.writeName("filter"); + encodeUsingRegistry(writer, model.getFilter()); + model.getUpdate().ifPresent(value -> { + writer.writeName("updateMods"); + encodeUsingRegistry(writer, value); + }); + model.getUpdatePipeline().ifPresent(value -> { + writer.writeStartArray("updateMods"); + value.forEach(pipelineStage -> encodeUsingRegistry(writer, pipelineStage)); + writer.writeEndArray(); + }); + AbstractClientUpdateOptions options = model.getOptions(); + options.getArrayFilters().ifPresent(value -> { + writer.writeStartArray("arrayFilters"); + value.forEach(filter -> encodeUsingRegistry(writer, filter)); + writer.writeEndArray(); + }); + options.getCollation().ifPresent(value -> { + writer.writeName("collation"); + encodeUsingRegistry(writer, value.asDocument()); + }); + options.getHint().ifPresent(hint -> { + writer.writeName("hint"); + encodeUsingRegistry(writer, hint); + }); + options.getHintString().ifPresent(value -> writer.writeString("hint", value)); + options.isUpsert().ifPresent(value -> writer.writeBoolean("upsert", value)); + } + + private void encodeWriteModelInternals(final BsonBinaryWriter writer, final ConcreteClientReplaceOneModel model) { + writer.writeBoolean("multi", false); + writer.writeName("filter"); + encodeUsingRegistry(writer, model.getFilter()); + writer.writeName("updateMods"); + encodeUsingRegistry(writer, model.getReplacement(), COLLECTIBLE_DOCUMENT_ENCODER_CONTEXT); + ConcreteClientReplaceOneOptions options = model.getOptions(); + options.getCollation().ifPresent(value -> { + writer.writeName("collation"); + encodeUsingRegistry(writer, value.asDocument()); + }); + options.getHint().ifPresent(value -> { + writer.writeName("hint"); + encodeUsingRegistry(writer, value); + }); + options.getHintString().ifPresent(value -> writer.writeString("hint", value)); + options.isUpsert().ifPresent(value -> writer.writeBoolean("upsert", value)); + options.getSort().ifPresent(value -> { + writer.writeName("sort"); + encodeUsingRegistry(writer, value); + }); + } + + private void encodeWriteModelInternals(final BsonWriter writer, final AbstractClientDeleteModel model) { + writer.writeName("filter"); + encodeUsingRegistry(writer, model.getFilter()); + AbstractClientDeleteOptions options = model.getOptions(); + options.getCollation().ifPresent(value -> { + writer.writeName("collation"); + encodeUsingRegistry(writer, value.asDocument()); + }); + options.getHint().ifPresent(value -> { + writer.writeName("hint"); + encodeUsingRegistry(writer, value); + }); + options.getHintString().ifPresent(value -> writer.writeString("hint", value)); + } + + final class EncodedBatchInfo { + private final HashMap insertModelDocumentIds; + private int modelsCount; + + private EncodedBatchInfo() { + insertModelDocumentIds = new HashMap<>(); + modelsCount = 0; + } + + /** + * The key of each entry is the index of a model in the + * {@linkplain #executeBatch(int, WriteConcern, WriteBinding, 
ResultAccumulator) batch}, + * the value is either the "_id" field value from {@linkplain ConcreteClientInsertOneModel#getDocument()}, + * or the value we generated for this field if the field is absent. + */ + Map getInsertModelDocumentIds() { + return insertModelDocumentIds; + } + + int getModelsCount() { + return modelsCount; + } + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursor.java new file mode 100644 index 00000000000..24ecc99b9f1 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursor.java @@ -0,0 +1,394 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoCommandException; +import com.mongodb.MongoException; +import com.mongodb.MongoNamespace; +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.MongoSocketException; +import com.mongodb.ReadPreference; +import com.mongodb.ServerAddress; +import com.mongodb.ServerCursor; +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ServerType; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.binding.ConnectionSource; +import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.validator.NoOpFieldNameValidator; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonTimestamp; +import org.bson.BsonValue; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.Decoder; + +import java.util.List; +import java.util.NoSuchElementException; +import java.util.function.Consumer; +import java.util.function.Supplier; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.FIRST_BATCH; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.MESSAGE_IF_CLOSED_AS_CURSOR; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.MESSAGE_IF_CLOSED_AS_ITERATOR; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.NEXT_BATCH; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.getKillCursorsCommand; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.getMoreCommandDocument; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.logCommandCursorResult; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.translateCommandException; + +class CommandBatchCursor implements AggregateResponseBatchCursor { + + private final 
MongoNamespace namespace; + private final Decoder decoder; + @Nullable + private final BsonValue comment; + private final int maxWireVersion; + private final boolean firstBatchEmpty; + private final ResourceManager resourceManager; + private final OperationContext operationContext; + private final TimeoutMode timeoutMode; + + private int batchSize; + private CommandCursorResult commandCursorResult; + @Nullable + private List nextBatch; + private boolean resetTimeoutWhenClosing; + + CommandBatchCursor( + final TimeoutMode timeoutMode, + final BsonDocument commandCursorDocument, + final int batchSize, final long maxTimeMS, + final Decoder decoder, + @Nullable final BsonValue comment, + final ConnectionSource connectionSource, + final Connection connection) { + ConnectionDescription connectionDescription = connection.getDescription(); + this.commandCursorResult = toCommandCursorResult(connectionDescription.getServerAddress(), FIRST_BATCH, commandCursorDocument); + this.namespace = commandCursorResult.getNamespace(); + this.batchSize = batchSize; + this.decoder = decoder; + this.comment = comment; + this.maxWireVersion = connectionDescription.getMaxWireVersion(); + this.firstBatchEmpty = commandCursorResult.getResults().isEmpty(); + operationContext = connectionSource.getOperationContext(); + this.timeoutMode = timeoutMode; + + operationContext.getTimeoutContext().setMaxTimeOverride(maxTimeMS); + + Connection connectionToPin = connectionSource.getServerDescription().getType() == ServerType.LOAD_BALANCER ? connection : null; + resourceManager = new ResourceManager(namespace, connectionSource, connectionToPin, commandCursorResult.getServerCursor()); + resetTimeoutWhenClosing = true; + } + + @Override + public boolean hasNext() { + return assertNotNull(resourceManager.execute(MESSAGE_IF_CLOSED_AS_CURSOR, this::doHasNext)); + } + + private boolean doHasNext() { + if (nextBatch != null) { + return true; + } + + checkTimeoutModeAndResetTimeoutContextIfIteration(); + while (resourceManager.getServerCursor() != null) { + getMore(); + if (!resourceManager.operable()) { + throw new IllegalStateException(MESSAGE_IF_CLOSED_AS_CURSOR); + } + if (nextBatch != null) { + return true; + } + } + + return false; + } + + @Override + public List next() { + return assertNotNull(resourceManager.execute(MESSAGE_IF_CLOSED_AS_ITERATOR, this::doNext)); + } + + @Override + public int available() { + return !resourceManager.operable() || nextBatch == null ? 
0 : nextBatch.size(); + } + + @Nullable + private List doNext() { + if (!doHasNext()) { + throw new NoSuchElementException(); + } + + List retVal = nextBatch; + nextBatch = null; + commandCursorResult = CommandCursorResult.withEmptyResults(commandCursorResult); + return retVal; + } + + @VisibleForTesting(otherwise = PRIVATE) + boolean isClosed() { + return !resourceManager.operable(); + } + + @Override + public void setBatchSize(final int batchSize) { + this.batchSize = batchSize; + } + + @Override + public int getBatchSize() { + return batchSize; + } + + @Override + public void remove() { + throw new UnsupportedOperationException("Not implemented yet!"); + } + + @Override + public void close() { + resourceManager.close(); + } + + @Nullable + @Override + public List tryNext() { + return resourceManager.execute(MESSAGE_IF_CLOSED_AS_CURSOR, () -> { + if (!tryHasNext()) { + return null; + } + return doNext(); + }); + } + + private boolean tryHasNext() { + if (nextBatch != null) { + return true; + } + + if (resourceManager.getServerCursor() != null) { + getMore(); + } + + return nextBatch != null; + } + + @Override + @Nullable + public ServerCursor getServerCursor() { + if (!resourceManager.operable()) { + throw new IllegalStateException(MESSAGE_IF_CLOSED_AS_ITERATOR); + } + return resourceManager.getServerCursor(); + } + + @Override + public ServerAddress getServerAddress() { + if (!resourceManager.operable()) { + throw new IllegalStateException(MESSAGE_IF_CLOSED_AS_ITERATOR); + } + + return commandCursorResult.getServerAddress(); + } + + @Override + public BsonDocument getPostBatchResumeToken() { + return commandCursorResult.getPostBatchResumeToken(); + } + + @Override + public BsonTimestamp getOperationTime() { + return commandCursorResult.getOperationTime(); + } + + @Override + public boolean isFirstBatchEmpty() { + return firstBatchEmpty; + } + + @Override + public int getMaxWireVersion() { + return maxWireVersion; + } + + void checkTimeoutModeAndResetTimeoutContextIfIteration() { + if (timeoutMode == TimeoutMode.ITERATION) { + operationContext.getTimeoutContext().resetTimeoutIfPresent(); + } + } + + private void getMore() { + ServerCursor serverCursor = assertNotNull(resourceManager.getServerCursor()); + resourceManager.executeWithConnection(connection -> { + ServerCursor nextServerCursor; + try { + this.commandCursorResult = toCommandCursorResult(connection.getDescription().getServerAddress(), NEXT_BATCH, + assertNotNull( + connection.command(namespace.getDatabaseName(), + getMoreCommandDocument(serverCursor.getId(), connection.getDescription(), namespace, batchSize, comment), + NoOpFieldNameValidator.INSTANCE, + ReadPreference.primary(), + CommandResultDocumentCodec.create(decoder, NEXT_BATCH), + assertNotNull(resourceManager.getConnectionSource()).getOperationContext()))); + nextServerCursor = commandCursorResult.getServerCursor(); + } catch (MongoCommandException e) { + throw translateCommandException(e, serverCursor); + } + resourceManager.setServerCursor(nextServerCursor); + }); + } + + private CommandCursorResult toCommandCursorResult(final ServerAddress serverAddress, final String fieldNameContainingBatch, + final BsonDocument commandCursorDocument) { + CommandCursorResult commandCursorResult = new CommandCursorResult<>(serverAddress, fieldNameContainingBatch, + commandCursorDocument); + logCommandCursorResult(commandCursorResult); + this.nextBatch = commandCursorResult.getResults().isEmpty() ? 
null : commandCursorResult.getResults(); + return commandCursorResult; + } + + /** + * Configures the cursor to {@link #close()} + * without {@linkplain TimeoutContext#resetTimeoutIfPresent() resetting} its {@linkplain TimeoutContext#getTimeout() timeout}. + * This is useful when managing the {@link #close()} behavior externally. + */ + CommandBatchCursor disableTimeoutResetWhenClosing() { + resetTimeoutWhenClosing = false; + return this; + } + + @ThreadSafe + private final class ResourceManager extends CursorResourceManager { + ResourceManager( + final MongoNamespace namespace, + final ConnectionSource connectionSource, + @Nullable final Connection connectionToPin, + @Nullable final ServerCursor serverCursor) { + super(namespace, connectionSource, connectionToPin, serverCursor); + } + + /** + * Thread-safe. + * Executes {@code operation} within the {@link #tryStartOperation()}/{@link #endOperation()} bounds. + * + * @throws IllegalStateException If {@linkplain CommandBatchCursor#close() closed}. + */ + @Nullable + R execute(final String exceptionMessageIfClosed, final Supplier operation) throws IllegalStateException { + if (!tryStartOperation()) { + throw new IllegalStateException(exceptionMessageIfClosed); + } + try { + return operation.get(); + } finally { + endOperation(); + } + } + + @Override + void markAsPinned(final Connection connectionToPin, final Connection.PinningMode pinningMode) { + connectionToPin.markAsPinned(pinningMode); + } + + @Override + void doClose() { + TimeoutContext timeoutContext = operationContext.getTimeoutContext(); + timeoutContext.resetToDefaultMaxTime(); + if (resetTimeoutWhenClosing) { + timeoutContext.doWithResetTimeout(this::releaseResources); + } else { + releaseResources(); + } + } + + private void releaseResources() { + try { + if (isSkipReleasingServerResourcesOnClose()) { + unsetServerCursor(); + } + if (super.getServerCursor() != null) { + Connection connection = getConnection(); + try { + releaseServerResources(connection); + } finally { + connection.release(); + } + } + } catch (MongoException e) { + // ignore exceptions when releasing server resources + } finally { + // guarantee that regardless of exceptions, `serverCursor` is null and client resources are released + unsetServerCursor(); + releaseClientResources(); + } + } + + void executeWithConnection(final Consumer action) { + Connection connection = getConnection(); + try { + action.accept(connection); + } catch (MongoSocketException e) { + onCorruptedConnection(connection, e); + throw e; + } catch (MongoOperationTimeoutException e) { + Throwable cause = e.getCause(); + if (cause instanceof MongoSocketException) { + onCorruptedConnection(connection, (MongoSocketException) cause); + } + throw e; + } finally { + connection.release(); + } + } + + private Connection getConnection() { + assertTrue(getState() != State.IDLE); + Connection pinnedConnection = getPinnedConnection(); + if (pinnedConnection == null) { + return assertNotNull(getConnectionSource()).getConnection(); + } else { + return pinnedConnection.retain(); + } + } + + private void releaseServerResources(final Connection connection) { + try { + ServerCursor localServerCursor = super.getServerCursor(); + if (localServerCursor != null) { + killServerCursor(getNamespace(), localServerCursor, connection); + } + } finally { + unsetServerCursor(); + } + } + + private void killServerCursor(final MongoNamespace namespace, final ServerCursor localServerCursor, + final Connection localConnection) { + 
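+ // The command sent has the shape { "killCursors": "<collection>", "cursors": [ <cursorId> ] };
+ // see CommandBatchCursorHelper.getKillCursorsCommand.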
localConnection.command(namespace.getDatabaseName(), getKillCursorsCommand(namespace, localServerCursor), + NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), new BsonDocumentCodec(), operationContext); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursorHelper.java b/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursorHelper.java new file mode 100644 index 00000000000..2a6e3b061ee --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/CommandBatchCursorHelper.java @@ -0,0 +1,88 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoCommandException; +import com.mongodb.MongoCursorNotFoundException; +import com.mongodb.MongoNamespace; +import com.mongodb.MongoQueryException; +import com.mongodb.ServerCursor; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonInt64; +import org.bson.BsonString; +import org.bson.BsonValue; + +import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; +import static com.mongodb.internal.operation.OperationHelper.LOGGER; +import static com.mongodb.internal.operation.ServerVersionHelper.serverIsAtLeastVersionFourDotFour; +import static java.lang.String.format; +import static java.util.Collections.singletonList; + +final class CommandBatchCursorHelper { + + static final String FIRST_BATCH = "firstBatch"; + static final String NEXT_BATCH = "nextBatch"; + static final String MESSAGE_IF_CLOSED_AS_CURSOR = "Cursor has been closed"; + static final String MESSAGE_IF_CLOSED_AS_ITERATOR = "Iterator has been closed"; + + static final String MESSAGE_IF_CONCURRENT_OPERATION = "Another operation is currently in progress, concurrent operations are not " + + "supported"; + + static BsonDocument getMoreCommandDocument( + final long cursorId, final ConnectionDescription connectionDescription, final MongoNamespace namespace, final int batchSize, + @Nullable final BsonValue comment) { + BsonDocument document = new BsonDocument("getMore", new BsonInt64(cursorId)) + .append("collection", new BsonString(namespace.getCollectionName())); + + if (batchSize != 0) { + document.append("batchSize", new BsonInt32(batchSize)); + } + if (serverIsAtLeastVersionFourDotFour(connectionDescription)) { + putIfNotNull(document, "comment", comment); + } + return document; + } + + static CommandCursorResult logCommandCursorResult(final CommandCursorResult commandCursorResult) { + if (LOGGER.isDebugEnabled()) { + LOGGER.debug(format("Received batch of %d documents with cursorId %d from server %s", commandCursorResult.getResults().size(), + commandCursorResult.getCursorId(), commandCursorResult.getServerAddress())); + } + return commandCursorResult; + } + + static BsonDocument getKillCursorsCommand(final MongoNamespace namespace, final 
ServerCursor serverCursor) { + return new BsonDocument("killCursors", new BsonString(namespace.getCollectionName())) + .append("cursors", new BsonArray(singletonList(new BsonInt64(serverCursor.getId())))); + } + + + static MongoQueryException translateCommandException(final MongoCommandException commandException, final ServerCursor cursor) { + if (commandException.getErrorCode() == 43) { + return new MongoCursorNotFoundException(cursor.getId(), commandException.getResponse(), cursor.getAddress()); + } else { + return new MongoQueryException(commandException.getResponse(), commandException.getServerAddress()); + } + } + + private CommandBatchCursorHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/CommandCursorResult.java b/driver-core/src/main/com/mongodb/internal/operation/CommandCursorResult.java new file mode 100644 index 00000000000..813d8c145cd --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/CommandCursorResult.java @@ -0,0 +1,139 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoNamespace; +import com.mongodb.ServerAddress; +import com.mongodb.ServerCursor; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonTimestamp; + +import java.util.Collections; +import java.util.List; + +import static com.mongodb.assertions.Assertions.isTrue; + +/** + * The command cursor result + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
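+ * <p>A minimal parsing sketch (values illustrative; {@code serverAddress} is assumed to be in scope):</p>
+ * <pre>{@code
+ * BsonDocument reply = BsonDocument.parse(
+ *         "{cursor: {id: {$numberLong: '123'}, ns: 'db.coll', firstBatch: []}}");
+ * CommandCursorResult<BsonDocument> result =
+ *         new CommandCursorResult<>(serverAddress, "firstBatch", reply);
+ * result.getCursorId();   // 123
+ * result.getNamespace();  // db.coll
+ * }</pre>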
+ */ +public class CommandCursorResult { + + private static final String CURSOR = "cursor"; + private static final String POST_BATCH_RESUME_TOKEN = "postBatchResumeToken"; + private static final String OPERATION_TIME = "operationTime"; + private final ServerAddress serverAddress; + private final List results; + private final MongoNamespace namespace; + private final long cursorId; + @Nullable + private final BsonTimestamp operationTime; + @Nullable + private final BsonDocument postBatchResumeToken; + + public CommandCursorResult( + final ServerAddress serverAddress, + final String fieldNameContainingBatch, + final BsonDocument commandCursorDocument) { + isTrue("Contains cursor", commandCursorDocument.isDocument(CURSOR)); + this.serverAddress = serverAddress; + BsonDocument cursorDocument = commandCursorDocument.getDocument(CURSOR); + this.results = BsonDocumentWrapperHelper.toList(cursorDocument, fieldNameContainingBatch); + this.namespace = new MongoNamespace(cursorDocument.getString("ns").getValue()); + this.cursorId = cursorDocument.getNumber("id").longValue(); + this.operationTime = cursorDocument.getTimestamp(OPERATION_TIME, null); + this.postBatchResumeToken = cursorDocument.getDocument(POST_BATCH_RESUME_TOKEN, null); + } + + private CommandCursorResult( + final ServerAddress serverAddress, + final List results, + final MongoNamespace namespace, + final long cursorId, + @Nullable final BsonTimestamp operationTime, + @Nullable final BsonDocument postBatchResumeToken) { + this.serverAddress = serverAddress; + this.results = results; + this.namespace = namespace; + this.cursorId = cursorId; + this.operationTime = operationTime; + this.postBatchResumeToken = postBatchResumeToken; + } + + public static CommandCursorResult withEmptyResults(final CommandCursorResult commandCursorResult) { + return new CommandCursorResult<>( + commandCursorResult.getServerAddress(), + Collections.emptyList(), + commandCursorResult.getNamespace(), + commandCursorResult.getCursorId(), + commandCursorResult.getOperationTime(), + commandCursorResult.getPostBatchResumeToken()); + } + + /** + * Gets the namespace. + * + * @return the namespace + */ + public MongoNamespace getNamespace() { + return namespace; + } + + /** + * Gets the cursor. + * + * @return the cursor, which may be null if it's been exhausted + */ + @Nullable + public ServerCursor getServerCursor() { + return cursorId == 0 ? null : new ServerCursor(cursorId, serverAddress); + } + + /** + * Gets the results. + * + * @return the results + */ + public List getResults() { + return results; + } + + /** + * Gets the server address. + * + * @return the server address + */ + public ServerAddress getServerAddress() { + return serverAddress; + } + + public long getCursorId() { + return cursorId; + } + + @Nullable + public BsonDocument getPostBatchResumeToken() { + return postBatchResumeToken; + } + + @Nullable + public BsonTimestamp getOperationTime() { + return operationTime; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/CommandOperationHelper.java b/driver-core/src/main/com/mongodb/internal/operation/CommandOperationHelper.java new file mode 100644 index 00000000000..db6870f52e8 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/CommandOperationHelper.java @@ -0,0 +1,282 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoClientException; +import com.mongodb.MongoCommandException; +import com.mongodb.MongoConnectionPoolClearedException; +import com.mongodb.MongoException; +import com.mongodb.MongoNodeIsRecoveringException; +import com.mongodb.MongoNotPrimaryException; +import com.mongodb.MongoSecurityException; +import com.mongodb.MongoServerException; +import com.mongodb.MongoSocketException; +import com.mongodb.WriteConcern; +import com.mongodb.assertions.Assertions; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.async.function.RetryState; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.operation.OperationHelper.ResourceSupplierInternalException; +import com.mongodb.internal.operation.retry.AttachmentKeys; +import com.mongodb.internal.session.SessionContext; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; + +import java.util.List; +import java.util.Optional; +import java.util.function.BinaryOperator; +import java.util.function.Supplier; + +import static com.mongodb.assertions.Assertions.assertFalse; +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.internal.operation.OperationHelper.LOGGER; +import static java.lang.String.format; +import static java.util.Arrays.asList; + +@SuppressWarnings("overloads") +final class CommandOperationHelper { + static WriteConcern validateAndGetEffectiveWriteConcern(final WriteConcern writeConcernSetting, final SessionContext sessionContext) + throws MongoClientException { + boolean activeTransaction = sessionContext.hasActiveTransaction(); + WriteConcern effectiveWriteConcern = activeTransaction + ? WriteConcern.ACKNOWLEDGED + : writeConcernSetting; + if (sessionContext.hasSession() && !sessionContext.isImplicitSession() && !activeTransaction && !effectiveWriteConcern.isAcknowledged()) { + throw new MongoClientException("Unacknowledged writes are not supported when using an explicit session"); + } + return effectiveWriteConcern; + } + + static Optional commandWriteConcern(final WriteConcern effectiveWriteConcern, final SessionContext sessionContext) { + return effectiveWriteConcern.isServerDefault() || sessionContext.hasActiveTransaction() + ? 
Optional.empty() + : Optional.of(effectiveWriteConcern); + } + + interface CommandCreator { + BsonDocument create( + OperationContext operationContext, + ServerDescription serverDescription, + ConnectionDescription connectionDescription); + } + + static BinaryOperator onRetryableReadAttemptFailure(final OperationContext operationContext) { + return (@Nullable Throwable previouslyChosenException, Throwable mostRecentAttemptException) -> { + operationContext.getServerDeprioritization().onAttemptFailure(mostRecentAttemptException); + return chooseRetryableReadException(previouslyChosenException, mostRecentAttemptException); + }; + } + + private static Throwable chooseRetryableReadException( + @Nullable final Throwable previouslyChosenException, final Throwable mostRecentAttemptException) { + assertFalse(mostRecentAttemptException instanceof ResourceSupplierInternalException); + if (previouslyChosenException == null + || mostRecentAttemptException instanceof MongoSocketException + || mostRecentAttemptException instanceof MongoServerException) { + return mostRecentAttemptException; + } else { + return previouslyChosenException; + } + } + + static BinaryOperator onRetryableWriteAttemptFailure(final OperationContext operationContext) { + return (@Nullable Throwable previouslyChosenException, Throwable mostRecentAttemptException) -> { + operationContext.getServerDeprioritization().onAttemptFailure(mostRecentAttemptException); + return chooseRetryableWriteException(previouslyChosenException, mostRecentAttemptException); + }; + } + + private static Throwable chooseRetryableWriteException( + @Nullable final Throwable previouslyChosenException, final Throwable mostRecentAttemptException) { + if (previouslyChosenException == null) { + if (mostRecentAttemptException instanceof ResourceSupplierInternalException) { + return mostRecentAttemptException.getCause(); + } + return mostRecentAttemptException; + } else if (mostRecentAttemptException instanceof ResourceSupplierInternalException + || (mostRecentAttemptException instanceof MongoException + && ((MongoException) mostRecentAttemptException).hasErrorLabel(NO_WRITES_PERFORMED_ERROR_LABEL))) { + return previouslyChosenException; + } else { + return mostRecentAttemptException; + } + } + + /* Read Binding Helpers */ + + static RetryState initialRetryState(final boolean retry, final TimeoutContext timeoutContext) { + if (retry) { + return RetryState.withRetryableState(RetryState.RETRIES, timeoutContext); + } + return RetryState.withNonRetryableState(); + } + + private static final List RETRYABLE_ERROR_CODES = asList(6, 7, 89, 91, 134, 189, 262, 9001, 13436, 13435, 11602, 11600, 10107); + static boolean isRetryableException(final Throwable t) { + if (!(t instanceof MongoException)) { + return false; + } + + if (t instanceof MongoSocketException || t instanceof MongoNotPrimaryException || t instanceof MongoNodeIsRecoveringException + || t instanceof MongoConnectionPoolClearedException) { + return true; + } + return RETRYABLE_ERROR_CODES.contains(((MongoException) t).getCode()); + } + + /* Misc operation helpers */ + + static void rethrowIfNotNamespaceError(final MongoCommandException e) { + rethrowIfNotNamespaceError(e, null); + } + + @Nullable + static T rethrowIfNotNamespaceError(final MongoCommandException e, @Nullable final T defaultValue) { + if (!isNamespaceError(e)) { + throw e; + } + return defaultValue; + } + + static boolean isNamespaceError(final Throwable t) { + if (t instanceof MongoCommandException) { + MongoCommandException e = 
(MongoCommandException) t; + return (e.getErrorMessage().contains("ns not found") || e.getErrorCode() == 26); + } else { + return false; + } + } + + static boolean shouldAttemptToRetryRead(final RetryState retryState, final Throwable attemptFailure) { + assertFalse(attemptFailure instanceof ResourceSupplierInternalException); + boolean decision = isRetryableException(attemptFailure) + || (attemptFailure instanceof MongoSecurityException + && attemptFailure.getCause() != null && isRetryableException(attemptFailure.getCause())); + if (!decision) { + logUnableToRetry(retryState.attachment(AttachmentKeys.commandDescriptionSupplier()).orElse(null), attemptFailure); + } + return decision; + } + + static boolean loggingShouldAttemptToRetryWriteAndAddRetryableLabel(final RetryState retryState, final Throwable attemptFailure) { + Throwable attemptFailureNotToBeRetried = getAttemptFailureNotToRetryOrAddRetryableLabel(retryState, attemptFailure); + boolean decision = attemptFailureNotToBeRetried == null; + if (!decision && retryState.attachment(AttachmentKeys.retryableCommandFlag()).orElse(false)) { + logUnableToRetry( + retryState.attachment(AttachmentKeys.commandDescriptionSupplier()).orElse(null), + assertNotNull(attemptFailureNotToBeRetried)); + } + return decision; + } + + static boolean shouldAttemptToRetryWriteAndAddRetryableLabel(final RetryState retryState, final Throwable attemptFailure) { + return getAttemptFailureNotToRetryOrAddRetryableLabel(retryState, attemptFailure) != null; + } + + /** + * @return {@code null} if the decision is {@code true}. Otherwise, returns the {@link Throwable} that must not be retried. + */ + @Nullable + private static Throwable getAttemptFailureNotToRetryOrAddRetryableLabel(final RetryState retryState, final Throwable attemptFailure) { + Throwable failure = attemptFailure instanceof ResourceSupplierInternalException ? attemptFailure.getCause() : attemptFailure; + boolean decision = false; + MongoException exceptionRetryableRegardlessOfCommand = null; + if (failure instanceof MongoConnectionPoolClearedException + || (failure instanceof MongoSecurityException && failure.getCause() != null && isRetryableException(failure.getCause()))) { + decision = true; + exceptionRetryableRegardlessOfCommand = (MongoException) failure; + } + if (retryState.attachment(AttachmentKeys.retryableCommandFlag()).orElse(false)) { + if (exceptionRetryableRegardlessOfCommand != null) { + /* We are going to retry even if `retryableCommand` is false, + * but we add the retryable label only if `retryableCommand` is true. */ + exceptionRetryableRegardlessOfCommand.addLabel(RETRYABLE_WRITE_ERROR_LABEL); + } else if (decideRetryableAndAddRetryableWriteErrorLabel(failure, retryState.attachment(AttachmentKeys.maxWireVersion()) + .orElse(null))) { + decision = true; + } + } + return decision ? 
null : assertNotNull(failure); + } + + static boolean isRetryWritesEnabled(@Nullable final BsonDocument command) { + return (command != null && (command.containsKey("txnNumber") + || command.getFirstKey().equals("commitTransaction") || command.getFirstKey().equals("abortTransaction"))); + } + + static final String RETRYABLE_WRITE_ERROR_LABEL = "RetryableWriteError"; + private static final String NO_WRITES_PERFORMED_ERROR_LABEL = "NoWritesPerformed"; + + private static boolean decideRetryableAndAddRetryableWriteErrorLabel(final Throwable t, @Nullable final Integer maxWireVersion) { + if (!(t instanceof MongoException)) { + return false; + } + MongoException exception = (MongoException) t; + if (maxWireVersion != null) { + addRetryableWriteErrorLabel(exception, maxWireVersion); + } + return exception.hasErrorLabel(RETRYABLE_WRITE_ERROR_LABEL); + } + + static void addRetryableWriteErrorLabel(final MongoException exception, final int maxWireVersion) { + if (maxWireVersion >= 9 && exception instanceof MongoSocketException) { + exception.addLabel(RETRYABLE_WRITE_ERROR_LABEL); + } else if (maxWireVersion < 9 && isRetryableException(exception)) { + exception.addLabel(RETRYABLE_WRITE_ERROR_LABEL); + } + } + + static void logRetryExecute(final RetryState retryState, final OperationContext operationContext) { + if (LOGGER.isDebugEnabled() && !retryState.isFirstAttempt()) { + String commandDescription = retryState.attachment(AttachmentKeys.commandDescriptionSupplier()).map(Supplier::get).orElse(null); + Throwable exception = retryState.exception().orElseThrow(Assertions::fail); + int oneBasedAttempt = retryState.attempt() + 1; + long operationId = operationContext.getId(); + LOGGER.debug(commandDescription == null + ? format("Retrying the operation with operation ID %s due to the error \"%s\". Attempt number: #%d", + operationId, exception, oneBasedAttempt) + : format("Retrying the operation '%s' with operation ID %s due to the error \"%s\". Attempt number: #%d", + commandDescription, operationId, exception, oneBasedAttempt)); + } + } + + private static void logUnableToRetry(@Nullable final Supplier commandDescriptionSupplier, final Throwable originalError) { + if (LOGGER.isDebugEnabled()) { + String commandDescription = commandDescriptionSupplier == null ? null : commandDescriptionSupplier.get(); + LOGGER.debug(commandDescription == null + ? format("Unable to retry an operation due to the error \"%s\"", originalError) + : format("Unable to retry the operation %s due to the error \"%s\"", commandDescription, originalError)); + } + } + + static MongoException transformWriteException(final MongoException exception) { + if (exception.getCode() == 20 && exception.getMessage().contains("Transaction numbers")) { + MongoException clientException = new MongoClientException("This MongoDB deployment does not support retryable writes. " + + "Please add retryWrites=false to your connection string.", exception); + for (final String errorLabel : exception.getErrorLabels()) { + clientException.addLabel(errorLabel); + } + return clientException; + } + return exception; + } + + private CommandOperationHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/CommandReadOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CommandReadOperation.java new file mode 100644 index 00000000000..6965bfc34a3 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/CommandReadOperation.java @@ -0,0 +1,69 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncReadBinding; +import com.mongodb.internal.binding.ReadBinding; +import org.bson.BsonDocument; +import org.bson.codecs.Decoder; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.operation.AsyncOperationHelper.executeRetryableReadAsync; +import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; +import static com.mongodb.internal.operation.SyncOperationHelper.executeRetryableRead; + +/** + * An operation that executes an arbitrary command that reads from the server. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
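+ * <p>A hypothetical usage sketch ({@code readBinding} is assumed to be supplied by the caller):</p>
+ * <pre>{@code
+ * CommandReadOperation<BsonDocument> operation = new CommandReadOperation<>(
+ *         "admin", BsonDocument.parse("{ping: 1}"), new BsonDocumentCodec());
+ * BsonDocument reply = operation.execute(readBinding);
+ * }</pre>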
+ */ +public class CommandReadOperation implements ReadOperationSimple { + private final String commandName; + private final String databaseName; + private final CommandCreator commandCreator; + private final Decoder decoder; + + public CommandReadOperation(final String databaseName, final BsonDocument command, final Decoder decoder) { + this(databaseName, command.getFirstKey(), (operationContext, serverDescription, connectionDescription) -> command, decoder); + } + + public CommandReadOperation(final String databaseName, final String commandName, final CommandCreator commandCreator, + final Decoder decoder) { + this.commandName = notNull("commandName", commandName); + this.databaseName = notNull("databaseName", databaseName); + this.commandCreator = notNull("commandCreator", commandCreator); + this.decoder = notNull("decoder", decoder); + } + + @Override + public String getCommandName() { + return commandName; + } + + @Override + public T execute(final ReadBinding binding) { + return executeRetryableRead(binding, databaseName, commandCreator, decoder, + (result, source, connection) -> result, false); + } + + @Override + public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback callback) { + executeRetryableReadAsync(binding, databaseName, commandCreator, decoder, + (result, source, connection) -> result, false, callback); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/CommandResultArrayCodec.java b/driver-core/src/main/com/mongodb/internal/operation/CommandResultArrayCodec.java new file mode 100644 index 00000000000..365bf213f02 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/CommandResultArrayCodec.java @@ -0,0 +1,68 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation; + +import org.bson.BsonArray; +import org.bson.BsonDocumentWrapper; +import org.bson.BsonReader; +import org.bson.BsonType; +import org.bson.BsonValue; +import org.bson.codecs.BsonArrayCodec; +import org.bson.codecs.Decoder; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.configuration.CodecRegistry; + +import java.util.ArrayList; +import java.util.List; + +import static org.bson.BsonType.DOCUMENT; + +class CommandResultArrayCodec extends BsonArrayCodec { + private final Decoder decoder; + + CommandResultArrayCodec(final CodecRegistry registry, final Decoder decoder) { + super(registry); + this.decoder = decoder; + } + + @Override + public BsonArray decode(final BsonReader reader, final DecoderContext decoderContext) { + reader.readStartArray(); + + List list = new ArrayList<>(); + while (reader.readBsonType() != BsonType.END_OF_DOCUMENT) { + if (reader.getCurrentBsonType() == BsonType.NULL) { + reader.readNull(); + list.add(null); + } else { + list.add(decoder.decode(reader, decoderContext)); + } + } + reader.readEndArray(); + + return new BsonArrayWrapper<>(list); + } + + @Override + protected BsonValue readValue(final BsonReader reader, final DecoderContext decoderContext) { + if (reader.getCurrentBsonType() == DOCUMENT) { + return new BsonDocumentWrapper<>(decoder.decode(reader, decoderContext), null); + } else { + return super.readValue(reader, decoderContext); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/CommandResultCodecProvider.java b/driver-core/src/main/com/mongodb/internal/operation/CommandResultCodecProvider.java new file mode 100644 index 00000000000..32ca8c68310 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/CommandResultCodecProvider.java @@ -0,0 +1,134 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation; + +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonValue; +import org.bson.codecs.BsonArrayCodec; +import org.bson.codecs.BsonBinaryCodec; +import org.bson.codecs.BsonBooleanCodec; +import org.bson.codecs.BsonDBPointerCodec; +import org.bson.codecs.BsonDateTimeCodec; +import org.bson.codecs.BsonDecimal128Codec; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.BsonDoubleCodec; +import org.bson.codecs.BsonInt32Codec; +import org.bson.codecs.BsonInt64Codec; +import org.bson.codecs.BsonJavaScriptCodec; +import org.bson.codecs.BsonJavaScriptWithScopeCodec; +import org.bson.codecs.BsonMaxKeyCodec; +import org.bson.codecs.BsonMinKeyCodec; +import org.bson.codecs.BsonNullCodec; +import org.bson.codecs.BsonObjectIdCodec; +import org.bson.codecs.BsonRegularExpressionCodec; +import org.bson.codecs.BsonStringCodec; +import org.bson.codecs.BsonSymbolCodec; +import org.bson.codecs.BsonTimestampCodec; +import org.bson.codecs.BsonUndefinedCodec; +import org.bson.codecs.Codec; +import org.bson.codecs.Decoder; +import org.bson.codecs.configuration.CodecProvider; +import org.bson.codecs.configuration.CodecRegistry; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +class CommandResultCodecProvider
<P> implements CodecProvider { + private final Map<Class<?>, Codec<?>> codecs = new HashMap<>(); + private final Decoder<P> payloadDecoder; + private final List<String> fieldsContainingPayload; + + CommandResultCodecProvider(final Decoder<P>
payloadDecoder, final List fieldContainingPayload) { + this.payloadDecoder = payloadDecoder; + this.fieldsContainingPayload = fieldContainingPayload; + addCodecs(); + } + + @Override + @SuppressWarnings("unchecked") + public Codec get(final Class clazz, final CodecRegistry registry) { + if (codecs.containsKey(clazz)) { + return (Codec) codecs.get(clazz); + } + + if (clazz == BsonArray.class) { + return (Codec) new BsonArrayCodec(registry); + } + + if (clazz == BsonDocument.class) { + return (Codec) new CommandResultDocumentCodec<>(registry, payloadDecoder, fieldsContainingPayload); + } + + return null; + } + + private void addCodecs() { + addCodec(new BsonNullCodec()); + addCodec(new BsonBinaryCodec()); + addCodec(new BsonBooleanCodec()); + addCodec(new BsonDateTimeCodec()); + addCodec(new BsonDBPointerCodec()); + addCodec(new BsonDoubleCodec()); + addCodec(new BsonInt32Codec()); + addCodec(new BsonInt64Codec()); + addCodec(new BsonDecimal128Codec()); + addCodec(new BsonMinKeyCodec()); + addCodec(new BsonMaxKeyCodec()); + addCodec(new BsonJavaScriptCodec()); + addCodec(new BsonObjectIdCodec()); + addCodec(new BsonRegularExpressionCodec()); + addCodec(new BsonStringCodec()); + addCodec(new BsonSymbolCodec()); + addCodec(new BsonTimestampCodec()); + addCodec(new BsonUndefinedCodec()); + addCodec(new BsonJavaScriptWithScopeCodec(new BsonDocumentCodec())); + } + + private void addCodec(final Codec codec) { + codecs.put(codec.getEncoderClass(), codec); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CommandResultCodecProvider that = (CommandResultCodecProvider) o; + + if (!fieldsContainingPayload.equals(that.fieldsContainingPayload)) { + return false; + } + if (!payloadDecoder.getClass().equals(that.payloadDecoder.getClass())) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = payloadDecoder.getClass().hashCode(); + result = 31 * result + fieldsContainingPayload.hashCode(); + return result; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/CommandResultDocumentCodec.java b/driver-core/src/main/com/mongodb/internal/operation/CommandResultDocumentCodec.java new file mode 100644 index 00000000000..769e5fafdb1 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/CommandResultDocumentCodec.java @@ -0,0 +1,66 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation; + +import org.bson.BsonDocument; +import org.bson.BsonDocumentWrapper; +import org.bson.BsonReader; +import org.bson.BsonType; +import org.bson.BsonValue; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.Codec; +import org.bson.codecs.Decoder; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.configuration.CodecRegistry; + +import java.util.Collections; +import java.util.List; + +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; + +class CommandResultDocumentCodec extends BsonDocumentCodec { + private final Decoder payloadDecoder; + private final List fieldsContainingPayload; + + CommandResultDocumentCodec(final CodecRegistry registry, final Decoder payloadDecoder, final List fieldsContainingPayload) { + super(registry); + this.payloadDecoder = payloadDecoder; + this.fieldsContainingPayload = fieldsContainingPayload; + } + + static
<P> Codec<BsonDocument> create(final Decoder<P> decoder, final String fieldContainingPayload) { + return create(decoder, Collections.singletonList(fieldContainingPayload)); + } + + static <P> Codec<BsonDocument> create(final Decoder<P>
decoder, final List fieldsContainingPayload) { + CodecRegistry registry = fromProviders(new CommandResultCodecProvider<>(decoder, fieldsContainingPayload)); + return registry.get(BsonDocument.class); + } + + @Override + protected BsonValue readValue(final BsonReader reader, final DecoderContext decoderContext) { + if (fieldsContainingPayload.contains(reader.getCurrentName())) { + if (reader.getCurrentBsonType() == BsonType.DOCUMENT) { + return new BsonDocumentWrapper<>(payloadDecoder.decode(reader, decoderContext), null); + } else if (reader.getCurrentBsonType() == BsonType.ARRAY) { + return new CommandResultArrayCodec<>(getCodecRegistry(), payloadDecoder).decode(reader, decoderContext); + } + } + return super.readValue(reader, decoderContext); + } +} + diff --git a/driver-core/src/main/com/mongodb/internal/operation/CommitTransactionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CommitTransactionOperation.java new file mode 100644 index 00000000000..998a002f348 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/CommitTransactionOperation.java @@ -0,0 +1,153 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.Function; +import com.mongodb.MongoException; +import com.mongodb.MongoExecutionTimeoutException; +import com.mongodb.MongoNodeIsRecoveringException; +import com.mongodb.MongoNotPrimaryException; +import com.mongodb.MongoSocketException; +import com.mongodb.MongoTimeoutException; +import com.mongodb.MongoWriteConcernException; +import com.mongodb.WriteConcern; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncWriteBinding; +import com.mongodb.internal.binding.WriteBinding; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; + +import java.util.List; + +import static com.mongodb.MongoException.UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL; +import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; +import static com.mongodb.internal.operation.CommandOperationHelper.RETRYABLE_WRITE_ERROR_LABEL; +import static java.util.Arrays.asList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +/** + * An operation that commits a transaction. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
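+ * <p>Illustrative sketch only: when the commit is retried, the retry command modifier upgrades the
+ * write concern, so a first attempt of {@code {commitTransaction: 1}} may become</p>
+ * <pre>{@code
+ * {commitTransaction: 1, writeConcern: {w: "majority", wtimeout: 10000}}
+ * }</pre>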
+ */ +public class CommitTransactionOperation extends TransactionOperation { + private static final String COMMAND_NAME = "commitTransaction"; + private final boolean alreadyCommitted; + private BsonDocument recoveryToken; + + public CommitTransactionOperation(final WriteConcern writeConcern) { + this(writeConcern, false); + } + + public CommitTransactionOperation(final WriteConcern writeConcern, final boolean alreadyCommitted) { + super(writeConcern); + this.alreadyCommitted = alreadyCommitted; + } + + public CommitTransactionOperation recoveryToken(@Nullable final BsonDocument recoveryToken) { + this.recoveryToken = recoveryToken; + return this; + } + + @Override + public Void execute(final WriteBinding binding) { + try { + return super.execute(binding); + } catch (MongoException e) { + addErrorLabels(e); + throw e; + } + } + + @Override + public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback callback) { + super.executeAsync(binding, (result, t) -> { + if (t instanceof MongoException) { + addErrorLabels((MongoException) t); + } + callback.onResult(result, t); + }); + } + + private void addErrorLabels(final MongoException e) { + if (shouldAddUnknownTransactionCommitResultLabel(e)) { + e.addLabel(UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL); + } + } + + private static final List NON_RETRYABLE_WRITE_CONCERN_ERROR_CODES = asList(79, 100); + + private static boolean shouldAddUnknownTransactionCommitResultLabel(final MongoException e) { + + if (e instanceof MongoSocketException || e instanceof MongoTimeoutException + || e instanceof MongoNotPrimaryException || e instanceof MongoNodeIsRecoveringException + || e instanceof MongoExecutionTimeoutException) { + return true; + } + + if (e.hasErrorLabel(RETRYABLE_WRITE_ERROR_LABEL)) { + return true; + } + + if (e instanceof MongoWriteConcernException) { + return !NON_RETRYABLE_WRITE_CONCERN_ERROR_CODES.contains(e.getCode()); + } + + return false; + } + + @Override + public String getCommandName() { + return COMMAND_NAME; + } + + @Override + CommandCreator getCommandCreator() { + CommandCreator creator = (operationContext, serverDescription, connectionDescription) -> { + BsonDocument command = CommitTransactionOperation.super.getCommandCreator() + .create(operationContext, serverDescription, connectionDescription); + operationContext.getTimeoutContext().setMaxTimeOverrideToMaxCommitTime(); + return command; + }; + if (alreadyCommitted) { + return (operationContext, serverDescription, connectionDescription) -> + getRetryCommandModifier(operationContext.getTimeoutContext()) + .apply(creator.create(operationContext, serverDescription, connectionDescription)); + } else if (recoveryToken != null) { + return (operationContext, serverDescription, connectionDescription) -> + creator.create(operationContext, serverDescription, connectionDescription) + .append("recoveryToken", recoveryToken); + } + return creator; + } + + @Override + protected Function getRetryCommandModifier(final TimeoutContext timeoutContext) { + return command -> { + WriteConcern retryWriteConcern = getWriteConcern().withW("majority"); + if (retryWriteConcern.getWTimeout(MILLISECONDS) == null && !timeoutContext.hasTimeoutMS()) { + retryWriteConcern = retryWriteConcern.withWTimeout(10000, MILLISECONDS); + } + command.put("writeConcern", retryWriteConcern.asDocument()); + if (recoveryToken != null) { + command.put("recoveryToken", recoveryToken); + } + return command; + }; + } +} diff --git 
a/driver-core/src/main/com/mongodb/internal/operation/CountDocumentsOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CountDocumentsOperation.java new file mode 100644 index 00000000000..9460026062a --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/CountDocumentsOperation.java @@ -0,0 +1,181 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoNamespace; +import com.mongodb.client.model.Collation; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncReadBinding; +import com.mongodb.internal.binding.ReadBinding; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonInt64; +import org.bson.BsonValue; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.Decoder; + +import java.util.ArrayList; +import java.util.List; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
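+ * <p>The count is computed with an aggregation; a sketch of the pipeline built for a filter with skip
+ * and limit (placeholders in angle brackets):</p>
+ * <pre>{@code
+ * [{$match: <filter>}, {$skip: <skip>}, {$limit: <limit>}, {$group: {_id: 1, n: {$sum: 1}}}]
+ * }</pre>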
+ */ +public class CountDocumentsOperation implements ReadOperationSimple { + private static final String COMMAND_NAME = "aggregate"; + private static final Decoder DECODER = new BsonDocumentCodec(); + private final MongoNamespace namespace; + private boolean retryReads; + private BsonDocument filter; + private BsonValue hint; + private BsonValue comment; + private long skip; + private long limit; + private Collation collation; + + public CountDocumentsOperation(final MongoNamespace namespace) { + this.namespace = notNull("namespace", namespace); + } + + @Nullable + public BsonDocument getFilter() { + return filter; + } + + public CountDocumentsOperation filter(@Nullable final BsonDocument filter) { + this.filter = filter; + return this; + } + + public CountDocumentsOperation retryReads(final boolean retryReads) { + this.retryReads = retryReads; + return this; + } + + public boolean getRetryReads() { + return retryReads; + } + + @Nullable + public BsonValue getHint() { + return hint; + } + + public CountDocumentsOperation hint(@Nullable final BsonValue hint) { + this.hint = hint; + return this; + } + + public long getLimit() { + return limit; + } + + public CountDocumentsOperation limit(final long limit) { + this.limit = limit; + return this; + } + + public long getSkip() { + return skip; + } + + public CountDocumentsOperation skip(final long skip) { + this.skip = skip; + return this; + } + + @Nullable + public Collation getCollation() { + return collation; + } + + public CountDocumentsOperation collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + @Nullable + public BsonValue getComment() { + return comment; + } + + public CountDocumentsOperation comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + @Override + public String getCommandName() { + return COMMAND_NAME; + } + + @Override + public Long execute(final ReadBinding binding) { + try (BatchCursor cursor = getAggregateOperation().execute(binding)) { + return cursor.hasNext() ? getCountFromAggregateResults(cursor.next()) : 0; + } + } + + @Override + public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback callback) { + getAggregateOperation().executeAsync(binding, (result, t) -> { + if (t != null) { + callback.onResult(null, t); + } else { + result.next((result1, t1) -> { + if (t1 != null) { + callback.onResult(null, t1); + } else { + callback.onResult(getCountFromAggregateResults(result1), null); + } + }); + } + }); + } + + private AggregateOperation getAggregateOperation() { + return new AggregateOperation<>(namespace, getPipeline(), DECODER) + .retryReads(retryReads) + .collation(collation) + .comment(comment) + .hint(hint); + } + + private List getPipeline() { + ArrayList pipeline = new ArrayList<>(); + pipeline.add(new BsonDocument("$match", filter != null ? 
filter : new BsonDocument())); + if (skip > 0) { + pipeline.add(new BsonDocument("$skip", new BsonInt64(skip))); + } + if (limit > 0) { + pipeline.add(new BsonDocument("$limit", new BsonInt64(limit))); + } + pipeline.add(new BsonDocument("$group", new BsonDocument("_id", new BsonInt32(1)) + .append("n", new BsonDocument("$sum", new BsonInt32(1))))); + return pipeline; + } + + private Long getCountFromAggregateResults(@Nullable final List results) { + if (results == null || results.isEmpty()) { + return 0L; + } else { + return results.get(0).getNumber("n").longValue(); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/CountOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CountOperation.java new file mode 100644 index 00000000000..6d0b7b78f93 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/CountOperation.java @@ -0,0 +1,155 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoNamespace; +import com.mongodb.client.model.Collation; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncReadBinding; +import com.mongodb.internal.binding.ReadBinding; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.Decoder; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync; +import static com.mongodb.internal.operation.AsyncOperationHelper.executeRetryableReadAsync; +import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; +import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; +import static com.mongodb.internal.operation.DocumentHelper.putIfNotZero; +import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand; +import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer; +import static com.mongodb.internal.operation.SyncOperationHelper.executeRetryableRead; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
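+ * <p>A sketch of the count command this operation builds (placeholders in angle brackets; optional
+ * fields are omitted when unset):</p>
+ * <pre>{@code
+ * {count: <collectionName>, query: <filter>, limit: <limit>, skip: <skip>, hint: <hint>, collation: <collation>}
+ * }</pre>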
+ */ +public class CountOperation implements ReadOperationSimple { + private static final String COMMAND_NAME = "count"; + private static final Decoder DECODER = new BsonDocumentCodec(); + private final MongoNamespace namespace; + private boolean retryReads; + private BsonDocument filter; + private BsonValue hint; + private long skip; + private long limit; + private Collation collation; + + public CountOperation(final MongoNamespace namespace) { + this.namespace = notNull("namespace", namespace); + } + + public BsonDocument getFilter() { + return filter; + } + + public CountOperation filter(final BsonDocument filter) { + this.filter = filter; + return this; + } + + public CountOperation retryReads(final boolean retryReads) { + this.retryReads = retryReads; + return this; + } + + public boolean getRetryReads() { + return retryReads; + } + + public BsonValue getHint() { + return hint; + } + + public CountOperation hint(final BsonValue hint) { + this.hint = hint; + return this; + } + + public long getLimit() { + return limit; + } + + public CountOperation limit(final long limit) { + this.limit = limit; + return this; + } + + public long getSkip() { + return skip; + } + + public CountOperation skip(final long skip) { + this.skip = skip; + return this; + } + + public Collation getCollation() { + return collation; + } + + public CountOperation collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + @Override + public String getCommandName() { + return COMMAND_NAME; + } + + @Override + public Long execute(final ReadBinding binding) { + return executeRetryableRead(binding, namespace.getDatabaseName(), + getCommandCreator(), DECODER, transformer(), retryReads); + } + + @Override + public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback callback) { + executeRetryableReadAsync(binding, namespace.getDatabaseName(), + getCommandCreator(), DECODER, asyncTransformer(), retryReads, callback); + } + + private CommandReadTransformer transformer() { + return (result, source, connection) -> (result.getNumber("n")).longValue(); + } + + private CommandReadTransformerAsync asyncTransformer() { + return (result, source, connection) -> (result.getNumber("n")).longValue(); + } + + private CommandCreator getCommandCreator() { + return (operationContext, serverDescription, connectionDescription) -> { + BsonDocument document = new BsonDocument(getCommandName(), new BsonString(namespace.getCollectionName())); + + appendReadConcernToCommand(operationContext.getSessionContext(), connectionDescription.getMaxWireVersion(), document); + + putIfNotNull(document, "query", filter); + putIfNotZero(document, "limit", limit); + putIfNotZero(document, "skip", skip); + putIfNotNull(document, "hint", hint); + + if (collation != null) { + document.put("collation", collation.asDocument()); + } + return document; + }; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/CreateCollectionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CreateCollectionOperation.java new file mode 100644 index 00000000000..5284076eecb --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/CreateCollectionOperation.java @@ -0,0 +1,434 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoClientException; +import com.mongodb.MongoException; +import com.mongodb.WriteConcern; +import com.mongodb.client.model.ChangeStreamPreAndPostImagesOptions; +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.TimeSeriesGranularity; +import com.mongodb.client.model.TimeSeriesOptions; +import com.mongodb.client.model.ValidationAction; +import com.mongodb.client.model.ValidationLevel; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncWriteBinding; +import com.mongodb.internal.binding.WriteBinding; +import com.mongodb.internal.connection.AsyncConnection; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt64; +import org.bson.BsonString; + +import java.util.ArrayDeque; +import java.util.Deque; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; +import static com.mongodb.internal.operation.AsyncOperationHelper.executeCommandAsync; +import static com.mongodb.internal.operation.AsyncOperationHelper.releasingCallback; +import static com.mongodb.internal.operation.AsyncOperationHelper.withAsyncConnection; +import static com.mongodb.internal.operation.AsyncOperationHelper.writeConcernErrorTransformerAsync; +import static com.mongodb.internal.operation.DocumentHelper.putIfFalse; +import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; +import static com.mongodb.internal.operation.DocumentHelper.putIfNotZero; +import static com.mongodb.internal.operation.OperationHelper.LOGGER; +import static com.mongodb.internal.operation.ServerVersionHelper.serverIsLessThanVersionSevenDotZero; +import static com.mongodb.internal.operation.SyncOperationHelper.executeCommand; +import static com.mongodb.internal.operation.SyncOperationHelper.withConnection; +import static com.mongodb.internal.operation.SyncOperationHelper.writeConcernErrorTransformer; +import static com.mongodb.internal.operation.WriteConcernHelper.appendWriteConcernToCommand; +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; + +/** + * An operation to create a collection + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
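+ * <p>A hypothetical usage sketch ({@code writeBinding} is assumed to be supplied by the caller):</p>
+ * <pre>{@code
+ * new CreateCollectionOperation("db", "coll", WriteConcern.MAJORITY)
+ *         .capped(true)
+ *         .sizeInBytes(1024 * 1024)
+ *         .execute(writeBinding);
+ * }</pre>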
+ */ +public class CreateCollectionOperation implements WriteOperation { + private static final String ENCRYPT_PREFIX = "enxcol_."; + private static final BsonDocument ENCRYPT_CLUSTERED_INDEX = BsonDocument.parse("{key: {_id: 1}, unique: true}"); + private static final BsonArray SAFE_CONTENT_ARRAY = new BsonArray( + singletonList(BsonDocument.parse("{key: {__safeContent__: 1}, name: '__safeContent___1'}"))); + private final String databaseName; + private final String collectionName; + private final WriteConcern writeConcern; + private boolean capped = false; + private long sizeInBytes = 0; + private boolean autoIndex = true; + private long maxDocuments = 0; + private BsonDocument storageEngineOptions; + private BsonDocument indexOptionDefaults; + private BsonDocument validator; + private ValidationLevel validationLevel = null; + private ValidationAction validationAction = null; + private Collation collation = null; + private long expireAfterSeconds; + private TimeSeriesOptions timeSeriesOptions; + private ChangeStreamPreAndPostImagesOptions changeStreamPreAndPostImagesOptions; + private BsonDocument clusteredIndexKey; + private boolean clusteredIndexUnique; + private String clusteredIndexName; + private BsonDocument encryptedFields; + + public CreateCollectionOperation(final String databaseName, final String collectionName, @Nullable final WriteConcern writeConcern) { + this.databaseName = notNull("databaseName", databaseName); + this.collectionName = notNull("collectionName", collectionName); + this.writeConcern = writeConcern; + } + + public String getCollectionName() { + return collectionName; + } + + public WriteConcern getWriteConcern() { + return writeConcern; + } + + public boolean isAutoIndex() { + return autoIndex; + } + + public CreateCollectionOperation autoIndex(final boolean autoIndex) { + this.autoIndex = autoIndex; + return this; + } + + public long getMaxDocuments() { + return maxDocuments; + } + + public CreateCollectionOperation maxDocuments(final long maxDocuments) { + this.maxDocuments = maxDocuments; + return this; + } + + public boolean isCapped() { + return capped; + } + + public CreateCollectionOperation capped(final boolean capped) { + this.capped = capped; + return this; + } + + public long getSizeInBytes() { + return sizeInBytes; + } + + public CreateCollectionOperation sizeInBytes(final long sizeInBytes) { + this.sizeInBytes = sizeInBytes; + return this; + } + + public BsonDocument getStorageEngineOptions() { + return storageEngineOptions; + } + + public CreateCollectionOperation storageEngineOptions(@Nullable final BsonDocument storageEngineOptions) { + this.storageEngineOptions = storageEngineOptions; + return this; + } + + public BsonDocument getIndexOptionDefaults() { + return indexOptionDefaults; + } + + public CreateCollectionOperation indexOptionDefaults(@Nullable final BsonDocument indexOptionDefaults) { + this.indexOptionDefaults = indexOptionDefaults; + return this; + } + + public BsonDocument getValidator() { + return validator; + } + + public CreateCollectionOperation validator(@Nullable final BsonDocument validator) { + this.validator = validator; + return this; + } + + public ValidationLevel getValidationLevel() { + return validationLevel; + } + + public CreateCollectionOperation validationLevel(@Nullable final ValidationLevel validationLevel) { + this.validationLevel = validationLevel; + return this; + } + + public ValidationAction getValidationAction() { + return validationAction; + } + + public CreateCollectionOperation 
validationAction(@Nullable final ValidationAction validationAction) { + this.validationAction = validationAction; + return this; + } + + public Collation getCollation() { + return collation; + } + + public CreateCollectionOperation collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + public CreateCollectionOperation expireAfter(final long expireAfterSeconds) { + this.expireAfterSeconds = expireAfterSeconds; + return this; + } + + public CreateCollectionOperation timeSeriesOptions(@Nullable final TimeSeriesOptions timeSeriesOptions) { + this.timeSeriesOptions = timeSeriesOptions; + return this; + } + + public CreateCollectionOperation changeStreamPreAndPostImagesOptions( + @Nullable final ChangeStreamPreAndPostImagesOptions changeStreamPreAndPostImagesOptions) { + this.changeStreamPreAndPostImagesOptions = changeStreamPreAndPostImagesOptions; + return this; + } + + public CreateCollectionOperation clusteredIndexKey(@Nullable final BsonDocument clusteredIndexKey) { + this.clusteredIndexKey = clusteredIndexKey; + return this; + } + + public CreateCollectionOperation clusteredIndexUnique(final boolean clusteredIndexUnique) { + this.clusteredIndexUnique = clusteredIndexUnique; + return this; + } + + public CreateCollectionOperation clusteredIndexName(@Nullable final String clusteredIndexName) { + this.clusteredIndexName = clusteredIndexName; + return this; + } + public CreateCollectionOperation encryptedFields(@Nullable final BsonDocument encryptedFields) { + this.encryptedFields = encryptedFields; + return this; + } + + @Override + public String getCommandName() { + return "createCollection"; + } + + @Override + public Void execute(final WriteBinding binding) { + return withConnection(binding, connection -> { + checkEncryptedFieldsSupported(connection.getDescription()); + getCommandFunctions().forEach(commandCreator -> + executeCommand(binding, databaseName, commandCreator.get(), connection, + writeConcernErrorTransformer(binding.getOperationContext().getTimeoutContext())) + ); + return null; + }); + } + + @Override + public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback callback) { + withAsyncConnection(binding, (connection, t) -> { + SingleResultCallback errHandlingCallback = errorHandlingCallback(callback, LOGGER); + if (t != null) { + errHandlingCallback.onResult(null, t); + } else { + SingleResultCallback releasingCallback = releasingCallback(errHandlingCallback, connection); + if (!checkEncryptedFieldsSupported(connection.getDescription(), releasingCallback)) { + return; + } + new ProcessCommandsCallback(binding, connection, releasingCallback) + .onResult(null, null); + } + }); + } + + private String getGranularityAsString(final TimeSeriesGranularity granularity) { + switch (granularity) { + case SECONDS: + return "seconds"; + case MINUTES: + return "minutes"; + case HOURS: + return "hours"; + default: + throw new AssertionError("Unexpected granularity " + granularity); + } + } + + /** + * With Queryable Encryption creating a collection can involve more logic and commands. + * + *
<p>
+ * If the collection namespace has an associated encryptedFields, then do the following operations.
+ * If any of the following operations error, the remaining operations are not attempted:
+ * <ol>
+ *   <li>Create the collection with name encryptedFields["escCollection"] using default options.
+ *   If encryptedFields["escCollection"] is not set, use the collection name {@code enxcol_.<collectionName>.esc}.
+ *   Creating this collection MUST NOT check if the collection namespace is in the AutoEncryptionOpts.encryptedFieldsMap.</li>
+ *   <li>Create the collection with name encryptedFields["ecocCollection"] using default options.
+ *   If encryptedFields["ecocCollection"] is not set, use the collection name {@code enxcol_.<collectionName>.ecoc}.
+ *   Creating this collection MUST NOT check if the collection namespace is in the AutoEncryptionOpts.encryptedFieldsMap.</li>
+ *   <li>Create the collection collectionName with collectionOptions and the option encryptedFields set to the encryptedFields.</li>
+ *   <li>Create the index {"__safeContent__": 1} on collection collectionName.</li>
+ * </ol>
+ * (a standalone sketch of the resulting command sequence follows below)
+ *
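A minimal, self-contained sketch of the four commands above, using only the org.bson API; the collection name `users` and the empty `fields` array are illustrative, and the default esc/ecoc names follow the `enxcol_` prefix used by this operation:

```java
import java.util.Arrays;
import java.util.List;
import org.bson.BsonDocument;

public final class QueryableEncryptionCreateSketch {
    public static void main(final String[] args) {
        String coll = "users"; // hypothetical collection name
        List<BsonDocument> commands = Arrays.asList(
                // 1. auxiliary state collection, clustered on _id
                BsonDocument.parse("{create: 'enxcol_." + coll + ".esc', clusteredIndex: {key: {_id: 1}, unique: true}}"),
                // 2. auxiliary compaction collection, same clustered index
                BsonDocument.parse("{create: 'enxcol_." + coll + ".ecoc', clusteredIndex: {key: {_id: 1}, unique: true}}"),
                // 3. the data collection itself, carrying the encryptedFields option
                BsonDocument.parse("{create: '" + coll + "', encryptedFields: {fields: []}}"),
                // 4. the marker index used when reading encrypted fields
                BsonDocument.parse("{createIndexes: '" + coll + "', indexes: [{key: {__safeContent__: 1}, name: '__safeContent___1'}]}"));
        commands.forEach(command -> System.out.println(command.toJson()));
    }
}
```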
+ * @return the list of commands to run to create the collection + */ + private List> getCommandFunctions() { + if (encryptedFields == null) { + return singletonList(this::getCreateCollectionCommand); + } + return asList( + () -> getCreateEncryptedFieldsCollectionCommand("esc"), + () -> getCreateEncryptedFieldsCollectionCommand("ecoc"), + this::getCreateCollectionCommand, + () -> new BsonDocument("createIndexes", new BsonString(collectionName)) + .append("indexes", SAFE_CONTENT_ARRAY) + ); + } + + private BsonDocument getCreateEncryptedFieldsCollectionCommand(final String collectionSuffix) { + return new BsonDocument() + .append("create", encryptedFields + .getOrDefault(collectionSuffix + "Collection", + new BsonString(ENCRYPT_PREFIX + collectionName + "." + collectionSuffix))) + .append("clusteredIndex", ENCRYPT_CLUSTERED_INDEX); + } + + private BsonDocument getCreateCollectionCommand() { + BsonDocument document = new BsonDocument("create", new BsonString(collectionName)); + putIfFalse(document, "autoIndexId", autoIndex); + document.put("capped", BsonBoolean.valueOf(capped)); + if (capped) { + putIfNotZero(document, "size", sizeInBytes); + putIfNotZero(document, "max", maxDocuments); + } + putIfNotNull(document, "storageEngine", storageEngineOptions); + putIfNotNull(document, "indexOptionDefaults", indexOptionDefaults); + putIfNotNull(document, "validator", validator); + if (validationLevel != null) { + document.put("validationLevel", new BsonString(validationLevel.getValue())); + } + if (validationAction != null) { + document.put("validationAction", new BsonString(validationAction.getValue())); + } + appendWriteConcernToCommand(writeConcern, document); + if (collation != null) { + document.put("collation", collation.asDocument()); + } + putIfNotZero(document, "expireAfterSeconds", expireAfterSeconds); + if (timeSeriesOptions != null) { + BsonDocument timeSeriesDocument = new BsonDocument("timeField", new BsonString(timeSeriesOptions.getTimeField())); + String metaField = timeSeriesOptions.getMetaField(); + if (metaField != null) { + timeSeriesDocument.put("metaField", new BsonString(metaField)); + } + TimeSeriesGranularity granularity = timeSeriesOptions.getGranularity(); + if (granularity != null) { + timeSeriesDocument.put("granularity", new BsonString(getGranularityAsString(granularity))); + } + Long bucketMaxSpan = timeSeriesOptions.getBucketMaxSpan(TimeUnit.SECONDS); + if (bucketMaxSpan != null){ + timeSeriesDocument.put("bucketMaxSpanSeconds", new BsonInt64(bucketMaxSpan)); + } + Long bucketRounding = timeSeriesOptions.getBucketRounding(TimeUnit.SECONDS); + if (bucketRounding != null){ + timeSeriesDocument.put("bucketRoundingSeconds", new BsonInt64(bucketRounding)); + } + document.put("timeseries", timeSeriesDocument); + } + if (changeStreamPreAndPostImagesOptions != null) { + document.put("changeStreamPreAndPostImages", new BsonDocument("enabled", + BsonBoolean.valueOf(changeStreamPreAndPostImagesOptions.isEnabled()))); + } + if (clusteredIndexKey != null) { + BsonDocument clusteredIndexDocument = new BsonDocument() + .append("key", clusteredIndexKey) + .append("unique", BsonBoolean.valueOf(clusteredIndexUnique)); + if (clusteredIndexName != null) { + clusteredIndexDocument.put("name", new BsonString(clusteredIndexName)); + } + document.put("clusteredIndex", clusteredIndexDocument); + } + putIfNotNull(document, "encryptedFields", encryptedFields); + return document; + } + + private void checkEncryptedFieldsSupported(final ConnectionDescription connectionDescription) throws 
MongoException {
+        if (encryptedFields != null && serverIsLessThanVersionSevenDotZero(connectionDescription)) {
+            throw new MongoClientException("Driver support of Queryable Encryption is incompatible with server."
+                    + " Upgrade server to use Queryable Encryption.");
+        }
+    }
+
+    /**
+     * @return {@code true} iff the {@linkplain #checkEncryptedFieldsSupported(ConnectionDescription) check} was successful.
+     * The {@code callback} is completed (with a failed result) iff the check was not successful.
+     */
+    private boolean checkEncryptedFieldsSupported(final ConnectionDescription connectionDescription,
+            final SingleResultCallback<Void> callback) {
+        try {
+            checkEncryptedFieldsSupported(connectionDescription);
+            return true;
+        } catch (Exception e) {
+            callback.onResult(null, e);
+            return false;
+        }
+    }
+
+    /**
+     * A SingleResultCallback that can be repeatedly called via onResult until all commands have been run.
+     */
+    class ProcessCommandsCallback implements SingleResultCallback<Void> {
+        private final AsyncWriteBinding binding;
+        private final AsyncConnection connection;
+        private final SingleResultCallback<Void> finalCallback;
+        private final Deque<Supplier<BsonDocument>> commands;
+
+        ProcessCommandsCallback(
+                final AsyncWriteBinding binding, final AsyncConnection connection, final SingleResultCallback<Void> finalCallback) {
+            this.binding = binding;
+            this.connection = connection;
+            this.finalCallback = finalCallback;
+            this.commands = new ArrayDeque<>(getCommandFunctions());
+        }
+
+        @Override
+        public void onResult(@Nullable final Void result, @Nullable final Throwable t) {
+            if (t != null) {
+                finalCallback.onResult(null, t);
+                return;
+            }
+            Supplier<BsonDocument> nextCommandFunction = commands.poll();
+            if (nextCommandFunction == null) {
+                finalCallback.onResult(null, null);
+            } else {
+                executeCommandAsync(binding, databaseName, nextCommandFunction.get(),
+                        connection, writeConcernErrorTransformerAsync(binding.getOperationContext().getTimeoutContext()), this);
+            }
+        }
+    }
+
+}
diff --git a/driver-core/src/main/com/mongodb/internal/operation/CreateIndexesOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CreateIndexesOperation.java
new file mode 100644
index 00000000000..b9b4242a3f4
--- /dev/null
+++ b/driver-core/src/main/com/mongodb/internal/operation/CreateIndexesOperation.java
@@ -0,0 +1,230 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.mongodb.internal.operation; + +import com.mongodb.CreateIndexCommitQuorum; +import com.mongodb.DuplicateKeyException; +import com.mongodb.ErrorCategory; +import com.mongodb.MongoClientException; +import com.mongodb.MongoCommandException; +import com.mongodb.MongoException; +import com.mongodb.MongoNamespace; +import com.mongodb.WriteConcern; +import com.mongodb.WriteConcernResult; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncWriteBinding; +import com.mongodb.internal.binding.WriteBinding; +import com.mongodb.internal.bulk.IndexRequest; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonDouble; +import org.bson.BsonInt32; +import org.bson.BsonInt64; +import org.bson.BsonString; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.operation.AsyncOperationHelper.executeCommandAsync; +import static com.mongodb.internal.operation.AsyncOperationHelper.writeConcernErrorTransformerAsync; +import static com.mongodb.internal.operation.IndexHelper.generateIndexName; +import static com.mongodb.internal.operation.ServerVersionHelper.serverIsAtLeastVersionFourDotFour; +import static com.mongodb.internal.operation.SyncOperationHelper.executeCommand; +import static com.mongodb.internal.operation.SyncOperationHelper.writeConcernErrorTransformer; +import static com.mongodb.internal.operation.WriteConcernHelper.appendWriteConcernToCommand; + +/** + * An operation that creates one or more indexes. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class CreateIndexesOperation implements WriteOperation { + private static final String COMMAND_NAME = "createIndexes"; + private final MongoNamespace namespace; + private final List requests; + private final WriteConcern writeConcern; + private CreateIndexCommitQuorum commitQuorum; + + public CreateIndexesOperation(final MongoNamespace namespace, final List requests, + @Nullable final WriteConcern writeConcern) { + this.namespace = notNull("namespace", namespace); + this.requests = notNull("indexRequests", requests); + this.writeConcern = writeConcern; + } + + public WriteConcern getWriteConcern() { + return writeConcern; + } + + public List getRequests() { + return requests; + } + + public List getIndexNames() { + List indexNames = new ArrayList<>(requests.size()); + for (IndexRequest request : requests) { + if (request.getName() != null) { + indexNames.add(request.getName()); + } else { + indexNames.add(generateIndexName(request.getKeys())); + } + } + return indexNames; + } + + public CreateIndexCommitQuorum getCommitQuorum() { + return commitQuorum; + } + + public CreateIndexesOperation commitQuorum(@Nullable final CreateIndexCommitQuorum commitQuorum) { + this.commitQuorum = commitQuorum; + return this; + } + + @Override + public String getCommandName() { + return COMMAND_NAME; + } + + @Override + public Void execute(final WriteBinding binding) { + try { + return executeCommand(binding, namespace.getDatabaseName(), getCommandCreator(), writeConcernErrorTransformer( + binding.getOperationContext().getTimeoutContext())); + } catch (MongoCommandException e) { + throw checkForDuplicateKeyError(e); + } + } + + @Override + public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback callback) { + executeCommandAsync(binding, namespace.getDatabaseName(), getCommandCreator(), writeConcernErrorTransformerAsync(binding + .getOperationContext().getTimeoutContext()), + ((result, t) -> { + if (t != null) { + callback.onResult(null, translateException(t)); + } else { + callback.onResult(result, null); + } + })); + } + + @SuppressWarnings("deprecation") + private BsonDocument getIndex(final IndexRequest request) { + BsonDocument index = new BsonDocument(); + index.append("key", request.getKeys()); + index.append("name", new BsonString(request.getName() != null ? 
request.getName() : generateIndexName(request.getKeys()))); + if (request.isBackground()) { + index.append("background", BsonBoolean.TRUE); + } + if (request.isUnique()) { + index.append("unique", BsonBoolean.TRUE); + } + if (request.isSparse()) { + index.append("sparse", BsonBoolean.TRUE); + } + if (request.getExpireAfter(TimeUnit.SECONDS) != null) { + index.append("expireAfterSeconds", new BsonInt64(assertNotNull(request.getExpireAfter(TimeUnit.SECONDS)))); + } + if (request.getVersion() != null) { + index.append("v", new BsonInt32(assertNotNull(request.getVersion()))); + } + if (request.getWeights() != null) { + index.append("weights", assertNotNull(request.getWeights())); + } + if (request.getDefaultLanguage() != null) { + index.append("default_language", new BsonString(assertNotNull(request.getDefaultLanguage()))); + } + if (request.getLanguageOverride() != null) { + index.append("language_override", new BsonString(assertNotNull(request.getLanguageOverride()))); + } + if (request.getTextVersion() != null) { + index.append("textIndexVersion", new BsonInt32(assertNotNull(request.getTextVersion()))); + } + if (request.getSphereVersion() != null) { + index.append("2dsphereIndexVersion", new BsonInt32(assertNotNull(request.getSphereVersion()))); + } + if (request.getBits() != null) { + index.append("bits", new BsonInt32(assertNotNull(request.getBits()))); + } + if (request.getMin() != null) { + index.append("min", new BsonDouble(assertNotNull(request.getMin()))); + } + if (request.getMax() != null) { + index.append("max", new BsonDouble(assertNotNull(request.getMax()))); + } + if (request.getDropDups()) { + index.append("dropDups", BsonBoolean.TRUE); + } + if (request.getStorageEngine() != null) { + index.append("storageEngine", assertNotNull(request.getStorageEngine())); + } + if (request.getPartialFilterExpression() != null) { + index.append("partialFilterExpression", assertNotNull(request.getPartialFilterExpression())); + } + if (request.getCollation() != null) { + index.append("collation", assertNotNull(request.getCollation().asDocument())); + } + if (request.getWildcardProjection() != null) { + index.append("wildcardProjection", assertNotNull(request.getWildcardProjection())); + } + if (request.isHidden()) { + index.append("hidden", BsonBoolean.TRUE); + } + return index; + } + + private CommandOperationHelper.CommandCreator getCommandCreator() { + return (operationContext, serverDescription, connectionDescription) -> { + BsonDocument command = new BsonDocument(getCommandName(), new BsonString(namespace.getCollectionName())); + List values = new ArrayList<>(); + for (IndexRequest request : requests) { + values.add(getIndex(request)); + } + command.put("indexes", new BsonArray(values)); + appendWriteConcernToCommand(writeConcern, command); + if (commitQuorum != null) { + if (serverIsAtLeastVersionFourDotFour(connectionDescription)) { + command.put("commitQuorum", commitQuorum.toBsonValue()); + } else { + throw new MongoClientException("Specifying a value for the create index commit quorum option " + + "requires a minimum MongoDB version of 4.4"); + } + } + return command; + }; + } + + @Nullable + private MongoException translateException(@Nullable final Throwable t) { + return (t instanceof MongoCommandException) ? 
checkForDuplicateKeyError((MongoCommandException) t) + : MongoException.fromThrowable(t); + } + + private MongoException checkForDuplicateKeyError(final MongoCommandException e) { + if (ErrorCategory.fromErrorCode(e.getCode()) == ErrorCategory.DUPLICATE_KEY) { + return new DuplicateKeyException(e.getResponse(), e.getServerAddress(), WriteConcernResult.acknowledged(0, false, null)); + } else { + return e; + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/CreateSearchIndexesOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CreateSearchIndexesOperation.java new file mode 100644 index 00000000000..bf75ee88b0d --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/CreateSearchIndexesOperation.java @@ -0,0 +1,74 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoNamespace; +import com.mongodb.client.model.SearchIndexType; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonString; + +import java.util.List; +import java.util.stream.Collectors; + +import static com.mongodb.assertions.Assertions.assertNotNull; + +/** + * An operation that creates one or more Atlas Search indexes. + * + *
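Returning to CreateIndexesOperation above: a minimal sketch of the command document its getCommandCreator assembles, assuming one ascending index on `x` (the `x_1` name follows the usual field_direction convention) and a majority commit quorum. On servers older than 4.4 the operation throws MongoClientException rather than silently dropping commitQuorum:

```java
import java.util.Collections;
import org.bson.BsonArray;
import org.bson.BsonDocument;
import org.bson.BsonString;

public final class CreateIndexesCommandSketch {
    public static void main(final String[] args) {
        BsonDocument index = BsonDocument.parse("{key: {x: 1}, name: 'x_1'}");
        BsonDocument command = new BsonDocument("createIndexes", new BsonString("coll"))
                .append("indexes", new BsonArray(Collections.singletonList(index)));
        boolean serverIsAtLeastFourDotFour = true; // stand-in for the real wire-version check
        if (serverIsAtLeastFourDotFour) {
            command.append("commitQuorum", new BsonString("majority"));
        } // on older servers the operation fails fast instead of omitting the option
        System.out.println(command.toJson());
    }
}
```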
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class CreateSearchIndexesOperation extends AbstractWriteSearchIndexOperation { + private static final String COMMAND_NAME = "createSearchIndexes"; + private final List indexRequests; + + public CreateSearchIndexesOperation(final MongoNamespace namespace, final List indexRequests) { + super(namespace); + this.indexRequests = assertNotNull(indexRequests); + } + + @Override + public String getCommandName() { + return COMMAND_NAME; + } + + private static BsonArray convert(final List requests) { + return requests.stream() + .map(CreateSearchIndexesOperation::convert) + .collect(Collectors.toCollection(BsonArray::new)); + } + + private static BsonDocument convert(final SearchIndexRequest request) { + BsonDocument bsonIndexRequest = new BsonDocument(); + String searchIndexName = request.getIndexName(); + if (searchIndexName != null) { + bsonIndexRequest.append("name", new BsonString(searchIndexName)); + } + SearchIndexType searchIndexType = request.getSearchIndexType(); + if (searchIndexType != null) { + bsonIndexRequest.append("type", searchIndexType.toBsonValue()); + } + bsonIndexRequest.append("definition", request.getDefinition()); + return bsonIndexRequest; + } + + @Override + BsonDocument buildCommand() { + return new BsonDocument(getCommandName(), new BsonString(getNamespace().getCollectionName())) + .append("indexes", convert(indexRequests)); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/CreateViewOperation.java b/driver-core/src/main/com/mongodb/internal/operation/CreateViewOperation.java new file mode 100644 index 00000000000..49b47fb7e9c --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/CreateViewOperation.java @@ -0,0 +1,166 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
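A sketch of the createSearchIndexes command that buildCommand produces; the index name, the 'search' type string, and the mappings definition are illustrative values, not fixed by the operation:

```java
import java.util.Collections;
import org.bson.BsonArray;
import org.bson.BsonDocument;
import org.bson.BsonString;

public final class CreateSearchIndexesCommandSketch {
    public static void main(final String[] args) {
        // name and type are optional on a request and are simply omitted when unset
        BsonDocument indexRequest = BsonDocument.parse(
                "{name: 'default', type: 'search', definition: {mappings: {dynamic: true}}}");
        BsonDocument command = new BsonDocument("createSearchIndexes", new BsonString("coll"))
                .append("indexes", new BsonArray(Collections.singletonList(indexRequest)));
        System.out.println(command.toJson());
    }
}
```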
+ */ + +package com.mongodb.internal.operation; + +import com.mongodb.WriteConcern; +import com.mongodb.client.model.Collation; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncWriteBinding; +import com.mongodb.internal.binding.WriteBinding; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.codecs.BsonDocumentCodec; + +import java.util.List; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; +import static com.mongodb.internal.operation.AsyncOperationHelper.executeCommandAsync; +import static com.mongodb.internal.operation.AsyncOperationHelper.releasingCallback; +import static com.mongodb.internal.operation.AsyncOperationHelper.withAsyncConnection; +import static com.mongodb.internal.operation.AsyncOperationHelper.writeConcernErrorTransformerAsync; +import static com.mongodb.internal.operation.OperationHelper.LOGGER; +import static com.mongodb.internal.operation.SyncOperationHelper.executeCommand; +import static com.mongodb.internal.operation.SyncOperationHelper.withConnection; +import static com.mongodb.internal.operation.SyncOperationHelper.writeConcernErrorTransformer; +import static com.mongodb.internal.operation.WriteConcernHelper.appendWriteConcernToCommand; + +/** + * An operation to create a view. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class CreateViewOperation implements WriteOperation { + private final String databaseName; + private final String viewName; + private final String viewOn; + private final List pipeline; + private final WriteConcern writeConcern; + private Collation collation; + + public CreateViewOperation(final String databaseName, final String viewName, final String viewOn, final List pipeline, + final WriteConcern writeConcern) { + this.databaseName = notNull("databaseName", databaseName); + this.viewName = notNull("viewName", viewName); + this.viewOn = notNull("viewOn", viewOn); + this.pipeline = notNull("pipeline", pipeline); + this.writeConcern = notNull("writeConcern", writeConcern); + } + + public String getDatabaseName() { + return databaseName; + } + + /** + * Gets the name of the view to create. + * + * @return the view name + */ + public String getViewName() { + return viewName; + } + + /** + * Gets the name of the collection or view that backs this view. + * + * @return the name of the collection or view that backs this view + */ + public String getViewOn() { + return viewOn; + } + + /** + * Gets the pipeline that defines the view. + * + * @return the pipeline that defines the view + */ + public List getPipeline() { + return pipeline; + } + + /** + * Gets the write concern. + * + * @return the write concern + */ + public WriteConcern getWriteConcern() { + return writeConcern; + } + + /** + * Gets the default collation for the view + * + * @return the collation, which may be null + */ + public Collation getCollation() { + return collation; + } + + /** + * Sets the default collation for the view. + * + * @param collation the collation, which may be null + * @return this + */ + public CreateViewOperation collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + @Override + public String getCommandName() { + return "createView"; + } + + @Override + public Void execute(final WriteBinding binding) { + return withConnection(binding, connection -> { + executeCommand(binding, databaseName, getCommand(), new BsonDocumentCodec(), + writeConcernErrorTransformer(binding.getOperationContext().getTimeoutContext())); + return null; + }); + } + + @Override + public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback callback) { + withAsyncConnection(binding, (connection, t) -> { + SingleResultCallback errHandlingCallback = errorHandlingCallback(callback, LOGGER); + if (t != null) { + errHandlingCallback.onResult(null, t); + } else { + SingleResultCallback wrappedCallback = releasingCallback(errHandlingCallback, connection); + executeCommandAsync(binding, databaseName, getCommand(), connection, + writeConcernErrorTransformerAsync(binding.getOperationContext().getTimeoutContext()), + wrappedCallback); + } + }); + } + + private BsonDocument getCommand() { + BsonDocument commandDocument = new BsonDocument("create", new BsonString(viewName)) + .append("viewOn", new BsonString(viewOn)) + .append("pipeline", new BsonArray(pipeline)); + if (collation != null) { + commandDocument.put("collation", collation.asDocument()); + } + + appendWriteConcernToCommand(writeConcern, commandDocument); + return commandDocument; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/CursorHelper.java b/driver-core/src/main/com/mongodb/internal/operation/CursorHelper.java new file mode 100644 index 00000000000..26511c86885 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/CursorHelper.java @@ -0,0 +1,31 @@ +/* + * 
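A sketch of the createView command assembled by CreateViewOperation.getCommand, under an assumed view named `activeUsers` over `users`; collation and writeConcern are appended afterwards only when present:

```java
import java.util.Arrays;
import org.bson.BsonArray;
import org.bson.BsonDocument;
import org.bson.BsonString;

public final class CreateViewCommandSketch {
    public static void main(final String[] args) {
        BsonArray pipeline = new BsonArray(Arrays.asList(
                BsonDocument.parse("{$match: {active: true}}"),
                BsonDocument.parse("{$project: {name: 1}}")));
        BsonDocument command = new BsonDocument("create", new BsonString("activeUsers"))
                .append("viewOn", new BsonString("users"))
                .append("pipeline", pipeline);
        System.out.println(command.toJson());
    }
}
```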
Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonInt32; + +final class CursorHelper { + + static BsonDocument getCursorDocumentFromBatchSize(@Nullable final Integer batchSize) { + return batchSize == null ? new BsonDocument() : new BsonDocument("batchSize", new BsonInt32(batchSize)); + } + + private CursorHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/CursorResourceManager.java b/driver-core/src/main/com/mongodb/internal/operation/CursorResourceManager.java new file mode 100644 index 00000000000..0fbdf512dab --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/CursorResourceManager.java @@ -0,0 +1,277 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoNamespace; +import com.mongodb.MongoSocketException; +import com.mongodb.ServerCursor; +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.internal.binding.ReferenceCounted; +import com.mongodb.internal.connection.Connection; +import com.mongodb.lang.Nullable; + +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.assertNull; +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.assertions.Assertions.fail; +import static com.mongodb.internal.Locks.withLock; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.MESSAGE_IF_CONCURRENT_OPERATION; + +/** + * This is the resource manager for {@link CommandBatchCursor} or {@link AsyncCommandBatchCursor} implementations. + *
<p>
+ * This class maintains all resources that must be released in {@link CommandBatchCursor#close()} / + * {@link AsyncCommandBatchCursor#close()}. The abstract {@linkplain #doClose() deferred close action} is such that it is totally + * ordered with other operations of {@link CommandBatchCursor} / {@link AsyncCommandBatchCursor} (methods {@link #tryStartOperation()}/ + * {@link #endOperation()} must be used properly to enforce the order) despite the method {@link CommandBatchCursor#close()} / + * {@link AsyncCommandBatchCursor#close()} being called concurrently with those operations. + *
<p>
+ * This total order induces the happens-before order. + *
<p>
+ * The deferred close action does not violate externally observable idempotence of {@link CommandBatchCursor#close()} / + * {@link AsyncCommandBatchCursor#close()}, because the close method is allowed to release resources "eventually". + *
<p>
+ * Only methods explicitly documented as thread-safe are thread-safe, + * others are not and rely on the total order mentioned above. + */ +@ThreadSafe +abstract class CursorResourceManager { + private final Lock lock; + private final MongoNamespace namespace; + private volatile State state; + @Nullable + private volatile CS connectionSource; + @Nullable + private volatile C pinnedConnection; + @Nullable + private volatile ServerCursor serverCursor; + private volatile boolean skipReleasingServerResourcesOnClose; + + CursorResourceManager( + final MongoNamespace namespace, + final CS connectionSource, + @Nullable final C connectionToPin, + @Nullable final ServerCursor serverCursor) { + this.lock = new ReentrantLock(); + this.namespace = namespace; + this.state = State.IDLE; + if (serverCursor != null) { + connectionSource.retain(); + this.connectionSource = connectionSource; + if (connectionToPin != null) { + connectionToPin.retain(); + markAsPinned(connectionToPin, Connection.PinningMode.CURSOR); + this.pinnedConnection = connectionToPin; + } + } + this.skipReleasingServerResourcesOnClose = false; + this.serverCursor = serverCursor; + } + + /** + * Thread-safe. + */ + MongoNamespace getNamespace() { + return namespace; + } + + /** + * Thread-safe. + */ + State getState() { + return state; + } + + /** + * Thread-safe. + */ + @Nullable + CS getConnectionSource() { + return connectionSource; + } + + /** + * Thread-safe. + */ + @Nullable + C getPinnedConnection() { + return pinnedConnection; + } + + /** + * Thread-safe. + */ + boolean isSkipReleasingServerResourcesOnClose() { + return skipReleasingServerResourcesOnClose; + } + + @SuppressWarnings("SameParameterValue") + abstract void markAsPinned(C connectionToPin, Connection.PinningMode pinningMode); + + /** + * Thread-safe. + */ + boolean operable() { + return state.operable(); + } + + /** + * Thread-safe. + * Returns {@code true} iff started an operation. + * If {@linkplain #operable() closed}, then returns false, otherwise completes abruptly. + * + * @throws IllegalStateException Iff another operation is in progress. + */ + boolean tryStartOperation() throws IllegalStateException { + return withLock(lock, () -> { + State localState = state; + if (!localState.operable()) { + return false; + } else if (localState == State.IDLE) { + state = State.OPERATION_IN_PROGRESS; + return true; + } else if (localState == State.OPERATION_IN_PROGRESS) { + throw new IllegalStateException(MESSAGE_IF_CONCURRENT_OPERATION); + } else { + throw fail(state.toString()); + } + }); + } + + /** + * Thread-safe. + */ + void endOperation() { + boolean doClose = withLock(lock, () -> { + State localState = state; + if (localState == State.OPERATION_IN_PROGRESS) { + state = State.IDLE; + } else if (localState == State.CLOSE_PENDING) { + state = State.CLOSED; + return true; + } else if (localState != State.CLOSED) { + throw fail(localState.toString()); + } + return false; + }); + if (doClose) { + doClose(); + } + } + + /** + * Thread-safe. + */ + void close() { + boolean doClose = withLock(lock, () -> { + State localState = state; + if (localState.inProgress()) { + state = State.CLOSE_PENDING; + } else if (localState != State.CLOSED) { + state = State.CLOSED; + return true; + } + return false; + }); + if (doClose) { + doClose(); + } + } + + /** + * This method is never executed concurrently with either itself or other operations + * demarcated by {@link #tryStartOperation()}/{@link #endOperation()}. 
+ */ + abstract void doClose(); + + void onCorruptedConnection(@Nullable final C corruptedConnection, final MongoSocketException e) { + // if `pinnedConnection` is corrupted, then we cannot kill `serverCursor` via such a connection + C localPinnedConnection = pinnedConnection; + if (localPinnedConnection != null) { + if (corruptedConnection != localPinnedConnection) { + e.addSuppressed(new AssertionError("Corrupted connection does not equal the pinned connection.")); + } + skipReleasingServerResourcesOnClose = true; + } + } + + /** + * Thread-safe. + */ + @Nullable + final ServerCursor getServerCursor() { + return serverCursor; + } + + void setServerCursor(@Nullable final ServerCursor serverCursor) { + assertTrue(state.inProgress()); + assertNotNull(this.serverCursor); + // without `connectionSource` we will not be able to kill `serverCursor` later + assertNotNull(connectionSource); + this.serverCursor = serverCursor; + if (serverCursor == null) { + releaseClientResources(); + } + } + + void unsetServerCursor() { + this.serverCursor = null; + } + + void releaseClientResources() { + assertNull(serverCursor); + CS localConnectionSource = connectionSource; + if (localConnectionSource != null) { + localConnectionSource.release(); + connectionSource = null; + } + C localPinnedConnection = pinnedConnection; + if (localPinnedConnection != null) { + localPinnedConnection.release(); + pinnedConnection = null; + } + } + + enum State { + IDLE(true, false), + OPERATION_IN_PROGRESS(true, true), + /** + * Implies {@link #OPERATION_IN_PROGRESS}. + */ + CLOSE_PENDING(false, true), + CLOSED(false, false); + + private final boolean operable; + private final boolean inProgress; + + State(final boolean operable, final boolean inProgress) { + this.operable = operable; + this.inProgress = inProgress; + } + + boolean operable() { + return operable; + } + + boolean inProgress() { + return inProgress; + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/DistinctOperation.java b/driver-core/src/main/com/mongodb/internal/operation/DistinctOperation.java new file mode 100644 index 00000000000..489e3923bdc --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/DistinctOperation.java @@ -0,0 +1,147 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
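The state machine enforced by CursorResourceManager is easier to see in isolation. A simplified sketch of the transitions (locking, resource release, and the failure branches for unexpected states are omitted):

```java
public final class CursorStateSketch {
    enum State { IDLE, OPERATION_IN_PROGRESS, CLOSE_PENDING, CLOSED }

    private State state = State.IDLE;

    boolean tryStartOperation() {
        if (state == State.CLOSE_PENDING || state == State.CLOSED) {
            return false; // not operable: caller must not start work
        }
        if (state == State.OPERATION_IN_PROGRESS) {
            throw new IllegalStateException("another operation is in progress");
        }
        state = State.OPERATION_IN_PROGRESS;
        return true;
    }

    void endOperation() {
        // a close() that raced with the operation is honoured here, "eventually"
        state = (state == State.CLOSE_PENDING) ? State.CLOSED : State.IDLE;
    }

    void close() {
        // while an operation is running, only the intent to close is recorded
        state = (state == State.OPERATION_IN_PROGRESS) ? State.CLOSE_PENDING : State.CLOSED;
    }

    public static void main(final String[] args) {
        CursorStateSketch s = new CursorStateSketch();
        s.tryStartOperation(); // IDLE -> OPERATION_IN_PROGRESS
        s.close();             // -> CLOSE_PENDING (close is deferred)
        s.endOperation();      // -> CLOSED, deferred close action runs here
        System.out.println(s.state); // CLOSED
    }
}
```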
+ */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoNamespace; +import com.mongodb.client.model.Collation; +import com.mongodb.internal.async.AsyncBatchCursor; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncReadBinding; +import com.mongodb.internal.binding.ReadBinding; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.codecs.Codec; +import org.bson.codecs.Decoder; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; +import static com.mongodb.internal.operation.AsyncOperationHelper.asyncSingleBatchCursorTransformer; +import static com.mongodb.internal.operation.AsyncOperationHelper.executeRetryableReadAsync; +import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; +import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; +import static com.mongodb.internal.operation.OperationHelper.LOGGER; +import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand; +import static com.mongodb.internal.operation.SyncOperationHelper.executeRetryableRead; +import static com.mongodb.internal.operation.SyncOperationHelper.singleBatchCursorTransformer; + +/** + * Finds the distinct values for a specified field across a single collection. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class DistinctOperation implements ReadOperationCursor { + private static final String COMMAND_NAME = "distinct"; + private static final String VALUES = "values"; + private final MongoNamespace namespace; + private final String fieldName; + private final Decoder decoder; + private boolean retryReads; + private BsonDocument filter; + private Collation collation; + private BsonValue comment; + private BsonValue hint; + + public DistinctOperation(final MongoNamespace namespace, final String fieldName, final Decoder decoder) { + this.namespace = notNull("namespace", namespace); + this.fieldName = notNull("fieldName", fieldName); + this.decoder = notNull("decoder", decoder); + } + + public BsonDocument getFilter() { + return filter; + } + + public DistinctOperation filter(@Nullable final BsonDocument filter) { + this.filter = filter; + return this; + } + + public DistinctOperation retryReads(final boolean retryReads) { + this.retryReads = retryReads; + return this; + } + + public boolean getRetryReads() { + return retryReads; + } + + public Collation getCollation() { + return collation; + } + + public DistinctOperation collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + public BsonValue getComment() { + return comment; + } + + public DistinctOperation comment(final BsonValue comment) { + this.comment = comment; + return this; + } + + public BsonValue getHint() { + return hint; + } + + public DistinctOperation hint(@Nullable final BsonValue hint) { + this.hint = hint; + return this; + } + + @Override + public String getCommandName() { + return COMMAND_NAME; + } + + @Override + public BatchCursor execute(final ReadBinding binding) { + return executeRetryableRead(binding, namespace.getDatabaseName(), getCommandCreator(), createCommandDecoder(), + singleBatchCursorTransformer(VALUES), retryReads); + } + + @Override + public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { + executeRetryableReadAsync(binding, namespace.getDatabaseName(), + getCommandCreator(), createCommandDecoder(), asyncSingleBatchCursorTransformer(VALUES), retryReads, + errorHandlingCallback(callback, LOGGER)); + } + + private Codec createCommandDecoder() { + return CommandResultDocumentCodec.create(decoder, VALUES); + } + + private CommandCreator getCommandCreator() { + return (operationContext, serverDescription, connectionDescription) -> { + BsonDocument commandDocument = new BsonDocument(getCommandName(), new BsonString(namespace.getCollectionName())); + appendReadConcernToCommand(operationContext.getSessionContext(), connectionDescription.getMaxWireVersion(), commandDocument); + commandDocument.put("key", new BsonString(fieldName)); + putIfNotNull(commandDocument, "query", filter); + if (collation != null) { + commandDocument.put("collation", collation.asDocument()); + } + putIfNotNull(commandDocument, "comment", comment); + putIfNotNull(commandDocument, "hint", hint); + return commandDocument; + }; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/DocumentHelper.java b/driver-core/src/main/com/mongodb/internal/operation/DocumentHelper.java new file mode 100644 index 00000000000..46a66fcf28e --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/DocumentHelper.java @@ -0,0 +1,79 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
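A sketch of the distinct command that DistinctOperation.getCommandCreator assembles, with an assumed collection `orders` and field `status`:

```java
import org.bson.BsonDocument;
import org.bson.BsonString;

public final class DistinctCommandSketch {
    public static void main(final String[] args) {
        BsonDocument command = new BsonDocument("distinct", new BsonString("orders"))
                .append("key", new BsonString("status"))
                .append("query", BsonDocument.parse("{archived: false}"));
        // collation, comment and hint are appended only when set;
        // the reply's "values" array is wrapped in a single-batch cursor
        System.out.println(command.toJson());
    }
}
```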
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.lang.Nullable; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonInt64; +import org.bson.BsonString; +import org.bson.BsonValue; + +final class DocumentHelper { + + private DocumentHelper() { + } + + static void putIfTrue(final BsonDocument command, final String key, final boolean condition) { + if (condition) { + command.put(key, BsonBoolean.TRUE); + } + } + + static void putIfFalse(final BsonDocument command, final String key, final boolean condition) { + if (!condition) { + command.put(key, BsonBoolean.FALSE); + } + } + + static void putIfNotNullOrEmpty(final BsonDocument command, final String key, @Nullable final BsonDocument documentValue) { + if (documentValue != null && !documentValue.isEmpty()) { + command.put(key, documentValue); + } + } + + static void putIfNotNull(final BsonDocument command, final String key, @Nullable final BsonValue value) { + if (value != null) { + command.put(key, value); + } + } + + static void putIfNotNull(final BsonDocument command, final String key, @Nullable final String value) { + if (value != null) { + command.put(key, new BsonString(value)); + } + } + + static void putIfNotNull(final BsonDocument command, final String key, @Nullable final Boolean value) { + if (value != null) { + command.put(key, new BsonBoolean(value)); + } + } + + static void putIfNotZero(final BsonDocument command, final String key, final int value) { + if (value != 0) { + command.put(key, new BsonInt32(value)); + } + } + + static void putIfNotZero(final BsonDocument command, final String key, final long value) { + if (value != 0) { + command.put(key, new BsonInt64(value)); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/DropCollectionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/DropCollectionOperation.java new file mode 100644 index 00000000000..2926fdec799 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/DropCollectionOperation.java @@ -0,0 +1,266 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
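The DocumentHelper conditional-put helpers keep unset options out of command documents instead of sending nulls or zeroes. A self-contained usage sketch with local copies of two of the helpers (the collection name and sizes are made up):

```java
import org.bson.BsonBoolean;
import org.bson.BsonDocument;
import org.bson.BsonInt64;
import org.bson.BsonString;

public final class DocumentHelperSketch {
    // local copies of two of the helpers, so the sketch stands alone
    static void putIfTrue(final BsonDocument command, final String key, final boolean condition) {
        if (condition) {
            command.put(key, BsonBoolean.TRUE);
        }
    }

    static void putIfNotZero(final BsonDocument command, final String key, final long value) {
        if (value != 0) {
            command.put(key, new BsonInt64(value));
        }
    }

    public static void main(final String[] args) {
        BsonDocument command = new BsonDocument("create", new BsonString("logs"));
        putIfTrue(command, "capped", true);
        putIfNotZero(command, "size", 1_048_576L);
        putIfNotZero(command, "max", 0L); // zero means "not set", so "max" is omitted
        System.out.println(command.toJson());
    }
}
```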
+ */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoCommandException; +import com.mongodb.MongoNamespace; +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.WriteConcern; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncReadWriteBinding; +import com.mongodb.internal.binding.AsyncWriteBinding; +import com.mongodb.internal.binding.ReadWriteBinding; +import com.mongodb.internal.binding.WriteBinding; +import com.mongodb.internal.connection.AsyncConnection; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.codecs.BsonValueCodec; + +import java.util.ArrayDeque; +import java.util.Deque; +import java.util.List; +import java.util.function.Supplier; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; +import static com.mongodb.internal.operation.AsyncOperationHelper.executeCommandAsync; +import static com.mongodb.internal.operation.AsyncOperationHelper.releasingCallback; +import static com.mongodb.internal.operation.AsyncOperationHelper.withAsyncConnection; +import static com.mongodb.internal.operation.AsyncOperationHelper.writeConcernErrorTransformerAsync; +import static com.mongodb.internal.operation.CommandOperationHelper.isNamespaceError; +import static com.mongodb.internal.operation.CommandOperationHelper.rethrowIfNotNamespaceError; +import static com.mongodb.internal.operation.OperationHelper.LOGGER; +import static com.mongodb.internal.operation.SyncOperationHelper.executeCommand; +import static com.mongodb.internal.operation.SyncOperationHelper.withConnection; +import static com.mongodb.internal.operation.SyncOperationHelper.writeConcernErrorTransformer; +import static com.mongodb.internal.operation.WriteConcernHelper.appendWriteConcernToCommand; +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; + +/** + * Operation to drop a Collection in MongoDB. The {@code execute} method throws MongoCommandFailureException if something goes wrong, but + * it will not throw an Exception if the collection does not exist before trying to drop it. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class DropCollectionOperation implements WriteOperation { + private static final String ENCRYPT_PREFIX = "enxcol_."; + private static final BsonValueCodec BSON_VALUE_CODEC = new BsonValueCodec(); + private final MongoNamespace namespace; + private final WriteConcern writeConcern; + private BsonDocument encryptedFields; + private boolean autoEncryptedFields; + + public DropCollectionOperation(final MongoNamespace namespace, @Nullable final WriteConcern writeConcern) { + this.namespace = notNull("namespace", namespace); + this.writeConcern = writeConcern; + } + + public WriteConcern getWriteConcern() { + return writeConcern; + } + + public DropCollectionOperation encryptedFields(final BsonDocument encryptedFields) { + this.encryptedFields = encryptedFields; + return this; + } + + public DropCollectionOperation autoEncryptedFields(final boolean autoEncryptedFields) { + this.autoEncryptedFields = autoEncryptedFields; + return this; + } + + @Override + public String getCommandName() { + return "dropCollection"; + } + + @Override + public Void execute(final WriteBinding binding) { + BsonDocument localEncryptedFields = getEncryptedFields((ReadWriteBinding) binding); + return withConnection(binding, connection -> { + getCommands(localEncryptedFields).forEach(command -> { + try { + executeCommand(binding, namespace.getDatabaseName(), command.get(), + connection, writeConcernErrorTransformer(binding.getOperationContext().getTimeoutContext())); + } catch (MongoCommandException e) { + rethrowIfNotNamespaceError(e); + } + }); + return null; + }); + } + + @Override + public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback callback) { + SingleResultCallback errHandlingCallback = errorHandlingCallback(callback, LOGGER); + getEncryptedFields((AsyncReadWriteBinding) binding, (result, t) -> { + if (t != null) { + errHandlingCallback.onResult(null, t); + } else { + withAsyncConnection(binding, (connection, t1) -> { + if (t1 != null) { + errHandlingCallback.onResult(null, t1); + } else { + new ProcessCommandsCallback(binding, connection, getCommands(result), releasingCallback(errHandlingCallback, + connection)) + .onResult(null, null); + } + }); + } + }); + } + + /** + * With Queryable Encryption dropping a collection can involve more logic and commands. + * + *
<p>
+ * A call to a driver helper Collection.drop(dropOptions) must check if the collection namespace
+ * ({@code <databaseName>.<collectionName>}) has an associated encryptedFields.
+ * Check for an associated encryptedFields from the following:
+ * <ol>
+ *   <li>The encryptedFields option passed in dropOptions.</li>
+ *   <li>The value of AutoEncryptionOpts.encryptedFieldsMap[{@code <databaseName>.<collectionName>}].</li>
+ *   <li>If AutoEncryptionOpts.encryptedFieldsMap is not null, run a listCollections command on the database databaseName with the
+ *   filter {@code { "name": "<collectionName>" }}. Check the returned options for the encryptedFields option.</li>
+ * </ol>
+ *
+ * <p>
+ * If the collection namespace has an associated encryptedFields, then do the following operations.
+ * If any of the following operations error, the remaining operations are not attempted:
+ * <ol>
+ *   <li>Drop the collection collectionName.</li>
+ *   <li>Drop the collection with name encryptedFields["escCollection"].
+ *   If encryptedFields["escCollection"] is not set, use the collection name {@code enxcol_.<collectionName>.esc}.</li>
+ *   <li>Drop the collection with name encryptedFields["ecocCollection"].
+ *   If encryptedFields["ecocCollection"] is not set, use the collection name {@code enxcol_.<collectionName>.ecoc}.</li>
+ * </ol>
+ * (see the standalone sketch of this command sequence just below)
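A minimal sketch of the three drop commands, in the order getCommands returns them (auxiliary collections first); `users` is a hypothetical collection name with no escCollection/ecocCollection overrides:

```java
import java.util.Arrays;
import java.util.List;
import org.bson.BsonDocument;
import org.bson.BsonString;

public final class QueryableEncryptionDropSketch {
    public static void main(final String[] args) {
        String coll = "users"; // hypothetical collection name
        List<BsonDocument> commands = Arrays.asList(
                new BsonDocument("drop", new BsonString("enxcol_." + coll + ".esc")),
                new BsonDocument("drop", new BsonString("enxcol_." + coll + ".ecoc")),
                new BsonDocument("drop", new BsonString(coll)));
        // each drop tolerates "namespace not found": the operation rethrows only
        // non-namespace errors, so dropping a missing collection is not fatal
        commands.forEach(command -> System.out.println(command.toJson()));
    }
}
```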
+ * + * @return the list of commands to run to create the collection + */ + private List> getCommands(@Nullable final BsonDocument encryptedFields) { + if (encryptedFields == null || encryptedFields.isEmpty()) { + return singletonList(this::dropCollectionCommand); + } else { + return asList( + () -> getDropEncryptedFieldsCollectionCommand(encryptedFields, "esc"), + () -> getDropEncryptedFieldsCollectionCommand(encryptedFields, "ecoc"), + this::dropCollectionCommand + ); + } + } + + private BsonDocument getDropEncryptedFieldsCollectionCommand(final BsonDocument encryptedFields, final String collectionSuffix) { + BsonString defaultCollectionName = new BsonString(ENCRYPT_PREFIX + namespace.getCollectionName() + "." + collectionSuffix); + return new BsonDocument("drop", encryptedFields.getOrDefault(collectionSuffix + "Collection", defaultCollectionName)); + } + + private BsonDocument dropCollectionCommand() { + BsonDocument commandDocument = new BsonDocument("drop", new BsonString(namespace.getCollectionName())); + appendWriteConcernToCommand(writeConcern, commandDocument); + return commandDocument; + } + + @Nullable + private BsonDocument getEncryptedFields(final ReadWriteBinding readWriteBinding) { + if (encryptedFields == null && autoEncryptedFields) { + try (BatchCursor cursor = listCollectionOperation().execute(readWriteBinding)) { + return getCollectionEncryptedFields(encryptedFields, cursor.tryNext()); + } + } + return encryptedFields; + } + + private void getEncryptedFields( + final AsyncReadWriteBinding asyncReadWriteBinding, + final SingleResultCallback callback) { + if (encryptedFields == null && autoEncryptedFields) { + listCollectionOperation().executeAsync(asyncReadWriteBinding, (cursor, t) -> { + if (t != null) { + callback.onResult(null, t); + } else { + cursor.next((bsonValues, t1) -> { + if (t1 != null) { + callback.onResult(null, t1); + } else { + callback.onResult(getCollectionEncryptedFields(encryptedFields, bsonValues), null); + } + }); + } + }); + } else { + callback.onResult(encryptedFields, null); + } + } + private BsonDocument getCollectionEncryptedFields(final BsonDocument defaultEncryptedFields, + @Nullable final List bsonValues) { + if (bsonValues != null && bsonValues.size() > 0) { + return bsonValues.get(0).asDocument() + .getDocument("options", new BsonDocument()) + .getDocument("encryptedFields", new BsonDocument()); + } + return defaultEncryptedFields; + } + + private ListCollectionsOperation listCollectionOperation() { + return new ListCollectionsOperation<>(namespace.getDatabaseName(), BSON_VALUE_CODEC) + .filter(new BsonDocument("name", new BsonString(namespace.getCollectionName()))) + .batchSize(1); + } + + /** + * A SingleResultCallback that can be repeatedly called via onResult until all commands have been run. 
+ */ + class ProcessCommandsCallback implements SingleResultCallback { + private final AsyncWriteBinding binding; + private final AsyncConnection connection; + private final SingleResultCallback finalCallback; + private final Deque> commands; + + ProcessCommandsCallback( + final AsyncWriteBinding binding, final AsyncConnection connection, + final List> commands, + final SingleResultCallback finalCallback) { + this.binding = binding; + this.connection = connection; + this.finalCallback = finalCallback; + this.commands = new ArrayDeque<>(commands); + } + + @Override + public void onResult(@Nullable final Void result, @Nullable final Throwable t) { + if (t != null && !isNamespaceError(t)) { + finalCallback.onResult(null, t); + return; + } + Supplier nextCommandFunction = commands.poll(); + if (nextCommandFunction == null) { + finalCallback.onResult(null, null); + } else { + try { + executeCommandAsync(binding, namespace.getDatabaseName(), nextCommandFunction.get(), + connection, writeConcernErrorTransformerAsync(binding.getOperationContext().getTimeoutContext()), this); + } catch (MongoOperationTimeoutException operationTimeoutException) { + finalCallback.onResult(null, operationTimeoutException); + } + } + } + } + +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/DropDatabaseOperation.java b/driver-core/src/main/com/mongodb/internal/operation/DropDatabaseOperation.java new file mode 100644 index 00000000000..d619176e8a3 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/DropDatabaseOperation.java @@ -0,0 +1,92 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.WriteConcern; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncWriteBinding; +import com.mongodb.internal.binding.WriteBinding; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonInt32; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; +import static com.mongodb.internal.operation.AsyncOperationHelper.executeCommandAsync; +import static com.mongodb.internal.operation.AsyncOperationHelper.releasingCallback; +import static com.mongodb.internal.operation.AsyncOperationHelper.withAsyncConnection; +import static com.mongodb.internal.operation.AsyncOperationHelper.writeConcernErrorTransformerAsync; +import static com.mongodb.internal.operation.OperationHelper.LOGGER; +import static com.mongodb.internal.operation.SyncOperationHelper.executeCommand; +import static com.mongodb.internal.operation.SyncOperationHelper.withConnection; +import static com.mongodb.internal.operation.SyncOperationHelper.writeConcernErrorTransformer; +import static com.mongodb.internal.operation.WriteConcernHelper.appendWriteConcernToCommand; + +/** + * Operation to drop a database in MongoDB. 
The {@code execute} method throws MongoCommandException if something goes wrong, but
+ * it will not throw an exception if the database does not exist before trying to drop it.
+ *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class DropDatabaseOperation implements WriteOperation { + private final String databaseName; + private final WriteConcern writeConcern; + + public DropDatabaseOperation(final String databaseName, @Nullable final WriteConcern writeConcern) { + this.databaseName = notNull("databaseName", databaseName); + this.writeConcern = writeConcern; + } + + public WriteConcern getWriteConcern() { + return writeConcern; + } + + @Override + public String getCommandName() { + return "dropDatabase"; + } + + @Override + public Void execute(final WriteBinding binding) { + return withConnection(binding, connection -> { + executeCommand(binding, databaseName, getCommand(), connection, writeConcernErrorTransformer(binding.getOperationContext() + .getTimeoutContext())); + return null; + }); + } + + @Override + public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback callback) { + withAsyncConnection(binding, (connection, t) -> { + SingleResultCallback errHandlingCallback = errorHandlingCallback(callback, LOGGER); + if (t != null) { + errHandlingCallback.onResult(null, t); + } else { + executeCommandAsync(binding, databaseName, getCommand(), connection, + writeConcernErrorTransformerAsync(binding.getOperationContext().getTimeoutContext()), + releasingCallback(errHandlingCallback, connection)); + + } + }); + } + + private BsonDocument getCommand() { + BsonDocument commandDocument = new BsonDocument("dropDatabase", new BsonInt32(1)); + appendWriteConcernToCommand(writeConcern, commandDocument); + return commandDocument; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/DropIndexOperation.java b/driver-core/src/main/com/mongodb/internal/operation/DropIndexOperation.java new file mode 100644 index 00000000000..3671a90aa56 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/DropIndexOperation.java @@ -0,0 +1,109 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
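A sketch of the dropDatabase command from DropDatabaseOperation.getCommand; the majority write concern is an illustration, and appendWriteConcernToCommand adds the field only when a non-default write concern is configured:

```java
import org.bson.BsonDocument;
import org.bson.BsonInt32;

public final class DropDatabaseCommandSketch {
    public static void main(final String[] args) {
        BsonDocument command = new BsonDocument("dropDatabase", new BsonInt32(1));
        // appended only for a non-default write concern; {w: 'majority'} is illustrative
        command.append("writeConcern", BsonDocument.parse("{w: 'majority'}"));
        System.out.println(command.toJson());
    }
}
```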
+ */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoCommandException; +import com.mongodb.MongoNamespace; +import com.mongodb.WriteConcern; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncWriteBinding; +import com.mongodb.internal.binding.WriteBinding; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonString; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.operation.AsyncOperationHelper.executeCommandAsync; +import static com.mongodb.internal.operation.AsyncOperationHelper.writeConcernErrorTransformerAsync; +import static com.mongodb.internal.operation.CommandOperationHelper.isNamespaceError; +import static com.mongodb.internal.operation.CommandOperationHelper.rethrowIfNotNamespaceError; +import static com.mongodb.internal.operation.SyncOperationHelper.executeCommand; +import static com.mongodb.internal.operation.SyncOperationHelper.writeConcernErrorTransformer; +import static com.mongodb.internal.operation.WriteConcernHelper.appendWriteConcernToCommand; + +/** + * An operation that drops an index. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class DropIndexOperation implements WriteOperation<Void> { + private static final String COMMAND_NAME = "dropIndexes"; + private final MongoNamespace namespace; + private final String indexName; + private final BsonDocument indexKeys; + private final WriteConcern writeConcern; + + public DropIndexOperation(final MongoNamespace namespace, final String indexName, @Nullable final WriteConcern writeConcern) { + this.namespace = notNull("namespace", namespace); + this.indexName = notNull("indexName", indexName); + this.indexKeys = null; + this.writeConcern = writeConcern; + } + + public DropIndexOperation(final MongoNamespace namespace, final BsonDocument indexKeys, @Nullable final WriteConcern writeConcern) { + this.namespace = notNull("namespace", namespace); + this.indexKeys = notNull("indexKeys", indexKeys); + this.indexName = null; + this.writeConcern = writeConcern; + } + + public WriteConcern getWriteConcern() { + return writeConcern; + } + + @Override + public String getCommandName() { + return COMMAND_NAME; + } + + @Override + public Void execute(final WriteBinding binding) { + try { + executeCommand(binding, namespace.getDatabaseName(), getCommandCreator(), writeConcernErrorTransformer(binding + .getOperationContext() + .getTimeoutContext())); + } catch (MongoCommandException e) { + rethrowIfNotNamespaceError(e); + } + return null; + } + + @Override + public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback<Void> callback) { + executeCommandAsync(binding, namespace.getDatabaseName(), getCommandCreator(), + writeConcernErrorTransformerAsync(binding.getOperationContext().getTimeoutContext()), (result, t) -> { + if (t != null && !isNamespaceError(t)) { + callback.onResult(null, t); + } else { + callback.onResult(null, null); + } + }); + } + + private CommandOperationHelper.CommandCreator getCommandCreator() { + return (operationContext, serverDescription, connectionDescription) -> { + BsonDocument command = new BsonDocument(getCommandName(), new BsonString(namespace.getCollectionName())); + if (indexName != null) { + command.put("index", new BsonString(indexName)); + } else { + command.put("index", indexKeys); + } + appendWriteConcernToCommand(writeConcern, command); + return command; + }; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/DropSearchIndexOperation.java b/driver-core/src/main/com/mongodb/internal/operation/DropSearchIndexOperation.java new file mode 100644 index 00000000000..a440dbd0e7e --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/DropSearchIndexOperation.java @@ -0,0 +1,57 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoNamespace; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonString; + +import static com.mongodb.internal.operation.CommandOperationHelper.isNamespaceError; + +/** + * An operation that drops an Atlas Search index.
+ * + * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +final class DropSearchIndexOperation extends AbstractWriteSearchIndexOperation { + private static final String COMMAND_NAME = "dropSearchIndex"; + private final String indexName; + + DropSearchIndexOperation(final MongoNamespace namespace, final String indexName) { + super(namespace); + this.indexName = indexName; + } + + @Override + public String getCommandName() { + return COMMAND_NAME; + } + + @Override + void swallowOrThrow(@Nullable final E mongoExecutionException) throws E { + if (mongoExecutionException != null && !isNamespaceError(mongoExecutionException)) { + throw mongoExecutionException; + } + } + + @Override + BsonDocument buildCommand() { + return new BsonDocument(getCommandName(), new BsonString(getNamespace().getCollectionName())) + .append("name", new BsonString(indexName)); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/EstimatedDocumentCountOperation.java b/driver-core/src/main/com/mongodb/internal/operation/EstimatedDocumentCountOperation.java new file mode 100644 index 00000000000..427cd40dc40 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/EstimatedDocumentCountOperation.java @@ -0,0 +1,125 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoCommandException; +import com.mongodb.MongoNamespace; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncReadBinding; +import com.mongodb.internal.binding.ReadBinding; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.Decoder; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync; +import static com.mongodb.internal.operation.AsyncOperationHelper.executeRetryableReadAsync; +import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; +import static com.mongodb.internal.operation.CommandOperationHelper.isNamespaceError; +import static com.mongodb.internal.operation.CommandOperationHelper.rethrowIfNotNamespaceError; +import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand; +import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer; +import static com.mongodb.internal.operation.SyncOperationHelper.executeRetryableRead; +import static java.util.Collections.singletonList; + +/** + *
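To illustrate the command creator in DropIndexOperation above: depending on which constructor was used, the index field carries either the index name or the key pattern (the collection and index names below are made up). Since namespace errors are swallowed in both drop operations, dropping from a non-existent collection is not an error:

import org.bson.BsonDocument;
import org.bson.BsonInt32;
import org.bson.BsonString;

public final class DropIndexCommandSketch {
    public static void main(final String[] args) {
        // Constructed with an index name: { dropIndexes: <collection>, index: "x_1" }
        BsonDocument byName = new BsonDocument("dropIndexes", new BsonString("events"))
                .append("index", new BsonString("x_1"));
        // Constructed with index keys: { dropIndexes: <collection>, index: { x: 1 } }
        BsonDocument byKeys = new BsonDocument("dropIndexes", new BsonString("events"))
                .append("index", new BsonDocument("x", new BsonInt32(1)));
        System.out.println(byName.toJson());
        System.out.println(byKeys.toJson());
    }
}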
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class EstimatedDocumentCountOperation implements ReadOperationSimple { + private static final String COMMAND_NAME = "count"; + private static final Decoder DECODER = new BsonDocumentCodec(); + private final MongoNamespace namespace; + private boolean retryReads; + private BsonValue comment; + + public EstimatedDocumentCountOperation(final MongoNamespace namespace) { + this.namespace = notNull("namespace", namespace); + } + + public EstimatedDocumentCountOperation retryReads(final boolean retryReads) { + this.retryReads = retryReads; + return this; + } + + @Nullable + public BsonValue getComment() { + return comment; + } + + public EstimatedDocumentCountOperation comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + @Override + public String getCommandName() { + return COMMAND_NAME; + } + + @Override + public Long execute(final ReadBinding binding) { + try { + return executeRetryableRead(binding, namespace.getDatabaseName(), + getCommandCreator(), CommandResultDocumentCodec.create(DECODER, singletonList("firstBatch")), + transformer(), retryReads); + } catch (MongoCommandException e) { + return assertNotNull(rethrowIfNotNamespaceError(e, 0L)); + } + } + + @Override + public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback callback) { + executeRetryableReadAsync(binding, namespace.getDatabaseName(), + getCommandCreator(), CommandResultDocumentCodec.create(DECODER, singletonList("firstBatch")), + asyncTransformer(), retryReads, + (result, t) -> { + if (isNamespaceError(t)) { + callback.onResult(0L, null); + } else { + callback.onResult(result, t); + } + }); + } + + private CommandReadTransformer transformer() { + return (result, source, connection) -> transformResult(result, connection.getDescription()); + } + + private CommandReadTransformerAsync asyncTransformer() { + return (result, source, connection) -> transformResult(result, connection.getDescription()); + } + + private long transformResult(final BsonDocument result, final ConnectionDescription connectionDescription) { + return (result.getNumber("n")).longValue(); + } + + private CommandCreator getCommandCreator() { + return (operationContext, serverDescription, connectionDescription) -> { + BsonDocument document = new BsonDocument(getCommandName(), new BsonString(namespace.getCollectionName())); + appendReadConcernToCommand(operationContext.getSessionContext(), connectionDescription.getMaxWireVersion(), document); + if (comment != null) { + document.put("comment", comment); + } + return document; + }; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/ExplainHelper.java b/driver-core/src/main/com/mongodb/internal/operation/ExplainHelper.java new file mode 100644 index 00000000000..e1cd6a82b4b --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/ExplainHelper.java @@ -0,0 +1,51 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
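A small sketch of the EstimatedDocumentCountOperation transformer above, using a hypothetical server reply to the count command; transformResult simply reads the n field as a long, and namespace errors map to 0:

import org.bson.BsonDocument;

public final class EstimatedCountSketch {
    public static void main(final String[] args) {
        // Hypothetical reply to { count: <collection> }.
        BsonDocument reply = BsonDocument.parse("{ok: 1.0, n: 42}");
        long estimate = reply.getNumber("n").longValue(); // mirrors transformResult(...)
        System.out.println(estimate); // 42
    }
}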
+ */ + +package com.mongodb.internal.operation; + +import com.mongodb.ExplainVerbosity; +import com.mongodb.MongoInternalException; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonString; + +final class ExplainHelper { + + static BsonDocument asExplainCommand(final BsonDocument command, @Nullable final ExplainVerbosity explainVerbosity) { + BsonDocument explainCommand = new BsonDocument("explain", command); + if (explainVerbosity != null) { + explainCommand.append("verbosity", getVerbosityAsString(explainVerbosity)); + } + return explainCommand; + } + + private static BsonString getVerbosityAsString(final ExplainVerbosity explainVerbosity) { + switch (explainVerbosity) { + case QUERY_PLANNER: + return new BsonString("queryPlanner"); + case EXECUTION_STATS: + return new BsonString("executionStats"); + case ALL_PLANS_EXECUTIONS: + return new BsonString("allPlansExecution"); + default: + throw new MongoInternalException(String.format("Unsupported explain verbosity %s", explainVerbosity)); + } + } + + private ExplainHelper() { + + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/FindAndDeleteOperation.java b/driver-core/src/main/com/mongodb/internal/operation/FindAndDeleteOperation.java new file mode 100644 index 00000000000..db9d61b1dd4 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/FindAndDeleteOperation.java @@ -0,0 +1,100 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoNamespace; +import com.mongodb.WriteConcern; +import com.mongodb.client.model.Collation; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.validator.NoOpFieldNameValidator; +import com.mongodb.lang.Nullable; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonValue; +import org.bson.FieldNameValidator; +import org.bson.codecs.Decoder; + +/** + * An operation that atomically finds and deletes a single document. + * + *
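Before moving on to FindAndDeleteOperation, a sketch of what ExplainHelper.asExplainCommand above produces for a hypothetical find command at EXECUTION_STATS verbosity; the original command is nested whole under the explain field:

import org.bson.BsonDocument;
import org.bson.BsonString;

public final class ExplainCommandSketch {
    public static void main(final String[] args) {
        BsonDocument find = BsonDocument.parse("{find: 'events', filter: {x: 1}}");
        BsonDocument explain = new BsonDocument("explain", find)
                .append("verbosity", new BsonString("executionStats"));
        System.out.println(explain.toJson());
    }
}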
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class FindAndDeleteOperation extends BaseFindAndModifyOperation { + + public FindAndDeleteOperation(final MongoNamespace namespace, final WriteConcern writeConcern, final boolean retryWrites, + final Decoder decoder) { + super(namespace, writeConcern, retryWrites, decoder); + } + + @Override + public FindAndDeleteOperation filter(@Nullable final BsonDocument filter) { + super.filter(filter); + return this; + } + + @Override + public FindAndDeleteOperation projection(@Nullable final BsonDocument projection) { + super.projection(projection); + return this; + } + + @Override + public FindAndDeleteOperation sort(@Nullable final BsonDocument sort) { + super.sort(sort); + return this; + } + + @Override + public FindAndDeleteOperation hint(@Nullable final BsonDocument hint) { + super.hint(hint); + return this; + } + + @Override + public FindAndDeleteOperation hintString(@Nullable final String hint) { + super.hintString(hint); + return this; + } + + @Override + public FindAndDeleteOperation collation(@Nullable final Collation collation) { + super.collation(collation); + return this; + } + + @Override + public FindAndDeleteOperation comment(@Nullable final BsonValue comment) { + super.comment(comment); + return this; + } + + @Override + public FindAndDeleteOperation let(@Nullable final BsonDocument variables) { + super.let(variables); + return this; + } + + @Override + protected FieldNameValidator getFieldNameValidator() { + return NoOpFieldNameValidator.INSTANCE; + } + + @Override + protected void specializeCommand(final BsonDocument commandDocument, final ConnectionDescription connectionDescription) { + commandDocument.put("remove", BsonBoolean.TRUE); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/FindAndModifyHelper.java b/driver-core/src/main/com/mongodb/internal/operation/FindAndModifyHelper.java new file mode 100644 index 00000000000..aa7f774d0b7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/FindAndModifyHelper.java @@ -0,0 +1,69 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
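A sketch of a delete-flavored findAndModify command after FindAndDeleteOperation.specializeCommand has run; the collection name and the query field are assumptions, since the base command is assembled by BaseFindAndModifyOperation, which is not shown in this hunk:

import org.bson.BsonBoolean;
import org.bson.BsonDocument;
import org.bson.BsonString;

public final class FindAndDeleteCommandSketch {
    public static void main(final String[] args) {
        BsonDocument command = new BsonDocument("findAndModify", new BsonString("events"))
                .append("query", BsonDocument.parse("{status: 'stale'}")) // assumed base-class field
                .append("remove", BsonBoolean.TRUE); // added by specializeCommand above
        System.out.println(command.toJson());
    }
}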
+ */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoWriteConcernException; +import com.mongodb.ServerAddress; +import com.mongodb.WriteConcernResult; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt32; + +import java.util.stream.Collectors; + +import static com.mongodb.internal.operation.AsyncOperationHelper.CommandWriteTransformerAsync; +import static com.mongodb.internal.operation.SyncOperationHelper.CommandWriteTransformer; +import static com.mongodb.internal.operation.WriteConcernHelper.createWriteConcernError; +import static com.mongodb.internal.operation.WriteConcernHelper.hasWriteConcernError; + +final class FindAndModifyHelper { + + static CommandWriteTransformer transformer() { + return (result, connection) -> transformDocument(result, connection.getDescription().getServerAddress()); + } + + static CommandWriteTransformerAsync asyncTransformer() { + return (result, connection) -> transformDocument(result, connection.getDescription().getServerAddress()); + } + + @Nullable + private static T transformDocument(final BsonDocument result, final ServerAddress serverAddress) { + if (hasWriteConcernError(result)) { + throw new MongoWriteConcernException( + createWriteConcernError(result.getDocument("writeConcernError")), + createWriteConcernResult(result.getDocument("lastErrorObject", new BsonDocument())), serverAddress, + result.getArray("errorLabels", new BsonArray()).stream().map(i -> i.asString().getValue()).collect(Collectors.toSet())); + } + + if (!result.isDocument("value")) { + return null; + } + return BsonDocumentWrapperHelper.toDocument(result.getDocument("value", null)); + } + + private static WriteConcernResult createWriteConcernResult(final BsonDocument result) { + BsonBoolean updatedExisting = result.getBoolean("updatedExisting", BsonBoolean.FALSE); + + return WriteConcernResult.acknowledged(result.getNumber("n", new BsonInt32(0)).intValue(), + updatedExisting.getValue(), result.get("upserted")); + } + + private FindAndModifyHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/FindAndReplaceOperation.java b/driver-core/src/main/com/mongodb/internal/operation/FindAndReplaceOperation.java new file mode 100644 index 00000000000..7073260a4c7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/FindAndReplaceOperation.java @@ -0,0 +1,149 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
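Two hypothetical findAndModify reply shapes that FindAndModifyHelper.transformDocument above distinguishes: one whose value field holds the affected document, and one carrying a writeConcernError, which is surfaced as a MongoWriteConcernException:

import org.bson.BsonDocument;

public final class FindAndModifyReplySketch {
    public static void main(final String[] args) {
        BsonDocument found = BsonDocument.parse(
                "{ok: 1.0, value: {_id: 1, x: 2}, lastErrorObject: {n: 1, updatedExisting: true}}");
        System.out.println(found.isDocument("value")); // true: the document is returned

        BsonDocument wcFailure = BsonDocument.parse(
                "{ok: 1.0, value: null, writeConcernError: {code: 64, errmsg: 'waiting for replication timed out'}}");
        System.out.println(wcFailure.containsKey("writeConcernError")); // true: an exception is thrown
    }
}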
+ */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoNamespace; +import com.mongodb.WriteConcern; +import com.mongodb.client.model.Collation; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.validator.MappedFieldNameValidator; +import com.mongodb.internal.validator.NoOpFieldNameValidator; +import com.mongodb.internal.validator.ReplacingDocumentFieldNameValidator; +import com.mongodb.lang.Nullable; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonValue; +import org.bson.FieldNameValidator; +import org.bson.codecs.Decoder; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.operation.DocumentHelper.putIfTrue; +import static java.util.Collections.singletonMap; + +/** + * An operation that atomically finds and replaces a single document. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class FindAndReplaceOperation extends BaseFindAndModifyOperation { + private final BsonDocument replacement; + private boolean returnOriginal = true; + private boolean upsert; + private Boolean bypassDocumentValidation; + + public FindAndReplaceOperation(final MongoNamespace namespace, final WriteConcern writeConcern, final boolean retryWrites, + final Decoder decoder, final BsonDocument replacement) { + super(namespace, writeConcern, retryWrites, decoder); + this.replacement = notNull("replacement", replacement); + } + + public BsonDocument getReplacement() { + return replacement; + } + + public boolean isReturnOriginal() { + return returnOriginal; + } + + public FindAndReplaceOperation returnOriginal(final boolean returnOriginal) { + this.returnOriginal = returnOriginal; + return this; + } + + public boolean isUpsert() { + return upsert; + } + + public FindAndReplaceOperation upsert(final boolean upsert) { + this.upsert = upsert; + return this; + } + + public Boolean getBypassDocumentValidation() { + return bypassDocumentValidation; + } + + public FindAndReplaceOperation bypassDocumentValidation(@Nullable final Boolean bypassDocumentValidation) { + this.bypassDocumentValidation = bypassDocumentValidation; + return this; + } + + @Override + public FindAndReplaceOperation filter(@Nullable final BsonDocument filter) { + super.filter(filter); + return this; + } + + @Override + public FindAndReplaceOperation projection(@Nullable final BsonDocument projection) { + super.projection(projection); + return this; + } + + @Override + public FindAndReplaceOperation sort(@Nullable final BsonDocument sort) { + super.sort(sort); + return this; + } + + @Override + public FindAndReplaceOperation hint(@Nullable final BsonDocument hint) { + super.hint(hint); + return this; + } + + @Override + public FindAndReplaceOperation hintString(@Nullable final String hint) { + super.hintString(hint); + return this; + } + + @Override + public FindAndReplaceOperation collation(@Nullable final Collation collation) { + super.collation(collation); + return this; + } + + @Override + public FindAndReplaceOperation comment(@Nullable final BsonValue comment) { + super.comment(comment); + return this; + } + + @Override + public FindAndReplaceOperation let(@Nullable final BsonDocument variables) { + super.let(variables); + return this; + } + + @Override + protected FieldNameValidator getFieldNameValidator() { + return new MappedFieldNameValidator( + NoOpFieldNameValidator.INSTANCE, + singletonMap("update", ReplacingDocumentFieldNameValidator.INSTANCE)); + } + + @Override + protected void specializeCommand(final BsonDocument commandDocument, final ConnectionDescription connectionDescription) { + commandDocument.put("new", new BsonBoolean(!isReturnOriginal())); + putIfTrue(commandDocument, "upsert", isUpsert()); + commandDocument.put("update", getReplacement()); + if (bypassDocumentValidation != null) { + commandDocument.put("bypassDocumentValidation", BsonBoolean.valueOf(bypassDocumentValidation)); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/FindAndUpdateOperation.java b/driver-core/src/main/com/mongodb/internal/operation/FindAndUpdateOperation.java new file mode 100644 index 00000000000..e83deba30f3 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/FindAndUpdateOperation.java @@ -0,0 +1,184 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
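Correspondingly for FindAndReplaceOperation above, a sketch of the replace-flavored findAndModify command: new is the inverse of returnOriginal, upsert is only written when true, and update carries the whole replacement document (values are illustrative; the query field is assumed to come from the base class):

import org.bson.BsonBoolean;
import org.bson.BsonDocument;
import org.bson.BsonString;

public final class FindAndReplaceCommandSketch {
    public static void main(final String[] args) {
        boolean returnOriginal = false;
        BsonDocument command = new BsonDocument("findAndModify", new BsonString("events"))
                .append("query", BsonDocument.parse("{_id: 1}"))
                .append("new", BsonBoolean.valueOf(!returnOriginal))
                .append("upsert", BsonBoolean.TRUE)
                .append("update", BsonDocument.parse("{_id: 1, x: 99}"));
        System.out.println(command.toJson());
    }
}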
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoNamespace; +import com.mongodb.WriteConcern; +import com.mongodb.client.model.Collation; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.validator.MappedFieldNameValidator; +import com.mongodb.internal.validator.NoOpFieldNameValidator; +import com.mongodb.internal.validator.UpdateFieldNameValidator; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonValue; +import org.bson.FieldNameValidator; +import org.bson.codecs.Decoder; + +import java.util.List; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; +import static com.mongodb.internal.operation.DocumentHelper.putIfTrue; +import static java.util.Collections.singletonMap; + +/** + * An operation that atomically finds and updates a single document. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class FindAndUpdateOperation extends BaseFindAndModifyOperation { + private final BsonDocument update; + private final List updatePipeline; + private boolean returnOriginal = true; + private boolean upsert; + private Boolean bypassDocumentValidation; + private List arrayFilters; + + public FindAndUpdateOperation(final MongoNamespace namespace, + final WriteConcern writeConcern, final boolean retryWrites, final Decoder decoder, final BsonDocument update) { + super(namespace, writeConcern, retryWrites, decoder); + this.update = notNull("update", update); + this.updatePipeline = null; + } + + public FindAndUpdateOperation(final MongoNamespace namespace, final WriteConcern writeConcern, final boolean retryWrites, + final Decoder decoder, final List update) { + super(namespace, writeConcern, retryWrites, decoder); + this.updatePipeline = update; + this.update = null; + } + + @Nullable + public BsonDocument getUpdate() { + return update; + } + + @Nullable + public List getUpdatePipeline() { + return updatePipeline; + } + + public boolean isReturnOriginal() { + return returnOriginal; + } + + public FindAndUpdateOperation returnOriginal(final boolean returnOriginal) { + this.returnOriginal = returnOriginal; + return this; + } + + public boolean isUpsert() { + return upsert; + } + + public FindAndUpdateOperation upsert(final boolean upsert) { + this.upsert = upsert; + return this; + } + + public Boolean getBypassDocumentValidation() { + return bypassDocumentValidation; + } + + public FindAndUpdateOperation bypassDocumentValidation(@Nullable final Boolean bypassDocumentValidation) { + this.bypassDocumentValidation = bypassDocumentValidation; + return this; + } + + public FindAndUpdateOperation arrayFilters(@Nullable final List arrayFilters) { + this.arrayFilters = arrayFilters; + return this; + } + + public List getArrayFilters() { + return arrayFilters; + } + + @Override + public FindAndUpdateOperation filter(@Nullable final BsonDocument filter) { + super.filter(filter); + return this; + } + + @Override + public FindAndUpdateOperation projection(@Nullable final BsonDocument projection) { + super.projection(projection); + return this; + } + + @Override + public FindAndUpdateOperation sort(@Nullable final BsonDocument sort) { + super.sort(sort); + return this; + } + + @Override + public FindAndUpdateOperation hint(@Nullable final BsonDocument hint) { + super.hint(hint); + return this; + } + + @Override + public FindAndUpdateOperation hintString(@Nullable final String hint) { + super.hintString(hint); + return this; + } + + @Override + public FindAndUpdateOperation collation(@Nullable final Collation collation) { + super.collation(collation); + return this; + } + + @Override + public FindAndUpdateOperation comment(@Nullable final BsonValue comment) { + super.comment(comment); + return this; + } + + @Override + public FindAndUpdateOperation let(@Nullable final BsonDocument variables) { + super.let(variables); + return this; + } + + @Override + protected FieldNameValidator getFieldNameValidator() { + return new MappedFieldNameValidator(NoOpFieldNameValidator.INSTANCE, singletonMap("update", new UpdateFieldNameValidator())); + } + + @Override + protected void specializeCommand(final BsonDocument commandDocument, final ConnectionDescription connectionDescription) { + commandDocument.put("new", new BsonBoolean(!isReturnOriginal())); + putIfTrue(commandDocument, "upsert", isUpsert()); + + if (getUpdatePipeline() != null) { + commandDocument.put("update", new BsonArray(getUpdatePipeline())); + } 
else { + putIfNotNull(commandDocument, "update", getUpdate()); + } + if (bypassDocumentValidation != null) { + commandDocument.put("bypassDocumentValidation", BsonBoolean.valueOf(bypassDocumentValidation)); + } + if (arrayFilters != null) { + commandDocument.put("arrayFilters", new BsonArray(arrayFilters)); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/FindOperation.java b/driver-core/src/main/com/mongodb/internal/operation/FindOperation.java new file mode 100644 index 00000000000..4e1de40d150 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/FindOperation.java @@ -0,0 +1,495 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.CursorType; +import com.mongodb.ExplainVerbosity; +import com.mongodb.MongoCommandException; +import com.mongodb.MongoNamespace; +import com.mongodb.MongoQueryException; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.Collation; +import com.mongodb.internal.async.AsyncBatchCursor; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.async.function.AsyncCallbackSupplier; +import com.mongodb.internal.async.function.RetryState; +import com.mongodb.internal.binding.AsyncReadBinding; +import com.mongodb.internal.binding.ReadBinding; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.lang.Nullable; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.codecs.Decoder; + +import java.util.function.Supplier; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; +import static com.mongodb.internal.connection.CommandHelper.applyMaxTimeMS; +import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync; +import static com.mongodb.internal.operation.AsyncOperationHelper.createReadCommandAndExecuteAsync; +import static com.mongodb.internal.operation.AsyncOperationHelper.decorateReadWithRetriesAsync; +import static com.mongodb.internal.operation.AsyncOperationHelper.withAsyncSourceAndConnection; +import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; +import static com.mongodb.internal.operation.CommandOperationHelper.initialRetryState; +import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; +import static com.mongodb.internal.operation.DocumentHelper.putIfNotNullOrEmpty; +import static com.mongodb.internal.operation.ExplainHelper.asExplainCommand; +import static com.mongodb.internal.operation.OperationHelper.LOGGER; +import static com.mongodb.internal.operation.OperationHelper.canRetryRead; +import static com.mongodb.internal.operation.OperationHelper.setNonTailableCursorMaxTimeSupplier; +import static 
com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand; +import static com.mongodb.internal.operation.ServerVersionHelper.UNKNOWN_WIRE_VERSION; +import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer; +import static com.mongodb.internal.operation.SyncOperationHelper.createReadCommandAndExecute; +import static com.mongodb.internal.operation.SyncOperationHelper.decorateReadWithRetries; +import static com.mongodb.internal.operation.SyncOperationHelper.withSourceAndConnection; + +/** + * An operation that queries a collection using the provided criteria. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class FindOperation implements ReadOperationExplainable { + private static final String COMMAND_NAME = "find"; + private static final String FIRST_BATCH = "firstBatch"; + + private final MongoNamespace namespace; + private final Decoder decoder; + private boolean retryReads; + private BsonDocument filter; + private int batchSize; + private int limit; + private BsonDocument projection; + private int skip; + private BsonDocument sort; + private CursorType cursorType = CursorType.NonTailable; + private boolean noCursorTimeout; + private boolean partial; + private Collation collation; + private BsonValue comment; + private BsonValue hint; + private BsonDocument variables; + private BsonDocument max; + private BsonDocument min; + private boolean returnKey; + private boolean showRecordId; + private Boolean allowDiskUse; + private TimeoutMode timeoutMode; + + public FindOperation(final MongoNamespace namespace, final Decoder decoder) { + this.namespace = notNull("namespace", namespace); + this.decoder = notNull("decoder", decoder); + } + + public MongoNamespace getNamespace() { + return namespace; + } + + public Decoder getDecoder() { + return decoder; + } + + public BsonDocument getFilter() { + return filter; + } + + public FindOperation filter(@Nullable final BsonDocument filter) { + this.filter = filter; + return this; + } + + public int getBatchSize() { + return batchSize; + } + + public FindOperation batchSize(final int batchSize) { + this.batchSize = batchSize; + return this; + } + + public int getLimit() { + return limit; + } + + public FindOperation limit(final int limit) { + this.limit = limit; + return this; + } + + public BsonDocument getProjection() { + return projection; + } + + public FindOperation projection(@Nullable final BsonDocument projection) { + this.projection = projection; + return this; + } + + public int getSkip() { + return skip; + } + + public FindOperation skip(final int skip) { + this.skip = skip; + return this; + } + + public BsonDocument getSort() { + return sort; + } + + public FindOperation sort(@Nullable final BsonDocument sort) { + this.sort = sort; + return this; + } + + public CursorType getCursorType() { + return cursorType; + } + + public FindOperation cursorType(final CursorType cursorType) { + this.cursorType = notNull("cursorType", cursorType); + return this; + } + + public FindOperation timeoutMode(@Nullable final TimeoutMode timeoutMode) { + if (timeoutMode != null) { + this.timeoutMode = timeoutMode; + } + return this; + } + + public boolean isNoCursorTimeout() { + return noCursorTimeout; + } + + public FindOperation noCursorTimeout(final boolean noCursorTimeout) { + this.noCursorTimeout = noCursorTimeout; + return this; + } + + public boolean isPartial() { + return partial; + } + + public FindOperation partial(final boolean partial) { + this.partial = partial; + return this; + } + + public Collation getCollation() { + return collation; + } + + public FindOperation collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + public BsonValue getComment() { + return comment; + } + + public FindOperation comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + public BsonValue getHint() { + return hint; + } + + public FindOperation hint(@Nullable final BsonValue hint) { + this.hint = hint; + return this; + } + + public BsonDocument getLet() { + return variables; + } + + public FindOperation let(@Nullable final BsonDocument variables) { + this.variables = 
variables; + return this; + } + + public BsonDocument getMax() { + return max; + } + + public FindOperation max(@Nullable final BsonDocument max) { + this.max = max; + return this; + } + + public BsonDocument getMin() { + return min; + } + + public FindOperation min(@Nullable final BsonDocument min) { + this.min = min; + return this; + } + + public boolean isReturnKey() { + return returnKey; + } + + public FindOperation returnKey(final boolean returnKey) { + this.returnKey = returnKey; + return this; + } + + public boolean isShowRecordId() { + return showRecordId; + } + + public FindOperation showRecordId(final boolean showRecordId) { + this.showRecordId = showRecordId; + return this; + } + + public FindOperation retryReads(final boolean retryReads) { + this.retryReads = retryReads; + return this; + } + + public boolean getRetryReads() { + return retryReads; + } + + public Boolean isAllowDiskUse() { + return allowDiskUse; + } + + public FindOperation allowDiskUse(@Nullable final Boolean allowDiskUse) { + this.allowDiskUse = allowDiskUse; + return this; + } + + @Override + public String getCommandName() { + return COMMAND_NAME; + } + + @Override + public BatchCursor execute(final ReadBinding binding) { + IllegalStateException invalidTimeoutModeException = invalidTimeoutModeException(); + if (invalidTimeoutModeException != null) { + throw invalidTimeoutModeException; + } + + RetryState retryState = initialRetryState(retryReads, binding.getOperationContext().getTimeoutContext()); + Supplier> read = decorateReadWithRetries(retryState, binding.getOperationContext(), () -> + withSourceAndConnection(binding::getReadConnectionSource, false, (source, connection) -> { + retryState.breakAndThrowIfRetryAnd(() -> !canRetryRead(source.getServerDescription(), binding.getOperationContext())); + try { + return createReadCommandAndExecute(retryState, binding.getOperationContext(), source, namespace.getDatabaseName(), + getCommandCreator(), CommandResultDocumentCodec.create(decoder, FIRST_BATCH), + transformer(), connection); + } catch (MongoCommandException e) { + throw new MongoQueryException(e.getResponse(), e.getServerAddress()); + } + }) + ); + return read.get(); + } + + @Override + public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { + IllegalStateException invalidTimeoutModeException = invalidTimeoutModeException(); + if (invalidTimeoutModeException != null) { + callback.onResult(null, invalidTimeoutModeException); + return; + } + + RetryState retryState = initialRetryState(retryReads, binding.getOperationContext().getTimeoutContext()); + binding.retain(); + AsyncCallbackSupplier> asyncRead = decorateReadWithRetriesAsync( + retryState, binding.getOperationContext(), (AsyncCallbackSupplier>) funcCallback -> + withAsyncSourceAndConnection(binding::getReadConnectionSource, false, funcCallback, + (source, connection, releasingCallback) -> { + if (retryState.breakAndCompleteIfRetryAnd(() -> !canRetryRead(source.getServerDescription(), + binding.getOperationContext()), releasingCallback)) { + return; + } + SingleResultCallback> wrappedCallback = exceptionTransformingCallback(releasingCallback); + createReadCommandAndExecuteAsync(retryState, binding.getOperationContext(), source, + namespace.getDatabaseName(), getCommandCreator(), + CommandResultDocumentCodec.create(decoder, FIRST_BATCH), + asyncTransformer(), connection, wrappedCallback); + }) + ).whenComplete(binding::release); + asyncRead.get(errorHandlingCallback(callback, LOGGER)); + } + + private static 
SingleResultCallback exceptionTransformingCallback(final SingleResultCallback callback) { + return (result, t) -> { + if (t != null) { + if (t instanceof MongoCommandException) { + MongoCommandException commandException = (MongoCommandException) t; + callback.onResult(result, + new MongoQueryException(commandException.getResponse(), commandException.getServerAddress())); + } else { + callback.onResult(result, t); + } + } else { + callback.onResult(result, null); + } + }; + } + + @Override + public CommandReadOperation asExplainableOperation(@Nullable final ExplainVerbosity verbosity, final Decoder resultDecoder) { + return createExplainableOperation(verbosity, resultDecoder); + } + + CommandReadOperation createExplainableOperation(@Nullable final ExplainVerbosity verbosity, final Decoder resultDecoder) { + return new CommandReadOperation<>(getNamespace().getDatabaseName(), getCommandName(), + (operationContext, serverDescription, connectionDescription) -> { + BsonDocument command = getCommand(operationContext, UNKNOWN_WIRE_VERSION); + applyMaxTimeMS(operationContext.getTimeoutContext(), command); + return asExplainCommand(command, verbosity); + }, resultDecoder); + } + + private BsonDocument getCommand(final OperationContext operationContext, final int maxWireVersion) { + BsonDocument commandDocument = new BsonDocument(getCommandName(), new BsonString(namespace.getCollectionName())); + + appendReadConcernToCommand(operationContext.getSessionContext(), maxWireVersion, commandDocument); + + putIfNotNull(commandDocument, "filter", filter); + putIfNotNullOrEmpty(commandDocument, "sort", sort); + putIfNotNullOrEmpty(commandDocument, "projection", projection); + if (skip > 0) { + commandDocument.put("skip", new BsonInt32(skip)); + } + if (limit != 0) { + commandDocument.put("limit", new BsonInt32(Math.abs(limit))); + } + if (limit >= 0) { + if (batchSize < 0 && Math.abs(batchSize) < limit) { + commandDocument.put("limit", new BsonInt32(Math.abs(batchSize))); + } else if (batchSize != 0) { + int effectiveBatchSize = Math.abs(batchSize); + if (effectiveBatchSize == limit && effectiveBatchSize < Integer.MAX_VALUE) { + // avoid an open cursor on server side when batchSize and limit are equal + effectiveBatchSize++; + } + commandDocument.put("batchSize", new BsonInt32(effectiveBatchSize)); + } + } + if (limit < 0 || batchSize < 0) { + commandDocument.put("singleBatch", BsonBoolean.TRUE); + } + if (isTailableCursor()) { + commandDocument.put("tailable", BsonBoolean.TRUE); + if (isAwaitData()) { + commandDocument.put("awaitData", BsonBoolean.TRUE); + } else { + operationContext.getTimeoutContext().disableMaxTimeOverride(); + } + } else { + setNonTailableCursorMaxTimeSupplier(timeoutMode, operationContext); + } + + if (noCursorTimeout) { + commandDocument.put("noCursorTimeout", BsonBoolean.TRUE); + } + if (partial) { + commandDocument.put("allowPartialResults", BsonBoolean.TRUE); + } + if (collation != null) { + commandDocument.put("collation", collation.asDocument()); + } + if (comment != null) { + commandDocument.put("comment", comment); + } + if (hint != null) { + commandDocument.put("hint", hint); + } + if (variables != null) { + commandDocument.put("let", variables); + } + if (max != null) { + commandDocument.put("max", max); + } + if (min != null) { + commandDocument.put("min", min); + } + if (returnKey) { + commandDocument.put("returnKey", BsonBoolean.TRUE); + } + if (showRecordId) { + commandDocument.put("showRecordId", BsonBoolean.TRUE); + } + if (allowDiskUse != null) { + 
commandDocument.put("allowDiskUse", BsonBoolean.valueOf(allowDiskUse)); + } + return commandDocument; + } + + private CommandCreator getCommandCreator() { + return (operationContext, serverDescription, connectionDescription) -> + getCommand(operationContext, connectionDescription.getMaxWireVersion()); + } + + private boolean isTailableCursor() { + return cursorType.isTailable(); + } + + private boolean isAwaitData() { + return cursorType == CursorType.TailableAwait; + } + + private TimeoutMode getTimeoutMode() { + if (timeoutMode == null) { + return isTailableCursor() ? TimeoutMode.ITERATION : TimeoutMode.CURSOR_LIFETIME; + } + return timeoutMode; + } + + private CommandReadTransformer> transformer() { + return (result, source, connection) -> + new CommandBatchCursor<>(getTimeoutMode(), result, batchSize, getMaxTimeForCursor(source.getOperationContext()), decoder, + comment, source, connection); + } + + private CommandReadTransformerAsync> asyncTransformer() { + return (result, source, connection) -> + new AsyncCommandBatchCursor<>(getTimeoutMode(), result, batchSize, getMaxTimeForCursor(source.getOperationContext()), decoder, + comment, source, connection); + } + + private long getMaxTimeForCursor(final OperationContext operationContext) { + return cursorType == CursorType.TailableAwait ? operationContext.getTimeoutContext().getMaxAwaitTimeMS() : 0; + } + + @Nullable + private IllegalStateException invalidTimeoutModeException() { + if (isTailableCursor()) { + if (timeoutMode == TimeoutMode.CURSOR_LIFETIME) { + return new IllegalStateException("Tailable cursors only support the ITERATION value for the timeoutMode option."); + } + } + return null; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/IndexHelper.java b/driver-core/src/main/com/mongodb/internal/operation/IndexHelper.java new file mode 100644 index 00000000000..39933b33f27 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/IndexHelper.java @@ -0,0 +1,97 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.client.model.IndexModel; +import com.mongodb.client.model.SearchIndexModel; +import org.bson.BsonDocument; +import org.bson.BsonNumber; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.codecs.configuration.CodecRegistry; + +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class IndexHelper { + + /** + * Get a list of index names for the given list of index models + * + * @param indexes the index models + * @param codecRegistry the codec registry to convert each Bson key to a BsonDocument + * @return the list of index names + */ + public static List getIndexNames(final List indexes, final CodecRegistry codecRegistry) { + List indexNames = new ArrayList<>(indexes.size()); + for (IndexModel index : indexes) { + String name = index.getOptions().getName(); + if (name != null) { + indexNames.add(name); + } else { + indexNames.add(IndexHelper.generateIndexName(index.getKeys().toBsonDocument(BsonDocument.class, codecRegistry))); + } + } + return indexNames; + } + + /** + * Get a list of Atlas Search index names for the given list of {@link SearchIndexModel}. + * + * @param indexes the search index models. + * @return the list of search index names. + */ + public static List getSearchIndexNames(final List indexes) { + return indexes.stream() + .map(IndexHelper::getSearchIndexName) + .collect(Collectors.toList()); + } + + private static String getSearchIndexName(final SearchIndexModel model) { + String name = model.getName(); + return name != null ? name : "default"; + } + + /** + * Convenience method to generate an index name from the set of fields it is over. + * + * @return a string representation of this index's fields + */ + public static String generateIndexName(final BsonDocument index) { + StringBuilder indexName = new StringBuilder(); + for (final String keyNames : index.keySet()) { + if (indexName.length() != 0) { + indexName.append('_'); + } + indexName.append(keyNames).append('_'); + BsonValue ascOrDescValue = index.get(keyNames); + if (ascOrDescValue instanceof BsonNumber) { + indexName.append(((BsonNumber) ascOrDescValue).intValue()); + } else if (ascOrDescValue instanceof BsonString) { + indexName.append(((BsonString) ascOrDescValue).getValue().replace(' ', '_')); + } + } + return indexName.toString(); + } + + private IndexHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/ListCollectionsOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ListCollectionsOperation.java new file mode 100644 index 00000000000..8740986b23f --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/ListCollectionsOperation.java @@ -0,0 +1,238 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
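A usage sketch for IndexHelper.generateIndexName above (calling the internal helper directly, purely for illustration): numeric keys contribute their direction, while string-typed keys such as text indexes contribute the type:

import com.mongodb.internal.operation.IndexHelper;
import org.bson.BsonDocument;

public final class IndexNameSketch {
    public static void main(final String[] args) {
        System.out.println(IndexHelper.generateIndexName(BsonDocument.parse("{userId: 1, createdAt: -1}")));
        // userId_1_createdAt_-1
        System.out.println(IndexHelper.generateIndexName(BsonDocument.parse("{subject: 'text'}")));
        // subject_text
    }
}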
+ */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoCommandException; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.async.AsyncBatchCursor; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.async.function.AsyncCallbackSupplier; +import com.mongodb.internal.async.function.RetryState; +import com.mongodb.internal.binding.AsyncReadBinding; +import com.mongodb.internal.binding.ReadBinding; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonValue; +import org.bson.codecs.Codec; +import org.bson.codecs.Decoder; + +import java.util.function.Supplier; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; +import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; +import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync; +import static com.mongodb.internal.operation.AsyncOperationHelper.createReadCommandAndExecuteAsync; +import static com.mongodb.internal.operation.AsyncOperationHelper.cursorDocumentToAsyncBatchCursor; +import static com.mongodb.internal.operation.AsyncOperationHelper.decorateReadWithRetriesAsync; +import static com.mongodb.internal.operation.AsyncOperationHelper.withAsyncSourceAndConnection; +import static com.mongodb.internal.operation.AsyncSingleBatchCursor.createEmptyAsyncSingleBatchCursor; +import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; +import static com.mongodb.internal.operation.CommandOperationHelper.initialRetryState; +import static com.mongodb.internal.operation.CommandOperationHelper.isNamespaceError; +import static com.mongodb.internal.operation.CommandOperationHelper.rethrowIfNotNamespaceError; +import static com.mongodb.internal.operation.CursorHelper.getCursorDocumentFromBatchSize; +import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; +import static com.mongodb.internal.operation.DocumentHelper.putIfTrue; +import static com.mongodb.internal.operation.OperationHelper.LOGGER; +import static com.mongodb.internal.operation.OperationHelper.canRetryRead; +import static com.mongodb.internal.operation.OperationHelper.setNonTailableCursorMaxTimeSupplier; +import static com.mongodb.internal.operation.SingleBatchCursor.createEmptySingleBatchCursor; +import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer; +import static com.mongodb.internal.operation.SyncOperationHelper.createReadCommandAndExecute; +import static com.mongodb.internal.operation.SyncOperationHelper.cursorDocumentToBatchCursor; +import static com.mongodb.internal.operation.SyncOperationHelper.decorateReadWithRetries; +import static com.mongodb.internal.operation.SyncOperationHelper.withSourceAndConnection; + +/** + * An operation that provides a cursor allowing iteration through the metadata of all the collections in a database. This operation + * ensures that the value of the {@code name} field of each returned document is the simple name of the collection rather than the full + * namespace. + *
* <p>See {@code listCollections}.</p> + * + * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class ListCollectionsOperation implements ReadOperationCursor { + private static final String COMMAND_NAME = "listCollections"; + private final String databaseName; + private final Decoder decoder; + private boolean retryReads; + private BsonDocument filter; + private int batchSize; + private boolean nameOnly; + private boolean authorizedCollections; + private BsonValue comment; + private TimeoutMode timeoutMode = TimeoutMode.CURSOR_LIFETIME; + + public ListCollectionsOperation(final String databaseName, final Decoder decoder) { + this.databaseName = notNull("databaseName", databaseName); + this.decoder = notNull("decoder", decoder); + } + + public BsonDocument getFilter() { + return filter; + } + + public boolean isNameOnly() { + return nameOnly; + } + + public ListCollectionsOperation filter(@Nullable final BsonDocument filter) { + this.filter = filter; + return this; + } + + public ListCollectionsOperation nameOnly(final boolean nameOnly) { + this.nameOnly = nameOnly; + return this; + } + + public Integer getBatchSize() { + return batchSize; + } + + public ListCollectionsOperation batchSize(final int batchSize) { + this.batchSize = batchSize; + return this; + } + + public ListCollectionsOperation retryReads(final boolean retryReads) { + this.retryReads = retryReads; + return this; + } + + public boolean getRetryReads() { + return retryReads; + } + + @Nullable + public BsonValue getComment() { + return comment; + } + + public ListCollectionsOperation comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + public ListCollectionsOperation authorizedCollections(final boolean authorizedCollections) { + this.authorizedCollections = authorizedCollections; + return this; + } + + /** + * This method is used by tests via the reflection API. See + * {@code com.mongodb.reactivestreams.client.internal.TestHelper.assertOperationIsTheSameAs}. 
+ */ + @VisibleForTesting(otherwise = PRIVATE) + public boolean isAuthorizedCollections() { + return authorizedCollections; + } + + + public TimeoutMode getTimeoutMode() { + return timeoutMode; + } + + public ListCollectionsOperation timeoutMode(@Nullable final TimeoutMode timeoutMode) { + if (timeoutMode != null) { + this.timeoutMode = timeoutMode; + } + return this; + } + + @Override + public String getCommandName() { + return COMMAND_NAME; + } + + @Override + public BatchCursor execute(final ReadBinding binding) { + RetryState retryState = initialRetryState(retryReads, binding.getOperationContext().getTimeoutContext()); + Supplier> read = decorateReadWithRetries(retryState, binding.getOperationContext(), () -> + withSourceAndConnection(binding::getReadConnectionSource, false, (source, connection) -> { + retryState.breakAndThrowIfRetryAnd(() -> !canRetryRead(source.getServerDescription(), binding.getOperationContext())); + try { + return createReadCommandAndExecute(retryState, binding.getOperationContext(), source, databaseName, + getCommandCreator(), createCommandDecoder(), transformer(), connection); + } catch (MongoCommandException e) { + return rethrowIfNotNamespaceError(e, + createEmptySingleBatchCursor(source.getServerDescription().getAddress(), batchSize)); + } + }) + ); + return read.get(); + } + + @Override + public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { + RetryState retryState = initialRetryState(retryReads, binding.getOperationContext().getTimeoutContext()); + binding.retain(); + AsyncCallbackSupplier> asyncRead = decorateReadWithRetriesAsync( + retryState, binding.getOperationContext(), (AsyncCallbackSupplier>) funcCallback -> + withAsyncSourceAndConnection(binding::getReadConnectionSource, false, funcCallback, + (source, connection, releasingCallback) -> { + if (retryState.breakAndCompleteIfRetryAnd(() -> !canRetryRead(source.getServerDescription(), + binding.getOperationContext()), releasingCallback)) { + return; + } + createReadCommandAndExecuteAsync(retryState, binding.getOperationContext(), source, databaseName, + getCommandCreator(), createCommandDecoder(), asyncTransformer(), connection, + (result, t) -> { + if (t != null && !isNamespaceError(t)) { + releasingCallback.onResult(null, t); + } else { + releasingCallback.onResult(result != null + ? result : createEmptyAsyncSingleBatchCursor(getBatchSize()), null); + } + }); + }) + ).whenComplete(binding::release); + asyncRead.get(errorHandlingCallback(callback, LOGGER)); + } + + private CommandReadTransformer> transformer() { + return (result, source, connection) -> + cursorDocumentToBatchCursor(timeoutMode, result, batchSize, decoder, comment, source, connection); + } + + private CommandReadTransformerAsync> asyncTransformer() { + return (result, source, connection) -> + cursorDocumentToAsyncBatchCursor(timeoutMode, result, batchSize, decoder, comment, source, connection); + } + + + private CommandCreator getCommandCreator() { + return (operationContext, serverDescription, connectionDescription) -> { + BsonDocument commandDocument = new BsonDocument(getCommandName(), new BsonInt32(1)) + .append("cursor", getCursorDocumentFromBatchSize(batchSize == 0 ? 
null : batchSize)); + putIfNotNull(commandDocument, "filter", filter); + putIfTrue(commandDocument, "nameOnly", nameOnly); + putIfTrue(commandDocument, "authorizedCollections", authorizedCollections); + setNonTailableCursorMaxTimeSupplier(timeoutMode, operationContext); + putIfNotNull(commandDocument, "comment", comment); + return commandDocument; + }; + } + + private Codec createCommandDecoder() { + return CommandResultDocumentCodec.create(decoder, "firstBatch"); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/ListDatabasesOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ListDatabasesOperation.java new file mode 100644 index 00000000000..4787153190b --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/ListDatabasesOperation.java @@ -0,0 +1,132 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.internal.async.AsyncBatchCursor; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncReadBinding; +import com.mongodb.internal.binding.ReadBinding; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonValue; +import org.bson.codecs.Decoder; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; +import static com.mongodb.internal.operation.AsyncOperationHelper.asyncSingleBatchCursorTransformer; +import static com.mongodb.internal.operation.AsyncOperationHelper.executeRetryableReadAsync; +import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; +import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; +import static com.mongodb.internal.operation.OperationHelper.LOGGER; +import static com.mongodb.internal.operation.SyncOperationHelper.executeRetryableRead; +import static com.mongodb.internal.operation.SyncOperationHelper.singleBatchCursorTransformer; + + +/** + * An operation that provides a cursor allowing iteration through the metadata of all the databases for a MongoClient. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
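+ * <p>A minimal usage sketch (illustrative only, not part of this change; {@code binding} stands in for an
+ * already-constructed {@code ReadBinding}):</p>
+ * <pre>{@code
+ * ListDatabasesOperation<BsonDocument> operation =
+ *         new ListDatabasesOperation<>(new BsonDocumentCodec())
+ *                 .nameOnly(true)
+ *                 .retryReads(true);
+ * try (BatchCursor<BsonDocument> cursor = operation.execute(binding)) {
+ *     while (cursor.hasNext()) {
+ *         cursor.next().forEach(System.out::println);
+ *     }
+ * }
+ * }</pre>
+ * <p>The command sent to the {@code admin} database is then roughly {@code {"listDatabases": 1, "nameOnly": true}}.</p>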
+ */ +public class ListDatabasesOperation implements ReadOperationCursor { + private static final String COMMAND_NAME = "listDatabases"; + private static final String DATABASES = "databases"; + private final Decoder decoder; + private boolean retryReads; + private BsonDocument filter; + private Boolean nameOnly; + private Boolean authorizedDatabasesOnly; + private BsonValue comment; + + public ListDatabasesOperation(final Decoder decoder) { + this.decoder = notNull("decoder", decoder); + } + + public ListDatabasesOperation filter(@Nullable final BsonDocument filter) { + this.filter = filter; + return this; + } + + public BsonDocument getFilter() { + return filter; + } + + public ListDatabasesOperation nameOnly(final Boolean nameOnly) { + this.nameOnly = nameOnly; + return this; + } + + public ListDatabasesOperation authorizedDatabasesOnly(final Boolean authorizedDatabasesOnly) { + this.authorizedDatabasesOnly = authorizedDatabasesOnly; + return this; + } + + public ListDatabasesOperation retryReads(final boolean retryReads) { + this.retryReads = retryReads; + return this; + } + + public boolean getRetryReads() { + return retryReads; + } + + public Boolean getNameOnly() { + return nameOnly; + } + + public Boolean getAuthorizedDatabasesOnly() { + return authorizedDatabasesOnly; + } + + @Nullable + public BsonValue getComment() { + return comment; + } + + public ListDatabasesOperation comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + @Override + public String getCommandName() { + return COMMAND_NAME; + } + + @Override + public BatchCursor execute(final ReadBinding binding) { + return executeRetryableRead(binding, "admin", getCommandCreator(), CommandResultDocumentCodec.create(decoder, DATABASES), + singleBatchCursorTransformer(DATABASES), retryReads); + } + + @Override + public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { + executeRetryableReadAsync(binding, "admin", getCommandCreator(), CommandResultDocumentCodec.create(decoder, DATABASES), + asyncSingleBatchCursorTransformer(DATABASES), retryReads, errorHandlingCallback(callback, LOGGER)); + } + + private CommandCreator getCommandCreator() { + return (operationContext, serverDescription, connectionDescription) -> { + BsonDocument commandDocument = new BsonDocument(getCommandName(), new BsonInt32(1)); + putIfNotNull(commandDocument, "filter", filter); + putIfNotNull(commandDocument, "nameOnly", nameOnly); + putIfNotNull(commandDocument, "authorizedDatabases", authorizedDatabasesOnly); + putIfNotNull(commandDocument, "comment", comment); + return commandDocument; + }; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/ListIndexesOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ListIndexesOperation.java new file mode 100644 index 00000000000..a97acd64d58 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/ListIndexesOperation.java @@ -0,0 +1,195 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoCommandException; +import com.mongodb.MongoNamespace; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.internal.async.AsyncBatchCursor; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.async.function.AsyncCallbackSupplier; +import com.mongodb.internal.async.function.RetryState; +import com.mongodb.internal.binding.AsyncReadBinding; +import com.mongodb.internal.binding.ReadBinding; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.codecs.Codec; +import org.bson.codecs.Decoder; + +import java.util.function.Supplier; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; +import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync; +import static com.mongodb.internal.operation.AsyncOperationHelper.createReadCommandAndExecuteAsync; +import static com.mongodb.internal.operation.AsyncOperationHelper.cursorDocumentToAsyncBatchCursor; +import static com.mongodb.internal.operation.AsyncOperationHelper.decorateReadWithRetriesAsync; +import static com.mongodb.internal.operation.AsyncOperationHelper.withAsyncSourceAndConnection; +import static com.mongodb.internal.operation.AsyncSingleBatchCursor.createEmptyAsyncSingleBatchCursor; +import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; +import static com.mongodb.internal.operation.CommandOperationHelper.initialRetryState; +import static com.mongodb.internal.operation.CommandOperationHelper.isNamespaceError; +import static com.mongodb.internal.operation.CommandOperationHelper.rethrowIfNotNamespaceError; +import static com.mongodb.internal.operation.CursorHelper.getCursorDocumentFromBatchSize; +import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; +import static com.mongodb.internal.operation.OperationHelper.LOGGER; +import static com.mongodb.internal.operation.OperationHelper.canRetryRead; +import static com.mongodb.internal.operation.OperationHelper.setNonTailableCursorMaxTimeSupplier; +import static com.mongodb.internal.operation.SingleBatchCursor.createEmptySingleBatchCursor; +import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer; +import static com.mongodb.internal.operation.SyncOperationHelper.createReadCommandAndExecute; +import static com.mongodb.internal.operation.SyncOperationHelper.cursorDocumentToBatchCursor; +import static com.mongodb.internal.operation.SyncOperationHelper.decorateReadWithRetries; +import static com.mongodb.internal.operation.SyncOperationHelper.withSourceAndConnection; + +/** + * An operation that lists the indexes that have been created on a collection. For flexibility, the type of each document returned is + * generic. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
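+ * <p>A minimal usage sketch (illustrative only; {@code binding} stands in for an already-constructed
+ * {@code ReadBinding}):</p>
+ * <pre>{@code
+ * ListIndexesOperation<BsonDocument> operation =
+ *         new ListIndexesOperation<>(new MongoNamespace("test", "restaurants"), new BsonDocumentCodec())
+ *                 .batchSize(100)
+ *                 .retryReads(true);
+ * try (BatchCursor<BsonDocument> indexes = operation.execute(binding)) {
+ *     while (indexes.hasNext()) {
+ *         indexes.next().forEach(index -> System.out.println(index.get("name")));
+ *     }
+ * }
+ * }</pre>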
+ */ +public class ListIndexesOperation implements ReadOperationCursor { + private static final String COMMAND_NAME = "listIndexes"; + private final MongoNamespace namespace; + private final Decoder decoder; + private boolean retryReads; + private int batchSize; + private BsonValue comment; + private TimeoutMode timeoutMode = TimeoutMode.CURSOR_LIFETIME; + + public ListIndexesOperation(final MongoNamespace namespace, final Decoder decoder) { + this.namespace = notNull("namespace", namespace); + this.decoder = notNull("decoder", decoder); + } + + public Integer getBatchSize() { + return batchSize; + } + + public ListIndexesOperation batchSize(final int batchSize) { + this.batchSize = batchSize; + return this; + } + + public ListIndexesOperation retryReads(final boolean retryReads) { + this.retryReads = retryReads; + return this; + } + + public boolean getRetryReads() { + return retryReads; + } + + @Nullable + public BsonValue getComment() { + return comment; + } + + public ListIndexesOperation comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + public TimeoutMode getTimeoutMode() { + return timeoutMode; + } + + public ListIndexesOperation timeoutMode(@Nullable final TimeoutMode timeoutMode) { + if (timeoutMode != null) { + this.timeoutMode = timeoutMode; + } + return this; + } + + @Override + public String getCommandName() { + return COMMAND_NAME; + } + + @Override + public BatchCursor execute(final ReadBinding binding) { + RetryState retryState = initialRetryState(retryReads, binding.getOperationContext().getTimeoutContext()); + Supplier> read = decorateReadWithRetries(retryState, binding.getOperationContext(), () -> + withSourceAndConnection(binding::getReadConnectionSource, false, (source, connection) -> { + retryState.breakAndThrowIfRetryAnd(() -> !canRetryRead(source.getServerDescription(), binding.getOperationContext())); + try { + return createReadCommandAndExecute(retryState, binding.getOperationContext(), source, namespace.getDatabaseName(), + getCommandCreator(), createCommandDecoder(), transformer(), connection); + } catch (MongoCommandException e) { + return rethrowIfNotNamespaceError(e, + createEmptySingleBatchCursor(source.getServerDescription().getAddress(), batchSize)); + } + }) + ); + return read.get(); + } + + @Override + public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { + RetryState retryState = initialRetryState(retryReads, binding.getOperationContext().getTimeoutContext()); + binding.retain(); + AsyncCallbackSupplier> asyncRead = decorateReadWithRetriesAsync( + retryState, binding.getOperationContext(), (AsyncCallbackSupplier>) funcCallback -> + withAsyncSourceAndConnection(binding::getReadConnectionSource, false, funcCallback, + (source, connection, releasingCallback) -> { + if (retryState.breakAndCompleteIfRetryAnd(() -> !canRetryRead(source.getServerDescription(), + binding.getOperationContext()), releasingCallback)) { + return; + } + createReadCommandAndExecuteAsync(retryState, binding.getOperationContext(), source, + namespace.getDatabaseName(), getCommandCreator(), createCommandDecoder(), + asyncTransformer(), connection, + (result, t) -> { + if (t != null && !isNamespaceError(t)) { + releasingCallback.onResult(null, t); + } else { + releasingCallback.onResult(result != null + ? 
result : createEmptyAsyncSingleBatchCursor(getBatchSize()), null); + } + }); + }) + ).whenComplete(binding::release); + asyncRead.get(errorHandlingCallback(callback, LOGGER)); + } + + + private CommandCreator getCommandCreator() { + return (operationContext, serverDescription, connectionDescription) -> { + BsonDocument commandDocument = new BsonDocument(getCommandName(), new BsonString(namespace.getCollectionName())) + .append("cursor", getCursorDocumentFromBatchSize(batchSize == 0 ? null : batchSize)); + setNonTailableCursorMaxTimeSupplier(timeoutMode, operationContext); + putIfNotNull(commandDocument, "comment", comment); + return commandDocument; + }; + } + + private CommandReadTransformer> transformer() { + return (result, source, connection) -> + cursorDocumentToBatchCursor(timeoutMode, result, batchSize, decoder, comment, source, connection); + } + + private CommandReadTransformerAsync> asyncTransformer() { + return (result, source, connection) -> + cursorDocumentToAsyncBatchCursor(timeoutMode, result, batchSize, decoder, comment, source, connection); + } + + private Codec createCommandDecoder() { + return CommandResultDocumentCodec.create(decoder, "firstBatch"); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/ListSearchIndexesOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ListSearchIndexesOperation.java new file mode 100644 index 00000000000..7fadead0b57 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/ListSearchIndexesOperation.java @@ -0,0 +1,131 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.ExplainVerbosity; +import com.mongodb.MongoCommandException; +import com.mongodb.MongoNamespace; +import com.mongodb.client.model.Collation; +import com.mongodb.internal.async.AsyncBatchCursor; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncReadBinding; +import com.mongodb.internal.binding.ReadBinding; +import com.mongodb.lang.NonNull; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.codecs.Decoder; + +import static com.mongodb.internal.operation.AsyncSingleBatchCursor.createEmptyAsyncSingleBatchCursor; +import static com.mongodb.internal.operation.CommandOperationHelper.isNamespaceError; +import static com.mongodb.internal.operation.SingleBatchCursor.createEmptySingleBatchCursor; +import static java.util.Collections.singletonList; + + +/** + * An operation that lists Atlas Search indexes with the help of the {@value #STAGE_LIST_SEARCH_INDEXES} pipeline stage. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
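+ * <p>Conceptually the operation is shorthand for an aggregation with a single stage; with an index name it builds
+ * (see {@code asAggregateOperation()}) a pipeline stage equivalent to:</p>
+ * <pre>{@code
+ * // db.collection.aggregate([{$listSearchIndexes: {name: "searchIndexName"}}])
+ * BsonDocument stage = new BsonDocument("$listSearchIndexes",
+ *         new BsonDocument("name", new BsonString("searchIndexName")));
+ * }</pre>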
+ */ +public final class ListSearchIndexesOperation implements ReadOperationExplainable { + private static final String COMMAND_NAME = "aggregate"; + private static final String STAGE_LIST_SEARCH_INDEXES = "$listSearchIndexes"; + private final MongoNamespace namespace; + private final Decoder decoder; + @Nullable + private final Boolean allowDiskUse; + @Nullable + private final Integer batchSize; + @Nullable + private final Collation collation; + @Nullable + private final BsonValue comment; + @Nullable + private final String indexName; + private final boolean retryReads; + + public ListSearchIndexesOperation(final MongoNamespace namespace, final Decoder decoder, @Nullable final String indexName, + @Nullable final Integer batchSize, @Nullable final Collation collation, + @Nullable final BsonValue comment, + @Nullable final Boolean allowDiskUse, final boolean retryReads) { + this.namespace = namespace; + this.decoder = decoder; + this.allowDiskUse = allowDiskUse; + this.batchSize = batchSize; + this.collation = collation; + this.comment = comment; + this.indexName = indexName; + this.retryReads = retryReads; + } + + @Override + public String getCommandName() { + return COMMAND_NAME; + } + + @Override + public BatchCursor execute(final ReadBinding binding) { + try { + return asAggregateOperation().execute(binding); + } catch (MongoCommandException exception) { + int cursorBatchSize = batchSize == null ? 0 : batchSize; + if (!isNamespaceError(exception)) { + throw exception; + } else { + return createEmptySingleBatchCursor(exception.getServerAddress(), cursorBatchSize); + } + } + } + + @Override + public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { + asAggregateOperation().executeAsync(binding, (cursor, exception) -> { + if (exception != null && !isNamespaceError(exception)) { + callback.onResult(null, exception); + } else if (exception != null) { + callback.onResult(createEmptyAsyncSingleBatchCursor(batchSize == null ? 0 : batchSize), null); + } else { + callback.onResult(cursor, null); + } + }); + } + + @Override + public ReadOperationSimple asExplainableOperation(@Nullable final ExplainVerbosity verbosity, final Decoder resultDecoder) { + return asAggregateOperation().asExplainableOperation(verbosity, resultDecoder); + } + + private AggregateOperation asAggregateOperation() { + BsonDocument searchDefinition = getSearchDefinition(); + BsonDocument listSearchIndexesStage = new BsonDocument(STAGE_LIST_SEARCH_INDEXES, searchDefinition); + return new AggregateOperation<>(namespace, singletonList(listSearchIndexesStage), decoder) + .retryReads(retryReads) + .collation(collation) + .comment(comment) + .allowDiskUse(allowDiskUse) + .batchSize(batchSize); + } + + @NonNull + private BsonDocument getSearchDefinition() { + if (indexName == null) { + return new BsonDocument(); + } + return new BsonDocument("name", new BsonString(indexName)); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/MapReduceAsyncBatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/MapReduceAsyncBatchCursor.java new file mode 100644 index 00000000000..f2f7fe4af57 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/MapReduceAsyncBatchCursor.java @@ -0,0 +1,29 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.internal.async.AsyncBatchCursor; + +/** + * Represents the future results of a map-reduce operation as a cursor. Users can iterate over the results and additionally get relevant + * statistics about the operation. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
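+ * <p>An illustrative consumption sketch ({@code cursor} stands in for an instance obtained from
+ * {@code MapReduceWithInlineResultsOperation.executeAsync}):</p>
+ * <pre>{@code
+ * cursor.next((batch, t) -> {
+ *     if (t != null) {
+ *         // surface the failure to the caller
+ *     } else if (batch != null) {
+ *         batch.forEach(System.out::println);
+ *         System.out.println("emit count: " + cursor.getStatistics().getEmitCount());
+ *     }
+ * });
+ * }</pre>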
+ */ +public interface MapReduceAsyncBatchCursor extends AsyncBatchCursor { + MapReduceStatistics getStatistics(); +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/MapReduceBatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/MapReduceBatchCursor.java new file mode 100644 index 00000000000..a5de595d477 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/MapReduceBatchCursor.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +/** + * Represents the results of a map-reduce operation as a cursor. Users can iterate over the results and additionally get relevant + * statistics about the operation. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
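+ * <p>An illustrative sketch ({@code cursor} stands in for an instance obtained from
+ * {@code MapReduceWithInlineResultsOperation.execute}):</p>
+ * <pre>{@code
+ * List<BsonDocument> results = new ArrayList<>();
+ * while (cursor.hasNext()) {
+ *     results.addAll(cursor.next());
+ * }
+ * MapReduceStatistics statistics = cursor.getStatistics();
+ * System.out.printf("input=%d output=%d emit=%d duration=%dms%n",
+ *         statistics.getInputCount(), statistics.getOutputCount(),
+ *         statistics.getEmitCount(), statistics.getDuration());
+ * }</pre>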
+ */ +public interface MapReduceBatchCursor extends BatchCursor { + MapReduceStatistics getStatistics(); +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/MapReduceHelper.java b/driver-core/src/main/com/mongodb/internal/operation/MapReduceHelper.java new file mode 100644 index 00000000000..2f899d11585 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/MapReduceHelper.java @@ -0,0 +1,47 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import org.bson.BsonDocument; +import org.bson.BsonInt32; + +final class MapReduceHelper { + + static MapReduceStatistics createStatistics(final BsonDocument result) { + return new MapReduceStatistics(getInputCount(result), getOutputCount(result), getEmitCount(result), + getDuration(result)); + } + + private static int getInputCount(final BsonDocument result) { + return result.getDocument("counts", new BsonDocument()).getNumber("input", new BsonInt32(0)).intValue(); + } + + private static int getOutputCount(final BsonDocument result) { + return result.getDocument("counts", new BsonDocument()).getNumber("output", new BsonInt32(0)).intValue(); + } + + private static int getEmitCount(final BsonDocument result) { + return result.getDocument("counts", new BsonDocument()).getNumber("emit", new BsonInt32(0)).intValue(); + } + + private static int getDuration(final BsonDocument result) { + return result.getNumber("timeMillis", new BsonInt32(0)).intValue(); + } + + private MapReduceHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/MapReduceInlineResultsAsyncCursor.java b/driver-core/src/main/com/mongodb/internal/operation/MapReduceInlineResultsAsyncCursor.java new file mode 100644 index 00000000000..ebf331fe47b --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/MapReduceInlineResultsAsyncCursor.java @@ -0,0 +1,66 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.internal.async.SingleResultCallback; + +import java.util.List; + +/** + * Cursor representation of the results of an inline map-reduce operation. This allows users to iterate over the results that were returned + * from the operation, and also provides access to the statistics returned in the results. 
+ */ +class MapReduceInlineResultsAsyncCursor implements MapReduceAsyncBatchCursor { + + private final AsyncSingleBatchCursor delegate; + private final MapReduceStatistics statistics; + + MapReduceInlineResultsAsyncCursor(final AsyncSingleBatchCursor delegate, final MapReduceStatistics statistics) { + this.delegate = delegate; + this.statistics = statistics; + } + + @Override + public MapReduceStatistics getStatistics() { + return statistics; + } + + @Override + public void next(final SingleResultCallback> callback) { + delegate.next(callback); + } + + @Override + public void setBatchSize(final int batchSize) { + delegate.setBatchSize(batchSize); + } + + @Override + public int getBatchSize() { + return delegate.getBatchSize(); + } + + @Override + public boolean isClosed() { + return delegate.isClosed(); + } + + @Override + public void close() { + delegate.close(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/MapReduceInlineResultsCursor.java b/driver-core/src/main/com/mongodb/internal/operation/MapReduceInlineResultsCursor.java new file mode 100644 index 00000000000..564eac4a8f0 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/MapReduceInlineResultsCursor.java @@ -0,0 +1,86 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.ServerAddress; +import com.mongodb.ServerCursor; + +import java.util.List; + +/** + * Cursor representation of the results of an inline map-reduce operation. This allows users to iterate over the results that were returned + * from the operation, and also provides access to the statistics returned in the results. 
+ */ +class MapReduceInlineResultsCursor implements MapReduceBatchCursor { + private final BatchCursor delegate; + private final MapReduceStatistics statistics; + + MapReduceInlineResultsCursor(final BatchCursor delegate, final MapReduceStatistics statistics) { + this.delegate = delegate; + this.statistics = statistics; + } + + @Override + public MapReduceStatistics getStatistics() { + return statistics; + } + + @Override + public boolean hasNext() { + return delegate.hasNext(); + } + + @Override + public List next() { + return delegate.next(); + } + + @Override + public int available() { + return delegate.available(); + } + + @Override + public void setBatchSize(final int batchSize) { + delegate.setBatchSize(batchSize); + } + + @Override + public int getBatchSize() { + return delegate.getBatchSize(); + } + + @Override + public List tryNext() { + return delegate.tryNext(); + } + + @Override + public ServerCursor getServerCursor() { + return delegate.getServerCursor(); + } + + @Override + public ServerAddress getServerAddress() { + return delegate.getServerAddress(); + } + + @Override + public void close() { + delegate.close(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/MapReduceStatistics.java b/driver-core/src/main/com/mongodb/internal/operation/MapReduceStatistics.java new file mode 100644 index 00000000000..a8d34968142 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/MapReduceStatistics.java @@ -0,0 +1,53 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +/** + * Common statistics returned by running all types of map-reduce operations. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
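+ * <p>The statistics are parsed by {@code MapReduceHelper} from the server's reply, which has roughly the following
+ * shape (values here are made up for illustration):</p>
+ * <pre>{@code
+ * {
+ *     "results": [ ... ],
+ *     "counts": { "input": 100, "emit": 100, "output": 7 },
+ *     "timeMillis": 5,
+ *     "ok": 1.0
+ * }
+ * }</pre>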
+ */ +public class MapReduceStatistics { + + private final int inputCount; + private final int outputCount; + private final int emitCount; + private final int duration; + + public MapReduceStatistics(final int inputCount, final int outputCount, final int emitCount, final int duration) { + this.inputCount = inputCount; + this.outputCount = outputCount; + this.emitCount = emitCount; + this.duration = duration; + } + + public int getInputCount() { + return inputCount; + } + + public int getOutputCount() { + return outputCount; + } + + public int getEmitCount() { + return emitCount; + } + + public int getDuration() { + return duration; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/MapReduceToCollectionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/MapReduceToCollectionOperation.java new file mode 100644 index 00000000000..bfcc73a5aa6 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/MapReduceToCollectionOperation.java @@ -0,0 +1,296 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.ExplainVerbosity; +import com.mongodb.MongoNamespace; +import com.mongodb.WriteConcern; +import com.mongodb.client.model.Collation; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncWriteBinding; +import com.mongodb.internal.binding.WriteBinding; +import com.mongodb.lang.Nullable; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonJavaScript; +import org.bson.BsonString; +import org.bson.codecs.BsonDocumentCodec; + +import java.util.List; + +import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.connection.CommandHelper.applyMaxTimeMS; +import static com.mongodb.internal.operation.AsyncOperationHelper.CommandWriteTransformerAsync; +import static com.mongodb.internal.operation.AsyncOperationHelper.executeCommandAsync; +import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; +import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; +import static com.mongodb.internal.operation.DocumentHelper.putIfNotZero; +import static com.mongodb.internal.operation.DocumentHelper.putIfTrue; +import static com.mongodb.internal.operation.ExplainHelper.asExplainCommand; +import static com.mongodb.internal.operation.SyncOperationHelper.CommandWriteTransformer; +import static com.mongodb.internal.operation.SyncOperationHelper.executeCommand; +import static com.mongodb.internal.operation.WriteConcernHelper.appendWriteConcernToCommand; +import static com.mongodb.internal.operation.WriteConcernHelper.throwOnWriteConcernError; +import static java.util.Arrays.asList; + +/** + * Operation that runs a Map Reduce against a MongoDB instance. This operation does not support "inline" results, i.e. 
the results will + * be output into the collection represented by the MongoNamespace provided. + * + *
<p>To run a map reduce operation and receive the results inline (i.e. as a response to running the command) use {@code MapReduceWithInlineResultsOperation}.</p>
+ * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
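+ * <p>For example, configured with {@code action("merge")} and {@code databaseName("other")}, the command created by
+ * this operation (see {@code getCommandCreator()}) has roughly the following shape:</p>
+ * <pre>{@code
+ * {
+ *     "mapReduce": "<source collection>",
+ *     "map": <map function>,
+ *     "reduce": <reduce function>,
+ *     "out": { "merge": "<target collection>", "db": "other" }
+ * }
+ * }</pre>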
+ */ +public class MapReduceToCollectionOperation implements WriteOperation { + private static final String COMMAND_NAME = "mapReduce"; + private final MongoNamespace namespace; + private final BsonJavaScript mapFunction; + private final BsonJavaScript reduceFunction; + private final String collectionName; + private final WriteConcern writeConcern; + private BsonJavaScript finalizeFunction; + private BsonDocument scope; + private BsonDocument filter; + private BsonDocument sort; + private int limit; + private boolean jsMode; + private boolean verbose; + private String action = "replace"; + private String databaseName; + private Boolean bypassDocumentValidation; + private Collation collation; + private static final List VALID_ACTIONS = asList("replace", "merge", "reduce"); + + public MapReduceToCollectionOperation(final MongoNamespace namespace, final BsonJavaScript mapFunction, + final BsonJavaScript reduceFunction, @Nullable final String collectionName, @Nullable final WriteConcern writeConcern) { + this.namespace = notNull("namespace", namespace); + this.mapFunction = notNull("mapFunction", mapFunction); + this.reduceFunction = notNull("reduceFunction", reduceFunction); + this.collectionName = notNull("collectionName", collectionName); + this.writeConcern = writeConcern; + } + + public MongoNamespace getNamespace() { + return namespace; + } + + public BsonJavaScript getMapFunction() { + return mapFunction; + } + + public BsonJavaScript getReduceFunction() { + return reduceFunction; + } + + public String getCollectionName() { + return collectionName; + } + + public WriteConcern getWriteConcern() { + return writeConcern; + } + + public BsonJavaScript getFinalizeFunction() { + return finalizeFunction; + } + + public MapReduceToCollectionOperation finalizeFunction(final BsonJavaScript finalizeFunction) { + this.finalizeFunction = finalizeFunction; + return this; + } + + public BsonDocument getScope() { + return scope; + } + + public MapReduceToCollectionOperation scope(@Nullable final BsonDocument scope) { + this.scope = scope; + return this; + } + + public BsonDocument getFilter() { + return filter; + } + + public MapReduceToCollectionOperation filter(@Nullable final BsonDocument filter) { + this.filter = filter; + return this; + } + + public BsonDocument getSort() { + return sort; + } + + public MapReduceToCollectionOperation sort(@Nullable final BsonDocument sort) { + this.sort = sort; + return this; + } + + public int getLimit() { + return limit; + } + + public MapReduceToCollectionOperation limit(final int limit) { + this.limit = limit; + return this; + } + + public boolean isJsMode() { + return jsMode; + } + + public MapReduceToCollectionOperation jsMode(final boolean jsMode) { + this.jsMode = jsMode; + return this; + } + + public boolean isVerbose() { + return verbose; + } + + public MapReduceToCollectionOperation verbose(final boolean verbose) { + this.verbose = verbose; + return this; + } + + public String getAction() { + return action; + } + + public MapReduceToCollectionOperation action(final String action) { + notNull("action", action); + isTrue("action must be one of: \"replace\", \"merge\", \"reduce\"", VALID_ACTIONS.contains(action)); + this.action = action; + return this; + } + + @Nullable + public String getDatabaseName() { + return databaseName; + } + + public MapReduceToCollectionOperation databaseName(@Nullable final String databaseName) { + this.databaseName = databaseName; + return this; + } + + public Boolean getBypassDocumentValidation() { + return 
bypassDocumentValidation; + } + + public MapReduceToCollectionOperation bypassDocumentValidation(@Nullable final Boolean bypassDocumentValidation) { + this.bypassDocumentValidation = bypassDocumentValidation; + return this; + } + + public Collation getCollation() { + return collation; + } + + public MapReduceToCollectionOperation collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + @Override + public String getCommandName() { + return COMMAND_NAME; + } + + @Override + public MapReduceStatistics execute(final WriteBinding binding) { + return executeCommand(binding, namespace.getDatabaseName(), getCommandCreator(), transformer(binding + .getOperationContext() + .getTimeoutContext())); + } + + @Override + public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback callback) { + executeCommandAsync(binding, namespace.getDatabaseName(), getCommandCreator(), transformerAsync(binding + .getOperationContext() + .getTimeoutContext()), callback); + } + + /** + * Gets an operation whose execution explains this operation. + * + * @param explainVerbosity the explain verbosity + * @return a read operation that when executed will explain this operation + */ + public ReadOperationSimple asExplainableOperation(final ExplainVerbosity explainVerbosity) { + return createExplainableOperation(explainVerbosity); + } + + private CommandReadOperation createExplainableOperation(final ExplainVerbosity explainVerbosity) { + return new CommandReadOperation<>(getNamespace().getDatabaseName(), getCommandName(), + (operationContext, serverDescription, connectionDescription) -> { + BsonDocument command = getCommandCreator().create(operationContext, serverDescription, connectionDescription); + applyMaxTimeMS(operationContext.getTimeoutContext(), command); + return asExplainCommand(command, explainVerbosity); + }, new BsonDocumentCodec()); + } + + private CommandWriteTransformer transformer(final TimeoutContext timeoutContext) { + return (result, connection) -> { + throwOnWriteConcernError(result, connection.getDescription().getServerAddress(), + connection.getDescription().getMaxWireVersion(), timeoutContext); + return MapReduceHelper.createStatistics(result); + }; + } + + private CommandWriteTransformerAsync transformerAsync(final TimeoutContext timeoutContext) { + return (result, connection) -> { + throwOnWriteConcernError(result, connection.getDescription().getServerAddress(), + connection.getDescription().getMaxWireVersion(), timeoutContext); + return MapReduceHelper.createStatistics(result); + }; + } + + + private CommandCreator getCommandCreator() { + return (operationContext, serverDescription, connectionDescription) -> { + BsonDocument outputDocument = new BsonDocument(getAction(), new BsonString(getCollectionName())); + if (getDatabaseName() != null) { + outputDocument.put("db", new BsonString(getDatabaseName())); + } + BsonDocument commandDocument = new BsonDocument("mapReduce", new BsonString(namespace.getCollectionName())) + .append("map", getMapFunction()) + .append("reduce", getReduceFunction()) + .append("out", outputDocument); + + putIfNotNull(commandDocument, "query", getFilter()); + putIfNotNull(commandDocument, "sort", getSort()); + putIfNotNull(commandDocument, "finalize", getFinalizeFunction()); + putIfNotNull(commandDocument, "scope", getScope()); + putIfTrue(commandDocument, "verbose", isVerbose()); + putIfNotZero(commandDocument, "limit", getLimit()); + putIfTrue(commandDocument, "jsMode", isJsMode()); + if 
(bypassDocumentValidation != null) { + commandDocument.put("bypassDocumentValidation", BsonBoolean.valueOf(bypassDocumentValidation)); + } + appendWriteConcernToCommand(writeConcern, commandDocument); + if (collation != null) { + commandDocument.put("collation", collation.asDocument()); + } + return commandDocument; + }; + } + +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/MapReduceWithInlineResultsOperation.java b/driver-core/src/main/com/mongodb/internal/operation/MapReduceWithInlineResultsOperation.java new file mode 100644 index 00000000000..6661c2a5c77 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/MapReduceWithInlineResultsOperation.java @@ -0,0 +1,237 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.ExplainVerbosity; +import com.mongodb.MongoNamespace; +import com.mongodb.client.model.Collation; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncReadBinding; +import com.mongodb.internal.binding.ReadBinding; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonJavaScript; +import org.bson.BsonString; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.Decoder; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; +import static com.mongodb.internal.connection.CommandHelper.applyMaxTimeMS; +import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync; +import static com.mongodb.internal.operation.AsyncOperationHelper.executeRetryableReadAsync; +import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; +import static com.mongodb.internal.operation.DocumentHelper.putIfNotNull; +import static com.mongodb.internal.operation.DocumentHelper.putIfNotZero; +import static com.mongodb.internal.operation.DocumentHelper.putIfTrue; +import static com.mongodb.internal.operation.ExplainHelper.asExplainCommand; +import static com.mongodb.internal.operation.OperationHelper.LOGGER; +import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand; +import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer; +import static com.mongodb.internal.operation.SyncOperationHelper.executeRetryableRead; + +/** + *
<p>Operation that runs a Map Reduce against a MongoDB instance. This operation only supports "inline" results, i.e. the results will be returned as a result of running this operation.</p>
+ * + *
<p>To run a map reduce operation into a given collection, use {@code MapReduceToCollectionOperation}.</p>
+ * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
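+ * <p>A minimal usage sketch (illustrative only; {@code binding} stands in for an already-constructed
+ * {@code ReadBinding}):</p>
+ * <pre>{@code
+ * MapReduceWithInlineResultsOperation<BsonDocument> operation = new MapReduceWithInlineResultsOperation<>(
+ *         new MongoNamespace("test", "orders"),
+ *         new BsonJavaScript("function() { emit(this.category, 1); }"),
+ *         new BsonJavaScript("function(key, values) { return Array.sum(values); }"),
+ *         new BsonDocumentCodec());
+ * try (MapReduceBatchCursor<BsonDocument> cursor = operation.execute(binding)) {
+ *     while (cursor.hasNext()) {
+ *         cursor.next().forEach(System.out::println);
+ *     }
+ *     System.out.println("emitted: " + cursor.getStatistics().getEmitCount());
+ * }
+ * }</pre>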
+ */ +public class MapReduceWithInlineResultsOperation implements ReadOperationMapReduceCursor { + private static final String COMMAND_NAME = "mapReduce"; + private final MongoNamespace namespace; + private final BsonJavaScript mapFunction; + private final BsonJavaScript reduceFunction; + private final Decoder decoder; + private BsonJavaScript finalizeFunction; + private BsonDocument scope; + private BsonDocument filter; + private BsonDocument sort; + private int limit; + private boolean jsMode; + private boolean verbose; + private Collation collation; + + public MapReduceWithInlineResultsOperation(final MongoNamespace namespace, final BsonJavaScript mapFunction, + final BsonJavaScript reduceFunction, final Decoder decoder) { + this.namespace = notNull("namespace", namespace); + this.mapFunction = notNull("mapFunction", mapFunction); + this.reduceFunction = notNull("reduceFunction", reduceFunction); + this.decoder = notNull("decoder", decoder); + } + + public MongoNamespace getNamespace() { + return namespace; + } + + public Decoder getDecoder() { + return decoder; + } + + public BsonJavaScript getMapFunction() { + return mapFunction; + } + + public BsonJavaScript getReduceFunction() { + return reduceFunction; + } + + public BsonJavaScript getFinalizeFunction() { + return finalizeFunction; + } + + public MapReduceWithInlineResultsOperation finalizeFunction(final BsonJavaScript finalizeFunction) { + this.finalizeFunction = finalizeFunction; + return this; + } + + public BsonDocument getScope() { + return scope; + } + + public MapReduceWithInlineResultsOperation scope(@Nullable final BsonDocument scope) { + this.scope = scope; + return this; + } + + public BsonDocument getFilter() { + return filter; + } + + public MapReduceWithInlineResultsOperation filter(@Nullable final BsonDocument filter) { + this.filter = filter; + return this; + } + + public BsonDocument getSort() { + return sort; + } + + public MapReduceWithInlineResultsOperation sort(@Nullable final BsonDocument sort) { + this.sort = sort; + return this; + } + + public int getLimit() { + return limit; + } + + public MapReduceWithInlineResultsOperation limit(final int limit) { + this.limit = limit; + return this; + } + + public boolean isJsMode() { + return jsMode; + } + + public MapReduceWithInlineResultsOperation jsMode(final boolean jsMode) { + this.jsMode = jsMode; + return this; + } + + public boolean isVerbose() { + return verbose; + } + + public MapReduceWithInlineResultsOperation verbose(final boolean verbose) { + this.verbose = verbose; + return this; + } + + public Collation getCollation() { + return collation; + } + + public MapReduceWithInlineResultsOperation collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + @Override + public String getCommandName() { + return COMMAND_NAME; + } + + @Override + public MapReduceBatchCursor execute(final ReadBinding binding) { + return executeRetryableRead(binding, namespace.getDatabaseName(), + getCommandCreator(), + CommandResultDocumentCodec.create(decoder, "results"), transformer(), false); + } + + @Override + public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { + SingleResultCallback> errHandlingCallback = errorHandlingCallback(callback, LOGGER); + executeRetryableReadAsync(binding, namespace.getDatabaseName(), + getCommandCreator(), CommandResultDocumentCodec.create(decoder, "results"), + asyncTransformer(), false, errHandlingCallback); + } + + public ReadOperationSimple 
asExplainableOperation(final ExplainVerbosity explainVerbosity) { + return createExplainableOperation(explainVerbosity); + } + + private CommandReadOperation createExplainableOperation(final ExplainVerbosity explainVerbosity) { + return new CommandReadOperation<>(namespace.getDatabaseName(), getCommandName(), + (operationContext, serverDescription, connectionDescription) -> { + BsonDocument command = getCommandCreator().create(operationContext, serverDescription, connectionDescription); + applyMaxTimeMS(operationContext.getTimeoutContext(), command); + return asExplainCommand(command, + explainVerbosity); + }, new BsonDocumentCodec()); + } + + private CommandReadTransformer> transformer() { + return (result, source, connection) -> + new MapReduceInlineResultsCursor<>( + new SingleBatchCursor<>(BsonDocumentWrapperHelper.toList(result, "results"), 0, + connection.getDescription().getServerAddress()), + MapReduceHelper.createStatistics(result)); + } + + private CommandReadTransformerAsync> asyncTransformer() { + return (result, source, connection) -> new MapReduceInlineResultsAsyncCursor<>( + new AsyncSingleBatchCursor<>(BsonDocumentWrapperHelper.toList(result, "results"), 0), + MapReduceHelper.createStatistics(result)); + } + + private CommandCreator getCommandCreator() { + return (operationContext, serverDescription, connectionDescription) -> { + + BsonDocument commandDocument = new BsonDocument(getCommandName(), new BsonString(namespace.getCollectionName())) + .append("map", getMapFunction()) + .append("reduce", getReduceFunction()) + .append("out", new BsonDocument("inline", new BsonInt32(1))); + + putIfNotNull(commandDocument, "query", getFilter()); + putIfNotNull(commandDocument, "sort", getSort()); + putIfNotNull(commandDocument, "finalize", getFinalizeFunction()); + putIfNotNull(commandDocument, "scope", getScope()); + putIfTrue(commandDocument, "verbose", isVerbose()); + appendReadConcernToCommand(operationContext.getSessionContext(), connectionDescription.getMaxWireVersion(), commandDocument); + putIfNotZero(commandDocument, "limit", getLimit()); + putIfTrue(commandDocument, "jsMode", isJsMode()); + if (collation != null) { + commandDocument.put("collation", collation.asDocument()); + } + return commandDocument; + }; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java b/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java new file mode 100644 index 00000000000..39ff2dab17f --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/MixedBulkWriteOperation.java @@ -0,0 +1,513 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoException; +import com.mongodb.MongoNamespace; +import com.mongodb.WriteConcern; +import com.mongodb.assertions.Assertions; +import com.mongodb.bulk.BulkWriteResult; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.async.function.AsyncCallbackLoop; +import com.mongodb.internal.async.function.AsyncCallbackRunnable; +import com.mongodb.internal.async.function.AsyncCallbackSupplier; +import com.mongodb.internal.async.function.LoopState; +import com.mongodb.internal.async.function.RetryState; +import com.mongodb.internal.async.function.RetryingAsyncCallbackSupplier; +import com.mongodb.internal.async.function.RetryingSyncSupplier; +import com.mongodb.internal.binding.AsyncWriteBinding; +import com.mongodb.internal.binding.WriteBinding; +import com.mongodb.internal.bulk.WriteRequest; +import com.mongodb.internal.connection.AsyncConnection; +import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.connection.MongoWriteConcernWithResponseException; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.connection.ProtocolHelper; +import com.mongodb.internal.operation.retry.AttachmentKeys; +import com.mongodb.internal.session.SessionContext; +import com.mongodb.internal.validator.NoOpFieldNameValidator; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.BsonValue; + +import java.util.List; +import java.util.Locale; +import java.util.Optional; +import java.util.Set; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; +import static com.mongodb.internal.operation.AsyncOperationHelper.exceptionTransformingCallback; +import static com.mongodb.internal.operation.AsyncOperationHelper.withAsyncSourceAndConnection; +import static com.mongodb.internal.operation.CommandOperationHelper.addRetryableWriteErrorLabel; +import static com.mongodb.internal.operation.CommandOperationHelper.logRetryExecute; +import static com.mongodb.internal.operation.CommandOperationHelper.loggingShouldAttemptToRetryWriteAndAddRetryableLabel; +import static com.mongodb.internal.operation.CommandOperationHelper.onRetryableWriteAttemptFailure; +import static com.mongodb.internal.operation.CommandOperationHelper.transformWriteException; +import static com.mongodb.internal.operation.CommandOperationHelper.validateAndGetEffectiveWriteConcern; +import static com.mongodb.internal.operation.OperationHelper.LOGGER; +import static com.mongodb.internal.operation.OperationHelper.isRetryableWrite; +import static com.mongodb.internal.operation.OperationHelper.validateWriteRequests; +import static com.mongodb.internal.operation.OperationHelper.validateWriteRequestsAndCompleteIfInvalid; +import static com.mongodb.internal.operation.SyncOperationHelper.withSourceAndConnection; + +/** + * An operation to execute a series of write operations in bulk. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
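+ * <p>A minimal usage sketch (illustrative only; {@code binding} stands in for an already-constructed
+ * {@code WriteBinding}, and the request classes come from {@code com.mongodb.internal.bulk}):</p>
+ * <pre>{@code
+ * List<WriteRequest> requests = asList(
+ *         new InsertRequest(new BsonDocument("_id", new BsonInt32(1))),
+ *         new DeleteRequest(new BsonDocument("_id", new BsonInt32(2))));
+ * BulkWriteResult result = new MixedBulkWriteOperation(
+ *         new MongoNamespace("test", "coll"), requests, true, WriteConcern.ACKNOWLEDGED, false)
+ *         .execute(binding);
+ * System.out.println("inserted: " + result.getInsertedCount());
+ * }</pre>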
+ */ +public class MixedBulkWriteOperation implements WriteOperation { + private final MongoNamespace namespace; + private final List writeRequests; + private final boolean ordered; + private final boolean retryWrites; + private final WriteConcern writeConcern; + private Boolean bypassDocumentValidation; + private String commandName; + private BsonValue comment; + private BsonDocument variables; + + public MixedBulkWriteOperation(final MongoNamespace namespace, final List writeRequests, + final boolean ordered, final WriteConcern writeConcern, final boolean retryWrites) { + notNull("writeRequests", writeRequests); + isTrueArgument("writeRequests is not an empty list", !writeRequests.isEmpty()); + this.commandName = notNull("commandName", writeRequests.get(0).getType().toString().toLowerCase(Locale.ROOT)); + this.namespace = notNull("namespace", namespace); + this.writeRequests = writeRequests; + this.ordered = ordered; + this.writeConcern = notNull("writeConcern", writeConcern); + this.retryWrites = retryWrites; + } + + public MongoNamespace getNamespace() { + return namespace; + } + + public WriteConcern getWriteConcern() { + return writeConcern; + } + + public boolean isOrdered() { + return ordered; + } + + public List getWriteRequests() { + return writeRequests; + } + + public Boolean getBypassDocumentValidation() { + return bypassDocumentValidation; + } + + public MixedBulkWriteOperation bypassDocumentValidation(@Nullable final Boolean bypassDocumentValidation) { + this.bypassDocumentValidation = bypassDocumentValidation; + return this; + } + + public BsonValue getComment() { + return comment; + } + + public MixedBulkWriteOperation comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + public MixedBulkWriteOperation let(@Nullable final BsonDocument variables) { + this.variables = variables; + return this; + } + + public Boolean getRetryWrites() { + return retryWrites; + } + + private Supplier decorateWriteWithRetries(final RetryState retryState, final OperationContext operationContext, + final Supplier writeFunction) { + return new RetryingSyncSupplier<>(retryState, onRetryableWriteAttemptFailure(operationContext), + this::shouldAttemptToRetryWrite, () -> { + logRetryExecute(retryState, operationContext); + return writeFunction.get(); + }); + } + + private AsyncCallbackSupplier decorateWriteWithRetries(final RetryState retryState, final OperationContext operationContext, + final AsyncCallbackSupplier writeFunction) { + return new RetryingAsyncCallbackSupplier<>(retryState, onRetryableWriteAttemptFailure(operationContext), + this::shouldAttemptToRetryWrite, callback -> { + logRetryExecute(retryState, operationContext); + writeFunction.get(callback); + }); + } + + private boolean shouldAttemptToRetryWrite(final RetryState retryState, final Throwable attemptFailure) { + BulkWriteTracker bulkWriteTracker = retryState.attachment(AttachmentKeys.bulkWriteTracker()).orElseThrow(Assertions::fail); + /* A retry predicate is called only if there is at least one more attempt left. Here we maintain attempt counters manually + * and emulate the above contract by returning `false` at the very beginning of the retry predicate. 
*/ + if (bulkWriteTracker.lastAttempt()) { + return false; + } + boolean decision = loggingShouldAttemptToRetryWriteAndAddRetryableLabel(retryState, attemptFailure); + if (decision) { + /* The attempt counter maintained by `RetryState` is updated after (in the happens-before order) testing a retry predicate, + * and only if the predicate completes normally. Here we maintain attempt counters manually, and we emulate the + * "after completion" part by updating the counter at the very end of the retry predicate. */ + bulkWriteTracker.advance(); + } + return decision; + } + + @Override + public String getCommandName() { + return commandName; + } + + @Override + public BulkWriteResult execute(final WriteBinding binding) { + TimeoutContext timeoutContext = binding.getOperationContext().getTimeoutContext(); + /* We cannot use the tracking of attempts built in the `RetryState` class because conceptually we have to maintain multiple attempt + * counters while executing a single bulk write operation: + * - a counter that limits attempts to select server and checkout a connection before we created a batch; + * - a counter per each batch that limits attempts to execute the specific batch. + * Fortunately, these counters do not exist concurrently with each other. While maintaining the counters manually, + * we must adhere to the contract of `RetryingSyncSupplier`. When the retry timeout is implemented, there will be no counters, + * and the code related to the attempt tracking in `BulkWriteTracker` will be removed. */ + RetryState retryState = new RetryState(timeoutContext); + BulkWriteTracker.attachNew(retryState, retryWrites, timeoutContext); + Supplier retryingBulkWrite = decorateWriteWithRetries(retryState, binding.getOperationContext(), () -> + withSourceAndConnection(binding::getWriteConnectionSource, true, (source, connection) -> { + ConnectionDescription connectionDescription = connection.getDescription(); + // attach `maxWireVersion` ASAP because it is used to check whether we can retry + retryState.attach(AttachmentKeys.maxWireVersion(), connectionDescription.getMaxWireVersion(), true); + SessionContext sessionContext = binding.getOperationContext().getSessionContext(); + WriteConcern writeConcern = validateAndGetEffectiveWriteConcern(this.writeConcern, sessionContext); + if (!isRetryableWrite(retryWrites, writeConcern, connectionDescription, sessionContext)) { + handleMongoWriteConcernWithResponseException(retryState, true, timeoutContext); + } + validateWriteRequests(connectionDescription, bypassDocumentValidation, writeRequests, writeConcern); + if (!retryState.attachment(AttachmentKeys.bulkWriteTracker()).orElseThrow(Assertions::fail).batch().isPresent()) { + BulkWriteTracker.attachNew(retryState, BulkWriteBatch.createBulkWriteBatch(namespace, + connectionDescription, ordered, writeConcern, + bypassDocumentValidation, retryWrites, writeRequests, binding.getOperationContext(), comment, variables), timeoutContext); + } + return executeBulkWriteBatch(retryState, writeConcern, binding, connection); + }) + ); + try { + return retryingBulkWrite.get(); + } catch (MongoException e) { + throw transformWriteException(e); + } + } + + public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback callback) { + TimeoutContext timeoutContext = binding.getOperationContext().getTimeoutContext(); + // see the comment in `execute(WriteBinding)` explaining the manual tracking of attempts + RetryState retryState = new RetryState(timeoutContext); + BulkWriteTracker.attachNew(retryState, 
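
/*
 * For illustration: the contract restated in the comments above (a retry predicate is consulted
 * only while attempts remain, and the attempt counter advances only after the predicate completes
 * normally with a positive decision) can be emulated with manual accounting, which is what
 * BulkWriteTracker does. A minimal sketch; ManualAttemptTracker and its stand-in predicate are
 * hypothetical, not driver API:
 *
 *     final class ManualAttemptTracker {
 *         private final int maxAttempts;
 *         private int attempt;
 *
 *         ManualAttemptTracker(final int maxAttempts) {
 *             this.maxAttempts = maxAttempts;
 *         }
 *
 *         boolean lastAttempt() {
 *             return attempt == maxAttempts - 1;
 *         }
 *
 *         // Mirrors shouldAttemptToRetryWrite: refuse immediately on the last attempt,
 *         // otherwise decide, and advance the counter only on a positive decision.
 *         boolean shouldRetry(final Throwable failure) {
 *             if (lastAttempt()) {
 *                 return false;
 *             }
 *             boolean decision = failure instanceof RuntimeException; // stand-in predicate
 *             if (decision) {
 *                 attempt++;
 *             }
 *             return decision;
 *         }
 *     }
 */
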
retryWrites, timeoutContext); + binding.retain(); + AsyncCallbackSupplier retryingBulkWrite = this.decorateWriteWithRetries(retryState, + binding.getOperationContext(), + funcCallback -> + withAsyncSourceAndConnection(binding::getWriteConnectionSource, true, funcCallback, + (source, connection, releasingCallback) -> { + ConnectionDescription connectionDescription = connection.getDescription(); + // attach `maxWireVersion` ASAP because it is used to check whether we can retry + retryState.attach(AttachmentKeys.maxWireVersion(), connectionDescription.getMaxWireVersion(), true); + SessionContext sessionContext = binding.getOperationContext().getSessionContext(); + WriteConcern writeConcern = validateAndGetEffectiveWriteConcern(this.writeConcern, sessionContext); + if (!isRetryableWrite(retryWrites, writeConcern, connectionDescription, sessionContext) + && handleMongoWriteConcernWithResponseExceptionAsync(retryState, releasingCallback, timeoutContext)) { + return; + } + if (validateWriteRequestsAndCompleteIfInvalid(connectionDescription, bypassDocumentValidation, writeRequests, + writeConcern, releasingCallback)) { + return; + } + try { + if (!retryState.attachment(AttachmentKeys.bulkWriteTracker()).orElseThrow(Assertions::fail).batch().isPresent()) { + BulkWriteTracker.attachNew(retryState, BulkWriteBatch.createBulkWriteBatch(namespace, + connectionDescription, ordered, writeConcern, + bypassDocumentValidation, retryWrites, writeRequests, binding.getOperationContext(), comment, variables), timeoutContext); + } + } catch (Throwable t) { + releasingCallback.onResult(null, t); + return; + } + executeBulkWriteBatchAsync(retryState, writeConcern, binding, connection, releasingCallback); + }) + ).whenComplete(binding::release); + retryingBulkWrite.get(exceptionTransformingCallback(errorHandlingCallback(callback, LOGGER))); + } + + private BulkWriteResult executeBulkWriteBatch( + final RetryState retryState, + final WriteConcern effectiveWriteConcern, + final WriteBinding binding, + final Connection connection) { + BulkWriteTracker currentBulkWriteTracker = retryState.attachment(AttachmentKeys.bulkWriteTracker()) + .orElseThrow(Assertions::fail); + BulkWriteBatch currentBatch = currentBulkWriteTracker.batch().orElseThrow(Assertions::fail); + int maxWireVersion = connection.getDescription().getMaxWireVersion(); + OperationContext operationContext = binding.getOperationContext(); + TimeoutContext timeoutContext = operationContext.getTimeoutContext(); + + while (currentBatch.shouldProcessBatch()) { + try { + BsonDocument result = executeCommand(effectiveWriteConcern, operationContext, connection, currentBatch); + if (currentBatch.getRetryWrites() && !operationContext.getSessionContext().hasActiveTransaction()) { + MongoException writeConcernBasedError = ProtocolHelper.createSpecialException(result, + connection.getDescription().getServerAddress(), "errMsg", timeoutContext); + if (writeConcernBasedError != null) { + if (currentBulkWriteTracker.lastAttempt()) { + addRetryableWriteErrorLabel(writeConcernBasedError, maxWireVersion); + addErrorLabelsToWriteConcern(result.getDocument("writeConcernError"), writeConcernBasedError.getErrorLabels()); + } else if (loggingShouldAttemptToRetryWriteAndAddRetryableLabel(retryState, writeConcernBasedError)) { + throw new MongoWriteConcernWithResponseException(writeConcernBasedError, result); + } + } + } + currentBatch.addResult(result); + currentBulkWriteTracker = BulkWriteTracker.attachNext(retryState, currentBatch, timeoutContext); + currentBatch = 
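
/*
 * The batch loop here sends one batch at a time. When a batch succeeds but the server reports a
 * retryable write-concern error and attempts remain, the driver throws
 * MongoWriteConcernWithResponseException, which carries the server response together with the
 * error, so the retry path can resume from that response instead of re-sending an already applied
 * batch; the asynchronous variant expresses the same flow with AsyncCallbackLoop. A reduced
 * sketch of the control flow; Batch and the String-based response are hypothetical
 * simplifications, not driver types:
 *
 *     interface Batch {
 *         boolean shouldProcessBatch();
 *         String send();                          // server response for this batch
 *         String writeConcernError(String reply); // null when the reply carries no such error
 *         void addResult(String reply);
 *         Batch next();
 *     }
 *
 *     static void drain(Batch batch, final boolean lastAttempt) {
 *         while (batch.shouldProcessBatch()) {
 *             String reply = batch.send();
 *             String wcError = batch.writeConcernError(reply);
 *             if (wcError != null && !lastAttempt) {
 *                 // The driver throws MongoWriteConcernWithResponseException(error, reply)
 *                 // here so the retry keeps the reply paired with the error.
 *                 throw new RuntimeException(wcError);
 *             }
 *             batch.addResult(reply);
 *             batch = batch.next();
 *         }
 *     }
 */
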
currentBulkWriteTracker.batch().orElseThrow(Assertions::fail); + } catch (MongoException exception) { + if (!retryState.isFirstAttempt() && !(exception instanceof MongoWriteConcernWithResponseException)) { + addRetryableWriteErrorLabel(exception, maxWireVersion); + } + handleMongoWriteConcernWithResponseException(retryState, false, timeoutContext); + throw exception; + } + } + try { + return currentBatch.getResult(); + } catch (MongoException e) { + /* if we get here, some of the batches failed on the server side, + * so we need to mark the last attempt to avoid retrying. */ + retryState.markAsLastAttempt(); + throw e; + } + } + + private void executeBulkWriteBatchAsync( + final RetryState retryState, + final WriteConcern effectiveWriteConcern, + final AsyncWriteBinding binding, + final AsyncConnection connection, + final SingleResultCallback callback) { + LoopState loopState = new LoopState(); + AsyncCallbackRunnable loop = new AsyncCallbackLoop(loopState, iterationCallback -> { + BulkWriteTracker currentBulkWriteTracker = retryState.attachment(AttachmentKeys.bulkWriteTracker()) + .orElseThrow(Assertions::fail); + loopState.attach(AttachmentKeys.bulkWriteTracker(), currentBulkWriteTracker, true); + BulkWriteBatch currentBatch = currentBulkWriteTracker.batch().orElseThrow(Assertions::fail); + int maxWireVersion = connection.getDescription().getMaxWireVersion(); + if (loopState.breakAndCompleteIf(() -> !currentBatch.shouldProcessBatch(), iterationCallback)) { + return; + } + OperationContext operationContext = binding.getOperationContext(); + TimeoutContext timeoutContext = operationContext.getTimeoutContext(); + executeCommandAsync(effectiveWriteConcern, operationContext, connection, currentBatch, (result, t) -> { + if (t == null) { + if (currentBatch.getRetryWrites() && !operationContext.getSessionContext().hasActiveTransaction()) { + MongoException writeConcernBasedError = ProtocolHelper.createSpecialException(result, + connection.getDescription().getServerAddress(), "errMsg", binding.getOperationContext().getTimeoutContext()); + if (writeConcernBasedError != null) { + if (currentBulkWriteTracker.lastAttempt()) { + addRetryableWriteErrorLabel(writeConcernBasedError, maxWireVersion); + addErrorLabelsToWriteConcern(result.getDocument("writeConcernError"), + writeConcernBasedError.getErrorLabels()); + } else if (loggingShouldAttemptToRetryWriteAndAddRetryableLabel(retryState, writeConcernBasedError)) { + iterationCallback.onResult(null, + new MongoWriteConcernWithResponseException(writeConcernBasedError, result)); + return; + } + } + } + currentBatch.addResult(result); + BulkWriteTracker.attachNext(retryState, currentBatch, timeoutContext); + iterationCallback.onResult(null, null); + } else { + if (t instanceof MongoException) { + MongoException exception = (MongoException) t; + if (!retryState.isFirstAttempt() && !(exception instanceof MongoWriteConcernWithResponseException)) { + addRetryableWriteErrorLabel(exception, maxWireVersion); + } + if (handleMongoWriteConcernWithResponseExceptionAsync(retryState, null, timeoutContext)) { + return; + } + } + iterationCallback.onResult(null, t); + } + }); + }); + loop.run((voidResult, t) -> { + if (t != null) { + callback.onResult(null, t); + } else { + BulkWriteResult result; + try { + result = loopState.attachment(AttachmentKeys.bulkWriteTracker()) + .flatMap(BulkWriteTracker::batch).orElseThrow(Assertions::fail).getResult(); + } catch (Throwable loopResultT) { + if (loopResultT instanceof MongoException) { + /* if we get here, some of the batches 
failed on the server side, + * so we need to mark the last attempt to avoid retrying. */ + retryState.markAsLastAttempt(); + } + callback.onResult(null, loopResultT); + return; + } + callback.onResult(result, null); + } + }); + } + + private void handleMongoWriteConcernWithResponseException(final RetryState retryState, + final boolean breakAndThrowIfDifferent, + final TimeoutContext timeoutContext) { + if (!retryState.isFirstAttempt()) { + RuntimeException prospectiveFailedResult = (RuntimeException) retryState.exception().orElse(null); + boolean prospectiveResultIsWriteConcernException = prospectiveFailedResult instanceof MongoWriteConcernWithResponseException; + retryState.breakAndThrowIfRetryAnd(() -> breakAndThrowIfDifferent && !prospectiveResultIsWriteConcernException); + if (prospectiveResultIsWriteConcernException) { + retryState.attachment(AttachmentKeys.bulkWriteTracker()).orElseThrow(Assertions::fail) + .batch().ifPresent(bulkWriteBatch -> { + bulkWriteBatch.addResult( + (BsonDocument) ((MongoWriteConcernWithResponseException) prospectiveFailedResult).getResponse()); + BulkWriteTracker.attachNext(retryState, bulkWriteBatch, timeoutContext); + }); + } + } + } + + private boolean handleMongoWriteConcernWithResponseExceptionAsync(final RetryState retryState, + @Nullable final SingleResultCallback callback, + final TimeoutContext timeoutContext) { + if (!retryState.isFirstAttempt()) { + RuntimeException prospectiveFailedResult = (RuntimeException) retryState.exception().orElse(null); + boolean prospectiveResultIsWriteConcernException = prospectiveFailedResult instanceof MongoWriteConcernWithResponseException; + if (callback != null && retryState.breakAndCompleteIfRetryAnd(() -> !prospectiveResultIsWriteConcernException, callback)) { + return true; + } + if (prospectiveResultIsWriteConcernException) { + retryState.attachment(AttachmentKeys.bulkWriteTracker()).orElseThrow(Assertions::fail) + .batch().ifPresent(bulkWriteBatch -> { + bulkWriteBatch.addResult( + (BsonDocument) ((MongoWriteConcernWithResponseException) prospectiveFailedResult).getResponse()); + BulkWriteTracker.attachNext(retryState, bulkWriteBatch, timeoutContext); + }); + } + } + return false; + } + + @Nullable + private BsonDocument executeCommand( + final WriteConcern effectiveWriteConcern, + final OperationContext operationContext, + final Connection connection, + final BulkWriteBatch batch) { + commandName = batch.getCommand().getFirstKey(); + return connection.command(namespace.getDatabaseName(), batch.getCommand(), NoOpFieldNameValidator.INSTANCE, null, batch.getDecoder(), + operationContext.withOperationName(commandName), shouldExpectResponse(batch, effectiveWriteConcern), batch.getPayload()); + } + + private void executeCommandAsync( + final WriteConcern effectiveWriteConcern, + final OperationContext operationContext, + final AsyncConnection connection, + final BulkWriteBatch batch, + final SingleResultCallback callback) { + commandName = batch.getCommand().getFirstKey(); + connection.commandAsync(namespace.getDatabaseName(), batch.getCommand(), NoOpFieldNameValidator.INSTANCE, null, batch.getDecoder(), + operationContext.withOperationName(commandName), shouldExpectResponse(batch, effectiveWriteConcern), + batch.getPayload(), callback); + } + + private boolean shouldExpectResponse(final BulkWriteBatch batch, final WriteConcern effectiveWriteConcern) { + return effectiveWriteConcern.isAcknowledged() || (ordered && batch.hasAnotherBatch()); + } + + private void addErrorLabelsToWriteConcern(final BsonDocument 
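
/*
 * The two handleMongoWriteConcernWithResponseException methods above implement the recovery half
 * of that protocol: on a retry attempt, if the previous failure carried a server response, that
 * response is recorded as the current batch's result and the tracker advances, so the batch the
 * server already applied is not sent again. A reduced sketch; ResponseCarryingException and
 * Tracker are invented stand-ins for the driver's types:
 *
 *     static void resumeFromPriorResponse(final RuntimeException priorFailure, final Tracker tracker) {
 *         if (priorFailure instanceof ResponseCarryingException) {
 *             // Reuse the response captured with the write-concern error.
 *             tracker.addResultToCurrentBatch(((ResponseCarryingException) priorFailure).response());
 *             tracker.attachNextBatch();
 *         }
 *     }
 *
 *     interface Tracker {
 *         void addResultToCurrentBatch(String response);
 *         void attachNextBatch();
 *     }
 *
 *     final class ResponseCarryingException extends RuntimeException {
 *         private final String response;
 *
 *         ResponseCarryingException(final String message, final String response) {
 *             super(message);
 *             this.response = response;
 *         }
 *
 *         String response() {
 *             return response;
 *         }
 *     }
 */
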
result, final Set errorLabels) { + if (!result.containsKey("errorLabels")) { + result.put("errorLabels", new BsonArray(errorLabels.stream().map(BsonString::new).collect(Collectors.toList()))); + } + } + + public static final class BulkWriteTracker { + private int attempt; + private final int attempts; + private final boolean retryUntilTimeoutThrowsException; + @Nullable + private final BulkWriteBatch batch; + + static void attachNew(final RetryState retryState, final boolean retry, final TimeoutContext timeoutContext) { + retryState.attach(AttachmentKeys.bulkWriteTracker(), new BulkWriteTracker(retry, null, timeoutContext), false); + } + + static void attachNew(final RetryState retryState, final BulkWriteBatch batch, final TimeoutContext timeoutContext) { + attach(retryState, new BulkWriteTracker(batch.getRetryWrites(), batch, timeoutContext)); + } + + static BulkWriteTracker attachNext(final RetryState retryState, final BulkWriteBatch batch, final TimeoutContext timeoutContext) { + BulkWriteBatch nextBatch = batch.getNextBatch(); + BulkWriteTracker nextTracker = new BulkWriteTracker(nextBatch.getRetryWrites(), nextBatch, timeoutContext); + attach(retryState, nextTracker); + return nextTracker; + } + + private static void attach(final RetryState retryState, final BulkWriteTracker tracker) { + retryState.attach(AttachmentKeys.bulkWriteTracker(), tracker, false); + BulkWriteBatch batch = tracker.batch; + if (batch != null) { + retryState.attach(AttachmentKeys.retryableCommandFlag(), batch.getRetryWrites(), false) + .attach(AttachmentKeys.commandDescriptionSupplier(), () -> batch.getPayload().getPayloadType().toString(), false); + } + } + + private BulkWriteTracker(final boolean retry, @Nullable final BulkWriteBatch batch, final TimeoutContext timeoutContext) { + attempt = 0; + attempts = retry ? RetryState.RETRIES + 1 : 1; + this.batch = batch; + this.retryUntilTimeoutThrowsException = timeoutContext.hasTimeoutMS(); + } + + boolean lastAttempt() { + if (retryUntilTimeoutThrowsException){ + return false; + } + return attempt == attempts - 1; + } + + void advance() { + assertTrue(!lastAttempt()); + attempt++; + } + + Optional batch() { + return Optional.ofNullable(batch); + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/OperationHelper.java b/driver-core/src/main/com/mongodb/internal/operation/OperationHelper.java new file mode 100644 index 00000000000..f980d309a8a --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/OperationHelper.java @@ -0,0 +1,245 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation; + +import com.mongodb.ClientBulkWriteException; +import com.mongodb.MongoClientException; +import com.mongodb.MongoException; +import com.mongodb.WriteConcern; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.Collation; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerType; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.async.function.AsyncCallbackFunction; +import com.mongodb.internal.async.function.AsyncCallbackSupplier; +import com.mongodb.internal.bulk.DeleteRequest; +import com.mongodb.internal.bulk.UpdateRequest; +import com.mongodb.internal.bulk.WriteRequest; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.internal.session.SessionContext; +import com.mongodb.lang.NonNull; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.conversions.Bson; + +import java.util.List; +import java.util.function.Function; +import java.util.function.Supplier; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.internal.operation.ServerVersionHelper.serverIsLessThanVersionFourDotFour; +import static java.lang.String.format; + +/** + * This class is not part of the public API and may be removed or changed at any time. + */ +public final class OperationHelper { + public static final Logger LOGGER = Loggers.getLogger("operation"); + + static void validateCollationAndWriteConcern(@Nullable final Collation collation, final WriteConcern writeConcern) { + if (collation != null && !writeConcern.isAcknowledged()) { + throw new MongoClientException("Specifying collation with an unacknowledged WriteConcern is not supported"); + } + } + + private static void validateArrayFilters(final WriteConcern writeConcern) { + if (!writeConcern.isAcknowledged()) { + throw new MongoClientException("Specifying array filters with an unacknowledged WriteConcern is not supported"); + } + } + + private static void validateWriteRequestHint(final ConnectionDescription connectionDescription, final WriteConcern writeConcern, + final WriteRequest request) { + if (!writeConcern.isAcknowledged()) { + if (request instanceof DeleteRequest && serverIsLessThanVersionFourDotFour(connectionDescription)) { + throw new IllegalArgumentException(format("Hint not supported by wire version: %s", + connectionDescription.getMaxWireVersion())); + } + } + } + + static void validateHintForFindAndModify(final ConnectionDescription connectionDescription, final WriteConcern writeConcern) { + if (!writeConcern.isAcknowledged() && serverIsLessThanVersionFourDotFour(connectionDescription)) { + throw new IllegalArgumentException(format("Hint not supported by wire version: %s", + connectionDescription.getMaxWireVersion())); + } + } + + private static void validateWriteRequestCollations(final List requests, final WriteConcern writeConcern) { + Collation collation = null; + for (WriteRequest request : requests) { + if (request instanceof UpdateRequest) { + collation = ((UpdateRequest) request).getCollation(); + } else if (request instanceof DeleteRequest) { + collation = ((DeleteRequest) request).getCollation(); + } + if (collation != null) { + break; + } + } + validateCollationAndWriteConcern(collation, writeConcern); + } + + private static void 
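
/*
 * The validators in this class share one rule: options whose effect the client can only confirm
 * through a server reply (collation, array filters, hints on pre-4.4 servers,
 * bypassDocumentValidation) are rejected when combined with an unacknowledged write concern.
 * A sketch of the rule in isolation; requireAcknowledged is an invented helper and the exception
 * type is illustrative, not the one the driver throws in every case:
 *
 *     static void requireAcknowledged(final String option, final boolean acknowledged) {
 *         if (!acknowledged) {
 *             throw new IllegalArgumentException(
 *                     "Specifying " + option + " with an unacknowledged WriteConcern is not supported");
 *         }
 *     }
 *
 *     // requireAcknowledged("collation", WriteConcern.UNACKNOWLEDGED.isAcknowledged()) throws
 */
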
validateUpdateRequestArrayFilters(final List requests, final WriteConcern writeConcern) { + for (WriteRequest request : requests) { + List arrayFilters = null; + if (request instanceof UpdateRequest) { + arrayFilters = ((UpdateRequest) request).getArrayFilters(); + } + if (arrayFilters != null) { + validateArrayFilters(writeConcern); + break; + } + } + } + + private static void validateWriteRequestHints(final ConnectionDescription connectionDescription, + final List requests, + final WriteConcern writeConcern) { + for (WriteRequest request : requests) { + Bson hint = null; + String hintString = null; + if (request instanceof UpdateRequest) { + hint = ((UpdateRequest) request).getHint(); + hintString = ((UpdateRequest) request).getHintString(); + } else if (request instanceof DeleteRequest) { + hint = ((DeleteRequest) request).getHint(); + hintString = ((DeleteRequest) request).getHintString(); + } + if (hint != null || hintString != null) { + validateWriteRequestHint(connectionDescription, writeConcern, request); + break; + } + } + } + + static void validateWriteRequests(final ConnectionDescription connectionDescription, final Boolean bypassDocumentValidation, + final List requests, final WriteConcern writeConcern) { + checkBypassDocumentValidationIsSupported(bypassDocumentValidation, writeConcern); + validateWriteRequestCollations(requests, writeConcern); + validateUpdateRequestArrayFilters(requests, writeConcern); + validateWriteRequestHints(connectionDescription, requests, writeConcern); + } + + static boolean validateWriteRequestsAndCompleteIfInvalid(final ConnectionDescription connectionDescription, + final Boolean bypassDocumentValidation, final List requests, final WriteConcern writeConcern, + final SingleResultCallback callback) { + try { + validateWriteRequests(connectionDescription, bypassDocumentValidation, requests, writeConcern); + return false; + } catch (Throwable validationT) { + callback.onResult(null, validationT); + return true; + } + } + + private static void checkBypassDocumentValidationIsSupported(@Nullable final Boolean bypassDocumentValidation, + final WriteConcern writeConcern) { + if (bypassDocumentValidation != null && !writeConcern.isAcknowledged()) { + throw new MongoClientException("Specifying bypassDocumentValidation with an unacknowledged WriteConcern is not supported"); + } + } + + static boolean isRetryableWrite(final boolean retryWrites, final WriteConcern writeConcern, + final ConnectionDescription connectionDescription, final SessionContext sessionContext) { + if (!retryWrites) { + return false; + } else if (!writeConcern.isAcknowledged()) { + LOGGER.debug("retryWrites set to true but the writeConcern is unacknowledged."); + return false; + } else if (sessionContext.hasActiveTransaction()) { + LOGGER.debug("retryWrites set to true but in an active transaction."); + return false; + } else { + return canRetryWrite(connectionDescription, sessionContext); + } + } + + static boolean canRetryWrite(final ConnectionDescription connectionDescription, final SessionContext sessionContext) { + if (connectionDescription.getLogicalSessionTimeoutMinutes() == null) { + LOGGER.debug("retryWrites set to true but the server does not support sessions."); + return false; + } else if (connectionDescription.getServerType().equals(ServerType.STANDALONE)) { + LOGGER.debug("retryWrites set to true but the server is a standalone server."); + return false; + } + return true; + } + + static boolean canRetryRead(final ServerDescription serverDescription, final OperationContext 
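
/*
 * isRetryableWrite and canRetryWrite above form a short-circuit chain: a write is retryable only
 * when retryWrites is enabled, the write concern is acknowledged, no transaction is active, the
 * server supports sessions, and the server is not a standalone. A condensed restatement, with
 * plain booleans standing in for the ConnectionDescription and SessionContext queries:
 *
 *     static boolean retryable(final boolean retryWrites, final boolean acknowledged,
 *             final boolean inTransaction, final boolean supportsSessions, final boolean standalone) {
 *         return retryWrites && acknowledged && !inTransaction && supportsSessions && !standalone;
 *     }
 */
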
operationContext) { + if (operationContext.getSessionContext().hasActiveTransaction()) { + LOGGER.debug("retryReads set to true but in an active transaction."); + return false; + } + return true; + } + + static void setNonTailableCursorMaxTimeSupplier(final TimeoutMode timeoutMode, final OperationContext operationContext) { + if (timeoutMode == TimeoutMode.ITERATION) { + operationContext.getTimeoutContext().disableMaxTimeOverride(); + } + } + + /** + * Returns the {@link MongoException} that carries or should carry + * the {@linkplain MongoException#getCode() error code} and {@linkplain MongoException#getErrorLabels() error labels}. + * This method is needed because exceptions like {@link ClientBulkWriteException} do not carry that data themselves. + */ + public static MongoException unwrap(final MongoException exception) { + MongoException result = exception; + if (exception instanceof ClientBulkWriteException) { + MongoException topLevelError = ((ClientBulkWriteException) exception).getCause(); + result = topLevelError == null ? exception : topLevelError; + } + return result; + } + + + /** + * This internal exception is used to + *
<ul>
+ *     <li>on one hand allow propagating exceptions from {@link SyncOperationHelper#withSuppliedResource(Supplier, boolean, Function)} /
+ *     {@link AsyncOperationHelper#withAsyncSuppliedResource(AsyncCallbackSupplier, boolean, SingleResultCallback, AsyncCallbackFunction)}
+ *     and similar methods so that they can be properly retried, which is useful, e.g.,
+ *     for {@link com.mongodb.MongoConnectionPoolClearedException};</li>
+ *     <li>on the other hand to prevent them from propagating once the retry decision is made.</li>
+ * </ul>
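 * <p>As an illustration only (an editor-added sketch, not part of the driver's documentation or
 * API), the wrap-then-unwrap pattern this exception supports looks like:</p>
 * <pre>{@code
 * R resource;
 * try {
 *     resource = resourceSupplier.get();
 * } catch (RuntimeException e) {
 *     // wrapped so the retry machinery can inspect the original failure
 *     throw new ResourceSupplierInternalException(e);
 * }
 * // once the retry decision is made, callers observe only getCause()
 * }</pre>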
+ * + * @see SyncOperationHelper#withSuppliedResource(Supplier, boolean, Function) + * @see AsyncOperationHelper#withAsyncSuppliedResource(AsyncCallbackSupplier, boolean, SingleResultCallback, AsyncCallbackFunction) + */ + public static final class ResourceSupplierInternalException extends RuntimeException { + private static final long serialVersionUID = 0; + + ResourceSupplierInternalException(final Throwable cause) { + super(assertNotNull(cause)); + } + + @NonNull + @Override + public Throwable getCause() { + return assertNotNull(super.getCause()); + } + } + + private OperationHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/OperationReadConcernHelper.java b/driver-core/src/main/com/mongodb/internal/operation/OperationReadConcernHelper.java new file mode 100644 index 00000000000..55a56f0f5d7 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/OperationReadConcernHelper.java @@ -0,0 +1,47 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.internal.session.SessionContext; +import org.bson.BsonDocument; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.connection.ReadConcernHelper.getReadConcernDocument; + +final class OperationReadConcernHelper { + static void appendReadConcernToCommand(final SessionContext sessionContext, final int maxWireVersion, + final BsonDocument commandDocument) { + notNull("commandDocument", commandDocument); + notNull("sessionContext", sessionContext); + + if (sessionContext.hasActiveTransaction()) { + return; + } + + if (sessionContext.isSnapshot()) { + return; + } + + BsonDocument readConcernDocument = getReadConcernDocument(sessionContext, maxWireVersion); + if (!readConcernDocument.isEmpty()) { + commandDocument.append("readConcern", readConcernDocument); + } + } + + private OperationReadConcernHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/Operations.java b/driver-core/src/main/com/mongodb/internal/operation/Operations.java new file mode 100644 index 00000000000..da0661220da --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/Operations.java @@ -0,0 +1,851 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.MongoNamespace; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.bulk.BulkWriteResult; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.BulkWriteOptions; +import com.mongodb.client.model.ClusteredIndexOptions; +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.CountOptions; +import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.CreateIndexOptions; +import com.mongodb.client.model.CreateViewOptions; +import com.mongodb.client.model.DeleteManyModel; +import com.mongodb.client.model.DeleteOneModel; +import com.mongodb.client.model.DeleteOptions; +import com.mongodb.client.model.DropCollectionOptions; +import com.mongodb.client.model.DropIndexOptions; +import com.mongodb.client.model.EstimatedDocumentCountOptions; +import com.mongodb.client.model.FindOneAndDeleteOptions; +import com.mongodb.client.model.FindOneAndReplaceOptions; +import com.mongodb.client.model.FindOneAndUpdateOptions; +import com.mongodb.client.model.IndexModel; +import com.mongodb.client.model.IndexOptionDefaults; +import com.mongodb.client.model.InsertManyOptions; +import com.mongodb.client.model.InsertOneModel; +import com.mongodb.client.model.InsertOneOptions; +import com.mongodb.client.model.RenameCollectionOptions; +import com.mongodb.client.model.ReplaceOneModel; +import com.mongodb.client.model.ReplaceOptions; +import com.mongodb.client.model.ReturnDocument; +import com.mongodb.client.model.SearchIndexModel; +import com.mongodb.client.model.SearchIndexType; +import com.mongodb.client.model.UpdateManyModel; +import com.mongodb.client.model.UpdateOneModel; +import com.mongodb.client.model.UpdateOptions; +import com.mongodb.client.model.ValidationOptions; +import com.mongodb.client.model.WriteModel; +import com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; +import com.mongodb.client.model.changestream.FullDocument; +import com.mongodb.client.model.changestream.FullDocumentBeforeChange; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.bulk.DeleteRequest; +import com.mongodb.internal.bulk.IndexRequest; +import com.mongodb.internal.bulk.InsertRequest; +import com.mongodb.internal.bulk.UpdateRequest; +import com.mongodb.internal.bulk.WriteRequest; +import com.mongodb.internal.client.model.AggregationLevel; +import com.mongodb.internal.client.model.FindOptions; +import com.mongodb.internal.client.model.changestream.ChangeStreamLevel; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonDocumentWrapper; +import org.bson.BsonJavaScript; +import org.bson.BsonString; +import org.bson.BsonTimestamp; +import org.bson.BsonValue; +import org.bson.codecs.Codec; +import org.bson.codecs.CollectibleCodec; +import org.bson.codecs.Decoder; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.notNull; +import static java.lang.String.format; +import 
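
/*
 * OperationReadConcernHelper above appends a readConcern document only when the session is
 * neither in an active transaction nor a snapshot session (in those cases the transaction or
 * snapshot machinery supplies the read concern elsewhere), and only when the computed document is
 * non-empty. A minimal sketch of that rule using real BSON types; the guard booleans replace the
 * SessionContext queries:
 *
 *     static void appendReadConcern(final org.bson.BsonDocument command,
 *             final org.bson.BsonDocument readConcern,
 *             final boolean inTransaction, final boolean snapshot) {
 *         if (inTransaction || snapshot) {
 *             return;
 *         }
 *         if (!readConcern.isEmpty()) {
 *             command.append("readConcern", readConcern);
 *         }
 *     }
 */
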
static java.util.Collections.singletonList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +public final class Operations { + @Nullable + private final MongoNamespace namespace; + private final Class documentClass; + private final ReadPreference readPreference; + private final CodecRegistry codecRegistry; + private final ReadConcern readConcern; + private final WriteConcern writeConcern; + private final boolean retryWrites; + private final boolean retryReads; + private final TimeoutSettings timeoutSettings; + + public Operations(final Class documentClass, final ReadPreference readPreference, final CodecRegistry codecRegistry, + final boolean retryReads, final TimeoutSettings timeoutSettings) { + this(null, documentClass, readPreference, codecRegistry, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED, + true, retryReads, timeoutSettings); + } + + public Operations(@Nullable final MongoNamespace namespace, final Class documentClass, final ReadPreference readPreference, + final CodecRegistry codecRegistry, final boolean retryReads, final TimeoutSettings timeoutSettings) { + this(namespace, documentClass, readPreference, codecRegistry, ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED, + true, retryReads, timeoutSettings); + } + + public Operations(@Nullable final MongoNamespace namespace, final Class documentClass, final ReadPreference readPreference, + final CodecRegistry codecRegistry, final ReadConcern readConcern, final WriteConcern writeConcern, final boolean retryWrites, + final boolean retryReads, final TimeoutSettings timeoutSettings) { + this.namespace = namespace; + this.documentClass = documentClass; + this.readPreference = readPreference; + this.codecRegistry = codecRegistry; + this.readConcern = readConcern; + this.retryWrites = retryWrites; + this.retryReads = retryReads; + this.timeoutSettings = timeoutSettings; + + WriteConcern writeConcernToUse = writeConcern; + if (timeoutSettings.getTimeoutMS() != null) { + writeConcernToUse = assertNotNull(WriteConcernHelper.cloneWithoutTimeout(writeConcern)); + } + this.writeConcern = writeConcernToUse; + } + + @Nullable + public MongoNamespace getNamespace() { + return namespace; + } + + public Class getDocumentClass() { + return documentClass; + } + + public ReadPreference getReadPreference() { + return readPreference; + } + + public CodecRegistry getCodecRegistry() { + return codecRegistry; + } + + public ReadConcern getReadConcern() { + return readConcern; + } + + public WriteConcern getWriteConcern() { + return writeConcern; + } + + public boolean isRetryWrites() { + return retryWrites; + } + + public boolean isRetryReads() { + return retryReads; + } + + public TimeoutSettings getTimeoutSettings() { + return timeoutSettings; + } + + public TimeoutSettings createTimeoutSettings(final long maxTimeMS) { + return timeoutSettings.withMaxTimeMS(maxTimeMS); + } + + public TimeoutSettings createTimeoutSettings(final long maxTimeMS, final long maxAwaitTimeMS) { + return timeoutSettings.withMaxTimeAndMaxAwaitTimeMS(maxTimeMS, maxAwaitTimeMS); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final CountOptions options) { + return createTimeoutSettings(options.getMaxTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final EstimatedDocumentCountOptions options) { + return createTimeoutSettings(options.getMaxTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings 
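
/*
 * Each createTimeoutSettings overload in this class lifts a deprecated per-operation maxTime
 * (and, for finds, maxAwaitTime) into the newer TimeoutSettings model, mirroring how the
 * constructor strips the write concern's timeout once a client-level timeoutMS is set, so the two
 * timeout mechanisms are never mixed. The mapping reduces to calls already visible in this file,
 * for example:
 *
 *     TimeoutSettings perOperation =
 *             timeoutSettings.withMaxTimeMS(options.getMaxTime(MILLISECONDS)); // deprecated source
 */
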
createTimeoutSettings(final FindOptions options) { + return timeoutSettings.withMaxTimeAndMaxAwaitTimeMS(options.getMaxTime(MILLISECONDS), options.getMaxAwaitTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final FindOneAndDeleteOptions options) { + return createTimeoutSettings(options.getMaxTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final FindOneAndReplaceOptions options) { + return createTimeoutSettings(options.getMaxTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final FindOneAndUpdateOptions options) { + return timeoutSettings.withMaxTimeMS(options.getMaxTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final CreateIndexOptions options) { + return timeoutSettings.withMaxTimeMS(options.getMaxTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") // MaxTime + public TimeoutSettings createTimeoutSettings(final DropIndexOptions options) { + return timeoutSettings.withMaxTimeMS(options.getMaxTime(MILLISECONDS)); + } + + public ReadOperationSimple countDocuments(final Bson filter, final CountOptions options) { + CountDocumentsOperation operation = new CountDocumentsOperation( + assertNotNull(namespace)) + .retryReads(retryReads) + .filter(toBsonDocument(filter)) + .skip(options.getSkip()) + .limit(options.getLimit()) + .collation(options.getCollation()) + .comment(options.getComment()); + if (options.getHint() != null) { + operation.hint(toBsonDocument(options.getHint())); + } else if (options.getHintString() != null) { + operation.hint(new BsonString(options.getHintString())); + } + return operation; + } + + public ReadOperationSimple estimatedDocumentCount(final EstimatedDocumentCountOptions options) { + return new EstimatedDocumentCountOperation( + assertNotNull(namespace)) + .retryReads(retryReads) + .comment(options.getComment()); + } + + public ReadOperationCursor findFirst(final Bson filter, final Class resultClass, + final FindOptions options) { + return createFindOperation(assertNotNull(namespace), filter, resultClass, options).batchSize(0).limit(-1); + } + + public ReadOperationExplainable find(final Bson filter, final Class resultClass, + final FindOptions options) { + return createFindOperation(assertNotNull(namespace), filter, resultClass, options); + } + + public ReadOperationExplainable find(final MongoNamespace findNamespace, @Nullable final Bson filter, + final Class resultClass, final FindOptions options) { + return createFindOperation(findNamespace, filter, resultClass, options); + } + + private FindOperation createFindOperation(final MongoNamespace findNamespace, @Nullable final Bson filter, + final Class resultClass, final FindOptions options) { + FindOperation operation = new FindOperation<>( + findNamespace, codecRegistry.get(resultClass)) + .retryReads(retryReads) + .filter(filter == null ? 
new BsonDocument() : filter.toBsonDocument(documentClass, codecRegistry)) + .batchSize(options.getBatchSize()) + .skip(options.getSkip()) + .limit(options.getLimit()) + .projection(toBsonDocument(options.getProjection())) + .sort(toBsonDocument(options.getSort())) + .cursorType(options.getCursorType()) + .noCursorTimeout(options.isNoCursorTimeout()) + .partial(options.isPartial()) + .collation(options.getCollation()) + .comment(options.getComment()) + .let(toBsonDocument(options.getLet())) + .min(toBsonDocument(options.getMin())) + .max(toBsonDocument(options.getMax())) + .returnKey(options.isReturnKey()) + .showRecordId(options.isShowRecordId()) + .allowDiskUse(options.isAllowDiskUse()) + .timeoutMode(options.getTimeoutMode()); + + if (options.getHint() != null) { + operation.hint(toBsonDocument(options.getHint())); + } else if (options.getHintString() != null) { + operation.hint(new BsonString(options.getHintString())); + } + return operation; + } + + public ReadOperationCursor distinct(final String fieldName, @Nullable final Bson filter, final Class resultClass, + final Collation collation, final BsonValue comment, @Nullable final Bson hint, @Nullable final String hintString) { + DistinctOperation operation = new DistinctOperation<>(assertNotNull(namespace), + fieldName, codecRegistry.get(resultClass)) + .retryReads(retryReads) + .filter(filter == null ? null : filter.toBsonDocument(documentClass, codecRegistry)) + .collation(collation) + .comment(comment); + + if (hint != null) { + operation.hint(toBsonDocument(hint)); + } else if (hintString != null) { + operation.hint(new BsonString(hintString)); + } + return operation; + } + + public ReadOperationExplainable aggregate(final List pipeline, final Class resultClass, + @Nullable final TimeoutMode timeoutMode, @Nullable final Integer batchSize, + final Collation collation, @Nullable final Bson hint, @Nullable final String hintString, + final BsonValue comment, final Bson variables, final Boolean allowDiskUse, final AggregationLevel aggregationLevel) { + return new AggregateOperation<>(assertNotNull(namespace), + assertNotNull(toBsonDocumentList(pipeline)), codecRegistry.get(resultClass), aggregationLevel) + .retryReads(retryReads) + .allowDiskUse(allowDiskUse) + .batchSize(batchSize) + .collation(collation) + .hint(hint != null ? toBsonDocument(hint) : (hintString != null ? new BsonString(hintString) : null)) + .comment(comment) + .let(toBsonDocument(variables)) + .timeoutMode(timeoutMode); + } + + public ReadOperationSimple aggregateToCollection(final List pipeline, @Nullable final TimeoutMode timeoutMode, + final Boolean allowDiskUse, final Boolean bypassDocumentValidation, final Collation collation, @Nullable final Bson hint, + @Nullable final String hintString, final BsonValue comment, final Bson variables, final AggregationLevel aggregationLevel) { + return new AggregateToCollectionOperation(assertNotNull(namespace), + assertNotNull(toBsonDocumentList(pipeline)), readConcern, writeConcern, aggregationLevel) + .allowDiskUse(allowDiskUse) + .bypassDocumentValidation(bypassDocumentValidation) + .collation(collation) + .hint(hint != null ? toBsonDocument(hint) : (hintString != null ? 
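
/*
 * The builders in this class resolve index hints with one precedence rule, visible both in the
 * if/else in createFindOperation and in the ternary used for aggregate: a Bson hint wins over a
 * string hint, and the string form is wrapped as a BsonString. An equivalent helper (the name
 * effectiveHint is invented for illustration):
 *
 *     static org.bson.BsonValue effectiveHint(final org.bson.BsonDocument hint, final String hintString) {
 *         if (hint != null) {
 *             return hint;
 *         }
 *         return hintString != null ? new org.bson.BsonString(hintString) : null;
 *     }
 */
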
new BsonString(hintString) : null)) + .comment(comment) + .let(toBsonDocument(variables)) + .timeoutMode(timeoutMode); + } + + @SuppressWarnings("deprecation") + public WriteOperation mapReduceToCollection(final String databaseName, final String collectionName, + final String mapFunction, final String reduceFunction, + @Nullable final String finalizeFunction, final Bson filter, + final int limit, final boolean jsMode, + final Bson scope, final Bson sort, final boolean verbose, + final com.mongodb.client.model.MapReduceAction action, + final Boolean bypassDocumentValidation, final Collation collation) { + MapReduceToCollectionOperation operation = new MapReduceToCollectionOperation( + assertNotNull(namespace), new BsonJavaScript(mapFunction), + new BsonJavaScript(reduceFunction), collectionName, writeConcern) + .filter(toBsonDocument(filter)) + .limit(limit) + .jsMode(jsMode) + .scope(toBsonDocument(scope)) + .sort(toBsonDocument(sort)) + .verbose(verbose) + .action(action.getValue()) + .databaseName(databaseName) + .bypassDocumentValidation(bypassDocumentValidation) + .collation(collation); + + if (finalizeFunction != null) { + operation.finalizeFunction(new BsonJavaScript(finalizeFunction)); + } + return operation; + } + + public ReadOperationMapReduceCursor mapReduce(final String mapFunction, + final String reduceFunction, @Nullable final String finalizeFunction, final Class resultClass, final Bson filter, + final int limit, final boolean jsMode, final Bson scope, final Bson sort, final boolean verbose, + final Collation collation) { + MapReduceWithInlineResultsOperation operation = + new MapReduceWithInlineResultsOperation<>( + assertNotNull(namespace), new BsonJavaScript(mapFunction), new BsonJavaScript(reduceFunction), + codecRegistry.get(resultClass)) + .filter(toBsonDocument(filter)) + .limit(limit) + .jsMode(jsMode) + .scope(toBsonDocument(scope)) + .sort(toBsonDocument(sort)) + .verbose(verbose) + .collation(collation); + if (finalizeFunction != null) { + operation.finalizeFunction(new BsonJavaScript(finalizeFunction)); + } + return operation; + } + + public WriteOperation findOneAndDelete(final Bson filter, final FindOneAndDeleteOptions options) { + return new FindAndDeleteOperation<>( + assertNotNull(namespace), writeConcern, retryWrites, getCodec()) + .filter(toBsonDocument(filter)) + .projection(toBsonDocument(options.getProjection())) + .sort(toBsonDocument(options.getSort())) + .collation(options.getCollation()) + .hint(toBsonDocument(options.getHint())) + .hintString(options.getHintString()) + .comment(options.getComment()) + .let(toBsonDocument(options.getLet())); + } + + public WriteOperation findOneAndReplace(final Bson filter, final T replacement, + final FindOneAndReplaceOptions options) { + return new FindAndReplaceOperation<>( + assertNotNull(namespace), writeConcern, retryWrites, getCodec(), documentToBsonDocument(replacement)) + .filter(toBsonDocument(filter)) + .projection(toBsonDocument(options.getProjection())) + .sort(toBsonDocument(options.getSort())) + .returnOriginal(options.getReturnDocument() == ReturnDocument.BEFORE) + .upsert(options.isUpsert()) + .bypassDocumentValidation(options.getBypassDocumentValidation()) + .collation(options.getCollation()) + .hint(toBsonDocument(options.getHint())) + .hintString(options.getHintString()) + .comment(options.getComment()) + .let(toBsonDocument(options.getLet())); + } + + public WriteOperation findOneAndUpdate(final Bson filter, final Bson update, final FindOneAndUpdateOptions options) { + return new 
FindAndUpdateOperation<>( + assertNotNull(namespace), writeConcern, retryWrites, getCodec(), assertNotNull(toBsonDocument(update))) + .filter(toBsonDocument(filter)) + .projection(toBsonDocument(options.getProjection())) + .sort(toBsonDocument(options.getSort())) + .returnOriginal(options.getReturnDocument() == ReturnDocument.BEFORE) + .upsert(options.isUpsert()) + .bypassDocumentValidation(options.getBypassDocumentValidation()) + .collation(options.getCollation()) + .arrayFilters(toBsonDocumentList(options.getArrayFilters())) + .hint(toBsonDocument(options.getHint())) + .hintString(options.getHintString()) + .comment(options.getComment()) + .let(toBsonDocument(options.getLet())); + } + + public WriteOperation findOneAndUpdate(final Bson filter, final List update, + final FindOneAndUpdateOptions options) { + return new FindAndUpdateOperation<>( + assertNotNull(namespace), writeConcern, retryWrites, getCodec(), assertNotNull(toBsonDocumentList(update))) + .filter(toBsonDocument(filter)) + .projection(toBsonDocument(options.getProjection())) + .sort(toBsonDocument(options.getSort())) + .returnOriginal(options.getReturnDocument() == ReturnDocument.BEFORE) + .upsert(options.isUpsert()) + .bypassDocumentValidation(options.getBypassDocumentValidation()) + .collation(options.getCollation()) + .arrayFilters(toBsonDocumentList(options.getArrayFilters())) + .hint(toBsonDocument(options.getHint())) + .hintString(options.getHintString()) + .comment(options.getComment()) + .let(toBsonDocument(options.getLet())); + } + + + public WriteOperation insertOne(final T document, final InsertOneOptions options) { + return bulkWrite(singletonList(new InsertOneModel<>(document)), + new BulkWriteOptions().bypassDocumentValidation(options.getBypassDocumentValidation()).comment(options.getComment())); + } + + + public WriteOperation replaceOne(final Bson filter, final T replacement, final ReplaceOptions options) { + return bulkWrite(singletonList(new ReplaceOneModel<>(filter, replacement, options)), + new BulkWriteOptions().bypassDocumentValidation(options.getBypassDocumentValidation()) + .comment(options.getComment()).let(options.getLet())); + } + + public WriteOperation deleteOne(final Bson filter, final DeleteOptions options) { + return bulkWrite(singletonList(new DeleteOneModel<>(filter, options)), + new BulkWriteOptions().comment(options.getComment()).let(options.getLet())); + } + + public WriteOperation deleteMany(final Bson filter, final DeleteOptions options) { + return bulkWrite(singletonList(new DeleteManyModel<>(filter, options)), + new BulkWriteOptions().comment(options.getComment()).let(options.getLet())); + } + + public WriteOperation updateOne(final Bson filter, final Bson update, final UpdateOptions options) { + return bulkWrite(singletonList(new UpdateOneModel<>(filter, update, options)), + new BulkWriteOptions().bypassDocumentValidation(options.getBypassDocumentValidation()) + .comment(options.getComment()).let(options.getLet())); + } + + public WriteOperation updateOne(final Bson filter, final List update, final UpdateOptions options) { + return bulkWrite(singletonList(new UpdateOneModel<>(filter, update, options)), + new BulkWriteOptions().bypassDocumentValidation(options.getBypassDocumentValidation()) + .comment(options.getComment()).let(options.getLet())); + } + + public WriteOperation updateMany(final Bson filter, final Bson update, final UpdateOptions options) { + return bulkWrite(singletonList(new UpdateManyModel<>(filter, update, options)), + new 
BulkWriteOptions().bypassDocumentValidation(options.getBypassDocumentValidation()) + .comment(options.getComment()).let(options.getLet())); + } + + public WriteOperation updateMany(final Bson filter, final List update, final UpdateOptions options) { + return bulkWrite(singletonList(new UpdateManyModel<>(filter, update, options)), + new BulkWriteOptions().bypassDocumentValidation(options.getBypassDocumentValidation()) + .comment(options.getComment()).let(options.getLet())); + } + + public WriteOperation insertMany(final List documents, final InsertManyOptions options) { + notNull("documents", documents); + List requests = new ArrayList<>(documents.size()); + for (T document : documents) { + if (document == null) { + throw new IllegalArgumentException("documents can not contain a null value"); + } + if (getCodec() instanceof CollectibleCodec) { + document = ((CollectibleCodec) getCodec()).generateIdIfAbsentFromDocument(document); + } + requests.add(new InsertRequest(documentToBsonDocument(document))); + } + + return new MixedBulkWriteOperation(assertNotNull(namespace), + requests, options.isOrdered(), writeConcern, retryWrites) + .bypassDocumentValidation(options.getBypassDocumentValidation()) + .comment(options.getComment()); + } + + public WriteOperation bulkWrite(final List> requests, + final BulkWriteOptions options) { + notNull("requests", requests); + List writeRequests = new ArrayList<>(requests.size()); + for (WriteModel writeModel : requests) { + WriteRequest writeRequest; + if (writeModel == null) { + throw new IllegalArgumentException("requests can not contain a null value"); + } else if (writeModel instanceof InsertOneModel) { + T document = ((InsertOneModel) writeModel).getDocument(); + if (getCodec() instanceof CollectibleCodec) { + document = ((CollectibleCodec) getCodec()).generateIdIfAbsentFromDocument(document); + } + writeRequest = new InsertRequest(documentToBsonDocument(document)); + } else if (writeModel instanceof ReplaceOneModel) { + ReplaceOneModel replaceOneModel = (ReplaceOneModel) writeModel; + writeRequest = new UpdateRequest(assertNotNull(toBsonDocument(replaceOneModel.getFilter())), + documentToBsonDocument(replaceOneModel.getReplacement()), WriteRequest.Type.REPLACE) + .upsert(replaceOneModel.getReplaceOptions().isUpsert()) + .collation(replaceOneModel.getReplaceOptions().getCollation()) + .hint(toBsonDocument(replaceOneModel.getReplaceOptions().getHint())) + .hintString(replaceOneModel.getReplaceOptions().getHintString()) + .sort(toBsonDocument(replaceOneModel.getReplaceOptions().getSort())); + } else if (writeModel instanceof UpdateOneModel) { + UpdateOneModel updateOneModel = (UpdateOneModel) writeModel; + BsonValue update = updateOneModel.getUpdate() != null ? 
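
/*
 * The bulkWrite method here folds every WriteModel subtype into the internal WriteRequest
 * hierarchy; the essential distinction is the request type plus the multi flag (UpdateOneModel
 * and DeleteOneModel map to multi(false), their *Many counterparts to multi(true), and a replace
 * is an update request of type REPLACE). A skeleton of the dispatch, reduced to a classification
 * for illustration only:
 *
 *     static String classify(final WriteModel<?> writeModel) {
 *         if (writeModel instanceof InsertOneModel) {
 *             return "insert";
 *         } else if (writeModel instanceof ReplaceOneModel) {
 *             return "update, type REPLACE, multi=false";
 *         } else if (writeModel instanceof UpdateOneModel) {
 *             return "update, multi=false";
 *         } else if (writeModel instanceof UpdateManyModel) {
 *             return "update, multi=true";
 *         } else if (writeModel instanceof DeleteOneModel) {
 *             return "delete, multi=false";
 *         } else if (writeModel instanceof DeleteManyModel) {
 *             return "delete, multi=true";
 *         }
 *         throw new UnsupportedOperationException("WriteModel of type " + writeModel.getClass() + " is not supported");
 *     }
 */
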
toBsonDocument(updateOneModel.getUpdate()) + : new BsonArray(toBsonDocumentList(updateOneModel.getUpdatePipeline())); + writeRequest = new UpdateRequest(assertNotNull(toBsonDocument(updateOneModel.getFilter())), update, WriteRequest.Type.UPDATE) + .multi(false) + .upsert(updateOneModel.getOptions().isUpsert()) + .collation(updateOneModel.getOptions().getCollation()) + .arrayFilters(toBsonDocumentList(updateOneModel.getOptions().getArrayFilters())) + .hint(toBsonDocument(updateOneModel.getOptions().getHint())) + .hintString(updateOneModel.getOptions().getHintString()) + .sort(toBsonDocument(updateOneModel.getOptions().getSort())); + } else if (writeModel instanceof UpdateManyModel) { + UpdateManyModel updateManyModel = (UpdateManyModel) writeModel; + BsonValue update = updateManyModel.getUpdate() != null ? toBsonDocument(updateManyModel.getUpdate()) + : new BsonArray(toBsonDocumentList(updateManyModel.getUpdatePipeline())); + writeRequest = new UpdateRequest(assertNotNull(toBsonDocument(updateManyModel.getFilter())), update, WriteRequest.Type.UPDATE) + .multi(true) + .upsert(updateManyModel.getOptions().isUpsert()) + .collation(updateManyModel.getOptions().getCollation()) + .arrayFilters(toBsonDocumentList(updateManyModel.getOptions().getArrayFilters())) + .hint(toBsonDocument(updateManyModel.getOptions().getHint())) + .hintString(updateManyModel.getOptions().getHintString()); + } else if (writeModel instanceof DeleteOneModel) { + DeleteOneModel deleteOneModel = (DeleteOneModel) writeModel; + writeRequest = new DeleteRequest(assertNotNull(toBsonDocument(deleteOneModel.getFilter()))).multi(false) + .collation(deleteOneModel.getOptions().getCollation()) + .hint(toBsonDocument(deleteOneModel.getOptions().getHint())) + .hintString(deleteOneModel.getOptions().getHintString()); + } else if (writeModel instanceof DeleteManyModel) { + DeleteManyModel deleteManyModel = (DeleteManyModel) writeModel; + writeRequest = new DeleteRequest(assertNotNull(toBsonDocument(deleteManyModel.getFilter()))).multi(true) + .collation(deleteManyModel.getOptions().getCollation()) + .hint(toBsonDocument(deleteManyModel.getOptions().getHint())) + .hintString(deleteManyModel.getOptions().getHintString()); + } else { + throw new UnsupportedOperationException(format("WriteModel of type %s is not supported", writeModel.getClass())); + } + writeRequests.add(writeRequest); + } + + return new MixedBulkWriteOperation(assertNotNull(namespace), writeRequests, + options.isOrdered(), writeConcern, retryWrites) + .bypassDocumentValidation(options.getBypassDocumentValidation()) + .comment(options.getComment()) + .let(toBsonDocument(options.getLet())); + } + + public ReadOperationSimple commandRead(final Bson command, final Class resultClass) { + notNull("command", command); + notNull("resultClass", resultClass); + return new CommandReadOperation<>(assertNotNull(namespace).getDatabaseName(), + assertNotNull(toBsonDocument(command)), codecRegistry.get(resultClass)); + } + + + public WriteOperation dropDatabase() { + return new DropDatabaseOperation(assertNotNull(namespace).getDatabaseName(), + getWriteConcern()); + } + + public WriteOperation createCollection(final String collectionName, final CreateCollectionOptions createCollectionOptions, + @Nullable final AutoEncryptionSettings autoEncryptionSettings) { + CreateCollectionOperation operation = new CreateCollectionOperation( + assertNotNull(namespace).getDatabaseName(), collectionName, writeConcern) + .collation(createCollectionOptions.getCollation()) + 
.capped(createCollectionOptions.isCapped()) + .sizeInBytes(createCollectionOptions.getSizeInBytes()) + .maxDocuments(createCollectionOptions.getMaxDocuments()) + .storageEngineOptions(toBsonDocument(createCollectionOptions.getStorageEngineOptions())) + .expireAfter(createCollectionOptions.getExpireAfter(TimeUnit.SECONDS)) + .timeSeriesOptions(createCollectionOptions.getTimeSeriesOptions()) + .changeStreamPreAndPostImagesOptions(createCollectionOptions.getChangeStreamPreAndPostImagesOptions()); + + ClusteredIndexOptions clusteredIndexOptions = createCollectionOptions.getClusteredIndexOptions(); + if (clusteredIndexOptions != null) { + operation.clusteredIndexKey(toBsonDocument(clusteredIndexOptions.getKey())); + operation.clusteredIndexUnique(clusteredIndexOptions.isUnique()); + operation.clusteredIndexName(clusteredIndexOptions.getName()); + } + + Bson encryptedFields = createCollectionOptions.getEncryptedFields(); + operation.encryptedFields(toBsonDocument(encryptedFields)); + if (encryptedFields == null && autoEncryptionSettings != null) { + Map encryptedFieldsMap = autoEncryptionSettings.getEncryptedFieldsMap(); + if (encryptedFieldsMap != null) { + operation.encryptedFields(encryptedFieldsMap.getOrDefault(namespace.getDatabaseName() + "." + collectionName, null)); + } + } + + IndexOptionDefaults indexOptionDefaults = createCollectionOptions.getIndexOptionDefaults(); + Bson storageEngine = indexOptionDefaults.getStorageEngine(); + if (storageEngine != null) { + operation.indexOptionDefaults(new BsonDocument("storageEngine", toBsonDocument(storageEngine))); + } + ValidationOptions validationOptions = createCollectionOptions.getValidationOptions(); + Bson validator = validationOptions.getValidator(); + operation.validator(toBsonDocument(validator)); + operation.validationLevel(validationOptions.getValidationLevel()); + operation.validationAction(validationOptions.getValidationAction()); + return operation; + } + + public WriteOperation dropCollection( + final DropCollectionOptions dropCollectionOptions, + @Nullable final AutoEncryptionSettings autoEncryptionSettings) { + DropCollectionOperation operation = new DropCollectionOperation( + assertNotNull(namespace), writeConcern); + Bson encryptedFields = dropCollectionOptions.getEncryptedFields(); + if (encryptedFields != null) { + operation.encryptedFields(assertNotNull(toBsonDocument(encryptedFields))); + } else if (autoEncryptionSettings != null) { + Map encryptedFieldsMap = autoEncryptionSettings.getEncryptedFieldsMap(); + if (encryptedFieldsMap != null) { + operation.encryptedFields(encryptedFieldsMap.getOrDefault(namespace.getFullName(), null)); + operation.autoEncryptedFields(true); + } + } + return operation; + } + + + public WriteOperation renameCollection(final MongoNamespace newCollectionNamespace, + final RenameCollectionOptions renameCollectionOptions) { + return new RenameCollectionOperation(assertNotNull(namespace), + newCollectionNamespace, writeConcern).dropTarget(renameCollectionOptions.isDropTarget()); + } + + public WriteOperation createView(final String viewName, final String viewOn, final List pipeline, + final CreateViewOptions createViewOptions) { + notNull("options", createViewOptions); + notNull("pipeline", pipeline); + return new CreateViewOperation(assertNotNull(namespace).getDatabaseName(), viewName, + viewOn, assertNotNull(toBsonDocumentList(pipeline)), writeConcern).collation(createViewOptions.getCollation()); + } + + public WriteOperation createIndexes(final List indexes, final CreateIndexOptions 
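
/*
 * In createCollection above, encrypted fields resolve in a fixed order: a value set explicitly on
 * CreateCollectionOptions wins; otherwise, when auto-encryption is configured, the settings'
 * encryptedFieldsMap is consulted under the "db.collection" key. A reduced form of the lookup
 * (the helper name is invented):
 *
 *     static org.bson.BsonDocument resolveEncryptedFields(final org.bson.BsonDocument explicit,
 *             final java.util.Map<String, org.bson.BsonDocument> encryptedFieldsMap,
 *             final String databaseName, final String collectionName) {
 *         if (explicit != null) {
 *             return explicit;
 *         }
 *         return encryptedFieldsMap == null
 *                 ? null
 *                 : encryptedFieldsMap.getOrDefault(databaseName + "." + collectionName, null);
 *     }
 */
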
createIndexOptions) { + notNull("indexes", indexes); + notNull("createIndexOptions", createIndexOptions); + List indexRequests = new ArrayList<>(indexes.size()); + for (IndexModel model : indexes) { + if (model == null) { + throw new IllegalArgumentException("indexes can not contain a null value"); + } + indexRequests.add(new IndexRequest(assertNotNull(toBsonDocument(model.getKeys()))) + .name(model.getOptions().getName()) + .background(model.getOptions().isBackground()) + .unique(model.getOptions().isUnique()) + .sparse(model.getOptions().isSparse()) + .expireAfter(model.getOptions().getExpireAfter(TimeUnit.SECONDS), TimeUnit.SECONDS) + .version(model.getOptions().getVersion()) + .weights(toBsonDocument(model.getOptions().getWeights())) + .defaultLanguage(model.getOptions().getDefaultLanguage()) + .languageOverride(model.getOptions().getLanguageOverride()) + .textVersion(model.getOptions().getTextVersion()) + .sphereVersion(model.getOptions().getSphereVersion()) + .bits(model.getOptions().getBits()) + .min(model.getOptions().getMin()) + .max(model.getOptions().getMax()) + .storageEngine(toBsonDocument(model.getOptions().getStorageEngine())) + .partialFilterExpression(toBsonDocument(model.getOptions().getPartialFilterExpression())) + .collation(model.getOptions().getCollation()) + .wildcardProjection(toBsonDocument(model.getOptions().getWildcardProjection())) + .hidden(model.getOptions().isHidden()) + ); + } + return new CreateIndexesOperation( + assertNotNull(namespace), indexRequests, writeConcern) + .commitQuorum(createIndexOptions.getCommitQuorum()); + } + + public WriteOperation createSearchIndexes(final List indexes) { + List indexRequests = indexes.stream() + .map(this::createSearchIndexRequest) + .collect(Collectors.toList()); + return new CreateSearchIndexesOperation(assertNotNull(namespace), indexRequests); + } + + public WriteOperation updateSearchIndex(final String indexName, final Bson definition) { + BsonDocument definitionDocument = assertNotNull(toBsonDocument(definition)); + SearchIndexRequest searchIndexRequest = new SearchIndexRequest(definitionDocument, indexName); + return new UpdateSearchIndexesOperation(assertNotNull(namespace), searchIndexRequest); + } + + + public WriteOperation dropSearchIndex(final String indexName) { + return new DropSearchIndexOperation(assertNotNull(namespace), indexName); + } + + + public ReadOperationExplainable listSearchIndexes(final Class resultClass, + @Nullable final String indexName, @Nullable final Integer batchSize, @Nullable final Collation collation, + @Nullable final BsonValue comment, @Nullable final Boolean allowDiskUse) { + return new ListSearchIndexesOperation<>(assertNotNull(namespace), + codecRegistry.get(resultClass), indexName, batchSize, collation, comment, allowDiskUse, retryReads); + } + + public WriteOperation dropIndex(final String indexName, final DropIndexOptions ignoredOptions) { + return new DropIndexOperation(assertNotNull(namespace), indexName, writeConcern); + } + + public WriteOperation dropIndex(final Bson keys, final DropIndexOptions ignoredOptions) { + return new DropIndexOperation(assertNotNull(namespace), keys.toBsonDocument(BsonDocument.class, codecRegistry), writeConcern); + } + + public ReadOperationCursor listCollections(final String databaseName, final Class resultClass, + final Bson filter, final boolean collectionNamesOnly, final boolean authorizedCollections, @Nullable final Integer batchSize, + final BsonValue comment, @Nullable final TimeoutMode timeoutMode) { + return new 
ListCollectionsOperation<>(databaseName, codecRegistry.get(resultClass)) + .retryReads(retryReads) + .filter(toBsonDocument(filter)) + .nameOnly(collectionNamesOnly) + .authorizedCollections(authorizedCollections) + .batchSize(batchSize == null ? 0 : batchSize) + .comment(comment) + .timeoutMode(timeoutMode); + } + + public ReadOperationCursor listDatabases(final Class resultClass, final Bson filter, + final Boolean nameOnly, + final Boolean authorizedDatabasesOnly, final BsonValue comment) { + return new ListDatabasesOperation<>(codecRegistry.get(resultClass)) + .retryReads(retryReads) + .filter(toBsonDocument(filter)) + .nameOnly(nameOnly) + .authorizedDatabasesOnly(authorizedDatabasesOnly) + .comment(comment); + } + + public ReadOperationCursor listIndexes(final Class resultClass, @Nullable final Integer batchSize, + final BsonValue comment, @Nullable final TimeoutMode timeoutMode) { + return new ListIndexesOperation<>(assertNotNull(namespace), + codecRegistry.get(resultClass)) + .retryReads(retryReads) + .batchSize(batchSize == null ? 0 : batchSize) + .comment(comment) + .timeoutMode(timeoutMode); + } + + public ReadOperationCursor changeStream(final FullDocument fullDocument, + final FullDocumentBeforeChange fullDocumentBeforeChange, final List pipeline, + final Decoder decoder, final ChangeStreamLevel changeStreamLevel, @Nullable final Integer batchSize, + final Collation collation, final BsonValue comment, final BsonDocument resumeToken, + final BsonTimestamp startAtOperationTime, final BsonDocument startAfter, final boolean showExpandedEvents) { + return new ChangeStreamOperation<>( + assertNotNull(namespace), + fullDocument, + fullDocumentBeforeChange, + assertNotNull(toBsonDocumentList(pipeline)), decoder, changeStreamLevel) + .batchSize(batchSize) + .collation(collation) + .comment(comment) + .resumeAfter(resumeToken) + .startAtOperationTime(startAtOperationTime) + .startAfter(startAfter) + .showExpandedEvents(showExpandedEvents) + .retryReads(retryReads); + } + + public WriteOperation clientBulkWriteOperation( + final List clientWriteModels, + @Nullable final ClientBulkWriteOptions options) { + return new ClientBulkWriteOperation(clientWriteModels, options, writeConcern, retryWrites, codecRegistry); + } + + private Codec getCodec() { + return codecRegistry.get(documentClass); + } + + private BsonDocument documentToBsonDocument(final T document) { + if (document instanceof BsonDocument) { + return (BsonDocument) document; + } else { + return new BsonDocumentWrapper<>(document, getCodec()); + } + } + + @Nullable + private BsonDocument toBsonDocument(@Nullable final Bson bson) { + return bson == null ? 
null : bson.toBsonDocument(documentClass, codecRegistry); + } + + @Nullable + private List toBsonDocumentList(@Nullable final List bsonList) { + if (bsonList == null) { + return null; + } + List bsonDocumentList = new ArrayList<>(bsonList.size()); + for (Bson cur : bsonList) { + if (cur == null) { + throw new IllegalArgumentException("All documents in the list must be non-null"); + } + bsonDocumentList.add(toBsonDocument(cur)); + } + return bsonDocumentList; + } + + private SearchIndexRequest createSearchIndexRequest(final SearchIndexModel model) { + BsonDocument definition = assertNotNull(toBsonDocument(model.getDefinition())); + String indexName = model.getName(); + SearchIndexType searchIndexType = model.getType(); + + return new SearchIndexRequest(definition, indexName, searchIndexType); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/QueryHelper.java b/driver-core/src/main/com/mongodb/internal/operation/QueryHelper.java new file mode 100644 index 00000000000..053e4fc8817 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/QueryHelper.java @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoCommandException; +import com.mongodb.MongoCursorNotFoundException; +import com.mongodb.MongoQueryException; +import com.mongodb.ServerCursor; + +final class QueryHelper { + static MongoQueryException translateCommandException(final MongoCommandException commandException, final ServerCursor cursor) { + if (commandException.getErrorCode() == 43) { + return new MongoCursorNotFoundException(cursor.getId(), commandException.getResponse(), cursor.getAddress()); + } else { + return new MongoQueryException(commandException.getResponse(), commandException.getServerAddress()); + } + } + + private QueryHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/ReadOperation.java b/driver-core/src/main/com/mongodb/internal/operation/ReadOperation.java new file mode 100644 index 00000000000..6a90d490b30 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/ReadOperation.java @@ -0,0 +1,50 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
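The `QueryHelper` above maps a failed cursor command onto the driver's query-exception hierarchy: server error code 43 means the server no longer knows the cursor, so the failure becomes a `MongoCursorNotFoundException`; anything else becomes a plain `MongoQueryException`. A minimal sketch of the intended call site, under stated assumptions: `issueGetMore` is hypothetical, and the class would have to live in `com.mongodb.internal.operation` because `QueryHelper` is package-private.

```java
import com.mongodb.MongoCommandException;
import com.mongodb.ServerCursor;

// Hypothetical call site; only translateCommandException comes from the patch.
// Must live in com.mongodb.internal.operation (QueryHelper is package-private).
final class GetMoreSketch {
    static void fetchNextBatch(final ServerCursor cursor) {
        try {
            issueGetMore(cursor); // hypothetical helper that sends the getMore command
        } catch (MongoCommandException e) {
            // Code 43 -> MongoCursorNotFoundException, otherwise MongoQueryException
            throw QueryHelper.translateCommandException(e, cursor);
        }
    }

    private static void issueGetMore(final ServerCursor cursor) {
        // elided: send a getMore for cursor.getId() to cursor.getAddress()
    }
}
```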
+ */ + +package com.mongodb.internal.operation; + +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncReadBinding; +import com.mongodb.internal.binding.ReadBinding; + +/** + * An operation that reads from a MongoDB server. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public interface ReadOperation { + + /** + * @return the command name of the operation, e.g. "insert", "update", "delete", "bulkWrite", etc. + */ + String getCommandName(); + + /** + * General execute which can return anything of type T + * + * @param binding the binding to execute in the context of + * @return T, the result of the execution + */ + T execute(ReadBinding binding); + + /** + * General execute which can return anything of type R + * + * @param binding the binding to execute in the context of + * @param callback the callback to be called when the operation has been executed + */ + void executeAsync(AsyncReadBinding binding, SingleResultCallback callback); +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/ReadOperationCursor.java b/driver-core/src/main/com/mongodb/internal/operation/ReadOperationCursor.java new file mode 100644 index 00000000000..75393a755cf --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/ReadOperationCursor.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.internal.async.AsyncBatchCursor; + +/** + * An operation that reads from a MongoDB server and returns a cursor. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public interface ReadOperationCursor extends ReadOperation, AsyncBatchCursor> { +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/ReadOperationExplainable.java b/driver-core/src/main/com/mongodb/internal/operation/ReadOperationExplainable.java new file mode 100644 index 00000000000..613e9b3b4d2 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/ReadOperationExplainable.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.ExplainVerbosity; +import com.mongodb.lang.Nullable; +import org.bson.codecs.Decoder; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public interface ReadOperationExplainable extends ReadOperationCursor { + ReadOperationSimple asExplainableOperation(@Nullable ExplainVerbosity verbosity, Decoder resultDecoder); +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/ReadOperationMapReduceCursor.java b/driver-core/src/main/com/mongodb/internal/operation/ReadOperationMapReduceCursor.java new file mode 100644 index 00000000000..f743a2dc1df --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/ReadOperationMapReduceCursor.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +/** + * An operation that reads from a MongoDB server and returns a map reduce cursor. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public interface ReadOperationMapReduceCursor extends ReadOperation, MapReduceAsyncBatchCursor> { +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/ReadOperationSimple.java b/driver-core/src/main/com/mongodb/internal/operation/ReadOperationSimple.java new file mode 100644 index 00000000000..1268d140363 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/ReadOperationSimple.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +/** + * An operation that reads from a MongoDB server and returns the same type for sync and async. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public interface ReadOperationSimple extends ReadOperation { +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/RenameCollectionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/RenameCollectionOperation.java new file mode 100644 index 00000000000..ea477bf67bd --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/RenameCollectionOperation.java @@ -0,0 +1,109 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoNamespace; +import com.mongodb.WriteConcern; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncWriteBinding; +import com.mongodb.internal.binding.WriteBinding; +import com.mongodb.lang.Nullable; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonString; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; +import static com.mongodb.internal.operation.AsyncOperationHelper.executeCommandAsync; +import static com.mongodb.internal.operation.AsyncOperationHelper.releasingCallback; +import static com.mongodb.internal.operation.AsyncOperationHelper.withAsyncConnection; +import static com.mongodb.internal.operation.AsyncOperationHelper.writeConcernErrorTransformerAsync; +import static com.mongodb.internal.operation.OperationHelper.LOGGER; +import static com.mongodb.internal.operation.SyncOperationHelper.executeCommand; +import static com.mongodb.internal.operation.SyncOperationHelper.withConnection; +import static com.mongodb.internal.operation.SyncOperationHelper.writeConcernErrorTransformer; +import static com.mongodb.internal.operation.WriteConcernHelper.appendWriteConcernToCommand; + +/** + * An operation that renames the given collection to the new name. + * + *
<p>If the new name is the same as an existing collection and dropTarget is true, this existing collection will be dropped. If + * dropTarget is false and the newCollectionName is the same as an existing collection, a MongoServerException will be thrown.</p> + * + * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class RenameCollectionOperation implements WriteOperation { + private static final String COMMAND_NAME = "renameCollection"; + private final MongoNamespace originalNamespace; + private final MongoNamespace newNamespace; + private final WriteConcern writeConcern; + private boolean dropTarget; + + public RenameCollectionOperation(final MongoNamespace originalNamespace, final MongoNamespace newNamespace, + @Nullable final WriteConcern writeConcern) { + this.originalNamespace = notNull("originalNamespace", originalNamespace); + this.newNamespace = notNull("newNamespace", newNamespace); + this.writeConcern = writeConcern; + } + + public WriteConcern getWriteConcern() { + return writeConcern; + } + + public boolean isDropTarget() { + return dropTarget; + } + + public RenameCollectionOperation dropTarget(final boolean dropTarget) { + this.dropTarget = dropTarget; + return this; + } + + @Override + public String getCommandName() { + return COMMAND_NAME; + } + + @Override + public Void execute(final WriteBinding binding) { + return withConnection(binding, connection -> executeCommand(binding, "admin", getCommand(), connection, + writeConcernErrorTransformer(binding.getOperationContext().getTimeoutContext()))); + } + + @Override + public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback callback) { + withAsyncConnection(binding, (connection, t) -> { + SingleResultCallback errHandlingCallback = errorHandlingCallback(callback, LOGGER); + if (t != null) { + errHandlingCallback.onResult(null, t); + } else { + executeCommandAsync(binding, "admin", getCommand(), assertNotNull(connection), + writeConcernErrorTransformerAsync(binding.getOperationContext().getTimeoutContext()), + releasingCallback(errHandlingCallback, connection)); + } + }); + } + + private BsonDocument getCommand() { + BsonDocument commandDocument = new BsonDocument(getCommandName(), new BsonString(originalNamespace.getFullName())) + .append("to", new BsonString(newNamespace.getFullName())) + .append("dropTarget", BsonBoolean.valueOf(dropTarget)); + appendWriteConcernToCommand(writeConcern, commandDocument); + return commandDocument; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/SearchIndexRequest.java b/driver-core/src/main/com/mongodb/internal/operation/SearchIndexRequest.java new file mode 100644 index 00000000000..29b9b1ef34d --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/SearchIndexRequest.java @@ -0,0 +1,66 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.client.model.SearchIndexType; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; + +import static com.mongodb.assertions.Assertions.assertNotNull; + +/** + * A request for creating or updating an Atlas Search index. + * + *
<p>Additional options may be introduced as Atlas evolves. + * To maintain a clear API, it can be split into separate classes, e.g., {@code SearchIndexCreateRequest} + * and {@code SearchIndexUpdateRequest}, for handling each operation separately in the future.</p> + * + * <p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class SearchIndexRequest { + private final BsonDocument definition; + @Nullable + private final String indexName; + @Nullable + private final SearchIndexType searchIndexType; + + public SearchIndexRequest(final BsonDocument definition, @Nullable final String indexName, + @Nullable final SearchIndexType searchIndexType) { + assertNotNull(definition); + this.definition = definition; + this.indexName = indexName; + this.searchIndexType = searchIndexType; + } + + SearchIndexRequest(final BsonDocument definition, @Nullable final String indexName) { + this(definition, indexName, null); + } + + public BsonDocument getDefinition() { + return definition; + } + + @Nullable + public String getIndexName() { + return indexName; + } + @Nullable + public SearchIndexType getSearchIndexType() { + return searchIndexType; + } + +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/ServerVersionHelper.java b/driver-core/src/main/com/mongodb/internal/operation/ServerVersionHelper.java new file mode 100644 index 00000000000..093d48e3781 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/ServerVersionHelper.java @@ -0,0 +1,50 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + + +import com.mongodb.connection.ConnectionDescription; + +/** + * This class is NOT part of the public API. It may change at any time without notification. + */ +public final class ServerVersionHelper { + + public static final int UNKNOWN_WIRE_VERSION = 0; + public static final int FOUR_DOT_TWO_WIRE_VERSION = 8; + public static final int FOUR_DOT_FOUR_WIRE_VERSION = 9; + public static final int FIVE_DOT_ZERO_WIRE_VERSION = 13; + public static final int SIX_DOT_ZERO_WIRE_VERSION = 17; + public static final int SEVEN_DOT_ZERO_WIRE_VERSION = 21; + public static final int EIGHT_DOT_ZERO_WIRE_VERSION = 25; + public static final int LATEST_WIRE_VERSION = EIGHT_DOT_ZERO_WIRE_VERSION; + + public static boolean serverIsAtLeastVersionFourDotFour(final ConnectionDescription description) { + return description.getMaxWireVersion() >= FOUR_DOT_FOUR_WIRE_VERSION; + } + + public static boolean serverIsLessThanVersionFourDotFour(final ConnectionDescription description) { + return description.getMaxWireVersion() < FOUR_DOT_FOUR_WIRE_VERSION; + } + + public static boolean serverIsLessThanVersionSevenDotZero(final ConnectionDescription description) { + return description.getMaxWireVersion() < SEVEN_DOT_ZERO_WIRE_VERSION; + } + + private ServerVersionHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/SingleBatchCursor.java b/driver-core/src/main/com/mongodb/internal/operation/SingleBatchCursor.java new file mode 100644 index 00000000000..8a673ee93d9 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/SingleBatchCursor.java @@ -0,0 +1,91 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.operation; + +import com.mongodb.ServerAddress; +import com.mongodb.ServerCursor; + +import java.util.List; +import java.util.NoSuchElementException; + +import static java.util.Collections.emptyList; + +class SingleBatchCursor implements BatchCursor { + + static SingleBatchCursor createEmptySingleBatchCursor(final ServerAddress serverAddress, final int batchSize) { + return new SingleBatchCursor<>(emptyList(), batchSize, serverAddress); + } + + private final List batch; + private final ServerAddress serverAddress; + private final int batchSize; + private boolean hasNext; + + SingleBatchCursor(final List batch, final int batchSize, final ServerAddress serverAddress) { + this.batch = batch; + this.serverAddress = serverAddress; + this.batchSize = batchSize; + this.hasNext = !batch.isEmpty(); + } + + @Override + public boolean hasNext() { + return hasNext; + } + + @Override + public List next() { + if (hasNext) { + hasNext = false; + return batch; + } + throw new NoSuchElementException(); + } + + @Override + public int available() { + return hasNext ? 1 : 0; + } + + @Override + public void setBatchSize(final int batchSize) { + // NOOP + } + + @Override + public int getBatchSize() { + return batchSize; + } + + @Override + public List tryNext() { + return hasNext ? next() : null; + } + + @Override + public ServerCursor getServerCursor() { + return null; + } + + @Override + public ServerAddress getServerAddress() { + return serverAddress; + } + + @Override + public void close() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java b/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java new file mode 100644 index 00000000000..6d013df59ba --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/SyncOperationHelper.java @@ -0,0 +1,345 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
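`SingleBatchCursor` above models a result set that fits in one server reply: the single batch is handed out exactly once, after which the cursor reports itself exhausted, and `getServerCursor()` returns null because nothing stays open on the server. A small sketch of that contract; the generic parameter is an assumption (the extracted diff drops angle brackets), and the class would need to sit in `com.mongodb.internal.operation` since the cursor is package-private.

```java
import com.mongodb.ServerAddress;

import java.util.Arrays;
import java.util.List;

// Illustrative sketch of the one-shot contract of SingleBatchCursor.
final class SingleBatchCursorSketch {
    public static void main(final String[] args) {
        SingleBatchCursor<String> cursor =
                new SingleBatchCursor<>(Arrays.asList("a", "b"), 0, new ServerAddress());

        List<String> batch = cursor.next();   // the single batch: [a, b]
        boolean more = cursor.hasNext();      // false; a second next() would throw
        List<String> none = cursor.tryNext(); // null rather than NoSuchElementException
        cursor.close();                       // a no-op: no server-side state to release
        System.out.println(batch + ", more=" + more + ", tryNext=" + none);
    }
}
```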
+ */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoException; +import com.mongodb.ReadPreference; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.async.function.AsyncCallbackBiFunction; +import com.mongodb.internal.async.function.AsyncCallbackFunction; +import com.mongodb.internal.async.function.AsyncCallbackSupplier; +import com.mongodb.internal.async.function.RetryState; +import com.mongodb.internal.async.function.RetryingSyncSupplier; +import com.mongodb.internal.binding.ConnectionSource; +import com.mongodb.internal.binding.ReadBinding; +import com.mongodb.internal.binding.ReferenceCounted; +import com.mongodb.internal.binding.WriteBinding; +import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.operation.retry.AttachmentKeys; +import com.mongodb.internal.session.SessionContext; +import com.mongodb.internal.validator.NoOpFieldNameValidator; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonValue; +import org.bson.FieldNameValidator; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.Decoder; + +import java.util.function.BiFunction; +import java.util.function.Function; +import java.util.function.Supplier; + +import static com.mongodb.ReadPreference.primary; +import static com.mongodb.assertions.Assertions.assertFalse; +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; +import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; +import static com.mongodb.internal.operation.CommandOperationHelper.logRetryExecute; +import static com.mongodb.internal.operation.CommandOperationHelper.onRetryableReadAttemptFailure; +import static com.mongodb.internal.operation.CommandOperationHelper.onRetryableWriteAttemptFailure; +import static com.mongodb.internal.operation.OperationHelper.ResourceSupplierInternalException; +import static com.mongodb.internal.operation.OperationHelper.canRetryRead; +import static com.mongodb.internal.operation.OperationHelper.canRetryWrite; +import static com.mongodb.internal.operation.WriteConcernHelper.throwOnWriteConcernError; + +final class SyncOperationHelper { + + interface CallableWithConnection { + T call(Connection connection); + } + + interface CallableWithSource { + T call(ConnectionSource source); + } + + interface CommandReadTransformer { + + /** + * Yield an appropriate result object for the input object. + * + * @param t the input object + * @return the function result + */ + @Nullable + R apply(T t, ConnectionSource source, Connection connection); + } + + interface CommandWriteTransformer { + + /** + * Yield an appropriate result object for the input object. 
+ * + * @param t the input object + * @return the function result + */ + @Nullable + R apply(T t, Connection connection); + } + + private static final BsonDocumentCodec BSON_DOCUMENT_CODEC = new BsonDocumentCodec(); + + static T withReadConnectionSource(final ReadBinding binding, final CallableWithSource callable) { + ConnectionSource source = binding.getReadConnectionSource(); + try { + return callable.call(source); + } finally { + source.release(); + } + } + + static T withConnection(final WriteBinding binding, final CallableWithConnection callable) { + ConnectionSource source = binding.getWriteConnectionSource(); + try { + return withConnectionSource(source, callable); + } finally { + source.release(); + } + } + + /** + * Gets a {@link ConnectionSource} and a {@link Connection} from the {@code sourceSupplier} and executes the {@code function} with them. + * Guarantees to {@linkplain ReferenceCounted#release() release} the source and the connection after completion of the {@code function}. + * + * @param wrapConnectionSourceException See {@link #withSuppliedResource(Supplier, boolean, Function)}. + * @see #withSuppliedResource(Supplier, boolean, Function) + * @see AsyncOperationHelper#withAsyncSourceAndConnection(AsyncCallbackSupplier, boolean, SingleResultCallback, AsyncCallbackBiFunction) + */ + static R withSourceAndConnection(final Supplier sourceSupplier, + final boolean wrapConnectionSourceException, + final BiFunction function) throws ResourceSupplierInternalException { + return withSuppliedResource(sourceSupplier, wrapConnectionSourceException, source -> + withSuppliedResource(source::getConnection, wrapConnectionSourceException, connection -> + function.apply(source, connection))); + } + + /** + * Gets a {@link ReferenceCounted} resource from the {@code resourceSupplier} and applies the {@code function} to it. + * Guarantees to {@linkplain ReferenceCounted#release() release} the resource after completion of the {@code function}. + * + * @param wrapSupplierException If {@code true} and {@code resourceSupplier} completes abruptly, then the exception is wrapped + * into {@link OperationHelper.ResourceSupplierInternalException}, such that it can be accessed + * via {@link OperationHelper.ResourceSupplierInternalException#getCause()}. 
+ * @see AsyncOperationHelper#withAsyncSuppliedResource(AsyncCallbackSupplier, boolean, SingleResultCallback, AsyncCallbackFunction) + */ + static R withSuppliedResource(final Supplier resourceSupplier, + final boolean wrapSupplierException, final Function function) throws OperationHelper.ResourceSupplierInternalException { + T resource = null; + try { + try { + resource = resourceSupplier.get(); + } catch (Exception supplierException) { + if (wrapSupplierException) { + throw new ResourceSupplierInternalException(supplierException); + } else { + throw supplierException; + } + } + return function.apply(resource); + } finally { + if (resource != null) { + resource.release(); + } + } + } + + private static T withConnectionSource(final ConnectionSource source, final CallableWithConnection callable) { + Connection connection = source.getConnection(); + try { + return callable.call(connection); + } finally { + connection.release(); + } + } + + static T executeRetryableRead( + final ReadBinding binding, + final String database, + final CommandCreator commandCreator, + final Decoder decoder, + final CommandReadTransformer transformer, + final boolean retryReads) { + return executeRetryableRead(binding, binding::getReadConnectionSource, database, commandCreator, + decoder, transformer, retryReads); + } + + static T executeRetryableRead( + final ReadBinding binding, + final Supplier readConnectionSourceSupplier, + final String database, + final CommandCreator commandCreator, + final Decoder decoder, + final CommandReadTransformer transformer, + final boolean retryReads) { + RetryState retryState = CommandOperationHelper.initialRetryState(retryReads, binding.getOperationContext().getTimeoutContext()); + + Supplier read = decorateReadWithRetries(retryState, binding.getOperationContext(), () -> + withSourceAndConnection(readConnectionSourceSupplier, false, (source, connection) -> { + retryState.breakAndThrowIfRetryAnd(() -> !canRetryRead(source.getServerDescription(), binding.getOperationContext())); + return createReadCommandAndExecute(retryState, binding.getOperationContext(), source, database, + commandCreator, decoder, transformer, connection); + }) + ); + return read.get(); + } + + @VisibleForTesting(otherwise = PRIVATE) + static T executeCommand(final WriteBinding binding, final String database, final CommandCreator commandCreator, + final CommandWriteTransformer transformer) { + return withSourceAndConnection(binding::getWriteConnectionSource, false, (source, connection) -> + transformer.apply(assertNotNull( + connection.command(database, + commandCreator.create(binding.getOperationContext(), + source.getServerDescription(), + connection.getDescription()), + NoOpFieldNameValidator.INSTANCE, primary(), BSON_DOCUMENT_CODEC, binding.getOperationContext())), + connection)); + } + + @VisibleForTesting(otherwise = PRIVATE) + static T executeCommand(final WriteBinding binding, final String database, final BsonDocument command, + final Decoder decoder, final CommandWriteTransformer transformer) { + return withSourceAndConnection(binding::getWriteConnectionSource, false, (source, connection) -> + transformer.apply(assertNotNull( + connection.command(database, command, NoOpFieldNameValidator.INSTANCE, primary(), decoder, + binding.getOperationContext())), connection)); + } + + @Nullable + static T executeCommand(final WriteBinding binding, final String database, final BsonDocument command, + final Connection connection, final CommandWriteTransformer transformer) { + notNull("binding", binding); + return 
transformer.apply(assertNotNull( + connection.command(database, command, NoOpFieldNameValidator.INSTANCE, primary(), BSON_DOCUMENT_CODEC, + binding.getOperationContext())), + connection); + } + + static R executeRetryableWrite( + final WriteBinding binding, + final String database, + @Nullable final ReadPreference readPreference, + final FieldNameValidator fieldNameValidator, + final Decoder commandResultDecoder, + final CommandCreator commandCreator, + final CommandWriteTransformer transformer, + final com.mongodb.Function retryCommandModifier) { + RetryState retryState = CommandOperationHelper.initialRetryState(true, binding.getOperationContext().getTimeoutContext()); + Supplier retryingWrite = decorateWriteWithRetries(retryState, binding.getOperationContext(), () -> { + boolean firstAttempt = retryState.isFirstAttempt(); + SessionContext sessionContext = binding.getOperationContext().getSessionContext(); + if (!firstAttempt && sessionContext.hasActiveTransaction()) { + sessionContext.clearTransactionContext(); + } + return withSourceAndConnection(binding::getWriteConnectionSource, true, (source, connection) -> { + int maxWireVersion = connection.getDescription().getMaxWireVersion(); + try { + retryState.breakAndThrowIfRetryAnd(() -> !canRetryWrite(connection.getDescription(), sessionContext)); + BsonDocument command = retryState.attachment(AttachmentKeys.command()) + .map(previousAttemptCommand -> { + assertFalse(firstAttempt); + return retryCommandModifier.apply(previousAttemptCommand); + }).orElseGet(() -> commandCreator.create(binding.getOperationContext(), source.getServerDescription(), + connection.getDescription())); + // attach `maxWireVersion`, `retryableCommandFlag` ASAP because they are used to check whether we should retry + retryState.attach(AttachmentKeys.maxWireVersion(), maxWireVersion, true) + .attach(AttachmentKeys.retryableCommandFlag(), CommandOperationHelper.isRetryWritesEnabled(command), true) + .attach(AttachmentKeys.commandDescriptionSupplier(), command::getFirstKey, false) + .attach(AttachmentKeys.command(), command, false); + return transformer.apply(assertNotNull(connection.command(database, command, fieldNameValidator, readPreference, + commandResultDecoder, binding.getOperationContext())), + connection); + } catch (MongoException e) { + if (!firstAttempt) { + CommandOperationHelper.addRetryableWriteErrorLabel(e, maxWireVersion); + } + throw e; + } + }); + }); + try { + return retryingWrite.get(); + } catch (MongoException e) { + throw CommandOperationHelper.transformWriteException(e); + } + } + + @Nullable + static T createReadCommandAndExecute( + final RetryState retryState, + final OperationContext operationContext, + final ConnectionSource source, + final String database, + final CommandCreator commandCreator, + final Decoder decoder, + final CommandReadTransformer transformer, + final Connection connection) { + BsonDocument command = commandCreator.create(operationContext, source.getServerDescription(), + connection.getDescription()); + retryState.attach(AttachmentKeys.commandDescriptionSupplier(), command::getFirstKey, false); + return transformer.apply(assertNotNull(connection.command(database, command, NoOpFieldNameValidator.INSTANCE, + source.getReadPreference(), decoder, operationContext)), source, connection); + } + + + static Supplier decorateWriteWithRetries(final RetryState retryState, + final OperationContext operationContext, final Supplier writeFunction) { + return new RetryingSyncSupplier<>(retryState, 
onRetryableWriteAttemptFailure(operationContext), + CommandOperationHelper::loggingShouldAttemptToRetryWriteAndAddRetryableLabel, () -> { + logRetryExecute(retryState, operationContext); + return writeFunction.get(); + }); + } + + static Supplier decorateReadWithRetries(final RetryState retryState, final OperationContext operationContext, + final Supplier readFunction) { + return new RetryingSyncSupplier<>(retryState, onRetryableReadAttemptFailure(operationContext), + CommandOperationHelper::shouldAttemptToRetryRead, () -> { + logRetryExecute(retryState, operationContext); + return readFunction.get(); + }); + } + + + static CommandWriteTransformer writeConcernErrorTransformer(final TimeoutContext timeoutContext) { + return (result, connection) -> { + assertNotNull(result); + throwOnWriteConcernError(result, connection.getDescription().getServerAddress(), + connection.getDescription().getMaxWireVersion(), timeoutContext); + return null; + }; + } + + static CommandReadTransformer> singleBatchCursorTransformer(final String fieldName) { + return (result, source, connection) -> + new SingleBatchCursor<>(BsonDocumentWrapperHelper.toList(result, fieldName), 0, + connection.getDescription().getServerAddress()); + } + + static CommandBatchCursor cursorDocumentToBatchCursor(final TimeoutMode timeoutMode, final BsonDocument cursorDocument, + final int batchSize, final Decoder decoder, @Nullable final BsonValue comment, final ConnectionSource source, + final Connection connection) { + return new CommandBatchCursor<>(timeoutMode, cursorDocument, batchSize, 0, decoder, comment, source, connection); + } + + private SyncOperationHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/TransactionOperation.java b/driver-core/src/main/com/mongodb/internal/operation/TransactionOperation.java new file mode 100644 index 00000000000..a15a2aa88e3 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/TransactionOperation.java @@ -0,0 +1,86 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
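The heart of `SyncOperationHelper` is the decoration pattern at the end of the class: a plain `Supplier` representing one attempt is wrapped in a `RetryingSyncSupplier` that logs each attempt and consults the retry policy before re-invoking it. A hedged sketch of that pattern in isolation, using only calls shown in the patch; the surrounding class and the `readAttempt` supplier are hypothetical, and same-package access is assumed because both helpers are package-private.

```java
import com.mongodb.internal.async.function.RetryState;
import com.mongodb.internal.connection.OperationContext;

import java.util.function.Supplier;

// Hypothetical demonstration of decorateReadWithRetries: each get() on the
// decorated supplier logs the attempt and retries per the read-retry policy.
final class RetryDecorationSketch {
    static <T> T readWithRetries(final OperationContext operationContext,
                                 final Supplier<T> readAttempt,
                                 final boolean retryReads) {
        RetryState retryState = CommandOperationHelper.initialRetryState(
                retryReads, operationContext.getTimeoutContext());
        Supplier<T> retryingRead = SyncOperationHelper.decorateReadWithRetries(
                retryState, operationContext, readAttempt);
        return retryingRead.get(); // runs readAttempt, retrying failures where permitted
    }
}
```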
+ */ + +package com.mongodb.internal.operation; + +import com.mongodb.Function; +import com.mongodb.WriteConcern; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncWriteBinding; +import com.mongodb.internal.binding.WriteBinding; +import com.mongodb.internal.validator.NoOpFieldNameValidator; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.codecs.BsonDocumentCodec; + +import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback; +import static com.mongodb.internal.operation.AsyncOperationHelper.executeRetryableWriteAsync; +import static com.mongodb.internal.operation.AsyncOperationHelper.writeConcernErrorTransformerAsync; +import static com.mongodb.internal.operation.CommandOperationHelper.CommandCreator; +import static com.mongodb.internal.operation.OperationHelper.LOGGER; +import static com.mongodb.internal.operation.SyncOperationHelper.executeRetryableWrite; +import static com.mongodb.internal.operation.SyncOperationHelper.writeConcernErrorTransformer; + +/** + * A base class for transaction-related operations + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public abstract class TransactionOperation implements WriteOperation { + private final WriteConcern writeConcern; + + TransactionOperation(final WriteConcern writeConcern) { + this.writeConcern = notNull("writeConcern", writeConcern); + } + + public WriteConcern getWriteConcern() { + return writeConcern; + } + + @Override + public Void execute(final WriteBinding binding) { + isTrue("in transaction", binding.getOperationContext().getSessionContext().hasActiveTransaction()); + TimeoutContext timeoutContext = binding.getOperationContext().getTimeoutContext(); + return executeRetryableWrite(binding, "admin", null, NoOpFieldNameValidator.INSTANCE, + new BsonDocumentCodec(), getCommandCreator(), + writeConcernErrorTransformer(timeoutContext), getRetryCommandModifier(timeoutContext)); + } + + @Override + public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback callback) { + isTrue("in transaction", binding.getOperationContext().getSessionContext().hasActiveTransaction()); + TimeoutContext timeoutContext = binding.getOperationContext().getTimeoutContext(); + executeRetryableWriteAsync(binding, "admin", null, NoOpFieldNameValidator.INSTANCE, + new BsonDocumentCodec(), getCommandCreator(), + writeConcernErrorTransformerAsync(timeoutContext), getRetryCommandModifier(timeoutContext), + errorHandlingCallback(callback, LOGGER)); + } + + CommandCreator getCommandCreator() { + return (operationContext, serverDescription, connectionDescription) -> { + BsonDocument command = new BsonDocument(getCommandName(), new BsonInt32(1)); + if (!writeConcern.isServerDefault()) { + command.put("writeConcern", writeConcern.asDocument()); + } + return command; + }; + } + + protected abstract Function getRetryCommandModifier(TimeoutContext timeoutContext); +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/UpdateSearchIndexesOperation.java b/driver-core/src/main/com/mongodb/internal/operation/UpdateSearchIndexesOperation.java new file mode 100644 index 00000000000..ca23fd8e502 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/UpdateSearchIndexesOperation.java @@ -0,0 +1,49 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoNamespace; +import org.bson.BsonDocument; +import org.bson.BsonString; + +/** + * An operation that updates an Atlas Search index. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +final class UpdateSearchIndexesOperation extends AbstractWriteSearchIndexOperation { + private static final String COMMAND_NAME = "updateSearchIndex"; + private final SearchIndexRequest request; + + UpdateSearchIndexesOperation(final MongoNamespace namespace, final SearchIndexRequest request) { + super(namespace); + this.request = request; + } + + @Override + public String getCommandName() { + return COMMAND_NAME; + } + + @Override + BsonDocument buildCommand() { + return new BsonDocument(getCommandName(), new BsonString(getNamespace().getCollectionName())) + .append("name", new BsonString(request.getIndexName())) + .append("definition", request.getDefinition()); + } +} + diff --git a/driver-core/src/main/com/mongodb/internal/operation/WriteConcernHelper.java b/driver-core/src/main/com/mongodb/internal/operation/WriteConcernHelper.java new file mode 100644 index 00000000000..10b02eda4fe --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/WriteConcernHelper.java @@ -0,0 +1,96 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoException; +import com.mongodb.MongoWriteConcernException; +import com.mongodb.ServerAddress; +import com.mongodb.WriteConcern; +import com.mongodb.WriteConcernResult; +import com.mongodb.bulk.WriteConcernError; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.connection.ProtocolHelper; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonString; + +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import static com.mongodb.internal.operation.CommandOperationHelper.addRetryableWriteErrorLabel; + +/** + * This class is NOT part of the public API. It may change at any time without notification. + */ +public final class WriteConcernHelper { + + public static void appendWriteConcernToCommand(final WriteConcern writeConcern, final BsonDocument commandDocument) { + if (writeConcern != null && !writeConcern.isServerDefault()) { + commandDocument.put("writeConcern", writeConcern.asDocument()); + } + } + @Nullable + public static WriteConcern cloneWithoutTimeout(@Nullable final WriteConcern writeConcern) { + if (writeConcern == null || writeConcern.getWTimeout(TimeUnit.MILLISECONDS) == null) { + return writeConcern; + } + + WriteConcern mapped; + Object w = writeConcern.getWObject(); + if (w == null) { + mapped = WriteConcern.ACKNOWLEDGED; + } else { + mapped = w instanceof Integer ? 
new WriteConcern((Integer) w) : new WriteConcern((String) w); + } + return mapped.withJournal(writeConcern.getJournal()); + } + + public static void throwOnWriteConcernError(final BsonDocument result, final ServerAddress serverAddress, + final int maxWireVersion, final TimeoutContext timeoutContext) { + if (hasWriteConcernError(result)) { + MongoException exception = ProtocolHelper.createSpecialException(result, serverAddress, "errmsg", timeoutContext); + if (exception == null) { + exception = createWriteConcernException(result, serverAddress); + } + addRetryableWriteErrorLabel(exception, maxWireVersion); + throw exception; + } + } + + public static boolean hasWriteConcernError(final BsonDocument result) { + return result.containsKey("writeConcernError"); + } + + public static MongoWriteConcernException createWriteConcernException(final BsonDocument result, final ServerAddress serverAddress) { + return new MongoWriteConcernException( + createWriteConcernError(result.getDocument("writeConcernError")), + WriteConcernResult.acknowledged(0, false, null), serverAddress, + result.getArray("errorLabels", new BsonArray()).stream().map(i -> i.asString().getValue()) + .collect(Collectors.toSet())); + } + + public static WriteConcernError createWriteConcernError(final BsonDocument writeConcernErrorDocument) { + return new WriteConcernError(writeConcernErrorDocument.getNumber("code").intValue(), + writeConcernErrorDocument.getString("codeName", new BsonString("")).getValue(), + writeConcernErrorDocument.getString("errmsg").getValue(), + writeConcernErrorDocument.getDocument("errInfo", new BsonDocument())); + } + + private WriteConcernHelper() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/WriteOperation.java b/driver-core/src/main/com/mongodb/internal/operation/WriteOperation.java new file mode 100644 index 00000000000..73cec2f416b --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/WriteOperation.java @@ -0,0 +1,50 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncWriteBinding; +import com.mongodb.internal.binding.WriteBinding; + +/** + * An operation which writes to a MongoDB server. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public interface WriteOperation { + + /** + * @return the command name of the operation, e.g. "insert", "update", "delete", "bulkWrite", etc. + */ + String getCommandName(); + + /** + * General execute which can return anything of type T + * + * @param binding the binding to execute in the context of + * @return T, the result of the execution + */ + T execute(WriteBinding binding); + + /** + * General execute which can return anything of type T + * + * @param binding the binding to execute in the context of + * @param callback the callback to be called when the operation has been executed + */ + void executeAsync(AsyncWriteBinding binding, SingleResultCallback callback); +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/package-info.java b/driver-core/src/main/com/mongodb/internal/operation/package-info.java new file mode 100644 index 00000000000..ffd207916d1 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/package-info.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. + */ +@Internal +@NonNullApi +package com.mongodb.internal.operation; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/operation/retry/AttachmentKeys.java b/driver-core/src/main/com/mongodb/internal/operation/retry/AttachmentKeys.java new file mode 100644 index 00000000000..d1fbd049632 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/retry/AttachmentKeys.java @@ -0,0 +1,108 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.operation.retry; + +import com.mongodb.annotations.Immutable; +import com.mongodb.bulk.BulkWriteResult; +import com.mongodb.internal.async.function.LoopState.AttachmentKey; +import com.mongodb.internal.operation.MixedBulkWriteOperation.BulkWriteTracker; +import org.bson.BsonDocument; + +import java.util.HashSet; +import java.util.Set; +import java.util.function.Supplier; + +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.assertions.Assertions.fail; + +/** + * A class with {@code static} methods providing access to {@link AttachmentKey}s relevant when implementing retryable operations. + * + *

<p>This class is not part of the public API and may be removed or changed at any time</p>

+ * + * @see AttachmentKey + */ +public final class AttachmentKeys { + private static final AttachmentKey<Integer> MAX_WIRE_VERSION = new DefaultAttachmentKey<>("maxWireVersion"); + private static final AttachmentKey<BsonDocument> COMMAND = new DefaultAttachmentKey<>("command"); + private static final AttachmentKey<Boolean> RETRYABLE_COMMAND_FLAG = new DefaultAttachmentKey<>("retryableCommandFlag"); + private static final AttachmentKey<Supplier<String>> COMMAND_DESCRIPTION_SUPPLIER = new DefaultAttachmentKey<>( + "commandDescriptionSupplier"); + private static final AttachmentKey<BulkWriteTracker> BULK_WRITE_TRACKER = new DefaultAttachmentKey<>("bulkWriteTracker"); + private static final AttachmentKey<BulkWriteResult> BULK_WRITE_RESULT = new DefaultAttachmentKey<>("bulkWriteResult"); + + public static AttachmentKey<Integer> maxWireVersion() { + return MAX_WIRE_VERSION; + } + + public static AttachmentKey<BsonDocument> command() { + return COMMAND; + } + + public static AttachmentKey<Boolean> retryableCommandFlag() { + return RETRYABLE_COMMAND_FLAG; + } + + public static AttachmentKey<Supplier<String>> commandDescriptionSupplier() { + return COMMAND_DESCRIPTION_SUPPLIER; + } + + public static AttachmentKey<BulkWriteTracker> bulkWriteTracker() { + return BULK_WRITE_TRACKER; + } + + public static AttachmentKey<BulkWriteResult> bulkWriteResult() { + return BULK_WRITE_RESULT; + } + + private AttachmentKeys() { + fail(); + } + + @Immutable + private static final class DefaultAttachmentKey<V> implements AttachmentKey<V> { + private static final Set<String> AVOID_KEY_DUPLICATION = new HashSet<>(); + + private final String key; + + private DefaultAttachmentKey(final String key) { + assertTrue(AVOID_KEY_DUPLICATION.add(key)); + this.key = key; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + DefaultAttachmentKey<?> that = (DefaultAttachmentKey<?>) o; + return key.equals(that.key); + } + + @Override + public int hashCode() { + return key.hashCode(); + } + + @Override + public String toString() { + return key; + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/operation/retry/package-info.java b/driver-core/src/main/com/mongodb/internal/operation/retry/package-info.java new file mode 100644 index 00000000000..29c27a47914 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/operation/retry/package-info.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. + */ +@Internal +@NonNullApi +package com.mongodb.internal.operation.retry; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/package-info.java b/driver-core/src/main/com/mongodb/internal/package-info.java new file mode 100644 index 00000000000..52023d78b75 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/package-info.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc.
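The typed-key pattern in AttachmentKeys is worth a quick illustration: because each AttachmentKey carries its value type as a type parameter, a heterogeneous attachment map can hand values back without casts at the call site. Below is a minimal, self-contained sketch of the same idea in plain Java; the `Attachments` and `Key` names are hypothetical and are not the driver's LoopState API.

```java
import java.util.HashMap;
import java.util.Map;

final class Attachments {
    // A key that remembers the type of the value stored under it.
    static final class Key<V> {
        private final String name;
        Key(final String name) { this.name = name; }
        @Override public String toString() { return name; }
    }

    private final Map<Key<?>, Object> values = new HashMap<>();

    <V> void attach(final Key<V> key, final V value) {
        values.put(key, value);
    }

    @SuppressWarnings("unchecked")
    <V> V attachment(final Key<V> key) {
        // The cast is safe because attach() enforced the key/value type pairing.
        return (V) values.get(key);
    }

    public static void main(String[] args) {
        Key<Integer> maxWireVersion = new Key<>("maxWireVersion");
        Attachments attachments = new Attachments();
        attachments.attach(maxWireVersion, 21);
        int v = attachments.attachment(maxWireVersion); // no cast needed at the call site
        System.out.println(v);
    }
}
```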
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. + */ + +@Internal +@NonNullApi +package com.mongodb.internal; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/selector/AtMostTwoRandomServerSelector.java b/driver-core/src/main/com/mongodb/internal/selector/AtMostTwoRandomServerSelector.java new file mode 100644 index 00000000000..22f55ac0245 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/selector/AtMostTwoRandomServerSelector.java @@ -0,0 +1,60 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.selector; + +import com.mongodb.annotations.Immutable; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.selector.ServerSelector; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ThreadLocalRandom; + +/** + * {@linkplain #select(ClusterDescription) Selects} at most two {@link ServerDescription}s at random. This selector uses the + * Fisher–Yates, a.k.a. Durstenfeld, shuffle algorithm. + * + *

<p>This class is not part of the public API and may be removed or changed at any time</p>

+ */ +@Immutable +public final class AtMostTwoRandomServerSelector implements ServerSelector { + private static final int TWO = 2; + private static final AtMostTwoRandomServerSelector INSTANCE = new AtMostTwoRandomServerSelector(); + + private AtMostTwoRandomServerSelector() { + } + + public static AtMostTwoRandomServerSelector instance() { + return INSTANCE; + } + + @Override + public List<ServerDescription> select(final ClusterDescription clusterDescription) { + List<ServerDescription> serverDescriptions = new ArrayList<>(clusterDescription.getServerDescriptions()); + List<ServerDescription> result = new ArrayList<>(); + ThreadLocalRandom random = ThreadLocalRandom.current(); + for (int i = serverDescriptions.size() - 1; i >= 0; i--) { + Collections.swap(serverDescriptions, i, random.nextInt(i + 1)); + result.add(serverDescriptions.get(i)); + if (result.size() == TWO) { + break; + } + } + return result; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/selector/LatencyMinimizingServerSelector.java b/driver-core/src/main/com/mongodb/internal/selector/LatencyMinimizingServerSelector.java new file mode 100644 index 00000000000..d7433f1706a --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/selector/LatencyMinimizingServerSelector.java @@ -0,0 +1,122 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.selector; + +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.selector.ServerSelector; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE; +import static com.mongodb.internal.connection.ClusterDescriptionHelper.getAny; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.NANOSECONDS; + +/** + * A server selector that accepts only servers within the given ping-time latency difference from the faster of the servers. + * + *
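The select() loop above is a partial Durstenfeld (Fisher–Yates) shuffle: it shuffles from the tail of the list and stops as soon as two elements have been drawn, so it does constant work per selected server instead of shuffling the whole list. The same selection over plain strings, as a self-contained sketch:

```java
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;

final class PickTwo {
    // Pick at most two elements uniformly at random by running the
    // Durstenfeld shuffle from the tail and stopping early.
    static List<String> pickTwo(final List<String> input) {
        List<String> copy = new ArrayList<>(input);
        List<String> result = new ArrayList<>();
        ThreadLocalRandom random = ThreadLocalRandom.current();
        for (int i = copy.size() - 1; i >= 0 && result.size() < 2; i--) {
            Collections.swap(copy, i, random.nextInt(i + 1));
            result.add(copy.get(i));
        }
        return result;
    }

    public static void main(String[] args) {
        System.out.println(pickTwo(Arrays.asList("s1", "s2", "s3", "s4")));
    }
}
```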

<p>This class is not part of the public API and may be removed or changed at any time</p>

+ */ +public class LatencyMinimizingServerSelector implements ServerSelector { + + private final long acceptableLatencyDifferenceNanos; + + /** + * + * @param acceptableLatencyDifference the maximum difference in ping-time latency between the fastest ping time and the slowest of + * the chosen servers + * @param timeUnit the time unit of the acceptableLatencyDifference + */ + public LatencyMinimizingServerSelector(final long acceptableLatencyDifference, final TimeUnit timeUnit) { + this.acceptableLatencyDifferenceNanos = NANOSECONDS.convert(acceptableLatencyDifference, timeUnit); + } + + /** + * Gets the acceptable latency difference. + * + * @param timeUnit the time unit to get it in. + * @return the acceptable latency difference in the specified time unit + */ + public long getAcceptableLatencyDifference(final TimeUnit timeUnit) { + return timeUnit.convert(acceptableLatencyDifferenceNanos, NANOSECONDS); + } + + @Override + public List<ServerDescription> select(final ClusterDescription clusterDescription) { + if (clusterDescription.getConnectionMode() != MULTIPLE) { + return getAny(clusterDescription); + } else { + return getServersWithAcceptableLatencyDifference(getAny(clusterDescription), + getFastestRoundTripTimeNanos(clusterDescription.getServerDescriptions())); + } + } + + @Override + public String toString() { + return "LatencyMinimizingServerSelector{" + + "acceptableLatencyDifference=" + MILLISECONDS.convert(acceptableLatencyDifferenceNanos, NANOSECONDS) + " ms" + + '}'; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + LatencyMinimizingServerSelector that = (LatencyMinimizingServerSelector) o; + return acceptableLatencyDifferenceNanos == that.acceptableLatencyDifferenceNanos; + } + + @Override + public int hashCode() { + return (int) (acceptableLatencyDifferenceNanos ^ (acceptableLatencyDifferenceNanos >>> 32)); + } + + private long getFastestRoundTripTimeNanos(final List<ServerDescription> members) { + long fastestRoundTripTime = Long.MAX_VALUE; + for (final ServerDescription cur : members) { + if (!cur.isOk()) { + continue; + } + if (cur.getRoundTripTimeNanos() < fastestRoundTripTime) { + fastestRoundTripTime = cur.getRoundTripTimeNanos(); + } + } + return fastestRoundTripTime; + } + + private List<ServerDescription> getServersWithAcceptableLatencyDifference(final List<ServerDescription> servers, + final long bestPingTime) { + List<ServerDescription> goodSecondaries = new ArrayList<>(servers.size()); + for (final ServerDescription cur : servers) { + if (!cur.isOk()) { + continue; + } + if (cur.getRoundTripTimeNanos() - acceptableLatencyDifferenceNanos <= bestPingTime) { + goodSecondaries.add(cur); + } + } + return goodSecondaries; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/selector/MinimumOperationCountServerSelector.java b/driver-core/src/main/com/mongodb/internal/selector/MinimumOperationCountServerSelector.java new file mode 100644 index 00000000000..8acc5978c1f --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/selector/MinimumOperationCountServerSelector.java @@ -0,0 +1,62 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
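To make the latency window concrete: a server is kept iff its round-trip time minus the acceptable difference does not exceed the fastest round-trip time. A worked example with hypothetical 10/20/40 ms servers and a 15 ms window:

```java
import java.util.LinkedHashMap;
import java.util.Map;

import static java.util.concurrent.TimeUnit.MILLISECONDS;

final class LatencyWindowDemo {
    public static void main(String[] args) {
        long acceptableDiffNanos = MILLISECONDS.toNanos(15);
        // Hypothetical round-trip times per server.
        Map<String, Long> rttNanos = new LinkedHashMap<>();
        rttNanos.put("host1", MILLISECONDS.toNanos(10)); // the fastest server
        rttNanos.put("host2", MILLISECONDS.toNanos(20)); // 20 - 15 <= 10 -> kept
        rttNanos.put("host3", MILLISECONDS.toNanos(40)); // 40 - 15 > 10 -> dropped
        long fastest = rttNanos.values().stream().min(Long::compare).orElse(Long.MAX_VALUE);
        rttNanos.forEach((host, rtt) ->
                System.out.println(host + " kept=" + (rtt - acceptableDiffNanos <= fastest)));
    }
}
```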
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.selector; + +import com.mongodb.ServerAddress; +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.connection.Cluster.ServersSnapshot; +import com.mongodb.internal.connection.Server; +import com.mongodb.selector.ServerSelector; + +import java.util.Collections; +import java.util.List; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static java.util.Collections.emptyList; +import static java.util.Comparator.comparingInt; + +/** + * {@linkplain #select(ClusterDescription) Selects} at most one {@link ServerDescription} + * corresponding to a {@link ServersSnapshot#getServer(ServerAddress) server} with the smallest {@link Server#operationCount()}. + * + *

<p>This class is not part of the public API and may be removed or changed at any time</p>

+ */ +@ThreadSafe +public final class MinimumOperationCountServerSelector implements ServerSelector { + private final ServersSnapshot serversSnapshot; + + /** + * @param serversSnapshot Must {@linkplain ServersSnapshot#containsServer(ServerAddress) contain} {@link Server}s corresponding to + * {@linkplain ClusterDescription#getServerDescriptions() all} {@link ServerDescription}s + * in the {@link ClusterDescription} passed to {@link #select(ClusterDescription)}. + */ + public MinimumOperationCountServerSelector(final ServersSnapshot serversSnapshot) { + this.serversSnapshot = serversSnapshot; + } + + @Override + public List<ServerDescription> select(final ClusterDescription clusterDescription) { + return clusterDescription.getServerDescriptions() + .stream() + .min(comparingInt(serverDescription -> + assertNotNull(serversSnapshot.getServer(serverDescription.getAddress())) + .operationCount())) + .map(Collections::singletonList) + .orElse(emptyList()); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/selector/PrimaryServerSelector.java b/driver-core/src/main/com/mongodb/internal/selector/PrimaryServerSelector.java new file mode 100644 index 00000000000..3c4e08d2e45 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/selector/PrimaryServerSelector.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.selector; + +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.selector.ServerSelector; + +import java.util.List; + +import static com.mongodb.internal.connection.ClusterDescriptionHelper.getPrimaries; + +/** + * A server selector that chooses servers that are primaries. + * + *
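The stream in MinimumOperationCountServerSelector reduces the snapshot to at most one element: min() yields an empty Optional for an empty cluster, which maps to an empty selection rather than an exception. The same shape over hypothetical per-host operation counts:

```java
import java.util.Collections;
import java.util.List;
import java.util.Map;

import static java.util.Comparator.comparingInt;

final class MinOpCountDemo {
    public static void main(String[] args) {
        // Hypothetical in-flight operation counts per server.
        Map<String, Integer> operationCounts = Map.of("host1", 7, "host2", 3, "host3", 12);
        List<String> selected = operationCounts.keySet().stream()
                .min(comparingInt(operationCounts::get))
                .map(Collections::singletonList)
                .orElse(Collections.emptyList());
        System.out.println(selected); // [host2], the least-loaded server
    }
}
```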

<p>This class is not part of the public API and may be removed or changed at any time</p>

+ */ +public final class PrimaryServerSelector implements ServerSelector { + + @Override + public List<ServerDescription> select(final ClusterDescription clusterDescription) { + return getPrimaries(clusterDescription); + } + + @Override + public String toString() { + return "PrimaryServerSelector"; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/selector/ReadPreferenceServerSelector.java b/driver-core/src/main/com/mongodb/internal/selector/ReadPreferenceServerSelector.java new file mode 100644 index 00000000000..5fdc815b37a --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/selector/ReadPreferenceServerSelector.java @@ -0,0 +1,70 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.selector; + +import com.mongodb.ReadPreference; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.selector.ServerSelector; + +import java.util.List; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.connection.ClusterDescriptionHelper.getAny; + +/** + * A server selector that chooses based on a read preference. + * + *

<p>This class is not part of the public API and may be removed or changed at any time</p>

+ */ +public class ReadPreferenceServerSelector implements ServerSelector { + private final ReadPreference readPreference; + + /** + * Constructs a new instance. + * + * @param readPreference the read preference + */ + public ReadPreferenceServerSelector(final ReadPreference readPreference) { + this.readPreference = notNull("readPreference", readPreference); + } + + /** + * Gets the read preference. + * + * @return the read preference + */ + public ReadPreference getReadPreference() { + return readPreference; + } + + @Override + public List<ServerDescription> select(final ClusterDescription clusterDescription) { + if (clusterDescription.getConnectionMode() == ClusterConnectionMode.SINGLE) { + return getAny(clusterDescription); + } + return readPreference.choose(clusterDescription); + } + + @Override + public String toString() { + return "ReadPreferenceServerSelector{" + + "readPreference=" + readPreference + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/selector/ReadPreferenceWithFallbackServerSelector.java b/driver-core/src/main/com/mongodb/internal/selector/ReadPreferenceWithFallbackServerSelector.java new file mode 100644 index 00000000000..500c3b0b4d4 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/selector/ReadPreferenceWithFallbackServerSelector.java @@ -0,0 +1,74 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.selector; + +import com.mongodb.ReadPreference; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ServerConnectionState; +import com.mongodb.connection.ServerDescription; +import com.mongodb.selector.ServerSelector; + +import java.util.List; + +/** + *

<p>This class is not part of the public API and may be removed or changed at any time</p>

+ */ +public class ReadPreferenceWithFallbackServerSelector implements ServerSelector { + + private final ReadPreference preferredReadPreference; + private final int minWireVersion; + private final ReadPreference fallbackReadPreference; + private ReadPreference appliedReadPreference; + + public ReadPreferenceWithFallbackServerSelector(final ReadPreference preferredReadPreference, final int minWireVersion, + final ReadPreference fallbackReadPreference) { + this.preferredReadPreference = preferredReadPreference; + this.minWireVersion = minWireVersion; + this.fallbackReadPreference = fallbackReadPreference; + } + + + @Override + public List<ServerDescription> select(final ClusterDescription clusterDescription) { + if (clusterContainsOlderServers(clusterDescription)) { + appliedReadPreference = fallbackReadPreference; + return new ReadPreferenceServerSelector(fallbackReadPreference).select(clusterDescription); + } else { + appliedReadPreference = preferredReadPreference; + return new ReadPreferenceServerSelector(preferredReadPreference).select(clusterDescription); + } + } + + public ReadPreference getAppliedReadPreference() { + return appliedReadPreference; + } + + private boolean clusterContainsOlderServers(final ClusterDescription clusterDescription) { + return clusterDescription.getServerDescriptions().stream() + .filter(serverDescription -> serverDescription.getState() == ServerConnectionState.CONNECTED) + .anyMatch(serverDescription -> serverDescription.getMaxWireVersion() < minWireVersion); + } + + @Override + public String toString() { + return "ReadPreferenceWithFallbackServerSelector{" + + "preferredReadPreference=" + preferredReadPreference + + ", fallbackReadPreference=" + fallbackReadPreference + + ", minWireVersionForPreferred=" + minWireVersion + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/selector/ServerAddressSelector.java b/driver-core/src/main/com/mongodb/internal/selector/ServerAddressSelector.java new file mode 100644 index 00000000000..e78ace75604 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/selector/ServerAddressSelector.java @@ -0,0 +1,72 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.selector; + +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.selector.ServerSelector; + +import java.util.Collections; +import java.util.List; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.connection.ClusterDescriptionHelper.getByServerAddress; +import static java.util.Collections.singletonList; + +/** + * A server selector that chooses a server that matches the server address. + * + *
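The fallback rule above downgrades the whole selection if any connected server is too old: the preferred read preference is used only when every connected server's max wire version is at least minWireVersion. Condensed to the core predicate (the wire versions are hypothetical):

```java
import java.util.List;

final class FallbackDemo {
    public static void main(String[] args) {
        int minWireVersion = 13;
        // Max wire versions reported by the connected servers.
        List<Integer> connectedServerWireVersions = List.of(13, 14, 12);
        boolean clusterContainsOlderServers = connectedServerWireVersions.stream()
                .anyMatch(maxWireVersion -> maxWireVersion < minWireVersion);
        // One v12 server forces the fallback read preference for the whole operation.
        System.out.println(clusterContainsOlderServers ? "fallback" : "preferred");
    }
}
```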

<p>This class is not part of the public API and may be removed or changed at any time</p>

+ */ +public class ServerAddressSelector implements ServerSelector { + private final ServerAddress serverAddress; + + /** + * Constructs a new instance. + * + * @param serverAddress the server address + */ + public ServerAddressSelector(final ServerAddress serverAddress) { + this.serverAddress = notNull("serverAddress", serverAddress); + } + + /** + * Gets the server address. + * + * @return the server address + */ + public ServerAddress getServerAddress() { + return serverAddress; + } + + @Override + public List<ServerDescription> select(final ClusterDescription clusterDescription) { + ServerDescription serverDescription = getByServerAddress(clusterDescription, serverAddress); + if (serverDescription != null) { + return singletonList(serverDescription); + } + return Collections.emptyList(); + } + + @Override + public String toString() { + return "ServerAddressSelector{" + + "serverAddress=" + serverAddress + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/selector/WritableServerSelector.java b/driver-core/src/main/com/mongodb/internal/selector/WritableServerSelector.java new file mode 100644 index 00000000000..a0cba92a8b2 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/selector/WritableServerSelector.java @@ -0,0 +1,49 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.selector; + +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.selector.ServerSelector; + +import java.util.List; + +import static com.mongodb.internal.connection.ClusterDescriptionHelper.getAny; +import static com.mongodb.internal.connection.ClusterDescriptionHelper.getPrimaries; + +/** + * A server selector that chooses servers that are writable. + * + *

<p>This class is not part of the public API and may be removed or changed at any time</p>

+ */ +public final class WritableServerSelector implements ServerSelector { + + @Override + public List<ServerDescription> select(final ClusterDescription clusterDescription) { + if (clusterDescription.getConnectionMode() == ClusterConnectionMode.SINGLE + || clusterDescription.getConnectionMode() == ClusterConnectionMode.LOAD_BALANCED) { + return getAny(clusterDescription); + } + return getPrimaries(clusterDescription); + } + + @Override + public String toString() { + return "WritableServerSelector"; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/selector/package-info.java b/driver-core/src/main/com/mongodb/internal/selector/package-info.java new file mode 100644 index 00000000000..ab05e07c087 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/selector/package-info.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. + */ +@Internal +@NonNullApi +package com.mongodb.internal.selector; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/session/BaseClientSessionImpl.java b/driver-core/src/main/com/mongodb/internal/session/BaseClientSessionImpl.java new file mode 100644 index 00000000000..80f88cc08f5 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/session/BaseClientSessionImpl.java @@ -0,0 +1,245 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.session; + +import com.mongodb.ClientSessionOptions; +import com.mongodb.MongoClientException; +import com.mongodb.ServerAddress; +import com.mongodb.TransactionOptions; +import com.mongodb.WriteConcern; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.binding.ReferenceCounted; +import com.mongodb.lang.Nullable; +import com.mongodb.session.ClientSession; +import com.mongodb.session.ServerSession; +import org.bson.BsonDocument; +import org.bson.BsonTimestamp; + +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.assertions.Assertions.isTrue; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +/** + *

<p>This class is not part of the public API and may be removed or changed at any time</p>

+ */ +public class BaseClientSessionImpl implements ClientSession { + private static final String CLUSTER_TIME_KEY = "clusterTime"; + + private final ServerSessionPool serverSessionPool; + private ServerSession serverSession; + private final Object originator; + private final ClientSessionOptions options; + private final AtomicBoolean closed = new AtomicBoolean(false); + private BsonDocument clusterTime; + private BsonTimestamp operationTime; + private BsonTimestamp snapshotTimestamp; + private ServerAddress pinnedServerAddress; + private BsonDocument recoveryToken; + private ReferenceCounted transactionContext; + @Nullable + private TimeoutContext timeoutContext; + + protected static boolean hasTimeoutMS(@Nullable final TimeoutContext timeoutContext) { + return timeoutContext != null && timeoutContext.hasTimeoutMS(); + } + + protected static boolean hasWTimeoutMS(@Nullable final WriteConcern writeConcern) { + return writeConcern != null && writeConcern.getWTimeout(TimeUnit.MILLISECONDS) != null; + } + + public BaseClientSessionImpl(final ServerSessionPool serverSessionPool, final Object originator, final ClientSessionOptions options) { + this.serverSessionPool = serverSessionPool; + this.originator = originator; + this.options = options; + this.pinnedServerAddress = null; + } + + @Override + @Nullable + public ServerAddress getPinnedServerAddress() { + return pinnedServerAddress; + } + + @Override + public Object getTransactionContext() { + return transactionContext; + } + + @Override + public void setTransactionContext(final ServerAddress address, final Object transactionContext) { + assertTrue(transactionContext instanceof ReferenceCounted); + pinnedServerAddress = address; + this.transactionContext = (ReferenceCounted) transactionContext; + this.transactionContext.retain(); + } + + @Override + public void clearTransactionContext() { + pinnedServerAddress = null; + if (transactionContext != null) { + transactionContext.release(); + transactionContext = null; + } + } + + @Override + public BsonDocument getRecoveryToken() { + return recoveryToken; + } + + @Override + public void setRecoveryToken(final BsonDocument recoveryToken) { + this.recoveryToken = recoveryToken; + } + + @Override + public ClientSessionOptions getOptions() { + return options; + } + + @Override + public boolean isCausallyConsistent() { + Boolean causallyConsistent = options.isCausallyConsistent(); + return causallyConsistent == null || causallyConsistent; + } + + @Override + public Object getOriginator() { + return originator; + } + + @Override + public BsonDocument getClusterTime() { + return clusterTime; + } + + @Override + public BsonTimestamp getOperationTime() { + return operationTime; + } + + @Override + public ServerSession getServerSession() { + isTrue("open", !closed.get()); + if (serverSession == null) { + serverSession = serverSessionPool.get(); + } + return serverSession; + } + + @Override + public void advanceOperationTime(@Nullable final BsonTimestamp newOperationTime) { + isTrue("open", !closed.get()); + this.operationTime = greaterOf(newOperationTime); + } + + @Override + public void advanceClusterTime(@Nullable final BsonDocument newClusterTime) { + isTrue("open", !closed.get()); + this.clusterTime = greaterOf(newClusterTime); + } + + @Override + public void setSnapshotTimestamp(@Nullable final BsonTimestamp snapshotTimestamp) { + isTrue("open", !closed.get()); + if (snapshotTimestamp != null) { + if (this.snapshotTimestamp != null && !snapshotTimestamp.equals(this.snapshotTimestamp)) { + throw new 
MongoClientException("Snapshot timestamps should not change during the lifetime of the session. Current " + + "timestamp is " + this.snapshotTimestamp + ", and attempting to set it to " + snapshotTimestamp); + } + this.snapshotTimestamp = snapshotTimestamp; + } + } + + @Override + @Nullable + public BsonTimestamp getSnapshotTimestamp() { + isTrue("open", !closed.get()); + return snapshotTimestamp; + } + + private BsonDocument greaterOf(@Nullable final BsonDocument newClusterTime) { + if (newClusterTime == null) { + return clusterTime; + } else if (clusterTime == null) { + return newClusterTime; + } else { + return newClusterTime.getTimestamp(CLUSTER_TIME_KEY).compareTo(clusterTime.getTimestamp(CLUSTER_TIME_KEY)) > 0 + ? newClusterTime : clusterTime; + } + } + + private BsonTimestamp greaterOf(@Nullable final BsonTimestamp newOperationTime) { + if (newOperationTime == null) { + return operationTime; + } else if (operationTime == null) { + return newOperationTime; + } else { + return newOperationTime.compareTo(operationTime) > 0 ? newOperationTime : operationTime; + } + } + + @Override + public void close() { + // While the interface implemented by this class is documented as not thread safe, it's still useful to provide thread safety here + // in order to prevent the code within the conditional from executing more than once. Doing so protects the server session pool from + // corruption, by preventing the same server session from being released to the pool more than once. + if (closed.compareAndSet(false, true)) { + if (serverSession != null) { + serverSessionPool.release(serverSession); + } + clearTransactionContext(); + } + } + + @Override + @Nullable + public TimeoutContext getTimeoutContext() { + return timeoutContext; + } + + protected void setTimeoutContext(@Nullable final TimeoutContext timeoutContext) { + this.timeoutContext = timeoutContext; + } + + protected void resetTimeout() { + if (timeoutContext != null) { + timeoutContext.resetTimeoutIfPresent(); + } + } + + protected TimeoutSettings getTimeoutSettings(final TransactionOptions transactionOptions, final TimeoutSettings timeoutSettings) { + Long transactionTimeoutMS = transactionOptions.getTimeout(MILLISECONDS); + Long defaultTimeoutMS = getOptions().getDefaultTimeout(MILLISECONDS); + Long clientTimeoutMS = timeoutSettings.getTimeoutMS(); + + Long timeoutMS = transactionTimeoutMS != null ? transactionTimeoutMS + : defaultTimeoutMS != null ? defaultTimeoutMS : clientTimeoutMS; + + return timeoutSettings + .withMaxCommitMS(transactionOptions.getMaxCommitTime(MILLISECONDS)) + .withTimeout(timeoutMS, MILLISECONDS); + } + + protected enum TransactionState { + NONE, IN, COMMITTED, ABORTED + } +} diff --git a/driver-core/src/main/com/mongodb/internal/session/ClientSessionContext.java b/driver-core/src/main/com/mongodb/internal/session/ClientSessionContext.java new file mode 100644 index 00000000000..f9f23428f00 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/session/ClientSessionContext.java @@ -0,0 +1,122 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.session; + +import com.mongodb.lang.Nullable; +import com.mongodb.session.ClientSession; +import org.bson.BsonDocument; +import org.bson.BsonTimestamp; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + *
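The timeout precedence in getTimeoutSettings in BaseClientSessionImpl above is worth spelling out: a transaction-level timeout overrides the session's default timeout, which overrides the client-wide setting. A tiny sketch of just that resolution chain (the method name is illustrative):

```java
final class TimeoutPrecedenceDemo {
    // Mirrors the precedence in getTimeoutSettings(): transaction timeout
    // wins over the session default, which wins over the client-wide setting.
    static Long effectiveTimeoutMS(Long transactionTimeoutMS, Long defaultTimeoutMS, Long clientTimeoutMS) {
        return transactionTimeoutMS != null ? transactionTimeoutMS
                : defaultTimeoutMS != null ? defaultTimeoutMS : clientTimeoutMS;
    }

    public static void main(String[] args) {
        System.out.println(effectiveTimeoutMS(null, 5_000L, 30_000L));   // 5000
        System.out.println(effectiveTimeoutMS(2_000L, 5_000L, 30_000L)); // 2000
        System.out.println(effectiveTimeoutMS(null, null, 30_000L));     // 30000
    }
}
```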

<p>This class is not part of the public API and may be removed or changed at any time</p>

+ */ +public abstract class ClientSessionContext implements SessionContext { + + private final ClientSession clientSession; + + public ClientSessionContext(final ClientSession clientSession) { + this.clientSession = notNull("clientSession", clientSession); + } + + public ClientSession getClientSession() { + return clientSession; + } + + @Override + public boolean hasSession() { + return true; + } + + @Override + public BsonDocument getSessionId() { + return clientSession.getServerSession().getIdentifier(); + } + + @Override + public boolean isCausallyConsistent() { + return clientSession.isCausallyConsistent(); + } + + @Override + public long getTransactionNumber() { + return clientSession.getServerSession().getTransactionNumber(); + } + + @Override + public long advanceTransactionNumber() { + return clientSession.getServerSession().advanceTransactionNumber(); + } + + @Override + public BsonTimestamp getOperationTime() { + return clientSession.getOperationTime(); + } + + @Override + public void advanceOperationTime(@Nullable final BsonTimestamp operationTime) { + clientSession.advanceOperationTime(operationTime); + } + + @Override + public BsonDocument getClusterTime() { + return clientSession.getClusterTime(); + } + + @Override + public void advanceClusterTime(@Nullable final BsonDocument clusterTime) { + clientSession.advanceClusterTime(clusterTime); + } + + @Override + public boolean isSnapshot() { + Boolean snapshot = clientSession.getOptions().isSnapshot(); + return snapshot != null && snapshot; + } + + @Override + public void setSnapshotTimestamp(@Nullable final BsonTimestamp snapshotTimestamp) { + clientSession.setSnapshotTimestamp(snapshotTimestamp); + } + + @Override + @Nullable + public BsonTimestamp getSnapshotTimestamp() { + return clientSession.getSnapshotTimestamp(); + } + + @Override + public void setRecoveryToken(final BsonDocument recoveryToken) { + clientSession.setRecoveryToken(recoveryToken); + } + + @Override + public void clearTransactionContext() { + clientSession.clearTransactionContext(); + } + + @Override + public void markSessionDirty() { + clientSession.getServerSession().markDirty(); + } + + @Override + public boolean isSessionMarkedDirty() { + return clientSession.getServerSession().isMarkedDirty(); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/session/ServerSessionPool.java b/driver-core/src/main/com/mongodb/internal/session/ServerSessionPool.java new file mode 100644 index 00000000000..9111eaed3a9 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/session/ServerSessionPool.java @@ -0,0 +1,257 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.session; + +import com.mongodb.MongoException; +import com.mongodb.ReadPreference; +import com.mongodb.ServerApi; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.IgnorableRequestContext; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.connection.Cluster; +import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.connection.NoOpSessionContext; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.selector.ReadPreferenceServerSelector; +import com.mongodb.internal.validator.NoOpFieldNameValidator; +import com.mongodb.lang.Nullable; +import com.mongodb.selector.ServerSelector; +import com.mongodb.session.ServerSession; +import org.bson.BsonArray; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.BsonDocumentWriter; +import org.bson.UuidRepresentation; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.UuidCodec; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ConcurrentLinkedDeque; +import java.util.concurrent.atomic.LongAdder; + +import static com.mongodb.assertions.Assertions.isTrue; +import static java.util.concurrent.TimeUnit.MINUTES; + +/** + *

<p>This class is not part of the public API and may be removed or changed at any time</p>

+ */ +public class ServerSessionPool { + private final ConcurrentLinkedDeque<ServerSessionImpl> available = new ConcurrentLinkedDeque<>(); + private final Cluster cluster; + private final ServerSessionPool.Clock clock; + private volatile boolean closed; + private final OperationContext operationContext; + private final LongAdder inUseCount = new LongAdder(); + + interface Clock { + long millis(); + } + + public ServerSessionPool(final Cluster cluster, final TimeoutSettings timeoutSettings, @Nullable final ServerApi serverApi) { + this(cluster, + new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE, + new TimeoutContext(timeoutSettings.connectionOnly()), serverApi)); + } + + public ServerSessionPool(final Cluster cluster, final OperationContext operationContext) { + this(cluster, operationContext, System::currentTimeMillis); + } + + public ServerSessionPool(final Cluster cluster, final OperationContext operationContext, final Clock clock) { + this.cluster = cluster; + this.operationContext = operationContext; + this.clock = clock; + } + + public ServerSession get() { + isTrue("server session pool is open", !closed); + ServerSessionImpl serverSession = available.pollLast(); + while (serverSession != null && shouldPrune(serverSession)) { + serverSession.close(); + serverSession = available.pollLast(); + } + if (serverSession == null) { + serverSession = new ServerSessionImpl(); + } + inUseCount.increment(); + return serverSession; + } + + public void release(final ServerSession serverSession) { + inUseCount.decrement(); + ServerSessionImpl serverSessionImpl = (ServerSessionImpl) serverSession; + if (serverSessionImpl.isMarkedDirty()) { + serverSessionImpl.close(); + } else { + available.addLast(serverSessionImpl); + } + } + + public long getInUseCount() { + return inUseCount.sum(); + } + + public void close() { + closed = true; + endClosedSessions(); + } + + private void endClosedSessions() { + List<BsonDocument> identifiers = drainPool(); + if (identifiers.isEmpty()) { + return; + } + + ReadPreference primaryPreferred = ReadPreference.primaryPreferred(); + List<ServerDescription> primaryPreferredServers = new ReadPreferenceServerSelector(primaryPreferred) + .select(cluster.getCurrentDescription()); + if (primaryPreferredServers.isEmpty()) { + // Skip doing server selection if we anticipate that no server is readily selectable. + // This approach is racy, and it is still possible to become blocked selecting a server + // even if `primaryPreferredServers` is not empty.
return; + } + + Connection connection = null; + try { + connection = cluster.selectServer( + new ServerSelector() { + @Override + public List<ServerDescription> select(final ClusterDescription clusterDescription) { + for (ServerDescription cur : clusterDescription.getServerDescriptions()) { + if (cur.getAddress().equals(primaryPreferredServers.get(0).getAddress())) { + return Collections.singletonList(cur); + } + } + return Collections.emptyList(); + } + + @Override + public String toString() { + return "ReadPreferenceServerSelector{" + + "readPreference=" + primaryPreferred + + '}'; + } + }, + operationContext).getServer().getConnection(operationContext); + + connection.command("admin", + new BsonDocument("endSessions", new BsonArray(identifiers)), NoOpFieldNameValidator.INSTANCE, + ReadPreference.primaryPreferred(), new BsonDocumentCodec(), operationContext); + } catch (MongoException e) { + // ignore exceptions + } finally { + if (connection != null) { + connection.release(); + } + } + } + + /** + * Drain the pool, returning a list of the identifiers of all drained sessions. + */ + private List<BsonDocument> drainPool() { + List<BsonDocument> identifiers = new ArrayList<>(available.size()); + ServerSessionImpl nextSession = available.pollFirst(); + while (nextSession != null) { + identifiers.add(nextSession.getIdentifier()); + nextSession = available.pollFirst(); + } + return identifiers; + } + + private boolean shouldPrune(final ServerSessionImpl serverSession) { + Integer logicalSessionTimeoutMinutes = cluster.getCurrentDescription().getLogicalSessionTimeoutMinutes(); + // if the logical session timeout is unknown, e.g. the server no longer supports sessions, don't prune the session + if (logicalSessionTimeoutMinutes == null) { + return false; + } + long currentTimeMillis = clock.millis(); + long timeSinceLastUse = currentTimeMillis - serverSession.getLastUsedAtMillis(); + long oneMinuteFromTimeout = MINUTES.toMillis(logicalSessionTimeoutMinutes - 1); + return timeSinceLastUse > oneMinuteFromTimeout; + } + + private BsonBinary createNewServerSessionIdentifier() { + UuidCodec uuidCodec = new UuidCodec(UuidRepresentation.STANDARD); + BsonDocument holder = new BsonDocument(); + BsonDocumentWriter bsonDocumentWriter = new BsonDocumentWriter(holder); + bsonDocumentWriter.writeStartDocument(); + bsonDocumentWriter.writeName("id"); + uuidCodec.encode(bsonDocumentWriter, UUID.randomUUID(), EncoderContext.builder().build()); + bsonDocumentWriter.writeEndDocument(); + return holder.getBinary("id"); + } + + final class ServerSessionImpl implements ServerSession { + private final BsonDocument identifier; + private long transactionNumber = 0; + private volatile long lastUsedAtMillis = clock.millis(); + private volatile boolean closed; + private volatile boolean dirty = false; + + ServerSessionImpl() { + identifier = new BsonDocument("id", createNewServerSessionIdentifier()); + } + + void close() { + closed = true; + } + + long getLastUsedAtMillis() { + return lastUsedAtMillis; + } + + @Override + public long getTransactionNumber() { + return transactionNumber; + } + + @Override + public BsonDocument getIdentifier() { + lastUsedAtMillis = clock.millis(); + return identifier; + } + + @Override + public long advanceTransactionNumber() { + transactionNumber++; + return transactionNumber; + } + + @Override + public boolean isClosed() { + return closed; + } + + @Override + public void markDirty() { + dirty = true; + } + + @Override + public boolean isMarkedDirty() { + return dirty; + } + } +} diff --git a/driver-core/src/main/com/mongodb/internal/session/SessionContext.java
b/driver-core/src/main/com/mongodb/internal/session/SessionContext.java new file mode 100644 index 00000000000..4a8902799ec --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/session/SessionContext.java @@ -0,0 +1,117 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.session; + +import com.mongodb.ReadConcern; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonTimestamp; + +/** + *
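Back in ServerSessionPool above, shouldPrune builds in a one-minute safety margin: a pooled session is discarded once it has been idle longer than the server's logical session timeout minus one minute. With the common server default of 30 minutes, the arithmetic works out as in this sketch:

```java
import static java.util.concurrent.TimeUnit.MINUTES;

final class PruneDemo {
    // A session is pruned when it has been idle longer than the server's
    // logical session timeout minus a one-minute safety margin.
    static boolean shouldPrune(long idleMillis, int logicalSessionTimeoutMinutes) {
        return idleMillis > MINUTES.toMillis(logicalSessionTimeoutMinutes - 1);
    }

    public static void main(String[] args) {
        // With a 30-minute timeout, the pool stops reusing sessions
        // that have been idle for more than 29 minutes.
        System.out.println(shouldPrune(MINUTES.toMillis(28), 30));     // false
        System.out.println(shouldPrune(MINUTES.toMillis(29) + 1, 30)); // true
    }
}
```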

<p>This class is not part of the public API and may be removed or changed at any time</p>

+ */ +public interface SessionContext { + + /** + * Returns true if there is a true server session associated with this context. + */ + boolean hasSession(); + + /** + * Returns true if the session is implicit, and false if the application started the session explicitly. + */ + boolean isImplicitSession(); + + /** + * Gets the session identifier if this context has a session backing it. + */ + BsonDocument getSessionId(); + + boolean isCausallyConsistent(); + + long getTransactionNumber(); + + /** + * Advance the transaction number. + * + * @return the next non-negative transaction number for the session + */ + long advanceTransactionNumber(); + + /** + * Notify the session context that a message has been sent. + * + * @return true if this is the first message sent, false otherwise + */ + boolean notifyMessageSent(); + + /** + * Gets the current operation time for this session context + * + * @return the current operation time, which may be null + */ + @Nullable + BsonTimestamp getOperationTime(); + + /** + * Advance the operation time. If the current operation time is greater than the given operation time, this method has no effect. + * + * @param operationTime the new operation time + */ + void advanceOperationTime(@Nullable BsonTimestamp operationTime); + + /** + * Gets the current cluster time for this session context. + * + * @return the cluster time, which may be null + */ + @Nullable + BsonDocument getClusterTime(); + + /** + * Advance the cluster time. If the current cluster time is greater than the given cluster time, this method has no effect. + * + * @param clusterTime the new cluster time + */ + void advanceClusterTime(@Nullable BsonDocument clusterTime); + + boolean isSnapshot(); + + void setSnapshotTimestamp(@Nullable BsonTimestamp snapshotTimestamp); + + @Nullable + BsonTimestamp getSnapshotTimestamp(); + + boolean hasActiveTransaction(); + + ReadConcern getReadConcern(); + + void setRecoveryToken(BsonDocument recoveryToken); + + /** + * Unpin a mongos from a session. + */ + void clearTransactionContext(); + + /** + * Mark the session as dirty. This happens when a command fails with a network + * error. Dirty sessions are later discarded from the server session pool. + */ + void markSessionDirty(); + + boolean isSessionMarkedDirty(); +} diff --git a/driver-core/src/main/com/mongodb/internal/session/package-info.java b/driver-core/src/main/com/mongodb/internal/session/package-info.java new file mode 100644 index 00000000000..faeb3aeaac4 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/session/package-info.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. 
+ */ +@Internal +@NonNullApi +package com.mongodb.internal.session; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/thread/DaemonThreadFactory.java b/driver-core/src/main/com/mongodb/internal/thread/DaemonThreadFactory.java new file mode 100644 index 00000000000..e44fd0661c4 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/thread/DaemonThreadFactory.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.thread; + +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.atomic.AtomicInteger; + +/** + * Custom thread factory for scheduled executor service that creates daemon threads. Otherwise, + * applications that neglect to close the client will not exit. + * + *

<p>This class is not part of the public API and may be removed or changed at any time</p>

+ */ +public class DaemonThreadFactory implements ThreadFactory { + private static final AtomicInteger POOL_NUMBER = new AtomicInteger(1); + private final AtomicInteger threadNumber = new AtomicInteger(1); + private final String namePrefix; + + public DaemonThreadFactory(final String prefix) { + namePrefix = prefix + "-" + POOL_NUMBER.getAndIncrement() + "-thread-"; + } + + @Override + public Thread newThread(final Runnable runnable) { + Thread t = new Thread(runnable, namePrefix + threadNumber.getAndIncrement()); + t.setDaemon(true); + return t; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/thread/InterruptionUtil.java b/driver-core/src/main/com/mongodb/internal/thread/InterruptionUtil.java new file mode 100644 index 00000000000..54a3ba31f24 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/thread/InterruptionUtil.java @@ -0,0 +1,77 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.thread; + +import com.mongodb.MongoInterruptedException; +import com.mongodb.lang.Nullable; + +import java.io.InterruptedIOException; +import java.net.SocketException; +import java.net.SocketTimeoutException; +import java.nio.channels.ClosedByInterruptException; +import java.util.Optional; + +/** + *
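Typical wiring for the DaemonThreadFactory above looks like the sketch below (the executor and the "MaintenanceTimer" prefix are illustrative, not a call site from this patch); the daemon flag is what lets the JVM exit when an application forgets to close the client:

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;

import com.mongodb.internal.thread.DaemonThreadFactory;

final class MaintenanceTimerDemo {
    public static void main(String[] args) {
        // Daemon threads do not keep the JVM alive if the executor is never shut down.
        ScheduledExecutorService timer =
                Executors.newSingleThreadScheduledExecutor(new DaemonThreadFactory("MaintenanceTimer"));
        // Prints a name like "MaintenanceTimer-1-thread-1".
        timer.execute(() -> System.out.println(Thread.currentThread().getName()));
    }
}
```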

<p>This class is not part of the public API and may be removed or changed at any time</p>

+ */ +public final class InterruptionUtil { + /** + * {@linkplain Thread#interrupt() Interrupts} the {@linkplain Thread#currentThread() current thread} + * before creating {@linkplain MongoInterruptedException}. + * We do this because the interrupt status is cleared before {@link InterruptedException} is thrown, + * and we are not propagating {@link InterruptedException}, which means we must reinstate the interrupt status. + * This matches the behavior documented by {@link MongoInterruptedException}. + */ + public static MongoInterruptedException interruptAndCreateMongoInterruptedException( + @Nullable final String msg, @Nullable final InterruptedException cause) { + Thread.currentThread().interrupt(); + return new MongoInterruptedException(msg, cause); + } + + /** + * If {@code e} is {@link InterruptedException}, then {@link #interruptAndCreateMongoInterruptedException(String, InterruptedException)} + * is used. + * + * @return {@link Optional#empty()} iff {@code e} does not communicate an interrupt. + */ + public static Optional<MongoInterruptedException> translateInterruptedException( + @Nullable final Throwable e, @Nullable final String message) { + if (e instanceof InterruptedException) { + return Optional.of(interruptAndCreateMongoInterruptedException(message, (InterruptedException) e)); + } else if ( + // `InterruptedIOException` is weirdly documented, and almost seems to be a relic abandoned by the Java SE APIs: + // - `SocketTimeoutException` is `InterruptedIOException`, + // but it is not related to the Java SE interrupt mechanism. As a side note, it does not happen when writing. + // - Java SE methods, where IO may indeed be interrupted via the Java SE interrupt mechanism, + // use different exceptions, like `ClosedByInterruptException` or even `SocketException`. + (e instanceof InterruptedIOException && !(e instanceof SocketTimeoutException)) + // see `java.nio.channels.InterruptibleChannel` + // and `java.net.Socket.connect`, `java.net.Socket.getOutputStream`/`getInputStream` + || e instanceof ClosedByInterruptException + // see `java.net.Socket.connect`, `java.net.Socket.getOutputStream`/`getInputStream` + || (e instanceof SocketException && Thread.currentThread().isInterrupted())) { + // The interrupted status is not cleared before throwing `ClosedByInterruptException`/`SocketException`, + // so we do not need to reinstate it. + // `InterruptedIOException` does not specify how it behaves with regard to the interrupted status, so we do nothing. + return Optional.of(new MongoInterruptedException(message, (Exception) e)); + } else { + return Optional.empty(); + } + } + + private InterruptionUtil() { + } +} diff --git a/driver-core/src/main/com/mongodb/internal/thread/package-info.java b/driver-core/src/main/com/mongodb/internal/thread/package-info.java new file mode 100644 index 00000000000..e5a0a9f7237 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/thread/package-info.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
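A call site for translateInterruptedException, using the signature declared just above, typically looks like this (the surrounding stream read is a hypothetical stand-in for the driver's socket code):

```java
import java.io.IOException;
import java.io.InputStream;
import java.util.Optional;

import com.mongodb.MongoInterruptedException;
import com.mongodb.internal.thread.InterruptionUtil;

final class ReadHelper {
    // If the failure was really an interrupt in I/O clothing, surface it as
    // MongoInterruptedException; otherwise let the original exception propagate.
    static int readByte(final InputStream in) throws IOException {
        try {
            return in.read();
        } catch (IOException e) {
            Optional<MongoInterruptedException> interrupted =
                    InterruptionUtil.translateInterruptedException(e, "Interrupted while reading");
            if (interrupted.isPresent()) {
                throw interrupted.get();
            }
            throw e;
        }
    }
}
```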
+ */ + +/** + * This package contains internal functionality that may change at any time. + */ +@Internal +@NonNullApi +package com.mongodb.internal.thread; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/time/StartTime.java b/driver-core/src/main/com/mongodb/internal/time/StartTime.java new file mode 100644 index 00000000000..1d8f186ab67 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/time/StartTime.java @@ -0,0 +1,64 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.time; + +import java.time.Duration; +import java.util.concurrent.TimeUnit; + +/** + * A point in time used to track how much time has elapsed. In contrast to a + * Timeout, it is guaranteed not to be in the future, and is never infinite. + * + * Implementations of this interface must be immutable. + * + * @see TimePoint + */ +public interface StartTime { + + /** + * @see TimePoint#elapsed() + */ + Duration elapsed(); + + /** + * @see TimePoint#asTimeout() + */ + Timeout asTimeout(); + + /** + * Returns an {@linkplain Timeout#infinite() infinite} timeout if + * {@code timeoutValue} is negative, an expired timeout if + * {@code timeoutValue} is 0, otherwise a timeout that expires + * {@code timeoutValue} later, in the given {@code timeUnit}. + *

+ * Note that some code might ignore a timeout, and attempt to perform + * the operation in question at least once.

+ *

+ * Note that the contract of this method is also used in some places to + * specify the behavior of methods that accept {@code (long timeout, TimeUnit unit)}, + * e.g., {@link com.mongodb.internal.connection.ConcurrentPool#get(long, TimeUnit)}, + * so it cannot be changed without updating those methods.

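A minimal sketch of that contract (a hypothetical adapter, not code from this patch): a legacy (long timeout, TimeUnit unit) parameter maps onto a Timeout with negative meaning infinite and 0 meaning already expired:

    import com.mongodb.internal.time.StartTime;
    import com.mongodb.internal.time.Timeout;
    import java.util.concurrent.TimeUnit;

    final class TimeoutAdapterExample {
        // negative -> infinite, 0 -> expired, positive -> expires that far from `start`
        static Timeout toTimeout(final long timeout, final TimeUnit unit) {
            StartTime start = StartTime.now();
            return start.timeoutAfterOrInfiniteIfNegative(timeout, unit);
        }
    }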
+ * + * @see TimePoint#timeoutAfterOrInfiniteIfNegative(long, TimeUnit) + */ + Timeout timeoutAfterOrInfiniteIfNegative(long timeoutValue, TimeUnit timeUnit); + + /** + * @return a {@link StartTime}, as of now + */ + static StartTime now() { + return TimePoint.at(System.nanoTime()); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/time/TimePoint.java b/driver-core/src/main/com/mongodb/internal/time/TimePoint.java new file mode 100644 index 00000000000..811065d13a6 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/time/TimePoint.java @@ -0,0 +1,244 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.time; + +import com.mongodb.annotations.Immutable; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.function.CheckedFunction; +import com.mongodb.internal.function.CheckedSupplier; +import com.mongodb.lang.Nullable; + +import java.time.Clock; +import java.time.Duration; +import java.util.Objects; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.NANOSECONDS; + +/** + * A value-based class + * representing a point on a timeline. The origin of this timeline (which is not + * exposed) has no relation to the {@linkplain Clock#systemUTC() system clock}. + * The same timeline is used by all {@link TimePoint}s within the same process. + *

+ * Methods operating on a pair of {@link TimePoint}s, + * for example, {@link #durationSince(TimePoint)}, {@link #compareTo(TimePoint)}, + * or producing a point from another one, for example, {@link #add(Duration)}, + * work correctly only if the duration between the points is not greater than + * {@link Long#MAX_VALUE} nanoseconds, which is more than 292 years.

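For intuition (an illustrative sketch, not code from this patch): the timeline here is System.nanoTime(), which may overflow and wrap, so two stamps must be compared via the sign of their difference rather than with <, which is also where the Long.MAX_VALUE-nanoseconds bound comes from:

    final class NanoComparisonExample {
        // wraparound-safe while the two stamps are within Long.MAX_VALUE ns (~292 years)
        static int compareNanoStamps(final long a, final long b) {
            return Long.signum(a - b);
        }

        public static void main(final String[] args) {
            long before = Long.MAX_VALUE - 1; // just before numeric wraparound
            long after = before + 5;          // overflows to a negative value
            System.out.println(compareNanoStamps(after, before)); // 1: 'after' is later
        }
    }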
+ *

+ * This class is not part of the public API and may be removed or changed at any time.

+ */ +@Immutable +class TimePoint implements Comparable<TimePoint>, StartTime, Timeout { + @Nullable + private final Long nanos; + + TimePoint(@Nullable final Long nanos) { + this.nanos = nanos; + } + + @VisibleForTesting(otherwise = PRIVATE) + static TimePoint at(@Nullable final Long nanos) { + return new TimePoint(nanos); + } + + @VisibleForTesting(otherwise = PRIVATE) + long currentNanos() { + return System.nanoTime(); + } + + /** + * Returns the current {@link TimePoint}. + */ + static TimePoint now() { + return at(System.nanoTime()); + } + + /** + * Returns a {@link TimePoint} infinitely far in the future. + */ + static TimePoint infinite() { + return at(null); + } + + @Override + public Timeout shortenBy(final long amount, final TimeUnit timeUnit) { + if (isInfinite()) { + return this; // shortening (lengthening) an infinite timeout does nothing + } + long durationNanos = NANOSECONDS.convert(amount, timeUnit); + return TimePoint.at(assertNotNull(nanos) - durationNanos); + } + + @Override + public <T, E extends Exception> T checkedCall(final TimeUnit timeUnit, + final CheckedSupplier<T, E> onInfinite, final CheckedFunction<Long, T, E> onHasRemaining, + final CheckedSupplier<T, E> onExpired) throws E { + if (this.isInfinite()) { + return onInfinite.get(); + } + long remaining = remaining(timeUnit); + if (remaining <= 0) { + return onExpired.get(); + } else { + return onHasRemaining.apply(remaining); + } + } + + /** + * @return true if this TimePoint is infinite. + */ + private boolean isInfinite() { + return nanos == null; + } + + /** + * @return this TimePoint, as a Timeout. Convenience for {@link StartTime} + */ + @Override + public Timeout asTimeout() { + return this; + } + + /** + * The number of whole time units that remain until this TimePoint + * has expired. This should not be used to check for expiry, + * but can be used to supply a remaining value, in the finest-grained + * TimeUnit available, to some method that may time out. + * This method must not be used with infinite TimePoints. + * + * @param unit the time unit + * @return the remaining time + * @throws AssertionError if the timeout is infinite. Always check {@link #isInfinite()} before calling. + */ + private long remaining(final TimeUnit unit) { + if (isInfinite()) { + throw new AssertionError("Infinite TimePoints have infinite remaining time"); + } + long remaining = assertNotNull(nanos) - currentNanos(); + remaining = unit.convert(remaining, NANOSECONDS); + return remaining <= 0 ? 0 : remaining; + } + + /** + * The {@link Duration} between {@link TimePoint#now()} and this {@link TimePoint}. + * This method is functionally equivalent to {@code TimePoint.now().durationSince(this)}. + * Note that the duration will represent fully-elapsed whole units. + * + * @throws AssertionError If this TimePoint is {@linkplain #isInfinite() infinite}. + * @see #durationSince(TimePoint) + */ + public Duration elapsed() { + if (isInfinite()) { + throw new AssertionError("No time can elapse since an infinite TimePoint"); + } + return Duration.ofNanos(currentNanos() - assertNotNull(nanos)); + } + + /** + * The {@link Duration} between this {@link TimePoint} and {@code t}. + * A {@linkplain Duration#isNegative() negative} {@link Duration} means that + * this {@link TimePoint} is {@linkplain #compareTo(TimePoint) before} {@code t}.
+ * + * @see #elapsed() + */ + Duration durationSince(final TimePoint t) { + if (this.isInfinite()) { + throw new AssertionError("this timepoint is infinite, with no duration since"); + } + if (t.isInfinite()) { + throw new AssertionError("the other timepoint is infinite, with no duration until"); + } + return Duration.ofNanos(nanos - assertNotNull(t.nanos)); + } + + /** + * @param timeoutValue value; if negative, the result is infinite + * @param timeUnit timeUnit + * @return a TimePoint that is the given number of timeUnits in the future + */ + @Override + public TimePoint timeoutAfterOrInfiniteIfNegative(final long timeoutValue, final TimeUnit timeUnit) { + if (timeoutValue < 0) { + return infinite(); + } + return this.add(Duration.ofNanos(NANOSECONDS.convert(timeoutValue, timeUnit))); + } + + + /** + * Returns a {@link TimePoint} that is {@code duration} away from this one. + * + * @param duration A duration that may also be {@linkplain Duration#isNegative() negative}. + */ + TimePoint add(final Duration duration) { + if (isInfinite()) { + throw new AssertionError("No time can be added to an infinite TimePoint"); + } + long durationNanos = duration.toNanos(); + return TimePoint.at(assertNotNull(nanos) + durationNanos); + } + + /** + * If this {@link TimePoint} is less/greater than {@code t}, then it is before/after {@code t}. + *

+ * {@inheritDoc}

+ */ + @Override + public int compareTo(final TimePoint t) { + if (Objects.equals(nanos, t.nanos)) { + return 0; + } else if (this.isInfinite()) { + return 1; + } else if (t.isInfinite()) { + return -1; + } + return Long.signum(nanos - assertNotNull(t.nanos)); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final TimePoint timePoint = (TimePoint) o; + return Objects.equals(nanos, timePoint.nanos); + } + + @Override + public int hashCode() { + return Objects.hash(nanos); + } + + @Override + public String toString() { + String remainingMs = isInfinite() + ? "infinite" + : "" + remaining(MILLISECONDS); + return "TimePoint{" + + "nanos=" + nanos + + ", remainingMs=" + remainingMs + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/time/Timeout.java b/driver-core/src/main/com/mongodb/internal/time/Timeout.java new file mode 100644 index 00000000000..c497f08945b --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/time/Timeout.java @@ -0,0 +1,245 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.time; + +import com.mongodb.MongoInterruptedException; +import com.mongodb.assertions.Assertions; +import com.mongodb.internal.function.CheckedConsumer; +import com.mongodb.internal.function.CheckedFunction; +import com.mongodb.internal.function.CheckedRunnable; +import com.mongodb.internal.function.CheckedSupplier; +import com.mongodb.lang.NonNull; +import com.mongodb.lang.Nullable; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Condition; +import java.util.function.LongConsumer; +import java.util.function.LongFunction; +import java.util.function.Supplier; + +import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException; +import static java.util.concurrent.TimeUnit.NANOSECONDS; + +/** + * A Timeout is a "deadline": a point in time by which something must happen. + * + * Implementations of this interface must be immutable. + * + * @see TimePoint + */ +public interface Timeout { + /** + * @param timeouts the timeouts + * @return the instance of the timeout that expires earliest + */ + static Timeout earliest(final Timeout... timeouts) { + List<Timeout> list = Arrays.asList(timeouts); + list.forEach(v -> { + if (!(v instanceof TimePoint)) { + throw new AssertionError("Only TimePoints may be compared"); + } + }); + return Collections.min(list, (a, b) -> { + TimePoint tpa = (TimePoint) a; + TimePoint tpb = (TimePoint) b; + return tpa.compareTo(tpb); + }); + } + + /** + * @return an infinite (non-expiring) timeout + */ + static Timeout infinite() { + return TimePoint.infinite(); + } + + /** + * @param timeout the timeout + * @return the provided timeout, or an infinite timeout if {@code null} was provided.
+ */ + static Timeout nullAsInfinite(@Nullable final Timeout timeout) { + return timeout == null ? infinite() : timeout; + } + + /** + * @param duration the non-negative duration, in the specified time unit + * @param unit the time unit + * @param zeroSemantics what to interpret a 0 duration as (infinite or expired) + * @return a timeout that expires in the specified duration after now. + */ + @NonNull + static Timeout expiresIn(final long duration, final TimeUnit unit, final ZeroSemantics zeroSemantics) { + if (duration < 0) { + throw new AssertionError("Timeouts must not be in the past"); + } else if (duration == 0) { + switch (zeroSemantics) { + case ZERO_DURATION_MEANS_INFINITE: + return Timeout.infinite(); + case ZERO_DURATION_MEANS_EXPIRED: + return TimePoint.now(); + default: + throw Assertions.fail("Unknown enum value"); + } + } else { + // duration will never be negative + return TimePoint.now().timeoutAfterOrInfiniteIfNegative(duration, unit); + } + } + + /** + * This timeout, shortened by the provided amount (it will expire sooner). + * + * @param amount the amount to shorten by + * @param timeUnit the time unit of the amount + * @return the shortened timeout + */ + Timeout shortenBy(long amount, TimeUnit timeUnit); + + /** + * {@linkplain Condition#awaitNanos(long) Awaits} on the provided + * condition. Will {@linkplain Condition#await() await} without a waiting + * time if this timeout is infinite. + * {@linkplain #onExistsAndExpired(Timeout, Runnable) Expiry} is not + * checked by this method, and should be checked outside of this method. + * @param condition the condition. + * @param action supplies the name of the action, for {@link MongoInterruptedException} + */ + default void awaitOn(final Condition condition, final Supplier<String> action) { + try { + // ignore result, the timeout will track this remaining time + //noinspection ResultOfMethodCallIgnored + checkedRun(NANOSECONDS, + () -> condition.await(), + (ns) -> condition.awaitNanos(ns), + () -> condition.awaitNanos(0)); + } catch (InterruptedException e) { + throw interruptAndCreateMongoInterruptedException("Interrupted while " + action.get(), e); + } + } + + /** + * {@linkplain CountDownLatch#await(long, TimeUnit) Awaits} on the provided + * latch. Will {@linkplain CountDownLatch#await() await} without a waiting + * time if this timeout is infinite. + * {@linkplain #onExistsAndExpired(Timeout, Runnable) Expiry} is not + * checked by this method, and should be checked outside of this method. + * @param latch the latch. + * @param action supplies the name of the action, for {@link MongoInterruptedException} + */ + default void awaitOn(final CountDownLatch latch, final Supplier<String> action) { + try { + // ignore result, the timeout will track this remaining time + //noinspection ResultOfMethodCallIgnored + checkedRun(NANOSECONDS, + () -> latch.await(), + (ns) -> latch.await(ns, NANOSECONDS), + () -> latch.await(0, NANOSECONDS)); + } catch (InterruptedException e) { + throw interruptAndCreateMongoInterruptedException("Interrupted while " + action.get(), e); + } + } + + /** + * Call one of 3 possible branches depending on the state of the timeout, + * and return the result. + * @param timeUnit the time unit in which the positive (non-zero) remaining + * time is provided to the {@code onHasRemaining} branch. The + * underlying nano time is rounded down to the given time unit. + * If the rounded remaining time is 0, the timeout + * is considered expired.
+ * @param onInfinite branch to take when the timeout is infinite + * @param onHasRemaining branch to take when there is positive remaining + * time in the specified time unit + * @param onExpired branch to take when the timeout is expired + * @return the result provided by the branch + * @param <T> the type of the result + */ + default <T> T call(final TimeUnit timeUnit, + final Supplier<T> onInfinite, final LongFunction<T> onHasRemaining, + final Supplier<T> onExpired) { + return checkedCall(timeUnit, onInfinite::get, onHasRemaining::apply, onExpired::get); + } + + /** + * Call, but throwing a checked exception. + * @see #call(TimeUnit, Supplier, LongFunction, Supplier) + * @param <E> the checked exception type + * @throws E the checked exception + */ + <T, E extends Exception> T checkedCall(TimeUnit timeUnit, + CheckedSupplier<T, E> onInfinite, CheckedFunction<Long, T, E> onHasRemaining, + CheckedSupplier<T, E> onExpired) throws E; + + /** + * Run one of 3 possible branches depending on the state of the timeout. + * @see #call(TimeUnit, Supplier, LongFunction, Supplier) + */ + default void run(final TimeUnit timeUnit, + final Runnable onInfinite, final LongConsumer onHasRemaining, + final Runnable onExpired) { + this.call(timeUnit, () -> { + onInfinite.run(); + return null; + }, (t) -> { + onHasRemaining.accept(t); + return null; + }, () -> { + onExpired.run(); + return null; + }); + } + + /** + * Run, but throwing a checked exception. + * @see #checkedCall(TimeUnit, CheckedSupplier, CheckedFunction, CheckedSupplier) + */ + default <E extends Exception> void checkedRun(final TimeUnit timeUnit, + final CheckedRunnable<E> onInfinite, final CheckedConsumer<Long, E> onHasRemaining, + final CheckedRunnable<E> onExpired) throws E { + this.checkedCall(timeUnit, () -> { + onInfinite.run(); + return null; + }, (t) -> { + onHasRemaining.accept(t); + return null; + }, () -> { + onExpired.run(); + return null; + }); + } + + default void onExpired(final Runnable onExpired) { + onExistsAndExpired(this, onExpired); + } + + static void onExistsAndExpired(@Nullable final Timeout t, final Runnable onExpired) { + if (t == null) { + return; + } + t.run(NANOSECONDS, + () -> {}, + (ns) -> {}, + () -> onExpired.run()); + } + + enum ZeroSemantics { + ZERO_DURATION_MEANS_EXPIRED, + ZERO_DURATION_MEANS_INFINITE + } +} diff --git a/driver-core/src/main/com/mongodb/internal/time/package-info.java b/driver-core/src/main/com/mongodb/internal/time/package-info.java new file mode 100644 index 00000000000..deb84634964 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/time/package-info.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time.
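As a usage sketch of the three-branch API above (a hypothetical caller; it assumes the CheckedSupplier/CheckedFunction shapes used by this interface), polling a queue under a Timeout dispatches on the infinite, has-remaining, and expired states:

    import com.mongodb.internal.time.Timeout;
    import com.mongodb.lang.Nullable;
    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.TimeUnit;

    final class ThreeBranchExample {
        // returns null if the timeout has already expired or the bounded wait times out
        @Nullable
        static <T> T pollWithin(final BlockingQueue<T> queue, final Timeout timeout) throws InterruptedException {
            return timeout.checkedCall(TimeUnit.NANOSECONDS,
                    queue::take,                                                   // infinite: wait with no deadline
                    remainingNs -> queue.poll(remainingNs, TimeUnit.NANOSECONDS),  // bounded wait
                    () -> null);                                                   // expired: give up immediately
        }
    }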
+ */ +@Internal +@NonNullApi +package com.mongodb.internal.time; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/internal/validator/MappedFieldNameValidator.java b/driver-core/src/main/com/mongodb/internal/validator/MappedFieldNameValidator.java new file mode 100644 index 00000000000..3e7956f06ed --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/validator/MappedFieldNameValidator.java @@ -0,0 +1,60 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.validator; + +import org.bson.FieldNameValidator; + +import java.util.Map; + +/** + * A field name validator that serves as a root validator for a map of validators that are applied to child fields. Note that instances of + * this class can be nested to achieve a wide variety of validation behaviors. + * + *

This class is not part of the public API and may be removed or changed at any time

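A minimal wiring sketch of that nesting (hypothetical field names, not from this patch), using the class defined just below: validate the children of a "u" (update) field with the update validator while leaving sibling fields unvalidated:

    import com.mongodb.internal.validator.MappedFieldNameValidator;
    import com.mongodb.internal.validator.NoOpFieldNameValidator;
    import com.mongodb.internal.validator.UpdateFieldNameValidator;
    import org.bson.FieldNameValidator;

    import java.util.HashMap;
    import java.util.Map;

    final class ValidatorWiringExample {
        static FieldNameValidator updateCommandValidator() {
            Map<String, FieldNameValidator> byField = new HashMap<>();
            byField.put("u", new UpdateFieldNameValidator()); // children of "u" use update rules
            return new MappedFieldNameValidator(NoOpFieldNameValidator.INSTANCE, byField);
        }
    }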
+ */ +public class MappedFieldNameValidator implements FieldNameValidator { + private final FieldNameValidator defaultValidator; + private final Map<String, FieldNameValidator> fieldNameToValidatorMap; + + /** + * The default validator will be used to validate all fields whose names are not contained in the fieldNameToValidatorMap. The map is + * used to apply different validators to fields with specific names. + * + * @param defaultValidator the validator to use for any fields not matching any field name in the map + * @param fieldNameToValidatorMap a map from field name to FieldNameValidator + */ + public MappedFieldNameValidator(final FieldNameValidator defaultValidator, + final Map<String, FieldNameValidator> fieldNameToValidatorMap) { + this.defaultValidator = defaultValidator; + this.fieldNameToValidatorMap = fieldNameToValidatorMap; + } + + @Override + public boolean validate(final String fieldName) { + return defaultValidator.validate(fieldName); + } + + @Override + public String getValidationErrorMessage(final String fieldName) { + return defaultValidator.getValidationErrorMessage(fieldName); + } + + @Override + public FieldNameValidator getValidatorForField(final String fieldName) { + return fieldNameToValidatorMap.getOrDefault(fieldName, defaultValidator); + } +} diff --git a/driver-core/src/main/com/mongodb/internal/validator/NoOpFieldNameValidator.java b/driver-core/src/main/com/mongodb/internal/validator/NoOpFieldNameValidator.java new file mode 100644 index 00000000000..160406aedaf --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/validator/NoOpFieldNameValidator.java @@ -0,0 +1,41 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.validator; + +import org.bson.FieldNameValidator; + +/** + * A field name validator that treats all fields as valid. + * + *

This class is not part of the public API and may be removed or changed at any time

+ */ +public final class NoOpFieldNameValidator implements FieldNameValidator { + public static final NoOpFieldNameValidator INSTANCE = new NoOpFieldNameValidator(); + + private NoOpFieldNameValidator() { + } + + @Override + public boolean validate(final String fieldName) { + return true; + } + + @Override + public FieldNameValidator getValidatorForField(final String fieldName) { + return this; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/validator/ReplacingDocumentFieldNameValidator.java b/driver-core/src/main/com/mongodb/internal/validator/ReplacingDocumentFieldNameValidator.java new file mode 100644 index 00000000000..d6d815a529f --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/validator/ReplacingDocumentFieldNameValidator.java @@ -0,0 +1,57 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.validator; + +import org.bson.FieldNameValidator; + +import java.util.Arrays; +import java.util.List; + +import static com.mongodb.assertions.Assertions.assertFalse; +import static java.lang.String.format; + +/** + * A field name validator for documents that are meant for storage in MongoDB collections via replace operations. It ensures that no + * top-level fields start with '$' (with the exception of "$db", "$ref", and "$id", so that DBRefs are not rejected). + * + *

This class is not part of the public API and may be removed or changed at any time

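The expected behavior of the class below, per the rule just described (an illustrative sketch, not a test from this patch):

    import com.mongodb.internal.validator.ReplacingDocumentFieldNameValidator;

    final class ReplaceValidationExample {
        public static void main(final String[] args) {
            ReplacingDocumentFieldNameValidator v = ReplacingDocumentFieldNameValidator.INSTANCE;
            System.out.println(v.validate("name")); // true: ordinary field
            System.out.println(v.validate("$set")); // false: operators are rejected at the top level
            System.out.println(v.validate("$id"));  // true: DBRef exception
        }
    }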
+ */ +public final class ReplacingDocumentFieldNameValidator implements FieldNameValidator { + public static final ReplacingDocumentFieldNameValidator INSTANCE = new ReplacingDocumentFieldNameValidator(); + // Have to support DBRef fields + private static final List<String> EXCEPTIONS = Arrays.asList("$db", "$ref", "$id"); + + private ReplacingDocumentFieldNameValidator() { + } + + @Override + public boolean validate(final String fieldName) { + return !fieldName.startsWith("$") || EXCEPTIONS.contains(fieldName); + } + + @Override + public String getValidationErrorMessage(final String fieldName) { + assertFalse(validate(fieldName)); + return format("Field names in a replacement document cannot start with '$' but '%s' does", fieldName); + } + + @Override + public FieldNameValidator getValidatorForField(final String fieldName) { + // Only top-level fields are validated + return NoOpFieldNameValidator.INSTANCE; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/validator/UpdateFieldNameValidator.java b/driver-core/src/main/com/mongodb/internal/validator/UpdateFieldNameValidator.java new file mode 100644 index 00000000000..fc59b0cc312 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/validator/UpdateFieldNameValidator.java @@ -0,0 +1,65 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.validator; + +import org.bson.FieldNameValidator; + +import static com.mongodb.assertions.Assertions.assertFalse; +import static java.lang.String.format; + +/** + * A field name validator for update documents. It ensures that all top-level fields start with a '$'. + * + *

This class is not part of the public API and may be removed or changed at any time

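A sketch of the expected lifecycle of the class below (illustration only): start() resets the state, validate() accepts only '$'-prefixed top-level names, and end() rejects a document in which no field was seen:

    import com.mongodb.internal.validator.UpdateFieldNameValidator;

    final class UpdateValidationExample {
        public static void main(final String[] args) {
            UpdateFieldNameValidator v = new UpdateFieldNameValidator();
            v.start();
            System.out.println(v.validate("$set")); // true: update operator
            System.out.println(v.validate("name")); // false: bare top-level field
            v.end(); // throws IllegalArgumentException only if no field was encountered
        }
    }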
+ */ +public final class UpdateFieldNameValidator implements org.bson.FieldNameValidator { + private boolean encounteredField = false; + + @Override + public boolean validate(final String fieldName) { + encounteredField = true; + return fieldName.startsWith("$"); + } + + @Override + public String getValidationErrorMessage(final String fieldName) { + assertFalse(fieldName.startsWith("$")); + return format("All update operators must start with '$', but '%s' does not", fieldName); + } + + @Override + public FieldNameValidator getValidatorForField(final String fieldName) { + return NoOpFieldNameValidator.INSTANCE; + } + + @Override + public void start() { + reset(); + } + + @Override + public void end() { + if (!encounteredField) { + throw new IllegalArgumentException("Invalid BSON document for an update. The document may not be empty."); + } + } + + public UpdateFieldNameValidator reset() { + encounteredField = false; + return this; + } +} diff --git a/driver-core/src/main/com/mongodb/internal/validator/package-info.java b/driver-core/src/main/com/mongodb/internal/validator/package-info.java new file mode 100644 index 00000000000..cec46638fb5 --- /dev/null +++ b/driver-core/src/main/com/mongodb/internal/validator/package-info.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. + */ +@Internal +@NonNullApi +package com.mongodb.internal.validator; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/lang/NonNull.java b/driver-core/src/main/com/mongodb/lang/NonNull.java new file mode 100644 index 00000000000..de5d99287a8 --- /dev/null +++ b/driver-core/src/main/com/mongodb/lang/NonNull.java @@ -0,0 +1,47 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.lang; + +import javax.annotation.Nonnull; +import javax.annotation.meta.TypeQualifierNickname; +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * A common annotation to declare that annotated elements cannot be {@code null}. 
Leverages JSR 305 meta-annotations to indicate + * nullability in Java to common tools with JSR 305 support, and is used by Kotlin to infer the nullability of the annotated API. + * + *

Should be used at parameter, return value, and field level. Method overrides should repeat parent {@code @NonNull} annotations unless + * they behave differently.

+ * + *

Use {@code @NonNullApi} (scope = parameters + return values) to set the default behavior to non-nullable in order to avoid annotating + * your whole codebase with {@code @NonNull}.

+ * + * @since 3.7 + * @see NonNullApi + * @see Nullable + */ +@Target({ElementType.METHOD, ElementType.PARAMETER, ElementType.FIELD}) +@Retention(RetentionPolicy.RUNTIME) +@Documented +@Nonnull +@TypeQualifierNickname +public @interface NonNull { +} diff --git a/driver-core/src/main/com/mongodb/lang/NonNullApi.java b/driver-core/src/main/com/mongodb/lang/NonNullApi.java new file mode 100644 index 00000000000..229f89c73a3 --- /dev/null +++ b/driver-core/src/main/com/mongodb/lang/NonNullApi.java @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.lang; + +import javax.annotation.Nonnull; +import javax.annotation.meta.TypeQualifierDefault; +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * A common annotation to declare that parameters and return values are to be considered as non-nullable by default for a given package. + * + *

Leverages JSR-305 meta-annotations to indicate nullability in Java to common tools with JSR-305 support, and is used by Kotlin to infer + the nullability of the annotated API.

+ * + *

Should be used at package level in association with {@link Nullable} annotations at parameter and return value level.

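A sketch of the intended usage (hypothetical package name; two files shown together): annotate the package once, then opt individual signatures back into nullability with @Nullable:

    // package-info.java of a hypothetical package: non-null by default
    @NonNullApi
    package com.example.store;

    import com.mongodb.lang.NonNullApi;

    // UserLookup.java, elsewhere in the same package: opting a return value back into nullability
    package com.example.store;

    import com.mongodb.lang.Nullable;

    class UserLookup {
        @Nullable
        String findNickname(final String userId) { // userId is non-null by default
            return userId.isEmpty() ? null : "nick-" + userId;
        }
    }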
+ * + * @since 3.7 + * @see Nullable + * @see NonNull + */ +@Target(ElementType.PACKAGE) +@Retention(RetentionPolicy.RUNTIME) +@Documented +@Nonnull +@TypeQualifierDefault({ElementType.METHOD, ElementType.PARAMETER}) +public @interface NonNullApi { +} diff --git a/driver-core/src/main/com/mongodb/lang/Nullable.java b/driver-core/src/main/com/mongodb/lang/Nullable.java new file mode 100644 index 00000000000..2c1dd799a6a --- /dev/null +++ b/driver-core/src/main/com/mongodb/lang/Nullable.java @@ -0,0 +1,47 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.lang; + +import javax.annotation.Nonnull; +import javax.annotation.meta.TypeQualifierNickname; +import javax.annotation.meta.When; +import java.lang.annotation.Documented; +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * A common annotation to declare that annotated elements can be {@code null} under some circumstances. Leverages JSR 305 meta-annotations to + * indicate nullability in Java to common tools with JSR 305 support, and is used by Kotlin to infer the nullability of the annotated API. + * + *

Should be used at parameter, return value, and field level. Method overrides should repeat parent {@code @Nullable} annotations + unless they behave differently.

+ * + *

Can be used in association with {@code NonNullApi} to override the default non-nullable semantics to nullable.

+ * + * @see NonNullApi + * @see NonNull + * @since 3.7 + */ +@Target({ElementType.METHOD, ElementType.PARAMETER, ElementType.FIELD}) +@Retention(RetentionPolicy.RUNTIME) +@Documented +@Nonnull(when = When.MAYBE) +@TypeQualifierNickname +public @interface Nullable { +} diff --git a/driver-core/src/main/com/mongodb/lang/package-info.java b/driver-core/src/main/com/mongodb/lang/package-info.java new file mode 100644 index 00000000000..05e7e89cffe --- /dev/null +++ b/driver-core/src/main/com/mongodb/lang/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains JSR 305-compatible annotations related to nullability. + */ +package com.mongodb.lang; diff --git a/driver-core/src/main/com/mongodb/management/ConnectionPoolStatistics.java b/driver-core/src/main/com/mongodb/management/ConnectionPoolStatistics.java new file mode 100644 index 00000000000..1fd322d4e4e --- /dev/null +++ b/driver-core/src/main/com/mongodb/management/ConnectionPoolStatistics.java @@ -0,0 +1,94 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.management; + +import com.mongodb.ServerAddress; +import com.mongodb.connection.ConnectionPoolSettings; +import com.mongodb.event.ConnectionCheckedInEvent; +import com.mongodb.event.ConnectionCheckedOutEvent; +import com.mongodb.event.ConnectionClosedEvent; +import com.mongodb.event.ConnectionCreatedEvent; +import com.mongodb.event.ConnectionPoolCreatedEvent; +import com.mongodb.event.ConnectionPoolListener; + +import java.util.concurrent.atomic.AtomicInteger; + +/** + * An MBean implementation for connection pool statistics. 
+ */ +final class ConnectionPoolStatistics implements ConnectionPoolListener, ConnectionPoolStatisticsMBean { + private final ServerAddress serverAddress; + private final ConnectionPoolSettings settings; + private final AtomicInteger size = new AtomicInteger(); + private final AtomicInteger checkedOutCount = new AtomicInteger(); + + ConnectionPoolStatistics(final ConnectionPoolCreatedEvent event) { + serverAddress = event.getServerId().getAddress(); + settings = event.getSettings(); + } + + @Override + public String getHost() { + return serverAddress.getHost(); + } + + @Override + public int getPort() { + return serverAddress.getPort(); + } + + @Override + public int getMinSize() { + return settings.getMinSize(); + } + + @Override + public int getMaxSize() { + return settings.getMaxSize(); + } + + @Override + public int getSize() { + return size.get(); + } + + @Override + public int getCheckedOutCount() { + return checkedOutCount.get(); + } + + @Override + public void connectionCheckedOut(final ConnectionCheckedOutEvent event) { + checkedOutCount.incrementAndGet(); + } + + @Override + public void connectionCheckedIn(final ConnectionCheckedInEvent event) { + checkedOutCount.decrementAndGet(); + } + + @Override + public void connectionCreated(final ConnectionCreatedEvent event) { + size.incrementAndGet(); + } + + @Override + public void connectionClosed(final ConnectionClosedEvent event) { + size.decrementAndGet(); + } + +} diff --git a/driver-core/src/main/com/mongodb/management/ConnectionPoolStatisticsMBean.java b/driver-core/src/main/com/mongodb/management/ConnectionPoolStatisticsMBean.java new file mode 100644 index 00000000000..e655c9992d6 --- /dev/null +++ b/driver-core/src/main/com/mongodb/management/ConnectionPoolStatisticsMBean.java @@ -0,0 +1,69 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.management; + +/** + *

A standard MXBean interface for a Mongo connection pool.

+ * + *

This interface is NOT part of the public API. Be prepared for non-binary compatible changes in minor releases.

+ * + * @since 2.12 + */ +public interface ConnectionPoolStatisticsMBean { + + /** + * Gets the host that this connection pool is connecting to. + * + * @return the host + */ + String getHost(); + + /** + * Gets the port that this connection pool is connecting to. + * + * @return the port + */ + int getPort(); + + /** + * Gets the minimum allowed size of the pool, including idle and in-use members. + * + * @return the minimum size + */ + int getMinSize(); + + /** + * Gets the maximum allowed size of the pool, including idle and in-use members. + * + * @return the maximum size + */ + int getMaxSize(); + + /** + * Gets the current size of the pool, including idle and in-use members. + * + * @return the size + */ + int getSize(); + + /** + * Gets the count of connections that are currently in use. + * + * @return count of in-use connections + */ + int getCheckedOutCount(); +} diff --git a/driver-core/src/main/com/mongodb/management/JMXConnectionPoolListener.java b/driver-core/src/main/com/mongodb/management/JMXConnectionPoolListener.java new file mode 100644 index 00000000000..f91523d8a03 --- /dev/null +++ b/driver-core/src/main/com/mongodb/management/JMXConnectionPoolListener.java @@ -0,0 +1,140 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.management; + +import com.mongodb.connection.ConnectionId; +import com.mongodb.connection.ServerId; +import com.mongodb.event.ConnectionCheckedInEvent; +import com.mongodb.event.ConnectionCheckedOutEvent; +import com.mongodb.event.ConnectionClosedEvent; +import com.mongodb.event.ConnectionCreatedEvent; +import com.mongodb.event.ConnectionPoolClosedEvent; +import com.mongodb.event.ConnectionPoolCreatedEvent; +import com.mongodb.event.ConnectionPoolListener; +import com.mongodb.lang.Nullable; + +import javax.management.ObjectName; +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; + +import static java.lang.String.format; +import static java.util.Arrays.asList; + +/** + * A connection pool listener that manages a set of JMX MBeans, one for each connection pool. 
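As a usage sketch (assuming the public MongoClientSettings builder API; not part of this patch), the listener defined below is typically attached through the connection pool settings:

    import com.mongodb.MongoClientSettings;
    import com.mongodb.management.JMXConnectionPoolListener;

    final class JmxWiringExample {
        static MongoClientSettings withJmxMonitoring() {
            return MongoClientSettings.builder()
                    .applyToConnectionPoolSettings(builder ->
                            builder.addConnectionPoolListener(new JMXConnectionPoolListener()))
                    .build();
        }
    }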
+ * + * @since 3.5 + */ +public class JMXConnectionPoolListener implements ConnectionPoolListener { + private final ConcurrentMap<ServerId, ConnectionPoolStatistics> map = + new ConcurrentHashMap<>(); + + @Override + public void connectionPoolCreated(final ConnectionPoolCreatedEvent event) { + ConnectionPoolStatistics statistics = new ConnectionPoolStatistics(event); + map.put(event.getServerId(), statistics); + MBeanServerFactory.getMBeanServer().registerMBean(statistics, getMBeanObjectName(event.getServerId())); + } + + @Override + public void connectionPoolClosed(final ConnectionPoolClosedEvent event) { + map.remove(event.getServerId()); + MBeanServerFactory.getMBeanServer().unregisterMBean(getMBeanObjectName(event.getServerId())); + } + + @Override + public void connectionCheckedOut(final ConnectionCheckedOutEvent event) { + ConnectionPoolStatistics statistics = getStatistics(event.getConnectionId()); + if (statistics != null) { + statistics.connectionCheckedOut(event); + } + } + + @Override + public void connectionCheckedIn(final ConnectionCheckedInEvent event) { + ConnectionPoolStatistics statistics = getStatistics(event.getConnectionId()); + if (statistics != null) { + statistics.connectionCheckedIn(event); + } + } + + @Override + public void connectionCreated(final ConnectionCreatedEvent event) { + ConnectionPoolStatistics statistics = getStatistics(event.getConnectionId()); + if (statistics != null) { + statistics.connectionCreated(event); + } + } + + @Override + public void connectionClosed(final ConnectionClosedEvent event) { + ConnectionPoolStatistics statistics = getStatistics(event.getConnectionId()); + if (statistics != null) { + statistics.connectionClosed(event); + } + } + + String getMBeanObjectName(final ServerId serverId) { + String name = format("org.mongodb.driver:type=ConnectionPool,clusterId=%s,host=%s,port=%s", + ensureValidValue(serverId.getClusterId().getValue()), + ensureValidValue(serverId.getAddress().getHost()), + serverId.getAddress().getPort()); + String clusterDescription = serverId.getClusterId().getDescription(); + if (clusterDescription != null) { + name = format("%s,description=%s", name, ensureValidValue(clusterDescription)); + } + return name; + } + + // for unit test + @Nullable + ConnectionPoolStatisticsMBean getMBean(final ServerId serverId) { + return getStatistics(serverId); + } + + @Nullable + private ConnectionPoolStatistics getStatistics(final ConnectionId connectionId) { + return getStatistics(connectionId.getServerId()); + } + + @Nullable + private ConnectionPoolStatistics getStatistics(final ServerId serverId) { + return map.get(serverId); + } + + private String ensureValidValue(final String value) { + if (containsQuotableCharacter(value)) { + return ObjectName.quote(value); + } else { + return value; + } + } + + private boolean containsQuotableCharacter(@Nullable final String value) { + if (value == null || value.length() == 0) { + return false; + } + List<String> quotableCharacters = asList(",", ":", "?", "*", "=", "\"", "\\", "\n"); + for (String quotable : quotableCharacters) { + if (value.contains(quotable)) { + return true; + } + } + return false; + } +} diff --git a/driver-core/src/main/com/mongodb/management/JMXMBeanServer.java b/driver-core/src/main/com/mongodb/management/JMXMBeanServer.java new file mode 100644 index 00000000000..4d858bb1d0b --- /dev/null +++ b/driver-core/src/main/com/mongodb/management/JMXMBeanServer.java @@ -0,0 +1,50 @@ +/* + * Copyright 2008-present MongoDB, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.management; + +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; + +import javax.management.ObjectName; +import java.lang.management.ManagementFactory; + +class JMXMBeanServer implements MBeanServer { + private static final Logger LOGGER = Loggers.getLogger("management"); + + @Override + public void registerMBean(final Object mBean, final String mBeanName) { + try { + server.registerMBean(mBean, new ObjectName(mBeanName)); + } catch (Exception e) { + LOGGER.warn("Unable to register MBean " + mBeanName, e); + } + } + + @Override + public void unregisterMBean(final String mBeanName) { + try { + ObjectName objectName = new ObjectName(mBeanName); + if (server.isRegistered(objectName)) { + server.unregisterMBean(objectName); + } + } catch (Exception e) { + LOGGER.warn("Unable to unregister MBean " + mBeanName, e); + } + } + + private final javax.management.MBeanServer server = ManagementFactory.getPlatformMBeanServer(); +} diff --git a/driver-core/src/main/com/mongodb/management/MBeanServer.java b/driver-core/src/main/com/mongodb/management/MBeanServer.java new file mode 100644 index 00000000000..b8acf58525e --- /dev/null +++ b/driver-core/src/main/com/mongodb/management/MBeanServer.java @@ -0,0 +1,34 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.management; + +interface MBeanServer { + /** + * Unregister the MBean with the given name. + * + * @param mBeanName the MBean name + */ + void unregisterMBean(String mBeanName); + + /** + * Register the given mBean with the given name. + * + * @param mBean the MBean + * @param mBeanName the MBean name + */ + void registerMBean(Object mBean, String mBeanName); +} diff --git a/driver-core/src/main/com/mongodb/management/MBeanServerFactory.java b/driver-core/src/main/com/mongodb/management/MBeanServerFactory.java new file mode 100644 index 00000000000..d9b1bfed5f6 --- /dev/null +++ b/driver-core/src/main/com/mongodb/management/MBeanServerFactory.java @@ -0,0 +1,49 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.management; + +/** + *

This class is used to insulate the rest of the driver from the possibility that JMX is not available, as is currently the case on the + Android VM.

+ */ +final class MBeanServerFactory { + private MBeanServerFactory() { + } + + static { + MBeanServer tmp; + try { + tmp = new JMXMBeanServer(); + } catch (Throwable e) { + tmp = new NullMBeanServer(); + } + + M_BEAN_SERVER = tmp; + } + + /** + * Gets the MBeanServer for registering or unregistering MBeans. This returns a no-op server if JMX is not available (for example, in + * Android). + * + * @return the MBean server. + */ + static MBeanServer getMBeanServer() { + return M_BEAN_SERVER; + } + + private static final MBeanServer M_BEAN_SERVER; +} diff --git a/driver-core/src/main/com/mongodb/management/NullMBeanServer.java b/driver-core/src/main/com/mongodb/management/NullMBeanServer.java new file mode 100644 index 00000000000..59d18a9a96e --- /dev/null +++ b/driver-core/src/main/com/mongodb/management/NullMBeanServer.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.management; + +class NullMBeanServer implements MBeanServer { + @Override + public void unregisterMBean(final String mBeanName) { + } + + @Override + public void registerMBean(final Object mBean, final String mBeanName) { + } +} diff --git a/driver-core/src/main/com/mongodb/management/package-info.java b/driver-core/src/main/com/mongodb/management/package-info.java new file mode 100644 index 00000000000..22408a57dc4 --- /dev/null +++ b/driver-core/src/main/com/mongodb/management/package-info.java @@ -0,0 +1,23 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Contains classes for monitoring the server/driver via Java Management Extensions (JMX). + */ +@NonNullApi +package com.mongodb.management; + +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/package-info.java b/driver-core/src/main/com/mongodb/package-info.java new file mode 100644 index 00000000000..1effee5f002 --- /dev/null +++ b/driver-core/src/main/com/mongodb/package-info.java @@ -0,0 +1,23 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * The core MongoDB package + */ +@NonNullApi +package com.mongodb; + +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/selector/CompositeServerSelector.java b/driver-core/src/main/com/mongodb/selector/CompositeServerSelector.java new file mode 100644 index 00000000000..40feab69433 --- /dev/null +++ b/driver-core/src/main/com/mongodb/selector/CompositeServerSelector.java @@ -0,0 +1,110 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.selector; + +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ServerDescription; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A server selector that composes a list of server selectors, and selects the servers by iterating through the list from start to + * finish, passing the result of the previous into the next, and finally returning the result of the last one. + * + * @since 3.0 + */ +public final class CompositeServerSelector implements ServerSelector { + private final List<ServerSelector> serverSelectors; + + /** + * Constructs a new instance. + * + * @param serverSelectors the list of composed server selectors + */ + public CompositeServerSelector(final List<ServerSelector> serverSelectors) { + notNull("serverSelectors", serverSelectors); + if (serverSelectors.isEmpty()) { + throw new IllegalArgumentException("Server selectors cannot be an empty list"); + } + ArrayList<ServerSelector> mergedServerSelectors = new ArrayList<>(); + for (ServerSelector cur : serverSelectors) { + if (cur == null) { + throw new IllegalArgumentException("Cannot have a null server selector in the list of composed selectors"); + } + if (cur instanceof CompositeServerSelector) { + mergedServerSelectors.addAll(((CompositeServerSelector) cur).serverSelectors); + } else { + mergedServerSelectors.add(cur); + } + } + this.serverSelectors = Collections.unmodifiableList(mergedServerSelectors); + } + + /** + * @return the list of server selectors.
*/ + public List<ServerSelector> getServerSelectors() { + return serverSelectors; + } + + @Override + public List<ServerDescription> select(final ClusterDescription clusterDescription) { + ClusterDescription curClusterDescription = clusterDescription; + List<ServerDescription> choices = null; + for (ServerSelector cur : serverSelectors) { + choices = cur.select(curClusterDescription); + curClusterDescription = new ClusterDescription(clusterDescription.getConnectionMode(), clusterDescription.getType(), choices, + clusterDescription.getClusterSettings(), + clusterDescription.getServerSettings()); + } + + return assertNotNull(choices); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + CompositeServerSelector that = (CompositeServerSelector) o; + if (serverSelectors.size() != that.serverSelectors.size()) { + return false; + } + return serverSelectors.equals(that.serverSelectors); + } + + @Override + public int hashCode() { + return serverSelectors != null ? serverSelectors.hashCode() : 0; + } + + @Override + public String toString() { + return "CompositeServerSelector{" + + "serverSelectors=" + serverSelectors + + '}'; + } +} diff --git a/driver-core/src/main/com/mongodb/selector/ServerSelector.java b/driver-core/src/main/com/mongodb/selector/ServerSelector.java new file mode 100644 index 00000000000..c4f32f9bd69 --- /dev/null +++ b/driver-core/src/main/com/mongodb/selector/ServerSelector.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.selector; + +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ServerDescription; + +import java.util.List; + +/** + *

<p>An interface for selecting a server from a cluster according to some preference.</p> + * + * <p>Implementations of this interface should ensure that their equals and hashCode methods compare equal preferences as equal, as users of + * this interface may rely on that behavior to efficiently consolidate handling of multiple requests waiting on a server that can satisfy + * the preference.</p>
+ * + * @since 3.0 + */ +@ThreadSafe +public interface ServerSelector { + /** + * Select a list of server descriptions from the given cluster description according to some criteria. + * + * @param clusterDescription the cluster of servers to select from + * @return a non-null list of ServerDescriptions that meet the requirements of this ServerSelector. This may be empty. + */ + List<ServerDescription> select(ClusterDescription clusterDescription); +} diff --git a/driver-core/src/main/com/mongodb/selector/package-info.java b/driver-core/src/main/com/mongodb/selector/package-info.java new file mode 100644 index 00000000000..84c24584975 --- /dev/null +++ b/driver-core/src/main/com/mongodb/selector/package-info.java @@ -0,0 +1,23 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Contains classes that determine how to select the server to connect to in order to send commands or queries. + */ +@NonNullApi +package com.mongodb.selector; + +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/session/ClientSession.java b/driver-core/src/main/com/mongodb/session/ClientSession.java new file mode 100644 index 00000000000..072e6d90905 --- /dev/null +++ b/driver-core/src/main/com/mongodb/session/ClientSession.java @@ -0,0 +1,186 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.session; + +import com.mongodb.ClientSessionOptions; +import com.mongodb.ServerAddress; +import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonTimestamp; + +import java.io.Closeable; + +/** + * A client session. + * + * @mongodb.server.release 3.6 + * @since 3.6 + * @see ClientSessionOptions + */ +@NotThreadSafe +public interface ClientSession extends Closeable { + + /** + * Get the server address of the pinned mongos on this session. + * For internal use only. + * + * @return the server address of the pinned mongos + * @mongodb.server.release 4.2 + * @since 3.11 + */ + @Nullable + ServerAddress getPinnedServerAddress(); + + /** + * Gets the transaction context. + * + *
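A minimal sketch of implementing and composing the selector SPI added above (illustrative only, not part of this diff; HostPrefixServerSelector, the "dc1-" prefix, and anotherSelector are hypothetical, while ClusterDescription#getServerDescriptions and ServerAddress#getHost are existing public API):

import com.mongodb.connection.ClusterDescription;
import com.mongodb.connection.ServerDescription;
import com.mongodb.selector.CompositeServerSelector;
import com.mongodb.selector.ServerSelector;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

// Keeps only servers whose host name starts with a given prefix. equals and
// hashCode compare the prefix, honouring the contract in the ServerSelector Javadoc.
final class HostPrefixServerSelector implements ServerSelector {
    private final String prefix;

    HostPrefixServerSelector(final String prefix) {
        this.prefix = prefix;
    }

    @Override
    public List<ServerDescription> select(final ClusterDescription clusterDescription) {
        List<ServerDescription> selected = new ArrayList<>();
        for (ServerDescription cur : clusterDescription.getServerDescriptions()) {
            if (cur.getAddress().getHost().startsWith(prefix)) {
                selected.add(cur);
            }
        }
        return selected;
    }

    @Override
    public boolean equals(final Object o) {
        return o instanceof HostPrefixServerSelector && prefix.equals(((HostPrefixServerSelector) o).prefix);
    }

    @Override
    public int hashCode() {
        return prefix.hashCode();
    }
}

// Composition: the output of the prefix filter is fed into the next selector.
// anotherSelector is a hypothetical second ServerSelector.
ServerSelector composed = new CompositeServerSelector(
        Arrays.asList(new HostPrefixServerSelector("dc1-"), anotherSelector));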

<p>For internal use only</p>
+ * + * @return the transaction context + */ + @Nullable + Object getTransactionContext(); + + /** + * Sets the transaction context. + * + *

<p>For internal use only</p> + * + * <p>Implementations may place additional restrictions on the type of the transaction context</p>
+ * + * @param address the server address + * @param transactionContext the transaction context + */ + void setTransactionContext(ServerAddress address, Object transactionContext); + + /** + * Clears the transaction context. + * + *

<p>For internal use only</p>
+ * + */ + void clearTransactionContext(); + + /** + * Get the recovery token from the latest outcome in a sharded transaction. + * For internal use only. + * + * @return the recovery token + * @mongodb.server.release 4.2 + * @since 3.11 + */ + @Nullable + BsonDocument getRecoveryToken(); + + /** + * Set the recovery token. + * For internal use only. + * + * @param recoveryToken the recovery token + * @mongodb.server.release 4.2 + * @since 3.11 + */ + void setRecoveryToken(BsonDocument recoveryToken); + + /** + * Get the options for this session. + * + * @return the options, which may not be null + */ + ClientSessionOptions getOptions(); + + /** + * Returns true if operations in this session must be causally consistent + * + * @return whether operations in this session must be causally consistent. + */ + boolean isCausallyConsistent(); + + /** + * Gets the originator for the session. + * + *

<p> + * Important because sessions must only be used by their own originator. + * </p>
+ * + * @return the session's originator + */ + Object getOriginator(); + + /** + * + * @return the server session + */ + ServerSession getServerSession(); + + /** + * Gets the operation time of the last operation executed in this session. + * + * @return the operation time + */ + BsonTimestamp getOperationTime(); + + /** + * Set the operation time of the last operation executed in this session. + * + * @param operationTime the operation time + */ + void advanceOperationTime(@Nullable BsonTimestamp operationTime); + + /** + * @param clusterTime the cluster time to advance to + */ + void advanceClusterTime(@Nullable BsonDocument clusterTime); + + /** + * For internal use only. + * + * @param snapshotTimestamp the snapshot timestamp + */ + void setSnapshotTimestamp(@Nullable BsonTimestamp snapshotTimestamp); + + /** + * For internal use only. + * + * @return the snapshot timestamp + */ + @Nullable + BsonTimestamp getSnapshotTimestamp(); + + /** + * @return the latest cluster time seen by this session + */ + BsonDocument getClusterTime(); + + @Override + void close(); + + /** + * Gets the timeout context to use with this session: + * + *
    + *
<ul> + *   <li>{@code MongoClientSettings#getTimeoutMS}</li> + *   <li>{@code ClientSessionOptions#getDefaultTimeout}</li> + * </ul> + * + * <p>For internal use only</p>
+ * @return the timeout to use + * @since 5.2 + */ + @Nullable + TimeoutContext getTimeoutContext(); +} diff --git a/driver-core/src/main/com/mongodb/session/ServerSession.java b/driver-core/src/main/com/mongodb/session/ServerSession.java new file mode 100644 index 00000000000..295510b23da --- /dev/null +++ b/driver-core/src/main/com/mongodb/session/ServerSession.java @@ -0,0 +1,72 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.session; + +import org.bson.BsonDocument; + +/** + * A MongoDB server session. + * + * @mongodb.server.release 3.6 + * @since 3.6 + */ +public interface ServerSession { + + /** + * @return the server session identifier + */ + BsonDocument getIdentifier(); + + /** + * Gets the current transaction number. + * + * @return the current transaction number + * @since 3.8 + */ + long getTransactionNumber(); + + /** + * Return the next available transaction number. + * + * @return the next transaction number + */ + long advanceTransactionNumber(); + + /** + * Whether the server session is closed. + * + * @return true if the session has been closed + */ + boolean isClosed(); + + /** + * Mark the server session as dirty. + *
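A minimal sketch of where ClientSession surfaces publicly, via the sync driver (illustrative only, not part of this diff; mongoClient and collection are hypothetical, and com.mongodb.client.ClientSession extends the interface above):

import com.mongodb.ClientSessionOptions;
import com.mongodb.client.ClientSession;
import org.bson.Document;

// A causally consistent session: reads observe the session's operation time,
// which the driver advances through advanceOperationTime(...) shown above.
ClientSessionOptions options = ClientSessionOptions.builder()
        .causallyConsistent(true)
        .build();
try (ClientSession session = mongoClient.startSession(options)) { // mongoClient: hypothetical
    collection.insertOne(session, new Document("_id", 1));         // collection: hypothetical
    collection.find(session).first();
}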

<p> + * A server session is marked dirty when a command fails with a network + * error. Dirty sessions are later discarded from the server session pool. + * @since 3.12 + */ + void markDirty(); + + /** + * Whether the server session is marked dirty. + * + * @return true if the session has been marked dirty + * @since 3.12 + */ + boolean isMarkedDirty(); +} diff --git a/driver-core/src/main/com/mongodb/session/package-info.java b/driver-core/src/main/com/mongodb/session/package-info.java new file mode 100644 index 00000000000..2edec257565 --- /dev/null +++ b/driver-core/src/main/com/mongodb/session/package-info.java @@ -0,0 +1,23 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Contains classes related to sessions. + */ +@NonNullApi +package com.mongodb.session; + +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/com/mongodb/spi/dns/DnsClient.java b/driver-core/src/main/com/mongodb/spi/dns/DnsClient.java new file mode 100644 index 00000000000..482d48ec0a2 --- /dev/null +++ b/driver-core/src/main/com/mongodb/spi/dns/DnsClient.java @@ -0,0 +1,48 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.spi.dns; + +import com.mongodb.MongoClientSettings; +import com.mongodb.annotations.ThreadSafe; + +import java.util.List; + + +/** + * An interface describing a DNS client. + * + * @since 4.6 + * @see DnsClientProvider + * @see MongoClientSettings.Builder#dnsClient(DnsClient) + */ +@ThreadSafe +public interface DnsClient { + /** + * Gets the resource record values for the given name and type. + * + *

<p> + * Implementations should throw {@link DnsWithResponseCodeException} if the DNS response code is known. Otherwise, the more generic + * {@link DnsException} should be thrown. + * </p>
+ * + * @param name the name of the resource to look up + * @param type the resource record type, typically either {@code "SRV"} or {@code "TXT"}. + * @return the list of values for the requested resource, or the empty list if none exist + * @throws DnsException if the lookup fails + */ + List<String> getResourceRecordData(String name, String type) throws DnsException; +} diff --git a/driver-core/src/main/com/mongodb/spi/dns/DnsClientProvider.java b/driver-core/src/main/com/mongodb/spi/dns/DnsClientProvider.java new file mode 100644 index 00000000000..85581ee8eed --- /dev/null +++ b/driver-core/src/main/com/mongodb/spi/dns/DnsClientProvider.java @@ -0,0 +1,42 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.spi.dns; + +/** + * Service-provider class for {@link DnsClient}. + * + *

<p>A resolver provider is a factory for custom implementations of + * {@linkplain DnsClient a DNS client}. A DNS client defines operations for + * looking up DNS records for a given type. + * + * <p>The driver discovers implementations of this interface via {@link java.util.ServiceLoader}. + * + *
<p>If more fine-grained control is required for multi-tenant applications, an + * {@linkplain DnsClient a DNS client} can be configured via + * {@link com.mongodb.MongoClientSettings.Builder#dnsClient(DnsClient)}. + * + * @since 4.6 + * @see java.util.ServiceLoader +*/ +public interface DnsClientProvider { + /** + * Construct a new instance of a {@link DnsClient}. + * + * @return a {@link DnsClient} + */ + DnsClient create(); +} diff --git a/driver-core/src/main/com/mongodb/spi/dns/DnsException.java b/driver-core/src/main/com/mongodb/spi/dns/DnsException.java new file mode 100644 index 00000000000..4bd43407dc9 --- /dev/null +++ b/driver-core/src/main/com/mongodb/spi/dns/DnsException.java @@ -0,0 +1,37 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.spi.dns; + +/** + * An exception indicating a DNS error. + * + * @since 4.6 + * @see DnsClient#getResourceRecordData(String, String) + */ +public class DnsException extends RuntimeException { + private static final long serialVersionUID = 1; + + /** + * Construct an instance + * + * @param message the message + * @param cause the cause + */ + public DnsException(final String message, final Throwable cause) { + super(message, cause); + } +} diff --git a/driver-core/src/main/com/mongodb/spi/dns/DnsWithResponseCodeException.java b/driver-core/src/main/com/mongodb/spi/dns/DnsWithResponseCodeException.java new file mode 100644 index 00000000000..cb3b8446b80 --- /dev/null +++ b/driver-core/src/main/com/mongodb/spi/dns/DnsWithResponseCodeException.java @@ -0,0 +1,50 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.spi.dns; + +/** + * An exception indicating a DNS error that includes a response code.
+ * + * @since 4.6 + * @serial exclude + */ +public class DnsWithResponseCodeException extends DnsException { + private static final long serialVersionUID = 1; + + private final int responseCode; + + /** + * Construct an instance + * + * @param message the message + * @param responseCode the DNS response code + * @param cause the cause + */ + public DnsWithResponseCodeException(final String message, final int responseCode, final Throwable cause) { + super(message, cause); + this.responseCode = responseCode; + } + + /** + * Gets the response code + * + * @return the response code + */ + public int getResponseCode() { + return responseCode; + } +} diff --git a/driver-core/src/main/com/mongodb/spi/dns/InetAddressResolver.java b/driver-core/src/main/com/mongodb/spi/dns/InetAddressResolver.java new file mode 100644 index 00000000000..c07dfabae79 --- /dev/null +++ b/driver-core/src/main/com/mongodb/spi/dns/InetAddressResolver.java @@ -0,0 +1,51 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.spi.dns; + +import com.mongodb.MongoClientSettings; +import com.mongodb.annotations.ThreadSafe; + +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.List; + +/** + * This interface defines operations for looking up host names. + * + *
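A minimal sketch of supplying a custom DnsClient through MongoClientSettings instead of ServiceLoader discovery (illustrative only, not part of this diff; delegateLookup is a hypothetical resolver call that yields the record data strings):

import com.mongodb.ConnectionString;
import com.mongodb.MongoClientSettings;
import com.mongodb.spi.dns.DnsClient;
import com.mongodb.spi.dns.DnsException;

// DnsClient has a single abstract method, so a lambda suffices; wrap any
// underlying failure in DnsException as the interface documents.
DnsClient dnsClient = (name, type) -> {
    try {
        return delegateLookup(name, type); // hypothetical delegate
    } catch (Exception e) {
        throw new DnsException("Lookup of " + type + " records for " + name + " failed", e);
    }
};

MongoClientSettings settings = MongoClientSettings.builder()
        .applyConnectionString(new ConnectionString("mongodb+srv://cluster.example.com"))
        .dnsClient(dnsClient)
        .build();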

<p>The default resolver for the driver can be customized by deploying an implementation of {@link InetAddressResolverProvider}.</p>
+ * + * @see InetAddressResolverProvider + * @see MongoClientSettings.Builder#inetAddressResolver(InetAddressResolver) + * @since 4.10 + */ +@ThreadSafe +public interface InetAddressResolver { + /** + * Given the name of a host, returns a list of IP addresses of the requested + * address family associated with a provided hostname. + * + *

<p>The host name can be an IP literal, as with {@link InetAddress#getAllByName(String)}</p> + * + *
<p>Implementations are encouraged to implement their own caching policies, as there is + * no guarantee that the caller will implement a cache. + * + * @param host the host + * @return a list of IP addresses for the requested host + * @throws UnknownHostException if no IP addresses for the {@code host} could be found + */ + List<InetAddress> lookupByName(String host) throws UnknownHostException; +} diff --git a/driver-core/src/main/com/mongodb/spi/dns/InetAddressResolverProvider.java b/driver-core/src/main/com/mongodb/spi/dns/InetAddressResolverProvider.java new file mode 100644 index 00000000000..0bbb92162a3 --- /dev/null +++ b/driver-core/src/main/com/mongodb/spi/dns/InetAddressResolverProvider.java @@ -0,0 +1,42 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.spi.dns; + +/** + * Service-provider class for {@link InetAddressResolver}. + * + *

<p>A resolver provider is a factory for custom implementations of {@linkplain + * InetAddressResolver InetAddress resolvers}. A resolver defines operations for + * looking up (resolving) host names. + * + * <p>The driver discovers implementations of this interface via {@link java.util.ServiceLoader}. + * + *
<p>If more fine-grained control is required for multi-tenant applications, an {@linkplain InetAddressResolver InetAddress resolver} + * can be configured via {@link com.mongodb.MongoClientSettings.Builder#inetAddressResolver(InetAddressResolver)}. + * + * @since 4.10 + * @see java.util.ServiceLoader + */ + +public interface InetAddressResolverProvider { + /** + * Construct a new instance of an {@link InetAddressResolver}. + * + * @return an {@link InetAddressResolver} + */ + InetAddressResolver create(); +} diff --git a/driver-core/src/main/com/mongodb/spi/dns/package-info.java b/driver-core/src/main/com/mongodb/spi/dns/package-info.java new file mode 100644 index 00000000000..5b173f43309 --- /dev/null +++ b/driver-core/src/main/com/mongodb/spi/dns/package-info.java @@ -0,0 +1,30 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Original Work: MIT License, Copyright (c) [2015-2020] all contributors + * https://github.com/marianobarrios/tls-channel + */ + +/** + * This package defines the Service Provider Interface (SPI) for a DNS provider. By default, the driver will use the + * JNDI support from com.sun.jndi.dns.DnsContextFactory, but this can be replaced using the JDK's ServiceLoader capabilities. + * + * @see java.util.ServiceLoader + * @since 4.6 + */ +@NonNullApi +package com.mongodb.spi.dns; + +import com.mongodb.lang.NonNullApi; diff --git a/driver-core/src/main/resources/META-INF/native-image/native-image.properties b/driver-core/src/main/resources/META-INF/native-image/native-image.properties new file mode 100644 index 00000000000..6de9c4d8765 --- /dev/null +++ b/driver-core/src/main/resources/META-INF/native-image/native-image.properties @@ -0,0 +1,22 @@ +# +# Copyright 2008-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
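A minimal sketch of an InetAddressResolverProvider discovered via java.util.ServiceLoader (illustrative only, not part of this diff; the class name is hypothetical). The provider must be listed in META-INF/services/com.mongodb.spi.dns.InetAddressResolverProvider, the same services file the resource-config.json below includes for native images:

import com.mongodb.spi.dns.InetAddressResolver;
import com.mongodb.spi.dns.InetAddressResolverProvider;

import java.net.InetAddress;
import java.util.Arrays;

public final class SystemResolverProvider implements InetAddressResolverProvider {
    @Override
    public InetAddressResolver create() {
        // Delegates to the JDK; a production resolver would add a caching layer,
        // as the InetAddressResolver Javadoc above encourages.
        return host -> Arrays.asList(InetAddress.getAllByName(host));
    }
}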
+# +Args =\ + --initialize-at-run-time=\ + com.mongodb.UnixServerAddress,\ + com.mongodb.internal.connection.SnappyCompressor,\ + com.mongodb.internal.connection.ClientMetadata,\ + com.mongodb.internal.connection.ServerAddressHelper,\ + com.mongodb.internal.dns.DefaultDnsResolver diff --git a/driver-core/src/main/resources/META-INF/native-image/reflect-config.json b/driver-core/src/main/resources/META-INF/native-image/reflect-config.json new file mode 100644 index 00000000000..db705783693 --- /dev/null +++ b/driver-core/src/main/resources/META-INF/native-image/reflect-config.json @@ -0,0 +1,83 @@ +[ +{ + "name":"com.mongodb.BasicDBObject", + "methods":[{"name":"<init>","parameterTypes":[] }] +}, +{ + "name":"com.mongodb.MongoNamespace", + "allDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true +}, +{ + "name":"com.mongodb.WriteConcern", + "allPublicFields":true +}, +{ + "name":"com.mongodb.client.model.changestream.ChangeStreamDocument", + "allDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true, + "methods":[{"name":"<init>","parameterTypes":["java.lang.String","org.bson.BsonDocument","org.bson.BsonDocument","java.lang.String","org.bson.BsonDocument","java.lang.Object","java.lang.Object","org.bson.BsonDocument","org.bson.BsonTimestamp","com.mongodb.client.model.changestream.UpdateDescription","org.bson.BsonInt64","org.bson.BsonDocument","org.bson.BsonDateTime","com.mongodb.client.model.changestream.SplitEvent","org.bson.BsonDocument"] }] +}, +{ + "name":"com.mongodb.client.model.changestream.SplitEvent", + "allDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true +}, +{ + "name":"com.mongodb.client.model.changestream.TruncatedArray", + "allDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true +}, +{ + "name":"com.mongodb.client.model.changestream.UpdateDescription", + "allDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true, + "methods":[{"name":"<init>","parameterTypes":["java.util.List","org.bson.BsonDocument","java.util.List","org.bson.BsonDocument"] }] +}, +{ + "name":"java.lang.Record" +}, +{ + "name":"java.lang.Thread", + "fields":[{"name":"threadLocalRandomProbe"}] +}, +{ + "name":"java.net.Socket", + "methods":[{"name":"setOption","parameterTypes":["java.net.SocketOption","java.lang.Object"] }] +}, +{ + "name":"java.security.SecureRandomParameters" +}, +{ + "name":"java.util.concurrent.ForkJoinTask", + "fields":[{"name":"aux"}, {"name":"status"}] +}, +{ + "name":"java.util.concurrent.atomic.Striped64", + "fields":[{"name":"base"}, {"name":"cellsBusy"}] +}, +{ + "name":"jdk.internal.misc.Unsafe" +}, +{ + "name":"jdk.net.ExtendedSocketOptions", + "fields":[{"name":"TCP_KEEPCOUNT"}, {"name":"TCP_KEEPIDLE"}, {"name":"TCP_KEEPINTERVAL"}] +}, +{ + "name":"org.bson.codecs.kotlin.DataClassCodecProvider" +}, +{ + "name":"org.bson.codecs.kotlinx.KotlinSerializerCodecProvider" +}, +{ + "name":"org.bson.codecs.record.RecordCodecProvider" +}, +{ + "name":"org.slf4j.Logger" +} +] diff --git a/driver-core/src/main/resources/META-INF/native-image/resource-config.json b/driver-core/src/main/resources/META-INF/native-image/resource-config.json new file mode 100644 index 00000000000..43d3d5bb969 --- /dev/null +++ b/driver-core/src/main/resources/META-INF/native-image/resource-config.json @@ -0,0 +1,9 @@ +{ + "resources":{ + "includes":[{ + "pattern":"\\QMETA-INF/services/com.mongodb.spi.dns.DnsClientProvider\\E" + },
{ + "pattern":"\\QMETA-INF/services/com.mongodb.spi.dns.InetAddressResolverProvider\\E" + }]}, + "bundles":[] +} diff --git a/driver-core/src/test/functional/com/mongodb/ClusterFixture.java b/driver-core/src/test/functional/com/mongodb/ClusterFixture.java new file mode 100644 index 00000000000..d3518436ddb --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/ClusterFixture.java @@ -0,0 +1,845 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.async.FutureResultCallback; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ClusterSettings; +import com.mongodb.connection.ClusterType; +import com.mongodb.connection.ConnectionPoolSettings; +import com.mongodb.connection.NettyTransportSettings; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerSettings; +import com.mongodb.connection.ServerVersion; +import com.mongodb.connection.SocketSettings; +import com.mongodb.connection.SslSettings; +import com.mongodb.connection.TransportSettings; +import com.mongodb.internal.IgnorableRequestContext; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.async.AsyncBatchCursor; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncClusterBinding; +import com.mongodb.internal.binding.AsyncConnectionSource; +import com.mongodb.internal.binding.AsyncOperationContextBinding; +import com.mongodb.internal.binding.AsyncReadBinding; +import com.mongodb.internal.binding.AsyncReadWriteBinding; +import com.mongodb.internal.binding.AsyncSessionBinding; +import com.mongodb.internal.binding.AsyncSingleConnectionBinding; +import com.mongodb.internal.binding.AsyncWriteBinding; +import com.mongodb.internal.binding.ClusterBinding; +import com.mongodb.internal.binding.OperationContextBinding; +import com.mongodb.internal.binding.ReadWriteBinding; +import com.mongodb.internal.binding.ReferenceCounted; +import com.mongodb.internal.binding.SessionBinding; +import com.mongodb.internal.binding.SingleConnectionBinding; +import com.mongodb.internal.connection.AsyncConnection; +import com.mongodb.internal.connection.AsynchronousSocketChannelStreamFactory; +import com.mongodb.internal.connection.ClientMetadata; +import com.mongodb.internal.connection.Cluster; +import com.mongodb.internal.connection.DefaultClusterFactory; +import com.mongodb.internal.connection.DefaultInetAddressResolver; +import com.mongodb.internal.connection.InternalConnectionPoolSettings; +import com.mongodb.internal.connection.InternalOperationContextFactory; +import com.mongodb.internal.connection.MongoCredentialWithCache; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.connection.ReadConcernAwareNoOpSessionContext; +import com.mongodb.internal.connection.SocketStreamFactory; +import 
com.mongodb.internal.connection.StreamFactory; +import com.mongodb.internal.connection.StreamFactoryFactory; +import com.mongodb.internal.connection.TlsChannelStreamFactoryFactory; +import com.mongodb.internal.connection.netty.NettyStreamFactoryFactory; +import com.mongodb.internal.crypt.capi.CAPI; +import com.mongodb.internal.operation.BatchCursor; +import com.mongodb.internal.operation.CommandReadOperation; +import com.mongodb.internal.operation.DropDatabaseOperation; +import com.mongodb.internal.operation.ReadOperation; +import com.mongodb.internal.operation.WriteOperation; +import com.mongodb.lang.Nullable; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslContextBuilder; +import io.netty.handler.ssl.SslProvider; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.Document; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.DocumentCodec; + +import javax.net.ssl.SSLException; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.connection.ClusterConnectionMode.LOAD_BALANCED; +import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE; +import static com.mongodb.connection.ClusterType.REPLICA_SET; +import static com.mongodb.connection.ClusterType.SHARDED; +import static com.mongodb.connection.ClusterType.STANDALONE; +import static com.mongodb.connection.ClusterType.UNKNOWN; +import static com.mongodb.internal.connection.ClusterDescriptionHelper.getPrimaries; +import static com.mongodb.internal.connection.ClusterDescriptionHelper.getSecondaries; +import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException; +import static java.lang.String.format; +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.NANOSECONDS; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.hamcrest.CoreMatchers.is; +import static org.junit.Assume.assumeThat; +import static org.junit.Assume.assumeTrue; + +/** + * Helper class for the acceptance tests. Used primarily by DatabaseTestCase and FunctionalSpecification. This fixture allows Test + * super-classes to share functionality whilst minimising duplication. 
+ */ +public final class ClusterFixture { + public static final String DEFAULT_URI = "mongodb://localhost:27017"; + public static final String MONGODB_URI_SYSTEM_PROPERTY_NAME = "org.mongodb.test.uri"; + public static final String MONGODB_API_VERSION = "org.mongodb.test.api.version"; + public static final String MONGODB_MULTI_MONGOS_URI_SYSTEM_PROPERTY_NAME = "org.mongodb.test.multi.mongos.uri"; + public static final String ATLAS_SEARCH_TEST_SYSTEM_PROPERTY_NAME = "org.mongodb.test.atlas.search"; + private static final String MONGODB_OCSP_SHOULD_SUCCEED = "org.mongodb.test.ocsp.tls.should.succeed"; + private static final String DEFAULT_DATABASE_NAME = "JavaDriverTest"; + private static final int COMMAND_NOT_FOUND_ERROR_CODE = 59; + public static final long TIMEOUT = 120L; + public static final Duration TIMEOUT_DURATION = Duration.ofSeconds(TIMEOUT); + public static final ClientMetadata CLIENT_METADATA = new ClientMetadata("test", MongoDriverInformation.builder().build()); + + public static final TimeoutSettings TIMEOUT_SETTINGS = new TimeoutSettings(30_000, 10_000, 0, null, SECONDS.toMillis(5)); + public static final TimeoutSettings TIMEOUT_SETTINGS_WITH_TIMEOUT = TIMEOUT_SETTINGS.withTimeout(TIMEOUT, SECONDS); + public static final TimeoutSettings TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT = TIMEOUT_SETTINGS.withTimeout(0L, MILLISECONDS); + public static final TimeoutSettings TIMEOUT_SETTINGS_WITH_MAX_TIME = TIMEOUT_SETTINGS.withMaxTimeMS(100); + public static final TimeoutSettings TIMEOUT_SETTINGS_WITH_MAX_AWAIT_TIME = TIMEOUT_SETTINGS.withMaxAwaitTimeMS(101); + public static final TimeoutSettings TIMEOUT_SETTINGS_WITH_MAX_TIME_AND_AWAIT_TIME = + TIMEOUT_SETTINGS.withMaxTimeAndMaxAwaitTimeMS(101, 1001); + + public static final TimeoutSettings TIMEOUT_SETTINGS_WITH_LEGACY_SETTINGS = + TIMEOUT_SETTINGS.withMaxTimeAndMaxAwaitTimeMS(101, 1001).withMaxCommitMS(999L); + public static final TimeoutSettings TIMEOUT_SETTINGS_WITH_MAX_COMMIT = TIMEOUT_SETTINGS.withMaxCommitMS(999L); + + public static final String LEGACY_HELLO = "isMaster"; + + private static ConnectionString connectionString; + private static Cluster cluster; + private static Cluster asyncCluster; + private static final Map<ReadPreference, ReadWriteBinding> BINDING_MAP = new HashMap<>(); + private static final Map<ReadPreference, AsyncReadWriteBinding> ASYNC_BINDING_MAP = new HashMap<>(); + + private static ServerVersion mongoCryptVersion; + private static ServerVersion serverVersion; + private static BsonDocument serverParameters; + + private static NettyTransportSettings nettyTransportSettings; + + static { + Runtime.getRuntime().addShutdownHook(new ShutdownHook()); + } + + private ClusterFixture() { + } + + public static String getDefaultDatabaseName() { + return DEFAULT_DATABASE_NAME; + } + + public static boolean clusterIsType(final ClusterType clusterType) { + return getClusterDescription(getCluster()).getType() == clusterType; + } + + public static ClusterDescription getClusterDescription(final Cluster cluster) { + try { + ClusterDescription clusterDescription = cluster.getCurrentDescription(); + while (clusterDescription.getType() == UNKNOWN) { + Thread.sleep(10); + clusterDescription = cluster.getCurrentDescription(); + } + return clusterDescription; + } catch (InterruptedException e) { + throw interruptAndCreateMongoInterruptedException("Interrupted", e); + } + } + + public static ServerVersion getMongoCryptVersion() { + if (mongoCryptVersion == null) { + mongoCryptVersion = new ServerVersion(getVersionList(CAPI.mongocrypt_version(null).toString())); + } + return mongoCryptVersion; + } + + public
static ServerVersion getServerVersion() { + if (serverVersion == null) { + serverVersion = getVersion(new CommandReadOperation<>("admin", + new BsonDocument("buildInfo", new BsonInt32(1)), new BsonDocumentCodec()) + .execute(new ClusterBinding(getCluster(), ReadPreference.nearest(), ReadConcern.DEFAULT, OPERATION_CONTEXT))); + } + return serverVersion; + } + + public static final OperationContext OPERATION_CONTEXT = new OperationContext( + IgnorableRequestContext.INSTANCE, + new ReadConcernAwareNoOpSessionContext(ReadConcern.DEFAULT), + new TimeoutContext(TIMEOUT_SETTINGS), + getServerApi()); + + public static final InternalOperationContextFactory OPERATION_CONTEXT_FACTORY = + new InternalOperationContextFactory(TIMEOUT_SETTINGS, getServerApi()); + + public static OperationContext createOperationContext(final TimeoutSettings timeoutSettings) { + return new OperationContext( + IgnorableRequestContext.INSTANCE, + new ReadConcernAwareNoOpSessionContext(ReadConcern.DEFAULT), + new TimeoutContext(timeoutSettings), + getServerApi()); + } + + private static ServerVersion getVersion(final BsonDocument buildInfoResult) { + List<BsonValue> versionArray = buildInfoResult.getArray("versionArray").subList(0, 3); + + return new ServerVersion(asList(versionArray.get(0).asInt32().getValue(), + versionArray.get(1).asInt32().getValue(), + versionArray.get(2).asInt32().getValue())); + } + + public static boolean serverVersionAtLeast(final int majorVersion, final int minorVersion) { + return getServerVersion().compareTo(new ServerVersion(asList(majorVersion, minorVersion, 0))) >= 0; + } + + public static boolean serverVersionLessThan(final int majorVersion, final int minorVersion) { + return getServerVersion().compareTo(new ServerVersion(asList(majorVersion, minorVersion, 0))) < 0; + } + + public static List<Integer> getVersionList(final String versionString) { + List<Integer> versionList = new ArrayList<>(); + for (String s : versionString.split("\\.")) { + versionList.add(Integer.valueOf(s)); + } + while (versionList.size() < 3) { + versionList.add(0); + } + return versionList; + } + + public static boolean hasEncryptionTestsEnabled() { + List<String> requiredSystemProperties = asList("AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AZURE_TENANT_ID", "AZURE_CLIENT_ID", + "AZURE_CLIENT_SECRET", "GCP_EMAIL", "GCP_PRIVATE_KEY", "AWS_TEMP_ACCESS_KEY_ID", "AWS_TEMP_SECRET_ACCESS_KEY", + "AWS_TEMP_SESSION_TOKEN"); + return requiredSystemProperties.stream() + .map(name -> getEnv(name, "")) + .filter(s -> !s.isEmpty()) + .count() == requiredSystemProperties.size(); + } + + public static Document getServerStatus() { + return new CommandReadOperation<>("admin", new BsonDocument("serverStatus", new BsonInt32(1)), + new DocumentCodec()) + .execute(getBinding()); + } + + public static boolean supportsFsync() { + Document serverStatus = getServerStatus(); + Document storageEngine = (Document) serverStatus.get("storageEngine"); + + return storageEngine != null && !storageEngine.get("name").equals("inMemory"); + } + + static class ShutdownHook extends Thread { + @Override + public void run() { + if (cluster != null) { + try { + new DropDatabaseOperation(getDefaultDatabaseName(), WriteConcern.ACKNOWLEDGED).execute(getBinding()); + } catch (MongoCommandException e) { + // if we do not have permission to drop the database, assume it is cleaned up in some other way + if (!e.getMessage().contains("Command dropDatabase requires authentication")) { + throw e; + } + } + cluster.close(); + } + } + } + + public static String getEnv(final String name, final String
defaultValue) { + String value = getEnv(name); + return value == null ? defaultValue : value; + } + + public static Optional<String> cryptSharedLibPathSysPropValue() { + String value = getEnv("CRYPT_SHARED_LIB_PATH", ""); + return value.isEmpty() ? Optional.empty() : Optional.of(value); + } + + @Nullable + public static String getEnv(final String name) { + return System.getenv(name); + } + + public static boolean getOcspShouldSucceed() { + return Integer.parseInt(System.getProperty(MONGODB_OCSP_SHOULD_SUCCEED)) == 1; + } + + @Nullable + public static synchronized ConnectionString getMultiMongosConnectionString() { + return getConnectionStringFromSystemProperty(MONGODB_MULTI_MONGOS_URI_SYSTEM_PROPERTY_NAME); + } + + public static synchronized ConnectionString getConnectionString() { + if (connectionString != null) { + return connectionString; + } + + ConnectionString mongoURIProperty = getConnectionStringFromSystemProperty(MONGODB_URI_SYSTEM_PROPERTY_NAME); + if (mongoURIProperty != null) { + return mongoURIProperty; + } + + // Figure out what the connection string should be + Cluster cluster = createCluster(new ConnectionString(DEFAULT_URI), + new SocketStreamFactory(new DefaultInetAddressResolver(), SocketSettings.builder().build(), SslSettings.builder().build())); + try { + BsonDocument helloResult = new CommandReadOperation<>("admin", + new BsonDocument(LEGACY_HELLO, new BsonInt32(1)), new BsonDocumentCodec()) + .execute(new ClusterBinding(cluster, ReadPreference.nearest(), ReadConcern.DEFAULT, OPERATION_CONTEXT)); + if (helloResult.containsKey("setName")) { + connectionString = new ConnectionString(DEFAULT_URI + "/?replicaSet=" + + helloResult.getString("setName").getValue()); + } else { + connectionString = new ConnectionString(DEFAULT_URI); + ClusterFixture.cluster = cluster; + } + + return connectionString; + } finally { + if (ClusterFixture.cluster == null) { + cluster.close(); + } + } + } + + public static ClusterConnectionMode getClusterConnectionMode() { + return getCluster().getCurrentDescription().getConnectionMode(); + } + + @Nullable + public static ServerApi getServerApi() { + if (System.getProperty(MONGODB_API_VERSION) == null) { + return null; + } else { + return ServerApi.builder().version(ServerApiVersion.findByValue(System.getProperty(MONGODB_API_VERSION))).build(); + } + } + + public static String getConnectionStringSystemPropertyOrDefault() { + return System.getProperty(MONGODB_URI_SYSTEM_PROPERTY_NAME, DEFAULT_URI); + } + + @Nullable + private static ConnectionString getConnectionStringFromSystemProperty(final String property) { + String mongoURIProperty = System.getProperty(property); + if (mongoURIProperty != null && !mongoURIProperty.isEmpty()) { + return new ConnectionString(mongoURIProperty); + } + return null; + } + + public static ReadWriteBinding getBinding() { + return getBinding(getCluster()); + } + + public static ReadWriteBinding getBinding(final Cluster cluster) { + return new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT); + } + + public static ReadWriteBinding getBinding(final TimeoutSettings timeoutSettings) { + return getBinding(getCluster(), ReadPreference.primary(), createNewOperationContext(timeoutSettings)); + } + + public static ReadWriteBinding getBinding(final OperationContext operationContext) { + return getBinding(getCluster(), ReadPreference.primary(), operationContext); + } + + public static ReadWriteBinding getBinding(final ReadPreference readPreference) { + return getBinding(getCluster(),
readPreference, OPERATION_CONTEXT); + } + + public static OperationContext createNewOperationContext(final TimeoutSettings timeoutSettings) { + return OPERATION_CONTEXT.withTimeoutContext(new TimeoutContext(timeoutSettings)); + } + + private static ReadWriteBinding getBinding(final Cluster cluster, + final ReadPreference readPreference, + final OperationContext operationContext) { + if (!BINDING_MAP.containsKey(readPreference)) { + ReadWriteBinding binding = new SessionBinding(new ClusterBinding(cluster, readPreference, ReadConcern.DEFAULT, + operationContext)); + BINDING_MAP.put(readPreference, binding); + } + ReadWriteBinding readWriteBinding = BINDING_MAP.get(readPreference); + return new OperationContextBinding(readWriteBinding, + operationContext.withSessionContext(readWriteBinding.getOperationContext().getSessionContext())); + } + + public static SingleConnectionBinding getSingleConnectionBinding() { + return new SingleConnectionBinding(getCluster(), ReadPreference.primary(), OPERATION_CONTEXT); + } + + public static AsyncSingleConnectionBinding getAsyncSingleConnectionBinding() { + return getAsyncSingleConnectionBinding(getAsyncCluster()); + } + + public static AsyncSingleConnectionBinding getAsyncSingleConnectionBinding(final Cluster cluster) { + return new AsyncSingleConnectionBinding(cluster, ReadPreference.primary(), OPERATION_CONTEXT); + } + + public static AsyncReadWriteBinding getAsyncBinding(final Cluster cluster) { + return new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT); + } + + public static AsyncReadWriteBinding getAsyncBinding() { + return getAsyncBinding(getAsyncCluster(), ReadPreference.primary(), OPERATION_CONTEXT); + } + + public static AsyncReadWriteBinding getAsyncBinding(final TimeoutSettings timeoutSettings) { + return getAsyncBinding(createNewOperationContext(timeoutSettings)); + } + + public static AsyncReadWriteBinding getAsyncBinding(final OperationContext operationContext) { + return getAsyncBinding(getAsyncCluster(), ReadPreference.primary(), operationContext); + } + + public static AsyncReadWriteBinding getAsyncBinding(final ReadPreference readPreference) { + return getAsyncBinding(getAsyncCluster(), readPreference, OPERATION_CONTEXT); + } + + public static AsyncReadWriteBinding getAsyncBinding( + final Cluster cluster, + final ReadPreference readPreference, + final OperationContext operationContext) { + if (!ASYNC_BINDING_MAP.containsKey(readPreference)) { + AsyncReadWriteBinding binding = new AsyncSessionBinding(new AsyncClusterBinding(cluster, readPreference, ReadConcern.DEFAULT, + operationContext)); + ASYNC_BINDING_MAP.put(readPreference, binding); + } + AsyncReadWriteBinding readWriteBinding = ASYNC_BINDING_MAP.get(readPreference); + return new AsyncOperationContextBinding(readWriteBinding, + operationContext.withSessionContext(readWriteBinding.getOperationContext().getSessionContext())); + } + + public static synchronized Cluster getCluster() { + if (cluster == null) { + cluster = createCluster(new SocketStreamFactory(new DefaultInetAddressResolver(), getSocketSettings(), getSslSettings())); + } + return cluster; + } + + public static synchronized Cluster getAsyncCluster() { + if (asyncCluster == null) { + asyncCluster = createCluster(getAsyncStreamFactory()); + } + return asyncCluster; + } + + public static Cluster createCluster(final StreamFactory streamFactory) { + return createCluster(getConnectionString(), streamFactory); + } + + + public static Cluster createCluster(final MongoCredential 
credential) { + return createCluster(credential, getStreamFactory()); + } + + public static Cluster createAsyncCluster(final MongoCredential credential) { + return createCluster(credential, getAsyncStreamFactory()); + } + + private static Cluster createCluster(final MongoCredential credential, final StreamFactory streamFactory) { + return new DefaultClusterFactory().createCluster(ClusterSettings.builder().hosts(asList(getPrimary())).build(), + ServerSettings.builder().build(), + ConnectionPoolSettings.builder().maxSize(1).build(), InternalConnectionPoolSettings.builder().build(), + TIMEOUT_SETTINGS.connectionOnly(), streamFactory, TIMEOUT_SETTINGS.connectionOnly(), streamFactory, credential, + LoggerSettings.builder().build(), null, null, null, Collections.emptyList(), getServerApi(), null); + } + + private static Cluster createCluster(final ConnectionString connectionString, final StreamFactory streamFactory) { + MongoClientSettings mongoClientSettings = MongoClientSettings.builder().applyConnectionString(connectionString).build(); + + return new DefaultClusterFactory().createCluster(mongoClientSettings.getClusterSettings(), + mongoClientSettings.getServerSettings(), mongoClientSettings.getConnectionPoolSettings(), + InternalConnectionPoolSettings.builder().build(), TimeoutSettings.create(mongoClientSettings).connectionOnly(), + streamFactory, TimeoutSettings.createHeartbeatSettings(mongoClientSettings).connectionOnly(), + new SocketStreamFactory(new DefaultInetAddressResolver(), SocketSettings.builder().readTimeout(5, SECONDS).build(), + getSslSettings(connectionString)), + connectionString.getCredential(), + LoggerSettings.builder().build(), null, null, null, + connectionString.getCompressorList(), getServerApi(), null); + } + + public static StreamFactory getStreamFactory() { + return new SocketStreamFactory(new DefaultInetAddressResolver(), SocketSettings.builder().build(), getSslSettings()); + } + + public static StreamFactory getAsyncStreamFactory() { + TransportSettings transportSettings = getOverriddenTransportSettings(); + if (transportSettings == null) { // use NIO2 + if (getSslSettings().isEnabled()) { + return new TlsChannelStreamFactoryFactory(new DefaultInetAddressResolver()).create(getSocketSettings(), getSslSettings()); + } else { + return new AsynchronousSocketChannelStreamFactory(new DefaultInetAddressResolver(), getSocketSettings(), getSslSettings()); + } + } else { + StreamFactoryFactory overriddenStreamFactoryFactory = NettyStreamFactoryFactory.builder() + .applySettings((NettyTransportSettings) transportSettings) + .inetAddressResolver(new DefaultInetAddressResolver()) + .build(); + return assertNotNull(overriddenStreamFactoryFactory).create(getSocketSettings(), getSslSettings()); + } + } + + @Nullable + public static TransportSettings getOverriddenTransportSettings() { + String asyncTransport = System.getProperty("org.mongodb.test.async.transport", "nio2"); + + if (nettyTransportSettings == null && asyncTransport.equals("netty")) { + NettyTransportSettings.Builder builder = TransportSettings.nettyBuilder(); + String sslProvider = System.getProperty("org.mongodb.test.netty.ssl.provider"); + if (sslProvider != null) { + SslContext sslContext; + try { + sslContext = SslContextBuilder.forClient() + .sslProvider(SslProvider.valueOf(sslProvider)) + .build(); + } catch (SSLException e) { + throw new MongoClientException("Unable to create Netty SslContext", e); + } + builder.sslContext(sslContext); + } + nettyTransportSettings = builder.build(); + } + return 
nettyTransportSettings; + } + + private static SocketSettings getSocketSettings() { + return SocketSettings.builder().applyConnectionString(getConnectionString()).build(); + } + + public static SslSettings getSslSettings() { + return getSslSettings(getConnectionString()); + } + + public static SslSettings getSslSettings(final ConnectionString connectionString) { + return SslSettings.builder().applyConnectionString(connectionString).build(); + } + + public static ServerDescription getPrimaryServerDescription() { + List<ServerDescription> serverDescriptions = getPrimaries(getClusterDescription(getCluster())); + while (serverDescriptions.isEmpty()) { + sleep(100); + serverDescriptions = getPrimaries(getClusterDescription(getCluster())); + } + return serverDescriptions.get(0); + } + + public static ServerAddress getPrimary() { + return getPrimaryServerDescription().getAddress(); + } + + public static long getPrimaryRTT() { + return MILLISECONDS.convert(getPrimaryServerDescription().getRoundTripTimeNanos(), NANOSECONDS); + } + + public static ServerAddress getSecondary() { + List<ServerDescription> serverDescriptions = getSecondaries(getClusterDescription(getCluster())); + while (serverDescriptions.isEmpty()) { + sleep(100); + serverDescriptions = getSecondaries(getClusterDescription(getCluster())); + } + return serverDescriptions.get(0).getAddress(); + } + + public static void sleep(final int sleepMS) { + try { + Thread.sleep(sleepMS); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + + @Nullable + public static MongoCredential getCredential() { + return getConnectionString().getCredential(); + } + + public static String getLoginContextName() { + return System.getProperty("org.mongodb.test.gssapi.login.context.name", "com.sun.security.jgss.krb5.initiate"); + } + + @Nullable + public static MongoCredentialWithCache getCredentialWithCache() { + return getConnectionString().getCredential() == null ?
null : new MongoCredentialWithCache(getConnectionString().getCredential()); + } + + public static BsonDocument getServerParameters() { + if (serverParameters == null) { + serverParameters = new CommandReadOperation<>("admin", + new BsonDocument("getParameter", new BsonString("*")), new BsonDocumentCodec()) + .execute(getBinding()); + } + return serverParameters; + } + + public static boolean isDiscoverableReplicaSet() { + return clusterIsType(REPLICA_SET) && getClusterConnectionMode() == MULTIPLE; + } + + public static boolean isSharded() { + return clusterIsType(SHARDED); + } + + public static boolean isStandalone() { + return clusterIsType(STANDALONE); + } + + public static boolean isLoadBalanced() { + return getClusterConnectionMode() == LOAD_BALANCED; + } + + public static boolean isAuthenticated() { + return getConnectionString().getCredential() != null; + } + + public static boolean isClientSideEncryptionTest() { + return !getEnv("AWS_ACCESS_KEY_ID", "").isEmpty(); + } + + public static boolean isAtlasSearchTest() { + return System.getProperty(ATLAS_SEARCH_TEST_SYSTEM_PROPERTY_NAME) != null; + } + + public static void enableMaxTimeFailPoint() { + configureFailPoint(BsonDocument.parse("{configureFailPoint: 'maxTimeAlwaysTimeOut', mode: 'alwaysOn'}")); + } + + public static void disableMaxTimeFailPoint() { + disableFailPoint("maxTimeAlwaysTimeOut"); + } + + public static void enableOnPrimaryTransactionalWriteFailPoint(final BsonValue failPointData) { + BsonDocument command = BsonDocument.parse("{ configureFailPoint: 'onPrimaryTransactionalWrite'}"); + + if (failPointData.isDocument() && failPointData.asDocument().containsKey("mode")) { + for (Map.Entry<String, BsonValue> keyValue : failPointData.asDocument().entrySet()) { + command.append(keyValue.getKey(), keyValue.getValue()); + } + } else { + command.append("mode", failPointData); + } + configureFailPoint(command); + } + + public static void disableOnPrimaryTransactionalWriteFailPoint() { + disableFailPoint("onPrimaryTransactionalWrite"); + } + + public static void configureFailPoint(final BsonDocument failPointDocument) { + assumeThat(isSharded(), is(false)); + boolean failsPointsSupported = true; + if (!isSharded()) { + try { + new CommandReadOperation<>("admin", failPointDocument, new BsonDocumentCodec()) + .execute(getBinding()); + } catch (MongoCommandException e) { + if (e.getErrorCode() == COMMAND_NOT_FOUND_ERROR_CODE) { + failsPointsSupported = false; + } + } + assumeTrue("configureFailPoint is not enabled", failsPointsSupported); + } + } + + public static void disableFailPoint(final String failPoint) { + if (!isSharded()) { + BsonDocument failPointDocument = new BsonDocument("configureFailPoint", new BsonString(failPoint)) + .append("mode", new BsonString("off")); + try { + new CommandReadOperation<>("admin", failPointDocument, new BsonDocumentCodec()) + .execute(getBinding()); + } catch (MongoCommandException e) { + // ignore + } + } + } + + @SuppressWarnings("overloads") + public static <T> T executeSync(final WriteOperation<T> op) { + return executeSync(op, getBinding()); + } + + @SuppressWarnings("overloads") + public static <T> T executeSync(final WriteOperation<T> op, final ReadWriteBinding binding) { + return op.execute(binding); + } + + @SuppressWarnings("overloads") + public static <T> T executeSync(final ReadOperation<T> op) { + return executeSync(op, getBinding()); + } + + @SuppressWarnings("overloads") + public static <T> T executeSync(final ReadOperation<T> op, final ReadWriteBinding binding) { + return op.execute(binding); + } + +
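A minimal sketch of the fail-point helpers above in a test (illustrative only, not part of this diff; the 'failCommand' fail point assumes a server that supports it, and the operation under test is elided):

import org.bson.BsonDocument;

import static com.mongodb.ClusterFixture.configureFailPoint;
import static com.mongodb.ClusterFixture.disableFailPoint;

// configureFailPoint skips the test via JUnit assumptions when fail points
// are unsupported; always disable the fail point in a finally block.
configureFailPoint(BsonDocument.parse(
        "{configureFailPoint: 'failCommand', mode: {times: 1}, data: {failCommands: ['find'], errorCode: 91}}"));
try {
    // run the operation under test
} finally {
    disableFailPoint("failCommand");
}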
+    @SuppressWarnings("overloads")
+    public static <T> T executeAsync(final WriteOperation<T> op) throws Throwable {
+        return executeAsync(op, getAsyncBinding());
+    }
+
+    @SuppressWarnings("overloads")
+    public static <T> T executeAsync(final WriteOperation<T> op, final AsyncWriteBinding binding) throws Throwable {
+        FutureResultCallback<T> futureResultCallback = new FutureResultCallback<>();
+        op.executeAsync(binding, futureResultCallback);
+        return futureResultCallback.get(TIMEOUT, SECONDS);
+    }
+
+    @SuppressWarnings("overloads")
+    public static <T> T executeAsync(final ReadOperation<T> op) throws Throwable {
+        return executeAsync(op, getAsyncBinding());
+    }
+
+    @SuppressWarnings("overloads")
+    public static <T> T executeAsync(final ReadOperation<T> op, final AsyncReadBinding binding) throws Throwable {
+        FutureResultCallback<T> futureResultCallback = new FutureResultCallback<>();
+        op.executeAsync(binding, futureResultCallback);
+        return futureResultCallback.get(TIMEOUT, SECONDS);
+    }
+
+    public static <T> void loopCursor(final List<AsyncBatchCursor<T>> batchCursors, final Block<T> block) throws Throwable {
+        List<FutureResultCallback<Void>> futures = new ArrayList<>();
+        for (AsyncBatchCursor<T> batchCursor : batchCursors) {
+            FutureResultCallback<Void> futureResultCallback = new FutureResultCallback<>();
+            futures.add(futureResultCallback);
+            loopCursor(batchCursor, block, futureResultCallback);
+        }
+        for (int i = 0; i < batchCursors.size(); i++) {
+            futures.get(i).get(TIMEOUT, SECONDS);
+        }
+    }
+
+    public static <T> void loopCursor(final ReadOperation<AsyncBatchCursor<T>> op, final Block<T> block) throws Throwable {
+        FutureResultCallback<Void> futureResultCallback = new FutureResultCallback<>();
+        loopCursor(executeAsync(op), block, futureResultCallback);
+        futureResultCallback.get(TIMEOUT, SECONDS);
+    }
+
+    public static <T> void loopCursor(final AsyncBatchCursor<T> batchCursor, final Block<T> block,
+                                      final SingleResultCallback<Void> callback) {
+        if (batchCursor.isClosed()) {
+            callback.onResult(null, null);
+            return;
+        }
+        batchCursor.next((results, t) -> {
+            if (t != null || results == null) {
+                batchCursor.close();
+                callback.onResult(null, t);
+            } else {
+                try {
+                    for (T result : results) {
+                        block.apply(result);
+                    }
+                    loopCursor(batchCursor, block, callback);
+                } catch (Throwable tr) {
+                    batchCursor.close();
+                    callback.onResult(null, tr);
+                }
+            }
+        });
+    }
+
+    public static <T> List<T> collectCursorResults(final AsyncBatchCursor<T> batchCursor) throws Throwable {
+        List<T> results = new ArrayList<>();
+        FutureResultCallback<Void> futureResultCallback = new FutureResultCallback<>();
+        loopCursor(batchCursor, t -> results.add(t), futureResultCallback);
+        futureResultCallback.get(TIMEOUT, SECONDS);
+        return results;
+    }
+
+    public static <T> List<T> collectCursorResults(final BatchCursor<T> batchCursor) {
+        List<T> results = new ArrayList<>();
+        while (batchCursor.hasNext()) {
+            results.addAll(batchCursor.next());
+        }
+        return results;
+    }
+
+    public static AsyncConnectionSource getWriteConnectionSource(final AsyncReadWriteBinding binding) throws Throwable {
+        FutureResultCallback<AsyncConnectionSource> futureResultCallback = new FutureResultCallback<>();
+        binding.getWriteConnectionSource(futureResultCallback);
+        return futureResultCallback.get(TIMEOUT, SECONDS);
+    }
+
+    public static AsyncConnectionSource getReadConnectionSource(final AsyncReadWriteBinding binding) throws Throwable {
+        FutureResultCallback<AsyncConnectionSource> futureResultCallback = new FutureResultCallback<>();
+        binding.getReadConnectionSource(futureResultCallback);
+        return futureResultCallback.get(TIMEOUT, SECONDS);
+    }
+
+    public static AsyncConnection getConnection(final AsyncConnectionSource source) throws Throwable {
+        FutureResultCallback<AsyncConnection> futureResultCallback = new FutureResultCallback<>();
+        source.getConnection(futureResultCallback);
+        return futureResultCallback.get(TIMEOUT, SECONDS);
+    }
+
+    public static synchronized void checkReferenceCountReachesTarget(final ReferenceCounted referenceCounted, final int target) {
+        int count = getReferenceCountAfterTimeout(referenceCounted, target);
+        if (count != target) {
+            throw new MongoTimeoutException(
+                    format("Timed out waiting for reference count to drop to %d. Now at %d for %s", target, count,
+                            referenceCounted));
+        }
+    }
+
+    public static int getReferenceCountAfterTimeout(final ReferenceCounted referenceCounted, final int target) {
+        long startTime = System.currentTimeMillis();
+        int count = referenceCounted.getCount();
+        while (count > target) {
+            try {
+                if (System.currentTimeMillis() > startTime + TIMEOUT_DURATION.toMillis()) {
+                    return count;
+                }
+                Thread.sleep(10);
+                count = referenceCounted.getCount();
+            } catch (InterruptedException e) {
+                throw interruptAndCreateMongoInterruptedException("Interrupted", e);
+            }
+        }
+        return count;
+    }
+
+    public static ClusterSettings.Builder setDirectConnection(final ClusterSettings.Builder builder) {
+        return builder.mode(ClusterConnectionMode.SINGLE).hosts(singletonList(getPrimary()));
+    }
+
+}
diff --git a/driver-core/src/test/functional/com/mongodb/JsonTestServerVersionChecker.java b/driver-core/src/test/functional/com/mongodb/JsonTestServerVersionChecker.java
new file mode 100644
index 00000000000..e3db5821b69
--- /dev/null
+++ b/driver-core/src/test/functional/com/mongodb/JsonTestServerVersionChecker.java
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb;
+
+import com.mongodb.connection.ServerVersion;
+import org.bson.BsonArray;
+import org.bson.BsonDocument;
+import org.bson.BsonString;
+import org.bson.BsonValue;
+
+import java.util.List;
+
+import static com.mongodb.ClusterFixture.getServerVersion;
+import static com.mongodb.ClusterFixture.getVersionList;
+import static com.mongodb.ClusterFixture.isAuthenticated;
+import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet;
+import static com.mongodb.ClusterFixture.isLoadBalanced;
+import static com.mongodb.ClusterFixture.isSharded;
+import static com.mongodb.ClusterFixture.isStandalone;
+import static java.lang.String.format;
+import static java.util.Arrays.asList;
+
+public final class JsonTestServerVersionChecker {
+    private static final List<String> TOPOLOGY_TYPES = asList("sharded", "sharded-replicaset", "replicaset", "single", "load-balanced");
+
+    public static boolean skipTest(final BsonDocument testDocument, final BsonDocument testDefinition) {
+        return skipTest(testDocument, testDefinition, getServerVersion());
+    }
+
+    public static boolean skipTest(final BsonDocument testDocument, final BsonDocument testDefinition, final ServerVersion serverVersion) {
+        return !(canRunTest(testDocument, serverVersion) && canRunTest(testDefinition, serverVersion));
+    }
+
+    private static boolean canRunTest(final BsonDocument document, final ServerVersion serverVersion) {
+        if (!serverlessMatches(document.getString("serverless", new BsonString("allow")).getValue())) {
+            return false;
+        }
+
+        if (document.containsKey("minServerVersion")
+                && serverVersion.compareTo(getMinServerVersionForField("minServerVersion", document)) < 0) {
+            return false;
+        }
+        if (document.containsKey("maxServerVersion")
+                && serverVersion.compareTo(getMaxServerVersionForField("maxServerVersion", document)) > 0) {
+            return false;
+        }
+        if (document.containsKey("topology") && !topologyMatches(document.getArray("topology"))) {
+            return false;
+        }
+        if (document.containsKey("topologies") && !topologyMatches(document.getArray("topologies"))) {
+            return false;
+        }
+        if (document.containsKey("authEnabled") && (isAuthenticated() != document.getBoolean("authEnabled").getValue())) {
+            return false;
+        }
+
+        if (document.containsKey("runOn")) {
+            return canRunTest(document.getArray("runOn"), serverVersion);
+        }
+
+        // Legacy "ignore_if_*" skip fields used by older spec test files
+        if (document.containsKey("ignore_if_server_version_less_than")
+                && serverVersion.compareTo(getMinServerVersionForField("ignore_if_server_version_less_than", document)) < 0) {
+            return false;
+        }
+        if (document.containsKey("ignore_if_server_version_greater_than")
+                && serverVersion.compareTo(getMaxServerVersionForField("ignore_if_server_version_greater_than", document)) > 0) {
+            return false;
+        }
+        if (document.containsKey("ignore_if_topology_type") && topologyMatches(document.getArray("ignore_if_topology_type"))) {
+            return false;
+        }
+
+        return true;
+    }
+
+    private static boolean canRunTest(final BsonArray runOn, final ServerVersion serverVersion) {
+        return runOn.stream().anyMatch(v -> canRunTest(v.asDocument(), serverVersion));
+    }
+
+    public static boolean topologyMatches(final BsonArray topologyTypes) {
+        for (BsonValue type : topologyTypes) {
+            String typeString = type.asString().getValue();
+            if ((typeString.equals("sharded") || typeString.equals("sharded-replicaset")) && isSharded()) {
+                return true;
+            } else if (typeString.equals("replicaset") && isDiscoverableReplicaSet()) {
+                return true;
+            } else if (typeString.equals("single") && isStandalone())
{ + return true; + } else if (typeString.equals("load-balanced") && isLoadBalanced()) { + return true; + } else if (!TOPOLOGY_TYPES.contains(typeString)) { + throw new IllegalArgumentException(format("Unexpected topology type: '%s'", typeString)); + } + } + return false; + } + + public static boolean serverlessMatches(final String serverlessRequirement) { + switch (serverlessRequirement) { + case "require": + return false; + case "forbid": + case "allow": + return true; + default: + throw new UnsupportedOperationException("Unsupported serverless requirement value: " + serverlessRequirement); + } + } + + public static ServerVersion getMinServerVersionForField(final String fieldName, final BsonDocument document) { + return getMinServerVersion(document.getString(fieldName).getValue()); + } + + public static ServerVersion getMinServerVersion(final String serverVersion) { + return new ServerVersion(getVersionList(serverVersion)); + } + + public static ServerVersion getMaxServerVersionForField(final String fieldName, final BsonDocument document) { + return getMaxServerVersionForField(document.getString(fieldName).getValue()); + } + + public static ServerVersion getMaxServerVersionForField(final String serverVersion) { + List versionList = getVersionList(serverVersion); + if (versionList.size() > 2 && versionList.get(2).equals(0)) { + versionList = asList(versionList.get(0), versionList.get(1), Integer.MAX_VALUE); + } + return new ServerVersion(versionList); + } + + + private JsonTestServerVersionChecker() { + } +} diff --git a/driver-core/src/test/functional/com/mongodb/KerberosSubjectProviderTest.java b/driver-core/src/test/functional/com/mongodb/KerberosSubjectProviderTest.java new file mode 100644 index 00000000000..944226a1be3 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/KerberosSubjectProviderTest.java @@ -0,0 +1,42 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb; + +import org.junit.jupiter.api.Test; + +import javax.security.auth.Subject; +import javax.security.auth.login.LoginException; + +import static com.mongodb.AuthenticationMechanism.GSSAPI; +import static com.mongodb.ClusterFixture.getCredential; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +class KerberosSubjectProviderTest { + + @Test + void testThatGetSubjectCachesTheSubject() throws LoginException { + assumeTrue(getCredential() != null && getCredential().getAuthenticationMechanism() == GSSAPI); + + KerberosSubjectProvider provider = new KerberosSubjectProvider(); + Subject firstSubject = provider.getSubject(); + assertNotNull(firstSubject); + Subject nextSubject = provider.getSubject(); + assertSame(firstSubject, nextSubject); + } +} diff --git a/driver-core/src/test/functional/com/mongodb/OperationFunctionalSpecification.groovy b/driver-core/src/test/functional/com/mongodb/OperationFunctionalSpecification.groovy new file mode 100644 index 00000000000..dcefaaa65ba --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/OperationFunctionalSpecification.groovy @@ -0,0 +1,535 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb + +import com.mongodb.async.FutureResultCallback +import com.mongodb.client.model.Collation +import com.mongodb.client.model.CollationAlternate +import com.mongodb.client.model.CollationCaseFirst +import com.mongodb.client.model.CollationMaxVariable +import com.mongodb.client.model.CollationStrength +import com.mongodb.client.test.CollectionHelper +import com.mongodb.client.test.Worker +import com.mongodb.client.test.WorkerCodec +import com.mongodb.connection.ConnectionDescription +import com.mongodb.connection.ServerConnectionState +import com.mongodb.connection.ServerDescription +import com.mongodb.connection.ServerType +import com.mongodb.connection.ServerVersion +import com.mongodb.internal.binding.AsyncConnectionSource +import com.mongodb.internal.binding.AsyncReadBinding +import com.mongodb.internal.binding.AsyncReadWriteBinding +import com.mongodb.internal.binding.AsyncSessionBinding +import com.mongodb.internal.binding.AsyncSingleConnectionBinding +import com.mongodb.internal.binding.AsyncWriteBinding +import com.mongodb.internal.binding.ConnectionSource +import com.mongodb.internal.binding.ReadBinding +import com.mongodb.internal.binding.ReadWriteBinding +import com.mongodb.internal.binding.SessionBinding +import com.mongodb.internal.binding.SingleConnectionBinding +import com.mongodb.internal.binding.WriteBinding +import com.mongodb.internal.bulk.InsertRequest +import com.mongodb.internal.connection.AsyncConnection +import com.mongodb.internal.connection.Connection +import com.mongodb.internal.connection.ServerHelper +import com.mongodb.internal.connection.SplittablePayload +import com.mongodb.internal.operation.MixedBulkWriteOperation +import com.mongodb.internal.operation.ReadOperation +import com.mongodb.internal.operation.WriteOperation +import com.mongodb.internal.session.SessionContext +import org.bson.BsonDocument +import org.bson.Document +import org.bson.codecs.DocumentCodec +import spock.lang.Shared +import spock.lang.Specification + +import java.util.concurrent.TimeUnit + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ClusterFixture.TIMEOUT +import static com.mongodb.ClusterFixture.checkReferenceCountReachesTarget +import static com.mongodb.ClusterFixture.executeAsync +import static com.mongodb.ClusterFixture.getAsyncBinding +import static com.mongodb.ClusterFixture.getBinding +import static com.mongodb.ClusterFixture.getPrimary +import static com.mongodb.ClusterFixture.loopCursor +import static com.mongodb.WriteConcern.ACKNOWLEDGED +import static com.mongodb.internal.operation.OperationUnitSpecification.getMaxWireVersionForServerVersion + +class OperationFunctionalSpecification extends Specification { + + def setup() { + setupInternal() + } + + protected void setupInternal() { + ServerHelper.checkPool(getPrimary()) + CollectionHelper.drop(getNamespace()) + } + + void cleanup() { + cleanupInternal() + } + + protected void cleanupInternal() { + CollectionHelper.drop(getNamespace()) + checkReferenceCountReachesTarget(getBinding(), 1) + checkReferenceCountReachesTarget(getAsyncBinding(), 1) + ServerHelper.checkPool(getPrimary()) + } + + String getDatabaseName() { + ClusterFixture.getDefaultDatabaseName() + } + + String getCollectionName() { + getClass().getName() + } + + MongoNamespace getNamespace() { + new MongoNamespace(getDatabaseName(), getCollectionName()) + } + + void acknowledgeWrite(final SingleConnectionBinding binding) { + new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(new 
BsonDocument())], true, + ACKNOWLEDGED, false).execute(binding) + binding.release() + } + + void acknowledgeWrite(final AsyncSingleConnectionBinding binding) { + executeAsync(new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(new BsonDocument())], + true, ACKNOWLEDGED, false), binding) + binding.release() + } + + CollectionHelper getCollectionHelper() { + getCollectionHelper(getNamespace()) + } + + CollectionHelper getCollectionHelper(MongoNamespace namespace) { + new CollectionHelper(new DocumentCodec(), namespace) + } + + CollectionHelper getWorkerCollectionHelper() { + new CollectionHelper(new WorkerCodec(), getNamespace()) + } + + def execute(operation) { + execute(operation, false) + } + + def execute(operation, boolean async) { + def executor = async ? ClusterFixture.&executeAsync : ClusterFixture.&executeSync + executor(operation) + } + + def executeWithSession(operation, boolean async) { + def executor = async ? ClusterFixture.&executeAsync : ClusterFixture.&executeSync + def binding = async ? + new AsyncSessionBinding(getAsyncBinding()) + : new SessionBinding(getBinding()) + executor(operation, binding) + } + + def execute(operation, ReadWriteBinding binding) { + ClusterFixture.executeSync(operation, binding) + } + + def execute(operation, AsyncReadWriteBinding binding) { + ClusterFixture.executeAsync(operation, binding) + } + + def executeAndCollectBatchCursorResults(operation, boolean async) { + def cursor = execute(operation, async) + def results = [] + if (async) { + loopCursor([cursor], new Block(){ + void apply(Object batch) { + results.addAll(batch) + } + }) + } else { + while (cursor.hasNext()) { + results.addAll(cursor.next()) + } + } + results + } + + def next(cursor, boolean async, int minimumCount) { + next(cursor, async, false, minimumCount) + } + + def next(cursor, boolean async, boolean callHasNextBeforeNext, int minimumCount) { + List retVal = [] + + while (retVal.size() < minimumCount) { + retVal.addAll(doNext(cursor, async, callHasNextBeforeNext)) + } + + retVal + } + + def next(cursor, boolean async) { + doNext(cursor, async, false) + } + + def doNext(cursor, boolean async, boolean callHasNextBeforeNext) { + if (async) { + def futureResultCallback = new FutureResultCallback>() + cursor.next(futureResultCallback) + futureResultCallback.get(TIMEOUT, TimeUnit.SECONDS) + } else { + if (callHasNextBeforeNext) { + cursor.hasNext() + } + cursor.next() + } + } + + void testOperation(Map params) { + params.async = params.async != null ? params.async : false + params.result = params.result != null ? params.result : null + params.checkCommand = params.checkCommand != null ? params.checkCommand : true + params.checkSecondaryOk = params.checkSecondaryOk != null ? params.checkSecondaryOk : false + params.readPreference = params.readPreference != null ? params.readPreference : ReadPreference.primary() + params.retryable = params.retryable != null ? params.retryable : false + params.serverType = params.serverType != null ? 
params.serverType : ServerType.STANDALONE + testOperation(params.operation, params.serverVersion, params.expectedCommand, params.async, params.result, params.checkCommand, + params.checkSecondaryOk, params.readPreference, params.retryable, params.serverType) + } + + void testOperationInTransaction(operation, List serverVersion, BsonDocument expectedCommand, boolean async, result = null, + boolean checkCommand = true, boolean checkSecondaryOk = false, + ReadPreference readPreference = ReadPreference.primary(), boolean retryable = false, + ServerType serverType = ServerType.STANDALONE) { + testOperation(operation, serverVersion, ReadConcern.DEFAULT, expectedCommand, async, result, checkCommand, checkSecondaryOk, + readPreference, retryable, serverType, true) + } + + void testOperation(operation, List serverVersion, BsonDocument expectedCommand, boolean async, result = null, + boolean checkCommand = true, boolean checkSecondaryOk = false, + ReadPreference readPreference = ReadPreference.primary(), boolean retryable = false, + ServerType serverType = ServerType.STANDALONE, Boolean activeTransaction = false) { + testOperation(operation, serverVersion, ReadConcern.DEFAULT, expectedCommand, async, result, checkCommand, checkSecondaryOk, + readPreference, retryable, serverType, activeTransaction) + } + + void testOperation(operation, List serverVersion, ReadConcern readConcern, BsonDocument expectedCommand, boolean async, + result = null, boolean checkCommand = true, boolean checkSecondaryOk = false, + ReadPreference readPreference = ReadPreference.primary(), boolean retryable = false, + ServerType serverType = ServerType.STANDALONE, Boolean activeTransaction = false) { + def test = async ? this.&testAsyncOperation : this.&testSyncOperation + test(operation, serverVersion, readConcern, result, checkCommand, expectedCommand, checkSecondaryOk, readPreference, retryable, + serverType, activeTransaction) + } + + void testOperationRetries(operation, List serverVersion, BsonDocument expectedCommand, boolean async, result = null, + Boolean activeTransaction = false) { + testOperation(operation, serverVersion, expectedCommand, async, result, true, false, ReadPreference.primary(), true, + ServerType.REPLICA_SET_PRIMARY, activeTransaction) + } + + void testRetryableOperationThrowsOriginalError(operation, List> serverVersions, List serverTypes, + Throwable exception, boolean async, int expectedConnectionReleaseCount = 2) { + def test = async ? this.&testAyncRetryableOperationThrows : this.&testSyncRetryableOperationThrows + test(operation, serverVersions as Queue, serverTypes as Queue, exception, expectedConnectionReleaseCount) + } + + void testOperationSecondaryOk(operation, List serverVersion, ReadPreference readPreference, boolean async, result = null) { + def test = async ? this.&testAsyncOperation : this.&testSyncOperation + test(operation, serverVersion, ReadConcern.DEFAULT, result, false, null, true, readPreference) + } + + void testOperationThrows(operation, List serverVersion, boolean async) { + testOperationThrows(operation, serverVersion, ReadConcern.DEFAULT, async) + } + + void testOperationThrows(operation, List serverVersion, ReadConcern readConcern, boolean async) { + def test = async ? 
this.&testAsyncOperation : this.&testSyncOperation + test(operation, serverVersion, readConcern, null, false, null, false, ReadPreference.primary(), + false, ServerType.STANDALONE, false) + } + + def testSyncOperation(operation, List serverVersion, ReadConcern readConcern, result, Boolean checkCommand=true, + BsonDocument expectedCommand=null, Boolean checkSecondaryOk=false, + ReadPreference readPreference=ReadPreference.primary(), Boolean retryable = false, + ServerType serverType = ServerType.STANDALONE, Boolean activeTransaction = false) { + def operationContext = OPERATION_CONTEXT + .withSessionContext(Stub(SessionContext) { + hasActiveTransaction() >> activeTransaction + getReadConcern() >> readConcern + }) + def connection = Mock(Connection) { + _ * getDescription() >> Stub(ConnectionDescription) { + getMaxWireVersion() >> getMaxWireVersionForServerVersion(serverVersion) + getServerType() >> serverType + } + } + + def connectionSource = Stub(ConnectionSource) { + getConnection() >> { + connection + } + getOperationContext() >> operationContext + getReadPreference() >> readPreference + getServerDescription() >> { + def builder = ServerDescription.builder().address(Stub(ServerAddress)).state(ServerConnectionState.CONNECTED) + if (new ServerVersion(serverVersion).compareTo(new ServerVersion(3, 6)) >= 0) { + builder.logicalSessionTimeoutMinutes(42) + } + builder.build() + } + } + def readBinding = Stub(ReadBinding) { + getReadConnectionSource(*_) >> connectionSource + getReadPreference() >> readPreference + getOperationContext() >> operationContext + } + def writeBinding = Stub(WriteBinding) { + getWriteConnectionSource() >> connectionSource + getOperationContext() >> operationContext + } + + if (retryable) { + 1 * connection.command(*_) >> { throw new MongoSocketException('Some socket error', Stub(ServerAddress)) } + } + + if (checkCommand) { + 1 * connection.command(*_) >> { + assert it[1] == expectedCommand + if (it.size() > 6) { + SplittablePayload payload = it[7] + payload.setPosition(payload.size()) + } + result + } + } else if (checkSecondaryOk) { + 1 * connection.command(*_) >> { + it[4] == readPreference + result + } + } + + 0 * connection.command(*_) >> { + // Unexpected Command + result + } + + if (retryable) { + 2 * connection.release() + } else { + 1 * connection.release() + } + if (operation instanceof ReadOperation) { + operation.execute(readBinding) + } else if (operation instanceof WriteOperation) { + operation.execute(writeBinding) + } + } + + def testAsyncOperation(operation = operation, List serverVersion = serverVersion, ReadConcern readConcern, result = null, + Boolean checkCommand = true, BsonDocument expectedCommand = null, Boolean checkSecondaryOk = false, + ReadPreference readPreference = ReadPreference.primary(), Boolean retryable = false, + ServerType serverType = ServerType.STANDALONE, Boolean activeTransaction = false) { + def operationContext = OPERATION_CONTEXT + .withSessionContext(Stub(SessionContext) { + hasActiveTransaction() >> activeTransaction + getReadConcern() >> readConcern + }) + def connection = Mock(AsyncConnection) { + _ * getDescription() >> Stub(ConnectionDescription) { + getMaxWireVersion() >> getMaxWireVersionForServerVersion(serverVersion) + getServerType() >> serverType + } + } + + def connectionSource = Stub(AsyncConnectionSource) { + getConnection(_) >> { it[0].onResult(connection, null) } + getReadPreference() >> readPreference + getOperationContext() >> operationContext + getServerDescription() >> { + def builder = 
ServerDescription.builder().address(Stub(ServerAddress)).state(ServerConnectionState.CONNECTED) + if (new ServerVersion(serverVersion).compareTo(new ServerVersion(3, 6)) >= 0) { + builder.logicalSessionTimeoutMinutes(42) + } + builder.build() + } + } + def readBinding = Stub(AsyncReadBinding) { + getReadConnectionSource(*_) >> { it.last().onResult(connectionSource, null) } + getReadPreference() >> readPreference + getOperationContext() >> operationContext + } + def writeBinding = Stub(AsyncWriteBinding) { + getWriteConnectionSource(_) >> { it[0].onResult(connectionSource, null) } + getOperationContext() >> operationContext + } + def callback = new FutureResultCallback() + + if (retryable) { + 1 * connection.commandAsync(*_) >> { + it.last().onResult(null, new MongoSocketException('Some socket error', Stub(ServerAddress))) + } + } + + if (checkCommand) { + 1 * connection.commandAsync(*_) >> { + assert it[1] == expectedCommand + if (it.size() > 7) { + SplittablePayload payload = it[7] + payload.setPosition(payload.size()) + } + it.last().onResult(result, null) + } + } else if (checkSecondaryOk) { + 1 * connection.commandAsync(*_) >> { + it[4] == readPreference + it.last().onResult(result, null) + } + } + + 0 * connection.commandAsync(*_) >> { + // Unexpected Command + it.last().onResult(result, null) + } + + if (retryable) { + 2 * connection.release() + } else { + 1 * connection.release() + } + + if (operation instanceof ReadOperation) { + operation.executeAsync(readBinding, callback) + } else if (operation instanceof WriteOperation) { + operation.executeAsync(writeBinding, callback) + } + try { + callback.get(1000, TimeUnit.MILLISECONDS) + } catch (MongoException e) { + throw e.cause + } + } + + def testSyncRetryableOperationThrows(operation, Queue> serverVersions, Queue serverTypes, + Throwable exception, int expectedConnectionReleaseCount) { + def connection = Mock(Connection) { + _ * getDescription() >> Stub(ConnectionDescription) { + getMaxWireVersion() >> { + getMaxWireVersionForServerVersion(serverVersions.poll()) + } + getServerType() >> { + serverTypes.poll() + } + } + } + + def operationContext = OPERATION_CONTEXT.withSessionContext( + Stub(SessionContext) { + hasSession() >> true + hasActiveTransaction() >> false + getReadConcern() >> ReadConcern.DEFAULT + }) + + def connectionSource = Stub(ConnectionSource) { + getConnection() >> { + if (serverVersions.isEmpty()){ + throw new MongoSocketOpenException('No Server', new ServerAddress(), new Exception('no server')) + } else { + connection + } + } + getOperationContext() >> operationContext + } + def writeBinding = Stub(WriteBinding) { + getWriteConnectionSource() >> connectionSource + getOperationContext() >> operationContext + } + + 1 * connection.command(*_) >> { + throw exception + } + + expectedConnectionReleaseCount * connection.release() + operation.execute(writeBinding) + } + + def testAyncRetryableOperationThrows(operation, Queue> serverVersions, Queue serverTypes, + Throwable exception, int expectedConnectionReleaseCount) { + def connection = Mock(AsyncConnection) { + _ * getDescription() >> Stub(ConnectionDescription) { + getMaxWireVersion() >> { + getMaxWireVersionForServerVersion(serverVersions.poll()) + } + getServerType() >> { + serverTypes.poll() + } + } + } + + def operationContext = OPERATION_CONTEXT.withSessionContext( + Stub(SessionContext) { + hasSession() >> true + hasActiveTransaction() >> false + getReadConcern() >> ReadConcern.DEFAULT + }) + + def connectionSource = Stub(AsyncConnectionSource) { + 
getConnection(_) >> { + if (serverVersions.isEmpty()) { + it[0].onResult(null, + new MongoSocketOpenException('No Server', new ServerAddress(), new Exception('no server'))) + } else { + it[0].onResult(connection, null) + } + } + getOperationContext() >> operationContext + } + + def writeBinding = Stub(AsyncWriteBinding) { + getWriteConnectionSource(_) >> { it[0].onResult(connectionSource, null) } + getOperationContext() >> operationContext + } + def callback = new FutureResultCallback() + + 1 * connection.commandAsync(*_) >> { it.last().onResult(null, exception) } + expectedConnectionReleaseCount * connection.release() + + operation.executeAsync(writeBinding, callback) + callback.get(1000, TimeUnit.MILLISECONDS) + } + + @Shared + Collation defaultCollation = Collation.builder() + .locale('en') + .caseLevel(true) + .collationCaseFirst(CollationCaseFirst.OFF) + .collationStrength(CollationStrength.IDENTICAL) + .numericOrdering(true) + .collationAlternate(CollationAlternate.SHIFTED) + .collationMaxVariable(CollationMaxVariable.SPACE) + .normalization(true) + .backwards(true) + .build() + + @Shared + Collation caseInsensitiveCollation = Collation.builder() + .locale('en') + .collationStrength(CollationStrength.SECONDARY) + .build() +} diff --git a/driver-core/src/test/functional/com/mongodb/client/CommandMonitoringTestHelper.java b/driver-core/src/test/functional/com/mongodb/client/CommandMonitoringTestHelper.java new file mode 100644 index 00000000000..23be2ccc3ab --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/client/CommandMonitoringTestHelper.java @@ -0,0 +1,362 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.event.CommandEvent;
+import com.mongodb.event.CommandFailedEvent;
+import com.mongodb.event.CommandStartedEvent;
+import com.mongodb.event.CommandSucceededEvent;
+import com.mongodb.lang.Nullable;
+import org.bson.BsonArray;
+import org.bson.BsonBoolean;
+import org.bson.BsonDocument;
+import org.bson.BsonDocumentWriter;
+import org.bson.BsonDouble;
+import org.bson.BsonInt32;
+import org.bson.BsonInt64;
+import org.bson.BsonString;
+import org.bson.BsonType;
+import org.bson.BsonValue;
+import org.bson.codecs.BsonDocumentCodec;
+import org.bson.codecs.BsonValueCodecProvider;
+import org.bson.codecs.Codec;
+import org.bson.codecs.EncoderContext;
+import org.bson.codecs.configuration.CodecProvider;
+import org.bson.codecs.configuration.CodecRegistries;
+import org.bson.codecs.configuration.CodecRegistry;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import static com.mongodb.client.CrudTestHelper.replaceTypeAssertionWithActual;
+import static java.util.Arrays.asList;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+public final class CommandMonitoringTestHelper {
+    private static final CodecRegistry CODEC_REGISTRY_HACK = CodecRegistries.fromProviders(new BsonValueCodecProvider(),
+            new CodecProvider() {
+                @Override
+                @SuppressWarnings("unchecked")
+                public <T> Codec<T> get(final Class<T> clazz, final CodecRegistry registry) {
+                    // Use BsonDocumentCodec even for a private sub-class of BsonDocument
+                    if (BsonDocument.class.isAssignableFrom(clazz)) {
+                        return (Codec<T>) new BsonDocumentCodec(registry);
+                    }
+                    return null;
+                }
+            });
+
+    public static List<CommandEvent> getExpectedEvents(final BsonArray expectedEventDocuments, final String databaseName,
+                                                       final BsonDocument operation) {
+        List<CommandEvent> expectedEvents = new ArrayList<>(expectedEventDocuments.size());
+        for (BsonValue expectedEventDocument : expectedEventDocuments) {
+            BsonDocument curExpectedEventDocument = expectedEventDocument.asDocument();
+            String eventType = curExpectedEventDocument.keySet().iterator().next();
+            BsonDocument eventDescriptionDocument = curExpectedEventDocument.getDocument(eventType);
+            CommandEvent commandEvent;
+            String commandName = eventDescriptionDocument.getString("command_name", new BsonString("")).getValue();
+            if (eventType.equals("command_started_event")) {
+                BsonDocument commandDocument = eventDescriptionDocument.getDocument("command");
+                String actualDatabaseName = eventDescriptionDocument.containsKey("database_name")
+                        ? eventDescriptionDocument.getString("database_name").getValue() : databaseName;
+                // If the spec test supplies a $db field in the command, then use that database.
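+                // For example, a command of {"find": "coll", "$db": "db1"} takes precedence over
+                // an event-level "database_name" of "db0".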
+ if (commandDocument.containsKey("$db")) { + actualDatabaseName = commandDocument.getString("$db").getValue(); + } else if (commandName.equals("")) { + commandName = commandDocument.keySet().iterator().next(); + } + + if (isAdminCommand(commandName)) { + actualDatabaseName = "admin"; + } + + // Not clear whether these global fields should be included, but also not clear how to efficiently exclude them + commandDocument.put("$db", new BsonString(actualDatabaseName)); + if (operation != null && operation.containsKey("read_preference")) { + commandDocument.put("$readPreference", operation.getDocument("read_preference")); + } + commandEvent = new CommandStartedEvent(null, 1, 1, null, actualDatabaseName, commandName, + commandDocument); + } else if (eventType.equals("command_succeeded_event")) { + BsonDocument replyDocument = eventDescriptionDocument.get("reply").asDocument(); + commandEvent = new CommandSucceededEvent(null, 1, 1, null, null, commandName, replyDocument, 1); + } else if (eventType.equals("command_failed_event")) { + commandEvent = new CommandFailedEvent(null, 1, 1, null, null, commandName, 1, null); + } else { + throw new UnsupportedOperationException("Unsupported command event type: " + eventType); + } + expectedEvents.add(commandEvent); + } + return expectedEvents; + } + + private static final List ADMIN_COMMANDS = asList("commitTransaction", "abortTransaction", "listDatabases"); + static boolean isAdminCommand(final String commandName) { + return ADMIN_COMMANDS.contains(commandName); + } + + static boolean isWriteCommand(final String commandName) { + return asList("insert", "update", "delete").contains(commandName); + } + + public static void assertEventsEquality(final List expectedEvents, final List events) { + assertEventsEquality(expectedEvents, events, null); + } + + public static void assertEventsEquality(final List expectedEvents, final List events, + @Nullable final Map lsidMap) { + assertEquals(expectedEvents.size(), events.size()); + + for (int i = 0; i < events.size(); i++) { + CommandEvent actual = events.get(i); + CommandEvent expected = expectedEvents.get(i); + + assertEquals(expected.getClass(), actual.getClass()); + assertEquals(expected.getCommandName().toLowerCase(), actual.getCommandName().toLowerCase()); + + if (actual.getClass().equals(CommandStartedEvent.class)) { + CommandStartedEvent expectedCommandStartedEvent = massageExpectedCommandStartedEvent((CommandStartedEvent) expected, + (CommandStartedEvent) actual, lsidMap); + CommandStartedEvent actualCommandStartedEvent = massageActualCommandStartedEvent((CommandStartedEvent) actual, + lsidMap, expectedCommandStartedEvent); + + assertEquals(expectedCommandStartedEvent.getDatabaseName(), actualCommandStartedEvent.getDatabaseName()); + assertEquals(expectedCommandStartedEvent.getCommand(), actualCommandStartedEvent.getCommand()); + if (((CommandStartedEvent) expected).getCommand().containsKey("recoveryToken")) { + if (((CommandStartedEvent) expected).getCommand().get("recoveryToken").isNull()) { + assertFalse(((CommandStartedEvent) actual).getCommand().containsKey("recoveryToken")); + } else { + assertTrue(((CommandStartedEvent) actual).getCommand().containsKey("recoveryToken")); + } + } + + } else if (actual.getClass().equals(CommandSucceededEvent.class)) { + CommandSucceededEvent actualCommandSucceededEvent = massageActualCommandSucceededEvent((CommandSucceededEvent) actual); + CommandSucceededEvent expectedCommandSucceededEvent = massageExpectedCommandSucceededEvent((CommandSucceededEvent) + expected); + 
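+                // Both events were normalized ("massaged") above so that non-deterministic fields
+                // such as cursor ids and server error details cannot cause spurious mismatches.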
+ assertEquals(expectedCommandSucceededEvent.getCommandName(), actualCommandSucceededEvent.getCommandName()); + assertTrue(actualCommandSucceededEvent.getElapsedTime(TimeUnit.NANOSECONDS) > 0); + + if (expectedCommandSucceededEvent.getResponse() == null) { + assertNull(actualCommandSucceededEvent.getResponse()); + } else { + assertTrue(String.format("\nExpected: %s\nActual: %s", + expectedCommandSucceededEvent.getResponse(), + actualCommandSucceededEvent.getResponse()), + actualCommandSucceededEvent.getResponse().entrySet() + .containsAll(expectedCommandSucceededEvent.getResponse().entrySet())); + } + } else if (actual.getClass().equals(CommandFailedEvent.class)) { + // nothing else to assert here + } else { + throw new UnsupportedOperationException("Unsupported event type: " + actual.getClass()); + } + } + } + + private static CommandSucceededEvent massageExpectedCommandSucceededEvent(final CommandSucceededEvent expected) { + // massage numbers that are the wrong BSON type + expected.getResponse().put("ok", new BsonDouble(expected.getResponse().getNumber("ok").doubleValue())); + return expected; + } + + private static CommandSucceededEvent massageActualCommandSucceededEvent(final CommandSucceededEvent actual) { + BsonDocument response = getWritableCloneOfCommand(actual.getResponse()); + + // massage numbers that are the wrong BSON type + response.put("ok", new BsonDouble(response.getNumber("ok").doubleValue())); + if (response.containsKey("n")) { + response.put("n", new BsonInt32(response.getNumber("n").intValue())); + } + + if (actual.getCommandName().equals("find") || actual.getCommandName().equals("getMore")) { + if (response.containsKey("cursor")) { + if (response.getDocument("cursor").containsKey("id") + && !response.getDocument("cursor").getInt64("id").equals(new BsonInt64(0))) { + response.getDocument("cursor").put("id", new BsonInt64(42)); + } + } + } else if (actual.getCommandName().equals("killCursors")) { + response.getArray("cursorsUnknown").set(0, new BsonInt64(42)); + } else if (isWriteCommand(actual.getCommandName())) { + if (response.containsKey("writeErrors")) { + for (BsonValue bsonValue : response.getArray("writeErrors")) { + BsonDocument cur = bsonValue.asDocument(); + BsonDocument newWriteErrorDocument = + new BsonDocument().append("index", cur.get("index")) + .append("code", new BsonInt32(42)) + .append("errmsg", new BsonString("")); + cur.clear(); + cur.putAll(newWriteErrorDocument); + } + } + if (actual.getCommandName().equals("update")) { + response.remove("nModified"); + } + } + return new CommandSucceededEvent(actual.getRequestContext(), actual.getOperationId(), actual.getRequestId(), + actual.getConnectionDescription(), actual.getDatabaseName(), actual.getCommandName(), response, + actual.getElapsedTime(TimeUnit.NANOSECONDS)); + } + + private static CommandStartedEvent massageActualCommandStartedEvent(final CommandStartedEvent event, + @Nullable final Map lsidMap, + final CommandStartedEvent expectedCommandStartedEvent) { + BsonDocument actualCommand = getWritableCloneOfCommand(event.getCommand()); + BsonDocument expectedCommand = expectedCommandStartedEvent.getCommand(); + + massageCommand(event, actualCommand); + + if (actualCommand.containsKey("readConcern") && (actualCommand.getDocument("readConcern").containsKey("afterClusterTime"))) { + actualCommand.getDocument("readConcern").put("afterClusterTime", new BsonInt32(42)); + } + if (actualCommand.containsKey("maxTimeMS") && !isExpectedMaxTimeMsLong(expectedCommand)) { + // Some tests expect maxTimeMS to be 
int32, but Java API requires maxTime to be a long. This massage seems preferable to casting + actualCommand.put("maxTimeMS", new BsonInt32(actualCommand.getNumber("maxTimeMS").intValue())); + } + // Tests do not expect the "ns" field in a result after running createIndex. + if (actualCommand.containsKey("createIndexes") && actualCommand.containsKey("indexes")) { + massageCommandIndexes(actualCommand.getArray("indexes")); + } + massageActualCommand(actualCommand, expectedCommand); + + return new CommandStartedEvent(event.getRequestContext(), event.getOperationId(), event.getRequestId(), + event.getConnectionDescription(), event.getDatabaseName(), event.getCommandName(), actualCommand); + } + + private static boolean isExpectedMaxTimeMsLong(final BsonDocument expectedCommand) { + if (expectedCommand.containsKey("maxTimeMS")) { + return expectedCommand.get("maxTimeMS").getBsonType() == BsonType.INT64; + } + return false; + } + + private static void massageCommandIndexes(final BsonArray indexes) { + for (BsonValue indexDocument : indexes) { + BsonDocument index = indexDocument.asDocument(); + index.remove("ns"); + } + } + + private static void massageActualCommand(final BsonDocument command, final BsonDocument expectedCommand) { + String[] keySet = command.keySet().toArray(new String[command.keySet().size()]); + for (String key : keySet) { + if (!expectedCommand.containsKey(key)) { + command.remove(key); + } else if (command.isDocument(key) && expectedCommand.isDocument(key)) { + massageActualCommand(command.getDocument(key), expectedCommand.getDocument(key)); + } else if (command.containsKey("pipeline") && expectedCommand.containsKey("pipeline")) { + massagePipeline(command, expectedCommand); + } + } + + } + + // If the expected pipeline contains a $changeStream key with an empty document value, remove the + // startAtOperationTime and resumeAfter fields from the actual pipeline value. 
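+    // For example, an expected pipeline of [{$changeStream: {}}] should match an actual pipeline of
+    // [{$changeStream: {startAtOperationTime: <timestamp>, resumeAfter: <token>}}], so the two
+    // resume-related fields are stripped from the actual $changeStream document before comparison.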
+ private static void massagePipeline(final BsonDocument command, final BsonDocument expectedCommand) { + if (expectedCommand.containsKey("pipeline") && command.containsKey("pipeline")) { + if (!expectedCommand.getArray("pipeline").isEmpty()) { + BsonDocument expectedChangeStreamDocument = expectedCommand.getArray("pipeline").get(0).asDocument(); + if (expectedChangeStreamDocument.containsKey("$changeStream") + && expectedChangeStreamDocument.getDocument("$changeStream").isEmpty()) { + if (!command.getArray("pipeline").isEmpty()) { + BsonDocument actualChangeStreamDocument = command.getArray("pipeline").get(0).asDocument() + .getDocument("$changeStream"); + actualChangeStreamDocument.remove("resumeAfter"); + actualChangeStreamDocument.remove("startAtOperationTime"); + } + } + } + } + } + + private static CommandStartedEvent massageExpectedCommandStartedEvent(final CommandStartedEvent event, + final CommandStartedEvent actualEvent, + @Nullable final Map lsidMap) { + BsonDocument command = getWritableCloneOfCommand(event.getCommand()); + + massageCommand(event, command); + // The null-treatment below stems from + // https://github.com/mongodb/specifications/blob/master/source/transactions/tests/README.rst#null-values + if (lsidMap == null) { + command.remove("lsid"); + } else if (command.containsKey("lsid")) { + command.put("lsid", lsidMap.get(command.getString("lsid").getValue())); + } + for (String nullableFieldName : new String[] {"txnNumber", "stmtId", "startTransaction", "autocommit", "maxTimeMS", "writeConcern", + "allowDiskUse", "readConcern", "encryptedFields"}) { + if (command.isNull(nullableFieldName)) { + command.remove(nullableFieldName); + } + } + if (command.containsKey("encryptedFields")) { + BsonDocument encryptedFields = command.getDocument("encryptedFields"); + for (String nullableFieldName : new String[] {"escCollection", "ecocCollection", "eccCollection"}) { + if (encryptedFields.isNull(nullableFieldName)) { + encryptedFields.remove(nullableFieldName); + } + } + } + command.remove("recoveryToken"); + command.remove("query"); + if (command.containsKey("filter") && command.getDocument("filter").isEmpty()) { + command.remove("filter"); + } + command.remove("mapReduce"); + + replaceTypeAssertionWithActual(command, actualEvent.getCommand()); + + return new CommandStartedEvent(event.getRequestContext(), event.getOperationId(), event.getRequestId(), + event.getConnectionDescription(), event.getDatabaseName(), event.getCommandName(), command); + } + + private static void massageCommand(final CommandStartedEvent event, final BsonDocument command) { + if (event.getCommandName().equals("update")) { + for (BsonValue bsonValue : command.getArray("updates")) { + BsonDocument curUpdate = bsonValue.asDocument(); + if (!curUpdate.containsKey("multi")) { + curUpdate.put("multi", BsonBoolean.FALSE); + } + if (!curUpdate.containsKey("upsert")) { + curUpdate.put("upsert", BsonBoolean.FALSE); + } + } + } else if (event.getCommandName().equals("getMore")) { + command.put("getMore", new BsonInt64(42)); + } else if (event.getCommandName().equals("killCursors")) { + command.getArray("cursors").set(0, new BsonInt64(42)); + } + command.remove("$clusterTime"); + } + + private static BsonDocument getWritableCloneOfCommand(final BsonDocument original) { + BsonDocument clone = new BsonDocument(); + BsonDocumentWriter writer = new BsonDocumentWriter(clone); + new BsonDocumentCodec(CODEC_REGISTRY_HACK).encode(writer, original, EncoderContext.builder().build()); + return clone; + } + + 
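+    // Note: getWritableCloneOfCommand deep-clones by re-encoding through a BsonDocumentWriter, so
+    // (via CODEC_REGISTRY_HACK above) even private sub-classes of BsonDocument come back as plain,
+    // mutable BsonDocuments that the massage methods can safely modify in place.
+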
private CommandMonitoringTestHelper() { + } +} diff --git a/driver-core/src/test/functional/com/mongodb/client/CrudTestHelper.java b/driver-core/src/test/functional/com/mongodb/client/CrudTestHelper.java new file mode 100644 index 00000000000..119babf8875 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/client/CrudTestHelper.java @@ -0,0 +1,102 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonType; +import org.bson.BsonValue; + +import java.util.List; +import java.util.stream.Collectors; + +import static java.util.Collections.singletonList; + +public final class CrudTestHelper { + + public static void replaceTypeAssertionWithActual(final BsonDocument expected, final BsonDocument actual) { + for (String key : expected.keySet()) { + BsonValue value = expected.get(key); + if (value.isDocument()) { + BsonDocument valueDocument = value.asDocument(); + BsonValue actualValue = actual.get(key); + if (valueDocument.size() == 1 && valueDocument.getFirstKey().equals("$$type")) { + List types = getExpectedTypes(valueDocument.get("$$type")); + String actualType = asTypeString(actualValue.getBsonType()); + if (types.contains(actualType)) { + expected.put(key, actualValue); + } else { + throw new UnsupportedOperationException("Unsupported type: " + actualValue); + } + } else if (actualValue != null && actualValue.isDocument()) { + replaceTypeAssertionWithActual(valueDocument, actualValue.asDocument()); + } else { + throw new RuntimeException(String.format("Expecting '%s' as actual value but found '%s' ", valueDocument, actualValue)); + } + } else if (value.isArray()) { + replaceTypeAssertionWithActual(value.asArray(), actual.get(key).asArray()); + } + } + } + + private static String asTypeString(final BsonType bsonType) { + switch (bsonType) { + case BINARY: + return "binData"; + case INT32: + return "int"; + case INT64: + return "long"; + default: + throw new UnsupportedOperationException("Unsupported bson type conversion to string: " + bsonType); + } + } + + private static List getExpectedTypes(final BsonValue expectedTypes) { + List types; + if (expectedTypes.isString()) { + types = singletonList(expectedTypes.asString().getValue()); + } else if (expectedTypes.isArray()) { + types = expectedTypes.asArray().stream().map(type -> type.asString().getValue()).collect(Collectors.toList()); + } else { + throw new UnsupportedOperationException("Unsupported type for $$type value"); + } + return types; + } + + private static void replaceTypeAssertionWithActual(final BsonArray expected, final BsonArray actual) { + for (int i = 0; i < expected.size(); i++) { + BsonValue value = expected.get(i); + if (value.isDocument()) { + replaceTypeAssertionWithActual(value.asDocument(), actual.get(i).asDocument()); + } else if (value.isArray()) { + replaceTypeAssertionWithActual(value.asArray(), actual.get(i).asArray()); + } + } + } + + 
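+    // For example, an expected document of {"id": {"$$type": "long"}} matched against an actual
+    // document of {"id": {"$numberLong": "42"}} has the $$type assertion replaced with the actual
+    // BsonInt64 value, because INT64 maps to the type string "long"; a non-matching BSON type
+    // fails with an exception instead.
+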
private CrudTestHelper() { + } + + public static String repeat(final int times, final String s) { + StringBuilder builder = new StringBuilder(times); + for (int i = 0; i < times; i++) { + builder.append(s); + } + return builder.toString(); + } +} diff --git a/driver-core/src/test/functional/com/mongodb/client/TestHelper.java b/driver-core/src/test/functional/com/mongodb/client/TestHelper.java new file mode 100644 index 00000000000..237c03c7e19 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/client/TestHelper.java @@ -0,0 +1,47 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.lang.Nullable; + +import java.lang.reflect.Field; +import java.util.Map; + +import static java.lang.System.getenv; + +public final class TestHelper { + + public static void setEnvironmentVariable(final String name, @Nullable final String value) { + try { + Map env = getenv(); + Field field = env.getClass().getDeclaredField("m"); + field.setAccessible(true); + @SuppressWarnings("unchecked") + Map result = (Map) field.get(env); + if (value == null) { + result.remove(name); + } else { + result.put(name, value); + } + } catch (IllegalAccessException | NoSuchFieldException e) { + throw new RuntimeException(e); + } + } + + private TestHelper() { + } +} diff --git a/driver-core/src/test/functional/com/mongodb/client/TestListener.java b/driver-core/src/test/functional/com/mongodb/client/TestListener.java new file mode 100644 index 00000000000..6b968f31f1b --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/client/TestListener.java @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.annotations.ThreadSafe; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** + * A simple listener that consumes string events, which can be checked in tests. 
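+ * <p>
+ * Events are recorded in a synchronized list, and {@link #getEventStrings()} returns a snapshot
+ * copy, so events can be added and read from multiple threads.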
+ */ +@ThreadSafe +public final class TestListener { + private final List events = Collections.synchronizedList(new ArrayList<>()); + + public void add(final String s) { + events.add(s); + } + + public List getEventStrings() { + synchronized (events) { + return new ArrayList<>(events); + } + } + + public void clear() { + events.clear(); + } +} diff --git a/driver-core/src/test/functional/com/mongodb/client/WithWrapper.java b/driver-core/src/test/functional/com/mongodb/client/WithWrapper.java new file mode 100644 index 00000000000..e610f578112 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/client/WithWrapper.java @@ -0,0 +1,66 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.internal.connection.FaasEnvironmentAccessor; +import com.mongodb.lang.Nullable; + +import java.util.Map; + +@FunctionalInterface +public interface WithWrapper { + + void run(Runnable r); + + static WithWrapper withWrapper() { + return r -> r.run(); + } + + default WithWrapper withEnvironmentVariable(final String name, @Nullable final String value) { + return runnable -> { + Map innerMap = FaasEnvironmentAccessor.getFaasEnvMap(); + String original = innerMap.get(name); + if (value == null) { + innerMap.remove(name); + } else { + innerMap.put(name, value); + } + try { + this.run(runnable); + } finally { + if (original == null) { + innerMap.remove(name); + } else { + innerMap.put(name, original); + } + } + }; + } + + default WithWrapper withSystemProperty(final String name, final String value) { + return runnable -> { + String original = System.getProperty(name); + System.setProperty(name, value); + try { + this.run(runnable); + } finally { + System.setProperty(name, original); + } + }; + } + +} diff --git a/driver-core/src/test/functional/com/mongodb/client/model/AggregatesFunctionalSpecification.groovy b/driver-core/src/test/functional/com/mongodb/client/model/AggregatesFunctionalSpecification.groovy new file mode 100644 index 00000000000..e27f888f548 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/client/model/AggregatesFunctionalSpecification.groovy @@ -0,0 +1,1383 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model + +import com.mongodb.MongoCommandException +import com.mongodb.MongoNamespace +import com.mongodb.OperationFunctionalSpecification +import com.mongodb.client.model.fill.FillOutputField +import org.bson.BsonDecimal128 +import org.bson.BsonDocument +import org.bson.BsonString +import org.bson.Document +import org.bson.conversions.Bson +import org.bson.types.Decimal128 +import spock.lang.IgnoreIf + +import java.time.Instant +import java.time.LocalDateTime +import java.time.ZoneId +import java.time.ZoneOffset + +import static com.mongodb.ClusterFixture.serverVersionLessThan +import static com.mongodb.client.model.Accumulators.accumulator +import static com.mongodb.client.model.Accumulators.addToSet +import static com.mongodb.client.model.Accumulators.avg +import static com.mongodb.client.model.Accumulators.bottom +import static com.mongodb.client.model.Accumulators.bottomN +import static com.mongodb.client.model.Accumulators.first +import static com.mongodb.client.model.Accumulators.firstN +import static com.mongodb.client.model.Accumulators.last +import static com.mongodb.client.model.Accumulators.lastN +import static com.mongodb.client.model.Accumulators.max +import static com.mongodb.client.model.Accumulators.maxN +import static com.mongodb.client.model.Accumulators.mergeObjects +import static com.mongodb.client.model.Accumulators.min +import static com.mongodb.client.model.Accumulators.minN +import static com.mongodb.client.model.Accumulators.push +import static com.mongodb.client.model.Accumulators.stdDevPop +import static com.mongodb.client.model.Accumulators.stdDevSamp +import static com.mongodb.client.model.Accumulators.sum +import static com.mongodb.client.model.Accumulators.top +import static com.mongodb.client.model.Accumulators.topN +import static com.mongodb.client.model.Aggregates.addFields +import static com.mongodb.client.model.Aggregates.bucket +import static com.mongodb.client.model.Aggregates.bucketAuto +import static com.mongodb.client.model.Aggregates.count +import static com.mongodb.client.model.Aggregates.densify +import static com.mongodb.client.model.Aggregates.facet +import static com.mongodb.client.model.Aggregates.fill +import static com.mongodb.client.model.Aggregates.graphLookup +import static com.mongodb.client.model.Aggregates.group +import static com.mongodb.client.model.Aggregates.limit +import static com.mongodb.client.model.Aggregates.lookup +import static com.mongodb.client.model.Aggregates.match +import static com.mongodb.client.model.Aggregates.merge +import static com.mongodb.client.model.Aggregates.out +import static com.mongodb.client.model.Aggregates.project +import static com.mongodb.client.model.Aggregates.replaceRoot +import static com.mongodb.client.model.Aggregates.replaceWith +import static com.mongodb.client.model.Aggregates.sample +import static com.mongodb.client.model.Aggregates.set +import static com.mongodb.client.model.Aggregates.setWindowFields +import static com.mongodb.client.model.Aggregates.skip +import static com.mongodb.client.model.Aggregates.sort +import static com.mongodb.client.model.Aggregates.sortByCount +import static com.mongodb.client.model.Aggregates.unionWith +import static com.mongodb.client.model.Aggregates.unwind +import static com.mongodb.client.model.Filters.eq +import static com.mongodb.client.model.Filters.expr +import static com.mongodb.client.model.Projections.computed +import static com.mongodb.client.model.Projections.exclude +import static 
com.mongodb.client.model.Projections.excludeId +import static com.mongodb.client.model.Projections.fields +import static com.mongodb.client.model.Projections.include +import static com.mongodb.client.model.Sorts.ascending +import static com.mongodb.client.model.Sorts.descending +import static com.mongodb.client.model.Windows.Bound.CURRENT +import static com.mongodb.client.model.Windows.Bound.UNBOUNDED +import static com.mongodb.client.model.Windows.documents +import static com.mongodb.client.model.Windows.range +import static com.mongodb.client.model.Windows.timeRange +import static com.mongodb.client.model.densify.DensifyOptions.densifyOptions +import static com.mongodb.client.model.densify.DensifyRange.fullRangeWithStep +import static com.mongodb.client.model.densify.DensifyRange.partitionRangeWithStep +import static com.mongodb.client.model.densify.DensifyRange.rangeWithStep +import static com.mongodb.client.model.fill.FillOptions.fillOptions +import static java.util.Arrays.asList +import static java.util.stream.Collectors.toList +import static org.spockframework.util.CollectionUtil.containsAny + +class AggregatesFunctionalSpecification extends OperationFunctionalSpecification { + + def a = new Document('_id', 1).append('x', 1) + .append('y', 'a') + .append('z', false) + .append('a', [1, 2, 3]) + .append('a1', [new Document('c', 1).append('d', 2), new Document('c', 2).append('d', 3)]) + .append('o', new Document('a', 1)) + + def b = new Document('_id', 2).append('x', 2) + .append('y', 'b') + .append('z', true) + .append('a', [3, 4, 5, 6]) + .append('a1', [new Document('c', 2).append('d', 3), new Document('c', 3).append('d', 4)]) + .append('o', new Document('b', 2)) + + def c = new Document('_id', 3).append('x', 3) + .append('y', 'c') + .append('z', true) + .append('o', new Document('c', 3)) + + def setup() { + getCollectionHelper().insertDocuments(a, b, c) + } + + + def aggregate(List pipeline) { + getCollectionHelper().aggregate(pipeline) + } + + def '$match'() { + expect: + aggregate([match(Filters.exists('a1'))]) == [a, b] + } + + def '$project'() { + expect: + aggregate([project(fields(include('x'), computed('c', '$y')))]) == [new Document('_id', 1).append('x', 1).append('c', 'a'), + new Document('_id', 2).append('x', 2).append('c', 'b'), + new Document('_id', 3).append('x', 3).append('c', 'c')] + } + + def '$project an exclusion'() { + expect: + aggregate([project(exclude('a', 'a1', 'z', 'o'))]) == [new Document('_id', 1).append('x', 1).append('y', 'a'), + new Document('_id', 2).append('x', 2).append('y', 'b'), + new Document('_id', 3).append('x', 3).append('y', 'c')] + } + + def '$sort'() { + expect: + aggregate([sort(descending('x'))]) == [c, b, a] + } + + def '$skip'() { + expect: + aggregate([skip(1)]) == [b, c] + } + + def '$limit'() { + expect: + aggregate([limit(2)]) == [a, b] + } + + def '$unwind'() { + expect: + aggregate([project(fields(include('a'), excludeId())), unwind('$a')]) == [new Document('a', 1), + new Document('a', 2), + new Document('a', 3), + new Document('a', 3), + new Document('a', 4), + new Document('a', 5), + new Document('a', 6)] + } + + def '$unwind with UnwindOptions'() { + given: + getCollectionHelper().drop() + getCollectionHelper().insertDocuments(new Document('a', [1]), new Document('a', null), new Document('a', [])) + + when: + def results = aggregate([project(fields(include('a'), excludeId())), unwind('$a', options)]) + + then: + results == expectedResults + + where: + options | expectedResults + new UnwindOptions() | [Document.parse('{a: 1}')] + 
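+            // preserveNullAndEmptyArrays keeps documents whose array field is null or empty;
+            // includeArrayIndex('b') records each unwound element's index (null where nothing was unwound)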
new UnwindOptions().preserveNullAndEmptyArrays(true) | [Document.parse('{a: 1}'), Document.parse('{a: null}'),
+                                                                Document.parse('{}')]
+        new UnwindOptions()
+                .preserveNullAndEmptyArrays(true)
+                .includeArrayIndex('b')                          | [Document.parse('{a: 1, b: 0}'), Document.parse('{a: null, b: null}'),
+                                                                Document.parse('{b: null}')]
+    }
+
+    def '$group'() {
+        expect:
+        aggregate([group(null)]) == [new Document('_id', null)]
+
+        aggregate([group('$z')]).containsAll([new Document('_id', true), new Document('_id', false)])
+
+        aggregate([group(null, sum('acc', '$x'))]) == [new Document('_id', null).append('acc', 6)]
+
+        aggregate([group(null, avg('acc', '$x'))]) == [new Document('_id', null).append('acc', 2)]
+
+        aggregate([group(null, first('acc', '$x'))]) == [new Document('_id', null).append('acc', 1)]
+
+        aggregate([group(null, last('acc', '$x'))]) == [new Document('_id', null).append('acc', 3)]
+
+        aggregate([group(null, max('acc', '$x'))]) == [new Document('_id', null).append('acc', 3)]
+
+        aggregate([group(null, min('acc', '$x'))]) == [new Document('_id', null).append('acc', 1)]
+
+        aggregate([group('$z', push('acc', '$z'))]).containsAll([new Document('_id', true).append('acc', [true, true]),
+                                                                 new Document('_id', false).append('acc', [false])])
+
+        aggregate([group('$z', addToSet('acc', '$z'))]).containsAll([new Document('_id', true).append('acc', [true]),
+                                                                     new Document('_id', false).append('acc', [false])])
+    }
+
+    def '$group with $mergeObjects'() {
+        expect:
+        aggregate([group(null, mergeObjects('acc', '$o'))]).containsAll(
+                [new Document('_id', null).append('acc', new Document('a', 1).append('b', 2).append('c', 3))])
+    }
+
+    @IgnoreIf({ serverVersionLessThan(5, 2) })
+    def '$group with top or bottom n'() {
+        when:
+        List results = aggregate([group(new Document('gid', '$z'),
+                minN('res', '$y',
+                        new Document('$cond', new Document('if', new Document('$eq', asList('$gid', true)))
+                                .append('then', 2).append('else', 1))))])
+                .collect()
+        then:
+        ((Document) results.stream().find { it.get('_id') == new Document('gid', true) })
+                .get('res', List).toSet() == ['b', 'c'].toSet()
+
+        when:
+        results = aggregate([group(null,
+                maxN('res', '$y', 1))])
+        then:
+        results.first().get('res', List).toSet() == ['c'].toSet()
+
+        when:
+        results = aggregate([
+                sort(ascending('x')),
+                group(null,
+                        firstN('res', '$y', 2))])
+        then:
+        results.first().get('res', List) == ['a', 'b']
+
+        when:
+        results = aggregate([
+                sort(ascending('x')),
+                group(null,
+                        lastN('res', '$y', 1))])
+        then:
+        results.first().get('res', List) == ['c']
+
+        when:
+        results = aggregate([group(new Document('gid', '$z'),
+                bottom('res', descending('y'), ['$x', '$y']))])
+                .collect()
+        then:
+        ((Document) results.stream().find { it.get('_id') == new Document('gid', true) })
+                .get('res', List) == [2, 'b']
+
+        when:
+        results = aggregate([group(new Document('gid', '$z'),
+                bottomN('res', descending('y'), ['$x', '$y'],
+                        new Document('$cond', new Document('if', new Document('$eq', asList('$gid', true)))
+                                .append('then', 2).append('else', 1))))])
+                .collect()
+        then:
+        ((Document) results.stream().find { it.get('_id') == new Document('gid', true) })
+                .get('res', List) == [[3, 'c'], [2, 'b']]
+
+        when:
+        results = aggregate([group(null,
+                top('res', ascending('x'), '$y'))])
+        then:
+        results.first().get('res') == 'a'
+
+        when:
+        results = aggregate([group(null,
+                topN('res', descending('x'), '$y', 1))])
+        then:
+        results.first().get('res', List) == ['c']
+    }
+
+    def '$out'() {
+        given:
+        def outCollectionName = getCollectionName() + '.out'
+
+        when:
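+        // $out writes the pipeline output to the named collection, creating or replacing it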
+ aggregate([out(outCollectionName)]) + + then: + getCollectionHelper(new MongoNamespace(getDatabaseName(), outCollectionName)).find() == [a, b, c] + } + + @IgnoreIf({ serverVersionLessThan(4, 4) }) + def '$out to specified database'() { + given: + def outDatabaseName = getDatabaseName() + '_out' + def outCollectionName = getCollectionName() + '.out' + getCollectionHelper(new MongoNamespace(outDatabaseName, outCollectionName)).create() + + when: + aggregate([out(outDatabaseName, outCollectionName)]) + + then: + getCollectionHelper(new MongoNamespace(outDatabaseName, outCollectionName)).find() == [a, b, c] + } + + def '$merge'() { + given: + def outCollectionName = getCollectionName() + '.out' + getCollectionHelper(new MongoNamespace(getDatabaseName(), outCollectionName)) + .createUniqueIndex(new Document('x', 1)) + getCollectionHelper(new MongoNamespace('db1', outCollectionName)).create() + + when: + aggregate([merge(outCollectionName)]) + + then: + getCollectionHelper(new MongoNamespace(getDatabaseName(), outCollectionName)).find() == [a, b, c] + + when: + aggregate([merge(new MongoNamespace('db1', outCollectionName))]) + + then: + getCollectionHelper(new MongoNamespace('db1', outCollectionName)).find() == [a, b, c] + + when: + aggregate([merge(outCollectionName, new MergeOptions() + .uniqueIdentifier('x') + .whenMatched(MergeOptions.WhenMatched.REPLACE) + .whenNotMatched(MergeOptions.WhenNotMatched.FAIL))]) + + then: + getCollectionHelper(new MongoNamespace(getDatabaseName(), outCollectionName)).find() == [a, b, c] + + when: + aggregate([merge(outCollectionName, new MergeOptions() + .uniqueIdentifier('x') + .whenMatched(MergeOptions.WhenMatched.KEEP_EXISTING))]) + + then: + getCollectionHelper(new MongoNamespace(getDatabaseName(), outCollectionName)).find() == [a, b, c] + + when: + aggregate([merge(outCollectionName, new MergeOptions() + .uniqueIdentifier('x') + .whenMatched(MergeOptions.WhenMatched.MERGE))]) + + then: + getCollectionHelper(new MongoNamespace(getDatabaseName(), outCollectionName)).find() == [a, b, c] + + when: + aggregate([merge(outCollectionName, new MergeOptions() + .uniqueIdentifier('x') + .whenMatched(MergeOptions.WhenMatched.FAIL))]) + + then: + thrown(MongoCommandException) + + when: + aggregate([merge(outCollectionName, new MergeOptions() + .uniqueIdentifier('x') + .whenMatched(MergeOptions.WhenMatched.REPLACE) + .whenNotMatched(MergeOptions.WhenNotMatched.DISCARD))]) + + then: + getCollectionHelper(new MongoNamespace(getDatabaseName(), outCollectionName)).find() == [a, b, c] + + when: + aggregate([merge(outCollectionName, new MergeOptions() + .uniqueIdentifier('x') + .whenMatched(MergeOptions.WhenMatched.REPLACE) + .whenNotMatched(MergeOptions.WhenNotMatched.INSERT))]) + + then: + getCollectionHelper(new MongoNamespace(getDatabaseName(), outCollectionName)).find() == [a, b, c] + + when: + aggregate([merge(outCollectionName, new MergeOptions() + .uniqueIdentifier('x') + .whenMatched(MergeOptions.WhenMatched.PIPELINE) + .variables([new Variable('b', 1)]) + .whenMatchedPipeline([addFields([new Field('b', '$$b')])]) + .whenNotMatched(MergeOptions.WhenNotMatched.FAIL))]) + + then: + a.append('b', 1) + b.append('b', 1) + c.append('b', 1) + getCollectionHelper(new MongoNamespace(getDatabaseName(), outCollectionName)).find() == [a, b, c] + } + + def '$stdDev'() { + when: + def results = aggregate([group(null, stdDevPop('stdDevPop', '$x'), stdDevSamp('stdDevSamp', '$x'))]).first() + + then: + results.keySet().containsAll(['_id', 'stdDevPop', 'stdDevSamp']) + 
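+        // x takes the values 1, 2, 3: the population variance is 2/3 (so stdDevPop is sqrt(2/3)) and the sample std dev is 1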
results.get('_id') == null + results.getDouble('stdDevPop').round(10) == new Double(Math.sqrt(2 / 3)).round(10) + results.get('stdDevSamp') == 1.0 + } + + def '$sample'() { + expect: + containsAny([a, b, c], aggregate([sample(1)]).first()) + } + + + def '$lookup'() { + given: + def fromCollectionName = 'lookupCollection' + def fromHelper = getCollectionHelper(new MongoNamespace(getDatabaseName(), fromCollectionName)) + + getCollectionHelper().drop() + fromHelper.drop() + + getCollectionHelper().insertDocuments(new Document('_id', 0).append('a', 1), + new Document('_id', 1).append('a', null), new Document('_id', 2)) + fromHelper.insertDocuments(new Document('_id', 0).append('b', 1), new Document('_id', 1).append('b', null), new Document('_id', 2)) + def lookupDoc = lookup(fromCollectionName, 'a', 'b', 'same') + + when: + def results = aggregate([lookupDoc]) + + then: + results == [ + Document.parse('{_id: 0, a: 1, "same": [{_id: 0, b: 1}]}'), + Document.parse('{_id: 1, a: null, "same": [{_id: 1, b: null}, {_id: 2}]}'), + Document.parse('{_id: 2, "same": [{_id: 1, b: null}, {_id: 2}]}') + ] + + cleanup: + fromHelper?.drop() + } + + def '$lookup with pipeline'() { + given: + def fromCollectionName = 'warehouses' + def fromHelper = getCollectionHelper(new MongoNamespace(getDatabaseName(), fromCollectionName)) + def collection = getCollectionHelper() + + collection.drop() + fromHelper.drop() + + fromHelper.insertDocuments( + Document.parse('{ "_id" : 1, "stock_item" : "abc", warehouse: "A", "instock" : 120 }'), + Document.parse('{ "_id" : 2, "stock_item" : "abc", warehouse: "B", "instock" : 60 }'), + Document.parse('{ "_id" : 3, "stock_item" : "xyz", warehouse: "B", "instock" : 40 }'), + Document.parse('{ "_id" : 4, "stock_item" : "xyz", warehouse: "A", "instock" : 80 }')) + + collection.insertDocuments( + Document.parse('{ "_id" : 1, "item" : "abc", "price" : 12, "ordered" : 2 }'), + Document.parse('{ "_id" : 2, "item" : "xyz", "price" : 10, "ordered" : 60 }') + ) + + def let = asList(new Variable('order_item', '$item'), new Variable('order_qty', '$ordered')) + + def pipeline = asList( + match(expr(new Document('$and', + asList( new Document('$eq', asList('$stock_item', '$$order_item')), + new Document('$gte', asList('$instock', '$$order_qty')))))), + project(fields(exclude('stock_item'), excludeId()))) + + def lookupDoc = lookup(fromCollectionName, let, pipeline, 'stockdata') + + when: + def results = aggregate([lookupDoc]) + + then: + results == [ + Document.parse('{ "_id" : 1.0, "item" : "abc", "price" : 12.0, "ordered" : 2.0, ' + + '"stockdata" : [ { "warehouse" : "A", "instock" : 120.0 }, { "warehouse" : "B", "instock" : 60.0 } ] }'), + Document.parse('{ "_id" : 2.0, "item" : "xyz", "price" : 10.0, "ordered" : 60.0, ' + + '"stockdata" : [ { "warehouse" : "A", "instock" : 80.0 } ] }') ] + + cleanup: + fromHelper?.drop() + } + + def '$lookup with pipeline without variables'() { + given: + def fromCollectionName = 'holidays' + def fromCollection = getCollectionHelper(new MongoNamespace(getDatabaseName(), fromCollectionName)) + def collection = getCollectionHelper() + + collection.drop() + fromCollection.drop() + + fromCollection.insertDocuments( + Document.parse('{ "_id" : 1, year: 2018, name: "New Years", date: { $date : "2018-01-01T00:00:00Z"} }'), + Document.parse('{ "_id" : 2, year: 2018, name: "Pi Day", date: { $date : "2018-03-14T00:00:00Z" } }'), + Document.parse('{ "_id" : 3, year: 2018, name: "Ice Cream Day", date: { $date : "2018-07-15T00:00:00Z"} }'), + Document.parse('{ "_id" : 
4, year: 2017, name: "New Years", date: { $date : "2017-01-01T00:00:00Z" } }'), + Document.parse('{ "_id" : 5, year: 2017, name: "Ice Cream Day", date: { $date : "2017-07-16T00:00:00Z" } }') + ) + + collection.insertDocuments( + Document.parse('''{ "_id" : 1, "student" : "Ann Aardvark", + sickdays: [ { $date : "2018-05-01T00:00:00Z" }, { $date : "2018-08-23T00:00:00Z" } ] }'''), + Document.parse('''{ "_id" : 2, "student" : "Zoe Zebra", + sickdays: [ { $date : "2018-02-01T00:00:00Z" }, { $date : "2018-05-23T00:00:00Z" } ] }''') + ) + + def pipeline = asList( + match(eq('year', 2018)), + project(fields(excludeId(), computed('date', fields(computed('name', '$name'), computed('date', '$date'))))), + replaceRoot('$date') + ) + + def lookupDoc = lookup(fromCollectionName, pipeline, 'holidays') + + when: + def results = aggregate([lookupDoc]) + + then: + results == [ + Document.parse( + '''{ '_id' : 1, 'student' : "Ann Aardvark", + 'sickdays' : [ ISODate("2018-05-01T00:00:00Z"), ISODate("2018-08-23T00:00:00Z") ], + 'holidays' : [ { 'name' : "New Years", 'date' : ISODate ("2018-01-01T00:00:00Z") }, + { 'name' : "Pi Day", 'date' : ISODate("2018-03-14T00:00:00Z") }, + { 'name' : "Ice Cream Day", 'date' : ISODate("2018-07-15T00:00:00Z") } ] }'''), + Document.parse( + '''{ '_id' : 2, 'student' : "Zoe Zebra", + 'sickdays' : [ ISODate("2018-02-01T00:00:00Z"), ISODate("2018-05-23T00:00:00Z") ], + 'holidays' : [ { 'name' : "New Years", 'date' : ISODate("2018-01-01T00:00:00Z") }, + { 'name' : "Pi Day", 'date' : ISODate("2018-03-14T00:00:00Z") }, + { 'name' : "Ice Cream Day", 'date' : ISODate("2018-07-15T00:00:00Z") } ] }''') ] + + cleanup: + fromCollection?.drop() + } + + def '$facet'() { + given: + def helper = getCollectionHelper() + + helper.drop() + + (0..50).each { + def size = (35 + it) + def manufacturer = ['Sony', 'Samsung', 'Vizio'][it % 3] + helper.insertDocuments(Document.parse(""" { + title: "${manufacturer} ${size} inch HDTV", + attributes: { + "type": "HD", + "screen_size": ${size}, + "manufacturer": "${manufacturer}", + } + }""")) + } + def stage = facet( + new Facet('Manufacturer', + sortByCount('$attributes.manufacturer'), + limit(5)), + new Facet('Screen Sizes', + unwind('$attributes'), + bucketAuto('$attributes.screen_size', 5, new BucketAutoOptions() + .output(sum('count', 1))))) + + when: + def results = aggregate([stage, + unwind('$Manufacturer'), + sort(ascending('Manufacturer')), + group('_id', push('Manufacturer', '$Manufacturer'), + first('Screen Sizes', '$Screen Sizes')), + project(excludeId())]) + + then: + results == [ + Document.parse( + '''{ 'Manufacturer': [ + {'_id': "Samsung", 'count': 17}, + {'_id': "Sony", 'count': 17}, + {'_id': "Vizio", 'count': 17} + ], 'Screen Sizes': [ + {'_id': {'min': 35, 'max': 45}, 'count': 10}, + {'_id': {'min': 45, 'max': 55}, 'count': 10}, + {'_id': {'min': 55, 'max': 65}, 'count': 10}, + {'_id': {'min': 65, 'max': 75}, 'count': 10}, + {'_id': {'min': 75, 'max': 85}, 'count': 11} + ]} + '''), + ] + + cleanup: + helper?.drop() + } + + def '$graphLookup'() { + given: + def fromCollectionName = 'contacts' + def fromHelper = getCollectionHelper(new MongoNamespace(getDatabaseName(), fromCollectionName)) + + fromHelper.drop() + + fromHelper.insertDocuments(Document.parse('{ _id: 0, name: "Bob Smith", friends: ["Anna Jones", "Chris Green"] }')) + fromHelper.insertDocuments(Document.parse('{ _id: 1, name: "Anna Jones", friends: ["Bob Smith", "Chris Green", "Joe Lee"] }')) + fromHelper.insertDocuments(Document.parse('{ _id: 2, name: "Chris Green", 
friends: ["Anna Jones", "Bob Smith"] }')) + fromHelper.insertDocuments(Document.parse('{ _id: 3, name: "Joe Lee", friends: ["Anna Jones", "Fred Brown"] }')) + fromHelper.insertDocuments(Document.parse('{ _id: 4, name: "Fred Brown", friends: ["Joe Lee"] }')) + + def lookupDoc = graphLookup('contacts', new BsonString('$friends'), 'friends', 'name', 'socialNetwork') + + when: + def results = fromHelper.aggregate([lookupDoc, + unwind('$socialNetwork'), + sort(new Document('_id', 1).append('socialNetwork._id', 1))]) + + then: + results.subList(0, 5) == [ + Document.parse('''{ _id: 0, name: "Bob Smith", friends: ["Anna Jones", "Chris Green"], socialNetwork: { + _id: 0, name: "Bob Smith", friends: ["Anna Jones", "Chris Green"] } }'''), + Document.parse('''{ _id: 0, name: "Bob Smith", friends: ["Anna Jones", "Chris Green"], socialNetwork: { + _id: 1, name: "Anna Jones", friends: ["Bob Smith", "Chris Green", "Joe Lee"] } }'''), + Document.parse('''{ _id: 0, name: "Bob Smith", friends: ["Anna Jones", "Chris Green"], socialNetwork: { + _id: 2, name: "Chris Green", friends: ["Anna Jones", "Bob Smith"] } }'''), + Document.parse('''{ _id: 0, name: "Bob Smith", friends: ["Anna Jones", "Chris Green"], socialNetwork: { + _id: 3, name: "Joe Lee", friends: ["Anna Jones", "Fred Brown" ] } }'''), + Document.parse('''{ _id: 0, name: "Bob Smith", friends: ["Anna Jones", "Chris Green"], socialNetwork: { + _id: 4, name: "Fred Brown", friends: ["Joe Lee"] } }''') + ] + + cleanup: + fromHelper?.drop() + } + + def '$graphLookup with depth options'() { + given: + def fromCollectionName = 'contacts' + def fromHelper = getCollectionHelper(new MongoNamespace(getDatabaseName(), fromCollectionName)) + + fromHelper.drop() + + fromHelper.insertDocuments(Document.parse('{ _id: 0, name: "Bob Smith", friends: ["Anna Jones", "Chris Green"] }')) + fromHelper.insertDocuments(Document.parse('{ _id: 1, name: "Anna Jones", friends: ["Bob Smith", "Chris Green", "Joe Lee"] }')) + fromHelper.insertDocuments(Document.parse('{ _id: 2, name: "Chris Green", friends: ["Anna Jones", "Bob Smith"] }')) + fromHelper.insertDocuments(Document.parse('{ _id: 3, name: "Joe Lee", friends: ["Anna Jones", "Fred Brown"] }')) + fromHelper.insertDocuments(Document.parse('{ _id: 4, name: "Fred Brown", friends: ["Joe Lee"] }')) + + def lookupDoc = graphLookup('contacts', new BsonString('$friends'), 'friends', 'name', 'socialNetwork', + new GraphLookupOptions() + .maxDepth(1) + .depthField('depth')) + + when: + def results = fromHelper.aggregate([lookupDoc, + unwind('$socialNetwork'), + sort(new Document('_id', 1) + .append('socialNetwork._id', 1))]) + + then: + results.subList(0, 4) == [ + Document.parse('''{ _id: 0, name: "Bob Smith", friends: ["Anna Jones", "Chris Green"], socialNetwork: { + _id: 0, name: "Bob Smith", friends: ["Anna Jones", "Chris Green"], depth:1 } }'''), + Document.parse('''{ _id: 0, name: "Bob Smith", friends: ["Anna Jones", "Chris Green"], socialNetwork: { + _id: 1, name: "Anna Jones", friends: ["Bob Smith", "Chris Green", "Joe Lee"], depth:0 } }'''), + Document.parse('''{ _id: 0, name: "Bob Smith", friends: ["Anna Jones", "Chris Green"], socialNetwork: { + _id: 2, name: "Chris Green", friends: ["Anna Jones", "Bob Smith"], depth:0 } }'''), + Document.parse('''{ _id: 0, name: "Bob Smith", friends: ["Anna Jones", "Chris Green"], socialNetwork: { + _id: 3, name: "Joe Lee", friends: ["Anna Jones", "Fred Brown" ], depth:1 } }''') + ] + + cleanup: + fromHelper?.drop() + } + + def '$graphLookup with query filter option'() { + given: + def 
fromCollectionName = 'contacts'
+        def fromHelper = getCollectionHelper(new MongoNamespace(getDatabaseName(), fromCollectionName))
+
+        fromHelper.drop()
+
+        fromHelper.insertDocuments(
+                Document.parse('''{ _id: 0, name: "Bob Smith", friends: ["Anna Jones", "Chris Green"],
+                                    hobbies : ["tennis", "unicycling", "golf"] }'''),
+                Document.parse('''{ _id: 1, name: "Anna Jones", friends: ["Bob Smith", "Chris Green", "Joe Lee"],
+                                    hobbies : ["archery", "golf", "woodworking"] }'''),
+                Document.parse('''{ _id: 2, name: "Chris Green", friends: ["Anna Jones", "Bob Smith"],
+                                    hobbies : ["knitting", "frisbee"] }'''),
+                Document.parse('''{ _id: 3, name: "Joe Lee", friends: ["Anna Jones", "Fred Brown"],
+                                    hobbies : [ "tennis", "golf", "topiary" ] }'''),
+                Document.parse('''{ _id: 4, name: "Fred Brown", friends: ["Joe Lee"],
+                                    hobbies : [ "travel", "ceramics", "golf" ] }'''))
+
+
+        def lookupDoc = graphLookup('contacts', new BsonString('$friends'), 'friends', 'name', 'golfers',
+                new GraphLookupOptions()
+                        .restrictSearchWithMatch(eq('hobbies', 'golf')))
+
+        when:
+        def results = fromHelper.aggregate([lookupDoc,
+                                            unwind('$golfers'),
+                                            sort(new Document('_id', 1)
+                                                    .append('golfers._id', 1))])
+
+        then:
+        results.subList(0, 4) == [
+                Document.parse('''{ _id: 0, name: "Bob Smith", friends: ["Anna Jones", "Chris Green"],
+                                    hobbies : ["tennis", "unicycling", "golf"], golfers: {_id: 0, name: "Bob Smith",
+                                    friends: ["Anna Jones", "Chris Green"], hobbies : ["tennis", "unicycling", "golf"] } }'''),
+                Document.parse('''{ _id: 0, name: "Bob Smith", friends: ["Anna Jones", "Chris Green"],
+                                    hobbies: ["tennis", "unicycling", "golf"], golfers:{ _id: 1, name: "Anna Jones",
+                                    friends: ["Bob Smith", "Chris Green", "Joe Lee"], hobbies : ["archery", "golf", "woodworking"] } }'''),
+                Document.parse('''{ _id: 0, name: "Bob Smith", friends: ["Anna Jones", "Chris Green"],
+                                    hobbies: ["tennis", "unicycling", "golf"], golfers: { _id: 3, name: "Joe Lee",
+                                    friends: ["Anna Jones", "Fred Brown"], hobbies : [ "tennis", "golf", "topiary" ] } }'''),
+                Document.parse('''{ _id: 0, name: "Bob Smith", friends: ["Anna Jones", "Chris Green"],
+                                    hobbies: ["tennis", "unicycling", "golf"], golfers:{ _id: 4, name: "Fred Brown", friends: ["Joe Lee"],
+                                    hobbies : [ "travel", "ceramics", "golf" ] } }''')
+        ]
+
+        cleanup:
+        fromHelper?.drop()
+    }
+
+    def '$bucket'() {
+        given:
+        def helper = getCollectionHelper()
+
+        helper.drop()
+
+        helper.insertDocuments(Document.parse('{screenSize: 30}'))
+        helper.insertDocuments(Document.parse('{screenSize: 24}'))
+        helper.insertDocuments(Document.parse('{screenSize: 42}'))
+        helper.insertDocuments(Document.parse('{screenSize: 22}'))
+        helper.insertDocuments(Document.parse('{screenSize: 55}'))
+        helper.insertDocuments(Document.parse('{screenSize: 155}'))
+        helper.insertDocuments(Document.parse('{screenSize: 75}'))
+
+        def bucket = bucket('$screenSize', [0, 24, 32, 50, 70], new BucketOptions()
+                .defaultBucket('monster')
+                .output(sum('count', 1), push('matches', '$screenSize')))
+
+        when:
+        def results = helper.aggregate([sort(new Document('screenSize', 1)), bucket])
+
+        then:
+        results == [
+                Document.parse('{_id: 0, count: 1, matches: [22]}'),
+                Document.parse('{_id: 24, count: 2, matches: [24, 30]}'),
+                Document.parse('{_id: 32, count: 1, matches: [42]}'),
+                Document.parse('{_id: 50, count: 1, matches: [55]}'),
+                Document.parse('{_id: "monster", count: 2, matches: [75, 155]}')
+        ]
+        cleanup:
+        helper?.drop()
+    }
+
+    def '$bucketAuto'() {
+        given:
+        def helper = getCollectionHelper()
+
+        helper.drop()
+
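+        // prices run 2, 4, ..., 200 (100 documents), so ten automatic buckets receive ten documents each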
(1..100).each { + helper.insertDocuments(Document.parse("{price: ${it * 2}}")) + } + + when: + def results = helper.aggregate([bucketAuto('$price', 10)]) + + then: + results[0]._id.min == 2 + results[0].count == 10 + + results[-1]._id.max == 200 + + when: + results = helper.aggregate([bucketAuto('$price', 7)]) + + then: + results[0]._id.min == 2 + results[0].count == 14 + + results[-1]._id.max == 200 + results[-1].count == 16 + + cleanup: + helper?.drop() + } + + def '$bucketAuto with options'() { + given: + def helper = getCollectionHelper() + + helper.drop() + + (0..2000).each { + def document = new Document('price', it * 5.01D) + helper.insertDocuments(document) + } + + when: + def results = helper.aggregate([bucketAuto('$price', 10, new BucketAutoOptions() + .granularity(BucketGranularity.POWERSOF2) + .output(sum('count', 1), avg('avgPrice', '$price')))]) + + then: + results.size() == 5 + results[0].count != null + results[0].avgPrice != null + + cleanup: + helper?.drop() + } + + def '$count'() { + given: + def helper = getCollectionHelper() + + helper.drop() + + def total = 3 + def documents = [] + (1..total).each { + documents.add(new BsonDocument()) + } + helper.insertDocuments(documents) + + when: + def results = helper.aggregate([count()]) + + then: + results[0].count == total + + when: + results = helper.aggregate([count('count')]) + + then: + results[0].count == total + + when: + results = helper.aggregate([count('total')]) + + then: + results[0].total == total + + cleanup: + helper?.drop() + } + + def '$sortByCount'() { + given: + def helper = getCollectionHelper() + + when: + helper.drop() + + helper.insertDocuments(Document.parse('{_id: 0, x: 1}')) + helper.insertDocuments(Document.parse('{_id: 2, x: 1}')) + helper.insertDocuments(Document.parse('{_id: 3, x: 0}')) + + def results = helper.aggregate([sortByCount('$x')]) + + then: + results == [Document.parse('{_id: 1, count: 2}'), + Document.parse('{_id: 0, count: 1}')] + + when: + helper.drop() + + helper.insertDocuments(Document.parse('{_id: 0, x: 1.4}')) + helper.insertDocuments(Document.parse('{_id: 2, x: 1.1}')) + helper.insertDocuments(Document.parse('{_id: 3, x: 0.5}')) + + results = helper.aggregate([sortByCount(new Document('$floor', '$x'))]) + + then: + results == [Document.parse('{_id: 1, count: 2}'), + Document.parse('{_id: 0, count: 1}')] + + cleanup: + helper?.drop() + } + + @IgnoreIf({ serverVersionLessThan(4, 4) }) + def '$accumulator'() { + given: + def helper = getCollectionHelper() + + when: + helper.drop() + helper.insertDocuments(Document.parse('{_id: 1, x: "string"}')) + def init = 'function() { return { x: "test string" } }' + def accumulate = 'function(state) { return state }' + def merge = 'function(state1, state2) { return state1 }' + def accumulatorExpr = accumulator('testString', init, accumulate, merge) + def results1 = helper.aggregate([group('$x', asList(accumulatorExpr))]) + + then: + results1.size() == 1 + results1.contains(Document.parse('{ _id: "string", testString: { x: "test string" } }')) + + when: + helper.drop() + helper.insertDocuments(Document.parse('{_id: 8751, title: "The Banquet", author: "Dante", copies: 2}'), + Document.parse('{_id: 8752, title: "Divine Comedy", author: "Dante", copies: 1}'), + Document.parse('{_id: 8645, title: "Eclogues", author: "Dante", copies: 2}'), + Document.parse('{_id: 7000, title: "The Odyssey", author: "Homer", copies: 10}'), + Document.parse('{_id: 7020, title: "Iliad", author: "Homer", copies: 10}')) + def initFunction = 'function(initCount, initSum) 
{ return { count: parseInt(initCount), sum: parseInt(initSum) } }' + def accumulateFunction = 'function(state, numCopies) { return { count : state.count + 1, sum : state.sum + numCopies } }' + def mergeFunction = 'function(state1, state2) { return { count : state1.count + state2.count, sum : state1.sum + state2.sum } }' + def finalizeFunction = 'function(state) { return (state.sum / state.count) }' + def accumulatorExpression = accumulator('avgCopies', initFunction, [ '0', '0' ], accumulateFunction, + [ '$copies' ], mergeFunction, finalizeFunction) + def results2 = helper.aggregate([group('$author', asList( + new BsonField('minCopies', new Document('$min', '$copies')), accumulatorExpression, + new BsonField('maxCopies', new Document('$max', '$copies'))))]) + + then: + results2.size() == 2 + results2.contains(Document.parse('{_id: "Dante", minCopies: 1, avgCopies: 1.6666666666666667, maxCopies : 2}')) + results2.contains(Document.parse('{_id: "Homer", minCopies: 10, avgCopies: 10.0, maxCopies : 10}')) + + cleanup: + helper?.drop() + } + + def '$addFields'() { + given: + def helper = getCollectionHelper() + + when: + helper.drop() + helper.insertDocuments(Document.parse('{_id: 0, a: 1}')) + def results = helper.aggregate([addFields(new Field('newField', null))]) + + then: + results == [Document.parse('{_id: 0, a: 1, newField: null}')] + + when: + helper.drop() + helper.insertDocuments(Document.parse('{_id: 0, a: 1}')) + results = helper.aggregate([addFields(new Field('newField', 'hello'))]) + + then: + results == [Document.parse('{_id: 0, a: 1, newField: "hello"}')] + + when: + helper.drop() + helper.insertDocuments(Document.parse('{_id: 0, a: 1}')) + results = helper.aggregate([addFields(new Field('b', '$a'))]) + + then: + results == [Document.parse('{_id: 0, a: 1, b: 1}')] + + when: + helper.drop() + helper.insertDocuments(Document.parse('{_id: 0, a: 1}')) + results = helper.aggregate([addFields(new Field('this', '$$CURRENT'))]) + + then: + results == [Document.parse('{_id: 0, a: 1, this: {_id: 0, a: 1}}')] + + when: + helper.drop() + helper.insertDocuments(Document.parse('{_id: 0, a: 1}')) + results = helper.aggregate([addFields(new Field('myNewField', + new Document('c', 3).append('d', 4)))]) + + then: + results == [Document.parse('{_id: 0, a: 1, myNewField: {c: 3, d: 4}}')] + + when: + helper.drop() + helper.insertDocuments(Document.parse('{_id: 0, a: 1}')) + results = helper.aggregate([addFields(new Field('alt3', new Document('$lt', asList('$a', 3))))]) + + then: + results == [Document.parse('{_id: 0, a: 1, alt3: true}')] + + when: + helper.drop() + helper.insertDocuments(Document.parse('{_id: 0, a: 1}')) + results = helper.aggregate([addFields(new Field('b', 3), new Field('c', 5))]) + + then: + results == [Document.parse('{_id: 0, a: 1, b: 3, c: 5}')] + + when: + helper.drop() + helper.insertDocuments(Document.parse('{_id: 0, a: 1}')) + results = helper.aggregate([addFields(new Field('a', [1, 2, 3]))]) + + then: + results == [Document.parse('{_id: 0, a: [1, 2, 3]}')] + + cleanup: + helper?.drop() + } + + def '$set'() { + expect: + aggregate([set(new Field('c', '$y'))]) == [new Document(a).append('c', 'a'), + new Document(b).append('c', 'b'), + new Document(c).append('c', 'c')] + } + + def '$replaceRoot'() { + given: + def helper = getCollectionHelper() + def results = [] + + when: + helper.drop() + helper.insertDocuments(Document.parse('{_id: 0, a1: {b: 1}, a2: 2}')) + results = helper.aggregate([replaceRoot('$a1')]) + + then: + results == [Document.parse('{b: 1}')] + + when: + 
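+        // $replaceRoot promotes the embedded document to the top level, discarding every other field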
helper.drop() + helper.insertDocuments(Document.parse('{_id: 0, a1: {b: {c1: 4, c2: 5}}, a2: 2}')) + results = helper.aggregate([replaceRoot('$a1.b')]) + + then: + results == [Document.parse('{c1: 4, c2: 5}')] + + when: + helper.drop() + helper.insertDocuments(Document.parse('{_id: 0, a1: {b: 1, _id: 7}, a2: 2}')) + results = helper.aggregate([replaceRoot('$a1')]) + + then: + results == [Document.parse('{b: 1, _id: 7}')] + } + + def '$replaceWith'() { + given: + def helper = getCollectionHelper() + def results = [] + + when: + helper.drop() + helper.insertDocuments(Document.parse('{_id: 0, a1: {b: 1}, a2: 2}')) + results = helper.aggregate([replaceWith('$a1')]) + + then: + results == [Document.parse('{b: 1}')] + + when: + helper.drop() + helper.insertDocuments(Document.parse('{_id: 0, a1: {b: {c1: 4, c2: 5}}, a2: 2}')) + results = helper.aggregate([replaceWith('$a1.b')]) + + then: + results == [Document.parse('{c1: 4, c2: 5}')] + + when: + helper.drop() + helper.insertDocuments(Document.parse('{_id: 0, a1: {b: 1, _id: 7}, a2: 2}')) + results = helper.aggregate([replaceWith('$a1')]) + + then: + results == [Document.parse('{b: 1, _id: 7}')] + } + + @IgnoreIf({ serverVersionLessThan(4, 4) }) + def '$unionWith'() { + given: + def coll1Helper = getCollectionHelper(new MongoNamespace(getDatabaseName(), 'coll1')) + def coll2Helper = getCollectionHelper(new MongoNamespace(getDatabaseName(), 'coll2')) + + coll1Helper.drop() + coll2Helper.drop() + + coll1Helper.insertDocuments( + Document.parse('{ "name1" : "almonds" }'), + Document.parse('{ "name1" : "cookies" }')) + + coll2Helper.insertDocuments( + Document.parse('{ "name2" : "cookies" }'), + Document.parse('{ "name2" : "cookies" }'), + Document.parse('{ "name2" : "pecans" }')) + + def pipeline = asList(match(eq('name2', 'cookies')), project(fields(excludeId(), computed('name', '$name2'))), + sort(ascending('name'))) + + when: + def results = coll1Helper.aggregate([project(fields(excludeId(), computed('name', '$name1'))), + unionWith('coll2', pipeline)]) + + then: + results == [ + Document.parse('{ name: "almonds" }'), + Document.parse('{ name: "cookies" }'), + Document.parse('{ name: "cookies" }'), + Document.parse('{ name: "cookies" }') ] + + cleanup: + coll1Helper?.drop() + coll2Helper?.drop() + } + + @IgnoreIf({ serverVersionLessThan(5, 2) }) + def '$setWindowFields'(Bson preSortBy, Object partitionBy, Bson sortBy, WindowOutputField output, List expectedFieldValues) { + given: + ZoneId utc = ZoneId.of(ZoneOffset.UTC.getId()) + getCollectionHelper().drop() + Document[] original = [ + new Document('partitionId', 1) + .append('num1', 1) + .append('num2', -1) + .append('numMissing', 1) + .append('date', LocalDateTime.ofInstant(Instant.ofEpochSecond(1), utc)), + new Document('partitionId', 1) + .append('num1', 2) + .append('num2', -2) + .append('date', LocalDateTime.ofInstant(Instant.ofEpochSecond(2), utc)), + new Document('partitionId', 2) + .append('num1', 3) + .append('num2', -3) + .append('numMissing', 3) + .append('date', LocalDateTime.ofInstant(Instant.ofEpochSecond(3), utc))] + getCollectionHelper().insertDocuments(original) + List stages = [ + setWindowFields(partitionBy, sortBy, output), + sort(ascending('num1')) + ] + if (preSortBy != null) { + stages.add(0, sort(preSortBy)) + } + List actual = aggregate(stages) + List actualFieldValues = actual.stream() + .map { doc -> doc.get('result') } + .collect(toList()) + + expect: + actualFieldValues.size() == expectedFieldValues.size() + for (int i = 0; i < actualFieldValues.size(); i++) { + 
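+            // window functions with set semantics (addToSet, minN, maxN, ...) return elements in unspecified order,
+            // so those rows are compared as sets rather than lists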
Object actualV = actualFieldValues.get(i) + Object expectedV = expectedFieldValues.get(i) + if (actualV instanceof Collection && expectedV instanceof Set) { + assert ((Collection) actualV).toSet() == expectedV + } else { + assert actualV == expectedV + } + } + + where: + preSortBy | partitionBy | sortBy | output | expectedFieldValues + null | null | null | WindowOutputFields + .sum('result', '$num1', null) | [6, 6, 6] + null | null | null | WindowOutputFields + .sum('result', '$num1', documents(UNBOUNDED, UNBOUNDED)) | [6, 6, 6] + null | '$partitionId' | ascending('num1') | WindowOutputFields + .sum('result', '$num1', range(0, UNBOUNDED)) | [3, 2, 3] + null | null | ascending('num1') | WindowOutputFields + .sum('result', '$num1', range(CURRENT, Integer.MAX_VALUE)) | [6, 5, 3] + null | null | ascending('num1') | WindowOutputFields + .of(new BsonField('result', new Document('$sum', '$num1') + .append('window', Windows.of( + new Document('range', asList('current', Integer.MAX_VALUE))).toBsonDocument()))) | [6, 5, 3] + null | null | ascending('date') | WindowOutputFields + .avg('result', '$num1', timeRange(-1, 0, MongoTimeUnit.QUARTER)) | [1, 1.5, 2] + null | null | null | WindowOutputFields + .stdDevSamp('result', '$num1', documents(UNBOUNDED, UNBOUNDED)) | [1.0, 1.0, 1.0] + null | null | ascending('num1') | WindowOutputFields + .stdDevPop('result', '$num1', documents(CURRENT, CURRENT)) | [0, 0, 0] + null | null | ascending('num1') | WindowOutputFields + .min('result', '$num1', documents(-1, 0)) | [1, 1, 2] + null | new Document('gid', '$partitionId') | ascending('num1') | WindowOutputFields + .minN('result', '$num1', + new Document('$cond', new Document('if', new Document('$eq', asList('$gid', 1))) + .append('then', 2).append('else', 2)), + documents(-1, 0)) | [ [1].toSet(), [1, 2].toSet(), [3].toSet() ] + null | null | null | WindowOutputFields + .max('result', '$num1', null) | [3, 3, 3] + null | null | ascending('num1') | WindowOutputFields + .maxN('result', '$num1', 2, documents(-1, 0)) | [ [1].toSet(), [1, 2].toSet(), [2, 3].toSet() ] + null | '$partitionId' | null | WindowOutputFields + .count('result', null) | [2, 2, 1] + null | null | ascending('num1') | WindowOutputFields + .derivative('result', '$num2', documents(UNBOUNDED, UNBOUNDED)) | [-1, -1, -1] + null | null | ascending('date') | WindowOutputFields + .timeDerivative('result', '$num2', documents(UNBOUNDED, UNBOUNDED), MongoTimeUnit.MILLISECOND) | [-0.001, -0.001, -0.001] + null | null | ascending('num1') | WindowOutputFields + .integral('result', '$num2', documents(UNBOUNDED, UNBOUNDED)) | [-4, -4, -4] + null | null | ascending('date') | WindowOutputFields + .timeIntegral('result', '$num2', documents(UNBOUNDED, UNBOUNDED), MongoTimeUnit.SECOND) | [-4, -4, -4] + null | null | null | WindowOutputFields + .covarianceSamp('result', '$num1', '$num2', documents(UNBOUNDED, UNBOUNDED)) | [-1.0, -1.0, -1.0] + null | null | ascending('num1') | WindowOutputFields + .covariancePop('result', '$num1', '$num2', documents(CURRENT, CURRENT)) | [0, 0, 0] + null | null | ascending('num1') | WindowOutputFields + .expMovingAvg('result', '$num1', 1) | [1, 2, 3] + null | null | ascending('num1') | WindowOutputFields + .expMovingAvg('result', '$num1', 0.5) | [1.0, 1.5, 2.25] + null | null | descending('num1') | WindowOutputFields + .push('result', '$num1', documents(UNBOUNDED, CURRENT)) | [ [3, 2, 1], [3, 2], [3] ] + null | null | ascending('num1') | WindowOutputFields + .addToSet('result', '$partitionId', documents(UNBOUNDED, -1)) | [ [], [1], [1] ] 
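+            // documents(UNBOUNDED, -1) ends one document before the current one, hence the empty set on the first row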
+ null | null | ascending('num1') | WindowOutputFields + .first('result', '$num1', documents(UNBOUNDED, UNBOUNDED)) | [ 1, 1, 1 ] + null | null | descending('num1') | WindowOutputFields + .firstN('result', '$num1', 2, documents(UNBOUNDED, UNBOUNDED)) | [ [3, 2], [3, 2], [3, 2] ] + null | null | ascending('num1') | WindowOutputFields + .last('result', '$num1', documents(UNBOUNDED, UNBOUNDED)) | [ 3, 3, 3 ] + null | null | ascending('num1') | WindowOutputFields + .lastN('result', '$num1', 2, documents(UNBOUNDED, UNBOUNDED)) | [ [2, 3], [2, 3], [2, 3] ] + null | null | ascending('num1') | WindowOutputFields + .shift('result', '$num1', -3, 1) | [ 2, 3, -3 ] + null | null | ascending('num1') | WindowOutputFields + .documentNumber('result') | [ 1, 2, 3 ] + null | null | ascending('partitionId') | WindowOutputFields + .rank('result') | [ 1, 1, 3 ] + null | null | ascending('partitionId') | WindowOutputFields + .denseRank('result') | [ 1, 1, 2 ] + null | null | null | WindowOutputFields + .bottom('result', ascending('num1'), '$num1', null) | [ 3, 3, 3 ] + null | new Document('gid', '$partitionId') | descending('num1') | WindowOutputFields + .bottomN('result', ascending('num1'), '$num1', + new Document('$cond', new Document('if', new Document('$eq', asList('$gid', 1))) + .append('then', 2).append('else', 2)), + null) | [ [1, 2], [1, 2], [3] ] + null | null | descending('num1') | WindowOutputFields + .topN('result', ascending('num1'), '$num1', 2, null) | [ [1, 2], [1, 2], [1, 2] ] + null | null | null | WindowOutputFields + .top('result', ascending('num1'), '$num1', null) | [ 1, 1, 1 ] + ascending('num1') | null | null | WindowOutputFields + .locf('result', '$numMissing') | [ 1, 1, 3 ] + null | null | ascending('num1') | WindowOutputFields + .linearFill('result', '$numMissing') | [ 1, 2, 3 ] + } + + @IgnoreIf({ serverVersionLessThan(5, 0) }) + def '$setWindowFields with multiple output'() { + given: + getCollectionHelper().drop() + Document[] original = [new Document('num', 1)] + getCollectionHelper().insertDocuments(original) + List actual = aggregate([ + setWindowFields(null, null, [ + WindowOutputFields.count('count', null), + WindowOutputFields.max('max', '$num', null)]), + project(fields(excludeId()))]) + + expect: + actual.size() == 1 + actual.get(0) == original[0].append('count', 1).append('max', 1) + } + + @IgnoreIf({ serverVersionLessThan(5, 0) }) + def '$setWindowFields with empty output'() { + given: + getCollectionHelper().drop() + Document[] original = [new Document('num', 1)] + getCollectionHelper().insertDocuments(original) + List actual = aggregate([ + setWindowFields(null, null, []), + project(fields(excludeId()))]) + + expect: + actual.size() == 1 + actual.get(0) == original[0] + } + + @IgnoreIf({ serverVersionLessThan(5, 1) }) + def '$densify'(String field, String partitionByField, Bson densifyStage, List expectedFieldValues) { + given: + getCollectionHelper().drop() + Document[] docs = [ + new Document('partitionId', 1) + .append('num', 1) + .append('date', Instant.ofEpochMilli(BigInteger.TWO.pow(32).longValueExact())), + new Document('partitionId', 1) + .append('num', 3) + .append('date', Instant.ofEpochMilli(BigInteger.TWO.pow(33).longValueExact())), + new Document('partitionId', 2) + .append('num', new BsonDecimal128(new Decimal128(new BigDecimal('4.1')))) + .append('date', Instant.ofEpochMilli(BigInteger.TWO.pow(34).longValueExact()))] + getCollectionHelper().insertDocuments(docs) + Bson sortSpec = partitionByField == null ? 
ascending(field) : ascending(partitionByField, field) + List actualFieldValues = aggregate([ + densifyStage, + sort(sortSpec), + project(fields(include(field), exclude('_id')))]) + .stream() + .map { doc -> doc.get(field) } + .map { e -> e instanceof Date ? ((Date) e).toInstant() : e } + .collect(toList()) + + expect: + actualFieldValues == expectedFieldValues + + where: + field | partitionByField | densifyStage | expectedFieldValues + 'num' | null | densify(field, fullRangeWithStep(new Decimal128(BigDecimal.ONE))) | + [1, 2, 3, 4, 4.1] + 'num' | null | densify(field, rangeWithStep(-1.0, 5.0, 1.0)) | + [-1, 0, 1, 2, 3, 4, 4.1] + 'num' | null | densify(field, rangeWithStep(BigDecimal.ONE, BigDecimal.ONE, BigDecimal.ONE)) | + [1, 3, 4.1] + 'num' | 'partitionId' | densify(field, fullRangeWithStep(1), densifyOptions().partitionByFields(partitionByField)) | + [1, 2, 3, 4, 1, 2, 3, 4, 4.1] + 'num' | 'partitionId' | densify(field, partitionRangeWithStep(1), densifyOptions().partitionByFields([partitionByField])) | + [1, 2, 3, 4.1] + 'date' | null | densify(field, rangeWithStep(Instant.EPOCH, Instant.ofEpochMilli(BigInteger.TWO.pow(32).longValueExact()) + .plusMillis(1), + // there is a server bug that prevents using `step` larger than 2^31 - 1 in versions before 6.1 + BigInteger.TWO.pow(31).longValueExact() - 1, MongoTimeUnit.MILLISECOND)) | + [Instant.EPOCH, + Instant.ofEpochMilli(BigInteger.TWO.pow(31).longValueExact() - 1), + Instant.ofEpochMilli(BigInteger.TWO.pow(32).longValueExact() - 2), + Instant.ofEpochMilli(BigInteger.TWO.pow(32).longValueExact()), + Instant.ofEpochMilli(BigInteger.TWO.pow(33).longValueExact()), + Instant.ofEpochMilli(BigInteger.TWO.pow(34).longValueExact())] + } + + @IgnoreIf({ serverVersionLessThan(5, 3) }) + def '$fill'(Bson preSortBy, String field, Bson fillStage, List expectedFieldValues) { + given: + getCollectionHelper().drop() + Document[] docs = [ + new Document('partition', new Document('id', 10)) + .append('_id', 1) + .append('field1', 1) + .append('doc', new Document('field2', 1)), + new Document('partition', new Document('id', 10)) + .append('_id', 2) + .append('doc', null), + new Document('partition', new Document('id', 20)) + .append('_id', 3) + .append('field1', 3) + .append('doc', new Document('field2', 3))] + getCollectionHelper().insertDocuments(docs) + String resultField = 'result' + List stages = [ + fillStage, + project(fields(computed(resultField, '$' + field))), + sort(ascending('_id')) + ] + if (preSortBy != null) { + stages.add(0, sort(preSortBy)) + } + List actualFieldValues = aggregate(stages) + .stream() + .map { doc -> doc.get(resultField) } + .collect(toList()) + + expect: + actualFieldValues == expectedFieldValues + + where: + preSortBy| field | fillStage | expectedFieldValues + null | 'doc.field2' | fill( + fillOptions().partitionByFields('p1', 'p2'), + FillOutputField.value(field, '$partition.id')) | + [1, 10, 3] + null | 'doc.field2' | fill( + fillOptions().sortBy(ascending('_id')), + FillOutputField.linear(field), FillOutputField.locf('newField')) | + [1, 2, 3] + null | 'field1' | fill( + // https://jira.mongodb.org/browse/SERVER-67284 prevents specifying partitionByField('partition.id') + fillOptions().partitionBy(new Document('p', '$partition.id')).sortBy(descending('_id')), + FillOutputField.locf('field1')) | + [1, null, 3] + descending('_id') | 'field1' | fill( + fillOptions(), + FillOutputField.locf('field1')) | + [1, 3, 3] + } +} diff --git a/driver-core/src/test/functional/com/mongodb/client/model/AggregatesTest.java 
b/driver-core/src/test/functional/com/mongodb/client/model/AggregatesTest.java
new file mode 100644
index 00000000000..2b1ad7d5b4b
--- /dev/null
+++ b/driver-core/src/test/functional/com/mongodb/client/model/AggregatesTest.java
@@ -0,0 +1,285 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model;
+
+import com.mongodb.client.model.geojson.Point;
+import com.mongodb.client.model.geojson.Position;
+import com.mongodb.client.model.mql.MqlValues;
+import com.mongodb.lang.Nullable;
+import org.bson.BsonDocument;
+import org.bson.Document;
+import org.bson.conversions.Bson;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+
+import java.math.RoundingMode;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.stream.Stream;
+
+import static com.mongodb.ClusterFixture.serverVersionAtLeast;
+import static com.mongodb.client.model.Accumulators.median;
+import static com.mongodb.client.model.Accumulators.percentile;
+import static com.mongodb.client.model.Aggregates.geoNear;
+import static com.mongodb.client.model.Aggregates.group;
+import static com.mongodb.client.model.Aggregates.unset;
+import static com.mongodb.client.model.GeoNearOptions.geoNearOptions;
+import static com.mongodb.client.model.Sorts.ascending;
+import static com.mongodb.client.model.Windows.Bound.UNBOUNDED;
+import static com.mongodb.client.model.Windows.documents;
+import static java.util.Arrays.asList;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.hasSize;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
+public class AggregatesTest extends OperationTest {
+
+    private static Stream<Arguments> groupWithQuantileSource() {
+        return Stream.of(
+                Arguments.of(percentile("result", "$x", MqlValues.ofNumberArray(0.95), QuantileMethod.approximate()),
+                        asList(3.0), asList(1.0)),
+                Arguments.of(percentile("result", "$x", MqlValues.ofNumberArray(0.95, 0.3), QuantileMethod.approximate()),
+                        asList(3.0, 2.0), asList(1.0, 1.0)),
+                Arguments.of(median("result", "$x", QuantileMethod.approximate()), 2.0d, 1.0d)
+        );
+    }
+
+    @ParameterizedTest
+    @MethodSource("groupWithQuantileSource")
+    public void shouldGroupWithQuantile(final BsonField quantileAccumulator,
+                                        final Object expectedGroup1,
+                                        final Object expectedGroup2) {
+        //given
+        assumeTrue(serverVersionAtLeast(7, 0));
+        getCollectionHelper().insertDocuments("[\n"
+                + "   { _id: 1, x: 1, z: false},\n"
+                + "   { _id: 2, x: 2, z: true },\n"
+                + "   { _id: 3, x: 3, z: true }\n"
+                + "]");
+
+        //when
+        List<Document> results = getCollectionHelper().aggregate(Collections.singletonList(
+                group("$z", quantileAccumulator)), DOCUMENT_DECODER);
+
+        //then
+        assertThat(results, hasSize(2));
+
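+        // one result document per group key: z=true groups x in {2, 3}, z=false groups x = 1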
+        Object result = results.stream()
+                .filter(document -> document.get("_id").equals(true))
+                .findFirst().map(document -> document.get("result")).get();
+
+        assertEquals(expectedGroup1, result);
+
+        result = results.stream()
+                .filter(document -> document.get("_id").equals(false))
+                .findFirst().map(document -> document.get("result")).get();
+
+        assertEquals(expectedGroup2, result);
+    }
+
+    private static Stream<Arguments> setWindowFieldWithQuantileSource() {
+        return Stream.of(
+                Arguments.of(null,
+                        WindowOutputFields.percentile("result", "$num1", asList(0.1, 0.9), QuantileMethod.approximate(),
+                                documents(UNBOUNDED, UNBOUNDED)),
+                        asList(asList(1.0, 3.0), asList(1.0, 3.0), asList(1.0, 3.0))),
+                Arguments.of("$partitionId",
+                        WindowOutputFields.percentile("result", "$num1", asList(0.1, 0.9), QuantileMethod.approximate(), null),
+                        asList(asList(1.0, 2.0), asList(1.0, 2.0), asList(3.0, 3.0))),
+                Arguments.of(null,
+                        WindowOutputFields.median("result", "$num1", QuantileMethod.approximate(), documents(UNBOUNDED, UNBOUNDED)),
+                        asList(2.0, 2.0, 2.0)),
+                Arguments.of("$partitionId",
+                        WindowOutputFields.median("result", "$num1", QuantileMethod.approximate(), null),
+                        asList(1.0, 1.0, 3.0))
+        );
+    }
+
+    @ParameterizedTest
+    @MethodSource("setWindowFieldWithQuantileSource")
+    public void shouldSetWindowFieldWithQuantile(@Nullable final Object partitionBy,
+                                                 final WindowOutputField output,
+                                                 final List<Object> expectedFieldValues) {
+        //given
+        assumeTrue(serverVersionAtLeast(7, 0));
+        Document[] original = new Document[]{
+                new Document("partitionId", 1).append("num1", 1),
+                new Document("partitionId", 1).append("num1", 2),
+                new Document("partitionId", 2).append("num1", 3)
+        };
+        getCollectionHelper().insertDocuments(original);
+
+        //when
+        List<Object> actualFieldValues = aggregateWithWindowFields(partitionBy, output, ascending("num1"));
+
+        //then
+        Assertions.assertEquals(expectedFieldValues, actualFieldValues);
+    }
+
+    @Test
+    public void testUnset() {
+        getCollectionHelper().insertDocuments("[\n"
+                + "   { _id: 1, title: 'Antelope Antics', author: { last:'An', first: 'Auntie' } },\n"
+                + "   { _id: 2, title: 'Bees Babble', author: { last:'Bumble', first: 'Bee' } }\n"
+                + "]");
+
+        assertPipeline(
+                "{ $unset: ['title', 'author.first'] }",
+                unset("title", "author.first"));
+
+        List<Bson> pipeline = assertPipeline(
+                "{ $unset: 'author.first' }",
+                unset("author.first"));
+
+        assertResults(pipeline, "[\n"
+                + "   { _id: 1, title: 'Antelope Antics', author: { last:'An' } },\n"
+                + "   { _id: 2, title: 'Bees Babble', author: { last:'Bumble' } }\n"
+                + "]");
+
+        assertPipeline(
+                "{ $unset: ['title', 'author.first'] }",
+                unset(asList("title", "author.first")));
+
+        assertPipeline(
+                "{ $unset: 'author.first' }",
+                unset(asList("author.first")));
+    }
+
+    @Test
+    public void testGeoNear() {
+        getCollectionHelper().insertDocuments("[\n"
+                + "  {\n"
+                + "     _id: 1,\n"
+                + "     name: 'Central Park',\n"
+                + "     location: { type: 'Point', coordinates: [ -73.97, 40.77 ] },\n"
+                + "     category: 'Parks'\n"
+                + "  },\n"
+                + "  {\n"
+                + "     _id: 2,\n"
+                + "     name: 'Sara D. Roosevelt Park',\n"
+                + "     location: { type: 'Point', coordinates: [ -73.9928, 40.7193 ] },\n"
+                + "     category: 'Parks'\n"
+                + "  },\n"
+                + "  {\n"
+                + "     _id: 3,\n"
+                + "     name: 'Polo Grounds',\n"
+                + "     location: { type: 'Point', coordinates: [ -73.9375, 40.8303 ] },\n"
+                + "     category: 'Stadiums'\n"
+                + "  }\n"
+                + "]");
+        getCollectionHelper().createIndex(BsonDocument.parse("{ location: '2dsphere' }"));
+
+        assertPipeline("{\n"
+                + "   $geoNear: {\n"
+                + "      near: { type: 'Point', coordinates: [ -73.99279 , 40.719296 ] },\n"
+                + "      distanceField: 'dist.calculated'\n"
+                + "   }\n"
+                + "}",
+                geoNear(
+                        new Point(new Position(-73.99279, 40.719296)),
+                        "dist.calculated"
+                ));
+
+        List<Bson> pipeline = assertPipeline("{\n"
+                + "   $geoNear: {\n"
+                + "      near: { type: 'Point', coordinates: [ -73.99279 , 40.719296 ] },\n"
+                + "      distanceField: 'dist.calculated',\n"
+                + "      minDistance: 0,\n"
+                + "      maxDistance: 2,\n"
+                + "      query: { category: 'Parks' },\n"
+                + "      includeLocs: 'dist.location',\n"
+                + "      spherical: true,\n"
+                + "      key: 'location',\n"
+                + "      distanceMultiplier: 10.0\n"
+                + "   }\n"
+                + "}",
+                geoNear(
+                        new Point(new Position(-73.99279, 40.719296)),
+                        "dist.calculated",
+                        geoNearOptions()
+                                .minDistance(0)
+                                .maxDistance(2)
+                                .query(new Document("category", "Parks"))
+                                .includeLocs("dist.location")
+                                .spherical()
+                                .key("location")
+                                .distanceMultiplier(10.0)
+                ));
+
+        assertResults(pipeline, ""
+                + "[{\n"
+                + "   '_id': 2,\n"
+                + "   'name' : 'Sara D. Roosevelt Park',\n"
+                + "   'category' : 'Parks',\n"
+                + "   'location' : {\n"
+                + "      'type' : 'Point',\n"
+                + "      'coordinates' : [ -73.9928, 40.7193 ]\n"
+                + "   },\n"
+                + "   'dist' : {\n"
+                + "      'calculated' : 9.5399,\n"
+                + "      'location' : {\n"
+                + "         'type' : 'Point',\n"
+                + "         'coordinates' : [ -73.9928, 40.7193 ]\n"
+                + "      }\n"
+                + "   }\n"
+                + "}]", 4, RoundingMode.FLOOR);
+    }
+
+    @Test
+    public void testDocuments() {
+        assumeTrue(serverVersionAtLeast(5, 1));
+        Bson stage = Aggregates.documents(asList(
+                Document.parse("{a: 1, b: {$add: [1, 1]} }"),
+                BsonDocument.parse("{a: 3, b: 4}")));
+        assertPipeline(
+                "{$documents: [{a: 1, b: {$add: [1, 1]}}, {a: 3, b: 4}]}",
+                stage);
+
+        List<Bson> pipeline = Arrays.asList(stage);
+        getCollectionHelper().aggregateDb(pipeline);
+
+        assertEquals(
+                parseToList("[{a: 1, b: 2}, {a: 3, b: 4}]"),
+                getCollectionHelper().aggregateDb(pipeline));
+
+        // accepts lists of Documents and BsonDocuments
+        List<Document> documents = Arrays.asList(Document.parse("{a: 1, b: 2}"));
+        assertPipeline("{$documents: [{a: 1, b: 2}]}", Aggregates.documents(documents));
+        List<BsonDocument> bsonDocuments = Arrays.asList(BsonDocument.parse("{a: 1, b: 2}"));
+        assertPipeline("{$documents: [{a: 1, b: 2}]}", Aggregates.documents(bsonDocuments));
+    }
+
+    @Test
+    public void testDocumentsLookup() {
+        assumeTrue(serverVersionAtLeast(5, 1));
+
+        getCollectionHelper().insertDocuments("[{_id: 1, a: 8}, {_id: 2, a: 9}]");
+        Bson documentsStage = Aggregates.documents(asList(Document.parse("{a: 5}")));
+
+        Bson lookupStage = Aggregates.lookup(null, Arrays.asList(documentsStage), "added");
+        assertPipeline(
+                "{'$lookup': {'pipeline': [{'$documents': [{'a': 5}]}], 'as': 'added'}}",
+                lookupStage);
+        assertEquals(
+                parseToList("[{_id:1, a:8, added: [{a: 5}]}, {_id:2, a:9, added: [{a: 5}]}]"),
+                getCollectionHelper().aggregate(Arrays.asList(lookupStage)));
+    }
+}
diff --git a/driver-core/src/test/functional/com/mongodb/client/model/ArrayUpdatesFunctionalSpecification.groovy b/driver-core/src/test/functional/com/mongodb/client/model/ArrayUpdatesFunctionalSpecification.groovy
new file mode 100644
index
00000000000..55ceef36fac --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/client/model/ArrayUpdatesFunctionalSpecification.groovy @@ -0,0 +1,159 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model + +import com.mongodb.OperationFunctionalSpecification +import org.bson.Document +import org.bson.conversions.Bson + +import static com.mongodb.client.model.Updates.addEachToSet +import static com.mongodb.client.model.Updates.addToSet +import static com.mongodb.client.model.Updates.combine +import static com.mongodb.client.model.Updates.popFirst +import static com.mongodb.client.model.Updates.popLast +import static com.mongodb.client.model.Updates.pull +import static com.mongodb.client.model.Updates.pullAll +import static com.mongodb.client.model.Updates.pullByFilter +import static com.mongodb.client.model.Updates.push +import static com.mongodb.client.model.Updates.pushEach +import static com.mongodb.client.model.Updates.unset + +class ArrayUpdatesFunctionalSpecification extends OperationFunctionalSpecification { + def a = new Document('_id', 1).append('x', [1, 2, 3]) + + + def setup() { + getCollectionHelper().insertDocuments(a) + } + + def find() { + find(new Document('_id', 1)) + } + + def find(Bson filter) { + getCollectionHelper().find(filter) + } + + def updateOne(Bson update) { + getCollectionHelper().updateOne(new Document('_id', 1), update) + } + + def updateOne(Bson filter, Bson update, boolean isUpsert) { + getCollectionHelper().updateOne(filter, update, isUpsert) + } + + def 'add to set'() { + when: + updateOne(addToSet('x', 4)) + + then: + find() == [new Document('_id', 1).append('x', [1, 2, 3, 4])] + + when: + updateOne(addToSet('x', 4)) + + then: + find() == [new Document('_id', 1).append('x', [1, 2, 3, 4])] + + when: + updateOne(addEachToSet('x', [4, 5, 6])) + + then: + find() == [new Document('_id', 1).append('x', [1, 2, 3, 4, 5, 6])] + } + + def 'push'() { + when: + updateOne(push('x', 4)) + + then: + find() == [new Document('_id', 1).append('x', [1, 2, 3, 4])] + + when: + updateOne(push('x', 4)) + + then: + find() == [new Document('_id', 1).append('x', [1, 2, 3, 4, 4])] + } + + def 'push with each'() { + when: + updateOne(pushEach('x', [4, 4, 4, 5, 6], new PushOptions())) + + then: + find() == [new Document('_id', 1).append('x', [1, 2, 3, 4, 4, 4, 5, 6])] + + when: + updateOne(pushEach('x', [4, 5, 6], new PushOptions().position(0).slice(5))) + + then: + find() == [new Document('_id', 1).append('x', [4, 5, 6, 1, 2])] + + when: + updateOne(pushEach('x', [], new PushOptions().sort(-1))) + + then: + find() == [new Document('_id', 1).append('x', [6, 5, 4, 2, 1])] + + when: + updateOne(combine(unset('x'), pushEach('scores', [new Document('score', 89), new Document('score', 65)], + new PushOptions().sortDocument(new Document('score', 1))))) + + then: + find() == [new Document('_id', 1).append('scores', [new Document('score', 65), new Document('score', 89)])] + } + + def 
'pull'() { + when: + updateOne(pull('x', 1)) + + then: + find() == [new Document('_id', 1).append('x', [2, 3])] + } + + def 'pullByFilter'() { + when: + updateOne(pullByFilter(Filters.gt('x', 1))) + + then: + find() == [new Document('_id', 1).append('x', [1])] + } + + def 'pullAll'() { + when: + updateOne(pullAll('x', [2, 3])) + + then: + find() == [new Document('_id', 1).append('x', [1])] + } + + def 'pop first'() { + when: + updateOne(popFirst('x')) + + then: + find() == [new Document('_id', 1).append('x', [2, 3])] + } + + def 'pop last'() { + when: + updateOne(popLast('x')) + + then: + find() == [new Document('_id', 1).append('x', [1, 2])] + } +} diff --git a/driver-core/src/test/functional/com/mongodb/client/model/BitwiseUpdatesFunctionalSpecification.groovy b/driver-core/src/test/functional/com/mongodb/client/model/BitwiseUpdatesFunctionalSpecification.groovy new file mode 100644 index 00000000000..2bb033b23be --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/client/model/BitwiseUpdatesFunctionalSpecification.groovy @@ -0,0 +1,96 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model + +import com.mongodb.OperationFunctionalSpecification +import org.bson.Document +import org.bson.conversions.Bson + +import static com.mongodb.client.model.Updates.bitwiseAnd +import static com.mongodb.client.model.Updates.bitwiseOr +import static com.mongodb.client.model.Updates.bitwiseXor + +class BitwiseUpdatesFunctionalSpecification extends OperationFunctionalSpecification { + private static final long LONG_MASK = 0x0fffffffffffffff + private static final int INT_MASK = 0x0ffffffff + private static final int NUM = 13 + + def a = new Document('_id', 1).append('x', NUM) + + + def setup() { + getCollectionHelper().insertDocuments(a) + } + + def find() { + getCollectionHelper().find(new Document('_id', 1)) + } + + + def updateOne(Bson update) { + getCollectionHelper().updateOne(new Document('_id', 1), update) + } + + def 'integer bitwiseAnd'() { + when: + updateOne(bitwiseAnd('x', INT_MASK)) + + then: + find() == [new Document('_id', 1).append('x', NUM & INT_MASK)] + } + + def 'integer bitwiseOr'() { + when: + updateOne(bitwiseOr('x', INT_MASK)) + + then: + find() == [new Document('_id', 1).append('x', NUM | INT_MASK)] + } + + def 'integer bitwiseXor'() { + when: + updateOne(bitwiseXor('x', INT_MASK)) + + then: + find() == [new Document('_id', 1).append('x', NUM ^ INT_MASK)] + } + + def 'long bitwiseAnd'() { + when: + updateOne(bitwiseAnd('x', LONG_MASK)) + + then: + find() == [new Document('_id', 1).append('x', NUM & LONG_MASK)] + } + + def 'long bitwiseOr'() { + when: + updateOne(bitwiseOr('x', LONG_MASK)) + + then: + find() == [new Document('_id', 1).append('x', NUM | LONG_MASK)] + } + + def 'long bitwiseXor'() { + when: + updateOne(bitwiseXor('x', LONG_MASK)) + + then: + find() == [new Document('_id', 1).append('x', NUM ^ LONG_MASK)] + } + +} diff --git 
a/driver-core/src/test/functional/com/mongodb/client/model/FieldSpecification.groovy b/driver-core/src/test/functional/com/mongodb/client/model/FieldSpecification.groovy new file mode 100644 index 00000000000..bd029c9e491 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/client/model/FieldSpecification.groovy @@ -0,0 +1,47 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model + +import spock.lang.Specification + +class FieldSpecification extends Specification { + def 'should validate name'() { + when: + new Field(null, [1, 2, 3]) + + then: + thrown(IllegalArgumentException) + } + + def 'should accept null values'() { + when: + def field = new Field('name', null) + + then: + field.getName() == 'name' + field.getValue() == null + } + + def 'should accept properties'() { + when: + def field = new Field('name', [1, 2, 3]) + + then: + field.getName() == 'name' + field.getValue() == [1, 2, 3] + } +} diff --git a/driver-core/src/test/functional/com/mongodb/client/model/FiltersFunctionalSpecification.groovy b/driver-core/src/test/functional/com/mongodb/client/model/FiltersFunctionalSpecification.groovy new file mode 100644 index 00000000000..ae24f4f7d6d --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/client/model/FiltersFunctionalSpecification.groovy @@ -0,0 +1,341 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model + +import com.mongodb.MongoQueryException +import com.mongodb.OperationFunctionalSpecification +import org.bson.BsonType +import org.bson.Document +import org.bson.conversions.Bson + +import java.util.regex.Pattern + +import static com.mongodb.client.model.Filters.all +import static com.mongodb.client.model.Filters.and +import static com.mongodb.client.model.Filters.bitsAllClear +import static com.mongodb.client.model.Filters.bitsAllSet +import static com.mongodb.client.model.Filters.bitsAnyClear +import static com.mongodb.client.model.Filters.bitsAnySet +import static com.mongodb.client.model.Filters.elemMatch +import static com.mongodb.client.model.Filters.empty +import static com.mongodb.client.model.Filters.eq +import static com.mongodb.client.model.Filters.exists +import static com.mongodb.client.model.Filters.expr +import static com.mongodb.client.model.Filters.gt +import static com.mongodb.client.model.Filters.gte +import static com.mongodb.client.model.Filters.jsonSchema +import static com.mongodb.client.model.Filters.lt +import static com.mongodb.client.model.Filters.lte +import static com.mongodb.client.model.Filters.mod +import static com.mongodb.client.model.Filters.ne +import static com.mongodb.client.model.Filters.nin +import static com.mongodb.client.model.Filters.nor +import static com.mongodb.client.model.Filters.not +import static com.mongodb.client.model.Filters.or +import static com.mongodb.client.model.Filters.regex +import static com.mongodb.client.model.Filters.size +import static com.mongodb.client.model.Filters.text +import static com.mongodb.client.model.Filters.type +import static com.mongodb.client.model.Filters.where + +class FiltersFunctionalSpecification extends OperationFunctionalSpecification { + def a = new Document('_id', 1).append('x', 1) + .append('y', 'a') + .append('a', [1, 2, 3]) + .append('a1', [new Document('c', 1).append('d', 2), new Document('c', 2).append('d', 3)]) + + def b = new Document('_id', 2).append('x', 2) + .append('y', 'b') + .append('a', [3, 4, 5, 6]) + .append('a1', [new Document('c', 2).append('d', 3), new Document('c', 3).append('d', 4)]) + + def c = new Document('_id', 3).append('x', 3) + .append('y', 'c') + .append('z', true) + + def setup() { + getCollectionHelper().insertDocuments(a, b, c) + } + + def 'find'(Bson filter) { + getCollectionHelper().find(filter, new Document('_id', 1)) // sort by _id + } + + def 'eq'() { + expect: + find(eq('x', 1)) == [a] + find(eq('_id', 2)) == [b] + find(eq(2)) == [b] + } + + def '$ne'() { + expect: + find(ne('x', 1)) == [b, c] + } + + def '$not'() { + expect: + find(not(eq('x', 1))) == [b, c] + find(not(gt('x', 1))) == [a] + find(not(regex('y', 'a.*'))) == [b, c] + + when: + def dbref = Document.parse('{$ref: "1", $id: "1"}') + def dbrefDoc = new Document('_id', 4).append('dbref', dbref) + getCollectionHelper().insertDocuments(dbrefDoc) + + then: + find(not(eq('dbref', dbref))) == [a, b, c] + + when: + getCollectionHelper().deleteOne(dbrefDoc) + dbref.put('$db', '1') + dbrefDoc.put('dbref', dbref) + getCollectionHelper().insertDocuments(dbrefDoc) + + then: + find(not(eq('dbref', dbref))) == [a, b, c] + + when: + def subDoc = Document.parse('{x: 1, b: 1}') + getCollectionHelper().insertDocuments(new Document('subDoc', subDoc)) + + then: + find(not(eq('subDoc', subDoc))) == [a, b, c, dbrefDoc] + + when: + find(not(and(eq('x', 1), eq('x', 1)))) + + then: + thrown MongoQueryException + } + + def '$nor'() { + expect: + find(nor(eq('x', 1))) == [b, c] + 
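+        // $nor matches only the documents that fail every listed clause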
find(nor(eq('x', 1), eq('x', 2))) == [c] + find(nor(and(eq('x', 1), eq('y', 'b')))) == [a, b, c] + } + + def '$gt'() { + expect: + find(gt('x', 1)) == [b, c] + } + + def '$lt'() { + expect: + find(lt('x', 3)) == [a, b] + } + + def '$gte'() { + expect: + find(gte('x', 2)) == [b, c] + } + + def '$lte'() { + expect: + find(lte('x', 2)) == [a, b] + } + + def '$exists'() { + expect: + find(exists('z')) == [c] + find(exists('z', false)) == [a, b] + } + + def '$or'() { + expect: + find(or([eq('x', 1)])) == [a] + find(or([eq('x', 1), eq('y', 'b')])) == [a, b] + } + + def 'and'() { + expect: + find(and([eq('x', 1)])) == [a] + find(and([eq('x', 1), eq('y', 'a')])) == [a] + } + + def 'and should duplicate clashing keys'() { + expect: + find(and([eq('x', 1), eq('x', 1)])) == [a] + } + + def 'and should flatten multiple operators for the same key'() { + expect: + find(and([gte('x', 1), lte('x', 2)])) == [a, b] + } + + def 'and should flatten nested'() { + expect: + find(and([and([eq('x', 3), eq('y', 'c')]), eq('z', true)])) == [c] + find(and([and([eq('x', 3), eq('x', 3)]), eq('z', true)])) == [c] + find(and([gt('x', 1), gt('y', 'a')])) == [b, c] + find(and([lt('x', 4), lt('x', 3)])) == [a, b] + } + + def 'explicit $and when using $not'() { + expect: + find(and([lt('x', 3), not(lt('x', 1))])) == [a, b] + find(and([lt('x', 5), gt('x', 0), not(gt('x', 2))])) == [a, b] + find(and([not(lt('x', 2)), lt('x', 4), not(gt('x', 2))])) == [b] + } + + def 'should render $all'() { + expect: + find(all('a', [1, 2])) == [a] + } + + def 'should render $elemMatch'() { + expect: + find(elemMatch('a', new Document('$gte', 2).append('$lte', 2))) == [a] + find(elemMatch('a1', and(eq('c', 1), gte('d', 2)))) == [a] + find(elemMatch('a1', and(eq('c', 2), eq('d', 3)))) == [a, b] + } + + def 'should render $in'() { + expect: + find(Filters.in('a', [0, 1, 2])) == [a] + } + + def 'should render $nin'() { + expect: + find(nin('a', [1, 2])) == [b, c] + } + + def 'should render $mod'() { + expect: + find(mod('x', 2, 0)) == [b] + } + + def 'should render $size'() { + expect: + find(size('a', 4)) == [b] + } + + def 'should render $bitsAllClear'() { + when: + def bitDoc = Document.parse('{_id: 1, bits: 20}') + getCollectionHelper().drop() + getCollectionHelper().insertDocuments(bitDoc) + + then: + find(bitsAllClear('bits', 35)) == [bitDoc] + } + + def 'should render $bitsAllSet'() { + when: + def bitDoc = Document.parse('{_id: 1, bits: 54}') + getCollectionHelper().drop() + getCollectionHelper().insertDocuments(bitDoc) + + then: + find(bitsAllSet('bits', 50)) == [bitDoc] + } + + def 'should render $bitsAnyClear'() { + when: + def bitDoc = Document.parse('{_id: 1, bits: 50}') + getCollectionHelper().drop() + getCollectionHelper().insertDocuments(bitDoc) + + then: + find(bitsAnyClear('bits', 20)) == [bitDoc] + } + + def 'should render $bitsAnySet'() { + when: + def bitDoc = Document.parse('{_id: 1, bits: 20}') + getCollectionHelper().drop() + getCollectionHelper().insertDocuments(bitDoc) + + then: + find(bitsAnySet('bits', 50)) == [bitDoc] + } + + def 'should render $type'() { + expect: + find(type('x', BsonType.INT32)) == [a, b, c] + find(type('x', BsonType.ARRAY)) == [] + } + + def 'should render $type with a string type representation'() { + expect: + find(type('x', 'number')) == [a, b, c] + find(type('x', 'array')) == [] + } + + @SuppressWarnings('deprecated') + def 'should render $text'() { + given: + getCollectionHelper().createIndex(new Document('y', 'text')) + + when: + def textDocument = new Document('_id', 4).append('y', 
'mongoDB for GIANT ideas') + collectionHelper.insertDocuments(textDocument) + + then: + find(text('GIANT')) == [textDocument] + find(text('GIANT', new TextSearchOptions().language('english'))) == [textDocument] + } + + def 'should render $text with 3.2 options'() { + given: + collectionHelper.drop() + getCollectionHelper().createIndex(new Document('desc', 'text'), 'portuguese') + + when: + def textDocument = new Document('_id', 1).append('desc', 'mongodb para idéias GIGANTES') + collectionHelper.insertDocuments(textDocument) + + then: + find(text('idéias')) == [textDocument] + find(text('ideias', new TextSearchOptions())) == [textDocument] + find(text('ideias', new TextSearchOptions().caseSensitive(false).diacriticSensitive(false))) == [textDocument] + find(text('IDéIAS', new TextSearchOptions().caseSensitive(false).diacriticSensitive(true))) == [textDocument] + find(text('ideias', new TextSearchOptions().caseSensitive(true).diacriticSensitive(true))) == [] + find(text('idéias', new TextSearchOptions().language('english'))) == [] + } + + + def 'should render $regex'() { + expect: + find(regex('y', 'a.*')) == [a] + find(regex('y', 'a.*', 'si')) == [a] + find(regex('y', Pattern.compile('a.*'))) == [a] + } + + def 'should render $where'() { + expect: + find(where('Array.isArray(this.a)')) == [a, b] + } + + def '$expr'() { + expect: + find(expr(Document.parse('{ $eq: [ "$x" , 3 ] } '))) == [c] + } + + + def '$jsonSchema'() { + expect: + find(jsonSchema(Document.parse('{ bsonType : "object", properties: { x : {type : "number", minimum : 2} } } '))) == [b, c] + } + + def 'empty matches everything'() { + expect: + find(empty()) == [a, b, c] + } +} diff --git a/driver-core/src/test/functional/com/mongodb/client/model/GeoFiltersFunctionalSpecification.groovy b/driver-core/src/test/functional/com/mongodb/client/model/GeoFiltersFunctionalSpecification.groovy new file mode 100644 index 00000000000..ccfcf3712fd --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/client/model/GeoFiltersFunctionalSpecification.groovy @@ -0,0 +1,73 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model + +import com.mongodb.OperationFunctionalSpecification +import org.bson.Document +import org.bson.conversions.Bson + +import static com.mongodb.client.model.Filters.geoWithinBox +import static com.mongodb.client.model.Filters.geoWithinCenter +import static com.mongodb.client.model.Filters.geoWithinCenterSphere +import static com.mongodb.client.model.Filters.geoWithinPolygon +import static com.mongodb.client.model.Filters.near +import static com.mongodb.client.model.Filters.nearSphere + +class GeoFiltersFunctionalSpecification extends OperationFunctionalSpecification { + def firstPoint = new Document('_id', 1).append('geo', [1d, 1d]) + def secondPoint = new Document('_id', 2).append('geo', [45d, 2d]) + def thirdPoint = new Document('_id', 3).append('geo', [3d, 3d]) + + def setup() { + getCollectionHelper().createIndex(new Document('geo', '2d')) + getCollectionHelper().insertDocuments(firstPoint, secondPoint, thirdPoint) + } + + def 'find'(Bson filter) { + getCollectionHelper().find(filter, new Document('_id', 1)) // sort by _id + } + + def '$near'() { + expect: + find(near('geo', 1.01d, 1.01d, 0.1d, 0.0d)) == [firstPoint] + } + + def '$nearSphere'() { + expect: + find(nearSphere('geo', 1.01d, 1.01d, 0.1d, 0.0d)) == [firstPoint, thirdPoint] + } + + def '$geoWithin $box'() { + expect: + find(geoWithinBox('geo', 0d, 0d, 4d, 4d)) == [firstPoint, thirdPoint] + } + + def '$geoWithin $polygon'() { + expect: + find(geoWithinPolygon('geo', [[0d, 0d], [0d, 4d], [4d, 4d], [4d, 0d]])) == [firstPoint, thirdPoint] + } + + def '$geoWithin $center'() { + expect: + find(geoWithinCenter('geo', 2d, 2d, 4d)) == [firstPoint, thirdPoint] + } + + def '$geoWithin $centerSphere'() { + expect: + find(geoWithinCenterSphere('geo', 2d, 2d, 4d)) == [firstPoint, secondPoint, thirdPoint] + } +} diff --git a/driver-core/src/test/functional/com/mongodb/client/model/GeoJsonFiltersFunctionalSpecification.groovy b/driver-core/src/test/functional/com/mongodb/client/model/GeoJsonFiltersFunctionalSpecification.groovy new file mode 100644 index 00000000000..6fb0cc9df95 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/client/model/GeoJsonFiltersFunctionalSpecification.groovy @@ -0,0 +1,77 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model + +import com.mongodb.OperationFunctionalSpecification +import com.mongodb.client.model.geojson.Point +import com.mongodb.client.model.geojson.Polygon +import com.mongodb.client.model.geojson.Position +import org.bson.Document +import org.bson.conversions.Bson + +import static com.mongodb.client.model.Filters.geoIntersects +import static com.mongodb.client.model.Filters.geoWithin +import static com.mongodb.client.model.Filters.near +import static com.mongodb.client.model.Filters.nearSphere +import static com.mongodb.client.model.geojson.NamedCoordinateReferenceSystem.CRS_84 +import static com.mongodb.client.model.geojson.NamedCoordinateReferenceSystem.EPSG_4326 + +class GeoJsonFiltersFunctionalSpecification extends OperationFunctionalSpecification { + def firstPoint = new Document('_id', 1).append('geo', Document.parse(new Point(CRS_84, new Position(1d, 1d)).toJson())) + def secondPoint = new Document('_id', 2).append('geo', Document.parse(new Point(EPSG_4326, new Position(2d, 2d)).toJson())) + def thirdPoint = new Document('_id', 3).append('geo', Document.parse(new Point(new Position(3d, 3d)).toJson())) + def firstPolygon = new Document('_id', 4).append('geo', Document.parse(new Polygon([new Position(2d, 2d), new Position(6d, 2d), + new Position(6d, 6d), new Position(2d, 6d), + new Position(2d, 2d)]).toJson())) + + def setup() { + getCollectionHelper().createIndex(new Document('geo', '2dsphere')) + getCollectionHelper().insertDocuments(firstPoint, secondPoint, thirdPoint, firstPolygon) + } + + def 'find'(Bson filter) { + getCollectionHelper().find(filter, new Document('_id', 1)) // sort by _id + } + + def '$geoWithin'() { + given: + def polygon = new Polygon([new Position(0d, 0d), new Position(4d, 0d), new Position(4d, 4d), new Position(0d, 4d), + new Position(0d, 0d)]) + + expect: + find(geoWithin('geo', polygon)) == [firstPoint, secondPoint, thirdPoint] + } + + def '$geoIntersects'() { + given: + def polygon = new Polygon([new Position(0d, 0d), new Position(4d, 0d), new Position(4d, 4d), new Position(0d, 4d), + new Position(0d, 0d)]) + + expect: + find(geoIntersects('geo', polygon)) == [firstPoint, secondPoint, thirdPoint, firstPolygon] + } + + def '$near'() { + expect: + find(near('geo', new Point(new Position(1.01d, 1.01d)), 10000d, null)) == [firstPoint] + } + + def '$nearSphere'() { + expect: + find(nearSphere('geo', new Point(new Position(1.01d, 1.01d)), 10000d, null)) == [firstPoint] + } +} diff --git a/driver-core/src/test/functional/com/mongodb/client/model/IndexesFunctionalSpecification.groovy b/driver-core/src/test/functional/com/mongodb/client/model/IndexesFunctionalSpecification.groovy new file mode 100644 index 00000000000..2058d2cd197 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/client/model/IndexesFunctionalSpecification.groovy @@ -0,0 +1,137 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model + +import com.mongodb.OperationFunctionalSpecification + +import static com.mongodb.client.model.Indexes.ascending +import static com.mongodb.client.model.Indexes.compoundIndex +import static com.mongodb.client.model.Indexes.descending +import static com.mongodb.client.model.Indexes.geo2d +import static com.mongodb.client.model.Indexes.geo2dsphere +import static com.mongodb.client.model.Indexes.hashed +import static com.mongodb.client.model.Indexes.text +import static org.bson.BsonDocument.parse + +class IndexesFunctionalSpecification extends OperationFunctionalSpecification { + + def 'ascending'() { + when: + getCollectionHelper().createIndex(ascending('x')) + + then: + getCollectionHelper().listIndexes()*.get('key').contains(parse('{x : 1}')) + + when: + getCollectionHelper().createIndex(ascending('x', 'y')) + + then: + getCollectionHelper().listIndexes()*.get('key').contains(parse('{x : 1, y: 1}')) + + when: + getCollectionHelper().createIndex(ascending(['a', 'b'])) + + then: + getCollectionHelper().listIndexes()*.get('key').contains(parse('{a : 1, b: 1}')) + } + + def 'descending'() { + when: + getCollectionHelper().createIndex(descending('x')) + + then: + getCollectionHelper().listIndexes()*.get('key').contains(parse('{x : -1}')) + + when: + getCollectionHelper().createIndex(descending('x', 'y')) + + then: + getCollectionHelper().listIndexes()*.get('key').contains(parse('{x : -1, y: -1}')) + + when: + getCollectionHelper().createIndex(descending(['a', 'b'])) + + then: + getCollectionHelper().listIndexes()*.get('key').contains(parse('{a : -1, b: -1}')) + } + + def 'geo2dsphere'() { + when: + getCollectionHelper().createIndex(geo2dsphere('x')) + + then: + getCollectionHelper().listIndexes()*.get('key').contains(parse('{x : "2dsphere"}')) + + when: + getCollectionHelper().createIndex(geo2dsphere('x', 'y')) + + then: + getCollectionHelper().listIndexes()*.get('key').contains(parse('{x : "2dsphere", y: "2dsphere"}')) + + when: + getCollectionHelper().createIndex(geo2dsphere(['a', 'b'])) + + then: + getCollectionHelper().listIndexes()*.get('key').contains(parse('{a : "2dsphere", b: "2dsphere"}')) + } + + def 'geo2d'() { + when: + getCollectionHelper().createIndex(geo2d('x')) + + then: + getCollectionHelper().listIndexes()*.get('key').contains(parse('{x : "2d"}')) + } + + def 'text helper'() { + when: + getCollectionHelper().createIndex(text('x')) + + then: + getCollectionHelper().listIndexes()*.get('key').contains(parse('{_fts: "text", _ftsx: 1}')) + } + + def 'text wildcard'() { + when: + getCollectionHelper().createIndex(text()) + + then: + getCollectionHelper().listIndexes()*.get('key').contains(parse('{_fts: "text", _ftsx: 1}')) + } + + def 'hashed'() { + when: + getCollectionHelper().createIndex(hashed('x')) + + then: + getCollectionHelper().listIndexes()*.get('key').contains(parse('{x : "hashed"}')) + } + + def 'compoundIndex'() { + when: + getCollectionHelper().createIndex(compoundIndex(ascending('a'), descending('b'))) + + then: + getCollectionHelper().listIndexes()*.get('key').contains(parse('{a : 1, b : -1}')) + + when: + getCollectionHelper().createIndex(compoundIndex([ascending('x'), descending('y')])) + + then: + getCollectionHelper().listIndexes()*.get('key').contains(parse('{x : 1, y : -1}')) + } +} diff --git a/driver-core/src/test/functional/com/mongodb/client/model/OperationTest.java b/driver-core/src/test/functional/com/mongodb/client/model/OperationTest.java new file mode 100644 index 00000000000..aa4e5cbdf23 --- /dev/null +++ 
b/driver-core/src/test/functional/com/mongodb/client/model/OperationTest.java
@@ -0,0 +1,188 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model;
+
+import com.mongodb.ClusterFixture;
+import com.mongodb.MongoNamespace;
+import com.mongodb.async.FutureResultCallback;
+import com.mongodb.client.test.CollectionHelper;
+import com.mongodb.internal.connection.ServerHelper;
+import com.mongodb.lang.Nullable;
+import org.bson.BsonArray;
+import org.bson.BsonDocument;
+import org.bson.BsonDouble;
+import org.bson.BsonValue;
+import org.bson.Document;
+import org.bson.codecs.BsonDocumentCodec;
+import org.bson.codecs.DecoderContext;
+import org.bson.codecs.DocumentCodec;
+import org.bson.conversions.Bson;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+
+import java.math.BigDecimal;
+import java.math.RoundingMode;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Consumer;
+import java.util.stream.Collectors;
+
+import static com.mongodb.ClusterFixture.TIMEOUT;
+import static com.mongodb.ClusterFixture.checkReferenceCountReachesTarget;
+import static com.mongodb.ClusterFixture.getAsyncBinding;
+import static com.mongodb.ClusterFixture.getBinding;
+import static com.mongodb.ClusterFixture.getPrimary;
+import static com.mongodb.MongoClientSettings.getDefaultCodecRegistry;
+import static com.mongodb.client.model.Aggregates.setWindowFields;
+import static com.mongodb.client.model.Aggregates.sort;
+import static java.util.stream.Collectors.toList;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
+public abstract class OperationTest {
+
+    protected static final DocumentCodec DOCUMENT_DECODER = new DocumentCodec();
+
+    @BeforeEach
+    public void beforeEach() {
+        assumeTrue(ServerHelper.checkPoolCount(getPrimary()) == 0, "Sync Pool count not zero");
+        assumeTrue(ServerHelper.checkAsyncPoolCount(getPrimary()) == 0, "Async Pool count not zero");
+        CollectionHelper.drop(getNamespace());
+    }
+
+    @AfterEach
+    public void afterEach() {
+        CollectionHelper.drop(getNamespace());
+        checkReferenceCountReachesTarget(getBinding(), 1);
+        checkReferenceCountReachesTarget(getAsyncBinding(), 1);
+        ServerHelper.checkPool(getPrimary());
+    }
+
+    protected CollectionHelper<BsonDocument> getCollectionHelper() {
+        return getCollectionHelper(getNamespace());
+    }
+
+    private CollectionHelper<BsonDocument> getCollectionHelper(final MongoNamespace namespace) {
+        return new CollectionHelper<>(new BsonDocumentCodec(), namespace);
+    }
+
+    protected String getDatabaseName() {
+        return ClusterFixture.getDefaultDatabaseName();
+    }
+
+    protected String getCollectionName() {
+        return "test";
+    }
+
+    protected MongoNamespace getNamespace() {
+        return new MongoNamespace(getDatabaseName(), getCollectionName());
+    }
+
+    static List<BsonDocument> parseToList(final String s) {
+        return BsonArray.parse(s).stream().map(v -> toBsonDocument(v.asDocument())).collect(Collectors.toList());
+    }
+
+    public static BsonDocument toBsonDocument(final BsonDocument bsonDocument) {
+        return getDefaultCodecRegistry().get(BsonDocument.class).decode(bsonDocument.asBsonReader(), DecoderContext.builder().build());
+    }
+
+    protected List<Bson> assertPipeline(final String stageAsString, final Bson stage) {
+        List<Bson> pipeline = Collections.singletonList(stage);
+        return assertPipeline(stageAsString, pipeline);
+    }
+
+    protected List<Bson> assertPipeline(final String stageAsString, final List<Bson> pipeline) {
+        BsonDocument expectedStage = BsonDocument.parse(stageAsString);
+        assertEquals(expectedStage, pipeline.get(0).toBsonDocument(BsonDocument.class, getDefaultCodecRegistry()));
+        return pipeline;
+    }
+
+    protected void assertResults(final List<Bson> pipeline, final String expectedResultsAsString) {
+        List<BsonDocument> expectedResults = parseToList(expectedResultsAsString);
+        List<BsonDocument> results = getCollectionHelper().aggregate(pipeline);
+        assertEquals(expectedResults, results);
+    }
+
+    protected void assertResults(final List<Bson> pipeline, final String expectedResultsAsString,
+                                 final int scale, final RoundingMode roundingMode) {
+        List<BsonDocument> expectedResults = parseToList(expectedResultsAsString);
+        List<BsonDocument> results = getCollectionHelper().aggregate(pipeline);
+        assertEquals(adjustScale(expectedResults, scale, roundingMode), adjustScale(results, scale, roundingMode));
+    }
+
+    private static List<BsonDocument> adjustScale(final List<BsonDocument> documents, final int scale, final RoundingMode roundingMode) {
+        documents.replaceAll(value -> adjustScale(value, scale, roundingMode).asDocument());
+        return documents;
+    }
+
+    private static BsonValue adjustScale(final BsonValue value, final int scale, final RoundingMode roundingMode) {
+        if (value.isDouble()) {
+            double scaledDoubleValue = BigDecimal.valueOf(value.asDouble().doubleValue())
+                    .setScale(scale, roundingMode)
+                    .doubleValue();
+            return new BsonDouble(scaledDoubleValue);
+        } else if (value.isDocument()) {
+            for (Map.Entry<String, BsonValue> entry : value.asDocument().entrySet()) {
+                entry.setValue(adjustScale(entry.getValue(), scale, roundingMode));
+            }
+        } else if (value.isArray()) {
+            BsonArray array = value.asArray();
+            for (int i = 0; i < array.size(); i++) {
+                array.set(i, adjustScale(array.get(i), scale, roundingMode));
+            }
+        }
+        return value;
+    }
+
+    protected List<Object> aggregateWithWindowFields(@Nullable final Object partitionBy,
+                                                     final WindowOutputField output,
+                                                     final Bson sortSpecification) {
+        List<Bson> stages = new ArrayList<>();
+        stages.add(setWindowFields(partitionBy, null, output));
+        stages.add(sort(sortSpecification));
+
+        List<Document> actual = getCollectionHelper().aggregate(stages, DOCUMENT_DECODER);
+
+        return actual.stream()
+                .map(doc -> doc.get("result"))
+                .collect(toList());
+    }
+
+    protected <T> void ifNotNull(@Nullable final T maybeNull, final Consumer<T> consumer) {
+        if (maybeNull != null) {
+            consumer.accept(maybeNull);
+        }
+    }
+
+    protected void sleep(final long ms) {
+        try {
+            Thread.sleep(ms);
+        } catch (InterruptedException e) {
+            Thread.currentThread().interrupt();
+            throw new RuntimeException(e);
+        }
+    }
+
+    protected <T> T block(final Consumer<FutureResultCallback<T>> consumer) {
+        FutureResultCallback<T> cb = new FutureResultCallback<>();
+        consumer.accept(cb);
+        return cb.get(TIMEOUT, TimeUnit.SECONDS);
+    }
+}
diff --git a/driver-core/src/test/functional/com/mongodb/client/model/ProjectionFunctionalSpecification.groovy b/driver-core/src/test/functional/com/mongodb/client/model/ProjectionFunctionalSpecification.groovy
new file mode
100644 index 00000000000..f19c5e848c0 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/client/model/ProjectionFunctionalSpecification.groovy @@ -0,0 +1,121 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model + + +import com.mongodb.MongoQueryException +import com.mongodb.OperationFunctionalSpecification +import org.bson.Document +import org.bson.conversions.Bson + +import static com.mongodb.client.model.Filters.and +import static com.mongodb.client.model.Filters.eq +import static com.mongodb.client.model.Filters.text +import static com.mongodb.client.model.Projections.elemMatch +import static com.mongodb.client.model.Projections.exclude +import static com.mongodb.client.model.Projections.excludeId +import static com.mongodb.client.model.Projections.fields +import static com.mongodb.client.model.Projections.include +import static com.mongodb.client.model.Projections.metaTextScore +import static com.mongodb.client.model.Projections.slice + +class ProjectionFunctionalSpecification extends OperationFunctionalSpecification { + def a = new Document('_id', 1).append('x', 'coffee').append('y', [new Document('a', 1).append('b', 2), + new Document('a', 2).append('b', 3), + new Document('a', 3).append('b', 4)]) + def aYSlice1 = new Document('_id', 1).append('x', 'coffee').append('y', [new Document('a', 1).append('b', 2)]) + def aYSlice12 = new Document('_id', 1).append('x', 'coffee').append('y', [new Document('a', 2).append('b', 3), + new Document('a', 3).append('b', 4)]) + def aNoY = new Document('_id', 1).append('x', 'coffee') + def aId = new Document('_id', 1) + def aNoId = new Document().append('x', 'coffee').append('y', [new Document('a', 1).append('b', 2), + new Document('a', 2).append('b', 3), + new Document('a', 3).append('b', 4)]) + def aWithScore = new Document('_id', 1).append('x', 'coffee').append('y', [new Document('a', 1).append('b', 2), + new Document('a', 2).append('b', 3), + new Document('a', 3).append('b', 4)]) + .append('score', 1.0) + + def setup() { + getCollectionHelper().insertDocuments(a) + } + + def 'find'(Bson projection) { + getCollectionHelper().find(null, null, projection) + } + + def 'find'(Bson filter, Bson projection) { + getCollectionHelper().find(filter, null, projection) + } + + def 'include'() { + expect: + find(include('x')) == [aNoY] + find(include('x', 'y')) == [a] + find(include(['x', 'y', 'x'])) == [a] + } + + def 'exclude'() { + expect: + find(exclude('y')) == [aNoY] + find(exclude('x', 'y')) == [aId] + find(exclude(['x', 'y', 'x'])) == [aId] + } + + def 'excludeId helper'() { + expect: + find(excludeId()) == [aNoId] + } + + def 'firstElem'() { + expect: + find(new Document('y', new Document('$elemMatch', new Document('a', 1).append('b', 2))), + fields(include('x'), elemMatch('y'))) == [aYSlice1] + } + + def 'elemMatch'() { + expect: + find(fields(include('x'), elemMatch('y', and(eq('a', 1), eq('b', 2))))) == [aYSlice1] + } + + def 'slice'() { + expect: 
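+        // one-argument slice keeps the first n elements; two arguments act as (skip, limit)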
+ find(slice('y', 1)) == [aYSlice1] + find(slice('y', 1, 2)) == [aYSlice12] + } + + def 'metaTextScore'() { + given: + getCollectionHelper().createIndex(new Document('x', 'text')) + + expect: + find(text('coffee'), metaTextScore('score')) == [aWithScore] + } + + def 'combine fields'() { + expect: + find(fields(include('x', 'y'), exclude('_id'))) == [aNoId] + } + + def 'combine fields illegally'() { + when: + find(fields(include('x', 'y'), exclude('y'))) == [aNoY] + + then: + thrown(MongoQueryException) + } +} diff --git a/driver-core/src/test/functional/com/mongodb/client/model/SortsFunctionalSpecification.groovy b/driver-core/src/test/functional/com/mongodb/client/model/SortsFunctionalSpecification.groovy new file mode 100644 index 00000000000..af5788f5052 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/client/model/SortsFunctionalSpecification.groovy @@ -0,0 +1,83 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model + +import com.mongodb.OperationFunctionalSpecification +import org.bson.Document +import org.bson.conversions.Bson + +import static com.mongodb.client.model.Sorts.ascending +import static com.mongodb.client.model.Sorts.descending +import static com.mongodb.client.model.Sorts.metaTextScore +import static com.mongodb.client.model.Sorts.orderBy + +class SortsFunctionalSpecification extends OperationFunctionalSpecification { + def a = new Document('_id', 1).append('x', 1) + .append('y', 'bear') + + def b = new Document('_id', 2).append('x', 1) + .append('y', 'albatross') + + def c = new Document('_id', 3).append('x', 2) + .append('y', 'cat') + + def setup() { + getCollectionHelper().insertDocuments(a, b, c) + } + + def 'find'(Bson sort) { + getCollectionHelper().find(new Document(), sort) + } + + def 'find'(Bson sort, Bson projection) { + find(new Document(), sort, projection) + } + + def 'find'(Bson filter, Bson sort, Bson projection) { + getCollectionHelper().find(filter, sort, projection) + } + + def 'ascending'() { + expect: + find(ascending('_id')) == [a, b, c] + find(ascending('y')) == [b, a, c] + find(ascending('x', 'y')) == [b, a, c] + } + + def 'descending'() { + expect: + find(descending('_id')) == [c, b, a] + find(descending('y')) == [c, a, b] + find(descending('x', 'y')) == [c, a, b] + } + + def 'metaTextScore'() { + given: + getCollectionHelper().createIndex(new Document('y', 'text')) + + expect: + find(new Document('$text', new Document('$search', 'bear')), metaTextScore('score'), + new Document('score', new Document('$meta', 'textScore')))*.containsKey('score') + } + + def 'orderBy'() { + expect: + find(orderBy([ascending('x'), descending('y')])) == [a, b, c] + find(orderBy(ascending('x'), descending('y'), descending('x'))) == [c, a, b] + } + +} diff --git a/driver-core/src/test/functional/com/mongodb/client/model/TimeSeriesOptionsTest.java b/driver-core/src/test/functional/com/mongodb/client/model/TimeSeriesOptionsTest.java new file mode 100644 index 
00000000000..4f38da79a3d
--- /dev/null
+++ b/driver-core/src/test/functional/com/mongodb/client/model/TimeSeriesOptionsTest.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model;
+
+import com.mongodb.lang.Nullable;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Stream;
+
+import static org.junit.jupiter.api.Assertions.assertAll;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.params.provider.Arguments.arguments;
+
+class TimeSeriesOptionsTest {
+
+    private TimeSeriesOptions timeSeriesOptions;
+
+    @BeforeEach
+    void setUp() {
+        timeSeriesOptions = new TimeSeriesOptions("test");
+    }
+
+    @Test
+    void shouldThrowErrorWhenGranularityIsAlreadySet() {
+        //given
+        timeSeriesOptions.granularity(TimeSeriesGranularity.SECONDS);
+
+        //when & then
+        assertAll(
+                () -> assertThrows(IllegalStateException.class, () -> timeSeriesOptions.bucketRounding(1L, TimeUnit.SECONDS)),
+                () -> assertThrows(IllegalStateException.class, () -> timeSeriesOptions.bucketMaxSpan(1L, TimeUnit.SECONDS))
+        );
+    }
+
+    @Test
+    void shouldThrowErrorWhenGetWithNullParameter() {
+        assertAll(
+                () -> assertThrows(IllegalArgumentException.class, () -> timeSeriesOptions.getBucketMaxSpan(null)),
+                () -> assertThrows(IllegalArgumentException.class, () -> timeSeriesOptions.getBucketRounding(null))
+        );
+    }
+
+    @ParameterizedTest
+    @MethodSource("args")
+    void shouldThrowErrorWhenInvalidArgumentProvided(@Nullable final Long valueToSet, @Nullable final TimeUnit timeUnit) {
+        assertAll(
+                () -> assertThrows(IllegalArgumentException.class, () -> timeSeriesOptions.bucketRounding(valueToSet, timeUnit)),
+                () -> assertThrows(IllegalArgumentException.class, () -> timeSeriesOptions.bucketMaxSpan(valueToSet, timeUnit))
+        );
+    }
+
+    private static Stream<Arguments> args() {
+        return Stream.of(
+                arguments(1L, null),
+                arguments(null, null),
+                arguments(1L, TimeUnit.MILLISECONDS)
+        );
+    }
+}
diff --git a/driver-core/src/test/functional/com/mongodb/client/model/UpdatesFunctionalSpecification.groovy b/driver-core/src/test/functional/com/mongodb/client/model/UpdatesFunctionalSpecification.groovy
new file mode 100644
index 00000000000..17dfa35c5ec
--- /dev/null
+++ b/driver-core/src/test/functional/com/mongodb/client/model/UpdatesFunctionalSpecification.groovy
@@ -0,0 +1,195 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model + +import com.mongodb.OperationFunctionalSpecification +import org.bson.BsonTimestamp +import org.bson.Document +import org.bson.conversions.Bson + +import static com.mongodb.client.model.Updates.combine +import static com.mongodb.client.model.Updates.currentDate +import static com.mongodb.client.model.Updates.currentTimestamp +import static com.mongodb.client.model.Updates.inc +import static com.mongodb.client.model.Updates.max +import static com.mongodb.client.model.Updates.min +import static com.mongodb.client.model.Updates.mul +import static com.mongodb.client.model.Updates.rename +import static com.mongodb.client.model.Updates.set +import static com.mongodb.client.model.Updates.setOnInsert +import static com.mongodb.client.model.Updates.unset + +class UpdatesFunctionalSpecification extends OperationFunctionalSpecification { + def a = new Document('_id', 1).append('x', 1) + + + def setup() { + getCollectionHelper().insertDocuments(a) + } + + def find() { + find(new Document('_id', 1)) + } + + def find(Bson filter) { + getCollectionHelper().find(filter) + } + + def updateOne(Bson update) { + getCollectionHelper().updateOne(new Document('_id', 1), update) + } + + def updateOne(Bson filter, Bson update, boolean isUpsert) { + getCollectionHelper().updateOne(filter, update, isUpsert) + } + + def 'set'() { + when: + updateOne(set('x', 5)) + + then: + find() == [new Document('_id', 1).append('x', 5)] + } + + def 'setOnInsert'() { + when: + updateOne(setOnInsert('y', 5)) + + then: + find() == [a] + + when: + updateOne(new Document('_id', 2), setOnInsert('y', 5), true) + + then: + find(new Document('_id', 2)) == [new Document('_id', 2).append('y', 5)] + + when: + updateOne(new Document('_id', 3), setOnInsert(Document.parse('{a: 1, b: "two"}')), true) + + then: + find(new Document('_id', 3)) == [Document.parse('{_id: 3, a: 1, b: "two"}')] + + when: + updateOne(new Document('_id', 3), setOnInsert(null), true) + + then: + thrown IllegalArgumentException + } + + def 'unset'() { + when: + updateOne(unset('x')) + + then: + find() == [new Document('_id', 1)] + } + + def 'rename'() { + when: + updateOne(rename('x', 'y')) + + then: + find() == [new Document('_id', 1).append('y', 1)] + } + + def 'inc'() { + when: + updateOne(inc('x', 5)) + + then: + find() == [new Document('_id', 1).append('x', 6)] + + when: + updateOne(inc('x', 5L)) + + then: + find() == [new Document('_id', 1).append('x', 11)] + + when: + updateOne(inc('x', 3.4d)) + + then: + find() == [new Document('_id', 1).append('x', 14.4)] + } + + def 'mul'() { + when: + updateOne(mul('x', 5)) + + then: + find() == [new Document('_id', 1).append('x', 5)] + + when: + updateOne(mul('x', 5L)) + + then: + find() == [new Document('_id', 1).append('x', 25)] + + when: + updateOne(mul('x', 3.5d)) + + then: + find() == [new Document('_id', 1).append('x', 87.5)] + } + + def 'min'() { + when: + updateOne(min('x', -1)) + + then: + find() == [new Document('_id', 1).append('x', -1)] + } + + def 'max'() { + when: + updateOne(max('x', 5)) + + then: + find() == [new Document('_id', 1).append('x', 5)] + } + 
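+    // note on the two features above: $min and $max only write when the operand beats the
+    // stored value, so with x == 1, updateOne(max('x', 0)) would leave the document unchanged.
+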
+ def 'currentDate'() { + when: + updateOne(currentDate('y')) + + then: + find()[0].get('y').getClass() == Date + + when: + updateOne(currentTimestamp('z')) + + then: + find()[0].get('z').getClass() == BsonTimestamp + } + + def 'combine single operator'() { + when: + updateOne(combine(set('x', 5), set('y', 6))) + + then: + find() == [new Document('_id', 1).append('x', 5).append('y', 6)] + } + + def 'combine multiple operators'() { + when: + updateOne(combine(set('a', 5), set('b', 6), inc('x', 3), inc('y', 5))) + + then: + find() == [new Document('_id', 1).append('a', 5).append('b', 6).append('x', 4).append('y', 5)] + } +} diff --git a/driver-core/src/test/functional/com/mongodb/client/model/mql/AbstractMqlValuesFunctionalTest.java b/driver-core/src/test/functional/com/mongodb/client/model/mql/AbstractMqlValuesFunctionalTest.java new file mode 100644 index 00000000000..31a5cecd91f --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/client/model/mql/AbstractMqlValuesFunctionalTest.java @@ -0,0 +1,130 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.mql; + +import com.mongodb.client.model.Field; +import com.mongodb.client.model.OperationTest; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonReader; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.Document; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.BsonValueCodecProvider; +import org.bson.codecs.DecoderContext; +import org.bson.conversions.Bson; +import org.bson.json.JsonReader; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static com.mongodb.ClusterFixture.serverVersionAtLeast; +import static com.mongodb.client.model.Aggregates.addFields; +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; +import static org.bson.conversions.Bson.DEFAULT_CODEC_REGISTRY; +import static org.junit.jupiter.api.Assertions.assertEquals; + +public abstract class AbstractMqlValuesFunctionalTest extends OperationTest { + + /** + * Java stand-in for the "missing" value. 
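+     * Compared by reference: assertEval treats a null evaluation result (the "val" field
+     * removed by the pipeline) as equal to this sentinel.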
+ */
+    public static final Object MISSING = new Object();
+
+    @BeforeEach
+    public void setUp() {
+        getCollectionHelper().drop();
+    }
+
+    @AfterEach
+    public void tearDown() {
+        getCollectionHelper().drop();
+    }
+
+    protected void assertExpression(@Nullable final Object expected, final MqlValue mqlValue) {
+        assertExpression(expected, mqlValue, null);
+    }
+
+    protected void assertExpression(@Nullable final Object expected, final MqlValue mqlValue, @Nullable final String expectedMql) {
+        assertEval(expected, mqlValue);
+
+        if (expectedMql == null) {
+            return;
+        }
+
+        BsonValue expressionValue = ((MqlExpression<?>) mqlValue).toBsonValue(
+                fromProviders(new BsonValueCodecProvider(), DEFAULT_CODEC_REGISTRY));
+        BsonValue bsonValue = new BsonDocumentFragmentCodec().readValue(
+                new JsonReader(expectedMql),
+                DecoderContext.builder().build());
+        assertEquals(bsonValue, expressionValue, expressionValue.toString().replace("\"", "'"));
+    }
+
+    private void assertEval(@Nullable final Object expected, final MqlValue toEvaluate) {
+        BsonValue evaluated = evaluate(toEvaluate);
+        if (expected == MISSING && evaluated == null) {
+            // if the "val" field was removed by "missing", then evaluated is null
+            return;
+        }
+        BsonValue expected1 = toBsonValue(expected);
+        assertEquals(expected1, evaluated);
+    }
+
+    protected BsonValue toBsonValue(@Nullable final Object value) {
+        if (value instanceof BsonValue) {
+            return (BsonValue) value;
+        }
+        return new Document("val", value).toBsonDocument().get("val");
+    }
+
+    @Nullable
+    protected BsonValue evaluate(final MqlValue toEvaluate) {
+        Bson addFieldsStage = addFields(new Field<>("val", toEvaluate));
+        List<Bson> stages = new ArrayList<>();
+        stages.add(addFieldsStage);
+        List<BsonDocument> results;
+        if (getCollectionHelper().count() == 0) {
+            BsonDocument document = new BsonDocument("val", new BsonString("#invalid string#"));
+            if (serverVersionAtLeast(5, 1)) {
+                Bson documentsStage = new BsonDocument("$documents", new BsonArray(Arrays.asList(document)));
+                stages.add(0, documentsStage);
+                results = getCollectionHelper().aggregateDb(stages);
+            } else {
+                getCollectionHelper().insertDocuments(document);
+                results = getCollectionHelper().aggregate(stages);
+                getCollectionHelper().drop();
+            }
+        } else {
+            results = getCollectionHelper().aggregate(stages);
+        }
+        return results.get(0).get("val");
+    }
+
+    private static class BsonDocumentFragmentCodec extends BsonDocumentCodec {
+        public BsonValue readValue(final BsonReader reader, final DecoderContext decoderContext) {
+            reader.readBsonType();
+            return super.readValue(reader, decoderContext);
+        }
+    }
+
+}
diff --git a/driver-core/src/test/functional/com/mongodb/client/model/mql/ArithmeticMqlValuesFunctionalTest.java b/driver-core/src/test/functional/com/mongodb/client/model/mql/ArithmeticMqlValuesFunctionalTest.java
new file mode 100644
index 00000000000..ac177ce7e1c
--- /dev/null
+++ b/driver-core/src/test/functional/com/mongodb/client/model/mql/ArithmeticMqlValuesFunctionalTest.java
@@ -0,0 +1,281 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.mql; + +import org.bson.types.Decimal128; +import org.junit.jupiter.api.Test; + +import java.math.BigDecimal; +import java.math.RoundingMode; + +import static com.mongodb.client.model.mql.MqlValues.numberToMqlNumber; +import static com.mongodb.client.model.mql.MqlValues.of; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +@SuppressWarnings("ConstantConditions") +class ArithmeticMqlValuesFunctionalTest extends AbstractMqlValuesFunctionalTest { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/#arithmetic-expression-operators + + @Test + public void literalsTest() { + assertExpression(1, of(1), "1"); + assertExpression(1L, of(1L)); + assertExpression(1.0, of(1.0)); + assertExpression(Decimal128.parse("1.0"), of(Decimal128.parse("1.0"))); + assertThrows(IllegalArgumentException.class, () -> of((Decimal128) null)); + + // expression equality differs from bson equality + assertExpression(true, of(1L).eq(of(1.0))); + assertExpression(true, of(1L).eq(of(1))); + + // bson equality; underlying type is preserved + // this behaviour is not defined by the API, but tested for clarity + assertEquals(toBsonValue(1), evaluate(of(1))); + assertEquals(toBsonValue(1L), evaluate(of(1L))); + assertEquals(toBsonValue(1.0), evaluate(of(1.0))); + assertNotEquals(toBsonValue(1), evaluate(of(1L))); + assertNotEquals(toBsonValue(1.0), evaluate(of(1L))); + + // Number conversions; used internally + assertExpression(1, numberToMqlNumber(1)); + assertExpression(1L, numberToMqlNumber(1L)); + assertExpression(1.0, numberToMqlNumber(1.0)); + assertExpression(Decimal128.parse("1.0"), numberToMqlNumber(Decimal128.parse("1.0"))); + assertThrows(IllegalArgumentException.class, + () -> assertExpression("n/a", numberToMqlNumber(BigDecimal.valueOf(1)))); + } + + @Test + public void multiplyTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/multiply/ + assertExpression( + 2.0 * 2, + of(2.0).multiply(of(2)), + "{'$multiply': [2.0, 2]}"); + + // mixing integers and numbers + MqlInteger oneInt = of(1); + MqlNumber oneNum = of(1.0); + MqlInteger resultInt = oneInt.multiply(oneInt); + MqlNumber resultNum = oneNum.multiply(oneNum); + // compile time error if these were IntegerExpressions: + MqlNumber r2 = oneNum.multiply(oneInt); + MqlNumber r3 = oneInt.multiply(oneNum); + assertExpression(1, resultInt); + // 1 is also a valid expected value in our API + assertExpression(1.0, resultNum); + assertExpression(1.0, r2); + assertExpression(1.0, r3); + + // convenience + assertExpression(2.0, of(1.0).multiply(2.0)); + assertExpression(2L, of(1).multiply(2L)); + assertExpression(2, of(1).multiply(2)); + } + + @SuppressWarnings("PointlessArithmeticExpression") + @Test + public void divideTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/divide/ + assertExpression( + 2.0 / 1.0, + of(2.0).divide(of(1.0)), + "{'$divide': [2.0, 1.0]}"); + + // division always converts to a double: + assertExpression( + 2.0, // not: 2 / 1 + of(2).divide(of(1)), + "{'$divide': [2, 1]}"); + + // this means that unlike Java's 1/2==0, dividing any underlying + // BSON number type always yields an equal result: + assertExpression( + 1.0 / 2.0, + of(1.0).divide(of(2.0)), + "{'$divide': [1.0, 2.0]}"); + 
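+        // (a zero divisor is rejected server-side: $divide raises a divide-by-zero error,
+        // so of(1).divide(of(0)) would fail at evaluation time, not when the expression is built)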
assertExpression( + 0.5, + of(1).divide(of(2)), + "{'$divide': [1, 2]}"); + + // however, there are differences in evaluation between numbers + // represented using Decimal128 and double: + assertExpression( + 2.5242187499999997, + of(3.231).divide(of(1.28))); + assertExpression( + Decimal128.parse("2.52421875"), + of(Decimal128.parse("3.231")).divide(of(Decimal128.parse("1.28")))); + assertExpression( + Decimal128.parse("2.52421875"), + of(Decimal128.parse("3.231")).divide(of(1.28))); + assertExpression( + Decimal128.parse("2.524218750000"), + of(3.231).divide(of(Decimal128.parse("1.28")))); + // this is not simply because the Java literal used has no corresponding + // double value - it is the same value as-written: + assertEquals("3.231", "" + 3.231); + assertEquals("1.28", "" + 1.28); + + + // convenience + assertExpression(0.5, of(1.0).divide(2.0)); + assertExpression(0.5, of(1).divide(2.0)); + assertExpression(0.5, of(1).divide(2L)); + assertExpression(0.5, of(1).divide(2)); + + // divide always returns a Number, so the method is not on IntegerExpression + } + + @Test + public void addTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/add/ + MqlInteger actual = of(2).add(of(2)); + assertExpression( + 2 + 2, actual, + "{'$add': [2, 2]}"); + assertExpression( + 2.0 + 2, + of(2.0).add(of(2)), + "{'$add': [2.0, 2]}"); + + // overflows into a supported underlying type + assertExpression( + Integer.MAX_VALUE + 2L, + of(Integer.MAX_VALUE).add(of(2))); + assertExpression( + Long.MAX_VALUE + 2.0, + of(Long.MAX_VALUE).add(of(2))); + assertExpression( + Double.POSITIVE_INFINITY, + of(Double.MAX_VALUE).add(of(Double.MAX_VALUE))); + + // convenience + assertExpression(3.0, of(1.0).add(2.0)); + assertExpression(3L, of(1).add(2L)); + assertExpression(3, of(1).add(2)); + + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/sum/ + // sum's alternative behaviour exists for purposes of reduction, but is + // inconsistent with multiply, and potentially confusing. Unimplemented. 
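+        // Illustrative sketch of the asymmetry (hand-written MQL, not produced
+        // by this API): {'$add': [1, 2, 3]} yields 6, and so does
+        // {'$sum': [1, 2, 3]}, but $sum also silently ignores non-numeric
+        // values rather than erroring, and $multiply has no analogue for that.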
+    }
+
+    @Test
+    public void subtractTest() {
+        // https://www.mongodb.com/docs/manual/reference/operator/aggregation/subtract/
+        MqlInteger actual = of(2).subtract(of(2));
+        assertExpression(
+                0,
+                actual,
+                "{'$subtract': [2, 2]} ");
+        assertExpression(
+                2.0 - 2,
+                of(2.0).subtract(of(2)),
+                "{'$subtract': [2.0, 2]} ");
+
+        // convenience
+        assertExpression(-1.0, of(1.0).subtract(2.0));
+        assertExpression(-1, of(1).subtract(2));
+    }
+
+    @Test
+    public void maxTest() {
+        // https://www.mongodb.com/docs/manual/reference/operator/aggregation/max/
+        MqlInteger actual = of(-2).max(of(2));
+        assertExpression(
+                Math.max(-2, 2),
+                actual,
+                "{'$max': [-2, 2]}");
+        assertExpression(
+                Math.max(-2.0, 2.0),
+                of(-2.0).max(of(2.0)),
+                "{'$max': [-2.0, 2.0]}");
+    }
+
+    @Test
+    public void minTest() {
+        // https://www.mongodb.com/docs/manual/reference/operator/aggregation/min/
+        MqlInteger actual = of(-2).min(of(2));
+        assertExpression(
+                Math.min(-2, 2),
+                actual,
+                "{'$min': [-2, 2]}");
+        assertExpression(
+                Math.min(-2.0, 2.0),
+                of(-2.0).min(of(2.0)),
+                "{'$min': [-2.0, 2.0]}");
+    }
+
+    @Test
+    public void roundTest() {
+        // https://www.mongodb.com/docs/manual/reference/operator/aggregation/round/
+        MqlInteger actual = of(5.5).round();
+        assertExpression(
+                6.0,
+                actual,
+                "{'$round': 5.5} ");
+        MqlNumber actualNum = of(5.5).round(of(0));
+        assertExpression(
+                new BigDecimal("5.5").setScale(0, RoundingMode.HALF_EVEN).doubleValue(),
+                actualNum,
+                "{'$round': [5.5, 0]} ");
+        // unlike Java, uses banker's rounding (half_even)
+        assertExpression(
+                2.0,
+                of(2.5).round(),
+                "{'$round': 2.5} ");
+        assertExpression(
+                new BigDecimal("-5.5").setScale(0, RoundingMode.HALF_EVEN).doubleValue(),
+                of(-5.5).round());
+        // to place
+        assertExpression(
+                555.55,
+                of(555.555).round(of(2)),
+                "{'$round': [555.555, 2]} ");
+        assertExpression(
+                600.0,
+                of(555.555).round(of(-2)),
+                "{'$round': [555.555, -2]} ");
+        // underlying type rounds to same underlying type
+        assertExpression(
+                5L,
+                of(5L).round());
+        assertExpression(
+                5.0,
+                of(5.0).round());
+        assertExpression(
+                Decimal128.parse("1234"),
+                of(Decimal128.parse("1234.2")).round());
+    }
+
+    @Test
+    public void absTest() {
+        // https://www.mongodb.com/docs/manual/reference/operator/aggregation/abs/
+        assertExpression(
+                Math.abs(-2.0),
+                of(-2.0).abs(),
+                "{'$abs': -2.0}");
+        // integer
+        MqlInteger abs = of(-2).abs();
+        assertExpression(
+                Math.abs(-2), abs,
+                "{'$abs': -2}");
+    }
+}
diff --git a/driver-core/src/test/functional/com/mongodb/client/model/mql/ArrayMqlValuesFunctionalTest.java b/driver-core/src/test/functional/com/mongodb/client/model/mql/ArrayMqlValuesFunctionalTest.java
new file mode 100644
index 00000000000..c183558d59f
--- /dev/null
+++ b/driver-core/src/test/functional/com/mongodb/client/model/mql/ArrayMqlValuesFunctionalTest.java
@@ -0,0 +1,468 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model.mql;
+
+import com.mongodb.MongoCommandException;
+import org.bson.Document;
+import org.bson.types.Decimal128;
+import org.junit.jupiter.api.Test;
+
+import java.time.Instant;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static com.mongodb.ClusterFixture.serverVersionAtLeast;
+import static com.mongodb.client.model.mql.MqlValues.of;
+import static com.mongodb.client.model.mql.MqlValues.ofArray;
+import static com.mongodb.client.model.mql.MqlValues.ofBooleanArray;
+import static com.mongodb.client.model.mql.MqlValues.ofDateArray;
+import static com.mongodb.client.model.mql.MqlValues.ofIntegerArray;
+import static com.mongodb.client.model.mql.MqlValues.ofNumberArray;
+import static com.mongodb.client.model.mql.MqlValues.ofStringArray;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
+@SuppressWarnings({"Convert2MethodRef"})
+class ArrayMqlValuesFunctionalTest extends AbstractMqlValuesFunctionalTest {
+    // https://www.mongodb.com/docs/manual/reference/operator/aggregation/#array-expression-operators
+
+    private final MqlArray<MqlInteger> array123 = ofIntegerArray(1, 2, 3);
+    private final MqlArray<MqlBoolean> arrayTTF = ofBooleanArray(true, true, false);
+
+    @Test
+    public void literalsTest() {
+        // Boolean
+        assertExpression(
+                Arrays.asList(true, true, false),
+                arrayTTF,
+                "[true, true, false]");
+        // Integer
+        assertExpression(
+                Arrays.asList(1, 2, 3),
+                array123,
+                "[1, 2, 3]");
+        assertExpression(
+                Arrays.asList(1L, 2L, 3L),
+                ofIntegerArray(1L, 2L, 3L),
+                "[{'$numberLong': '1'}, {'$numberLong': '2'}, {'$numberLong': '3'}]");
+        // Number
+        assertExpression(
+                Arrays.asList(1.0, 2.0, 3.0),
+                ofNumberArray(1.0, 2.0, 3.0),
+                "[1.0, 2.0, 3.0]");
+        assertExpression(
+                Arrays.asList(Decimal128.parse("1.0")),
+                ofNumberArray(Decimal128.parse("1.0")),
+                "[{'$numberDecimal': '1.0'}]");
+        // String
+        assertExpression(
+                Arrays.asList("a", "b", "c"),
+                ofStringArray("a", "b", "c"),
+                "['a', 'b', 'c']");
+        // must escape:
+        assertExpression(
+                Arrays.asList("$a", "b", "$c.d"),
+                ofStringArray("$a", "b", "$c.d"),
+                "[{'$literal': '$a'}, 'b', {'$literal': '$c.d'}]");
+
+        // Date
+        assertExpression(
+                Arrays.asList(Instant.parse("2007-12-03T10:15:30.00Z")),
+                ofDateArray(Instant.parse("2007-12-03T10:15:30.00Z")),
+                "[{'$date': '2007-12-03T10:15:30.00Z'}]");
+
+        // Document
+        MqlArray<MqlDocument> documentArray = ofArray(
+                of(Document.parse("{a: 1}")),
+                of(Document.parse("{b: 2}")));
+        assertExpression(
+                Arrays.asList(Document.parse("{a: 1}"), Document.parse("{b: 2}")),
+                documentArray,
+                "[{'$literal': {'a': 1}}, {'$literal': {'b': 2}}]");
+
+        // Array
+        MqlArray<MqlArray<MqlValue>> arrayArray = ofArray(ofArray(), ofArray());
+        assertExpression(
+                Arrays.asList(Collections.emptyList(), Collections.emptyList()),
+                arrayArray,
+                "[[], []]");
+
+        // Mixed
+        MqlArray<MqlValue> expression = ofArray(of(1), of(true), ofArray(of(1.0), of(1)));
+        assertExpression(
+                Arrays.asList(1, true, Arrays.asList(1.0, 1)),
+                expression,
+                "[1, true, [1.0, 1]]");
+    }
+
+    @Test
+    public void filterTest() {
+        // https://www.mongodb.com/docs/manual/reference/operator/aggregation/filter/
+        assertExpression(
+                Stream.of(true, true, false)
+                        .filter(v -> v).collect(Collectors.toList()),
+                arrayTTF.filter(v -> v),
+                "{'$filter': {'input': [true, true, false], 'cond': '$$this'}}");
+    }
+
+    @Test
+    public void mapTest() {
+        // https://www.mongodb.com/docs/manual/reference/operator/aggregation/map/
+        assertExpression(
+                Stream.of(true, true, false)
+                        .map(v -> !v).collect(Collectors.toList()),
+                arrayTTF.map(v -> v.not()),
+                "{'$map': {'input': [true, true, false], 'in': {'$not': '$$this'}}}");
+    }
+
+    @Test
+    public void sortTest() {
+        // https://www.mongodb.com/docs/manual/reference/operator/aggregation/sortArray/
+        MqlArray<MqlInteger> integerExpressionArrayExpression = ofIntegerArray(3, 1, 2);
+        assertExpression(
+                Stream.of(3, 1, 2)
+                        .sorted().collect(Collectors.toList()), sort(integerExpressionArrayExpression),
+                "{'$sortArray': {'input': [3, 1, 2], 'sortBy': 1}}");
+    }
+
+    @SuppressWarnings("unchecked")
+    private static <T extends MqlValue> MqlArray<T> sort(final MqlArray<T> array) {
+        assumeTrue(serverVersionAtLeast(5, 2)); // due to sort
+        MqlExpression<?> mqlArray = (MqlExpression<?>) array;
+        return (MqlArray<T>) mqlArray.sort();
+    }
+
+    // https://www.mongodb.com/docs/manual/reference/operator/aggregation/reduce/
+    // reduce is implemented as each individual type of reduction (monoid)
+    // this prevents issues related to incorrect specification of identity values
+
+    @Test
+    public void reduceAnyTest() {
+        assertExpression(
+                true,
+                arrayTTF.any(a -> a),
+                "{'$reduce': {'input': {'$map': {'input': [true, true, false], 'in': '$$this'}}, "
+                        + "'initialValue': false, 'in': {'$or': ['$$value', '$$this']}}}");
+        assertExpression(
+                false,
+                ofBooleanArray().any(a -> a));
+
+        assertExpression(
+                true,
+                ofIntegerArray(1, 2, 3).any(a -> a.eq(of(3))));
+        assertExpression(
+                false,
+                ofIntegerArray(1, 2, 2).any(a -> a.eq(of(9))));
+    }
+
+    @Test
+    public void reduceAllTest() {
+        assertExpression(
+                false,
+                arrayTTF.all(a -> a),
+                "{'$reduce': {'input': {'$map': {'input': [true, true, false], 'in': '$$this'}}, "
+                        + "'initialValue': true, 'in': {'$and': ['$$value', '$$this']}}}");
+        assertExpression(
+                true,
+                ofBooleanArray().all(a -> a));
+
+        assertExpression(
+                true,
+                ofIntegerArray(1, 2, 3).all(a -> a.gt(of(0))));
+        assertExpression(
+                false,
+                ofIntegerArray(1, 2, 2).all(a -> a.eq(of(2))));
+    }
+
+    @Test
+    public void reduceSumTest() {
+        assertExpression(
+                6,
+                ofIntegerArray(1, 2, 3).sum(a -> a),
+                "{'$reduce': {'input': {'$map': {'input': [1, 2, 3], 'in': '$$this'}}, "
+                        + "'initialValue': 0, 'in': {'$add': ['$$value', '$$this']}}}");
+        // empty array:
+        assertExpression(
+                0,
+                ofIntegerArray().sum(a -> a));
+    }
+
+    @Test
+    public void reduceMultiplyTest() {
+        assertExpression(
+                6,
+                ofIntegerArray(1, 2, 3).multiply(a -> a),
+                "{'$reduce': {'input': {'$map': {'input': [1, 2, 3], 'in': '$$this'}}, "
+                        + "'initialValue': 1, 'in': {'$multiply': ['$$value', '$$this']}}}");
+        // empty array:
+        assertExpression(
+                1,
+                ofIntegerArray().multiply(a -> a));
+    }
+
+    @Test
+    public void reduceMaxTest() {
+        assumeTrue(serverVersionAtLeast(5, 2));
+        assertExpression(
+                3,
+                ofIntegerArray(1, 2, 3).max(of(9)),
+                "{'$cond': [{'$eq': [{'$size': [[1, 2, 3]]}, 0]}, 9, "
+                        + "{'$first': [{'$maxN': {'input': [1, 2, 3], 'n': 1}}]}]}");
+        assertExpression(
+                9,
+                ofIntegerArray().max(of(9)));
+    }
+
+    @Test
+    public void reduceMinTest() {
+        assumeTrue(serverVersionAtLeast(5, 2));
+        assertExpression(
+                1,
+                ofIntegerArray(1, 2, 3).min(of(9)),
+                "{'$cond': [{'$eq': [{'$size': [[1, 2, 3]]}, 0]}, 9, "
+                        + "{'$first': [{'$minN': {'input': [1, 2, 3], 'n': 1}}]}]}");
+        assertExpression(
+                9,
+                ofIntegerArray().min(of(9)));
+    }
+
+    @Test
+    public void reduceMaxNTest() {
+        assumeTrue(serverVersionAtLeast(5, 2));
+        assertExpression(
+                Arrays.asList(3, 2),
+                ofIntegerArray(3, 1, 2).maxN(of(2)));
+        assertExpression(
+                Arrays.asList(),
+                ofIntegerArray().maxN(of(2)));
+        // N must be non-zero
+        assertThrows(MongoCommandException.class, () -> assertExpression(
+                Arrays.asList(),
+                ofIntegerArray(3, 2, 1).maxN(of(0))));
+    }
+
+    @Test
+    public void reduceMinNTest() {
+        assumeTrue(serverVersionAtLeast(5, 2));
+        assertExpression(
+                Arrays.asList(1, 2),
+                ofIntegerArray(3, 1, 2).minN(of(2)));
+        assertExpression(
+                Arrays.asList(),
+                ofIntegerArray().minN(of(2)));
+        // N must be non-zero
+        assertThrows(MongoCommandException.class, () -> assertExpression(
+                Arrays.asList(),
+                ofIntegerArray(3, 2, 1).minN(of(0))));
+    }
+
+    @Test
+    public void reduceJoinTest() {
+        assertExpression(
+                "abc",
+                ofStringArray("a", "b", "c").joinStrings(a -> a),
+                "{'$reduce': {'input': {'$map': {'input': ['a', 'b', 'c'], 'in': '$$this'}}, "
+                        + "'initialValue': '', 'in': {'$concat': ['$$value', '$$this']}}}");
+        assertExpression(
+                "",
+                ofStringArray().joinStrings(a -> a));
+    }
+
+    @Test
+    public void reduceConcatTest() {
+        assertExpression(
+                Arrays.asList(1, 2, 3, 4),
+                ofArray(ofIntegerArray(1, 2), ofIntegerArray(3, 4)).concatArrays(v -> v),
+                "{'$reduce': {'input': {'$map': {'input': [[1, 2], [3, 4]], 'in': '$$this'}}, "
+                        + "'initialValue': [], "
+                        + "'in': {'$concatArrays': ['$$value', '$$this']}}} ");
+        // empty:
+        MqlArray<MqlArray<MqlInteger>> expressionArrayExpression = ofArray();
+        assertExpression(
+                Collections.emptyList(),
+                expressionArrayExpression.concatArrays(a -> a));
+    }
+
+    @Test
+    public void reduceUnionTest() {
+        // https://www.mongodb.com/docs/manual/reference/operator/aggregation/setUnion/ (40)
+        assertExpression(
+                Arrays.asList(1, 2, 3),
+                sort(ofArray(ofIntegerArray(1, 2), ofIntegerArray(1, 3)).unionArrays(v -> v)),
+                "{'$sortArray': {'input': {'$reduce': {'input': "
+                        + "{'$map': {'input': [[1, 2], [1, 3]], 'in': '$$this'}}, "
+                        + "'initialValue': [], 'in': {'$setUnion': ['$$value', '$$this']}}}, 'sortBy': 1}}");
+
+        Function<MqlArray<MqlBoolean>, MqlArray<MqlInteger>> f = a ->
+                a.map(v -> v.isBooleanOr(of(false))
+                        .cond(of(1), of(0)));
+        assertExpression(
+                Arrays.asList(0, 1),
+                ofArray(ofBooleanArray(true, false), ofBooleanArray(false)).unionArrays(f));
+    }
+
+    @Test
+    public void sizeTest() {
+        // https://www.mongodb.com/docs/manual/reference/operator/aggregation/size/
+        assertExpression(
+                Arrays.asList(1, 2, 3).size(),
+                array123.size(),
+                "{'$size': [[1, 2, 3]]}");
+        assertExpression(
+                0,
+                ofIntegerArray().size(),
+                "{'$size': [[]]}");
+    }
+
+    @Test
+    public void elementAtTest() {
+        // https://www.mongodb.com/docs/manual/reference/operator/aggregation/arrayElemAt/
+        assertExpression(
+                Arrays.asList(1, 2, 3).get(0),
+                array123.elementAt(of(0)),
+                "{'$arrayElemAt': [[1, 2, 3], 0]}");
+        // negatives
+        assertExpression(
+                Arrays.asList(1, 2, 3).get(3 - 1),
+                array123.elementAt(-1));
+        // underlying long
+        assertExpression(
+                2,
+                array123.elementAt(of(1L)));
+
+        assertExpression(
+                MISSING,
+                array123.elementAt(99));
+
+        assertExpression(
+                MISSING,
+                array123.elementAt(-99));
+
+        // long values are considered entirely out of bounds; server error
+        assertThrows(MongoCommandException.class, () -> assertExpression(
+                MISSING,
+                array123.elementAt(of(Long.MAX_VALUE))));
+
+        // 0.0 is a valid integer value
+        assumeTrue(serverVersionAtLeast(4, 4)); // isNumber
+        assertExpression(
+                Arrays.asList(1, 2, 3).get(0),
+                array123.elementAt(of(0.0).isIntegerOr(of(-1))));
+    }
+
+    @Test
+    public void firstTest() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        //
https://www.mongodb.com/docs/manual/reference/operator/aggregation/first/ + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/first-array-element/ + assertExpression( + new LinkedList<>(Arrays.asList(1, 2, 3)).getFirst(), + array123.first(), + "{'$first': [[1, 2, 3]]}"); + + assertExpression( + MISSING, + ofIntegerArray().first(), + "{'$first': [[]]}"); + } + + @Test + public void lastTest() { + assumeTrue(serverVersionAtLeast(4, 4)); + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/last-array-element/ + assertExpression( + new LinkedList<>(Arrays.asList(1, 2, 3)).getLast(), + array123.last(), + "{'$last': [[1, 2, 3]]}"); + + assertExpression( + MISSING, + ofIntegerArray().last(), + "{'$last': [[]]}"); + } + + @Test + public void containsTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/in/ + // The parameters of this expression are flipped + assertExpression( + Arrays.asList(1, 2, 3).contains(2), + array123.contains(of(2)), + "{'$in': [2, [1, 2, 3]]}"); + } + + @Test + public void concatTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/concatArrays/ + assertExpression( + Stream.concat(Stream.of(1, 2, 3), Stream.of(1, 2, 3)) + .collect(Collectors.toList()), + ofIntegerArray(1, 2, 3).concat(ofIntegerArray(1, 2, 3)), + "{'$concatArrays': [[1, 2, 3], [1, 2, 3]]}"); + // mixed types: + assertExpression( + Arrays.asList(1.0, 1, 2, 3), + ofNumberArray(1.0).concat(ofIntegerArray(1, 2, 3))); + } + + @Test + public void sliceTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/slice/ + assertExpression( + Arrays.asList(1, 2, 3).subList(1, 3), + array123.slice(1, 10), + "{'$slice': [[1, 2, 3], 1, 10]}"); + + MqlArray array12345 = ofIntegerArray(1, 2, 3, 4, 5); + // sub-array: skipFirstN + firstN + assertExpression( + Arrays.asList(2, 3), + array12345.slice(1, 2)); + // lastN + firstN + assertExpression( + Arrays.asList(5), + array12345.slice(-1, 100)); + assertExpression( + Arrays.asList(1, 2, 3, 4, 5), + array12345.slice(-100, 100)); + } + + @Test + public void unionTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/setUnion/ + assertExpression( + Arrays.asList(1, 2, 3), + sort(array123.union(array123)), + "{'$sortArray': {'input': {'$setUnion': [[1, 2, 3], [1, 2, 3]]}, 'sortBy': 1}}"); + // mixed types: + assertExpression( + Arrays.asList(1, 2.0, 3), + sort(ofNumberArray(2.0).union(ofIntegerArray(1, 2, 3)))); + } + + @Test + public void distinctTest() { + assertExpression( + Arrays.asList(1, 2, 3), + sort(ofIntegerArray(1, 2, 1, 3, 3).distinct()), + "{'$sortArray': {'input': {'$setUnion': [[1, 2, 1, 3, 3]]}, 'sortBy': 1}}"); + } +} diff --git a/driver-core/src/test/functional/com/mongodb/client/model/mql/BooleanMqlValuesFunctionalTest.java b/driver-core/src/test/functional/com/mongodb/client/model/mql/BooleanMqlValuesFunctionalTest.java new file mode 100644 index 00000000000..0c5b8bd48ce --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/client/model/mql/BooleanMqlValuesFunctionalTest.java @@ -0,0 +1,75 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.mql; + +import org.junit.jupiter.api.Test; + +@SuppressWarnings({"PointlessBooleanExpression", "ConstantConditions", "ConstantConditionalExpression", "SimplifyBooleanExpression"}) +class BooleanMqlValuesFunctionalTest extends AbstractMqlValuesFunctionalTest { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/#boolean-expression-operators + // (Complete as of 6.0) + + private final MqlBoolean tru = MqlValues.of(true); + private final MqlBoolean fal = MqlValues.of(false); + + @Test + public void literalsTest() { + assertExpression(true, tru, "true"); + assertExpression(false, fal, "false"); + } + + @Test + public void orTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/or/ + assertExpression(true || false, tru.or(fal), "{'$or': [true, false]}"); + assertExpression(false || true, fal.or(tru), "{'$or': [false, true]}"); + } + + @Test + public void andTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/and/ + assertExpression(true && false, tru.and(fal), "{'$and': [true, false]}"); + assertExpression(false && true, fal.and(tru), "{'$and': [false, true]}"); + } + + @Test + public void notTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/not/ + assertExpression(!true, tru.not(), "{'$not': true}"); + assertExpression(!false, fal.not(), "{'$not': false}"); + } + + @Test + public void condTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/cond/ + MqlString abc = MqlValues.of("abc"); + MqlString xyz = MqlValues.of("xyz"); + MqlNumber nnn = MqlValues.of(123); + assertExpression( + true && false ? "abc" : "xyz", + tru.and(fal).cond(abc, xyz), + "{'$cond': [{'$and': [true, false]}, 'abc', 'xyz']}"); + assertExpression( + true || false ? "abc" : "xyz", + tru.or(fal).cond(abc, xyz), + "{'$cond': [{'$or': [true, false]}, 'abc', 'xyz']}"); + assertExpression( + false ? "abc" : 123, + fal.cond(abc, nnn), + "{'$cond': [false, 'abc', 123]}"); + } +} diff --git a/driver-core/src/test/functional/com/mongodb/client/model/mql/ComparisonMqlValuesFunctionalTest.java b/driver-core/src/test/functional/com/mongodb/client/model/mql/ComparisonMqlValuesFunctionalTest.java new file mode 100644 index 00000000000..f5108fe4e25 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/client/model/mql/ComparisonMqlValuesFunctionalTest.java @@ -0,0 +1,194 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.mongodb.client.model.mql;
+
+import org.bson.BsonDocument;
+import org.bson.BsonValue;
+import org.bson.codecs.BsonValueCodecProvider;
+import org.junit.jupiter.api.Test;
+
+import java.time.Instant;
+import java.util.Arrays;
+import java.util.List;
+
+import static com.mongodb.client.model.mql.MqlValues.of;
+import static com.mongodb.client.model.mql.MqlValues.ofBooleanArray;
+import static com.mongodb.client.model.mql.MqlValues.ofIntegerArray;
+import static com.mongodb.client.model.mql.MqlValues.ofNull;
+import static org.bson.codecs.configuration.CodecRegistries.fromProviders;
+import static org.junit.jupiter.api.Assertions.fail;
+
+@SuppressWarnings({"ConstantConditions"})
+class ComparisonMqlValuesFunctionalTest extends AbstractMqlValuesFunctionalTest {
+    // https://www.mongodb.com/docs/manual/reference/operator/aggregation/#comparison-expression-operators
+    // (Complete as of 6.0)
+    // Comparison expressions are part of the generic Expression class.
+
+    // https://www.mongodb.com/docs/manual/reference/bson-type-comparison-order/#std-label-bson-types-comparison-order
+    private final List<MqlValue> sampleValues = Arrays.asList(
+            MqlExpression.ofRem(),
+            ofNull(),
+            of(0),
+            of(1),
+            of(2.0),
+            of(""),
+            of("str"),
+            of(BsonDocument.parse("{}")),
+            of(BsonDocument.parse("{a: 1}")),
+            of(BsonDocument.parse("{a: 2}")),
+            of(BsonDocument.parse("{a: 2, b: 1}")),
+            of(BsonDocument.parse("{b: 1, a: 2}")),
+            of(BsonDocument.parse("{'':''}")),
+            ofIntegerArray(0),
+            ofIntegerArray(1),
+            ofBooleanArray(true),
+            of(false),
+            of(true),
+            of(Instant.now())
+    );
+
+    @Test
+    public void literalsTest() {
+        // special values
+        assertExpression(null, ofNull(), "null");
+        // the "missing" value is obtained via getField.
+        // the "$$REMOVE" value is intentionally not exposed. It is used internally.
+        // the "undefined" value is deprecated.
+        // https://www.mongodb.com/docs/manual/reference/operator/aggregation/literal/
+        // $literal is intentionally not exposed. It is used internally.
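+        // For illustration (hand-written pipeline stage, not produced by this
+        // API): {'$addFields': {'val': '$$REMOVE'}} drops the "val" field
+        // entirely rather than setting it to null, which is why these tests
+        // model an absent field with the MISSING sentinel.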
+ } + + @Test + public void eqTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/eq/ + assertExpression( + 1 == 2, + of(1).eq(of(2)), + "{'$eq': [1, 2]}"); + assertExpression( + false, + of(BsonDocument.parse("{}")).eq(ofIntegerArray()), + "{'$eq': [{'$literal': {}}, []]}"); + + // numbers are equal, even though of different types + assertExpression( + 1 == 1.0, + of(1).eq(of(1.0)), + "{'$eq': [1, 1.0]}"); + assertExpression( + 1 == 1L, + of(1).eq(of(1L)), + "{'$eq': [1, { '$numberLong': '1' }]}"); + + // ensure that no two samples are equal to each other + for (int i = 0; i < sampleValues.size(); i++) { + for (int j = 0; j < sampleValues.size(); j++) { + if (i == j) { + continue; + } + MqlValue first = sampleValues.get(i); + MqlValue second = sampleValues.get(j); + BsonValue evaluate = evaluate(first.eq(second)); + if (evaluate.asBoolean().getValue()) { + BsonValue v1 = ((MqlExpression) first).toBsonValue(fromProviders(new BsonValueCodecProvider())); + BsonValue v2 = ((MqlExpression) second).toBsonValue(fromProviders(new BsonValueCodecProvider())); + fail(i + "," + j + " --" + v1 + " and " + v2 + " should not equal"); + } + } + } + } + + @Test + public void neTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/ne/ + assertExpression( + 1 != 2, + of(1).ne(of(2)), + "{'$ne': [1, 2]}"); + } + + @Test + public void ltTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/lt/ + assertExpression( + -1 < 1, + of(-1).lt(of(1)), + "{'$lt': [-1, 1]}"); + assertExpression( + 0 < 0, + of(0).lt(of(0)), + "{'$lt': [0, 0]}"); + + assertExpression( + true, + ofNull().lt(of(0)), + "{'$lt': [null, 0]}"); + + for (int i = 0; i < sampleValues.size() - 1; i++) { + for (int j = i + 1; j < sampleValues.size(); j++) { + MqlValue first = sampleValues.get(i); + MqlValue second = sampleValues.get(j); + BsonValue evaluate = evaluate(first.lt(second)); + if (!evaluate.asBoolean().getValue()) { + BsonValue v1 = ((MqlExpression) first).toBsonValue(fromProviders(new BsonValueCodecProvider())); + BsonValue v2 = ((MqlExpression) second).toBsonValue(fromProviders(new BsonValueCodecProvider())); + fail(i + "," + j + " --" + v1 + " < " + v2 + " should be true"); + } + } + } + } + + @Test + public void lteTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/lte/ + assertExpression( + -1 <= 1, + of(-1).lte(of(1)), + "{'$lte': [-1, 1]}"); + assertExpression( + 0 <= 0, + of(0).lte(of(0)), + "{'$lte': [0, 0]}"); + } + + @Test + public void gtTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/gt/ + assertExpression( + -1 > 1, + of(-1).gt(of(1)), + "{'$gt': [-1, 1]}"); + assertExpression( + 0 > 0, + of(0).gt(of(0)), + "{'$gt': [0, 0]}"); + } + + @Test + public void gteTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/gte/ + assertExpression( + -1 >= 1, + of(-1).gte(of(1)), + "{'$gte': [-1, 1]}"); + assertExpression( + 0 >= 0, + of(0).gte(of(0)), + "{'$gte': [0, 0]}"); + } + +} diff --git a/driver-core/src/test/functional/com/mongodb/client/model/mql/ControlMqlValuesFunctionalTest.java b/driver-core/src/test/functional/com/mongodb/client/model/mql/ControlMqlValuesFunctionalTest.java new file mode 100644 index 00000000000..706f20c2e60 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/client/model/mql/ControlMqlValuesFunctionalTest.java @@ -0,0 +1,292 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model.mql;
+
+import org.bson.Document;
+import org.junit.jupiter.api.Test;
+
+import java.time.Instant;
+import java.util.function.Function;
+
+import static com.mongodb.ClusterFixture.serverVersionAtLeast;
+import static com.mongodb.client.model.mql.MqlValues.of;
+import static com.mongodb.client.model.mql.MqlValues.ofArray;
+import static com.mongodb.client.model.mql.MqlValues.ofIntegerArray;
+import static com.mongodb.client.model.mql.MqlValues.ofMap;
+import static com.mongodb.client.model.mql.MqlValues.ofNull;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
+class ControlMqlValuesFunctionalTest extends AbstractMqlValuesFunctionalTest {
+
+    @Test
+    public void passToTest() {
+        Function<MqlInteger, MqlInteger> intDecrement = (e) -> e.subtract(of(1));
+        Function<MqlNumber, MqlNumber> numDecrement = (e) -> e.subtract(of(1));
+
+        // "nested functional" function application:
+        assertExpression(
+                2 - 1,
+                intDecrement.apply(of(2)),
+                "{'$subtract': [2, 1]}");
+        // "chained" function application produces the same MQL:
+        assertExpression(
+                2 - 1,
+                of(2).passIntegerTo(intDecrement),
+                "{'$subtract': [2, 1]}");
+
+        // variations
+        assertExpression(
+                2 - 1,
+                of(2).passIntegerTo(numDecrement));
+        assertExpression(
+                2 - 1,
+                of(2).passNumberTo(numDecrement));
+
+        // all types
+        Function<MqlValue, MqlString> test = on -> of("A");
+        assertExpression("A", of(true).passTo(test));
+        assertExpression("A", of(false).passBooleanTo(test));
+        assertExpression("A", of(0).passIntegerTo(test));
+        assertExpression("A", of(0).passNumberTo(test));
+        assertExpression("A", of("").passStringTo(test));
+        assertExpression("A", of(Instant.ofEpochMilli(123)).passDateTo(test));
+        assertExpression("A", ofIntegerArray(1, 2).passArrayTo(test));
+        assertExpression("A", of(Document.parse("{_id: 'a'}")).passDocumentTo(test));
+        assertExpression("A", ofMap(Document.parse("{_id: 'a'}")).passMapTo(test));
+    }
+
+    @Test
+    public void switchTest() {
+        assumeTrue(serverVersionAtLeast(4, 4)); // isNumber
+        // https://www.mongodb.com/docs/manual/reference/operator/aggregation/switch/
+        assertExpression("a", of(0).switchOn(on -> on.is(v -> v.eq(of(0)), v -> of("a"))));
+        assertExpression("a", of(0).switchOn(on -> on.isNumber(v -> of("a"))));
+        assertExpression("a", of(0).switchOn(on -> on.eq(of(0), v -> of("a"))));
+        assertExpression("a", of(0).switchOn(on -> on.lte(of(9), v -> of("a"))));
+
+        // test branches
+        Function<MqlInteger, MqlBoolean> isOver10 = v -> v.subtract(10).gt(of(0));
+        Function<MqlInteger, MqlString> s = e -> e
+                .switchIntegerOn(on -> on
+                        .eq(of(0), v -> of("A"))
+                        .lt(of(10), v -> of("B"))
+                        .is(isOver10, v -> of("C"))
+                        .defaults(v -> of("D")))
+                .toLower();
+
+        assertExpression("a", of(0).passIntegerTo(s));
+        assertExpression("b", of(9).passIntegerTo(s));
+        assertExpression("b", of(-9).passIntegerTo(s));
+        assertExpression("c", of(11).passIntegerTo(s));
+        assertExpression("d", of(10).passIntegerTo(s));
+    }
+
+    @Test
+    public void switchInferenceTest() {
+        // the following must compile:
+        assertExpression(
+                "b",
+                of(1).switchOn(on -> on
+                        .eq(of(0), v -> of("a"))
+                        .eq(of(1), v -> of("b"))
+                ));
+        // the "of(0)" must not cause a type inference of T being an integer,
+        // since switchOn expects an Expression.
+    }
+
+    @Test
+    public void switchTypesTest() {
+        // isIntegerOr relies on switch short-circuiting, which only happens after 5.2
+        assumeTrue(serverVersionAtLeast(5, 2));
+        Function<MqlValue, MqlString> label = expr -> expr.switchOn(on -> on
+                .isBoolean(v -> v.asString().append(of(" - bool")))
+                // integer should be checked before string
+                .isInteger(v -> v.asString().append(of(" - integer")))
+                .isNumber(v -> v.asString().append(of(" - number")))
+                .isString(v -> v.asString().append(of(" - string")))
+                .isDate(v -> v.asString().append(of(" - date")))
+                .isArray((MqlArray<MqlInteger> v) -> v.sum(a -> a).asString().append(of(" - array")))
+                .isDocument(v -> v.getString("_id").append(of(" - document")))
+                .isNull(v -> of("null - null"))
+                .defaults(v -> of("default"))
+        ).toLower();
+        assertExpression("true - bool", of(true).passTo(label));
+        assertExpression("false - bool", of(false).passBooleanTo(label));
+        assertExpression("1 - integer", of(1).passIntegerTo(label));
+        assertExpression("1 - integer", of(1.0).passNumberTo(label));
+        assertExpression("1.01 - number", of(1.01).passNumberTo(label));
+        assertExpression("abc - string", of("abc").passStringTo(label));
+        assertExpression("1970-01-01t00:00:00.123z - date", of(Instant.ofEpochMilli(123)).passDateTo(label));
+        assertExpression("3 - array", ofIntegerArray(1, 2).passArrayTo(label));
+        assertExpression("a - document", of(Document.parse("{_id: 'a'}")).passDocumentTo(label));
+        // maps are considered documents
+        assertExpression("a - document", ofMap(Document.parse("{_id: 'a'}")).passMapTo(label));
+        assertExpression("null - null", ofNull().passTo(label));
+        // maps via isMap:
+        assertExpression(
+                "12 - map",
+                ofMap(Document.parse("{a: '1', b: '2'}")).switchOn(on -> on
+                        .isMap((MqlMap<MqlString> v) -> v.entries()
+                                .joinStrings(e -> e.getValue()).append(of(" - map")))));
+        // arrays via isArray, and tests signature:
+        assertExpression(
+                "ab - array",
+                ofArray(of("a"), of("b")).switchOn(on -> on
+                        .isArray((MqlArray<MqlString> v) -> v
+                                .joinStrings(e -> e).append(of(" - array")))));
+    }
+
+    private <T extends MqlValue> BranchesIntermediary<T, MqlString> branches(final Branches<T> on) {
+        return on.is(v -> of(true), v -> of("A"));
+    }
+
+    @Test
+    public void switchTestVariants() {
+        assertExpression("A", of(true).switchOn(this::branches));
+        assertExpression("A", of(false).switchBooleanOn(this::branches));
+        assertExpression("A", of(0).switchIntegerOn(this::branches));
+        assertExpression("A", of(0).switchNumberOn(this::branches));
+        assertExpression("A", of("").switchStringOn(this::branches));
+        assertExpression("A", of(Instant.ofEpochMilli(123)).switchDateOn(this::branches));
+        assertExpression("A", ofIntegerArray(1, 2).switchArrayOn(this::branches));
+        assertExpression("A", of(Document.parse("{_id: 'a'}")).switchDocumentOn(this::branches));
+        assertExpression("A", ofMap(Document.parse("{_id: 'a'}")).switchMapOn(this::branches));
+    }
+
+    @Test
+    public void switchTestInitial() {
+        assertExpression("A",
+                of(0).switchOn(on -> on.is(v -> v.gt(of(-1)), v -> of("A"))),
+                "{'$switch': {'branches': [{'case': {'$gt': [0, -1]}, 'then': 'A'}]}}");
+        // eq lt lte
+        assertExpression("A",
+                of(0).switchOn(on -> on.eq(of(0), v -> of("A"))),
+                "{'$switch': {'branches': [{'case': {'$eq': [0, 0]}, 'then': 'A'}]}}");
+        assertExpression("A",
+                of(0).switchOn(on -> on.lt(of(1), v -> of("A"))),
+                "{'$switch': {'branches': [{'case': {'$lt': [0, 1]},
'then': 'A'}]}}"); + assertExpression("A", + of(0).switchOn(on -> on.lte(of(0), v -> of("A"))), + "{'$switch': {'branches': [{'case': {'$lte': [0, 0]}, 'then': 'A'}]}}"); + // is type + assertExpression("A", + of(true).switchOn(on -> on.isBoolean(v -> of("A"))), + "{'$switch': {'branches': [{'case': {'$eq': [{'$type': [true]}, 'bool']}, 'then': 'A'}]}}"); + assertExpression("A", + of("x").switchOn(on -> on.isString(v -> of("A"))), + "{'$switch': {'branches': [{'case': {'$eq': [{'$type': ['x']}, 'string']}, 'then': 'A'}]}}"); + assertExpression("A", + of(Instant.ofEpochMilli(123)).switchOn(on -> on.isDate(v -> of("A"))), + "{'$switch': {'branches': [{'case': {'$in': [{'$type': " + + "[{'$date': '1970-01-01T00:00:00.123Z'}]}, ['date']]}, 'then': 'A'}]}}"); + assertExpression("A", + ofIntegerArray(0).switchOn(on -> on.isArray(v -> of("A"))), + "{'$switch': {'branches': [{'case': {'$isArray': [[0]]}, 'then': 'A'}]}}"); + assertExpression("A", + of(Document.parse("{}")).switchOn(on -> on.isDocument(v -> of("A"))), + "{'$switch': {'branches': [{'case': {'$eq': [{'$type': " + + "[{'$literal': {}}]}, 'object']}, 'then': 'A'}]}}"); + assertExpression("A", + ofMap(Document.parse("{}")).switchOn(on -> on.isMap(v -> of("A"))), + "{'$switch': {'branches': [{'case': {'$eq': [{'$type': " + + "[{'$literal': {}}]}, 'object']}, 'then': 'A'}]}}"); + assertExpression("A", + ofNull().switchOn(on -> on.isNull(v -> of("A"))), + "{'$switch': {'branches': [{'case': {'$eq': [null, null]}, 'then': 'A'}]}}"); + } + + @Test + public void switchTestInitialVersion44() { + assumeTrue(serverVersionAtLeast(4, 4)); + assertExpression("A", + of(1).switchOn(on -> on.isNumber(v -> of("A"))), + "{'$switch': {'branches': [{'case': {'$isNumber': [1]}, 'then': 'A'}]}}"); + assertExpression("A", + of(1).switchOn(on -> on.isInteger(v -> of("A"))), + "{'$switch': {'branches': [{'case': {'$switch': {'branches': [{'case': {'$isNumber': [1]}," + + "'then': {'$eq': [{'$round': 1}, 1]}}], 'default': false}}, 'then': 'A'}]}}"); + } + @Test + public void switchTestPartialVersion44() { + assumeTrue(serverVersionAtLeast(4, 4)); + assertExpression("A", + of(1).switchOn(on -> on.isNull(v -> of("X")).isNumber(v -> of("A"))), + "{'$switch': {'branches': [{'case': {'$eq': [1, null]}, 'then': 'X'}, " + + "{'case': {'$isNumber': [1]}, 'then': 'A'}]}}"); + assertExpression("A", + of(1).switchOn(on -> on.isNull(v -> of("X")).isInteger(v -> of("A"))), + "{'$switch': {'branches': [{'case': {'$eq': [1, null]}, 'then': 'X'}, {'case': " + + "{'$switch': {'branches': [{'case': {'$isNumber': [1]}, " + + "'then': {'$eq': [{'$round': 1}, 1]}}], 'default': false}}, 'then': 'A'}]}}"); + assertExpression("A", + ofNull().switchOn(on -> on.isNumber(v -> of("X")).isNull(v -> of("A"))), + "{'$switch': {'branches': [{'case': {'$isNumber': [null]}, 'then': 'X'}, " + + "{'case': {'$eq': [null, null]}, 'then': 'A'}]}}"); + } + + @Test + public void switchTestPartial() { + assertExpression("A", + of(0).switchOn(on -> on.isNull(v -> of("X")).is(v -> v.gt(of(-1)), v -> of("A"))), + "{'$switch': {'branches': [{'case': {'$eq': [0, null]}, 'then': 'X'}, " + + "{'case': {'$gt': [0, -1]}, 'then': 'A'}]}}"); + assertExpression("A", + of(0).switchOn(on -> on.isNull(v -> of("X")).defaults(v -> of("A"))), + "{'$switch': {'branches': [{'case': {'$eq': [0, null]}, 'then': 'X'}], " + + "'default': 'A'}}"); + // eq lt lte + assertExpression("A", + of(0).switchOn(on -> on.isNull(v -> of("X")).eq(of(0), v -> of("A"))), + "{'$switch': {'branches': [{'case': {'$eq': [0, null]}, 'then': 
'X'}, " + + "{'case': {'$eq': [0, 0]}, 'then': 'A'}]}}"); + assertExpression("A", + of(0).switchOn(on -> on.isNull(v -> of("X")).lt(of(1), v -> of("A"))), + "{'$switch': {'branches': [{'case': {'$eq': [0, null]}, 'then': 'X'}, " + + "{'case': {'$lt': [0, 1]}, 'then': 'A'}]}}"); + assertExpression("A", + of(0).switchOn(on -> on.isNull(v -> of("X")).lte(of(0), v -> of("A"))), + "{'$switch': {'branches': [{'case': {'$eq': [0, null]}, 'then': 'X'}, " + + "{'case': {'$lte': [0, 0]}, 'then': 'A'}]}}"); + // is type + assertExpression("A", + of(true).switchOn(on -> on.isNull(v -> of("X")).isBoolean(v -> of("A"))), + "{'$switch': {'branches': [{'case': {'$eq': [true, null]}, 'then': 'X'}, " + + "{'case': {'$eq': [{'$type': [true]}, 'bool']}, 'then': 'A'}]}}"); + assertExpression("A", + of("x").switchOn(on -> on.isNull(v -> of("X")).isString(v -> of("A"))), + "{'$switch': {'branches': [{'case': {'$eq': ['x', null]}, 'then': 'X'}, " + + "{'case': {'$eq': [{'$type': ['x']}, 'string']}, 'then': 'A'}]}}"); + assertExpression("A", + of(Instant.ofEpochMilli(123)).switchOn(on -> on.isNull(v -> of("X")).isDate(v -> of("A"))), + "{'$switch': {'branches': [" + + "{'case': {'$eq': [{'$date': '1970-01-01T00:00:00.123Z'}, null]}, 'then': 'X'}, " + + "{'case': {'$in': [{'$type': [{'$date': '1970-01-01T00:00:00.123Z'}]}, " + + "['date']]}, 'then': 'A'}]}}"); + assertExpression("A", + ofIntegerArray(0).switchOn(on -> on.isNull(v -> of("X")).isArray(v -> of("A"))), + "{'$switch': {'branches': [{'case': {'$eq': [[0], null]}, 'then': 'X'}, " + + "{'case': {'$isArray': [[0]]}, 'then': 'A'}]}}"); + assertExpression("A", + of(Document.parse("{}")).switchOn(on -> on.isNull(v -> of("X")).isDocument(v -> of("A"))), + "{'$switch': {'branches': [{'case': {'$eq': [{'$literal': {}}, null]}, 'then': 'X'}, " + + "{'case': {'$eq': [{'$type': [{'$literal': {}}]}, 'object']}, 'then': 'A'}]}}"); + assertExpression("A", + ofMap(Document.parse("{}")).switchOn(on -> on.isNull(v -> of("X")).isMap(v -> of("A"))), + "{'$switch': {'branches': [{'case': {'$eq': [{'$literal': {}}, null]}, 'then': 'X'}, " + + "{'case': {'$eq': [{'$type': [{'$literal': {}}]}, 'object']}, 'then': 'A'}]}}"); + } +} diff --git a/driver-core/src/test/functional/com/mongodb/client/model/mql/DateMqlValuesFunctionalTest.java b/driver-core/src/test/functional/com/mongodb/client/model/mql/DateMqlValuesFunctionalTest.java new file mode 100644 index 00000000000..2683b772bf2 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/client/model/mql/DateMqlValuesFunctionalTest.java @@ -0,0 +1,138 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model.mql; + +import org.junit.jupiter.api.Test; + +import java.time.Instant; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.time.temporal.ChronoField; + +import static com.mongodb.client.model.mql.MqlValues.of; +import static org.junit.jupiter.api.Assertions.assertThrows; + +@SuppressWarnings("ConstantConditions") +class DateMqlValuesFunctionalTest extends AbstractMqlValuesFunctionalTest { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/#date-expression-operators + + private final Instant instant = Instant.parse("2007-12-03T10:15:30.005Z"); + private final MqlDate date = of(instant); + private final ZonedDateTime utcDateTime = ZonedDateTime.ofInstant(instant, ZoneId.of(ZoneOffset.UTC.getId())); + private final MqlString utc = of("UTC"); + + @Test + public void literalsTest() { + assertExpression( + instant, + date, + "{'$date': '2007-12-03T10:15:30.005Z'}"); + assertThrows(IllegalArgumentException.class, () -> of((Instant) null)); + } + + @Test + public void yearTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/year/ + assertExpression( + utcDateTime.get(ChronoField.YEAR), + date.year(utc), + "{'$year': {'date': {'$date': '2007-12-03T10:15:30.005Z'}, 'timezone': 'UTC'}}"); + } + + @Test + public void monthTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/month/ + assertExpression( + utcDateTime.get(ChronoField.MONTH_OF_YEAR), + date.month(utc), + "{'$month': {'date': {'$date': '2007-12-03T10:15:30.005Z'}, 'timezone': 'UTC'}}"); + } + + @Test + public void dayOfMonthTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/dayOfMonth/ + assertExpression( + utcDateTime.get(ChronoField.DAY_OF_MONTH), + date.dayOfMonth(utc), + "{'$dayOfMonth': {'date': {'$date': '2007-12-03T10:15:30.005Z'}, 'timezone': 'UTC'}}"); + } + + @Test + public void dayOfWeekTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/dayOfWeek/ + assertExpression( + utcDateTime.get(ChronoField.DAY_OF_WEEK) + 1, + date.dayOfWeek(utc), + "{'$dayOfWeek': {'date': {'$date': '2007-12-03T10:15:30.005Z'}, 'timezone': 'UTC'}}"); + } + + @Test + public void dayOfYearTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/dayOfYear/ + assertExpression( + utcDateTime.get(ChronoField.DAY_OF_YEAR), + date.dayOfYear(utc), + "{'$dayOfYear': {'date': {'$date': '2007-12-03T10:15:30.005Z'}, 'timezone': 'UTC'}}"); + } + + @Test + public void hourTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/hour/ + assertExpression( + utcDateTime.get(ChronoField.HOUR_OF_DAY), + date.hour(utc), + "{'$hour': {'date': {'$date': '2007-12-03T10:15:30.005Z'}, 'timezone': 'UTC'}}"); + } + + @Test + public void minuteTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/minute/ + assertExpression( + utcDateTime.get(ChronoField.MINUTE_OF_HOUR), + date.minute(utc), + "{'$minute': {'date': {'$date': '2007-12-03T10:15:30.005Z'}, 'timezone': 'UTC'}}"); + } + + @Test + public void secondTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/second/ + assertExpression( + utcDateTime.get(ChronoField.SECOND_OF_MINUTE), + date.second(utc), + "{'$second': {'date': {'$date': '2007-12-03T10:15:30.005Z'}, 'timezone': 'UTC'}}"); + } + + @Test + public void weekTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/week/ + 
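+        // 48 is hard-coded: java.time has no direct analogue of $week, which
+        // counts weeks as starting on Sundays and treats days before the first
+        // Sunday of the year as week 0 (for 2007-12-03, a Monday, this gives
+        // week 48); ChronoField.ALIGNED_WEEK_OF_YEAR would not agree.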
assertExpression( + 48, + date.week(utc), + "{'$week': {'date': {'$date': '2007-12-03T10:15:30.005Z'}, 'timezone': 'UTC'}}"); + } + + @Test + public void millisecondTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/millisecond/ + assertExpression( + utcDateTime.get(ChronoField.MILLI_OF_SECOND), + date.millisecond(utc), + "{'$millisecond': {'date': {'$date': '2007-12-03T10:15:30.005Z'}, 'timezone': 'UTC'}}"); + } + +} diff --git a/driver-core/src/test/functional/com/mongodb/client/model/mql/DocumentMqlValuesFunctionalTest.java b/driver-core/src/test/functional/com/mongodb/client/model/mql/DocumentMqlValuesFunctionalTest.java new file mode 100644 index 00000000000..9afc7274953 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/client/model/mql/DocumentMqlValuesFunctionalTest.java @@ -0,0 +1,283 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.mql; + +import org.bson.BsonDocument; +import org.bson.Document; +import org.bson.conversions.Bson; +import org.bson.types.Decimal128; +import org.junit.jupiter.api.Test; + +import java.time.Instant; +import java.util.Arrays; + +import static com.mongodb.ClusterFixture.serverVersionAtLeast; +import static com.mongodb.client.model.mql.MqlValues.of; +import static com.mongodb.client.model.mql.MqlValues.ofIntegerArray; +import static com.mongodb.client.model.mql.MqlValues.ofMap; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +@SuppressWarnings("ConstantConditions") +class DocumentMqlValuesFunctionalTest extends AbstractMqlValuesFunctionalTest { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/#object-expression-operators + // (Complete as of 6.0) + + private static MqlDocument ofDoc(final String ofDoc) { + return of(BsonDocument.parse(ofDoc)); + } + + private final MqlDocument a1 = ofDoc("{a: 1}"); + private final MqlDocument ax1ay2 = ofDoc("{a: {x: 1, y: 2}}"); + + @Test + public void literalsTest() { + assertExpression( + BsonDocument.parse("{'a': 1}"), + ofDoc("{a: 1}"), + "{'$literal': {'a': 1}}"); + assertThrows(IllegalArgumentException.class, () -> of((Bson) null)); + // doc inside doc + assertExpression( + BsonDocument.parse("{'a': {'x': 1, 'y': 2}}"), + ofDoc("{a: {x: 1, y: 2}}"), + "{'$literal': {'a': {'x': 1, 'y': 2}}}"); + // empty + assertExpression( + BsonDocument.parse("{}"), + ofDoc("{}"), + "{'$literal': {}}"); + // ensure is literal + assertExpression(BsonDocument.parse( + "{'lit': {'$not': true}}"), + of(BsonDocument.parse("{lit: {'$not': true} }")), + "{'$literal': {'lit': {'$not': true}}}"); + } + + @Test + public void getFieldTest() { + assumeTrue(serverVersionAtLeast(5, 0)); // get/setField + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/getField/ (100) + // these count as assertions by the user that the value is of 
the correct type + + assertExpression(1, + a1.getField("a"), + "{'$getField': {'input': {'$literal': {'a': 1}}, 'field': 'a'}}"); + assertExpression(2, + a1.getInteger("a").multiply(2), + "{'$multiply': [{'$getField': {'input': {'$literal': {'a': 1}}, 'field': 'a'}}, 2]}"); + + // different types + String getFieldMql = "{'$getField': {'input': {'$literal': {'a': 1}}, 'field': 'a'}}"; + assertExpression(1, a1.getNumber("a"), getFieldMql); + // these are all violations, since they assert the wrong type, but we are testing the generated Mql: + assertExpression(1, a1.getBoolean("a"), getFieldMql); + assertExpression(1, a1.getInteger("a"), getFieldMql); + assertExpression(1, a1.getString("a"), getFieldMql); + assertExpression(1, a1.getDate("a"), getFieldMql); + assertExpression(1, a1.getArray("a"), getFieldMql); + assertExpression(1, a1.getDocument("a"), getFieldMql); + // usage with other expressions + assertExpression(false, ofDoc("{a: true}").getBoolean("a").not()); + assertExpression(0.5, ofDoc("{a: 1.0}").getNumber("a").divide(2)); + assertExpression(8, ofIntegerArray(9, 8, 7).elementAt(ofDoc("{a: 1.0}").getInteger("a"))); + assertExpression("a", ofDoc("{a: 'A'}").getString("a").toLower()); + assertExpression(12, ofDoc("{a: {'$date': '2007-12-03T10:15:30.005Z'}}") + .getDate("a").month(of("UTC"))); + assertExpression(3, ofDoc("{a: [3, 2]}").getArray("a").first()); + assertExpression(2, ofDoc("{a: {b: 2}}").getDocument("a").getInteger("b")); + + // field names, not paths + MqlDocument doc = ofDoc("{a: {b: 2}, 'a.b': 3, 'a$b': 4, '$a.b': 5}"); + assertExpression(2, doc.getDocument("a").getInteger("b")); + assertExpression(3, doc.getInteger("a.b")); + assertExpression(4, doc.getInteger("a$b")); + assertExpression(5, + doc.getInteger("$a.b"), + "{'$getField': {'input': {'$literal': {'a': {'b': 2}, 'a.b': 3, 'a$b': 4, '$a.b': 5}}, " + + "'field': {'$literal': '$a.b'}}}"); + } + + @Test + public void getFieldOrTest() { + assumeTrue(serverVersionAtLeast(5, 0)); // get/setField + // convenience + assertExpression(true, ofDoc("{a: true}").getBoolean("a", false)); + assertExpression(1.0, ofDoc("{a: 1.0}").getNumber("a", 99)); + assertExpression(1.0, ofDoc("{a: 1.0}").getNumber("a", Decimal128.parse("99"))); + assertExpression("A", ofDoc("{a: 'A'}").getString("a", "Z")); + assertExpression(2007, ofDoc("{a: {'$date': '2007-12-03T10:15:30.005Z'}}") + .getDate("a", Instant.EPOCH).year(of("UTC"))); + // no convenience for arrays + assertExpression(Document.parse("{b: 2}"), ofDoc("{a: {b: 2}}") + .getDocument("a", Document.parse("{z: 99}"))); + assertExpression(Document.parse("{b: 2}"), ofDoc("{a: {b: 2}}") + .getMap("a", Document.parse("{z: 99}"))); + + // normal + assertExpression(true, ofDoc("{a: true}").getBoolean("a", of(false))); + assertExpression(1.0, ofDoc("{a: 1.0}").getNumber("a", of(99))); + assertExpression(1.0, ofDoc("{a: 1.0}").getInteger("a", of(99))); + assertExpression("A", ofDoc("{a: 'A'}").getString("a", of("Z"))); + assertExpression(2007, ofDoc("{a: {'$date': '2007-12-03T10:15:30.005Z'}}") + .getDate("a", of(Instant.EPOCH)).year(of("UTC"))); + assertExpression(Arrays.asList(3, 2), ofDoc("{a: [3, 2]}").getArray("a", ofIntegerArray(99, 88))); + assertExpression(Document.parse("{b: 2}"), ofDoc("{a: {b: 2}}") + .getDocument("a", of(Document.parse("{z: 99}")))); + assertExpression(Document.parse("{b: 2}"), ofDoc("{a: {b: 2}}") + .getMap("a", ofMap(Document.parse("{z: 99}")))); + + // right branch (missing field) + assertExpression(false, ofDoc("{}").getBoolean("a", false)); + 
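+        // (the remaining overloads fall back to their supplied defaults in
+        // the same way when the field is absent:)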
assertExpression(99, ofDoc("{}").getInteger("a", 99)); + assertExpression(99, ofDoc("{}").getNumber("a", 99)); + assertExpression(99L, ofDoc("{}").getNumber("a", 99L)); + assertExpression(99.0, ofDoc("{}").getNumber("a", 99.0)); + assertExpression(Decimal128.parse("99"), ofDoc("{}").getNumber("a", Decimal128.parse("99"))); + assertExpression("Z", ofDoc("{}").getString("a", "Z")); + assertExpression(1970, ofDoc("{}") + .getDate("a", Instant.EPOCH).year(of("UTC"))); + assertExpression(Arrays.asList(99, 88), ofDoc("{}").getArray("a", ofIntegerArray(99, 88))); + assertExpression(Document.parse("{z: 99}"), ofDoc("{}") + .getDocument("a", Document.parse("{z: 99}"))); + assertExpression(Document.parse("{z: 99}"), ofDoc("{}") + .getMap("a", Document.parse("{z: 99}"))); + + // int vs num + assertExpression(99, ofDoc("{a: 1.1}").getInteger("a", of(99))); + } + + @Test + public void getFieldMissingTest() { + assumeTrue(serverVersionAtLeast(5, 0)); // get/setField + // missing fields + assertExpression( + BsonDocument.parse("{'a': 1}"), + a1.setField("z", a1.getBoolean("missing"))); + assertExpression( + BsonDocument.parse("{'a': 1}"), + a1.setField("z", a1.getDocument("missing").getDocument("also_missing"))); + assertExpression( + BsonDocument.parse("{'a': 1, 'z': ''}"), + a1.setField("z", a1.getString("missing").toLower())); + /* + The behaviour of missing fields appears to be as follows, and equivalent to $$REMOVE: + propagates -- getField, cond branches... + false -- not, or, cond check... + 0 -- sum... + "" -- toLower... + null -- multiply, add, subtract, year, filter, reduce, map, result within map... + */ + } + + @Test + public void setFieldTest() { + assumeTrue(serverVersionAtLeast(5, 0)); // get/setField + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/setField/ + // Placing a field based on a literal: + assertExpression( + BsonDocument.parse("{a: 1, r: 2}"), // map.put("r", 2) + a1.setField("r", of(2)), + "{'$setField': {'field': 'r', 'input': {'$literal': {'a': 1}}, 'value': 2}}"); + + // Placing a null value: + assertExpression( + BsonDocument.parse("{a: 1, r: null}"), // map.put("r", null) + a1.setField("r", MqlValues.ofNull()), + "{'$setField': {'field': 'r', 'input': {'$literal': {'a': 1}}, 'value': null}}"); + + // Replacing a field based on its prior value: + assertExpression( + BsonDocument.parse("{a: 3}"), // map.put("a", map.get("a") * 3) + a1.setField("a", a1.getInteger("a").multiply(3)), + "{'$setField': {'field': 'a', 'input': {'$literal': {'a': 1}}, 'value': " + + "{'$multiply': [{'$getField': {'input': {'$literal': {'a': 1}}, 'field': 'a'}}, 3]}}}"); + + // Placing a field based on a nested object: + assertExpression( + BsonDocument.parse("{'a': {'x': 1, 'y': 2}, r: 10}"), + ax1ay2.setField("r", ax1ay2.getDocument("a").getInteger("x").multiply(10)), + "{'$setField': {'field': 'r', 'input': {'$literal': {'a': {'x': 1, 'y': 2}}}, " + + "'value': {'$multiply': [" + + " {'$getField': {'input': {'$getField': {'input': {'$literal': {'a': {'x': 1, 'y': 2}}}, " + + " 'field': 'a'}}, 'field': 'x'}}, 10]}}}"); + + // Replacing a nested object requires two setFields, as expected: + assertExpression( + // "with" syntax: [ { a:{x:1,y:2} } ].map(d -> d.with("a", d.a.with("y", d.a.y.multiply(11)))) + BsonDocument.parse("{'a': {'x': 1, 'y': 22}}"), + ax1ay2.setField("a", ax1ay2.getDocument("a") + .setField("y", ax1ay2.getDocument("a").getInteger("y").multiply(11))), + "{'$setField': {'field': 'a', 'input': {'$literal': {'a': {'x': 1, 'y': 2}}}, " + + "'value': 
{'$setField': {'field': 'y', 'input': {'$getField': " + + "{'input': {'$literal': {'a': {'x': 1, 'y': 2}}}, 'field': 'a'}}, " + + "'value': {'$multiply': [{'$getField': {'input': {'$getField': " + + "{'input': {'$literal': {'a': {'x': 1, 'y': 2}}}, 'field': 'a'}}, " + + "'field': 'y'}}, 11]}}}}}"); + } + + @Test + public void unsetFieldTest() { + assumeTrue(serverVersionAtLeast(5, 0)); // get/setField (unset) + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/unsetField/ + assertExpression( + BsonDocument.parse("{}"), // map.remove("a") + a1.unsetField("a"), + "{'$unsetField': {'field': 'a', 'input': {'$literal': {'a': 1}}}}"); + } + + @Test + public void mergeTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/mergeObjects/ + assertExpression( + BsonDocument.parse("{a: 1, b: 2}"), + ofDoc("{a: 1}").merge(ofDoc("{b: 2}")), + "{'$mergeObjects': [{'$literal': {'a': 1}}, {'$literal': {'b': 2}}]}"); + + assertExpression( + BsonDocument.parse("{a: null}"), + ofDoc("{a: 1}").merge(ofDoc("{a: null}"))); + + assertExpression( + BsonDocument.parse("{a: 1}"), + ofDoc("{a: null}").merge(ofDoc("{a: 1}"))); + } + + @Test + public void asMapTest() { + MqlDocument d = ofDoc("{a: 1}"); + assertSame(d, d.asMap()); + } + + + @Test + public void hasTest() { + assumeTrue(serverVersionAtLeast(5, 0)); // get/setField + MqlDocument d = ofDoc("{a: 1, null: null}"); + assertExpression( + true, + d.hasField("a"), + "{'$ne': [{'$getField': {'input': {'$literal': {'a': 1, 'null': null}}, 'field': 'a'}}, '$$REMOVE']}"); + assertExpression( + false, + d.hasField("not_a")); + assertExpression( + true, + d.hasField("null")); + } +} diff --git a/driver-core/src/test/functional/com/mongodb/client/model/mql/MapMqlValuesFunctionalTest.java b/driver-core/src/test/functional/com/mongodb/client/model/mql/MapMqlValuesFunctionalTest.java new file mode 100644 index 00000000000..b85100e4e05 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/client/model/mql/MapMqlValuesFunctionalTest.java @@ -0,0 +1,205 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.mongodb.client.model.mql;
+
+import org.bson.BsonDocument;
+import org.bson.Document;
+import org.junit.jupiter.api.Test;
+
+import java.util.Arrays;
+
+import static com.mongodb.ClusterFixture.serverVersionAtLeast;
+import static com.mongodb.client.model.mql.MqlValues.of;
+import static com.mongodb.client.model.mql.MqlValues.ofArray;
+import static com.mongodb.client.model.mql.MqlValues.ofEntry;
+import static com.mongodb.client.model.mql.MqlValues.ofMap;
+import static com.mongodb.client.model.mql.MqlValues.ofStringArray;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
+class MapMqlValuesFunctionalTest extends AbstractMqlValuesFunctionalTest {
+
+    private final MqlMap<MqlInteger> mapKey123 = MqlValues.<MqlInteger>ofMap()
+            .set("key", of(123));
+
+    private final MqlMap<MqlInteger> mapA1B2 = ofMap(Document.parse("{keyA: 1, keyB: 2}"));
+
+    @Test
+    public void literalsTest() {
+        // entry
+        assertExpression(
+                Document.parse("{k: 'keyA', v: 1}"),
+                ofEntry(of("keyA"), of(1)));
+        assumeTrue(serverVersionAtLeast(5, 0)); // get/setField (unset)
+        // map
+        assertExpression(
+                Document.parse("{keyA: 1, keyB: 2}"),
+                ofMap(Document.parse("{keyA: 1, keyB: 2}")),
+                "{'$literal': {'keyA': 1, 'keyB': 2}}");
+        assertExpression(
+                Document.parse("{key: 123}"),
+                mapKey123,
+                "{'$setField': {'field': 'key', 'input': {'$literal': {}}, 'value': 123}}");
+    }
+
+    @Test
+    public void getSetMapTest() {
+        assumeTrue(serverVersionAtLeast(5, 0)); // get/setField
+        // get
+        assertExpression(
+                123,
+                mapKey123.get("key"));
+        assertExpression(
+                1,
+                mapKey123.get("missing", of(1)));
+        // set (map.put)
+        assertExpression(
+                BsonDocument.parse("{key: 123, b: 1}"),
+                mapKey123.set("b", of(1)));
+        // unset (delete)
+        assertExpression(
+                BsonDocument.parse("{}"),
+                mapKey123.unset("key"));
+        // "other" parameter
+        assertExpression(
+                null,
+                ofMap(Document.parse("{ 'null': null }")).get("null", of(1)));
+    }
+
+    @Test
+    public void hasTest() {
+        assumeTrue(serverVersionAtLeast(5, 0)); // get/setField (unset)
+        MqlMap<MqlValue> e = ofMap(BsonDocument.parse("{key: 1, null: null}"));
+        assertExpression(
+                true,
+                e.has(of("key")),
+                "{'$ne': [{'$getField': {'input': {'$literal': {'key': 1, 'null': null}}, 'field': 'key'}}, '$$REMOVE']}");
+        assertExpression(
+                false,
+                e.has("not_key"));
+        assertExpression(
+                true,
+                e.has("null"));
+        // consistency:
+        assertExpression(true, e.has("null"));
+        assertExpression(null, e.get("null", of(1)));
+    }
+
+    @Test
+    public void getSetEntryTest() {
+        assumeTrue(serverVersionAtLeast(5, 0)); // get/setField
+        MqlEntry<MqlInteger> entryA1 = ofEntry(of("keyA"), of(1));
+        assertExpression(
+                Document.parse("{k: 'keyA', 'v': 33}"),
+                entryA1.setValue(of(33)));
+        assertExpression(
+                Document.parse("{k: 'keyB', 'v': 1}"),
+                entryA1.setKey(of("keyB")));
+    }
+
+    @Test
+    public void buildMapTest() {
+        // https://www.mongodb.com/docs/manual/reference/operator/aggregation/arrayToObject/ (48)
+        assertExpression(
+                Document.parse("{'keyA': 1}"),
+                ofArray(ofEntry(of("keyA"), of(1))).asMap(v -> v),
+                "{'$arrayToObject': [{'$map': {'input': [{'k': 'keyA', 'v': 1}], 'in': '$$this'}}]}");
+
+        assumeTrue(serverVersionAtLeast(5, 0)); // get/setField
+        assertExpression(
+                Document.parse("{'keyA': 55}"),
+                ofArray(ofEntry(of("keyA"), of(1))).asMap(v -> v.setValue(of(55))),
+                "{'$arrayToObject': [{'$map': {'input': [{'k': 'keyA', 'v': 1}], "
+                        + "'in': {'$setField': {'field': 'v', 'input': '$$this', 'value': 55}}}}]}");
+
+        // using documents
+        assertExpression(
+                Document.parse("{ 'item' : 'abc123', 'qty' : 25 }"),
+                ofArray(
+                        of(Document.parse("{ 'k': 'item', 'v': 'abc123' }")),
+                        of(Document.parse("{ 'k': 'qty', 'v': 25 }")))
+                        .asMap(v -> ofEntry(v.getString("k"), v.getField("v"))));
+        // using arrays
+        assertExpression(
+                Document.parse("{ 'item' : 'abc123', 'qty' : 25 }"),
+                ofArray(
+                        ofStringArray("item", "abc123"),
+                        ofArray(of("qty"), of(25)))
+                        .asMap(v -> ofEntry(v.elementAt(of(0)).asString(), v.elementAt(of(1)))));
+        // last listed value used
+        assertExpression(
+                Document.parse("{ 'item' : 'abc123' }"),
+                ofArray(
+                        MqlValues.<MqlString>ofMap(Document.parse("{ 'k': 'item', 'v': '123abc' }")),
+                        MqlValues.<MqlString>ofMap(Document.parse("{ 'k': 'item', 'v': 'abc123' }")))
+                        .asMap(v -> ofEntry(v.get("k"), v.get("v"))));
+    }
+
+    @Test
+    public void entrySetTest() {
+        assumeTrue(serverVersionAtLeast(5, 0)); // get/setField
+        // https://www.mongodb.com/docs/manual/reference/operator/aggregation/objectToArray/ (23)
+        assertExpression(
+                Arrays.asList(Document.parse("{'k': 'k1', 'v': 1}")),
+                MqlValues.<MqlInteger>ofMap().set("k1", of(1)).entries(),
+                "{'$objectToArray': {'$setField': "
+                        + "{'field': 'k1', 'input': {'$literal': {}}, 'value': 1}}}");
+
+        // key/value usage
+        assertExpression(
+                "keyA|keyB|",
+                mapA1B2.entries().map(v -> v.getKey().append(of("|"))).joinStrings(v -> v));
+        assertExpression(
+                23,
+                mapA1B2.entries().map(v -> v.getValue().add(10)).sum(v -> v));
+
+        // combined entrySet-buildMap usage
+        assertExpression(
+                Document.parse("{'keyA': 2, 'keyB': 3}"),
+                mapA1B2
+                        .entries()
+                        .map(v -> v.setValue(v.getValue().add(1)))
+                        .asMap(v -> v));
+
+        // via getMap
+        MqlDocument doc = of(Document.parse("{ instock: { warehouse1: 2500, warehouse2: 500 } }"));
+        assertExpression(
+                Arrays.asList(
+                        Document.parse("{'k': 'warehouse1', 'v': 2500}"),
+                        Document.parse("{'k': 'warehouse2', 'v': 500}")),
+                doc.getMap("instock").entries(),
+                "{'$objectToArray': {'$getField': {'input': {'$literal': "
+                        + "{'instock': {'warehouse1': 2500, 'warehouse2': 500}}}, 'field': 'instock'}}}");
+    }
+
+    @Test
+    public void mergeTest() {
+        assertExpression(
+                Document.parse("{'keyA': 9, 'keyB': 2, 'keyC': 3}"),
+                ofMap(Document.parse("{keyA: 1, keyB: 2}"))
+                        .merge(ofMap(Document.parse("{keyA: 9, keyC: 3}"))),
+                "{'$mergeObjects': [{'$literal': {'keyA': 1, 'keyB': 2}}, "
+                        + "{'$literal': {'keyA': 9, 'keyC': 3}}]}");
+    }
+
+    @Test
+    public void asDocumentTest() {
+        MqlMap<MqlValue> d = ofMap(BsonDocument.parse("{a: 1}"));
+        assertSame(d, d.asDocument());
+    }
+}
diff --git a/driver-core/src/test/functional/com/mongodb/client/model/mql/NotNullApiTest.java b/driver-core/src/test/functional/com/mongodb/client/model/mql/NotNullApiTest.java
new file mode 100644
index 00000000000..97635bbf44d
--- /dev/null
+++ b/driver-core/src/test/functional/com/mongodb/client/model/mql/NotNullApiTest.java
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model.mql;
+
+import org.bson.BsonDocument;
+import org.bson.conversions.Bson;
+import org.bson.types.Decimal128;
+import org.junit.jupiter.api.Test;
+
+import java.lang.reflect.Array;
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Function;
+
+import static com.mongodb.assertions.Assertions.fail;
+import static com.mongodb.client.model.mql.MqlValues.of;
+import static com.mongodb.client.model.mql.MqlValues.ofArray;
+import static com.mongodb.client.model.mql.MqlValues.ofEntry;
+import static com.mongodb.client.model.mql.MqlValues.ofMap;
+import static com.mongodb.client.model.mql.MqlValues.ofNull;
+
+class NotNullApiTest {
+
+    @Test
+    public void notNullApiTest() {
+        Map<Class<?>, Object> mapping = new HashMap<>();
+        Map<Class<?>, Object> paramMapping = new HashMap<>();
+
+        // to test:
+        mapping.put(MqlValues.class, null);
+        mapping.put(MqlBoolean.class, of(true));
+        mapping.put(MqlInteger.class, of(1));
+        mapping.put(MqlNumber.class, of(1.0));
+        mapping.put(MqlString.class, of(""));
+        mapping.put(MqlDate.class, of(Instant.now()));
+        mapping.put(MqlDocument.class, of(BsonDocument.parse("{}")));
+        mapping.put(MqlMap.class, ofMap(BsonDocument.parse("{}")));
+        mapping.put(MqlArray.class, ofArray());
+        mapping.put(MqlValue.class, ofNull());
+        mapping.put(MqlEntry.class, ofEntry(of(""), of("")));
+        mapping.put(Branches.class, new Branches<>());
+        mapping.put(BranchesIntermediary.class, new BranchesIntermediary<>(Collections.emptyList()));
+        mapping.put(BranchesTerminal.class, new BranchesTerminal<>(Collections.emptyList(), null));
+
+        // additional params from classes not tested:
+        paramMapping.put(String.class, "");
+        paramMapping.put(Instant.class, Instant.now());
+        paramMapping.put(Bson.class, BsonDocument.parse("{}"));
+        paramMapping.put(Function.class, Function.identity());
+        paramMapping.put(Number.class, 1);
+        paramMapping.put(int.class, 1);
+        paramMapping.put(boolean.class, true);
+        paramMapping.put(long.class, 1L);
+        paramMapping.put(Object.class, new Object());
+        paramMapping.put(Decimal128.class, new Decimal128(1));
+        putArray(paramMapping, MqlValue.class);
+        putArray(paramMapping, boolean.class);
+        putArray(paramMapping, long.class);
+        putArray(paramMapping, int.class);
+        putArray(paramMapping, double.class);
+        putArray(paramMapping, Decimal128.class);
+        putArray(paramMapping, Instant.class);
+        putArray(paramMapping, String.class);
+
+        checkNotNullApi(mapping, paramMapping);
+    }
+
+    private void putArray(final Map<Class<?>, Object> paramMapping, final Class<?> componentType) {
+        final Object o = Array.newInstance(componentType, 0);
+        paramMapping.put(o.getClass(), o);
+    }
+
+    // Reflectively invokes every public method of each tested class with one
+    // non-primitive argument set to null at a time, and collects the methods
+    // that fail to throw from Assertions.notNull.
+    private void checkNotNullApi(
+            final Map<Class<?>, Object> mapping,
+            final Map<Class<?>, Object> paramMapping) {
+        Map<Class<?>, Object> allParams = new HashMap<>();
+        allParams.putAll(mapping);
+        allParams.putAll(paramMapping);
+        List<String> uncheckedMethods = new ArrayList<>();
+        for (Map.Entry<Class<?>, Object> entry : mapping.entrySet()) {
+            Object instance = entry.getValue();
+            Class<?> clazz = entry.getKey();
+            Method[] methods = clazz.getDeclaredMethods();
+            for (Method method : methods) {
+                if (!Modifier.isPublic(method.getModifiers())) {
+                    continue;
+                }
+                boolean failed = false;
+                for (int i = 0; i < method.getParameterCount(); i++) {
+                    if (method.getParameterTypes()[i].isPrimitive()) {
+                        continue;
+                    }
+                    if (method.toString().endsWith(".equals(java.lang.Object)")) {
+                        continue;
+                    }
+                    Object[] args = createArgs(allParams, method);
+                    args[i] = null; // set one parameter to null
+                    try {
+                        // the method needs to throw due to Assertions.notNull:
+                        method.invoke(instance, args);
+                        failed = true;
+                    } catch (Exception e) {
+                        Throwable cause = e.getCause();
+                        if (!(cause instanceof IllegalArgumentException)) {
+                            failed = true;
+                            continue;
+                        }
+                        StackTraceElement[] trace = cause.getStackTrace();
+                        if (!method.getName().equals(trace[1].getMethodName())) {
+                            failed = true;
+                        }
+                        if (!"notNull".equals(trace[0].getMethodName())) {
+                            failed = true;
+                        }
+                    }
+                }
+                if (failed) {
+                    uncheckedMethods.add("> " + method);
+                }
+            }
+        }
+        if (uncheckedMethods.size() > 0) {
+            fail("Assertions.notNull must be called on parameter from "
+                    + uncheckedMethods.size() + " methods:\n"
+                    + String.join("\n", uncheckedMethods));
+        }
+    }
+
+    private Object[] createArgs(final Map<Class<?>, ?> mapping, final Method method) {
+        Object[] args = new Object[method.getParameterCount()];
+        Class<?>[] parameterTypes = method.getParameterTypes();
+        for (int j = 0; j < parameterTypes.length; j++) {
+            Class<?> p = parameterTypes[j];
+            Object arg = mapping.get(p);
+            if (arg == null) {
+                throw new IllegalArgumentException("mappings did not contain parameter of type: "
+                        + p + " for method " + method);
+            }
+            args[j] = arg;
+        }
+        return args;
+    }
+
+}
diff --git a/driver-core/src/test/functional/com/mongodb/client/model/mql/StringMqlValuesFunctionalTest.java b/driver-core/src/test/functional/com/mongodb/client/model/mql/StringMqlValuesFunctionalTest.java
new file mode 100644
index 00000000000..de29a2400f2
--- /dev/null
+++ b/driver-core/src/test/functional/com/mongodb/client/model/mql/StringMqlValuesFunctionalTest.java
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.mongodb.client.model.mql; + +import org.junit.jupiter.api.Test; + +import java.nio.charset.StandardCharsets; +import java.util.Arrays; + +import static com.mongodb.client.model.mql.MqlValues.of; +import static org.junit.jupiter.api.Assertions.assertThrows; + +@SuppressWarnings({"ConstantConditions"}) +class StringMqlValuesFunctionalTest extends AbstractMqlValuesFunctionalTest { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/#string-expression-operators + + private final String jalapeno = "jalape\u00F1o"; + private final String sushi = "\u5BFF\u53F8"; + private final String fish = "\uD83D\uDC1F"; + + @Test + public void literalsTest() { + assertExpression("", of(""), "''"); + assertExpression("abc", of("abc"), "'abc'"); + assertThrows(IllegalArgumentException.class, () -> of((String) null)); + assertExpression(fish, of(fish), "'" + fish + "'"); + + // must escape: + assertExpression( + "$abc", + of("$abc"), + "{'$literal': '$abc'}"); + } + + @Test + public void concatTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/concat/ + assertExpression( + "abc".concat("de"), + of("abc").append(of("de")), + "{'$concat': ['abc', 'de']}"); + } + + @Test + public void toLowerTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/toLower/ + assertExpression( + "ABC".toLowerCase(), + of("ABC").toLower(), + "{'$toLower': 'ABC'}"); + } + + @Test + public void toUpperTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/toUpper/ + assertExpression( + "abc".toUpperCase(), + of("abc").toUpper(), + "{'$toUpper': 'abc'}"); + } + + @Test + public void strLenTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/strLenCP/ + assertExpression( + "abc".codePointCount(0, 3), + of("abc").length(), + "{'$strLenCP': 'abc'}"); + + // unicode + assertExpression( + jalapeno.codePointCount(0, jalapeno.length()), + of(jalapeno).length(), + "{'$strLenCP': '" + jalapeno + "'}"); + assertExpression( + sushi.codePointCount(0, sushi.length()), + of(sushi).length(), + "{'$strLenCP': '" + sushi + "'}"); + assertExpression( + fish.codePointCount(0, fish.length()), + of(fish).length(), + "{'$strLenCP': '" + fish + "'}"); + } + + @Test + public void strLenBytesTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/strLenBytes/ + assertExpression( + "abc".getBytes(StandardCharsets.UTF_8).length, + of("abc").lengthBytes(), + "{'$strLenBytes': 'abc'}"); + + // unicode + assertExpression( + jalapeno.getBytes(StandardCharsets.UTF_8).length, + of(jalapeno).lengthBytes(), + "{'$strLenBytes': '" + jalapeno + "'}"); + assertExpression( + sushi.getBytes(StandardCharsets.UTF_8).length, + of(sushi).lengthBytes(), + "{'$strLenBytes': '" + sushi + "'}"); + assertExpression( + fish.getBytes(StandardCharsets.UTF_8).length, + of(fish).lengthBytes(), + "{'$strLenBytes': '" + fish + "'}"); + + // comparison + assertExpression(8, of(jalapeno).length()); + assertExpression(9, of(jalapeno).lengthBytes()); + assertExpression(2, of(sushi).length()); + assertExpression(6, of(sushi).lengthBytes()); + assertExpression(1, of(fish).length()); + assertExpression(4, of(fish).lengthBytes()); + } + + @Test + public void substrTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/substr/ + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/substrCP/ + // substr is deprecated, an alias for bytes + assertExpression( + "abc".substring(1, 1 + 1), + 
of("abc").substr(of(1), of(1)), + "{'$substrCP': ['abc', 1, 1]}"); + assertExpression( + "bc", + of("abc").substr(of(1), of(100)), + "{'$substrCP': ['abc', 1, 100]}"); + + // unicode + assertExpression( + jalapeno.substring(5, 5 + 3), + of(jalapeno).substr(of(5), of(3)), + "{'$substrCP': ['" + jalapeno + "', 5, 3]}"); + assertExpression( + "e\u00F1o", + of(jalapeno).substr(of(5), of(3))); + + // bounds; convenience + assertExpression("abc", of("abc").substr(0, 99)); + assertExpression("ab", of("abc").substr(0, 2)); + assertExpression("b", of("abc").substr(1, 1)); + assertExpression("", of("abc").substr(1, 0)); + } + + @Test + public void substrBytesTest() { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/substrBytes/ + assertExpression( + "b", + of("abc").substrBytes(of(1), of(1)), + "{'$substrBytes': ['abc', 1, 1]}"); + + // unicode + byte[] bytes = Arrays.copyOfRange(sushi.getBytes(StandardCharsets.UTF_8), 0, 3); + String expected = new String(bytes, StandardCharsets.UTF_8); + assertExpression(expected, + of(sushi).substrBytes(of(0), of(3))); + // server returns "starting index is a UTF-8 continuation byte" error when substrBytes(1, 1) + + // convenience + assertExpression("b", of("abc").substrBytes(1, 1)); + } +} diff --git a/driver-core/src/test/functional/com/mongodb/client/model/mql/TypeMqlValuesFunctionalTest.java b/driver-core/src/test/functional/com/mongodb/client/model/mql/TypeMqlValuesFunctionalTest.java new file mode 100644 index 00000000000..228dc3ede76 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/client/model/mql/TypeMqlValuesFunctionalTest.java @@ -0,0 +1,333 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model.mql; + +import com.mongodb.MongoCommandException; +import org.bson.BsonDocument; +import org.bson.Document; +import org.bson.types.Decimal128; +import org.junit.jupiter.api.Test; + +import java.time.Instant; +import java.time.ZoneId; +import java.time.ZoneOffset; +import java.time.ZonedDateTime; +import java.util.Arrays; + +import static com.mongodb.ClusterFixture.serverVersionAtLeast; +import static com.mongodb.ClusterFixture.serverVersionLessThan; +import static com.mongodb.client.model.mql.MqlValues.of; +import static com.mongodb.client.model.mql.MqlValues.ofIntegerArray; +import static com.mongodb.client.model.mql.MqlValues.ofMap; +import static com.mongodb.client.model.mql.MqlValues.ofNull; +import static java.time.format.DateTimeFormatter.ISO_LOCAL_DATE_TIME; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +class TypeMqlValuesFunctionalTest extends AbstractMqlValuesFunctionalTest { + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/#type-expression-operators + + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/type/ + // type is not implemented directly; instead, similar checks done via switch + + @Test + public void isBooleanOrTest() { + assertExpression( + true, + of(true).isBooleanOr(of(false)), + "{'$cond': [{'$eq': [{'$type': [true]}, 'bool']}, true, false]}"); + // non-boolean: + assertExpression(false, ofIntegerArray(1).isBooleanOr(of(false))); + assertExpression(false, ofNull().isBooleanOr(of(false))); + } + + @Test + public void isNumberOrTest() { + assumeTrue(serverVersionAtLeast(4, 4)); + // https://www.mongodb.com/docs/manual/reference/operator/aggregation/isNumber/ + assertExpression(1, of(1).isNumberOr(of(99)), "{'$cond': [{'$isNumber': [1]}, 1, 99]}"); + // other numeric values: + assertExpression(1L, of(1L).isNumberOr(of(99))); + assertExpression(1.0, of(1.0).isNumberOr(of(99))); + assertExpression(Decimal128.parse("1"), of(Decimal128.parse("1")).isNumberOr(of(99))); + // non-numeric: + assertExpression(99, ofIntegerArray(1).isNumberOr(of(99))); + assertExpression(99, ofNull().isNumberOr(of(99))); + } + + @Test + public void isIntegerOr() { + // isIntegerOr relies on switch short-circuiting, which only happens after 5.2 + assumeTrue(serverVersionAtLeast(5, 2)); + assertExpression( + 1, + of(1).isIntegerOr(of(99)), + "{'$switch': {'branches': [{'case': {'$isNumber': [1]}, 'then': " + + "{'$cond': [{'$eq': [{'$round': 1}, 1]}, 1, 99]}}], 'default': 99}}" + ); + // other numeric values: + assertExpression(1L, of(1L).isIntegerOr(of(99))); + assertExpression(1.0, of(1.0).isIntegerOr(of(99))); + assertExpression(Decimal128.parse("1"), of(Decimal128.parse("1")).isIntegerOr(of(99))); + // non-numeric: + assertExpression(99, ofIntegerArray(1).isIntegerOr(of(99))); + assertExpression(99, ofNull().isIntegerOr(of(99))); + assertExpression(99, of("str").isIntegerOr(of(99))); + } + + @Test + public void isStringOrTest() { + assertExpression( + "abc", + of("abc").isStringOr(of("or")), + "{'$cond': [{'$eq': [{'$type': ['abc']}, 'string']}, 'abc', 'or']}"); + // non-string: + assertExpression("or", ofIntegerArray(1).isStringOr(of("or"))); + assertExpression("or", ofNull().isStringOr(of("or"))); + } + + @Test + public void isDateOrTest() { + Instant date = Instant.parse("2007-12-03T10:15:30.005Z"); + assertExpression( + date, + of(date).isDateOr(of(date.plusMillis(10))), + "{'$cond': [{'$in': [{'$type': [{'$date': 
'2007-12-03T10:15:30.005Z'}]}, ['date']]}, "
+                        + "{'$date': '2007-12-03T10:15:30.005Z'}, {'$date': '2007-12-03T10:15:30.015Z'}]}");
+        // non-date:
+        assertExpression(date, ofIntegerArray(1).isDateOr(of(date)));
+        assertExpression(date, ofNull().isDateOr(of(date)));
+    }
+
+    @Test
+    public void isArrayOrTest() {
+        // https://www.mongodb.com/docs/manual/reference/operator/aggregation/isArray/
+        assertExpression(
+                Arrays.asList(1, 2),
+                ofIntegerArray(1, 2).isArrayOr(ofIntegerArray(99)),
+                "{'$cond': [{'$isArray': [[1, 2]]}, [1, 2], [99]]}");
+        // non-array:
+        assertExpression(Arrays.asList(1, 2), of(true).isArrayOr(ofIntegerArray(1, 2)));
+        assertExpression(Arrays.asList(1, 2), ofNull().isArrayOr(ofIntegerArray(1, 2)));
+    }
+
+    @Test
+    public void isDocumentOrTest() {
+        BsonDocument doc = BsonDocument.parse("{a: 1}");
+        assertExpression(
+                doc,
+                of(doc).isDocumentOr(of(BsonDocument.parse("{b: 2}"))),
+                "{'$cond': [{'$eq': [{'$type': [{'$literal': {'a': 1}}]}, 'object']}, "
+                        + "{'$literal': {'a': 1}}, {'$literal': {'b': 2}}]}");
+        // non-document:
+        assertExpression(doc, ofIntegerArray(1).isDocumentOr(of(doc)));
+        assertExpression(doc, ofNull().isDocumentOr(of(doc)));
+
+        // maps are documents
+        assertExpression(doc, ofMap(doc).isDocumentOr(of(BsonDocument.parse("{x: 9}"))));
+
+        // conversion between maps and documents
+        MqlMap<MqlValue> first = ofMap(doc);
+        MqlDocument second = first.isDocumentOr(of(BsonDocument.parse("{}")));
+        MqlMap<MqlValue> third = second.isMapOr(ofMap(BsonDocument.parse("{}")));
+        assertExpression(
+                true,
+                first.eq(second));
+        assertExpression(
+                true,
+                second.eq(third));
+    }
+
+    @Test
+    public void isMapOrTest() {
+        BsonDocument map = BsonDocument.parse("{a: 1}");
+        assertExpression(
+                map,
+                ofMap(map).isMapOr(ofMap(BsonDocument.parse("{b: 2}"))),
+                "{'$cond': [{'$eq': [{'$type': [{'$literal': {'a': 1}}]}, 'object']}, "
+                        + "{'$literal': {'a': 1}}, {'$literal': {'b': 2}}]}");
+        // non-map:
+        assertExpression(map, ofIntegerArray(1).isMapOr(ofMap(map)));
+        assertExpression(map, ofNull().isMapOr(ofMap(map)));
+
+        // documents are maps
+        assertExpression(map, of(map).isMapOr(ofMap(BsonDocument.parse("{x: 9}"))));
+    }
+
+    // conversions
+    // https://www.mongodb.com/docs/manual/reference/operator/aggregation/convert/
+    // Convert is not implemented: too dynamic, conversions should be explicit.
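+    // Sketch (comments only, not an executable test): in place of a dynamic
+    // $convert, conversions are spelled out with the explicit, typed methods
+    // exercised by the tests below, for example:
+    //   of(false).asString()            // boolean -> string  ($toString)
+    //   of("1234").parseInteger()       // string  -> number  ($convert with a $toLong onError fallback)
+    //   of(dateString).parseDate()      // string  -> date    ($dateFromString)
+    //   of(1234L).millisecondsAsDate()  // long    -> date    ($toDate)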
+
+    @Test
+    public void asStringTest() {
+        // https://www.mongodb.com/docs/manual/reference/operator/aggregation/toString/
+        // asString, since toString conflicts
+        assertExpression("false", of(false).asString(), "{'$toString': [false]}");
+
+        assertExpression("1", of(1).asString());
+        assertExpression("1", of(1L).asString());
+        assertExpression("1", of(1.0).asString());
+        assertExpression("1.0", of(Decimal128.parse("1.0")).asString());
+
+        assertExpression("abc", of("abc").asString());
+
+        // this is equivalent to $dateToString
+        assertExpression("1970-01-01T00:00:00.123Z", of(Instant.ofEpochMilli(123)).asString());
+    }
+
+    @Test
+    public void asStringTestNestedPre82() {
+        assumeTrue(serverVersionLessThan(8, 2));
+
+        // Arrays and documents are not (yet) supported:
+        assertThrows(MongoCommandException.class, () ->
+                assertExpression("[1,2]", ofIntegerArray(1, 2).asString()));
+        assertThrows(MongoCommandException.class, () ->
+                assertExpression("{\"a\":1}", of(Document.parse("{a: 1}")).asString()));
+    }
+
+    @Test
+    public void asStringTestNested() {
+        assumeTrue(serverVersionAtLeast(8, 2));
+
+        assertExpression("[1,2]", ofIntegerArray(1, 2).asString());
+        assertExpression("{\"a\":1}", of(Document.parse("{a: 1}")).asString());
+    }
+
+    @Test
+    public void dateAsStringTest() {
+        // https://www.mongodb.com/docs/manual/reference/operator/aggregation/dateToString/
+        final Instant instant = Instant.parse("2007-12-03T10:15:30.005Z");
+        MqlDate date = of(instant);
+        ZonedDateTime utcDateTime = ZonedDateTime.ofInstant(instant, ZoneId.of(ZoneOffset.UTC.getId()));
+        assertExpression(
+                "2007-12-03T10:15:30.005Z",
+                of(instant).asString(),
+                "{'$toString': [{'$date': '2007-12-03T10:15:30.005Z'}]}");
+        // with parameters
+        assertExpression(
+                utcDateTime.withZoneSameInstant(ZoneId.of("America/New_York")).format(ISO_LOCAL_DATE_TIME),
+                date.asString(of("America/New_York"), of("%Y-%m-%dT%H:%M:%S.%L")),
+                "{'$dateToString': {'date': {'$date': '2007-12-03T10:15:30.005Z'}, "
+                        + "'format': '%Y-%m-%dT%H:%M:%S.%L', "
+                        + "'timezone': 'America/New_York'}}");
+        assertExpression(
+                utcDateTime.withZoneSameInstant(ZoneId.of("+04:30")).format(ISO_LOCAL_DATE_TIME),
+                date.asString(of("+04:30"), of("%Y-%m-%dT%H:%M:%S.%L")),
+                "{'$dateToString': {'date': {'$date': '2007-12-03T10:15:30.005Z'}, "
+                        + "'format': '%Y-%m-%dT%H:%M:%S.%L', "
+                        + "'timezone': '+04:30'}}");
+        // Olson Timezone Identifier is changed to UTC offset:
+        assertExpression(
+                "2007-12-03T05:15:30.005-0500",
+                of(instant).asString(of("America/New_York"), of("%Y-%m-%dT%H:%M:%S.%L%z")));
+    }
+
+    // parse string
+
+    @Test
+    public void parseDateTest() {
+        // https://www.mongodb.com/docs/manual/reference/operator/aggregation/dateFromString/
+        String dateString = "2007-12-03T10:15:30.005Z";
+        assertExpression(
+                Instant.parse(dateString),
+                of(dateString).parseDate(),
+                "{'$dateFromString': {'dateString': '2007-12-03T10:15:30.005Z'}}");
+
+        // throws: "cannot pass in a date/time string with GMT offset together with a timezone argument"
+        assertThrows(MongoCommandException.class, () ->
+                assertExpression(1, of("2007-12-03T10:15:30.005+01:00")
+                        .parseDate(of("+01:00"), of("%Y-%m-%dT%H:%M:%S.%L%z"))
+                        .asString()));
+        // therefore, to parse date strings containing UTC offsets, we need:
+        assertExpression(
+                Instant.parse("2007-12-03T09:15:30.005Z"),
+                of("2007-12-03T10:15:30.005+01:00")
+                        .parseDate(of("%Y-%m-%dT%H:%M:%S.%L%z")),
+                "{'$dateFromString': {'dateString': '2007-12-03T10:15:30.005+01:00', "
+                        + "'format': '%Y-%m-%dT%H:%M:%S.%L%z'}}");
+
+        // missing items:
+        assertExpression(
+                Instant.parse("2007-12-03T10:15:00.000Z"),
+                of("2007-12-03T10:15").parseDate(of("%Y-%m-%dT%H:%M")));
+        assertThrows(MongoCommandException.class, () -> assertExpression(
+                "an incomplete date/time string has been found, with elements missing",
+                of("-12-03T10:15").parseDate(of("-%m-%dT%H:%M")).asString()));
+        assertThrows(MongoCommandException.class, () -> assertExpression(
+                "an incomplete date/time string has been found, with elements missing",
+                of("2007--03T10:15").parseDate(of("%Y--%dT%H:%M")).asString()));
+        assertThrows(MongoCommandException.class, () -> assertExpression(
+                "an incomplete date/time string has been found, with elements missing",
+                of("").parseDate(of("")).asString()));
+    }
+
+    @Test
+    public void parseIntegerTest() {
+        // https://www.mongodb.com/docs/manual/reference/operator/aggregation/toInt/
+        // https://www.mongodb.com/docs/manual/reference/operator/aggregation/toLong/
+        assertExpression(
+                1234,
+                of("1234").parseInteger(),
+                "{'$convert': {'input': '1234', 'onError': {'$toLong': '1234'}, 'to': 'int'}}");
+
+        int intVal = 2_000_000_000;
+        long longVal = 4_000_000_000L;
+        assertExpression(
+                intVal,
+                of(intVal + "").parseInteger());
+        assertExpression(
+                longVal,
+                of(longVal + "").parseInteger());
+
+        // failures
+        assertThrows(MongoCommandException.class, () ->
+                assertExpression(
+                        "",
+                        of(BsonDocument.parse("{a:'1.5'}")).getString("a").parseInteger()));
+        assertThrows(MongoCommandException.class, () ->
+                assertExpression(
+                        "",
+                        of(BsonDocument.parse("{a:'not an integer'}")).getString("a").parseInteger()));
+        assertThrows(MongoCommandException.class, () ->
+                assertExpression(
+                        "",
+                        of("1.5").parseInteger()));
+        assertThrows(MongoCommandException.class, () ->
+                assertExpression(
+                        "",
+                        of("not an integer").parseInteger()));
+    }
+
+    // non-string
+
+    @Test
+    public void millisecondsToDateTest() {
+        // https://www.mongodb.com/docs/manual/reference/operator/aggregation/toDate/
+        assertExpression(
+                Instant.ofEpochMilli(1234),
+                of(1234L).millisecondsAsDate(),
+                "{'$toDate': {'$numberLong': '1234'}}");
+        // This does not accept plain integers:
+        assertThrows(MongoCommandException.class, () ->
+                assertExpression(
+                        Instant.parse("2007-12-03T10:15:30.005Z"),
+                        of(1234).millisecondsAsDate(),
+                        "{'$toDate': 1234}"));
+    }
+}
diff --git a/driver-core/src/test/functional/com/mongodb/client/model/search/AggregatesBinaryVectorSearchIntegrationTest.java b/driver-core/src/test/functional/com/mongodb/client/model/search/AggregatesBinaryVectorSearchIntegrationTest.java
new file mode 100644
index 00000000000..a242367992f
--- /dev/null
+++ b/driver-core/src/test/functional/com/mongodb/client/model/search/AggregatesBinaryVectorSearchIntegrationTest.java
@@ -0,0 +1,353 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.mongodb.client.model.search; + +import com.mongodb.MongoInterruptedException; +import com.mongodb.MongoNamespace; +import com.mongodb.client.model.Aggregates; +import com.mongodb.client.model.SearchIndexType; +import com.mongodb.client.test.CollectionHelper; +import com.mongodb.internal.operation.SearchIndexRequest; +import org.bson.BinaryVector; +import org.bson.BsonDocument; +import org.bson.Document; +import org.bson.codecs.DocumentCodec; +import org.bson.conversions.Bson; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.List; +import java.util.Optional; +import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; +import java.util.stream.Stream; + +import static com.mongodb.ClusterFixture.isAtlasSearchTest; +import static com.mongodb.ClusterFixture.serverVersionAtLeast; +import static com.mongodb.client.model.Filters.and; +import static com.mongodb.client.model.Filters.eq; +import static com.mongodb.client.model.Filters.gt; +import static com.mongodb.client.model.Filters.gte; +import static com.mongodb.client.model.Filters.in; +import static com.mongodb.client.model.Filters.lt; +import static com.mongodb.client.model.Filters.lte; +import static com.mongodb.client.model.Filters.ne; +import static com.mongodb.client.model.Filters.nin; +import static com.mongodb.client.model.Filters.or; +import static com.mongodb.client.model.Projections.fields; +import static com.mongodb.client.model.Projections.metaVectorSearchScore; +import static com.mongodb.client.model.search.SearchPath.fieldPath; +import static com.mongodb.client.model.search.VectorSearchOptions.approximateVectorSearchOptions; +import static com.mongodb.client.model.search.VectorSearchOptions.exactVectorSearchOptions; +import static java.lang.String.format; +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assumptions.assumeTrue; +import static org.junit.jupiter.params.provider.Arguments.arguments; + +class AggregatesBinaryVectorSearchIntegrationTest { + private static final String EXCEED_WAIT_ATTEMPTS_ERROR_MESSAGE = + "Exceeded maximum attempts waiting for Search Index creation in Atlas cluster. 
Index document: %s"; + + private static final String VECTOR_INDEX = "vector_search_index"; + private static final String VECTOR_FIELD_INT_8 = "int8Vector"; + private static final String VECTOR_FIELD_FLOAT_32 = "float32Vector"; + private static final String VECTOR_FIELD_LEGACY_DOUBLE_LIST = "legacyDoubleVector"; + private static final int LIMIT = 5; + private static final String FIELD_YEAR = "year"; + private static CollectionHelper collectionHelper; + private static final BsonDocument VECTOR_SEARCH_INDEX_DEFINITION = BsonDocument.parse( + "{" + + " fields: [" + + " {" + + " path: '" + VECTOR_FIELD_INT_8 + "'," + + " numDimensions: 5," + + " similarity: 'cosine'," + + " type: 'vector'," + + " }," + + " {" + + " path: '" + VECTOR_FIELD_FLOAT_32 + "'," + + " numDimensions: 5," + + " similarity: 'cosine'," + + " type: 'vector'," + + " }," + + " {" + + " path: '" + VECTOR_FIELD_LEGACY_DOUBLE_LIST + "'," + + " numDimensions: 5," + + " similarity: 'cosine'," + + " type: 'vector'," + + " }," + + " {" + + " path: '" + FIELD_YEAR + "'," + + " type: 'filter'," + + " }," + + " ]" + + "}"); + + @BeforeAll + static void beforeAll() { + assumeTrue(isAtlasSearchTest()); + assumeTrue(serverVersionAtLeast(6, 0)); + + collectionHelper = + new CollectionHelper<>(new DocumentCodec(), new MongoNamespace("javaVectorSearchTest", AggregatesBinaryVectorSearchIntegrationTest.class.getSimpleName())); + collectionHelper.drop(); + collectionHelper.insertDocuments( + new Document() + .append("_id", 0) + .append(VECTOR_FIELD_INT_8, BinaryVector.int8Vector(new byte[]{0, 1, 2, 3, 4})) + .append(VECTOR_FIELD_FLOAT_32, BinaryVector.floatVector(new float[]{0.0001f, 1.12345f, 2.23456f, 3.34567f, 4.45678f})) + .append(VECTOR_FIELD_LEGACY_DOUBLE_LIST, new double[]{0.0001, 1.12345, 2.23456, 3.34567, 4.45678}) + .append(FIELD_YEAR, 2016), + new Document() + .append("_id", 1) + .append(VECTOR_FIELD_INT_8, BinaryVector.int8Vector(new byte[]{1, 2, 3, 4, 5})) + .append(VECTOR_FIELD_FLOAT_32, BinaryVector.floatVector(new float[]{1.0001f, 2.12345f, 3.23456f, 4.34567f, 5.45678f})) + .append(VECTOR_FIELD_LEGACY_DOUBLE_LIST, new double[]{1.0001, 2.12345, 3.23456, 4.34567, 5.45678}) + .append(FIELD_YEAR, 2017), + new Document() + .append("_id", 2) + .append(VECTOR_FIELD_INT_8, BinaryVector.int8Vector(new byte[]{2, 3, 4, 5, 6})) + .append(VECTOR_FIELD_FLOAT_32, BinaryVector.floatVector(new float[]{2.0002f, 3.12345f, 4.23456f, 5.34567f, 6.45678f})) + .append(VECTOR_FIELD_LEGACY_DOUBLE_LIST, new double[]{2.0002, 3.12345, 4.23456, 5.34567, 6.45678}) + .append(FIELD_YEAR, 2018), + new Document() + .append("_id", 3) + .append(VECTOR_FIELD_INT_8, BinaryVector.int8Vector(new byte[]{3, 4, 5, 6, 7})) + .append(VECTOR_FIELD_FLOAT_32, BinaryVector.floatVector(new float[]{3.0003f, 4.12345f, 5.23456f, 6.34567f, 7.45678f})) + .append(VECTOR_FIELD_LEGACY_DOUBLE_LIST, new double[]{3.0003, 4.12345, 5.23456, 6.34567, 7.45678}) + .append(FIELD_YEAR, 2019), + new Document() + .append("_id", 4) + .append(VECTOR_FIELD_INT_8, BinaryVector.int8Vector(new byte[]{4, 5, 6, 7, 8})) + .append(VECTOR_FIELD_FLOAT_32, BinaryVector.floatVector(new float[]{4.0004f, 5.12345f, 6.23456f, 7.34567f, 8.45678f})) + .append(VECTOR_FIELD_LEGACY_DOUBLE_LIST, new double[]{4.0004, 5.12345, 6.23456, 7.34567, 8.45678}) + .append(FIELD_YEAR, 2020), + new Document() + .append("_id", 5) + .append(VECTOR_FIELD_INT_8, BinaryVector.int8Vector(new byte[]{5, 6, 7, 8, 9})) + .append(VECTOR_FIELD_FLOAT_32, BinaryVector.floatVector(new float[]{5.0005f, 6.12345f, 7.23456f, 8.34567f, 9.45678f})) + 
.append(VECTOR_FIELD_LEGACY_DOUBLE_LIST, new double[]{5.0005, 6.12345, 7.23456, 8.34567, 9.45678}) + .append(FIELD_YEAR, 2021), + new Document() + .append("_id", 6) + .append(VECTOR_FIELD_INT_8, BinaryVector.int8Vector(new byte[]{6, 7, 8, 9, 10})) + .append(VECTOR_FIELD_FLOAT_32, BinaryVector.floatVector(new float[]{6.0006f, 7.12345f, 8.23456f, 9.34567f, 10.45678f})) + .append(VECTOR_FIELD_LEGACY_DOUBLE_LIST, new double[]{6.0006, 7.12345, 8.23456, 9.34567, 10.45678}) + .append(FIELD_YEAR, 2022), + new Document() + .append("_id", 7) + .append(VECTOR_FIELD_INT_8, BinaryVector.int8Vector(new byte[]{7, 8, 9, 10, 11})) + .append(VECTOR_FIELD_FLOAT_32, BinaryVector.floatVector(new float[]{7.0007f, 8.12345f, 9.23456f, 10.34567f, 11.45678f})) + .append(VECTOR_FIELD_LEGACY_DOUBLE_LIST, new double[]{7.0007, 8.12345, 9.23456, 10.34567, 11.45678}) + .append(FIELD_YEAR, 2023), + new Document() + .append("_id", 8) + .append(VECTOR_FIELD_INT_8, BinaryVector.int8Vector(new byte[]{8, 9, 10, 11, 12})) + .append(VECTOR_FIELD_FLOAT_32, BinaryVector.floatVector(new float[]{8.0008f, 9.12345f, 10.23456f, 11.34567f, 12.45678f})) + .append(VECTOR_FIELD_LEGACY_DOUBLE_LIST, new double[]{8.0008, 9.12345, 10.23456, 11.34567, 12.45678}) + .append(FIELD_YEAR, 2024), + new Document() + .append("_id", 9) + .append(VECTOR_FIELD_INT_8, BinaryVector.int8Vector(new byte[]{9, 10, 11, 12, 13})) + .append(VECTOR_FIELD_FLOAT_32, BinaryVector.floatVector(new float[]{9.0009f, 10.12345f, 11.23456f, 12.34567f, 13.45678f})) + .append(VECTOR_FIELD_LEGACY_DOUBLE_LIST, new double[]{9.0009, 10.12345, 11.23456, 12.34567, 13.45678}) + .append(FIELD_YEAR, 2025) + ); + + collectionHelper.createSearchIndex( + new SearchIndexRequest(VECTOR_SEARCH_INDEX_DEFINITION, VECTOR_INDEX, + SearchIndexType.vectorSearch())); + awaitIndexCreation(); + } + + @AfterAll + static void afterAll() { + if (collectionHelper != null) { + collectionHelper.drop(); + } + } + + private static Stream provideSupportedVectors() { + return Stream.of( + arguments(BinaryVector.int8Vector(new byte[]{0, 1, 2, 3, 4}), + // `multi` is used here only to verify that it is tolerated + fieldPath(VECTOR_FIELD_INT_8).multi("ignored"), + approximateVectorSearchOptions(LIMIT * 2)), + arguments(BinaryVector.int8Vector(new byte[]{0, 1, 2, 3, 4}), + fieldPath(VECTOR_FIELD_INT_8), + approximateVectorSearchOptions(LIMIT * 2)), + + arguments(BinaryVector.floatVector(new float[]{0.0001f, 1.12345f, 2.23456f, 3.34567f, 4.45678f}), + // `multi` is used here only to verify that it is tolerated + fieldPath(VECTOR_FIELD_FLOAT_32).multi("ignored"), + approximateVectorSearchOptions(LIMIT * 2)), + arguments(BinaryVector.floatVector(new float[]{0.0001f, 1.12345f, 2.23456f, 3.34567f, 4.45678f}), + fieldPath(VECTOR_FIELD_FLOAT_32), + approximateVectorSearchOptions(LIMIT * 2)), + + arguments(BinaryVector.floatVector(new float[]{0.0001f, 1.12345f, 2.23456f, 3.34567f, 4.45678f}), + // `multi` is used here only to verify that it is tolerated + fieldPath(VECTOR_FIELD_FLOAT_32).multi("ignored"), + exactVectorSearchOptions()), + arguments(BinaryVector.floatVector(new float[]{0.0001f, 1.12345f, 2.23456f, 3.34567f, 4.45678f}), + fieldPath(VECTOR_FIELD_FLOAT_32), + exactVectorSearchOptions()), + + arguments(BinaryVector.floatVector(new float[]{0.0001f, 1.12345f, 2.23456f, 3.34567f, 4.45678f}), + // `multi` is used here only to verify that it is tolerated + fieldPath(VECTOR_FIELD_LEGACY_DOUBLE_LIST).multi("ignored"), + exactVectorSearchOptions()), + arguments(BinaryVector.floatVector(new float[]{0.0001f, 1.12345f, 
+                        fieldPath(VECTOR_FIELD_LEGACY_DOUBLE_LIST),
+                        exactVectorSearchOptions()),
+
+                arguments(BinaryVector.floatVector(new float[]{0.0001f, 1.12345f, 2.23456f, 3.34567f, 4.45678f}),
+                        // `multi` is used here only to verify that it is tolerated
+                        fieldPath(VECTOR_FIELD_LEGACY_DOUBLE_LIST).multi("ignored"),
+                        approximateVectorSearchOptions(LIMIT * 2)),
+                arguments(BinaryVector.floatVector(new float[]{0.0001f, 1.12345f, 2.23456f, 3.34567f, 4.45678f}),
+                        fieldPath(VECTOR_FIELD_LEGACY_DOUBLE_LIST),
+                        approximateVectorSearchOptions(LIMIT * 2))
+        );
+    }
+
+    @ParameterizedTest
+    @MethodSource("provideSupportedVectors")
+    void shouldSearchByVectorWithSearchScore(final BinaryVector vector,
+                                             final FieldSearchPath fieldSearchPath,
+                                             final VectorSearchOptions vectorSearchOptions) {
+        //given
+        List<Bson> pipeline = asList(
+                Aggregates.vectorSearch(
+                        fieldSearchPath,
+                        vector,
+                        VECTOR_INDEX, LIMIT,
+                        vectorSearchOptions),
+                Aggregates.project(
+                        fields(
+                                metaVectorSearchScore("vectorSearchScore")
+                        ))
+        );
+
+        //when
+        List<Document> aggregate = collectionHelper.aggregate(pipeline);
+
+        //then
+        Assertions.assertEquals(LIMIT, aggregate.size());
+        assertScoreIsDecreasing(aggregate);
+        Document highestScoreDocument = aggregate.get(0);
+        assertEquals(1, highestScoreDocument.getDouble("vectorSearchScore"));
+    }
+
+    @ParameterizedTest
+    @MethodSource("provideSupportedVectors")
+    void shouldSearchByVector(final BinaryVector vector,
+                              final FieldSearchPath fieldSearchPath,
+                              final VectorSearchOptions vectorSearchOptions) {
+        //given
+        List<Bson> pipeline = asList(
+                Aggregates.vectorSearch(
+                        fieldSearchPath,
+                        vector,
+                        VECTOR_INDEX, LIMIT,
+                        vectorSearchOptions)
+        );
+
+        //when
+        List<Document> aggregate = collectionHelper.aggregate(pipeline);
+
+        //then
+        Assertions.assertEquals(LIMIT, aggregate.size());
+        assertFalse(
+                aggregate.stream()
+                        .anyMatch(document -> document.containsKey("vectorSearchScore"))
+        );
+    }
+
+    @ParameterizedTest
+    @MethodSource("provideSupportedVectors")
+    void shouldSearchByVectorWithFilter(final BinaryVector vector,
+                                        final FieldSearchPath fieldSearchPath,
+                                        final VectorSearchOptions vectorSearchOptions) {
+        Consumer<Bson> asserter = filter -> {
+            List<Bson> pipeline = singletonList(
+                    Aggregates.vectorSearch(
+                            fieldSearchPath, vector, VECTOR_INDEX, 1,
+                            vectorSearchOptions.filter(filter))
+            );
+
+            List<Document> aggregate = collectionHelper.aggregate(pipeline);
+            Assertions.assertFalse(aggregate.isEmpty());
+        };
+
+        assertAll(
+                () -> asserter.accept(lt("year", 2020)),
+                () -> asserter.accept(lte("year", 2020)),
+                () -> asserter.accept(eq("year", 2020)),
+                () -> asserter.accept(gte("year", 2016)),
+                () -> asserter.accept(gt("year", 2015)),
+                () -> asserter.accept(ne("year", 2016)),
+                () -> asserter.accept(in("year", 2000, 2024)),
+                () -> asserter.accept(nin("year", 2000, 2024)),
+                () -> asserter.accept(and(gte("year", 2015), lte("year", 2017))),
+                () -> asserter.accept(or(eq("year", 2015), eq("year", 2017)))
+        );
+    }
+
+    private static void assertScoreIsDecreasing(final List<Document> aggregate) {
+        double previousScore = Integer.MAX_VALUE;
+        for (Document document : aggregate) {
+            Double vectorSearchScore = document.getDouble("vectorSearchScore");
+            assertTrue(vectorSearchScore > 0, "Expected positive score");
+            assertTrue(vectorSearchScore < previousScore, "Expected decreasing score");
+            previousScore = vectorSearchScore;
+        }
+    }
+
+    // Polls the index status every 5 seconds, up to 10 attempts, until the
+    // index becomes queryable; fails the test otherwise.
+    private static void awaitIndexCreation() {
+        int attempts = 10;
+        Optional<Document> searchIndex = Optional.empty();
+
+        while (attempts-- > 0) {
+            searchIndex = collectionHelper.listSearchIndex(VECTOR_INDEX);
+            if (searchIndex.filter(document -> document.getBoolean("queryable"))
+                    .isPresent()) {
+                return;
+            }
+
+            try {
+                TimeUnit.SECONDS.sleep(5);
+            } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+                throw new MongoInterruptedException(null, e);
+            }
+        }
+
+        searchIndex.ifPresent(document ->
+                Assertions.fail(format(EXCEED_WAIT_ATTEMPTS_ERROR_MESSAGE, document.toJson())));
+        Assertions.fail(format(EXCEED_WAIT_ATTEMPTS_ERROR_MESSAGE, "null"));
+    }
+}
diff --git a/driver-core/src/test/functional/com/mongodb/client/model/search/AggregatesSearchIntegrationTest.java b/driver-core/src/test/functional/com/mongodb/client/model/search/AggregatesSearchIntegrationTest.java
new file mode 100644
index 00000000000..bc34cb0060c
--- /dev/null
+++ b/driver-core/src/test/functional/com/mongodb/client/model/search/AggregatesSearchIntegrationTest.java
@@ -0,0 +1,781 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.client.model.search;
+
+import com.mongodb.MongoClientSettings;
+import com.mongodb.MongoNamespace;
+import com.mongodb.assertions.Assertions;
+import com.mongodb.client.model.Aggregates;
+import com.mongodb.client.model.geojson.Point;
+import com.mongodb.client.model.geojson.Position;
+import com.mongodb.client.test.CollectionHelper;
+import org.bson.BsonDocument;
+import org.bson.BsonString;
+import org.bson.codecs.BsonDocumentCodec;
+import org.bson.conversions.Bson;
+import org.bson.json.JsonWriterSettings;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+
+import java.time.Duration;
+import java.time.Instant;
+import java.time.Month;
+import java.time.Year;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.function.BiConsumer;
+import java.util.function.BiFunction;
+import java.util.function.Consumer;
+import java.util.function.Function;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static com.mongodb.ClusterFixture.isAtlasSearchTest;
+import static com.mongodb.ClusterFixture.serverVersionAtLeast;
+import static com.mongodb.client.model.Aggregates.limit;
+import static com.mongodb.client.model.Aggregates.project;
+import static com.mongodb.client.model.Aggregates.replaceWith;
+import static com.mongodb.client.model.Filters.and;
+import static com.mongodb.client.model.Filters.eq;
+import static com.mongodb.client.model.Filters.gt;
+import static com.mongodb.client.model.Filters.gte;
+import static com.mongodb.client.model.Filters.in;
+import static com.mongodb.client.model.Filters.lt;
+import static com.mongodb.client.model.Filters.lte;
+import static com.mongodb.client.model.Filters.ne;
+import static com.mongodb.client.model.Filters.nin; +import static com.mongodb.client.model.Filters.or; +import static com.mongodb.client.model.Projections.computedSearchMeta; +import static com.mongodb.client.model.Projections.metaSearchHighlights; +import static com.mongodb.client.model.Projections.metaSearchScore; +import static com.mongodb.client.model.Projections.metaVectorSearchScore; +import static com.mongodb.client.model.search.FuzzySearchOptions.fuzzySearchOptions; +import static com.mongodb.client.model.search.SearchCollector.facet; +import static com.mongodb.client.model.search.SearchCount.lowerBound; +import static com.mongodb.client.model.search.SearchCount.total; +import static com.mongodb.client.model.search.SearchFacet.dateFacet; +import static com.mongodb.client.model.search.SearchFacet.numberFacet; +import static com.mongodb.client.model.search.SearchFacet.stringFacet; +import static com.mongodb.client.model.search.SearchHighlight.paths; +import static com.mongodb.client.model.search.SearchOperator.autocomplete; +import static com.mongodb.client.model.search.SearchOperator.compound; +import static com.mongodb.client.model.search.SearchOperator.dateRange; +import static com.mongodb.client.model.search.SearchOperator.equalsNull; +import static com.mongodb.client.model.search.SearchOperator.exists; +import static com.mongodb.client.model.search.SearchOperator.in; +import static com.mongodb.client.model.search.SearchOperator.moreLikeThis; +import static com.mongodb.client.model.search.SearchOperator.near; +import static com.mongodb.client.model.search.SearchOperator.numberRange; +import static com.mongodb.client.model.search.SearchOperator.queryString; +import static com.mongodb.client.model.search.SearchOperator.regex; +import static com.mongodb.client.model.search.SearchOperator.phrase; +import static com.mongodb.client.model.search.SearchOperator.text; +import static com.mongodb.client.model.search.SearchOperator.wildcard; +import static com.mongodb.client.model.search.SearchOptions.searchOptions; +import static com.mongodb.client.model.search.SearchPath.fieldPath; +import static com.mongodb.client.model.search.SearchPath.wildcardPath; +import static com.mongodb.client.model.search.SearchScore.boost; +import static com.mongodb.client.model.search.SearchScore.constant; +import static com.mongodb.client.model.search.SearchScore.function; +import static com.mongodb.client.model.search.SearchScoreExpression.addExpression; +import static com.mongodb.client.model.search.SearchScoreExpression.constantExpression; +import static com.mongodb.client.model.search.SearchScoreExpression.gaussExpression; +import static com.mongodb.client.model.search.SearchScoreExpression.log1pExpression; +import static com.mongodb.client.model.search.SearchScoreExpression.logExpression; +import static com.mongodb.client.model.search.SearchScoreExpression.multiplyExpression; +import static com.mongodb.client.model.search.SearchScoreExpression.pathExpression; +import static com.mongodb.client.model.search.SearchScoreExpression.relevanceExpression; +import static com.mongodb.client.model.search.VectorSearchOptions.approximateVectorSearchOptions; +import static com.mongodb.client.model.search.VectorSearchOptions.exactVectorSearchOptions; +import static java.time.ZoneOffset.UTC; +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; +import static java.util.Collections.singleton; +import static java.util.Collections.singletonList; +import static 
java.util.Collections.unmodifiableList;
+import static org.junit.jupiter.api.Assertions.assertAll;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+import static org.junit.jupiter.params.provider.Arguments.arguments;
+
+/**
+ * These tests require the sample data
+ * and the following Atlas Search indices:
+ * <table>
+ *     <tr>
+ *         <th>Namespace</th>
+ *         <th>Index name</th>
+ *         <th>Field mappings</th>
+ *     </tr>
+ *     <tr>
+ *         <td>{@code sample_mflix.movies}</td>
+ *         <td>{@code default}</td>
+ *         <td><pre>{@code
+ *            {
+ *              "mappings": {
+ *                "dynamic": true,
+ *                "fields": {
+ *                  "fullplot": {
+ *                    "type": "stringFacet"
+ *                  },
+ *                  "released": {
+ *                    "type": "dateFacet"
+ *                  },
+ *                  "title": [
+ *                    {
+ *                      "multi": {
+ *                        "keyword": {
+ *                          "analyzer": "lucene.keyword",
+ *                          "searchAnalyzer": "lucene.keyword",
+ *                          "type": "string"
+ *                        }
+ *                      },
+ *                      "type": "string"
+ *                    },
+ *                    {
+ *                      "type": "autocomplete"
+ *                    }
+ *                  ],
+ *                  "tomatoes": {
+ *                    "fields": {
+ *                      "dvd": {
+ *                        "type": "date"
+ *                      },
+ *                      "viewer": {
+ *                        "fields": {
+ *                          "meter": {
+ *                            "type": "numberFacet"
+ *                          }
+ *                        },
+ *                        "type": "document"
+ *                      }
+ *                    },
+ *                    "type": "document"
+ *                  }
+ *                }
+ *              },
+ *              "storedSource": {
+ *                "include": [
+ *                  "plot"
+ *                ]
+ *              }
+ *            }
+ *          }</pre></td>
+ *     </tr>
+ *     <tr>
+ *         <td>{@code sample_airbnb.listingsAndReviews}</td>
+ *         <td>{@code default}</td>
+ *         <td><pre>{@code
+ *            {
+ *              "mappings": {
+ *                "dynamic": true,
+ *                "fields": {
+ *                  "address": {
+ *                    "fields": {
+ *                      "location": {
+ *                        "type": "geo"
+ *                      }
+ *                    },
+ *                    "type": "document"
+ *                  }
+ *                }
+ *              }
+ *            }
+ *          }</pre></td>
+ *     </tr>
+ *     <tr>
+ *         <td>{@code sample_mflix.embedded_movies}</td>
+ *         <td>{@code sample_mflix__embedded_movies}</td>
+ *         <td><pre>{@code
+ *            {
+ *              "mappings": {
+ *                "dynamic": true,
+ *                "fields": {
+ *                  "plot_embedding": {
+ *                    "dimensions": 1536,
+ *                    "similarity": "cosine",
+ *                    "type": "knnVector"
+ *                  }
+ *                }
+ *              }
+ *            }
+ *          }</pre></td>
+ *     </tr>
+ * </table>
+ *
+ * Use this class when needing to test against MFLIX specifically. Otherwise, + * see AggregatesSearchTest. + */ +final class AggregatesSearchIntegrationTest { + private static final MongoNamespace MFLIX_MOVIES_NS = new MongoNamespace("sample_mflix", "movies"); + private static final MongoNamespace MFLIX_EMBEDDED_MOVIES_NS = new MongoNamespace("sample_mflix", "embedded_movies"); + private static final MongoNamespace AIRBNB_LISTINGS_AND_REVIEWS_NS = new MongoNamespace("sample_airbnb", "listingsAndReviews"); + private static final List QUERY_VECTOR = unmodifiableList(asList(-0.0072121937, -0.030757688, 0.014948666, -0.018497631, -0.019035352, 0.028149737, -0.0019593239, -0.02012424, -0.025649332, -0.007985169, 0.007830574, 0.023726976, -0.011507247, -0.022839734, 0.00027999343, -0.010431803, 0.03823202, -0.025756875, -0.02074262, -0.0042883316, -0.010841816, 0.010552791, 0.0015266258, -0.01791958, 0.018430416, -0.013980767, 0.017247427, -0.010525905, 0.0126230195, 0.009255537, 0.017153326, 0.008260751, -0.0036060968, -0.019210111, -0.0133287795, -0.011890373, -0.0030599732, -0.0002904958, -0.001310697, -0.020715732, 0.020890493, 0.012428096, 0.0015837587, -0.006644225, -0.028499257, -0.005098275, -0.0182691, 0.005760345, -0.0040665213, 0.00075491105, 0.007844017, 0.00040791242, 0.0006780336, 0.0027037326, -0.0041370974, -0.022275126, 0.004775642, -0.0045235846, -0.003659869, -0.0020567859, 0.021602973, 0.01010917, -0.011419867, 0.0043689897, -0.0017946466, 0.000101610516, -0.014061426, -0.002626435, -0.00035540052, 0.0062174085, 0.020809835, 0.0035220778, -0.0071046497, -0.005041142, 0.018067453, 0.012569248, -0.021683631, 0.020245226, 0.017247427, 0.017032338, 0.01037131, -0.036296222, -0.026334926, 0.041135717, 0.009625221, 0.032155763, -0.025057837, 0.027827105, -0.03323121, 0.0055721425, 0.005716655, 0.01791958, 0.012078577, -0.011117399, -0.0016005626, -0.0033254733, -0.007702865, 0.034306653, 0.0063854465, -0.009524398, 0.006069535, 0.012696956, -0.0042883316, -0.013167463, -0.0024667988, -0.02356566, 0.00052721944, -0.008858967, 0.039630096, -0.0064593833, -0.0016728189, -0.0020366213, 0.00622413, -0.03739855, 0.0028616884, -0.0102301575, 0.017717933, -0.0041068504, -0.0060896995, -0.01876649, 0.0069903834, 0.025595559, 0.029762903, -0.006388807, 0.017247427, 0.0022080203, -0.029117636, -0.029870447, -0.0049739266, -0.011809715, 0.023243025, 0.009510955, 0.030004878, 0.0015837587, -0.018524516, 0.007931396, -0.03589293, 0.013590919, -0.026361812, 0.002922182, 0.025743432, 0.014894894, 0.0012989342, -0.0016232478, 0.006251016, 0.029789789, -0.004664737, 0.017812036, -0.013436324, -0.0102301575, 0.016884465, -0.017220542, 0.010156221, 0.00014503786, 0.03933435, 0.018658947, 0.016897907, 0.0076961434, -0.029843561, -0.02021834, 0.015056211, 0.01002179, -0.0031994449, -0.03796316, -0.008133043, 0.03707592, 0.032128878, 9.483648E-05, 0.0017627194, -0.0007544909, 0.006647586, 0.020903936, -0.032559056, 0.025272924, -0.012804501, 0.019210111, 0.0022987607, 0.013301893, -0.0047218697, -0.022853177, -0.02162986, 0.006788738, 0.0092286505, 0.024184039, -0.015419173, -0.006479548, -0.00180977, 0.0060728956, -0.0030919004, 0.0022449887, -0.004046357, 0.012663349, -0.028579915, 0.0047722813, -0.6775295, -0.018779935, -0.018484188, -0.017449073, -0.01805401, 0.026630674, 0.008018777, 0.013436324, -0.0034683058, 0.00070912065, -0.005027699, 0.009658828, -0.0031792803, -0.010478854, 0.0034951917, -0.011594627, 0.02441257, -0.042533796, -0.012414653, 0.006261098, -0.012266779, 0.026630674, 
-0.017852364, -0.02184495, 0.02176429, 0.019263884, 0.00984031, -0.012609577, -0.01907568, -0.020231783, -0.002886894, 0.02706085, -0.0042345594, 0.02265153, 0.05769755, 0.021522315, -0.014195856, 0.011144285, 0.0038077426, 0.024573887, -0.03578539, -0.004476534, 0.016521502, -0.019815048, 0.00071836275, 0.008173372, 0.013436324, 0.021885278, -0.0147604635, -0.021777734, 0.0052595916, -0.011668564, -0.02356566, -0.0049974523, 0.03473683, -0.0255149, 0.012831387, -0.009658828, -0.0031036632, -0.001386314, -0.01385978, 0.008294359, -0.02512505, -0.0012308789, 0.008711093, 0.03610802, 0.016225755, 0.014034539, 0.0032431346, -0.017852364, 0.017906137, 0.005787231, -0.03514012, 0.017207097, -0.0019542826, -0.010189828, 0.010808208, -0.017408744, -0.0074944976, 0.011009854, 0.00887241, 0.009652107, -0.0062409337, 0.009766373, 0.009759651, -0.0020819916, -0.02599885, 0.0040665213, 0.016064439, -0.019035352, -0.013604362, 0.020231783, -0.025272924, -0.01196431, -0.01509654, 0.0010233518, -0.00869765, -0.01064017, 0.005249509, -0.036807057, 0.00054570363, 0.0021777733, -0.009302587, -0.00039362916, 0.011386259, 0.013382551, 0.03046194, 0.0032380936, 0.037801843, -0.036807057, -0.006244295, 0.002392862, -0.01346321, -0.008953068, -0.0025861058, -0.022853177, 0.018242212, -0.0031624765, 0.009880639, -0.0017341529, 0.0072054723, 0.014693249, 0.026630674, 0.008435511, -0.012562525, 0.011581183, -0.0028768117, -0.01059312, -0.027746446, 0.0077969665, 2.468059E-05, -0.011151006, 0.0152712995, -0.01761039, 0.023256468, 0.0076625356, 0.0026163526, -0.028795004, 0.0025877862, -0.017583502, -0.016588718, 0.017556617, 0.00075491105, 0.0075885993, -0.011722336, -0.010620005, -0.017274313, -0.008025498, -0.036376882, 0.009457182, -0.007265966, -0.0048663826, -0.00494368, 0.003616179, 0.0067820163, 0.0033775652, -0.016037554, 0.0043320213, -0.007978448, -0.012925488, 0.029413383, -0.00016583256, -0.018040568, 0.004180787, -0.011453475, -0.013886666, -0.0072121937, 0.006486269, 0.008005333, -0.01412864, -0.00061796, -0.025635887, -0.006630782, 0.02074262, -0.007192029, 0.03906549, -0.0030885397, -0.00088976155, -0.022033151, -0.008758144, 0.00049361185, 0.009342916, -0.014988995, -0.008704372, 0.014276514, -0.012300386, -0.0020063745, 0.030892119, -0.010532626, 0.019653732, 0.0028583275, 0.006163636, 0.0071517, -0.017489402, -0.008448954, -0.004352186, 0.013201071, 0.01090231, 0.0004110631, 0.03306989, 0.006916447, 0.002922182, 0.023888292, -0.009067334, 0.012434817, -0.051298663, 0.016279528, -0.02741037, 0.026227381, -0.005182294, 0.008153207, -0.026603786, 0.0045571923, 0.018067453, 0.038016934, 0.028042194, 0.0077431942, 0.015499831, -0.020298999, 0.0013123773, -0.021334114, -0.026281154, -0.0012720482, -0.0045571923, 0.006086339, 0.0028952959, -0.003041489, 0.007931396, -0.0005406625, -0.023444671, -0.0038715971, 0.0070374343, -0.0019979726, 0.024089938, 0.0020903936, -0.024210924, 0.007319738, -0.005995598, 0.032478396, 0.020998036, 0.01654839, 0.033876475, 0.025098165, 0.021132467, -0.017099554, -0.013516982, 0.01306664, 0.010525905, -0.02335057, -0.013543868, -0.03583916, 0.021172797, -0.033607613, -0.0036094578, -0.007911232, -0.0054578763, 0.013227956, 0.00993441, 0.025810648, 0.02255743, -0.013678298, 0.012273501, 0.00040497174, 0.0019072321, 0.0008170851, 0.01540573, 0.015580489, 0.005239427, 0.003989224, -0.013254843, 0.024708318, 0.0046680975, -0.034360424, -0.0041942303, 0.0077095865, -0.0053503322, -0.024399128, -0.02644247, 0.0062476555, 0.021885278, -0.0010922474, -0.014209299, 0.018295985, 
0.0135640325, 0.0033842868, 0.0017812036, 0.004735313, 0.006486269, -0.008072549, 0.009551284, 0.007938119, 0.0101696635, 0.021750847, 0.014034539, 0.0071449787, -0.008448954, 0.010841816, -0.008274195, -0.014531932, -0.0024785616, 0.0018601815, 0.009564727, -0.011130841, -0.020581303, 0.012985982, 0.019976366, -0.030542599, -0.021818062, -0.018551402, -0.0092286505, -0.024385685, 0.0036901159, -0.0061367503, -0.00034048714, -0.007057599, -0.014558818, -0.022221355, 0.023377456, 0.026119838, -0.0008813597, 0.004520224, 0.0027843907, -0.022382671, 0.0018248934, 0.13313992, 0.013685021, -6.170148E-05, 0.015876237, 0.005417547, -0.008314524, -0.019169783, -0.016494617, 0.016844137, -0.0046412116, 0.024305027, -0.027827105, 0.023162367, 0.0143034, -0.0029893972, -0.014626034, -0.018215327, 0.0073264595, 0.024331912, -0.0070777633, -0.0004259765, -0.00042345593, -0.0034262962, -0.00423792, -0.016185427, -0.017946465, -5.9706024E-05, 0.016467731, -0.014773907, -0.022664975, -0.009322752, -0.027585128, 0.0020651878, -0.010532626, -0.010546069, 0.009174879, -0.0011098915, 0.026469355, 0.022006266, -0.013039754, 0.023458114, 0.005481402, -0.00050705485, -0.012092019, 0.0055990284, -0.007057599, -0.012266779, 0.03253217, 0.007071042, -0.01699201, 0.06597847, -0.013436324, 0.0070038266, -0.009981461, 0.024829306, 0.0067383265, 0.0056292755, 0.0018534599, -0.020057024, 0.011735778, 0.0025491375, -0.022194467, 0.0012468424, -0.0051621296, -0.018457301, -0.008509448, -0.011594627, -0.0152712995, -0.001858501, -0.014921781, -0.0056696045, -0.0066979975, -0.02008391, 0.0040093884, 0.032935463, -0.0032935461, -0.0074205613, -0.014088311, -0.0014762144, -0.011218221, 0.011984475, -0.01898158, -0.027208723, -0.008072549, 0.010942639, 0.0183632, 0.04148524, -0.0009922648, -0.017086111, 0.013483374, 0.019841935, 0.024264697, 0.011601348, -0.0077431942, -0.020258669, -0.005770427, 0.013429603, -0.011554297, -0.012831387, -1.4752561E-06, 0.011594627, -0.012683514, -0.012824666, 0.02180462, 0.011023297, 0.012468425, -0.0029860365, -0.0076289284, -0.021293784, 0.005068028, 0.017812036, 0.0007708746, -0.008684208, 0.0048126103, -0.0076558143, 0.019169783, -0.0076558143, 0.028579915, -0.011574462, -0.03196756, -0.0011334168, -0.030219967, 0.023901735, 0.014021097, -0.016776921, 0.0030045207, -0.0019257163, -0.023579102, 0.004197591, 0.00012497831, -0.016803807, 0.01915634, -0.010472132, -0.042130504, -0.038016934, -0.007702865, -0.0025861058, -0.010512462, -0.013537147, -0.013382551, -0.0036397045, 0.0053032814, 0.0046277684, -0.021952493, -0.016588718, -0.031886905, 0.0058208387, -0.00043689896, -0.01337583, 0.018349757, 0.015244413, 0.00900684, -0.017677605, 0.01523097, 0.010337702, -0.024426013, -0.021965936, -0.014182413, 0.008596827, 0.029628472, 0.058611676, -0.015446059, 0.021374442, -0.0095042335, 0.00091748784, 0.021132467, -0.011285436, -0.0035724894, -0.027907763, 0.027302826, 0.004184148, 0.026281154, -0.0026802071, -0.015163755, 0.005699851, 0.023122039, 0.0075415485, -0.020057024, -0.0109359175, -0.018309427, 0.017529732, 0.0020685487, -0.012441538, 0.0023239665, 0.012038247, -0.017543174, 0.029332725, 0.01399421, -0.0092488155, -1.0607403E-05, 0.019371428, -0.0315105, 0.023471557, -0.009430297, 0.00022097006, 0.013301893, -0.020110795, -0.0072928523, 0.007649093, 0.011547576, 0.026805433, -0.01461259, -0.018968137, -0.0104250815, 0.0005646079, 0.031456728, -0.0020147765, -0.024224367, 0.002431511, -0.019371428, -0.025017507, -0.02365976, -0.004318578, -0.04457714, 0.0029826758, -0.020473758, 
-0.016118212, -0.00068181445, -0.03446797, -0.020715732, -0.04256068, -0.013792564, 0.013873223, 0.011413146, -0.002419748, 0.0123877665, -0.0011115718, 0.007978448, 0.021441657, 0.004405958, 0.0042480025, 0.022920392, -0.0067920987, 0.011083791, -0.017529732, -0.03659197, -0.0066005355, -0.023888292, -0.016521502, 0.009591613, -0.0008590946, 0.013846337, -0.021092137, -0.012562525, -0.0028415236, 0.02882189, 5.3378342E-05, -0.006943333, -0.012226449, -0.035570297, -0.024547001, 0.022355784, -0.018416973, 0.014209299, 0.010035234, 0.0046916227, 0.009672271, -0.00067635323, -0.024815861, 0.0007049197, 0.0017055863, -0.0051251613, 0.0019391594, 0.027665788, -0.007306295, -0.013369109, 0.006308149, 0.009699157, 0.000940173, 0.024842748, 0.017220542, -0.0053032814, -0.008395182, 0.011359373, 0.013214514, 0.0062711807, 0.004110211, -0.019277327, -0.01412864, -0.009322752, 0.007124814, 0.0035119955, -0.024036165, -0.012831387, -0.006734966, -0.0019694061, -0.025367027, -0.006630782, 0.016010666, 0.0018534599, -0.0030717358, -0.017717933, 0.008489283, 0.010875423, -0.0028700903, 0.0121323485, 0.004930237, 0.009947853, -0.02992422, 0.021777734, 0.00015081417, 0.010344423, 0.0017543174, 0.006166997, -0.0015467904, 0.010089005, 0.0111711705, -0.010740994, -0.016965123, -0.006771934, 0.014464716, 0.007192029, -0.0006175399, -0.010855259, -0.003787578, 0.015647706, 0.01002179, -0.015378844, -0.01598378, 0.015741806, -0.0039119264, -0.008422068, 0.03253217, -0.019210111, -0.014975552, 0.0025810648, 0.0035556855, 8.449164E-05, -0.034172222, -0.006395529, -0.0036867552, 0.020769505, 0.009766373, -0.017543174, -0.013557311, 0.0031994449, -0.0014577302, 0.01832287, -0.009907524, -0.024654545, 0.0049940916, 0.016965123, 0.004476534, 0.022261683, -0.009369803, 0.0015308268, -0.010102449, -0.001209874, -0.023807634, -0.008348132, -0.020312442, 0.030892119, -0.0058309208, -0.005128522, -0.02437224, 0.01478735, -0.011016576, -0.010290652, -0.00503106, 0.016884465, 0.02132067, -0.014236185, -0.004903351, 0.01902191, 0.0028179984, 0.019505858, -0.021535758, -0.0038514326, 0.0112115, 0.0038682362, 0.003217929, -0.0012770894, -0.013685021, -0.008381739, 0.0025256122, 0.029386498, 0.018645504, 0.005323446, -0.0032784226, -0.0043253, 0.0007998612, 0.019949479, 0.025770318, -0.0030868594, 0.018968137, -0.010236879, -0.005370497, -0.024748646, -0.014047982, 0.005760345, -0.03610802, 0.0042009517, -0.0034817487, 0.003385967, 0.006560206, -0.006294706, -0.02400928, -0.006140111, -0.0017980073, -0.012481867, -0.0033960494, -0.00097210024, 0.014061426, -0.017596947, -0.023202697, 0.0028499255, -0.016010666, -0.028149737, 0.0024752007, -0.018941252, 0.0056158323, -0.012912045, 0.0054410724, 0.003054932, 0.019559631, -0.0048932685, -0.007823853, -0.017099554, 0.025662774, 0.02572999, 0.004379072, -0.010223436, 0.0031036632, -0.011755943, -0.025622444, -0.030623257, 0.019895706, -0.02052753, -0.006637504, -0.001231719, -0.013980767, -0.02706085, -0.012071854, -0.0041370974, -0.008885853, 0.0001885177, 0.2460615, -0.009389968, -0.010714107, 0.0326666, 0.0009561366, 0.022624645, 0.009793258, 0.019452088, -0.004493338, -0.007097928, -0.0022298652, 0.012401209, -0.0036229007, -0.00023819396, -0.017502844, -0.014209299, -0.030542599, -0.004863022, 0.005128522, -0.03081146, 0.02118624, -0.0042177555, 0.0032448152, -0.019936036, 0.015311629, 0.0070508774, -0.02021834, 0.0016148458, 0.04317906, 0.01385978, 0.004211034, -0.02534014, -0.00030309867, -0.011930703, -0.00207527, -0.021643303, 0.01575525, -0.0042883316, 0.0069231684, 
0.017946465, 0.03081146, 0.0043857936, 3.646951E-05, -0.0214551, 0.0089933975, 0.022785962, -0.008106156, 0.00082884775, -0.0006717322, -0.0025457768, -0.017059224, -0.035113234, 0.054982055, 0.021266898, -0.0071046497, -0.012636462, 0.016965123, 0.01902191, -0.0061737187, 0.00076247274, 0.0002789432, 0.030112421, -0.0026768465, 0.0015207445, -0.004926876, 0.0067551304, -0.022624645, 0.0005003333, 0.0035523248, -0.0041337362, 0.011634956, -0.0183632, -0.02820351, -0.0061737187, -0.022355784, -0.03796316, 0.041888528, 0.019626847, 0.02211381, 0.001474534, 0.0037640526, 0.0085228905, 0.013140577, 0.012616298, -0.010599841, -0.022920392, 0.011278715, -0.011493804, -0.0044966987, -0.028741231, 0.015782135, -0.011500525, -0.00027621258, -0.0046378504, -0.003280103, 0.026993636, 0.0109359175, 0.027168395, 0.014370616, -0.011890373, -0.020648519, -0.03465617, 0.001964365, 0.034064677, -0.02162986, -0.01081493, 0.014397502, 0.008038941, 0.029789789, -0.012044969, 0.0038379894, -0.011245107, 0.0048193317, -0.0048563, 0.0142899575, 0.009779816, 0.0058510853, -0.026845763, 0.013281729, -0.0005818318, 0.009685714, -0.020231783, -0.004197591, 0.015593933, -0.016319858, -0.019492416, -0.008314524, 0.014693249, 0.013617805, -0.02917141, -0.0052058194, -0.0061838008, 0.0072726877, -0.010149499, -0.019035352, 0.0070374343, -0.0023138842, 0.0026583623, -0.00034111727, 0.0019038713, 0.025945077, -0.014693249, 0.009820145, -0.0037506097, 0.00041127318, -0.024909964, 0.008603549, -0.0041707046, 0.019398315, -0.024022723, -0.013409438, -0.027880875, 0.0023558936, -0.024237812, 0.034172222, -0.006251016, -0.048152987, -0.01523097, -0.002308843, -0.013691742, -0.02688609, 0.007810409, 0.011513968, -0.006647586, -0.011735778, 0.0017408744, -0.17422187, 0.01301959, 0.018860593, -0.00068013405, 0.008791751, -0.031618044, 0.017946465, 0.011735778, -0.03129541, 0.0033607613, 0.0072861305, 0.008227143, -0.018443858, -0.014007653, 0.009961297, 0.006284624, -0.024815861, 0.012676792, 0.014222742, 0.0036632298, 0.0028364826, -0.012320551, -0.0050478633, 0.011729057, 0.023135481, 0.025945077, 0.005676326, -0.007192029, 0.0015308268, -0.019492416, -0.008932903, -0.021737404, 0.012925488, 0.008092714, 0.03245151, -0.009457182, -0.018524516, 0.0025188907, -0.008569942, 0.0022769158, -0.004617686, 0.01315402, 0.024291582, -0.001880346, 0.0014274834, 0.04277577, 0.010216715, -0.018699275, 0.018645504, 0.008059106, 0.02997799, -0.021576088, 0.004846218, 0.015741806, 0.0023542133, 0.03142984, 0.01372535, 0.01598378, 0.001151901, -0.012246614, -0.004184148, -0.023605987, 0.008657321, -0.025770318, -0.019048795, -0.023054823, 0.005535174, -0.018161554, -0.019761277, 0.01385978, -0.016655933, 0.01416897, 0.015311629, 0.008919461, 0.0077499156, 0.023888292, 0.015257857, 0.009087498, 0.0017845642, 0.0013762318, -0.023713533, 0.027464142, -0.014021097, -0.024681432, -0.006741687, 0.0016450927, -0.005804035, -0.002821359, 0.0056796866, -0.023189254, 0.00723908, -0.013483374, -0.018390086, -0.018847149, 0.0061905226, 0.033365637, 0.008489283, 0.015257857, 0.019694062, -0.03019308, -0.012253336, 0.0021744126, -0.00754827, 0.01929077, 0.025044393, 0.017677605, 0.02503095, 0.028579915, 0.01774482, 0.0029961187, -0.019895706, 0.001165344, -0.0075281053, 0.02105181, -0.009221929, 0.023404341, -0.0028079161, -0.0037237236, 0.02847237, 0.0009821824, 0.04629785, -0.017771706, -0.038904175, 0.00869765, 0.0016249281, 0.020984594, -0.10867358, -0.008395182, -0.0010830053, 0.008059106, -0.020097353, 0.0020383017, 0.008038941, -0.009047169, 
-0.007252523, 0.0286068, -0.0037774958, -0.024923407, 0.005279756, -0.009524398, 0.011527412, -0.0020198175, 0.019452088, 0.014384058, -0.025609002, 0.006025845, -0.030542599, 0.016790364, 0.019223554, -0.012434817, 0.003901844, -0.007817131, -0.027612016, 0.008314524, 0.007938119, -0.0004868903, 0.014747021, -0.009457182, 0.014706692, -0.018847149, 0.015311629, 0.015647706, -0.0031288688, -0.0032717013, 0.008879132, -0.034629285, 0.0090337265, 0.004382433, 0.011305601, -0.028391711, 0.0053268066, 0.0003566608, -0.019169783, 0.011507247, 0.023592545, -0.006603896, -0.009685714, 0.010714107, -0.027907763, 0.006412333, 0.0045706355, -0.029816674, 0.0047958065, 0.0018500991, -0.011500525, 0.0030179636, 0.015997224, -0.022140697, -0.0001849469, -0.014263071, 0.011540854, -0.006607257, -0.01871272, -0.0038480717, -0.0024903242, -0.031214751, -0.0050478633, 0.021481987, -0.012912045, 0.028122852, -0.018605174, -0.00723908, 0.0023609349, -0.0073331813, 0.014935223, -0.005699851, -0.0068895607, -0.015244413, 0.029789789, -0.02458733, 0.0004453009, 0.0015577129, 0.0048596608, 0.009376524, -0.011984475, -0.014518489, 0.015647706, 0.0068794787, 0.0065534846, 0.003107024, -0.01973439, 0.027383484, -0.015459502, -0.006318231, 0.020863606, -0.0021357639, -0.0076692575, -0.021266898, -0.046862457, 0.025326697, 0.016521502, -0.0036833945, 0.0029860365, -0.016306413, 0.026496243, -0.016803807, 0.008724537, -0.0025407355, -0.027302826, 0.017798591, 0.0060796174, -0.014007653, -0.01650806, -0.0095042335, 0.009242094, -0.009342916, 0.010330981, 0.009544563, 0.018591732, 0.0036867552, 0.0194252, 0.0092488155, -0.007823853, 0.0015501512, -0.012031525, 0.010203271, -0.0074272826, -0.020258669, 0.025662774, -0.03032751, 0.014854565, 0.010835094, 0.0007708746, 0.0009989863, -0.014007653, -0.012871716, 0.023444671, 0.03323121, -0.034575514, -0.024291582, 0.011634956, -0.025958521, -0.01973439, 0.0029742739, 0.0067148013, 0.0022399474, 0.011802994, 0.011151006, -0.0116416775, 0.030166194, 0.013039754, -0.022517102, -0.011466918, -0.0033053088, 0.006156915, 0.004829414, 0.006029206, -0.016534945, 0.015325071, -0.0109359175, 0.032854803, -0.001010749, 0.0021155993, -0.011702171, -0.009766373, 0.00679882, 0.0040900465, -0.019438643, -0.006758491, -0.0040060277, 0.022436442, 0.025850976, 0.006150193, 0.018632062, -0.0077230297, -0.015298186, -0.017381858, 0.01911601, -0.005763706, -0.0022281848, -0.031994447, 0.0015972018, 0.028848775, 0.014572261, -0.0073264595, -0.009551284, -0.0052058194, 0.014518489, -0.0041068504, 0.010754436, 0.0055519775, -0.005804035, -0.0054007433, 0.028579915, -0.01791958, -0.015284742, 0.036807057, 0.015069654, -0.0023810994, -0.0038648755, 0.0015467904, -0.0037136413, 0.0023458113, 0.019008467, -0.011547576, -0.010001626, 0.012347437, 0.0155267175, 0.01907568, -0.003041489, -0.0132414, 0.017449073, 0.00060073606, -0.008536334, 0.008233866, -0.0085430555, -0.02365976, 0.024089938, -0.0034615842, -0.006580371, 0.008327967, -0.01509654, 0.009692436, 0.025635887, 0.0020282194, -0.04022159, -0.0021290423, -0.012407931, -0.0021727323, 0.006506434, -0.005320085, -0.008240587, 0.020984594, -0.014491603, 0.003592654, 0.0072121937, -0.03081146, 0.043770555, 0.009302587, -0.003217929, 0.019008467, -0.011271994, 0.02917141, 0.0019576435, -0.0077431942, -0.0030448497, -0.023726976, 0.023377456, -0.006382086, 0.025716545, -0.017341528, 0.0035556855, -0.019129453, -0.004311857, -0.003253217, -0.014935223, 0.0036363439, 0.018121226, -0.0066543072, 0.02458733, 0.0035691285, 0.0039085653, -0.014209299, 
0.020191453, 0.0357585, 0.007830574, -0.024130266, -0.008912739, 0.008314524, -0.0346024, -0.0014005973, -0.006788738, -0.021777734, 0.010465411, -0.004012749, -0.00679882, 0.009981461, -0.026227381, 0.027033964, -0.015567047, -0.0063115098, 0.0023071626, 0.01037131, 0.015741806, -0.020635074, -0.012945653)); + private static final int LIMIT = 2; + private static Map> collectionHelpers; + + @BeforeAll + static void beforeAll() { + collectionHelpers = new HashMap<>(); + collectionHelpers.put(MFLIX_MOVIES_NS, new CollectionHelper<>(new BsonDocumentCodec(), MFLIX_MOVIES_NS)); + collectionHelpers.put(MFLIX_EMBEDDED_MOVIES_NS, new CollectionHelper<>(new BsonDocumentCodec(), MFLIX_EMBEDDED_MOVIES_NS)); + collectionHelpers.put(AIRBNB_LISTINGS_AND_REVIEWS_NS, new CollectionHelper<>(new BsonDocumentCodec(), AIRBNB_LISTINGS_AND_REVIEWS_NS)); + } + + @BeforeEach + void beforeEach() { + assumeTrue(isAtlasSearchTest()); + } + + private static Stream vectorSearchArgs(){ + return Stream.of( + arguments(approximateVectorSearchOptions(LIMIT + 1)), + arguments(exactVectorSearchOptions()) + ); + } + + @ParameterizedTest + @MethodSource("vectorSearchArgs") + void vectorSearch(final VectorSearchOptions vectorSearchOptions) { + assumeTrue(serverVersionAtLeast(7, 1)); + CollectionHelper collectionHelper = collectionHelpers.get(MFLIX_EMBEDDED_MOVIES_NS); + assertAll( + () -> { + List pipeline = singletonList( + Aggregates.vectorSearch( + // `multi` is used here only to verify that it is tolerated + fieldPath("plot_embedding").multi("ignored"), + QUERY_VECTOR, "sample_mflix__embedded_movies", LIMIT, vectorSearchOptions) + ); + Asserters.size(LIMIT) + .accept(collectionHelper.aggregate(pipeline), msgSupplier(pipeline)); + }, + () -> { + List pipeline = asList( + Aggregates.vectorSearch( + fieldPath("plot_embedding"), QUERY_VECTOR, "sample_mflix__embedded_movies", LIMIT, + vectorSearchOptions.filter(gte("year", 2016))), + Aggregates.project( + metaVectorSearchScore("vectorSearchScore")) + ); + List results = collectionHelper.aggregate(pipeline); + Asserters.size(1) + .accept(results, msgSupplier(pipeline)); + Asserters.firstResult((doc, msgSupplier) -> + assertTrue(doc.getDouble("vectorSearchScore").doubleValue() > 0, msgSupplier)) + .accept(results, msgSupplier(pipeline)); + } + ); + } + + private static Stream vectorSearchSupportedFiltersArgs(){ + return Stream.of( + arguments(approximateVectorSearchOptions(1)), + arguments(exactVectorSearchOptions()) + ); + } + + @ParameterizedTest + @MethodSource("vectorSearchSupportedFiltersArgs") + void vectorSearchSupportedFilters(final VectorSearchOptions vectorSearchOptions) { + assumeTrue(serverVersionAtLeast(7, 1)); + CollectionHelper collectionHelper = collectionHelpers.get(MFLIX_EMBEDDED_MOVIES_NS); + Consumer asserter = filter -> { + List pipeline = singletonList( + Aggregates.vectorSearch( + fieldPath("plot_embedding"), QUERY_VECTOR, "sample_mflix__embedded_movies", 1, + vectorSearchOptions.filter(filter)) + ); + Asserters.nonEmpty() + .accept(collectionHelper.aggregate(pipeline), msgSupplier(pipeline)); + }; + assertAll( + () -> asserter.accept(lt("year", 2016)), + () -> asserter.accept(lte("year", 2016)), + () -> asserter.accept(eq("year", 2016)), + () -> asserter.accept(gte("year", 2016)), + () -> asserter.accept(gt("year", 2015)), + () -> asserter.accept(ne("year", 2016)), + () -> asserter.accept(in("year", 2000, 2016)), + () -> asserter.accept(nin("year", 2000, 2016)), + () -> asserter.accept(and(gte("year", 2015), lte("year", 2016))), + () -> 
asserter.accept(or(eq("year", 2015), eq("year", 2016)))
+        );
+    }
+
+    /**
+     * @param stageUnderTestCreator A {@link CustomizableSearchStageCreator} that is used to create both
+     * {@code $search} and {@code $searchMeta} stages. Any combination of a {@link SearchOperator}/{@link SearchCollector} and
+     * {@link SearchOptions} that is valid for the {@code $search} stage is also valid for the {@code $searchMeta} stage.
+     * This is why we use the same creator for both.
+     * @param accessories A list of {@link Accessory} objects that specify additional pipeline stages and an asserter.
+     * <ul>
+     *     <li>The item with index 0 is used with {@code $search};</li>
+     *     <li>the item with index 1 is used with {@code $searchMeta}.</li>
+     * </ul>
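+     * <p>
+     * As a sketch (names reused from the helpers in this class, not additional test data), a creator produced by
+     * the private {@code stageCreator} method is turned into a concrete stage by applying a customizer to it:
+     * <pre>{@code
+     * Bson searchStage = stageCreator(exists(fieldPath("title")), searchOptions())
+     *         .apply((operatorOrCollector, options) ->
+     *                 Aggregates.search((SearchOperator) operatorOrCollector, options));
+     * }</pre>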
+ */ + @ParameterizedTest(name = "{index} {0}") + @MethodSource("searchAndSearchMetaArgs") + void searchAndSearchMeta( + @SuppressWarnings("unused") final String testDescription, + final CustomizableSearchStageCreator stageUnderTestCreator, + final MongoNamespace ns, + final List accessories) { + List> stageUnderTestCustomizers = asList( + (bsonOperatorOrCollector, options) -> { + if (bsonOperatorOrCollector instanceof SearchOperator) { + return Aggregates.search((SearchOperator) bsonOperatorOrCollector, options); + } else if (bsonOperatorOrCollector instanceof SearchCollector) { + return Aggregates.search((SearchCollector) bsonOperatorOrCollector, options); + } else { + throw Assertions.fail(); + } + }, + (bsonOperatorOrCollector, options) -> { + if (bsonOperatorOrCollector instanceof SearchOperator) { + return Aggregates.searchMeta((SearchOperator) bsonOperatorOrCollector, options); + } else if (bsonOperatorOrCollector instanceof SearchCollector) { + return Aggregates.searchMeta((SearchCollector) bsonOperatorOrCollector, options); + } else { + throw Assertions.fail(); + } + } + ); + Assertions.assertTrue(stageUnderTestCustomizers.size() == accessories.size()); + for (int i = 0; i < stageUnderTestCustomizers.size(); i++) { + Bson stageUnderTest = stageUnderTestCreator.apply(stageUnderTestCustomizers.get(i)); + Accessory accessory = accessories.get(i); + List pipeline = new ArrayList<>(); + pipeline.add(stageUnderTest); + pipeline.addAll(accessory.postStages); + Supplier msgSupplier = msgSupplier(pipeline); + List results; + try { + results = collectionHelpers.get(ns).aggregate(pipeline); + } catch (Exception e) { + throw new RuntimeException(msgSupplier.get(), e); + } + accessory.resultAsserter.accept(results, msgSupplier); + } + } + + /** + * @see #searchAndSearchMeta(String, CustomizableSearchStageCreator, MongoNamespace, List) + */ + private static Stream searchAndSearchMetaArgs() { + return Stream.of( + arguments( + "default options", + stageCreator( + exists(fieldPath("tomatoes.dvd")), + searchOptions() + ), + MFLIX_MOVIES_NS, + asList( + new Accessory( + asList(limit(1), project(metaSearchScore("score"))), + Asserters.score(1) + ), + new Accessory( + emptyList(), + // specifying a bare operator works as if `SearchCount.lowerBound` were specified + Asserters.countLowerBound(1_001) + ) + ) + ), + arguments( + "`index`, `count` options", + stageCreator( + // `multi` is used here only to verify that it is tolerated + exists(fieldPath("title").multi("ignored")), + searchOptions() + .option("index", "default") + .count(lowerBound().threshold(2_000)) + ), + MFLIX_MOVIES_NS, + asList( + new Accessory( + asList(limit(1), project(computedSearchMeta("meta"))), + Asserters.countLowerBound("meta", 2_000) + ), + new Accessory( + emptyList(), + Asserters.countLowerBound(2_000) + ) + ) + ), + arguments( + "`highlight` option", + stageCreator( + text(singleton(fieldPath("plot")), asList("factory", "century")), + searchOptions() + .highlight(paths( + fieldPath("title").multi("keyword"), + wildcardPath("pl*t")) + .maxCharsToExamine(100_000)) + ), + MFLIX_MOVIES_NS, + asList( + new Accessory( + asList(limit(1), project(metaSearchHighlights("highlights"))), + Asserters.firstResult((doc, msgSupplier) -> assertEquals(1, doc.getArray("highlights").size(), msgSupplier)) + ), + new Accessory( + emptyList(), + Asserters.nonEmpty() + ) + ) + ), + arguments( + "`returnStoredSource` option", + stageCreator( + exists(fieldPath("plot")), + searchOptions() + .returnStoredSource(true) + ), + MFLIX_MOVIES_NS, + 
asList( + new Accessory( + singleton(limit(1)), + Asserters.firstResult((doc, msgSupplier) -> { + // assert that the fields specified in `storedSource` and "id" were returned + assertNotNull(doc.get("_id"), msgSupplier); + assertFalse(doc.get("plot").asString().getValue().isEmpty(), msgSupplier); + assertEquals(2, doc.size(), msgSupplier); + }) + ), + new Accessory( + emptyList(), + Asserters.nonEmpty() + ) + ) + ), + arguments( + "alternate analyzer (`multi` field path)", + stageCreator( + text(singleton(fieldPath("title").multi("keyword")), singleton("Top Gun")), + searchOptions().count(total()) + ), + MFLIX_MOVIES_NS, + asList( + new Accessory( + emptyList(), + Asserters.firstResult((doc, msgSupplier) -> assertEquals( + "Top Gun", doc.getString("title").getValue(), msgSupplier)) + ), + new Accessory( + emptyList(), + Asserters.countTotal(1) + ) + ) + ), + arguments( + "facet collector", + stageCreator( + facet( + exists(fieldPath("tomatoes")), + asList( + stringFacet( + "fullplotFacet", + fieldPath("fullplot")) + .numBuckets(1), + numberFacet( + "tomatoesMeterFacet", + fieldPath("tomatoes.viewer.meter"), + asList(10f, 20d, 90, Long.MAX_VALUE / 2, Long.MAX_VALUE)) + .defaultBucket("defaultBucket"), + dateFacet( + "releasedFacet", + fieldPath("released"), + asList( + Instant.EPOCH, + Instant.from(Year.of(1985) + .atMonth(Month.JANUARY).atDay(1).atStartOfDay().atOffset(UTC)), + Instant.now())))), + searchOptions() + ), + MFLIX_MOVIES_NS, + asList( + new Accessory( + asList(limit(1), project(computedSearchMeta("meta")), replaceWith("$meta")), + Asserters.firstResult((doc, msgSupplier) -> assertEquals(5, doc.getDocument("facet") + .getDocument("tomatoesMeterFacet").getArray("buckets").size(), msgSupplier)) + ), + new Accessory( + emptyList(), + Asserters.firstResult((doc, msgSupplier) -> assertEquals(5, doc.getDocument("facet") + .getDocument("tomatoesMeterFacet").getArray("buckets").size(), msgSupplier)) + ) + ) + ), + arguments( + "score modifier", + stageCreator(compound() + .should(asList( + exists(fieldPath("fieldName1")) + .score(boost(Float.MAX_VALUE / 2)), + exists(fieldPath("fieldName2")) + .score(boost(fieldPath("boostFieldName"))), + exists(fieldPath("fieldName3")) + .score(boost(fieldPath("boostFieldName")) + .undefined(-1)), + exists(fieldPath("fieldName4")) + .score(constant(1.2f)), + exists(fieldPath("fieldName5")) + .score(function(relevanceExpression())), + exists(fieldPath("fieldName6")) + .score(function(pathExpression(fieldPath("expressionFieldName")))), + exists(fieldPath("fieldName7")) + .score(function(pathExpression(fieldPath("expressionFieldName")) + .undefined(-1))), + exists(fieldPath("fieldName8")) + .score(function(constantExpression(-1.2f))), + exists(fieldPath("fieldName9")) + .score(function( + gaussExpression(-10, pathExpression(fieldPath("gaussianFieldName")), Double.MAX_VALUE / -2))), + exists(fieldPath("fieldName10")) + .score(function( + gaussExpression( + -10, + pathExpression(fieldPath("gaussianFieldName")) + .undefined(0), + Double.MAX_VALUE / -2) + .offset(Double.MAX_VALUE / -2) + .decay(Double.MIN_VALUE))), + exists(fieldPath("fieldName11")) + .score(function(logExpression(constantExpression(3)))), + exists(fieldPath("fieldName12")) + .score(function(log1pExpression(constantExpression(-3)))), + exists(fieldPath("fieldName13")) + .score(function(addExpression(asList( + logExpression(multiplyExpression(asList( + constantExpression(2), + constantExpression(3), + relevanceExpression()))), + gaussExpression(0, 
pathExpression(fieldPath("gaussianFieldName")), 1))))) + )), + searchOptions() + ), + MFLIX_MOVIES_NS, + asList( + new Accessory( + emptyList(), + Asserters.empty() + ), + new Accessory( + emptyList(), + Asserters.nonEmpty() + ) + ) + ), + arguments( + "all operators in a `compound` operator", + stageCreator(compound() + .should(asList( + exists(fieldPath("fieldName1")), + text(fieldPath("fieldName2"), "term1") + .score(function(logExpression(constantExpression(3)))), + text(asList(wildcardPath("wildc*rd"), fieldPath("fieldName3")), asList("term2", "term3")) + .fuzzy(fuzzySearchOptions() + .maxEdits(1) + .prefixLength(2) + .maxExpansions(3)), + autocomplete(fieldPath("title") + // `multi` is used here only to verify that it is tolerated + .multi("ignored"), "term4"), + // this operator produces non-empty search results + autocomplete(fieldPath("title"), "Traffic in", "term5") + .fuzzy() + .sequentialTokenOrder(), + numberRange(fieldPath("fieldName4"), fieldPath("fieldName5")) + .gtLt(1, 1.5), + dateRange(fieldPath("fieldName6")) + .lte(Instant.ofEpochMilli(1)), + near(0, 1.5, fieldPath("fieldName7"), fieldPath("fieldName8")), + near(Instant.ofEpochMilli(1), Duration.ofMillis(3), fieldPath("fieldName9")), + phrase(fieldPath("fieldName10"), "term6"), + in(fieldPath("fieldName10"), true), + in(fieldPath("fieldName11"), "term4", "term5"), + regex(fieldPath("title").multi("keyword"), "term7"), + queryString(fieldPath("fieldName12"), "term8"), + moreLikeThis(new BsonDocument("like", new BsonDocument("fieldName10", + new BsonString("term6")))), + wildcard(asList("term10", "term11"), asList(wildcardPath("wildc*rd"), fieldPath("title").multi( + "keyword"))), + SearchOperator.equals(fieldPath("fieldName11"), "term7"), + equalsNull(fieldPath("fieldName12")) + )) + .minimumShouldMatch(1) + .mustNot(singleton( + compound().must(singleton(exists(fieldPath("fieldName")))))), + searchOptions() + ), + MFLIX_MOVIES_NS, + asList( + new Accessory( + emptyList(), + Asserters.nonEmpty() + ), + new Accessory( + emptyList(), + Asserters.countLowerBound(0) + ) + ) + ), + arguments( + "geo operators in a `compound` operator", + stageCreator(compound() + .should(singleton( + near( + new Point(new Position(114.15, 22.28)), + 1234.5, + fieldPath("address.location")) + )), + searchOptions() + ), + AIRBNB_LISTINGS_AND_REVIEWS_NS, + asList( + new Accessory( + emptyList(), + Asserters.nonEmpty() + ), + new Accessory( + emptyList(), + Asserters.countLowerBound(0) + ) + ) + ) + ); + } + + private static Supplier msgSupplier(final Collection pipeline) { + return () -> "For reference, the pipeline (" + pipeline.size() + " elements) used in the test is\n[\n" + + pipeline.stream() + .map(stage -> stage.toBsonDocument(BsonDocument.class, MongoClientSettings.getDefaultCodecRegistry())) + .map(doc -> doc.toJson(JsonWriterSettings.builder().indent(true).build())) + .collect(Collectors.joining(",\n")) + + "\n]\n"; + } + + private static final class Asserters { + static Asserter empty() { + return decorate((results, msgSupplier) -> assertTrue(results.isEmpty(), msgSupplier)); + } + + static Asserter nonEmpty() { + return decorate((results, msgSupplier) -> assertFalse(results.isEmpty(), msgSupplier)); + } + + static Asserter size(final int expectedSize) { + return decorate((results, msgSupplier) -> assertEquals(expectedSize, results.size(), msgSupplier)); + } + + /** + * Checks the value of the {@code "score"} field for each result document. 
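+     * <p>
+     * Minimal usage sketch, assuming a {@code results} list and a message supplier like those built elsewhere in this test:
+     * <pre>{@code
+     * Asserters.score(1).accept(results, () -> "pipeline description for failure messages");
+     * }</pre>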
+ */ + static Asserter score(final double expectedScore) { + return decorate((results, msgSupplier) -> { + assertFalse(results.isEmpty(), msgSupplier); + for (BsonDocument result : results) { + assertEquals(expectedScore, result.getNumber("score").doubleValue(), 0.000_1, msgSupplier); + } + }); + } + + /** + * Checks the value of the {@code "customMetaField.count.lowerBound"} field. + */ + static Asserter countLowerBound(final String customMetaField, final int expectedAtLeast) { + return firstResult((doc, msgSupplier) -> assertTrue( + doc.getDocument(customMetaField).getDocument("count").getNumber("lowerBound").intValue() >= expectedAtLeast, msgSupplier)); + } + + /** + * Checks the value of the {@code "count.lowerBound"} field. + */ + static Asserter countLowerBound(final int expectedAtLeast) { + return firstResult((doc, msgSupplier) -> assertTrue( + doc.getDocument("count").getNumber("lowerBound").intValue() >= expectedAtLeast, msgSupplier)); + } + + /** + * Checks the value of the {@code "count.total"} field. + */ + static Asserter countTotal(final int expected) { + return firstResult((doc, msgSupplier) -> assertEquals( + expected, doc.getDocument("count").getNumber("total").intValue(), msgSupplier)); + } + + static Asserter firstResult(final BiConsumer> asserter) { + return decorate((results, msgSupplier) -> { + assertFalse(results.isEmpty(), msgSupplier); + asserter.accept(results.get(0), msgSupplier); + }); + } + + private static Asserter decorate(final Asserter asserter) { + int maxRenderedResults = 20; + return (results, msgSupplier) -> asserter.accept( + results, + () -> msgSupplier.get() + + "\ntop " + maxRenderedResults + " out of total " + results.size() + " results are\n[" + + results.stream() + .map(doc -> doc.toJson(JsonWriterSettings.builder().indent(true).build())) + .limit(maxRenderedResults) + .collect(Collectors.joining(",\n")) + + "\n]\n" + ); + } + } + + private static CustomizableSearchStageCreator stageCreator(final Bson operatorOrCollector, final SearchOptions options) { + return customizer -> customizer.apply(operatorOrCollector, options); + } + + @FunctionalInterface + private interface CustomizableSearchStageCreator extends Function, Bson> { + } + + @FunctionalInterface + private interface Asserter extends BiConsumer, Supplier> { + } + + private static final class Accessory { + private final Collection postStages; + private final BiConsumer, Supplier> resultAsserter; + + Accessory( + final Collection postStages, + final BiConsumer, Supplier> resultAsserter) { + this.postStages = postStages; + this.resultAsserter = resultAsserter; + } + } +} diff --git a/driver-core/src/test/functional/com/mongodb/client/syncadapter/SupplyingCallback.java b/driver-core/src/test/functional/com/mongodb/client/syncadapter/SupplyingCallback.java new file mode 100644 index 00000000000..bf298a99a08 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/client/syncadapter/SupplyingCallback.java @@ -0,0 +1,73 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.syncadapter; + +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.lang.Nullable; + +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.function.Supplier; + +@ThreadSafe +public final class SupplyingCallback implements SingleResultCallback, Supplier { + public static final long TIMEOUT_MINUTES = 1; + + private final CompletableFuture future; + + public SupplyingCallback() { + future = new CompletableFuture<>(); + } + + @Override + public void onResult(@Nullable final R result, @Nullable final Throwable t) { + if (t != null) { + future.completeExceptionally(t); + } else { + future.complete(result); + } + } + + @Override + public R get() { + try { + return future.get(TIMEOUT_MINUTES, TimeUnit.MINUTES); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new RuntimeException(e); + } catch (TimeoutException e) { + throw new RuntimeException(e); + } catch (ExecutionException e) { + Throwable cause = e.getCause(); + if (cause == null) { + throw new RuntimeException(e); + } else if (cause instanceof RuntimeException) { + throw (RuntimeException) cause; + } else if (cause instanceof Error) { + throw (Error) cause; + } else { + throw new RuntimeException(e); + } + } + } + + public boolean completed() { + return future.isDone(); + } +} diff --git a/driver-core/src/test/functional/com/mongodb/client/syncadapter/SyncConnection.java b/driver-core/src/test/functional/com/mongodb/client/syncadapter/SyncConnection.java new file mode 100644 index 00000000000..0a96d5ab0cf --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/client/syncadapter/SyncConnection.java @@ -0,0 +1,79 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.client.syncadapter; + +import com.mongodb.ReadPreference; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.connection.AsyncConnection; +import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.connection.MessageSequences; +import com.mongodb.internal.connection.OperationContext; +import org.bson.BsonDocument; +import org.bson.FieldNameValidator; +import org.bson.codecs.Decoder; + +public final class SyncConnection implements Connection { + private final AsyncConnection wrapped; + + public SyncConnection(final AsyncConnection connection) { + wrapped = connection; + } + + @Override + public int getCount() { + return wrapped.getCount(); + } + + @Override + public int release() { + return wrapped.release(); + } + + @Override + public Connection retain() { + wrapped.retain(); + return this; + } + + @Override + public ConnectionDescription getDescription() { + return wrapped.getDescription(); + } + + @Override + public T command(final String database, final BsonDocument command, final FieldNameValidator fieldNameValidator, + final ReadPreference readPreference, final Decoder commandResultDecoder, + final OperationContext operationContext) { + SupplyingCallback callback = new SupplyingCallback<>(); + wrapped.commandAsync(database, command, fieldNameValidator, readPreference, commandResultDecoder, operationContext, callback); + return callback.get(); + } + + @Override + public T command(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, + final ReadPreference readPreference, final Decoder commandResultDecoder, + final OperationContext operationContext, final boolean responseExpected, final MessageSequences sequences) { + SupplyingCallback callback = new SupplyingCallback<>(); + wrapped.commandAsync(database, command, commandFieldNameValidator, readPreference, commandResultDecoder, operationContext, + responseExpected, sequences, callback); + return callback.get(); + } + + @Override + public void markAsPinned(final PinningMode pinningMode) { + wrapped.markAsPinned(pinningMode); + } +} diff --git a/driver-core/src/test/functional/com/mongodb/client/test/CollectionHelper.java b/driver-core/src/test/functional/com/mongodb/client/test/CollectionHelper.java new file mode 100644 index 00000000000..d5abfdd6e3f --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/client/test/CollectionHelper.java @@ -0,0 +1,555 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.test; + +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoCommandException; +import com.mongodb.MongoNamespace; +import com.mongodb.MongoWriteConcernException; +import com.mongodb.ReadPreference; +import com.mongodb.ServerCursor; +import com.mongodb.WriteConcern; +import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.IndexOptionDefaults; +import com.mongodb.client.model.ValidationOptions; +import com.mongodb.internal.binding.AsyncReadWriteBinding; +import com.mongodb.internal.binding.ReadBinding; +import com.mongodb.internal.binding.WriteBinding; +import com.mongodb.internal.bulk.DeleteRequest; +import com.mongodb.internal.bulk.IndexRequest; +import com.mongodb.internal.bulk.InsertRequest; +import com.mongodb.internal.bulk.UpdateRequest; +import com.mongodb.internal.bulk.WriteRequest; +import com.mongodb.internal.client.model.AggregationLevel; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.internal.operation.AggregateOperation; +import com.mongodb.internal.operation.BatchCursor; +import com.mongodb.internal.operation.CommandReadOperation; +import com.mongodb.internal.operation.CountDocumentsOperation; +import com.mongodb.internal.operation.CreateCollectionOperation; +import com.mongodb.internal.operation.CreateIndexesOperation; +import com.mongodb.internal.operation.CreateSearchIndexesOperation; +import com.mongodb.internal.operation.DropCollectionOperation; +import com.mongodb.internal.operation.DropDatabaseOperation; +import com.mongodb.internal.operation.FindOperation; +import com.mongodb.internal.operation.ListIndexesOperation; +import com.mongodb.internal.operation.ListSearchIndexesOperation; +import com.mongodb.internal.operation.MixedBulkWriteOperation; +import com.mongodb.internal.operation.SearchIndexRequest; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonDocumentWrapper; +import org.bson.BsonInt32; +import org.bson.BsonInt64; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.Document; +import org.bson.assertions.Assertions; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.Codec; +import org.bson.codecs.Decoder; +import org.bson.codecs.DocumentCodec; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; + +import static com.mongodb.ClusterFixture.executeAsync; +import static com.mongodb.ClusterFixture.getBinding; +import static java.lang.String.format; +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; + +public final class CollectionHelper { + private static final Logger LOGGER = Loggers.getLogger("test"); + + private final Codec codec; + private final CodecRegistry registry = MongoClientSettings.getDefaultCodecRegistry(); + private final MongoNamespace namespace; + + public CollectionHelper(final Codec codec, final MongoNamespace namespace) { + this.codec = codec; + this.namespace = namespace; + } + + public T hello() { + return new CommandReadOperation<>("admin", BsonDocument.parse("{isMaster: 1}"), codec) + .execute(getBinding()); + } + + public static void drop(final MongoNamespace namespace) { + drop(namespace, WriteConcern.ACKNOWLEDGED); + } + + public static void drop(final MongoNamespace namespace, final WriteConcern writeConcern) { + 
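// Both catch branches below retry indefinitely: write concern errors and "Interrupted" command errors
+        // are treated as transient here, while any other MongoCommandException propagates to the caller. + 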
// This loop is a workaround for unanticipated failures of the drop command when run on a sharded cluster < 4.2. + // In practice the command tends to succeed on the first attempt after a failure + boolean success = false; + while (!success) { + try { + new DropCollectionOperation(namespace, writeConcern).execute(getBinding()); + success = true; + } catch (MongoWriteConcernException e) { + LOGGER.info("Retrying drop collection after a write concern error: " + e); + // repeat until success! + } catch (MongoCommandException e) { + if ("Interrupted".equals(e.getErrorCodeName())) { + LOGGER.info("Retrying drop collection after an Interrupted error: " + e); + // repeat until success! + } else { + throw e; + } + } + } + } + + public static void dropDatabase(final String name) { + dropDatabase(name, WriteConcern.ACKNOWLEDGED); + } + + public static void dropDatabase(final String name, final WriteConcern writeConcern) { + if (name == null) { + return; + } + try { + new DropDatabaseOperation(name, writeConcern).execute(getBinding()); + } catch (MongoCommandException e) { + if (!e.getErrorMessage().contains("ns not found")) { + throw e; + } + } + } + + public static BsonDocument getCurrentClusterTime() { + return new CommandReadOperation("admin", new BsonDocument("ping", new BsonInt32(1)), new BsonDocumentCodec()) + .execute(getBinding()).getDocument("$clusterTime", null); + } + + public MongoNamespace getNamespace() { + return namespace; + } + + public void drop() { + drop(WriteConcern.ACKNOWLEDGED); + } + + public void drop(final WriteConcern writeConcern) { + drop(namespace, writeConcern); + } + + public void dropAndCreate(final BsonDocument createOptions) { + // Drop the collection and any encryption collections: enxcol_..esc and enxcol_..ecoc + drop(namespace, WriteConcern.MAJORITY); + drop(new MongoNamespace(namespace.getDatabaseName(), format("enxcol_.%s.esc", namespace.getCollectionName())), + WriteConcern.MAJORITY); + drop(new MongoNamespace(namespace.getDatabaseName(), format("enxcol_.%s.ecoc", namespace.getCollectionName())), + WriteConcern.MAJORITY); + create(WriteConcern.MAJORITY, createOptions); + } + + public void create() { + create(namespace.getCollectionName(), new CreateCollectionOptions(), WriteConcern.ACKNOWLEDGED); + } + + public void create(final WriteConcern writeConcern) { + create(namespace.getCollectionName(), new CreateCollectionOptions(), writeConcern); + } + + public void create(final String collectionName, final CreateCollectionOptions options) { + create(collectionName, options, WriteConcern.ACKNOWLEDGED); + } + + public void create(final WriteConcern writeConcern, final BsonDocument createOptions) { + CreateCollectionOptions createCollectionOptions = new CreateCollectionOptions(); + for (String option : createOptions.keySet()) { + switch (option) { + case "capped": + createCollectionOptions.capped(createOptions.getBoolean("capped").getValue()); + break; + case "size": + createCollectionOptions.sizeInBytes(createOptions.getNumber("size").longValue()); + break; + case "encryptedFields": + createCollectionOptions.encryptedFields(createOptions.getDocument("encryptedFields")); + break; + case "validator": + ValidationOptions validationOptions = new ValidationOptions(); + validationOptions.validator(createOptions.getDocument("validator")); + createCollectionOptions.validationOptions(validationOptions); + break; + default: + throw new UnsupportedOperationException("Unsupported create collection option: " + option); + } + } + create(namespace.getCollectionName(), 
createCollectionOptions, writeConcern); + } + + public void create(final String collectionName, final CreateCollectionOptions options, final WriteConcern writeConcern) { + drop(namespace, writeConcern); + CreateCollectionOperation operation = new CreateCollectionOperation(namespace.getDatabaseName(), collectionName, + writeConcern) + .capped(options.isCapped()) + .sizeInBytes(options.getSizeInBytes()) + .maxDocuments(options.getMaxDocuments()); + + IndexOptionDefaults indexOptionDefaults = options.getIndexOptionDefaults(); + if (indexOptionDefaults.getStorageEngine() != null) { + operation.indexOptionDefaults(new BsonDocument("storageEngine", toBsonDocument(indexOptionDefaults.getStorageEngine()))); + } + Bson encryptedFields = options.getEncryptedFields(); + if (encryptedFields != null) { + operation.encryptedFields(encryptedFields.toBsonDocument()); + } + ValidationOptions validationOptions = options.getValidationOptions(); + if (validationOptions.getValidator() != null) { + operation.validator(toBsonDocument(validationOptions.getValidator())); + } + if (validationOptions.getValidationLevel() != null) { + operation.validationLevel(validationOptions.getValidationLevel()); + } + if (validationOptions.getValidationAction() != null) { + operation.validationAction(validationOptions.getValidationAction()); + } + + // This loop is a workaround for unanticipated failures of the create command when run on a sharded cluster < 4.2 + // In practice the command tends to succeed on the first attempt after a failure + boolean success = false; + while (!success) { + try { + operation.execute(getBinding()); + success = true; + } catch (MongoCommandException e) { + if ("Interrupted".equals(e.getErrorCodeName())) { + LOGGER.info("Retrying create collection after a write concern error: " + e); + // repeat until success! + } else { + throw e; + } + } + } + } + + public void killCursor(final MongoNamespace namespace, final ServerCursor serverCursor) { + if (serverCursor != null) { + BsonDocument command = new BsonDocument("killCursors", new BsonString(namespace.getCollectionName())) + .append("cursors", new BsonArray(singletonList(new BsonInt64(serverCursor.getId())))); + try { + new CommandReadOperation<>(namespace.getDatabaseName(), command, new BsonDocumentCodec()) + .execute(getBinding()); + } catch (Exception e) { + // Ignore any exceptions killing old cursors + } + } + } + + public void insertDocuments(final BsonDocument... documents) { + insertDocuments(asList(documents)); + } + + public void insertDocuments(final WriteConcern writeConcern, final BsonDocument... documents) { + insertDocuments(asList(documents), writeConcern); + } + + public void insertDocuments(final List documents) { + insertDocuments(documents, getBinding()); + } + + public void insertDocuments(final List documents, final WriteConcern writeConcern) { + insertDocuments(documents, writeConcern, getBinding()); + } + + public void insertDocuments(final List documents, final WriteBinding binding) { + insertDocuments(documents, WriteConcern.ACKNOWLEDGED, binding); + } + + public void insertDocuments(final List documents, final WriteConcern writeConcern, final WriteBinding binding) { + List insertRequests = new ArrayList<>(documents.size()); + for (BsonDocument document : documents) { + insertRequests.add(new InsertRequest(document)); + } + new MixedBulkWriteOperation(namespace, insertRequests, true, writeConcern, false).execute(binding); + } + + public void insertDocuments(final Document... 
documents) { + insertDocuments(new DocumentCodec(registry), asList(documents)); + } + + @SafeVarargs + @SuppressWarnings("varargs") + public final void insertDocuments(final Codec iCodec, final I... documents) { + insertDocuments(iCodec, asList(documents)); + } + + @SafeVarargs + @SuppressWarnings("varargs") + public final void insertDocuments(final Codec iCodec, final WriteBinding binding, final I... documents) { + insertDocuments(iCodec, binding, asList(documents)); + } + + public void insertDocuments(final Codec iCodec, final List documents) { + insertDocuments(iCodec, getBinding(), documents); + } + + public void insertDocuments(final Codec iCodec, final WriteBinding binding, final List documents) { + List bsonDocuments = new ArrayList<>(documents.size()); + for (I document : documents) { + bsonDocuments.add(new BsonDocumentWrapper<>(document, iCodec)); + } + insertDocuments(bsonDocuments, binding); + } + + public void insertDocuments(final String insertAll) { + List documents = BsonArray.parse(insertAll).stream().map(BsonValue::asDocument).collect(Collectors.toList()); + insertDocuments(documents); + } + + public List find() { + return find(codec); + } + + public Optional listSearchIndex(final String indexName) { + ListSearchIndexesOperation listSearchIndexesOperation = + new ListSearchIndexesOperation<>(namespace, codec, indexName, null, null, null, null, true); + BatchCursor cursor = listSearchIndexesOperation.execute(getBinding()); + + List results = new ArrayList<>(); + while (cursor.hasNext()) { + results.addAll(cursor.next()); + } + Assertions.assertTrue("Expected at most one result, but found " + results.size(), results.size() <= 1); + return results.isEmpty() ? Optional.empty() : Optional.of(results.get(0)); + } + + public void createSearchIndex(final SearchIndexRequest searchIndexModel) { + CreateSearchIndexesOperation searchIndexesOperation = + new CreateSearchIndexesOperation(namespace, singletonList(searchIndexModel)); + searchIndexesOperation.execute(getBinding()); + } + + public List find(final Codec codec) { + BatchCursor cursor = new FindOperation<>(namespace, codec) + .sort(new BsonDocument("_id", new BsonInt32(1))) + .execute(getBinding()); + List results = new ArrayList<>(); + while (cursor.hasNext()) { + results.addAll(cursor.next()); + } + return results; + } + + public void updateOne(final Bson filter, final Bson update) { + updateOne(filter, update, false); + } + + public void updateOne(final Bson filter, final Bson update, final boolean isUpsert) { + new MixedBulkWriteOperation(namespace, + singletonList(new UpdateRequest(filter.toBsonDocument(Document.class, registry), + update.toBsonDocument(Document.class, registry), + WriteRequest.Type.UPDATE) + .upsert(isUpsert)), + true, WriteConcern.ACKNOWLEDGED, false) + .execute(getBinding()); + } + + public void replaceOne(final Bson filter, final Bson update, final boolean isUpsert) { + new MixedBulkWriteOperation(namespace, + singletonList(new UpdateRequest(filter.toBsonDocument(Document.class, registry), + update.toBsonDocument(Document.class, registry), + WriteRequest.Type.REPLACE) + .upsert(isUpsert)), + true, WriteConcern.ACKNOWLEDGED, false) + .execute(getBinding()); + } + + public void deleteOne(final Bson filter) { + delete(filter, false); + } + + public void deleteMany(final Bson filter) { + delete(filter, true); + } + + private void delete(final Bson filter, final boolean multi) { + new MixedBulkWriteOperation(namespace, + singletonList(new DeleteRequest(filter.toBsonDocument(Document.class, 
registry)).multi(multi)), + true, WriteConcern.ACKNOWLEDGED, false) + .execute(getBinding()); + } + + public List find(final Bson filter) { + return find(filter, null); + } + + public List aggregate(final List pipeline) { + return aggregate(pipeline, codec); + } + + public List aggregate(final List pipeline, final Decoder decoder) { + return aggregate(pipeline, decoder, AggregationLevel.COLLECTION); + } + + public List aggregateDb(final List pipeline) { + return aggregate(pipeline, codec, AggregationLevel.DATABASE); + } + + private List aggregate(final List pipeline, final Decoder decoder, final AggregationLevel level) { + List bsonDocumentPipeline = new ArrayList<>(); + for (Bson cur : pipeline) { + bsonDocumentPipeline.add(cur.toBsonDocument(Document.class, registry)); + } + BatchCursor cursor = new AggregateOperation<>(namespace, bsonDocumentPipeline, decoder, level) + .execute(getBinding()); + List results = new ArrayList<>(); + while (cursor.hasNext()) { + results.addAll(cursor.next()); + } + return results; + } + + @SuppressWarnings("overloads") + public List find(final Bson filter, final Bson sort) { + return find(filter != null ? filter.toBsonDocument(Document.class, registry) : null, + sort != null ? sort.toBsonDocument(Document.class, registry) : null, + codec); + } + + @SuppressWarnings("overloads") + public List find(final Bson filter, final Bson sort, final Bson projection) { + return find(filter != null ? filter.toBsonDocument(Document.class, registry) : null, + sort != null ? sort.toBsonDocument(Document.class, registry) : null, + projection != null ? projection.toBsonDocument(Document.class, registry) : null, + codec); + } + + @SuppressWarnings("overloads") + public List find(final BsonDocument filter, final Decoder decoder) { + return find(filter, null, decoder); + } + + @SuppressWarnings("overloads") + public List find(final BsonDocument filter, final BsonDocument sort, final Decoder decoder) { + return find(filter, sort, null, decoder); + } + + public List find(final BsonDocument filter, final BsonDocument sort, final BsonDocument projection, final Decoder decoder) { + BatchCursor cursor = new FindOperation<>(namespace, decoder).filter(filter).sort(sort) + .projection(projection).execute(getBinding()); + List results = new ArrayList<>(); + while (cursor.hasNext()) { + results.addAll(cursor.next()); + } + return results; + } + + public long count() { + return count(getBinding()); + } + + public long count(final ReadBinding binding) { + return new CountDocumentsOperation(namespace).execute(binding); + } + + public long count(final AsyncReadWriteBinding binding) throws Throwable { + return executeAsync(new CountDocumentsOperation(namespace), binding); + } + + public long count(final Bson filter) { + return new CountDocumentsOperation(namespace) + .filter(toBsonDocument(filter)).execute(getBinding()); + } + + public BsonDocument wrap(final Document document) { + return new BsonDocumentWrapper<>(document, new DocumentCodec()); + } + + public BsonDocument toBsonDocument(final Bson document) { + return document.toBsonDocument(BsonDocument.class, registry); + } + + public void createIndex(final BsonDocument key) { + new CreateIndexesOperation(namespace, singletonList(new IndexRequest(key)), WriteConcern.ACKNOWLEDGED) + .execute(getBinding()); + } + + public void createIndex(final Document key) { + new CreateIndexesOperation(namespace, singletonList(new IndexRequest(wrap(key))), WriteConcern.ACKNOWLEDGED) + .execute(getBinding()); + } + + public void createUniqueIndex(final 
Document key) { + new CreateIndexesOperation(namespace, singletonList(new IndexRequest(wrap(key)).unique(true)), + WriteConcern.ACKNOWLEDGED) + .execute(getBinding()); + } + + public void createIndex(final Document key, final String defaultLanguage) { + new CreateIndexesOperation(namespace, + singletonList(new IndexRequest(wrap(key)).defaultLanguage(defaultLanguage)), WriteConcern.ACKNOWLEDGED).execute(getBinding()); + } + + public void createIndex(final Bson key) { + new CreateIndexesOperation(namespace, + singletonList(new IndexRequest(key.toBsonDocument(Document.class, registry))), WriteConcern.ACKNOWLEDGED).execute(getBinding()); + } + + public List listIndexes(){ + List indexes = new ArrayList<>(); + BatchCursor cursor = new ListIndexesOperation<>(namespace, new BsonDocumentCodec()) + .execute(getBinding()); + while (cursor.hasNext()) { + indexes.addAll(cursor.next()); + } + return indexes; + } + + public static void killAllSessions() { + try { + new CommandReadOperation<>("admin", + new BsonDocument("killAllSessions", new BsonArray()), new BsonDocumentCodec()).execute(getBinding()); + } catch (MongoCommandException e) { + // ignore exception caused by killing the implicit session that the killAllSessions command itself is running in + } + } + + public void renameCollection(final MongoNamespace newNamespace) { + try { + new CommandReadOperation<>("admin", + new BsonDocument("renameCollection", new BsonString(getNamespace().getFullName())) + .append("to", new BsonString(newNamespace.getFullName())), new BsonDocumentCodec()).execute(getBinding()); + } catch (MongoCommandException e) { + // do nothing + } + } + + public void runAdminCommand(final String command) { + runAdminCommand(BsonDocument.parse(command)); + } + + public void runAdminCommand(final BsonDocument command) { + new CommandReadOperation<>("admin", command, new BsonDocumentCodec()) + .execute(getBinding()); + } + + public void runAdminCommand(final BsonDocument command, final ReadPreference readPreference) { + new CommandReadOperation<>("admin", command, new BsonDocumentCodec()) + .execute(getBinding(readPreference)); + } +} diff --git a/driver-core/src/test/functional/com/mongodb/client/test/Worker.java b/driver-core/src/test/functional/com/mongodb/client/test/Worker.java new file mode 100644 index 00000000000..f7cdb4c9ed7 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/client/test/Worker.java @@ -0,0 +1,112 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.test; + +import org.bson.types.ObjectId; + +import java.util.Date; + +public final class Worker { + private final ObjectId id; + private final String name; + private final String jobTitle; + private final Date dateStarted; + private final int numberOfJobs; + + public Worker(final String name, final String jobTitle, final Date dateStarted, final int numberOfJobs) { + this(new ObjectId(), name, jobTitle, dateStarted, numberOfJobs); + } + + public Worker(final ObjectId id, final String name, final String jobTitle, final Date dateStarted, final int numberOfJobs) { + this.id = id; + this.name = name; + this.jobTitle = jobTitle; + this.dateStarted = dateStarted; + this.numberOfJobs = numberOfJobs; + } + + public ObjectId getId() { + return id; + } + + public String getName() { + return name; + } + + public String getJobTitle() { + return jobTitle; + } + + public Date getDateStarted() { + return dateStarted; + } + + public int getNumberOfJobs() { + return numberOfJobs; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Worker worker = (Worker) o; + + if (numberOfJobs != worker.numberOfJobs) { + return false; + } + if (!dateStarted.equals(worker.dateStarted)) { + return false; + } + if (!id.equals(worker.id)) { + return false; + } + if (!jobTitle.equals(worker.jobTitle)) { + return false; + } + if (!name.equals(worker.name)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = id.hashCode(); + result = 31 * result + name.hashCode(); + result = 31 * result + jobTitle.hashCode(); + result = 31 * result + dateStarted.hashCode(); + result = 31 * result + numberOfJobs; + return result; + } + + @Override + public String toString() { + return "Worker{" + + "id=" + id + + ", name='" + name + '\'' + + ", jobTitle='" + jobTitle + '\'' + + ", dateStarted=" + dateStarted + + ", numberOfJobs=" + numberOfJobs + + '}'; + } +} diff --git a/driver-core/src/test/functional/com/mongodb/client/test/WorkerCodec.java b/driver-core/src/test/functional/com/mongodb/client/test/WorkerCodec.java new file mode 100644 index 00000000000..300608b9e7b --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/client/test/WorkerCodec.java @@ -0,0 +1,72 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.test; + +import org.bson.BsonObjectId; +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.codecs.CollectibleCodec; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; +import org.bson.types.ObjectId; + +import java.util.Date; + +public final class WorkerCodec implements CollectibleCodec { + @Override + public boolean documentHasId(final Worker document) { + return true; + } + + @Override + public BsonObjectId getDocumentId(final Worker document) { + return new BsonObjectId(document.getId()); + } + + @Override + public Worker generateIdIfAbsentFromDocument(final Worker worker) { + return worker; + } + + @Override + public void encode(final BsonWriter writer, final Worker value, final EncoderContext encoderContext) { + writer.writeStartDocument(); + writer.writeObjectId("_id", value.getId()); + writer.writeString("name", value.getName()); + writer.writeString("jobTitle", value.getJobTitle()); + writer.writeDateTime("dateStarted", value.getDateStarted().getTime()); + writer.writeInt32("numberOfJobs", value.getNumberOfJobs()); + writer.writeEndDocument(); + } + + @Override + public Worker decode(final BsonReader reader, final DecoderContext decoderContext) { + reader.readStartDocument(); + ObjectId id = reader.readObjectId("_id"); + String name = reader.readString("name"); + String jobTitle = reader.readString("jobTitle"); + Date dateStarted = new Date(reader.readDateTime("dateStarted")); + int numberOfJobs = reader.readInt32("numberOfJobs"); + reader.readEndDocument(); + return new Worker(id, name, jobTitle, dateStarted, numberOfJobs); + } + + @Override + public Class getEncoderClass() { + return Worker.class; + } +} diff --git a/driver-core/src/test/functional/com/mongodb/client/test/WorkerCodecProvider.java b/driver-core/src/test/functional/com/mongodb/client/test/WorkerCodecProvider.java new file mode 100644 index 00000000000..e0f721ef283 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/client/test/WorkerCodecProvider.java @@ -0,0 +1,36 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.test; + +import org.bson.codecs.Codec; +import org.bson.codecs.configuration.CodecProvider; +import org.bson.codecs.configuration.CodecRegistry; + +/** + * + */ +@SuppressWarnings("unchecked") +public class WorkerCodecProvider implements CodecProvider { + @Override + @SuppressWarnings("unchecked") + public Codec get(final Class clazz, final CodecRegistry registry) { + if (clazz.equals(Worker.class)) { + return (Codec) new WorkerCodec(); + } + return null; + } +} diff --git a/driver-core/src/test/functional/com/mongodb/connection/ConnectionSpecification.groovy b/driver-core/src/test/functional/com/mongodb/connection/ConnectionSpecification.groovy new file mode 100644 index 00000000000..b3da89231e7 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/connection/ConnectionSpecification.groovy @@ -0,0 +1,71 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.connection + + +import com.mongodb.OperationFunctionalSpecification +import com.mongodb.internal.operation.CommandReadOperation +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.codecs.BsonDocumentCodec + +import static com.mongodb.ClusterFixture.getBinding +import static com.mongodb.ClusterFixture.LEGACY_HELLO +import static com.mongodb.connection.ConnectionDescription.getDefaultMaxMessageSize +import static com.mongodb.connection.ConnectionDescription.getDefaultMaxWriteBatchSize + +class ConnectionSpecification extends OperationFunctionalSpecification { + + def 'should have id'() { + when: + def source = getBinding().getReadConnectionSource() + def connection = source.connection + + then: + connection.getDescription().getConnectionId() != null + + cleanup: + connection?.release() + source?.release() + } + + def 'should have description'() { + when: + def commandResult = getHelloResult() + def expectedMaxMessageSize = commandResult.getNumber('maxMessageSizeBytes', + new BsonInt32(getDefaultMaxMessageSize())).intValue() + def expectedMaxBatchCount = commandResult.getNumber('maxWriteBatchSize', + new BsonInt32(getDefaultMaxWriteBatchSize())).intValue() + def source = getBinding().getReadConnectionSource() + def connection = source.connection + + then: + connection.description.serverAddress == source.getServerDescription().getAddress() + connection.description.serverType == source.getServerDescription().getType() + connection.description.maxDocumentSize == source.getServerDescription().getMaxDocumentSize() + connection.description.maxMessageSize == expectedMaxMessageSize + connection.description.maxBatchCount == expectedMaxBatchCount + + cleanup: + connection?.release() + source?.release() + } + private static BsonDocument getHelloResult() { + new CommandReadOperation('admin', new BsonDocument(LEGACY_HELLO, new BsonInt32(1)), + new BsonDocumentCodec()).execute(getBinding()) + } +} diff --git a/driver-core/src/test/functional/com/mongodb/connection/netty/NettyStreamSpecification.groovy 
b/driver-core/src/test/functional/com/mongodb/connection/netty/NettyStreamSpecification.groovy new file mode 100644 index 00000000000..e582e0fc398 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/connection/netty/NettyStreamSpecification.groovy @@ -0,0 +1,125 @@ +package com.mongodb.connection.netty + +import com.mongodb.MongoSocketException +import com.mongodb.MongoSocketOpenException +import com.mongodb.ServerAddress +import com.mongodb.connection.AsyncCompletionHandler +import com.mongodb.connection.SocketSettings +import com.mongodb.connection.SslSettings +import com.mongodb.internal.connection.netty.NettyStreamFactory +import com.mongodb.spi.dns.InetAddressResolver +import io.netty.buffer.PooledByteBufAllocator +import io.netty.channel.nio.NioEventLoopGroup +import io.netty.channel.socket.nio.NioSocketChannel +import spock.lang.IgnoreIf +import spock.lang.Specification +import com.mongodb.spock.Slow + +import java.util.concurrent.CountDownLatch +import java.util.concurrent.TimeUnit + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ClusterFixture.getSslSettings + +class NettyStreamSpecification extends Specification { + + @Slow + @IgnoreIf({ getSslSettings().isEnabled() }) + def 'should successfully connect with working ip address group'() { + given: + SocketSettings socketSettings = SocketSettings.builder().connectTimeout(1000, TimeUnit.MILLISECONDS).build() + SslSettings sslSettings = SslSettings.builder().build() + def inetAddressResolver = new InetAddressResolver() { + @Override + List lookupByName(String host) { + [InetAddress.getByName('192.168.255.255'), + InetAddress.getByName('1.2.3.4'), + InetAddress.getByName('127.0.0.1')] + } + } + def factory = new NettyStreamFactory(inetAddressResolver, socketSettings, sslSettings, new NioEventLoopGroup(), + NioSocketChannel, PooledByteBufAllocator.DEFAULT, null) + + def stream = factory.create(new ServerAddress()) + + when: + stream.open(OPERATION_CONTEXT) + + then: + !stream.isClosed() + } + + @Slow + @IgnoreIf({ getSslSettings().isEnabled() }) + def 'should throw exception with non-working ip address group'() { + given: + SocketSettings socketSettings = SocketSettings.builder().connectTimeout(1000, TimeUnit.MILLISECONDS).build() + SslSettings sslSettings = SslSettings.builder().build() + def inetAddressResolver = new InetAddressResolver() { + @Override + List lookupByName(String host) { + [InetAddress.getByName('192.168.255.255'), + InetAddress.getByName('1.2.3.4'), + InetAddress.getByName('1.2.3.5')] + } + } + def factory = new NettyStreamFactory(inetAddressResolver, socketSettings, sslSettings, new NioEventLoopGroup(), + NioSocketChannel, PooledByteBufAllocator.DEFAULT, null) + + def stream = factory.create(new ServerAddress()) + + when: + stream.open(OPERATION_CONTEXT) + + then: + thrown(MongoSocketOpenException) + } + + @IgnoreIf({ getSslSettings().isEnabled() }) + def 'should fail AsyncCompletionHandler if name resolution fails'() { + given: + def serverAddress = Stub(ServerAddress) + def exception = new MongoSocketException('Temporary failure in name resolution', serverAddress) + serverAddress.getSocketAddresses() >> { throw exception } + + SocketSettings socketSettings = SocketSettings.builder().connectTimeout(1000, TimeUnit.MILLISECONDS).build() + SslSettings sslSettings = SslSettings.builder().build() + def inetAddressResolver = new InetAddressResolver() { + @Override + List lookupByName(String host) { + throw exception + } + } + def stream = new 
NettyStreamFactory(inetAddressResolver, socketSettings, sslSettings, new NioEventLoopGroup(), + NioSocketChannel, PooledByteBufAllocator.DEFAULT, null) + .create(new ServerAddress()) + def callback = new CallbackErrorHolder() + + when: + stream.openAsync(OPERATION_CONTEXT, callback) + + then: + callback.getError().is(exception) + } + + class CallbackErrorHolder implements AsyncCompletionHandler { + CountDownLatch latch = new CountDownLatch(1) + Throwable throwable = null + + Throwable getError() { + latch.countDown() + throwable + } + + @Override + void completed(Void r) { + latch.await() + } + + @Override + void failed(Throwable t) { + throwable = t + latch.countDown() + } + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncOperationContextBinding.java b/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncOperationContextBinding.java new file mode 100644 index 00000000000..17b1a1c4a7e --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncOperationContextBinding.java @@ -0,0 +1,145 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.binding; + +import com.mongodb.ReadPreference; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.connection.AsyncConnection; +import com.mongodb.internal.connection.OperationContext; + +import static org.bson.assertions.Assertions.notNull; + +public final class AsyncOperationContextBinding implements AsyncReadWriteBinding { + + private final AsyncReadWriteBinding wrapped; + private final OperationContext operationContext; + + public AsyncOperationContextBinding(final AsyncReadWriteBinding wrapped, final OperationContext operationContext) { + this.wrapped = notNull("wrapped", wrapped); + this.operationContext = notNull("operationContext", operationContext); + } + + @Override + public ReadPreference getReadPreference() { + return wrapped.getReadPreference(); + } + + @Override + public void getWriteConnectionSource(final SingleResultCallback callback) { + wrapped.getWriteConnectionSource((result, t) -> { + if (t != null) { + callback.onResult(null, t); + } else { + callback.onResult(new SessionBindingAsyncConnectionSource(result), null); + } + }); + } + + @Override + public OperationContext getOperationContext() { + return operationContext; + } + + @Override + public void getReadConnectionSource(final SingleResultCallback callback) { + wrapped.getReadConnectionSource((result, t) -> { + if (t != null) { + callback.onResult(null, t); + } else { + callback.onResult(new SessionBindingAsyncConnectionSource(result), null); + } + }); + } + + + @Override + public void getReadConnectionSource(final int minWireVersion, final ReadPreference fallbackReadPreference, + final SingleResultCallback callback) { + wrapped.getReadConnectionSource(minWireVersion, fallbackReadPreference, (result, t) -> { + if (t != null) { + 
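+                // Propagate the failure as-is; only successful sources are wrapped with the overriding operation context.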
callback.onResult(null, t); + } else { + callback.onResult(new SessionBindingAsyncConnectionSource(result), null); + } + }); + } + + @Override + public int getCount() { + return wrapped.getCount(); + } + + @Override + public AsyncReadWriteBinding retain() { + wrapped.retain(); + return this; + } + + @Override + public int release() { + return wrapped.release(); + } + + private class SessionBindingAsyncConnectionSource implements AsyncConnectionSource { + private final AsyncConnectionSource wrapped; + + SessionBindingAsyncConnectionSource(final AsyncConnectionSource wrapped) { + this.wrapped = wrapped; + } + + @Override + public ServerDescription getServerDescription() { + return wrapped.getServerDescription(); + } + + @Override + public OperationContext getOperationContext() { + return operationContext; + } + + @Override + public ReadPreference getReadPreference() { + return wrapped.getReadPreference(); + } + + @Override + public void getConnection(final SingleResultCallback callback) { + wrapped.getConnection(callback); + } + + @Override + public int getCount() { + return wrapped.getCount(); + } + + @Override + public AsyncConnectionSource retain() { + wrapped.retain(); + return this; + } + + @Override + public int release() { + return wrapped.release(); + } + } + + public AsyncReadWriteBinding getWrapped() { + return wrapped; + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncSessionBinding.java b/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncSessionBinding.java new file mode 100644 index 00000000000..fa588a340d0 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncSessionBinding.java @@ -0,0 +1,145 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.binding; + +import com.mongodb.ReadPreference; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.connection.AsyncConnection; +import com.mongodb.internal.connection.OperationContext; + +import static org.bson.assertions.Assertions.notNull; + +public final class AsyncSessionBinding implements AsyncReadWriteBinding { + + private final AsyncReadWriteBinding wrapped; + private final OperationContext operationContext; + + public AsyncSessionBinding(final AsyncReadWriteBinding wrapped) { + this.wrapped = notNull("wrapped", wrapped); + this.operationContext = wrapped.getOperationContext().withSessionContext(new SimpleSessionContext()); + } + + @Override + public ReadPreference getReadPreference() { + return wrapped.getReadPreference(); + } + + @Override + public void getWriteConnectionSource(final SingleResultCallback callback) { + wrapped.getWriteConnectionSource((result, t) -> { + if (t != null) { + callback.onResult(null, t); + } else { + callback.onResult(new SessionBindingAsyncConnectionSource(result), null); + } + }); + } + + @Override + public OperationContext getOperationContext() { + return operationContext; + } + + @Override + public void getReadConnectionSource(final SingleResultCallback callback) { + wrapped.getReadConnectionSource((result, t) -> { + if (t != null) { + callback.onResult(null, t); + } else { + callback.onResult(new SessionBindingAsyncConnectionSource(result), null); + } + }); + } + + + @Override + public void getReadConnectionSource(final int minWireVersion, final ReadPreference fallbackReadPreference, + final SingleResultCallback callback) { + wrapped.getReadConnectionSource(minWireVersion, fallbackReadPreference, (result, t) -> { + if (t != null) { + callback.onResult(null, t); + } else { + callback.onResult(new SessionBindingAsyncConnectionSource(result), null); + } + }); + } + + @Override + public int getCount() { + return wrapped.getCount(); + } + + @Override + public AsyncReadWriteBinding retain() { + wrapped.retain(); + return this; + } + + @Override + public int release() { + return wrapped.release(); + } + + private class SessionBindingAsyncConnectionSource implements AsyncConnectionSource { + private final AsyncConnectionSource wrapped; + + SessionBindingAsyncConnectionSource(final AsyncConnectionSource wrapped) { + this.wrapped = wrapped; + } + + @Override + public ServerDescription getServerDescription() { + return wrapped.getServerDescription(); + } + + @Override + public OperationContext getOperationContext() { + return operationContext; + } + + @Override + public ReadPreference getReadPreference() { + return wrapped.getReadPreference(); + } + + @Override + public void getConnection(final SingleResultCallback callback) { + wrapped.getConnection(callback); + } + + @Override + public int getCount() { + return wrapped.getCount(); + } + + @Override + public AsyncConnectionSource retain() { + wrapped.retain(); + return this; + } + + @Override + public int release() { + return wrapped.release(); + } + } + + public AsyncReadWriteBinding getWrapped() { + return wrapped; + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncSessionBindingSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncSessionBindingSpecification.groovy new file mode 100644 index 00000000000..87fa1b9c4ff --- /dev/null +++ 
b/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncSessionBindingSpecification.groovy @@ -0,0 +1,76 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.binding + +import com.mongodb.internal.async.SingleResultCallback +import spock.lang.Specification + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT + +class AsyncSessionBindingSpecification extends Specification { + + def 'should wrap the passed in async binding'() { + given: + def wrapped = Mock(AsyncReadWriteBinding) + wrapped.getOperationContext() >> OPERATION_CONTEXT + def binding = new AsyncSessionBinding(wrapped) + + when: + binding.getCount() + + then: + 1 * wrapped.getCount() + + when: + binding.getReadPreference() + + then: + 1 * wrapped.getReadPreference() + + when: + binding.retain() + + then: + 1 * wrapped.retain() + + when: + binding.release() + + then: + 1 * wrapped.release() + + when: + binding.getReadConnectionSource(Stub(SingleResultCallback)) + + then: + 1 * wrapped.getReadConnectionSource(_) + + when: + binding.getWriteConnectionSource(Stub(SingleResultCallback)) + + then: + 1 * wrapped.getWriteConnectionSource(_) + + when: + def context = binding.getOperationContext().getSessionContext() + + then: + 0 * wrapped.getOperationContext().getSessionContext() + context instanceof SimpleSessionContext + } + +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncSingleConnectionBinding.java b/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncSingleConnectionBinding.java new file mode 100644 index 00000000000..3fff8b66e06 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/binding/AsyncSingleConnectionBinding.java @@ -0,0 +1,215 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.binding; + +import com.mongodb.MongoInternalException; +import com.mongodb.MongoTimeoutException; +import com.mongodb.ReadPreference; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.connection.AsyncConnection; +import com.mongodb.internal.connection.Cluster; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.connection.Server; +import com.mongodb.internal.selector.ReadPreferenceServerSelector; +import com.mongodb.internal.selector.WritableServerSelector; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.ReadPreference.primary; +import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.assertions.Assertions.notNull; + +/** + * An asynchronous binding that ensures that all reads use the same connection, and all writes use the same connection. + * + *

+ * <p>If the readPreference is {@link ReadPreference#primary()} then all reads and writes will use the same connection.

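+ * <p>A minimal usage sketch for an async operation test; {@code cluster}, {@code namespace},
+ * {@code OPERATION_CONTEXT} and {@code executeAsync} are assumed to be supplied by the surrounding
+ * test fixtures (for example {@code ClusterFixture}):</p>
+ * <pre>{@code
+ * AsyncReadWriteBinding binding =
+ *         new AsyncSingleConnectionBinding(cluster, ReadPreference.secondary(), OPERATION_CONTEXT);
+ * try {
+ *     executeAsync(new CountDocumentsOperation(namespace), binding);
+ * } finally {
+ *     binding.release(); // releases both pinned connections once the reference count reaches zero
+ * }
+ * }</pre>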
+ */ +public class AsyncSingleConnectionBinding extends AbstractReferenceCounted implements AsyncReadWriteBinding { + private final ReadPreference readPreference; + private AsyncConnection readConnection; + private AsyncConnection writeConnection; + private volatile Server readServer; + private volatile Server writeServer; + private volatile ServerDescription readServerDescription; + private volatile ServerDescription writeServerDescription; + private final OperationContext operationContext; + + /** + * Create a new binding with the given cluster. + * + * @param cluster a non-null Cluster which will be used to select a server to bind to + * @param readPreference the readPreference for reads, if not primary a separate connection will be used for reads + * @param operationContext the operation context + */ + public AsyncSingleConnectionBinding(final Cluster cluster, final ReadPreference readPreference, final OperationContext operationContext) { + notNull("cluster", cluster); + this.operationContext = operationContext; + this.readPreference = notNull("readPreference", readPreference); + CountDownLatch latch = new CountDownLatch(2); + cluster.selectServerAsync(new WritableServerSelector(), operationContext, (result, t) -> { + if (t == null) { + writeServer = result.getServer(); + writeServerDescription = result.getServerDescription(); + latch.countDown(); + } + }); + cluster.selectServerAsync(new ReadPreferenceServerSelector(readPreference), operationContext, (result, t) -> { + if (t == null) { + readServer = result.getServer(); + readServerDescription = result.getServerDescription(); + latch.countDown(); + } + }); + + awaitLatch(latch); + + if (writeServer == null || readServer == null) { + throw new MongoInternalException("Failure to select server"); + } + + CountDownLatch writeServerLatch = new CountDownLatch(1); + writeServer.getConnectionAsync(operationContext, (result, t) -> { + writeConnection = result; + writeServerLatch.countDown(); + }); + + awaitLatch(writeServerLatch); + + if (writeConnection == null) { + throw new MongoInternalException("Failure to get connection"); + } + + CountDownLatch readServerLatch = new CountDownLatch(1); + + readServer.getConnectionAsync(operationContext, (result, t) -> { + readConnection = result; + readServerLatch.countDown(); + }); + awaitLatch(readServerLatch); + + if (readConnection == null) { + throw new MongoInternalException("Failure to get connection"); + } + } + + private void awaitLatch(final CountDownLatch latch) { + try { + if (!latch.await(operationContext.getTimeoutContext().timeoutOrAlternative(10000), TimeUnit.MILLISECONDS)) { + throw new MongoTimeoutException("Failed to get servers"); + } + } catch (InterruptedException e) { + throw new MongoInternalException(e.getMessage(), e); + } + } + + @Override + public AsyncReadWriteBinding retain() { + super.retain(); + return this; + } + + @Override + public ReadPreference getReadPreference() { + return readPreference; + } + + @Override + public OperationContext getOperationContext() { + return operationContext; + } + + @Override + public void getReadConnectionSource(final SingleResultCallback callback) { + isTrue("open", getCount() > 0); + if (readPreference == primary()) { + getWriteConnectionSource(callback); + } else { + callback.onResult(new SingleAsyncConnectionSource(readServerDescription, readConnection), null); + } + } + + @Override + public void getReadConnectionSource(final int minWireVersion, final ReadPreference fallbackReadPreference, + final SingleResultCallback callback) { + 
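+        // The wire-version and fallback read-preference hints are ignored: this binding is pinned to
+        // the connections that were selected when it was constructed.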
getReadConnectionSource(callback); + } + + @Override + public void getWriteConnectionSource(final SingleResultCallback callback) { + isTrue("open", getCount() > 0); + callback.onResult(new SingleAsyncConnectionSource(writeServerDescription, writeConnection), null); + } + + @Override + public int release() { + int count = super.release(); + if (count == 0) { + readConnection.release(); + writeConnection.release(); + } + return count; + } + + private final class SingleAsyncConnectionSource extends AbstractReferenceCounted implements AsyncConnectionSource { + private final ServerDescription serverDescription; + private final AsyncConnection connection; + + private SingleAsyncConnectionSource(final ServerDescription serverDescription, + final AsyncConnection connection) { + this.serverDescription = serverDescription; + this.connection = connection; + AsyncSingleConnectionBinding.this.retain(); + } + + @Override + public ServerDescription getServerDescription() { + return serverDescription; + } + + @Override + public OperationContext getOperationContext() { + return operationContext; + } + + @Override + public ReadPreference getReadPreference() { + return readPreference; + } + + @Override + public void getConnection(final SingleResultCallback callback) { + isTrue("open", getCount() > 0); + callback.onResult(connection.retain(), null); + } + + public AsyncConnectionSource retain() { + super.retain(); + return this; + } + + @Override + public int release() { + int count = super.release(); + if (count == 0) { + AsyncSingleConnectionBinding.this.release(); + } + return count; + } + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/binding/OperationContextBinding.java b/driver-core/src/test/functional/com/mongodb/internal/binding/OperationContextBinding.java new file mode 100644 index 00000000000..6af3f4520d4 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/binding/OperationContextBinding.java @@ -0,0 +1,123 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.binding; + +import com.mongodb.ReadPreference; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.connection.OperationContext; + +import static org.bson.assertions.Assertions.notNull; + +public class OperationContextBinding implements ReadWriteBinding { + private final ReadWriteBinding wrapped; + private final OperationContext operationContext; + + public OperationContextBinding(final ReadWriteBinding wrapped, final OperationContext operationContext) { + this.wrapped = notNull("wrapped", wrapped); + this.operationContext = notNull("operationContext", operationContext); + } + + @Override + public ReadPreference getReadPreference() { + return wrapped.getReadPreference(); + } + + @Override + public int getCount() { + return wrapped.getCount(); + } + + @Override + public ReadWriteBinding retain() { + wrapped.retain(); + return this; + } + + @Override + public int release() { + return wrapped.release(); + } + + @Override + public ConnectionSource getReadConnectionSource() { + return new SessionBindingConnectionSource(wrapped.getReadConnectionSource()); + } + + @Override + public ConnectionSource getReadConnectionSource(final int minWireVersion, final ReadPreference fallbackReadPreference) { + return new SessionBindingConnectionSource(wrapped.getReadConnectionSource(minWireVersion, fallbackReadPreference)); + } + + @Override + public OperationContext getOperationContext() { + return operationContext; + } + + @Override + public ConnectionSource getWriteConnectionSource() { + return new SessionBindingConnectionSource(wrapped.getWriteConnectionSource()); + } + + private class SessionBindingConnectionSource implements ConnectionSource { + private ConnectionSource wrapped; + + SessionBindingConnectionSource(final ConnectionSource wrapped) { + this.wrapped = wrapped; + } + + @Override + public ServerDescription getServerDescription() { + return wrapped.getServerDescription(); + } + + @Override + public OperationContext getOperationContext() { + return operationContext; + } + + @Override + public ReadPreference getReadPreference() { + return wrapped.getReadPreference(); + } + + @Override + public Connection getConnection() { + return wrapped.getConnection(); + } + + @Override + public ConnectionSource retain() { + wrapped = wrapped.retain(); + return this; + } + + @Override + public int getCount() { + return wrapped.getCount(); + } + + @Override + public int release() { + return wrapped.release(); + } + } + + public ReadWriteBinding getWrapped() { + return wrapped; + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/binding/SessionBinding.java b/driver-core/src/test/functional/com/mongodb/internal/binding/SessionBinding.java new file mode 100644 index 00000000000..3a2666a8093 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/binding/SessionBinding.java @@ -0,0 +1,123 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.binding; + +import com.mongodb.ReadPreference; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.connection.OperationContext; + +import static org.bson.assertions.Assertions.notNull; + +public class SessionBinding implements ReadWriteBinding { + private final ReadWriteBinding wrapped; + private final OperationContext operationContext; + + public SessionBinding(final ReadWriteBinding wrapped) { + this.wrapped = notNull("wrapped", wrapped); + this.operationContext = wrapped.getOperationContext().withSessionContext(new SimpleSessionContext()); + } + + @Override + public ReadPreference getReadPreference() { + return wrapped.getReadPreference(); + } + + @Override + public int getCount() { + return wrapped.getCount(); + } + + @Override + public ReadWriteBinding retain() { + wrapped.retain(); + return this; + } + + @Override + public int release() { + return wrapped.release(); + } + + @Override + public ConnectionSource getReadConnectionSource() { + return new SessionBindingConnectionSource(wrapped.getReadConnectionSource()); + } + + @Override + public ConnectionSource getReadConnectionSource(final int minWireVersion, final ReadPreference fallbackReadPreference) { + return new SessionBindingConnectionSource(wrapped.getReadConnectionSource(minWireVersion, fallbackReadPreference)); + } + + @Override + public OperationContext getOperationContext() { + return operationContext; + } + + @Override + public ConnectionSource getWriteConnectionSource() { + return new SessionBindingConnectionSource(wrapped.getWriteConnectionSource()); + } + + private class SessionBindingConnectionSource implements ConnectionSource { + private ConnectionSource wrapped; + + SessionBindingConnectionSource(final ConnectionSource wrapped) { + this.wrapped = wrapped; + } + + @Override + public ServerDescription getServerDescription() { + return wrapped.getServerDescription(); + } + + @Override + public OperationContext getOperationContext() { + return operationContext; + } + + @Override + public ReadPreference getReadPreference() { + return wrapped.getReadPreference(); + } + + @Override + public Connection getConnection() { + return wrapped.getConnection(); + } + + @Override + public ConnectionSource retain() { + wrapped = wrapped.retain(); + return this; + } + + @Override + public int getCount() { + return wrapped.getCount(); + } + + @Override + public int release() { + return wrapped.release(); + } + } + + public ReadWriteBinding getWrapped() { + return wrapped; + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/binding/SimpleSessionContext.java b/driver-core/src/test/functional/com/mongodb/internal/binding/SimpleSessionContext.java new file mode 100644 index 00000000000..ee258fb28cf --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/binding/SimpleSessionContext.java @@ -0,0 +1,151 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.binding; + +import com.mongodb.ReadConcern; +import com.mongodb.internal.session.SessionContext; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonDocumentWriter; +import org.bson.BsonTimestamp; +import org.bson.UuidRepresentation; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.UuidCodec; + +import java.util.UUID; + +public class SimpleSessionContext implements SessionContext { + private final BsonDocument sessionId; + private BsonTimestamp operationTime; + private long counter; + private BsonDocument clusterTime; + + public SimpleSessionContext() { + this.sessionId = createNewServerSessionIdentifier(); + } + + @Override + public boolean hasSession() { + return true; + } + + @Override + public boolean isImplicitSession() { + return true; + } + + @Override + public BsonDocument getSessionId() { + return sessionId; + } + + @Override + public boolean isCausallyConsistent() { + return false; + } + + @Override + public long getTransactionNumber() { + throw new UnsupportedOperationException(); + } + + @Override + public long advanceTransactionNumber() { + counter++; + return counter; + } + + @Override + public boolean notifyMessageSent() { + return false; + } + + @Override + public BsonTimestamp getOperationTime() { + return operationTime; + } + + @Override + public void advanceOperationTime(final BsonTimestamp operationTime) { + this.operationTime = operationTime; + } + + @Override + public BsonDocument getClusterTime() { + return clusterTime; + } + + @Override + public void advanceClusterTime(final BsonDocument clusterTime) { + this.clusterTime = clusterTime; + } + + @Override + public boolean isSnapshot() { + return false; + } + + @Override + public void setSnapshotTimestamp(final BsonTimestamp snapshotTimestamp) { + } + + @Override + @Nullable + public BsonTimestamp getSnapshotTimestamp() { + return null; + } + + @Override + public boolean hasActiveTransaction() { + return false; + } + + @Override + public ReadConcern getReadConcern() { + return ReadConcern.DEFAULT; + } + + @Override + public void setRecoveryToken(final BsonDocument recoveryToken) { + throw new UnsupportedOperationException(); + } + + @Override + public void clearTransactionContext() { + throw new UnsupportedOperationException(); + } + + @Override + public void markSessionDirty() { + } + + @Override + public boolean isSessionMarkedDirty() { + return false; + } + + private static BsonDocument createNewServerSessionIdentifier() { + UuidCodec uuidCodec = new UuidCodec(UuidRepresentation.STANDARD); + BsonDocument holder = new BsonDocument(); + BsonDocumentWriter bsonDocumentWriter = new BsonDocumentWriter(holder); + bsonDocumentWriter.writeStartDocument(); + bsonDocumentWriter.writeName("id"); + uuidCodec.encode(bsonDocumentWriter, UUID.randomUUID(), EncoderContext.builder().build()); + bsonDocumentWriter.writeEndDocument(); + return holder; + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/binding/SingleConnectionBinding.java 
b/driver-core/src/test/functional/com/mongodb/internal/binding/SingleConnectionBinding.java new file mode 100644 index 00000000000..6bf3cff636d --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/binding/SingleConnectionBinding.java @@ -0,0 +1,170 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.binding; + +import com.mongodb.ReadPreference; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.connection.Cluster; +import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.connection.ServerTuple; +import com.mongodb.internal.selector.ReadPreferenceServerSelector; +import com.mongodb.internal.selector.WritableServerSelector; + +import static com.mongodb.ReadPreference.primary; +import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A binding that ensures that all reads use the same connection, and all writes use the same connection. + * + *

+ * <p>If the readPreference is {@link ReadPreference#primary()} then all reads and writes will use the same connection.

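+ * <p>A minimal usage sketch for a synchronous operation test; {@code cluster}, {@code namespace} and
+ * {@code OPERATION_CONTEXT} are assumed to be supplied by the surrounding test fixtures:</p>
+ * <pre>{@code
+ * ReadWriteBinding binding = new SingleConnectionBinding(cluster, ReadPreference.primary(), OPERATION_CONTEXT);
+ * try {
+ *     new CountDocumentsOperation(namespace).execute(binding);
+ * } finally {
+ *     binding.release(); // releases both pinned connections once the reference count reaches zero
+ * }
+ * }</pre>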
+ */ +public class SingleConnectionBinding implements ReadWriteBinding { + private final ReadPreference readPreference; + private final Connection readConnection; + private final Connection writeConnection; + private final ServerDescription readServerDescription; + private final ServerDescription writeServerDescription; + private int count = 1; + private final OperationContext operationContext; + + /** + * Create a new binding with the given cluster. + * + * @param cluster a non-null Cluster which will be used to select a server to bind to + * @param readPreference the readPreference for reads, if not primary a separate connection will be used for reads + * + */ + public SingleConnectionBinding(final Cluster cluster, final ReadPreference readPreference, final OperationContext operationContext) { + notNull("cluster", cluster); + this.readPreference = notNull("readPreference", readPreference); + this.operationContext = operationContext; + ServerTuple writeServerTuple = cluster.selectServer(new WritableServerSelector(), operationContext); + writeServerDescription = writeServerTuple.getServerDescription(); + writeConnection = writeServerTuple.getServer().getConnection(operationContext); + ServerTuple readServerTuple = cluster.selectServer(new ReadPreferenceServerSelector(readPreference), operationContext); + readServerDescription = readServerTuple.getServerDescription(); + readConnection = readServerTuple.getServer().getConnection(operationContext); + } + + @Override + public int getCount() { + return count; + } + + @Override + public SingleConnectionBinding retain() { + count++; + return this; + } + + @Override + public int release() { + count--; + if (count == 0) { + writeConnection.release(); + readConnection.release(); + } + return count; + } + + @Override + public ReadPreference getReadPreference() { + isTrue("open", getCount() > 0); + return readPreference; + } + + @Override + public ConnectionSource getReadConnectionSource() { + isTrue("open", getCount() > 0); + if (readPreference == primary()) { + return getWriteConnectionSource(); + } else { + return new SingleConnectionSource(readServerDescription, readConnection); + } + } + + @Override + public ConnectionSource getReadConnectionSource(final int minWireVersion, final ReadPreference fallbackReadPreference) { + throw new UnsupportedOperationException(); + } + + @Override + public OperationContext getOperationContext() { + return operationContext; + } + + @Override + public ConnectionSource getWriteConnectionSource() { + isTrue("open", getCount() > 0); + return new SingleConnectionSource(writeServerDescription, writeConnection); + } + + private final class SingleConnectionSource implements ConnectionSource { + private final ServerDescription serverDescription; + private final Connection connection; + private int count = 1; + + SingleConnectionSource(final ServerDescription serverDescription, final Connection connection) { + this.serverDescription = serverDescription; + this.connection = connection; + SingleConnectionBinding.this.retain(); + } + + @Override + public ServerDescription getServerDescription() { + return serverDescription; + } + + @Override + public OperationContext getOperationContext() { + return operationContext; + } + + @Override + public ReadPreference getReadPreference() { + return readPreference; + } + + @Override + public Connection getConnection() { + isTrue("open", getCount() > 0); + return connection.retain(); + } + + @Override + public int getCount() { + return count; + } + + @Override + public 
SingleConnectionSource retain() { + count++; + return this; + } + + @Override + public int release() { + count--; + if (count == 0) { + SingleConnectionBinding.this.release(); + } + return count; + } + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/capi/MongoCryptHelperTest.java b/driver-core/src/test/functional/com/mongodb/internal/capi/MongoCryptHelperTest.java new file mode 100644 index 00000000000..832e0d5eeb3 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/capi/MongoCryptHelperTest.java @@ -0,0 +1,154 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.capi; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.MongoClientException; +import com.mongodb.MongoClientSettings; +import com.mongodb.client.model.vault.RewrapManyDataKeyOptions; +import com.mongodb.internal.crypt.capi.MongoCryptOptions; +import org.bson.BsonDocument; +import org.junit.jupiter.api.Test; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.internal.capi.MongoCryptHelper.isMongocryptdSpawningDisabled; +import static com.mongodb.internal.capi.MongoCryptHelper.validateRewrapManyDataKeyOptions; +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class MongoCryptHelperTest { + + @Test + public void createsExpectedMongoCryptOptionsUsingClientEncryptionSettings() { + + Map> kmsProvidersRaw = new HashMap<>(); + kmsProvidersRaw.put("provider", new HashMap(){{ + put("test", "test"); + }}); + + ClientEncryptionSettings settings = ClientEncryptionSettings + .builder() + .kmsProviders(kmsProvidersRaw) + .keyVaultMongoClientSettings(MongoClientSettings.builder().build()) + .keyVaultNamespace("a.b") + .build(); + MongoCryptOptions mongoCryptOptions = MongoCryptHelper.createMongoCryptOptions(settings); + + + BsonDocument expectedKmsProviders = BsonDocument.parse("{provider: {test: 'test'}}"); + MongoCryptOptions expectedMongoCryptOptions = MongoCryptOptions + .builder() + .kmsProviderOptions(expectedKmsProviders) + .needsKmsCredentialsStateEnabled(true) + .build(); + + assertMongoCryptOptions(expectedMongoCryptOptions, mongoCryptOptions); + } + + @Test + public void createsExpectedMongoCryptOptionsUsingAutoEncryptionSettings() { + + Map> kmsProvidersRaw = new HashMap<>(); + kmsProvidersRaw.put("provider", new HashMap(){{ + put("test", "test"); + }}); + + AutoEncryptionSettings.Builder 
autoEncryptionSettingsBuilder = AutoEncryptionSettings + .builder() + .kmsProviders(kmsProvidersRaw) + .keyVaultNamespace("a.b"); + MongoCryptOptions mongoCryptOptions = MongoCryptHelper.createMongoCryptOptions(autoEncryptionSettingsBuilder.build()); + + BsonDocument expectedKmsProviders = BsonDocument.parse("{provider: {test: 'test'}}"); + MongoCryptOptions.Builder mongoCryptOptionsBuilder = MongoCryptOptions + .builder() + .kmsProviderOptions(expectedKmsProviders) + .needsKmsCredentialsStateEnabled(true) + .encryptedFieldsMap(emptyMap()) + .localSchemaMap(emptyMap()) + .searchPaths(singletonList("$SYSTEM")); + + assertMongoCryptOptions(mongoCryptOptionsBuilder.build(), mongoCryptOptions); + + // Ensure can set key expiration + autoEncryptionSettingsBuilder.keyExpiration(10L, TimeUnit.SECONDS); + mongoCryptOptions = MongoCryptHelper.createMongoCryptOptions(autoEncryptionSettingsBuilder.build()); + assertMongoCryptOptions(mongoCryptOptionsBuilder.keyExpirationMS(10_000L).build(), mongoCryptOptions); + + // Ensure search Paths is empty when bypassAutoEncryption is true + autoEncryptionSettingsBuilder.bypassAutoEncryption(true); + mongoCryptOptions = MongoCryptHelper.createMongoCryptOptions(autoEncryptionSettingsBuilder.build()); + assertMongoCryptOptions(mongoCryptOptionsBuilder.searchPaths(emptyList()).build(), mongoCryptOptions); + } + + @Test + public void validateRewrapManyDataKeyOptionsTest() { + // Happy path + assertDoesNotThrow(() -> validateRewrapManyDataKeyOptions(new RewrapManyDataKeyOptions())); + assertDoesNotThrow(() -> validateRewrapManyDataKeyOptions(new RewrapManyDataKeyOptions().provider("AWS"))); + + // Failure + assertThrows(MongoClientException.class, () -> validateRewrapManyDataKeyOptions(new RewrapManyDataKeyOptions().masterKey(new BsonDocument()))); + } + + @Test + public void isMongocryptdSpawningDisabledTest() { + assertTrue(isMongocryptdSpawningDisabled(null, + initializeAutoEncryptionSettingsBuilder().bypassAutoEncryption(true).build())); + assertTrue(isMongocryptdSpawningDisabled(null, + initializeAutoEncryptionSettingsBuilder().bypassQueryAnalysis(true).build())); + assertTrue(isMongocryptdSpawningDisabled(null, + initializeAutoEncryptionSettingsBuilder().extraOptions(singletonMap("cryptSharedLibRequired", true)).build())); + assertTrue(isMongocryptdSpawningDisabled("/path/to/shared/lib.so", + initializeAutoEncryptionSettingsBuilder().build())); + assertFalse(isMongocryptdSpawningDisabled(null, + initializeAutoEncryptionSettingsBuilder().build())); + assertFalse(isMongocryptdSpawningDisabled("", + initializeAutoEncryptionSettingsBuilder().build())); + } + + private static AutoEncryptionSettings.Builder initializeAutoEncryptionSettingsBuilder() { + AutoEncryptionSettings.Builder builder = AutoEncryptionSettings.builder() + .keyVaultNamespace("test.vault") + .kmsProviders(singletonMap("local", singletonMap("key", new byte[96]))); + return builder; + } + + void assertMongoCryptOptions(final MongoCryptOptions expected, final MongoCryptOptions actual) { + assertEquals(expected.getAwsKmsProviderOptions(), actual.getAwsKmsProviderOptions(), "AwsKmsProviderOptions not equal"); + assertEquals(expected.getEncryptedFieldsMap(), actual.getEncryptedFieldsMap(), "EncryptedFieldsMap not equal"); + assertEquals(expected.getExtraOptions(), actual.getExtraOptions(), "ExtraOptions not equal"); + assertEquals(expected.getKmsProviderOptions(), actual.getKmsProviderOptions(), "KmsProviderOptions not equal"); + assertEquals(expected.getLocalKmsProviderOptions(), 
actual.getLocalKmsProviderOptions(), "LocalKmsProviderOptions not equal"); + assertEquals(expected.getLocalSchemaMap(), actual.getLocalSchemaMap(), "LocalSchemaMap not equal"); + assertEquals(expected.getSearchPaths(), actual.getSearchPaths(), "SearchPaths not equal"); + assertEquals(expected.isBypassQueryAnalysis(), actual.isBypassQueryAnalysis(), "isBypassQueryAnalysis not equal"); + assertEquals(expected.isNeedsKmsCredentialsStateEnabled(), actual.isNeedsKmsCredentialsStateEnabled(), "isNeedsKmsCredentialsStateEnabled not equal"); + assertEquals(expected.getKeyExpirationMS(), actual.getKeyExpirationMS(), "keyExpirationMS not equal"); + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/AsyncSocketChannelStreamSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/AsyncSocketChannelStreamSpecification.groovy new file mode 100644 index 00000000000..85f23350984 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/AsyncSocketChannelStreamSpecification.groovy @@ -0,0 +1,119 @@ +package com.mongodb.internal.connection + +import com.mongodb.MongoSocketException +import com.mongodb.MongoSocketOpenException +import com.mongodb.ServerAddress +import com.mongodb.connection.AsyncCompletionHandler +import com.mongodb.connection.SocketSettings +import com.mongodb.connection.SslSettings +import com.mongodb.spi.dns.InetAddressResolver +import spock.lang.IgnoreIf +import spock.lang.Specification +import com.mongodb.spock.Slow + +import java.util.concurrent.CountDownLatch + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ClusterFixture.getSslSettings +import static java.util.concurrent.TimeUnit.MILLISECONDS + +class AsyncSocketChannelStreamSpecification extends Specification { + + @Slow + @IgnoreIf({ getSslSettings().isEnabled() }) + def 'should successfully connect with working ip address list'() { + given: + def socketSettings = SocketSettings.builder().connectTimeout(100, MILLISECONDS).build() + def sslSettings = SslSettings.builder().build() + + def inetAddressResolver = new InetAddressResolver() { + @Override + List lookupByName(String host) { + [InetAddress.getByName('192.168.255.255'), + InetAddress.getByName('127.0.0.1')] + } + } + + def factoryFactory = new AsynchronousSocketChannelStreamFactoryFactory(inetAddressResolver) + def factory = factoryFactory.create(socketSettings, sslSettings) + + def stream = factory.create(new ServerAddress('host1')) + + when: + stream.open(OPERATION_CONTEXT) + + then: + !stream.isClosed() + } + + @Slow + @IgnoreIf({ getSslSettings().isEnabled() }) + def 'should fail to connect with non-working ip address list'() { + given: + def socketSettings = SocketSettings.builder().connectTimeout(100, MILLISECONDS).build() + def sslSettings = SslSettings.builder().build() + + def inetAddressResolver = new InetAddressResolver() { + @Override + List lookupByName(String host) { + [InetAddress.getByName('192.168.255.255'), + InetAddress.getByName('1.2.3.4')] + } + } + + def factoryFactory = new AsynchronousSocketChannelStreamFactoryFactory(inetAddressResolver) + def factory = factoryFactory.create(socketSettings, sslSettings) + def stream = factory.create(new ServerAddress()) + + when: + stream.open(OPERATION_CONTEXT) + + then: + thrown(MongoSocketOpenException) + } + + @IgnoreIf({ getSslSettings().isEnabled() }) + def 'should fail AsyncCompletionHandler if name resolution fails'() { + given: + def serverAddress = new ServerAddress() + def exception 
= new MongoSocketException('Temporary failure in name resolution', serverAddress) + + def inetAddressResolver = new InetAddressResolver() { + @Override + List lookupByName(String host) { + throw exception + } + } + def stream = new AsynchronousSocketChannelStream(serverAddress, inetAddressResolver, + SocketSettings.builder().connectTimeout(100, MILLISECONDS).build(), + new PowerOfTwoBufferPool()) + def callback = new CallbackErrorHolder() + + when: + stream.openAsync(OPERATION_CONTEXT, callback) + + then: + callback.getError().is(exception) + } + + class CallbackErrorHolder implements AsyncCompletionHandler { + CountDownLatch latch = new CountDownLatch(1) + Throwable throwable = null + + Throwable getError() { + latch.countDown() + throwable + } + + @Override + void completed(Void r) { + latch.await() + } + + @Override + void failed(Throwable t) { + throwable = t + latch.countDown() + } + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/AsyncStreamTimeoutsSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/AsyncStreamTimeoutsSpecification.groovy new file mode 100644 index 00000000000..3589362b8ac --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/AsyncStreamTimeoutsSpecification.groovy @@ -0,0 +1,72 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection + +import com.mongodb.LoggerSettings +import com.mongodb.MongoSocketOpenException +import com.mongodb.OperationFunctionalSpecification +import com.mongodb.ServerAddress +import com.mongodb.connection.ClusterConnectionMode +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ServerId +import com.mongodb.connection.SocketSettings +import com.mongodb.internal.connection.netty.NettyStreamFactory +import spock.lang.IgnoreIf +import com.mongodb.spock.Slow + +import java.util.concurrent.TimeUnit + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ClusterFixture.getCredentialWithCache +import static com.mongodb.ClusterFixture.getServerApi +import static com.mongodb.ClusterFixture.getSslSettings + +@Slow +class AsyncStreamTimeoutsSpecification extends OperationFunctionalSpecification { + + static SocketSettings openSocketSettings = SocketSettings.builder().connectTimeout(1, TimeUnit.MILLISECONDS).build() + + @IgnoreIf({ getSslSettings().isEnabled() }) + def 'should throw a MongoSocketOpenException when the AsynchronousSocket Stream fails to open'() { + given: + def connection = new InternalStreamConnectionFactory(ClusterConnectionMode.SINGLE, + new AsynchronousSocketChannelStreamFactory(new DefaultInetAddressResolver(), openSocketSettings, getSslSettings()), + getCredentialWithCache(), null, null, [], LoggerSettings.builder().build(), null, getServerApi()) + .create(new ServerId(new ClusterId(), new ServerAddress(new InetSocketAddress('192.168.255.255', 27017)))) + + when: + connection.open(OPERATION_CONTEXT) + + then: + thrown(MongoSocketOpenException) + } + + def 'should throw a MongoSocketOpenException when the Netty Stream fails to open'() { + given: + def connection = new InternalStreamConnectionFactory(ClusterConnectionMode.SINGLE, + new NettyStreamFactory(openSocketSettings, getSslSettings()), getCredentialWithCache(), null, null, + [], LoggerSettings.builder().build(), null, getServerApi()).create(new ServerId(new ClusterId(), + new ServerAddress(new InetSocketAddress('192.168.255.255', 27017)))) + + when: + connection.open(OPERATION_CONTEXT) + + then: + thrown(MongoSocketOpenException) + } + +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/AwsAuthenticationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/AwsAuthenticationSpecification.groovy new file mode 100644 index 00000000000..8dd53bc1c03 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/AwsAuthenticationSpecification.groovy @@ -0,0 +1,169 @@ +package com.mongodb.internal.connection + +import com.mongodb.AwsCredential +import com.mongodb.ClusterFixture +import com.mongodb.MongoCommandException +import com.mongodb.MongoCredential +import com.mongodb.MongoSecurityException +import com.mongodb.ServerAddress +import com.mongodb.async.FutureResultCallback +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ServerId +import com.mongodb.connection.SocketSettings +import com.mongodb.internal.authentication.AwsCredentialHelper +import org.bson.BsonDocument +import org.bson.BsonString +import spock.lang.IgnoreIf +import spock.lang.Specification + +import java.util.function.Supplier + +import static com.mongodb.AuthenticationMechanism.MONGODB_AWS +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ClusterFixture.getClusterConnectionMode +import static 
com.mongodb.ClusterFixture.getConnectionString +import static com.mongodb.ClusterFixture.getCredential +import static com.mongodb.ClusterFixture.getSslSettings +import static com.mongodb.connection.ClusterConnectionMode.SINGLE +import static com.mongodb.internal.connection.CommandHelper.executeCommand +import static java.util.concurrent.TimeUnit.SECONDS + +@IgnoreIf({ getCredential() == null || getCredential().getAuthenticationMechanism() != MONGODB_AWS }) +class AwsAuthenticationSpecification extends Specification { + + static { + def providerProperty = System.getProperty('org.mongodb.test.aws.credential.provider', 'awsSdkV2') + + if (providerProperty == 'builtIn') { + AwsCredentialHelper.requireBuiltInProvider() + } else if (providerProperty == 'awsSdkV1') { + AwsCredentialHelper.requireAwsSdkV1Provider() + } else if (providerProperty == 'awsSdkV2') { + AwsCredentialHelper.requireAwsSdkV2Provider() + } else { + throw new IllegalArgumentException("Unrecognized AWS credential provider: $providerProperty") + } + } + + def 'should not authorize when not authenticated'() { + given: + def connection = createConnection(async, null) + + when: + openConnection(connection, async) + executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')), + getClusterConnectionMode(), null, connection, OPERATION_CONTEXT) + + then: + thrown(MongoCommandException) + + cleanup: + connection?.close() + + where: + async << [true, false] + } + + def 'should authorize when successfully authenticated'() { + given: + def connection = createConnection(async, getCredential()) + + when: + openConnection(connection, async) + executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')), + getClusterConnectionMode(), null, connection, OPERATION_CONTEXT) + + then: + true + + cleanup: + connection?.close() + + where: + async << [true, false] + } + + @IgnoreIf({ System.getenv('AWS_SESSION_TOKEN') == null || System.getenv('AWS_SESSION_TOKEN') == '' }) + def 'should authorize when successfully authenticated using provider'() { + given: + def connection = createConnection(async, + getCredential().withMechanismProperty(MongoCredential.AWS_CREDENTIAL_PROVIDER_KEY, + new Supplier() { + @Override + AwsCredential get() { + new AwsCredential( + System.getenv('AWS_ACCESS_KEY_ID'), + System.getenv('AWS_SECRET_ACCESS_KEY'), + System.getenv('AWS_SESSION_TOKEN')) + } + })) + + when: + openConnection(connection, async) + executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')), + getClusterConnectionMode(), null, connection, OPERATION_CONTEXT) + + then: + true + + cleanup: + connection?.close() + + where: + async << [true, false] + } + + // This test is just proving that the credential provider is not being totally ignored + @IgnoreIf({ System.getenv('AWS_SESSION_TOKEN') == null || System.getenv('AWS_SESSION_TOKEN') == '' }) + def 'should not authenticate when provider gives invalid session token'() { + given: + def connection = createConnection(async, + getCredential().withMechanismProperty(MongoCredential.AWS_CREDENTIAL_PROVIDER_KEY, + new Supplier() { + @Override + AwsCredential get() { + new AwsCredential( + System.getenv('AWS_ACCESS_KEY_ID'), + System.getenv('AWS_SECRET_ACCESS_KEY'), + 'fake-session-token') + } + })) + + when: + openConnection(connection, async) + + then: + thrown(MongoSecurityException) + + cleanup: + connection?.close() + + where: + async << [true, false] + } + + private static 
InternalStreamConnection createConnection(final boolean async, final MongoCredential credential) { + new InternalStreamConnection(SINGLE, + new ServerId(new ClusterId(), new ServerAddress(getConnectionString().getHosts().get(0))), + new TestConnectionGenerationSupplier(), + async ? new AsynchronousSocketChannelStreamFactory(new DefaultInetAddressResolver(), SocketSettings.builder().build(), + getSslSettings()) : new SocketStreamFactory(new DefaultInetAddressResolver(), SocketSettings.builder().build(), + getSslSettings()), [], null, new InternalStreamConnectionInitializer(SINGLE, createAuthenticator(credential), + null, [], null) + ) + } + + private static Authenticator createAuthenticator(final MongoCredential credential) { + credential == null ? null : new AwsAuthenticator(new MongoCredentialWithCache(credential), SINGLE, null) + } + + private static void openConnection(final InternalConnection connection, final boolean async) { + if (async) { + FutureResultCallback futureResultCallback = new FutureResultCallback() + connection.openAsync(OPERATION_CONTEXT, futureResultCallback) + futureResultCallback.get(ClusterFixture.TIMEOUT, SECONDS) + } else { + connection.open(OPERATION_CONTEXT) + } + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/ClientMetadataTest.java b/driver-core/src/test/functional/com/mongodb/internal/connection/ClientMetadataTest.java new file mode 100644 index 00000000000..6ea8c506ff4 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/ClientMetadataTest.java @@ -0,0 +1,460 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.ClusterFixture; +import com.mongodb.MongoDriverInformation; +import com.mongodb.MongoNamespace; +import com.mongodb.client.test.CollectionHelper; +import com.mongodb.internal.build.MongoDriverVersion; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.Document; +import org.bson.codecs.DocumentCodec; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.MethodSource; +import org.mockito.MockedStatic; +import org.mockito.Mockito; + +import java.io.File; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Stream; + +import static com.mongodb.client.CrudTestHelper.repeat; +import static com.mongodb.client.WithWrapper.withWrapper; +import static com.mongodb.internal.connection.ClientMetadata.getOperatingSystemType; +import static java.util.Optional.ofNullable; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +/** + * See spec + * + *
+ * <p>
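+ * For orientation, a minimal sketch of the handshake metadata document this class asserts against (values are
+ * illustrative, not exact output; the precise shape is built by {@code createExpectedClientMetadataDocument} below):
+ * <pre>{@code
+ * {application: {name: 'app name'},
+ *  driver: {name: 'mongo-java-driver', version: '...'},
+ *  os: {type: 'Linux', name: '...', architecture: '...', version: '...'},
+ *  platform: 'Java/<vendor>/<runtime version>',
+ *  env: {name: 'aws.lambda', memory_mb: 1024, region: 'us-east-2'}}
+ * }</pre>
+ * <p>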
+ * NOTE: This class also contains tests that aren't categorized as Prose tests. + */ +public class ClientMetadataTest { + private static final String APP_NAME = "app name"; + private static final MongoDriverInformation EMPTY_MONGO_DRIVER_INFORMATION = MongoDriverInformation.builder().build(); + + @Test + public void test01ValidAws() { + withWrapper() + .withEnvironmentVariable("AWS_EXECUTION_ENV", "AWS_Lambda_java8") + .withEnvironmentVariable("AWS_REGION", "us-east-2") + .withEnvironmentVariable("AWS_LAMBDA_FUNCTION_MEMORY_SIZE", "1024") + .run(() -> { + BsonDocument expected = createExpectedClientMetadataDocument(APP_NAME); + expected.put("env", BsonDocument.parse("{'name': 'aws.lambda', 'memory_mb': 1024, 'region': 'us-east-2'}")); + BsonDocument actual = createActualClientMetadataDocument(); + assertEquals(expected, actual); + + performHello(); + }); + } + + @Test + public void test02ValidAzure() { + withWrapper() + .withEnvironmentVariable("FUNCTIONS_WORKER_RUNTIME", "node") + .run(() -> { + BsonDocument expected = createExpectedClientMetadataDocument(APP_NAME); + expected.put("env", BsonDocument.parse("{'name': 'azure.func'}")); + BsonDocument actual = createActualClientMetadataDocument(); + assertEquals(expected, actual); + + performHello(); + }); + } + + @Test + public void test03ValidGcp() { + withWrapper() + .withEnvironmentVariable("K_SERVICE", "servicename") + .withEnvironmentVariable("FUNCTION_MEMORY_MB", "1024") + .withEnvironmentVariable("FUNCTION_TIMEOUT_SEC", "60") + .withEnvironmentVariable("FUNCTION_REGION", "us-central1") + .run(() -> { + BsonDocument expected = createExpectedClientMetadataDocument(APP_NAME); + expected.put("env", BsonDocument.parse( + "{'name': 'gcp.func', 'timeout_sec': 60, 'memory_mb': 1024, 'region': 'us-central1'}")); + BsonDocument actual = createActualClientMetadataDocument(); + assertEquals(expected, actual); + + performHello(); + }); + } + + @Test + public void test04ValidVercel() { + withWrapper() + .withEnvironmentVariable("VERCEL", "1") + .withEnvironmentVariable("VERCEL_REGION", "cdg1") + .run(() -> { + BsonDocument expected = createExpectedClientMetadataDocument(APP_NAME); + expected.put("env", BsonDocument.parse("{'name': 'vercel', 'region': 'cdg1'}")); + BsonDocument actual = createActualClientMetadataDocument(); + assertEquals(expected, actual); + + performHello(); + }); + } + + @Test + public void test05InvalidMultipleProviders() { + withWrapper() + .withEnvironmentVariable("AWS_EXECUTION_ENV", "AWS_Lambda_java8") + .withEnvironmentVariable("FUNCTIONS_WORKER_RUNTIME", "node") + .run(() -> { + BsonDocument expected = createExpectedClientMetadataDocument(APP_NAME); + BsonDocument actual = createActualClientMetadataDocument(); + assertEquals(expected, actual); + + performHello(); + }); + } + + @Test + public void test06InvalidLongString() { + withWrapper() + .withEnvironmentVariable("AWS_EXECUTION_ENV", "AWS_Lambda_java8") + .withEnvironmentVariable("AWS_REGION", repeat(512, "a")) + .run(() -> { + BsonDocument expected = createExpectedClientMetadataDocument(APP_NAME); + expected.put("env", BsonDocument.parse("{'name': 'aws.lambda'}")); + BsonDocument actual = createActualClientMetadataDocument(); + assertEquals(expected, actual); + + performHello(); + }); + } + + @Test + public void test07InvalidWrongTypes() { + withWrapper() + .withEnvironmentVariable("AWS_EXECUTION_ENV", "AWS_Lambda_java8") + .withEnvironmentVariable("AWS_LAMBDA_FUNCTION_MEMORY_SIZE", "big") + .run(() -> { + BsonDocument expected = 
createExpectedClientMetadataDocument(APP_NAME); + expected.put("env", BsonDocument.parse("{'name': 'aws.lambda'}")); + BsonDocument actual = createActualClientMetadataDocument(); + assertEquals(expected, actual); + + performHello(); + }); + } + + @Test + public void test08NotLambda() { + withWrapper() + .withEnvironmentVariable("AWS_EXECUTION_ENV", "EC2") + .run(() -> { + BsonDocument expected = createExpectedClientMetadataDocument(APP_NAME); + BsonDocument actual = createActualClientMetadataDocument(); + assertEquals(expected, actual); + + performHello(); + }); + } + + @Test + public void test09ValidContainerAndFaasProvider() { + withWrapper() + .withEnvironmentVariable("AWS_EXECUTION_ENV", "AWS_Lambda_java8") + .withEnvironmentVariable("AWS_REGION", "us-east-2") + .withEnvironmentVariable("AWS_LAMBDA_FUNCTION_MEMORY_SIZE", "1024") + .withEnvironmentVariable("KUBERNETES_SERVICE_HOST", "1") + .run(() -> { + BsonDocument expected = createExpectedClientMetadataDocument(APP_NAME); + expected.put("env", BsonDocument.parse("{'name': 'aws.lambda', 'memory_mb': 1024, 'region': 'us-east-2', 'container': {'orchestrator': 'kubernetes'}}")); + BsonDocument actual = createActualClientMetadataDocument(); + assertEquals(expected, actual); + + performHello(); + }); + } + + // Additional tests, not specified as prose tests: + + @Test + void testKubernetesMetadataIncluded() { + withWrapper() + .withEnvironmentVariable("KUBERNETES_SERVICE_HOST", "kubernetes.default.svc.cluster.local") + .run(() -> { + BsonDocument expected = createExpectedClientMetadataDocument(APP_NAME); + expected.put("env", BsonDocument.parse("{'container': {'orchestrator': 'kubernetes'}}")); + BsonDocument actual = createActualClientMetadataDocument(); + assertEquals(expected, actual); + + performHello(); + }); + } + + @Test + void testDockerMetadataIncluded() { + try (MockedStatic pathsMockedStatic = Mockito.mockStatic(Files.class)) { + Path path = Paths.get(File.separator + ".dockerenv"); + pathsMockedStatic.when(() -> Files.exists(path)).thenReturn(true); + + withWrapper() + .run(() -> { + BsonDocument expected = createExpectedClientMetadataDocument(APP_NAME); + expected.put("env", BsonDocument.parse("{'container': {'runtime': 'docker'}}")); + BsonDocument actual = createActualClientMetadataDocument(); + assertEquals(expected, actual); + + performHello(); + }); + } + } + + @Test + void testDockerAndKubernetesMetadataIncluded() { + try (MockedStatic pathsMockedStatic = Mockito.mockStatic(Files.class)) { + Path path = Paths.get(File.separator + "/.dockerenv"); + pathsMockedStatic.when(() -> Files.exists(path)).thenReturn(true); + + withWrapper() + .withEnvironmentVariable("KUBERNETES_SERVICE_HOST", "kubernetes.default.svc.cluster.local") + .run(() -> { + BsonDocument expected = createExpectedClientMetadataDocument(APP_NAME); + expected.put("env", BsonDocument.parse("{'container': {'runtime': 'docker', 'orchestrator': 'kubernetes'}}")); + BsonDocument actual = createActualClientMetadataDocument(); + assertEquals(expected, actual); + + performHello(); + }); + } + } + + @Test + public void testLimitForDriverVersion() { + // should create client metadata document and exclude the extra driver info if its too verbose + MongoDriverInformation driverInfo = MongoDriverInformation.builder() + .driverName("mongo-spark") + .driverVersion(repeat(512, "a")) + .driverPlatform("Scala 2.10 / Spark 2.0.0") + .build(); + + BsonDocument expected = createExpectedClientMetadataDocument(APP_NAME, driverInfo); + BsonDocument expectedBase = 
createExpectedClientMetadataDocument(APP_NAME); + expected.put("driver", expectedBase.get("driver")); + + BsonDocument actual = new ClientMetadata(APP_NAME, driverInfo).getBsonDocument(); + assertEquals(expected, actual); + } + + @Test + public void testLimitForPlatform() { + MongoDriverInformation driverInfo = MongoDriverInformation.builder() + .driverName("mongo-spark") + .driverVersion("2.0.0") + .driverPlatform(repeat(512, "a")) + .build(); + + BsonDocument expected = createExpectedClientMetadataDocument(APP_NAME, driverInfo); + BsonDocument expectedBase = createExpectedClientMetadataDocument(APP_NAME); + expected.put("platform", expectedBase.get("platform")); + + BsonDocument actual = new ClientMetadata(APP_NAME, driverInfo).getBsonDocument(); + assertEquals(expected, actual); + } + + @Test + public void testLimitForOsName() { + withWrapper() + .withSystemProperty("os.name", repeat(512, "a")) + .run(() -> { + BsonDocument expected = createExpectedClientMetadataDocument(APP_NAME); + expected.getDocument("os").remove("name"); + + BsonDocument actual = createActualClientMetadataDocument(); + assertEquals(expected, actual); + }); + } + + @Test + public void testApplicationNameUnderLimit() { + String applicationName = repeat(126, "a") + "\u00A0"; + BsonDocument client = new ClientMetadata(applicationName, EMPTY_MONGO_DRIVER_INFORMATION).getBsonDocument(); + assertEquals(applicationName, client.getDocument("application").getString("name").getValue()); + } + + @Test + public void testApplicationNameOverLimit() { + String applicationName = repeat(127, "a") + "\u00A0"; + assertThrows(IllegalArgumentException.class, () -> new ClientMetadata(applicationName, EMPTY_MONGO_DRIVER_INFORMATION)); + } + + @ParameterizedTest + @CsvSource({ + APP_NAME + ", " + true, + APP_NAME + ", " + false, + ", " + true, // null appName + ", " + false, + }) + public void testCreateClientMetadataDocument(@Nullable final String appName, final boolean hasDriverInfo) { + MongoDriverInformation driverInformation = hasDriverInfo ? 
createDriverInformation() : EMPTY_MONGO_DRIVER_INFORMATION; + ClientMetadata clientMetadata = new ClientMetadata(appName, driverInformation); + assertEquals( + createExpectedClientMetadataDocument(appName, driverInformation), + clientMetadata.getBsonDocument()); + } + + public static java.util.stream.Stream provideDriverInformation() { + return Stream.of( + Arguments.of("1.0", "Framework", "Framework Platform"), + Arguments.of("1.0", "Framework", null), + Arguments.of(null, "Framework", "Framework Platform"), + Arguments.of(null, null, "Framework Platform"), + Arguments.of(null, "Framework", null) + ); + } + + + @ParameterizedTest + @MethodSource("provideDriverInformation") + void testUpdateClientMetadataDocument(@Nullable final String driverVersion, + @Nullable final String driverName, + @Nullable final String driverPlatform) { + //given + MongoDriverInformation initialDriverInformation = MongoDriverInformation.builder() + .driverName("mongo-spark") + .driverVersion("2.0.0") + .driverPlatform("Scala 2.10 / Spark 2.0.0") + .build(); + + ClientMetadata clientMetadata = new ClientMetadata(null, initialDriverInformation); + BsonDocument initialClientMetadataDocument = clientMetadata.getBsonDocument(); + assertEquals( + createExpectedClientMetadataDocument(null, initialDriverInformation), + initialClientMetadataDocument); + + MongoDriverInformation.Builder expectedUpdatedMetadataBuilder = MongoDriverInformation.builder(initialDriverInformation); + ofNullable(driverName).ifPresent(expectedUpdatedMetadataBuilder::driverName); + ofNullable(driverVersion).ifPresent(expectedUpdatedMetadataBuilder::driverVersion); + ofNullable(driverPlatform).ifPresent(expectedUpdatedMetadataBuilder::driverPlatform); + MongoDriverInformation expectedUpdatedMetadata = expectedUpdatedMetadataBuilder.build(); + + MongoDriverInformation.Builder builder = MongoDriverInformation.builder(); + ofNullable(driverName).ifPresent(builder::driverName); + ofNullable(driverVersion).ifPresent(builder::driverVersion); + ofNullable(driverPlatform).ifPresent(builder::driverPlatform); + MongoDriverInformation metadataToAppend = builder.build(); + + //when + clientMetadata.append(metadataToAppend); + BsonDocument updatedClientMetadata = clientMetadata.getBsonDocument(); + + //then + assertEquals( + createExpectedClientMetadataDocument(null, expectedUpdatedMetadata), + updatedClientMetadata); + assertNotEquals(updatedClientMetadata, initialClientMetadataDocument); + } + + @ParameterizedTest + @CsvSource({ + "unknown, unknown", + "Linux OS, Linux", + "Mac OS X, Darwin", + "Windows 10, Windows", + "HP-UX OS, Unix", + "AIX OS, Unix", + "Irix OS, Unix", + "Solaris OS, Unix", + "SunOS, Unix", + "Some Other OS, unknown", + }) + public void testApplicationName(final String input, final String expected) { + assertEquals(expected, getOperatingSystemType(input)); + } + + private void performHello() { + CollectionHelper collectionHelper = new CollectionHelper<>( + new DocumentCodec(), + new MongoNamespace(ClusterFixture.getDefaultDatabaseName(), "test")); + collectionHelper.hello(); + } + + private BsonDocument createActualClientMetadataDocument() { + return new ClientMetadata(APP_NAME, EMPTY_MONGO_DRIVER_INFORMATION).getBsonDocument(); + } + + private static MongoDriverInformation createDriverInformation() { + return MongoDriverInformation.builder() + .driverName("mongo-spark") + .driverVersion("2.0.0") + .driverPlatform("Scala 2.10 / Spark 2.0.0") + .build(); + } + + private static BsonDocument createExpectedClientMetadataDocument( + @Nullable 
final String appName, + @Nullable final MongoDriverInformation driverInformation) { + BsonDocument driverDocument = new BsonDocument() + .append("name", new BsonString(MongoDriverVersion.NAME)) + .append("version", new BsonString(MongoDriverVersion.VERSION)); + BsonDocument osDocument = new BsonDocument() + .append("type", new BsonString(getOperatingSystemType(System.getProperty("os.name")))) + .append("name", new BsonString(System.getProperty("os.name"))) + .append("architecture", new BsonString(System.getProperty("os.arch"))) + .append("version", new BsonString(System.getProperty("os.version"))); + BsonDocument clientDocument = new BsonDocument(); + if (appName != null) { + clientDocument.append("application", new BsonDocument("name", new BsonString(appName))); + } + clientDocument + .append("driver", driverDocument) + .append("os", osDocument) + .append("platform", new BsonString("Java/" + System.getProperty("java.vendor") + "/" + + System.getProperty("java.runtime.version"))); + if (driverInformation != null) { + driverDocument.append("name", new BsonString(join( + driverDocument.getString("name").getValue(), + driverInformation.getDriverNames()))); + driverDocument.append("version", new BsonString(join( + driverDocument.getString("version").getValue(), + driverInformation.getDriverVersions()))); + clientDocument.append("platform", new BsonString(join( + clientDocument.getString("platform").getValue(), + driverInformation.getDriverPlatforms()))); + } + return clientDocument; + } + + static BsonDocument createExpectedClientMetadataDocument(final String appName) { + return createExpectedClientMetadataDocument(appName, null); + } + + private static String join(final String first, final List rest) { + String separator = "|"; + ArrayList result = new ArrayList<>(); + result.add(first); + result.addAll(rest); + return String.join(separator, result); + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/CommandHelperSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/CommandHelperSpecification.groovy new file mode 100644 index 00000000000..83ce94f7075 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/CommandHelperSpecification.groovy @@ -0,0 +1,84 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection + +import com.mongodb.LoggerSettings +import com.mongodb.MongoCommandException +import com.mongodb.connection.ClusterConnectionMode +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ServerId +import com.mongodb.connection.SocketSettings +import com.mongodb.internal.connection.netty.NettyStreamFactory +import org.bson.BsonDocument +import org.bson.BsonInt32 +import spock.lang.Specification + +import java.util.concurrent.CountDownLatch + +import static com.mongodb.ClusterFixture.CLIENT_METADATA +import static com.mongodb.ClusterFixture.LEGACY_HELLO +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ClusterFixture.getClusterConnectionMode +import static com.mongodb.ClusterFixture.getCredentialWithCache +import static com.mongodb.ClusterFixture.getPrimary +import static com.mongodb.ClusterFixture.getServerApi +import static com.mongodb.ClusterFixture.getSslSettings +import static com.mongodb.internal.connection.CommandHelper.executeCommandAsync + +class CommandHelperSpecification extends Specification { + InternalConnection connection + + def setup() { + connection = new InternalStreamConnectionFactory(ClusterConnectionMode.SINGLE, + new NettyStreamFactory(SocketSettings.builder().build(), getSslSettings()), + getCredentialWithCache(), CLIENT_METADATA, [], LoggerSettings.builder().build(), null, getServerApi()) + .create(new ServerId(new ClusterId(), getPrimary())) + connection.open(OPERATION_CONTEXT) + } + + def cleanup() { + connection?.close() + } + + def 'should execute command asynchronously'() { + when: + BsonDocument receivedDocument = null + Throwable receivedException = null + def latch1 = new CountDownLatch(1) + executeCommandAsync('admin', new BsonDocument(LEGACY_HELLO, new BsonInt32(1)), getClusterConnectionMode(), + getServerApi(), connection, OPERATION_CONTEXT) + { document, exception -> receivedDocument = document; receivedException = exception; latch1.countDown() } + latch1.await() + + then: + !receivedDocument.isEmpty() + receivedDocument.containsKey('ok') + !receivedException + + when: + def latch2 = new CountDownLatch(1) + executeCommandAsync('admin', new BsonDocument('non-existent-command', new BsonInt32(1)), getClusterConnectionMode(), + getServerApi(), connection, OPERATION_CONTEXT) + { document, exception -> receivedDocument = document; receivedException = exception; latch2.countDown() } + latch2.await() + + then: + !receivedDocument + receivedException instanceof MongoCommandException + } + +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/DefaultConnectionPoolTest.java b/driver-core/src/test/functional/com/mongodb/internal/connection/DefaultConnectionPoolTest.java new file mode 100644 index 00000000000..fc5926b3bad --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/DefaultConnectionPoolTest.java @@ -0,0 +1,779 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoConnectionPoolClearedException; +import com.mongodb.MongoServerUnavailableException; +import com.mongodb.ServerAddress; +import com.mongodb.client.syncadapter.SupplyingCallback; +import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ConnectionId; +import com.mongodb.connection.ConnectionPoolSettings; +import com.mongodb.connection.ServerId; +import com.mongodb.event.ConnectionCreatedEvent; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.inject.EmptyProvider; +import com.mongodb.internal.inject.OptionalProvider; +import com.mongodb.internal.inject.SameObjectProvider; +import com.mongodb.internal.time.TimePointTest; +import com.mongodb.internal.time.Timeout; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; +import java.util.stream.Stream; + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT; +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT_FACTORY; +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS; +import static com.mongodb.ClusterFixture.createOperationContext; +import static com.mongodb.internal.time.Timeout.ZeroSemantics.ZERO_DURATION_MEANS_EXPIRED; +import static java.lang.Long.MAX_VALUE; +import static java.lang.Thread.sleep; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.MINUTES; +import static java.util.concurrent.TimeUnit.NANOSECONDS; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.mockito.Mockito.withSettings; + +/** + * These tests are racy, so doing them in Java instead of Groovy to reduce chance of failure. 
+ */ +public class DefaultConnectionPoolTest { + private static final ServerId SERVER_ID = new ServerId(new ClusterId(), new ServerAddress()); + private static final long TEST_WAIT_TIMEOUT_MILLIS = SECONDS.toMillis(5); + private static final int DEFAULT_MAX_CONNECTING = ConnectionPoolSettings.builder().build().getMaxConnecting(); + + private TestInternalConnectionFactory connectionFactory; + + private DefaultConnectionPool provider; + private ExecutorService cachedExecutor; + + @BeforeEach + public void setUp() { + connectionFactory = new TestInternalConnectionFactory(); + cachedExecutor = Executors.newCachedThreadPool(); + } + + @AfterEach + @SuppressWarnings("try") + public void cleanup() throws InterruptedException { + //noinspection unused + try (DefaultConnectionPool closed = provider) { + cachedExecutor.shutdownNow(); + //noinspection ResultOfMethodCallIgnored + cachedExecutor.awaitTermination(MAX_VALUE, NANOSECONDS); + } + } + + @Test + public void shouldThrowOnTimeout() throws InterruptedException { + // given + provider = new DefaultConnectionPool(SERVER_ID, connectionFactory, + ConnectionPoolSettings.builder() + .maxSize(1) + .build(), + mockSdamProvider(), OPERATION_CONTEXT_FACTORY); + provider.ready(); + TimeoutSettings timeoutSettings = TIMEOUT_SETTINGS.withMaxWaitTimeMS(50); + provider.get(createOperationContext(timeoutSettings)); + + // when + TimeoutTrackingConnectionGetter connectionGetter = new TimeoutTrackingConnectionGetter(provider, timeoutSettings); + cachedExecutor.submit(connectionGetter); + + connectionGetter.getLatch().await(); + + // then + assertTrue(connectionGetter.isGotTimeout()); + } + + @Test + public void shouldNotUseMaxAwaitTimeMSWhenTimeoutMsIsSet() throws InterruptedException { + // given + provider = new DefaultConnectionPool(SERVER_ID, connectionFactory, + ConnectionPoolSettings.builder() + .maxSize(1) + .build(), + mockSdamProvider(), OPERATION_CONTEXT_FACTORY); + provider.ready(); + TimeoutSettings timeoutSettings = TIMEOUT_SETTINGS + .withTimeout(100L, MILLISECONDS) + .withMaxWaitTimeMS(50); + + InternalConnection internalConnection = provider.get(createOperationContext(timeoutSettings)); + + // when + TimeoutTrackingConnectionGetter connectionGetter = new TimeoutTrackingConnectionGetter(provider, timeoutSettings); + cachedExecutor.submit(connectionGetter); + + sleep(70); // wait for more than maxWaitTimeMS but less than timeoutMs. 
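+ // check the connection back in: the waiting getter should now acquire it within the remaining
+ // timeoutMS budget (100 ms), demonstrating that maxWaitTimeMS (50 ms, already exceeded) is not applied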
+ internalConnection.close(); + connectionGetter.getLatch().await(); + + // then + assertFalse(connectionGetter.isGotTimeout()); + } + + @Test + public void shouldThrowOnPoolClosed() { + provider = new DefaultConnectionPool(SERVER_ID, connectionFactory, + ConnectionPoolSettings.builder() + .maxSize(1) + .build(), + mockSdamProvider(), OPERATION_CONTEXT_FACTORY); + provider.close(); + + String expectedExceptionMessage = "The server at 127.0.0.1:27017 is no longer available"; + MongoServerUnavailableException exception; + exception = assertThrows(MongoServerUnavailableException.class, () -> provider.get(OPERATION_CONTEXT)); + assertEquals(expectedExceptionMessage, exception.getMessage()); + SupplyingCallback supplyingCallback = new SupplyingCallback<>(); + provider.getAsync(createOperationContext(TIMEOUT_SETTINGS.withMaxWaitTimeMS(50)), supplyingCallback); + exception = assertThrows(MongoServerUnavailableException.class, supplyingCallback::get); + assertEquals(expectedExceptionMessage, exception.getMessage()); + } + + @Test + public void shouldExpireConnectionAfterMaxLifeTime() throws InterruptedException { + // given + provider = new DefaultConnectionPool(SERVER_ID, connectionFactory, + ConnectionPoolSettings.builder() + .maxSize(1) + .maintenanceInitialDelay(5, MINUTES) + .maxConnectionLifeTime(50, MILLISECONDS) + .build(), + mockSdamProvider(), OPERATION_CONTEXT_FACTORY); + provider.ready(); + + // when + provider.get(OPERATION_CONTEXT).close(); + sleep(100); + provider.doMaintenance(); + provider.get(OPERATION_CONTEXT); + + // then + assertTrue(connectionFactory.getNumCreatedConnections() >= 2); // should really be two, but it's racy + } + + @Test + public void shouldExpireConnectionAfterLifeTimeOnClose() throws InterruptedException { + // given + provider = new DefaultConnectionPool(SERVER_ID, + connectionFactory, + ConnectionPoolSettings.builder() + .maxSize(1) + .maxConnectionLifeTime(20, MILLISECONDS).build(), + mockSdamProvider(), OPERATION_CONTEXT_FACTORY); + provider.ready(); + + // when + InternalConnection connection = provider.get(OPERATION_CONTEXT); + sleep(50); + connection.close(); + + // then + assertTrue(connectionFactory.getCreatedConnections().get(0).isClosed()); + } + + @Test + public void shouldExpireConnectionAfterMaxIdleTime() throws InterruptedException { + // given + provider = new DefaultConnectionPool(SERVER_ID, + connectionFactory, + ConnectionPoolSettings.builder() + .maxSize(1) + .maintenanceInitialDelay(5, MINUTES) + .maxConnectionIdleTime(50, MILLISECONDS).build(), + mockSdamProvider(), OPERATION_CONTEXT_FACTORY); + provider.ready(); + + // when + provider.get(OPERATION_CONTEXT).close(); + sleep(100); + provider.doMaintenance(); + provider.get(OPERATION_CONTEXT); + + // then + assertTrue(connectionFactory.getNumCreatedConnections() >= 2); // should really be two, but it's racy + } + + @Test + public void shouldCloseConnectionAfterExpiration() throws InterruptedException { + // given + provider = new DefaultConnectionPool(SERVER_ID, + connectionFactory, + ConnectionPoolSettings.builder() + .maxSize(1) + .maintenanceInitialDelay(5, MINUTES) + .maxConnectionLifeTime(20, MILLISECONDS).build(), + mockSdamProvider(), OPERATION_CONTEXT_FACTORY); + provider.ready(); + + // when + provider.get(OPERATION_CONTEXT).close(); + sleep(50); + provider.doMaintenance(); + provider.get(OPERATION_CONTEXT); + + // then + assertTrue(connectionFactory.getCreatedConnections().get(0).isClosed()); + } + + @Test + public void shouldCreateNewConnectionAfterExpiration() throws 
InterruptedException { + // given + provider = new DefaultConnectionPool(SERVER_ID, + connectionFactory, + ConnectionPoolSettings.builder() + .maxSize(1) + .maintenanceInitialDelay(5, MINUTES) + .maxConnectionLifeTime(20, MILLISECONDS).build(), + mockSdamProvider(), OPERATION_CONTEXT_FACTORY); + provider.ready(); + + // when + provider.get(OPERATION_CONTEXT).close(); + sleep(50); + provider.doMaintenance(); + InternalConnection secondConnection = provider.get(OPERATION_CONTEXT); + + // then + assertNotNull(secondConnection); + assertEquals(2, connectionFactory.getNumCreatedConnections()); + } + + @Test + public void shouldPruneAfterMaintenanceTaskRuns() throws InterruptedException { + // given + provider = new DefaultConnectionPool(SERVER_ID, + connectionFactory, + ConnectionPoolSettings.builder() + .maxSize(10) + .maxConnectionLifeTime(1, MILLISECONDS) + .maintenanceInitialDelay(5, MINUTES) + .build(), + mockSdamProvider(), OPERATION_CONTEXT_FACTORY); + provider.ready(); + provider.get(OPERATION_CONTEXT).close(); + + + // when + sleep(10); + provider.doMaintenance(); + + // then + assertTrue(connectionFactory.getCreatedConnections().get(0).isClosed()); + } + + @Test + void infiniteMaxSize() { + int defaultMaxSize = ConnectionPoolSettings.builder().build().getMaxSize(); + provider = new DefaultConnectionPool(SERVER_ID, connectionFactory, + ConnectionPoolSettings.builder().maxSize(0).build(), EmptyProvider.instance(), OPERATION_CONTEXT_FACTORY); + provider.ready(); + List connections = new ArrayList<>(); + try { + for (int i = 0; i < 2 * defaultMaxSize; i++) { + connections.add(provider.get(OPERATION_CONTEXT)); + } + } finally { + connections.forEach(connection -> { + try { + connection.close(); + } catch (Exception e) { + // ignore + } + }); + } + } + + @ParameterizedTest + @MethodSource("concurrentUsageArguments") + @Tag("Slow") + public void concurrentUsage(final int minSize, final int maxSize, final boolean limitConnectionLifeIdleTime, + final int concurrentUsersCount, + final boolean checkoutSync, final boolean checkoutAsync, + final float invalidateAndReadyProb, final float invalidateProb, final float readyProb) + throws InterruptedException { + ControllableConnectionFactory controllableConnFactory = newControllableConnectionFactory(cachedExecutor); + provider = new DefaultConnectionPool(SERVER_ID, controllableConnFactory.factory, + ConnectionPoolSettings.builder() + .minSize(minSize) + .maxSize(maxSize) + .maintenanceInitialDelay(0, NANOSECONDS) + .maintenanceFrequency(100, MILLISECONDS) + .maxConnectionLifeTime(limitConnectionLifeIdleTime ? 350 : 0, MILLISECONDS) + .maxConnectionIdleTime(limitConnectionLifeIdleTime ? 
50 : 0, MILLISECONDS) + .build(), + mockSdamProvider(), OPERATION_CONTEXT_FACTORY); + provider.ready(); + assertUseConcurrently(provider, concurrentUsersCount, + checkoutSync, checkoutAsync, + invalidateAndReadyProb, invalidateProb, readyProb, + cachedExecutor, SECONDS.toNanos(10), TIMEOUT_SETTINGS.withMaxWaitTimeMS(TEST_WAIT_TIMEOUT_MILLIS)); + } + + private static Stream concurrentUsageArguments() { + return Stream.of(// variants marked with (*) have proved their usefulness by detecting bugs + Arguments.of(0, 1, true, 8, true, false, 0.02f, 0, 0), + Arguments.of(0, 1, false, 8, false, true, 0.02f, 0, 0), // (*) + Arguments.of(DEFAULT_MAX_CONNECTING, DEFAULT_MAX_CONNECTING, true, 8, true, true, 0, 0, 0), + Arguments.of(DEFAULT_MAX_CONNECTING + 1, DEFAULT_MAX_CONNECTING + 5, true, 2 * (DEFAULT_MAX_CONNECTING + 5), + true, true, 0.02f, 0, 0), + Arguments.of(DEFAULT_MAX_CONNECTING + 5, DEFAULT_MAX_CONNECTING + 5, false, 2 * (DEFAULT_MAX_CONNECTING + 5), + true, true, 0.02f, 0, 0), // (*) + Arguments.of(DEFAULT_MAX_CONNECTING + 1, DEFAULT_MAX_CONNECTING + 5, false, 2 * (DEFAULT_MAX_CONNECTING + 5), + true, true, 0.3f, 0.1f, 0.1f), + Arguments.of(DEFAULT_MAX_CONNECTING + 1, DEFAULT_MAX_CONNECTING + 5, true, 2 * (DEFAULT_MAX_CONNECTING + 5), + true, true, 0, 0.5f, 0.05f)); + } + + @Test + @Tag("Slow") + public void callbackShouldNotBlockCheckoutIfOpenAsyncWorksNotInCurrentThread() throws InterruptedException, TimeoutException { + int maxAvailableConnections = 7; + ControllableConnectionFactory controllableConnFactory = newControllableConnectionFactory(cachedExecutor); + TestConnectionPoolListener listener = new TestConnectionPoolListener(); + provider = new DefaultConnectionPool(SERVER_ID, controllableConnFactory.factory, + ConnectionPoolSettings.builder() + .maxSize(DEFAULT_MAX_CONNECTING + maxAvailableConnections) + .addConnectionPoolListener(listener) + .maintenanceInitialDelay(MAX_VALUE, NANOSECONDS) + .build(), + mockSdamProvider(), OPERATION_CONTEXT_FACTORY); + provider.ready(); + TimeoutSettings timeoutSettings = TIMEOUT_SETTINGS.withMaxWaitTimeMS(TEST_WAIT_TIMEOUT_MILLIS); + acquireOpenPermits(provider, DEFAULT_MAX_CONNECTING, InfiniteCheckoutEmulation.INFINITE_CALLBACK, + controllableConnFactory, listener, timeoutSettings); + assertUseConcurrently(provider, 2 * maxAvailableConnections, + true, true, + 0.02f, 0, 0, + cachedExecutor, SECONDS.toNanos(10), timeoutSettings); + } + + /** + * The idea of this test is as follows: + *
+ * <ol>
+ *     <li>Check out some connections from the pool
+ *     ({@link #DEFAULT_MAX_CONNECTING} connections must not be checked out to make the next step possible).</li>
+ *     <li>Acquire all permits to open a connection and leave them acquired.</li>
+ *     <li>Check in the checked out connections and concurrently check them out again.</li>
+ * </ol>
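+ * <p>
+ * Schematically, step 3 races a check-in against a check-out for each tracked connection (a sketch of this
+ * test's body; the real code randomizes submission order and compares the handed-over and received
+ * {@code ConnectionId}s):
+ * <pre>{@code
+ * cachedExecutor.submit(() -> connections.get(i).close());                            // check in
+ * cachedExecutor.submit(() -> provider.get(createOperationContext(timeoutSettings))); // check out
+ * }</pre>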
+ * If the hand-over mechanism fails, then some checkouts may be infinitely stuck trying to open a connection: + * since there are no permits to open available, the hand-over mechanism is the only way to get a connection. + */ + @Test + @Tag("Slow") + public void checkoutHandOverMechanism() throws InterruptedException, TimeoutException { + int openConnectionsCount = 5_000; + int maxConcurrentlyHandedOver = 7; + ControllableConnectionFactory controllableConnFactory = newControllableConnectionFactory(cachedExecutor); + TestConnectionPoolListener listener = new TestConnectionPoolListener(); + provider = new DefaultConnectionPool(SERVER_ID, controllableConnFactory.factory, + ConnectionPoolSettings.builder() + .maxSize(DEFAULT_MAX_CONNECTING + + openConnectionsCount + /* This wiggle room is needed to open opportunities to create new connections from the standpoint of + * the max pool size, and then check that no connections were created nonetheless. */ + + maxConcurrentlyHandedOver) + .addConnectionPoolListener(listener) + .maintenanceInitialDelay(MAX_VALUE, NANOSECONDS) + .build(), + mockSdamProvider(), OPERATION_CONTEXT_FACTORY); + provider.ready(); + List connections = new ArrayList<>(); + for (int i = 0; i < openConnectionsCount; i++) { + connections.add(provider.get(createOperationContext(TIMEOUT_SETTINGS.withMaxWaitTimeMS(0)))); + } + TimeoutSettings timeoutSettings = TIMEOUT_SETTINGS.withMaxWaitTimeMS(TEST_WAIT_TIMEOUT_MILLIS); + acquireOpenPermits(provider, DEFAULT_MAX_CONNECTING, InfiniteCheckoutEmulation.INFINITE_OPEN, controllableConnFactory, listener, + timeoutSettings); + int previousIdx = 0; + // concurrently check in / check out and assert the hand-over mechanism works + for (int idx = 0; idx < connections.size(); idx += maxConcurrentlyHandedOver) { + Collection> handedOverFutures = new ArrayList<>(); + Collection> receivedFutures = new ArrayList<>(); + while (previousIdx < idx) { + int currentIdx = previousIdx; + previousIdx++; + Runnable checkIn = () -> handedOverFutures.add(cachedExecutor.submit(() -> { + InternalConnection connection = connections.get(currentIdx); + ConnectionId connectionId = connection.getDescription().getConnectionId(); + connections.get(currentIdx).close(); + return connectionId; + })); + Runnable checkOut = () -> receivedFutures.add(cachedExecutor.submit(() -> { + InternalConnection connection = + provider.get(createOperationContext(timeoutSettings)); + return connection.getDescription().getConnectionId(); + })); + if (ThreadLocalRandom.current().nextBoolean()) { + checkIn.run(); + checkOut.run(); + } else { + checkOut.run(); + checkIn.run(); + } + } + try { + Set handedOver = new HashSet<>(); + Set received = new HashSet<>(); + for (Future handedOverFuture : handedOverFutures) { + handedOver.add(handedOverFuture.get(TEST_WAIT_TIMEOUT_MILLIS, MILLISECONDS)); + } + for (Future receivedFuture : receivedFutures) { + received.add(receivedFuture.get(TEST_WAIT_TIMEOUT_MILLIS, MILLISECONDS)); + } + assertEquals(handedOver, received); + } catch (TimeoutException | ExecutionException e) { + throw new AssertionError(e); + } + } + } + + @Test + public void readyAfterCloseMustNotThrow() { + provider = new DefaultConnectionPool( + SERVER_ID, + connectionFactory, + ConnectionPoolSettings.builder().maxSize(1).build(), + mockSdamProvider(), OPERATION_CONTEXT_FACTORY); + provider.close(); + provider.ready(); + } + + @Test + public void invalidateAfterCloseMustNotThrow() { + provider = new DefaultConnectionPool( + SERVER_ID, + connectionFactory, + 
ConnectionPoolSettings.builder().maxSize(1).build(), + mockSdamProvider(), OPERATION_CONTEXT_FACTORY); + provider.ready(); + provider.close(); + provider.invalidate(null); + } + + @Test + public void readyInvalidateConcurrentWithCloseMustNotThrow() throws ExecutionException, InterruptedException { + Future readyAndInvalidateResult = null; + for (int i = 0; i < 3_000; i++) { + provider = new DefaultConnectionPool( + SERVER_ID, + connectionFactory, + ConnectionPoolSettings.builder().maxSize(1).build(), + mockSdamProvider(), OPERATION_CONTEXT_FACTORY); + try { + readyAndInvalidateResult = cachedExecutor.submit(() -> { + provider.ready(); + provider.invalidate(null); + }); + } finally { + provider.close(); + if (readyAndInvalidateResult != null) { + readyAndInvalidateResult.get(); + } + } + } + } + + private static void assertUseConcurrently(final DefaultConnectionPool pool, final int concurrentUsersCount, + final boolean sync, final boolean async, + final float invalidateAndReadyProb, final float invalidateProb, final float readyProb, + final ExecutorService executor, final long durationNanos, + final TimeoutSettings timeoutSettings) throws InterruptedException { + try { + useConcurrently(pool, concurrentUsersCount, + sync, async, + invalidateAndReadyProb, invalidateProb, readyProb, + executor, durationNanos, timeoutSettings); + } catch (TimeoutException | ExecutionException e) { + throw new AssertionError(e); + } + } + + private static void useConcurrently(final DefaultConnectionPool pool, final int concurrentUsersCount, + final boolean checkoutSync, final boolean checkoutAsync, + final float invalidateAndReadyProb, final float invalidateProb, final float readyProb, + final ExecutorService executor, final long durationNanos, + final TimeoutSettings timeoutSettings) + throws ExecutionException, InterruptedException, TimeoutException { + assertTrue(invalidateAndReadyProb >= 0 && invalidateAndReadyProb <= 1); + Runnable spontaneouslyInvalidateReady = () -> { + if (ThreadLocalRandom.current().nextFloat() < invalidateAndReadyProb) { + pool.invalidate(null); + pool.ready(); + } + if (ThreadLocalRandom.current().nextFloat() < invalidateProb) { + pool.invalidate(null); + } + if (ThreadLocalRandom.current().nextFloat() < readyProb) { + pool.ready(); + } + }; + Collection> tasks = new ArrayList<>(); + Timeout timeout = Timeout.expiresIn(durationNanos, NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED); + for (int i = 0; i < concurrentUsersCount; i++) { + if ((checkoutSync && checkoutAsync) ? 
i % 2 == 0 : checkoutSync) {//check out synchronously and check in + tasks.add(executor.submit(() -> { + while (!Thread.currentThread().isInterrupted()) { + if (timeout.call(NANOSECONDS, () -> false, (ns) -> false, () -> true)) { + break; + } + spontaneouslyInvalidateReady.run(); + InternalConnection conn = null; + try { + conn = pool.get(createOperationContext(timeoutSettings)); + } catch (MongoConnectionPoolClearedException e) { + // expected because we spontaneously invalidate `pool` + } finally { + if (conn != null) { + conn.close(); + } + } + } + })); + } else if (checkoutAsync) {//check out asynchronously and check in + tasks.add(executor.submit(() -> { + while (!Thread.currentThread().isInterrupted()) { + if (TimePointTest.hasExpired(timeout)) { + break; + } + spontaneouslyInvalidateReady.run(); + CompletableFuture futureCheckOutCheckIn = new CompletableFuture<>(); + pool.getAsync(createOperationContext(timeoutSettings), (conn, t) -> { + if (t != null) { + if (t instanceof MongoConnectionPoolClearedException) { + futureCheckOutCheckIn.complete(null); // expected because we spontaneously invalidate `pool` + } else { + futureCheckOutCheckIn.completeExceptionally(t); + } + } else { + conn.close(); + futureCheckOutCheckIn.complete(null); + } + }); + try { + futureCheckOutCheckIn.get(TEST_WAIT_TIMEOUT_MILLIS, MILLISECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return; + } catch (ExecutionException | TimeoutException e) { + throw new AssertionError(e); + } + } + })); + } + } + for (Future task : tasks) { + task.get(durationNanos + MILLISECONDS.toNanos(TEST_WAIT_TIMEOUT_MILLIS), NANOSECONDS); + } + } + + /** + * Returns early if {@linkplain Thread#interrupt() interrupted}. + */ + private static void sleepMillis(final long millis) { + try { + sleep(millis); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + + /** + * This method starts asynchronously checking out {@code openPermitsCount} connections in such a way that checkout never completes. + * This results in acquiring permits to open a connection and leaving them acquired. 
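+ * <p>
+ * Typical use in this class, as in {@code checkoutHandOverMechanism}:
+ * <pre>{@code
+ * acquireOpenPermits(provider, DEFAULT_MAX_CONNECTING, InfiniteCheckoutEmulation.INFINITE_OPEN,
+ *         controllableConnFactory, listener, timeoutSettings);
+ * }</pre>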
+     */
+    private static void acquireOpenPermits(final DefaultConnectionPool pool, final int openPermitsCount,
+            final InfiniteCheckoutEmulation infiniteEmulation,
+            final ControllableConnectionFactory controllableConnFactory,
+            final TestConnectionPoolListener listener,
+            final TimeoutSettings timeoutSettings) throws TimeoutException, InterruptedException {
+        assertTrue(openPermitsCount <= DEFAULT_MAX_CONNECTING);
+        int initialCreatedEventCount = listener.countEvents(ConnectionCreatedEvent.class);
+        switch (infiniteEmulation) {
+            case INFINITE_CALLBACK: {
+                for (int i = 0; i < openPermitsCount; i++) {
+                    SingleResultCallback<InternalConnection> infiniteCallback = (result, t) -> sleepMillis(MAX_VALUE);
+                    pool.getAsync(createOperationContext(timeoutSettings), infiniteCallback);
+                }
+                break;
+            }
+            case INFINITE_OPEN: {
+                controllableConnFactory.openDurationHandle.set(Duration.ofMillis(MAX_VALUE), openPermitsCount);
+                for (int i = 0; i < openPermitsCount; i++) {
+                    pool.getAsync(createOperationContext(timeoutSettings), (result, t) -> {});
+                }
+                controllableConnFactory.openDurationHandle.await(Duration.ofMillis(TEST_WAIT_TIMEOUT_MILLIS));
+                break;
+            }
+            default: {
+                fail();
+            }
+        }
+        listener.waitForEvent(//wait until openPermitsCount are guaranteed to be acquired
+                ConnectionCreatedEvent.class, initialCreatedEventCount + openPermitsCount, TEST_WAIT_TIMEOUT_MILLIS, MILLISECONDS);
+    }
+
+    private static ControllableConnectionFactory newControllableConnectionFactory(final ExecutorService asyncOpenExecutor) {
+        ControllableConnectionFactory.OpenDurationHandle openDurationHandle = new ControllableConnectionFactory.OpenDurationHandle();
+        InternalConnectionFactory connectionFactory = (serverId, connectionGenerationSupplier) -> {
+            InternalConnection connection = mock(InternalConnection.class, withSettings().stubOnly());
+            when(connection.getGeneration()).thenReturn(connectionGenerationSupplier.getGeneration());
+            when(connection.getDescription()).thenReturn(new ConnectionDescription(serverId));
+            AtomicBoolean open = new AtomicBoolean(false);
+            when(connection.opened()).thenAnswer(invocation -> open.get());
+            Runnable doOpen = () -> {
+                sleepMillis(openDurationHandle.getDurationAndCountDown().toMillis());
+                if (ThreadLocalRandom.current().nextFloat() < 0.2) { // add a bit more randomness
+                    sleepMillis(ThreadLocalRandom.current().nextInt(7, 15));
+                }
+                open.set(true);
+            };
+            doAnswer(invocation -> {
+                doOpen.run();
+                return null;
+            }).when(connection).open(any());
+            doAnswer(invocation -> {
+                SingleResultCallback<Void> callback = invocation.getArgument(1, SingleResultCallback.class);
+                asyncOpenExecutor.execute(() -> {
+                    doOpen.run();
+                    callback.onResult(null, null);
+                });
+                return null;
+            }).when(connection).openAsync(any(), any());
+            return connection;
+        };
+        return new ControllableConnectionFactory(connectionFactory, openDurationHandle);
+    }
+
+    private OptionalProvider<SdamServerDescriptionManager> mockSdamProvider() {
+        return SameObjectProvider.initialized(mock(SdamServerDescriptionManager.class));
+    }
+
+    private static class ControllableConnectionFactory {
+        private final InternalConnectionFactory factory;
+        private final OpenDurationHandle openDurationHandle;
+
+        ControllableConnectionFactory(final InternalConnectionFactory factory, final OpenDurationHandle openDurationHandle) {
+            this.factory = factory;
+            this.openDurationHandle = openDurationHandle;
+        }
+
+        static final class OpenDurationHandle {
+            private Duration duration;
+            private long count;
+            private final Lock lock;
+            private final Condition countIsZeroCondition;
+
+            private
OpenDurationHandle() { + duration = Duration.ZERO; + lock = new ReentrantLock(); + countIsZeroCondition = lock.newCondition(); + } + + /** + * Sets the specified {@code duration} for the next {@code count} connections. + */ + void set(final Duration duration, final long count) { + lock.lock(); + try { + assertEquals(this.count, 0); + if (count > 0) { + this.duration = duration; + this.count = count; + } + } finally { + lock.unlock(); + } + } + + private Duration getDurationAndCountDown() { + lock.lock(); + try { + Duration result = duration; + if (count > 0) { + count--; + if (count == 0) { + duration = Duration.ZERO; + countIsZeroCondition.signalAll(); + } + } + return result; + } finally { + lock.unlock(); + } + } + + /** + * Wait until {@link #factory} has started opening as many connections as were specified via {@link #set(Duration, long)}. + */ + void await(final Duration timeout) throws InterruptedException { + long remainingNanos = timeout.toNanos(); + lock.lock(); + try { + while (count > 0) { + assertTrue(remainingNanos > 0, "Timed out after " + timeout); + remainingNanos = countIsZeroCondition.awaitNanos(remainingNanos); + } + } finally { + lock.unlock(); + } + } + } + } + + private enum InfiniteCheckoutEmulation { + INFINITE_OPEN, + INFINITE_CALLBACK + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/DomainNameUtilsTest.java b/driver-core/src/test/functional/com/mongodb/internal/connection/DomainNameUtilsTest.java new file mode 100644 index 00000000000..43abfa5b9f7 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/DomainNameUtilsTest.java @@ -0,0 +1,76 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import static com.mongodb.internal.connection.DomainNameUtils.isDomainName; + +class DomainNameUtilsTest { + + @ParameterizedTest + @ValueSource(strings = { + "hyphen-domain.com", + "sub.domain.com", + "sub.domain.c.com.com", + "123numbers.com", + "mixed-123domain.net", + "longdomainnameabcdefghijk.com", + "i-0123456789abcdef.ec2.internal", + "ip-10-24-34-0.ec2.internal", + "xn--frosch-6ya.com", + "xn--emoji-grinning-3s0b.org", + "xn--bcher-kva.ch", + "localhost", + "abcdefghijklmnopqrstuvwxyz0123456789-abcdefghijklmnopqrstuvwxyz.com", //63 characters label name. + "a.abcdefghijklmnopqrstuvwxyzabcdefghjklabcdefghijklmnopqrstuvwxyz", //63 characters TLD. + "xn--weihnachten-uzb.org", + "sub.domain.com.sub.domain.com.sub.domain.com.sub.domain.com.sub.domain.com.sub.domain.com.sub.domain." + + "com.sub.domain.com.sub.domain.com.sub.domain.com.sub.domain.com.sub.domain.com.sub.domain.com.sub.domain.com.sub.domain." 
+ + "com.domain.com.sub.domain.subb.com" //255 characters + }) + void shouldReturnTrueWithValidHostName(final String hostname) { + Assertions.assertTrue(isDomainName(hostname), hostname + " is not a valid domain name"); + } + + @ParameterizedTest + @ValueSource(strings = { + "xn--frosch-6ya.w23", + "-special_chars_$$.net", + "special_chars_$$.net", + "special_chars_$$.123", + "subdomain..domain.com", + "_subdomain..domain.com", + "subdomain..domain._com", + "subdomain..domain.com_", + "notlocalhost", + "домен.com", //NON-ASCII + "ẞẞ.com", //NON-ASCII + "abcdefghijklmnopqrstuvwxyz0123456789-abcdefghijklmnopqrstuvwxyzl.com", //64 characters label name. + "a.abcdefghijklmnopqrstuvwxyzabcdefghjklabcdefghijklmnopqrstuvwxyza", //64 characters TLD. + "this-domain-is-really-long-because-it-just-keeps-going-and-going-and-its-still-not-done-yet-because-theres-more.net", + "verylongsubdomainnamethatisreallylongandmaycausetroubleforparsing.example", + "sub.domain.com.sub.domain.com.sub.domain.com.sub.domain.com.sub.domain.com.sub.domain.com.sub.domain." + + "com.sub.domain.com.sub.domain.com.sub.domain.com.sub.domain.com.sub.domain.com.sub.domain.com.sub.domain." + + "com.sub.domain.com.domain.com.sub.domain.subbb.com" //256 characters + }) + void shouldReturnFalseWithInvalidHostName(final String hostname) { + Assertions.assertFalse(isDomainName(hostname)); + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/FaasEnvironmentAccessor.java b/driver-core/src/test/functional/com/mongodb/internal/connection/FaasEnvironmentAccessor.java new file mode 100644 index 00000000000..ccc71f718ba --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/FaasEnvironmentAccessor.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import java.util.Map; + +/** + * In the same package as FaasEnvironment, to access package-private + */ +public final class FaasEnvironmentAccessor { + private FaasEnvironmentAccessor() { + } + + public static Map getFaasEnvMap() { + return FaasEnvironment.ENV_OVERRIDES_FOR_TESTING; + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/GSSAPIAuthenticationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/GSSAPIAuthenticationSpecification.groovy new file mode 100644 index 00000000000..cc3e0401bb5 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/GSSAPIAuthenticationSpecification.groovy @@ -0,0 +1,230 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection + +import com.mongodb.ClusterFixture +import com.mongodb.KerberosSubjectProvider +import com.mongodb.MongoCommandException +import com.mongodb.MongoCredential +import com.mongodb.MongoSecurityException +import com.mongodb.ServerAddress +import com.mongodb.async.FutureResultCallback +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ServerId +import com.mongodb.connection.SocketSettings +import com.mongodb.internal.connection.netty.NettyStreamFactory +import org.bson.BsonDocument +import org.bson.BsonString +import spock.lang.IgnoreIf +import spock.lang.Specification + +import javax.security.auth.Subject +import javax.security.auth.login.LoginContext + +import static com.mongodb.AuthenticationMechanism.GSSAPI +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ClusterFixture.getClusterConnectionMode +import static com.mongodb.ClusterFixture.getConnectionString +import static com.mongodb.ClusterFixture.getCredential +import static com.mongodb.ClusterFixture.getLoginContextName +import static com.mongodb.ClusterFixture.getSslSettings +import static com.mongodb.MongoCredential.JAVA_SUBJECT_PROVIDER_KEY +import static com.mongodb.MongoCredential.createGSSAPICredential +import static com.mongodb.connection.ClusterConnectionMode.SINGLE +import static com.mongodb.internal.connection.CommandHelper.executeCommand +import static java.util.concurrent.TimeUnit.SECONDS + +@IgnoreIf({ getCredential() == null || getCredential().getAuthenticationMechanism() != GSSAPI }) +class GSSAPIAuthenticationSpecification extends Specification { + + def 'should not authorize when not authenticated'() { + given: + def connection = createConnection(async, null) + + when: + openConnection(connection, async) + executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')), + getClusterConnectionMode(), null, connection, OPERATION_CONTEXT) + + then: + thrown(MongoCommandException) + + cleanup: + connection?.close() + + where: + async << [true, false] + } + + def 'should authorize when successfully authenticated'() { + given: + def connection = createConnection(async, credentials) + + when: + openConnection(connection, async) + executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')), + getClusterConnectionMode(), null, connection, OPERATION_CONTEXT) + + then: + true + + cleanup: + connection?.close() + + where: + [async, credentials] << [ + [true, false], + [getMongoCredential(), getMongoCredential().withMechanismProperty(JAVA_SUBJECT_PROVIDER_KEY, new KerberosSubjectProvider())] + ].combinations() + } + + def 'should throw MongoSecurityException when authentication fails'() { + given: + def connection = createConnection(async, credentials) + + when: + openConnection(connection, async) + executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')), + getClusterConnectionMode(), null, connection, OPERATION_CONTEXT) + + then: + thrown(MongoSecurityException) + + cleanup: + 
connection?.close() + + where: + [async, credentials] << [ + [true, false], + [createGSSAPICredential('wrongUserName'), + createGSSAPICredential('wrongUserName') + .withMechanismProperty(JAVA_SUBJECT_PROVIDER_KEY, new KerberosSubjectProvider())] + ].combinations() + } + + def 'should authorize when successfully authenticated with Subject property'() { + when: + def loginContext = new LoginContext(getLoginContextName()) + loginContext.login() + def subject = loginContext.getSubject() + + then: + subject != null + subject.getPrincipals().size() == 1 + getMongoCredential().getUserName() == subject.getPrincipals().iterator().next().getName() + + when: + def connection = createConnection(async, getMongoCredential(subject)) + openConnection(connection, async) + executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')), + getClusterConnectionMode(), null, connection, OPERATION_CONTEXT) + + then: + true + + cleanup: + connection?.close() + + where: + async << [true, false] + } + + def 'should throw MongoSecurityException when authentication fails with Subject property'() { + when: + LoginContext context = new LoginContext(getLoginContextName()) + context.login() + + Subject subject = context.getSubject() + + then: + subject != null + + when: + def connection = createConnection(async, getMongoCredential(createGSSAPICredential('wrongUserName'), subject)) + openConnection(connection, async) + + then: + thrown(MongoSecurityException) + + cleanup: + connection?.close() + + where: + async << [true, false] + } + + def 'should authorize when successfully authenticated with SaslClient properties'() { + given: + Map saslClientProperties = [:] + + when: + def connection = createConnection(async, getMongoCredential(saslClientProperties)) + openConnection(connection, async) + executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')), + getClusterConnectionMode(), null, connection, OPERATION_CONTEXT) + + then: + true + + cleanup: + connection?.close() + + where: + async << [true, false] + } + + private static MongoCredential getMongoCredential(final Map saslClientProperties) { + getMongoCredential().withMechanismProperty(MongoCredential.JAVA_SASL_CLIENT_PROPERTIES_KEY, saslClientProperties) + } + + private static MongoCredential getMongoCredential(final Subject subject) { + getMongoCredential(getMongoCredential(), subject) + } + + private static MongoCredential getMongoCredential(final MongoCredential mongoCredential, final Subject subject) { + mongoCredential.withMechanismProperty(MongoCredential.JAVA_SUBJECT_KEY, subject) + } + + private static MongoCredential getMongoCredential() { + getCredential() + } + + private static InternalStreamConnection createConnection(final boolean async, final MongoCredential credential) { + new InternalStreamConnection(SINGLE, new ServerId(new ClusterId(), new ServerAddress(getConnectionString().getHosts().get(0))), + new TestConnectionGenerationSupplier(), + async ? new NettyStreamFactory(SocketSettings.builder().build(), getSslSettings()) + : new SocketStreamFactory(new DefaultInetAddressResolver(), SocketSettings.builder().build(), getSslSettings()), + [], null, new InternalStreamConnectionInitializer(SINGLE, createAuthenticator(credential), null, [], null) + ) + } + + private static Authenticator createAuthenticator(final MongoCredential credential) { + credential == null ? 
null : new GSSAPIAuthenticator(new MongoCredentialWithCache(credential), getClusterConnectionMode(), null) + } + + private static void openConnection(final InternalConnection connection, final boolean async) { + if (async) { + FutureResultCallback futureResultCallback = new FutureResultCallback() + connection.openAsync(OPERATION_CONTEXT, futureResultCallback) + futureResultCallback.get(ClusterFixture.TIMEOUT, SECONDS) + } else { + connection.open(OPERATION_CONTEXT) + } + } +} + + diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/GSSAPIAuthenticatorSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/GSSAPIAuthenticatorSpecification.groovy new file mode 100644 index 00000000000..223698d561c --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/GSSAPIAuthenticatorSpecification.groovy @@ -0,0 +1,65 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection + +import com.mongodb.ClusterFixture +import com.mongodb.LoggerSettings +import com.mongodb.MongoCompressor +import com.mongodb.MongoDriverInformation +import com.mongodb.SubjectProvider +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ServerId +import com.mongodb.connection.SocketSettings +import spock.lang.IgnoreIf +import spock.lang.Specification + +import javax.security.auth.login.LoginContext + +import static com.mongodb.AuthenticationMechanism.GSSAPI +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ClusterFixture.getLoginContextName +import static com.mongodb.ClusterFixture.getPrimary +import static com.mongodb.ClusterFixture.getServerApi +import static com.mongodb.ClusterFixture.getSslSettings +import static com.mongodb.MongoCredential.JAVA_SUBJECT_PROVIDER_KEY +import static com.mongodb.connection.ClusterConnectionMode.SINGLE + +@IgnoreIf({ ClusterFixture.getCredential() == null || ClusterFixture.getCredential().getAuthenticationMechanism() != GSSAPI }) +class GSSAPIAuthenticatorSpecification extends Specification { + + def 'should use subject provider mechanism property'() { + given: + def loginContext = new LoginContext(getLoginContextName()) + loginContext.login() + def subject = loginContext.getSubject() + def subjectProvider = Mock(SubjectProvider) + def credential = ClusterFixture.getCredential().withMechanismProperty(JAVA_SUBJECT_PROVIDER_KEY, subjectProvider) + def credentialWithCache = new MongoCredentialWithCache(credential) + def streamFactory = new SocketStreamFactory(new DefaultInetAddressResolver(), SocketSettings.builder().build(), getSslSettings()) + def internalConnection = new InternalStreamConnectionFactory( + SINGLE, streamFactory, + credentialWithCache, new ClientMetadata("test", MongoDriverInformation.builder().build()), + Collections. 
<MongoCompressor>emptyList(), LoggerSettings.builder().build(), null, getServerApi())
+                .create(new ServerId(new ClusterId(), getPrimary()))
+
+        when:
+        internalConnection.open(OPERATION_CONTEXT)
+
+        then:
+        1 * subjectProvider.getSubject() >> subject
+    }
+}
diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/InetAddressUtilsTest.java b/driver-core/src/test/functional/com/mongodb/internal/connection/InetAddressUtilsTest.java
new file mode 100644
index 00000000000..6d26166ee25
--- /dev/null
+++ b/driver-core/src/test/functional/com/mongodb/internal/connection/InetAddressUtilsTest.java
@@ -0,0 +1,240 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ * Copyright (C) 2008 The Guava Authors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
+ * in compliance with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package com.mongodb.internal.connection;
+
+
+import junit.framework.TestCase;
+
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.Set;
+
+/**
+ * Tests for {@link InetAddressUtils}.
+ */
+public class InetAddressUtilsTest extends TestCase {
+    public void testForStringBogusInput() {
+        Set<String> bogusInputs =
+                toSet(
+                        "",
+                        "016.016.016.016",
+                        "016.016.016",
+                        "016.016",
+                        "016",
+                        "000.000.000.000",
+                        "000",
+                        "0x0a.0x0a.0x0a.0x0a",
+                        "0x0a.0x0a.0x0a",
+                        "0x0a.0x0a",
+                        "0x0a",
+                        "42.42.42.42.42",
+                        "42.42.42",
+                        "42.42",
+                        "42",
+                        "42..42.42",
+                        "42..42.42.42",
+                        "42.42.42.42.",
+                        "42.42.42.42...",
+                        ".42.42.42.42",
+                        ".42.42.42",
+                        "...42.42.42.42",
+                        "42.42.42.-0",
+                        "42.42.42.+0",
+                        ".",
+                        "...",
+                        "bogus",
+                        "bogus.com",
+                        "192.168.0.1.com",
+                        "12345.67899.-54321.-98765",
+                        "257.0.0.0",
+                        "42.42.42.-42",
+                        "42.42.42.ab",
+                        "3ffe::1.net",
+                        "3ffe::1::1",
+                        "1::2::3::4:5",
+                        "::7:6:5:4:3:2:", // should end with ":0"
+                        ":6:5:4:3:2:1::", // should begin with "0:"
+                        "2001::db:::1",
+                        "FEDC:9878",
+                        "+1.+2.+3.4",
+                        "1.2.3.4e0",
+                        "6:5:4:3:2:1:0", // too few parts
+                        "::7:6:5:4:3:2:1:0", // too many parts
+                        "7:6:5:4:3:2:1:0::", // too many parts
+                        "9:8:7:6:5:4:3::2:1", // too many parts
+                        "0:1:2:3::4:5:6:7", // :: must remove at least one 0.
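+                        // the remaining inputs are malformed IPv6 literals: wrong part counts,
+                        // oversized or non-hex hextets, signed hextets, and stray colons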
+ "3ffe:0:0:0:0:0:0:0:1", // too many parts (9 instead of 8) + "3ffe::10000", // hextet exceeds 16 bits + "3ffe::goog", + "3ffe::-0", + "3ffe::+0", + "3ffe::-1", + ":", + ":::", + "::1.2.3", + "::1.2.3.4.5", + "::1.2.3.4:", + "1.2.3.4::", + "2001:db8::1:", + ":2001:db8::1", + ":1:2:3:4:5:6:7", + "1:2:3:4:5:6:7:", + ":1:2:3:4:5:6:"); + + for (String bogusInput : bogusInputs) { + try { + InetAddressUtils.forString(bogusInput); + fail("IllegalArgumentException expected for '" + bogusInput + "'"); + } catch (IllegalArgumentException expected) { + } + assertFalse(InetAddressUtils.isInetAddress(bogusInput)); + } + } + + public void test3ff31() { + try { + InetAddressUtils.forString("3ffe:::1"); + fail("IllegalArgumentException expected"); + } catch (IllegalArgumentException expected) { + } + assertFalse(InetAddressUtils.isInetAddress("016.016.016.016")); + } + + public void testForStringIPv4Input() throws UnknownHostException { + String ipStr = "192.168.0.1"; + // Shouldn't hit DNS, because it's an IP string literal. + InetAddress ipv4Addr = InetAddress.getByName(ipStr); + assertEquals(ipv4Addr, InetAddressUtils.forString(ipStr)); + assertTrue(InetAddressUtils.isInetAddress(ipStr)); + } + + public void testForStringIPv4NonAsciiInput() throws UnknownHostException { + String ipStr = "૧૯૨.૧૬૮.૦.૧"; // 192.168.0.1 in Gujarati digits + // Shouldn't hit DNS, because it's an IP string literal. + InetAddress ipv4Addr; + try { + ipv4Addr = InetAddress.getByName(ipStr); + } catch (UnknownHostException e) { + // OK: this is probably Android, which is stricter. + return; + } + assertEquals(ipv4Addr, InetAddressUtils.forString(ipStr)); + assertTrue(InetAddressUtils.isInetAddress(ipStr)); + } + + public void testForStringIPv6Input() throws UnknownHostException { + String ipStr = "3ffe::1"; + // Shouldn't hit DNS, because it's an IP string literal. + InetAddress ipv6Addr = InetAddress.getByName(ipStr); + assertEquals(ipv6Addr, InetAddressUtils.forString(ipStr)); + assertTrue(InetAddressUtils.isInetAddress(ipStr)); + } + + public void testForStringIPv6NonAsciiInput() throws UnknownHostException { + String ipStr = "૩ffe::૧"; // 3ffe::1 with Gujarati digits for 3 and 1 + // Shouldn't hit DNS, because it's an IP string literal. + InetAddress ipv6Addr; + try { + ipv6Addr = InetAddress.getByName(ipStr); + } catch (UnknownHostException e) { + // OK: this is probably Android, which is stricter. + return; + } + assertEquals(ipv6Addr, InetAddressUtils.forString(ipStr)); + assertTrue(InetAddressUtils.isInetAddress(ipStr)); + } + + public void testForStringIPv6EightColons() throws UnknownHostException { + Set eightColons = + toSet("::7:6:5:4:3:2:1", "::7:6:5:4:3:2:0", "7:6:5:4:3:2:1::", "0:6:5:4:3:2:1::"); + + for (String ipString : eightColons) { + // Shouldn't hit DNS, because it's an IP string literal. + InetAddress ipv6Addr = InetAddress.getByName(ipString); + assertEquals(ipv6Addr, InetAddressUtils.forString(ipString)); + assertTrue(InetAddressUtils.isInetAddress(ipString)); + } + } + + public void testConvertDottedQuadToHex() throws UnknownHostException { + Set ipStrings = + toSet("7::0.128.0.127", "7::0.128.0.128", "7::128.128.0.127", "7::0.128.128.127"); + + for (String ipString : ipStrings) { + // Shouldn't hit DNS, because it's an IP string literal. 
+    public void testConvertDottedQuadToHex() throws UnknownHostException {
+        Set<String> ipStrings =
+                toSet("7::0.128.0.127", "7::0.128.0.128", "7::128.128.0.127", "7::0.128.128.127");
+
+        for (String ipString : ipStrings) {
+            // Shouldn't hit DNS, because it's an IP string literal.
+            InetAddress ipv6Addr = InetAddress.getByName(ipString);
+            assertEquals(ipv6Addr, InetAddressUtils.forString(ipString));
+            assertTrue(InetAddressUtils.isInetAddress(ipString));
+        }
+    }
+
+    // see https://github.com/google/guava/issues/2587
+    private static final Set<String> SCOPE_IDS =
+            toSet("eno1", "en1", "eth0", "X", "1", "2", "14", "20");
+
+    public void testIPv4AddressWithScopeId() {
+        Set<String> ipStrings = toSet("1.2.3.4", "192.168.0.1");
+        for (String ipString : ipStrings) {
+            for (String scopeId : SCOPE_IDS) {
+                String withScopeId = ipString + "%" + scopeId;
+                assertFalse(
+                        "InetAddresses.isInetAddress(" + withScopeId + ") should be false but was true",
+                        InetAddressUtils.isInetAddress(withScopeId));
+            }
+        }
+    }
+
+    private static Set<String> toSet(final String... strings) {
+        return new HashSet<>(Arrays.asList(strings));
+    }
+
+    public void testDottedQuadAddressWithScopeId() {
+        Set<String> ipStrings =
+                toSet("7::0.128.0.127", "7::0.128.0.128", "7::128.128.0.127", "7::0.128.128.127");
+        for (String ipString : ipStrings) {
+            for (String scopeId : SCOPE_IDS) {
+                String withScopeId = ipString + "%" + scopeId;
+                assertFalse(
+                        "InetAddresses.isInetAddress(" + withScopeId + ") should be false but was true",
+                        InetAddressUtils.isInetAddress(withScopeId));
+            }
+        }
+    }
+
+    public void testIPv6AddressWithScopeId() {
+        Set<String> ipStrings =
+                toSet(
+                        "0:0:0:0:0:0:0:1",
+                        "fe80::a",
+                        "fe80::1",
+                        "fe80::2",
+                        "fe80::42",
+                        "fe80::3dd0:7f8e:57b7:34d5",
+                        "fe80::71a3:2b00:ddd3:753f",
+                        "fe80::8b2:d61e:e5c:b333",
+                        "fe80::b059:65f4:e877:c40");
+        for (String ipString : ipStrings) {
+            for (String scopeId : SCOPE_IDS) {
+                String withScopeId = ipString + "%" + scopeId;
+                assertTrue(
+                        "InetAddresses.isInetAddress(" + withScopeId + ") should be true but was false",
+                        InetAddressUtils.isInetAddress(withScopeId));
+                assertEquals(InetAddressUtils.forString(withScopeId), InetAddressUtils.forString(ipString));
+            }
+        }
+    }
+}
diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/PlainAuthenticationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/PlainAuthenticationSpecification.groovy
new file mode 100644
index 00000000000..e8c2a408220
--- /dev/null
+++ b/driver-core/src/test/functional/com/mongodb/internal/connection/PlainAuthenticationSpecification.groovy
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.mongodb.internal.connection + +import com.mongodb.ClusterFixture +import com.mongodb.MongoCommandException +import com.mongodb.MongoCredential +import com.mongodb.MongoSecurityException +import com.mongodb.ServerAddress +import com.mongodb.async.FutureResultCallback +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ServerId +import com.mongodb.connection.SocketSettings +import com.mongodb.internal.connection.netty.NettyStreamFactory +import org.bson.BsonDocument +import org.bson.BsonString +import spock.lang.IgnoreIf +import spock.lang.Specification + +import static com.mongodb.AuthenticationMechanism.PLAIN +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ClusterFixture.getClusterConnectionMode +import static com.mongodb.ClusterFixture.getConnectionString +import static com.mongodb.ClusterFixture.getCredential +import static com.mongodb.ClusterFixture.getSslSettings +import static com.mongodb.MongoCredential.createPlainCredential +import static com.mongodb.connection.ClusterConnectionMode.SINGLE +import static com.mongodb.internal.connection.CommandHelper.executeCommand +import static java.util.concurrent.TimeUnit.SECONDS + +@IgnoreIf({ getCredential() == null || getCredential().getAuthenticationMechanism() != PLAIN }) +class PlainAuthenticationSpecification extends Specification { + + def 'should not authorize when not authenticated'() { + given: + def connection = createConnection(async, null) + + when: + openConnection(connection, async) + executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')), + getClusterConnectionMode(), null, connection, OPERATION_CONTEXT) + + then: + thrown(MongoCommandException) + + cleanup: + connection?.close() + + where: + async << [true, false] + } + + def 'should authorize when successfully authenticated'() { + given: + def connection = createConnection(async, getMongoCredential()) + + when: + openConnection(connection, async) + executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')), + getClusterConnectionMode(), null, connection, OPERATION_CONTEXT) + + then: + true + + cleanup: + connection?.close() + + where: + async << [true, false] + } + + def 'should throw MongoSecurityException when authentication fails'() { + given: + def connection = createConnection(async, createPlainCredential('wrongUserName', '$external', 'wrongPassword'.toCharArray())) + + when: + openConnection(connection, async) + executeCommand(getConnectionString().getDatabase(), new BsonDocument('count', new BsonString('test')), + getClusterConnectionMode(), null, connection, OPERATION_CONTEXT) + + then: + thrown(MongoSecurityException) + + cleanup: + connection?.close() + + where: + async << [true, false] + } + + private static MongoCredential getMongoCredential() { + getCredential() + } + + private static InternalStreamConnection createConnection(final boolean async, final MongoCredential credential) { + new InternalStreamConnection(SINGLE, + new ServerId(new ClusterId(), new ServerAddress(getConnectionString().getHosts().get(0))), + new TestConnectionGenerationSupplier(), + async ? 
new NettyStreamFactory(SocketSettings.builder().build(), getSslSettings()) + : new SocketStreamFactory(new DefaultInetAddressResolver(), SocketSettings.builder().build(), getSslSettings()), + [], null, new InternalStreamConnectionInitializer(SINGLE, createAuthenticator(credential), null, [], null) + ) + } + + private static Authenticator createAuthenticator(final MongoCredential credential) { + credential == null ? null : new PlainAuthenticator(new MongoCredentialWithCache(credential), clusterConnectionMode, null) + } + + private static void openConnection(final InternalConnection connection, final boolean async) { + if (async) { + FutureResultCallback futureResultCallback = new FutureResultCallback() + connection.openAsync(OPERATION_CONTEXT, futureResultCallback) + futureResultCallback.get(ClusterFixture.TIMEOUT, SECONDS) + } else { + connection.open(OPERATION_CONTEXT) + } + } +} + + diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/PlainAuthenticatorTest.java b/driver-core/src/test/functional/com/mongodb/internal/connection/PlainAuthenticatorTest.java new file mode 100644 index 00000000000..b95b9c96894 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/PlainAuthenticatorTest.java @@ -0,0 +1,85 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.mongodb.internal.connection;
+
+import com.mongodb.LoggerSettings;
+import com.mongodb.MongoCredential;
+import com.mongodb.MongoSecurityException;
+import com.mongodb.ServerAddress;
+import com.mongodb.connection.ClusterConnectionMode;
+import com.mongodb.connection.ClusterId;
+import com.mongodb.connection.ConnectionDescription;
+import com.mongodb.connection.ServerId;
+import com.mongodb.connection.SocketSettings;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import java.util.Collections;
+
+import static com.mongodb.ClusterFixture.CLIENT_METADATA;
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT;
+import static com.mongodb.ClusterFixture.getClusterConnectionMode;
+import static com.mongodb.ClusterFixture.getServerApi;
+import static com.mongodb.ClusterFixture.getSslSettings;
+
+@Ignore
+public class PlainAuthenticatorTest {
+    private InternalConnection internalConnection;
+    private ConnectionDescription connectionDescription;
+    private String userName;
+    private String source;
+    private String password;
+    private final StreamFactory streamFactory = new SocketStreamFactory(new DefaultInetAddressResolver(), SocketSettings.builder().build(), getSslSettings());
+
+    @Before
+    public void setUp() {
+        String host = System.getProperty("org.mongodb.test.host");
+        userName = System.getProperty("org.mongodb.test.userName");
+        source = System.getProperty("org.mongodb.test.source");
+        password = System.getProperty("org.mongodb.test.password");
+        internalConnection = new InternalStreamConnectionFactory(ClusterConnectionMode.SINGLE, streamFactory, null, CLIENT_METADATA,
+                Collections.emptyList(), LoggerSettings.builder().build(), null, getServerApi()
+        ).create(new ServerId(new ClusterId(),
+                new ServerAddress(host)));
+        connectionDescription = new ConnectionDescription(new ServerId(new ClusterId(), new ServerAddress()));
+    }
+
+    @After
+    public void tearDown() {
+        internalConnection.close();
+    }
+
+    @Test
+    public void testSuccessfulAuthentication() {
+        PlainAuthenticator authenticator = new PlainAuthenticator(getCredentialWithCache(userName, source, password.toCharArray()),
+                getClusterConnectionMode(), getServerApi());
+        authenticator.authenticate(internalConnection, connectionDescription, OPERATION_CONTEXT);
+    }
+
+    @Test(expected = MongoSecurityException.class)
+    public void testUnsuccessfulAuthentication() {
+        PlainAuthenticator authenticator = new PlainAuthenticator(getCredentialWithCache(userName, source, "wrong".toCharArray()),
+                getClusterConnectionMode(), getServerApi());
+        authenticator.authenticate(internalConnection, connectionDescription, OPERATION_CONTEXT);
+    }
+
+    private static MongoCredentialWithCache getCredentialWithCache(final String userName, final String source, final char[] password) {
+        return new MongoCredentialWithCache(MongoCredential.createPlainCredential(userName, source, password));
+    }
+}
diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/ReplyHeaderSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/ReplyHeaderSpecification.groovy
new file mode 100644
index 00000000000..0407baeca8a
--- /dev/null
+++ b/driver-core/src/test/functional/com/mongodb/internal/connection/ReplyHeaderSpecification.groovy
@@ -0,0 +1,201 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +package com.mongodb.internal.connection + +import com.mongodb.MongoInternalException +import org.bson.io.BasicOutputBuffer +import spock.lang.Specification + +import static com.mongodb.connection.ConnectionDescription.getDefaultMaxMessageSize + +class ReplyHeaderSpecification extends Specification { + + def 'should parse reply header'() { + def outputBuffer = new BasicOutputBuffer() + outputBuffer.with { + writeInt(186) + writeInt(45) + writeInt(23) + writeInt(1) + writeInt(responseFlags) + writeLong(9000) + writeInt(4) + writeInt(1) + } + def byteBuf = outputBuffer.byteBuffers.get(0) + + when: + def replyHeader = new ReplyHeader(byteBuf, new MessageHeader(byteBuf, getDefaultMaxMessageSize())) + + then: + replyHeader.messageLength == 186 + replyHeader.requestId == 45 + replyHeader.responseTo == 23 + + where: + responseFlags << [0, 1, 2, 3] + cursorNotFound << [false, true, false, true] + queryFailure << [false, false, true, true] + } + + def 'should parse reply header with compressed header'() { + def outputBuffer = new BasicOutputBuffer() + outputBuffer.with { + writeInt(186) + writeInt(45) + writeInt(23) + writeInt(2012) + writeInt(1) + writeInt(258) + writeByte(2) + writeInt(responseFlags) + writeLong(9000) + writeInt(4) + writeInt(1) + } + def byteBuf = outputBuffer.byteBuffers.get(0) + def compressedHeader = new CompressedHeader(byteBuf, new MessageHeader(byteBuf, getDefaultMaxMessageSize())) + + when: + def replyHeader = new ReplyHeader(byteBuf, compressedHeader) + + then: + replyHeader.messageLength == 274 + replyHeader.requestId == 45 + replyHeader.responseTo == 23 + + where: + responseFlags << [0, 1, 2, 3] + cursorNotFound << [false, true, false, true] + queryFailure << [false, false, true, true] + } + + def 'should throw MongoInternalException on incorrect opCode'() { + def outputBuffer = new BasicOutputBuffer() + outputBuffer.with { + writeInt(36) + writeInt(45) + writeInt(23) + writeInt(2) + writeInt(0) + writeLong(2) + writeInt(0) + writeInt(0) + } + def byteBuf = outputBuffer.byteBuffers.get(0) + + when: + new ReplyHeader(byteBuf, new MessageHeader(byteBuf, getDefaultMaxMessageSize())) + + then: + def ex = thrown(MongoInternalException) + ex.getMessage() == 'Unexpected reply message opCode 2' + } + + def 'should throw MongoInternalException on message size < 36'() { + def outputBuffer = new BasicOutputBuffer() + outputBuffer.with { + writeInt(35) + writeInt(45) + writeInt(23) + writeInt(1) + writeInt(0) + writeLong(2) + writeInt(0) + writeInt(0) + } + def byteBuf = outputBuffer.byteBuffers.get(0) + + when: + new ReplyHeader(byteBuf, new MessageHeader(byteBuf, getDefaultMaxMessageSize())) + + then: + def ex = thrown(MongoInternalException) + ex.getMessage() == 'The reply message length 35 is less than the minimum message length 36' + } + + def 'should throw MongoInternalException on message size > max message size'() { + def outputBuffer = new BasicOutputBuffer() + outputBuffer.with { + writeInt(400) + writeInt(45) + writeInt(23) + writeInt(1) + writeInt(0) + writeLong(2) + writeInt(0) + writeInt(0) + } + def byteBuf = outputBuffer.byteBuffers.get(0) 
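+        // the header above declares a 400-byte message while the MessageHeader below caps the
+        // maximum message size at 399 bytes, so constructing the ReplyHeader must fail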
+ + when: + new ReplyHeader(byteBuf, new MessageHeader(byteBuf, 399)) + + then: + def ex = thrown(MongoInternalException) + ex.getMessage() == 'The reply message length 400 is greater than the maximum message length 399' + } + + def 'should throw MongoInternalException on num documents < 0'() { + def outputBuffer = new BasicOutputBuffer() + outputBuffer.with { + writeInt(186) + writeInt(45) + writeInt(23) + writeInt(1) + writeInt(1) + writeLong(9000) + writeInt(4) + writeInt(-1) + } + def byteBuf = outputBuffer.byteBuffers.get(0) + + when: + new ReplyHeader(byteBuf, new MessageHeader(byteBuf, getDefaultMaxMessageSize())) + + then: + def ex = thrown(MongoInternalException) + ex.getMessage() == 'The reply message number of returned documents, -1, is expected to be 1' + } + + def 'should throw MongoInternalException on num documents < 0 with compressed header'() { + def outputBuffer = new BasicOutputBuffer() + outputBuffer.with { + writeInt(186) + writeInt(45) + writeInt(23) + writeInt(2012) + writeInt(1) + writeInt(258) + writeByte(2) + writeInt(1) + writeLong(9000) + writeInt(4) + writeInt(-1) + } + def byteBuf = outputBuffer.byteBuffers.get(0) + def compressedHeader = new CompressedHeader(byteBuf, new MessageHeader(byteBuf, getDefaultMaxMessageSize())) + + when: + new ReplyHeader(byteBuf, compressedHeader) + + then: + def ex = thrown(MongoInternalException) + ex.getMessage() == 'The reply message number of returned documents, -1, is expected to be 1' + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/ScramSha256AuthenticationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/ScramSha256AuthenticationSpecification.groovy new file mode 100644 index 00000000000..4901872c1fc --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/ScramSha256AuthenticationSpecification.groovy @@ -0,0 +1,221 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.mongodb.internal.connection
+
+import com.mongodb.MongoCredential
+import com.mongodb.MongoSecurityException
+import com.mongodb.ReadConcern
+import com.mongodb.ReadPreference
+import com.mongodb.async.FutureResultCallback
+import com.mongodb.internal.binding.AsyncClusterBinding
+import com.mongodb.internal.binding.ClusterBinding
+import com.mongodb.internal.operation.CommandReadOperation
+import org.bson.BsonDocument
+import org.bson.BsonDocumentWrapper
+import org.bson.BsonString
+import org.bson.Document
+import org.bson.codecs.BsonDocumentCodec
+import org.bson.codecs.DocumentCodec
+import spock.lang.IgnoreIf
+import spock.lang.Specification
+
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
+import static com.mongodb.ClusterFixture.createAsyncCluster
+import static com.mongodb.ClusterFixture.createCluster
+import static com.mongodb.ClusterFixture.getBinding
+import static com.mongodb.ClusterFixture.isAuthenticated
+import static com.mongodb.MongoCredential.createCredential
+import static com.mongodb.MongoCredential.createScramSha1Credential
+import static com.mongodb.MongoCredential.createScramSha256Credential
+
+@IgnoreIf({ (!isAuthenticated()) })
+class ScramSha256AuthenticationSpecification extends Specification {
+
+    static MongoCredential sha1Implicit = createCredential('sha1', 'admin', 'sha1'.toCharArray())
+    static MongoCredential sha1Explicit = createScramSha1Credential('sha1', 'admin', 'sha1'.toCharArray())
+    static MongoCredential sha256Implicit = createCredential('sha256', 'admin', 'sha256'.toCharArray())
+    static MongoCredential sha256Explicit = createScramSha256Credential('sha256', 'admin', 'sha256'.toCharArray())
+    static MongoCredential bothImplicit = createCredential('both', 'admin', 'both'.toCharArray())
+    static MongoCredential bothExplicitSha1 = createScramSha1Credential('both', 'admin', 'both'.toCharArray())
+    static MongoCredential bothExplicitSha256 = createScramSha256Credential('both', 'admin', 'both'.toCharArray())
+
+    static MongoCredential sha1AsSha256 = createScramSha256Credential('sha1', 'admin', 'sha1'.toCharArray())
+    static MongoCredential sha256AsSha1 = createScramSha1Credential('sha256', 'admin', 'sha256'.toCharArray())
+    static MongoCredential nonExistentUserImplicit = createCredential('nonexistent', 'admin', 'pwd'.toCharArray())
+
+    static MongoCredential userNinePrepped = createScramSha256Credential('IX', 'admin', 'IX'.toCharArray())
+    static MongoCredential userNineUnprepped = createScramSha256Credential('IX', 'admin', 'I\u00ADX'.toCharArray())
+
+    static MongoCredential userFourPrepped = createScramSha256Credential('\u2168', 'admin', 'IV'.toCharArray())
+    static MongoCredential userFourUnprepped = createScramSha256Credential('\u2168', 'admin', 'I\u00ADV'.toCharArray())
+
+    def setupSpec() {
+        createUser('sha1', 'sha1', ['SCRAM-SHA-1'])
+        createUser('sha256', 'sha256', ['SCRAM-SHA-256'])
+        createUser('both', 'both', ['SCRAM-SHA-1', 'SCRAM-SHA-256'])
+        createUser('IX', 'IX', ['SCRAM-SHA-256'])
+        createUser('\u2168', '\u2163', ['SCRAM-SHA-256'])
+    }
+
+
+    def cleanupSpec() {
+        dropUser('sha1')
+        dropUser('sha256')
+        dropUser('both')
+        dropUser('IX')
+        dropUser('\u2168')
+    }
+
+    def createUser(final String userName, final String password, final List<String> mechanisms) {
+        def createUserCommand = new Document('createUser', userName)
+                .append('pwd', password)
+                .append('roles', ['root'])
+                .append('mechanisms', mechanisms)
+        new CommandReadOperation<>('admin',
+                new BsonDocumentWrapper(createUserCommand, new DocumentCodec()),
new DocumentCodec()) + .execute(getBinding()) + } + + def dropUser(final String userName) { + new CommandReadOperation<>('admin', new BsonDocument('dropUser', new BsonString(userName)), + new BsonDocumentCodec()).execute(getBinding()) + } + + def 'test authentication and authorization'() { + given: + def cluster = createCluster(credential) + + when: + new CommandReadOperation('admin', + new BsonDocumentWrapper(new Document('dbstats', 1), new DocumentCodec()), new DocumentCodec()) + .execute(new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT)) + + then: + noExceptionThrown() + + cleanup: + cluster.close() + + where: + credential << [sha1Implicit, sha1Explicit, sha256Implicit, sha256Explicit, bothImplicit, bothExplicitSha1, bothExplicitSha256] + } + + def 'test authentication and authorization async'() { + given: + def cluster = createAsyncCluster(credential) + def callback = new FutureResultCallback() + + when: + // make this synchronous + new CommandReadOperation('admin', + new BsonDocumentWrapper(new Document('dbstats', 1), new DocumentCodec()), new DocumentCodec()) + .executeAsync(new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT), + callback) + callback.get() + + then: + noExceptionThrown() + + cleanup: + cluster.close() + + where: + credential << [sha1Implicit, sha1Explicit, sha256Implicit, sha256Explicit, bothImplicit, bothExplicitSha1, bothExplicitSha256] + } + + def 'test authentication and authorization failure with wrong mechanism'() { + given: + def cluster = createCluster(credential) + + when: + new CommandReadOperation('admin', + new BsonDocumentWrapper(new Document('dbstats', 1), new DocumentCodec()), new DocumentCodec()) + .execute(new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT)) + + then: + thrown(MongoSecurityException) + + cleanup: + cluster.close() + + where: + credential << [sha1AsSha256, sha256AsSha1, nonExistentUserImplicit] + } + + def 'test authentication and authorization failure with wrong mechanism async'() { + given: + def cluster = createAsyncCluster(credential) + def callback = new FutureResultCallback() + + when: + new CommandReadOperation('admin', + new BsonDocumentWrapper(new Document('dbstats', 1), new DocumentCodec()), new DocumentCodec()) + .executeAsync(new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT), + callback) + callback.get() + + then: + thrown(MongoSecurityException) + + cleanup: + cluster.close() + + where: + credential << [sha1AsSha256, sha256AsSha1, nonExistentUserImplicit] + } + + def 'test SASL Prep'() { + given: + def cluster = createCluster(credential) + + when: + new CommandReadOperation('admin', + new BsonDocumentWrapper(new Document('dbstats', 1), new DocumentCodec()), new DocumentCodec()) + .execute(new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT)) + + then: + noExceptionThrown() + + cleanup: + cluster.close() + + where: + credential << [userNinePrepped, userNineUnprepped, userFourPrepped, userFourUnprepped] + } + + def 'test SASL Prep async'() { + given: + def cluster = createAsyncCluster(credential) + def callback = new FutureResultCallback() + + when: + new CommandReadOperation('admin', + new BsonDocumentWrapper(new Document('dbstats', 1), new DocumentCodec()), new DocumentCodec()) + .executeAsync(new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT), + callback) 
+        callback.get()
+
+        then:
+        noExceptionThrown()
+
+        cleanup:
+        cluster.close()
+
+        where:
+        credential << [userNinePrepped, userNineUnprepped, userFourPrepped, userFourUnprepped]
+    }
+}
diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/ServerHelper.java b/driver-core/src/test/functional/com/mongodb/internal/connection/ServerHelper.java
new file mode 100644
index 00000000000..0295e8c1f9f
--- /dev/null
+++ b/driver-core/src/test/functional/com/mongodb/internal/connection/ServerHelper.java
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.connection;
+
+import com.mongodb.ClusterFixture;
+import com.mongodb.MongoTimeoutException;
+import com.mongodb.ServerAddress;
+import com.mongodb.connection.ServerDescription;
+import com.mongodb.internal.binding.AsyncConnectionSource;
+import com.mongodb.internal.selector.ServerAddressSelector;
+
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT;
+import static com.mongodb.ClusterFixture.getAsyncCluster;
+import static com.mongodb.ClusterFixture.getCluster;
+import static com.mongodb.assertions.Assertions.fail;
+import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException;
+import static java.lang.Thread.sleep;
+
+public final class ServerHelper {
+    public static void checkPool(final ServerAddress address) {
+        checkPool(address, getCluster());
+        checkPool(address, getAsyncCluster());
+    }
+
+    public static int checkPoolCount(final ServerAddress address) {
+        return getConnectionPool(address, getCluster()).getInUseCount();
+    }
+
+    public static int checkAsyncPoolCount(final ServerAddress address) {
+        return getConnectionPool(address, getAsyncCluster()).getInUseCount();
+    }
+
+    public static void waitForLastRelease(final Cluster cluster) {
+        for (ServerDescription cur : cluster.getCurrentDescription().getServerDescriptions()) {
+            if (cur.isOk()) {
+                waitForLastRelease(cur.getAddress(), cluster);
+            }
+        }
+    }
+
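+    // polls the pool's in-use count every 100 ms until it reaches zero, failing with
+    // MongoTimeoutException once ClusterFixture.TIMEOUT elapses, so leaked checkouts
+    // surface as test failures instead of hangs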
Now at: " + + pool.getInUseCount()); + } + } catch (InterruptedException e) { + throw interruptAndCreateMongoInterruptedException("Interrupted", e); + } + } + } + + private static ConcurrentPool getConnectionPool(final ServerAddress address, final Cluster cluster) { + return connectionPool(cluster.selectServer(new ServerAddressSelector(address), OPERATION_CONTEXT).getServer()); + } + + private static void checkPool(final ServerAddress address, final Cluster cluster) { + try { + waitForLastRelease(address, cluster); + } catch (MongoTimeoutException e) { + throw new IllegalStateException(e.getMessage()); + } + } + + private static ConcurrentPool connectionPool(final Server server) { + ConnectionPool connectionPool; + if (server instanceof DefaultServer) { + connectionPool = ((DefaultServer) server).getConnectionPool(); + } else if (server instanceof LoadBalancedServer) { + connectionPool = ((LoadBalancedServer) server).getConnectionPool(); + } else { + throw fail(server.getClass().toString()); + } + return ((DefaultConnectionPool) connectionPool).getPool(); + } + + public static void waitForRelease(final AsyncConnectionSource connectionSource, final int expectedCount) { + long startTime = System.currentTimeMillis(); + while (connectionSource.getCount() > expectedCount) { + try { + sleep(10); + if (System.currentTimeMillis() > startTime + ClusterFixture.TIMEOUT * 1000) { + throw new MongoTimeoutException("Timed out waiting for ConnectionSource count to drop to " + expectedCount); + } + } catch (InterruptedException e) { + throw interruptAndCreateMongoInterruptedException("Interrupted", e); + } + } + } + + private ServerHelper() { + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/ServerMonitorSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/ServerMonitorSpecification.groovy new file mode 100644 index 00000000000..092f74ef96a --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/ServerMonitorSpecification.groovy @@ -0,0 +1,241 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection + +import com.mongodb.LoggerSettings +import com.mongodb.MongoSocketException +import com.mongodb.OperationFunctionalSpecification +import com.mongodb.ServerAddress +import com.mongodb.Tag +import com.mongodb.TagSet +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ServerDescription +import com.mongodb.connection.ServerId +import com.mongodb.connection.ServerSettings +import com.mongodb.connection.ServerType +import com.mongodb.connection.SocketSettings +import com.mongodb.internal.inject.SameObjectProvider +import org.bson.types.ObjectId + +import java.util.concurrent.CountDownLatch +import java.util.concurrent.TimeUnit + +import static com.mongodb.ClusterFixture.CLIENT_METADATA +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT_FACTORY +import static com.mongodb.ClusterFixture.getClusterConnectionMode +import static com.mongodb.ClusterFixture.getCredentialWithCache +import static com.mongodb.ClusterFixture.getPrimary +import static com.mongodb.ClusterFixture.getServerApi +import static com.mongodb.ClusterFixture.getSslSettings +import static com.mongodb.connection.ClusterConnectionMode.SINGLE +import static com.mongodb.connection.ServerConnectionState.CONNECTED +import static com.mongodb.connection.ServerConnectionState.CONNECTING +import static com.mongodb.connection.ServerDescription.builder +import static com.mongodb.internal.connection.DefaultServerMonitor.shouldLogStageChange +import static java.util.Arrays.asList + +class ServerMonitorSpecification extends OperationFunctionalSpecification { + ServerDescription newDescription + ServerMonitor serverMonitor + CountDownLatch latch = new CountDownLatch(1) + + def cleanup() { + serverMonitor?.close() + } + + def 'should have positive round trip time'() { + given: + initializeServerMonitor(getPrimary()) + + when: + latch.await() + + then: + newDescription.roundTripTimeNanos > 0 + } + + def 'should report current exception'() { + given: + initializeServerMonitor(new ServerAddress('some_unknown_server_name:34567')) + + when: + latch.await() + + then: + newDescription.exception instanceof MongoSocketException + } + + def 'should log state change if significant properties have changed'() { + given: + ServerDescription.Builder builder = createBuilder() + ServerDescription description = builder.build() + ServerDescription otherDescription + + expect: + !shouldLogStageChange(description, builder.build()) + + when: + otherDescription = createBuilder().address(new ServerAddress('localhost:27018')).build() + + then: + shouldLogStageChange(description, otherDescription) + + when: + otherDescription = createBuilder().type(ServerType.STANDALONE).build() + + then: + shouldLogStageChange(description, otherDescription) + + when: + otherDescription = createBuilder().tagSet(null).build() + + then: + shouldLogStageChange(description, otherDescription) + + when: + otherDescription = createBuilder().setName('test2').build() + + then: + shouldLogStageChange(description, otherDescription) + + when: + otherDescription = createBuilder().primary('localhost:27018').build() + + then: + shouldLogStageChange(description, otherDescription) + + when: + otherDescription = createBuilder().canonicalAddress('localhost:27018').build() + + then: + shouldLogStageChange(description, otherDescription) + + when: + otherDescription = createBuilder().hosts(new HashSet(asList('localhost:27018'))).build() + + then: + shouldLogStageChange(description, otherDescription) + + when: + otherDescription = 
createBuilder().arbiters(new HashSet(asList('localhost:27018'))).build()
+
+        then:
+        shouldLogStageChange(description, otherDescription)
+
+        when:
+        otherDescription = createBuilder().passives(new HashSet(asList('localhost:27018'))).build()
+
+        then:
+        shouldLogStageChange(description, otherDescription)
+
+        when:
+        otherDescription = createBuilder().ok(false).build()
+
+        then:
+        shouldLogStageChange(description, otherDescription)
+
+        when:
+        otherDescription = createBuilder().state(CONNECTING).build()
+
+        then:
+        shouldLogStageChange(description, otherDescription)
+
+        when:
+        otherDescription = createBuilder().electionId(new ObjectId()).build()
+
+        then:
+        shouldLogStageChange(description, otherDescription)
+
+        when:
+        otherDescription = createBuilder().setVersion(3).build()
+
+        then:
+        shouldLogStageChange(description, otherDescription)
+
+        // test exception state changes
+        shouldLogStageChange(createBuilder().exception(new IOException()).build(),
+                createBuilder().exception(new RuntimeException()).build())
+        shouldLogStageChange(createBuilder().exception(new IOException('message one')).build(),
+                createBuilder().exception(new IOException('message two')).build())
+    }
+
+    private static ServerDescription.Builder createBuilder() {
+        builder().ok(true)
+                .state(CONNECTED)
+                .address(new ServerAddress())
+                .type(ServerType.SHARD_ROUTER)
+                .tagSet(new TagSet(asList(new Tag('dc', 'ny'))))
+                .setName('test')
+                .primary('localhost:27017')
+                .canonicalAddress('localhost:27017')
+                .hosts(new HashSet(asList('localhost:27017', 'localhost:27018')))
+                .passives(new HashSet(asList('localhost:27019')))
+                .arbiters(new HashSet(asList('localhost:27020')))
+                .electionId(new ObjectId('abcdabcdabcdabcdabcdabcd'))
+                .setVersion(2)
+    }
+
+    def initializeServerMonitor(ServerAddress address) {
+        SdamServerDescriptionManager sdam = new SdamServerDescriptionManager() {
+            @Override
+            void monitorUpdate(final ServerDescription candidateDescription) {
+                assert candidateDescription != null
+                newDescription = candidateDescription
+                latch.countDown()
+            }
+
+            @Override
+            void updateToUnknown(final ServerDescription candidateDescription) {
+                assert candidateDescription != null
+                newDescription = candidateDescription
+                latch.countDown()
+            }
+
+            @Override
+            void handleExceptionBeforeHandshake(final SdamServerDescriptionManager.SdamIssue sdamIssue) {
+                throw new UnsupportedOperationException()
+            }
+
+            @Override
+            void handleExceptionAfterHandshake(final SdamServerDescriptionManager.SdamIssue sdamIssue) {
+                throw new UnsupportedOperationException()
+            }
+
+            @Override
+            SdamServerDescriptionManager.SdamIssue.Context context() {
+                throw new UnsupportedOperationException()
+            }
+
+            @Override
+            SdamServerDescriptionManager.SdamIssue.Context context(final InternalConnection connection) {
+                throw new UnsupportedOperationException()
+            }
+        }
+        serverMonitor = new DefaultServerMonitor(new ServerId(new ClusterId(), address), ServerSettings.builder().build(),
+                new InternalStreamConnectionFactory(SINGLE, new SocketStreamFactory(new DefaultInetAddressResolver(),
+                        SocketSettings.builder().connectTimeout(500, TimeUnit.MILLISECONDS).build(), getSslSettings()),
+                        getCredentialWithCache(), CLIENT_METADATA, [], LoggerSettings.builder().build(), null,
+                        getServerApi()),
+                getClusterConnectionMode(), getServerApi(), false, SameObjectProvider.initialized(sdam),
+                OPERATION_CONTEXT_FACTORY)
+        serverMonitor.start()
+        serverMonitor
+    }
+}
diff --git
a/driver-core/src/test/functional/com/mongodb/internal/connection/SingleServerClusterTest.java b/driver-core/src/test/functional/com/mongodb/internal/connection/SingleServerClusterTest.java new file mode 100644 index 00000000000..62fa6c27032 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/SingleServerClusterTest.java @@ -0,0 +1,119 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.LoggerSettings; +import com.mongodb.ReadPreference; +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ClusterSettings; +import com.mongodb.connection.ConnectionPoolSettings; +import com.mongodb.connection.ServerSettings; +import com.mongodb.connection.SocketSettings; +import com.mongodb.internal.selector.ServerAddressSelector; +import com.mongodb.internal.validator.NoOpFieldNameValidator; +import org.bson.BsonDocument; +import org.bson.BsonDouble; +import org.bson.BsonString; +import org.bson.codecs.BsonDocumentCodec; +import org.junit.After; +import org.junit.Test; + +import java.util.Collections; + +import static com.mongodb.ClusterFixture.CLIENT_METADATA; +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT; +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT_FACTORY; +import static com.mongodb.ClusterFixture.getCredential; +import static com.mongodb.ClusterFixture.getDefaultDatabaseName; +import static com.mongodb.ClusterFixture.getPrimary; +import static com.mongodb.ClusterFixture.getSecondary; +import static com.mongodb.ClusterFixture.getServerApi; +import static com.mongodb.ClusterFixture.getSslSettings; +import static com.mongodb.internal.connection.ClusterDescriptionHelper.getPrimaries; +import static java.util.Collections.singletonList; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +public class SingleServerClusterTest { + private SingleServerCluster cluster; + + + private void setUpCluster(final ServerAddress serverAddress) { + SocketStreamFactory streamFactory = new SocketStreamFactory(new DefaultInetAddressResolver(), SocketSettings.builder().build(), + getSslSettings()); + ClusterId clusterId = new ClusterId(); + ClusterSettings clusterSettings = ClusterSettings.builder() + .mode(ClusterConnectionMode.SINGLE) + .hosts(singletonList(serverAddress)) + .build(); + cluster = new SingleServerCluster(clusterId, + clusterSettings, + new DefaultClusterableServerFactory(ServerSettings.builder().build(), + ConnectionPoolSettings.builder().maxSize(1).build(), InternalConnectionPoolSettings.builder().build(), + OPERATION_CONTEXT_FACTORY, streamFactory, OPERATION_CONTEXT_FACTORY, streamFactory, getCredential(), + LoggerSettings.builder().build(), null, + Collections.emptyList(), getServerApi(), false), CLIENT_METADATA); + } + + @After + 
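+    // Every test builds its own single-server cluster via setUpCluster(), so close it after each test.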
public void tearDown() { + cluster.close(); + } + + @Test + public void descriptionShouldIncludeSettings() { + // given + setUpCluster(getPrimary()); + + // expect + assertNotNull(cluster.getCurrentDescription().getClusterSettings()); + assertNotNull(cluster.getCurrentDescription().getServerSettings()); + } + + @Test + public void shouldGetServerWithOkDescription() { + // given + setUpCluster(getPrimary()); + + // when + ServerTuple serverTuple = cluster.selectServer(clusterDescription -> getPrimaries(clusterDescription), OPERATION_CONTEXT); + + // then + assertTrue(serverTuple.getServerDescription().isOk()); + } + + @Test + public void shouldSuccessfullyQueryASecondaryWithPrimaryReadPreference() { + // given + OperationContext operationContext = OPERATION_CONTEXT; + ServerAddress secondary = getSecondary(); + setUpCluster(secondary); + String collectionName = getClass().getName(); + Connection connection = cluster.selectServer(new ServerAddressSelector(secondary), operationContext).getServer() + .getConnection(operationContext); + + // when + BsonDocument result = connection.command(getDefaultDatabaseName(), new BsonDocument("count", new BsonString(collectionName)), + NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), new BsonDocumentCodec(), operationContext); + + // then + assertEquals(new BsonDouble(1.0).intValue(), result.getNumber("ok").intValue()); + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/SocketStreamHelperSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/SocketStreamHelperSpecification.groovy new file mode 100644 index 00000000000..68a82fcbf74 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/SocketStreamHelperSpecification.groovy @@ -0,0 +1,178 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection + +import com.mongodb.ClusterFixture +import com.mongodb.MongoInternalException +import com.mongodb.MongoOperationTimeoutException +import com.mongodb.connection.SocketSettings +import com.mongodb.connection.SslSettings +import com.mongodb.internal.TimeoutContext +import com.mongodb.internal.TimeoutSettings +import jdk.net.ExtendedSocketOptions +import spock.lang.IgnoreIf +import spock.lang.Specification + +import javax.net.SocketFactory +import javax.net.ssl.SNIHostName +import javax.net.ssl.SSLSocket +import javax.net.ssl.SSLSocketFactory +import java.lang.reflect.Method + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS +import static com.mongodb.ClusterFixture.createOperationContext +import static com.mongodb.ClusterFixture.getPrimary +import static com.mongodb.internal.connection.ServerAddressHelper.getSocketAddresses +import static java.util.concurrent.TimeUnit.MILLISECONDS +import static java.util.concurrent.TimeUnit.SECONDS + +class SocketStreamHelperSpecification extends Specification { + + def 'should configure socket with settings()'() { + given: + Socket socket = SocketFactory.default.createSocket() + def socketSettings = SocketSettings.builder() + .readTimeout(10, SECONDS) + .build() + + def operationContext = createOperationContext(TIMEOUT_SETTINGS.withReadTimeoutMS(socketSettings.getReadTimeout(MILLISECONDS))) + + when: + SocketStreamHelper.initialize(operationContext, socket, getSocketAddresses(getPrimary(), new DefaultInetAddressResolver()).get(0), + socketSettings, SslSettings.builder().build()) + + then: + socket.getTcpNoDelay() + socket.getKeepAlive() + socket.getSoTimeout() == socketSettings.getReadTimeout(MILLISECONDS) + + // If the Java 11+ extended socket options for keep alive probes are available, check those values. 
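+        // The checks below use reflection because this test must still compile and run on Java 8:
+        // the TCP_KEEPCOUNT/TCP_KEEPIDLE/TCP_KEEPINTERVAL fields exist only on JDK 11+, and
+        // Socket.getOption(SocketOption) itself was only added in Java SE 9.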
+ if (Arrays.stream(ExtendedSocketOptions.getDeclaredFields()).anyMatch{ f -> f.getName().equals('TCP_KEEPCOUNT') }) { + Method getOptionMethod + try { + getOptionMethod = Socket.getMethod('getOption', SocketOption) + } catch (NoSuchMethodException e) { + // ignore, the `Socket.getOption` method was added in Java SE 9 and does not exist in Java SE 8 + getOptionMethod = null + } + if (getOptionMethod != null) { + getOptionMethod.invoke(socket, ExtendedSocketOptions.getDeclaredField('TCP_KEEPCOUNT').get(null)) == 9 + getOptionMethod.invoke(socket, ExtendedSocketOptions.getDeclaredField('TCP_KEEPIDLE').get(null)) == 120 + getOptionMethod.invoke(socket, ExtendedSocketOptions.getDeclaredField('TCP_KEEPINTERVAL').get(null)) == 10 + } + } + + cleanup: + socket?.close() + } + + def 'should throw MongoOperationTimeoutException during initialization when timeoutMS expires'() { + given: + Socket socket = SocketFactory.default.createSocket() + + when: + SocketStreamHelper.initialize( + OPERATION_CONTEXT.withTimeoutContext(new TimeoutContext( + new TimeoutSettings( + 1, + 100, + 100, + 1, + 100))), + socket, getSocketAddresses(getPrimary(), new DefaultInetAddressResolver()).get(0), + SocketSettings.builder().build(), SslSettings.builder().build()) + + then: + thrown(MongoOperationTimeoutException) + + cleanup: + socket?.close() + } + + + def 'should connect socket()'() { + given: + Socket socket = SocketFactory.default.createSocket() + + when: + SocketStreamHelper.initialize(OPERATION_CONTEXT, socket, getSocketAddresses(getPrimary(), new DefaultInetAddressResolver()).get(0), + SocketSettings.builder().build(), SslSettings.builder().build()) + + then: + socket.isConnected() + + cleanup: + socket?.close() + } + + @IgnoreIf({ !ClusterFixture.sslSettings.enabled }) + def 'should enable host name verification if socket is an instance of SSLSocket'() { + given: + SSLSocket socket = SSLSocketFactory.default.createSocket() + + when: + SocketStreamHelper.initialize(OPERATION_CONTEXT, socket, getSocketAddresses(getPrimary(), new DefaultInetAddressResolver()).get(0), + SocketSettings.builder().build(), sslSettings) + + then: + socket.getSSLParameters().endpointIdentificationAlgorithm == (sslSettings.invalidHostNameAllowed ? 
null : 'HTTPS')
+
+        cleanup:
+        socket?.close()
+
+        where:
+        sslSettings << [SslSettings.builder().enabled(true).build(),
+                        SslSettings.builder().enabled(false).build(),
+                        SslSettings.builder().enabled(true).invalidHostNameAllowed(true).build()]
+    }
+
+    @IgnoreIf({ !ClusterFixture.sslSettings.enabled })
+    def 'should enable SNI if socket is an instance of SSLSocket'() {
+        given:
+        SSLSocket socket = SSLSocketFactory.default.createSocket()
+
+        when:
+        SocketStreamHelper.initialize(OPERATION_CONTEXT, socket, getSocketAddresses(getPrimary(), new DefaultInetAddressResolver()).get(0),
+                SocketSettings.builder().build(), sslSettings)
+
+        then:
+        socket.getSSLParameters().getServerNames() == [new SNIHostName(getPrimary().getHost())]
+
+        cleanup:
+        socket?.close()
+
+        where:
+        sslSettings << [SslSettings.builder().enabled(true).build(),
+                        SslSettings.builder().enabled(false).build()]
+    }
+
+    def 'should throw MongoInternalException if ssl is enabled and the socket is not an instance of SSLSocket'() {
+        given:
+        Socket socket = SocketFactory.default.createSocket()
+
+        when:
+        SocketStreamHelper.initialize(OPERATION_CONTEXT, socket, getSocketAddresses(getPrimary(), new DefaultInetAddressResolver()).get(0),
+                SocketSettings.builder().build(), SslSettings.builder().enabled(true).build())
+
+        then:
+        thrown(MongoInternalException)
+
+        cleanup:
+        socket?.close()
+    }
+}
diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/StreamSocketAddressSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/connection/StreamSocketAddressSpecification.groovy
new file mode 100644
index 00000000000..0283ce44f7b
--- /dev/null
+++ b/driver-core/src/test/functional/com/mongodb/internal/connection/StreamSocketAddressSpecification.groovy
@@ -0,0 +1,97 @@
+package com.mongodb.internal.connection
+
+import com.mongodb.MongoSocketOpenException
+import com.mongodb.ServerAddress
+import com.mongodb.connection.SocketSettings
+import com.mongodb.connection.SslSettings
+import com.mongodb.spi.dns.InetAddressResolver
+import spock.lang.Ignore
+import spock.lang.IgnoreIf
+import spock.lang.Specification
+import com.mongodb.spock.Slow
+
+import javax.net.SocketFactory
+import java.util.concurrent.TimeUnit
+
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
+import static com.mongodb.ClusterFixture.getSslSettings
+
+class StreamSocketAddressSpecification extends Specification {
+
+    @Slow
+    @Ignore
+    def 'should successfully connect with working ip address group'() {
+        given:
+        def port = 27017
+        def socketSettings = SocketSettings.builder().connectTimeout(100, TimeUnit.MILLISECONDS).build()
+        def sslSettings = SslSettings.builder().build()
+        def bufferProvider = Stub(BufferProvider)
+
+        def inetAddresses = new InetSocketAddress[3]
+        inetAddresses[0] = new InetSocketAddress(InetAddress.getByName('1.2.3.4'), port)
+        inetAddresses[1] = new InetSocketAddress(InetAddress.getByName('2.3.4.5'), port)
+        inetAddresses[2] = new InetSocketAddress(InetAddress.getByName('127.0.0.1'), port)
+
+        def serverAddress = Stub(ServerAddress)
+        serverAddress.getSocketAddresses() >> inetAddresses
+
+        def socketFactory = Stub(SocketFactory)
+        def socket0 = SocketFactory.default.createSocket()
+        def socket1 = SocketFactory.default.createSocket()
+        def socket2 = SocketFactory.default.createSocket()
+        socketFactory.createSocket() >>> [socket0, socket1, socket2]
+
+        def socketStream = new SocketStream(serverAddress, null, socketSettings, sslSettings, socketFactory, bufferProvider)
+
+        when:
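+        // Opening the stream walks the stubbed address list in order: the two unroutable
+        // addresses (1.2.3.4, 2.3.4.5) fail to connect, and only the final loopback address
+        // succeeds, as the assertions below verify.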
socketStream.open(OPERATION_CONTEXT) + + then: + !socket0.isConnected() + !socket1.isConnected() + socket2.isConnected() + + cleanup: + socketStream?.close() + } + + @Slow + @IgnoreIf({ getSslSettings().isEnabled() }) + def 'should throw exception when attempting to connect with incorrect ip address group'() { + given: + def socketSettings = SocketSettings.builder().connectTimeout(100, TimeUnit.MILLISECONDS).build() + def sslSettings = SslSettings.builder().build() + def bufferProvider = Stub(BufferProvider) + + def serverAddress = new ServerAddress() + + def socketFactory = Stub(SocketFactory) + def socket0 = SocketFactory.default.createSocket() + def socket1 = SocketFactory.default.createSocket() + def socket2 = SocketFactory.default.createSocket() + socketFactory.createSocket() >>> [socket0, socket1, socket2] + + def inetAddressResolver = new InetAddressResolver() { + @Override + List lookupByName(String host) { + [InetAddress.getByName('1.2.3.4'), + InetAddress.getByName('2.3.4.5'), + InetAddress.getByName('1.2.3.5')] + } + } + + def socketStream = new SocketStream(serverAddress, inetAddressResolver, socketSettings, sslSettings, socketFactory, bufferProvider) + + when: + socketStream.open(OPERATION_CONTEXT) + + then: + thrown(MongoSocketOpenException) + !socket0.isConnected() + !socket1.isConnected() + !socket2.isConnected() + + cleanup: + socketStream?.close() + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/TestCommandListener.java b/driver-core/src/test/functional/com/mongodb/internal/connection/TestCommandListener.java new file mode 100644 index 00000000000..9381ad842a1 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/TestCommandListener.java @@ -0,0 +1,444 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.mongodb.internal.connection;
+
+import com.mongodb.MongoTimeoutException;
+import com.mongodb.client.TestListener;
+import com.mongodb.event.CommandEvent;
+import com.mongodb.event.CommandFailedEvent;
+import com.mongodb.event.CommandListener;
+import com.mongodb.event.CommandStartedEvent;
+import com.mongodb.event.CommandSucceededEvent;
+import com.mongodb.lang.Nullable;
+import org.bson.BsonDocument;
+import org.bson.BsonDocumentWriter;
+import org.bson.BsonDouble;
+import org.bson.BsonInt32;
+import org.bson.codecs.BsonDocumentCodec;
+import org.bson.codecs.BsonValueCodecProvider;
+import org.bson.codecs.Codec;
+import org.bson.codecs.EncoderContext;
+import org.bson.codecs.configuration.CodecProvider;
+import org.bson.codecs.configuration.CodecRegistries;
+import org.bson.codecs.configuration.CodecRegistry;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+
+import static com.mongodb.ClusterFixture.TIMEOUT;
+import static com.mongodb.internal.connection.InternalStreamConnection.getSecuritySensitiveCommands;
+import static com.mongodb.internal.connection.InternalStreamConnection.getSecuritySensitiveHelloCommands;
+import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException;
+import static java.util.Collections.emptyList;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+public class TestCommandListener implements CommandListener {
+    private final List<String> eventTypes;
+    private final List<String> ignoredCommandMonitoringEvents;
+    private final List<CommandEvent> events = new ArrayList<>();
+    @Nullable
+    private final TestListener listener;
+    private final Lock lock = new ReentrantLock();
+    private final Condition commandCompletedCondition = lock.newCondition();
+    private final Condition commandAnyEventCondition = lock.newCondition();
+    private final boolean observeSensitiveCommands;
+    private boolean ignoreNextSucceededOrFailedEvent;
+    private static final CodecRegistry CODEC_REGISTRY_HACK;
+
+    static {
+        CODEC_REGISTRY_HACK = CodecRegistries.fromProviders(new BsonValueCodecProvider(),
+                new CodecProvider() {
+                    @Override
+                    @SuppressWarnings("unchecked")
+                    public <T> Codec<T> get(final Class<T> clazz, final CodecRegistry registry) {
+                        // Use BsonDocumentCodec even for a private sub-class of BsonDocument
+                        if (BsonDocument.class.isAssignableFrom(clazz)) {
+                            return (Codec<T>) new BsonDocumentCodec(registry);
+                        }
+                        return null;
+                    }
+                });
+    }
+
+    /**
+     * When a test listener is set, this command listener will send string events to the
+     * test listener in the form {@code "<command name> <event type>"}, where the event
+     * type will be lowercase and will omit the terms "command" and "event".
+     * For example: {@code "saslContinue succeeded"}.
+     *
+     * @see InternalStreamConnection#setRecordEverything(boolean)
+     * @param listener the test listener
+     */
+    public TestCommandListener(final TestListener listener) {
+        this(Arrays.asList("commandStartedEvent", "commandSucceededEvent", "commandFailedEvent"), emptyList(), true, listener);
+    }
+
+    public TestCommandListener() {
+        this(Arrays.asList("commandStartedEvent", "commandSucceededEvent", "commandFailedEvent"), emptyList());
+    }
+
+    public TestCommandListener(final List<String> eventTypes, final List<String> ignoredCommandMonitoringEvents) {
+        this(eventTypes, ignoredCommandMonitoringEvents, true, null);
+    }
+
+    public TestCommandListener(final List<String> eventTypes, final List<String> ignoredCommandMonitoringEvents,
+                               final boolean observeSensitiveCommands, @Nullable final TestListener listener) {
+        this.eventTypes = eventTypes;
+        this.ignoredCommandMonitoringEvents = ignoredCommandMonitoringEvents;
+        this.observeSensitiveCommands = observeSensitiveCommands;
+        this.listener = listener;
+    }
+
+    public void reset() {
+        lock.lock();
+        try {
+            events.clear();
+            if (listener != null) {
+                listener.clear();
+            }
+        } finally {
+            lock.unlock();
+        }
+    }
+
+    public List<CommandEvent> getEvents() {
+        lock.lock();
+        try {
+            return new ArrayList<>(events);
+        } finally {
+            lock.unlock();
+        }
+    }
+
+    private void addEvent(final CommandEvent c) {
+        events.add(c);
+        String className = c.getClass().getSimpleName()
+                .replace("Command", "")
+                .replace("Event", "")
+                .toLowerCase();
+        // example: "saslContinue succeeded"
+        if (listener != null) {
+            listener.add(c.getCommandName() + " " + className);
+        }
+    }
+
+    public CommandStartedEvent getCommandStartedEvent(final String commandName) {
+        for (CommandEvent event : getCommandStartedEvents()) {
+            if (event instanceof CommandStartedEvent) {
+                CommandStartedEvent startedEvent = (CommandStartedEvent) event;
+                if (startedEvent.getCommandName().equals(commandName)) {
+                    return startedEvent;
+                }
+            }
+        }
+        throw new IllegalArgumentException(commandName + " not found in command started event list");
+    }
+
+    public CommandSucceededEvent getCommandSucceededEvent(final String commandName) {
+        for (CommandEvent event : getEvents()) {
+            if (event instanceof CommandSucceededEvent) {
+                CommandSucceededEvent succeededEvent = (CommandSucceededEvent) event;
+                if (succeededEvent.getCommandName().equals(commandName)) {
+                    return succeededEvent;
+                }
+            }
+        }
+        throw new IllegalArgumentException(commandName + " not found in command succeeded event list");
+    }
+
+    public CommandFailedEvent getCommandFailedEvent(final String commandName) {
+        return getEvents()
+                .stream()
+                .filter(e -> e instanceof CommandFailedEvent)
+                .filter(e -> e.getCommandName().equals(commandName))
+                .map(e -> (CommandFailedEvent) e)
+                .findFirst()
+                .orElseThrow(() -> new IllegalArgumentException(commandName + " not found in command failed event list"));
+    }
+
+    public List<CommandFailedEvent> getCommandFailedEvents() {
+        return getEvents(CommandFailedEvent.class, Integer.MAX_VALUE);
+    }
+
+    public List<CommandFailedEvent> getCommandFailedEvents(final String commandName) {
+        return getEvents(CommandFailedEvent.class,
+                commandEvent -> commandEvent.getCommandName().equals(commandName),
+                Integer.MAX_VALUE);
+    }
+
+    public List<CommandStartedEvent> getCommandStartedEvents() {
+        return getEvents(CommandStartedEvent.class, Integer.MAX_VALUE);
+    }
+
+    public List<CommandStartedEvent> getCommandStartedEvents(final String commandName) {
+        return getEvents(CommandStartedEvent.class,
+                commandEvent -> commandEvent.getCommandName().equals(commandName),
+                Integer.MAX_VALUE);
+    }
+
+    public List<CommandSucceededEvent> getCommandSucceededEvents() {
+        return
getEvents(CommandSucceededEvent.class, Integer.MAX_VALUE); + } + + private List getEvents(final Class type, final int maxEvents) { + return getEvents(type, e -> true, maxEvents); + } + + private List getEvents(final Class type, + final Predicate filter, + final int maxEvents) { + lock.lock(); + try { + return getEvents().stream() + .filter(e -> e.getClass() == type) + .filter(filter) + .map(type::cast) + .limit(maxEvents).collect(Collectors.toList()); + } finally { + lock.unlock(); + } + } + + private long getEventCount(final Class eventClass, final Predicate matcher) { + return getEvents().stream() + .filter(eventClass::isInstance) + .map(eventClass::cast) + .filter(matcher) + .count(); + } + + public void waitForFirstCommandCompletion() { + lock.lock(); + try { + while (!hasCompletedEvents(1)) { + try { + if (!commandCompletedCondition.await(TIMEOUT, TimeUnit.SECONDS)) { + throw new MongoTimeoutException("Timeout waiting for event"); + } + } catch (InterruptedException e) { + throw interruptAndCreateMongoInterruptedException("Interrupted waiting for event", e); + } + } + } finally { + lock.unlock(); + } + } + + private boolean hasCompletedEvents(final int numEventsCompleted) { + int count = 0; + for (CommandEvent event : events) { + if (event instanceof CommandSucceededEvent || event instanceof CommandFailedEvent) { + count++; + } + } + return count >= numEventsCompleted; + } + + + @Override + public void commandStarted(final CommandStartedEvent event) { + if (!eventTypes.contains("commandStartedEvent") || ignoredCommandMonitoringEvents.contains(event.getCommandName())) { + return; + } + else if (!observeSensitiveCommands) { + if (getSecuritySensitiveCommands().contains(event.getCommandName())) { + return; + } else if (getSecuritySensitiveHelloCommands().contains(event.getCommandName()) && event.getCommand().isEmpty()) { + ignoreNextSucceededOrFailedEvent = true; + return; + } + } + lock.lock(); + try { + addEvent(new CommandStartedEvent(event.getRequestContext(), event.getOperationId(), event.getRequestId(), + event.getConnectionDescription(), event.getDatabaseName(), event.getCommandName(), + event.getCommand() == null ? null : getWritableClone(event.getCommand()))); + commandAnyEventCondition.signal(); + } finally { + lock.unlock(); + } + } + + @Override + public void commandSucceeded(final CommandSucceededEvent event) { + if (!eventTypes.contains("commandSucceededEvent") || ignoredCommandMonitoringEvents.contains(event.getCommandName())) { + return; + } + else if (!observeSensitiveCommands) { + if (getSecuritySensitiveCommands().contains(event.getCommandName())) { + return; + } else if (getSecuritySensitiveHelloCommands().contains(event.getCommandName()) && ignoreNextSucceededOrFailedEvent) { + ignoreNextSucceededOrFailedEvent = false; + return; + } + } + lock.lock(); + try { + addEvent(new CommandSucceededEvent(event.getRequestContext(), event.getOperationId(), event.getRequestId(), + event.getConnectionDescription(), event.getDatabaseName(), event.getCommandName(), + event.getResponse() == null ? 
null : event.getResponse().clone(), + event.getElapsedTime(TimeUnit.NANOSECONDS))); + commandCompletedCondition.signal(); + commandAnyEventCondition.signal(); + } finally { + lock.unlock(); + } + } + + @Override + public void commandFailed(final CommandFailedEvent event) { + if (!eventTypes.contains("commandFailedEvent") || ignoredCommandMonitoringEvents.contains(event.getCommandName())) { + return; + } + else if (!observeSensitiveCommands) { + if (getSecuritySensitiveCommands().contains(event.getCommandName())) { + return; + } else if (getSecuritySensitiveHelloCommands().contains(event.getCommandName()) && ignoreNextSucceededOrFailedEvent) { + ignoreNextSucceededOrFailedEvent = false; + return; + } + } + lock.lock(); + try { + addEvent(event); + commandCompletedCondition.signal(); + commandAnyEventCondition.signal(); + } finally { + lock.unlock(); + } + } + + public void eventsWereDelivered(final List expectedEvents) { + lock.lock(); + try { + assertEquals(expectedEvents.size(), events.size()); + + int currentlyExpectedRequestId = 0; + for (int i = 0; i < events.size(); i++) { + CommandEvent actual = events.get(i); + CommandEvent expected = expectedEvents.get(i); + + if (actual instanceof CommandStartedEvent) { + currentlyExpectedRequestId = actual.getRequestId(); + } else { + assertEquals(currentlyExpectedRequestId, actual.getRequestId()); + } + + assertEventEquivalence(actual, expected); + } + } finally { + lock.unlock(); + } + } + + public void eventWasDelivered(final CommandEvent expectedEvent, final int index) { + lock.lock(); + try { + assertTrue(events.size() > index); + assertEventEquivalence(events.get(index), expectedEvent); + } finally { + lock.unlock(); + } + } + + private BsonDocument getWritableClone(final BsonDocument original) { + BsonDocument clone = new BsonDocument(); + BsonDocumentWriter writer = new BsonDocumentWriter(clone); + new BsonDocumentCodec(CODEC_REGISTRY_HACK).encode(writer, original, EncoderContext.builder().build()); + return clone; + } + + private void assertEventEquivalence(final CommandEvent actual, final CommandEvent expected) { + assertEquals(expected.getClass(), actual.getClass()); + + assertEquals(expected.getConnectionDescription(), actual.getConnectionDescription()); + + assertEquals(expected.getCommandName(), actual.getCommandName()); + + if (actual.getClass().equals(CommandStartedEvent.class)) { + assertEquivalence((CommandStartedEvent) actual, (CommandStartedEvent) expected); + } else if (actual.getClass().equals(CommandSucceededEvent.class)) { + assertEquivalence((CommandSucceededEvent) actual, (CommandSucceededEvent) expected); + } else if (actual.getClass().equals(CommandFailedEvent.class)) { + assertEquivalence((CommandFailedEvent) actual, (CommandFailedEvent) expected); + } else { + throw new UnsupportedOperationException("Unsupported event type: " + actual.getClass()); + } + } + + private void assertEquivalence(final CommandFailedEvent actual, final CommandFailedEvent expected) { + assertEquals(expected.getThrowable(), actual.getThrowable()); + } + + private void assertEquivalence(final CommandSucceededEvent actual, final CommandSucceededEvent expected) { + if (actual.getResponse() == null) { + assertNull(expected.getResponse()); + } else { + // ignore extra elements in the actual response + assertTrue("Expected response contains elements not in the actual response", + massageResponse(actual.getResponse()).entrySet() + .containsAll(massageResponse(expected.getResponse()).entrySet())); + } + } + + private BsonDocument massageResponse(final 
BsonDocument response) { + BsonDocument massagedResponse = getWritableClone(response); + // massage numbers to the same BSON type + if (massagedResponse.containsKey("ok")) { + massagedResponse.put("ok", new BsonDouble(response.getNumber("ok").doubleValue())); + } + if (massagedResponse.containsKey("n")) { + massagedResponse.put("n", new BsonInt32(response.getNumber("n").intValue())); + } + return massagedResponse; + } + + private void assertEquivalence(final CommandStartedEvent actual, final CommandStartedEvent expected) { + assertEquals(expected.getDatabaseName(), actual.getDatabaseName()); + assertEquals(expected.getCommand(), actual.getCommand()); + } + + public void waitForEvents(final Class eventClass, final Predicate matcher, final int count) + throws TimeoutException { + lock.lock(); + try { + while (getEventCount(eventClass, matcher) < count) { + try { + if (!commandAnyEventCondition.await(TIMEOUT, TimeUnit.SECONDS)) { + throw new MongoTimeoutException("Timeout waiting for command event"); + } + } catch (InterruptedException e) { + throw interruptAndCreateMongoInterruptedException("Interrupted waiting for event", e); + } + } + } finally { + lock.unlock(); + } + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/TlsChannelStreamFunctionalTest.java b/driver-core/src/test/functional/com/mongodb/internal/connection/TlsChannelStreamFunctionalTest.java new file mode 100644 index 00000000000..3af1eaa33e1 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/connection/TlsChannelStreamFunctionalTest.java @@ -0,0 +1,198 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.ClusterFixture; +import com.mongodb.MongoSocketOpenException; +import com.mongodb.ServerAddress; +import com.mongodb.connection.SocketSettings; +import com.mongodb.connection.SslSettings; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.TimeoutSettings; +import org.bson.ByteBuf; +import org.bson.ByteBufNIO; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; +import org.mockito.MockedStatic; +import org.mockito.Mockito; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLEngine; +import java.io.IOException; +import java.net.ServerSocket; +import java.nio.ByteBuffer; +import java.nio.channels.InterruptedByTimeoutException; +import java.nio.channels.SocketChannel; +import java.util.Collections; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.ClusterFixture.getPrimaryServerDescription; +import static com.mongodb.internal.connection.OperationContext.simpleOperationContext; +import static java.lang.String.format; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assumptions.assumeTrue; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.atLeast; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +class TlsChannelStreamFunctionalTest { + private static final SslSettings SSL_SETTINGS = SslSettings.builder().enabled(true).build(); + private static final String UNREACHABLE_PRIVATE_IP_ADDRESS = "10.255.255.1"; + private static final int UNREACHABLE_PORT = 65333; + + @ParameterizedTest + @ValueSource(ints = {500, 1000, 2000}) + void shouldInterruptConnectionEstablishmentWhenConnectionTimeoutExpires(final int connectTimeoutMs) throws IOException { + //given + try (StreamFactoryFactory streamFactoryFactory = new TlsChannelStreamFactoryFactory(new DefaultInetAddressResolver()); + MockedStatic socketChannelMockedStatic = Mockito.mockStatic(SocketChannel.class)) { + SingleResultSpyCaptor singleResultSpyCaptor = new SingleResultSpyCaptor<>(); + socketChannelMockedStatic.when(SocketChannel::open).thenAnswer(singleResultSpyCaptor); + + StreamFactory streamFactory = streamFactoryFactory.create(SocketSettings.builder() + .connectTimeout(connectTimeoutMs, TimeUnit.MILLISECONDS) + .build(), SSL_SETTINGS); + + Stream stream = streamFactory.create(new ServerAddress(UNREACHABLE_PRIVATE_IP_ADDRESS, UNREACHABLE_PORT)); + long connectOpenStart = System.nanoTime(); + + //when + OperationContext operationContext = createOperationContext(connectTimeoutMs); + MongoSocketOpenException mongoSocketOpenException = assertThrows(MongoSocketOpenException.class, () -> + stream.open(operationContext)); + + //then + long elapsedMs = 
TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - connectOpenStart); + // Allow for some timing imprecision due to test overhead. + int maximumAcceptableTimeoutOvershoot = 300; + + assertInstanceOf(InterruptedByTimeoutException.class, mongoSocketOpenException.getCause()); + assertFalse(connectTimeoutMs > elapsedMs, + format("Connection timed-out sooner than expected. ConnectTimeoutMS: %d, elapsedMs: %d", connectTimeoutMs, elapsedMs)); + assertTrue(elapsedMs - connectTimeoutMs <= maximumAcceptableTimeoutOvershoot, + format("Connection timeout overshoot time %d ms should be within %d ms", elapsedMs - connectTimeoutMs, + maximumAcceptableTimeoutOvershoot)); + + SocketChannel actualSpySocketChannel = singleResultSpyCaptor.getResult(); + assertNotNull(actualSpySocketChannel, "SocketChannel was not opened"); + verify(actualSpySocketChannel, atLeast(1)).close(); + } + } + + @ParameterizedTest + @ValueSource(ints = {0, 500, 1000, 2000}) + void shouldEstablishConnection(final int connectTimeoutMs) throws IOException, InterruptedException { + //given + try (StreamFactoryFactory streamFactoryFactory = new TlsChannelStreamFactoryFactory(new DefaultInetAddressResolver()); + MockedStatic socketChannelMockedStatic = Mockito.mockStatic(SocketChannel.class); + ServerSocket serverSocket = new ServerSocket(0, 1)) { + + SingleResultSpyCaptor singleResultSpyCaptor = new SingleResultSpyCaptor<>(); + socketChannelMockedStatic.when(SocketChannel::open).thenAnswer(singleResultSpyCaptor); + + StreamFactory streamFactory = streamFactoryFactory.create(SocketSettings.builder() + .connectTimeout(connectTimeoutMs, TimeUnit.MILLISECONDS) + .build(), SSL_SETTINGS); + + Stream stream = streamFactory.create(new ServerAddress(serverSocket.getInetAddress(), serverSocket.getLocalPort())); + try { + //when + stream.open(createOperationContext(connectTimeoutMs)); + + //then + SocketChannel actualSpySocketChannel = singleResultSpyCaptor.getResult(); + assertNotNull(actualSpySocketChannel, "SocketChannel was not opened"); + assertTrue(actualSpySocketChannel.isConnected()); + + // Wait to verify that socket was not closed by timeout. 
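+                // Sleeping for twice the connect timeout gives any erroneously-scheduled timeout task
+                // time to fire; the channel remaining open afterwards shows no spurious close occurred.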
+                MILLISECONDS.sleep(connectTimeoutMs * 2L);
+                assertTrue(actualSpySocketChannel.isConnected());
+                assertFalse(stream.isClosed());
+            } finally {
+                stream.close();
+            }
+        }
+    }
+
+    // Captures the single result returned by a stubbed factory method, wrapping it in a
+    // Mockito spy so that later interactions with it can be verified.
+    private static final class SingleResultSpyCaptor<T> implements Answer<T> {
+        private volatile T result = null;
+
+        public T getResult() {
+            return result;
+        }
+
+        @Override
+        public T answer(final InvocationOnMock invocationOnMock) throws Throwable {
+            if (result != null) {
+                fail(invocationOnMock.getMethod().getName() + " was called more than once");
+            }
+            @SuppressWarnings("unchecked")
+            T returnedValue = (T) invocationOnMock.callRealMethod();
+            result = Mockito.spy(returnedValue);
+            return result;
+        }
+    }
+
+    private static OperationContext createOperationContext(final int connectTimeoutMs) {
+        return simpleOperationContext(new TimeoutContext(TimeoutSettings.DEFAULT.withConnectTimeoutMS(connectTimeoutMs)));
+    }
+
+    @Test
+    @DisplayName("should not call beginHandshake more than once during TLS session establishment")
+    void shouldNotCallBeginHandshakeMoreThanOnceDuringTlsSessionEstablishment() throws Exception {
+        assumeTrue(ClusterFixture.getSslSettings().isEnabled());
+
+        //given
+        try (StreamFactoryFactory streamFactoryFactory = new TlsChannelStreamFactoryFactory(new DefaultInetAddressResolver())) {
+
+            SSLContext sslContext = Mockito.spy(SSLContext.getDefault());
+            SingleResultSpyCaptor<SSLEngine> singleResultSpyCaptor = new SingleResultSpyCaptor<>();
+            when(sslContext.createSSLEngine(anyString(), anyInt())).thenAnswer(singleResultSpyCaptor);
+
+            StreamFactory streamFactory = streamFactoryFactory.create(
+                    SocketSettings.builder().build(),
+                    SslSettings.builder(ClusterFixture.getSslSettings())
+                            .context(sslContext)
+                            .build());
+
+            Stream stream = streamFactory.create(getPrimaryServerDescription().getAddress());
+            stream.open(ClusterFixture.OPERATION_CONTEXT);
+            ByteBuf wrap = new ByteBufNIO(ByteBuffer.wrap(new byte[]{1, 3, 4}));
+
+            //when
+            stream.write(Collections.singletonList(wrap), ClusterFixture.OPERATION_CONTEXT);
+
+            //then
+            SECONDS.sleep(5);
+            verify(singleResultSpyCaptor.getResult(), times(1)).beginHandshake();
+        }
+    }
+}
diff --git a/driver-core/src/test/functional/com/mongodb/internal/connection/TopologyVersionHelperTest.java b/driver-core/src/test/functional/com/mongodb/internal/connection/TopologyVersionHelperTest.java
new file mode 100644
index 00000000000..67ce21eb40c
--- /dev/null
+++ b/driver-core/src/test/functional/com/mongodb/internal/connection/TopologyVersionHelperTest.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.mongodb.internal.connection; + +import com.mongodb.connection.TopologyVersion; +import org.bson.types.ObjectId; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.util.concurrent.ThreadLocalRandom; + +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +final class TopologyVersionHelperTest { + private ObjectId processIdA; + private ObjectId processIdB; + + @BeforeEach + void setUp() { + int objectIdCounterExclusiveUpperBound = 0xff_ff_ff; + ThreadLocalRandom rnd = ThreadLocalRandom.current(); + processIdA = new ObjectId(rnd.nextInt(), rnd.nextInt(objectIdCounterExclusiveUpperBound)); + processIdB = new ObjectId(rnd.nextInt(), rnd.nextInt(objectIdCounterExclusiveUpperBound)); + assertNotEquals(processIdA, processIdB); + } + + @Test + void newer() { + TopologyVersion a1 = new TopologyVersion(processIdA, 1); + TopologyVersion a2 = new TopologyVersion(processIdA, 2); + TopologyVersion b1 = new TopologyVersion(processIdB, 1); + assertAll( + () -> assertFalse(TopologyVersionHelper.newer(null, null)), + () -> assertFalse(TopologyVersionHelper.newer(null, a1)), + () -> assertFalse(TopologyVersionHelper.newer(a1, null)), + () -> assertFalse(TopologyVersionHelper.newer(a1, b1)), + () -> assertFalse(TopologyVersionHelper.newer(b1, a1)), + () -> assertFalse(TopologyVersionHelper.newer(a1, a2)), + () -> assertFalse(TopologyVersionHelper.newer(a1, a1)), + () -> assertTrue(TopologyVersionHelper.newer(a2, a1))); + } + + @Test + void newerOrEqual() { + TopologyVersion a1 = new TopologyVersion(processIdA, 1); + TopologyVersion a2 = new TopologyVersion(processIdA, 2); + TopologyVersion b1 = new TopologyVersion(processIdB, 1); + assertAll( + () -> assertFalse(TopologyVersionHelper.newerOrEqual(null, null)), + () -> assertFalse(TopologyVersionHelper.newerOrEqual(null, a1)), + () -> assertFalse(TopologyVersionHelper.newerOrEqual(a1, null)), + () -> assertFalse(TopologyVersionHelper.newerOrEqual(a1, b1)), + () -> assertFalse(TopologyVersionHelper.newerOrEqual(b1, a1)), + () -> assertFalse(TopologyVersionHelper.newerOrEqual(a1, a2)), + () -> assertTrue(TopologyVersionHelper.newerOrEqual(a1, a1)), + () -> assertTrue(TopologyVersionHelper.newerOrEqual(a2, a1))); + } + + private TopologyVersionHelperTest() { + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/AbortTransactionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/AbortTransactionOperationSpecification.groovy new file mode 100644 index 00000000000..fe7cd511c0c --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/AbortTransactionOperationSpecification.groovy @@ -0,0 +1,83 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation + +import com.mongodb.OperationFunctionalSpecification +import org.bson.BsonDocument + +import java.util.concurrent.TimeUnit + +import static com.mongodb.WriteConcern.ACKNOWLEDGED +import static com.mongodb.WriteConcern.MAJORITY + + +class AbortTransactionOperationSpecification extends OperationFunctionalSpecification { + + def 'should produce the expected command'() { + given: + def cannedResult = BsonDocument.parse('{value: {}}') + def expectedCommand = BsonDocument.parse('{abortTransaction: 1}') + + when: + def operation = new AbortTransactionOperation(ACKNOWLEDGED) + + then: + testOperationInTransaction(operation, [4, 0, 0], expectedCommand, async, cannedResult) + + when: + operation = new AbortTransactionOperation(MAJORITY) + expectedCommand.put('writeConcern', MAJORITY.asDocument()) + + then: + testOperationInTransaction(operation, [4, 0, 0], expectedCommand, async, cannedResult, true) + + where: + async << [true, false] + } + + def 'should retry if the connection initially fails'() { + given: + def cannedResult = BsonDocument.parse('{value: {}}') + def expectedCommand = BsonDocument.parse('{abortTransaction: 1, writeConcern: {w: "majority", wtimeout: 10}}') + + when: + def writeConcern = MAJORITY.withWTimeout(10, TimeUnit.MILLISECONDS) + def operation = new AbortTransactionOperation(writeConcern) + + then: + testOperationRetries(operation, [4, 0, 0], expectedCommand, async, cannedResult, true) + + when: + writeConcern = MAJORITY + operation = new AbortTransactionOperation(writeConcern) + expectedCommand.put('writeConcern', writeConcern.asDocument()) + + then: + testOperationRetries(operation, [4, 0, 0], expectedCommand, async, cannedResult, true) + + when: + writeConcern = ACKNOWLEDGED + operation = new AbortTransactionOperation(writeConcern) + expectedCommand.remove('writeConcern') + + then: + testOperationRetries(operation, [4, 0, 0], expectedCommand, async, cannedResult, true) + + where: + async << [true, false] + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateOperationSpecification.groovy new file mode 100644 index 00000000000..0ce503f466e --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateOperationSpecification.groovy @@ -0,0 +1,483 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation + + +import com.mongodb.MongoNamespace +import com.mongodb.OperationFunctionalSpecification +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import com.mongodb.ServerAddress +import com.mongodb.WriteConcern +import com.mongodb.client.model.Collation +import com.mongodb.client.model.CreateCollectionOptions +import com.mongodb.client.model.Filters +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ConnectionDescription +import com.mongodb.connection.ConnectionId +import com.mongodb.connection.ServerId +import com.mongodb.connection.ServerVersion +import com.mongodb.internal.binding.AsyncConnectionSource +import com.mongodb.internal.binding.AsyncReadBinding +import com.mongodb.internal.binding.ConnectionSource +import com.mongodb.internal.binding.ReadBinding +import com.mongodb.internal.connection.AsyncConnection +import com.mongodb.internal.connection.Connection +import com.mongodb.internal.session.SessionContext +import org.bson.BsonArray +import org.bson.BsonBoolean +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonInt64 +import org.bson.BsonString +import org.bson.BsonTimestamp +import org.bson.Document +import org.bson.codecs.BsonDocumentCodec +import org.bson.codecs.DocumentCodec +import spock.lang.IgnoreIf + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ClusterFixture.collectCursorResults +import static com.mongodb.ClusterFixture.executeAsync +import static com.mongodb.ClusterFixture.getAsyncCluster +import static com.mongodb.ClusterFixture.getBinding +import static com.mongodb.ClusterFixture.getCluster +import static com.mongodb.ClusterFixture.isSharded +import static com.mongodb.ClusterFixture.isStandalone +import static com.mongodb.ExplainVerbosity.QUERY_PLANNER +import static com.mongodb.connection.ServerType.STANDALONE +import static com.mongodb.internal.connection.ServerHelper.waitForLastRelease +import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand +import static com.mongodb.internal.operation.ServerVersionHelper.UNKNOWN_WIRE_VERSION +import static com.mongodb.internal.operation.TestOperationHelper.getKeyPattern + +class AggregateOperationSpecification extends OperationFunctionalSpecification { + + def setup() { + Document pete = new Document('name', 'Pete').append('job', 'handyman') + Document sam = new Document('name', 'Sam').append('job', 'plumber') + Document pete2 = new Document('name', 'Pete').append('job', 'electrician') + getCollectionHelper().insertDocuments(new DocumentCodec(), pete, sam, pete2) + } + + def 'should have the correct defaults'() { + when: + AggregateOperation operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()) + + then: + operation.getAllowDiskUse() == null + operation.getBatchSize() == null + operation.getCollation() == null + operation.getPipeline() == [] + } + + def 'should set optional values correctly'(){ + given: + def hint = BsonDocument.parse('{a: 1}') + + when: + AggregateOperation operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()) + .allowDiskUse(true) + .batchSize(10) + .collation(defaultCollation) + .hint(hint) + + then: + operation.getAllowDiskUse() + operation.getBatchSize() == 10 + operation.getCollation() == defaultCollation + operation.getHint() == hint + } + + def 'should throw when using invalid hint'() { + given: + def hint = new BsonString('ok') + def operation = new 
AggregateOperation(getNamespace(), [], new DocumentCodec()).hint(hint) + + when: + operation.getHint() + + then: + thrown(IllegalArgumentException) + + when: + def result = operation.getHintBsonValue() + + then: + result == hint + + when: + operation.hint(new BsonInt32(1)) + + then: + thrown(IllegalArgumentException) + } + + def 'should create the expected command'() { + when: + def pipeline = [new BsonDocument('$match', new BsonDocument('a', new BsonString('A')))] + def operation = new AggregateOperation(helper.namespace, pipeline, new DocumentCodec()) + + def expectedCommand = new BsonDocument('aggregate', new BsonString(helper.namespace.getCollectionName())) + .append('pipeline', new BsonArray(pipeline)) + .append('cursor', new BsonDocument()) + + then: + testOperation(operation, [3, 4, 0], expectedCommand, async, helper.cursorResult) + + when: + operation = new AggregateOperation(helper.namespace, pipeline, new DocumentCodec()) + .allowDiskUse(true) + .batchSize(10) + .collation(defaultCollation) + + expectedCommand = new BsonDocument('aggregate', new BsonString(helper.namespace.getCollectionName())) + .append('pipeline', new BsonArray(pipeline)) + .append('allowDiskUse', new BsonBoolean(true)) + .append('collation', defaultCollation.asDocument()) + .append('cursor', new BsonDocument('batchSize', new BsonInt32(10))) + + then: + testOperation(operation, [3, 4, 0], expectedCommand, async, helper.cursorResult) + + where: + async << [true, false] + } + + def 'should support collation'() { + given: + def document = BsonDocument.parse('{_id: 1, str: "foo"}') + getCollectionHelper().insertDocuments(document) + def pipeline = [BsonDocument.parse('{$match: {str: "FOO"}}')] + def operation = new AggregateOperation(namespace, pipeline, new BsonDocumentCodec()) + .collation(caseInsensitiveCollation) + + when: + def result = executeAndCollectBatchCursorResults(operation, async) + + then: + result == [document] + + where: + async << [true, false] + } + + @IgnoreIf({ isStandalone() }) + def 'should support changeStreams'() { + given: + def expected = [createExpectedChangeNotification(namespace, 0), createExpectedChangeNotification(namespace, 1)] + def pipeline = ['{$changeStream: {}}', '{$project: {"_id.clusterTime": 0, "_id.uuid": 0}}'].collect { BsonDocument.parse(it) } + def operation = new AggregateOperation(namespace, pipeline, new BsonDocumentCodec()) + def helper = getCollectionHelper() + + when: + helper.create(helper.getNamespace().getCollectionName(), new CreateCollectionOptions()) + def cursor = execute(operation, async) + helper.insertDocuments(['{_id: 0, a: 0}', '{_id: 1, a: 1}'].collect { BsonDocument.parse(it) }) + + then: + def nextDoc = next(cursor, async).collect { doc -> + doc.remove('_id') + doc.remove('clusterTime') + doc.remove('wallTime') + doc.remove('collectionUUID') + doc + } + nextDoc == expected + + cleanup: + cursor?.close() + waitForLastRelease(async ? 
getAsyncCluster() : getCluster()) + + where: + async << [true, false] + } + + def 'should be able to aggregate'() { + when: + AggregateOperation operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()) + def batchCursor = execute(operation, async) + def results = collectCursorResults(batchCursor)*.getString('name') + + then: + results.size() == 3 + results.containsAll(['Pete', 'Sam']) + + where: + async << [true, false] + } + + def 'should be able to aggregate on a view'() { + given: + def viewSuffix = '-view' + def viewName = getCollectionName() + viewSuffix + def viewNamespace = new MongoNamespace(getDatabaseName(), viewName) + new CreateViewOperation(getDatabaseName(), viewName, getCollectionName(), [], WriteConcern.ACKNOWLEDGED) + .execute(getBinding(getCluster())) + + when: + AggregateOperation operation = new AggregateOperation(viewNamespace, [], new DocumentCodec()) + def batchCursor = execute(operation, async) + def results = collectCursorResults(batchCursor)*.getString('name') + + then: + results.size() == 3 + results.containsAll(['Pete', 'Sam']) + + cleanup: + new DropCollectionOperation(viewNamespace, WriteConcern.ACKNOWLEDGED) + .execute(getBinding(getCluster())) + + where: + async << [true, false] + } + + def 'should be able to aggregate with pipeline'() { + when: + AggregateOperation operation = new AggregateOperation(getNamespace(), + [new BsonDocument('$match', new BsonDocument('job', new BsonString('plumber')))], new DocumentCodec()) + def batchCursor = execute(operation, async) + def results = collectCursorResults(batchCursor)*.getString('name') + + then: + results.size() == 1 + results == ['Sam'] + + where: + async << [true, false] + } + + def 'should allow disk usage'() { + when: + AggregateOperation operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()) + .allowDiskUse(allowDiskUse) + def cursor = operation.execute(getBinding()) + + then: + cursor.next()*.getString('name') == ['Pete', 'Sam', 'Pete'] + + where: + allowDiskUse << [null, true, false] + } + + def 'should allow batch size'() { + when: + AggregateOperation operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()) + .batchSize(batchSize) + def cursor = operation.execute(getBinding()) + + then: + cursor.next()*.getString('name') == ['Pete', 'Sam', 'Pete'] + + where: + batchSize << [null, 0, 10] + } + + def 'should be able to explain an empty pipeline'() { + given: + def operation = new AggregateOperation(getNamespace(), [], new BsonDocumentCodec()) + operation = async ? 
operation.asAsyncExplainableOperation(QUERY_PLANNER, new BsonDocumentCodec()) :
+ operation.asExplainableOperation(QUERY_PLANNER, new BsonDocumentCodec())
+
+ when:
+ def result = execute(operation, async)
+
+ then:
+ result.containsKey('stages') || result.containsKey('queryPlanner') || result.containsKey('shards')
+
+ where:
+ async << [true, false]
+ }
+
+ def 'should be able to aggregate with collation'() {
+ when:
+ AggregateOperation operation = new AggregateOperation(getNamespace(),
+ [BsonDocument.parse('{$match: {job : "plumber"}}')], new DocumentCodec()
+ ).collation(options)
+ def batchCursor = execute(operation, async)
+ def results = collectCursorResults(batchCursor)*.getString('name')
+
+ then:
+ results.size() == 1
+ results == ['Sam']
+
+ where:
+ [async, options] << [[true, false], [defaultCollation, null, Collation.builder().build()]].combinations()
+ }
+
+ @IgnoreIf({ isSharded() })
+ def 'should apply $hint'() {
+ given:
+ def index = new BsonDocument('a', new BsonInt32(1))
+ collectionHelper.createIndex(index)
+
+ def operation = new AggregateOperation(getNamespace(), [], new DocumentCodec())
+ .hint(hint)
+
+ when:
+ execute(operation, async)
+ BsonDocument explainPlan = execute(operation.asExplainableOperation(QUERY_PLANNER, new BsonDocumentCodec()), async)
+
+ then:
+ getKeyPattern(explainPlan) == index
+
+ where:
+ [async, hint] << [[true, false], [BsonDocument.parse('{a: 1}'), new BsonString('a_1')]].combinations()
+ }
+
+ @IgnoreIf({ isSharded() })
+ def 'should apply comment'() {
+ given:
+ def profileCollectionHelper = getCollectionHelper(new MongoNamespace(getDatabaseName(), 'system.profile'))
+ new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(2)),
+ new BsonDocumentCodec()).execute(getBinding())
+ def expectedComment = 'this is a comment'
+ def operation = new AggregateOperation(getNamespace(), [], new DocumentCodec())
+ .comment(new BsonString(expectedComment))
+
+ when:
+ execute(operation, async)
+
+ then:
+ Document profileDocument = profileCollectionHelper.find(Filters.exists('command.aggregate')).get(0)
+ ((Document) profileDocument.get('command')).get('comment') == expectedComment
+
+ cleanup:
+ new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(0)),
+ new BsonDocumentCodec()).execute(getBinding())
+ profileCollectionHelper.drop()
+
+ where:
+ async << [true, false]
+ }
+
+ def 'should add read concern to command'() {
+ given:
+ def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext)
+ def binding = Stub(ReadBinding)
+ def source = Stub(ConnectionSource)
+ def connection = Mock(Connection)
+ binding.readPreference >> ReadPreference.primary()
+ binding.operationContext >> operationContext
+ binding.readConnectionSource >> source
+ source.connection >> connection
+ source.retain() >> source
+ source.operationContext >> operationContext
+ def commandDocument = new BsonDocument('aggregate', new BsonString(getCollectionName()))
+ .append('pipeline', new BsonArray())
+ .append('cursor', new BsonDocument())
+ appendReadConcernToCommand(operationContext.getSessionContext(), UNKNOWN_WIRE_VERSION, commandDocument)
+
+ def operation = new AggregateOperation(getNamespace(), [], new DocumentCodec())
+
+ when:
+ operation.execute(binding)
+
+ then:
+ _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())),
+ 6, STANDALONE, 1000, 100000, 100000, [])
+ 1 * connection.command(_, commandDocument, _, _, _,
operationContext) >> + new BsonDocument('cursor', new BsonDocument('id', new BsonInt64(1)) + .append('ns', new BsonString(getNamespace().getFullName())) + .append('firstBatch', new BsonArrayWrapper([]))) + 1 * connection.release() + + where: + sessionContext << [ + Stub(SessionContext) { + isCausallyConsistent() >> true + getOperationTime() >> new BsonTimestamp(42, 0) + hasActiveTransaction() >> false + getReadConcern() >> ReadConcern.MAJORITY + } + ] + } + + def 'should add read concern to command asynchronously'() { + given: + def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext) + def binding = Stub(AsyncReadBinding) + def source = Stub(AsyncConnectionSource) + def connection = Mock(AsyncConnection) + binding.operationContext >> operationContext + binding.getReadConnectionSource(_) >> { it[0].onResult(source, null) } + source.operationContext >> operationContext + source.getConnection(_) >> { it[0].onResult(connection, null) } + source.retain() >> source + def commandDocument = new BsonDocument('aggregate', new BsonString(getCollectionName())) + .append('pipeline', new BsonArray()) + .append('cursor', new BsonDocument()) + appendReadConcernToCommand(sessionContext, UNKNOWN_WIRE_VERSION, commandDocument) + + def operation = new AggregateOperation(getNamespace(), [], new DocumentCodec()) + + when: + executeAsync(operation, binding) + + then: + _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())), + 6, STANDALONE, 1000, 100000, 100000, []) + 1 * connection.commandAsync(_, commandDocument, _, _, _, operationContext, _) >> { + it.last().onResult(new BsonDocument('cursor', new BsonDocument('id', new BsonInt64(1)) + .append('ns', new BsonString(getNamespace().getFullName())) + .append('firstBatch', new BsonArrayWrapper([]))), null) + } + 1 * connection.release() + + where: + sessionContext << [ + Stub(SessionContext) { + isCausallyConsistent() >> true + getOperationTime() >> new BsonTimestamp(42, 0) + hasActiveTransaction() >> false + getReadConcern() >> ReadConcern.MAJORITY + } + ] + } + + def 'should use the ReadBindings readPreference to set secondaryOk'() { + when: + def operation = new AggregateOperation(helper.namespace, [], new BsonDocumentCodec()) + + then: + testOperationSecondaryOk(operation, [2, 6, 0], readPreference, async, helper.cursorResult) + + where: + [async, readPreference] << [[true, false], [ReadPreference.primary(), ReadPreference.secondary()]].combinations() + } + + def helper = [ + dbName: 'db', + namespace: new MongoNamespace('db', 'coll'), + twoSixConnectionDescription : Stub(ConnectionDescription) { + getServerVersion() >> new ServerVersion([2, 6, 0]) + }, + inlineResult: BsonDocument.parse('{ok: 1.0}').append('result', new BsonArrayWrapper([])), + cursorResult: BsonDocument.parse('{ok: 1.0}') + .append('cursor', new BsonDocument('id', new BsonInt64(0)).append('ns', new BsonString('db.coll')) + .append('firstBatch', new BsonArrayWrapper([]))) + ] + + private static BsonDocument createExpectedChangeNotification(MongoNamespace namespace, int idValue) { + BsonDocument.parse("""{ + "operationType": "insert", + "fullDocument": {"_id": $idValue, "a": $idValue}, + "ns": {"coll": "${namespace.getCollectionName()}", "db": "${namespace.getDatabaseName()}"}, + "documentKey": {"_id": $idValue} + }""") + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateToCollectionOperationSpecification.groovy 
b/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateToCollectionOperationSpecification.groovy new file mode 100644 index 00000000000..ed617289316 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/AggregateToCollectionOperationSpecification.groovy @@ -0,0 +1,313 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation + +import com.mongodb.MongoCommandException +import com.mongodb.MongoNamespace +import com.mongodb.MongoWriteConcernException +import com.mongodb.OperationFunctionalSpecification +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import com.mongodb.WriteConcern +import com.mongodb.client.model.Aggregates +import com.mongodb.client.model.CreateCollectionOptions +import com.mongodb.client.model.Filters +import com.mongodb.client.model.ValidationOptions +import com.mongodb.client.test.CollectionHelper +import com.mongodb.internal.client.model.AggregationLevel +import org.bson.BsonArray +import org.bson.BsonBoolean +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonString +import org.bson.Document +import org.bson.codecs.BsonDocumentCodec +import org.bson.codecs.BsonValueCodecProvider +import org.bson.codecs.DocumentCodec +import spock.lang.IgnoreIf + +import static com.mongodb.ClusterFixture.getBinding +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet +import static com.mongodb.ClusterFixture.isSharded +import static com.mongodb.WriteConcern.ACKNOWLEDGED +import static com.mongodb.client.model.Filters.gte +import static org.bson.codecs.configuration.CodecRegistries.fromProviders + +class AggregateToCollectionOperationSpecification extends OperationFunctionalSpecification { + def registry = fromProviders([new BsonValueCodecProvider()]) + + def aggregateCollectionNamespace = new MongoNamespace(getDatabaseName(), 'aggregateCollectionName') + + def setup() { + CollectionHelper.drop(aggregateCollectionNamespace) + Document pete = new Document('name', 'Pete').append('job', 'handyman') + Document sam = new Document('name', 'Sam').append('job', 'plumber') + Document pete2 = new Document('name', 'Pete').append('job', 'electrician') + getCollectionHelper().insertDocuments(new DocumentCodec(), pete, sam, pete2) + } + + def 'should have the correct defaults'() { + given: + def pipeline = [new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))] + + when: + AggregateToCollectionOperation operation = createOperation(getNamespace(), pipeline, ACKNOWLEDGED) + + then: + operation.getAllowDiskUse() == null + operation.getPipeline() == pipeline + operation.getBypassDocumentValidation() == null + operation.getWriteConcern() == ACKNOWLEDGED + operation.getCollation() == null + } + + def 'should set optional values correctly (with write concern)'(){ + given: + def pipeline = [new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))] + + 
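// Note: each option setter returns the operation itself, so the configuration below can be chained fluently.
+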
when: + AggregateToCollectionOperation operation = + createOperation(getNamespace(), pipeline, WriteConcern.MAJORITY) + .allowDiskUse(true) + .bypassDocumentValidation(true) + .collation(defaultCollation) + + then: + operation.getAllowDiskUse() + operation.getBypassDocumentValidation() == true + operation.getWriteConcern() == WriteConcern.MAJORITY + operation.getCollation() == defaultCollation + } + + def 'should set optional values correctly (with read concern)'(){ + given: + def pipeline = [new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))] + + when: + AggregateToCollectionOperation operation = createOperation(getNamespace(), pipeline, ReadConcern.DEFAULT) + .allowDiskUse(true) + .bypassDocumentValidation(true) + .collation(defaultCollation) + + then: + operation.getAllowDiskUse() + operation.getBypassDocumentValidation() == true + operation.getReadConcern() == ReadConcern.DEFAULT + operation.getCollation() == defaultCollation + } + + def 'should not accept an empty pipeline'() { + when: + createOperation(getNamespace(), [], ACKNOWLEDGED) + + + then: + thrown(IllegalArgumentException) + } + + def 'should be able to output to a collection'() { + when: + AggregateToCollectionOperation operation = createOperation(getNamespace(), + [new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))], + ACKNOWLEDGED) + execute(operation, async) + + then: + getCollectionHelper(aggregateCollectionNamespace).count() == 3 + + where: + async << [true, false] + } + + def 'should be able to merge into a collection'() { + when: + AggregateToCollectionOperation operation = createOperation(getNamespace(), + [new BsonDocument('$merge', new BsonDocument('into', new BsonString(aggregateCollectionNamespace.collectionName)))]) + execute(operation, async) + + then: + getCollectionHelper(aggregateCollectionNamespace).count() == 3 + + where: + async << [true, false] + } + + def 'should be able to match then output to a collection'() { + when: + AggregateToCollectionOperation operation = createOperation(getNamespace(), + [new BsonDocument('$match', new BsonDocument('job', new BsonString('plumber'))), + new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))], ACKNOWLEDGED) + execute(operation, async) + + then: + getCollectionHelper(aggregateCollectionNamespace).count() == 1 + + where: + async << [true, false] + } + + @IgnoreIf({ !isDiscoverableReplicaSet() }) + def 'should throw on write concern error'() { + given: + AggregateToCollectionOperation operation = createOperation(getNamespace(), + [new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))], + new WriteConcern(5)) + + when: + execute(operation, async) + + then: + def ex = thrown(MongoWriteConcernException) + ex.writeConcernError.code == 100 + ex.writeResult.wasAcknowledged() + + where: + async << [true, false] + } + + def 'should support bypassDocumentValidation'() { + given: + def collectionOutHelper = getCollectionHelper(new MongoNamespace(getDatabaseName(), 'collectionOut')) + collectionOutHelper.create('collectionOut', new CreateCollectionOptions().validationOptions( + new ValidationOptions().validator(gte('level', 10)))) + getCollectionHelper().insertDocuments(BsonDocument.parse('{ level: 9 }')) + + when: + AggregateToCollectionOperation operation = createOperation(getNamespace(), + [BsonDocument.parse('{$out: "collectionOut"}')], ACKNOWLEDGED) + execute(operation, async) + + then: + thrown(MongoCommandException) + + when: + 
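// Passing bypassDocumentValidation(false) explicitly should behave exactly like the default: the server still validates and rejects the document.
+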
execute(operation.bypassDocumentValidation(false), async) + + then: + thrown(MongoCommandException) + + when: + execute(operation.bypassDocumentValidation(true), async) + + then: + notThrown(MongoCommandException) + + cleanup: + collectionOutHelper?.drop() + + where: + async << [true, false] + } + + def 'should create the expected command'() { + when: + def pipeline = [BsonDocument.parse('{$out: "collectionOut"}')] + AggregateToCollectionOperation operation = new AggregateToCollectionOperation(getNamespace(), pipeline, + ReadConcern.MAJORITY, WriteConcern.MAJORITY) + .bypassDocumentValidation(true) + def expectedCommand = new BsonDocument('aggregate', new BsonString(getNamespace().getCollectionName())) + .append('pipeline', new BsonArray(pipeline)) + + if (includeBypassValidation) { + expectedCommand.put('bypassDocumentValidation', BsonBoolean.TRUE) + } + if (includeReadConcern) { + expectedCommand.append('readConcern', new BsonDocument('level', new BsonString('majority'))) + } + if (includeWriteConcern) { + expectedCommand.append('writeConcern', new BsonDocument('w', new BsonString('majority'))) + } + if (includeCollation) { + operation.collation(defaultCollation) + expectedCommand.append('collation', defaultCollation.asDocument()) + } + if (useCursor) { + expectedCommand.append('cursor', new BsonDocument()) + } + if (useHint) { + operation.hint(new BsonString('x_1')) + expectedCommand.append('hint', new BsonString('x_1')) + } + + then: + testOperation(operation, serverVersion, expectedCommand, async, BsonDocument.parse('{ok: 1}'), + true, false, ReadPreference.primary(), false) + + where: + serverVersion | includeBypassValidation | includeReadConcern | includeWriteConcern | includeCollation | async | useCursor | useHint + [3, 6, 0] | true | true | true | true | true | true | true + [3, 6, 0] | true | true | true | true | false | true | false + } + + def 'should support collation'() { + given: + getCollectionHelper().insertDocuments(BsonDocument.parse('{_id: 1, str: "foo"}')) + def pipeline = [BsonDocument.parse('{$match: {str: "FOO"}}'), + new BsonDocument('$out', new BsonString(aggregateCollectionNamespace.collectionName))] + AggregateToCollectionOperation operation = createOperation(getNamespace(), pipeline, ACKNOWLEDGED) + .collation(caseInsensitiveCollation) + + when: + execute(operation, async) + + then: + getCollectionHelper(aggregateCollectionNamespace).count() == 1 + + where: + async << [true, false] + } + + @IgnoreIf({ isSharded() }) + def 'should apply comment'() { + given: + def profileCollectionHelper = getCollectionHelper(new MongoNamespace(getDatabaseName(), 'system.profile')) + new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(2)), + new BsonDocumentCodec()).execute(getBinding()) + def expectedComment = 'this is a comment' + AggregateToCollectionOperation operation = createOperation(getNamespace(), + [Aggregates.out('outputCollection').toBsonDocument(BsonDocument, registry)], ACKNOWLEDGED) + .comment(new BsonString(expectedComment)) + + when: + execute(operation, async) + + then: + Document profileDocument = profileCollectionHelper.find(Filters.exists('command.aggregate')).get(0) + ((Document) profileDocument.get('command')).get('comment') == expectedComment + + cleanup: + new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(0)), + new BsonDocumentCodec()).execute(getBinding()) + profileCollectionHelper.drop() + + where: + async << [true, false] + } + + def createOperation(final MongoNamespace 
namespace, final List pipeline) { + new AggregateToCollectionOperation(namespace, pipeline, null, null, AggregationLevel.COLLECTION) + } + + def createOperation(final MongoNamespace namespace, final List pipeline, final WriteConcern writeConcern) { + new AggregateToCollectionOperation(namespace, pipeline, null, writeConcern, AggregationLevel.COLLECTION) + } + + def createOperation(final MongoNamespace namespace, final List pipeline, final ReadConcern readConcern) { + new AggregateToCollectionOperation(namespace, pipeline, readConcern, null, AggregationLevel.COLLECTION) + } + +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorFunctionalTest.java b/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorFunctionalTest.java new file mode 100644 index 00000000000..88dc199ee29 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorFunctionalTest.java @@ -0,0 +1,501 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + + +import com.mongodb.MongoCursorNotFoundException; +import com.mongodb.MongoQueryException; +import com.mongodb.ReadPreference; +import com.mongodb.ServerCursor; +import com.mongodb.async.FutureResultCallback; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.Filters; +import com.mongodb.client.model.OperationTest; +import com.mongodb.internal.binding.AsyncConnectionSource; +import com.mongodb.internal.connection.AsyncConnection; +import com.mongodb.internal.validator.NoOpFieldNameValidator; +import org.bson.BsonArray; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonInt64; +import org.bson.BsonString; +import org.bson.BsonTimestamp; +import org.bson.Document; +import org.bson.codecs.BsonDocumentCodec; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import static com.mongodb.ClusterFixture.checkReferenceCountReachesTarget; +import static com.mongodb.ClusterFixture.getAsyncBinding; +import static com.mongodb.ClusterFixture.getConnection; +import static com.mongodb.ClusterFixture.getReferenceCountAfterTimeout; +import static com.mongodb.ClusterFixture.getWriteConnectionSource; +import static com.mongodb.ClusterFixture.isSharded; +import static 
com.mongodb.internal.operation.CommandBatchCursorHelper.FIRST_BATCH;
+import static com.mongodb.internal.operation.TestOperationHelper.makeAdditionalGetMoreCall;
+import static java.util.Collections.singletonList;
+import static java.util.stream.Stream.generate;
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assumptions.assumeFalse;
+import static org.junit.jupiter.params.provider.Arguments.arguments;
+
+public class AsyncCommandBatchCursorFunctionalTest extends OperationTest {
+
+ private AsyncConnectionSource connectionSource;
+ private AsyncConnection connection;
+ private AsyncCommandBatchCursor<Document> cursor;
+
+ @BeforeEach
+ void setup() throws Throwable {
+ List<BsonDocument> documents = IntStream.rangeClosed(1, 10)
+ .mapToObj(i -> new BsonDocument("i", new BsonInt32(i)))
+ .collect(Collectors.toList());
+ getCollectionHelper().insertDocuments(documents);
+
+ connectionSource = getWriteConnectionSource(getAsyncBinding());
+ connection = getConnection(connectionSource);
+ }
+
+ @AfterEach
+ void cleanup() {
+ ifNotNull(cursor, AsyncCommandBatchCursor::close);
+ ifNotNull(connectionSource, cs -> {
+ getReferenceCountAfterTimeout(cs, 1);
+ cs.release();
+ });
+ ifNotNull(connection, c -> {
+ getReferenceCountAfterTimeout(c, 1);
+ c.release();
+ });
+ }
+
+ @Test
+ @DisplayName("should exhaust cursor with multiple batches")
+ void shouldExhaustCursorAsyncWithMultipleBatches() {
+ // given
+ BsonDocument commandResult = executeFindCommand(0, 3); // Fetch in batches of size 3
+ cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 3, 0, DOCUMENT_DECODER,
+ null, connectionSource, connection);
+
+ // when
+ FutureResultCallback<List<List<Document>>> futureCallback = new FutureResultCallback<>();
+ cursor.exhaust(futureCallback);
+
+ // then
+ List<List<Document>> resultBatches = futureCallback.get(5, TimeUnit.SECONDS);
+
+ assertTrue(cursor.isClosed(), "Expected cursor to be closed.");
+ assertEquals(4, resultBatches.size(), "Expected 4 batches for 10 documents with batch size of 3.");
+
+ int totalDocuments = resultBatches.stream().mapToInt(List::size).sum();
+ assertEquals(10, totalDocuments, "Expected a total of 10 documents.");
+ }
+
+ @Test
+ @DisplayName("should exhaust cursor with closed cursor")
+ void shouldExhaustCursorAsyncWithClosedCursor() {
+ // given
+ BsonDocument commandResult = executeFindCommand(0, 3);
+ cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 3, 0, DOCUMENT_DECODER,
+ null, connectionSource, connection);
+
+ cursor.close();
+
+ // when
+ FutureResultCallback<List<List<Document>>> futureCallback = new FutureResultCallback<>();
+ cursor.exhaust(futureCallback);
+
+ //then
+ IllegalStateException illegalStateException = assertThrows(IllegalStateException.class, () -> {
+ futureCallback.get(5, TimeUnit.SECONDS);
+ }, "Expected an exception when operating on a closed cursor.");
+ assertEquals("Cursor has been closed", illegalStateException.getMessage());
+ }
+
+ @Test
+ @DisplayName("should exhaust cursor with empty cursor")
+ void shouldExhaustCursorAsyncWithEmptyCursor() {
+ // given
+ getCollectionHelper().deleteMany(Filters.empty());
+
+ BsonDocument
commandResult = executeFindCommand(0, 3); // No documents to fetch
+ cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 3, 0, DOCUMENT_DECODER,
+ null, connectionSource, connection);
+
+ // when
+ FutureResultCallback<List<List<Document>>> futureCallback = new FutureResultCallback<>();
+ cursor.exhaust(futureCallback);
+
+ // then
+ List<List<Document>> resultBatches = futureCallback.get(5, TimeUnit.SECONDS);
+ assertTrue(resultBatches.isEmpty(), "Expected no batches for an empty cursor.");
+ assertTrue(cursor.isClosed(), "Expected cursor to be closed.");
+ }
+
+ @Test
+ @DisplayName("server cursor should not be null")
+ void theServerCursorShouldNotBeNull() {
+ BsonDocument commandResult = executeFindCommand(2);
+ cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER,
+ null, connectionSource, connection);
+
+ assertNotNull(cursor.getServerCursor());
+ }
+
+ @Test
+ @DisplayName("should get Exceptions for operations on the cursor after closing")
+ void shouldGetExceptionsForOperationsOnTheCursorAfterClosing() {
+ BsonDocument commandResult = executeFindCommand(5);
+ cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER,
+ null, connectionSource, connection);
+
+ cursor.close();
+ assertDoesNotThrow(() -> cursor.close());
+
+ checkReferenceCountReachesTarget(connectionSource, 1);
+ assertThrows(IllegalStateException.class, this::cursorNext);
+ assertNull(cursor.getServerCursor());
+ }
+
+ @Test
+ @DisplayName("should throw an Exception when going off the end")
+ void shouldThrowAnExceptionWhenGoingOffTheEnd() {
+ BsonDocument commandResult = executeFindCommand(2, 1);
+ cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER,
+ null, connectionSource, connection);
+
+ cursorNext();
+ cursorNext();
+
+ assertThrows(IllegalStateException.class, this::cursorNext);
+ }
+
+
+ @Test
+ @DisplayName("test normal exhaustion")
+ void testNormalExhaustion() {
+ BsonDocument commandResult = executeFindCommand();
+ cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER,
+ null, connectionSource, connection);
+
+ assertEquals(10, cursorFlatten().size());
+ }
+
+ @ParameterizedTest(name = "{index} => limit={0}, batchSize={1}, expectedTotal={2}")
+ @MethodSource
+ @DisplayName("test limit exhaustion")
+ void testLimitExhaustion(final int limit, final int batchSize, final int expectedTotal) {
+ BsonDocument commandResult = executeFindCommand(limit, batchSize);
+ cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, batchSize, 0, DOCUMENT_DECODER,
+ null, connectionSource, connection);
+
+
+ assertEquals(expectedTotal, cursorFlatten().size());
+
+ checkReferenceCountReachesTarget(connectionSource, 1);
+ checkReferenceCountReachesTarget(connection, 1);
+ }
+
+ @ParameterizedTest(name = "{index} => awaitData={0}, maxTimeMS={1}")
+ @MethodSource
+ @DisplayName("should block waiting for next batch on a tailable cursor")
+ void shouldBlockWaitingForNextBatchOnATailableCursor(final boolean awaitData, final int maxTimeMS) {
+ getCollectionHelper().create(getCollectionName(), new CreateCollectionOptions().capped(true).sizeInBytes(1000));
+ getCollectionHelper().insertDocuments(DOCUMENT_DECODER, new Document("_id", 1).append("ts", new BsonTimestamp(5, 0)));
+
+ BsonDocument commandResult = executeFindCommand(new BsonDocument("ts",
+ new BsonDocument("$gte", new BsonTimestamp(5, 0))), 0, 2,
true, awaitData);
+ cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, maxTimeMS, DOCUMENT_DECODER,
+ null, connectionSource, connection);
+
+ assertFalse(cursor.isClosed());
+ assertEquals(1, cursorNext().get(0).get("_id"));
+
+ new Thread(() -> {
+ sleep(100);
+ getCollectionHelper().insertDocuments(DOCUMENT_DECODER, new Document("_id", 2).append("ts", new BsonTimestamp(6, 0)));
+ }).start();
+
+ assertFalse(cursor.isClosed());
+ assertEquals(2, cursorNext().get(0).get("_id"));
+ }
+
+ @Test
+ @DisplayName("test tailable interrupt")
+ void testTailableInterrupt() throws InterruptedException {
+ getCollectionHelper().create(getCollectionName(), new CreateCollectionOptions().capped(true).sizeInBytes(1000));
+ getCollectionHelper().insertDocuments(DOCUMENT_DECODER, new Document("_id", 1).append("ts", new BsonTimestamp(5, 0)));
+
+ BsonDocument commandResult = executeFindCommand(new BsonDocument("ts",
+ new BsonDocument("$gte", new BsonTimestamp(5, 0))), 0, 2, true, true);
+ cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER,
+ null, connectionSource, connection);
+
+ CountDownLatch latch = new CountDownLatch(1);
+ AtomicInteger seen = new AtomicInteger();
+ Thread thread = new Thread(() -> {
+ try {
+ cursorNext();
+ seen.incrementAndGet();
+ cursorNext();
+ seen.incrementAndGet();
+ } catch (Exception e) {
+ // pass
+ } finally {
+ latch.countDown();
+ }
+ });
+
+ thread.start();
+ sleep(1000);
+ thread.interrupt();
+ getCollectionHelper().insertDocuments(DOCUMENT_DECODER, new Document("_id", 2));
+
+ assertTrue(latch.await(5, TimeUnit.SECONDS));
+ assertEquals(1, seen.intValue());
+ }
+
+ @Test
+ @DisplayName("should kill cursor if limit is reached on initial query")
+ void shouldKillCursorIfLimitIsReachedOnInitialQuery() {
+ assumeFalse(isSharded());
+ BsonDocument commandResult = executeFindCommand(5, 10);
+ cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER,
+ null, connectionSource, connection);
+
+ assertNotNull(cursorNext());
+ assertTrue(cursor.isClosed());
+ assertNull(cursor.getServerCursor());
+ }
+
+ @Test
+ @DisplayName("should kill cursor if limit is reached on getMore")
+ void shouldKillCursorIfLimitIsReachedOnGetMore() {
+ assumeFalse(isSharded());
+ BsonDocument commandResult = executeFindCommand(5, 3);
+ cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 3, 0, DOCUMENT_DECODER,
+ null, connectionSource, connection);
+
+ ServerCursor serverCursor = cursor.getServerCursor();
+ assertNotNull(serverCursor);
+ assertNotNull(cursorNext());
+ assertNotNull(cursorNext());
+
+ assertDoesNotThrow(() -> checkReferenceCountReachesTarget(connection, 1));
+ assertThrows(MongoQueryException.class, () ->
+ makeAdditionalGetMoreCall(getNamespace(), serverCursor, connection)
+ );
+ }
+
+ @Test
+ @DisplayName("should release connection source if limit is reached on initial query")
+ void shouldReleaseConnectionSourceIfLimitIsReachedOnInitialQuery() {
+ assumeFalse(isSharded());
+
+ BsonDocument commandResult = executeFindCommand(5, 10);
+ cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER,
+ null, connectionSource, connection);
+
+ assertDoesNotThrow(() -> checkReferenceCountReachesTarget(connectionSource, 1));
+ assertDoesNotThrow(() -> checkReferenceCountReachesTarget(connection, 1));
+ assertNull(cursor.getServerCursor());
+ }
+
+ @Test
+
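// When the limit is exhausted during a getMore, the cursor is expected to release the connection and its source without an explicit close().
+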
@DisplayName("should release connection source if limit is reached on getMore") + void shouldReleaseConnectionSourceIfLimitIsReachedOnGetMore() { + assumeFalse(isSharded()); + BsonDocument commandResult = executeFindCommand(5, 3); + cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 3, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertNotNull(cursorNext()); + assertNotNull(cursorNext()); + assertDoesNotThrow(() -> checkReferenceCountReachesTarget(connectionSource, 1)); + assertDoesNotThrow(() -> checkReferenceCountReachesTarget(connection, 1)); + } + + @Test + @DisplayName("test limit with get more") + void testLimitWithGetMore() { + BsonDocument commandResult = executeFindCommand(5, 2); + cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertNotNull(cursorNext()); + assertNotNull(cursorNext()); + assertNotNull(cursorNext()); + + assertDoesNotThrow(() -> checkReferenceCountReachesTarget(connectionSource, 1)); + assertTrue(cursor.isClosed()); + } + + @Test + @DisplayName("test limit with large documents") + void testLimitWithLargeDocuments() { + String bigString = generate(() -> "x") + .limit(16000) + .collect(Collectors.joining()); + + IntStream.range(11, 1000).forEach(i -> + getCollectionHelper().insertDocuments(DOCUMENT_DECODER, new Document("_id", i).append("s", bigString)) + ); + + BsonDocument commandResult = executeFindCommand(300, 0); + cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertEquals(300, cursorFlatten().size()); + } + + @Test + @DisplayName("should respect batch size") + void shouldRespectBatchSize() { + BsonDocument commandResult = executeFindCommand(2); + cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertEquals(2, cursor.getBatchSize()); + assertEquals(2, cursorNext().size()); + assertEquals(2, cursorNext().size()); + + cursor.setBatchSize(3); + assertEquals(3, cursor.getBatchSize()); + assertEquals(3, cursorNext().size()); + assertEquals(3, cursorNext().size()); + } + + @Test + @DisplayName("should throw cursor not found exception") + void shouldThrowCursorNotFoundException() throws Throwable { + BsonDocument commandResult = executeFindCommand(2); + cursor = new AsyncCommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + ServerCursor serverCursor = cursor.getServerCursor(); + assertNotNull(serverCursor); + AsyncConnection localConnection = getConnection(connectionSource); + this.block(cb -> localConnection.commandAsync(getNamespace().getDatabaseName(), + new BsonDocument("killCursors", new BsonString(getNamespace().getCollectionName())) + .append("cursors", new BsonArray(singletonList(new BsonInt64(serverCursor.getId())))), + NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), new BsonDocumentCodec(), connectionSource.getOperationContext(), cb)); + localConnection.release(); + + cursorNext(); + + MongoCursorNotFoundException exception = assertThrows(MongoCursorNotFoundException.class, this::cursorNext); + assertEquals(serverCursor.getId(), exception.getCursorId()); + assertEquals(serverCursor.getAddress(), exception.getServerAddress()); + } + + + private static Stream shouldBlockWaitingForNextBatchOnATailableCursor() { + return 
Stream.of(
+ arguments(true, 0),
+ arguments(true, 100),
+ arguments(false, 0));
+ }
+
+ private static Stream testLimitExhaustion() {
+ return Stream.of(
+ arguments(5, 2, 5),
+ arguments(5, -2, 2),
+ arguments(-5, -2, 5),
+ arguments(-5, 2, 5),
+ arguments(2, 5, 2),
+ arguments(2, -5, 2),
+ arguments(-2, 5, 2),
+ arguments(-2, -5, 2)
+ );
+ }
+
+ private BsonDocument executeFindCommand() {
+ return executeFindCommand(0);
+ }
+
+ private BsonDocument executeFindCommand(final int batchSize) {
+ return executeFindCommand(new BsonDocument(), 0, batchSize, false, false);
+ }
+
+ private BsonDocument executeFindCommand(final int limit, final int batchSize) {
+ return executeFindCommand(new BsonDocument(), limit, batchSize, false, false);
+ }
+
+ private BsonDocument executeFindCommand(final BsonDocument filter, final int limit, final int batchSize, final boolean tailable,
+ final boolean awaitData) {
+ return executeFindCommand(filter, limit, batchSize, tailable, awaitData, ReadPreference.primary());
+ }
+
+ private BsonDocument executeFindCommand(final BsonDocument filter, final int limit, final int batchSize,
+ final boolean tailable, final boolean awaitData, final ReadPreference readPreference) {
+ BsonDocument findCommand = new BsonDocument("find", new BsonString(getCollectionName()))
+ .append("filter", filter)
+ .append("tailable", BsonBoolean.valueOf(tailable))
+ .append("awaitData", BsonBoolean.valueOf(awaitData));
+
+ findCommand.append("limit", new BsonInt32(Math.abs(limit)));
+ if (limit >= 0) {
+ if (batchSize < 0 && Math.abs(batchSize) < limit) {
+ findCommand.append("limit", new BsonInt32(Math.abs(batchSize)));
+ } else {
+ findCommand.append("batchSize", new BsonInt32(Math.abs(batchSize)));
+ }
+ }
+
+ BsonDocument results = block(cb -> connection.commandAsync(getDatabaseName(), findCommand,
+ NoOpFieldNameValidator.INSTANCE, readPreference, CommandResultDocumentCodec.create(DOCUMENT_DECODER, FIRST_BATCH),
+ connectionSource.getOperationContext(), cb));
+
+ assertNotNull(results);
+ return results;
+ }
+
+ private List<Document> cursorNext() {
+ return block(cb -> cursor.next(cb));
+ }
+
+ private List<Document> cursorFlatten() {
+ List<Document> results = new ArrayList<>();
+ while (!cursor.isClosed()) {
+ results.addAll(cursorNext());
+ }
+ return results;
+ }
+}
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorTest.java b/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorTest.java
new file mode 100644
index 00000000000..e9a30686d5f
--- /dev/null
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/AsyncCommandBatchCursorTest.java
@@ -0,0 +1,268 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoNamespace; +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.MongoSocketException; +import com.mongodb.ServerAddress; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerType; +import com.mongodb.connection.ServerVersion; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncConnectionSource; +import com.mongodb.internal.connection.AsyncConnection; +import com.mongodb.internal.connection.OperationContext; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonInt64; +import org.bson.BsonString; +import org.bson.Document; +import org.bson.codecs.Decoder; +import org.bson.codecs.DocumentCodec; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import java.time.Duration; + +import static com.mongodb.internal.operation.OperationUnitSpecification.getMaxWireVersionForServerVersion; +import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.argThat; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +class AsyncCommandBatchCursorTest { + + private static final MongoNamespace NAMESPACE = new MongoNamespace("test", "test"); + private static final BsonInt64 CURSOR_ID = new BsonInt64(1); + private static final BsonDocument COMMAND_CURSOR_DOCUMENT = new BsonDocument("ok", new BsonInt32(1)) + .append("cursor", + new BsonDocument("ns", new BsonString(NAMESPACE.getFullName())) + .append("id", CURSOR_ID) + .append("firstBatch", new BsonArrayWrapper<>(new BsonArray()))); + + private static final Decoder DOCUMENT_CODEC = new DocumentCodec(); + private static final Duration TIMEOUT = Duration.ofMillis(3_000); + + + private AsyncConnection mockConnection; + private ConnectionDescription mockDescription; + private AsyncConnectionSource connectionSource; + private OperationContext operationContext; + private TimeoutContext timeoutContext; + private ServerDescription serverDescription; + + @BeforeEach + void setUp() { + ServerVersion serverVersion = new ServerVersion(3, 6); + + mockConnection = mock(AsyncConnection.class, "connection"); + mockDescription = mock(ConnectionDescription.class); + when(mockDescription.getMaxWireVersion()).thenReturn(getMaxWireVersionForServerVersion(serverVersion.getVersionList())); + when(mockDescription.getServerType()).thenReturn(ServerType.LOAD_BALANCER); + when(mockConnection.getDescription()).thenReturn(mockDescription); + when(mockConnection.retain()).thenReturn(mockConnection); + + connectionSource = mock(AsyncConnectionSource.class); + 
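// The stubs below hand the mocked connection back asynchronously and attach a real TimeoutContext, so the cursor under test performs genuine timeout accounting against mocks.
+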
operationContext = mock(OperationContext.class); + timeoutContext = new TimeoutContext(TimeoutSettings.create( + MongoClientSettings.builder().timeout(TIMEOUT.toMillis(), MILLISECONDS).build())); + serverDescription = mock(ServerDescription.class); + when(operationContext.getTimeoutContext()).thenReturn(timeoutContext); + when(connectionSource.getOperationContext()).thenReturn(operationContext); + doAnswer(invocation -> { + SingleResultCallback callback = invocation.getArgument(0); + callback.onResult(mockConnection, null); + return null; + }).when(connectionSource).getConnection(any()); + when(connectionSource.getServerDescription()).thenReturn(serverDescription); + } + + + @Test + void shouldSkipKillsCursorsCommandWhenNetworkErrorOccurs() { + //given + doAnswer(invocation -> { + SingleResultCallback argument = invocation.getArgument(6); + argument.onResult(null, new MongoSocketException("test", new ServerAddress())); + return null; + }).when(mockConnection).commandAsync(eq(NAMESPACE.getDatabaseName()), any(), any(), any(), any(), any(), any()); + when(serverDescription.getType()).thenReturn(ServerType.LOAD_BALANCER); + AsyncCommandBatchCursor commandBatchCursor = createBatchCursor(0); + + //when + commandBatchCursor.next((result, t) -> { + Assertions.assertNull(result); + Assertions.assertNotNull(t); + Assertions.assertEquals(MongoSocketException.class, t.getClass()); + }); + + //then + commandBatchCursor.close(); + verify(mockConnection, times(1)).commandAsync(eq(NAMESPACE.getDatabaseName()), any(), any(), any(), any(), any(), any()); + } + + + @Test + void shouldNotSkipKillsCursorsCommandWhenTimeoutExceptionDoesNotHaveNetworkErrorCause() { + //given + doAnswer(invocation -> { + SingleResultCallback argument = invocation.getArgument(6); + argument.onResult(null, new MongoOperationTimeoutException("test")); + return null; + }).when(mockConnection).commandAsync(eq(NAMESPACE.getDatabaseName()), any(), any(), any(), any(), any(), any()); + when(serverDescription.getType()).thenReturn(ServerType.LOAD_BALANCER); + + AsyncCommandBatchCursor commandBatchCursor = createBatchCursor(0); + + //when + commandBatchCursor.next((result, t) -> { + Assertions.assertNull(result); + Assertions.assertNotNull(t); + Assertions.assertEquals(MongoOperationTimeoutException.class, t.getClass()); + }); + + commandBatchCursor.close(); + + + //then + verify(mockConnection, times(2)).commandAsync(any(), + any(), any(), any(), any(), any(), any()); + verify(mockConnection, times(1)).commandAsync(eq(NAMESPACE.getDatabaseName()), + argThat(bsonDocument -> bsonDocument.containsKey("getMore")), any(), any(), any(), any(), any()); + verify(mockConnection, times(1)).commandAsync(eq(NAMESPACE.getDatabaseName()), + argThat(bsonDocument -> bsonDocument.containsKey("killCursors")), any(), any(), any(), any(), any()); + } + + @Test + void shouldSkipKillsCursorsCommandWhenTimeoutExceptionHaveNetworkErrorCause() { + //given + doAnswer(invocation -> { + SingleResultCallback argument = invocation.getArgument(6); + argument.onResult(null, new MongoOperationTimeoutException("test", new MongoSocketException("test", new ServerAddress()))); + return null; + }).when(mockConnection).commandAsync(eq(NAMESPACE.getDatabaseName()), any(), any(), any(), any(), any(), any()); + when(serverDescription.getType()).thenReturn(ServerType.LOAD_BALANCER); + + AsyncCommandBatchCursor commandBatchCursor = createBatchCursor(0); + + //when + commandBatchCursor.next((result, t) -> { + Assertions.assertNull(result); + Assertions.assertNotNull(t); + 
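// The timeout exception itself reaches the callback unchanged; its network cause only changes the cleanup behaviour verified below.
+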
Assertions.assertEquals(MongoOperationTimeoutException.class, t.getClass());
+ });
+
+ commandBatchCursor.close();
+
+ //then
+ verify(mockConnection, times(1)).commandAsync(any(),
+ any(), any(), any(), any(), any(), any());
+ verify(mockConnection, times(1)).commandAsync(eq(NAMESPACE.getDatabaseName()),
+ argThat(bsonDocument -> bsonDocument.containsKey("getMore")), any(), any(), any(), any(), any());
+ verify(mockConnection, never()).commandAsync(eq(NAMESPACE.getDatabaseName()),
+ argThat(bsonDocument -> bsonDocument.containsKey("killCursors")), any(), any(), any(), any(), any());
+ }
+
+ @Test
+ @SuppressWarnings("try")
+ void closeShouldResetTimeoutContextToDefaultMaxTime() {
+ long maxTimeMS = 10;
+ com.mongodb.assertions.Assertions.assertTrue(maxTimeMS < TIMEOUT.toMillis());
+ try (AsyncCommandBatchCursor commandBatchCursor = createBatchCursor(maxTimeMS)) {
+ // verify that the `maxTimeMS` override was applied
+ timeoutContext.runMaxTimeMS(remainingMillis -> assertTrue(remainingMillis <= maxTimeMS));
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ timeoutContext.runMaxTimeMS(remainingMillis -> {
+ // verify that the `maxTimeMS` override was reset
+ assertTrue(remainingMillis > maxTimeMS);
+ assertTrue(remainingMillis <= TIMEOUT.toMillis());
+ });
+ }
+
+ @ParameterizedTest
+ @ValueSource(booleans = {false, true})
+ void closeShouldNotResetOriginalTimeout(final boolean disableTimeoutResetWhenClosing) {
+ doAnswer(invocation -> {
+ SingleResultCallback argument = invocation.getArgument(6);
+ argument.onResult(null, null);
+ return null;
+ }).when(mockConnection).commandAsync(any(), any(), any(), any(), any(), any(), any());
+ Duration thirdOfTimeout = TIMEOUT.dividedBy(3);
+ com.mongodb.assertions.Assertions.assertTrue(thirdOfTimeout.toMillis() > 0);
+ try (AsyncCommandBatchCursor commandBatchCursor = createBatchCursor(0)) {
+ if (disableTimeoutResetWhenClosing) {
+ commandBatchCursor.disableTimeoutResetWhenClosing();
+ }
+ try {
+ Thread.sleep(thirdOfTimeout.toMillis());
+ } catch (InterruptedException e) {
+ throw interruptAndCreateMongoInterruptedException(null, e);
+ }
+ when(mockConnection.release()).then(invocation -> {
+ Thread.sleep(thirdOfTimeout.toMillis());
+ return null;
+ });
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ verify(mockConnection, times(1)).release();
+ // at this point at least (2 * thirdOfTimeout) have passed
+ com.mongodb.assertions.Assertions.assertNotNull(timeoutContext.getTimeout()).run(
+ MILLISECONDS,
+ com.mongodb.assertions.Assertions::fail,
+ remainingMillis -> {
+ // Verify that the original timeout was left intact.
+ // If `close` had reset it, we would have observed more than `thirdOfTimeout` left.
+ assertTrue(remainingMillis <= thirdOfTimeout.toMillis()); + }, + Assertions::fail); + } + + + private AsyncCommandBatchCursor createBatchCursor(final long maxTimeMS) { + return new AsyncCommandBatchCursor( + TimeoutMode.CURSOR_LIFETIME, + COMMAND_CURSOR_DOCUMENT, + 0, + maxTimeMS, + DOCUMENT_CODEC, + null, + connectionSource, + mockConnection); + } + +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationProseTestSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationProseTestSpecification.groovy new file mode 100644 index 00000000000..c7d3314b5b7 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationProseTestSpecification.groovy @@ -0,0 +1,165 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation + + +import com.mongodb.MongoException +import com.mongodb.MongoQueryException +import com.mongodb.OperationFunctionalSpecification +import com.mongodb.WriteConcern +import com.mongodb.client.model.CreateCollectionOptions +import com.mongodb.client.model.changestream.FullDocument +import com.mongodb.client.model.changestream.FullDocumentBeforeChange +import com.mongodb.client.test.CollectionHelper +import org.bson.BsonArray +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonString +import org.bson.Document +import org.bson.codecs.BsonDocumentCodec +import spock.lang.IgnoreIf + +import static com.mongodb.ClusterFixture.getAsyncCluster +import static com.mongodb.ClusterFixture.getCluster +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet +import static com.mongodb.ClusterFixture.isStandalone +import static com.mongodb.internal.connection.ServerHelper.waitForLastRelease +import static java.util.Arrays.asList + +// See https://github.com/mongodb/specifications/tree/master/source/change-streams/tests/README.md#prose-tests +@IgnoreIf({ isStandalone() }) +class ChangeStreamOperationProseTestSpecification extends OperationFunctionalSpecification { + + // + // Test that the ChangeStream will throw an exception if the server response is missing the resume token (if wire version is < 8). + // + def 'should throw if the _id field is projected out'() { + given: + def helper = getHelper() + def pipeline = [BsonDocument.parse('{$project: {"_id": 0}}')] + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) + + when: + def cursor = execute(operation, async) + insertDocuments(helper, [11, 22]) + next(cursor, async) + + then: + def exception = thrown(MongoException) + + then: + exception instanceof MongoQueryException + + cleanup: + cursor?.close() + waitForLastRelease(async ? 
getAsyncCluster() : getCluster()) + + where: + async << [true, false] + } + + // + // Test that the ChangeStream will automatically resume one time on a resumable error (including not master) + // with the initial pipeline and options, except for the addition/update of a resumeToken. + // + @IgnoreIf({ !isDiscoverableReplicaSet() }) + def 'should resume after single getMore Error'() { + given: + def helper = getHelper() + + def pipeline = [BsonDocument.parse('{$match: {operationType: "insert"}}')] + def failPointDocument = createFailPointDocument('getMore', 10107) + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) + + def cursor = execute(operation, async) + + when: + insertDocuments(helper, [1, 2]) + setFailPoint(failPointDocument) + + then: + def result = next(cursor, async, callHasNext, 2) + + then: + result.size() == 2 + + cleanup: + cursor?.close() + disableFailPoint(failPointDocument) + waitForLastRelease(async ? getAsyncCluster() : getCluster()) + + where: + async | callHasNext + true | false + false | false + false | true + } + + // + // Test that ChangeStream will not attempt to resume on any error encountered while executing an aggregate command. + // + def 'should not resume for aggregation errors'() { + given: + def pipeline = [BsonDocument.parse('{$unsupportedStage: {_id: 0}}')] + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) + + when: + def cursor = execute(operation, async) + + then: + thrown(MongoException) + + cleanup: + cursor?.close() + waitForLastRelease(async ? getAsyncCluster() : getCluster()) + + where: + async << [true, false] + } + + + private final static CODEC = new BsonDocumentCodec() + + private CollectionHelper getHelper() { + def helper = getCollectionHelper() + helper.create(helper.getNamespace().getCollectionName(), new CreateCollectionOptions()) + helper + } + + private static void insertDocuments(final CollectionHelper helper, final List docs) { + helper.insertDocuments(docs.collect { BsonDocument.parse("{_id: $it, a: $it}") }, WriteConcern.MAJORITY) + } + + private static BsonDocument createFailPointDocument(final String command, final int errCode) { + new BsonDocument('configureFailPoint', new BsonString('failCommand')) + .append('mode', new BsonDocument('times', new BsonInt32(1))) + .append('data', new BsonDocument('failCommands', new BsonArray(asList(new BsonString(command)))) + .append('errorCode', new BsonInt32(errCode)) + .append('errorLabels', new BsonArray(asList(new BsonString('ResumableChangeStreamError'))))) + } + + def setFailPoint(final BsonDocument failPointDocument) { + collectionHelper.runAdminCommand(failPointDocument) + } + + def disableFailPoint(final BsonDocument failPointDocument) { + collectionHelper.runAdminCommand(failPointDocument.append('mode', new BsonString('off'))) + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationSpecification.groovy new file mode 100644 index 00000000000..9134375ffec --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/ChangeStreamOperationSpecification.groovy @@ -0,0 +1,794 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation + +import com.mongodb.MongoException +import com.mongodb.MongoNamespace +import com.mongodb.OperationFunctionalSpecification +import com.mongodb.ReadConcern +import com.mongodb.WriteConcern +import com.mongodb.client.model.CreateCollectionOptions +import com.mongodb.client.model.changestream.ChangeStreamDocument +import com.mongodb.client.model.changestream.FullDocument +import com.mongodb.client.model.changestream.FullDocumentBeforeChange +import com.mongodb.client.model.changestream.OperationType +import com.mongodb.client.model.changestream.UpdateDescription +import com.mongodb.client.test.CollectionHelper +import com.mongodb.connection.ConnectionDescription +import com.mongodb.internal.async.SingleResultCallback +import com.mongodb.internal.binding.AsyncConnectionSource +import com.mongodb.internal.binding.AsyncReadBinding +import com.mongodb.internal.binding.ConnectionSource +import com.mongodb.internal.binding.ReadBinding +import com.mongodb.internal.client.model.changestream.ChangeStreamLevel +import com.mongodb.internal.connection.AsyncConnection +import com.mongodb.internal.connection.Connection +import com.mongodb.internal.session.SessionContext +import org.bson.BsonArray +import org.bson.BsonBoolean +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonInt64 +import org.bson.BsonString +import org.bson.BsonTimestamp +import org.bson.Document +import org.bson.codecs.BsonDocumentCodec +import org.bson.codecs.BsonValueCodecProvider +import org.bson.codecs.DocumentCodec +import org.bson.codecs.ValueCodecProvider +import spock.lang.IgnoreIf + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ClusterFixture.getAsyncCluster +import static com.mongodb.ClusterFixture.getCluster +import static com.mongodb.ClusterFixture.isStandalone +import static com.mongodb.ClusterFixture.serverVersionLessThan +import static com.mongodb.client.model.changestream.ChangeStreamDocument.createCodec +import static com.mongodb.internal.connection.ServerHelper.waitForLastRelease +import static com.mongodb.internal.operation.OperationUnitSpecification.getMaxWireVersionForServerVersion +import static org.bson.codecs.configuration.CodecRegistries.fromProviders + +@IgnoreIf({ isStandalone() }) +class ChangeStreamOperationSpecification extends OperationFunctionalSpecification { + + def 'should have the correct defaults'() { + when: + ChangeStreamOperation operation = new ChangeStreamOperation(getNamespace(), + FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [], new DocumentCodec()) + + then: + operation.getBatchSize() == null + operation.getCollation() == null + operation.getFullDocument() == FullDocument.DEFAULT + operation.getPipeline() == [] + operation.getStartAtOperationTime() == null + } + + def 'should set optional values correctly'() { + when: + ChangeStreamOperation operation = new ChangeStreamOperation(getNamespace(), + 
FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.DEFAULT, [], new DocumentCodec()) + .batchSize(5) + .collation(defaultCollation) + .startAtOperationTime(new BsonTimestamp(99)) + + then: + operation.getBatchSize() == 5 + operation.getCollation() == defaultCollation + operation.getFullDocument() == FullDocument.UPDATE_LOOKUP + operation.getStartAtOperationTime() == new BsonTimestamp(99) + } + + def 'should create the expected command'() { + given: + def aggregate = changeStreamLevel == ChangeStreamLevel.COLLECTION ? new BsonString(namespace.getCollectionName()) : new BsonInt32(1) + def pipeline = [BsonDocument.parse('{$match: {a: "A"}}')] + def resumeToken = BsonDocument.parse('{_id: 1}') + + when: + def changeStream = BsonDocument.parse('''{$changeStream: {startAtOperationTime: + { "$timestamp" : { "t" : 0, "i" : 0 }}}}''') + if (changeStreamLevel == ChangeStreamLevel.CLIENT) { + changeStream.getDocument('$changeStream').put('allChangesForCluster', BsonBoolean.TRUE) + } + + def cursorResult = BsonDocument.parse('{ok: 1.0}') + .append('cursor', new BsonDocument('id', new BsonInt64(0)).append('ns', new BsonString('db.coll')) + .append('firstBatch', new BsonArrayWrapper([]))) + + def operation = new ChangeStreamOperation(namespace, FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, pipeline, new DocumentCodec(), changeStreamLevel as ChangeStreamLevel) + .batchSize(5) + .collation(defaultCollation) + .startAtOperationTime(new BsonTimestamp()) + + def expectedCommand = new BsonDocument('aggregate', aggregate) + .append('collation', defaultCollation.asDocument()) + .append('cursor', new BsonDocument('batchSize', new BsonInt32(5))) + .append('pipeline', new BsonArray([changeStream, *pipeline])) + .append('readConcern', new BsonDocument('level', new BsonString('majority'))) + + then: + testOperation(operation, [4, 0, 0], ReadConcern.MAJORITY, expectedCommand, async, cursorResult) + + when: 'resumeAfter & startAfter & startAtOperationTime' + def changeStreamDoc = changeStream.getDocument('$changeStream') + changeStreamDoc.put('resumeAfter', resumeToken) + changeStreamDoc.put('startAfter', resumeToken) + + operation.resumeAfter(resumeToken) + .startAfter(resumeToken) + + then: + testOperation(operation, [4, 0, 0], ReadConcern.MAJORITY, expectedCommand, async, cursorResult) + + where: + [async, changeStreamLevel] << [[true, false], + [ChangeStreamLevel.CLIENT, ChangeStreamLevel.DATABASE, ChangeStreamLevel.COLLECTION]].combinations() + } + + def 'should return the expected results'() { + given: + def helper = getHelper() + + def pipeline = [BsonDocument.parse('{$match: {operationType: "insert"}}')] + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) + + when: + def cursor = execute(operation, async) + def expected = insertDocuments(helper, [1, 2]) + + then: + def next = nextAndClean(cursor, async, expected.size()) + next == expected + + when: + expected = insertDocuments(helper, [3, 4, 5, 6, 7]) + cursor.setBatchSize(5) + + then: + cursor.getBatchSize() == 5 + nextAndClean(cursor, async, expected.size()) == expected + + then: + if (async) { + !cursor.isClosed() + } else { + cursor.getServerCursor() == cursor.getWrapped().getServerCursor() + cursor.getServerAddress() == cursor.getWrapped().getServerAddress() + } + + cleanup: + cursor?.close() + waitForLastRelease(async ? 
getAsyncCluster() : getCluster()) + + where: + async << [true, false] + } + + def 'should decode insert to ChangeStreamDocument '() { + given: + def helper = getHelper() + + def pipeline = [BsonDocument.parse('{$match: {operationType: "insert"}}')] + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, pipeline, + createCodec(BsonDocument, fromProviders(new BsonValueCodecProvider(), new ValueCodecProvider()))) + + when: + def cursor = execute(operation, false) + helper.insertDocuments(BsonDocument.parse('{ _id : 2, x : 2 }')) + ChangeStreamDocument nextDoc = next(cursor, false, 1).get(0) + + then: + nextDoc.getResumeToken() != null + nextDoc.getDocumentKey() == BsonDocument.parse('{ _id : 2 }') + nextDoc.getFullDocument() == BsonDocument.parse('{ _id : 2, x : 2 }') + nextDoc.getNamespace() == helper.getNamespace() + nextDoc.getOperationType() == OperationType.INSERT + nextDoc.getUpdateDescription() == null + + cleanup: + cursor?.close() + waitForLastRelease(getCluster()) + } + + // TODO undo skip and update for JAVA-5835 + @IgnoreIf({ !serverVersionLessThan(8, 2) }) + def 'should decode update to ChangeStreamDocument '() { + given: + def helper = getHelper() + + def pipeline = [BsonDocument.parse('{$match: {operationType: "update"}}')] + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.UPDATE_LOOKUP, + FullDocumentBeforeChange.DEFAULT, pipeline, + createCodec(BsonDocument, fromProviders(new BsonValueCodecProvider(), new ValueCodecProvider()))) + helper.insertDocuments(BsonDocument.parse('{ _id : 2, x : 2, y : 3 }')) + + when: + def cursor = execute(operation, false) + helper.updateOne(BsonDocument.parse('{ _id : 2}'), BsonDocument.parse('{ $set : {x : 3}, $unset : {y : 1}}')) + ChangeStreamDocument nextDoc = next(cursor, false, 1).get(0) + + then: + nextDoc.getResumeToken() != null + nextDoc.getDocumentKey() == BsonDocument.parse('{ _id : 2 }') + nextDoc.getFullDocument() == BsonDocument.parse('{ _id : 2, x : 3 }') + nextDoc.getNamespace() == helper.getNamespace() + nextDoc.getOperationType() == OperationType.UPDATE + nextDoc.getUpdateDescription() == new UpdateDescription(['y'], BsonDocument.parse('{x : 3}'), null) + + cleanup: + cursor?.close() + waitForLastRelease(getCluster()) + } + + def 'should decode replace to ChangeStreamDocument '() { + given: + def helper = getHelper() + + def pipeline = [BsonDocument.parse('{$match: {operationType: "replace"}}')] + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.UPDATE_LOOKUP, + FullDocumentBeforeChange.DEFAULT, pipeline, + createCodec(BsonDocument, fromProviders(new BsonValueCodecProvider(), new ValueCodecProvider()))) + helper.insertDocuments(BsonDocument.parse('{ _id : 2, x : 2, y : 3 }')) + + when: + def cursor = execute(operation, false) + helper.replaceOne(BsonDocument.parse('{ _id : 2}'), BsonDocument.parse('{ _id : 2, x : 3}'), false) + ChangeStreamDocument nextDoc = next(cursor, false, 1).get(0) + + then: + nextDoc.getResumeToken() != null + nextDoc.getDocumentKey() == BsonDocument.parse('{ _id : 2 }') + nextDoc.getFullDocument() == BsonDocument.parse('{ _id : 2, x : 3 }') + nextDoc.getNamespace() == helper.getNamespace() + nextDoc.getOperationType() == OperationType.REPLACE + nextDoc.getUpdateDescription() == null + + cleanup: + cursor?.close() + waitForLastRelease(getCluster()) + } + + def 'should decode delete to ChangeStreamDocument '() { + given: + def helper = getHelper() + + def pipeline = 
[BsonDocument.parse('{$match: {operationType: "delete"}}')] + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.UPDATE_LOOKUP, + FullDocumentBeforeChange.DEFAULT, pipeline, + createCodec(BsonDocument, fromProviders(new BsonValueCodecProvider(), new ValueCodecProvider()))) + helper.insertDocuments(BsonDocument.parse('{ _id : 2, x : 2, y : 3 }')) + + when: + def cursor = execute(operation, false) + helper.deleteOne(BsonDocument.parse('{ _id : 2}')) + ChangeStreamDocument nextDoc = next(cursor, false, 1).get(0) + + then: + nextDoc.getResumeToken() != null + nextDoc.getDocumentKey() == BsonDocument.parse('{ _id : 2 }') + nextDoc.getFullDocument() == null + nextDoc.getNamespace() == helper.getNamespace() + nextDoc.getOperationType() == OperationType.DELETE + nextDoc.getUpdateDescription() == null + + cleanup: + cursor?.close() + waitForLastRelease(getCluster()) + } + + def 'should decode invalidate to ChangeStreamDocument '() { + given: + def helper = getHelper() + + def pipeline = [BsonDocument.parse('{$match: {operationType: "invalidate"}}')] + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.UPDATE_LOOKUP, + FullDocumentBeforeChange.DEFAULT, pipeline, + createCodec(BsonDocument, fromProviders(new BsonValueCodecProvider(), new ValueCodecProvider()))) + helper.insertDocuments(BsonDocument.parse('{ _id : 2, x : 2, y : 3 }')) + + when: + def cursor = execute(operation, false) + helper.drop() + ChangeStreamDocument nextDoc = next(cursor, false, 1).get(0) + + then: + nextDoc.getResumeToken() != null + nextDoc.getDocumentKey() == null + nextDoc.getFullDocument() == null + nextDoc.getNamespace() == null + nextDoc.getOperationType() == OperationType.INVALIDATE + nextDoc.getUpdateDescription() == null + + cleanup: + cursor?.close() + waitForLastRelease(getCluster()) + } + + def 'should decode drop to ChangeStreamDocument '() { + given: + def helper = getHelper() + + def pipeline = [BsonDocument.parse('{$match: {operationType: "drop"}}')] + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.UPDATE_LOOKUP, + FullDocumentBeforeChange.DEFAULT, pipeline, + createCodec(BsonDocument, fromProviders(new BsonValueCodecProvider(), new ValueCodecProvider()))) + helper.insertDocuments(BsonDocument.parse('{ _id : 2, x : 2, y : 3 }')) + + when: + def cursor = execute(operation, false) + helper.drop() + ChangeStreamDocument nextDoc = next(cursor, false, 1).get(0) + + then: + nextDoc.getResumeToken() != null + nextDoc.getDocumentKey() == null + nextDoc.getFullDocument() == null + nextDoc.getNamespace() == helper.getNamespace() + nextDoc.getOperationType() == OperationType.DROP + nextDoc.getUpdateDescription() == null + + cleanup: + cursor?.close() + waitForLastRelease(getCluster()) + } + + def 'should decode dropDatabase to ChangeStreamDocument '() { + given: + def helper = getHelper() + + def pipeline = [BsonDocument.parse('{$match: {operationType: "dropDatabase"}}')] + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.UPDATE_LOOKUP, + FullDocumentBeforeChange.DEFAULT, pipeline, + createCodec(BsonDocument, fromProviders(new BsonValueCodecProvider(), new ValueCodecProvider())), + ChangeStreamLevel.DATABASE) + helper.insertDocuments(BsonDocument.parse('{ _id : 2, x : 2, y : 3 }')) + + when: + def cursor = execute(operation, false) + helper.dropDatabase('JavaDriverTest') + ChangeStreamDocument nextDoc = next(cursor, false, 1).get(0) + + then: + nextDoc.getResumeToken() != null + nextDoc.getDocumentKey() == 
null + nextDoc.getFullDocument() == null + nextDoc.getDatabaseName() == 'JavaDriverTest' + nextDoc.getOperationType() == OperationType.DROP_DATABASE + nextDoc.getUpdateDescription() == null + + cleanup: + cursor?.close() + waitForLastRelease(getCluster()) + } + + def 'should decode rename to ChangeStreamDocument '() { + given: + def helper = getHelper() + + def pipeline = [BsonDocument.parse('{$match: {operationType: "rename"}}')] + def operation = new ChangeStreamOperation(helper.getNamespace(), + FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.DEFAULT, pipeline, + createCodec(BsonDocument, fromProviders(new BsonValueCodecProvider(), new ValueCodecProvider()))) + def newNamespace = new MongoNamespace('JavaDriverTest', 'newCollectionName') + helper.insertDocuments(BsonDocument.parse('{ _id : 2, x : 2, y : 3 }')) + + when: + def cursor = execute(operation, false) + helper.renameCollection(newNamespace) + ChangeStreamDocument nextDoc = next(cursor, false, 1).get(0) + + then: + nextDoc.getResumeToken() != null + nextDoc.getDocumentKey() == null + nextDoc.getFullDocument() == null + nextDoc.getNamespace() == helper.getNamespace() + nextDoc.getDestinationNamespace() == newNamespace + nextDoc.getOperationType() == OperationType.RENAME + nextDoc.getUpdateDescription() == null + + cleanup: + cursor?.close() + waitForLastRelease(getCluster()) + } + + def 'should throw if the _id field is projected out'() { + given: + def helper = getHelper() + def pipeline = [BsonDocument.parse('{$project: {"_id": 0}}')] + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) + + when: + def cursor = execute(operation, async) + insertDocuments(helper, [11, 22]) + nextAndClean(cursor, async) + + then: + thrown(MongoException) + + cleanup: + cursor?.close() + waitForLastRelease(async ? getAsyncCluster() : getCluster()) + + where: + async << [true, false] + } + + def 'should act like a tailable cursor'() { + given: + def helper = getHelper() + + def pipeline = [BsonDocument.parse('{$match: {operationType: "insert"}}')] + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) + + when: + def cursor = execute(operation, async) + def expected = insertDocuments(helper, [1, 2]) + + then: + nextAndClean(cursor, async, expected.size()) == expected + + when: + expected = insertDocuments(helper, [3, 4]) + + then: + nextAndClean(cursor, async, expected.size()) == expected + + cleanup: + cursor?.close() + waitForLastRelease(async ? 
getAsyncCluster() : getCluster()) + + where: + async << [true, false] + } + + def 'should be resumable'() { + given: + def helper = getHelper() + + def pipeline = [BsonDocument.parse('{$match: {operationType: "insert"}}')] + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) + + when: + def cursor = execute(operation, async) + def expected = insertDocuments(helper, [1, 2]) + + then: + nextAndClean(cursor, async, expected.size()) == expected + + when: + helper.killCursor(helper.getNamespace(), cursor.getWrapped().getServerCursor()) + expected = insertDocuments(helper, [3, 4]) + def results = nextAndClean(cursor, async, expected.size()) + + then: + results == expected + + when: + expected = insertDocuments(helper, [5, 6]) + helper.killCursor(helper.getNamespace(), cursor.getWrapped().getServerCursor()) + + results = nextAndClean(cursor, async, expected.size()) + + then: + results == expected + + cleanup: + cursor?.close() + waitForLastRelease(async ? getAsyncCluster() : getCluster()) + + where: + async << [true, false] + } + + def 'should work with a startAtOperationTime'() { + given: + def helper = getHelper() + + def pipeline = [BsonDocument.parse('{$match: {operationType: "insert"}}')] + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) + + def cursor = execute(operation, async) + + when: + // split into two insert commands, because starting in MongoDB 8.0 the same clusterTime is applied to all documents in a bulk + // write operation, and the test relies on the clusterTime values being both ascending _and_ unique. + def expectedOne = insertDocuments(helper, [1]) + def expectedTwo = insertDocuments(helper, [2]) + def expected = [] + expected.addAll(expectedOne) + expected.addAll(expectedTwo) + def result = next(cursor, async, 2) + + then: + result.size() == 2 + + when: + cursor.close() + waitForLastRelease(async ? getAsyncCluster() : getCluster()) + + operation.startAtOperationTime(result.last().getTimestamp('clusterTime')) + cursor = execute(operation, async) + result = nextAndClean(cursor, async, expected.tail().size()) + + then: + result == expected.tail() + + cleanup: + cursor?.close() + waitForLastRelease(async ? getAsyncCluster() : getCluster()) + + where: + async << [true, false] + } + + def 'should work with a resumeAfter resumeToken'() { + given: + def helper = getHelper() + + def pipeline = [BsonDocument.parse('{$match: {operationType: "insert"}}')] + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) + + def cursor = execute(operation, async) + + when: + def expected = insertDocuments(helper, [1, 2]) + def result = next(cursor, async, 2) + + then: + result.size() == 2 + + when: + cursor.close() + waitForLastRelease(async ? getAsyncCluster() : getCluster()) + + operation.resumeAfter(result.head().getDocument('_id')).startAtOperationTime(null) + cursor = execute(operation, async) + result = nextAndClean(cursor, async, expected.tail().size()) + + then: + result == expected.tail() + + cleanup: + cursor?.close() + waitForLastRelease(async ? 
getAsyncCluster() : getCluster()) + + where: + async << [true, false] + } + + def 'should work with a startAfter resumeToken'() { + given: + def helper = getHelper() + + def pipeline = [BsonDocument.parse('{$match: {operationType: "insert"}}')] + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, pipeline, CODEC) + + def cursor = execute(operation, async) + + when: + def expected = insertDocuments(helper, [1, 2]) + def result = next(cursor, async, 2) + + then: + result.size() == 2 + + when: + cursor.close() + waitForLastRelease(async ? getAsyncCluster() : getCluster()) + + cursor = execute(operation.startAfter(result.head().getDocument('_id')).startAtOperationTime(null), async) + result = nextAndClean(cursor, async, expected.tail().size()) + + then: + result == expected.tail() + + cleanup: + cursor?.close() + waitForLastRelease(async ? getAsyncCluster() : getCluster()) + + where: + async << [true, false] + } + + def 'should support hasNext on the sync API'() { + given: + def helper = getHelper() + def operation = new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, [], CODEC) + + when: + def cursor = execute(operation, false) + insertDocuments(helper, [1]) + + then: + cursor.hasNext() + + cleanup: + cursor?.close() + waitForLastRelease(getCluster()) + } + + def 'should set the startAtOperationTime on the sync cursor'() { + given: + def operationContext = OPERATION_CONTEXT.withSessionContext( + Stub(SessionContext) { + getReadConcern() >> ReadConcern.DEFAULT + getOperationTime() >> new BsonTimestamp() + }) + def changeStream + def binding = Stub(ReadBinding) { + getOperationContext() >> operationContext + getReadConnectionSource() >> Stub(ConnectionSource) { + getOperationContext() >> operationContext + getConnection() >> Stub(Connection) { + command(*_) >> { + changeStream = getChangeStream(it[1]) + new BsonDocument('cursor', new BsonDocument('id', new BsonInt64(1)) + .append('ns', new BsonString(getNamespace().getFullName())) + .append('firstBatch', new BsonArrayWrapper([]))) + } + getDescription() >> Stub(ConnectionDescription) { + getMaxWireVersion() >> getMaxWireVersionForServerVersion([4, 0]) + } + } + } + } + + when: 'set resumeAfter' + new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, [], CODEC) + .resumeAfter(new BsonDocument()) + .execute(binding) + + then: + changeStream.containsKey('resumeAfter') + !changeStream.containsKey('startAtOperationTime') + + when: 'set startAfter' + new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, [], CODEC) + .startAfter(new BsonDocument()) + .execute(binding) + + then: + changeStream.containsKey('startAfter') + !changeStream.containsKey('startAtOperationTime') + + when: 'set startAtOperationTime' + def startAtTime = new BsonTimestamp(42) + new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, [], CODEC) + .startAtOperationTime(startAtTime) + .execute(binding) + + then: + changeStream.getTimestamp('startAtOperationTime') == startAtTime + } + + def 'should set the startAtOperationTime on the async cursor'() { + given: + def operationContext = OPERATION_CONTEXT.withSessionContext( + Stub(SessionContext) { + getReadConcern() >> ReadConcern.DEFAULT + getOperationTime() >> new BsonTimestamp() + }) + def changeStream + def binding = Stub(AsyncReadBinding) { + 
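+ // async counterpart of the sync stub above: each callback-style method hands its stubbed
+ // result to the trailing SingleResultCallback argument instead of returning a value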
getOperationContext() >> operationContext + getReadConnectionSource(_) >> { + it.last().onResult(Stub(AsyncConnectionSource) { + getOperationContext() >> operationContext + getConnection(_) >> { + it.last().onResult(Stub(AsyncConnection) { + commandAsync(*_) >> { + changeStream = getChangeStream(it[1]) + it.last().onResult(new BsonDocument('cursor', new BsonDocument('id', new BsonInt64(1)) + .append('ns', new BsonString(getNamespace().getFullName())) + .append('firstBatch', new BsonArrayWrapper([]))), null) + } + getDescription() >> Stub(ConnectionDescription) { + getMaxWireVersion() >> getMaxWireVersionForServerVersion([4, 0]) + } + }, null) + } + }, null) + } + } + + when: 'set resumeAfter' + new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, [], CODEC) + .resumeAfter(new BsonDocument()) + .executeAsync(binding, Stub(SingleResultCallback)) + + then: + changeStream.containsKey('resumeAfter') + !changeStream.containsKey('startAtOperationTime') + + when: 'set startAfter' + new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, [], CODEC) + .startAfter(new BsonDocument()) + .executeAsync(binding, Stub(SingleResultCallback)) + + then: + changeStream.containsKey('startAfter') + !changeStream.containsKey('startAtOperationTime') + + when: 'set startAtOperationTime' + def startAtTime = new BsonTimestamp(42) + new ChangeStreamOperation(helper.getNamespace(), FullDocument.DEFAULT, + FullDocumentBeforeChange.DEFAULT, [], CODEC) + .startAtOperationTime(startAtTime) + .executeAsync(binding, Stub(SingleResultCallback)) + + then: + changeStream.getTimestamp('startAtOperationTime') == startAtTime + } + + private final static CODEC = new BsonDocumentCodec() + + private CollectionHelper getHelper() { + def helper = getCollectionHelper() + helper.create(helper.getNamespace().getCollectionName(), new CreateCollectionOptions()) + helper + } + + private static List insertDocuments(final CollectionHelper helper, final List docs) { + helper.insertDocuments(docs.collect { BsonDocument.parse("{_id: $it, a: $it}") }, WriteConcern.MAJORITY) + docs.collect { + BsonDocument.parse("""{ + "operationType": "insert", + "fullDocument": {"_id": $it, "a": $it}, + "ns": {"db": "${helper.getNamespace().getDatabaseName()}", "coll": "${helper.getNamespace().getCollectionName()}"}, + "documentKey": {"_id": $it} + }""") + } + } + + def nextAndClean(cursor, boolean async, int minimumCount) { + removeExtra(next(cursor, async, minimumCount)) + } + + def nextAndClean(cursor, boolean async) { + removeExtra(next(cursor, async)) + } + + def removeExtra(List next) { + next?.collect { doc -> + doc.remove('_id') + doc.remove('clusterTime') + doc.remove('wallTime') + doc.remove('collectionUUID') + doc + } + } + + def getChangeStream(BsonDocument command) { + command.getArray('pipeline').head().getDocument('$changeStream') + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CommandBatchCursorFunctionalTest.java b/driver-core/src/test/functional/com/mongodb/internal/operation/CommandBatchCursorFunctionalTest.java new file mode 100644 index 00000000000..d9861c71659 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CommandBatchCursorFunctionalTest.java @@ -0,0 +1,599 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoCursorNotFoundException; +import com.mongodb.MongoQueryException; +import com.mongodb.ReadPreference; +import com.mongodb.ServerCursor; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.Filters; +import com.mongodb.client.model.OperationTest; +import com.mongodb.internal.binding.ConnectionSource; +import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.validator.NoOpFieldNameValidator; +import org.bson.BsonArray; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonInt64; +import org.bson.BsonString; +import org.bson.BsonTimestamp; +import org.bson.Document; +import org.bson.codecs.BsonDocumentCodec; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.ArrayList; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import static com.mongodb.ClusterFixture.checkReferenceCountReachesTarget; +import static com.mongodb.ClusterFixture.getBinding; +import static com.mongodb.ClusterFixture.getReferenceCountAfterTimeout; +import static com.mongodb.ClusterFixture.isSharded; +import static com.mongodb.internal.operation.CommandBatchCursorHelper.FIRST_BATCH; +import static com.mongodb.internal.operation.TestOperationHelper.makeAdditionalGetMoreCall; +import static java.util.Collections.singletonList; +import static java.util.stream.Stream.generate; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assumptions.assumeFalse; +import static org.junit.jupiter.params.provider.Arguments.arguments; + +public class CommandBatchCursorFunctionalTest extends OperationTest { + + private ConnectionSource connectionSource; + private Connection connection; + private CommandBatchCursor cursor; + + @BeforeEach + void setup() { + List documents = IntStream.rangeClosed(1, 10) + .mapToObj(i -> new BsonDocument("i", new BsonInt32(i))) + .collect(Collectors.toList()); + getCollectionHelper().insertDocuments(documents); + + connectionSource = getBinding().getWriteConnectionSource(); + connection = 
connectionSource.getConnection();
+ }
+
+ @AfterEach
+ void cleanup() {
+ ifNotNull(cursor, CommandBatchCursor::close);
+ ifNotNull(connectionSource, cs -> {
+ getReferenceCountAfterTimeout(cs, 1);
+ cs.release();
+ });
+ ifNotNull(connection, c -> {
+ getReferenceCountAfterTimeout(c, 1);
+ c.release();
+ });
+ }
+
+ @Test
+ @DisplayName("should exhaust cursor with multiple batches")
+ void shouldExhaustCursorWithMultipleBatches() {
+ // given
+ BsonDocument commandResult = executeFindCommand(0, 3); // Fetch in batches of size 3
+ cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 3, 0, DOCUMENT_DECODER,
+ null, connectionSource, connection);
+
+ // when
+ List<List<Document>> result = cursor.exhaust();
+
+ // then
+ assertEquals(4, result.size(), "Expected 4 batches for 10 documents with batch size of 3.");
+
+ int totalDocuments = result.stream().mapToInt(List::size).sum();
+ assertEquals(10, totalDocuments, "Expected a total of 10 documents.");
+ }
+
+ @Test
+ @DisplayName("should exhaust cursor with closed cursor")
+ void shouldExhaustCursorWithClosedCursor() {
+ // given
+ BsonDocument commandResult = executeFindCommand(0, 3);
+ cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 3, 0, DOCUMENT_DECODER,
+ null, connectionSource, connection);
+ cursor.close();
+
+ // when & then
+ IllegalStateException illegalStateException = assertThrows(IllegalStateException.class, cursor::exhaust);
+ assertEquals("Cursor has been closed", illegalStateException.getMessage());
+ }
+
+ @Test
+ @DisplayName("should exhaust cursor with empty cursor")
+ void shouldExhaustCursorWithEmptyCursor() {
+ // given
+ getCollectionHelper().deleteMany(Filters.empty());
+
+ BsonDocument commandResult = executeFindCommand(0, 3); // No documents to fetch
+ cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 3, 0, DOCUMENT_DECODER,
+ null, connectionSource, connection);
+
+ // when
+ List<List<Document>> result = cursor.exhaust();
+
+ // then
+ assertTrue(result.isEmpty(), "Expected no batches for an empty cursor.");
+ }
+
+ @Test
+ @DisplayName("server cursor should not be null")
+ void theServerCursorShouldNotBeNull() {
+ BsonDocument commandResult = executeFindCommand(2);
+ cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER,
+ null, connectionSource, connection);
+
+ assertNotNull(cursor.getServerCursor());
+ }
+
+ @Test
+ @DisplayName("test server address should not be null")
+ void theServerAddressShouldNotBeNull() {
+ BsonDocument commandResult = executeFindCommand();
+ cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER,
+ null, connectionSource, connection);
+
+ assertNotNull(cursor.getServerAddress());
+ }
+
+ @Test
+ @DisplayName("should get Exceptions for operations on the cursor after closing")
+ void shouldGetExceptionsForOperationsOnTheCursorAfterClosing() {
+ BsonDocument commandResult = executeFindCommand();
+ cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER,
+ null, connectionSource, connection);
+
+ cursor.close();
+
+ assertDoesNotThrow(() -> cursor.close());
+ assertThrows(IllegalStateException.class, () -> cursor.hasNext());
+ assertThrows(IllegalStateException.class, () -> cursor.next());
+ assertThrows(IllegalStateException.class, () -> cursor.getServerCursor());
+ }
+
+ @Test
+ @DisplayName("should throw an Exception when going off the end")
+ void shouldThrowAnExceptionWhenGoingOffTheEnd() {
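+ // the initial batch holds a single document and the follow-up getMore drains the rest,
+ // so a third next() finds the cursor exhausted and must throw rather than block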
BsonDocument commandResult = executeFindCommand(1); + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + cursor.next(); + cursor.next(); + assertThrows(NoSuchElementException.class, () -> cursor.next()); + } + + @Test + @DisplayName("test cursor remove") + void testCursorRemove() { + BsonDocument commandResult = executeFindCommand(); + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertThrows(UnsupportedOperationException.class, () -> cursor.remove()); + } + + @Test + @DisplayName("test normal exhaustion") + void testNormalExhaustion() { + BsonDocument commandResult = executeFindCommand(); + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertEquals(10, cursorFlatten().size()); + } + + @ParameterizedTest(name = "{index} => limit={0}, batchSize={1}, expectedTotal={2}") + @MethodSource + @DisplayName("test limit exhaustion") + void testLimitExhaustion(final int limit, final int batchSize, final int expectedTotal) { + BsonDocument commandResult = executeFindCommand(limit, batchSize); + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, batchSize, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertEquals(expectedTotal, cursorFlatten().size()); + + checkReferenceCountReachesTarget(connectionSource, 1); + checkReferenceCountReachesTarget(connection, 1); + } + + @ParameterizedTest(name = "{index} => awaitData={0}, maxTimeMS={1}") + @MethodSource + @DisplayName("should block waiting for next batch on a tailable cursor") + void shouldBlockWaitingForNextBatchOnATailableCursor(final boolean awaitData, final int maxTimeMS) { + + getCollectionHelper().create(getCollectionName(), new CreateCollectionOptions().capped(true).sizeInBytes(1000)); + getCollectionHelper().insertDocuments(DOCUMENT_DECODER, new Document("_id", 1).append("ts", new BsonTimestamp(5, 0))); + + BsonDocument commandResult = executeFindCommand(new BsonDocument("ts", + new BsonDocument("$gte", new BsonTimestamp(5, 0))), 0, 2, true, awaitData); + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, maxTimeMS, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertTrue(cursor.hasNext()); + assertEquals(1, cursor.next().get(0).get("_id")); + + new Thread(() -> { + sleep(100); + getCollectionHelper().insertDocuments(DOCUMENT_DECODER, new Document("_id", 2).append("ts", new BsonTimestamp(6, 0))); + }).start(); + + assertTrue(cursor.hasNext()); + assertEquals(2, cursor.next().get(0).get("_id")); + } + + @Test + @DisplayName("test tryNext with tailable") + void testTryNextWithTailable() { + getCollectionHelper().create(getCollectionName(), new CreateCollectionOptions().capped(true).sizeInBytes(1000)); + getCollectionHelper().insertDocuments(DOCUMENT_DECODER, new Document("_id", 1).append("ts", new BsonTimestamp(5, 0))); + + BsonDocument commandResult = executeFindCommand(new BsonDocument("ts", + new BsonDocument("$gte", new BsonTimestamp(5, 0))), 0, 2, true, true); + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + List nextBatch = cursor.tryNext(); + assertNotNull(nextBatch); + assertEquals(1, nextBatch.get(0).get("_id")); + + nextBatch = cursor.tryNext(); + 
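+ // tryNext is non-blocking: nothing new has been inserted into the capped collection yet,
+ // so the tailable cursor reports the empty batch as null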
assertNull(nextBatch);
+
+ getCollectionHelper().insertDocuments(DOCUMENT_DECODER, new Document("_id", 2).append("ts", new BsonTimestamp(6, 0)));
+
+ nextBatch = cursor.tryNext();
+ assertNotNull(nextBatch);
+ assertEquals(2, nextBatch.get(0).get("_id"));
+ }
+
+ @Test
+ @DisplayName("hasNext should throw when cursor is closed in another thread")
+ void hasNextShouldThrowWhenCursorIsClosedInAnotherThread() throws InterruptedException {
+
+ getCollectionHelper().create(getCollectionName(), new CreateCollectionOptions().capped(true).sizeInBytes(1000));
+ getCollectionHelper().insertDocuments(DOCUMENT_DECODER, new Document("_id", 1).append("ts", new BsonTimestamp(5, 0)));
+
+ BsonDocument commandResult = executeFindCommand(new BsonDocument("ts",
+ new BsonDocument("$gte", new BsonTimestamp(5, 0))), 0, 2, true, true);
+ cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER,
+ null, connectionSource, connection);
+
+ assertTrue(cursor.hasNext());
+ assertEquals(1, cursor.next().get(0).get("_id"));
+
+ CountDownLatch latch = new CountDownLatch(1);
+ new Thread(() -> {
+ sleep(100);
+ cursor.close();
+ latch.countDown();
+ }).start();
+
+ assertTrue(latch.await(5, TimeUnit.SECONDS));
+ assertThrows(IllegalStateException.class, () -> cursor.hasNext());
+ }
+
+ @Test
+ @DisplayName("test maxTimeMS")
+ void testMaxTimeMS() {
+ assumeFalse(isSharded());
+ getCollectionHelper().create(getCollectionName(), new CreateCollectionOptions().capped(true).sizeInBytes(1000));
+ getCollectionHelper().insertDocuments(DOCUMENT_DECODER, new Document("_id", 1).append("ts", new BsonTimestamp(5, 0)));
+
+ long maxTimeMS = 500;
+ BsonDocument commandResult = executeFindCommand(new BsonDocument("ts",
+ new BsonDocument("$gte", new BsonTimestamp(5, 0))), 0, 2, true, true);
+ cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, maxTimeMS, DOCUMENT_DECODER,
+ null, connectionSource, connection);
+
+ List<Document> nextBatch = cursor.tryNext();
+ assertNotNull(nextBatch);
+
+ long startTime = System.currentTimeMillis();
+ nextBatch = cursor.tryNext();
+ long endTime = System.currentTimeMillis();
+
+ assertNull(nextBatch);
+
+ // RACY TEST: no guarantee the assertion will hold within the given timeframe
+ assertTrue(endTime - startTime < (maxTimeMS + 200));
+ }
+
+ @Test
+ @DisplayName("test tailable interrupt")
+ void testTailableInterrupt() throws InterruptedException {
+ getCollectionHelper().create(getCollectionName(), new CreateCollectionOptions().capped(true).sizeInBytes(1000));
+ getCollectionHelper().insertDocuments(DOCUMENT_DECODER, new Document("_id", 1).append("ts", new BsonTimestamp(5, 0)));
+
+ BsonDocument commandResult = executeFindCommand(new BsonDocument("ts",
+ new BsonDocument("$gte", new BsonTimestamp(5, 0))), 0, 2, true, true);
+ cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER,
+ null, connectionSource, connection);
+
+ CountDownLatch latch = new CountDownLatch(1);
+ AtomicInteger seen = new AtomicInteger();
+ Thread thread = new Thread(() -> {
+ try {
+ cursor.next();
+ seen.incrementAndGet();
+ cursor.next();
+ seen.incrementAndGet();
+ } catch (Exception e) {
+ // pass
+ } finally {
+ latch.countDown();
+ }
+ });
+
+ thread.start();
+ sleep(1000);
+ thread.interrupt();
+ getCollectionHelper().insertDocuments(DOCUMENT_DECODER, new Document("_id", 2));
+ assertTrue(latch.await(5, TimeUnit.SECONDS));
+ assertEquals(1, seen.intValue());
+ }
+
+ @Test
+ @DisplayName("should
kill cursor if limit is reached on initial query") + void shouldKillCursorIfLimitIsReachedOnInitialQuery() { + assumeFalse(isSharded()); + BsonDocument commandResult = executeFindCommand(5, 10); + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertNotNull(cursor.next()); + assertFalse(cursor.hasNext()); + assertNull(cursor.getServerCursor()); + } + + @Test + @DisplayName("should kill cursor if limit is reached on getMore") + void shouldKillCursorIfLimitIsReachedOnGetMore() { + assumeFalse(isSharded()); + BsonDocument commandResult = executeFindCommand(5, 3); + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 3, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + ServerCursor serverCursor = cursor.getServerCursor(); + assertNotNull(serverCursor); + assertNotNull(cursor.next()); + assertNotNull(cursor.next()); + + assertDoesNotThrow(() -> checkReferenceCountReachesTarget(connection, 1)); + assertThrows(MongoQueryException.class, () -> + makeAdditionalGetMoreCall(getNamespace(), serverCursor, connection) + ); + } + + @Test + @DisplayName("should release connection source if limit is reached on initial query") + void shouldReleaseConnectionSourceIfLimitIsReachedOnInitialQuery() { + assumeFalse(isSharded()); + BsonDocument commandResult = executeFindCommand(5, 10); + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertNull(cursor.getServerCursor()); + assertDoesNotThrow(() -> checkReferenceCountReachesTarget(connectionSource, 1)); + assertDoesNotThrow(() -> checkReferenceCountReachesTarget(connection, 1)); + } + + @Test + @DisplayName("should release connection source if limit is reached on getMore") + void shouldReleaseConnectionSourceIfLimitIsReachedOnGetMore() { + assumeFalse(isSharded()); + BsonDocument commandResult = executeFindCommand(5, 3); + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 3, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertNotNull(cursor.next()); + assertNotNull(cursor.next()); + assertDoesNotThrow(() -> checkReferenceCountReachesTarget(connectionSource, 1)); + assertDoesNotThrow(() -> checkReferenceCountReachesTarget(connection, 1)); + } + + @Test + @DisplayName("test limit with get more") + void testLimitWithGetMore() { + BsonDocument commandResult = executeFindCommand(5, 2); + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertNotNull(cursor.next()); + assertNotNull(cursor.next()); + assertNotNull(cursor.next()); + assertFalse(cursor.hasNext()); + } + + @Test + @DisplayName("test limit with large documents") + void testLimitWithLargeDocuments() { + String bigString = generate(() -> "x") + .limit(16000) + .collect(Collectors.joining()); + + IntStream.range(11, 1000).forEach(i -> + getCollectionHelper().insertDocuments(DOCUMENT_DECODER, new Document("_id", i).append("s", bigString)) + ); + + BsonDocument commandResult = executeFindCommand(300, 0); + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 0, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertEquals(300, cursorFlatten().size()); + } + + @Test + @DisplayName("should respect batch size") + void shouldRespectBatchSize() { + BsonDocument commandResult = executeFindCommand(2); + 
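+ // the initial find requests a batch size of 2; setBatchSize(3) below changes the size
+ // requested by subsequent getMore calls mid-iteration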
cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertEquals(2, cursor.getBatchSize()); + assertEquals(2, cursor.next().size()); + assertEquals(2, cursor.next().size()); + + cursor.setBatchSize(3); + assertEquals(3, cursor.getBatchSize()); + assertEquals(3, cursor.next().size()); + assertEquals(3, cursor.next().size()); + } + + @Test + @DisplayName("should throw cursor not found exception") + void shouldThrowCursorNotFoundException() { + BsonDocument commandResult = executeFindCommand(2); + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + ServerCursor serverCursor = cursor.getServerCursor(); + assertNotNull(serverCursor); + Connection localConnection = connectionSource.getConnection(); + localConnection.command(getNamespace().getDatabaseName(), + new BsonDocument("killCursors", new BsonString(getNamespace().getCollectionName())) + .append("cursors", new BsonArray(singletonList(new BsonInt64(serverCursor.getId())))), + NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), new BsonDocumentCodec(), connectionSource.getOperationContext()); + localConnection.release(); + + cursor.next(); + + MongoCursorNotFoundException exception = assertThrows(MongoCursorNotFoundException.class, () -> cursor.next()); + assertEquals(serverCursor.getId(), exception.getCursorId()); + assertEquals(serverCursor.getAddress(), exception.getServerAddress()); + } + + @Test + @DisplayName("should report available documents") + void shouldReportAvailableDocuments() { + BsonDocument commandResult = executeFindCommand(3); + cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, commandResult, 2, 0, DOCUMENT_DECODER, + null, connectionSource, connection); + + assertEquals(3, cursor.available()); + + cursor.next(); + assertEquals(0, cursor.available()); + + assertTrue(cursor.hasNext()); + assertEquals(2, cursor.available()); + + cursor.next(); + assertEquals(0, cursor.available()); + + assertTrue(cursor.hasNext()); + assertEquals(2, cursor.available()); + + cursor.close(); + assertEquals(0, cursor.available()); + } + + + private static Stream shouldBlockWaitingForNextBatchOnATailableCursor() { + return Stream.of( + arguments(true, 0), + arguments(true, 100), + arguments(false, 0)); + } + + private static Stream testLimitExhaustion() { + return Stream.of( + arguments(5, 2, 5), + arguments(5, -2, 2), + arguments(-5, -2, 5), + arguments(-5, 2, 5), + arguments(2, 5, 2), + arguments(2, -5, 2), + arguments(-2, 5, 2), + arguments(-2, -5, 2) + ); + } + + private BsonDocument executeFindCommand() { + return executeFindCommand(0); + } + + private BsonDocument executeFindCommand(final int batchSize) { + return executeFindCommand(new BsonDocument(), 0, batchSize, false, false); + } + + private BsonDocument executeFindCommand(final int limit, final int batchSize) { + return executeFindCommand(new BsonDocument(), limit, batchSize, false, false); + } + + private BsonDocument executeFindCommand(final BsonDocument filter, final int limit, final int batchSize, final boolean tailable, + final boolean awaitData) { + return executeFindCommand(filter, limit, batchSize, tailable, awaitData, ReadPreference.primary()); + } + + private BsonDocument executeFindCommand(final BsonDocument filter, final int limit, final int batchSize, + final boolean tailable, final boolean awaitData, final ReadPreference readPreference) { + BsonDocument findCommand = 
new BsonDocument("find", new BsonString(getCollectionName())) + .append("filter", filter) + .append("tailable", BsonBoolean.valueOf(tailable)) + .append("awaitData", BsonBoolean.valueOf(awaitData)); + + findCommand.append("limit", new BsonInt32(Math.abs(limit))); + if (limit >= 0) { + if (batchSize < 0 && Math.abs(batchSize) < limit) { + findCommand.append("limit", new BsonInt32(Math.abs(batchSize))); + } else { + findCommand.append("batchSize", new BsonInt32(Math.abs(batchSize))); + } + } + + BsonDocument results = connection.command(getDatabaseName(), findCommand, + NoOpFieldNameValidator.INSTANCE, readPreference, + CommandResultDocumentCodec.create(DOCUMENT_DECODER, FIRST_BATCH), + connectionSource.getOperationContext()); + + assertNotNull(results); + return results; + } + + private List cursorFlatten() { + List results = new ArrayList<>(); + while (cursor.hasNext()) { + results.addAll(cursor.next()); + } + return results; + } + +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CommandOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/CommandOperationSpecification.groovy new file mode 100644 index 00000000000..57aee534fd0 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CommandOperationSpecification.groovy @@ -0,0 +1,68 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation + + +import com.mongodb.OperationFunctionalSpecification +import org.bson.BsonBinary +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonString +import org.bson.codecs.BsonDocumentCodec +import com.mongodb.spock.Slow + +class CommandOperationSpecification extends OperationFunctionalSpecification { + + def 'should execute read command'() { + given: + def operation = new CommandReadOperation(getNamespace().databaseName, + new BsonDocument('count', new BsonString(getCollectionName())), + new BsonDocumentCodec()) + when: + def result = execute(operation, async) + + then: + result.getNumber('n').intValue() == 0 + + + where: + async << [true, false] + } + + + @Slow + def 'should execute command larger than 16MB'() { + given: + def operation = new CommandReadOperation<>(getNamespace().databaseName, + new BsonDocument('findAndModify', new BsonString(getNamespace().fullName)) + .append('query', new BsonDocument('_id', new BsonInt32(42))) + .append('update', + new BsonDocument('_id', new BsonInt32(42)) + .append('b', new BsonBinary( + new byte[16 * 1024 * 1024 - 30]))), + new BsonDocumentCodec()) + + when: + def result = execute(operation, async) + + then: + result.containsKey('value') + + where: + async << [true, false] + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CommitTransactionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/CommitTransactionOperationSpecification.groovy new file mode 100644 index 00000000000..0d91963d5bf --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CommitTransactionOperationSpecification.groovy @@ -0,0 +1,98 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation + +import com.mongodb.OperationFunctionalSpecification +import org.bson.BsonDocument + +import java.util.concurrent.TimeUnit + +import static com.mongodb.WriteConcern.ACKNOWLEDGED +import static com.mongodb.WriteConcern.MAJORITY + + +class CommitTransactionOperationSpecification extends OperationFunctionalSpecification { + + def 'should produce the expected command'() { + given: + def cannedResult = BsonDocument.parse('{value: {}}') + def expectedCommand = BsonDocument.parse('{commitTransaction: 1}') + + when: + def operation = new CommitTransactionOperation(ACKNOWLEDGED) + + then: + testOperationInTransaction(operation, [4, 0, 0], expectedCommand, async, cannedResult) + + when: + operation = new CommitTransactionOperation(MAJORITY) + expectedCommand.put('writeConcern', MAJORITY.asDocument()) + + then: + testOperationInTransaction(operation, [4, 0, 0], expectedCommand, async, cannedResult, true) + + where: + async << [true, false] + } + + def 'should retry if the connection initially fails'() { + given: + def cannedResult = BsonDocument.parse('{value: {}}') + def expectedCommand = BsonDocument.parse('{commitTransaction: 1, writeConcern: {w: "majority", wtimeout: 10}}') + + when: + def writeConcern = MAJORITY.withWTimeout(10, TimeUnit.MILLISECONDS) + def operation = new CommitTransactionOperation(writeConcern) + + then: + testOperationRetries(operation, [4, 0, 0], expectedCommand, async, cannedResult, true) + + when: + writeConcern = MAJORITY + operation = new CommitTransactionOperation(writeConcern) + expectedCommand.put('writeConcern', writeConcern.withWTimeout(10000, TimeUnit.MILLISECONDS).asDocument()) + + then: + testOperationRetries(operation, [4, 0, 0], expectedCommand, async, cannedResult, true) + + when: + writeConcern = ACKNOWLEDGED + operation = new CommitTransactionOperation(writeConcern) + expectedCommand.put('writeConcern', writeConcern.withW('majority').withWTimeout(10000, TimeUnit.MILLISECONDS).asDocument()) + + then: + testOperationRetries(operation, [4, 0, 0], expectedCommand, async, cannedResult, true) + + where: + async << [true, false] + } + + def 'should set writeconcern on second commit'() { + given: + def cannedResult = BsonDocument.parse('{value: {}}') + def expectedCommand = BsonDocument.parse('{commitTransaction: 1, writeConcern: {w: "majority", wtimeout: 10000}}') + + when: + def operation = new CommitTransactionOperation(ACKNOWLEDGED, true) + + then: + testOperationInTransaction(operation, [4, 0, 0], expectedCommand, async, cannedResult, true) + + where: + async << [true, false] + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CountDocumentsOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/CountDocumentsOperationSpecification.groovy new file mode 100644 index 00000000000..8d13cba9f61 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CountDocumentsOperationSpecification.groovy @@ -0,0 +1,345 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation + +import com.mongodb.MongoException +import com.mongodb.MongoNamespace +import com.mongodb.OperationFunctionalSpecification +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import com.mongodb.ServerAddress +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ConnectionDescription +import com.mongodb.connection.ConnectionId +import com.mongodb.connection.ServerId +import com.mongodb.internal.binding.AsyncConnectionSource +import com.mongodb.internal.binding.AsyncReadBinding +import com.mongodb.internal.binding.ConnectionSource +import com.mongodb.internal.binding.ReadBinding +import com.mongodb.internal.bulk.IndexRequest +import com.mongodb.internal.connection.AsyncConnection +import com.mongodb.internal.connection.Connection +import com.mongodb.internal.session.SessionContext +import org.bson.BsonArray +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonInt64 +import org.bson.BsonString +import org.bson.BsonTimestamp +import org.bson.Document +import org.bson.codecs.DocumentCodec + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ClusterFixture.executeAsync +import static com.mongodb.ClusterFixture.getBinding +import static com.mongodb.connection.ServerType.STANDALONE +import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand +import static com.mongodb.internal.operation.ServerVersionHelper.UNKNOWN_WIRE_VERSION + +class CountDocumentsOperationSpecification extends OperationFunctionalSpecification { + + private documents + + def setup() { + documents = [ + new Document('x', 1), + new Document('x', 2), + new Document('x', 3), + new Document('x', 4), + new Document('x', 5).append('y', 1) + ] + getCollectionHelper().insertDocuments(new DocumentCodec(), documents) + } + + def 'should have the correct defaults'() { + when: + CountDocumentsOperation operation = new CountDocumentsOperation(getNamespace()) + + then: + operation.getFilter() == null + operation.getHint() == null + operation.getLimit() == 0 + operation.getSkip() == 0 + } + + def 'should set optional values correctly'() { + given: + def filter = new BsonDocument('filter', new BsonInt32(1)) + def hint = new BsonString('hint') + + when: + CountDocumentsOperation operation = new CountDocumentsOperation(getNamespace()) + .filter(filter) + .hint(hint) + .limit(20) + .skip(30) + + then: + operation.getFilter() == filter + operation.getHint() == hint + operation.getLimit() == 20 + operation.getSkip() == 30 + } + + def 'should get the count'() { + expect: + execute(new CountDocumentsOperation(getNamespace()), async) == documents.size() + + where: + async << [true, false] + } + + def 'should return 0 if no collection'() { + when: + getCollectionHelper().drop() + + then: + execute(new CountDocumentsOperation(getNamespace()), async) == 0 + + where: + async << [true, false] + } + + def 'should return 0 if empty collection'() { + when: + getCollectionHelper().drop() + getCollectionHelper().create() + + then: + execute(new CountDocumentsOperation(getNamespace()), async) == 0 + + where: + async << [true, false] + } + + def 'should use limit with the count'() { + when: + def operation = new CountDocumentsOperation(getNamespace()).limit(1) + + then: + execute(operation, async) == 1 + + where: + async << [true, false] + } + + def 'should use skip with the 
count'() {
+        when:
+        def operation = new CountDocumentsOperation(getNamespace()).skip(documents.size() - 2)
+
+        then:
+        execute(operation, async) == 2
+
+        where:
+        async << [true, false]
+    }
+
+    def 'should use hint with the count'() {
+        given:
+        def indexDefinition = new BsonDocument('y', new BsonInt32(1))
+        new CreateIndexesOperation(getNamespace(), [new IndexRequest(indexDefinition).sparse(true)], null)
+                .execute(getBinding())
+        def operation = new CountDocumentsOperation(getNamespace()).hint(indexDefinition)
+
+        when:
+        def count = execute(operation, async)
+
+        then:
+        count == 1
+
+        where:
+        async << [true, false]
+    }
+
+    def 'should support hints that are bson documents or strings'() {
+        expect:
+        execute(new CountDocumentsOperation(getNamespace()).hint(hint), async) == 5
+
+        where:
+        [async, hint] << [[true, false], [new BsonString('_id_'), BsonDocument.parse('{_id: 1}')]].combinations()
+    }
+
+    def 'should throw with bad hint'() {
+        given:
+        def operation = new CountDocumentsOperation(getNamespace())
+                .filter(new BsonDocument('a', new BsonInt32(1)))
+                .hint(new BsonString('BAD HINT'))
+
+        when:
+        execute(operation, async)
+
+        then:
+        thrown(MongoException)
+
+        where:
+        async << [true, false]
+    }
+
+    def 'should use the ReadBindings readPreference to set secondaryOk'() {
+        when:
+        def operation = new CountDocumentsOperation(helper.namespace)
+                .filter(BsonDocument.parse('{a: 1}'))
+
+        then:
+        testOperationSecondaryOk(operation, [3, 4, 0], readPreference, async, helper.cursorResult)
+
+        where:
+        [async, readPreference] << [[true, false], [ReadPreference.primary(), ReadPreference.secondary()]].combinations()
+    }
+
+    def 'should create the expected aggregation command'() {
+        when:
+        def filter = new BsonDocument('filter', new BsonInt32(1))
+        def operation = new CountDocumentsOperation(helper.namespace)
+        def pipeline = [BsonDocument.parse('{ $match: {}}'), BsonDocument.parse('{$group: {_id: 1, n: {$sum: 1}}}')]
+        def expectedCommand = new BsonDocument('aggregate', new BsonString(helper.namespace.getCollectionName()))
+                .append('pipeline', new BsonArray(pipeline))
+                .append('cursor', new BsonDocument())
+
+        then:
+        testOperation(operation, [3, 4, 0], expectedCommand, async, helper.cursorResult)
+
+        when:
+        operation = new CountDocumentsOperation(helper.namespace)
+                .filter(filter)
+                .limit(20)
+                .skip(30)
+                .hint(hint)
+                .collation(defaultCollation)
+
+        expectedCommand = expectedCommand
+                .append('pipeline', new BsonArray([new BsonDocument('$match', filter),
+                                                   new BsonDocument('$skip', new BsonInt64(30)),
+                                                   new BsonDocument('$limit', new BsonInt64(20)),
+                                                   pipeline.last()]))
+                .append('collation', defaultCollation.asDocument())
+                .append('hint', hint)
+
+        then:
+        testOperation(operation, [3, 4, 0], expectedCommand, async, helper.cursorResult)
+
+        where:
+        [async, hint] << [[true, false], [new BsonString('hint_1'), BsonDocument.parse('{hint: 1}')]].combinations()
+    }
+
+    def 'should support collation'() {
+        given:
+        getCollectionHelper().insertDocuments(BsonDocument.parse('{str: "foo"}'))
+        def operation = new CountDocumentsOperation(namespace)
+                .filter(BsonDocument.parse('{str: "FOO"}'))
+                .collation(caseInsensitiveCollation)
+
+        when:
+        def result = execute(operation, async)
+
+        then:
+        result == 1
+
+        where:
+        async << [true, false]
+    }
+
+    def 'should add read concern to command'() {
+        given:
+        def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext)
+        def binding = Stub(ReadBinding)
+        def source = Stub(ConnectionSource)
+        def connection = Mock(Connection)
+
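+        // The stubbed binding/source and mocked connection below let the test capture the command
+        // document the operation builds, without any round trip to a live server.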
binding.readPreference >> ReadPreference.primary() + binding.operationContext >> operationContext + binding.readConnectionSource >> source + source.connection >> connection + source.retain() >> source + source.operationContext >> operationContext + def pipeline = new BsonArray([BsonDocument.parse('{ $match: {}}'), BsonDocument.parse('{$group: {_id: 1, n: {$sum: 1}}}')]) + def commandDocument = new BsonDocument('aggregate', new BsonString(getCollectionName())) + .append('pipeline', pipeline) + .append('cursor', new BsonDocument()) + appendReadConcernToCommand(sessionContext, UNKNOWN_WIRE_VERSION, commandDocument) + + def operation = new CountDocumentsOperation(getNamespace()) + + when: + operation.execute(binding) + + then: + _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())), + 6, STANDALONE, 1000, 100000, 100000, []) + 1 * connection.command(_, commandDocument, _, _, _, operationContext) >> helper.cursorResult + 1 * connection.release() + + where: + sessionContext << [ + Stub(SessionContext) { + isCausallyConsistent() >> true + getOperationTime() >> new BsonTimestamp(42, 0) + hasActiveTransaction() >> false + getReadConcern() >> ReadConcern.MAJORITY + } + ] + } + + def 'should add read concern to command asynchronously'() { + given: + def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext) + def binding = Stub(AsyncReadBinding) + def source = Stub(AsyncConnectionSource) + def connection = Mock(AsyncConnection) + binding.readPreference >> ReadPreference.primary() + binding.operationContext >> operationContext + binding.getReadConnectionSource(_) >> { it[0].onResult(source, null) } + source.getConnection(_) >> { it[0].onResult(connection, null) } + source.retain() >> source + source.operationContext >> operationContext + def pipeline = new BsonArray([BsonDocument.parse('{ $match: {}}'), BsonDocument.parse('{$group: {_id: 1, n: {$sum: 1}}}')]) + def commandDocument = new BsonDocument('aggregate', new BsonString(getCollectionName())) + .append('pipeline', pipeline) + .append('cursor', new BsonDocument()) + appendReadConcernToCommand(sessionContext, UNKNOWN_WIRE_VERSION, commandDocument) + + def operation = new CountDocumentsOperation(getNamespace()) + + when: + executeAsync(operation, binding) + + then: + _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())), + 6, STANDALONE, 1000, 100000, 100000, []) + 1 * connection.commandAsync(_, commandDocument, _, _, _, *_) >> { + it.last().onResult(helper.cursorResult, null) + } + 1 * connection.release() + + where: + sessionContext << [ + Stub(SessionContext) { + isCausallyConsistent() >> true + getOperationTime() >> new BsonTimestamp(42, 0) + hasActiveTransaction() >> false + getReadConcern() >> ReadConcern.MAJORITY + } + ] + } + def helper = [ + dbName: 'db', + namespace: new MongoNamespace('db', 'coll'), + cursorResult: BsonDocument.parse('{ok: 1.0}') + .append('cursor', new BsonDocument('id', new BsonInt64(0)).append('ns', new BsonString('db.coll')) + .append('firstBatch', new BsonArrayWrapper([BsonDocument.parse('{n: 10}') ]))), + connectionDescription: Stub(ConnectionDescription) + ] +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CreateCollectionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateCollectionOperationSpecification.groovy new file mode 100644 index 00000000000..b33ec785094 --- /dev/null +++ 
b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateCollectionOperationSpecification.groovy @@ -0,0 +1,281 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation + +import com.mongodb.MongoBulkWriteException +import com.mongodb.MongoWriteConcernException +import com.mongodb.OperationFunctionalSpecification +import com.mongodb.WriteConcern +import com.mongodb.client.model.ValidationAction +import com.mongodb.client.model.ValidationLevel +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonString +import org.bson.codecs.BsonDocumentCodec +import spock.lang.IgnoreIf + +import static com.mongodb.ClusterFixture.getBinding +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet +import static com.mongodb.ClusterFixture.serverVersionLessThan +import static java.util.Collections.singletonList + +class CreateCollectionOperationSpecification extends OperationFunctionalSpecification { + + def 'should have the correct defaults'() { + when: + CreateCollectionOperation operation = createOperation() + + then: + !operation.isCapped() + operation.getSizeInBytes() == 0 + operation.isAutoIndex() + operation.getMaxDocuments() == 0 + operation.getStorageEngineOptions() == null + operation.getIndexOptionDefaults() == null + operation.getValidator() == null + operation.getValidationLevel() == null + operation.getValidationAction() == null + operation.getCollation() == null + } + + def 'should set optional values correctly'(){ + given: + def storageEngineOptions = BsonDocument.parse('{ wiredTiger : {}}') + def indexOptionDefaults = BsonDocument.parse('{ storageEngine: { wiredTiger : {} }}') + def validator = BsonDocument.parse('{ level: { $gte : 10 }}') + + when: + CreateCollectionOperation operation = createOperation() + .autoIndex(false) + .capped(true) + .sizeInBytes(1000) + .maxDocuments(1000) + .storageEngineOptions(storageEngineOptions) + .indexOptionDefaults(indexOptionDefaults) + .validator(validator) + .validationLevel(ValidationLevel.MODERATE) + .validationAction(ValidationAction.WARN) + .collation(defaultCollation) + + then: + operation.isCapped() + operation.sizeInBytes == 1000 + !operation.isAutoIndex() + operation.getMaxDocuments() == 1000 + operation.getStorageEngineOptions() == storageEngineOptions + operation.getIndexOptionDefaults() == indexOptionDefaults + operation.getValidator() == validator + operation.getValidationLevel() == ValidationLevel.MODERATE + operation.getValidationAction() == ValidationAction.WARN + operation.getCollation() == defaultCollation + } + + def 'should create a collection'() { + given: + assert !collectionNameExists(getCollectionName()) + + when: + def operation = createOperation() + execute(operation, async) + + then: + collectionNameExists(getCollectionName()) + + where: + async << [true, false] + } + + def 'should pass through storage engine options'() { + given: + def storageEngineOptions = new 
BsonDocument('wiredTiger', new BsonDocument('configString', new BsonString('block_compressor=zlib'))) + def operation = createOperation() + .storageEngineOptions(storageEngineOptions) + + when: + execute(operation, async) + + then: + new ListCollectionsOperation(getDatabaseName(), new BsonDocumentCodec()) + .execute(getBinding()).next().find { it -> it.getString('name').value == getCollectionName() } + .getDocument('options').getDocument('storageEngine') == operation.storageEngineOptions + + where: + async << [true, false] + } + + def 'should pass through storage engine options- zstd compression'() { + given: + def storageEngineOptions = new BsonDocument('wiredTiger', new BsonDocument('configString', new BsonString('block_compressor=zstd'))) + def operation = createOperation() + .storageEngineOptions(storageEngineOptions) + + when: + execute(operation, async) + + then: + new ListCollectionsOperation(getDatabaseName(), new BsonDocumentCodec()) + .execute(getBinding()).next().find { it -> it.getString('name').value == getCollectionName() } + .getDocument('options').getDocument('storageEngine') == operation.storageEngineOptions + where: + async << [true, false] + } + + def 'should create capped collection'() { + given: + assert !collectionNameExists(getCollectionName()) + def operation = createOperation() + .capped(true) + .maxDocuments(100) + .sizeInBytes(40 * 1024) + + when: + execute(operation, async) + + then: + collectionNameExists(getCollectionName()) + + when: + def stats = storageStats() + + then: + stats.getBoolean('capped').getValue() + stats.getNumber('max').intValue() == 100 + // Starting in 3.0, the size in bytes moved from storageSize to maxSize + stats.getNumber('maxSize', new BsonInt32(0)).intValue() == 40 * 1024 || + stats.getNumber('storageSize', new BsonInt32(0)).intValue() == 40 * 1024 + + where: + async << [true, false] + } + + def 'should allow indexOptionDefaults'() { + given: + assert !collectionNameExists(getCollectionName()) + def indexOptionDefaults = BsonDocument.parse('{ storageEngine: { wiredTiger : {} }}') + def operation = createOperation() + .indexOptionDefaults(indexOptionDefaults) + + when: + execute(operation, async) + + then: + getCollectionInfo(getCollectionName()).get('options').get('indexOptionDefaults') == indexOptionDefaults + + where: + async << [true, false] + } + + + def 'should allow validator'() { + given: + assert !collectionNameExists(getCollectionName()) + def validator = BsonDocument.parse('{ level: { $gte : 10 }}') + def operation = createOperation() + .validator(validator) + .validationLevel(ValidationLevel.MODERATE) + .validationAction(ValidationAction.ERROR) + + when: + execute(operation, async) + + then: + def options = getCollectionInfo(getCollectionName()).get('options') + options.get('validator') == validator + options.get('validationLevel') == new BsonString(ValidationLevel.MODERATE.getValue()) + options.get('validationAction') == new BsonString(ValidationAction.ERROR.getValue()) + + when: + getCollectionHelper().insertDocuments(BsonDocument.parse('{ level: 8}')) + + then: + MongoBulkWriteException writeConcernException = thrown() + writeConcernException.getWriteErrors().get(0).getCode() == 121 + + where: + async << [true, false] + } + + @IgnoreIf({ !isDiscoverableReplicaSet() }) + def 'should throw on write concern error'() { + given: + assert !collectionNameExists(getCollectionName()) + def operation = createOperation(new WriteConcern(5)) + + when: + execute(operation, async) + + then: + def ex = thrown(MongoWriteConcernException) 
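+        // A write concern of w: 5 cannot be satisfied by the test replica set, so the server is expected
+        // to report a write concern error (code 100 here).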
+ ex.writeConcernError.code == 100 + ex.writeResult.wasAcknowledged() + + where: + async << [true, false] + } + + def 'should be able to create a collection with a collation'() { + given: + def operation = createOperation().collation(defaultCollation) + + when: + execute(operation, async) + def collectionCollation = getCollectionInfo(getCollectionName()).get('options').get('collation') + collectionCollation.remove('version') + + then: + collectionCollation == defaultCollation.asDocument() + + where: + async << [true, false] + } + + def getCollectionInfo(String collectionName) { + new ListCollectionsOperation(databaseName, new BsonDocumentCodec()).filter(new BsonDocument('name', + new BsonString(collectionName))).execute(getBinding()).tryNext()?.head() + } + + def collectionNameExists(String collectionName) { + getCollectionInfo(collectionName) != null + } + + + BsonDocument storageStats() { + if (serverVersionLessThan(6, 2)) { + return new CommandReadOperation<>(getDatabaseName(), + new BsonDocument('collStats', new BsonString(getCollectionName())), + new BsonDocumentCodec()).execute(getBinding()) + } + BatchCursor cursor = new AggregateOperation( + + getNamespace(), + singletonList(new BsonDocument('$collStats', new BsonDocument('storageStats', new BsonDocument()))), + new BsonDocumentCodec()).execute(getBinding()) + try { + return cursor.next().first().getDocument('storageStats') + } finally { + cursor.close() + } + } + + def createOperation() { + createOperation(null) + } + + def createOperation(WriteConcern writeConcern) { + new CreateCollectionOperation(getDatabaseName(), getCollectionName(), writeConcern) + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CreateIndexesOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateIndexesOperationSpecification.groovy new file mode 100644 index 00000000000..78a9914e022 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateIndexesOperationSpecification.groovy @@ -0,0 +1,513 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation + +import com.mongodb.CreateIndexCommitQuorum +import com.mongodb.DuplicateKeyException +import com.mongodb.MongoClientException +import com.mongodb.MongoCommandException +import com.mongodb.MongoWriteConcernException +import com.mongodb.OperationFunctionalSpecification +import com.mongodb.WriteConcern +import com.mongodb.internal.bulk.IndexRequest +import org.bson.BsonBoolean +import org.bson.BsonDocument +import org.bson.BsonDocumentWrapper +import org.bson.BsonInt32 +import org.bson.BsonInt64 +import org.bson.BsonString +import org.bson.Document +import org.bson.codecs.DocumentCodec +import spock.lang.IgnoreIf + +import static com.mongodb.ClusterFixture.getBinding +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet +import static com.mongodb.ClusterFixture.serverVersionAtLeast +import static com.mongodb.ClusterFixture.serverVersionLessThan +import static java.util.concurrent.TimeUnit.SECONDS + +class CreateIndexesOperationSpecification extends OperationFunctionalSpecification { + def x1 = ['x': 1] as Document + def field1Index = ['field': 1] + def field2Index = ['field2': 1] + def xyIndex = ['x.y': 1] + + + def 'should get index names'() { + when: + def createIndexOperation = createOperation([new IndexRequest(new BsonDocument('field1', new BsonInt32(1))), + new IndexRequest(new BsonDocument('field2', new BsonInt32(-1))), + new IndexRequest(new BsonDocument('field3', new BsonInt32(1)) + .append('field4', new BsonInt32(-1))), + new IndexRequest(new BsonDocument('field5', new BsonInt32(-1))) + .name('customName') + ]) + then: + createIndexOperation.indexNames == ['field1_1', 'field2_-1', 'field3_1_field4_-1', 'customName'] + } + + def 'should be able to create a single index'() { + given: + def keys = new BsonDocument('field', new BsonInt32(1)) + def operation = createOperation([new IndexRequest(keys)]) + + when: + execute(operation, async) + + then: + getUserCreatedIndexes('key') == [field1Index] + + where: + async << [true, false] + } + + @IgnoreIf({ serverVersionAtLeast(4, 4) }) + def 'should throw exception if commit quorum is set where server < 4.4'() { + given: + def keys = new BsonDocument('field', new BsonInt32(1)) + def operation = createOperation([new IndexRequest(keys)]) + .commitQuorum(CreateIndexCommitQuorum.MAJORITY) + + when: + execute(operation, async) + + then: + thrown(MongoClientException) + + where: + async << [true, false] + } + + @IgnoreIf({ !isDiscoverableReplicaSet() || serverVersionLessThan(4, 4) }) + def 'should create index with commit quorum'() { + given: + def keys = new BsonDocument('field', new BsonInt32(1)) + + when: + def operation = createOperation([new IndexRequest(keys)]) + .commitQuorum(quorum) + + then: + operation.getCommitQuorum() == quorum + + when: + execute(operation, async) + + then: + getUserCreatedIndexes('key') == [field1Index] + + where: + [async, quorum] << [[true, false], [CreateIndexCommitQuorum.MAJORITY, CreateIndexCommitQuorum.VOTING_MEMBERS, + CreateIndexCommitQuorum.create(1), CreateIndexCommitQuorum.create(2)]].combinations() + } + + def 'should be able to create a single index with a BsonInt64'() { + given: + def keys = new BsonDocument('field', new BsonInt64(1)) + def operation = createOperation([new IndexRequest(keys)]) + + when: + execute(operation, async) + + then: + getUserCreatedIndexes('key') == [field1Index] + + where: + async << [true, false] + } + + def 'should be able to create multiple indexes'() { + given: + def keysForFirstIndex = new BsonDocument('field', 
new BsonInt32(1))
+        def keysForSecondIndex = new BsonDocument('field2', new BsonInt32(1))
+        def operation = createOperation([new IndexRequest(keysForFirstIndex),
+                                         new IndexRequest(keysForSecondIndex)])
+
+        when:
+        execute(operation, async)
+
+        then:
+        getUserCreatedIndexes('key') == [field1Index, field2Index]
+
+        where:
+        async << [true, false]
+    }
+
+    def 'should be able to create a single index on a nested field'() {
+        given:
+        def keys = new BsonDocument('x.y', new BsonInt32(1))
+        def operation = createOperation([new IndexRequest(keys)])
+
+        when:
+        execute(operation, async)
+
+        then:
+        getUserCreatedIndexes('key') == [xyIndex]
+
+        where:
+        async << [true, false]
+    }
+
+    def 'should be able to handle duplicate key errors when indexing'() {
+        given:
+        getCollectionHelper().insertDocuments(new DocumentCodec(), x1, x1)
+        def operation = createOperation([new IndexRequest(new BsonDocument('x', new BsonInt32(1))).unique(true)])
+
+        when:
+        execute(operation, async)
+
+        then:
+        thrown(DuplicateKeyException)
+
+        where:
+        async << [true, false]
+    }
+
+    def 'should throw when trying to build an invalid index'() {
+        given:
+        def operation = createOperation([new IndexRequest(new BsonDocument())])
+
+        when:
+        execute(operation, async)
+
+        then:
+        thrown(MongoCommandException)
+
+        where:
+        async << [true, false]
+    }
+
+    def 'should be able to create a unique index'() {
+        given:
+        def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1)))])
+
+        when:
+        execute(operation, async)
+
+        then:
+        getUserCreatedIndexes('unique').size() == 0
+
+        when:
+        getCollectionHelper().drop(getNamespace())
+        operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1))).unique(true)])
+        execute(operation, async)
+
+        then:
+        getUserCreatedIndexes('unique').size() == 1
+
+        where:
+        async << [true, false]
+    }
+
+    def 'should be able to create a sparse index'() {
+        given:
+        def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1)))])
+
+        when:
+        execute(operation, async)
+
+        then:
+        getUserCreatedIndexes('sparse').size() == 0
+
+        when:
+        getCollectionHelper().drop(getNamespace())
+        operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1))).sparse(true)])
+        execute(operation, async)
+
+        then:
+        getUserCreatedIndexes('sparse').size() == 1
+
+        where:
+        async << [true, false]
+    }
+
+    def 'should be able to create TTL indexes'() {
+        given:
+        def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1)))])
+
+        when:
+        execute(operation, async)
+
+        then:
+        getUserCreatedIndexes('expireAfterSeconds').size() == 0
+
+        when:
+        getCollectionHelper().drop(getNamespace())
+        operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1))).expireAfter(100, SECONDS)])
+        execute(operation, async)
+
+        then:
+        getUserCreatedIndexes('expireAfterSeconds').size() == 1
+        getUserCreatedIndexes('expireAfterSeconds') == [100]
+
+        where:
+        async << [true, false]
+    }
+
+    def 'should be able to create 2d indexes'() {
+        given:
+        def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonString('2d')))])
+
+        when:
+        execute(operation, async)
+
+        then:
+        getUserCreatedIndexes('key') == [['field': '2d']]
+
+        when:
+        getCollectionHelper().drop(getNamespace())
+        operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonString('2d'))).bits(2).min(1.0).max(2.0)])
+        execute(operation, async)
+
+        then:
+
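+        // The 2d index parameters (bits, min, max) are persisted with the index definition,
+        // so they can be read back via listIndexes below.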
getUserCreatedIndexes('key') == [['field': '2d']] + getUserCreatedIndexes('bits') == [2] + getUserCreatedIndexes('min') == [1.0] + getUserCreatedIndexes('max') == [2.0] + + where: + async << [true, false] + } + + def 'should be able to create a 2dSphereIndex'() { + given: + def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonString('2dsphere')))]) + + when: + execute(operation, async) + + then: + getUserCreatedIndexes('key') == [['field' :'2dsphere']] + + where: + async << [true, false] + } + + def 'should be able to create a 2dSphereIndex with version 1'() { + given: + def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonString('2dsphere'))).sphereVersion(1)]) + + when: + execute(operation, async) + + then: + getUserCreatedIndexes('key') == [['field' :'2dsphere']] + getUserCreatedIndexes('2dsphereIndexVersion') == [1] + + where: + async << [true, false] + } + + def 'should be able to create a textIndex'() { + given: + def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonString('text'))) + .defaultLanguage('es') + .languageOverride('language') + .weights(new BsonDocument('field', new BsonInt32(100)))]) + + when: + execute(operation, async) + + then: + getUserCreatedIndexes().size() == 1 + getUserCreatedIndexes('weights') == [['field': 100]] + getUserCreatedIndexes('default_language') == ['es'] + getUserCreatedIndexes('language_override') == ['language'] + + where: + async << [true, false] + } + + def 'should be able to create a textIndexVersion'() { + given: + def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonString('text')))]) + + when: + execute(operation, async) + + then: + getUserCreatedIndexes().size() == 1 + + where: + async << [true, false] + } + + def 'should be able to create a textIndexVersion with version 1'() { + given: + def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonString('text'))).textVersion(1)]) + + when: + execute(operation, async) + + then: + getUserCreatedIndexes('textIndexVersion') == [1] + + where: + async << [true, false] + } + + def 'should pass through storage engine options'() { + given: + def storageEngineOptions = new Document('wiredTiger', new Document('configString', 'block_compressor=zlib')) + def operation = createOperation([new IndexRequest(new BsonDocument('a', new BsonInt32(1))) + .storageEngine(new BsonDocumentWrapper(storageEngineOptions, new DocumentCodec()))]) + + when: + execute(operation, async) + + then: + getIndex('a_1').get('storageEngine') == storageEngineOptions + + where: + async << [true, false] + } + + def 'should be able to create a partially filtered index'() { + given: + def partialFilterExpression = new Document('a', new Document('$gte', 10)) + def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1))) + .partialFilterExpression(new BsonDocumentWrapper(partialFilterExpression, + new DocumentCodec()))]) + + when: + execute(operation, async) + + then: + getUserCreatedIndexes('partialFilterExpression').head() == partialFilterExpression + + where: + async << [true, false] + } + + @IgnoreIf({ !isDiscoverableReplicaSet() }) + def 'should throw on write concern error'() { + given: + def keys = new BsonDocument('field', new BsonInt32(1)) + def operation = new CreateIndexesOperation(getNamespace(), [new IndexRequest(keys)], new WriteConcern(5)) + + when: + execute(operation, async) + + then: + def ex = thrown(MongoWriteConcernException) + 
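+        // The index build itself is acknowledged even though the unsatisfiable write concern
+        // surfaces as an error afterwards.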
ex.writeConcernError.code == 100 + ex.writeResult.wasAcknowledged() + + where: + async << [true, false] + } + + def 'should be able to create an index with collation'() { + given: + def operation = createOperation([new IndexRequest(new BsonDocument('a', new BsonInt32(1))).collation(defaultCollation)]) + + when: + execute(operation, async) + def indexCollation = new BsonDocumentWrapper(getIndex('a_1').get('collation'), new DocumentCodec()) + indexCollation.remove('version') + + then: + indexCollation == defaultCollation.asDocument() + + where: + async << [true, false] + } + + def 'should be able to create wildcard indexes'() { + given: + def operation = createOperation([new IndexRequest(new BsonDocument('$**', new BsonInt32(1))), + new IndexRequest(new BsonDocument('tags.$**', new BsonInt32(1)))]) + + when: + execute(operation, async) + + then: + getUserCreatedIndexes('key').contains(['$**': 1]) + getUserCreatedIndexes('key').contains(['tags.$**': 1]) + + where: + async << [true, false] + } + + def 'should be able to create wildcard index with projection'() { + given: + def operation = createOperation([new IndexRequest(new BsonDocument('$**', new BsonInt32(1))) + .wildcardProjection(new BsonDocument('a', BsonBoolean.TRUE).append('_id', + BsonBoolean.FALSE))]) + + when: + execute(operation, async) + + then: + getUserCreatedIndexes('key').contains(['$**': 1]) + getUserCreatedIndexes('wildcardProjection').contains(['a': true, '_id': false]) + + where: + async << [true, false] + } + + @IgnoreIf({ serverVersionLessThan(4, 4) }) + def 'should be able to set hidden index'() { + given: + def operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1)))]) + + when: + execute(operation, async) + + then: + getUserCreatedIndexes('hidden').size() == 0 + + when: + getCollectionHelper().drop(getNamespace()) + operation = createOperation([new IndexRequest(new BsonDocument('field', new BsonInt32(1))).hidden(true)]) + execute(operation, async) + + then: + getUserCreatedIndexes('hidden').size() == 1 + + where: + async << [true, false] + } + + Document getIndex(final String indexName) { + getIndexes().find { + it -> it.getString('name') == indexName + } + } + + List getIndexes() { + def indexes = [] + def cursor = new ListIndexesOperation(getNamespace(), new DocumentCodec()).execute(getBinding()) + while (cursor.hasNext()) { + indexes.addAll(cursor.next()) + } + indexes + } + + List getUserCreatedIndexes() { + getIndexes().findAll { it.key != [_id: 1] } + } + + List getUserCreatedIndexes(String keyname) { + getUserCreatedIndexes()*.get(keyname).findAll { it != null } + } + + def createOperation(final List requests) { + new CreateIndexesOperation(getNamespace(), requests, null) + } + +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/CreateViewOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateViewOperationSpecification.groovy new file mode 100644 index 00000000000..07a35800242 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/CreateViewOperationSpecification.groovy @@ -0,0 +1,132 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation + + +import com.mongodb.MongoNamespace +import com.mongodb.MongoWriteConcernException +import com.mongodb.OperationFunctionalSpecification +import com.mongodb.WriteConcern +import org.bson.BsonArray +import org.bson.BsonBoolean +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonString +import org.bson.codecs.BsonDocumentCodec +import spock.lang.IgnoreIf + +import static com.mongodb.ClusterFixture.getBinding +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet + +class CreateViewOperationSpecification extends OperationFunctionalSpecification { + + def 'should create view'() { + given: + def viewOn = getCollectionName() + def viewName = getCollectionName() + '-view' + def viewNamespace = new MongoNamespace(getDatabaseName(), viewName) + + assert !collectionNameExists(viewOn) + assert !collectionNameExists(viewName) + + def trueXDocument = new BsonDocument('_id', new BsonInt32(1)).append('x', BsonBoolean.TRUE) + def falseXDocument = new BsonDocument('_id', new BsonInt32(2)).append('x', BsonBoolean.FALSE) + getCollectionHelper().insertDocuments([trueXDocument, falseXDocument]) + + def pipeline = [new BsonDocument('$match', trueXDocument)] + def operation = new CreateViewOperation(getDatabaseName(), viewName, viewOn, pipeline, + WriteConcern.ACKNOWLEDGED) + + when: + execute(operation, async) + + then: + def options = getCollectionInfo(viewName).get('options') + options.get('viewOn') == new BsonString(viewOn) + options.get('pipeline') == new BsonArray(pipeline) + getCollectionHelper(viewNamespace).find(new BsonDocumentCodec()) == [trueXDocument] + + cleanup: + getCollectionHelper(viewNamespace).drop() + + where: + async << [true, false] + } + + def 'should create view with collation'() { + given: + def viewOn = getCollectionName() + def viewName = getCollectionName() + '-view' + def viewNamespace = new MongoNamespace(getDatabaseName(), viewName) + + assert !collectionNameExists(viewOn) + assert !collectionNameExists(viewName) + + def operation = new CreateViewOperation(getDatabaseName(), viewName, viewOn, [], + WriteConcern.ACKNOWLEDGED) + .collation(defaultCollation) + + when: + execute(operation, async) + def collectionCollation = getCollectionInfo(viewName).get('options').get('collation') + collectionCollation.remove('version') + + then: + collectionCollation == defaultCollation.asDocument() + + cleanup: + getCollectionHelper(viewNamespace).drop() + + where: + async << [true, false] + } + + @IgnoreIf({ !isDiscoverableReplicaSet() }) + def 'should throw on write concern error'() { + given: + def viewName = getCollectionName() + '-view' + def viewNamespace = new MongoNamespace(getDatabaseName(), viewName) + assert !collectionNameExists(viewName) + + def operation = new CreateViewOperation(getDatabaseName(), viewName, getCollectionName(), [], + new WriteConcern(5)) + + when: + execute(operation, async) + + then: + def ex = thrown(MongoWriteConcernException) + ex.writeConcernError.code == 100 + ex.writeResult.wasAcknowledged() + + cleanup: + getCollectionHelper(viewNamespace).drop() + + 
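+        // As elsewhere in these specs, the where block drives each feature method twice:
+        // once through the sync execute path and once through the async path.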
where: + async << [true, false] + } + + def getCollectionInfo(String collectionName) { + new ListCollectionsOperation(databaseName, new BsonDocumentCodec()).filter(new BsonDocument('name', + new BsonString(collectionName))).execute(getBinding()).tryNext()?.head() + } + + def collectionNameExists(String collectionName) { + getCollectionInfo(collectionName) != null + } + +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/DistinctOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/DistinctOperationSpecification.groovy new file mode 100644 index 00000000000..726a3723df5 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/DistinctOperationSpecification.groovy @@ -0,0 +1,312 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation + +import com.mongodb.MongoNamespace +import com.mongodb.OperationFunctionalSpecification +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import com.mongodb.ServerAddress +import com.mongodb.client.test.Worker +import com.mongodb.client.test.WorkerCodec +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ConnectionDescription +import com.mongodb.connection.ConnectionId +import com.mongodb.connection.ServerId +import com.mongodb.internal.binding.AsyncConnectionSource +import com.mongodb.internal.binding.AsyncReadBinding +import com.mongodb.internal.binding.ConnectionSource +import com.mongodb.internal.binding.ReadBinding +import com.mongodb.internal.connection.AsyncConnection +import com.mongodb.internal.connection.Connection +import com.mongodb.internal.session.SessionContext +import org.bson.BsonBoolean +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonInvalidOperationException +import org.bson.BsonString +import org.bson.BsonTimestamp +import org.bson.Document +import org.bson.codecs.BsonDocumentCodec +import org.bson.codecs.BsonValueCodecProvider +import org.bson.codecs.Decoder +import org.bson.codecs.DocumentCodec +import org.bson.codecs.DocumentCodecProvider +import org.bson.codecs.StringCodec +import org.bson.codecs.ValueCodecProvider +import org.bson.types.ObjectId + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ClusterFixture.executeAsync +import static com.mongodb.connection.ServerType.STANDALONE +import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand +import static com.mongodb.internal.operation.ServerVersionHelper.UNKNOWN_WIRE_VERSION +import static org.bson.codecs.configuration.CodecRegistries.fromProviders + +class DistinctOperationSpecification extends OperationFunctionalSpecification { + + def codecRegistry = fromProviders([new ValueCodecProvider(), new DocumentCodecProvider(), new BsonValueCodecProvider()]) + + def getCodec(final Class clazz) { + codecRegistry.get(clazz) + } + + def stringDecoder = 
getCodec(String) + + def 'should have the correct defaults'() { + when: + DistinctOperation operation = new DistinctOperation(getNamespace(), 'name', stringDecoder) + + then: + operation.getFilter() == null + operation.getCollation() == null + } + + def 'should set optional values correctly'(){ + given: + def filter = new BsonDocument('filter', new BsonInt32(1)) + + when: + DistinctOperation operation = new DistinctOperation(getNamespace(), 'name', stringDecoder) + .filter(filter) + .collation(defaultCollation) + + then: + operation.getFilter() == filter + operation.getCollation() == defaultCollation + } + + def 'should be able to distinct by name'() { + given: + Document pete = new Document('name', 'Pete').append('age', 38) + Document sam = new Document('name', 'Sam').append('age', 21) + Document pete2 = new Document('name', 'Pete').append('age', 25) + getCollectionHelper().insertDocuments(new DocumentCodec(), pete, sam, pete2) + DistinctOperation operation = new DistinctOperation(getNamespace(), 'name', stringDecoder) + + when: + def results = executeAndCollectBatchCursorResults(operation, async) + + then: + results == ['Pete', 'Sam'] + + where: + async << [true, false] + } + + def 'should be able to distinct by name with find'() { + given: + Document pete = new Document('name', 'Pete').append('age', 38) + Document sam = new Document('name', 'Sam').append('age', 21) + Document pete2 = new Document('name', 'Pete').append('age', 25) + getCollectionHelper().insertDocuments(new DocumentCodec(), pete, sam, pete2) + def operation = new DistinctOperation(getNamespace(), 'name', stringDecoder) + .filter(new BsonDocument('age', new BsonInt32(25))) + + when: + def results = executeAndCollectBatchCursorResults(operation, async) + + then: + results == ['Pete'] + + where: + async << [true, false] + } + + def 'should be able to distinct with custom codecs'() { + given: + Worker pete = new Worker(new ObjectId(), 'Pete', 'handyman', new Date(), 3) + Worker sam = new Worker(new ObjectId(), 'Sam', 'plumber', new Date(), 7) + + Document peteDocument = new Document('_id', pete.id) + .append('name', pete.name) + .append('jobTitle', pete.jobTitle) + .append('dateStarted', pete.dateStarted) + .append('numberOfJobs', pete.numberOfJobs) + + Document samDocument = new Document('_id', sam.id) + .append('name', sam.name) + .append('jobTitle', sam.jobTitle) + .append('dateStarted', sam.dateStarted) + .append('numberOfJobs', sam.numberOfJobs) + + getCollectionHelper().insertDocuments(new Document('worker', peteDocument), new Document('worker', samDocument)) + DistinctOperation operation = new DistinctOperation(getNamespace(), 'worker', new WorkerCodec()) + + when: + def results = executeAndCollectBatchCursorResults(operation, async) + + then: + results == [pete, sam] + + where: + async << [true, false] + } + + + def 'should throw if invalid decoder passed to distinct'() { + given: + Document pete = new Document('name', 'Pete') + Document sam = new Document('name', 1) + Document pete2 = new Document('name', new Document('earle', 'Jones')) + getCollectionHelper().insertDocuments(new DocumentCodec(), pete, sam, pete2) + DistinctOperation operation = new DistinctOperation(getNamespace(), 'name', stringDecoder) + + when: + execute(operation, async) + + then: + thrown(BsonInvalidOperationException) + + where: + async << [true, false] + } + + def 'should use the ReadBindings readPreference to set secondaryOk'() { + when: + def operation = new DistinctOperation(helper.namespace, 'name', helper.decoder) + + then: + 
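+        // testOperationSecondaryOk is an OperationFunctionalSpecification helper that checks
+        // the secondaryOk behaviour implied by the given read preference.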
testOperationSecondaryOk(operation, [3, 4, 0], readPreference, async, helper.commandResult)
+
+        where:
+        [async, readPreference] << [[true, false], [ReadPreference.primary(), ReadPreference.secondary()]].combinations()
+    }
+
+    def 'should create the expected command'() {
+        when:
+        def operation = new DistinctOperation(helper.namespace, 'name', new BsonDocumentCodec())
+                .filter(new BsonDocument('a', BsonBoolean.TRUE))
+                .collation(defaultCollation)
+
+        def expectedCommand = new BsonDocument('distinct', new BsonString(helper.namespace.getCollectionName()))
+                .append('key', new BsonString('name'))
+                .append('query', operation.getFilter())
+                .append('collation', defaultCollation.asDocument())
+
+        then:
+        testOperation(operation, [3, 4, 0], expectedCommand, async, helper.commandResult)
+
+        where:
+        async << [true, false]
+    }
+
+    def 'should support collation'() {
+        given:
+        def document = Document.parse('{str: "foo"}')
+        getCollectionHelper().insertDocuments(document)
+        def operation = new DistinctOperation(namespace, 'str', stringDecoder)
+                .filter(BsonDocument.parse('{str: "FOO"}'))
+                .collation(caseInsensitiveCollation)
+
+        when:
+        def result = executeAndCollectBatchCursorResults(operation, async)
+
+        then:
+        result == ['foo']
+
+        where:
+        async << [true, false]
+    }
+
+    def 'should add read concern to command'() {
+        given:
+        def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext)
+        def binding = Stub(ReadBinding)
+        def source = Stub(ConnectionSource)
+        def connection = Mock(Connection)
+        binding.readPreference >> ReadPreference.primary()
+        binding.operationContext >> operationContext
+        binding.readConnectionSource >> source
+        source.connection >> connection
+        source.retain() >> source
+        source.operationContext >> operationContext
+        def commandDocument = new BsonDocument('distinct', new BsonString(getCollectionName()))
+                .append('key', new BsonString('str'))
+        appendReadConcernToCommand(sessionContext, UNKNOWN_WIRE_VERSION, commandDocument)
+
+        def operation = new DistinctOperation(getNamespace(), 'str', new StringCodec())
+
+        when:
+        operation.execute(binding)
+
+        then:
+        _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())),
+                6, STANDALONE, 1000, 100000, 100000, [])
+        1 * connection.command(_, commandDocument, _, _, _, operationContext) >>
+                new BsonDocument('values', new BsonArrayWrapper([]))
+        1 * connection.release()
+
+        where:
+        sessionContext << [
+                Stub(SessionContext) {
+                    isCausallyConsistent() >> true
+                    getOperationTime() >> new BsonTimestamp(42, 0)
+                    hasActiveTransaction() >> false
+                    getReadConcern() >> ReadConcern.MAJORITY
+                }
+        ]
+    }
+
+    def 'should add read concern to command asynchronously'() {
+        given:
+        def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext)
+        def binding = Stub(AsyncReadBinding)
+        def source = Stub(AsyncConnectionSource)
+        def connection = Mock(AsyncConnection)
+        binding.readPreference >> ReadPreference.primary()
+        binding.getReadConnectionSource(_) >> { it[0].onResult(source, null) }
+        binding.operationContext >> operationContext
+        source.operationContext >> operationContext
+        source.getConnection(_) >> { it[0].onResult(connection, null) }
+        source.retain() >> source
+        def commandDocument = new BsonDocument('distinct', new BsonString(getCollectionName()))
+                .append('key', new BsonString('str'))
+        appendReadConcernToCommand(sessionContext, UNKNOWN_WIRE_VERSION, commandDocument)
+
+        def operation = new DistinctOperation(getNamespace(), 'str', new
StringCodec()) + + when: + executeAsync(operation, binding) + + then: + _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())), + 6, STANDALONE, 1000, 100000, 100000, []) + 1 * connection.commandAsync(_, commandDocument, _, _, _, operationContext, *_) >> { + it.last().onResult(new BsonDocument('values', new BsonArrayWrapper([])), null) + } + 1 * connection.release() + + where: + sessionContext << [ + Stub(SessionContext) { + isCausallyConsistent() >> true + getOperationTime() >> new BsonTimestamp(42, 0) + hasActiveTransaction() >> false + getReadConcern() >> ReadConcern.MAJORITY + } + ] + } + + + def helper = [ + dbName: 'db', + namespace: new MongoNamespace('db', 'coll'), + decoder: Stub(Decoder), + commandResult: BsonDocument.parse('{ok: 1.0}').append('values', new BsonArrayWrapper([])) + ] +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/DropCollectionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/DropCollectionOperationSpecification.groovy new file mode 100644 index 00000000000..164dc66d654 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/DropCollectionOperationSpecification.groovy @@ -0,0 +1,108 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation + +import com.mongodb.MongoNamespace +import com.mongodb.MongoWriteConcernException +import com.mongodb.OperationFunctionalSpecification +import com.mongodb.WriteConcern +import org.bson.Document +import org.bson.codecs.DocumentCodec +import spock.lang.IgnoreIf + +import static com.mongodb.ClusterFixture.executeAsync +import static com.mongodb.ClusterFixture.getBinding +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet + +class DropCollectionOperationSpecification extends OperationFunctionalSpecification { + + def 'should drop a collection that exists'() { + given: + getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentTo', 'createTheCollection')) + assert collectionNameExists(getCollectionName()) + + when: + new DropCollectionOperation(getNamespace(), WriteConcern.ACKNOWLEDGED).execute(getBinding()) + + then: + !collectionNameExists(getCollectionName()) + } + + + def 'should drop a collection that exists asynchronously'() { + given: + getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentTo', 'createTheCollection')) + assert collectionNameExists(getCollectionName()) + + when: + executeAsync(new DropCollectionOperation(getNamespace(), WriteConcern.ACKNOWLEDGED)) + + then: + !collectionNameExists(getCollectionName()) + } + + def 'should not error when dropping a collection that does not exist'() { + given: + def namespace = new MongoNamespace(getDatabaseName(), 'nonExistingCollection') + + when: + new DropCollectionOperation(namespace, WriteConcern.ACKNOWLEDGED).execute(getBinding()) + + then: + !collectionNameExists('nonExistingCollection') + } + + + def 'should not error when dropping a collection that does not exist asynchronously'() { + given: + def namespace = new MongoNamespace(getDatabaseName(), 'nonExistingCollection') + + when: + executeAsync(new DropCollectionOperation(namespace, WriteConcern.ACKNOWLEDGED)) + + then: + !collectionNameExists('nonExistingCollection') + } + + @IgnoreIf({ !isDiscoverableReplicaSet() }) + def 'should throw on write concern error'() { + given: + getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentTo', 'createTheCollection')) + assert collectionNameExists(getCollectionName()) + def operation = new DropCollectionOperation(getNamespace(), new WriteConcern(5)) + + when: + async ? executeAsync(operation) : operation.execute(getBinding()) + + then: + def ex = thrown(MongoWriteConcernException) + ex.writeConcernError.code == 100 + ex.writeResult.wasAcknowledged() + + where: + async << [true, false] + } + + def collectionNameExists(String collectionName) { + def cursor = new ListCollectionsOperation(databaseName, new DocumentCodec()).execute(getBinding()) + if (!cursor.hasNext()) { + return false + } + cursor.next()*.get('name').contains(collectionName) + } + +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/DropDatabaseOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/DropDatabaseOperationSpecification.groovy new file mode 100644 index 00000000000..d91ac02e8cc --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/DropDatabaseOperationSpecification.groovy @@ -0,0 +1,94 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.operation
+
+import com.mongodb.MongoWriteConcernException
+import com.mongodb.OperationFunctionalSpecification
+import com.mongodb.WriteConcern
+import org.bson.BsonDocument
+import org.bson.Document
+import org.bson.codecs.DocumentCodec
+import spock.lang.IgnoreIf
+
+import static com.mongodb.ClusterFixture.configureFailPoint
+import static com.mongodb.ClusterFixture.executeAsync
+import static com.mongodb.ClusterFixture.getBinding
+import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet
+import static com.mongodb.ClusterFixture.isSharded
+
+class DropDatabaseOperationSpecification extends OperationFunctionalSpecification {
+
+    @IgnoreIf({ isSharded() })
+    def 'should drop a database that exists'() {
+        given:
+        getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentTo', 'createTheCollection'))
+        assert databaseNameExists(databaseName)
+
+        when:
+        execute(new DropDatabaseOperation(databaseName, WriteConcern.ACKNOWLEDGED), async)
+
+        then:
+        !databaseNameExists(databaseName)
+
+        where:
+        async << [true, false]
+    }
+
+
+    def 'should not error when dropping a database that does not exist'() {
+        given:
+        def dbName = 'nonExistingDatabase'
+
+        when:
+        execute(new DropDatabaseOperation(dbName, WriteConcern.ACKNOWLEDGED), async)
+
+        then:
+        !databaseNameExists(dbName)
+
+        where:
+        async << [true, false]
+    }
+
+    @IgnoreIf({ !isDiscoverableReplicaSet() })
+    def 'should throw on write concern error'() {
+        given:
+        getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentTo', 'createTheCollection'))
+
+        def w = 2
+        def operation = new DropDatabaseOperation(databaseName, new WriteConcern(w))
+        configureFailPoint(BsonDocument.parse('{ configureFailPoint: "failCommand", ' +
+                'mode : {times : 1}, ' +
+                'data : {failCommands : ["dropDatabase"], ' +
+                'writeConcernError : {code : 100, errmsg : "failed"}}}'))
+
+        when:
+        async ? executeAsync(operation) : operation.execute(getBinding())
+
+        then:
+        def ex = thrown(MongoWriteConcernException)
+        ex.writeConcernError.code == 100
+        ex.writeResult.wasAcknowledged()
+
+        where:
+        async << [true, false]
+    }
+
+    def databaseNameExists(String databaseName) {
+        new ListDatabasesOperation(new DocumentCodec()).execute(getBinding()).next()*.name.contains(databaseName)
+    }
+
+}
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/DropIndexOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/DropIndexOperationSpecification.groovy
new file mode 100644
index 00000000000..e3711b0035b
--- /dev/null
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/DropIndexOperationSpecification.groovy
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation + +import com.mongodb.MongoException +import com.mongodb.MongoWriteConcernException +import com.mongodb.OperationFunctionalSpecification +import com.mongodb.WriteConcern +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonInt64 +import org.bson.BsonString +import org.bson.Document +import org.bson.codecs.DocumentCodec +import spock.lang.IgnoreIf +import spock.lang.Unroll + +import static com.mongodb.ClusterFixture.getBinding +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet +import static com.mongodb.ClusterFixture.serverVersionAtLeast + +class DropIndexOperationSpecification extends OperationFunctionalSpecification { + + def 'should not error when dropping non-existent index on non-existent collection'() { + when: + execute(new DropIndexOperation(getNamespace(), 'made_up_index_1', null), async) + + then: + getIndexes().size() == 0 + + where: + async << [true, false] + } + + @IgnoreIf({ serverVersionAtLeast(8, 3) }) + def 'should error when dropping non-existent index on existing collection'() { + given: + getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection')) + + when: + execute(new DropIndexOperation(getNamespace(), 'made_up_index_1', null), async) + + then: + thrown(MongoException) + + where: + async << [true, false] + } + + def 'should drop existing index by name'() { + given: + collectionHelper.createIndex(new BsonDocument('theField', new BsonInt32(1))) + + when: + execute(new DropIndexOperation(getNamespace(), 'theField_1', null), async) + List indexes = getIndexes() + + then: + indexes.size() == 1 + indexes[0].name == '_id_' + + where: + async << [true, false] + } + + @Unroll + def 'should drop existing index by keys'() { + given: + collectionHelper.createIndex(keys) + + when: + execute(new DropIndexOperation(getNamespace(), keys, null), async) + List indexes = getIndexes() + + then: + indexes.size() == 1 + indexes[0].name == '_id_' + + where: + [keys, async] << [ + [new BsonDocument('theField', new BsonInt32(1)), + new BsonDocument('theField', new BsonInt32(1)).append('theSecondField', new BsonInt32(-1)), + new BsonDocument('theField', new BsonString('2d')), + new BsonDocument('theField', new BsonString('hashed')), + ], + [true, false] + ].combinations() + } + + def 'should drop existing index by key when using BsonInt64'() { + given: + def keys = new BsonDocument('theField', new BsonInt32(1)) + collectionHelper.createIndex(keys) + + when: + execute(new DropIndexOperation(getNamespace(), new BsonDocument('theField', new BsonInt64(1)), null), + async) + List indexes = getIndexes() + + then: + indexes.size() == 1 + indexes[0].name == '_id_' + + where: + async << [true, false] + } + + def 'should drop all indexes when passed *'() { + given: + collectionHelper.createIndex(new BsonDocument('theField', new BsonInt32(1))) + collectionHelper.createIndex(new BsonDocument('theOtherField', new BsonInt32(1))) + + when: + execute(new DropIndexOperation(getNamespace(), '*', null), async) + List indexes = getIndexes() + + 
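// '*' is the reserved name the dropIndexes command interprets as "all indexes": every index + // on the collection is removed except the built-in _id index, which can never be dropped, + // hence the single remaining index asserted below. +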
then: + indexes.size() == 1 + indexes[0].name == '_id_' + + where: + async << [true, false] + } + + @IgnoreIf({ !isDiscoverableReplicaSet() }) + def 'should throw on write concern error'() { + given: + collectionHelper.createIndex(new BsonDocument('theField', new BsonInt32(1))) + def operation = new DropIndexOperation(getNamespace(), 'theField_1', new WriteConcern(5)) + + when: + execute(operation, async) + + then: + def ex = thrown(MongoWriteConcernException) + ex.writeConcernError.code == 100 + ex.writeResult.wasAcknowledged() + + where: + async << [true, false] + } + + def getIndexes() { + def indexes = [] + def cursor = new ListIndexesOperation(getNamespace(), new DocumentCodec()).execute(getBinding()) + while (cursor.hasNext()) { + indexes.addAll(cursor.next()) + } + indexes + } + +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndDeleteOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndDeleteOperationSpecification.groovy new file mode 100644 index 00000000000..64c6123a84b --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndDeleteOperationSpecification.groovy @@ -0,0 +1,322 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation + +import com.mongodb.MongoSocketException +import com.mongodb.MongoWriteConcernException +import com.mongodb.OperationFunctionalSpecification +import com.mongodb.ServerAddress +import com.mongodb.WriteConcern +import com.mongodb.client.test.CollectionHelper +import com.mongodb.client.test.Worker +import com.mongodb.client.test.WorkerCodec +import org.bson.BsonBoolean +import org.bson.BsonDocument +import org.bson.BsonDocumentWrapper +import org.bson.BsonInt64 +import org.bson.BsonString +import org.bson.Document +import org.bson.codecs.BsonDocumentCodec +import org.bson.codecs.DocumentCodec +import spock.lang.IgnoreIf + +import static com.mongodb.ClusterFixture.configureFailPoint +import static com.mongodb.ClusterFixture.disableFailPoint +import static com.mongodb.ClusterFixture.disableOnPrimaryTransactionalWriteFailPoint +import static com.mongodb.ClusterFixture.enableOnPrimaryTransactionalWriteFailPoint +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet +import static com.mongodb.WriteConcern.ACKNOWLEDGED +import static com.mongodb.WriteConcern.UNACKNOWLEDGED +import static com.mongodb.WriteConcern.W1 +import static com.mongodb.connection.ServerType.REPLICA_SET_PRIMARY +import static com.mongodb.connection.ServerType.STANDALONE + +class FindAndDeleteOperationSpecification extends OperationFunctionalSpecification { + private final DocumentCodec documentCodec = new DocumentCodec() + private final WorkerCodec workerCodec = new WorkerCodec() + + def 'should have the correct defaults'() { + when: + def operation = new FindAndDeleteOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec) + + then: + operation.getNamespace() == getNamespace() + operation.getWriteConcern() == ACKNOWLEDGED + operation.getDecoder() == documentCodec + operation.getFilter() == null + operation.getSort() == null + operation.getProjection() == null + operation.getCollation() == null + } + + def 'should set optional values correctly'(){ + given: + def filter = BsonDocument.parse('{ filter : 1}') + def sort = BsonDocument.parse('{ sort : 1}') + def projection = BsonDocument.parse('{ projection : 1}') + + when: + def operation = new FindAndDeleteOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec) + .filter(filter) + .sort(sort) + .projection(projection) + .collation(defaultCollation) + + then: + operation.getFilter() == filter + operation.getSort() == sort + operation.getProjection() == projection + operation.getCollation() == defaultCollation + } + + def 'should remove single document'() { + given: + Document pete = new Document('name', 'Pete').append('job', 'handyman') + Document sam = new Document('name', 'Sam').append('job', 'plumber') + + getCollectionHelper().insertDocuments(new DocumentCodec(), pete, sam) + + when: + def operation = new FindAndDeleteOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec) + .filter(new BsonDocument('name', new BsonString('Pete'))) + Document returnedDocument = execute(operation, async) + + then: + getCollectionHelper().find().size() == 1 + getCollectionHelper().find().first().getString('name') == 'Sam' + returnedDocument.getString('name') == 'Pete' + + where: + async << [true, false] + } + + + def 'should remove single document when using custom codecs'() { + given: + Worker pete = new Worker('Pete', 'handyman', new Date(), 3) + Worker sam = new Worker('Sam', 'plumber', new Date(), 7) + getWorkerCollectionHelper().insertDocuments(new WorkerCodec(), pete, sam) + + when: + FindAndDeleteOperation 
operation = new FindAndDeleteOperation(getNamespace(), + ACKNOWLEDGED, false, workerCodec).filter(new BsonDocument('name', new BsonString('Pete'))) + Worker returnedDocument = execute(operation, async) + + then: + getWorkerCollectionHelper().find().size() == 1 + getWorkerCollectionHelper().find().first() == sam + returnedDocument == pete + + where: + async << [true, false] + } + + + @IgnoreIf({ !isDiscoverableReplicaSet() }) + def 'should throw on write concern error'() { + given: + CollectionHelper helper = new CollectionHelper(documentCodec, getNamespace()) + Document pete = new Document('name', 'Pete').append('job', 'handyman') + helper.insertDocuments(new DocumentCodec(), pete) + def operation = new FindAndDeleteOperation(getNamespace(), new WriteConcern(5, 1), false, + documentCodec).filter(new BsonDocument('name', new BsonString('Pete'))) + + when: + execute(operation, async) + + then: + def ex = thrown(MongoWriteConcernException) + ex.writeConcernError.code == 100 + !ex.writeConcernError.message.isEmpty() + ex.writeResult.count == 1 + !ex.writeResult.updateOfExisting + ex.writeResult.upsertedId == null + + where: + async << [true, false] + } + + @IgnoreIf({ !isDiscoverableReplicaSet() }) + def 'should throw on write concern error on multiple failpoint'() { + given: + CollectionHelper helper = new CollectionHelper(documentCodec, getNamespace()) + Document pete = new Document('name', 'Pete').append('job', 'handyman') + helper.insertDocuments(new DocumentCodec(), pete) + + def failPoint = BsonDocument.parse('''{ + "configureFailPoint": "failCommand", + "mode": {"times": 2 }, + "data": { "failCommands": ["findAndModify"], + "writeConcernError": {"code": 91, "errmsg": "Replication is being shut down"}}}''') + configureFailPoint(failPoint) + + def operation = new FindAndDeleteOperation(getNamespace(), ACKNOWLEDGED, false, + documentCodec).filter(new BsonDocument('name', new BsonString('Pete'))) + + when: + execute(operation, async) + + then: + def ex = thrown(MongoWriteConcernException) + ex.writeConcernError.code == 91 + !ex.writeConcernError.message.isEmpty() + ex.writeResult.count == 1 + !ex.writeResult.updateOfExisting + ex.writeResult.upsertedId == null + + cleanup: + disableFailPoint('failCommand') + + where: + async << [true, false] + } + + def 'should create the expected command'() { + when: + def includeTxnNumber = retryWrites && writeConcern.isAcknowledged() && serverType != STANDALONE + def includeWriteConcern = writeConcern.isAcknowledged() && !writeConcern.isServerDefault() + def cannedResult = new BsonDocument('value', new BsonDocumentWrapper(BsonDocument.parse('{}'), new BsonDocumentCodec())) + def operation = new FindAndDeleteOperation(getNamespace(), writeConcern as WriteConcern, + retryWrites as boolean, documentCodec) + def expectedCommand = new BsonDocument('findAndModify', new BsonString(getNamespace().getCollectionName())) + .append('remove', BsonBoolean.TRUE) + + if (includeWriteConcern) { + expectedCommand.put('writeConcern', writeConcern.asDocument()) + } + if (includeTxnNumber) { + expectedCommand.put('txnNumber', new BsonInt64(0)) + } + + then: + testOperation([operation: operation, serverVersion: [3, 6, 0], expectedCommand: expectedCommand, async: async, + result: cannedResult, serverType: serverType]) + + when: + def filter = BsonDocument.parse('{ filter : 1}') + def sort = BsonDocument.parse('{ sort : 1}') + def projection = BsonDocument.parse('{ projection : 1}') + + operation.filter(filter) + .sort(sort) + .projection(projection) + + 
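// findAndModify predates the CRUD API naming, so the operation's filter, sort and projection + // surface in the command under the legacy 'query', 'sort' and 'fields' keys mirrored below. +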
expectedCommand.append('query', filter) + .append('sort', sort) + .append('fields', projection) + + operation.collation(defaultCollation) + expectedCommand.append('collation', defaultCollation.asDocument()) + + then: + testOperation([operation: operation, serverVersion: [3, 6, 0], expectedCommand: expectedCommand, async: async, + result: cannedResult, serverType: serverType]) + + where: + [serverType, writeConcern, async, retryWrites] << [ + [REPLICA_SET_PRIMARY, STANDALONE], + [ACKNOWLEDGED, W1, UNACKNOWLEDGED], + [true, false], + [true, false] + ].combinations() + } + + @IgnoreIf({ !isDiscoverableReplicaSet() }) + def 'should support retryable writes'() { + given: + Document pete = new Document('name', 'Pete').append('job', 'handyman') + Document sam = new Document('name', 'Sam').append('job', 'plumber') + + getCollectionHelper().insertDocuments(new DocumentCodec(), pete, sam) + + when: + def operation = new FindAndDeleteOperation(getNamespace(), ACKNOWLEDGED, true, documentCodec) + .filter(new BsonDocument('name', new BsonString('Pete'))) + enableOnPrimaryTransactionalWriteFailPoint(BsonDocument.parse('{times: 1}')) + + Document returnedDocument = executeWithSession(operation, async) + + then: + getCollectionHelper().find().size() == 1 + getCollectionHelper().find().first().getString('name') == 'Sam' + returnedDocument.getString('name') == 'Pete' + + cleanup: + disableOnPrimaryTransactionalWriteFailPoint() + + where: + async << [true, false] + } + + def 'should retry if the connection initially fails'() { + when: + def cannedResult = new BsonDocument('value', new BsonDocumentWrapper(BsonDocument.parse('{}'), new BsonDocumentCodec())) + def operation = new FindAndDeleteOperation(getNamespace(), ACKNOWLEDGED, true, documentCodec) + def expectedCommand = new BsonDocument('findAndModify', new BsonString(getNamespace().getCollectionName())) + .append('remove', BsonBoolean.TRUE) + .append('txnNumber', new BsonInt64(0)) + + then: + testOperationRetries(operation, [3, 6, 0], expectedCommand, async, cannedResult) + + where: + async << [true, false] + } + + def 'should throw original error when retrying and failing'() { + given: + def operation = new FindAndDeleteOperation(getNamespace(), ACKNOWLEDGED, true, documentCodec) + def originalException = new MongoSocketException('Some failure', new ServerAddress()) + + when: + testRetryableOperationThrowsOriginalError(operation, [[3, 6, 0], [3, 6, 0]], + [REPLICA_SET_PRIMARY, STANDALONE], originalException, async) + + then: + Exception commandException = thrown() + commandException == originalException + + when: + testRetryableOperationThrowsOriginalError(operation, [[3, 6, 0]], + [REPLICA_SET_PRIMARY], originalException, async, 1) + + then: + commandException = thrown() + commandException == originalException + + where: + async << [false] + } + + def 'should support collation'() { + given: + def document = Document.parse('{_id: 1, str: "foo"}') + getCollectionHelper().insertDocuments(document) + def operation = new FindAndDeleteOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec) + .filter(BsonDocument.parse('{str: "FOO"}')) + .collation(caseInsensitiveCollation) + + when: + def result = execute(operation, async) + + then: + result == document + + where: + async << [true, false] + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndReplaceOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndReplaceOperationSpecification.groovy new file mode 100644 index 
00000000000..50dd68fa810 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndReplaceOperationSpecification.groovy @@ -0,0 +1,462 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation + +import com.mongodb.MongoCommandException +import com.mongodb.MongoNamespace +import com.mongodb.MongoSocketException +import com.mongodb.MongoWriteConcernException +import com.mongodb.OperationFunctionalSpecification +import com.mongodb.ServerAddress +import com.mongodb.WriteConcern +import com.mongodb.client.model.CreateCollectionOptions +import com.mongodb.client.model.ValidationOptions +import com.mongodb.client.test.CollectionHelper +import com.mongodb.client.test.Worker +import com.mongodb.client.test.WorkerCodec +import org.bson.BsonBoolean +import org.bson.BsonDocument +import org.bson.BsonDocumentWrapper +import org.bson.BsonInt32 +import org.bson.BsonInt64 +import org.bson.BsonObjectId +import org.bson.BsonString +import org.bson.Document +import org.bson.codecs.BsonDocumentCodec +import org.bson.codecs.DocumentCodec +import spock.lang.IgnoreIf + +import static com.mongodb.ClusterFixture.configureFailPoint +import static com.mongodb.ClusterFixture.disableFailPoint +import static com.mongodb.ClusterFixture.disableOnPrimaryTransactionalWriteFailPoint +import static com.mongodb.ClusterFixture.enableOnPrimaryTransactionalWriteFailPoint +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet +import static com.mongodb.WriteConcern.ACKNOWLEDGED +import static com.mongodb.WriteConcern.UNACKNOWLEDGED +import static com.mongodb.WriteConcern.W1 +import static com.mongodb.client.model.Filters.gte +import static com.mongodb.connection.ServerType.REPLICA_SET_PRIMARY +import static com.mongodb.connection.ServerType.STANDALONE + +class FindAndReplaceOperationSpecification extends OperationFunctionalSpecification { + private final DocumentCodec documentCodec = new DocumentCodec() + private final WorkerCodec workerCodec = new WorkerCodec() + + def 'should have the correct defaults and passed values'() { + when: + def replacement = new BsonDocument('replace', new BsonInt32(1)) + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, + replacement) + + then: + operation.getNamespace() == getNamespace() + operation.getWriteConcern() == ACKNOWLEDGED + operation.getDecoder() == documentCodec + operation.getReplacement() == replacement + operation.getFilter() == null + operation.getSort() == null + operation.getProjection() == null + operation.getBypassDocumentValidation() == null + operation.getCollation() == null + } + + def 'should set optional values correctly'() { + given: + def filter = new BsonDocument('filter', new BsonInt32(1)) + def sort = new BsonDocument('sort', new BsonInt32(1)) + def projection = new BsonDocument('projection', new BsonInt32(1)) + + when: + def operation = new FindAndReplaceOperation(getNamespace(), 
ACKNOWLEDGED, false, documentCodec, + new BsonDocument('replace', new BsonInt32(1))).filter(filter).sort(sort).projection(projection) + .bypassDocumentValidation(true).upsert(true).returnOriginal(false) + .collation(defaultCollation) + + then: + operation.getFilter() == filter + operation.getSort() == sort + operation.getProjection() == projection + operation.upsert == true + operation.getBypassDocumentValidation() + !operation.isReturnOriginal() + operation.getCollation() == defaultCollation + } + + def 'should replace single document'() { + given: + CollectionHelper helper = new CollectionHelper(documentCodec, getNamespace()) + Document pete = new Document('name', 'Pete').append('job', 'handyman') + Document sam = new Document('name', 'Sam').append('job', 'plumber') + BsonDocument jordan = BsonDocument.parse('{name: "Jordan", job: "sparky"}') + + helper.insertDocuments(new DocumentCodec(), pete, sam) + + when: + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, + documentCodec, jordan) + .filter(new BsonDocument('name', new BsonString('Pete'))) + Document returnedDocument = execute(operation, async) + + then: + returnedDocument.getString('name') == 'Pete' + helper.find().size() == 2 + helper.find().get(0).getString('name') == 'Jordan' + + when: + operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, + new BsonDocumentWrapper(pete, documentCodec)) + .filter(new BsonDocument('name', new BsonString('Jordan'))) + .returnOriginal(false) + returnedDocument = execute(operation, async) + + then: + returnedDocument.getString('name') == 'Pete' + + where: + async << [true, false] + } + + def 'should replace single document when using custom codecs'() { + given: + CollectionHelper helper = new CollectionHelper(workerCodec, getNamespace()) + Worker pete = new Worker('Pete', 'handyman', new Date(), 3) + Worker sam = new Worker('Sam', 'plumber', new Date(), 5) + Worker jordan = new Worker(pete.id, 'Jordan', 'sparky', new Date(), 7) + BsonDocument replacement = new BsonDocumentWrapper(jordan, workerCodec) + + helper.insertDocuments(new WorkerCodec(), pete, sam) + + when: + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, + workerCodec, replacement).filter(new BsonDocument('name', new BsonString('Pete'))) + Worker returnedDocument = execute(operation, async) + + then: + returnedDocument == pete + helper.find().get(0) == jordan + + when: + replacement = new BsonDocumentWrapper(pete, workerCodec) + operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, workerCodec, + replacement) + .filter(new BsonDocument('name', new BsonString('Jordan'))) + .returnOriginal(false) + returnedDocument = execute(operation, async) + + then: + returnedDocument == pete + + where: + async << [true, false] + } + + def 'should return null if query fails to match'() { + when: + BsonDocument jordan = BsonDocument.parse('{name: "Jordan", job: "sparky"}') + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, + documentCodec, jordan) + .filter(new BsonDocument('name', new BsonString('Pete'))) + Document returnedDocument = execute(operation, async) + + then: + returnedDocument == null + + where: + async << [true, false] + } + + def 'should throw an exception if replacement contains update operators'() { + given: + def replacement = new BsonDocumentWrapper(['$inc': 1] as Document, documentCodec) + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, + 
documentCodec, replacement) + + when: + execute(operation, async) + + then: + def e = thrown(IllegalArgumentException) + e.getMessage() == 'Field names in a replacement document can not start with \'$\' but \'$inc\' does' + + where: + async << [true, false] + } + + def 'should support bypassDocumentValidation'() { + given: + def namespace = new MongoNamespace(getDatabaseName(), 'collectionOut') + def collectionHelper = getCollectionHelper(namespace) + collectionHelper.create('collectionOut', new CreateCollectionOptions().validationOptions( + new ValidationOptions().validator(gte('level', 10)))) + collectionHelper.insertDocuments(BsonDocument.parse('{ level: 10 }')) + + when: + def replacement = new BsonDocument('level', new BsonInt32(9)) + def operation = new FindAndReplaceOperation(namespace, ACKNOWLEDGED, false, + documentCodec, replacement) + execute(operation, async) + + then: + thrown(MongoCommandException) + + when: + operation.bypassDocumentValidation(false) + execute(operation, async) + + then: + thrown(MongoCommandException) + + when: + operation.bypassDocumentValidation(true).returnOriginal(false) + Document returnedDocument = execute(operation, async) + + then: + notThrown(MongoCommandException) + returnedDocument.getInteger('level') == 9 + + cleanup: + collectionHelper?.drop() + + where: + async << [true, false] + } + + @IgnoreIf({ !isDiscoverableReplicaSet() }) + def 'should throw on write concern error'() { + given: + CollectionHelper helper = new CollectionHelper(documentCodec, getNamespace()) + Document pete = new Document('name', 'Pete').append('job', 'handyman') + helper.insertDocuments(new DocumentCodec(), pete) + + BsonDocument jordan = BsonDocument.parse('{name: "Jordan", job: "sparky"}') + + when: + def operation = new FindAndReplaceOperation(getNamespace(), + new WriteConcern(5, 1), false, documentCodec, jordan) + .filter(new BsonDocument('name', new BsonString('Pete'))) + execute(operation, async) + + then: + def ex = thrown(MongoWriteConcernException) + ex.writeConcernError.code == 100 + !ex.writeConcernError.message.isEmpty() + ex.writeResult.count == 1 + ex.writeResult.updateOfExisting + ex.writeResult.upsertedId == null + + when: + operation = new FindAndReplaceOperation(getNamespace(), new WriteConcern(5, 1), + false, documentCodec, jordan).filter(new BsonDocument('name', new BsonString('Bob'))) + .upsert(true) + execute(operation, async) + + then: + ex = thrown(MongoWriteConcernException) + ex.writeConcernError.code == 100 + !ex.writeConcernError.message.isEmpty() + ex.writeResult.count == 1 + !ex.writeResult.updateOfExisting + ex.writeResult.upsertedId instanceof BsonObjectId + + where: + async << [true, false] + } + + + @IgnoreIf({ !isDiscoverableReplicaSet() }) + def 'should throw on write concern error on multiple failpoint'() { + CollectionHelper helper = new CollectionHelper(documentCodec, getNamespace()) + Document pete = new Document('name', 'Pete').append('job', 'handyman') + helper.insertDocuments(new DocumentCodec(), pete) + + def failPoint = BsonDocument.parse('''{ + "configureFailPoint": "failCommand", + "mode": {"times": 2 }, + "data": { "failCommands": ["findAndModify"], + "writeConcernError": {"code": 91, "errmsg": "Replication is being shut down"}}}''') + configureFailPoint(failPoint) + + BsonDocument jordan = BsonDocument.parse('{name: "Jordan", job: "sparky"}') + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, + false, documentCodec, jordan).filter(new BsonDocument('name', new BsonString('Pete'))) + + when: + 
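// The failCommand fail point is armed above with mode {times: 2}, so the injected + // writeConcernError fires on the next two findAndModify commands and still surfaces + // even if the command reaches the server more than once. +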
execute(operation, async) + + then: + def ex = thrown(MongoWriteConcernException) + ex.writeConcernError.code == 91 + !ex.writeConcernError.message.isEmpty() + ex.writeResult.count == 1 + ex.writeResult.updateOfExisting + ex.writeResult.upsertedId == null + + cleanup: + disableFailPoint('failCommand') + + where: + async << [true, false] + } + + def 'should create the expected command'() { + when: + def includeTxnNumber = retryWrites && writeConcern.isAcknowledged() && serverType != STANDALONE + def includeWriteConcern = writeConcern.isAcknowledged() && !writeConcern.isServerDefault() + def cannedResult = new BsonDocument('value', new BsonDocumentWrapper(BsonDocument.parse('{}'), new BsonDocumentCodec())) + def replacement = BsonDocument.parse('{ replacement: 1}') + def operation = new FindAndReplaceOperation(getNamespace(), writeConcern, retryWrites, documentCodec, replacement) + def expectedCommand = new BsonDocument('findAndModify', new BsonString(getNamespace().getCollectionName())) + .append('update', replacement) + if (includeWriteConcern) { + expectedCommand.put('writeConcern', writeConcern.asDocument()) + } + if (includeTxnNumber) { + expectedCommand.put('txnNumber', new BsonInt64(0)) + } + expectedCommand.put('new', BsonBoolean.FALSE) + + then: + testOperation([operation: operation, serverVersion: [3, 6, 0], expectedCommand: expectedCommand, async: async, + result : cannedResult, serverType: serverType]) + + when: + def filter = BsonDocument.parse('{ filter : 1}') + def sort = BsonDocument.parse('{ sort : 1}') + def projection = BsonDocument.parse('{ projection : 1}') + + operation.filter(filter) + .sort(sort) + .projection(projection) + .bypassDocumentValidation(true) + + expectedCommand.append('query', filter) + .append('sort', sort) + .append('fields', projection) + + operation.collation(defaultCollation) + expectedCommand.append('collation', defaultCollation.asDocument()) + expectedCommand.append('bypassDocumentValidation', BsonBoolean.TRUE) + + then: + testOperation([operation: operation, serverVersion: [3, 6, 0], expectedCommand: expectedCommand, async: async, + result : cannedResult, serverType: serverType]) + + where: + [serverType, writeConcern, async, retryWrites] << [ + [REPLICA_SET_PRIMARY, STANDALONE], + [ACKNOWLEDGED, W1, UNACKNOWLEDGED], + [true, false], + [true, false] + ].combinations() + } + + @IgnoreIf({ !isDiscoverableReplicaSet() }) + def 'should support retryable writes'() { + given: + CollectionHelper helper = new CollectionHelper(documentCodec, getNamespace()) + Document pete = new Document('name', 'Pete').append('job', 'handyman') + Document sam = new Document('name', 'Sam').append('job', 'plumber') + BsonDocument jordan = BsonDocument.parse('{name: "Jordan", job: "sparky"}') + + helper.insertDocuments(new DocumentCodec(), pete, sam) + + when: + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, true, + documentCodec, jordan) + .filter(new BsonDocument('name', new BsonString('Pete'))) + + enableOnPrimaryTransactionalWriteFailPoint(BsonDocument.parse('{times: 1}')) + Document returnedDocument = executeWithSession(operation, async) + + then: + returnedDocument.getString('name') == 'Pete' + helper.find().size() == 2 + helper.find().get(0).getString('name') == 'Jordan' + + cleanup: + disableOnPrimaryTransactionalWriteFailPoint() + + where: + async << [true, false] + } + + def 'should retry if the connection initially fails'() { + when: + def cannedResult = new BsonDocument('value', new BsonDocumentWrapper(BsonDocument.parse('{}'), new 
BsonDocumentCodec())) + def replacement = BsonDocument.parse('{ replacement: 1}') + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, true, + documentCodec, replacement) + def expectedCommand = new BsonDocument('findAndModify', new BsonString(getNamespace().getCollectionName())) + .append('update', replacement) + .append('txnNumber', new BsonInt64(0)) + .append('new', BsonBoolean.FALSE) + + then: + testOperationRetries(operation, [3, 6, 0], expectedCommand, async, cannedResult) + + where: + async << [true, false] + } + + def 'should throw original error when retrying and failing'() { + given: + def replacement = BsonDocument.parse('{ replacement: 1}') + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, true, + documentCodec, replacement) + def originalException = new MongoSocketException('Some failure', new ServerAddress()) + + when: + testRetryableOperationThrowsOriginalError(operation, [[3, 6, 0], [3, 6, 0]], + [REPLICA_SET_PRIMARY, STANDALONE], originalException, async) + + then: + Exception commandException = thrown() + commandException == originalException + + when: + testRetryableOperationThrowsOriginalError(operation, [[3, 6, 0]], + [REPLICA_SET_PRIMARY], originalException, async, 1) + + then: + commandException = thrown() + commandException == originalException + + where: + async << [true, false] + } + + def 'should support collation'() { + given: + def document = Document.parse('{_id: 1, str: "foo"}') + getCollectionHelper().insertDocuments(document) + def replacement = BsonDocument.parse('{str: "bar"}') + def operation = new FindAndReplaceOperation(getNamespace(), ACKNOWLEDGED, false, + documentCodec, replacement) + .filter(BsonDocument.parse('{str: "FOO"}')) + .collation(caseInsensitiveCollation) + + when: + def result = execute(operation, async) + + then: + result == document + + where: + async << [true, false] + } +} + diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndUpdateOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndUpdateOperationSpecification.groovy new file mode 100644 index 00000000000..292d5bb471e --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/FindAndUpdateOperationSpecification.groovy @@ -0,0 +1,605 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation + +import com.mongodb.MongoCommandException +import com.mongodb.MongoNamespace +import com.mongodb.MongoSocketException +import com.mongodb.MongoWriteConcernException +import com.mongodb.OperationFunctionalSpecification +import com.mongodb.ServerAddress +import com.mongodb.WriteConcern +import com.mongodb.client.model.CreateCollectionOptions +import com.mongodb.client.model.ValidationOptions +import com.mongodb.client.test.CollectionHelper +import com.mongodb.client.test.Worker +import com.mongodb.client.test.WorkerCodec +import org.bson.BsonArray +import org.bson.BsonBoolean +import org.bson.BsonDocument +import org.bson.BsonDocumentWrapper +import org.bson.BsonInt32 +import org.bson.BsonInt64 +import org.bson.BsonObjectId +import org.bson.BsonString +import org.bson.Document +import org.bson.codecs.BsonDocumentCodec +import org.bson.codecs.DocumentCodec +import spock.lang.IgnoreIf + +import static com.mongodb.ClusterFixture.configureFailPoint +import static com.mongodb.ClusterFixture.disableFailPoint +import static com.mongodb.ClusterFixture.disableOnPrimaryTransactionalWriteFailPoint +import static com.mongodb.ClusterFixture.enableOnPrimaryTransactionalWriteFailPoint +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet +import static com.mongodb.WriteConcern.ACKNOWLEDGED +import static com.mongodb.WriteConcern.UNACKNOWLEDGED +import static com.mongodb.WriteConcern.W1 +import static com.mongodb.client.model.Filters.gte +import static com.mongodb.connection.ServerType.REPLICA_SET_PRIMARY +import static com.mongodb.connection.ServerType.STANDALONE +import static java.util.Collections.singletonList + +class FindAndUpdateOperationSpecification extends OperationFunctionalSpecification { + private final DocumentCodec documentCodec = new DocumentCodec() + private final WorkerCodec workerCodec = new WorkerCodec() + + def 'should have the correct defaults and passed values'() { + when: + def update = new BsonDocument('update', new BsonInt32(1)) + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, + documentCodec, update) + + then: + operation.getNamespace() == getNamespace() + operation.getWriteConcern() == ACKNOWLEDGED + operation.getDecoder() == documentCodec + operation.getUpdate() == update + operation.getFilter() == null + operation.getSort() == null + operation.getProjection() == null + operation.getBypassDocumentValidation() == null + operation.getCollation() == null + } + + def 'should have the correct defaults and passed values using update pipelines'() { + when: + def updatePipeline = new BsonArray(singletonList(new BsonDocument('update', new BsonInt32(1)))) + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, updatePipeline) + + then: + operation.getNamespace() == getNamespace() + operation.getWriteConcern() == ACKNOWLEDGED + operation.getDecoder() == documentCodec + operation.getUpdatePipeline() == updatePipeline + operation.getFilter() == null + operation.getSort() == null + operation.getProjection() == null + operation.getBypassDocumentValidation() == null + operation.getCollation() == null + } + + def 'should set optional values correctly'() { + given: + def filter = new BsonDocument('filter', new BsonInt32(1)) + def sort = new BsonDocument('sort', new BsonInt32(1)) + def projection = new BsonDocument('projection', new BsonInt32(1)) + + when: + def operation = new FindAndUpdateOperation(getNamespace(), + ACKNOWLEDGED, false, documentCodec, new 
BsonDocument('update', new BsonInt32(1))) + .filter(filter) + .sort(sort) + .projection(projection) + .bypassDocumentValidation(true).upsert(true) + .returnOriginal(false) + .collation(defaultCollation) + + then: + operation.getFilter() == filter + operation.getSort() == sort + operation.getProjection() == projection + operation.upsert == true + operation.getBypassDocumentValidation() + !operation.isReturnOriginal() + operation.getCollation() == defaultCollation + } + + def 'should set optional values correctly when using update pipelines'(){ + given: + def filter = new BsonDocument('filter', new BsonInt32(1)) + def sort = new BsonDocument('sort', new BsonInt32(1)) + def projection = new BsonDocument('projection', new BsonInt32(1)) + + when: + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, + documentCodec, new BsonArray(singletonList(new BsonDocument('update', new BsonInt32(1))))) + .filter(filter) + .sort(sort) + .projection(projection) + .bypassDocumentValidation(true).upsert(true) + .returnOriginal(false) + .collation(defaultCollation) + + then: + operation.getFilter() == filter + operation.getSort() == sort + operation.getProjection() == projection + operation.upsert == true + operation.getBypassDocumentValidation() + !operation.isReturnOriginal() + operation.getCollation() == defaultCollation + } + + def 'should update single document'() { + given: + CollectionHelper helper = new CollectionHelper(documentCodec, getNamespace()) + Document pete = new Document('name', 'Pete').append('numberOfJobs', 3) + Document sam = new Document('name', 'Sam').append('numberOfJobs', 5) + + helper.insertDocuments(new DocumentCodec(), pete, sam) + + when: + def update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, + documentCodec, update) + .filter(new BsonDocument('name', new BsonString('Pete'))) + Document returnedDocument = execute(operation, async) + + then: + returnedDocument.getInteger('numberOfJobs') == 3 + helper.find().size() == 2 + helper.find().get(0).getInteger('numberOfJobs') == 4 + + when: + update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) + operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, + documentCodec, update) + .filter(new BsonDocument('name', new BsonString('Pete'))) + .returnOriginal(false) + returnedDocument = execute(operation, async) + + then: + returnedDocument.getInteger('numberOfJobs') == 5 + + where: + async << [true, false] + } + + def 'should add field using update pipeline'() { + given: + CollectionHelper helper = new CollectionHelper(documentCodec, getNamespace()) + Document pete = new Document('name', 'Pete').append('numberOfJobs', 3) + Document sam = new Document('name', 'Sam').append('numberOfJobs', 5) + + helper.insertDocuments(new DocumentCodec(), pete, sam) + + when: + def update = new BsonArray(singletonList(new BsonDocument('$addFields', new BsonDocument('foo', new BsonInt32(1))))) + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) + .filter(new BsonDocument('name', new BsonString('Pete'))) + .returnOriginal(false) + Document returnedDocument = execute(operation, false) + + then: + returnedDocument.getInteger('numberOfJobs') == 3 + helper.find().get(0).getInteger('foo') == 1 + + when: + update = new BsonArray(singletonList(new BsonDocument('$addFields', new BsonDocument('foo', new BsonInt32(1))))) + 
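// Passing a BsonArray rather than a BsonDocument selects the aggregation-pipeline update form + // (a MongoDB 4.2+ server feature); re-applying the same $addFields stage is idempotent, + // as the repeated assertions below confirm. +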
operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) + .filter(new BsonDocument('name', new BsonString('Pete'))) + .returnOriginal(false) + returnedDocument = execute(operation, false) + + then: + returnedDocument.getInteger('numberOfJobs') == 3 + helper.find().get(0).getInteger('foo') == 1 + } + + def 'should update single document when using custom codecs'() { + given: + CollectionHelper helper = new CollectionHelper(workerCodec, getNamespace()) + Worker pete = new Worker('Pete', 'handyman', new Date(), 3) + Worker sam = new Worker('Sam', 'plumber', new Date(), 5) + + helper.insertDocuments(new WorkerCodec(), pete, sam) + + when: + def update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, + workerCodec, update) + .filter(new BsonDocument('name', new BsonString('Pete'))) + Worker returnedDocument = execute(operation, async) + + then: + returnedDocument.numberOfJobs == 3 + helper.find().size() == 2 + helper.find().get(0).numberOfJobs == 4 + + when: + update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) + operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, + workerCodec, update) + .filter(new BsonDocument('name', new BsonString('Pete'))) + .returnOriginal(false) + returnedDocument = execute(operation, async) + + then: + returnedDocument.numberOfJobs == 5 + + where: + async << [true, false] + } + + def 'should update using pipeline when using custom codecs'() { + given: + CollectionHelper helper = new CollectionHelper(documentCodec, getNamespace()) + Document pete = new Document('name', 'Pete').append('numberOfJobs', 3) + Document sam = new Document('name', 'Sam').append('numberOfJobs', 5) + + helper.insertDocuments(new DocumentCodec(), pete, sam) + + when: + def update = new BsonArray(singletonList(new BsonDocument('$project', new BsonDocument('name', new BsonInt32(1))))) + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) + .filter(new BsonDocument('name', new BsonString('Pete'))) + .returnOriginal(false) + Document returnedDocument = execute(operation, async) + + then: + returnedDocument.getString('name') == 'Pete' + !returnedDocument.containsKey('numberOfJobs') + + where: + async << [true, false] + } + + def 'should return null if query fails to match'() { + when: + def update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) + .filter(new BsonDocument('name', new BsonString('Pete'))) + Document returnedDocument = execute(operation, async) + + then: + returnedDocument == null + + where: + async << [true, false] + } + + def 'should throw an exception if update contains fields that are not update operators'() { + given: + def update = new BsonDocument('x', new BsonInt32(1)) + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, + documentCodec, update) + + when: + execute(operation, async) + + then: + def e = thrown(IllegalArgumentException) + e.getMessage() == 'All update operators must start with \'$\', but \'x\' does not' + + where: + async << [true, false] + } + + def 'should throw an exception if update pipeline contains operations that are not supported'() { + when: + def update = new BsonArray(singletonList(new BsonDocument('$foo', new BsonDocument('x', new 
BsonInt32(1))))) + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) + execute(operation, async) + + then: + thrown(MongoCommandException) + + when: + update = singletonList(new BsonInt32(1)) + operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, documentCodec, update) + execute(operation, async) + + then: + thrown(MongoCommandException) + + where: + async << [true, false] + } + + def 'should support bypassDocumentValidation'() { + given: + def namespace = new MongoNamespace(getDatabaseName(), 'collectionOut') + def collectionHelper = getCollectionHelper(namespace) + collectionHelper.create('collectionOut', new CreateCollectionOptions().validationOptions( + new ValidationOptions().validator(gte('level', 10)))) + collectionHelper.insertDocuments(BsonDocument.parse('{ level: 10 }')) + + when: + def update = new BsonDocument('$inc', new BsonDocument('level', new BsonInt32(-1))) + def operation = new FindAndUpdateOperation(namespace, ACKNOWLEDGED, false, + documentCodec, update) + execute(operation, async) + + then: + thrown(MongoCommandException) + + when: + operation.bypassDocumentValidation(false) + execute(operation, async) + + then: + thrown(MongoCommandException) + + when: + operation.bypassDocumentValidation(true).returnOriginal(false) + Document returnedDocument = execute(operation, async) + + then: + notThrown(MongoCommandException) + returnedDocument.getInteger('level') == 9 + + cleanup: + collectionHelper?.drop() + + where: + async << [true, false] + } + + @IgnoreIf({ !isDiscoverableReplicaSet() }) + def 'should throw on write concern error'() { + given: + getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('name', 'Pete')) + def update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) + + when: + def operation = new FindAndUpdateOperation(getNamespace(), + new WriteConcern(5, 1), false, documentCodec, update) + .filter(new BsonDocument('name', new BsonString('Pete'))) + execute(operation, async) + + then: + def ex = thrown(MongoWriteConcernException) + ex.writeConcernError.code == 100 + !ex.writeConcernError.message.isEmpty() + ex.writeResult.count == 1 + ex.writeResult.updateOfExisting + ex.writeResult.upsertedId == null + + when: + operation = new FindAndUpdateOperation(getNamespace(), new WriteConcern(5, 1), false, + documentCodec, update) + .filter(new BsonDocument('name', new BsonString('Bob'))) + .upsert(true) + execute(operation, async) + + then: + ex = thrown(MongoWriteConcernException) + ex.writeResult.count == 1 + !ex.writeResult.updateOfExisting + ex.writeResult.upsertedId instanceof BsonObjectId + + where: + async << [true, false] + } + + @IgnoreIf({ !isDiscoverableReplicaSet() }) + def 'should throw on write concern error on multiple failpoint'() { + given: + CollectionHelper helper = new CollectionHelper(documentCodec, getNamespace()) + helper.insertDocuments(new DocumentCodec(), new Document('name', 'Pete')) + + def failPoint = BsonDocument.parse('''{ + "configureFailPoint": "failCommand", + "mode": {"times": 2 }, + "data": { "failCommands": ["findAndModify"], + "writeConcernError": {"code": 91, "errmsg": "Replication is being shut down"}}}''') + configureFailPoint(failPoint) + + def update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, + documentCodec, update) + .filter(new BsonDocument('name', new BsonString('Pete'))) + + when: + 
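// The update itself still applies (hence writeResult.count == 1 below); only the write + // concern is reported as failed, which the driver surfaces as a MongoWriteConcernException. +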
execute(operation, async) + + then: + def ex = thrown(MongoWriteConcernException) + ex.writeConcernError.code == 91 + !ex.writeConcernError.message.isEmpty() + ex.writeResult.count == 1 + ex.writeResult.updateOfExisting + ex.writeResult.upsertedId == null + + cleanup: + disableFailPoint('failCommand') + + where: + async << [true, false] + } + + def 'should create the expected command'() { + when: + def includeTxnNumber = retryWrites && writeConcern.isAcknowledged() && serverType != STANDALONE + def includeWriteConcern = writeConcern.isAcknowledged() && !writeConcern.isServerDefault() + def cannedResult = new BsonDocument('value', new BsonDocumentWrapper(BsonDocument.parse('{}'), new BsonDocumentCodec())) + def update = BsonDocument.parse('{ update: 1}') + def operation = new FindAndUpdateOperation(getNamespace(), writeConcern, retryWrites, documentCodec, update) + def expectedCommand = new BsonDocument('findAndModify', new BsonString(getNamespace().getCollectionName())) + .append('update', update) + if (includeWriteConcern) { + expectedCommand.put('writeConcern', writeConcern.asDocument()) + } + if (includeTxnNumber) { + expectedCommand.put('txnNumber', new BsonInt64(0)) + } + expectedCommand.put('new', BsonBoolean.FALSE) + + then: + testOperation([operation: operation, serverVersion: [3, 6, 0], expectedCommand: expectedCommand, async: async, + result: cannedResult, serverType: serverType]) + + when: + def filter = BsonDocument.parse('{ filter : 1}') + def sort = BsonDocument.parse('{ sort : 1}') + def projection = BsonDocument.parse('{ projection : 1}') + + operation.filter(filter) + .sort(sort) + .projection(projection) + .bypassDocumentValidation(true) + + expectedCommand.append('query', filter) + .append('sort', sort) + .append('fields', projection) + + operation.collation(defaultCollation) + expectedCommand.append('collation', defaultCollation.asDocument()) + expectedCommand.append('bypassDocumentValidation', BsonBoolean.TRUE) + + then: + testOperation([operation: operation, serverVersion: [3, 6, 0], expectedCommand: expectedCommand, async: async, + result: cannedResult, serverType: serverType]) + + where: + [serverType, writeConcern, async, retryWrites] << [ + [REPLICA_SET_PRIMARY, STANDALONE], + [ACKNOWLEDGED, W1, UNACKNOWLEDGED], + [true, false], + [true, false] + ].combinations() + } + + @IgnoreIf({ !isDiscoverableReplicaSet() }) + def 'should support retryable writes'() { + given: + CollectionHelper helper = new CollectionHelper(documentCodec, getNamespace()) + Document pete = new Document('name', 'Pete').append('numberOfJobs', 3) + Document sam = new Document('name', 'Sam').append('numberOfJobs', 5) + + helper.insertDocuments(new DocumentCodec(), pete, sam) + + when: + def update = new BsonDocument('$inc', new BsonDocument('numberOfJobs', new BsonInt32(1))) + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, true, + documentCodec, update) + .filter(new BsonDocument('name', new BsonString('Pete'))) + + enableOnPrimaryTransactionalWriteFailPoint(BsonDocument.parse('{times: 1}')) + + Document returnedDocument = executeWithSession(operation, async) + + then: + returnedDocument.getInteger('numberOfJobs') == 3 + helper.find().size() == 2 + helper.find().get(0).getInteger('numberOfJobs') == 4 + + cleanup: + disableOnPrimaryTransactionalWriteFailPoint() + + where: + async << [true, false] + } + + def 'should retry if the connection initially fails'() { + when: + def cannedResult = new BsonDocument('value', new BsonDocumentWrapper(BsonDocument.parse('{}'), new 
BsonDocumentCodec())) + def update = BsonDocument.parse('{ update: 1}') + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, true, + documentCodec, update) + def expectedCommand = new BsonDocument('findAndModify', new BsonString(getNamespace().getCollectionName())) + .append('update', update) + .append('txnNumber', new BsonInt64(0)) + .append('new', BsonBoolean.FALSE) + + then: + testOperationRetries(operation, [3, 6, 0], expectedCommand, async, cannedResult) + + where: + async << [true, false] + } + + def 'should throw original error when retrying and failing'() { + given: + def update = BsonDocument.parse('{ update: 1}') + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, true, + documentCodec, update) + def originalException = new MongoSocketException('Some failure', new ServerAddress()) + + when: + testRetryableOperationThrowsOriginalError(operation, [[3, 6, 0], [3, 6, 0]], + [REPLICA_SET_PRIMARY, STANDALONE], originalException, async) + + then: + Exception commandException = thrown() + commandException == originalException + + when: + testRetryableOperationThrowsOriginalError(operation, [[3, 6, 0]], + [REPLICA_SET_PRIMARY], originalException, async, 1) + + then: + commandException = thrown() + commandException == originalException + + where: + async << [true, false] + } + + def 'should support collation'() { + given: + def document = Document.parse('{_id: 1, str: "foo"}') + getCollectionHelper().insertDocuments(document) + def update = BsonDocument.parse('{ $set: {str: "bar"}}') + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, + documentCodec, update) + .filter(BsonDocument.parse('{str: "FOO"}')) + .collation(caseInsensitiveCollation) + + when: + def result = execute(operation, async) + + then: + result == document + + where: + async << [true, false] + } + + def 'should support array filters'() { + given: + def documentOne = Document.parse('{_id: 1, y: [ {b: 3}, {b: 1}]}') + def documentTwo = Document.parse('{_id: 2, y: [ {b: 0}, {b: 1}]}') + getCollectionHelper().insertDocuments(documentOne, documentTwo) + def update = BsonDocument.parse('{ $set: {"y.$[i].b": 2}}') + def arrayFilters = [BsonDocument.parse('{"i.b": 3}')] + def operation = new FindAndUpdateOperation(getNamespace(), ACKNOWLEDGED, false, + documentCodec, update) + .returnOriginal(false) + .arrayFilters(arrayFilters) + + when: + def result = execute(operation, async) + + then: + result == Document.parse('{_id: 1, y: [ {b: 2}, {b: 1}]}') + + where: + async << [true, false] + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/FindOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/FindOperationSpecification.groovy new file mode 100644 index 00000000000..f61ab70f2ae --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/FindOperationSpecification.groovy @@ -0,0 +1,707 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation + +import com.mongodb.ClusterFixture +import com.mongodb.MongoNamespace +import com.mongodb.MongoQueryException +import com.mongodb.OperationFunctionalSpecification +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import com.mongodb.ReadPreferenceHedgeOptions +import com.mongodb.ServerAddress +import com.mongodb.async.FutureResultCallback +import com.mongodb.client.model.CreateCollectionOptions +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ConnectionDescription +import com.mongodb.connection.ConnectionId +import com.mongodb.connection.ServerId +import com.mongodb.internal.TimeoutContext +import com.mongodb.internal.binding.AsyncClusterBinding +import com.mongodb.internal.binding.AsyncConnectionSource +import com.mongodb.internal.binding.AsyncReadBinding +import com.mongodb.internal.binding.ClusterBinding +import com.mongodb.internal.binding.ConnectionSource +import com.mongodb.internal.binding.ReadBinding +import com.mongodb.internal.connection.AsyncConnection +import com.mongodb.internal.connection.Connection +import com.mongodb.internal.session.SessionContext +import org.bson.BsonBoolean +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonInt64 +import org.bson.BsonString +import org.bson.BsonTimestamp +import org.bson.BsonValue +import org.bson.Document +import org.bson.codecs.BsonDocumentCodec +import org.bson.codecs.DocumentCodec +import spock.lang.IgnoreIf + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ClusterFixture.executeAsync +import static com.mongodb.ClusterFixture.executeSync +import static com.mongodb.ClusterFixture.getAsyncBinding +import static com.mongodb.ClusterFixture.getAsyncCluster +import static com.mongodb.ClusterFixture.getBinding +import static com.mongodb.ClusterFixture.getCluster +import static com.mongodb.ClusterFixture.isSharded +import static com.mongodb.ClusterFixture.serverVersionLessThan +import static com.mongodb.CursorType.NonTailable +import static com.mongodb.CursorType.Tailable +import static com.mongodb.CursorType.TailableAwait +import static com.mongodb.connection.ServerType.STANDALONE +import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand +import static com.mongodb.internal.operation.ServerVersionHelper.UNKNOWN_WIRE_VERSION +import static org.junit.Assert.assertEquals + +class FindOperationSpecification extends OperationFunctionalSpecification { + + def 'should have the correct defaults'() { + given: + def decoder = new DocumentCodec() + + when: + FindOperation operation = new FindOperation(getNamespace(), decoder) + + then: + operation.getNamespace() == getNamespace() + operation.getDecoder() == decoder + operation.getFilter() == null + operation.getHint() == null + operation.getLimit() == 0 + operation.getSkip() == 0 + operation.getBatchSize() == 0 + operation.getProjection() == null + operation.getCollation() == null + !operation.isNoCursorTimeout() + !operation.isPartial() + operation.isAllowDiskUse() == null + } + + def 'should set optional values correctly'() { + given: + def filter = new BsonDocument('filter', new BsonInt32(1)) + def projection = new BsonDocument('projection', new BsonInt32(1)) + def hint = new BsonString('a_1') + + when: + FindOperation operation = new FindOperation(getNamespace(), new DocumentCodec()) + 
.filter(filter) + .limit(20) + .skip(30) + .hint(hint) + .batchSize(40) + .projection(projection) + .cursorType(Tailable) + .collation(defaultCollation) + .partial(true) + .noCursorTimeout(true) + .allowDiskUse(true) + + then: + operation.getFilter() == filter + operation.getLimit() == 20 + operation.getSkip() == 30 + operation.getHint() == hint + operation.getBatchSize() == 40 + operation.getProjection() == projection + operation.getCollation() == defaultCollation + operation.isNoCursorTimeout() + operation.isPartial() + operation.isAllowDiskUse() + } + + def 'should query with default values'() { + given: + def document = new Document('_id', 1) + getCollectionHelper().insertDocuments(new DocumentCodec(), document) + def operation = new FindOperation(getNamespace(), new DocumentCodec()) + + when: + def results = executeAndCollectBatchCursorResults(operation, async) + + then: + results == [document] + + where: + async << [true, false] + } + + def 'should apply filter'() { + given: + def document = new Document('_id', 1) + getCollectionHelper().insertDocuments(new DocumentCodec(), document, new Document()) + + when: + def results = executeAndCollectBatchCursorResults(operation, async) + + then: + results == [document] + + where: + [async, operation] << [ + [true, false], + [new FindOperation(getNamespace(), new DocumentCodec()) + .filter(new BsonDocument('_id', new BsonInt32(1)))] + ].combinations() + } + + def 'should apply sort'() { + given: + def documents = [new Document('_id', 3), new Document('_id', 1), new Document('_id', 2), new Document('_id', 5), + new Document('_id', 4)] + getCollectionHelper().insertDocuments(new DocumentCodec(), documents) + + + when: 'ascending' + def results = executeAndCollectBatchCursorResults(operation, async) + + then: + results == [new Document('_id', 1), new Document('_id', 2), new Document('_id', 3), new Document('_id', 4), new Document('_id', 5)] + + where: + [async, operation] << [ + [true, false], + [new FindOperation(getNamespace(), new DocumentCodec()) + .sort(new BsonDocument('_id', new BsonInt32(1)))] + ].combinations() + } + + def 'should apply projection'() { + given: + getCollectionHelper().insertDocuments(new DocumentCodec(), + new Document('x', 5).append('y', 10), new Document('_id', 1).append('x', 10)) + def operation = new FindOperation(getNamespace(), new DocumentCodec()) + .projection(new BsonDocument('_id', new BsonInt32(0)).append('x', new BsonInt32(1))) + + when: + def results = executeAndCollectBatchCursorResults(operation, async) + + then: + results == [new Document('x', 5), new Document('x', 10)] + + where: + async << [true, false] + } + + def 'should apply skip'() { + given: + def documents = [new Document('_id', 3), new Document('_id', 1), new Document('_id', 2), new Document('_id', 4), + new Document('_id', 5)] + getCollectionHelper().insertDocuments(new DocumentCodec(), documents) + + def operation = new FindOperation(getNamespace(), new DocumentCodec()) + .sort(new BsonDocument('_id', new BsonInt32(1))) + .skip(3) + + when: + def results = executeAndCollectBatchCursorResults(operation, async) + + then: + results == [new Document('_id', 4), new Document('_id', 5)] + + where: + async << [true, false] + } + + def 'should apply limit'() { + given: + def documents = [new Document('_id', 1), new Document('_id', 2), new Document('_id', 3), new Document('_id', 4), + new Document('_id', 5)] + getCollectionHelper().insertDocuments(new DocumentCodec(), documents) + + def operation = new FindOperation(getNamespace(), new DocumentCodec()) 
+ .sort(new BsonDocument('_id', new BsonInt32(1))) + .limit(limit) + + when: + def results = executeAndCollectBatchCursorResults(operation, async) + + then: + results == [new Document('_id', 1), new Document('_id', 2), new Document('_id', 3)] + + where: + [async, limit] << [[true, false], [3, -3]].combinations() + } + + def 'should apply batch size'() { + given: + def documents = [new Document('_id', 1), new Document('_id', 2), new Document('_id', 3), new Document('_id', 4), + new Document('_id', 5)] + getCollectionHelper().insertDocuments(new DocumentCodec(), documents) + def operation = new FindOperation(getNamespace(), new DocumentCodec()) + .sort(new BsonDocument('_id', new BsonInt32(1))) + .batchSize(batchSize) + + when: + def cursor = execute(operation, async) + def firstBatch = { + if (async) { + def futureResultCallback = new FutureResultCallback() + cursor.next(futureResultCallback) + futureResultCallback.get() + } else { + cursor.next() + } + }() + def hasAnotherBatch = { + if (async) { + if (cursor.isClosed()) { + false + } else { + def futureResultCallback = new FutureResultCallback() + cursor.next(futureResultCallback) + futureResultCallback.get() != null + } + } else { + cursor.hasNext() + } + }() + + then: + firstBatch == [new Document('_id', 1), new Document('_id', 2), new Document('_id', 3)] + hasAnotherBatch == hasNext + + where: + batchSize | hasNext | async + 3 | true | true + 3 | true | false + -3 | false | true + -3 | false | false + } + + def 'should throw query exception'() { + given: + def operation = new FindOperation(getNamespace(), new DocumentCodec()) + .filter(new BsonDocument('x', new BsonDocument('$thisIsNotAnOperator', BsonBoolean.TRUE))) + + when: + execute(operation, async) + + then: + thrown(MongoQueryException) + + where: + async << [true, false] + } + + def '$max should limit items returned'() { + given: + (1..100).each { + collectionHelper.insertDocuments(new DocumentCodec(), new Document('x', 'y').append('count', it)) + } + collectionHelper.createIndex(new BsonDocument('count', new BsonInt32(1))) + def operation = new FindOperation(getNamespace(), new DocumentCodec()) + .max(new BsonDocument('count', new BsonInt32(11))) + .hint(new BsonDocument('count', new BsonInt32(1))) + + when: + def count = executeAndCollectBatchCursorResults(operation, async).size() + + then: + count == 10 + + where: + async << [true, false] + } + + def '$min should limit items returned'() { + given: + (1..100).each { + collectionHelper.insertDocuments(new DocumentCodec(), new Document('x', 'y').append('count', it)) + } + collectionHelper.createIndex(new BsonDocument('count', new BsonInt32(1))) + def operation = new FindOperation(getNamespace(), new DocumentCodec()) + .min(new BsonDocument('count', new BsonInt32(10))) + .hint(new BsonDocument('count', new BsonInt32(1))) + + when: + def count = executeAndCollectBatchCursorResults(operation, async).size() + + then: + count == 91 + + where: + async << [true, false] + } + + def '$returnKey should only return the field that was in an index used to perform the find'() { + given: + (1..13).each { + collectionHelper.insertDocuments(new DocumentCodec(), new Document('x', it)) + } + collectionHelper.createIndex(new BsonDocument('x', new BsonInt32(1))) + + def operation = new FindOperation(getNamespace(), new DocumentCodec()) + .filter(new BsonDocument('x', new BsonInt32(7))) + .returnKey(true) + + when: + def results = executeAndCollectBatchCursorResults(operation, async) + + then: + results == [new Document('x', 7)] + + where: + async 
<< [true, false] + } + + def 'should apply $hint'() { + given: + def index = new BsonDocument('a', new BsonInt32(1)) + collectionHelper.createIndex(index) + + def operation = new FindOperation(getNamespace(), new DocumentCodec()) + .hint((BsonValue) hint) + .asExplainableOperation(null, new BsonDocumentCodec()) + + when: + def explainPlan = execute(operation, async) + + then: + assertEquals(index, TestOperationHelper.getKeyPattern(explainPlan)) + + where: + [async, hint] << [[true, false], [new BsonDocument('a', new BsonInt32(1)), + new BsonString('a_1')]].combinations() + } + + @IgnoreIf({ isSharded() }) + def 'should apply comment'() { + given: + def profileCollectionHelper = getCollectionHelper(new MongoNamespace(getDatabaseName(), 'system.profile')) + new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(2)), + new BsonDocumentCodec()).execute(getBinding()) + def expectedComment = 'this is a comment' + def operation = new FindOperation(getNamespace(), new DocumentCodec()) + .comment(new BsonString(expectedComment)) + + when: + execute(operation, async) + + then: + Document profileDocument = profileCollectionHelper.find().get(0) + assertEquals(expectedComment, ((Document) profileDocument.get('command')).get('comment')) + + cleanup: + new CommandReadOperation<>(getDatabaseName(), new BsonDocument('profile', new BsonInt32(0)), + new BsonDocumentCodec()) + .execute(getBinding()) + profileCollectionHelper.drop() + + where: + async << [true, false] + } + + def 'should apply $showDiskLoc'() { + given: + String fieldName = '$recordId' + collectionHelper.insertDocuments(new BsonDocument()) + + def operation = new FindOperation(getNamespace(), new DocumentCodec()) + .showRecordId(true) + + when: + def result = executeAndCollectBatchCursorResults(operation, async).head() + + then: + result[fieldName] + + where: + async << [true, false] + } + + @IgnoreIf({ !ClusterFixture.isDiscoverableReplicaSet() }) + def 'should read from a secondary'() { + given: + collectionHelper.insertDocuments(new DocumentCodec(), new Document()) + def operation = new FindOperation(getNamespace(), new DocumentCodec()) + def syncBinding = new ClusterBinding(getCluster(), ReadPreference.secondary(), ReadConcern.DEFAULT, OPERATION_CONTEXT) + def asyncBinding = new AsyncClusterBinding(getAsyncCluster(), ReadPreference.secondary(), ReadConcern.DEFAULT, + OPERATION_CONTEXT) + + when: + def result = async ? executeAsync(operation, asyncBinding) : executeSync(operation, syncBinding) + + then: + result != null // if it didn't throw, the query was executed + + where: + async << [true, false] + } + + @IgnoreIf({ serverVersionLessThan(4, 4) || ClusterFixture.isStandalone() }) + def 'should read from a secondary when hedge is specified'() { + given: + def documents = [new Document('_id', 3), new Document('_id', 1), new Document('_id', 2), new Document('_id', 5), + new Document('_id', 4)] + collectionHelper.insertDocuments(new DocumentCodec(), documents) + def operation = new FindOperation(getNamespace(), new DocumentCodec()) + + when: + def hedgeOptions = isHedgeEnabled != null ? + ReadPreferenceHedgeOptions.builder().enabled(isHedgeEnabled as boolean).build() : null + def readPreference = ReadPreference.primaryPreferred().withHedgeOptions(hedgeOptions) + def syncBinding = new ClusterBinding(getCluster(), readPreference, ReadConcern.DEFAULT, OPERATION_CONTEXT) + def asyncBinding = new AsyncClusterBinding(getAsyncCluster(), readPreference, ReadConcern.DEFAULT, OPERATION_CONTEXT) + def cursor = async ? 
executeAsync(operation, asyncBinding) : executeSync(operation, syncBinding) + def firstBatch = { + if (async) { + def futureResultCallback = new FutureResultCallback() + cursor.next(futureResultCallback) + futureResultCallback.get() + } else { + cursor.next() + } + }() + + then: + firstBatch.size() == 5 + + where: + [async, isHedgeEnabled] << [[true, false], [null, false, true]].combinations() + } + + def 'should add read concern to command'() { + given: + def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext) + def binding = Stub(ReadBinding) + def source = Stub(ConnectionSource) + def connection = Mock(Connection) + binding.readPreference >> ReadPreference.primary() + binding.operationContext >> operationContext + binding.readConnectionSource >> source + source.connection >> connection + source.retain() >> source + source.operationContext >> operationContext + def commandDocument = new BsonDocument('find', new BsonString(getCollectionName())) + appendReadConcernToCommand(sessionContext, UNKNOWN_WIRE_VERSION, commandDocument) + + def operation = new FindOperation(getNamespace(), new DocumentCodec()) + + when: + operation.execute(binding) + + then: + _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())), + 6, STANDALONE, 1000, 100000, 100000, []) + 1 * connection.command(_, commandDocument, _, _, _, operationContext) >> + new BsonDocument('cursor', new BsonDocument('id', new BsonInt64(1)) + .append('ns', new BsonString(getNamespace().getFullName())) + .append('firstBatch', new BsonArrayWrapper([]))) + 1 * connection.release() + + where: + sessionContext << [ + Stub(SessionContext) { + isCausallyConsistent() >> true + getOperationTime() >> new BsonTimestamp(42, 0) + hasActiveTransaction() >> false + getReadConcern() >> ReadConcern.MAJORITY + } + ] + } + + def 'should add read concern to command asynchronously'() { + given: + def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext) + def binding = Stub(AsyncReadBinding) + def source = Stub(AsyncConnectionSource) + def connection = Mock(AsyncConnection) + binding.readPreference >> ReadPreference.primary() + binding.operationContext >> operationContext + binding.getReadConnectionSource(_) >> { it[0].onResult(source, null) } + source.operationContext >> operationContext + source.getConnection(_) >> { it[0].onResult(connection, null) } + source.retain() >> source + def commandDocument = new BsonDocument('find', new BsonString(getCollectionName())) + appendReadConcernToCommand(sessionContext, UNKNOWN_WIRE_VERSION, commandDocument) + + def operation = new FindOperation(getNamespace(), new DocumentCodec()) + + when: + executeAsync(operation, binding) + + then: + _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())), + 6, STANDALONE, 1000, 100000, 100000, []) + 1 * connection.commandAsync(_, commandDocument, _, _, _, operationContext, _) >> { + it.last().onResult(new BsonDocument('cursor', new BsonDocument('id', new BsonInt64(1)) + .append('ns', new BsonString(getNamespace().getFullName())) + .append('firstBatch', new BsonArrayWrapper([]))), null) + } + 1 * connection.release() + + where: + sessionContext << [ + Stub(SessionContext) { + isCausallyConsistent() >> true + getOperationTime() >> new BsonTimestamp(42, 0) + hasActiveTransaction() >> false + getReadConcern() >> ReadConcern.MAJORITY + } + ] + } + + def 'should add allowDiskUse to command if the server version >= 
3.2'() { + given: + def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext) + def binding = Stub(ReadBinding) + def source = Stub(ConnectionSource) + def connection = Mock(Connection) + binding.readPreference >> ReadPreference.primary() + binding.readConnectionSource >> source + binding.operationContext >> operationContext + source.connection >> connection + source.retain() >> source + source.operationContext >> operationContext + def commandDocument = new BsonDocument('find', new BsonString(getCollectionName())).append('allowDiskUse', BsonBoolean.TRUE) + appendReadConcernToCommand(sessionContext, UNKNOWN_WIRE_VERSION, commandDocument) + + def operation = new FindOperation(getNamespace(), new DocumentCodec()).allowDiskUse(true) + + when: + operation.execute(binding) + + then: + _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())), + 6, STANDALONE, 1000, 100000, 100000, []) + 1 * connection.command(_, commandDocument, _, _, _, operationContext) >> + new BsonDocument('cursor', new BsonDocument('id', new BsonInt64(1)) + .append('ns', new BsonString(getNamespace().getFullName())) + .append('firstBatch', new BsonArrayWrapper([]))) + 1 * connection.release() + + where: + sessionContext << [ + Stub(SessionContext) { + isCausallyConsistent() >> true + getOperationTime() >> new BsonTimestamp(42, 0) + hasActiveTransaction() >> false + getReadConcern() >> ReadConcern.DEFAULT + } + ] + } + + def 'should add allowDiskUse to command if the server version >= 3.2 asynchronously'() { + given: + def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext) + def binding = Stub(AsyncReadBinding) + def source = Stub(AsyncConnectionSource) + def connection = Mock(AsyncConnection) + binding.operationContext >> operationContext + binding.readPreference >> ReadPreference.primary() + binding.getReadConnectionSource(_) >> { it[0].onResult(source, null) } + source.operationContext >> operationContext + source.getConnection(_) >> { it[0].onResult(connection, null) } + source.retain() >> source + def commandDocument = new BsonDocument('find', new BsonString(getCollectionName())).append('allowDiskUse', BsonBoolean.TRUE) + appendReadConcernToCommand(sessionContext, UNKNOWN_WIRE_VERSION, commandDocument) + + def operation = new FindOperation(getNamespace(), new DocumentCodec()).allowDiskUse(true) + + when: + executeAsync(operation, binding) + + then: + _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())), + 6, STANDALONE, 1000, 100000, 100000, []) + 1 * connection.commandAsync(_, commandDocument, _, _, _, operationContext, _) >> { + it.last().onResult(new BsonDocument('cursor', new BsonDocument('id', new BsonInt64(1)) + .append('ns', new BsonString(getNamespace().getFullName())) + .append('firstBatch', new BsonArrayWrapper([]))), null) + } + 1 * connection.release() + + where: + sessionContext << [ + Stub(SessionContext) { + isCausallyConsistent() >> true + getOperationTime() >> new BsonTimestamp(42, 0) + hasActiveTransaction() >> false + getReadConcern() >> ReadConcern.MAJORITY + } + ] + } + + // sanity check that the server accepts tailable and await data flags + def 'should pass tailable and await data flags through'() { + given: + def (cursorType, long maxAwaitTimeMS, long maxTimeMSForCursor) = cursorDetails + def timeoutSettings = ClusterFixture.TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT.withMaxAwaitTimeMS(maxAwaitTimeMS) + def timeoutContext = 
Spy(TimeoutContext, constructorArgs: [timeoutSettings])
+        def operationContext = OPERATION_CONTEXT.withTimeoutContext(timeoutContext)
+
+        collectionHelper.create(getCollectionName(), new CreateCollectionOptions().capped(true).sizeInBytes(1000))
+        def operation = new FindOperation(namespace, new BsonDocumentCodec())
+                .cursorType(cursorType)
+
+        when:
+        if (async) {
+            execute(operation, getAsyncBinding(operationContext))
+        } else {
+            execute(operation, getBinding(operationContext))
+        }
+
+        then:
+        // verify the interaction on the spy: each cursor type implies the expected maxTimeMS override
+        1 * timeoutContext.setMaxTimeOverride(maxTimeMSForCursor)
+
+        where:
+        [async, cursorDetails] << [
+                [true, false],
+                [[NonTailable, 100, 0], [Tailable, 100, 0], [TailableAwait, 100, 100]]
+        ].combinations()
+    }
+
+    // sanity check that the server accepts the miscellaneous flags
+    def 'should pass miscellaneous flags through'() {
+        given:
+        def operation = new FindOperation(namespace, new BsonDocumentCodec())
+                .noCursorTimeout(true)
+                .partial(true)
+
+        when:
+        execute(operation, async)
+
+        then:
+        noExceptionThrown()
+
+        where:
+        async << [true, false]
+    }
+
+    def 'should support collation'() {
+        given:
+        def document = BsonDocument.parse('{_id: 1, str: "foo"}')
+        getCollectionHelper().insertDocuments(document)
+        def operation = new FindOperation(getNamespace(), new BsonDocumentCodec())
+                .filter(BsonDocument.parse('{str: "FOO"}'))
+                .collation(caseInsensitiveCollation)
+
+        when:
+        def result = executeAndCollectBatchCursorResults(operation, async)
+
+        then:
+        result == [document]
+
+        where:
+        async << [true, false]
+    }
+
+
+}
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/ListCollectionsOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/ListCollectionsOperationSpecification.groovy
new file mode 100644
index 00000000000..0d2688e0da6
--- /dev/null
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/ListCollectionsOperationSpecification.groovy
@@ -0,0 +1,477 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.mongodb.internal.operation + +import com.mongodb.MongoNamespace +import com.mongodb.OperationFunctionalSpecification +import com.mongodb.ReadPreference +import com.mongodb.ServerAddress +import com.mongodb.ServerCursor +import com.mongodb.WriteConcern +import com.mongodb.async.FutureResultCallback +import com.mongodb.client.model.CreateCollectionOptions +import com.mongodb.connection.ConnectionDescription +import com.mongodb.internal.async.AsyncBatchCursor +import com.mongodb.internal.async.SingleResultCallback +import com.mongodb.internal.binding.AsyncConnectionSource +import com.mongodb.internal.binding.AsyncReadBinding +import com.mongodb.internal.binding.ConnectionSource +import com.mongodb.internal.binding.ReadBinding +import com.mongodb.internal.connection.AsyncConnection +import com.mongodb.internal.connection.Connection +import org.bson.BsonBoolean +import org.bson.BsonDocument +import org.bson.BsonDouble +import org.bson.BsonInt64 +import org.bson.BsonString +import org.bson.Document +import org.bson.codecs.Decoder +import org.bson.codecs.DocumentCodec + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ClusterFixture.executeAsync +import static com.mongodb.ClusterFixture.getBinding + +class ListCollectionsOperationSpecification extends OperationFunctionalSpecification { + + def madeUpDatabase = 'MadeUpDatabase' + + def 'should return empty set if database does not exist'() { + given: + def operation = new ListCollectionsOperation(madeUpDatabase, new DocumentCodec()) + + when: + def cursor = operation.execute(getBinding()) + + then: + !cursor.hasNext() + + cleanup: + collectionHelper.dropDatabase(madeUpDatabase) + } + + + def 'should return empty cursor if database does not exist asynchronously'() { + given: + def operation = new ListCollectionsOperation(madeUpDatabase, new DocumentCodec()) + + when: + def cursor = executeAsync(operation) + def callback = new FutureResultCallback() + cursor.next(callback) + + then: + callback.get() == [] + + cleanup: + collectionHelper.dropDatabase(madeUpDatabase) + } + + def 'should return collection names if a collection exists'() { + given: + def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()) + def helper = getCollectionHelper() + def helper2 = getCollectionHelper(new MongoNamespace(databaseName, 'collection2')) + def codec = new DocumentCodec() + helper.insertDocuments(codec, ['a': 1] as Document) + helper2.insertDocuments(codec, ['a': 1] as Document) + + when: + def cursor = operation.execute(getBinding()) + def collections = cursor.next() + def names = collections*.get('name') + + then: + names.containsAll([collectionName, 'collection2']) + !names.contains(null) + names.findAll { it.contains('$') }.isEmpty() + } + + def 'should filter collection names if a name filter is specified'() { + given: + def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()) + .filter(new BsonDocument('name', new BsonString('collection2'))) + def helper = getCollectionHelper() + def helper2 = getCollectionHelper(new MongoNamespace(databaseName, 'collection2')) + def codec = new DocumentCodec() + helper.insertDocuments(codec, ['a': 1] as Document) + helper2.insertDocuments(codec, ['a': 1] as Document) + + when: + def cursor = operation.execute(getBinding()) + def collections = cursor.next() + def names = collections*.get('name') + + then: + names.contains('collection2') + !names.contains(collectionName) + } + + def 'should filter capped collections'() { + 
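+        // listCollections reports collection options in an 'options' sub-document, so capped
+        // collections can be matched server-side with a filter on 'options.capped'
+        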
given: + def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()) + .filter(new BsonDocument('options.capped', BsonBoolean.TRUE)) + def helper = getCollectionHelper() + getCollectionHelper().create('collection3', new CreateCollectionOptions().capped(true).sizeInBytes(1000)) + def codec = new DocumentCodec() + helper.insertDocuments(codec, ['a': 1] as Document) + + when: + def cursor = operation.execute(getBinding()) + def collections = cursor.next() + def names = collections*.get('name') + + then: + names.contains('collection3') + !names.contains(collectionName) + } + + def 'should only get collection names when nameOnly is requested'() { + given: + def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()) + .nameOnly(true) + getCollectionHelper().create('collection5', new CreateCollectionOptions()) + + when: + def cursor = operation.execute(getBinding()) + def collection = cursor.next()[0] + + then: + collection.size() == 2 + } + + def 'should only get collection names when nameOnly and authorizedCollections are requested'() { + given: + def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()) + .nameOnly(true) + .authorizedCollections(true) + getCollectionHelper().create('collection6', new CreateCollectionOptions()) + + when: + def cursor = operation.execute(getBinding()) + def collection = cursor.next()[0] + + then: + collection.size() == 2 + } + + def 'should get all fields when authorizedCollections is requested and nameOnly is not requested'() { + given: + def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()) + .nameOnly(false) + .authorizedCollections(true) + getCollectionHelper().create('collection8', new CreateCollectionOptions()) + + when: + def cursor = operation.execute(getBinding()) + def collection = cursor.next()[0] + + then: + collection.size() > 2 + } + + def 'should return collection names if a collection exists asynchronously'() { + given: + def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()) + def helper = getCollectionHelper() + def helper2 = getCollectionHelper(new MongoNamespace(databaseName, 'collection2')) + def codec = new DocumentCodec() + helper.insertDocuments(codec, ['a': 1] as Document) + helper2.insertDocuments(codec, ['a': 1] as Document) + + when: + def cursor = executeAsync(operation) + def callback = new FutureResultCallback() + cursor.next(callback) + def names = callback.get()*.get('name') + + then: + names.containsAll([collectionName, 'collection2']) + !names.contains(null) + names.findAll { it.contains('$') }.isEmpty() + } + + def 'should filter indexes when calling hasNext before next'() { + given: + new DropDatabaseOperation(databaseName, WriteConcern.ACKNOWLEDGED).execute(getBinding()) + addSeveralIndexes() + def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2) + + when: + def cursor = operation.execute(getBinding()) + + then: + cursor.hasNext() + cursor.hasNext() + cursorToListWithNext(cursor)*.get('name').contains(collectionName) + !cursor.hasNext() + } + + def 'should filter indexes without calling hasNext before next'() { + given: + new DropDatabaseOperation(databaseName, WriteConcern.ACKNOWLEDGED).execute(getBinding()) + addSeveralIndexes() + def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2) + + when: + def cursor = operation.execute(getBinding()) + def list = cursorToListWithNext(cursor) + + then: + 
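// only genuine collections should come back: legacy servers also listed index
+        // namespaces (names containing '$'), which the driver is expected to filter out
+        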
list*.get('name').contains(collectionName)
+        list.findAll { collection -> collection.get('name').contains('$') } == []
+        !cursor.hasNext()
+
+        when:
+        cursor.next()
+
+        then:
+        thrown(NoSuchElementException)
+    }
+
+    def 'should filter indexes when calling hasNext before tryNext'() {
+        given:
+        new DropDatabaseOperation(databaseName, WriteConcern.ACKNOWLEDGED).execute(getBinding())
+        addSeveralIndexes()
+        def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2)
+
+        when:
+        def cursor = operation.execute(getBinding())
+
+        then:
+        cursor.hasNext()
+        cursor.hasNext()
+
+        def list = cursorToListWithTryNext(cursor)
+        list*.get('name').contains(collectionName)
+        list.findAll { collection -> collection.get('name').contains('$') } == []
+
+        !cursor.hasNext()
+        !cursor.hasNext()
+        cursor.tryNext() == null
+    }
+
+    def 'should filter indexes without calling hasNext before tryNext'() {
+        given:
+        new DropDatabaseOperation(databaseName, WriteConcern.ACKNOWLEDGED).execute(getBinding())
+        addSeveralIndexes()
+        def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2)
+
+        when:
+        def cursor = operation.execute(getBinding())
+        def list = cursorToListWithTryNext(cursor)
+
+        then:
+        list*.get('name').contains(collectionName)
+        list.findAll { collection -> collection.get('name').contains('$') } == []
+        cursor.tryNext() == null
+    }
+
+
+    def 'should filter indexes asynchronously'() {
+        given:
+        new DropDatabaseOperation(databaseName, WriteConcern.ACKNOWLEDGED).execute(getBinding())
+        addSeveralIndexes()
+        def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2)
+
+        when:
+        def cursor = executeAsync(operation)
+        def list = asyncCursorToList(cursor)
+
+        then:
+        list*.get('name').contains(collectionName)
+        list.findAll { collection -> collection.get('name').contains('$') } == []
+    }
+
+    def 'should use the set batchSize of collections'() {
+        given:
+        def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2)
+        def codec = new DocumentCodec()
+        getCollectionHelper().insertDocuments(codec, ['a': 1] as Document)
+        getCollectionHelper(new MongoNamespace(databaseName, 'collection2')).insertDocuments(codec, ['a': 1] as Document)
+        getCollectionHelper(new MongoNamespace(databaseName, 'collection3')).insertDocuments(codec, ['a': 1] as Document)
+        getCollectionHelper(new MongoNamespace(databaseName, 'collection4')).insertDocuments(codec, ['a': 1] as Document)
+        getCollectionHelper(new MongoNamespace(databaseName, 'collection5')).insertDocuments(codec, ['a': 1] as Document)
+
+        when:
+        def cursor = operation.execute(getBinding())
+        def collections = cursor.next()
+
+        then:
+        collections.size() <= 2 // pre 3.0 items may be filtered out of the batch by the driver
+        cursor.hasNext()
+        cursor.getBatchSize() == 2
+
+        when:
+        collections = cursor.next()
+
+        then:
+        collections.size() <= 2 // pre 3.0 items may be filtered out of the batch by the driver
+        cursor.hasNext()
+        cursor.getBatchSize() == 2
+
+        cleanup:
+        cursor?.close()
+    }
+
+
+    def 'should use the set batchSize of collections asynchronously'() {
+        given:
+        def operation = new ListCollectionsOperation(databaseName, new DocumentCodec()).batchSize(2)
+        def codec = new DocumentCodec()
+        getCollectionHelper().insertDocuments(codec, ['a': 1] as Document)
+        getCollectionHelper(new MongoNamespace(databaseName, 'collection2')).insertDocuments(codec, ['a': 1] as Document)
+        getCollectionHelper(new MongoNamespace(databaseName, 'collection3')).insertDocuments(codec, ['a': 1] as Document)
+        getCollectionHelper(new MongoNamespace(databaseName, 'collection4')).insertDocuments(codec, ['a': 1] as Document)
+        getCollectionHelper(new MongoNamespace(databaseName, 'collection5')).insertDocuments(codec, ['a': 1] as Document)
+
+        when:
+        def cursor = executeAsync(operation)
+        def callback = new FutureResultCallback()
+        cursor.next(callback)
+
+        then:
+        callback.get().size() <= 2 // pre 3.0 items may be filtered out of the batch by the driver
+        cursor.getBatchSize() == 2
+
+        when:
+        callback = new FutureResultCallback()
+        cursor.next(callback)
+
+        then:
+        callback.get().size() <= 2 // pre 3.0 items may be filtered out of the batch by the driver
+        cursor.getBatchSize() == 2
+
+        cleanup:
+        cursor?.close()
+    }
+
+    def 'should use the readPreference to set secondaryOk'() {
+        given:
+        def connection = Mock(Connection)
+        def connectionSource = Stub(ConnectionSource) {
+            getConnection() >> connection
+            getReadPreference() >> readPreference
+            getOperationContext() >> OPERATION_CONTEXT
+        }
+        def readBinding = Stub(ReadBinding) {
+            getReadConnectionSource() >> connectionSource
+            getReadPreference() >> readPreference
+            getOperationContext() >> OPERATION_CONTEXT
+        }
+        def operation = new ListCollectionsOperation(helper.dbName, helper.decoder)
+
+        when: '3.6.0'
+        operation.execute(readBinding)
+
+        then:
+        _ * connection.getDescription() >> helper.threeSixConnectionDescription
+        1 * connection.command(_, _, _, readPreference, _, OPERATION_CONTEXT) >> helper.commandResult
+        1 * connection.release()
+
+        where:
+        readPreference << [ReadPreference.primary(), ReadPreference.secondary()]
+    }
+
+    def 'should use the readPreference to set secondaryOk in async'() {
+        given:
+        def connection = Mock(AsyncConnection)
+        def connectionSource = Stub(AsyncConnectionSource) {
+            getConnection(_) >> { it[0].onResult(connection, null) }
+            getReadPreference() >> readPreference
+            getOperationContext() >> OPERATION_CONTEXT
+        }
+        def readBinding = Stub(AsyncReadBinding) {
+            getReadConnectionSource(_) >> { it[0].onResult(connectionSource, null) }
+            getReadPreference() >> readPreference
+            getOperationContext() >> OPERATION_CONTEXT
+        }
+        def operation = new ListCollectionsOperation(helper.dbName, helper.decoder)
+
+        when: '3.6.0'
+        operation.executeAsync(readBinding, Stub(SingleResultCallback))
+
+        then:
+        _ * connection.getDescription() >> helper.threeSixConnectionDescription
+        1 * connection.commandAsync(helper.dbName, _, _, readPreference, _, OPERATION_CONTEXT, *_) >> {
+            it.last().onResult(helper.commandResult, null) }
+
+        where:
+        readPreference << [ReadPreference.primary(), ReadPreference.secondary()]
+    }
+
+    def helper = [
+            dbName: 'db',
+            decoder: Stub(Decoder),
+            threeSixConnectionDescription: Stub(ConnectionDescription) {
+                getMaxWireVersion() >> 3
+            },
+            queryResult: Stub(CommandCursorResult) {
+                getNamespace() >> new MongoNamespace('db', 'coll')
+                getResults() >> []
+                getCursor() >> new ServerCursor(1, Stub(ServerAddress))
+            },
+            commandResult: new BsonDocument('ok', new BsonDouble(1.0))
+                    .append('cursor', new BsonDocument('id', new BsonInt64(1)).append('ns', new BsonString('db.coll'))
+                            .append('firstBatch', new BsonArrayWrapper([])))
+    ]
+
+    private void addSeveralIndexes() {
+        getCollectionHelper().create(getCollectionName(), new CreateCollectionOptions())
+        getCollectionHelper().createIndex(['a': 1] as Document)
+        getCollectionHelper().createIndex(['b': 1] as Document)
+        getCollectionHelper().createIndex(['c': 1] as Document)
+        
getCollectionHelper().createIndex(['d': 1] as Document) + getCollectionHelper().createIndex(['e': 1] as Document) + getCollectionHelper().createIndex(['f': 1] as Document) + getCollectionHelper().createIndex(['g': 1] as Document) + } + + def cursorToListWithNext(BatchCursor cursor) { + def list = [] + while (cursor.hasNext()) { + list += cursor.next() + } + list + } + + def cursorToListWithTryNext(BatchCursor cursor) { + def list = [] + while (true) { + def next = cursor.tryNext() + if (next == null) { + break + } + list += next + } + list + } + + def asyncCursorToList(AsyncBatchCursor cursor) { + if (cursor.isClosed()) { + return [] + } + def callback = new FutureResultCallback() + cursor.next(callback) + def next = callback.get() + if (next == null) { + return [] + } + + next + asyncCursorToList(cursor) + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/ListDatabasesOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/ListDatabasesOperationSpecification.groovy new file mode 100644 index 00000000000..740f9073dcd --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/ListDatabasesOperationSpecification.groovy @@ -0,0 +1,129 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation + + +import com.mongodb.OperationFunctionalSpecification +import com.mongodb.ReadPreference +import com.mongodb.connection.ConnectionDescription +import com.mongodb.internal.async.SingleResultCallback +import com.mongodb.internal.binding.AsyncConnectionSource +import com.mongodb.internal.binding.AsyncReadBinding +import com.mongodb.internal.binding.ConnectionSource +import com.mongodb.internal.binding.ReadBinding +import com.mongodb.internal.connection.AsyncConnection +import com.mongodb.internal.connection.Connection +import org.bson.BsonDocument +import org.bson.BsonRegularExpression +import org.bson.Document +import org.bson.codecs.Decoder +import org.bson.codecs.DocumentCodec + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT + +class ListDatabasesOperationSpecification extends OperationFunctionalSpecification { + def codec = new DocumentCodec() + + def 'should return a list of database names'() { + given: + getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('_id', 1)) + def operation = new ListDatabasesOperation(codec) + + when: + def names = executeAndCollectBatchCursorResults(operation, async)*.get('name') + + then: + names.contains(getDatabaseName()) + + when: + operation = operation.nameOnly(true).filter(new BsonDocument('name', new BsonRegularExpression("^${getDatabaseName()}"))) + names = executeAndCollectBatchCursorResults(operation, async)*.get('name') + + then: + names.contains(getDatabaseName()) + + when: + operation = operation.authorizedDatabasesOnly(true).nameOnly(true) + .filter(new BsonDocument('name', new BsonRegularExpression("^${getDatabaseName()}"))) + names = executeAndCollectBatchCursorResults(operation, async)*.get('name') + + then: + names.contains(getDatabaseName()) + + where: + async << [true, false] + } + + def 'should use the readPreference to set secondaryOk'() { + given: + def connection = Mock(Connection) + def connectionSource = Stub(ConnectionSource) { + getConnection() >> connection + getReadPreference() >> readPreference + getOperationContext() >> OPERATION_CONTEXT + } + def readBinding = Stub(ReadBinding) { + getReadConnectionSource() >> connectionSource + getReadPreference() >> readPreference + getOperationContext() >> OPERATION_CONTEXT + } + def operation = new ListDatabasesOperation(helper.decoder) + + when: + operation.execute(readBinding) + + then: + _ * connection.getDescription() >> helper.connectionDescription + 1 * connection.command(_, _, _, readPreference, _, OPERATION_CONTEXT) >> helper.commandResult + 1 * connection.release() + + where: + readPreference << [ReadPreference.primary(), ReadPreference.secondary()] + } + + def 'should use the readPreference to set secondaryOk async'() { + given: + def connection = Mock(AsyncConnection) + def connectionSource = Stub(AsyncConnectionSource) { + getReadPreference() >> readPreference + getConnection(_) >> { it[0].onResult(connection, null) } + } + def readBinding = Stub(AsyncReadBinding) { + getReadPreference() >> readPreference + getReadConnectionSource(_) >> { it[0].onResult(connectionSource, null) } + } + def operation = new ListDatabasesOperation(helper.decoder) + + when: + operation.executeAsync(readBinding, Stub(SingleResultCallback)) + + then: + _ * connection.getDescription() >> helper.connectionDescription + 1 * connection.commandAsync(_, _, _, readPreference, *_) >> { it.last().onResult(helper.commandResult, null) } + 1 * connection.release() + + where: + readPreference << [ReadPreference.primary(), 
ReadPreference.secondary()] + } + + def helper = [ + decoder: Stub(Decoder), + commandResult: BsonDocument.parse('{ok: 1.0}').append('databases', new BsonArrayWrapper([])), + connectionDescription: Stub(ConnectionDescription) + ] + +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/ListIndexesOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/ListIndexesOperationSpecification.groovy new file mode 100644 index 00000000000..462bf367e50 --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/ListIndexesOperationSpecification.groovy @@ -0,0 +1,277 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation + + +import com.mongodb.MongoNamespace +import com.mongodb.OperationFunctionalSpecification +import com.mongodb.ReadPreference +import com.mongodb.ServerAddress +import com.mongodb.ServerCursor +import com.mongodb.async.FutureResultCallback +import com.mongodb.connection.ConnectionDescription +import com.mongodb.internal.async.AsyncBatchCursor +import com.mongodb.internal.async.SingleResultCallback +import com.mongodb.internal.binding.AsyncConnectionSource +import com.mongodb.internal.binding.AsyncReadBinding +import com.mongodb.internal.binding.ConnectionSource +import com.mongodb.internal.binding.ReadBinding +import com.mongodb.internal.bulk.IndexRequest +import com.mongodb.internal.connection.AsyncConnection +import com.mongodb.internal.connection.Connection +import org.bson.BsonDocument +import org.bson.BsonDouble +import org.bson.BsonInt32 +import org.bson.BsonInt64 +import org.bson.BsonString +import org.bson.Document +import org.bson.codecs.Decoder +import org.bson.codecs.DocumentCodec + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ClusterFixture.executeAsync +import static com.mongodb.ClusterFixture.getBinding + +class ListIndexesOperationSpecification extends OperationFunctionalSpecification { + + def 'should return empty list for nonexistent collection'() { + given: + def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec()) + + when: + def cursor = operation.execute(getBinding()) + + then: + !cursor.hasNext() + } + + + def 'should return empty list for nonexistent collection asynchronously'() { + given: + def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec()) + + when: + AsyncBatchCursor cursor = executeAsync(operation) + def callback = new FutureResultCallback() + cursor.next(callback) + + then: + callback.get() == [] + } + + + def 'should return default index on Collection that exists'() { + given: + def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec()) + getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection')) + + when: + BatchCursor indexes = operation.execute(getBinding()) + + then: + def firstBatch = 
indexes.next()
+        firstBatch.size() == 1
+        firstBatch[0].name == '_id_'
+        !indexes.hasNext()
+    }
+
+
+    def 'should return default index on Collection that exists asynchronously'() {
+        given:
+        def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec())
+        getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection'))
+
+        when:
+        def cursor = executeAsync(operation)
+        def callback = new FutureResultCallback()
+        cursor.next(callback)
+        def indexes = callback.get()
+
+        then:
+        indexes.size() == 1
+        indexes[0].name == '_id_'
+    }
+
+    def 'should return created indexes on Collection'() {
+        given:
+        def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec())
+        collectionHelper.createIndex(new BsonDocument('theField', new BsonInt32(1)))
+        collectionHelper.createIndex(new BsonDocument('compound', new BsonInt32(1)).append('index', new BsonInt32(-1)))
+        new CreateIndexesOperation(namespace,
+                [new IndexRequest(new BsonDocument('unique', new BsonInt32(1))).unique(true)], null).execute(getBinding())
+
+        when:
+        BatchCursor cursor = operation.execute(getBinding())
+
+        then:
+        def indexes = cursor.next()
+        indexes.size() == 4
+        indexes*.name.containsAll(['_id_', 'theField_1', 'compound_1_index_-1', 'unique_1'])
+        indexes.find { it.name == 'unique_1' }.unique
+        !cursor.hasNext()
+    }
+
+
+    def 'should return created indexes on Collection asynchronously'() {
+        given:
+        def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec())
+        collectionHelper.createIndex(new BsonDocument('theField', new BsonInt32(1)))
+        collectionHelper.createIndex(new BsonDocument('compound', new BsonInt32(1)).append('index', new BsonInt32(-1)))
+        new CreateIndexesOperation(namespace,
+                [new IndexRequest(new BsonDocument('unique', new BsonInt32(1))).unique(true)], null).execute(getBinding())
+
+        when:
+        def cursor = executeAsync(operation)
+        def callback = new FutureResultCallback()
+        cursor.next(callback)
+        def indexes = callback.get()
+
+        then:
+        indexes.size() == 4
+        indexes*.name.containsAll(['_id_', 'theField_1', 'compound_1_index_-1', 'unique_1'])
+        indexes.find { it.name == 'unique_1' }.unique
+    }
+
+    def 'should use the set batchSize of collections'() {
+        given:
+        def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec()).batchSize(2)
+        collectionHelper.createIndex(new BsonDocument('collection1', new BsonInt32(1)))
+        collectionHelper.createIndex(new BsonDocument('collection2', new BsonInt32(1)))
+        collectionHelper.createIndex(new BsonDocument('collection3', new BsonInt32(1)))
+        collectionHelper.createIndex(new BsonDocument('collection4', new BsonInt32(1)))
+        collectionHelper.createIndex(new BsonDocument('collection5', new BsonInt32(1)))
+
+        when:
+        def cursor = operation.execute(getBinding())
+        def collections = cursor.next()
+
+        then:
+        collections.size() <= 2 // pre 3.0 items may be filtered out of the batch by the driver
+        cursor.hasNext()
+        cursor.getBatchSize() == 2
+
+        when:
+        collections = cursor.next()
+
+        then:
+        collections.size() <= 2 // pre 3.0 items may be filtered out of the batch by the driver
+        cursor.hasNext()
+        cursor.getBatchSize() == 2
+
+        cleanup:
+        cursor?.close()
+    }
+
+
+    def 'should use the set batchSize of collections asynchronously'() {
+        given:
+        def operation = new ListIndexesOperation(getNamespace(), new DocumentCodec()).batchSize(2)
+        collectionHelper.createIndex(new BsonDocument('collection1', new BsonInt32(1)))
+        collectionHelper.createIndex(new BsonDocument('collection2', new BsonInt32(1)))
+        collectionHelper.createIndex(new BsonDocument('collection3', new BsonInt32(1)))
+        collectionHelper.createIndex(new BsonDocument('collection4', new BsonInt32(1)))
+        collectionHelper.createIndex(new BsonDocument('collection5', new BsonInt32(1)))
+
+        when:
+        def cursor = executeAsync(operation)
+        def callback = new FutureResultCallback()
+        cursor.next(callback)
+
+        then:
+        callback.get().size() <= 2 // pre 3.0 items may be filtered out of the batch by the driver
+        cursor.getBatchSize() == 2
+
+        when:
+        callback = new FutureResultCallback()
+        cursor.next(callback)
+
+        then:
+        callback.get().size() <= 2 // pre 3.0 items may be filtered out of the batch by the driver
+        cursor.getBatchSize() == 2
+
+        cleanup:
+        cursor?.close()
+    }
+
+    def 'should use the readPreference to set secondaryOk'() {
+        given:
+        def connection = Mock(Connection)
+        def connectionSource = Stub(ConnectionSource) {
+            getConnection() >> connection
+            getReadPreference() >> readPreference
+            getOperationContext() >> OPERATION_CONTEXT
+        }
+        def readBinding = Stub(ReadBinding) {
+            getReadConnectionSource() >> connectionSource
+            getReadPreference() >> readPreference
+            getOperationContext() >> OPERATION_CONTEXT
+        }
+        def operation = new ListIndexesOperation(helper.namespace, helper.decoder)
+
+        when: '3.6.0'
+        operation.execute(readBinding)
+
+        then:
+        _ * connection.getDescription() >> helper.threeSixConnectionDescription
+        1 * connection.command(_, _, _, readPreference, _, OPERATION_CONTEXT) >> helper.commandResult
+        1 * connection.release()
+
+        where:
+        readPreference << [ReadPreference.primary(), ReadPreference.secondary()]
+    }
+
+    def 'should use the readPreference to set secondaryOk async'() {
+        given:
+        def connection = Mock(AsyncConnection)
+        def connectionSource = Stub(AsyncConnectionSource) {
+            getReadPreference() >> readPreference
+            getConnection(_) >> { it[0].onResult(connection, null) }
+        }
+        def readBinding = Stub(AsyncReadBinding) {
+            getReadPreference() >> readPreference
+            getReadConnectionSource(_) >> { it[0].onResult(connectionSource, null) }
+        }
+        def operation = new ListIndexesOperation(helper.namespace, helper.decoder)
+
+        when: '3.6.0'
+        operation.executeAsync(readBinding, Stub(SingleResultCallback))
+
+        then:
+        _ * connection.getDescription() >> helper.threeSixConnectionDescription
+        1 * connection.commandAsync(helper.dbName, _, _, readPreference, *_) >> { it.last().onResult(helper.commandResult, null) }
+
+        where:
+        readPreference << [ReadPreference.primary(), ReadPreference.secondary()]
+    }
+
+    def helper = [
+            dbName: 'db',
+            namespace: new MongoNamespace('db', 'coll'),
+            decoder: Stub(Decoder),
+            threeSixConnectionDescription: Stub(ConnectionDescription) {
+                getMaxWireVersion() >> 3
+            },
+            queryResult: Stub(CommandCursorResult) {
+                getNamespace() >> new MongoNamespace('db', 'coll')
+                getResults() >> []
+                getCursor() >> new ServerCursor(1, Stub(ServerAddress))
+            },
+            commandResult: new BsonDocument('ok', new BsonDouble(1.0))
+                    .append('cursor', new BsonDocument('id', new BsonInt64(1)).append('ns', new BsonString('db.coll'))
+                            .append('firstBatch', new BsonArrayWrapper([])))
+    ]
+}
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceToCollectionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceToCollectionOperationSpecification.groovy
new file mode 100644
index 00000000000..0f48042da47
--- /dev/null
+++
b/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceToCollectionOperationSpecification.groovy @@ -0,0 +1,322 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation + +import com.mongodb.MongoCommandException +import com.mongodb.MongoNamespace +import com.mongodb.MongoWriteConcernException +import com.mongodb.OperationFunctionalSpecification +import com.mongodb.ReadPreference +import com.mongodb.WriteConcern +import com.mongodb.client.model.CreateCollectionOptions +import com.mongodb.client.model.ValidationOptions +import com.mongodb.client.test.CollectionHelper +import org.bson.BsonBoolean +import org.bson.BsonDocument +import org.bson.BsonDouble +import org.bson.BsonInt32 +import org.bson.BsonJavaScript +import org.bson.BsonString +import org.bson.Document +import org.bson.codecs.BsonDocumentCodec +import org.bson.codecs.DocumentCodec +import spock.lang.IgnoreIf + +import static com.mongodb.ClusterFixture.getBinding +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet +import static com.mongodb.ClusterFixture.serverVersionAtLeast +import static com.mongodb.ClusterFixture.serverVersionLessThan +import static com.mongodb.client.model.Filters.gte + +class MapReduceToCollectionOperationSpecification extends OperationFunctionalSpecification { + def mapReduceInputNamespace = new MongoNamespace(getDatabaseName(), 'mapReduceInput') + def mapReduceOutputNamespace = new MongoNamespace(getDatabaseName(), 'mapReduceOutput') + def mapReduceOperation = new MapReduceToCollectionOperation(mapReduceInputNamespace, + new BsonJavaScript('function(){ emit( this.name , 1 ); }'), + new BsonJavaScript('function(key, values){ return values.length; }'), + mapReduceOutputNamespace.getCollectionName(), null) + def expectedResults = [new BsonDocument('_id', new BsonString('Pete')).append('value', new BsonDouble(2.0)), + new BsonDocument('_id', new BsonString('Sam')).append('value', new BsonDouble(1.0))] as Set + def helper = new CollectionHelper(new BsonDocumentCodec(), mapReduceOutputNamespace) + + def setup() { + CollectionHelper helper = new CollectionHelper(new DocumentCodec(), mapReduceInputNamespace) + Document pete = new Document('name', 'Pete').append('job', 'handyman') + Document sam = new Document('name', 'Sam').append('job', 'plumber') + Document pete2 = new Document('name', 'Pete').append('job', 'electrician') + helper.insertDocuments(new DocumentCodec(), pete, sam, pete2) + } + + def cleanup() { + new DropCollectionOperation(mapReduceInputNamespace, WriteConcern.ACKNOWLEDGED).execute(getBinding()) + new DropCollectionOperation(mapReduceOutputNamespace, WriteConcern.ACKNOWLEDGED) + .execute(getBinding()) + } + + def 'should have the correct defaults'() { + given: + def mapF = new BsonJavaScript('function(){ emit( "level" , 1 ); }') + def reduceF = new BsonJavaScript('function(key, values){ return values.length; }') + def out = 'outCollection' + + when: + def operation = new 
MapReduceToCollectionOperation(getNamespace(), mapF, reduceF, out, null) + + then: + operation.getMapFunction() == mapF + operation.getReduceFunction() == reduceF + operation.getAction() == 'replace' + operation.getCollectionName() == out + operation.getWriteConcern() == null + operation.getDatabaseName() == null + operation.getFilter() == null + operation.getFinalizeFunction() == null + operation.getLimit() == 0 + operation.getScope() == null + operation.getSort() == null + operation.getBypassDocumentValidation() == null + operation.getCollation() == null + !operation.isJsMode() + !operation.isVerbose() + } + + def 'should set optional values correctly'(){ + given: + def mapF = new BsonJavaScript('function(){ emit( "level" , 1 ); }') + def reduceF = new BsonJavaScript('function(key, values){ return values.length; }') + def finalizeF = new BsonJavaScript('function(key, value) { return value }') + def filter = BsonDocument.parse('{level: {$gte: 5}}') + def sort = BsonDocument.parse('{level: 1}') + def scope = BsonDocument.parse('{level: 1}') + def out = 'outCollection' + def action = 'merge' + def dbName = 'dbName' + def writeConcern = WriteConcern.MAJORITY + + when: + def operation = new MapReduceToCollectionOperation(getNamespace(), mapF, reduceF, out, writeConcern) + .action(action) + .databaseName(dbName) + .finalizeFunction(finalizeF) + .filter(filter) + .limit(10) + .scope(scope) + .sort(sort) + .bypassDocumentValidation(true) + .collation(defaultCollation) + + then: + operation.getMapFunction() == mapF + operation.getReduceFunction() == reduceF + operation.getAction() == action + operation.getCollectionName() == out + operation.getWriteConcern() == writeConcern + operation.getDatabaseName() == dbName + operation.getFilter() == filter + operation.getLimit() == 10 + operation.getScope() == scope + operation.getSort() == sort + operation.getBypassDocumentValidation() == true + operation.getCollation() == defaultCollation + } + + @IgnoreIf({ serverVersionAtLeast(4, 4) }) + def 'should return the correct statistics and save the results'() { + when: + MapReduceStatistics results = execute(mapReduceOperation, async) + + then: + results.emitCount == 3 + results.inputCount == 3 + results.outputCount == 2 + helper.count() == 2 + helper.find() as Set == expectedResults + + where: + async << [true, false] + } + + @IgnoreIf({ serverVersionLessThan(4, 4) }) + def 'should return zero-valued statistics and save the results'() { + when: + MapReduceStatistics results = execute(mapReduceOperation, async) + + then: + results.emitCount == 0 + results.inputCount == 0 + results.outputCount == 0 + helper.count() == 2 + helper.find() as Set == expectedResults + + where: + async << [true, false] + } + + + def 'should support bypassDocumentValidation'() { + given: + def collectionOutHelper = getCollectionHelper(new MongoNamespace(getDatabaseName(), 'collectionOut')) + collectionOutHelper.create('collectionOut', new CreateCollectionOptions().validationOptions( + new ValidationOptions().validator(gte('level', 10)))) + getCollectionHelper().insertDocuments(new BsonDocument()) + + when: + def operation = new MapReduceToCollectionOperation(mapReduceInputNamespace, + new BsonJavaScript('function(){ emit( "level" , 1 ); }'), + new BsonJavaScript('function(key, values){ return values.length; }'), + 'collectionOut', null) + execute(operation, async) + + then: + thrown(MongoCommandException) + + when: + operation.bypassDocumentValidation(false) + execute(operation, async) + + then: + thrown(MongoCommandException) + + 
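// an explicit bypassDocumentValidation(true) is required before the server will skip
+        // the collection validator; leaving it unset or setting it to false keeps validation on
+
+        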
+        when:
+        operation.bypassDocumentValidation(true)
+        execute(operation, async)
+
+        then:
+        notThrown(MongoCommandException)
+
+        cleanup:
+        collectionOutHelper?.drop()
+
+        where:
+        async << [true, false]
+    }
+
+    @IgnoreIf({ !isDiscoverableReplicaSet() })
+    def 'should throw on write concern error'() {
+        given:
+        getCollectionHelper().insertDocuments(new BsonDocument())
+        def operation = new MapReduceToCollectionOperation(mapReduceInputNamespace,
+                new BsonJavaScript('function(){ emit( "level" , 1 ); }'),
+                new BsonJavaScript('function(key, values){ return values.length; }'),
+                'collectionOut', new WriteConcern(5))
+
+        when:
+        execute(operation, async)
+
+        then:
+        def ex = thrown(MongoWriteConcernException)
+        ex.writeConcernError.code == 100
+        ex.writeResult.wasAcknowledged()
+
+        where:
+        async << [true, false]
+    }
+
+    def 'should create the expected command'() {
+        given:
+        def cannedResults = BsonDocument.parse('''{result : "outCollection", timeMillis: 11,
+                counts: {input: 3, emit: 3, reduce: 1, output: 2 }, ok: 1.0 }''')
+        def mapF = new BsonJavaScript('function(){ emit( "level" , 1 ); }')
+        def reduceF = new BsonJavaScript('function(key, values){ return values.length; }')
+        def finalizeF = new BsonJavaScript('function(key, value) { return value }')
+        def filter = BsonDocument.parse('{level: {$gte: 5}}')
+        def sort = BsonDocument.parse('{level: 1}')
+        def scope = BsonDocument.parse('{level: 1}')
+        def out = 'outCollection'
+        def action = 'merge'
+        def dbName = 'dbName'
+
+        when:
+        def operation = new MapReduceToCollectionOperation(getNamespace(), mapF, reduceF, out,
+                WriteConcern.MAJORITY)
+        def expectedCommand = new BsonDocument('mapReduce', new BsonString(getCollectionName()))
+                .append('map', mapF)
+                .append('reduce', reduceF)
+                .append('out', BsonDocument.parse('{replace: "outCollection"}'))
+
+        if (includeWriteConcern) {
+            expectedCommand.append('writeConcern', WriteConcern.MAJORITY.asDocument())
+        }
+
+        then:
+        testOperation(operation, serverVersion, expectedCommand, async, cannedResults, true, false,
+                ReadPreference.primary(), false)
+
+        when:
+        operation = new MapReduceToCollectionOperation(getNamespace(), mapF, reduceF, out,
+                WriteConcern.MAJORITY)
+                .action(action)
+                .databaseName(dbName)
+                .finalizeFunction(finalizeF)
+                .filter(filter)
+                .limit(10)
+                .scope(scope)
+                .sort(sort)
+                .bypassDocumentValidation(true)
+                .verbose(true)
+
+        expectedCommand.append('out', BsonDocument.parse('{merge: "outCollection", db: "dbName"}'))
+                .append('query', filter)
+                .append('sort', sort)
+                .append('finalize', finalizeF)
+                .append('scope', scope)
+                .append('verbose', BsonBoolean.TRUE)
+                .append('limit', new BsonInt32(10))
+
+        if (includeCollation) {
+            operation.collation(defaultCollation)
+            expectedCommand.append('collation', defaultCollation.asDocument())
+        }
+        if (includeBypassValidation) {
+            expectedCommand.append('bypassDocumentValidation', BsonBoolean.TRUE)
+        }
+
+        then:
+        testOperation(operation, serverVersion, expectedCommand, async, cannedResults, true, false,
+                ReadPreference.primary(), false)
+
+        where:
+        serverVersion | includeBypassValidation | includeWriteConcern | includeCollation | async
+        [3, 4, 0]     | true                    | true                | true             | true
+        [3, 4, 0]     | true                    | true                | true             | false
+    }
+
+    def 'should support collation'() {
+        given:
+        def outCollectionHelper = getCollectionHelper(new MongoNamespace(mapReduceInputNamespace.getDatabaseName(), 'collectionOut'))
+        outCollectionHelper.drop()
+
+        def document = Document.parse('{_id: 1, str: "foo"}')
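+        // the filter below matches the inserted document only when the case-insensitive collation is applied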
+        getCollectionHelper(mapReduceInputNamespace).insertDocuments(document)
+        def operation = new MapReduceToCollectionOperation(mapReduceInputNamespace,
+                new BsonJavaScript('function(){ emit( this._id, this.str ); }'),
+                new BsonJavaScript('function(key, values){ return values; }'),
+                'collectionOut', null)
+                .filter(BsonDocument.parse('{str: "FOO"}'))
+                .collation(caseInsensitiveCollation)
+
+        when:
+        execute(operation, async)
+
+        then:
+        outCollectionHelper.count() == 1
+
+        where:
+        async << [true, false]
+    }
+
+}
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceWithInlineResultsOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceWithInlineResultsOperationSpecification.groovy
new file mode 100644
index 00000000000..17b3c28f637
--- /dev/null
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/MapReduceWithInlineResultsOperationSpecification.groovy
@@ -0,0 +1,321 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.operation
+
+import com.mongodb.MongoNamespace
+import com.mongodb.OperationFunctionalSpecification
+import com.mongodb.ReadConcern
+import com.mongodb.ReadPreference
+import com.mongodb.ServerAddress
+import com.mongodb.client.test.CollectionHelper
+import com.mongodb.connection.ClusterId
+import com.mongodb.connection.ConnectionDescription
+import com.mongodb.connection.ConnectionId
+import com.mongodb.connection.ServerId
+import com.mongodb.internal.binding.AsyncConnectionSource
+import com.mongodb.internal.binding.AsyncReadBinding
+import com.mongodb.internal.binding.ConnectionSource
+import com.mongodb.internal.binding.ReadBinding
+import com.mongodb.internal.connection.AsyncConnection
+import com.mongodb.internal.connection.Connection
+import com.mongodb.internal.session.SessionContext
+import org.bson.BsonBoolean
+import org.bson.BsonDocument
+import org.bson.BsonDouble
+import org.bson.BsonInt32
+import org.bson.BsonJavaScript
+import org.bson.BsonString
+import org.bson.BsonTimestamp
+import org.bson.Document
+import org.bson.codecs.BsonDocumentCodec
+import org.bson.codecs.DocumentCodec
+
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
+import static com.mongodb.ClusterFixture.executeAsync
+import static com.mongodb.connection.ServerType.STANDALONE
+import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand
+import static com.mongodb.internal.operation.ServerVersionHelper.UNKNOWN_WIRE_VERSION
+
+class MapReduceWithInlineResultsOperationSpecification extends OperationFunctionalSpecification {
+    private final bsonDocumentCodec = new BsonDocumentCodec()
+    def mapReduceOperation = new MapReduceWithInlineResultsOperation(getNamespace(),
+            new BsonJavaScript('function(){ emit( this.name , 1 ); }'),
+            new BsonJavaScript('function(key, values){ return values.length; }'),
+            bsonDocumentCodec)
+
+    def expectedResults = [new BsonDocument('_id', new BsonString('Pete')).append('value', new BsonDouble(2.0)),
+                           new BsonDocument('_id', new BsonString('Sam')).append('value', new BsonDouble(1.0))] as Set
+
+    def setup() {
+        CollectionHelper helper = new CollectionHelper(bsonDocumentCodec, getNamespace())
+        Document pete = new Document('name', 'Pete').append('job', 'handyman')
+        Document sam = new Document('name', 'Sam').append('job', 'plumber')
+        Document pete2 = new Document('name', 'Pete').append('job', 'electrician')
+        helper.insertDocuments(new DocumentCodec(), pete, sam, pete2)
+    }
+
+    def 'should have the correct defaults'() {
+        when:
+        def mapF = new BsonJavaScript('function(){ }')
+        def reduceF = new BsonJavaScript('function(key, values){ }')
+        def operation = new MapReduceWithInlineResultsOperation(helper.namespace, mapF, reduceF,
+                bsonDocumentCodec)
+
+        then:
+        operation.getMapFunction() == mapF
+        operation.getReduceFunction() == reduceF
+        operation.getFilter() == null
+        operation.getFinalizeFunction() == null
+        operation.getScope() == null
+        operation.getSort() == null
+        operation.getLimit() == 0
+        operation.getCollation() == null
+        !operation.isJsMode()
+        !operation.isVerbose()
+    }
+
+    def 'should set optional values correctly'() {
+        when:
+        def filter = new BsonDocument('filter', new BsonInt32(1))
+        def scope = new BsonDocument('scope', new BsonInt32(1))
+        def sort = new BsonDocument('sort', new BsonInt32(1))
+        def finalizeF = new BsonJavaScript('function(key, value){}')
+        def mapF = new BsonJavaScript('function(){ }')
+        def reduceF = new BsonJavaScript('function(key, values){ }')
+        def operation = new MapReduceWithInlineResultsOperation(helper.namespace,
+                mapF, reduceF, bsonDocumentCodec)
+                .filter(filter)
+                .finalizeFunction(finalizeF)
+                .scope(scope)
+                .sort(sort)
+                .jsMode(true)
+                .verbose(true)
+                .limit(20)
+                .collation(defaultCollation)
+
+        then:
+        operation.getMapFunction() == mapF
+        operation.getReduceFunction() == reduceF
+        operation.getFilter() == filter
+        operation.getFinalizeFunction() == finalizeF
+        operation.getScope() == scope
+        operation.getSort() == sort
+        operation.getLimit() == 20
+        operation.getCollation() == defaultCollation
+        operation.isJsMode()
+        operation.isVerbose()
+    }
+
+    def 'should return the correct results'() {
+        given:
+        def operation = mapReduceOperation
+
+        when:
+        def results = executeAndCollectBatchCursorResults(operation, async) as Set
+
+        then:
+        results == expectedResults
+
+        where:
+        async << [true, false]
+    }
+
+    def 'should use the ReadBindings readPreference to set secondaryOk'() {
+        when:
+        def operation = new MapReduceWithInlineResultsOperation(helper.namespace,
+                new BsonJavaScript('function(){ }'), new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec)
+
+        then:
+        testOperationSecondaryOk(operation, [3, 4, 0], readPreference, async, helper.commandResult)
+
+        where:
+        [async, readPreference] << [[true, false], [ReadPreference.primary(), ReadPreference.secondary()]].combinations()
+    }
+
+    def 'should create the expected command'() {
+        when:
+        def operation = new MapReduceWithInlineResultsOperation(helper.namespace,
+                new BsonJavaScript('function(){ }'), new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec)
+        def expectedCommand = new BsonDocument('mapReduce', new BsonString(helper.namespace.getCollectionName()))
+                .append('map', operation.getMapFunction())
+                .append('reduce', operation.getReduceFunction())
+                .append('out', new BsonDocument('inline', new BsonInt32(1)))
+
+        then:
+        testOperation(operation, serverVersion, expectedCommand, async, helper.commandResult)
+
+        when:
+        operation.filter(new BsonDocument('filter', new BsonInt32(1)))
+                .scope(new BsonDocument('scope', new BsonInt32(1)))
+                .sort(new BsonDocument('sort', new BsonInt32(1)))
+                .finalizeFunction(new BsonJavaScript('function(key, value){}'))
+                .jsMode(true)
+                .verbose(true)
+                .limit(20)
+
+        expectedCommand.append('query', operation.getFilter())
+                .append('scope', operation.getScope())
+                .append('sort', operation.getSort())
+                .append('finalize', operation.getFinalizeFunction())
+                .append('jsMode', BsonBoolean.TRUE)
+                .append('verbose', BsonBoolean.TRUE)
+                .append('limit', new BsonInt32(20))
+
+        if (includeCollation) {
+            operation.collation(defaultCollation)
+            expectedCommand.append('collation', defaultCollation.asDocument())
+        }
+
+        then:
+        testOperation(operation, serverVersion, expectedCommand, async, helper.commandResult)
+
+        where:
+        serverVersion | includeCollation | async
+        [3, 4, 0]     | true             | true
+        [3, 4, 0]     | true             | false
+        [3, 0, 0]     | false            | true
+        [3, 0, 0]     | false            | false
+    }
+
+    def 'should support collation'() {
+        given:
+        def document = Document.parse('{_id: 1, str: "foo"}')
+        getCollectionHelper().insertDocuments(document)
+        def operation = new MapReduceWithInlineResultsOperation(namespace,
+                new BsonJavaScript('function(){ emit( this.str, 1 ); }'),
+                new BsonJavaScript('function(key, values){ return Array.sum(values); }'),
+                bsonDocumentCodec)
+                .filter(BsonDocument.parse('{str: "FOO"}'))
+                .collation(caseInsensitiveCollation)
+
+        when:
+        def results = executeAndCollectBatchCursorResults(operation, async)
+
+        then:
+        results == [new BsonDocument('_id', new BsonString('foo')).append('value', new BsonDouble(1))]
+
+        where:
+        async << [true, false]
+    }
+
+    def 'should add read concern to command'() {
+        given:
+        def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext)
+        def binding = Stub(ReadBinding)
+        def source = Stub(ConnectionSource)
+        def connection = Mock(Connection)
+        binding.readPreference >> ReadPreference.primary()
+        binding.operationContext >> operationContext
+        binding.readConnectionSource >> source
+        source.connection >> connection
+        source.retain() >> source
+        source.operationContext >> operationContext
+        def commandDocument = BsonDocument.parse('''
+            { "mapReduce" : "coll",
+              "map" : { "$code" : "function(){ }" },
+              "reduce" : { "$code" : "function(key, values){ }" },
+              "out" : { "inline" : 1 }
+            }''')
+        appendReadConcernToCommand(sessionContext, UNKNOWN_WIRE_VERSION, commandDocument)
+
+        def operation = new MapReduceWithInlineResultsOperation(helper.namespace,
+                new BsonJavaScript('function(){ }'), new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec)
+
+        when:
+        operation.execute(binding)
+
+        then:
+        _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())),
+                6, STANDALONE, 1000, 100000, 100000, [])
+        1 * connection.command(_, commandDocument, _, _, _, operationContext) >>
+                new BsonDocument('results', new BsonArrayWrapper([]))
+                        .append('counts',
+                                new BsonDocument('input', new BsonInt32(0))
+                                        .append('output', new BsonInt32(0))
+                                        .append('emit', new BsonInt32(0)))
+                        .append('timeMillis', new BsonInt32(0))
+        1 * connection.release()
+
+        where:
+        sessionContext << [
+                Stub(SessionContext) {
+                    isCausallyConsistent() >> true
+                    getOperationTime() >> new BsonTimestamp(42, 0)
+                    hasActiveTransaction() >> false
+                    getReadConcern() >> ReadConcern.MAJORITY
+                }
+        ]
+    }
+
+    def 'should add read concern to command asynchronously'() {
+        given:
+        def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext)
+        def binding = Stub(AsyncReadBinding)
+        def source = Stub(AsyncConnectionSource)
+        def connection = Mock(AsyncConnection)
+        binding.readPreference >> ReadPreference.primary()
+        binding.operationContext >> operationContext
+        binding.getReadConnectionSource(_) >> { it[0].onResult(source, null) }
+        source.operationContext >> operationContext
+        source.getConnection(_) >> { it[0].onResult(connection, null) }
+        source.retain() >> source
+        def commandDocument = BsonDocument.parse('''
+            { "mapReduce" : "coll",
+              "map" : { "$code" : "function(){ }" },
+              "reduce" : { "$code" : "function(key, values){ }" },
+              "out" : { "inline" : 1 }
+            }''')
+        appendReadConcernToCommand(sessionContext, UNKNOWN_WIRE_VERSION, commandDocument)
+
+        def operation = new MapReduceWithInlineResultsOperation(helper.namespace,
+                new BsonJavaScript('function(){ }'), new BsonJavaScript('function(key, values){ }'), bsonDocumentCodec)
+
+        when:
+        executeAsync(operation, binding)
+
+        then:
+        _ * connection.description >> new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())),
+                6, STANDALONE, 1000, 100000, 100000, [])
+        1 * connection.commandAsync(_, commandDocument, _, _, _, operationContext, _) >> {
+            it.last().onResult(new BsonDocument('results', new BsonArrayWrapper([]))
+                    .append('counts',
+                            new BsonDocument('input', new BsonInt32(0))
+                                    .append('output', new BsonInt32(0))
+                                    .append('emit', new BsonInt32(0)))
+                    .append('timeMillis', new BsonInt32(0)),
+                    null)
+        }
+        1 * connection.release()
+
+        where:
+        sessionContext << [
+                Stub(SessionContext) {
+                    isCausallyConsistent() >> true
+                    getOperationTime() >> new BsonTimestamp(42, 0)
+                    hasActiveTransaction() >> false
+                    getReadConcern() >> ReadConcern.MAJORITY
+                }
+        ]
+    }
+
+    def helper = [
+            namespace: new MongoNamespace('db', 'coll'),
+            commandResult: BsonDocument.parse('{ok: 1.0, counts: {input: 1, emit: 1, output: 1}, timeMillis: 1}')
+                    .append('results', new BsonArrayWrapper([]))
+    ]
+}
diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/MixedBulkWriteOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/MixedBulkWriteOperationSpecification.groovy
new file mode 100644
index 00000000000..619eb6747f7
--- /dev/null
+++ b/driver-core/src/test/functional/com/mongodb/internal/operation/MixedBulkWriteOperationSpecification.groovy
@@ -0,0 +1,1253 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.operation
+
+import com.mongodb.MongoBulkWriteException
+import com.mongodb.MongoClientException
+import com.mongodb.MongoNamespace
+import com.mongodb.MongoSocketException
+import com.mongodb.MongoSocketReadException
+import com.mongodb.OperationFunctionalSpecification
+import com.mongodb.ServerAddress
+import com.mongodb.WriteConcern
+import com.mongodb.bulk.BulkWriteInsert
+import com.mongodb.bulk.BulkWriteResult
+import com.mongodb.bulk.BulkWriteUpsert
+import com.mongodb.client.model.CreateCollectionOptions
+import com.mongodb.client.model.ValidationOptions
+import com.mongodb.internal.bulk.DeleteRequest
+import com.mongodb.internal.bulk.InsertRequest
+import com.mongodb.internal.bulk.UpdateRequest
+import com.mongodb.internal.bulk.WriteRequest
+import com.mongodb.spock.Slow
+import org.bson.BsonBinary
+import org.bson.BsonBoolean
+import org.bson.BsonDocument
+import org.bson.BsonInt32
+import org.bson.BsonInt64
+import org.bson.BsonObjectId
+import org.bson.BsonString
+import org.bson.Document
+import org.bson.RawBsonDocument
+import org.bson.codecs.BsonDocumentCodec
+import org.bson.codecs.DocumentCodec
+import org.bson.types.ObjectId
+import spock.lang.IgnoreIf
+
+import static com.mongodb.ClusterFixture.configureFailPoint
+import static com.mongodb.ClusterFixture.disableFailPoint
+import static com.mongodb.ClusterFixture.disableOnPrimaryTransactionalWriteFailPoint
+import static com.mongodb.ClusterFixture.enableOnPrimaryTransactionalWriteFailPoint
+import static com.mongodb.ClusterFixture.getAsyncSingleConnectionBinding
+import static com.mongodb.ClusterFixture.getSingleConnectionBinding
+import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet
+import static com.mongodb.ClusterFixture.serverVersionLessThan
+import static com.mongodb.WriteConcern.ACKNOWLEDGED
+import static com.mongodb.WriteConcern.UNACKNOWLEDGED
+import static com.mongodb.client.model.Filters.eq
+import static com.mongodb.client.model.Filters.gte
+import static com.mongodb.connection.ServerType.REPLICA_SET_PRIMARY
+import static com.mongodb.connection.ServerType.STANDALONE
+import static com.mongodb.internal.bulk.WriteRequest.Type.DELETE
+import static com.mongodb.internal.bulk.WriteRequest.Type.REPLACE
+import static com.mongodb.internal.bulk.WriteRequest.Type.UPDATE
+
+@SuppressWarnings('ClassSize')
+class MixedBulkWriteOperationSpecification extends OperationFunctionalSpecification {
+
+    def 'should throw IllegalArgumentException for empty list of requests'() {
+        when:
+        new MixedBulkWriteOperation(getNamespace(), [], true, ACKNOWLEDGED, false)
+
+        then:
+        thrown(IllegalArgumentException)
+    }
+
+    def 'should have the expected passed values'() {
+        when:
+        def operation = new MixedBulkWriteOperation(getNamespace(), requests, ordered, writeConcern, retryWrites)
+                .bypassDocumentValidation(bypassValidation)
+
+        then:
+        operation.isOrdered() == ordered
+        operation.getNamespace() == getNamespace()
+        operation.getWriteRequests() == requests
+        operation.getRetryWrites() == retryWrites
+        operation.getWriteConcern() == writeConcern
+        operation.getBypassDocumentValidation() == bypassValidation
+
+        where:
+        ordered | writeConcern   | bypassValidation | retryWrites | requests
+        true    | ACKNOWLEDGED   | null             | true        | [new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))]
+        false   | UNACKNOWLEDGED | true             | false       | [new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))]
+        false   | UNACKNOWLEDGED | false            | false       | [new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))]
+    }
+
+    def 'when no document with the same id exists, should insert the document'() {
+        given:
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))], ordered, ACKNOWLEDGED, false)
+
+        when:
+        BulkWriteResult result = execute(operation, async)
+
+        then:
+        result.insertedCount == 1
+        result.inserts == [new BulkWriteInsert(0, new BsonInt32(1))]
+        result.upserts == []
+        getCollectionHelper().count() == 1
+
+        where:
+        [async, ordered] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'when a document with the same id exists, should throw an exception'() {
+        given:
+        def document = new BsonDocument('_id', new BsonInt32(1))
+        getCollectionHelper().insertDocuments(document)
+        def operation = new MixedBulkWriteOperation(getNamespace(), [new InsertRequest(document)], ordered,
+                ACKNOWLEDGED, false)
+
+        when:
+        execute(operation, async)
+
+        then:
+        def ex = thrown(MongoBulkWriteException)
+        ex.getWriteErrors().get(0).code == 11000
+
+        where:
+        [async, ordered] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'RawBsonDocument should not generate an _id'() {
+        given:
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new InsertRequest(RawBsonDocument.parse('{_id: 1}'))], ordered, ACKNOWLEDGED, false)
+
+        when:
+        BulkWriteResult result = execute(operation, async)
+
+        then:
+        result.insertedCount == 1
+        result.inserts == [new BulkWriteInsert(0, null)]
+        result.upserts == []
+        getCollectionHelper().count() == 1
+
+        where:
+        [async, ordered] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'when documents match the query, a remove of one should remove one of them'() {
+        given:
+        getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('x', true), new Document('x', true))
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new DeleteRequest(new BsonDocument('x', BsonBoolean.TRUE)).multi(false)],
+                ordered, ACKNOWLEDGED, false)
+
+        when:
+        BulkWriteResult result = execute(operation, async)
+
+        then:
+        result == BulkWriteResult.acknowledged(DELETE, 1, 0, [], [])
+        getCollectionHelper().count() == 1
+
+        where:
+        [async, ordered] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'when documents match the query, a remove should remove all of them'() {
+        given:
+        getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('x', true), new Document('x', true),
+                new Document('x', false))
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new DeleteRequest(new BsonDocument('x', BsonBoolean.TRUE))],
+                ordered, ACKNOWLEDGED, false)
+
+        when:
+        BulkWriteResult result = execute(operation, async)
+
+        then:
+        result == BulkWriteResult.acknowledged(DELETE, 2, 0, [], [])
+        getCollectionHelper().count() == 1
+
+        where:
+        [async, ordered] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'when multiple documents match the query, update of one should update only one of them'() {
+        given:
+        getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('x', true), new Document('x', true))
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new UpdateRequest(new BsonDocument('x', BsonBoolean.TRUE),
+                        new BsonDocument('$set', new BsonDocument('y', new BsonInt32(1))),
+                        UPDATE).multi(false)],
+                ordered, ACKNOWLEDGED, false)
+
+        when:
+        BulkWriteResult result = execute(operation, async)
+
+        then:
+        result == BulkWriteResult.acknowledged(UPDATE, 1, expectedModifiedCount(1), [], [])
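+        // exactly one of the two matching documents should have been modified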
+        getCollectionHelper().count(new Document('y', 1)) == 1
+
+        where:
+        [async, ordered] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'when documents match the query, update multi should update all of them'() {
+        given:
+        getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('x', true), new Document('x', true))
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new UpdateRequest(new BsonDocument('x', BsonBoolean.TRUE),
+                        new BsonDocument('$set', new BsonDocument('y', new BsonInt32(1))),
+                        UPDATE).multi(true)], ordered, ACKNOWLEDGED, false)
+
+        when:
+        BulkWriteResult result = execute(operation, async)
+
+        then:
+        result == BulkWriteResult.acknowledged(UPDATE, 2, expectedModifiedCount(2), [], [])
+        getCollectionHelper().count(new Document('y', 1)) == 2
+
+        where:
+        [async, ordered] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'when no document matches the query, an update of one with upsert should insert a document'() {
+        given:
+        def id = new ObjectId()
+        def query = new BsonDocument('_id', new BsonObjectId(id))
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new UpdateRequest(query, new BsonDocument('$set', new BsonDocument('x', new BsonInt32(2))),
+                        UPDATE).upsert(true)], ordered, ACKNOWLEDGED, false)
+
+        when:
+        BulkWriteResult result = execute(operation, async)
+
+        then:
+        result == BulkWriteResult.acknowledged(UPDATE, 0, expectedModifiedCount(0), [new BulkWriteUpsert(0, new BsonObjectId(id))], [])
+        getCollectionHelper().find().first() == new Document('_id', query.getObjectId('_id').getValue()).append('x', 2)
+
+        where:
+        [async, ordered] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'when no document matches the query, an update multi with upsert should insert a document'() {
+        given:
+        def id = new ObjectId()
+        def query = new BsonDocument('_id', new BsonObjectId(id))
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new UpdateRequest(query, new BsonDocument('$set', new BsonDocument('x', new BsonInt32(2))),
+                        UPDATE).upsert(true).multi(true)],
+                ordered, ACKNOWLEDGED, false)
+
+        when:
+        BulkWriteResult result = execute(operation, async)
+
+        then:
+        result == BulkWriteResult.acknowledged(UPDATE, 0, expectedModifiedCount(0),
+                [new BulkWriteUpsert(0, new BsonObjectId(id))], [])
+        getCollectionHelper().find().first() == new Document('_id', query.getObjectId('_id').getValue()).append('x', 2)
+
+        where:
+        [async, ordered] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'when documents match the query, update one with upsert should update only one of them'() {
+        given:
+        getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('x', true), new Document('x', true))
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new UpdateRequest(new BsonDocument('x', BsonBoolean.TRUE),
+                        new BsonDocument('$set', new BsonDocument('y', new BsonInt32(1))),
+                        UPDATE).multi(false).upsert(true)],
+                ordered, ACKNOWLEDGED, false)
+
+        when:
+        BulkWriteResult result = execute(operation, async)
+
+        then:
+        result == BulkWriteResult.acknowledged(UPDATE, 1, expectedModifiedCount(1), [], [])
+        getCollectionHelper().count(new Document('y', 1)) == 1
+
+        where:
+        [async, ordered] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'when documents match the query, update multi with upsert should update all of them'() {
+        given:
+        getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('x', true), new Document('x', true))
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new UpdateRequest(new BsonDocument('x', BsonBoolean.TRUE),
+                        new BsonDocument('$set', new BsonDocument('y', new BsonInt32(1))),
+                        UPDATE).upsert(true).multi(true)],
+                ordered, ACKNOWLEDGED, false)
+
+        when:
+        BulkWriteResult result = execute(operation, async)
+
+        then:
+        result == BulkWriteResult.acknowledged(UPDATE, 2, expectedModifiedCount(2), [], [])
+        getCollectionHelper().count(new Document('y', 1)) == 2
+
+        where:
+        [async, ordered] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'when updating with an empty document, update should throw IllegalArgumentException'() {
+        given:
+        def id = new ObjectId()
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new UpdateRequest(new BsonDocument('_id', new BsonObjectId(id)), new BsonDocument(), UPDATE)],
+                true, ACKNOWLEDGED, false)
+
+        when:
+        execute(operation, async)
+
+        then:
+        thrown(IllegalArgumentException)
+
+        where:
+        [async, ordered] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'when replacing with an empty document, update should not throw IllegalArgumentException'() {
+        given:
+        def id = new ObjectId()
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new UpdateRequest(new BsonDocument('_id', new BsonObjectId(id)), new BsonDocument(), REPLACE)],
+                true, ACKNOWLEDGED, false)
+
+        when:
+        execute(operation, async)
+
+        then:
+        noExceptionThrown()
+
+        where:
+        [async, ordered] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'when updating with an invalid document, update should throw IllegalArgumentException'() {
+        given:
+        def id = new ObjectId()
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new UpdateRequest(new BsonDocument('_id', new BsonObjectId(id)), new BsonDocument('a', new BsonInt32(1)), UPDATE)],
+                true, ACKNOWLEDGED, false)
+
+        when:
+        execute(operation, async)
+
+        then:
+        def e = thrown(IllegalArgumentException)
+        e.getMessage() == 'All update operators must start with \'$\', but \'a\' does not'
+
+        where:
+        [async, ordered] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'when replacing an invalid document, replace should throw IllegalArgumentException'() {
+        given:
+        def id = new ObjectId()
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new UpdateRequest(new BsonDocument('_id', new BsonObjectId(id)),
+                        new BsonDocument('$set', new BsonDocument('x', new BsonInt32(1))), REPLACE)],
+                true, ACKNOWLEDGED, false)
+
+        when:
+        execute(operation, async)
+
+        then:
+        def e = thrown(IllegalArgumentException)
+        e.getMessage() == 'Field names in a replacement document can not start with \'$\' but \'$set\' does'
+
+        where:
+        [async, ordered] << [[true, false], [true, false]].combinations()
+    }
+
+    @IgnoreIf({ serverVersionLessThan(5, 0) })
+    def 'when inserting a document with a field starting with a dollar sign, insert should not throw'() {
+        given:
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new InsertRequest(new BsonDocument('$inc', new BsonDocument('x', new BsonInt32(1))))],
+                true, ACKNOWLEDGED, false)
+
+        when:
+        execute(operation, async)
+
+        then:
+        notThrown(IllegalArgumentException)
+
+        where:
+        [async, ordered] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'when a document contains a key with an illegal character, replacing a document with it should throw IllegalArgumentException'() {
+        given:
+        def id = new ObjectId()
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new UpdateRequest(new BsonDocument('_id', new BsonObjectId(id)),
+                        new BsonDocument('$set', new BsonDocument('x', new BsonInt32(1))),
+                        REPLACE)
+                         .upsert(true)],
+                true, ACKNOWLEDGED, false)
+
+        when:
+        execute(operation, async)
+
+        then:
+        thrown(IllegalArgumentException)
+
+        where:
+        [async, ordered] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'when no document matches the query, a replace with upsert should insert a document'() {
+        given:
+        def id = new ObjectId()
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new UpdateRequest(new BsonDocument('_id', new BsonObjectId(id)),
+                        new BsonDocument('_id', new BsonObjectId(id))
+                                .append('x', new BsonInt32(2)),
+                        REPLACE)
+                         .upsert(true)],
+                ordered, ACKNOWLEDGED, false)
+
+        when:
+        BulkWriteResult result = execute(operation, async)
+
+        then:
+        result == BulkWriteResult.acknowledged(UPDATE, 0, expectedModifiedCount(0), [new BulkWriteUpsert(0, new BsonObjectId(id))], [])
+        getCollectionHelper().find().first() == new Document('_id', id).append('x', 2)
+
+        where:
+        [async, ordered] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'when a custom _id is upserted it should be in the write result'() {
+        given:
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new UpdateRequest(new BsonDocument('_id', new BsonInt32(0)),
+                        new BsonDocument('$set', new BsonDocument('a', new BsonInt32(0))),
+                        UPDATE)
+                         .upsert(true),
+                 new UpdateRequest(new BsonDocument('a', new BsonInt32(1)),
+                         new BsonDocument('_id', new BsonInt32(1)),
+                         REPLACE)
+                         .upsert(true),
+                 new UpdateRequest(new BsonDocument('_id', new BsonInt32(2)),
+                         new BsonDocument('_id', new BsonInt32(2)),
+                         REPLACE)
+                         .upsert(true)
+                ],
+                ordered, ACKNOWLEDGED, false)
+
+        when:
+        BulkWriteResult result = execute(operation, async)
+
+        then:
+        result == BulkWriteResult.acknowledged(UPDATE, 0, expectedModifiedCount(0),
+                [new BulkWriteUpsert(0, new BsonInt32(0)), new BulkWriteUpsert(1, new BsonInt32(1)),
+                 new BulkWriteUpsert(2, new BsonInt32(2))], [])
+        getCollectionHelper().count() == 3
+
+        where:
+        [async, ordered] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'unacknowledged upserts with custom _id should not error'() {
+        given:
+        def binding = async ? getAsyncSingleConnectionBinding() : getSingleConnectionBinding()
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new UpdateRequest(new BsonDocument('_id', new BsonInt32(0)),
+                        new BsonDocument('$set', new BsonDocument('a', new BsonInt32(0))),
+                        UPDATE)
+                         .upsert(true),
+                 new UpdateRequest(new BsonDocument('a', new BsonInt32(1)),
+                         new BsonDocument('_id', new BsonInt32(1)),
+                         REPLACE)
+                         .upsert(true),
+                 new UpdateRequest(new BsonDocument('_id', new BsonInt32(2)),
+                         new BsonDocument('_id', new BsonInt32(2)),
+                         REPLACE)
+                         .upsert(true)
+                ],
+                ordered, UNACKNOWLEDGED, false)
+
+        when:
+        def result = execute(operation, binding)
+        acknowledgeWrite(binding)
+
+        then:
+        !result.wasAcknowledged()
+        getCollectionHelper().count() == 4
+
+        where:
+        [async, ordered] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'when multiple documents match the query, replace should replace only one of them'() {
+        given:
+        getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('x', true), new Document('x', true))
+
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new UpdateRequest(new BsonDocument('x', BsonBoolean.TRUE),
+                        new BsonDocument('y', new BsonInt32(1)).append('x', BsonBoolean.FALSE),
+                        REPLACE).upsert(true)],
+                ordered, ACKNOWLEDGED, false)
+
+        when:
+        BulkWriteResult result = execute(operation, async)
+
+        then:
+        result == BulkWriteResult.acknowledged(UPDATE, 1, expectedModifiedCount(1), [], [])
+        getCollectionHelper().count(new Document('x', false)) == 1
+
+        where:
+        [async, ordered] << [[true, false], [true, false]].combinations()
+    }
+
+    @Slow
+    def 'when a replacement document is 16MB, the document is still replaced'() {
+        given:
+        getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('_id', 1))
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)),
+                        new BsonDocument('_id', new BsonInt32(1))
+                                .append('x', new BsonBinary(new byte[1024 * 1024 * 16 - 30])),
+                        REPLACE).upsert(true)], true, ACKNOWLEDGED, false)
+
+        when:
+        BulkWriteResult result = execute(operation, async)
+
+        then:
+        result == BulkWriteResult.acknowledged(UPDATE, 1, expectedModifiedCount(1), [], [])
+        getCollectionHelper().count() == 1
+
+        where:
+        async << [true, false]
+    }
+
+    @Slow
+    def 'when two update documents together exceed 16MB, the documents are still updated'() {
+        given:
+        getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('_id', 1), new Document('_id', 2))
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)),
+                        new BsonDocument('_id', new BsonInt32(1))
+                                .append('x', new BsonBinary(new byte[1024 * 1024 * 16 - 30])),
+                        REPLACE),
+                 new UpdateRequest(new BsonDocument('_id', new BsonInt32(2)),
+                         new BsonDocument('_id', new BsonInt32(2))
+                                 .append('x', new BsonBinary(new byte[1024 * 1024 * 16 - 30])),
+                         REPLACE)],
+                true, ACKNOWLEDGED, false)
+
+        when:
+        BulkWriteResult result = execute(operation, async)
+
+        then:
+        result == BulkWriteResult.acknowledged(UPDATE, 2, expectedModifiedCount(2), [], [])
+        getCollectionHelper().count() == 2
+
+        where:
+        async << [true, false]
+    }
+
+    @Slow
+    def 'when documents together are just below the max message size, the documents are still inserted'() {
+        given:
+        def bsonBinary = new BsonBinary(new byte[16 * 1000 * 1000 - (getCollectionName().length() + 33)])
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [
+                        new InsertRequest(new BsonDocument('_id', new BsonObjectId()).append('b', bsonBinary)),
+                        new InsertRequest(new BsonDocument('_id', new BsonObjectId()).append('b', bsonBinary)),
+                        new InsertRequest(new BsonDocument('_id', new BsonObjectId()).append('b', bsonBinary))
+                ],
+                true, ACKNOWLEDGED, false)
+
+        when:
+        BulkWriteResult result = execute(operation, true)
+
+        then:
+        result.wasAcknowledged()
+        result.insertedCount == 3
+        getCollectionHelper().count() == 3
+    }
+
+    @Slow
+    def 'when documents together are just above the max message size, the documents are still inserted'() {
+        given:
+        def bsonBinary = new BsonBinary(new byte[16 * 1000 * 1000 - (getCollectionName().length() + 32)])
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [
+                        new InsertRequest(new BsonDocument('_id', new BsonObjectId()).append('b', bsonBinary)),
+                        new InsertRequest(new BsonDocument('_id', new BsonObjectId()).append('b', bsonBinary)),
+                        new InsertRequest(new BsonDocument('_id', new BsonObjectId()).append('b', bsonBinary))
+                ],
+                true, ACKNOWLEDGED, false)
+
+        when:
+        BulkWriteResult result = execute(operation, true)
+
+        then:
+        result.wasAcknowledged()
+        result.insertedCount == 3
+        getCollectionHelper().count() == 3
+    }
+
+    def 'should handle multi-length runs of ordered insert, update, replace, and remove'() {
+        given:
+        getCollectionHelper().insertDocuments(getTestInserts())
+        def operation = new MixedBulkWriteOperation(getNamespace(), getTestWrites(), ordered, ACKNOWLEDGED, false)
+
+        when:
+        BulkWriteResult result = execute(operation, async)
+
+        then:
+        result.wasAcknowledged()
+        getCollectionHelper().find(new Document('_id', 1)).first() == new Document('_id', 1).append('x', 2)
+        getCollectionHelper().find(new Document('_id', 2)).first() == new Document('_id', 2).append('x', 3)
+        getCollectionHelper().find(new Document('_id', 3)).isEmpty()
+        getCollectionHelper().find(new Document('_id', 4)).isEmpty()
+        getCollectionHelper().find(new Document('_id', 5)).first() == new Document('_id', 5).append('x', 4)
+        getCollectionHelper().find(new Document('_id', 6)).first() == new Document('_id', 6).append('x', 5)
+        getCollectionHelper().find(new Document('_id', 7)).first() == new Document('_id', 7)
+        getCollectionHelper().find(new Document('_id', 8)).first() == new Document('_id', 8)
+
+        where:
+        [async, ordered] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'should handle multi-length runs of UNACKNOWLEDGED insert, update, replace, and remove'() {
+        given:
+        getCollectionHelper().insertDocuments(getTestInserts())
+        def operation = new MixedBulkWriteOperation(getNamespace(), getTestWrites(), ordered, UNACKNOWLEDGED,
+                false)
+        def binding = async ? getAsyncSingleConnectionBinding() : getSingleConnectionBinding()
+
+        when:
+        def result = execute(operation, binding)
+        execute(new MixedBulkWriteOperation(namespace,
+                [new InsertRequest(new BsonDocument('_id', new BsonInt32(9)))], true, ACKNOWLEDGED, false), binding)
+
+        then:
+        !result.wasAcknowledged()
+        acknowledgeWrite(binding)
+        getCollectionHelper().find(new Document('_id', 1)).first() == new Document('_id', 1).append('x', 2)
+        getCollectionHelper().find(new Document('_id', 2)).first() == new Document('_id', 2).append('x', 3)
+        getCollectionHelper().find(new Document('_id', 3)).isEmpty()
+        getCollectionHelper().find(new Document('_id', 4)).isEmpty()
+        getCollectionHelper().find(new Document('_id', 5)).first() == new Document('_id', 5).append('x', 4)
+        getCollectionHelper().find(new Document('_id', 6)).first() == new Document('_id', 6).append('x', 5)
+        getCollectionHelper().find(new Document('_id', 7)).first() == new Document('_id', 7)
+        getCollectionHelper().find(new Document('_id', 8)).first() == new Document('_id', 8)
+
+        where:
+        [async, ordered] << [[true, false], [true, false]].combinations()
+    }
+
+    @Slow
+    def 'should split when the number of writes is larger than the max write batch size'() {
+        given:
+        def binding = async ? getAsyncSingleConnectionBinding() : getSingleConnectionBinding()
+        def maxWriteBatchSize = getCollectionHelper().hello().getInteger('maxWriteBatchSize').intValue()
+        def numberOfWrites = maxWriteBatchSize + 100
+        def writes = []
+
+        (1..numberOfWrites).each {
+            writes.add(new InsertRequest(new BsonDocument()))
+        }
+        def operation = new MixedBulkWriteOperation(getNamespace(), writes, ordered, ACKNOWLEDGED, false)
+
+        when:
+        execute(operation, binding)
+        acknowledgeWrite(binding)
+
+        then:
+        getCollectionHelper().count() == numberOfWrites + 1
+
+        where:
+        [async, ordered, writeConcern] << [[true, false], [true, false], [ACKNOWLEDGED, UNACKNOWLEDGED]].combinations()
+    }
+
+    def 'should be able to merge upserts across batches'() {
+        given:
+        def writeOperations = []
+        (0..1002).each {
+            def upsert = new UpdateRequest(new BsonDocument('key', new BsonInt32(it)),
+                    new BsonDocument('$set', new BsonDocument('key', new BsonInt32(it))),
+                    UPDATE).upsert(true)
+            writeOperations.add(upsert)
+            writeOperations.add(new DeleteRequest(new BsonDocument('key', new BsonInt32(it))))
+        }
+        def operation = new MixedBulkWriteOperation(getNamespace(), writeOperations, ordered, ACKNOWLEDGED, false)
+
+        when:
+        BulkWriteResult result = execute(operation, async)
+
+        then:
+        result.deletedCount == result.upserts.size()
+        getCollectionHelper().count() == 0
+
+        where:
+        [async, ordered] << [[false], [true]].combinations()
+    }
+
+    def 'error details should have correct index on ordered write failure'() {
+        given:
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new InsertRequest(new BsonDocument('_id', new BsonInt32(1))),
+                 new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)),
+                         new BsonDocument('$set', new BsonDocument('x', new BsonInt32(3))),
+                         UPDATE),
+                 new InsertRequest(new BsonDocument('_id', new BsonInt32(1))) // this should fail with index 2
+                ], true, ACKNOWLEDGED, false)
+
+        when:
+        execute(operation, async)
+
+        then:
+        def ex = thrown(MongoBulkWriteException)
+        ex.writeErrors.size() == 1
+        ex.writeErrors[0].index == 2
+        ex.writeErrors[0].code == 11000
+
+        where:
+        async << [true, false]
+    }
+
+    def 'error details should have correct index on unordered write failure'() {
+        given:
+        getCollectionHelper().insertDocuments(getTestInserts())
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new InsertRequest(new BsonDocument('_id', new BsonInt32(1))),
+                 new UpdateRequest(new BsonDocument('_id', new BsonInt32(2)),
+                         new BsonDocument('$set', new BsonDocument('x', new BsonInt32(3))),
+                         UPDATE),
+                 new InsertRequest(new BsonDocument('_id', new BsonInt32(3))) // this should fail with index 2
+                ], false, ACKNOWLEDGED, false)
+
+        when:
+        execute(operation, async)
+
+        then:
+        def ex = thrown(MongoBulkWriteException)
+        ex.writeErrors.size() == 2
+        ex.writeErrors[0].index == 0
+        ex.writeErrors[0].code == 11000
+        ex.writeErrors[1].index == 2
+        ex.writeErrors[1].code == 11000
+
+        where:
+        async << [true, false]
+    }
+
+    def 'should continue to execute batches after a failure if writes are unordered'() {
+        given:
+        getCollectionHelper().insertDocuments([new BsonDocument('_id', new BsonInt32(500)), new BsonDocument('_id', new BsonInt32(1500))])
+        def inserts = []
+        for (int i = 0; i < 2000; i++) {
+            inserts.add(new InsertRequest(new BsonDocument('_id', new BsonInt32(i))))
+        }
+        def operation = new MixedBulkWriteOperation(getNamespace(), inserts, false, ACKNOWLEDGED, false)
+
+        when:
+        execute(operation, async)
+
+        then:
+        def ex = thrown(MongoBulkWriteException)
+        ex.writeErrors.size() == 2
+        ex.getWriteResult().getInsertedCount() == 1998
+        getCollectionHelper().count() == 2000
+
+        where:
+        async << [true, false]
+    }
+
+    def 'should stop executing batches after a failure if writes are ordered'() {
+        given:
+        getCollectionHelper().insertDocuments([new BsonDocument('_id', new BsonInt32(500)), new BsonDocument('_id', new BsonInt32(1500))])
+        def inserts = []
+        for (int i = 0; i < 2000; i++) {
+            inserts.add(new InsertRequest(new BsonDocument('_id', new BsonInt32(i))))
+        }
+        def operation = new MixedBulkWriteOperation(getNamespace(), inserts, true, ACKNOWLEDGED, false)
+
+        when:
+        execute(operation, async)
+
+        then:
+        def ex = thrown(MongoBulkWriteException)
+        ex.writeErrors.size() == 1
+        ex.getWriteResult().getInsertedCount() == 500
+        getCollectionHelper().count() == 502
+
+        where:
+        async << [true, false]
+    }
+
+    // using w = 5 to force a timeout
+    @IgnoreIf({ !isDiscoverableReplicaSet() })
+    def 'should throw bulk write exception with a write concern error when wtimeout is exceeded'() {
+        given:
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))],
+                false, new WriteConcern(5, 1), false)
+
+        when:
+        execute(operation, async)
+
+        then:
+        def ex = thrown(MongoBulkWriteException)
+        ex.getWriteConcernError() != null
+
+        where:
+        async << [true, false]
+    }
+
+    @IgnoreIf({ !isDiscoverableReplicaSet() })
+    def 'when there is a duplicate key error and a write concern error, both should be reported'() {
+        given:
+        getCollectionHelper().insertDocuments(getTestInserts())
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new InsertRequest(new BsonDocument('_id', new BsonInt32(7))),
+                 new InsertRequest(new BsonDocument('_id', new BsonInt32(1))) // duplicate key
+                ], false, new WriteConcern(4, 1), false)
+
+        when:
+        execute(operation, async) // This is assuming that it won't be able to replicate to 4 servers in 1 ms
+
+        then:
+        def ex = thrown(MongoBulkWriteException)
+        ex.writeErrors.size() == 1
+        ex.writeErrors[0].index == 1
+        ex.writeErrors[0].code == 11000
+        ex.writeConcernError != null
+
+        where:
+        async << [true, false]
+    }
+
+    @IgnoreIf({ !isDiscoverableReplicaSet() })
+    def 'should throw on write concern error on multiple failpoint'() {
+        given:
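+        // seed the test documents so the delete matches an existing key and the insert duplicates one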
+        getCollectionHelper().insertDocuments(getTestInserts())
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new DeleteRequest(new BsonDocument('_id', new BsonInt32(2))),  // existing key
+                 new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))  // existing (duplicate) key
+                ], true, ACKNOWLEDGED, true)
+
+        def failPoint = BsonDocument.parse('''{
+            "configureFailPoint": "failCommand",
+            "mode": {"times": 2 },
+            "data": { "failCommands": ["delete"],
+                      "writeConcernError": {"code": 91, "errmsg": "Replication is being shut down"}}}''')
+        configureFailPoint(failPoint)
+
+        when:
+        execute(operation, async) // the failCommand fail point injects a write concern error on the delete commands
+
+        then:
+        def ex = thrown(MongoBulkWriteException)
+        ex.writeErrors.size() == 1
+        ex.writeErrors[0].index == 1
+        ex.writeErrors[0].code == 11000
+        ex.writeConcernError != null
+        ex.writeConcernError.code == 91
+
+        cleanup:
+        disableFailPoint('failCommand')
+
+        where:
+        async << [true, false]
+    }
+
+    def 'should throw IllegalArgumentException when passed an empty bulk operation'() {
+        when:
+        new MixedBulkWriteOperation(getNamespace(), [], ordered, UNACKNOWLEDGED, false)
+
+        then:
+        thrown(IllegalArgumentException)
+
+        where:
+        ordered << [true, false]
+    }
+
+    def 'should throw if bypassDocumentValidation is set and writeConcern is UNACKNOWLEDGED'() {
+        given:
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new InsertRequest(BsonDocument.parse('{ level: 9 }'))], true, UNACKNOWLEDGED, false)
+                .bypassDocumentValidation(bypassDocumentValidation)
+
+        when:
+        execute(operation, async)
+
+        then:
+        thrown(MongoClientException)
+
+        where:
+        [async, bypassDocumentValidation] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'should throw if collation is set and write is UNACKNOWLEDGED'() {
+        given:
+        def operation = new MixedBulkWriteOperation(getNamespace(),
+                [new DeleteRequest(BsonDocument.parse('{ level: 9 }')).collation(defaultCollation)], true, UNACKNOWLEDGED, false)
+
+        when:
+        execute(operation, async)
+
+        then:
+        thrown(MongoClientException)
+
+        where:
+        [async, bypassDocumentValidation] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'should honour the bypass validation flag for inserts'() {
+        given:
+        def namespace = new MongoNamespace(getDatabaseName(), 'collection')
+        def collectionHelper = getCollectionHelper(namespace)
+        collectionHelper.create(namespace.getCollectionName(), new CreateCollectionOptions().validationOptions(
+                new ValidationOptions().validator(gte('level', 10))))
+        def operation = new MixedBulkWriteOperation(namespace,
+                [new InsertRequest(BsonDocument.parse('{ level: 9 }'))], ordered, ACKNOWLEDGED, false)
+
+        when:
+        execute(operation, async)
+
+        then:
+        def ex = thrown(MongoBulkWriteException)
+        ex.getWriteErrors().get(0).code == 121
+
+        when:
+        operation.bypassDocumentValidation(true)
+        BulkWriteResult result = execute(operation, async)
+
+        then:
+        notThrown(MongoBulkWriteException)
+        result.wasAcknowledged()
+        result.insertedCount == 1
+        collectionHelper.count() == 1
+
+        cleanup:
+        collectionHelper?.drop()
+
+        where:
+        [async, ordered] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'should honour the bypass validation flag for updates'() {
+        given:
+        def namespace = new MongoNamespace(getDatabaseName(), 'collection')
+        def collectionHelper = getCollectionHelper(namespace)
+        collectionHelper.create(namespace.getCollectionName(), new CreateCollectionOptions().validationOptions(
+                new ValidationOptions().validator(gte('level', 10))))
+
+        collectionHelper.insertDocuments(BsonDocument.parse('{ x: true, level: 10}'))
+        def operation = new MixedBulkWriteOperation(namespace,
+                [new UpdateRequest(BsonDocument.parse('{x: true}'), BsonDocument.parse('{$inc: {level: -1}}'), UPDATE).multi(false)],
+                ordered, ACKNOWLEDGED, false)
+
+        when:
+        execute(operation, async)
+
+        then:
+        def ex = thrown(MongoBulkWriteException)
+        ex.getWriteErrors().get(0).code == 121
+
+        when:
+        operation.bypassDocumentValidation(true)
+        BulkWriteResult result = execute(operation, async)
+
+        then:
+        result == BulkWriteResult.acknowledged(UPDATE, 1, expectedModifiedCount(1), [], [])
+        collectionHelper.count(eq('level', 9)) == 1
+
+        where:
+        [async, ordered] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'should support collation'() {
+        given:
+        getCollectionHelper().insertDocuments(Document.parse('{str: "foo"}'), Document.parse('{str: "bar"}'))
+        def requests = [new DeleteRequest(BsonDocument.parse('{str: "FOO"}')).collation(caseInsensitiveCollation),
+                        new UpdateRequest(BsonDocument.parse('{str: "BAR"}'), BsonDocument.parse('{str: "bar"}'), REPLACE)
+                                .collation(caseInsensitiveCollation)]
+        def operation = new MixedBulkWriteOperation(namespace, requests, false, ACKNOWLEDGED, false)
+
+        when:
+        BulkWriteResult result = execute(operation, async)
+
+        then:
+        result.getDeletedCount() == 1
+        result.getModifiedCount() == 1
+
+        where:
+        async << [true, false]
+    }
+
+    def 'should support retryWrites=true'() {
+        given:
+        def testWrites = getTestWrites()
+        Collections.shuffle(testWrites)
+        getCollectionHelper().insertDocuments(getTestInserts())
+        def operation = new MixedBulkWriteOperation(getNamespace(), testWrites, true, ACKNOWLEDGED, true)
+
+        when:
+        if (isDiscoverableReplicaSet()) {
+            enableOnPrimaryTransactionalWriteFailPoint(BsonDocument.parse(failPoint))
+        }
+        BulkWriteResult result = executeWithSession(operation, async)
+
+        then:
+        result.wasAcknowledged()
+        result.getInsertedCount() == 2
+        result.getDeletedCount() == 2
+        result.getMatchedCount() == 4
+        result.getModifiedCount() == 4
+        result.getUpserts().isEmpty()
+
+        then:
+        getCollectionHelper().find(new Document('_id', 1)).first() == new Document('_id', 1).append('x', 2)
+        getCollectionHelper().find(new Document('_id', 2)).first() == new Document('_id', 2).append('x', 3)
+        getCollectionHelper().find(new Document('_id', 3)).isEmpty()
+        getCollectionHelper().find(new Document('_id', 4)).isEmpty()
+        getCollectionHelper().find(new Document('_id', 5)).first() == new Document('_id', 5).append('x', 4)
+        getCollectionHelper().find(new Document('_id', 6)).first() == new Document('_id', 6).append('x', 5)
+        getCollectionHelper().find(new Document('_id', 7)).first() == new Document('_id', 7)
+        getCollectionHelper().find(new Document('_id', 8)).first() == new Document('_id', 8)
+
+        cleanup:
+        disableOnPrimaryTransactionalWriteFailPoint()
+
+        where:
+        [async, ordered, failPoint] << [
+                [true, false],
+                [true, false],
+                ['{mode: {times: 5}}', // SDAM will retry multiple times to find a server
+                 '{mode: {times: 1}, data: {failBeforeCommitExceptionCode : 1}}']
+        ].combinations()
+    }
+
+    @IgnoreIf({ !isDiscoverableReplicaSet() })
+    def 'should fail as expected with retryWrites and failPoints'() {
+        given:
+        def testWrites = getTestWrites()
+        getCollectionHelper().insertDocuments(getTestInserts())
+        def operation = new MixedBulkWriteOperation(getNamespace(), testWrites, true, ACKNOWLEDGED, true)
+
+        when:
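+        // the fail point forces the write to fail even after the automatic retry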
enableOnPrimaryTransactionalWriteFailPoint(BsonDocument.parse(failPoint)) + executeWithSession(operation, async) + + then: + thrown(MongoSocketReadException) + + cleanup: + disableOnPrimaryTransactionalWriteFailPoint() + + where: + [async, failPoint] << [ + [true, false], + ['{mode: {times: 2}, data: {failBeforeCommitExceptionCode : 1}}', + '{mode: {skip: 2}, data: {failBeforeCommitExceptionCode : 1}}'] + ].combinations() + } + + @IgnoreIf({ !isDiscoverableReplicaSet() }) + def 'should not fail with unacknowledged writes, retryWrites and failPoints'() { + given: + def testWrites = getTestWrites() + getCollectionHelper().insertDocuments(getTestInserts()) + def operation = new MixedBulkWriteOperation(getNamespace(), testWrites, true, UNACKNOWLEDGED, true) + + when: + enableOnPrimaryTransactionalWriteFailPoint(BsonDocument.parse(failPoint)) + def result = executeWithSession(operation, async) + + then: + result == BulkWriteResult.unacknowledged() + + cleanup: + disableOnPrimaryTransactionalWriteFailPoint() + + where: + [async, failPoint] << [ + [true, false], + ['{mode: {times: 2}, data: {failBeforeCommitExceptionCode : 1}}', + '{mode: {skip: 2}, data: {failBeforeCommitExceptionCode : 1}}'] + ].combinations() + } + + def 'should retry if the connection initially fails'() { + when: + def cannedResult = BsonDocument.parse('{ok: 1.0, n: 1}') + def operation = new MixedBulkWriteOperation(getNamespace(), + [new InsertRequest(BsonDocument.parse('{ level: 9 }'))], true, ACKNOWLEDGED, true) + def expectedCommand = new BsonDocument('insert', new BsonString(getNamespace().getCollectionName())) + .append('ordered', BsonBoolean.TRUE) + .append('txnNumber', new BsonInt64(0)) + + then: + testOperationRetries(operation, [3, 6, 0], expectedCommand, async, cannedResult) + + where: + async << [true, false] + } + + def 'should throw original error when retrying and failing'() { + given: + def operation = new MixedBulkWriteOperation(getNamespace(), + [new InsertRequest(BsonDocument.parse('{ level: 9 }'))], true, ACKNOWLEDGED, true) + def originalException = new MongoSocketException('Some failure', new ServerAddress()) + + when: + testRetryableOperationThrowsOriginalError(operation, [[3, 6, 0], [3, 6, 0], [3, 6, 0]], + [REPLICA_SET_PRIMARY, REPLICA_SET_PRIMARY, STANDALONE], originalException, async) + + then: + Exception commandException = thrown() + commandException == originalException + + when: + testRetryableOperationThrowsOriginalError(operation, [[3, 6, 0], [3, 6, 0]], + [REPLICA_SET_PRIMARY, REPLICA_SET_PRIMARY], originalException, async, 1) + + then: + commandException = thrown() + commandException == originalException + + where: + async << [true, false] + } + + def 'should not request retryable write for multi updates or deletes'() { + given: + def operation = new MixedBulkWriteOperation(getNamespace(), writes, true, ACKNOWLEDGED, true) + + when: + executeWithSession(operation, async) + + then: + noExceptionThrown() + + where: + [async, writes] << [ + [true, false], + // Test scenarios where the multi:true request is at the beginning and at the end of the list + [ + [ + new DeleteRequest(new BsonDocument()).multi(true), + new InsertRequest(new BsonDocument()) + ], + [ + new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)), + new BsonDocument('$set', new BsonDocument('_id', new BsonInt32(1))), UPDATE).multi(true), + new InsertRequest(new BsonDocument()) + ], + [ + new InsertRequest(new BsonDocument()), + new DeleteRequest(new BsonDocument()).multi(true) + ], + [ + new InsertRequest(new 
BsonDocument()), + new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)), + new BsonDocument('$set', new BsonDocument('_id', new BsonInt32(1))), UPDATE).multi(true) + ] + ] + ].combinations() + } + + def 'should support array filters'() { + given: + def documentOne = BsonDocument.parse('{_id: 1, y: [ {b: 3}, {b: 1}]}') + def documentTwo = BsonDocument.parse('{_id: 2, y: [ {b: 0}, {b: 1}]}') + getCollectionHelper().insertDocuments(documentOne, documentTwo) + def requests = [ + new UpdateRequest(new BsonDocument(), BsonDocument.parse('{ $set: {"y.$[i].b": 2}}'), UPDATE) + .arrayFilters([BsonDocument.parse('{"i.b": 3}')]), + new UpdateRequest(new BsonDocument(), BsonDocument.parse('{ $set: {"y.$[i].b": 4}}'), UPDATE) + .multi(true) + .arrayFilters([BsonDocument.parse('{"i.b": 1}')]), + ] + def operation = new MixedBulkWriteOperation(namespace, requests, true, ACKNOWLEDGED, false) + + when: + execute(operation, async) + + then: + getCollectionHelper().find(new BsonDocumentCodec()) == [ + BsonDocument.parse('{_id: 1, y: [ {b: 2}, {b: 4}]}'), + BsonDocument.parse('{_id: 2, y: [ {b: 0}, {b: 4}]}') + ] + + where: + async << [true, false] + } + + def 'should throw if array filters is set and write concern is UNACKNOWLEDGED'() { + given: + def requests = [ + new UpdateRequest(new BsonDocument(), BsonDocument.parse('{ $set: {"y.$[i].b": 2}}'), UPDATE) + .arrayFilters([BsonDocument.parse('{"i.b": 3}')]) + ] + def operation = new MixedBulkWriteOperation(namespace, requests, true, UNACKNOWLEDGED, false) + + when: + execute(operation, async) + + then: + thrown(MongoClientException) + + where: + async << [true, false] + } + + private static List getTestWrites() { + [new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)), + new BsonDocument('$set', new BsonDocument('x', new BsonInt32(2))), + UPDATE).multi(false), + new UpdateRequest(new BsonDocument('_id', new BsonInt32(2)), + new BsonDocument('$set', new BsonDocument('x', new BsonInt32(3))), + UPDATE).multi(false), + new DeleteRequest(new BsonDocument('_id', new BsonInt32(3))).multi(false), + new DeleteRequest(new BsonDocument('_id', new BsonInt32(4))).multi(false), + new UpdateRequest(new BsonDocument('_id', new BsonInt32(5)), + new BsonDocument('_id', new BsonInt32(5)).append('x', new BsonInt32(4)), + REPLACE).multi(false), + new UpdateRequest(new BsonDocument('_id', new BsonInt32(6)), + new BsonDocument('_id', new BsonInt32(6)).append('x', new BsonInt32(5)), + REPLACE).multi(false), + new InsertRequest(new BsonDocument('_id', new BsonInt32(7))), + new InsertRequest(new BsonDocument('_id', new BsonInt32(8))) + ] + } + + private static BsonDocument[] getTestInserts() { + [new BsonDocument('_id', new BsonInt32(1)), + new BsonDocument('_id', new BsonInt32(2)), + new BsonDocument('_id', new BsonInt32(3)), + new BsonDocument('_id', new BsonInt32(4)), + new BsonDocument('_id', new BsonInt32(5)), + new BsonDocument('_id', new BsonInt32(6))] + } + + private static Integer expectedModifiedCount(final int expectedCountForServersThatSupportIt) { + expectedCountForServersThatSupportIt + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/OperationReadConcernHelperSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/OperationReadConcernHelperSpecification.groovy new file mode 100644 index 00000000000..331690a830b --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/OperationReadConcernHelperSpecification.groovy @@ -0,0 +1,132 @@ +/* + * Copyright 2008-present 
MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation + +import com.mongodb.ReadConcern +import com.mongodb.internal.session.SessionContext +import org.bson.BsonDocument +import org.bson.BsonString +import org.bson.BsonTimestamp +import spock.lang.Specification + +import static com.mongodb.internal.operation.OperationReadConcernHelper.appendReadConcernToCommand +import static com.mongodb.internal.operation.ServerVersionHelper.UNKNOWN_WIRE_VERSION + + +class OperationReadConcernHelperSpecification extends Specification { + + def 'should throw IllegalArgumentException if command document is null'() { + when: + appendReadConcernToCommand(Stub(SessionContext), UNKNOWN_WIRE_VERSION, null) + + then: + thrown(IllegalArgumentException) + } + + def 'should throw IllegalArgumentException if session context is null'() { + when: + appendReadConcernToCommand(null, UNKNOWN_WIRE_VERSION, new BsonDocument()) + + then: + thrown(IllegalArgumentException) + } + + def 'should add afterClusterTime to majority read concern when session is causally consistent'() { + given: + def operationTime = new BsonTimestamp(42, 1) + def sessionContext = Stub(SessionContext) { + isCausallyConsistent() >> true + getOperationTime() >> operationTime + getReadConcern() >> ReadConcern.MAJORITY + } + def commandDocument = new BsonDocument() + + when: + appendReadConcernToCommand(sessionContext, UNKNOWN_WIRE_VERSION, commandDocument) + + then: + commandDocument == new BsonDocument('readConcern', + new BsonDocument('level', new BsonString('majority')).append('afterClusterTime', operationTime)) + } + + def 'should add afterClusterTime to default read concern when session is causally consistent'() { + given: + def operationTime = new BsonTimestamp(42, 1) + def sessionContext = Stub(SessionContext) { + isCausallyConsistent() >> true + getOperationTime() >> operationTime + getReadConcern() >> ReadConcern.DEFAULT + } + def commandDocument = new BsonDocument() + + when: + appendReadConcernToCommand(sessionContext, UNKNOWN_WIRE_VERSION, commandDocument) + + then: + commandDocument == new BsonDocument('readConcern', + new BsonDocument('afterClusterTime', operationTime)) + } + + def 'should not add afterClusterTime to ReadConcern when session is not causally consistent'() { + given: + def sessionContext = Stub(SessionContext) { + isCausallyConsistent() >> false + getOperationTime() >> { throw new UnsupportedOperationException() } + getReadConcern() >> ReadConcern.MAJORITY + } + def commandDocument = new BsonDocument() + + when: + appendReadConcernToCommand(sessionContext, UNKNOWN_WIRE_VERSION, commandDocument) + + then: + commandDocument == new BsonDocument('readConcern', + new BsonDocument('level', new BsonString('majority'))) + } + + def 'should not add the default read concern to the command document'() { + def sessionContext = Stub(SessionContext) { + isCausallyConsistent() >> false + getOperationTime() >> { throw new UnsupportedOperationException() } + 
getReadConcern() >> ReadConcern.DEFAULT + } + def commandDocument = new BsonDocument() + + when: + appendReadConcernToCommand(sessionContext, UNKNOWN_WIRE_VERSION, commandDocument) + + then: + commandDocument == new BsonDocument() + } + + def 'should not add afterClusterTime to ReadConcern when operation time is null'() { + given: + def sessionContext = Stub(SessionContext) { + isCausallyConsistent() >> true + getOperationTime() >> null + getReadConcern() >> ReadConcern.MAJORITY + } + def commandDocument = new BsonDocument() + + when: + appendReadConcernToCommand(sessionContext, UNKNOWN_WIRE_VERSION, commandDocument) + + then: + commandDocument == new BsonDocument('readConcern', + new BsonDocument('level', new BsonString('majority'))) + } +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/RenameCollectionOperationSpecification.groovy b/driver-core/src/test/functional/com/mongodb/internal/operation/RenameCollectionOperationSpecification.groovy new file mode 100644 index 00000000000..f2e75a235df --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/RenameCollectionOperationSpecification.groovy @@ -0,0 +1,104 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation + +import com.mongodb.MongoNamespace +import com.mongodb.MongoServerException +import com.mongodb.MongoWriteConcernException +import com.mongodb.OperationFunctionalSpecification +import com.mongodb.WriteConcern +import org.bson.Document +import org.bson.codecs.DocumentCodec +import spock.lang.IgnoreIf + +import static com.mongodb.ClusterFixture.executeAsync +import static com.mongodb.ClusterFixture.getBinding +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet +import static com.mongodb.ClusterFixture.isSharded + +@IgnoreIf( { isSharded() } ) // these tests don't reliably pass against mongos +class RenameCollectionOperationSpecification extends OperationFunctionalSpecification { + + def cleanup() { + new DropCollectionOperation(new MongoNamespace(getDatabaseName(), 'newCollection'), + WriteConcern.ACKNOWLEDGED).execute(getBinding()) + } + + def 'should rename a collection'() { + given: + getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection')) + assert collectionNameExists(getCollectionName()) + def operation = new RenameCollectionOperation(getNamespace(), + new MongoNamespace(getDatabaseName(), 'newCollection'), null) + + when: + execute(operation, async) + + then: + !collectionNameExists(getCollectionName()) + collectionNameExists('newCollection') + + where: + async << [true, false] + } + + def 'should throw if not drop and collection exists'() { + given: + getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection')) + assert collectionNameExists(getCollectionName()) + def operation = new RenameCollectionOperation(getNamespace(), getNamespace(), null) + + when: + execute(operation, async) + + then: + thrown(MongoServerException) + collectionNameExists(getCollectionName()) + + where: + async << [true, false] + } + + @IgnoreIf({ !isDiscoverableReplicaSet() }) + def 'should throw on write concern error'() { + given: + getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('documentThat', 'forces creation of the Collection')) + assert collectionNameExists(getCollectionName()) + def operation = new RenameCollectionOperation(getNamespace(), + new MongoNamespace(getDatabaseName(), 'newCollection'), new WriteConcern(5)) + + when: + async ? executeAsync(operation) : operation.execute(getBinding()) + + then: + def ex = thrown(MongoWriteConcernException) + ex.writeConcernError.code == 100 + ex.writeResult.wasAcknowledged() + + where: + async << [true, false] + } + + def collectionNameExists(String collectionName) { + def cursor = new ListCollectionsOperation(databaseName, new DocumentCodec()).execute(getBinding()) + if (!cursor.hasNext()) { + return false + } + cursor.next()*.get('name').contains(collectionName) + } + +} diff --git a/driver-core/src/test/functional/com/mongodb/internal/operation/TestOperationHelper.java b/driver-core/src/test/functional/com/mongodb/internal/operation/TestOperationHelper.java new file mode 100644 index 00000000000..824517e10db --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/internal/operation/TestOperationHelper.java @@ -0,0 +1,88 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoCommandException; +import com.mongodb.MongoCursorNotFoundException; +import com.mongodb.MongoNamespace; +import com.mongodb.MongoQueryException; +import com.mongodb.ReadPreference; +import com.mongodb.ServerCursor; +import com.mongodb.async.FutureResultCallback; +import com.mongodb.internal.connection.AsyncConnection; +import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.validator.NoOpFieldNameValidator; +import org.bson.BsonDocument; +import org.bson.BsonInt64; +import org.bson.BsonString; +import org.bson.codecs.BsonDocumentCodec; + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT; + +final class TestOperationHelper { + + static BsonDocument getKeyPattern(final BsonDocument explainPlan) { + BsonDocument winningPlan = explainPlan.getDocument("queryPlanner").getDocument("winningPlan"); + if (winningPlan.containsKey("queryPlan")) { + BsonDocument queryPlan = winningPlan.getDocument("queryPlan"); + if (queryPlan.containsKey("inputStage")) { + return queryPlan.getDocument("inputStage").getDocument("keyPattern"); + } + } else if (winningPlan.containsKey("inputStage")) { + return winningPlan.getDocument("inputStage").getDocument("keyPattern"); + } else if (winningPlan.containsKey("shards")) { + // recurse on shards[0] to get its query plan + return getKeyPattern(new BsonDocument("queryPlanner", winningPlan.getArray("shards").get(0).asDocument())); + } + throw new IllegalArgumentException("Unexpected explain plan: " + explainPlan.toJson()); + } + + static void makeAdditionalGetMoreCall(final MongoNamespace namespace, final ServerCursor serverCursor, final Connection connection) { + makeAdditionalGetMoreCallHandleError(serverCursor, () -> + connection.command(namespace.getDatabaseName(), + new BsonDocument("getMore", new BsonInt64(serverCursor.getId())) + .append("collection", new BsonString(namespace.getCollectionName())), + NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), new BsonDocumentCodec(), OPERATION_CONTEXT)); + } + + static void makeAdditionalGetMoreCall(final MongoNamespace namespace, final ServerCursor serverCursor, + final AsyncConnection connection) { + FutureResultCallback<BsonDocument> callback = new FutureResultCallback<>(); + makeAdditionalGetMoreCallHandleError(serverCursor, () -> { + connection.commandAsync(namespace.getDatabaseName(), + new BsonDocument("getMore", new BsonInt64(serverCursor.getId())) + .append("collection", new BsonString(namespace.getCollectionName())), + NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), new BsonDocumentCodec(), OPERATION_CONTEXT, callback); + callback.get(); + }); + } + + static void makeAdditionalGetMoreCallHandleError(final ServerCursor serverCursor, final Runnable runnable) { + try { + runnable.run(); + } catch (MongoCommandException e) { + if (e.getErrorCode() == 43) { // error code 43: CursorNotFound + throw new MongoCursorNotFoundException(serverCursor.getId(), e.getResponse(), serverCursor.getAddress()); + } else { + throw new MongoQueryException(e.getResponse(), e.getServerAddress()); + } + } + } + + private TestOperationHelper() { 
+ } +} diff --git a/driver-core/src/test/functional/com/mongodb/test/AfterBeforeParameterResolver.java b/driver-core/src/test/functional/com/mongodb/test/AfterBeforeParameterResolver.java new file mode 100644 index 00000000000..bc78709e73b --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/test/AfterBeforeParameterResolver.java @@ -0,0 +1,159 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.test; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.extension.ExtensionContext; +import org.junit.jupiter.api.extension.ParameterContext; +import org.junit.jupiter.api.extension.ParameterResolutionException; +import org.junit.jupiter.api.extension.ParameterResolver; +import org.junit.jupiter.engine.execution.BeforeEachMethodAdapter; +import org.junit.jupiter.engine.extension.ExtensionRegistry; +import org.junit.platform.commons.util.AnnotationUtils; + +import java.lang.annotation.Annotation; +import java.lang.reflect.Parameter; +import java.util.Arrays; +import java.util.List; +import java.util.Optional; + +/** + * The {@code AfterBeforeParameterResolver} supports passing parameterized test values to the {@code BeforeEach} and/or + * the {@code AfterEach} methods. + * + *

<p>Example usage: + * <pre>
+ * {@code + * @ExtendWith(AfterBeforeParameterResolver.class) + * class AfterBeforeParameterResolverTest { + * + * private TestEnum capturedParameter; + * @BeforeEach + * public void setup(TestEnum parameter) { + * capturedParameter = parameter; + * } + * + * @ParameterizedTest + * @EnumSource(TestEnum.class) + * public void test(TestEnum parameter) { + * assertThat(parameter).isEqualTo(capturedParameter); + * } + * + * enum TestEnum { + * PARAMETER_1, + * PARAMETER_2; + * } + * } + * } + * + * @see AfterBeforeParameterResolver + * @see junit-team/junit5#944 + */ +public class AfterBeforeParameterResolver implements BeforeEachMethodAdapter, ParameterResolver { + private ParameterResolver parameterisedTestParameterResolver = null; + + @Override + public void invokeBeforeEachMethod(final ExtensionContext context, final ExtensionRegistry registry) { + Optional resolverOptional = registry.getExtensions(ParameterResolver.class) + .stream() + .filter(parameterResolver -> parameterResolver.getClass().getName().contains("ParameterizedTestParameterResolver")) + .findFirst(); + if (!resolverOptional.isPresent()) { + throw new IllegalStateException("ParameterizedTestParameterResolver missed in the registry. " + + "Probably it's not a Parameterized Test"); + } else { + parameterisedTestParameterResolver = resolverOptional.get(); + } + } + + @Override + public boolean supportsParameter(final ParameterContext parameterContext, final ExtensionContext extensionContext) + throws ParameterResolutionException { + if (isExecutedOnAfterOrBeforeMethod(parameterContext)) { + ParameterContext pContext = getMappedContext(parameterContext, extensionContext); + return parameterisedTestParameterResolver.supportsParameter(pContext, extensionContext); + } + return false; + } + + @Override + public Object resolveParameter(final ParameterContext parameterContext, final ExtensionContext extensionContext) + throws ParameterResolutionException { + return parameterisedTestParameterResolver.resolveParameter(getMappedContext(parameterContext, extensionContext), extensionContext); + } + + private MappedParameterContext getMappedContext(final ParameterContext parameterContext, final ExtensionContext extensionContext) { + return new MappedParameterContext( + parameterContext.getIndex(), + extensionContext.getRequiredTestMethod().getParameters()[parameterContext.getIndex()], + parameterContext.getTarget()); + } + + + private boolean isExecutedOnAfterOrBeforeMethod(final ParameterContext parameterContext) { + return Arrays.stream(parameterContext.getDeclaringExecutable().getDeclaredAnnotations()) + .anyMatch(this::isAfterEachOrBeforeEachAnnotation); + } + + private boolean isAfterEachOrBeforeEachAnnotation(final Annotation annotation) { + return annotation.annotationType() == BeforeEach.class || annotation.annotationType() == AfterEach.class; + } + + + @SuppressWarnings("OptionalUsedAsFieldOrParameterType") + public static class MappedParameterContext implements ParameterContext { + private final int index; + private final Parameter parameter; + private final Optional target; + public MappedParameterContext(final int index, final Parameter parameter, final Optional target) { + this.index = index; + this.parameter = parameter; + this.target = target; + } + + @Override + public int getIndex() { + return index; + } + + @Override + public Parameter getParameter() { + return parameter; + } + + @Override + public Optional getTarget() { + return target; + } + + @Override + public boolean isAnnotated(final Class annotationType) { + return 
AnnotationUtils.isAnnotated(parameter, annotationType); + } + + @Override + public Optional findAnnotation(final Class annotationType) { + return AnnotationUtils.findAnnotation(parameter, annotationType); + } + + @Override + public List findRepeatableAnnotations(final Class annotationType) { + return AnnotationUtils.findRepeatableAnnotations(parameter, annotationType); + } + } +} diff --git a/driver-core/src/test/functional/com/mongodb/test/FlakyTest.java b/driver-core/src/test/functional/com/mongodb/test/FlakyTest.java new file mode 100644 index 00000000000..226b035151c --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/test/FlakyTest.java @@ -0,0 +1,93 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.test; + +import com.mongodb.test.extension.FlakyTestExtension; +import org.junit.jupiter.api.TestInfo; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.parallel.Execution; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +import static org.junit.jupiter.api.parallel.ExecutionMode.SAME_THREAD; + +/** + * {@code @FlakyTest} is used to signal that the annotated method contains a flaky / racy test. + * + *
<p>
The test will be repeated up to a {@linkplain #maxAttempts maximum number of times} with a + * configurable {@linkplain #name display name}. Each invocation will be repeated if the previous test fails. + */ +@Target({ElementType.METHOD, ElementType.TYPE}) +@Retention(RetentionPolicy.RUNTIME) +@Execution(SAME_THREAD) // cannot be run in parallel +@ExtendWith(FlakyTestExtension.class) +@TestTemplate +public @interface FlakyTest { + + /** + * Placeholder for the {@linkplain TestInfo#getDisplayName display name} of + * a {@code @RepeatedTest} method: {displayName} + */ + String DISPLAY_NAME_PLACEHOLDER = "{displayName}"; + + /** + * Placeholder for the current repetition count of a {@code @FlakyTest} + * method: {index} + */ + String CURRENT_REPETITION_PLACEHOLDER = "{index}"; + + /** + * Placeholder for the total number of repetitions of a {@code @FlakyTest} + * method: {totalRepetitions} + */ + String TOTAL_REPETITIONS_PLACEHOLDER = "{totalRepetitions}"; + + /** + * Short display name pattern for a repeated test: {@value #SHORT_DISPLAY_NAME} + * + * @see #CURRENT_REPETITION_PLACEHOLDER + * @see #TOTAL_REPETITIONS_PLACEHOLDER + * @see #LONG_DISPLAY_NAME + */ + String SHORT_DISPLAY_NAME = "Attempt: " + CURRENT_REPETITION_PLACEHOLDER + " / " + TOTAL_REPETITIONS_PLACEHOLDER; + + /** + * Long display name pattern for a repeated test: {@value #LONG_DISPLAY_NAME} + * + * @see #DISPLAY_NAME_PLACEHOLDER + * @see #SHORT_DISPLAY_NAME + */ + String LONG_DISPLAY_NAME = DISPLAY_NAME_PLACEHOLDER + " " + SHORT_DISPLAY_NAME; + + /** + * max number of attempts + * + * @return N-times repeat test if it failed + */ + int maxAttempts() default 1; + + /** + * Display name for test method + * + * @return Short name + */ + String name() default LONG_DISPLAY_NAME; +} diff --git a/driver-core/src/test/functional/com/mongodb/test/extension/FlakyTestExtension.java b/driver-core/src/test/functional/com/mongodb/test/extension/FlakyTestExtension.java new file mode 100644 index 00000000000..55ddd7a001e --- /dev/null +++ b/driver-core/src/test/functional/com/mongodb/test/extension/FlakyTestExtension.java @@ -0,0 +1,198 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.test.extension; + +import com.mongodb.test.FlakyTest; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.AfterTestExecutionCallback; +import org.junit.jupiter.api.extension.BeforeTestExecutionCallback; +import org.junit.jupiter.api.extension.ConditionEvaluationResult; +import org.junit.jupiter.api.extension.ExecutionCondition; +import org.junit.jupiter.api.extension.Extension; +import org.junit.jupiter.api.extension.ExtensionContext; +import org.junit.jupiter.api.extension.TestExecutionExceptionHandler; +import org.junit.jupiter.api.extension.TestInstantiationException; +import org.junit.jupiter.api.extension.TestTemplateInvocationContext; +import org.junit.jupiter.api.extension.TestTemplateInvocationContextProvider; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.platform.commons.util.Preconditions; +import org.opentest4j.TestAbortedException; + +import java.lang.reflect.Method; +import java.util.Iterator; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.Spliterator; +import java.util.stream.Stream; + +import static com.mongodb.test.FlakyTest.CURRENT_REPETITION_PLACEHOLDER; +import static com.mongodb.test.FlakyTest.DISPLAY_NAME_PLACEHOLDER; +import static com.mongodb.test.FlakyTest.TOTAL_REPETITIONS_PLACEHOLDER; +import static java.util.Collections.singletonList; +import static java.util.Spliterators.spliteratorUnknownSize; +import static java.util.stream.StreamSupport.stream; +import static org.junit.platform.commons.util.AnnotationUtils.findAnnotation; +import static org.junit.platform.commons.util.AnnotationUtils.isAnnotated; + + +/** + * A {@code TestTemplateInvocationContextProvider} that supports the {@link FlakyTest @FlakyTest} annotation. 
+ */ +public class FlakyTestExtension implements TestTemplateInvocationContextProvider, + BeforeTestExecutionCallback, + AfterTestExecutionCallback, + TestExecutionExceptionHandler { + + private int maxAttempts = 0; + private FlakyTestDisplayFormatter formatter; + private Boolean testHasPassed; + private int currentAttempt = 0; + + + @Override + public void afterTestExecution(final ExtensionContext extensionContext) { + testHasPassed = extensionContext.getExecutionException().map(e -> e instanceof TestInstantiationException).orElse(true); + } + + @Override + public boolean supportsTestTemplate(final ExtensionContext context) { + return isAnnotated(context.getTestMethod(), FlakyTest.class); + } + + @Override + public Stream provideTestTemplateInvocationContexts(final ExtensionContext context) { + Method testMethod = context.getRequiredTestMethod(); + String displayName = context.getDisplayName(); + + if (isAnnotated(testMethod, Test.class)) { + throw new TestInstantiationException(String.format("Test %s also annotated with @Test", displayName)); + } else if (isAnnotated(testMethod, ParameterizedTest.class)) { + throw new TestInstantiationException(String.format("Test %s also annotated with @ParameterizedTest", displayName)); + } + + FlakyTest flakyTest = findAnnotation(testMethod, FlakyTest.class) + .orElseThrow(() -> + new TestInstantiationException("The extension should not be executed unless the test method is " + + "annotated with @FlakyTest.")); + + formatter = displayNameFormatter(flakyTest, testMethod, displayName); + + maxAttempts = flakyTest.maxAttempts(); + Preconditions.condition(maxAttempts > 0, "Total repeats must be higher than 0"); + + //Convert logic of repeated handler to spliterator + Spliterator spliterator = + spliteratorUnknownSize(new TestTemplateIterator(), Spliterator.NONNULL); + return stream(spliterator, false); + } + + private FlakyTestDisplayFormatter displayNameFormatter(final FlakyTest flakyTest, final Method method, + final String displayName) { + String pattern = Preconditions.notBlank(flakyTest.name().trim(), () -> String.format( + "Configuration error: @FlakyTest on method [%s] must be declared with a non-empty name.", method)); + return new FlakyTestDisplayFormatter(pattern, displayName); + } + + @Override + public void handleTestExecutionException(final ExtensionContext context, final Throwable throwable) throws Throwable { + if (currentAttempt < maxAttempts) { + // Mark failure as skipped / aborted so to pass CI + throw new TestAbortedException("Test failed on attempt: " + currentAttempt); + } + throw throwable; + } + + @Override + public void beforeTestExecution(final ExtensionContext context) { + currentAttempt++; + } + + /** + * TestTemplateIterator (Repeat test if it failed) + */ + class TestTemplateIterator implements Iterator { + private int currentIndex = 0; + + @Override + public boolean hasNext() { + if (currentIndex == 0) { + return true; + } + return !testHasPassed && currentIndex < maxAttempts; + } + + @Override + public TestTemplateInvocationContext next() { + if (hasNext()) { + currentIndex++; + return new RepeatInvocationContext(currentIndex, maxAttempts, formatter); + } + throw new NoSuchElementException(); + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + } + + static class RepeatInvocationContext implements TestTemplateInvocationContext { + private final int currentRepetition; + private final int totalTestRuns; + private final FlakyTestDisplayFormatter formatter; + + 
RepeatInvocationContext(final int currentRepetition, final int totalRepetitions, final FlakyTestDisplayFormatter formatter) { + this.currentRepetition = currentRepetition; + this.totalTestRuns = totalRepetitions; + this.formatter = formatter; + } + + @Override + public String getDisplayName(final int invocationIndex) { + return formatter.format(currentRepetition, totalTestRuns); + } + + @Override + public List getAdditionalExtensions() { + return singletonList((ExecutionCondition) context -> { + if (currentRepetition > totalTestRuns) { + return ConditionEvaluationResult.disabled("All attempts failed"); + } else { + return ConditionEvaluationResult.enabled("Test failed - retry"); + } + }); + } + } + + static class FlakyTestDisplayFormatter { + private final String pattern; + private final String displayName; + + FlakyTestDisplayFormatter(final String pattern, final String displayName) { + this.pattern = pattern; + this.displayName = displayName; + } + + String format(final int currentRepetition, final int totalRepetitions) { + return pattern + .replace(DISPLAY_NAME_PLACEHOLDER, displayName) + .replace(CURRENT_REPETITION_PLACEHOLDER, String.valueOf(currentRepetition)) + .replace(TOTAL_REPETITIONS_PLACEHOLDER, String.valueOf(totalRepetitions)); + } + + } + +} diff --git a/driver-core/src/test/resources/logback-test.xml b/driver-core/src/test/resources/logback-test.xml new file mode 100644 index 00000000000..dde5eeba5aa --- /dev/null +++ b/driver-core/src/test/resources/logback-test.xml @@ -0,0 +1,18 @@ + + + + + %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n + + + + + + + + + + + + + \ No newline at end of file diff --git a/driver-core/src/test/resources/specifications b/driver-core/src/test/resources/specifications new file mode 160000 index 00000000000..ace53b165f2 --- /dev/null +++ b/driver-core/src/test/resources/specifications @@ -0,0 +1 @@ +Subproject commit ace53b165f2ab83e8385de15fbda9346befc0ea7 diff --git a/driver-core/src/test/unit/com/mongodb/AbstractConnectionStringTest.java b/driver-core/src/test/unit/com/mongodb/AbstractConnectionStringTest.java new file mode 100644 index 00000000000..d511d2750eb --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/AbstractConnectionStringTest.java @@ -0,0 +1,337 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb; + +import com.mongodb.internal.connection.ServerMonitoringModeUtil; +import com.mongodb.lang.Nullable; +import junit.framework.TestCase; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonValue; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import static java.util.Arrays.asList; + +@RunWith(Parameterized.class) +public abstract class AbstractConnectionStringTest extends TestCase { + private static final Set UNSUPPORTED_OPTIONS = + new HashSet<>(asList( + "tlsallowinvalidcertificates", + "tlsallowinvalidhostnames", + "tlscafile", + "tlscertificatekeyfile", + "tlscertificatekeyfilepassword", + "serverselectiontryonce")); + private final String filename; + private final String description; + private final String input; + private final BsonDocument definition; + + public AbstractConnectionStringTest(final String filename, final String description, final String input, + final BsonDocument definition) { + this.filename = filename; + this.description = description; + this.input = input; + this.definition = definition; + } + + protected String getFilename() { + return filename; + } + + protected BsonDocument getDefinition() { + return definition; + } + + protected String getDescription() { + return description; + } + + protected void testInvalidUris() { + Throwable expectedError = null; + + try { + new ConnectionString(input); + } catch (Throwable t) { + expectedError = t; + } + + assertNotNull(String.format("Connection string '%s' should have thrown an exception", input), expectedError); + assertTrue(String.format("Connection string '%s' should have thrown an IllegalArgumentException", input), + expectedError instanceof IllegalArgumentException); + } + + protected void testValidHostIdentifiers() { + ConnectionString connectionString = null; + try { + connectionString = new ConnectionString(input); + } catch (Throwable t) { + fail(String.format("Connection string '%s' should not have thrown an exception: %s", input, t)); + } + + assertExpectedHosts(connectionString.getHosts()); + } + + @SuppressWarnings("ConstantConditions") + protected void testValidOptions() { + ConnectionString connectionString = null; + + try { + connectionString = new ConnectionString(input); + } catch (Throwable t) { + fail(String.format("Connection string '%s' should not have thrown an exception: %s", input, t)); + } + + for (Map.Entry option : definition.getDocument("options").entrySet()) { + if (UNSUPPORTED_OPTIONS.contains(option.getKey().toLowerCase())) { + continue; + } + + if (option.getKey().equals("authmechanism")) { + String expected = option.getValue().asString().getValue(); + if (expected.equals("MONGODB-CR")) { + assertNotNull(connectionString.getCredential()); + assertNull(connectionString.getCredential().getAuthenticationMechanism()); + } else { + String actual = connectionString.getCredential().getAuthenticationMechanism().getMechanismName(); + assertEquals(expected, actual); + } + } else if (option.getKey().equalsIgnoreCase("retrywrites")) { + boolean expected = option.getValue().asBoolean().getValue(); + assertEquals(expected, connectionString.getRetryWritesValue().booleanValue()); + } else if (option.getKey().equalsIgnoreCase("replicaset")) { + String expected = option.getValue().asString().getValue(); + 
assertEquals(expected, connectionString.getRequiredReplicaSetName()); + } else if (option.getKey().equalsIgnoreCase("timeoutMS")) { + int expected = option.getValue().asInt32().getValue(); + assertEquals(expected, connectionString.getTimeout().intValue()); + } else if (option.getKey().equalsIgnoreCase("serverselectiontimeoutms")) { + int expected = option.getValue().asInt32().getValue(); + assertEquals(expected, connectionString.getServerSelectionTimeout().intValue()); + } else if (option.getKey().equalsIgnoreCase("sockettimeoutms")) { + int expected = option.getValue().asInt32().getValue(); + assertEquals(expected, connectionString.getSocketTimeout().intValue()); + } else if (option.getKey().equalsIgnoreCase("wtimeoutms")) { + int expected = option.getValue().asInt32().getValue(); + assertEquals(expected, connectionString.getWriteConcern().getWTimeout(TimeUnit.MILLISECONDS).intValue()); + } else if (option.getKey().equalsIgnoreCase("connecttimeoutms")) { + int expected = option.getValue().asInt32().getValue(); + assertEquals(expected, connectionString.getConnectTimeout().intValue()); + } else if (option.getKey().equalsIgnoreCase("heartbeatfrequencyms")) { + int expected = option.getValue().asInt32().getValue(); + assertEquals(expected, connectionString.getHeartbeatFrequency().intValue()); + } else if (option.getKey().equalsIgnoreCase("servermonitoringmode")) { + String expected = option.getValue().asString().getValue(); + assertEquals(expected, ServerMonitoringModeUtil.getValue(connectionString.getServerMonitoringMode())); + } else if (option.getKey().equalsIgnoreCase("localthresholdms")) { + int expected = option.getValue().asInt32().getValue(); + assertEquals(expected, connectionString.getLocalThreshold().intValue()); + } else if (option.getKey().equalsIgnoreCase("maxidletimems")) { + int expected = option.getValue().asInt32().getValue(); + assertEquals(expected, connectionString.getMaxConnectionIdleTime().intValue()); + } else if (option.getKey().equalsIgnoreCase("tls")) { + boolean expected = option.getValue().asBoolean().getValue(); + assertEquals(expected, connectionString.getSslEnabled().booleanValue()); + } else if (option.getKey().equalsIgnoreCase("tlsinsecure")) { + boolean expected = option.getValue().asBoolean().getValue(); + assertEquals(expected, connectionString.getSslInvalidHostnameAllowed().booleanValue()); + } else if (option.getKey().equalsIgnoreCase("readconcernlevel")) { + String expected = option.getValue().asString().getValue(); + assertEquals(expected, connectionString.getReadConcern().getLevel().getValue()); + } else if (option.getKey().equalsIgnoreCase("w")) { + if (option.getValue().isString()) { + String expected = option.getValue().asString().getValue(); + assertEquals(expected, connectionString.getWriteConcern().getWString()); + } else { + int expected = option.getValue().asNumber().intValue(); + assertEquals(expected, connectionString.getWriteConcern().getW()); + } + } else if (option.getKey().equalsIgnoreCase("journal")) { + boolean expected = option.getValue().asBoolean().getValue(); + assertEquals(expected, connectionString.getWriteConcern().getJournal().booleanValue()); + } else if (option.getKey().equalsIgnoreCase("readpreference")) { + String expected = option.getValue().asString().getValue(); + assertEquals(expected, connectionString.getReadPreference().getName()); + } else if (option.getKey().equalsIgnoreCase("readpreferencetags")) { + BsonArray expected = option.getValue().asArray(); + assertEquals(expected, 
connectionString.getReadPreference().toDocument().getArray("tags")); + } else if (option.getKey().equalsIgnoreCase("maxstalenessseconds")) { + int expected = option.getValue().asNumber().intValue(); + assertEquals(expected, connectionString.getReadPreference().toDocument().getNumber("maxStalenessSeconds").intValue()); + } else if (option.getKey().equals("compressors")) { + BsonArray expectedCompressorList = option.getValue().asArray(); + assertEquals(expectedCompressorList.size(), connectionString.getCompressorList().size()); + for (int i = 0; i < expectedCompressorList.size(); i++) { + String expected = expectedCompressorList.get(i).asString().getValue(); + assertEquals(expected, connectionString.getCompressorList().get(i).getName()); + } + } else if (option.getKey().equalsIgnoreCase("zlibcompressionlevel")) { + int expected = option.getValue().asNumber().intValue(); + assertEquals(expected, connectionString.getCompressorList().get(0).getProperty("level", 0).intValue()); + } else if (option.getKey().equalsIgnoreCase("appname")) { + String expected = option.getValue().asString().getValue(); + assertEquals(expected, connectionString.getApplicationName()); + } else if (option.getKey().equalsIgnoreCase("authmechanism")) { + String expected = option.getValue().asString().getValue(); + assertEquals(expected, connectionString.getCredential().getMechanism()); + } else if (option.getKey().equalsIgnoreCase("authsource")) { + String expected = option.getValue().asString().getValue(); + assertEquals(expected, connectionString.getCredential().getSource()); + } else if (option.getKey().equalsIgnoreCase("authmechanismproperties")) { + BsonDocument properties = option.getValue().asDocument(); + for (String cur : properties.keySet()) { + if (properties.get(cur).isString()) { + String expected = properties.getString(cur).getValue(); + assertEquals(expected, connectionString.getCredential().getMechanismProperty(cur, null)); + } else { + boolean expected = properties.getBoolean(cur).getValue(); + assertEquals(expected, connectionString.getCredential().getMechanismProperty(cur, (Boolean) null).booleanValue()); + } + } + } else if (option.getKey().equalsIgnoreCase("loadbalanced")) { + Boolean expected = option.getValue().asBoolean().getValue(); + assertEquals(expected, connectionString.isLoadBalanced()); + } else if (option.getKey().equalsIgnoreCase("directconnection")) { + Boolean expected = option.getValue().asBoolean().getValue(); + assertEquals(expected, connectionString.isDirectConnection()); + } else if (option.getKey().equalsIgnoreCase("maxpoolsize")) { + Integer expected = option.getValue().asNumber().intValue(); + assertEquals(expected, connectionString.getMaxConnectionPoolSize()); + } else if (option.getKey().equalsIgnoreCase("minpoolsize")) { + Integer expected = option.getValue().asNumber().intValue(); + assertEquals(expected, connectionString.getMinConnectionPoolSize()); + } else if (option.getKey().equalsIgnoreCase("maxconnecting")) { + Integer expected = option.getValue().asNumber().intValue(); + assertEquals(expected, connectionString.getMaxConnecting()); + } else if (option.getKey().equalsIgnoreCase("srvmaxhosts")) { + Integer expected = option.getValue().asNumber().intValue(); + assertEquals(expected, connectionString.getSrvMaxHosts()); + } else if (option.getKey().equalsIgnoreCase("srvservicename")) { + String expected = option.getValue().asString().getValue(); + assertEquals(expected, connectionString.getSrvServiceName()); + } else { + fail(String.format("Unsupported option '%s' in '%s'", 
option.getKey(), input)); + } + } + } + + protected void testValidAuth() { + ConnectionString connectionString = null; + + try { + connectionString = new ConnectionString(input); + } catch (Throwable t) { + if (description.contains("without password")) { + // We don't allow null passwords without setting the authentication mechanism. + return; + } else { + fail(String.format("Connection string '%s' should not have thrown an exception: %s", input, t)); + } + } + + assertString("auth.db", getAuthDB(connectionString)); + assertString("auth.username", connectionString.getUsername()); + + // Passwords for certain auth mechanisms are ignored. + String password = null; + if (connectionString.getPassword() != null) { + password = new String(connectionString.getPassword()); + } + if (connectionString.getCredential() != null) { + AuthenticationMechanism mechanism = connectionString.getCredential().getAuthenticationMechanism(); + if (mechanism == null) { + assertString("auth.password", password); + } else { + switch (mechanism) { + case PLAIN: + case SCRAM_SHA_1: + case SCRAM_SHA_256: + assertString("auth.password", password); + break; + default: + // Ignore the password field. + } + } + + } else { + assertString("auth.password", password); + } + } + + private void assertString(final String key, @Nullable final String actual) { + BsonValue expected = definition; + if (key.contains(".")) { + for (String subKey : key.split("\\.")) { + expected = expected.asDocument().get(subKey); + } + } else { + expected = expected.asDocument().get(key); + } + + if (expected.isNull()) { + assertNull(String.format("%s should be null", key), actual); + } else if (expected.isString()) { + String expectedString = expected.asString().getValue(); + assertEquals(String.format("%s should be %s but was %s", key, expectedString, actual), expectedString, actual); + } else { + fail(String.format("%s should be %s but was %s", key, expected, actual)); + } + } + + private void assertExpectedHosts(final List<String> hosts) { + List<String> cleanedHosts = new ArrayList<>(); + for (String host : hosts) { + if (host.startsWith("[")) { + int idx = host.indexOf("]"); + cleanedHosts.add(host.substring(1, idx) + host.substring(idx + 1)); + } else { + cleanedHosts.add(host); + } + } + + + List<String> expectedHosts = new ArrayList<>(); + for (BsonValue rawHost : definition.getArray("hosts")) { + BsonDocument hostDoc = rawHost.asDocument(); + String host = hostDoc.getString("host").getValue(); + String port = ""; + if (!hostDoc.get("port").isNull()) { + port = ":" + hostDoc.getInt32("port").getValue(); + } + expectedHosts.add(host + port); + } + Collections.sort(expectedHosts); + assertEquals(expectedHosts, cleanedHosts); + } + + @SuppressWarnings("ConstantConditions") + private String getAuthDB(final ConnectionString connectionString) { + if (connectionString.getCollection() != null) { + return connectionString.getDatabase() + "." + connectionString.getCollection(); + } + return connectionString.getDatabase(); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/AuthConnectionStringTest.java b/driver-core/src/test/unit/com/mongodb/AuthConnectionStringTest.java new file mode 100644 index 00000000000..f214667e510 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/AuthConnectionStringTest.java @@ -0,0 +1,197 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.internal.connection.OidcAuthenticator; +import com.mongodb.lang.Nullable; +import junit.framework.TestCase; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonNull; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import util.JsonPoweredTestHelper; + +import java.util.Collection; + +import static com.mongodb.AuthenticationMechanism.MONGODB_OIDC; +import static com.mongodb.MongoCredential.OIDC_CALLBACK_KEY; +import static org.junit.Assume.assumeFalse; + +// See https://github.com/mongodb/specifications/tree/master/source/auth/legacy/tests +@RunWith(Parameterized.class) +public class AuthConnectionStringTest extends TestCase { + private final String description; + private final String input; + private final BsonDocument definition; + + public AuthConnectionStringTest(final String filename, final String description, final String input, + final BsonDocument definition) { + this.description = description; + this.input = input; + this.definition = definition; + } + + @Test + public void shouldPassAllOutcomes() { + // No CANONICALIZE_HOST_NAME support https://jira.mongodb.org/browse/JAVA-4278 + assumeFalse(description.equals("must raise an error when the hostname canonicalization is invalid")); + assumeFalse(description.equals("should accept forwardAndReverse hostname canonicalization (GSSAPI)")); + assumeFalse(description.equals("should accept generic mechanism property (GSSAPI)")); + assumeFalse(description.equals("should accept no hostname canonicalization (GSSAPI)")); + + if (definition.getBoolean("valid").getValue()) { + testValidUris(); + } else { + testInvalidUris(); + } + } + + @Parameterized.Parameters(name = "{1}") + public static Collection data() { + return JsonPoweredTestHelper.getTestData("auth/tests/legacy"); + } + + private void testInvalidUris() { + Throwable expectedError = null; + try { + getMongoCredential(); + } catch (Throwable t) { + expectedError = t; + } + assertTrue(String.format("Connection string '%s' should have thrown an exception. Instead, %s", input, expectedError), + expectedError instanceof IllegalArgumentException); + } + + private void testValidUris() { + MongoCredential credential = getMongoCredential(); + + if (credential != null) { + assertString("credential.source", credential.getSource()); + assertString("credential.username", credential.getUserName()); + assertMechanismProperties(credential); + + // Passwords for certain auth mechanisms are ignored. + String password = credential.getPassword() != null ? 
new String(credential.getPassword()) : null; + if (password != null) { + assertString("credential.password", password); + } + + assertMechanism("credential.mechanism", credential.getMechanism()); + } else { + if (!getExpectedValue("credential").equals(BsonNull.VALUE)) { + fail(String.format("Connection string '%s' should produce credentials", input)); + } + } + } + + @Nullable + private MongoCredential getMongoCredential() { + ConnectionString connectionString; + connectionString = new ConnectionString(input); + MongoCredential credential = connectionString.getCredential(); + if (credential != null) { + BsonArray callbacks = (BsonArray) getExpectedValue("callback"); + if (callbacks != null) { + for (BsonValue v : callbacks) { + String string = ((BsonString) v).getValue(); + if ("oidcRequest".equals(string)) { + credential = credential.withMechanismProperty( + OIDC_CALLBACK_KEY, + (MongoCredential.OidcCallback) (context) -> null); + } else { + fail("Unsupported callback: " + string); + } + } + } + if (MONGODB_OIDC.getMechanismName().equals(credential.getMechanism())) { + OidcAuthenticator.OidcValidator.validateBeforeUse(credential); + } + } + return credential; + } + + private void assertString(final String key, final String actual) { + BsonValue expected = getExpectedValue(key); + + if (expected.isNull()) { + assertNull(String.format("%s should be null", key), actual); + } else if (expected.isString()) { + String expectedString = expected.asString().getValue(); + assertEquals(String.format("%s should be %s but was %s", key, actual, expectedString), actual, expectedString); + } else { + fail(String.format("%s should be %s but was %s", key, actual, expected)); + } + } + + private void assertMechanism(final String key, final String actual) { + BsonValue expected = getExpectedValue(key); + + // MONGODB-CR was removed from the AuthenticationMechanism enum for the 4.0 release, so null will be assigned. + if (expected.isString() && expected.asString().getValue().equals("MONGODB-CR")) { + assertNull(String.format("%s should be null when the expected mechanism is MONGODB-CR", key), actual); + } else { + assertString(key, actual); + } + } + + private void assertMechanismProperties(final MongoCredential credential) { + BsonValue expected = getExpectedValue("credential.mechanism_properties"); + if (expected.isNull()) { + return; + } + BsonDocument document = expected.asDocument(); + for (String key : document.keySet()) { + Object actualMechanismProperty = credential.getMechanismProperty(key, null); + if (document.get(key).isString()) { + String expectedValue = document.getString(key).getValue(); + // If the mechanism is "GSSAPI", the default SERVICE_NAME, which is stated as "mongodb" in the specification, + // is set to null in the driver. 
+ if (credential.getMechanism().equals("GSSAPI") && key.equals("SERVICE_NAME") && expectedValue.equals("mongodb")) { + assertNull(actualMechanismProperty); + } else { + assertEquals(expectedValue, actualMechanismProperty); + } + } else if ((document.get(key).isBoolean())) { + boolean expectedValue = document.getBoolean(key).getValue(); + if (OIDC_CALLBACK_KEY.equals(key)) { + assertTrue(actualMechanismProperty instanceof MongoCredential.OidcCallback); + return; + } + assertNotNull(actualMechanismProperty); + assertEquals(expectedValue, actualMechanismProperty); + } else { + fail("unsupported property type"); + } + } + } + + private BsonValue getExpectedValue(final String key) { + BsonValue expected = definition; + if (key.contains(".")) { + for (String subKey : key.split("\\.")) { + expected = expected.asDocument().get(subKey); + } + } else { + expected = expected.asDocument().get(key); + } + return expected; + } +} diff --git a/driver-core/src/test/unit/com/mongodb/BasicDBObjectTest.java b/driver-core/src/test/unit/com/mongodb/BasicDBObjectTest.java new file mode 100644 index 00000000000..50f5ebdab75 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/BasicDBObjectTest.java @@ -0,0 +1,261 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb; + +import org.bson.BSONObject; +import org.bson.BasicBSONObject; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.codecs.Codec; +import org.bson.json.JsonMode; +import org.bson.json.JsonWriterSettings; +import org.bson.types.BasicBSONList; +import org.bson.types.ObjectId; +import org.junit.Test; + +import java.util.Date; +import java.util.HashMap; +import java.util.Map; +import java.util.TreeMap; +import java.util.UUID; + +import static java.util.Arrays.asList; +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +@SuppressWarnings("MismatchedQueryAndUpdateOfCollection") +public class BasicDBObjectTest { + + private static final Codec DECODER = DBObjectCodec.getDefaultRegistry().get(BasicDBObject.class); + + @Test + public void testParse() { + BasicDBObject document = BasicDBObject.parse("{ 'int' : 1, 'string' : 'abc' }"); + assertEquals(new BasicDBObject("int", 1).append("string", "abc"), document); + + document = BasicDBObject.parse("{ 'int' : 1, 'string' : 'abc' }", DECODER); + assertEquals(new BasicDBObject("int", 1).append("string", "abc"), document); + + document = BasicDBObject.parse("{_id : ObjectId('5524094c2cf8fb61dede210c')}"); + assertEquals(new BasicDBObject("_id", new ObjectId("5524094c2cf8fb61dede210c")), document); + + document = BasicDBObject.parse("{dbRef : {$ref: 'collection', $id: {$oid: '01234567890123456789abcd'}, $db: 'db'}}"); + assertEquals(new BasicDBObject("dbRef", new DBRef("db", "collection", new ObjectId("01234567890123456789abcd"))), document); + } + + @Test + public void testToJson() { + BasicDBObject document = BasicDBObject.parse("{ 'int' : 1, 'string' : 'abc', '_id' : { '$oid' : '000000000000000000000000' }, " + + "'dbRef' : { $ref: 'collection', $id: { $oid: '01234567890123456789abcd' }, $db: 'db' } }"); + + assertEquals("{\"int\": 1, \"string\": \"abc\", \"_id\": {\"$oid\": \"000000000000000000000000\"}, " + + "\"dbRef\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"01234567890123456789abcd\"}, \"$db\": \"db\"}}", + document.toJson()); + assertEquals("{\"int\": 1, \"string\": \"abc\", \"_id\": ObjectId(\"000000000000000000000000\"), " + + "\"dbRef\": {\"$ref\": \"collection\", \"$id\": ObjectId(\"01234567890123456789abcd\"), \"$db\": \"db\"}}", + document.toJson(JsonWriterSettings.builder().outputMode(JsonMode.SHELL).build())); + assertEquals("{\"int\": 1, \"string\": \"abc\", \"_id\": {\"$oid\": \"000000000000000000000000\"}, " + + "\"dbRef\": {\"$ref\": \"collection\", \"$id\": {\"$oid\": \"01234567890123456789abcd\"}, \"$db\": \"db\"}}", + document.toJson(DECODER)); + assertEquals("{\"int\": 1, \"string\": \"abc\", \"_id\": ObjectId(\"000000000000000000000000\"), " + + "\"dbRef\": {\"$ref\": \"collection\", \"$id\": ObjectId(\"01234567890123456789abcd\"), \"$db\": \"db\"}}", + document.toJson(JsonWriterSettings.builder().outputMode(JsonMode.SHELL).build(), DECODER)); + } + + @Test + public void toJsonShouldRenderUuidAsStandard() { + UUID uuid = UUID.randomUUID(); + BasicDBObject doc = new BasicDBObject("_id", uuid); + + String json = doc.toJson(); + assertEquals(new BsonDocument("_id", new BsonBinary(uuid)), BsonDocument.parse(json)); + } + + @Test + public void toStringShouldRenderUuidAsStandard() { + UUID uuid = UUID.randomUUID(); + BasicDBObject doc = new BasicDBObject("_id", uuid); + + String json = 
doc.toString(); + assertEquals(new BsonDocument("_id", new BsonBinary(uuid)), BsonDocument.parse(json)); + } + + @Test + public void testGetDate() { + Date date = new Date(); + BasicDBObject doc = new BasicDBObject("foo", date); + assertEquals(doc.getDate("foo"), date); + } + + @Test + public void testGetDateWithDefault() { + Date date = new Date(); + BasicDBObject doc = new BasicDBObject("foo", date); + assertEquals(doc.getDate("foo", new Date()), date); + assertEquals(doc.getDate("bar", date), date); + } + + @Test + public void testGetObjectId() { + ObjectId objId = ObjectId.get(); + BasicDBObject doc = new BasicDBObject("foo", objId); + assertEquals(doc.getObjectId("foo"), objId); + } + + @Test + public void testGetObjectIdWithDefault() { + ObjectId objId = ObjectId.get(); + BasicDBObject doc = new BasicDBObject("foo", objId); + assertEquals(doc.getObjectId("foo", ObjectId.get()), objId); + assertEquals(doc.getObjectId("bar", objId), objId); + } + + @Test + public void testGetLongWithDefault() { + final long test = 100; + BasicDBObject doc = new BasicDBObject("foo", test); + assertEquals(test, doc.getLong("foo", 0L)); + assertEquals(0L, doc.getLong("bar", 0L)); + } + + @Test + public void testGetDoubleWithDefault() { + BasicDBObject doc = new BasicDBObject("foo", Double.MAX_VALUE); + assertEquals(Double.MAX_VALUE, doc.getDouble("foo", 0), 0.0); + assertEquals(Double.MIN_VALUE, doc.getDouble("bar", Double.MIN_VALUE), 0.0); + } + + @Test + public void testGetStringWithDefault() { + BasicDBObject doc = new BasicDBObject("foo", "badmf"); + assertEquals("badmf", doc.getString("foo", "ID")); + assertEquals("DEFAULT", doc.getString("bar", "DEFAULT")); + } + + @Test + public void testBuilderIsEmpty() { + BasicDBObjectBuilder b = BasicDBObjectBuilder.start(); + assertTrue(b.isEmpty()); + b.append("a", 1); + assertFalse(b.isEmpty()); + assertEquals(b.get(), new BasicDBObject("a", 1)); + } + + @Test + public void testBuilderNested() { + BasicDBObjectBuilder b = BasicDBObjectBuilder.start(); + b.add("a", 1); + b.push("b").append("c", 2).pop(); + DBObject a = b.get(); + assertEquals(a, new BasicDBObject("a", 1).append("b", new BasicDBObject("c", 2))); + } + + @Test + public void testDown1() { + BasicDBObjectBuilder b = BasicDBObjectBuilder.start(); + b.append("x", 1); + b.push("y"); + b.append("a", 2); + b.pop(); + b.push("z"); + b.append("b", 3); + + assertEquals(b.get(), + new BasicDBObject("x", 1).append("y", new BasicDBObject("a", 2)).append("z", new BasicDBObject("b", 3))); + } + + @Test + public void testEqualsAndHashCode() { + assertEquality(new BasicDBObject(), new BasicDBObject()); + + assertEquality(new BasicDBObject("x", 1), new BasicDBObject("x", 1)); + assertEquality(new BasicDBObject("x", 1), new BasicBSONObject("x", 1)); + + assertInequality(new BasicDBObject("x", 1), new BasicDBObject("x", 2)); + assertInequality(new BasicDBObject("x", 1), new BasicBSONObject("x", 2)); + + assertInequality(new BasicDBObject("x", 1), new BasicDBObject("y", 1)); + assertInequality(new BasicDBObject("x", 1), new BasicBSONObject("y", 1)); + + assertEquality(new BasicDBObject("x", asList(1, 2, 3)), new BasicDBObject("x", new int[]{1, 2, 3})); + assertEquality(new BasicDBObject("x", asList(1, 2, 3)), new BasicBSONObject("x", asList(1, 2, 3))); + + BasicDBList list = new BasicDBList(); + list.put(0, 1); + list.put(1, 2); + list.put(2, 3); + + assertEquality(new BasicDBObject("x", asList(1, 2, 3)), new BasicDBObject("x", list)); + assertEquality(new BasicDBObject("x", asList(1, 2, 3)), new 
BasicBSONObject("x", list)); + + + assertEquality(new BasicDBObject("x", 1).append("y", 2), new BasicDBObject("y", 2).append("x", 1)); + assertEquality(new BasicDBObject("x", 1).append("y", 2), new BasicBSONObject("y", 2).append("x", 1)); + + assertEquality(new BasicDBObject("a", new BasicDBObject("y", 2).append("x", 1)), + new BasicDBObject("a", new BasicDBObject("x", 1).append("y", 2))); + assertEquality(new BasicDBObject("a", new BasicDBObject("y", 2).append("x", 1)), + new BasicBSONObject("a", new BasicBSONObject("x", 1).append("y", 2))); + + assertEquality(new BasicDBObject("a", asList(new BasicDBObject("y", 2).append("x", 1))), + new BasicDBObject("a", asList(new BasicDBObject("x", 1).append("y", 2)))); + assertEquality(new BasicDBObject("a", asList(new BasicDBObject("y", 2).append("x", 1))), + new BasicBSONObject("a", asList(new BasicBSONObject("x", 1).append("y", 2)))); + + assertEquality(new BasicDBObject("a", new BasicDBList().put(1, new BasicDBObject("y", 2).append("x", 1))), + new BasicDBObject("a", new BasicDBList().put(1, new BasicDBObject("x", 1).append("y", 2)))); + assertEquality(new BasicDBObject("a", new BasicDBList().put(1, new BasicDBObject("y", 2).append("x", 1))), + new BasicBSONObject("a", new BasicBSONList().put(1, new BasicBSONObject("x", 1).append("y", 2)))); + + Map first = new HashMap<>(); + first.put("1", new BasicDBObject("y", 2).append("x", 1)); + first.put("2", new BasicDBObject("a", 2).append("b", 1)); + Map second = new TreeMap<>(); + second.put("2", new BasicDBObject("b", 1).append("a", 2)); + second.put("1", new BasicDBObject("x", 1).append("y", 2)); + Map third = new TreeMap<>(); + third.put("2", new BasicBSONObject("a", 2).append("b", 1)); + third.put("1", new BasicBSONObject("x", 1).append("y", 2)); + + assertEquality(new BasicDBObject("a", first), new BasicDBObject("a", second)); + assertEquality(new BasicDBObject("a", first), new BasicBSONObject("a", third)); + } + + @Test + public void testUuid() { + UUID uuid = UUID.randomUUID(); + BasicDBObject dbo1 = new BasicDBObject("_id", uuid); + BasicDBObject dbo2 = new BasicDBObject("_id", uuid); + + assertEquality(dbo1, dbo2); + } + + void assertEquality(final BSONObject x, final BSONObject y) { + assertEquals(x, y); + assertEquals(y, x); + assertEquals(x.hashCode(), y.hashCode()); + } + + void assertInequality(final BSONObject x, final BSONObject y) { + assertThat(x, not(y)); + assertThat(y, not(x)); + assertThat(x.hashCode(), not(y.hashCode())); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/ClientEncryptionSettingsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/ClientEncryptionSettingsSpecification.groovy new file mode 100644 index 00000000000..43deb3bd42c --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/ClientEncryptionSettingsSpecification.groovy @@ -0,0 +1,86 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb + +import spock.lang.Specification + +import javax.net.ssl.SSLContext +import java.util.concurrent.TimeUnit +import java.util.function.Supplier + +class ClientEncryptionSettingsSpecification extends Specification { + + def 'should return the configured values or defaults'() { + given: + def mongoClientSettings = MongoClientSettings.builder().build() + def keyVaultNamespace = "keyVaultNamespace" + def kmsProvider = ["provider": ["test" : "test"]] + def kmsProviderSupplier = ["provider": { ["test" : "test"] } as Supplier] + def kmsProviderSslContextMap = ["provider": SSLContext.getDefault()] + + when: + def options = ClientEncryptionSettings.builder() + .keyVaultMongoClientSettings(mongoClientSettings) + .keyVaultNamespace(keyVaultNamespace) + .kmsProviders(kmsProvider) + .build() + + then: + options.getKeyVaultMongoClientSettings() == mongoClientSettings + options.getKeyVaultNamespace() == keyVaultNamespace + options.getKmsProviders() == kmsProvider + options.getKmsProviderPropertySuppliers() == [:] + options.getKmsProviderSslContextMap() == [:] + options.getTimeout(TimeUnit.MILLISECONDS) == null + + when: + options = ClientEncryptionSettings.builder() + .keyVaultMongoClientSettings(mongoClientSettings) + .keyVaultNamespace(keyVaultNamespace) + .kmsProviders(kmsProvider) + .kmsProviderPropertySuppliers(kmsProviderSupplier) + .kmsProviderSslContextMap(kmsProviderSslContextMap) + .timeout(1_000, TimeUnit.MILLISECONDS) + .build() + + then: + options.getKeyVaultMongoClientSettings() == mongoClientSettings + options.getKeyVaultNamespace() == keyVaultNamespace + options.getKmsProviders() == kmsProvider + options.getKmsProviderPropertySuppliers() == kmsProviderSupplier + options.getKmsProviderSslContextMap() == kmsProviderSslContextMap + options.getTimeout(TimeUnit.MILLISECONDS) == 1_000 + } + + def 'should throw an exception if the timeout is invalid'() { + given: + def builder = ClientEncryptionSettings.builder() + + when: + builder.timeout(500, TimeUnit.NANOSECONDS) + + then: + thrown(IllegalArgumentException) + + when: + builder.timeout(-1, TimeUnit.SECONDS) + + then: + thrown(IllegalArgumentException) + } + +} diff --git a/driver-core/src/test/unit/com/mongodb/ClientSessionOptionsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/ClientSessionOptionsSpecification.groovy new file mode 100644 index 00000000000..98bf163f9e3 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/ClientSessionOptionsSpecification.groovy @@ -0,0 +1,81 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb + +import spock.lang.Specification + +import java.util.concurrent.TimeUnit + +class ClientSessionOptionsSpecification extends Specification { + + def 'should have correct defaults'() { + when: + def options = ClientSessionOptions.builder().build() + + then: + options.isCausallyConsistent() == null + options.defaultTransactionOptions == TransactionOptions.builder().build() + } + + def 'should apply options set in builder'() { + when: + def options = ClientSessionOptions.builder() + .causallyConsistent(causallyConsistent) + .defaultTransactionOptions(transactionOptions) + .build() + + then: + options.isCausallyConsistent() == causallyConsistent + options.defaultTransactionOptions == transactionOptions + + where: + causallyConsistent << [true, false] + transactionOptions << [TransactionOptions.builder().build(), TransactionOptions.builder().readConcern(ReadConcern.LOCAL).build()] + } + + def 'should throw an exception if the defaultTimeout is set and negative'() { + given: + def builder = ClientSessionOptions.builder() + + when: + builder.defaultTimeout(500, TimeUnit.NANOSECONDS) + + then: + thrown(IllegalArgumentException) + + when: + builder.defaultTimeout(-1, TimeUnit.SECONDS) + + then: + thrown(IllegalArgumentException) + } + + def 'should apply options to builder'() { + expect: + ClientSessionOptions.builder(baseOptions).build() == baseOptions + + where: + baseOptions << [ClientSessionOptions.builder().build(), + ClientSessionOptions.builder() + .causallyConsistent(true) + .defaultTransactionOptions(TransactionOptions.builder() + .writeConcern(WriteConcern.MAJORITY) + .readConcern(ReadConcern + .MAJORITY).build()) + .build()] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/ConnectionStringSpecification.groovy b/driver-core/src/test/unit/com/mongodb/ConnectionStringSpecification.groovy new file mode 100644 index 00000000000..72fdf108698 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/ConnectionStringSpecification.groovy @@ -0,0 +1,848 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb + +import org.bson.UuidRepresentation +import spock.lang.Specification +import spock.lang.Unroll + +import static com.mongodb.MongoCompressor.LEVEL +import static com.mongodb.MongoCompressor.createZlibCompressor +import static com.mongodb.MongoCompressor.createZstdCompressor +import static com.mongodb.MongoCredential.createCredential +import static com.mongodb.MongoCredential.createGSSAPICredential +import static com.mongodb.MongoCredential.createMongoX509Credential +import static com.mongodb.MongoCredential.createPlainCredential +import static com.mongodb.MongoCredential.createScramSha1Credential +import static com.mongodb.MongoCredential.createScramSha256Credential +import static com.mongodb.ReadPreference.primary +import static com.mongodb.ReadPreference.secondary +import static com.mongodb.ReadPreference.secondaryPreferred +import static java.util.Arrays.asList +import static java.util.concurrent.TimeUnit.MILLISECONDS + +/** + * Update {@link ConnectionStringUnitTest} instead. + */ +class ConnectionStringSpecification extends Specification { + static final LONG_STRING = new String((1..256).collect { (byte) 1 } as byte[]) + + @Unroll + def 'should parse #connectionString into correct components'() { + expect: + connectionString.getHosts().size() == num + connectionString.getHosts() == hosts + connectionString.getDatabase() == database + connectionString.getCollection() == collection + connectionString.getUsername() == username + connectionString.getPassword() == password + + where: + connectionString | num | hosts | database | collection | username | password + new ConnectionString('mongodb://db.example.com') | 1 | ['db.example.com'] | null | null | null | null + new ConnectionString('mongodb://10.0.0.1') | 1 | ['10.0.0.1'] | null | null | null | null + new ConnectionString('mongodb://[::1]') | 1 | ['[::1]'] | null | null | null | null + new ConnectionString('mongodb://%2Ftmp%2Fmongo' + + 'db-27017.sock') | 1 | ['/tmp/mongodb' + + '-27017.sock'] | null | null | null | null + new ConnectionString('mongodb://foo/bar') | 1 | ['foo'] | 'bar' | null | null | null + new ConnectionString('mongodb://10.0.0.1/bar') | 1 | ['10.0.0.1'] | 'bar' | null | null | null + new ConnectionString('mongodb://[::1]/bar') | 1 | ['[::1]'] | 'bar' | null | null | null + new ConnectionString('mongodb://%2Ftmp%2Fmongo' + + 'db-27017.sock/bar') | 1 | ['/tmp/mongodb' + + '-27017.sock'] | 'bar' | null | null | null + new ConnectionString('mongodb://localhost/' + + 'test.my.coll') | 1 | ['localhost'] | 'test' | 'my.coll' | null | null + new ConnectionString('mongodb://foo/bar.goo') | 1 | ['foo'] | 'bar' | 'goo' | null | null + new ConnectionString('mongodb://foo/s,db') | 1 | ['foo'] | 's,db'| null | null | null + new ConnectionString('mongodb://foo/s%2Cdb') | 1 | ['foo'] | 's,db'| null | null | null + new ConnectionString('mongodb://user:pass@' + + 'host/bar') | 1 | ['host'] | 'bar' | null | 'user' | 'pass' as char[] + new ConnectionString('mongodb://user:pass@' + + 'host:27011/bar') | 1 | ['host:27011'] | 'bar' | null | 'user' | 'pass' as char[] + new ConnectionString('mongodb://user:pass@' + + '10.0.0.1:27011/bar') | 1 | ['10.0.0.1:27011'] | 'bar' | null | 'user' | 'pass' as char[] + new ConnectionString('mongodb://user:pass@' + + '[::1]:27011/bar') | 1 | ['[::1]:27011'] | 'bar' | null | 'user' | 'pass' as char[] + new ConnectionString('mongodb://user:pass@' + + 'host:7,' + + 'host2:8,' + + 'host3:9/bar') | 3 | ['host2:8', + 'host3:9', + 'host:7'] | 'bar' | null | 'user' | 'pass' as 
char[] + new ConnectionString('mongodb://user:pass@' + + '10.0.0.1:7,' + + '[::1]:8,' + + 'host3:9/bar') | 3 | ['10.0.0.1:7', + '[::1]:8', + 'host3:9'] | 'bar' | null | 'user' | 'pass' as char[] + new ConnectionString('mongodb://user:pass@' + + '%2Ftmp%2Fmongodb-27017.sock,' + + '%2Ftmp%2Fmongodb-27018.sock,' + + '%2Ftmp%2Fmongodb-27019.sock/bar') | 3 | ['/tmp/mongodb-27017.sock', + '/tmp/mongodb-27018.sock', + '/tmp/mongodb-27019.sock' + ] | 'bar' | null | 'user' | 'pass' as char[] + } + + def 'should throw exception if mongodb+srv host contains a port'() { + when: + new ConnectionString('mongodb+srv://host1:27017') + + then: + thrown(IllegalArgumentException) + } + + def 'should throw exception if mongodb+srv contains multiple hosts'() { + when: + new ConnectionString('mongodb+srv://host1,host2') + + then: + thrown(IllegalArgumentException) + } + + def 'should throw exception if directConnection is used with mongodb+srv'() { + when: + new ConnectionString('mongodb+srv://test5.test.build.10gen.cc/?directConnection=true') + + then: + thrown(IllegalArgumentException) + } + + + def 'should correctly parse different write concerns'() { + expect: + uri.getWriteConcern() == writeConcern + + where: + uri | writeConcern + new ConnectionString('mongodb://localhost') | null + new ConnectionString('mongodb://localhost/?safe=true') | WriteConcern.ACKNOWLEDGED + new ConnectionString('mongodb://localhost/?safe=false') | WriteConcern.UNACKNOWLEDGED + new ConnectionString('mongodb://localhost/?wTimeout=5') | WriteConcern.ACKNOWLEDGED + .withWTimeout(5, MILLISECONDS) + new ConnectionString('mongodb://localhost/?journal=true') | WriteConcern.ACKNOWLEDGED.withJournal(true) + new ConnectionString('mongodb://localhost/?w=2&wtimeout=5&journal=true') | new WriteConcern(2, 5).withJournal(true) + new ConnectionString('mongodb://localhost/?w=majority&wtimeout=5&j=true') | new WriteConcern('majority') + .withWTimeout(5, MILLISECONDS).withJournal(true) + } + + @Unroll + def 'should treat trailing slash before query parameters as optional'() { + expect: + uri.getApplicationName() == appName + uri.getDatabase() == db + + where: + uri | appName | db + new ConnectionString('mongodb://mongodb.com') | null | null + new ConnectionString('mongodb://mongodb.com?') | null | null + new ConnectionString('mongodb://mongodb.com/') | null | null + new ConnectionString('mongodb://mongodb.com/?') | null | null + new ConnectionString('mongodb://mongodb.com/test') | null | "test" + new ConnectionString('mongodb://mongodb.com/test?') | null | "test" + new ConnectionString('mongodb://mongodb.com/?appName=a1') | "a1" | null + new ConnectionString('mongodb://mongodb.com?appName=a1') | "a1" | null + new ConnectionString('mongodb://mongodb.com/?appName=a1/a2') | "a1/a2" | null + new ConnectionString('mongodb://mongodb.com?appName=a1/a2') | "a1/a2" | null + new ConnectionString('mongodb://mongodb.com/test?appName=a1') | "a1" | "test" + new ConnectionString('mongodb://mongodb.com/test?appName=a1/a2') | "a1/a2" | "test" + } + + def 'should correctly parse different UUID representations'() { + expect: + uri.getUuidRepresentation() == uuidRepresentation + + where: + uri | uuidRepresentation + new ConnectionString('mongodb://localhost') | null + new ConnectionString('mongodb://localhost/?uuidRepresentation=unspecified') | UuidRepresentation.UNSPECIFIED + new ConnectionString('mongodb://localhost/?uuidRepresentation=javaLegacy') | UuidRepresentation.JAVA_LEGACY + new ConnectionString('mongodb://localhost/?uuidRepresentation=csharpLegacy') | 
UuidRepresentation.C_SHARP_LEGACY + new ConnectionString('mongodb://localhost/?uuidRepresentation=pythonLegacy') | UuidRepresentation.PYTHON_LEGACY + new ConnectionString('mongodb://localhost/?uuidRepresentation=standard') | UuidRepresentation.STANDARD + } + + @Unroll + def 'should correctly parse retryWrites'() { + expect: + uri.getRetryWritesValue() == retryWritesValue + + where: + uri | retryWritesValue + new ConnectionString('mongodb://localhost/') | null + new ConnectionString('mongodb://localhost/?retryWrites=false') | false + new ConnectionString('mongodb://localhost/?retryWrites=true') | true + new ConnectionString('mongodb://localhost/?retryWrites=foos') | null + } + + @Unroll + def 'should parse a range of boolean values'() { + expect: + uri.getSslEnabled() == value + + where: + uri | value + new ConnectionString('mongodb://localhost/?tls=true') | true + new ConnectionString('mongodb://localhost/?tls=yes') | true + new ConnectionString('mongodb://localhost/?tls=1') | true + new ConnectionString('mongodb://localhost/?tls=false') | false + new ConnectionString('mongodb://localhost/?tls=no') | false + new ConnectionString('mongodb://localhost/?tls=0') | false + new ConnectionString('mongodb://localhost/?tls=foo') | null + new ConnectionString('mongodb://localhost') | null + } + + @Unroll + def 'should correctly parse retryReads'() { + expect: + uri.getRetryReads() == retryReads + + where: + uri | retryReads + new ConnectionString('mongodb://localhost/') | null + new ConnectionString('mongodb://localhost/?retryReads=false') | false + new ConnectionString('mongodb://localhost/?retryReads=true') | true + new ConnectionString('mongodb://localhost/?retryReads=foos') | null + } + + @Unroll + def 'should correctly parse URI options for #type'() { + expect: + connectionString.getMinConnectionPoolSize() == 5 + connectionString.getMaxConnectionPoolSize() == 10 + connectionString.getMaxWaitTime() == 150 + connectionString.getMaxConnectionIdleTime() == 200 + connectionString.getMaxConnectionLifeTime() == 300 + connectionString.getMaxConnecting() == 1 + connectionString.getConnectTimeout() == 2500 + connectionString.getSocketTimeout() == 5500 + connectionString.getWriteConcern() == new WriteConcern(1, 2500) + connectionString.getReadPreference() == primary() + connectionString.getRequiredReplicaSetName() == 'test' + connectionString.getSslEnabled() + connectionString.getSslInvalidHostnameAllowed() + connectionString.getServerSelectionTimeout() == 25000 + connectionString.getLocalThreshold() == 30 + connectionString.getHeartbeatFrequency() == 20000 + connectionString.getApplicationName() == 'app1' + + where: + connectionString << + [new ConnectionString('mongodb://localhost/?minPoolSize=5&maxPoolSize=10&waitQueueTimeoutMS=150&' + + 'maxIdleTimeMS=200&maxLifeTimeMS=300&maxConnecting=1&replicaSet=test&' + + 'connectTimeoutMS=2500&socketTimeoutMS=5500&' + + 'safe=false&w=1&wtimeout=2500&readPreference=primary&ssl=true&' + + 'sslInvalidHostNameAllowed=true&' + + 'serverSelectionTimeoutMS=25000&' + + 'localThresholdMS=30&' + + 'heartbeatFrequencyMS=20000&' + + 'appName=app1'), + new ConnectionString('mongodb://localhost/?minPoolSize=5;maxPoolSize=10;waitQueueTimeoutMS=150;' + + 'maxIdleTimeMS=200;maxLifeTimeMS=300;maxConnecting=1;replicaSet=test;' + + 'connectTimeoutMS=2500;socketTimeoutMS=5500;' + + 'safe=false;w=1;wtimeout=2500;readPreference=primary;ssl=true;' + + 'sslInvalidHostNameAllowed=true;' + + 'serverSelectionTimeoutMS=25000;' + + 'localThresholdMS=30;' + + 'heartbeatFrequencyMS=20000;' + + 
'appName=app1'), + new ConnectionString('mongodb://localhost/test?minPoolSize=5;maxPoolSize=10;waitQueueTimeoutMS=150;' + + 'maxIdleTimeMS=200&maxLifeTimeMS=300&maxConnecting=1&replicaSet=test;' + + 'connectTimeoutMS=2500;' + + 'socketTimeoutMS=5500&' + + 'safe=false&w=1;wtimeout=2500;readPreference=primary;ssl=true;' + + 'sslInvalidHostNameAllowed=true;' + + 'serverSelectionTimeoutMS=25000&' + + 'localThresholdMS=30;' + + 'heartbeatFrequencyMS=20000&' + + 'appName=app1')] + //for documentation, i.e. the Unroll description for each type + type << ['amp', 'semi', 'mixed'] + } + + def 'should parse options to enable TLS'() { + when: + def connectionString = new ConnectionString('mongodb://localhost/?ssl=false') + + then: + connectionString.getSslEnabled() == false + + when: + connectionString = new ConnectionString('mongodb://localhost/?ssl=true') + + then: + connectionString.getSslEnabled() + + when: + connectionString = new ConnectionString('mongodb://localhost/?ssl=foo') + + then: + connectionString.getSslEnabled() == null + + when: + connectionString = new ConnectionString('mongodb://localhost/?tls=false') + + then: + connectionString.getSslEnabled() == false + + when: + connectionString = new ConnectionString('mongodb://localhost/?tls=true') + + then: + connectionString.getSslEnabled() + + when: + connectionString = new ConnectionString('mongodb://localhost/?tls=foo') + + then: + connectionString.getSslEnabled() == null + + when: + connectionString = new ConnectionString('mongodb://localhost/?tls=true&ssl=false') + + then: + thrown(IllegalArgumentException) + + when: + connectionString = new ConnectionString('mongodb://localhost/?tls=false&ssl=true') + + then: + thrown(IllegalArgumentException) + } + + def 'should parse options to enable TLS invalid host names'() { + when: + def connectionString = new ConnectionString('mongodb://localhost/?ssl=true&sslInvalidHostNameAllowed=false') + + then: + connectionString.getSslInvalidHostnameAllowed() == false + + when: + connectionString = new ConnectionString('mongodb://localhost/?ssl=true&sslInvalidHostNameAllowed=true') + + then: + connectionString.getSslInvalidHostnameAllowed() + + when: + connectionString = new ConnectionString('mongodb://localhost/?ssl=true&sslInvalidHostNameAllowed=foo') + + then: + connectionString.getSslInvalidHostnameAllowed() == null + + when: + connectionString = new ConnectionString('mongodb://localhost/?tls=true&tlsAllowInvalidHostnames=false') + + then: + connectionString.getSslInvalidHostnameAllowed() == false + + when: + connectionString = new ConnectionString('mongodb://localhost/?tls=true&tlsAllowInvalidHostnames=true') + + then: + connectionString.getSslInvalidHostnameAllowed() + + when: + connectionString = new ConnectionString('mongodb://localhost/?tls=true&tlsAllowInvalidHostnames=foo') + + then: + connectionString.getSslInvalidHostnameAllowed() == null + + when: + connectionString = new ConnectionString( + 'mongodb://localhost/?tls=true&tlsAllowInvalidHostnames=false&sslInvalidHostNameAllowed=true') + + then: + connectionString.getSslInvalidHostnameAllowed() == false + + when: + connectionString = new ConnectionString( + 'mongodb://localhost/?tls=true&tlsAllowInvalidHostnames=true&sslInvalidHostNameAllowed=false') + + then: + connectionString.getSslInvalidHostnameAllowed() + } + + def 'should parse options to enable unsecured TLS'() { + when: + def connectionString = new ConnectionString('mongodb://localhost/?tls=true&tlsInsecure=true') + + then: + connectionString.getSslInvalidHostnameAllowed() + + when: 
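+ // conversely, tlsInsecure=false should leave invalid host names disallowed, as the assertions below check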
+ connectionString = new ConnectionString('mongodb://localhost/?tls=true&tlsInsecure=false') + + then: + connectionString.getSslInvalidHostnameAllowed() == false + + when: + connectionString = new ConnectionString('mongodb://localhost/?tls=true&tlsAllowInvalidHostnames=false') + + then: + connectionString.getSslInvalidHostnameAllowed() == false + + when: + connectionString = new ConnectionString('mongodb://localhost/?tls=true&tlsAllowInvalidHostnames=true') + + then: + connectionString.getSslInvalidHostnameAllowed() + } + + @Unroll + def 'should throw IllegalArgumentException when the proxy settings are invalid'() { + when: + new ConnectionString(connectionString) + + then: + IllegalArgumentException exception = thrown(IllegalArgumentException) + assert exception.message == cause + + where: + cause | connectionString + 'proxyPort can only be specified with proxyHost' | 'mongodb://localhost:27017/?proxyPort=1' + 'proxyPort should be within the valid range (0 to 65535)'| 'mongodb://localhost:27017/?proxyHost=a&proxyPort=-1' + 'proxyPort should be within the valid range (0 to 65535)'| 'mongodb://localhost:27017/?proxyHost=a&proxyPort=65536' + 'proxyUsername can only be specified with proxyHost' | 'mongodb://localhost:27017/?proxyUsername=1' + 'proxyUsername cannot be empty' | 'mongodb://localhost:27017/?proxyHost=a&proxyUsername=' + 'proxyPassword can only be specified with proxyHost' | 'mongodb://localhost:27017/?proxyPassword=1' + 'proxyPassword cannot be empty' | 'mongodb://localhost:27017/?proxyHost=a&proxyPassword=' + 'username\'s length in bytes cannot be greater than 255' | 'mongodb://localhost:27017/?proxyHost=a&proxyUsername=' + LONG_STRING + 'password\'s length in bytes cannot be greater than 255' | 'mongodb://localhost:27017/?proxyHost=a&proxyPassword=' + LONG_STRING + 'Both proxyUsername' + + ' and proxyPassword must be set together.' 
+ ' They cannot be set individually' | 'mongodb://localhost:27017/?proxyHost=a&proxyPassword=1' + } + + @Unroll + def 'should create connection string with valid proxy socket settings'() { + when: + def connectionString = new ConnectionString(uri) + + then: + assert connectionString.getProxyHost() == proxyHost + assert connectionString.getProxyPort() == 1081 + + where: + uri | proxyHost + 'mongodb://localhost:27017/?proxyHost=2001:db8:85a3::8a2e:370:7334&proxyPort=1081'| '2001:db8:85a3::8a2e:370:7334' + 'mongodb://localhost:27017/?proxyHost=::5000&proxyPort=1081' | '::5000' + 'mongodb://localhost:27017/?proxyHost=%3A%3A5000&proxyPort=1081' | '::5000' + 'mongodb://localhost:27017/?proxyHost=0::1&proxyPort=1081' | '0::1' + 'mongodb://localhost:27017/?proxyHost=hyphen-domain.com&proxyPort=1081' | 'hyphen-domain.com' + 'mongodb://localhost:27017/?proxyHost=sub.domain.c.com.com&proxyPort=1081' | 'sub.domain.c.com.com' + 'mongodb://localhost:27017/?proxyHost=192.168.0.1&proxyPort=1081' | '192.168.0.1' + } + + @Unroll + def 'should create connection string with valid proxy credentials settings'() { + when: + def connectionString = new ConnectionString(uri) + + then: + assert connectionString.getProxyPassword() == proxyPassword + assert connectionString.getProxyUsername() == proxyUsername + + where: + uri | proxyPassword | proxyUsername + 'mongodb://localhost:27017/?proxyHost=test4&proxyPassword=pass%21wor%24&proxyUsername=user%21name'| 'pass!wor$' | 'user!name' + 'mongodb://localhost:27017/?proxyHost=::5000&proxyPassword=pass!wor$&proxyUsername=user!name' | 'pass!wor$' | 'user!name' + } + + def 'should set proxy settings properties'() { + when: + def connectionString = new ConnectionString('mongodb+srv://test5.cc/?' + + 'proxyPort=1080' + + '&proxyHost=proxy.com' + + '&proxyUsername=username' + + '&proxyPassword=password') + + then: + connectionString.getProxyHost() == 'proxy.com' + connectionString.getProxyPort() == 1080 + connectionString.getProxyUsername() == 'username' + connectionString.getProxyPassword() == 'password' + } + + + @Unroll + def 'should throw IllegalArgumentException when the string #cause'() { + when: + new ConnectionString(connectionString) + + then: + thrown(IllegalArgumentException) + + where: + + cause | connectionString + 'is not a connection string' | 'hello world' + 'is missing a host' | 'mongodb://' + 'has an empty host' | 'mongodb://localhost:27017,,localhost:27019' + 'has a malformed IPv6 host' | 'mongodb://[::1' + 'has unescaped colons' | 'mongodb://locahost::1' + 'contains an invalid port string' | 'mongodb://localhost:twenty' + 'contains an invalid port negative' | 'mongodb://localhost:-1' + 'contains an invalid port out of range' | 'mongodb://localhost:1000000' + 'contains multiple at-signs' | 'mongodb://user@123:password@localhost' + 'contains multiple colons' | 'mongodb://user:123:password@localhost' + 'invalid integer in options' | 'mongodb://localhost/?wTimeout=five' + 'has incomplete options' | 'mongodb://localhost/?wTimeout' + 'has an unknown auth mechanism' | 'mongodb://user:password@localhost/?authMechanism=postItNote' + 'invalid readConcern' | 'mongodb://localhost:27017/?readConcernLevel=pickThree' + 'contains tags but no mode' | 'mongodb://localhost:27017/?readPreferenceTags=dc:ny' + 'contains max staleness but no mode' | 'mongodb://localhost:27017/?maxStalenessSeconds=100.5' + 'contains tags and primary mode' | 'mongodb://localhost:27017/?readPreference=primary&readPreferenceTags=dc:ny' + 'contains max staleness and primary mode' | 
'mongodb://localhost:27017/?readPreference=primary&maxStalenessSeconds=100' + 'contains non-integral max staleness' | 'mongodb://localhost:27017/?readPreference=secondary&maxStalenessSeconds=100.0' + 'contains GSSAPI mechanism with no user' | 'mongodb://localhost:27017/?authMechanism=GSSAPI' + 'contains SCRAM mechanism with no user' | 'mongodb://localhost:27017/?authMechanism=SCRAM-SHA-1' + 'contains MONGODB mechanism with no user' | 'mongodb://localhost:27017/?authMechanism=MONGODB-CR' + 'contains PLAIN mechanism with no user' | 'mongodb://localhost:27017/?authMechanism=PLAIN' + 'contains multiple hosts and directConnection' | 'mongodb://localhost:27017,localhost:27018/?directConnection=true' + } + + def 'should have correct defaults for options'() { + when: + def connectionString = new ConnectionString('mongodb://localhost') + + then: + connectionString.getMaxConnectionPoolSize() == null + connectionString.getMaxWaitTime() == null + connectionString.getMaxConnecting() == null + connectionString.getConnectTimeout() == null + connectionString.getSocketTimeout() == null + connectionString.getWriteConcern() == null + connectionString.getReadConcern() == null + connectionString.getReadPreference() == null + connectionString.getRequiredReplicaSetName() == null + connectionString.getSslEnabled() == null + connectionString.getSslInvalidHostnameAllowed() == null + connectionString.getApplicationName() == null + connectionString.getCompressorList() == [] + connectionString.getRetryWritesValue() == null + connectionString.getRetryReads() == null + } + + @Unroll + def 'should support all credential types'() { + expect: + uri.credential == credential + + where: + uri | credential + new ConnectionString('mongodb://jeff:123@localhost') | createCredential('jeff', 'admin', '123'.toCharArray()) + new ConnectionString('mongodb://jeff:123@localhost/?' + + '&authSource=test') | createCredential('jeff', 'test', '123'.toCharArray()) + new ConnectionString('mongodb://jeff:123@localhost/?' + + 'authMechanism=MONGODB-CR') | createCredential('jeff', 'admin', '123'.toCharArray()) + new ConnectionString('mongodb://jeff:123@localhost/?' + + 'authMechanism=MONGODB-CR' + + '&authSource=test') | createCredential('jeff', 'test', '123'.toCharArray()) + new ConnectionString('mongodb://jeff:123@localhost/?' + + 'authMechanism=SCRAM-SHA-1') | createScramSha1Credential('jeff', 'admin', '123'.toCharArray()) + new ConnectionString('mongodb://jeff:123@localhost/?' + + 'authMechanism=SCRAM-SHA-1' + + '&authSource=test') | createScramSha1Credential('jeff', 'test', '123'.toCharArray()) + new ConnectionString('mongodb://jeff:123@localhost/?' + + 'authMechanism=SCRAM-SHA-256') | createScramSha256Credential('jeff', 'admin', '123'.toCharArray()) + new ConnectionString('mongodb://jeff:123@localhost/?' + + 'authMechanism=SCRAM-SHA-256' + + '&authSource=test') | createScramSha256Credential('jeff', 'test', '123'.toCharArray()) + new ConnectionString('mongodb://jeff@localhost/?' + + 'authMechanism=GSSAPI') | createGSSAPICredential('jeff') + new ConnectionString('mongodb://jeff:123@localhost/?' + + 'authMechanism=PLAIN') | createPlainCredential('jeff', '$external', '123'.toCharArray()) + new ConnectionString('mongodb://jeff@localhost/?' + + 'authMechanism=MONGODB-X509') | createMongoX509Credential('jeff') + new ConnectionString('mongodb://localhost/?' + + 'authMechanism=MONGODB-X509') | createMongoX509Credential() + new ConnectionString('mongodb://jeff@localhost/?' 
+ + 'authMechanism=GSSAPI' + + '&gssapiServiceName=foo') | createGSSAPICredential('jeff') + .withMechanismProperty('SERVICE_NAME', 'foo') + new ConnectionString('mongodb://jeff@localhost/?' + + 'authMechanism=GSSAPI' + + '&authMechanismProperties=' + + 'SERVICE_NAME:foo') | createGSSAPICredential('jeff') + .withMechanismProperty('SERVICE_NAME', 'foo') + new ConnectionString('mongodb://jeff@localhost/?' + + 'authMechanism=GSSAPI' + + '&authMechanismProperties=' + + 'SERVICE_NAME :foo') | createGSSAPICredential('jeff') + .withMechanismProperty('SERVICE_NAME', 'foo') + new ConnectionString('mongodb://jeff@localhost/?' + + 'authMechanism=GSSAPI' + + '&authMechanismProperties=' + + 'SERVICE_NAME:foo,' + + 'CANONICALIZE_HOST_NAME:true,' + + 'SERVICE_REALM:AWESOME') | createGSSAPICredential('jeff') + .withMechanismProperty('SERVICE_NAME', 'foo') + .withMechanismProperty('CANONICALIZE_HOST_NAME', true) + .withMechanismProperty('SERVICE_REALM', 'AWESOME') + } + + def 'should ignore authSource if there is no credential'() { + expect: + new ConnectionString('mongodb://localhost/?authSource=test').credential == null + } + + def 'should ignore authMechanismProperties if there is no credential'() { + expect: + new ConnectionString('mongodb://localhost/?&authMechanismProperties=SERVICE_REALM:AWESOME').credential == null + } + + def 'should throw an IllegalArgumentException when given invalid authMechanismProperties'() { + when: + new ConnectionString('mongodb://jeff@localhost/?' + + 'authMechanism=GSSAPI' + + '&authMechanismProperties=' + + 'SERVICE_NAME=foo,' + + 'CANONICALIZE_HOST_NAME=true,' + + 'SERVICE_REALM=AWESOME') + + then: + thrown(IllegalArgumentException) + + when: + new ConnectionString('mongodb://jeff@localhost/?' + + 'authMechanism=GSSAPI' + + '&authMechanismProperties=' + + 'SERVICE_NAMEbar') // missing = + + then: + thrown(IllegalArgumentException) + } + + @Unroll + def 'should correctly parse read preference for #readPreference'() { + expect: + uri.getReadPreference() == readPreference + + where: + uri | readPreference + new ConnectionString('mongodb://localhost') | null + new ConnectionString('mongodb://localhost/' + + '?readPreference=primary') | primary() + new ConnectionString('mongodb://localhost/' + + '?readPreference=secondary') | secondary() + new ConnectionString('mongodb://localhost/' + + '?readPreference=secondaryPreferred') | secondaryPreferred() + new ConnectionString('mongodb://localhost/' + + '?readPreference=secondaryPreferred' + + '&readPreferenceTags=dc:ny,rack:1' + + '&readPreferenceTags=dc:ny' + + '&readPreferenceTags=') | secondaryPreferred([new TagSet(asList(new Tag('dc', 'ny'), + new Tag('rack', '1'))), + new TagSet(asList(new Tag('dc', 'ny'))), + new TagSet()]) + new ConnectionString('mongodb://localhost/' + + '?readPreference=secondary' + + '&maxStalenessSeconds=120') | secondary(120000, MILLISECONDS) + new ConnectionString('mongodb://localhost/' + + '?readPreference=secondary' + + '&maxStalenessSeconds=0') | secondary(0, MILLISECONDS) + new ConnectionString('mongodb://localhost/' + + '?readPreference=secondary' + + '&maxStalenessSeconds=-1') | secondary() + new ConnectionString('mongodb://localhost/' + + '?readPreference=primary' + + '&maxStalenessSeconds=-1') | primary() + } + + @Unroll + def 'should correctly parse read concern for #readConcern'() { + expect: + uri.getReadConcern() == readConcern + + where: + uri | readConcern + new ConnectionString('mongodb://localhost/') | null + new ConnectionString('mongodb://localhost/?readConcernLevel=local') | 
ReadConcern.LOCAL + new ConnectionString('mongodb://localhost/?readConcernLevel=majority') | ReadConcern.MAJORITY + } + + @Unroll + def 'should parse compressors'() { + expect: + uri.getCompressorList() == [compressor] + + where: + uri | compressor + new ConnectionString('mongodb://localhost/?compressors=zlib') | createZlibCompressor() + new ConnectionString('mongodb://localhost/?compressors=zlib' + + '&zlibCompressionLevel=5') | createZlibCompressor().withProperty(LEVEL, 5) + new ConnectionString('mongodb://localhost/?compressors=zstd') | createZstdCompressor() + } + + def 'should be equal to another instance with the same string values'() { + expect: + uri1 == uri2 + uri1.hashCode() == uri2.hashCode() + + where: + uri1 | uri2 + new ConnectionString('mongodb://user:pass@host1:1/') | new ConnectionString('mongodb://user:pass@host1:1/') + new ConnectionString('mongodb://user:pass@host1:1,host2:2,' + + 'host3:3/bar') | new ConnectionString('mongodb://user:pass@host3:3,host1:1,' + + 'host2:2/bar') + new ConnectionString('mongodb://localhost/' + + '?readPreference=secondaryPreferred' + + '&readPreferenceTags=dc:ny,rack:1' + + '&readPreferenceTags=dc:ny' + + '&readPreferenceTags=') | new ConnectionString('mongodb://localhost/' + + '?readPreference=secondaryPreferred' + + '&readPreferenceTags=dc:ny, rack:1' + + '&readPreferenceTags=dc:ny' + + '&readPreferenceTags=') + new ConnectionString('mongodb://localhost/?readPreference=' + + 'secondaryPreferred') | new ConnectionString('mongodb://localhost/?readPreference=' + + 'secondaryPreferred') + new ConnectionString('mongodb://ross:123@localhost/?' + + 'authMechanism=SCRAM-SHA-1') | new ConnectionString('mongodb://ross:123@localhost/?' + + 'authMechanism=SCRAM-SHA-1') + new ConnectionString('mongodb://ross:123@localhost/?' + + 'proxyHost=proxy.com' + + '&proxyPort=1080' + + '&proxyUsername=username' + + '&proxyPassword=password') | new ConnectionString('mongodb://ross:123@localhost/?' 
+ + 'proxyHost=proxy.com' + + '&proxyPort=1080' + + '&proxyUsername=username' + + '&proxyPassword=password') + + new ConnectionString('mongodb://localhost/db.coll' + + '?minPoolSize=5;' + + 'maxPoolSize=10;' + + 'waitQueueTimeoutMS=150;' + + 'maxIdleTimeMS=200;' + + 'maxLifeTimeMS=300;replicaSet=test;' + + 'maxConnecting=1;' + + 'connectTimeoutMS=2500;' + + 'socketTimeoutMS=5500;' + + 'safe=false;w=1;wtimeout=2500;' + + 'fsync=true;readPreference=primary;' + + 'directConnection=true;' + + 'ssl=true') | new ConnectionString('mongodb://localhost/db.coll?minPoolSize=5;' + + 'maxPoolSize=10;' + + 'waitQueueTimeoutMS=150;' + + 'maxIdleTimeMS=200&maxLifeTimeMS=300' + + '&replicaSet=test;' + + 'maxConnecting=1;' + + 'connectTimeoutMS=2500;' + + 'socketTimeoutMS=5500&safe=false&w=1;' + + 'wtimeout=2500;fsync=true' + + '&directConnection=true' + + '&readPreference=primary;ssl=true') + } + + def 'should not be equal to another ConnectionString with different string values'() { + expect: + uri1 != uri2 + uri1.hashCode() != uri2.hashCode() + + where: + uri1 | uri2 + new ConnectionString('mongodb://user:pass@host1:1/') | new ConnectionString('mongodb://user:pass@host1:2/') + new ConnectionString('mongodb://user:pass@host1:1,host2:2,' + + 'host3:3/bar') | new ConnectionString('mongodb://user:pass@host1:1,host2:2,' + + 'host4:4/bar') + new ConnectionString('mongodb://localhost/?readPreference=' + + 'secondaryPreferred') | new ConnectionString('mongodb://localhost/?readPreference=' + + 'secondary') + new ConnectionString('mongodb://localhost/' + + '?readPreference=secondaryPreferred' + + '&readPreferenceTags=dc:ny,rack:1' + + '&readPreferenceTags=dc:ny' + + '&readPreferenceTags=' + + '&maxConnecting=1') | new ConnectionString('mongodb://localhost/' + + '?readPreference=secondaryPreferred' + + '&readPreferenceTags=dc:ny' + + '&readPreferenceTags=dc:ny, rack:1' + + '&readPreferenceTags=' + + '&maxConnecting=2') + new ConnectionString('mongodb://ross:123@localhost/?' + + 'authMechanism=SCRAM-SHA-1') | new ConnectionString('mongodb://ross:123@localhost/?' + + 'authMechanism=GSSAPI') + new ConnectionString('mongodb://ross:123@localhost/?' + + 'proxyHost=proxy.com') | new ConnectionString('mongodb://ross:123@localhost/?' + + 'proxyHost=1proxy.com') + new ConnectionString('mongodb://ross:123@localhost/?' + + 'proxyHost=proxy.com&proxyPort=1080') | new ConnectionString('mongodb://ross:123@localhost/?' + + 'proxyHost=proxy.com1.com&proxyPort=1081') + new ConnectionString('mongodb://ross:123@localhost/?' + + 'proxyHost=proxy.com&proxyPassword=password' + + '&proxyUsername=username') | new ConnectionString('mongodb://ross:123@localhost/?' 
+ + 'proxyHost=proxy.com&proxyPassword=password1' + + '&proxyUsername=username') + } + + def 'should recognize SRV protocol'() { + when: + def connectionString = new ConnectionString('mongodb+srv://test5.test.build.10gen.cc') + + then: + connectionString.isSrvProtocol() + connectionString.hosts == ['test5.test.build.10gen.cc'] + } + + // sslEnabled defaults to true with mongodb+srv but can be overridden via query parameter + def 'should set sslEnabled property with SRV protocol'() { + expect: + connectionString.getSslEnabled() == sslEnabled + + where: + connectionString | sslEnabled + new ConnectionString('mongodb+srv://test5.test.build.10gen.cc') | true + new ConnectionString('mongodb+srv://test5.test.build.10gen.cc/?tls=true') | true + new ConnectionString('mongodb+srv://test5.test.build.10gen.cc/?ssl=true') | true + new ConnectionString('mongodb+srv://test5.test.build.10gen.cc/?tls=false') | false + new ConnectionString('mongodb+srv://test5.test.build.10gen.cc/?ssl=false') | false + } + + + // these next two tests are functionally part of the initial-dns-seedlist-discovery specification tests, but since those + // tests require that the driver connects to an actual replica set, it isn't possible to create specification tests + // with URIs containing user names, since connection to a replica set that doesn't have that user defined would fail. + // So to ensure there is proper test coverage of an authSource property specified in a TXT record, those tests are added here. + def 'should use authSource from TXT record'() { + given: + def uri = new ConnectionString('mongodb+srv://bob:pwd@test5.test.build.10gen.cc/') + + expect: + uri.credential == createCredential('bob', 'thisDB', 'pwd'.toCharArray()) + } + + def 'should override authSource from TXT record with authSource from connectionString'() { + given: + def uri = new ConnectionString('mongodb+srv://bob:pwd@test5.test.build.10gen.cc/?authSource=otherDB') + + expect: + uri.credential == createCredential('bob', 'otherDB', 'pwd'.toCharArray()) + } + + def 'should use DnsClient to resolve TXT record'() { + given: + def dnsClient = { def name, def type -> ['replicaSet=java'] } + + when: + def connectionString = new ConnectionString('mongodb+srv://free-java.mongodb-dev.net', dnsClient) + + then: + connectionString.getRequiredReplicaSetName() == 'java' + } +} diff --git a/driver-core/src/test/unit/com/mongodb/ConnectionStringTest.java b/driver-core/src/test/unit/com/mongodb/ConnectionStringTest.java new file mode 100644 index 00000000000..ba8b7763a8d --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/ConnectionStringTest.java @@ -0,0 +1,70 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb; + +import org.bson.BsonDocument; +import org.junit.Test; +import org.junit.runners.Parameterized; +import util.JsonPoweredTestHelper; + +import java.util.Collection; + +import static org.junit.Assume.assumeFalse; + +// See https://github.com/mongodb/specifications/tree/master/source/connection-string/tests +public class ConnectionStringTest extends AbstractConnectionStringTest { + public ConnectionStringTest(final String filename, final String description, final String input, final BsonDocument definition) { + super(filename, description, input, definition); + } + + @Test + public void shouldPassAllOutcomes() { + // Java driver currently throws an IllegalArgumentException for these tests + assumeFalse(getDescription().equals("Empty integer option values are ignored")); + assumeFalse(getDescription().equals("Comma in a key value pair causes a warning")); + + if (getFilename().equals("invalid-uris.json")) { + testInvalidUris(); + } else if (getFilename().equals("valid-auth.json")) { + testValidAuth(); + } else if (getFilename().equals("valid-db-with-dotted-name.json")) { + testValidHostIdentifiers(); + testValidAuth(); + } else if (getFilename().equals("valid-host_identifiers.json")) { + testValidHostIdentifiers(); + } else if (getFilename().equals("valid-options.json")) { + testValidOptions(); + } else if (getFilename().equals("valid-unix_socket-absolute.json")) { + testValidHostIdentifiers(); + } else if (getFilename().equals("valid-unix_socket-relative.json")) { + testValidHostIdentifiers(); + } else if (getFilename().equals("valid-warnings.json")) { + testValidHostIdentifiers(); + if (!getDefinition().get("options").isNull()) { + testValidOptions(); + } + } else { + throw new IllegalArgumentException("Unsupported file: " + getFilename()); + } + } + + + @Parameterized.Parameters(name = "{1}") + public static Collection data() { + return JsonPoweredTestHelper.getTestData("connection-string"); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/ConnectionStringUnitTest.java b/driver-core/src/test/unit/com/mongodb/ConnectionStringUnitTest.java new file mode 100644 index 00000000000..0b3dd1a0814 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/ConnectionStringUnitTest.java @@ -0,0 +1,112 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb; + +import com.mongodb.assertions.Assertions; +import com.mongodb.connection.ServerMonitoringMode; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import java.io.UnsupportedEncodingException; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; + +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; + +final class ConnectionStringUnitTest { + private static final String DEFAULT_OPTIONS = "mongodb://localhost/?"; + @Test + void defaults() { + ConnectionString connectionStringDefault = new ConnectionString(DEFAULT_OPTIONS); + assertAll(() -> assertNull(connectionStringDefault.getServerMonitoringMode())); + } + + @Test + public void mustDecodeNonOidcAsWhole() { + // this string allows us to check if there is no double decoding + String rawValue = encode("ot her"); + assertAll(() -> { + // even though only one part has been encoded by the user, the whole option value (pre-split) must be decoded + ConnectionString cs = new ConnectionString( + "mongodb://foo:bar@example.com/?authMechanism=GSSAPI&authMechanismProperties=" + + "SERVICE_NAME:" + encode(rawValue) + ",CANONICALIZE_HOST_NAME:true&authSource=$external"); + MongoCredential credential = Assertions.assertNotNull(cs.getCredential()); + assertEquals(rawValue, credential.getMechanismProperty("SERVICE_NAME", null)); + }, () -> { + ConnectionString cs = new ConnectionString( + "mongodb://foo:bar@example.com/?authMechanism=GSSAPI&authMechanismProperties=" + + encode("SERVICE_NAME:" + rawValue + ",CANONICALIZE_HOST_NAME:true&authSource=$external")); + MongoCredential credential = Assertions.assertNotNull(cs.getCredential()); + assertEquals(rawValue, credential.getMechanismProperty("SERVICE_NAME", null)); + }); + } + + private static String encode(final String string) { + try { + return URLEncoder.encode(string, StandardCharsets.UTF_8.name()); + } catch (UnsupportedEncodingException e) { + throw new RuntimeException(e); + } + } + + @ParameterizedTest + @ValueSource(strings = {DEFAULT_OPTIONS + "serverMonitoringMode=stream"}) + void equalAndHashCode(final String connectionString) { + ConnectionString default1 = new ConnectionString(DEFAULT_OPTIONS); + ConnectionString default2 = new ConnectionString(DEFAULT_OPTIONS); + ConnectionString actual1 = new ConnectionString(connectionString); + ConnectionString actual2 = new ConnectionString(connectionString); + assertAll( + () -> assertEquals(default1, default2), + () -> assertEquals(default1.hashCode(), default2.hashCode()), + () -> assertEquals(actual1, actual2), + () -> assertEquals(actual1.hashCode(), actual2.hashCode()), + () -> assertNotEquals(default1, actual1) + ); + } + + @Test + void serverMonitoringMode() { + assertAll( + () -> assertEquals(ServerMonitoringMode.POLL, + new ConnectionString(DEFAULT_OPTIONS + "serverMonitoringMode=poll").getServerMonitoringMode()), + () -> assertThrows(IllegalArgumentException.class, + () -> new ConnectionString(DEFAULT_OPTIONS + "serverMonitoringMode=invalid")) + ); + } + + + @ParameterizedTest + @ValueSource(strings = {"mongodb://foo:bar/@hostname/java?", "mongodb://foo:bar?@hostname/java/", + 
"mongodb+srv://foo:bar/@hostname/java?", "mongodb+srv://foo:bar?@hostname/java/", + "mongodb://foo:bar/@[::1]:27018", "mongodb://foo:bar?@[::1]:27018", + "mongodb://foo:12345678/@hostname", "mongodb+srv://foo:12345678/@hostname", + "mongodb://foo:12345678/@hostname", "mongodb+srv://foo:12345678/@hostname", + "mongodb://foo:12345678%40hostname", "mongodb+srv://foo:12345678%40hostname", + "mongodb://foo:12345678@bar@hostname", "mongodb+srv://foo:12345678@bar@hostname" + }) + void unescapedPasswordsShouldNotBeLeakedInExceptionMessages(final String input) { + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> new ConnectionString(input)); + assertFalse(exception.getMessage().contains("bar")); + assertFalse(exception.getMessage().contains("12345678")); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/CustomMatchers.groovy b/driver-core/src/test/unit/com/mongodb/CustomMatchers.groovy new file mode 100644 index 00000000000..37ff1a66616 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/CustomMatchers.groovy @@ -0,0 +1,152 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb + +import org.hamcrest.BaseMatcher +import org.hamcrest.Description + +import java.lang.reflect.Field +import java.lang.reflect.Modifier + +@SuppressWarnings('NoDef') +class CustomMatchers { + + static nullList = [null, null] + static isTheSameAs(final Object e) { + [ + matches : { a -> compare(e, a) }, + describeTo : { Description description -> description.appendText("Operation has the same attributes ${e.class.name}") + }, + describeMismatch: { a, description -> describer(e, a, description) } + ] as BaseMatcher + } + + static isTheSameAs(final Object e, final List ignoreNames) { + [ + matches : { a -> compare(e, a, ignoreNames) }, + describeTo : { Description description -> description.appendText("Operation has the same attributes ${e.class.name}") + }, + describeMismatch: { a, description -> describer(e, a, ignoreNames, description) } + ] as BaseMatcher + } + + static compare(expected, actual) { + compare(expected, actual, []) + } + + static compare(expected, actual, ignoreNames) { + if (expected == actual) { + return true + } + if (expected == null || actual == null) { + return false + } + if (actual.class.name != expected.class.name) { + return false + } + getFields(actual.class).findAll { !ignoreNames.contains(it.name) } .collect { + it.setAccessible(true) + def actualPropertyValue = it.get(actual) + def expectedPropertyValue = it.get(expected) + + if (nominallyTheSame(it.name)) { + return actualPropertyValue.class == expectedPropertyValue.class + } else if (actualPropertyValue != expectedPropertyValue) { + if ([actualPropertyValue, expectedPropertyValue].contains(null) + && [actualPropertyValue, expectedPropertyValue] != nullList) { + return false + } else if (List.isCase(actualPropertyValue) && List.isCase(expectedPropertyValue) + && (actualPropertyValue.size() == 
expectedPropertyValue.size())) { + def i = -1 + return actualPropertyValue.collect { a -> i++; compare(a, expectedPropertyValue[i]) }.every { it } + } else if (actualPropertyValue.class != null && actualPropertyValue.class.name.startsWith('com.mongodb') + && actualPropertyValue.class == expectedPropertyValue.class) { + return compare(actualPropertyValue, expectedPropertyValue) + } + return false + } + true + }.every { it } + } + + + static describer(expected, actual, description) { + describer(expected, actual, [], description) + } + + static describer(expected, actual, ignoreNames, description) { + if (expected == actual) { + return true + } + if (expected == null || actual == null) { + description.appendText("different values: $expected != $actual, ") + return false + } + if (actual.class.name != expected.class.name) { + description.appendText("different classes: ${expected.class.name} != ${actual.class.name}, ") + return false + } + + getFields(actual.class).findAll { !ignoreNames.contains(it.name) } .collect { + it.setAccessible(true) + def actualPropertyValue = it.get(actual) + def expectedPropertyValue = it.get(expected) + if (nominallyTheSame(it.name)) { + if (actualPropertyValue.class != expectedPropertyValue.class) { + description.appendText("different classes in $it.name :" + + " ${expectedPropertyValue.class.name} != ${actualPropertyValue.class.name}, ") + return false + } + } else if (actualPropertyValue != expectedPropertyValue) { + if (([actualPropertyValue, expectedPropertyValue].contains(null) + || [actualPropertyValue.class, expectedPropertyValue.class].contains(null)) + && [actualPropertyValue, expectedPropertyValue] != nullList) { + description.appendText("different values in $it.name : ${expectedPropertyValue} != ${actualPropertyValue}\n") + return false + } else if (List.isCase(actualPropertyValue) && List.isCase(expectedPropertyValue) + && (actualPropertyValue.size() == expectedPropertyValue.size())) { + def i = -1 + actualPropertyValue.each { a -> + i++; if (!compare(a, expectedPropertyValue[i])) { + describer(a, expectedPropertyValue[i], description) + } + }.every { it } + } else if (actualPropertyValue.class.name.startsWith('com.mongodb') + && actualPropertyValue.class == expectedPropertyValue.class) { + return describer(actualPropertyValue, expectedPropertyValue, description) + } + description.appendText("different values in $it.name : ${expectedPropertyValue} != ${actualPropertyValue}\n") + return false + } + true + } + } + + static List getFields(Class curClass) { + if (curClass == Object) { + return [] + } + def fields = getFields(curClass.getSuperclass()) + fields.addAll(curClass.declaredFields.findAll { !it.synthetic && !Modifier.isStatic(it.modifiers) && !it.name.contains('$') }) + fields + } + + + static nominallyTheSame(String propertyName ) { + propertyName in ['decoder', 'executor'] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/DBObjectCodecProviderSpecification.groovy b/driver-core/src/test/unit/com/mongodb/DBObjectCodecProviderSpecification.groovy new file mode 100644 index 00000000000..8e9f46255bd --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/DBObjectCodecProviderSpecification.groovy @@ -0,0 +1,52 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb + +import org.bson.codecs.DateCodec +import org.bson.codecs.configuration.CodecRegistries +import org.bson.types.BSONTimestamp +import spock.lang.Specification + +class DBObjectCodecProviderSpecification extends Specification { + def provider = new DBObjectCodecProvider() + def registry = CodecRegistries.fromProviders(provider) + + def 'should provide codec for BSONTimestamp'() { + expect: + provider.get(BSONTimestamp, registry).class == BSONTimestampCodec + } + + def 'should provide codec for Date'() { + expect: + provider.get(Date, registry).class == DateCodec + } + + def 'should provide codec for class assignable to DBObject'() { + expect: + provider.get(BasicDBObject, registry).class == DBObjectCodec + } + + def 'should not provide codec for class assignable to DBObject that is also assignable to List'() { + expect: + provider.get(BasicDBList, registry) == null + } + + def 'should not provide codec for unexpected class'() { + expect: + provider.get(Integer, registry) == null + } +} diff --git a/driver-core/src/test/unit/com/mongodb/DBObjectCodecSpecification.groovy b/driver-core/src/test/unit/com/mongodb/DBObjectCodecSpecification.groovy new file mode 100644 index 00000000000..bfe5551d6f3 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/DBObjectCodecSpecification.groovy @@ -0,0 +1,326 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb + +import org.bson.BsonArray +import org.bson.BsonBinary +import org.bson.BsonBinaryReader +import org.bson.BsonBinarySubType +import org.bson.BsonDocument +import org.bson.BsonDocumentReader +import org.bson.BsonDocumentWriter +import org.bson.BsonInt32 +import org.bson.BsonJavaScriptWithScope +import org.bson.BsonNull +import org.bson.BsonObjectId +import org.bson.BsonString +import org.bson.BsonSymbol +import org.bson.BsonTimestamp +import org.bson.codecs.BinaryCodec +import org.bson.codecs.BsonValueCodecProvider +import org.bson.codecs.DecoderContext +import org.bson.codecs.EncoderContext +import org.bson.codecs.UuidCodec +import org.bson.codecs.ValueCodecProvider +import org.bson.types.BSONTimestamp +import org.bson.types.Binary +import org.bson.types.CodeWScope +import org.bson.types.ObjectId +import org.bson.types.Symbol +import spock.lang.Specification +import spock.lang.Unroll + +import java.nio.ByteBuffer +import java.sql.Timestamp + +import static org.bson.UuidRepresentation.C_SHARP_LEGACY +import static org.bson.UuidRepresentation.JAVA_LEGACY +import static org.bson.UuidRepresentation.PYTHON_LEGACY +import static org.bson.UuidRepresentation.STANDARD +import static org.bson.UuidRepresentation.UNSPECIFIED +import static org.bson.codecs.configuration.CodecRegistries.fromCodecs +import static org.bson.codecs.configuration.CodecRegistries.fromProviders +import static org.bson.codecs.configuration.CodecRegistries.fromRegistries + +class DBObjectCodecSpecification extends Specification { + + def bsonDoc = new BsonDocument() + def codecRegistry = fromRegistries(fromCodecs(new UuidCodec(JAVA_LEGACY)), + fromProviders([new ValueCodecProvider(), new DBObjectCodecProvider(), new BsonValueCodecProvider()])) + def dbObjectCodec = new DBObjectCodec(codecRegistry).withUuidRepresentation(JAVA_LEGACY) + + def 'default registry should include necessary providers'() { + when: + def registry = DBObjectCodec.getDefaultRegistry() + + then: + registry.get(Integer) != null + registry.get(BsonInt32) != null + registry.get(BSONTimestamp) != null + registry.get(BasicDBObject) != null + } + + def 'should encode with default registry'() { + given: + def document = new BsonDocument() + def dBObject = new BasicDBObject('a', 0).append('b', new BsonInt32(1)).append('c', new BSONTimestamp()) + + when: + new DBObjectCodec().encode(new BsonDocumentWriter(document), dBObject, EncoderContext.builder().build()) + + then: + document == new BsonDocument('a', new BsonInt32(0)).append('b', new BsonInt32(1)).append('c', new BsonTimestamp()) + } + + def 'should encode and decode UUID as UUID'() { + given: + def uuid = UUID.fromString('01020304-0506-0708-090a-0b0c0d0e0f10') + def doc = new BasicDBObject('uuid', uuid) + + when: + dbObjectCodec.encode(new BsonDocumentWriter(bsonDoc), doc, EncoderContext.builder().build()) + + then: + bsonDoc.getBinary('uuid') == new BsonBinary(BsonBinarySubType.UUID_LEGACY, + [8, 7, 6, 5, 4, 3, 2, 1, 16, 15, 14, 13, 12, 11, 10, 9] as byte[]) + when: + def decodedUuid = dbObjectCodec.decode(new BsonDocumentReader(bsonDoc), DecoderContext.builder().build()) + + then: + decodedUuid.get('uuid') == uuid + } + + def 'should decode binary subtypes for UUID that are not 16 bytes into Binary'() { + given: + def reader = new BsonBinaryReader(ByteBuffer.wrap(bytes as byte[])) + + when: + def document = new DBObjectCodec().decode(reader, DecoderContext.builder().build()) + + then: + value == document.get('f') + + where: + value | bytes + new Binary((byte) 0x03, (byte[]) [115, 
116, 11]) | [16, 0, 0, 0, 5, 102, 0, 3, 0, 0, 0, 3, 115, 116, 11, 0] + new Binary((byte) 0x04, (byte[]) [115, 116, 11]) | [16, 0, 0, 0, 5, 102, 0, 3, 0, 0, 0, 4, 115, 116, 11, 0] + } + + @SuppressWarnings(['LineLength']) + @Unroll + def 'should decode binary subtype 3 for UUID'() { + given: + def reader = new BsonBinaryReader(ByteBuffer.wrap(bytes as byte[])) + + when: + def document = new DBObjectCodec(fromCodecs(new UuidCodec(representation), new BinaryCodec())) + .withUuidRepresentation(representation) + .decode(reader, DecoderContext.builder().build()) + + then: + value == document.get('f') + + where: + representation | value | bytes + JAVA_LEGACY | UUID.fromString('08070605-0403-0201-100f-0e0d0c0b0a09') | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0] + C_SHARP_LEGACY | UUID.fromString('04030201-0605-0807-090a-0b0c0d0e0f10') | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0] + PYTHON_LEGACY | UUID.fromString('01020304-0506-0708-090a-0b0c0d0e0f10') | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0] + STANDARD | new Binary((byte) 3, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[]) | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0] + UNSPECIFIED | new Binary((byte) 3, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[]) | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 3, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0] + } + + def 'should encode and decode UUID as UUID with alternate UUID Codec'() { + given: + def codecWithAlternateUUIDCodec = new DBObjectCodec(fromRegistries(fromCodecs(new UuidCodec(STANDARD)), codecRegistry)) + .withUuidRepresentation(STANDARD) + def uuid = UUID.fromString('01020304-0506-0708-090a-0b0c0d0e0f10') + def doc = new BasicDBObject('uuid', uuid) + + when: + codecWithAlternateUUIDCodec.encode(new BsonDocumentWriter(bsonDoc), doc, EncoderContext.builder().build()) + + then: + bsonDoc.getBinary('uuid') == new BsonBinary(BsonBinarySubType.UUID_STANDARD, + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[]) + + when: + def decodedDoc = codecWithAlternateUUIDCodec.decode(new BsonDocumentReader(bsonDoc), DecoderContext.builder().build()) + + then: + decodedDoc.get('uuid') == uuid + } + + @SuppressWarnings(['LineLength']) + @Unroll + def 'should decode binary subtype 4 for UUID'() { + given: + def reader = new BsonBinaryReader(ByteBuffer.wrap(bytes as byte[])) + + when: + def document = new DBObjectCodec().withUuidRepresentation(representation) + .decode(reader, DecoderContext.builder().build()) + + then: + value == document.get('f') + + where: + representation | value | bytes + STANDARD | UUID.fromString('01020304-0506-0708-090a-0b0c0d0e0f10') | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 4, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0] + JAVA_LEGACY | new Binary((byte) 4, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[]) | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 4, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0] + C_SHARP_LEGACY | new Binary((byte) 4, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[]) | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 4, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0] + PYTHON_LEGACY | new Binary((byte) 4, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[]) | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 4, 1, 2, 3, 4, 5, 6, 7, 8, 9, 
10, 11, 12, 13, 14, 15, 16, 0]
+        UNSPECIFIED    | new Binary((byte) 4, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16] as byte[]) | [29, 0, 0, 0, 5, 102, 0, 16, 0, 0, 0, 4, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 0]
+    }
+
+    def 'should encode and decode byte array value as binary'() {
+        given:
+        def array = [0, 1, 2, 4, 4] as byte[]
+        def doc = new BasicDBObject('byteArray', array)
+
+        when:
+        dbObjectCodec.encode(new BsonDocumentWriter(bsonDoc), doc, EncoderContext.builder().build())
+
+        then:
+        bsonDoc.getBinary('byteArray') == new BsonBinary(array)
+
+        when:
+        DBObject decodedDoc = dbObjectCodec.decode(new BsonDocumentReader(bsonDoc), DecoderContext.builder().build())
+
+        then:
+        decodedDoc.get('byteArray') == array
+    }
+
+    def 'should encode and decode Binary value as binary'() {
+        given:
+        def subType = (byte) 42
+        def array = [0, 1, 2, 4, 4] as byte[]
+        def doc = new BasicDBObject('byteArray', new Binary(subType, array))
+
+        when:
+        dbObjectCodec.encode(new BsonDocumentWriter(bsonDoc), doc, EncoderContext.builder().build())
+
+        then:
+        bsonDoc.getBinary('byteArray') == new BsonBinary(subType, array)
+
+        when:
+        DBObject decodedDoc = dbObjectCodec.decode(new BsonDocumentReader(bsonDoc), DecoderContext.builder().build())
+
+        then:
+        decodedDoc.get('byteArray') == new Binary(subType, array)
+    }
+
+    def 'should encode Symbol to BsonSymbol and decode BsonSymbol to String'() {
+        given:
+        def symbol = new Symbol('symbol')
+        def doc = new BasicDBObject('symbol', symbol)
+
+        when:
+        dbObjectCodec.encode(new BsonDocumentWriter(bsonDoc), doc, EncoderContext.builder().build())
+
+        then:
+        bsonDoc.get('symbol') == new BsonSymbol('symbol')
+
+        when:
+        def decodedSymbol = dbObjectCodec.decode(new BsonDocumentReader(bsonDoc), DecoderContext.builder().build())
+
+        then:
+        decodedSymbol.get('symbol') == symbol.toString()
+    }
+
+    def 'should encode java.sql.Date as date'() {
+        given:
+        def sqlDate = new java.sql.Date(System.currentTimeMillis())
+        def doc = new BasicDBObject('d', sqlDate)
+
+        when:
+        dbObjectCodec.encode(new BsonDocumentWriter(bsonDoc), doc, EncoderContext.builder().build())
+        def decodedDoc = dbObjectCodec.decode(new BsonDocumentReader(bsonDoc), DecoderContext.builder().build())
+
+        then:
+        ((Date) decodedDoc.get('d')).getTime() == sqlDate.getTime()
+    }
+
+    def 'should encode java.sql.Timestamp as date'() {
+        given:
+        def sqlTimestamp = new Timestamp(System.currentTimeMillis())
+        def doc = new BasicDBObject('d', sqlTimestamp)
+
+        when:
+        dbObjectCodec.encode(new BsonDocumentWriter(bsonDoc), doc, EncoderContext.builder().build())
+        def decodedDoc = dbObjectCodec.decode(new BsonDocumentReader(bsonDoc), DecoderContext.builder().build())
+
+        then:
+        ((Date) decodedDoc.get('d')).getTime() == sqlTimestamp.getTime()
+    }
+
+    def 'should encode collectible document with _id'() {
+        given:
+        def doc = new BasicDBObject('y', 1).append('_id', new BasicDBObject('x', 1))
+
+        when:
+        dbObjectCodec.encode(new BsonDocumentWriter(bsonDoc), doc,
+                EncoderContext.builder().isEncodingCollectibleDocument(true).build())
+
+        then:
+        bsonDoc == new BsonDocument('_id', new BsonDocument('x', new BsonInt32(1))).append('y', new BsonInt32(1))
+    }
+
+    def 'should encode collectible document without _id'() {
+        given:
+        def doc = new BasicDBObject('y', 1)
+
+        when:
+        dbObjectCodec.encode(new BsonDocumentWriter(bsonDoc), doc,
+                EncoderContext.builder().isEncodingCollectibleDocument(true).build())
+
+        then:
+        bsonDoc == new BsonDocument('y', new BsonInt32(1))
+    }
+
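+    // A reference summary of the mappings exercised by the next test, gathered from its own
+    // assertions (a reader aid, not an exhaustive statement of DBObjectCodec behavior):
+    //   DBRef('c', 1)      -> BsonDocument {$ref: 'c', $id: 1}
+    //   Map                -> BsonDocument
+    //   List and Object[]  -> BsonArray
+    //   CodeWScope         -> BsonJavaScriptWithScope
+    //   byte[]             -> BsonBinary (default subtype)
+    //   Symbol             -> BsonSymbol
+    //   null               -> BsonNull
+    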
def 'should encode all types'() { + given: + def id = new ObjectId() + def dbRef = new DBRef('c', 1) + def doc = new BasicDBObject('_id', id) + .append('n', null) + .append('r', dbRef) + .append('m', ['f': 1]) + .append('i', [1, 2]) + .append('c', new CodeWScope('c', new BasicDBObject('f', 1))) + .append('b', new byte[0]) + .append('a', [1, 2].toArray()) + .append('s', new Symbol('s')) + + when: + dbObjectCodec.encode(new BsonDocumentWriter(bsonDoc), doc, EncoderContext.builder().build()) + + then: + bsonDoc == new BsonDocument('_id', new BsonObjectId(id)) + .append('n', new BsonNull()) + .append('r', new BsonDocument('$ref', new BsonString('c')).append('$id', new BsonInt32(1))) + .append('m', new BsonDocument('f', new BsonInt32(1))) + .append('i', new BsonArray([new BsonInt32(1), new BsonInt32(2)])) + .append('c', new BsonJavaScriptWithScope('c', new BsonDocument('f', new BsonInt32(1)))) + .append('b', new BsonBinary(new byte[0])) + .append('a', new BsonArray([new BsonInt32(1), new BsonInt32(2)])) + .append('s', new BsonSymbol('s')) + } +} diff --git a/driver-core/src/test/unit/com/mongodb/DBRefCodecSpecification.groovy b/driver-core/src/test/unit/com/mongodb/DBRefCodecSpecification.groovy new file mode 100644 index 00000000000..fe29b23f866 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/DBRefCodecSpecification.groovy @@ -0,0 +1,92 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.mongodb
+
+import org.bson.BsonDocument
+import org.bson.BsonDocumentReader
+import org.bson.BsonDocumentWriter
+import org.bson.BsonInt32
+import org.bson.BsonString
+import org.bson.codecs.DecoderContext
+import org.bson.codecs.EncoderContext
+import org.bson.codecs.ValueCodecProvider
+import org.bson.codecs.configuration.CodecRegistry
+import spock.lang.Specification
+
+import static org.bson.codecs.configuration.CodecRegistries.fromProviders
+
+class DBRefCodecSpecification extends Specification {
+    def 'provider should return codec for DBRef class'() {
+        expect:
+        new DBRefCodecProvider().get(DBRef, Stub(CodecRegistry)) instanceof DBRefCodec
+    }
+
+    def 'provider should return null for non-DBRef class'() {
+        expect:
+        !new DBRefCodecProvider().get(Integer, Stub(CodecRegistry))
+    }
+
+    def 'provider should be equal to another of the same class'() {
+        expect:
+        new DBRefCodecProvider() == new DBRefCodecProvider()
+    }
+
+    def 'provider should not be equal to anything else'() {
+        expect:
+        new DBRefCodecProvider() != new ValueCodecProvider()
+    }
+
+    def 'codec should encode DBRef'() {
+        given:
+        def ref = new DBRef('foo', 1)
+        def writer = new BsonDocumentWriter(new BsonDocument())
+
+        when:
+        writer.writeStartDocument()
+        writer.writeName('ref')
+        new DBRefCodec(fromProviders([new ValueCodecProvider()])).encode(writer, ref, EncoderContext.builder().build())
+        writer.writeEndDocument()
+
+        then:
+        writer.document == new BsonDocument('ref', new BsonDocument('$ref', new BsonString('foo')).append('$id', new BsonInt32(1)))
+    }
+
+    def 'codec should encode DBRef with database name'() {
+        given:
+        def ref = new DBRef('mydb', 'foo', 1)
+        def writer = new BsonDocumentWriter(new BsonDocument())
+
+        when:
+        writer.writeStartDocument()
+        writer.writeName('ref')
+        new DBRefCodec(fromProviders([new ValueCodecProvider()])).encode(writer, ref, EncoderContext.builder().build())
+        writer.writeEndDocument()
+
+        then:
+        writer.document == new BsonDocument('ref',
+                new BsonDocument('$ref', new BsonString('foo')).append('$id', new BsonInt32(1)).append('$db', new BsonString('mydb')))
+    }
+
+    def 'codec should throw UnsupportedOperationException on decode'() {
+        when:
+        new DBRefCodec(fromProviders([new ValueCodecProvider()])).decode(new BsonDocumentReader(new BsonDocument()),
+                DecoderContext.builder().build())
+
+        then:
+        thrown(UnsupportedOperationException)
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/DBRefSpecification.groovy b/driver-core/src/test/unit/com/mongodb/DBRefSpecification.groovy
new file mode 100644
index 00000000000..f6dcfe3bfe1
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/DBRefSpecification.groovy
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.mongodb + +import spock.lang.Specification + +class DBRefSpecification extends Specification { + + def 'should set properties'() { + when: + DBRef referenceA = new DBRef('foo.bar', 5) + DBRef referenceB = new DBRef('mydb', 'foo.bar', 5) + DBRef referenceC = new DBRef(null, 'foo.bar', 5) + + then: + referenceA.databaseName == null + referenceA.collectionName == 'foo.bar' + referenceA.id == 5 + referenceB.databaseName == 'mydb' + referenceB.collectionName == 'foo.bar' + referenceB.id == 5 + referenceC.databaseName == null + referenceC.collectionName == 'foo.bar' + referenceC.id == 5 + } + + def 'constructor should throw if collection name is null'() { + when: + new DBRef(null, 5) + + then: + thrown(IllegalArgumentException) + } + + def 'constructor should throw if id is null'() { + when: + new DBRef('foo.bar', null) + + then: + thrown(IllegalArgumentException) + } + + def 'equivalent instances should be equal and have the same hash code'() { + given: + DBRef referenceA = new DBRef('foo.bar', 4) + DBRef referenceB = new DBRef('foo.bar', 4) + DBRef referenceC = new DBRef('mydb', 'foo.bar', 4) + DBRef referenceD = new DBRef('mydb', 'foo.bar', 4) + + expect: + referenceA.equals(referenceA) + referenceA.equals(referenceB) + referenceC.equals(referenceD) + referenceA.hashCode() == referenceB.hashCode() + referenceC.hashCode() == referenceD.hashCode() + } + + def 'non-equivalent instances should not be equal and have different hash codes'() { + given: + DBRef referenceA = new DBRef('foo.bar', 4) + DBRef referenceB = new DBRef('foo.baz', 4) + DBRef referenceC = new DBRef('foo.bar', 5) + DBRef referenceD = new DBRef('mydb', 'foo.bar', 4) + DBRef referenceE = new DBRef('yourdb', 'foo.bar', 4) + + expect: + !referenceA.equals(null) + !referenceA.equals('some other class instance') + !referenceA.equals(referenceB) + referenceA.hashCode() != referenceB.hashCode() + !referenceA.equals(referenceC) + referenceA.hashCode() != referenceC.hashCode() + !referenceA.equals(referenceD) + referenceA.hashCode() != referenceD.hashCode() + !referenceD.equals(referenceE) + referenceD.hashCode() != referenceE.hashCode() + } + + def 'should stringify'() { + expect: + new DBRef('foo.bar', 4).toString() == '{ "$ref" : "foo.bar", "$id" : "4" }' + new DBRef('mydb', 'foo.bar', 4).toString() == '{ "$ref" : "foo.bar", "$id" : "4", "$db" : "mydb" }' + } + + def 'testSerialization'() throws Exception { + given: + DBRef originalDBRef = new DBRef('col', 42) + + ByteArrayOutputStream outputStream = new ByteArrayOutputStream() + ObjectOutputStream objectOutputStream = new ObjectOutputStream(outputStream) + + when: + objectOutputStream.writeObject(originalDBRef) + ObjectInputStream objectInputStream = new ObjectInputStream(new ByteArrayInputStream(outputStream.toByteArray())) + DBRef deserializedDBRef = (DBRef) objectInputStream.readObject() + + then: + originalDBRef == deserializedDBRef + } +} diff --git a/driver-core/src/test/unit/com/mongodb/DocumentToDBRefTransformerSpecification.groovy b/driver-core/src/test/unit/com/mongodb/DocumentToDBRefTransformerSpecification.groovy new file mode 100644 index 00000000000..34c2ebfa43e --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/DocumentToDBRefTransformerSpecification.groovy @@ -0,0 +1,70 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb + +import org.bson.Document +import spock.lang.Specification + + +class DocumentToDBRefTransformerSpecification extends Specification { + def transformer = new DocumentToDBRefTransformer() + + def 'should not transform a value that is not a Document'() { + given: + def str = 'some string' + + expect: + transformer.transform(str).is(str) + } + + def 'should not transform a Document that does not have both $ref and $id fields'() { + expect: + transformer.transform(doc).is(doc) + + where: + doc << [new Document(), + new Document('foo', 'bar'), + new Document('$ref', 'bar'), + new Document('$id', 'bar')] + } + + def 'should transform a Document that has both $ref and $id fields to a DBRef'() { + when: + def doc = new Document('$ref', 'foo').append('$id', 1) + + then: + transformer.transform(doc) == new DBRef('foo', 1) + } + + def 'should transform a Document that has $ref and $id and $db fields to a DBRef'() { + when: + def doc = new Document('$ref', 'foo').append('$id', 1).append('$db', 'mydb') + + then: + transformer.transform(doc) == new DBRef('mydb', 'foo', 1) + } + + def 'should be equal to another instance'() { + expect: + transformer == new DocumentToDBRefTransformer() + } + + def 'should not be equal to anything else'() { + expect: + transformer != 1 + } +} diff --git a/driver-core/src/test/unit/com/mongodb/ErrorCategorySpecification.groovy b/driver-core/src/test/unit/com/mongodb/ErrorCategorySpecification.groovy new file mode 100644 index 00000000000..2904b75d8f4 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/ErrorCategorySpecification.groovy @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb + +import spock.lang.Specification + +import static com.mongodb.ErrorCategory.DUPLICATE_KEY +import static com.mongodb.ErrorCategory.EXECUTION_TIMEOUT +import static com.mongodb.ErrorCategory.UNCATEGORIZED +import static com.mongodb.ErrorCategory.fromErrorCode + +class ErrorCategorySpecification extends Specification { + + def 'should categorize duplicate key errors'() { + expect: + fromErrorCode(11000) == DUPLICATE_KEY + fromErrorCode(11001) == DUPLICATE_KEY + fromErrorCode(12582) == DUPLICATE_KEY + } + + def 'should categorize execution timeout errors'() { + expect: + fromErrorCode(50) == EXECUTION_TIMEOUT + } + + def 'should categorize uncategorized errors'() { + expect: + fromErrorCode(0) == UNCATEGORIZED + } + +} diff --git a/driver-core/src/test/unit/com/mongodb/IndexRequestSpecification.groovy b/driver-core/src/test/unit/com/mongodb/IndexRequestSpecification.groovy new file mode 100644 index 00000000000..d36a3ad771c --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/IndexRequestSpecification.groovy @@ -0,0 +1,182 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb + +import com.mongodb.client.model.Collation +import com.mongodb.client.model.CollationAlternate +import com.mongodb.client.model.CollationCaseFirst +import com.mongodb.client.model.CollationMaxVariable +import com.mongodb.client.model.CollationStrength +import com.mongodb.internal.bulk.IndexRequest +import org.bson.BsonDocument +import org.bson.BsonInt32 +import spock.lang.Specification + +import java.util.concurrent.TimeUnit + +class IndexRequestSpecification extends Specification { + + def 'should set its options correctly'() { + when: + def request = new IndexRequest(new BsonDocument('a', new BsonInt32(1))) + + then: + request.getKeys() == new BsonDocument('a', new BsonInt32(1)) + !request.isBackground() + !request.isUnique() + !request.isSparse() + request.getName() == null + request.getExpireAfter(TimeUnit.SECONDS) == null + request.getVersion() == null + request.getWeights() == null + request.getDefaultLanguage() == null + request.getLanguageOverride() == null + request.getTextVersion() == null + request.getSphereVersion() == null + request.getBits() == null + request.getMin() == null + request.getMax() == null + !request.getDropDups() + request.getStorageEngine() == null + request.getPartialFilterExpression() == null + request.getCollation() == null + request.getWildcardProjection() == null + !request.isHidden() + + when: + def keys = BsonDocument.parse('{ a: 1 }') + def weights = BsonDocument.parse('{ a: 1000 }') + def storageEngine = BsonDocument.parse('{ wiredTiger : { configString : "block_compressor=zlib" }}') + def partialFilterExpression = BsonDocument.parse('{ a: { $gte: 10 } }') + def collation = Collation.builder() + .locale('en') + .caseLevel(true) + .collationCaseFirst(CollationCaseFirst.OFF) + .collationStrength(CollationStrength.IDENTICAL) + .numericOrdering(true) + 
.collationAlternate(CollationAlternate.SHIFTED) + .collationMaxVariable(CollationMaxVariable.SPACE) + .backwards(true) + .build() + def wildcardProjection = BsonDocument.parse('{a : 1}') + def request2 = new IndexRequest(keys) + .background(true) + .unique(true) + .sparse(true) + .name('aIndex') + .expireAfter(100, TimeUnit.SECONDS) + .version(1) + .weights(weights) + .defaultLanguage('es') + .languageOverride('language') + .textVersion(1) + .sphereVersion(2) + .bits(1) + .min(-180.0) + .max(180.0) + .dropDups(true) + .storageEngine(storageEngine) + .partialFilterExpression(partialFilterExpression) + .collation(collation) + .wildcardProjection(wildcardProjection) + .hidden(true) + + then: + request2.getKeys() == keys + request2.isBackground() + request2.isUnique() + request2.isSparse() + request2.getName() == 'aIndex' + request2.getExpireAfter(TimeUnit.SECONDS) == 100 + request2.getVersion() == 1 + request2.getWeights() == weights + request2.getDefaultLanguage() == 'es' + request2.getLanguageOverride() == 'language' + request2.getTextVersion() == 1 + request2.getSphereVersion() == 2 + request2.getBits() == 1 + request2.getMin() == -180.0 + request2.getMax() == 180.0 + request2.getDropDups() + request2.getStorageEngine() == storageEngine + request2.getPartialFilterExpression() == partialFilterExpression + request2.getCollation() == collation + request2.getWildcardProjection() == wildcardProjection + request2.isHidden() + } + + + def 'should validate textIndexVersion'() { + given: + def options = new IndexRequest(new BsonDocument('a', new BsonInt32(1))) + + when: + options.textVersion(1) + + then: + notThrown(IllegalArgumentException) + + when: + options.textVersion(2) + + then: + notThrown(IllegalArgumentException) + + when: + options.textVersion(3) + + then: + notThrown(IllegalArgumentException) + + when: + options.textVersion(4) + + then: + thrown(IllegalArgumentException) + } + + + def 'should validate 2dsphereIndexVersion'() { + given: + def options = new IndexRequest(new BsonDocument('a', new BsonInt32(1))) + + when: + options.sphereVersion(1) + + then: + notThrown(IllegalArgumentException) + + when: + options.sphereVersion(2) + + then: + notThrown(IllegalArgumentException) + + when: + options.sphereVersion(3) + + then: + notThrown(IllegalArgumentException) + + when: + options.sphereVersion(4) + + then: + thrown(IllegalArgumentException) + } + +} diff --git a/driver-core/src/test/unit/com/mongodb/Jep395RecordCodecProviderTest.java b/driver-core/src/test/unit/com/mongodb/Jep395RecordCodecProviderTest.java new file mode 100644 index 00000000000..5086328f7de --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/Jep395RecordCodecProviderTest.java @@ -0,0 +1,42 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.mongodb;
+
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.condition.EnabledForJreRange;
+import org.junit.jupiter.api.condition.JRE;
+
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+
+public class Jep395RecordCodecProviderTest {
+
+    @Test
+    @EnabledForJreRange(min = JRE.JAVA_17)
+    void canSupportJavaRecordsOnJava17() {
+        assertTrue(new Jep395RecordCodecProvider().hasRecordSupport());
+    }
+
+    @Test
+    void doesNotErrorWhenCheckingNonRecords() {
+        try {
+            assertNull(new Jep395RecordCodecProvider().get(Integer.class, MongoClientSettings.getDefaultCodecRegistry()));
+        } catch (Exception e) {
+            fail("Expected null rather than an exception when requesting a codec for a non-record class");
+        }
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/MongoClientSettingsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/MongoClientSettingsSpecification.groovy
new file mode 100644
index 00000000000..ec5d92b1e49
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/MongoClientSettingsSpecification.groovy
@@ -0,0 +1,578 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb
+
+import com.mongodb.connection.ClusterConnectionMode
+import com.mongodb.connection.ClusterSettings
+import com.mongodb.connection.ConnectionPoolSettings
+import com.mongodb.connection.ProxySettings
+import com.mongodb.connection.ServerSettings
+import com.mongodb.connection.SocketSettings
+import com.mongodb.connection.SslSettings
+import com.mongodb.connection.TransportSettings
+import com.mongodb.event.CommandListener
+import com.mongodb.spi.dns.DnsClient
+import com.mongodb.spi.dns.InetAddressResolver
+import org.bson.UuidRepresentation
+import org.bson.codecs.configuration.CodecRegistry
+import spock.lang.Specification
+
+import java.util.concurrent.TimeUnit
+
+import static com.mongodb.CustomMatchers.isTheSameAs
+import static spock.util.matcher.HamcrestSupport.expect
+
+class MongoClientSettingsSpecification extends Specification {
+
+    def 'should set the correct default values'() {
+        given:
+        def settings = MongoClientSettings.builder().build()
+
+        expect:
+        settings.getWriteConcern() == WriteConcern.ACKNOWLEDGED
+        settings.getRetryWrites()
+        settings.getRetryReads()
+        settings.getReadConcern() == ReadConcern.DEFAULT
+        settings.getReadPreference() == ReadPreference.primary()
+        settings.getCommandListeners().isEmpty()
+        settings.getApplicationName() == null
+        settings.getLoggerSettings() == LoggerSettings.builder().build()
+        settings.clusterSettings == ClusterSettings.builder().build()
+        settings.connectionPoolSettings == ConnectionPoolSettings.builder().build()
+        settings.socketSettings == SocketSettings.builder().build()
+        settings.socketSettings.proxySettings == ProxySettings.builder().build()
+        settings.heartbeatSocketSettings == SocketSettings.builder().readTimeout(10000, TimeUnit.MILLISECONDS).build()
+        settings.serverSettings == ServerSettings.builder().build()
+        settings.transportSettings == null
+        settings.compressorList == []
+        settings.credential == null
+        settings.uuidRepresentation == UuidRepresentation.UNSPECIFIED
+        settings.contextProvider == null
+        settings.dnsClient == null
+        settings.inetAddressResolver == null
+        settings.getTimeout(TimeUnit.MILLISECONDS) == null
+    }
+
+    @SuppressWarnings('UnnecessaryObjectReferences')
+    def 'should handle illegal arguments'() {
+        given:
+        def builder = MongoClientSettings.builder()
+
+        when:
+        builder.readPreference(null)
+        then:
+        thrown(IllegalArgumentException)
+
+        when:
+        builder.writeConcern(null)
+        then:
+        thrown(IllegalArgumentException)
+
+        when:
+        builder.credential(null)
+        then:
+        thrown(IllegalArgumentException)
+
+        when:
+        builder.codecRegistry(null)
+        then:
+        thrown(IllegalArgumentException)
+
+        when:
+        builder.transportSettings(null)
+        then:
+        thrown(IllegalArgumentException)
+
+        when:
+        builder.addCommandListener(null)
+        then:
+        thrown(IllegalArgumentException)
+
+        when:
+        builder.compressorList(null)
+        then:
+        thrown(IllegalArgumentException)
+
+        when:
+        builder.uuidRepresentation(null)
+        then:
+        thrown(IllegalArgumentException)
+    }
+
+    def 'should build with set configuration'() {
+        given:
+        def transportSettings = TransportSettings.nettyBuilder().build()
+        def credential = MongoCredential.createMongoX509Credential('test')
+        def codecRegistry = Stub(CodecRegistry)
+        def commandListener = Stub(CommandListener)
+        def clusterSettings = ClusterSettings.builder().hosts([new ServerAddress('localhost')]).requiredReplicaSetName('test').build()
+        def contextProvider = Stub(ContextProvider)
+        def dnsClient = Stub(DnsClient)
+        def inetAddressResolver = Stub(InetAddressResolver)
+
+        when:
+        def settings = MongoClientSettings.builder()
+                .readPreference(ReadPreference.secondary())
+                .writeConcern(WriteConcern.JOURNALED)
+                .retryWrites(true)
+                .retryReads(true)
+                .readConcern(ReadConcern.LOCAL)
+                .applicationName('app1')
+                .addCommandListener(commandListener)
+                .credential(credential)
+                .codecRegistry(codecRegistry)
+                .applyToClusterSettings(new Block() {
+                    @Override
+                    void apply(final ClusterSettings.Builder builder) {
+                        builder.applySettings(clusterSettings)
+                    }
+                })
+                .transportSettings(transportSettings)
+                .compressorList([MongoCompressor.createZlibCompressor()])
+                .uuidRepresentation(UuidRepresentation.STANDARD)
+                .contextProvider(contextProvider)
+                .dnsClient(dnsClient)
+                .inetAddressResolver(inetAddressResolver)
+                .timeout(1000, TimeUnit.SECONDS)
+                .build()
+
+        then:
+        settings.getReadPreference() == ReadPreference.secondary()
+        settings.getWriteConcern() == WriteConcern.JOURNALED
+        settings.getRetryWrites()
+        settings.getRetryReads()
+        settings.getReadConcern() == ReadConcern.LOCAL
+        settings.getApplicationName() == 'app1'
+        settings.getSocketSettings() == SocketSettings.builder().build()
+        settings.getHeartbeatSocketSettings() == SocketSettings.builder().readTimeout(10000, TimeUnit.MILLISECONDS).build()
+        settings.getCommandListeners().get(0) == commandListener
+        settings.getCodecRegistry() == codecRegistry
+        settings.getCredential() == credential
+        settings.getClusterSettings() == clusterSettings
+        settings.getTransportSettings() == transportSettings
+        settings.getCompressorList() == [MongoCompressor.createZlibCompressor()]
+        settings.getUuidRepresentation() == UuidRepresentation.STANDARD
+        settings.getContextProvider() == contextProvider
+        
settings.getDnsClient() == dnsClient + settings.getInetAddressResolver() == inetAddressResolver + settings.getTimeout(TimeUnit.MILLISECONDS) == 1_000_000 + } + + def 'should be easy to create new settings from existing'() { + when: + def settings = MongoClientSettings.builder().build() + + then: + expect settings, isTheSameAs(MongoClientSettings.builder(settings).build()) + + when: + def credential = MongoCredential.createMongoX509Credential('test') + def codecRegistry = Stub(CodecRegistry) + def commandListener = Stub(CommandListener) + def compressorList = [MongoCompressor.createZlibCompressor()] + def contextProvider = Stub(ContextProvider) + def dnsClient = Stub(DnsClient) + def inetAddressResolver = Stub(InetAddressResolver) + + settings = MongoClientSettings.builder() + .heartbeatConnectTimeoutMS(24000) + .heartbeatSocketTimeoutMS(12000) + .readPreference(ReadPreference.secondary()) + .writeConcern(WriteConcern.JOURNALED) + .retryWrites(true) + .retryReads(true) + .readConcern(ReadConcern.LOCAL) + .applicationName('app1') + .addCommandListener(commandListener) + .applyToLoggerSettings { LoggerSettings.Builder builder -> + builder.maxDocumentLength(10) + } + .applyToClusterSettings { ClusterSettings.Builder builder -> + builder.hosts([new ServerAddress('localhost')]) + .requiredReplicaSetName('test') + } + .credential(credential) + .codecRegistry(codecRegistry) + .compressorList(compressorList) + .contextProvider(contextProvider) + .dnsClient(dnsClient) + .inetAddressResolver(inetAddressResolver) + .timeout(0, TimeUnit.SECONDS) + .build() + + then: + expect settings, isTheSameAs(MongoClientSettings.builder(settings).build()) + } + + def 'applicationName can be 128 bytes when encoded as UTF-8'() { + given: + def applicationName = 'a' * 126 + '\u00A0' + + when: + def settings = MongoClientSettings.builder().applicationName(applicationName).build() + + then: + settings.applicationName == applicationName + } + + def 'should throw IllegalArgumentException if applicationName exceeds 128 bytes when encoded as UTF-8'() { + given: + def applicationName = 'a' * 127 + '\u00A0' + + when: + MongoClientSettings.builder().applicationName(applicationName) + + then: + thrown(IllegalArgumentException) + } + + def 'should throw an exception if the timeout is invalid'() { + given: + def builder = MongoClientSettings.builder() + + when: + builder.timeout(500, TimeUnit.NANOSECONDS) + + then: + thrown(IllegalArgumentException) + + when: + builder.timeout(-1, TimeUnit.SECONDS) + + then: + thrown(IllegalArgumentException) + + when: + def connectionString = new ConnectionString('mongodb://localhost/?timeoutMS=-1') + builder.applyConnectionString(connectionString).build() + + then: + thrown(IllegalStateException) + } + + def 'should add command listeners'() { + given: + CommandListener commandListenerOne = Mock(CommandListener) + CommandListener commandListenerTwo = Mock(CommandListener) + CommandListener commandListenerThree = Mock(CommandListener) + + when: + def settings = MongoClientSettings.builder() + .build() + + then: + settings.commandListeners.size() == 0 + + when: + settings = MongoClientSettings.builder() + .addCommandListener(commandListenerOne) + .build() + + then: + settings.commandListeners.size() == 1 + settings.commandListeners[0].is commandListenerOne + + when: + settings = MongoClientSettings.builder() + .addCommandListener(commandListenerOne) + .addCommandListener(commandListenerTwo) + .build() + + then: + settings.commandListeners.size() == 2 + settings.commandListeners[0].is 
commandListenerOne + settings.commandListeners[1].is commandListenerTwo + + when: + def copied = MongoClientSettings.builder(settings).addCommandListener(commandListenerThree).build() + + then: + copied.commandListeners.size() == 3 + copied.commandListeners[0].is commandListenerOne + copied.commandListeners[1].is commandListenerTwo + copied.commandListeners[2].is commandListenerThree + settings.commandListeners.size() == 2 + settings.commandListeners[0].is commandListenerOne + settings.commandListeners[1].is commandListenerTwo + } + + def 'should build settings from a connection string'() { + when: + ConnectionString connectionString = new ConnectionString('mongodb://user:pass@host1:1,host2:2/' + + '?authMechanism=SCRAM-SHA-1&authSource=test' + + '&minPoolSize=5&maxPoolSize=10' + + '&waitQueueTimeoutMS=150&maxIdleTimeMS=200&maxLifeTimeMS=300' + + '&connectTimeoutMS=2500' + + '&socketTimeoutMS=5500' + + '&serverSelectionTimeoutMS=25000' + + '&localThresholdMS=30' + + '&heartbeatFrequencyMS=20000' + + '&appName=MyApp' + + '&replicaSet=test' + + '&retryWrites=true' + + '&retryReads=true' + + '&ssl=true&sslInvalidHostNameAllowed=true' + + '&w=majority&wTimeoutMS=2500' + + '&readPreference=secondary' + + '&readConcernLevel=majority' + + '&compressors=zlib&zlibCompressionLevel=5' + + '&uuidRepresentation=standard' + + '&timeoutMS=10000' + + '&proxyHost=proxy.com' + + '&proxyPort=1080' + + '&proxyUsername=username' + + '&proxyPassword=password' + ) + MongoClientSettings settings = MongoClientSettings.builder().applyConnectionString(connectionString).build() + MongoClientSettings expected = MongoClientSettings.builder() + .applyToClusterSettings(new Block() { + @Override + void apply(final ClusterSettings.Builder builder) { + builder.hosts([new ServerAddress('host1', 1), new ServerAddress('host2', 2)]) + .mode(ClusterConnectionMode.MULTIPLE) + .requiredReplicaSetName('test') + .serverSelectionTimeout(25000, TimeUnit.MILLISECONDS) + .localThreshold(30, TimeUnit.MILLISECONDS) + } + }) + .applyToConnectionPoolSettings(new Block() { + @Override + void apply(final ConnectionPoolSettings.Builder builder) { + builder.minSize(5) + .maxSize(10) + .maxWaitTime(150, TimeUnit.MILLISECONDS) + .maxConnectionLifeTime(300, TimeUnit.MILLISECONDS) + .maxConnectionIdleTime(200, TimeUnit.MILLISECONDS) + } + }) + .applyToServerSettings(new Block() { + @Override + void apply(final ServerSettings.Builder builder) { + builder.heartbeatFrequency(20000, TimeUnit.MILLISECONDS) + } + }) + .applyToSocketSettings(new Block() { + @Override + void apply(final SocketSettings.Builder builder) { + builder.connectTimeout(2500, TimeUnit.MILLISECONDS) + .readTimeout(5500, TimeUnit.MILLISECONDS) + .applyToProxySettings { + it.host('proxy.com') + it.port(1080) + it.username('username') + it.password('password') + } + } + }) + .applyToSslSettings(new Block() { + @Override + void apply(final SslSettings.Builder builder) { + builder.enabled(true) + .invalidHostNameAllowed(true) + } + }) + .readConcern(ReadConcern.MAJORITY) + .readPreference(ReadPreference.secondary()) + .writeConcern(WriteConcern.MAJORITY.withWTimeout(2500, TimeUnit.MILLISECONDS)) + .applicationName('MyApp') + .credential(MongoCredential.createScramSha1Credential('user', 'test', 'pass'.toCharArray())) + .compressorList([MongoCompressor.createZlibCompressor().withProperty(MongoCompressor.LEVEL, 5)]) + .retryWrites(true) + .retryReads(true) + .uuidRepresentation(UuidRepresentation.STANDARD) + .timeout(10000, TimeUnit.MILLISECONDS) + .build() + + then: + expect expected, 
isTheSameAs(settings) + } + + def 'should build settings from a connection string with default values'() { + when: + def builder = MongoClientSettings.builder() + .applyToClusterSettings(new Block() { + @Override + void apply(final ClusterSettings.Builder builder) { + builder.hosts([new ServerAddress('localhost', 27017)]) + .mode(ClusterConnectionMode.SINGLE) + .serverSelectionTimeout(25000, TimeUnit.MILLISECONDS) + .localThreshold(30, TimeUnit.MILLISECONDS) + } + }) + .applyToConnectionPoolSettings(new Block() { + @Override + void apply(final ConnectionPoolSettings.Builder builder) { + builder.minSize(5) + .maxSize(10) + .maxWaitTime(150, TimeUnit.MILLISECONDS) + .maxConnectionLifeTime(300, TimeUnit.MILLISECONDS) + .maxConnectionIdleTime(200, TimeUnit.MILLISECONDS) + } + }) + .applyToServerSettings(new Block() { + @Override + void apply(final ServerSettings.Builder builder) { + builder.heartbeatFrequency(20000, TimeUnit.MILLISECONDS) + } + }) + .applyToSocketSettings(new Block() { + @Override + void apply(final SocketSettings.Builder builder) { + builder.connectTimeout(2500, TimeUnit.MILLISECONDS) + .readTimeout(5500, TimeUnit.MILLISECONDS) + .applyToProxySettings { + it.host('proxy.com') + it.port(1080) + it.username('username') + it.password('password') + } + } + }) + .applyToSslSettings(new Block() { + @Override + void apply(final SslSettings.Builder builder) { + builder.enabled(true) + .invalidHostNameAllowed(true) + } + }) + .readConcern(ReadConcern.MAJORITY) + .readPreference(ReadPreference.secondary()) + .writeConcern(WriteConcern.MAJORITY.withWTimeout(2500, TimeUnit.MILLISECONDS)) + .applicationName('MyApp') + .credential(MongoCredential.createScramSha1Credential('user', 'test', 'pass'.toCharArray())) + .compressorList([MongoCompressor.createZlibCompressor().withProperty(MongoCompressor.LEVEL, 5)]) + .retryWrites(true) + .retryReads(true) + + def expectedSettings = builder.build() + def settingsWithDefaultConnectionStringApplied = builder + .applyConnectionString(new ConnectionString('mongodb://localhost')) + .build() + + then: + expect expectedSettings, isTheSameAs(settingsWithDefaultConnectionStringApplied) + } + + def 'should use the socket settings connectionTimeout for the heartbeat settings'() { + when: + def settings = MongoClientSettings.builder().applyToSocketSettings { SocketSettings.Builder builder -> + builder.connectTimeout(42, TimeUnit.SECONDS).readTimeout(60, TimeUnit.SECONDS) + .receiveBufferSize(22).sendBufferSize(10) + }.build() + + then: + settings.getHeartbeatSocketSettings() == SocketSettings.builder().connectTimeout(42, TimeUnit.SECONDS) + .readTimeout(42, TimeUnit.SECONDS) + .build() + + when: + settings = MongoClientSettings.builder(settings) + .applyToSocketSettings { SocketSettings.Builder builder -> + builder.connectTimeout(21, TimeUnit.SECONDS) + }.build() + + then: + settings.getHeartbeatSocketSettings() == SocketSettings.builder().connectTimeout(21, TimeUnit.SECONDS) + .readTimeout(21, TimeUnit.SECONDS) + .build() + } + + def 'should use the proxy settings for the heartbeat settings'() { + when: + def settings = MongoClientSettings.builder().applyToSocketSettings { SocketSettings.Builder builder -> + builder.connectTimeout(42, TimeUnit.SECONDS) + .readTimeout(60, TimeUnit.SECONDS) + .applyToProxySettings { + it.host('proxy.com') + it.port(1080) + it.username('username') + it.password('password') + } + }.build() + + then: + settings.getHeartbeatSocketSettings() == SocketSettings.builder().connectTimeout(42, TimeUnit.SECONDS) + .readTimeout(42, 
TimeUnit.SECONDS) + .applyToProxySettings { + it.host('proxy.com') + it.port(1080) + it.username('username') + it.password('password') + } + .build() + } + + def 'should use the configured heartbeat timeouts for the heartbeat settings'() { + when: + def settings = MongoClientSettings.builder() + .heartbeatConnectTimeoutMS(24000) + .heartbeatSocketTimeoutMS(12000) + .applyToSocketSettings { SocketSettings.Builder builder -> + builder.connectTimeout(42, TimeUnit.SECONDS).readTimeout(60, TimeUnit.SECONDS) + .receiveBufferSize(22).sendBufferSize(10) + }.build() + then: + settings.getHeartbeatSocketSettings() == SocketSettings.builder().connectTimeout(24, TimeUnit.SECONDS) + .readTimeout(12, TimeUnit.SECONDS).build() + + when: + settings = MongoClientSettings.builder(settings) + .applyToSocketSettings { SocketSettings.Builder builder -> + builder.connectTimeout(21, TimeUnit.SECONDS) + }.build() + + then: + settings.getHeartbeatSocketSettings() == SocketSettings.builder().connectTimeout(24, TimeUnit.SECONDS) + .readTimeout(12, TimeUnit.SECONDS) + .build() + } + + def 'should only have the following fields in the builder'() { + when: + // A regression test so that if anymore fields are added then the builder(final MongoClientSettings settings) should be updated + def actual = MongoClientSettings.Builder.declaredFields.grep { !it.synthetic } *.name.sort() + def expected = ['applicationName', 'autoEncryptionSettings', 'clusterSettingsBuilder', 'codecRegistry', 'commandListeners', + 'compressorList', 'connectionPoolSettingsBuilder', 'contextProvider', 'credential', 'dnsClient', + 'heartbeatConnectTimeoutMS', 'heartbeatSocketTimeoutMS', 'inetAddressResolver', 'loggerSettingsBuilder', + 'readConcern', 'readPreference', 'retryReads', + 'retryWrites', 'serverApi', 'serverSettingsBuilder', 'socketSettingsBuilder', 'sslSettingsBuilder', + 'timeoutMS', 'transportSettings', 'uuidRepresentation', 'writeConcern'] + + then: + actual == expected + } + + def 'should only have the following methods in the builder'() { + when: + // A regression test so that if anymore methods are added then the builder(final MongoClientSettings settings) should be updated + def actual = MongoClientSettings.Builder.declaredMethods.grep { !it.synthetic } *.name.sort() + def expected = ['addCommandListener', 'applicationName', 'applyConnectionString', 'applyToClusterSettings', + 'applyToConnectionPoolSettings', 'applyToLoggerSettings', 'applyToServerSettings', 'applyToSocketSettings', + 'applyToSslSettings', 'autoEncryptionSettings', 'build', 'codecRegistry', 'commandListenerList', + 'compressorList', 'contextProvider', 'credential', 'dnsClient', 'heartbeatConnectTimeoutMS', + 'heartbeatSocketTimeoutMS', 'inetAddressResolver', 'readConcern', 'readPreference', 'retryReads', 'retryWrites', + 'serverApi', 'timeout', 'transportSettings', 'uuidRepresentation', 'writeConcern'] + + then: + actual == expected + } +} diff --git a/driver-core/src/test/unit/com/mongodb/MongoCommandExceptionSpecification.groovy b/driver-core/src/test/unit/com/mongodb/MongoCommandExceptionSpecification.groovy new file mode 100644 index 00000000000..f56dbdd2e61 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/MongoCommandExceptionSpecification.groovy @@ -0,0 +1,67 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb + +import org.bson.BsonBoolean +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonString +import spock.lang.Specification + +class MongoCommandExceptionSpecification extends Specification { + + def 'should extract error message'() { + expect: + new MongoCommandException(new BsonDocument('ok', BsonBoolean.FALSE).append('errmsg', new BsonString('the error message')), + new ServerAddress()) + .getErrorMessage() == 'the error message' + new MongoCommandException(new BsonDocument('ok', BsonBoolean.FALSE), + new ServerAddress()) + .getErrorMessage() == '' + } + + def 'should extract error code'() { + expect: + new MongoCommandException(new BsonDocument('ok', BsonBoolean.FALSE).append('code', new BsonInt32(26)), + new ServerAddress()) + .getErrorCode() == 26 + new MongoCommandException(new BsonDocument('ok', BsonBoolean.FALSE), + new ServerAddress()) + .getErrorCode() == -1 + } + + def 'should extract error code name'() { + expect: + new MongoCommandException(new BsonDocument('ok', BsonBoolean.FALSE).append('code', new BsonInt32(26)) + .append('codeName', new BsonString('TimeoutError')), new ServerAddress()).getErrorCodeName() == 'TimeoutError' + new MongoCommandException(new BsonDocument('ok', BsonBoolean.FALSE), new ServerAddress()).getErrorCodeName() == '' + } + + def 'should create message'() { + expect: + new MongoCommandException(new BsonDocument('ok', BsonBoolean.FALSE).append('code', new BsonInt32(26)) + .append('codeName', new BsonString('TimeoutError')).append('errmsg', new BsonString('the error message')), + new ServerAddress()) + .getMessage() == 'Command execution failed on MongoDB server with error 26 (TimeoutError): \'the error message\' ' + + 'on server 127.0.0.1:27017. The full response is {"ok": false, "code": 26, "codeName": "TimeoutError", ' + + '"errmsg": "the error message"}' + new MongoCommandException(new BsonDocument('ok', BsonBoolean.FALSE).append('code', new BsonInt32(26)) + .append('errmsg', new BsonString('the error message')), new ServerAddress()) + .getMessage() == 'Command execution failed on MongoDB server with error 26: \'the error message\' ' + + 'on server 127.0.0.1:27017. The full response is {"ok": false, "code": 26, "errmsg": "the error message"}' + } +} diff --git a/driver-core/src/test/unit/com/mongodb/MongoCompressorSpecification.groovy b/driver-core/src/test/unit/com/mongodb/MongoCompressorSpecification.groovy new file mode 100644 index 00000000000..bfd19bb8022 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/MongoCompressorSpecification.groovy @@ -0,0 +1,52 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb + +import spock.lang.Specification + +class MongoCompressorSpecification extends Specification { + def 'should create zlib compressor'() { + when: + def compressor = MongoCompressor.createZlibCompressor() + + then: + compressor.getName() == 'zlib' + compressor.getProperty(MongoCompressor.LEVEL, -1) == -1 + } + + def 'should create zstd compressor'() { + when: + def compressor = MongoCompressor.createZstdCompressor() + + then: + compressor.getName() == 'zstd' + compressor.getProperty(MongoCompressor.LEVEL, -1) == -1 + } + + def 'should set property'() { + when: + def compressor = MongoCompressor.createZlibCompressor() + def newCompressor = compressor.withProperty(MongoCompressor.LEVEL, 5) + + then: + // withProperty returns a new, independent compressor; the original keeps its defaults + compressor != newCompressor + compressor.getProperty(MongoCompressor.LEVEL, -1) == -1 + newCompressor.getProperty(MongoCompressor.LEVEL, -1) == 5 + } + + +} diff --git a/driver-core/src/test/unit/com/mongodb/MongoCredentialSpecification.groovy b/driver-core/src/test/unit/com/mongodb/MongoCredentialSpecification.groovy new file mode 100644 index 00000000000..6207049f321 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/MongoCredentialSpecification.groovy @@ -0,0 +1,291 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb + +import spock.lang.Specification + +import static com.mongodb.AuthenticationMechanism.PLAIN +import static com.mongodb.AuthenticationMechanism.SCRAM_SHA_1 +import static com.mongodb.AuthenticationMechanism.SCRAM_SHA_256 + +class MongoCredentialSpecification extends Specification { + def 'creating a credential with an unspecified mechanism should populate correct fields'() { + given: + String userName = 'user' + String database = 'test' + char[] password = 'pwd'.toCharArray() + + when: + MongoCredential credential = MongoCredential.createCredential(userName, database, password) + + then: + userName == credential.getUserName() + database == credential.getSource() + password == credential.getPassword() + !credential.getAuthenticationMechanism() + !credential.getMechanism() + } + + def 'creating a Plain credential should populate all required fields'() { + given: + AuthenticationMechanism mechanism = PLAIN + String userName = 'user' + String source = '$external' + char[] password = 'pwd'.toCharArray() + + when: + MongoCredential credential = MongoCredential.createPlainCredential(userName, source, password) + + then: + userName == credential.getUserName() + source == credential.getSource() + password == credential.getPassword() + mechanism == credential.getAuthenticationMechanism() + MongoCredential.PLAIN_MECHANISM == credential.getMechanism() + } + + def 'should throw IllegalArgumentException when a required field is not passed in'() { + when: + MongoCredential.createPlainCredential(null, '$external', 'pwd'.toCharArray()) + then: + thrown(IllegalArgumentException) + + when: + MongoCredential.createPlainCredential('user', '$external', null) + then: + thrown(IllegalArgumentException) + + when: + MongoCredential.createPlainCredential('user', null, 'pwd'.toCharArray()) + then: + thrown(IllegalArgumentException) + } + + def 'creating a SCRAM_SHA_1 credential should populate all required fields'() { + given: + AuthenticationMechanism mechanism = SCRAM_SHA_1 + String userName = 'user' + String source = 'admin' + char[] password = 'pwd'.toCharArray() + + when: + MongoCredential credential = MongoCredential.createScramSha1Credential(userName, source, password) + + then: + userName == credential.getUserName() + source == credential.getSource() + password == credential.getPassword() + mechanism == credential.getAuthenticationMechanism() + MongoCredential.SCRAM_SHA_1_MECHANISM == credential.getMechanism() + } + + def 'creating a SCRAM_SHA_256 credential should populate all required fields'() { + given: + AuthenticationMechanism mechanism = SCRAM_SHA_256 + String userName = 'user' + String source = 'admin' + char[] password = 'pwd'.toCharArray() + + when: + MongoCredential credential = MongoCredential.createScramSha256Credential(userName, source, password) + + then: + userName == credential.getUserName() + source == credential.getSource() + password == credential.getPassword() + mechanism == credential.getAuthenticationMechanism() + MongoCredential.SCRAM_SHA_256_MECHANISM == credential.getMechanism() + } + + def 'should throw IllegalArgumentException when a required field is not passed in for the SCRAM_SHA_1 mechanism'() { + when: + MongoCredential.createScramSha1Credential(null, 'admin', 'pwd'.toCharArray()) + then: + thrown(IllegalArgumentException) + + when: + MongoCredential.createScramSha1Credential('user', 'admin', null) + then: + thrown(IllegalArgumentException) + + when: + MongoCredential.createScramSha1Credential('user', null, 'pwd'.toCharArray()) + then: + 
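+ // the source (authentication database) is required, just like the username and password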
thrown(IllegalArgumentException) + } + + def 'creating a GSSAPI Credential should populate the correct fields'() { + given: + AuthenticationMechanism mechanism = AuthenticationMechanism.GSSAPI + String userName = 'user' + + when: + MongoCredential credential = MongoCredential.createGSSAPICredential(userName) + + then: + userName == credential.getUserName() + '$external' == credential.getSource() + null == credential.getPassword() + mechanism == credential.getAuthenticationMechanism() + MongoCredential.GSSAPI_MECHANISM == credential.getMechanism() + } + + def 'creating an X.509 Credential should populate the correct fields'() { + given: + AuthenticationMechanism mechanism = AuthenticationMechanism.MONGODB_X509 + String userName = 'user' + + when: + MongoCredential credential = MongoCredential.createMongoX509Credential(userName) + + then: + userName == credential.getUserName() + '$external' == credential.getSource() + null == credential.getPassword() + mechanism == credential.getAuthenticationMechanism() + MongoCredential.MONGODB_X509_MECHANISM == credential.getMechanism() + } + + def 'creating an X.509 Credential without a username should populate the correct fields'() { + given: + AuthenticationMechanism mechanism = AuthenticationMechanism.MONGODB_X509 + + when: + MongoCredential credential = MongoCredential.createMongoX509Credential() + + then: + null == credential.getUserName() + '$external' == credential.getSource() + null == credential.getPassword() + mechanism == credential.getAuthenticationMechanism() + MongoCredential.MONGODB_X509_MECHANISM == credential.getMechanism() + } + + def 'should get default value of mechanism property when there is no mapping'() { + when: + def credential = MongoCredential.createGSSAPICredential('user') + + then: + credential.getMechanismProperty('unmappedKey', 'mongodb') == 'mongodb' + } + + def 'should get mapped mechanism properties when there is a mapping'() { + given: + String firstKey = 'firstKey' + String firstValue = 'firstValue' + String secondKey = 'secondKey' + Integer secondValue = 2 + + when: + def credential = MongoCredential.createGSSAPICredential('user').withMechanismProperty(firstKey, firstValue) + + then: + credential.getMechanismProperty(firstKey, 'default') == firstValue + credential.getMechanismProperty(firstKey.toLowerCase(), 'default') == firstValue + + when: + credential = credential.withMechanismProperty(secondKey, secondValue) + + then: + credential.getMechanismProperty(firstKey, 'default') == firstValue + credential.getMechanismProperty(secondKey, 1) == secondValue + } + + def 'should preserve other properties when adding a mechanism property'() { + given: + def credential = MongoCredential.createPlainCredential('user', 'source', 'pwd'.toCharArray()) + + when: + def newCredential = credential.withMechanismProperty('foo', 'bar') + + then: + newCredential.mechanism == credential.mechanism + newCredential.userName == credential.userName + newCredential.password == credential.password + newCredential.source == credential.source + } + + def 'should throw IllegalArgumentException if username is not provided to a GSSAPI credential'() { + when: + MongoCredential.createGSSAPICredential(null) + + then: + thrown(IllegalArgumentException) + } + + def 'should make a copy of the password'() { + given: + def credential = MongoCredential.createPlainCredential('user', 'source', 'pwd'.toCharArray()) + def password = credential.getPassword() + + when: + password[0] = 's' + + then: + // mutating the returned array must not change the credential's stored password + credential.password == 'pwd'.toCharArray() + } + + def
'testObjectOverrides'() { + given: + String userName = 'user' + String database = 'test' + def password = 'pwd' + def propertyKey = 'keyOne' + def propertyValue = 'valueOne' + + when: + def credentialOne = MongoCredential.createScramSha256Credential(userName, database, password.toCharArray()) + def credentialTwo = credentialOne.withMechanismProperty(propertyKey, propertyValue) + + then: + MongoCredential.createScramSha256Credential(userName, database, password.toCharArray()) == credentialOne + credentialOne.withMechanismProperty(propertyKey, propertyValue) == credentialTwo + credentialOne != credentialTwo + + MongoCredential.createScramSha256Credential(userName, database, password.toCharArray()).hashCode() == credentialOne.hashCode() + credentialOne.hashCode() != credentialTwo.hashCode() + + !credentialOne.toString().contains(password) + credentialOne.toString().contains('password=') + + !credentialTwo.toString().contains(propertyKey.toLowerCase()) + !credentialTwo.toString().contains(propertyValue) + credentialTwo.toString().contains('mechanismProperties=') + } + + def 'testEqualsAndHashCode'() { + expect: + credential() == credential() + credential().hashCode() == credential().hashCode() + + where: + credential << [ + { MongoCredential.createCredential('user', 'database', 'pwd'.toCharArray()) }, + { MongoCredential.createCredential('user', 'database', 'pwd'.toCharArray()).withMechanismProperty('foo', 'bar') }, + { MongoCredential.createPlainCredential('user', '$external', 'pwd'.toCharArray()) }, + { MongoCredential.createPlainCredential('user', '$external', 'pwd'.toCharArray()).withMechanismProperty('foo', 'bar') }, + { MongoCredential.createScramSha1Credential('user', '$external', 'pwd'.toCharArray()) }, + { MongoCredential.createScramSha1Credential('user', '$external', 'pwd'.toCharArray()).withMechanismProperty('foo', 'bar') }, + { MongoCredential.createGSSAPICredential('user') }, + { MongoCredential.createGSSAPICredential('user').withMechanismProperty('foo', 'bar') }, + { MongoCredential.createMongoX509Credential('user') }, + { MongoCredential.createMongoX509Credential('user').withMechanismProperty('foo', 'bar') }, + { MongoCredential.createMongoX509Credential() }, + { MongoCredential.createMongoX509Credential().withMechanismProperty('foo', 'bar') }, + ] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/MongoDriverInformationSpecification.groovy b/driver-core/src/test/unit/com/mongodb/MongoDriverInformationSpecification.groovy new file mode 100644 index 00000000000..58bfbd5c424 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/MongoDriverInformationSpecification.groovy @@ -0,0 +1,86 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb + +import spock.lang.Specification + +class MongoDriverInformationSpecification extends Specification { + + def 'should set the correct default values'() { + given: + def options = MongoDriverInformation.builder().build() + + expect: + options.getDriverNames() == [] + options.getDriverVersions() == [] + options.getDriverPlatforms() == [] + } + + def 'should not append data if none has been added'() { + given: + def options = MongoDriverInformation.builder(MongoDriverInformation.builder().build()).build() + + expect: + options.getDriverNames() == [] + options.getDriverVersions() == [] + options.getDriverPlatforms() == [] + } + + def 'should append data to the list'() { + given: + def javaDriverInfo = MongoDriverInformation.builder() + .driverName('mongo-java-driver') + .driverVersion('3.4.0') + .driverPlatform('Java oracle64-1.8.0.31') + .build() + + def options = MongoDriverInformation.builder(javaDriverInfo) + .driverName('mongo-scala-driver') + .driverVersion('1.2.0') + .driverPlatform('Scala 2.11') + .build() + + expect: + options.getDriverNames() == ['mongo-java-driver', 'mongo-scala-driver'] + options.getDriverVersions() == ['3.4.0', '1.2.0'] + options.getDriverPlatforms() == ['Java oracle64-1.8.0.31', 'Scala 2.11'] + } + + def 'should only append data that has been set'() { + given: + def javaDriverInfo = MongoDriverInformation.builder() + .driverName('mongo-java-driver') + .driverVersion('3.4.0') + .driverPlatform('Java oracle64-1.8.0.31') + .build() + + def options = MongoDriverInformation.builder(javaDriverInfo).driverName('mongo-scala-driver').build() + + expect: + options.getDriverNames() == ['mongo-java-driver', 'mongo-scala-driver'] + options.getDriverVersions() == ['3.4.0'] + options.getDriverPlatforms() == ['Java oracle64-1.8.0.31'] + } + + def 'should null check the passed MongoDriverInformation'() { + when: + MongoDriverInformation.builder(null).build() + + then: + thrown IllegalArgumentException + } +} diff --git a/driver-core/src/test/unit/com/mongodb/MongoNamespaceSpecification.groovy b/driver-core/src/test/unit/com/mongodb/MongoNamespaceSpecification.groovy new file mode 100644 index 00000000000..10769aa8c4c --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/MongoNamespaceSpecification.groovy @@ -0,0 +1,94 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb + +import spock.lang.Specification + +class MongoNamespaceSpecification extends Specification { + def 'invalid database name should throw IllegalArgumentException'() { + when: + new MongoNamespace(databaseName, 'test') + + then: + thrown(IllegalArgumentException) + + when: + MongoNamespace.checkDatabaseNameValidity(databaseName) + + then: + thrown(IllegalArgumentException) + + where: + databaseName << [null, '', 'a\0b', 'a b', 'a.b', 'a/b', 'a\\b', 'a"b'] + } + + def 'invalid collection name should throw IllegalArgumentException'() { + when: + new MongoNamespace('test', collectionName) + + then: + thrown(IllegalArgumentException) + + when: + MongoNamespace.checkCollectionNameValidity(collectionName) + + then: + thrown(IllegalArgumentException) + + where: + collectionName << [null, ''] + } + + def 'invalid full name should throw IllegalArgumentException'() { + when: + new MongoNamespace(fullName) + + then: + thrown(IllegalArgumentException) + + where: + fullName << [null, '', 'db', '.db', 'db.', 'a .b'] + } + + def 'test getters'() { + expect: + namespace.getDatabaseName() == 'db' + namespace.getCollectionName() == 'a.b' + namespace.getFullName() == 'db.a.b' + + where: + namespace << [new MongoNamespace('db', 'a.b'), new MongoNamespace('db.a.b')] + } + + @SuppressWarnings('ComparisonWithSelf') + def 'testEqualsAndHashCode'() { + given: + MongoNamespace namespace1 = new MongoNamespace('db1', 'coll1') + MongoNamespace namespace2 = new MongoNamespace('db1', 'coll1') + MongoNamespace namespace3 = new MongoNamespace('db2', 'coll1') + MongoNamespace namespace4 = new MongoNamespace('db1', 'coll2') + + expect: + namespace1 != new Object() + namespace1 == namespace1 + namespace1 == namespace2 + namespace1 != namespace3 + namespace1 != namespace4 + + namespace1.hashCode() == 97917362 + } +} diff --git a/driver-core/src/test/unit/com/mongodb/MongoWriteExceptionTest.java b/driver-core/src/test/unit/com/mongodb/MongoWriteExceptionTest.java new file mode 100644 index 00000000000..390a50f0cd9 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/MongoWriteExceptionTest.java @@ -0,0 +1,41 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * Copyright (c) 2008-2014 Atlassian Pty Ltd + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.junit.Test; + +import java.util.Collections; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +public class MongoWriteExceptionTest { + + @Test + public void testExceptionProperties() { + WriteError writeError = new WriteError(11000, "Duplicate key", new BsonDocument("x", new BsonInt32(1))); + MongoWriteException e = new MongoWriteException(writeError, new ServerAddress("host1"), Collections.emptySet()); + + assertEquals("Write operation error on MongoDB server host1:27017. 
Write error: WriteError{code=11000, message='Duplicate key', " + + "details={\"x\": 1}}.", + e.getMessage()); + assertEquals(writeError.getCode(), e.getCode()); + assertEquals(writeError, e.getError()); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/ProxySettingsTest.java b/driver-core/src/test/unit/com/mongodb/ProxySettingsTest.java new file mode 100644 index 00000000000..e161b25b61c --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/ProxySettingsTest.java @@ -0,0 +1,141 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.connection.ProxySettings; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.function.Executable; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Arrays; +import java.util.stream.Stream; + +class ProxySettingsTest { + + private static final String PASSWORD = "password"; + private static final String USERNAME = "username"; + private static final String HOST = "proxy.example.com"; + private static final int VALID_PORT = 1080; + + static Stream<Arguments> shouldThrowExceptionWhenProxySettingsAreInvalid() { + return Stream.of( + Arguments.of(ProxySettings.builder() + .port(VALID_PORT), "state should be: proxyPort can only be specified with proxyHost"), + Arguments.of(ProxySettings.builder() + .port(VALID_PORT) + .username(USERNAME) + .password(PASSWORD), "state should be: proxyPort can only be specified with proxyHost"), + Arguments.of(ProxySettings.builder() + .username(USERNAME), "state should be: proxyUsername can only be specified with proxyHost"), + Arguments.of(ProxySettings.builder() + .password(PASSWORD), "state should be: proxyPassword can only be specified with proxyHost"), + Arguments.of(ProxySettings.builder() + .host(HOST) + .username(USERNAME), + "state should be: Both proxyUsername and proxyPassword must be set together. They cannot be set individually"), + Arguments.of(ProxySettings.builder() + .host(HOST) + .password(PASSWORD), + "state should be: Both proxyUsername and proxyPassword must be set together. 
They cannot be set individually") + ); + } + + @ParameterizedTest + @MethodSource + void shouldThrowExceptionWhenProxySettingsAreInInvalid(final ProxySettings.Builder builder, final String expectedErrorMessage) { + IllegalStateException exception = Assertions.assertThrows(IllegalStateException.class, builder::build); + Assertions.assertEquals(expectedErrorMessage, exception.getMessage()); + } + + static Stream shouldThrowExceptionWhenInvalidValueIsProvided() { + byte[] byteData = new byte[256]; + Arrays.fill(byteData, (byte) 1); + return Stream.of( + Arguments.of((Executable) () -> ProxySettings.builder() + .port(-1), "state should be: proxyPort is within the valid range (0 to 65535)"), + Arguments.of((Executable) () -> ProxySettings.builder() + .port(65536), "state should be: proxyPort is within the valid range (0 to 65535)"), + Arguments.of((Executable) () -> ProxySettings.builder() + .host(""), "state should be: proxyHost is not empty"), + Arguments.of((Executable) () -> ProxySettings.builder() + .username(""), "state should be: username is not empty"), + Arguments.of((Executable) () -> ProxySettings.builder() + .username(new String(byteData)), "state should be: username's length in bytes is not greater than 255"), + Arguments.of((Executable) () -> ProxySettings.builder() + .password(""), "state should be: password is not empty"), + Arguments.of((Executable) () -> ProxySettings.builder() + .password(new String(byteData)), "state should be: password's length in bytes is not greater than 255"), + Arguments.of((Executable) () -> ProxySettings.builder() + .host(null), "proxyHost can not be null"), + Arguments.of((Executable) () -> ProxySettings.builder() + .username(null), "username can not be null"), + Arguments.of((Executable) () -> ProxySettings.builder() + .password(null), "password can not be null") + ); + } + + @ParameterizedTest + @MethodSource + void shouldThrowExceptionWhenInvalidValueIsProvided(final Executable action, final String expectedMessage) { + IllegalArgumentException exception = Assertions.assertThrows(IllegalArgumentException.class, action); + Assertions.assertEquals(expectedMessage, exception.getMessage()); + } + + static Stream shouldNotThrowExceptionWhenProxySettingAreValid() { + return Stream.of( + Arguments.of(ProxySettings.builder() + .host(HOST) + .port(VALID_PORT)), + Arguments.of(ProxySettings.builder() + .host(HOST)), + Arguments.of(ProxySettings.builder() + .host(HOST) + .port(VALID_PORT) + .host(USERNAME) + .host(PASSWORD)), + Arguments.of(ProxySettings.builder() + .host(HOST) + .host(USERNAME) + .host(PASSWORD)) + ); + } + + @ParameterizedTest + @MethodSource + void shouldNotThrowExceptionWhenProxySettingAreValid(final ProxySettings.Builder builder) { + builder.build(); + } + + @Test + void shouldGetExpectedValues() { + //given + ProxySettings proxySettings = ProxySettings.builder() + .host(HOST) + .port(VALID_PORT) + .username(USERNAME) + .password(PASSWORD) + .build(); + + Assertions.assertEquals(HOST, proxySettings.getHost()); + Assertions.assertEquals(VALID_PORT, proxySettings.getPort()); + Assertions.assertEquals(USERNAME, proxySettings.getUsername()); + Assertions.assertEquals(PASSWORD, proxySettings.getPassword()); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/ReadConcernConnectionStringTest.java b/driver-core/src/test/unit/com/mongodb/ReadConcernConnectionStringTest.java new file mode 100644 index 00000000000..1cb70ea3f00 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/ReadConcernConnectionStringTest.java @@ -0,0 +1,60 @@ +/* + 
* Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import junit.framework.TestCase; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import util.JsonPoweredTestHelper; + +import java.util.Collection; + +// See https://github.com/mongodb/specifications/tree/master/source/read-write-concern/tests/ +@RunWith(Parameterized.class) +public class ReadConcernConnectionStringTest extends TestCase { + private final String description; + private final String input; + private final BsonDocument definition; + + public ReadConcernConnectionStringTest(@SuppressWarnings("unused") final String fileName, final String description, + final String input, final BsonDocument definition) { + this.description = description; + this.input = input; + this.definition = definition; + } + + @Test + public void shouldPassAllOutcomes() { + boolean valid = definition.getBoolean("valid", BsonBoolean.TRUE).getValue(); + try { + ConnectionString connectionString = new ConnectionString(input); + assertTrue(valid); + ReadConcern readConcern = connectionString.getReadConcern() != null ? connectionString.getReadConcern() : ReadConcern.DEFAULT; + assertEquals(definition.getDocument("readConcern"), readConcern.asDocument()); + } catch (IllegalArgumentException e) { + assertFalse(valid); + } + } + + @Parameterized.Parameters(name = "{0}: {1}") + public static Collection<Object[]> data() { + return JsonPoweredTestHelper.getTestData("read-write-concern/tests/connection-string/read-concern.json"); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/ReadConcernDocumentTest.java b/driver-core/src/test/unit/com/mongodb/ReadConcernDocumentTest.java new file mode 100644 index 00000000000..392e691706e --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/ReadConcernDocumentTest.java @@ -0,0 +1,72 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb; + +import junit.framework.TestCase; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import util.JsonPoweredTestHelper; + +import java.util.Collection; + +// See https://github.com/mongodb/specifications/tree/master/source/read-write-concern/tests/document +@RunWith(Parameterized.class) +public class ReadConcernDocumentTest extends TestCase { + private final String description; + private final BsonDocument definition; + + public ReadConcernDocumentTest(@SuppressWarnings("unused") final String fileName, final String description, + @SuppressWarnings("unused") final String uri, final BsonDocument definition) { + this.description = description; + this.definition = definition; + } + + @Test + public void shouldPassAllOutcomes() { + boolean valid = definition.getBoolean("valid", BsonBoolean.TRUE).getValue(); + try { + ReadConcern readConcern = getReadConcern(definition.getDocument("readConcern")); + ReadConcern expectedReadConcern = getReadConcern(definition.getDocument("readConcernDocument")); + + // just a sanity check of the tests. These should be equal by definition + assertEquals(expectedReadConcern, readConcern); + assertEquals(definition.getBoolean("isServerDefault").getValue(), readConcern.isServerDefault()); + assertTrue(valid); + } catch (IllegalArgumentException e) { + assertFalse(valid); + } + } + + private ReadConcern getReadConcern(final BsonDocument readConcernDocument) { + ReadConcern readConcern; + if (readConcernDocument.containsKey("level")) { + readConcern = new ReadConcern(ReadConcernLevel.fromString(readConcernDocument.getString("level").getValue())); + } else { + readConcern = ReadConcern.DEFAULT; + } + + return readConcern; + } + + @Parameterized.Parameters(name = "{0}: {1}") + public static Collection<Object[]> data() { + return JsonPoweredTestHelper.getTestData("read-write-concern/tests/document/read-concern.json"); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/ReadConcernLevelSpecification.groovy b/driver-core/src/test/unit/com/mongodb/ReadConcernLevelSpecification.groovy new file mode 100644 index 00000000000..3c8718aff8e --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/ReadConcernLevelSpecification.groovy @@ -0,0 +1,55 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb + +import spock.lang.Specification + +class ReadConcernLevelSpecification extends Specification { + + def 'should return the expected string value'() { + expect: + readConcernLevel.getValue() == expectedString + + where: + readConcernLevel | expectedString + ReadConcernLevel.LOCAL | 'local' + ReadConcernLevel.MAJORITY | 'majority' + ReadConcernLevel.LINEARIZABLE | 'linearizable' + ReadConcernLevel.SNAPSHOT | 'snapshot' + ReadConcernLevel.AVAILABLE | 'available' + } + + def 'should support valid string representations'() { + expect: + ReadConcernLevel.fromString(readConcernLevel) instanceof ReadConcernLevel + + where: + readConcernLevel << ['local', 'majority', 'linearizable', 'snapshot', 'available', 'LOCAL', 'MAJORITY', + 'LINEARIZABLE', 'SNAPSHOT', 'AVAILABLE'] + } + + def 'should throw an IllegalArgumentException for invalid values'() { + when: + ReadConcernLevel.fromString(readConcernLevel) + + then: + thrown(IllegalArgumentException) + + where: + readConcernLevel << [null, 'pickThree'] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/ReadConcernSpecification.groovy b/driver-core/src/test/unit/com/mongodb/ReadConcernSpecification.groovy new file mode 100644 index 00000000000..9b8641b50f5 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/ReadConcernSpecification.groovy @@ -0,0 +1,66 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb + +import org.bson.BsonDocument +import spock.lang.Specification + +class ReadConcernSpecification extends Specification { + + def 'should have the expected read concern levels'() { + expect: + staticValue == expectedReadConcern + staticValue.getLevel() == expectedLevel + + where: + staticValue | expectedLevel | expectedReadConcern + ReadConcern.DEFAULT | null | new ReadConcern() + ReadConcern.LOCAL | ReadConcernLevel.LOCAL | new ReadConcern(ReadConcernLevel.LOCAL) + ReadConcern.MAJORITY | ReadConcernLevel.MAJORITY | new ReadConcern(ReadConcernLevel.MAJORITY) + ReadConcern.LINEARIZABLE | ReadConcernLevel.LINEARIZABLE | new ReadConcern(ReadConcernLevel.LINEARIZABLE) + ReadConcern.SNAPSHOT | ReadConcernLevel.SNAPSHOT | new ReadConcern(ReadConcernLevel.SNAPSHOT) + ReadConcern.AVAILABLE | ReadConcernLevel.AVAILABLE | new ReadConcern(ReadConcernLevel.AVAILABLE) + } + + def 'should create the expected Documents'() { + expect: + staticValue.asDocument() == expected + + where: + staticValue | expected + ReadConcern.DEFAULT | BsonDocument.parse('{}') + ReadConcern.LOCAL | BsonDocument.parse('{level: "local"}') + ReadConcern.MAJORITY | BsonDocument.parse('{level: "majority"}') + ReadConcern.LINEARIZABLE | BsonDocument.parse('{level: "linearizable"}') + ReadConcern.SNAPSHOT | BsonDocument.parse('{level: "snapshot"}') + ReadConcern.AVAILABLE | BsonDocument.parse('{level: "available"}') + } + + def 'should have the correct value for isServerDefault'() { + expect: + staticValue.isServerDefault() == expected + + where: + staticValue | expected + ReadConcern.DEFAULT | true + ReadConcern.LOCAL | false + ReadConcern.MAJORITY | false + ReadConcern.LINEARIZABLE | false + ReadConcern.SNAPSHOT | false + ReadConcern.AVAILABLE | false + } +} diff --git a/driver-core/src/test/unit/com/mongodb/ReadPreferenceChooseServersTest.java b/driver-core/src/test/unit/com/mongodb/ReadPreferenceChooseServersTest.java new file mode 100644 index 00000000000..c2247a212b9 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/ReadPreferenceChooseServersTest.java @@ -0,0 +1,188 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb; + +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerType; +import org.junit.Before; +import org.junit.Test; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE; +import static com.mongodb.connection.ClusterType.REPLICA_SET; +import static com.mongodb.connection.ServerConnectionState.CONNECTED; +import static com.mongodb.connection.ServerType.REPLICA_SET_OTHER; +import static java.util.Arrays.asList; +import static java.util.concurrent.TimeUnit.NANOSECONDS; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +public class ReadPreferenceChooseServersTest { + private static final int FOUR_MEG = 4 * 1024 * 1024; + private static final String HOST = "localhost"; + + private ServerDescription primary, secondary, otherSecondary; + private ClusterDescription set; + private ClusterDescription setNoSecondary; + private ClusterDescription setNoPrimary; + + @Before + public void setUp() throws IOException { + TagSet tags1 = new TagSet(asList(new Tag("foo", "1"), new Tag("bar", "2"), new Tag("baz", "1"))); + TagSet tags2 = new TagSet(asList(new Tag("foo", "1"), new Tag("bar", "2"), new Tag("baz", "2"))); + TagSet tags3 = new TagSet(asList(new Tag("foo", "1"), new Tag("bar", "2"), new Tag("baz", "3"))); + + long acceptableLatencyMS = 15; + long bestRoundTripTime = 50; + long acceptableRoundTripTime = bestRoundTripTime + (acceptableLatencyMS / 2); + long unacceptableRoundTripTime = bestRoundTripTime + acceptableLatencyMS + 1; + + primary = ServerDescription.builder().state(CONNECTED).address(new ServerAddress(HOST, 27017)) + .roundTripTime(acceptableRoundTripTime * 1000000L, NANOSECONDS) + .ok(true) + .type(ServerType.REPLICA_SET_PRIMARY) + .tagSet(tags1) + .maxDocumentSize(FOUR_MEG).build(); + + secondary = ServerDescription.builder().state(CONNECTED).address(new ServerAddress(HOST, 27018)) + .roundTripTime(bestRoundTripTime * 1000000L, NANOSECONDS) + .ok(true) + .type(ServerType.REPLICA_SET_SECONDARY) + .tagSet(tags2) + .maxDocumentSize(FOUR_MEG).build(); + + otherSecondary = ServerDescription.builder().state(CONNECTED).address(new ServerAddress(HOST, 27019)) + .roundTripTime(unacceptableRoundTripTime * 1000000L, NANOSECONDS) + .ok(true) + .type(ServerType.REPLICA_SET_SECONDARY) + .tagSet(tags3) + .maxDocumentSize(FOUR_MEG) + .build(); + ServerDescription uninitiatedMember = ServerDescription.builder().state(CONNECTED).address(new ServerAddress(HOST, 27020)) + .roundTripTime(unacceptableRoundTripTime * 1000000L, NANOSECONDS) + .ok(true) + .type(REPLICA_SET_OTHER) + .maxDocumentSize(FOUR_MEG) + .build(); + + List<ServerDescription> nodeList = new ArrayList<>(); + nodeList.add(primary); + nodeList.add(secondary); + nodeList.add(otherSecondary); + nodeList.add(uninitiatedMember); + + set = new ClusterDescription(MULTIPLE, REPLICA_SET, nodeList); + setNoPrimary = new ClusterDescription(MULTIPLE, REPLICA_SET, asList(secondary, otherSecondary)); + setNoSecondary = new ClusterDescription(MULTIPLE, REPLICA_SET, asList(primary, uninitiatedMember)); + } + + + @Test + public void testPrimaryReadPreference() { + assertEquals(1, ReadPreference.primary().choose(set).size()); + assertEquals(primary, ReadPreference.primary().choose(set).get(0)); + assertTrue(ReadPreference.primary().choose(setNoPrimary).isEmpty()); + } + + @Test + 
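+ // secondary mode: every healthy secondary is a candidate; the tagged variants below narrow the candidates by tag set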
public void testSecondaryReadPreference() { + TaggableReadPreference pref = (TaggableReadPreference) ReadPreference.secondary(); + List<ServerDescription> candidates = pref.choose(set); + assertEquals(2, candidates.size()); + assertTrue(candidates.contains(secondary)); + assertTrue(candidates.contains(otherSecondary)); + + List<TagSet> tagSetList = asList(new TagSet(new Tag("foo", "1")), new TagSet(new Tag("bar", "2"))); + pref = ReadPreference.secondary(tagSetList); + assertEquals(tagSetList, pref.getTagSetList()); + + pref = ReadPreference.secondary(new TagSet(new Tag("baz", "1"))); + assertTrue(pref.choose(set).isEmpty()); + + pref = ReadPreference.secondary(new TagSet(new Tag("baz", "2"))); + candidates = pref.choose(set); + assertEquals(1, candidates.size()); + assertTrue(candidates.contains(secondary)); + + pref = ReadPreference.secondary(new TagSet(new Tag("unknown", "1"))); + assertTrue(pref.choose(set).isEmpty()); + + pref = ReadPreference.secondary(asList(new TagSet(new Tag("unknown", "1")), new TagSet(new Tag("baz", "2")))); + candidates = pref.choose(set); + assertEquals(1, candidates.size()); + assertTrue(candidates.contains(secondary)); + } + + @Test + public void testPrimaryPreferredMode() { + ReadPreference pref = ReadPreference.primaryPreferred(); + List<ServerDescription> candidates = pref.choose(set); + assertEquals(1, candidates.size()); + assertEquals(primary, candidates.get(0)); + + candidates = pref.choose(setNoPrimary); + assertEquals(2, candidates.size()); + assertTrue(candidates.contains(secondary)); + assertTrue(candidates.contains(otherSecondary)); + + pref = ReadPreference.primaryPreferred(new TagSet(new Tag("baz", "2"))); + assertEquals(1, pref.choose(set).size()); + assertEquals(primary, pref.choose(set).get(0)); + assertEquals(1, pref.choose(setNoPrimary).size()); + assertEquals(secondary, pref.choose(setNoPrimary).get(0)); + } + + @Test + public void testSecondaryPreferredMode() { + ReadPreference pref = ReadPreference.secondary(new TagSet(new Tag("baz", "2"))); + List<ServerDescription> candidates = pref.choose(set); + assertEquals(1, candidates.size()); + assertTrue(candidates.contains(secondary)); + + // test that the primary is returned if no secondaries match the tag + pref = ReadPreference.secondaryPreferred(new TagSet(new Tag("unknown", "2"))); + assertEquals(primary, pref.choose(set).get(0)); + + pref = ReadPreference.secondaryPreferred(); + candidates = pref.choose(set); + assertEquals(2, candidates.size()); + assertTrue(candidates.contains(secondary)); + assertTrue(candidates.contains(otherSecondary)); + + assertTrue(ReadPreference.secondaryPreferred().choose(setNoSecondary).contains(primary)); + } + + @Test + public void testNearestMode() { + ReadPreference pref = ReadPreference.nearest(); + assertNotNull(pref.choose(set)); + + pref = ReadPreference.nearest(new TagSet(new Tag("baz", "1"))); + assertEquals(primary, pref.choose(set).get(0)); + + pref = ReadPreference.nearest(new TagSet(new Tag("baz", "2"))); + assertEquals(secondary, pref.choose(set).get(0)); + + pref = ReadPreference.nearest(new TagSet(new Tag("unknown", "2"))); + assertTrue(pref.choose(set).isEmpty()); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/ReadPreferenceSpecification.groovy b/driver-core/src/test/unit/com/mongodb/ReadPreferenceSpecification.groovy new file mode 100644 index 00000000000..e196c82b83a --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/ReadPreferenceSpecification.groovy @@ -0,0 +1,337 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb + +import org.bson.BsonArray +import org.bson.BsonDocument +import org.bson.BsonString +import spock.lang.Specification + +import static java.util.Arrays.asList +import static java.util.Collections.emptyList +import static java.util.concurrent.TimeUnit.MILLISECONDS +import static java.util.concurrent.TimeUnit.SECONDS +import static org.bson.BsonDocument.parse + +@SuppressWarnings(['DuplicateMapLiteral', 'LineLength']) +class ReadPreferenceSpecification extends Specification { + + def 'should have correct names'() { + expect: + readPreference.getName() == name + + where: + readPreference | name + ReadPreference.primary() | 'primary' + ReadPreference.primaryPreferred() | 'primaryPreferred' + ReadPreference.secondary() | 'secondary' + ReadPreference.secondaryPreferred() | 'secondaryPreferred' + ReadPreference.nearest() | 'nearest' + } + + static final TAG_SET = new TagSet(new Tag('rack', '1')) + static final TAG_SET_LIST = [TAG_SET] + static final HEDGE_OPTIONS = ReadPreferenceHedgeOptions.builder().enabled(true).build() + + def 'should have correct max staleness and tag set list'() { + expect: + ((TaggableReadPreference) readPreference).getMaxStaleness(MILLISECONDS) == (Long) maxStalenessMS + ((TaggableReadPreference) readPreference).getTagSetList() == tagSetList + ((TaggableReadPreference) readPreference).getHedgeOptions() == hedgeOptions + + where: + readPreference | maxStalenessMS | tagSetList | hedgeOptions + ReadPreference.primaryPreferred() | null | emptyList() | null + ReadPreference.secondary() | null | emptyList() | null + ReadPreference.secondaryPreferred() | null | emptyList() | null + ReadPreference.nearest() | null | emptyList() | null + ReadPreference.secondary(10, SECONDS) | 10000 | emptyList() | null + ReadPreference.secondaryPreferred(10, SECONDS) | 10000 | emptyList() | null + ReadPreference.primaryPreferred(10, SECONDS) | 10000 | emptyList() | null + ReadPreference.nearest(10, SECONDS) | 10000 | emptyList() | null + ReadPreference.secondary(TAG_SET, 10, SECONDS) | 10000 | TAG_SET_LIST | null + ReadPreference.secondaryPreferred(TAG_SET, 10, SECONDS) | 10000 | TAG_SET_LIST | null + ReadPreference.primaryPreferred(TAG_SET, 10, SECONDS) | 10000 | TAG_SET_LIST | null + ReadPreference.nearest(TAG_SET, 10, SECONDS) | 10000 | TAG_SET_LIST | null + ReadPreference.secondary(TAG_SET_LIST, 10, SECONDS) | 10000 | TAG_SET_LIST | null + ReadPreference.secondaryPreferred(TAG_SET_LIST, 10, SECONDS) | 10000 | TAG_SET_LIST | null + ReadPreference.primaryPreferred(TAG_SET_LIST, 10, SECONDS) | 10000 | TAG_SET_LIST | null + ReadPreference.nearest(TAG_SET_LIST, 10, SECONDS) | 10000 | TAG_SET_LIST | null + + ReadPreference.secondary().withMaxStalenessMS(10, SECONDS) | 10000 | emptyList() | null + ReadPreference.secondaryPreferred() + .withMaxStalenessMS(10, SECONDS) | 10000 | emptyList() | null + ReadPreference.primaryPreferred() + .withMaxStalenessMS(10, SECONDS) | 10000 | emptyList() | null + 
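+ // note: withMaxStalenessMS, withTagSet and withHedgeOptions return new ReadPreference instances rather than mutating the receiver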
ReadPreference.nearest().withMaxStalenessMS(10, SECONDS) | 10000 | emptyList() | null + ReadPreference.secondary().withHedgeOptions(HEDGE_OPTIONS) | null | emptyList() | HEDGE_OPTIONS + ReadPreference.secondaryPreferred() + .withHedgeOptions(HEDGE_OPTIONS) | null | emptyList() | HEDGE_OPTIONS + ReadPreference.primaryPreferred() + .withHedgeOptions(HEDGE_OPTIONS) | null | emptyList() | HEDGE_OPTIONS + ReadPreference.nearest().withHedgeOptions(HEDGE_OPTIONS) | null | emptyList() | HEDGE_OPTIONS + ReadPreference.secondary().withTagSet(TAG_SET) + .withMaxStalenessMS(10, SECONDS) + .withHedgeOptions(HEDGE_OPTIONS) | 10000 | TAG_SET_LIST | HEDGE_OPTIONS + ReadPreference.secondaryPreferred().withTagSet(TAG_SET) + .withMaxStalenessMS(10, SECONDS) + .withHedgeOptions(HEDGE_OPTIONS) | 10000 | TAG_SET_LIST | HEDGE_OPTIONS + ReadPreference.primaryPreferred().withTagSet(TAG_SET) + .withMaxStalenessMS(10, SECONDS) + .withHedgeOptions(HEDGE_OPTIONS) | 10000 | TAG_SET_LIST | HEDGE_OPTIONS + ReadPreference.nearest().withTagSet(TAG_SET) + .withMaxStalenessMS(10, SECONDS) + .withHedgeOptions(HEDGE_OPTIONS) | 10000 | TAG_SET_LIST | HEDGE_OPTIONS + } + + def 'should throw if max staleness is negative'() { + when: + ReadPreference.secondary(-1, SECONDS) + + then: + thrown(IllegalArgumentException) + + when: + ReadPreference.secondary().withMaxStalenessMS(-1, SECONDS) + + then: + thrown(IllegalArgumentException) + } + + def 'should have correct valueOf'() { + expect: + ReadPreference.primary() == ReadPreference.valueOf('primary') + ReadPreference.secondary() == ReadPreference.valueOf('secondary') + ReadPreference.primaryPreferred() == ReadPreference.valueOf('primaryPreferred') + ReadPreference.secondaryPreferred() == ReadPreference.valueOf('secondaryPreferred') + ReadPreference.nearest() == ReadPreference.valueOf('nearest') + } + + def 'valueOf should throw with null name'() { + when: + ReadPreference.valueOf(null) + + then: + thrown(IllegalArgumentException) + + when: + ReadPreference.valueOf(null, asList(new TagSet(new Tag('dc', 'ny')))) + + then: + thrown(IllegalArgumentException) + } + + def 'valueOf should throw with unexpected name'() { + when: + ReadPreference.valueOf('unknown') + + then: + thrown(IllegalArgumentException) + + when: + ReadPreference.valueOf('unknown', asList(new TagSet(new Tag('dc', 'ny')))) + + then: + thrown(IllegalArgumentException) + + when: + ReadPreference.valueOf('primary', asList(new TagSet(new Tag('dc', 'ny')))) + + then: + thrown(IllegalArgumentException) + } + + def 'should have correct valueOf with tag set list'() { + def tags = [new TagSet([new Tag('dy', 'ny'), new Tag('rack', '1')]), new TagSet([new Tag('dy', 'ca'), new Tag('rack', '2')])] + + expect: + ReadPreference.secondary(tags) == ReadPreference.valueOf('secondary', tags) + ReadPreference.primaryPreferred(tags) == ReadPreference.valueOf('primaryPreferred', tags) + ReadPreference.secondaryPreferred(tags) == ReadPreference.valueOf('secondaryPreferred', tags) + ReadPreference.nearest(tags) == ReadPreference.valueOf('nearest', tags) + } + + def 'should have correct valueOf with max staleness'() { + expect: + ReadPreference.secondary(10, SECONDS) == ReadPreference.valueOf('secondary', [], 10, SECONDS) + } + + def 'should convert read preference with max staleness to correct documents'() { + expect: + readPreference.toDocument() == document + + where: + readPreference | document + ReadPreference.primaryPreferred(10, SECONDS) | parse('{mode : "primaryPreferred", maxStalenessSeconds : {$numberLong : "10" }}') + 
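+ // the rows below also show that sub-second remainders are dropped: 10005 ms becomes maxStalenessSeconds 10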
ReadPreference.secondary(10, SECONDS) | parse('{mode : "secondary", maxStalenessSeconds : {$numberLong : "10" }}') + ReadPreference.secondaryPreferred(10, SECONDS) | parse('{mode : "secondaryPreferred", maxStalenessSeconds : {$numberLong : "10" }}') + ReadPreference.nearest(10, SECONDS) | parse('{mode : "nearest", maxStalenessSeconds : {$numberLong : "10" }}') + ReadPreference.nearest(10005, MILLISECONDS) | parse('{mode : "nearest", maxStalenessSeconds : {$numberLong : "10" }}') + + ReadPreference.primaryPreferred() + .withMaxStalenessMS(10, SECONDS) | parse('{mode : "primaryPreferred", maxStalenessSeconds : {$numberLong : "10" }}') + ReadPreference.secondary() + .withMaxStalenessMS(10, SECONDS) | parse('{mode : "secondary", maxStalenessSeconds : {$numberLong : "10" }}') + ReadPreference.secondaryPreferred() + .withMaxStalenessMS(10, SECONDS) | parse('{mode : "secondaryPreferred", maxStalenessSeconds : {$numberLong : "10" }}') + ReadPreference.nearest() + .withMaxStalenessMS(10, SECONDS) | parse('{mode : "nearest", maxStalenessSeconds : {$numberLong : "10" }}') + ReadPreference.nearest() + .withMaxStalenessMS(10005, MILLISECONDS) | parse('{mode : "nearest", maxStalenessSeconds : {$numberLong : "10" }}') + } + + def 'should convert read preference with hedge options to correct documents'() { + expect: + readPreference.toDocument() == document + + where: + readPreference | document + ReadPreference.primaryPreferred() + .withHedgeOptions(HEDGE_OPTIONS) | parse('{mode : "primaryPreferred", hedge : { enabled : true }}') + ReadPreference.secondary() + .withHedgeOptions(HEDGE_OPTIONS) | parse('{mode : "secondary", hedge : { enabled : true }}') + ReadPreference.secondaryPreferred() + .withHedgeOptions(HEDGE_OPTIONS) | parse('{mode : "secondaryPreferred", hedge : { enabled : true }}') + ReadPreference.nearest() + .withHedgeOptions(HEDGE_OPTIONS) | parse('{mode : "nearest", hedge : { enabled : true }}') + } + + def 'should convert read preferences with a single tag set to correct documents'() { + expect: + readPreference.toDocument() == document + + where: + readPreference | document + ReadPreference.primary() | new BsonDocument('mode', new BsonString('primary')) + ReadPreference.primaryPreferred() | new BsonDocument('mode', new BsonString('primaryPreferred')) + ReadPreference.secondary() | new BsonDocument('mode', new BsonString('secondary')) + ReadPreference.secondaryPreferred() | new BsonDocument('mode', new BsonString('secondaryPreferred')) + ReadPreference.nearest() | new BsonDocument('mode', new BsonString('nearest')) + ReadPreference.primaryPreferred( + new TagSet([new Tag('dc', 'ny'), new Tag('rack', '1')])) | new BsonDocument('mode', new BsonString('primaryPreferred')) + .append('tags', new BsonArray([new BsonDocument('dc', new BsonString('ny')).append('rack', new BsonString('1'))])) + + ReadPreference.secondary( + new TagSet([new Tag('dc', 'ny'), new Tag('rack', '1')])) | new BsonDocument('mode', new BsonString('secondary')) + .append('tags', new BsonArray([new BsonDocument('dc', new BsonString('ny')).append('rack', new BsonString('1'))])) + + ReadPreference.secondaryPreferred( + new TagSet([new Tag('dc', 'ny'), new Tag('rack', '1')])) | new BsonDocument('mode', new BsonString('secondaryPreferred')) + .append('tags', new BsonArray([new BsonDocument('dc', new BsonString('ny')).append('rack', new BsonString('1'))])) + + ReadPreference.nearest( + new TagSet([new Tag('dc', 'ny'), new Tag('rack', '1')])) | new BsonDocument('mode', new BsonString('nearest')) + .append('tags', new 
BsonArray([new BsonDocument('dc', new BsonString('ny')).append('rack', new BsonString('1'))])) + + ReadPreference.primaryPreferred().withTagSet( + new TagSet([new Tag('dc', 'ny'), new Tag('rack', '1')])) | new BsonDocument('mode', new BsonString('primaryPreferred')) + .append('tags', new BsonArray([new BsonDocument('dc', new BsonString('ny')).append('rack', new BsonString('1'))])) + + ReadPreference.secondary().withTagSet( + new TagSet([new Tag('dc', 'ny'), new Tag('rack', '1')])) | new BsonDocument('mode', new BsonString('secondary')) + .append('tags', new BsonArray([new BsonDocument('dc', new BsonString('ny')).append('rack', new BsonString('1'))])) + + ReadPreference.secondaryPreferred().withTagSet( + new TagSet([new Tag('dc', 'ny'), new Tag('rack', '1')])) | new BsonDocument('mode', new BsonString('secondaryPreferred')) + .append('tags', new BsonArray([new BsonDocument('dc', new BsonString('ny')).append('rack', new BsonString('1'))])) + + ReadPreference.nearest().withTagSet( + new TagSet([new Tag('dc', 'ny'), new Tag('rack', '1')])) | new BsonDocument('mode', new BsonString('nearest')) + .append('tags', new BsonArray([new BsonDocument('dc', new BsonString('ny')).append('rack', new BsonString('1'))])) + } + + + def 'should convert read preferences with a tag set list to correct documents'() { + expect: + readPreference.toDocument() == document + + where: + readPreference | document + ReadPreference.primary() | new BsonDocument('mode', new BsonString('primary')) + ReadPreference.primaryPreferred() | new BsonDocument('mode', new BsonString('primaryPreferred')) + ReadPreference.secondary() | new BsonDocument('mode', new BsonString('secondary')) + ReadPreference.secondaryPreferred() | new BsonDocument('mode', new BsonString('secondaryPreferred')) + ReadPreference.nearest() | new BsonDocument('mode', new BsonString('nearest')) + ReadPreference.primaryPreferred( + [new TagSet([new Tag('dc', 'ny'), new Tag('rack', '1')])]) | new BsonDocument('mode', new BsonString('primaryPreferred')) + .append('tags', new BsonArray([new BsonDocument('dc', new BsonString('ny')).append('rack', new BsonString('1'))])) + + ReadPreference.secondary( + [new TagSet([new Tag('dc', 'ny'), new Tag('rack', '1')])]) | new BsonDocument('mode', new BsonString('secondary')) + .append('tags', new BsonArray([new BsonDocument('dc', new BsonString('ny')).append('rack', new BsonString('1'))])) + + ReadPreference.secondaryPreferred( + [new TagSet([new Tag('dc', 'ny'), new Tag('rack', '1')])]) | new BsonDocument('mode', new BsonString('secondaryPreferred')) + .append('tags', new BsonArray([new BsonDocument('dc', new BsonString('ny')).append('rack', new BsonString('1'))])) + + ReadPreference.nearest( + [new TagSet([new Tag('dc', 'ny'), new Tag('rack', '1')])]) | new BsonDocument('mode', new BsonString('nearest')) + .append('tags', new BsonArray([new BsonDocument('dc', new BsonString('ny')).append('rack', new BsonString('1'))])) + + ReadPreference.nearest( + [new TagSet([new Tag('dc', 'ny'), new Tag('rack', '1')]), 
+                 new TagSet([new Tag('dc', 'ca'), new Tag('rack', '2')])]) | new BsonDocument('mode', new BsonString('nearest'))
+                .append('tags', new BsonArray([new BsonDocument('dc', new BsonString('ny')).append('rack', new BsonString('1')),
+                                               new BsonDocument('dc', new BsonString('ca')).append('rack', new BsonString('2'))]))
+
+        ReadPreference.primaryPreferred().withTagSetList(
+                [new TagSet([new Tag('dc', 'ny'), new Tag('rack', '1')])]) | new BsonDocument('mode', new BsonString('primaryPreferred'))
+                .append('tags', new BsonArray([new BsonDocument('dc', new BsonString('ny')).append('rack', new BsonString('1'))]))
+
+        ReadPreference.secondary().withTagSetList(
+                [new TagSet([new Tag('dc', 'ny'), new Tag('rack', '1')])]) | new BsonDocument('mode', new BsonString('secondary'))
+                .append('tags', new BsonArray([new BsonDocument('dc', new BsonString('ny')).append('rack', new BsonString('1'))]))
+
+        ReadPreference.secondaryPreferred().withTagSetList(
+                [new TagSet([new Tag('dc', 'ny'), new Tag('rack', '1')])]) | new BsonDocument('mode', new BsonString('secondaryPreferred'))
+                .append('tags', new BsonArray([new BsonDocument('dc', new BsonString('ny')).append('rack', new BsonString('1'))]))
+
+        ReadPreference.nearest().withTagSetList(
+                [new TagSet([new Tag('dc', 'ny'), new Tag('rack', '1')])]) | new BsonDocument('mode', new BsonString('nearest'))
+                .append('tags', new BsonArray([new BsonDocument('dc', new BsonString('ny')).append('rack', new BsonString('1'))]))
+
+        ReadPreference.nearest().withTagSetList(
+                [new TagSet([new Tag('dc', 'ny'), new Tag('rack', '1')]),
+                 new TagSet([new Tag('dc', 'ca'), new Tag('rack', '2')])]) | new BsonDocument('mode', new BsonString('nearest'))
+                .append('tags', new BsonArray([new BsonDocument('dc', new BsonString('ny')).append('rack', new BsonString('1')),
+                                               new BsonDocument('dc', new BsonString('ca')).append('rack', new BsonString('2'))]))
+    }
+
+    def 'different read preferences should have different hash codes'() {
+        expect:
+        first.hashCode() != second.hashCode()
+
+        where:
+        first                                                         | second
+        ReadPreference.primary()                                      | ReadPreference.secondary()
+        ReadPreference.secondary()                                    | ReadPreference.nearest()
+        ReadPreference.secondary()                                    | ReadPreference.secondary([new TagSet([new Tag('dc', 'ny')])])
+        ReadPreference.secondary([new TagSet([new Tag('dc', 'ny')])]) | ReadPreference.secondary([new TagSet([new Tag('dc', 'la')])])
+        ReadPreference.secondary()                                    | ReadPreference.secondary(1000, MILLISECONDS)
+        ReadPreference.secondary().withHedgeOptions(HEDGE_OPTIONS)    | ReadPreference.secondary()
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/ServerAddressSpecification.groovy b/driver-core/src/test/unit/com/mongodb/ServerAddressSpecification.groovy
new file mode 100644
index 00000000000..d4194563d65
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/ServerAddressSpecification.groovy
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
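As a standalone illustration of the tag-set rendering pinned down by the tables above, the same document can be produced directly; a minimal sketch (the class name and printed output are illustrative, only the driver calls come from the tests):

import com.mongodb.ReadPreference;
import com.mongodb.Tag;
import com.mongodb.TagSet;
import org.bson.BsonDocument;

public final class ReadPreferenceDemo {
    public static void main(final String[] args) {
        // Restrict secondary reads to members tagged dc=ny, as in the rows above.
        ReadPreference preference = ReadPreference.secondary().withTagSet(new TagSet(new Tag("dc", "ny")));
        BsonDocument document = preference.toDocument();
        System.out.println(document.toJson()); // {"mode": "secondary", "tags": [{"dc": "ny"}]}
    }
}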
+ */ + +package com.mongodb + +import spock.lang.Specification +import spock.lang.Unroll + +class ServerAddressSpecification extends Specification { + + @Unroll + def 'constructors should parse hostname and port correctly'() { + expect: + address.getHost() == host + address.getPort() == port + address == new ServerAddress(host, port) + + where: + address | host | port + new ServerAddress() | ServerAddress.defaultHost() | ServerAddress.defaultPort() + new ServerAddress('10.0.0.1:1000') | '10.0.0.1' | 1000 + new ServerAddress('10.0.0.1') | '10.0.0.1' | ServerAddress.defaultPort() + new ServerAddress('10.0.0.1', 1000) | '10.0.0.1' | 1000 + new ServerAddress('somewhere') | 'somewhere' | ServerAddress.defaultPort() + new ServerAddress('SOMEWHERE') | 'somewhere' | ServerAddress.defaultPort() + new ServerAddress('somewhere:1000') | 'somewhere' | 1000 + new ServerAddress('somewhere', 1000) | 'somewhere' | 1000 + new ServerAddress('[2010:836B:4179::836B:4179]') | '2010:836b:4179::836b:4179' | ServerAddress.defaultPort() + new ServerAddress('[2010:836B:4179::836B:4179]:1000') | '2010:836b:4179::836b:4179' | 1000 + new ServerAddress('[2010:836B:4179::836B:4179]', 1000) | '2010:836b:4179::836b:4179' | 1000 + new ServerAddress('2010:836B:4179::836B:4179') | '2010:836b:4179::836b:4179' | ServerAddress.defaultPort() + new ServerAddress('2010:836B:4179::836B:4179', 1000) | '2010:836b:4179::836b:4179' | 1000 + } + + def 'ipv4 host with a port specified should throw when a port is also specified as an argument'() { + when: + new ServerAddress('10.0.0.1:80', 80) + then: + thrown(IllegalArgumentException) + + when: + new ServerAddress('10.0.0.1:1000', 80) + then: + thrown(IllegalArgumentException) + } + + def 'ipv6 host with a port specified should throw when a port is also specified as an argument'() { + when: + new ServerAddress('[2010:836B:4179::836B:4179]:80', 80) + then: + thrown(IllegalArgumentException) + + when: + new ServerAddress('[2010:836B:4179::836B:4179]:1000', 80) + then: + thrown(IllegalArgumentException) + } + + def 'ipv6 host should throw when terminating ] is not specified'() { + when: + new ServerAddress('[2010:836B:4179::836B:4179') + then: + thrown(IllegalArgumentException) + } + + def 'hostname with a port specified should throw when a port is also specified as an argument'() { + when: + new ServerAddress('somewhere:80', 80) + then: + thrown(IllegalArgumentException) + + when: + new ServerAddress('somewhere:1000', 80) + then: + thrown(IllegalArgumentException) + } + + def 'uri missing port should throw an exception'() { + when: + new ServerAddress('mongodb://somewhere/') + then: + thrown(MongoException) + } +} diff --git a/driver-core/src/test/unit/com/mongodb/TagSetSpecification.groovy b/driver-core/src/test/unit/com/mongodb/TagSetSpecification.groovy new file mode 100644 index 00000000000..f92fee847f9 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/TagSetSpecification.groovy @@ -0,0 +1,100 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
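The host/port parsing rules that this specification enumerates can also be observed outside the test harness; a hypothetical snippet (only ServerAddress itself is from the driver):

import com.mongodb.ServerAddress;

public final class ServerAddressDemo {
    public static void main(final String[] args) {
        // A bracketed IPv6 literal with an explicit port, as covered by the table above.
        ServerAddress address = new ServerAddress("[2010:836B:4179::836B:4179]:1000");
        System.out.println(address.getHost()); // 2010:836b:4179::836b:4179 (lower-cased)
        System.out.println(address.getPort()); // 1000
    }
}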
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb + +import spock.lang.Specification + +import static java.util.Arrays.asList + + +class TagSetSpecification extends Specification { + + def 'should iterate an empty tag set'() { + when: + def tagSet = new TagSet() + + then: + !tagSet.iterator().hasNext() + } + + def 'should iterate a tag set with a single tag'() { + def tag = new Tag('dc', 'ny') + when: + def tagSet = new TagSet(tag) + def iterator = tagSet.iterator() + then: + iterator.hasNext() + iterator.next() == tag + !iterator.hasNext() + } + + def 'should iterate a tag set with multiple tags'() { + def tagOne = new Tag('dc', 'ny') + def tagTwo = new Tag('rack', '1') + when: + def tagSet = new TagSet(asList(tagOne, tagTwo)) + def iterator = tagSet.iterator() + then: + iterator.hasNext() + iterator.next() == tagOne + iterator.hasNext() + iterator.next() == tagTwo + !iterator.hasNext() + } + + def 'should throw on null argument'() { + when: + new TagSet((Tag) null) + + then: + thrown(IllegalArgumentException) + + when: + new TagSet((List) null) + + then: + thrown(IllegalArgumentException) + + when: + new TagSet([new Tag('dc', 'ny'), null]) + + then: + thrown(IllegalArgumentException) + } + + def 'should throw on duplicate tag name'() { + when: + new TagSet([new Tag('dc', 'ny'), new Tag('dc', 'ca')]) + + then: + thrown(IllegalArgumentException) + } + + def 'should alphabetically order tags'() { + when: + def pTag = new Tag('p', '1') + def dcTag = new Tag('dc', 'ny') + def tagSet = new TagSet([pTag, dcTag]) + def iter = tagSet.iterator() + + then: + iter.next() == dcTag + iter.next() == pTag + !iter.hasNext() + tagSet == new TagSet([dcTag, pTag]) + } +} diff --git a/driver-core/src/test/unit/com/mongodb/TransactionOptionsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/TransactionOptionsSpecification.groovy new file mode 100644 index 00000000000..5b3f35f42f1 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/TransactionOptionsSpecification.groovy @@ -0,0 +1,95 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
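The ordering and equality behaviour that the TagSet specification above verifies looks like this in plain Java; a small sketch (class name and output comments are ours):

import com.mongodb.Tag;
import com.mongodb.TagSet;
import java.util.Arrays;

public final class TagSetDemo {
    public static void main(final String[] args) {
        // Tags are sorted by name regardless of insertion order, so these two sets are equal.
        TagSet first = new TagSet(Arrays.asList(new Tag("rack", "1"), new Tag("dc", "ny")));
        TagSet second = new TagSet(Arrays.asList(new Tag("dc", "ny"), new Tag("rack", "1")));
        System.out.println(first.equals(second)); // true
        for (Tag tag : first) {
            System.out.println(tag.getName() + "=" + tag.getValue()); // dc=ny, then rack=1
        }
    }
}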
+ */ + +package com.mongodb + +import spock.lang.Specification + +import java.util.concurrent.TimeUnit + +class TransactionOptionsSpecification extends Specification { + def 'should have correct defaults'() { + when: + def options = TransactionOptions.builder().build() + + then: + options.getReadConcern() == null + options.getWriteConcern() == null + options.getReadPreference() == null + options.getMaxCommitTime(TimeUnit.MILLISECONDS) == null + } + + def 'should throw an exception if the timeout is invalid'() { + given: + def builder = TransactionOptions.builder() + + + when: + builder.timeout(500, TimeUnit.NANOSECONDS) + + then: + thrown(IllegalArgumentException) + + when: + builder.timeout(-1, TimeUnit.SECONDS).build() + + then: + thrown(IllegalArgumentException) + } + + def 'should apply options set in builder'() { + when: + def options = TransactionOptions.builder() + .readConcern(ReadConcern.LOCAL) + .writeConcern(WriteConcern.JOURNALED) + .readPreference(ReadPreference.secondary()) + .maxCommitTime(5, TimeUnit.SECONDS) + .timeout(null, TimeUnit.MILLISECONDS) + .build() + + then: + options.readConcern == ReadConcern.LOCAL + options.writeConcern == WriteConcern.JOURNALED + options.readPreference == ReadPreference.secondary() + options.getMaxCommitTime(TimeUnit.MILLISECONDS) == 5000 + options.getMaxCommitTime(TimeUnit.SECONDS) == 5 + options.getTimeout(TimeUnit.MILLISECONDS) == null + } + + def 'should merge'() { + given: + def first = TransactionOptions.builder().build() + def second = TransactionOptions.builder().readConcern(ReadConcern.MAJORITY) + .writeConcern(WriteConcern.MAJORITY) + .readPreference(ReadPreference.secondary()) + .maxCommitTime(5, TimeUnit.SECONDS) + .timeout(123, TimeUnit.MILLISECONDS) + .build() + def third = TransactionOptions.builder() + .readConcern(ReadConcern.LOCAL) + .writeConcern(WriteConcern.W2) + .readPreference(ReadPreference.nearest()) + .maxCommitTime(10, TimeUnit.SECONDS) + .timeout(123, TimeUnit.MILLISECONDS) + .build() + + expect: + TransactionOptions.merge(first, second) == second + TransactionOptions.merge(second, first) == second + TransactionOptions.merge(second, third) == second + TransactionOptions.merge(third, second) == third + } +} diff --git a/driver-core/src/test/unit/com/mongodb/UnixServerAddressSpecification.groovy b/driver-core/src/test/unit/com/mongodb/UnixServerAddressSpecification.groovy new file mode 100644 index 00000000000..b8e18198eb2 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/UnixServerAddressSpecification.groovy @@ -0,0 +1,39 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
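The merge semantics asserted at the end of the TransactionOptions specification above (options on the first argument win, the second only fills gaps) can be sketched as follows; the class name and printed booleans are illustrative:

import com.mongodb.ReadConcern;
import com.mongodb.TransactionOptions;
import com.mongodb.WriteConcern;

public final class TransactionOptionsDemo {
    public static void main(final String[] args) {
        TransactionOptions defaults = TransactionOptions.builder()
                .readConcern(ReadConcern.MAJORITY)
                .writeConcern(WriteConcern.MAJORITY)
                .build();
        // The first argument takes precedence; the defaults only supply unset options.
        TransactionOptions merged = TransactionOptions.merge(
                TransactionOptions.builder().readConcern(ReadConcern.LOCAL).build(), defaults);
        System.out.println(merged.getReadConcern().equals(ReadConcern.LOCAL));      // true
        System.out.println(merged.getWriteConcern().equals(WriteConcern.MAJORITY)); // true
    }
}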
+ */ + +package com.mongodb + + +import spock.lang.Specification + +class UnixServerAddressSpecification extends Specification { + + def 'should return the path for the host'() { + when: + def path = '/tmp/mongodb.sock' + + then: + new UnixServerAddress(path).getHost() == path + } + + def 'should throw if the path does not end with .sock'() { + when: + new UnixServerAddress('localhost') + + then: + thrown(IllegalArgumentException) + } +} diff --git a/driver-core/src/test/unit/com/mongodb/UriOptionsTest.java b/driver-core/src/test/unit/com/mongodb/UriOptionsTest.java new file mode 100644 index 00000000000..736e3f5d201 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/UriOptionsTest.java @@ -0,0 +1,61 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.junit.Test; +import org.junit.runners.Parameterized; +import util.JsonPoweredTestHelper; + +import java.util.Collection; + +import static org.junit.Assume.assumeFalse; + +// See https://github.com/mongodb/specifications/tree/master/source/uri-options/tests +public class UriOptionsTest extends AbstractConnectionStringTest { + public UriOptionsTest(final String filename, final String description, final String input, final BsonDocument definition) { + super(filename, description, input, definition); + } + + @Test + public void shouldPassAllOutcomes() { + assumeFalse(getDefinition().getBoolean("warning", BsonBoolean.FALSE).getValue()); + assumeFalse(getDescription().equals("Arbitrary string readConcernLevel does not cause a warning")); + // Skip because Java driver does not support the tlsAllowInvalidCertificates & tlsInsecure options + assumeFalse(getDescription().startsWith("tlsInsecure and tlsAllowInvalidCertificates both present")); + assumeFalse(getDescription().startsWith("tlsAllowInvalidCertificates and tlsInsecure both present")); + // Skip because Java driver does not support the tlsDisableCertificateRevocationCheck option + assumeFalse(getDescription().contains("tlsDisableCertificateRevocationCheck")); + // Skip because Java driver does not support the tlsDisableOCSPEndpointCheck option + assumeFalse(getDescription().contains("tlsDisableOCSPEndpointCheck")); + + // No CANONICALIZE_HOST_NAME support https://jira.mongodb.org/browse/JAVA-4278 + assumeFalse(getDescription().equals("Valid auth options are parsed correctly (GSSAPI)")); + + if (getDefinition().getBoolean("valid", BsonBoolean.TRUE).getValue()) { + testValidOptions(); + } else { + testInvalidUris(); + } + } + + @Parameterized.Parameters(name = "{1}") + public static Collection data() { + return JsonPoweredTestHelper.getTestData("uri-options"); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/WriteConcernConnectionStringTest.java b/driver-core/src/test/unit/com/mongodb/WriteConcernConnectionStringTest.java new file mode 100644 index 00000000000..503cac0a5e9 --- /dev/null +++ 
b/driver-core/src/test/unit/com/mongodb/WriteConcernConnectionStringTest.java @@ -0,0 +1,91 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import junit.framework.TestCase; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonNumber; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import util.JsonPoweredTestHelper; + +import java.util.Collection; +import java.util.concurrent.TimeUnit; + +// See https://github.com/mongodb/specifications/tree/master/source/read-write-concern/tests/connection-string +@RunWith(Parameterized.class) +public class WriteConcernConnectionStringTest extends TestCase { + private final String description; + private final String input; + private final BsonDocument definition; + + public WriteConcernConnectionStringTest(@SuppressWarnings("unused") final String fileName, final String description, + final String input, final BsonDocument definition) { + this.description = description; + this.input = input; + this.definition = definition; + } + + @Test + public void shouldPassAllOutcomes() { + boolean valid = definition.getBoolean("valid", BsonBoolean.TRUE).getValue(); + try { + ConnectionString connectionString = new ConnectionString(input); + WriteConcern writeConcern = connectionString.getWriteConcern() != null + ? 
connectionString.getWriteConcern() + : WriteConcern.ACKNOWLEDGED; + assertTrue(valid); + assertEquals(getExpectedWriteConcern(), writeConcern); + } catch (IllegalArgumentException e) { + assertFalse(valid); + } + } + + private WriteConcern getExpectedWriteConcern() { + BsonDocument writeConcernDocument = definition.getDocument("writeConcern"); + + BsonValue wValue = writeConcernDocument.get("w"); + WriteConcern retVal; + if (wValue == null) { + retVal = WriteConcern.ACKNOWLEDGED; + } else if (wValue instanceof BsonNumber) { + retVal = new WriteConcern(wValue.asNumber().intValue()); + } else if (wValue instanceof BsonString) { + retVal = new WriteConcern(wValue.asString().getValue()); + } else { + throw new IllegalArgumentException("Unexpected w value: " + wValue); + } + + if (writeConcernDocument.containsKey("wtimeoutMS")) { + retVal = retVal.withWTimeout(writeConcernDocument.getNumber("wtimeoutMS", new BsonInt32(0)).intValue(), TimeUnit.MILLISECONDS); + } + if (writeConcernDocument.containsKey("journal")) { + retVal = retVal.withJournal(writeConcernDocument.getBoolean("journal", BsonBoolean.FALSE).getValue()); + } + return retVal; + } + + @Parameterized.Parameters(name = "{0}: {1}") + public static Collection data() { + return JsonPoweredTestHelper.getTestData("read-write-concern/tests/connection-string/write-concern.json"); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/WriteConcernDocumentTest.java b/driver-core/src/test/unit/com/mongodb/WriteConcernDocumentTest.java new file mode 100644 index 00000000000..18185250e57 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/WriteConcernDocumentTest.java @@ -0,0 +1,88 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
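For readers who want to see the option names in a URI rather than in the spec's JSON cases, a hedged sketch of what the test above exercises (the URI is hypothetical; w, journal and wtimeoutMS are the option names used by the spec tests):

import com.mongodb.ConnectionString;
import com.mongodb.WriteConcern;
import java.util.concurrent.TimeUnit;

public final class WriteConcernFromUriDemo {
    public static void main(final String[] args) {
        ConnectionString connectionString =
                new ConnectionString("mongodb://localhost/?w=majority&journal=true&wtimeoutMS=500");
        WriteConcern expected = new WriteConcern("majority")
                .withWTimeout(500, TimeUnit.MILLISECONDS)
                .withJournal(true);
        // The parsed write concern should carry all three settings from the query string.
        System.out.println(expected.equals(connectionString.getWriteConcern())); // true
    }
}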
+ */
+
+package com.mongodb;
+
+import junit.framework.TestCase;
+import org.bson.BsonBoolean;
+import org.bson.BsonDocument;
+import org.bson.BsonInt32;
+import org.bson.BsonNumber;
+import org.bson.BsonString;
+import org.bson.BsonValue;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import util.JsonPoweredTestHelper;
+
+import java.util.Collection;
+import java.util.concurrent.TimeUnit;
+
+// See https://github.com/mongodb/specifications/tree/master/source/read-write-concern/tests/document
+@RunWith(Parameterized.class)
+public class WriteConcernDocumentTest extends TestCase {
+    private final String description;
+    private final BsonDocument writeConcernDocument;
+    private final BsonDocument definition;
+
+    public WriteConcernDocumentTest(@SuppressWarnings("unused") final String fileName, final String description,
+                                    @SuppressWarnings("unused") final String uri, final BsonDocument definition) {
+        this.description = description;
+        this.writeConcernDocument = definition.getDocument("writeConcern");
+        this.definition = definition;
+    }
+
+    @Test
+    public void shouldPassAllOutcomes() {
+        boolean valid = definition.getBoolean("valid", BsonBoolean.TRUE).getValue();
+        try {
+            WriteConcern writeConcern = getWriteConcern(writeConcernDocument);
+            assertTrue(valid);
+            assertEquals(writeConcern.isAcknowledged(), definition.getBoolean("isAcknowledged").getValue());
+            assertEquals(writeConcern.isServerDefault(), definition.getBoolean("isServerDefault").getValue());
+            assertEquals(writeConcern.asDocument(), definition.getDocument("writeConcernDocument"));
+        } catch (IllegalArgumentException e) {
+            assertFalse(valid);
+        }
+    }
+
+    private WriteConcern getWriteConcern(final BsonDocument writeConcernDocument) {
+        BsonValue wValue = writeConcernDocument.get("w");
+        WriteConcern retVal;
+        if (wValue == null) {
+            retVal = WriteConcern.ACKNOWLEDGED;
+        } else if (wValue instanceof BsonNumber) {
+            retVal = new WriteConcern(wValue.asNumber().intValue());
+        } else if (wValue instanceof BsonString) {
+            retVal = new WriteConcern(wValue.asString().getValue());
+        } else {
+            throw new IllegalArgumentException("Unexpected w value: " + wValue);
+        }
+
+        if (writeConcernDocument.containsKey("wtimeoutMS")) {
+            retVal = retVal.withWTimeout(writeConcernDocument.getNumber("wtimeoutMS", new BsonInt32(0)).intValue(), TimeUnit.MILLISECONDS);
+        }
+        if (writeConcernDocument.containsKey("journal")) {
+            retVal = retVal.withJournal(writeConcernDocument.getBoolean("journal", BsonBoolean.FALSE).getValue());
+        }
+        return retVal;
+    }
+
+    @Parameterized.Parameters(name = "{0}: {1}")
+    public static Collection<Object[]> data() {
+        return JsonPoweredTestHelper.getTestData("read-write-concern/tests/document/write-concern.json");
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/WriteConcernSpecification.groovy b/driver-core/src/test/unit/com/mongodb/WriteConcernSpecification.groovy
new file mode 100644
index 00000000000..c895496535d
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/WriteConcernSpecification.groovy
@@ -0,0 +1,288 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
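The document form checked by the test above can also be inspected directly; a minimal sketch (class name and output comments are ours, asDocument comes from the test):

import com.mongodb.WriteConcern;
import java.util.concurrent.TimeUnit;
import org.bson.BsonDocument;

public final class WriteConcernDocumentDemo {
    public static void main(final String[] args) {
        // ACKNOWLEDGED is the server default and renders as an empty document.
        System.out.println(WriteConcern.ACKNOWLEDGED.asDocument()); // {}
        // Any explicit field takes the write concern off the server default.
        BsonDocument document = new WriteConcern(2).withWTimeout(100, TimeUnit.MILLISECONDS).asDocument();
        System.out.println(document.toJson()); // {"w": 2, "wtimeout": 100}
    }
}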
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb + +import org.bson.BsonBoolean +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonString +import spock.lang.Specification +import spock.lang.Unroll + +import static java.util.concurrent.TimeUnit.MICROSECONDS +import static java.util.concurrent.TimeUnit.MILLISECONDS + +class WriteConcernSpecification extends Specification { + + @Unroll + def 'constructors should set up write concern #wc correctly'() { + expect: + wc.getWObject() == w + wc.getWTimeout(MILLISECONDS) == wTimeout + wc.getJournal() == journal + + where: + wc | w | wTimeout | journal + new WriteConcern(1) | 1 | null | null + new WriteConcern(1, 10) | 1 | 10 | null + new WriteConcern((Object) null, 0, false) | null | 0 | false + new WriteConcern('majority') | 'majority' | null | null + } + + def 'test journal getters'() { + expect: + wc.getJournal() == journal + + where: + wc | journal + new WriteConcern(null, null, null) | null + new WriteConcern(null, null, false) | false + new WriteConcern(null, null, true) | true + } + + + def 'test wTimeout getters'() { + expect: + wc.getWTimeout(MILLISECONDS) == wTimeout + wc.getWTimeout(MICROSECONDS) == (wTimeout == null ? null : wTimeout * 1000) + + where: + wc | wTimeout + new WriteConcern(null, null, null) | null + new WriteConcern(null, 1000, null) | 1000 + } + + def 'test wTimeout getter error conditions'() { + when: + WriteConcern.ACKNOWLEDGED.getWTimeout(null) + + then: + thrown(IllegalArgumentException) + } + + def 'test getWObject'() { + expect: + wc.getWObject() == wObject + + where: + wc | wObject + WriteConcern.ACKNOWLEDGED | null + WriteConcern.W1 | 1 + WriteConcern.MAJORITY | 'majority' + } + + def 'test getWString'() { + expect: + wc.getWString() == wString + + where: + wc | wString + WriteConcern.MAJORITY | 'majority' + } + + def 'test getWString error conditions'() { + when: + wc.getWString() + + then: + thrown(IllegalStateException) + + where: + wc << [WriteConcern.ACKNOWLEDGED, WriteConcern.W1] + } + + def 'test getW'() { + expect: + wc.getW() == w + + where: + wc | w + WriteConcern.UNACKNOWLEDGED | 0 + WriteConcern.W1 | 1 + } + + def 'test getW error conditions'() { + when: + wc.getW() + + then: + thrown(IllegalStateException) + + where: + wc << [WriteConcern.ACKNOWLEDGED, WriteConcern.MAJORITY] + } + + def 'test withW methods'() { + expect: + WriteConcern.UNACKNOWLEDGED.withW(1) == new WriteConcern(1, null, null) + WriteConcern.UNACKNOWLEDGED.withW('dc1') == new WriteConcern('dc1', null, null) + + when: + WriteConcern.UNACKNOWLEDGED.withW(null) + + then: + thrown(IllegalArgumentException) + + when: + WriteConcern.UNACKNOWLEDGED.withW(-1) + + then: + thrown(IllegalArgumentException) + + when: + WriteConcern.UNACKNOWLEDGED.withJournal(true) + + then: + thrown(IllegalArgumentException) + } + + def 'test withJournal methods'() { + expect: + new WriteConcern(null, null, null).withJournal(true) == new WriteConcern(null, null, true) + } + + def 'test withWTimeout methods'() { + expect: + new WriteConcern(null, null, null).withWTimeout(0, MILLISECONDS) == new WriteConcern(null, 0, null) + new 
WriteConcern(null, null, null).withWTimeout(1000, MILLISECONDS) == new WriteConcern(null, 1000, null) + + when: + WriteConcern.ACKNOWLEDGED.withWTimeout(0, null) + + then: + thrown(IllegalArgumentException) + + when: + WriteConcern.ACKNOWLEDGED.withWTimeout(-1, MILLISECONDS) + + then: + thrown(IllegalArgumentException) + + when: + WriteConcern.ACKNOWLEDGED.withWTimeout(Integer.MAX_VALUE + 1, MILLISECONDS) + + then: + thrown(IllegalArgumentException) + } + + @Unroll + @SuppressWarnings('DuplicateMapLiteral') + def '#wc should return write concern document #commandDocument'() { + expect: + wc.asDocument() == commandDocument + + where: + wc | commandDocument + WriteConcern.UNACKNOWLEDGED | new BsonDocument('w', new BsonInt32(0)) + WriteConcern.ACKNOWLEDGED | new BsonDocument() + WriteConcern.W2 | new BsonDocument('w', new BsonInt32(2)) + WriteConcern.JOURNALED | new BsonDocument('j', BsonBoolean.TRUE) + new WriteConcern('majority') | new BsonDocument('w', new BsonString('majority')) + new WriteConcern(2, 100) | new BsonDocument('w', new BsonInt32(2)).append('wtimeout', new BsonInt32(100)) + } + + @SuppressWarnings('ExplicitCallToEqualsMethod') + def 'test equals'() { + expect: + wc.equals(compareTo) == expectedResult + + where: + wc | compareTo | expectedResult + WriteConcern.ACKNOWLEDGED | WriteConcern.ACKNOWLEDGED | true + WriteConcern.ACKNOWLEDGED | null | false + WriteConcern.ACKNOWLEDGED | WriteConcern.UNACKNOWLEDGED | false + new WriteConcern(1, 0) | new WriteConcern(1, 1) | false + } + + def 'test hashCode'() { + expect: + wc.hashCode() == hashCode + + where: + wc | hashCode + WriteConcern.ACKNOWLEDGED | 0 + WriteConcern.W1 | 961 + WriteConcern.W2 | 1922 + WriteConcern.MAJORITY | -322299115 + } + + def 'test constants'() { + expect: + constructedWriteConcern == constantWriteConcern + + where: + constructedWriteConcern | constantWriteConcern + new WriteConcern((Object) null, null, null) | WriteConcern.ACKNOWLEDGED + new WriteConcern(1) | WriteConcern.W1 + new WriteConcern(2) | WriteConcern.W2 + new WriteConcern(3) | WriteConcern.W3 + new WriteConcern(0) | WriteConcern.UNACKNOWLEDGED + WriteConcern.ACKNOWLEDGED.withJournal(true) | WriteConcern.JOURNALED + new WriteConcern('majority') | WriteConcern.MAJORITY + } + + def 'test isAcknowledged'() { + expect: + writeConcern.isAcknowledged() == acknowledged + + where: + writeConcern | acknowledged + WriteConcern.ACKNOWLEDGED | true + WriteConcern.W1 | true + WriteConcern.W2 | true + WriteConcern.W3 | true + WriteConcern.MAJORITY | true + WriteConcern.UNACKNOWLEDGED | false + WriteConcern.UNACKNOWLEDGED.withWTimeout(10, MILLISECONDS) | false + WriteConcern.UNACKNOWLEDGED.withJournal(false) | false + } + + def 'test value of'() { + expect: + wc == valueOf + + where: + wc | valueOf + WriteConcern.ACKNOWLEDGED | WriteConcern.valueOf('ACKNOWLEDGED') + WriteConcern.ACKNOWLEDGED | WriteConcern.valueOf('acknowledged') + null | WriteConcern.valueOf('blahblah') + } + + def 'write concern should know if it is the server default'() { + expect: + WriteConcern.ACKNOWLEDGED.serverDefault + !WriteConcern.UNACKNOWLEDGED.serverDefault + !WriteConcern.ACKNOWLEDGED.withJournal(false).serverDefault + !WriteConcern.ACKNOWLEDGED.withWTimeout(0, MILLISECONDS).serverDefault + } + + def 'should throw when w is -1'() { + when: + new WriteConcern(-1) + + then: + thrown(IllegalArgumentException) + } + + def 'should throw when w is null'() { + when: + new WriteConcern((String) null) + + then: + thrown(IllegalArgumentException) + } +} diff --git 
a/driver-core/src/test/unit/com/mongodb/async/CallbackResultHolder.java b/driver-core/src/test/unit/com/mongodb/async/CallbackResultHolder.java
new file mode 100644
index 00000000000..2e819617635
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/async/CallbackResultHolder.java
@@ -0,0 +1,109 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.async;
+
+import com.mongodb.internal.async.SingleResultCallback;
+
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.util.concurrent.atomic.AtomicInteger;
+
+/**
+ * A SingleResultCallback implementation that saves the result of the callback.
+ *
+ * @param <T> the result type
+ * @since 3.0
+ */
+class CallbackResultHolder<T> implements SingleResultCallback<T> {
+    private T result = null;
+    private Throwable error = null;
+    private final AtomicInteger completionCounter = new AtomicInteger();
+
+    /**
+     * Set the result of the callback
+     *
+     * @param result the result of the callback
+     * @param error the throwable error of the callback
+     */
+    public void onResult(final T result, final Throwable error) {
+        if (completionCounter.getAndIncrement() > 0) {
+            throw new IllegalStateException("The CallbackResult cannot be initialized multiple times. The first time it was initialized " +
+                    "with " + (this.error != null ? getErrorString(this.error) : this.result) + "\n" +
+                    "On invocation number " + completionCounter.get() + " it was initialized " +
+                    "with " + (error != null ? getErrorString(error) : result));
+        }
+        this.result = result;
+        this.error = error;
+    }
+
+    private String getErrorString(final Throwable error) {
+        StringWriter writer = new StringWriter();
+        error.printStackTrace(new PrintWriter(writer));
+        return writer.toString();
+    }
+
+    /**
+     * Returns the result of the callback or null.
+     *
+     * @return the result of the callback if completed or null
+     */
+    public T getResult() {
+        return result;
+    }
+
+    /**
+     * Gets the error result of the callback or null.
+     *
+     * @return the error result of the callback or null
+     */
+    public Throwable getError() {
+        return error;
+    }
+
+    /**
+     * Returns true if the callback returned an error.
+     *
+     * @return true if the callback returned an error
+     */
+    public boolean hasError() {
+        return error != null;
+    }
+
+    /**
+     * Returns true if the callback has been called.
+     *
+     * @return true if the callback has been called
+     */
+    public boolean isDone() {
+        return completionCounter.get() > 0;
+    }
+
+    public boolean wasInvokedMultipleTimes() {
+        return completionCounter.get() > 1;
+    }
+
+    @Override
+    public String toString() {
+        return "CallbackResultHolder{" +
+                "result=" + result +
+                ", error=" + error +
+                ", isDone=" + isDone() +
+                ", completionCounter=" + completionCounter.get() +
+                '}';
+    }
+
+}
diff --git a/driver-core/src/test/unit/com/mongodb/async/FutureResultCallback.java b/driver-core/src/test/unit/com/mongodb/async/FutureResultCallback.java
new file mode 100644
index 00000000000..b3648f3e314
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/async/FutureResultCallback.java
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.async;
+
+import com.mongodb.MongoException;
+import com.mongodb.MongoTimeoutException;
+import com.mongodb.internal.async.SingleResultCallback;
+
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+
+import static com.mongodb.ClusterFixture.TIMEOUT;
+
+/**
+ * A SingleResultCallback Future implementation.
+ *
+ * The result of the callback is stored internally and is accessible via {@link #get}, which will either return the successful result
+ * of the callback or, if the callback returned an error, the error will be thrown.
+ *
+ * @param <T> the result type
+ * @since 3.0
+ */
+public class FutureResultCallback<T> implements SingleResultCallback<T>, Future<T> {
+    private final CountDownLatch latch;
+    private final CallbackResultHolder<T> result;
+
+    public FutureResultCallback() {
+        latch = new CountDownLatch(1);
+        result = new CallbackResultHolder<>();
+    }
+
+    @Override
+    public boolean cancel(final boolean mayInterruptIfRunning) {
+        return false;
+    }
+
+    @Override
+    public boolean isCancelled() {
+        return false;
+    }
+
+    @Override
+    public boolean isDone() {
+        return result.isDone();
+    }
+
+    public boolean isCompletedExceptionally() {
+        return result.hasError();
+    }
+
+    public boolean wasInvokedMultipleTimes() {
+        return result.wasInvokedMultipleTimes();
+    }
+
+    @Override
+    public T get() {
+        return get(TIMEOUT, TimeUnit.SECONDS);
+    }
+
+    @Override
+    public T get(final long timeout, final TimeUnit unit) {
+        try {
+            if (!latch.await(timeout, unit)) {
+                throw new MongoTimeoutException("Callback timed out");
+            }
+        } catch (InterruptedException e) {
+            throw new MongoException("Latch interrupted");
+        }
+
+        if (result.hasError()) {
+            if (result.getError() instanceof RuntimeException) {
+                throw (RuntimeException) result.getError();
+            } else if (result.getError() instanceof Error) {
+                throw (Error) result.getError();
+            } else {
+                throw new RuntimeException("Wrapping unexpected Throwable", result.getError());
+            }
+        } else {
+            return result.getResult();
+        }
+    }
+
+    @Override
+    public void onResult(final T result, final Throwable t) {
+        this.result.onResult(result, t);
+        latch.countDown();
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/client/ImmutableDocument.java b/driver-core/src/test/unit/com/mongodb/client/ImmutableDocument.java
new file mode 100644
index 00000000000..885b29fb45e
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/client/ImmutableDocument.java
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import org.bson.BsonDocument;
+import org.bson.BsonDocumentWrapper;
+import org.bson.codecs.configuration.CodecRegistry;
+import org.bson.conversions.Bson;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.LinkedHashMap;
+import java.util.Map;
+import java.util.Set;
+
+public final class ImmutableDocument implements Map<String, Object>, Bson {
+    private final Map<String, Object> immutableDocument;
+
+    /**
+     * Creates a Document instance initialized with the given map.
+     *
+     * @param map initial map
+     */
+    public ImmutableDocument(final Map<String, Object> map) {
+        immutableDocument = Collections.unmodifiableMap(new LinkedHashMap<>(map));
+    }
+
+
+    @Override
+    public int size() {
+        return immutableDocument.size();
+    }
+
+    @Override
+    public boolean isEmpty() {
+        return immutableDocument.isEmpty();
+    }
+
+    @Override
+    public boolean containsKey(final Object key) {
+        return immutableDocument.containsKey(key);
+    }
+
+    @Override
+    public boolean containsValue(final Object value) {
+        return immutableDocument.containsValue(value);
+    }
+
+    @Override
+    public Object get(final Object key) {
+        return immutableDocument.get(key);
+    }
+
+    @Override
+    public Object put(final String key, final Object value) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public Object remove(final Object key) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void putAll(final Map<? extends String, ?> m) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public void clear() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public Set<String> keySet() {
+        return immutableDocument.keySet();
+    }
+
+    @Override
+    public Collection<Object> values() {
+        return immutableDocument.values();
+    }
+
+    @Override
+    public Set<Map.Entry<String, Object>> entrySet() {
+        return immutableDocument.entrySet();
+    }
+
+    @Override
+    public <TDocument> BsonDocument toBsonDocument(final Class<TDocument> tDocumentClass, final CodecRegistry codecRegistry) {
+        return new BsonDocumentWrapper<>(this, codecRegistry.get(ImmutableDocument.class));
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/client/ImmutableDocumentCodec.java b/driver-core/src/test/unit/com/mongodb/client/ImmutableDocumentCodec.java
new file mode 100644
index 00000000000..a9278957af3
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/client/ImmutableDocumentCodec.java
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
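A short sketch of how the map contract above behaves in practice (class name and println calls are illustrative; every mutator throws as implemented above):

import com.mongodb.client.ImmutableDocument;
import java.util.LinkedHashMap;
import java.util.Map;

public final class ImmutableDocumentDemo {
    public static void main(final String[] args) {
        Map<String, Object> seed = new LinkedHashMap<>();
        seed.put("x", 1);
        ImmutableDocument document = new ImmutableDocument(seed);
        System.out.println(document.get("x")); // 1
        try {
            document.put("y", 2); // mutators are unsupported by design
        } catch (UnsupportedOperationException e) {
            System.out.println("immutable");
        }
    }
}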
+ */
+
+package com.mongodb.client;
+
+import org.bson.BsonReader;
+import org.bson.BsonValue;
+import org.bson.BsonWriter;
+import org.bson.Document;
+import org.bson.codecs.CollectibleCodec;
+import org.bson.codecs.DecoderContext;
+import org.bson.codecs.EncoderContext;
+import org.bson.codecs.configuration.CodecRegistry;
+import org.bson.types.ObjectId;
+
+import java.util.LinkedHashMap;
+
+import static java.lang.String.format;
+
+public final class ImmutableDocumentCodec implements CollectibleCodec<ImmutableDocument> {
+    private final CodecRegistry codecRegistry;
+    private static final String ID_FIELD_NAME = "_id";
+
+    public ImmutableDocumentCodec(final CodecRegistry codecRegistry) {
+        this.codecRegistry = codecRegistry;
+    }
+
+    @Override
+    public ImmutableDocument generateIdIfAbsentFromDocument(final ImmutableDocument document) {
+        LinkedHashMap<String, Object> mutable = new LinkedHashMap<>(document);
+        mutable.put(ID_FIELD_NAME, new ObjectId());
+        return new ImmutableDocument(mutable);
+    }
+
+    @Override
+    public boolean documentHasId(final ImmutableDocument document) {
+        return document.containsKey(ID_FIELD_NAME);
+    }
+
+    @Override
+    public BsonValue getDocumentId(final ImmutableDocument document) {
+        if (!documentHasId(document)) {
+            throw new IllegalStateException(format("The document does not contain an %s", ID_FIELD_NAME));
+        }
+        return document.toBsonDocument(ImmutableDocument.class, codecRegistry).get(ID_FIELD_NAME);
+    }
+
+    @Override
+    public void encode(final BsonWriter writer, final ImmutableDocument value, final EncoderContext encoderContext) {
+        codecRegistry.get(Document.class).encode(writer, new Document(value), encoderContext);
+    }
+
+    @Override
+    public Class<ImmutableDocument> getEncoderClass() {
+        return ImmutableDocument.class;
+    }
+
+    @Override
+    public ImmutableDocument decode(final BsonReader reader, final DecoderContext decoderContext) {
+        Document document = codecRegistry.get(Document.class).decode(reader, decoderContext);
+        return new ImmutableDocument(document);
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/client/ImmutableDocumentCodecProvider.java b/driver-core/src/test/unit/com/mongodb/client/ImmutableDocumentCodecProvider.java
new file mode 100644
index 00000000000..9df1f8cf1f0
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/client/ImmutableDocumentCodecProvider.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
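Because ImmutableDocument cannot be mutated, the codec above generates ids copy-on-write rather than in place; a hedged sketch of that design choice (class name, registry contents and output are ours):

import com.mongodb.client.ImmutableDocument;
import com.mongodb.client.ImmutableDocumentCodec;
import java.util.Collections;
import org.bson.codecs.DocumentCodecProvider;
import org.bson.codecs.ValueCodecProvider;
import org.bson.codecs.configuration.CodecRegistries;
import org.bson.codecs.configuration.CodecRegistry;

public final class ImmutableDocumentCodecDemo {
    public static void main(final String[] args) {
        CodecRegistry registry = CodecRegistries.fromProviders(new ValueCodecProvider(), new DocumentCodecProvider());
        ImmutableDocumentCodec codec = new ImmutableDocumentCodec(registry);
        ImmutableDocument document = new ImmutableDocument(Collections.<String, Object>singletonMap("x", 1));
        System.out.println(codec.documentHasId(document)); // false
        // generateIdIfAbsentFromDocument returns a new copy carrying an _id; the original is untouched.
        System.out.println(codec.documentHasId(codec.generateIdIfAbsentFromDocument(document))); // true
    }
}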
+ */
+
+package com.mongodb.client;
+
+import org.bson.codecs.Codec;
+import org.bson.codecs.configuration.CodecProvider;
+import org.bson.codecs.configuration.CodecRegistry;
+
+public final class ImmutableDocumentCodecProvider implements CodecProvider {
+    @Override
+    @SuppressWarnings("unchecked")
+    public <T> Codec<T> get(final Class<T> clazz, final CodecRegistry registry) {
+        if (clazz.equals(ImmutableDocument.class)) {
+            return (Codec<T>) new ImmutableDocumentCodec(registry);
+        }
+        return null;
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/client/async/FutureResultCallbackSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/async/FutureResultCallbackSpecification.groovy
new file mode 100644
index 00000000000..82e23c913e8
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/client/async/FutureResultCallbackSpecification.groovy
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.async
+
+import com.mongodb.MongoException
+import com.mongodb.MongoTimeoutException
+import com.mongodb.async.FutureResultCallback
+import spock.lang.Specification
+
+import static java.util.concurrent.TimeUnit.MILLISECONDS
+import static java.util.concurrent.TimeUnit.SECONDS
+
+class FutureResultCallbackSpecification extends Specification {
+
+    def 'should return false if tried to cancel'() {
+        when:
+        def futureResultCallback = new FutureResultCallback()
+
+        then:
+        !futureResultCallback.cancel(false)
+        !futureResultCallback.cancel(true)
+        !futureResultCallback.isCancelled()
+    }
+
+    def 'should return true if done'() {
+        when:
+        def futureResultCallback = new FutureResultCallback()
+        futureResultCallback.onResult(null, null)
+
+        then:
+        futureResultCallback.isDone()
+    }
+
+    def 'should return the result on get'() {
+        when:
+        def futureResultCallback = new FutureResultCallback()
+        futureResultCallback.onResult(true, null)
+
+        then:
+        futureResultCallback.get()
+
+        when:
+        futureResultCallback = new FutureResultCallback()
+        futureResultCallback.onResult(null, new MongoException('failed'))
+        futureResultCallback.get()
+
+        then:
+        thrown MongoException
+
+        when:
+        futureResultCallback = new FutureResultCallback()
+        futureResultCallback.onResult(true, null)
+
+        then:
+        futureResultCallback.get(1, SECONDS)
+
+        when:
+        futureResultCallback = new FutureResultCallback()
+        futureResultCallback.onResult(null, new MongoException('failed'))
+        futureResultCallback.get(1, SECONDS)
+
+        then:
+        thrown MongoException
+    }
+
+    def 'should timeout when no result and called get'() {
+        when:
+        def futureResultCallback = new FutureResultCallback()
+        futureResultCallback.get(1, MILLISECONDS)
+
+        then:
+        thrown MongoTimeoutException
+    }
+
+    def 'should throw an error if onResult called more than once'() {
+        when:
+        def futureResultCallback = new FutureResultCallback()
+        futureResultCallback.onResult(true, null)
+
+        then:
+        futureResultCallback.get()
+
+        when:
+        futureResultCallback.onResult(false, null)
+
+        then:
+        thrown IllegalStateException
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/client/gridfs/codecs/GridFSFileCodecProviderSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/gridfs/codecs/GridFSFileCodecProviderSpecification.groovy
new file mode 100644
index 00000000000..6796d8323a9
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/client/gridfs/codecs/GridFSFileCodecProviderSpecification.groovy
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.gridfs.codecs
+
+import com.mongodb.client.gridfs.model.GridFSFile
+import org.bson.codecs.BsonValueCodecProvider
+import org.bson.codecs.DocumentCodecProvider
+import org.bson.codecs.ValueCodecProvider
+import org.bson.codecs.configuration.CodecRegistries
+import spock.lang.Specification
+
+class GridFSFileCodecProviderSpecification extends Specification {
+    private final provider = new GridFSFileCodecProvider()
+    private final registry = CodecRegistries.fromProviders(provider, new DocumentCodecProvider(), new BsonValueCodecProvider(),
+            new ValueCodecProvider())
+
+    def 'should provide supported codec or null'() {
+        expect:
+        provider.get(GridFSFile, registry) instanceof GridFSFileCodec
+        provider.get(TestType, registry) == null
+    }
+
+    @SuppressWarnings('EmptyClass')
+    class TestType {
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/client/gridfs/codecs/GridFSFileCodecSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/gridfs/codecs/GridFSFileCodecSpecification.groovy
new file mode 100644
index 00000000000..8559bcc5f22
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/client/gridfs/codecs/GridFSFileCodecSpecification.groovy
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
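Outside the specification just concluded, FutureResultCallback is simply handed to an async operation and then awaited; a minimal sketch (the value 42 and the class name are illustrative):

import com.mongodb.async.FutureResultCallback;

public final class FutureResultCallbackDemo {
    public static void main(final String[] args) {
        FutureResultCallback<Integer> callback = new FutureResultCallback<>();
        // An asynchronous operation would normally invoke this exactly once.
        callback.onResult(42, null);
        // get() returns the value, or rethrows the Throwable if one was passed to onResult.
        System.out.println(callback.get()); // 42
    }
}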
+ */
+
+package com.mongodb.client.gridfs.codecs
+
+import com.mongodb.client.gridfs.model.GridFSFile
+import org.bson.BsonBinaryReader
+import org.bson.BsonBinaryWriter
+import org.bson.BsonObjectId
+import org.bson.BsonString
+import org.bson.BsonType
+import org.bson.ByteBufNIO
+import org.bson.Document
+import org.bson.codecs.BsonTypeClassMap
+import org.bson.codecs.BsonValueCodecProvider
+import org.bson.codecs.Codec
+import org.bson.codecs.DecoderContext
+import org.bson.codecs.DocumentCodecProvider
+import org.bson.codecs.EncoderContext
+import org.bson.codecs.ValueCodecProvider
+import org.bson.codecs.configuration.CodecRegistry
+import org.bson.io.BasicOutputBuffer
+import org.bson.io.ByteBufferBsonInput
+import org.bson.types.ObjectId
+import spock.lang.Specification
+
+import java.nio.ByteBuffer
+
+import static org.bson.codecs.configuration.CodecRegistries.fromProviders
+import static org.bson.codecs.configuration.CodecRegistries.fromRegistries
+
+class GridFSFileCodecSpecification extends Specification {
+
+    static final REGISTRY = fromProviders([new BsonValueCodecProvider(), new ValueCodecProvider(), new DocumentCodecProvider()])
+    static final BSONTYPESREGISTRY = fromRegistries(
+            fromProviders(new DocumentCodecProvider(new BsonTypeClassMap([(BsonType.STRING): BsonString]))), REGISTRY)
+    static final CODEC = new GridFSFileCodec(REGISTRY)
+    static final ID = new BsonObjectId(new ObjectId())
+    static final FILENAME = 'filename'
+    static final LENGTH = 100L
+    static final CHUNKSIZE = 255
+    static final UPLOADDATE = new Date()
+    static final METADATA = new Document('field', 'value')
+
+    def 'should encode and decode all default types with all readers and writers'() {
+        expect:
+        roundTripped(original) == original
+
+        where:
+        original << [
+                new GridFSFile(ID, FILENAME, LENGTH, CHUNKSIZE, UPLOADDATE, null),
+                new GridFSFile(ID, FILENAME, LENGTH, CHUNKSIZE, UPLOADDATE, METADATA),
+        ]
+    }
+
+    def 'it should use the users codec for metadata / extra elements'() {
+        when:
+        def gridFSFileFromDocument = toGridFSFile(['_id': ID, 'filename': FILENAME, 'length': LENGTH, 'chunkSize': CHUNKSIZE,
+                                                   'uploadDate': UPLOADDATE, 'metadata': METADATA] as Document,
+                BSONTYPESREGISTRY)
+        then:
+        gridFSFileFromDocument.metadata.get('field') == new BsonString('value')
+    }
+
+    GridFSFile roundTripped(GridFSFile gridFSFile) {
+        def writer = new BsonBinaryWriter(new BasicOutputBuffer())
+        encode(writer, gridFSFile, CODEC)
+        decode(writer, CODEC)
+    }
+
+    GridFSFile toGridFSFile(Document document) {
+        toGridFSFile(document, REGISTRY)
+    }
+
+    GridFSFile toGridFSFile(Document document, CodecRegistry registry) {
+        def writer = new BsonBinaryWriter(new BasicOutputBuffer())
+        registry.get(Document).encode(writer, document, EncoderContext.builder().build())
+        decode(writer, new GridFSFileCodec(registry))
+    }
+
+    def encode(BsonBinaryWriter writer, GridFSFile gridFSFile, Codec<GridFSFile> codec) {
+        codec.encode(writer, gridFSFile, EncoderContext.builder().build())
+    }
+
+    def <T> T decode(BsonBinaryWriter writer, Codec<T> codec) {
+        def reader = new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(ByteBuffer.wrap(writer.bsonOutput.toByteArray()))))
+        codec.decode(reader, DecoderContext.builder().build())
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/client/gridfs/model/GridFSFileSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/gridfs/model/GridFSFileSpecification.groovy
new file mode 100644
index 00000000000..f226df5420e
--- /dev/null
+++
b/driver-core/src/test/unit/com/mongodb/client/gridfs/model/GridFSFileSpecification.groovy @@ -0,0 +1,60 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.gridfs.model + +import com.mongodb.MongoGridFSException +import org.bson.BsonObjectId +import org.bson.BsonString +import org.bson.Document +import org.bson.types.ObjectId +import spock.lang.Specification + +class GridFSFileSpecification extends Specification { + + def 'should return the expected valued'() { + given: + def id = new BsonObjectId(new ObjectId()) + def filename = 'filename' + def length = 100L + def chunkSize = 255 + def uploadDate = new Date() + def metadata = new Document('id', id) + + when: + def gridFSFile = new GridFSFile(id, filename, length, chunkSize, uploadDate, metadata) + + then: + gridFSFile.getId() == id + gridFSFile.getFilename() == filename + gridFSFile.getLength() == length + gridFSFile.getChunkSize() == chunkSize + gridFSFile.getUploadDate() == uploadDate + gridFSFile.getMetadata() == metadata + } + + def 'should throw an exception when using getObjectId with custom id types'() { + given: + def gridFSFile = new GridFSFile(new BsonString('id'), 'test', 10L, 225, new Date(), null) + + when: + gridFSFile.getObjectId() + + then: + thrown(MongoGridFSException) + } + +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/AggregatesSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/AggregatesSpecification.groovy new file mode 100644 index 00000000000..f78aefd51b4 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/AggregatesSpecification.groovy @@ -0,0 +1,1365 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
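To complement the GridFSFile assertions above, the id accessors can be contrasted directly; a hedged sketch (the filename and class name are illustrative, the constructor arguments mirror the spec):

import com.mongodb.client.gridfs.model.GridFSFile;
import java.util.Date;
import org.bson.BsonObjectId;
import org.bson.types.ObjectId;

public final class GridFSFileDemo {
    public static void main(final String[] args) {
        GridFSFile file = new GridFSFile(new BsonObjectId(new ObjectId()), "report.txt", 100L, 255, new Date(), null);
        // getObjectId() is only safe for ObjectId-backed ids; per the spec above,
        // custom id types make it throw MongoGridFSException.
        System.out.println(file.getObjectId().equals(file.getId().asObjectId().getValue())); // true
    }
}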
+ */ + +package com.mongodb.client.model + +import com.mongodb.MongoNamespace +import com.mongodb.client.model.fill.FillOutputField +import com.mongodb.client.model.search.SearchCollector +import com.mongodb.client.model.search.SearchOperator +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.Document +import org.bson.BinaryVector +import org.bson.conversions.Bson +import spock.lang.IgnoreIf +import spock.lang.Specification + +import static BucketGranularity.R5 +import static MongoTimeUnit.DAY +import static com.mongodb.ClusterFixture.serverVersionLessThan +import static com.mongodb.client.model.Accumulators.accumulator +import static com.mongodb.client.model.Accumulators.addToSet +import static com.mongodb.client.model.Accumulators.avg +import static com.mongodb.client.model.Accumulators.bottom +import static com.mongodb.client.model.Accumulators.bottomN +import static com.mongodb.client.model.Accumulators.first +import static com.mongodb.client.model.Accumulators.firstN +import static com.mongodb.client.model.Accumulators.last +import static com.mongodb.client.model.Accumulators.lastN +import static com.mongodb.client.model.Accumulators.max +import static com.mongodb.client.model.Accumulators.maxN +import static com.mongodb.client.model.Accumulators.mergeObjects +import static com.mongodb.client.model.Accumulators.min +import static com.mongodb.client.model.Accumulators.minN +import static com.mongodb.client.model.Accumulators.push +import static com.mongodb.client.model.Accumulators.stdDevPop +import static com.mongodb.client.model.Accumulators.stdDevSamp +import static com.mongodb.client.model.Accumulators.sum +import static com.mongodb.client.model.Accumulators.top +import static com.mongodb.client.model.Accumulators.topN +import static com.mongodb.client.model.Aggregates.addFields +import static com.mongodb.client.model.Aggregates.bucket +import static com.mongodb.client.model.Aggregates.bucketAuto +import static com.mongodb.client.model.Aggregates.count +import static com.mongodb.client.model.Aggregates.densify +import static com.mongodb.client.model.Aggregates.fill +import static com.mongodb.client.model.Aggregates.graphLookup +import static com.mongodb.client.model.Aggregates.group +import static com.mongodb.client.model.Aggregates.limit +import static com.mongodb.client.model.Aggregates.lookup +import static com.mongodb.client.model.Aggregates.match +import static com.mongodb.client.model.Aggregates.merge +import static com.mongodb.client.model.Aggregates.out +import static com.mongodb.client.model.Aggregates.project +import static com.mongodb.client.model.Aggregates.replaceRoot +import static com.mongodb.client.model.Aggregates.replaceWith +import static com.mongodb.client.model.Aggregates.sample +import static com.mongodb.client.model.Aggregates.search +import static com.mongodb.client.model.Aggregates.searchMeta +import static com.mongodb.client.model.Aggregates.set +import static com.mongodb.client.model.Aggregates.setWindowFields +import static com.mongodb.client.model.Aggregates.skip +import static com.mongodb.client.model.Aggregates.sort +import static com.mongodb.client.model.Aggregates.sortByCount +import static com.mongodb.client.model.Aggregates.unionWith +import static com.mongodb.client.model.Aggregates.unwind +import static com.mongodb.client.model.Aggregates.vectorSearch +import static com.mongodb.client.model.BsonHelper.toBson +import static com.mongodb.client.model.Filters.eq +import static com.mongodb.client.model.Filters.expr +import 
static com.mongodb.client.model.Projections.computed +import static com.mongodb.client.model.Projections.fields +import static com.mongodb.client.model.Projections.include +import static com.mongodb.client.model.Sorts.ascending +import static com.mongodb.client.model.Sorts.descending +import static com.mongodb.client.model.Windows.Bound.CURRENT +import static com.mongodb.client.model.Windows.Bound.UNBOUNDED +import static com.mongodb.client.model.Windows.documents +import static com.mongodb.client.model.densify.DensifyRange.fullRangeWithStep +import static com.mongodb.client.model.fill.FillOptions.fillOptions +import static com.mongodb.client.model.search.SearchCollector.facet +import static com.mongodb.client.model.search.SearchCount.total +import static com.mongodb.client.model.search.SearchFacet.stringFacet +import static com.mongodb.client.model.search.SearchHighlight.paths +import static com.mongodb.client.model.search.SearchOperator.exists +import static com.mongodb.client.model.search.SearchOptions.searchOptions +import static com.mongodb.client.model.search.SearchPath.fieldPath +import static com.mongodb.client.model.search.SearchPath.wildcardPath +import static com.mongodb.client.model.search.VectorSearchOptions.approximateVectorSearchOptions +import static com.mongodb.client.model.search.VectorSearchOptions.exactVectorSearchOptions +import static java.util.Arrays.asList +import static org.bson.BsonDocument.parse + +class AggregatesSpecification extends Specification { + + @IgnoreIf({ serverVersionLessThan(4, 4) }) + def 'should render $accumulator'() { + given: + def initFunction = 'function() { return { count : 0, sum : 0 } }' + def initFunctionWithArgs = 'function(initCount, initSun) { return { count : parseInt(initCount), sum : parseInt(initSun) } }' + def accumulateFunction = 'function(state, numCopies) { return { count : state.count + 1, sum : state.sum + numCopies } }' + def mergeFunction = 'function(state1, state2) { return { count : state1.count + state2.count, sum : state1.sum + state2.sum } }' + def finalizeFunction = 'function(state) { return (state.sum / state.count) }' + + expect: + toBson(group(null, accumulator('test', initFunction, accumulateFunction, mergeFunction))) == + parse('{$group: {_id: null, test: {$accumulator: {init: "' + initFunction + '", initArgs: [], accumulate: "' + + accumulateFunction + '", accumulateArgs: [], merge: "' + mergeFunction + '", lang: "js"}}}}') + toBson(group(null, accumulator('test', initFunction, accumulateFunction, mergeFunction, finalizeFunction))) == + parse('{$group: {_id: null, test: {$accumulator: {init: "' + initFunction + '", initArgs: [], accumulate: "' + + accumulateFunction + '", accumulateArgs: [], merge: "' + mergeFunction + '", finalize: "' + finalizeFunction + + '", lang: "js"}}}}') + toBson(group(null, accumulator('test', initFunctionWithArgs, ['0', '0'], accumulateFunction, [ '$copies' ], mergeFunction, + finalizeFunction))) == + parse('{$group: {_id: null, test: {$accumulator: {init: "' + initFunctionWithArgs + + '", initArgs: [ "0", "0" ], accumulate: "' + accumulateFunction + + '", accumulateArgs: [ "$copies" ], merge: "' + mergeFunction + + '", finalize: "' + finalizeFunction + '", lang: "js"}}}}') + toBson(group(null, accumulator('test', initFunction, accumulateFunction, mergeFunction, finalizeFunction, 'lang'))) == + parse('{$group: {_id: null, test: {$accumulator: {init: "' + initFunction + '", initArgs: [], accumulate: "' + + accumulateFunction + '", accumulateArgs: [], merge: "' + mergeFunction + '", 
finalize: "' + finalizeFunction + + '", lang: "lang"}}}}') + toBson(group(null, accumulator('test', initFunctionWithArgs, ['0', '0'], accumulateFunction, [ '$copies' ], mergeFunction, + finalizeFunction, 'js'))) == + parse('{$group: {_id: null, test: {$accumulator: {init: "' + initFunctionWithArgs + + '", initArgs: [ "0", "0" ], accumulate: "' + accumulateFunction + + '", accumulateArgs: [ "$copies" ], merge: "' + mergeFunction + + '", finalize: "' + finalizeFunction + '", lang: "js"}}}}') + } + + def 'should render $addFields'() { + expect: + toBson(addFields(new Field('newField', null))) == parse('{$addFields: {newField: null}}') + toBson(addFields(new Field('newField', 'hello'))) == parse('{$addFields: {newField: "hello"}}') + toBson(addFields(new Field('this', '$$CURRENT'))) == parse('{$addFields: {this: "$$CURRENT"}}') + toBson(addFields(new Field('myNewField', new Document('c', 3) + .append('d', 4)))) == parse('{$addFields: {myNewField: {c: 3, d: 4}}}') + toBson(addFields(new Field('alt3', new Document('$lt', asList('$a', 3))))) == parse( + '{$addFields: {alt3: {$lt: ["$a", 3]}}}') + toBson(addFields(new Field('b', 3), new Field('c', 5))) == parse('{$addFields: {b: 3, c: 5}}') + toBson(addFields(asList(new Field('b', 3), new Field('c', 5)))) == parse('{$addFields: {b: 3, c: 5}}') + } + + def 'should render $set'() { + expect: + toBson(set(new Field('newField', null))) == parse('{$set: {newField: null}}') + toBson(set(new Field('newField', 'hello'))) == parse('{$set: {newField: "hello"}}') + toBson(set(new Field('this', '$$CURRENT'))) == parse('{$set: {this: "$$CURRENT"}}') + toBson(set(new Field('myNewField', new Document('c', 3) + .append('d', 4)))) == parse('{$set: {myNewField: {c: 3, d: 4}}}') + toBson(set(new Field('alt3', new Document('$lt', asList('$a', 3))))) == parse( + '{$set: {alt3: {$lt: ["$a", 3]}}}') + toBson(set(new Field('b', 3), new Field('c', 5))) == parse('{$set: {b: 3, c: 5}}') + toBson(set(asList(new Field('b', 3), new Field('c', 5)))) == parse('{$set: {b: 3, c: 5}}') + } + + def 'should render $bucket'() { + expect: + toBson(bucket('$screenSize', [0, 24, 32, 50, 100000])) == parse('''{ + $bucket: { + groupBy: "$screenSize", + boundaries: [0, 24, 32, 50, 100000] + } + }''') + toBson(bucket('$screenSize', [0, 24, 32, 50, 100000], + new BucketOptions() + .defaultBucket('other'))) == parse('''{ + $bucket: { + groupBy: "$screenSize", + boundaries: [0, 24, 32, 50, 100000], + default: "other" + } + }''') + toBson(bucket('$screenSize', [0, 24, 32, 50, 100000], + new BucketOptions() + .defaultBucket('other') + .output(sum('count', 1), push('matches', '$screenSize')))) == parse('''{ + $bucket: { + groupBy: "$screenSize", + boundaries: [0, 24, 32, 50, 100000], + default: "other", + output: { + count: {$sum: 1}, + matches: {$push: "$screenSize"} + } + } + }''') + } + + def 'should render $bucketAuto'() { + expect: + toBson(bucketAuto('$price', 4)) == parse('''{ + $bucketAuto: { + groupBy: "$price", + buckets: 4 + } + }''') + toBson(bucketAuto('$price', 4, new BucketAutoOptions() + .output(sum('count', 1), + avg('avgPrice', '$price')))) == parse('''{ + $bucketAuto: { + groupBy: "$price", + buckets: 4, + output: { + count: {$sum: 1}, + avgPrice: {$avg: "$price"}, + } + } + }''') + toBson(bucketAuto('$price', 4, new BucketAutoOptions() + .granularity(R5) + .output(sum('count', 1), + avg('avgPrice', '$price')))) == parse('''{ + $bucketAuto: { + groupBy: "$price", + buckets: 4, + output: { + count: {$sum: 1}, + avgPrice: {$avg: "$price"}, + }, + granularity: "R5" + } + }''') + } 
+ + def 'should render $count'() { + expect: + toBson(count()) == parse('{$count: "count"}') + toBson(count('count')) == parse('{$count: "count"}') + toBson(count('total')) == parse('{$count: "total"}') + } + + def 'should render $match'() { + expect: + toBson(match(eq('author', 'dave'))) == parse('{ $match : { author : "dave" } }') + } + + def 'should render $project'() { + expect: + toBson(project(fields(include('title', 'author'), computed('lastName', '$author.last')))) == + parse('{ $project : { title : 1 , author : 1, lastName : "$author.last" } }') + } + + def 'should render $replaceRoot'() { + expect: + toBson(replaceRoot('$a1')) == parse('{$replaceRoot: {newRoot: "$a1"}}') + toBson(replaceRoot('$a1.b')) == parse('{$replaceRoot: {newRoot: "$a1.b"}}') + toBson(replaceRoot('$a1')) == parse('{$replaceRoot: {newRoot: "$a1"}}') + } + + def 'should render $replaceWith'() { + expect: + toBson(replaceWith('$a1')) == parse('{$replaceWith: "$a1"}') + toBson(replaceWith('$a1.b')) == parse('{$replaceWith: "$a1.b"}') + toBson(replaceWith('$a1')) == parse('{$replaceWith: "$a1"}') + } + + def 'should render $sort'() { + expect: + toBson(sort(ascending('title', 'author'))) == parse('{ $sort : { title : 1 , author : 1 } }') + } + + def 'should render $sortByCount'() { + expect: + toBson(sortByCount('someField')) == parse('{$sortByCount: "someField"}') + toBson(sortByCount(new Document('$floor', '$x'))) == parse('{$sortByCount: {$floor: "$x"}}') + } + + def 'should render $limit'() { + expect: + toBson(limit(5)) == parse('{ $limit : 5 }') + } + + def 'should render $lookup'() { + expect: + toBson(lookup('from', 'localField', 'foreignField', 'as')) == parse('''{ $lookup : { from: "from", localField: "localField", + foreignField: "foreignField", as: "as" } }''') + + List pipeline = asList(match(expr(new Document('$eq', asList('x', '1'))))) + toBson(lookup('from', asList(new Variable('var1', 'expression1')), pipeline, 'as')) == + parse('''{ $lookup : { from: "from", + let: { var1: "expression1" }, + pipeline : [{ $match : { $expr: { $eq : [ "x" , "1" ]}}}], + as: "as" }}''') + + // without variables + toBson(lookup('from', pipeline, 'as')) == + parse('''{ $lookup : { from: "from", + pipeline : [{ $match : { $expr: { $eq : [ "x" , "1" ]}}}], + as: "as" }}''') + } + + def 'should render $facet'() { + expect: + toBson(Aggregates.facet( + new Facet('Screen Sizes', + unwind('$attributes'), + match(eq('attributes.name', 'screen size')), + group(null, sum('count', 1 ))), + new Facet('Manufacturer', + match(eq('attributes.name', 'manufacturer')), + group('$attributes.value', sum('count', 1)), + sort(descending('count')), + limit(5)))) == + parse('''{$facet: { + "Screen Sizes": [ + {$unwind: "$attributes"}, + {$match: {"attributes.name": "screen size"}}, + {$group: { + _id: null, + count: {$sum: 1} + }} + ], + + "Manufacturer": [ + {$match: {"attributes.name": "manufacturer"}}, + {$group: {_id: "$attributes.value", count: {$sum: 1}}}, + {$sort: {count: -1}}, + {$limit: 5} + ] + }} ''') + } + + def 'should render $graphLookup'() { + expect: + // without options + toBson(graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork')) == + parse('''{ $graphLookup: { from: "contacts", startWith: "$friends", connectFromField: "friends", connectToField: "name", + as: "socialNetwork" } }''') + + // with maxDepth + toBson(graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork', new GraphLookupOptions().maxDepth(1))) == + parse('''{ $graphLookup: { from: "contacts", startWith: "$friends",
connectFromField: "friends", connectToField: "name", + as: "socialNetwork", maxDepth: 1 } }''') + + // with depthField + toBson(graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork', new GraphLookupOptions().depthField('depth'))) == + parse('''{ $graphLookup: { from: "contacts", startWith: "$friends", connectFromField: "friends", connectToField: "name", + as: "socialNetwork", depthField: "depth" } }''') + + // with restrictSearchWithMatch + toBson(graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork', new GraphLookupOptions() + .restrictSearchWithMatch(eq('hobbies', 'golf')))) == + parse('''{ $graphLookup: { from: "contacts", startWith: "$friends", connectFromField: "friends", connectToField: "name", + as: "socialNetwork", restrictSearchWithMatch : { "hobbies" : "golf" } } }''') + + // with maxDepth and depthField + toBson(graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork', new GraphLookupOptions() + .maxDepth(1).depthField('depth'))) == + parse('''{ $graphLookup: { from: "contacts", startWith: "$friends", connectFromField: "friends", connectToField: "name", + as: "socialNetwork", maxDepth: 1, depthField: "depth" } }''') + + // with all options + toBson(graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork', new GraphLookupOptions() + .maxDepth(1).depthField('depth').restrictSearchWithMatch(eq('hobbies', 'golf')))) == + parse('''{ $graphLookup: { from: "contacts", startWith: "$friends", connectFromField: "friends", connectToField: "name", + as: "socialNetwork", maxDepth: 1, depthField: "depth", restrictSearchWithMatch : { "hobbies" : "golf" } } }''') + } + + def 'should render $skip'() { + expect: + toBson(skip(5)) == parse('{ $skip : 5 }') + } + + def 'should render $unionWith'() { + expect: + List pipeline = asList(match(expr(new Document('$eq', asList('x', '1'))))) + toBson(unionWith('with', pipeline)) == + parse('''{ $unionWith : { coll: "with", pipeline : [{ $match : { $expr: { $eq : [ "x" , "1" ]}}}] }}''') + } + + def 'should render $unwind'() { + expect: + toBson(unwind('$sizes')) == parse('{ $unwind : "$sizes" }') + toBson(unwind('$sizes', new UnwindOptions().preserveNullAndEmptyArrays(null))) == parse('{ $unwind : { path : "$sizes" } }') + toBson(unwind('$sizes', new UnwindOptions().preserveNullAndEmptyArrays(false))) == parse(''' + { $unwind : { path : "$sizes", preserveNullAndEmptyArrays : false } }''') + toBson(unwind('$sizes', new UnwindOptions().preserveNullAndEmptyArrays(true))) == parse(''' + { $unwind : { path : "$sizes", preserveNullAndEmptyArrays : true } }''') + toBson(unwind('$sizes', new UnwindOptions().includeArrayIndex(null))) == parse('{ $unwind : { path : "$sizes" } }') + toBson(unwind('$sizes', new UnwindOptions().includeArrayIndex('$a'))) == parse(''' + { $unwind : { path : "$sizes", includeArrayIndex : "$a" } }''') + toBson(unwind('$sizes', new UnwindOptions().preserveNullAndEmptyArrays(true).includeArrayIndex('$a'))) == parse(''' + { $unwind : { path : "$sizes", preserveNullAndEmptyArrays : true, includeArrayIndex : "$a" } }''') + } + + def 'should render $out'() { + expect: + toBson(out('authors')) == parse('{ $out : "authors" }') + toBson(out(Document.parse('{ s3: "s3://bucket/path/to/file…?format=json&maxFileSize=100MiB"}'))) == + parse('{ $out : { s3: "s3://bucket/path/to/file…?format=json&maxFileSize=100MiB"} }') + toBson(out('authorsDB', 'books')) == parse('{ $out : { db: "authorsDB", coll: "books" } }') + } + + def 'should render merge'() { + expect: + toBson(merge('authors')) == 
parse('{ $merge : {into: "authors" }}') + toBson(merge(new MongoNamespace('db1', 'authors'))) == + parse('{ $merge : {into: {db: "db1", coll: "authors" }}}') + + toBson(merge('authors', + new MergeOptions().uniqueIdentifier('ssn'))) == + parse('{ $merge : {into: "authors", on: "ssn" }}') + + toBson(merge('authors', + new MergeOptions().uniqueIdentifier(['ssn', 'otherId']))) == + parse('{ $merge : {into: "authors", on: ["ssn", "otherId"] }}') + + toBson(merge('authors', + new MergeOptions().whenMatched(MergeOptions.WhenMatched.REPLACE))) == + parse('{ $merge : {into: "authors", whenMatched: "replace" }}') + toBson(merge('authors', + new MergeOptions().whenMatched(MergeOptions.WhenMatched.KEEP_EXISTING))) == + parse('{ $merge : {into: "authors", whenMatched: "keepExisting" }}') + toBson(merge('authors', + new MergeOptions().whenMatched(MergeOptions.WhenMatched.MERGE))) == + parse('{ $merge : {into: "authors", whenMatched: "merge" }}') + toBson(merge('authors', + new MergeOptions().whenMatched(MergeOptions.WhenMatched.FAIL))) == + parse('{ $merge : {into: "authors", whenMatched: "fail" }}') + + toBson(merge('authors', + new MergeOptions().whenNotMatched(MergeOptions.WhenNotMatched.INSERT))) == + parse('{ $merge : {into: "authors", whenNotMatched: "insert" }}') + toBson(merge('authors', + new MergeOptions().whenNotMatched(MergeOptions.WhenNotMatched.DISCARD))) == + parse('{ $merge : {into: "authors", whenNotMatched: "discard" }}') + toBson(merge('authors', + new MergeOptions().whenNotMatched(MergeOptions.WhenNotMatched.FAIL))) == + parse('{ $merge : {into: "authors", whenNotMatched: "fail" }}') + + toBson(merge('authors', + new MergeOptions().whenMatched(MergeOptions.WhenMatched.PIPELINE) + .variables([new Variable('y', 2), new Variable('z', 3)]) + .whenMatchedPipeline([addFields([new Field('x', 1)])]))) == + parse('{ $merge : {into: "authors", let: {y: 2, z: 3}, whenMatched: [{$addFields: {x: 1}}]}}') + } + + def 'should render $group'() { + expect: + toBson(group('$customerId')) == parse('{ $group : { _id : "$customerId" } }') + toBson(group(null)) == parse('{ $group : { _id : null } }') + + toBson(group(parse('{ month: { $month: "$date" }, day: { $dayOfMonth: "$date" }, year: { $year: "$date" } }'))) == + parse('{ $group : { _id : { month: { $month: "$date" }, day: { $dayOfMonth: "$date" }, year: { $year: "$date" } } } }') + + + def groupDocument = parse('''{ + $group : { + _id : { gid: "$groupByField"}, + sum: { $sum: { $multiply: [ "$price", "$quantity" ] } }, + avg: { $avg: "$quantity" }, + min: { $min: "$quantity" }, + minN: { $minN: { input: "$quantity", + n: { $cond: { if: { $eq: ["$gid", true] }, then: 2, else: 1 } } } }, + max: { $max: "$quantity" }, + maxN: { $maxN: { input: "$quantity", n: 2 } }, + first: { $first: "$quantity" }, + firstN: { $firstN: { input: "$quantity", n: 2 } }, + top: { $top: { sortBy: { quantity: 1 }, output: "$quantity" } }, + topN: { $topN: { sortBy: { quantity: 1 }, output: "$quantity", n: 2 } }, + last: { $last: "$quantity" }, + lastN: { $lastN: { input: "$quantity", n: 2 } }, + bottom: { $bottom: { sortBy: { quantity: 1 }, output: ["$quantity", "$quality"] } }, + bottomN: { $bottomN: { sortBy: { quantity: 1 }, output: ["$quantity", "$quality"], + n: { $cond: { if: { $eq: ["$gid", true] }, then: 2, else: 1 } } } }, + all: { $push: "$quantity" }, + merged: { $mergeObjects: "$quantity" }, + unique: { $addToSet: "$quantity" }, + stdDevPop: { $stdDevPop: "$quantity" }, + stdDevSamp: { $stdDevSamp: "$quantity" } + } + }''') + toBson(group(new Document('gid', 
'$groupByField'), + sum('sum', parse('{ $multiply: [ "$price", "$quantity" ] }')), + avg('avg', '$quantity'), + min('min', '$quantity'), + minN('minN', '$quantity', + new Document('$cond', new Document('if', new Document('$eq', asList('$gid', true))) + .append('then', 2).append('else', 1))), + max('max', '$quantity'), + maxN('maxN', '$quantity', 2), + first('first', '$quantity'), + firstN('firstN', '$quantity', 2), + top('top', ascending('quantity'), '$quantity'), + topN('topN', ascending('quantity'), '$quantity', 2), + last('last', '$quantity'), + lastN('lastN', '$quantity', 2), + bottom('bottom', ascending('quantity'), ['$quantity', '$quality']), + bottomN('bottomN', ascending('quantity'), ['$quantity', '$quality'], + new Document('$cond', new Document('if', new Document('$eq', asList('$gid', true))) + .append('then', 2).append('else', 1))), + push('all', '$quantity'), + mergeObjects('merged', '$quantity'), + addToSet('unique', '$quantity'), + stdDevPop('stdDevPop', '$quantity'), + stdDevSamp('stdDevSamp', '$quantity') + )) == groupDocument + } + + def 'should render $setWindowFields'() { + given: + Window window = documents(1, 2) + BsonDocument setWindowFieldsBson = toBson(setWindowFields( + new Document('gid', '$partitionByField'), ascending('sortByField'), asList( + WindowOutputFields.of(new BsonField('newField00', new Document('$sum', '$field00') + .append('window', Windows.of(new Document('range', asList(1, 'current')))))), + WindowOutputFields.sum('newField01', '$field01', Windows.range(1, CURRENT)), + WindowOutputFields.avg('newField02', '$field02', Windows.range(UNBOUNDED, 1)), + WindowOutputFields.stdDevSamp('newField03', '$field03', window), + WindowOutputFields.stdDevPop('newField04', '$field04', window), + WindowOutputFields.min('newField05', '$field05', window), + WindowOutputFields.minN('newField05N', '$field05N', + new Document('$cond', new Document('if', new Document('$eq', asList('$gid', true))) + .append('then', 2).append('else', 1)), + window), + WindowOutputFields.max('newField06', '$field06', window), + WindowOutputFields.maxN('newField06N', '$field06N', 2, window), + WindowOutputFields.count('newField07', window), + WindowOutputFields.derivative('newField08', '$field08', window), + WindowOutputFields.timeDerivative('newField09', '$field09', window, DAY), + WindowOutputFields.integral('newField10', '$field10', window), + WindowOutputFields.timeIntegral('newField11', '$field11', window, DAY), + WindowOutputFields.covarianceSamp('newField12', '$field12_1', '$field12_2', window), + WindowOutputFields.covariancePop('newField13', '$field13_1', '$field13_2', window), + WindowOutputFields.expMovingAvg('newField14', '$field14', 3), + WindowOutputFields.expMovingAvg('newField15', '$field15', 0.5), + WindowOutputFields.push('newField16', '$field16', window), + WindowOutputFields.addToSet('newField17', '$field17', window), + WindowOutputFields.first('newField18', '$field18', window), + WindowOutputFields.firstN('newField18N', '$field18N', 2, window), + WindowOutputFields.last('newField19', '$field19', window), + WindowOutputFields.lastN('newField19N', '$field19N', 2, window), + WindowOutputFields.shift('newField20', '$field20', 'defaultConstantValue', -3), + WindowOutputFields.documentNumber('newField21'), + WindowOutputFields.rank('newField22'), + WindowOutputFields.denseRank('newField23'), + WindowOutputFields.bottom('newField24', descending('sortByField'), '$field24', window), +
WindowOutputFields.bottomN('newField24N', descending('sortByField'), '$field24N', + new Document('$cond', new Document('if', new Document('$eq', asList('$gid', true))) + .append('then', 2).append('else', 1)), + window), + WindowOutputFields.top('newField25', ascending('sortByField'), '$field25', window), + WindowOutputFields.topN('newField25N', ascending('sortByField'), '$field25N', 2, window), + WindowOutputFields.locf('newField26', '$field26'), + WindowOutputFields.linearFill('newField27', '$field27') + ))) + + expect: + setWindowFieldsBson == parse('''{ + "$setWindowFields": { + "partitionBy": { "gid": "$partitionByField" }, + "sortBy": { "sortByField" : 1 }, + "output": { + "newField00": { "$sum": "$field00", "window": { "range": [{"$numberInt": "1"}, "current"] } }, + "newField01": { "$sum": "$field01", "window": { "range": [{"$numberLong": "1"}, "current"] } }, + "newField02": { "$avg": "$field02", "window": { "range": ["unbounded", {"$numberLong": "1"}] } }, + "newField03": { "$stdDevSamp": "$field03", "window": { "documents": [1, 2] } }, + "newField04": { "$stdDevPop": "$field04", "window": { "documents": [1, 2] } }, + "newField05": { "$min": "$field05", "window": { "documents": [1, 2] } }, + "newField05N": { + "$minN": { "input": "$field05N", "n": { "$cond": { "if": { "$eq": ["$gid", true] }, "then": 2, "else": 1 } } }, + "window": { "documents": [1, 2] } }, + "newField06": { "$max": "$field06", "window": { "documents": [1, 2] } }, + "newField06N": { "$maxN": { "input": "$field06N", "n": 2 }, "window": { "documents": [1, 2] } }, + "newField07": { "$count": {}, "window": { "documents": [1, 2] } }, + "newField08": { "$derivative": { "input": "$field08" }, "window": { "documents": [1, 2] } }, + "newField09": { "$derivative": { "input": "$field09", "unit": "day" }, "window": { "documents": [1, 2] } }, + "newField10": { "$integral": { "input": "$field10"}, "window": { "documents": [1, 2] } }, + "newField11": { "$integral": { "input": "$field11", "unit": "day" }, "window": { "documents": [1, 2] } }, + "newField12": { "$covarianceSamp": ["$field12_1", "$field12_2"], "window": { "documents": [1, 2] } }, + "newField13": { "$covariancePop": ["$field13_1", "$field13_2"], "window": { "documents": [1, 2] } }, + "newField14": { "$expMovingAvg": { "input": "$field14", "N": 3 } }, + "newField15": { "$expMovingAvg": { "input": "$field15", "alpha": 0.5 } }, + "newField16": { "$push": "$field16", "window": { "documents": [1, 2] } }, + "newField17": { "$addToSet": "$field17", "window": { "documents": [1, 2] } }, + "newField18": { "$first": "$field18", "window": { "documents": [1, 2] } }, + "newField18N": { "$firstN": { "input": "$field18N", "n": 2 }, "window": { "documents": [1, 2] } }, + "newField19": { "$last": "$field19", "window": { "documents": [1, 2] } }, + "newField19N": { "$lastN": { "input": "$field19N", "n": 2 }, "window": { "documents": [1, 2] } }, + "newField20": { "$shift": { "output": "$field20", "by": -3, "default": "defaultConstantValue" } }, + "newField21": { "$documentNumber": {} }, + "newField22": { "$rank": {} }, + "newField23": { "$denseRank": {} }, + "newField24": { + "$bottom": { "sortBy": { "sortByField": -1 }, "output": "$field24"}, + "window": { "documents": [1, 2] } }, + "newField24N": { + "$bottomN": { "sortBy": { "sortByField": -1 }, "output": "$field24N", + "n": { "$cond": { "if": { "$eq": ["$gid", true] }, "then": 2, "else": 1 } } }, + "window": { "documents": [1, 2] } }, + "newField25": { + "$top": { "sortBy": { "sortByField": 1 }, "output": "$field25"}, + "window": { 
"documents": [1, 2] } }, + "newField25N": { + "$topN": { "sortBy": { "sortByField": 1 }, "output": "$field25N", "n": 2 }, + "window": { "documents": [1, 2] } }, + "newField26": { "$locf": "$field26" }, + "newField27": { "$linearFill": "$field27" } + } + } + }''') + } + + def 'should render $setWindowFields with no partitionBy/sortBy'() { + given: + BsonDocument setWindowFields = toBson(setWindowFields(null, null, asList( + WindowOutputFields.sum('newField01', '$field01', documents(1, 2))) + )) + + expect: + setWindowFields == parse('''{ + "$setWindowFields": { + "output": { + "newField01": { "$sum": "$field01", "window": { "documents": [1, 2] } } + } + } + }''') + } + + def 'should render $densify'() { + when: + BsonDocument densifyDoc = toBson( + densify( + 'fieldName', + fullRangeWithStep(1)) + ) + + then: + densifyDoc == parse('''{ + "$densify": { + "field": "fieldName", + "range": { "bounds": "full", "step": 1 } + } + }''') + } + + def 'should render $fill'() { + when: + BsonDocument fillDoc = toBson( + fill(fillOptions().sortBy(ascending('fieldName3')), + FillOutputField.linear('fieldName1'), + FillOutputField.locf('fieldName2')) + ) + + then: + fillDoc == parse('''{ + "$fill": { + "output": { + "fieldName1": { "method" : "linear" } + "fieldName2": { "method" : "locf" } + } + "sortBy": { "fieldName3": 1 } + } + }''') + } + + def 'should render $search'() { + when: + BsonDocument searchDoc = toBson( + search( + (SearchOperator) exists(fieldPath('fieldName')), + searchOptions() + ) + ) + + then: + searchDoc == parse('''{ + "$search": { + "exists": { "path": "fieldName" } + } + }''') + + when: + searchDoc = toBson( + search( + (SearchCollector) facet( + exists(fieldPath('fieldName')), + [stringFacet('stringFacetName', fieldPath('fieldName1'))]), + searchOptions() + .index('indexName') + .count(total()) + .highlight(paths( + fieldPath('fieldName1'), + fieldPath('fieldName2').multi('analyzerName'), + wildcardPath('field.name*'))) + ) + ) + + then: + searchDoc == parse('''{ + "$search": { + "facet": { + "operator": { "exists": { "path": "fieldName" } }, + "facets": { + "stringFacetName": { "type" : "string", "path": "fieldName1" } + } + }, + "index": "indexName", + "count": { "type": "total" }, + "highlight": { + "path": [ + "fieldName1", + { "value": "fieldName2", "multi": "analyzerName" }, + { "wildcard": "field.name*" } + ] + } + } + }''') + } + + def 'should render $search with no options'() { + when: + BsonDocument searchDoc = toBson( + search( + (SearchOperator) exists(fieldPath('fieldName')) + ) + ) + + then: + searchDoc == parse('''{ + "$search": { + "exists": { "path": "fieldName" } + } + }''') + + when: + searchDoc = toBson( + search( + (SearchCollector) facet( + exists(fieldPath('fieldName')), + [stringFacet('facetName', fieldPath('fieldName')).numBuckets(3)]) + ) + ) + + then: + searchDoc == parse('''{ + "$search": { + "facet": { + "operator": { "exists": { "path": "fieldName" } }, + "facets": { + "facetName": { "type": "string", "path": "fieldName", "numBuckets": 3 } + } + } + } + }''') + } + + def 'should render $searchMeta'() { + when: + BsonDocument searchDoc = toBson( + searchMeta( + (SearchOperator) exists(fieldPath('fieldName')), + searchOptions() + ) + ) + + then: + searchDoc == parse('''{ + "$searchMeta": { + "exists": { "path": "fieldName" } + } + }''') + + when: + searchDoc = toBson( + searchMeta( + (SearchCollector) facet( + exists(fieldPath('fieldName')), + [stringFacet('stringFacetName', fieldPath('fieldName1'))]), + searchOptions() + .index('indexName') + 
.count(total()) + .highlight(paths( + fieldPath('fieldName1'), + fieldPath('fieldName2').multi('analyzerName'), + wildcardPath('field.name*'))) + ) + ) + + then: + searchDoc == parse('''{ + "$searchMeta": { + "facet": { + "operator": { "exists": { "path": "fieldName" } }, + "facets": { + "stringFacetName": { "type" : "string", "path": "fieldName1" } + } + }, + "index": "indexName", + "count": { "type": "total" }, + "highlight": { + "path": [ + "fieldName1", + { "value": "fieldName2", "multi": "analyzerName" }, + { "wildcard": "field.name*" } + ] + } + } + }''') + } + + def 'should render $searchMeta with no options'() { + when: + BsonDocument searchDoc = toBson( + searchMeta( + (SearchOperator) exists(fieldPath('fieldName')) + ) + ) + + then: + searchDoc == parse('''{ + "$searchMeta": { + "exists": { "path": "fieldName" } + } + }''') + + when: + searchDoc = toBson( + searchMeta( + (SearchCollector) facet( + exists(fieldPath('fieldName')), + [stringFacet('facetName', fieldPath('fieldName')).numBuckets(3)]) + ) + ) + + then: + searchDoc == parse('''{ + "$searchMeta": { + "facet": { + "operator": { "exists": { "path": "fieldName" } }, + "facets": { + "facetName": { "type": "string", "path": "fieldName", "numBuckets": 3 } + } + } + } + }''') + } + + def 'should render approximate $vectorSearch'() { + when: + BsonDocument vectorSearchDoc = toBson( + vectorSearch( + fieldPath('fieldName').multi('ignored'), + vector, + 'indexName', + 1, + approximateVectorSearchOptions(2) + .filter(Filters.ne("fieldName", "fieldValue")) + + ) + ) + + then: + vectorSearchDoc == parse('''{ + "$vectorSearch": { + "path": "fieldName", + "queryVector": ''' + queryVector + ''', + "index": "indexName", + "numCandidates": {"$numberLong": "2"}, + "limit": {"$numberLong": "1"}, + "filter": {"fieldName": {"$ne": "fieldValue"}} + } + }''') + + where: + vector | queryVector + BinaryVector.int8Vector([127, 7] as byte[]) | '{"$binary": {"base64": "AwB/Bw==", "subType": "09"}}' + BinaryVector.floatVector([127.0f, 7.0f] as float[]) | '{"$binary": {"base64": "JwAAAP5CAADgQA==", "subType": "09"}}' + BinaryVector.packedBitVector([127, 7] as byte[], (byte) 0) | '{"$binary": {"base64": "EAB/Bw==", "subType": "09"}}' + [1.0d, 2.0d] | "[1.0, 2.0]" + } + + def 'should render exact $vectorSearch'() { + when: + BsonDocument vectorSearchDoc = toBson( + vectorSearch( + fieldPath('fieldName').multi('ignored'), + vector, + 'indexName', + 1, + exactVectorSearchOptions() + .filter(Filters.ne("fieldName", "fieldValue")) + + ) + ) + + then: + vectorSearchDoc == parse('''{ + "$vectorSearch": { + "path": "fieldName", + "queryVector": ''' + queryVector + ''', + "index": "indexName", + "exact": true, + "limit": {"$numberLong": "1"}, + "filter": {"fieldName": {"$ne": "fieldValue"}} + } + }''') + + where: + vector | queryVector + BinaryVector.int8Vector([127, 7] as byte[]) | '{"$binary": {"base64": "AwB/Bw==", "subType": "09"}}' + BinaryVector.floatVector([127.0f, 7.0f] as float[]) | '{"$binary": {"base64": "JwAAAP5CAADgQA==", "subType": "09"}}' + [1.0d, 2.0d] | "[1.0, 2.0]" + } + + def 'should create string representation for simple stages'() { + expect: + match(new BsonDocument('x', new BsonInt32(1))).toString() == 'Stage{name=\'$match\', value={"x": 1}}' + } + + def 'should create string representation for group stage'() { + expect: + group('_id', avg('avg', '$quantity')).toString() == + 'Stage{name=\'$group\', id=_id, ' + + 'fieldAccumulators=[' + + 'BsonField{name=\'avg\', value=Expression{name=\'$avg\', expression=$quantity}}]}' + group(null, 
avg('avg', '$quantity')).toString() == + 'Stage{name=\'$group\', id=null, ' + + 'fieldAccumulators=[' + + 'BsonField{name=\'avg\', value=Expression{name=\'$avg\', expression=$quantity}}]}' + } + + def 'should render $sample'() { + expect: + toBson(sample(5)) == parse('{ $sample : { size: 5} }') + } + + def 'should test equals for SimplePipelineStage'() { + expect: + match(eq('author', 'dave')).equals(match(eq('author', 'dave'))) + project(fields(include('title', 'author'), computed('lastName', '$author.last'))) + .equals(project(fields(include('title', 'author'), computed('lastName', '$author.last')))) + sort(ascending('title', 'author')).equals(sort(ascending('title', 'author'))) + !sort(ascending('title', 'author')).equals(sort(descending('title', 'author'))) + } + + def 'should test hashCode for SimplePipelineStage'() { + expect: + match(eq('author', 'dave')).hashCode() == match(eq('author', 'dave')).hashCode() + project(fields(include('title', 'author'), computed('lastName', '$author.last'))).hashCode() == + project(fields(include('title', 'author'), computed('lastName', '$author.last'))).hashCode() + sort(ascending('title', 'author')).hashCode() == sort(ascending('title', 'author')).hashCode() + sort(ascending('title', 'author')).hashCode() != sort(descending('title', 'author')).hashCode() + } + + def 'should test equals for BucketStage'() { + expect: + bucket('$screenSize', [0, 24, 32, 50, 100000]).equals(bucket('$screenSize', [0, 24, 32, 50, 100000])) + } + + def 'should test hashCode for BucketStage'() { + expect: + bucket('$screenSize', [0, 24, 32, 50, 100000]).hashCode() == bucket('$screenSize', [0, 24, 32, 50, 100000]).hashCode() + bucket('$screenSize', [0, 24, 32, 50, 100000]).hashCode() != bucket('$screenSize', [0, 24, 32, 50, 10000]).hashCode() + } + + def 'should test equals for BucketAutoStage'() { + expect: + bucketAuto('$price', 4).equals(bucketAuto('$price', 4)) + bucketAuto('$price', 4, new BucketAutoOptions() + .output(sum('count', 1), + avg('avgPrice', '$price'))) + .equals(bucketAuto('$price', 4, new BucketAutoOptions() + .output(sum('count', 1), + avg('avgPrice', '$price')))) + bucketAuto('$price', 4, new BucketAutoOptions() + .granularity(R5) + .output(sum('count', 1), + avg('avgPrice', '$price'))) + .equals(bucketAuto('$price', 4, new BucketAutoOptions() + .granularity(R5) + .output(sum('count', 1), + avg('avgPrice', '$price')))) + } + + def 'should test hashCode for BucketAutoStage'() { + expect: + bucketAuto('$price', 4).hashCode() == bucketAuto('$price', 4).hashCode() + bucketAuto('$price', 4, new BucketAutoOptions() + .output(sum('count', 1), + avg('avgPrice', '$price'))).hashCode() == + bucketAuto('$price', 4, new BucketAutoOptions() + .output(sum('count', 1), + avg('avgPrice', '$price'))).hashCode() + bucketAuto('$price', 4, new BucketAutoOptions() + .granularity(R5) + .output(sum('count', 1), + avg('avgPrice', '$price'))).hashCode() == + bucketAuto('$price', 4, new BucketAutoOptions() + .granularity(R5) + .output(sum('count', 1), + avg('avgPrice', '$price'))).hashCode() + } + + def 'should test equals for LookupStage'() { + expect: + lookup('from', 'localField', 'foreignField', 'as') + .equals(lookup('from', 'localField', 'foreignField', 'as')) + + List pipeline = asList(match(expr(new Document('$eq', asList('x', '1'))))) + lookup('from', asList(new Variable('var1', 'expression1')), pipeline, 'as') + .equals(lookup('from', asList(new Variable('var1', 'expression1')), pipeline, 'as')) + + lookup('from', pipeline, 'as').equals(lookup('from', pipeline, 'as')) 
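+ + // These assertions exercise the stages' value-based equals: stages constructed from equal + // arguments, including equal pipelines, are expected to compare equal.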
+ } + + def 'should test hashCode for LookupStage'() { + expect: + lookup('from', 'localField', 'foreignField', 'as').hashCode() == + lookup('from', 'localField', 'foreignField', 'as').hashCode() + + List pipeline = asList(match(expr(new Document('$eq', asList('x', '1'))))) + lookup('from', asList(new Variable('var1', 'expression1')), pipeline, 'as').hashCode() == + lookup('from', asList(new Variable('var1', 'expression1')), pipeline, 'as').hashCode() + + lookup('from', pipeline, 'as').hashCode() == lookup('from', pipeline, 'as').hashCode() + } + + def 'should test equals for GraphLookupStage'() { + expect: + graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork') + .equals(graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork')) + + graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork', new GraphLookupOptions().maxDepth(1)) + .equals(graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork', + new GraphLookupOptions().maxDepth(1))) + + graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork', new GraphLookupOptions().depthField('depth')) + .equals(graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork', + new GraphLookupOptions().depthField('depth'))) + + graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork', new GraphLookupOptions() + .restrictSearchWithMatch(eq('hobbies', 'golf'))) + .equals(graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork', new GraphLookupOptions() + .restrictSearchWithMatch(eq('hobbies', 'golf')))) + + graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork', new GraphLookupOptions() + .maxDepth(1).depthField('depth')) + .equals(graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork', new GraphLookupOptions() + .maxDepth(1).depthField('depth'))) + + graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork', new GraphLookupOptions() + .maxDepth(1).depthField('depth').restrictSearchWithMatch(eq('hobbies', 'golf'))) + .equals(graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork', new GraphLookupOptions() + .maxDepth(1).depthField('depth').restrictSearchWithMatch(eq('hobbies', 'golf')))) + } + + def 'should test hashCode for GraphLookupStage'() { + expect: + graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork').hashCode() == + graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork').hashCode() + + graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork', new GraphLookupOptions().maxDepth(1)) + .hashCode() == + graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork', + new GraphLookupOptions().maxDepth(1)).hashCode() + + graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork', new GraphLookupOptions().depthField('depth')) + .hashCode() == + graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork', + new GraphLookupOptions().depthField('depth')).hashCode() + + graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork', new GraphLookupOptions() + .restrictSearchWithMatch(eq('hobbies', 'golf'))).hashCode() == + graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork', new GraphLookupOptions() + .restrictSearchWithMatch(eq('hobbies', 'golf'))).hashCode() + + graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork', new GraphLookupOptions() + .maxDepth(1).depthField('depth')).hashCode() == + graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork', 
new GraphLookupOptions() + .maxDepth(1).depthField('depth')).hashCode() + + graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork', new GraphLookupOptions() + .maxDepth(1).depthField('depth').restrictSearchWithMatch(eq('hobbies', 'golf'))).hashCode() == + graphLookup('contacts', '$friends', 'friends', 'name', 'socialNetwork', new GraphLookupOptions() + .maxDepth(1).depthField('depth').restrictSearchWithMatch(eq('hobbies', 'golf'))).hashCode() + } + + def 'should test equals for GroupStage'() { + expect: + group('$customerId').equals(group('$customerId')) + group(null).equals(group(null)) + + group(parse('{ month: { $month: "$date" }, day: { $dayOfMonth: "$date" }, year: { $year: "$date" } }')) + .equals(group(parse('{ month: { $month: "$date" }, day: { $dayOfMonth: "$date" }, year: { $year: "$date" } }'))) + + group(null, + sum('sum', parse('{ $multiply: [ "$price", "$quantity" ] }')), + avg('avg', '$quantity'), + min('min', '$quantity'), + minN('minN', '$quantity', + new Document('$cond', new Document('if', new Document('$eq', asList('$gid', true))) + .append('then', 2).append('else', 1))), + max('max', '$quantity'), + maxN('maxN', '$quantity', 2), + first('first', '$quantity'), + firstN('firstN', '$quantity', 2), + top('top', ascending('quantity'), '$quantity'), + topN('topN', ascending('quantity'), '$quantity', 2), + last('last', '$quantity'), + lastN('lastN', '$quantity', 2), + bottom('bottom', ascending('quantity'), ['$quantity', '$quality']), + bottomN('bottomN', ascending('quantity'), ['$quantity', '$quality'], + new Document('$cond', new Document('if', new Document('$eq', asList('$gid', true))) + .append('then', 2).append('else', 1))), + push('all', '$quantity'), + mergeObjects('merged', '$quantity'), + addToSet('unique', '$quantity'), + stdDevPop('stdDevPop', '$quantity'), + stdDevSamp('stdDevSamp', '$quantity') + ).equals(group(null, + sum('sum', parse('{ $multiply: [ "$price", "$quantity" ] }')), + avg('avg', '$quantity'), + min('min', '$quantity'), + minN('minN', '$quantity', + new Document('$cond', new Document('if', new Document('$eq', asList('$gid', true))) + .append('then', 2).append('else', 1))), + max('max', '$quantity'), + maxN('maxN', '$quantity', 2), + first('first', '$quantity'), + firstN('firstN', '$quantity', 2), + top('top', ascending('quantity'), '$quantity'), + topN('topN', ascending('quantity'), '$quantity', 2), + last('last', '$quantity'), + lastN('lastN', '$quantity', 2), + bottom('bottom', ascending('quantity'), ['$quantity', '$quality']), + bottomN('bottomN', ascending('quantity'), ['$quantity', '$quality'], + new Document('$cond', new Document('if', new Document('$eq', asList('$gid', true))) + .append('then', 2).append('else', 1))), + push('all', '$quantity'), + mergeObjects('merged', '$quantity'), + addToSet('unique', '$quantity'), + stdDevPop('stdDevPop', '$quantity'), + stdDevSamp('stdDevSamp', '$quantity') + )) + } + + def 'should test hashCode for GroupStage'() { + expect: + group('$customerId').hashCode() == group('$customerId').hashCode() + group(null).hashCode() == group(null).hashCode() + + group(parse('{ month: { $month: "$date" }, day: { $dayOfMonth: "$date" }, year: { $year: "$date" } }')).hashCode() == + group(parse('{ month: { $month: "$date" }, day: { $dayOfMonth: "$date" }, year: { $year: "$date" } }')).hashCode() + + group(null, + sum('sum', parse('{ $multiply: [ "$price", "$quantity" ] }')), + avg('avg', '$quantity'), + min('min', '$quantity'), + minN('minN', '$quantity', + new Document('$cond', new Document('if', new 
Document('$eq', asList('$gid', true))) + .append('then', 2).append('else', 1))), + max('max', '$quantity'), + maxN('maxN', '$quantity', 2), + first('first', '$quantity'), + firstN('firstN', '$quantity', 2), + top('top', ascending('quantity'), '$quantity'), + topN('topN', ascending('quantity'), '$quantity', 2), + last('last', '$quantity'), + lastN('lastN', '$quantity', 2), + bottom('bottom', ascending('quantity'), ['$quantity', '$quality']), + bottomN('bottomN', ascending('quantity'), ['$quantity', '$quality'], + new Document('$cond', new Document('if', new Document('$eq', asList('$gid', true))) + .append('then', 2).append('else', 1))), + push('all', '$quantity'), + mergeObjects('merged', '$quantity'), + addToSet('unique', '$quantity'), + stdDevPop('stdDevPop', '$quantity'), + stdDevSamp('stdDevSamp', '$quantity') + ).hashCode() == + group(null, + sum('sum', parse('{ $multiply: [ "$price", "$quantity" ] }')), + avg('avg', '$quantity'), + min('min', '$quantity'), + minN('minN', '$quantity', + new Document('$cond', new Document('if', new Document('$eq', asList('$gid', true))) + .append('then', 2).append('else', 1))), + max('max', '$quantity'), + maxN('maxN', '$quantity', 2), + first('first', '$quantity'), + firstN('firstN', '$quantity', 2), + top('top', ascending('quantity'), '$quantity'), + topN('topN', ascending('quantity'), '$quantity', 2), + last('last', '$quantity'), + lastN('lastN', '$quantity', 2), + bottom('bottom', ascending('quantity'), ['$quantity', '$quality']), + bottomN('bottomN', ascending('quantity'), ['$quantity', '$quality'], + new Document('$cond', new Document('if', new Document('$eq', asList('$gid', true))) + .append('then', 2).append('else', 1))), + push('all', '$quantity'), + mergeObjects('merged', '$quantity'), + addToSet('unique', '$quantity'), + stdDevPop('stdDevPop', '$quantity'), + stdDevSamp('stdDevSamp', '$quantity')).hashCode() + } + + def 'should test equals for SortByCountStage'() { + expect: + sortByCount('someField').equals(sortByCount('someField')) + sortByCount(new Document('$floor', '$x')).equals(sortByCount(new Document('$floor', '$x'))) + } + + def 'should test hashCode for SortByCountStage'() { + expect: + sortByCount('someField').hashCode() == sortByCount('someField').hashCode() + sortByCount(new Document('$floor', '$x')).hashCode() == sortByCount(new Document('$floor', '$x')).hashCode() + } + + def 'should test equals for FacetStage'() { + expect: + Aggregates.facet( + new Facet('Screen Sizes', + unwind('$attributes'), + match(eq('attributes.name', 'screen size')), + group(null, sum('count', 1 ))), + new Facet('Manufacturer', + match(eq('attributes.name', 'manufacturer')), + group('$attributes.value', sum('count', 1)), + sort(descending('count')), + limit(5))) + .equals(Aggregates.facet( + new Facet('Screen Sizes', + unwind('$attributes'), + match(eq('attributes.name', 'screen size')), + group(null, sum('count', 1 ))), + new Facet('Manufacturer', + match(eq('attributes.name', 'manufacturer')), + group('$attributes.value', sum('count', 1)), + sort(descending('count')), + limit(5)))) + } + + def 'should test hashCode for FacetStage'() { + expect: + Aggregates.facet( + new Facet('Screen Sizes', + unwind('$attributes'), + match(eq('attributes.name', 'screen size')), + group(null, sum('count', 1 ))), + new Facet('Manufacturer', + match(eq('attributes.name', 'manufacturer')), + group('$attributes.value', sum('count', 1)), + sort(descending('count')), + limit(5))).hashCode() == + Aggregates.facet( + new Facet('Screen Sizes', + unwind('$attributes'), + 
match(eq('attributes.name', 'screen size')), + group(null, sum('count', 1 ))), + new Facet('Manufacturer', + match(eq('attributes.name', 'manufacturer')), + group('$attributes.value', sum('count', 1)), + sort(descending('count')), + limit(5))).hashCode() + } + + def 'should test equals for AddFieldsStage'() { + expect: + addFields(new Field('newField', null)).equals(addFields(new Field('newField', null))) + addFields(new Field('newField', 'hello')).equals(addFields(new Field('newField', 'hello'))) + addFields(new Field('this', '$$CURRENT')).equals(addFields(new Field('this', '$$CURRENT'))) + addFields(new Field('myNewField', new Document('c', 3).append('d', 4))) + .equals(addFields(new Field('myNewField', new Document('c', 3).append('d', 4)))) + addFields(new Field('alt3', new Document('$lt', asList('$a', 3)))) + .equals(addFields(new Field('alt3', new Document('$lt', asList('$a', 3))))) + addFields(new Field('b', 3), new Field('c', 5)) + .equals(addFields(new Field('b', 3), new Field('c', 5))) + addFields(asList(new Field('b', 3), new Field('c', 5))) + .equals(addFields(asList(new Field('b', 3), new Field('c', 5)))) + } + + def 'should test hashCode for AddFieldsStage'() { + expect: + addFields(new Field('newField', null)).hashCode() == addFields(new Field('newField', null)).hashCode() + addFields(new Field('newField', 'hello')).hashCode() == addFields(new Field('newField', 'hello')).hashCode() + addFields(new Field('this', '$$CURRENT')).hashCode() == addFields(new Field('this', '$$CURRENT')).hashCode() + addFields(new Field('myNewField', new Document('c', 3).append('d', 4))).hashCode() == + addFields(new Field('myNewField', new Document('c', 3).append('d', 4))).hashCode() + addFields(new Field('alt3', new Document('$lt', asList('$a', 3)))).hashCode() == + addFields(new Field('alt3', new Document('$lt', asList('$a', 3)))).hashCode() + addFields(new Field('b', 3), new Field('c', 5)).hashCode() == + addFields(new Field('b', 3), new Field('c', 5)).hashCode() + addFields(asList(new Field('b', 3), new Field('c', 5))).hashCode() == + addFields(asList(new Field('b', 3), new Field('c', 5))).hashCode() + } + + @IgnoreIf({ serverVersionLessThan(4, 4) }) + def 'should test equals for accumulator operator'() { + given: + def initFunction = 'function() { return { count : 0, sum : 0 } }' + def initFunctionWithArgs = 'function(initCount, initSun) { return { count : parseInt(initCount), sum : parseInt(initSun) } }' + def initArgs = ['0', '0'] + def accumulateFunction = 'function(state, numCopies) { return { count : state.count + 1, sum : state.sum + numCopies } }' + def mergeFunction = 'function(state1, state2) { return { count : state1.count + state2.count, sum : state1.sum + state2.sum } }' + def finalizeFunction = 'function(state) { return (state.sum / state.count) }' + + expect: + accumulator('test', initFunction, accumulateFunction, mergeFunction) + .equals(accumulator('test', initFunction, null, accumulateFunction, null, mergeFunction, + null, 'js')) + accumulator('test', initFunction, accumulateFunction, mergeFunction, finalizeFunction) + .equals(accumulator('test', initFunction, null, accumulateFunction, null, mergeFunction, + finalizeFunction, 'js')) + accumulator('test', initFunction, accumulateFunction, mergeFunction, finalizeFunction, 'lang') + .equals(accumulator('test', initFunction, null, accumulateFunction, null, mergeFunction, + finalizeFunction, 'lang')) + accumulator('test', initFunctionWithArgs, initArgs, accumulateFunction, [ '$copies' ], mergeFunction, finalizeFunction) + 
.equals(accumulator('test', initFunctionWithArgs, initArgs, accumulateFunction, [ '$copies' ], mergeFunction, + finalizeFunction, 'js')) + } + + @IgnoreIf({ serverVersionLessThan(4, 4) }) + def 'should test hashCode for accumulator operator'() { + given: + def initFunction = 'function() { return { count : 0, sum : 0 } }' + def initFunctionWithArgs = 'function(initCount, initSun) { return { count : parseInt(initCount), sum : parseInt(initSun) } }' + def initArgs = ['0', '0'] + def accumulateFunction = 'function(state, numCopies) { return { count : state.count + 1, sum : state.sum + numCopies } }' + def mergeFunction = 'function(state1, state2) { return { count : state1.count + state2.count, sum : state1.sum + state2.sum } }' + def finalizeFunction = 'function(state) { return (state.sum / state.count) }' + + expect: + accumulator('test', initFunction, accumulateFunction, mergeFunction).hashCode() == + accumulator('test', initFunction, null, accumulateFunction, null, + mergeFunction, null, 'js').hashCode() + accumulator('test', initFunction, accumulateFunction, mergeFunction, finalizeFunction).hashCode() == + accumulator('test', initFunction, null, accumulateFunction, null, + mergeFunction, finalizeFunction, 'js').hashCode() + accumulator('test', initFunction, accumulateFunction, mergeFunction, finalizeFunction, 'lang').hashCode() == + accumulator('test', initFunction, null, accumulateFunction, null, mergeFunction, + finalizeFunction, 'lang').hashCode() + accumulator('test', initFunctionWithArgs, initArgs, accumulateFunction, [ '$copies' ], mergeFunction, + finalizeFunction).hashCode() == accumulator('test', initFunctionWithArgs, initArgs, accumulateFunction, + [ '$copies' ], mergeFunction, finalizeFunction, 'js').hashCode() + } + + def 'should test equals for ReplaceRootStage'() { + expect: + replaceRoot('$a1').equals(replaceRoot('$a1')) + replaceRoot('$a1.b').equals(replaceRoot('$a1.b')) + replaceRoot('$a1').equals(replaceRoot('$a1')) + } + + def 'should test hashCode for ReplaceRootStage'() { + expect: + replaceRoot('$a1').hashCode() == replaceRoot('$a1').hashCode() + replaceRoot('$a1.b').hashCode() == replaceRoot('$a1.b').hashCode() + replaceRoot('$a1').hashCode() == replaceRoot('$a1').hashCode() + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/BsonHelper.java b/driver-core/src/test/unit/com/mongodb/client/model/BsonHelper.java new file mode 100644 index 00000000000..402b12f4a28 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/BsonHelper.java @@ -0,0 +1,41 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model; + +import org.bson.BsonDocument; +import org.bson.BsonDocumentWriter; +import org.bson.codecs.Codec; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import static com.mongodb.MongoClientSettings.getDefaultCodecRegistry; + +final class BsonHelper { + private static final CodecRegistry CODEC_REGISTRY = getDefaultCodecRegistry(); + + @SuppressWarnings("unchecked") + static BsonDocument toBson(final Bson bson) { + BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument()); + ((Codec) CODEC_REGISTRY.get(bson.getClass())).encode(writer, bson, EncoderContext.builder().build()); + return writer.getDocument(); + } + + private BsonHelper() { + } + +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/BucketAutoOptionsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/BucketAutoOptionsSpecification.groovy new file mode 100644 index 00000000000..08b5f69ee07 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/BucketAutoOptionsSpecification.groovy @@ -0,0 +1,46 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model + +import spock.lang.Specification + +import static com.mongodb.client.model.Accumulators.sum + +class BucketAutoOptionsSpecification extends Specification { + def 'defaults should be null'() { + when: + def options = new BucketAutoOptions() + + then: + options.granularity == null + options.output == null + } + + def "should return new options with the same property values"() { + when: + def options = new BucketAutoOptions() + .granularity(BucketGranularity.E96) + .output(sum('count', 1)) + + then: + def sum = sum('count', 1) + options.granularity == BucketGranularity.E96 + options.output.size() == 1 + options.output[0].name == sum.name + } + +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/BucketGranularitySpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/BucketGranularitySpecification.groovy new file mode 100644 index 00000000000..a9f8f4bbade --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/BucketGranularitySpecification.groovy @@ -0,0 +1,42 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model + +import spock.lang.Specification + +class BucketGranularitySpecification extends Specification { + def 'should return the expected string value'() { + expect: + granularity.getValue() == expectedString + + where: + granularity | expectedString + BucketGranularity.R5 | 'R5' + BucketGranularity.R10 | 'R10' + BucketGranularity.R20 | 'R20' + BucketGranularity.R40 | 'R40' + BucketGranularity.R80 | 'R80' + BucketGranularity.SERIES_125 | '1-2-5' + BucketGranularity.E6 | 'E6' + BucketGranularity.E12 | 'E12' + BucketGranularity.E24 | 'E24' + BucketGranularity.E48 | 'E48' + BucketGranularity.E96 | 'E96' + BucketGranularity.E192 | 'E192' + BucketGranularity.POWERSOF2 | 'POWERSOF2' + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/BucketOptionsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/BucketOptionsSpecification.groovy new file mode 100644 index 00000000000..49d500d5135 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/BucketOptionsSpecification.groovy @@ -0,0 +1,46 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model + +import spock.lang.Specification + +import static com.mongodb.client.model.Accumulators.sum + +class BucketOptionsSpecification extends Specification { + def 'defaults should be null'() { + when: + def options = new BucketOptions() + + then: + options.defaultBucket == null + options.output == null + } + + def "should return new options with the same property values"() { + when: + def options = new BucketOptions() + .defaultBucket('other') + .output(sum('count', 1)) + + then: + def sum = sum('count', 1) + options.defaultBucket == 'other' + options.output.size() == 1 + options.output[0].name == sum.name + } + +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/BulkWriteOptionsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/BulkWriteOptionsSpecification.groovy new file mode 100644 index 00000000000..bb74c67e51b --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/BulkWriteOptionsSpecification.groovy @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model + +import spock.lang.Specification + + +class BulkWriteOptionsSpecification extends Specification { + def 'should default to ordered'() { + expect: + new BulkWriteOptions().ordered + } + + def 'should set ordered'() { + expect: + new BulkWriteOptions().ordered(ordered).ordered == ordered + + where: + ordered << [true, false] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/CollationAlternateSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/CollationAlternateSpecification.groovy new file mode 100644 index 00000000000..a57a7003104 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/CollationAlternateSpecification.groovy @@ -0,0 +1,53 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model + +import spock.lang.Specification + +class CollationAlternateSpecification extends Specification { + + def 'should return the expected string value'() { + expect: + collationAlternate.getValue() == expectedString + + where: + collationAlternate | expectedString + CollationAlternate.SHIFTED | 'shifted' + CollationAlternate.NON_IGNORABLE | 'non-ignorable' + } + + def 'should support valid string representations'() { + expect: + CollationAlternate.fromString(stringValue) == collationAlternate + + where: + collationAlternate | stringValue + CollationAlternate.SHIFTED | 'shifted' + CollationAlternate.NON_IGNORABLE | 'non-ignorable' + } + + def 'should throw an IllegalArgumentException for invalid values'() { + when: + CollationAlternate.fromString(stringValue) + + then: + thrown(IllegalArgumentException) + + where: + stringValue << [null, 'info'] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/CollationCaseFirstSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/CollationCaseFirstSpecification.groovy new file mode 100644 index 00000000000..126bf5d52a8 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/CollationCaseFirstSpecification.groovy @@ -0,0 +1,55 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.client.model + +import spock.lang.Specification + +class CollationCaseFirstSpecification extends Specification { + + def 'should return the expected string value'() { + expect: + collationCaseFirst.getValue() == expectedString + + where: + collationCaseFirst | expectedString + CollationCaseFirst.LOWER | 'lower' + CollationCaseFirst.OFF | 'off' + CollationCaseFirst.UPPER | 'upper' + } + + def 'should support valid string representations'() { + expect: + CollationCaseFirst.fromString(stringValue) == collationCaseFirst + + where: + collationCaseFirst | stringValue + CollationCaseFirst.LOWER | 'lower' + CollationCaseFirst.OFF | 'off' + CollationCaseFirst.UPPER | 'upper' + } + + def 'should throw an IllegalArgumentException for invalid values'() { + when: + CollationCaseFirst.fromString(stringValue) + + then: + thrown(IllegalArgumentException) + + where: + stringValue << [null, 'info'] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/CollationMaxVariableSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/CollationMaxVariableSpecification.groovy new file mode 100644 index 00000000000..fa3624b8e5f --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/CollationMaxVariableSpecification.groovy @@ -0,0 +1,53 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model + +import spock.lang.Specification + +class CollationMaxVariableSpecification extends Specification { + + def 'should return the expected string value'() { + expect: + collationMaxVariable.getValue() == expectedString + + where: + collationMaxVariable | expectedString + CollationMaxVariable.PUNCT | 'punct' + CollationMaxVariable.SPACE | 'space' + } + + def 'should support valid string representations'() { + expect: + CollationMaxVariable.fromString(stringValue) == collationMaxVariable + + where: + collationMaxVariable | stringValue + CollationMaxVariable.PUNCT | 'punct' + CollationMaxVariable.SPACE | 'space' + } + + def 'should throw an IllegalArgumentException for invalid values'() { + when: + CollationMaxVariable.fromString(stringValue) + + then: + thrown(IllegalArgumentException) + + where: + stringValue << [null, 'info'] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/CollationSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/CollationSpecification.groovy new file mode 100644 index 00000000000..aa8758a70d0 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/CollationSpecification.groovy @@ -0,0 +1,90 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model + +import org.bson.BsonDocument +import spock.lang.Specification + +class CollationSpecification extends Specification { + + def 'should have null values as default'() { + when: + def options = Collation.builder().build() + + then: + options.getAlternate() == null + options.getBackwards() == null + options.getCaseFirst() == null + options.getCaseLevel() == null + options.getLocale() == null + options.getMaxVariable() == null + options.getNormalization() == null + options.getNumericOrdering() == null + options.getStrength() == null + } + + def 'should have the set values as passed to the builder'() { + when: + def options = Collation.builder() + .locale('en') + .caseLevel(true) + .collationCaseFirst(CollationCaseFirst.OFF) + .collationStrength(CollationStrength.IDENTICAL) + .numericOrdering(true) + .collationAlternate(CollationAlternate.SHIFTED) + .collationMaxVariable(CollationMaxVariable.SPACE) + .backwards(true) + .normalization(true) + .build() + + then: + options.getAlternate() == CollationAlternate.SHIFTED + options.getBackwards() == true + options.getCaseFirst() == CollationCaseFirst.OFF + options.getCaseLevel() == true + options.getLocale() == 'en' + options.getMaxVariable() == CollationMaxVariable.SPACE + options.getNormalization() == true + options.getNumericOrdering() == true + options.getStrength() == CollationStrength.IDENTICAL + } + + def 'should create the expected BsonDocument'() { + expect: + collation.asDocument() == BsonDocument.parse(json) + + where: + collation | json + Collation.builder().build() | '{}' + Collation.builder().locale('en').build() | '{locale: "en"}' + Collation.builder() + .locale('en') + .caseLevel(true) + .collationCaseFirst(CollationCaseFirst.OFF) + .collationStrength(CollationStrength.IDENTICAL) + .numericOrdering(true) + .collationAlternate(CollationAlternate.SHIFTED) + .collationMaxVariable(CollationMaxVariable.SPACE) + .normalization(true) + .backwards(true) + .build() | '''{locale: "en", caseLevel: true, caseFirst: "off", strength: 5, + numericOrdering: true, alternate: "shifted", + maxVariable: "space", normalization: true, backwards: true}''' + } + + +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/CollationStrengthSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/CollationStrengthSpecification.groovy new file mode 100644 index 00000000000..d502bf0d45a --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/CollationStrengthSpecification.groovy @@ -0,0 +1,59 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model + +import spock.lang.Specification + +class CollationStrengthSpecification extends Specification { + + def 'should return the expected int representation'() { + expect: + collationStrength.getIntRepresentation() == expectedInt + + where: + collationStrength | expectedInt + CollationStrength.PRIMARY | 1 + CollationStrength.SECONDARY | 2 + CollationStrength.TERTIARY | 3 + CollationStrength.QUATERNARY | 4 + CollationStrength.IDENTICAL | 5 + } + + def 'should support valid int representations'() { + expect: + CollationStrength.fromInt(intValue) == collationStrength + + where: + collationStrength | intValue + CollationStrength.PRIMARY | 1 + CollationStrength.SECONDARY | 2 + CollationStrength.TERTIARY | 3 + CollationStrength.QUATERNARY | 4 + CollationStrength.IDENTICAL | 5 + } + + def 'should throw an IllegalArgumentException for invalid values'() { + when: + CollationStrength.fromInt(intValue) + + then: + thrown(IllegalArgumentException) + + where: + intValue << [0, 6] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/CountOptionsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/CountOptionsSpecification.groovy new file mode 100644 index 00000000000..367e2841f92 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/CountOptionsSpecification.groovy @@ -0,0 +1,100 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.client.model + +import org.bson.BsonDocument +import org.bson.Document +import spock.lang.Specification + +import static java.util.concurrent.TimeUnit.MILLISECONDS +import static java.util.concurrent.TimeUnit.SECONDS + +class CountOptionsSpecification extends Specification { + + def 'should have the expected defaults'() { + when: + def options = new CountOptions() + + then: + options.getCollation() == null + options.getHint() == null + options.getHintString() == null + options.getLimit() == 0 + options.getMaxTime(MILLISECONDS) == 0 + options.getSkip() == 0 + } + + def 'should set collation'() { + expect: + new CountOptions().collation(collation).getCollation() == collation + + where: + collation << [null, Collation.builder().locale('en').build()] + } + + def 'should set hint'() { + expect: + new CountOptions().hint(hint).getHint() == hint + + where: + hint << [null, new BsonDocument(), new Document('a', 1)] + } + + def 'should set hintString'() { + expect: + new CountOptions().hintString(hintString).getHintString() == hintString + + where: + hintString << [null, 'a_1'] + } + + def 'should set limit'() { + expect: + new CountOptions().limit(limit).getLimit() == limit + + where: + limit << [-1, 0, 1] + } + + def 'should set skip'() { + expect: + new CountOptions().skip(skip).getSkip() == skip + + where: + skip << [-1, 0, 1] + } + + def 'should convert maxTime'() { + when: + def options = new CountOptions() + + then: + options.getMaxTime(SECONDS) == 0 + + when: + options.maxTime(100, MILLISECONDS) + + then: + options.getMaxTime(MILLISECONDS) == 100 + + when: + options.maxTime(1004, MILLISECONDS) + + then: + options.getMaxTime(SECONDS) == 1 + } +} \ No newline at end of file diff --git a/driver-core/src/test/unit/com/mongodb/client/model/CreateCollectionOptionsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/CreateCollectionOptionsSpecification.groovy new file mode 100644 index 00000000000..d8c97a73c15 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/CreateCollectionOptionsSpecification.groovy @@ -0,0 +1,93 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model + +import org.bson.BsonDocument +import spock.lang.Specification + +class CreateCollectionOptionsSpecification extends Specification { + + def 'should have the expected defaults'() { + when: + def options = new CreateCollectionOptions() + + then: + options.getCollation() == null + options.getIndexOptionDefaults().getStorageEngine() == null + options.getMaxDocuments() == 0 + options.getSizeInBytes() == 0 + options.getStorageEngineOptions() == null + options.getValidationOptions().getValidator() == null + !options.isCapped() + } + + def 'should set collation'() { + expect: + new CreateCollectionOptions().collation(collation).getCollation() == collation + + where: + collation << [null, Collation.builder().locale('en').build()] + } + + def 'should set indexOptionDefaults'() { + expect: + new CreateCollectionOptions().indexOptionDefaults(indexOptionDefaults).getIndexOptionDefaults() == indexOptionDefaults + + where: + indexOptionDefaults << [new IndexOptionDefaults().storageEngine(BsonDocument.parse('{ storageEngine: { mmapv1 : {} }}'))] + } + + def 'should set maxDocuments'() { + expect: + new CreateCollectionOptions().maxDocuments(maxDocuments).getMaxDocuments() == maxDocuments + + where: + maxDocuments << [-1, 0, 1] + } + + def 'should set sizeInBytes'() { + expect: + new CreateCollectionOptions().sizeInBytes(sizeInBytes).getSizeInBytes() == sizeInBytes + + where: + sizeInBytes << [-1, 0, 1] + } + + def 'should set storageEngineOptions'() { + expect: + new CreateCollectionOptions().storageEngineOptions(storageEngineOptions).getStorageEngineOptions() == storageEngineOptions + + where: + storageEngineOptions << [null, BsonDocument.parse('{ mmapv1 : {} }')] + } + + def 'should set validationOptions'() { + expect: + new CreateCollectionOptions().validationOptions(validationOptions).getValidationOptions() == validationOptions + + where: + validationOptions << [new ValidationOptions(), new ValidationOptions().validationAction(ValidationAction.ERROR)] + } + + def 'should set capped'() { + expect: + new CreateCollectionOptions().capped(capped).isCapped() == capped + + where: + capped << [true, false] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/DeleteOptionsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/DeleteOptionsSpecification.groovy new file mode 100644 index 00000000000..10fea8942ad --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/DeleteOptionsSpecification.groovy @@ -0,0 +1,58 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model + +import org.bson.BsonDocument +import org.bson.Document +import spock.lang.Specification + +class DeleteOptionsSpecification extends Specification { + + def 'should have the expected defaults'() { + when: + def options = new DeleteOptions() + + then: + options.getCollation() == null + options.getHint() == null + options.getHintString() == null + } + + def 'should set collation'() { + expect: + new DeleteOptions().collation(collation).getCollation() == collation + + where: + collation << [null, Collation.builder().locale('en').build()] + } + + def 'should set hint'() { + expect: + new DeleteOptions().hint(hint).getHint() == hint + + where: + hint << [null, new BsonDocument(), new Document('a', 1)] + } + + def 'should set hintString'() { + expect: + new DeleteOptions().hintString(hintString).getHintString() == hintString + + where: + hintString << [null, 'a_1'] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/FiltersSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/FiltersSpecification.groovy new file mode 100644 index 00000000000..9c9a4bc8748 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/FiltersSpecification.groovy @@ -0,0 +1,868 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model + +import com.mongodb.client.model.geojson.Point +import com.mongodb.client.model.geojson.Polygon +import com.mongodb.client.model.geojson.Position +import org.bson.BsonArray +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonInt64 +import org.bson.BsonString +import org.bson.BsonType +import org.bson.Document +import spock.lang.Specification + +import java.util.regex.Pattern + +import static Filters.and +import static Filters.exists +import static Filters.or +import static com.mongodb.client.model.BsonHelper.toBson +import static com.mongodb.client.model.Filters.all +import static com.mongodb.client.model.Filters.bitsAllClear +import static com.mongodb.client.model.Filters.bitsAllSet +import static com.mongodb.client.model.Filters.bitsAnyClear +import static com.mongodb.client.model.Filters.bitsAnySet +import static com.mongodb.client.model.Filters.elemMatch +import static com.mongodb.client.model.Filters.empty +import static com.mongodb.client.model.Filters.eq +import static com.mongodb.client.model.Filters.expr +import static com.mongodb.client.model.Filters.geoIntersects +import static com.mongodb.client.model.Filters.geoWithin +import static com.mongodb.client.model.Filters.geoWithinBox +import static com.mongodb.client.model.Filters.geoWithinCenter +import static com.mongodb.client.model.Filters.geoWithinCenterSphere +import static com.mongodb.client.model.Filters.geoWithinPolygon +import static com.mongodb.client.model.Filters.gt +import static com.mongodb.client.model.Filters.gte +import static com.mongodb.client.model.Filters.jsonSchema +import static com.mongodb.client.model.Filters.lt +import static com.mongodb.client.model.Filters.lte +import static com.mongodb.client.model.Filters.mod +import static com.mongodb.client.model.Filters.ne +import static com.mongodb.client.model.Filters.near +import static com.mongodb.client.model.Filters.nearSphere +import static com.mongodb.client.model.Filters.nin +import static com.mongodb.client.model.Filters.nor +import static com.mongodb.client.model.Filters.not +import static com.mongodb.client.model.Filters.regex +import static com.mongodb.client.model.Filters.size +import static com.mongodb.client.model.Filters.text +import static com.mongodb.client.model.Filters.type +import static com.mongodb.client.model.Filters.where +import static org.bson.BsonDocument.parse + +class FiltersSpecification extends Specification { + + def 'eq should render without $eq'() { + expect: + toBson(eq('x', 1)) == parse('{x : 1}') + toBson(eq('x', null)) == parse('{x : null}') + toBson(eq(1)) == parse('{_id : 1}') + } + + def 'should render $ne'() { + expect: + toBson(ne('x', 1)) == parse('{x : {$ne : 1} }') + toBson(ne('x', null)) == parse('{x : {$ne : null} }') + } + + def 'should render $not'() { + expect: + toBson(not(eq('x', 1))) == parse('{x : {$not: {$eq: 1}}}') + toBson(not(gt('x', 1))) == parse('{x : {$not: {$gt: 1}}}') + toBson(not(regex('x', '^p.*'))) == parse('{x : {$not: /^p.*/}}') + + toBson(not(and(gt('x', 1), eq('y', 20)))) == parse('{$not: {$and: [{x: {$gt: 1}}, {y: 20}]}}') + toBson(not(and(eq('x', 1), eq('x', 2)))) == parse('{$not: {$and: [{x: 1}, {x: 2}]}}') + toBson(not(and(Filters.in('x', 1, 2), eq('x', 3)))) == parse('{$not: {$and: [{x: {$in: [1, 2]}}, {x: 3}]}}') + + toBson(not(or(gt('x', 1), eq('y', 20)))) == parse('{$not: {$or: [{x: {$gt: 1}}, {y: 20}]}}') + toBson(not(or(eq('x', 1), eq('x', 2)))) == parse('{$not: {$or: [{x: 1}, {x: 2}]}}') + 
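// Note (editorial comment): not(...) over a single comparison nests $not under the field, e.g. {x: {$not: {$eq: 1}}}, + // whereas not(...) over a composed $and/$or filter, as in the cases above and below, wraps the whole expression in a top-level $not. +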
toBson(not(or(Filters.in('x', 1, 2), eq('x', 3)))) == parse('{$not: {$or: [{x: {$in: [1, 2]}}, {x: 3}]}}') + + toBson(not(parse('{$in: [1]}'))) == parse('{$not: {$in: [1]}}') + + toBson(not(eq('x', parse('{a: 1, b: 1}')))) == parse('{x: {$not: {$eq: {"a": 1, "b": 1}}}}') + toBson(not(eq('x', parse('{$ref: "1", $id: "1"}')))) == parse('{x: {$not: {$eq: {"$ref": "1", "$id": "1"}}}}') + toBson(not(eq('x', parse('{$ref: "1", $id: "1", $db: "db"}')))) == parse('{x: {$not: {$eq: {"$ref": "1", "$id": "1", $db: "db"}}}}') + } + + def 'should render $nor'() { + expect: + toBson(nor(eq('price', 1))) == parse('{$nor : [{price: 1}]}') + toBson(nor(eq('price', 1), eq('sale', true))) == parse('{$nor : [{price: 1}, {sale: true}]}') + } + + def 'should render $gt'() { + expect: + toBson(gt('x', 1)) == parse('{x : {$gt : 1} }') + } + + def 'should render $lt'() { + expect: + toBson(lt('x', 1)) == parse('{x : {$lt : 1} }') + } + + def 'should render $gte'() { + expect: + toBson(gte('x', 1)) == parse('{x : {$gte : 1} }') + } + + def 'should render $lte'() { + expect: + toBson(lte('x', 1)) == parse('{x : {$lte : 1} }') + } + + def 'should render $exists'() { + expect: + toBson(exists('x')) == parse('{x : {$exists : true} }') + toBson(exists('x', false)) == parse('{x : {$exists : false} }') + } + + def 'or should render empty or using $or'() { + expect: + toBson(or([])) == parse('{$or : []}') + toBson(or()) == parse('{$or : []}') + } + + def 'should render $or'() { + expect: + toBson(or([eq('x', 1), eq('y', 2)])) == parse('{$or : [{x : 1}, {y : 2}]}') + toBson(or(eq('x', 1), eq('y', 2))) == parse('{$or : [{x : 1}, {y : 2}]}') + } + + def 'and should render empty and using $and'() { + expect: + toBson(and([])) == parse('{$and : []}') + toBson(and()) == parse('{$and : []}') + } + + def 'and should render using $and'() { + expect: + toBson(and([eq('x', 1), eq('y', 2)])) == parse('{$and: [{x : 1}, {y : 2}]}') + toBson(and(eq('x', 1), eq('y', 2))) == parse('{$and: [{x : 1}, {y : 2}]}') + } + + def 'and should render $and with clashing keys'() { + expect: + toBson(and([eq('a', 1), eq('a', 2)])) == parse('{$and: [{a: 1}, {a: 2}]}') + } + + def 'and should not flatten nested'() { + expect: + toBson(and([and([eq('a', 1), eq('b', 2)]), eq('c', 3)])) == + parse('{$and: [{$and: [{a : 1}, {b : 2}]}, {c : 3}]}') + toBson(and([and([eq('a', 1), eq('a', 2)]), eq('c', 3)])) == + parse('{$and: [{$and: [{a : 1}, {a : 2}]}, {c : 3}]}') + toBson(and([lt('a', 1), lt('b', 2)])) == + parse('{$and: [{a : {$lt : 1}}, {b : {$lt : 2}}]}') + toBson(and([lt('a', 1), lt('a', 2)])) == + parse('{$and : [{a : {$lt : 1}}, {a : {$lt : 2}}]}') + } + + def '$and should be explicit when using $not'() { + expect: + toBson(and(lt('item', 10), not(lt('item', 5)))) == + parse('''{ + $and: + [ + { item: { $lt: 10 } }, + { item: { $not: { $lt: 5 } } } + ] + } + ''') + + toBson(and(lt('item', 100), gt('item', 10), not(gt('item', 50)))) == + parse('''{ + $and: + [ + { item: { $lt: 100 } }, + { item: { $gt: 10 } }, + { item: { $not: { $gt: 50 } } } + ] + } + ''') + + toBson(and(not(lt('item', 10)), lt('item', 100), not(gt('item', 50)))) == + parse('''{ + $and: + [ + { item: { $not: { $lt: 10 } } }, + { item: { $lt: 100 } }, + { item: { $not: { $gt: 50 } } } + ] + } + ''') + } + + def 'should render $all'() { + expect: + toBson(all('a', [1, 2, 3])) == parse('{a : {$all : [1, 2, 3]} }') + toBson(all('a', 1, 2, 3)) == parse('{a : {$all : [1, 2, 3]} }') + } + + def 'should render $elemMatch'() { + expect: + toBson(elemMatch('results', new BsonDocument('$gte',
new BsonInt32(80)).append('$lt', new BsonInt32(85)))) == + parse('{results : {$elemMatch : {$gte: 80, $lt: 85}}}') + + toBson(elemMatch('results', and(eq('product', 'xyz'), gt('score', 8)))) == + parse('{ results : {$elemMatch : {$and: [{product : "xyz"}, {score : {$gt : 8}}]}}}') + } + + def 'should render $in'() { + expect: + toBson(Filters.in('a', [1, 2, 3])) == parse('{a : {$in : [1, 2, 3]} }') + toBson(Filters.in('a', 1, 2, 3)) == parse('{a : {$in : [1, 2, 3]} }') + } + + def 'should render $nin'() { + expect: + toBson(nin('a', [1, 2, 3])) == parse('{a : {$nin : [1, 2, 3]} }') + toBson(nin('a', 1, 2, 3)) == parse('{a : {$nin : [1, 2, 3]} }') + } + + def 'should render $mod'() { + expect: + toBson(mod('a', 100, 7)) == new BsonDocument('a', new BsonDocument('$mod', new BsonArray([new BsonInt64(100), new BsonInt64(7)]))) + } + + def 'should render $size'() { + expect: + toBson(size('a', 13)) == parse('{a : {$size : 13} }') + } + + def 'should render $bitsAllClear'() { + expect: + toBson(bitsAllClear('a', 13)) == parse('{a : {$bitsAllClear : { "$numberLong" : "13" }} }') + } + + def 'should render $bitsAllSet'() { + expect: + toBson(bitsAllSet('a', 13)) == parse('{a : {$bitsAllSet : { "$numberLong" : "13" }} }') + } + + def 'should render $bitsAnyClear'() { + expect: + toBson(bitsAnyClear('a', 13)) == parse('{a : {$bitsAnyClear : { "$numberLong" : "13" }} }') + } + + def 'should render $bitsAnySet'() { + expect: + toBson(bitsAnySet('a', 13)) == parse('{a : {$bitsAnySet : { "$numberLong" : "13" }} }') + } + + def 'should render $type'() { + expect: + toBson(type('a', BsonType.ARRAY)) == parse('{a : {$type : 4} }') + toBson(type('a', 'number')) == parse('{a : {$type : "number"} }') + } + + def 'should render $text'() { + expect: + toBson(text('mongoDB for GIANT ideas')) == parse('{$text: {$search: "mongoDB for GIANT ideas"} }') + toBson(text('mongoDB for GIANT ideas', new TextSearchOptions().language('english'))) == parse(''' + {$text : {$search : "mongoDB for GIANT ideas", $language : "english"} }''' + ) + toBson(text('mongoDB for GIANT ideas', new TextSearchOptions().caseSensitive(true))) == parse(''' + {$text : {$search : "mongoDB for GIANT ideas", $caseSensitive : true} }''' + ) + toBson(text('mongoDB for GIANT ideas', new TextSearchOptions().diacriticSensitive(false))) == parse(''' + {$text : {$search : "mongoDB for GIANT ideas", $diacriticSensitive : false} }''' + ) + toBson(text('mongoDB for GIANT ideas', new TextSearchOptions().language('english').caseSensitive(false) + .diacriticSensitive(true))) == parse(''' + {$text : {$search : "mongoDB for GIANT ideas", $language : "english", $caseSensitive : false, $diacriticSensitive : true} }''' + ) + } + + def 'should render $regex'() { + expect: + toBson(regex('name', 'acme.*corp')) == parse('{name : {$regex : "acme.*corp", $options : ""}}') + toBson(regex('name', 'acme.*corp', 'si')) == parse('{name : {$regex : "acme.*corp", $options : "si"}}') + toBson(regex('name', Pattern.compile('acme.*corp'))) == parse('{name : {$regex : "acme.*corp", $options : ""}}') + } + + def 'should render $where'() { + expect: + toBson(where('this.credits == this.debits')) == parse('{$where: "this.credits == this.debits"}') + } + + def 'should render $expr'() { + expect: + toBson(expr(new BsonDocument('$gt', new BsonArray([new BsonString('$spent'), new BsonString('$budget')])))) == + parse('{$expr: { $gt: [ "$spent" , "$budget" ] } }') + } + + def 'should render $geoWithin'() { + given: + def polygon = new Polygon([new Position([40.0d, 18.0d]), + new 
Position([40.0d, 19.0d]), + new Position([41.0d, 19.0d]), + new Position([40.0d, 18.0d])]) + expect: + toBson(geoWithin('loc', polygon)) == parse('''{ + loc: { + $geoWithin: { + $geometry: { + type: "Polygon", + coordinates: [ + [ + [40.0, 18.0], [40.0, 19.0], [41.0, 19.0], [40.0, 18.0] + ] + ] + } + } + } + }''') + + toBson(geoWithin('loc', parse(polygon.toJson()))) == parse('''{ + loc: { + $geoWithin: { + $geometry: { + type: "Polygon", + coordinates: [ + [ + [40.0, 18.0], [40.0, 19.0], [41.0, 19.0], + [40.0, 18.0] + ] + ] + } + } + } + }''') + } + + def 'should render $geoWithin with $box'() { + expect: + toBson(geoWithinBox('loc', 1d, 2d, 3d, 4d)) == parse('''{ + loc: { + $geoWithin: { + $box: [ + [ 1.0, 2.0 ], [ 3.0, 4.0 ] + ] + } + } + }''') + } + + def 'should render $geoWithin with $polygon'() { + expect: + toBson(geoWithinPolygon('loc', [[0d, 0d], [3d, 6d], [6d, 0d]])) == parse('''{ + loc: { + $geoWithin: { + $polygon: [ + [ 0.0, 0.0 ], [ 3.0, 6.0 ], + [ 6.0, 0.0 ] + ] + } + } + }''') + } + + def 'should render $geoWithin with $center'() { + expect: + toBson(geoWithinCenter('loc', -74d, 40.74d, 10d)) == parse('{ loc: { $geoWithin: { $center: [ [-74.0, 40.74], 10.0 ] } } }') + } + + def 'should render $geoWithin with $centerSphere'() { + expect: + toBson(geoWithinCenterSphere('loc', -74d, 40.74d, 10d)) == parse('''{ + loc: { + $geoWithin: { + $centerSphere: [ + [-74.0, 40.74], 10.0 + ] + } + } + }''') + } + + def 'should render $geoIntersects'() { + given: + def polygon = new Polygon([new Position([40.0d, 18.0d]), + new Position([40.0d, 19.0d]), + new Position([41.0d, 19.0d]), + new Position([40.0d, 18.0d])]) + expect: + toBson(geoIntersects('loc', polygon)) == parse('''{ + loc: { + $geoIntersects: { + $geometry: { + type: "Polygon", + coordinates: [ + [ + [40.0, 18.0], [40.0, 19.0], [41.0, 19.0], + [40.0, 18.0] + ] + ] + } + } + } + }''') + + toBson(geoIntersects('loc', parse(polygon.toJson()))) == parse('''{ + loc: { + $geoIntersects: { + $geometry: { + type: "Polygon", + coordinates: [ + [ + [40.0, 18.0], [40.0, 19.0], [41.0, 19.0], + [40.0, 18.0] + ] + ] + } + } + } + }''') + } + + def 'should render $near'() { + given: + def point = new Point(new Position(-73.9667, 40.78)) + def pointDocument = parse(point.toJson()) + + expect: + toBson(near('loc', point, 5000d, 1000d)) == parse('''{ + loc : { + $near: { + $geometry: { + type : "Point", + coordinates : [ -73.9667, 40.78 ] + }, + $maxDistance: 5000.0, + $minDistance: 1000.0, + } + } + }''') + + toBson(near('loc', point, 5000d, null)) == parse('''{ + loc : { + $near: { + $geometry: { + type : "Point", + coordinates : [ -73.9667, 40.78 ] + }, + $maxDistance: 5000.0, + } + } + }''') + + toBson(near('loc', point, null, 1000d)) == parse('''{ + loc : { + $near: { + $geometry: { + type : "Point", + coordinates : [ -73.9667, 40.78 ] + }, + $minDistance: 1000.0, + } + } + }''') + + toBson(near('loc', pointDocument, 5000d, 1000d)) == parse('''{ + loc : { + $near: { + $geometry: { + type : "Point", + coordinates : [ -73.9667, 40.78 ] + }, + $maxDistance: 5000.0, + $minDistance: 1000.0, + } + } + }''') + + toBson(near('loc', pointDocument, 5000d, null)) == parse('''{ + loc : { + $near: { + $geometry: { + type : "Point", + coordinates : [ -73.9667, 40.78 ] + }, + $maxDistance: 5000.0, + } + } + }''') + + toBson(near('loc', pointDocument, null, 1000d)) == parse('''{ + loc : { + $near: { + $geometry: { + type : "Point", + coordinates : [ -73.9667, 40.78 ] + }, + $minDistance: 1000.0, + } + } + }''') + + toBson(near('loc', -73.9667, 40.78, 
5000d, 1000d)) == parse('''{ + loc : { + $near: [-73.9667, 40.78], + $maxDistance: 5000.0, + $minDistance: 1000.0, + } + } + }''') + + toBson(near('loc', -73.9667, 40.78, 5000d, null)) == parse('''{ + loc : { + $near: [-73.9667, 40.78], + $maxDistance: 5000.0, + } + } + }''') + + toBson(near('loc', -73.9667, 40.78, null, 1000d)) == parse('''{ + loc : { + $near: [-73.9667, 40.78], + $minDistance: 1000.0, + } + } + }''') + } + + def 'should render $nearSphere'() { + given: + def point = new Point(new Position(-73.9667, 40.78)) + def pointDocument = parse(point.toJson()) + + expect: + toBson(nearSphere('loc', point, 5000d, 1000d)) == parse('''{ + loc : { + $nearSphere: { + $geometry: { + type : "Point", + coordinates : [ -73.9667, 40.78 ] + }, + $maxDistance: 5000.0, + $minDistance: 1000.0, + } + } + }''') + + toBson(nearSphere('loc', point, 5000d, null)) == parse('''{ + loc: + { + $nearSphere: + { + $geometry: + { + type: "Point", + coordinates: + [-73.9667, 40.78] + }, + $maxDistance: 5000.0, + } + } + }''') + + toBson(nearSphere('loc', point, null, 1000d)) == parse('''{ + loc : { + $nearSphere: { + $geometry: { + type : "Point", + coordinates : [ -73.9667, 40.78 ] + }, + $minDistance: 1000.0, + } + } + }''') + + toBson(nearSphere('loc', pointDocument, 5000d, 1000d)) == parse('''{ + loc : { + $nearSphere: { + $geometry: { + type : "Point", + coordinates : [ -73.9667, 40.78 ] + }, + $maxDistance: 5000.0, + $minDistance: 1000.0, + } + } + }''') + + toBson(nearSphere('loc', pointDocument, 5000d, null)) == parse('''{ + loc : { + $nearSphere: { + $geometry: { + type : "Point", + coordinates : [ -73.9667, 40.78 ] + }, + $maxDistance: 5000.0, + } + } + }''') + + toBson(nearSphere('loc', pointDocument, null, 1000d)) == parse('''{ + loc : { + $nearSphere: { + $geometry: { + type : "Point", + coordinates : [ -73.9667, 40.78 ] + }, + $minDistance: 1000.0, + } + } + }''') + + toBson(nearSphere('loc', -73.9667, 40.78, 5000d, 1000d)) == parse('''{ + loc : { + $nearSphere: [-73.9667, 40.78], + $maxDistance: 5000.0, + $minDistance: 1000.0, + } + } + }''') + + toBson(nearSphere('loc', -73.9667, 40.78, 5000d, null)) == parse('''{ + loc : { + $nearSphere: [-73.9667, 40.78], + $maxDistance: 5000.0, + } + } + }''') + + toBson(nearSphere('loc', -73.9667, 40.78, null, 1000d)) == parse('''{ + loc : { + $nearSphere: [-73.9667, 40.78], + $minDistance: 1000.0, + } + } + }''') + } + + def 'should render $jsonSchema'() { + expect: + toBson(jsonSchema(new BsonDocument('bsonType', new BsonString('object')))) == parse( '''{ + $jsonSchema : { + bsonType : "object" + } + }''') + } + + def 'should render an empty document'() { + expect: + toBson(empty()) == parse('''{}''') + } + + def 'should render with iterable value'() { + expect: + toBson(eq('x', new Document())) == parse('''{ + x : {} + }''') + + toBson(eq('x', [1, 2, 3])) == parse('''{ + x : [1, 2, 3] + }''') + } + + def 'should create string representation for simple filter'() { + expect: + eq('x', 1).toString() == 'Filter{fieldName=\'x\', value=1}' + } + + def 'should create string representation for regex filter'() { + expect: + regex('x', '.*').toString() == 'Operator Filter{fieldName=\'x\', operator=\'$eq\', ' + + 'value=BsonRegularExpression{pattern=\'.*\', options=\'\'}}' + } + + def 'should create string representation for simple operator filter'() { + expect: + gt('x', 1).toString() == 'Operator Filter{fieldName=\'x\', operator=\'$gt\', value=1}' + } + + def 'should create string representation for compound filters'() { + expect: + and(eq('x', 1), eq('y', 
2)).toString() == 'And Filter{filters=[Filter{fieldName=\'x\', value=1}, Filter{fieldName=\'y\', value=2}]}' + or(eq('x', 1), eq('y', 2)).toString() == 'Or Filter{filters=[Filter{fieldName=\'x\', value=1}, Filter{fieldName=\'y\', value=2}]}' + nor(eq('x', 1), eq('y', 2)).toString() == 'Nor Filter{filters=[Filter{fieldName=\'x\', value=1}, Filter{fieldName=\'y\', value=2}]}' + not(eq('x', 1)).toString() == 'Not Filter{filter=Filter{fieldName=\'x\', value=1}}' + } + + def 'should create string representation for geo filters'() { + expect: + geoIntersects('x', new Point(new Position(1, 2))).toString() == 'Geometry Operator Filter{fieldName=\'x\', ' + + 'operator=\'$geoIntersects\', geometry=Point{coordinate=Position{values=[1.0, 2.0]}}, maxDistance=null, minDistance=null}' + near('x', new Point(new Position(1, 2)), 3.0, 4.0).toString() == 'Geometry Operator Filter{fieldName=\'x\', ' + + 'operator=\'$near\', geometry=Point{coordinate=Position{values=[1.0, 2.0]}}, maxDistance=3.0, minDistance=4.0}' + } + + def 'should create string representation for text filter'() { + expect: + text('java', new TextSearchOptions().language('French').caseSensitive(true).diacriticSensitive(true)).toString() == + 'Text Filter{search=\'java\', textSearchOptions=Text Search Options{language=\'French\', caseSensitive=true, ' + + 'diacriticSensitive=true}}' + } + + def 'should create string representation for iterable operator filters'() { + expect: + all('x', [1, 2, 3]).toString() == 'Operator Filter{fieldName=\'x\', operator=\'$all\', value=[1, 2, 3]}' + } + + def 'should test equals for SimpleFilter'() { + expect: + regex('x', 'acme.*corp').equals(regex('x', 'acme.*corp')) + } + + def 'should test hashCode for SimpleFilter'() { + expect: + regex('x', 'acme.*corp').hashCode() == regex('x', 'acme.*corp').hashCode() + } + + def 'should test equals for OperatorFilter'() { + expect: + ne('x', 1).equals(ne('x', 1)) + exists('x').equals(exists('x', true)) + exists('x', false).equals(exists('x', false)) + type('a', BsonType.ARRAY).equals(type('a', BsonType.ARRAY)) + !type('a', 'number').equals(type('a', BsonType.ARRAY)) + } + + def 'should test hashCode for OperatorFilter'() { + expect: + ne('x', 1).hashCode() == ne('x', 1).hashCode() + exists('x').hashCode() == exists('x', true).hashCode() + exists('x', false).hashCode() == exists('x', false).hashCode() + type('a', BsonType.ARRAY).hashCode() == type('a', BsonType.ARRAY).hashCode() + type('a', 'number').hashCode() != type('a', BsonType.ARRAY).hashCode() + } + + def 'should test equals for AndFilter'() { + expect: + and([]).equals(and()) + and([eq('x', 1), eq('y', 2)]) + .equals(and(eq('x', 1), eq('y', 2))) + } + + def 'should test hashCode for AndFilter'() { + expect: + and([]).hashCode() == and().hashCode() + and([eq('x', 1), eq('y', 2)]).hashCode() == + and(eq('x', 1), eq('y', 2)).hashCode() + } + + def 'should test equals for OrNorFilter'() { + expect: + or([]).equals(or()) + nor(eq('x', 1), eq('x', 2)).equals(nor(eq('x', 1), eq('x', 2))) + !nor(eq('x', 1), eq('x', 2)).equals(or(eq('x', 1), eq('x', 2))) + } + + def 'should test hashCode for OrNorFilter'() { + expect: + or([]).hashCode() == or().hashCode() + nor(eq('x', 1), eq('x', 2)).hashCode() == nor(eq('x', 1), eq('x', 2)).hashCode() + nor(eq('x', 1), eq('x', 2)).hashCode() != or(eq('x', 1), eq('x', 2)).hashCode() + } + + def 'should test equals for IterableOperatorFilter'() { + expect: + Filters.in('a', [1, 2, 3]).equals(Filters.in('a', 1, 2, 3)) + !nin('a', [1, 2, 3]).equals(nin('a', 1, 2)) + !all('a', [1, 2, 
3]).equals(nin('a', 1, 2, 3)) + all('a', []).equals(all('a')) + } + + def 'should test hashCode for IterableOperatorFilter'() { + expect: + Filters.in('a', [1, 2, 3]).hashCode() == Filters.in('a', 1, 2, 3).hashCode() + nin('a', [1, 2, 3]).hashCode() != nin('a', 1, 2).hashCode() + all('a', [1, 2, 3]).hashCode() != nin('a', 1, 2, 3).hashCode() + all('a', []).hashCode() == all('a').hashCode() + } + + def 'should test equals for SimpleEncodingFilter'() { + expect: + eq('x', 1).equals(eq('x', 1)) + !eq('x', 1).equals(ne('x', 1)) + !eq('x', 1).equals(eq('x', 2)) + !eq('y', 1).equals(eq('x', 1)) + !eq('x', 1).equals(parse('{x : 1}')) + expr(new BsonDocument('$gt', new BsonArray([new BsonString('$spent'), new BsonString('$budget')]))) + .equals(expr(new BsonDocument('$gt', new BsonArray([new BsonString('$spent'), new BsonString('$budget')])))) + } + + def 'should test hashCode for SimpleEncodingFilter'() { + expect: + eq('x', 1).hashCode() == eq('x', 1).hashCode() + eq('x', 1).hashCode() != ne('x', 1).hashCode() + eq('x', 1).hashCode() != eq('x', 2).hashCode() + eq('y', 1).hashCode() != eq('x', 1).hashCode() + eq('x', 1).hashCode() != parse('{x : 1}').hashCode() + expr(new BsonDocument('$gt', new BsonArray([new BsonString('$spent'), new BsonString('$budget')]))).hashCode() == + expr(new BsonDocument('$gt', new BsonArray([new BsonString('$spent'), new BsonString('$budget')]))).hashCode() + } + + def 'should test equals for NotFilter'() { + expect: + not(eq('x', 1)).equals(not(eq('x', 1))) + } + + def 'should test hashCode for NotFilter'() { + expect: + not(eq('x', 1)).hashCode() == not(eq('x', 1)).hashCode() + } + + def 'should test equals for GeometryOperatorFilter'() { + def polygon = new Polygon([new Position([40.0d, 18.0d]), + new Position([40.0d, 19.0d]), + new Position([41.0d, 19.0d]), + new Position([40.0d, 18.0d])]) + expect: + geoWithin('loc', polygon).equals(geoWithin('loc', polygon)) + !geoWithinBox('loc', 1d, 2d, 3d, 4d) + .equals(geoWithinBox('loc', 1d, 2d, 3d, 5d)) + + geoWithinPolygon('loc', [[0d, 0d], [3d, 6d], [6d, 0d]]) + .equals(geoWithinPolygon('loc', [[0d, 0d], [3d, 6d], [6d, 0d]])) + } + + def 'should test hashCode for GeometryOperatorFilter'() { + def polygon = new Polygon([new Position([40.0d, 18.0d]), + new Position([40.0d, 19.0d]), + new Position([41.0d, 19.0d]), + new Position([40.0d, 18.0d])]) + expect: + geoWithin('loc', polygon).hashCode() == geoWithin('loc', polygon).hashCode() + geoWithinBox('loc', 1d, 2d, 3d, 4d).hashCode() != + geoWithinBox('loc', 1d, 2d, 3d, 5d).hashCode() + + geoWithinPolygon('loc', [[0d, 0d], [3d, 6d], [6d, 0d]]).hashCode() == + geoWithinPolygon('loc', [[0d, 0d], [3d, 6d], [6d, 0d]]).hashCode() + } + + def 'should test equals for TextFilter'() { + expect: + text('mongoDB for GIANT ideas').equals(text('mongoDB for GIANT ideas')) + text('mongoDB for GIANT ideas', new TextSearchOptions().language('english')) + .equals(text('mongoDB for GIANT ideas', new TextSearchOptions().language('english'))) + } + + def 'should test hashCode for TextFilter'() { + expect: + text('mongoDB for GIANT ideas').hashCode() == text('mongoDB for GIANT ideas').hashCode() + text('mongoDB for GIANT ideas', new TextSearchOptions().language('english')).hashCode() == + text('mongoDB for GIANT ideas', new TextSearchOptions().language('english')).hashCode() + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/FindOneAndDeleteOptionsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/FindOneAndDeleteOptionsSpecification.groovy new file mode 
100644 index 00000000000..38549496c85 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/FindOneAndDeleteOptionsSpecification.groovy @@ -0,0 +1,100 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model + +import org.bson.BsonDocument +import org.bson.Document +import spock.lang.Specification + +import static java.util.concurrent.TimeUnit.MILLISECONDS +import static java.util.concurrent.TimeUnit.SECONDS + +class FindOneAndDeleteOptionsSpecification extends Specification { + + def 'should have the expected defaults'() { + when: + def options = new FindOneAndDeleteOptions() + + then: + options.getCollation() == null + options.getMaxTime(MILLISECONDS) == 0 + options.getProjection() == null + options.getSort() == null + options.getHint() == null + options.getHintString() == null + } + + def 'should set collation'() { + expect: + new FindOneAndDeleteOptions().collation(collation).getCollation() == collation + + where: + collation << [null, Collation.builder().locale('en').build()] + } + + def 'should set projection'() { + expect: + new FindOneAndDeleteOptions().projection(projection).getProjection() == projection + + where: + projection << [null, BsonDocument.parse('{ a: 1}')] + } + + def 'should set sort'() { + expect: + new FindOneAndDeleteOptions().sort(sort).getSort() == sort + + where: + sort << [null, BsonDocument.parse('{ a: 1}')] + } + + def 'should convert maxTime'() { + when: + def options = new FindOneAndDeleteOptions() + + then: + options.getMaxTime(SECONDS) == 0 + + when: + options.maxTime(100, MILLISECONDS) + + then: + options.getMaxTime(MILLISECONDS) == 100 + + when: + options.maxTime(1004, MILLISECONDS) + + then: + options.getMaxTime(SECONDS) == 1 + } + + def 'should set hint'() { + expect: + new FindOneAndDeleteOptions().hint(hint).getHint() == hint + + where: + hint << [null, new BsonDocument(), new Document('a', 1)] + } + + def 'should set hintString'() { + expect: + new FindOneAndDeleteOptions().hintString(hintString).getHintString() == hintString + + where: + hintString << [null, 'a_1'] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/FindOneAndReplaceOptionsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/FindOneAndReplaceOptionsSpecification.groovy new file mode 100644 index 00000000000..efafee238bb --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/FindOneAndReplaceOptionsSpecification.groovy @@ -0,0 +1,125 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model + +import org.bson.BsonDocument +import org.bson.BsonInt32 +import spock.lang.Specification + +import static java.util.concurrent.TimeUnit.MILLISECONDS +import static java.util.concurrent.TimeUnit.SECONDS + +class FindOneAndReplaceOptionsSpecification extends Specification { + + def 'should have the expected defaults'() { + when: + def options = new FindOneAndReplaceOptions() + + then: + options.getCollation() == null + options.getMaxTime(MILLISECONDS) == 0 + options.getProjection() == null + options.getSort() == null + options.getBypassDocumentValidation() == null + options.getReturnDocument() == ReturnDocument.BEFORE + options.isUpsert() == false + } + + def 'should set collation'() { + expect: + new FindOneAndReplaceOptions().collation(collation).getCollation() == collation + + where: + collation << [null, Collation.builder().locale('en').build()] + } + + def 'should set projection'() { + expect: + new FindOneAndReplaceOptions().projection(projection).getProjection() == projection + + where: + projection << [null, BsonDocument.parse('{ a: 1}')] + } + + def 'should set sort'() { + expect: + new FindOneAndReplaceOptions().sort(sort).getSort() == sort + + where: + sort << [null, BsonDocument.parse('{ a: 1}')] + } + + def 'should convert maxTime'() { + when: + def options = new FindOneAndReplaceOptions() + + then: + options.getMaxTime(SECONDS) == 0 + + when: + options.maxTime(100, MILLISECONDS) + + then: + options.getMaxTime(MILLISECONDS) == 100 + + when: + options.maxTime(1004, MILLISECONDS) + + then: + options.getMaxTime(SECONDS) == 1 + } + + def 'should set upsert'() { + expect: + new FindOneAndReplaceOptions().upsert(upsert).isUpsert() == upsert + + where: + upsert << [true, false] + } + + def 'should set bypassDocumentValidation'() { + expect: + new FindOneAndReplaceOptions().bypassDocumentValidation(bypassValidation).getBypassDocumentValidation() == bypassValidation + + where: + bypassValidation << [null, true, false] + } + + def 'should set returnDocument'() { + expect: + new FindOneAndReplaceOptions().returnDocument(returnDocument).getReturnDocument() == returnDocument + + where: + returnDocument << [ReturnDocument.BEFORE, ReturnDocument.AFTER] + } + + def 'should set hint'() { + expect: + new FindOneAndReplaceOptions().hint(hint).getHint() == hint + + where: + hint << [null, new BsonDocument('_id', new BsonInt32(1))] + } + + def 'should set hint string'() { + expect: + new FindOneAndReplaceOptions().hintString(hint).getHintString() == hint + + where: + hint << [null, '_id_'] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/FindOneAndUpdateOptionsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/FindOneAndUpdateOptionsSpecification.groovy new file mode 100644 index 00000000000..0cd3844061b --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/FindOneAndUpdateOptionsSpecification.groovy @@ -0,0 +1,134 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model + +import org.bson.BsonDocument +import org.bson.BsonInt32 +import spock.lang.Specification + +import static java.util.concurrent.TimeUnit.MILLISECONDS +import static java.util.concurrent.TimeUnit.SECONDS + +class FindOneAndUpdateOptionsSpecification extends Specification { + + def 'should have the expected defaults'() { + when: + def options = new FindOneAndUpdateOptions() + + then: + options.getCollation() == null + options.getMaxTime(MILLISECONDS) == 0 + options.getProjection() == null + options.getSort() == null + options.getBypassDocumentValidation() == null + options.getReturnDocument() == ReturnDocument.BEFORE + options.isUpsert() == false + } + + def 'should set collation'() { + expect: + new FindOneAndUpdateOptions().collation(collation).getCollation() == collation + + where: + collation << [null, Collation.builder().locale('en').build()] + } + + def 'should set projection'() { + expect: + new FindOneAndUpdateOptions().projection(projection).getProjection() == projection + + where: + projection << [null, BsonDocument.parse('{ a: 1}')] + } + + def 'should set sort'() { + expect: + new FindOneAndUpdateOptions().sort(sort).getSort() == sort + + where: + sort << [null, BsonDocument.parse('{ a: 1}')] + } + + def 'should convert maxTime'() { + when: + def options = new FindOneAndUpdateOptions() + + then: + options.getMaxTime(SECONDS) == 0 + + when: + options.maxTime(100, MILLISECONDS) + + then: + options.getMaxTime(MILLISECONDS) == 100 + + when: + options.maxTime(1004, MILLISECONDS) + + then: + options.getMaxTime(SECONDS) == 1 + } + + def 'should set upsert'() { + expect: + new FindOneAndUpdateOptions().upsert(upsert).isUpsert() == upsert + + where: + upsert << [true, false] + } + + def 'should set bypassDocumentValidation'() { + expect: + new FindOneAndUpdateOptions().bypassDocumentValidation(bypassValidation).getBypassDocumentValidation() == bypassValidation + + where: + bypassValidation << [null, true, false] + } + + def 'should set returnDocument'() { + expect: + new FindOneAndUpdateOptions().returnDocument(returnDocument).getReturnDocument() == returnDocument + + where: + returnDocument << [ReturnDocument.BEFORE, ReturnDocument.AFTER] + } + + + def 'should set array filters'() { + expect: + new FindOneAndUpdateOptions().arrayFilters(arrayFilters).getArrayFilters() == arrayFilters + + where: + arrayFilters << [null, [], [new BsonDocument('a.b', new BsonInt32(1))]] + } + + def 'should set hint'() { + expect: + new FindOneAndUpdateOptions().hint(hint).getHint() == hint + + where: + hint << [null, new BsonDocument('_id', new BsonInt32(1))] + } + + def 'should set hint string'() { + expect: + new FindOneAndUpdateOptions().hintString(hint).getHintString() == hint + + where: + hint << [null, '_id_'] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/FindOptionsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/FindOptionsSpecification.groovy new file mode
100644 index 00000000000..380d305cb28 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/FindOptionsSpecification.groovy @@ -0,0 +1,186 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model + +import com.mongodb.CursorType +import com.mongodb.internal.client.model.FindOptions +import org.bson.BsonDocument +import org.bson.Document +import spock.lang.Specification + +import static java.util.concurrent.TimeUnit.MILLISECONDS +import static java.util.concurrent.TimeUnit.SECONDS + +class FindOptionsSpecification extends Specification { + + def 'should have the expected defaults'() { + when: + def options = new FindOptions() + + then: + options.getCollation() == null + options.getMaxTime(MILLISECONDS) == 0 + options.getMaxAwaitTime(MILLISECONDS) == 0 + options.getProjection() == null + options.getSort() == null + options.getHint() == null + options.getHintString() == null + options.getLimit() == 0 + options.getSkip() == 0 + options.getBatchSize() == 0 + options.getCursorType() == CursorType.NonTailable + !options.isNoCursorTimeout() + !options.isPartial() + !options.isAllowDiskUse() + } + + def 'should set collation'() { + expect: + new FindOptions().collation(collation).getCollation() == collation + + where: + collation << [null, Collation.builder().locale('en').build()] + } + + def 'should set projection'() { + expect: + new FindOptions().projection(projection).getProjection() == projection + + where: + projection << [null, BsonDocument.parse('{a: 1}')] + } + + def 'should set sort'() { + expect: + new FindOptions().sort(sort).getSort() == sort + + where: + sort << [null, BsonDocument.parse('{a: 1}')] + } + + def 'should set limit'() { + expect: + new FindOptions().limit(limit).getLimit() == limit + + where: + limit << [-1, 0, 1] + } + + def 'should set skip'() { + expect: + new FindOptions().skip(skip).getSkip() == skip + + where: + skip << [-1, 0, 1] + } + + def 'should set batchSize'() { + expect: + new FindOptions().batchSize(batchSize).getBatchSize() == batchSize + + where: + batchSize << [-1, 0, 1] + } + + def 'should set cursorType'() { + expect: + new FindOptions().cursorType(cursorType).getCursorType() == cursorType + + where: + cursorType << [CursorType.NonTailable, CursorType.TailableAwait, CursorType.Tailable] + } + + def 'should set partial'() { + expect: + new FindOptions().partial(partial).isPartial() == partial + + where: + partial << [true, false] + } + + def 'should set noCursorTimeout'() { + expect: + new FindOptions().noCursorTimeout(noCursorTimeout).isNoCursorTimeout() == noCursorTimeout + + where: + noCursorTimeout << [true, false] + } + + def 'should convert maxTime'() { + when: + def options = new FindOptions() + + then: + options.getMaxTime(SECONDS) == 0 + + when: + options.maxTime(100, MILLISECONDS) + + then: + options.getMaxTime(MILLISECONDS) == 100 + + when: + options.maxTime(1004, MILLISECONDS) + + then: + options.getMaxTime(SECONDS) == 1 + } + + def 
'should convert maxAwaitTime'() { + when: + def options = new FindOptions() + + then: + options.getMaxAwaitTime(SECONDS) == 0 + + when: + options.maxAwaitTime(100, MILLISECONDS) + + then: + options.getMaxAwaitTime(MILLISECONDS) == 100 + + when: + options.maxAwaitTime(1004, MILLISECONDS) + + then: + options.getMaxAwaitTime(SECONDS) == 1 + } + + def 'should set hint'() { + expect: + new FindOptions().hint(hint).getHint() == hint + + where: + hint << [null, new BsonDocument(), new Document('a', 1)] + } + + def 'should set hintString'() { + expect: + new FindOptions().hintString(hintString).getHintString() == hintString + + where: + hintString << [null, 'a_1'] + } + + def 'should set allowDiskUse'() { + expect: + new FindOptions().allowDiskUse(allowDiskUse).isAllowDiskUse() == allowDiskUse + + where: + allowDiskUse << [true, false] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/GraphLookupOptionsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/GraphLookupOptionsSpecification.groovy new file mode 100644 index 00000000000..fdf3700ce6d --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/GraphLookupOptionsSpecification.groovy @@ -0,0 +1,36 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model + +import spock.lang.Specification + +import static org.bson.BsonDocument.parse + +class GraphLookupOptionsSpecification extends Specification { + def "should return new options with the same property values"() { + when: + def options = new GraphLookupOptions() + .maxDepth(10) + .depthField('field') + .restrictSearchWithMatch(parse('{x : 1}')) + + then: + options.maxDepth == 10 + options.depthField == 'field' + options.restrictSearchWithMatch == parse('{x : 1}') + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/IndexOptionsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/IndexOptionsSpecification.groovy new file mode 100644 index 00000000000..5342cfed885 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/IndexOptionsSpecification.groovy @@ -0,0 +1,124 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model + +import org.bson.BsonDocument +import spock.lang.Specification + +import java.util.concurrent.TimeUnit + +class IndexOptionsSpecification extends Specification { + + def 'should set options correctly'() { + when: + def options = new IndexOptions() + + then: + !options.isBackground() + !options.isUnique() + !options.isSparse() + options.getName() == null + options.getExpireAfter(TimeUnit.SECONDS) == null + options.getVersion() == null + options.getWeights() == null + options.getDefaultLanguage() == null + options.getLanguageOverride() == null + options.getTextVersion() == null + options.getSphereVersion() == null + options.getBits() == null + options.getMin() == null + options.getMax() == null + options.getStorageEngine() == null + options.getPartialFilterExpression() == null + options.getCollation() == null + options.getWildcardProjection() == null + !options.isHidden() + + when: + def weights = BsonDocument.parse('{ a: 1000 }') + def storageEngine = BsonDocument.parse('{ wiredTiger : { configString : "block_compressor=zlib" }}') + def partialFilterExpression = BsonDocument.parse('{ a: { $gte: 10 } }') + def collation = Collation.builder().locale('en').build() + def wildcardProjection = BsonDocument.parse('{a : 1}') + options.background(true) + .unique(true) + .sparse(true) + .name('aIndex') + .expireAfter(100, TimeUnit.SECONDS) + .version(1) + .weights(weights) + .defaultLanguage('es') + .languageOverride('language') + .textVersion(1) + .sphereVersion(2) + .bits(1) + .min(-180.0) + .max(180.0) + .storageEngine(storageEngine) + .partialFilterExpression(partialFilterExpression) + .collation(collation) + .wildcardProjection(wildcardProjection) + .hidden(true) + + then: + options.isBackground() + options.isUnique() + options.isSparse() + options.getName() == 'aIndex' + options.getExpireAfter(TimeUnit.SECONDS) == 100 + options.getVersion() == 1 + options.getWeights() == weights + options.getDefaultLanguage() == 'es' + options.getLanguageOverride() == 'language' + options.getTextVersion() == 1 + options.getSphereVersion() == 2 + options.getBits() == 1 + options.getMin() == -180.0 + options.getMax() == 180.0 + options.getStorageEngine() == storageEngine + options.getPartialFilterExpression() == partialFilterExpression + options.getCollation() == collation + options.getWildcardProjection() == wildcardProjection + options.isHidden() + } + + def 'should convert expireAfter'() { + when: + def options = new IndexOptions() + + then: + !options.getExpireAfter(TimeUnit.SECONDS) + + when: + options = new IndexOptions().expireAfter(null, null) + + then: + !options.getExpireAfter(TimeUnit.SECONDS) + + when: + options = new IndexOptions().expireAfter(4, TimeUnit.MILLISECONDS) + + then: + options.getExpireAfter(TimeUnit.SECONDS) == 0 + + when: + options = new IndexOptions().expireAfter(1004, TimeUnit.MILLISECONDS) + + then: + options.getExpireAfter(TimeUnit.SECONDS) == 1 + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/IndexesSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/IndexesSpecification.groovy new file mode 100644 index 00000000000..ac999f5b911 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/IndexesSpecification.groovy @@ -0,0 +1,102 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model + + +import spock.lang.Specification + +import static com.mongodb.client.model.BsonHelper.toBson +import static com.mongodb.client.model.Indexes.ascending +import static com.mongodb.client.model.Indexes.compoundIndex +import static com.mongodb.client.model.Indexes.descending +import static com.mongodb.client.model.Indexes.geo2d +import static com.mongodb.client.model.Indexes.geo2dsphere +import static com.mongodb.client.model.Indexes.hashed +import static com.mongodb.client.model.Indexes.text +import static org.bson.BsonDocument.parse + +class IndexesSpecification extends Specification { + + def 'ascending'() { + expect: + toBson(ascending('x')) == parse('{x : 1}') + toBson(ascending('x', 'y')) == parse('{x : 1, y : 1}') + toBson(ascending(['x', 'y'])) == parse('{x : 1, y : 1}') + } + + def 'descending'() { + expect: + toBson(descending('x')) == parse('{x : -1}') + toBson(descending('x', 'y')) == parse('{x : -1, y : -1}') + toBson(descending(['x', 'y'])) == parse('{x : -1, y : -1}') + } + + def 'geo2dsphere'() { + expect: + toBson(geo2dsphere('x')) == parse('{x : "2dsphere"}') + toBson(geo2dsphere('x', 'y')) == parse('{x : "2dsphere", y : "2dsphere"}') + toBson(geo2dsphere(['x', 'y'])) == parse('{x : "2dsphere", y : "2dsphere"}') + } + + def 'geo2d'() { + expect: + toBson(geo2d('x')) == parse('{x : "2d"}') + } + + def 'text helper'() { + expect: + toBson(text('x')) == parse('{x : "text"}') + toBson(text()) == parse('{ "$**" : "text"}') + } + + def 'hashed'() { + expect: + toBson(hashed('x')) == parse('{x : "hashed"}') + } + + def 'compoundIndex'() { + expect: + toBson(compoundIndex([ascending('x'), descending('y')])) == parse('{x : 1, y : -1}') + toBson(compoundIndex(ascending('x'), descending('y'))) == parse('{x : 1, y : -1}') + toBson(compoundIndex(ascending('x'), descending('y'), descending('x'))) == parse('{y : -1, x : -1}') + toBson(compoundIndex(ascending('x', 'y'), descending('a', 'b'))) == parse('{x : 1, y : 1, a : -1, b : -1}') + } + + def 'should test equals on CompoundIndex'() { + expect: + compoundIndex([ascending('x'), descending('y')]) + .equals(compoundIndex([ascending('x'), descending('y')])) + compoundIndex(ascending('x'), descending('y')) + .equals(compoundIndex(ascending('x'), descending('y'))) + compoundIndex(ascending('x'), descending('y'), descending('x')) + .equals(compoundIndex(ascending('x'), descending('y'), descending('x'))) + compoundIndex(ascending('x', 'y'), descending('a', 'b')) + .equals(compoundIndex(ascending('x', 'y'), descending('a', 'b'))) + } + + def 'should test hashCode on CompoundIndex'() { + expect: + compoundIndex([ascending('x'), descending('y')]).hashCode() == + compoundIndex([ascending('x'), descending('y')]).hashCode() + compoundIndex(ascending('x'), descending('y')).hashCode() == + compoundIndex(ascending('x'), descending('y')).hashCode() + compoundIndex(ascending('x'), descending('y'), descending('x')).hashCode() == + compoundIndex(ascending('x'), descending('y'), descending('x')).hashCode() + compoundIndex(ascending('x', 'y'), descending('a', 'b')).hashCode() == + 
compoundIndex(ascending('x', 'y'), descending('a', 'b')).hashCode() + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/InsertManyOptionsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/InsertManyOptionsSpecification.groovy new file mode 100644 index 00000000000..033ebf36a0f --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/InsertManyOptionsSpecification.groovy @@ -0,0 +1,46 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model + +import spock.lang.Specification + +class InsertManyOptionsSpecification extends Specification { + def 'should have the expected defaults'() { + when: + def options = new InsertManyOptions() + + then: + options.isOrdered() + options.getBypassDocumentValidation() == null + } + + def 'should set ordered'() { + expect: + new InsertManyOptions().ordered(ordered).isOrdered() == ordered + + where: + ordered << [true, false] + } + + def 'should set bypassDocumentValidation'() { + expect: + new InsertManyOptions().bypassDocumentValidation(bypassValidation).getBypassDocumentValidation() == bypassValidation + + where: + bypassValidation << [null, true, false] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/InsertOneOptionsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/InsertOneOptionsSpecification.groovy new file mode 100644 index 00000000000..482e58faa0a --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/InsertOneOptionsSpecification.groovy @@ -0,0 +1,37 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model + +import spock.lang.Specification + +class InsertOneOptionsSpecification extends Specification { + def 'should have the expected defaults'() { + when: + def options = new InsertOneOptions() + + then: + options.getBypassDocumentValidation() == null + } + + def 'should set bypassDocumentValidation'() { + expect: + new InsertOneOptions().bypassDocumentValidation(bypassValidation).getBypassDocumentValidation() == bypassValidation + + where: + bypassValidation << [null, true, false] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/ProjectionsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/ProjectionsSpecification.groovy new file mode 100644 index 00000000000..6e7d0336037 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/ProjectionsSpecification.groovy @@ -0,0 +1,121 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model + + +import spock.lang.Specification + +import static com.mongodb.client.model.BsonHelper.toBson +import static com.mongodb.client.model.Filters.and +import static com.mongodb.client.model.Filters.eq +import static com.mongodb.client.model.Projections.computed +import static com.mongodb.client.model.Projections.elemMatch +import static com.mongodb.client.model.Projections.exclude +import static com.mongodb.client.model.Projections.excludeId +import static com.mongodb.client.model.Projections.fields +import static com.mongodb.client.model.Projections.include +import static com.mongodb.client.model.Projections.meta +import static com.mongodb.client.model.Projections.metaTextScore +import static com.mongodb.client.model.Projections.slice +import static org.bson.BsonDocument.parse + +class ProjectionsSpecification extends Specification { + + def 'include'() { + expect: + toBson(include('x')) == parse('{x : 1}') + toBson(include('x', 'y')) == parse('{x : 1, y : 1}') + toBson(include(['x', 'y'])) == parse('{x : 1, y : 1}') + toBson(include(['x', 'y', 'x'])) == parse('{y : 1, x : 1}') + } + + def 'exclude'() { + expect: + toBson(exclude('x')) == parse('{x : 0}') + toBson(exclude('x', 'y')) == parse('{x : 0, y : 0}') + toBson(exclude(['x', 'y'])) == parse('{x : 0, y : 0}') + } + + def 'excludeId helper'() { + expect: + toBson(excludeId()) == parse('{_id : 0}') + } + + def 'firstElem'() { + expect: + toBson(elemMatch('x')) == parse('{"x.$" : 1}') + } + + def 'elemMatch'() { + expect: + toBson(elemMatch('x', and(eq('y', 1), eq('z', 2)))) == + parse('{x : {$elemMatch : {$and: [{y : 1}, {z : 2}]}}}') + } + + def 'slice'() { + expect: + toBson(slice('x', 5)) == parse('{x : {$slice : 5}}') + toBson(slice('x', 5, 10)) == parse('{x : {$slice : [5, 10]}}') + } + + def 'meta'() { + expect: + toBson(meta('x', 'textScore')) == parse('{x : {$meta : "textScore"}}') + toBson(meta('x', 'recordId')) == parse('{x : {$meta : "recordId"}}') + } + + def 'metaTextScore'() { + expect: + 
toBson(metaTextScore('x')) == parse('{x : {$meta : "textScore"}}') + } + + def 'computed'() { + expect: + toBson(computed('c', '$y')) == parse('{c : "$y"}') + } + + def 'combine fields'() { + expect: + toBson(fields(include('x', 'y'), exclude('_id'))) == parse('{x : 1, y : 1, _id : 0}') + toBson(fields([include('x', 'y'), exclude('_id')])) == parse('{x : 1, y : 1, _id : 0}') + toBson(fields(include('x', 'y'), exclude('x'))) == parse('{y : 1, x : 0}') + } + + def 'should create string representation for include and exclude'() { + expect: + include(['x', 'y', 'x']).toString() == '{"y": 1, "x": 1}' + exclude(['x', 'y', 'x']).toString() == '{"y": 0, "x": 0}' + excludeId().toString() == '{"_id": 0}' + } + + def 'should create string representation for computed'() { + expect: + computed('c', '$y').toString() == 'Expression{name=\'c\', expression=$y}' + } + + def 'should create string representation for elemMatch with filter'() { + expect: + elemMatch('x', and(eq('y', 1), eq('z', 2))).toString() == + 'ElemMatch Projection{fieldName=\'x\', ' + + 'filter=And Filter{filters=[Filter{fieldName=\'y\', value=1}, Filter{fieldName=\'z\', value=2}]}}' + } + + def 'should create string representation for fields'() { + expect: + fields(include('x', 'y'), exclude('_id')).toString() == 'Projections{projections=[{"x": 1, "y": 1}, {"_id": 0}]}' + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/SortsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/SortsSpecification.groovy new file mode 100644 index 00000000000..395bce497a8 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/SortsSpecification.groovy @@ -0,0 +1,94 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model + + +import spock.lang.Specification + +import static com.mongodb.client.model.BsonHelper.toBson +import static com.mongodb.client.model.Sorts.ascending +import static com.mongodb.client.model.Sorts.descending +import static com.mongodb.client.model.Sorts.metaTextScore +import static com.mongodb.client.model.Sorts.orderBy +import static org.bson.BsonDocument.parse + +class SortsSpecification extends Specification { + + def 'ascending'() { + expect: + toBson(ascending('x')) == parse('{x : 1}') + toBson(ascending('x', 'y')) == parse('{x : 1, y : 1}') + toBson(ascending(['x', 'y'])) == parse('{x : 1, y : 1}') + } + + def 'descending'() { + expect: + toBson(descending('x')) == parse('{x : -1}') + toBson(descending('x', 'y')) == parse('{x : -1, y : -1}') + toBson(descending(['x', 'y'])) == parse('{x : -1, y : -1}') + } + + def 'metaTextScore'() { + expect: + toBson(metaTextScore('x')) == parse('{x : {$meta : "textScore"}}') + } + + def 'orderBy'() { + expect: + toBson(orderBy([ascending('x'), descending('y')])) == parse('{x : 1, y : -1}') + toBson(orderBy(ascending('x'), descending('y'))) == parse('{x : 1, y : -1}') + toBson(orderBy(ascending('x'), descending('y'), descending('x'))) == parse('{y : -1, x : -1}') + toBson(orderBy(ascending('x', 'y'), descending('a', 'b'))) == parse('{x : 1, y : 1, a : -1, b : -1}') + } + + def 'should create string representation for simple sorts'() { + expect: + ascending('x', 'y').toString() == '{"x": 1, "y": 1}' + descending('x', 'y').toString() == '{"x": -1, "y": -1}' + metaTextScore('x').toString() == '{"x": {"$meta": "textScore"}}' + } + + def 'should create string representation for compound sorts'() { + expect: + orderBy(ascending('x', 'y'), descending('a', 'b')).toString() == + 'Compound Sort{sorts=[{"x": 1, "y": 1}, {"a": -1, "b": -1}]}' + } + + def 'should test equals for CompoundSort'() { + expect: + orderBy([ascending('x'), descending('y')]) + .equals(orderBy([ascending('x'), descending('y')])) + orderBy(ascending('x'), descending('y')) + .equals(orderBy(ascending('x'), descending('y'))) + orderBy(ascending('x'), descending('y'), descending('x')) + .equals(orderBy(ascending('x'), descending('y'), descending('x'))) + orderBy(ascending('x', 'y'), descending('a', 'b')) + .equals(orderBy(ascending('x', 'y'), descending('a', 'b'))) + } + + def 'should test hashCode for CompoundSort'() { + expect: + orderBy([ascending('x'), descending('y')]).hashCode() == + orderBy([ascending('x'), descending('y')]).hashCode() + orderBy(ascending('x'), descending('y')).hashCode() == + orderBy(ascending('x'), descending('y')).hashCode() + orderBy(ascending('x'), descending('y'), descending('x')).hashCode() == + orderBy(ascending('x'), descending('y'), descending('x')).hashCode() + orderBy(ascending('x', 'y'), descending('a', 'b')).hashCode() == + orderBy(ascending('x', 'y'), descending('a', 'b')).hashCode() + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/TestWindowOutputFields.java b/driver-core/src/test/unit/com/mongodb/client/model/TestWindowOutputFields.java new file mode 100644 index 00000000000..13f05d6acf0 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/TestWindowOutputFields.java @@ -0,0 +1,554 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model; + +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonDouble; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.Document; +import org.bson.conversions.Bson; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.AbstractMap; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.BiFunction; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Stream; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.client.model.Sorts.ascending; +import static com.mongodb.client.model.Windows.documents; +import static com.mongodb.client.model.Windows.range; +import static java.util.Arrays.asList; +import static java.util.Collections.singleton; +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +final class TestWindowOutputFields { + private static final String NO_EXPRESSION = "{}"; + private static final String PATH = "newField"; + private static final Bson SORT_BY = ascending("sortByField"); + private static final Map.Entry<Object, BsonValue> INT_EXPR = new AbstractMap.SimpleImmutableEntry<>(1, new BsonInt32(1)); + private static final Map.Entry<Object, BsonValue> ARRAY_EXPR = + new AbstractMap.SimpleImmutableEntry<>(Arrays.asList(0.5, 0.9, "$$letValueX"), + new BsonArray(Arrays.asList(new BsonDouble(0.5), new BsonDouble(0.9), new BsonString("$$letValueX")))); + private static final Map.Entry<Object, BsonValue> STR_EXPR = + new AbstractMap.SimpleImmutableEntry<>("$fieldToRead", new BsonString("$fieldToRead")); + private static final Map.Entry<Object, BsonValue> DOC_EXPR = new AbstractMap.SimpleImmutableEntry<>( + new Document("gid", "$partitionId"), + new BsonDocument("gid", new BsonString("$partitionId"))); + private static final Map.Entry<Object, BsonValue> DOC_INT_EXPR = new AbstractMap.SimpleImmutableEntry<>( + new Document("$cond", new Document("if", + new Document("$eq", asList("$gid", true))) + .append("then", 2).append("else", 2)), + new BsonDocument("$cond", new BsonDocument("if", + new BsonDocument("$eq", new BsonArray(asList(new BsonString("$gid"), BsonBoolean.TRUE)))) + .append("then", new BsonInt32(2)).append("else", new BsonInt32(2)))); + private static final Window POSITION_BASED_WINDOW = documents(1, 2); + private static final Window RANGE_BASED_WINDOW = range(1, 2); + + @Test + void of() { + WindowOutputField expected = WindowOutputFields.sum(PATH, STR_EXPR.getKey(), POSITION_BASED_WINDOW); + WindowOutputField actual = WindowOutputFields.of(new BsonField(PATH, new Document("$sum", STR_EXPR.getKey()) + .append("window",
POSITION_BASED_WINDOW.toBsonDocument()))); + assertAll( + () -> assertEquals(expected.toBsonField().getName(), actual.toBsonField().getName()), + () -> assertEquals(expected.toBsonField().getValue().toBsonDocument(), actual.toBsonField().getValue().toBsonDocument())); + } + + @Test + void simpleWindowFunctions() { + Map<Object, BsonValue> expressions = new HashMap<>(); + expressions.put(INT_EXPR.getKey(), INT_EXPR.getValue()); + expressions.put(STR_EXPR.getKey(), STR_EXPR.getValue()); + Collection<Window> windows = asList(null, POSITION_BASED_WINDOW, RANGE_BASED_WINDOW); + assertAll( + () -> assertSimpleParameterWindowFunction("$sum", WindowOutputFields::sum, expressions, windows, false), + () -> assertSimpleParameterWindowFunction("$avg", WindowOutputFields::avg, expressions, windows, false), + () -> assertSimpleParameterWindowFunction("$stdDevSamp", WindowOutputFields::stdDevSamp, expressions, windows, false), + () -> assertSimpleParameterWindowFunction("$stdDevPop", WindowOutputFields::stdDevPop, expressions, windows, false), + () -> assertSimpleParameterWindowFunction("$min", WindowOutputFields::min, expressions, windows, false), + () -> assertSimpleParameterWindowFunction("$max", WindowOutputFields::max, expressions, windows, false), + () -> assertNoParameterWindowFunction("$count", WindowOutputFields::count, windows, false), + () -> assertSimpleParameterWindowFunction("$push", WindowOutputFields::push, expressions, windows, false), + () -> assertSimpleParameterWindowFunction("$addToSet", WindowOutputFields::addToSet, expressions, windows, false), + () -> assertSimpleParameterWindowFunction("$first", WindowOutputFields::first, expressions, windows, false), + () -> assertSimpleParameterWindowFunction("$last", WindowOutputFields::last, expressions, windows, false), + () -> assertNoParameterNoWindowFunction("$documentNumber", WindowOutputFields::documentNumber), + () -> assertNoParameterNoWindowFunction("$rank", WindowOutputFields::rank), + () -> assertNoParameterNoWindowFunction("$denseRank", WindowOutputFields::denseRank), + () -> assertSimpleParameterNoWindowFunction("$locf", WindowOutputFields::locf, expressions), + () -> assertSimpleParameterNoWindowFunction("$linearFill", WindowOutputFields::linearFill, expressions) + ); + } + + @Test + void derivative() { + assertDerivativeOrIntegral("$derivative", WindowOutputFields::derivative); + } + + @Test + void timeDerivative() { + assertTimeDerivativeOrIntegral("$derivative", WindowOutputFields::timeDerivative); + } + + @Test + void integral() { + assertDerivativeOrIntegral("$integral", WindowOutputFields::integral); + } + + @Test + void timeIntegral() { + assertTimeDerivativeOrIntegral("$integral", WindowOutputFields::timeIntegral); + } + + @Test + void covarianceSamp() { + assertCovariance("$covarianceSamp", WindowOutputFields::covarianceSamp); + } + + @Test + void covariancePop() { + assertCovariance("$covariancePop", WindowOutputFields::covariancePop); + } + + @Test + void expMovingAvgWithN() { + assertWindowOutputField( + new BsonField(PATH, new BsonDocument("$expMovingAvg", new BsonDocument("input", STR_EXPR.getValue()) + .append("N", new BsonInt32(1)))), + WindowOutputFields.expMovingAvg(PATH, STR_EXPR.getKey(), 1)); + assertAll( + () -> assertThrows(IllegalArgumentException.class, () -> + WindowOutputFields.expMovingAvg(null, STR_EXPR.getKey(), 1)), + () -> assertThrows(IllegalArgumentException.class, () -> + WindowOutputFields.expMovingAvg(PATH, null, 1)), + () -> assertThrows(IllegalArgumentException.class, () -> + WindowOutputFields.expMovingAvg(PATH,
STR_EXPR.getKey(), 0)), + () -> assertThrows(IllegalArgumentException.class, () -> + WindowOutputFields.expMovingAvg(PATH, STR_EXPR.getKey(), -1))); + } + + @Test + void expMovingAvgWithAlpha() { + assertWindowOutputField( + new BsonField(PATH, new BsonDocument("$expMovingAvg", new BsonDocument("input", STR_EXPR.getValue()) + .append("alpha", new BsonDouble(0.5)))), + WindowOutputFields.expMovingAvg(PATH, STR_EXPR.getKey(), 0.5)); + assertAll( + () -> assertThrows(IllegalArgumentException.class, () -> + WindowOutputFields.expMovingAvg(null, STR_EXPR.getKey(), 0.5)), + () -> assertThrows(IllegalArgumentException.class, () -> + WindowOutputFields.expMovingAvg(PATH, null, 0.5)), + () -> assertThrows(IllegalArgumentException.class, () -> + WindowOutputFields.expMovingAvg(PATH, STR_EXPR.getKey(), 0d)), + () -> assertThrows(IllegalArgumentException.class, () -> + WindowOutputFields.expMovingAvg(PATH, STR_EXPR.getKey(), 1d))); + } + + @Test + void shift() { + assertAll( + () -> assertWindowOutputField( + new BsonField(PATH, new BsonDocument("$shift", new BsonDocument("output", STR_EXPR.getValue()) + .append("by", new BsonInt32(-1)) + .append("default", INT_EXPR.getValue()))), + WindowOutputFields.shift(PATH, STR_EXPR.getKey(), INT_EXPR.getKey(), -1)), + () -> assertWindowOutputField( + new BsonField(PATH, new BsonDocument("$shift", new BsonDocument("output", STR_EXPR.getValue()) + .append("by", new BsonInt32(0)))), + WindowOutputFields.shift(PATH, STR_EXPR.getKey(), null, 0)), + () -> assertWindowOutputField( + new BsonField(PATH, new BsonDocument("$shift", new BsonDocument("output", STR_EXPR.getValue()) + .append("by", new BsonInt32(1)) + .append("default", INT_EXPR.getValue()))), + WindowOutputFields.shift(PATH, STR_EXPR.getKey(), INT_EXPR.getKey(), 1))); + assertAll( + () -> assertThrows(IllegalArgumentException.class, () -> + WindowOutputFields.shift(null, STR_EXPR.getKey(), INT_EXPR.getKey(), 0)), + () -> assertThrows(IllegalArgumentException.class, () -> + WindowOutputFields.shift(PATH, null, INT_EXPR.getKey(), 0))); + } + + @Test + void pick() { + Map<Object, BsonValue> expressions = new HashMap<>(); + expressions.put(INT_EXPR.getKey(), INT_EXPR.getValue()); + expressions.put(STR_EXPR.getKey(), STR_EXPR.getValue()); + expressions.put(DOC_EXPR.getKey(), DOC_EXPR.getValue()); + Map<Object, BsonValue> nExpressions = new HashMap<>(); + nExpressions.put(INT_EXPR.getKey(), INT_EXPR.getValue()); + nExpressions.put(DOC_INT_EXPR.getKey(), DOC_INT_EXPR.getValue()); + Collection<Window> windows = asList(null, POSITION_BASED_WINDOW, RANGE_BASED_WINDOW); + assertAll( + () -> assertPickNoSortWindowFunction("$minN", WindowOutputFields::minN, expressions, "input", nExpressions, windows), + () -> assertPickNoSortWindowFunction("$maxN", WindowOutputFields::maxN, expressions, "input", nExpressions, windows), + () -> assertPickNoSortWindowFunction("$firstN", WindowOutputFields::firstN, expressions, "input", nExpressions, windows), + () -> assertPickNoSortWindowFunction("$lastN", WindowOutputFields::lastN, expressions, "input", nExpressions, windows), + () -> assertPickNoNWindowFunction("$bottom", WindowOutputFields::bottom, expressions, "output", windows), + () -> assertPickSortWindowFunction("$bottomN", WindowOutputFields::bottomN, expressions, "output", nExpressions, windows), + () -> assertPickNoNWindowFunction("$top", WindowOutputFields::top, expressions, "output", windows), + () -> assertPickSortWindowFunction("$topN", WindowOutputFields::topN, expressions, "output", nExpressions, windows) + ); + } + + @ParameterizedTest +
@MethodSource("percentileWindowFunctionsSource") + void percentile(final Object inExpressionParameter, + final BsonValue expectedInExpression, + final Object pExpressionParameter, + final BsonValue expectedPExpression, + @Nullable final Window window) { + String expectedFunctionName = "$percentile"; + QuantileMethod method = QuantileMethod.approximate(); + BsonField expectedWindowOutputField = getExpectedBsonField(expectedFunctionName, expectedInExpression, expectedPExpression, + method, window); + + Supplier msg = () -> "expectedFunctionName=" + expectedFunctionName + + ", path=" + PATH + + ", InExpression=" + inExpressionParameter + + ", pExpression=" + pExpressionParameter + + ", method=" + method + + ", window=" + window; + + assertWindowOutputField(expectedWindowOutputField, WindowOutputFields.percentile(PATH, inExpressionParameter, pExpressionParameter, method, window), + msg); + assertThrows(IllegalArgumentException.class, () -> WindowOutputFields.percentile(null, inExpressionParameter, pExpressionParameter, method, window), msg); + assertThrows(IllegalArgumentException.class, () -> WindowOutputFields.percentile(PATH, null, pExpressionParameter, method, window), msg); + assertThrows(IllegalArgumentException.class, () -> WindowOutputFields.percentile(PATH, inExpressionParameter, null, method, window), msg); + assertThrows(IllegalArgumentException.class, () -> WindowOutputFields.percentile(PATH, inExpressionParameter, pExpressionParameter, null, window), msg); + } + + @ParameterizedTest + @MethodSource("medianWindowFunctionsSource") + void median(final Object inExpressionParameter, + final BsonValue expectedInExpression, + @Nullable final Window window) { + String expectedFunctionName = "$median"; + QuantileMethod method = QuantileMethod.approximate(); + BsonField expectedWindowOutputField = getExpectedBsonField(expectedFunctionName, expectedInExpression, + null, method, window); + + Supplier msg = () -> "expectedFunctionName=" + expectedFunctionName + + ", path=" + PATH + + ", InExpression=" + inExpressionParameter + + ", method=" + method + + ", window=" + window; + + assertWindowOutputField(expectedWindowOutputField, WindowOutputFields.median(PATH, inExpressionParameter, method, window), + msg); + assertThrows(IllegalArgumentException.class, () -> WindowOutputFields.median(null, inExpressionParameter, method, window), + msg); + assertThrows(IllegalArgumentException.class, () -> WindowOutputFields.median(PATH, null, method, window), msg); + assertThrows(IllegalArgumentException.class, () -> WindowOutputFields.median(PATH, inExpressionParameter, null, window), msg); + } + + private static Stream percentileWindowFunctionsSource() { + Map inExpressions = new HashMap<>(); + inExpressions.put(INT_EXPR.getKey(), INT_EXPR.getValue()); + inExpressions.put(STR_EXPR.getKey(), STR_EXPR.getValue()); + inExpressions.put(DOC_EXPR.getKey(), DOC_EXPR.getValue()); + + Map pExpressions = new HashMap<>(); + pExpressions.put(ARRAY_EXPR.getKey(), ARRAY_EXPR.getValue()); + pExpressions.put(STR_EXPR.getKey(), STR_EXPR.getValue()); + + Collection windows = asList(null, POSITION_BASED_WINDOW, RANGE_BASED_WINDOW); + + // Generate different combinations of test arguments using Cartesian product of inExpressions, pExpressions, and windows. 
+ List<Arguments> argumentsList = new ArrayList<>(); + inExpressions.forEach((incomingInParameter, inBsonValue) -> + pExpressions.forEach((incomingPParameter, pBsonValue) -> + windows.forEach(window -> + argumentsList.add( + Arguments.of(incomingInParameter, inBsonValue, incomingPParameter, pBsonValue, window))))); + return Stream.of(argumentsList.toArray(new Arguments[]{})); + } + + private static Stream<Arguments> medianWindowFunctionsSource() { + Map<Object, BsonValue> inExpressions = new HashMap<>(); + inExpressions.put(INT_EXPR.getKey(), INT_EXPR.getValue()); + inExpressions.put(STR_EXPR.getKey(), STR_EXPR.getValue()); + inExpressions.put(DOC_EXPR.getKey(), DOC_EXPR.getValue()); + + Collection<Window> windows = asList(null, POSITION_BASED_WINDOW, RANGE_BASED_WINDOW); + + // Generate different combinations of test arguments using Cartesian product of inExpressions and windows. + List<Arguments> argumentsList = new ArrayList<>(); + inExpressions.forEach((incomingInParameter, inBsonValue) -> + windows.forEach(window -> + argumentsList.add( + Arguments.of(incomingInParameter, inBsonValue, window)))); + return Stream.of(argumentsList.toArray(new Arguments[]{})); + } + + private static BsonField getExpectedBsonField(final String expectedFunctionName, final BsonValue expectedInExpression, + final @Nullable BsonValue expectedPExpression, + final QuantileMethod method, final @Nullable Window window) { + BsonDocument expectedFunctionDoc = new BsonDocument("input", expectedInExpression); + if (expectedPExpression != null) { + expectedFunctionDoc.append("p", expectedPExpression); + } + expectedFunctionDoc.append("method", method.toBsonValue()); + BsonDocument expectedFunctionAndWindow = new BsonDocument(expectedFunctionName, expectedFunctionDoc); + if (window != null) { + expectedFunctionAndWindow.append("window", window.toBsonDocument()); + } + return new BsonField(PATH, expectedFunctionAndWindow); + } + + private static void assertPickNoSortWindowFunction( + final String expectedFunctionName, + final QuadriFunction<String, Object, Object, Window, WindowOutputField> windowOutputFieldBuilder, + final Map<Object, BsonValue> expressions, + final String expressionKey, + final Map<Object, BsonValue> nExpressions, + final Collection<Window> windows) { + assertPickWindowFunction( + expectedFunctionName, + (a1, ignoredSort, a3, a4, a5) -> windowOutputFieldBuilder.apply(a1, a3, a4, a5), + false, expressions, expressionKey, nExpressions, windows); + } + + private static void assertPickNoNWindowFunction( + final String expectedFunctionName, + final QuadriFunction<String, Bson, Object, Window, WindowOutputField> windowOutputFieldBuilder, + final Map<Object, BsonValue> expressions, + final String expressionKey, + final Collection<Window> windows) { + assertPickWindowFunction( + expectedFunctionName, + (a1, a2, a3, ignoredN, a5) -> windowOutputFieldBuilder.apply(a1, a2, a3, a5), + true, expressions, expressionKey, Collections.singletonMap(NO_EXPRESSION, BsonDocument.parse(NO_EXPRESSION)), windows); + } + + private static void assertPickSortWindowFunction( + final String expectedFunctionName, + final QuinqueFunction<String, Bson, Object, Object, Window, WindowOutputField> windowOutputFieldBuilder, + final Map<Object, BsonValue> expressions, + final String expressionKey, + final Map<Object, BsonValue> nExpressions, + final Collection<Window> windows) { + assertPickWindowFunction( + expectedFunctionName, + windowOutputFieldBuilder, + true, expressions, expressionKey, nExpressions, windows); + } + + private static void assertPickWindowFunction( + final String expectedFunctionName, + final QuinqueFunction<String, Bson, Object, Object, Window, WindowOutputField> windowOutputFieldBuilder, + final boolean useSortBy, + final Map<Object, BsonValue> expressions, + final String expressionKey, + final Map<Object, BsonValue> nExpressions, + final Collection<Window> windows) { + Bson sortBySpec = useSortBy ?
SORT_BY : null; + for (final Map.Entry<Object, BsonValue> expressionAndEncoded: expressions.entrySet()) { + Object expression = expressionAndEncoded.getKey(); + BsonValue encodedExpression = expressionAndEncoded.getValue(); + for (final Map.Entry<Object, BsonValue> nExpressionAndEncoded: nExpressions.entrySet()) { + Object nExpression = nExpressionAndEncoded.getKey(); + BsonValue encodedNExpression = nExpressionAndEncoded.getValue(); + boolean useNExpression = !nExpression.equals(NO_EXPRESSION); + for (final Window window : windows) { + BsonDocument expectedFunctionDoc = new BsonDocument(expressionKey, encodedExpression); + if (useSortBy) { + expectedFunctionDoc.append("sortBy", assertNotNull(sortBySpec).toBsonDocument()); + } + if (useNExpression) { + expectedFunctionDoc.append("n", encodedNExpression); + } + BsonDocument expectedFunctionAndWindow = new BsonDocument(expectedFunctionName, expectedFunctionDoc); + if (window != null) { + expectedFunctionAndWindow.append("window", window.toBsonDocument()); + } + BsonField expectedWindowOutputField = new BsonField(PATH, expectedFunctionAndWindow); + Supplier<String> msg = () -> "expectedFunctionName=" + expectedFunctionName + + ", path=" + PATH + + ", sortBySpec=" + sortBySpec + + ", expression=" + expression + + ", nExpression=" + nExpression + + ", window=" + window; + assertWindowOutputField( + expectedWindowOutputField, windowOutputFieldBuilder.apply(PATH, sortBySpec, expression, nExpression, window), msg); + assertThrows(IllegalArgumentException.class, () -> + windowOutputFieldBuilder.apply(null, sortBySpec, expression, nExpression, window), msg); + if (useSortBy) { + assertThrows(IllegalArgumentException.class, () -> + windowOutputFieldBuilder.apply(PATH, null, expression, nExpression, window), msg); + } + assertThrows(IllegalArgumentException.class, () -> + windowOutputFieldBuilder.apply(PATH, sortBySpec, null, nExpression, window), msg); + if (useNExpression) { + assertThrows(IllegalArgumentException.class, () -> + windowOutputFieldBuilder.apply(PATH, sortBySpec, expression, null, window), msg); + } + } + } + } + } + + private static void assertSimpleParameterWindowFunction(final String expectedFunctionName, + final TriFunction<String, Object, Window, WindowOutputField> + windowOutputFieldBuilder, + final Map<Object, BsonValue> expressions, + final Collection<Window> windows, + final boolean windowRequired) { + boolean assertNullExpressionsNotAllowed = !expressions.containsKey(NO_EXPRESSION); + for (final Map.Entry<Object, BsonValue> expressionAndEncoded: expressions.entrySet()) { + Object expression = expressionAndEncoded.getKey(); + BsonValue encodedExpression = expressionAndEncoded.getValue(); + for (final Window window : windows) { + BsonDocument expectedFunctionAndWindow = new BsonDocument(expectedFunctionName, encodedExpression); + if (window != null) { + expectedFunctionAndWindow.append("window", window.toBsonDocument()); + } + BsonField expectedWindowOutputField = new BsonField(PATH, expectedFunctionAndWindow); + Supplier<String> msg = () -> "expectedFunctionName=" + expectedFunctionName + + ", path=" + PATH + + ", expression=" + expression + + ", window=" + window + + ", windowRequired=" + windowRequired; + if (windowRequired && window == null) { + assertThrows(IllegalArgumentException.class, () -> windowOutputFieldBuilder.apply(PATH, expression, null), msg); + } else { + assertWindowOutputField(expectedWindowOutputField, windowOutputFieldBuilder.apply(PATH, expression, window), msg); + } + assertThrows(IllegalArgumentException.class, () -> windowOutputFieldBuilder.apply(null, expression, window), msg); + if (assertNullExpressionsNotAllowed) {
assertThrows(IllegalArgumentException.class, () -> windowOutputFieldBuilder.apply(PATH, null, window), msg); + } + } + } + } + + private static void assertNoParameterWindowFunction(final String expectedFunctionName, + final BiFunction<String, Window, WindowOutputField> windowOutputFieldBuilder, + final Collection<Window> windows, final boolean windowRequired) { + assertSimpleParameterWindowFunction(expectedFunctionName, + (fName, expr, window) -> windowOutputFieldBuilder.apply(fName, window), + Collections.singletonMap(NO_EXPRESSION, BsonDocument.parse(NO_EXPRESSION)), windows, windowRequired); + } + + private static void assertSimpleParameterNoWindowFunction( + final String expectedFunctionName, + final BiFunction<String, Object, WindowOutputField> windowOutputFieldBuilder, + final Map<Object, BsonValue> expressions) { + assertSimpleParameterWindowFunction( + expectedFunctionName, + (fName, expr, window) -> windowOutputFieldBuilder.apply(fName, expr), + expressions, singleton(null), false); + } + + private static void assertNoParameterNoWindowFunction(final String expectedFunctionName, + final Function<String, WindowOutputField> windowOutputFieldBuilder) { + assertNoParameterWindowFunction(expectedFunctionName, (fName, window) -> windowOutputFieldBuilder.apply(fName), + singleton(null), false); + } + + private static void assertWindowOutputField(final BsonField expected, final WindowOutputField actual, + @Nullable final Supplier<String> messageSupplier) { + assertEquals(expected.getName(), actual.toBsonField().getName(), messageSupplier); + assertEquals(expected.getValue().toBsonDocument(), actual.toBsonField().getValue().toBsonDocument(), messageSupplier); + } + + private static void assertWindowOutputField(final BsonField expected, final WindowOutputField actual) { + assertWindowOutputField(expected, actual, null); + } + + private static void assertDerivativeOrIntegral(final String expectedFunctionName, + final TriFunction<String, Object, Window, WindowOutputField> + windowOutputFieldBuilder) { + assertDerivativeOrIntegral(expectedFunctionName, + (fName, expr, window, unit) -> windowOutputFieldBuilder.apply(fName, expr, window), false); + } + + private static void assertTimeDerivativeOrIntegral(final String expectedFunctionName, + final QuadriFunction<String, Object, Window, MongoTimeUnit, WindowOutputField> + windowOutputFieldBuilder) { + assertDerivativeOrIntegral(expectedFunctionName, windowOutputFieldBuilder, true); + } + + private static void assertDerivativeOrIntegral(final String expectedFunctionName, + final QuadriFunction<String, Object, Window, MongoTimeUnit, WindowOutputField> + windowOutputFieldBuilder, + final boolean time) { + BsonDocument expectedArgs = new BsonDocument("input", STR_EXPR.getValue()); + if (time) { + expectedArgs.append("unit", new BsonString(MongoTimeUnit.DAY.value())); + } + assertWindowOutputField(new BsonField(PATH, + new BsonDocument(expectedFunctionName, expectedArgs) + .append("window", POSITION_BASED_WINDOW.toBsonDocument())), + windowOutputFieldBuilder.apply(PATH, STR_EXPR.getKey(), POSITION_BASED_WINDOW, MongoTimeUnit.DAY)); + assertThrows(IllegalArgumentException.class, () -> + windowOutputFieldBuilder.apply(PATH, STR_EXPR.getKey(), null, MongoTimeUnit.DAY)); + if (time) { + assertThrows(IllegalArgumentException.class, () -> + windowOutputFieldBuilder.apply(PATH, STR_EXPR.getKey(), POSITION_BASED_WINDOW, null)); + } + } + + private static void assertCovariance(final String expectedFunctionName, + final QuadriFunction<String, Object, Object, Window, WindowOutputField> + windowOutputFieldBuilder) { + assertWindowOutputField(new BsonField(PATH, + new BsonDocument(expectedFunctionName, new BsonArray(asList(INT_EXPR.getValue(), STR_EXPR.getValue()))) + .append("window", POSITION_BASED_WINDOW.toBsonDocument())), + windowOutputFieldBuilder.apply(PATH, INT_EXPR.getKey(),
STR_EXPR.getKey(), POSITION_BASED_WINDOW)); + assertWindowOutputField(new BsonField(PATH, + new BsonDocument(expectedFunctionName, new BsonArray(asList(INT_EXPR.getValue(), STR_EXPR.getValue())))), + windowOutputFieldBuilder.apply(PATH, INT_EXPR.getKey(), STR_EXPR.getKey(), null)); + assertAll( + () -> assertThrows(IllegalArgumentException.class, () -> + windowOutputFieldBuilder.apply(PATH, null, STR_EXPR.getKey(), POSITION_BASED_WINDOW)), + () -> assertThrows(IllegalArgumentException.class, () -> + windowOutputFieldBuilder.apply(PATH, INT_EXPR.getKey(), null, POSITION_BASED_WINDOW))); + } + + @FunctionalInterface + interface TriFunction<A1, A2, A3, R> { + R apply(@Nullable A1 a1, @Nullable A2 a2, @Nullable A3 a3); + } + + @FunctionalInterface + interface QuadriFunction<A1, A2, A3, A4, R> { + R apply(@Nullable A1 a1, @Nullable A2 a2, @Nullable A3 a3, @Nullable A4 a4); + } + + @FunctionalInterface + interface QuinqueFunction<A1, A2, A3, A4, A5, R> { + R apply(@Nullable A1 a1, @Nullable A2 a2, @Nullable A3 a3, @Nullable A4 a4, @Nullable A5 a5); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/TestWindows.java b/driver-core/src/test/unit/com/mongodb/client/model/TestWindows.java new file mode 100644 index 00000000000..6b29c0f5a06 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/TestWindows.java @@ -0,0 +1,135 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ */ +package com.mongodb.client.model; + +import com.mongodb.client.model.Windows.Bound; +import org.bson.BsonArray; +import org.bson.BsonDecimal128; +import org.bson.BsonDocument; +import org.bson.BsonDouble; +import org.bson.BsonInt32; +import org.bson.BsonInt64; +import org.bson.BsonString; +import org.bson.Document; +import org.bson.types.Decimal128; +import org.junit.jupiter.api.Test; + +import static com.mongodb.client.model.MongoTimeUnit.HOUR; +import static com.mongodb.client.model.MongoTimeUnit.MILLISECOND; +import static com.mongodb.client.model.MongoTimeUnit.MONTH; +import static com.mongodb.client.model.MongoTimeUnit.SECOND; +import static com.mongodb.client.model.Windows.Bound.CURRENT; +import static com.mongodb.client.model.Windows.Bound.UNBOUNDED; +import static com.mongodb.client.model.Windows.documents; +import static com.mongodb.client.model.Windows.range; +import static com.mongodb.client.model.Windows.timeRange; +import static java.util.Arrays.asList; +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +final class TestWindows { + @Test + void of() { + Window expected = timeRange(-1, SECOND, CURRENT); + Document windowDocument = new Document("range", asList(-1L, "current")).append("unit", SECOND.value()); + Window actualFromDocument = Windows.of(windowDocument); + Window actualFromBsonDocument = Windows.of(windowDocument.toBsonDocument()); + assertAll( + () -> assertEquals(expected.toBsonDocument(), actualFromDocument.toBsonDocument()), + () -> assertEquals(expected.toBsonDocument(), actualFromBsonDocument.toBsonDocument())); + } + + @Test + void positionBased() { + assertAll( + () -> assertEquals( + new BsonDocument("documents", new BsonArray(asList(new BsonInt32(-2), new BsonInt32(0)))), + documents(-2, 0).toBsonDocument()), + () -> assertEquals( + new BsonDocument("documents", new BsonArray(asList( + new BsonString(CURRENT.value()), new BsonInt32(Integer.MAX_VALUE)))), + documents(CURRENT, Integer.MAX_VALUE).toBsonDocument()), + () -> assertEquals( + new BsonDocument("documents", new BsonArray(asList(new BsonInt32(0), new BsonString(UNBOUNDED.value())))), + documents(0, UNBOUNDED).toBsonDocument()), + () -> assertEquals( + new BsonDocument("documents", new BsonArray(asList( + new BsonString(CURRENT.value()), new BsonString(UNBOUNDED.value())))), + documents(CURRENT, UNBOUNDED).toBsonDocument())); + assertAll( + () -> assertThrows(IllegalArgumentException.class, () -> documents(1, -1)), + () -> assertThrows(IllegalArgumentException.class, () -> documents(CURRENT, -1)), + () -> assertThrows(IllegalArgumentException.class, () -> documents(1, CURRENT)), + () -> assertThrows(IllegalArgumentException.class, () -> documents(null, 1)), + () -> assertThrows(IllegalArgumentException.class, () -> documents(1, null)), + () -> assertThrows(IllegalArgumentException.class, () -> documents(null, null))); + } + + @Test + void rangeBased() { + assertAll( + () -> assertEquals( + new BsonDocument("range", new BsonArray(asList(new BsonInt64(-1), new BsonInt64(0)))), + range(-1, 0).toBsonDocument()), + () -> assertEquals( + new BsonDocument("range", new BsonArray(asList(new BsonDouble(0), new BsonDouble(0)))), + range(0d, 0d).toBsonDocument()), + () -> assertEquals( + new BsonDocument("range", new BsonArray(asList( + new BsonDecimal128(new Decimal128(1)), new BsonDecimal128(new Decimal128(2))))), + range(new Decimal128(1), new 
Decimal128(2)).toBsonDocument()), + () -> assertEquals( + new BsonDocument("range", new BsonArray(asList(new BsonString(CURRENT.value()), new BsonDouble(0.1)))), + range(CURRENT, 0.1).toBsonDocument()), + () -> assertEquals( + new BsonDocument("range", new BsonArray(asList(new BsonDouble(0.1), new BsonString(UNBOUNDED.value())))), + range(0.1, UNBOUNDED).toBsonDocument()), + () -> assertEquals( + new BsonDocument("range", new BsonArray(asList( + new BsonString(CURRENT.value()), new BsonDecimal128(new Decimal128(Long.MAX_VALUE))))), + range(CURRENT, new Decimal128(Long.MAX_VALUE)).toBsonDocument()), + () -> assertEquals( + new BsonDocument("range", new BsonArray(asList( + new BsonDecimal128(new Decimal128(Long.MAX_VALUE)), new BsonString(UNBOUNDED.value())))), + range(new Decimal128(Long.MAX_VALUE), UNBOUNDED).toBsonDocument()), + () -> assertEquals( + new BsonDocument("range", new BsonArray(asList(new BsonInt64(-1), new BsonInt64(0)))) + .append("unit", new BsonString("millisecond")), + timeRange(-1, 0, MILLISECOND).toBsonDocument()), + () -> assertEquals( + new BsonDocument("range", new BsonArray(asList(new BsonString(CURRENT.value()), new BsonInt64(1)))) + .append("unit", new BsonString("hour")), + timeRange(CURRENT, 1, HOUR).toBsonDocument()), + () -> assertEquals( + new BsonDocument("range", new BsonArray(asList(new BsonInt64(1), new BsonString(UNBOUNDED.value())))) + .append("unit", new BsonString("month")), + timeRange(1, MONTH, UNBOUNDED).toBsonDocument())); + assertAll( + () -> assertThrows(IllegalArgumentException.class, () -> range(1, -1)), + () -> assertThrows(IllegalArgumentException.class, () -> range(null, 1)), + () -> assertThrows(IllegalArgumentException.class, () -> range(null, 0.1)), + () -> assertThrows(IllegalArgumentException.class, () -> range((Bound) null, Decimal128.POSITIVE_ZERO)), + () -> assertThrows(IllegalArgumentException.class, () -> range(1, null)), + () -> assertThrows(IllegalArgumentException.class, () -> range(0.1, null)), + () -> assertThrows(IllegalArgumentException.class, () -> range(Decimal128.POSITIVE_ZERO, (Bound) null)), + () -> assertThrows(IllegalArgumentException.class, () -> range((Decimal128) null, Decimal128.POSITIVE_ZERO)), + () -> assertThrows(IllegalArgumentException.class, () -> range(Decimal128.POSITIVE_ZERO, (Decimal128) null)), + () -> assertThrows(IllegalArgumentException.class, () -> range((Decimal128) null, (Decimal128) null)), + () -> assertThrows(IllegalArgumentException.class, () -> timeRange(1, -1, MongoTimeUnit.DAY)), + () -> assertThrows(IllegalArgumentException.class, () -> timeRange(1, 2, null))); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/UpdateOptionsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/UpdateOptionsSpecification.groovy new file mode 100644 index 00000000000..cd588936c18 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/UpdateOptionsSpecification.groovy @@ -0,0 +1,89 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model + +import org.bson.BsonDocument +import org.bson.BsonInt32 +import spock.lang.Specification + +class UpdateOptionsSpecification extends Specification { + def 'should have the expected defaults'() { + when: + def options = new UpdateOptions() + + then: + !options.isUpsert() + options.getBypassDocumentValidation() == null + options.getCollation() == null + } + + def 'should set upsert'() { + expect: + new UpdateOptions().upsert(upsert).isUpsert() == upsert + + where: + upsert << [true, false] + } + + def 'should set bypassDocumentValidation'() { + expect: + new UpdateOptions().bypassDocumentValidation(bypassValidation).getBypassDocumentValidation() == bypassValidation + + where: + bypassValidation << [null, true, false] + } + + def 'should set collation'() { + expect: + new UpdateOptions().collation(collation).getCollation() == collation + + where: + collation << [null, Collation.builder().locale('en').build()] + } + + def 'should set array filters'() { + expect: + new UpdateOptions().arrayFilters(arrayFilters).getArrayFilters() == arrayFilters + + where: + arrayFilters << [null, [], [new BsonDocument('a.b', new BsonInt32(1))]] + } + + def 'should set hint'() { + expect: + new UpdateOptions().hint(hint).getHint() == hint + + where: + hint << [null, new BsonDocument('_id', new BsonInt32(1))] + } + + def 'should set hint string'() { + expect: + new UpdateOptions().hintString(hint).getHintString() == hint + + where: + hint << [null, '_id_'] + } + + def 'should set sort'() { + expect: + new UpdateOptions().sort(sort).getSort() == sort + + where: + sort << [null, new BsonDocument('_id', new BsonInt32(1))] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/UpdatesSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/UpdatesSpecification.groovy new file mode 100644 index 00000000000..7feb2ff7ea8 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/UpdatesSpecification.groovy @@ -0,0 +1,355 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model + +import spock.lang.Specification + +import static com.mongodb.client.model.BsonHelper.toBson +import static com.mongodb.client.model.Updates.addEachToSet +import static com.mongodb.client.model.Updates.addToSet +import static com.mongodb.client.model.Updates.bitwiseAnd +import static com.mongodb.client.model.Updates.bitwiseOr +import static com.mongodb.client.model.Updates.bitwiseXor +import static com.mongodb.client.model.Updates.combine +import static com.mongodb.client.model.Updates.currentDate +import static com.mongodb.client.model.Updates.currentTimestamp +import static com.mongodb.client.model.Updates.inc +import static com.mongodb.client.model.Updates.max +import static com.mongodb.client.model.Updates.min +import static com.mongodb.client.model.Updates.mul +import static com.mongodb.client.model.Updates.popFirst +import static com.mongodb.client.model.Updates.popLast +import static com.mongodb.client.model.Updates.pull +import static com.mongodb.client.model.Updates.pullAll +import static com.mongodb.client.model.Updates.pullByFilter +import static com.mongodb.client.model.Updates.push +import static com.mongodb.client.model.Updates.pushEach +import static com.mongodb.client.model.Updates.rename +import static com.mongodb.client.model.Updates.set +import static com.mongodb.client.model.Updates.setOnInsert +import static com.mongodb.client.model.Updates.unset +import static org.bson.BsonDocument.parse + +class UpdatesSpecification extends Specification { + + def 'should render $set'() { + expect: + toBson(set('x', 1)) == parse('{$set : { x : 1} }') + toBson(set('x', null)) == parse('{$set : { x : null } }') + } + + def 'should render $setOnInsert'() { + expect: + toBson(setOnInsert('x', 1)) == parse('{$setOnInsert : { x : 1} }') + toBson(setOnInsert('x', null)) == parse('{$setOnInsert : { x : null } }') + toBson(setOnInsert(parse('{ a : 1, b: "two"}'))) == parse('{$setOnInsert : {a: 1, b: "two"} }') + + when: + toBson(setOnInsert(null)) + + then: + thrown IllegalArgumentException + } + + def 'should render $unset'() { + expect: + toBson(unset('x')) == parse('{$unset : { x : ""} }') + } + + def 'should render $rename'() { + expect: + toBson(rename('x', 'y')) == parse('{$rename : { x : "y"} }') + } + + def 'should render $inc'() { + expect: + toBson(inc('x', 1)) == parse('{$inc : { x : 1} }') + toBson(inc('x', 5L)) == parse('{$inc : { x : {$numberLong : "5"}} }') + toBson(inc('x', 3.4d)) == parse('{$inc : { x : 3.4} }') + } + + def 'should render $mul'() { + expect: + toBson(mul('x', 1)) == parse('{$mul : { x : 1} }') + toBson(mul('x', 5L)) == parse('{$mul : { x : {$numberLong : "5"}} }') + toBson(mul('x', 3.4d)) == parse('{$mul : { x : 3.4} }') + } + + def 'should render $min'() { + expect: + toBson(min('x', 42)) == parse('{$min : { x : 42} }') + } + + def 'should render $max'() { + expect: + toBson(max('x', 42)) == parse('{$max : { x : 42} }') + } + + def 'should render $currentDate'() { + expect: + toBson(currentDate('x')) == parse('{$currentDate : { x : true} }') + toBson(currentTimestamp('x')) == parse('{$currentDate : { x : {$type : "timestamp"} } }') + } + + def 'should render $addToSet'() { + expect: + toBson(addToSet('x', 1)) == parse('{$addToSet : { x : 1} }') + toBson(addEachToSet('x', [1, 2, 3])) == parse('{$addToSet : { x : { $each : [1, 2, 3] } } }') + } + + def 'should render $push'() { + expect: + toBson(push('x', 1)) == parse('{$push : { x : 1} }') + toBson(pushEach('x', [1, 2, 3], new PushOptions())) == parse('{$push : { x 
: { $each : [1, 2, 3] } } }') + toBson(pushEach('x', [parse('{score : 89}'), parse('{score : 65}')], + new PushOptions().position(0).slice(3).sortDocument(parse('{score : -1}')))) == + parse('{$push : { x : { $each : [{score : 89}, {score : 65}], $position : 0, $slice : 3, $sort : { score : -1 } } } }') + + toBson(pushEach('x', [89, 65], + new PushOptions().position(0).slice(3).sort(-1))) == + parse('{$push : { x : { $each : [89, 65], $position : 0, $slice : 3, $sort : -1 } } }') + } + + def 'should render $pull'() { + expect: + toBson(pull('x', 1)) == parse('{$pull : { x : 1} }') + toBson(pullByFilter(Filters.gte('x', 5))) == parse('{$pull : { x : { $gte : 5 }} }') + } + + def 'should render $pullAll'() { + expect: + toBson(pullAll('x', [])) == parse('{$pullAll : { x : []} }') + toBson(pullAll('x', [1, 2, 3])) == parse('{$pullAll : { x : [1, 2, 3]} }') + } + + def 'should render $pop'() { + expect: + toBson(popFirst('x')) == parse('{$pop : { x : -1} }') + toBson(popLast('x')) == parse('{$pop : { x : 1} }') + } + + + def 'should render $bit'() { + expect: + toBson(bitwiseAnd('x', 5)) == parse('{$bit : { x : {and : 5} } }') + toBson(bitwiseAnd('x', 5L)) == parse('{$bit : { x : {and : {$numberLong : "5"} } } }') + toBson(bitwiseOr('x', 5)) == parse('{$bit : { x : {or : 5} } }') + toBson(bitwiseOr('x', 5L)) == parse('{$bit : { x : {or : {$numberLong : "5"} } } }') + toBson(bitwiseXor('x', 5)) == parse('{$bit : { x : {xor : 5} } }') + toBson(bitwiseXor('x', 5L)) == parse('{$bit : { x : {xor : {$numberLong : "5"} } } }') + } + + + def 'should combine updates'() { + expect: + toBson(combine(set('x', 1))) == parse('{$set : { x : 1} }') + toBson(combine(set('x', 1), set('y', 2))) == parse('{$set : { x : 1, y : 2} }') + toBson(combine(set('x', 1), set('x', 2))) == parse('{$set : { x : 2} }') + toBson(combine(set('x', 1), inc('z', 3), set('y', 2), inc('a', 4))) == parse('''{ + $set : { x : 1, y : 2}, + $inc : { z : 3, a : 4} + }''') + + toBson(combine(combine(set('x', 1)))) == parse('{$set : { x : 1} }') + toBson(combine(combine(set('x', 1), set('y', 2)))) == parse('{$set : { x : 1, y : 2} }') + toBson(combine(combine(set('x', 1), set('x', 2)))) == parse('{$set : { x : 2} }') + + toBson(combine(combine(set('x', 1), inc('z', 3), set('y', 2), inc('a', 4)))) == parse('''{ + $set : { x : 1, y : 2}, + $inc : { z : 3, a : 4} + }''') + } + + def 'should set document'() { + expect: + toBson(set(parse('{ a : 1, b: "two"}'))) == parse('{$set : {a: 1, b: "two"} }') + } + + def 'should create string representation for simple updates'() { + expect: + set('x', 1).toString() == 'Update{fieldName=\'x\', operator=\'$set\', value=1}' + } + + def 'should create string representation for with each update'() { + expect: + addEachToSet('x', [1, 2, 3]).toString() == 'Each Update{fieldName=\'x\', operator=\'$addToSet\', values=[1, 2, 3]}' + } + + def 'should create string representation for push each update'() { + expect: + pushEach('x', [89, 65], new PushOptions().position(0).slice(3).sort(-1)).toString() == + 'Each Update{fieldName=\'x\', operator=\'$push\', values=[89, 65], ' + + 'options=Push Options{position=0, slice=3, sort=-1}}' + pushEach('x', [89, 65], new PushOptions().position(0).slice(3).sortDocument(parse('{x : 1}'))).toString() == + 'Each Update{fieldName=\'x\', operator=\'$push\', values=[89, 65], ' + + 'options=Push Options{position=0, slice=3, sortDocument={"x": 1}}}' + } + + def 'should create string representation for pull all update'() { + expect: + pullAll('x', [1, 2, 3]).toString() == 'Update{fieldName=\'x\', 
operator=\'$pullAll\', value=[1, 2, 3]}' + } + + def 'should create string representation for combined update'() { + expect: + combine(set('x', 1), inc('z', 3)).toString() == + 'Updates{updates=[' + + 'Update{fieldName=\'x\', operator=\'$set\', value=1}, ' + + 'Update{fieldName=\'z\', operator=\'$inc\', value=3}]}' + } + + def 'should test equals for SimpleBsonKeyValue'() { + expect: + setOnInsert('x', 1).equals(setOnInsert('x', 1)) + setOnInsert('x', null).equals(setOnInsert('x', null)) + setOnInsert(parse('{ a : 1, b: "two"}')).equals(setOnInsert(parse('{ a : 1, b: "two"}'))) + } + + def 'should test hashCode for SimpleBsonKeyValue'() { + expect: + setOnInsert('x', 1).hashCode() == setOnInsert('x', 1).hashCode() + setOnInsert('x', null).hashCode() == setOnInsert('x', null).hashCode() + setOnInsert(parse('{ a : 1, b: "two"}')).hashCode() == setOnInsert(parse('{ a : 1, b: "two"}')).hashCode() + } + + def 'should test equals for SimpleUpdate'() { + expect: + setOnInsert('x', 1).equals(setOnInsert('x', 1)) + setOnInsert('x', null).equals(setOnInsert('x', null)) + setOnInsert(parse('{ a : 1, b: "two"}')).equals(setOnInsert(parse('{ a : 1, b: "two"}'))) + rename('x', 'y').equals(rename('x', 'y')) + inc('x', 1).equals(inc('x', 1)) + inc('x', 5L).equals(inc('x', 5L)) + inc('x', 3.4d).equals(inc('x', 3.4d)) + mul('x', 1).equals(mul('x', 1)) + mul('x', 5L).equals(mul('x', 5L)) + mul('x', 3.4d).equals(mul('x', 3.4d)) + min('x', 42).equals(min('x', 42)) + max('x', 42).equals(max('x', 42)) + currentDate('x').equals(currentDate('x')) + currentTimestamp('x').equals(currentTimestamp('x')) + addToSet('x', 1).equals(addToSet('x', 1)) + addEachToSet('x', [1, 2, 3]).equals(addEachToSet('x', [1, 2, 3])) + push('x', 1).equals(push('x', 1)) + pull('x', 1).equals(pull('x', 1)) + popFirst('x').equals(popFirst('x')) + popLast('x').equals(popLast('x')) + } + + def 'should test hashCode for SimpleUpdate'() { + expect: + setOnInsert('x', 1).hashCode() == setOnInsert('x', 1).hashCode() + setOnInsert('x', null).hashCode() == setOnInsert('x', null).hashCode() + setOnInsert(parse('{ a : 1, b: "two"}')).hashCode() == setOnInsert(parse('{ a : 1, b: "two"}')).hashCode() + rename('x', 'y').hashCode() == rename('x', 'y').hashCode() + inc('x', 1).hashCode() == inc('x', 1).hashCode() + inc('x', 5L).hashCode() == inc('x', 5L).hashCode() + inc('x', 3.4d).hashCode() == inc('x', 3.4d).hashCode() + mul('x', 1).hashCode() == mul('x', 1).hashCode() + mul('x', 5L).hashCode() == mul('x', 5L).hashCode() + mul('x', 3.4d).hashCode() == mul('x', 3.4d).hashCode() + min('x', 42).hashCode() == min('x', 42).hashCode() + max('x', 42).hashCode() == max('x', 42).hashCode() + currentDate('x').hashCode() == currentDate('x').hashCode() + currentTimestamp('x').hashCode() == currentTimestamp('x').hashCode() + addToSet('x', 1).hashCode() == addToSet('x', 1).hashCode() + addEachToSet('x', [1, 2, 3]).hashCode() == addEachToSet('x', [1, 2, 3]).hashCode() + push('x', 1).hashCode() == push('x', 1).hashCode() + pull('x', 1).hashCode() == pull('x', 1).hashCode() + popFirst('x').hashCode() == popFirst('x').hashCode() + popLast('x').hashCode() == popLast('x').hashCode() + } + + def 'should test equals for WithEachUpdate'() { + expect: + addEachToSet('x', [1, 2, 3]).equals(addEachToSet('x', [1, 2, 3])) + } + + def 'should test hashCode for WithEachUpdate'() { + expect: + addEachToSet('x', [1, 2, 3]).hashCode() == addEachToSet('x', [1, 2, 3]).hashCode() + } + + def 'should test equals for PushUpdate'() { + expect: + pushEach('x', [1, 2, 3], new 
PushOptions()).equals(pushEach('x', [1, 2, 3], new PushOptions())) + pushEach('x', [parse('{score : 89}'), parse('{score : 65}')], + new PushOptions().position(0).slice(3).sortDocument(parse('{score : -1}'))) + .equals(pushEach('x', [parse('{score : 89}'), parse('{score : 65}')], + new PushOptions().position(0).slice(3).sortDocument(parse('{score : -1}')))) + + pushEach('x', [89, 65], + new PushOptions().position(0).slice(3).sort(-1)) + .equals(pushEach('x', [89, 65], + new PushOptions().position(0).slice(3).sort(-1))) + } + + def 'should test hashCode for PushUpdate'() { + expect: + pushEach('x', [1, 2, 3], new PushOptions()).hashCode() == pushEach('x', [1, 2, 3], new PushOptions()).hashCode() + pushEach('x', [parse('{score : 89}'), parse('{score : 65}')], + new PushOptions().position(0).slice(3).sortDocument(parse('{score : -1}'))).hashCode() == + pushEach('x', [parse('{score : 89}'), parse('{score : 65}')], + new PushOptions().position(0).slice(3).sortDocument(parse('{score : -1}'))).hashCode() + + pushEach('x', [89, 65], + new PushOptions().position(0).slice(3).sort(-1)).hashCode() == + pushEach('x', [89, 65], new PushOptions().position(0).slice(3).sort(-1)).hashCode() + } + + def 'should test equals for PullAllUpdate'() { + expect: + pullAll('x', []).equals(pullAll('x', [])) + pullAll('x', [1, 2, 3]).equals(pullAll('x', [1, 2, 3])) + } + + def 'should test hashCode for PullAllUpdate'() { + expect: + pullAll('x', []).hashCode() == pullAll('x', []).hashCode() + pullAll('x', [1, 2, 3]).hashCode() == pullAll('x', [1, 2, 3]).hashCode() + } + + def 'should test equals for CompositeUpdate'() { + expect: + combine(set('x', 1)).equals(combine(set('x', 1))) + combine(set('x', 1), set('y', 2)).equals(combine(set('x', 1), set('y', 2))) + combine(set('x', 1), set('x', 2)).equals(combine(set('x', 1), set('x', 2))) + combine(set('x', 1), inc('z', 3), set('y', 2), inc('a', 4)) + .equals(combine(set('x', 1), inc('z', 3), set('y', 2), inc('a', 4))) + + combine(combine(set('x', 1))).equals(combine(combine(set('x', 1)))) + combine(combine(set('x', 1), set('y', 2))).equals(combine(combine(set('x', 1), set('y', 2)))) + combine(combine(set('x', 1), set('x', 2))).equals(combine(combine(set('x', 1), set('x', 2)))) + + combine(combine(set('x', 1), inc('z', 3), set('y', 2), inc('a', 4))) + .equals(combine(combine(set('x', 1), inc('z', 3), set('y', 2), inc('a', 4)))) + } + + def 'should test hashCode for CompositeUpdate'() { + expect: + combine(set('x', 1)).hashCode() == combine(set('x', 1)).hashCode() + combine(set('x', 1), set('y', 2)).hashCode() == combine(set('x', 1), set('y', 2)).hashCode() + combine(set('x', 1), set('x', 2)).hashCode() == combine(set('x', 1), set('x', 2)).hashCode() + combine(set('x', 1), inc('z', 3), set('y', 2), inc('a', 4)).hashCode() == + combine(set('x', 1), inc('z', 3), set('y', 2), inc('a', 4)).hashCode() + + combine(combine(set('x', 1))).hashCode() == combine(combine(set('x', 1))).hashCode() + combine(combine(set('x', 1), set('y', 2))).hashCode() == combine(combine(set('x', 1), set('y', 2))).hashCode() + combine(combine(set('x', 1), set('x', 2))).hashCode() == combine(combine(set('x', 1), set('x', 2))).hashCode() + + combine(combine(set('x', 1), inc('z', 3), set('y', 2), inc('a', 4))).hashCode() == + combine(combine(set('x', 1), inc('z', 3), set('y', 2), inc('a', 4))).hashCode() + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/ValidationActionSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/ValidationActionSpecification.groovy new file mode 100644 
index 00000000000..e3378507c3b --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/ValidationActionSpecification.groovy @@ -0,0 +1,51 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model + +import spock.lang.Specification + +class ValidationActionSpecification extends Specification { + + def 'should return the expected string value'() { + expect: + validationAction.getValue() == expectedString + + where: + validationAction | expectedString + ValidationAction.WARN | 'warn' + ValidationAction.ERROR | 'error' + } + + def 'should support valid string representations'() { + expect: + ValidationAction.fromString(actionString) instanceof ValidationAction + + where: + actionString << ['error', 'warn', 'ERROR', 'WARN'] + } + + def 'should throw an IllegalArgumentException for invalid values'() { + when: + ValidationAction.fromString(actionString) + + then: + thrown(IllegalArgumentException) + + where: + actionString << [null, 'info'] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/ValidationLevelSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/ValidationLevelSpecification.groovy new file mode 100644 index 00000000000..86cbfc6f88d --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/ValidationLevelSpecification.groovy @@ -0,0 +1,53 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model + +import spock.lang.Specification + +class ValidationLevelSpecification extends Specification { + + def 'should return the expected string value'() { + expect: + validationLevel.getValue() == expectedString + + where: + validationLevel | expectedString + ValidationLevel.OFF | 'off' + ValidationLevel.MODERATE | 'moderate' + ValidationLevel.STRICT | 'strict' + } + + def 'should support valid string representations'() { + expect: + ValidationLevel.fromString(levelString) instanceof ValidationLevel + + where: + levelString << ['off', 'moderate', 'strict', 'OFF', 'MODERATE', 'STRICT'] + } + + def 'should throw an IllegalArgumentException for invalid values'() { + when: + ValidationLevel.fromString(levelString) + + then: + thrown(IllegalArgumentException) + + where: + levelString << [null, 'laissez-faire'] + } + +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/bulk/BaseClientDeleteOptionsTest.java b/driver-core/src/test/unit/com/mongodb/client/model/bulk/BaseClientDeleteOptionsTest.java new file mode 100644 index 00000000000..fdcba01c2d3 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/bulk/BaseClientDeleteOptionsTest.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.bulk; + +import com.mongodb.testing.MongoBaseInterfaceAssertions; +import org.junit.jupiter.api.Test; + +class BaseClientDeleteOptionsTest { + + @Test + void testAllSubInterfacesOverrideMethods() { + MongoBaseInterfaceAssertions.assertSubtypeReturn(BaseClientDeleteOptions.class); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/bulk/BaseClientUpdateOptionsTest.java b/driver-core/src/test/unit/com/mongodb/client/model/bulk/BaseClientUpdateOptionsTest.java new file mode 100644 index 00000000000..c9131452063 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/bulk/BaseClientUpdateOptionsTest.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model.bulk; + +import com.mongodb.testing.MongoBaseInterfaceAssertions; +import org.junit.jupiter.api.Test; + +class BaseClientUpdateOptionsTest { + + @Test + void testAllSubInterfacesOverrideMethods() { + MongoBaseInterfaceAssertions.assertSubtypeReturn(BaseClientUpdateOptions.class); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/bulk/BaseClientUpsertableWriteModelOptionsTest.java b/driver-core/src/test/unit/com/mongodb/client/model/bulk/BaseClientUpsertableWriteModelOptionsTest.java new file mode 100644 index 00000000000..8fecf8d14fd --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/bulk/BaseClientUpsertableWriteModelOptionsTest.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.bulk; + +import com.mongodb.testing.MongoBaseInterfaceAssertions; +import org.junit.jupiter.api.Test; + +final class BaseClientUpsertableWriteModelOptionsTest { + @Test + void testAllSubInterfacesOverrideMethods() { + MongoBaseInterfaceAssertions.assertSubtypeReturn(BaseClientUpsertableWriteModelOptions.class); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/bulk/BaseClientWriteModelOptionsTest.java b/driver-core/src/test/unit/com/mongodb/client/model/bulk/BaseClientWriteModelOptionsTest.java new file mode 100644 index 00000000000..17b3803727a --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/bulk/BaseClientWriteModelOptionsTest.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.bulk; + +import com.mongodb.testing.MongoBaseInterfaceAssertions; +import org.junit.jupiter.api.Test; + +final class BaseClientWriteModelOptionsTest { + @Test + void testAllSubInterfacesOverrideMethods() { + MongoBaseInterfaceAssertions.assertSubtypeReturn(BaseClientWriteModelOptions.class); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/changestream/ChangeStreamDocumentCodecSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/changestream/ChangeStreamDocumentCodecSpecification.groovy new file mode 100644 index 00000000000..09576c9429f --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/changestream/ChangeStreamDocumentCodecSpecification.groovy @@ -0,0 +1,389 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.changestream + +import org.bson.BsonBoolean +import org.bson.BsonDateTime +import org.bson.BsonDocument +import org.bson.BsonDocumentReader +import org.bson.BsonDocumentWriter +import org.bson.BsonInt32 +import org.bson.BsonInt64 +import org.bson.BsonReader +import org.bson.BsonTimestamp +import org.bson.Document +import org.bson.codecs.DecoderContext +import org.bson.codecs.DocumentCodecProvider +import org.bson.codecs.EncoderContext +import org.bson.codecs.ValueCodecProvider +import spock.lang.Specification + +import static java.util.Collections.singletonList +import static org.bson.codecs.configuration.CodecRegistries.fromProviders + +class ChangeStreamDocumentCodecSpecification extends Specification { + + def 'should round trip ChangeStreamDocument successfully'() { + given: + def codecRegistry = fromProviders([new DocumentCodecProvider(), new ValueCodecProvider()]) + def codec = new ChangeStreamDocumentCodec(clazz, codecRegistry) + + when: + def writer = new BsonDocumentWriter(new BsonDocument()) + codec.encode(writer, changeStreamDocument, EncoderContext.builder().build()) + + then: + BsonDocument.parse(json) == writer.getDocument() + + when: + BsonReader bsonReader = new BsonDocumentReader(writer.getDocument()) + ChangeStreamDocument actual = codec.decode(bsonReader, DecoderContext.builder().build()) + + then: + changeStreamDocument == actual + + where: + changeStreamDocument << [ + new ChangeStreamDocument(OperationType.INSERT.value, + BsonDocument.parse('{token: true}'), + BsonDocument.parse('{db: "engineering", coll: "users"}'), + NamespaceType.COLLECTION.value, + null, + Document.parse('{_id: 1, userName: "alice123", name: "Alice"}'), + null, + BsonDocument.parse('{userName: "alice123", _id: 1}'), + new BsonTimestamp(1234, 2), + null, null, null, null, + new SplitEvent(3, 4), + null + ), + new ChangeStreamDocument(OperationType.UPDATE.value, + BsonDocument.parse('{token: true}'), + BsonDocument.parse('{db: "engineering", coll: "users"}'), + NamespaceType.COLLECTION.value, + null, + null, + null, + BsonDocument.parse('{_id: 1}'), + new BsonTimestamp(1234, 2), + new UpdateDescription(['phoneNumber'], BsonDocument.parse('{email: "alice@10gen.com"}'), null), + null, null, null, null, null + ), + new ChangeStreamDocument(OperationType.UPDATE.value, + BsonDocument.parse('{token: true}'), + BsonDocument.parse('{db: "engineering", coll: "users"}'), + NamespaceType.COLLECTION.value, + null, + Document.parse('{_id: 1, userName: "alice123", name: "Alice"}'), + Document.parse('{_id: 1, userName: "alice1234", name: "Alice"}'), + BsonDocument.parse('{_id: 1}'), + new BsonTimestamp(1234, 2), + new UpdateDescription(['phoneNumber'], BsonDocument.parse('{email: "alice@10gen.com"}'), + singletonList(new TruncatedArray('education', 2))), + null, null, null, null, null + ), + new ChangeStreamDocument(OperationType.REPLACE.value, + BsonDocument.parse('{token: true}'), + BsonDocument.parse('{db: 
"engineering", coll: "users"}'), + NamespaceType.COLLECTION.value, + null, + Document.parse('{_id: 1, userName: "alice123", name: "Alice"}'), + Document.parse('{_id: 1, userName: "alice1234", name: "Alice"}'), + BsonDocument.parse('{_id: 1}'), + new BsonTimestamp(1234, 2), + null, null, null, null, null, null + ), + new ChangeStreamDocument(OperationType.DELETE.value, + BsonDocument.parse('{token: true}'), + BsonDocument.parse('{db: "engineering", coll: "users"}'), + NamespaceType.COLLECTION.value, + null, + null, + Document.parse('{_id: 1, userName: "alice123", name: "Alice"}'), + BsonDocument.parse('{_id: 1}'), + new BsonTimestamp(1234, 2), + null, null, null, null, null, null + ), + new ChangeStreamDocument(OperationType.DROP.value, + BsonDocument.parse('{token: true}'), + BsonDocument.parse('{db: "engineering", coll: "users"}'), + NamespaceType.COLLECTION.value, + null, + null, + null, + null, + new BsonTimestamp(1234, 2), + null, null, null, null, null, null + ), + new ChangeStreamDocument(OperationType.RENAME.value, + BsonDocument.parse('{token: true}'), + BsonDocument.parse('{db: "engineering", coll: "users"}'), + NamespaceType.COLLECTION.value, + BsonDocument.parse('{db: "engineering", coll: "people"}'), + null, + null, + null, + new BsonTimestamp(1234, 2), + null, null, null, null, null, null + ), + new ChangeStreamDocument(OperationType.DROP_DATABASE.value, + BsonDocument.parse('{token: true}'), + BsonDocument.parse('{db: "engineering"}'), + null, + null, + null, + null, + null, + new BsonTimestamp(1234, 2), + null, null, null, null, null, null + ), + new ChangeStreamDocument(OperationType.INVALIDATE.value, + BsonDocument.parse('{token: true}'), + null, + null, + null, + null, + null, + null, + new BsonTimestamp(1234, 2), + null, null, null, null, null, null + ), + new ChangeStreamDocument(OperationType.INSERT.value, + BsonDocument.parse('{token: true}'), + BsonDocument.parse('{db: "engineering", coll: "users"}'), + NamespaceType.COLLECTION.value, + null, + Document.parse('{_id: 1, userName: "alice123", name: "Alice"}'), + null, + BsonDocument.parse('{userName: "alice123", _id: 1}'), + new BsonTimestamp(1234, 2), + null, + new BsonInt64(1), + BsonDocument.parse('{id: 1, uid: 2}'), + new BsonDateTime(42), null, + new BsonDocument('extra', BsonBoolean.TRUE).append('value', new BsonInt32(1)) + ), + ] + clazz << [Document, Document, Document, Document, Document, Document, Document, Document, Document, Document + ] + json << [ + ''' +{ + _id: { token : true }, + operationType: 'insert', + clusterTime: { "$timestamp" : { "t" : 1234, "i" : 2 } }, + ns: { + db: 'engineering', + coll: 'users' + }, + nsType: 'collection', + documentKey: { + userName: 'alice123', + _id: 1 + }, + fullDocument: { + _id: 1, + userName: 'alice123', + name: 'Alice' + }, + splitEvent: { + fragment: 3, + of: 4 + } +} +''', + ''' +{ + _id: { token : true }, + operationType: 'update', + clusterTime: { "$timestamp" : { "t" : 1234, "i" : 2 } }, + ns: { + db: 'engineering', + coll: 'users' + }, + nsType: 'collection', + documentKey: { + _id: 1 + }, + updateDescription: { + updatedFields: { + email: 'alice@10gen.com' + }, + removedFields: ['phoneNumber'] + "truncatedArrays": [] + } +} +''', + ''' +{ + _id: { token : true }, + operationType: 'update', + clusterTime: { "$timestamp" : { "t" : 1234, "i" : 2 } }, + ns: { + db: 'engineering', + coll: 'users' + }, + nsType: 'collection', + documentKey: { + _id: 1 + }, + updateDescription: { + updatedFields: { + email: 'alice@10gen.com' + }, + removedFields: ['phoneNumber'], + 
"truncatedArrays": [ + { + "field": "education", + "newSize": 2 + } + ] + }, + fullDocument: { + _id: 1, + name: 'Alice', + userName: 'alice123' + }, + fullDocumentBeforeChange: { + _id: 1, + name: 'Alice', + userName: 'alice1234' + } +} +''', + ''' +{ + _id: { token : true }, + operationType: 'replace', + clusterTime: { "$timestamp" : { "t" : 1234, "i" : 2 } }, + ns: { + db: 'engineering', + coll: 'users' + }, + nsType: 'collection', + documentKey: { + _id: 1 + }, + fullDocument: { + _id: 1, + userName: 'alice123', + name: 'Alice' + }, + fullDocumentBeforeChange: { + _id: 1, + name: 'Alice', + userName: 'alice1234' + } +} +''', + ''' +{ + _id: { token : true }, + operationType: 'delete', + clusterTime: { "$timestamp" : { "t" : 1234, "i" : 2 } }, + ns: { + db: 'engineering', + coll: 'users' + }, + nsType: 'collection', + documentKey: { + _id: 1 + }, + fullDocumentBeforeChange: { + _id: 1, + name: 'Alice', + userName: 'alice123' + } +} +''', + ''' +{ + _id: { token : true }, + operationType: 'drop', + clusterTime: { "$timestamp" : { "t" : 1234, "i" : 2 } }, + ns: { + db: 'engineering', + coll: 'users' + } + nsType: 'collection', +} +''', + ''' +{ + _id: { token : true }, + operationType: 'rename', + clusterTime: { "$timestamp" : { "t" : 1234, "i" : 2 } }, + ns: { + db: 'engineering', + coll: 'users' + }, + nsType: 'collection', + to: { + db: 'engineering', + coll: 'people' + } +} +''', + ''' +{ + _id: { token : true }, + operationType: 'dropDatabase', + clusterTime: { "$timestamp" : { "t" : 1234, "i" : 2 } }, + ns: { + db: 'engineering' + } +} +''', + ''' +{ + _id: { token : true }, + operationType: 'invalidate', + clusterTime: { "$timestamp" : { "t" : 1234, "i" : 2 } } +} +''', + ''' +{ + _id: { token : true }, + operationType: 'insert', + clusterTime: { "$timestamp" : { "t" : 1234, "i" : 2 } }, + ns: { + db: 'engineering', + coll: 'users' + }, + nsType: 'collection', + documentKey: { + userName: 'alice123', + _id: 1 + }, + fullDocument: { + _id: 1, + userName: 'alice123', + name: 'Alice' + }, + txnNumber: NumberLong('1'), + lsid: { + id: 1, + uid: 2 + }, + wallTime: {$date: 42}, + extra: true, + value: 1 +} +''', + ] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/changestream/ChangeStreamDocumentSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/changestream/ChangeStreamDocumentSpecification.groovy new file mode 100644 index 00000000000..da6b147513e --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/changestream/ChangeStreamDocumentSpecification.groovy @@ -0,0 +1,144 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model.changestream + +import com.mongodb.MongoNamespace +import org.bson.BsonBoolean +import org.bson.BsonDateTime +import org.bson.BsonDocument +import org.bson.BsonInt64 +import org.bson.BsonTimestamp +import org.bson.RawBsonDocument +import spock.lang.Specification + +import static java.util.Collections.emptyList +import static java.util.Collections.singletonList + +class ChangeStreamDocumentSpecification extends Specification { + + def 'should initialize correctly'() { + given: + def resumeToken = RawBsonDocument.parse('{token: true}') + def namespaceDocument = BsonDocument.parse('{db: "databaseName", coll: "collectionName"}') + def namespace = new MongoNamespace('databaseName.collectionName') + def namespaceType = NamespaceType.COLLECTION + def destinationNamespaceDocument = BsonDocument.parse('{db: "databaseName2", coll: "collectionName2"}') + def destinationNamespace = new MongoNamespace('databaseName2.collectionName2') + def fullDocument = BsonDocument.parse('{key: "value for fullDocument"}') + def fullDocumentBeforeChange = BsonDocument.parse('{key: "value for fullDocumentBeforeChange"}') + def documentKey = BsonDocument.parse('{_id : 1}') + def clusterTime = new BsonTimestamp(1234, 2) + def operationType = OperationType.UPDATE + def updateDesc = new UpdateDescription(['a', 'b'], BsonDocument.parse('{c: 1}'), null) + def txnNumber = new BsonInt64(1) + def lsid = BsonDocument.parse('{id: 1, uid: 1}') + def wallTime = new BsonDateTime(42) + def splitEvent = new SplitEvent(1, 2) + def extraElements = new BsonDocument('extra', BsonBoolean.TRUE) + + when: + def changeStreamDocument = new ChangeStreamDocument(operationType.value, resumeToken, + namespaceDocument, namespaceType.value, + destinationNamespaceDocument, fullDocument, + fullDocumentBeforeChange, documentKey, + clusterTime, updateDesc, txnNumber, + lsid, wallTime, splitEvent, extraElements) + + then: + changeStreamDocument.getResumeToken() == resumeToken + changeStreamDocument.getFullDocument() == fullDocument + changeStreamDocument.getFullDocumentBeforeChange() == fullDocumentBeforeChange + changeStreamDocument.getDocumentKey() == documentKey + changeStreamDocument.getClusterTime() == clusterTime + changeStreamDocument.getNamespace() == namespace + changeStreamDocument.getNamespaceDocument() == namespaceDocument + changeStreamDocument.getNamespaceType() == namespaceType + changeStreamDocument.getNamespaceTypeString() == namespaceType.value + changeStreamDocument.getDestinationNamespace() == destinationNamespace + changeStreamDocument.getDestinationNamespaceDocument() == destinationNamespaceDocument + changeStreamDocument.getOperationTypeString() == operationType.value + changeStreamDocument.getOperationType() == operationType + changeStreamDocument.getUpdateDescription() == updateDesc + changeStreamDocument.getDatabaseName() == namespace.getDatabaseName() + changeStreamDocument.getTxnNumber() == txnNumber + changeStreamDocument.getLsid() == lsid + changeStreamDocument.getWallTime() == wallTime + changeStreamDocument.getSplitEvent() == splitEvent + changeStreamDocument.getExtraElements() == extraElements + } + + def 'should handle null namespace correctly'() { + given: + def resumeToken = RawBsonDocument.parse('{token: true}') + def fullDocument = BsonDocument.parse('{key: "value for fullDocument"}') + def fullDocumentBeforeChange = BsonDocument.parse('{key: "value for fullDocumentBeforeChange"}') + def documentKey = BsonDocument.parse('{_id : 1}') + def clusterTime = new BsonTimestamp(1234, 
2) + def operationType = OperationType.DROP_DATABASE + def updateDesc = new UpdateDescription(['a', 'b'], BsonDocument.parse('{c: 1}'), emptyList()) + def wallTime = new BsonDateTime(42) + def splitEvent = new SplitEvent(1, 2) + def extraElements = new BsonDocument('extra', BsonBoolean.TRUE) + def changeStreamDocumentNullNamespace = new ChangeStreamDocument(operationType.value, resumeToken, + (BsonDocument) null, null, (BsonDocument) null, fullDocument, fullDocumentBeforeChange, + documentKey, clusterTime, updateDesc, + null, null, wallTime, splitEvent, extraElements) + + expect: + changeStreamDocumentNullNamespace.getDatabaseName() == null + changeStreamDocumentNullNamespace.getNamespace() == null + changeStreamDocumentNullNamespace.getNamespaceType() == null + changeStreamDocumentNullNamespace.getNamespaceTypeString() == null + changeStreamDocumentNullNamespace.getNamespaceDocument() == null + changeStreamDocumentNullNamespace.getDestinationNamespace() == null + changeStreamDocumentNullNamespace.getDestinationNamespaceDocument() == null + } + + def 'should return null on missing BsonDocument elements'() { + given: + def resumeToken = RawBsonDocument.parse('{token: true}') + def namespaceDocument = BsonDocument.parse('{db: "databaseName"}') + def namespaceDocumentEmpty = new BsonDocument() + def fullDocument = BsonDocument.parse('{key: "value for fullDocument"}') + def fullDocumentBeforeChange = BsonDocument.parse('{key: "value for fullDocumentBeforeChange"}') + def documentKey = BsonDocument.parse('{_id : 1}') + def clusterTime = new BsonTimestamp(1234, 2) + def updateDesc = new UpdateDescription(['a', 'b'], BsonDocument.parse('{c: 1}'), singletonList(new TruncatedArray('d', 1))) + def wallTime = new BsonDateTime(42) + def splitEvent = new SplitEvent(1, 2) + def extraElements = new BsonDocument('extra', BsonBoolean.TRUE) + + def changeStreamDocument = new ChangeStreamDocument(null, resumeToken, namespaceDocument, null, + (BsonDocument) null, fullDocument, fullDocumentBeforeChange, documentKey, clusterTime, updateDesc, null, null, + wallTime, splitEvent, extraElements) + def changeStreamDocumentEmptyNamespace = new ChangeStreamDocument(null, resumeToken, + namespaceDocumentEmpty, null, (BsonDocument) null, fullDocument, fullDocumentBeforeChange, + documentKey, clusterTime, updateDesc, + null, null, wallTime, splitEvent, extraElements) + + expect: + changeStreamDocument.getNamespace() == null + changeStreamDocument.getNamespaceType() == null + changeStreamDocument.getNamespaceTypeString() == null + changeStreamDocument.getDatabaseName() == 'databaseName' + changeStreamDocument.getOperationTypeString() == null + changeStreamDocument.getOperationType() == null + + changeStreamDocumentEmptyNamespace.getNamespace() == null + changeStreamDocumentEmptyNamespace.getDatabaseName() == null + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/changestream/FullDocumentSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/changestream/FullDocumentSpecification.groovy new file mode 100644 index 00000000000..07cbc04b3ea --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/changestream/FullDocumentSpecification.groovy @@ -0,0 +1,53 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.changestream + +import spock.lang.Specification + +class FullDocumentSpecification extends Specification { + + def 'should return the expected string value'() { + expect: + fullDocument.getValue() == expectedString + + where: + fullDocument | expectedString + FullDocument.DEFAULT | 'default' + FullDocument.UPDATE_LOOKUP | 'updateLookup' + } + + def 'should support valid string representations'() { + expect: + FullDocument.fromString(stringValue) == fullDocument + + where: + fullDocument | stringValue + FullDocument.DEFAULT | 'default' + FullDocument.UPDATE_LOOKUP | 'updateLookup' + } + + def 'should throw an illegal argument exception for invalid values'() { + when: + FullDocument.fromString(stringValue) + + then: + thrown(IllegalArgumentException) + + where: + stringValue << [null, 'info'] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/changestream/OperationTypeSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/changestream/OperationTypeSpecification.groovy new file mode 100644 index 00000000000..b659e00f725 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/changestream/OperationTypeSpecification.groovy @@ -0,0 +1,63 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model.changestream + +import spock.lang.Specification + +class OperationTypeSpecification extends Specification { + + def 'should return the expected string value'() { + expect: + operationType.getValue() == expectedString + + where: + operationType | expectedString + OperationType.DELETE | 'delete' + OperationType.DROP | 'drop' + OperationType.DROP_DATABASE | 'dropDatabase' + OperationType.INSERT | 'insert' + OperationType.INVALIDATE | 'invalidate' + OperationType.OTHER | 'other' + OperationType.RENAME | 'rename' + OperationType.REPLACE | 'replace' + OperationType.UPDATE | 'update' + } + + def 'should support valid string representations'() { + expect: + OperationType.fromString(stringValue) == operationType + + where: + operationType | stringValue + OperationType.DELETE | 'delete' + OperationType.DROP | 'drop' + OperationType.DROP_DATABASE | 'dropDatabase' + OperationType.INSERT | 'insert' + OperationType.INVALIDATE | 'invalidate' + OperationType.OTHER | 'other' + OperationType.RENAME | 'rename' + OperationType.REPLACE | 'replace' + OperationType.UPDATE | 'update' + } + + def 'should return OTHER for new / unknown values'() { + expect: + OperationType.fromString(stringValue) == OperationType.OTHER + + where: + stringValue << [null, 'info', 'reIndex'] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/changestream/UpdateDescriptionSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/changestream/UpdateDescriptionSpecification.groovy new file mode 100644 index 00000000000..855d9ef23a2 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/changestream/UpdateDescriptionSpecification.groovy @@ -0,0 +1,44 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model.changestream + +import org.bson.BsonDocument +import spock.lang.Specification + +import static java.util.Collections.emptyList +import static java.util.Collections.singletonList + +class UpdateDescriptionSpecification extends Specification { + + def 'should create the expected UpdateDescription'() { + when: + def description = new UpdateDescription(removedFields, updatedFields, truncatedArrays, disambiguatedPaths) + + then: + description.getRemovedFields() == removedFields + description.getUpdatedFields() == updatedFields + description.getTruncatedArrays() == (truncatedArrays ?: emptyList()) + description.getDisambiguatedPaths() == disambiguatedPaths + + where: + removedFields | updatedFields | truncatedArrays | disambiguatedPaths + ['a', 'b'] | null | null | null + null | BsonDocument.parse('{c: 1}') | [] | null + ['a', 'b'] | BsonDocument.parse('{c: 1}') | singletonList(new TruncatedArray('d', 1)) | null + ['a', 'b'] | BsonDocument.parse('{c: 1}') | singletonList(new TruncatedArray('d', 1)) | BsonDocument.parse('{e: 1}') + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/densify/DensifyOptionsTest.java b/driver-core/src/test/unit/com/mongodb/client/model/densify/DensifyOptionsTest.java new file mode 100644 index 00000000000..faf45994d0a --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/densify/DensifyOptionsTest.java @@ -0,0 +1,106 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.client.model.densify; + +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.junit.jupiter.api.Test; + +import static java.util.Arrays.asList; +import static java.util.Collections.singleton; +import static java.util.Collections.singletonList; +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; + +final class DensifyOptionsTest { + @Test + void densifyOptions() { + assertEquals( + new BsonDocument(), + DensifyOptions.densifyOptions() + .toBsonDocument() + ); + } + + @Test + void partitionByFields() { + assertAll( + () -> assertEquals( + new BsonDocument() + .append("partitionByFields", new BsonArray(singletonList(new BsonString("$fieldName")))), + DensifyOptions.densifyOptions() + .partitionByFields("$fieldName") + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument() + .append("partitionByFields", new BsonArray(asList(new BsonString("$fieldName1"), new BsonString("$fieldName2")))), + DensifyOptions.densifyOptions() + .partitionByFields("$fieldName1", "$fieldName2") + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument() + .append("partitionByFields", new BsonArray(asList(new BsonString("$fieldName1"), new BsonString("$fieldName2")))), + DensifyOptions.densifyOptions() + .partitionByFields(asList("$fieldName1", "$fieldName2")) + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument() + .append("partitionByFields", new BsonArray(singletonList(new BsonString("fieldName2")))), + DensifyOptions.densifyOptions() + .partitionByFields("$fieldName1") + .partitionByFields(singleton("fieldName2")) + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument(), + DensifyOptions.densifyOptions() + .partitionByFields(singleton("$fieldName1")) + .partitionByFields() + .toBsonDocument() + ) + ); + } + + @Test + void option() { + assertEquals( + DensifyOptions.densifyOptions() + .option("partitionByFields", new BsonArray(singletonList(new BsonString("fieldName")))) + .toBsonDocument(), + DensifyOptions.densifyOptions() + .option("partitionByFields", singleton("fieldName")) + .toBsonDocument() + ); + } + + @Test + void options() { + assertEquals( + new BsonDocument() + .append("partitionByFields", new BsonArray(singletonList(new BsonString("fieldName")))) + .append("name", new BsonInt32(42)), + DensifyOptions.densifyOptions() + .partitionByFields("fieldName") + .option("name", 42) + .toBsonDocument() + ); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/densify/DensifyRangeTest.java b/driver-core/src/test/unit/com/mongodb/client/model/densify/DensifyRangeTest.java new file mode 100644 index 00000000000..4b667f94eab --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/densify/DensifyRangeTest.java @@ -0,0 +1,142 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.client.model.densify; + +import com.mongodb.client.model.MongoTimeUnit; +import org.bson.BsonArray; +import org.bson.BsonDateTime; +import org.bson.BsonDecimal128; +import org.bson.BsonDocument; +import org.bson.BsonDouble; +import org.bson.BsonInt32; +import org.bson.BsonInt64; +import org.bson.BsonString; +import org.bson.Document; +import org.bson.types.Decimal128; +import org.junit.jupiter.api.Test; + +import java.math.BigDecimal; +import java.time.Instant; + +import static java.util.Arrays.asList; +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; + +final class DensifyRangeTest { + @Test + void of() { + assertEquals( + docExamplePredefined() + .toBsonDocument(), + DensifyRange.of(docExampleCustom()) + .toBsonDocument() + ); + } + + @Test + void numberRangeFull() { + assertAll( + () -> assertEquals( + new BsonDocument("bounds", new BsonString("full")).append("step", new BsonInt32(1)), + DensifyRange.fullRangeWithStep(1) + .toBsonDocument() + ) + ); + } + + @Test + void numberRangePartition() { + assertAll( + () -> assertEquals( + new BsonDocument("bounds", new BsonString("partition")).append("step", new BsonDouble(0.5)), + DensifyRange.partitionRangeWithStep(0.5) + .toBsonDocument() + ) + ); + } + + @Test + void numberRange() { + assertEquals( + new BsonDocument("bounds", new BsonArray(asList( + new BsonDecimal128(new Decimal128(new BigDecimal("-10.5"))), + new BsonDecimal128(new Decimal128(new BigDecimal("-10.5")))))) + .append("step", new BsonDecimal128(new Decimal128(BigDecimal.ONE))), + DensifyRange.rangeWithStep( + new BigDecimal("-10.5"), + new BigDecimal("-10.5"), + BigDecimal.ONE) + .toBsonDocument() + ); + } + + @Test + void dateRangeFull() { + assertAll( + () -> assertEquals( + new BsonDocument("bounds", new BsonString("full")) + .append("step", new BsonInt64(1)).append("unit", new BsonString(MongoTimeUnit.MILLISECOND.value())), + DensifyRange.fullRangeWithStep( + 1, MongoTimeUnit.MILLISECOND) + .toBsonDocument() + ) + ); + } + + @Test + void dateRangePartition() { + assertAll( + () -> assertEquals( + docExampleCustom() + .toBsonDocument(), + docExamplePredefined() + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("bounds", new BsonString("partition")) + .append("step", new BsonInt64(1)) + .append("unit", new BsonString(MongoTimeUnit.MILLISECOND.value())), + DensifyRange.partitionRangeWithStep(1, MongoTimeUnit.MILLISECOND) + .toBsonDocument() + ) + ); + } + + @Test + void dateRange() { + assertEquals( + new BsonDocument("bounds", new BsonArray(asList( + new BsonDateTime(0), + new BsonDateTime(2)))) + .append("step", new BsonInt64(1)).append("unit", new BsonString(MongoTimeUnit.MILLISECOND.value())), + DensifyRange.rangeWithStep( + Instant.EPOCH, + Instant.ofEpochMilli(2), + 1, MongoTimeUnit.MILLISECOND) + .toBsonDocument() + ); + } + + private static DensifyRange docExamplePredefined() { + return DensifyRange.partitionRangeWithStep( + 1, MongoTimeUnit.MINUTE); + } + + private static Document docExampleCustom() { + return new Document("bounds", "partition") + .append("step", 1L).append("unit", MongoTimeUnit.MINUTE.value()); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/fill/FillOptionsTest.java b/driver-core/src/test/unit/com/mongodb/client/model/fill/FillOptionsTest.java new file mode 100644 index 00000000000..fd94699a915 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/fill/FillOptionsTest.java @@ -0,0 +1,151 @@ +/* + 
* Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.fill; + +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.junit.jupiter.api.Test; + +import static com.mongodb.client.model.Sorts.descending; +import static java.util.Arrays.asList; +import static java.util.Collections.singleton; +import static java.util.Collections.singletonList; +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; + +final class FillOptionsTest { + @Test + void fillOptions() { + assertEquals( + new BsonDocument(), + FillOptions.fillOptions() + .toBsonDocument() + ); + } + + @Test + void partitionBy() { + assertAll( + () -> assertEquals( + new BsonDocument() + .append("partitionBy", new BsonString("$fieldName")), + FillOptions.fillOptions() + .partitionBy("$fieldName") + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument() + .append("partitionBy", new BsonString("$fieldName2")), + FillOptions.fillOptions() + .partitionByFields("fieldName1") + // partitionBy overrides partitionByFields + .partitionBy("$fieldName2") + .toBsonDocument() + ) + ); + } + + @Test + void partitionByFields() { + assertAll( + () -> assertEquals( + new BsonDocument() + .append("partitionByFields", new BsonArray(singletonList(new BsonString("$fieldName")))), + FillOptions.fillOptions() + .partitionByFields("$fieldName") + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument() + .append("partitionByFields", new BsonArray(asList(new BsonString("$fieldName1"), new BsonString("$fieldName2")))), + FillOptions.fillOptions() + .partitionByFields("$fieldName1", "$fieldName2") + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument() + .append("partitionByFields", new BsonArray(asList(new BsonString("$fieldName1"), new BsonString("$fieldName2")))), + FillOptions.fillOptions() + .partitionByFields(asList("$fieldName1", "$fieldName2")) + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument() + .append("partitionByFields", new BsonArray(singletonList(new BsonString("fieldName2")))), + FillOptions.fillOptions() + .partitionBy("$fieldName1") + // partitionByFields overrides partitionBy + .partitionByFields("fieldName2") + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument(), + FillOptions.fillOptions() + .partitionBy("$fieldName1") + // partitionByFields overrides partitionBy + .partitionByFields() + .toBsonDocument() + ) + ); + } + + @Test + void sortBy() { + assertEquals( + new BsonDocument() + .append("sortBy", descending("fieldName").toBsonDocument()), + FillOptions.fillOptions() + .sortBy(descending("fieldName")) + .toBsonDocument() + ); + } + + @Test + void option() { + assertAll( + () -> assertEquals( + FillOptions.fillOptions() + .partitionByFields("fieldName") + .toBsonDocument(), + FillOptions.fillOptions() + .option("partitionByFields", new BsonArray(singletonList(new BsonString("fieldName")))) + 
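+                                // this assertion verifies that option() given a raw BsonValue encodes the same document as the typed partitionByFields method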
.toBsonDocument() + ), + () -> assertEquals( + FillOptions.fillOptions() + .option("partitionByFields", singleton("fieldName")) + .toBsonDocument(), + FillOptions.fillOptions() + .option("partitionByFields", new BsonArray(singletonList(new BsonString("fieldName")))) + .toBsonDocument() + ) + ); + } + + @Test + void options() { + assertEquals( + new BsonDocument() + .append("partitionByFields", new BsonArray(singletonList(new BsonString("fieldName1")))) + .append("sortBy", descending("fieldName2").toBsonDocument()), + FillOptions.fillOptions() + .partitionByFields("fieldName1") + .sortBy(descending("fieldName2")) + .toBsonDocument() + ); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/fill/FillOutputFieldTest.java b/driver-core/src/test/unit/com/mongodb/client/model/fill/FillOutputFieldTest.java new file mode 100644 index 00000000000..12764ffcc7f --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/fill/FillOutputFieldTest.java @@ -0,0 +1,71 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.fill; + +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.Document; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +final class FillOutputFieldTest { + @Test + void of() { + assertEquals( + docExamplePredefined() + .toBsonDocument(), + FillOutputField.of(docExampleCustom()) + .toBsonDocument() + ); + } + + @Test + void value() { + assertEquals( + new BsonDocument("fieldName1", new BsonDocument("value", new BsonString("$fieldName2"))), + FillOutputField.value("fieldName1", "$fieldName2") + .toBsonDocument() + ); + } + + @Test + void locf() { + assertEquals( + docExampleCustom() + .toBsonDocument(), + docExamplePredefined() + .toBsonDocument() + ); + } + + @Test + void linear() { + assertEquals( + new BsonDocument("fieldName", new BsonDocument("method", new BsonString("linear"))), + FillOutputField.linear("fieldName") + .toBsonDocument() + ); + } + + private static FillOutputField docExamplePredefined() { + return FillOutputField.locf("fieldName"); + } + + private static Document docExampleCustom() { + return new Document("fieldName", new Document("method", "locf")); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/geojson/GeometryCollectionSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/geojson/GeometryCollectionSpecification.groovy new file mode 100644 index 00000000000..eebf63b71d1 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/geojson/GeometryCollectionSpecification.groovy @@ -0,0 +1,63 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.geojson + +import groovy.transform.CompileStatic +import spock.lang.Specification + +class GeometryCollectionSpecification extends Specification { + def geometries = [new Point(new Position(1d, 2d)), new Point(new Position(2d, 2d))] + + @CompileStatic + @SuppressWarnings('UnusedVariable') + def 'constructor should accept lists containing subtype of Geometry'() { + expect: + GeometryCollection gc = new GeometryCollection((List) geometries) + } + + def 'constructor should set geometries'() { + expect: + new GeometryCollection(geometries).geometries == geometries + } + + def 'constructors should throw if preconditions are violated'() { + when: + new GeometryCollection(null) + + then: + thrown(IllegalArgumentException) + + when: + new GeometryCollection([new Point(new Position(1d, 2d)), new Position([40.0d, 19.0d]), null]) + + then: + thrown(IllegalArgumentException) + } + + def 'should get type'() { + expect: + new GeometryCollection(geometries).type == GeoJsonObjectType.GEOMETRY_COLLECTION + } + + def 'equals, hashcode and toString should be overridden'() { + expect: + new GeometryCollection(geometries) == new GeometryCollection(geometries) + new GeometryCollection(geometries).hashCode() == new GeometryCollection(geometries).hashCode() + new GeometryCollection(geometries).toString() == + 'GeometryCollection{geometries=[Point{coordinate=Position{values=[1.0, 2.0]}}, Point{coordinate=Position{values=[2.0, 2.0]}}]}' + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/geojson/LineStringSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/geojson/LineStringSpecification.groovy new file mode 100644 index 00000000000..410ea241f13 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/geojson/LineStringSpecification.groovy @@ -0,0 +1,77 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model.geojson + +import spock.lang.Specification + +import static com.mongodb.client.model.geojson.NamedCoordinateReferenceSystem.EPSG_4326_STRICT_WINDING + +class LineStringSpecification extends Specification { + def coordinates = [new Position([40.0d, 18.0d]), + new Position([40.0d, 19.0d]), + new Position([41.0d, 19.0d]), + new Position([40.0d, 18.0d])] + + def 'constructor should set coordinates'() { + expect: + new LineString(coordinates).coordinates == coordinates + } + + def 'constructor should set coordinate reference system'() { + expect: + new LineString(coordinates).coordinateReferenceSystem == null + new LineString(EPSG_4326_STRICT_WINDING, coordinates).coordinateReferenceSystem == EPSG_4326_STRICT_WINDING + } + + def 'constructors should throw if preconditions are violated'() { + when: + new LineString(null) + + then: + thrown(IllegalArgumentException) + + when: + new LineString([new Position([40.0d, 18.0d])]) + + then: + thrown(IllegalArgumentException) + + when: + new LineString([new Position([40.0d, 18.0d]), + new Position([40.0d, 19.0d]), + null]) + + then: + thrown(IllegalArgumentException) + } + + def 'should get type'() { + expect: + new LineString(coordinates).type == GeoJsonObjectType.LINE_STRING + } + + def 'equals, hashcode and toString should be overridden'() { + expect: + new LineString(coordinates) == new LineString(coordinates) + new LineString(coordinates).hashCode() == new LineString(coordinates).hashCode() + new LineString(coordinates).toString() == + 'LineString{coordinates=[Position{values=[40.0, 18.0]}, ' + + 'Position{values=[40.0, 19.0]}, ' + + 'Position{values=[41.0, 19.0]}, ' + + 'Position{values=[40.0, 18.0]}]}' + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/geojson/MultiLineStringSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/geojson/MultiLineStringSpecification.groovy new file mode 100644 index 00000000000..e5119c58cf7 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/geojson/MultiLineStringSpecification.groovy @@ -0,0 +1,73 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model.geojson + +import spock.lang.Specification + +import static com.mongodb.client.model.geojson.NamedCoordinateReferenceSystem.EPSG_4326_STRICT_WINDING + + +class MultiLineStringSpecification extends Specification { + def coordinates = [[new Position([1.0d, 1.0d]), new Position([2.0d, 2.0d]), new Position([3.0d, 4.0d])], + [new Position([2.0d, 3.0d]), new Position([3.0d, 2.0d]), new Position([4.0d, 4.0d])]] + + def 'constructor should set coordinates'() { + expect: + new MultiLineString(coordinates).coordinates == coordinates + } + + def 'constructor should set coordinate reference system'() { + expect: + new MultiLineString(coordinates).coordinateReferenceSystem == null + new MultiLineString(EPSG_4326_STRICT_WINDING, coordinates).coordinateReferenceSystem == EPSG_4326_STRICT_WINDING + } + + def 'constructors should throw if preconditions are violated'() { + when: + new MultiLineString(null) + + then: + thrown(IllegalArgumentException) + + when: + new MultiLineString([[new Position([40.0d, 18.0d]), new Position([40.0d, 19.0d])], null]) + + then: + thrown(IllegalArgumentException) + + when: + new MultiLineString([[new Position([40.0d, 18.0d]), new Position([40.0d, 19.0d]), null]]) + + then: + thrown(IllegalArgumentException) + } + + def 'should get type'() { + expect: + new MultiLineString(coordinates).type == GeoJsonObjectType.MULTI_LINE_STRING + } + + def 'equals, hashcode and toString should be overridden'() { + expect: + new MultiLineString(coordinates) == new MultiLineString(coordinates) + new MultiLineString(coordinates).hashCode() == new MultiLineString(coordinates).hashCode() + new MultiLineString(coordinates).toString() == + 'MultiLineString{coordinates=[' + + '[Position{values=[1.0, 1.0]}, Position{values=[2.0, 2.0]}, Position{values=[3.0, 4.0]}], ' + + '[Position{values=[2.0, 3.0]}, Position{values=[3.0, 2.0]}, Position{values=[4.0, 4.0]}]]}' + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/geojson/MultiPointSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/geojson/MultiPointSpecification.groovy new file mode 100644 index 00000000000..3c93dea30ab --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/geojson/MultiPointSpecification.groovy @@ -0,0 +1,71 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model.geojson + +import spock.lang.Specification + +import static com.mongodb.client.model.geojson.NamedCoordinateReferenceSystem.EPSG_4326_STRICT_WINDING + + +class MultiPointSpecification extends Specification { + def coordinates = [new Position([40.0d, 18.0d]), + new Position([40.0d, 19.0d]), + new Position([41.0d, 19.0d]), + new Position([40.0d, 18.0d])] + + def 'constructor should set coordinates'() { + expect: + new MultiPoint(coordinates).coordinates == coordinates + } + + def 'constructor should set coordinate reference system'() { + expect: + new MultiPoint(coordinates).coordinateReferenceSystem == null + new MultiPoint(EPSG_4326_STRICT_WINDING, coordinates).coordinateReferenceSystem == EPSG_4326_STRICT_WINDING + } + + def 'constructors should throw if preconditions are violated'() { + when: + new MultiPoint(null) + + then: + thrown(IllegalArgumentException) + + when: + new MultiPoint([new Position([40.0d, 18.0d]), + null]) + + then: + thrown(IllegalArgumentException) + } + + def 'should get type'() { + expect: + new MultiPoint(coordinates).type == GeoJsonObjectType.MULTI_POINT + } + + def 'equals, hashcode and toString should be overridden'() { + expect: + new MultiPoint(coordinates) == new MultiPoint(coordinates) + new MultiPoint(coordinates).hashCode() == new MultiPoint(coordinates).hashCode() + new MultiPoint(coordinates).toString() == + 'MultiPoint{coordinates=[Position{values=[40.0, 18.0]}, ' + + 'Position{values=[40.0, 19.0]}, ' + + 'Position{values=[41.0, 19.0]}, ' + + 'Position{values=[40.0, 18.0]}]}' + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/geojson/MultiPolygonSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/geojson/MultiPolygonSpecification.groovy new file mode 100644 index 00000000000..4877339286e --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/geojson/MultiPolygonSpecification.groovy @@ -0,0 +1,78 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model.geojson + +import spock.lang.Specification + +import static com.mongodb.client.model.geojson.NamedCoordinateReferenceSystem.EPSG_4326_STRICT_WINDING + + +class MultiPolygonSpecification extends Specification { + def exteriorOne = [new Position([40.0d, 18.0d]), + new Position([40.0d, 19.0d]), + new Position([41.0d, 19.0d]), + new Position([40.0d, 18.0d])] + def coordinatesOne = new PolygonCoordinates(exteriorOne) + + def exteriorTwo = [new Position([80.0d, 18.0d]), + new Position([80.0d, 19.0d]), + new Position([81.0d, 19.0d]), + new Position([80.0d, 18.0d])] + def coordinatesTwo = new PolygonCoordinates(exteriorTwo) + + def 'constructor should set coordinates'() { + expect: + new MultiPolygon([coordinatesOne, coordinatesTwo]).coordinates == [coordinatesOne, coordinatesTwo] + } + + def 'constructor should set coordinate reference system'() { + expect: + new MultiPolygon([coordinatesOne]).coordinateReferenceSystem == null + new MultiPolygon(EPSG_4326_STRICT_WINDING, [coordinatesOne]).coordinateReferenceSystem == EPSG_4326_STRICT_WINDING + } + + def 'constructors should throw if preconditions are violated'() { + when: + new MultiPolygon(null) + + then: + thrown(IllegalArgumentException) + + when: + new MultiPolygon([coordinatesOne, null]) + + then: + thrown(IllegalArgumentException) + } + + def 'should get type'() { + expect: + new MultiPolygon([coordinatesOne]).type == GeoJsonObjectType.MULTI_POLYGON + } + + def 'equals, hashcode and toString should be overridden'() { + expect: + new MultiPolygon([coordinatesOne, coordinatesTwo]) == new MultiPolygon([coordinatesOne, coordinatesTwo]) + new MultiPolygon([coordinatesOne, coordinatesTwo]).hashCode() == new MultiPolygon([coordinatesOne, coordinatesTwo]).hashCode() + new MultiPolygon([coordinatesOne, coordinatesTwo]).toString() == + 'MultiPolygon{coordinates=[' + + 'PolygonCoordinates{exterior=[Position{values=[40.0, 18.0]}, Position{values=[40.0, 19.0]}, Position{values=[41.0, 19.0]}, ' + + 'Position{values=[40.0, 18.0]}]}, ' + + 'PolygonCoordinates{exterior=[Position{values=[80.0, 18.0]}, Position{values=[80.0, 19.0]}, Position{values=[81.0, 19.0]}, ' + + 'Position{values=[80.0, 18.0]}]}]}' + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/geojson/PointSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/geojson/PointSpecification.groovy new file mode 100644 index 00000000000..af4762f760d --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/geojson/PointSpecification.groovy @@ -0,0 +1,62 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model.geojson + +import spock.lang.Specification + +import static com.mongodb.client.model.geojson.NamedCoordinateReferenceSystem.EPSG_4326_STRICT_WINDING + + +class PointSpecification extends Specification { + def 'constructor should set coordinates'() { + expect: + new Point(new Position(1.0d, 2.0d)).coordinates == new Position(1.0d, 2.0d) + new Point(new Position(1.0d, 2.0d)).position == new Position(1.0d, 2.0d) + } + + def 'constructor should set coordinate reference system'() { + expect: + new Point(new Position(1.0d, 2.0d)).coordinateReferenceSystem == null + new Point(EPSG_4326_STRICT_WINDING, new Position(1.0d, 2.0d)).coordinateReferenceSystem == EPSG_4326_STRICT_WINDING + } + + def 'constructors should throw if preconditions are violated'() { + when: + new Point(null) + + then: + thrown(IllegalArgumentException) + + when: + new Point(EPSG_4326_STRICT_WINDING, null) + + then: + thrown(IllegalArgumentException) + } + + def 'should get type'() { + expect: + new Point(new Position(1.0d, 2.0d)).type == GeoJsonObjectType.POINT + } + + def 'equals, hashcode and toString should be overridden'() { + expect: + new Point(new Position(1.0d, 2.0d)) == new Point(new Position(1.0d, 2.0d)) + new Point(new Position(1.0d, 2.0d)).hashCode() == new Point(new Position(1.0d, 2.0d)).hashCode() + new Point(new Position(1.0d, 2.0d)).toString() == 'Point{coordinate=Position{values=[1.0, 2.0]}}' + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/geojson/PolygonSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/geojson/PolygonSpecification.groovy new file mode 100644 index 00000000000..f44c9f953a4 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/geojson/PolygonSpecification.groovy @@ -0,0 +1,121 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model.geojson + +import spock.lang.Specification + +import static com.mongodb.client.model.geojson.NamedCoordinateReferenceSystem.EPSG_4326_STRICT_WINDING + + +class PolygonSpecification extends Specification { + def exterior = [new Position([40.0d, 18.0d]), + new Position([40.0d, 19.0d]), + new Position([41.0d, 19.0d]), + new Position([40.0d, 18.0d])] + def coordinates = new PolygonCoordinates(exterior) + + def 'constructor should set coordinates'() { + expect: + new Polygon(exterior).coordinates == coordinates + } + + def 'constructor should set coordinate reference system'() { + expect: + new Polygon(exterior).coordinateReferenceSystem == null + new Polygon(EPSG_4326_STRICT_WINDING, coordinates).coordinateReferenceSystem == EPSG_4326_STRICT_WINDING + } + + def 'constructors should throw if preconditions are violated'() { + when: + new Polygon(null) + + then: + thrown(IllegalArgumentException) + + when: + new Polygon([new Position([40.0d, 18.0d]), + new Position([40.0d, 19.0d]), + new Position([41.0d, 19.0d])]) + + then: + thrown(IllegalArgumentException) + + when: + new Polygon([new Position([40.0d, 18.0d]), + new Position([40.0d, 19.0d]), + null]) + + then: + thrown(IllegalArgumentException) + + when: + new Polygon([new Position([40.0d, 18.0d]), + new Position([40.0d, 19.0d]), + new Position([41.0d, 19.0d]), + new Position([1.0, 2.0])]) + + then: + thrown(IllegalArgumentException) + + when: + new Polygon(exterior, [null]) + + then: + thrown(IllegalArgumentException) + + when: + new Polygon(exterior, [[new Position([40.0d, 18.0d]), + new Position([40.0d, 19.0d]), + null]]) + + then: + thrown(IllegalArgumentException) + + when: + new Polygon(exterior, [[new Position([40.0d, 18.0d]), + new Position([40.0d, 19.0d]), + new Position([41.0d, 19.0d])]]) + + then: + thrown(IllegalArgumentException) + + when: + new Polygon(exterior, [[new Position([40.0d, 18.0d]), + new Position([40.0d, 19.0d]), + new Position([41.0d, 19.0d]), + new Position([1.0, 2.0])]]) + + then: + thrown(IllegalArgumentException) + } + + def 'should get type'() { + expect: + new Polygon(exterior).type == GeoJsonObjectType.POLYGON + } + + def 'equals, hashcode and toString should be overridden'() { + expect: + new Polygon(exterior) == new Polygon(exterior) + new Polygon(exterior).hashCode() == new Polygon(exterior).hashCode() + new Polygon(exterior).toString() == + 'Polygon{exterior=[Position{values=[40.0, 18.0]}, ' + + 'Position{values=[40.0, 19.0]}, ' + + 'Position{values=[41.0, 19.0]}, ' + + 'Position{values=[40.0, 18.0]}]}' + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/geojson/PositionSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/geojson/PositionSpecification.groovy new file mode 100644 index 00000000000..d1ea6ab9147 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/geojson/PositionSpecification.groovy @@ -0,0 +1,71 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.geojson + +import spock.lang.Specification + + +class PositionSpecification extends Specification { + def 'constructors should set values'() { + expect: + new Position([1.0d, 2.0d]).values == [1.0d, 2.0d] + new Position(1.0d, 2.0d).values == [1.0d, 2.0d] + new Position(1.0d, 2.0d, 3.0d).values == [1.0d, 2.0d, 3.0d] + new Position(1.0d, 2.0d, 3.0d, 4.0d).values == [1.0d, 2.0d, 3.0d, 4.0d] + } + + def 'constructors should set unmodifiable'() { + when: + new Position([1.0d, 2.0d]).values[0] = 3.0d + + then: + thrown(UnsupportedOperationException) + + when: + new Position(1.0d, 2.0d).values[0] = 3.0d + + then: + thrown(UnsupportedOperationException) + } + + def 'constructor should throw when preconditions are violated'() { + when: + new Position(null) + + then: + thrown(IllegalArgumentException) + + when: + new Position([1.0]) + + then: + thrown(IllegalArgumentException) + + when: + new Position([1.0, null]) + + then: + thrown(IllegalArgumentException) + } + + def 'equals, hashcode and toString should be overridden'() { + expect: + new Position(1.0d, 2.0d) == new Position(1.0d, 2.0d) + new Position(1.0d, 2.0d).hashCode() == new Position(1.0d, 2.0d).hashCode() + new Position(1.0d, 2.0d).toString() == 'Position{values=[1.0, 2.0]}' + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/geojson/codecs/GeometryCodecSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/geojson/codecs/GeometryCodecSpecification.groovy new file mode 100644 index 00000000000..4468555821c --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/geojson/codecs/GeometryCodecSpecification.groovy @@ -0,0 +1,127 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model.geojson.codecs + +import com.mongodb.client.model.geojson.GeoJsonObjectType +import com.mongodb.client.model.geojson.Geometry +import com.mongodb.client.model.geojson.GeometryCollection +import com.mongodb.client.model.geojson.LineString +import com.mongodb.client.model.geojson.MultiLineString +import com.mongodb.client.model.geojson.MultiPoint +import com.mongodb.client.model.geojson.MultiPolygon +import com.mongodb.client.model.geojson.Point +import com.mongodb.client.model.geojson.Polygon +import com.mongodb.client.model.geojson.PolygonCoordinates +import com.mongodb.client.model.geojson.Position +import org.bson.BsonDocument +import org.bson.BsonDocumentReader +import org.bson.BsonDocumentWriter +import org.bson.codecs.DecoderContext +import org.bson.codecs.EncoderContext +import org.bson.codecs.configuration.CodecConfigurationException +import spock.lang.Specification + +import static org.bson.BsonDocument.parse +import static org.bson.codecs.configuration.CodecRegistries.fromProviders + +class GeometryCodecSpecification extends Specification { + def registry = fromProviders([new GeoJsonCodecProvider()]) + def codec = registry.get(Geometry) + def writer = new BsonDocumentWriter(new BsonDocument()) + def context = EncoderContext.builder().build() + + def 'should round trip known geometries'() { + when: + codec.encode(writer, geometry, context) + + then: + writer.document == parse(geoJson) + + when: + def decodedGeometry = codec.decode(new BsonDocumentReader(writer.document), DecoderContext.builder().build()) + + then: + geometry == decodedGeometry + + where: + geometry << [ + new LineString([new Position(101d, 0d), new Position(102d, 1d)]), + new MultiLineString([[new Position([1.0d, 1.0d]), new Position([2.0d, 2.0d]), new Position([3.0d, 4.0d])], + [new Position([2.0d, 3.0d]), new Position([3.0d, 2.0d]), new Position([4.0d, 4.0d])]]), + new Point(new Position(100d, 0d)), + new MultiPoint([new Position([40.0d, 18.0d]), new Position([40.0d, 19.0d]), new Position([41.0d, 19.0d])]), + new Polygon([new Position([40.0d, 18.0d]), new Position([40.0d, 19.0d]), new Position([41.0d, 19.0d]), + new Position([40.0d, 18.0d])]), + new MultiPolygon([new PolygonCoordinates([new Position(102.0, 2.0), new Position(103.0, 2.0), + new Position(103.0, 3.0), new Position(102.0, 3.0), + new Position(102.0, 2.0)]), + new PolygonCoordinates([new Position(100.0, 0.0), new Position(101.0, 0.0), + new Position(101.0, 1.0), new Position(100.0, 1.0), + new Position(100.0, 0.0)], + [[new Position(100.2, 0.2), new Position(100.8, 0.2), + new Position(100.8, 0.8), new Position(100.2, 0.8), + new Position(100.2, 0.2)]])]), + new GeometryCollection([new Point(new Position(100d, 0d)), + new LineString([new Position(101d, 0d), new Position(102d, 1d)])]) + ] + + geoJson << [ + '{type: "LineString", coordinates: [ [101.0, 0.0], [102.0, 1.0] ]}', + '{type: "MultiLineString", coordinates: [[[1.0, 1.0], [2.0, 2.0], [3.0, 4.0]], [[2.0, 3.0], [3.0, 2.0], [4.0, 4.0]]]}', + '{type: "Point", coordinates: [100.0, 0.0]}', + '{type: "MultiPoint", coordinates: [[40.0, 18.0], [40.0, 19.0], [41.0, 19.0]]}', + '{type: "Polygon", coordinates: [[[40.0, 18.0], [40.0, 19.0], [41.0, 19.0], [40.0, 18.0]]]}', + '''{type: "MultiPolygon", coordinates: [[[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]], + [[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]], + [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]]}''', + '''{ type: 
"GeometryCollection", geometries: [{ type: "Point", coordinates: [100.0, 0.0]}, + { type: "LineString", coordinates: [ [101.0, 0.0], [102.0, 1.0] ]}]}''' + ] + } + + def 'should throw when decoding invalid documents'() { + when: + codec.decode(new BsonDocumentReader(parse(invalidJson)), DecoderContext.builder().build()) + + then: + thrown(CodecConfigurationException) + + where: + invalidJson << [ + '{type: "GeoShard", coordinates: [100.0, 0.0]}', + '{coordinates: [100.0, 0.0]}', + '{type: "Point", coordinates: [40.0, 18.0], crs : {type: "link", properties: {href: "http://example.com/crs/42"}}}', + ] + } + + def 'should not support unknown geometries'() { + given: + def geometry = new Geometry() { + @Override + GeoJsonObjectType getType() { + GeoJsonObjectType.POINT + } + } + + + when: + codec.encode(writer, geometry, context) + + then: + thrown(CodecConfigurationException) + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/geojson/codecs/GeometryCollectionCodecSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/geojson/codecs/GeometryCollectionCodecSpecification.groovy new file mode 100644 index 00000000000..90717c8b79a --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/geojson/codecs/GeometryCollectionCodecSpecification.groovy @@ -0,0 +1,202 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model.geojson.codecs + +import com.mongodb.client.model.geojson.GeometryCollection +import com.mongodb.client.model.geojson.LineString +import com.mongodb.client.model.geojson.MultiLineString +import com.mongodb.client.model.geojson.MultiPoint +import com.mongodb.client.model.geojson.MultiPolygon +import com.mongodb.client.model.geojson.Point +import com.mongodb.client.model.geojson.Polygon +import com.mongodb.client.model.geojson.PolygonCoordinates +import com.mongodb.client.model.geojson.Position +import org.bson.BsonDocument +import org.bson.BsonDocumentReader +import org.bson.BsonDocumentWriter +import org.bson.codecs.DecoderContext +import org.bson.codecs.EncoderContext +import org.bson.codecs.configuration.CodecConfigurationException +import org.bson.json.JsonReader +import spock.lang.Specification + +import static com.mongodb.client.model.geojson.NamedCoordinateReferenceSystem.EPSG_4326_STRICT_WINDING +import static org.bson.BsonDocument.parse +import static org.bson.codecs.configuration.CodecRegistries.fromProviders + +class GeometryCollectionCodecSpecification extends Specification { + def registry = fromProviders([new GeoJsonCodecProvider()]) + def codec = registry.get(GeometryCollection) + def writer = new BsonDocumentWriter(new BsonDocument()) + def context = EncoderContext.builder().build() + + def 'should accept empty geometries'() { + def geometryCollection = new GeometryCollection([]) + + when: + codec.encode(writer, geometryCollection, context) + + then: + writer.document == parse('{ type: "GeometryCollection", geometries: []}') + + when: + def decodedGeometryCollection = codec.decode(new BsonDocumentReader(writer.document), DecoderContext.builder().build()) + + then: + geometryCollection == decodedGeometryCollection + } + + def 'should round trip'() { + given: + def geometryCollection = new GeometryCollection([ + new LineString([new Position(101d, 0d), new Position(102d, 1d)]), + new MultiLineString([[new Position([1.0d, 1.0d]), new Position([2.0d, 2.0d]), new Position([3.0d, 4.0d])], + [new Position([2.0d, 3.0d]), new Position([3.0d, 2.0d]), new Position([4.0d, 4.0d])]]), + new Point(new Position(100d, 0d)), + new MultiPoint([new Position([40.0d, 18.0d]), new Position([40.0d, 19.0d]), new Position([41.0d, 19.0d])]), + new Polygon([new Position([40.0d, 18.0d]), new Position([40.0d, 19.0d]), new Position([41.0d, 19.0d]), + new Position([40.0d, 18.0d])]), + new MultiPolygon([new PolygonCoordinates([new Position(102.0, 2.0), new Position(103.0, 2.0), + new Position(103.0, 3.0), new Position(102.0, 3.0), + new Position(102.0, 2.0)]), + new PolygonCoordinates([new Position(100.0, 0.0), new Position(101.0, 0.0), + new Position(101.0, 1.0), new Position(100.0, 1.0), + new Position(100.0, 0.0)], + [[new Position(100.2, 0.2), new Position(100.8, 0.2), + new Position(100.8, 0.8), new Position(100.2, 0.8), + new Position(100.2, 0.2)]])]), + new GeometryCollection([new Point(new Position(100d, 0d)), + new LineString([new Position(101d, 0d), new Position(102d, 1d)])]) + ]) + + when: + codec.encode(writer, geometryCollection, context) + + then: + writer.document == parse('''{ type: "GeometryCollection", geometries: [ + {type: "LineString", coordinates: [ [101.0, 0.0], [102.0, 1.0] ]}, + {type: "MultiLineString", coordinates: [[[1.0, 1.0], [2.0, 2.0], [3.0, 4.0]], [[2.0, 3.0], [3.0, 2.0], [4.0, 4.0]]]}, + {type: "Point", coordinates: [100.0, 0.0]}, + {type: "MultiPoint", coordinates: [[40.0, 18.0], [40.0, 19.0], [41.0, 19.0]]}, + {type: "Polygon", 
coordinates: [[[40.0, 18.0], [40.0, 19.0], [41.0, 19.0], [40.0, 18.0]]]}, + {type: "MultiPolygon", coordinates: [[[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]], + [[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]], + [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]]}, + { type: "GeometryCollection", geometries: [{ type: "Point", coordinates: [100.0, 0.0]}, + { type: "LineString", coordinates: [ [101.0, 0.0], [102.0, 1.0] ]}]} + ]}''') + + + when: + def decodedGeometryCollection = codec.decode(new BsonDocumentReader(writer.document), DecoderContext.builder().build()) + + then: + geometryCollection == decodedGeometryCollection + } + + def 'should round trip with coordinate reference system'() { + given: + def geometryCollection = new GeometryCollection(EPSG_4326_STRICT_WINDING, [ + new LineString([new Position(101d, 0d), new Position(102d, 1d)]), + new MultiLineString([[new Position([1.0d, 1.0d]), new Position([2.0d, 2.0d]), new Position([3.0d, 4.0d])], + [new Position([2.0d, 3.0d]), new Position([3.0d, 2.0d]), new Position([4.0d, 4.0d])]]), + new Point(new Position(100d, 0d)), + new MultiPoint([new Position([40.0d, 18.0d]), new Position([40.0d, 19.0d]), new Position([41.0d, 19.0d])]), + new Polygon([new Position([40.0d, 18.0d]), new Position([40.0d, 19.0d]), new Position([41.0d, 19.0d]), + new Position([40.0d, 18.0d])]), + new MultiPolygon([new PolygonCoordinates([new Position(102.0, 2.0), new Position(103.0, 2.0), + new Position(103.0, 3.0), new Position(102.0, 3.0), + new Position(102.0, 2.0)]), + new PolygonCoordinates([new Position(100.0, 0.0), new Position(101.0, 0.0), + new Position(101.0, 1.0), new Position(100.0, 1.0), + new Position(100.0, 0.0)], + [[new Position(100.2, 0.2), new Position(100.8, 0.2), + new Position(100.8, 0.8), new Position(100.2, 0.8), + new Position(100.2, 0.2)]])]), + new GeometryCollection([new Point(new Position(100d, 0d)), + new LineString([new Position(101d, 0d), new Position(102d, 1d)])]) + ]) + + when: + codec.encode(writer, geometryCollection, context) + + then: + writer.document == parse("""{ type: "GeometryCollection", geometries: [ + {type: "LineString", coordinates: [ [101.0, 0.0], [102.0, 1.0] ]}, + {type: "MultiLineString", coordinates: [[[1.0, 1.0], [2.0, 2.0], [3.0, 4.0]], [[2.0, 3.0], [3.0, 2.0], [4.0, 4.0]]]}, + {type: "Point", coordinates: [100.0, 0.0]}, + {type: "MultiPoint", coordinates: [[40.0, 18.0], [40.0, 19.0], [41.0, 19.0]]}, + {type: "Polygon", coordinates: [[[40.0, 18.0], [40.0, 19.0], [41.0, 19.0], [40.0, 18.0]]]}, + {type: "MultiPolygon", coordinates: [[[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]], + [[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]], + [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]]}, + { type: "GeometryCollection", geometries: [{ type: "Point", coordinates: [100.0, 0.0]}, + { type: "LineString", coordinates: [ [101.0, 0.0], [102.0, 1.0] ]}]}], + crs : {type: 'name', properties : {name : '$EPSG_4326_STRICT_WINDING.name'}}}""") + + when: + def decodedGeometryCollection = codec.decode(new BsonDocumentReader(writer.document), DecoderContext.builder().build()) + + then: + geometryCollection == decodedGeometryCollection + } + + def 'should decode integral value types'() { + given: + def jsonRepresentation = '{type: "LineString", coordinates: [ [101.0, 0], [102.0, 2147483648] ] }' + def expectedGeometry = new LineString([new Position(101d, 0d), new Position(102d, 2147483648d)]) + 
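+        // the int32 (0) and int64 (2147483648) values in the JSON should decode into double-valued positions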
def codec = registry.get(LineString) + + when: + def decodedGeometry = codec.decode(new JsonReader(jsonRepresentation), DecoderContext.builder().build()) + + then: + decodedGeometry == expectedGeometry + } + + def 'should throw when decoding invalid documents'() { + when: + codec.decode(new BsonDocumentReader(parse(invalidJson)), DecoderContext.builder().build()) + + then: + thrown(CodecConfigurationException) + + where: + invalidJson << [ + '{ type: "GeometryCollect"}', + '''{ geometries: [{ type: "Point", coordinates: [100.0, 0.0]}, + { type: "LineString", coordinates: [ [101.0, 0.0], [102.0, 1.0] ]}]}''', + '''{ type: "GeometryCollect", + geometries: [{ type: "Point", coordinates: [100.0, 0.0]}, + { type: "LineString", coordinates: [ [101.0, 0.0], [102.0, 1.0] ]}]}''', + '''{ type: "GeometryCollection", geometries: [[]]}''', + '''{ type: "GeometryCollect", + geometries: [{ type: "Paint", coordinates: [100.0, 0.0]}, + { type: "LaneString", coordinates: [ [101.0, 0.0], [102.0, 1.0] ]}]}''', + '''{ type: "GeometryCollect", + geometries: [{ coordinates: [100.0, 0.0]}]}''', + "{type: 'GeometryCollection', crs : {type: 'name', properties : {name : '$EPSG_4326_STRICT_WINDING.name'}}}", + '''{ type: "GeometryCollection", + geometries: [{ type: "Point", coordinates: [100.0, 0.0]}], + crs : {type: "something"}}''', + '''{ type: "GeometryCollection", + geometries: [{ type: "Point", coordinates: [100.0, 0.0]}], + crs : {type: "link", properties: {href: "http://example.com/crs/42"}}}''', + '''{ type: "GeometryCollection", geometries: [], abc: 123}''' + ] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/geojson/codecs/LineStringCodecSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/geojson/codecs/LineStringCodecSpecification.groovy new file mode 100644 index 00000000000..ead23f88259 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/geojson/codecs/LineStringCodecSpecification.groovy @@ -0,0 +1,103 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.mongodb.client.model.geojson.codecs
+
+import com.mongodb.client.model.geojson.LineString
+import com.mongodb.client.model.geojson.Position
+import org.bson.BsonDocument
+import org.bson.BsonDocumentReader
+import org.bson.BsonDocumentWriter
+import org.bson.codecs.DecoderContext
+import org.bson.codecs.EncoderContext
+import org.bson.codecs.configuration.CodecConfigurationException
+import spock.lang.Specification
+
+import static com.mongodb.client.model.geojson.NamedCoordinateReferenceSystem.EPSG_4326_STRICT_WINDING
+import static org.bson.BsonDocument.parse
+import static org.bson.codecs.configuration.CodecRegistries.fromProviders
+
+class LineStringCodecSpecification extends Specification {
+    def registry = fromProviders([new GeoJsonCodecProvider()])
+    def codec = registry.get(LineString)
+    def writer = new BsonDocumentWriter(new BsonDocument())
+    def context = EncoderContext.builder().build()
+
+    def 'should round trip'() {
+        given:
+        def lineString = new LineString([new Position([40.0d, 18.0d]),
+                                         new Position([40.0d, 19.0d]),
+                                         new Position([41.0d, 19.0d])])
+
+        when:
+        codec.encode(writer, lineString, context)
+
+        then:
+        writer.document == parse('{type: "LineString", coordinates: [[40.0, 18.0], [40.0, 19.0], [41.0, 19.0]]}')
+
+        when:
+        def decodedLineString = codec.decode(new BsonDocumentReader(writer.document), DecoderContext.builder().build())
+
+        then:
+        lineString == decodedLineString
+    }
+
+    def 'should round trip with coordinate reference system'() {
+        given:
+        def lineString = new LineString(EPSG_4326_STRICT_WINDING,
+                [new Position([40.0d, 20.0d]),
+                 new Position([40.0d, 40.0d]),
+                 new Position([20.0d, 40.0d])])
+
+        when:
+        codec.encode(writer, lineString, context)
+
+        then:
+        writer.document == parse("""{type: "LineString",
+                                     coordinates: [[40.0, 20.0], [40.0, 40.0], [20.0, 40.0]],
+                                     crs : {type: 'name', properties : {name : '$EPSG_4326_STRICT_WINDING.name'}}}""")
+
+        when:
+        def decodedLineString = codec.decode(new BsonDocumentReader(writer.document), DecoderContext.builder().build())
+
+        then:
+        lineString == decodedLineString
+    }
+
+    def 'should throw when decoding invalid documents'() {
+        when:
+        codec.decode(new BsonDocumentReader(parse(invalidJson)), DecoderContext.builder().build())
+
+        then:
+        thrown(CodecConfigurationException)
+
+        where:
+        invalidJson << [
+                '{type: "lineString"}',
+                '{coordinates: [[40.0, 18.0], [40.0, 19.0]]}',
+                '{type: "lineStr", coordinates: [[40.0, 18.0], [40.0, 19.0]]}',
+                '{type: "lineString", coordinates: [40.0, 18.0]}',
+                '{type: "lineString", coordinates: []}',
+                '{type: "lineString", coordinates: [[]]}',
+                '{type: "lineString", coordinates: [[[40.0, 18.0], [40.0, 19.0], [41.0, 19.0], [40.0, 18.0]]]}',
+                "{type: 'lineString', crs : {type: 'name', properties : {name : '$EPSG_4326_STRICT_WINDING.name'}}}",
+                '{type: "lineString", coordinates: [[40.0, 18.0], [40.0, 19.0], [41.0, 19.0], [40.0, 18.0]], crs : {type: "something"}}',
+                '''{type: "lineString", coordinates: [[40.0, 18.0], [40.0, 19.0], [41.0, 19.0], [40.0, 18.0]],
+                    crs : {type: "link", properties: {href: "http://example.com/crs/42"}}}''',
+                '{type: "lineString", coordinates: [[40.0, 18.0], [40.0, 19.0]], abc: 123}'
+        ]
+    }
+} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/geojson/codecs/MultiLineStringCodecSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/geojson/codecs/MultiLineStringCodecSpecification.groovy new file mode 100644 index 00000000000..5015c387c13 --- /dev/null +++ 
b/driver-core/src/test/unit/com/mongodb/client/model/geojson/codecs/MultiLineStringCodecSpecification.groovy @@ -0,0 +1,98 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.geojson.codecs + +import com.mongodb.client.model.geojson.MultiLineString +import com.mongodb.client.model.geojson.Position +import org.bson.BsonDocument +import org.bson.BsonDocumentReader +import org.bson.BsonDocumentWriter +import org.bson.codecs.DecoderContext +import org.bson.codecs.EncoderContext +import org.bson.codecs.configuration.CodecConfigurationException +import spock.lang.Specification + +import static com.mongodb.client.model.geojson.NamedCoordinateReferenceSystem.EPSG_4326_STRICT_WINDING +import static org.bson.BsonDocument.parse +import static org.bson.codecs.configuration.CodecRegistries.fromProviders + +class MultiLineStringCodecSpecification extends Specification { + def registry = fromProviders([new GeoJsonCodecProvider()]) + def codec = registry.get(MultiLineString) + def writer = new BsonDocumentWriter(new BsonDocument()) + def context = EncoderContext.builder().build() + + def 'should round trip'() { + given: + def multiLineString = new MultiLineString([[new Position([1.0d, 1.0d]), new Position([2.0d, 2.0d]), new Position([3.0d, 4.0d])], + [new Position([2.0d, 3.0d]), new Position([3.0d, 2.0d]), new Position([4.0d, 4.0d])]]) + + when: + codec.encode(writer, multiLineString, context) + + then: + writer.document == parse('''{type: "MultiLineString", + coordinates: [[[1.0, 1.0], [2.0, 2.0], [3.0, 4.0]], [[2.0, 3.0], [3.0, 2.0], [4.0, 4.0]]]}''') + + when: + def decodedMultiLineString = codec.decode(new BsonDocumentReader(writer.document), DecoderContext.builder().build()) + + then: + multiLineString == decodedMultiLineString + } + + def 'should round trip with coordinate reference system'() { + given: + def multiLineString = new MultiLineString(EPSG_4326_STRICT_WINDING, + [[new Position([1.0d, 1.0d]), new Position([2.0d, 2.0d]), new Position([3.0d, 4.0d])], + [new Position([2.0d, 3.0d]), new Position([3.0d, 2.0d]), new Position([4.0d, 4.0d])]]) + + when: + codec.encode(writer, multiLineString, context) + + then: + writer.document == parse("""{type: "MultiLineString", + coordinates: [[[1.0, 1.0], [2.0, 2.0], [3.0, 4.0]], [[2.0, 3.0], [3.0, 2.0], [4.0, 4.0]]], + crs : {type: 'name', properties : {name : '$EPSG_4326_STRICT_WINDING.name'}}}""") + + when: + def decodedMultiLineString = codec.decode(new BsonDocumentReader(writer.document), DecoderContext.builder().build()) + + then: + multiLineString == decodedMultiLineString + } + + def 'should throw when decoding invalid documents'() { + when: + codec.decode(new BsonDocumentReader(parse(invalidJson)), DecoderContext.builder().build()) + + then: + thrown(CodecConfigurationException) + + where: + invalidJson << [ + '{type: "MultiLineString"}', + '{coordinates: [[[1.0, 1.0], [2.0, 2.0], [3.0, 4.0]], [[2.0, 3.0], [3.0, 2.0], [4.0, 4.0]]]}', + '{type: 
"MultiLineStr", coordinates: [[[1.0, 1.0], [2.0, 2.0], [3.0, 4.0]], [[2.0, 3.0], [3.0, 2.0], [4.0, 4.0]]]}', + '{type: "MultiLineString", coordinates: [40.0, 18.0]}', + '{type: "MultiLineString", coordinates: [[[[40.0, 18.0], [40.0, 19.0], [41.0, 19.0], [40.0, 18.0]]]]}', + "{type: 'MultiLineString', crs : {type: 'name', properties : {name : '$EPSG_4326_STRICT_WINDING.name'}}}", + '{type: "MultiLineString", coordinates: [[[1.0, 1.0], [2.0, 2.0], [3.0, 4.0]]], crs : {type: "something"}}', + '{type: "MultiLineString", coordinates: [[[1.0, 1.0], [2.0, 2.0], [3.0, 4.0]], [[2.0, 3.0], [3.0, 2.0], [4.0, 4.0]]], a: 1}' + ] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/geojson/codecs/MultiPointCodecSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/geojson/codecs/MultiPointCodecSpecification.groovy new file mode 100644 index 00000000000..7bf468a2577 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/geojson/codecs/MultiPointCodecSpecification.groovy @@ -0,0 +1,99 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.geojson.codecs + +import com.mongodb.client.model.geojson.MultiPoint +import com.mongodb.client.model.geojson.Position +import org.bson.BsonDocument +import org.bson.BsonDocumentReader +import org.bson.BsonDocumentWriter +import org.bson.codecs.DecoderContext +import org.bson.codecs.EncoderContext +import org.bson.codecs.configuration.CodecConfigurationException +import spock.lang.Specification + +import static com.mongodb.client.model.geojson.NamedCoordinateReferenceSystem.EPSG_4326_STRICT_WINDING +import static org.bson.BsonDocument.parse +import static org.bson.codecs.configuration.CodecRegistries.fromProviders + +class MultiPointCodecSpecification extends Specification { + def registry = fromProviders([new GeoJsonCodecProvider()]) + def codec = registry.get(MultiPoint) + def writer = new BsonDocumentWriter(new BsonDocument()) + def context = EncoderContext.builder().build() + + def 'should round trip'() { + given: + def multiPoint = new MultiPoint([new Position([40.0d, 18.0d]), + new Position([40.0d, 19.0d]), + new Position([41.0d, 19.0d])]) + + when: + codec.encode(writer, multiPoint, context) + + then: + writer.document == parse('{type: "MultiPoint", coordinates: [[40.0, 18.0], [40.0, 19.0], [41.0, 19.0]]}') + + when: + def decodedMultiPoint = codec.decode(new BsonDocumentReader(writer.document), DecoderContext.builder().build()) + + then: + multiPoint == decodedMultiPoint + } + + def 'should round trip with coordinate reference system'() { + given: + def multiPoint = new MultiPoint(EPSG_4326_STRICT_WINDING, + [new Position([40.0d, 18.0d]), + new Position([40.0d, 19.0d]), + new Position([41.0d, 19.0d])]) + + when: + codec.encode(writer, multiPoint, context) + + then: + writer.document == parse("""{type: "MultiPoint", coordinates: [[40.0, 18.0], [40.0, 19.0], [41.0, 19.0]], + crs : {type: 'name', properties : {name : 
'$EPSG_4326_STRICT_WINDING.name'}}}""") + + when: + def decodedMultiPoint = codec.decode(new BsonDocumentReader(writer.document), DecoderContext.builder().build()) + + then: + multiPoint == decodedMultiPoint + } + + + def 'should throw when decoding invalid documents'() { + when: + codec.decode(new BsonDocumentReader(parse(invalidJson)), DecoderContext.builder().build()) + + then: + thrown(CodecConfigurationException) + + where: + invalidJson << [ + '{type: "MultiPoint"}', + '{coordinates: [[40.0, 20.0], [40.0, 40.0], [20.0, 40.0]]}', + '{type: "MultiPoit", coordinates: [[40.0, 20.0], [40.0, 40.0], [20.0, 40.0]]}', + '{type: "MultiPoint", coordinates: [40.0, 18.0]}', + '{type: "MultiPoint", coordinates: [[[40.0, 18.0], [40.0, 19.0], [41.0, 19.0], [40.0, 18.0]]]}', + "{type: 'MultiPoint', crs : {type: 'name', properties : {name : '$EPSG_4326_STRICT_WINDING.name'}}}", + '{type: "MultiPoint", coordinates: [[1.0, 1.0], [2.0, 2.0], [3.0, 4.0]], crs : {type: "something"}}', + '{type: "MultiPoint", coordinates: [[40.0, 20.0], [40.0, 40.0], [20.0, 40.0]], abc: 123}' + ] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/geojson/codecs/MultiPolygonCodecSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/geojson/codecs/MultiPolygonCodecSpecification.groovy new file mode 100644 index 00000000000..4cb563d1022 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/geojson/codecs/MultiPolygonCodecSpecification.groovy @@ -0,0 +1,138 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model.geojson.codecs + +import com.mongodb.client.model.geojson.MultiPolygon +import com.mongodb.client.model.geojson.PolygonCoordinates +import com.mongodb.client.model.geojson.Position +import org.bson.BsonDocument +import org.bson.BsonDocumentReader +import org.bson.BsonDocumentWriter +import org.bson.codecs.DecoderContext +import org.bson.codecs.EncoderContext +import org.bson.codecs.configuration.CodecConfigurationException +import spock.lang.Specification + +import static com.mongodb.client.model.geojson.NamedCoordinateReferenceSystem.EPSG_4326_STRICT_WINDING +import static org.bson.BsonDocument.parse +import static org.bson.codecs.configuration.CodecRegistries.fromProviders + +class MultiPolygonCodecSpecification extends Specification { + def registry = fromProviders([new GeoJsonCodecProvider()]) + def codec = registry.get(MultiPolygon) + def writer = new BsonDocumentWriter(new BsonDocument()) + def context = EncoderContext.builder().build() + + def 'should round trip'() { + given: + def multiMultiPolygon = new MultiPolygon([new PolygonCoordinates([new Position(102.0, 2.0), new Position(103.0, 2.0), + new Position(103.0, 3.0), new Position(102.0, 3.0), + new Position(102.0, 2.0)]), + new PolygonCoordinates([new Position(100.0, 0.0), new Position(101.0, 0.0), + new Position(101.0, 1.0), new Position(100.0, 1.0), + new Position(100.0, 0.0)], + [[new Position(100.2, 0.2), new Position(100.8, 0.2), + new Position(100.8, 0.8), new Position(100.2, 0.8), + new Position(100.2, 0.2)]])]) + + when: + codec.encode(writer, multiMultiPolygon, context) + + then: + writer.document == parse('''{type: "MultiPolygon", + coordinates: [[[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]], + [[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]], + [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]]}''') + + + when: + def decodedMultiMultiPolygon = codec.decode(new BsonDocumentReader(writer.document), DecoderContext.builder().build()) + + then: + multiMultiPolygon == decodedMultiMultiPolygon + } + + def 'should round trip with coordinate reference system'() { + given: + def multiMultiPolygon = new MultiPolygon(EPSG_4326_STRICT_WINDING, + [new PolygonCoordinates([new Position(102.0, 2.0), new Position(103.0, 2.0), + new Position(103.0, 3.0), new Position(102.0, 3.0), + new Position(102.0, 2.0)]), + new PolygonCoordinates([new Position(100.0, 0.0), new Position(101.0, 0.0), + new Position(101.0, 1.0), new Position(100.0, 1.0), + new Position(100.0, 0.0)], + [[new Position(100.2, 0.2), new Position(100.8, 0.2), + new Position(100.8, 0.8), new Position(100.2, 0.8), + new Position(100.2, 0.2)]])]) + + when: + codec.encode(writer, multiMultiPolygon, context) + + then: + writer.document == parse("""{ "type": "MultiPolygon", + coordinates: [[[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]], + [[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]], + [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]], + crs: {type: 'name', properties : {name : '$EPSG_4326_STRICT_WINDING.name'}}}""") + + when: + def decodedMultiMultiPolygon = codec.decode(new BsonDocumentReader(writer.document), DecoderContext.builder().build()) + + then: + multiMultiPolygon == decodedMultiMultiPolygon + } + + def 'should throw when decoding invalid documents'() { + when: + codec.decode(new BsonDocumentReader(parse(invalidJson)), DecoderContext.builder().build()) + + then: + 
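+ // each malformed document below must be rejected with a CodecConfigurationException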
thrown(CodecConfigurationException)
+
+ where:
+ invalidJson << [
+ '{type: "MultiPolygon"}',
+ '''{coordinates: [[[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]],
+ [[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],
+ [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]]}''',
+ '''{type: "MultiPolygot",
+ coordinates: [[[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]],
+ [[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],
+ [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]]}''',
+ '{type: "MultiPolygon", coordinates: [[[40.0, 18.0], [40.0, 19.0]]]}',
+ '{type: "MultiPolygon", coordinates: []}',
+ '{type: "MultiPolygon", coordinates: [[]]}',
+ '{type: "MultiPolygon", coordinates: [[[]]]}',
+ '{type: "MultiPolygon", coordinates: [[[[]]]]}',
+ '{type: "MultiPolygon", coordinates: [[[[[]]]]]}',
+ "{type: 'MultiPolygon', crs : {type: 'name', properties : {name : '$EPSG_4326_STRICT_WINDING.name'}}}",
+ '''{type: "MultiPolygon", crs : {type: "something"},
+ coordinates: [[[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]],
+ [[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],
+ [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]]}''',
+ '''{type: "MultiPolygon", crs : {type: "link", properties: {href: "http://example.com/crs/42"}},
+ coordinates: [[[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]],
+ [[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],
+ [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]]}''',
+ '''{type: "MultiPolygon", abc: 123,
+ coordinates: [[[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]],
+ [[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]],
+ [[100.2, 0.2], [100.8, 0.2], [100.8, 0.8], [100.2, 0.8], [100.2, 0.2]]]]}'''
+ ]
+ }
+
+}
diff --git a/driver-core/src/test/unit/com/mongodb/client/model/geojson/codecs/NamedCoordinateReferenceSystemSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/geojson/codecs/NamedCoordinateReferenceSystemSpecification.groovy
new file mode 100644
index 00000000000..5f07fa60f55
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/client/model/geojson/codecs/NamedCoordinateReferenceSystemSpecification.groovy
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.mongodb.client.model.geojson.codecs + +import com.mongodb.client.model.geojson.NamedCoordinateReferenceSystem +import org.bson.BsonDocument +import org.bson.BsonDocumentReader +import org.bson.BsonDocumentWriter +import org.bson.codecs.DecoderContext +import org.bson.codecs.EncoderContext +import org.bson.codecs.configuration.CodecConfigurationException +import spock.lang.Specification + +import static com.mongodb.client.model.geojson.NamedCoordinateReferenceSystem.CRS_84 +import static com.mongodb.client.model.geojson.NamedCoordinateReferenceSystem.EPSG_4326 +import static com.mongodb.client.model.geojson.NamedCoordinateReferenceSystem.EPSG_4326_STRICT_WINDING +import static org.bson.BsonDocument.parse +import static org.bson.codecs.configuration.CodecRegistries.fromProviders + +final class NamedCoordinateReferenceSystemSpecification extends Specification { + def registry = fromProviders([new GeoJsonCodecProvider()]) + def codec = registry.get(NamedCoordinateReferenceSystem) + def writer = new BsonDocumentWriter(new BsonDocument()) + def context = EncoderContext.builder().build() + + def 'should round trip'() { + when: + codec.encode(writer, crs, context) + + then: + writer.document == parse( "{type: 'name', properties : {name : '$crs.name'}}") + + when: + def decodeCRS = codec.decode(new BsonDocumentReader(writer.document), DecoderContext.builder().build()) + + then: + crs == decodeCRS + + where: + crs << [CRS_84, EPSG_4326, EPSG_4326_STRICT_WINDING] + } + + def 'should throw when decoding invalid documents'() { + when: + codec.decode(new BsonDocumentReader(parse(invalidJson)), DecoderContext.builder().build()) + + then: + thrown(CodecConfigurationException) + + where: + invalidJson << [ + '{type: "name"}', + '{type: "name", properties : {}}', + '{type: "name", properties : {type: "link", properties: {href: "http://example.com/crs/42"}}}', + ] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/geojson/codecs/PointCodecSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/geojson/codecs/PointCodecSpecification.groovy new file mode 100644 index 00000000000..1e0924d6fad --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/geojson/codecs/PointCodecSpecification.groovy @@ -0,0 +1,92 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model.geojson.codecs + +import com.mongodb.client.model.geojson.Point +import com.mongodb.client.model.geojson.Position +import org.bson.BsonDocument +import org.bson.BsonDocumentReader +import org.bson.BsonDocumentWriter +import org.bson.codecs.DecoderContext +import org.bson.codecs.EncoderContext +import org.bson.codecs.configuration.CodecConfigurationException +import spock.lang.Specification + +import static com.mongodb.client.model.geojson.NamedCoordinateReferenceSystem.EPSG_4326_STRICT_WINDING +import static org.bson.BsonDocument.parse +import static org.bson.codecs.configuration.CodecRegistries.fromProviders + +class PointCodecSpecification extends Specification { + def registry = fromProviders([new GeoJsonCodecProvider()]) + def codec = registry.get(Point) + def writer = new BsonDocumentWriter(new BsonDocument()) + def context = EncoderContext.builder().build() + + def 'should round trip'() { + given: + def point = new Point(new Position([40.0d, 18.0d])) + + when: + codec.encode(writer, point, context) + + then: + writer.document == parse('{type: "Point", coordinates: [40.0, 18.0]}') + + when: + def decodedPoint = codec.decode(new BsonDocumentReader(writer.document), DecoderContext.builder().build()) + + then: + point == decodedPoint + } + + def 'should round trip with coordinate reference system'() { + given: + def point = new Point(EPSG_4326_STRICT_WINDING, new Position([40.0d, 18.0d])) + + when: + codec.encode(writer, point, context) + + then: + writer.document == parse('{type: "Point", coordinates: [40.0, 18.0], ' + + "crs : {type: 'name', properties : {name : '$EPSG_4326_STRICT_WINDING.name'}}}") + + when: + def decodedPoint = codec.decode(new BsonDocumentReader(writer.document), DecoderContext.builder().build()) + + then: + point == decodedPoint + } + + def 'should throw when decoding invalid documents'() { + when: + codec.decode(new BsonDocumentReader(parse(invalidJson)), DecoderContext.builder().build()) + + then: + thrown(CodecConfigurationException) + + where: + invalidJson << [ + '{type: "Point"}', + '{coordinates: [40.0, 18.0]}', + '{type: "Pointer", coordinates: [40.0, 18.0]}', + "{type: 'Point', crs : {type: 'name', properties : {name : '$EPSG_4326_STRICT_WINDING.name'}}}", + '{type: "Point", coordinates: [40.0, 18.0], crs : {type: "link", properties: {href: "http://example.com/crs/42"}}}', + '{type: "Point", coordinates: [40.0, 18.0], crs : {type: "name", properties: {}}}', + '{type: "Point", coordinates: [40.0, 18.0], abc: 123}' + ] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/geojson/codecs/PolygonCodecSpecification.groovy b/driver-core/src/test/unit/com/mongodb/client/model/geojson/codecs/PolygonCodecSpecification.groovy new file mode 100644 index 00000000000..5f2e4559700 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/geojson/codecs/PolygonCodecSpecification.groovy @@ -0,0 +1,136 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.geojson.codecs + +import com.mongodb.client.model.geojson.Polygon +import com.mongodb.client.model.geojson.PolygonCoordinates +import com.mongodb.client.model.geojson.Position +import org.bson.BsonDocument +import org.bson.BsonDocumentReader +import org.bson.BsonDocumentWriter +import org.bson.codecs.DecoderContext +import org.bson.codecs.EncoderContext +import org.bson.codecs.configuration.CodecConfigurationException +import spock.lang.Specification + +import static com.mongodb.client.model.geojson.NamedCoordinateReferenceSystem.EPSG_4326_STRICT_WINDING +import static org.bson.BsonDocument.parse +import static org.bson.codecs.configuration.CodecRegistries.fromProviders + +class PolygonCodecSpecification extends Specification { + def registry = fromProviders([new GeoJsonCodecProvider()]) + def codec = registry.get(Polygon) + def writer = new BsonDocumentWriter(new BsonDocument()) + def context = EncoderContext.builder().build() + + def 'should round trip'() { + given: + def polygon = new Polygon([new Position([40.0d, 18.0d]), + new Position([40.0d, 19.0d]), + new Position([41.0d, 19.0d]), + new Position([40.0d, 18.0d])]) + + when: + codec.encode(writer, polygon, context) + + then: + writer.document == parse('{type: "Polygon", coordinates: [[[40.0, 18.0], [40.0, 19.0], [41.0, 19.0], [40.0, 18.0]]]}') + + when: + def decodedPolygon = codec.decode(new BsonDocumentReader(writer.document), DecoderContext.builder().build()) + + then: + polygon == decodedPolygon + } + + def 'should round trip with coordinate reference system'() { + given: + def polygon = new Polygon(EPSG_4326_STRICT_WINDING, + new PolygonCoordinates([new Position([40.0d, 20.0d]), + new Position([40.0d, 40.0d]), + new Position([20.0d, 40.0d]), + new Position([40.0d, 20.0d])])) + + when: + codec.encode(writer, polygon, context) + + then: + writer.document == parse("""{type: 'Polygon', coordinates: [[[40.0, 20.0], [40.0, 40.0], [20.0, 40.0], [40.0, 20.0]]], + crs : {type: 'name', properties : {name : '$EPSG_4326_STRICT_WINDING.name'}}}""") + + when: + def decodedPolygon = codec.decode(new BsonDocumentReader(writer.document), DecoderContext.builder().build()) + + then: + polygon == decodedPolygon + } + + def 'should round trip with holes'() { + given: + def polygon = new Polygon([new Position([40.0d, 20.0d]), + new Position([40.0d, 40.0d]), + new Position([20.0d, 40.0d]), + new Position([40.0d, 20.0d])], + [new Position([30.0d, 25.0d]), + new Position([30.0d, 35.0d]), + new Position([25.0d, 25.0d]), + new Position([30.0d, 25.0d])], + [new Position([36.0d, 37.0d]), + new Position([36.0d, 37.0d]), + new Position([37.0d, 37.0d]), + new Position([36.0d, 37.0d])]) + + when: + codec.encode(writer, polygon, context) + + then: + writer.document == parse('''{type: 'Polygon', coordinates: + [[[40.0, 20.0], [40.0, 40.0], [20.0, 40.0], [40.0, 20.0]], + [[30.0, 25.0], [30.0, 35.0], [25.0, 25.0], [30.0, 25.0]], + [[36.0, 37.0], [36.0, 37.0], [37.0, 37.0], [36.0, 37.0]]]}''') + + when: + def decodedPolygon = codec.decode(new BsonDocumentReader(writer.document), DecoderContext.builder().build()) + + then: + polygon == decodedPolygon + } + + def 'should throw when decoding invalid documents'() { + when: + codec.decode(new BsonDocumentReader(parse(invalidJson)), DecoderContext.builder().build()) + + then: + thrown(CodecConfigurationException) + + where: + invalidJson << [ + '{type: "Polygon"}', + 
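+ // remaining cases: missing type, misspelled type, rings that are too short or empty, invalid crs documents, and unknown extra keys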
'{coordinates: [[[40.0, 18.0], [40.0, 19.0], [41.0, 19.0], [40.0, 18.0]]]}', + '{type: "Polygot", coordinates: [[[40.0, 18.0], [40.0, 19.0], [41.0, 19.0], [40.0, 18.0]]]}', + '{type: "Polygon", coordinates: [[[40.0, 18.0], [40.0, 19.0]]]}', + '{type: "Polygon", coordinates: []}', + '{type: "Polygon", coordinates: [[]]}', + '{type: "Polygon", coordinates: [[[]]]}', + "{type: 'Polygon', crs : {type: 'name', properties : {name : '$EPSG_4326_STRICT_WINDING.name'}}}", + '{type: "Polygon", coordinates: [[[40.0, 18.0], [40.0, 19.0], [41.0, 19.0], [40.0, 18.0]]], crs : {type: "something"}}', + '''{type: "Polygon", coordinates: [[[40.0, 18.0], [40.0, 19.0], [41.0, 19.0], [40.0, 18.0]]], + crs : {type: "link", properties: {href: "http://example.com/crs/42"}}}''', + '{type: "Polygon", coordinates: [[[40.0, 18.0], [40.0, 19.0], [41.0, 19.0], [40.0, 18.0]]], abc: 123}' + ] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/search/BinaryVectorSearchOptionsTest.java b/driver-core/src/test/unit/com/mongodb/client/model/search/BinaryVectorSearchOptionsTest.java new file mode 100644 index 00000000000..1fde037dbef --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/search/BinaryVectorSearchOptionsTest.java @@ -0,0 +1,126 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.client.model.search; + +import com.mongodb.client.model.Filters; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt64; +import org.bson.BsonString; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +final class BinaryVectorSearchOptionsTest { + @Test + void approximateVectorSearchOptions() { + assertEquals( + new BsonDocument().append("numCandidates", new BsonInt64(1)), + VectorSearchOptions.approximateVectorSearchOptions(1) + .toBsonDocument() + ); + } + + @Test + void exactVectorSearchOptions() { + assertEquals( + new BsonDocument().append("exact", new BsonBoolean(true)), + VectorSearchOptions.exactVectorSearchOptions() + .toBsonDocument() + ); + } + + @Test + void option() { + assertEquals( + VectorSearchOptions.approximateVectorSearchOptions(1) + .filter(Filters.lt("fieldName", 1)) + .toBsonDocument(), + VectorSearchOptions.approximateVectorSearchOptions(1) + .option("filter", Filters.lt("fieldName", 1)) + .toBsonDocument()); + } + + @Test + void filterApproximate() { + assertEquals( + new BsonDocument() + .append("filter", Filters.lt("fieldName", 1).toBsonDocument()) + .append("numCandidates", new BsonInt64(1)), + VectorSearchOptions.approximateVectorSearchOptions(1) + .filter(Filters.lt("fieldName", 1)) + .toBsonDocument() + ); + } + + @Test + void filterExact() { + assertEquals( + new BsonDocument() + .append("filter", Filters.lt("fieldName", 1).toBsonDocument()) + .append("exact", new BsonBoolean(true)), + VectorSearchOptions.exactVectorSearchOptions() + .filter(Filters.lt("fieldName", 1)) + .toBsonDocument() + ); + } + + @Test + void optionsApproximate() { + assertEquals( + new BsonDocument() + .append("name", new BsonString("value")) + .append("filter", Filters.lt("fieldName", 1).toBsonDocument()) + .append("numCandidates", new BsonInt64(1)), + VectorSearchOptions.approximateVectorSearchOptions(1) + .option("name", "value") + .filter(Filters.lt("fieldName", 0)) + .option("filter", Filters.lt("fieldName", 1)) + .option("numCandidates", new BsonInt64(1)) + .toBsonDocument() + ); + } + + @Test + void optionsExact() { + assertEquals( + new BsonDocument() + .append("name", new BsonString("value")) + .append("filter", Filters.lt("fieldName", 1).toBsonDocument()) + .append("exact", new BsonBoolean(true)), + VectorSearchOptions.exactVectorSearchOptions() + .option("name", "value") + .filter(Filters.lt("fieldName", 0)) + .option("filter", Filters.lt("fieldName", 1)) + .option("exact", new BsonBoolean(true)) + .toBsonDocument() + ); + } + + @Test + void approximateVectorSearchOptionsIsUnmodifiable() { + String expected = VectorSearchOptions.approximateVectorSearchOptions(1).toBsonDocument().toJson(); + VectorSearchOptions.approximateVectorSearchOptions(1).option("name", "value"); + assertEquals(expected, VectorSearchOptions.approximateVectorSearchOptions(1).toBsonDocument().toJson()); + } + + @Test + void approximateVectorSearchOptionsIsImmutable() { + String expected = VectorSearchOptions.approximateVectorSearchOptions(1).toBsonDocument().toJson(); + VectorSearchOptions.approximateVectorSearchOptions(1).toBsonDocument().append("name", new BsonString("value")); + assertEquals(expected, VectorSearchOptions.approximateVectorSearchOptions(1).toBsonDocument().toJson()); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/search/FuzzySearchOptionsTest.java b/driver-core/src/test/unit/com/mongodb/client/model/search/FuzzySearchOptionsTest.java new file mode 100644 
index 00000000000..577ae549ba8 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/search/FuzzySearchOptionsTest.java @@ -0,0 +1,95 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +final class FuzzySearchOptionsTest { + @Test + void fuzzySearchOptions() { + assertEquals( + new BsonDocument(), + FuzzySearchOptions.fuzzySearchOptions() + .toBsonDocument() + ); + } + + @Test + void maxEdits() { + assertEquals( + new BsonDocument() + .append("maxEdits", new BsonInt32(1)), + FuzzySearchOptions.fuzzySearchOptions() + .maxEdits(1) + .toBsonDocument() + ); + } + + @Test + void prefixLength() { + assertEquals( + new BsonDocument() + .append("prefixLength", new BsonInt32(5)), + FuzzySearchOptions.fuzzySearchOptions() + .prefixLength(5) + .toBsonDocument() + ); + } + + @Test + void maxExpansions() { + assertEquals( + new BsonDocument() + .append("maxExpansions", new BsonInt32(10)), + FuzzySearchOptions.fuzzySearchOptions() + .maxExpansions(10) + .toBsonDocument() + ); + } + + @Test + void options() { + assertEquals( + new BsonDocument() + .append("maxEdits", new BsonInt32(1)) + .append("prefixLength", new BsonInt32(5)) + .append("maxExpansions", new BsonInt32(10)), + FuzzySearchOptions.fuzzySearchOptions() + .maxEdits(1) + .prefixLength(5) + .maxExpansions(10) + .toBsonDocument() + ); + } + + @Test + void fuzzySearchOptionsIsUnmodifiable() { + String expected = FuzzySearchOptions.fuzzySearchOptions().toBsonDocument().toJson(); + FuzzySearchOptions.fuzzySearchOptions().maxEdits(1); + assertEquals(expected, FuzzySearchOptions.fuzzySearchOptions().toBsonDocument().toJson()); + } + + @Test + void fuzzySearchOptionsIsImmutable() { + String expected = FuzzySearchOptions.fuzzySearchOptions().toBsonDocument().toJson(); + FuzzySearchOptions.fuzzySearchOptions().toBsonDocument().append("maxEdits", new BsonInt32(1)); + assertEquals(expected, FuzzySearchOptions.fuzzySearchOptions().toBsonDocument().toJson()); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/search/SearchCollectorTest.java b/driver-core/src/test/unit/com/mongodb/client/model/search/SearchCollectorTest.java new file mode 100644 index 00000000000..f8826554990 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/search/SearchCollectorTest.java @@ -0,0 +1,92 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import org.bson.Document; +import org.bson.codecs.configuration.CodecConfigurationException; +import org.junit.jupiter.api.Test; + +import static com.mongodb.client.model.search.SearchFacet.combineToBson; +import static com.mongodb.client.model.search.SearchFacet.numberFacet; +import static com.mongodb.client.model.search.SearchFacet.stringFacet; +import static com.mongodb.client.model.search.SearchOperator.exists; +import static com.mongodb.client.model.search.SearchPath.fieldPath; +import static java.util.Arrays.asList; +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +final class SearchCollectorTest { + @Test + void of() { + assertEquals( + docExamplePredefined() + .toBsonDocument(), + SearchCollector.of(docExampleCustom()) + .toBsonDocument() + ); + } + + @Test + void facet() { + assertAll( + () -> assertThrows(CodecConfigurationException.class, () -> + // facet names must be unique; `BsonCodec` wraps our `IllegalStateException` into `CodecConfigurationException` + SearchCollector.facet( + exists(fieldPath("fieldName")), + asList( + stringFacet("duplicateFacetName", fieldPath("stringFieldName")), + numberFacet("duplicateFacetName", fieldPath("numberFieldName"), asList(10, 20, 30)))) + // we have to render into `BsonDocument` in order to trigger the lazy check + .toBsonDocument() + ), + () -> assertEquals( + docExampleCustom() + .toBsonDocument(), + docExamplePredefined() + .toBsonDocument() + ) + ); + } + + private static SearchCollector docExamplePredefined() { + return SearchCollector.facet( + exists( + fieldPath("fieldName")), + asList( + stringFacet( + "stringFacetName", + fieldPath("stringFieldName")), + numberFacet( + "numberFacetName", + fieldPath("numberFieldName"), + asList(10, 20, 30)))); + } + + private static Document docExampleCustom() { + return new Document("facet", + new Document("operator", exists( + fieldPath("fieldName"))) + .append("facets", combineToBson(asList( + stringFacet( + "stringFacetName", + fieldPath("stringFieldName")), + numberFacet( + "numberFacetName", + fieldPath("numberFieldName"), + asList(10, 20, 30)))))); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/search/SearchCountTest.java b/driver-core/src/test/unit/com/mongodb/client/model/search/SearchCountTest.java new file mode 100644 index 00000000000..b525c43b8a0 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/search/SearchCountTest.java @@ -0,0 +1,75 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.Document; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; + +final class SearchCountTest { + @Test + void of() { + assertEquals( + docExamplePredefined() + .toBsonDocument(), + SearchCount.of(docExampleCustom()) + .toBsonDocument() + ); + } + + @Test + void total() { + assertAll( + () -> assertEquals( + new BsonDocument("type", new BsonString("total")), + SearchCount.total() + .toBsonDocument() + ) + ); + } + + @Test + void lowerBound() { + assertAll( + () -> assertEquals( + docExampleCustom() + .toBsonDocument(), + docExamplePredefined() + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("type", new BsonString("lowerBound")) + .append("threshold", new BsonInt32(123)), + SearchCount.lowerBound() + .threshold(123) + .toBsonDocument() + ) + ); + } + + private static SearchCount docExamplePredefined() { + return SearchCount.lowerBound(); + } + + private static Document docExampleCustom() { + return new Document("type", "lowerBound"); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/search/SearchFacetTest.java b/driver-core/src/test/unit/com/mongodb/client/model/search/SearchFacetTest.java new file mode 100644 index 00000000000..7a348cdf1c7 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/search/SearchFacetTest.java @@ -0,0 +1,203 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.client.model.search; + +import org.bson.BsonArray; +import org.bson.BsonDateTime; +import org.bson.BsonDocument; +import org.bson.BsonDouble; +import org.bson.BsonInt32; +import org.bson.BsonInt64; +import org.bson.BsonString; +import org.bson.Document; +import org.junit.jupiter.api.Test; + +import java.time.Instant; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; + +import static com.mongodb.client.model.search.SearchPath.fieldPath; +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; +import static java.util.Collections.singleton; +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +final class SearchFacetTest { + @Test + void of() { + assertEquals( + docExamplePredefined() + .toBsonDocument(), + SearchFacet.of(docExampleCustom()) + .toBsonDocument() + ); + } + + @Test + void stringFacet() { + assertAll( + () -> assertEquals( + docExampleCustom() + .toBsonDocument(), + docExamplePredefined() + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("facetName", new BsonDocument("type", new BsonString("string")) + .append("path", new BsonString(fieldPath("fieldName").toValue())) + .append("numBuckets", new BsonInt32(3))), + SearchFacet.stringFacet("facetName", + fieldPath("fieldName") + // multi must be ignored + .multi("analyzerName")) + .numBuckets(3) + .toBsonDocument() + ) + ); + } + + @Test + void numberFacet() { + assertAll( + () -> assertThrows(IllegalArgumentException.class, () -> + // boundaries must contain at least 2 elements + SearchFacet.numberFacet("facetName", + fieldPath("fieldName"), + singleton(1)) + ), + () -> assertEquals( + new BsonDocument("facetName", new BsonDocument("type", new BsonString("number")) + .append("path", new BsonString(fieldPath("fieldName").toValue())) + .append("boundaries", new BsonArray(asList( + new BsonInt32(1), + new BsonInt32(2))))), + SearchFacet.numberFacet("facetName", + fieldPath("fieldName") + // multi must be ignored + .multi("analyzerName"), + asList( + 1, + 2)) + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("facetName", new BsonDocument("type", new BsonString("number")) + .append("path", new BsonString(fieldPath("fieldName").toValue())) + .append("boundaries", new BsonArray(asList( + new BsonInt32(-1), + new BsonInt32(0), + new BsonInt32(1), + new BsonInt64(2), + new BsonDouble(3.5), + new BsonDouble(4.5), + new BsonInt32(5), + new BsonInt64(6)))) + .append("default", new BsonString("defaultBucketName"))), + SearchFacet.numberFacet("facetName", + fieldPath("fieldName"), + asList( + (byte) -1, + (short) 0, + 1, + 2L, + 3.5f, + 4.5d, + new AtomicInteger(5), + new AtomicLong(6))) + .defaultBucket("defaultBucketName") + .toBsonDocument() + ) + ); + } + + @Test + void dateFacet() { + assertAll( + () -> assertThrows(IllegalArgumentException.class, () -> + // boundaries must contain at least 2 elements + SearchFacet.dateFacet("facetName", + fieldPath("fieldName"), + singleton(Instant.now())) + ), + () -> assertEquals( + new BsonDocument("facetName", new BsonDocument("type", new BsonString("date")) + .append("path", new BsonString(fieldPath("fieldName").toValue())) + .append("boundaries", new BsonArray(asList( + new BsonDateTime(0), + new BsonDateTime(1))))), + SearchFacet.dateFacet("facetName", + fieldPath("fieldName") + // multi must be ignored + .multi("analyzerName"), + 
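+ // Instant boundaries are expected to render as BSON date-times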
asList( + Instant.ofEpochMilli(0), + Instant.ofEpochMilli(1))) + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("facetName", new BsonDocument("type", new BsonString("date")) + .append("path", new BsonString(fieldPath("fieldName").toValue())) + .append("boundaries", new BsonArray(asList( + new BsonDateTime(0), + new BsonDateTime(1)))) + .append("default", new BsonString("defaultBucketName"))), + SearchFacet.dateFacet("facetName", + fieldPath("fieldName"), + asList( + Instant.ofEpochMilli(0), + Instant.ofEpochMilli(1))) + .defaultBucket("defaultBucketName") + .toBsonDocument() + ) + ); + } + + @Test + void combineToBson() { + assertAll( + () -> assertThrows(IllegalArgumentException.class, () -> + // facets must not be empty + SearchFacet.combineToBson(emptyList()) + ), + () -> assertThrows(IllegalStateException.class, () -> + // facet names must be unique + SearchFacet.combineToBson(asList( + SearchFacet.stringFacet("duplicateFacetName", fieldPath("fieldName1")), + SearchFacet.numberFacet("duplicateFacetName", fieldPath("fieldName2"), asList(1, 2)))) + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("facetName", new BsonDocument("type", new BsonString("string")) + .append("path", fieldPath("fieldName").toBsonValue())), + SearchFacet.combineToBson(singleton(SearchFacet.stringFacet("facetName", + fieldPath("fieldName")))) + .toBsonDocument() + ) + ); + } + + private static SearchFacet docExamplePredefined() { + return SearchFacet.stringFacet("facetName", + fieldPath("fieldName")); + } + + private static Document docExampleCustom() { + return new Document("facetName", new Document("type", "string") + .append("path", fieldPath("fieldName").toValue())); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/search/SearchHighlightTest.java b/driver-core/src/test/unit/com/mongodb/client/model/search/SearchHighlightTest.java new file mode 100644 index 00000000000..10995464a06 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/search/SearchHighlightTest.java @@ -0,0 +1,120 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.client.model.search; + +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.Document; +import org.junit.jupiter.api.Test; + +import static com.mongodb.client.model.search.SearchPath.fieldPath; +import static com.mongodb.client.model.search.SearchPath.wildcardPath; +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; +import static java.util.Collections.singleton; +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +final class SearchHighlightTest { + @Test + void of() { + assertEquals( + docExamplePredefined() + .toBsonDocument(), + SearchHighlight.of(docExampleCustom()) + .toBsonDocument() + ); + } + + @Test + void path() { + assertEquals( + new BsonDocument("path", + fieldPath("fieldName").toBsonValue()), + SearchHighlight.paths( + fieldPath("fieldName")) + .toBsonDocument() + ); + } + + @Test + void paths() { + assertAll( + () -> assertThrows(IllegalArgumentException.class, () -> + // paths must not be empty + SearchHighlight.paths(emptyList()) + ), + () -> assertEquals( + docExampleCustom() + .toBsonDocument(), + docExamplePredefined() + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("path", new BsonArray(asList( + fieldPath("fieldName").toBsonValue(), + wildcardPath("wildc*rd").toBsonValue()))), + SearchHighlight.paths( + fieldPath("fieldName"), + wildcardPath("wildc*rd")) + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("path", + fieldPath("fieldName").toBsonValue()) + .append("maxCharsToExamine", new BsonInt32(10)), + SearchHighlight.paths( + fieldPath("fieldName")) + .maxCharsToExamine(10) + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("path", + fieldPath("fieldName").toBsonValue()) + .append("maxNumPassages", new BsonInt32(20)), + SearchHighlight.paths( + singleton(fieldPath("fieldName"))) + .maxNumPassages(20) + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("path", + fieldPath("fieldName").toBsonValue()) + .append("maxCharsToExamine", new BsonInt32(10)) + .append("maxNumPassages", new BsonInt32(20)), + SearchHighlight.paths( + singleton(fieldPath("fieldName"))) + .maxCharsToExamine(10) + .maxNumPassages(20) + .toBsonDocument() + ) + ); + } + + private static SearchHighlight docExamplePredefined() { + return SearchHighlight.paths( + fieldPath("fieldName"), + wildcardPath("wildc*rd")); + } + + private static Document docExampleCustom() { + return new Document("path", asList( + fieldPath("fieldName").toBsonValue(), + wildcardPath("wildc*rd").toBsonValue())); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/search/SearchOperatorTest.java b/driver-core/src/test/unit/com/mongodb/client/model/search/SearchOperatorTest.java new file mode 100644 index 00000000000..ccf5a44cd1f --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/search/SearchOperatorTest.java @@ -0,0 +1,1014 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import com.mongodb.MongoClientSettings; +import com.mongodb.client.model.geojson.Point; +import com.mongodb.client.model.geojson.Position; +import org.bson.BsonArray; +import org.bson.BsonBinary; +import org.bson.BsonBoolean; +import org.bson.BsonDateTime; +import org.bson.BsonDocument; +import org.bson.BsonDouble; +import org.bson.BsonInt32; +import org.bson.BsonInt64; +import org.bson.BsonNull; +import org.bson.BsonObjectId; +import org.bson.BsonString; +import org.bson.Document; +import org.bson.UuidRepresentation; +import org.bson.codecs.configuration.CodecRegistries; +import org.bson.conversions.Bson; +import org.bson.types.ObjectId; +import org.junit.jupiter.api.Test; + +import java.time.Duration; +import java.time.Instant; +import java.util.UUID; + +import static com.mongodb.client.model.search.FuzzySearchOptions.fuzzySearchOptions; +import static com.mongodb.client.model.search.SearchPath.fieldPath; +import static com.mongodb.client.model.search.SearchPath.wildcardPath; +import static com.mongodb.client.model.search.SearchScore.boost; +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; +import static java.util.Collections.singleton; +import static java.util.Collections.singletonList; +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +final class SearchOperatorTest { + @Test + void of() { + assertAll( + () -> assertEquals( + docExamplePredefined() + .toBsonDocument(), + SearchOperator.of(docExampleCustom()) + .toBsonDocument() + ), + () -> assertEquals( + docExamplePredefined() + .score(boost(2)) + .toBsonDocument(), + SearchOperator.of(docExampleCustom()) + .score(boost(2)) + .toBsonDocument() + ) + ); + } + + @Test + void compound() { + assertAll( + // combinations must not be empty + () -> assertThrows(IllegalArgumentException.class, () -> + SearchOperator.compound().must(emptyList()) + ), + () -> assertThrows(IllegalArgumentException.class, () -> + SearchOperator.compound().mustNot(emptyList()) + ), + () -> assertThrows(IllegalArgumentException.class, () -> + SearchOperator.compound().should(emptyList()) + ), + () -> assertThrows(IllegalArgumentException.class, () -> + SearchOperator.compound().filter(emptyList()) + ), + () -> assertEquals( + new BsonDocument("compound", new BsonDocument() + .append("must", new BsonArray(singletonList(SearchOperator.exists(fieldPath("fieldName1")).toBsonDocument()))) + .append("mustNot", new BsonArray(singletonList(SearchOperator.exists(fieldPath("fieldName2")) + .score(boost(0.1f)).toBsonDocument()))) + .append("should", new BsonArray(asList( + SearchOperator.exists(fieldPath("fieldName3")).toBsonDocument(), + SearchOperator.exists(fieldPath("fieldName4")).toBsonDocument(), + SearchOperator.exists(fieldPath("fieldName5")).toBsonDocument()))) + .append("filter", new BsonArray(singletonList(SearchOperator.exists(fieldPath("fieldName6")).toBsonDocument()))) + .append("minimumShouldMatch", new BsonInt32(1)) + ), + 
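+ // the fluent builder below must render exactly the expected document above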
SearchOperator.compound() + .must(singleton(SearchOperator.exists(fieldPath("fieldName1")))) + .mustNot(singleton(SearchOperator.exists(fieldPath("fieldName2")) + .score(boost(0.1f)))) + .should(singleton(SearchOperator.exists(fieldPath("fieldName3")))) + // appends to the existing operators combined with the same rule + .should(asList( + SearchOperator.exists(fieldPath("fieldName4")), + SearchOperator.exists(fieldPath("fieldName5")))) + .minimumShouldMatch(2) + // overrides the previous value + .minimumShouldMatch(1) + .filter(singleton(SearchOperator.exists(fieldPath("fieldName6")))) + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("compound", new BsonDocument( + "filter", new BsonArray(singletonList( + SearchOperator.compound().filter(singleton( + SearchOperator.exists(fieldPath("fieldName")))).toBsonDocument()))) + ), + SearchOperator.compound().filter(singleton( + // nested compound operators are allowed + SearchOperator.compound().filter(singleton( + SearchOperator.exists(fieldPath("fieldName")))))) + .toBsonDocument() + ) + ); + } + + @Test + void exists() { + assertAll( + () -> assertEquals( + docExampleCustom() + .toBsonDocument(), + docExamplePredefined() + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("exists", new BsonDocument("path", new BsonString(fieldPath("fieldName").toValue()))), + SearchOperator.exists( + fieldPath("fieldName") + // multi must be ignored + .multi("analyzerName")) + .toBsonDocument() + ) + ); + } + + @Test + void text() { + assertAll( + () -> assertThrows(IllegalArgumentException.class, () -> + // queries must not be empty + SearchOperator.text(singleton(fieldPath("fieldName")), emptyList()) + ), + () -> assertThrows(IllegalArgumentException.class, () -> + // paths must not be empty + SearchOperator.text(emptyList(), singleton("term")) + ), + () -> assertEquals( + new BsonDocument("text", + new BsonDocument("path", fieldPath("fieldName").toBsonValue()) + .append("query", new BsonString("term")) + ), + SearchOperator.text( + fieldPath("fieldName"), + "term") + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("text", + new BsonDocument("path", new BsonArray(asList( + fieldPath("fieldName").toBsonValue(), + wildcardPath("wildc*rd").toBsonValue()))) + .append("query", new BsonArray(asList( + new BsonString("term1"), + new BsonString("term2")))) + ), + SearchOperator.text( + asList( + fieldPath("fieldName"), + wildcardPath("wildc*rd")), + asList( + "term1", + "term2")) + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("text", + new BsonDocument("path", fieldPath("fieldName").toBsonValue()) + .append("query", new BsonString("term")) + .append("synonyms", new BsonString("synonymMappingName")) + ), + SearchOperator.text( + singleton(fieldPath("fieldName")), + singleton("term")) + .fuzzy(fuzzySearchOptions()) + // synonyms overrides fuzzy + .synonyms("synonymMappingName") + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("text", + new BsonDocument("path", fieldPath("fieldName").toBsonValue()) + .append("query", new BsonString("term")) + .append("fuzzy", new BsonDocument()) + ), + SearchOperator.text( + singleton(fieldPath("fieldName")), + singleton("term")) + .synonyms("synonymMappingName") + // fuzzy overrides synonyms + .fuzzy() + .toBsonDocument() + ) + ); + } + + @Test + void autocomplete() { + assertAll( + () -> assertThrows(IllegalArgumentException.class, () -> + // queries must not be empty + SearchOperator.autocomplete(fieldPath("fieldName"), emptyList()) + ), + () -> 
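+ // a single term renders "query" as a plain string rather than an array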
assertEquals(
+ new BsonDocument("autocomplete",
+ new BsonDocument("path", fieldPath("fieldName").toBsonValue())
+ .append("query", new BsonString("term"))
+ ),
+ SearchOperator.autocomplete(
+ fieldPath("fieldName"),
+ "term")
+ .toBsonDocument()
+ ),
+ () -> assertEquals(
+ new BsonDocument("autocomplete",
+ new BsonDocument("path", fieldPath("fieldName").toBsonValue())
+ .append("query", new BsonArray(asList(
+ new BsonString("term1"),
+ new BsonString("term2"))))
+ ),
+ SearchOperator.autocomplete(
+ fieldPath("fieldName")
+ // multi must be ignored
+ .multi("analyzerName"),
+ asList(
+ "term1",
+ "term2"))
+ .toBsonDocument()
+ ),
+ () -> assertEquals(
+ new BsonDocument("autocomplete",
+ new BsonDocument("path", fieldPath("fieldName").toBsonValue())
+ .append("query", new BsonString("term"))
+ .append("fuzzy", new BsonDocument()
+ .append("maxExpansions", new BsonInt32(10))
+ .append("maxEdits", new BsonInt32(1)))
+ .append("tokenOrder", new BsonString("any"))
+ ),
+ SearchOperator.autocomplete(
+ fieldPath("fieldName")
+ // multi must be ignored
+ .multi("analyzerName"),
+ singleton("term"))
+ .fuzzy(fuzzySearchOptions()
+ .maxExpansions(10)
+ .maxEdits(1))
+ .sequentialTokenOrder()
+ // anyTokenOrder overrides sequentialTokenOrder
+ .anyTokenOrder()
+ .toBsonDocument()
+ )
+ );
+ }
+
+ @Test
+ void numberRange() {
+ assertAll(
+ () -> assertThrows(IllegalArgumentException.class, () ->
+ // paths must not be empty
+ SearchOperator.numberRange(emptyList())
+ ),
+ () -> assertEquals(
+ new BsonDocument("range",
+ new BsonDocument("path", fieldPath("fieldName").toBsonValue())
+ .append("gt", new BsonDouble(Double.MIN_VALUE))
+ ),
+ SearchOperator.numberRange(
+ singleton(fieldPath("fieldName")
+ // multi must be ignored
+ .multi("analyzerName")))
+ .gteLte(-1, 1)
+ // gt overrides the bounds set above
+ .gt(Double.MIN_VALUE)
+ .toBsonDocument()
+ ),
+ () -> assertEquals(
+ new BsonDocument("range",
+ new BsonDocument("path", fieldPath("fieldName").toBsonValue())
+ .append("lt", new BsonInt32(Integer.MAX_VALUE))
+ ),
+ SearchOperator.numberRange(
+ fieldPath("fieldName")
+ // multi must be ignored
+ .multi("analyzerName"))
+ .lt(Integer.MAX_VALUE)
+ .toBsonDocument()
+ ),
+ () -> assertEquals(
+ new BsonDocument("range",
+ new BsonDocument("path", fieldPath("fieldName").toBsonValue())
+ .append("gte", new BsonDouble(Float.MIN_VALUE))
+ .append("score", new BsonDocument("boost", new BsonDocument("value", new BsonDouble(0.5))))
+ ),
+ SearchOperator.numberRange(
+ fieldPath("fieldName"))
+ .gte(Float.MIN_VALUE)
+ .score(boost(0.5f))
+ .toBsonDocument()
+ ),
+ () -> assertEquals(
+ new BsonDocument("range",
+ new BsonDocument("path", fieldPath("fieldName").toBsonValue())
+ .append("lte", new BsonInt64(Long.MAX_VALUE))
+ ),
+ SearchOperator.numberRange(
+ fieldPath("fieldName"))
+ .lte(Long.MAX_VALUE)
+ .toBsonDocument()
+ ),
+ () -> assertEquals(
+ new BsonDocument("range",
+ new BsonDocument("path", fieldPath("fieldName").toBsonValue())
+ .append("gt", new BsonInt32(-1)).append("lt", new BsonInt32(1))
+ ),
+ SearchOperator.numberRange(
+ fieldPath("fieldName"))
+ .gtLt(-1, 1)
+ .toBsonDocument()
+ ),
+ () -> assertEquals(
+ new BsonDocument("range",
+ new BsonDocument("path", fieldPath("fieldName").toBsonValue())
+ .append("gte", new BsonInt32(-1)).append("lte", new BsonInt32(1))
+ ),
+ SearchOperator.numberRange(
+ fieldPath("fieldName"))
+ .gteLte(-1, 1)
+ .toBsonDocument()
+ ),
+ () -> assertEquals(
+ new BsonDocument("range",
+ new BsonDocument("path", fieldPath("fieldName").toBsonValue())
+ .append("gt", new BsonInt32(-1)).append("lte", new BsonInt32(1))
+ ),
+ SearchOperator.numberRange(
+ fieldPath("fieldName"))
+ .gtLte(-1, 1)
+ .toBsonDocument()
+ ),
+ () -> assertEquals(
+ new BsonDocument("range",
+ new BsonDocument("path", fieldPath("fieldName").toBsonValue())
+ .append("gte", new BsonInt32(-1)).append("lt", new BsonInt32(1))
+ ),
+ SearchOperator.numberRange(
+ fieldPath("fieldName"))
+ .gteLt(-1, 1)
+ .toBsonDocument()
+ )
+ );
+ }
+
+ @Test
+ void dateRange() {
+ assertAll(
+ () -> assertThrows(IllegalArgumentException.class, () ->
+ // paths must not be empty
+ SearchOperator.dateRange(emptyList())
+ ),
+ () -> assertEquals(
+ new BsonDocument("range",
+ new BsonDocument("path", fieldPath("fieldName").toBsonValue())
+ .append("gt", new BsonDateTime(0))
+ ),
+ SearchOperator.dateRange(
+ singleton(fieldPath("fieldName")
+ // multi must be ignored
+ .multi("analyzerName")))
+ .gteLte(Instant.ofEpochMilli(-1), Instant.ofEpochMilli(1))
+ // gt overrides the bounds set above
+ .gt(Instant.EPOCH)
+ .toBsonDocument()
+ ),
+ () -> assertEquals(
+ new BsonDocument("range",
+ new BsonDocument("path", fieldPath("fieldName").toBsonValue())
+ .append("lt", new BsonDateTime(0))
+ ),
+ SearchOperator.dateRange(
+ fieldPath("fieldName")
+ // multi must be ignored
+ .multi("analyzerName"))
+ .lt(Instant.EPOCH)
+ .toBsonDocument()
+ ),
+ () -> assertEquals(
+ new BsonDocument("range",
+ new BsonDocument("path", fieldPath("fieldName").toBsonValue())
+ .append("gte", new BsonDateTime(0))
+ .append("score", new BsonDocument("boost", new BsonDocument("value", new BsonDouble(0.5))))
+ ),
+ SearchOperator.dateRange(
+ fieldPath("fieldName"))
+ .gte(Instant.EPOCH)
+ .score(boost(0.5f))
+ .toBsonDocument()
+ ),
+ () -> assertEquals(
+ new BsonDocument("range",
+ new BsonDocument("path", fieldPath("fieldName").toBsonValue())
+ .append("lte", new BsonDateTime(0))
+ ),
+ SearchOperator.dateRange(
+ fieldPath("fieldName"))
+ .lte(Instant.EPOCH)
+ .toBsonDocument()
+ ),
+ () -> assertEquals(
+ new BsonDocument("range",
+ new BsonDocument("path", fieldPath("fieldName").toBsonValue())
+ .append("gt", new BsonDateTime(-1))
+ .append("lt", new BsonDateTime(1))
+ ),
+ SearchOperator.dateRange(
+ fieldPath("fieldName"))
+ .gtLt(
+ Instant.ofEpochMilli(-1),
+ Instant.ofEpochMilli(1))
+ .toBsonDocument()
+ ),
+ () -> assertEquals(
+ new BsonDocument("range",
+ new BsonDocument("path", fieldPath("fieldName").toBsonValue())
+ .append("gte", new BsonDateTime(-1))
+ .append("lte", new BsonDateTime(1))
+ ),
+ SearchOperator.dateRange(
+ fieldPath("fieldName"))
+ .gteLte(
+ Instant.ofEpochMilli(-1),
+ Instant.ofEpochMilli(1))
+ .toBsonDocument()
+ ),
+ () -> assertEquals(
+ new BsonDocument("range",
+ new BsonDocument("path", fieldPath("fieldName").toBsonValue())
+ .append("gt", new BsonDateTime(-1))
+ .append("lte", new BsonDateTime(1))
+ ),
+ SearchOperator.dateRange(
+ fieldPath("fieldName"))
+ .gtLte(
+ Instant.ofEpochMilli(-1),
+ Instant.ofEpochMilli(1))
+ .toBsonDocument()
+ ),
+ () -> assertEquals(
+ new BsonDocument("range",
+ new BsonDocument("path", fieldPath("fieldName").toBsonValue())
+ .append("gte", new BsonDateTime(-1))
+ .append("lt", new BsonDateTime(1))
+ ),
+ SearchOperator.dateRange(
+ fieldPath("fieldName"))
+ .gteLt(
+ Instant.ofEpochMilli(-1),
+ Instant.ofEpochMilli(1))
+ .toBsonDocument()
+ )
+ );
+ }
+
+ @Test
+ void near() {
+ assertAll(
+ () -> assertThrows(IllegalArgumentException.class, () ->
+ // paths must not be empty
+ SearchOperator.near(new Point(new Position(0, 0)), 1, emptyList())
+ ),
+ () ->
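+ // a numeric origin renders "origin" as a plain number and the pivot as a double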
new BsonDocument("near", + new BsonDocument("origin", new BsonInt32(0)) + .append("path", new BsonString(fieldPath("fieldName1").toValue())) + .append("pivot", new BsonDouble(1.5)) + ), + SearchOperator.near( + 0, + 1.5, + fieldPath("fieldName1") + // multi must be ignored + .multi("analyzeName")) + .toBsonDocument(BsonDocument.class, MongoClientSettings.getDefaultCodecRegistry()) + ), + () -> assertEquals( + new BsonDocument("near", + new BsonDocument("origin", new BsonDouble(1.5)) + .append("path", new BsonArray(asList( + new BsonString(fieldPath("fieldName1").toValue()), + new BsonString(fieldPath("fieldName2").toValue())))) + .append("pivot", new BsonDouble(1.5)) + ), + SearchOperator.near( + 1.5, + 1.5, + asList( + fieldPath("fieldName1"), + fieldPath("fieldName2"))) + .toBsonDocument(BsonDocument.class, MongoClientSettings.getDefaultCodecRegistry()) + ), + () -> assertEquals( + new BsonDocument("near", + new BsonDocument("origin", new BsonDateTime(Instant.EPOCH.toEpochMilli())) + .append("path", new BsonString(fieldPath("fieldName1").toValue())) + .append("pivot", new BsonInt64(3)) + ), + SearchOperator.near( + Instant.EPOCH, + Duration.ofMillis(3), + fieldPath("fieldName1") + // multi must be ignored + .multi("analyzeName")) + .toBsonDocument(BsonDocument.class, MongoClientSettings.getDefaultCodecRegistry()) + ), + () -> assertEquals( + new BsonDocument("near", + new BsonDocument("origin", new BsonDateTime(Instant.EPOCH.toEpochMilli())) + .append("path", new BsonArray(asList( + new BsonString(fieldPath("fieldName1").toValue()), + new BsonString(fieldPath("fieldName2").toValue())))) + .append("pivot", new BsonInt64(3)) + ), + SearchOperator.near( + Instant.EPOCH, + Duration.ofMillis(3), + fieldPath("fieldName1"), + fieldPath("fieldName2")) + .toBsonDocument(BsonDocument.class, MongoClientSettings.getDefaultCodecRegistry()) + ), + () -> assertEquals( + new BsonDocument("near", + new BsonDocument("origin", new BsonDocument("type", new BsonString("Point")) + .append("coordinates", new BsonArray(asList(new BsonDouble(1), new BsonDouble(2))))) + .append("path", new BsonString(fieldPath("fieldName1").toValue())) + .append("pivot", new BsonDouble(1.5)) + ), + SearchOperator.near( + new Point( + new Position(1, 2)), + 1.5, + fieldPath("fieldName1") + // multi must be ignored + .multi("analyzeName")) + .toBsonDocument(BsonDocument.class, MongoClientSettings.getDefaultCodecRegistry()) + ), + () -> assertEquals( + new BsonDocument("near", + new BsonDocument("origin", new BsonDocument("type", new BsonString("Point")) + .append("coordinates", new BsonArray(asList(new BsonDouble(1), new BsonDouble(2))))) + .append("path", new BsonArray(asList( + new BsonString(fieldPath("fieldName1").toValue()), + new BsonString(fieldPath("fieldName2").toValue())))) + .append("pivot", new BsonDouble(1.5)) + ), + SearchOperator.near( + new Point( + new Position(1, 2)), + 1.5, + asList( + fieldPath("fieldName1"), + fieldPath("fieldName2"))) + .toBsonDocument(BsonDocument.class, MongoClientSettings.getDefaultCodecRegistry()) + ) + ); + } + + @Test + void in() { + ObjectId objectId = new ObjectId(); + UUID uuid = UUID.randomUUID(); + assertAll( + () -> assertThrows(IllegalArgumentException.class, () -> + // paths must not be empty + SearchOperator.in(null, true) + ), + () -> assertEquals( + new BsonDocument("in", + new BsonDocument("path", fieldPath("fieldName1").toBsonValue()) + .append("value", new BsonBoolean(true)) + ), + SearchOperator.in(fieldPath("fieldName1"), true) + .toBsonDocument() + ), + () -> 
assertEquals( + new BsonDocument("in", + new BsonDocument("path", fieldPath("fieldName1").toBsonValue()) + .append("value", new BsonArray(asList(new BsonBoolean(true), new BsonBoolean(false)))) + ), + SearchOperator.in(fieldPath("fieldName1"), asList(true, false)) + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("in", + new BsonDocument("path", fieldPath("fieldName1").toBsonValue()) + .append("value", new BsonObjectId(objectId)) + ), + SearchOperator.in(fieldPath("fieldName1"), objectId) + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("in", + new BsonDocument("path", fieldPath("fieldName1").toBsonValue()) + .append("value", new BsonInt32(1)) + ), + SearchOperator.in(fieldPath("fieldName1"), 1) + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("in", + new BsonDocument("path", fieldPath("fieldName1").toBsonValue()) + .append("value", new BsonInt64(Long.MAX_VALUE)) + ), + SearchOperator.in(fieldPath("fieldName1"), Long.MAX_VALUE) + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("in", + new BsonDocument("path", fieldPath("fieldName1").toBsonValue()) + .append("value", new BsonDouble(Double.MAX_VALUE)) + ), + SearchOperator.in(fieldPath("fieldName1"), Double.MAX_VALUE) + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("in", + new BsonDocument("path", fieldPath("fieldName1").toBsonValue()) + .append("value", new BsonDateTime(Instant.EPOCH.toEpochMilli())) + ), + SearchOperator.in(fieldPath("fieldName1"), Instant.EPOCH) + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("in", + new BsonDocument("path", fieldPath("fieldName1").toBsonValue()) + .append("value", new BsonBinary(uuid)) + ), + SearchOperator.in(fieldPath("fieldName1"), uuid) + .toBsonDocument( + Document.class, + CodecRegistries.withUuidRepresentation(Bson.DEFAULT_CODEC_REGISTRY, UuidRepresentation.STANDARD)) + ), + () -> assertEquals( + new BsonDocument("in", + new BsonDocument("path", fieldPath("fieldName1").toBsonValue()) + .append("value", new BsonString("value")) + ), + SearchOperator.in(fieldPath("fieldName1"), "value") + .toBsonDocument() + ) + ); + } + + @Test + void equals() { + ObjectId objectId = new ObjectId(); + UUID uuid = UUID.randomUUID(); + assertAll( + () -> assertThrows(IllegalArgumentException.class, () -> + // path must not be null + SearchOperator.equals(null, "term") + ), + () -> assertEquals( + new BsonDocument("equals", + new BsonDocument("path", fieldPath("fieldName").toBsonValue()) + .append("value", new BsonBoolean(true)) + ), + SearchOperator.equals( + fieldPath("fieldName"), + true) + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("equals", + new BsonDocument("path", fieldPath("fieldName").toBsonValue()) + .append("value", new BsonObjectId(objectId)) + ), + SearchOperator.equals( + fieldPath("fieldName"), + objectId) + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("equals", + new BsonDocument("path", fieldPath("fieldName").toBsonValue()) + .append("value", new BsonInt32(1)) + ), + SearchOperator.equals( + fieldPath("fieldName"), + 1) + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("equals", + new BsonDocument("path", fieldPath("fieldName").toBsonValue()) + .append("value", new BsonInt64(Long.MAX_VALUE)) + ), + SearchOperator.equals( + fieldPath("fieldName"), + Long.MAX_VALUE) + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("equals", + new BsonDocument("path", fieldPath("fieldName").toBsonValue()) + .append("value", new 
BsonDouble(Double.MAX_VALUE)) + ), + SearchOperator.equals( + fieldPath("fieldName"), + Double.MAX_VALUE) + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("equals", + new BsonDocument("path", fieldPath("fieldName").toBsonValue()) + .append("value", new BsonDateTime(Instant.EPOCH.toEpochMilli())) + ), + SearchOperator.equals( + fieldPath("fieldName"), + Instant.EPOCH) + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("equals", + new BsonDocument("path", fieldPath("fieldName").toBsonValue()) + .append("value", new BsonString("term")) + ), + SearchOperator.equals( + fieldPath("fieldName"), + "term") + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("equals", + new BsonDocument("path", fieldPath("fieldName").toBsonValue()) + .append("value", new BsonBinary(uuid)) + ), + SearchOperator.equals( + fieldPath("fieldName"), + uuid) + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("equals", + new BsonDocument("path", fieldPath("fieldName").toBsonValue()) + .append("value", BsonNull.VALUE) + ), + SearchOperator.equalsNull(fieldPath("fieldName")) + .toBsonDocument() + ) + ); + } + + @Test + void moreLikeThis() { + assertAll( + () -> assertThrows(IllegalArgumentException.class, () -> + // likes must not be empty + SearchOperator.moreLikeThis(emptyList()) + ), + () -> assertEquals( + new BsonDocument("moreLikeThis", + new BsonDocument("like", new BsonDocument("fieldName", new BsonString("fieldValue"))) + ), + SearchOperator.moreLikeThis(new BsonDocument("fieldName", new BsonString("fieldValue"))) + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("moreLikeThis", + new BsonDocument("like", new BsonDocument("fieldName", new BsonString("fieldValue")) + .append("fieldName2", new BsonString("fieldValue2"))) + ), + SearchOperator.moreLikeThis(new BsonDocument("fieldName", new BsonString("fieldValue")) + .append("fieldName2", new BsonString("fieldValue2"))) + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("moreLikeThis", + new BsonDocument("like", new BsonArray(asList( + new BsonDocument("fieldName", new BsonString("fieldValue")) + .append("fieldName2", new BsonString("fieldValue2")), + new BsonDocument("fieldName3", new BsonString("fieldValue3")) + ))) + ), + SearchOperator.moreLikeThis(asList( + new BsonDocument("fieldName", new BsonString("fieldValue")) + .append("fieldName2", new BsonString("fieldValue2")), + new BsonDocument("fieldName3", new BsonString("fieldValue3")))) + .toBsonDocument() + ) + ); + } + + @Test + void wildcard() { + assertAll( + () -> assertThrows(IllegalArgumentException.class, () -> + // queries must not be empty + SearchOperator.wildcard(emptyList(), singleton(fieldPath("fieldName"))) + ), + () -> assertThrows(IllegalArgumentException.class, () -> + // paths must not be empty + SearchOperator.wildcard(singleton("term"), emptyList()) + ), + () -> assertEquals( + new BsonDocument("wildcard", + new BsonDocument("query", new BsonString("term")) + .append("path", fieldPath("fieldName").toBsonValue()) + ), + SearchOperator.wildcard( + fieldPath("fieldName"), "term" + ) + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("wildcard", + new BsonDocument("query", new BsonArray(asList( + new BsonString("term1"), + new BsonString("term2")))) + .append("path", new BsonArray(asList( + fieldPath("fieldName").toBsonValue(), + wildcardPath("wildc*rd").toBsonValue()))) + ), + SearchOperator.wildcard( + asList( + "term1", + "term2"), + asList( + fieldPath("fieldName"), + 
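// wildcard paths are accepted here alongside plain field paths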
wildcardPath("wildc*rd")))
+                        .toBsonDocument()
+                )
+        );
+    }
+
+    @Test
+    void queryString() {
+        assertAll(
+                () -> assertThrows(IllegalArgumentException.class, () ->
+                        // query must not be null
+                        SearchOperator.queryString(fieldPath("fieldName"), null)
+                ),
+                () -> assertThrows(IllegalArgumentException.class, () ->
+                        // defaultPath must not be null
+                        SearchOperator.queryString(null, "term1 AND (term2 OR term3)")
+                ),
+                () -> assertEquals(
+                        new BsonDocument("queryString",
+                                new BsonDocument("defaultPath", fieldPath("fieldName").toBsonValue())
+                                        .append("query", new BsonString("term1 AND (term2 OR term3)"))
+                        ),
+                        SearchOperator.queryString(
+                                fieldPath("fieldName"),
+                                "term1 AND (term2 OR term3)")
+                                .toBsonDocument()
+                )
+        );
+    }
+
+    @Test
+    void phrase() {
+        assertAll(
+                () -> assertThrows(IllegalArgumentException.class, () ->
+                        // queries must not be empty
+                        SearchOperator.phrase(singleton(fieldPath("fieldName")), emptyList())
+                ),
+                () -> assertThrows(IllegalArgumentException.class, () ->
+                        // paths must not be empty
+                        SearchOperator.phrase(emptyList(), singleton("term"))
+                ),
+                () -> assertEquals(
+                        new BsonDocument("phrase",
+                                new BsonDocument("path", fieldPath("fieldName").toBsonValue())
+                                        .append("query", new BsonString("term"))
+                        ),
+                        SearchOperator.phrase(
+                                fieldPath("fieldName"),
+                                "term")
+                                .toBsonDocument()
+                ),
+                () -> assertEquals(
+                        new BsonDocument("phrase",
+                                new BsonDocument("path", new BsonArray(asList(
+                                        fieldPath("fieldName").toBsonValue(),
+                                        wildcardPath("wildc*rd").toBsonValue())))
+                                        .append("query", new BsonArray(asList(
+                                                new BsonString("term1"),
+                                                new BsonString("term2"))))
+                        ),
+                        SearchOperator.phrase(
+                                asList(
+                                        fieldPath("fieldName"),
+                                        wildcardPath("wildc*rd")),
+                                asList(
+                                        "term1",
+                                        "term2"))
+                                .toBsonDocument()
+                ),
+                () -> assertEquals(
+                        new BsonDocument("phrase",
+                                new BsonDocument("path", fieldPath("fieldName").toBsonValue())
+                                        .append("query", new BsonString("term"))
+                                        .append("synonyms", new BsonString("synonymMappingName"))
+                        ),
+                        SearchOperator.phrase(
+                                singleton(fieldPath("fieldName")),
+                                singleton("term"))
+                                .synonyms("synonymMappingName")
+                                .toBsonDocument()
+                ),
+                () -> assertEquals(
+                        new BsonDocument("phrase",
+                                new BsonDocument("path", fieldPath("fieldName").toBsonValue())
+                                        .append("query", new BsonString("term"))
+                                        .append("synonyms", new BsonString("synonymMappingName"))
+                                        .append("slop", new BsonInt32(5))
+                        ),
+                        SearchOperator.phrase(
+                                singleton(fieldPath("fieldName")),
+                                singleton("term"))
+                                .synonyms("synonymMappingName")
+                                .slop(5)
+                                .toBsonDocument()
+                )
+        );
+    }
+
+    @Test
+    void regex() {
+        assertAll(
+                () -> assertThrows(IllegalArgumentException.class, () ->
+                        // queries must not be empty
+                        SearchOperator.regex(singleton(fieldPath("fieldName")), emptyList())
+                ),
+                () -> assertThrows(IllegalArgumentException.class, () ->
+                        // paths must not be empty
+                        SearchOperator.regex(emptyList(), singleton("term"))
+                ),
+                () -> assertEquals(
+                        new BsonDocument("regex",
+                                new BsonDocument("path", fieldPath("fieldName").toBsonValue())
+                                        .append("query", new BsonString("term"))
+                        ),
+                        SearchOperator.regex(
+                                fieldPath("fieldName"),
+                                "term")
+                                .toBsonDocument()
+                ),
+                () -> assertEquals(
+                        new BsonDocument("regex",
+                                new BsonDocument("path", fieldPath("fieldName").toBsonValue())
+                                        .append("query", new BsonString("term"))
+                        ),
+                        SearchOperator.regex(
+                                singleton(fieldPath("fieldName")),
+                                singleton("term"))
+                                .toBsonDocument()
+                ),
+                () -> assertEquals(
+                        new BsonDocument("regex",
+                                new BsonDocument("path", new BsonArray(asList(
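+                                        // multiple regex paths serialize as a BSON array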
fieldPath("fieldName").toBsonValue(), + wildcardPath("wildc*rd").toBsonValue()))) + .append("query", new BsonArray(asList( + new BsonString("term1"), + new BsonString("term2")))) + ), + SearchOperator.regex( + asList( + fieldPath("fieldName"), + wildcardPath("wildc*rd")), + asList( + "term1", + "term2")) + .toBsonDocument() + ) + ); + } + + private static SearchOperator docExamplePredefined() { + return SearchOperator.exists( + fieldPath("fieldName")); + } + + private static Document docExampleCustom() { + return new Document("exists", + new Document("path", fieldPath("fieldName").toValue())); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/search/SearchOptionsTest.java b/driver-core/src/test/unit/com/mongodb/client/model/search/SearchOptionsTest.java new file mode 100644 index 00000000000..efdd26ebca8 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/search/SearchOptionsTest.java @@ -0,0 +1,160 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client.model.search; + +import org.bson.BsonArray; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.junit.jupiter.api.Test; + +import static com.mongodb.client.model.search.SearchCount.total; +import static com.mongodb.client.model.search.SearchHighlight.paths; +import static com.mongodb.client.model.search.SearchPath.fieldPath; +import static com.mongodb.client.model.search.SearchPath.wildcardPath; +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; + +final class SearchOptionsTest { + @Test + void searchOptions() { + assertEquals( + new BsonDocument(), + SearchOptions.searchOptions() + .toBsonDocument() + ); + } + + @Test + void option() { + assertAll( + () -> assertEquals( + SearchOptions.searchOptions() + .index("indexName") + .toBsonDocument(), + SearchOptions.searchOptions() + .option("index", new BsonString("indexName")) + .toBsonDocument() + ), + () -> assertEquals( + SearchOptions.searchOptions() + .option("index", "indexName") + .toBsonDocument(), + SearchOptions.searchOptions() + .option("index", new BsonString("indexName")) + .toBsonDocument() + ) + ); + } + + @Test + void index() { + assertEquals( + new BsonDocument() + .append("index", new BsonString("indexName")), + SearchOptions.searchOptions() + .index("indexName") + .toBsonDocument() + ); + } + + @Test + void highlight() { + assertAll( + () -> assertEquals( + new BsonDocument() + .append("highlight", new BsonDocument() + .append("path", wildcardPath("wildc*rd").toBsonValue())), + SearchOptions.searchOptions() + .highlight( + paths(wildcardPath("wildc*rd"))) + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument() + .append("highlight", new BsonDocument() + .append("path", new BsonArray(asList( + wildcardPath("wildc*rd").toBsonValue(), + 
fieldPath("fieldName").toBsonValue())))), + SearchOptions.searchOptions() + .highlight( + paths( + wildcardPath("wildc*rd"), + fieldPath("fieldName"))) + .toBsonDocument() + ) + ); + } + + @Test + void count() { + assertEquals( + new BsonDocument() + .append("count", total().toBsonDocument()), + SearchOptions.searchOptions() + .count(total()) + .toBsonDocument() + ); + } + + @Test + void returnStoredSource() { + assertEquals( + new BsonDocument() + .append("returnStoredSource", new BsonBoolean(true)), + SearchOptions.searchOptions() + .returnStoredSource(true) + .toBsonDocument() + ); + } + + @Test + void options() { + assertEquals( + new BsonDocument() + .append("index", new BsonString("indexName")) + .append("name", new BsonArray(singletonList(new BsonString("value")))) + .append("highlight", new BsonDocument() + .append("path", fieldPath("fieldName").toBsonValue())) + .append("count", total().toBsonDocument()) + .append("returnStoredSource", new BsonBoolean(true)), + SearchOptions.searchOptions() + .index("indexName") + .option("name", new BsonArray(singletonList(new BsonString("value")))) + .highlight( + paths(fieldPath("fieldName"))) + .count(total()) + .returnStoredSource(true) + .toBsonDocument() + ); + } + + @Test + void searchOptionsIsUnmodifiable() { + String expected = SearchOptions.searchOptions().toBsonDocument().toJson(); + SearchOptions.searchOptions().option("name", "value"); + assertEquals(expected, SearchOptions.searchOptions().toBsonDocument().toJson()); + } + + @Test + void searchOptionsIsImmutable() { + String expected = SearchOptions.searchOptions().toBsonDocument().toJson(); + SearchOptions.searchOptions().toBsonDocument().append("name", new BsonString("value")); + assertEquals(expected, SearchOptions.searchOptions().toBsonDocument().toJson()); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/search/SearchPathTest.java b/driver-core/src/test/unit/com/mongodb/client/model/search/SearchPathTest.java new file mode 100644 index 00000000000..6258c2f1296 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/search/SearchPathTest.java @@ -0,0 +1,67 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.client.model.search; + +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +final class SearchPathTest { + @Test + void fieldPath() { + assertAll( + () -> assertThrows(IllegalArgumentException.class, () -> + // path must not contain '*' + SearchPath.fieldPath("wildc*rd") + ), + () -> assertEquals( + new BsonString("fieldName"), + SearchPath.fieldPath("fieldName") + .toBsonValue() + ), + () -> assertEquals( + new BsonDocument("value", new BsonString("fieldName")) + .append("multi", new BsonString("analyzerName")), + SearchPath.fieldPath("fieldName") + .multi("analyzerName") + .toBsonValue() + ) + ); + } + + @Test + void wildcardPath() { + assertAll( + () -> assertThrows(IllegalArgumentException.class, () -> + // wildcardPath must contain '*' + SearchPath.wildcardPath("wildcard") + ), + () -> assertThrows(IllegalArgumentException.class, () -> + // wildcardPath must not contain '**' + SearchPath.wildcardPath("wildc**rd") + ), + () -> assertEquals( + new BsonDocument("wildcard", new BsonString("wildc*rd")), + SearchPath.wildcardPath("wildc*rd") + .toBsonValue() + ) + ); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/search/SearchScoreExpressionTest.java b/driver-core/src/test/unit/com/mongodb/client/model/search/SearchScoreExpressionTest.java new file mode 100644 index 00000000000..3911716e518 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/search/SearchScoreExpressionTest.java @@ -0,0 +1,196 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.client.model.search; + +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonDouble; +import org.bson.BsonString; +import org.bson.Document; +import org.junit.jupiter.api.Test; + +import static com.mongodb.client.model.search.SearchPath.fieldPath; +import static java.util.Arrays.asList; +import static java.util.Collections.singleton; +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +final class SearchScoreExpressionTest { + @Test + void of() { + assertEquals( + docExamplePredefined() + .toBsonDocument(), + SearchScoreExpression.of(docExampleCustom()) + .toBsonDocument() + ); + } + + @Test + void relevanceExpression() { + assertEquals( + new BsonDocument("score", new BsonString("relevance")), + SearchScoreExpression.relevanceExpression() + .toBsonDocument() + ); + } + + @Test + void pathExpression() { + assertAll( + () -> assertEquals( + docExampleCustom() + .toBsonDocument(), + docExamplePredefined() + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("path", + new BsonDocument("value", new BsonString(fieldPath("fieldName").toValue())) + .append("undefined", new BsonDouble(-1.5))), + SearchScoreExpression.pathExpression( + fieldPath("fieldName") + // multi must be ignored + .multi("analyzerName")) + .undefined(-1.5f) + .toBsonDocument() + ) + ); + } + + @Test + void constantExpression() { + assertEquals( + new BsonDocument("constant", new BsonDouble(-1.5)), + SearchScoreExpression.constantExpression(-1.5f) + .toBsonDocument() + ); + } + + @Test + void gaussExpression() { + assertAll( + () -> assertEquals( + new BsonDocument("gauss", + new BsonDocument("origin", new BsonDouble(50)) + .append("path", SearchScoreExpression.pathExpression( + fieldPath("fieldName")) + .toBsonDocument().values().iterator().next()) + .append("scale", new BsonDouble(1))), + SearchScoreExpression.gaussExpression( + 50, + SearchScoreExpression.pathExpression( + fieldPath("fieldName")), + 1) + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("gauss", + new BsonDocument("origin", new BsonDouble(50)) + .append("path", SearchScoreExpression.pathExpression( + fieldPath("fieldName")) + .undefined(-1.5f) + .toBsonDocument().values().iterator().next()) + .append("scale", new BsonDouble(1)) + .append("offset", new BsonDouble(0)) + .append("decay", new BsonDouble(0.5)) + ), + SearchScoreExpression.gaussExpression( + 50, + SearchScoreExpression.pathExpression( + fieldPath("fieldName")) + .undefined(-1.5f), + 1) + .offset(0) + .decay(0.5) + .toBsonDocument() + ) + ); + } + + @Test + void logExpression() { + assertEquals( + new BsonDocument("log", + SearchScoreExpression.constantExpression(3).toBsonDocument()), + SearchScoreExpression.logExpression( + SearchScoreExpression.constantExpression(3)) + .toBsonDocument() + ); + } + + @Test + void log1pExpression() { + assertEquals( + new BsonDocument("log1p", + SearchScoreExpression.constantExpression(3).toBsonDocument()), + SearchScoreExpression.log1pExpression( + SearchScoreExpression.constantExpression(3)) + .toBsonDocument() + ); + } + + @Test + void addExpression() { + assertAll( + () -> assertThrows(IllegalArgumentException.class, () -> + // expressions must contain at least 2 elements + SearchScoreExpression.addExpression(singleton(SearchScoreExpression.constantExpression(1))) + ), + () -> assertEquals( + new BsonDocument("add", new BsonArray(asList( + 
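// operand expressions are serialized in the order given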
SearchScoreExpression.constantExpression(1.5f).toBsonDocument(), + SearchScoreExpression.relevanceExpression().toBsonDocument()))), + SearchScoreExpression.addExpression(asList( + SearchScoreExpression.constantExpression(1.5f), + SearchScoreExpression.relevanceExpression())) + .toBsonDocument() + ) + ); + } + + @Test + void multiplyExpression() { + assertAll( + () -> assertThrows(IllegalArgumentException.class, () -> + // expressions must contain at least 2 elements + SearchScoreExpression.multiplyExpression(singleton(SearchScoreExpression.constantExpression(1))) + ), + () -> assertEquals( + new BsonDocument("multiply", new BsonArray(asList( + SearchScoreExpression.constantExpression(1.5f).toBsonDocument(), + SearchScoreExpression.relevanceExpression().toBsonDocument()))), + SearchScoreExpression.multiplyExpression(asList( + SearchScoreExpression.constantExpression(1.5f), + SearchScoreExpression.relevanceExpression())) + .toBsonDocument() + ) + ); + } + + private static SearchScoreExpression docExamplePredefined() { + return SearchScoreExpression.pathExpression( + fieldPath("fieldName")) + .undefined(-1.5f); + } + + private static Document docExampleCustom() { + return new Document("path", + new Document("value", fieldPath("fieldName").toValue()) + .append("undefined", -1.5)); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/client/model/search/SearchScoreTest.java b/driver-core/src/test/unit/com/mongodb/client/model/search/SearchScoreTest.java new file mode 100644 index 00000000000..56507871835 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/client/model/search/SearchScoreTest.java @@ -0,0 +1,104 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.client.model.search; + +import org.bson.BsonDocument; +import org.bson.BsonDouble; +import org.bson.BsonString; +import org.bson.Document; +import org.junit.jupiter.api.Test; + +import static com.mongodb.client.model.search.SearchPath.fieldPath; +import static com.mongodb.client.model.search.SearchScoreExpression.constantExpression; +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; + +final class SearchScoreTest { + @Test + void of() { + assertEquals( + docExamplePredefined() + .toBsonDocument(), + SearchScore.of(docExampleCustom()) + .toBsonDocument() + ); + } + + @Test + void valueBoost() { + assertEquals( + new BsonDocument("boost", + new BsonDocument("value", new BsonDouble(0.5))), + SearchScore.boost(0.5f) + .toBsonDocument() + ); + } + + @Test + void pathBoost() { + assertAll( + () -> assertEquals( + docExampleCustom() + .toBsonDocument(), + docExamplePredefined() + .toBsonDocument() + ), + () -> assertEquals( + new BsonDocument("boost", + new BsonDocument("path", + new BsonString(fieldPath("fieldName").toValue())) + .append("undefined", new BsonDouble(1))), + SearchScore.boost( + fieldPath("fieldName") + // multi must be ignored + .multi("analyzerName")) + .undefined(1f) + .toBsonDocument() + ) + ); + } + + @Test + void constant() { + assertEquals( + new BsonDocument("constant", + new BsonDocument("value", new BsonDouble(0.5))), + SearchScore.constant(0.5f) + .toBsonDocument() + ); + } + + @Test + void function() { + assertEquals( + new BsonDocument("function", + constantExpression(1.5f).toBsonDocument()), + SearchScore.function( + constantExpression(1.5f)) + .toBsonDocument() + ); + } + + private static SearchScore docExamplePredefined() { + return SearchScore.boost( + fieldPath("fieldName")); + } + + private static Document docExampleCustom() { + return new Document("boost", + new Document("path", fieldPath("fieldName").toValue())); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/connection/AsyncTransportSettingsTest.java b/driver-core/src/test/unit/com/mongodb/connection/AsyncTransportSettingsTest.java new file mode 100644 index 00000000000..180894ceb78 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/connection/AsyncTransportSettingsTest.java @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.connection; + +import org.junit.jupiter.api.Test; + +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; + +class AsyncTransportSettingsTest { + + @Test + public void shouldDefaultAllValuesToNull() { + AsyncTransportSettings settings = TransportSettings.asyncBuilder().build(); + + assertNull(settings.getExecutorService()); + } + + @Test + public void shouldApplySettingsFromBuilder() { + ExecutorService executorService = Executors.newFixedThreadPool(1); + AsyncTransportSettings settings = TransportSettings.asyncBuilder() + .executorService(executorService) + .build(); + + assertEquals(executorService, settings.getExecutorService()); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/connection/ClusterDescriptionTest.java b/driver-core/src/test/unit/com/mongodb/connection/ClusterDescriptionTest.java new file mode 100644 index 00000000000..39e6fd8024e --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/connection/ClusterDescriptionTest.java @@ -0,0 +1,430 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.mongodb.connection;
+
+import com.mongodb.ReadPreference;
+import com.mongodb.ServerAddress;
+import com.mongodb.Tag;
+import com.mongodb.TagSet;
+import com.mongodb.internal.connection.ClusterDescriptionHelper;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+
+import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE;
+import static com.mongodb.connection.ClusterConnectionMode.SINGLE;
+import static com.mongodb.connection.ClusterType.REPLICA_SET;
+import static com.mongodb.connection.ClusterType.SHARDED;
+import static com.mongodb.connection.ClusterType.STANDALONE;
+import static com.mongodb.connection.ClusterType.UNKNOWN;
+import static com.mongodb.connection.ServerConnectionState.CONNECTED;
+import static com.mongodb.connection.ServerConnectionState.CONNECTING;
+import static com.mongodb.connection.ServerDescription.MAX_DRIVER_WIRE_VERSION;
+import static com.mongodb.connection.ServerDescription.MIN_DRIVER_WIRE_VERSION;
+import static com.mongodb.connection.ServerDescription.builder;
+import static com.mongodb.connection.ServerType.REPLICA_SET_OTHER;
+import static com.mongodb.connection.ServerType.REPLICA_SET_PRIMARY;
+import static com.mongodb.connection.ServerType.REPLICA_SET_SECONDARY;
+import static com.mongodb.internal.connection.ClusterDescriptionHelper.getAll;
+import static com.mongodb.internal.connection.ClusterDescriptionHelper.getAny;
+import static com.mongodb.internal.connection.ClusterDescriptionHelper.getAnyPrimaryOrSecondary;
+import static java.util.Arrays.asList;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+public class ClusterDescriptionTest {
+
+    private ServerDescription primary, secondary, otherSecondary, uninitiatedMember, notOkMember;
+    private ClusterDescription cluster;
+
+    @Before
+    public void setUp() {
+        TagSet tags1 = new TagSet(asList(new Tag("foo", "1"),
+                new Tag("bar", "2"),
+                new Tag("baz", "1")));
+        TagSet tags2 = new TagSet(asList(new Tag("foo", "1"),
+                new Tag("bar", "2"),
+                new Tag("baz", "2")));
+        TagSet tags3 = new TagSet(asList(new Tag("foo", "1"),
+                new Tag("bar", "3"),
+                new Tag("baz", "3")));
+
+        primary = builder()
+                .state(CONNECTED).address(new ServerAddress("localhost", 27017)).ok(true)
+                .type(REPLICA_SET_PRIMARY).tagSet(tags1)
+                .build();
+
+        secondary = builder()
+                .state(CONNECTED).address(new ServerAddress("localhost", 27018)).ok(true)
+                .type(REPLICA_SET_SECONDARY).tagSet(tags2)
+                .build();
+
+        otherSecondary = builder()
+                .state(CONNECTED).address(new ServerAddress("otherhost", 27019)).ok(true)
+                .type(REPLICA_SET_SECONDARY).tagSet(tags3)
+                .build();
+        uninitiatedMember = builder()
+                .state(CONNECTED).address(new ServerAddress("localhost", 27020)).ok(true)
+                .type(REPLICA_SET_OTHER)
+                .build();
+
+        notOkMember = builder().state(CONNECTED).address(new ServerAddress("localhost", 27021)).ok(false)
+                .build();
+
+        List<ServerDescription> nodeList = asList(primary, secondary, otherSecondary, uninitiatedMember, notOkMember);
+
+        cluster = new ClusterDescription(MULTIPLE, REPLICA_SET, nodeList);
+    }
+
+    @Test
+    public void testMode() {
+        ClusterDescription description = new ClusterDescription(MULTIPLE, UNKNOWN, Collections.<ServerDescription>emptyList());
+        assertEquals(MULTIPLE, description.getConnectionMode());
+    }
+
+    @Test
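+    // cluster and server settings are only present when passed at construction; otherwise the getters return null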
+    public void testSettings() {
+        ClusterDescription description = new ClusterDescription(MULTIPLE, UNKNOWN, Collections.<ServerDescription>emptyList());
+        assertNull(description.getClusterSettings());
+        assertNull(description.getServerSettings());
+
+        ClusterDescription descriptionWithSettings = new ClusterDescription(MULTIPLE, UNKNOWN, Collections.<ServerDescription>emptyList(),
+                ClusterSettings.builder()
+                        .hosts(asList(new ServerAddress()))
+                        .build(),
+                ServerSettings.builder().build());
+        assertNotNull(descriptionWithSettings.getClusterSettings());
+        assertNotNull(descriptionWithSettings.getServerSettings());
+    }
+
+    @Test
+    public void testAll() {
+        ClusterDescription description = new ClusterDescription(MULTIPLE, UNKNOWN, Collections.<ServerDescription>emptyList());
+        assertTrue(getAll(description).isEmpty());
+        assertEquals(new HashSet<>(asList(primary, secondary, otherSecondary, uninitiatedMember, notOkMember)),
+                getAll(cluster));
+    }
+
+    @Test
+    public void testAny() {
+        List<ServerDescription> any = getAny(cluster);
+        assertEquals(4, any.size());
+        assertTrue(any.contains(primary));
+        assertTrue(any.contains(secondary));
+        assertTrue(any.contains(uninitiatedMember));
+        assertTrue(any.contains(otherSecondary));
+    }
+
+    @Test
+    public void testPrimaryOrSecondary() {
+        assertEquals(asList(primary, secondary, otherSecondary), getAnyPrimaryOrSecondary(cluster));
+        assertEquals(asList(primary, secondary), getAnyPrimaryOrSecondary(cluster, new TagSet(asList(new Tag("foo", "1"),
+                new Tag("bar", "2")))));
+    }
+
+    @Test
+    public void testHasReadableServer() {
+        assertTrue(cluster.hasReadableServer(ReadPreference.primary()));
+        assertFalse(new ClusterDescription(MULTIPLE, REPLICA_SET, asList(secondary, otherSecondary))
+                .hasReadableServer(ReadPreference.primary()));
+        assertTrue(new ClusterDescription(MULTIPLE, REPLICA_SET, asList(secondary, otherSecondary))
+                .hasReadableServer(ReadPreference.secondary()));
+
+    }
+
+    @Test
+    public void testHasWritableServer() {
+        assertTrue(cluster.hasWritableServer());
+        assertFalse(new ClusterDescription(MULTIPLE, REPLICA_SET, asList(secondary, otherSecondary))
+                .hasWritableServer());
+    }
+
+    @Test
+    public void testGetByServerAddress() {
+        assertEquals(primary, ClusterDescriptionHelper.getByServerAddress(cluster, primary.getAddress()));
+        assertNull(ClusterDescriptionHelper.getByServerAddress(cluster, notOkMember.getAddress()));
+    }
+
+    @Test
+    public void testSortingOfAll() {
+        ClusterDescription description =
+                new ClusterDescription(MULTIPLE, UNKNOWN, asList(
+                        builder()
+                                .state(CONNECTING)
+                                .address(new ServerAddress("loc:27019"))
+                                .build(),
+                        builder()
+                                .state(CONNECTING)
+                                .address(new ServerAddress("loc:27018"))
+                                .build(),
+                        builder()
+                                .state(CONNECTING)
+                                .address(new ServerAddress("loc:27017"))
+                                .build())
+                );
+        Iterator<ServerDescription> iter = getAll(description).iterator();
+        assertEquals(new ServerAddress("loc:27017"), iter.next().getAddress());
+        assertEquals(new ServerAddress("loc:27018"), iter.next().getAddress());
+        assertEquals(new ServerAddress("loc:27019"), iter.next().getAddress());
+    }
+
+    @Test
+    public void clusterDescriptionWithAnIncompatiblyNewServerShouldBeIncompatible() {
+        ClusterDescription description =
+                new ClusterDescription(MULTIPLE, UNKNOWN, asList(
+                        builder()
+                                .state(CONNECTING)
+                                .address(new ServerAddress("loc:27019"))
+                                .build(),
+                        builder()
+                                .state(CONNECTED)
+                                .ok(true)
+                                .address(new ServerAddress("loc:27018"))
+                                .minWireVersion(MAX_DRIVER_WIRE_VERSION + 1)
+                                .maxWireVersion(MAX_DRIVER_WIRE_VERSION + 1)
+                                .build(),
+                        builder()
+                                .state(CONNECTING)
+                                .address(new ServerAddress("loc:27017"))
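+                                // still CONNECTING: no wire versions reported yet, so this server cannot affect compatibility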
.build()) + ); + assertFalse(description.isCompatibleWithDriver()); + assertEquals(new ServerAddress("loc:27018"), description.findServerIncompatiblyNewerThanDriver().getAddress()); + assertNull(description.findServerIncompatiblyOlderThanDriver()); + } + + @Test + public void clusterDescriptionWithAnIncompatiblyOlderServerShouldBeIncompatible() { + ClusterDescription description = + new ClusterDescription(MULTIPLE, UNKNOWN, asList( + builder() + .state(CONNECTING) + .address(new ServerAddress("loc:27019")) + .build(), + builder() + .state(CONNECTED) + .ok(true) + .address(new ServerAddress("loc:27018")) + .minWireVersion(0) + .maxWireVersion(MIN_DRIVER_WIRE_VERSION - 1) + .build(), + builder() + .state(CONNECTING) + .address(new ServerAddress("loc:27017")) + .build()) + ); + assertFalse(description.isCompatibleWithDriver()); + assertEquals(new ServerAddress("loc:27018"), description.findServerIncompatiblyOlderThanDriver().getAddress()); + assertNull(description.findServerIncompatiblyNewerThanDriver()); + } + + @Test + public void clusterDescriptionWithCompatibleServerShouldBeCompatible() { + ClusterDescription description = + new ClusterDescription(MULTIPLE, UNKNOWN, asList( + builder() + .state(CONNECTING) + .address(new ServerAddress("loc:27019")) + .build(), + builder() + .state(CONNECTING) + .address(new ServerAddress("loc:27018")) + .build(), + builder() + .state(CONNECTING) + .address(new ServerAddress("loc:27017")) + .build()) + ); + assertTrue(description.isCompatibleWithDriver()); + assertNull(description.findServerIncompatiblyNewerThanDriver()); + assertNull(description.findServerIncompatiblyOlderThanDriver()); + } + + @Test + public void testLogicalSessionTimeoutMinutes() { + ClusterDescription description = new ClusterDescription(MULTIPLE, REPLICA_SET, asList( + builder().state(CONNECTING) + .address(new ServerAddress("loc:27017")).build() + )); + assertNull(description.getLogicalSessionTimeoutMinutes()); + + description = new ClusterDescription(MULTIPLE, REPLICA_SET, asList( + builder().state(CONNECTED) + .address(new ServerAddress("loc:27017")) + .build() + )); + assertNull(description.getLogicalSessionTimeoutMinutes()); + + description = new ClusterDescription(MULTIPLE, REPLICA_SET, asList( + builder().state(CONNECTED) + .ok(true) + .address(new ServerAddress("loc:27017")) + .build() + )); + assertNull(description.getLogicalSessionTimeoutMinutes()); + + description = new ClusterDescription(SINGLE, STANDALONE, asList( + builder().state(CONNECTED) + .ok(true) + .address(new ServerAddress("loc:27017")) + .type(ServerType.STANDALONE) + .logicalSessionTimeoutMinutes(5) + .build() + )); + assertEquals(Integer.valueOf(5), description.getLogicalSessionTimeoutMinutes()); + + description = new ClusterDescription(SINGLE, SHARDED, asList( + builder().state(CONNECTED) + .ok(true) + .address(new ServerAddress("loc:27017")) + .type(ServerType.SHARD_ROUTER) + .logicalSessionTimeoutMinutes(5) + .build() + )); + assertEquals(Integer.valueOf(5), description.getLogicalSessionTimeoutMinutes()); + + description = new ClusterDescription(MULTIPLE, SHARDED, asList( + builder().state(CONNECTED) + .ok(true) + .address(new ServerAddress("loc:27017")) + .type(ServerType.SHARD_ROUTER) + .logicalSessionTimeoutMinutes(5) + .build(), + builder().state(CONNECTING) + .address(new ServerAddress("loc:27018")) + .build() + )); + assertEquals(Integer.valueOf(5), description.getLogicalSessionTimeoutMinutes()); + + description = new ClusterDescription(MULTIPLE, SHARDED, asList( + builder().state(CONNECTED) + 
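// when several mongoses report a value, the smallest logicalSessionTimeoutMinutes wins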
.ok(true) + .address(new ServerAddress("loc:27017")) + .type(ServerType.SHARD_ROUTER) + .logicalSessionTimeoutMinutes(5) + .build(), + builder().state(CONNECTED) + .ok(true) + .address(new ServerAddress("loc:27018")) + .type(ServerType.SHARD_ROUTER) + .logicalSessionTimeoutMinutes(3) + .build() + )); + assertEquals(Integer.valueOf(3), description.getLogicalSessionTimeoutMinutes()); + + description = new ClusterDescription(MULTIPLE, REPLICA_SET, asList( + builder().state(CONNECTED) + .ok(true) + .address(new ServerAddress("loc:27017")) + .type(REPLICA_SET_PRIMARY) + .logicalSessionTimeoutMinutes(5) + .build(), + builder().state(CONNECTING) + .address(new ServerAddress("loc:27018")) + .build() + )); + assertEquals(Integer.valueOf(5), description.getLogicalSessionTimeoutMinutes()); + + description = new ClusterDescription(MULTIPLE, REPLICA_SET, asList( + builder().state(CONNECTED) + .ok(true) + .address(new ServerAddress("loc:27017")) + .type(REPLICA_SET_PRIMARY) + .logicalSessionTimeoutMinutes(5) + .build(), + builder().state(CONNECTED) + .ok(true) + .address(new ServerAddress("loc:27018")) + .type(REPLICA_SET_SECONDARY) + .logicalSessionTimeoutMinutes(3) + .build(), + builder().state(CONNECTING) + .address(new ServerAddress("loc:27019")) + .build() + )); + assertEquals(Integer.valueOf(3), description.getLogicalSessionTimeoutMinutes()); + + description = new ClusterDescription(MULTIPLE, REPLICA_SET, asList( + builder().state(CONNECTED) + .ok(true) + .address(new ServerAddress("loc:27017")) + .type(REPLICA_SET_PRIMARY) + .logicalSessionTimeoutMinutes(3) + .build(), + builder().state(CONNECTED) + .ok(true) + .address(new ServerAddress("loc:27018")) + .type(REPLICA_SET_SECONDARY) + .logicalSessionTimeoutMinutes(5) + .build(), + builder().state(CONNECTING) + .address(new ServerAddress("loc:27019")) + .build() + )); + assertEquals(Integer.valueOf(3), description.getLogicalSessionTimeoutMinutes()); + } + + @Test + public void testObjectOverrides() { + ClusterDescription description = + new ClusterDescription(MULTIPLE, UNKNOWN, asList( + builder() + .state(CONNECTING) + .address(new ServerAddress("loc:27019")) + .lastUpdateTimeNanos(42L) + .build(), + builder() + .state(CONNECTING) + .address(new ServerAddress("loc:27018")) + .lastUpdateTimeNanos(42L) + .build(), + builder() + .state(CONNECTING) + .address(new ServerAddress("loc:27017")) + .lastUpdateTimeNanos(42L) + .build()) + ); + ClusterDescription descriptionTwo = + new ClusterDescription(MULTIPLE, UNKNOWN, asList( + builder() + .state(CONNECTING) + .address(new ServerAddress("loc:27019")) + .lastUpdateTimeNanos(42L) + .build(), + builder() + .state(CONNECTING) + .address(new ServerAddress("loc:27018")) + .lastUpdateTimeNanos(42L) + .build(), + builder() + .state(CONNECTING) + .address(new ServerAddress("loc:27017")) + .lastUpdateTimeNanos(42L) + .build()) + ); + assertEquals(description, descriptionTwo); + assertEquals(description.hashCode(), descriptionTwo.hashCode()); + assertTrue(description.toString().startsWith("ClusterDescription")); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/connection/ClusterIdSpecification.groovy b/driver-core/src/test/unit/com/mongodb/connection/ClusterIdSpecification.groovy new file mode 100644 index 00000000000..8a3f2e4568a --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/connection/ClusterIdSpecification.groovy @@ -0,0 +1,50 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.connection + +import spock.lang.Specification +import spock.lang.Unroll + + +class ClusterIdSpecification extends Specification { + def 'should set value to string with length 24'() { + expect: + new ClusterId().value.length() == 24 + } + + def 'different ids should have different values'() { + expect: + new ClusterId().value != new ClusterId().value + } + + @Unroll + def 'equivalent ids should be equal and have same hash code'() { + when: + def id1 = new ClusterId(id, description) + def id2 = new ClusterId(id, description) + + then: + id1 == id2 + id1.hashCode() == id2.hashCode() + + where: + id | description + 'id1' | null + 'id2' | 'my server' + } + +} diff --git a/driver-core/src/test/unit/com/mongodb/connection/ClusterSettingsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/connection/ClusterSettingsSpecification.groovy new file mode 100644 index 00000000000..36da5c61e2d --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/connection/ClusterSettingsSpecification.groovy @@ -0,0 +1,529 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.connection + +import com.mongodb.ConnectionString +import com.mongodb.ServerAddress +import com.mongodb.UnixServerAddress +import com.mongodb.event.ClusterListener +import com.mongodb.internal.selector.WritableServerSelector +import spock.lang.Specification + +import java.util.concurrent.TimeUnit + +class ClusterSettingsSpecification extends Specification { + def hosts = [new ServerAddress('localhost'), new ServerAddress('localhost', 30000)] + def serverSelector = new WritableServerSelector() + + def 'should set all default values'() { + when: + def settings = ClusterSettings.builder().build() + + then: + settings.hosts == [new ServerAddress()] + settings.mode == ClusterConnectionMode.SINGLE + settings.requiredClusterType == ClusterType.UNKNOWN + settings.requiredReplicaSetName == null + settings.serverSelector == null + settings.getServerSelectionTimeout(TimeUnit.SECONDS) == 30 + settings.clusterListeners == [] + settings.srvMaxHosts == null + settings.srvServiceName == 'mongodb' + } + + def 'should set all properties'() { + when: + def listenerOne = Mock(ClusterListener) + def listenerTwo = Mock(ClusterListener) + def listenerThree = Mock(ClusterListener) + def settings = ClusterSettings.builder() + .hosts(hosts) + .mode(ClusterConnectionMode.MULTIPLE) + .requiredClusterType(ClusterType.REPLICA_SET) + .requiredReplicaSetName('foo') + .localThreshold(1, TimeUnit.SECONDS) + .serverSelector(serverSelector) + .serverSelectionTimeout(1, TimeUnit.SECONDS) + .addClusterListener(listenerOne) + .addClusterListener(listenerTwo) + .build() + + then: + settings.hosts == hosts + settings.mode == ClusterConnectionMode.MULTIPLE + settings.requiredClusterType == ClusterType.REPLICA_SET + settings.requiredReplicaSetName == 'foo' + settings.serverSelector == serverSelector + settings.getServerSelectionTimeout(TimeUnit.MILLISECONDS) == 1000 + settings.clusterListeners == [listenerOne, listenerTwo] + + when: + settings = ClusterSettings.builder(settings).clusterListenerList([listenerThree]).build() + + then: + settings.clusterListeners == [listenerThree] + } + + def 'should apply settings'() { + given: + def listenerOne = Mock(ClusterListener) + def listenerTwo = Mock(ClusterListener) + def defaultSettings = ClusterSettings.builder().build() + def customSettings = ClusterSettings.builder() + .hosts(hosts) + .mode(ClusterConnectionMode.MULTIPLE) + .requiredClusterType(ClusterType.REPLICA_SET) + .requiredReplicaSetName('foo') + .serverSelector(serverSelector) + .localThreshold(10, TimeUnit.MILLISECONDS) + .serverSelectionTimeout(1, TimeUnit.SECONDS) + .addClusterListener(listenerOne) + .addClusterListener(listenerTwo) + .build() + + expect: + ClusterSettings.builder().applySettings(customSettings).build() == customSettings + ClusterSettings.builder(customSettings).applySettings(defaultSettings).build() == defaultSettings + } + + def 'should apply settings for SRV'() { + given: + def defaultSettings = ClusterSettings.builder().build() + def customSettings = ClusterSettings.builder() + .hosts([new ServerAddress('localhost')]) + .srvMaxHosts(4) + .srvServiceName('foo') + .build() + + expect: + ClusterSettings.builder().applySettings(customSettings).build() == customSettings + ClusterSettings.builder(customSettings).applySettings(defaultSettings).build() == defaultSettings + } + + def 'when hosts contains more than one element and mode is SINGLE, should throw IllegalArgumentException'() { + when: + def builder = ClusterSettings.builder() + builder.hosts([new ServerAddress('host1'), new 
ServerAddress('host2')])
+        builder.mode(ClusterConnectionMode.SINGLE)
+        builder.build()
+
+        then:
+        thrown(IllegalArgumentException)
+    }
+
+    def 'when hosts contains more than one element and mode is LOAD_BALANCED, should throw IllegalArgumentException'() {
+        when:
+        def builder = ClusterSettings.builder()
+        builder.hosts([new ServerAddress('host1'), new ServerAddress('host2')])
+        builder.mode(ClusterConnectionMode.LOAD_BALANCED)
+        builder.build()
+
+        then:
+        thrown(IllegalArgumentException)
+    }
+
+    def 'when srvHost is specified and mode is SINGLE, should throw'() {
+        when:
+        ClusterSettings.builder()
+                .srvHost('foo.bar.com')
+                .mode(ClusterConnectionMode.SINGLE)
+                .build()
+
+        then:
+        thrown(IllegalArgumentException)
+    }
+
+    def 'when srvHost is specified, should set mode to MULTIPLE if mode is not configured'() {
+        when:
+        def builder = ClusterSettings.builder()
+        builder.srvHost('foo.bar.com')
+        def settings = builder.build()
+
+        then:
+        settings.getSrvHost() == 'foo.bar.com'
+        settings.getMode() == ClusterConnectionMode.MULTIPLE
+    }
+
+    def 'when srvHost is specified, should use the configured mode if it is LOAD_BALANCED'() {
+        when:
+        def builder = ClusterSettings.builder()
+        builder.srvHost('foo.bar.com')
+        builder.mode(ClusterConnectionMode.LOAD_BALANCED)
+        def settings = builder.build()
+
+        then:
+        settings.getSrvHost() == 'foo.bar.com'
+        settings.getMode() == ClusterConnectionMode.LOAD_BALANCED
+    }
+
+    def 'when srvHost contains a colon, should throw IllegalArgumentException'() {
+        when:
+        def builder = ClusterSettings.builder()
+        builder.srvHost('foo.bar.com:27017')
+        builder.build()
+
+        then:
+        thrown(IllegalArgumentException)
+    }
+
+    def 'when connection string is applied to builder, all properties should be set'() {
+        when:
+        def settings = ClusterSettings.builder()
+                .requiredReplicaSetName("test")
+                .applyConnectionString(new ConnectionString('mongodb://example.com:27018'))
+                .build()
+
+        then:
+        settings.mode == ClusterConnectionMode.SINGLE
+        settings.hosts == [new ServerAddress('example.com:27018')]
+        settings.requiredClusterType == ClusterType.UNKNOWN
+        settings.requiredReplicaSetName == null
+        settings.srvMaxHosts == null
+        settings.srvServiceName == 'mongodb'
+
+        when:
+        settings = ClusterSettings.builder()
+                .applyConnectionString(new ConnectionString('mongodb://example.com:27018'))
+                .requiredReplicaSetName("test")
+                .build()
+
+        then:
+        settings.mode == ClusterConnectionMode.MULTIPLE
+        settings.hosts == [new ServerAddress('example.com:27018')]
+        settings.requiredClusterType == ClusterType.REPLICA_SET
+        settings.requiredReplicaSetName == 'test'
+        settings.srvMaxHosts == null
+        settings.srvServiceName == 'mongodb'
+
+        when:
+        settings = ClusterSettings.builder().applyConnectionString(new ConnectionString('mongodb+srv://test5.test.build.10gen.cc/')).build()
+
+        then:
+        settings.mode == ClusterConnectionMode.MULTIPLE
+        settings.hosts == [new ServerAddress('127.0.0.1:27017')]
+        settings.requiredClusterType == ClusterType.REPLICA_SET
+        settings.requiredReplicaSetName == 'repl0'
+        settings.srvMaxHosts == null
+        settings.srvServiceName == 'mongodb'
+
+        when:
+        settings = ClusterSettings.builder().applyConnectionString(
+                new ConnectionString('mongodb+srv://test22.test.build.10gen.cc/?srvServiceName=customname&srvMaxHosts=1')).build()
+
+        then:
+        settings.mode == ClusterConnectionMode.MULTIPLE
+        settings.hosts == [new ServerAddress('127.0.0.1:27017')]
+        settings.requiredClusterType == ClusterType.UNKNOWN
+        settings.requiredReplicaSetName == null
+        settings.srvMaxHosts == 1
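+        // both SRV options are parsed from the connection string query parameters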
settings.srvServiceName == 'customname' + + when: + settings = ClusterSettings.builder() + .mode(ClusterConnectionMode.SINGLE) + .applyConnectionString(new ConnectionString('mongodb://example.com:27018/?replicaSet=test')) + .build() + + then: + settings.mode == ClusterConnectionMode.MULTIPLE + settings.hosts == [new ServerAddress('example.com:27018')] + settings.requiredClusterType == ClusterType.REPLICA_SET + settings.requiredReplicaSetName == 'test' + settings.srvMaxHosts == null + settings.srvServiceName == 'mongodb' + + when: + settings = ClusterSettings.builder() + .applyConnectionString(new ConnectionString('mongodb://example.com:27018/?directConnection=false')) + .build() + + then: + settings.mode == ClusterConnectionMode.MULTIPLE + settings.hosts == [new ServerAddress('example.com:27018')] + settings.requiredClusterType == ClusterType.UNKNOWN + settings.requiredReplicaSetName == null + settings.srvMaxHosts == null + settings.srvServiceName == 'mongodb' + + when: + settings = ClusterSettings.builder() + .applyConnectionString(new ConnectionString('mongodb://example.com:27017,example.com:27018/?directConnection=false')) + .build() + + then: + settings.mode == ClusterConnectionMode.MULTIPLE + settings.hosts == [new ServerAddress('example.com:27017'), new ServerAddress('example.com:27018')] + settings.requiredClusterType == ClusterType.UNKNOWN + settings.requiredReplicaSetName == null + settings.srvMaxHosts == null + settings.srvServiceName == 'mongodb' + + when: + settings = ClusterSettings.builder() + .applyConnectionString(new ConnectionString('mongodb://example.com:27018/?directConnection=true')) + .build() + + then: + settings.mode == ClusterConnectionMode.SINGLE + settings.hosts == [new ServerAddress('example.com:27018')] + settings.requiredClusterType == ClusterType.UNKNOWN + settings.requiredReplicaSetName == null + settings.srvMaxHosts == null + settings.srvServiceName == 'mongodb' + + when: + settings = ClusterSettings.builder().applyConnectionString(new ConnectionString('mongodb://example.com:27018,example.com:27019')) + .build() + + then: + settings.mode == ClusterConnectionMode.MULTIPLE + settings.hosts == [new ServerAddress('example.com:27018'), new ServerAddress('example.com:27019')] + settings.requiredClusterType == ClusterType.UNKNOWN + settings.requiredReplicaSetName == null + settings.srvMaxHosts == null + settings.srvServiceName == 'mongodb' + + when: + settings = ClusterSettings.builder().applyConnectionString(new ConnectionString('mongodb://example.com:27018/?' 
+                + 'serverSelectionTimeoutMS=50000'))
+                .build()
+
+        then:
+        settings.getServerSelectionTimeout(TimeUnit.MILLISECONDS) == 50000
+
+        when:
+        settings = ClusterSettings.builder().applyConnectionString(new ConnectionString('mongodb://localhost/?localThresholdMS=99')).build()
+
+        then:
+        settings.getLocalThreshold(TimeUnit.MILLISECONDS) == 99
+
+        when:
+        settings = ClusterSettings.builder()
+                .applyConnectionString(new ConnectionString('mongodb://example.com:27018/?loadBalanced=true')).build()
+
+        then:
+        settings.mode == ClusterConnectionMode.LOAD_BALANCED
+        settings.hosts == [new ServerAddress('example.com:27018')]
+    }
+
+    def 'when cluster type is UNKNOWN and replica set name is set, should set cluster type to REPLICA_SET and mode to MULTIPLE'() {
+        when:
+        def settings = ClusterSettings.builder().hosts([new ServerAddress()]).requiredReplicaSetName('yeah').build()
+
+        then:
+        ClusterType.REPLICA_SET == settings.requiredClusterType
+        ClusterConnectionMode.MULTIPLE == settings.mode
+    }
+
+    def 'connection mode should default to SINGLE if replica set name is not set and one host, or MULTIPLE if more'() {
+        when:
+        def settings = ClusterSettings.builder().hosts([new ServerAddress()]).build()
+
+        then:
+        settings.mode == ClusterConnectionMode.SINGLE
+
+        when:
+        settings = ClusterSettings.builder().hosts(hosts).build()
+
+        then:
+        settings.mode == ClusterConnectionMode.MULTIPLE
+    }
+
+    def 'when a valid mode is specified, should use it'() {
+        when:
+        def mode = ClusterConnectionMode.LOAD_BALANCED
+        def settings = ClusterSettings.builder().mode(mode).build()
+
+        then:
+        settings.mode == mode
+    }
+
+    def 'when mode is Single and hosts size is greater than one, should throw'() {
+        when:
+        ClusterSettings.builder().hosts([new ServerAddress(), new ServerAddress('other')]).mode(ClusterConnectionMode.SINGLE).build()
+        then:
+        thrown(IllegalArgumentException)
+
+        when:
+        ClusterSettings.builder()
+                .applyConnectionString(new ConnectionString("mongodb://host1,host2/"))
+                .mode(ClusterConnectionMode.SINGLE)
+                .build()
+        then:
+        thrown(IllegalArgumentException)
+    }
+
+    def 'when cluster type is Standalone and multiple hosts are specified, should throw'() {
+        when:
+        ClusterSettings.builder().hosts([new ServerAddress(), new ServerAddress('other')]).requiredClusterType(ClusterType.STANDALONE)
+                .build()
+        then:
+        thrown(IllegalArgumentException)
+    }
+
+    def 'when a replica set name is specified and type is Standalone, should throw'() {
+        when:
+        ClusterSettings.builder().hosts([new ServerAddress(), new ServerAddress('other')]).requiredReplicaSetName('foo')
+                .requiredClusterType(ClusterType.STANDALONE).build()
+        then:
+        thrown(IllegalArgumentException)
+    }
+
+    def 'when a replica set name is specified and type is Sharded, should throw'() {
+        when:
+        ClusterSettings.builder().hosts([new ServerAddress(), new ServerAddress('other')]).requiredReplicaSetName('foo')
+                .requiredClusterType(ClusterType.SHARDED).build()
+        then:
+        thrown(IllegalArgumentException)
+    }
+
+
+    def 'should throw if hosts list is null'() {
+        when:
+        ClusterSettings.builder().hosts(null).build()
+
+        then:
+        thrown(IllegalArgumentException)
+    }
+
+    def 'should throw if hosts list is empty'() {
+        when:
+        ClusterSettings.builder().hosts([]).build()
+
+        then:
+        thrown(IllegalArgumentException)
+    }
+
+    def 'should throw if hosts list contains null value'() {
+        when:
+        ClusterSettings.builder().hosts([null]).build()
+
+        then:
+        thrown(IllegalArgumentException)
+    }
+
+    def 'should remove duplicate hosts'() {
+        when:
+        def settings =
ClusterSettings.builder().hosts([new ServerAddress('server1'), + new ServerAddress('server2'), + new ServerAddress('server1')]).build() + + then: + settings.getHosts() == [new ServerAddress('server1'), new ServerAddress('server2')] + } + + def 'identical settings should be equal'() { + expect: + ClusterSettings.builder().hosts(hosts).build() == ClusterSettings.builder().hosts(hosts).build() + ClusterSettings.builder() + .hosts(hosts) + .mode(ClusterConnectionMode.MULTIPLE) + .requiredClusterType(ClusterType.REPLICA_SET) + .requiredReplicaSetName('foo') + .serverSelector(serverSelector) + .serverSelectionTimeout(1, TimeUnit.SECONDS) + .build() == + ClusterSettings.builder() + .hosts(hosts) + .mode(ClusterConnectionMode.MULTIPLE) + .requiredClusterType(ClusterType.REPLICA_SET) + .requiredReplicaSetName('foo') + .serverSelector(serverSelector) + .serverSelectionTimeout(1, TimeUnit.SECONDS) + .build() + } + + def 'different settings should not be equal'() { + expect: + ClusterSettings.builder() + .hosts(hosts) + .mode(ClusterConnectionMode.MULTIPLE) + .requiredClusterType(ClusterType.REPLICA_SET) + .requiredReplicaSetName('foo') + .serverSelector(serverSelector) + .serverSelectionTimeout(1, TimeUnit.SECONDS) + .build() != ClusterSettings.builder().hosts(hosts).build() + } + + def 'identical settings should have same hash code'() { + expect: + ClusterSettings.builder().hosts(hosts).build().hashCode() == ClusterSettings.builder().hosts(hosts).build().hashCode() + ClusterSettings.builder() + .hosts(hosts) + .mode(ClusterConnectionMode.MULTIPLE) + .requiredClusterType(ClusterType.REPLICA_SET) + .requiredReplicaSetName('foo') + .serverSelector(serverSelector) + .serverSelectionTimeout(1, TimeUnit.SECONDS) + .build().hashCode() == + ClusterSettings.builder() + .hosts(hosts) + .mode(ClusterConnectionMode.MULTIPLE) + .requiredClusterType(ClusterType.REPLICA_SET) + .requiredReplicaSetName('foo') + .serverSelector(serverSelector) + .serverSelectionTimeout(1, TimeUnit.SECONDS) + .build().hashCode() + } + + def 'different settings should have different hash codes'() { + expect: + ClusterSettings.builder() + .hosts(hosts) + .mode(ClusterConnectionMode.MULTIPLE) + .requiredClusterType(ClusterType.REPLICA_SET) + .requiredReplicaSetName('foo') + .serverSelector(serverSelector) + .serverSelectionTimeout(1, TimeUnit.SECONDS) + .build().hashCode() != ClusterSettings.builder().hosts(hosts).build().hashCode() + } + + def 'should replace unknown ServerAddress subclass instances with ServerAddress'() { + when: + def settings = ClusterSettings.builder().hosts([new ServerAddress('server1'), + new ServerAddressSubclass('server2'), + new UnixServerAddress('mongodb.sock')]).build() + + then: + settings.getHosts() == [new ServerAddress('server1'), new ServerAddress('server2'), new UnixServerAddress('mongodb.sock')] + } + + def 'list of cluster listeners should be unmodifiable'() { + given: + def settings = ClusterSettings.builder().hosts(hosts).build() + + when: + settings.clusterListeners.add(Mock(ClusterListener)) + + then: + thrown(UnsupportedOperationException) + } + + def 'cluster listener should not be null'() { + when: + ClusterSettings.builder().addClusterListener(null) + + then: + thrown(IllegalArgumentException) + } + + static class ServerAddressSubclass extends ServerAddress { + ServerAddressSubclass(final String host) { + super(host) + } + } +} diff --git a/driver-core/src/test/unit/com/mongodb/connection/ConnectionDescriptionSpecification.groovy 
b/driver-core/src/test/unit/com/mongodb/connection/ConnectionDescriptionSpecification.groovy new file mode 100644 index 00000000000..d116dc5be4c --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/connection/ConnectionDescriptionSpecification.groovy @@ -0,0 +1,80 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.connection + +import com.mongodb.ServerAddress +import org.bson.BsonArray +import org.bson.BsonString +import org.bson.types.ObjectId +import spock.lang.Specification + +class ConnectionDescriptionSpecification extends Specification {
+ private final serviceId = new ObjectId()
+ private final id = new ConnectionId(new ServerId(new ClusterId(), new ServerAddress()))
+ private final saslSupportedMechanisms = new BsonArray([new BsonString('SCRAM-SHA-256')])
+ private final description = new ConnectionDescription(serviceId, id, 5, ServerType.STANDALONE, 1, 2, 3,
+ ['zlib'], saslSupportedMechanisms) + + def 'should initialize all values'() { + expect:
+ description.getServiceId() == serviceId + description.connectionId == id + description.maxWireVersion == 5
+ description.serverType == ServerType.STANDALONE + description.maxBatchCount == 1 + description.maxDocumentSize == 2
+ description.maxMessageSize == 3 + description.compressors == ['zlib'] + description.saslSupportedMechanisms == saslSupportedMechanisms + } + +
def 'withConnectionId should return a new instance with the given connectionId and preserve the rest'() { + given:
+ def newId = id.withServerValue(123) + def newDescription = description.withConnectionId(newId) + + expect:
+ !newDescription.is(description) + newDescription.serviceId == serviceId + newDescription.connectionId == newId
+ newDescription.maxWireVersion == 5 + newDescription.serverType == ServerType.STANDALONE + newDescription.maxBatchCount == 1
+ newDescription.maxDocumentSize == 2 + newDescription.maxMessageSize == 3 + newDescription.compressors == ['zlib']
+ newDescription.saslSupportedMechanisms == saslSupportedMechanisms + } + +
def 'withServiceId should return a new instance with the given serviceId and preserve the rest'() { + given:
+ def newServiceId = new ObjectId() + def newDescription = description.withServiceId(newServiceId) + + expect:
+ !newDescription.is(description) + newDescription.serviceId == newServiceId + newDescription.connectionId == id
+ newDescription.maxWireVersion == 5 + newDescription.serverType == ServerType.STANDALONE + newDescription.maxBatchCount == 1
+ newDescription.maxDocumentSize == 2 + newDescription.maxMessageSize == 3 + newDescription.compressors == ['zlib']
+ newDescription.saslSupportedMechanisms == saslSupportedMechanisms + } +} diff --git a/driver-core/src/test/unit/com/mongodb/connection/ConnectionIdSpecification.groovy b/driver-core/src/test/unit/com/mongodb/connection/ConnectionIdSpecification.groovy new file mode 100644 index 00000000000..4c821d591b1 --- /dev/null +++
b/driver-core/src/test/unit/com/mongodb/connection/ConnectionIdSpecification.groovy @@ -0,0 +1,80 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.connection + +import com.mongodb.ServerAddress +import spock.lang.Specification + +class ConnectionIdSpecification extends Specification { + + def serverId = new ServerId(new ClusterId(), new ServerAddress('host1')) + +
def 'should set all properties'() { + given: + def id1 = new ConnectionId(serverId)
+ def id2 = new ConnectionId(serverId, Long.MAX_VALUE - 1, Long.MAX_VALUE) + + expect: + id1.serverId == serverId + id1.localValue > 0
+ !id1.serverValue + + id2.serverId == serverId + id2.localValue == Long.MAX_VALUE - 1 + id2.serverValue == Long.MAX_VALUE + } + +
def 'should increment local value'() { + given: + def id1 = new ConnectionId(serverId) + def id2 = new ConnectionId(serverId) + + expect:
+ id2.localValue == id1.localValue + 1 + } + + +
def 'withServerValue should return a new instance with the given server value and preserve the rest'() { + def id = new ConnectionId(serverId) + + expect:
+ !id.withServerValue(124).is(id) + id.withServerValue(123).serverValue == 123 + id.withServerValue(123).localValue == id.localValue
+ id.withServerValue(123).serverId == serverId + } + +
def 'equivalent ids should be equal and have same hash code'() { + given: + def id1 = new ConnectionId(serverId, 100, 42)
+ def id2 = new ConnectionId(serverId, 100, 42) + + expect: + id2 == id1 + id2.hashCode() == id1.hashCode() + } + +
def 'different ids should not be equal and have different hash codes'() { + given: + def id1 = new ConnectionId(serverId, 100, 43)
+ def id2 = new ConnectionId(serverId, 100, 42) + + expect: + id2 != id1 + id2.hashCode() != id1.hashCode() + } +} diff --git a/driver-core/src/test/unit/com/mongodb/connection/ConnectionPoolSettingsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/connection/ConnectionPoolSettingsSpecification.groovy new file mode 100644 index 00000000000..b08aaa7c9e6 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/connection/ConnectionPoolSettingsSpecification.groovy @@ -0,0 +1,248 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + + +package com.mongodb.connection + +import com.mongodb.ConnectionString +import com.mongodb.event.ConnectionPoolListener +import spock.lang.Specification +import spock.lang.Unroll + +import static java.util.concurrent.TimeUnit.MILLISECONDS +import static java.util.concurrent.TimeUnit.SECONDS + +class ConnectionPoolSettingsSpecification extends Specification { + @Unroll + def 'should set up connection provider settings #settings correctly'() { + expect: + settings.getMaxWaitTime(MILLISECONDS) == maxWaitTime + settings.maxSize == maxSize + settings.getMaxConnectionLifeTime(MILLISECONDS) == maxConnectionLifeTimeMS + settings.getMaxConnectionIdleTime(MILLISECONDS) == maxConnectionIdleTimeMS + settings.minSize == minSize + settings.getMaintenanceInitialDelay(MILLISECONDS) == maintenanceInitialDelayMS + settings.getMaintenanceFrequency(MILLISECONDS) == maintenanceFrequencyMS + settings.getMaxConnecting() == maxConnecting + + where: + settings | maxWaitTime | maxSize | maxConnectionLifeTimeMS | + maxConnectionIdleTimeMS | minSize | maintenanceInitialDelayMS | maintenanceFrequencyMS | maxConnecting + ConnectionPoolSettings + .builder() + .build() | 120000L | 100 | 0 | 0 | 0 | 0 | 60000 | 2 + ConnectionPoolSettings + .builder() + .maxWaitTime(5, SECONDS) + .maxSize(75) + .maxConnectionLifeTime( + 101, SECONDS) + .maxConnectionIdleTime( + 51, SECONDS) + .minSize(1) + .maintenanceInitialDelay( + 5, SECONDS) + .maintenanceFrequency( + 1000, SECONDS) + .maxConnecting(1) + .build() | 5000 | 75 | 101000 | 51000 | 1 | 5000 | 1000000 | 1 + ConnectionPoolSettings + .builder(ConnectionPoolSettings.builder() + .maxWaitTime(5, SECONDS) + .maxSize(75) + .maxConnectionLifeTime(101, SECONDS) + .maxConnectionIdleTime(51, SECONDS) + .minSize(1) + .maintenanceInitialDelay(5, SECONDS) + .maintenanceFrequency(1000, SECONDS) + .maxConnecting(2) + .build()) + .build() | 5000 | 75 | 101000 | 51000 | 1 | 5000 | 1000000 | 2 + ConnectionPoolSettings + .builder(ConnectionPoolSettings.builder().build()) + .maxWaitTime(5, SECONDS) + .maxSize(75) + .maxConnectionLifeTime(101, SECONDS) + .maxConnectionIdleTime(51, SECONDS) + .minSize(1) + .maintenanceInitialDelay(5, SECONDS) + .maintenanceFrequency(1000, SECONDS) + .maxConnecting(1000) + .build() | 5000 | 75 | 101000 | 51000 | 1 | 5000 | 1000000 | 1000 + } + + def 'should throw exception on invalid argument'() { + when: + ConnectionPoolSettings.builder().maxSize(1).maxConnectionLifeTime(-1, SECONDS).build() + + then: + thrown(IllegalStateException) + + when: + ConnectionPoolSettings.builder().maxSize(1).maxConnectionIdleTime(-1, SECONDS).build() + + then: + thrown(IllegalStateException) + + when: + ConnectionPoolSettings.builder().maxSize(1).minSize(2).build() + + then: + thrown(IllegalStateException) + + when: + ConnectionPoolSettings.builder().maxSize(-1).build() + + then: + thrown(IllegalStateException) + + when: + ConnectionPoolSettings.builder().maintenanceInitialDelay(-1, MILLISECONDS).build() + + then: + thrown(IllegalStateException) + + when: + ConnectionPoolSettings.builder().maintenanceFrequency(0, MILLISECONDS).build() + + then: + thrown(IllegalStateException) + + when: + ConnectionPoolSettings.builder().maxConnecting(0).build() + + then: + thrown(IllegalStateException) + + when: + ConnectionPoolSettings.builder().maxConnecting(-1).build() + + then: + thrown(IllegalStateException) + } + + def 'settings with same values should be equal'() { + when: + def settings1 = ConnectionPoolSettings.builder().maxSize(1).build() + def settings2 = 
ConnectionPoolSettings.builder().maxSize(1).build() + + then: + settings1 == settings2 + } + + def 'settings with same values should have the same hash code'() { + when: + def settings1 = ConnectionPoolSettings.builder().maxSize(1).build() + def settings2 = ConnectionPoolSettings.builder().maxSize(1).build() + + then: + settings1.hashCode() == settings2.hashCode() + } + + def 'should apply connection string'() { + when: + def settings = ConnectionPoolSettings.builder().applyConnectionString( + new ConnectionString('mongodb://localhost/?waitQueueTimeoutMS=100&minPoolSize=5&maxPoolSize=10&' + + 'maxIdleTimeMS=200&maxLifeTimeMS=300&maxConnecting=1')) + .build() + + then: + settings.getMaxWaitTime(MILLISECONDS) == 100 + settings.getMaxSize() == 10 + settings.getMinSize() == 5 + settings.getMaxConnectionIdleTime(MILLISECONDS) == 200 + settings.getMaxConnectionLifeTime(MILLISECONDS) == 300 + settings.getMaxConnecting() == 1 + } + + def 'should apply settings'() { + given: + def connectionPoolListener = Mock(ConnectionPoolListener) + def defaultSettings = ConnectionPoolSettings.builder().build() + def customSettings = ConnectionPoolSettings + .builder() + .addConnectionPoolListener(Stub(ConnectionPoolListener)) + .maxWaitTime(5, SECONDS) + .maxSize(75) + .maxConnectionLifeTime(101, SECONDS) + .maxConnectionIdleTime(51, SECONDS) + .minSize(1) + .maintenanceInitialDelay(5, SECONDS) + .maintenanceFrequency(1000, SECONDS) + .maxConnecting(1) + .build() + + expect: + ConnectionPoolSettings.builder().applySettings(customSettings).build() == customSettings + ConnectionPoolSettings.builder(customSettings).applySettings(defaultSettings).build() == defaultSettings + + when: + customSettings = ConnectionPoolSettings.builder(customSettings).connectionPoolListenerList([connectionPoolListener]).build() + + then: + customSettings.connectionPoolListeners == [connectionPoolListener] + } + + def 'toString should be overridden'() { + when: + def settings = ConnectionPoolSettings.builder().maxSize(1).build() + + then: + settings.toString().startsWith('ConnectionPoolSettings') + } + + def 'identical settings should be equal'() { + expect: + ConnectionPoolSettings.builder().build() == ConnectionPoolSettings.builder().build() + ConnectionPoolSettings.builder().maxWaitTime(5, SECONDS).maxSize(75).maxConnectionLifeTime(101, SECONDS). + maxConnectionIdleTime(51, SECONDS).minSize(1).maintenanceInitialDelay(5, SECONDS).maintenanceFrequency(1000, SECONDS) + .maxConnecting(1) + .build() == + ConnectionPoolSettings.builder().maxWaitTime(5, SECONDS).maxSize(75).maxConnectionLifeTime(101, SECONDS). + maxConnectionIdleTime(51, SECONDS).minSize(1).maintenanceInitialDelay(5, SECONDS).maintenanceFrequency(1000, SECONDS) + .maxConnecting(1) + .build() + } + + def 'different settings should not be equal'() { + expect: + ConnectionPoolSettings.builder().maxWaitTime(5, SECONDS).build() != ConnectionPoolSettings.builder().maxWaitTime(2, SECONDS).build() + } + + def 'identical settings should have same hash code'() { + expect: + ConnectionPoolSettings.builder().build().hashCode() == ConnectionPoolSettings.builder().build().hashCode() + ConnectionPoolSettings.builder().maxWaitTime(5, SECONDS).maxSize(75).maxConnectionLifeTime(101, SECONDS). + maxConnectionIdleTime(51, SECONDS).minSize(1).maintenanceInitialDelay(5, SECONDS).maintenanceFrequency(1000, SECONDS) + .maxConnecting(1) + .build().hashCode() == + ConnectionPoolSettings.builder().maxWaitTime(5, SECONDS).maxSize(75).maxConnectionLifeTime(101, SECONDS). 
+ maxConnectionIdleTime(51, SECONDS).minSize(1).maintenanceInitialDelay(5, SECONDS).maintenanceFrequency(1000, SECONDS) + .maxConnecting(1) + .build().hashCode() + } + + def 'different settings should have different hash codes'() { + expect: + ConnectionPoolSettings.builder().maxWaitTime(5, SECONDS).build().hashCode() != + ConnectionPoolSettings.builder().maxWaitTime(3, SECONDS).build().hashCode() + } + + def 'should allow 0 (infinite) maxSize'() { + expect: + ConnectionPoolSettings.builder().maxSize(0).build().getMaxSize() == 0 + } +} diff --git a/driver-core/src/test/unit/com/mongodb/connection/NettyTransportSettingsTest.java b/driver-core/src/test/unit/com/mongodb/connection/NettyTransportSettingsTest.java new file mode 100644 index 00000000000..2a0fd590e1d --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/connection/NettyTransportSettingsTest.java @@ -0,0 +1,59 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.connection; + +import io.netty.buffer.UnpooledByteBufAllocator; +import io.netty.channel.DefaultEventLoopGroup; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.socket.nio.NioSocketChannel; +import io.netty.handler.ssl.SslContext; +import io.netty.handler.ssl.SslContextBuilder; +import org.junit.jupiter.api.Test; + +import javax.net.ssl.SSLException; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; + +class NettyTransportSettingsTest { + @Test + public void shouldDefaultAllValuesToNull() { + NettyTransportSettings settings = TransportSettings.nettyBuilder().build(); + + assertNull(settings.getAllocator()); + assertNull(settings.getEventLoopGroup()); + assertNull(settings.getSslContext()); + assertNull(settings.getSocketChannelClass()); + } + + @Test + public void shouldApplySettingsFromBuilder() throws SSLException { + EventLoopGroup eventLoopGroup = new DefaultEventLoopGroup(); + SslContext sslContext = SslContextBuilder.forClient().build(); + NettyTransportSettings settings = TransportSettings.nettyBuilder() + .allocator(UnpooledByteBufAllocator.DEFAULT) + .socketChannelClass(NioSocketChannel.class) + .eventLoopGroup(eventLoopGroup) + .sslContext(sslContext) + .build(); + + assertEquals(UnpooledByteBufAllocator.DEFAULT, settings.getAllocator()); + assertEquals(NioSocketChannel.class, settings.getSocketChannelClass()); + assertEquals(eventLoopGroup, settings.getEventLoopGroup()); + assertEquals(sslContext, settings.getSslContext()); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/connection/ServerAddressHelperTest.java b/driver-core/src/test/unit/com/mongodb/connection/ServerAddressHelperTest.java new file mode 100644 index 00000000000..51e5996eb6f --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/connection/ServerAddressHelperTest.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.connection; + +import com.mongodb.MongoClientSettings; +import com.mongodb.internal.connection.DefaultInetAddressResolver; +import com.mongodb.internal.connection.ServerAddressHelper; +import com.mongodb.spi.dns.InetAddressResolver; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertSame; + +final class ServerAddressHelperTest { + @Test + void getInetAddressResolver() { + assertAll( + () -> assertEquals( + DefaultInetAddressResolver.class, + ServerAddressHelper.getInetAddressResolver(MongoClientSettings.builder().build()).getClass()), + () -> { + InetAddressResolver resolver = new DefaultInetAddressResolver(); + assertSame( + resolver, + ServerAddressHelper.getInetAddressResolver(MongoClientSettings.builder().inetAddressResolver(resolver).build())); + } + ); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/connection/ServerDescriptionTest.java b/driver-core/src/test/unit/com/mongodb/connection/ServerDescriptionTest.java new file mode 100644 index 00000000000..36e25cb534c --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/connection/ServerDescriptionTest.java @@ -0,0 +1,528 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.connection; + +import com.mongodb.ServerAddress; +import com.mongodb.Tag; +import com.mongodb.TagSet; +import com.mongodb.internal.connection.Time; +import org.bson.types.ObjectId; +import org.junit.Test; + +import java.io.IOException; +import java.util.Collections; +import java.util.Date; +import java.util.HashSet; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.connection.ServerConnectionState.CONNECTED; +import static com.mongodb.connection.ServerConnectionState.CONNECTING; +import static com.mongodb.connection.ServerDescription.MAX_DRIVER_WIRE_VERSION; +import static com.mongodb.connection.ServerDescription.MIN_DRIVER_WIRE_VERSION; +import static com.mongodb.connection.ServerDescription.builder; +import static com.mongodb.connection.ServerType.LOAD_BALANCER; +import static com.mongodb.connection.ServerType.REPLICA_SET_PRIMARY; +import static com.mongodb.connection.ServerType.UNKNOWN; +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +public class ServerDescriptionTest { + + @Test(expected = IllegalArgumentException.class)
+ public void testMissingStatus() { + builder().address(new ServerAddress()).type(REPLICA_SET_PRIMARY).build(); + + } + +
@Test(expected = IllegalArgumentException.class) + public void testMissingAddress() { + builder().state(CONNECTED).type(REPLICA_SET_PRIMARY).build(); + } + +
@Test + public void testDefaults() { + long currentNanoTime = Time.nanoTime();
+ ServerDescription serverDescription = builder().address(new ServerAddress()) + .state(CONNECTED) + .build(); +
+ assertEquals(new ServerAddress(), serverDescription.getAddress()); + assertFalse(serverDescription.isOk());
+ assertEquals(CONNECTED, serverDescription.getState()); + assertEquals(UNKNOWN, serverDescription.getType()); +
+ assertFalse(serverDescription.isReplicaSetMember()); + assertFalse(serverDescription.isShardRouter()); + assertFalse(serverDescription.isStandAlone()); +
+ assertFalse(serverDescription.isPrimary()); + assertFalse(serverDescription.isSecondary()); +
+ assertEquals(0F, serverDescription.getRoundTripTimeNanos(), 0L); + assertEquals(0F, serverDescription.getMinRoundTripTimeNanos(), 0L); +
+ assertEquals(0x1000000, serverDescription.getMaxDocumentSize()); +
+ assertNull(serverDescription.getPrimary()); + assertEquals(Collections.emptySet(), serverDescription.getHosts());
+ assertEquals(new TagSet(), serverDescription.getTagSet()); + assertNull(serverDescription.getCanonicalAddress());
+ assertEquals(Collections.emptySet(), serverDescription.getArbiters()); + assertEquals(Collections.emptySet(), serverDescription.getPassives());
+ assertNull(serverDescription.getSetName()); + assertEquals(0, serverDescription.getMinWireVersion()); + assertEquals(0, serverDescription.getMaxWireVersion());
+ assertFalse(serverDescription.isCryptd()); + assertNull(serverDescription.getElectionId()); + assertNull(serverDescription.getSetVersion());
+ assertNull(serverDescription.getTopologyVersion()); + assertNull(serverDescription.getLastWriteDate());
+ assertTrue(serverDescription.getLastUpdateTime(TimeUnit.NANOSECONDS) >= currentNanoTime); +
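// lastUpdateTime defaults to the moment the description is built, so it can never precede the nanoTime captured at the start of the test +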
assertNull(serverDescription.getLogicalSessionTimeoutMinutes()); + assertNull(serverDescription.getException()); + } + + @Test + public void testBuilder() { + IllegalArgumentException exception = new IllegalArgumentException(); + TopologyVersion topologyVersion = new TopologyVersion(new ObjectId(), 42); + ServerDescription serverDescription = builder() + .address(new ServerAddress("localhost:27018")) + .type(REPLICA_SET_PRIMARY) + .tagSet(new TagSet(new Tag("dc", "ny"))) + .setName("test") + .maxDocumentSize(100) + .roundTripTime(50000, java.util.concurrent.TimeUnit.NANOSECONDS) + .minRoundTripTime(10000, java.util.concurrent.TimeUnit.NANOSECONDS) + .primary("localhost:27017") + .canonicalAddress("localhost:27018") + .hosts(new HashSet<>(asList("localhost:27017", + "localhost:27018", + "localhost:27019", + "localhost:27020"))) + .arbiters(new HashSet<>(singletonList("localhost:27019"))) + .passives(new HashSet<>(singletonList("localhost:27020"))) + .ok(true) + .state(CONNECTED) + .minWireVersion(1) + .maxWireVersion(2) + .electionId(new ObjectId("123412341234123412341234")) + .setVersion(2) + .topologyVersion(topologyVersion) + .lastWriteDate(new Date(1234L)) + .lastUpdateTimeNanos(40000L) + .logicalSessionTimeoutMinutes(30) + .exception(exception) + .cryptd(true) + .build(); + + + assertEquals(new ServerAddress("localhost:27018"), serverDescription.getAddress()); + assertTrue(serverDescription.isOk()); + assertEquals(CONNECTED, serverDescription.getState()); + assertEquals(REPLICA_SET_PRIMARY, serverDescription.getType()); + + assertTrue(serverDescription.isReplicaSetMember()); + assertFalse(serverDescription.isShardRouter()); + assertFalse(serverDescription.isStandAlone()); + + assertTrue(serverDescription.isPrimary()); + assertFalse(serverDescription.isSecondary()); + + assertEquals(50000, serverDescription.getRoundTripTimeNanos(), 0L); + assertEquals(10000, serverDescription.getMinRoundTripTimeNanos(), 0L); + + assertEquals(100, serverDescription.getMaxDocumentSize()); + + assertEquals("localhost:27017", serverDescription.getPrimary()); + assertEquals("localhost:27018", serverDescription.getCanonicalAddress()); + assertEquals(new HashSet<>(asList("localhost:27017", "localhost:27018", "localhost:27019", "localhost:27020")), + serverDescription.getHosts()); + assertEquals(new TagSet(new Tag("dc", "ny")), serverDescription.getTagSet()); + assertEquals(new HashSet<>(singletonList("localhost:27019")), serverDescription.getArbiters()); + assertEquals(new HashSet<>(singletonList("localhost:27020")), serverDescription.getPassives()); + assertEquals("test", serverDescription.getSetName()); + assertEquals(1, serverDescription.getMinWireVersion()); + assertEquals(2, serverDescription.getMaxWireVersion()); + assertEquals(new ObjectId("123412341234123412341234"), serverDescription.getElectionId()); + assertEquals(Integer.valueOf(2), serverDescription.getSetVersion()); + assertEquals(topologyVersion, serverDescription.getTopologyVersion()); + assertEquals(new Date(1234), serverDescription.getLastWriteDate()); + assertEquals(40000L, serverDescription.getLastUpdateTime(TimeUnit.NANOSECONDS)); + assertEquals((Integer) 30, serverDescription.getLogicalSessionTimeoutMinutes()); + assertEquals(exception, serverDescription.getException()); + assertEquals(serverDescription, builder(serverDescription).build()); + assertTrue(serverDescription.isCryptd()); + } + + @Test + public void testObjectOverrides() { + ServerDescription.Builder builder = createBuilder(); + ServerDescription description = builder.build(); 
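+ // baseline built above; each single-field mutation via createBuilder() below is expected to break equality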
+ + assertEquals(description.hashCode(), builder.build().hashCode()); + assertTrue(description.toString().startsWith("ServerDescription")); + + assertEquals(description, description); + + assertNotEquals(description, null); + assertNotEquals(description, "not a ServerDescription instance"); + assertEquals(description, builder.build()); + + ServerDescription otherDescription = createBuilder().address(new ServerAddress("localhost:27018")).build(); + assertNotEquals(builder.build(), otherDescription); + + otherDescription = createBuilder().type(ServerType.STANDALONE).build(); + assertNotEquals(builder.build(), otherDescription); + + otherDescription = createBuilder().tagSet(null).build(); + assertNotEquals(builder.build(), otherDescription); + + otherDescription = createBuilder().setName("test2").build(); + assertNotEquals(builder.build(), otherDescription); + + otherDescription = createBuilder().maxDocumentSize(200).build(); + assertNotEquals(builder.build(), otherDescription); + + otherDescription = createBuilder().primary("localhost:27018").build(); + assertNotEquals(builder.build(), otherDescription); + + otherDescription = createBuilder().canonicalAddress("localhost:27018").build(); + assertNotEquals(builder.build(), otherDescription); + + otherDescription = createBuilder().hosts(new HashSet<>(singletonList("localhost:27018"))).build(); + assertNotEquals(builder.build(), otherDescription); + + otherDescription = createBuilder().arbiters(new HashSet<>(singletonList("localhost:27018"))).build(); + assertNotEquals(builder.build(), otherDescription); + + otherDescription = createBuilder().passives(new HashSet<>(singletonList("localhost:27018"))).build(); + assertNotEquals(builder.build(), otherDescription); + + otherDescription = createBuilder().ok(false).build(); + assertNotEquals(builder.build(), otherDescription); + + otherDescription = createBuilder().state(CONNECTING).build(); + assertNotEquals(builder.build(), otherDescription); + + otherDescription = createBuilder().minWireVersion(2).build(); + assertNotEquals(builder.build(), otherDescription); + + otherDescription = createBuilder().maxWireVersion(5).build(); + assertNotEquals(builder.build(), otherDescription); + + otherDescription = createBuilder().electionId(new ObjectId()).build(); + assertNotEquals(builder.build(), otherDescription); + + otherDescription = createBuilder().setVersion(3).build(); + assertNotEquals(builder.build(), otherDescription); + + otherDescription = createBuilder().topologyVersion(new TopologyVersion(new ObjectId(), 44)).build(); + assertNotEquals(builder.build(), otherDescription); + + otherDescription = createBuilder().cryptd(true).build(); + assertNotEquals(builder.build(), otherDescription); + + // test exception state changes + assertNotEquals(createBuilder().exception(new IOException()).build(), + createBuilder().exception(new RuntimeException()).build()); + assertNotEquals(createBuilder().exception(new IOException("message one")).build(), + createBuilder().exception(new IOException("message two")).build()); + + // different lastUpdateTime and lastWriteDate are considered not equal but equivalent state + otherDescription = createBuilder().lastUpdateTimeNanos(Long.MAX_VALUE).build(); + assertNotEquals(builder.build(), otherDescription); + + otherDescription = createBuilder().lastWriteDate(new Date()).build(); + assertNotEquals(builder.build(), otherDescription); + + otherDescription = createBuilder().logicalSessionTimeoutMinutes(32).build(); + assertNotEquals(builder.build(), otherDescription); + + // 
roundTripTime is considered equal and equivalent state + otherDescription = createBuilder().roundTripTime(62, TimeUnit.MILLISECONDS).build();
+ assertEquals(builder.build(), otherDescription); + } + + private ServerDescription.Builder createBuilder() {
+ return builder().address(new ServerAddress()) + .type(ServerType.SHARD_ROUTER) + .tagSet(new TagSet(singletonList(new Tag("dc", "ny"))))
+ .setName("test") + .maxDocumentSize(100) + .roundTripTime(50000, TimeUnit.NANOSECONDS) + .primary("localhost:27017")
+ .canonicalAddress("localhost:27017") + .hosts(new HashSet<>(asList("localhost:27017", "localhost:27018")))
+ .passives(new HashSet<>(singletonList("localhost:27019"))) + .arbiters(new HashSet<>(singletonList("localhost:27020")))
+ .ok(true) + .state(CONNECTED) + .minWireVersion(1) + .lastWriteDate(new Date()) + .maxWireVersion(2)
+ .electionId(new ObjectId("abcdabcdabcdabcdabcdabcd")) + .setVersion(2)
+ .topologyVersion(new TopologyVersion(new ObjectId("5e47699e32e4571020a96f07"), 42)) + .lastUpdateTimeNanos(1)
+ .lastWriteDate(new Date(42)) + .logicalSessionTimeoutMinutes(25) + .roundTripTime(56, TimeUnit.MILLISECONDS); + } + +
@Test + public void testObjectOverridesWithUnequalException() { + ServerDescription.Builder builder1 = builder() + .state(CONNECTING)
+ .address(new ServerAddress()) + .exception(new IllegalArgumentException("This is illegal"));
+ ServerDescription.Builder builder2 = builder() + .state(CONNECTING) + .address(new ServerAddress())
+ .exception(new IllegalArgumentException("This is also illegal")); +
+ ServerDescription.Builder builder3 = builder() + .state(CONNECTING) + .address(new ServerAddress())
+ .exception(new IllegalStateException("This is illegal"));
+ ServerDescription.Builder builder4 = builder() + .state(CONNECTING) + .address(new ServerAddress()); + +
+ assertThat(builder1.build(), not(builder2.build())); + assertThat(builder1.build().hashCode(), not(builder2.build().hashCode()));
+ assertThat(builder1.build(), not(builder3.build())); + assertThat(builder1.build().hashCode(), not(builder3.build().hashCode()));
+ assertThat(builder1.build(), not(builder4.build())); + assertThat(builder1.build().hashCode(), not(builder4.build().hashCode()));
+ assertThat(builder4.build(), not(builder3.build())); + assertThat(builder4.build().hashCode(), not(builder3.build().hashCode())); + } + +
@Test + public void testShortDescription() {
+ assertEquals("{address=127.0.0.1:27017, type=UNKNOWN, TagSet{[Tag{name='dc', value='ny'}, Tag{name='rack', value='1'}]}, "
+ + "roundTripTime=5000.0 ms, state=CONNECTED, exception={java.lang.IllegalArgumentException: This is illegal}, "
+ + "caused by {java.lang.NullPointerException: This is null}}",
+ builder().state(CONNECTED) + .address(new ServerAddress()) + .roundTripTime(5000, TimeUnit.MILLISECONDS)
+ .tagSet(new TagSet(asList(new Tag("dc", "ny"), new Tag("rack", "1"))))
+ .exception(new IllegalArgumentException("This is illegal", new NullPointerException("This is null"))) + .build()
+ .getShortDescription()); + } + +
@Test + public void testIsPrimaryAndIsSecondary() { + ServerDescription serverDescription = builder() + .address(new ServerAddress())
+ .type(ServerType.SHARD_ROUTER) + .ok(false) + .state(CONNECTED) + .build();
+ assertFalse(serverDescription.isPrimary()); + assertFalse(serverDescription.isSecondary()); +
+ serverDescription = builder() + .address(new ServerAddress()) + .type(ServerType.SHARD_ROUTER) + .ok(true)
+ .state(CONNECTED) + .build(); + assertTrue(serverDescription.isPrimary()); +
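// an 'ok' mongos (SHARD_ROUTER) reports both isPrimary and isSecondary as true, since it can satisfy either read preference +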
assertTrue(serverDescription.isSecondary()); + + serverDescription = builder() + .address(new ServerAddress()) + .type(ServerType.STANDALONE) + .ok(true) + .state(CONNECTED) + .build(); + assertTrue(serverDescription.isPrimary()); + assertTrue(serverDescription.isSecondary()); + + serverDescription = builder() + .address(new ServerAddress()) + .type(REPLICA_SET_PRIMARY) + .ok(true) + .state(CONNECTED) + .build(); + assertTrue(serverDescription.isPrimary()); + assertFalse(serverDescription.isSecondary()); + + serverDescription = builder() + .address(new ServerAddress()) + .type(ServerType.REPLICA_SET_SECONDARY) + .ok(true) + .state(CONNECTED) + .build(); + assertFalse(serverDescription.isPrimary()); + assertTrue(serverDescription.isSecondary()); + } + + @Test + public void testHasTags() { + ServerDescription serverDescription = builder() + .address(new ServerAddress()) + .type(ServerType.SHARD_ROUTER) + .ok(false) + .state(CONNECTED) + .build(); + assertFalse(serverDescription.hasTags(new TagSet(singletonList(new Tag("dc", "ny"))))); + + serverDescription = builder() + .address(new ServerAddress()) + .type(ServerType.SHARD_ROUTER) + .ok(true) + .state(CONNECTED) + .build(); + assertTrue(serverDescription.hasTags(new TagSet(singletonList(new Tag("dc", "ny"))))); + + serverDescription = builder() + .address(new ServerAddress()) + .type(ServerType.STANDALONE) + .ok(true) + .state(CONNECTED) + .build(); + assertTrue(serverDescription.hasTags(new TagSet(singletonList(new Tag("dc", "ny"))))); + + serverDescription = builder() + .address(new ServerAddress()) + .type(REPLICA_SET_PRIMARY) + .ok(true) + .state(CONNECTED) + .build(); + assertTrue(serverDescription.hasTags(new TagSet())); + + serverDescription = builder() + .address(new ServerAddress()) + .type(REPLICA_SET_PRIMARY) + .ok(true) + .tagSet(new TagSet(singletonList(new Tag("dc", "ca")))) + .state(CONNECTED) + .build(); + assertFalse(serverDescription.hasTags(new TagSet(singletonList(new Tag("dc", "ny"))))); + + serverDescription = builder() + .address(new ServerAddress()) + .type(REPLICA_SET_PRIMARY) + .ok(true) + .tagSet(new TagSet(singletonList(new Tag("rack", "1")))) + .state(CONNECTED) + .build(); + assertFalse(serverDescription.hasTags(new TagSet(singletonList(new Tag("rack", "2"))))); + + serverDescription = builder() + .address(new ServerAddress()) + .type(REPLICA_SET_PRIMARY) + .ok(true) + .tagSet(new TagSet(singletonList(new Tag("rack", "1")))) + .state(CONNECTED) + .build(); + assertTrue(serverDescription.hasTags(new TagSet(singletonList(new Tag("rack", "1"))))); + } + + @Test + public void notOkServerShouldBeCompatible() { + ServerDescription serverDescription = builder() + .address(new ServerAddress()) + .state(CONNECTING) + .ok(false) + .build(); + assertTrue(serverDescription.isCompatibleWithDriver()); + assertFalse(serverDescription.isIncompatiblyNewerThanDriver()); + assertFalse(serverDescription.isIncompatiblyOlderThanDriver()); + } + + @Test + public void loadBalancerIsCompatible() { + ServerDescription serverDescription = builder() + .address(new ServerAddress()) + .state(CONNECTED) + .type(LOAD_BALANCER) + .ok(true) + .build(); + assertTrue(serverDescription.isCompatibleWithDriver()); + assertFalse(serverDescription.isIncompatiblyNewerThanDriver()); + assertFalse(serverDescription.isIncompatiblyOlderThanDriver()); + } + + @Test + public void serverWithMinWireVersionEqualToDriverMaxWireVersionShouldBeCompatible() { + ServerDescription serverDescription = builder() + .address(new ServerAddress()) + .state(CONNECTING) + 
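// wire version ranges that merely touch still overlap, so the server remains compatible (server min <= driver max, server max >= driver min) +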
.ok(true) + .minWireVersion(MAX_DRIVER_WIRE_VERSION) + .maxWireVersion(MAX_DRIVER_WIRE_VERSION + 1) + .build(); + assertTrue(serverDescription.isCompatibleWithDriver()); + assertFalse(serverDescription.isIncompatiblyNewerThanDriver()); + assertFalse(serverDescription.isIncompatiblyOlderThanDriver()); + } + + @Test + public void serverWithMaxWireVersionEqualToDriverMinWireVersionShouldBeCompatible() { + ServerDescription serverDescription = builder() + .address(new ServerAddress()) + .state(CONNECTING) + .ok(true) + .minWireVersion(MIN_DRIVER_WIRE_VERSION - 1) + .maxWireVersion(MIN_DRIVER_WIRE_VERSION) + .build(); + assertTrue(serverDescription.isCompatibleWithDriver()); + assertFalse(serverDescription.isIncompatiblyNewerThanDriver()); + assertFalse(serverDescription.isIncompatiblyOlderThanDriver()); + } + + @Test + public void serverWithMinWireVersionGreaterThanDriverMaxWireVersionShouldBeIncompatible() { + ServerDescription serverDescription = builder() + .address(new ServerAddress()) + .state(CONNECTING) + .ok(true) + .minWireVersion(MAX_DRIVER_WIRE_VERSION + 1) + .maxWireVersion(MAX_DRIVER_WIRE_VERSION + 1) + .build(); + assertFalse(serverDescription.isCompatibleWithDriver()); + assertTrue(serverDescription.isIncompatiblyNewerThanDriver()); + assertFalse(serverDescription.isIncompatiblyOlderThanDriver()); + } + + @Test + public void serverWithMaxWireVersionLessThanDriverMinWireVersionShouldBeIncompatible() { + ServerDescription serverDescription = builder() + .address(new ServerAddress()) + .state(CONNECTING) + .ok(true) + .minWireVersion(MIN_DRIVER_WIRE_VERSION - 1) + .maxWireVersion(MIN_DRIVER_WIRE_VERSION - 1) + .build(); + assertFalse(serverDescription.isCompatibleWithDriver()); + assertFalse(serverDescription.isIncompatiblyNewerThanDriver()); + assertTrue(serverDescription.isIncompatiblyOlderThanDriver()); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/connection/ServerIdSpecification.groovy b/driver-core/src/test/unit/com/mongodb/connection/ServerIdSpecification.groovy new file mode 100644 index 00000000000..d22fbb65a2c --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/connection/ServerIdSpecification.groovy @@ -0,0 +1,44 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.connection + +import com.mongodb.ServerAddress +import spock.lang.Specification + + +class ServerIdSpecification extends Specification { + def clusterId = new ClusterId() + def serverAddress = new ServerAddress('host1') + + def 'should set all properties'() { + given: + def serverId = new ServerId(clusterId, serverAddress) + + expect: + serverId.clusterId == clusterId + serverId.address == serverAddress + } + + def 'equivalent ids should be equal and have same hash code'() { + def id1 = new ServerId(clusterId, serverAddress) + def id2 = new ServerId(clusterId, serverAddress) + + expect: + id1 == id2 + id1.hashCode() == id2.hashCode() + } +} diff --git a/driver-core/src/test/unit/com/mongodb/connection/ServerSelectionSelectionTest.java b/driver-core/src/test/unit/com/mongodb/connection/ServerSelectionSelectionTest.java new file mode 100644 index 00000000000..5ac15e92817 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/connection/ServerSelectionSelectionTest.java @@ -0,0 +1,249 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.connection; + +import com.mongodb.MongoConfigurationException; +import com.mongodb.ReadPreference; +import com.mongodb.ServerAddress; +import com.mongodb.Tag; +import com.mongodb.TagSet; +import com.mongodb.internal.selector.LatencyMinimizingServerSelector; +import com.mongodb.internal.selector.ReadPreferenceServerSelector; +import com.mongodb.internal.selector.WritableServerSelector; +import com.mongodb.lang.NonNull; +import com.mongodb.lang.Nullable; +import com.mongodb.selector.CompositeServerSelector; +import com.mongodb.selector.ServerSelector; +import org.bson.BsonArray; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt64; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import util.JsonPoweredTestHelper; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Date; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static java.util.Arrays.asList; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +// See https://github.com/mongodb/specifications/tree/master/source/server-selection/tests +@RunWith(Parameterized.class) +public class ServerSelectionSelectionTest { + private final String description; + private final BsonDocument definition; + private final ClusterDescription clusterDescription; + private final long heartbeatFrequencyMS; + private final boolean error; + + public ServerSelectionSelectionTest(final String description, final BsonDocument definition) { + this.description = description; + this.definition = definition; + this.heartbeatFrequencyMS = definition.getNumber("heartbeatFrequencyMS", new 
BsonInt64(10000)).longValue(); + this.error = definition.getBoolean("error", BsonBoolean.FALSE).getValue();
+ this.clusterDescription = buildClusterDescription(definition.getDocument("topology_description"),
+ ServerSettings.builder().heartbeatFrequency(heartbeatFrequencyMS, TimeUnit.MILLISECONDS).build()); + } + +
@Test + public void shouldPassAllOutcomes() {
+ // skip this test because the driver prohibits maxStaleness or tagSets with mode of primary at a much lower level
+ assumeTrue(!description.endsWith("/max-staleness/tests/ReplicaSetWithPrimary/MaxStalenessWithModePrimary.json"));
+ ServerSelector serverSelector = null;
+ List<ServerDescription> suitableServers = buildServerDescriptions(definition.getArray("suitable_servers", new BsonArray()));
+ List<ServerDescription> selectedServers = null; + try { + serverSelector = getServerSelector();
+ selectedServers = serverSelector.select(clusterDescription); + if (error) { + fail("Should have thrown exception"); + }
+ } catch (MongoConfigurationException e) { + if (!error) { + fail("Should not have thrown exception: " + e); + } + return; + }
+ assertServers(selectedServers, suitableServers); +
+ ServerSelector latencyBasedServerSelector = new CompositeServerSelector(asList(serverSelector,
+ new LatencyMinimizingServerSelector(15, TimeUnit.MILLISECONDS)));
+ List<ServerDescription> inLatencyWindowServers = buildServerDescriptions(definition.getArray("in_latency_window"));
+ List<ServerDescription> latencyBasedSelectedServers = latencyBasedServerSelector.select(clusterDescription);
+ assertServers(latencyBasedSelectedServers, inLatencyWindowServers); + } + +
@Parameterized.Parameters(name = "{0}") + public static Collection<Object[]> data() { + List<Object[]> data = new ArrayList<>();
+ for (BsonDocument testDocument : JsonPoweredTestHelper.getSpecTestDocuments("server-selection/tests/server_selection")) {
+ data.add(new Object[]{testDocument.getString("resourcePath").getValue(), testDocument}); + }
+ for (BsonDocument testDocument : JsonPoweredTestHelper.getSpecTestDocuments("max-staleness/tests")) {
+ data.add(new Object[]{testDocument.getString("resourcePath").getValue(), testDocument}); + } + return data; + } + +
public static ClusterDescription buildClusterDescription(final BsonDocument topologyDescription,
+ @Nullable final ServerSettings serverSettings) {
+ ClusterType clusterType = getClusterType(topologyDescription.getString("type").getValue());
+ ClusterConnectionMode connectionMode = getClusterConnectionMode(clusterType);
+ List<ServerDescription> servers = buildServerDescriptions(topologyDescription.getArray("servers"));
+ return new ClusterDescription(connectionMode, clusterType, servers, null, + serverSettings == null ?
ServerSettings.builder().build() : serverSettings); + } + + @NonNull
+ private static ClusterConnectionMode getClusterConnectionMode(final ClusterType clusterType) {
+ if (clusterType == ClusterType.LOAD_BALANCED) { + return ClusterConnectionMode.LOAD_BALANCED; + }
+ return ClusterConnectionMode.MULTIPLE; + } + +
private static ClusterType getClusterType(final String type) { + if (type.equals("Single")) { + return ClusterType.STANDALONE;
+ } else if (type.startsWith("ReplicaSet")) { + return ClusterType.REPLICA_SET; + } else if (type.equals("Sharded")) {
+ return ClusterType.SHARDED; + } else if (type.equals("LoadBalanced")) { + return ClusterType.LOAD_BALANCED;
+ } else if (type.equals("Unknown")) { + return ClusterType.UNKNOWN; + } +
+ throw new UnsupportedOperationException("Unknown topology type: " + type); + } + +
private static List<ServerDescription> buildServerDescriptions(final BsonArray serverDescriptions) {
+ List<ServerDescription> descriptions = new ArrayList<>(); + for (BsonValue document : serverDescriptions) {
+ descriptions.add(buildServerDescription(document.asDocument())); + } + return descriptions; + } + +
private static ServerDescription buildServerDescription(final BsonDocument serverDescription) {
+ ServerDescription.Builder builder = ServerDescription.builder();
+ builder.address(new ServerAddress(serverDescription.getString("address").getValue()));
+ ServerType serverType = getServerType(serverDescription.getString("type").getValue());
+ builder.ok(serverType != ServerType.UNKNOWN); + builder.type(serverType); + if (serverDescription.containsKey("tags")) {
+ builder.tagSet(buildTagSet(serverDescription.getDocument("tags"))); + } + if (serverDescription.containsKey("avg_rtt_ms")) {
+ builder.roundTripTime(serverDescription.getNumber("avg_rtt_ms").asInt32().getValue(), TimeUnit.MILLISECONDS); + }
+ builder.state(ServerConnectionState.CONNECTED); + if (serverDescription.containsKey("lastWrite")) {
+ builder.lastWriteDate(new Date(serverDescription.getDocument("lastWrite").getNumber("lastWriteDate").longValue())); + }
+ if (serverDescription.containsKey("lastUpdateTime")) {
+ builder.lastUpdateTimeNanos(serverDescription.getNumber("lastUpdateTime").longValue() * 1000000); // convert to nanos
+ } else { + builder.lastUpdateTimeNanos(42L); + } + if (serverDescription.containsKey("maxWireVersion")) {
+ builder.maxWireVersion(serverDescription.getNumber("maxWireVersion").intValue()); + } + return builder.build(); + } + +
private static ServerType getServerType(final String serverTypeString) { + ServerType serverType;
+ if (serverTypeString.equals("RSPrimary")) { + serverType = ServerType.REPLICA_SET_PRIMARY;
+ } else if (serverTypeString.equals("RSSecondary")) { + serverType = ServerType.REPLICA_SET_SECONDARY;
+ } else if (serverTypeString.equals("RSArbiter")) { + serverType = ServerType.REPLICA_SET_ARBITER;
+ } else if (serverTypeString.equals("RSGhost")) { + serverType = ServerType.REPLICA_SET_GHOST;
+ } else if (serverTypeString.equals("RSOther")) { + serverType = ServerType.REPLICA_SET_OTHER;
+ } else if (serverTypeString.equals("Mongos")) { + serverType = ServerType.SHARD_ROUTER;
+ } else if (serverTypeString.equals("Standalone")) { + serverType = ServerType.STANDALONE;
+ } else if (serverTypeString.equals("LoadBalancer")) { + serverType = ServerType.LOAD_BALANCER;
+ } else if (serverTypeString.equals("PossiblePrimary")) { + serverType = ServerType.UNKNOWN;
+ } else if (serverTypeString.equals("Unknown")) { + serverType = ServerType.UNKNOWN; + } else {
+ throw new UnsupportedOperationException("No handler
for server type " + serverTypeString); + } + return serverType; + } + + private List buildTagSets(final BsonArray tags) { + List tagSets = new ArrayList<>(); + for (BsonValue tag : tags) { + tagSets.add(buildTagSet(tag.asDocument())); + } + return tagSets; + } + + + private static TagSet buildTagSet(final BsonDocument tags) { + List tagsSetTags = new ArrayList<>(); + for (String key : tags.keySet()) { + tagsSetTags.add(new Tag(key, tags.getString(key).getValue())); + } + return new TagSet(tagsSetTags); + } + + private ServerSelector getServerSelector() { + if (definition.getString("operation", new BsonString("read")).getValue().equals("write")) { + return new WritableServerSelector(); + } else { + BsonDocument readPreferenceDefinition = definition.getDocument("read_preference"); + ReadPreference readPreference; + if (readPreferenceDefinition.getString("mode").getValue().equals("Primary")) { + readPreference = ReadPreference.valueOf("Primary"); + } else if (readPreferenceDefinition.containsKey("maxStalenessSeconds")) { + readPreference = ReadPreference.valueOf(readPreferenceDefinition.getString("mode", new BsonString("Primary")).getValue(), + buildTagSets(readPreferenceDefinition.getArray("tag_sets", new BsonArray())), + Math.round(readPreferenceDefinition.getNumber("maxStalenessSeconds").doubleValue() * 1000), TimeUnit.MILLISECONDS); + } else { + readPreference = ReadPreference.valueOf(readPreferenceDefinition.getString("mode", new BsonString("Primary")).getValue(), + buildTagSets(readPreferenceDefinition.getArray("tag_sets", new BsonArray()))); + } + return new ReadPreferenceServerSelector(readPreference); + } + } + + private void assertServers(final List actual, final List expected) { + assertEquals(expected.size(), actual.size()); + assertTrue(actual.containsAll(expected)); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/connection/ServerSettingsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/connection/ServerSettingsSpecification.groovy new file mode 100644 index 00000000000..b11ed3a65a3 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/connection/ServerSettingsSpecification.groovy @@ -0,0 +1,188 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.connection + +import com.mongodb.ConnectionString +import com.mongodb.event.ServerListener +import com.mongodb.event.ServerMonitorListener +import spock.lang.Specification + +import static java.util.concurrent.TimeUnit.MILLISECONDS +import static java.util.concurrent.TimeUnit.SECONDS + +/** + * Update {@link ServerSettingsTest} instead. 
+ */ +class ServerSettingsSpecification extends Specification { + def 'should have correct defaults'() { + when: + def settings = ServerSettings.builder().build() + + then: + settings.getHeartbeatFrequency(MILLISECONDS) == 10000 + settings.getMinHeartbeatFrequency(MILLISECONDS) == 500 + settings.serverListeners == [] + settings.serverMonitorListeners == [] + } + + def 'should apply builder settings'() { + given: + def serverListenerOne = new ServerListener() { } + def serverListenerTwo = new ServerListener() { } + def serverListenerThree = new ServerListener() { } + def serverMonitorListenerOne = new ServerMonitorListener() { } + def serverMonitorListenerTwo = new ServerMonitorListener() { } + def serverMonitorListenerThree = new ServerMonitorListener() { } + + when: + def settings = ServerSettings.builder() + .heartbeatFrequency(4, SECONDS) + .minHeartbeatFrequency(1, SECONDS) + .addServerListener(serverListenerOne) + .addServerListener(serverListenerTwo) + .addServerMonitorListener(serverMonitorListenerOne) + .addServerMonitorListener(serverMonitorListenerTwo) + .build() + + + then: + settings.getHeartbeatFrequency(MILLISECONDS) == 4000 + settings.getMinHeartbeatFrequency(MILLISECONDS) == 1000 + settings.serverListeners == [serverListenerOne, serverListenerTwo] + settings.serverMonitorListeners == [serverMonitorListenerOne, serverMonitorListenerTwo] + + when: + settings = ServerSettings.builder() + .serverListenerList([serverListenerThree]) + .serverMonitorListenerList([serverMonitorListenerThree]).build() + + then: + settings.serverListeners == [serverListenerThree] + settings.serverMonitorListeners == [serverMonitorListenerThree] + } + + def 'when connection string is applied to builder, all properties should be set'() { + when: + def settings = ServerSettings.builder().applyConnectionString(new ConnectionString('mongodb://example.com:27018/?' 
+ + 'heartbeatFrequencyMS=20000')) + .build() + + then: + settings.getHeartbeatFrequency(MILLISECONDS) == 20000 + } + + def 'should apply settings'() { + given: + def serverListenerOne = new ServerListener() { } + def serverMonitorListenerOne = new ServerMonitorListener() { } + def defaultSettings = ServerSettings.builder().build() + def customSettings = ServerSettings.builder() + .heartbeatFrequency(4, SECONDS) + .minHeartbeatFrequency(1, SECONDS) + .addServerListener(serverListenerOne) + .addServerMonitorListener(serverMonitorListenerOne) + .build() + + expect: + ServerSettings.builder().applySettings(customSettings).build() == customSettings + ServerSettings.builder(customSettings).applySettings(defaultSettings).build() == defaultSettings + } + + def 'lists of listeners should be unmodifiable'() { + given: + def settings = ServerSettings.builder().build() + + when: + settings.serverListeners.add(new ServerListener() { }) + + then: + thrown(UnsupportedOperationException) + + when: + settings.serverMonitorListeners.add(new ServerMonitorListener() { }) + + then: + thrown(UnsupportedOperationException) + } + + def 'listeners should not be null'() { + when: + ServerSettings.builder().addServerListener(null) + + then: + thrown(IllegalArgumentException) + + when: + ServerSettings.builder().addServerMonitorListener(null) + + then: + thrown(IllegalArgumentException) + } + + def 'identical settings should be equal'() { + given: + def serverListenerOne = new ServerListener() { } + def serverMonitorListenerOne = new ServerMonitorListener() { } + + expect: + ServerSettings.builder().build() == ServerSettings.builder().build() + ServerSettings.builder() + .heartbeatFrequency(4, SECONDS) + .minHeartbeatFrequency(1, SECONDS) + .addServerListener(serverListenerOne) + .addServerMonitorListener(serverMonitorListenerOne) + .build() == + ServerSettings.builder() + .heartbeatFrequency(4, SECONDS) + .minHeartbeatFrequency(1, SECONDS) + .addServerListener(serverListenerOne) + .addServerMonitorListener(serverMonitorListenerOne) + .build() + } + + def 'different settings should not be equal'() { + expect: + ServerSettings.builder().heartbeatFrequency(4, SECONDS).build() != ServerSettings.builder().heartbeatFrequency(3, SECONDS).build() + } + + def 'identical settings should have same hash code'() { + given: + def serverListenerOne = new ServerListener() { } + def serverMonitorListenerOne = new ServerMonitorListener() { } + + expect: + ServerSettings.builder().build().hashCode() == ServerSettings.builder().build().hashCode() + ServerSettings.builder() + .heartbeatFrequency(4, SECONDS) + .minHeartbeatFrequency(1, SECONDS) + .addServerListener(serverListenerOne) + .addServerMonitorListener(serverMonitorListenerOne) + .build().hashCode() == + ServerSettings.builder() + .heartbeatFrequency(4, SECONDS) + .minHeartbeatFrequency(1, SECONDS) + .addServerListener(serverListenerOne) + .addServerMonitorListener(serverMonitorListenerOne) + .build().hashCode() + } + + def 'different settings should have different hash codes'() { + expect: + ServerSettings.builder().heartbeatFrequency(4, SECONDS).build().hashCode() != + ServerSettings.builder().heartbeatFrequency(3, SECONDS).build().hashCode() + } +} diff --git a/driver-core/src/test/unit/com/mongodb/connection/ServerSettingsTest.java b/driver-core/src/test/unit/com/mongodb/connection/ServerSettingsTest.java new file mode 100644 index 00000000000..e8868813b03 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/connection/ServerSettingsTest.java @@ -0,0 +1,106 @@ +/* + * 
Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.connection; + +import com.mongodb.ConnectionString; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.stream.Stream; + +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; + +final class ServerSettingsTest { + private static final String DEFAULT_OPTIONS = "mongodb://localhost/?"; + + @Test + void defaults() { + ServerSettings defaultServerSettings = ServerSettings.builder().build(); + assertAll(() -> assertEquals(ServerMonitoringMode.AUTO, defaultServerSettings.getServerMonitoringMode())); + } + + @ParameterizedTest + @MethodSource("equalAndHashCodeArgs") + void equalAndHashCode(final ServerSettings.Builder serverSettingsBuilder) { + ServerSettings default1 = ServerSettings.builder().build(); + ServerSettings default2 = ServerSettings.builder().build(); + ServerSettings actual1 = serverSettingsBuilder.build(); + ServerSettings actual2 = serverSettingsBuilder.build(); + assertAll( + () -> assertEquals(default1, default2), + () -> assertEquals(default1.hashCode(), default2.hashCode()), + () -> assertEquals(actual1, actual2), + () -> assertEquals(actual1.hashCode(), actual2.hashCode()), + () -> assertNotEquals(default1, actual1) + ); + } + + private static Stream<Arguments> equalAndHashCodeArgs() { + return Stream.of( + Arguments.of(ServerSettings.builder().serverMonitoringMode(ServerMonitoringMode.POLL)) + ); + } + + @Test + void serverMonitoringMode() { + assertAll( + () -> assertEquals( + ServerMonitoringMode.POLL, + ServerSettings.builder() + .serverMonitoringMode(ServerMonitoringMode.POLL) + .build() + .getServerMonitoringMode(), + "should set"), + () -> assertEquals( + ServerMonitoringMode.STREAM, + ServerSettings.builder() + .applySettings(ServerSettings.builder() + .serverMonitoringMode(ServerMonitoringMode.STREAM) + .build()) + .build() + .getServerMonitoringMode(), + "should apply from settings"), + () -> assertEquals( + ServerMonitoringMode.AUTO, + ServerSettings.builder() + .serverMonitoringMode(ServerMonitoringMode.STREAM) + .applySettings(ServerSettings.builder() + .build()) + .build() + .getServerMonitoringMode(), + "should apply unset from settings"), + () -> assertEquals( + ServerMonitoringMode.POLL, + ServerSettings.builder() + .applyConnectionString(new ConnectionString(DEFAULT_OPTIONS + "serverMonitoringMode=POLL")) + .build() + .getServerMonitoringMode(), + "should apply from connection string"), + () -> assertEquals( + ServerMonitoringMode.STREAM, + ServerSettings.builder() + .serverMonitoringMode(ServerMonitoringMode.STREAM) + .applyConnectionString(new ConnectionString(DEFAULT_OPTIONS)) + .build() + .getServerMonitoringMode(), + "should not apply unset from connection
string") + ); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/connection/ServerVersionSpecification.groovy b/driver-core/src/test/unit/com/mongodb/connection/ServerVersionSpecification.groovy new file mode 100644 index 00000000000..a6a3e45bd41 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/connection/ServerVersionSpecification.groovy @@ -0,0 +1,144 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.connection + +import spock.lang.Specification + +class ServerVersionSpecification extends Specification { + + def 'should default to version zero'() { + when: + def version = new ServerVersion() + + + then: + version.versionList == [0, 0, 0] + } + + def 'should not accept null version array'() { + when: + new ServerVersion(null) + + then: + thrown(IllegalArgumentException) + } + + def 'should not accept version array of length unequal to three'() { + when: + new ServerVersion([2, 5, 1, 0]) + + then: + thrown(IllegalStateException) + + when: + new ServerVersion([2, 5]) + + then: + thrown(IllegalStateException) + } + + def 'should have same version array as when constructed'() { + when: + def version = new ServerVersion([3, 4, 1]) + + then: + version.versionList == [3, 4, 1] + } + + def 'should have immutable version array'() { + given: + def version = new ServerVersion([3, 4, 1]) + + when: + version.versionList[0] = 1 + + then: + thrown(UnsupportedOperationException) + } + + def 'identical versions should be equal'() { + when: + def version = new ServerVersion([3, 4, 1]) + + then: + version == new ServerVersion([3, 4, 1]) + } + + def 'identical versions should have the same hash code'() { + when: + def version = new ServerVersion([3, 4, 1]) + + then: + version.hashCode() == new ServerVersion([3, 4, 1]).hashCode() + } + + def 'different versions should not be equal'() { + when: + def version = new ServerVersion([3, 4, 1]) + + then: + version != new ServerVersion([2, 5, 1]) + } + + def 'lower version should compare less than'() { + when: + def version = new ServerVersion([1, 5, 1]) + + then: + version.compareTo(new ServerVersion([2, 5, 1])) < 0 + + when: + version = new ServerVersion([2, 3, 1]) + + then: + version.compareTo(new ServerVersion([2, 5, 1])) < 0 + + when: + version = new ServerVersion([2, 5, 0]) + + then: + version.compareTo(new ServerVersion([2, 5, 1])) < 0 + } + + def 'higher version should compare greater than'() { + when: + def version = new ServerVersion([3, 6, 0]) + + then: + version.compareTo(new ServerVersion([3, 4, 1])) > 0 + + when: + version = new ServerVersion([3, 5, 1]) + + then: + version.compareTo(new ServerVersion([3, 4, 1])) > 0 + + when: + version = new ServerVersion([3, 4, 2]) + + then: + version.compareTo(new ServerVersion([3, 4, 1])) > 0 + } + + def 'same version should compare equal'() { + when: + def version = new ServerVersion([3, 4, 1]) + + then: + version.compareTo(new ServerVersion([3, 4, 1])) == 0 + } +} diff --git 
a/driver-core/src/test/unit/com/mongodb/connection/SocketSettingsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/connection/SocketSettingsSpecification.groovy new file mode 100644 index 00000000000..b2c646785f3 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/connection/SocketSettingsSpecification.groovy @@ -0,0 +1,210 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.connection + +import com.mongodb.ConnectionString +import spock.lang.Specification + +import static java.util.concurrent.TimeUnit.MILLISECONDS + +/** + * New unit tests for {@link SocketSettings} are to be added to {@link SocketSettingsTest}. + */ +class SocketSettingsSpecification extends Specification { + + def 'should have correct defaults'() { + when: + def settings = SocketSettings.builder().build() + + then: + settings.getConnectTimeout(MILLISECONDS) == 10000 + settings.getReadTimeout(MILLISECONDS) == 0 + settings.receiveBufferSize == 0 + settings.sendBufferSize == 0 + settings.proxySettings == ProxySettings.builder().build() + } + + def 'should set settings'() { + when: + def settings = SocketSettings.builder() + .connectTimeout(5000, MILLISECONDS) + .readTimeout(2000, MILLISECONDS) + .sendBufferSize(1000) + .receiveBufferSize(1500) + .applyToProxySettings { + it.host('proxy.com') + it.port(1080) + it.username('username') + it.password('password') + } + .build() + + + then: + settings.getConnectTimeout(MILLISECONDS) == 5000 + settings.getReadTimeout(MILLISECONDS) == 2000 + settings.sendBufferSize == 1000 + settings.receiveBufferSize == 1500 + def proxySettings = settings.getProxySettings() + proxySettings.getHost() == 'proxy.com' + proxySettings.getPort() == 1080 + proxySettings.getUsername() == 'username' + proxySettings.getPassword() == 'password' + } + + def 'should apply builder settings'() { + when: + def original = SocketSettings.builder() + .connectTimeout(5000, MILLISECONDS) + .readTimeout(2000, MILLISECONDS) + .sendBufferSize(1000) + .receiveBufferSize(1500) + .applyToProxySettings { + it.host('proxy.com') + it.port(1080) + it.username('username') + it.password('password') + } + .build() + + def settings = SocketSettings.builder(original).build() + + then: + settings.getConnectTimeout(MILLISECONDS) == 5000 + settings.getReadTimeout(MILLISECONDS) == 2000 + settings.sendBufferSize == 1000 + settings.receiveBufferSize == 1500 + def proxySettings = settings.getProxySettings() + proxySettings.getHost() == 'proxy.com' + proxySettings.getPort() == 1080 + proxySettings.getUsername() == 'username' + proxySettings.getPassword() == 'password' + } + + def 'should apply connection string'() { + when: + def settings = SocketSettings.builder() + .applyConnectionString(new ConnectionString + ('mongodb://localhost/?connectTimeoutMS=5000&socketTimeoutMS=2000' + + '&proxyHost=proxy.com' + + '&proxyPort=1080' + + '&proxyUsername=username' + + '&proxyPassword=password')) + .build() + + + then: + 
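+ // note: the connection string's socketTimeoutMS option maps to the builder's readTimeout, and the proxy* options populate ProxySettings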
settings.getConnectTimeout(MILLISECONDS) == 5000 + settings.getReadTimeout(MILLISECONDS) == 2000 + settings.sendBufferSize == 0 + settings.receiveBufferSize == 0 + def proxySettings = settings.getProxySettings() + proxySettings.getHost() == 'proxy.com' + proxySettings.getPort() == 1080 + proxySettings.getUsername() == 'username' + proxySettings.getPassword() == 'password' + } + + def 'should apply settings'() { + given: + def defaultSettings = SocketSettings.builder().build() + def customSettings = SocketSettings.builder() + .connectTimeout(5000, MILLISECONDS) + .readTimeout(2000, MILLISECONDS) + .sendBufferSize(1000) + .receiveBufferSize(1500) + .applyToProxySettings { + it.host('proxy.com') + it.port(1080) + it.username('username') + it.password('password') + } + .build() + + expect: + SocketSettings.builder().applySettings(customSettings).build() == customSettings + SocketSettings.builder(customSettings).applySettings(defaultSettings).build() == defaultSettings + } + + def 'identical settings should be equal'() { + expect: + SocketSettings.builder().build() == SocketSettings.builder().build() + SocketSettings.builder() + .connectTimeout(5000, MILLISECONDS) + .readTimeout(2000, MILLISECONDS) + .sendBufferSize(1000) + .receiveBufferSize(1500) + .applyToProxySettings { + it.host('proxy.com') + it.port(1080) + it.username('username') + it.password('password') + } + .build() == + SocketSettings.builder() + .connectTimeout(5000, MILLISECONDS) + .readTimeout(2000, MILLISECONDS) + .sendBufferSize(1000) + .receiveBufferSize(1500) + .applyToProxySettings { + it.host('proxy.com') + it.port(1080) + it.username('username') + it.password('password') + } + .build() + } + + def 'different settings should not be equal'() { + expect: + SocketSettings.builder().receiveBufferSize(4).build() != SocketSettings.builder().receiveBufferSize(3).build() + } + + def 'identical settings should have same hash code'() { + expect: + SocketSettings.builder().build().hashCode() == SocketSettings.builder().build().hashCode() + SocketSettings.builder() + .connectTimeout(5000, MILLISECONDS) + .readTimeout(2000, MILLISECONDS) + .sendBufferSize(1000) + .receiveBufferSize(1500) + .applyToProxySettings { + it.host('proxy.com') + it.port(1080) + it.username('username') + it.password('password') + } + .build().hashCode() == + SocketSettings.builder() + .connectTimeout(5000, MILLISECONDS) + .readTimeout(2000, MILLISECONDS) + .sendBufferSize(1000) + .receiveBufferSize(1500) + .applyToProxySettings { + it.host('proxy.com') + it.port(1080) + it.username('username') + it.password('password') + } + .build().hashCode() + } + + def 'different settings should have different hash codes'() { + expect: + SocketSettings.builder().sendBufferSize(4).build().hashCode() != SocketSettings.builder().sendBufferSize(3).build().hashCode() + } +} diff --git a/driver-core/src/test/unit/com/mongodb/connection/SocketSettingsTest.java b/driver-core/src/test/unit/com/mongodb/connection/SocketSettingsTest.java new file mode 100644 index 00000000000..bf092be0f54 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/connection/SocketSettingsTest.java @@ -0,0 +1,37 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.connection; + +import org.junit.jupiter.api.Test; + +import java.util.concurrent.TimeUnit; + +import static org.junit.jupiter.api.Assertions.assertThrows; + +/** + * {@link SocketSettingsSpecification} contains older unit tests for {@link SocketSettings}. + */ +final class SocketSettingsTest { + @Test + void connectTimeoutThrowsIfArgumentIsTooLarge() { + assertThrows(IllegalArgumentException.class, () -> SocketSettings.builder().connectTimeout(Integer.MAX_VALUE / 2, TimeUnit.SECONDS)); + } + + @Test + void readTimeoutThrowsIfArgumentIsTooLarge() { + assertThrows(IllegalArgumentException.class, () -> SocketSettings.builder().readTimeout(Integer.MAX_VALUE / 2, TimeUnit.SECONDS)); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/connection/SslSettingsSpecification.groovy b/driver-core/src/test/unit/com/mongodb/connection/SslSettingsSpecification.groovy new file mode 100644 index 00000000000..1f231f95418 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/connection/SslSettingsSpecification.groovy @@ -0,0 +1,118 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.connection + +import com.mongodb.ConnectionString +import spock.lang.Specification + +import javax.net.ssl.SSLContext + +class SslSettingsSpecification extends Specification { + + def 'should have the expected defaults'() { + when: + def settings = SslSettings.builder().build() + + then: + settings.context == null + !settings.enabled + !settings.invalidHostNameAllowed + } + + def 'should set settings'() { + when: + def settings = SslSettings.builder() + .context(SSLContext.getDefault()) + .enabled(true) + .invalidHostNameAllowed(true) + .build() + + then: + settings.context == SSLContext.getDefault() + settings.enabled + settings.invalidHostNameAllowed + } + + def 'should apply connection string without ssl'() { + expect: + builder.applyConnectionString(new ConnectionString(connectionString)).build() == expected + + where: + connectionString | builder | expected + 'mongodb://localhost' | SslSettings.builder() | SslSettings.builder().build() + 'mongodb://localhost/?ssl=false' | SslSettings.builder() | SslSettings.builder().build() + 'mongodb://localhost/?ssl=true' | SslSettings.builder() | SslSettings.builder().enabled(true).build() + 'mongodb://localhost/?ssl=true' + + '&sslInvalidHostNameAllowed=true' | SslSettings.builder() | SslSettings.builder().enabled(true) + .invalidHostNameAllowed(true).build() + 'mongodb://localhost/?ssl=true' + + '&sslInvalidHostNameAllowed=true' | SslSettings.builder() + .context(SSLContext.getDefault()) | SslSettings.builder().enabled(true) + .context(SSLContext.getDefault()) + .invalidHostNameAllowed(true).build() + } + + def 'should apply settings'() { + given: + def defaultSettings = SslSettings.builder().build() + def customSettings = SslSettings.builder() + .context(SSLContext.getDefault()) + .enabled(true) + .invalidHostNameAllowed(true) + .build() + + expect: + SslSettings.builder().applySettings(customSettings).build() == customSettings + SslSettings.builder(customSettings).applySettings(defaultSettings).build() == defaultSettings + } + + def 'should apply builder settings'() { + when: + def original = SslSettings.builder().enabled(true) + .context(SSLContext.getDefault()) + .invalidHostNameAllowed(true).build() + + def settings = SslSettings.builder(original).build() + + then: + original == settings + } + + def 'equivalent settings should be equal and have the same hash code'() { + expect: + SslSettings.builder().build() == SslSettings.builder().build() + SslSettings.builder().build().hashCode() == SslSettings.builder().build().hashCode() + SslSettings.builder().enabled(true).invalidHostNameAllowed(true).build() == + SslSettings.builder().enabled(true).invalidHostNameAllowed(true).build() + SslSettings.builder().enabled(true).invalidHostNameAllowed(true).build().hashCode() == + SslSettings.builder().enabled(true).invalidHostNameAllowed(true).build().hashCode() + SslSettings.builder().enabled(true).invalidHostNameAllowed(true).context(SSLContext.getDefault()).build() == + SslSettings.builder().enabled(true).invalidHostNameAllowed(true) + .context(SSLContext.getDefault()).build() + SslSettings.builder().enabled(true).invalidHostNameAllowed(true) + .context(SSLContext.getDefault()).build().hashCode() == + SslSettings.builder().enabled(true).invalidHostNameAllowed(true) + .context(SSLContext.getDefault()).build().hashCode() + } + + def 'unequivalent settings should not be equal or have the same hash code'() { + expect: + SslSettings.builder().build() != SslSettings.builder().enabled(true).build() + SslSettings.builder().build() 
!= SslSettings.builder().invalidHostNameAllowed(true).build() + SslSettings.builder().build() != SslSettings.builder().context(SSLContext.getDefault()).build() + } +} diff --git a/driver-core/src/test/unit/com/mongodb/event/CommandEventSpecification.groovy b/driver-core/src/test/unit/com/mongodb/event/CommandEventSpecification.groovy new file mode 100644 index 00000000000..015ac92aa3e --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/event/CommandEventSpecification.groovy @@ -0,0 +1,47 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.event + +import com.mongodb.ServerAddress +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ConnectionDescription +import com.mongodb.connection.ServerId +import com.mongodb.internal.IgnorableRequestContext +import org.bson.BsonDocument +import org.bson.BsonInt32 +import spock.lang.Specification + +class CommandEventSpecification extends Specification { + def 'should fail if elapsed time is negative'() { + when: + new CommandSucceededEvent(IgnorableRequestContext.INSTANCE, 1, 1, + new ConnectionDescription(new ServerId(new ClusterId(), new ServerAddress())), 'test', 'ping', + new BsonDocument('ok', new BsonInt32(1)), -1) + + then: + def e = thrown(IllegalArgumentException) + e.getMessage() == 'state should be: elapsed time is not negative' + + when: + new CommandFailedEvent(IgnorableRequestContext.INSTANCE, 1, 1, + new ConnectionDescription(new ServerId(new ClusterId(), new ServerAddress())), 'test', 'ping', -1, new Throwable()) + + then: + e = thrown(IllegalArgumentException) + e.getMessage() == 'state should be: elapsed time is not negative' + } +} diff --git a/driver-core/src/test/unit/com/mongodb/event/ServerHeartbeatEventSpecification.groovy b/driver-core/src/test/unit/com/mongodb/event/ServerHeartbeatEventSpecification.groovy new file mode 100644 index 00000000000..d86910f5dea --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/event/ServerHeartbeatEventSpecification.groovy @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.event + +import com.mongodb.ServerAddress +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ConnectionId +import com.mongodb.connection.ServerId +import org.bson.BsonDocument +import org.bson.BsonInt32 +import spock.lang.Specification + +class ServerHeartbeatEventSpecification extends Specification { + def 'should fail if elapsed time is negative'() { + when: + new ServerHeartbeatSucceededEvent(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())), + new BsonDocument('ok', new BsonInt32(1)), -1, false) + + then: + def e = thrown(IllegalArgumentException) + e.getMessage() == 'state should be: elapsed time is not negative' + + when: + new ServerHeartbeatFailedEvent(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())), -1, false, + new Throwable()) + + then: + e = thrown(IllegalArgumentException) + e.getMessage() == 'state should be: elapsed time is not negative' + } +} diff --git a/driver-core/src/test/unit/com/mongodb/event/TestServerMonitorListener.java b/driver-core/src/test/unit/com/mongodb/event/TestServerMonitorListener.java new file mode 100644 index 00000000000..27651c316ea --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/event/TestServerMonitorListener.java @@ -0,0 +1,159 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.event; + +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.lang.Nullable; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.assertTrue; +import static java.util.Collections.unmodifiableSet; +import static java.util.stream.StreamSupport.stream; + +@ThreadSafe +public final class TestServerMonitorListener implements ServerMonitorListener { + private final Set<Class<?>> listenableEventTypes; + private final Lock lock; + private final Condition condition; + private final List<Object> events; + + public TestServerMonitorListener(final Iterable<String> listenableEventTypes) { + this.listenableEventTypes = unmodifiableSet(stream(listenableEventTypes.spliterator(), false) + .map(TestServerMonitorListener::nullableEventType) + .filter(Objects::nonNull) + .collect(Collectors.toSet())); + lock = new ReentrantLock(); + condition = lock.newCondition(); + events = new ArrayList<>(); + } + + public void reset() { + lock.lock(); + try { + events.clear(); + condition.signalAll(); + } finally { + lock.unlock(); + } + } + + public void serverHearbeatStarted(final ServerHeartbeatStartedEvent event) { + register(event); + } + + public void serverHeartbeatSucceeded(final ServerHeartbeatSucceededEvent event) { + register(event); + } + + public void serverHeartbeatFailed(final ServerHeartbeatFailedEvent event) { + register(event); + } + + public <T> void waitForEvents(final Class<T> type, final Predicate<? super T> matcher, final long count, final Duration duration) + throws InterruptedException, TimeoutException { + assertTrue(listenable(type)); + long remainingNanos = duration.toNanos(); + lock.lock(); + try { + long observedCount = countEvents(type, matcher); + while (observedCount < count) { + if (remainingNanos <= 0) { + throw new TimeoutException(String.format("Timed out waiting for %d %s events.
The observed count is %d.", + count, type.getSimpleName(), observedCount)); + } + remainingNanos = condition.awaitNanos(remainingNanos); + observedCount = countEvents(type, matcher); + } + } finally { + lock.unlock(); + } + } + + public <T> long countEvents(final Class<T> type, final Predicate<? super T> matcher) { + assertTrue(listenable(type)); + lock.lock(); + try { + return events.stream() + .filter(type::isInstance) + .map(type::cast) + .filter(matcher) + .count(); + } finally { + lock.unlock(); + } + } + + public List<Object> getEvents() { + lock.lock(); + try { + return new ArrayList<>(events); + } finally { + lock.unlock(); + } + } + + public static Class<?> eventType(final String eventType) { + return assertNotNull(nullableEventType(eventType)); + } + + @Nullable + private static Class<?> nullableEventType(final String eventType) { + switch (eventType) { + case "serverHeartbeatStartedEvent": { + return ServerHeartbeatStartedEvent.class; + } + case "serverHeartbeatSucceededEvent": { + return ServerHeartbeatSucceededEvent.class; + } + case "serverHeartbeatFailedEvent": { + return ServerHeartbeatFailedEvent.class; + } + default: { + return null; + } + } + } + + private boolean listenable(final Class<?> eventType) { + return listenableEventTypes.contains(eventType); + } + + private void register(final Object event) { + if (!listenable(event.getClass())) { + return; + } + lock.lock(); + try { + events.add(event); + condition.signalAll(); + } finally { + lock.unlock(); + } + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/ExceptionUtilsTest.java b/driver-core/src/test/unit/com/mongodb/internal/ExceptionUtilsTest.java new file mode 100644 index 00000000000..9b9aaf59ce7 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/ExceptionUtilsTest.java @@ -0,0 +1,63 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.mongodb.internal; + +import com.mongodb.MongoCommandException; +import com.mongodb.ServerAddress; +import com.mongodb.internal.ExceptionUtils.MongoCommandExceptionUtils; +import org.bson.BsonArray; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.junit.jupiter.api.Nested; +import org.junit.jupiter.api.Test; + +import java.util.HashSet; + +import static java.util.Arrays.asList; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +final class ExceptionUtilsTest { + @Nested + final class MongoCommandExceptionUtilsTest { + @Test + void redacted() { + MongoCommandException original = new MongoCommandException( + new BsonDocument("ok", BsonBoolean.FALSE) + .append("code", new BsonInt32(26)) + .append("codeName", new BsonString("TimeoutError")) + .append("errorLabels", new BsonArray(asList(new BsonString("label"), new BsonString("label2")))) + .append("errmsg", new BsonString("err msg")), + new ServerAddress()); + MongoCommandException redacted = MongoCommandExceptionUtils.redacted(original); + assertArrayEquals(original.getStackTrace(), redacted.getStackTrace()); + String message = redacted.getMessage(); + assertTrue(message.contains("26")); + assertTrue(message.contains("TimeoutError")); + assertTrue(message.contains("label")); + assertFalse(message.contains("err msg")); + assertTrue(redacted.getErrorMessage().isEmpty()); + assertEquals(26, redacted.getErrorCode()); + assertEquals("TimeoutError", redacted.getErrorCodeName()); + assertEquals(new HashSet<>(asList("label", "label2")), redacted.getErrorLabels()); + assertEquals(MongoCommandExceptionUtils.SecurityInsensitiveResponseField.fieldNames(), redacted.getResponse().keySet()); + } + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/ExpirableValueTest.java b/driver-core/src/test/unit/com/mongodb/internal/ExpirableValueTest.java new file mode 100644 index 00000000000..a11d18b4b2b --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/ExpirableValueTest.java @@ -0,0 +1,65 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal; + +import org.junit.jupiter.api.Test; + +import java.time.Duration; + +import static com.mongodb.internal.ExpirableValue.expirable; +import static com.mongodb.internal.ExpirableValue.expired; +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; + +class ExpirableValueTest { + + @Test + void testExpired() { + assertFalse(expired().getValue().isPresent()); + } + + @SuppressWarnings("OptionalGetWithoutIsPresent") + @Test + void testExpirable() { + assertAll( + () -> assertThrows(AssertionError.class, () -> expirable(null, Duration.ofNanos(1))), + () -> assertThrows(AssertionError.class, () -> expirable(1, null)), + () -> assertFalse(expirable(1, Duration.ofNanos(-1)).getValue().isPresent()), + () -> assertFalse(expirable(1, Duration.ZERO).getValue().isPresent()), + () -> assertEquals(1, expirable(1, Duration.ofSeconds(1)).getValue().get()), + () -> { + ExpirableValue<Integer> expirableValue = expirable(1, Duration.ofNanos(1)); + Thread.sleep(1); + assertFalse(expirableValue.getValue().isPresent()); + }, + () -> { + ExpirableValue<Integer> expirableValue = expirable(1, Duration.ofMinutes(60), Long.MAX_VALUE); + assertEquals(1, expirableValue.getValue(Long.MAX_VALUE + Duration.ofMinutes(30).toNanos()).get()); + }, + () -> { + ExpirableValue<Integer> expirableValue = expirable(1, Duration.ofMinutes(60), Long.MAX_VALUE); + assertEquals(1, expirableValue.getValue(Long.MAX_VALUE + Duration.ofMinutes(30).toNanos()).get()); + assertFalse(expirableValue.getValue(Long.MAX_VALUE + Duration.ofMinutes(61).toNanos()).isPresent()); + }, + () -> { + ExpirableValue<Integer> expirableValue = expirable(1, Duration.ofNanos(10), Long.MAX_VALUE - 20); + assertFalse(expirableValue.getValue(Long.MAX_VALUE - 20 + Duration.ofNanos(30).toNanos()).isPresent()); + }); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/IterablesTest.java b/driver-core/src/test/unit/com/mongodb/internal/IterablesTest.java new file mode 100644 index 00000000000..bf8ab291231 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/IterablesTest.java @@ -0,0 +1,55 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.mongodb.internal; + +import org.junit.jupiter.api.Test; + +import java.util.List; + +import static java.util.Arrays.asList; +import static java.util.Collections.singleton; +import static java.util.Collections.singletonList; +import static java.util.stream.Collectors.toList; +import static java.util.stream.StreamSupport.stream; +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; + +final class IterablesTest { + @Test + void concat() { + assertAll( + () -> assertIterable(singletonList(null), Iterables.concat(null)), + () -> assertIterable(singletonList(null), Iterables.concat(null, (Object[]) null)), + () -> assertIterable(singletonList(1), Iterables.concat(1)), + () -> assertIterable(singletonList(1), Iterables.concat(1, (Object[]) null)), + () -> assertIterable(asList(null, null), Iterables.concat(null, new Object[] {null})), + () -> assertIterable(asList(null, null), Iterables.concat(null, singleton(null))), + () -> assertIterable(asList(1, null), Iterables.concat(1, new Object[] {null})), + () -> assertIterable(asList(1, null), Iterables.concat(1, singleton(null))), + () -> assertIterable(asList(null, 1), Iterables.concat(null, 1)), + () -> assertIterable(asList(null, 1), Iterables.concat(null, singleton(1))), + () -> assertIterable(asList(1, 2), Iterables.concat(1, 2)), + () -> assertIterable(asList(1, 2), Iterables.concat(1, singleton(2))), + () -> assertIterable(asList(1, 2, 3), Iterables.concat(1, 2, 3)), + () -> assertIterable(asList(1, 2, 3), Iterables.concat(1, asList(2, 3))) + ); + } + + private static void assertIterable(final List<?> expected, final Iterable<?> actual) { + assertEquals(expected, stream(actual.spliterator(), false).collect(toList())); + assertEquals(expected.toString(), actual.toString()); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/SslHelperSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/SslHelperSpecification.groovy new file mode 100644 index 00000000000..96001ed0471 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/SslHelperSpecification.groovy @@ -0,0 +1,49 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.internal + +import com.mongodb.ServerAddress +import com.mongodb.internal.connection.SslHelper +import spock.lang.Specification + +import javax.net.ssl.SNIHostName +import javax.net.ssl.SSLParameters + +class SslHelperSpecification extends Specification { + def 'should enable HTTPS host name verification'() { + given: + def sslParameters = new SSLParameters() + + when: + SslHelper.enableHostNameVerification(sslParameters) + + then: + sslParameters.getEndpointIdentificationAlgorithm() == 'HTTPS' + } + + def 'should enable server name indicator'() { + given: + def serverName = 'server.me' + def sslParameters = new SSLParameters() + + when: + SslHelper.enableSni(new ServerAddress(serverName).getHost(), sslParameters) + + then: + sslParameters.getServerNames() == [new SNIHostName(serverName)] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/TimeoutContextTest.java b/driver-core/src/test/unit/com/mongodb/internal/TimeoutContextTest.java new file mode 100644 index 00000000000..130d408076e --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/TimeoutContextTest.java @@ -0,0 +1,353 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal; + +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import com.mongodb.session.ClientSession; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.mockito.Mockito; + +import java.util.function.Supplier; +import java.util.stream.Stream; + +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS; +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT; +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_LEGACY_SETTINGS; +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_AWAIT_TIME; +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_COMMIT; +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME; +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_MAX_TIME_AND_AWAIT_TIME; +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_TIMEOUT; +import static com.mongodb.ClusterFixture.sleep; +import static java.util.concurrent.TimeUnit.NANOSECONDS; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +final class TimeoutContextTest { + + public static long getMaxTimeMS(final TimeoutContext timeoutContext) { + long[] result = {0L}; + timeoutContext.runMaxTimeMS((ms) -> result[0] = ms); + return 
result[0]; + } + + @Test + @DisplayName("test defaults") + void testDefaults() { + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS); + + assertFalse(timeoutContext.hasTimeoutMS()); + assertEquals(0, getMaxTimeMS(timeoutContext)); + assertEquals(0, timeoutContext.getMaxAwaitTimeMS()); + assertEquals(0, timeoutContext.getMaxCommitTimeMS()); + assertEquals(0, timeoutContext.getReadTimeoutMS()); + } + + @Test + @DisplayName("Uses timeoutMS if set") + void testUsesTimeoutMSIfSet() { + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_TIMEOUT); + + assertTrue(timeoutContext.hasTimeoutMS()); + assertTrue(getMaxTimeMS(timeoutContext) > 0); + assertEquals(0, timeoutContext.getMaxAwaitTimeMS()); + } + + @Test + @DisplayName("infinite timeoutMS") + void testInfiniteTimeoutMS() { + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT); + + assertTrue(timeoutContext.hasTimeoutMS()); + assertEquals(0, getMaxTimeMS(timeoutContext)); + assertEquals(0, timeoutContext.getMaxAwaitTimeMS()); + } + + @Test + @DisplayName("MaxTimeMS set") + void testMaxTimeMSSet() { + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_MAX_TIME); + + assertFalse(timeoutContext.hasTimeoutMS()); + assertEquals(100, getMaxTimeMS(timeoutContext)); + assertEquals(0, timeoutContext.getMaxAwaitTimeMS()); + } + + @Test + @DisplayName("MaxAwaitTimeMS set") + void testMaxAwaitTimeMSSet() { + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_MAX_AWAIT_TIME); + + assertFalse(timeoutContext.hasTimeoutMS()); + assertEquals(0, getMaxTimeMS(timeoutContext)); + assertEquals(101, timeoutContext.getMaxAwaitTimeMS()); + } + + @Test + @DisplayName("MaxTimeMS and MaxAwaitTimeMS set") + void testMaxTimeMSAndMaxAwaitTimeMSSet() { + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_MAX_TIME_AND_AWAIT_TIME); + + assertFalse(timeoutContext.hasTimeoutMS()); + assertEquals(101, getMaxTimeMS(timeoutContext)); + assertEquals(1001, timeoutContext.getMaxAwaitTimeMS()); + } + + @Test + @DisplayName("MaxCommitTimeMS set") + void testMaxCommitTimeMSSet() { + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_MAX_COMMIT); + + assertFalse(timeoutContext.hasTimeoutMS()); + assertEquals(0, getMaxTimeMS(timeoutContext)); + assertEquals(0, timeoutContext.getMaxAwaitTimeMS()); + assertEquals(999L, timeoutContext.getMaxCommitTimeMS()); + } + + @Test + @DisplayName("All deprecated options set") + void testAllDeprecatedOptionsSet() { + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_LEGACY_SETTINGS); + + assertFalse(timeoutContext.hasTimeoutMS()); + assertEquals(101, getMaxTimeMS(timeoutContext)); + assertEquals(1001, timeoutContext.getMaxAwaitTimeMS()); + assertEquals(999, timeoutContext.getMaxCommitTimeMS()); + } + + @Test + @DisplayName("Use timeout if available or the alternative") + void testUseTimeoutIfAvailableOrTheAlternative() { + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS); + assertEquals(99L, timeoutContext.timeoutOrAlternative(99)); + + timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(0L)); + assertEquals(0L, timeoutContext.timeoutOrAlternative(99)); + + timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(999L)); + assertTrue(timeoutContext.timeoutOrAlternative(0) <= 999); + + timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(999L)); + assertTrue(timeoutContext.timeoutOrAlternative(999999) <= 
999); + + timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS); + assertEquals(0, timeoutContext.getMaxCommitTimeMS()); + + timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(999L)); + assertTrue(timeoutContext.getMaxCommitTimeMS() <= 999); + } + + @Test + @DisplayName("withAdditionalReadTimeout works as expected") + void testWithAdditionalReadTimeout() { + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS.withReadTimeoutMS(0)); + assertEquals(0L, timeoutContext.withAdditionalReadTimeout(101).getReadTimeoutMS()); + + timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS.withReadTimeoutMS(10_000L)); + assertEquals(10_101L, timeoutContext.withAdditionalReadTimeout(101).getReadTimeoutMS()); + + long originalValue = Long.MAX_VALUE - 100; + timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS.withReadTimeoutMS(originalValue)); + assertEquals(Long.MAX_VALUE, timeoutContext.withAdditionalReadTimeout(101).getReadTimeoutMS()); + + assertThrows(AssertionError.class, () -> new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(0L)).withAdditionalReadTimeout(1)); + + assertThrows(AssertionError.class, () -> new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(10_000L)).withAdditionalReadTimeout(1)); + } + + @Test + @DisplayName("Expired works as expected") + void testExpired() { + TimeoutContext smallTimeout = new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(1L)); + TimeoutContext longTimeout = + new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(9999999L)); + TimeoutContext noTimeout = new TimeoutContext(TIMEOUT_SETTINGS); + sleep(100); + assertFalse(hasExpired(noTimeout.getTimeout())); + assertFalse(hasExpired(longTimeout.getTimeout())); + assertTrue(hasExpired(smallTimeout.getTimeout())); + } + + private static boolean hasExpired(@Nullable final Timeout timeout) { + return Timeout.nullAsInfinite(timeout).call(NANOSECONDS, () -> false, (ns) -> false, () -> true); + } + + @Test + @DisplayName("throws when calculating timeout if expired") + void testThrowsWhenExpired() { + TimeoutContext smallTimeout = new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(1L)); + TimeoutContext longTimeout = new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(9999999L)); + TimeoutContext noTimeout = new TimeoutContext(TIMEOUT_SETTINGS); + sleep(100); + + assertThrows(MongoOperationTimeoutException.class, smallTimeout::getReadTimeoutMS); + assertThrows(MongoOperationTimeoutException.class, smallTimeout::getWriteTimeoutMS); + assertThrows(MongoOperationTimeoutException.class, smallTimeout::getConnectTimeoutMs); + assertThrows(MongoOperationTimeoutException.class, () -> getMaxTimeMS(smallTimeout)); + assertThrows(MongoOperationTimeoutException.class, smallTimeout::getMaxCommitTimeMS); + assertThrows(MongoOperationTimeoutException.class, () -> smallTimeout.timeoutOrAlternative(1)); + assertDoesNotThrow(longTimeout::getReadTimeoutMS); + assertDoesNotThrow(longTimeout::getWriteTimeoutMS); + assertDoesNotThrow(longTimeout::getConnectTimeoutMs); + assertDoesNotThrow(() -> getMaxTimeMS(longTimeout)); + assertDoesNotThrow(longTimeout::getMaxCommitTimeMS); + assertDoesNotThrow(() -> longTimeout.timeoutOrAlternative(1)); + assertDoesNotThrow(noTimeout::getReadTimeoutMS); + assertDoesNotThrow(noTimeout::getWriteTimeoutMS); + assertDoesNotThrow(noTimeout::getConnectTimeoutMs); + assertDoesNotThrow(() -> getMaxTimeMS(noTimeout)); + assertDoesNotThrow(noTimeout::getMaxCommitTimeMS); + assertDoesNotThrow(() -> noTimeout.timeoutOrAlternative(1)); + } + + @Test + @DisplayName("validates minRoundTripTime for 
maxTimeMS") + void testValidatedMinRoundTripTime() { + Supplier supplier = () -> new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(100L)); + + assertTrue(getMaxTimeMS(supplier.get()) <= 100); + assertTrue(getMaxTimeMS(supplier.get().minRoundTripTimeMS(10)) <= 90); + assertThrows(MongoOperationTimeoutException.class, () -> getMaxTimeMS(supplier.get().minRoundTripTimeMS(101))); + assertThrows(MongoOperationTimeoutException.class, () -> getMaxTimeMS(supplier.get().minRoundTripTimeMS(100))); + } + + @Test + @DisplayName("Test createTimeoutContext handles legacy settings") + void testCreateTimeoutContextLegacy() { + TimeoutContext sessionTimeoutContext = new TimeoutContext(TIMEOUT_SETTINGS); + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_LEGACY_SETTINGS); + + ClientSession clientSession = Mockito.mock(ClientSession.class); + Mockito.when(clientSession.getTimeoutContext()).thenReturn(sessionTimeoutContext); + + TimeoutContext actualTimeoutContext = TimeoutContext.createTimeoutContext(clientSession, timeoutContext.getTimeoutSettings()); + assertEquals(timeoutContext, actualTimeoutContext); + } + + @Test + @DisplayName("Test createTimeoutContext with timeout legacy settings") + void testCreateTimeoutContextWithTimeoutLegacy() { + TimeoutContext sessionTimeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_TIMEOUT); + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_LEGACY_SETTINGS); + + ClientSession clientSession = Mockito.mock(ClientSession.class); + Mockito.when(clientSession.getTimeoutContext()).thenReturn(sessionTimeoutContext); + + TimeoutContext actualTimeoutContext = TimeoutContext.createTimeoutContext(clientSession, timeoutContext.getTimeoutSettings()); + assertEquals(sessionTimeoutContext, actualTimeoutContext); + } + + @Test + @DisplayName("Test createTimeoutContext with timeout") + void testCreateTimeoutContextWithTimeout() { + TimeoutContext sessionTimeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_TIMEOUT); + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS_WITH_TIMEOUT.withMaxAwaitTimeMS(123)); + + ClientSession clientSession = Mockito.mock(ClientSession.class); + Mockito.when(clientSession.getTimeoutContext()).thenReturn(sessionTimeoutContext); + + TimeoutContext actualTimeoutContext = TimeoutContext.createTimeoutContext(clientSession, timeoutContext.getTimeoutSettings()); + assertEquals(sessionTimeoutContext, actualTimeoutContext); + } + + @Test + @DisplayName("should override maxTimeMS when MaxTimeSupplier is set") + void shouldOverrideMaximeMS() { + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(100L).withMaxTimeMS(1)); + + timeoutContext.setMaxTimeOverride(2L); + + assertEquals(2, getMaxTimeMS(timeoutContext)); + } + + @Test + @DisplayName("should reset maxTimeMS to default behaviour") + void shouldResetMaximeMS() { + TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS.withTimeoutMS(100L).withMaxTimeMS(1)); + timeoutContext.setMaxTimeOverride(1L); + + timeoutContext.resetToDefaultMaxTime(); + + assertTrue(getMaxTimeMS(timeoutContext) > 1); + } + + static Stream shouldChooseConnectTimeoutWhenItIsLessThenTimeoutMs() { + return Stream.of( + //connectTimeoutMS, timeoutMS, expected + Arguments.of(500L, 1000L, 500L), + Arguments.of(0L, null, 0L), + Arguments.of(1000L, null, 1000L), + Arguments.of(1000L, 0L, 1000L), + Arguments.of(0L, 0L, 0L) + ); + } + + @ParameterizedTest + @MethodSource + @DisplayName("should choose connectTimeoutMS when 
connectTimeoutMS is less than timeoutMS") + void shouldChooseConnectTimeoutWhenItIsLessThenTimeoutMs(final Long connectTimeoutMS, + final Long timeoutMS, + final long expected) { + TimeoutContext timeoutContext = new TimeoutContext( + new TimeoutSettings(0, + connectTimeoutMS, + 0, + timeoutMS, + 0)); + + long calculatedTimeoutMS = timeoutContext.getConnectTimeoutMs(); + assertEquals(expected, calculatedTimeoutMS); + } + + + static Stream shouldChooseTimeoutMsWhenItIsLessThenConnectTimeoutMS() { + return Stream.of( + //connectTimeoutMS, timeoutMS, expected + Arguments.of(1000L, 1000L, 999), + Arguments.of(1000L, 500L, 499L), + Arguments.of(0L, 1000L, 999L) + ); + } + + @ParameterizedTest + @MethodSource + @DisplayName("should choose timeoutMS when timeoutMS is less than connectTimeoutMS") + void shouldChooseTimeoutMsWhenItIsLessThenConnectTimeoutMS(final Long connectTimeoutMS, + final Long timeoutMS, + final long expected) { + TimeoutContext timeoutContext = new TimeoutContext( + new TimeoutSettings(0, + connectTimeoutMS, + 0, + timeoutMS, + 0)); + + long calculatedTimeoutMS = timeoutContext.getConnectTimeoutMs(); + assertTrue(expected - calculatedTimeoutMS <= 1); + } + + private TimeoutContextTest() { + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/TimeoutSettingsTest.java b/driver-core/src/test/unit/com/mongodb/internal/TimeoutSettingsTest.java new file mode 100644 index 00000000000..9bffd08542b --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/TimeoutSettingsTest.java @@ -0,0 +1,82 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.mongodb.internal;
+
+import org.junit.jupiter.api.DynamicTest;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestFactory;
+
+import java.util.Collection;
+
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS;
+import static java.util.Arrays.asList;
+import static org.junit.jupiter.api.Assertions.assertAll;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.DynamicTest.dynamicTest;
+
+final class TimeoutSettingsTest {
+
+    @TestFactory
+    Collection<DynamicTest> timeoutSettingsTest() {
+        return asList(
+                dynamicTest("test defaults", () -> {
+                    TimeoutSettings timeoutSettings = TIMEOUT_SETTINGS;
+                    assertAll(
+                            () -> assertEquals(30_000, timeoutSettings.getServerSelectionTimeoutMS()),
+                            () -> assertEquals(10_000, timeoutSettings.getConnectTimeoutMS()),
+                            () -> assertEquals(0, timeoutSettings.getReadTimeoutMS()),
+                            () -> assertNull(timeoutSettings.getTimeoutMS()),
+                            () -> assertEquals(0, timeoutSettings.getMaxTimeMS()),
+                            () -> assertEquals(0, timeoutSettings.getMaxAwaitTimeMS()),
+                            () -> assertNull(timeoutSettings.getWTimeoutMS())
+                    );
+                }),
+                dynamicTest("test overrides", () -> {
+                    TimeoutSettings timeoutSettings = TIMEOUT_SETTINGS
+                            .withTimeoutMS(100L)
+                            .withMaxTimeMS(111)
+                            .withMaxAwaitTimeMS(11)
+                            .withMaxCommitMS(999L)
+                            .withReadTimeoutMS(11_000)
+                            .withConnectTimeoutMS(500)
+                            .withWTimeoutMS(222L);
+                    assertAll(
+                            () -> assertEquals(30_000, timeoutSettings.getServerSelectionTimeoutMS()),
+                            () -> assertEquals(500, timeoutSettings.getConnectTimeoutMS()),
+                            () -> assertEquals(11_000, timeoutSettings.getReadTimeoutMS()),
+                            () -> assertEquals(100, timeoutSettings.getTimeoutMS()),
+                            () -> assertEquals(111, timeoutSettings.getMaxTimeMS()),
+                            () -> assertEquals(11, timeoutSettings.getMaxAwaitTimeMS()),
+                            () -> assertEquals(999, timeoutSettings.getMaxCommitTimeMS()),
+                            () -> assertEquals(222, timeoutSettings.getWTimeoutMS())
+                    );
+                })
+        );
+    }
+
+    @Test
+    public void testTimeoutSettingsValidation() {
+        assertThrows(IllegalArgumentException.class, () -> TIMEOUT_SETTINGS.withTimeoutMS(-1L));
+        assertThrows(IllegalArgumentException.class, () -> TIMEOUT_SETTINGS.withMaxAwaitTimeMS(-1));
+        assertThrows(IllegalArgumentException.class, () -> TIMEOUT_SETTINGS.withMaxTimeMS(-1));
+        assertThrows(IllegalArgumentException.class, () -> TIMEOUT_SETTINGS.withTimeoutMS(10L).withMaxAwaitTimeMS(11));
+    }
+
+    private TimeoutSettingsTest() {
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsAbstractTest.java b/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsAbstractTest.java
new file mode 100644
index 00000000000..9a9b7552d3e
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsAbstractTest.java
@@ -0,0 +1,993 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.internal.async;
+
+import com.mongodb.MongoException;
+import com.mongodb.internal.TimeoutContext;
+import com.mongodb.internal.TimeoutSettings;
+import org.junit.jupiter.api.Test;
+
+import java.util.function.BiConsumer;
+import java.util.function.Consumer;
+import java.util.function.Supplier;
+
+import static com.mongodb.assertions.Assertions.assertNotNull;
+import static com.mongodb.internal.async.AsyncRunnable.beginAsync;
+
+abstract class AsyncFunctionsAbstractTest extends AsyncFunctionsTestBase {
+    private static final TimeoutContext TIMEOUT_CONTEXT = new TimeoutContext(new TimeoutSettings(0, 0, 0, 0L, 0));
+
+    @Test
+    void test1Method() {
+        // the number of expected variations is often 1 + N methods invoked:
+        // 1 variation with no exceptions, plus one for an exception in each method
+        assertBehavesSameVariations(2,
+                () -> {
+                    // single sync method invocations...
+                    sync(1);
+                },
+                (callback) -> {
+                    // ...become a single async invocation, wrapped in begin-thenRun/finish:
+                    beginAsync().thenRun(c -> {
+                        async(1, c);
+                    }).finish(callback);
+                });
+    }
+
+    @Test
+    void test2Methods() {
+        // tests pairs, converting: plain-sync, sync-plain, sync-sync
+        // (plain-plain does not need an async chain)
+
+        assertBehavesSameVariations(3,
+                () -> {
+                    // plain (unaffected) invocations...
+                    plain(1);
+                    sync(2);
+                },
+                (callback) -> {
+                    beginAsync().thenRun(c -> {
+                        // ...are preserved above affected methods
+                        plain(1);
+                        async(2, c);
+                    }).finish(callback);
+                });
+
+        assertBehavesSameVariations(3,
+                () -> {
+                    // when a plain invocation follows an affected method...
+                    sync(1);
+                    plain(2);
+                },
+                (callback) -> {
+                    // ...it is moved to its own block, and must be completed:
+                    beginAsync().thenRun(c -> {
+                        async(1, c);
+                    }).thenRun(c -> {
+                        plain(2);
+                        c.complete(c);
+                    }).finish(callback);
+                });
+
+        assertBehavesSameVariations(3,
+                () -> {
+                    // when an affected method follows an affected method...
+                    sync(1);
+                    sync(2);
+                },
+                (callback) -> {
+                    // ...it is moved to its own block
+                    beginAsync().thenRun(c -> {
+                        async(1, c);
+                    }).thenRun(c -> {
+                        async(2, c);
+                    }).finish(callback);
+                });
+    }
+
+    @Test
+    void test4Methods() {
+        // tests the sync-sync pair with preceding and ensuing plain methods.
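+        // (Per the "1 + N" rule above, the first block expects 5 variations:
+        // one success path, plus one for each of its 4 methods that can throw.)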
+ + assertBehavesSameVariations(5, + () -> { + plain(11); + sync(1); + plain(22); + sync(2); + }, + (callback) -> { + beginAsync().thenRun(c -> { + plain(11); + async(1, c); + }).thenRun(c -> { + plain(22); + async(2, c); + }).finish(callback); + }); + + assertBehavesSameVariations(5, + () -> { + sync(1); + plain(11); + sync(2); + plain(22); + }, + (callback) -> { + beginAsync().thenRun(c -> { + async(1, c); + }).thenRun(c -> { + plain(11); + async(2, c); + }).thenRunAndFinish(() ->{ + plain(22); + }, callback); + }); + } + + @Test + void testSupply() { + assertBehavesSameVariations(4, + () -> { + sync(0); + plain(1); + return syncReturns(2); + }, + (callback) -> { + beginAsync().thenRun(c -> { + async(0, c); + }).thenSupply(c -> { + plain(1); + asyncReturns(2, c); + }).finish(callback); + }); + } + + @Test + void testSupplyWithMixedReturns() { + assertBehavesSameVariations(5, + () -> { + if (plainTest(1)) { + return syncReturns(11); + } else { + return plainReturns(22); + } + }, + (callback) -> { + beginAsync().thenSupply(c -> { + if (plainTest(1)) { + asyncReturns(11, c); + } else { + int r = plainReturns(22); + c.complete(r); // corresponds to a return, and + // must be followed by a return or end of method + } + }).finish(callback); + }); + } + + @Test + void testFullChain() { + // tests a chain with: runnable, producer, function, function, consumer + assertBehavesSameVariations(14, + () -> { + plain(90); + sync(0); + plain(91); + sync(1); + plain(92); + int v = syncReturns(2); + plain(93); + v = syncReturns(v + 1); + plain(94); + v = syncReturns(v + 10); + plain(95); + sync(v + 100); + plain(96); + }, + (callback) -> { + beginAsync().thenRun(c -> { + plain(90); + async(0, c); + }).thenRun(c -> { + plain(91); + async(1, c); + }).thenSupply(c -> { + plain(92); + asyncReturns(2, c); + }).thenApply((v, c) -> { + plain(93); + asyncReturns(v + 1, c); + }).thenApply((v, c) -> { + plain(94); + asyncReturns(v + 10, c); + }).thenConsume((v, c) -> { + plain(95); + async(v + 100, c); + }).thenRunAndFinish(() -> { + plain(96); + }, callback); + }); + } + + @Test + void testConditionals() { + assertBehavesSameVariations(5, + () -> { + if (plainTest(1)) { + sync(2); + } else { + sync(3); + } + }, + (callback) -> { + beginAsync().thenRun(c -> { + if (plainTest(1)) { + async(2, c); + } else { + async(3, c); + } + }).finish(callback); + }); + + // 2 : fail on first sync, fail on test + // 3 : true test, sync2, sync3 + // 2 : false test, sync3 + // 7 total + assertBehavesSameVariations(7, + () -> { + sync(0); + if (plainTest(1)) { + sync(2); + } + sync(3); + }, + (callback) -> { + beginAsync().thenRun(c -> { + async(0, c); + }).thenRunIf(() -> plainTest(1), c -> { + async(2, c); + }).thenRun(c -> { + async(3, c); + }).finish(callback); + }); + + // an additional affected method within the "if" branch + assertBehavesSameVariations(8, + () -> { + sync(0); + if (plainTest(1)) { + sync(21); + sync(22); + } + sync(3); + }, + (callback) -> { + beginAsync().thenRun(c -> { + async(0, c); + }).thenRunIf(() -> plainTest(1), + beginAsync().thenRun(c -> { + async(21, c); + }).thenRun((c) -> { + async(22, c); + }) + ).thenRun(c -> { + async(3, c); + }).finish(callback); + }); + + // empty `else` branch + assertBehavesSameVariations(5, + () -> { + if (plainTest(1)) { + Integer connection = syncReturns(2); + sync(connection + 5); + } else { + // do nothing + } + }, + (callback) -> { + beginAsync().thenRun(c -> { + if (plainTest(1)) { + beginAsync().thenSupply(c2 -> { + asyncReturns(2, c2); + 
}).thenConsume((connection, c3) -> { + async(connection + 5, c3); + }).finish(c); + } else { + c.complete(c); // do nothing + } + }).finish(callback); + }); + } + + @Test + void testMixedConditionalCascade() { + assertBehavesSameVariations(9, + () -> { + boolean test1 = plainTest(1); + if (test1) { + return syncReturns(11); + } + boolean test2 = plainTest(2); + if (test2) { + return 22; + } + int x = syncReturns(33); + plain(x + 100); + return syncReturns(44); + }, + (callback) -> { + beginAsync().thenSupply(c -> { + boolean test1 = plainTest(1); + if (test1) { + asyncReturns(11, c); + return; + } + boolean test2 = plainTest(2); + if (test2) { + c.complete(22); + return; + } + beginAsync().thenSupply(c2 -> { + asyncReturns(33, c2); + }).thenApply((x, c2) -> { + plain(assertNotNull(x) + 100); + asyncReturns(44, c2); + }).finish(c); + }).finish(callback); + }); + } + + @Test + void testPlain() { + // For completeness. This should not be used, since there is no async. + assertBehavesSameVariations(2, + () -> { + plain(1); + }, + (callback) -> { + beginAsync().thenRun(c -> { + plain(1); + c.complete(c); + }).finish(callback); + }); + } + + @Test + void testTryCatch() { + // single method in both try and catch + assertBehavesSameVariations(3, + () -> { + try { + sync(1); + } catch (Throwable t) { + sync(2); + } + }, + (callback) -> { + beginAsync().thenRun(c -> { + async(1, c); + }).onErrorIf(t -> true, (t, c) -> { + async(2, c); + }).finish(callback); + }); + + // mixed sync/plain + assertBehavesSameVariations(3, + () -> { + try { + sync(1); + } catch (Throwable t) { + plain(2); + } + }, + (callback) -> { + beginAsync().thenRun(c -> { + async(1, c); + }).onErrorIf(t -> true, (t, c) -> { + plain(2); + c.complete(c); + }).finish(callback); + }); + + // chain of 2 in try. + // WARNING: "onErrorIf" will consider everything in + // the preceding chain to be part of the try. + // Use nested async chains, or convenience methods, + // to define the beginning of the try. + assertBehavesSameVariations(5, + () -> { + try { + sync(1); + sync(2); + } catch (Throwable t) { + sync(9); + } + }, + (callback) -> { + beginAsync().thenRun(c -> { + async(1, c); + }).thenRun(c -> { + async(2, c); + }).onErrorIf(t -> true, (t, c) -> { + async(9, c); + }).finish(callback); + }); + + // chain of 2 in catch + assertBehavesSameVariations(4, + () -> { + try { + sync(1); + } catch (Throwable t) { + sync(8); + sync(9); + } + }, + (callback) -> { + beginAsync().thenRun(c -> { + async(1, c); + }).onErrorIf(t -> true, (t, callback2) -> { + beginAsync().thenRun(c -> { + async(8, c); + }).thenRun(c -> { + async(9, c); + }).finish(callback2); + }).finish(callback); + }); + + // method after the try-catch block + // here, the try-catch must be nested (as a code block) + assertBehavesSameVariations(5, + () -> { + try { + sync(1); + } catch (Throwable t) { + sync(2); + } + sync(3); + }, + (callback) -> { + beginAsync().thenRun(c2 -> { + beginAsync().thenRun(c -> { + async(1, c); + }).onErrorIf(t -> true, (t, c) -> { + async(2, c); + }).finish(c2); + }).thenRun(c -> { + async(3, c); + }).finish(callback); + }); + + // multiple catch blocks + // WARNING: these are not exclusive; if multiple "onErrorIf" blocks + // match, they will all be executed. 
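+        // (For example, a predicate checking "t instanceof RuntimeException" would
+        // also match an IllegalStateException; the two instanceof checks below are
+        // mutually exclusive, so exactly one handler runs per exception.)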
+ assertBehavesSameVariations(5, + () -> { + try { + if (plainTest(1)) { + throw new UnsupportedOperationException("A"); + } else { + throw new IllegalStateException("B"); + } + } catch (UnsupportedOperationException t) { + sync(8); + } catch (IllegalStateException t) { + sync(9); + } + }, + (callback) -> { + beginAsync().thenRun(c -> { + if (plainTest(1)) { + throw new UnsupportedOperationException("A"); + } else { + throw new IllegalStateException("B"); + } + }).onErrorIf(t -> t instanceof UnsupportedOperationException, (t, c) -> { + async(8, c); + }).onErrorIf(t -> t instanceof IllegalStateException, (t, c) -> { + async(9, c); + }).finish(callback); + }); + } + + @Test + void testTryWithEmptyCatch() { + assertBehavesSameVariations(2, + () -> { + try { + throw new RuntimeException(); + } catch (MongoException e) { + // ignore exceptions + } finally { + plain(2); + } + plain(3); + }, + (callback) -> { + beginAsync().thenRun(c -> { + beginAsync().thenRunTryCatchAsyncBlocks(c2 -> { + c2.completeExceptionally(new RuntimeException()); + }, MongoException.class, (e, c3) -> { + c3.complete(c3); // ignore exceptions + }) + .thenAlwaysRunAndFinish(() -> { + plain(2); + }, c); + }).thenRun(c4 -> { + plain(3); + c4.complete(c4); + }).finish(callback); + }); + } + + @Test + void testTryCatchHelper() { + assertBehavesSameVariations(4, + () -> { + plain(0); + try { + sync(1); + } catch (Throwable t) { + plain(2); + throw t; + } + }, + (callback) -> { + beginAsync().thenRun(c -> { + plain(0); + c.complete(c); + }).thenRunTryCatchAsyncBlocks(c -> { + async(1, c); + }, Throwable.class, (t, c) -> { + plain(2); + c.completeExceptionally(t); + }).finish(callback); + }); + + assertBehavesSameVariations(5, + () -> { + plain(0); + try { + sync(1); + } catch (Throwable t) { + plain(2); + throw t; + } + sync(4); + }, + (callback) -> { + beginAsync().thenRun(c -> { + plain(0); + c.complete(c); + }).thenRunTryCatchAsyncBlocks(c -> { + async(1, c); + }, Throwable.class, (t, c) -> { + plain(2); + c.completeExceptionally(t); + }).thenRun(c -> { + async(4, c); + }).finish(callback); + }); + } + + @Test + void testTryCatchWithVariables() { + // using supply etc. + assertBehavesSameVariations(12, + () -> { + try { + int i = plainTest(0) ? 1 : 2; + i = syncReturns(i + 10); + sync(i + 100); + } catch (Throwable t) { + sync(3); + } + }, + (callback) -> { + beginAsync().thenRun( + beginAsync().thenSupply(c -> { + int i = plainTest(0) ? 1 : 2; + asyncReturns(i + 10, c); + }).thenConsume((i, c) -> { + async(assertNotNull(i) + 100, c); + }) + ).onErrorIf(t -> true, (t, c) -> { + async(3, c); + }).finish(callback); + }); + + // using an externally-declared variable + assertBehavesSameVariations(17, + () -> { + int i = plainTest(0) ? 1 : 2; + try { + i = syncReturns(i + 10); + sync(i + 100); + } catch (Throwable t) { + sync(3); + } + sync(i + 1000); + }, + (callback) -> { + final int[] i = new int[1]; + beginAsync().thenRun(c -> { + i[0] = plainTest(0) ? 1 : 2; + c.complete(c); + }).thenRun(c -> { + beginAsync().thenSupply(c2 -> { + asyncReturns(i[0] + 10, c2); + }).thenConsume((i2, c2) -> { + i[0] = assertNotNull(i2); + async(i2 + 100, c2); + }).onErrorIf(t -> true, (t, c2) -> { + async(3, c2); + }).finish(c); + }).thenRun(c -> { + async(i[0] + 1000, c); + }).finish(callback); + }); + } + + @Test + void testTryCatchWithConditionInCatch() { + assertBehavesSameVariations(12, + () -> { + try { + sync(plainTest(0) ? 
1 : 2); + sync(3); + } catch (Throwable t) { + sync(5); + if (t.getMessage().equals("exception-1")) { + throw t; + } else { + throw new RuntimeException("wrapped-" + t.getMessage(), t); + } + } + }, + (callback) -> { + beginAsync().thenRun(c -> { + async(plainTest(0) ? 1 : 2, c); + }).thenRun(c -> { + async(3, c); + }).onErrorIf(t -> true, (t, c) -> { + beginAsync().thenRun(c2 -> { + async(5, c2); + }).thenRun(c2 -> { + if (assertNotNull(t).getMessage().equals("exception-1")) { + throw (RuntimeException) t; + } else { + throw new RuntimeException("wrapped-" + t.getMessage(), t); + } + }).finish(c); + }).finish(callback); + }); + } + + @Test + void testTryCatchTestAndRethrow() { + // thenSupply: + assertBehavesSameVariations(5, + () -> { + try { + return syncReturns(1); + } catch (Exception e) { + if (e.getMessage().equals(plainTest(1) ? "unexpected" : "exception-1")) { + return syncReturns(2); + } else { + throw e; + } + } + }, + (callback) -> { + beginAsync().thenSupply(c -> { + asyncReturns(1, c); + }).onErrorIf(e -> e.getMessage().equals(plainTest(1) ? "unexpected" : "exception-1"), (t, c) -> { + asyncReturns(2, c); + }).finish(callback); + }); + + // thenRun: + assertBehavesSameVariations(5, + () -> { + try { + sync(1); + } catch (Exception e) { + if (e.getMessage().equals(plainTest(1) ? "unexpected" : "exception-1")) { + sync(2); + } else { + throw e; + } + } + }, + (callback) -> { + beginAsync().thenRun(c -> { + async(1, c); + }).onErrorIf(e -> e.getMessage().equals(plainTest(1) ? "unexpected" : "exception-1"), (t, c) -> { + async(2, c); + }).finish(callback); + }); + } + + @Test + void testRetryLoop() { + assertBehavesSameVariations(InvocationTracker.DEPTH_LIMIT * 2 + 1, + () -> { + while (true) { + try { + sync(plainTest(0) ? 1 : 2); + } catch (RuntimeException e) { + if (e.getMessage().equals("exception-1")) { + continue; + } + throw e; + } + break; + } + }, + (callback) -> { + beginAsync().thenRunRetryingWhile( + TIMEOUT_CONTEXT, + c -> async(plainTest(0) ? 
1 : 2, c),
+                            e -> e.getMessage().equals("exception-1")
+                    ).finish(callback);
+                });
+    }
+
+    @Test
+    void testDoWhileLoop() {
+        assertBehavesSameVariations(67,
+                () -> {
+                    do {
+                        plain(0);
+                        sync(1);
+                    } while (plainTest(2));
+                },
+                (finalCallback) -> {
+                    beginAsync().thenRunDoWhileLoop(
+                            callback -> {
+                                plain(0);
+                                async(1, callback);
+                            },
+                            () -> plainTest(2)
+                    ).finish(finalCallback);
+                });
+    }
+
+    @Test
+    void testFinallyWithPlainInsideTry() {
+        // (in try: normal flow + exception + exception) * (in finally: normal + exception) = 6
+        assertBehavesSameVariations(6,
+                () -> {
+                    try {
+                        plain(1);
+                        sync(2);
+                    } finally {
+                        plain(3);
+                    }
+                },
+                (callback) -> {
+                    beginAsync().thenRun(c -> {
+                        plain(1);
+                        async(2, c);
+                    }).thenAlwaysRunAndFinish(() -> {
+                        plain(3);
+                    }, callback);
+                });
+    }
+
+    @Test
+    void testFinallyWithPlainOutsideTry() {
+        assertBehavesSameVariations(5,
+                () -> {
+                    plain(1);
+                    try {
+                        sync(2);
+                    } finally {
+                        plain(3);
+                    }
+                },
+                (callback) -> {
+                    beginAsync().thenRun(c -> {
+                        plain(1);
+                        beginAsync().thenRun(c2 -> {
+                            async(2, c2);
+                        }).thenAlwaysRunAndFinish(() -> {
+                            plain(3);
+                        }, c);
+                    }).finish(callback);
+                });
+    }
+
+    @Test
+    void testSupplyFinallyWithPlainInsideTry() {
+        assertBehavesSameVariations(6,
+                () -> {
+                    try {
+                        plain(1);
+                        return syncReturns(2);
+                    } finally {
+                        plain(3);
+                    }
+                },
+                (callback) -> {
+                    beginAsync().thenSupply(c -> {
+                        plain(1);
+                        asyncReturns(2, c);
+                    }).thenAlwaysRunAndFinish(() -> {
+                        plain(3);
+                    }, callback);
+                });
+    }
+
+    @Test
+    void testSupplyFinallyWithPlainOutsideTry() {
+        assertBehavesSameVariations(5,
+                () -> {
+                    plain(1);
+                    try {
+                        return syncReturns(2);
+                    } finally {
+                        plain(3);
+                    }
+                },
+                (callback) -> {
+                    beginAsync().thenSupply(c -> {
+                        plain(1);
+                        beginAsync().thenSupply(c2 -> {
+                            asyncReturns(2, c2);
+                        }).thenAlwaysRunAndFinish(() -> {
+                            plain(3);
+                        }, c);
+                    }).finish(callback);
+                });
+    }
+
+
+    @Test
+    void testUsedAsLambda() {
+        assertBehavesSameVariations(4,
+                () -> {
+                    Supplier<Integer> s = () -> syncReturns(9);
+                    sync(0);
+                    plain(1);
+                    return s.get();
+                },
+                (callback) -> {
+                    AsyncSupplier<Integer> s = (c) -> asyncReturns(9, c);
+                    beginAsync().thenRun(c -> {
+                        async(0, c);
+                    }).thenSupply((c) -> {
+                        plain(1);
+                        s.getAsync(c);
+                    }).finish(callback);
+                });
+    }
+
+    @Test
+    void testVariables() {
+        assertBehavesSameVariations(3,
+                () -> {
+                    int something;
+                    something = 90;
+                    sync(something);
+                    something = something + 10;
+                    sync(something);
+                },
+                (callback) -> {
+                    // Certain variables may need to be shared; these can be
+                    // declared (but not initialized) outside the async chain.
+                    // Any container works (atomic allowed but not needed)
+                    final int[] something = new int[1];
+                    beginAsync().thenRun(c -> {
+                        something[0] = 90;
+                        async(something[0], c);
+                    }).thenRun((c) -> {
+                        something[0] = something[0] + 10;
+                        async(something[0], c);
+                    }).finish(callback);
+                });
+    }
+
+    @Test
+    void testDerivation() {
+        // Demonstrates the progression from nested async to the API.
+
+        // Stand-ins for sync-async methods; these "happily" do not throw
+        // exceptions, to avoid complicating this demo async code.
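+        // (Reminder: a SingleResultCallback receives (value, exception) via
+        // onResult; c.complete(c) is the framework's shorthand for completing
+        // a Void callback successfully.)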
+        Consumer<Integer> happySync = (i) -> {
+            getNextOption(1);
+            listenerAdd("affected-success-" + i);
+        };
+        BiConsumer<Integer, SingleResultCallback<Void>> happyAsync = (i, c) -> {
+            happySync.accept(i);
+            c.complete(c);
+        };
+
+        // Standard nested async, no error handling:
+        assertBehavesSameVariations(1,
+                () -> {
+                    happySync.accept(1);
+                    happySync.accept(2);
+                },
+                (callback) -> {
+                    happyAsync.accept(1, (v, e) -> {
+                        happyAsync.accept(2, callback);
+                    });
+                });
+
+        // When both methods are naively extracted, they are out of order:
+        assertBehavesSameVariations(1,
+                () -> {
+                    happySync.accept(1);
+                    happySync.accept(2);
+                },
+                (callback) -> {
+                    SingleResultCallback<Void> second = (v, e) -> {
+                        happyAsync.accept(2, callback);
+                    };
+                    SingleResultCallback<Void> first = (v, e) -> {
+                        happyAsync.accept(1, second);
+                    };
+                    first.onResult(null, null);
+                });
+
+        // We create an "AsyncRunnable" that takes a callback, which
+        // decouples any async methods from each other, allowing them
+        // to be declared in a sync-like order, and without nesting:
+        assertBehavesSameVariations(1,
+                () -> {
+                    happySync.accept(1);
+                    happySync.accept(2);
+                },
+                (callback) -> {
+                    AsyncRunnable first = (SingleResultCallback<Void> c) -> {
+                        happyAsync.accept(1, c);
+                    };
+                    AsyncRunnable second = (SingleResultCallback<Void> c) -> {
+                        happyAsync.accept(2, c);
+                    };
+                    // This is a simplified variant of the "then" methods;
+                    // it has no error handling. It takes methods A and B,
+                    // and returns C, which is B(A()).
+                    AsyncRunnable combined = (c) -> {
+                        first.unsafeFinish((r, e) -> {
+                            second.unsafeFinish(c);
+                        });
+                    };
+                    combined.unsafeFinish(callback);
+                });
+
+        // This combining method is added as a default method on AsyncRunnable,
+        // and a "finish" method wraps the resulting methods. This also adds
+        // exception handling and monadic short-circuiting of ensuing methods
+        // when an exception arises (comparable to how thrown exceptions "skip"
+        // ensuing code).
+        assertBehavesSameVariations(3,
+                () -> {
+                    sync(1);
+                    sync(2);
+                },
+                (callback) -> {
+                    beginAsync().thenRun(c -> {
+                        async(1, c);
+                    }).thenRun(c -> {
+                        async(2, c);
+                    }).finish(callback);
+                });
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTestBase.java b/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTestBase.java
new file mode 100644
index 00000000000..10a58152d9f
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/async/AsyncFunctionsTestBase.java
@@ -0,0 +1,374 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.async;
+
+import com.mongodb.client.TestListener;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.opentest4j.AssertionFailedError;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Consumer;
+import java.util.function.Supplier;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+
+public abstract class AsyncFunctionsTestBase {
+
+    private final TestListener listener = new TestListener();
+    private final InvocationTracker invocationTracker = new InvocationTracker();
+    private boolean isTestingAbruptCompletion = false;
+    private ExecutorService asyncExecutor;
+
+    void setIsTestingAbruptCompletion(final boolean b) {
+        isTestingAbruptCompletion = b;
+    }
+
+    public void setAsyncStep(final boolean isAsyncStep) {
+        invocationTracker.isAsyncStep = isAsyncStep;
+    }
+
+    public void getNextOption(final int i) {
+        invocationTracker.getNextOption(i);
+    }
+
+    public void listenerAdd(final String s) {
+        listener.add(s);
+    }
+
+    /**
+     * Create an executor service for async operations before each test.
+     *
+     * @return the executor service.
+     */
+    public abstract ExecutorService createAsyncExecutor();
+
+    @BeforeEach
+    public void setUp() {
+        asyncExecutor = createAsyncExecutor();
+    }
+
+    @AfterEach
+    public void shutDown() {
+        asyncExecutor.shutdownNow();
+    }
+
+    void plain(final int i) {
+        int cur = invocationTracker.getNextOption(2);
+        if (cur == 0) {
+            listener.add("plain-exception-" + i);
+            throw new RuntimeException("affected method exception-" + i);
+        } else {
+            listener.add("plain-success-" + i);
+        }
+    }
+
+    int plainReturns(final int i) {
+        int cur = invocationTracker.getNextOption(2);
+        if (cur == 0) {
+            listener.add("plain-returns-exception-" + i);
+            throw new RuntimeException("affected method exception-" + i);
+        } else {
+            listener.add("plain-returns-success-" + i);
+            return i;
+        }
+    }
+
+    boolean plainTest(final int i) {
+        int cur = invocationTracker.getNextOption(3);
+        if (cur == 0) {
+            listener.add("plain-exception-" + i);
+            throw new RuntimeException("affected method exception-" + i);
+        } else if (cur == 1) {
+            listener.add("plain-false-" + i);
+            return false;
+        } else {
+            listener.add("plain-true-" + i);
+            return true;
+        }
+    }
+
+    void sync(final int i) {
+        assertFalse(invocationTracker.isAsyncStep);
+        affected(i);
+    }
+
+    Integer syncReturns(final int i) {
+        assertFalse(invocationTracker.isAsyncStep);
+        return affectedReturns(i);
+    }
+
+    public void submit(final Runnable task) {
+        asyncExecutor.execute(task);
+    }
+
+    void async(final int i, final SingleResultCallback<Void> callback) {
+        assertTrue(invocationTracker.isAsyncStep);
+        if (isTestingAbruptCompletion) {
+            /* We should not test for abrupt completion in a separate thread. Once a callback is registered for an async operation,
+            the Async Framework does not handle exceptions thrown outside of callbacks by the executing thread. Such exception management
+            should be the responsibility of the thread conducting the asynchronous operations.
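+            (Hence, in this mode, affected(i) below runs on the calling thread and
+            may throw there, before the completion is ever submitted to the executor.)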
+            */
+            affected(i);
+            submit(() -> {
+                callback.complete(callback);
+            });
+        } else {
+            submit(() -> {
+                try {
+                    affected(i);
+                    callback.complete(callback);
+                } catch (Throwable t) {
+                    callback.onResult(null, t);
+                }
+            });
+        }
+    }
+
+    void asyncReturns(final int i, final SingleResultCallback<Integer> callback) {
+        assertTrue(invocationTracker.isAsyncStep);
+        if (isTestingAbruptCompletion) {
+            int result = affectedReturns(i);
+            submit(() -> {
+                callback.complete(result);
+            });
+        } else {
+            submit(() -> {
+                try {
+                    callback.complete(affectedReturns(i));
+                } catch (Throwable t) {
+                    callback.onResult(null, t);
+                }
+            });
+        }
+    }
+
+    private void affected(final int i) {
+        int cur = invocationTracker.getNextOption(2);
+        if (cur == 0) {
+            listener.add("affected-exception-" + i);
+            throw new RuntimeException("exception-" + i);
+        } else {
+            listener.add("affected-success-" + i);
+        }
+    }
+
+    private int affectedReturns(final int i) {
+        int cur = invocationTracker.getNextOption(2);
+        if (cur == 0) {
+            listener.add("affected-returns-exception-" + i);
+            throw new RuntimeException("exception-" + i);
+        } else {
+            listener.add("affected-returns-success-" + i);
+            return i;
+        }
+    }
+
+    // assert methods:
+
+    void assertBehavesSameVariations(final int expectedVariations, final Runnable sync,
+            final Consumer<SingleResultCallback<Void>> async) {
+        assertBehavesSameVariations(expectedVariations,
+                () -> {
+                    sync.run();
+                    return null;
+                },
+                (c) -> {
+                    async.accept((v, e) -> c.onResult(v, e));
+                });
+    }
+
+    <T> void assertBehavesSameVariations(final int expectedVariations, final Supplier<T> sync,
+            final Consumer<SingleResultCallback<T>> async) {
+        // run the variation-trying code twice, with direct/indirect exceptions
+        for (int i = 0; i < 2; i++) {
+            isTestingAbruptCompletion = i != 0;
+
+            // the variation-trying code:
+            invocationTracker.reset();
+            do {
+                invocationTracker.startInitialStep();
+                assertBehavesSame(
+                        sync,
+                        () -> invocationTracker.startMatchStep(),
+                        async);
+            } while (invocationTracker.countDown());
+            assertEquals(expectedVariations, invocationTracker.getVariationCount(),
+                    "number of variations did not match");
+        }
+    }
+
+    private <T> void assertBehavesSame(final Supplier<T> sync, final Runnable between,
+            final Consumer<SingleResultCallback<T>> async) {
+
+        T expectedValue = null;
+        Throwable expectedException = null;
+        try {
+            expectedValue = sync.get();
+        } catch (Throwable e) {
+            expectedException = e;
+        }
+        List<String> expectedEvents = listener.getEventStrings();
+
+        listener.clear();
+        between.run();
+
+        AtomicReference<T> actualValue = new AtomicReference<>();
+        AtomicReference<Throwable> actualException = new AtomicReference<>();
+        CompletableFuture<Void> wasCalledFuture = new CompletableFuture<>();
+        try {
+            async.accept((v, e) -> {
+                actualValue.set(v);
+                actualException.set(e);
+                if (wasCalledFuture.isDone()) {
+                    fail();
+                }
+                wasCalledFuture.complete(null);
+            });
+        } catch (Throwable e) {
+            fail("async threw instead of using callback");
+        }
+
+        await(wasCalledFuture, "Callback should have been called");
+
+        // The following code can be used to debug variations:
+//        System.out.println("===VARIATION START: " + invocationTracker.getVariationCount());
+//        System.out.println("sync: " + expectedEvents);
+//        System.out.println("sync size: " + expectedEvents.size());
+//        System.out.println("callback called?: " + wasCalledFuture.isDone());
+//        System.out.println("value -- sync: " + expectedValue + " -- async: " + actualValue.get());
+//        System.out.println("excep -- sync: " + expectedException + " -- async: " + actualException.get());
+//        System.out.println("exception mode: " + (isTestingAbruptCompletion
"exceptions thrown directly (abrupt completion)" : "exceptions into callbacks")); +// System.out.println("===VARIATION END"); + + // show assertion failures arising in async tests + if (actualException.get() != null && actualException.get() instanceof AssertionFailedError) { + throw (AssertionFailedError) actualException.get(); + } + + assertTrue(wasCalledFuture.isDone(), "callback should have been called"); + assertEquals(expectedEvents, listener.getEventStrings(), "steps should have matched"); + assertEquals(expectedValue, actualValue.get()); + assertEquals(expectedException == null, actualException.get() == null, + "both or neither should have produced an exception"); + if (expectedException != null) { + assertEquals(expectedException.getMessage(), actualException.get().getMessage()); + assertEquals(expectedException.getClass(), actualException.get().getClass()); + } + + listener.clear(); + } + + protected T await(final CompletableFuture voidCompletableFuture, final String errorMessage) { + try { + return voidCompletableFuture.get(1, TimeUnit.MINUTES); + } catch (InterruptedException | ExecutionException | TimeoutException e) { + throw new AssertionError(errorMessage); + } + } + + /** + * Tracks invocations: allows testing of all variations of a method calls + */ + static class InvocationTracker { + public static final int DEPTH_LIMIT = 50; + private final List invocationOptionSequence = new ArrayList<>(); + private boolean isAsyncStep; // async = matching, vs initial step = populating + private int currentInvocationIndex; + private int variationCount; + + public void reset() { + variationCount = 0; + } + + public void startInitialStep() { + variationCount++; + isAsyncStep = false; + currentInvocationIndex = -1; + } + + public int getNextOption(final int myOptionsSize) { + /* + This method creates (or gets) the next invocation's option. Each + invoker of this method has the "option" to behave in various ways, + usually just success (option 1) and exceptional failure (option 0), + though some callers might have more options. A sequence of method + outcomes (options) is one "variation". Tests automatically test + all possible variations (up to a limit, to prevent infinite loops). + + Methods generally have labels, to ensure that corresponding + sync/async methods are called in the right order, but these labels + are unrelated to the "variation" logic here. There are two "modes" + (whether completion is abrupt, or not), which are also unrelated. 
+ */ + + currentInvocationIndex++; // which invocation result we are dealing with + + if (currentInvocationIndex >= invocationOptionSequence.size()) { + if (isAsyncStep) { + fail("result should have been pre-initialized: steps may not match"); + } + if (isWithinDepthLimit()) { + invocationOptionSequence.add(myOptionsSize - 1); + } else { + invocationOptionSequence.add(0); // choose "0" option, should always be an exception + } + } + return invocationOptionSequence.get(currentInvocationIndex); + } + + public void startMatchStep() { + isAsyncStep = true; + currentInvocationIndex = -1; + } + + private boolean countDown() { + while (!invocationOptionSequence.isEmpty()) { + int lastItemIndex = invocationOptionSequence.size() - 1; + int lastItem = invocationOptionSequence.get(lastItemIndex); + if (lastItem > 0) { + // count current digit down by 1, until 0 + invocationOptionSequence.set(lastItemIndex, lastItem - 1); + return true; + } else { + // current digit completed, remove (move left) + invocationOptionSequence.remove(lastItemIndex); + } + } + return false; + } + + public int getVariationCount() { + return variationCount; + } + + public boolean isWithinDepthLimit() { + return invocationOptionSequence.size() < DEPTH_LIMIT; + } + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/async/SameThreadAsyncFunctionsTest.java b/driver-core/src/test/unit/com/mongodb/internal/async/SameThreadAsyncFunctionsTest.java new file mode 100644 index 00000000000..7dd4c0f37ac --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/async/SameThreadAsyncFunctionsTest.java @@ -0,0 +1,94 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.mongodb.internal.async;
+
+import com.mongodb.lang.NonNull;
+import org.junit.jupiter.api.DisplayName;
+import org.junit.jupiter.api.Test;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.AbstractExecutorService;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import static com.mongodb.internal.async.AsyncRunnable.beginAsync;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+@DisplayName("The same thread async functions")
+public class SameThreadAsyncFunctionsTest extends AsyncFunctionsAbstractTest {
+    @Override
+    public ExecutorService createAsyncExecutor() {
+        return new SameThreadExecutorService();
+    }
+
+    @Test
+    void testInvalid() {
+        setIsTestingAbruptCompletion(false);
+        setAsyncStep(true);
+        IllegalStateException illegalStateException = new IllegalStateException("must not cause second callback invocation");
+
+        assertThrows(IllegalStateException.class, () -> {
+            beginAsync().thenRun(c -> {
+                async(3, c);
+                throw illegalStateException;
+            }).finish((v, e) -> {
+                assertNotEquals(e, illegalStateException);
+            });
+        });
+        assertThrows(IllegalStateException.class, () -> {
+            beginAsync().thenRun(c -> {
+                async(3, c);
+            }).finish((v, e) -> {
+                throw illegalStateException;
+            });
+        });
+    }
+
+    private static class SameThreadExecutorService extends AbstractExecutorService {
+        @Override
+        public void execute(@NonNull final Runnable command) {
+            command.run();
+        }
+
+        @Override
+        public void shutdown() {
+        }
+
+        @NonNull
+        @Override
+        public List<Runnable> shutdownNow() {
+            return Collections.emptyList();
+        }
+
+        @Override
+        public boolean isShutdown() {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public boolean isTerminated() {
+            throw new UnsupportedOperationException();
+        }
+
+        @Override
+        public boolean awaitTermination(final long timeout, @NonNull final TimeUnit unit) {
+            return true;
+        }
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/async/SeparateThreadAsyncFunctionsTest.java b/driver-core/src/test/unit/com/mongodb/internal/async/SeparateThreadAsyncFunctionsTest.java
new file mode 100644
index 00000000000..401c4d2c18e
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/async/SeparateThreadAsyncFunctionsTest.java
@@ -0,0 +1,118 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.async;
+
+import org.junit.jupiter.api.DisplayName;
+import org.junit.jupiter.api.Test;
+
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static com.mongodb.internal.async.AsyncRunnable.beginAsync;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+@DisplayName("Separate thread async functions")
+public class SeparateThreadAsyncFunctionsTest extends AsyncFunctionsAbstractTest {
+
+    private UncaughtExceptionHandler uncaughtExceptionHandler;
+
+    @Override
+    public ExecutorService createAsyncExecutor() {
+        uncaughtExceptionHandler = new UncaughtExceptionHandler();
+        return Executors.newFixedThreadPool(1, r -> {
+            Thread thread = new Thread(r);
+            thread.setUncaughtExceptionHandler(uncaughtExceptionHandler);
+            return thread;
+        });
+    }
+
+    /**
+     * This test covers the scenario where an asynchronous invocation erroneously attempts to use a callback that has
+     * already been completed. Such behavior is a bug, and an AssertionError should be thrown when it is detected.
+     */
+    @Test
+    void shouldPropagateAssertionErrorIfCallbackHasBeenCompletedAfterAsyncInvocation() {
+        //given
+        setIsTestingAbruptCompletion(false);
+        setAsyncStep(true);
+        IllegalStateException illegalStateException = new IllegalStateException("must not cause second callback invocation");
+        AtomicBoolean callbackInvoked = new AtomicBoolean(false);
+
+        //when
+        beginAsync().thenRun(c -> {
+            async(3, c);
+            throw illegalStateException;
+        }).thenRun(c -> {
+            assertInvokedOnce(callbackInvoked);
+            c.complete(c);
+        }).finish((v, e) -> {
+            assertEquals(illegalStateException, e);
+        });
+
+        //then
+        Throwable exception = uncaughtExceptionHandler.getException();
+        assertNotNull(exception);
+        assertEquals(AssertionError.class, exception.getClass());
+        assertEquals("Callback has been already completed. It could happen "
+                + "if code throws an exception after invoking an async method. Value: null", exception.getMessage());
Value: null", exception.getMessage()); + } + + @Test + void shouldPropagateUnexpectedExceptionFromFinishCallback() { + //given + setIsTestingAbruptCompletion(false); + setAsyncStep(true); + IllegalStateException illegalStateException = new IllegalStateException("must not cause second callback invocation"); + + //when + beginAsync().thenRun(c -> { + async(3, c); + }).finish((v, e) -> { + throw illegalStateException; + }); + + //then + Throwable exception = uncaughtExceptionHandler.getException(); + assertNotNull(exception); + assertEquals(illegalStateException, exception); + } + + private static void assertInvokedOnce(final AtomicBoolean callbackInvoked1) { + assertTrue(callbackInvoked1.compareAndSet(false, true)); + } + + private final class UncaughtExceptionHandler implements Thread.UncaughtExceptionHandler { + + private final CompletableFuture completable = new CompletableFuture<>(); + + @Override + public void uncaughtException(final Thread t, final Throwable e) { + completable.complete(e); + } + + public Throwable getException() { + return await(completable, "No exception was thrown"); + } + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/async/function/LoopStateTest.java b/driver-core/src/test/unit/com/mongodb/internal/async/function/LoopStateTest.java new file mode 100644 index 00000000000..c9a8ada7c0c --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/async/function/LoopStateTest.java @@ -0,0 +1,102 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.mongodb.internal.async.function;
+
+import com.mongodb.client.syncadapter.SupplyingCallback;
+import com.mongodb.internal.async.function.LoopState.AttachmentKey;
+import com.mongodb.internal.operation.retry.AttachmentKeys;
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.assertAll;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+final class LoopStateTest {
+    @Test
+    void iterationsAndAdvance() {
+        LoopState loopState = new LoopState();
+        assertAll(
+                () -> assertTrue(loopState.isFirstIteration()),
+                () -> assertEquals(0, loopState.iteration()),
+                () -> assertFalse(loopState.isLastIteration()),
+                () -> assertTrue(loopState.advance()),
+                () -> assertFalse(loopState.isFirstIteration()),
+                () -> assertEquals(1, loopState.iteration()),
+                () -> assertFalse(loopState.isLastIteration())
+        );
+        loopState.markAsLastIteration();
+        assertAll(
+                () -> assertFalse(loopState.isFirstIteration()),
+                () -> assertEquals(1, loopState.iteration()),
+                () -> assertTrue(loopState.isLastIteration()),
+                () -> assertFalse(loopState.advance())
+        );
+    }
+
+    @Test
+    void markAsLastIteration() {
+        LoopState loopState = new LoopState();
+        loopState.markAsLastIteration();
+        assertTrue(loopState.isLastIteration());
+        assertFalse(loopState.advance());
+    }
+
+    @Test
+    void breakAndCompleteIfFalse() {
+        LoopState loopState = new LoopState();
+        SupplyingCallback<Object> callback = new SupplyingCallback<>();
+        assertFalse(loopState.breakAndCompleteIf(() -> false, callback));
+        assertFalse(callback.completed());
+    }
+
+    @Test
+    void breakAndCompleteIfTrue() {
+        LoopState loopState = new LoopState();
+        SupplyingCallback<Object> callback = new SupplyingCallback<>();
+        assertTrue(loopState.breakAndCompleteIf(() -> true, callback));
+        assertTrue(callback.completed());
+    }
+
+    @Test
+    void breakAndCompleteIfPredicateThrows() {
+        LoopState loopState = new LoopState();
+        SupplyingCallback<Object> callback = new SupplyingCallback<>();
+        RuntimeException e = new RuntimeException() {
+        };
+        assertTrue(loopState.breakAndCompleteIf(() -> {
+            throw e;
+        }, callback));
+        assertThrows(e.getClass(), callback::get);
+    }
+
+    @Test
+    void attachAndAttachment() {
+        LoopState loopState = new LoopState();
+        AttachmentKey<Integer> attachmentKey = AttachmentKeys.maxWireVersion();
+        int attachmentValue = 1;
+        assertFalse(loopState.attachment(attachmentKey).isPresent());
+        loopState.attach(attachmentKey, attachmentValue, false);
+        assertEquals(attachmentValue, loopState.attachment(attachmentKey).get());
+        loopState.advance();
+        assertEquals(attachmentValue, loopState.attachment(attachmentKey).get());
+        loopState.attach(attachmentKey, attachmentValue, true);
+        assertEquals(attachmentValue, loopState.attachment(attachmentKey).get());
+        loopState.advance();
+        assertFalse(loopState.attachment(attachmentKey).isPresent());
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/async/function/RetryStateTest.java b/driver-core/src/test/unit/com/mongodb/internal/async/function/RetryStateTest.java
new file mode 100644
index 00000000000..970d87d33ed
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/async/function/RetryStateTest.java
@@ -0,0 +1,483 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.internal.async.function;
+
+import com.mongodb.MongoOperationTimeoutException;
+import com.mongodb.client.syncadapter.SupplyingCallback;
+import com.mongodb.internal.TimeoutContext;
+import com.mongodb.internal.TimeoutSettings;
+import com.mongodb.internal.async.function.LoopState.AttachmentKey;
+import com.mongodb.internal.operation.retry.AttachmentKeys;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.DisplayName;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+
+import java.util.stream.Stream;
+
+import static org.junit.jupiter.api.Assertions.assertAll;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Named.named;
+import static org.junit.jupiter.params.provider.Arguments.arguments;
+import static org.mockito.Mockito.mock;
+
+final class RetryStateTest {
+    private static final TimeoutContext TIMEOUT_CONTEXT_NO_GLOBAL_TIMEOUT = new TimeoutContext(new TimeoutSettings(0L, 0L,
+            0L, null, 0L));
+
+    private static final TimeoutContext TIMEOUT_CONTEXT_EXPIRED_GLOBAL_TIMEOUT = new TimeoutContext(new TimeoutSettings(0L, 0L,
+            0L, 1L, 0L));
+
+    private static final TimeoutContext TIMEOUT_CONTEXT_INFINITE_GLOBAL_TIMEOUT = new TimeoutContext(new TimeoutSettings(0L, 0L,
+            0L, 0L, 0L));
+    private static final String EXPECTED_TIMEOUT_MESSAGE = "Retry attempt exceeded the timeout limit.";
+
+    static Stream<Arguments> infiniteTimeout() {
+        return Stream.of(
+                arguments(named("Infinite timeoutMs", TIMEOUT_CONTEXT_INFINITE_GLOBAL_TIMEOUT))
+        );
+    }
+
+    static Stream<Arguments> expiredTimeout() {
+        return Stream.of(
+                arguments(named("Expired timeoutMs", TIMEOUT_CONTEXT_EXPIRED_GLOBAL_TIMEOUT))
+        );
+    }
+
+    static Stream<Arguments> noTimeout() {
+        return Stream.of(
+                arguments(named("No timeoutMs", TIMEOUT_CONTEXT_NO_GLOBAL_TIMEOUT))
+        );
+    }
+
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"})
+    void unlimitedAttemptsAndAdvance(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
+        assertAll(
+                () -> assertTrue(retryState.isFirstAttempt()),
+                () -> assertEquals(0, retryState.attempt()),
+                () -> assertFalse(retryState.isLastAttempt()),
+                () -> assertEquals(0, retryState.attempts())
+        );
+        advance(retryState);
+        assertAll(
+                () -> assertFalse(retryState.isFirstAttempt()),
+                () -> assertEquals(1, retryState.attempt()),
+                () -> assertFalse(retryState.isLastAttempt()),
+                () -> assertEquals(0, retryState.attempts())
+        );
+        retryState.markAsLastAttempt();
+        assertAll(
+                () -> assertFalse(retryState.isFirstAttempt()),
+                () -> assertEquals(1, retryState.attempt()),
+                () -> assertTrue(retryState.isLastAttempt()),
+                () -> assertEquals(0, retryState.attempts())
+        );
+    }
+
+    @Test
+    void limitedAttemptsAndAdvance() {
+        RetryState retryState = RetryState.withNonRetryableState();
+        RuntimeException attemptException = new RuntimeException() {
+        };
+        assertAll(
+                () -> assertTrue(retryState.isFirstAttempt()),
+                () -> assertEquals(0, retryState.attempt()),
+                () -> assertTrue(retryState.isLastAttempt()),
+                () -> assertEquals(1, retryState.attempts()),
+                () -> assertThrows(attemptException.getClass(), () ->
+                        retryState.advanceOrThrow(attemptException, (e1, e2) -> e2, (rs, e) -> true)),
+                // when there is only one attempt, it is both the first and the last one
+                () -> assertTrue(retryState.isFirstAttempt()),
+                () -> assertEquals(0, retryState.attempt()),
+                () -> assertTrue(retryState.isLastAttempt()),
+                () -> assertEquals(1, retryState.attempts())
+        );
+    }
+
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"})
+    void markAsLastAttemptAdvanceWithRuntimeException(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
+        retryState.markAsLastAttempt();
+        assertTrue(retryState.isLastAttempt());
+        RuntimeException attemptException = new RuntimeException() {
+        };
+        assertThrows(attemptException.getClass(),
+                () -> retryState.advanceOrThrow(attemptException, (e1, e2) -> e2, (rs, e) -> fail()));
+    }
+
+    @ParameterizedTest(name = "should advance with non-retryable error when marked as last attempt: ''{0}''")
+    @MethodSource({"infiniteTimeout", "expiredTimeout", "noTimeout"})
+    void markAsLastAttemptAdvanceWithError(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
+        retryState.markAsLastAttempt();
+        assertTrue(retryState.isLastAttempt());
+        Error attemptException = new Error() {
+        };
+        assertThrows(attemptException.getClass(),
+                () -> retryState.advanceOrThrow(attemptException, (e1, e2) -> e2, (rs, e) -> fail()));
+    }
+
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"})
+    void breakAndThrowIfRetryAndFirstAttempt(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
+        retryState.breakAndThrowIfRetryAnd(Assertions::fail);
+        assertFalse(retryState.isLastAttempt());
+    }
+
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"})
+    void breakAndThrowIfRetryAndFalse(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
+        advance(retryState);
+        retryState.breakAndThrowIfRetryAnd(() -> false);
+        assertFalse(retryState.isLastAttempt());
+    }
+
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"})
+    void breakAndThrowIfRetryAndTrue(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
+        advance(retryState);
+        assertThrows(RuntimeException.class, () -> retryState.breakAndThrowIfRetryAnd(() -> true));
+        assertTrue(retryState.isLastAttempt());
+    }
+
+    @Test
+    void breakAndThrowIfRetryAndTrueWithExpiredTimeout() {
+        TimeoutContext tContextMock = mock(TimeoutContext.class);
+
+        RetryState retryState = new RetryState(tContextMock);
+        advance(retryState);
+        assertThrows(RuntimeException.class, () -> retryState.breakAndThrowIfRetryAnd(() -> true));
+        assertTrue(retryState.isLastAttempt());
+    }
+
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"})
+    void breakAndThrowIfRetryIfPredicateThrows(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
+        advance(retryState);
+        RuntimeException e = new RuntimeException() {
+        };
+        assertThrows(e.getClass(), () -> retryState.breakAndThrowIfRetryAnd(() -> {
+            throw e;
+        }));
+        assertFalse(retryState.isLastAttempt());
+    }
+
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"})
+    void breakAndCompleteIfRetryAndFirstAttempt(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
+        SupplyingCallback<Object> callback = new SupplyingCallback<>();
+        assertFalse(retryState.breakAndCompleteIfRetryAnd(Assertions::fail, callback));
+        assertFalse(callback.completed());
+        assertFalse(retryState.isLastAttempt());
+    }
+
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"})
+    void breakAndCompleteIfRetryAndFalse(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
+        advance(retryState);
+        SupplyingCallback<Object> callback = new SupplyingCallback<>();
+        assertFalse(retryState.breakAndCompleteIfRetryAnd(() -> false, callback));
+        assertFalse(callback.completed());
+        assertFalse(retryState.isLastAttempt());
+    }
+
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"})
+    void breakAndCompleteIfRetryAndTrue(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
+        advance(retryState);
+        SupplyingCallback<Object> callback = new SupplyingCallback<>();
+        assertTrue(retryState.breakAndCompleteIfRetryAnd(() -> true, callback));
+        assertThrows(RuntimeException.class, callback::get);
+        assertTrue(retryState.isLastAttempt());
+    }
+
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"})
+    void breakAndCompleteIfRetryAndPredicateThrows(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
+        advance(retryState);
+        Error e = new Error() {
+        };
+        SupplyingCallback<Object> callback = new SupplyingCallback<>();
+        assertTrue(retryState.breakAndCompleteIfRetryAnd(() -> {
+            throw e;
+        }, callback));
+        assertThrows(e.getClass(), callback::get);
+        assertFalse(retryState.isLastAttempt());
+    }
+
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"})
+    void advanceOrThrowPredicateFalse(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
+        RuntimeException attemptException = new RuntimeException() {
+        };
+        assertThrows(attemptException.getClass(), () -> retryState.advanceOrThrow(attemptException, (e1, e2) -> e2, (rs, e) -> false));
+    }
+
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout"})
+    @DisplayName("should rethrow detected timeout exception even if timeout in retry state is not expired")
+    void advanceReThrowDetectedTimeoutExceptionEvenIfTimeoutInRetryStateIsNotExpired(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
+
+        MongoOperationTimeoutException expectedTimeoutException = TimeoutContext.createMongoTimeoutException("Server selection failed");
+        MongoOperationTimeoutException actualTimeoutException =
+                assertThrows(expectedTimeoutException.getClass(), () -> retryState.advanceOrThrow(expectedTimeoutException,
+                        (e1, e2) -> expectedTimeoutException,
+                        (rs, e) -> false));
+
+        Assertions.assertEquals(actualTimeoutException, expectedTimeoutException);
+    }
+
+    @Test
+    @DisplayName("should throw timeout exception from retry, when transformer swallows original timeout exception")
+    void
advanceThrowTimeoutExceptionWhenTransformerSwallowOriginalTimeoutException() { + RetryState retryState = new RetryState(TIMEOUT_CONTEXT_INFINITE_GLOBAL_TIMEOUT); + RuntimeException previousAttemptException = new RuntimeException() { + }; + MongoOperationTimeoutException expectedTimeoutException = TimeoutContext.createMongoTimeoutException("Server selection failed"); + + retryState.advanceOrThrow(previousAttemptException, + (e1, e2) -> previousAttemptException, + (rs, e) -> true); + + MongoOperationTimeoutException actualTimeoutException = + assertThrows(expectedTimeoutException.getClass(), () -> retryState.advanceOrThrow(expectedTimeoutException, + (e1, e2) -> previousAttemptException, + (rs, e) -> false)); + + Assertions.assertNotEquals(actualTimeoutException, expectedTimeoutException); + Assertions.assertEquals(EXPECTED_TIMEOUT_MESSAGE, actualTimeoutException.getMessage()); + Assertions.assertEquals(previousAttemptException, actualTimeoutException.getCause(), + "Retry timeout exception should have a cause if transformer returned non-timeout exception."); + } + + + @Test + @DisplayName("should throw original timeout exception from retry, when transformer returns original timeout exception") + void advanceThrowOriginalTimeoutExceptionWhenTransformerReturnsOriginalTimeoutException() { + RetryState retryState = new RetryState(TIMEOUT_CONTEXT_INFINITE_GLOBAL_TIMEOUT); + RuntimeException previousAttemptException = new RuntimeException() { + }; + MongoOperationTimeoutException expectedTimeoutException = TimeoutContext + .createMongoTimeoutException("Server selection failed"); + + retryState.advanceOrThrow(previousAttemptException, + (e1, e2) -> previousAttemptException, + (rs, e) -> true); + + MongoOperationTimeoutException actualTimeoutException = + assertThrows(expectedTimeoutException.getClass(), () -> retryState.advanceOrThrow(expectedTimeoutException, + (e1, e2) -> expectedTimeoutException, + (rs, e) -> false)); + + Assertions.assertEquals(actualTimeoutException, expectedTimeoutException); + Assertions.assertNull(actualTimeoutException.getCause(), + "Original timeout exception should not have a cause if transformer already returned timeout exception."); + } + + @Test + void advanceOrThrowPredicateTrueAndLastAttempt() { + RetryState retryState = RetryState.withNonRetryableState(); + Error attemptException = new Error() { + }; + assertThrows(attemptException.getClass(), () -> retryState.advanceOrThrow(attemptException, (e1, e2) -> e2, (rs, e) -> true)); + } + + @ParameterizedTest + @MethodSource({"infiniteTimeout", "noTimeout"}) + void advanceOrThrowPredicateThrowsAfterFirstAttempt(final TimeoutContext timeoutContext) { + RetryState retryState = new RetryState(timeoutContext); + RuntimeException predicateException = new RuntimeException() { + }; + RuntimeException attemptException = new RuntimeException() { + }; + assertThrows(predicateException.getClass(), () -> retryState.advanceOrThrow(attemptException, (e1, e2) -> e2, (rs, e) -> { + assertTrue(rs.isFirstAttempt()); + assertEquals(attemptException, e); + throw predicateException; + })); + } + + @Test + void advanceOrThrowPredicateThrowsTimeoutAfterFirstAttempt() { + RetryState retryState = new RetryState(TIMEOUT_CONTEXT_EXPIRED_GLOBAL_TIMEOUT); + RuntimeException predicateException = new RuntimeException() { + }; + RuntimeException attemptException = new MongoOperationTimeoutException(EXPECTED_TIMEOUT_MESSAGE); + MongoOperationTimeoutException mongoOperationTimeoutException = assertThrows(MongoOperationTimeoutException.class, + () -> 
retryState.advanceOrThrow(attemptException, (e1, e2) -> e2, (rs, e) -> { + assertTrue(rs.isFirstAttempt()); + assertEquals(attemptException, e); + throw predicateException; + })); + + assertEquals(EXPECTED_TIMEOUT_MESSAGE, mongoOperationTimeoutException.getMessage()); + assertNull(mongoOperationTimeoutException.getCause()); + } + + @ParameterizedTest + @MethodSource({"infiniteTimeout", "noTimeout"}) + void advanceOrThrowPredicateThrows(final TimeoutContext timeoutContext) { + RetryState retryState = new RetryState(timeoutContext); + RuntimeException firstAttemptException = new RuntimeException() { + }; + retryState.advanceOrThrow(firstAttemptException, (e1, e2) -> e2, (rs, e) -> true); + RuntimeException secondAttemptException = new RuntimeException() { + }; + RuntimeException predicateException = new RuntimeException() { + }; + assertThrows(predicateException.getClass(), () -> retryState.advanceOrThrow(secondAttemptException, (e1, e2) -> e2, (rs, e) -> { + assertEquals(1, rs.attempt()); + assertEquals(secondAttemptException, e); + throw predicateException; + })); + } + + @ParameterizedTest + @MethodSource({"infiniteTimeout", "noTimeout", "expiredTimeout"}) + void advanceOrThrowTransformerThrowsAfterFirstAttempt(final TimeoutContext timeoutContext) { + RetryState retryState = new RetryState(timeoutContext); + RuntimeException transformerException = new RuntimeException() { + }; + assertThrows(transformerException.getClass(), () -> retryState.advanceOrThrow(new AssertionError(), + (e1, e2) -> { + throw transformerException; + }, + (rs, e) -> fail())); + } + + @ParameterizedTest + @MethodSource({"infiniteTimeout", "noTimeout"}) //TODO mock? + void advanceOrThrowTransformerThrows(final TimeoutContext timeoutContext) throws Throwable { + RetryState retryState = new RetryState(timeoutContext); + Error firstAttemptException = new Error() { + }; + retryState.advanceOrThrow(firstAttemptException, (e1, e2) -> e2, (rs, e) -> true); + RuntimeException transformerException = new RuntimeException() { + }; + assertThrows(transformerException.getClass(), () -> retryState.advanceOrThrow(new AssertionError(), + (e1, e2) -> { + throw transformerException; + }, + (rs, e) -> fail())); + } + + @ParameterizedTest + @MethodSource({"infiniteTimeout", "noTimeout"}) + void advanceOrThrowTransformAfterFirstAttempt(final TimeoutContext timeoutContext) { + RetryState retryState = new RetryState(timeoutContext); + RuntimeException attemptException = new RuntimeException() { + }; + RuntimeException transformerResult = new RuntimeException() { + }; + assertThrows(transformerResult.getClass(), () -> retryState.advanceOrThrow(attemptException, + (e1, e2) -> { + assertNull(e1); + assertEquals(attemptException, e2); + return transformerResult; + }, + (rs, e) -> { + assertEquals(attemptException, e); + return false; + })); + } + + @Test + void advanceOrThrowTransformThrowsTimeoutExceptionAfterFirstAttempt() { + RetryState retryState = new RetryState(TIMEOUT_CONTEXT_EXPIRED_GLOBAL_TIMEOUT); + + RuntimeException attemptException = new MongoOperationTimeoutException(EXPECTED_TIMEOUT_MESSAGE); + RuntimeException transformerResult = new RuntimeException(); + + MongoOperationTimeoutException mongoOperationTimeoutException = + assertThrows(MongoOperationTimeoutException.class, () -> retryState.advanceOrThrow(attemptException, + (e1, e2) -> { + assertNull(e1); + assertEquals(attemptException, e2); + return transformerResult; + }, + (rs, e) -> { + assertEquals(attemptException, e); + return false; + })); + + 
assertEquals(EXPECTED_TIMEOUT_MESSAGE, mongoOperationTimeoutException.getMessage());
+        assertEquals(transformerResult, mongoOperationTimeoutException.getCause());
+    }
+
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"})
+    void advanceOrThrowTransform(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
+        RuntimeException firstAttemptException = new RuntimeException() {
+        };
+        retryState.advanceOrThrow(firstAttemptException, (e1, e2) -> e2, (rs, e) -> true);
+        RuntimeException secondAttemptException = new RuntimeException() {
+        };
+        RuntimeException transformerResult = new RuntimeException() {
+        };
+        assertThrows(transformerResult.getClass(), () -> retryState.advanceOrThrow(secondAttemptException,
+                (e1, e2) -> {
+                    assertEquals(firstAttemptException, e1);
+                    assertEquals(secondAttemptException, e2);
+                    return transformerResult;
+                },
+                (rs, e) -> {
+                    assertEquals(secondAttemptException, e);
+                    return false;
+                }));
+    }
+
+    @ParameterizedTest
+    @MethodSource({"infiniteTimeout", "noTimeout"})
+    void attachAndAttachment(final TimeoutContext timeoutContext) {
+        RetryState retryState = new RetryState(timeoutContext);
+        AttachmentKey<Integer> attachmentKey = AttachmentKeys.maxWireVersion();
+        int attachmentValue = 1;
+        assertFalse(retryState.attachment(attachmentKey).isPresent());
+        retryState.attach(attachmentKey, attachmentValue, false);
+        assertEquals(attachmentValue, retryState.attachment(attachmentKey).get());
+        advance(retryState);
+        assertEquals(attachmentValue, retryState.attachment(attachmentKey).get());
+        retryState.attach(attachmentKey, attachmentValue, true);
+        assertEquals(attachmentValue, retryState.attachment(attachmentKey).get());
+        advance(retryState);
+        assertFalse(retryState.attachment(attachmentKey).isPresent());
+    }
+
+    private static void advance(final RetryState retryState) {
+        retryState.advanceOrThrow(new RuntimeException(), (e1, e2) -> e2, (rs, e) -> true);
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/authentication/SaslPrepTest.java b/driver-core/src/test/unit/com/mongodb/internal/authentication/SaslPrepTest.java
new file mode 100644
index 00000000000..0f48f05d7ef
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/authentication/SaslPrepTest.java
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.authentication;
+
+import org.junit.Test;
+
+import static com.mongodb.internal.authentication.SaslPrep.saslPrepQuery;
+import static com.mongodb.internal.authentication.SaslPrep.saslPrepStored;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+public class SaslPrepTest {
+
+    @Test
+    public void rfc4013Examples() {
+        // Taken from https://tools.ietf.org/html/rfc4013#section-3
+        assertEquals("IX", saslPrepStored("I\u00ADX"));
+        assertEquals("user", saslPrepStored("user"));
+        assertEquals("user=", saslPrepStored("user="));
+        assertEquals("USER", saslPrepStored("USER"));
+        assertEquals("a", saslPrepStored("\u00AA"));
+        assertEquals("IX", saslPrepStored("\u2168"));
+        try {
+            saslPrepStored("\u0007");
+            fail("Should throw IllegalArgumentException");
+        } catch (IllegalArgumentException e) {
+            assertEquals("Prohibited character at position 0", e.getMessage());
+        }
+        try {
+            saslPrepStored("\u0627\u0031");
+            fail("Should throw IllegalArgumentException");
+        } catch (IllegalArgumentException e) {
+            assertEquals("First character is RandALCat, but last character is not", e.getMessage());
+        }
+    }
+
+    @Test
+    public void mappedToSpace() {
+        assertEquals("A B", saslPrepStored("A\u00A0B"));
+    }
+
+    @Test
+    public void bidi2() {
+        // RandALCat character first *and* last is OK
+        assertEquals("\u0627\u0031\u0627", saslPrepStored("\u0627\u0031\u0627"));
+        // A mix of RandALCat and LCat characters is not allowed
+        try {
+            saslPrepStored("\u0627\u0041\u0627");
+            fail("Should throw IllegalArgumentException");
+        } catch (IllegalArgumentException e) {
+            assertEquals("Contains both RandALCat characters and LCat characters", e.getMessage());
+        }
+    }
+
+    @Test
+    public void unassigned() {
+        int unassignedCodepoint;
+        for (unassignedCodepoint = Character.MAX_CODE_POINT;
+             unassignedCodepoint >= Character.MIN_CODE_POINT;
+             unassignedCodepoint--) {
+            if (!Character.isDefined(unassignedCodepoint)
+                    && !SaslPrep.prohibited(unassignedCodepoint)) {
+                break;
+            }
+        }
+        String withUnassignedChar = "abc" + new String(Character.toChars(unassignedCodepoint));
+        assertEquals(withUnassignedChar, saslPrepQuery(withUnassignedChar));
+        try {
+            saslPrepStored(withUnassignedChar);
+            fail("Should throw IllegalArgumentException");
+        } catch (IllegalArgumentException e) {
+            assertEquals("Character at position 3 is unassigned", e.getMessage());
+        }
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/binding/SingleServerBindingSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/binding/SingleServerBindingSpecification.groovy
new file mode 100644
index 00000000000..824a724ee81
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/binding/SingleServerBindingSpecification.groovy
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.mongodb.internal.binding + +import com.mongodb.ReadPreference +import com.mongodb.ServerAddress +import com.mongodb.connection.ServerConnectionState +import com.mongodb.connection.ServerDescription +import com.mongodb.connection.ServerType +import com.mongodb.internal.connection.Cluster +import com.mongodb.internal.connection.Server +import com.mongodb.internal.connection.ServerTuple +import spock.lang.Specification + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT + +class SingleServerBindingSpecification extends Specification { + + def 'should implement getters'() { + given: + def cluster = Mock(Cluster) { + selectServer(_, _) >> new ServerTuple(Mock(Server), + ServerDescription.builder() + .type(ServerType.STANDALONE) + .state(ServerConnectionState.CONNECTED) + .address(new ServerAddress()) + .build()) + } + def address = new ServerAddress() + def operationContext = OPERATION_CONTEXT + + when: + + def binding = new SingleServerBinding(cluster, address, operationContext) + + then: + binding.readPreference == ReadPreference.primary() + binding.getOperationContext() == operationContext + + + when: + def source = binding.getReadConnectionSource() + + then: + source.getOperationContext() == operationContext + } + + def 'should increment and decrement reference counts'() { + given: + def cluster = Mock(Cluster) { + selectServer(_, _) >> new ServerTuple(Mock(Server), + ServerDescription.builder() + .type(ServerType.STANDALONE) + .state(ServerConnectionState.CONNECTED) + .address(new ServerAddress()) + .build()) + } + def address = new ServerAddress() + + when: + def binding = new SingleServerBinding(cluster, address, OPERATION_CONTEXT) + + then: + binding.count == 1 + + when: + def source = binding.getReadConnectionSource() + + then: + source.count == 1 + binding.count == 2 + + when: + source.retain() + + then: + source.count == 2 + binding.count == 2 + + when: + source.release() + + then: + source.count == 1 + binding.count == 2 + + when: + source.release() + + then: + source.count == 0 + binding.count == 1 + + when: + source = binding.getWriteConnectionSource() + + then: + source.count == 1 + binding.count == 2 + + when: + source.retain() + + then: + source.count == 2 + binding.count == 2 + + when: + source.release() + + then: + source.count == 1 + binding.count == 2 + + when: + source.release() + + then: + source.count == 0 + binding.count == 1 + + when: + binding.release() + + then: + binding.count == 0 + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/client/model/AbstractConstructibleBsonElementTest.java b/driver-core/src/test/unit/com/mongodb/internal/client/model/AbstractConstructibleBsonElementTest.java new file mode 100644 index 00000000000..7eda8e22441 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/client/model/AbstractConstructibleBsonElementTest.java @@ -0,0 +1,125 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.mongodb.internal.client.model;
+
+import org.bson.BsonDocument;
+import org.bson.BsonString;
+import org.bson.Document;
+import org.bson.conversions.Bson;
+import org.junit.jupiter.api.Test;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertSame;
+
+final class AbstractConstructibleBsonElementTest {
+    @Test
+    void of() {
+        BsonDocument value = new BsonDocument("n", new BsonString("v"));
+        AbstractConstructibleBsonElement<?> constructible = AbstractConstructibleBsonElement.of(
+                new BsonDocument("name", value));
+        assertUnmodifiable(constructible);
+        assertEquals(new BsonDocument("name", value), constructible.toBsonDocument());
+    }
+
+    @Test
+    void ofPreventsDoubleWrapping() {
+        BsonDocument value = new BsonDocument("n", new BsonString("v"));
+        AbstractConstructibleBsonElement<?> constructible = AbstractConstructibleBsonElement.of(
+                new BsonDocument("name", value));
+        assertUnmodifiable(constructible);
+        AbstractConstructibleBsonElement<?> constructible2 = AbstractConstructibleBsonElement.of(constructible);
+        assertUnmodifiable(constructible2);
+        assertSame(constructible, constructible2);
+    }
+
+    @Test
+    void nameConstructor() {
+        final class Constructible extends AbstractConstructibleBsonElement<Constructible> {
+            private Constructible(final String name) {
+                super(name);
+            }
+
+            private Constructible(final Bson baseElement, final Bson appendedElementValue) {
+                super(baseElement, appendedElementValue);
+            }
+
+            @Override
+            protected Constructible newSelf(final Bson baseElement, final Bson appendedElementValue) {
+                return new Constructible(baseElement, appendedElementValue);
+            }
+        }
+        AbstractConstructibleBsonElement<?> constructible = new Constructible("name");
+        assertUnmodifiable(constructible);
+        assertEquals(new BsonDocument("name", new BsonDocument()), constructible.toBsonDocument());
+    }
+
+    @Test
+    void nameValueConstructor() {
+        final class Constructible extends AbstractConstructibleBsonElement<Constructible> {
+            private Constructible(final String name, final Bson value) {
+                super(name, value);
+            }
+
+            private Constructible(final Bson baseElement, final Bson appendedElementValue) {
+                super(baseElement, appendedElementValue);
+            }
+
+            @Override
+            protected Constructible newSelf(final Bson baseElement, final Bson appendedElementValue) {
+                return new Constructible(baseElement, appendedElementValue);
+            }
+        }
+        BsonDocument value = new BsonDocument("n", new BsonString("v"));
+        AbstractConstructibleBsonElement<?> constructible = new Constructible("name", value);
+        assertUnmodifiable(constructible);
+        assertEquals(new BsonDocument("name", value), constructible.toBsonDocument());
+    }
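+
+    // Illustrative summary, not an additional test: newWithAppendedValue appends into the element's
+    // value document without mutating the receiver, e.g. {name: {n: "v"}} yields {name: {n: "v", n2: "v2"}}.
+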
+    @Test
+    void newWithAppendedValue() {
+        AbstractConstructibleBsonElement<?> constructible = AbstractConstructibleBsonElement.of(
+                new BsonDocument("name", new BsonDocument("n", new BsonString("v"))));
+        assertUnmodifiable(constructible);
+        AbstractConstructibleBsonElement<?> appendedConstructible = constructible.newWithAppendedValue("n2", "v2");
+        assertUnmodifiable(appendedConstructible);
+        assertEquals(
+                new BsonDocument("name", new BsonDocument("n", new BsonString("v")).append("n2", new BsonString("v2"))),
+                appendedConstructible.toBsonDocument());
+    }
+
+    @Test
+    void tostring() {
+        assertEquals(
+                new Document("name",
+                        new Document("double", 0.5)
+                                .append("doc", new Document("i", 42))
+                                .append("constructible", new Document("s", "")))
+                        .toString(),
+                AbstractConstructibleBsonElement.of(new Document("name",
+                        AbstractConstructibleBson.of(new Document("double", 0.5))
+                                .newAppended("doc", new Document("i", 42))))
+                        .newWithAppendedValue("constructible", AbstractConstructibleBson.of(AbstractConstructibleBson.of(new Document("s", ""))))
+                        .toString());
+    }
+
+    private static void assertUnmodifiable(final AbstractConstructibleBsonElement<?> constructible) {
+        String expected = constructible.toBsonDocument().toJson();
+        constructible.newWithAppendedValue("assertUnmodifiableN", "assertUnmodifiableV");
+        assertEquals(expected, constructible.toBsonDocument().toJson());
+        constructible.newWithMutatedValue(doc -> doc.append("assertUnmodifiableN", "assertUnmodifiableV"));
+        assertEquals(expected, constructible.toBsonDocument().toJson());
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/client/model/AbstractConstructibleBsonTest.java b/driver-core/src/test/unit/com/mongodb/internal/client/model/AbstractConstructibleBsonTest.java
new file mode 100644
index 00000000000..607b76d1aed
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/client/model/AbstractConstructibleBsonTest.java
@@ -0,0 +1,123 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.internal.client.model;
+
+import org.bson.BsonArray;
+import org.bson.BsonDocument;
+import org.bson.BsonString;
+import org.bson.Document;
+import org.bson.conversions.Bson;
+import org.junit.jupiter.api.Test;
+
+import static java.util.Arrays.asList;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertSame;
+
+final class AbstractConstructibleBsonTest {
+    @Test
+    void of() {
+        BsonDocument doc = new BsonDocument("name", new BsonString("value"));
+        AbstractConstructibleBson<?> constructible = AbstractConstructibleBson.of(doc);
+        assertUnmodifiable(constructible);
+        assertEquals(doc, constructible.toBsonDocument());
+    }
+
+    @Test
+    void ofPreventsDoubleWrapping() {
+        BsonDocument doc = new BsonDocument("name", new BsonString("value"));
+        AbstractConstructibleBson<?> constructible = AbstractConstructibleBson.of(doc);
+        assertUnmodifiable(constructible);
+        AbstractConstructibleBson<?> constructible2 = AbstractConstructibleBson.of(constructible);
+        assertUnmodifiable(constructible2);
+        assertSame(constructible, constructible2);
+    }
+
+    @Test
+    void newAppended() {
+        AbstractConstructibleBson<?> constructible = AbstractConstructibleBson.of(new BsonDocument("name", new BsonString("value")));
+        assertUnmodifiable(constructible);
+        AbstractConstructibleBson<?> appendedConstructible = constructible.newAppended("name2", "value2");
+        assertUnmodifiable(appendedConstructible);
+        assertEquals(
+                new BsonDocument("name", new BsonString("value")).append("name2", new BsonString("value2")),
+                appendedConstructible.toBsonDocument());
+    }
+
+    @Test
+    void emptyIsImmutable() {
+        assertImmutable(AbstractConstructibleBson.EMPTY_IMMUTABLE);
+        BsonDocument doc = new BsonDocument();
+        assertImmutable(AbstractConstructibleBson.of(doc));
+        assertEquals(new BsonDocument(), doc);
+    }
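+
+    // A minimal usage sketch of the copy-on-write contract exercised by these tests
+    // (variable names here are illustrative, not part of the driver API):
+    //   AbstractConstructibleBson<?> base = AbstractConstructibleBson.of(new Document("a", 1));
+    //   Bson derived = base.newAppended("b", 2); // a new instance; `base` still renders {"a": 1}
+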
+    @Test
+    void appendedCannotBeMutatedViaToBsonDocument() {
+        appendedCannotBeMutatedViaToBsonDocument(new Document());
+        appendedCannotBeMutatedViaToBsonDocument(new Document("appendedName", "appendedValue"));
+    }
+
+    @Test
+    void tostring() {
+        assertEquals(
+                new Document(
+                        "array", new BsonArray(asList(new BsonString("e1"), new BsonString("e2"))))
+                        .append("double", 0.5)
+                        .append("doc", new Document("i", 42))
+                        .append("constructible", new Document("s", ""))
+                        .toString(),
+                AbstractConstructibleBson.of(
+                        new BsonDocument("array", new BsonArray(asList(new BsonString("e1"), new BsonString("e2")))))
+                        .newAppended("double", 0.5)
+                        .newAppended("doc", new Document("i", 42))
+                        .newAppended("constructible", AbstractConstructibleBson.of(AbstractConstructibleBson.of(new Document("s", ""))))
+                        .toString());
+    }
+
+    private static void appendedCannotBeMutatedViaToBsonDocument(final Document appended) {
+        String expected = appended.toBsonDocument().toJson();
+        final class Constructible extends AbstractConstructibleBson<Constructible> {
+            private Constructible(final Bson base, final Document appended) {
+                super(base, appended);
+            }
+
+            @Override
+            protected Constructible newSelf(final Bson base, final Document appended) {
+                return new Constructible(base, appended);
+            }
+        }
+        AbstractConstructibleBson<?> constructible = new Constructible(new Document("name", "value"), appended);
+        // here we modify the document produced by `toBsonDocument` and check that it does not affect `appended`
+        constructible.toBsonDocument().append("name2", new BsonString("value2"));
+        assertEquals(expected, appended.toBsonDocument().toJson());
+    }
+
+    private static void assertImmutable(final AbstractConstructibleBson<?> constructible) {
+        String expected = constructible.toBsonDocument().toJson();
+        assertUnmodifiable(constructible);
+        // here we modify the document produced by `toBsonDocument` and check that it does not affect `constructible`
+        constructible.toBsonDocument().append("assertImmutableName", new BsonString("assertImmutableValue"));
+        assertEquals(expected, constructible.toBsonDocument().toJson());
+    }
+
+    private static void assertUnmodifiable(final AbstractConstructibleBson<?> constructible) {
+        String expected = constructible.toBsonDocument().toJson();
+        constructible.newAppended("assertUnmodifiableName", "assertUnmodifiableValue");
+        assertEquals(expected, constructible.toBsonDocument().toJson());
+        constructible.newMutated(doc -> doc.append("assertUnmodifiableName", "assertUnmodifiableValue"));
+        assertEquals(expected, constructible.toBsonDocument().toJson());
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractConnectionPoolTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractConnectionPoolTest.java
new file mode 100644
index 00000000000..92e224df835
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractConnectionPoolTest.java
@@ -0,0 +1,675 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.ClusterFixture; +import com.mongodb.JsonTestServerVersionChecker; +import com.mongodb.LoggerSettings; +import com.mongodb.MongoDriverInformation; +import com.mongodb.MongoTimeoutException; +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ConnectionId; +import com.mongodb.connection.ConnectionPoolSettings; +import com.mongodb.connection.ServerId; +import com.mongodb.connection.SocketSettings; +import com.mongodb.connection.SslSettings; +import com.mongodb.event.ConnectionCheckOutFailedEvent; +import com.mongodb.event.ConnectionCheckOutStartedEvent; +import com.mongodb.event.ConnectionCheckedInEvent; +import com.mongodb.event.ConnectionCheckedOutEvent; +import com.mongodb.event.ConnectionClosedEvent; +import com.mongodb.event.ConnectionCreatedEvent; +import com.mongodb.event.ConnectionPoolClearedEvent; +import com.mongodb.event.ConnectionPoolClosedEvent; +import com.mongodb.event.ConnectionPoolCreatedEvent; +import com.mongodb.event.ConnectionPoolReadyEvent; +import com.mongodb.event.ConnectionReadyEvent; +import com.mongodb.event.ServerListener; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.inject.SameObjectProvider; +import com.mongodb.internal.operation.CommandReadOperation; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonInt64; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.types.ObjectId; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.mockito.Mockito; +import util.JsonPoweredTestHelper; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT_FACTORY; +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS; +import static com.mongodb.assertions.Assertions.assertFalse; +import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException; +import static java.lang.String.format; +import static java.util.Arrays.asList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeFalse; +import static org.junit.Assume.assumeNotNull; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; + +// Implementation of +// https://github.com/mongodb/specifications/blob/master/source/connection-monitoring-and-pooling/connection-monitoring-and-pooling.md +// specification tests +@SuppressWarnings("deprecation") +@RunWith(Parameterized.class) +public abstract 
class AbstractConnectionPoolTest {
+    private static final int ANY_INT = 42;
+    private static final String ANY_STRING = "42";
+    private static final Set<String> PRESTART_POOL_ASYNC_WORK_MANAGER_FILE_NAMES = Collections.singleton("wait-queue-timeout.json");
+
+    private final String fileName;
+    private final String description;
+    private final BsonDocument definition;
+    private final boolean skipTest;
+    private ConnectionPoolSettings settings;
+    private final Map<String, ExecutorService> executorServiceMap = new HashMap<>();
+    private final Map<String, Future<Exception>> futureMap = new HashMap<>();
+
+    private TestConnectionPoolListener listener;
+    @Nullable
+    private BsonDocument configureFailPointCommand;
+
+    private final Map<String, InternalConnection> connectionMap = new HashMap<>();
+    private ConnectionPool pool;
+
+    public AbstractConnectionPoolTest(
+            final String fileName, final String description, final BsonDocument definition, final boolean skipTest) {
+        this.fileName = fileName;
+        this.description = description;
+        this.definition = definition;
+        this.skipTest = skipTest;
+
+        // The driver does not support the interruptInUseConnections option. See: JAVA-4536
+        assumeFalse(fileName.equals("pool-clear-schedule-run-interruptInUseConnections-false.json"));
+        assumeFalse(fileName.equals("pool-clear-interrupting-pending-connections.json"));
+
+        // Events out of order: the driver closes the connection first, then clears the pool. See: JAVA-5664
+        assumeFalse(fileName.equals("pool-create-min-size-error.json"));
+    }
+
+    @Before
+    public void setUp() {
+        assumeFalse(skipTest);
+        ConnectionPoolSettings.Builder settingsBuilder = ConnectionPoolSettings.builder();
+        BsonDocument poolOptions = definition.getDocument("poolOptions", new BsonDocument());
+
+        if (poolOptions.containsKey("maxPoolSize")) {
+            settingsBuilder.maxSize(poolOptions.getNumber("maxPoolSize").intValue());
+        }
+        if (poolOptions.containsKey("minPoolSize")) {
+            settingsBuilder.minSize(poolOptions.getNumber("minPoolSize").intValue());
+        }
+        if (poolOptions.containsKey("maxIdleTimeMS")) {
+            settingsBuilder.maxConnectionIdleTime(poolOptions.getNumber("maxIdleTimeMS").intValue(), MILLISECONDS);
+        }
+        if (poolOptions.containsKey("waitQueueTimeoutMS")) {
+            settingsBuilder.maxWaitTime(poolOptions.getNumber("waitQueueTimeoutMS").intValue(), MILLISECONDS);
+        }
+        if (poolOptions.containsKey("backgroundThreadIntervalMS")) {
+            long intervalMillis = poolOptions.getNumber("backgroundThreadIntervalMS").longValue();
+            assertFalse(intervalMillis == 0);
+            if (intervalMillis < 0) {
+                settingsBuilder.maintenanceInitialDelay(Long.MAX_VALUE, MILLISECONDS);
+            } else {
+                /* Using frequency/period instead of the interval required by the specification is incorrect:
+                 * for example, it opens up the possibility of running the background thread non-stop
+                 * if runs last as long as or longer than the period.
+                 * Nevertheless, I am reusing what we already have in the driver instead of clogging up the implementation.
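+                 * Concretely (illustrative numbers, not from any test file): with backgroundThreadIntervalMS = 50
+                 * and maintenance runs that themselves take 50 ms or longer, a period-based schedule starts the
+                 * next run immediately with no idle gap, whereas the interval-based schedule required by the
+                 * specification still waits 50 ms after each run completes.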
*/ + settingsBuilder.maintenanceFrequency( + poolOptions.getNumber("backgroundThreadIntervalMS").longValue(), MILLISECONDS); + } + } + if (poolOptions.containsKey("maxConnecting")) { + settingsBuilder.maxConnecting(poolOptions.getInt32("maxConnecting").intValue()); + } + + listener = new TestConnectionPoolListener(); + settingsBuilder.addConnectionPoolListener(listener); + settings = settingsBuilder.build(); + InternalConnectionPoolSettings internalSettings = InternalConnectionPoolSettings.builder() + .prestartAsyncWorkManager(PRESTART_POOL_ASYNC_WORK_MANAGER_FILE_NAMES.contains(fileName)) + .build(); + Style style = Style.of(definition.getString("style").getValue()); + switch (style) { + case UNIT: { + ServerId serverId = new ServerId(new ClusterId(), new ServerAddress("host1")); + pool = new DefaultConnectionPool(serverId, new TestInternalConnectionFactory(), settings, internalSettings, + SameObjectProvider.initialized(mock(SdamServerDescriptionManager.class)), OPERATION_CONTEXT_FACTORY); + break; + } + case INTEGRATION: { + ServerId serverId = new ServerId(new ClusterId(), ClusterFixture.getPrimary()); + ClusterConnectionMode connectionMode = ClusterConnectionMode.MULTIPLE; + SameObjectProvider sdamProvider = SameObjectProvider.uninitialized(); + pool = new ConnectionIdAdjustingConnectionPool(new DefaultConnectionPool(serverId, + new InternalStreamConnectionFactory( + connectionMode, + createStreamFactory(SocketSettings.builder().build(), + ClusterFixture.getSslSettings()), + ClusterFixture.getCredentialWithCache(), + new ClientMetadata(poolOptions.getString("appName", new BsonString(fileName + ": " + description)).getValue(), + MongoDriverInformation.builder().build()), + Collections.emptyList(), + LoggerSettings.builder().build(), + new TestCommandListener(), + ClusterFixture.getServerApi() + ), + settings, internalSettings, sdamProvider, OPERATION_CONTEXT_FACTORY)); + sdamProvider.initialize(new DefaultSdamServerDescriptionManager(mockedCluster(), serverId, mock(ServerListener.class), + mock(ServerMonitor.class), pool, connectionMode)); + setFailPoint(); + break; + } + default: { + throw new AssertionError(format("Style %s is not implemented", style)); + } + } + if (internalSettings.isPrestartAsyncWorkManager()) { + waitForPoolAsyncWorkManagerStart(); + } + } + + @After + @SuppressWarnings("try") + public void tearDown() { + try (ConnectionPool unused = pool) { + disableFailPoint(); + } finally { + for (ExecutorService cur : executorServiceMap.values()) { + cur.shutdownNow(); + } + } + } + + @Test + public void shouldPassAllOutcomes() throws Exception { + try { + for (BsonValue cur : definition.getArray("operations")) { + BsonDocument operation = cur.asDocument(); + String name = operation.getString("name").getValue(); + + if (name.equals("start")) { + String target = operation.getString("target", new BsonString("")).getValue(); + executorServiceMap.put(target, Executors.newSingleThreadExecutor(r -> { + Thread result = Executors.defaultThreadFactory().newThread(r); + result.setName(target); + return result; + })); + } else if (name.equals("wait")) { + Thread.sleep(operation.getNumber("ms").intValue()); + } else if (name.equals("waitForThread")) { + String target = operation.getString("target", new BsonString("")).getValue(); + Exception exceptionFromFuture = futureMap.remove(target).get(5, TimeUnit.SECONDS); + if (exceptionFromFuture != null) { + throw exceptionFromFuture; + } + } else if (name.equals("waitForEvent")) { + Class eventClass = 
getEventClass(operation.getString("event").getValue()); + assumeNotNull(eventClass); + long timeoutMillis = operation.getNumber("timeout", new BsonInt64(TimeUnit.SECONDS.toMillis(5))) + .longValue(); + listener.waitForEvent(eventClass, operation.getNumber("count").intValue(), timeoutMillis, MILLISECONDS); + } else if (name.equals("clear")) { + pool.invalidate(null); + } else if (name.equals("ready")) { + pool.ready(); + } else if (name.equals("close")) { + pool.close(); + } else if (name.equals("checkOut") || name.equals("checkIn")) { + Callable callable = createCallable(operation); + if (operation.containsKey("thread")) { + String threadTarget = operation.getString("thread").getValue(); + ExecutorService executorService = executorServiceMap.get(threadTarget); + futureMap.put(threadTarget, executorService.submit(callable)); + } else { + callable.call(); + } + } else { + throw new UnsupportedOperationException("No support for " + name); + } + } + } catch (Exception e) { + if (!definition.containsKey("error")) { + throw e; + } + BsonDocument errorDocument = definition.getDocument("error"); + String exceptionType = errorDocument.getString("type").getValue(); + if (exceptionType.equals("PoolClosedError")) { + assertEquals(IllegalStateException.class, e.getClass()); + } else if (exceptionType.equals("WaitQueueTimeoutError")) { + if (e.getClass() != MongoTimeoutException.class) { + throw e; + } + } else { + throw e; + } + } + + if (definition.containsKey("events")) { + Iterator actualEventsIterator = getNonIgnoredActualEvents().iterator(); + BsonArray expectedEvents = definition.getArray("events"); + for (BsonValue cur : expectedEvents) { + BsonDocument expectedEvent = cur.asDocument(); + String type = expectedEvent.getString("type").getValue(); + if (type.equals("ConnectionPoolCreated")) { + assertHasOnlySupportedKeys(expectedEvent, "type", "address", "options"); + ConnectionPoolCreatedEvent actualEvent = getNextEvent(actualEventsIterator, ConnectionPoolCreatedEvent.class); + assertAddressMatch(expectedEvent, actualEvent.getServerId().getAddress()); + assertEquals(settings, actualEvent.getSettings()); + } else if (type.equals("ConnectionPoolCleared")) { + assertHasOnlySupportedKeys(expectedEvent, "type", "address"); + ConnectionPoolClearedEvent actualEvent = getNextEvent(actualEventsIterator, ConnectionPoolClearedEvent.class); + assertAddressMatch(expectedEvent, actualEvent.getServerId().getAddress()); + } else if (type.equals("ConnectionPoolReady")) { + assertHasOnlySupportedKeys(expectedEvent, "type", "address"); + ConnectionPoolReadyEvent actualEvent = getNextEvent(actualEventsIterator, ConnectionPoolReadyEvent.class); + assertAddressMatch(expectedEvent, actualEvent.getServerId().getAddress()); + } else if (type.equals("ConnectionPoolClosed")) { + assertHasOnlySupportedKeys(expectedEvent, "type", "address"); + ConnectionPoolClosedEvent actualEvent = getNextEvent(actualEventsIterator, ConnectionPoolClosedEvent.class); + assertAddressMatch(expectedEvent, actualEvent.getServerId().getAddress()); + } else if (type.equals("ConnectionCreated")) { + assertHasOnlySupportedKeys(expectedEvent, "type", "address", "connectionId"); + ConnectionCreatedEvent actualEvent = getNextEvent(actualEventsIterator, ConnectionCreatedEvent.class); + assertAddressMatch(expectedEvent, actualEvent.getConnectionId().getServerId().getAddress()); + assertConnectionIdMatch(expectedEvent, actualEvent.getConnectionId()); + } else if (type.equals("ConnectionReady")) { + assertHasOnlySupportedKeys(expectedEvent, "type", 
"address", "connectionId", "duration"); + ConnectionReadyEvent actualEvent = getNextEvent(actualEventsIterator, ConnectionReadyEvent.class); + assertAddressMatch(expectedEvent, actualEvent.getConnectionId().getServerId().getAddress()); + assertConnectionIdMatch(expectedEvent, actualEvent.getConnectionId()); + assertDurationMatch(expectedEvent, actualEvent); + } else if (type.equals("ConnectionClosed")) { + assertHasOnlySupportedKeys(expectedEvent, "type", "address", "connectionId", "reason"); + ConnectionClosedEvent actualEvent = getNextEvent(actualEventsIterator, ConnectionClosedEvent.class); + assertAddressMatch(expectedEvent, actualEvent.getConnectionId().getServerId().getAddress()); + assertConnectionIdMatch(expectedEvent, actualEvent.getConnectionId()); + assertReasonMatch(expectedEvent, actualEvent); + } else if (type.equals("ConnectionCheckOutStarted")) { + assertHasOnlySupportedKeys(expectedEvent, "type", "address"); + ConnectionCheckOutStartedEvent actualEvent = getNextEvent(actualEventsIterator, ConnectionCheckOutStartedEvent.class); + assertAddressMatch(expectedEvent, actualEvent.getServerId().getAddress()); + } else if (type.equals("ConnectionCheckOutFailed")) { + assertHasOnlySupportedKeys(expectedEvent, "type", "address", "reason", "duration"); + ConnectionCheckOutFailedEvent actualEvent = getNextEvent(actualEventsIterator, ConnectionCheckOutFailedEvent.class); + assertAddressMatch(expectedEvent, actualEvent.getServerId().getAddress()); + assertReasonMatch(expectedEvent, actualEvent); + assertDurationMatch(expectedEvent, actualEvent); + } else if (type.equals("ConnectionCheckedOut")) { + assertHasOnlySupportedKeys(expectedEvent, "type", "address", "connectionId", "duration"); + ConnectionCheckedOutEvent actualEvent = getNextEvent(actualEventsIterator, ConnectionCheckedOutEvent.class); + assertAddressMatch(expectedEvent, actualEvent.getConnectionId().getServerId().getAddress()); + assertConnectionIdMatch(expectedEvent, actualEvent.getConnectionId()); + assertDurationMatch(expectedEvent, actualEvent); + } else if (type.equals("ConnectionCheckedIn")) { + assertHasOnlySupportedKeys(expectedEvent, "type", "address", "connectionId"); + ConnectionCheckedInEvent actualEvent = getNextEvent(actualEventsIterator, ConnectionCheckedInEvent.class); + assertAddressMatch(expectedEvent, actualEvent.getConnectionId().getServerId().getAddress()); + assertConnectionIdMatch(expectedEvent, actualEvent.getConnectionId()); + } else { + throw new UnsupportedOperationException("Unsupported event type " + type); + } + } + } + } + + private static void assertHasOnlySupportedKeys(final BsonDocument document, final String... 
supportedKeys) {
+        List<String> supportedKeysList = asList(supportedKeys);
+        List<String> unsupportedKeys = document.keySet().stream()
+                .filter(key -> !supportedKeysList.contains(key))
+                .collect(Collectors.toList());
+        if (!unsupportedKeys.isEmpty()) {
+            fail(format("The runner encountered not-yet-supported keys %s in %s", unsupportedKeys, document));
+        }
+    }
+
+    private void assertReasonMatch(final BsonDocument expectedEvent, final ConnectionClosedEvent connectionClosedEvent) {
+        if (!expectedEvent.containsKey("reason")) {
+            return;
+        }
+
+        String expectedReason = expectedEvent.getString("reason").getValue();
+        switch (connectionClosedEvent.getReason()) {
+            case STALE:
+                assertEquals(expectedReason, "stale");
+                break;
+            case IDLE:
+                assertEquals(expectedReason, "idle");
+                break;
+            case ERROR:
+                assertEquals(expectedReason, "error");
+                break;
+            case POOL_CLOSED:
+                assertEquals(expectedReason, "poolClosed");
+                break;
+            default:
+                fail("Unexpected reason to close connection " + connectionClosedEvent.getReason());
+        }
+    }
+
+    protected OperationContext createOperationContext() {
+        return ClusterFixture.createOperationContext(TIMEOUT_SETTINGS.withMaxWaitTimeMS(settings.getMaxWaitTime(MILLISECONDS)));
+    }
+
+    private void assertReasonMatch(final BsonDocument expectedEvent, final ConnectionCheckOutFailedEvent connectionCheckOutFailedEvent) {
+        if (!expectedEvent.containsKey("reason")) {
+            return;
+        }
+
+        String expectedReason = expectedEvent.getString("reason").getValue();
+        switch (connectionCheckOutFailedEvent.getReason()) {
+            case TIMEOUT:
+                assertEquals(expectedReason, "timeout");
+                break;
+            case CONNECTION_ERROR:
+                assertEquals(expectedReason, "connectionError");
+                break;
+            case POOL_CLOSED:
+                assertEquals(expectedReason, "poolClosed");
+                break;
+            default:
+                fail("Unexpected reason to fail connection check out " + connectionCheckOutFailedEvent.getReason());
+        }
+    }
+
+    private static void assertAddressMatch(final BsonDocument expectedEvent, final ServerAddress actualAddress) {
+        String addressKey = "address";
+        if (expectedEvent.isString(addressKey)) {
+            String expectedAddress = expectedEvent.getString(addressKey).getValue();
+            if (!expectedAddress.equals(ANY_STRING)) {
+                assertEquals(format("Address does not match (expected event is %s)", expectedEvent),
+                        new ServerAddress(expectedAddress), actualAddress);
+            }
+        } else if (expectedEvent.containsKey(addressKey)) {
+            assertEquals("Unsupported value", ANY_INT, expectedEvent.getInt32(addressKey).intValue());
+        }
+    }
+
+    private void assertConnectionIdMatch(final BsonDocument expectedEvent, final ConnectionId actualConnectionId) {
+        long actualConnectionIdLocalValue = actualConnectionId.getLocalValue();
+        long adjustedConnectionIdLocalValue = adjustedConnectionIdLocalValue(actualConnectionIdLocalValue);
+        String connectionIdKey = "connectionId";
+        if (expectedEvent.containsKey(connectionIdKey)) {
+            int expectedConnectionId = expectedEvent.getInt32(connectionIdKey).intValue();
+            if (expectedConnectionId != ANY_INT) {
+                assertEquals(format(
+                        "Connection id does not match (expected event is %s; actual local value before adjustment is %s)",
+                        expectedEvent, actualConnectionIdLocalValue),
+                        expectedConnectionId, adjustedConnectionIdLocalValue);
+            }
+        }
+    }
+
+    private static void assertDurationMatch(final BsonDocument expectedEvent, final ConnectionReadyEvent actualEvent) {
+        assertDurationMatch(expectedEvent, actualEvent.getElapsedTime(TimeUnit.MILLISECONDS));
+    }
+
+    private static void assertDurationMatch(final BsonDocument expectedEvent, final ConnectionCheckOutFailedEvent actualEvent) {
+        assertDurationMatch(expectedEvent, actualEvent.getElapsedTime(TimeUnit.MILLISECONDS));
+    }
+
+    private static void assertDurationMatch(final BsonDocument expectedEvent, final ConnectionCheckedOutEvent actualEvent) {
+        assertDurationMatch(expectedEvent, actualEvent.getElapsedTime(TimeUnit.MILLISECONDS));
+    }
+
+    private static void assertDurationMatch(final BsonDocument expectedEvent, final long actualDurationMillis) {
+        String durationKey = "duration";
+        if (expectedEvent.isNumber(durationKey)) {
+            assertTrue("actualDurationMillis must not be negative", actualDurationMillis >= 0);
+            long expectedDurationMillis = expectedEvent.getNumber(durationKey).longValue();
+            if (expectedDurationMillis != ANY_INT) {
+                fail(format("Unsupported duration value %d. Pay attention to the expected value unit when supporting the value",
+                        expectedDurationMillis));
+            }
+        } else if (expectedEvent.containsKey(durationKey)) {
+            fail(format("Unsupported value %s", expectedEvent.get(durationKey)));
+        }
+    }
+
+    private long adjustedConnectionIdLocalValue(final long connectionIdLocalValue) {
+        if (pool instanceof ConnectionIdAdjustingConnectionPool) {
+            return ((ConnectionIdAdjustingConnectionPool) pool).adjustedConnectionIdLocalValue(connectionIdLocalValue);
+        } else {
+            return connectionIdLocalValue;
+        }
+    }
+
+    private List<Object> getNonIgnoredActualEvents() {
+        List<Object> nonIgnoredActualEvents = new ArrayList<>();
+        Set<Class<?>> ignoredEventClasses = getIgnoredEventClasses();
+        for (Object cur : listener.getEvents()) {
+            if (!ignoredEventClasses.contains(cur.getClass())) {
+                nonIgnoredActualEvents.add(cur);
+            }
+        }
+        return nonIgnoredActualEvents;
+    }
+
+    private Set<Class<?>> getIgnoredEventClasses() {
+        Set<Class<?>> ignoredEventClasses = new HashSet<>();
+        for (BsonValue cur : definition.getArray("ignore", new BsonArray())) {
+            String type = cur.asString().getValue();
+            Class<?> eventClass = getEventClass(type);
+            if (eventClass != null) {
+                ignoredEventClasses.add(eventClass);
+            }
+        }
+        return ignoredEventClasses;
+    }
+
+    private Class<?> getEventClass(final String type) {
+        if (type.equals("ConnectionPoolCreated")) {
+            return ConnectionPoolCreatedEvent.class;
+        } else if (type.equals("ConnectionPoolClosed")) {
+            return ConnectionPoolClosedEvent.class;
+        } else if (type.equals("ConnectionCreated")) {
+            return ConnectionCreatedEvent.class;
+        } else if (type.equals("ConnectionCheckedOut")) {
+            return ConnectionCheckedOutEvent.class;
+        } else if (type.equals("ConnectionCheckedIn")) {
+            return ConnectionCheckedInEvent.class;
+        } else if (type.equals("ConnectionClosed")) {
+            return ConnectionClosedEvent.class;
+        } else if (type.equals("ConnectionPoolCleared")) {
+            return ConnectionPoolClearedEvent.class;
+        } else if (type.equals("ConnectionPoolReady")) {
+            return ConnectionPoolReadyEvent.class;
+        } else if (type.equals("ConnectionReady")) {
+            return ConnectionReadyEvent.class;
+        } else if (type.equals("ConnectionCheckOutStarted")) {
+            return ConnectionCheckOutStartedEvent.class;
+        } else if (type.equals("ConnectionCheckOutFailed")) {
+            return ConnectionCheckOutFailedEvent.class;
+        } else {
+            throw new UnsupportedOperationException("Unsupported event type " + type);
+        }
+    }
+
+    private <Event> Event getNextEvent(final Iterator<?> eventsIterator, final Class<Event> expectedType) {
+        if (!eventsIterator.hasNext()) {
+            fail("Expected event of type " + expectedType + " but there are no more events");
+        }
+        Object next = eventsIterator.next();
+        assertEquals(expectedType, next.getClass());
+        return expectedType.cast(next);
+    }
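+
+    // Illustrative shape of a "failPoint" document from the spec tests (values here are hypothetical,
+    // not taken from any specific test file):
+    //   { configureFailPoint: "failCommand", mode: { times: 1 },
+    //     data: { failCommands: ["ping"], closeConnection: true } }
+    // setFailPoint() sends such a document verbatim as an admin command; disableFailPoint()
+    // re-sends it with mode: "off" to clean up.
+    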
private static void executeAdminCommand(final BsonDocument command) { + new CommandReadOperation<>("admin", command, new BsonDocumentCodec()) + .execute(ClusterFixture.getBinding()); + } + + private void setFailPoint() { + final String failPointKey = "failPoint"; + if (definition.containsKey(failPointKey)) { + configureFailPointCommand = definition.getDocument(failPointKey); + executeAdminCommand(configureFailPointCommand); + } + } + + private void disableFailPoint() { + if (configureFailPointCommand != null) { + executeAdminCommand(configureFailPointCommand.append("mode", new BsonString("off"))); + } + } + + protected abstract Callable createCallable(BsonDocument operation); + + protected abstract StreamFactory createStreamFactory(SocketSettings socketSettings, SslSettings sslSettings); + + protected Map getConnectionMap() { + return connectionMap; + } + + protected ConnectionPool getPool() { + return pool; + } + + @Parameterized.Parameters(name = "{0}: {1}") + public static Collection data() { + List data = new ArrayList<>(); + for (BsonDocument testDocument + : JsonPoweredTestHelper.getSpecTestDocuments("connection-monitoring-and-pooling/tests/cmap-format")) { + data.add(new Object[]{testDocument.getString("fileName").getValue(), + testDocument.getString("description").getValue(), + testDocument, JsonTestServerVersionChecker.skipTest(testDocument, BsonDocument.parse("{}"))}); + } + return data; + } + + public static void waitForPoolAsyncWorkManagerStart() { + try { + Thread.sleep(500); + } catch (InterruptedException e) { + throw interruptAndCreateMongoInterruptedException(null, e); + } + } + + private static Cluster mockedCluster() { + Cluster cluster = mock(Cluster.class); + Mockito.doAnswer(invocation -> { + invocation.getArgument(0, Runnable.class).run(); + return null; + }).when(cluster).withLock(any(Runnable.class)); + return cluster; + } + + private enum Style { + UNIT, + INTEGRATION; + + public static Style of(final String name) { + return valueOf(name.toUpperCase()); + } + } + + private static final class ConnectionIdAdjustingConnectionPool implements ConnectionPool { + private static final long UNINITIALIZED = Long.MAX_VALUE; + + private final DefaultConnectionPool pool; + private final AtomicLong connectionIdLocalValueAdjustment; + + private ConnectionIdAdjustingConnectionPool(final DefaultConnectionPool pool) { + this.pool = pool; + connectionIdLocalValueAdjustment = new AtomicLong(UNINITIALIZED); + } + + private void updateConnectionIdLocalValueAdjustment(final InternalConnection conn) { + connectionIdLocalValueAdjustment.accumulateAndGet(conn.getDescription().getConnectionId().getLocalValue() - 1, Math::min); + } + + long adjustedConnectionIdLocalValue(final long connectionIdLocalValue) { + return connectionIdLocalValue - connectionIdLocalValueAdjustment.get(); + } + + @Override + public InternalConnection get(final OperationContext operationContext) { + InternalConnection result = pool.get(operationContext); + updateConnectionIdLocalValueAdjustment(result); + return result; + } + + @Override + public void getAsync(final OperationContext operationContext, final SingleResultCallback callback) { + pool.getAsync(operationContext, (result, problem) -> { + try { + if (result != null) { + updateConnectionIdLocalValueAdjustment(result); + } + } finally { + callback.onResult(result, problem); + } + }); + } + + @Override + public void invalidate(@Nullable final Throwable cause) { + pool.invalidate(cause); + } + + @Override + public void invalidate(final ObjectId serviceId, final 
int generation) { + pool.invalidate(serviceId, generation); + } + + @Override + public void ready() { + pool.ready(); + } + + @Override + public void close() { + pool.close(); + } + + @Override + public int getGeneration() { + return pool.getGeneration(); + } + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractServerDiscoveryAndMonitoringTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractServerDiscoveryAndMonitoringTest.java new file mode 100644 index 00000000000..e187e94da7b --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/AbstractServerDiscoveryAndMonitoringTest.java @@ -0,0 +1,214 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.ConnectionString; +import com.mongodb.MongoSocketReadException; +import com.mongodb.MongoSocketReadTimeoutException; +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ClusterSettings; +import com.mongodb.connection.ClusterType; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerType; +import com.mongodb.event.ClusterListener; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.connection.SdamServerDescriptionManager.SdamIssue; +import com.mongodb.internal.time.Timeout; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import util.JsonPoweredTestHelper; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.ClusterFixture.CLIENT_METADATA; +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT; +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS; +import static com.mongodb.connection.ServerConnectionState.CONNECTING; +import static com.mongodb.internal.connection.DescriptionHelper.createServerDescription; +import static com.mongodb.internal.connection.ProtocolHelper.getCommandFailureException; +import static org.junit.Assert.assertEquals; + +public class AbstractServerDiscoveryAndMonitoringTest { + private final BsonDocument definition; + private DefaultTestClusterableServerFactory factory; + private Cluster cluster; + + public AbstractServerDiscoveryAndMonitoringTest(final BsonDocument definition) { + this.definition = definition; + } + + public static Collection data(final String resourcePath) { + List data = new ArrayList<>(); + for (BsonDocument testDocument : JsonPoweredTestHelper.getSpecTestDocuments(resourcePath)) { + data.add(new Object[]{testDocument.getString("fileName").getValue() + + ": " + testDocument.getString("description").getValue(), testDocument}); + } + return data; + } + + protected void applyResponse(final BsonArray response) { + ServerAddress serverAddress = 
+        BsonDocument helloResult = response.get(1).asDocument();
+        ServerDescription serverDescription;
+
+        if (helloResult.isEmpty()) {
+            serverDescription = ServerDescription.builder().type(ServerType.UNKNOWN).state(CONNECTING).address(serverAddress).build();
+        } else {
+            serverDescription = createServerDescription(serverAddress, helloResult, 5000000, 0);
+        }
+        factory.sendNotification(serverAddress, serverDescription);
+    }
+
+    protected void applyApplicationError(final BsonDocument applicationError) {
+        Timeout serverSelectionTimeout = OPERATION_CONTEXT.getTimeoutContext().computeServerSelectionTimeout();
+        ServerAddress serverAddress = new ServerAddress(applicationError.getString("address").getValue());
+        TimeoutContext timeoutContext = new TimeoutContext(TIMEOUT_SETTINGS);
+        // default to the pool's current generation when the test file does not specify one
+        int errorGeneration = applicationError.getNumber("generation",
+                new BsonInt32(((DefaultServer) getCluster().getServersSnapshot(serverSelectionTimeout, timeoutContext).getServer(serverAddress))
+                        .getConnectionPool().getGeneration())).intValue();
+        int maxWireVersion = applicationError.getNumber("maxWireVersion").intValue();
+        String when = applicationError.getString("when").getValue();
+        String type = applicationError.getString("type").getValue();
+
+        DefaultServer server = (DefaultServer) cluster.getServersSnapshot(serverSelectionTimeout, timeoutContext).getServer(serverAddress);
+        RuntimeException exception;
+
+        switch (type) {
+            case "command":
+                exception = getCommandFailureException(applicationError.getDocument("response"), serverAddress,
+                        OPERATION_CONTEXT.getTimeoutContext());
+                break;
+            case "network":
+                exception = new MongoSocketReadException("Read error", serverAddress, new IOException());
+                break;
+            case "timeout":
+                exception = new MongoSocketReadTimeoutException("Read timeout error", serverAddress, new IOException());
+                break;
+            default:
+                throw new UnsupportedOperationException("Unsupported application error type: " + type);
+        }
+
+        switch (when) {
+            case "beforeHandshakeCompletes":
+                server.sdamServerDescriptionManager().handleExceptionBeforeHandshake(
+                        SdamIssue.of(exception, new SdamIssue.Context(server.serverId(), errorGeneration, maxWireVersion)));
+                break;
+            case "afterHandshakeCompletes":
+                server.sdamServerDescriptionManager().handleExceptionAfterHandshake(
+                        SdamIssue.of(exception, new SdamIssue.Context(server.serverId(), errorGeneration, maxWireVersion)));
+                break;
+            default:
+                throw new UnsupportedOperationException("Unsupported `when` value: " + when);
+        }
+    }
+
+    protected ClusterType getClusterType(final String topologyType) {
+        return getClusterType(topologyType, Collections.emptyList());
+    }
+
+    protected ClusterType getClusterType(final String topologyType, final Collection<ServerDescription> serverDescriptions) {
+        if (topologyType.equalsIgnoreCase("Single")) {
+            assertEquals(1, serverDescriptions.size());
+            return serverDescriptions.iterator().next().getClusterType();
+        } else if (topologyType.equalsIgnoreCase("Sharded")) {
+            return ClusterType.SHARDED;
+        } else if (topologyType.equalsIgnoreCase("LoadBalanced")) {
+            return ClusterType.LOAD_BALANCED;
+        } else if (topologyType.startsWith("ReplicaSet")) {
+            return ClusterType.REPLICA_SET;
+        } else if (topologyType.equalsIgnoreCase("Unknown")) {
+            return ClusterType.UNKNOWN;
+        } else {
+            throw new IllegalArgumentException("Unsupported topology type: " + topologyType);
+        }
+    }
+
+    protected ServerType getServerType(final String serverTypeString) {
+        ServerType serverType;
+        if 
(serverTypeString.equals("RSPrimary")) { + serverType = ServerType.REPLICA_SET_PRIMARY; + } else if (serverTypeString.equals("RSSecondary")) { + serverType = ServerType.REPLICA_SET_SECONDARY; + } else if (serverTypeString.equals("RSArbiter")) { + serverType = ServerType.REPLICA_SET_ARBITER; + } else if (serverTypeString.equals("RSGhost")) { + serverType = ServerType.REPLICA_SET_GHOST; + } else if (serverTypeString.equals("RSOther")) { + serverType = ServerType.REPLICA_SET_OTHER; + } else if (serverTypeString.equals("Mongos")) { + serverType = ServerType.SHARD_ROUTER; + } else if (serverTypeString.equals("Standalone")) { + serverType = ServerType.STANDALONE; + } else if (serverTypeString.equals("PossiblePrimary")) { + serverType = ServerType.UNKNOWN; + } else if (serverTypeString.equals("LoadBalancer")) { + serverType = ServerType.LOAD_BALANCER; + } else if (serverTypeString.equals("Unknown")) { + serverType = ServerType.UNKNOWN; + } else { + throw new UnsupportedOperationException("No handler for server type " + serverTypeString); + } + return serverType; + } + + protected void init(final ServerListenerFactory serverListenerFactory, final ClusterListener clusterListener) { + ConnectionString connectionString = new ConnectionString(definition.getString("uri").getValue()); + ClusterSettings settings = ClusterSettings.builder() + .applyConnectionString(connectionString) + .serverSelectionTimeout(1, TimeUnit.SECONDS) + .build(); + + ClusterId clusterId = new ClusterId(); + + factory = new DefaultTestClusterableServerFactory(settings.getMode(), serverListenerFactory); + + ClusterSettings clusterSettings = settings.getClusterListeners().contains(clusterListener) ? settings + : ClusterSettings.builder(settings).addClusterListener(clusterListener).build(); + + if (settings.getMode() == ClusterConnectionMode.SINGLE) { + cluster = new SingleServerCluster(clusterId, clusterSettings, factory, CLIENT_METADATA); + } else if (settings.getMode() == ClusterConnectionMode.MULTIPLE) { + cluster = new MultiServerCluster(clusterId, clusterSettings, factory, CLIENT_METADATA); + } else { + cluster = new LoadBalancedCluster(clusterId, clusterSettings, factory, CLIENT_METADATA, null); + } + } + + protected BsonDocument getDefinition() { + return definition; + } + + protected boolean isSingleServerClusterExpected() { + ConnectionString connectionString = new ConnectionString(definition.getString("uri").getValue()); + Boolean directConnection = connectionString.isDirectConnection(); + return (directConnection != null && directConnection) + || (directConnection == null && connectionString.getHosts().size() == 1 + && connectionString.getRequiredReplicaSetName() == null); + } + + protected Cluster getCluster() { + return cluster; + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/AsynchronousClusterEventListenerTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/AsynchronousClusterEventListenerTest.java new file mode 100644 index 00000000000..09d2fa864a3 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/AsynchronousClusterEventListenerTest.java @@ -0,0 +1,171 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ClusterType; +import com.mongodb.connection.ConnectionId; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerId; +import com.mongodb.event.ClusterClosedEvent; +import com.mongodb.event.ClusterDescriptionChangedEvent; +import com.mongodb.event.ClusterListener; +import com.mongodb.event.ClusterOpeningEvent; +import com.mongodb.event.ServerClosedEvent; +import com.mongodb.event.ServerDescriptionChangedEvent; +import com.mongodb.event.ServerHeartbeatFailedEvent; +import com.mongodb.event.ServerHeartbeatStartedEvent; +import com.mongodb.event.ServerHeartbeatSucceededEvent; +import com.mongodb.event.ServerListener; +import com.mongodb.event.ServerMonitorListener; +import com.mongodb.event.ServerOpeningEvent; +import org.bson.BsonDocument; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.util.Collections; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.SynchronousQueue; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.connection.ServerConnectionState.CONNECTED; +import static com.mongodb.connection.ServerConnectionState.CONNECTING; +import static com.mongodb.connection.ServerType.STANDALONE; +import static com.mongodb.connection.ServerType.UNKNOWN; +import static org.junit.jupiter.api.Assertions.assertEquals; + +class AsynchronousClusterEventListenerTest { + @Test + public void testEventsPublished() throws InterruptedException { + AllClusterEventListener targetListener = new AllClusterEventListener(); + ClusterId clusterId = new ClusterId(); + ServerId serverId = new ServerId(clusterId, new ServerAddress()); + ConnectionId connectionId = new ConnectionId(serverId); + + AsynchronousClusterEventListener listener = AsynchronousClusterEventListener.startNew(clusterId, targetListener, targetListener, + targetListener); + + ClusterOpeningEvent clusterOpeningEvent = new ClusterOpeningEvent(clusterId); + listener.clusterOpening(clusterOpeningEvent); + assertEquals(clusterOpeningEvent, targetListener.take()); + + ClusterDescriptionChangedEvent clusterDescriptionChangedEvent = new ClusterDescriptionChangedEvent(clusterId, + new ClusterDescription(ClusterConnectionMode.SINGLE, ClusterType.STANDALONE, Collections.emptyList()), + new ClusterDescription(ClusterConnectionMode.SINGLE, ClusterType.STANDALONE, Collections.emptyList())); + listener.clusterDescriptionChanged(clusterDescriptionChangedEvent); + assertEquals(clusterDescriptionChangedEvent, targetListener.take()); + + ServerHeartbeatStartedEvent serverHeartbeatStartedEvent = new ServerHeartbeatStartedEvent(connectionId, false); + listener.serverHearbeatStarted(serverHeartbeatStartedEvent); + assertEquals(serverHeartbeatStartedEvent, targetListener.take()); + + ServerHeartbeatSucceededEvent serverHeartbeatSucceededEvent = new ServerHeartbeatSucceededEvent(connectionId, 
new BsonDocument(),
+                1, true);
+        listener.serverHeartbeatSucceeded(serverHeartbeatSucceededEvent);
+        assertEquals(serverHeartbeatSucceededEvent, targetListener.take());
+
+        ServerHeartbeatFailedEvent serverHeartbeatFailedEvent = new ServerHeartbeatFailedEvent(connectionId, 1, true, new IOException());
+        listener.serverHeartbeatFailed(serverHeartbeatFailedEvent);
+        assertEquals(serverHeartbeatFailedEvent, targetListener.take());
+
+        ServerOpeningEvent serverOpeningEvent = new ServerOpeningEvent(serverId);
+        listener.serverOpening(serverOpeningEvent);
+        assertEquals(serverOpeningEvent, targetListener.take());
+
+        ServerDescriptionChangedEvent serverDescriptionChangedEvent = new ServerDescriptionChangedEvent(serverId,
+                ServerDescription.builder().address(new ServerAddress()).type(UNKNOWN).state(CONNECTING).build(),
+                ServerDescription.builder().address(new ServerAddress()).type(STANDALONE).state(CONNECTED).build());
+        listener.serverDescriptionChanged(serverDescriptionChangedEvent);
+        assertEquals(serverDescriptionChangedEvent, targetListener.take());
+
+        ServerClosedEvent serverClosedEvent = new ServerClosedEvent(serverId);
+        listener.serverClosed(serverClosedEvent);
+        assertEquals(serverClosedEvent, targetListener.take());
+
+        ClusterClosedEvent clusterClosedEvent = new ClusterClosedEvent(clusterId);
+        listener.clusterClosed(clusterClosedEvent);
+        assertEquals(clusterClosedEvent, targetListener.take());
+
+        // The thread should die after publishing the ClusterClosedEvent
+        listener.getPublishingThread().join(5000);
+    }
+
+    private static final class AllClusterEventListener implements ClusterListener, ServerListener, ServerMonitorListener {
+        private final BlockingQueue<Object> lastEvent = new SynchronousQueue<>(true);
+
+        Object take() throws InterruptedException {
+            return lastEvent.poll(5, TimeUnit.SECONDS);
+        }
+
+        void addEvent(final Object event) {
+            try {
+                lastEvent.put(event);
+            } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+            }
+        }
+
+        @Override
+        public void clusterOpening(final ClusterOpeningEvent event) {
+            addEvent(event);
+        }
+
+        @Override
+        public void clusterClosed(final ClusterClosedEvent event) {
+            addEvent(event);
+        }
+
+        @Override
+        public void clusterDescriptionChanged(final ClusterDescriptionChangedEvent event) {
+            addEvent(event);
+        }
+
+        @Override
+        public void serverOpening(final ServerOpeningEvent event) {
+            addEvent(event);
+        }
+
+        @Override
+        public void serverClosed(final ServerClosedEvent event) {
+            addEvent(event);
+        }
+
+        @Override
+        public void serverDescriptionChanged(final ServerDescriptionChangedEvent event) {
+            addEvent(event);
+        }
+
+        // NOTE: "Hearbeat" is the (misspelled) method name declared by the ServerMonitorListener API
+        @Override
+        public void serverHearbeatStarted(final ServerHeartbeatStartedEvent event) {
+            addEvent(event);
+        }
+
+        @Override
+        public void serverHeartbeatSucceeded(final ServerHeartbeatSucceededEvent event) {
+            addEvent(event);
+        }
+
+        @Override
+        public void serverHeartbeatFailed(final ServerHeartbeatFailedEvent event) {
+            addEvent(event);
+        }
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/AsynchronousSocketChannelStreamFactoryFactorySpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/AsynchronousSocketChannelStreamFactoryFactorySpecification.groovy
new file mode 100644
index 00000000000..245c6c87a5a
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/AsynchronousSocketChannelStreamFactoryFactorySpecification.groovy
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection + +import com.mongodb.ServerAddress +import com.mongodb.connection.SocketSettings +import com.mongodb.connection.SslSettings +import spock.lang.Specification +import spock.lang.Unroll + +class AsynchronousSocketChannelStreamFactoryFactorySpecification extends Specification { + + @Unroll + def 'should create the expected #description AsynchronousSocketChannelStream'() { + given: + def factory = new AsynchronousSocketChannelStreamFactoryFactory(new DefaultInetAddressResolver()) + .create(socketSettings, sslSettings) + + when: + AsynchronousSocketChannelStream stream = factory.create(serverAddress) as AsynchronousSocketChannelStream + + then: + stream.getSettings() == socketSettings + stream.getAddress() == serverAddress + } + + SocketSettings socketSettings = SocketSettings.builder().build() + SslSettings sslSettings = SslSettings.builder().build() + ServerAddress serverAddress = new ServerAddress() +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/AuthorizationHeaderTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/AuthorizationHeaderTest.java new file mode 100644 index 00000000000..dffc2bc657b --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/AuthorizationHeaderTest.java @@ -0,0 +1,194 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.mongodb.internal.connection;
+
+import org.junit.Test;
+
+import javax.security.sasl.SaslException;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+
+public class AuthorizationHeaderTest {
+    private final String timestamp = "20150830T123600Z";
+
+    @Test
+    public void testHash() throws SaslException {
+        String actual = AuthorizationHeader.hash("");
+        String expected = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"; // SHA-256 of the empty string
+        assertEquals(expected, actual);
+
+        String request = "GET\n"
+                + "/\n"
+                + "Action=ListUsers&Version=2010-05-08\n"
+                + "content-type:application/x-www-form-urlencoded; charset=utf-8\n"
+                + "host:iam.amazonaws.com\n"
+                + String.format("x-amz-date:%s\n", timestamp)
+                + "\n"
+                + "content-type;host;x-amz-date\n"
+                + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";
+
+        actual = AuthorizationHeader.hash(request);
+        expected = "f536975d06c0309214f805bb90ccff089219ecd68b2577efef23edd43b7e1a59";
+        assertEquals(expected, actual);
+    }
+
+    @Test
+    public void testGetRegion() throws SaslException {
+        String actual = AuthorizationHeader.getRegion("sts.amazonaws.com");
+        String expected = "us-east-1";
+        assertEquals(expected, actual);
+
+        actual = AuthorizationHeader.getRegion("first");
+        assertEquals(expected, actual);
+
+        actual = AuthorizationHeader.getRegion("first.second");
+        expected = "second";
+        assertEquals(expected, actual);
+
+        actual = AuthorizationHeader.getRegion("sts.us-east-2.amazonaws.com");
+        expected = "us-east-2";
+        assertEquals(expected, actual);
+    }
+
+    @Test(expected = SaslException.class)
+    public void shouldThrowErrorOnEmptyMiddleHostLabels() throws SaslException {
+        AuthorizationHeader.getRegion("abc..def");
+    }
+
+    @Test(expected = SaslException.class)
+    public void shouldThrowErrorOnEmptyHostLabelAtStart() throws SaslException {
+        AuthorizationHeader.getRegion(".abc.def");
+    }
+
+    @Test(expected = SaslException.class)
+    public void shouldThrowErrorOnEmptyHostLabelAtEnd() throws SaslException {
+        AuthorizationHeader.getRegion("abc.def.");
+    }
+
+    @Test(expected = SaslException.class)
+    public void shouldThrowErrorOnEmptyHost() throws SaslException {
+        AuthorizationHeader.getRegion("");
+    }
+
+    @Test
+    public void testGetCanonicalHeaders() {
+        Map<String, String> headers = new HashMap<>();
+
+        headers.put("Host", "iam.amazonaws.com");
+        headers.put("Content-Type", "application/x-www-form-urlencoded; charset=utf-8");
+        headers.put("My-header1", " a b c ");
+        headers.put("X-Amz-Date", timestamp);
+        headers.put("My-Header2", " \"a b c\" ");
+
+        String actual = AuthorizationHeader.getCanonicalHeaders(headers);
+        String expected = "content-type:application/x-www-form-urlencoded; charset=utf-8\n"
+                + "host:iam.amazonaws.com\n"
+                + "my-header1:a b c\n"
+                + "my-header2:\"a b c\"\n"
+                + String.format("x-amz-date:%s\n", timestamp);
+        assertEquals(expected, actual);
+    }
+
+    @Test
+    public void testGetSignedHeaders() {
+        Map<String, String> headers = new HashMap<>();
+
+        headers.put("Host", "iam.amazonaws.com");
+        headers.put("Content-Type", "application/x-www-form-urlencoded");
+        headers.put("X-Amz-Date", timestamp);
+
+        String actual = AuthorizationHeader.getSignedHeaders(headers);
+        String expected = "content-type;host;x-amz-date";
+        assertEquals(expected, actual);
+    }
+
+    @Test
+    public void testCreateCanonicalRequest() throws SaslException {
+        Map<String, String> requestHeaders = new HashMap<>();
+        requestHeaders.put("Content-Type", "application/x-www-form-urlencoded; charset=utf-8");
+        requestHeaders.put("Host", 
"iam.amazonaws.com"); + requestHeaders.put("X-Amz-Date", timestamp); + + String actual = AuthorizationHeader.createCanonicalRequest("GET", "Action=ListUsers&Version=2010-05-08", "", requestHeaders); + String expected = "GET\n" + + "/\n" + + "Action=ListUsers&Version=2010-05-08\n" + + "content-type:application/x-www-form-urlencoded; charset=utf-8\n" + + "host:iam.amazonaws.com\n" + + String.format("x-amz-date:%s\n", timestamp) + + "\n" + + "content-type;host;x-amz-date\n" + + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"; + assertEquals(expected, actual); + + String token = "FakeFakeFakeFake"; + String nonce = "9999999999999999"; + + requestHeaders.put("X-MongoDB-Server-Nonce", nonce); + requestHeaders.put("X-MongoDB-GS2-CB-Flag", "n"); + requestHeaders.put("X-Amz-Security-Token", token); + + actual = AuthorizationHeader.createCanonicalRequest("GET", "Action=ListUsers&Version=2010-05-08", "", requestHeaders); + expected = "GET\n" + + "/\n" + + "Action=ListUsers&Version=2010-05-08\n" + + "content-type:application/x-www-form-urlencoded; charset=utf-8\n" + + "host:iam.amazonaws.com\n" + + String.format("x-amz-date:%s\n", timestamp) + + String.format("x-amz-security-token:%s\n", token) + + "x-mongodb-gs2-cb-flag:n\n" + + String.format("x-mongodb-server-nonce:%s\n", nonce) + + "\n" + + "content-type;host;x-amz-date;x-amz-security-token;x-mongodb-gs2-cb-flag;x-mongodb-server-nonce\n" + + "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"; + assertEquals(expected, actual); + } + + @Test + public void testCreateStringToSign() { + String date = timestamp.substring(0, "YYYYMMDD".length()); + String credentialScope = String.format("%s/us-east-1/iam/aws4_request", date); + String hash = "f536975d06c0309214f805bb90ccff089219ecd68b2577efef23edd43b7e1a59"; + + String actual = AuthorizationHeader.createStringToSign(hash, timestamp, credentialScope); + String expected = "AWS4-HMAC-SHA256\n" + + timestamp + + "\n" + + String.format("%s/us-east-1/iam/aws4_request\n", date) + + "f536975d06c0309214f805bb90ccff089219ecd68b2577efef23edd43b7e1a59"; + assertEquals(expected, actual); + } + + @Test + public void testCalculateSignature() throws SaslException { + String date = timestamp.substring(0, "YYYYMMDD".length()); + String toSign = "AWS4-HMAC-SHA256\n" + + timestamp + + "\n" + + String.format("%s/us-east-1/iam/aws4_request\n", date) + + "f536975d06c0309214f805bb90ccff089219ecd68b2577efef23edd43b7e1a59"; + String region = "us-east-1"; + String service = "iam"; + String secret = "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY"; + + String actual = AuthorizationHeader.calculateSignature(toSign, secret, date, region, service); + String expected = "5d672d79c15b13162d9279b0855cfba6789a8edb4c82c400e06b5924a6f2b5d7"; + assertEquals(expected, actual); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterSpecification.groovy new file mode 100644 index 00000000000..56c500c6183 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterSpecification.groovy @@ -0,0 +1,417 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection + +import com.mongodb.ClusterFixture +import com.mongodb.MongoClientException +import com.mongodb.MongoException +import com.mongodb.MongoInternalException +import com.mongodb.MongoInterruptedException +import com.mongodb.MongoTimeoutException +import com.mongodb.ReadPreference +import com.mongodb.ServerAddress +import com.mongodb.connection.ClusterDescription +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ClusterType +import com.mongodb.connection.ServerConnectionState +import com.mongodb.connection.ServerDescription +import com.mongodb.connection.ServerType +import com.mongodb.event.ServerDescriptionChangedEvent +import com.mongodb.internal.TimeoutContext +import com.mongodb.internal.selector.ReadPreferenceServerSelector +import com.mongodb.internal.selector.ServerAddressSelector +import com.mongodb.internal.selector.WritableServerSelector +import com.mongodb.internal.time.Timeout +import com.mongodb.spock.Slow +import spock.lang.Specification + +import java.util.concurrent.CountDownLatch + +import static com.mongodb.ClusterFixture.CLIENT_METADATA +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS +import static com.mongodb.ClusterFixture.createOperationContext +import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE +import static com.mongodb.connection.ClusterSettings.builder +import static com.mongodb.connection.ServerType.REPLICA_SET_PRIMARY +import static com.mongodb.connection.ServerType.REPLICA_SET_SECONDARY +import static java.util.concurrent.TimeUnit.MILLISECONDS +import static java.util.concurrent.TimeUnit.SECONDS + +/** + * Add new tests to {@link BaseClusterTest}. 
+ */ +class BaseClusterSpecification extends Specification { + + private final ServerAddress firstServer = new ServerAddress('localhost:27017') + private final ServerAddress secondServer = new ServerAddress('localhost:27018') + private final ServerAddress thirdServer = new ServerAddress('localhost:27019') + private final List allServers = [firstServer, secondServer, thirdServer] + private final TestClusterableServerFactory factory = new TestClusterableServerFactory() + + def 'should have current description immediately after construction'() { + given: + def clusterSettings = builder().mode(MULTIPLE) + .hosts([firstServer, secondServer, thirdServer]) + .serverSelector(new ServerAddressSelector(firstServer)) + .build() + def cluster = new BaseCluster(new ClusterId(), clusterSettings, factory, CLIENT_METADATA) { + @Override + protected void connect() { + } + + @Override + Cluster.ServersSnapshot getServersSnapshot(final Timeout serverSelectionTimeout, final TimeoutContext timeoutContext) { + Cluster.ServersSnapshot result = { + serverAddress -> throw new UnsupportedOperationException() + } + result + } + + @Override + void onChange(final ServerDescriptionChangedEvent event) { + } + } + + expect: 'the description is initialized after construction' + cluster.getCurrentDescription() == new ClusterDescription(clusterSettings.getMode(), ClusterType.UNKNOWN, [], clusterSettings, + factory.getSettings()) + + when: 'a server is selected before initialization' + cluster.selectServer({ def clusterDescription -> [] }, + createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(1))) + + then: 'a MongoTimeoutException is thrown' + thrown(MongoTimeoutException) + + when: 'a server is selected before initialization and timeoutMS is set' + cluster.selectServer({ def clusterDescription -> [] }, + createOperationContext(TIMEOUT_SETTINGS + .withServerSelectionTimeoutMS(1) + .withTimeout(1, MILLISECONDS))) + + then: 'a MongoTimeoutException is thrown' + thrown(MongoTimeoutException) + } + + def 'should get cluster settings'() { + given: + def clusterSettings = builder().mode(MULTIPLE) + .hosts([firstServer, secondServer, thirdServer]) + .serverSelectionTimeout(1, SECONDS) + .serverSelector(new ServerAddressSelector(firstServer)) + .build() + def cluster = new MultiServerCluster(new ClusterId(), clusterSettings, factory, CLIENT_METADATA) + + expect: + cluster.getSettings() == clusterSettings + } + + def 'should compose server selector passed to selectServer with server selector in cluster settings'() { + given: + def cluster = new MultiServerCluster(new ClusterId(), + builder().mode(MULTIPLE) + .hosts([firstServer, secondServer, thirdServer]) + .serverSelectionTimeout(1, SECONDS) + .serverSelector(new ServerAddressSelector(firstServer)) + .build(), + factory, CLIENT_METADATA) + factory.sendNotification(firstServer, REPLICA_SET_SECONDARY, allServers) + factory.sendNotification(secondServer, REPLICA_SET_SECONDARY, allServers) + factory.sendNotification(thirdServer, REPLICA_SET_PRIMARY, allServers) + + expect: + cluster.selectServer(new ReadPreferenceServerSelector(ReadPreference.secondary()), OPERATION_CONTEXT) + .serverDescription.address == firstServer + } + + def 'should use server selector passed to selectServer if server selector in cluster settings is null'() { + given: + def cluster = new MultiServerCluster(new ClusterId(), + builder().mode(MULTIPLE) + .hosts([firstServer, secondServer, thirdServer]) + .build(), + factory, CLIENT_METADATA) + factory.sendNotification(firstServer, 
REPLICA_SET_SECONDARY, allServers)
+        factory.sendNotification(secondServer, REPLICA_SET_SECONDARY, allServers)
+        factory.sendNotification(thirdServer, REPLICA_SET_PRIMARY, allServers)
+
+        expect:
+        cluster.selectServer(new ServerAddressSelector(firstServer),
+                createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(1_000)))
+                .serverDescription.address == firstServer
+    }
+
+    def 'should apply local threshold when custom server selector is present'() {
+        given:
+        def cluster = new MultiServerCluster(new ClusterId(),
+                builder().mode(MULTIPLE)
+                        .hosts([firstServer, secondServer, thirdServer])
+                        .serverSelectionTimeout(1, SECONDS)
+                        .serverSelector(new ReadPreferenceServerSelector(ReadPreference.secondary()))
+                        .localThreshold(5, MILLISECONDS)
+                        .build(),
+                factory, CLIENT_METADATA)
+        factory.sendNotification(firstServer, 1, REPLICA_SET_SECONDARY, allServers)
+        factory.sendNotification(secondServer, 7, REPLICA_SET_SECONDARY, allServers)
+        factory.sendNotification(thirdServer, 1, REPLICA_SET_PRIMARY, allServers)
+
+        expect:
+        cluster.selectServer(new ReadPreferenceServerSelector(ReadPreference.nearest()), OPERATION_CONTEXT)
+                .serverDescription.address == firstServer
+    }
+
+    def 'should apply local threshold when custom server selector is absent'() {
+        given:
+        def cluster = new MultiServerCluster(new ClusterId(),
+                builder().mode(MULTIPLE)
+                        .serverSelectionTimeout(1, SECONDS)
+                        .hosts([firstServer, secondServer, thirdServer])
+                        .localThreshold(5, MILLISECONDS)
+                        .build(),
+                factory, CLIENT_METADATA)
+        factory.sendNotification(firstServer, 1, REPLICA_SET_SECONDARY, allServers)
+        factory.sendNotification(secondServer, 7, REPLICA_SET_SECONDARY, allServers)
+        factory.sendNotification(thirdServer, 1, REPLICA_SET_PRIMARY, allServers)
+
+        expect: // firstServer is the only secondary within the latency threshold
+        cluster.selectServer(new ReadPreferenceServerSelector(ReadPreference.secondary()), OPERATION_CONTEXT)
+                .serverDescription.address == firstServer
+    }
+
+    def 'should timeout with useful message'() {
+        given:
+        def cluster = new MultiServerCluster(new ClusterId(),
+                builder().mode(MULTIPLE)
+                        .hosts([firstServer, secondServer])
+                        .build(),
+                factory, CLIENT_METADATA)
+
+        when:
+        factory.sendNotification(firstServer, ServerDescription.builder().type(ServerType.UNKNOWN)
+                .state(ServerConnectionState.CONNECTING)
+                .address(firstServer)
+                .exception(new MongoInternalException('oops'))
+                .build())
+
+        cluster.selectServer(new WritableServerSelector(),
+                createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(serverSelectionTimeoutMS)))
+
+        then:
+        def e = thrown(MongoTimeoutException)
+
+        e.getMessage().startsWith("Timed out while waiting for a server "
+                + 'that matches WritableServerSelector. 
Client view of cluster state is {type=UNKNOWN') + e.getMessage().contains('{address=localhost:27017, type=UNKNOWN, state=CONNECTING, ' + + 'exception={com.mongodb.MongoInternalException: oops}}') + e.getMessage().contains('{address=localhost:27018, type=UNKNOWN, state=CONNECTING}') + + where: + serverSelectionTimeoutMS << [1, 0] + } + + def 'should select server'() { + given: + def cluster = new MultiServerCluster(new ClusterId(), + builder().mode(MULTIPLE) + .hosts([firstServer, secondServer, thirdServer]) + .build(), + factory, CLIENT_METADATA) + factory.sendNotification(firstServer, REPLICA_SET_SECONDARY, allServers) + factory.sendNotification(secondServer, REPLICA_SET_SECONDARY, allServers) + factory.sendNotification(thirdServer, REPLICA_SET_PRIMARY, allServers) + + expect: + cluster.selectServer(new ReadPreferenceServerSelector(ReadPreference.primary()), + createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(serverSelectionTimeoutMS))) + .serverDescription.address == thirdServer + + cleanup: + cluster?.close() + + where: + serverSelectionTimeoutMS << [30, 0, -1] + } + + @Slow + def 'should wait indefinitely for a server until interrupted'() { + given: + def cluster = new MultiServerCluster(new ClusterId(), + builder().mode(MULTIPLE) + .hosts([firstServer, secondServer, thirdServer]) + .build(), + factory, CLIENT_METADATA) + + when: + def latch = new CountDownLatch(1) + def thread = new Thread({ + try { + cluster.selectServer(new ReadPreferenceServerSelector(ReadPreference.primary()), + createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(-1_000))) + } catch (MongoInterruptedException e) { + latch.countDown() + } + }) + thread.start() + sleep(1000) + thread.interrupt() + def interrupted = latch.await(ClusterFixture.TIMEOUT, SECONDS) + + then: + interrupted + + cleanup: + cluster?.close() + } + + def 'should select server asynchronously when server is already available'() { + given: + def cluster = new MultiServerCluster(new ClusterId(), + builder().mode(MULTIPLE) + .hosts([firstServer, secondServer, thirdServer]) + .build(), + factory, CLIENT_METADATA) + factory.sendNotification(firstServer, REPLICA_SET_SECONDARY, allServers) + + when: + def serverDescription = selectServerAsync(cluster, firstServer, serverSelectionTimeoutMS).getDescription() + + then: + serverDescription.address == firstServer + + cleanup: + cluster?.close() + + where: + serverSelectionTimeoutMS << [30, 0, -1] + } + + def 'should select server asynchronously when server is not yet available'() { + given: + def cluster = new MultiServerCluster(new ClusterId(), + builder().mode(MULTIPLE) + .hosts([firstServer, secondServer, thirdServer]) + .build(), + factory, CLIENT_METADATA) + + when: + def secondServerLatch = selectServerAsync(cluster, secondServer, serverSelectionTimeoutMS) + def thirdServerLatch = selectServerAsync(cluster, thirdServer, serverSelectionTimeoutMS) + factory.sendNotification(secondServer, REPLICA_SET_SECONDARY, allServers) + factory.sendNotification(thirdServer, REPLICA_SET_SECONDARY, allServers) + + then: + secondServerLatch.getDescription().address == secondServer + thirdServerLatch.getDescription().address == thirdServer + + cleanup: + cluster?.close() + + where: + serverSelectionTimeoutMS << [500, -1] + } + + def 'when selecting server asynchronously should send MongoClientException to callback if cluster is closed before success'() { + given: + def cluster = new MultiServerCluster(new ClusterId(), + builder().mode(MULTIPLE) + .hosts([firstServer, secondServer, 
thirdServer]) + .build(), + factory, CLIENT_METADATA) + + when: + def serverLatch = selectServerAsync(cluster, firstServer) + cluster.close() + serverLatch.get() + + then: + thrown(MongoClientException) + + cleanup: + cluster?.close() + } + + def 'when selecting server asynchronously should send MongoTimeoutException to callback after timeout period'() { + given: + def cluster = new MultiServerCluster(new ClusterId(), + builder().mode(MULTIPLE) + .hosts([firstServer, secondServer, thirdServer]) + .build(), + factory, CLIENT_METADATA) + + when: + selectServerAsyncAndGet(cluster, firstServer, serverSelectionTimeoutMS) + + then: + thrown(MongoTimeoutException) + + cleanup: + cluster?.close() + + + where: + serverSelectionTimeoutMS << [100, 0] + } + + def selectServerAsyncAndGet(BaseCluster cluster, ServerAddress serverAddress) { + selectServerAsync(cluster, serverAddress, 1_000) + } + + def selectServerAsyncAndGet(BaseCluster cluster, ServerAddress serverAddress, long serverSelectionTimeoutMS) { + selectServerAsync(cluster, serverAddress, serverSelectionTimeoutMS).get() + } + + def selectServerAsync(BaseCluster cluster, ServerAddress serverAddress) { + selectServerAsync(cluster, serverAddress, 1_000) + } + + def selectServerAsync(BaseCluster cluster, ServerAddress serverAddress, long serverSelectionTimeoutMS) { + def serverLatch = new ServerLatch() + cluster.selectServerAsync(new ServerAddressSelector(serverAddress), + createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(serverSelectionTimeoutMS))) { + ServerTuple result, MongoException e -> + serverLatch.server = result != null ? result.getServer() : null + serverLatch.serverDescription = result != null ? result.serverDescription : null + serverLatch.throwable = e + serverLatch.latch.countDown() + } + serverLatch + } + + class ServerLatch { + CountDownLatch latch = new CountDownLatch(1) + Server server + ServerDescription serverDescription + Throwable throwable + + def get() { + latch.await() + if (throwable != null) { + throw throwable + } + server + } + + def getDescription() { + latch.await() + if (throwable != null) { + throw throwable + } + serverDescription + } + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterTest.java new file mode 100644 index 00000000000..1cba6d91c3c --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/BaseClusterTest.java @@ -0,0 +1,62 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.internal.connection; + +import com.mongodb.ClusterFixture; +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ClusterSettings; +import com.mongodb.connection.ClusterType; +import com.mongodb.connection.ServerConnectionState; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.mockito.MongoMockito; +import com.mongodb.internal.selector.ServerAddressSelector; +import org.junit.jupiter.api.Test; + +import static java.util.Arrays.asList; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.mockito.Mockito.when; + +/** + * @see BaseClusterSpecification + */ +final class BaseClusterTest { + @Test + void selectServerToleratesWhenThereIsNoServerForTheSelectedAddress() { + ServerAddress serverAddressA = new ServerAddress("a"); + ServerAddress serverAddressB = new ServerAddress("b"); + Server serverB = MongoMockito.mock(Server.class, server -> + when(server.operationCount()).thenReturn(0)); + ClusterDescription clusterDescriptionAB = new ClusterDescription(ClusterConnectionMode.MULTIPLE, ClusterType.SHARDED, + asList(serverDescription(serverAddressA), serverDescription(serverAddressB))); + Cluster.ServersSnapshot serversSnapshotB = serverAddress -> serverAddress.equals(serverAddressB) ? serverB : null; + assertDoesNotThrow(() -> BaseCluster.createCompleteSelectorAndSelectServer( + new ServerAddressSelector(serverAddressA), + clusterDescriptionAB, + serversSnapshotB, + ClusterFixture.OPERATION_CONTEXT.getServerDeprioritization(), + ClusterSettings.builder().build())); + } + + private static ServerDescription serverDescription(final ServerAddress serverAddress) { + return ServerDescription.builder() + .state(ServerConnectionState.CONNECTED) + .ok(true) + .address(serverAddress) + .build(); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/BulkWriteBatchCombinerSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/BulkWriteBatchCombinerSpecification.groovy new file mode 100644 index 00000000000..15cfde867d3 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/BulkWriteBatchCombinerSpecification.groovy @@ -0,0 +1,150 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection + +import com.mongodb.MongoBulkWriteException +import com.mongodb.ServerAddress +import com.mongodb.bulk.BulkWriteError +import com.mongodb.bulk.BulkWriteInsert +import com.mongodb.bulk.BulkWriteResult +import com.mongodb.bulk.BulkWriteUpsert +import com.mongodb.bulk.WriteConcernError +import org.bson.BsonDocument +import org.bson.BsonString +import spock.lang.Specification + +import static com.mongodb.WriteConcern.ACKNOWLEDGED +import static com.mongodb.WriteConcern.UNACKNOWLEDGED +import static com.mongodb.internal.bulk.WriteRequest.Type.INSERT +import static com.mongodb.internal.bulk.WriteRequest.Type.UPDATE + +class BulkWriteBatchCombinerSpecification extends Specification { + def 'should get unacknowledged result for an unacknowledged write'() { + given: + def combiner = new BulkWriteBatchCombiner(new ServerAddress(), true, UNACKNOWLEDGED) + combiner.addResult(BulkWriteResult.acknowledged(INSERT, 1, 0, [], [])) + + when: + def result = combiner.getResult() + + then: + result == BulkWriteResult.unacknowledged() + } + + def 'should get correct result for an insert'() { + given: + def combiner = new BulkWriteBatchCombiner(new ServerAddress(), true, ACKNOWLEDGED) + combiner.addResult(BulkWriteResult.acknowledged(INSERT, 1, 0, [], [new BulkWriteInsert(6, new BsonString('id1'))])) + combiner.addResult(BulkWriteResult.acknowledged(INSERT, 1, 0, [], [new BulkWriteInsert(3, new BsonString('id2'))])) + + when: + def result = combiner.getResult() + + then: + result == BulkWriteResult.acknowledged(INSERT, 2, 0, [], + [new BulkWriteInsert(3, new BsonString('id2')), new BulkWriteInsert(6, new BsonString('id1'))]) + } + + def 'should sort upserts'() { + given: + def combiner = new BulkWriteBatchCombiner(new ServerAddress(), true, ACKNOWLEDGED) + combiner.addResult(BulkWriteResult.acknowledged(UPDATE, 1, 0, [new BulkWriteUpsert(6, new BsonString('id1'))], [])) + combiner.addResult(BulkWriteResult.acknowledged(UPDATE, 1, 0, [new BulkWriteUpsert(3, new BsonString('id2'))], [])) + + when: + def result = combiner.getResult() + + then: + result == BulkWriteResult.acknowledged(UPDATE, 2, 0, + [new BulkWriteUpsert(3, new BsonString('id2')), + new BulkWriteUpsert(6, new BsonString('id1'))], []) + } + + def 'should throw exception on write error'() { + given: + def combiner = new BulkWriteBatchCombiner(new ServerAddress(), true, ACKNOWLEDGED) + + def error = new BulkWriteError(11000, 'dup key', new BsonDocument(), 0) + combiner.addWriteErrorResult(error, new IndexMap.RangeBased().add(0, 0)) + + when: + combiner.getResult() + + then: + def e = thrown(MongoBulkWriteException) + e == new MongoBulkWriteException(BulkWriteResult.acknowledged(INSERT, 0, 0, [], []), [error], null, new ServerAddress(), [] as Set) + } + + def 'should throw last write concern error'() { + given: + def combiner = new BulkWriteBatchCombiner(new ServerAddress(), true, ACKNOWLEDGED) + combiner.addWriteConcernErrorResult(new WriteConcernError(65, 'journalError', 'journal error', new BsonDocument())) + def writeConcernError = new WriteConcernError(75, 'wtimeout', 'wtimeout message', new BsonDocument()) + combiner.addWriteConcernErrorResult(writeConcernError) + + when: + combiner.getResult() + + then: + def e = thrown(MongoBulkWriteException) + e == new MongoBulkWriteException(BulkWriteResult.acknowledged(INSERT, 0, 0, [], []), [], writeConcernError, + new ServerAddress(), [] as Set) + } + + def 'should not stop run if no errors'() { + given: + def combiner = new BulkWriteBatchCombiner(new 
ServerAddress(), true, ACKNOWLEDGED) + combiner.addResult(BulkWriteResult.acknowledged(INSERT, 1, 0, [], [])) + + expect: + !combiner.shouldStopSendingMoreBatches() + } + + def 'should stop run on error if ordered'() { + given: + def combiner = new BulkWriteBatchCombiner(new ServerAddress(), true, ACKNOWLEDGED) + combiner.addWriteErrorResult(new BulkWriteError(11000, 'dup key', new BsonDocument(), 0), new IndexMap.RangeBased().add(0, 0)) + + expect: + combiner.shouldStopSendingMoreBatches() + } + + def 'should not stop run on error if unordered'() { + given: + def combiner = new BulkWriteBatchCombiner(new ServerAddress(), false, ACKNOWLEDGED) + combiner.addWriteErrorResult(new BulkWriteError(11000, 'dup key', new BsonDocument(), 0), new IndexMap.RangeBased().add(0, 0)) + + expect: + !combiner.shouldStopSendingMoreBatches() + } + + def 'should sort errors by first index'() { + given: + def combiner = new BulkWriteBatchCombiner(new ServerAddress(), false, ACKNOWLEDGED) + combiner.addErrorResult([new BulkWriteError(11000, 'dup key', new BsonDocument(), 1), + new BulkWriteError(45, 'wc error', new BsonDocument(), 0)], + null, new IndexMap.RangeBased().add(0, 0).add(1, 1).add(2, 2)) + + when: + combiner.getResult() + + then: + def e = thrown(MongoBulkWriteException) + e.writeErrors == [new BulkWriteError(45, 'wc error', new BsonDocument(), 0), + new BulkWriteError(11000, 'dup key', new BsonDocument(), 1)] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ByteBufBsonArrayTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ByteBufBsonArrayTest.java new file mode 100644 index 00000000000..f7cefbf57c0 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ByteBufBsonArrayTest.java @@ -0,0 +1,268 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.mongodb.internal.connection;
+
+import org.bson.BsonArray;
+import org.bson.BsonBinary;
+import org.bson.BsonBinaryWriter;
+import org.bson.BsonBoolean;
+import org.bson.BsonDateTime;
+import org.bson.BsonDbPointer;
+import org.bson.BsonDecimal128;
+import org.bson.BsonDocument;
+import org.bson.BsonDouble;
+import org.bson.BsonInt32;
+import org.bson.BsonInt64;
+import org.bson.BsonJavaScript;
+import org.bson.BsonJavaScriptWithScope;
+import org.bson.BsonMaxKey;
+import org.bson.BsonMinKey;
+import org.bson.BsonNull;
+import org.bson.BsonObjectId;
+import org.bson.BsonRegularExpression;
+import org.bson.BsonString;
+import org.bson.BsonSymbol;
+import org.bson.BsonTimestamp;
+import org.bson.BsonUndefined;
+import org.bson.BsonValue;
+import org.bson.ByteBuf;
+import org.bson.ByteBufNIO;
+import org.bson.codecs.BsonDocumentCodec;
+import org.bson.codecs.EncoderContext;
+import org.bson.io.BasicOutputBuffer;
+import org.bson.types.Decimal128;
+import org.bson.types.ObjectId;
+import org.junit.jupiter.api.Test;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.Date;
+import java.util.Iterator;
+import java.util.List;
+import java.util.ListIterator;
+import java.util.NoSuchElementException;
+
+import static java.util.Arrays.asList;
+import static java.util.Collections.emptyList;
+import static java.util.Collections.singletonList;
+import static org.bson.BsonBoolean.FALSE;
+import static org.bson.BsonBoolean.TRUE;
+import static org.junit.jupiter.api.Assertions.assertArrayEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+class ByteBufBsonArrayTest {
+
+    @Test
+    void testGetValues() {
+        List<BsonValue> values = asList(new BsonInt32(0), new BsonInt32(1), new BsonInt32(2));
+        ByteBufBsonArray bsonArray = fromBsonValues(values);
+        assertEquals(values, bsonArray.getValues());
+    }
+
+    @Test
+    void testSize() {
+        assertEquals(0, fromBsonValues(emptyList()).size());
+        assertEquals(1, fromBsonValues(singletonList(TRUE)).size());
+        assertEquals(2, fromBsonValues(asList(TRUE, TRUE)).size());
+    }
+
+    @Test
+    void testIsEmpty() {
+        assertTrue(fromBsonValues(emptyList()).isEmpty());
+        assertFalse(fromBsonValues(singletonList(TRUE)).isEmpty());
+        assertFalse(fromBsonValues(asList(TRUE, TRUE)).isEmpty());
+    }
+
+    @Test
+    void testContains() {
+        assertFalse(fromBsonValues(emptyList()).contains(TRUE));
+        assertTrue(fromBsonValues(singletonList(TRUE)).contains(TRUE));
+        assertTrue(fromBsonValues(asList(FALSE, TRUE)).contains(TRUE));
+        assertFalse(fromBsonValues(singletonList(FALSE)).contains(TRUE));
+        assertFalse(fromBsonValues(asList(FALSE, FALSE)).contains(TRUE));
+    }
+
+    @Test
+    void testIterator() {
+        Iterator<BsonValue> iterator = fromBsonValues(emptyList()).iterator();
+        assertFalse(iterator.hasNext());
+        assertThrows(NoSuchElementException.class, iterator::next);
+
+        iterator = fromBsonValues(singletonList(TRUE)).iterator();
+        assertTrue(iterator.hasNext());
+        assertEquals(TRUE, iterator.next());
+        assertFalse(iterator.hasNext());
+        assertThrows(NoSuchElementException.class, iterator::next);
+
+        iterator = fromBsonValues(asList(TRUE, FALSE)).iterator();
+        assertTrue(iterator.hasNext());
+        assertEquals(TRUE, iterator.next());
+        assertTrue(iterator.hasNext());
+        assertEquals(FALSE, iterator.next());
+        assertFalse(iterator.hasNext());
+        assertThrows(NoSuchElementException.class, iterator::next);
+    }
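+
+    // the read-only java.util.List operations below are expected to mirror BsonArray's behaviour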
+    @Test
+    void testToArray() {
+        assertArrayEquals(new BsonValue[]{TRUE, FALSE}, fromBsonValues(asList(TRUE, FALSE)).toArray());
+        assertArrayEquals(new BsonValue[]{TRUE, FALSE}, fromBsonValues(asList(TRUE, FALSE)).toArray(new BsonValue[0]));
+    }
+
+    @Test
+    void testContainsAll() {
+        assertTrue(fromBsonValues(asList(TRUE, FALSE)).containsAll(asList(TRUE, FALSE)));
+        assertFalse(fromBsonValues(asList(TRUE, TRUE)).containsAll(asList(TRUE, FALSE)));
+    }
+
+    @Test
+    void testGet() {
+        ByteBufBsonArray bsonArray = fromBsonValues(asList(TRUE, FALSE));
+        assertEquals(TRUE, bsonArray.get(0));
+        assertEquals(FALSE, bsonArray.get(1));
+        assertThrows(IndexOutOfBoundsException.class, () -> bsonArray.get(-1));
+        assertThrows(IndexOutOfBoundsException.class, () -> bsonArray.get(2));
+    }
+
+    @Test
+    void testIndexOf() {
+        ByteBufBsonArray bsonArray = fromBsonValues(asList(TRUE, FALSE));
+        assertEquals(0, bsonArray.indexOf(TRUE));
+        assertEquals(1, bsonArray.indexOf(FALSE));
+        assertEquals(-1, bsonArray.indexOf(BsonNull.VALUE));
+    }
+
+    @Test
+    void testLastIndexOf() {
+        ByteBufBsonArray bsonArray = fromBsonValues(asList(TRUE, FALSE, TRUE, FALSE));
+        assertEquals(2, bsonArray.lastIndexOf(TRUE));
+        assertEquals(3, bsonArray.lastIndexOf(FALSE));
+        assertEquals(-1, bsonArray.lastIndexOf(BsonNull.VALUE));
+    }
+
+    @Test
+    void testListIterator() {
+        // implementation is delegated to ArrayList, so not much testing is needed
+        ListIterator<BsonValue> iterator = fromBsonValues(emptyList()).listIterator();
+        assertFalse(iterator.hasNext());
+        assertFalse(iterator.hasPrevious());
+    }
+
+    @Test
+    void testSubList() {
+        ByteBufBsonArray bsonArray = fromBsonValues(asList(new BsonInt32(0), new BsonInt32(1), new BsonInt32(2)));
+        assertEquals(emptyList(), bsonArray.subList(0, 0));
+        assertEquals(singletonList(new BsonInt32(0)), bsonArray.subList(0, 1));
+        assertEquals(singletonList(new BsonInt32(2)), bsonArray.subList(2, 3));
+        assertThrows(IndexOutOfBoundsException.class, () -> bsonArray.subList(-1, 1));
+        assertThrows(IllegalArgumentException.class, () -> bsonArray.subList(3, 2));
+        assertThrows(IndexOutOfBoundsException.class, () -> bsonArray.subList(2, 4));
+    }
+
+    @Test
+    void testEquals() {
+        assertEquals(new BsonArray(asList(TRUE, FALSE)), fromBsonValues(asList(TRUE, FALSE)));
+        assertEquals(fromBsonValues(asList(TRUE, FALSE)), new BsonArray(asList(TRUE, FALSE)));
+
+        assertNotEquals(new BsonArray(asList(TRUE, FALSE)), fromBsonValues(asList(FALSE, TRUE)));
+        assertNotEquals(fromBsonValues(asList(TRUE, FALSE)), new BsonArray(asList(FALSE, TRUE)));
+
+        assertNotEquals(new BsonArray(asList(TRUE, FALSE)), fromBsonValues(asList(TRUE, FALSE, TRUE)));
+        assertNotEquals(fromBsonValues(asList(TRUE, FALSE)), new BsonArray(asList(TRUE, FALSE, TRUE)));
+        assertNotEquals(fromBsonValues(asList(TRUE, FALSE, TRUE)), new BsonArray(asList(TRUE, FALSE)));
+    }
+
+    @Test
+    void testHashCode() {
+        assertEquals(new BsonArray(asList(TRUE, FALSE)).hashCode(), fromBsonValues(asList(TRUE, FALSE)).hashCode());
+    }
+
+    @Test
+    void testToString() {
+        assertEquals(new BsonArray(asList(TRUE, FALSE)).toString(), fromBsonValues(asList(TRUE, FALSE)).toString());
+    }
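+
+    // a value of every BSON type, to confirm the lazy array decodes each one correctly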
+    @Test
+    void testAllBsonTypes() {
+        BsonValue bsonNull = new BsonNull();
+        BsonValue bsonInt32 = new BsonInt32(42);
+        BsonValue bsonInt64 = new BsonInt64(52L);
+        BsonValue bsonDecimal128 = new BsonDecimal128(Decimal128.parse("1.0"));
+        BsonValue bsonBoolean = new BsonBoolean(true);
+        BsonValue bsonDateTime = new BsonDateTime(new Date().getTime());
+        BsonValue bsonDouble = new BsonDouble(62.0);
+        BsonValue bsonString = new BsonString("the fox ...");
+        BsonValue minKey = new BsonMinKey();
+        BsonValue maxKey = new BsonMaxKey();
+        BsonValue javaScript = new BsonJavaScript("int i = 0;");
+        BsonValue objectId = new BsonObjectId(new ObjectId());
+        BsonValue scope = new BsonJavaScriptWithScope("int x = y", new BsonDocument("y", new BsonInt32(1)));
+        BsonValue regularExpression = new BsonRegularExpression("^test.*regex.*xyz$", "i");
+        BsonValue symbol = new BsonSymbol("ruby stuff");
+        BsonValue timestamp = new BsonTimestamp(0x12345678, 5);
+        BsonValue undefined = new BsonUndefined();
+        BsonValue binary = new BsonBinary((byte) 80, new byte[] {5, 4, 3, 2, 1});
+        BsonValue array = new BsonArray();
+        BsonValue document = new BsonDocument("a", new BsonInt32(1));
+        BsonValue dbPointer = new BsonDbPointer("db.coll", new ObjectId());
+
+        ByteBufBsonArray bsonArray = fromBsonValues(asList(
+                bsonNull, bsonInt32, bsonInt64, bsonDecimal128, bsonBoolean, bsonDateTime, bsonDouble, bsonString, minKey, maxKey,
+                javaScript, objectId, scope, regularExpression, symbol, timestamp, undefined, binary, array, document, dbPointer));
+        assertEquals(bsonNull, bsonArray.get(0));
+        assertEquals(bsonInt32, bsonArray.get(1));
+        assertEquals(bsonInt64, bsonArray.get(2));
+        assertEquals(bsonDecimal128, bsonArray.get(3));
+        assertEquals(bsonBoolean, bsonArray.get(4));
+        assertEquals(bsonDateTime, bsonArray.get(5));
+        assertEquals(bsonDouble, bsonArray.get(6));
+        assertEquals(bsonString, bsonArray.get(7));
+        assertEquals(minKey, bsonArray.get(8));
+        assertEquals(maxKey, bsonArray.get(9));
+        assertEquals(javaScript, bsonArray.get(10));
+        assertEquals(objectId, bsonArray.get(11));
+        assertEquals(scope, bsonArray.get(12));
+        assertEquals(regularExpression, bsonArray.get(13));
+        assertEquals(symbol, bsonArray.get(14));
+        assertEquals(timestamp, bsonArray.get(15));
+        assertEquals(undefined, bsonArray.get(16));
+        assertEquals(binary, bsonArray.get(17));
+        assertEquals(array, bsonArray.get(18));
+        assertEquals(document, bsonArray.get(19));
+        assertEquals(dbPointer, bsonArray.get(20));
+    }
+
+    static ByteBufBsonArray fromBsonValues(final List<BsonValue> values) {
+        BsonDocument document = new BsonDocument()
+                .append("a", new BsonArray(values));
+        BasicOutputBuffer buffer = new BasicOutputBuffer();
+        new BsonDocumentCodec().encode(new BsonBinaryWriter(buffer), document, EncoderContext.builder().build());
+        ByteArrayOutputStream baos = new ByteArrayOutputStream();
+        try {
+            buffer.pipe(baos);
+        } catch (IOException e) {
+            throw new RuntimeException("impossible!");
+        }
+        ByteBuf documentByteBuf = new ByteBufNIO(ByteBuffer.wrap(baos.toByteArray()));
+        return (ByteBufBsonArray) new ByteBufBsonDocument(documentByteBuf).entrySet().iterator().next().getValue();
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ByteBufBsonDocumentSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/ByteBufBsonDocumentSpecification.groovy
new file mode 100644
index 00000000000..8dc599706a9
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ByteBufBsonDocumentSpecification.groovy
@@ -0,0 +1,313 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection + +import org.bson.BsonArray +import org.bson.BsonBinaryWriter +import org.bson.BsonBoolean +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonNull +import org.bson.BsonValue +import org.bson.ByteBuf +import org.bson.ByteBufNIO +import org.bson.codecs.BsonDocumentCodec +import org.bson.codecs.DecoderContext +import org.bson.codecs.EncoderContext +import org.bson.io.BasicOutputBuffer +import org.bson.json.JsonMode +import org.bson.json.JsonWriterSettings +import spock.lang.Specification + +import java.nio.ByteBuffer + +import static java.util.Arrays.asList + +class ByteBufBsonDocumentSpecification extends Specification { + def emptyDocumentByteBuf = new ByteBufNIO(ByteBuffer.wrap([5, 0, 0, 0, 0] as byte[])) + ByteBuf documentByteBuf + ByteBufBsonDocument emptyByteBufDocument = new ByteBufBsonDocument(emptyDocumentByteBuf) + def document = new BsonDocument() + .append('a', new BsonInt32(1)) + .append('b', new BsonInt32(2)) + .append('c', new BsonDocument('x', BsonBoolean.TRUE)) + .append('d', new BsonArray(asList(new BsonDocument('y', BsonBoolean.FALSE), new BsonInt32(1)))) + + ByteBufBsonDocument byteBufDocument + + def setup() { + def buffer = new BasicOutputBuffer() + new BsonDocumentCodec().encode(new BsonBinaryWriter(buffer), document, EncoderContext.builder().build()) + ByteArrayOutputStream baos = new ByteArrayOutputStream() + buffer.pipe(baos) + documentByteBuf = new ByteBufNIO(ByteBuffer.wrap(baos.toByteArray())) + byteBufDocument = new ByteBufBsonDocument(documentByteBuf) + } + + def 'get should get the value of the given key'() { + expect: + emptyByteBufDocument.get('a') == null + byteBufDocument.get('z') == null + byteBufDocument.get('a') == new BsonInt32(1) + byteBufDocument.get('b') == new BsonInt32(2) + } + + def 'get should throw if the key is null'() { + when: + byteBufDocument.get(null) + + then: + thrown(IllegalArgumentException) + documentByteBuf.referenceCount == 1 + } + + def 'containsKey should throw if the key name is null'() { + when: + byteBufDocument.containsKey(null) + + then: + thrown(IllegalArgumentException) + documentByteBuf.referenceCount == 1 + } + + def 'containsKey should find an existing key'() { + expect: + byteBufDocument.containsKey('a') + byteBufDocument.containsKey('b') + byteBufDocument.containsKey('c') + byteBufDocument.containsKey('d') + documentByteBuf.referenceCount == 1 + } + + def 'containsKey should not find a non-existing key'() { + expect: + !byteBufDocument.containsKey('e') + !byteBufDocument.containsKey('x') + !byteBufDocument.containsKey('y') + documentByteBuf.referenceCount == 1 + } + + def 'containsValue should find an existing value'() { + expect: + byteBufDocument.containsValue(document.get('a')) + byteBufDocument.containsValue(document.get('b')) + byteBufDocument.containsValue(document.get('c')) + byteBufDocument.containsValue(document.get('d')) + documentByteBuf.referenceCount == 1 + } + + def 'containsValue should not find a non-existing value'() { + expect: + !byteBufDocument.containsValue(new BsonInt32(3)) +
!byteBufDocument.containsValue(new BsonDocument('e', BsonBoolean.FALSE)) + !byteBufDocument.containsValue(new BsonArray(asList(new BsonInt32(2), new BsonInt32(4)))) + documentByteBuf.referenceCount == 1 + } + + def 'isEmpty should return false when the document is not empty'() { + expect: + !byteBufDocument.isEmpty() + documentByteBuf.referenceCount == 1 + } + + def 'isEmpty should return true when the document is empty'() { + expect: + emptyByteBufDocument.isEmpty() + emptyDocumentByteBuf.referenceCount == 1 + } + + def 'should get correct size'() { + expect: + emptyByteBufDocument.size() == 0 + byteBufDocument.size() == 4 + documentByteBuf.referenceCount == 1 + emptyDocumentByteBuf.referenceCount == 1 + } + + def 'should get correct key set'() { + expect: + emptyByteBufDocument.keySet().isEmpty() + byteBufDocument.keySet() == ['a', 'b', 'c', 'd'] as Set + documentByteBuf.referenceCount == 1 + emptyDocumentByteBuf.referenceCount == 1 + } + + def 'should get correct values set'() { + expect: + emptyByteBufDocument.values().isEmpty() + byteBufDocument.values() as Set == [document.get('a'), document.get('b'), document.get('c'), document.get('d')] as Set + documentByteBuf.referenceCount == 1 + emptyDocumentByteBuf.referenceCount == 1 + } + + def 'should get correct entry set'() { + expect: + emptyByteBufDocument.entrySet().isEmpty() + byteBufDocument.entrySet() == [new TestEntry('a', document.get('a')), + new TestEntry('b', document.get('b')), + new TestEntry('c', document.get('c')), + new TestEntry('d', document.get('d'))] as Set + documentByteBuf.referenceCount == 1 + emptyDocumentByteBuf.referenceCount == 1 + } + + def 'all write methods should throw UnsupportedOperationException'() { + when: + byteBufDocument.clear() + + then: + thrown(UnsupportedOperationException) + + when: + byteBufDocument.put('x', BsonNull.VALUE) + + then: + thrown(UnsupportedOperationException) + + when: + byteBufDocument.append('x', BsonNull.VALUE) + + then: + thrown(UnsupportedOperationException) + + when: + byteBufDocument.putAll(new BsonDocument('x', BsonNull.VALUE)) + + then: + thrown(UnsupportedOperationException) + + when: + byteBufDocument.remove(BsonNull.VALUE) + + then: + thrown(UnsupportedOperationException) + } + + def 'should get first key'() { + expect: + byteBufDocument.getFirstKey() == document.keySet().iterator().next() + documentByteBuf.referenceCount == 1 + } + + def 'getFirstKey should throw NoSuchElementException if the document is empty'() { + when: + emptyByteBufDocument.getFirstKey() + + then: + thrown(NoSuchElementException) + emptyDocumentByteBuf.referenceCount == 1 + } + + def 'should create BsonReader'() { + when: + def reader = byteBufDocument.asBsonReader() + + then: + new BsonDocumentCodec().decode(reader, DecoderContext.builder().build()) == document + + cleanup: + reader.close() + } + + def 'clone should make a deep copy'() { + when: + BsonDocument cloned = byteBufDocument.clone() + + then: + cloned == byteBufDocument + documentByteBuf.referenceCount == 1 + } + + def 'should serialize and deserialize'() { + given: + def baos = new ByteArrayOutputStream() + def oos = new ObjectOutputStream(baos) + + when: + oos.writeObject(byteBufDocument) + def bais = new ByteArrayInputStream(baos.toByteArray()) + def ois = new ObjectInputStream(bais) + def deserializedDocument = ois.readObject() + + then: + byteBufDocument == deserializedDocument + documentByteBuf.referenceCount == 1 + } + + def 'toJson should return equivalent'() { + expect: + document.toJson() == byteBufDocument.toJson() +
documentByteBuf.referenceCount == 1 + } + + def 'toJson should be callable multiple times'() { + expect: + byteBufDocument.toJson() + byteBufDocument.toJson() + documentByteBuf.referenceCount == 1 + } + + def 'size should be callable multiple times'() { + expect: + byteBufDocument.size() + byteBufDocument.size() + documentByteBuf.referenceCount == 1 + } + + def 'toJson should respect JsonWriterSettings'() { + given: + def settings = JsonWriterSettings.builder().outputMode(JsonMode.SHELL).build() + + expect: + document.toJson(settings) == byteBufDocument.toJson(settings) + } + + def 'toJson should return equivalent when a ByteBufBsonDocument is nested in a BsonDocument'() { + given: + def topLevel = new BsonDocument('nested', byteBufDocument) + + expect: + new BsonDocument('nested', document).toJson() == topLevel.toJson() + } + + class TestEntry implements Map.Entry<String, BsonValue> { + + private final String key + private BsonValue value + + TestEntry(String key, BsonValue value) { + this.key = key + this.value = value + } + + @Override + String getKey() { + key + } + + @Override + BsonValue getValue() { + value + } + + @Override + BsonValue setValue(final BsonValue value) { + // Map.Entry contract: return the value previously associated with the entry + BsonValue oldValue = this.value + this.value = value + oldValue + } + } + +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ByteBufSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/ByteBufSpecification.groovy new file mode 100644 index 00000000000..d052d6b23f1 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ByteBufSpecification.groovy @@ -0,0 +1,255 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.internal.connection + + +import com.mongodb.internal.connection.netty.NettyByteBuf +import io.netty.buffer.ByteBufAllocator +import io.netty.buffer.PooledByteBufAllocator +import org.bson.ByteBuf +import spock.lang.Specification + +class ByteBufSpecification extends Specification { + def 'should put a byte'() { + given: + def buffer = provider.getBuffer(1024) + + when: + buffer.put((byte) 42) + buffer.flip() + + then: + buffer.get() == 42 + + cleanup: + buffer.release() + + where: + provider << [new NettyBufferProvider(), new SimpleBufferProvider()] + } + + def 'should put several bytes'() { + given: + def buffer = provider.getBuffer(1024) + + when: + buffer.with { + put((byte) 42) + put((byte) 43) + put((byte) 44) + flip() + } + + then: + buffer.get() == 42 + buffer.get() == 43 + buffer.get() == 44 + + cleanup: + buffer.release() + + where: + provider << [new NettyBufferProvider(), new SimpleBufferProvider()] + } + + def 'should put bytes at index'() { + given: + def buffer = provider.getBuffer(1024) + + when: + buffer.with { + put((byte) 0) + put((byte) 0) + put((byte) 0) + put((byte) 0) + put((byte) 43) + put((byte) 44) + put(0, (byte) 22) + put(1, (byte) 23) + put(2, (byte) 24) + put(3, (byte) 25) + flip() + } + + then: + buffer.get() == 22 + buffer.get() == 23 + buffer.get() == 24 + buffer.get() == 25 + buffer.get() == 43 + buffer.get() == 44 + + cleanup: + buffer.release() + + where: + provider << [new NettyBufferProvider(), new SimpleBufferProvider()] + } + + def 'when writing, remaining should be the number of bytes that can be written'() { + when: + def buffer = provider.getBuffer(1024) + + then: + buffer.remaining() == 1024 + + and: + buffer.put((byte) 1) + + then: + buffer.remaining() == 1023 + + cleanup: + buffer.release() + + where: + provider << [new NettyBufferProvider(), new SimpleBufferProvider()] + } + + def 'when writing, hasRemaining should be true if there is still room to write'() { + when: + def buffer = provider.getBuffer(2) + + then: + buffer.hasRemaining() + + and: + buffer.put((byte) 1) + + then: + buffer.hasRemaining() + + and: + buffer.put((byte) 1) + + then: + !buffer.hasRemaining() + + cleanup: + buffer.release() + + where: + provider << [new NettyBufferProvider(), new SimpleBufferProvider()] + } + + def 'should return NIO buffer with the same capacity and limit'() { + given: + def buffer = provider.getBuffer(36) + + when: + def nioBuffer = buffer.asNIO() + + then: + nioBuffer.limit() == 36 + nioBuffer.position() == 0 + nioBuffer.remaining() == 36 + + cleanup: + buffer.release() + + where: + provider << [new NettyBufferProvider(), new SimpleBufferProvider()] + } + + def 'should return NIO buffer with the same contents'() { + given: + def buffer = provider.getBuffer(1024) + + buffer.with { + put((byte) 42) + put((byte) 43) + put((byte) 44) + put((byte) 45) + put((byte) 46) + put((byte) 47) + + flip() + } + + when: + def nioBuffer = buffer.asNIO() + + then: + nioBuffer.limit() == 6 + nioBuffer.position() == 0 + nioBuffer.get() == 42 + nioBuffer.get() == 43 + nioBuffer.get() == 44 + nioBuffer.get() == 45 + nioBuffer.get() == 46 + nioBuffer.get() == 47 + nioBuffer.remaining() == 0 + + cleanup: + buffer.release() + + where: + provider << [new NettyBufferProvider(), new SimpleBufferProvider()] + } + + def 'should enforce reference counts'() { + when: + def buffer = provider.getBuffer(1024) + buffer.put((byte) 1) + + then: + buffer.referenceCount == 1 + + when: + buffer.retain() + buffer.put((byte) 1) + + then: + buffer.referenceCount == 
2 + + when: + buffer.release() + buffer.put((byte) 1) + + then: + buffer.referenceCount == 1 + + when: + buffer.release() + + then: + buffer.referenceCount == 0 + + when: + buffer.put((byte) 1) + + then: + thrown(Exception) + + where: + provider << [new NettyBufferProvider(), new SimpleBufferProvider()] + } + + static final class NettyBufferProvider implements BufferProvider { + private final ByteBufAllocator allocator + + NettyBufferProvider() { + allocator = PooledByteBufAllocator.DEFAULT + } + + @Override + ByteBuf getBuffer(final int size) { + io.netty.buffer.ByteBuf buffer = allocator.directBuffer(size, size) + new NettyByteBuf(buffer) + } + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ByteBufTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ByteBufTest.java new file mode 100644 index 00000000000..722d7d62fa4 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ByteBufTest.java @@ -0,0 +1,101 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + + +import org.bson.ByteBuf; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.stream.Stream; + +import static org.junit.jupiter.api.Assertions.assertEquals; + + +class ByteBufTest { + + static Stream<BufferProvider> bufferProviders() { + return Stream.of(new ByteBufSpecification.NettyBufferProvider(), new SimpleBufferProvider()); + } + + @ParameterizedTest + @MethodSource("bufferProviders") + void shouldPutInt(final BufferProvider provider) { + ByteBuf buffer = provider.getBuffer(1024); + try { + buffer.putInt(42); + buffer.flip(); + assertEquals(42, buffer.getInt()); + } finally { + buffer.release(); + } + } + + @ParameterizedTest + @MethodSource("bufferProviders") + void shouldPutLong(final BufferProvider provider) { + ByteBuf buffer = provider.getBuffer(1024); + try { + buffer.putLong(42L); + buffer.flip(); + assertEquals(42L, buffer.getLong()); + } finally { + buffer.release(); + } + } + + @ParameterizedTest + @MethodSource("bufferProviders") + void shouldPutDouble(final BufferProvider provider) { + ByteBuf buffer = provider.getBuffer(1024); + try { + buffer.putDouble(42.0D); + buffer.flip(); + assertEquals(42.0D, buffer.getDouble()); + } finally { + buffer.release(); + } + } + + @ParameterizedTest + @MethodSource("bufferProviders") + void shouldPutIntAtIndex(final BufferProvider provider) { + ByteBuf buffer = provider.getBuffer(1024); + try { + buffer.putInt(0); + buffer.putInt(0); + buffer.putInt(0); + buffer.putInt(0); + buffer.put((byte) 43); + buffer.put((byte) 44); + buffer.putInt(0, 22); + buffer.putInt(4, 23); + buffer.putInt(8, 24); + buffer.putInt(12, 25); + buffer.flip(); + + assertEquals(22, buffer.getInt()); + assertEquals(23, buffer.getInt()); + assertEquals(24, buffer.getInt()); + assertEquals(25, buffer.getInt()); + assertEquals(43, buffer.get()); + assertEquals(44, buffer.get()); + }
finally { + buffer.release(); + } + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ByteBufferBsonInputTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ByteBufferBsonInputTest.java new file mode 100644 index 00000000000..b988f1cde1a --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ByteBufferBsonInputTest.java @@ -0,0 +1,750 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.google.common.primitives.Ints; +import com.mongodb.internal.connection.netty.NettyByteBuf; +import io.netty.buffer.PooledByteBufAllocator; +import org.bson.BsonSerializationException; +import org.bson.ByteBuf; +import org.bson.ByteBufNIO; +import org.bson.io.ByteBufferBsonInput; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Stream; + +import static java.lang.Character.MAX_CODE_POINT; +import static java.lang.Character.MAX_LOW_SURROGATE; +import static java.lang.Character.MIN_HIGH_SURROGATE; +import static java.lang.Integer.reverseBytes; +import static java.lang.String.join; +import static java.util.Collections.nCopies; +import static java.util.stream.Collectors.toList; +import static java.util.stream.IntStream.range; +import static java.util.stream.IntStream.rangeClosed; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + + +class ByteBufferBsonInputTest { + + private static final List<Integer> ALL_CODE_POINTS_EXCLUDING_SURROGATES = Stream.concat( + range(1, MIN_HIGH_SURROGATE).boxed(), + rangeClosed(MAX_LOW_SURROGATE + 1, MAX_CODE_POINT).boxed()) + .filter(i -> i < 128 || i % 30 == 0) // only subset of code points to speed up testing + .collect(toList()); + + static Stream<BufferProvider> bufferProviders() { + return Stream.of( + createBufferProvider( + "NettyByteBuf based on PooledByteBufAllocator.DEFAULT.directBuffer", + size -> new NettyByteBuf(PooledByteBufAllocator.DEFAULT.directBuffer(size)) + ), + createBufferProvider( + "NettyByteBuf based on PooledByteBufAllocator.DEFAULT.heapBuffer", + size -> new NettyByteBuf(PooledByteBufAllocator.DEFAULT.heapBuffer(size)) + ), + createBufferProvider( + "PowerOfTwoBufferPool", + new PowerOfTwoBufferPool() + ), + createBufferProvider( + "ByteBufNIO based on ByteBuffer with arrayOffset() -> 2", + size -> new ByteBufNIO(ByteBuffer.wrap(new byte[size + 5], 2, size).slice()) + ), + createBufferProvider( + "ByteBufNIO based on ByteBuffer with arrayOffset() -> 3", + size -> new ByteBufNIO(ByteBuffer.wrap(new byte[size + 4], 3, size).slice()) + ), + createBufferProvider(
"ByteBufNIO emulating direct ByteBuffer", + size -> new ByteBufNIO(ByteBuffer.allocate(size)) { + @Override + public boolean isBackedByArray() { + return false; + } + + @Override + public byte[] array() { + return Assertions.fail("array() is called, when isBackedByArray() returns false"); + } + + @Override + public int arrayOffset() { + return Assertions.fail("arrayOffset() is called, when isBackedByArray() returns false"); + } + } + ) + ); + } + + private static BufferProvider createBufferProvider(final String bufferDescription, final BufferProvider bufferProvider) { + return new BufferProvider() { + @Override + public ByteBuf getBuffer(final int size) { + return bufferProvider.getBuffer(size); + } + + @Override + public String toString() { + return bufferDescription; + } + }; + } + + @ParameterizedTest(name = "should read empty string. BufferProvider={0}") + @MethodSource("bufferProviders") + void shouldReadEmptyString(final BufferProvider bufferProvider) { + // given + byte[] input = {1, 0, 0, 0, 0}; + ByteBuf buffer = allocateAndWriteToBuffer(bufferProvider, input); + + try (ByteBufferBsonInput bufferInput = new ByteBufferBsonInput(buffer)) { + // when + String result = bufferInput.readString(); + + // then + assertEquals("", result); + assertEquals(5, bufferInput.getPosition()); + } + } + + @ParameterizedTest(name = "should read empty CString. BufferProvider={0}") + @MethodSource("bufferProviders") + void shouldReadEmptyCString(final BufferProvider bufferProvider) { + // given + ByteBuf buffer = allocateAndWriteToBuffer(bufferProvider, new byte[]{0}); + try (ByteBufferBsonInput bufferInput = new ByteBufferBsonInput(buffer)) { + // when + String result = bufferInput.readCString(); + + // then + assertEquals("", result); + assertEquals(1, bufferInput.getPosition()); + } + } + + @ParameterizedTest(name = "should read invalid one byte string. BufferProvider={0}") + @MethodSource("bufferProviders") + void shouldReadInvalidOneByteString(final BufferProvider bufferProvider) { + ByteBuf buffer = allocateAndWriteToBuffer(bufferProvider, new byte[]{2, 0, 0, 0, (byte) 0xFF, 0}); + try (ByteBufferBsonInput bufferInput = new ByteBufferBsonInput(buffer)) { + + // when + String result = bufferInput.readString(); + + // then + assertEquals("\uFFFD", result); + assertEquals(6, bufferInput.getPosition()); + } + } + + @ParameterizedTest(name = "should read invalid one byte CString. BufferProvider={0}") + @MethodSource("bufferProviders") + void shouldReadInvalidOneByteCString(final BufferProvider bufferProvider) { + ByteBuf buffer = allocateAndWriteToBuffer(bufferProvider, new byte[]{-0x01, 0}); + try (ByteBufferBsonInput bufferInput = new ByteBufferBsonInput(buffer)) { + + // when + String result = bufferInput.readCString(); + + // then + assertEquals("\uFFFD", result); + assertEquals(2, bufferInput.getPosition()); + } + } + + + @ParameterizedTest(name = "should read string up to buffer limit. 
BufferProvider={0}") + @MethodSource("bufferProviders") + void shouldReadStringUptoBufferLimit(final BufferProvider bufferProvider) { + // given + for (Integer codePoint : ALL_CODE_POINTS_EXCLUDING_SURROGATES) { + for (int offset = 0; offset < 18; offset++) { + String expectedString = join("", nCopies(offset, "b")) + + String.valueOf(Character.toChars(codePoint)); + byte[] expectedStringEncoding = getExpectedEncodedString(expectedString); + + ByteBuf buffer = allocateAndWriteToBuffer(bufferProvider, expectedStringEncoding); + try (ByteBufferBsonInput bufferInput = new ByteBufferBsonInput(buffer)) { + + // when + String actualString = bufferInput.readString(); + + // then + assertEquals(expectedString, actualString); + assertEquals(expectedStringEncoding.length, bufferInput.getPosition()); + } + } + } + } + + @ParameterizedTest(name = "should read string with more data in buffer. BufferProvider={0}") + @MethodSource("bufferProviders") + void shouldReadStringWithMoreDataInBuffer(final BufferProvider bufferProvider) throws IOException { + // given + for (Integer codePoint : ALL_CODE_POINTS_EXCLUDING_SURROGATES) { + for (int offset = 0; offset < 18; offset++) { + String expectedString = join("", nCopies(offset, "b")) + + String.valueOf(Character.toChars(codePoint)); + byte[] expectedStringEncoding = getExpectedEncodedString(expectedString); + byte[] bufferBytes = mergeArrays( + expectedStringEncoding, + new byte[]{1, 2, 3} + ); + + ByteBuf buffer = allocateAndWriteToBuffer(bufferProvider, bufferBytes); + + try (ByteBufferBsonInput bufferInput = new ByteBufferBsonInput(buffer)) { + + // when + String actualString = bufferInput.readString(); + + // then + assertEquals(expectedString, actualString); + assertEquals(expectedStringEncoding.length, bufferInput.getPosition()); + } + } + } + } + + @ParameterizedTest(name = "should read multiple strings within buffer. 
BufferProvider={0}") + @MethodSource("bufferProviders") + void shouldReadMultipleStringsWithinBuffer(final BufferProvider bufferProvider) throws IOException { + // given + for (Integer codePoint : ALL_CODE_POINTS_EXCLUDING_SURROGATES) { + for (int offset = 0; offset < 18; offset++) { + String expectedString1 = join("", nCopies(offset, "b")) + + String.valueOf(Character.toChars(codePoint)); + String expectedString2 = join("", nCopies(offset, "a")) + + String.valueOf(Character.toChars(codePoint)); + + byte[] expectedStringEncoding1 = getExpectedEncodedString(expectedString1); + byte[] expectedStringEncoding2 = getExpectedEncodedString(expectedString2); + int expectedInteger = 12412; + byte[] bufferBytes = mergeArrays( + new byte[]{1, 2, 3}, + expectedStringEncoding1, + Ints.toByteArray(reverseBytes(expectedInteger)), + expectedStringEncoding2, + new byte[]{1, 2, 3, 4} + ); + ByteBuf buffer = allocateAndWriteToBuffer(bufferProvider, bufferBytes); + buffer.position(3); + + try (ByteBufferBsonInput bufferInput = new ByteBufferBsonInput(buffer)) { + // when + String actualString1 = bufferInput.readString(); + + // then + assertEquals( + expectedString1, + actualString1); + assertEquals( + 3 + expectedStringEncoding1.length, + bufferInput.getPosition()); + + // when + assertEquals(expectedInteger, bufferInput.readInt32()); + + // then + String actualString2 = bufferInput.readString(); + assertEquals( + expectedString2, + actualString2); + assertEquals( + 3 + expectedStringEncoding1.length + expectedStringEncoding2.length + Integer.BYTES, + bufferInput.getPosition()); + } + } + } + } + + @ParameterizedTest(name = "should read consecutive multiple strings within buffer. BufferProvider={0}") + @MethodSource("bufferProviders") + void shouldReadConsecutiveMultipleStringsWithinBuffer(final BufferProvider bufferProvider) throws IOException { + // given + for (Integer codePoint : ALL_CODE_POINTS_EXCLUDING_SURROGATES) { + for (int offset = 0; offset < 18; offset++) { + String expectedString1 = join("", nCopies(offset, "b")) + + String.valueOf(Character.toChars(codePoint)); + String expectedString2 = join("", nCopies(offset, "a")) + + String.valueOf(Character.toChars(codePoint)); + + byte[] expectedStringEncoding1 = getExpectedEncodedString(expectedString1); + byte[] expectedStringEncoding2 = getExpectedEncodedString(expectedString2); + byte[] bufferBytes = mergeArrays( + new byte[]{1, 2, 3}, + expectedStringEncoding1, + expectedStringEncoding2, + new byte[]{1, 2, 3, 4} + ); + ByteBuf buffer = allocateAndWriteToBuffer(bufferProvider, bufferBytes); + buffer.position(3); + + try (ByteBufferBsonInput bufferInput = new ByteBufferBsonInput(buffer)) { + // when + String actualString1 = bufferInput.readString(); + + // then + assertEquals( + expectedString1, + actualString1); + assertEquals( + 3 + expectedStringEncoding1.length, + bufferInput.getPosition()); + + // when + String actualString2 = bufferInput.readString(); + + // then + assertEquals( + expectedString2, + actualString2); + assertEquals( + 3 + expectedStringEncoding1.length + expectedStringEncoding2.length, + bufferInput.getPosition()); + } + } + + } + } + + @ParameterizedTest(name = "should read consecutive multiple CStrings within buffer. 
BufferProvider={0}") + @MethodSource("bufferProviders") + void shouldReadConsecutiveMultipleCStringsWithinBuffer(final BufferProvider bufferProvider) throws IOException { + // given + for (Integer codePoint : ALL_CODE_POINTS_EXCLUDING_SURROGATES) { + for (int offset = 0; offset < 18; offset++) { + String expectedString1 = join("", nCopies(offset, "b")) + + String.valueOf(Character.toChars(codePoint)); + String expectedString2 = join("", nCopies(offset, "a")) + + String.valueOf(Character.toChars(codePoint)); + + byte[] expectedStringEncoding1 = getExpectedEncodedCString(expectedString1); + byte[] expectedStringEncoding2 = getExpectedEncodedCString(expectedString2); + byte[] bufferBytes = mergeArrays( + new byte[]{1, 2, 3}, + expectedStringEncoding1, + expectedStringEncoding2, + new byte[]{1, 2, 3, 4} + ); + + ByteBuf buffer = allocateAndWriteToBuffer(bufferProvider, bufferBytes); + buffer.position(3); + + try (ByteBufferBsonInput bufferInput = new ByteBufferBsonInput(buffer)) { + // when + String actualString1 = bufferInput.readCString(); + + // then + assertEquals( + expectedString1, + actualString1); + assertEquals( + 3 + expectedStringEncoding1.length, + bufferInput.getPosition()); + + // when + String actualString2 = bufferInput.readCString(); + + // then + assertEquals( + expectedString2, + actualString2); + assertEquals( + 3 + expectedStringEncoding1.length + expectedStringEncoding2.length, + bufferInput.getPosition()); + } + } + } + } + + @ParameterizedTest(name = "should read multiple CStrings within buffer. BufferProvider={0}") + @MethodSource("bufferProviders") + void shouldReadMultipleCStringsWithinBuffer(final BufferProvider bufferProvider) throws IOException { + // given + for (Integer codePoint : ALL_CODE_POINTS_EXCLUDING_SURROGATES) { + for (int offset = 0; offset < 18; offset++) { + String expectedString1 = join("", nCopies(offset, "b")) + + String.valueOf(Character.toChars(codePoint)); + String expectedString2 = join("", nCopies(offset, "a")) + + String.valueOf(Character.toChars(codePoint)); + + byte[] expectedStringEncoding1 = getExpectedEncodedCString(expectedString1); + byte[] expectedStringEncoding2 = getExpectedEncodedCString(expectedString2); + int expectedInteger = 12412; + byte[] bufferBytes = mergeArrays( + new byte[]{1, 2, 3}, + expectedStringEncoding1, + Ints.toByteArray(reverseBytes(expectedInteger)), + expectedStringEncoding2, + new byte[]{1, 2, 3, 4} + ); + ByteBuf buffer = allocateAndWriteToBuffer(bufferProvider, bufferBytes); + buffer.position(3); + + try (ByteBufferBsonInput bufferInput = new ByteBufferBsonInput(buffer)) { + // when + String actualString1 = bufferInput.readCString(); + + // then + assertEquals( + expectedString1, + actualString1); + assertEquals( + 3 + expectedStringEncoding1.length, + bufferInput.getPosition()); + + // when + int actualInteger = bufferInput.readInt32(); + + // then + assertEquals(expectedInteger, actualInteger); + + // when + String actualString2 = bufferInput.readCString(); + + // then + assertEquals( + expectedString2, + actualString2); + assertEquals( + 3 + expectedStringEncoding1.length + expectedStringEncoding2.length + Integer.BYTES, + bufferInput.getPosition()); + } + } + } + } + + @ParameterizedTest(name = "should read string within buffer. 
BufferProvider={0}") + @MethodSource("bufferProviders") + void shouldReadStringWithinBuffer(final BufferProvider bufferProvider) throws IOException { + // given + for (Integer codePoint : ALL_CODE_POINTS_EXCLUDING_SURROGATES) { + for (int offset = 0; offset < 18; offset++) { + String expectedString = join("", nCopies(offset, "b")) + + String.valueOf(Character.toChars(codePoint)); + + byte[] expectedStringEncoding = getExpectedEncodedString(expectedString); + byte[] bufferBytes = mergeArrays( + new byte[]{1, 2, 3}, + expectedStringEncoding, + new byte[]{4, 5, 6} + ); + + ByteBuf buffer = allocateAndWriteToBuffer(bufferProvider, bufferBytes); + buffer.position(3); + + try (ByteBufferBsonInput bufferInput = new ByteBufferBsonInput(buffer)) { + + // when + String actualString = bufferInput.readString(); + + // then + assertEquals(expectedString, actualString); + assertEquals(3 + expectedStringEncoding.length, bufferInput.getPosition()); + } + } + } + } + + @ParameterizedTest(name = "should read CString up to buffer limit. BufferProvider={0}") + @MethodSource("bufferProviders") + void shouldReadCStringUptoBufferLimit(final BufferProvider bufferProvider) { + // given + for (Integer codePoint : ALL_CODE_POINTS_EXCLUDING_SURROGATES) { + for (int offset = 0; offset < 18; offset++) { + String expectedString = join("", nCopies(offset, "b")) + + String.valueOf(Character.toChars(codePoint)); + byte[] expectedStringEncoding = getExpectedEncodedCString(expectedString); + ByteBuf buffer = allocateAndWriteToBuffer(bufferProvider, expectedStringEncoding); + + try (ByteBufferBsonInput bufferInput = new ByteBufferBsonInput(buffer)) { + + // when + String actualString = bufferInput.readCString(); + + // then + assertEquals(expectedString, actualString); + assertEquals(expectedStringEncoding.length, bufferInput.getPosition()); + } + } + } + } + + @ParameterizedTest(name = "should read CString with more data in buffer. BufferProvider={0}") + @MethodSource("bufferProviders") + void shouldReadCStringWithMoreDataInBuffer(final BufferProvider bufferProvider) throws IOException { + // given + for (Integer codePoint : ALL_CODE_POINTS_EXCLUDING_SURROGATES) { + for (int offset = 0; offset < 18; offset++) { + String expectedString = join("", nCopies(offset, "b")) + + String.valueOf(Character.toChars(codePoint)); + byte[] expectedStringEncoding = getExpectedEncodedCString(expectedString); + byte[] bufferBytes = mergeArrays( + expectedStringEncoding, + new byte[]{1, 2, 3} + ); + + ByteBuf buffer = allocateAndWriteToBuffer(bufferProvider, bufferBytes); + + try (ByteBufferBsonInput bufferInput = new ByteBufferBsonInput(buffer)) { + + // when + String actualString = bufferInput.readCString(); + + // then + assertEquals(expectedString, actualString); + assertEquals(expectedStringEncoding.length, bufferInput.getPosition()); + } + } + } + } + + @ParameterizedTest(name = "should read CString within buffer. 
BufferProvider={0}") + @MethodSource("bufferProviders") + void shouldReadCStringWithingBuffer(final BufferProvider bufferProvider) throws IOException { + // given + for (Integer codePoint : ALL_CODE_POINTS_EXCLUDING_SURROGATES) { + for (int offset = 0; offset < 18; offset++) { + //given + String expectedString = join("", nCopies(offset, "b")) + + String.valueOf(Character.toChars(codePoint)); + + byte[] expectedStringEncoding = getExpectedEncodedCString(expectedString); + byte[] bufferBytes = mergeArrays( + new byte[]{1, 2, 3}, + expectedStringEncoding, + new byte[]{4, 5, 6} + ); + + ByteBuf buffer = allocateAndWriteToBuffer(bufferProvider, bufferBytes); + buffer.position(3); + + try (ByteBufferBsonInput bufferInput = new ByteBufferBsonInput(buffer)) { + // when + String actualString = bufferInput.readCString(); + + // then + assertEquals(expectedString, actualString); + assertEquals(3 + expectedStringEncoding.length, bufferInput.getPosition()); + } + } + } + } + + @ParameterizedTest(name = "should throw if CString is not null terminated skip. BufferProvider={0}") + @MethodSource("bufferProviders") + void shouldThrowIfCStringIsNotNullTerminatedSkip(final BufferProvider bufferProvider) { + // given + ByteBuf buffer = allocateAndWriteToBuffer(bufferProvider, new byte[]{(byte) 0xe0, (byte) 0xa4, (byte) 0x80}); + try (ByteBufferBsonInput expectedString = new ByteBufferBsonInput(buffer)) { + + // when & then + assertThrows(BsonSerializationException.class, expectedString::skipCString); + } + } + + + public static Stream nonNullTerminatedStringsWithBuffers() { + List arguments = new ArrayList<>(); + List collect = bufferProviders().collect(toList()); + for (BufferProvider bufferProvider : collect) { + arguments.add(Arguments.of(new byte[]{1, 0, 0, 0, 1}, bufferProvider)); + arguments.add(Arguments.of(new byte[]{2, 0, 0, 0, 1, 3}, bufferProvider)); + arguments.add(Arguments.of(new byte[]{3, 0, 0, 1, 2, 3}, bufferProvider)); + arguments.add(Arguments.of(new byte[]{4, 0, 0, 0, 1, 2, 3, 4}, bufferProvider)); + arguments.add(Arguments.of(new byte[]{8, 0, 0, 0, 2, 3, 4, 5, 6, 7, 8, 9}, bufferProvider)); + arguments.add(Arguments.of(new byte[]{9, 0, 0, 0, 2, 3, 4, 5, 6, 7, 8, 9, 1}, bufferProvider)); + } + return arguments.stream(); + } + + @ParameterizedTest(name = "should throw if string is not null terminated. 
Parameters: nonNullTerminatedString={0}, bufferProvider={1}") + @MethodSource("nonNullTerminatedStringsWithBuffers") + void shouldThrowIfStringIsNotNullTerminated(final byte[] nonNullTerminatedString, final BufferProvider bufferProvider) { + // given + ByteBuf buffer = allocateAndWriteToBuffer(bufferProvider, nonNullTerminatedString); + try (ByteBufferBsonInput bufferInput = new ByteBufferBsonInput(buffer)) { + + // when & then + assertThrows(BsonSerializationException.class, bufferInput::readString); + } + } + + public static Stream<Arguments> nonNullTerminatedCStringsWithBuffers() { + List<Arguments> arguments = new ArrayList<>(); + List<BufferProvider> collect = bufferProviders().collect(toList()); + for (BufferProvider bufferProvider : collect) { + arguments.add(Arguments.of(new byte[]{1}, bufferProvider)); + arguments.add(Arguments.of(new byte[]{1, 2}, bufferProvider)); + arguments.add(Arguments.of(new byte[]{1, 2, 3}, bufferProvider)); + arguments.add(Arguments.of(new byte[]{1, 2, 3, 4}, bufferProvider)); + arguments.add(Arguments.of(new byte[]{2, 3, 4, 5, 6, 7, 8, 9}, bufferProvider)); + arguments.add(Arguments.of(new byte[]{2, 3, 4, 5, 6, 7, 8, 9, 1}, bufferProvider)); + } + return arguments.stream(); + } + + @ParameterizedTest(name = "should throw if CString is not null terminated. Parameters: nonNullTerminatedCString={0}, bufferProvider={1}") + @MethodSource("nonNullTerminatedCStringsWithBuffers") + void shouldThrowIfCStringIsNotNullTerminated(final byte[] nonNullTerminatedCString, final BufferProvider bufferProvider) { + // given + ByteBuf buffer = allocateAndWriteToBuffer(bufferProvider, nonNullTerminatedCString); + try (ByteBufferBsonInput bufferInput = new ByteBufferBsonInput(buffer)) { + + // when & then + assertThrows(BsonSerializationException.class, bufferInput::readCString); + } + } + + + @ParameterizedTest(name = "should throw if one byte string is not null terminated. BufferProvider={0}") + @MethodSource("bufferProviders") + void shouldThrowIfOneByteStringIsNotNullTerminated(final BufferProvider bufferProvider) { + // given + ByteBuf buffer = allocateAndWriteToBuffer(bufferProvider, new byte[]{2, 0, 0, 0, 1}); + try (ByteBufferBsonInput bufferInput = new ByteBufferBsonInput(buffer)) { + + // when & then + assertThrows(BsonSerializationException.class, bufferInput::readString); + } + } + + @ParameterizedTest(name = "should throw if one byte CString is not null terminated. BufferProvider={0}") + @MethodSource("bufferProviders") + void shouldThrowIfOneByteCStringIsNotNullTerminated(final BufferProvider bufferProvider) { + // given + ByteBuf buffer = allocateAndWriteToBuffer(bufferProvider, new byte[]{1}); + try (ByteBufferBsonInput bufferInput = new ByteBufferBsonInput(buffer)) { + + // when & then + assertThrows(BsonSerializationException.class, bufferInput::readCString); + } + } + + @ParameterizedTest(name = "should throw if length of bson string is not positive. 
BufferProvider={0}") + @MethodSource("bufferProviders") + void shouldThrowIfLengthOfBsonStringIsNotPositive(final BufferProvider bufferProvider) { + // given + ByteBuf buffer = allocateAndWriteToBuffer(bufferProvider, new byte[]{-1, -1, -1, -1, 41, 42, 43, 0}); + try (ByteBufferBsonInput bufferInput = new ByteBufferBsonInput(buffer)) { + + // when & then + assertThrows(BsonSerializationException.class, bufferInput::readString); + } + } + + public static Stream shouldSkipCStringWhenMultipleNullTerminatorsPresent() { + List arguments = new ArrayList<>(); + List collect = bufferProviders().collect(toList()); + for (BufferProvider bufferProvider : collect) { + arguments.add(Arguments.of(new byte[]{0, 8, 0, 0, 0}, bufferProvider)); + arguments.add(Arguments.of(new byte[]{0x4a, 0, 8, 0, 0, 0}, bufferProvider)); + arguments.add(Arguments.of(new byte[]{0x4a, 0x4b, 0, 8, 0, 0, 0}, bufferProvider)); + arguments.add(Arguments.of(new byte[]{0x4a, 0x4b, 0x4c, 0, 8, 0, 0, 0}, bufferProvider)); + arguments.add(Arguments.of(new byte[]{0x4a, 0x61, 0x76, 0x61, 0, 8, 0, 0, 0}, bufferProvider)); + arguments.add(Arguments.of(new byte[]{0x4a, 0x61, 0x76, 0x61, 0x62, 0, 8, 0, 0, 0}, bufferProvider)); + arguments.add(Arguments.of(new byte[]{0x4a, 0x61, 0x76, 0x61, 0x65, 0x62, 0x67, 0, 8, 0, 0, 0}, bufferProvider)); + arguments.add(Arguments.of(new byte[]{0x4a, 0, 8, 0, 0, 0}, bufferProvider)); + } + return arguments.stream(); + } + + @ParameterizedTest(name = "should skip CString when multiple null terminatiors present. Parameters: cStringBytes={0}, bufferProvider={1}") + @MethodSource + void shouldSkipCStringWhenMultipleNullTerminatorsPresent(final byte[] cStringBytes, final BufferProvider bufferProvider) { + // given + ByteBuf buffer = allocateAndWriteToBuffer(bufferProvider, cStringBytes); + try (ByteBufferBsonInput bufferInput = new ByteBufferBsonInput(buffer)) { + + // when + bufferInput.skipCString(); + + //then + assertEquals(cStringBytes.length - Integer.BYTES, bufferInput.getPosition()); + assertEquals(8, bufferInput.readInt32()); + } + } + + @ParameterizedTest(name = "should read skip CString when multiple null terminators present within buffer. BufferProvider={0}") + @MethodSource("bufferProviders") + void shouldReadSkipCStringWhenMultipleNullTerminatorPresentWithinBuffer(final BufferProvider bufferProvider) { + // given + byte[] input = {4, 0, 0, 0, 0x4a, 0x61, 0x76, 0x61, 0, 8, 0, 0, 0}; + ByteBuf buffer = allocateAndWriteToBuffer(bufferProvider, input); + buffer.position(4); + try (ByteBufferBsonInput bufferInput = new ByteBufferBsonInput(buffer)) { + + // when + bufferInput.skipCString(); + + // then + assertEquals(9, bufferInput.getPosition()); + assertEquals(8, bufferInput.readInt32()); + } + } + + + private static ByteBuf allocateAndWriteToBuffer(final BufferProvider bufferProvider, final byte[] input) { + ByteBuf buffer = bufferProvider.getBuffer(input.length); + buffer.put(input, 0, input.length); + buffer.flip(); + return buffer; + } + + + public static byte[] mergeArrays(final byte[]... 
arrays) throws IOException { + int size = 0; + for (byte[] array : arrays) { + size += array.length; + } + ByteArrayOutputStream baos = new ByteArrayOutputStream(size); + for (byte[] array : arrays) { + baos.write(array); + } + return baos.toByteArray(); + } + + private static byte[] getExpectedEncodedString(final String expectedString) { + byte[] expectedEncoding = expectedString.getBytes(StandardCharsets.UTF_8); + // the little-endian length prefix counts the string bytes plus the terminating null byte + int littleEndianLength = reverseBytes(expectedEncoding.length + "\u0000".length()); + byte[] length = Ints.toByteArray(littleEndianLength); + + byte[] combined = new byte[expectedEncoding.length + length.length + 1]; + System.arraycopy(length, 0, combined, 0, length.length); + System.arraycopy(expectedEncoding, 0, combined, length.length, expectedEncoding.length); + // the last element stays 0 and serves as the string's null terminator + return combined; + } + + private static byte[] getExpectedEncodedCString(final String expectedString) { + byte[] encoding = expectedString.getBytes(StandardCharsets.UTF_8); + byte[] combined = new byte[encoding.length + 1]; + System.arraycopy(encoding, 0, combined, 0, encoding.length); + // the trailing zero byte is the CString's null terminator + return combined; + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ByteBufferBsonOutputTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ByteBufferBsonOutputTest.java new file mode 100644 index 00000000000..8988ea3d6d9 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ByteBufferBsonOutputTest.java @@ -0,0 +1,1660 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.internal.connection; + +import com.google.common.primitives.Ints; +import com.mongodb.internal.connection.netty.NettyByteBuf; +import io.netty.buffer.PooledByteBufAllocator; +import org.bson.BsonSerializationException; +import org.bson.ByteBuf; +import org.bson.ByteBufNIO; +import org.bson.io.OutputBuffer; +import org.bson.types.ObjectId; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Nested; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.params.provider.ValueSource; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.charset.CharacterCodingException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.ThreadLocalRandom; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.stream.Stream; + +import static com.mongodb.internal.connection.ByteBufferBsonOutput.INITIAL_BUFFER_SIZE; +import static com.mongodb.internal.connection.ByteBufferBsonOutput.MAX_BUFFER_SIZE; +import static java.lang.Character.MAX_CODE_POINT; +import static java.lang.Character.MAX_HIGH_SURROGATE; +import static java.lang.Character.MAX_LOW_SURROGATE; +import static java.lang.Character.MIN_HIGH_SURROGATE; +import static java.lang.Character.MIN_LOW_SURROGATE; +import static java.lang.Integer.reverseBytes; +import static java.lang.String.format; +import static java.util.Arrays.asList; +import static java.util.Arrays.copyOfRange; +import static java.util.Collections.emptyList; +import static java.util.stream.Collectors.toList; +import static java.util.stream.IntStream.range; +import static java.util.stream.IntStream.rangeClosed; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +final class ByteBufferBsonOutputTest { + + private static final List<Integer> ALL_CODE_POINTS_EXCLUDING_SURROGATES = Stream.concat( + range(1, MIN_HIGH_SURROGATE).boxed(), + rangeClosed(MAX_LOW_SURROGATE + 1, MAX_CODE_POINT).boxed()) + .filter(codePoint -> codePoint < 128 || codePoint % 30 == 0) // only subset of code points to speed up testing + .collect(toList()); + + private static final List<Integer> ALL_SURROGATE_CODE_POINTS = Stream.concat( + range(MIN_LOW_SURROGATE, MAX_LOW_SURROGATE).boxed(), + range(MIN_HIGH_SURROGATE, MAX_HIGH_SURROGATE).boxed()) + .filter(codePoint -> codePoint < 128 || codePoint % 30 == 0) // only subset of code points to speed up testing + .collect(toList()); + + public static final List<Integer> ALL_UTF_16_CODE_POINTS_FORMED_BY_SURROGATE_PAIRS = rangeClosed(0x10000, MAX_CODE_POINT) + .boxed() + .filter(codePoint -> codePoint < 128 || codePoint % 30 == 0) // only subset of code points to speed up testing + .collect(toList()); + + static Stream<BufferProvider> bufferProviders() { + return Stream.of( + createBufferProvider( + "NettyByteBuf based on PooledByteBufAllocator.DEFAULT.directBuffer", + size -> new NettyByteBuf(PooledByteBufAllocator.DEFAULT.directBuffer(size)) + ), + createBufferProvider( + "NettyByteBuf based on PooledByteBufAllocator.DEFAULT.heapBuffer", + size -> new NettyByteBuf(PooledByteBufAllocator.DEFAULT.heapBuffer(size)) + ), + 
createBufferProvider( + "PowerOfTwoBufferPool", + new PowerOfTwoBufferPool() + ), + createBufferProvider( + "ByteBufNIO based on ByteBuffer with arrayOffset() -> 2", + size -> new ByteBufNIO(ByteBuffer.wrap(new byte[size + 5], 2, size).slice()) + ), + createBufferProvider( + "ByteBufNIO based on ByteBuffer with arrayOffset() -> 3", + size -> new ByteBufNIO(ByteBuffer.wrap(new byte[size + 4], 3, size).slice()) + ), + createBufferProvider( + "ByteBufNIO emulating direct ByteBuffer", + size -> new ByteBufNIO(ByteBuffer.allocate(size)) { + @Override + public boolean isBackedByArray() { + return false; + } + + @Override + public byte[] array() { + return Assertions.fail("array() is called, when isBackedByArray() returns false"); + } + + @Override + public int arrayOffset() { + return Assertions.fail("arrayOffset() is called, when isBackedByArray() returns false"); + } + } + ) + ); + } + + private static BufferProvider createBufferProvider(final String bufferDescription, final BufferProvider bufferProvider) { + return new BufferProvider() { + @Override + public ByteBuf getBuffer(final int size) { + return bufferProvider.getBuffer(size); + } + + @Override + public String toString() { + return bufferDescription; + } + }; + } + + public static Stream<Arguments> bufferProvidersWithBranches() { + List<Arguments> arguments = new ArrayList<>(); + List<BufferProvider> collect = bufferProviders().collect(toList()); + for (BufferProvider bufferProvider : collect) { + arguments.add(Arguments.of(true, bufferProvider)); + arguments.add(Arguments.of(false, bufferProvider)); + } + return arguments.stream(); + } + + + @DisplayName("constructor should throw if buffer provider is null") + @Test + @SuppressWarnings("try") + void constructorShouldThrowIfBufferProviderIsNull() { + assertThrows(IllegalArgumentException.class, () -> { + try (ByteBufferBsonOutput ignored = new ByteBufferBsonOutput(null)) { + // nothing to do + } + }); + } + + @DisplayName("position and size should be 0 after constructor") + @ParameterizedTest(name = "position and size should be 0 after constructor. Parameters: branchState={0}") + @ValueSource(strings = {"none", "empty", "truncated"}) + void positionAndSizeShouldBe0AfterConstructor(final String branchState) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + switch (branchState) { + case "none": { + break; + } + case "empty": { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + assertEquals(0, branch.getPosition()); + assertEquals(0, branch.size()); + } + break; + } + case "truncated": { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + for (int i = 0; i < MAX_BUFFER_SIZE; i++) { + branch.writeByte(i); + } + branch.truncateToPosition(0); + } + break; + } + default: { + throw com.mongodb.assertions.Assertions.fail(branchState); + } + } + assertEquals(0, out.getPosition()); + assertEquals(0, out.size()); + } + } + + @DisplayName("should write a byte") + @ParameterizedTest(name = "should write a byte. 
Parameters: useBranch={0}, bufferProvider={1}") + @MethodSource("bufferProvidersWithBranches") + void shouldWriteByte(final boolean useBranch, final BufferProvider bufferProvider) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(bufferProvider)) { + byte v = 11; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeByte(v); + } + } else { + out.writeByte(v); + } + assertArrayEquals(new byte[] {v}, out.toByteArray()); + assertEquals(1, out.getPosition()); + assertEquals(1, out.size()); + } + } + + @DisplayName("should write byte at position") + @ParameterizedTest(name = "should write byte at position. Parameters: useBranch={0}") + @ValueSource(booleans = {false, true}) + void shouldWriteByteAtPosition(final boolean useBranch) { + for (int offset = 0; offset < 5; offset++) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + byte v = 11; + byte[] byteToWrite = {1, 2, 3, 4, 5}; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeBytes(byteToWrite); + branch.write(offset, v); + } + } else { + out.writeBytes(byteToWrite); + out.write(offset, v); + } + byteToWrite[offset] = v; + assertArrayEquals(byteToWrite, out.toByteArray()); + assertEquals(5, out.getPosition()); + assertEquals(5, out.size()); + + } + } + } + + @DisplayName("should throw exception when writing byte at invalid position") + @ParameterizedTest(name = "should throw exception when writing byte at invalid position. Parameters: useBranch={0}") + @ValueSource(booleans = {false, true}) + void shouldThrowExceptionWhenWriteByteAtInvalidPosition(final boolean useBranch) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + byte v = 11; + byte[] byteToWrite = {1, 2, 3, 4, 5}; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + out.writeBytes(byteToWrite); + assertThrows(IllegalArgumentException.class, () -> branch.write(-1, v)); + } + } else { + out.writeBytes(byteToWrite); + assertThrows(IllegalArgumentException.class, () -> out.write(-1, v)); + } + } + } + + @DisplayName("should write bytes") + @ParameterizedTest(name = "should write bytes. Parameters: useBranch={0}, bufferProvider={1}") + @MethodSource("bufferProvidersWithBranches") + void shouldWriteBytes(final boolean useBranch, final BufferProvider bufferProvider) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(bufferProvider)) { + byte[] v = {1, 2, 3, 4}; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeBytes(v); + } + } else { + out.writeBytes(v); + } + assertArrayEquals(v, out.toByteArray()); + assertEquals(v.length, out.getPosition()); + assertEquals(v.length, out.size()); + } + } + + @DisplayName("should write bytes from offset until length") + @ParameterizedTest(name = "should write bytes from offset until length. 
Parameters: useBranch={0}, bufferProvider={1}") + @MethodSource("bufferProvidersWithBranches") + void shouldWriteBytesFromOffsetUntilLength(final boolean useBranch, final BufferProvider bufferProvider) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(bufferProvider)) { + byte[] v = {0, 1, 2, 3, 4, 5}; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeBytes(v, 1, 4); + } + } else { + out.writeBytes(v, 1, 4); + } + assertArrayEquals(new byte[] {1, 2, 3, 4}, out.toByteArray()); + assertEquals(4, out.getPosition()); + assertEquals(4, out.size()); + } + } + + @DisplayName("should write a little endian Int32") + @ParameterizedTest(name = "should write a little endian Int32. Parameters: useBranch={0}, bufferProvider={1}") + @MethodSource("bufferProvidersWithBranches") + void shouldWriteLittleEndianInt32(final boolean useBranch, final BufferProvider bufferProvider) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(bufferProvider)) { + int v = 0x1020304; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeInt32(v); + } + } else { + out.writeInt32(v); + } + assertArrayEquals(new byte[] {4, 3, 2, 1}, out.toByteArray()); + assertEquals(4, out.getPosition()); + assertEquals(4, out.size()); + } + } + + @DisplayName("should write a little endian Int64") + @ParameterizedTest(name = "should write a little endian Int64. Parameters: useBranch={0}, bufferProvider={1}") + @MethodSource("bufferProvidersWithBranches") + void shouldWriteLittleEndianInt64(final boolean useBranch, final BufferProvider bufferProvider) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(bufferProvider)) { + long v = 0x102030405060708L; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeInt64(v); + } + } else { + out.writeInt64(v); + } + assertArrayEquals(new byte[] {8, 7, 6, 5, 4, 3, 2, 1}, out.toByteArray()); + assertEquals(8, out.getPosition()); + assertEquals(8, out.size()); + } + } + + @DisplayName("should write a double") + @ParameterizedTest(name = "should write a double. Parameters: useBranch={0}, bufferProvider={1}") + @MethodSource("bufferProvidersWithBranches") + void shouldWriteDouble(final boolean useBranch, final BufferProvider bufferProvider) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(bufferProvider)) { + double v = Double.longBitsToDouble(0x102030405060708L); + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeDouble(v); + } + } else { + out.writeDouble(v); + } + assertArrayEquals(new byte[] {8, 7, 6, 5, 4, 3, 2, 1}, out.toByteArray()); + assertEquals(8, out.getPosition()); + assertEquals(8, out.size()); + } + } + + @DisplayName("should write an ObjectId") + @ParameterizedTest(name = "should write an ObjectId. 
Parameters: useBranch={0}, bufferProvider={1}") + @MethodSource("bufferProvidersWithBranches") + void shouldWriteObjectId(final boolean useBranch, final BufferProvider bufferProvider) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(bufferProvider)) { + byte[] objectIdAsByteArray = {12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1}; + ObjectId v = new ObjectId(objectIdAsByteArray); + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeObjectId(v); + } + } else { + out.writeObjectId(v); + } + assertArrayEquals(objectIdAsByteArray, out.toByteArray()); + assertEquals(12, out.getPosition()); + assertEquals(12, out.size()); + } + } + + @DisplayName("should write an empty string") + @ParameterizedTest(name = "should write an empty string. Parameters: useBranch={0}, bufferProvider={1}") + @MethodSource("bufferProvidersWithBranches") + void shouldWriteEmptyString(final boolean useBranch, final BufferProvider bufferProvider) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(bufferProvider)) { + String v = ""; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeString(v); + } + } else { + out.writeString(v); + } + assertArrayEquals(new byte[] {1, 0, 0, 0, 0}, out.toByteArray()); + assertEquals(5, out.getPosition()); + assertEquals(5, out.size()); + } + } + + @DisplayName("should write an ASCII string") + @ParameterizedTest(name = "should write an ASCII string. Parameters: useBranch={0}, bufferProvider={1}") + @MethodSource("bufferProvidersWithBranches") + void shouldWriteAsciiString(final boolean useBranch, final BufferProvider bufferProvider) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(bufferProvider)) { + String v = "Java"; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeString(v); + } + } else { + out.writeString(v); + } + assertArrayEquals(new byte[] {5, 0, 0, 0, 0x4a, 0x61, 0x76, 0x61, 0}, out.toByteArray()); + assertEquals(9, out.getPosition()); + assertEquals(9, out.size()); + } + } + + @DisplayName("should write a UTF-8 string") + @ParameterizedTest(name = "should write a UTF-8 string. Parameters: useBranch={0}, bufferProvider={1}") + @MethodSource("bufferProvidersWithBranches") + void shouldWriteUtf8String(final boolean useBranch, final BufferProvider bufferProvider) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(bufferProvider)) { + String v = "\u0900"; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeString(v); + } + } else { + out.writeString(v); + } + assertArrayEquals(new byte[] {4, 0, 0, 0, (byte) 0xe0, (byte) 0xa4, (byte) 0x80, 0}, out.toByteArray()); + assertEquals(8, out.getPosition()); + assertEquals(8, out.size()); + } + } + + @DisplayName("should write an empty CString") + @ParameterizedTest(name = "should write an empty CString. 
Parameters: useBranch={0}, bufferProvider={1}") + @MethodSource("bufferProvidersWithBranches") + void shouldWriteEmptyCString(final boolean useBranch, final BufferProvider bufferProvider) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(bufferProvider)) { + String v = ""; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeCString(v); + } + } else { + out.writeCString(v); + } + assertArrayEquals(new byte[] {0}, out.toByteArray()); + assertEquals(1, out.getPosition()); + assertEquals(1, out.size()); + } + } + + @DisplayName("should write an ASCII CString") + @ParameterizedTest(name = "should write an ASCII CString. Parameters: useBranch={0}, bufferProvider={1}") + @MethodSource("bufferProvidersWithBranches") + void shouldWriteAsciiCString(final boolean useBranch, final BufferProvider bufferProvider) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(bufferProvider)) { + String v = "Java"; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeCString(v); + } + } else { + out.writeCString(v); + } + assertArrayEquals(new byte[] {0x4a, 0x61, 0x76, 0x61, 0}, out.toByteArray()); + assertEquals(5, out.getPosition()); + assertEquals(5, out.size()); + } + } + + @DisplayName("should write a UTF-8 CString") + @ParameterizedTest(name = "should write a UTF-8 CString. Parameters: useBranch={0}, bufferProvider={1}") + @MethodSource("bufferProvidersWithBranches") + void shouldWriteUtf8CString(final boolean useBranch, final BufferProvider bufferProvider) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(bufferProvider)) { + String v = "\u0900"; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeCString(v); + } + } else { + out.writeCString(v); + } + assertArrayEquals(new byte[] {(byte) 0xe0, (byte) 0xa4, (byte) 0x80, 0}, out.toByteArray()); + assertEquals(4, out.getPosition()); + assertEquals(4, out.size()); + } + } + + @DisplayName("should get byte buffers as little endian") + @ParameterizedTest(name = "should get byte buffers as little endian. Parameters: useBranch={0}, bufferProvider={1}") + @MethodSource("bufferProvidersWithBranches") + void shouldGetByteBuffersAsLittleEndian(final boolean useBranch, final BufferProvider bufferProvider) { + List byteBuffers = new ArrayList<>(); + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(bufferProvider)) { + byte[] v = {1, 0, 0, 0}; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeBytes(v); + } + } else { + out.writeBytes(v); + } + + byteBuffers = out.getByteBuffers(); + assertEquals(1, byteBuffers.get(0).getInt()); + } finally { + byteBuffers.forEach(ByteBuf::release); + } + } + + @DisplayName("null character in CString should throw SerializationException") + @ParameterizedTest(name = "null character in CString should throw SerializationException. 
Parameters: useBranch={0}, bufferProvider={1}") + @MethodSource("bufferProvidersWithBranches") + void nullCharacterInCStringShouldThrowSerializationException(final boolean useBranch, final BufferProvider bufferProvider) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(bufferProvider)) { + String v = "hell\u0000world"; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + assertThrows(BsonSerializationException.class, () -> branch.writeCString(v)); + } + } else { + assertThrows(BsonSerializationException.class, () -> out.writeCString(v)); + } + } + } + + @DisplayName("null character in String should not throw SerializationException") + @ParameterizedTest(name = "null character in String should not throw SerializationException. Parameters: useBranch={0}, bufferProvider={1}") + @MethodSource("bufferProvidersWithBranches") + void nullCharacterInStringShouldNotThrowSerializationException(final boolean useBranch, final BufferProvider bufferProvider) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(bufferProvider)) { + String v = "h\u0000i"; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeString(v); + } + } else { + out.writeString(v); + } + assertArrayEquals(new byte[] {4, 0, 0, 0, (byte) 'h', 0, (byte) 'i', 0}, out.toByteArray()); + } + } + + + public static Stream writeInt32AtPositionShouldThrowWithInvalidPosition() { + return bufferProvidersWithBranches().flatMap(arguments -> { + Object[] args = arguments.get(); + boolean useBranch = (boolean) args[0]; + BufferProvider bufferProvider = (BufferProvider) args[1]; + return Stream.of( + Arguments.of(useBranch, -1, bufferProvider), + Arguments.of(useBranch, 1, bufferProvider) + ); + }); + } + + @DisplayName("write Int32 at position should throw with invalid position") + @ParameterizedTest(name = "write Int32 at position should throw with invalid position. " + + "Parameters: useBranch={0}, position={1}, bufferProvider={2}") + @MethodSource + void writeInt32AtPositionShouldThrowWithInvalidPosition(final boolean useBranch, final int position, + final BufferProvider bufferProvider) { + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(bufferProvider)) { + byte[] v = {1, 2, 3, 4}; + int v2 = 0x1020304; + if (useBranch) { + try (ByteBufferBsonOutput.Branch branch = out.branch()) { + branch.writeBytes(v); + assertThrows(IllegalArgumentException.class, () -> branch.writeInt32(position, v2)); + } + } else { + out.writeBytes(v); + assertThrows(IllegalArgumentException.class, () -> out.writeInt32(position, v2)); + } + } + } + + @DisplayName("should write Int32 at position") + @ParameterizedTest(name = "should write Int32 at position. 
Parameters: useBranch={0}, bufferProvider={1}")
+    @MethodSource("bufferProvidersWithBranches")
+    void shouldWriteInt32AtPosition(final boolean useBranch, final BufferProvider bufferProvider) {
+        try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(bufferProvider)) {
+            Consumer<ByteBufferBsonOutput> lastAssertions = effectiveOut -> {
+                assertArrayEquals(new byte[] {4, 3, 2, 1}, copyOfRange(effectiveOut.toByteArray(), 1023, 1027), "the position is not in the first buffer");
+                assertEquals(1032, effectiveOut.getPosition());
+                assertEquals(1032, effectiveOut.size());
+            };
+            Consumer<ByteBufferBsonOutput> assertions = effectiveOut -> {
+                effectiveOut.writeBytes(new byte[] {0, 0, 0, 0, 1, 2, 3, 4});
+                effectiveOut.writeInt32(0, 0x1020304);
+                assertArrayEquals(new byte[] {4, 3, 2, 1, 1, 2, 3, 4}, effectiveOut.toByteArray(), "the position is in the first buffer");
+                assertEquals(8, effectiveOut.getPosition());
+                assertEquals(8, effectiveOut.size());
+                effectiveOut.writeInt32(4, 0x1020304);
+                assertArrayEquals(new byte[] {4, 3, 2, 1, 4, 3, 2, 1}, effectiveOut.toByteArray(), "the position is at the end of the first buffer");
+                assertEquals(8, effectiveOut.getPosition());
+                assertEquals(8, effectiveOut.size());
+                effectiveOut.writeBytes(new byte[1024]);
+                effectiveOut.writeInt32(1023, 0x1020304);
+                lastAssertions.accept(effectiveOut);
+            };
+            if (useBranch) {
+                try (ByteBufferBsonOutput.Branch branch = out.branch()) {
+                    assertions.accept(branch);
+                }
+            } else {
+                assertions.accept(out);
+            }
+            lastAssertions.accept(out);
+        }
+    }
+
+    public static Stream<Arguments> truncateShouldThrowWithInvalidPosition() {
+        return bufferProvidersWithBranches().flatMap(arguments -> {
+            Object[] args = arguments.get();
+            boolean useBranch = (boolean) args[0];
+            BufferProvider bufferProvider = (BufferProvider) args[1];
+            return Stream.of(
+                    Arguments.of(useBranch, -1, bufferProvider),
+                    Arguments.of(useBranch, 5, bufferProvider));
+        });
+    }
+
+    @DisplayName("truncate should throw with invalid position")
+    @ParameterizedTest(name = "truncate should throw with invalid position. "
+            + "Parameters: useBranch={0}, position={1}")
+    @MethodSource
+    void truncateShouldThrowWithInvalidPosition(final boolean useBranch, final int position) {
+        try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) {
+            byte[] v = {1, 2, 3, 4};
+            if (useBranch) {
+                try (ByteBufferBsonOutput.Branch branch = out.branch()) {
+                    branch.writeBytes(v);
+                    assertThrows(IllegalArgumentException.class, () -> branch.truncateToPosition(position));
+                }
+            } else {
+                out.writeBytes(v);
+                assertThrows(IllegalArgumentException.class, () -> out.truncateToPosition(position));
+            }
+        }
+    }
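+    // Illustrative sketch (not part of the original suite): the provider above exercises the two
+    // out-of-range positions (-1 and size + 1) for a 4-byte payload, while the next test shows the
+    // happy path. The implied contract of truncateToPosition(p), with p valid from 0 to size():
+    //   out.writeBytes(new byte[] {1, 2, 3, 4});  // size() == 4
+    //   out.truncateToPosition(2);                // keeps {1, 2}; position and size become 2
+    //   out.truncateToPosition(5);                // would throw IllegalArgumentException, as would -1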
+    @DisplayName("should truncate to position")
+    @ParameterizedTest(name = "should truncate to position. Parameters: useBranch={0}, bufferProvider={1}")
+    @MethodSource("bufferProvidersWithBranches")
+    void shouldTruncateToPosition(final boolean useBranch, final BufferProvider bufferProvider) {
+        try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(bufferProvider)) {
+            byte[] v = {1, 2, 3, 4};
+            byte[] v2 = new byte[1024];
+            if (useBranch) {
+                try (ByteBufferBsonOutput.Branch branch = out.branch()) {
+                    branch.writeBytes(v);
+                    branch.writeBytes(v2);
+                    branch.truncateToPosition(2);
+                }
+            } else {
+                out.writeBytes(v);
+                out.writeBytes(v2);
+                out.truncateToPosition(2);
+            }
+            assertArrayEquals(new byte[] {1, 2}, out.toByteArray());
+            assertEquals(2, out.getPosition());
+            assertEquals(2, out.size());
+        }
+    }
+
+    @DisplayName("should grow to maximum allowed size of byte buffer")
+    @ParameterizedTest(name = "should grow to maximum allowed size of byte buffer. Parameters: useBranch={0}, bufferProvider={1}")
+    @MethodSource("bufferProvidersWithBranches")
+    void shouldGrowToMaximumAllowedSizeOfByteBuffer(final boolean useBranch, final BufferProvider bufferProvider) {
+        try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(bufferProvider)) {
+            byte[] v = new byte[0x2000000];
+            ThreadLocalRandom.current().nextBytes(v);
+            Consumer<ByteBufferBsonOutput> assertByteBuffers = effectiveOut -> {
+                List<ByteBuf> byteBuffers = new ArrayList<>();
+                try {
+                    byteBuffers = effectiveOut.getByteBuffers();
+                    assertEquals(
+                            asList(1 << 10, 1 << 11, 1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, 1 << 17, 1 << 18, 1 << 19, 1 << 20,
+                                    1 << 21, 1 << 22, 1 << 23, 1 << 24, 1 << 24),
+                            byteBuffers.stream().map(ByteBuf::capacity).collect(toList()));
+                } finally {
+                    byteBuffers.forEach(ByteBuf::release);
+                }
+            };
+            Consumer<ByteBufferBsonOutput> assertions = effectiveOut -> {
+                effectiveOut.writeBytes(v);
+                assertEquals(v.length, effectiveOut.size());
+                assertByteBuffers.accept(effectiveOut);
+                ByteArrayOutputStream baos = new ByteArrayOutputStream(effectiveOut.size());
+                try {
+                    effectiveOut.pipe(baos);
+                } catch (IOException e) {
+                    throw new RuntimeException(e);
+                }
+                assertArrayEquals(v, baos.toByteArray());
+            };
+            if (useBranch) {
+                try (ByteBufferBsonOutput.Branch branch = out.branch()) {
+                    assertions.accept(branch);
+                }
+            } else {
+                assertions.accept(out);
+            }
+            assertByteBuffers.accept(out);
+        }
+    }
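+    // A minimal sketch (an inference from the assertion above, not part of the driver API): the
+    // capacity sequence implies an allocation policy that starts at 1 KiB, doubles with each new
+    // buffer, and is capped at 16 MiB (1 << 24). A hypothetical helper reproducing the expected
+    // capacities for a given total payload size:
+    private static List<Integer> impliedBufferCapacities(final long totalBytes) {
+        List<Integer> capacities = new ArrayList<>();
+        long remaining = totalBytes;
+        int capacity = 1 << 10; // assumed initial buffer size: 1 KiB
+        while (remaining > 0) {
+            capacities.add(capacity);
+            remaining -= capacity;
+            capacity = Math.min(capacity << 1, 1 << 24); // double until the 16 MiB cap is reached
+        }
+        return capacities;
+    }
+    // For the 0x2000000 (32 MiB) payload written above this yields 1 << 10 through 1 << 24 followed
+    // by one more 1 << 24 buffer, i.e. exactly the sixteen capacities the assertion lists.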
+    @DisplayName("should pipe")
+    @ParameterizedTest(name = "should pipe. Parameters: useBranch={0}, bufferProvider={1}")
+    @MethodSource("bufferProvidersWithBranches")
+    void shouldPipe(final boolean useBranch, final BufferProvider bufferProvider) throws IOException {
+        try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(bufferProvider)) {
+            byte[] v = new byte[1027];
+            BiConsumer<ByteBufferBsonOutput, ByteArrayOutputStream> assertions = (effectiveOut, baos) -> {
+                assertArrayEquals(v, baos.toByteArray());
+                assertEquals(v.length, effectiveOut.getPosition());
+                assertEquals(v.length, effectiveOut.size());
+            };
+            if (useBranch) {
+                try (ByteBufferBsonOutput.Branch branch = out.branch()) {
+                    branch.writeBytes(v);
+                    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+                    branch.pipe(baos);
+                    assertions.accept(branch, baos);
+                    baos = new ByteArrayOutputStream();
+                    branch.pipe(baos);
+                    assertions.accept(branch, baos);
+                }
+            } else {
+                out.writeBytes(v);
+                ByteArrayOutputStream baos = new ByteArrayOutputStream();
+                out.pipe(baos);
+                assertions.accept(out, baos);
+                baos = new ByteArrayOutputStream();
+                out.pipe(baos);
+                assertions.accept(out, baos);
+            }
+            ByteArrayOutputStream baos = new ByteArrayOutputStream();
+            out.pipe(baos);
+            assertions.accept(out, baos);
+        }
+    }
+
+    @DisplayName("should close")
+    @ParameterizedTest(name = "should close. Parameters: useBranch={0}, bufferProvider={1}")
+    @MethodSource("bufferProvidersWithBranches")
+    @SuppressWarnings("try")
+    void shouldClose(final boolean useBranch, final BufferProvider bufferProvider) {
+        try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(bufferProvider)) {
+            byte[] v = new byte[1027];
+            if (useBranch) {
+                try (ByteBufferBsonOutput.Branch branch = out.branch()) {
+                    branch.writeBytes(v);
+                    branch.close();
+                    assertThrows(IllegalStateException.class, () -> branch.writeByte(11));
+                }
+            } else {
+                out.writeBytes(v);
+                out.close();
+                assertThrows(IllegalStateException.class, () -> out.writeByte(11));
+            }
+        }
+    }
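+    // Clarifying note (an inference from the two tests above, not normative documentation): pipe is
+    // non-destructive, so the same buffered bytes can be piped repeatedly, while close() invalidates
+    // all further writes. A minimal usage sketch, assuming two OutputStreams first and second:
+    //   ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider());
+    //   out.writeBytes(new byte[] {1, 2, 3});
+    //   out.pipe(first);   // first receives {1, 2, 3}
+    //   out.pipe(second);  // second receives the identical bytes
+    //   out.close();
+    //   out.writeByte(1);  // throws IllegalStateException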
Reps={0}") + @ValueSource(ints = {1, INITIAL_BUFFER_SIZE, INITIAL_BUFFER_SIZE * 3}) + void shouldHandleMixedBranchingAndTruncating(final int reps) throws CharacterCodingException { + BiConsumer write = (out, c) -> { + Assertions.assertTrue((byte) c.charValue() == c); + for (int i = 0; i < reps; i++) { + out.writeByte(c); + } + }; + try (ByteBufferBsonOutput out = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + write.accept(out, 'a'); + try (ByteBufferBsonOutput.Branch b3 = out.branch(); + ByteBufferBsonOutput.Branch b1 = out.branch()) { + write.accept(b3, 'g'); + write.accept(out, 'b'); + write.accept(b1, 'e'); + try (ByteBufferBsonOutput.Branch b2 = b1.branch()) { + write.accept(out, 'c'); + write.accept(b2, 'f'); + int b2Position = b2.getPosition(); + write.accept(b2, 'x'); + b2.truncateToPosition(b2Position); + } + write.accept(out, 'd'); + } + write.accept(out, 'h'); + try (ByteBufferBsonOutput.Branch b4 = out.branch()) { + write.accept(b4, 'i'); + int outPosition = out.getPosition(); + try (ByteBufferBsonOutput.Branch b5 = out.branch()) { + write.accept(out, 'x'); + write.accept(b5, 'x'); + } + out.truncateToPosition(outPosition); + } + write.accept(out, 'j'); + StringBuilder expected = new StringBuilder(); + "abcdefghij".chars().forEach(c -> { + String s = String.valueOf((char) c); + for (int i = 0; i < reps; i++) { + expected.append(s); + } + }); + assertEquals(expected.toString(), StandardCharsets.UTF_8.newDecoder().decode(ByteBuffer.wrap(out.toByteArray())).toString()); + } + } + + @DisplayName("should throw exception when calling writeInt32 at absolute position where integer would not fit") + @ParameterizedTest(name = "should throw exception when calling writeInt32 at absolute position where integer would not fit. BufferProvider={0}") + @MethodSource("bufferProviders") + void shouldThrowExceptionWhenIntegerDoesNotFitWriteInt32(final BufferProvider bufferProvider) { + try (ByteBufferBsonOutput output = new ByteBufferBsonOutput(bufferProvider)) { + // Write 10 bytes (position becomes 10) + for (int i = 0; i < 10; i++) { + output.writeByte(0); + } + + // absolutePosition = 7 would require bytes at positions 7,8,9,10, but the last written element was at 9. + assertThrows(IllegalArgumentException.class, () -> + output.writeInt32(7, 5678) + ); + } + } + + @DisplayName("should throw exception when calling writeInt32 with negative absolute position") + @ParameterizedTest(name = "should throw exception when calling writeInt32 with negative absolute position. 
BufferProvider={0}") + @MethodSource("bufferProviders") + void shouldThrowExceptionWhenAbsolutePositionIsNegative(final BufferProvider bufferProvider) { + try (ByteBufferBsonOutput output = new ByteBufferBsonOutput(bufferProvider)) { + Assertions.assertThrows(IllegalArgumentException.class, () -> + output.writeInt32(-1, 5678) + ); + } + } + + static Stream shouldWriteInt32AbsoluteValueWithinSpanningBuffers() { + return bufferProviders().flatMap(bufferProvider -> Stream.of( + Arguments.of( + 0, // absolute position + 0x09080706, // int value + asList( + // initial data + new byte[]{0, 1, 2, 3}, + new byte[]{4, 5, 6, 7}), + asList( + // expected BsonByteBufferOutput data + new byte[]{0x06, 0x07, 0x08, 0x09}, + new byte[]{4, 5, 6, 7}), + bufferProvider // buffer to write data to + ), + Arguments.of(1, 0x09080706, + asList(new byte[]{0, 1, 2, 3}, new byte[]{4, 5, 6, 7}), + asList(new byte[]{0, 0x06, 0x07, 0x08}, new byte[]{0x09, 5, 6, 7}), + bufferProvider), + Arguments.of(2, 0x09080706, + asList(new byte[]{0, 1, 2, 3}, new byte[]{4, 5, 6, 7}), + asList(new byte[]{0, 1, 0x06, 0x07}, new byte[]{0x08, 0x09, 6, 7}), + bufferProvider + ), + Arguments.of(3, 0x09080706, + asList(new byte[]{0, 1, 2, 3}, new byte[]{4, 5, 6, 7}), + asList(new byte[]{0, 1, 2, 0x06}, new byte[]{0x07, 0x08, 0x09, 7}), + bufferProvider + ), + Arguments.of(4, 0x09080706, + asList(new byte[]{0, 1, 2, 3}, new byte[]{4, 5, 6, 7}), + asList(new byte[]{0, 1, 2, 3}, new byte[]{0x06, 0x07, 0x08, 0x09}), + bufferProvider + ))); + } + + @ParameterizedTest(name = "should write Int32 absolute value within spanning buffers. " + + "Parameters: absolutePosition={0}, intValue={1}, initialData={2}, expectedBuffers={3}, bufferProvider={4}") + @MethodSource + void shouldWriteInt32AbsoluteValueWithinSpanningBuffers( + final int absolutePosition, + final int intValue, + final List initialData, + final List expectedBuffers, + final BufferProvider bufferProvider) { + + List buffers = new ArrayList<>(); + try (ByteBufferBsonOutput output = new ByteBufferBsonOutput(size -> bufferProvider.getBuffer(Integer.BYTES))) { + + //given + initialData.forEach(output::writeBytes); + + //when + output.writeInt32(absolutePosition, intValue); + + //then + buffers = output.getByteBuffers(); + assertEquals(expectedBuffers.size(), buffers.size(), "Number of buffers mismatch"); + assertBufferContents(expectedBuffers, buffers); + } finally { + buffers.forEach(ByteBuf::release); + } + } + + static Stream int32SpanningBuffersData() { + return bufferProviders().flatMap(bufferProvider -> Stream.of( + // Test case 1: No initial data; entire int written into one buffer. 
+ Arguments.of(0x09080706, + asList( + // No initial data + ), + asList( + // expected BsonByteBufferOutput data + new byte[]{0x06, 0x07, 0x08, 0x09}), + 4, // expected overall position after write (0 + 4) + 4, // expected last buffer position (buffer fully written) + bufferProvider //buffer to write data to + ), + Arguments.of(0x09080706, + asList(new byte[]{0}), + asList(new byte[]{0, 0x06, 0x07, 0x08}, new byte[]{0x09, 0, 0, 0}), 5, 1, + bufferProvider + ), + Arguments.of(0x09080706, + asList(new byte[]{0, 1}), + asList(new byte[]{0, 1, 0x06, 0x07}, new byte[]{0x08, 0x09, 0, 0}), 6, 2, + bufferProvider + ), + Arguments.of(0x09080706, + asList(new byte[]{0, 1, 2}), + asList(new byte[]{0, 1, 2, 0x06}, new byte[]{0x07, 0x08, 0x09, 0}), 7, 3, + bufferProvider + ), + Arguments.of(0x09080706, + asList(new byte[]{0, 1, 2, 3}), + asList(new byte[]{0, 1, 2, 3}, new byte[]{0x06, 0x07, 0x08, 0x09}), 8, 4, + bufferProvider + ))); + } + + static Stream int64SpanningBuffersData() { + return bufferProviders().flatMap(bufferProvider -> Stream.of( + // Test case 1: No initial data; entire long written into one buffer. + Arguments.of(0x0A0B0C0D0E0F1011L, + asList( + // No initial data + ), + asList( + // expected BsonByteBufferOutput data + new byte[]{0x11, 0x10, 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A} + ), + 8, // expected overall position after write (0 + 8) + 8, // expected last buffer position (buffer fully written) + bufferProvider //buffer to write data to + ), + Arguments.of(0x0A0B0C0D0E0F1011L, + asList(new byte[]{0}), + asList(new byte[]{0, 0x11, 0x10, 0x0F, 0x0E, 0x0D, 0x0C, 0x0B}, new byte[]{0x0A, 0, 0, 0, 0, 0, 0, 0}), + 9, 1, + bufferProvider + ), + Arguments.of(0x0A0B0C0D0E0F1011L, + asList(new byte[]{0, 1}), + asList(new byte[]{0, 1, 0x11, 0x10, 0x0F, 0x0E, 0x0D, 0x0C}, new byte[]{0x0B, 0x0A, 0, 0, 0, 0, 0, 0}), + 10, 2, + bufferProvider + ), + Arguments.of(0x0A0B0C0D0E0F1011L, + asList(new byte[]{0, 1, 2}), + asList(new byte[]{0, 1, 2, 0x11, 0x10, 0x0F, 0x0E, 0x0D}, new byte[]{0x0C, 0x0B, 0x0A, 0, 0, 0, 0, 0}), + 11, 3, + bufferProvider + ), + Arguments.of(0x0A0B0C0D0E0F1011L, + asList(new byte[]{0, 1, 2, 3}), + asList(new byte[]{0, 1, 2, 3, 0x11, 0x10, 0x0F, 0x0E}, new byte[]{0x0D, 0x0C, 0x0B, 0x0A, 0, 0, 0, 0}), + 12, 4, + bufferProvider + ), + Arguments.of(0x0A0B0C0D0E0F1011L, + asList(new byte[]{0, 1, 2, 3, 4}), + asList(new byte[]{0, 1, 2, 3, 4, 0x11, 0x10, 0x0F}, new byte[]{0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0, 0, 0}), + 13, 5, + bufferProvider + ), + Arguments.of(0x0A0B0C0D0E0F1011L, + asList(new byte[]{0, 1, 2, 3, 4, 5}), + asList(new byte[]{0, 1, 2, 3, 4, 5, 0x11, 0x10}, new byte[]{0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0, 0}), + 14, 6, + bufferProvider + ), Arguments.of(0x0A0B0C0D0E0F1011L, + asList(new byte[]{0, 1, 2, 3, 4, 5, 6}), + asList(new byte[]{0, 1, 2, 3, 4, 5, 6, 0x11}, new byte[]{0x10, 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0}), + 15, 7, + bufferProvider + ), + Arguments.of(0x0A0B0C0D0E0F1011L, + asList(new byte[]{0, 1, 2, 3, 4, 5, 6, 7}), + asList(new byte[]{0, 1, 2, 3, 4, 5, 6, 7}, new byte[]{0x11, 0x10, 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A}), + 16, 8, + bufferProvider + ))); + } + + @ParameterizedTest(name = "should write Int32 within spanning buffers. 
" + + "Parameters: intValue={0}, initialData={1}, expectedBuffers={2}, expectedOutputPosition={3}, " + + "expectedLastBufferPosition={4}, bufferProvider={5}") + @MethodSource("int32SpanningBuffersData") + void shouldWriteInt32WithinSpanningBuffers( + final int intValue, + final List initialData, + final List expectedBuffers, + final int expectedOutputPosition, + final int expectedLastBufferPosition, + final BufferProvider bufferProvider) { + + List buffers = new ArrayList<>(); + try (ByteBufferBsonOutput output = + new ByteBufferBsonOutput(size -> bufferProvider.getBuffer(Integer.BYTES))) { + + //given + initialData.forEach(output::writeBytes); + + //when + output.writeInt32(intValue); + + //then + //getByteBuffers returns ByteBuffers with limit() set to position, position set to 0. + buffers = output.getByteBuffers(); + assertEquals(expectedBuffers.size(), buffers.size(), "Number of buffers mismatch"); + assertBufferContents(expectedBuffers, buffers); + + assertEquals(expectedLastBufferPosition, buffers.get(buffers.size() - 1).limit()); + assertEquals(expectedOutputPosition, output.getPosition()); + } finally { + buffers.forEach(ByteBuf::release); + } + } + + @ParameterizedTest(name = "should write Int64 within spanning buffers. " + + "Parameters: intValue={0}, initialData={1}, expectedBuffers={2}, expectedOutputPosition={3}, " + + "expectedLastBufferPosition={4}, bufferProvider={5}") + @MethodSource("int64SpanningBuffersData") + void shouldWriteInt64WithinSpanningBuffers( + final long intValue, + final List initialData, + final List expectedBuffers, + final int expectedOutputPosition, + final int expectedLastBufferPosition, + final BufferProvider bufferProvider) { + + List buffers = new ArrayList<>(); + try (ByteBufferBsonOutput output = + new ByteBufferBsonOutput(size -> bufferProvider.getBuffer(Long.BYTES))) { + + //given + initialData.forEach(output::writeBytes); + + //when + output.writeInt64(intValue); + + //then + //getByteBuffers returns ByteBuffers with limit() set to position, position set to 0. + buffers = output.getByteBuffers(); + assertEquals(expectedBuffers.size(), buffers.size(), "Number of buffers mismatch"); + assertBufferContents(expectedBuffers, buffers); + + assertEquals(expectedLastBufferPosition, buffers.get(buffers.size() - 1).limit()); + assertEquals(expectedOutputPosition, output.getPosition()); + } finally { + buffers.forEach(ByteBuf::release); + } + } + + @ParameterizedTest(name = "should write double within spanning buffers. " + + "Parameters: intValue={0}, initialData={1}, expectedBuffers={2}, expectedOutputPosition={3}, " + + "expectedLastBufferPosition={4}, bufferProvider={5}") + @MethodSource("int64SpanningBuffersData") + void shouldWriteDoubleWithinSpanningBuffers( + final long intValue, + final List initialData, + final List expectedBuffers, + final int expectedOutputPosition, + final int expectedLastBufferPosition, + final BufferProvider bufferProvider) { + + List buffers = new ArrayList<>(); + try (ByteBufferBsonOutput output = + new ByteBufferBsonOutput(size -> bufferProvider.getBuffer(Long.BYTES))) { + + //given + initialData.forEach(output::writeBytes); + + //when + output.writeDouble(Double.longBitsToDouble(intValue)); + + //then + //getByteBuffers returns ByteBuffers with limit() set to position, position set to 0. 
+            buffers = output.getByteBuffers();
+            assertEquals(expectedBuffers.size(), buffers.size(), "Number of buffers mismatch");
+            assertBufferContents(expectedBuffers, buffers);
+
+            assertEquals(expectedLastBufferPosition, buffers.get(buffers.size() - 1).limit());
+            assertEquals(expectedOutputPosition, output.getPosition());
+        } finally {
+            buffers.forEach(ByteBuf::release);
+        }
+    }
+
+    private static void assertBufferContents(final List<byte[]> expectedBuffersContent,
+                                             final List<ByteBuf> actualByteBuffers) {
+        for (int i = 0; i < expectedBuffersContent.size(); i++) {
+            ByteBuf byteBuf = actualByteBuffers.get(i);
+            byte[] expectedBufferBytes = expectedBuffersContent.get(i);
+            byte[] actualBufferBytes = new byte[byteBuf.capacity()]; // capacity is used because we want to compare internal ByteBuffer arrays.
+            byteBuf.get(actualBufferBytes, 0, byteBuf.limit());
+
+            assertEquals(expectedBufferBytes.length, byteBuf.capacity());
+            assertArrayEquals(expectedBufferBytes, actualBufferBytes,
+                    "Buffer " + i + " contents mismatch");
+        }
+    }
+
+    /*
+       Tests that all Unicode code points are correctly encoded in UTF-8 when:
+       - The buffer has just enough capacity for the UTF-8 string plus a null terminator.
+       - The encoded string may span multiple buffers.
+
+       To test edge conditions, the test writes a UTF-8 CString/String at various starting offsets. This simulates scenarios where data
+       doesn't start at index 0, forcing the string to span multiple buffers.
+
+       For example, assume the encoded string requires N bytes and a null terminator:
+       1. startingOffset == 0:
+          [ S S S ... S NULL ]
+
+       2. startingOffset == 2:
+          ("X" represents dummy bytes written before the string.)
+          Buffer 1: [ X X | S S S ... ] (Buffer 1 runs out of space; the remaining bytes, including the NULL, are written to Buffer 2.)
+          Buffer 2: [ S NULL ... ]
+
+       3. startingOffset == bufferAllocationSize:
+          Buffer 1: [ X X X ... X ]
+          Buffer 2: [ S S S ... S NULL ]
+    */
+    @Nested
+    @DisplayName("UTF-8 String and CString Buffer Boundary Tests")
+    class Utf8StringTests {
+
+        @DisplayName("should write UTF-8 CString across buffers")
+        @ParameterizedTest(name = "should write UTF-8 CString across buffers. BufferProvider={0}")
+        @MethodSource("com.mongodb.internal.connection.ByteBufferBsonOutputTest#bufferProviders")
+        void shouldWriteCStringAcrossBuffersUTF8(final BufferProvider bufferProvider) throws IOException {
+            for (Integer codePoint : ALL_CODE_POINTS_EXCLUDING_SURROGATES) {
+                String stringToEncode = new String(Character.toChars(codePoint)) + "a";
+                byte[] expectedStringEncoding = stringToEncode.getBytes(StandardCharsets.UTF_8);
+                int bufferAllocationSize = expectedStringEncoding.length + "\u0000".length();
+                testWriteCStringAcrossBuffers(bufferProvider, codePoint, bufferAllocationSize, stringToEncode, expectedStringEncoding);
+            }
+        }
+
+        @DisplayName("should write UTF-8 CString across buffers with a branch")
+        @ParameterizedTest(name = "should write UTF-8 CString across buffers with a branch. 
BufferProvider={0}") + @MethodSource("com.mongodb.internal.connection.ByteBufferBsonOutputTest#bufferProviders") + void shouldWriteCStringAcrossBuffersUTF8WithBranch(final BufferProvider bufferProvider) throws IOException { + for (Integer codePoint : ALL_CODE_POINTS_EXCLUDING_SURROGATES) { + String stringToEncode = new String(Character.toChars(codePoint)) + "a"; + int bufferAllocationSize = stringToEncode.getBytes(StandardCharsets.UTF_8).length + "\u0000".length(); + byte[] expectedEncoding = stringToEncode.getBytes(StandardCharsets.UTF_8); + + testWriteCStringAcrossBufferWithBranch(bufferProvider, codePoint, bufferAllocationSize, stringToEncode, expectedEncoding); + } + } + + @DisplayName("should write UTF-8 String across buffers") + @ParameterizedTest(name = "should write UTF-8 String across buffers. BufferProvider={0}") + @MethodSource("com.mongodb.internal.connection.ByteBufferBsonOutputTest#bufferProviders") + void shouldWriteStringAcrossBuffersUTF8(final BufferProvider bufferProvider) throws IOException { + for (Integer codePoint : ALL_CODE_POINTS_EXCLUDING_SURROGATES) { + // given + String stringToEncode = new String(Character.toChars(codePoint)) + "a"; + //4 bytes for the length prefix, bytes for encoded String, and 1 byte for the null terminator + int bufferAllocationSize = Integer.BYTES + stringToEncode.getBytes(StandardCharsets.UTF_8).length + "\u0000".length(); + byte[] expectedEncoding = stringToEncode.getBytes(StandardCharsets.UTF_8); + testWriteStringAcrossBuffers(bufferProvider, + codePoint, + bufferAllocationSize, + stringToEncode, + expectedEncoding); + } + } + + @DisplayName("should write UTF-8 String across buffers with branch") + @ParameterizedTest(name = "should write UTF-8 String across buffers with branch. BufferProvider={0}") + @MethodSource("com.mongodb.internal.connection.ByteBufferBsonOutputTest#bufferProviders") + void shouldWriteStringAcrossBuffersUTF8WithBranch(final BufferProvider bufferProvider) throws IOException { + for (Integer codePoint : ALL_CODE_POINTS_EXCLUDING_SURROGATES) { + String stringToEncode = new String(Character.toChars(codePoint)) + "a"; + //4 bytes for the length prefix, bytes for encoded String, and 1 byte for the null terminator + int bufferAllocationSize = Integer.BYTES + stringToEncode.getBytes(StandardCharsets.UTF_8).length + "\u0000".length(); + byte[] expectedEncoding = stringToEncode.getBytes(StandardCharsets.UTF_8); + testWriteStringAcrossBuffersWithBranch( + bufferProvider, + bufferAllocationSize, + stringToEncode, + codePoint, + expectedEncoding); + } + } + + /* + Tests that malformed surrogate pairs are encoded as-is without substituting any code point. + This known bug and corresponding test remain for backward compatibility. + Ticket: JAVA-5575 + */ + @DisplayName("should write malformed surrogate CString across buffers") + @ParameterizedTest(name = "should write malformed surrogate CString across buffers. 
BufferProvider={0}") + @MethodSource("com.mongodb.internal.connection.ByteBufferBsonOutputTest#bufferProviders") + void shouldWriteCStringWithMalformedSurrogates(final BufferProvider bufferProvider) throws IOException { + for (Integer surrogateCodePoint : ALL_SURROGATE_CODE_POINTS) { + byte[] expectedEncoding = new byte[]{ + (byte) (0xE0 | ((surrogateCodePoint >> 12) & 0x0F)), + (byte) (0x80 | ((surrogateCodePoint >> 6) & 0x3F)), + (byte) (0x80 | (surrogateCodePoint & 0x3F)) + }; + String str = new String(Character.toChars(surrogateCodePoint)); + int bufferAllocationSize = expectedEncoding.length + "\u0000".length(); + + testWriteCStringAcrossBuffers( + bufferProvider, + surrogateCodePoint, + bufferAllocationSize, + str, + expectedEncoding); + } + } + + /* + Tests that malformed surrogate pairs are encoded as-is without substituting any code point. + This known bug and corresponding test remain for backward compatibility. + Ticket: JAVA-5575 + */ + @DisplayName("should write malformed surrogate CString across buffers with branch") + @ParameterizedTest(name = "should write malformed surrogate CString across buffers with branch. BufferProvider={0}") + @MethodSource("com.mongodb.internal.connection.ByteBufferBsonOutputTest#bufferProviders") + void shouldWriteCStringWithMalformedSurrogatesWithBranch(final BufferProvider bufferProvider) throws IOException { + for (Integer surrogateCodePoint : ALL_SURROGATE_CODE_POINTS) { + byte[] expectedEncoding = new byte[]{ + (byte) (0xE0 | ((surrogateCodePoint >> 12) & 0x0F)), + (byte) (0x80 | ((surrogateCodePoint >> 6) & 0x3F)), + (byte) (0x80 | (surrogateCodePoint & 0x3F)) + }; + String str = new String(Character.toChars(surrogateCodePoint)); + int bufferAllocationSize = expectedEncoding.length + "\u0000".length(); + + testWriteCStringAcrossBufferWithBranch( + bufferProvider, + surrogateCodePoint, + bufferAllocationSize, + str, + expectedEncoding); + } + } + + @DisplayName("should write surrogate CString across buffers") + @ParameterizedTest(name = "should write surrogate CString across buffers. BufferProvider={0}") + @MethodSource("com.mongodb.internal.connection.ByteBufferBsonOutputTest#bufferProviders") + void shouldWriteCStringWithSurrogatePairs(final BufferProvider bufferProvider) throws IOException { + for (Integer surrogateCodePoint : ALL_UTF_16_CODE_POINTS_FORMED_BY_SURROGATE_PAIRS) { + String stringToEncode = new String(toSurrogatePair(surrogateCodePoint)); + byte[] expectedEncoding = stringToEncode.getBytes(StandardCharsets.UTF_8); + int bufferAllocationSize = expectedEncoding.length + "\u0000".length(); + + testWriteCStringAcrossBuffers( + bufferProvider, + surrogateCodePoint, + bufferAllocationSize, + stringToEncode, + expectedEncoding); + } + } + + @DisplayName("should write surrogate CString across buffers with branch") + @ParameterizedTest(name = "should write surrogate CString across buffers with branch. 
BufferProvider={0}") + @MethodSource("com.mongodb.internal.connection.ByteBufferBsonOutputTest#bufferProviders") + void shouldWriteCStringWithSurrogatePairsWithBranch(final BufferProvider bufferProvider) throws IOException { + for (Integer surrogateCodePoint : ALL_UTF_16_CODE_POINTS_FORMED_BY_SURROGATE_PAIRS) { + String stringToEncode = new String(toSurrogatePair(surrogateCodePoint)); + byte[] expectedEncoding = stringToEncode.getBytes(StandardCharsets.UTF_8); + int bufferAllocationSize = expectedEncoding.length + "\u0000".length(); + + testWriteCStringAcrossBufferWithBranch( + bufferProvider, + surrogateCodePoint, + bufferAllocationSize, + stringToEncode, + expectedEncoding); + } + } + + @DisplayName("should write surrogate String across buffers") + @ParameterizedTest(name = "should write surrogate String across buffers. BufferProvider={0}") + @MethodSource("com.mongodb.internal.connection.ByteBufferBsonOutputTest#bufferProviders") + void shouldWriteStringWithSurrogatePairs(final BufferProvider bufferProvider) throws IOException { + for (Integer surrogateCodePoint : ALL_UTF_16_CODE_POINTS_FORMED_BY_SURROGATE_PAIRS) { + String stringToEncode = new String(toSurrogatePair(surrogateCodePoint)); + byte[] expectedEncoding = stringToEncode.getBytes(StandardCharsets.UTF_8); + int bufferAllocationSize = expectedEncoding.length + "\u0000".length(); + + testWriteStringAcrossBuffers( + bufferProvider, + surrogateCodePoint, + bufferAllocationSize, + stringToEncode, + expectedEncoding); + } + } + + @DisplayName("should write surrogate String across buffers with branch") + @ParameterizedTest(name = "should write surrogate String across buffers with branch. BufferProvider={0}") + @MethodSource("com.mongodb.internal.connection.ByteBufferBsonOutputTest#bufferProviders") + void shouldWriteStringWithSurrogatePairsWithBranch(final BufferProvider bufferProvider) throws IOException { + for (Integer surrogateCodePoint : ALL_UTF_16_CODE_POINTS_FORMED_BY_SURROGATE_PAIRS) { + String stringToEncode = new String(toSurrogatePair(surrogateCodePoint)); + byte[] expectedEncoding = stringToEncode.getBytes(StandardCharsets.UTF_8); + int bufferAllocationSize = expectedEncoding.length + "\u0000".length(); + + testWriteStringAcrossBuffersWithBranch( + bufferProvider, + bufferAllocationSize, + stringToEncode, + surrogateCodePoint, + expectedEncoding); + } + } + + /* + Tests that malformed surrogate pairs are encoded as-is without substituting any code point. + This known bug and corresponding test remain for backward compatibility. + Ticket: JAVA-5575 + */ + @DisplayName("should write malformed surrogate String across buffers") + @ParameterizedTest(name = "should write malformed surrogate String across buffers. 
BufferProvider={0}") + @MethodSource("com.mongodb.internal.connection.ByteBufferBsonOutputTest#bufferProviders") + void shouldWriteStringWithMalformedSurrogates(final BufferProvider bufferProvider) throws IOException { + for (Integer surrogateCodePoint : ALL_SURROGATE_CODE_POINTS) { + byte[] expectedEncoding = new byte[]{ + (byte) (0xE0 | ((surrogateCodePoint >> 12) & 0x0F)), + (byte) (0x80 | ((surrogateCodePoint >> 6) & 0x3F)), + (byte) (0x80 | (surrogateCodePoint & 0x3F)) + }; + String stringToEncode = new String(Character.toChars(surrogateCodePoint)); + int bufferAllocationSize = expectedEncoding.length + "\u0000".length(); + + testWriteStringAcrossBuffers( + bufferProvider, + surrogateCodePoint, + bufferAllocationSize, + stringToEncode, + expectedEncoding); + } + } + + /* + Tests that malformed surrogate pairs are encoded as-is without substituting any code point. + This known bug and corresponding test remain for backward compatibility. + Ticket: JAVA-5575 + */ + @DisplayName("should write malformed surrogate String across buffers with branch") + @ParameterizedTest(name = "should write malformed surrogate String across buffers with branch. BufferProvider={0}") + @MethodSource("com.mongodb.internal.connection.ByteBufferBsonOutputTest#bufferProviders") + void shouldWriteStringWithMalformedSurrogatesWithBranch(final BufferProvider bufferProvider) throws IOException { + for (Integer surrogateCodePoint : ALL_SURROGATE_CODE_POINTS) { + byte[] expectedEncoding = new byte[]{ + (byte) (0xE0 | ((surrogateCodePoint >> 12) & 0x0F)), + (byte) (0x80 | ((surrogateCodePoint >> 6) & 0x3F)), + (byte) (0x80 | (surrogateCodePoint & 0x3F)) + }; + String stringToEncode = new String(Character.toChars(surrogateCodePoint)); + int bufferAllocationSize = expectedEncoding.length + "\u0000".length(); + + testWriteStringAcrossBuffersWithBranch( + bufferProvider, + bufferAllocationSize, + stringToEncode, + surrogateCodePoint, + expectedEncoding); + } + } + + private void testWriteCStringAcrossBuffers(final BufferProvider bufferProvider, + final Integer surrogateCodePoint, + final int bufferAllocationSize, + final String str, + final byte[] expectedEncoding) throws IOException { + for (int startingOffset = 0; startingOffset <= bufferAllocationSize; startingOffset++) { + //given + List actualByteBuffers = emptyList(); + + try (ByteBufferBsonOutput bsonOutput = new ByteBufferBsonOutput( + size -> bufferProvider.getBuffer(bufferAllocationSize))) { + // Write an initial startingOffset of empty bytes to shift the start position + bsonOutput.write(new byte[startingOffset]); + + // when + bsonOutput.writeCString(str); + + // then + actualByteBuffers = bsonOutput.getDuplicateByteBuffers(); + byte[] actualFlattenedByteBuffersBytes = getBytes(bsonOutput); + assertEncodedResult(surrogateCodePoint, + startingOffset, + expectedEncoding, + bufferAllocationSize, + actualByteBuffers, + actualFlattenedByteBuffersBytes); + } finally { + actualByteBuffers.forEach(ByteBuf::release); + } + } + } + + private void testWriteStringAcrossBuffers(final BufferProvider bufferProvider, + final Integer codePoint, + final int bufferAllocationSize, + final String stringToEncode, + final byte[] expectedEncoding) throws IOException { + for (int startingOffset = 0; startingOffset <= bufferAllocationSize; startingOffset++) { + //given + List actualByteBuffers = emptyList(); + + try (ByteBufferBsonOutput actualBsonOutput = new ByteBufferBsonOutput( + size -> bufferProvider.getBuffer(bufferAllocationSize))) { + // Write an initial startingOffset of empty 
bytes to shift the start position + actualBsonOutput.write(new byte[startingOffset]); + + // when + actualBsonOutput.writeString(stringToEncode); + + // then + actualByteBuffers = actualBsonOutput.getDuplicateByteBuffers(); + byte[] actualFlattenedByteBuffersBytes = getBytes(actualBsonOutput); + + assertEncodedStringSize(codePoint, + expectedEncoding, + actualFlattenedByteBuffersBytes, + startingOffset); + assertEncodedResult(codePoint, + startingOffset + Integer.BYTES, // +4 bytes for the length prefix + expectedEncoding, + bufferAllocationSize, + actualByteBuffers, + actualFlattenedByteBuffersBytes); + } finally { + actualByteBuffers.forEach(ByteBuf::release); + } + } + } + + private void testWriteStringAcrossBuffersWithBranch(final BufferProvider bufferProvider, + final int bufferAllocationSize, + final String stringToEncode, + final Integer codePoint, + final byte[] expectedEncoding) throws IOException { + for (int startingOffset = 0; startingOffset <= bufferAllocationSize; startingOffset++) { + //given + List actualByteBuffers = emptyList(); + List actualBranchByteBuffers = emptyList(); + + try (ByteBufferBsonOutput actualBsonOutput = new ByteBufferBsonOutput( + size -> bufferProvider.getBuffer(bufferAllocationSize))) { + + try (ByteBufferBsonOutput.Branch branchOutput = actualBsonOutput.branch()) { + // Write an initial startingOffset of empty bytes to shift the start position + branchOutput.write(new byte[startingOffset]); + + // when + branchOutput.writeString(stringToEncode); + + // then + actualBranchByteBuffers = branchOutput.getDuplicateByteBuffers(); + byte[] actualFlattenedByteBuffersBytes = getBytes(branchOutput); + assertEncodedStringSize( + codePoint, + expectedEncoding, + actualFlattenedByteBuffersBytes, + startingOffset); + assertEncodedResult(codePoint, + startingOffset + Integer.BYTES, // +4 bytes for the length prefix + expectedEncoding, + bufferAllocationSize, + actualBranchByteBuffers, + actualFlattenedByteBuffersBytes); + } + + // then + actualByteBuffers = actualBsonOutput.getDuplicateByteBuffers(); + byte[] actualFlattenedByteBuffersBytes = getBytes(actualBsonOutput); + assertEncodedStringSize( + codePoint, + expectedEncoding, + actualFlattenedByteBuffersBytes, + startingOffset); + assertEncodedResult(codePoint, + startingOffset + Integer.BYTES, // +4 bytes for the length prefix + expectedEncoding, + bufferAllocationSize, + actualByteBuffers, + actualFlattenedByteBuffersBytes); + + } finally { + actualByteBuffers.forEach(ByteBuf::release); + actualBranchByteBuffers.forEach(ByteBuf::release); + } + } + } + + // Verify that the resulting byte array (excluding the starting offset and null terminator) + // matches the expected UTF-8 encoded length of the test string. + private void assertEncodedStringSize(final Integer codePoint, + final byte[] expectedStringEncoding, + final byte[] actualFlattenedByteBuffersBytes, + final int startingOffset) { + int littleEndianLength = reverseBytes(expectedStringEncoding.length + "\u0000".length()); + byte[] expectedEncodedStringSize = Ints.toByteArray(littleEndianLength); + byte[] actualEncodedStringSize = copyOfRange( + actualFlattenedByteBuffersBytes, + startingOffset, + startingOffset + Integer.BYTES); + + assertArrayEquals( + expectedEncodedStringSize, + actualEncodedStringSize, + () -> format("Encoded String size before the test String does not match expected size. 
" + + "Failed with code point: %s, startingOffset: %s", + codePoint, + startingOffset)); + } + + private void testWriteCStringAcrossBufferWithBranch(final BufferProvider bufferProvider, + final Integer codePoint, + final int bufferAllocationSize, + final String str, final byte[] expectedEncoding) throws IOException { + for (int startingOffset = 0; startingOffset <= bufferAllocationSize; startingOffset++) { + List actualBranchByteBuffers = emptyList(); + List actualByteBuffers = emptyList(); + + try (ByteBufferBsonOutput bsonOutput = new ByteBufferBsonOutput( + size -> bufferProvider.getBuffer(bufferAllocationSize))) { + + try (ByteBufferBsonOutput.Branch branchOutput = bsonOutput.branch()) { + // Write an initial startingOffset of empty bytes to shift the start position + branchOutput.write(new byte[startingOffset]); + + // when + branchOutput.writeCString(str); + + // then + actualBranchByteBuffers = branchOutput.getDuplicateByteBuffers(); + byte[] actualFlattenedByteBuffersBytes = getBytes(branchOutput); + assertEncodedResult(codePoint, + startingOffset, + expectedEncoding, + bufferAllocationSize, + actualBranchByteBuffers, + actualFlattenedByteBuffersBytes); + } + + // then + actualByteBuffers = bsonOutput.getDuplicateByteBuffers(); + byte[] actualFlattenedByteBuffersBytes = getBytes(bsonOutput); + assertEncodedResult(codePoint, + startingOffset, + expectedEncoding, + bufferAllocationSize, + actualByteBuffers, + actualFlattenedByteBuffersBytes); + } finally { + actualByteBuffers.forEach(ByteBuf::release); + actualBranchByteBuffers.forEach(ByteBuf::release); + } + } + } + + private void assertEncodedResult(final int codePoint, + final int startingOffset, + final byte[] expectedEncoding, + final int expectedBufferAllocationSize, + final List actualByteBuffers, + final byte[] actualFlattenedByteBuffersBytes) { + int expectedCodeUnitCount = expectedEncoding.length; + int byteCount = startingOffset + expectedCodeUnitCount + 1; + int expectedBufferCount = (byteCount + expectedBufferAllocationSize - 1) / expectedBufferAllocationSize; + int expectedLastBufferPosition = (byteCount % expectedBufferAllocationSize) == 0 ? expectedBufferAllocationSize + : byteCount % expectedBufferAllocationSize; + + assertEquals( + expectedBufferCount, + actualByteBuffers.size(), + () -> format("expectedBufferCount failed with code point: %s, offset: %s", + codePoint, + startingOffset)); + assertEquals( + expectedLastBufferPosition, + actualByteBuffers.get(actualByteBuffers.size() - 1).position(), + () -> format("expectedLastBufferPosition failed with code point: %s, offset: %s", + codePoint, + startingOffset)); + + for (ByteBuf byteBuf : actualByteBuffers.subList(0, actualByteBuffers.size() - 1)) { + assertEquals( + byteBuf.position(), + byteBuf.limit(), + () -> format("All non-final buffers are not full. Code point: %s, offset: %s", + codePoint, + startingOffset)); + } + + // Verify that the final byte array (excluding the initial offset and null terminator) + // matches the expected UTF-8 encoding of the test string + assertArrayEquals( + expectedEncoding, + Arrays.copyOfRange(actualFlattenedByteBuffersBytes, startingOffset, actualFlattenedByteBuffersBytes.length - 1), + () -> format("Expected UTF-8 encoding of the test string does not match actual encoding. Code point: %s, offset: %s", + codePoint, + startingOffset)); + assertEquals( + 0, + actualFlattenedByteBuffersBytes[actualFlattenedByteBuffersBytes.length - 1], + () -> format("String does not end with null terminator. 
Code point: %s, offset: %s", + codePoint, + startingOffset)); + } + + public char[] toSurrogatePair(final int codePoint) { + if (!Character.isValidCodePoint(codePoint) || codePoint < 0x10000) { + throw new IllegalArgumentException("Invalid code point: " + codePoint); + } + char[] result = new char[2]; + result[0] = Character.highSurrogate(codePoint); + result[1] = Character.lowSurrogate(codePoint); + return result; + } + + } + + private static byte[] getBytes(final OutputBuffer basicOutputBuffer) throws IOException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(basicOutputBuffer.getSize()); + basicOutputBuffer.pipe(baos); + return baos.toByteArray(); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ClusterClockSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/ClusterClockSpecification.groovy new file mode 100644 index 00000000000..efc84ddc619 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ClusterClockSpecification.groovy @@ -0,0 +1,64 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection + +import org.bson.BsonDocument +import org.bson.BsonTimestamp +import spock.lang.Specification + + +class ClusterClockSpecification extends Specification { + def 'should advance cluster time'() { + given: + def firstClusterTime = new BsonDocument('clusterTime', new BsonTimestamp(42, 1)) + def secondClusterTime = new BsonDocument('clusterTime', new BsonTimestamp(52, 1)) + def olderClusterTime = new BsonDocument('clusterTime', new BsonTimestamp(22, 1)) + + when: + def clock = new ClusterClock() + + then: + clock.getCurrent() == null + + when: + clock.advance(null) + + then: + clock.getCurrent() == null + clock.greaterOf(firstClusterTime) == firstClusterTime + + when: + clock.advance(firstClusterTime) + + then: + clock.getCurrent() == firstClusterTime + clock.greaterOf(secondClusterTime) == secondClusterTime + + when: + clock.advance(secondClusterTime) + + then: + clock.getCurrent() == secondClusterTime + clock.greaterOf(olderClusterTime) == secondClusterTime + + when: + clock.advance(olderClusterTime) + + then: + clock.getCurrent() == secondClusterTime + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/CommandHelperTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandHelperTest.java new file mode 100644 index 00000000000..f7873379c3b --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandHelperTest.java @@ -0,0 +1,126 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.connection; + +import com.mongodb.MongoCommandException; +import com.mongodb.ServerAddress; +import com.mongodb.ServerApi; +import com.mongodb.ServerApiVersion; +import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerId; +import com.mongodb.internal.IgnorableRequestContext; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.TimeoutSettings; +import org.bson.BsonDocument; +import org.bson.codecs.Decoder; +import org.junit.jupiter.api.Test; + +import static com.mongodb.assertions.Assertions.assertFalse; +import static com.mongodb.connection.ClusterConnectionMode.SINGLE; +import static com.mongodb.internal.connection.CommandHelper.executeCommand; +import static com.mongodb.internal.connection.CommandHelper.executeCommandAsync; +import static com.mongodb.internal.connection.CommandHelper.executeCommandWithoutCheckingForFailure; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class CommandHelperTest { + + static final BsonDocument COMMAND = BsonDocument.parse("{ping: 1}"); + static final BsonDocument OK = BsonDocument.parse("{ok: 1}"); + static final BsonDocument NOT_OK = BsonDocument.parse("{ok: 0, errmsg: 'error'}"); + + static final ConnectionDescription CONNECTION_DESCRIPTION = new ConnectionDescription( + new ServerId(new ClusterId("cluster"), new ServerAddress())); + + @Test + @SuppressWarnings("unchecked") + void testExecuteCommand() { + InternalConnection internalConnection = mock(InternalConnection.class); + ServerDescription serverDescription = mock(ServerDescription.class); + OperationContext operationContext = createOperationContext(); + + + when(internalConnection.getDescription()).thenReturn(CONNECTION_DESCRIPTION); + when(internalConnection.sendAndReceive(any(), any(), any())).thenReturn(OK); + when(internalConnection.getInitialServerDescription()).thenReturn(serverDescription); + + assertEquals(OK, + executeCommand("admin", COMMAND, SINGLE, operationContext.getServerApi(), internalConnection, operationContext)); + + verify(internalConnection).sendAndReceive(any(CommandMessage.class), any(Decoder.class), eq(operationContext)); + } + + @Test + @SuppressWarnings("unchecked") + void testExecuteCommandWithoutCheckingForFailure() { + InternalConnection internalConnection = mock(InternalConnection.class); + ServerDescription serverDescription = mock(ServerDescription.class); + OperationContext operationContext = createOperationContext(); + + when(internalConnection.getDescription()).thenReturn(CONNECTION_DESCRIPTION); + when(internalConnection.getInitialServerDescription()).thenReturn(serverDescription); + when(internalConnection.sendAndReceive(any(), any(), any())) + .thenThrow(new 
MongoCommandException(NOT_OK, new ServerAddress())); + + assertEquals(new BsonDocument(), + executeCommandWithoutCheckingForFailure("admin", COMMAND, SINGLE, operationContext.getServerApi(), + internalConnection, operationContext)); + + verify(internalConnection).sendAndReceive(any(CommandMessage.class), any(Decoder.class), eq(operationContext)); + } + + + @Test + @SuppressWarnings("unchecked") + void testExecuteCommandAsyncUsesTheOperationContext() { + InternalConnection internalConnection = mock(InternalConnection.class); + OperationContext operationContext = createOperationContext(); + ServerDescription serverDescription = mock(ServerDescription.class); + + when(internalConnection.getInitialServerDescription()).thenReturn(serverDescription); + when(internalConnection.getDescription()).thenReturn(CONNECTION_DESCRIPTION); + when(internalConnection.sendAndReceive(any(), any(), any())).thenReturn(OK); + + executeCommandAsync("admin", COMMAND, SINGLE, operationContext.getServerApi(), internalConnection, operationContext, + (r, t) -> {}); + + verify(internalConnection).sendAndReceiveAsync(any(CommandMessage.class), any(Decoder.class), eq(operationContext), any()); + } + + @Test + void testIsCommandOk() { + assertTrue(CommandHelper.isCommandOk(OK)); + assertTrue(CommandHelper.isCommandOk(BsonDocument.parse("{ok: true}"))); + assertFalse(CommandHelper.isCommandOk(NOT_OK)); + assertFalse(CommandHelper.isCommandOk(BsonDocument.parse("{ok: false}"))); + assertFalse(CommandHelper.isCommandOk(BsonDocument.parse("{ok: 11}"))); + assertFalse(CommandHelper.isCommandOk(BsonDocument.parse("{ok: 'nope'}"))); + assertFalse(CommandHelper.isCommandOk(new BsonDocument())); + } + + + OperationContext createOperationContext() { + return new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE, + new TimeoutContext(TimeoutSettings.DEFAULT), ServerApi.builder().version(ServerApiVersion.V1).build()); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageSpecification.groovy new file mode 100644 index 00000000000..77bdd5e2045 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageSpecification.groovy @@ -0,0 +1,365 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection + + +import com.mongodb.MongoNamespace +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import com.mongodb.connection.ClusterConnectionMode +import com.mongodb.connection.ServerType +import com.mongodb.internal.IgnorableRequestContext +import com.mongodb.internal.TimeoutContext +import com.mongodb.internal.bulk.InsertRequest +import com.mongodb.internal.bulk.WriteRequestWithIndex +import com.mongodb.internal.session.SessionContext +import com.mongodb.internal.validator.NoOpFieldNameValidator +import org.bson.BsonArray +import org.bson.BsonBinary +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonMaximumSizeExceededException +import org.bson.BsonString +import org.bson.BsonTimestamp +import org.bson.ByteBuf +import org.bson.ByteBufNIO +import org.bson.codecs.BsonDocumentCodec +import spock.lang.Specification + +import java.nio.ByteBuffer + +import static com.mongodb.internal.connection.SplittablePayload.Type.INSERT +import static com.mongodb.internal.operation.ServerVersionHelper.LATEST_WIRE_VERSION + +/** + * New tests must be added to {@link CommandMessageTest}. + */ +class CommandMessageSpecification extends Specification { + + def namespace = new MongoNamespace('db.test') + def command = new BsonDocument('find', new BsonString(namespace.collectionName)) + def fieldNameValidator = NoOpFieldNameValidator.INSTANCE + + def 'should encode command message with OP_MSG when server version is >= 3.6'() { + given: + def message = new CommandMessage(namespace.getDatabaseName(), command, fieldNameValidator, readPreference, + MessageSettings.builder() + .maxWireVersion(LATEST_WIRE_VERSION) + .serverType(serverType as ServerType) + .sessionSupported(true) + .build(), + responseExpected, MessageSequences.EmptyMessageSequences.INSTANCE, clusterConnectionMode, null) + def output = new ByteBufferBsonOutput(new SimpleBufferProvider()) + + when: + message.encode(output, operationContext) + + then: + def byteBuf = new ByteBufNIO(ByteBuffer.wrap(output.toByteArray())) + def messageHeader = new MessageHeader(byteBuf, 512) + def replyHeader = new ReplyHeader(byteBuf, messageHeader) + messageHeader.opCode == OpCode.OP_MSG.value + replyHeader.requestId < RequestMessage.currentGlobalId + replyHeader.responseTo == 0 + replyHeader.hasMoreToCome() != responseExpected + + def expectedCommandDocument = command.clone() + .append('$db', new BsonString(namespace.databaseName)) + + if (operationContext.getSessionContext().clusterTime != null) { + expectedCommandDocument.append('$clusterTime', operationContext.getSessionContext().clusterTime) + } + if (operationContext.getSessionContext().hasSession() && responseExpected) { + expectedCommandDocument.append('lsid', operationContext.getSessionContext().sessionId) + } + + if (readPreference != ReadPreference.primary()) { + expectedCommandDocument.append('$readPreference', readPreference.toDocument()) + } else if (clusterConnectionMode == ClusterConnectionMode.SINGLE && serverType != ServerType.SHARD_ROUTER) { + expectedCommandDocument.append('$readPreference', ReadPreference.primaryPreferred().toDocument()) + } + getCommandDocument(byteBuf, replyHeader) == expectedCommandDocument + + cleanup: + output.close() + + where: + [readPreference, serverType, clusterConnectionMode, operationContext, responseExpected, isCryptd] << [ + [ReadPreference.primary(), ReadPreference.secondary()], + [ServerType.REPLICA_SET_PRIMARY, ServerType.SHARD_ROUTER], + [ClusterConnectionMode.SINGLE, 
ClusterConnectionMode.MULTIPLE], + [ + new OperationContext( + IgnorableRequestContext.INSTANCE, + Stub(SessionContext) { + hasSession() >> false + getClusterTime() >> null + getSessionId() >> new BsonDocument('id', new BsonBinary([1, 2, 3] as byte[])) + getReadConcern() >> ReadConcern.DEFAULT + }, Stub(TimeoutContext), null), + new OperationContext( + IgnorableRequestContext.INSTANCE, + Stub(SessionContext) { + hasSession() >> false + getClusterTime() >> new BsonDocument('clusterTime', new BsonTimestamp(42, 1)) + getReadConcern() >> ReadConcern.DEFAULT + }, Stub(TimeoutContext), null), + new OperationContext( + IgnorableRequestContext.INSTANCE, + Stub(SessionContext) { + hasSession() >> true + getClusterTime() >> null + getSessionId() >> new BsonDocument('id', new BsonBinary([1, 2, 3] as byte[])) + getReadConcern() >> ReadConcern.DEFAULT + }, Stub(TimeoutContext), null), + new OperationContext( + IgnorableRequestContext.INSTANCE, + Stub(SessionContext) { + hasSession() >> true + getClusterTime() >> new BsonDocument('clusterTime', new BsonTimestamp(42, 1)) + getSessionId() >> new BsonDocument('id', new BsonBinary([1, 2, 3] as byte[])) + getReadConcern() >> ReadConcern.DEFAULT + }, Stub(TimeoutContext), null) + ], + [true, false], + [true, false] + ].combinations() + } + + String getString(final ByteBuf byteBuf) { + def byteArrayOutputStream = new ByteArrayOutputStream() + def cur = byteBuf.get() + while (cur != 0) { + byteArrayOutputStream.write(cur) + cur = byteBuf.get() + } + new String(byteArrayOutputStream.toByteArray(), 'UTF-8') + } + + def 'should get command document'() { + given: + def message = new CommandMessage(namespace.getDatabaseName(), originalCommandDocument, fieldNameValidator, + ReadPreference.primary(), MessageSettings.builder().maxWireVersion(maxWireVersion).build(), true, + payload == null ? 
MessageSequences.EmptyMessageSequences.INSTANCE : payload,
+ ClusterConnectionMode.MULTIPLE, null)
+ def output = new ByteBufferBsonOutput(new SimpleBufferProvider())
+ message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE,
+ Stub(TimeoutContext), null))
+
+ when:
+ def commandDocument = message.getCommandDocument(output)
+
+ def expectedCommandDocument = new BsonDocument('insert', new BsonString('coll')).append('documents',
+ new BsonArray([new BsonDocument('_id', new BsonInt32(1)), new BsonDocument('_id', new BsonInt32(2))]))
+ expectedCommandDocument.append('$db', new BsonString(namespace.getDatabaseName()))
+
+ then:
+ commandDocument == expectedCommandDocument
+
+ where:
+ [maxWireVersion, originalCommandDocument, payload] << [
+ [
+ LATEST_WIRE_VERSION,
+ new BsonDocument('insert', new BsonString('coll')),
+ new SplittablePayload(INSERT, [new BsonDocument('_id', new BsonInt32(1)),
+ new BsonDocument('_id', new BsonInt32(2))]
+ .withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) },
+ true, NoOpFieldNameValidator.INSTANCE),
+ ],
+ [
+ LATEST_WIRE_VERSION,
+ new BsonDocument('insert', new BsonString('coll')).append('documents',
+ new BsonArray([new BsonDocument('_id', new BsonInt32(1)), new BsonDocument('_id', new BsonInt32(2))])),
+ null
+ ]
+ ]
+ }
+
+ def 'should respect the max message size'() {
+ given:
+ def maxMessageSize = 1024
+ def messageSettings = MessageSettings.builder().maxMessageSize(maxMessageSize).maxWireVersion(LATEST_WIRE_VERSION).build()
+ def insertCommand = new BsonDocument('insert', new BsonString(namespace.collectionName))
+ def payload = new SplittablePayload(INSERT, [new BsonDocument('_id', new BsonInt32(1)).append('a', new BsonBinary(new byte[913])),
+ new BsonDocument('_id', new BsonInt32(2)).append('b', new BsonBinary(new byte[441])),
+ new BsonDocument('_id', new BsonInt32(3)).append('c', new BsonBinary(new byte[450])),
+ new BsonDocument('_id', new BsonInt32(4)).append('b', new BsonBinary(new byte[441])),
+ new BsonDocument('_id', new BsonInt32(5)).append('c', new BsonBinary(new byte[451]))]
+ .withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) }, true, fieldNameValidator)
+ def message = new CommandMessage(namespace.getDatabaseName(), insertCommand, fieldNameValidator, ReadPreference.primary(),
+ messageSettings, false, payload, ClusterConnectionMode.MULTIPLE, null)
+ def output = new ByteBufferBsonOutput(new SimpleBufferProvider())
+ def sessionContext = Stub(SessionContext) {
+ getReadConcern() >> ReadConcern.DEFAULT
+ }
+
+ when:
+ message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, sessionContext,
+ Stub(TimeoutContext), null))
+ def byteBuf = new ByteBufNIO(ByteBuffer.wrap(output.toByteArray()))
+ def messageHeader = new MessageHeader(byteBuf, maxMessageSize)
+
+ then:
+ messageHeader.opCode == OpCode.OP_MSG.value
+ messageHeader.requestId < RequestMessage.currentGlobalId
+ messageHeader.responseTo == 0
+ messageHeader.messageLength == 1024 // the first split fills the message to exactly the configured cap
+ byteBuf.getInt() == 0 // the int32 after the header is the OP_MSG flagBits field; 0 means no flags are set
+ payload.getPosition() == 1
+ payload.hasAnotherSplit()
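+
+ // A hedged note on the arithmetic (inferred from the assertions, not measured independently):
+ // with maxMessageSize = 1024, only the 913-byte first document fits once the insert command and
+ // OP_MSG section overhead are added, so encoding stops at position 1 and hasAnotherSplit()
+ // reports that a further batch must be encoded from the remaining documents.
+
+ when:
+ payload = payload.getNextSplit()
+ message = new CommandMessage(namespace.getDatabaseName(), insertCommand, fieldNameValidator, ReadPreference.primary(),
+ messageSettings, false, payload, ClusterConnectionMode.MULTIPLE, null)
+ output.truncateToPosition(0)
+ message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, sessionContext, Stub(TimeoutContext),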
null))
+ byteBuf = new ByteBufNIO(ByteBuffer.wrap(output.toByteArray()))
+ messageHeader = new MessageHeader(byteBuf, maxMessageSize)
+
+ then:
+ messageHeader.opCode == OpCode.OP_MSG.value
+ messageHeader.requestId < RequestMessage.currentGlobalId
+ messageHeader.responseTo == 0
+ messageHeader.messageLength == 1024
+ byteBuf.getInt() == 0
+ payload.getPosition() == 2
+ payload.hasAnotherSplit()
+
+ when:
+ payload = payload.getNextSplit()
+ message = new CommandMessage(namespace.getDatabaseName(), insertCommand, fieldNameValidator, ReadPreference.primary(),
+ messageSettings, false, payload, ClusterConnectionMode.MULTIPLE, null)
+ output.truncateToPosition(0)
+ message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, sessionContext, Stub(TimeoutContext), null))
+ byteBuf = new ByteBufNIO(ByteBuffer.wrap(output.toByteArray()))
+ messageHeader = new MessageHeader(byteBuf, maxMessageSize)
+
+ then:
+ messageHeader.opCode == OpCode.OP_MSG.value
+ messageHeader.requestId < RequestMessage.currentGlobalId
+ messageHeader.responseTo == 0
+ messageHeader.messageLength == 552
+ byteBuf.getInt() == 0
+ payload.getPosition() == 1
+ payload.hasAnotherSplit()
+
+ when:
+ payload = payload.getNextSplit()
+ message = new CommandMessage(namespace.getDatabaseName(), insertCommand, fieldNameValidator, ReadPreference.primary(),
+ messageSettings, false, payload, ClusterConnectionMode.MULTIPLE, null)
+ output.truncateToPosition(0)
+ message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE,
+ sessionContext,
+ Stub(TimeoutContext),
+ null))
+ byteBuf = new ByteBufNIO(ByteBuffer.wrap(output.toByteArray()))
+ messageHeader = new MessageHeader(byteBuf, maxMessageSize)
+
+ then:
+ messageHeader.opCode == OpCode.OP_MSG.value
+ messageHeader.requestId < RequestMessage.currentGlobalId
+ messageHeader.responseTo == 0
+ messageHeader.messageLength == 562
+ byteBuf.getInt() == 1 << 1 // OP_MSG flagBits with the moreToCome bit (1 << 1) set on the final batch
+ payload.getPosition() == 1
+ !payload.hasAnotherSplit()
+
+ cleanup:
+ output.close()
+ }
+
+ def 'should respect the max batch count'() {
+ given:
+ def messageSettings = MessageSettings.builder().maxBatchCount(2).maxWireVersion(LATEST_WIRE_VERSION).build()
+ def payload = new SplittablePayload(INSERT, [new BsonDocument('a', new BsonBinary(new byte[900])),
+ new BsonDocument('b', new BsonBinary(new byte[450])),
+ new BsonDocument('c', new BsonBinary(new byte[450]))]
+ .withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) }, true, fieldNameValidator)
+ def message = new CommandMessage(namespace.getDatabaseName(), command, fieldNameValidator, ReadPreference.primary(),
+ messageSettings, false, payload, ClusterConnectionMode.MULTIPLE, null)
+ def output = new ByteBufferBsonOutput(new SimpleBufferProvider())
+ def sessionContext = Stub(SessionContext) {
+ getReadConcern() >> ReadConcern.DEFAULT
+ }
+
+ when:
+ message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, sessionContext,
+ Stub(TimeoutContext),
+ null))
+ def byteBuf = new ByteBufNIO(ByteBuffer.wrap(output.toByteArray()))
+ def messageHeader = new MessageHeader(byteBuf, 2048)
+
+ then:
+ messageHeader.opCode == OpCode.OP_MSG.value
+ messageHeader.requestId < RequestMessage.currentGlobalId
+ messageHeader.responseTo == 0
+ messageHeader.messageLength == 1497
+ byteBuf.getInt() == 0
+ payload.getPosition() == 2
+ payload.hasAnotherSplit()
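+
+ // Hedged note: maxBatchCount(2) caps each OP_MSG at two documents regardless of remaining space,
+ // so the first message carries documents 1-2 and the third goes into a final batch. Only that
+ // last, reply-less batch sets moreToCome (1 << 1) in its flagBits; intermediate batches presumably
+ // still require a server reply before the next split can be sent, hence their flagBits of 0.
+
+ when:
+ payload = payload.getNextSplit()
+ message = new CommandMessage(namespace.getDatabaseName(), command, fieldNameValidator,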
ReadPreference.primary(), messageSettings, + false, payload, ClusterConnectionMode.MULTIPLE, null) + output.truncateToPosition(0) + message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, sessionContext, + Stub(TimeoutContext), null)) + byteBuf = new ByteBufNIO(ByteBuffer.wrap(output.toByteArray())) + messageHeader = new MessageHeader(byteBuf, 1024) + + then: + messageHeader.opCode == OpCode.OP_MSG.value + messageHeader.requestId < RequestMessage.currentGlobalId + messageHeader.responseTo == 0 + byteBuf.getInt() == 1 << 1 + payload.getPosition() == 1 + !payload.hasAnotherSplit() + + cleanup: + output.close() + } + + def 'should throw if payload document bigger than max document size'() { + given: + def messageSettings = MessageSettings.builder().maxDocumentSize(900) + .maxWireVersion(LATEST_WIRE_VERSION).build() + def payload = new SplittablePayload(INSERT, [new BsonDocument('a', new BsonBinary(new byte[900]))] + .withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) }, true, fieldNameValidator) + def message = new CommandMessage(namespace.getDatabaseName(), command, fieldNameValidator, ReadPreference.primary(), + messageSettings, false, payload, ClusterConnectionMode.MULTIPLE, null) + def output = new ByteBufferBsonOutput(new SimpleBufferProvider()) + def sessionContext = Stub(SessionContext) { + getReadConcern() >> ReadConcern.DEFAULT + } + + when: + message.encode(output, new OperationContext(IgnorableRequestContext.INSTANCE, sessionContext, + Stub(TimeoutContext), null)) + + then: + thrown(BsonMaximumSizeExceededException) + + cleanup: + output.close() + } + + private static BsonDocument getCommandDocument(ByteBufNIO byteBuf, ReplyHeader replyHeader) { + new ReplyMessage(new ResponseBuffers(replyHeader, byteBuf), new BsonDocumentCodec(), 0).document + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageTest.java new file mode 100644 index 00000000000..091518c715c --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/CommandMessageTest.java @@ -0,0 +1,171 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoNamespace; +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ServerType; +import com.mongodb.internal.IgnorableRequestContext; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.client.model.bulk.ConcreteClientBulkWriteOptions; +import com.mongodb.internal.connection.MessageSequences.EmptyMessageSequences; +import com.mongodb.internal.operation.ClientBulkWriteOperation; +import com.mongodb.internal.operation.ClientBulkWriteOperation.ClientBulkWriteCommand.OpsAndNsInfo; +import com.mongodb.internal.session.SessionContext; +import com.mongodb.internal.validator.NoOpFieldNameValidator; +import org.bson.BsonArray; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.BsonTimestamp; +import org.junit.jupiter.api.Test; + +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static com.mongodb.MongoClientSettings.getDefaultCodecRegistry; +import static com.mongodb.client.model.bulk.ClientBulkWriteOptions.clientBulkWriteOptions; +import static com.mongodb.internal.mockito.MongoMockito.mock; +import static com.mongodb.internal.operation.ServerVersionHelper.LATEST_WIRE_VERSION; +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.when; + +class CommandMessageTest { + + private static final MongoNamespace NAMESPACE = new MongoNamespace("db.test"); + private static final BsonDocument COMMAND = new BsonDocument("find", new BsonString(NAMESPACE.getCollectionName())); + + @Test + void encodeShouldThrowTimeoutExceptionWhenTimeoutContextIsCalled() { + //given + CommandMessage commandMessage = new CommandMessage(NAMESPACE.getDatabaseName(), COMMAND, NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), + MessageSettings.builder() + .maxWireVersion(LATEST_WIRE_VERSION) + .serverType(ServerType.REPLICA_SET_SECONDARY) + .sessionSupported(true) + .build(), + true, EmptyMessageSequences.INSTANCE, ClusterConnectionMode.MULTIPLE, null); + + try (ByteBufferBsonOutput bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider())) { + SessionContext sessionContext = mock(SessionContext.class); + TimeoutContext timeoutContext = mock(TimeoutContext.class, mock -> { + doThrow(new MongoOperationTimeoutException("test")).when(mock).runMaxTimeMS(any()); + }); + OperationContext operationContext = mock(OperationContext.class, mock -> { + when(mock.getSessionContext()).thenReturn(sessionContext); + when(mock.getTimeoutContext()).thenReturn(timeoutContext); + }); + + //when & then + assertThrows(MongoOperationTimeoutException.class, () -> + commandMessage.encode(bsonOutput, operationContext)); + } + } + + @Test + void encodeShouldNotAddExtraElementsFromTimeoutContextWhenConnectedToMongoCrypt() { + //given + CommandMessage commandMessage = new 
CommandMessage(NAMESPACE.getDatabaseName(), COMMAND, NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(),
+ MessageSettings.builder()
+ .maxWireVersion(LATEST_WIRE_VERSION)
+ .serverType(ServerType.REPLICA_SET_SECONDARY)
+ .sessionSupported(true)
+ .cryptd(true)
+ .build(),
+ true, EmptyMessageSequences.INSTANCE, ClusterConnectionMode.MULTIPLE, null);
+
+ try (ByteBufferBsonOutput bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider())) {
+ SessionContext sessionContext = mock(SessionContext.class, mock -> {
+ when(mock.getClusterTime()).thenReturn(new BsonDocument("clusterTime", new BsonTimestamp(42, 1)));
+ when(mock.hasSession()).thenReturn(false);
+ when(mock.getReadConcern()).thenReturn(ReadConcern.DEFAULT);
+ when(mock.notifyMessageSent()).thenReturn(true);
+ when(mock.hasActiveTransaction()).thenReturn(false);
+ when(mock.isSnapshot()).thenReturn(false);
+ });
+ TimeoutContext timeoutContext = mock(TimeoutContext.class);
+ OperationContext operationContext = mock(OperationContext.class, mock -> {
+ when(mock.getSessionContext()).thenReturn(sessionContext);
+ when(mock.getTimeoutContext()).thenReturn(timeoutContext);
+ });
+
+ //when
+ commandMessage.encode(bsonOutput, operationContext);
+
+ //then
+ verifyNoInteractions(timeoutContext);
+ }
+ }
+
+ @Test
+ void getCommandDocumentFromClientBulkWrite() {
+ MongoNamespace ns = new MongoNamespace("db", "test");
+ boolean retryWrites = false;
+ BsonDocument command = new BsonDocument("bulkWrite", new BsonInt32(1))
+ .append("errorsOnly", BsonBoolean.valueOf(false))
+ .append("ordered", BsonBoolean.valueOf(true));
+ List<BsonDocument> documents = IntStream.range(0, 2).mapToObj(i -> new BsonDocument("_id", new BsonInt32(i)))
+ .collect(Collectors.toList());
+ List<ClientNamespacedWriteModel> writeModels = asList(
+ ClientNamespacedWriteModel.insertOne(ns, documents.get(0)),
+ ClientNamespacedWriteModel.insertOne(ns, documents.get(1)));
+ OpsAndNsInfo opsAndNsInfo = new OpsAndNsInfo(
+ retryWrites,
+ writeModels,
+ new ClientBulkWriteOperation(
+ writeModels,
+ clientBulkWriteOptions(),
+ WriteConcern.MAJORITY,
+ retryWrites,
+ getDefaultCodecRegistry()
+ ).new BatchEncoder(),
+ (ConcreteClientBulkWriteOptions) clientBulkWriteOptions(),
+ () -> 1L);
+ // in the bulkWrite command format, each "insert" value is the index of the target namespace in "nsInfo" (0 here)
+ BsonDocument expectedCommandDocument = command.clone()
+ .append("$db", new BsonString(ns.getDatabaseName()))
+ .append("ops", new BsonArray(asList(
+ new BsonDocument("insert", new BsonInt32(0)).append("document", documents.get(0)),
+ new BsonDocument("insert", new BsonInt32(0)).append("document", documents.get(1)))))
+ .append("nsInfo", new BsonArray(singletonList(new BsonDocument("ns", new BsonString(ns.toString())))));
+ CommandMessage commandMessage = new CommandMessage(
+ ns.getDatabaseName(), command, NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(),
+ MessageSettings.builder().maxWireVersion(LATEST_WIRE_VERSION).build(), true, opsAndNsInfo, ClusterConnectionMode.MULTIPLE, null);
+ try (ByteBufferBsonOutput output = new ByteBufferBsonOutput(new SimpleBufferProvider())) {
+ commandMessage.encode(
+ output,
+ new OperationContext(
+ IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE,
+ new TimeoutContext(TimeoutSettings.DEFAULT), null));
+ BsonDocument actualCommandDocument = commandMessage.getCommandDocument(output);
+ assertEquals(expectedCommandDocument, actualCommandDocument);
+ }
+ }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/CompositeByteBufSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/CompositeByteBufSpecification.groovy
new file mode 100644 index 00000000000..6c43e2a667e --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/CompositeByteBufSpecification.groovy @@ -0,0 +1,541 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection + +import org.bson.ByteBufNIO +import spock.lang.Specification + +import java.nio.ByteBuffer +import java.nio.ByteOrder + +class CompositeByteBufSpecification extends Specification { + + def 'should throw if buffers is null'() { + when: + new CompositeByteBuf(null) + + then: + thrown(IllegalArgumentException) + } + + def 'should throw if buffers is empty'() { + when: + new CompositeByteBuf([]) + + then: + thrown(IllegalArgumentException) + } + + def 'reference count should be maintained'() { + when: + def buf = new CompositeByteBuf([new ByteBufNIO(ByteBuffer.wrap([1, 2, 3, 4] as byte[]))]) + + then: + buf.getReferenceCount() == 1 + + when: + buf.retain() + + then: + buf.getReferenceCount() == 2 + + when: + buf.release() + + then: + buf.getReferenceCount() == 1 + + when: + buf.release() + + then: + buf.getReferenceCount() == 0 + + when: + buf.release() + + then: + thrown(IllegalStateException) + + when: + buf.retain() + + then: + thrown(IllegalStateException) + } + + def 'order should throw if not little endian'() { + when: + new CompositeByteBuf([new ByteBufNIO(ByteBuffer.wrap([1, 2, 3, 4] as byte[]))]).order(ByteOrder.BIG_ENDIAN) + + then: + thrown(UnsupportedOperationException) + } + + def 'order should return normally if little endian'() { + when: + new CompositeByteBuf([new ByteBufNIO(ByteBuffer.wrap([1, 2, 3, 4] as byte[]))]).order(ByteOrder.LITTLE_ENDIAN) + + then: + true + } + + def 'limit should be sum of limits of buffers'() { + expect: + new CompositeByteBuf([new ByteBufNIO(ByteBuffer.wrap([1, 2, 3, 4] as byte[]))]).limit() == 4 + new CompositeByteBuf([new ByteBufNIO(ByteBuffer.wrap([1, 2, 3, 4] as byte[])), + new ByteBufNIO(ByteBuffer.wrap([1, 2] as byte[]))]).limit() == 6 + } + + def 'capacity should be the initial limit'() { + expect: + new CompositeByteBuf([new ByteBufNIO(ByteBuffer.wrap([1, 2, 3, 4] as byte[]))]).capacity() == 4 + new CompositeByteBuf([new ByteBufNIO(ByteBuffer.wrap([1, 2, 3, 4] as byte[])), + new ByteBufNIO(ByteBuffer.wrap([1, 2] as byte[]))]).capacity() == 6 + } + + def 'position should be 0'() { + expect: + new CompositeByteBuf([new ByteBufNIO(ByteBuffer.wrap([1, 2, 3, 4] as byte[]))]).position() == 0 + } + + def 'position should be set if in range'() { + given: + def buf = new CompositeByteBuf([new ByteBufNIO(ByteBuffer.wrap([1, 2, 3] as byte[]))]) + + when: + buf.position(0) + + then: + buf.position() == 0 + + when: + buf.position(1) + + then: + buf.position() == 1 + + when: + buf.position(2) + + then: + buf.position() == 2 + + when: + buf.position(3) + + then: + buf.position() == 3 + } + + def 'position should throw if out of range'() { + given: + def buf = new CompositeByteBuf([new ByteBufNIO(ByteBuffer.wrap([1, 2, 3] as 
byte[]))])
+
+ when:
+ buf.position(-1)
+
+ then:
+ thrown(IndexOutOfBoundsException)
+
+ when:
+ buf.position(4)
+
+ then:
+ thrown(IndexOutOfBoundsException)
+
+ and:
+ buf.limit(2)
+
+ when:
+ buf.position(3)
+
+ then:
+ thrown(IndexOutOfBoundsException)
+ }
+
+ def 'limit should be set if in range'() {
+ given:
+ def buf = new CompositeByteBuf([new ByteBufNIO(ByteBuffer.wrap([1, 2, 3] as byte[]))])
+
+ when:
+ buf.limit(0)
+
+ then:
+ buf.limit() == 0
+
+ when:
+ buf.limit(1)
+
+ then:
+ buf.limit() == 1
+
+ when:
+ buf.limit(2)
+
+ then:
+ buf.limit() == 2
+
+ when:
+ buf.limit(3)
+
+ then:
+ buf.limit() == 3
+ }
+
+ def 'limit should throw if out of range'() {
+ given:
+ def buf = new CompositeByteBuf([new ByteBufNIO(ByteBuffer.wrap([1, 2, 3] as byte[]))])
+
+ when:
+ buf.limit(-1)
+
+ then:
+ thrown(IndexOutOfBoundsException)
+
+ when:
+ buf.limit(4)
+
+ then:
+ thrown(IndexOutOfBoundsException)
+ }
+
+ def 'clear should reset position and limit'() {
+ given:
+ def buf = new CompositeByteBuf([new ByteBufNIO(ByteBuffer.wrap([1, 2, 3] as byte[]))])
+ buf.limit(2)
+ buf.get()
+
+ when:
+ buf.clear()
+
+ then:
+ buf.position() == 0
+ buf.limit() == 3
+ }
+
+ def 'duplicate should copy all properties'() {
+ given:
+ def buf = new CompositeByteBuf([new ByteBufNIO(ByteBuffer.wrap([1, 2, 1, 2, 3, 4, 1, 2] as byte[]))])
+ buf.limit(6)
+ buf.get()
+ buf.get()
+
+ when:
+ def duplicate = buf.duplicate()
+
+ then:
+ duplicate.position() == 2
+ duplicate.limit() == 6
+ duplicate.getInt() == 67305985
+ !duplicate.hasRemaining()
+ buf.position() == 2
+ }
+
+ def 'position, remaining, and hasRemaining should update as bytes are read'() {
+ when:
+ def buf = new CompositeByteBuf([new ByteBufNIO(ByteBuffer.wrap([1, 2, 3, 4] as byte[]))])
+
+ then:
+ buf.position() == 0
+ buf.remaining() == 4
+ buf.hasRemaining()
+
+ when:
+ buf.get()
+
+ then:
+ buf.position() == 1
+ buf.remaining() == 3
+ buf.hasRemaining()
+
+ when:
+ buf.get()
+
+ then:
+ buf.position() == 2
+ buf.remaining() == 2
+ buf.hasRemaining()
+
+ when:
+ buf.get()
+
+ then:
+ buf.position() == 3
+ buf.remaining() == 1
+ buf.hasRemaining()
+
+ when:
+ buf.get()
+
+ then:
+ buf.position() == 4
+ buf.remaining() == 0
+ !buf.hasRemaining()
+ }
+
+ def 'absolute getInt should read little endian integer and preserve position'() {
+ given:
+ def byteBuffer = new ByteBufNIO(ByteBuffer.wrap([1, 2, 3, 4] as byte[]))
+ def buf = new CompositeByteBuf([byteBuffer])
+
+ when:
+ def i = buf.getInt(0)
+
+ then:
+ i == 67305985
+ buf.position() == 0
+ byteBuffer.position() == 0
+ }
+
+ def 'absolute getInt should read little endian integer when integer is split across buffers'() {
+ given:
+ def byteBufferOne = new ByteBufNIO(ByteBuffer.wrap([1, 2] as byte[]))
+ def byteBufferTwo = new ByteBufNIO(ByteBuffer.wrap([3, 4] as byte[]))
+ def buf = new CompositeByteBuf([byteBufferOne, byteBufferTwo])
+
+ when:
+ def i = buf.getInt(0)
+
+ then:
+ i == 67305985
+ buf.position() == 0
+ byteBufferOne.position() == 0
+ byteBufferTwo.position() == 0
+ }
+
+ def 'relative getInt should read little endian integer and move position'() {
+ given:
+ def byteBuffer = new ByteBufNIO(ByteBuffer.wrap([1, 2, 3, 4] as byte[]))
+ def buf = new CompositeByteBuf([byteBuffer])
+
+ when:
+ def i = buf.getInt()
+
+ then:
+ i == 67305985
+ buf.position() == 4
+ byteBuffer.position() == 0
+ }
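+
+ // Worked example for the little-endian expectations used throughout this spec: the wrapped bytes
+ // [1, 2, 3, 4] read as an int32 give 0x04030201 == 67305985, and the bytes [1, 2, ..., 8] read as
+ // an int64 give 0x0807060504030201 == 578437695752307201. The getDouble assertions further down
+ // reinterpret those same eight bytes as IEEE 754 bits (i.e. Double.longBitsToDouble of that long).
+
+ def 'absolute getLong should read little endian long and preserve position'() {
+ given:
+ def byteBuffer = new ByteBufNIO(ByteBuffer.wrap([1, 2, 3, 4, 5, 6, 7, 8] as byte[]))
+ def buf =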
new CompositeByteBuf([byteBuffer])
+
+ when:
+ def l = buf.getLong(0)
+
+ then:
+ l == 578437695752307201L
+ buf.position() == 0
+ byteBuffer.position() == 0
+ }
+
+ def 'absolute getLong should read little endian long when long is split across buffers'() {
+ given:
+ def byteBufferOne = new ByteBufNIO(ByteBuffer.wrap([1, 2] as byte[]))
+ def byteBufferTwo = new ByteBufNIO(ByteBuffer.wrap([3, 4] as byte[]))
+ def byteBufferThree = new ByteBufNIO(ByteBuffer.wrap([5, 6] as byte[]))
+ def byteBufferFour = new ByteBufNIO(ByteBuffer.wrap([7, 8] as byte[]))
+ def buf = new CompositeByteBuf([byteBufferOne, byteBufferTwo, byteBufferThree, byteBufferFour])
+
+ when:
+ def l = buf.getLong(0)
+
+ then:
+ l == 578437695752307201L
+ buf.position() == 0
+ byteBufferOne.position() == 0
+ byteBufferTwo.position() == 0
+ }
+
+ def 'relative getLong should read little endian long and move position'() {
+ given:
+ def byteBuffer = new ByteBufNIO(ByteBuffer.wrap([1, 2, 3, 4, 5, 6, 7, 8] as byte[]))
+ def buf = new CompositeByteBuf([byteBuffer])
+
+ when:
+ def l = buf.getLong()
+
+ then:
+ l == 578437695752307201L
+ buf.position() == 8
+ byteBuffer.position() == 0
+ }
+
+ def 'absolute getDouble should read little endian double and preserve position'() {
+ given:
+ def byteBuffer = new ByteBufNIO(ByteBuffer.wrap([1, 2, 3, 4, 5, 6, 7, 8] as byte[]))
+ def buf = new CompositeByteBuf([byteBuffer])
+
+ when:
+ def d = buf.getDouble(0)
+
+ then:
+ d == 5.447603722011605E-270 as double
+ buf.position() == 0
+ byteBuffer.position() == 0
+ }
+
+ def 'relative getDouble should read little endian double and move position'() {
+ given:
+ def byteBuffer = new ByteBufNIO(ByteBuffer.wrap([1, 2, 3, 4, 5, 6, 7, 8] as byte[]))
+ def buf = new CompositeByteBuf([byteBuffer])
+
+ when:
+ def d = buf.getDouble()
+
+ then:
+ d == 5.447603722011605E-270 as double
+ buf.position() == 8
+ byteBuffer.position() == 0
+ }
+
+ def 'absolute bulk get should read bytes and preserve position'() {
+ given:
+ def byteBuffer = new ByteBufNIO(ByteBuffer.wrap([1, 2, 3, 4] as byte[]))
+ def buf = new CompositeByteBuf([byteBuffer])
+
+ when:
+ def bytes = new byte[4]
+ buf.get(0, bytes)
+
+ then:
+ bytes == [1, 2, 3, 4] as byte[]
+ buf.position() == 0
+ byteBuffer.position() == 0
+ }
+
+ def 'absolute bulk get should read bytes when split across buffers'() {
+ given:
+ def byteBufferOne = new ByteBufNIO(ByteBuffer.wrap([1] as byte[]))
+ def byteBufferTwo = new ByteBufNIO(ByteBuffer.wrap([2, 3] as byte[]))
+ def byteBufferThree = new ByteBufNIO(ByteBuffer.wrap([4, 5, 6] as byte[]))
+ def byteBufferFour = new ByteBufNIO(ByteBuffer.wrap([7, 8, 9, 10] as byte[]))
+ def byteBufferFive = new ByteBufNIO(ByteBuffer.wrap([11] as byte[]))
+ def byteBufferSix = new ByteBufNIO(ByteBuffer.wrap([12] as byte[]))
+ def buf = new CompositeByteBuf([byteBufferOne, byteBufferTwo, byteBufferThree, byteBufferFour,
+ byteBufferFive, byteBufferSix])
+
+ when:
+ def bytes = new byte[16]
+ buf.get(2, bytes, 4, 9)
+
+ then:
+ bytes == [0, 0, 0, 0, 3, 4, 5, 6, 7, 8, 9, 10, 11, 0, 0, 0] as byte[]
+ buf.position() == 0
+ }
+
+ def 'relative bulk get should read bytes and move position'() {
+ given:
+ def byteBuffer = new ByteBufNIO(ByteBuffer.wrap([1, 2, 3, 4, 5, 6, 7, 8] as byte[]))
+ def buf = new CompositeByteBuf([byteBuffer])
+
+ when:
+ def bytes = new byte[4]
+ buf.get(bytes)
+
+ then:
+ bytes == [1, 2, 3, 4] as byte[]
+ buf.position() == 4
+ byteBuffer.position() == 0
+
+ when:
+ bytes = new byte[8]
+ buf.get(bytes, 4, 3)
+
+ then:
+ bytes == [0, 0, 0, 0, 5, 6, 7, 0]
as byte[]
+ buf.position() == 7
+ byteBuffer.position() == 0
+ }
+
+ def 'should get as NIO ByteBuffer'() {
+ given:
+ def buf = new CompositeByteBuf([new ByteBufNIO(ByteBuffer.wrap([1, 2, 3, 4, 5, 6, 7, 8] as byte[]))])
+
+ when:
+ buf.position(1).limit(5)
+ def nio = buf.asNIO()
+
+ then:
+ nio.position() == 1
+ nio.limit() == 5
+ def bytes = new byte[4]
+ nio.get(bytes)
+ bytes == [2, 3, 4, 5] as byte[]
+ }
+
+ def 'should get as NIO ByteBuffer with multiple buffers'() {
+ given:
+ def buf = new CompositeByteBuf([new ByteBufNIO(ByteBuffer.wrap([1, 2] as byte[])),
+ new ByteBufNIO(ByteBuffer.wrap([3, 4, 5] as byte[])),
+ new ByteBufNIO(ByteBuffer.wrap([6, 7, 8, 9] as byte[]))])
+
+ when:
+ buf.position(1).limit(6)
+ def nio = buf.asNIO()
+
+ then:
+ nio.position() == 0
+ nio.limit() == 5
+ def bytes = new byte[5]
+ nio.get(bytes)
+ bytes == [2, 3, 4, 5, 6] as byte[]
+ }
+
+ def 'should throw IndexOutOfBoundsException if reading out of bounds'() {
+ given:
+ def buf = new CompositeByteBuf([new ByteBufNIO(ByteBuffer.wrap([1, 2, 3, 4] as byte[]))])
+ buf.position(4)
+
+ when:
+ buf.get()
+
+ then:
+ thrown(IndexOutOfBoundsException)
+
+ when:
+ buf.position(1)
+ buf.getInt()
+
+ then:
+ thrown(IndexOutOfBoundsException)
+
+ when:
+ buf.position(0)
+ buf.get(new byte[2], 1, 2)
+
+ then:
+ thrown(IndexOutOfBoundsException)
+ }
+
+ def 'should throw IllegalStateException if buffer is closed'() {
+ given:
+ def buf = new CompositeByteBuf([new ByteBufNIO(ByteBuffer.wrap([1, 2, 3, 4] as byte[]))])
+ buf.release()
+
+ when:
+ buf.get()
+
+ then:
+ thrown(IllegalStateException)
+ }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ConcurrentPoolTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ConcurrentPoolTest.java
new file mode 100644
index 00000000000..b9b2499679f
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ConcurrentPoolTest.java
@@ -0,0 +1,278 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.connection;
+
+import com.mongodb.MongoException;
+import com.mongodb.MongoTimeoutException;
+import org.junit.Test;
+
+import java.io.Closeable;
+import java.util.function.Consumer;
+
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+public class ConcurrentPoolTest {
+ private ConcurrentPool<TestCloseable> pool;
+
+ @Test
+ public void testThatGetDecreasesAvailability() {
+ pool = new ConcurrentPool<>(3, new TestItemFactory());
+
+ pool.get();
+ pool.get();
+ pool.get();
+ try {
+ pool.get(1, MILLISECONDS);
+ fail();
+ } catch (MongoTimeoutException e) {
+ // all good
+ }
+ }
+
+ @Test
+ public void testThatReleaseIncreasesAvailability() {
+ pool = new ConcurrentPool<>(3, new TestItemFactory());
+
+ pool.get();
+ pool.get();
+ pool.release(pool.get());
+ assertNotNull(pool.get());
+ }
+
+ @Test
+ public void testThatGetReleasesPermitIfCreateFails() {
+ pool = new ConcurrentPool<>(1, new TestItemFactory(true));
+
+ try {
+ pool.get();
+ fail();
+ } catch (MongoException e) {
+ // expected
+ }
+
+ // a negative timeout presumably means an unbounded wait here; succeeding immediately
+ // shows the failed create() gave its permit back
+ assertTrue(pool.acquirePermit(-1, MILLISECONDS));
+ }
+
+ @Test
+ public void testInUseCount() {
+ pool = new ConcurrentPool<>(3, new TestItemFactory());
+
+ assertEquals(0, pool.getInUseCount());
+ TestCloseable closeable = pool.get();
+ assertEquals(1, pool.getInUseCount());
+ pool.release(closeable);
+ assertEquals(0, pool.getInUseCount());
+ }
+
+ @Test
+ public void testAvailableCount() {
+ pool = new ConcurrentPool<>(3, new TestItemFactory());
+
+ assertEquals(0, pool.getAvailableCount());
+ TestCloseable closeable = pool.get();
+ assertEquals(0, pool.getAvailableCount());
+ pool.release(closeable);
+ assertEquals(1, pool.getAvailableCount());
+ closeable = pool.get();
+ pool.release(closeable, true);
+ assertEquals(0, pool.getAvailableCount());
+ }
+
+ @Test
+ public void testAddItemToPoolOnRelease() {
+ pool = new ConcurrentPool<>(3, new TestItemFactory());
+
+ TestCloseable closeable = pool.get();
+ pool.release(closeable, false);
+ assertFalse(closeable.isClosed());
+ }
+
+ @Test
+ public void testCloseItemOnReleaseWithDiscard() {
+ pool = new ConcurrentPool<>(3, new TestItemFactory());
+
+ TestCloseable closeable = pool.get();
+ pool.release(closeable, true);
+ assertTrue(closeable.isClosed());
+ }
+
+ @Test
+ public void testCloseAllItemsAfterPoolClosed() {
+ pool = new ConcurrentPool<>(3, new TestItemFactory());
+
+ TestCloseable c1 = pool.get();
+ TestCloseable c2 = pool.get();
+ pool.release(c1);
+ pool.release(c2);
+ pool.close();
+ assertTrue(c1.isClosed());
+ assertTrue(c2.isClosed());
+ }
+
+ @Test
+ public void testCloseItemOnReleaseAfterPoolClosed() {
+ pool = new ConcurrentPool<>(3, new TestItemFactory());
+
+ TestCloseable c1 = pool.get();
+ pool.close();
+ pool.release(c1);
+ assertTrue(c1.isClosed());
+ }
+
+ @Test
+ public void testEnsureMinSize() {
+ pool = new ConcurrentPool<>(3, new TestItemFactory());
+ Consumer<TestCloseable> initAndRelease = connection -> pool.release(connection);
+ pool.ensureMinSize(0, initAndRelease);
+ assertEquals(0, pool.getAvailableCount());
+
+ pool.ensureMinSize(1, initAndRelease);
+ assertEquals(1, pool.getAvailableCount());
+
+ pool.ensureMinSize(1, initAndRelease);
+ assertEquals(1, pool.getAvailableCount());
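+
+ // Hedged reading of the next assertions: a checked-out item still counts toward the requested
+ // minimum, so ensureMinSize(1) creates nothing while the pool's only item is in use.
+ pool.get();
+ pool.ensureMinSize(1, initAndRelease);
+ assertEquals(0,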
pool.getAvailableCount());
+
+ pool.ensureMinSize(4, initAndRelease);
+ assertEquals(3, pool.getAvailableCount());
+ }
+
+ @Test
+ public void whenEnsuringMinSizeShouldNotInitializePooledItemIfNotRequested() {
+ pool = new ConcurrentPool<>(3, new TestItemFactory());
+
+ pool.ensureMinSize(1, pool::release);
+ assertFalse(pool.get().isInitialized());
+ }
+
+ @Test
+ public void whenEnsuringMinSizeShouldInitializePooledItemIfRequested() {
+ pool = new ConcurrentPool<>(3, new TestItemFactory());
+
+ pool.ensureMinSize(1, connection -> {
+ connection.initialized = true;
+ pool.release(connection);
+ });
+ assertTrue(pool.get().isInitialized());
+ }
+
+ @Test
+ public void testThatEnsuringMinSizeReleasesPermitIfCreateFails() {
+ pool = new ConcurrentPool<>(1, new TestItemFactory(true));
+
+ try {
+ pool.ensureMinSize(1, ignore -> fail());
+ fail();
+ } catch (MongoException e) {
+ // expected
+ }
+
+ assertTrue(pool.acquirePermit(-1, MILLISECONDS));
+ }
+
+ @Test
+ public void testPrune() {
+ pool = new ConcurrentPool<>(5, new TestItemFactory());
+
+ TestCloseable t1 = pool.get();
+ TestCloseable t2 = pool.get();
+ TestCloseable t3 = pool.get();
+ TestCloseable t4 = pool.get();
+ t1.shouldPrune = true;
+ t2.shouldPrune = false;
+ t3.shouldPrune = true;
+ t4.shouldPrune = false;
+
+ pool.release(t1);
+ pool.release(t2);
+ pool.release(t3);
+ pool.release(t4);
+
+ pool.prune();
+
+ assertEquals(2, pool.getAvailableCount());
+ assertEquals(0, pool.getInUseCount());
+ assertTrue(t1.isClosed());
+ assertFalse(t2.isClosed());
+ assertTrue(t3.isClosed());
+ assertFalse(t4.isClosed());
+ }
+
+ class TestItemFactory implements ConcurrentPool.ItemFactory<TestCloseable> {
+ private final boolean shouldThrowOnCreate;
+
+ TestItemFactory() {
+ this(false);
+ }
+
+ TestItemFactory(final boolean shouldThrowOnCreate) {
+ this.shouldThrowOnCreate = shouldThrowOnCreate;
+ }
+
+ @Override
+ public TestCloseable create() {
+ if (shouldThrowOnCreate) {
+ throw new MongoException("This is a journey");
+ }
+ return new TestCloseable();
+ }
+
+ @Override
+ public void close(final TestCloseable closeable) {
+ closeable.close();
+ }
+
+ @Override
+ public boolean shouldPrune(final TestCloseable testCloseable) {
+ return testCloseable.shouldPrune();
+ }
+ }
+
+ static class TestCloseable implements Closeable {
+ private boolean closed;
+ private boolean shouldPrune;
+ private boolean initialized;
+
+ TestCloseable() {
+ }
+
+ @Override
+ public void close() {
+ closed = true;
+ }
+
+ boolean isClosed() {
+ return closed;
+ }
+
+ public boolean isInitialized() {
+ return initialized;
+ }
+
+ public boolean shouldPrune() {
+ return shouldPrune;
+ }
+ }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ConnectionPoolAsyncTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ConnectionPoolAsyncTest.java
new file mode 100644
index 00000000000..b3e1693ae03
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ConnectionPoolAsyncTest.java
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.connection;
+
+import com.mongodb.async.FutureResultCallback;
+import com.mongodb.connection.SocketSettings;
+import com.mongodb.connection.SslSettings;
+import com.mongodb.internal.diagnostics.logging.Logger;
+import com.mongodb.internal.diagnostics.logging.Loggers;
+import org.bson.BsonDocument;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.util.concurrent.Callable;
+
+// Implementation of
+// https://github.com/mongodb/specifications/blob/master/source/connection-monitoring-and-pooling/connection-monitoring-and-pooling.md
+// specification tests
+@RunWith(Parameterized.class)
+public class ConnectionPoolAsyncTest extends AbstractConnectionPoolTest {
+ private static final Logger LOGGER = Loggers.getLogger(ConnectionPoolAsyncTest.class.getSimpleName());
+
+ public ConnectionPoolAsyncTest(final String fileName, final String description, final BsonDocument definition, final boolean skipTest) {
+ super(fileName, description, definition, skipTest);
+ }
+
+ @Override
+ protected Callable<Exception> createCallable(final BsonDocument operation) {
+ String name = operation.getString("name").getValue();
+ if (name.equals("checkOut")) {
+ FutureResultCallback<InternalConnection> callback = new FutureResultCallback<>();
+ return () -> {
+ try {
+ getPool().getAsync(createOperationContext(), (connection, t) -> {
+ if (t != null) {
+ callback.onResult(null, t);
+ } else {
+ if (operation.containsKey("label")) {
+ getConnectionMap().put(operation.getString("label").getValue(), connection);
+ }
+ callback.onResult(connection, null);
+ }
+ });
+ callback.get();
+ return null;
+ } catch (Exception e) {
+ LOGGER.error("", e);
+ return e;
+ }
+ };
+ } else if (name.equals("checkIn")) {
+ return () -> {
+ try {
+ InternalConnection connection = getConnectionMap().get(operation.getString("connection").getValue());
+ connection.close();
+ return null;
+ } catch (Exception e) {
+ return e;
+ }
+ };
+ } else {
+ throw new UnsupportedOperationException("Operation " + name + " not supported");
+ }
+ }
+
+ @Override
+ protected StreamFactory createStreamFactory(final SocketSettings socketSettings, final SslSettings sslSettings) {
+ if (sslSettings.isEnabled()) {
+ return new TlsChannelStreamFactoryFactory(new DefaultInetAddressResolver()).create(socketSettings, sslSettings);
+ } else {
+ return new AsynchronousSocketChannelStreamFactory(new DefaultInetAddressResolver(), socketSettings, sslSettings);
+ }
+ }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ConnectionPoolTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ConnectionPoolTest.java
new file mode 100644
index 00000000000..3fd06d9dcca
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ConnectionPoolTest.java
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.connection;
+
+import com.mongodb.connection.SocketSettings;
+import com.mongodb.connection.SslSettings;
+import com.mongodb.internal.diagnostics.logging.Logger;
+import com.mongodb.internal.diagnostics.logging.Loggers;
+import org.bson.BsonDocument;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.util.concurrent.Callable;
+
+// Implementation of
+// https://github.com/mongodb/specifications/blob/master/source/connection-monitoring-and-pooling/connection-monitoring-and-pooling.md
+// specification tests
+@RunWith(Parameterized.class)
+public class ConnectionPoolTest extends AbstractConnectionPoolTest {
+ private static final Logger LOGGER = Loggers.getLogger(ConnectionPoolTest.class.getSimpleName());
+
+ public ConnectionPoolTest(final String fileName, final String description, final BsonDocument definition, final boolean skipTest) {
+ super(fileName, description, definition, skipTest);
+ }
+
+ @Override
+ protected Callable<Exception> createCallable(final BsonDocument operation) {
+ String name = operation.getString("name").getValue();
+ if (name.equals("checkOut")) {
+ return () -> {
+ try {
+ InternalConnection connection = getPool().get(createOperationContext());
+ if (operation.containsKey("label")) {
+ getConnectionMap().put(operation.getString("label").getValue(), connection);
+ }
+ return null;
+ } catch (Exception e) {
+ LOGGER.error("", e);
+ return e;
+ }
+ };
+ } else if (name.equals("checkIn")) {
+ return () -> {
+ try {
+ InternalConnection connection = getConnectionMap().get(operation.getString("connection").getValue());
+ connection.close();
+ return null;
+ } catch (Exception e) {
+ return e;
+ }
+ };
+ } else {
+ throw new UnsupportedOperationException("Operation " + name + " not supported");
+ }
+ }
+
+ @Override
+ protected StreamFactory createStreamFactory(final SocketSettings socketSettings, final SslSettings sslSettings) {
+ return new SocketStreamFactory(new DefaultInetAddressResolver(), socketSettings, sslSettings);
+ }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultClusterFactoryTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultClusterFactoryTest.java
new file mode 100644
index 00000000000..8ecec2c7494
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultClusterFactoryTest.java
@@ -0,0 +1,145 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.internal.connection;
+
+import ch.qos.logback.classic.Level;
+import ch.qos.logback.classic.Logger;
+import ch.qos.logback.classic.LoggerContext;
+import ch.qos.logback.classic.spi.ILoggingEvent;
+import ch.qos.logback.core.read.ListAppender;
+import com.mongodb.ConnectionString;
+import com.mongodb.connection.ClusterSettings;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+import org.slf4j.LoggerFactory;
+
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+class DefaultClusterFactoryTest {
+ private static final String EXPECTED_COSMOS_DB_MESSAGE =
+ "You appear to be connected to a CosmosDB cluster. For more information regarding "
+ + "feature compatibility and support please visit https://www.mongodb.com/supportability/cosmosdb";
+
+ private static final String EXPECTED_DOCUMENT_DB_MESSAGE =
+ "You appear to be connected to a DocumentDB cluster. For more information regarding "
+ + "feature compatibility and support please visit https://www.mongodb.com/supportability/documentdb";
+
+ private static final Logger LOGGER = (Logger) LoggerFactory.getLogger("org.mongodb.driver.client");
+ private static final MemoryAppender MEMORY_APPENDER = new MemoryAppender();
+
+ @BeforeAll
+ public static void setUp() {
+ MEMORY_APPENDER.setContext((LoggerContext) LoggerFactory.getILoggerFactory());
+ LOGGER.setLevel(Level.DEBUG);
+ LOGGER.addAppender(MEMORY_APPENDER);
+ MEMORY_APPENDER.start();
+ }
+
+ @AfterAll
+ public static void cleanUp() {
+ LOGGER.detachAppender(MEMORY_APPENDER);
+ }
+
+ @AfterEach
+ public void reset() {
+ MEMORY_APPENDER.reset();
+ }
+
+ static Stream<Arguments> shouldLogAllegedClusterEnvironmentWhenNonGenuineHostsSpecified() {
+ return Stream.of(
+ Arguments.of("mongodb://a.MONGO.COSMOS.AZURE.COM:19555", EXPECTED_COSMOS_DB_MESSAGE),
+ Arguments.of("mongodb://a.mongo.cosmos.azure.com:19555", EXPECTED_COSMOS_DB_MESSAGE),
+ Arguments.of("mongodb://a.DOCDB-ELASTIC.AMAZONAWS.COM:27017/", EXPECTED_DOCUMENT_DB_MESSAGE),
+ Arguments.of("mongodb://a.docdb-elastic.amazonaws.com:27017/", EXPECTED_DOCUMENT_DB_MESSAGE),
+ Arguments.of("mongodb://a.DOCDB.AMAZONAWS.COM", EXPECTED_DOCUMENT_DB_MESSAGE),
+ Arguments.of("mongodb://a.docdb.amazonaws.com", EXPECTED_DOCUMENT_DB_MESSAGE),
+
+ /* SRV matching */
+ Arguments.of("mongodb+srv://A.MONGO.COSMOS.AZURE.COM", EXPECTED_COSMOS_DB_MESSAGE),
+ Arguments.of("mongodb+srv://a.mongo.cosmos.azure.com", EXPECTED_COSMOS_DB_MESSAGE),
+ Arguments.of("mongodb+srv://a.DOCDB.AMAZONAWS.COM/", EXPECTED_DOCUMENT_DB_MESSAGE),
+ Arguments.of("mongodb+srv://a.docdb.amazonaws.com/", EXPECTED_DOCUMENT_DB_MESSAGE),
+ Arguments.of("mongodb+srv://a.DOCDB-ELASTIC.AMAZONAWS.COM/", EXPECTED_DOCUMENT_DB_MESSAGE),
+ Arguments.of("mongodb+srv://a.docdb-elastic.amazonaws.com/", EXPECTED_DOCUMENT_DB_MESSAGE),
+
+ /* Mixing genuine and nongenuine hosts (unlikely in practice) */
+ Arguments.of("mongodb://a.example.com:27017,b.mongo.cosmos.azure.com:19555/", EXPECTED_COSMOS_DB_MESSAGE),
+ Arguments.of("mongodb://a.example.com:27017,b.docdb.amazonaws.com:27017/", EXPECTED_DOCUMENT_DB_MESSAGE),
+ Arguments.of("mongodb://a.example.com:27017,b.docdb-elastic.amazonaws.com:27017/", EXPECTED_DOCUMENT_DB_MESSAGE)
+ );
+ }
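+
+ // The cases above and below suggest (an inference from the test data, not a statement of the
+ // implementation) that detection is a case-insensitive suffix match on each configured host,
+ // which is why the '.tld'-suffixed lookalikes in the genuine-host cases never trigger the log.
+
+ @ParameterizedTest
+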
@MethodSource
+ void shouldLogAllegedClusterEnvironmentWhenNonGenuineHostsSpecified(final String connectionString, final String expectedLogMessage) {
+ //when
+ ClusterSettings clusterSettings = toClusterSettings(new ConnectionString(connectionString));
+ new DefaultClusterFactory().detectAndLogClusterEnvironment(clusterSettings);
+
+ //then
+ List<ILoggingEvent> loggedEvents = MEMORY_APPENDER.search(expectedLogMessage);
+
+ Assertions.assertEquals(1, loggedEvents.size());
+ Assertions.assertEquals(Level.INFO, loggedEvents.get(0).getLevel());
+ }
+
+ static Stream<String> shouldNotLogClusterEnvironmentWhenGenuineHostsSpecified() {
+ return Stream.of(
+ "mongodb://a.mongo.cosmos.azure.com.tld:19555",
+ "mongodb://a.docdb-elastic.amazonaws.com.t",
+ "mongodb+srv://a.example.com",
+ "mongodb+srv://a.mongodb.net/",
+ "mongodb+srv://a.mongo.cosmos.azure.com.tld/",
+ "mongodb+srv://a.docdb-elastic.amazonaws.com.tld/"
+ );
+ }
+
+ @ParameterizedTest
+ @MethodSource
+ void shouldNotLogClusterEnvironmentWhenGenuineHostsSpecified(final String connectionUrl) {
+ //when
+ ClusterSettings clusterSettings = toClusterSettings(new ConnectionString(connectionUrl));
+ new DefaultClusterFactory().detectAndLogClusterEnvironment(clusterSettings);
+
+ //then
+ Assertions.assertEquals(0, MEMORY_APPENDER.search(EXPECTED_COSMOS_DB_MESSAGE).size());
+ Assertions.assertEquals(0, MEMORY_APPENDER.search(EXPECTED_DOCUMENT_DB_MESSAGE).size());
+ }
+
+ private static ClusterSettings toClusterSettings(final ConnectionString connectionUrl) {
+ return ClusterSettings.builder().applyConnectionString(connectionUrl).build();
+ }
+
+ public static class MemoryAppender extends ListAppender<ILoggingEvent> {
+ public void reset() {
+ this.list.clear();
+ }
+
+ public List<ILoggingEvent> search(final String message) {
+ return this.list.stream()
+ .filter(event -> event.getFormattedMessage().contains(message))
+ .collect(Collectors.toList());
+ }
+ }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultConnectionPoolSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultConnectionPoolSpecification.groovy
new file mode 100644
index 00000000000..b3e78d2dc54
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultConnectionPoolSpecification.groovy
@@ -0,0 +1,755 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.mongodb.internal.connection + +import com.mongodb.ClusterFixture +import com.mongodb.MongoConnectionPoolClearedException +import com.mongodb.MongoServerUnavailableException +import com.mongodb.MongoTimeoutException +import com.mongodb.ServerAddress +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ConnectionDescription +import com.mongodb.connection.ConnectionId +import com.mongodb.connection.ServerId +import com.mongodb.event.ConnectionCheckOutFailedEvent +import com.mongodb.event.ConnectionPoolListener +import com.mongodb.internal.inject.EmptyProvider +import com.mongodb.internal.inject.SameObjectProvider +import com.mongodb.internal.logging.LogMessage +import com.mongodb.logging.TestLoggingInterceptor +import org.bson.types.ObjectId +import spock.lang.Specification +import spock.lang.Subject +import com.mongodb.spock.Slow + +import java.util.concurrent.CompletableFuture +import java.util.concurrent.CountDownLatch +import java.util.regex.Matcher +import java.util.regex.Pattern + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT_FACTORY +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS +import static com.mongodb.ClusterFixture.createOperationContext +import static com.mongodb.connection.ConnectionPoolSettings.builder +import static java.util.concurrent.TimeUnit.MILLISECONDS +import static java.util.concurrent.TimeUnit.MINUTES + +class DefaultConnectionPoolSpecification extends Specification { + private static final ServerAddress SERVER_ADDRESS = new ServerAddress() + private static final ServerId SERVER_ID = new ServerId(new ClusterId("test"), SERVER_ADDRESS) + + private final TestInternalConnectionFactory connectionFactory = Spy(TestInternalConnectionFactory) + private TestLoggingInterceptor interceptor; + + @Subject + private DefaultConnectionPool pool + + def setup() { + def filterConfig = [:] + filterConfig[LogMessage.Component.CONNECTION] = LogMessage.Level.DEBUG + interceptor = new TestLoggingInterceptor("test", + new TestLoggingInterceptor.LoggingFilter(filterConfig)) + } + + def cleanup() { + interceptor.close(); + pool.close() + } + + def 'should get non null connection'() throws InterruptedException { + given: + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, + builder().maxSize(1).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + pool.ready() + + expect: + pool.get(OPERATION_CONTEXT) != null + } + + def 'should reuse released connection'() throws InterruptedException { + given: + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, + builder().maxSize(1).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + pool.ready() + + when: + pool.get(OPERATION_CONTEXT).close() + pool.get(OPERATION_CONTEXT) + + then: + 1 * connectionFactory.create(SERVER_ID, _) + } + + def 'should release a connection back into the pool on close, not close the underlying connection'() throws InterruptedException { + given: + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, + builder().maxSize(1).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + pool.ready() + + when: + pool.get(OPERATION_CONTEXT).close() + + then: + !connectionFactory.getCreatedConnections().get(0).isClosed() + } + + def 'should throw if pool is exhausted'() throws InterruptedException { + given: + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, + builder().maxSize(1).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + pool.ready() + + when: 
+ def first = pool.get(createOperationContext(TIMEOUT_SETTINGS.withMaxWaitTimeMS(50))) + + then: + first != null + + when: + pool.get(createOperationContext(TIMEOUT_SETTINGS.withMaxWaitTimeMS(50))) + + then: + thrown(MongoTimeoutException) + } + + def 'should throw on timeout'() throws InterruptedException { + given: + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, + builder().maxSize(1).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + pool.ready() + + def timeoutSettings = TIMEOUT_SETTINGS.withMaxWaitTimeMS(50) + pool.get(createOperationContext(timeoutSettings)) + + when: + TimeoutTrackingConnectionGetter connectionGetter = new TimeoutTrackingConnectionGetter(pool, timeoutSettings) + new Thread(connectionGetter).start() + + connectionGetter.latch.await() + + then: + connectionGetter.gotTimeout + } + + def 'should have size of 0 with default settings'() { + given: + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, + builder().maxSize(10).maintenanceInitialDelay(5, MINUTES).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + pool.ready() + + when: + pool.doMaintenance() + + then: + connectionFactory.createdConnections.size() == 0 + } + + @Slow + def 'should ensure min pool size after maintenance task runs'() { + given: + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, + builder().maxSize(10).minSize(5).maintenanceInitialDelay(5, MINUTES).build(), + mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + pool.ready() + + when: 'the maintenance tasks runs' + pool.doMaintenance() + //not cool - but we have no way of being notified that the maintenance task has finished + Thread.sleep(500) + + then: 'it ensures the minimum size of the pool' + connectionFactory.createdConnections.size() == 5 + connectionFactory.createdConnections.get(0).opened() // if the first one is opened, they all should be + + when: 'the pool is invalidated and the maintenance tasks runs' + pool.invalidate(null) + pool.ready() + pool.doMaintenance() + //not cool - but we have no way of being notified that the maintenance task has finished + Thread.sleep(500) + + then: 'it prunes the existing connections and again ensures the minimum size of the pool' + connectionFactory.createdConnections.size() == 10 + connectionFactory.createdConnections.get(0).opened() // if the first one is opened, they all should be + } + + def 'should invoke connection pool created event'() { + given: + def listener = Mock(ConnectionPoolListener) + def settings = builder().maxSize(10).minSize(5).addConnectionPoolListener(listener).build() + + when: + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, settings, mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + + then: + 1 * listener.connectionPoolCreated { it.serverId == SERVER_ID && it.settings == settings } + } + + def 'should invoke connection pool closed event'() { + given: + def listener = Mock(ConnectionPoolListener) + def settings = builder().maxSize(10).minSize(5).addConnectionPoolListener(listener).build() + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, settings, mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + when: + pool.close() + + then: + 1 * listener.connectionPoolClosed { it.serverId == SERVER_ID } + } + + def 'should fire connection created to pool event'() { + given: + def listener = Mock(ConnectionPoolListener) + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().maxSize(10) + .addConnectionPoolListener(listener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + + when: + 
pool.ready() + pool.get(OPERATION_CONTEXT) + + then: + 1 * listener.connectionCreated { it.connectionId.serverId == SERVER_ID } + 1 * listener.connectionReady { it.connectionId.serverId == SERVER_ID } + } + + def 'should log connection pool events'() { + given: + def listener = Mock(ConnectionPoolListener) + def settings = builder().maxSize(10).minSize(5).addConnectionPoolListener(listener).build() + def connection = Mock(InternalConnection) + def connectionDescription = Mock(ConnectionDescription) + def driverConnectionId = 1 + def id = new ConnectionId(SERVER_ID, driverConnectionId, 1); + connectionFactory.create(SERVER_ID, _) >> connection + connectionDescription.getConnectionId() >> id + connection.getDescription() >> connectionDescription + connection.opened() >> false + + when: 'connection pool is created' + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, settings, mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + then: '"pool is created" log message is emitted' + def poolCreatedLogMessage = getMessage("Connection pool created") + "Connection pool created for ${SERVER_ADDRESS.getHost()}:${SERVER_ADDRESS.getPort()} using options " + + "maxIdleTimeMS=${settings.getMaxConnectionIdleTime(MILLISECONDS)}, " + + "minPoolSize=${settings.getMinSize()}, maxPoolSize=${settings.getMaxSize()}, " + + "maxConnecting=${settings.getMaxConnecting()}, " + + "waitQueueTimeoutMS=${settings.getMaxWaitTime(MILLISECONDS)}" == poolCreatedLogMessage + + when: 'connection pool is ready' + pool.ready() + then: '"pool is ready" log message is emitted' + def poolReadyLogMessage = getMessage("Connection pool ready") + "Connection pool ready for ${SERVER_ADDRESS.getHost()}:${SERVER_ADDRESS.getPort()}" == poolReadyLogMessage + + when: 'connection is created' + pool.get(OPERATION_CONTEXT) + then: '"connection created" and "connection ready" log messages are emitted' + def createdLogMessage = getMessage( "Connection created") + def readyLogMessage = getMessage("Connection ready") + "Connection created: address=${SERVER_ADDRESS.getHost()}:${SERVER_ADDRESS.getPort()}, " + + "driver-generated ID=${driverConnectionId}" == createdLogMessage + readyLogMessage ==~ "Connection ready: address=${quoteHostnameOrIp(SERVER_ADDRESS.getHost())}:${SERVER_ADDRESS.getPort()}" + + ", driver-generated ID=${driverConnectionId}, established in=\\d+ ms" + + when: 'connection is released back into the pool on close' + pool.get(OPERATION_CONTEXT).close() + then: '"connection check out" and "connection checked in" log messages are emitted' + def checkoutStartedMessage = getMessage("Connection checkout started") + def connectionCheckedInMessage = getMessage("Connection checked in") + def checkedOutLogMessage = getMessage("Connection checked out") + checkedOutLogMessage ==~ "Connection checked out: " + + "address=${quoteHostnameOrIp(SERVER_ADDRESS.getHost())}:${SERVER_ADDRESS.getPort()}, " + + "driver-generated ID=${driverConnectionId}, duration=\\d+ ms" + "Checkout started for connection to ${SERVER_ADDRESS.getHost()}:${SERVER_ADDRESS.getPort()}" == checkoutStartedMessage + "Connection checked in: address=${SERVER_ADDRESS.getHost()}:${SERVER_ADDRESS.getPort()}, " + + "driver-generated ID=${driverConnectionId}" == connectionCheckedInMessage + + when: 'connection pool is cleared' + pool.invalidate(null) + then: '"connection pool cleared" log message is emitted' + def poolClearedLogMessage = getMessage("Connection pool cleared") + "Connection pool for ${SERVER_ADDRESS.getHost()}:${SERVER_ADDRESS.getPort()} cleared" == 
poolClearedLogMessage + + when: 'the maintenance task runs' + pool.doMaintenance() + //not cool - but we have no way of being notified that the maintenance task has finished + Thread.sleep(500) + then: '"connection became stale" log message is emitted' + def unstructuredMessage = getMessage("Connection closed") + "Connection closed: address=${SERVER_ADDRESS.getHost()}:${SERVER_ADDRESS.getPort()}, " + + "driver-generated ID=1. " + + "Reason: Connection became stale because the pool was cleared." == unstructuredMessage + + when: 'pool is closed' + pool.close() + then: '"connection pool closed" log message is emitted' + def poolClosedLogMessage = getMessage("Connection pool closed") + "Connection pool closed for ${SERVER_ADDRESS.getHost()}:${SERVER_ADDRESS.getPort()}" == poolClosedLogMessage + + when: 'connection checked out on closed pool' + pool.get(OPERATION_CONTEXT) + then: + thrown(MongoServerUnavailableException) + def connectionCheckoutFailedInMessage = getMessage("Connection checkout failed") + connectionCheckoutFailedInMessage ==~ "Checkout failed for connection to" + + " ${quoteHostnameOrIp(SERVER_ADDRESS.getHost())}:${SERVER_ADDRESS.getPort()}." + + " Reason: Connection pool was closed. Duration: \\d+ ms" + } + + private String getMessage(messageId) { + interceptor.getMessages() + .find { + it.getMessageId() == messageId + } + .toUnstructuredLogMessage().interpolate() + } + + + def 'should log on checkout timeout fail'() throws InterruptedException { + given: + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, + builder().maxSize(1).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + pool.ready() + + def timeoutSettings = ClusterFixture.TIMEOUT_SETTINGS.withMaxWaitTimeMS(50) + pool.get(createOperationContext(timeoutSettings)) + + when: + TimeoutTrackingConnectionGetter connectionGetter = new TimeoutTrackingConnectionGetter(pool, timeoutSettings) + new Thread(connectionGetter).start() + connectionGetter.latch.await() + + then: + connectionGetter.gotTimeout + def unstructuredMessage = getMessage("Connection checkout failed") + unstructuredMessage ==~ "Checkout failed for connection to" + + " ${quoteHostnameOrIp(SERVER_ADDRESS.getHost())}:${SERVER_ADDRESS.getPort()}." + + " Reason: Wait queue timeout elapsed without a connection becoming available." + + " Duration: \\d+ ms" + } + + def 'should log on connection becoming idle'() { + given: + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, + builder().maxSize(2).minSize(0).maxConnectionIdleTime(1, MILLISECONDS).build(), + mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + + when: + pool.ready() + pool.get(OPERATION_CONTEXT).close() + //not cool - but we have no way of waiting for connection to become idle + Thread.sleep(500) + pool.close(); + + then: + def unstructuredMessage = getMessage("Connection closed") + "Connection closed: address=${SERVER_ADDRESS.getHost()}:${SERVER_ADDRESS.getPort()}," + + " driver-generated ID=1." + + " Reason: Connection has been available but unused for longer than the configured max " + + "idle time."
== unstructuredMessage + } + + + def 'should log on connection pool cleared in load-balanced mode'() { + given: + def serviceId = new ObjectId() + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, + builder().maxSize(1) + .minSize(0) + .maxConnectionIdleTime(1, MILLISECONDS) + .build(), EmptyProvider.instance(), OPERATION_CONTEXT_FACTORY) + + when: + pool.ready() + pool.invalidate(serviceId, 1); + + then: + def poolClearedLogMessage = getMessage("Connection pool cleared") + "Connection pool for ${SERVER_ADDRESS.getHost()}:${SERVER_ADDRESS.getPort()} " + + "cleared for serviceId ${serviceId.toHexString()}" == poolClearedLogMessage + } + + def 'should log connection checkout failed with Reason.CONNECTION_ERROR if fails to open a connection'() { + given: + def listener = Mock(ConnectionPoolListener) + def connection = Mock(InternalConnection) + connection.getDescription() >> new ConnectionDescription(SERVER_ID) + connection.opened() >> false + connection.open(OPERATION_CONTEXT) >> { throw new UncheckedIOException('expected failure', new IOException()) } + connectionFactory.create(SERVER_ID, _) >> connection + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().addConnectionPoolListener(listener).build(), + mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + pool.ready() + + when: + try { + pool.get(OPERATION_CONTEXT) + } catch (UncheckedIOException e) { + if ('expected failure' != e.getMessage()) { + throw e + } + } + + then: + def unstructuredMessage = getMessage("Connection checkout failed" ) + unstructuredMessage ==~ "Checkout failed for connection to" + + " ${quoteHostnameOrIp(SERVER_ADDRESS.getHost())}:${SERVER_ADDRESS.getPort()}." + + " Reason: An error occurred while trying to establish a new connection." + + " Error: java.io.UncheckedIOException: expected failure." 
+ + " Duration: \\d+ ms" + } + + def 'should fire asynchronous connection created to pool event'() { + given: + def listener = Mock(ConnectionPoolListener) + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().maxSize(10) + .addConnectionPoolListener(listener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + + when: + pool.ready() + selectConnectionAsyncAndGet(pool) + + then: + 1 * listener.connectionCreated { it.connectionId.serverId == SERVER_ID } + 1 * listener.connectionReady { it.connectionId.serverId == SERVER_ID } + } + + def 'should fire connection removed from pool event'() { + given: + def listener = Mock(ConnectionPoolListener) + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().maxSize(10) + .addConnectionPoolListener(listener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + pool.ready() + def connection = pool.get(OPERATION_CONTEXT) + connection.close() + + when: + pool.close() + + then: + 1 * listener.connectionClosed { it.connectionId.serverId == SERVER_ID } + } + + def 'should fire asynchronous connection removed from pool event'() { + given: + def listener = Mock(ConnectionPoolListener) + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().maxSize(10) + .addConnectionPoolListener(listener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + pool.ready() + def connection = selectConnectionAsyncAndGet(pool) + connection.close() + + when: + pool.close() + + then: + 1 * listener.connectionClosed { it.connectionId.serverId == SERVER_ID } + } + + def 'should fire connection pool events on check out and check in'() { + given: + def listener = Mock(ConnectionPoolListener) + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().maxSize(1) + .addConnectionPoolListener(listener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + pool.ready() + def connection = pool.get(OPERATION_CONTEXT) + connection.close() + + when: + connection = pool.get(OPERATION_CONTEXT) + + then: + 1 * listener.connectionCheckedOut { it.connectionId.serverId == SERVER_ID } + + when: + connection.close() + + then: + 1 * listener.connectionCheckedIn { it.connectionId.serverId == SERVER_ID } + } + + def 'should fire asynchronous connection pool events on check out and check in'() { + given: + def listener = Mock(ConnectionPoolListener) + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().maxSize(1) + .addConnectionPoolListener(listener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + pool.ready() + def connection = selectConnectionAsyncAndGet(pool) + connection.close() + + when: + connection = pool.get(OPERATION_CONTEXT) + + then: + 1 * listener.connectionCheckedOut { it.connectionId.serverId == SERVER_ID } + + when: + connection.close() + + then: + 1 * listener.connectionCheckedIn { it.connectionId.serverId == SERVER_ID } + } + + def 'should fire connection checkout failed with Reason.CONNECTION_ERROR if fails to open a connection'() { + given: + def listener = Mock(ConnectionPoolListener) + def connection = Mock(InternalConnection) + connection.getDescription() >> new ConnectionDescription(SERVER_ID) + connection.opened() >> false + connection.open(OPERATION_CONTEXT) >> { throw new UncheckedIOException('expected failure', new IOException()) } + connectionFactory.create(SERVER_ID, _) >> connection + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().addConnectionPoolListener(listener).build(), + mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + 
pool.ready() + + when: + try { + pool.get(OPERATION_CONTEXT) + } catch (UncheckedIOException e) { + if ('expected failure' != e.getMessage()) { + throw e + } + } + + then: + 1 * listener.connectionCheckOutFailed { it.reason == ConnectionCheckOutFailedEvent.Reason.CONNECTION_ERROR } + } + + def 'should fire connection checkout failed with Reason.CONNECTION_ERROR if fails to open a connection asynchronously'() { + given: + def listener = Mock(ConnectionPoolListener) + def connection = Mock(InternalConnection) + connection.getDescription() >> new ConnectionDescription(SERVER_ID) + connection.opened() >> false + connection.openAsync(_, _) >> { + it.last().onResult(null, new UncheckedIOException('expected failure', new IOException())) + } + connectionFactory.create(SERVER_ID, _) >> connection + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().addConnectionPoolListener(listener).build(), + mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + pool.ready() + + when: + try { + selectConnectionAsyncAndGet(pool) + } catch (UncheckedIOException e) { + if ('expected failure' != e.getMessage()) { + throw e + } + } + + then: + 1 * listener.connectionCheckOutFailed { it.reason == ConnectionCheckOutFailedEvent.Reason.CONNECTION_ERROR } + } + + def 'should fire MongoConnectionPoolClearedException when checking out in paused state'() { + given: + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + Throwable caught = null + + when: + try { + pool.get(OPERATION_CONTEXT) + } catch (MongoConnectionPoolClearedException e) { + caught = e + } + + then: + caught != null + } + + def 'should fire MongoConnectionPoolClearedException when checking out asynchronously in paused state'() { + given: + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + CompletableFuture caught = new CompletableFuture<>() + + when: + pool.getAsync(OPERATION_CONTEXT) { InternalConnection result, Throwable t -> + if (t != null) { + caught.complete(t) + } + } + + then: + caught.isDone() + caught.get() instanceof MongoConnectionPoolClearedException + } + + def 'invalidate should record cause'() { + given: + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + RuntimeException cause = new RuntimeException() + Throwable caught = null + + when: + pool.invalidate(cause) + try { + pool.get(OPERATION_CONTEXT) + } catch (MongoConnectionPoolClearedException e) { + caught = e + } + + then: + caught.getCause().is(cause) + } + + def 'should not repeat ready/cleared events'() { + given: + def listener = Mock(ConnectionPoolListener) + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().addConnectionPoolListener(listener).build(), + mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + + when: + pool.ready() + pool.ready() + pool.invalidate(null) + pool.invalidate(new RuntimeException()) + + then: + 1 * listener.connectionPoolReady { it.getServerId() == SERVER_ID } + 1 * listener.connectionPoolCleared { it.getServerId() == SERVER_ID } + } + + def 'should continue to fire events after pool is closed'() { + def listener = Mock(ConnectionPoolListener) + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().maxSize(1) + .addConnectionPoolListener(listener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + pool.ready() + def connection = pool.get(OPERATION_CONTEXT) + 
pool.close() + + when: + connection.close() + + then: + 1 * listener.connectionCheckedIn { it.connectionId.serverId == SERVER_ID } + 1 * listener.connectionClosed { it.connectionId.serverId == SERVER_ID } + } + + def 'should continue to fire events after pool is closed (asynchronous)'() { + def listener = Mock(ConnectionPoolListener) + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, builder().maxSize(1) + .addConnectionPoolListener(listener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + pool.ready() + def connection = selectConnectionAsyncAndGet(pool) + pool.close() + + when: + connection.close() + + then: + 1 * listener.connectionCheckedIn { it.connectionId.serverId == SERVER_ID } + 1 * listener.connectionClosed { it.connectionId.serverId == SERVER_ID } + } + + def 'should select connection asynchronously if one is immediately available'() { + given: + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, + builder().maxSize(1).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + pool.ready() + + expect: + selectConnectionAsyncAndGet(pool).opened() + } + + def 'should select connection asynchronously if one is not immediately available'() { + given: + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, + builder().maxSize(1).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + pool.ready() + + when: + def connection = pool.get(OPERATION_CONTEXT) + def connectionLatch = selectConnectionAsync(pool) + connection.close() + + then: + connectionLatch.get() + } + + def 'when getting a connection asynchronously should send MongoTimeoutException to callback after timeout period'() { + given: + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, + builder().maxSize(1).maxWaitTime(5, MILLISECONDS).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + pool.ready() + pool.get(OPERATION_CONTEXT) + def firstConnectionLatch = selectConnectionAsync(pool) + def secondConnectionLatch = selectConnectionAsync(pool) + + when: + firstConnectionLatch.get() + + then: + thrown(MongoTimeoutException) + + when: + secondConnectionLatch.get() + + then: + thrown(MongoTimeoutException) + } + + def 'invalidate should do nothing when pool is closed'() { + given: + pool = new DefaultConnectionPool(SERVER_ID, connectionFactory, + builder().maxSize(1).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + pool.close() + + when: + pool.invalidate(null) + + then: + noExceptionThrown() + } + + def selectConnectionAsyncAndGet(DefaultConnectionPool pool) { + selectConnectionAsync(pool).get() + } + + def selectConnectionAsync(DefaultConnectionPool pool) { + def serverLatch = new ConnectionLatch() + pool.getAsync(OPERATION_CONTEXT) { InternalConnection result, Throwable e -> + serverLatch.connection = result + serverLatch.throwable = e + serverLatch.latch.countDown() + } + serverLatch + } + + private mockSdamProvider() { + SameObjectProvider.initialized(Mock(SdamServerDescriptionManager)) + } + + private static quoteHostnameOrIp(String hostnameOrIp) { + hostnameOrIp.replaceAll(Pattern.quote("."), Matcher.quoteReplacement("\\.")) + } + + class ConnectionLatch { + CountDownLatch latch = new CountDownLatch(1) + InternalConnection connection + Throwable throwable + + def get() { + latch.await() + if (throwable != null) { + throw throwable + } + connection + } + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultDnsSrvRecordMonitorSpecification.groovy 
b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultDnsSrvRecordMonitorSpecification.groovy new file mode 100644 index 00000000000..4839fa53024 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultDnsSrvRecordMonitorSpecification.groovy @@ -0,0 +1,189 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection + +import com.mongodb.MongoConfigurationException +import com.mongodb.MongoException +import com.mongodb.ServerAddress +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ClusterType +import com.mongodb.internal.dns.DnsResolver +import spock.lang.Specification + +import java.util.concurrent.CountDownLatch +import java.util.concurrent.TimeUnit + +class DefaultDnsSrvRecordMonitorSpecification extends Specification { + def 'should resolve initial hosts'() { + given: + def hostName = 'test1.test.build.10gen.cc' + def srvServiceName = 'mongodb' + def resolvedHostOne = 'localhost.test.build.10gen.cc:27017' + def resolvedHostTwo = 'localhost.test.build.10gen.cc:27018' + def expectedResolvedHosts = [resolvedHostOne, resolvedHostTwo] + def dnsSrvRecordInitializer = new TestDnsSrvRecordInitializer(ClusterType.REPLICA_SET, 1) + def dnsResolver = Mock(DnsResolver) { + 1 * resolveHostFromSrvRecords(hostName, srvServiceName) >> expectedResolvedHosts + } + def monitor = new DefaultDnsSrvRecordMonitor(hostName, srvServiceName, 1, 10000, dnsSrvRecordInitializer, new ClusterId(), + dnsResolver) + + when: + monitor.start() + def hostsLists = dnsSrvRecordInitializer.waitForInitializedHosts() + + then: + hostsLists == [[new ServerAddress(resolvedHostOne), new ServerAddress(resolvedHostTwo)] as Set] + + cleanup: + monitor.close() + } + + def 'should discover new resolved hosts'() { + given: + def hostName = 'test1.test.build.10gen.cc' + def srvServiceName = 'mongodb' + def resolvedHostOne = 'localhost.test.build.10gen.cc:27017' + def resolvedHostTwo = 'localhost.test.build.10gen.cc:27018' + def resolvedHostThree = 'localhost.test.build.10gen.cc:27019' + def expectedResolvedHostsOne = [resolvedHostOne, resolvedHostTwo] + def expectedResolvedHostsTwo = [resolvedHostTwo, resolvedHostThree] + def dnsSrvRecordInitializer = new TestDnsSrvRecordInitializer(ClusterType.SHARDED, 2) + def dnsResolver = Mock(DnsResolver) { + _ * resolveHostFromSrvRecords(hostName, srvServiceName) >>> [expectedResolvedHostsOne, expectedResolvedHostsTwo] + } + def monitor = new DefaultDnsSrvRecordMonitor(hostName, srvServiceName, 1, 1, dnsSrvRecordInitializer, new ClusterId(), dnsResolver) + + when: + monitor.start() + def hostsLists = dnsSrvRecordInitializer.waitForInitializedHosts() + + then: + hostsLists == [[new ServerAddress(resolvedHostOne), new ServerAddress(resolvedHostTwo)] as Set, + [new ServerAddress(resolvedHostTwo), new ServerAddress(resolvedHostThree)] as Set] + + cleanup: + monitor.close() + } + + def 'should initialize listener with exception'() { + given: + def 
hostName = 'test1.test.build.10gen.cc' + def srvServiceName = 'mongodb' + def dnsSrvRecordInitializer = new TestDnsSrvRecordInitializer(ClusterType.UNKNOWN, 1) + def dnsResolver = Mock(DnsResolver) { + _ * resolveHostFromSrvRecords(hostName, srvServiceName) >> { + throw initializationException + } + } + def monitor = new DefaultDnsSrvRecordMonitor(hostName, srvServiceName, 1, 10000, dnsSrvRecordInitializer, new ClusterId(), + dnsResolver) + + when: + monitor.start() + def initializationExceptionList = dnsSrvRecordInitializer.waitForInitializedException() + if (!(initializationException instanceof MongoException)) { + initializationExceptionList[0] = initializationExceptionList[0].getCause() + } + + then: + initializationExceptionList == [initializationException] + + cleanup: + monitor.close() + + where: + initializationException << [new MongoConfigurationException('test'), new NullPointerException()] + } + + def 'should not initialize listener with exception after successful initialization'() { + given: + def hostName = 'test1.test.build.10gen.cc' + def srvServiceName = 'mongodb' + def resolvedHostListOne = ['localhost.test.build.10gen.cc:27017'] + def resolvedHostListTwo = ['localhost.test.build.10gen.cc:27018'] + def dnsSrvRecordInitializer = new TestDnsSrvRecordInitializer(ClusterType.SHARDED, 2) + def dnsResolver = Mock(DnsResolver) { + _ * resolveHostFromSrvRecords(hostName, srvServiceName) >> resolvedHostListOne >> { + throw initializationException } >> resolvedHostListTwo + } + def monitor = new DefaultDnsSrvRecordMonitor(hostName, srvServiceName, 1, 1, + dnsSrvRecordInitializer, new ClusterId(), dnsResolver) + + when: + monitor.start() + def initializedExceptionList = dnsSrvRecordInitializer.waitForInitializedException() + + then: + initializedExceptionList == [] + + cleanup: + monitor.close() + + where: + initializationException << [new MongoConfigurationException('test'), new NullPointerException()] + } + + // Can't use a mock because we need to coordinate the monitor thread with the test via thread synchronization + static class TestDnsSrvRecordInitializer implements DnsSrvRecordInitializer { + + ClusterType clusterType + List<Collection<ServerAddress>> hostsList = [] + List initializationExceptionList = [] + CountDownLatch latch + + TestDnsSrvRecordInitializer(ClusterType clusterType, int expectedInitializations) { + this.clusterType = clusterType + latch = new CountDownLatch(expectedInitializations) + } + + List<Collection<ServerAddress>> waitForInitializedHosts() { + if (!latch.await(5, TimeUnit.SECONDS)) { + throw new AssertionError('Timeout waiting for latch') + } + hostsList + } + + List waitForInitializedException() { + if (!latch.await(5, TimeUnit.SECONDS)) { + throw new AssertionError('Timeout waiting for latch') + } + initializationExceptionList + } + + @Override + void initialize(final Collection<ServerAddress> hosts) { + if (latch.count > 0) { + hostsList.add(hosts) + latch.countDown() + } + } + + @Override + void initialize(final MongoException initializationException) { + if (latch.count > 0) { + initializationExceptionList.add(initializationException) + latch.countDown() + } + } + + @Override + ClusterType getClusterType() { + clusterType + } + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerConnectionSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerConnectionSpecification.groovy new file mode 100644 index 00000000000..be6fbe06b83 --- /dev/null +++
b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerConnectionSpecification.groovy @@ -0,0 +1,56 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection + + +import com.mongodb.ReadPreference +import com.mongodb.connection.ClusterConnectionMode +import com.mongodb.internal.async.SingleResultCallback +import com.mongodb.internal.diagnostics.logging.Logger +import com.mongodb.internal.validator.NoOpFieldNameValidator +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.codecs.BsonDocumentCodec +import spock.lang.Specification + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.CustomMatchers.compare +import static com.mongodb.internal.async.ErrorHandlingResultCallback.errorHandlingCallback +import static com.mongodb.internal.connection.MessageHelper.LEGACY_HELLO_LOWER + +class DefaultServerConnectionSpecification extends Specification { + def internalConnection = Mock(InternalConnection) + def callback = errorHandlingCallback(Mock(SingleResultCallback), Mock(Logger)) + + def 'should execute command protocol asynchronously'() { + given: + def command = new BsonDocument(LEGACY_HELLO_LOWER, new BsonInt32(1)) + def validator = NoOpFieldNameValidator.INSTANCE + def codec = new BsonDocumentCodec() + def executor = Mock(ProtocolExecutor) + def connection = new DefaultServerConnection(internalConnection, executor, ClusterConnectionMode.MULTIPLE) + + when: + connection.commandAsync('test', command, validator, ReadPreference.primary(), codec, OPERATION_CONTEXT, callback) + + then: + 1 * executor.executeAsync({ + compare(new CommandProtocolImpl('test', command, validator, ReadPreference.primary(), codec, true, + MessageSequences.EmptyMessageSequences.INSTANCE, ClusterConnectionMode.MULTIPLE, OPERATION_CONTEXT), it) + }, internalConnection, OPERATION_CONTEXT.getSessionContext(), callback) + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerMonitorSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerMonitorSpecification.groovy new file mode 100644 index 00000000000..e69de29bb2d diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerMonitorTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerMonitorTest.java new file mode 100644 index 00000000000..3aff244ea1e --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerMonitorTest.java @@ -0,0 +1,307 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.connection; + +import com.mongodb.MongoSocketReadTimeoutException; +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ServerConnectionState; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerId; +import com.mongodb.connection.ServerSettings; +import com.mongodb.connection.ServerType; +import com.mongodb.event.ServerHeartbeatFailedEvent; +import com.mongodb.event.ServerHeartbeatStartedEvent; +import com.mongodb.event.ServerHeartbeatSucceededEvent; +import com.mongodb.event.ServerMonitorListener; +import com.mongodb.event.TestServerMonitorListener; +import com.mongodb.internal.inject.SameObjectProvider; +import org.bson.BsonDocument; +import org.bson.ByteBufNIO; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; +import org.opentest4j.AssertionFailedError; + +import java.io.IOException; +import java.net.SocketException; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT_FACTORY; +import static com.mongodb.assertions.Assertions.assertFalse; +import static com.mongodb.internal.connection.MessageHelper.LEGACY_HELLO_LOWER; +import static java.util.Arrays.asList; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + + +public class DefaultServerMonitorTest { + + private DefaultServerMonitor monitor; + + @AfterEach + void tearDown() throws InterruptedException { + if (monitor != null) { + monitor.close(); + monitor.getServerMonitor().join(); + } + } + + @Test + void closeShouldNotSendStateChangedEvent() throws Exception { + // Given + AtomicBoolean stateChanged = new AtomicBoolean(false); + + SdamServerDescriptionManager sdamManager = new SdamServerDescriptionManager() { + + @Override + public void monitorUpdate(final ServerDescription candidateDescription) { + assertNotNull(candidateDescription); + stateChanged.set(true); + } + + @Override + public void updateToUnknown(final ServerDescription candidateDescription) { + assertNotNull(candidateDescription); + stateChanged.set(true); + } + + @Override + public void handleExceptionBeforeHandshake(final SdamServerDescriptionManager.SdamIssue sdamIssue) { + throw new UnsupportedOperationException(); + } + + @Override + public void handleExceptionAfterHandshake(final SdamServerDescriptionManager.SdamIssue sdamIssue) { + throw new UnsupportedOperationException(); + } + + 
@Override + public SdamServerDescriptionManager.SdamIssue.Context context() { + throw new UnsupportedOperationException(); + } + + @Override + public SdamServerDescriptionManager.SdamIssue.Context context(final InternalConnection connection) { + throw new UnsupportedOperationException(); + } + }; + + InternalConnection mockConnection = mock(InternalConnection.class); + doAnswer(invocation -> { + Thread.sleep(100); + return null; + }).when(mockConnection).open(any()); + + InternalConnectionFactory factory = createConnectionFactory(mockConnection); + + monitor = new DefaultServerMonitor( + new ServerId(new ClusterId(), new ServerAddress()), + ServerSettings.builder().build(), + factory, + ClusterConnectionMode.SINGLE, + null, + false, + SameObjectProvider.initialized(sdamManager), + OPERATION_CONTEXT_FACTORY); + + // When + monitor.start(); + monitor.close(); + + // Then + assertFalse(stateChanged.get()); + } + + @Test + void shouldSendStartedAndSucceededHeartbeatEvents() throws Exception { + // Given + ConnectionDescription connectionDescription = createDefaultConnectionDescription(); + ServerDescription initialServerDescription = createDefaultServerDescription(); + + String helloResponse = "{" + + LEGACY_HELLO_LOWER + ": true," + + "maxBsonObjectSize : 16777216, " + + "maxMessageSizeBytes : 48000000, " + + "maxWriteBatchSize : 1000, " + + "localTime : ISODate(\"2016-04-05T20:36:36.082Z\"), " + + "maxWireVersion : 4, " + + "minWireVersion : 0, " + + "ok : 1 " + + "}"; + + InternalConnection mockConnection = mock(InternalConnection.class); + when(mockConnection.getDescription()).thenReturn(connectionDescription); + when(mockConnection.getInitialServerDescription()).thenReturn(initialServerDescription); + when(mockConnection.getBuffer(anyInt())).thenReturn(new ByteBufNIO(ByteBuffer.allocate(1024))); + when(mockConnection.receive(any(), any())).thenReturn(BsonDocument.parse(helloResponse)); + + // When + TestServerMonitorListener listener = createTestServerMonitorListener(); + monitor = createAndStartMonitor(createConnectionFactory(mockConnection), listener); + + listener.waitForEvents(ServerHeartbeatSucceededEvent.class, event -> true, 1, Duration.ofSeconds(30)); + ServerHeartbeatStartedEvent startedEvent = getEvent(ServerHeartbeatStartedEvent.class, listener); + ServerHeartbeatSucceededEvent succeededEvent = getEvent(ServerHeartbeatSucceededEvent.class, listener); + + // Then + assertEquals(connectionDescription.getConnectionId(), startedEvent.getConnectionId()); + assertEquals(connectionDescription.getConnectionId(), succeededEvent.getConnectionId()); + assertEquals(BsonDocument.parse(helloResponse), succeededEvent.getReply()); + assertTrue(succeededEvent.getElapsedTime(TimeUnit.NANOSECONDS) > 0); + } + + @Test + void shouldSendStartedAndFailedHeartbeatEvents() throws Exception { + // Given + ConnectionDescription connectionDescription = createDefaultConnectionDescription(); + ServerDescription initialServerDescription = createDefaultServerDescription(); + MongoSocketReadTimeoutException exception = new MongoSocketReadTimeoutException("read timeout", + new ServerAddress(), new IOException()); + + InternalConnection mockConnection = mock(InternalConnection.class); + when(mockConnection.getDescription()).thenReturn(connectionDescription); + when(mockConnection.getInitialServerDescription()).thenReturn(initialServerDescription); + when(mockConnection.getBuffer(anyInt())).thenReturn(new ByteBufNIO(ByteBuffer.allocate(1024))); + when(mockConnection.receive(any(), any())).thenThrow(exception); + 
+ // When + TestServerMonitorListener listener = createTestServerMonitorListener(); + monitor = createAndStartMonitor(createConnectionFactory(mockConnection), listener); + + listener.waitForEvents(ServerHeartbeatFailedEvent.class, event -> true, 1, Duration.ofSeconds(30)); + ServerHeartbeatStartedEvent startedEvent = getEvent(ServerHeartbeatStartedEvent.class, listener); + ServerHeartbeatFailedEvent failedEvent = getEvent(ServerHeartbeatFailedEvent.class, listener); + + // Then + assertEquals(connectionDescription.getConnectionId(), startedEvent.getConnectionId()); + assertEquals(connectionDescription.getConnectionId(), failedEvent.getConnectionId()); + assertEquals(exception, failedEvent.getThrowable()); + assertTrue(failedEvent.getElapsedTime(TimeUnit.NANOSECONDS) > 0); + } + + @Test + void shouldEmitHeartbeatStartedBeforeSocketIsConnected() throws Exception { + // Given + InternalConnection mockConnection = mock(InternalConnection.class); + CountDownLatch latch = new CountDownLatch(1); + List<String> events = new ArrayList<>(); + ServerMonitorListener listener = new ServerMonitorListener() { + @Override + public void serverHearbeatStarted(final ServerHeartbeatStartedEvent event) { + events.add("serverHeartbeatStartedEvent"); + } + + @Override + public void serverHeartbeatSucceeded(final ServerHeartbeatSucceededEvent event) { + events.add("serverHeartbeatSucceededEvent"); + latch.countDown(); + } + + @Override + public void serverHeartbeatFailed(final ServerHeartbeatFailedEvent event) { + events.add("serverHeartbeatFailedEvent"); + latch.countDown(); + } + }; + + doAnswer(invocation -> { + events.add("client connected"); + return null; + }).when(mockConnection).open(any()); + + when(mockConnection.getBuffer(anyInt())).thenReturn(new ByteBufNIO(ByteBuffer.allocate(1024))); + when(mockConnection.getDescription()).thenReturn(createDefaultConnectionDescription()); + when(mockConnection.getInitialServerDescription()).thenReturn(createDefaultServerDescription()); + + doAnswer(invocation -> { + events.add("client hello received"); + throw new SocketException("Socket error"); + }).when(mockConnection).receive(any(), any()); + + // When + monitor = createAndStartMonitor(createConnectionFactory(mockConnection), listener); + assertTrue(latch.await(5, TimeUnit.SECONDS), "Timed out waiting for heartbeat"); + + // Then + List<String> expectedEvents = asList("serverHeartbeatStartedEvent", "client connected", "client hello received", "serverHeartbeatFailedEvent"); + assertEquals(expectedEvents, events); + } + + + private InternalConnectionFactory createConnectionFactory(final InternalConnection connection) { + InternalConnectionFactory factory = mock(InternalConnectionFactory.class); + when(factory.create(any())).thenReturn(connection); + return factory; + } + + private ServerDescription createDefaultServerDescription() { + return ServerDescription.builder() + .ok(true) + .address(new ServerAddress()) + .type(ServerType.STANDALONE) + .state(ServerConnectionState.CONNECTED) + .build(); + } + + private ConnectionDescription createDefaultConnectionDescription() { + return new ConnectionDescription(new ServerId(new ClusterId(""), new ServerAddress())); + } + + private DefaultServerMonitor createAndStartMonitor(final InternalConnectionFactory factory, final ServerMonitorListener listener) { + DefaultServerMonitor monitor = new DefaultServerMonitor( + new ServerId(new ClusterId(), new ServerAddress()), + ServerSettings.builder() + .heartbeatFrequency(500, TimeUnit.MILLISECONDS) + .addServerMonitorListener(listener) +
.build(), + factory, + ClusterConnectionMode.SINGLE, + null, + false, + SameObjectProvider.initialized(mock(SdamServerDescriptionManager.class)), + OPERATION_CONTEXT_FACTORY); + monitor.start(); + return monitor; + } + + private <T> T getEvent(final Class<T> clazz, final TestServerMonitorListener listener) { + return listener.getEvents() + .stream() + .filter(clazz::isInstance) + .map(clazz::cast) + .findFirst() + .orElseThrow(AssertionFailedError::new); + } + + private TestServerMonitorListener createTestServerMonitorListener() { + return new TestServerMonitorListener(asList("serverHeartbeatStartedEvent", "serverHeartbeatSucceededEvent", + "serverHeartbeatFailedEvent")); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerSpecification.groovy new file mode 100644 index 00000000000..6552a69a70d --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultServerSpecification.groovy @@ -0,0 +1,465 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection + + +import com.mongodb.MongoException +import com.mongodb.MongoNodeIsRecoveringException +import com.mongodb.MongoNotPrimaryException +import com.mongodb.MongoSecurityException +import com.mongodb.MongoServerUnavailableException +import com.mongodb.MongoSocketException +import com.mongodb.MongoSocketOpenException +import com.mongodb.MongoSocketReadException +import com.mongodb.MongoSocketReadTimeoutException +import com.mongodb.MongoSocketWriteException +import com.mongodb.MongoStalePrimaryException +import com.mongodb.ReadPreference +import com.mongodb.ServerAddress +import com.mongodb.client.syncadapter.SupplyingCallback +import com.mongodb.connection.ClusterConnectionMode +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ClusterSettings +import com.mongodb.connection.ServerConnectionState +import com.mongodb.connection.ServerDescription +import com.mongodb.connection.ServerId +import com.mongodb.connection.ServerType +import com.mongodb.event.CommandListener +import com.mongodb.event.ServerDescriptionChangedEvent +import com.mongodb.event.ServerListener +import com.mongodb.internal.TimeoutContext +import com.mongodb.internal.async.SingleResultCallback +import com.mongodb.internal.inject.SameObjectProvider +import com.mongodb.internal.session.SessionContext +import com.mongodb.internal.time.Timeout +import com.mongodb.internal.validator.NoOpFieldNameValidator +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.codecs.BsonDocumentCodec +import spock.lang.Specification + +import java.util.concurrent.CountDownLatch + +import static com.mongodb.ClusterFixture.CLIENT_METADATA +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.MongoCredential.createCredential +import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE
+import static com.mongodb.connection.ClusterConnectionMode.SINGLE + +class DefaultServerSpecification extends Specification { + def serverId = new ServerId(new ClusterId(), new ServerAddress()) + + def 'should get a connection'() { + given: + def connectionPool = Stub(ConnectionPool) + def connectionFactory = Mock(ConnectionFactory) + def internalConnection = Stub(InternalConnection) + def connection = Stub(Connection) + + connectionPool.get(_) >> { internalConnection } + def server = new DefaultServer(serverId, mode, connectionPool, connectionFactory, Mock(ServerMonitor), + Mock(SdamServerDescriptionManager), Mock(ServerListener), Mock(CommandListener), new ClusterClock(), false) + + when: + def receivedConnection = server.getConnection(OPERATION_CONTEXT) + + then: + receivedConnection + 1 * connectionFactory.create(internalConnection, _, mode) >> connection + + where: + mode << [SINGLE, MULTIPLE] + } + + def 'should get a connection asynchronously'() { + given: + def connectionPool = Stub(ConnectionPool) + def connectionFactory = Mock(ConnectionFactory) + def internalConnection = Stub(InternalConnection) + def connection = Stub(AsyncConnection) + + connectionPool.getAsync(_, _) >> { + it.last().onResult(internalConnection, null) + } + + def server = new DefaultServer(serverId, mode, connectionPool, connectionFactory, Mock(ServerMonitor), + Mock(SdamServerDescriptionManager), Mock(ServerListener), Mock(CommandListener), new ClusterClock(), false) + + when: + def callback = new SupplyingCallback() + server.getConnectionAsync(OPERATION_CONTEXT, callback) + + then: + callback.get() == connection + 1 * connectionFactory.createAsync(_, _, mode) >> connection + + where: + mode << [SINGLE, MULTIPLE] + } + + def 'should throw MongoServerUnavailableException getting a connection when the server is closed'() { + given: + def server = new DefaultServer(serverId, SINGLE, Stub(ConnectionPool), Stub(ConnectionFactory), Mock(ServerMonitor), + Stub(SdamServerDescriptionManager), Stub(ServerListener), Stub(CommandListener), new ClusterClock(), false) + server.close() + + when: + server.getConnection(OPERATION_CONTEXT) + + then: + def ex = thrown(MongoServerUnavailableException) + ex.message == 'The server at 127.0.0.1:27017 is no longer available' + + when: + def latch = new CountDownLatch(1) + def receivedConnection = null + def receivedThrowable = null + server.getConnectionAsync(OPERATION_CONTEXT) { + result, throwable -> + receivedConnection = result; receivedThrowable = throwable; latch.countDown() + } + latch.await() + + then: + !receivedConnection + receivedThrowable instanceof MongoServerUnavailableException + receivedThrowable.message == 'The server at 127.0.0.1:27017 is no longer available' + } + + def 'invalidate should invoke server listeners'() { + given: + def serverListener = Mock(ServerListener) + def connectionPool = Mock(ConnectionPool) + def sdamProvider = SameObjectProvider.uninitialized() + def serverMonitor = new TestServerMonitor(sdamProvider) + sdamProvider.initialize(new DefaultSdamServerDescriptionManager(mockCluster(), serverId, serverListener, serverMonitor, + connectionPool, ClusterConnectionMode.MULTIPLE)) + def server = defaultServer(Mock(ConnectionPool), serverMonitor, serverListener, sdamProvider.get(), Mock(CommandListener)) + serverMonitor.updateServerDescription(ServerDescription.builder() + .address(serverId.getAddress()) + .ok(true) + .state(ServerConnectionState.CONNECTED) + .type(ServerType.STANDALONE) + .build()) + + when: + server.invalidate(exceptionToThrow) + + 
then: + 1 * serverListener.serverDescriptionChanged(_) + + cleanup: + server?.close() + + where: + exceptionToThrow << [ + new MongoStalePrimaryException(""), + new MongoNotPrimaryException(new BsonDocument(), new ServerAddress()), + new MongoNodeIsRecoveringException(new BsonDocument(), new ServerAddress()), + new MongoSocketException("", new ServerAddress()), + new MongoWriteConcernWithResponseException(new MongoException(""), new Object()) + ] + } + + def 'invalidate should not invoke server listeners'() { + given: + def serverListener = Mock(ServerListener) + def connectionPool = Mock(ConnectionPool) + def sdamProvider = SameObjectProvider.<SdamServerDescriptionManager> uninitialized() + def serverMonitor = new TestServerMonitor(sdamProvider) + sdamProvider.initialize(new DefaultSdamServerDescriptionManager(mockCluster(), serverId, serverListener, serverMonitor, + connectionPool, ClusterConnectionMode.MULTIPLE)) + def server = defaultServer(Mock(ConnectionPool), serverMonitor, serverListener, sdamProvider.get(), Mock(CommandListener)) + serverMonitor.updateServerDescription(ServerDescription.builder() + .address(serverId.getAddress()) + .ok(true) + .state(ServerConnectionState.CONNECTED) + .type(ServerType.STANDALONE) + .build()) + + when: + server.invalidate(exceptionToThrow) + + then: + 0 * serverListener.serverDescriptionChanged(_) + + cleanup: + server?.close() + + where: + exceptionToThrow << [ + new MongoException(""), + new MongoSecurityException(createCredential("jeff", "admin", "123".toCharArray()), "Auth failed"), + ] + } + + def 'invalidate should do nothing when server is closed for any exception'() { + given: + def connectionPool = Mock(ConnectionPool) + def serverMonitor = Mock(ServerMonitor) + connectionPool.get(OPERATION_CONTEXT) >> { throw exceptionToThrow } + + def server = defaultServer(connectionPool, serverMonitor) + server.close() + + when: + server.invalidate(exceptionToThrow) + + then: + 0 * connectionPool.invalidate(null) + 0 * serverMonitor.connect() + + where: + exceptionToThrow << [ + new MongoStalePrimaryException(""), + new MongoNotPrimaryException(new BsonDocument(), new ServerAddress()), + new MongoNodeIsRecoveringException(new BsonDocument(), new ServerAddress()), + new MongoSocketException("", new ServerAddress()), + new MongoWriteConcernWithResponseException(new MongoException(""), new Object()), + new MongoException(""), + new MongoSecurityException(createCredential("jeff", "admin", "123".toCharArray()), "Auth failed"), + ] + } + + def 'failed open should invalidate the server'() { + given: + def connectionPool = Mock(ConnectionPool) + connectionPool.get(_) >> { throw exceptionToThrow } + def serverMonitor = Mock(ServerMonitor) + def server = defaultServer(connectionPool, serverMonitor) + + when: + server.getConnection(OPERATION_CONTEXT) + + then: + def e = thrown(MongoException) + e.is(exceptionToThrow) + 1 * connectionPool.invalidate(exceptionToThrow) + 1 * serverMonitor.cancelCurrentCheck() + + where: + exceptionToThrow << [ + new MongoSocketOpenException('open failed', new ServerAddress(), new IOException()), + new MongoSocketWriteException('Write failed', new ServerAddress(), new IOException()), + new MongoSocketReadException('Read failed', new ServerAddress(), new IOException()), + new MongoSocketReadTimeoutException('Read timed out', new ServerAddress(), new IOException()), + ] + } + + def 'failed authentication should invalidate the connection pool'() { + given: + def connectionPool = Mock(ConnectionPool) + connectionPool.get(_) >> { throw exceptionToThrow } + def
serverMonitor = Mock(ServerMonitor) + def server = defaultServer(connectionPool, serverMonitor) + + when: + server.getConnection(OPERATION_CONTEXT) + + then: + def e = thrown(MongoSecurityException) + e.is(exceptionToThrow) + 1 * connectionPool.invalidate(exceptionToThrow) + 0 * serverMonitor.connect() + + where: + exceptionToThrow << [ + new MongoSecurityException(createCredential('jeff', 'admin', '123'.toCharArray()), 'Auth failed'), + ] + } + + def 'failed open should invalidate the server asynchronously'() { + given: + def connectionPool = Mock(ConnectionPool) + connectionPool.getAsync(_, _) >> { it.last().onResult(null, exceptionToThrow) } + def serverMonitor = Mock(ServerMonitor) + def server = defaultServer(connectionPool, serverMonitor) + + when: + def latch = new CountDownLatch(1) + def receivedConnection = null + def receivedThrowable = null + server.getConnectionAsync(OPERATION_CONTEXT) { + result, throwable -> + receivedConnection = result; receivedThrowable = throwable; latch.countDown() + } + latch.await() + + then: + !receivedConnection + receivedThrowable.is(exceptionToThrow) + 1 * connectionPool.invalidate(exceptionToThrow) + 1 * serverMonitor.cancelCurrentCheck() + + + where: + exceptionToThrow << [ + new MongoSocketOpenException('open failed', new ServerAddress(), new IOException()), + new MongoSocketWriteException('Write failed', new ServerAddress(), new IOException()), + new MongoSocketReadException('Read failed', new ServerAddress(), new IOException()), + new MongoSocketReadTimeoutException('Read timed out', new ServerAddress(), new IOException()), + ] + } + + def 'failed auth should invalidate the connection pool asynchronously'() { + given: + def connectionPool = Mock(ConnectionPool) + connectionPool.getAsync(_, _) >> { it.last().onResult(null, exceptionToThrow) } + def serverMonitor = Mock(ServerMonitor) + def server = defaultServer(connectionPool, serverMonitor) + + when: + def latch = new CountDownLatch(1) + def receivedConnection = null + def receivedThrowable = null + server.getConnectionAsync(OPERATION_CONTEXT) { + result, throwable -> + receivedConnection = result; receivedThrowable = throwable; latch.countDown() + } + latch.await() + + then: + !receivedConnection + receivedThrowable.is(exceptionToThrow) + 1 * connectionPool.invalidate(exceptionToThrow) + 0 * serverMonitor.connect() + + + where: + exceptionToThrow << [ + new MongoSecurityException(createCredential('jeff', 'admin', '123'.toCharArray()), 'Auth failed'), + ] + } + + def 'should propagate cluster time'() { + given: + def clusterClock = new ClusterClock() + clusterClock.advance(clusterClockClusterTime) + def server = new DefaultServer(serverId, SINGLE, Mock(ConnectionPool), new TestConnectionFactory(), Mock(ServerMonitor), + Mock(SdamServerDescriptionManager), Mock(ServerListener), Mock(CommandListener), clusterClock, false) + def testConnection = (TestConnection) server.getConnection(OPERATION_CONTEXT) + def sessionContext = new TestSessionContext(initialClusterTime) + def response = BsonDocument.parse( + '''{ + ok : 1, + operationTime : { $timestamp : { "t" : 50, "i" : 20 } }, + $clusterTime : { clusterTime : { $timestamp : { "t" : 42, "i" : 21 } } } + } + ''') + def protocol = new TestCommandProtocol(response) + testConnection.enqueueProtocol(protocol) + def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext) + + when: + if (async) { + CountDownLatch latch = new CountDownLatch(1) + testConnection.commandAsync('admin', new BsonDocument('ping', new BsonInt32(1)),
NoOpFieldNameValidator.INSTANCE, + ReadPreference.primary(), new BsonDocumentCodec(), operationContext) { + BsonDocument result, Throwable t -> latch.countDown() + } + latch.await() + } else { + testConnection.command('admin', new BsonDocument('ping', new BsonInt32(1)), NoOpFieldNameValidator.INSTANCE, + ReadPreference.primary(), new BsonDocumentCodec(), operationContext) + } + + then: + clusterClock.getCurrent() == response.getDocument('$clusterTime') + protocol.contextClusterTime == (initialClusterTime.getTimestamp('clusterTime') + .compareTo(clusterClockClusterTime.getTimestamp('clusterTime')) > 0 ? initialClusterTime : clusterClockClusterTime) + sessionContext.clusterTime == response.getDocument('$clusterTime') + sessionContext.operationTime == response.getTimestamp('operationTime') + + where: + [async, initialClusterTime, clusterClockClusterTime] << [ + [false, true], + [ + BsonDocument.parse('{clusterTime : {$timestamp : {"t" : 21, "i" : 11 } } }'), + BsonDocument.parse('{clusterTime : {$timestamp : {"t" : 42, "i" : 11 } } }') + ], + [ + BsonDocument.parse('{clusterTime : {$timestamp : {"t" : 21, "i" : 11 } } }'), + BsonDocument.parse('{clusterTime : {$timestamp : {"t" : 42, "i" : 11 } } }') + ] + ].combinations() + } + + private DefaultServer defaultServer(final ConnectionPool connectionPool, final ServerMonitor serverMonitor) { + def serverListener = Mock(ServerListener) + defaultServer(connectionPool, serverMonitor, serverListener, + new DefaultSdamServerDescriptionManager(mockCluster(), serverId, serverListener, serverMonitor, connectionPool, + ClusterConnectionMode.MULTIPLE), + Mock(CommandListener)) + } + + private DefaultServer defaultServer(final ConnectionPool connectionPool, final ServerMonitor serverMonitor, + final ServerListener serverListener, + final SdamServerDescriptionManager sdam, final CommandListener commandListener) { + serverMonitor.start() + new DefaultServer(serverId, SINGLE, connectionPool, new TestConnectionFactory(), serverMonitor, + sdam, serverListener, commandListener, new ClusterClock(), false) + } + + class TestCommandProtocol implements CommandProtocol { + private final BsonDocument commandResult + private final BsonDocument responseDocument + private BsonDocument contextClusterTime + + TestCommandProtocol(BsonDocument result) { + this.commandResult = result + this.responseDocument = result + } + + @Override + BsonDocument execute(final InternalConnection connection) { + commandResult + } + + @Override + void executeAsync(final InternalConnection connection, final SingleResultCallback callback) { + callback.onResult(commandResult, null) + } + + @Override + TestCommandProtocol withSessionContext(final SessionContext sessionContext) { + contextClusterTime = sessionContext.clusterTime + sessionContext.advanceClusterTime(responseDocument.getDocument('$clusterTime')) + sessionContext.advanceOperationTime(responseDocument.getTimestamp('operationTime')) + this + } + } + + private Cluster mockCluster() { + new BaseCluster(new ClusterId(), ClusterSettings.builder().build(), Mock(ClusterableServerFactory), CLIENT_METADATA) { + @Override + protected void connect() { + } + + @Override + Cluster.ServersSnapshot getServersSnapshot(final Timeout serverSelectionTimeout, final TimeoutContext timeoutContext) { + Cluster.ServersSnapshot result = { + serverAddress -> throw new UnsupportedOperationException() + } + result + } + + @Override + void onChange(final ServerDescriptionChangedEvent event) { + } + } + } +} diff --git 
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultTestClusterableServerFactory.java b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultTestClusterableServerFactory.java new file mode 100644 index 00000000000..27a3f8b7a73 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/DefaultTestClusterableServerFactory.java @@ -0,0 +1,73 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerId; +import com.mongodb.connection.ServerSettings; +import com.mongodb.event.ServerListener; +import com.mongodb.internal.inject.SameObjectProvider; + +import java.util.HashMap; +import java.util.Map; + +public class DefaultTestClusterableServerFactory implements ClusterableServerFactory { + private final ServerSettings settings = ServerSettings.builder().build(); + private final ClusterConnectionMode clusterConnectionMode; + private final ServerListenerFactory serverListenerFactory; + private final Map<ServerAddress, TestServerMonitor> serverAddressToServerMonitorMap = new HashMap<>(); + + public DefaultTestClusterableServerFactory(final ClusterConnectionMode clusterConnectionMode, + final ServerListenerFactory serverListenerFactory) { + this.clusterConnectionMode = clusterConnectionMode; + this.serverListenerFactory = serverListenerFactory; + } + + @Override + public ClusterableServer create(final Cluster cluster, final ServerAddress serverAddress) { + ServerId serverId = new ServerId(cluster.getClusterId(), serverAddress); + if (clusterConnectionMode == ClusterConnectionMode.LOAD_BALANCED) { + return new LoadBalancedServer(serverId, new TestConnectionPool(), + new TestConnectionFactory(), serverListenerFactory.create(serverAddress), cluster.getClock()); + } else { + SameObjectProvider<SdamServerDescriptionManager> sdamProvider = SameObjectProvider.uninitialized(); + TestServerMonitor serverMonitor = new TestServerMonitor(sdamProvider); + serverAddressToServerMonitorMap.put(serverAddress, serverMonitor); + ConnectionPool connectionPool = new TestConnectionPool(); + ServerListener serverListener = serverListenerFactory.create(serverAddress); + SdamServerDescriptionManager sdam = new DefaultSdamServerDescriptionManager(cluster, serverId, serverListener, serverMonitor, + connectionPool, clusterConnectionMode); + sdamProvider.initialize(sdam); + serverMonitor.start(); + return new DefaultServer(serverId, clusterConnectionMode, connectionPool, new TestConnectionFactory(), serverMonitor, sdam, + serverListener, null, cluster.getClock(), true); + } + } + + @Override + public ServerSettings getSettings() { + return settings; + } + + + public void sendNotification(final ServerAddress serverAddress, final ServerDescription serverDescription) { + serverAddressToServerMonitorMap.get(serverAddress).updateServerDescription(serverDescription); + } + +}
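The else-branch above has a circular dependency: TestServerMonitor wants the SdamServerDescriptionManager at construction time, and the manager wants the monitor. The factory resolves it by handing the monitor an uninitialized provider and filling the provider in once the manager exists. A minimal single-slot provider along those lines, a sketch of the idea behind com.mongodb.internal.inject.SameObjectProvider rather than its actual source:

final class SingleSlotProvider<T> {
    private volatile T value;

    // Created empty, so it can be passed to a dependent before the dependency exists.
    static <T> SingleSlotProvider<T> uninitialized() {
        return new SingleSlotProvider<>();
    }

    void initialize(final T newValue) {
        if (value != null) {
            throw new IllegalStateException("already initialized");
        }
        value = newValue;
    }

    T get() {
        if (value == null) {
            throw new IllegalStateException("not initialized yet");
        }
        return value;
    }
}

diff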
--git a/driver-core/src/test/unit/com/mongodb/internal/connection/DescriptionHelperSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/DescriptionHelperSpecification.groovy new file mode 100644 index 00000000000..802cf044aac --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/DescriptionHelperSpecification.groovy @@ -0,0 +1,538 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection + +import com.mongodb.MongoClientException +import com.mongodb.ServerAddress +import com.mongodb.Tag +import com.mongodb.TagSet +import com.mongodb.connection.ClusterConnectionMode +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ConnectionDescription +import com.mongodb.connection.ConnectionId +import com.mongodb.connection.ServerConnectionState +import com.mongodb.connection.ServerDescription +import com.mongodb.connection.ServerId +import com.mongodb.connection.ServerType +import com.mongodb.connection.TopologyVersion +import org.bson.types.ObjectId +import spock.lang.Specification + +import java.util.concurrent.TimeUnit + +import static com.mongodb.internal.connection.DescriptionHelper.createConnectionDescription +import static com.mongodb.internal.connection.DescriptionHelper.createServerDescription +import static com.mongodb.internal.connection.MessageHelper.LEGACY_HELLO_LOWER +import static org.bson.BsonDocument.parse + +class DescriptionHelperSpecification extends Specification { + private final ServerAddress serverAddress = new ServerAddress('localhost', 27018) + private final int roundTripTime = 5000 + + def setup() { + Time.makeTimeConstant() + } + + def cleanup() { + Time.makeTimeMove() + } + + def 'connection description should reflect hello result'() { + def connectionId = new ConnectionId(new ServerId(new ClusterId(), serverAddress)) + expect: + createConnectionDescription(ClusterConnectionMode.SINGLE, connectionId, + parse("""{ + $LEGACY_HELLO_LOWER: true, + maxBsonObjectSize : 16777216, + maxMessageSizeBytes : 48000000, + maxWriteBatchSize : 1000, + localTime : ISODate("2015-03-04T23:03:45.848Z"), + maxWireVersion : 6, + minWireVersion : 0, + ok : 1 + }""")) == + new ConnectionDescription(connectionId, 6, ServerType.STANDALONE, 1000, 16777216, 48000000, []) + + createConnectionDescription(ClusterConnectionMode.SINGLE, connectionId, + parse("""{ + $LEGACY_HELLO_LOWER: true, + maxBsonObjectSize : 16777216, + maxMessageSizeBytes : 48000000, + maxWriteBatchSize : 1000, + localTime : ISODate("2015-03-04T23:03:45.848Z"), + maxWireVersion : 6, + minWireVersion : 0, + connectionId : 1004 + ok : 1 + }""")) == + new ConnectionDescription(connectionId, 6, ServerType.STANDALONE, 1000, 16777216, 48000000, []) + .withConnectionId(connectionId.withServerValue(1004)) + } + + + def 'connection description should reflect legacy hello result from load balancer'() { + given: + def connectionId = new ConnectionId(new ServerId(new ClusterId(), 
serverAddress)) + ObjectId serviceId = new ObjectId() + + expect: + createConnectionDescription(ClusterConnectionMode.LOAD_BALANCED, connectionId, + parse("""{ + $LEGACY_HELLO_LOWER: true, + msg : "isdbgrid", + maxBsonObjectSize : 16777216, + maxMessageSizeBytes : 48000000, + maxWriteBatchSize : 1000, + localTime : ISODate("2015-03-04T23:55:18.505Z"), + maxWireVersion : 13, + minWireVersion : 0, + connectionId : 1004, + serviceId: {\$oid : "${serviceId.toHexString()}"}, + ok : 1 + }""")) == + new ConnectionDescription(connectionId, 13, ServerType.SHARD_ROUTER, 1000, 16777216, 48000000, []) + .withConnectionId(connectionId.withServerValue(1004)) + .withServiceId(serviceId) + + when: + createConnectionDescription(ClusterConnectionMode.LOAD_BALANCED, connectionId, + parse("""{ + $LEGACY_HELLO_LOWER: true, + msg : "isdbgrid", + maxBsonObjectSize : 16777216, + maxMessageSizeBytes : 48000000, + maxWriteBatchSize : 1000, + localTime : ISODate("2015-03-04T23:55:18.505Z"), + maxWireVersion : 13, + minWireVersion : 0, + connectionId : 1004, + ok : 1 + }""")) + + then: + def e = thrown(MongoClientException) + e.getMessage() == 'Driver attempted to initialize in load balancing mode, but the server does not support this mode' + } + + def 'connection description should reflect legacy hello result with compressors'() { + def connectionId = new ConnectionId(new ServerId(new ClusterId(), serverAddress)) + expect: + createConnectionDescription(ClusterConnectionMode.SINGLE, connectionId, + parse("""{ + $LEGACY_HELLO_LOWER: true, + maxBsonObjectSize : 16777216, + maxMessageSizeBytes : 48000000, + maxWriteBatchSize : 1000, + localTime : ISODate("2015-03-04T23:03:45.848Z"), + maxWireVersion : 6, + minWireVersion : 0, + compression : ["zlib", "snappy"], + ok : 1 + }""")) == + new ConnectionDescription(connectionId, 6, ServerType.STANDALONE, 1000, 16777216, 48000000, + ['zlib', 'snappy']) + } + + def 'server description should reflect not ok legacy hello result'() { + expect: + createServerDescription(serverAddress, + parse('{ok : 0}'), roundTripTime, 0) == + ServerDescription.builder() + .ok(false) + .address(serverAddress) + .state(ServerConnectionState.CONNECTED) + .type(ServerType.UNKNOWN) + .build() + } + + def 'server description should reflect last update time'() { + expect: + createServerDescription(serverAddress, + parse('{ ok : 1 }'), roundTripTime, 0).getLastUpdateTime(TimeUnit.NANOSECONDS) == Time.CONSTANT_TIME + } + + def 'server description should reflect roundTripNanos'() { + expect: + createServerDescription(serverAddress, + parse("""{ + $LEGACY_HELLO_LOWER: true, + maxBsonObjectSize : 16777216, + maxMessageSizeBytes : 48000000, + maxWriteBatchSize : 1000, + localTime : ISODate("2015-03-04T23:03:45.848Z"), + maxWireVersion : 3, + minWireVersion : 0, + ok : 1 + }"""), roundTripTime, 0).roundTripTimeNanos == + ServerDescription.builder() + .ok(true) + .address(serverAddress) + .state(ServerConnectionState.CONNECTED) + .maxWireVersion(3) + .maxDocumentSize(16777216) + .type(ServerType.STANDALONE) + .roundTripTime(roundTripTime, TimeUnit.NANOSECONDS) + .build().roundTripTimeNanos + } + + def 'server description should reflect legacy hello result from standalone'() { + expect: + createServerDescription(serverAddress, + parse("""{ + $LEGACY_HELLO_LOWER: true, + maxBsonObjectSize : 16777216, + maxMessageSizeBytes : 48000000, + maxWriteBatchSize : 1000, + localTime : ISODate("2015-03-04T23:03:45.848Z"), + maxWireVersion : 3, + minWireVersion : 0, + ok : 1 + }"""), roundTripTime, 0) == + 
ServerDescription.builder() + .ok(true) + .address(serverAddress) + .state(ServerConnectionState.CONNECTED) + .maxWireVersion(3) + .maxDocumentSize(16777216) + .type(ServerType.STANDALONE) + .build() + } + + def 'server description should reflect legacy hello result from secondary'() { + expect: + createServerDescription(new ServerAddress('localhost', 27018), + parse("""{ + "setName" : "replset", + "$LEGACY_HELLO_LOWER": false, + "secondary" : true, + "hosts" : [ + "localhost:27017", + "localhost:27019", + "localhost:27018" + ], + "arbiters" : [ + "localhost:27020" + ], + "me" : "localhost:27017", + "maxBsonObjectSize" : 16777216, + "maxMessageSizeBytes" : 48000000, + "maxWriteBatchSize" : 1000, + "localTime" : ISODate("2015-03-04T23:14:07.338Z"), + "maxWireVersion" : 3, + "minWireVersion" : 0, + "ok" : 1 + }"""), roundTripTime, 0) == + ServerDescription.builder() + .ok(true) + .address(new ServerAddress('localhost', 27018)) + .state(ServerConnectionState.CONNECTED) + .maxWireVersion(3) + .maxDocumentSize(16777216) + .type(ServerType.REPLICA_SET_SECONDARY) + .setName('replset') + .canonicalAddress('localhost:27017') + .hosts(['localhost:27017', 'localhost:27018', 'localhost:27019'] as Set) + .arbiters(['localhost:27020'] as Set) + .build() + } + + def 'server description should reflect legacy hello result with lastWriteDate'() { + expect: + createServerDescription(new ServerAddress('localhost', 27018), + parse("""{ + "setName" : "replset", + "$LEGACY_HELLO_LOWER" : false, + "secondary" : true, + "hosts" : [ + "localhost:27017", + "localhost:27019", + "localhost:27018" + ], + "arbiters" : [ + "localhost:27020" + ], + "me" : "localhost:27017", + "maxBsonObjectSize" : 16777216, + "maxMessageSizeBytes" : 48000000, + "maxWriteBatchSize" : 1000, + "localTime" : ISODate("2015-03-04T23:14:07.338Z"), + "maxWireVersion" : 5, + "minWireVersion" : 0, + "lastWrite" : { "lastWriteDate" : ISODate("2016-03-04T23:14:07.338Z") } + "ok" : 1 + }"""), roundTripTime, 0) == + ServerDescription.builder() + .ok(true) + .address(new ServerAddress('localhost', 27018)) + .state(ServerConnectionState.CONNECTED) + .maxWireVersion(5) + .lastWriteDate(new Date(1457133247338L)) + .maxDocumentSize(16777216) + .type(ServerType.REPLICA_SET_SECONDARY) + .setName('replset') + .canonicalAddress('localhost:27017') + .hosts(['localhost:27017', 'localhost:27018', 'localhost:27019'] as Set) + .arbiters(['localhost:27020'] as Set) + .build() + } + + def 'server description should reflect legacy hello result from primary'() { + given: + ObjectId electionId = new ObjectId() + ObjectId topologyVersionProcessId = new ObjectId() + + when: + def serverDescription = createServerDescription(serverAddress, + parse("""{ + "setName" : "replset", + "setVersion" : 1, + "$LEGACY_HELLO_LOWER" : true, + "secondary" : false, + "hosts" : [ + "localhost:27017", + "localhost:27019", + "localhost:27018" + ], + "arbiters" : [ + "localhost:27020" + ], + "primary" : "localhost:27017", + "me" : "localhost:27017", + "maxBsonObjectSize" : 16777216, + "maxMessageSizeBytes" : 48000000, + "maxWriteBatchSize" : 1000, + "localTime" : ISODate("2015-03-04T23:24:18.452Z"), + "maxWireVersion" : 3, + "minWireVersion" : 0, + "electionId" : {\$oid : "${electionId.toHexString()}" }, + "topologyVersion" : { + processId: {\$oid : "${topologyVersionProcessId.toHexString()}"}, + counter: {\$numberLong : "42"} + }, + "setVersion" : 2, + tags : { "dc" : "east", "use" : "production" } + "ok" : 1 + }"""), roundTripTime, 0) + + then: + serverDescription == + 
ServerDescription.builder() + .ok(true) + .address(serverAddress) + .state(ServerConnectionState.CONNECTED) + .maxWireVersion(3) + .maxDocumentSize(16777216) + .electionId(electionId) + .setVersion(2) + .topologyVersion(new TopologyVersion(topologyVersionProcessId, 42)) + .type(ServerType.REPLICA_SET_PRIMARY) + .setName('replset') + .primary('localhost:27017') + .canonicalAddress('localhost:27017') + .hosts(['localhost:27017', 'localhost:27018', 'localhost:27019'] as Set) + .arbiters(['localhost:27020'] as Set) + .tagSet(new TagSet([new Tag('dc', 'east'), new Tag('use', 'production')])) + .build() + } + + def 'server description should reflect legacy hello result from arbiter'() { + expect: + createServerDescription(serverAddress, + parse("""{ + "setName" : "replset", + "$LEGACY_HELLO_LOWER": false, + "secondary" : false, + "hosts" : [ + "localhost:27019", + "localhost:27018", + "localhost:27017" + ], + "arbiters" : [ + "localhost:27020" + ], + "primary" : "localhost:27017", + "arbiterOnly" : true, + "me" : "localhost:27020", + "maxBsonObjectSize" : 16777216, + "maxMessageSizeBytes" : 48000000, + "maxWriteBatchSize" : 1000, + "localTime" : ISODate("2015-03-04T23:27:55.568Z"), + "maxWireVersion" : 3, + "minWireVersion" : 0, + "ok" : 1 + }"""), roundTripTime, 0) == + ServerDescription.builder() + .ok(true) + .address(serverAddress) + .state(ServerConnectionState.CONNECTED) + .maxWireVersion(3) + .maxDocumentSize(16777216) + .type(ServerType.REPLICA_SET_ARBITER) + .setName('replset') + .primary('localhost:27017') + .canonicalAddress('localhost:27020' ) + .hosts(['localhost:27017', 'localhost:27018', 'localhost:27019'] as Set) + .arbiters(['localhost:27020'] as Set) + .build() + } + + def 'server description should reflect legacy hello result from other'() { + given: + def serverAddressOfHidden = new ServerAddress('localhost', 27020) + + when: + def serverDescription = createServerDescription(serverAddressOfHidden, + parse("""{ + "setName" : "replset", + "$LEGACY_HELLO_LOWER": false, + "secondary" : false, + "hosts" : [ + "localhost:27019", + "localhost:27018", + "localhost:27017" + ], + "arbiters" : [ + "localhost:27021" + ], + "primary" : "localhost:27017", + "arbiterOnly" : false, + "me" : "localhost:27020", + "maxBsonObjectSize" : 16777216, + "maxMessageSizeBytes" : 48000000, + "maxWriteBatchSize" : 1000, + "localTime" : ISODate("2015-03-04T23:27:55.568Z"), + "maxWireVersion" : 3, + "minWireVersion" : 0, + "ok" : 1 + }"""), roundTripTime, 0) + + then: + serverDescription == + ServerDescription.builder() + .ok(true) + .address(serverAddressOfHidden) + .state(ServerConnectionState.CONNECTED) + .maxWireVersion(3) + .maxDocumentSize(16777216) + .type(ServerType.REPLICA_SET_OTHER) + .setName('replset') + .primary('localhost:27017') + .canonicalAddress('localhost:27020') + .hosts(['localhost:27017', 'localhost:27018', 'localhost:27019'] as Set) + .arbiters(['localhost:27021'] as Set) + .build() + } + + def 'server description should reflect legacy hello result from hidden'() { + given: + def serverAddressOfHidden = new ServerAddress('localhost', 27020) + + expect: + createServerDescription(serverAddressOfHidden, + parse("""{ + "setName" : "replset", + "$LEGACY_HELLO_LOWER": false, + "secondary" : true, + "hidden" : true, + "hosts" : [ + "localhost:27019", + "localhost:27018", + "localhost:27017" + ], + "arbiters" : [ + "localhost:27021" + ], + "primary" : "localhost:27017", + "arbiterOnly" : false, + "me" : "localhost:27020", + "maxBsonObjectSize" : 16777216, + "maxMessageSizeBytes" : 48000000, 
+ "maxWriteBatchSize" : 1000, + "localTime" : ISODate("2015-03-04T23:27:55.568Z"), + "maxWireVersion" : 3, + "minWireVersion" : 0, + "ok" : 1 + }"""), roundTripTime, 0) == + ServerDescription.builder() + .ok(true) + .address(serverAddressOfHidden) + .state(ServerConnectionState.CONNECTED) + .maxWireVersion(3) + .maxDocumentSize(16777216) + .type(ServerType.REPLICA_SET_OTHER) + .setName('replset') + .primary('localhost:27017') + .canonicalAddress('localhost:27020') + .hosts(['localhost:27017', 'localhost:27018', 'localhost:27019'] as Set) + .arbiters(['localhost:27021'] as Set) + .build() + } + + + def 'server description should reflect legacy hello result from ghost'() { + expect: + createServerDescription(serverAddress, + parse("""{ + "setName" : "replset", + "$LEGACY_HELLO_LOWER": false, + "secondary" : false, + "arbiterOnly" : false, + "me" : "localhost:27020", + "maxBsonObjectSize" : 16777216, + "maxMessageSizeBytes" : 48000000, + "maxWriteBatchSize" : 1000, + "localTime" : ISODate("2015-03-04T23:27:55.568Z"), + "maxWireVersion" : 3, + "minWireVersion" : 0, + "ok" : 1 + }"""), roundTripTime, 0) == + ServerDescription.builder() + .ok(true) + .address(serverAddress) + .state(ServerConnectionState.CONNECTED) + .canonicalAddress('localhost:27020' ) + .maxWireVersion(3) + .maxDocumentSize(16777216) + .type(ServerType.REPLICA_SET_GHOST) + .setName('replset') + .build() + } + + def 'server description should reflect legacy hello result from shard router'() { + expect: + createServerDescription(serverAddress, + parse("""{ + "$LEGACY_HELLO_LOWER": true, + "msg" : "isdbgrid", + "maxBsonObjectSize" : 16777216, + "maxMessageSizeBytes" : 48000000, + "maxWriteBatchSize" : 1000, + "localTime" : ISODate("2015-03-04T23:55:18.505Z"), + "maxWireVersion" : 3, + "minWireVersion" : 0, + "ok" : 1 + }"""), roundTripTime, 0) == + ServerDescription.builder() + .ok(true) + .address(serverAddress) + .state(ServerConnectionState.CONNECTED) + .maxWireVersion(3) + .maxDocumentSize(16777216) + .type(ServerType.SHARD_ROUTER) + .build() + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/DnsMultiServerClusterSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/DnsMultiServerClusterSpecification.groovy new file mode 100644 index 00000000000..930e30b2c7b --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/DnsMultiServerClusterSpecification.groovy @@ -0,0 +1,146 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection + +import com.mongodb.ClusterFixture +import com.mongodb.MongoConfigurationException +import com.mongodb.ServerAddress +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ClusterSettings +import com.mongodb.event.ClusterListener +import spock.lang.Specification + +import java.util.concurrent.TimeUnit + +import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE +import static com.mongodb.connection.ClusterType.SHARDED +import static com.mongodb.connection.ServerType.SHARD_ROUTER + +class DnsMultiServerClusterSpecification extends Specification { + + private final ServerAddress firstServer = new ServerAddress('localhost:27017') + private final ServerAddress secondServer = new ServerAddress('localhost:27018') + private final ServerAddress thirdServer = new ServerAddress('localhost:27019') + + private final TestClusterableServerFactory factory = new TestClusterableServerFactory() + + def setup() { + Time.makeTimeConstant() + } + + def cleanup() { + Time.makeTimeMove() + } + + def 'should initialize from DNS SRV monitor'() { + given: + def srvHost = 'test1.test.build.10gen.cc' + def clusterListener = Mock(ClusterListener) + def dnsSrvRecordMonitor = Mock(DnsSrvRecordMonitor) + def exception = new MongoConfigurationException('test') + DnsSrvRecordInitializer initializer + def dnsSrvRecordMonitorFactory = new DnsSrvRecordMonitorFactory() { + @Override + DnsSrvRecordMonitor create(final String hostName, String srvServiceName, final DnsSrvRecordInitializer dnsSrvRecordListener) { + initializer = dnsSrvRecordListener + dnsSrvRecordMonitor + } + } + when: 'the cluster is constructed' + def cluster = new DnsMultiServerCluster(new ClusterId(), + ClusterSettings.builder() + .addClusterListener(clusterListener) + .serverSelectionTimeout(1, TimeUnit.MILLISECONDS) + .srvHost(srvHost) + .mode(MULTIPLE) + .build(), + factory, ClusterFixture.CLIENT_METADATA, dnsSrvRecordMonitorFactory) + + then: 'the monitor is created and started' + initializer != null + 1 * dnsSrvRecordMonitor.start() + + when: 'the current description is accessed before initialization' + def description = cluster.getCurrentDescription() + + then: 'the description is not null' + description != null + + when: 'the listener is initialized with an exception' + initializer.initialize(exception) + description = cluster.getCurrentDescription() + + then: 'the description includes the exception' + description.getServerDescriptions() == [] + description.getSrvResolutionException() == exception + + when: 'the listener is initialized with servers' + initializer.initialize([firstServer, secondServer] as Set) + + then: 'an event is generated' + 1 * clusterListener.clusterDescriptionChanged(_) + + when: 'the servers notify' + factory.sendNotification(firstServer, SHARD_ROUTER) + factory.sendNotification(secondServer, SHARD_ROUTER) + def firstTestServer = factory.getServer(firstServer) + def secondTestServer = factory.getServer(secondServer) + def clusterDescription = cluster.getCurrentDescription() + + then: 'events are generated, description includes hosts, exception is cleared, and servers are open' + 2 * clusterListener.clusterDescriptionChanged(_) + clusterDescription.getType() == SHARDED + ClusterDescriptionHelper.getAll(clusterDescription) == factory.getDescriptions(firstServer, secondServer) + clusterDescription.getSrvResolutionException() == null + !firstTestServer.isClosed() + !secondTestServer.isClosed() + + when: 'the listener is initialized with a different server' 
+ initializer.initialize([secondServer, thirdServer]) + factory.sendNotification(secondServer, SHARD_ROUTER) + def thirdTestServer = factory.getServer(thirdServer) + clusterDescription = cluster.getCurrentDescription() + + then: 'events are generated, description is updated, and the removed server is closed' + 1 * clusterListener.clusterDescriptionChanged(_) + clusterDescription.getType() == SHARDED + ClusterDescriptionHelper.getAll(clusterDescription) == factory.getDescriptions(secondServer, thirdServer) + clusterDescription.getSrvResolutionException() == null + firstTestServer.isClosed() + !secondTestServer.isClosed() + !thirdTestServer.isClosed() + + when: 'the listener is initialized with another exception' + initializer.initialize(exception) + clusterDescription = cluster.getCurrentDescription() + + then: 'the exception is ignored' + 0 * clusterListener.clusterDescriptionChanged(_) + clusterDescription.getType() == SHARDED + ClusterDescriptionHelper.getAll(clusterDescription) == factory.getDescriptions(secondServer, thirdServer) + clusterDescription.getSrvResolutionException() == null + firstTestServer.isClosed() + !secondTestServer.isClosed() + !thirdTestServer.isClosed() + + when: 'the cluster is closed' + cluster.close() + + then: 'the monitor is closed' + 1 * dnsSrvRecordMonitor.close() + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/EventHelperTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/EventHelperTest.java new file mode 100644 index 00000000000..46dc2e4efa5 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/EventHelperTest.java @@ -0,0 +1,171 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
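The add/remove behaviour exercised above is a plain set reconciliation run on each SRV re-resolution: unseen hosts are added, vanished hosts are closed, survivors are untouched, and resolution errors are ignored once a host list exists. A compact sketch of that step; the helper is hypothetical, with the Consumer callbacks standing in for the cluster's server bookkeeping:

import com.mongodb.ServerAddress;

import java.util.HashSet;
import java.util.Set;
import java.util.function.Consumer;

final class SrvReconcileExample {
    static void applyResolvedHosts(final Set<ServerAddress> current, final Set<ServerAddress> resolved,
                                   final Consumer<ServerAddress> addServer, final Consumer<ServerAddress> removeServer) {
        for (ServerAddress address : resolved) {
            if (!current.contains(address)) {
                addServer.accept(address);      // e.g. thirdServer joining the topology
            }
        }
        for (ServerAddress address : new HashSet<>(current)) {
            if (!resolved.contains(address)) {
                removeServer.accept(address);   // e.g. firstServer being closed
            }
        }
    }
}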
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoClientException; +import com.mongodb.MongoException; +import com.mongodb.ServerAddress; +import com.mongodb.Tag; +import com.mongodb.TagSet; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerType; +import com.mongodb.connection.TopologyVersion; +import org.bson.types.ObjectId; +import org.junit.Test; + +import java.io.IOException; +import java.util.Date; +import java.util.HashSet; + +import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE; +import static com.mongodb.connection.ClusterConnectionMode.SINGLE; +import static com.mongodb.connection.ClusterType.REPLICA_SET; +import static com.mongodb.connection.ClusterType.STANDALONE; +import static com.mongodb.connection.ServerConnectionState.CONNECTED; +import static com.mongodb.connection.ServerConnectionState.CONNECTING; +import static com.mongodb.connection.ServerDescription.builder; +import static com.mongodb.internal.connection.EventHelper.wouldDescriptionsGenerateEquivalentEvents; +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +public class EventHelperTest { + + @Test + public void testServerDescriptionEventEquivalence() { + ServerDescription serverDescription = createBuilder().build(); + assertTrue(wouldDescriptionsGenerateEquivalentEvents(serverDescription, serverDescription)); + assertTrue(wouldDescriptionsGenerateEquivalentEvents((ServerDescription) null, null)); + assertFalse(wouldDescriptionsGenerateEquivalentEvents(serverDescription, null)); + assertFalse(wouldDescriptionsGenerateEquivalentEvents(null, serverDescription)); + + assertTrue(wouldDescriptionsGenerateEquivalentEvents(createBuilder().build(), createBuilder().build())); + + assertFalse(wouldDescriptionsGenerateEquivalentEvents(serverDescription, createBuilder().ok(false).build())); + assertFalse(wouldDescriptionsGenerateEquivalentEvents(serverDescription, createBuilder().state(CONNECTING).build())); + assertFalse(wouldDescriptionsGenerateEquivalentEvents(serverDescription, createBuilder().type(ServerType.STANDALONE).build())); + assertFalse(wouldDescriptionsGenerateEquivalentEvents(serverDescription, createBuilder().minWireVersion(2).build())); + assertFalse(wouldDescriptionsGenerateEquivalentEvents(serverDescription, createBuilder().maxWireVersion(3).build())); + assertFalse(wouldDescriptionsGenerateEquivalentEvents(serverDescription, createBuilder().canonicalAddress("host:27017").build())); + assertFalse(wouldDescriptionsGenerateEquivalentEvents(serverDescription, createBuilder() + .hosts(new HashSet<>(asList("localhost:27017", "localhost:27018", "localhost:27019"))).build())); + assertFalse(wouldDescriptionsGenerateEquivalentEvents(serverDescription, createBuilder() + .passives(new HashSet<>(singletonList("localhost:27018"))).build())); + assertFalse(wouldDescriptionsGenerateEquivalentEvents(serverDescription, createBuilder() + .arbiters(new HashSet<>(singletonList("localhost:27018"))).build())); + assertFalse(wouldDescriptionsGenerateEquivalentEvents(serverDescription, createBuilder().tagSet(new TagSet()).build())); + assertFalse(wouldDescriptionsGenerateEquivalentEvents(serverDescription, createBuilder().setName("test2").build())); + assertFalse(wouldDescriptionsGenerateEquivalentEvents(serverDescription, 
createBuilder().setVersion(3).build())); + assertFalse(wouldDescriptionsGenerateEquivalentEvents(serverDescription, createBuilder().electionId(new ObjectId()).build())); + assertFalse(wouldDescriptionsGenerateEquivalentEvents(serverDescription, createBuilder().primary("localhost:27018").build())); + assertFalse(wouldDescriptionsGenerateEquivalentEvents(serverDescription, createBuilder().logicalSessionTimeoutMinutes(26).build())); + assertFalse(wouldDescriptionsGenerateEquivalentEvents(serverDescription, createBuilder().topologyVersion( + new TopologyVersion(new ObjectId("5e47699e32e4571020a96f07"), 43)).build())); + + assertTrue(wouldDescriptionsGenerateEquivalentEvents(createBuilder().exception(new MongoException("msg1")).build(), + createBuilder().exception(new MongoException("msg1")).build())); + assertFalse(wouldDescriptionsGenerateEquivalentEvents(serverDescription, + createBuilder().exception(new MongoException("msg1")).build())); + assertFalse(wouldDescriptionsGenerateEquivalentEvents(createBuilder().exception(new MongoException("msg1")).build(), + createBuilder().exception(new MongoException("msg2")).build())); + assertFalse(wouldDescriptionsGenerateEquivalentEvents(createBuilder().exception(new MongoException("msg1")).build(), + createBuilder().exception(new IOException("msg1")).build())); + + assertTrue(wouldDescriptionsGenerateEquivalentEvents(serverDescription, createBuilder().lastWriteDate(new Date(100)).build())); + } + + @Test + public void testClusterDescriptionEquivalence() { + assertTrue(wouldDescriptionsGenerateEquivalentEvents( + new ClusterDescription(SINGLE, STANDALONE, singletonList(createBuilder().build())), + new ClusterDescription(SINGLE, STANDALONE, singletonList(createBuilder().build())))); + assertTrue(wouldDescriptionsGenerateEquivalentEvents(new ClusterDescription(MULTIPLE, REPLICA_SET, + asList(createBuilder("localhost:27017").build(), + createBuilder("localhost:27018").build())), + new ClusterDescription(MULTIPLE, REPLICA_SET, + asList(createBuilder("localhost:27017").build(), + createBuilder("localhost:27018").build())))); + assertTrue(wouldDescriptionsGenerateEquivalentEvents(new ClusterDescription(MULTIPLE, REPLICA_SET, + asList(createBuilder("localhost:27017").build(), + createBuilder("localhost:27018").build())), + new ClusterDescription(MULTIPLE, REPLICA_SET, + asList(createBuilder("localhost:27018").build(), + createBuilder("localhost:27017").build())))); + + assertFalse(wouldDescriptionsGenerateEquivalentEvents( + new ClusterDescription(SINGLE, STANDALONE, singletonList(createBuilder().build())), + new ClusterDescription(SINGLE, STANDALONE, singletonList(createBuilder().maxWireVersion(4).build())))); + assertFalse(wouldDescriptionsGenerateEquivalentEvents(new ClusterDescription(MULTIPLE, REPLICA_SET, + asList(createBuilder("localhost:27017").build(), + createBuilder("localhost:27018").build())), + new ClusterDescription(MULTIPLE, REPLICA_SET, + singletonList(createBuilder("localhost:27017").build())))); + assertFalse(wouldDescriptionsGenerateEquivalentEvents(new ClusterDescription(MULTIPLE, REPLICA_SET, + asList(createBuilder("localhost:27017").build(), + createBuilder("localhost:27018").build())), + new ClusterDescription(MULTIPLE, REPLICA_SET, + asList(createBuilder("localhost:27017").build(), + createBuilder("localhost:27018").maxWireVersion(4).build())))); + assertFalse(wouldDescriptionsGenerateEquivalentEvents(new ClusterDescription(MULTIPLE, REPLICA_SET, + asList(createBuilder("localhost:27017").build(), + 
createBuilder("localhost:27018").build())), + new ClusterDescription(MULTIPLE, REPLICA_SET, + asList(createBuilder("localhost:27018").build(), + createBuilder("localhost:27017").maxWireVersion(4).build())))); + + assertTrue(wouldDescriptionsGenerateEquivalentEvents( + new ClusterDescription(SINGLE, STANDALONE, new MongoException("msg1"), emptyList(), null, null), + new ClusterDescription(SINGLE, STANDALONE, new MongoException("msg1"), emptyList(), null, null))); + assertFalse(wouldDescriptionsGenerateEquivalentEvents( + new ClusterDescription(SINGLE, STANDALONE, new MongoException("msg1"), emptyList(), null, null), + new ClusterDescription(SINGLE, STANDALONE, null, emptyList(), null, null))); + assertFalse(wouldDescriptionsGenerateEquivalentEvents( + new ClusterDescription(SINGLE, STANDALONE, new MongoException("msg1"), emptyList(), null, null), + new ClusterDescription(SINGLE, STANDALONE, new MongoException("msg2"), emptyList(), null, null))); + assertFalse(wouldDescriptionsGenerateEquivalentEvents( + new ClusterDescription(SINGLE, STANDALONE, new MongoException("msg1"), emptyList(), null, null), + new ClusterDescription(SINGLE, STANDALONE, new MongoClientException("msg1"), emptyList(), null, null))); + } + + private ServerDescription.Builder createBuilder() { + return createBuilder("localhost:27017"); + } + + private ServerDescription.Builder createBuilder(final String address) { + return builder().address(new ServerAddress(address)) + .ok(true) + .state(CONNECTED) + .type(ServerType.REPLICA_SET_PRIMARY) + .minWireVersion(1) + .maxWireVersion(2) + .canonicalAddress(address) + .hosts(new HashSet<>(asList("localhost:27017", "localhost:27018"))) + .passives(new HashSet<>(singletonList("localhost:27019"))) + .arbiters(new HashSet<>(singletonList("localhost:27020"))) + .tagSet(new TagSet(singletonList(new Tag("dc", "ny")))) + .setName("test") + .setVersion(2) + .electionId(new ObjectId("abcdabcdabcdabcdabcdabcd")) + .primary("localhost:27017") + .logicalSessionTimeoutMinutes(25) + .topologyVersion(new TopologyVersion(new ObjectId("5e47699e32e4571020a96f07"), 42)) + .lastWriteDate(new Date(99)); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ExponentiallyWeightedMovingAverageTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ExponentiallyWeightedMovingAverageTest.java new file mode 100644 index 00000000000..59da49bfbe5 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ExponentiallyWeightedMovingAverageTest.java @@ -0,0 +1,72 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.internal.connection; + +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.params.provider.ValueSource; + +import java.util.List; +import java.util.stream.Stream; + +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + + +public class ExponentiallyWeightedMovingAverageTest { + + @ParameterizedTest(name = "{index}: {0}") + @ValueSource(doubles = {-0.001, -0.01, -0.1, -1, 1.001, 1.01, 1.1}) + @DisplayName("constructor should throw if alpha is not between 0.0 and 1.0") + void testInvalidAlpha(final double alpha) { + assertThrows(IllegalArgumentException.class, () -> new ExponentiallyWeightedMovingAverage(alpha)); + } + + @ParameterizedTest(name = "{index}: {0}") + @ValueSource(doubles = {-0.0, 0.01, 0.1, 0.001, 0.01, 0.1, 0.2, 1.0}) + @DisplayName("constructor should not throw if alpha is between 0.0 and 1.0") + void testValidAlpha(final double alpha) { + assertDoesNotThrow(() -> new ExponentiallyWeightedMovingAverage(alpha)); + } + + + @ParameterizedTest(name = "{index}: samples: {1}. Expected: {2}") + @DisplayName("the average should be exponentially weighted") + @MethodSource + public void testAverageIsExponentiallyWeighted(final double alpha, final List<Integer> samples, final int expectedAverageRTT) { + ExponentiallyWeightedMovingAverage average = new ExponentiallyWeightedMovingAverage(alpha); + samples.forEach(average::addSample); + + assertEquals(expectedAverageRTT, average.getAverage()); + } + + private static Stream<Arguments> testAverageIsExponentiallyWeighted() { + return Stream.of( + Arguments.of(0.2, emptyList(), 0), + Arguments.of(0.2, singletonList(10), 10), + Arguments.of(0.2, asList(10, 20), 12), + Arguments.of(0.2, asList(10, 20, 12), 12), + Arguments.of(0.2, asList(10, 20, 12, 17), 13) + ); + } + +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/IdHoldingBsonWriterSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/IdHoldingBsonWriterSpecification.groovy new file mode 100644 index 00000000000..f603576ecfb --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/IdHoldingBsonWriterSpecification.groovy @@ -0,0 +1,139 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
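The parameterized cases above follow the usual EWMA recurrence: the average starts at the first sample, and each later sample is blended in as avg = alpha * sample + (1 - alpha) * avg, with the result rounded to a long. A sketch that reproduces the expected values for alpha = 0.2; names here are illustrative:

final class EwmaExample {
    // The average starts at the first sample; each new sample is blended in with weight alpha.
    static long ewma(final double alpha, final int... samples) {
        double average = 0;
        boolean hasSample = false;
        for (int sample : samples) {
            average = hasSample ? alpha * sample + (1 - alpha) * average : sample;
            hasSample = true;
        }
        return Math.round(average);
    }

    public static void main(final String[] args) {
        System.out.println(ewma(0.2, 10, 20));         // 0.2 * 20 + 0.8 * 10 = 12
        System.out.println(ewma(0.2, 10, 20, 12));     // 0.2 * 12 + 0.8 * 12 = 12
        System.out.println(ewma(0.2, 10, 20, 12, 17)); // 0.2 * 17 + 0.8 * 12 = 13
    }
}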
+ */ + +package com.mongodb.internal.connection + +import org.bson.BsonArray +import org.bson.BsonBinaryReader +import org.bson.BsonBinaryWriter +import org.bson.BsonDocument +import org.bson.BsonObjectId +import org.bson.codecs.BsonDocumentCodec +import org.bson.codecs.DecoderContext +import org.bson.codecs.EncoderContext +import org.bson.io.BasicOutputBuffer +import org.bson.io.BsonOutput +import spock.lang.Specification + +import static org.bson.BsonHelper.documentWithValuesOfEveryType +import static org.bson.BsonHelper.getBsonValues + +class IdHoldingBsonWriterSpecification extends Specification { + private static final OBJECT_ID = new BsonObjectId() + + def 'should write all types'() { + given: + def bsonBinaryWriter = new BsonBinaryWriter(new BasicOutputBuffer()) + def idTrackingBsonWriter = new IdHoldingBsonWriter(bsonBinaryWriter, fallbackId) + def document = documentWithValuesOfEveryType() + + when: + new BsonDocumentCodec().encode(idTrackingBsonWriter, document, EncoderContext.builder().build()) + def encodedDocument = getEncodedDocument(bsonBinaryWriter.getBsonOutput()) + + then: + !document.containsKey('_id') + encodedDocument.containsKey('_id') + idTrackingBsonWriter.getId() == encodedDocument.get('_id') + if (expectedIdNullIfMustBeGenerated != null) { + idTrackingBsonWriter.getId() == expectedIdNullIfMustBeGenerated + } + + when: + encodedDocument.remove('_id') + + then: + encodedDocument == document + + where: + fallbackId << [null, OBJECT_ID] + expectedIdNullIfMustBeGenerated << [null, OBJECT_ID] + } + + def 'should support all types for _id value'() { + given: + def bsonBinaryWriter = new BsonBinaryWriter(new BasicOutputBuffer()) + def idTrackingBsonWriter = new IdHoldingBsonWriter(bsonBinaryWriter, fallbackId) + def document = new BsonDocument() + document.put('_id', id) + + when: + new BsonDocumentCodec().encode(idTrackingBsonWriter, document, EncoderContext.builder().build()) + def encodedDocument = getEncodedDocument(bsonBinaryWriter.getBsonOutput()) + + then: + encodedDocument == document + idTrackingBsonWriter.getId() == id + + where: + [id, fallbackId] << [ + getBsonValues(), + [null, new BsonObjectId()] + ].combinations() + } + + def 'serialize document with list of documents that contain an _id field'() { + def bsonBinaryWriter = new BsonBinaryWriter(new BasicOutputBuffer()) + def idTrackingBsonWriter = new IdHoldingBsonWriter(bsonBinaryWriter, fallbackId) + def document = new BsonDocument('_id', new BsonObjectId()) + .append('items', new BsonArray(Collections.singletonList(new BsonDocument('_id', new BsonObjectId())))) + + when: + new BsonDocumentCodec().encode(idTrackingBsonWriter, document, EncoderContext.builder().build()) + def encodedDocument = getEncodedDocument(bsonBinaryWriter.getBsonOutput()) + + then: + encodedDocument == document + + where: + fallbackId << [null, new BsonObjectId()] + } + + def 'serialize _id documents containing arrays'() { + def bsonBinaryWriter = new BsonBinaryWriter(new BasicOutputBuffer()) + def idTrackingBsonWriter = new IdHoldingBsonWriter(bsonBinaryWriter, fallbackId) + BsonDocument document = BsonDocument.parse(json) + + when: + new BsonDocumentCodec().encode(idTrackingBsonWriter, document, EncoderContext.builder() + .isEncodingCollectibleDocument(true).build()) + def encodedDocument = getEncodedDocument(bsonBinaryWriter.getBsonOutput()) + + then: + encodedDocument == document + + where: + [json, fallbackId] << [ + ['{"_id": {"a": []}, "b": 123}', + '{"_id": {"a": [1, 2]}, "b": 123}', + '{"_id": {"a": [[[[1]]]]}, "b": 123}', + 
'{"_id": {"a": [{"a": [1, 2]}]}, "b": 123}', + '{"_id": {"a": {"a": [1, 2]}}, "b": 123}', + '{"_id": {"a": [1, 2], "b": [123]}}', + '{"_id": [], "b": 123}', + '{"_id": [1, 2], "b": 123}', + '{"_id": [[1], [[2]]], "b": 123}', + '{"_id": [{"a": 1}], "b": 123}', + '{"_id": [{"a": [{"b": 123}]}]}'], + [null, new BsonObjectId()] + ].combinations() + } + + private static BsonDocument getEncodedDocument(BsonOutput buffer) { + new BsonDocumentCodec().decode(new BsonBinaryReader(buffer.getByteBuffers().get(0).asNIO()), + DecoderContext.builder().build()) + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/IndexMapSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/IndexMapSpecification.groovy new file mode 100644 index 00000000000..60bf1cb7f9a --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/IndexMapSpecification.groovy @@ -0,0 +1,106 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection + +import com.mongodb.MongoInternalException +import spock.lang.Specification + +class IndexMapSpecification extends Specification { + + def 'should map contiguous indexes'() { + given: + def indexMap = IndexMap.create() + + when: + indexMap = indexMap.add(0, 1) + indexMap = indexMap.add(1, 2) + + then: + 1 == indexMap.map(0) + 2 == indexMap.map(1) + } + + def 'should map non-contiguous indexes'() { + given: + def indexMap = IndexMap.create() + + when: + indexMap = indexMap.add(0, 1) + indexMap = indexMap.add(1, 2) + indexMap = indexMap.add(2, 5) + + then: + 1 == indexMap.map(0) + 2 == indexMap.map(1) + 5 == indexMap.map(2) + } + + def 'should throw on unmapped index'() { + when: + indexMap.map(-1) + + then: + thrown(MongoInternalException) + + when: + indexMap.map(4) + + then: + thrown(MongoInternalException) + + where: + indexMap << [IndexMap.create().add(0, 1), IndexMap.create(1000, 3).add(5, 1005)] + } + + def 'should map indexes when count is provided up front'() { + when: + def indexMap = IndexMap.create(1, 2) + + then: + 1 == indexMap.map(0) + 2 == indexMap.map(1) + } + + def 'should include ranges when converting from range based to hash based indexMap'() { + given: + def indexMap = IndexMap.create(1000, 3) + + when: 'converts from range based with a high startIndex to hash based' + indexMap = indexMap.add(5, 1005) + + then: + 1000 == indexMap.map(0) + 1001 == indexMap.map(1) + 1002 == indexMap.map(2) + 1005 == indexMap.map(5) + } + + def 'should not allow a negative startIndex or count'() { + when: + IndexMap.create(-1, 10) + + then: + thrown(IllegalArgumentException) + + when: + IndexMap.create(1, -10) + + then: + thrown(IllegalArgumentException) + } + +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/InitialDnsSeedListDiscoveryProseTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/InitialDnsSeedListDiscoveryProseTest.java new file mode 100644 index 00000000000..d49f67a1e38 --- 
/dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/InitialDnsSeedListDiscoveryProseTest.java @@ -0,0 +1,134 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.ClusterFixture; +import com.mongodb.MongoException; +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ClusterSettings; +import com.mongodb.connection.ClusterType; +import com.mongodb.connection.ServerSettings; +import com.mongodb.internal.dns.DefaultDnsResolver; +import com.mongodb.internal.dns.DnsResolver; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; + +import static com.mongodb.ClusterFixture.CLIENT_METADATA; +import static java.util.Collections.singletonList; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * See https://github.com/mongodb/specifications/blob/master/source/initial-dns-seedlist-discovery/tests/README.md + */ +class InitialDnsSeedListDiscoveryProseTest { + private static final String SRV_SERVICE_NAME = "mongodb"; + + private DnsMultiServerCluster cluster; + + @AfterEach + void tearDown() { + if (cluster != null) { + cluster.close(); + } + } + + @ParameterizedTest(name = "mongodb+srv://{0} => {1}") + @CsvSource({ + "localhost, test.mongo.localhost", + "mongo.local, test.driver.mongo.local" + }) + @DisplayName("1. Allow SRVs with fewer than 3 '.' separated parts") + void testAllowSRVsWithFewerThanThreeParts(final String srvHost, final String resolvedHost) { + doTest(srvHost, resolvedHost, false); + } + + @ParameterizedTest(name = "mongodb+srv://{0} => {1}") + @CsvSource({ + "localhost, localhost.mongodb", + "mongo.local, test_1.evil.local", + "blogs.mongodb.com, blogs.evil.com" + }) + @DisplayName("2. Throw when return address does not end with SRV domain") + void testThrowWhenReturnAddressDoesnotEndWithSRVDomain(final String srvHost, final String resolvedHost) { + doTest(srvHost, resolvedHost, true); + } + + @ParameterizedTest(name = "mongodb+srv://{0} => {1}") + @CsvSource({ + "localhost, localhost", + "mongo.local, mongo.local" + }) + @DisplayName("3. 
Throw when return address is identical to SRV hostname and the SRV hostname has fewer than three `.` separated parts") + void testThrowWhenReturnAddressIsIdenticalToSRVHostname(final String srvHost, final String resolvedHost) { + doTest(srvHost, resolvedHost, true); + } + + @ParameterizedTest(name = "mongodb+srv://{0} => {1}") + @CsvSource({ + "localhost, test_1.cluster_1localhost", + "mongo.local, test_1.my_hostmongo.local", + "blogs.mongodb.com, cluster.testmongodb.com" + }) + @DisplayName("4. Throw when return address does not contain '.' separating shared part of domain") + void testThrowWhenReturnAddressDoesnotContainSharedPartOfDomain(final String srvHost, final String resolvedHost) { + doTest(srvHost, resolvedHost, true); + } + + private void doTest(final String srvHost, final String resolvedHost, final boolean throwException) { + final ClusterId clusterId = new ClusterId(); + + final DnsResolver dnsResolver = new DefaultDnsResolver((name, type) -> singletonList(String.format("10 5 27017 %s", + resolvedHost))); + + final DnsSrvRecordMonitorFactory dnsSrvRecordMonitorFactory = mock(DnsSrvRecordMonitorFactory.class); + when(dnsSrvRecordMonitorFactory.create(eq(srvHost), eq(SRV_SERVICE_NAME), any(DnsSrvRecordInitializer.class))).thenAnswer( + invocation -> new DefaultDnsSrvRecordMonitor(srvHost, SRV_SERVICE_NAME, 10, 10, + invocation.getArgument(2), clusterId, dnsResolver)); + + final ClusterSettings.Builder settingsBuilder = ClusterSettings.builder() + .mode(ClusterConnectionMode.MULTIPLE) + .requiredClusterType(ClusterType.SHARDED) + .srvHost(srvHost); + + final ClusterableServerFactory serverFactory = mock(ClusterableServerFactory.class); + when(serverFactory.getSettings()).thenReturn(ServerSettings.builder().build()); + when(serverFactory.create(any(Cluster.class), any(ServerAddress.class))).thenReturn(mock(ClusterableServer.class)); + + cluster = new DnsMultiServerCluster(clusterId, settingsBuilder.build(), + serverFactory, + CLIENT_METADATA, + dnsSrvRecordMonitorFactory); + + ClusterFixture.sleep(100); + + final MongoException mongoException = cluster.getSrvResolutionException(); + if (throwException) { + Assertions.assertNotNull(mongoException); + } else { + Assertions.assertNull(mongoException); + } + } +} + diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionInitializerSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionInitializerSpecification.groovy new file mode 100644 index 00000000000..1d44f8dde46 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionInitializerSpecification.groovy @@ -0,0 +1,516 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
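All four rule groups above collapse into one suffix test: a resolved address is acceptable only when it ends with '.' plus the shared domain, where the shared domain is the entire SRV hostname when that hostname has fewer than three '.'-separated parts, and everything after the first label otherwise. A sketch of that check against the table rows above; it illustrates the rules the tests encode, not the driver's validator:

final class SrvHostValidationExample {
    static boolean isAcceptable(final String srvHost, final String resolvedHost) {
        String sharedDomain = srvHost.split("\\.").length < 3
                ? srvHost                                        // rules 1 and 3: the whole hostname must be shared
                : srvHost.substring(srvHost.indexOf('.') + 1);   // e.g. "blogs.mongodb.com" -> "mongodb.com"
        // Requiring the leading '.' rejects both identical hostnames (rule 3)
        // and hosts that merely end with the same characters (rule 4).
        return resolvedHost.endsWith("." + sharedDomain);
    }

    public static void main(final String[] args) {
        System.out.println(isAcceptable("localhost", "test.mongo.localhost"));            // true  (rule 1)
        System.out.println(isAcceptable("localhost", "localhost.mongodb"));               // false (rule 2)
        System.out.println(isAcceptable("mongo.local", "mongo.local"));                   // false (rule 3)
        System.out.println(isAcceptable("blogs.mongodb.com", "cluster.testmongodb.com")); // false (rule 4)
    }
}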
+ */ + +package com.mongodb.internal.connection + +import com.mongodb.AuthenticationMechanism +import com.mongodb.MongoCompressor +import com.mongodb.ServerAddress +import com.mongodb.async.FutureResultCallback +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ConnectionDescription +import com.mongodb.connection.ConnectionId +import com.mongodb.connection.ServerConnectionState +import com.mongodb.connection.ServerDescription +import com.mongodb.connection.ServerId +import com.mongodb.connection.ServerType +import com.mongodb.internal.TimeoutSettings +import org.bson.BsonArray +import org.bson.BsonBoolean +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonString +import spock.lang.Specification + +import java.nio.charset.Charset +import java.util.concurrent.TimeUnit + +import static com.mongodb.MongoCredential.createCredential +import static com.mongodb.MongoCredential.createMongoX509Credential +import static com.mongodb.MongoCredential.createPlainCredential +import static com.mongodb.MongoCredential.createScramSha1Credential +import static com.mongodb.MongoCredential.createScramSha256Credential +import static com.mongodb.connection.ClusterConnectionMode.SINGLE +import static com.mongodb.internal.connection.MessageHelper.LEGACY_HELLO +import static com.mongodb.internal.connection.MessageHelper.buildSuccessfulReply +import static com.mongodb.internal.connection.MessageHelper.decodeCommand +import static com.mongodb.internal.connection.OperationContext.simpleOperationContext + +class InternalStreamConnectionInitializerSpecification extends Specification { + + def serverId = new ServerId(new ClusterId(), new ServerAddress()) + def internalConnection = new TestInternalConnection(serverId, ServerType.STANDALONE) + def operationContext = simpleOperationContext(TimeoutSettings.DEFAULT, null) + + def 'should create correct description'() { + given: + def initializer = new InternalStreamConnectionInitializer(SINGLE, null, null, [], null) + + when: + enqueueSuccessfulReplies(false, 123) + def description = initializer.startHandshake(internalConnection, operationContext) + description = initializer.finishHandshake(internalConnection, description, operationContext) + def connectionDescription = description.connectionDescription + def serverDescription = description.serverDescription + + then: + connectionDescription == getExpectedConnectionDescription(connectionDescription.connectionId.localValue, 123) + serverDescription == getExpectedServerDescription(serverDescription) + } + + def 'should create correct description asynchronously'() { + given: + def initializer = new InternalStreamConnectionInitializer(SINGLE, null, null, [], null) + + when: + enqueueSuccessfulReplies(false, 123) + def futureCallback = new FutureResultCallback() + initializer.startHandshakeAsync(internalConnection, operationContext, futureCallback) + def description = futureCallback.get() + futureCallback = new FutureResultCallback() + initializer.finishHandshakeAsync(internalConnection, description, operationContext, futureCallback) + description = futureCallback.get() + def connectionDescription = description.connectionDescription + def serverDescription = description.serverDescription + + then: + connectionDescription == getExpectedConnectionDescription(connectionDescription.connectionId.localValue, 123) + serverDescription == getExpectedServerDescription(serverDescription) + } + + def 'should create correct description with server connection id'() { + given: + def initializer = 
                new InternalStreamConnectionInitializer(SINGLE, null, null, [], null)
+
+        when:
+        enqueueSuccessfulReplies(false, 123)
+        def internalDescription = initializer.startHandshake(internalConnection, operationContext)
+        def connectionDescription = initializer.finishHandshake(internalConnection, internalDescription, operationContext)
+                .connectionDescription
+
+        then:
+        connectionDescription == getExpectedConnectionDescription(connectionDescription.connectionId.localValue, 123)
+    }
+
+    def 'should create correct description with server connection id asynchronously'() {
+        given:
+        def initializer = new InternalStreamConnectionInitializer(SINGLE, null, null, [], null)
+
+        when:
+        enqueueSuccessfulReplies(false, 123)
+        def futureCallback = new FutureResultCallback()
+        initializer.startHandshakeAsync(internalConnection, operationContext, futureCallback)
+        def description = futureCallback.get()
+        futureCallback = new FutureResultCallback()
+        initializer.finishHandshakeAsync(internalConnection, description, operationContext, futureCallback)
+        def connectionDescription = futureCallback.get().connectionDescription
+
+        then:
+        connectionDescription == getExpectedConnectionDescription(connectionDescription.connectionId.localValue, 123)
+    }
+
+    def 'should authenticate'() {
+        given:
+        def firstAuthenticator = Mock(Authenticator)
+        def initializer = new InternalStreamConnectionInitializer(SINGLE, firstAuthenticator, null, [], null)
+
+        when:
+        enqueueSuccessfulReplies(false, 123)
+
+        def internalDescription = initializer.startHandshake(internalConnection, operationContext)
+        def connectionDescription = initializer.finishHandshake(internalConnection, internalDescription, operationContext)
+                .connectionDescription
+
+        then:
+        connectionDescription
+        1 * firstAuthenticator.authenticate(internalConnection, _, _)
+    }
+
+    def 'should authenticate asynchronously'() {
+        given:
+        def authenticator = Mock(Authenticator)
+        def initializer = new InternalStreamConnectionInitializer(SINGLE, authenticator, null, [], null)
+
+        when:
+        enqueueSuccessfulReplies(false, 123)
+
+        def futureCallback = new FutureResultCallback()
+        initializer.startHandshakeAsync(internalConnection, operationContext, futureCallback)
+        def description = futureCallback.get()
+        futureCallback = new FutureResultCallback()
+        initializer.finishHandshakeAsync(internalConnection, description, operationContext, futureCallback)
+        def connectionDescription = futureCallback.get().connectionDescription
+
+        then:
+        connectionDescription
+        1 * authenticator.authenticateAsync(internalConnection, _, _, _) >> { it[3].onResult(null, null) }
+    }
+
+    def 'should not authenticate if server is an arbiter'() {
+        given:
+        def authenticator = Mock(Authenticator)
+        def initializer = new InternalStreamConnectionInitializer(SINGLE, authenticator, null, [], null)
+
+        when:
+        enqueueSuccessfulReplies(true, 123)
+
+        def internalDescription = initializer.startHandshake(internalConnection, operationContext)
+        def connectionDescription = initializer.finishHandshake(internalConnection, internalDescription, operationContext)
+                .connectionDescription
+
+        then:
+        connectionDescription
+        0 * authenticator.authenticate(internalConnection, _, _)
+    }
+
+    def 'should not authenticate asynchronously if server is an arbiter'() {
+        given:
+        def authenticator = Mock(Authenticator)
+        def initializer = new InternalStreamConnectionInitializer(SINGLE, authenticator, null, [], null)
+
+        when:
+        enqueueSuccessfulReplies(true, 123)
+
+        def futureCallback = new FutureResultCallback()
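+        // the reply queued by enqueueSuccessfulReplies(true, ...) flags the server as a replica-set
+        // arbiter (isreplicaset/arbiterOnly), so the initializer is expected to skip authentication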
+        initializer.startHandshakeAsync(internalConnection, operationContext, futureCallback)
+        def description = futureCallback.get()
+        futureCallback = new FutureResultCallback()
+        initializer.finishHandshakeAsync(internalConnection, description, operationContext, futureCallback)
+        def connectionDescription = futureCallback.get().connectionDescription
+
+        then:
+        connectionDescription
+        0 * authenticator.authenticateAsync(internalConnection, _, _, _)
+    }
+
+    def 'should add client metadata document to hello command'() {
+        given:
+        def initializer = new InternalStreamConnectionInitializer(SINGLE, null, clientMetadataDocument, [], null)
+        def expectedHelloCommandDocument = new BsonDocument(LEGACY_HELLO, new BsonInt32(1))
+                .append('helloOk', BsonBoolean.TRUE)
+                .append('\$db', new BsonString('admin'))
+        if (clientMetadataDocument != null) {
+            expectedHelloCommandDocument.append('client', clientMetadataDocument)
+        }
+
+        when:
+        enqueueSuccessfulReplies(false, 123)
+        if (async) {
+            def callback = new FutureResultCallback()
+            initializer.startHandshakeAsync(internalConnection, operationContext, callback)
+            def description = callback.get()
+            callback = new FutureResultCallback()
+            initializer.finishHandshakeAsync(internalConnection, description, operationContext, callback)
+            callback.get()
+        } else {
+            def internalDescription = initializer.startHandshake(internalConnection, operationContext)
+            initializer.finishHandshake(internalConnection, internalDescription, operationContext)
+        }
+
+        then:
+        decodeCommand(internalConnection.getSent()[0]) == expectedHelloCommandDocument
+
+        where:
+        [clientMetadataDocument, async] << [[ClientMetadataTest.createExpectedClientMetadataDocument('appName'), null],
+                                            [true, false]].combinations()
+    }
+
+    def 'should add compression to hello command'() {
+        given:
+        def initializer = new InternalStreamConnectionInitializer(SINGLE, null, null, compressors, null)
+        def expectedHelloCommandDocument = new BsonDocument(LEGACY_HELLO, new BsonInt32(1))
+                .append('helloOk', BsonBoolean.TRUE)
+                .append('\$db', new BsonString('admin'))
+
+        def compressionArray = new BsonArray()
+        for (def compressor : compressors) {
+            compressionArray.add(new BsonString(compressor.getName()))
+        }
+        if (!compressionArray.isEmpty()) {
+            expectedHelloCommandDocument.append('compression', compressionArray)
+        }
+
+        when:
+        enqueueSuccessfulReplies(false, 123)
+        if (async) {
+            def callback = new FutureResultCallback()
+            initializer.startHandshakeAsync(internalConnection, operationContext, callback)
+            def description = callback.get()
+            callback = new FutureResultCallback()
+            initializer.finishHandshakeAsync(internalConnection, description, operationContext, callback)
+            callback.get()
+        } else {
+            def internalDescription = initializer.startHandshake(internalConnection, operationContext)
+            initializer.finishHandshake(internalConnection, internalDescription, operationContext)
+        }
+
+        then:
+        decodeCommand(internalConnection.getSent()[0]) == expectedHelloCommandDocument
+
+        where:
+        [compressors, async] << [[[], [MongoCompressor.createZlibCompressor()]],
+                                 [true, false]].combinations()
+    }
+
+    def 'should speculatively authenticate with default authenticator'() {
+        given:
+        def credential = new MongoCredentialWithCache(createCredential('user', 'database', 'pencil' as char[]))
+        def authenticator = Spy(DefaultAuthenticator, constructorArgs: [credential, SINGLE, null])
+        def scramShaAuthenticator = Spy(ScramShaAuthenticator,
+                constructorArgs: [credential.withMechanism(AuthenticationMechanism.SCRAM_SHA_256),
+                        { 'rOprNGfwEbeRWgbNEkqO' }, { 'pencil' }, SINGLE, null])
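+        // the fixed 'rOprNGfwEbeRWgbNEkqO' nonce above and the canned replies queued below replay
+        // the SCRAM-SHA-256 example exchange from RFC 7677 (user 'user', password 'pencil')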
+        def initializer = new InternalStreamConnectionInitializer(SINGLE, authenticator, null, [], null)
+        authenticator.getAuthenticatorForHello() >> scramShaAuthenticator
+        def serverResponse = 'r=rOprNGfwEbeRWgbNEkqO%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0,s=W22ZaJ0SNY7soEsUEjb6gQ==,i=4096'
+        def speculativeAuthenticateResponse =
+                BsonDocument.parse("{ conversationId: 1, payload: BinData(0, '${encode64(serverResponse)}'), done: false }")
+        def firstClientChallenge = 'n,,n=user,r=rOprNGfwEbeRWgbNEkqO'
+
+        when:
+        enqueueSpeculativeAuthenticationResponsesForScramSha256()
+        def description = initializeConnection(async, initializer, internalConnection)
+
+        then:
+        description
+        if (async) {
+            1 * scramShaAuthenticator.authenticateAsync(internalConnection, _, _, _)
+        } else {
+            1 * scramShaAuthenticator.authenticate(internalConnection, _, _)
+        }
+        1 * ((SpeculativeAuthenticator) scramShaAuthenticator).createSpeculativeAuthenticateCommand(_)
+        ((SpeculativeAuthenticator) scramShaAuthenticator).getSpeculativeAuthenticateResponse() == speculativeAuthenticateResponse
+        def expectedHelloCommand = createHelloCommand(firstClientChallenge, 'SCRAM-SHA-256', true)
+        expectedHelloCommand == decodeCommand(internalConnection.getSent()[0])
+
+        where:
+        async << [true, false]
+    }
+
+    def 'should speculatively authenticate with SCRAM-SHA-256 authenticator'() {
+        given:
+        def credential = new MongoCredentialWithCache(createScramSha256Credential('user', 'database', 'pencil' as char[]))
+        def authenticator = Spy(ScramShaAuthenticator, constructorArgs: [credential, { 'rOprNGfwEbeRWgbNEkqO' }, { 'pencil' }, SINGLE,
+                null])
+        def initializer = new InternalStreamConnectionInitializer(SINGLE, authenticator, null, [], null)
+        def serverResponse = 'r=rOprNGfwEbeRWgbNEkqO%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0,s=W22ZaJ0SNY7soEsUEjb6gQ==,i=4096'
+        def speculativeAuthenticateResponse =
+                BsonDocument.parse("{ conversationId: 1, payload: BinData(0, '${encode64(serverResponse)}'), done: false }")
+        def firstClientChallenge = 'n,,n=user,r=rOprNGfwEbeRWgbNEkqO'
+
+        when:
+        enqueueSpeculativeAuthenticationResponsesForScramSha256()
+        def description = initializeConnection(async, initializer, internalConnection)
+
+        then:
+        description
+        if (async) {
+            1 * authenticator.authenticateAsync(internalConnection, _, _, _)
+        } else {
+            1 * authenticator.authenticate(internalConnection, _, _)
+        }
+        1 * ((SpeculativeAuthenticator) authenticator).createSpeculativeAuthenticateCommand(_)
+        ((SpeculativeAuthenticator) authenticator).getSpeculativeAuthenticateResponse() == speculativeAuthenticateResponse
+        def expectedHelloCommand = createHelloCommand(firstClientChallenge, 'SCRAM-SHA-256', false)
+        expectedHelloCommand == decodeCommand(internalConnection.getSent()[0])
+
+        where:
+        async << [true, false]
+    }
+
+    def 'should speculatively authenticate with SCRAM-SHA-1 authenticator'() {
+        given:
+        def credential = new MongoCredentialWithCache(createScramSha1Credential('user', 'database', 'pencil' as char[]))
+        def authenticator = Spy(ScramShaAuthenticator, constructorArgs: [credential, { 'fyko+d2lbbFgONRv9qkxdawL' }, { 'pencil' },
+                SINGLE, null])
+        def initializer = new InternalStreamConnectionInitializer(SINGLE, authenticator, null, [], null)
+        def serverResponse = 'r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096'
+        def speculativeAuthenticateResponse =
+                BsonDocument.parse("{ conversationId: 1, payload: BinData(0, '${encode64(serverResponse)}'), done: false }")
+
+        when:
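+        // the canned replies replay the RFC 5802 SCRAM-SHA-1 example conversation (user 'user', password 'pencil')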
enqueueSpeculativeAuthenticationResponsesForScramSha1() + def description = initializeConnection(async, initializer, internalConnection) + def firstClientChallenge = 'n,,n=user,r=fyko+d2lbbFgONRv9qkxdawL' + + then: + description + if (async) { + 1 * authenticator.authenticateAsync(internalConnection, _, _, _) + } else { + 1 * authenticator.authenticate(internalConnection, _, _) + } + 1 * ((SpeculativeAuthenticator) authenticator).createSpeculativeAuthenticateCommand(_) + ((SpeculativeAuthenticator) authenticator).getSpeculativeAuthenticateResponse() == speculativeAuthenticateResponse + def expectedHelloCommand = createHelloCommand(firstClientChallenge, 'SCRAM-SHA-1', false) + expectedHelloCommand == decodeCommand(internalConnection.getSent()[0]) + + where: + async << [true, false] + } + + def 'should speculatively authenticate with X509 authenticator'() { + given: + def credential = new MongoCredentialWithCache(createMongoX509Credential()) + def authenticator = Spy(X509Authenticator, constructorArgs: [credential, SINGLE, null]) + def initializer = new InternalStreamConnectionInitializer(SINGLE, authenticator, null, [], null) + def speculativeAuthenticateResponse = + BsonDocument.parse('{ dbname: "$external", user: "CN=client,OU=KernelUser,O=MongoDB,L=New York City,ST=New York,C=US"}') + + when: + enqueueSpeculativeAuthenticationResponsesForX509() + def description = initializeConnection(async, initializer, internalConnection) + + then: + description + if (async) { + 1 * authenticator.authenticateAsync(internalConnection, _, _, _) + } else { + 1 * authenticator.authenticate(internalConnection, _, _) + } + 1 * ((SpeculativeAuthenticator) authenticator).createSpeculativeAuthenticateCommand(_) + ((SpeculativeAuthenticator) authenticator).getSpeculativeAuthenticateResponse() == speculativeAuthenticateResponse + def expectedHelloCommand = createHelloCommand('', 'MONGODB-X509', false) + expectedHelloCommand == decodeCommand(internalConnection.getSent()[0]) + + where: + async << [true, false] + } + + def 'should not speculatively authenticate with Plain authenticator'() { + given: + def credential = new MongoCredentialWithCache(createPlainCredential('user', 'database', 'pencil' as char[])) + def authenticator = Spy(PlainAuthenticator, constructorArgs: [credential, SINGLE, null]) + def initializer = new InternalStreamConnectionInitializer(SINGLE, authenticator, null, [], null) + + when: + enqueueSpeculativeAuthenticationResponsesForPlain() + initializeConnection(async, initializer, internalConnection) + + then: + ((SpeculativeAuthenticator) authenticator).getSpeculativeAuthenticateResponse() == null + ((SpeculativeAuthenticator) authenticator) + .createSpeculativeAuthenticateCommand(internalConnection) == null + BsonDocument.parse("{$LEGACY_HELLO: 1, helloOk: true, '\$db': 'admin'}") == decodeCommand(internalConnection.getSent()[0]) + + where: + async << [true, false] + } + + private ConnectionDescription getExpectedConnectionDescription(final Long localValue, final Long serverValue) { + new ConnectionDescription(new ConnectionId(serverId, localValue, serverValue), + 3, ServerType.STANDALONE, 512, 16777216, 33554432, []) + } + + def initializeConnection(final boolean async, final InternalStreamConnectionInitializer initializer, + final TestInternalConnection connection) { + if (async) { + def callback = new FutureResultCallback() + initializer.startHandshakeAsync(internalConnection, operationContext, callback) + def description = callback.get() + callback = new FutureResultCallback() + 
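+            // finish the handshake with the description produced by the start phase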
initializer.finishHandshakeAsync(internalConnection, description, operationContext, callback) + callback.get() + } else { + def internalDescription = initializer.startHandshake(connection, operationContext) + initializer.finishHandshake(connection, internalDescription, operationContext) + } + } + + private ServerDescription getExpectedServerDescription(ServerDescription actualServerDescription) { + ServerDescription.builder() + .ok(true) + .address(serverId.address) + .type(ServerType.STANDALONE) + .state(ServerConnectionState.CONNECTED) + .minWireVersion(0) + .maxWireVersion(3) + .maxDocumentSize(16777216) + .roundTripTime(actualServerDescription.getRoundTripTimeNanos(), TimeUnit.NANOSECONDS) + .lastUpdateTimeNanos(actualServerDescription.getLastUpdateTime(TimeUnit.NANOSECONDS)) + .build() + } + + def enqueueSuccessfulReplies(final boolean isArbiter, final Integer serverConnectionId) { + internalConnection.enqueueReply(buildSuccessfulReply( + '{ok: 1, ' + + 'maxWireVersion: 3,' + + 'connectionId: ' + serverConnectionId + + (isArbiter ? ', isreplicaset: true, arbiterOnly: true' : '') + + '}')) + } + + def enqueueSpeculativeAuthenticationResponsesForScramSha256() { + def initialServerResponse = 'r=rOprNGfwEbeRWgbNEkqO%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0,s=W22ZaJ0SNY7soEsUEjb6gQ==,i=4096' + def finalServerResponse = 'v=6rriTRBi23WpRR/wtup+mMhUZUn/dB5nLTJRsjl95G4=' + enqueueSpeculativeAuthenticationResponsesForScramSha(initialServerResponse, finalServerResponse) + } + + def enqueueSpeculativeAuthenticationResponsesForScramSha1() { + def initialServerResponse = 'r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096' + def finalServerResponse = 'v=rmF9pqV8S7suAoZWja4dJRkFsKQ=' + enqueueSpeculativeAuthenticationResponsesForScramSha(initialServerResponse, finalServerResponse) + } + + def enqueueSpeculativeAuthenticationResponsesForScramSha(final String initialServerResponse, + final String finalServerResponse) { + internalConnection.enqueueReply(buildSuccessfulReply( + '{ok: 1, maxWireVersion: 9, ' + + "$LEGACY_HELLO: true," + + 'speculativeAuthenticate: { conversationId: 1, done: false, ' + + "payload: BinData(0, '${encode64(initialServerResponse)}')}}")) + internalConnection.enqueueReply(buildSuccessfulReply( + '{ok: 1, maxWireVersion: 9, ' + + 'conversationId: 1, done: true, ' + + "payload: BinData(0, '${encode64(finalServerResponse)}')}")) + internalConnection.enqueueReply(buildSuccessfulReply('{ok: 1}')) + } + + def enqueueSpeculativeAuthenticationResponsesForX509() { + internalConnection.enqueueReply(buildSuccessfulReply( + "{ok: 1, maxWireVersion: 9, $LEGACY_HELLO: true, conversationId: 1, " + + 'speculativeAuthenticate: { dbname: \"$external\", ' + + 'user: \"CN=client,OU=KernelUser,O=MongoDB,L=New York City,ST=New York,C=US\" }}')) + internalConnection.enqueueReply(buildSuccessfulReply('{ok: 1}')) + } + + def enqueueSpeculativeAuthenticationResponsesForPlain() { + internalConnection.enqueueReply(buildSuccessfulReply( + "{ok: 1, maxWireVersion: 9, $LEGACY_HELLO: true, conversationId: 1}")) + internalConnection.enqueueReply(buildSuccessfulReply( + '{ok: 1, done: true, conversationId: 1}')) + internalConnection.enqueueReply(buildSuccessfulReply('{ok: 1}')) + } + + def encode64(String string) { + Base64.getEncoder().encodeToString(string.getBytes(Charset.forName('UTF-8'))) + } + + def createHelloCommand(final String firstClientChallenge, final String mechanism, + final boolean hasSaslSupportedMechs) { + String hello = "{$LEGACY_HELLO: 1, helloOk: true, " + + (hasSaslSupportedMechs 
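+                        // saslSupportedMechs is included only for the default authenticator, which negotiates the mechanism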
? 'saslSupportedMechs: "database.user", ' : '') + + (mechanism == 'MONGODB-X509' ? + 'speculativeAuthenticate: { authenticate: 1, ' + + "mechanism: '${mechanism}', db: \"\$external\" }" : + 'speculativeAuthenticate: { saslStart: 1, ' + + "mechanism: '${mechanism}', payload: BinData(0, '${encode64(firstClientChallenge)}'), " + + 'db: "database", options: { skipEmptyExchange: true } }') + + ', \$db: \"admin\" }' + + + BsonDocument.parse(hello) + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionSpecification.groovy new file mode 100644 index 00000000000..3cdabf31da3 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/InternalStreamConnectionSpecification.groovy @@ -0,0 +1,1225 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection + +import com.mongodb.MongoCommandException +import com.mongodb.MongoInternalException +import com.mongodb.MongoInterruptedException +import com.mongodb.MongoOperationTimeoutException +import com.mongodb.MongoSocketClosedException +import com.mongodb.MongoSocketException +import com.mongodb.MongoSocketReadException +import com.mongodb.MongoSocketReadTimeoutException +import com.mongodb.MongoSocketWriteException +import com.mongodb.ReadConcern +import com.mongodb.ServerAddress +import com.mongodb.async.FutureResultCallback +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ConnectionDescription +import com.mongodb.connection.ConnectionId +import com.mongodb.connection.ServerConnectionState +import com.mongodb.connection.ServerDescription +import com.mongodb.connection.ServerId +import com.mongodb.connection.ServerType +import com.mongodb.event.CommandFailedEvent +import com.mongodb.event.CommandStartedEvent +import com.mongodb.event.CommandSucceededEvent +import com.mongodb.internal.ExceptionUtils.MongoCommandExceptionUtils +import com.mongodb.internal.TimeoutContext +import com.mongodb.internal.session.SessionContext +import com.mongodb.internal.validator.NoOpFieldNameValidator +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonReader +import org.bson.BsonString +import org.bson.ByteBufNIO +import org.bson.codecs.BsonDocumentCodec +import org.bson.codecs.DecoderContext +import org.bson.codecs.configuration.CodecConfigurationException +import spock.lang.Specification + +import java.nio.ByteBuffer +import java.nio.channels.ClosedByInterruptException +import java.util.concurrent.CountDownLatch +import java.util.concurrent.ExecutorService +import java.util.concurrent.Executors + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT +import static com.mongodb.ReadPreference.primary +import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE +import static 
com.mongodb.connection.ClusterConnectionMode.SINGLE +import static com.mongodb.connection.ConnectionDescription.getDefaultMaxMessageSize +import static com.mongodb.connection.ConnectionDescription.getDefaultMaxWriteBatchSize +import static com.mongodb.connection.ServerDescription.getDefaultMaxDocumentSize +import static com.mongodb.internal.connection.MessageHelper.LEGACY_HELLO +import static com.mongodb.internal.connection.MessageHelper.LEGACY_HELLO_LOWER +import static com.mongodb.internal.operation.ServerVersionHelper.LATEST_WIRE_VERSION +import static java.util.concurrent.TimeUnit.NANOSECONDS +import static java.util.concurrent.TimeUnit.SECONDS + +@SuppressWarnings(['UnusedVariable']) +class InternalStreamConnectionSpecification extends Specification { + private static final ServerId SERVER_ID = new ServerId(new ClusterId(), new ServerAddress()) + + def database = 'admin' + def fieldNameValidator = NoOpFieldNameValidator.INSTANCE + def helper = new StreamHelper() + def serverAddress = new ServerAddress() + def connectionId = new ConnectionId(SERVER_ID, 1, 1) + def commandListener = new TestCommandListener() + def messageSettings = MessageSettings.builder().maxWireVersion(LATEST_WIRE_VERSION).build() + + def connectionDescription = new ConnectionDescription(connectionId, 3, + ServerType.STANDALONE, getDefaultMaxWriteBatchSize(), getDefaultMaxDocumentSize(), getDefaultMaxMessageSize(), []) + def serverDescription = ServerDescription.builder() + .ok(true) + .state(ServerConnectionState.CONNECTED) + .type(ServerType.STANDALONE) + .address(serverAddress) + .build() + def internalConnectionInitializationDescription = + new InternalConnectionInitializationDescription(connectionDescription, serverDescription) + def stream = Mock(Stream) { + openAsync(_, _) >> { it.last().completed(null) } + } + def streamFactory = Mock(StreamFactory) { + create(_) >> { stream } + } + def initializer = Mock(InternalConnectionInitializer) { + startHandshake(_, _) >> { internalConnectionInitializationDescription } + finishHandshake(_, _, _) >> { internalConnectionInitializationDescription } + startHandshakeAsync(_, _, _) >> { it[2].onResult(internalConnectionInitializationDescription, null) } + finishHandshakeAsync(_, _, _, _) >> { it[3].onResult(internalConnectionInitializationDescription, null) } + } + + def getConnection() { + new InternalStreamConnection(SINGLE, SERVER_ID, new TestConnectionGenerationSupplier(), streamFactory, [], commandListener, + initializer) + } + + def getOpenedConnection() { + def connection = getConnection() + connection.open(OPERATION_CONTEXT) + connection + } + + def 'should change the description when opened'() { + when: + def connection = getConnection() + + then: + connection.getDescription().getServerType() == ServerType.UNKNOWN + connection.getDescription().getConnectionId().getServerValue() == null + connection.getInitialServerDescription() == ServerDescription.builder() + .address(serverAddress) + .type(ServerType.UNKNOWN) + .state(ServerConnectionState.CONNECTING) + .lastUpdateTimeNanos(connection.getInitialServerDescription().getLastUpdateTime(NANOSECONDS)) + .build() + when: + connection.open(OPERATION_CONTEXT) + + then: + connection.opened() + connection.getDescription().getServerType() == ServerType.STANDALONE + connection.getDescription().getConnectionId().getServerValue() == 1 + connection.getDescription() == connectionDescription + connection.getInitialServerDescription() == serverDescription + } + + + def 'should change the description when opened asynchronously'() { 
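+        // async counterpart of the previous test; the description must stay CONNECTING/UNKNOWN until openAsync completes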
+ when: + def connection = getConnection() + def futureResultCallback = new FutureResultCallback() + + then: + connection.getDescription().getServerType() == ServerType.UNKNOWN + connection.getDescription().getConnectionId().getServerValue() == null + connection.getInitialServerDescription() == ServerDescription.builder() + .address(serverAddress) + .type(ServerType.UNKNOWN) + .state(ServerConnectionState.CONNECTING) + .lastUpdateTimeNanos(connection.getInitialServerDescription().getLastUpdateTime(NANOSECONDS)) + .build() + + when: + connection.openAsync(OPERATION_CONTEXT, futureResultCallback) + futureResultCallback.get() + + then: + connection.opened() + connection.getDescription() == connectionDescription + connection.getInitialServerDescription() == serverDescription + } + + def 'should close the stream when initialization throws an exception'() { + given: + def failedInitializer = Mock(InternalConnectionInitializer) { + startHandshake(_, _) >> { throw new MongoInternalException('Something went wrong') } + } + def connection = new InternalStreamConnection(SINGLE, SERVER_ID, new TestConnectionGenerationSupplier(), streamFactory, [], null, + failedInitializer) + + when: + connection.open(OPERATION_CONTEXT) + + then: + thrown MongoInternalException + connection.isClosed() + } + + + def 'should close the stream when initialization throws an exception asynchronously'() { + given: + def failedInitializer = Mock(InternalConnectionInitializer) { + startHandshakeAsync(_, _, _) >> { it[2].onResult(null, new MongoInternalException('Something went wrong')) } + } + def connection = new InternalStreamConnection(SINGLE, SERVER_ID, new TestConnectionGenerationSupplier(), streamFactory, [], null, + failedInitializer) + + when: + def futureResultCallback = new FutureResultCallback() + connection.openAsync(OPERATION_CONTEXT, futureResultCallback) + futureResultCallback.get() + + then: + thrown MongoInternalException + connection.isClosed() + } + + def 'should close the stream when writing a message throws an exception'() { + given: + stream.write(_, _) >> { throw new IOException('Something went wrong') } + + def connection = getOpenedConnection() + def (buffers1, messageId1) = helper.hello() + def (buffers2, messageId2) = helper.hello() + + when: + connection.sendMessage(buffers1, messageId1, OPERATION_CONTEXT) + + then: + connection.isClosed() + thrown MongoSocketWriteException + + when: + connection.sendMessage(buffers2, messageId2, OPERATION_CONTEXT) + + then: + thrown MongoSocketClosedException + } + + + def 'should close the stream when writing a message throws an exception asynchronously'() { + given: + def (buffers1, messageId1, sndCallbck1, rcvdCallbck1) = helper.helloAsync() + def (buffers2, messageId2, sndCallbck2, rcvdCallbck2) = helper.helloAsync() + int seen = 0 + + stream.writeAsync(_, _, _) >> { buffers, operationContext, callback -> + if (seen == 0) { + seen += 1 + return callback.failed(new IOException('Something went wrong')) + } + callback.completed(null) + } + + def connection = getOpenedConnection() + + when: + connection.sendMessageAsync(buffers1, messageId1, OPERATION_CONTEXT, sndCallbck1) + sndCallbck1.get(10, SECONDS) + + then: + thrown MongoSocketWriteException + connection.isClosed() + + when: + connection.sendMessageAsync(buffers2, messageId2, OPERATION_CONTEXT, sndCallbck2) + sndCallbck2.get(10, SECONDS) + + then: + thrown MongoSocketClosedException + } + + def 'should close the stream when reading the message header throws an exception'() { + given: + stream.read(16, _) >> { 
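+            // fail the 16-byte message-header read to simulate the socket dying mid-reply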
throw new IOException('Something went wrong') } + + def connection = getOpenedConnection() + def (buffers1, messageId1) = helper.hello() + def (buffers2, messageId2) = helper.hello() + + when: + connection.sendMessage(buffers1, messageId1, OPERATION_CONTEXT) + connection.sendMessage(buffers2, messageId2, OPERATION_CONTEXT) + connection.receiveMessage(messageId1, OPERATION_CONTEXT) + + then: + connection.isClosed() + thrown MongoSocketReadException + + when: + connection.receiveMessage(messageId2, OPERATION_CONTEXT) + + then: + thrown MongoSocketClosedException + } + + def 'should throw MongoInternalException when reply header message length > max message length'() { + given: + stream.read(36, _) >> { helper.headerWithMessageSizeGreaterThanMax(1) } + + def connection = getOpenedConnection() + + when: + connection.receiveMessage(1, OPERATION_CONTEXT) + + then: + thrown(MongoInternalException) + connection.isClosed() + } + + def 'should throw MongoInternalException when reply header message length > max message length asynchronously'() { + given: + stream.readAsync(16, _, _) >> { numBytes, operationContext, handler -> + handler.completed(helper.headerWithMessageSizeGreaterThanMax(1, connectionDescription.maxMessageSize)) + } + + def connection = getOpenedConnection() + def callback = new FutureResultCallback() + + when: + connection.receiveMessageAsync(1, OPERATION_CONTEXT, callback) + callback.get() + + then: + thrown(MongoInternalException) + connection.isClosed() + } + + def 'should throw MongoInterruptedException and leave the interrupt status set when Stream.write throws InterruptedIOException'() { + given: + stream.write(_, _) >> { throw new InterruptedIOException() } + def connection = getOpenedConnection() + Thread.currentThread().interrupt() + + when: + connection.sendMessage([new ByteBufNIO(ByteBuffer.allocate(1))], 1, OPERATION_CONTEXT) + + then: + Thread.interrupted() + thrown(MongoInterruptedException) + connection.isClosed() + } + + def 'should throw MongoInterruptedException and leave the interrupt status unset when Stream.write throws InterruptedIOException'() { + given: + stream.write(_, _) >> { throw new InterruptedIOException() } + def connection = getOpenedConnection() + + when: + connection.sendMessage([new ByteBufNIO(ByteBuffer.allocate(1))], 1, OPERATION_CONTEXT) + + then: + !Thread.interrupted() + thrown(MongoInterruptedException) + connection.isClosed() + } + + def 'should throw MongoInterruptedException and leave the interrupt status set when Stream.write throws ClosedByInterruptException'() { + given: + stream.write(_, _) >> { throw new ClosedByInterruptException() } + def connection = getOpenedConnection() + Thread.currentThread().interrupt() + + when: + connection.sendMessage([new ByteBufNIO(ByteBuffer.allocate(1))], 1, OPERATION_CONTEXT) + + then: + Thread.interrupted() + thrown(MongoInterruptedException) + connection.isClosed() + } + + def 'should throw MongoInterruptedException when Stream.write throws SocketException and the thread is interrupted'() { + given: + stream.write(_, _) >> { throw new SocketException() } + def connection = getOpenedConnection() + Thread.currentThread().interrupt() + + when: + connection.sendMessage([new ByteBufNIO(ByteBuffer.allocate(1))], 1, OPERATION_CONTEXT) + + then: + Thread.interrupted() + thrown(MongoInterruptedException) + connection.isClosed() + } + + def 'should throw MongoSocketWriteException when Stream.write throws SocketException and the thread is not interrupted'() { + given: + stream.write(_, _) >> { throw new 
SocketException() }
+        def connection = getOpenedConnection()
+
+        when:
+        connection.sendMessage([new ByteBufNIO(ByteBuffer.allocate(1))], 1, OPERATION_CONTEXT)
+
+        then:
+        thrown(MongoSocketWriteException)
+        connection.isClosed()
+    }
+
+    def 'should throw MongoInterruptedException and leave the interrupt status set when Stream.read throws InterruptedIOException'() {
+        given:
+        stream.read(_, _) >> { throw new InterruptedIOException() }
+        def connection = getOpenedConnection()
+        Thread.currentThread().interrupt()
+
+        when:
+        connection.receiveMessage(1, OPERATION_CONTEXT)
+
+        then:
+        Thread.interrupted()
+        thrown(MongoInterruptedException)
+        connection.isClosed()
+    }
+
+    def 'should throw MongoInterruptedException and leave the interrupt status unset when Stream.read throws InterruptedIOException'() {
+        given:
+        stream.read(_, _) >> { throw new InterruptedIOException() }
+        def connection = getOpenedConnection()
+
+        when:
+        connection.receiveMessage(1, OPERATION_CONTEXT)
+
+        then:
+        !Thread.interrupted()
+        thrown(MongoInterruptedException)
+        connection.isClosed()
+    }
+
+    def 'should throw MongoInterruptedException and leave the interrupt status set when Stream.read throws ClosedByInterruptException'() {
+        given:
+        stream.read(_, _) >> { throw new ClosedByInterruptException() }
+        def connection = getOpenedConnection()
+        Thread.currentThread().interrupt()
+
+        when:
+        connection.receiveMessage(1, OPERATION_CONTEXT)
+
+        then:
+        Thread.interrupted()
+        thrown(MongoInterruptedException)
+        connection.isClosed()
+    }
+
+    def 'should throw MongoInterruptedException when Stream.read throws SocketException and the thread is interrupted'() {
+        given:
+        stream.read(_, _) >> { throw new SocketException() }
+        def connection = getOpenedConnection()
+        Thread.currentThread().interrupt()
+
+        when:
+        connection.receiveMessage(1, OPERATION_CONTEXT)
+
+        then:
+        Thread.interrupted()
+        thrown(MongoInterruptedException)
+        connection.isClosed()
+    }
+
+    def 'should throw MongoSocketReadException when Stream.read throws SocketException and the thread is not interrupted'() {
+        given:
+        stream.read(_, _) >> { throw new SocketException() }
+        def connection = getOpenedConnection()
+
+        when:
+        connection.receiveMessage(1, OPERATION_CONTEXT)
+
+        then:
+        thrown(MongoSocketReadException)
+        connection.isClosed()
+    }
+
+    def 'Should throw timeout exception with underlying socket exception as a cause when Stream.read throws SocketTimeoutException'() {
+        given:
+        stream.read(_, _) >> { throw new SocketTimeoutException() }
+        def connection = getOpenedConnection()
+
+        when:
+        connection.receiveMessage(1, OPERATION_CONTEXT.withTimeoutContext(
+                new TimeoutContext(TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT)))
+
+        then:
+        def timeoutException = thrown(MongoOperationTimeoutException)
+        def mongoSocketReadTimeoutException = timeoutException.getCause()
+        mongoSocketReadTimeoutException instanceof MongoSocketReadTimeoutException
+        mongoSocketReadTimeoutException.getCause() instanceof SocketTimeoutException
+
+        connection.isClosed()
+    }
+
+    def 'Should wrap MongoSocketReadTimeoutException with MongoOperationTimeoutException'() {
+        given:
+        stream.read(_, _) >> { throw new MongoSocketReadTimeoutException("test", new ServerAddress(), null) }
+        def connection = getOpenedConnection()
+
+        when:
+        connection.receiveMessage(1, OPERATION_CONTEXT.withTimeoutContext(
+                new TimeoutContext(TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT)))
+
+        then:
+        def timeoutException = thrown(MongoOperationTimeoutException)
+        def mongoSocketReadTimeoutException = timeoutException.getCause()
+        mongoSocketReadTimeoutException instanceof MongoSocketReadTimeoutException
+        mongoSocketReadTimeoutException.getCause() == null
+
+        connection.isClosed()
+    }
+
+
+    def 'Should wrap SocketTimeoutException with timeout exception when Stream.read throws SocketTimeoutException async'() {
+        given:
+        stream.readAsync(_, _, _) >> { numBytes, operationContext, handler ->
+            handler.failed(new SocketTimeoutException())
+        }
+        def connection = getOpenedConnection()
+        def callback = new FutureResultCallback()
+        def operationContext = OPERATION_CONTEXT.withTimeoutContext(
+                new TimeoutContext(TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT))
+
+        when:
+        connection.receiveMessageAsync(1, operationContext, callback)
+        callback.get()
+
+        then:
+        def timeoutException = thrown(MongoOperationTimeoutException)
+        def mongoSocketReadTimeoutException = timeoutException.getCause()
+        mongoSocketReadTimeoutException instanceof MongoSocketReadTimeoutException
+        mongoSocketReadTimeoutException.getCause() instanceof SocketTimeoutException
+
+        connection.isClosed()
+    }
+
+    def 'Should wrap MongoSocketReadTimeoutException with MongoOperationTimeoutException async'() {
+        given:
+        stream.readAsync(_, _, _) >> { numBytes, operationContext, handler ->
+            handler.failed(new MongoSocketReadTimeoutException("test", new ServerAddress(), null))
+        }
+
+        def connection = getOpenedConnection()
+        def callback = new FutureResultCallback()
+        def operationContext = OPERATION_CONTEXT.withTimeoutContext(
+                new TimeoutContext(TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT))
+
+        when:
+        connection.receiveMessageAsync(1, operationContext, callback)
+        callback.get()
+
+        then:
+        def timeoutException = thrown(MongoOperationTimeoutException)
+        def mongoSocketReadTimeoutException = timeoutException.getCause()
+        mongoSocketReadTimeoutException instanceof MongoSocketReadTimeoutException
+        mongoSocketReadTimeoutException.getCause() == null
+
+        connection.isClosed()
+    }
+
+    def 'should close the stream when reading the message header throws an exception asynchronously'() {
+        given:
+        int seen = 0
+        def (buffers1, messageId1, sndCallbck1, rcvdCallbck1) = helper.helloAsync()
+        def (buffers2, messageId2, sndCallbck2, rcvdCallbck2) = helper.helloAsync()
+        def headers = helper.generateHeaders([messageId1, messageId2])
+
+        stream.writeAsync(_, _, _) >> { buffers, operationContext, callback ->
+            callback.completed(null)
+        }
+        stream.readAsync(16, _, _) >> { numBytes, operationContext, handler ->
+            if (seen == 0) {
+                seen += 1
+                return handler.failed(new IOException('Something went wrong'))
+            }
+            handler.completed(headers.pop())
+        }
+        stream.readAsync(94, _, _) >> { numBytes, operationContext, handler ->
+            handler.completed(helper.defaultBody())
+        }
+        def connection = getOpenedConnection()
+
+        when:
+        connection.sendMessageAsync(buffers1, messageId1, OPERATION_CONTEXT, sndCallbck1)
+        connection.sendMessageAsync(buffers2, messageId2, OPERATION_CONTEXT, sndCallbck2)
+        connection.receiveMessageAsync(messageId1, OPERATION_CONTEXT, rcvdCallbck1)
+        connection.receiveMessageAsync(messageId2, OPERATION_CONTEXT, rcvdCallbck2)
+        rcvdCallbck1.get(1, SECONDS)
+
+        then:
+        thrown MongoSocketReadException
+        connection.isClosed()
+
+        when:
+        rcvdCallbck2.get(1, SECONDS)
+
+        then:
+        thrown MongoSocketClosedException
+    }
+
+    def 'should close the stream when reading the message body throws an exception'() {
+        given:
+        stream.read(16, _) >> helper.defaultMessageHeader(1)
+        stream.read(90, _) >> { throw new IOException('Something went wrong') }
+
+        def connection =
getOpenedConnection() + + when: + connection.receiveMessage(1, OPERATION_CONTEXT) + + then: + connection.isClosed() + thrown MongoSocketReadException + + when: + connection.receiveMessage(1, OPERATION_CONTEXT) + + then: + thrown MongoSocketClosedException + } + + + def 'should close the stream when reading the message body throws an exception asynchronously'() { + given: + def (buffers1, messageId1, sndCallbck1, rcvdCallbck1) = helper.helloAsync() + def (buffers2, messageId2, sndCallbck2, rcvdCallbck2) = helper.helloAsync() + def headers = helper.generateHeaders([messageId1, messageId2]) + + stream.writeAsync(_, _, _) >> { buffers, operationContext, callback -> + callback.completed(null) + } + stream.readAsync(16, _, _) >> { numBytes, operationContext, handler -> + handler.completed(headers.remove(0)) + } + stream.readAsync(_, _, _) >> { numBytes, operationContext, handler -> + handler.failed(new IOException('Something went wrong')) + } + def connection = getOpenedConnection() + + when: + connection.sendMessageAsync(buffers1, messageId1, OPERATION_CONTEXT, sndCallbck1) + connection.sendMessageAsync(buffers2, messageId2, OPERATION_CONTEXT, sndCallbck2) + connection.receiveMessageAsync(messageId1, OPERATION_CONTEXT, rcvdCallbck1) + rcvdCallbck1.get(1, SECONDS) + + then: + thrown MongoSocketReadException + connection.isClosed() + + when: + connection.receiveMessageAsync(messageId2, OPERATION_CONTEXT, rcvdCallbck2) + rcvdCallbck2.get(1, SECONDS) + + then: + thrown MongoSocketClosedException + } + + def 'should not close the stream on a command exception'() { + given: + def connection = getOpenedConnection() + def pingCommandDocument = new BsonDocument('ping', new BsonInt32(1)) + def commandMessage = new CommandMessage(database, pingCommandDocument, fieldNameValidator, primary(), messageSettings, MULTIPLE, + null) + def response = '{ok : 0, errmsg : "failed"}' + stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } + stream.read(16, _) >> helper.messageHeader(commandMessage.getId(), response) + stream.read(_, _) >> helper.reply(response) + + when: + connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT) + + then: + thrown(MongoCommandException) + !connection.isClosed() + } + + def 'should not close the stream on an asynchronous command exception'() { + given: + def connection = getOpenedConnection() + def pingCommandDocument = new BsonDocument('ping', new BsonInt32(1)) + def commandMessage = new CommandMessage(database, pingCommandDocument, fieldNameValidator, primary(), messageSettings, MULTIPLE, + null) + def callback = new FutureResultCallback() + def response = '{ok : 0, errmsg : "failed"}' + + stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } + stream.writeAsync(_, _, _) >> { buffers, operationContext, handler -> + handler.completed(null) + } + stream.readAsync(16, _, _) >> { numBytes, operationContext, handler -> + handler.completed(helper.defaultMessageHeader(commandMessage.getId())) + } + stream.readAsync(_, _, _) >> { numBytes, operationContext, handler -> + handler.completed(helper.reply(response)) + } + + when: + connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT, callback) + callback.get() + + then: + thrown(MongoCommandException) + !connection.isClosed() + } + + def 'should notify all asynchronous writers of an exception'() { + given: + int numberOfOperations = 3 + ExecutorService streamPool = Executors.newFixedThreadPool(1) + + def messages = 
(1..numberOfOperations).collect { helper.helloAsync() } + + def streamLatch = new CountDownLatch(1) + stream.writeAsync(_, _, _) >> { buffers, operationContext, callback -> + streamPool.submit { + streamLatch.await() + callback.failed(new IOException()) + } + } + + when: + def connection = getOpenedConnection() + def callbacks = [] + (1..numberOfOperations).each { n -> + def (buffers, messageId, sndCallbck, rcvdCallbck) = messages.pop() + connection.sendMessageAsync(buffers, messageId, OPERATION_CONTEXT, sndCallbck) + callbacks.add(sndCallbck) + } + streamLatch.countDown() + + then: + expectException(callbacks.pop()) + expectException(callbacks.pop()) + expectException(callbacks.pop()) + + cleanup: + streamPool.shutdown() + } + + def 'should send events for successful command'() { + given: + def connection = getOpenedConnection() + def pingCommandDocument = new BsonDocument('ping', new BsonInt32(1)) + def commandMessage = new CommandMessage(database, pingCommandDocument, fieldNameValidator, primary(), messageSettings, MULTIPLE, + null) + stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } + stream.read(16, _) >> helper.defaultMessageHeader(commandMessage.getId()) + stream.read(90, _) >> helper.defaultReply() + + when: + connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT) + + then: + commandListener.eventsWereDelivered([ + new CommandStartedEvent(null, 1, 1, connection.getDescription(), 'admin', 'ping', + pingCommandDocument.append('$db', new BsonString('admin'))), + new CommandSucceededEvent(null, 1, 1, connection.getDescription(), 'admin', 'ping', + new BsonDocument('ok', new BsonInt32(1)), 1000)]) + } + + def 'should send events for successful command with decoding error'() { + given: + def connection = getOpenedConnection() + def pingCommandDocument = new BsonDocument('ping', new BsonInt32(1)) + def commandMessage = new CommandMessage(database, pingCommandDocument, fieldNameValidator, primary(), messageSettings, MULTIPLE, + null) + stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } + stream.read(16, _) >> helper.defaultMessageHeader(commandMessage.getId()) + stream.read(90, _) >> helper.defaultReply() + + when: + connection.sendAndReceive(commandMessage, { + BsonReader reader, DecoderContext decoderContext -> throw new CodecConfigurationException('') + }, OPERATION_CONTEXT) + + then: + thrown(CodecConfigurationException) + commandListener.eventsWereDelivered([ + new CommandStartedEvent(null, 1, 1, connection.getDescription(), 'admin', 'ping', + pingCommandDocument.append('$db', new BsonString('admin'))), + new CommandSucceededEvent(null, 1, 1, connection.getDescription(), 'admin', 'ping', + new BsonDocument('ok', new BsonInt32(1)), 1000)]) + } + + def 'should extract cluster and operation time into session context'() { + given: + def connection = getOpenedConnection() + def pingCommandDocument = new BsonDocument('ping', new BsonInt32(1)) + def commandMessage = new CommandMessage(database, pingCommandDocument, fieldNameValidator, primary(), messageSettings, MULTIPLE, + null) + def response = '''{ + ok : 1, + operationTime : { $timestamp : { "t" : 40, "i" : 20 } }, + $clusterTime : { clusterTime : { $timestamp : { "t" : 42, "i" : 21 } } } + }''' + stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } + stream.read(16, _) >> helper.defaultMessageHeader(commandMessage.getId()) + stream.read(_, _) >> helper.reply(response) + def sessionContext = Mock(SessionContext) { + 1 * 
advanceOperationTime(BsonDocument.parse(response).getTimestamp('operationTime')) + 1 * advanceClusterTime(BsonDocument.parse(response).getDocument('$clusterTime')) + getReadConcern() >> ReadConcern.DEFAULT + } + def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext) + + when: + connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), operationContext) + + then: + true + } + + def 'should extract cluster and operation time into session context asynchronously'() { + given: + def connection = getOpenedConnection() + def pingCommandDocument = new BsonDocument('ping', new BsonInt32(1)) + def commandMessage = new CommandMessage(database, pingCommandDocument, fieldNameValidator, primary(), messageSettings, MULTIPLE, + null) + def callback = new FutureResultCallback() + def response = '''{ + ok : 1, + operationTime : { $timestamp : { "t" : 40, "i" : 20 } }, + $clusterTime : { clusterTime : { $timestamp : { "t" : 42, "i" : 21 } } } + }''' + stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } + stream.writeAsync(_, _, _) >> { buffers, operationContext, handler -> + handler.completed(null) + } + stream.readAsync(16, _, _) >> { numBytes, operationContext, handler -> + handler.completed(helper.defaultMessageHeader(commandMessage.getId())) + } + stream.readAsync(_, _, _) >> { numBytes, operationContext, handler -> + handler.completed(helper.reply(response)) + } + def sessionContext = Mock(SessionContext) { + 1 * advanceOperationTime(BsonDocument.parse(response).getTimestamp('operationTime')) + 1 * advanceClusterTime(BsonDocument.parse(response).getDocument('$clusterTime')) + getReadConcern() >> ReadConcern.DEFAULT + } + def operationContext = OPERATION_CONTEXT.withSessionContext(sessionContext) + + when: + connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), operationContext, callback) + callback.get() + + then: + true + } + + def 'should send events for command failure with exception writing message'() { + given: + def connection = getOpenedConnection() + def pingCommandDocument = new BsonDocument('ping', new BsonInt32(1)) + def commandMessage = new CommandMessage(database, pingCommandDocument, fieldNameValidator, primary(), messageSettings, MULTIPLE, + null) + stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } + stream.write(_, _) >> { throw new MongoSocketWriteException('Failed to write', serverAddress, new IOException()) } + + when: + connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT) + + then: + def e = thrown(MongoSocketWriteException) + commandListener.eventsWereDelivered([ + new CommandStartedEvent(null, 1, 1, connection.getDescription(), 'admin', 'ping', + pingCommandDocument.append('$db', new BsonString('admin'))), + new CommandFailedEvent(null, 1, 1, connection.getDescription(), 'admin', 'ping', 0, e)]) + } + + def 'should send events for command failure with exception reading header'() { + given: + def connection = getOpenedConnection() + def pingCommandDocument = new BsonDocument('ping', new BsonInt32(1)) + def commandMessage = new CommandMessage(database, pingCommandDocument, fieldNameValidator, primary(), messageSettings, MULTIPLE, + null) + stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } + stream.read(16, _) >> { throw new MongoSocketReadException('Failed to read', serverAddress) } + + when: + connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT) + + then: + def e = 
thrown(MongoSocketReadException) + commandListener.eventsWereDelivered([ + new CommandStartedEvent(null, 1, 1, connection.getDescription(), 'admin', 'ping', + pingCommandDocument.append('$db', new BsonString('admin'))), + new CommandFailedEvent(null, 1, 1, connection.getDescription(), 'admin', 'ping', 0, e)]) + } + + def 'should send events for command failure with exception reading body'() { + given: + def connection = getOpenedConnection() + def pingCommandDocument = new BsonDocument('ping', new BsonInt32(1)) + def commandMessage = new CommandMessage(database, pingCommandDocument, fieldNameValidator, primary(), messageSettings, MULTIPLE, + null) + stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } + stream.read(16, _) >> helper.defaultMessageHeader(commandMessage.getId()) + stream.read(90, _) >> { throw new MongoSocketReadException('Failed to read', serverAddress) } + + when: + connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT) + + then: + def e = thrown(MongoSocketException) + commandListener.eventsWereDelivered([ + new CommandStartedEvent(null, 1, 1, connection.getDescription(), 'admin', 'ping', + pingCommandDocument.append('$db', new BsonString('admin'))), + new CommandFailedEvent(null, 1, 1, connection.getDescription(), 'admin', 'ping', 0, e)]) + } + + def 'should send events for command failure with exception from failed command'() { + given: + def connection = getOpenedConnection() + def pingCommandDocument = new BsonDocument('ping', new BsonInt32(1)) + def commandMessage = new CommandMessage(database, pingCommandDocument, fieldNameValidator, primary(), messageSettings, MULTIPLE, + null) + def response = '{ok : 0, errmsg : "failed"}' + stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } + stream.read(16, _) >> helper.messageHeader(commandMessage.getId(), response) + stream.read(_, _) >> helper.reply(response) + + when: + connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT) + + then: + def e = thrown(MongoCommandException) + commandListener.eventsWereDelivered([ + new CommandStartedEvent(null, 1, 1, connection.getDescription(), 'admin', 'ping', + pingCommandDocument.append('$db', new BsonString('admin'))), + new CommandFailedEvent(null, 1, 1, connection.getDescription(), 'admin', 'ping', 0, e)]) + } + + def 'should send events with elided command and response in successful security-sensitive commands'() { + given: + def securitySensitiveCommandName = securitySensitiveCommand.keySet().iterator().next() + def connection = getOpenedConnection() + def commandMessage = new CommandMessage(database, securitySensitiveCommand, fieldNameValidator, primary(), messageSettings, + MULTIPLE, null) + stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } + stream.read(16, _) >> helper.defaultMessageHeader(commandMessage.getId()) + stream.read(90, _) >> helper.defaultReply() + + when: + connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT) + + then: + commandListener.eventsWereDelivered([ + new CommandStartedEvent(null, 1, 1, connection.getDescription(), 'admin', securitySensitiveCommandName, + new BsonDocument()), + new CommandSucceededEvent(null, 1, 1, connection.getDescription(), 'admin', securitySensitiveCommandName, + new BsonDocument(), 1)]) + + where: + securitySensitiveCommand << [ + new BsonDocument('authenticate', new BsonInt32(1)), + new BsonDocument('saslStart', new BsonInt32(1)), + new BsonDocument('saslContinue', 
new BsonInt32(1)), + new BsonDocument('getnonce', new BsonInt32(1)), + new BsonDocument('createUser', new BsonInt32(1)), + new BsonDocument('updateUser', new BsonInt32(1)), + new BsonDocument('copydbgetnonce', new BsonInt32(1)), + new BsonDocument('copydbsaslstart', new BsonInt32(1)), + new BsonDocument('copydb', new BsonInt32(1)), + new BsonDocument('hello', new BsonInt32(1)).append('speculativeAuthenticate', new BsonDocument()), + new BsonDocument(LEGACY_HELLO_LOWER, new BsonInt32(1)).append('speculativeAuthenticate', new BsonDocument()), + new BsonDocument(LEGACY_HELLO, new BsonInt32(1)).append('speculativeAuthenticate', new BsonDocument()) + ] + } + + def 'should send failed event with redacted exception in failed security-sensitive commands'() { + given: + def connection = getOpenedConnection() + def commandMessage = new CommandMessage(database, securitySensitiveCommand, fieldNameValidator, primary(), messageSettings, + MULTIPLE, null) + stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } + stream.read(16, _) >> helper.defaultMessageHeader(commandMessage.getId()) + stream.read(_, _) >> helper.reply('{ok : 0, errmsg : "failed"}') + + when: + connection.sendAndReceive(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT) + + then: + thrown(MongoCommandException) + CommandFailedEvent failedEvent = commandListener.getEvents().get(1) + failedEvent.throwable.class == MongoCommandException + MongoCommandException e = failedEvent.throwable + MongoCommandExceptionUtils.SecurityInsensitiveResponseField.fieldNames().containsAll(e.getResponse().keySet()) + + where: + securitySensitiveCommand << [ + new BsonDocument('authenticate', new BsonInt32(1)), + new BsonDocument('saslStart', new BsonInt32(1)), + new BsonDocument('saslContinue', new BsonInt32(1)), + new BsonDocument('getnonce', new BsonInt32(1)), + new BsonDocument('createUser', new BsonInt32(1)), + new BsonDocument('updateUser', new BsonInt32(1)), + new BsonDocument('copydbgetnonce', new BsonInt32(1)), + new BsonDocument('copydbsaslstart', new BsonInt32(1)), + new BsonDocument('copydb', new BsonInt32(1)), + new BsonDocument('hello', new BsonInt32(1)).append('speculativeAuthenticate', new BsonDocument()), + new BsonDocument(LEGACY_HELLO_LOWER, new BsonInt32(1)).append('speculativeAuthenticate', new BsonDocument()), + new BsonDocument(LEGACY_HELLO, new BsonInt32(1)).append('speculativeAuthenticate', new BsonDocument()) + ] + } + + def 'should send events for successful asynchronous command'() { + given: + def connection = getOpenedConnection() + def pingCommandDocument = new BsonDocument('ping', new BsonInt32(1)) + def commandMessage = new CommandMessage(database, pingCommandDocument, fieldNameValidator, primary(), messageSettings, MULTIPLE, + null) + def callback = new FutureResultCallback() + + stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } + stream.writeAsync(_, _, _) >> { buffers, operationContext, handler -> + handler.completed(null) + } + stream.readAsync(16, _, _) >> { numBytes, operationContext, handler -> + handler.completed(helper.defaultMessageHeader(commandMessage.getId())) + } + stream.readAsync(90, _, _) >> { numBytes, operationContext, handler -> + handler.completed(helper.defaultReply()) + } + + when: + connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT, callback) + callback.get() + + then: + commandListener.eventsWereDelivered([ + new CommandStartedEvent(null, 1, 1, connection.getDescription(), 'admin', 'ping', + 
pingCommandDocument.append('$db', new BsonString('admin'))), + new CommandSucceededEvent(null, 1, 1, connection.getDescription(), 'admin', 'ping', + new BsonDocument('ok', new BsonInt32(1)), 1000)]) + } + + def 'should send events for successful asynchronous command with decoding error'() { + given: + def connection = getOpenedConnection() + def pingCommandDocument = new BsonDocument('ping', new BsonInt32(1)) + def commandMessage = new CommandMessage(database, pingCommandDocument, fieldNameValidator, primary(), messageSettings, MULTIPLE, + null) + def callback = new FutureResultCallback() + + stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } + stream.writeAsync(_, _, _) >> { buffers, operationContext, handler -> + handler.completed(null) + } + stream.readAsync(16, _, _) >> { numBytes, operationContext, handler -> + handler.completed(helper.defaultMessageHeader(commandMessage.getId())) + } + stream.readAsync(90, _, _) >> { numBytes, operationContext, handler -> + handler.completed(helper.defaultReply()) + } + + when: + connection.sendAndReceiveAsync(commandMessage, { + BsonReader reader, DecoderContext decoderContext -> throw new CodecConfigurationException('') + }, OPERATION_CONTEXT, callback) + callback.get() + + then: + thrown(CodecConfigurationException) + commandListener.eventsWereDelivered([ + new CommandStartedEvent(null, 1, 1, connection.getDescription(), 'admin', 'ping', + pingCommandDocument.append('$db', new BsonString('admin'))), + new CommandSucceededEvent(null, 1, 1, connection.getDescription(), 'admin', 'ping', + new BsonDocument('ok', new BsonInt32(1)), 1000)]) + } + + + def 'should send events for asynchronous command failure with exception writing message'() { + given: + def connection = getOpenedConnection() + def pingCommandDocument = new BsonDocument('ping', new BsonInt32(1)) + def commandMessage = new CommandMessage(database, pingCommandDocument, fieldNameValidator, primary(), messageSettings, MULTIPLE, + null) + def callback = new FutureResultCallback() + + stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } + stream.writeAsync(_, _, _) >> { buffers, operationContext, handler -> + handler.failed(new MongoSocketWriteException('failed', serverAddress, new IOException())) + } + + when: + connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT, callback) + callback.get() + + then: + def e = thrown(MongoSocketWriteException) + commandListener.eventsWereDelivered([ + new CommandStartedEvent(null, 1, 1, connection.getDescription(), 'admin', 'ping', + pingCommandDocument.append('$db', new BsonString('admin'))), + new CommandFailedEvent(null, 1, 1, connection.getDescription(), 'admin', 'ping', 0, e)]) + } + + def 'should send events for asynchronous command failure with exception reading header'() { + given: + def connection = getOpenedConnection() + def pingCommandDocument = new BsonDocument('ping', new BsonInt32(1)) + def commandMessage = new CommandMessage(database, pingCommandDocument, fieldNameValidator, primary(), messageSettings, MULTIPLE, + null) + def callback = new FutureResultCallback() + + stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } + stream.writeAsync(_, _, _) >> { buffers, operationContext, handler -> + handler.completed(null) + } + stream.readAsync(16, _, _) >> { numBytes, operationContext, handler -> + handler.failed(new MongoSocketReadException('Failed to read', serverAddress)) + } + + when: + connection.sendAndReceiveAsync(commandMessage, 
new BsonDocumentCodec(), OPERATION_CONTEXT, callback) + callback.get() + + then: + def e = thrown(MongoSocketReadException) + commandListener.eventsWereDelivered([ + new CommandStartedEvent(null, 1, 1, connection.getDescription(), 'admin', 'ping', + pingCommandDocument.append('$db', new BsonString('admin'))), + new CommandFailedEvent(null, 1, 1, connection.getDescription(), 'admin', 'ping', 0, e)]) + } + + def 'should send events for asynchronous command failure with exception reading body'() { + given: + def connection = getOpenedConnection() + def pingCommandDocument = new BsonDocument('ping', new BsonInt32(1)) + def commandMessage = new CommandMessage(database, pingCommandDocument, fieldNameValidator, primary(), messageSettings, MULTIPLE, + null) + def callback = new FutureResultCallback() + + stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } + stream.writeAsync(_, _, _) >> { buffers, operationContext, handler -> + handler.completed(null) + } + stream.readAsync(16, _, _) >> { numBytes, operationContext, handler -> + handler.completed(helper.defaultMessageHeader(commandMessage.getId())) + } + stream.readAsync(90, _, _) >> { numBytes, operationContext, handler -> + handler.failed(new MongoSocketReadException('Failed to read', serverAddress)) + } + + when: + connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT, callback) + callback.get() + + then: + def e = thrown(MongoSocketReadException) + commandListener.eventsWereDelivered([ + new CommandStartedEvent(null, 1, 1, connection.getDescription(), 'admin', 'ping', + pingCommandDocument.append('$db', new BsonString('admin'))), + new CommandFailedEvent(null, 1, 1, connection.getDescription(), 'admin', 'ping', 0, e)]) + } + + def 'should send events for asynchronous command failure with exception from failed command'() { + given: + def connection = getOpenedConnection() + def pingCommandDocument = new BsonDocument('ping', new BsonInt32(1)) + def commandMessage = new CommandMessage(database, pingCommandDocument, fieldNameValidator, primary(), messageSettings, MULTIPLE, + null) + def callback = new FutureResultCallback() + def response = '{ok : 0, errmsg : "failed"}' + + stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } + stream.writeAsync(_, _, _) >> { buffers, operationContext, handler -> + handler.completed(null) + } + stream.readAsync(16, _, _) >> { numBytes, operationContext, handler -> + handler.completed(helper.defaultMessageHeader(commandMessage.getId())) + } + stream.readAsync(_, _, _) >> { numBytes, operationContext, handler -> + handler.completed(helper.reply(response)) + } + + when: + connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT, callback) + callback.get() + + then: + def e = thrown(MongoCommandException) + commandListener.eventsWereDelivered([ + new CommandStartedEvent(null, 1, 1, connection.getDescription(), 'admin', 'ping', + pingCommandDocument.append('$db', new BsonString('admin'))), + new CommandFailedEvent(null, 1, 1, connection.getDescription(), 'admin', 'ping', 0, e)]) + } + + def 'should send events with elided command and response in successful security-sensitive asynchronous commands'() { + given: + def securitySensitiveCommandName = securitySensitiveCommand.keySet().iterator().next() + def connection = getOpenedConnection() + def commandMessage = new CommandMessage(database, securitySensitiveCommand, fieldNameValidator, primary(), messageSettings, + MULTIPLE, null) + def callback = new 
FutureResultCallback() + + stream.getBuffer(1024) >> { new ByteBufNIO(ByteBuffer.wrap(new byte[1024])) } + stream.writeAsync(_, _, _) >> { buffers, operationContext, handler -> + handler.completed(null) + } + stream.readAsync(16, _, _) >> { numBytes, operationContext, handler -> + handler.completed(helper.defaultMessageHeader(commandMessage.getId())) + } + stream.readAsync(90, _, _) >> { numBytes, operationContext, handler -> + handler.completed(helper.defaultReply()) + } + + when: + connection.sendAndReceiveAsync(commandMessage, new BsonDocumentCodec(), OPERATION_CONTEXT, callback) + callback.get() + + then: + commandListener.eventsWereDelivered([ + new CommandStartedEvent(null, 1, 1, connection.getDescription(), 'admin', securitySensitiveCommandName, + new BsonDocument()), + new CommandSucceededEvent(null, 1, 1, connection.getDescription(), 'admin', securitySensitiveCommandName, + new BsonDocument(), 1)]) + + where: + securitySensitiveCommand << [ + new BsonDocument('authenticate', new BsonInt32(1)), + new BsonDocument('saslStart', new BsonInt32(1)), + new BsonDocument('saslContinue', new BsonInt32(1)), + new BsonDocument('getnonce', new BsonInt32(1)), + new BsonDocument('createUser', new BsonInt32(1)), + new BsonDocument('updateUser', new BsonInt32(1)), + new BsonDocument('copydbgetnonce', new BsonInt32(1)), + new BsonDocument('copydbsaslstart', new BsonInt32(1)), + new BsonDocument('copydb', new BsonInt32(1)), + new BsonDocument('hello', new BsonInt32(1)).append('speculativeAuthenticate', new BsonDocument()), + new BsonDocument(LEGACY_HELLO_LOWER, new BsonInt32(1)).append('speculativeAuthenticate', new BsonDocument()), + new BsonDocument(LEGACY_HELLO, new BsonInt32(1)).append('speculativeAuthenticate', new BsonDocument()) + ] + } + + private static boolean expectException(rcvdCallbck) { + try { + rcvdCallbck.get() + false + } catch (MongoSocketWriteException e) { + true + } + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/JMXConnectionPoolListenerSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/JMXConnectionPoolListenerSpecification.groovy new file mode 100644 index 00000000000..374687f7d01 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/JMXConnectionPoolListenerSpecification.groovy @@ -0,0 +1,175 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection + +import com.mongodb.ServerAddress +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ConnectionPoolSettings +import com.mongodb.connection.ServerId +import com.mongodb.internal.inject.SameObjectProvider +import com.mongodb.management.JMXConnectionPoolListener +import spock.lang.Specification +import spock.lang.Subject +import spock.lang.Unroll + +import javax.management.ObjectName +import java.lang.management.ManagementFactory + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT_FACTORY + +class JMXConnectionPoolListenerSpecification extends Specification { + private static final ServerId SERVER_ID = new ServerId(new ClusterId(), new ServerAddress('host1', 27018)) + + private final connectionFactory = new TestInternalConnectionFactory() + + private provider + + @Subject + private final JMXConnectionPoolListener jmxListener = new JMXConnectionPoolListener() + + def 'statistics should reflect values from the provider'() { + given: + provider = new DefaultConnectionPool(SERVER_ID, connectionFactory, + ConnectionPoolSettings.builder().minSize(0).maxSize(5) + .addConnectionPoolListener(jmxListener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + provider.ready() + + when: + provider.get(OPERATION_CONTEXT) + provider.get(OPERATION_CONTEXT).close() + + then: + with(jmxListener.getMBean(SERVER_ID)) { + host == SERVER_ID.address.host + port == SERVER_ID.address.port + minSize == 0 + maxSize == 5 + size == 2 + checkedOutCount == 1 + } + + cleanup: + provider.close() + } + + def 'should add MBean'() { + when: + provider = new DefaultConnectionPool(SERVER_ID, connectionFactory, + ConnectionPoolSettings.builder().minSize(0).maxSize(5) + .addConnectionPoolListener(jmxListener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + + then: + ManagementFactory.getPlatformMBeanServer().isRegistered( + new ObjectName(jmxListener.getMBeanObjectName(SERVER_ID))) + + cleanup: + provider.close() + } + + def 'should remove MBean'() { + given: + provider = new DefaultConnectionPool(SERVER_ID, connectionFactory, + ConnectionPoolSettings.builder().minSize(0).maxSize(5) + .addConnectionPoolListener(jmxListener).build(), mockSdamProvider(), OPERATION_CONTEXT_FACTORY) + + when: + provider.close() + + then: + jmxListener.getMBean(SERVER_ID) == null + !ManagementFactory.getPlatformMBeanServer().isRegistered(new ObjectName(jmxListener.getMBeanObjectName(SERVER_ID))) + } + + def 'should create a valid ObjectName for hostname'() { + given: + String beanName = jmxListener.getMBeanObjectName(SERVER_ID) + + when: + ObjectName objectName = new ObjectName(beanName) + + then: + objectName.toString() == "org.mongodb.driver:type=ConnectionPool,clusterId=${SERVER_ID.clusterId.value}," + + "host=${SERVER_ID.address.host},port=${SERVER_ID.address.port}" + } + + def 'should create a valid ObjectName for ipv4 addresses'() { + given: + def serverId = new ServerId(new ClusterId(), new ServerAddress('127.0.0.1')) + String beanName = jmxListener.getMBeanObjectName(serverId) + + when: + ObjectName objectName = new ObjectName(beanName) + + then: + objectName.toString() == "org.mongodb.driver:type=ConnectionPool,clusterId=${serverId.clusterId.value},host=127.0.0.1,port=27017" + } + + def 'should create a valid ObjectName for ipv6 address'() { + given: + def serverId = new ServerId(new ClusterId(), new ServerAddress('[::1]')) + String beanName = jmxListener.getMBeanObjectName(serverId) + + 
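+ // ':' is a special character in a JMX ObjectName value, so the listener is expected to emit the IPv6 literal as a quoted value (host="::1")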
when: + ObjectName objectName = new ObjectName(beanName) + + then: + objectName.toString() == "org.mongodb.driver:type=ConnectionPool,clusterId=${serverId.clusterId.value},host=\"::1\",port=27017" + } + + def 'should include the description in the object name if set'() { + when: + def serverId = new ServerId(new ClusterId(), new ServerAddress()) + + then: + !jmxListener.getMBeanObjectName(serverId).contains('description') + + when: + serverId = new ServerId(new ClusterId('my app server'), new ServerAddress()) + + then: + jmxListener.getMBeanObjectName(serverId).contains('description') + } + + @Unroll + def 'should quote values containing special characters'() { + when: + def clusterId = new ClusterId(clusterIdName, description) + def serverId = new ServerId(clusterId, new ServerAddress(host)) + def objectName = new ObjectName(jmxListener.getMBeanObjectName(serverId)) + + then: + objectName.toString() == "org.mongodb.driver:type=ConnectionPool,clusterId=${expectedClusterIdName},host=${expectedHost}" + + ",port=27017,description=${expectedDescription}" + + where: + clusterIdName | expectedClusterIdName | host | expectedHost | description | expectedDescription + 'cluster Id' | 'cluster Id' | 'host name' | 'host name' | 'client description' | 'client description' + 'cluster,Id' | '"cluster,Id"' | 'host,name' | '"host,name"' | 'client, description' | '"client, description"' + 'cluster:Id' | '"cluster:Id"' | 'hostname' | 'hostname' | 'client: description' | '"client: description"' + 'cluster=Id' | '"cluster=Id"' | 'host=name' | '"host=name"' | 'client= description' | '"client= description"' + 'cluster"Id' | '"cluster\\"Id"' | 'host"name' | '"host\\"name"' | 'client" description' | '"client\\" description"' + 'cluster*Id' | '"cluster\\*Id"' | 'host*name' | '"host\\*name"' | 'client* description' | '"client\\* description"' + 'cluster?Id' | '"cluster\\?Id"' | 'host?name' | '"host\\?name"' | 'client? description' | '"client\\? description"' + 'cluster\\Id' | '"cluster\\\\Id"' | 'host\\name' | '"host\\\\name"' | 'client\\ description' | '"client\\\\ description"' + 'cluster\nId' | '"cluster\\nId"' | 'host\nname' | '"host\\nname"' | 'client\n description' | '"client\\n description"' + } + + private mockSdamProvider() { + SameObjectProvider.initialized(Mock(SdamServerDescriptionManager)) + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/LoadBalancedClusterTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/LoadBalancedClusterTest.java new file mode 100644 index 00000000000..7366a03b584 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/LoadBalancedClusterTest.java @@ -0,0 +1,597 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoClientException; +import com.mongodb.MongoConfigurationException; +import com.mongodb.MongoException; +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.MongoTimeoutException; +import com.mongodb.ServerAddress; +import com.mongodb.async.FutureResultCallback; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ClusterSettings; +import com.mongodb.connection.ServerConnectionState; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerSettings; +import com.mongodb.connection.ServerType; +import com.mongodb.lang.NonNull; +import com.mongodb.selector.ServerSelector; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.RepeatedTest; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicReference; + +import static com.mongodb.ClusterFixture.CLIENT_METADATA; +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT; +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS; +import static com.mongodb.ClusterFixture.createOperationContext; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class LoadBalancedClusterTest { + private LoadBalancedCluster cluster; + + @AfterEach + public void after() { + if (cluster != null) { + cluster.close(); + } + } + + @Test + public void shouldSelectServerWhenThereIsNoSRVLookup() { + // given + ServerAddress serverAddress = new ServerAddress("host1"); + ClusterableServer expectedServer = mock(ClusterableServer.class); + + ClusterSettings clusterSettings = ClusterSettings.builder() + .mode(ClusterConnectionMode.LOAD_BALANCED) + .hosts(Collections.singletonList(serverAddress)) + .build(); + + ClusterableServerFactory serverFactory = mockServerFactory(serverAddress, expectedServer); + cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, CLIENT_METADATA, + mock(DnsSrvRecordMonitorFactory.class)); + + // when + ServerTuple serverTuple = cluster.selectServer(mock(ServerSelector.class), OPERATION_CONTEXT); + + // then + assertServerTupleExpectations(serverAddress, expectedServer, serverTuple); + + // when + FutureResultCallback<ServerTuple> callback = new FutureResultCallback<>(); + cluster.selectServerAsync(mock(ServerSelector.class), OPERATION_CONTEXT, callback); + serverTuple = callback.get(); + 
+ // then + assertServerTupleExpectations(serverAddress, expectedServer, serverTuple); + } + + @Test + public void shouldSelectServerWhenThereIsSRVLookup() { + // given + String srvHostName = "foo.bar.com"; + ServerAddress resolvedServerAddress = new ServerAddress("host1"); + ClusterableServer expectedServer = mock(ClusterableServer.class); + + ClusterSettings clusterSettings = ClusterSettings.builder() + .mode(ClusterConnectionMode.LOAD_BALANCED) + .srvHost(srvHostName) + .build(); + + ClusterableServerFactory serverFactory = mockServerFactory(resolvedServerAddress, expectedServer); + + DnsSrvRecordMonitorFactory dnsSrvRecordMonitorFactory = mock(DnsSrvRecordMonitorFactory.class); + when(dnsSrvRecordMonitorFactory.create(eq(srvHostName), eq(clusterSettings.getSrvServiceName()), any())).thenAnswer( + invocation -> new TestDnsSrvRecordMonitor(invocation.getArgument(2))); + + cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, CLIENT_METADATA, dnsSrvRecordMonitorFactory); + + // when + ServerTuple serverTuple = cluster.selectServer(mock(ServerSelector.class), OPERATION_CONTEXT); + + // then + assertServerTupleExpectations(resolvedServerAddress, expectedServer, serverTuple); + } + + @Test + public void shouldSelectServerAsynchronouslyWhenThereIsSRVLookup() { + // given + String srvHostName = "foo.bar.com"; + ServerAddress resolvedServerAddress = new ServerAddress("host1"); + ClusterableServer expectedServer = mock(ClusterableServer.class); + + ClusterSettings clusterSettings = ClusterSettings.builder() + .mode(ClusterConnectionMode.LOAD_BALANCED) + .srvHost(srvHostName) + .build(); + + ClusterableServerFactory serverFactory = mockServerFactory(resolvedServerAddress, expectedServer); + + DnsSrvRecordMonitorFactory dnsSrvRecordMonitorFactory = mock(DnsSrvRecordMonitorFactory.class); + when(dnsSrvRecordMonitorFactory.create(eq(srvHostName), eq(clusterSettings.getSrvServiceName()), any())).thenAnswer( + invocation -> new TestDnsSrvRecordMonitor(invocation.getArgument(2))); + + cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, CLIENT_METADATA, dnsSrvRecordMonitorFactory); + + // when + FutureResultCallback<ServerTuple> callback = new FutureResultCallback<>(); + cluster.selectServerAsync(mock(ServerSelector.class), OPERATION_CONTEXT, callback); + ServerTuple serverTuple = callback.get(); + + // then + assertServerTupleExpectations(resolvedServerAddress, expectedServer, serverTuple); + } + + @Test + public void shouldFailSelectServerWhenThereIsSRVMisconfiguration() { + // given + String srvHostName = "foo.bar.com"; + ClusterSettings clusterSettings = ClusterSettings.builder() + .mode(ClusterConnectionMode.LOAD_BALANCED) + .srvHost(srvHostName) + .build(); + + ClusterableServerFactory serverFactory = mockServerFactory(); + + DnsSrvRecordMonitorFactory dnsSrvRecordMonitorFactory = mock(DnsSrvRecordMonitorFactory.class); + when(dnsSrvRecordMonitorFactory.create(eq(srvHostName), eq(clusterSettings.getSrvServiceName()), any())).thenAnswer( + invocation -> new TestDnsSrvRecordMonitor(invocation.getArgument(2)) + .hosts(Arrays.asList(new ServerAddress("host1"), new ServerAddress("host2")))); + + cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, CLIENT_METADATA, dnsSrvRecordMonitorFactory); + + MongoClientException exception = assertThrows(MongoClientException.class, () -> cluster.selectServer(mock(ServerSelector.class), + OPERATION_CONTEXT)); + assertEquals("In load balancing mode, the host must resolve to a single SRV 
record, but instead it resolved to multiple hosts", + exception.getMessage()); + } + + @Test + public void shouldFailSelectServerAsynchronouslyWhenThereIsSRVMisconfiguration() { + // given + String srvHostName = "foo.bar.com"; + ClusterSettings clusterSettings = ClusterSettings.builder() + .mode(ClusterConnectionMode.LOAD_BALANCED) + .srvHost(srvHostName) + .build(); + + ClusterableServerFactory serverFactory = mockServerFactory(); + + DnsSrvRecordMonitorFactory dnsSrvRecordMonitorFactory = mock(DnsSrvRecordMonitorFactory.class); + when(dnsSrvRecordMonitorFactory.create(eq(srvHostName), eq(clusterSettings.getSrvServiceName()), any())).thenAnswer( + invocation -> new TestDnsSrvRecordMonitor(invocation.getArgument(2)) + .hosts(Arrays.asList(new ServerAddress("host1"), new ServerAddress("host2")))); + + cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, CLIENT_METADATA, dnsSrvRecordMonitorFactory); + + FutureResultCallback callback = new FutureResultCallback<>(); + cluster.selectServerAsync(mock(ServerSelector.class), OPERATION_CONTEXT, callback); + + MongoClientException exception = assertThrows(MongoClientException.class, callback::get); + assertEquals("In load balancing mode, the host must resolve to a single SRV record, but instead it resolved to multiple hosts", + exception.getMessage()); + } + + @Test + public void shouldTimeoutSelectServerWhenThereIsSRVLookup() { + // given + String srvHostName = "foo.bar.com"; + ServerAddress resolvedServerAddress = new ServerAddress("host1"); + ClusterableServer expectedServer = mock(ClusterableServer.class); + + ClusterSettings clusterSettings = ClusterSettings.builder() + .mode(ClusterConnectionMode.LOAD_BALANCED) + .srvHost(srvHostName) + .build(); + + ClusterableServerFactory serverFactory = mockServerFactory(resolvedServerAddress, expectedServer); + + DnsSrvRecordMonitorFactory dnsSrvRecordMonitorFactory = mock(DnsSrvRecordMonitorFactory.class); + when(dnsSrvRecordMonitorFactory.create(eq(srvHostName), eq(clusterSettings.getSrvServiceName()), any())).thenAnswer( + invocation -> new TestDnsSrvRecordMonitor(invocation.getArgument(2)).sleepTime(Duration.ofHours(1))); + + cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, CLIENT_METADATA, dnsSrvRecordMonitorFactory); + + MongoTimeoutException exception = assertThrows(MongoTimeoutException.class, () -> cluster.selectServer(mock(ServerSelector.class), + createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(5)))); + assertTrue(exception.getMessage().contains("while waiting to resolve SRV records for foo.bar.com")); + } + + @Test + public void shouldTimeoutSelectServerWhenThereIsSRVLookupAndTimeoutMsIsSet() { + // given + String srvHostName = "foo.bar.com"; + ServerAddress resolvedServerAddress = new ServerAddress("host1"); + ClusterableServer expectedServer = mock(ClusterableServer.class); + + ClusterSettings clusterSettings = ClusterSettings.builder() + .mode(ClusterConnectionMode.LOAD_BALANCED) + .srvHost(srvHostName) + .build(); + + ClusterableServerFactory serverFactory = mockServerFactory(resolvedServerAddress, expectedServer); + + DnsSrvRecordMonitorFactory dnsSrvRecordMonitorFactory = mock(DnsSrvRecordMonitorFactory.class); + when(dnsSrvRecordMonitorFactory.create(eq(srvHostName), eq(clusterSettings.getSrvServiceName()), any())).thenAnswer( + invocation -> new TestDnsSrvRecordMonitor(invocation.getArgument(2)).sleepTime(Duration.ofHours(1))); + + cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, 
serverFactory, CLIENT_METADATA, dnsSrvRecordMonitorFactory); + + // when & then + MongoOperationTimeoutException exception = assertThrows(MongoOperationTimeoutException.class, () -> cluster.selectServer(mock(ServerSelector.class), + createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(5).withTimeout(10L, MILLISECONDS)))); + assertTrue(exception.getMessage().contains("while waiting to resolve SRV records for foo.bar.com")); + } + + @Test + public void shouldTimeoutSelectServerWhenThereIsSRVLookupException() { + // given + String srvHostName = "foo.bar.com"; + ServerAddress resolvedServerAddress = new ServerAddress("host1"); + ClusterableServer expectedServer = mock(ClusterableServer.class); + + ClusterSettings clusterSettings = ClusterSettings.builder() + .mode(ClusterConnectionMode.LOAD_BALANCED) + .srvHost(srvHostName) + .build(); + + ClusterableServerFactory serverFactory = mockServerFactory(resolvedServerAddress, expectedServer); + + DnsSrvRecordMonitorFactory dnsSrvRecordMonitorFactory = mock(DnsSrvRecordMonitorFactory.class); + when(dnsSrvRecordMonitorFactory.create(eq(srvHostName), eq(clusterSettings.getSrvServiceName()), any())).thenAnswer( + invocation -> new TestDnsSrvRecordMonitor(invocation.getArgument(2)) + .sleepTime(Duration.ofMillis(1)) + .exception(new MongoConfigurationException("Unable to resolve SRV record"))); + cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, CLIENT_METADATA, dnsSrvRecordMonitorFactory); + + MongoTimeoutException exception = assertThrows(MongoTimeoutException.class, () -> cluster.selectServer(mock(ServerSelector.class), + createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(10)))); + + assertTrue(exception.getMessage().contains("while waiting to resolve SRV records for foo.bar.com")); + assertTrue(exception.getMessage().contains("Resolution exception was 'com.mongodb.MongoConfigurationException: Unable to resolve SRV record'")); + } + + @Test + public void shouldTimeoutSelectServerAsynchronouslyWhenThereIsSRVLookup() { + // given + String srvHostName = "foo.bar.com"; + ServerAddress resolvedServerAddress = new ServerAddress("host1"); + ClusterableServer expectedServer = mock(ClusterableServer.class); + + ClusterSettings clusterSettings = ClusterSettings + .builder() + .mode(ClusterConnectionMode.LOAD_BALANCED) + .srvHost(srvHostName) + .build(); + + ClusterableServerFactory serverFactory = mockServerFactory(resolvedServerAddress, expectedServer); + + DnsSrvRecordMonitorFactory dnsSrvRecordMonitorFactory = mock(DnsSrvRecordMonitorFactory.class); + when(dnsSrvRecordMonitorFactory.create(eq(srvHostName), eq(clusterSettings.getSrvServiceName()), any())).thenAnswer( + invocation -> new TestDnsSrvRecordMonitor(invocation.getArgument(2)).sleepTime(Duration.ofHours(1))); + + cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, CLIENT_METADATA, dnsSrvRecordMonitorFactory); + + FutureResultCallback<ServerTuple> callback = new FutureResultCallback<>(); + cluster.selectServerAsync(mock(ServerSelector.class), + createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(5)), callback); + + MongoTimeoutException exception = assertThrows(MongoTimeoutException.class, callback::get); + assertTrue(exception.getMessage().contains("while waiting to resolve SRV records for foo.bar.com")); + } + + @Test + public void shouldTimeoutSelectServerAsynchronouslyWhenThereIsSRVLookupException() { + // given + String srvHostName = "foo.bar.com"; + ServerAddress resolvedServerAddress = new 
ServerAddress("host1"); + ClusterableServer expectedServer = mock(ClusterableServer.class); + + ClusterSettings clusterSettings = ClusterSettings.builder() + .mode(ClusterConnectionMode.LOAD_BALANCED) + .srvHost(srvHostName) + .build(); + + ClusterableServerFactory serverFactory = mockServerFactory(resolvedServerAddress, expectedServer); + + DnsSrvRecordMonitorFactory dnsSrvRecordMonitorFactory = mock(DnsSrvRecordMonitorFactory.class); + when(dnsSrvRecordMonitorFactory.create(eq(srvHostName), eq(clusterSettings.getSrvServiceName()), any())).thenAnswer( + invocation -> new TestDnsSrvRecordMonitor(invocation.getArgument(2)) + .sleepTime(Duration.ofMillis(1)) + .exception(new MongoConfigurationException("Unable to resolve SRV record"))); + cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, CLIENT_METADATA, dnsSrvRecordMonitorFactory); + + FutureResultCallback callback = new FutureResultCallback<>(); + cluster.selectServerAsync(mock(ServerSelector.class), + createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(10)), callback); + + MongoTimeoutException exception = assertThrows(MongoTimeoutException.class, callback::get); + assertTrue(exception.getMessage().contains("while waiting to resolve SRV records for foo.bar.com")); + assertTrue(exception.getMessage().contains("Resolution exception was 'com.mongodb.MongoConfigurationException: Unable to resolve SRV record'")); + } + + @Test + void shouldNotInitServerAfterClosing() { + // prepare mocks + ClusterSettings clusterSettings = ClusterSettings.builder().mode(ClusterConnectionMode.LOAD_BALANCED).srvHost("foo.bar.com").build(); + ClusterableServerFactory serverFactory = mock(ClusterableServerFactory.class); + when(serverFactory.getSettings()).thenReturn(mock(ServerSettings.class)); + DnsSrvRecordMonitorFactory srvRecordMonitorFactory = mock(DnsSrvRecordMonitorFactory.class); + when(srvRecordMonitorFactory.create(any(), eq(clusterSettings.getSrvServiceName()), any(DnsSrvRecordInitializer.class))).thenReturn(mock(DnsSrvRecordMonitor.class)); + ArgumentCaptor serverInitializerCaptor = ArgumentCaptor.forClass(DnsSrvRecordInitializer.class); + // create `cluster` and capture its `DnsSrvRecordInitializer` (server initializer) + LoadBalancedCluster cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, CLIENT_METADATA, srvRecordMonitorFactory); + verify(srvRecordMonitorFactory, times(1)).create(any(), eq(clusterSettings.getSrvServiceName()), serverInitializerCaptor.capture()); + // close `cluster`, call `DnsSrvRecordInitializer.initialize` and check that it does not result in creating a `ClusterableServer` + cluster.close(); + serverInitializerCaptor.getValue().initialize(Collections.singleton(new ServerAddress())); + verify(serverFactory, never()).create(any(), any()); + } + + @Test + void shouldCloseServerWhenClosing() { + // prepare mocks + ClusterableServerFactory serverFactory = mock(ClusterableServerFactory.class); + when(serverFactory.getSettings()).thenReturn(mock(ServerSettings.class)); + ClusterableServer server = mock(ClusterableServer.class); + when(serverFactory.create(any(), any())).thenReturn(server); + // create `cluster` and check that it creates a `ClusterableServer` + LoadBalancedCluster cluster = new LoadBalancedCluster(new ClusterId(), + ClusterSettings.builder().mode(ClusterConnectionMode.LOAD_BALANCED).build(), serverFactory, CLIENT_METADATA, + mock(DnsSrvRecordMonitorFactory.class)); + verify(serverFactory, times(1)).create(any(), any()); + // close 
`cluster` and check that it closes `server` + cluster.close(); + verify(server, atLeastOnce()).close(); + } + + @RepeatedTest(value = 10, name = RepeatedTest.LONG_DISPLAY_NAME) + @Tag("Slow") + public void synchronousConcurrentTest() throws InterruptedException, ExecutionException, TimeoutException { + String srvHostName = "foo.bar.com"; + ServerAddress resolvedServerAddress = new ServerAddress("host1"); + ClusterableServer expectedServer = mock(ClusterableServer.class); + + ClusterSettings clusterSettings = ClusterSettings.builder() + .mode(ClusterConnectionMode.LOAD_BALANCED) + .srvHost(srvHostName) + .build(); + + ClusterableServerFactory serverFactory = mockServerFactory(resolvedServerAddress, expectedServer); + + Duration srvResolutionTime = Duration.ofSeconds(5); + DnsSrvRecordMonitorFactory dnsSrvRecordMonitorFactory = mock(DnsSrvRecordMonitorFactory.class); + when(dnsSrvRecordMonitorFactory.create(eq(srvHostName), eq(clusterSettings.getSrvServiceName()), any())).thenAnswer( + invocation -> new TestDnsSrvRecordMonitor(invocation.getArgument(2)).sleepTime(srvResolutionTime)); + cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, CLIENT_METADATA, dnsSrvRecordMonitorFactory); + + int numThreads = 100; + ExecutorService executorService = Executors.newFixedThreadPool(numThreads); + List<Future<?>> futures = new ArrayList<>(numThreads); + for (int i = 0; i < numThreads; i++) { + futures.add(executorService.submit(() -> { + boolean success = false; + while (!success) { + try { + cluster.selectServer(mock(ServerSelector.class), + createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(5))); + success = true; + } catch (MongoTimeoutException e) { + // this is expected + } + } + // Keep going for a little while + for (int j = 0; j < 100; j++) { + cluster.selectServer(mock(ServerSelector.class), + createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(5))); + } + })); + } + + for (Future<?> future : futures) { + future.get(10, SECONDS); + } + + executorService.shutdownNow(); + } + + @RepeatedTest(value = 10, name = RepeatedTest.LONG_DISPLAY_NAME) + @Tag("Slow") + public void asynchronousConcurrentTest() throws InterruptedException, ExecutionException, TimeoutException { + String srvHostName = "foo.bar.com"; + ServerAddress resolvedServerAddress = new ServerAddress("host1"); + ClusterableServer expectedServer = mock(ClusterableServer.class); + + ClusterSettings clusterSettings = ClusterSettings.builder() + .mode(ClusterConnectionMode.LOAD_BALANCED) + .srvHost(srvHostName) + .build(); + + ClusterableServerFactory serverFactory = mockServerFactory(resolvedServerAddress, expectedServer); + + Duration srvResolutionTime = Duration.ofSeconds(5); + DnsSrvRecordMonitorFactory dnsSrvRecordMonitorFactory = mock(DnsSrvRecordMonitorFactory.class); + AtomicReference<TestDnsSrvRecordMonitor> dnsSrvRecordMonitorReference = new AtomicReference<>(); + when(dnsSrvRecordMonitorFactory.create(eq(srvHostName), eq(clusterSettings.getSrvServiceName()), any())).thenAnswer( + invocation -> { + TestDnsSrvRecordMonitor dnsSrvRecordMonitor = new TestDnsSrvRecordMonitor(invocation.getArgument(2)) + .sleepTime(srvResolutionTime); + dnsSrvRecordMonitorReference.set(dnsSrvRecordMonitor); + return dnsSrvRecordMonitor; + }); + cluster = new LoadBalancedCluster(new ClusterId(), clusterSettings, serverFactory, CLIENT_METADATA, dnsSrvRecordMonitorFactory); + + int numThreads = 10; + List<List<FutureResultCallback<ServerTuple>>> callbacksList = new ArrayList<>(numThreads); + ExecutorService executorService = Executors.newFixedThreadPool(numThreads); 
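+ // Callbacks are recorded in submission order, so the assertions after the executor drains can verify that, once a selection succeeds (i.e. SRV resolution completed), no later selection fails.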
+ List<Future<?>> futures = new ArrayList<>(numThreads); + for (int i = 0; i < numThreads; i++) { + List<FutureResultCallback<ServerTuple>> callbacks = new ArrayList<>(); + callbacksList.add(callbacks); + futures.add(executorService.submit(() -> { + while (!dnsSrvRecordMonitorReference.get().isInitialized()) { + FutureResultCallback<ServerTuple> callback = new FutureResultCallback<>(); + callbacks.add(callback); + cluster.selectServerAsync(mock(ServerSelector.class), + createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(5)), callback); + } + // Keep going for a little while + for (int j = 0; j < 100; j++) { + FutureResultCallback<ServerTuple> callback = new FutureResultCallback<>(); + callbacks.add(callback); + cluster.selectServerAsync(mock(ServerSelector.class), + createOperationContext(TIMEOUT_SETTINGS.withServerSelectionTimeoutMS(5)), callback); + } + })); + } + + for (Future<?> future : futures) { + future.get(10, SECONDS); + } + + executorService.shutdownNow(); + + for (List<FutureResultCallback<ServerTuple>> callbacks : callbacksList) { + boolean foundFirstNonExceptionResult = false; + for (FutureResultCallback<ServerTuple> curCallback : callbacks) { + assertFalse(curCallback.wasInvokedMultipleTimes()); + assertTrue(curCallback.isDone()); + if (!curCallback.isCompletedExceptionally()) { + foundFirstNonExceptionResult = true; + } + if (foundFirstNonExceptionResult) { + assertFalse(curCallback.isCompletedExceptionally()); + } + } + } + } + + private void assertServerTupleExpectations(final ServerAddress serverAddress, final ClusterableServer expectedServer, + final ServerTuple serverTuple) { + assertEquals(expectedServer, serverTuple.getServer()); + // Can't just use assertEquals here because the equals method compares lastUpdateTimeNanos property, which won't ever be the same + ServerDescription serverDescription = serverTuple.getServerDescription(); + assertTrue(serverDescription.isOk()); + assertEquals(ServerConnectionState.CONNECTED, serverDescription.getState()); + assertEquals(serverAddress, serverDescription.getAddress()); + assertEquals(ServerType.LOAD_BALANCER, serverDescription.getType()); + } + + @NonNull + private ClusterableServerFactory mockServerFactory(final ServerAddress serverAddress, final ClusterableServer expectedServer) { + ClusterableServerFactory serverFactory = mock(ClusterableServerFactory.class); + when(serverFactory.getSettings()).thenReturn(ServerSettings.builder().build()); + when(serverFactory.create(any(), eq(serverAddress))).thenReturn(expectedServer); + return serverFactory; + } + + @NonNull + private ClusterableServerFactory mockServerFactory() { + ClusterableServerFactory serverFactory = mock(ClusterableServerFactory.class); + when(serverFactory.getSettings()).thenReturn(ServerSettings.builder().build()); + return serverFactory; + } + + private static class TestDnsSrvRecordMonitor implements DnsSrvRecordMonitor { + private final DnsSrvRecordInitializer initializer; + private Duration sleepTime; + private Thread thread; + private Collection<ServerAddress> hosts; + private MongoException exception; + private volatile boolean initialized; + + TestDnsSrvRecordMonitor(final DnsSrvRecordInitializer initializer) { + this.initializer = initializer; + sleepTime = Duration.ofMillis(50); + hosts = Collections.singletonList(new ServerAddress("host1")); + } + + TestDnsSrvRecordMonitor sleepTime(final Duration sleepTime) { + this.sleepTime = sleepTime; + return this; + } + + TestDnsSrvRecordMonitor hosts(final Collection<ServerAddress> hosts) { + this.hosts = hosts; + return this; + } + + public TestDnsSrvRecordMonitor exception(final MongoException exception) { + this.exception = exception; + 
return this; + } + + public boolean isInitialized() { + return initialized; + } + + @Override + public void start() { + thread = new Thread(() -> { + try { + Thread.sleep(sleepTime.toMillis()); + if (exception != null) { + initializer.initialize(exception); + } else { + initializer.initialize(hosts); + } + initialized = true; + } catch (InterruptedException e) { + // ignore + } + }); + thread.start(); + } + + @Override + public void close() { + if (thread != null) { + thread.interrupt(); + } + } + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/LoggingCommandEventSenderSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/LoggingCommandEventSenderSpecification.groovy new file mode 100644 index 00000000000..6aa30aa4aa6 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/LoggingCommandEventSenderSpecification.groovy @@ -0,0 +1,217 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection + +import com.mongodb.LoggerSettings +import com.mongodb.MongoInternalException +import com.mongodb.ReadPreference +import com.mongodb.ServerAddress +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ConnectionDescription +import com.mongodb.connection.ConnectionId +import com.mongodb.connection.ServerId +import com.mongodb.event.CommandFailedEvent +import com.mongodb.event.CommandListener +import com.mongodb.event.CommandStartedEvent +import com.mongodb.event.CommandSucceededEvent +import com.mongodb.internal.IgnorableRequestContext +import com.mongodb.internal.TimeoutContext +import com.mongodb.internal.diagnostics.logging.Logger +import com.mongodb.internal.logging.StructuredLogger +import com.mongodb.internal.validator.NoOpFieldNameValidator +import org.bson.BsonBinary +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonString +import spock.lang.Specification + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE +import static com.mongodb.connection.ClusterConnectionMode.SINGLE +import static com.mongodb.internal.operation.ServerVersionHelper.LATEST_WIRE_VERSION + +class LoggingCommandEventSenderSpecification extends Specification { + + def 'should send events'() { + given: + def connectionDescription = new ConnectionDescription(new ServerId(new ClusterId(), new ServerAddress())) + def database = 'test' + def messageSettings = MessageSettings.builder().maxWireVersion(LATEST_WIRE_VERSION).build() + def commandListener = new TestCommandListener() + def commandDocument = new BsonDocument('ping', new BsonInt32(1)) + def replyDocument = new BsonDocument('ok', new BsonInt32(1)) + def failureException = new MongoInternalException('failure!') + def message = new CommandMessage(database, commandDocument, + NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), messageSettings, MULTIPLE, null) + def bsonOutput = new 
ByteBufferBsonOutput(new SimpleBufferProvider()) + message.encode(bsonOutput, new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE, + Stub(TimeoutContext), null)) + def logger = Stub(Logger) { + isDebugEnabled() >> debugLoggingEnabled + } + def operationContext = OPERATION_CONTEXT + def sender = new LoggingCommandEventSender([] as Set, [] as Set, connectionDescription, commandListener, + operationContext, message, bsonOutput, new StructuredLogger(logger), LoggerSettings.builder().build()) + + when: + sender.sendStartedEvent() + sender.sendSucceededEventForOneWayCommand() + sender.sendSucceededEvent(MessageHelper.buildSuccessfulReply(message.getId(), replyDocument.toJson())) + sender.sendFailedEvent(failureException) + + then: + commandListener.eventsWereDelivered([ + new CommandStartedEvent(null, operationContext.id, message.getId(), connectionDescription, + database, commandDocument.getFirstKey(), + commandDocument.append('$db', new BsonString(database))), + new CommandSucceededEvent(null, operationContext.id, message.getId(), connectionDescription, + database, commandDocument.getFirstKey(), new BsonDocument(), 1), + new CommandSucceededEvent(null, operationContext.id, message.getId(), connectionDescription, + database, commandDocument.getFirstKey(), replyDocument, 1), + new CommandFailedEvent(null, operationContext.id, message.getId(), connectionDescription, + database, commandDocument.getFirstKey(), 1, failureException) + ]) + + where: + debugLoggingEnabled << [true, false] + } + + def 'should log events'() { + given: + def serverId = new ServerId(new ClusterId(), new ServerAddress()) + def connectionDescription = new ConnectionDescription(serverId) + .withConnectionId(new ConnectionId(serverId, 42, 1000)) + def database = 'test' + def messageSettings = MessageSettings.builder().maxWireVersion(LATEST_WIRE_VERSION).build() + def commandDocument = new BsonDocument('ping', new BsonInt32(1)) + def replyDocument = new BsonDocument('ok', new BsonInt32(42)) + def failureException = new MongoInternalException('failure!') + def message = new CommandMessage(database, commandDocument, NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), + messageSettings, MULTIPLE, null) + def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) + message.encode(bsonOutput, new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE, + Stub(TimeoutContext), null)) + def logger = Mock(Logger) { + isDebugEnabled() >> true + } + def operationContext = OPERATION_CONTEXT + def sender = new LoggingCommandEventSender([] as Set, [] as Set, connectionDescription, commandListener, + operationContext, message, bsonOutput, new StructuredLogger(logger), + LoggerSettings.builder().build()) + when: + sender.sendStartedEvent() + sender.sendSucceededEventForOneWayCommand() + sender.sendSucceededEvent(MessageHelper.buildSuccessfulReply(message.getId(), replyDocument.toJson())) + sender.sendFailedEvent(failureException) + + then: + 1 * logger.debug { + it == "Command \"ping\" started on database \"test\" using a connection with driver-generated ID " + + "${connectionDescription.connectionId.localValue} and server-generated ID " + + "${connectionDescription.connectionId.serverValue} to 127.0.0.1:27017. The " + + "request ID is ${message.getId()} and the operation ID is ${operationContext.getId()}. 
" + + "Command: {\"ping\": 1, " + "\"\$db\": \"test\"}" + } + 1 * logger.debug { + it.matches("Command \"ping\" succeeded on database \"test\" in \\d+\\.\\d+ ms using a connection with driver-generated ID " + + "${connectionDescription.connectionId.localValue} and server-generated ID " + + "${connectionDescription.connectionId.serverValue} to 127.0.0.1:27017. The " + + "request ID is ${message.getId()} and the operation ID is ${operationContext.getId()}. Command reply: \\{\"ok\": 1}") + } + 1 * logger.debug { + it.matches("Command \"ping\" succeeded on database \"test\" in \\d+\\.\\d+ ms using a connection with driver-generated ID " + + "${connectionDescription.connectionId.localValue} and server-generated ID " + + "${connectionDescription.connectionId.serverValue} to 127.0.0.1:27017. The " + + "request ID is ${message.getId()} and the operation ID is ${operationContext.getId()}. Command reply: \\{\"ok\": 42}") + } + 1 * logger.debug({ + it.matches("Command \"ping\" failed on database \"test\" in \\d+\\.\\d+ ms using a connection with driver-generated ID " + + "${connectionDescription.connectionId.localValue} and server-generated ID " + + "${connectionDescription.connectionId.serverValue} to 127.0.0.1:27017. The " + + "request ID is ${message.getId()} and the operation ID is ${operationContext.getId()}.") + }, failureException) + + where: + commandListener << [null, Stub(CommandListener)] + } + + def 'should log large command with ellipses'() { + given: + def serverId = new ServerId(new ClusterId(), new ServerAddress()) + def connectionDescription = new ConnectionDescription(serverId) + .withConnectionId(new ConnectionId(serverId, 42, 1000)) + def database = 'test' + def messageSettings = MessageSettings.builder().maxWireVersion(LATEST_WIRE_VERSION).build() + def commandDocument = new BsonDocument('fake', new BsonBinary(new byte[2048])) + def message = new CommandMessage(database, commandDocument, NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), + messageSettings, SINGLE, null) + def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) + message.encode(bsonOutput, new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE, + Stub(TimeoutContext), null)) + def logger = Mock(Logger) { + isDebugEnabled() >> true + } + def operationContext = OPERATION_CONTEXT + + def sender = new LoggingCommandEventSender([] as Set, [] as Set, connectionDescription, null, operationContext, + message, bsonOutput, new StructuredLogger(logger), LoggerSettings.builder().build()) + + when: + sender.sendStartedEvent() + + then: + 1 * logger.debug { + it == "Command \"fake\" started on database \"test\" using a connection with driver-generated ID " + + "${connectionDescription.connectionId.localValue} and server-generated ID " + + "${connectionDescription.connectionId.serverValue} to 127.0.0.1:27017. The " + + "request ID is ${message.getId()} and the operation ID is ${operationContext.getId()}. " + + "Command: {\"fake\": {\"\$binary\": {\"base64\": \"${'A' * 967} ..." 
+ } + } + + def 'should log redacted command with ellipses'() { + given: + def serverId = new ServerId(new ClusterId(), new ServerAddress()) + def connectionDescription = new ConnectionDescription(serverId) + .withConnectionId(new ConnectionId(serverId, 42, 1000)) + def database = 'test' + def messageSettings = MessageSettings.builder().maxWireVersion(LATEST_WIRE_VERSION).build() + def commandDocument = new BsonDocument('createUser', new BsonString('private')) + def message = new CommandMessage(database, commandDocument, NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), + messageSettings, SINGLE, null) + def bsonOutput = new ByteBufferBsonOutput(new SimpleBufferProvider()) + message.encode(bsonOutput, new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE, + Stub(TimeoutContext), null)) + def logger = Mock(Logger) { + isDebugEnabled() >> true + } + def operationContext = OPERATION_CONTEXT + def sender = new LoggingCommandEventSender(['createUser'] as Set, [] as Set, connectionDescription, null, + operationContext, message, bsonOutput, new StructuredLogger(logger), LoggerSettings.builder().build()) + + when: + sender.sendStartedEvent() + + then: + 1 * logger.debug { + it == "Command \"createUser\" started on database \"test\" using a connection with driver-generated ID " + + "${connectionDescription.connectionId.localValue} and server-generated ID " + + "${connectionDescription.connectionId.serverValue} to 127.0.0.1:27017. The " + + "request ID is ${message.getId()} and the operation ID is ${operationContext.getId()}. Command: {}" + } + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/MessageHelper.java b/driver-core/src/test/unit/com/mongodb/internal/connection/MessageHelper.java new file mode 100644 index 00000000000..2ef3c59cb95 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/MessageHelper.java @@ -0,0 +1,129 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection; + +import org.bson.BsonBinaryReader; +import org.bson.BsonBinaryWriter; +import org.bson.BsonDocument; +import org.bson.ByteBuf; +import org.bson.ByteBufNIO; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; +import org.bson.io.BasicOutputBuffer; +import org.bson.io.BsonInput; +import org.bson.io.OutputBuffer; +import org.bson.json.JsonReader; + +import java.nio.Buffer; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.util.Locale; + +import static com.mongodb.ClusterFixture.getServerApi; +import static com.mongodb.connection.ConnectionDescription.getDefaultMaxMessageSize; +import static org.junit.Assert.fail; + +final class MessageHelper { + + private MessageHelper() { + } + + public static final String LEGACY_HELLO = "isMaster"; + public static final String LEGACY_HELLO_LOWER = LEGACY_HELLO.toLowerCase(Locale.ROOT); + + + public static ResponseBuffers buildSuccessfulReply(final String json) { + return buildSuccessfulReply(0, json); + } + + public static ResponseBuffers buildSuccessfulReply(final int responseTo, final String json) { + return buildReply(responseTo, json, 0); + } + + public static ResponseBuffers buildReply(final int responseTo, final String json, final int responseFlags) { + ByteBuf body = encodeJson(json); + body.flip(); + + ReplyHeader header = buildReplyHeader(responseTo, body.remaining(), responseFlags); + return new ResponseBuffers(header, body); + } + + private static ReplyHeader buildReplyHeader(final int responseTo, final int documentsSize, final int responseFlags) { + ByteBuffer headerByteBuffer = ByteBuffer.allocate(36); + headerByteBuffer.order(ByteOrder.LITTLE_ENDIAN); + headerByteBuffer.putInt(36 + documentsSize); // length + headerByteBuffer.putInt(2456); //request id + headerByteBuffer.putInt(responseTo); // response to + headerByteBuffer.putInt(1); // opcode + headerByteBuffer.putInt(responseFlags); // responseFlags + headerByteBuffer.putLong(0); // cursorId + headerByteBuffer.putInt(0); // startingFrom + headerByteBuffer.putInt(1); //numberReturned + ((Buffer) headerByteBuffer).flip(); + + ByteBufNIO buffer = new ByteBufNIO(headerByteBuffer); + return new ReplyHeader(buffer, new MessageHeader(buffer, getDefaultMaxMessageSize())); + } + + public static BsonDocument decodeCommand(final BsonInput bsonInput) { + bsonInput.readInt32(); // length + bsonInput.readInt32(); // requestId + bsonInput.readInt32(); // responseTo + int opCode = bsonInput.readInt32(); + + if (opCode == 2004) { // OP_QUERY + bsonInput.readInt32(); // flags + bsonInput.readCString(); // collectionName + bsonInput.readInt32(); // numToSkip + bsonInput.readInt32(); // numToReturn + } else if (opCode == 2013) { // OP_MSG + bsonInput.readInt32(); // flags + bsonInput.readByte(); // kind + } else { + fail("Unexpected opcode " + opCode); + } + + BsonBinaryReader reader = new BsonBinaryReader(bsonInput); + return new BsonDocumentCodec().decode(reader, DecoderContext.builder().build()); + } + + public static String decodeCommandAsJson(final BsonInput bsonInput) { + return decodeCommand(bsonInput).toJson(); + } + + public static String getApiVersionField() { + return getServerApi() == null ? 
"" : ", \"apiVersion\": \"" + getServerApi().getVersion().getValue() + "\""; + } + + public static String getDbField(final String databaseName) { + return ", \"$db\": \"" + databaseName + "\""; + } + + private static ByteBuf encodeJson(final String json) { + OutputBuffer outputBuffer = new BasicOutputBuffer(); + JsonReader jsonReader = new JsonReader(json); + BsonDocumentCodec codec = new BsonDocumentCodec(); + BsonDocument document = codec.decode(jsonReader, DecoderContext.builder().build()); + BsonBinaryWriter writer = new BsonBinaryWriter(outputBuffer); + codec.encode(writer, document, EncoderContext.builder().build()); + + ByteBuffer documentByteBuffer = ByteBuffer.allocate(outputBuffer.size()); + documentByteBuffer.put(outputBuffer.toByteArray()); + return new ByteBufNIO(documentByteBuffer); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/MultiServerClusterSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/MultiServerClusterSpecification.groovy new file mode 100644 index 00000000000..a3cf8104fd3 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/MultiServerClusterSpecification.groovy @@ -0,0 +1,526 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection + +import com.mongodb.ServerAddress +import com.mongodb.connection.ClusterDescription +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ClusterSettings +import com.mongodb.connection.ClusterType +import com.mongodb.connection.ServerDescription +import com.mongodb.connection.ServerType +import com.mongodb.event.ClusterListener +import com.mongodb.internal.selector.WritableServerSelector +import org.bson.types.ObjectId +import spock.lang.Specification + +import static com.mongodb.ClusterFixture.CLIENT_METADATA +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE +import static com.mongodb.connection.ClusterType.REPLICA_SET +import static com.mongodb.connection.ClusterType.SHARDED +import static com.mongodb.connection.ClusterType.UNKNOWN +import static com.mongodb.connection.ServerConnectionState.CONNECTED +import static com.mongodb.connection.ServerConnectionState.CONNECTING +import static com.mongodb.connection.ServerDescription.MAX_DRIVER_WIRE_VERSION +import static com.mongodb.connection.ServerType.REPLICA_SET_GHOST +import static com.mongodb.connection.ServerType.REPLICA_SET_PRIMARY +import static com.mongodb.connection.ServerType.REPLICA_SET_SECONDARY +import static com.mongodb.connection.ServerType.SHARD_ROUTER +import static com.mongodb.connection.ServerType.STANDALONE +import static com.mongodb.internal.connection.ClusterDescriptionHelper.getAll +import static com.mongodb.internal.connection.ClusterDescriptionHelper.getByServerAddress +import static java.util.concurrent.TimeUnit.MILLISECONDS + +class MultiServerClusterSpecification extends Specification { + private static final ClusterId CLUSTER_ID = new ClusterId() + + private final ServerAddress firstServer = new ServerAddress('localhost:27017') + private final ServerAddress secondServer = new ServerAddress('localhost:27018') + private final ServerAddress thirdServer = new ServerAddress('localhost:27019') + + private final TestClusterableServerFactory factory = new TestClusterableServerFactory() + + def setup() { + Time.makeTimeConstant() + } + + def cleanup() { + Time.makeTimeMove() + } + + def 'should include settings in cluster description'() { + given: + def cluster = new MultiServerCluster(CLUSTER_ID, ClusterSettings.builder().mode(MULTIPLE) + .serverSelectionTimeout(1, MILLISECONDS) + .hosts([firstServer]).build(), factory, CLIENT_METADATA) + sendNotification(firstServer, REPLICA_SET_PRIMARY) + + expect: + cluster.getCurrentDescription().clusterSettings != null + cluster.getCurrentDescription().serverSettings != null + } + + def 'should correctly report description when connected to a primary'() { + given: + def cluster = new MultiServerCluster(CLUSTER_ID, ClusterSettings.builder().mode(MULTIPLE).hosts([firstServer]).build(), + factory, CLIENT_METADATA) + + when: + sendNotification(firstServer, REPLICA_SET_PRIMARY) + + then: + cluster.getCurrentDescription().type == REPLICA_SET + cluster.getCurrentDescription().connectionMode == MULTIPLE + } + + def 'should not get servers snapshot when closed'() { + given: + def cluster = new MultiServerCluster(CLUSTER_ID, ClusterSettings.builder().hosts(Arrays.asList(firstServer)).mode(MULTIPLE).build(), + factory, CLIENT_METADATA) + cluster.close() + + when: + cluster.getServersSnapshot( + OPERATION_CONTEXT.getTimeoutContext().computeServerSelectionTimeout(), + OPERATION_CONTEXT.getTimeoutContext()) + + then: + thrown(IllegalStateException) + 
def 'should discover all hosts in the cluster when notified by the primary'() { + given: + def cluster = new MultiServerCluster(CLUSTER_ID, ClusterSettings.builder().mode(MULTIPLE).hosts([firstServer]).build(), + factory, CLIENT_METADATA) + + when: + factory.sendNotification(firstServer, REPLICA_SET_PRIMARY, [firstServer, secondServer, thirdServer]) + + then: + getAll(cluster.getCurrentDescription()) == factory.getDescriptions(firstServer, secondServer, thirdServer) + } + + def 'should discover all hosts in the cluster when notified by a secondary and there is no primary'() { + given: + def cluster = new MultiServerCluster(CLUSTER_ID, ClusterSettings.builder().mode(MULTIPLE).hosts([firstServer]).build(), + factory, CLIENT_METADATA) + + when: + factory.sendNotification(firstServer, REPLICA_SET_SECONDARY, [firstServer, secondServer, thirdServer]) + + then: + getAll(cluster.getCurrentDescription()) == factory.getDescriptions(firstServer, secondServer, thirdServer) + } + + def 'should discover all passives in the cluster'() { + given: + def cluster = new MultiServerCluster(CLUSTER_ID, ClusterSettings.builder().mode(MULTIPLE).hosts([firstServer]).build(), + factory, CLIENT_METADATA) + + when: + factory.sendNotification(firstServer, REPLICA_SET_PRIMARY, [firstServer], [secondServer, thirdServer]) + + then: + getAll(cluster.getCurrentDescription()) == factory.getDescriptions(firstServer, secondServer, thirdServer) + } + + def 'should remove a secondary server whose reported host name does not match the address connected to'() { + given: + def seedListAddress = new ServerAddress('127.0.0.1:27017') + def cluster = new MultiServerCluster(CLUSTER_ID, ClusterSettings.builder().hosts([seedListAddress]).mode(MULTIPLE).build(), + factory, CLIENT_METADATA) + + when: + factory.sendNotification(seedListAddress, REPLICA_SET_SECONDARY, [firstServer, secondServer], firstServer) + + then: + getAll(cluster.getCurrentDescription()) == factory.getDescriptions(firstServer, secondServer) + } + + def 'should remove a primary server whose reported host name does not match the address connected to'() { + given: + def seedListAddress = new ServerAddress('127.0.0.1:27017') + def cluster = new MultiServerCluster(CLUSTER_ID, + ClusterSettings.builder().hosts([seedListAddress]).mode(MULTIPLE).build(), factory, CLIENT_METADATA) + + when: + factory.sendNotification(seedListAddress, REPLICA_SET_PRIMARY, [firstServer, secondServer], firstServer) + + then: + getAll(cluster.getCurrentDescription()) == factory.getDescriptions(firstServer, secondServer) + } + + def 'should remove a server when it no longer appears in hosts reported by the primary'() { + given: + def cluster = new MultiServerCluster(CLUSTER_ID, + ClusterSettings.builder().hosts([firstServer, secondServer, thirdServer]).build(), factory, CLIENT_METADATA) + sendNotification(firstServer, REPLICA_SET_PRIMARY) + sendNotification(secondServer, REPLICA_SET_SECONDARY) + sendNotification(thirdServer, REPLICA_SET_SECONDARY) + + when: + factory.sendNotification(firstServer, REPLICA_SET_PRIMARY, [firstServer, secondServer]) + + then: + getAll(cluster.getCurrentDescription()) == factory.getDescriptions(firstServer, secondServer) + factory.getServer(thirdServer).isClosed() + } + + def 'should remove a server of the wrong type when type is replica set'() { + given: + def cluster = new MultiServerCluster( + CLUSTER_ID, ClusterSettings.builder().requiredClusterType(REPLICA_SET).hosts([firstServer, secondServer]).build(), + factory, CLIENT_METADATA) + + when: + 
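+        // a mongos (SHARD_ROUTER) is the wrong type for a cluster required to be a replica set
+        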
sendNotification(secondServer, SHARD_ROUTER)
+
+        then:
+        cluster.getCurrentDescription().type == REPLICA_SET
+        getAll(cluster.getCurrentDescription()) == factory.getDescriptions(firstServer)
+    }
+
+    def 'should ignore an empty list of hosts when type is replica set'() {
+        given:
+        def cluster = new MultiServerCluster(
+                CLUSTER_ID, ClusterSettings.builder().requiredClusterType(REPLICA_SET).hosts([firstServer, secondServer]).build(),
+                factory, CLIENT_METADATA)
+
+        when:
+        factory.sendNotification(secondServer, REPLICA_SET_GHOST, [])
+
+        then:
+        cluster.getCurrentDescription().type == REPLICA_SET
+        getAll(cluster.getCurrentDescription()) == factory.getDescriptions(firstServer, secondServer)
+        getByServerAddress(cluster.getCurrentDescription(), secondServer).getType() == REPLICA_SET_GHOST
+    }
+
+    def 'should ignore a host without a replica set name when type is replica set'() {
+        given:
+        def cluster = new MultiServerCluster(
+                CLUSTER_ID, ClusterSettings.builder().requiredClusterType(REPLICA_SET).hosts([firstServer, secondServer]).build(),
+                factory, CLIENT_METADATA)
+
+        when:
+        factory.sendNotification(secondServer, REPLICA_SET_GHOST, [firstServer, secondServer], (String) null) // null replica set name
+
+        then:
+        cluster.getCurrentDescription().type == REPLICA_SET
+        getAll(cluster.getCurrentDescription()) == factory.getDescriptions(firstServer, secondServer)
+        getByServerAddress(cluster.getCurrentDescription(), secondServer).getType() == REPLICA_SET_GHOST
+    }
+
+    def 'should remove a server of the wrong type when type is sharded'() {
+        given:
+        def cluster = new MultiServerCluster(
+                CLUSTER_ID, ClusterSettings.builder().requiredClusterType(SHARDED).hosts([firstServer, secondServer]).build(),
+                factory, CLIENT_METADATA)
+        sendNotification(firstServer, SHARD_ROUTER)
+
+        when:
+        sendNotification(secondServer, REPLICA_SET_PRIMARY)
+
+        then:
+        cluster.getCurrentDescription().type == SHARDED
+        getAll(cluster.getCurrentDescription()) == factory.getDescriptions(firstServer)
+    }
+
+    def 'should remove a server of the wrong type from a discovered replica set'() {
+        given:
+        def cluster = new MultiServerCluster(CLUSTER_ID,
+                ClusterSettings.builder().mode(MULTIPLE).hosts([firstServer, secondServer]).build(), factory, CLIENT_METADATA)
+        sendNotification(firstServer, REPLICA_SET_PRIMARY)
+
+        when:
+        sendNotification(secondServer, STANDALONE)
+
+        then:
+        cluster.getCurrentDescription().type == REPLICA_SET
+        getAll(cluster.getCurrentDescription()) == factory.getDescriptions(firstServer, thirdServer)
+    }
+
+    def 'should not set cluster type when connected to a standalone if the seed list size is greater than one'() {
+        given:
+        def cluster = new MultiServerCluster(CLUSTER_ID,
+                ClusterSettings.builder()
+                        .serverSelectionTimeout(1, MILLISECONDS)
+                        .mode(MULTIPLE).hosts([firstServer, secondServer]).build(),
+                factory, CLIENT_METADATA)
+
+        when:
+        sendNotification(firstServer, STANDALONE)
+
+        then:
+        cluster.getCurrentDescription().getType() == UNKNOWN
+    }
+
+    def 'should not set cluster type when connected to a replica set ghost until a valid replica set member connects'() {
+        given:
+        def cluster = new MultiServerCluster(CLUSTER_ID,
+                ClusterSettings.builder()
+                        .serverSelectionTimeout(1, MILLISECONDS)
+                        .mode(MULTIPLE).hosts([firstServer, secondServer]).build(),
+                factory, CLIENT_METADATA)
+
+        when:
+        sendNotification(firstServer, REPLICA_SET_GHOST)
+
+        then:
+        cluster.getCurrentDescription().getType() == UNKNOWN
+
+        when:
+        sendNotification(secondServer, REPLICA_SET_PRIMARY)
+
+        then:
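+        // a genuine replica set member has now reported, so the cluster type is resolved
+        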
cluster.getCurrentDescription().type == REPLICA_SET
+        getAll(cluster.getCurrentDescription()) == factory.getDescriptions(firstServer, secondServer, thirdServer)
+    }
+
+    def 'should invalidate existing primary when a new primary notifies'() {
+        given:
+        def cluster = new MultiServerCluster(CLUSTER_ID, ClusterSettings.builder().hosts([firstServer, secondServer]).build(),
+                factory, CLIENT_METADATA)
+        sendNotification(firstServer, REPLICA_SET_PRIMARY)
+
+        when:
+        sendNotification(secondServer, REPLICA_SET_PRIMARY)
+
+        then:
+        factory.getDescription(firstServer).state == CONNECTING
+        getAll(cluster.getCurrentDescription()) == factory.getDescriptions(firstServer, secondServer, thirdServer)
+    }
+
+    def 'should invalidate new primary if its electionId is less than the previously reported electionId'() {
+        given:
+        def cluster = new MultiServerCluster(CLUSTER_ID, ClusterSettings.builder().hosts([firstServer, secondServer]).build(),
+                factory, CLIENT_METADATA)
+
+        def electionId = new ObjectId(new Date(1000))
+        factory.sendNotification(firstServer, REPLICA_SET_PRIMARY, [firstServer, secondServer, thirdServer], electionId)
+
+        when:
+        def outdatedElectionId = new ObjectId(new Date(999))
+        factory.sendNotification(secondServer, REPLICA_SET_PRIMARY, [firstServer, secondServer, thirdServer], outdatedElectionId)
+
+        then:
+        factory.getDescription(firstServer).state == CONNECTED
+        factory.getDescription(firstServer).type == REPLICA_SET_PRIMARY
+        factory.getDescription(secondServer).state == CONNECTING
+        getAll(cluster.getCurrentDescription()) == factory.getDescriptions(firstServer, secondServer, thirdServer)
+    }
+
+    def 'should remove a server in the seed list when it is not in the reported hosts list'() {
+        given:
+        def serverAddressAlias = new ServerAddress('alternate')
+        def cluster = new MultiServerCluster(CLUSTER_ID,
+                ClusterSettings.builder().mode(MULTIPLE).hosts([serverAddressAlias]).build(), factory, CLIENT_METADATA)
+
+        when:
+        sendNotification(serverAddressAlias, REPLICA_SET_PRIMARY)
+
+        then:
+        getAll(cluster.getCurrentDescription()) == factory.getDescriptions(firstServer, secondServer, thirdServer)
+    }
+
+    def 'should retain a Standalone server given a hosts list of size 1'() {
+        given:
+        def cluster = new MultiServerCluster(CLUSTER_ID, ClusterSettings.builder().mode(MULTIPLE).hosts([firstServer]).build(),
+                factory, CLIENT_METADATA)
+
+        when:
+        sendNotification(firstServer, STANDALONE)
+
+        then:
+        cluster.getCurrentDescription().type == ClusterType.STANDALONE
+        getAll(cluster.getCurrentDescription()) == factory.getDescriptions(firstServer)
+    }
+
+    def 'should remove any Standalone server given a hosts list of size greater than one'() {
+        given:
+        def cluster = new MultiServerCluster(CLUSTER_ID, ClusterSettings.builder().hosts([firstServer, secondServer]).build(),
+                factory, CLIENT_METADATA)
+
+        when:
+        sendNotification(firstServer, STANDALONE)
+        // necessary so that getting description doesn't block
+        factory.sendNotification(secondServer, REPLICA_SET_PRIMARY, [secondServer, thirdServer])
+
+        then:
+        !(factory.getDescription(firstServer) in getAll(cluster.getCurrentDescription()))
+        cluster.getCurrentDescription().type == REPLICA_SET
+    }
+
+    def 'should remove a member whose replica set name does not match the required one'() {
+        given:
+        def cluster = new MultiServerCluster(
+                CLUSTER_ID, ClusterSettings.builder().hosts([secondServer]).mode(MULTIPLE).requiredReplicaSetName('test1').build(),
+                factory, CLIENT_METADATA)
+
+        when:
+        factory.sendNotification(secondServer,
+                REPLICA_SET_PRIMARY, [firstServer, secondServer, thirdServer], 'test2')
+
+        then:
+        cluster.getCurrentDescription().type == REPLICA_SET
+        getAll(cluster.getCurrentDescription()) == [] as Set
+    }
+
+    def 'should throw from selectServer if cluster is closed'() {
+        given:
+        def cluster = new MultiServerCluster(CLUSTER_ID,
+                ClusterSettings.builder().serverSelectionTimeout(100, MILLISECONDS).hosts([firstServer]).mode(MULTIPLE).build(),
+                factory, CLIENT_METADATA)
+        cluster.close()
+
+        when:
+        cluster.selectServer(new WritableServerSelector(), OPERATION_CONTEXT)
+
+        then:
+        thrown(IllegalStateException)
+    }
+
+    def 'should ignore a notification from a server that has been removed'() {
+        given:
+        def cluster = new MultiServerCluster(CLUSTER_ID, ClusterSettings.builder().hosts([firstServer, secondServer]).build(),
+                factory, CLIENT_METADATA)
+        factory.sendNotification(firstServer, REPLICA_SET_PRIMARY, [firstServer, thirdServer])
+
+        when:
+        factory.sendNotification(secondServer, REPLICA_SET_SECONDARY, [secondServer])
+
+        then:
+        getAll(cluster.getCurrentDescription()) == factory.getDescriptions(firstServer, thirdServer)
+    }
+
+    def 'should add servers from a secondary host list when there is no primary'() {
+        given:
+        def cluster = new MultiServerCluster(CLUSTER_ID,
+                ClusterSettings.builder().hosts([firstServer, secondServer, thirdServer]).build(), factory, CLIENT_METADATA)
+        factory.sendNotification(firstServer, REPLICA_SET_SECONDARY, [firstServer, secondServer])
+
+        when:
+        factory.sendNotification(secondServer, REPLICA_SET_SECONDARY, [secondServer, thirdServer])
+
+        then:
+        getAll(cluster.getCurrentDescription()) == factory.getDescriptions(firstServer, secondServer, thirdServer)
+    }
+
+    def 'should add and remove servers from a primary host list when there is a primary'() {
+        given:
+        def cluster = new MultiServerCluster(CLUSTER_ID,
+                ClusterSettings.builder().hosts([firstServer, secondServer, thirdServer]).build(), factory, CLIENT_METADATA)
+        factory.sendNotification(firstServer, REPLICA_SET_PRIMARY, [firstServer, secondServer])
+
+        when:
+        factory.sendNotification(firstServer, REPLICA_SET_PRIMARY, [firstServer, thirdServer])
+
+        then:
+        getAll(cluster.getCurrentDescription()) == factory.getDescriptions(firstServer, thirdServer)
+
+        when:
+        factory.sendNotification(thirdServer, REPLICA_SET_PRIMARY, [secondServer, thirdServer])
+
+        then:
+        getAll(cluster.getCurrentDescription()) == factory.getDescriptions(secondServer, thirdServer)
+    }
+
+    def 'should ignore a secondary host list when there is a primary'() {
+        given:
+        def cluster = new MultiServerCluster(CLUSTER_ID,
+                ClusterSettings.builder().hosts([firstServer, secondServer, thirdServer]).build(), factory, CLIENT_METADATA)
+        factory.sendNotification(firstServer, REPLICA_SET_PRIMARY, [firstServer, secondServer])
+
+        when:
+        factory.sendNotification(secondServer, REPLICA_SET_SECONDARY, [secondServer, thirdServer])
+
+        then:
+        getAll(cluster.getCurrentDescription()) == factory.getDescriptions(firstServer, secondServer)
+    }
+
+    def 'should ignore a notification from a server that is not ok'() {
+        given:
+        def cluster = new MultiServerCluster(CLUSTER_ID, ClusterSettings.builder().hosts([firstServer, secondServer]).build(),
+                factory, CLIENT_METADATA)
+        factory.sendNotification(firstServer, REPLICA_SET_PRIMARY, [firstServer, secondServer, thirdServer])
+
+        when:
+        factory.sendNotification(secondServer, REPLICA_SET_SECONDARY, [], false)
+
+        then:
+        getAll(cluster.getCurrentDescription()) == factory.getDescriptions(firstServer, secondServer,
thirdServer) + } + + def 'should fire cluster events'() { + given: + def clusterListener = Mock(ClusterListener) + def initialDescription = new ClusterDescription(MULTIPLE, UNKNOWN, + [ServerDescription.builder().state(CONNECTING).address(firstServer).build()]) + def serverDescription = ServerDescription.builder().ok(true).address(firstServer).state(CONNECTED) + .type(REPLICA_SET_PRIMARY).hosts([firstServer.toString(), secondServer.toString(), thirdServer.toString()] as Set) + .setName('test') + .canonicalAddress(firstServer.toString()) + .setVersion(1) + .maxWireVersion(MAX_DRIVER_WIRE_VERSION) + .build() + + when: + def cluster = new MultiServerCluster(CLUSTER_ID, ClusterSettings.builder().mode(MULTIPLE).hosts([firstServer]) + .addClusterListener(clusterListener).build(), factory, CLIENT_METADATA) + + then: + 1 * clusterListener.clusterOpening { it.clusterId == CLUSTER_ID } + 1 * clusterListener.clusterDescriptionChanged { + it.clusterId == CLUSTER_ID && + it.previousDescription == new ClusterDescription(MULTIPLE, UNKNOWN, []) && + it.newDescription == initialDescription + } + + when: + sendNotification(firstServer, REPLICA_SET_PRIMARY) + + then: + 1 * clusterListener.clusterDescriptionChanged { + it.clusterId == CLUSTER_ID && + it.previousDescription == initialDescription && + it.newDescription == new ClusterDescription(MULTIPLE, REPLICA_SET, + [serverDescription, + ServerDescription.builder().state(CONNECTING).address(secondServer).build(), + ServerDescription.builder().state(CONNECTING).address(thirdServer).build()]) + } + + when: + cluster.close() + + then: + 1 * clusterListener.clusterClosed { it.clusterId == CLUSTER_ID } + } + + def 'should connect to all servers'() { + given: + def cluster = new MultiServerCluster(CLUSTER_ID, ClusterSettings.builder().hosts([firstServer, secondServer]).build(), + factory, CLIENT_METADATA) + + when: + cluster.connect() + + then: + [firstServer, secondServer].collect { factory.getServer(it).connectCount } == [1, 1] + } + + def sendNotification(ServerAddress serverAddress, ServerType serverType) { + factory.sendNotification(serverAddress, serverType, [firstServer, secondServer, thirdServer]) + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/NoOpSessionContextSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/NoOpSessionContextSpecification.groovy new file mode 100644 index 00000000000..0eb5fd3e3af --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/NoOpSessionContextSpecification.groovy @@ -0,0 +1,68 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.mongodb.internal.connection
+
+import com.mongodb.ReadConcern
+import org.bson.BsonDocument
+import org.bson.BsonTimestamp
+import spock.lang.Specification
+
+class NoOpSessionContextSpecification extends Specification {
+    def 'should be a no-op'() {
+        given:
+        def sessionContext = NoOpSessionContext.INSTANCE
+
+        expect:
+        !sessionContext.hasSession()
+        sessionContext.getClusterTime() == null
+        sessionContext.getOperationTime() == null
+        !sessionContext.isCausallyConsistent()
+        sessionContext.readConcern == ReadConcern.DEFAULT
+
+        when:
+        sessionContext.advanceOperationTime(new BsonTimestamp(42, 1))
+
+        then:
+        noExceptionThrown()
+
+        when:
+        sessionContext.advanceClusterTime(new BsonDocument())
+
+        then:
+        noExceptionThrown()
+
+        when:
+        sessionContext.getSessionId()
+
+        then:
+        thrown(UnsupportedOperationException)
+
+        when:
+        sessionContext.advanceTransactionNumber()
+
+        then:
+        thrown(UnsupportedOperationException)
+    }
+
+    def 'should provide given read concern for ReadConcernAwareNoOpSessionContext'() {
+        given:
+        def sessionContext = new ReadConcernAwareNoOpSessionContext(ReadConcern.MAJORITY)
+
+        expect:
+        sessionContext.readConcern == ReadConcern.MAJORITY
+    }
+}
\ No newline at end of file
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/PlainAuthenticatorUnitTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/PlainAuthenticatorUnitTest.java
new file mode 100644
index 00000000000..12d8e9fa7c3
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/PlainAuthenticatorUnitTest.java
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.connection;
+
+import com.mongodb.MongoCredential;
+import com.mongodb.ServerAddress;
+import com.mongodb.async.FutureResultCallback;
+import com.mongodb.connection.ClusterConnectionMode;
+import com.mongodb.connection.ClusterId;
+import com.mongodb.connection.ConnectionDescription;
+import com.mongodb.connection.ServerId;
+import org.bson.io.BsonInput;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.List;
+import java.util.concurrent.ExecutionException;
+
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT;
+import static com.mongodb.ClusterFixture.getServerApi;
+import static com.mongodb.internal.connection.MessageHelper.getApiVersionField;
+import static com.mongodb.internal.connection.MessageHelper.getDbField;
+import static org.junit.Assert.assertEquals;
+
+public class PlainAuthenticatorUnitTest {
+    private TestInternalConnection connection;
+    private ConnectionDescription connectionDescription;
+    private MongoCredential credential;
+    private PlainAuthenticator subject;
+
+    @Before
+    public void before() {
+        connection = new TestInternalConnection(new ServerId(new ClusterId(), new ServerAddress("localhost", 27017)));
+        connectionDescription = new ConnectionDescription(new ServerId(new ClusterId(), new ServerAddress()));
+        credential = MongoCredential.createPlainCredential("user", "$external", "pencil".toCharArray());
+        subject = new PlainAuthenticator(new MongoCredentialWithCache(credential), ClusterConnectionMode.MULTIPLE, getServerApi());
+    }
+
+    @Test
+    public void testSuccessfulAuthentication() {
+        enqueueSuccessfulReply();
+
+        subject.authenticate(connection, connectionDescription, OPERATION_CONTEXT);
+
+        validateMessages();
+    }
+
+    @Test
+    public void testSuccessfulAuthenticationAsync() throws ExecutionException, InterruptedException {
+        enqueueSuccessfulReply();
+
+        FutureResultCallback<Void> futureCallback = new FutureResultCallback<>();
+        subject.authenticateAsync(connection, connectionDescription, OPERATION_CONTEXT, futureCallback);
+        futureCallback.get();
+
+        validateMessages();
+    }
+
+    private void validateMessages() {
+        List<BsonInput> sent = connection.getSent();
+        String command = MessageHelper.decodeCommandAsJson(sent.get(0));
+        String expectedCommand = "{\"saslStart\": 1, "
+                + "\"mechanism\": \"PLAIN\", "
+                + "\"payload\": {\"$binary\": {\"base64\": \"dXNlcgB1c2VyAHBlbmNpbA==\", \"subType\": \"00\"}}"
+                + getDbField("$external")
+                + getApiVersionField()
+                + "}";
+
+        assertEquals(expectedCommand, command);
+    }
+
+    private void enqueueSuccessfulReply() {
+        ResponseBuffers reply = MessageHelper.buildSuccessfulReply(
+                "{conversationId: 1, "
+                        + "done: true, "
+                        + "ok: 1}");
+
+        connection.enqueueReply(reply);
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/PowerOfTwoBufferPoolTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/PowerOfTwoBufferPoolTest.java
new file mode 100644
index 00000000000..e2b439ba6c6
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/PowerOfTwoBufferPoolTest.java
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import org.bson.ByteBuf; +import org.junit.Before; +import org.junit.Test; + +import java.nio.ByteBuffer; +import java.util.concurrent.TimeUnit; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotSame; +import static org.junit.Assert.assertSame; + +public class PowerOfTwoBufferPoolTest { + private PowerOfTwoBufferPool pool; + + @Before + public void setUp() { + pool = new PowerOfTwoBufferPool(10); + } + + @Test + public void testNormalRequest() { + + for (int i = 0; i <= 10; i++) { + ByteBuf buf = pool.getBuffer((int) Math.pow(2, i)); + assertEquals((int) Math.pow(2, i), buf.capacity()); + assertEquals((int) Math.pow(2, i), buf.limit()); + + if (i > 1) { + buf = pool.getBuffer((int) Math.pow(2, i) - 1); + assertEquals((int) Math.pow(2, i), buf.capacity()); + assertEquals((int) Math.pow(2, i) - 1, buf.limit()); + } + + if (i < 10) { + buf = pool.getBuffer((int) Math.pow(2, i) + 1); + assertEquals((int) Math.pow(2, i + 1), buf.capacity()); + assertEquals((int) Math.pow(2, i) + 1, buf.limit()); + } + } + } + + @Test + public void testReuse() { + ByteBuf buf = pool.getBuffer((int) Math.pow(2, 10)); + ByteBuffer byteBuffer = buf.asNIO(); + buf.release(); + assertSame(byteBuffer, pool.getBuffer((int) Math.pow(2, 10)).asNIO()); + } + + @Test + public void testHugeBufferRequest() { + ByteBuf buf = pool.getBuffer((int) Math.pow(2, 10) + 1); + assertEquals((int) Math.pow(2, 10) + 1, buf.capacity()); + assertEquals((int) Math.pow(2, 10) + 1, buf.limit()); + + buf.release(); + assertNotSame(buf, pool.getBuffer((int) Math.pow(2, 10) + 1)); + } + + // Racy test + @Test + public void testPruning() throws InterruptedException { + PowerOfTwoBufferPool pool = new PowerOfTwoBufferPool(10, 5, TimeUnit.MILLISECONDS) + .enablePruning(); + try { + ByteBuf byteBuf = pool.getBuffer(256); + ByteBuffer wrappedByteBuf = byteBuf.asNIO(); + byteBuf.release(); + Thread.sleep(50); + ByteBuf newByteBuf = pool.getBuffer(256); + assertNotSame(wrappedByteBuf, newByteBuf.asNIO()); + } finally { + pool.disablePruning(); + } + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ProtocolHelperSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/ProtocolHelperSpecification.groovy new file mode 100644 index 00000000000..7e4fee2c2cf --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ProtocolHelperSpecification.groovy @@ -0,0 +1,170 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.mongodb.internal.connection
+
+import com.mongodb.MongoCommandException
+import com.mongodb.MongoExecutionTimeoutException
+import com.mongodb.MongoNodeIsRecoveringException
+import com.mongodb.MongoNotPrimaryException
+import com.mongodb.MongoOperationTimeoutException
+import com.mongodb.MongoQueryException
+import com.mongodb.ServerAddress
+import com.mongodb.internal.TimeoutContext
+import org.bson.BsonBoolean
+import org.bson.BsonDocument
+import org.bson.BsonDouble
+import org.bson.BsonInt32
+import org.bson.BsonInt64
+import org.bson.BsonNull
+import org.bson.BsonString
+import spock.lang.Specification
+
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT
+import static com.mongodb.internal.connection.ProtocolHelper.getCommandFailureException
+import static com.mongodb.internal.connection.ProtocolHelper.getQueryFailureException
+import static com.mongodb.internal.connection.ProtocolHelper.isCommandOk
+
+class ProtocolHelperSpecification extends Specification {
+
+    def 'isCommandOk should be false if ok field is missing'() {
+        expect:
+        !isCommandOk(new BsonDocument())
+    }
+
+    def 'isCommandOk should be false for numbers that are 0'() {
+        expect:
+        !isCommandOk(new BsonDocument('ok', new BsonInt32(0)))
+        !isCommandOk(new BsonDocument('ok', new BsonInt64(0)))
+        !isCommandOk(new BsonDocument('ok', new BsonDouble(0.0)))
+    }
+
+    def 'isCommandOk should be true for numbers that are not 0'() {
+        expect:
+        isCommandOk(new BsonDocument('ok', new BsonInt32(10)))
+        isCommandOk(new BsonDocument('ok', new BsonInt64(10)))
+        isCommandOk(new BsonDocument('ok', new BsonDouble(10.0)))
+    }
+
+    def 'isCommandOk should equal the boolean value'() {
+        expect:
+        isCommandOk(new BsonDocument('ok', BsonBoolean.TRUE))
+        !isCommandOk(new BsonDocument('ok', BsonBoolean.FALSE))
+    }
+
+    def 'isCommandOk should be false if ok is not a number or a boolean'() {
+        expect:
+        !isCommandOk(new BsonDocument('ok', new BsonNull()))
+    }
+
+    def 'command failure exception should be MongoExecutionTimeoutException if error code is 50'() {
+        expect:
+        getCommandFailureException(new BsonDocument('ok', new BsonInt32(0)).append('code', new BsonInt32(50)),
+                new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) instanceof MongoExecutionTimeoutException
+    }
+
+    def 'command failure exception should be MongoOperationTimeoutException if error code is 50 and timeoutMS is set'() {
+        expect:
+        getCommandFailureException(new BsonDocument('ok', new BsonInt32(0)).append('code', new BsonInt32(50)),
+                new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT)) instanceof MongoOperationTimeoutException
+    }
+
+    def 'query failure exception should be MongoExecutionTimeoutException if error code is 50'() {
+        expect:
+        getQueryFailureException(new BsonDocument('code', new BsonInt32(50)),
+                new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) instanceof MongoExecutionTimeoutException
+    }
+
+    def 'query failure exception should be MongoOperationTimeoutException if error code is 50 and timeoutMS is set'() {
+        expect:
+        def exception = getQueryFailureException(new BsonDocument('code', new BsonInt32(50)),
+                new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS_WITH_INFINITE_TIMEOUT))
+        exception instanceof MongoOperationTimeoutException
+        exception.getCause() instanceof MongoExecutionTimeoutException
+    }
+
+    def 'command failure exceptions should handle MongoNotPrimaryException scenarios'() {
+        expect:
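+        // the where block covers the legacy "not master" errmsg and the NotWritablePrimary (10107)
+        // and NotPrimaryNoSecondaryOk (13435) error codes
+        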
getCommandFailureException( + exception, new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) instanceof MongoNotPrimaryException + + where: + exception << [ + BsonDocument.parse('{ok: 0, errmsg: "not master server"}'), + BsonDocument.parse('{ok: 0, code: 10107}'), + BsonDocument.parse('{ok: 0, code: 13435}') + ] + } + + def 'query failure exceptions should handle MongoNotPrimaryException scenarios'() { + expect: + getQueryFailureException( + exception, new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) instanceof MongoNotPrimaryException + + where: + exception << [ + BsonDocument.parse('{$err: "not master server"}'), + BsonDocument.parse('{code: 10107}'), + BsonDocument.parse('{code: 13435}') + ] + } + + def 'command failure exceptions should handle MongoNodeIsRecoveringException scenarios'() { + expect: + getCommandFailureException( + exception, new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) instanceof MongoNodeIsRecoveringException + + where: + exception << [ + BsonDocument.parse('{ok: 0, errmsg: "node is recovering now"}'), + BsonDocument.parse('{ok: 0, code: 11600}'), + BsonDocument.parse('{ok: 0, code: 11602}'), + BsonDocument.parse('{ok: 0, code: 13436}'), + BsonDocument.parse('{ok: 0, code: 189}'), + BsonDocument.parse('{ok: 0, code: 91}'), + ] + } + + def 'query failure exceptions should handle MongoNodeIsRecoveringException scenarios'() { + expect: + getQueryFailureException( + exception, new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) instanceof MongoNodeIsRecoveringException + + where: + exception << [ + BsonDocument.parse('{$err: "node is recovering now"}'), + BsonDocument.parse('{code: 11600}'), + BsonDocument.parse('{code: 11602}'), + BsonDocument.parse('{code: 13436}'), + BsonDocument.parse('{code: 189}'), + BsonDocument.parse('{code: 91}'), + ] + } + + def 'command failure exception should be MongoCommandException'() { + expect: + getCommandFailureException(new BsonDocument('ok', new BsonInt32(0)).append('errmsg', new BsonString('some other problem')), + new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) instanceof MongoCommandException + } + + def 'query failure exception should be MongoQueryException'() { + expect: + getQueryFailureException(new BsonDocument('$err', new BsonString('some other problem')), + new ServerAddress(), new TimeoutContext(TIMEOUT_SETTINGS)) instanceof MongoQueryException + } + +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ReadConcernHelperSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/ReadConcernHelperSpecification.groovy new file mode 100644 index 00000000000..924f8dee4d6 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ReadConcernHelperSpecification.groovy @@ -0,0 +1,90 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.mongodb.internal.connection
+
+import com.mongodb.ReadConcern
+import com.mongodb.internal.session.SessionContext
+import org.bson.BsonDocument
+import org.bson.BsonString
+import org.bson.BsonTimestamp
+import spock.lang.Specification
+
+import static com.mongodb.internal.connection.ReadConcernHelper.getReadConcernDocument
+import static com.mongodb.internal.operation.ServerVersionHelper.UNKNOWN_WIRE_VERSION
+
+class ReadConcernHelperSpecification extends Specification {
+
+    def 'should throw IllegalArgumentException if session context is null'() {
+        when:
+        getReadConcernDocument(null, UNKNOWN_WIRE_VERSION)
+
+        then:
+        thrown(IllegalArgumentException)
+    }
+
+    def 'should add afterClusterTime to majority read concern when session is causally consistent'() {
+        given:
+        def operationTime = new BsonTimestamp(42, 1)
+        def sessionContext = Stub(SessionContext) {
+            isCausallyConsistent() >> true
+            getOperationTime() >> operationTime
+            getReadConcern() >> ReadConcern.MAJORITY
+        }
+
+        expect:
+        getReadConcernDocument(sessionContext, UNKNOWN_WIRE_VERSION) == new BsonDocument('level', new BsonString('majority'))
+                .append('afterClusterTime', operationTime)
+    }
+
+    def 'should add afterClusterTime to default read concern when session is causally consistent'() {
+        given:
+        def operationTime = new BsonTimestamp(42, 1)
+        def sessionContext = Stub(SessionContext) {
+            isCausallyConsistent() >> true
+            getOperationTime() >> operationTime
+            getReadConcern() >> ReadConcern.DEFAULT
+        }
+
+        expect:
+        getReadConcernDocument(sessionContext, UNKNOWN_WIRE_VERSION) ==
+                new BsonDocument('afterClusterTime', operationTime)
+    }
+
+    def 'should not add afterClusterTime to ReadConcern when session is not causally consistent'() {
+        given:
+        def sessionContext = Stub(SessionContext) {
+            isCausallyConsistent() >> false
+            getOperationTime() >> { throw new UnsupportedOperationException() }
+            getReadConcern() >> ReadConcern.MAJORITY
+        }
+
+        expect:
+        getReadConcernDocument(sessionContext, UNKNOWN_WIRE_VERSION) == new BsonDocument('level', new BsonString('majority'))
+    }
+
+    def 'should not add afterClusterTime to ReadConcern when operation time is null'() {
+        given:
+        def sessionContext = Stub(SessionContext) {
+            isCausallyConsistent() >> true
+            getOperationTime() >> null
+            getReadConcern() >> ReadConcern.MAJORITY
+        }
+
+        expect:
+        getReadConcernDocument(sessionContext, UNKNOWN_WIRE_VERSION) == new BsonDocument('level', new BsonString('majority'))
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ReplyMessageTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ReplyMessageTest.java
new file mode 100644
index 00000000000..8f454a30168
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ReplyMessageTest.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoInternalException; +import org.bson.codecs.BsonDocumentCodec; +import org.junit.jupiter.api.Test; + +import static com.mongodb.internal.connection.MessageHelper.buildReply; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class ReplyMessageTest { + + @Test + public void shouldThrowExceptionIfRequestIdDoesNotMatchResponseTo() { + int badResponseTo = 34565; + int expectedResponseTo = 5; + + ResponseBuffers responseBuffers = buildReply(badResponseTo, "{ok: 1}", 0); + + assertThrows(MongoInternalException.class, () -> + new ReplyMessage<>(responseBuffers, new BsonDocumentCodec(), expectedResponseTo)); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/RoundTripTimeSamplerTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/RoundTripTimeSamplerTest.java new file mode 100644 index 00000000000..b44afb7a725 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/RoundTripTimeSamplerTest.java @@ -0,0 +1,55 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.connection; + +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.List; +import java.util.stream.Stream; + +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static org.junit.jupiter.api.Assertions.assertEquals; + + +public class RoundTripTimeSamplerTest { + + @ParameterizedTest(name = "{index}: samples: {0}. 
Expected: average: {1} min: {2}")
+    @DisplayName("RoundTripTimeSampler should calculate the expected average and min round trip times")
+    @MethodSource
+    public void testRoundTripTimeSampler(final List<Integer> samples, final int expectedAverageRTT, final int expectedMinRTT) {
+        RoundTripTimeSampler sampler = new RoundTripTimeSampler();
+        samples.forEach(sampler::addSample);
+
+        assertEquals(expectedMinRTT, sampler.getMin());
+        assertEquals(expectedAverageRTT, sampler.getAverage());
+    }
+
+    private static Stream<Arguments> testRoundTripTimeSampler() {
+        return Stream.of(
+                Arguments.of(emptyList(), 0, 0),
+                Arguments.of(singletonList(10), 10, 0),
+                Arguments.of(asList(10, 20), 12, 10),
+                Arguments.of(asList(10, 20, 8), 11, 8),
+                Arguments.of(asList(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15), 11, 6)
+        );
+    }
+
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ScramShaAuthenticatorSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/ScramShaAuthenticatorSpecification.groovy
new file mode 100644
index 00000000000..21f9bc28161
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ScramShaAuthenticatorSpecification.groovy
@@ -0,0 +1,560 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.mongodb.internal.connection + +import com.mongodb.MongoSecurityException +import com.mongodb.ServerAddress +import com.mongodb.async.FutureResultCallback +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ConnectionDescription +import com.mongodb.connection.ServerId +import com.mongodb.connection.ServerType +import com.mongodb.internal.TimeoutSettings +import org.bson.BsonDocument +import spock.lang.Specification + +import javax.security.sasl.SaslException +import java.nio.charset.Charset +import java.util.concurrent.TimeUnit + +import static com.mongodb.MongoCredential.createScramSha1Credential +import static com.mongodb.MongoCredential.createScramSha256Credential +import static com.mongodb.connection.ClusterConnectionMode.SINGLE +import static com.mongodb.internal.connection.MessageHelper.buildSuccessfulReply +import static com.mongodb.internal.connection.OperationContext.simpleOperationContext +import static org.junit.Assert.assertEquals + +class ScramShaAuthenticatorSpecification extends Specification { + def serverId = new ServerId(new ClusterId(), new ServerAddress('localhost', 27017)) + def connectionDescription = new ConnectionDescription(serverId) + def operationContext = simpleOperationContext(TimeoutSettings.DEFAULT, null) + private final static MongoCredentialWithCache SHA1_CREDENTIAL = + new MongoCredentialWithCache(createScramSha1Credential('user', 'database', 'pencil' as char[])) + private final static MongoCredentialWithCache SHA256_CREDENTIAL = + new MongoCredentialWithCache(createScramSha256Credential('user', 'database', 'pencil' as char[])) + + def 'should successfully authenticate with sha1 as per RFC spec'() { + given: + def user = 'user' + def password = 'pencil' + def preppedPassword = 'pencil' + def payloads = ''' + C: n,,n=user,r=fyko+d2lbbFgONRv9qkxdawL + S: r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096 + C: c=biws,r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,p=v0X8v3Bz2T0CJGbJQyF0X+HI4Ts= + S: v=rmF9pqV8S7suAoZWja4dJRkFsKQ= + ''' + + when: + def credential = new MongoCredentialWithCache(createScramSha1Credential(user, 'database', password as char[])) + def authenticator = new ScramShaAuthenticator(credential, { 'fyko+d2lbbFgONRv9qkxdawL' }, { preppedPassword }, SINGLE, null) + + then: + validateAuthentication(payloads, authenticator, async, emptyExchange) + + where: + [async, emptyExchange] << [[true, false], [true, false]].combinations() + } + + def 'should speculatively authenticate with sha1'() { + given: + def user = 'user' + def password = 'pencil' + def preppedPassword = 'pencil' + def payloads = ''' + C: c=biws,r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,p=v0X8v3Bz2T0CJGbJQyF0X+HI4Ts= + S: v=rmF9pqV8S7suAoZWja4dJRkFsKQ= + ''' + def firstClientChallenge = 'n,,n=user,r=fyko+d2lbbFgONRv9qkxdawL' + def expectedSpeculativeAuthenticateCommand = BsonDocument.parse('{ saslStart: 1, mechanism: "SCRAM-SHA-1", ' + + "payload: BinData(0, '${encode64(firstClientChallenge)}'), " + + 'db: "database", options: { skipEmptyExchange: true }}') + def serverResponse = 'r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096' + def speculativeAuthenticateResponse = + BsonDocument.parse("{ conversationId: 1, payload: BinData(0, '${encode64(serverResponse)}'), done: false }") + + when: + def credential = new MongoCredentialWithCache(createScramSha1Credential(user, 'database', password as char[])) + def authenticator = new ScramShaAuthenticator(credential, { 'fyko+d2lbbFgONRv9qkxdawL' }, { 
preppedPassword }, SINGLE, null) + + then: + def speculativeAuthenticateCommand = + validateSpeculativeAuthentication(payloads, authenticator, async, speculativeAuthenticateResponse) + ((SpeculativeAuthenticator) authenticator).getSpeculativeAuthenticateResponse() == speculativeAuthenticateResponse + speculativeAuthenticateCommand.equals(expectedSpeculativeAuthenticateCommand) + + where: + async << [true, false] + } + + def 'should successfully authenticate with sha256 as per RFC spec'() { + given: + def user = 'user' + def password = 'pencil' + def preppedPassword = 'pencil' + def payloads = ''' + C: n,,n=user,r=rOprNGfwEbeRWgbNEkqO + S: r=rOprNGfwEbeRWgbNEkqO%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0,s=W22ZaJ0SNY7soEsUEjb6gQ==,i=4096 + C: c=biws,r=rOprNGfwEbeRWgbNEkqO%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0,p=dHzbZapWIk4jUhN+Ute9ytag9zjfMHgsqmmiz7AndVQ= + S: v=6rriTRBi23WpRR/wtup+mMhUZUn/dB5nLTJRsjl95G4= + ''' + + when: + def credential = new MongoCredentialWithCache(createScramSha256Credential(user, 'database', password as char[])) + def authenticator = new ScramShaAuthenticator(credential, { 'rOprNGfwEbeRWgbNEkqO' }, { preppedPassword }, SINGLE, null) + + then: + validateAuthentication(payloads, authenticator, async, emptyExchange) + + where: + [async, emptyExchange] << [[true, false], [true, false]].combinations() + } + + def 'should speculatively authenticate with sha256'() { + given: + def user = 'user' + def password = 'pencil' + def preppedPassword = 'pencil' + def payloads = ''' + C: c=biws,r=rOprNGfwEbeRWgbNEkqO%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0,p=dHzbZapWIk4jUhN+Ute9ytag9zjfMHgsqmmiz7AndVQ= + S: v=6rriTRBi23WpRR/wtup+mMhUZUn/dB5nLTJRsjl95G4= + ''' + def firstClientChallenge = 'n,,n=user,r=rOprNGfwEbeRWgbNEkqO' + def expectedSpeculativeAuthenticateCommand = BsonDocument.parse('{ saslStart: 1, mechanism: "SCRAM-SHA-256", ' + + "payload: BinData(0, '${encode64(firstClientChallenge)}'), " + + 'db: "database", options: { skipEmptyExchange: true }}') + def serverResponse = 'r=rOprNGfwEbeRWgbNEkqO%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0,s=W22ZaJ0SNY7soEsUEjb6gQ==,i=4096' + def speculativeAuthenticateResponse = + BsonDocument.parse("{ conversationId: 1, payload: BinData(0, '${encode64(serverResponse)}'), done: false }") + + when: + def credential = new MongoCredentialWithCache(createScramSha256Credential(user, 'database', password as char[])) + def authenticator = new ScramShaAuthenticator(credential, { 'rOprNGfwEbeRWgbNEkqO' }, { preppedPassword }, SINGLE, null) + + then: + def speculativeAuthenticateCommand = + validateSpeculativeAuthentication(payloads, authenticator, async, speculativeAuthenticateResponse) + ((SpeculativeAuthenticator) authenticator).getSpeculativeAuthenticateResponse() == speculativeAuthenticateResponse + speculativeAuthenticateCommand.equals(expectedSpeculativeAuthenticateCommand) + + where: + async << [true, false] + } + + def 'should successfully authenticate with SHA-1 ASCII'() { + given: + def user = 'user' + def password = 'pencil' + def preppedPassword = 'pencil' + def payloads = ''' + C: n,,n=user,r=clientNONCE + S: r=clientNONCEserverNONCE,s=c2FsdFNBTFRzYWx0,i=4096 + C: c=biws,r=clientNONCEserverNONCE,p=I4oktcY7BOL0Agn0NlWRXlRP1mg= + S: v=oKPvB1bE/9ydptJ+kohMgL+NdM0= + ''' + + when: + def credential = new MongoCredentialWithCache(createScramSha1Credential(user, 'database', password as char[])) + def authenticator = new ScramShaAuthenticator(credential, { 'clientNONCE' }, { preppedPassword }, SINGLE, null) + + then: + validateAuthentication(payloads, authenticator, async, emptyExchange) + + 
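+        // each case is exercised sync and async, with and without the server's empty final exchange
+        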
where:
+        [async, emptyExchange] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'should successfully authenticate with SHA-1 ASCII user non-ASCII pass'() {
+        given:
+        def user = 'user'
+        def password = 'p\u00e8ncil'
+        def preppedPassword = 'p\u00e8ncil'
+        def payloads = '''
+            C: n,,n=user,r=clientNONCE
+            S: r=clientNONCEserverNONCE,s=c2FsdFNBTFRzYWx0,i=4096
+            C: c=biws,r=clientNONCEserverNONCE,p=yn797N2/XhIwZBB29LhEs6D6XVw=
+            S: v=a6QRQikpGygizEM4/rCOvkgdglI=
+        '''
+
+        when:
+        def credential = new MongoCredentialWithCache(createScramSha1Credential(user, 'database', password as char[]))
+        def authenticator = new ScramShaAuthenticator(credential, { 'clientNONCE' }, { preppedPassword }, SINGLE, null)
+
+        then:
+        validateAuthentication(payloads, authenticator, async, emptyExchange)
+
+        where:
+        [async, emptyExchange] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'should successfully authenticate with SHA-1 non-ASCII user ASCII pass'() {
+        given:
+        def user = 'ram\u00f5n'
+        def password = 'pencil'
+        def preppedPassword = 'pencil'
+        def payloads = '''
+            C: n,,n=ram\u00f5n,r=clientNONCE
+            S: r=clientNONCEserverNONCE,s=c2FsdFNBTFRzYWx0,i=4096
+            C: c=biws,r=clientNONCEserverNONCE,p=kvH02DJiH7oHwk+SKpN4plfpF04=
+            S: v=BoA2mAPlV/b9A5WPDbHmHZi3EGc=
+        '''
+
+        when:
+        def credential = new MongoCredentialWithCache(createScramSha1Credential(user, 'database', password as char[]))
+        def authenticator = new ScramShaAuthenticator(credential, { 'clientNONCE' }, { preppedPassword }, SINGLE, null)
+
+        then:
+        validateAuthentication(payloads, authenticator, async, emptyExchange)
+
+        where:
+        [async, emptyExchange] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'should successfully authenticate with SHA-256 ASCII'() {
+        given:
+        def user = 'user'
+        def password = 'pencil'
+        def preppedPassword = 'pencil'
+        def payloads = '''
+            C: n,,n=user,r=clientNONCE
+            S: r=clientNONCEserverNONCE,s=c2FsdFNBTFRzYWx0,i=4096
+            C: c=biws,r=clientNONCEserverNONCE,p=ItXnHvCDW7VGij6H+4rv2o93HvkLwrQaLkfVjeSMfrc=
+            S: v=P61v8wxOu6B9J7Uij+Sk4zewSK1e6en6f5rCFO4OUNE=
+        '''
+
+        when:
+        def credential = new MongoCredentialWithCache(createScramSha256Credential(user, 'database', password as char[]))
+        def authenticator = new ScramShaAuthenticator(credential, { 'clientNONCE' }, { preppedPassword }, SINGLE, null)
+
+        then:
+        validateAuthentication(payloads, authenticator, async, emptyExchange)
+
+        where:
+        [async, emptyExchange] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'should successfully authenticate with SHA-256 ASCII user non-ASCII pass'() {
+        given:
+        def user = 'user'
+        def password = 'p\u00e8ncil'
+        def preppedPassword = 'p\u00e8ncil'
+        def payloads = '''
+            C: n,,n=user,r=clientNONCE
+            S: r=clientNONCEserverNONCE,s=c2FsdFNBTFRzYWx0,i=4096
+            C: c=biws,r=clientNONCEserverNONCE,p=o6rKPfQCKSGHClFxHjdSeiVCPA6K53++gpY3XlP8lI8=
+            S: v=rsyNAwnHfclZKxAKx1tKfInH3xPVAzCy237DQo5n/N8=
+        '''
+
+        when:
+        def credential = new MongoCredentialWithCache(createScramSha256Credential(user, 'database', password as char[]))
+        def authenticator = new ScramShaAuthenticator(credential, { 'clientNONCE' }, { preppedPassword }, SINGLE, null)
+
+        then:
+        validateAuthentication(payloads, authenticator, async, emptyExchange)
+
+        where:
+        [async, emptyExchange] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'should successfully authenticate with SHA-256 non-ASCII user ASCII pass'() {
+        given:
+        def user = 'ram\u00f5n'
+        def password = 'pencil'
+        def preppedPassword = 'pencil'
+        def payloads = '''
+            C: n,,n=ram\u00f5n,r=clientNONCE
+            S: r=clientNONCEserverNONCE,s=c2FsdFNBTFRzYWx0,i=4096
+            C: c=biws,r=clientNONCEserverNONCE,p=vRdD7SqiY5kMyAFX2enPOJK9BL+3YIVyuzCt1H2qc4o=
+            S: v=sh7QPwVuquMatYobYpYOaPiNS+lqwTCmy3rdexRDDkE=
+        '''
+
+        when:
+        def credential = new MongoCredentialWithCache(createScramSha256Credential(user, 'database', password as char[]))
+        def authenticator = new ScramShaAuthenticator(credential, { 'clientNONCE' }, { preppedPassword }, SINGLE, null)
+
+        then:
+        validateAuthentication(payloads, authenticator, async, emptyExchange)
+
+        where:
+        [async, emptyExchange] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'should successfully authenticate with SHA-256 SASLprep normal'() {
+        given:
+        def user = 'ram\u00f5n'
+        def password = 'p\u00c5assword'
+        def preppedPassword = 'p\u00c5assword'
+        def payloads = '''
+            C: n,,n=ram\u00f5n,r=clientNONCE
+            S: r=clientNONCEserverNONCE,s=c2FsdFNBTFRzYWx0,i=4096
+            C: c=biws,r=clientNONCEserverNONCE,p=Km2zqmf/GbLdkItzscNI5D0c1f+GmLDi2fScTPm6d4k=
+            S: v=30soY0l2BiInoDyrHxIuamz2LBvci1lFKo/tOMpqo98=
+        '''
+
+        when:
+        def credential = new MongoCredentialWithCache(createScramSha256Credential(user, 'database', password as char[]))
+        def authenticator = new ScramShaAuthenticator(credential, { 'clientNONCE' }, { preppedPassword }, SINGLE, null)
+
+        then:
+        validateAuthentication(payloads, authenticator, async, emptyExchange)
+
+        where:
+        [async, emptyExchange] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'should successfully authenticate with SHA-256 SASLprep non-normal'() {
+        given:
+        def user = 'ramo\u0301n'
+        def password = 'p\u212bssword'
+        def preppedPassword = 'p\u00c5ssword'
+        def payloads = '''
+            C: n,,n=ramo\u0301n,r=clientNONCE
+            S: r=clientNONCEserverNONCE,s=c2FsdFNBTFRzYWx0,i=4096
+            C: c=biws,r=clientNONCEserverNONCE,p=KkLV/eEHHw0LrTlnmElWuTiL0RxDa8lF/RqzsDP04sE=
+            S: v=eLTDerRxJFOBV8+/9xOcIkv4PezVAcNAarSyqa5mQyI=
+        '''
+
+        when:
+        def credential = new MongoCredentialWithCache(createScramSha256Credential(user, 'database', password as char[]))
+        def authenticator = new ScramShaAuthenticator(credential, { 'clientNONCE' }, { preppedPassword }, SINGLE, null)
+
+        then:
+        validateAuthentication(payloads, authenticator, async, emptyExchange)
+
+        where:
+        [async, emptyExchange] << [[true, false], [true, false]].combinations()
+    }
+
+    def 'should throw if invalid r value from server'() {
+        when:
+        def serverResponses = ['r=InvalidRValue,s=MYSALT,i=4096']
+        def authenticator = new ScramShaAuthenticator(credential, { 'rOprNGfwEbeRWgbNEkqO' }, { 'pencil' }, SINGLE, null)
+        authenticate(createConnection(serverResponses), authenticator, async)
+
+        then:
+        def e = thrown(MongoSecurityException)
+        e.getCause() instanceof SaslException
+        e.getCause().getMessage() == 'Server sent an invalid nonce.'
+
+        where:
+        [async, credential] << [[true, false], [SHA1_CREDENTIAL, SHA256_CREDENTIAL]].combinations()
+    }
+
+    def 'should throw if iteration count is below the minimum allowed count'() {
+        when:
+        def serverResponses = createMessages('S: r=rOprNGfwEbeRWgbNEkqO%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0,s=QSXCR+Q6sek8bf92,i=4095').last()
+        def authenticator = new ScramShaAuthenticator(credential, { 'rOprNGfwEbeRWgbNEkqO' }, { 'pencil' }, SINGLE, null)
+        authenticate(createConnection(serverResponses), authenticator, async)
+
+        then:
+        def e = thrown(MongoSecurityException)
+        e.getCause() instanceof SaslException
+        e.getCause().getMessage() == 'Invalid iteration count.'
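+        // i=4095 in the server's challenge is below the minimum iteration count of 4096 accepted here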
+ + where: + [async, credential] << [[true, false], [SHA1_CREDENTIAL, SHA256_CREDENTIAL]].combinations() + } + + def 'should throw if invalid server signature'() { + when: + def serverResponses = createMessages(''' + S: r=rOprNGfwEbeRWgbNEkqO%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0,s=QSXCR+Q6sek8bf92,i=4096 + S: v=InvalidServerSignature + ''').last() + def authenticator = new ScramShaAuthenticator(credential, { 'rOprNGfwEbeRWgbNEkqO' }, { 'pencil' }, SINGLE, null) + authenticate(createConnection(serverResponses), authenticator, async) + + then: + def e = thrown(MongoSecurityException) + e.getCause() instanceof SaslException + e.getCause().getMessage() == 'Server signature was invalid.' + + where: + [async, credential] << [[true, false], [SHA1_CREDENTIAL, SHA256_CREDENTIAL]].combinations() + } + + def 'should throw if too many steps SHA-1'() { + when: + def serverResponses = createMessages(''' + S: r=fyko+d2lbbFgONRv9qkxdawL3rfcNHYJY1ZVvWVs7j,s=QSXCR+Q6sek8bf92,i=4096 + S: v=rmF9pqV8S7suAoZWja4dJRkFsKQ= + S: z=ExtraStep + ''').last() + def authenticator = new ScramShaAuthenticator(SHA1_CREDENTIAL, { 'fyko+d2lbbFgONRv9qkxdawL' }, { 'pencil' }, SINGLE, null) + authenticate(createConnection(serverResponses), authenticator, async) + + then: + def e = thrown(MongoSecurityException) + e.getCause() instanceof SaslException + e.getCause().getMessage() == 'Too many steps involved in the SCRAM-SHA-1 negotiation.' + + where: + async << [true, false] + } + + def 'should throw if too many steps SHA-256'() { + when: + def serverResponses = createMessages(''' + S: r=rOprNGfwEbeRWgbNEkqO%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0,s=W22ZaJ0SNY7soEsUEjb6gQ==,i=4096 + S: v=6rriTRBi23WpRR/wtup+mMhUZUn/dB5nLTJRsjl95G4= + S: z=ExtraStep + ''', true).last() + def authenticator = new ScramShaAuthenticator(SHA256_CREDENTIAL, { 'rOprNGfwEbeRWgbNEkqO' }, { 'pencil' }, SINGLE, null) + authenticate(createConnection(serverResponses), authenticator, async) + + then: + def e = thrown(MongoSecurityException) + e.getCause() instanceof SaslException + e.getCause().getMessage() == 'Too many steps involved in the SCRAM-SHA-256 negotiation.' 
+ + where: + async << [true, false] + } + + def 'should complete authentication when done is set to true prematurely SHA-256'() { + given: + def serverResponses = createMessages(''' + S: r=rOprNGfwEbeRWgbNEkqO%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0,s=W22ZaJ0SNY7soEsUEjb6gQ==,i=4096 + S: v=6rriTRBi23WpRR/wtup+mMhUZUn/dB5nLTJRsjl95G4= + ''').last() + def authenticator = new ScramShaAuthenticator(SHA256_CREDENTIAL, { 'rOprNGfwEbeRWgbNEkqO' }, { 'pencil' }, + SINGLE, null) + + when: + // server sends done=true on first response, client is not complete after processing response + authenticate(createConnection(serverResponses, 0), authenticator, async) + + then: + def e = thrown(MongoSecurityException) + e.getMessage().contains('server completed challenges before client completed responses') + + when: + // server sends done=true on second response, client is complete after processing response + authenticate(createConnection(serverResponses, 1), authenticator, async) + + then: + noExceptionThrown() + + where: + async << [true, false] + } + + def 'should throw exception when done is set to true prematurely and server response is invalid SHA-256'() { + given: + def serverResponses = createMessages(''' + S: r=rOprNGfwEbeRWgbNEkqO%hvYDpWUa2RaTCAfuxFIlj)hNlF$k0,s=W22ZaJ0SNY7soEsUEjb6gQ==,i=4096 + S: v=invalidResponse + ''').last() + def authenticator = new ScramShaAuthenticator(SHA256_CREDENTIAL, { 'rOprNGfwEbeRWgbNEkqO' }, { 'pencil' }, SINGLE, null) + + when: + // server sends done=true on second response, client throws exception on invalid server response + authenticate(createConnection(serverResponses, 1), authenticator, async) + + then: + def e = thrown(MongoSecurityException) + e.getCause() instanceof SaslException + e.getCause().getMessage() == 'Server signature was invalid.' + + where: + async << [true, false] + } + + def createConnection(List serverResponses, int responseWhereDoneIsTrue = -1) { + TestInternalConnection connection = new TestInternalConnection(serverId, ServerType.STANDALONE) + serverResponses.eachWithIndex { response, index -> + def isDone = (index == responseWhereDoneIsTrue).booleanValue() + connection.enqueueReply( + buildSuccessfulReply("{conversationId: 1, payload: BinData(0, '${encode64(response)}'), done: ${isDone}, ok: 1}") + ) + } + if (responseWhereDoneIsTrue < 0) { + connection.enqueueReply(buildSuccessfulReply('{conversationId: 1, done: true, ok: 1}')) + } + connection + } + + def validateClientMessages(TestInternalConnection connection, List clientMessages, String mechanism, + boolean speculativeAuthenticate = false) { + def sent = connection.getSent().collect { MessageHelper.decodeCommand( it ) } + assert(clientMessages.size() == sent.size()) + sent.indices.each { + def sentMessage = sent.get(it) + def messageStart = speculativeAuthenticate || it != 0 ? 'saslContinue: 1, conversationId: 1' + : "saslStart: 1, mechanism:'$mechanism', options: {skipEmptyExchange: true}" + def expectedMessage = BsonDocument.parse( + "{$messageStart, payload: BinData(0, '${encode64(clientMessages.get(it))}'), \$db: \"database\"}") + assertEquals(expectedMessage, sentMessage) + } + } + + def validateAuthentication(String payloads, ScramShaAuthenticator authenticator, boolean async, + boolean emptyExchange) { + def (clientMessages, serverResponses) = createMessages(payloads, emptyExchange) + def connection = createConnection(serverResponses, emptyExchange ? 
-1 : 1) + authenticate(connection, authenticator, async) + validateClientMessages(connection, clientMessages, authenticator.getMechanismName()) + } + + def validateSpeculativeAuthentication(String payloads, ScramShaAuthenticator authenticator, boolean async, + BsonDocument speculativeAuthenticateResponse) { + def (clientMessages, serverResponses) = createMessages(payloads, false) + def connection = createConnection(serverResponses, 0) + def speculativeAuthenticateCommand = authenticator.createSpeculativeAuthenticateCommand(connection) + authenticator.setSpeculativeAuthenticateResponse(speculativeAuthenticateResponse) + + authenticate(connection, authenticator, async) + validateClientMessages(connection, clientMessages, authenticator.getMechanismName(), true) + speculativeAuthenticateCommand + } + + def authenticate(TestInternalConnection connection, ScramShaAuthenticator authenticator, boolean async) { + if (async) { + FutureResultCallback futureCallback = new FutureResultCallback() + authenticator.authenticateAsync(connection, connectionDescription, operationContext, futureCallback) + futureCallback.get(5, TimeUnit.SECONDS) + } else { + authenticator.authenticate(connection, connectionDescription, operationContext) + } + } + + + def encode64(String string) { + Base64.getEncoder().encodeToString(string.getBytes(Charset.forName('UTF-8'))) + } + + def createMessages(String messages, boolean emptyExchange = true) { + def (clientMessages, serverResponses) = [[], []] + def payloads = messages.stripMargin().readLines()*.trim().findAll { it.length() > 0 } + payloads.each { + def type = it[0..1] + def message = it[2..-1].trim() + + if (type == 'C:') { + clientMessages += message + } else if (type == 'S:') { + serverResponses += message + } else { + throw new IllegalArgumentException("Invalid message: $message") + } + } + if (emptyExchange) { + clientMessages += '' + } + [clientMessages, serverResponses] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDeprioritizationTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDeprioritizationTest.java new file mode 100644 index 00000000000..f1c8f69eb29 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDeprioritizationTest.java @@ -0,0 +1,126 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.internal.connection; + +import com.mongodb.MongoConnectionPoolClearedException; +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ClusterType; +import com.mongodb.connection.ServerConnectionState; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerId; +import com.mongodb.internal.connection.OperationContext.ServerDeprioritization; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; + +import java.util.List; + +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS; +import static com.mongodb.ClusterFixture.createOperationContext; +import static java.util.Arrays.asList; +import static java.util.Collections.unmodifiableList; +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.params.provider.EnumSource.Mode.EXCLUDE; + +final class ServerDeprioritizationTest { + private static final ServerDescription SERVER_A = serverDescription("a"); + private static final ServerDescription SERVER_B = serverDescription("b"); + private static final ServerDescription SERVER_C = serverDescription("c"); + private static final List ALL_SERVERS = unmodifiableList(asList(SERVER_A, SERVER_B, SERVER_C)); + private static final ClusterDescription REPLICA_SET = clusterDescription(ClusterType.REPLICA_SET); + private static final ClusterDescription SHARDED_CLUSTER = clusterDescription(ClusterType.SHARDED); + + private ServerDeprioritization serverDeprioritization; + + @BeforeEach + void beforeEach() { + serverDeprioritization = createOperationContext(TIMEOUT_SETTINGS).getServerDeprioritization(); + } + + @Test + void selectNoneDeprioritized() { + assertAll( + () -> assertEquals(ALL_SERVERS, serverDeprioritization.getServerSelector().select(SHARDED_CLUSTER)), + () -> assertEquals(ALL_SERVERS, serverDeprioritization.getServerSelector().select(REPLICA_SET)) + ); + } + + @Test + void selectSomeDeprioritized() { + deprioritize(SERVER_B); + assertAll( + () -> assertEquals(asList(SERVER_A, SERVER_C), serverDeprioritization.getServerSelector().select(SHARDED_CLUSTER)), + () -> assertEquals(ALL_SERVERS, serverDeprioritization.getServerSelector().select(REPLICA_SET)) + ); + } + + @Test + void selectAllDeprioritized() { + deprioritize(SERVER_A); + deprioritize(SERVER_B); + deprioritize(SERVER_C); + assertAll( + () -> assertEquals(ALL_SERVERS, serverDeprioritization.getServerSelector().select(SHARDED_CLUSTER)), + () -> assertEquals(ALL_SERVERS, serverDeprioritization.getServerSelector().select(REPLICA_SET)) + ); + } + + @ParameterizedTest + @EnumSource(value = ClusterType.class, mode = EXCLUDE, names = {"SHARDED"}) + void serverSelectorSelectsAllIfNotShardedCluster(final ClusterType clusterType) { + serverDeprioritization.updateCandidate(SERVER_A.getAddress()); + serverDeprioritization.onAttemptFailure(new RuntimeException()); + assertEquals(ALL_SERVERS, serverDeprioritization.getServerSelector().select(clusterDescription(clusterType))); + } + + @Test + void onAttemptFailureIgnoresIfPoolClearedException() { + serverDeprioritization.updateCandidate(SERVER_A.getAddress()); + serverDeprioritization.onAttemptFailure( 
+ new MongoConnectionPoolClearedException(new ServerId(new ClusterId(), new ServerAddress()), null)); + assertEquals(ALL_SERVERS, serverDeprioritization.getServerSelector().select(SHARDED_CLUSTER)); + } + + @Test + void onAttemptFailureDoesNotThrowIfNoCandidate() { + assertDoesNotThrow(() -> serverDeprioritization.onAttemptFailure(new RuntimeException())); + } + + private void deprioritize(final ServerDescription... serverDescriptions) { + for (ServerDescription serverDescription : serverDescriptions) { + serverDeprioritization.updateCandidate(serverDescription.getAddress()); + serverDeprioritization.onAttemptFailure(new RuntimeException()); + } + } + + private static ServerDescription serverDescription(final String host) { + return ServerDescription.builder() + .state(ServerConnectionState.CONNECTED) + .ok(true) + .address(new ServerAddress(host)) + .build(); + } + + private static ClusterDescription clusterDescription(final ClusterType clusterType) { + return new ClusterDescription(ClusterConnectionMode.MULTIPLE, clusterType, ALL_SERVERS); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDiscoveryAndMonitoringMonitoringTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDiscoveryAndMonitoringMonitoringTest.java new file mode 100644 index 00000000000..1a65534b526 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDiscoveryAndMonitoringMonitoringTest.java @@ -0,0 +1,232 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerId; +import com.mongodb.connection.ServerType; +import com.mongodb.event.ClusterDescriptionChangedEvent; +import com.mongodb.event.ClusterOpeningEvent; +import com.mongodb.event.ServerClosedEvent; +import com.mongodb.event.ServerDescriptionChangedEvent; +import com.mongodb.event.ServerListener; +import com.mongodb.event.ServerOpeningEvent; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonValue; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static com.mongodb.connection.ServerConnectionState.CONNECTED; +import static com.mongodb.connection.ServerConnectionState.CONNECTING; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; + +// See https://github.com/mongodb/specifications/tree/master/source/... 
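+// Each phase replays the scripted server responses through applyResponse(..) and then compares the SDAM
+// events captured by TestClusterListener and TestServerListenerFactory against the expected "events"
+// array from the spec file.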
+@RunWith(Parameterized.class) +public class ServerDiscoveryAndMonitoringMonitoringTest extends AbstractServerDiscoveryAndMonitoringTest { + private final TestClusterListener clusterListener = new TestClusterListener(); + private final TestServerListenerFactory serverListenerFactory = new TestServerListenerFactory(); + + public ServerDiscoveryAndMonitoringMonitoringTest(final String description, final BsonDocument definition) { + super(definition); + init(serverListenerFactory, clusterListener); + } + + @Test + public void shouldPassAllOutcomes() { + for (BsonValue phase : getDefinition().getArray("phases")) { + try { + for (BsonValue response : phase.asDocument().getArray("responses", new BsonArray())) { + applyResponse(response.asArray()); + } + BsonDocument outcome = phase.asDocument().getDocument("outcome"); + assertEvents(outcome.getArray("events")); + } finally { + clusterListener.clearClusterDescriptionChangedEvents(); + } + } + } + + @Parameterized.Parameters(name = "{0}") + public static Collection data() { + return data("server-discovery-and-monitoring/tests/monitoring"); + } + + private void assertEvents(final BsonArray events) { + Iterator clusterDescriptionChangedEventIterator = + clusterListener.getClusterDescriptionChangedEvents().iterator(); + for (BsonValue eventValue : events) { + BsonDocument eventDocument = eventValue.asDocument(); + if (eventDocument.containsKey("topology_opening_event")) { + ClusterOpeningEvent event = clusterListener.getClusterOpeningEvent(); + assertNotNull("event", event); + assertEquals("clusterId", getCluster().getClusterId(), event.getClusterId()); + } else if (eventDocument.containsKey("topology_description_changed_event")) { + ClusterDescriptionChangedEvent event = clusterDescriptionChangedEventIterator.next(); + assertNotNull("event", event); + assertEquals(getCluster().getClusterId(), event.getClusterId()); + BsonDocument topologyDescriptionChangedEventDocument = eventDocument.getDocument("topology_description_changed_event"); + assertEqualClusterDescriptions(createClusterDescriptionFromClusterDescriptionDocument( + topologyDescriptionChangedEventDocument.getDocument("previousDescription")), + event.getPreviousDescription()); + BsonDocument newDescription = topologyDescriptionChangedEventDocument.getDocument("newDescription"); + assertEqualClusterDescriptions(createClusterDescriptionFromClusterDescriptionDocument(newDescription), + event.getNewDescription()); + if (newDescription.getString("topologyType").getValue().equals("Single") && isSingleServerClusterExpected()) { + assertEquals(SingleServerCluster.class, getCluster().getClass()); + } else if (newDescription.getString("topologyType").getValue().equals("LoadBalanced")) { + assertEquals(LoadBalancedCluster.class, getCluster().getClass()); + } else { + assertEquals(MultiServerCluster.class, getCluster().getClass()); + } + + } else if (eventDocument.containsKey("server_opening_event")) { + BsonDocument serverOpeningEventDocument = eventDocument.getDocument("server_opening_event"); + ServerAddress serverAddress = new ServerAddress(serverOpeningEventDocument.getString("address").getValue()); + TestServerListener serverListener = serverListenerFactory.getListener(serverAddress); + assertNotNull("serverListener", serverListener); + ServerOpeningEvent event = serverListener.getServerOpeningEvent(); + assertNotNull("event", event); + assertEquals("serverId", new ServerId(getCluster().getClusterId(), serverAddress), event.getServerId()); + } else if 
(eventDocument.containsKey("server_closed_event")) { + BsonDocument serverClosedEventDocument = eventDocument.getDocument("server_closed_event"); + ServerAddress serverAddress = new ServerAddress(serverClosedEventDocument.getString("address").getValue()); + TestServerListener serverListener = serverListenerFactory.getListener(serverAddress); + assertNotNull("serverListener", serverListener); + ServerClosedEvent event = serverListener.getServerClosedEvent(); + assertNotNull("event", event); + assertEquals("serverId", new ServerId(getCluster().getClusterId(), serverAddress), event.getServerId()); + } else if (eventDocument.containsKey("server_description_changed_event")) { + BsonDocument serverDescriptionChangedEventDocument = eventDocument.getDocument("server_description_changed_event"); + ServerAddress serverAddress = new ServerAddress(serverDescriptionChangedEventDocument.getString("address").getValue()); + TestServerListener serverListener = serverListenerFactory.getListener(serverAddress); + assertNotNull("serverListener", serverListener); + assertEquals("serverDescriptionChangedEvents size", 1, serverListener.getServerDescriptionChangedEvents().size()); + ServerDescriptionChangedEvent event = serverListener.getServerDescriptionChangedEvents().get(0); + assertNotNull("event", event); + assertEquals("serverId", new ServerId(getCluster().getClusterId(), serverAddress), event.getServerId()); + assertEqualServerDescriptions(createServerDescriptionFromServerDescriptionDocument(serverDescriptionChangedEventDocument + .getDocument("previousDescription")), + event.getPreviousDescription()); + assertEqualServerDescriptions(createServerDescriptionFromServerDescriptionDocument(serverDescriptionChangedEventDocument + .getDocument("newDescription")), + event.getNewDescription()); + } else { + throw new IllegalArgumentException("Unsupported event type: " + eventDocument.keySet().iterator().next()); + } + } + + assertFalse(clusterDescriptionChangedEventIterator.hasNext()); + } + + private void assertEqualClusterDescriptions(final ClusterDescription expected, final ClusterDescription actual) { + assertEquals(expected.getType(), actual.getType()); + assertEquals(expected.getServerDescriptions().size(), actual.getServerDescriptions().size()); + for (ServerDescription curExpected: expected.getServerDescriptions()) { + ServerDescription curActual = getByServerAddress(curExpected.getAddress(), actual.getServerDescriptions()); + assertNotNull(curActual); + assertEqualServerDescriptions(curExpected, curActual); + } + } + + private ServerDescription getByServerAddress(final ServerAddress serverAddress, final List serverDescriptions) { + for (ServerDescription cur: serverDescriptions) { + if (cur.getAddress().equals(serverAddress)) { + return cur; + } + } + return null; + } + + private void assertEqualServerDescriptions(final ServerDescription expected, final ServerDescription actual) { + assertEquals("address", expected.getAddress(), actual.getAddress()); + assertEquals("ok", expected.isOk(), actual.isOk()); + assertEquals("type", expected.getType(), actual.getType()); + assertEquals("state", expected.getState(), actual.getState()); + assertEquals("setName", expected.getSetName(), actual.getSetName()); + assertEquals("primary", expected.getPrimary(), actual.getPrimary()); + assertEquals("hosts", expected.getHosts(), actual.getHosts()); + assertEquals("arbiters", expected.getArbiters(), actual.getArbiters()); + assertEquals("passives", expected.getPassives(), actual.getPassives()); + } + + private 
ClusterDescription createClusterDescriptionFromClusterDescriptionDocument(final BsonDocument clusterDescriptionDocument) { + List serverDescriptions = new ArrayList<>(); + for (BsonValue cur : clusterDescriptionDocument.getArray("servers")) { + serverDescriptions.add(createServerDescriptionFromServerDescriptionDocument(cur.asDocument())); + } + return new ClusterDescription(getCluster().getSettings().getMode(), + getClusterType(clusterDescriptionDocument.getString("topologyType").getValue(), serverDescriptions), + serverDescriptions); + } + + private ServerDescription createServerDescriptionFromServerDescriptionDocument(final BsonDocument serverDescriptionDocument) { + ServerType serverType = getServerType(serverDescriptionDocument.getString("type").getValue()); + return ServerDescription.builder() + .address(new ServerAddress(serverDescriptionDocument.getString("address").getValue())) + .ok(serverType != ServerType.UNKNOWN) + .state(serverType == ServerType.UNKNOWN ? CONNECTING : CONNECTED) + .type(serverType) + .setName(serverDescriptionDocument.containsKey("setName") + ? serverDescriptionDocument.getString("setName").getValue() + : null) + .primary(serverDescriptionDocument.containsKey("primary") + ? serverDescriptionDocument.getString("primary").getValue() : null) + .hosts(getHostNamesSet(serverDescriptionDocument, "hosts")) + .arbiters(getHostNamesSet(serverDescriptionDocument, "arbiters")) + .passives(getHostNamesSet(serverDescriptionDocument, "passives")) + .build(); + } + + private Set getHostNamesSet(final BsonDocument serverDescriptionDocument, final String fieldName) { + Set hostsSet = new HashSet<>(); + for (BsonValue cur : serverDescriptionDocument.getArray(fieldName)) { + hostsSet.add(cur.asString().getValue()); + } + return hostsSet; + } + + private static class TestServerListenerFactory implements ServerListenerFactory { + private final Map serverAddressServerListenerMap = + new HashMap<>(); + + @Override + public ServerListener create(final ServerAddress serverAddress) { + TestServerListener serverListener = new TestServerListener(); + serverAddressServerListenerMap.put(serverAddress, serverListener); + return serverListener; + } + + TestServerListener getListener(final ServerAddress serverAddress) { + return serverAddressServerListenerMap.get(serverAddress); + } + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDiscoveryAndMonitoringTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDiscoveryAndMonitoringTest.java new file mode 100644 index 00000000000..2a70deaf90d --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerDiscoveryAndMonitoringTest.java @@ -0,0 +1,222 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterType; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.time.Timeout; +import org.bson.BsonDocument; +import org.bson.BsonNull; +import org.bson.BsonValue; +import org.bson.assertions.Assertions; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.Collection; +import java.util.stream.Collectors; + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT; +import static com.mongodb.ClusterFixture.getClusterDescription; +import static com.mongodb.internal.connection.ClusterDescriptionHelper.getPrimaries; +import static com.mongodb.internal.event.EventListenerHelper.NO_OP_CLUSTER_LISTENER; +import static com.mongodb.internal.event.EventListenerHelper.NO_OP_SERVER_LISTENER; +import static java.lang.Character.toLowerCase; +import static java.lang.String.format; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeFalse; + +// See https://github.com/mongodb/specifications/tree/master/source/server-discovery-and-monitoring/tests +@RunWith(Parameterized.class) +public class ServerDiscoveryAndMonitoringTest extends AbstractServerDiscoveryAndMonitoringTest { + + private final String description; + + public ServerDiscoveryAndMonitoringTest(final String description, final BsonDocument definition) { + super(definition); + this.description = description; + init(serverAddress -> NO_OP_SERVER_LISTENER, NO_OP_CLUSTER_LISTENER); + } + + @Before + public void setUp() { + assumeFalse(description.startsWith("pre-42")); + } + + @Test + public void shouldPassAllOutcomes() { + for (BsonValue phase : getDefinition().getArray("phases")) { + if (phase.asDocument().containsKey("responses")) { + for (BsonValue response : phase.asDocument().getArray("responses")) { + applyResponse(response.asArray()); + } + } + if (phase.asDocument().containsKey("applicationErrors")) { + for (BsonValue response : phase.asDocument().getArray("applicationErrors")) { + applyApplicationError(response.asDocument()); + } + } + BsonDocument outcome = phase.asDocument().getDocument("outcome"); + assertTopology(outcome); + assertServers(outcome.getDocument("servers")); + } + } + + private void assertTopology(final BsonDocument outcome) { + String topologyType = outcome.getString("topologyType").getValue(); + assertTopologyType(topologyType); + assertLogicalSessionTimeout(outcome.get("logicalSessionTimeoutMinutes", BsonNull.VALUE)); + assertDriverCompatibility(outcome.get("compatible")); + } + + @Parameterized.Parameters(name = "{0}") + public static Collection data() { + // Unified and monitoring tests have their own test runners so filter them out. 
+ return data("server-discovery-and-monitoring") + .stream().filter(i -> { + Object definition = i[1]; + Assertions.assertTrue("Expected a BsonDocument for definition", definition instanceof BsonDocument); + BsonDocument definitionDocument = (BsonDocument) definition; + String resourcePath = definitionDocument.getString("resourcePath").getValue(); + return !(resourcePath.contains("/tests/unified/") || resourcePath.contains("/tests/monitoring")); + }).collect(Collectors.toList()); + } + + private void assertServers(final BsonDocument servers) { + if (servers.size() != getCluster().getCurrentDescription().getServerDescriptions().size()) { + fail("Cluster description contains servers that are not part of the expected outcome"); + } + + for (String serverName : servers.keySet()) { + assertServer(serverName, servers.getDocument(serverName)); + } + } + + private void assertServer(final String serverName, final BsonDocument expectedServerDescriptionDocument) { + ServerDescription serverDescription = getServerDescription(serverName); + + assertNotNull(serverDescription); + assertEquals(getServerType(expectedServerDescriptionDocument.getString("type").getValue()), serverDescription.getType()); + + if (expectedServerDescriptionDocument.containsKey("error")) { + String expectedErrorMessage = expectedServerDescriptionDocument.getString("error").getValue(); + + Throwable exception = serverDescription.getException(); + assertNotNull(format("Expected exception with message \"%s\" in cluster description", expectedErrorMessage), exception); + String actualErrorMessage = exception.getMessage(); + assertEquals("Expected exception message is not equal to actual one", expectedErrorMessage, + toLowerCase(actualErrorMessage.charAt(0)) + actualErrorMessage.substring(1)); + } + if (expectedServerDescriptionDocument.isObjectId("electionId")) { + assertNotNull(serverDescription.getElectionId()); + assertEquals(expectedServerDescriptionDocument.getObjectId("electionId").getValue(), serverDescription.getElectionId()); + } else { + assertNull(serverDescription.getElectionId()); + } + + if (expectedServerDescriptionDocument.isNumber("setVersion")) { + assertNotNull(serverDescription.getSetVersion()); + assertEquals(expectedServerDescriptionDocument.getNumber("setVersion").intValue(), + serverDescription.getSetVersion().intValue()); + } else { + assertNull(serverDescription.getSetVersion()); + } + + if (expectedServerDescriptionDocument.isString("setName")) { + assertNotNull(serverDescription.getSetName()); + assertEquals(expectedServerDescriptionDocument.getString("setName").getValue(), serverDescription.getSetName()); + } else { + assertNull(serverDescription.getSetName()); + } + + if (expectedServerDescriptionDocument.isDocument("pool")) { + int expectedGeneration = expectedServerDescriptionDocument.getDocument("pool").getNumber("generation").intValue(); + Timeout serverSelectionTimeout = OPERATION_CONTEXT.getTimeoutContext().computeServerSelectionTimeout(); + DefaultServer server = (DefaultServer) getCluster() + .getServersSnapshot(serverSelectionTimeout, OPERATION_CONTEXT.getTimeoutContext()) + .getServer(new ServerAddress(serverName)); + assertEquals(expectedGeneration, server.getConnectionPool().getGeneration()); + } + } + + private ServerDescription getServerDescription(final String serverName) { + ServerDescription serverDescription = null; + for (ServerDescription cur : getCluster().getCurrentDescription().getServerDescriptions()) { + if (cur.getAddress().equals(new ServerAddress(serverName))) { + 
serverDescription = cur; + break; + } + } + return serverDescription; + } + + private void assertTopologyType(final String topologyType) { + switch (topologyType) { + case "Single": + assertTrue(getCluster().getClass() == SingleServerCluster.class + || (getCluster().getClass() == MultiServerCluster.class + && getClusterDescription(getCluster()).getType() == ClusterType.STANDALONE)); + assertEquals(getClusterType(topologyType, getCluster().getCurrentDescription().getServerDescriptions()), + getCluster().getCurrentDescription().getType()); + break; + case "ReplicaSetWithPrimary": + assertEquals(MultiServerCluster.class, getCluster().getClass()); + assertEquals(getClusterType(topologyType), getCluster().getCurrentDescription().getType()); + assertEquals(1, getPrimaries(getCluster().getCurrentDescription()).size()); + break; + case "ReplicaSetNoPrimary": + assertEquals(MultiServerCluster.class, getCluster().getClass()); + assertEquals(getClusterType(topologyType), getCluster().getCurrentDescription().getType()); + assertEquals(0, getPrimaries(getCluster().getCurrentDescription()).size()); + break; + case "Sharded": + assertEquals(MultiServerCluster.class, getCluster().getClass()); + assertEquals(getClusterType(topologyType), getCluster().getCurrentDescription().getType()); + break; + case "LoadBalanced": + assertEquals(LoadBalancedCluster.class, getCluster().getClass()); + assertEquals(getClusterType(topologyType), getCluster().getCurrentDescription().getType()); + break; + case "Unknown": + assertEquals(getClusterType(topologyType), getCluster().getCurrentDescription().getType()); + break; + default: + throw new UnsupportedOperationException("No handler for topology type " + topologyType); + } + } + + private void assertLogicalSessionTimeout(final BsonValue logicalSessionTimeoutMinutes) { + if (logicalSessionTimeoutMinutes.isNull()) { + assertNull(getCluster().getCurrentDescription().getLogicalSessionTimeoutMinutes()); + } else if (logicalSessionTimeoutMinutes.isNumber()) { + assertEquals((Integer) logicalSessionTimeoutMinutes.asNumber().intValue(), + getCluster().getCurrentDescription().getLogicalSessionTimeoutMinutes()); + } + } + + private void assertDriverCompatibility(final BsonValue compatible) { + if (compatible != null) { + assertEquals(compatible.asBoolean().getValue(), getCluster().getCurrentDescription().isCompatibleWithDriver()); + } + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ServerListenerFactory.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerListenerFactory.java new file mode 100644 index 00000000000..f0b14296d7b --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerListenerFactory.java @@ -0,0 +1,24 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.ServerAddress; +import com.mongodb.event.ServerListener; + +interface ServerListenerFactory { + ServerListener create(ServerAddress serverAddress); +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ServerMonitoringModeUtilTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerMonitoringModeUtilTest.java new file mode 100644 index 00000000000..f549207b74a --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerMonitoringModeUtilTest.java @@ -0,0 +1,44 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.connection; + +import com.mongodb.connection.ServerMonitoringMode; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +final class ServerMonitoringModeUtilTest { + @Test + public void fromString() { + assertAll( + () -> assertEquals(ServerMonitoringMode.STREAM, ServerMonitoringModeUtil.fromString("stream")), + () -> assertEquals(ServerMonitoringMode.POLL, ServerMonitoringModeUtil.fromString("poll")), + () -> assertEquals(ServerMonitoringMode.AUTO, ServerMonitoringModeUtil.fromString("auto")), + () -> assertThrows(IllegalArgumentException.class, () -> ServerMonitoringModeUtil.fromString("invalid")) + ); + } + + @Test + public void getValue() { + assertAll( + () -> assertEquals("stream", ServerMonitoringModeUtil.getValue(ServerMonitoringMode.STREAM)), + () -> assertEquals("poll", ServerMonitoringModeUtil.getValue(ServerMonitoringMode.POLL)), + () -> assertEquals("auto", ServerMonitoringModeUtil.getValue(ServerMonitoringMode.AUTO)) + ); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ServerSelectionRttTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerSelectionRttTest.java new file mode 100644 index 00000000000..3f001c8b6a3 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerSelectionRttTest.java @@ -0,0 +1,65 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection; + +import org.bson.BsonDocument; +import org.bson.BsonValue; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import util.JsonPoweredTestHelper; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +import static org.junit.Assert.assertEquals; + +// See https://github.com/mongodb/specifications/tree/master/source/server-selection/tests +@RunWith(Parameterized.class) +public class ServerSelectionRttTest { + private final BsonDocument definition; + + public ServerSelectionRttTest(final String description, final BsonDocument definition) { + this.definition = definition; + } + + @Test + public void shouldPassAllOutcomes() { + RoundTripTimeSampler subject = new RoundTripTimeSampler(); + + BsonValue current = definition.get("avg_rtt_ms"); + if (current.isNumber()) { + subject.addSample(current.asNumber().longValue()); + } + + subject.addSample(definition.getNumber("new_rtt_ms").longValue()); + long expected = definition.getNumber("new_avg_rtt").asNumber().longValue(); + + assertEquals(subject.getAverage(), expected); + } + + @Parameterized.Parameters(name = "{0}") + public static Collection data() { + List data = new ArrayList<>(); + for (BsonDocument testDocument : JsonPoweredTestHelper.getSpecTestDocuments("server-selection/tests/rtt")) { + data.add(new Object[]{testDocument.getString("fileName").getValue(), testDocument}); + } + return data; + } + +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/ServerSelectionWithinLatencyWindowTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerSelectionWithinLatencyWindowTest.java new file mode 100644 index 00000000000..14d6c59b0c6 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/ServerSelectionWithinLatencyWindowTest.java @@ -0,0 +1,154 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.mongodb.internal.connection;
+
+import com.mongodb.ReadPreference;
+import com.mongodb.ServerAddress;
+import com.mongodb.assertions.Assertions;
+import com.mongodb.connection.ClusterDescription;
+import com.mongodb.connection.ClusterSettings;
+import com.mongodb.internal.selector.ReadPreferenceServerSelector;
+import com.mongodb.selector.ServerSelector;
+import org.bson.BsonArray;
+import org.bson.BsonDocument;
+import org.bson.BsonNumber;
+import org.bson.BsonValue;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.mockito.Mockito;
+import util.JsonPoweredTestHelper;
+
+import java.math.BigDecimal;
+import java.math.RoundingMode;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.IntStream;
+
+import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS;
+import static com.mongodb.ClusterFixture.createOperationContext;
+import static com.mongodb.connection.ServerSelectionSelectionTest.buildClusterDescription;
+import static java.util.stream.Collectors.groupingBy;
+import static java.util.stream.Collectors.toMap;
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.when;
+
+/**
+ * A runner for Selection Within Latency Window Tests.
+ */
+@RunWith(Parameterized.class)
+public class ServerSelectionWithinLatencyWindowTest {
+    private final ClusterDescription clusterDescription;
+    private final Cluster.ServersSnapshot serversSnapshot;
+    private final int iterations;
+    private final Outcome outcome;
+
+    public ServerSelectionWithinLatencyWindowTest(
+            @SuppressWarnings("unused") final String fileName,
+            @SuppressWarnings("unused") final String description,
+            final BsonDocument definition) {
+        clusterDescription = buildClusterDescription(definition.getDocument("topology_description"), null);
+        serversSnapshot = serverCatalog(definition.getArray("mocked_topology_state"));
+        iterations = definition.getInt32("iterations").getValue();
+        outcome = Outcome.parse(definition.getDocument("outcome"));
+    }
+
+    @Test
+    public void shouldPassAllOutcomes() {
+        ServerSelector selector = new ReadPreferenceServerSelector(ReadPreference.nearest());
+        OperationContext.ServerDeprioritization emptyServerDeprioritization = createOperationContext(TIMEOUT_SETTINGS)
+                .getServerDeprioritization();
+        ClusterSettings defaultClusterSettings = ClusterSettings.builder().build();
+        Map<ServerAddress, List<ServerTuple>> selectionResultsGroupedByServerAddress = IntStream.range(0, iterations)
+                .mapToObj(i -> BaseCluster.createCompleteSelectorAndSelectServer(selector, clusterDescription, serversSnapshot,
+                        emptyServerDeprioritization, defaultClusterSettings))
+                .collect(groupingBy(serverTuple -> serverTuple.getServerDescription().getAddress()));
+        Map<ServerAddress, BigDecimal> selectionFrequencies = selectionResultsGroupedByServerAddress.entrySet()
+                .stream()
+                .collect(toMap(Map.Entry::getKey, entry -> BigDecimal.valueOf(entry.getValue().size())
+                        .setScale(2, RoundingMode.UNNECESSARY)
+                        .divide(BigDecimal.valueOf(iterations), RoundingMode.HALF_UP)));
+        outcome.assertMatches(selectionFrequencies);
+    }
+
+    @Parameterized.Parameters(name = "{0}: {1}")
+    public static Collection<Object[]> data() {
+        List<Object[]> data = new ArrayList<>();
+        for (BsonDocument testDocument : JsonPoweredTestHelper.getSpecTestDocuments("server-selection/tests/in_window")) {
+            data.add(new Object[]{testDocument.getString("fileName").getValue(),
+                    testDocument.getString("description").getValue(),
+                    testDocument});
+        }
+        return data;
+    }
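+
+    // Editorial sketch for context, not part of the test or the driver's API: the in_window spec tests
+    // above exercise the "power of two random choices" rule - pick two random in-window candidates and
+    // keep the one with fewer in-progress operations. Assuming the ServerTuple and Server.operationCount()
+    // names used elsewhere in this class (and ignoring the spec's requirement that the two picks be
+    // distinct), the rule reduces to:
+    private static ServerTuple pickByOperationCount(final List<ServerTuple> inWindow, final java.util.Random random) {
+        ServerTuple first = inWindow.get(random.nextInt(inWindow.size()));
+        ServerTuple second = inWindow.get(random.nextInt(inWindow.size()));
+        // Fewer in-progress operations wins; ties keep the first pick.
+        return first.getServer().operationCount() <= second.getServer().operationCount() ? first : second;
+    }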
+
+    private static Cluster.ServersSnapshot serverCatalog(final BsonArray mockedTopologyState) {
+        Map<ServerAddress, Server> serverMap = mockedTopologyState.stream()
+                .map(BsonValue::asDocument)
+                .collect(toMap(
+                        el -> new ServerAddress(el.getString("address").getValue()),
+                        el -> {
+                            int operationCount = el.getInt32("operation_count").getValue();
+                            Server server = Mockito.mock(Server.class);
+                            when(server.operationCount()).thenReturn(operationCount);
+                            return server;
+                        }));
+        return serverAddress -> Assertions.assertNotNull(serverMap.get(serverAddress));
+    }
+
+    private static final class Outcome {
+        private final double tolerance;
+        private final Map<ServerAddress, BigDecimal> expectedFrequencies;
+
+        private Outcome(final double tolerance, final Map<ServerAddress, BigDecimal> expectedFrequencies) {
+            this.tolerance = tolerance;
+            this.expectedFrequencies = expectedFrequencies;
+        }
+
+        static Outcome parse(final BsonDocument outcome) {
+            return new Outcome(
+                    outcome.getNumber("tolerance").doubleValue(),
+                    outcome.getDocument("expected_frequencies")
+                            .entrySet()
+                            .stream()
+                            .collect(toMap(
+                                    entry -> new ServerAddress(entry.getKey()),
+                                    entry -> {
+                                        BsonNumber frequency = entry.getValue().asNumber();
+                                        return frequency.isInt32()
+                                                ? BigDecimal.valueOf(frequency.intValue())
+                                                : BigDecimal.valueOf(frequency.doubleValue());
+                                    })));
+        }
+
+        void assertMatches(final Map<ServerAddress, BigDecimal> actualFrequencies) {
+            String msg = String.format("Expected %s,%nactual %s", expectedFrequencies, actualFrequencies);
+            expectedFrequencies.forEach((address, expectedFrequency) -> {
+                BigDecimal actualFrequency = actualFrequencies.getOrDefault(address, BigDecimal.ZERO);
+                if (expectedFrequency.compareTo(BigDecimal.ZERO) == 0 || expectedFrequency.compareTo(BigDecimal.ONE) == 0) {
+                    assertEquals(msg, 0, expectedFrequency.compareTo(actualFrequency));
+                } else {
+                    assertEquals(msg, expectedFrequency.doubleValue(), actualFrequency.doubleValue(), tolerance);
+                }
+            });
+        }
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/SimpleBufferProvider.java b/driver-core/src/test/unit/com/mongodb/internal/connection/SimpleBufferProvider.java
new file mode 100644
index 00000000000..e50f718a67c
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/connection/SimpleBufferProvider.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.mongodb.internal.connection; + +import org.bson.ByteBuf; +import org.bson.ByteBufNIO; + +import java.nio.ByteBuffer; + +public class SimpleBufferProvider implements BufferProvider { + @Override + public ByteBuf getBuffer(final int size) { + return new ByteBufNIO(ByteBuffer.allocate(size)); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/SingleServerClusterSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/SingleServerClusterSpecification.groovy new file mode 100644 index 00000000000..faa04a188f9 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/SingleServerClusterSpecification.groovy @@ -0,0 +1,236 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection + +import com.mongodb.MongoIncompatibleDriverException +import com.mongodb.ServerAddress +import com.mongodb.connection.ClusterDescription +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ClusterSettings +import com.mongodb.connection.ClusterType +import com.mongodb.connection.ServerDescription +import com.mongodb.connection.ServerType +import com.mongodb.event.ClusterListener +import com.mongodb.internal.selector.WritableServerSelector +import spock.lang.Specification + +import static com.mongodb.ClusterFixture.CLIENT_METADATA +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.connection.ClusterConnectionMode.SINGLE +import static com.mongodb.connection.ClusterType.REPLICA_SET +import static com.mongodb.connection.ClusterType.UNKNOWN +import static com.mongodb.connection.ServerConnectionState.CONNECTED +import static com.mongodb.connection.ServerConnectionState.CONNECTING +import static com.mongodb.connection.ServerType.STANDALONE +import static java.util.concurrent.TimeUnit.SECONDS + +class SingleServerClusterSpecification extends Specification { + private static final ClusterId CLUSTER_ID = new ClusterId() + private final ServerAddress firstServer = new ServerAddress('localhost:27017') + + private final TestClusterableServerFactory factory = new TestClusterableServerFactory() + + def setup() { + Time.makeTimeConstant() + } + + def cleanup() { + Time.makeTimeMove() + } + + def 'should update description when the server connects'() { + given: + def cluster = new SingleServerCluster(CLUSTER_ID, + ClusterSettings.builder().mode(SINGLE).hosts(Arrays.asList(firstServer)).build(), factory, CLIENT_METADATA) + + when: + sendNotification(firstServer, STANDALONE) + + then: + cluster.getCurrentDescription().type == ClusterType.STANDALONE + cluster.getCurrentDescription().connectionMode == SINGLE + ClusterDescriptionHelper.getAll(cluster.getCurrentDescription()) == getDescriptions() + + cleanup: + cluster?.close() + } + + def 'should get server when open'() { + given: + def cluster = new SingleServerCluster(CLUSTER_ID, + 
ClusterSettings.builder().mode(SINGLE).hosts(Arrays.asList(firstServer)).build(), factory, CLIENT_METADATA)
+
+        when:
+        sendNotification(firstServer, STANDALONE)
+
+        then:
+        cluster.getServersSnapshot(OPERATION_CONTEXT
+                .getTimeoutContext()
+                .computeServerSelectionTimeout(),
+                OPERATION_CONTEXT.getTimeoutContext()).getServer(firstServer) == factory.getServer(firstServer)
+
+        cleanup:
+        cluster?.close()
+    }
+
+
+    def 'should not get servers snapshot when closed'() {
+        given:
+        def cluster = new SingleServerCluster(CLUSTER_ID,
+                ClusterSettings.builder().mode(SINGLE).hosts(Arrays.asList(firstServer)).build(), factory, CLIENT_METADATA)
+        cluster.close()
+
+        when:
+        cluster.getServersSnapshot(OPERATION_CONTEXT.getTimeoutContext().computeServerSelectionTimeout(),
+                OPERATION_CONTEXT.getTimeoutContext())
+
+        then:
+        thrown(IllegalStateException)
+
+        cleanup:
+        cluster?.close()
+    }
+
+    def 'should have no servers of the wrong type in the description'() {
+        given:
+        def cluster = new SingleServerCluster(CLUSTER_ID,
+                ClusterSettings.builder().mode(SINGLE).requiredClusterType(ClusterType.SHARDED).hosts(Arrays.asList(firstServer)).build(),
+                factory, CLIENT_METADATA)
+
+        when:
+        sendNotification(firstServer, ServerType.REPLICA_SET_PRIMARY)
+
+        then:
+        cluster.getCurrentDescription().type == ClusterType.SHARDED
+        ClusterDescriptionHelper.getAll(cluster.getCurrentDescription()) == [] as Set
+
+        cleanup:
+        cluster?.close()
+    }
+
+    def 'should have server in description when replica set name matches required one'() {
+        given:
+        def cluster = new SingleServerCluster(CLUSTER_ID,
+                ClusterSettings.builder().mode(SINGLE).requiredReplicaSetName('test1').hosts(Arrays.asList(firstServer)).build(),
+                factory, CLIENT_METADATA)
+
+        when:
+        sendNotification(firstServer, ServerType.REPLICA_SET_PRIMARY, 'test1')
+
+        then:
+        cluster.getCurrentDescription().type == REPLICA_SET
+        ClusterDescriptionHelper.getAll(cluster.getCurrentDescription()) == getDescriptions()
+
+        cleanup:
+        cluster?.close()
+    }
+
+    def 'getServer should throw when cluster is incompatible'() {
+        given:
+        def cluster = new SingleServerCluster(CLUSTER_ID, ClusterSettings.builder().mode(SINGLE).hosts(Arrays.asList(firstServer))
+                .serverSelectionTimeout(1, SECONDS).build(), factory, CLIENT_METADATA)
+        sendNotification(firstServer, getBuilder(firstServer).minWireVersion(1000).maxWireVersion(1000).build())
+
+        when:
+        cluster.selectServer(new WritableServerSelector(), OPERATION_CONTEXT)
+
+        then:
+        thrown(MongoIncompatibleDriverException)
+
+        cleanup:
+        cluster?.close()
+    }
+
+    def 'should connect to server'() {
+        given:
+        def cluster = new SingleServerCluster(CLUSTER_ID, ClusterSettings.builder().mode(SINGLE).hosts([firstServer]).build(),
+                factory, CLIENT_METADATA)
+
+        when:
+        cluster.connect()
+
+        then:
+        factory.getServer(firstServer).connectCount == 1
+    }
+
+    def 'should fire cluster events'() {
+        given:
+        def serverDescription = ServerDescription.builder()
+                .address(firstServer)
+                .ok(true)
+                .state(CONNECTED)
+                .type(ServerType.REPLICA_SET_SECONDARY)
+                .hosts(new HashSet(['localhost:27017', 'localhost:27018', 'localhost:27019']))
+                .build()
+        def initialDescription = new ClusterDescription(SINGLE, UNKNOWN,
+                [ServerDescription.builder().state(CONNECTING).address(firstServer).build()])
+        def listener = Mock(ClusterListener)
+        when:
+        def cluster = new SingleServerCluster(CLUSTER_ID, ClusterSettings.builder().mode(SINGLE).hosts([firstServer])
+                .addClusterListener(listener).build(),
+                factory, CLIENT_METADATA)
+
+        then:
+        1 * 
listener.clusterOpening { it.clusterId == CLUSTER_ID } + 1 * listener.clusterDescriptionChanged { + it.clusterId == CLUSTER_ID && + it.previousDescription == new ClusterDescription(SINGLE, UNKNOWN, []) && + it.newDescription == initialDescription + } + + when: + factory.getServer(firstServer).sendNotification(serverDescription) + + then: + 1 * listener.clusterDescriptionChanged { + it.clusterId == CLUSTER_ID && + it.previousDescription == initialDescription && + it.newDescription == new ClusterDescription(SINGLE, REPLICA_SET, [serverDescription]) + } + + when: + cluster.close() + + then: + 1 * listener.clusterClosed { it.clusterId == CLUSTER_ID } + } + + def sendNotification(ServerAddress serverAddress, ServerType serverType) { + sendNotification(serverAddress, serverType, null) + } + + def sendNotification(ServerAddress serverAddress, ServerType serverType, String replicaSetName) { + sendNotification(serverAddress, getBuilder(serverAddress, serverType, replicaSetName).build()) + } + + def sendNotification(ServerAddress serverAddress, ServerDescription serverDescription) { + factory.getServer(serverAddress).sendNotification(serverDescription) + } + + + def getDescriptions() { + [factory.getServer(firstServer).description] as Set + } + + ServerDescription.Builder getBuilder(ServerAddress serverAddress) { + ServerDescription.builder().address(serverAddress).type(STANDALONE).ok(true).state(CONNECTED) + } + + ServerDescription.Builder getBuilder(ServerAddress serverAddress, ServerType serverType, String replicaSetName) { + ServerDescription.builder().address(serverAddress).type(serverType).setName(replicaSetName).ok(true).state(CONNECTED) + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/SrvPollingProseTests.java b/driver-core/src/test/unit/com/mongodb/internal/connection/SrvPollingProseTests.java new file mode 100644 index 00000000000..51cc4884f02 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/SrvPollingProseTests.java @@ -0,0 +1,244 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoConfigurationException; +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ClusterSettings; +import com.mongodb.connection.ClusterType; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerSettings; +import com.mongodb.internal.dns.DnsResolver; +import com.mongodb.lang.Nullable; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import javax.naming.NamingException; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; + +import static com.mongodb.ClusterFixture.CLIENT_METADATA; +import static java.util.Arrays.asList; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class SrvPollingProseTests { + + private final ClusterId clusterId = new ClusterId(); + private final String srvHost = "test1.test.build.10gen.cc"; + private final String srvServiceName = "mongodb"; + private final ClusterSettings.Builder settingsBuilder = ClusterSettings.builder() + .mode(ClusterConnectionMode.MULTIPLE) + .requiredClusterType(ClusterType.SHARDED) + .srvHost(srvHost); + private final ClusterableServerFactory serverFactory = mock(ClusterableServerFactory.class); + private final String firstHost = "localhost.test.build.10gen.cc:27017"; + private final String secondHost = "localhost.test.build.10gen.cc:27018"; + private final String thirdHost = "localhost.test.build.10gen.cc:27019"; + private final String fourthHost = "localhost.test.build.10gen.cc:27020"; + + private final List initialHosts = asList(firstHost, secondHost); + private DnsSrvRecordMonitor dnsSrvRecordMonitor; + private DnsMultiServerCluster cluster; + + @BeforeEach + public void beforeEach() { + when(serverFactory.getSettings()).thenReturn(ServerSettings.builder().build()); + when(serverFactory.create(any(), any())).thenReturn(mock(ClusterableServer.class)); + } + + @AfterEach + public void afterEach() { + if (cluster != null) { + cluster.close(); + } + } + + // 1. Addition of a new DNS record + @Test + public void shouldAddDnsRecord() { + List updatedHosts = asList(firstHost, secondHost, thirdHost); + + initCluster(updatedHosts); + assertEquals(setOf(updatedHosts), clusterHostsSet()); + } + + // 2. Removal of an existing DNS record + @Test + public void shouldRemoveDnsRecord() { + List updatedHosts = asList(firstHost); + + initCluster(updatedHosts); + assertEquals(setOf(updatedHosts), clusterHostsSet()); + } + + // 3. Replacement of a DNS record + @Test + public void shouldReplaceDnsRecord() { + List updatedHosts = asList(firstHost, thirdHost); + + initCluster(updatedHosts); + assertEquals(setOf(updatedHosts), clusterHostsSet()); + } + + // 4. Replacement of both existing DNS records with *one* new record + @Test + public void shouldReplaceTwoDnsRecordsWithOne() { + List updatedHosts = asList(thirdHost); + + initCluster(updatedHosts); + assertEquals(setOf(updatedHosts), clusterHostsSet()); + } + + // 5. 
Replacement of both existing DNS records with *two* new records + @Test + public void shouldReplaceTwoDnsRecordsWithTwoNewOnes() { + List<String> updatedHosts = asList(thirdHost, fourthHost); + + initCluster(updatedHosts); + assertEquals(setOf(updatedHosts), clusterHostsSet()); + } + + // 6. DNS record lookup timeout + // Unimplemented as DnsResolver doesn't throw a different exception for timeouts + + // 7. DNS record lookup failure + @Test + public void shouldIgnoreDnsRecordLookupFailure() { + initCluster(new MongoConfigurationException("Unable to look up SRV record for host " + srvHost, new NamingException())); + assertEquals(setOf(initialHosts), clusterHostsSet()); + } + + // 8. Removal of all DNS SRV records + // Unimplemented: the test is supposed to assert that the hosts are unchanged, but the driver actually changes them + + // 9. Test that SRV polling is not done for load balanced clusters + // Unimplemented because DnsMultiServerCluster is not used for load balanced clusters + + // 10. All DNS records are selected (srvMaxHosts = 0) + @Test + public void shouldUseAllRecordsWhenSrvMaxHostsIsZero() { + List<String> updatedHosts = asList(firstHost, thirdHost, fourthHost); + + initCluster(updatedHosts, 0); + assertEquals(setOf(updatedHosts), clusterHostsSet()); + } + + // 11. All DNS records are selected (srvMaxHosts >= records) + @Test + public void shouldUseAllRecordsWhenSrvMaxHostsIsGreaterThanOrEqualToNumSrvRecords() { + List<String> updatedHosts = asList(thirdHost, fourthHost); + + initCluster(updatedHosts, 2); + assertEquals(setOf(updatedHosts), clusterHostsSet()); + } + + // 12. New DNS records are randomly selected (srvMaxHosts > 0) + @Test + public void shouldUseSrvMaxHostsWhenSrvMaxHostsIsLessThanNumSrvRecords() { + int srvMaxHosts = 2; + List<String> updatedHosts = asList(firstHost, thirdHost, fourthHost); + initCluster(updatedHosts, srvMaxHosts); + + assertEquals(srvMaxHosts, clusterHostsSet().size()); + assertTrue(clusterHostsSet().contains(firstHost)); + assertTrue(updatedHosts.containsAll(clusterHostsSet())); + } + + private Set<String> clusterHostsSet() { + return cluster.getCurrentDescription().getServerDescriptions().stream() + .map(ServerDescription::getAddress) + .map(ServerAddress::toString) + .collect(Collectors.toSet()); + } + + private Set<String> setOf(final List<String> list) { + return new HashSet<>(list); + } + + private void initCluster(final List<String> updatedHosts) { + initCluster(new TestDnsResolver(asList(initialHosts, updatedHosts)), null); + } + + private void initCluster(final List<String> updatedHosts, @Nullable final Integer srvMaxHosts) { + initCluster(new TestDnsResolver(asList(initialHosts, updatedHosts)), srvMaxHosts); + } + + private void initCluster(final RuntimeException lastResponseException) { + initCluster(new TestDnsResolver(asList(initialHosts), lastResponseException), null); + } + + private void initCluster(final TestDnsResolver dnsResolver, @Nullable final Integer srvMaxHosts) { + DnsSrvRecordMonitorFactory dnsSrvRecordMonitorFactory = mock(DnsSrvRecordMonitorFactory.class); + when(dnsSrvRecordMonitorFactory.create(eq(srvHost), eq(srvServiceName), any())).thenAnswer( + invocation -> { + dnsSrvRecordMonitor = new DefaultDnsSrvRecordMonitor(srvHost, srvServiceName, 10, 10, + invocation.getArgument(2), clusterId, dnsResolver); + return dnsSrvRecordMonitor; + }); + cluster = new DnsMultiServerCluster(clusterId, settingsBuilder.srvMaxHosts(srvMaxHosts).build(), serverFactory, CLIENT_METADATA, + dnsSrvRecordMonitorFactory); + try { + Thread.sleep(100); // racy + } catch (InterruptedException e) { + // ignore + }
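+ // Note: the fixed sleep above is racy, as the source itself acknowledges. A sketch of a + // more deterministic wait, kept as a comment because it assumes a hypothetical expectedHosts + // parameter and an illustrative deadline and poll interval that are not part of the original test: + // + // long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(5); + // while (!clusterHostsSet().equals(setOf(expectedHosts)) && System.nanoTime() < deadline) { + // Thread.sleep(10); + // }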
+ } + + private static final class TestDnsResolver implements DnsResolver { + private final List<List<String>> responses; + private final RuntimeException lastResponseException; + private int curPos = 0; + + TestDnsResolver(final List<List<String>> responses) { + this(responses, null); + } + + TestDnsResolver(final List<List<String>> responses, final RuntimeException lastResponseException) { + this.responses = responses; + this.lastResponseException = lastResponseException; + } + + @Override + public List<String> resolveHostFromSrvRecords(final String srvHost, final String srvServiceName) { + List<String> retVal; + if (curPos >= responses.size() && lastResponseException != null) { + throw lastResponseException; + } else if (curPos >= responses.size() - 1) { + retVal = responses.get(responses.size() - 1); + } else { + retVal = responses.get(curPos); + } + curPos++; + return retVal; + } + + @Override + public String resolveAdditionalQueryParametersFromTxtRecords(final String host) { + throw new UnsupportedOperationException(); + } + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/StreamFactoryHelperTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/StreamFactoryHelperTest.java new file mode 100644 index 00000000000..9afd1478fe4 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/StreamFactoryHelperTest.java @@ -0,0 +1,50 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoClientSettings; +import com.mongodb.connection.NettyTransportSettings; +import com.mongodb.connection.TransportSettings; +import com.mongodb.internal.connection.netty.NettyStreamFactoryFactory; +import com.mongodb.spi.dns.InetAddressResolver; +import io.netty.buffer.PooledByteBufAllocator; +import io.netty.channel.nio.NioEventLoopGroup; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +@SuppressWarnings("deprecation") +class StreamFactoryHelperTest { + + @Test + void streamFactoryFactoryIsDerivedFromTransportSettings() { + InetAddressResolver inetAddressResolver = new DefaultInetAddressResolver(); + NettyTransportSettings nettyTransportSettings = TransportSettings.nettyBuilder() + .eventLoopGroup(new NioEventLoopGroup()) + .allocator(PooledByteBufAllocator.DEFAULT) + .socketChannelClass(io.netty.channel.socket.oio.OioSocketChannel.class) + .build(); + + MongoClientSettings settings = MongoClientSettings.builder() + .transportSettings(nettyTransportSettings) + .build(); + + assertEquals(NettyStreamFactoryFactory.builder().applySettings(nettyTransportSettings) + .inetAddressResolver(inetAddressResolver).build(), + StreamFactoryHelper.getAsyncStreamFactoryFactory(settings, inetAddressResolver)); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/StreamHelper.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/StreamHelper.groovy new file mode 100644 index 00000000000..3520c15d931 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/StreamHelper.groovy @@ -0,0 +1,186 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection + +import com.mongodb.ClusterFixture +import com.mongodb.ReadPreference +import com.mongodb.async.FutureResultCallback +import com.mongodb.internal.IgnorableRequestContext +import com.mongodb.internal.TimeoutContext +import com.mongodb.internal.validator.NoOpFieldNameValidator +import org.bson.BsonBinaryWriter +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonReader +import org.bson.BsonWriter +import org.bson.ByteBuf +import org.bson.ByteBufNIO +import org.bson.io.BasicOutputBuffer +import org.bson.json.JsonReader + +import java.nio.ByteBuffer +import java.nio.ByteOrder +import java.security.SecureRandom + +import static com.mongodb.connection.ClusterConnectionMode.SINGLE +import static com.mongodb.internal.connection.MessageHelper.LEGACY_HELLO + +class StreamHelper { + private static int nextMessageId = 900000 // Generates a message then adds one to the id + private static final DEFAULT_JSON_RESPONSE = '{connectionId: 1, n: 0, syncMillis: 0, writtenTo: null, err: null, ok: 1 }' + + + private static defaultHeader(messageId) { + header(messageId, DEFAULT_JSON_RESPONSE) + } + + static defaultMessageHeader(messageId) { + messageHeader(messageId, DEFAULT_JSON_RESPONSE) + } + + static defaultReply() { + ByteBuf header = replyHeader() + ByteBuf body = defaultBody() + ByteBuffer reply = ByteBuffer.allocate(header.remaining() + body.remaining()) + append(reply, header) + append(reply, body) + reply.flip() + new ByteBufNIO(reply) + } + + private static append(final ByteBuffer to, final ByteBuf from) { + byte[] bytes = new byte[from.remaining()] + from.get(bytes) + to.put(bytes) + } + + private static defaultReplyHeader() { + replyHeader() + } + + static messageHeader(messageId, json) { + ByteBuffer headerByteBuffer = ByteBuffer.allocate(16).with { + order(ByteOrder.LITTLE_ENDIAN) + putInt(36 + body(json).remaining()) // messageLength + putInt(4) // requestId + putInt(messageId) // responseTo + putInt(1) // opCode + } + headerByteBuffer.flip() + new ByteBufNIO(headerByteBuffer) + } + + private static replyHeader() { + ByteBuffer headerByteBuffer = ByteBuffer.allocate(20).with { + order(ByteOrder.LITTLE_ENDIAN) + putInt(0) // responseFlags + putLong(0) // cursorId + putInt(0) // starting from + putInt(1) // number returned + } + headerByteBuffer.flip() + new ByteBufNIO(headerByteBuffer) + } + + private static header(messageId, json) { + ByteBuffer headerByteBuffer = ByteBuffer.allocate(36).with { + order(ByteOrder.LITTLE_ENDIAN) + putInt(36 + body(json).remaining()) // messageLength + putInt(4) // requestId + putInt(messageId) // responseTo + putInt(1) // opCode + putInt(0) // responseFlags + putLong(0) // cursorId + putInt(0) // starting from + putInt(1) // number returned + } + headerByteBuffer.flip() + new ByteBufNIO(headerByteBuffer) + } + + static headerWithMessageSizeGreaterThanMax(messageId, maxMessageSize) { + ByteBuffer headerByteBuffer = ByteBuffer.allocate(36).with { + order(ByteOrder.LITTLE_ENDIAN) + putInt(maxMessageSize + 1) // messageLength + putInt(4) // requestId + putInt(messageId) // responseTo + putInt(1) // opCode + putInt(0) // responseFlags + putLong(0) // cursorId + putInt(0) // starting from + putInt(1) // number returned + } + headerByteBuffer.flip() + new ByteBufNIO(headerByteBuffer) + } + + static defaultBody() { + body(DEFAULT_JSON_RESPONSE) + } + + static reply(json) { + ByteBuf replyHeader = defaultReplyHeader() + BsonReader reader = new JsonReader(json) +
BasicOutputBuffer outputBuffer = new BasicOutputBuffer() + BsonWriter writer = new BsonBinaryWriter(outputBuffer) + writer.pipe(reader) + + ByteBuffer buffer = ByteBuffer.allocate(replyHeader.remaining() + outputBuffer.size()) + append(buffer, replyHeader) + buffer.put(outputBuffer.toByteArray()) + buffer.flip() + new ByteBufNIO(buffer) + } + + private static body(json) { + BsonReader reader = new JsonReader(json) + BasicOutputBuffer outputBuffer = new BasicOutputBuffer() + BsonWriter writer = new BsonBinaryWriter(outputBuffer) + writer.pipe(reader) + new ByteBufNIO(ByteBuffer.allocate(outputBuffer.size()).put(outputBuffer.toByteArray())).flip() + } + + static generateHeaders(List messageIds) { + boolean ordered = true + List headers = messageIds.collect { defaultHeader(it) } + if (!ordered) { + Collections.shuffle(headers, new SecureRandom()) + } + headers + } + + static hello() { + CommandMessage command = new CommandMessage('admin', + new BsonDocument(LEGACY_HELLO, new BsonInt32(1)), NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), + MessageSettings.builder().build(), SINGLE, null) + ByteBufferBsonOutput outputBuffer = new ByteBufferBsonOutput(new SimpleBufferProvider()) + try { + command.encode(outputBuffer, new OperationContext( + IgnorableRequestContext.INSTANCE, + NoOpSessionContext.INSTANCE, + new TimeoutContext(ClusterFixture.TIMEOUT_SETTINGS), null)) + nextMessageId++ + [outputBuffer.byteBuffers, nextMessageId] + } finally { + outputBuffer.close() + } + } + + static helloAsync() { + hello() + [new FutureResultCallback(), new FutureResultCallback()] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/TestClusterListener.java b/driver-core/src/test/unit/com/mongodb/internal/connection/TestClusterListener.java new file mode 100644 index 00000000000..edf1babd028 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/TestClusterListener.java @@ -0,0 +1,135 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.event.ClusterClosedEvent; +import com.mongodb.event.ClusterDescriptionChangedEvent; +import com.mongodb.event.ClusterListener; +import com.mongodb.event.ClusterOpeningEvent; +import com.mongodb.lang.Nullable; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Predicate; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.Locks.withLock; + +public final class TestClusterListener implements ClusterListener { + @Nullable + private volatile ClusterOpeningEvent clusterOpeningEvent; + @Nullable + private volatile ClusterClosedEvent clusterClosingEvent; + private final ArrayList<ClusterDescriptionChangedEvent> clusterDescriptionChangedEvents = new ArrayList<>(); + private final ReentrantLock lock = new ReentrantLock(); + private final Condition newClusterDescriptionChangedEventCondition = lock.newCondition(); + private final CountDownLatch closedLatch = new CountDownLatch(1); + + @Override + public void clusterOpening(final ClusterOpeningEvent event) { + if (clusterOpeningEvent == null) { + clusterOpeningEvent = event; + } + } + + @Override + public void clusterClosed(final ClusterClosedEvent event) { + if (clusterClosingEvent == null) { + closedLatch.countDown(); + clusterClosingEvent = event; + } + } + + @Override + public void clusterDescriptionChanged(final ClusterDescriptionChangedEvent event) { + notNull("event", event); + withLock(lock, () -> { + clusterDescriptionChangedEvents.add(event); + newClusterDescriptionChangedEventCondition.signalAll(); + }); + } + + @Nullable + public ClusterOpeningEvent getClusterOpeningEvent() { + return clusterOpeningEvent; + } + + @Nullable + public ClusterClosedEvent getClusterClosingEvent() { + return clusterClosingEvent; + } + + public List<ClusterDescriptionChangedEvent> getClusterDescriptionChangedEvents() { + return withLock(lock, () -> new ArrayList<>(clusterDescriptionChangedEvents)); + } + + /** + * Calling this method concurrently with {@link #waitForClusterDescriptionChangedEvents(Predicate, int, Duration)} + * may result in {@link #waitForClusterDescriptionChangedEvents(Predicate, int, Duration)} not working as expected. + */ + public void clearClusterDescriptionChangedEvents() { + withLock(lock, clusterDescriptionChangedEvents::clear); + } + + /** + * Calling this method concurrently with {@link #clearClusterDescriptionChangedEvents()} + * may result in {@link #waitForClusterDescriptionChangedEvents(Predicate, int, Duration)} not working as expected. + */ + public void waitForClusterDescriptionChangedEvents( + final Predicate<ClusterDescriptionChangedEvent> matcher, final int count, final Duration duration) + throws InterruptedException, TimeoutException { + long nanosRemaining = duration.toNanos(); + lock.lock(); + try { + long observedCount = unguardedCount(matcher); + while (observedCount < count) { + if (nanosRemaining <= 0) { + throw new TimeoutException(String.format("Timed out waiting for %d %s events.
The observed count is %d.", + count, ClusterDescriptionChangedEvent.class.getSimpleName(), observedCount)); + } + nanosRemaining = newClusterDescriptionChangedEventCondition.awaitNanos(nanosRemaining); + observedCount = unguardedCount(matcher); + } + } finally { + lock.unlock(); + } + } + + /** + * Waits for the cluster to be closed, which is signaled by a {@link ClusterClosedEvent}. + */ + public void waitForClusterClosedEvent(final Duration duration) + throws InterruptedException, TimeoutException { + boolean await = closedLatch.await(duration.toMillis(), TimeUnit.MILLISECONDS); + if (!await) { + throw new TimeoutException("Timed out waiting for cluster to close"); + } + } + + /** + * Must be guarded by {@link #lock}. + */ + private long unguardedCount(final Predicate<ClusterDescriptionChangedEvent> matcher) { + return clusterDescriptionChangedEvents.stream().filter(matcher).count(); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/TestClusterableServerFactory.java b/driver-core/src/test/unit/com/mongodb/internal/connection/TestClusterableServerFactory.java new file mode 100644 index 00000000000..d0eb7bdbc6b --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/TestClusterableServerFactory.java @@ -0,0 +1,180 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.ServerAddress; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerSettings; +import com.mongodb.connection.ServerType; +import org.bson.types.ObjectId; + +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.connection.ServerConnectionState.CONNECTED; +import static com.mongodb.connection.ServerDescription.MAX_DRIVER_WIRE_VERSION; +import static com.mongodb.internal.event.EventListenerHelper.NO_OP_SERVER_LISTENER; + +public class TestClusterableServerFactory implements ClusterableServerFactory { + private final Map<ServerAddress, TestServer> addressToServerMap = new HashMap<>(); + + @Override + public ClusterableServer create(final Cluster cluster, final ServerAddress serverAddress) { + addressToServerMap.put(serverAddress, new TestServer(serverAddress, cluster, NO_OP_SERVER_LISTENER)); + return addressToServerMap.get(serverAddress); + } + + @Override + public ServerSettings getSettings() { + return ServerSettings.builder().build(); + } + + public TestServer getServer(final ServerAddress serverAddress) { + return addressToServerMap.get(serverAddress); + } + + public void sendNotification(final ServerAddress serverAddress, final ServerDescription serverDescription) { + getServer(serverAddress).sendNotification(serverDescription); + } + + public void sendNotification(final ServerAddress serverAddress, final ServerType serverType) { + getServer(serverAddress).sendNotification(ServerDescription.builder() + .ok(true) + .address(serverAddress) + .type(serverType) + .state(CONNECTED) + .build()); + } + + public void sendNotification(final ServerAddress serverAddress, final ServerType serverType, final List<ServerAddress> hosts) { + sendNotification(serverAddress, serverType, hosts, "test"); + } + + public void sendNotification(final ServerAddress serverAddress, final long roundTripTimeMillis, final ServerType serverType, + final List<ServerAddress> hosts) { + sendNotification(serverAddress, roundTripTimeMillis, serverType, hosts, "test"); + } + + public void sendNotification(final ServerAddress serverAddress, final ServerType serverType, final List<ServerAddress> hosts, + final ServerAddress trueAddress) { + sendNotification(serverAddress, serverType, hosts, "test", trueAddress); + } + + public void sendNotification(final ServerAddress serverAddress, final ServerType serverType, final List<ServerAddress> hosts, + final ObjectId electionId) { + sendNotification(serverAddress, serverType, hosts, "test", electionId); + } + + public void sendNotification(final ServerAddress serverAddress, final ServerType serverType, final List<ServerAddress> hosts, + final List<ServerAddress> passives) { + getServer(serverAddress).sendNotification(getBuilder(serverAddress, + serverType, + hosts, + passives, + true, + "test", + null, + null, 1).build()); + } + + public void sendNotification(final ServerAddress serverAddress, final ServerType serverType, final List<ServerAddress> hosts, + final String setName) { + sendNotification(serverAddress, serverType, hosts, setName, (ObjectId) null); + } + + public void sendNotification(final ServerAddress serverAddress, final long roundTripTimeMillis, final ServerType serverType, + final List<ServerAddress> hosts, final String setName) { + sendNotification(serverAddress, roundTripTimeMillis, serverType, hosts, setName, null); + } + + public void sendNotification(final ServerAddress serverAddress, final ServerType serverType, final List<ServerAddress> hosts, + final
String setName, final ServerAddress trueAddress) { + sendNotification(serverAddress, serverType, hosts, setName, null, trueAddress); + } + + public void sendNotification(final ServerAddress serverAddress, final ServerType serverType, final List<ServerAddress> hosts, + final String setName, final ObjectId electionId) { + getServer(serverAddress).sendNotification(getBuilder(serverAddress, serverType, hosts, Collections.emptyList(), + true, setName, electionId, null, 1) + .build()); + } + + public void sendNotification(final ServerAddress serverAddress, final long roundTripTimeMillis, final ServerType serverType, + final List<ServerAddress> hosts, final String setName, final ObjectId electionId) { + getServer(serverAddress).sendNotification(getBuilder(serverAddress, serverType, hosts, Collections.emptyList(), + true, setName, electionId, null, roundTripTimeMillis) + .build()); + } + + public void sendNotification(final ServerAddress serverAddress, final ServerType serverType, final List<ServerAddress> hosts, + final String setName, final ObjectId electionId, final ServerAddress trueAddress) { + getServer(serverAddress).sendNotification(getBuilder(serverAddress, serverType, hosts, Collections.emptyList(), + true, setName, electionId, trueAddress, 1) + .build()); + } + + public void sendNotification(final ServerAddress serverAddress, final ServerType serverType, final List<ServerAddress> hosts, + final boolean ok) { + getServer(serverAddress).sendNotification(getBuilder(serverAddress, serverType, hosts, Collections.emptyList(), + ok, null, null, null, 1) + .build()); + } + + public ServerDescription getDescription(final ServerAddress server) { + return getServer(server).getDescription(); + } + + public Set<ServerDescription> getDescriptions(final ServerAddress... servers) { + Set<ServerDescription> serverDescriptions = new HashSet<>(); + for (ServerAddress cur : servers) { + serverDescriptions.add(getServer(cur).getDescription()); + } + return serverDescriptions; + } + + private ServerDescription.Builder getBuilder(final ServerAddress serverAddress, final ServerType serverType, + final List<ServerAddress> hosts, final List<ServerAddress> passives, final boolean ok, + final String setName, final ObjectId electionId, final ServerAddress trueAddress, final long roundTripTimeMillis) { + Set<String> hostsSet = new HashSet<>(); + for (ServerAddress cur : hosts) { + hostsSet.add(cur.toString()); + } + + Set<String> passivesSet = new HashSet<>(); + for (ServerAddress cur : passives) { + passivesSet.add(cur.toString()); + } + return ServerDescription.builder() + .address(serverAddress) + .type(serverType) + .ok(ok) + .state(CONNECTED) + .canonicalAddress(trueAddress == null ? serverAddress.toString() : trueAddress.toString()) + .hosts(hostsSet) + .passives(passivesSet) + .setName(setName) + .electionId(electionId) + .roundTripTime(roundTripTimeMillis, TimeUnit.MILLISECONDS) + .maxWireVersion(MAX_DRIVER_WIRE_VERSION) + .setVersion(1); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnection.java b/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnection.java new file mode 100644 index 00000000000..5fbc6dafde0 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnection.java @@ -0,0 +1,104 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.ReadPreference; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.async.SingleResultCallback; +import org.bson.BsonDocument; +import org.bson.FieldNameValidator; +import org.bson.codecs.Decoder; + +@SuppressWarnings({"rawtypes", "unchecked"}) +class TestConnection implements Connection, AsyncConnection { + private final InternalConnection internalConnection; + private final ProtocolExecutor executor; + private CommandProtocol enqueuedCommandProtocol; + + TestConnection(final InternalConnection internalConnection, final ProtocolExecutor executor) { + this.internalConnection = internalConnection; + this.executor = executor; + } + + @Override + public int getCount() { + return 1; + } + + @Override + public TestConnection retain() { + return this; + } + + @Override + public int release() { + return 1; + } + + @Override + public ConnectionDescription getDescription() { + throw new UnsupportedOperationException("Not implemented yet!"); + } + + @Override + public <T> T command(final String database, final BsonDocument command, final FieldNameValidator fieldNameValidator, + final ReadPreference readPreference, final Decoder<T> commandResultDecoder, + final OperationContext operationContext) { + return executeEnqueuedCommandBasedProtocol(operationContext); + } + + @Override + public <T> T command(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, + final ReadPreference readPreference, final Decoder<T> commandResultDecoder, final OperationContext operationContext, + final boolean responseExpected, final MessageSequences sequences) { + return executeEnqueuedCommandBasedProtocol(operationContext); + } + + @Override + public <T> void commandAsync(final String database, final BsonDocument command, final FieldNameValidator fieldNameValidator, + final ReadPreference readPreference, final Decoder<T> commandResultDecoder, final OperationContext operationContext, + final SingleResultCallback<T> callback) { + executeEnqueuedCommandBasedProtocolAsync(operationContext, callback); + } + + @Override + public <T> void commandAsync(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, + final ReadPreference readPreference, final Decoder<T> commandResultDecoder, final OperationContext operationContext, + final boolean responseExpected, final MessageSequences sequences, final SingleResultCallback<T> callback) { + executeEnqueuedCommandBasedProtocolAsync(operationContext, callback); + } + + @Override + public void markAsPinned(final PinningMode pinningMode) { + throw new UnsupportedOperationException(); + } + + @SuppressWarnings("unchecked") + private <T> T executeEnqueuedCommandBasedProtocol(final OperationContext operationContext) { + return (T) executor.execute(enqueuedCommandProtocol, internalConnection, operationContext.getSessionContext()); + } + + @SuppressWarnings("unchecked") + private <T> void executeEnqueuedCommandBasedProtocolAsync(final OperationContext operationContext, + final SingleResultCallback<T> callback) {
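+ // Note: both the synchronous command(...) and asynchronous commandAsync(...) overloads above + // funnel into the protocol enqueued via enqueueProtocol(...). A hedged usage sketch (the + // commandProtocol instance below is hypothetical): + // + // TestConnection connection = new TestConnection(internalConnection, executor); + // connection.enqueueProtocol(commandProtocol); // protocol the next command call will run + // connection.command("admin", command, validator, ReadPreference.primary(), + // new BsonDocumentCodec(), operationContext); // delegates to executor.execute(...)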
executor.executeAsync(enqueuedCommandProtocol, internalConnection, operationContext.getSessionContext(), callback); + } + + void enqueueProtocol(final CommandProtocol protocol) { + enqueuedCommandProtocol = protocol; + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnectionFactory.java b/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnectionFactory.java new file mode 100644 index 00000000000..41a4db991a8 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnectionFactory.java @@ -0,0 +1,33 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.connection.ClusterConnectionMode; + +class TestConnectionFactory implements ConnectionFactory { + @Override + public Connection create(final InternalConnection internalConnection, final ProtocolExecutor executor, + final ClusterConnectionMode clusterConnectionMode) { + return new TestConnection(internalConnection, executor); + } + + @Override + public AsyncConnection createAsync(final InternalConnection internalConnection, final ProtocolExecutor executor, + final ClusterConnectionMode clusterConnectionMode) { + return new TestConnection(internalConnection, executor); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnectionGenerationSupplier.java b/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnectionGenerationSupplier.java new file mode 100644 index 00000000000..491b3986fe2 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnectionGenerationSupplier.java @@ -0,0 +1,32 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.lang.NonNull; +import org.bson.types.ObjectId; + +class TestConnectionGenerationSupplier implements ConnectionGenerationSupplier { + @Override + public int getGeneration() { + return 0; + } + + @Override + public int getGeneration(@NonNull final ObjectId serviceId) { + return 0; + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnectionPool.java b/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnectionPool.java new file mode 100644 index 00000000000..008ae7bf7b7 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnectionPool.java @@ -0,0 +1,168 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoException; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.lang.Nullable; +import org.bson.ByteBuf; +import org.bson.codecs.Decoder; +import org.bson.types.ObjectId; + +import java.util.List; + +public class TestConnectionPool implements ConnectionPool { + + private final MongoException exceptionToThrow; + private int generation; + + public TestConnectionPool() { + exceptionToThrow = null; + } + + @Override + public InternalConnection get(final OperationContext operationContext) { + return new InternalConnection() { + @Override + public ByteBuf getBuffer(final int capacity) { + throw new UnsupportedOperationException("Not implemented yet!"); + } + + @Override + public void sendMessage(final List<ByteBuf> byteBuffers, final int lastRequestId, final OperationContext operationContext) { + throw new UnsupportedOperationException("Not implemented yet!"); + } + + @Override + public <T> T sendAndReceive(final CommandMessage message, final Decoder<T> decoder, final OperationContext operationContext) { + throw new UnsupportedOperationException("Not implemented yet!"); + } + + @Override + public <T> void send(final CommandMessage message, final Decoder<T> decoder, final OperationContext operationContext) { + throw new UnsupportedOperationException(); + } + + @Override + public <T> T receive(final Decoder<T> decoder, final OperationContext operationContext) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean hasMoreToCome() { + throw new UnsupportedOperationException(); + } + + @Override + public <T> void sendAndReceiveAsync(final CommandMessage message, final Decoder<T> decoder, + final OperationContext operationContext, final SingleResultCallback<T> callback) { + throw new UnsupportedOperationException("Not implemented yet!"); + } + + @Override + public ResponseBuffers receiveMessage(final int responseTo, final OperationContext operationContext) { + throw new UnsupportedOperationException("Not implemented yet!"); + } + + @Override + public void sendMessageAsync(final List<ByteBuf> byteBuffers, final int lastRequestId,
final OperationContext operationContext, + final SingleResultCallback<Void> callback) { + throw new UnsupportedOperationException("Not implemented yet!"); + } + + @Override + public void receiveMessageAsync(final int responseTo, final OperationContext operationContext, + final SingleResultCallback<ResponseBuffers> callback) { + throw new UnsupportedOperationException("Not implemented yet!"); + } + + @Override + public ConnectionDescription getDescription() { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public ServerDescription getInitialServerDescription() { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public void open(final OperationContext operationContext) { + throw new UnsupportedOperationException("Not implemented yet"); + } + + @Override + public void openAsync(final OperationContext operationContext, final SingleResultCallback<Void> callback) { + callback.onResult(null, new UnsupportedOperationException("Not implemented yet")); + } + + @Override + public void close() { + throw new UnsupportedOperationException("Not implemented yet!"); + } + + @Override + public boolean opened() { + throw new UnsupportedOperationException("Not implemented yet!"); + } + + @Override + public boolean isClosed() { + throw new UnsupportedOperationException("Not implemented yet!"); + } + + @Override + public int getGeneration() { + return 0; + } + }; + } + + @Override + public void getAsync(final OperationContext operationContext, final SingleResultCallback<InternalConnection> callback) { + if (exceptionToThrow != null) { + callback.onResult(null, exceptionToThrow); + } else { + callback.onResult(get(operationContext), null); + } + } + + @Override + public void invalidate(@Nullable final Throwable cause) { + generation++; + } + + @Override + public void invalidate(final ObjectId serviceId, final int generation) { + throw new UnsupportedOperationException(); + } + + @Override + public void ready() { + } + + @Override + public void close() { + } + + @Override + public int getGeneration() { + return generation; + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnectionPoolListener.java b/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnectionPoolListener.java new file mode 100644 index 00000000000..12008cdec93 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/TestConnectionPoolListener.java @@ -0,0 +1,230 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.event.ConnectionCheckOutFailedEvent; +import com.mongodb.event.ConnectionCheckOutStartedEvent; +import com.mongodb.event.ConnectionCheckedInEvent; +import com.mongodb.event.ConnectionCheckedOutEvent; +import com.mongodb.event.ConnectionClosedEvent; +import com.mongodb.event.ConnectionCreatedEvent; +import com.mongodb.event.ConnectionPoolClearedEvent; +import com.mongodb.event.ConnectionPoolClosedEvent; +import com.mongodb.event.ConnectionPoolCreatedEvent; +import com.mongodb.event.ConnectionPoolListener; +import com.mongodb.event.ConnectionPoolReadyEvent; +import com.mongodb.event.ConnectionReadyEvent; +import com.mongodb.internal.time.StartTime; +import com.mongodb.internal.time.TimePointTest; +import com.mongodb.internal.time.Timeout; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +public class TestConnectionPoolListener implements ConnectionPoolListener { + + private final Set<String> eventTypes; + + private final List<Object> events = new ArrayList<>(); + private final Lock lock = new ReentrantLock(); + private final Condition condition = lock.newCondition(); + private volatile Class<?> waitingForEventClass; + private volatile int waitingForEventCount; + private final AtomicInteger numConnectionsCheckedOut = new AtomicInteger(); + + + public TestConnectionPoolListener() { + this(Arrays.asList("poolCreatedEvent", "poolReadyEvent", "poolClearedEvent", "poolClosedEvent", "connectionCreatedEvent", + "connectionReadyEvent", "connectionClosedEvent", "connectionCheckOutStartedEvent", "connectionCheckOutFailedEvent", + "connectionCheckedOutEvent", "connectionCheckedInEvent", + // These are deprecated, but still used by some tests + "poolOpenedEvent", "connectionAddedEvent", "connectionRemovedEvent")); + } + + public TestConnectionPoolListener(final List<String> eventTypes) { + this.eventTypes = new HashSet<>(eventTypes); + } + + public List<Object> getEvents() { + lock.lock(); + try { + return new ArrayList<>(events); + } finally { + lock.unlock(); + } + } + + public int countEvents(final Class<?> eventClass) { + int eventCount = 0; + for (Object event : getEvents()) { + if (event.getClass().equals(eventClass)) { + eventCount++; + } + } + return eventCount; + } + + public void waitForEvents(final List<Class<?>> eventClasses, final long time, final TimeUnit unit) + throws InterruptedException, TimeoutException { + Timeout timeout = StartTime.now().timeoutAfterOrInfiniteIfNegative(time, unit); + ArrayList<Class<?>> seen = new ArrayList<>(); + + for (Class<?> eventClass : eventClasses) { + waitForEvent(eventClass, 1, TimePointTest.remaining(timeout, unit), unit); + + if (TimePointTest.hasExpired(timeout)) { + throw new TimeoutException("Timed out waiting for event of type " + eventClass + + ".
Timing out after seeing " + seen); + } + seen.add(eventClass); + } + } + + public void waitForEvent(final Class eventClass, final int count, final long time, final TimeUnit unit) + throws InterruptedException, TimeoutException { + lock.lock(); + try { + if (waitingForEventClass != null) { + throw new IllegalStateException("Already waiting for events of class " + waitingForEventClass); + } + waitingForEventClass = eventClass; + waitingForEventCount = count; + if (containsEvent(eventClass, count)) { + return; + } + if (!condition.await(time, unit)) { + throw new TimeoutException("Timed out waiting for " + count + " events of type " + eventClass + + ". The count after timing out is " + countEvents(eventClass)); + } + } finally { + waitingForEventClass = null; + lock.unlock(); + } + } + + + private boolean containsEvent(final Class eventClass, final int expectedEventCount) { + return countEvents(eventClass) >= expectedEventCount; + } + + private void addEvent(final Object event) { + lock.lock(); + try { + events.add(event); + if (containsEvent(waitingForEventClass, waitingForEventCount)) { + if (waitingForEventClass != null) { + waitingForEventClass = null; + condition.signalAll(); + } + } + } finally { + lock.unlock(); + } + } + + @Override + public void connectionPoolCreated(final ConnectionPoolCreatedEvent event) { + if (eventTypes.contains("poolCreatedEvent")) { + addEvent(event); + } + } + + @Override + public void connectionPoolCleared(final ConnectionPoolClearedEvent event) { + if (eventTypes.contains("poolClearedEvent")) { + addEvent(event); + } + } + + @Override + public void connectionPoolReady(final ConnectionPoolReadyEvent event) { + if (eventTypes.contains("poolReadyEvent")) { + addEvent(event); + } + } + + @Override + public void connectionPoolClosed(final ConnectionPoolClosedEvent event) { + if (eventTypes.contains("poolClosedEvent")) { + addEvent(event); + } + } + + @Override + public void connectionCheckOutStarted(final ConnectionCheckOutStartedEvent event) { + if (eventTypes.contains("connectionCheckOutStartedEvent")) { + addEvent(event); + } + } + + @Override + public void connectionCheckedOut(final ConnectionCheckedOutEvent event) { + numConnectionsCheckedOut.incrementAndGet(); + if (eventTypes.contains("connectionCheckedOutEvent")) { + addEvent(event); + } + } + + @Override + public void connectionCheckOutFailed(final ConnectionCheckOutFailedEvent event) { + if (eventTypes.contains("connectionCheckOutFailedEvent")) { + addEvent(event); + } + } + + @Override + public void connectionCheckedIn(final ConnectionCheckedInEvent event) { + numConnectionsCheckedOut.decrementAndGet(); + if (eventTypes.contains("connectionCheckedInEvent")) { + addEvent(event); + } + } + + @Override + public void connectionCreated(final ConnectionCreatedEvent event) { + if (eventTypes.contains("connectionCreatedEvent")) { + addEvent(event); + } + } + + @Override + public void connectionReady(final ConnectionReadyEvent event) { + if (eventTypes.contains("connectionReadyEvent")) { + addEvent(event); + } + } + + @Override + public void connectionClosed(final ConnectionClosedEvent event) { + if (eventTypes.contains("connectionClosedEvent")) { + addEvent(event); + } + } + + public int getNumConnectionsCheckedOut() { + return numConnectionsCheckedOut.get(); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/TestInternalConnection.java b/driver-core/src/test/unit/com/mongodb/internal/connection/TestInternalConnection.java new file mode 100644 index 00000000000..2853780f93a --- 
/dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/TestInternalConnection.java @@ -0,0 +1,278 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoException; +import com.mongodb.ServerAddress; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ConnectionId; +import com.mongodb.connection.ServerConnectionState; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerId; +import com.mongodb.connection.ServerType; +import com.mongodb.internal.async.SingleResultCallback; +import org.bson.BsonBinaryReader; +import org.bson.BsonDocument; +import org.bson.ByteBuf; +import org.bson.ByteBufNIO; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.Decoder; +import org.bson.io.BsonInput; +import org.bson.io.ByteBufferBsonInput; + +import java.nio.Buffer; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.util.Collections; +import java.util.Deque; +import java.util.LinkedList; +import java.util.List; + +import static com.mongodb.internal.connection.ProtocolHelper.getCommandFailureException; +import static com.mongodb.internal.connection.ProtocolHelper.isCommandOk; +import static com.mongodb.internal.operation.ServerVersionHelper.LATEST_WIRE_VERSION; + +class TestInternalConnection implements InternalConnection { + + private static class Interaction { + private ResponseBuffers responseBuffers; + private RuntimeException receiveException; + private RuntimeException sendException; + } + + private final ConnectionDescription description; + private final ServerDescription serverDescription; + private final BufferProvider bufferProvider; + private final Deque<Interaction> replies; + private final List<BsonInput> sent; + private boolean opened; + private boolean closed; + + TestInternalConnection(final ServerId serverId) { + this(serverId, ServerType.UNKNOWN); + } + + TestInternalConnection(final ServerId serverId, final ServerType serverType) { + this.description = new ConnectionDescription(new ConnectionId(serverId), LATEST_WIRE_VERSION, serverType, 0, 0, 0, + Collections.emptyList()); + this.serverDescription = ServerDescription.builder() + .address(new ServerAddress("localhost", 27017)) + .type(serverType) + .state(ServerConnectionState.CONNECTED).build(); + this.bufferProvider = new SimpleBufferProvider(); + + this.replies = new LinkedList<>(); + this.sent = new LinkedList<>(); + } + + public void enqueueReply(final ResponseBuffers responseBuffers) { + Interaction interaction = new Interaction(); + interaction.responseBuffers = responseBuffers; + replies.add(interaction); + } + + public void enqueueSendMessageException(final RuntimeException e) { + Interaction interaction = new Interaction(); + interaction.sendException = e; + replies.add(interaction); + } + + public void enqueueReceiveMessageException(final RuntimeException e) { + Interaction interaction = new Interaction();
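+ // Each queued Interaction scripts one exchange: exactly one of responseBuffers, sendException, + // or receiveException is set, and sendMessage/receiveMessage consume the queue in FIFO order. + // A hedged usage sketch (reply-buffer construction elided, exception message illustrative): + // + // connection.enqueueReply(replyBuffers); // first exchange succeeds + // connection.enqueueReceiveMessageException(new MongoException("scripted read failure"));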
interaction.receiveException = e; + replies.add(interaction); + } + + public List<BsonInput> getSent() { + return sent; + } + + @Override + public ConnectionDescription getDescription() { + return description; + } + + @Override + public ServerDescription getInitialServerDescription() { + return serverDescription; + } + + public void open(final OperationContext operationContext) { + opened = true; + } + + @Override + public void openAsync(final OperationContext operationContext, final SingleResultCallback<Void> callback) { + opened = true; + callback.onResult(null, null); + } + + @Override + public void close() { + closed = true; + } + + @Override + public boolean opened() { + return opened; + } + + @Override + public boolean isClosed() { + return closed; + } + + @Override + public int getGeneration() { + return 0; + } + + @Override + public void sendMessage(final List<ByteBuf> byteBuffers, final int lastRequestId, final OperationContext operationContext) { + // repackage all byte buffers into a single byte buffer... + int totalSize = 0; + for (ByteBuf buf : byteBuffers) { + totalSize += buf.remaining(); + } + + ByteBuffer combined = ByteBuffer.allocate(totalSize); + for (ByteBuf buf : byteBuffers) { + combined.put(buf.array(), 0, buf.remaining()); + } + + ((Buffer) combined).flip(); + + Interaction interaction = replies.getFirst(); + if (interaction.responseBuffers != null) { + ReplyHeader header = replaceResponseTo(interaction.responseBuffers.getReplyHeader(), lastRequestId); + interaction.responseBuffers = (new ResponseBuffers(header, interaction.responseBuffers.getBodyByteBuffer())); + + sent.add(new ByteBufferBsonInput(new ByteBufNIO(combined))); + } else if (interaction.sendException != null) { + replies.removeFirst(); + throw interaction.sendException; + } + } + + @Override + public <T> T sendAndReceive(final CommandMessage message, final Decoder<T> decoder, final OperationContext operationContext) { + try (ByteBufferBsonOutput bsonOutput = new ByteBufferBsonOutput(this)) { + message.encode(bsonOutput, operationContext); + sendMessage(bsonOutput.getByteBuffers(), message.getId(), operationContext); + } + try (ResponseBuffers responseBuffers = receiveMessage(message.getId(), operationContext)) { + boolean commandOk = isCommandOk(new BsonBinaryReader(new ByteBufferBsonInput(responseBuffers.getBodyByteBuffer()))); + responseBuffers.reset(); + if (!commandOk) { + throw getCommandFailureException(getResponseDocument(responseBuffers, message, new BsonDocumentCodec()), + description.getServerAddress(), operationContext.getTimeoutContext()); + } + return new ReplyMessage<>(responseBuffers, decoder, message.getId()).getDocument(); + } + } + + @Override + public <T> void send(final CommandMessage message, final Decoder<T> decoder, final OperationContext operationContext) { + throw new UnsupportedOperationException(); + } + + @Override + public <T> T receive(final Decoder<T> decoder, final OperationContext operationContext) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean hasMoreToCome() { + throw new UnsupportedOperationException(); + } + + private <T> T getResponseDocument(final ResponseBuffers responseBuffers, + final CommandMessage commandMessage, final Decoder<T> decoder) { + ReplyMessage<T> replyMessage = new ReplyMessage<>(responseBuffers, decoder, commandMessage.getId()); + responseBuffers.reset(); + return replyMessage.getDocument(); + } + + @Override + public <T> void sendAndReceiveAsync(final CommandMessage message, final Decoder<T> decoder, final OperationContext operationContext, + final SingleResultCallback<T>
callback) { + try { + T result = sendAndReceive(message, decoder, operationContext); + callback.onResult(result, null); + } catch (MongoException ex) { + callback.onResult(null, ex); + } + } + + private ReplyHeader replaceResponseTo(final ReplyHeader header, final int responseTo) { + ByteBuffer headerByteBuffer = ByteBuffer.allocate(36); + headerByteBuffer.order(ByteOrder.LITTLE_ENDIAN); + headerByteBuffer.putInt(header.getMessageLength()); + headerByteBuffer.putInt(header.getRequestId()); + headerByteBuffer.putInt(responseTo); + headerByteBuffer.putInt(1); + headerByteBuffer.putInt(0); + headerByteBuffer.putLong(0); + headerByteBuffer.putInt(0); + headerByteBuffer.putInt(1); + ((Buffer) headerByteBuffer).flip(); + + ByteBufNIO buffer = new ByteBufNIO(headerByteBuffer); + MessageHeader messageHeader = new MessageHeader(buffer, ConnectionDescription.getDefaultMaxMessageSize()); + return new ReplyHeader(buffer, messageHeader); + } + + @Override + public ResponseBuffers receiveMessage(final int responseTo, final OperationContext operationContext) { + if (this.replies.isEmpty()) { + throw new MongoException("Test was not set up properly: too many calls to receiveMessage occurred."); + } + + Interaction interaction = replies.removeFirst(); + if (interaction.responseBuffers != null) { + return interaction.responseBuffers; + } else { + throw interaction.receiveException; + } + } + + @Override + public void sendMessageAsync(final List<ByteBuf> byteBuffers, final int lastRequestId, final OperationContext operationContext, + final SingleResultCallback<Void> callback) { + try { + sendMessage(byteBuffers, lastRequestId, operationContext); + callback.onResult(null, null); + } catch (Exception e) { + callback.onResult(null, e); + } + } + + @Override + public void receiveMessageAsync(final int responseTo, final OperationContext operationContext, + final SingleResultCallback<ResponseBuffers> callback) { + try { + ResponseBuffers buffers = receiveMessage(responseTo, operationContext); + callback.onResult(buffers, null); + } catch (MongoException ex) { + callback.onResult(null, ex); + } + } + + @Override + public ByteBuf getBuffer(final int size) { + return this.bufferProvider.getBuffer(size); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/TestInternalConnectionFactory.java b/driver-core/src/test/unit/com/mongodb/internal/connection/TestInternalConnectionFactory.java new file mode 100644 index 00000000000..7669eab9b91 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/TestInternalConnectionFactory.java @@ -0,0 +1,158 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ConnectionId; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerId; +import com.mongodb.connection.ServerType; +import com.mongodb.internal.async.SingleResultCallback; +import org.bson.ByteBuf; +import org.bson.codecs.Decoder; + +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.atomic.AtomicInteger; + +import static com.mongodb.connection.ServerDescription.getDefaultMaxDocumentSize; + +class TestInternalConnectionFactory implements InternalConnectionFactory { + private final AtomicInteger incrementingId = new AtomicInteger(); + private final List<TestInternalConnection> createdConnections = new CopyOnWriteArrayList<>(); + + @Override + public InternalConnection create(final ServerId serverId, final ConnectionGenerationSupplier connectionGenerationSupplier) { + TestInternalConnection connection = new TestInternalConnection(serverId, connectionGenerationSupplier.getGeneration()); + createdConnections.add(connection); + return connection; + } + + List<TestInternalConnection> getCreatedConnections() { + return createdConnections; + } + + int getNumCreatedConnections() { + return createdConnections.size(); + } + + class TestInternalConnection implements InternalConnection { + private final ConnectionId connectionId; + private final int generation; + private volatile boolean closed; + private volatile boolean opened; + + TestInternalConnection(final ServerId serverId, final int generation) { + this.connectionId = new ConnectionId(serverId, incrementingId.incrementAndGet(), null); + this.generation = generation; + } + + @Override + public int getGeneration() { + return generation; + } + + public void open(final OperationContext operationContext) { + opened = true; + } + + @Override + public void openAsync(final OperationContext operationContext, final SingleResultCallback<Void> callback) { + opened = true; + callback.onResult(null, null); + } + + @Override + public void close() { + closed = true; + } + + @Override + public boolean opened() { + return opened; + } + + @Override + public boolean isClosed() { + return closed; + } + + @Override + public ByteBuf getBuffer(final int size) { + return null; + } + + @Override + public void sendMessage(final List<ByteBuf> byteBuffers, final int lastRequestId, final OperationContext operationContext) { + } + + @Override + public <T> T sendAndReceive(final CommandMessage message, final Decoder<T> decoder, final OperationContext operationContext) { + return null; + } + + @Override + public <T> void send(final CommandMessage message, final Decoder<T> decoder, final OperationContext operationContext) { + } + + @Override + public <T> T receive(final Decoder<T> decoder, final OperationContext operationContext) { + return null; + } + + @Override + public boolean hasMoreToCome() { + return false; + } + + @Override + public <T> void sendAndReceiveAsync(final CommandMessage message, final Decoder<T> decoder, + final OperationContext operationContext, final SingleResultCallback<T> callback) { + callback.onResult(null, null); + } + + @Override + public ResponseBuffers receiveMessage(final int responseTo, final OperationContext operationContext) { + return null; + } + + @Override + public void sendMessageAsync(final List<ByteBuf> byteBuffers, final int lastRequestId, final OperationContext operationContext, + final SingleResultCallback<Void> callback) { + callback.onResult(null, null); + } + + @Override + public void
receiveMessageAsync(final int responseTo, final OperationContext operationContext, + final SingleResultCallback<ResponseBuffers> callback) { + callback.onResult(null, null); + } + + @Override + public ConnectionDescription getDescription() { + return new ConnectionDescription(connectionId, 7, ServerType.UNKNOWN, 1000, + getDefaultMaxDocumentSize(), 100000, Collections.emptyList()); + } + + @Override + public ServerDescription getInitialServerDescription() { + return ServerDescription.builder().build(); // TODO: do we need more than this? + } + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/TestServer.java b/driver-core/src/test/unit/com/mongodb/internal/connection/TestServer.java new file mode 100644 index 00000000000..a7672a6bf66 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/TestServer.java @@ -0,0 +1,105 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoException; +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerId; +import com.mongodb.event.ServerDescriptionChangedEvent; +import com.mongodb.event.ServerListener; +import com.mongodb.internal.async.SingleResultCallback; + +import static com.mongodb.connection.ServerConnectionState.CONNECTING; + +public class TestServer implements ClusterableServer { + private final Cluster cluster; + private final ServerListener serverListener; + private ServerDescription description; + private boolean isClosed; + private final ServerId serverId; + private int connectCount; + + public TestServer(final ServerAddress serverAddress, final Cluster cluster, final ServerListener serverListener) { + this.serverId = new ServerId(new ClusterId(), serverAddress); + this.cluster = cluster; + this.serverListener = serverListener; + this.description = ServerDescription.builder().state(CONNECTING).address(serverId.getAddress()).build(); + sendNotification(ServerDescription.builder().state(CONNECTING).address(serverId.getAddress()).build()); + } + + public void sendNotification(final ServerDescription newDescription) { + ServerDescription currentDescription = description; + description = newDescription; + ServerDescriptionChangedEvent event = new ServerDescriptionChangedEvent(serverId, newDescription, currentDescription); + if (cluster != null) { + cluster.onChange(event); + } + if (serverListener != null) { + serverListener.serverDescriptionChanged(event); + } + } + + @Override + public void resetToConnecting(final MongoException cause) { + sendNotification(ServerDescription.builder().state(CONNECTING).exception(cause).address(serverId.getAddress()).build()); + } + + @Override + public void invalidate(final MongoException cause) { + sendNotification(ServerDescription.builder().state(CONNECTING).exception(cause).address(serverId.getAddress()).build()); + } + + @Override + public void 
close() { + isClosed = true; + } + + @Override + public boolean isClosed() { + return isClosed; + } + + @Override + public void connect() { + connectCount++; + } + + public int getConnectCount() { + return connectCount; + } + + public ServerDescription getDescription() { + return description; + } + + @Override + public Connection getConnection(final OperationContext operationContext) { + throw new UnsupportedOperationException(); + } + + @Override + public void getConnectionAsync(final OperationContext operationContext, final SingleResultCallback<Connection> callback) { + throw new UnsupportedOperationException(); + } + + @Override + public int operationCount() { + return -1; + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/TestServerListener.java b/driver-core/src/test/unit/com/mongodb/internal/connection/TestServerListener.java new file mode 100644 index 00000000000..007074f8cc6 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/TestServerListener.java @@ -0,0 +1,109 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.event.ServerClosedEvent; +import com.mongodb.event.ServerDescriptionChangedEvent; +import com.mongodb.event.ServerListener; +import com.mongodb.event.ServerOpeningEvent; +import com.mongodb.lang.Nullable; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Predicate; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.Locks.withLock; + +public class TestServerListener implements ServerListener { + @Nullable + private volatile ServerOpeningEvent serverOpeningEvent; + @Nullable + private volatile ServerClosedEvent serverClosedEvent; + private final List<ServerDescriptionChangedEvent> serverDescriptionChangedEvents = new ArrayList<>(); + private final Lock lock = new ReentrantLock(); + private final Condition condition = lock.newCondition(); + + @Override + public void serverOpening(final ServerOpeningEvent event) { + serverOpeningEvent = event; + } + + @Override + public void serverClosed(final ServerClosedEvent event) { + serverClosedEvent = event; + } + + @Override + public void serverDescriptionChanged(final ServerDescriptionChangedEvent event) { + notNull("event", event); + withLock(lock, () -> { + serverDescriptionChangedEvents.add(event); + condition.signalAll(); + }); + } + + @Nullable + public ServerOpeningEvent getServerOpeningEvent() { + return serverOpeningEvent; + } + + @Nullable + public ServerClosedEvent getServerClosedEvent() { + return serverClosedEvent; + } + + public List<ServerDescriptionChangedEvent> getServerDescriptionChangedEvents() { + // return a defensive copy so callers are isolated from concurrent mutation of the internal list + return withLock(lock, () -> new ArrayList<>(serverDescriptionChangedEvents));
+ } + + public void waitForServerDescriptionChangedEvents( + final Predicate<ServerDescriptionChangedEvent> matcher, final int count, final Duration duration) + throws InterruptedException, TimeoutException { + if (count <= 0) { + throw new IllegalArgumentException("count must be positive"); + } + long nanosRemaining = duration.toNanos(); + lock.lock(); + try { + long observedCount = unguardedCount(matcher); + while (observedCount < count) { + if (nanosRemaining <= 0) { + throw new TimeoutException(String.format("Timed out waiting for %d %s events. The observed count is %d.", + count, ServerDescriptionChangedEvent.class.getSimpleName(), observedCount)); + } + nanosRemaining = condition.awaitNanos(nanosRemaining); + observedCount = unguardedCount(matcher); + } + } finally { + lock.unlock(); + } + } + + /** + * Must be guarded by {@link #lock}. + */ + private long unguardedCount(final Predicate<ServerDescriptionChangedEvent> matcher) { + return serverDescriptionChangedEvents.stream().filter(matcher).count(); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/TestServerMonitor.java b/driver-core/src/test/unit/com/mongodb/internal/connection/TestServerMonitor.java new file mode 100644 index 00000000000..7b546868f81 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/TestServerMonitor.java @@ -0,0 +1,48 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.inject.Provider; + +class TestServerMonitor implements ServerMonitor { + private final Provider<SdamServerDescriptionManager> sdamProvider; + + TestServerMonitor(final Provider<SdamServerDescriptionManager> sdamProvider) { + this.sdamProvider = sdamProvider; + } + + @Override + public void start() { + } + + @Override + public void connect() { + } + + @Override + public void close() { + } + + @Override + public void cancelCurrentCheck() { + } + + public void updateServerDescription(final ServerDescription serverDescription) { + sdamProvider.get().monitorUpdate(serverDescription); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/TestSessionContext.java b/driver-core/src/test/unit/com/mongodb/internal/connection/TestSessionContext.java new file mode 100644 index 00000000000..4e69b5fc9b9 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/TestSessionContext.java @@ -0,0 +1,132 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.ReadConcern; +import com.mongodb.internal.session.SessionContext; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonTimestamp; + +class TestSessionContext implements SessionContext { + + private BsonDocument clusterTime; + private BsonTimestamp operationTime; + + TestSessionContext(final BsonDocument initialClusterTime) { + this.clusterTime = initialClusterTime; + } + + public BsonTimestamp getOperationTime() { + return operationTime; + } + + @Override + public boolean hasSession() { + return false; + } + + @Override + public boolean isImplicitSession() { + throw new UnsupportedOperationException(); + } + + @Override + public BsonDocument getSessionId() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isCausallyConsistent() { + throw new UnsupportedOperationException(); + } + + @Override + public long getTransactionNumber() { + throw new UnsupportedOperationException(); + } + + @Override + public long advanceTransactionNumber() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean notifyMessageSent() { + return false; + } + + @Override + public void advanceOperationTime(final BsonTimestamp operationTime) { + this.operationTime = operationTime; + } + + @Override + public BsonDocument getClusterTime() { + return clusterTime; + } + + @Override + public void advanceClusterTime(final BsonDocument clusterTime) { + this.clusterTime = clusterTime; + } + + @Override + public boolean isSnapshot() { + throw new UnsupportedOperationException(); + } + + @Override + public void setSnapshotTimestamp(final BsonTimestamp snapshotTimestamp) { + // TODO + } + + @Override + @Nullable + public BsonTimestamp getSnapshotTimestamp() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean hasActiveTransaction() { + return false; + } + + @Override + public ReadConcern getReadConcern() { + return ReadConcern.DEFAULT; + } + + @Override + public void setRecoveryToken(final BsonDocument recoveryToken) { + throw new UnsupportedOperationException(); + } + + @Override + public void clearTransactionContext() { + throw new UnsupportedOperationException(); + } + + @Override + public void markSessionDirty() { + } + + @Override + public boolean isSessionMarkedDirty() { + return false; + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/TimeSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/TimeSpecification.groovy new file mode 100644 index 00000000000..3ba0e1c6d37 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/TimeSpecification.groovy @@ -0,0 +1,42 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection + +import spock.lang.Specification + +class TimeSpecification extends Specification { + + def 'should get the real and frozen time'() { + given: + def now = System.nanoTime() + + expect: + Time.nanoTime() > now + + when: + Time.makeTimeConstant() + + then: + Time.nanoTime() == Time.CONSTANT_TIME + + when: + Time.makeTimeMove() + + then: + Time.nanoTime() > now + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/TimeoutTrackingConnectionGetter.java b/driver-core/src/test/unit/com/mongodb/internal/connection/TimeoutTrackingConnectionGetter.java new file mode 100644 index 00000000000..6fd27893c70 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/TimeoutTrackingConnectionGetter.java @@ -0,0 +1,57 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoTimeoutException; +import com.mongodb.internal.TimeoutSettings; + +import java.util.concurrent.CountDownLatch; + +import static com.mongodb.ClusterFixture.createOperationContext; + +class TimeoutTrackingConnectionGetter implements Runnable { + private final ConnectionPool connectionPool; + private final TimeoutSettings timeoutSettings; + private final CountDownLatch latch = new CountDownLatch(1); + + private volatile boolean gotTimeout; + + TimeoutTrackingConnectionGetter(final ConnectionPool connectionPool, final TimeoutSettings timeoutSettings) { + this.connectionPool = connectionPool; + this.timeoutSettings = timeoutSettings; + } + + boolean isGotTimeout() { + return gotTimeout; + } + + @Override + public void run() { + try { + InternalConnection connection = connectionPool.get(createOperationContext(timeoutSettings)); + connection.close(); + } catch (MongoTimeoutException e) { + gotTimeout = true; + } finally { + latch.countDown(); + } + } + + CountDownLatch getLatch() { + return latch; + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/UsageTrackingConnectionSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/UsageTrackingConnectionSpecification.groovy new file mode 100644 index 00000000000..78d79fba8b2 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/UsageTrackingConnectionSpecification.groovy @@ -0,0 +1,205 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection + +import com.mongodb.ServerAddress +import com.mongodb.async.FutureResultCallback +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ServerId +import com.mongodb.internal.validator.NoOpFieldNameValidator +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.codecs.BsonDocumentCodec +import spock.lang.Specification + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ReadPreference.primary +import static com.mongodb.connection.ClusterConnectionMode.SINGLE + +class UsageTrackingConnectionSpecification extends Specification { + private static final ServerId SERVER_ID = new ServerId(new ClusterId(), new ServerAddress()) + + def 'generation returns wrapped value'() { + when: + def connection = createConnection() + + then: + connection.generation == 0 + } + + def 'openAt should be set on open'() { + when: + def connection = createConnection() + + then: + connection.openedAt == Long.MAX_VALUE + + when: + connection.open(OPERATION_CONTEXT) + + then: + connection.openedAt <= System.currentTimeMillis() + } + + + def 'openAt should be set on open asynchronously'() { + when: + def futureResultCallback = new FutureResultCallback() + def connection = createConnection() + + then: + connection.openedAt == Long.MAX_VALUE + + when: + connection.openAsync(OPERATION_CONTEXT, futureResultCallback) + futureResultCallback.get() + + then: + connection.openedAt <= System.currentTimeMillis() + } + + def 'lastUsedAt should be set on open'() { + when: + def connection = createConnection() + + then: + connection.lastUsedAt == Long.MAX_VALUE + + when: + connection.open(OPERATION_CONTEXT) + + then: + connection.lastUsedAt <= System.currentTimeMillis() + } + + + def 'lastUsedAt should be set on open asynchronously'() { + when: + def futureResultCallback = new FutureResultCallback() + def connection = createConnection() + + then: + connection.lastUsedAt == Long.MAX_VALUE + + when: + connection.openAsync(OPERATION_CONTEXT, futureResultCallback) + futureResultCallback.get() + + then: + connection.lastUsedAt <= System.currentTimeMillis() + } + + def 'lastUsedAt should be set on sendMessage'() { + given: + def connection = createConnection() + connection.open(OPERATION_CONTEXT) + def openedLastUsedAt = connection.lastUsedAt + + when: + connection.sendMessage([], 1, OPERATION_CONTEXT) + + then: + connection.lastUsedAt >= openedLastUsedAt + connection.lastUsedAt <= System.currentTimeMillis() + } + + + def 'lastUsedAt should be set on sendMessage asynchronously'() { + given: + def connection = createConnection() + connection.open(OPERATION_CONTEXT) + def openedLastUsedAt = connection.lastUsedAt + def futureResultCallback = new FutureResultCallback() + + when: + connection.sendMessageAsync([], 1, OPERATION_CONTEXT, futureResultCallback) + futureResultCallback.get() + + then: + connection.lastUsedAt >= openedLastUsedAt + connection.lastUsedAt <= System.currentTimeMillis() + } + + def 'lastUsedAt should be set on receiveMessage'() { + given: + def connection = createConnection() + connection.open(OPERATION_CONTEXT) + def openedLastUsedAt = connection.lastUsedAt + when: + connection.receiveMessage(1, OPERATION_CONTEXT) + + then: + connection.lastUsedAt >= openedLastUsedAt + connection.lastUsedAt <= System.currentTimeMillis() + } + + def 'lastUsedAt should be set on receiveMessage asynchronously'() { + given: + def connection = createConnection() + connection.open(OPERATION_CONTEXT) + def openedLastUsedAt = 
connection.lastUsedAt + def futureResultCallback = new FutureResultCallback() + + when: + connection.receiveMessageAsync(1, OPERATION_CONTEXT, futureResultCallback) + futureResultCallback.get() + + then: + connection.lastUsedAt >= openedLastUsedAt + connection.lastUsedAt <= System.currentTimeMillis() + } + + def 'lastUsedAt should be set on sendAndReceive'() { + given: + def connection = createConnection() + connection.open(OPERATION_CONTEXT) + def openedLastUsedAt = connection.lastUsedAt + + when: + connection.sendAndReceive(new CommandMessage('test', + new BsonDocument('ping', new BsonInt32(1)), NoOpFieldNameValidator.INSTANCE, primary(), + MessageSettings.builder().build(), SINGLE, null), new BsonDocumentCodec(), OPERATION_CONTEXT) + + then: + connection.lastUsedAt >= openedLastUsedAt + connection.lastUsedAt <= System.currentTimeMillis() + } + + def 'lastUsedAt should be set on sendAndReceive asynchronously'() { + given: + def connection = createConnection() + connection.open(OPERATION_CONTEXT) + def openedLastUsedAt = connection.lastUsedAt + def futureResultCallback = new FutureResultCallback() + + when: + connection.sendAndReceiveAsync(new CommandMessage('test', + new BsonDocument('ping', new BsonInt32(1)), NoOpFieldNameValidator.INSTANCE, primary(), + MessageSettings.builder().build(), SINGLE, null), + new BsonDocumentCodec(), OPERATION_CONTEXT, futureResultCallback) + futureResultCallback.get() + + then: + connection.lastUsedAt >= openedLastUsedAt + connection.lastUsedAt <= System.currentTimeMillis() + } + + private static UsageTrackingInternalConnection createConnection() { + new UsageTrackingInternalConnection(new TestInternalConnectionFactory().create(SERVER_ID), + new DefaultConnectionPool.ServiceStateManager()) + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/X509AuthenticatorNoUserNameTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/X509AuthenticatorNoUserNameTest.java new file mode 100644 index 00000000000..5326c8c723d --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/X509AuthenticatorNoUserNameTest.java @@ -0,0 +1,93 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoCredential; +import com.mongodb.ServerAddress; +import com.mongodb.async.FutureResultCallback; +import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ConnectionId; +import com.mongodb.connection.ServerId; +import com.mongodb.connection.ServerType; +import org.bson.io.BsonInput; +import org.junit.Before; +import org.junit.Test; + +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutionException; + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT; +import static com.mongodb.ClusterFixture.getServerApi; +import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE; +import static com.mongodb.internal.connection.MessageHelper.buildSuccessfulReply; +import static com.mongodb.internal.connection.MessageHelper.getApiVersionField; +import static com.mongodb.internal.connection.MessageHelper.getDbField; +import static com.mongodb.internal.operation.ServerVersionHelper.LATEST_WIRE_VERSION; +import static org.junit.Assert.assertEquals; + +public class X509AuthenticatorNoUserNameTest { + private TestInternalConnection connection; + private ConnectionDescription connectionDescriptionThreeSix; + + @Before + public void before() { + connection = new TestInternalConnection(new ServerId(new ClusterId(), new ServerAddress("localhost", 27017))); + connectionDescriptionThreeSix = new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())), + LATEST_WIRE_VERSION, ServerType.STANDALONE, 1000, 16000, + 48000, Collections.emptyList()); + } + + @Test + public void testSuccessfulAuthentication() { + enqueueSuccessfulAuthenticationReply(); + + new X509Authenticator(getCredentialWithCache(), MULTIPLE, getServerApi()) + .authenticate(connection, connectionDescriptionThreeSix, OPERATION_CONTEXT); + + validateMessages(); + } + + @Test + public void testSuccessfulAuthenticationAsync() throws ExecutionException, InterruptedException { + enqueueSuccessfulAuthenticationReply(); + + FutureResultCallback<Void> futureCallback = new FutureResultCallback<>(); + new X509Authenticator(getCredentialWithCache(), MULTIPLE, getServerApi()).authenticateAsync(connection, + connectionDescriptionThreeSix, OPERATION_CONTEXT, futureCallback); + + futureCallback.get(); + + validateMessages(); + } + + private void enqueueSuccessfulAuthenticationReply() { + connection.enqueueReply(buildSuccessfulReply("{ok: 1}")); + } + + private void validateMessages() { + List<BsonInput> sent = connection.getSent(); + String command = MessageHelper.decodeCommandAsJson(sent.get(0)); + assertEquals("{\"authenticate\": 1, \"mechanism\": \"MONGODB-X509\"" + + getDbField("$external") + getApiVersionField() + "}", command); + } + + private MongoCredentialWithCache getCredentialWithCache() { + return new MongoCredentialWithCache(MongoCredential.createMongoX509Credential()); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/X509AuthenticatorUnitTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/X509AuthenticatorUnitTest.java new file mode 100644 index 00000000000..a8b2d7b71d5 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/X509AuthenticatorUnitTest.java @@ -0,0 +1,142 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoCredential; +import com.mongodb.MongoSecurityException; +import com.mongodb.ServerAddress; +import com.mongodb.async.FutureResultCallback; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ServerId; +import org.bson.BsonDocument; +import org.bson.io.BsonInput; +import org.junit.Before; +import org.junit.Test; + +import java.util.List; + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT; +import static com.mongodb.ClusterFixture.getServerApi; +import static com.mongodb.internal.connection.MessageHelper.buildSuccessfulReply; +import static com.mongodb.internal.connection.MessageHelper.getApiVersionField; +import static com.mongodb.internal.connection.MessageHelper.getDbField; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + +public class X509AuthenticatorUnitTest { + private TestInternalConnection connection; + private ConnectionDescription connectionDescription; + private MongoCredential credential; + private X509Authenticator subject; + + @Before + public void before() { + connection = new TestInternalConnection(new ServerId(new ClusterId(), new ServerAddress("localhost", 27017))); + connectionDescription = new ConnectionDescription(new ServerId(new ClusterId(), new ServerAddress())); + credential = MongoCredential.createMongoX509Credential("CN=client,OU=kerneluser,O=10Gen,L=New York City,ST=New York,C=US"); + subject = new X509Authenticator(new MongoCredentialWithCache(credential), ClusterConnectionMode.MULTIPLE, getServerApi()); + } + + @Test + public void testFailedAuthentication() { + enqueueFailedAuthenticationReply(); + + try { + subject.authenticate(connection, connectionDescription, OPERATION_CONTEXT); + fail(); + } catch (MongoSecurityException e) { + // all good + } + } + + @Test + public void testFailedAuthenticationAsync() { + enqueueFailedAuthenticationReply(); + + FutureResultCallback<Void> futureCallback = new FutureResultCallback<>(); + subject.authenticateAsync(connection, connectionDescription, OPERATION_CONTEXT, futureCallback); + + try { + futureCallback.get(); + fail(); + } catch (Throwable t) { + if (!(t instanceof MongoSecurityException)) { + fail(); + } + } + } + + private void enqueueFailedAuthenticationReply() { + ResponseBuffers authenticateReply = buildSuccessfulReply("{ok: 0}"); + + connection.enqueueReply(authenticateReply); + } + + @Test + public void testSuccessfulAuthentication() { + enqueueSuccessfulAuthenticationReply(); + + subject.authenticate(connection, connectionDescription, OPERATION_CONTEXT); + + validateMessages(); + } + + @Test + public void testSuccessfulAuthenticationAsync() { + enqueueSuccessfulAuthenticationReply(); + + FutureResultCallback<Void> futureCallback = new FutureResultCallback<>(); + subject.authenticateAsync(connection, connectionDescription, OPERATION_CONTEXT, futureCallback); + + futureCallback.get(); + + validateMessages(); + } + + @Test + public void 
testSpeculativeAuthentication() { + String speculativeAuthenticateResponse = "{\"dbname\": \"$external\", " + + "\"user\": \"CN=client,OU=kerneluser,O=10Gen,L=New York City,ST=New York,C=US\"}"; + BsonDocument expectedSpeculativeAuthenticateCommand = BsonDocument.parse("{authenticate: 1, " + + "user: \"CN=client,OU=kerneluser,O=10Gen,L=New York City,ST=New York,C=US\", " + + "mechanism: \"MONGODB-X509\", db: \"$external\"}"); + subject.setSpeculativeAuthenticateResponse(BsonDocument.parse(speculativeAuthenticateResponse)); + subject.authenticate(connection, connectionDescription, OPERATION_CONTEXT); + + assertEquals(0, connection.getSent().size()); + assertEquals(expectedSpeculativeAuthenticateCommand, subject.createSpeculativeAuthenticateCommand(connection)); + } + + private void enqueueSuccessfulAuthenticationReply() { + connection.enqueueReply(buildSuccessfulReply("{ok: 1}")); + } + + private void validateMessages() { + List<BsonInput> sent = connection.getSent(); + String command = MessageHelper.decodeCommandAsJson(sent.get(0)); + String expectedCommand = "{\"authenticate\": 1, " + + "\"user\": \"CN=client,OU=kerneluser,O=10Gen,L=New York City,ST=New York,C=US\", " + + "\"mechanism\": \"MONGODB-X509\"" + + getDbField("$external") + + getApiVersionField() + + "}"; + + assertEquals(expectedCommand, command); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/netty/ByteBufSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/netty/ByteBufSpecification.groovy new file mode 100644 index 00000000000..0a59c4e8ad4 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/netty/ByteBufSpecification.groovy @@ -0,0 +1,152 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection.netty + + +import io.netty.buffer.ByteBufAllocator +import org.bson.ByteBufNIO +import spock.lang.Specification + +import java.nio.ByteBuffer + +class ByteBufSpecification extends Specification { + def 'should set position and limit correctly'() { + expect: + buf.capacity() == 16 + buf.position() == 0 + buf.limit() == 16 + + when: + buf.put(new byte[10], 0, 10) + + then: + buf.position() == 10 + buf.limit() == 16 + + when: + buf.flip() + + then: + buf.position() == 0 + buf.limit() == 10 + + when: + buf.position(3) + + then: + buf.position() == 3 + buf.limit() == 10 + + when: + buf.limit(7) + + then: + buf.position() == 3 + buf.limit() == 7 + + when: + buf.get(new byte[4]) + + then: + buf.position() == 7 + buf.limit() == 7 + + where: + buf << [new ByteBufNIO(ByteBuffer.allocate(16)), + new NettyByteBuf(ByteBufAllocator.DEFAULT.buffer(16)) + ] + } + + // the fact that setting the limit on a NettyByteBuf throws an exception is a design flaw in the ByteBuf interface, but one that + // doesn't need to be addressed immediately, as the driver does not need to be able to set the limit while writing to the buffer, + // only when reading.
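+ // A minimal sketch, assuming the read-mode behavior the comment above describes (this example is + // illustrative and not part of the spec itself): after flip() switches the buffer to read mode, + // setting the limit is expected to work, and only the write-mode case in the next test throws: + // def buf = new NettyByteBuf(ByteBufAllocator.DEFAULT.buffer(16)) + // buf.put(new byte[10], 0, 10) // write mode: limit(int) would throw here + // buf.flip() // switch to read mode + // buf.limit(5) // permitted while reading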
+ def 'should throw when setting limit while writing'() { + given: + def buf = new NettyByteBuf(ByteBufAllocator.DEFAULT.buffer(16)) + + when: + buf.limit(10) + + then: + thrown(UnsupportedOperationException) + } + + def 'should manage reference count of proxied Netty ByteBuf correctly'() { + given: + def nettyBuf = ByteBufAllocator.DEFAULT.buffer(16) + + when: + def buf = new NettyByteBuf(nettyBuf) + + then: + nettyBuf.refCnt() == 1 + + when: + buf.retain() + + then: + nettyBuf.refCnt() == 2 + + when: + buf.release() + + then: + nettyBuf.refCnt() == 1 + + when: + buf.release() + + then: + nettyBuf.refCnt() == 0 + } + + def 'should manage reference count of duplicated proxied Netty ByteBuf correctly'() { + given: + def nettyBuf = ByteBufAllocator.DEFAULT.buffer(16) + def buf = new NettyByteBuf(nettyBuf) + + when: + def duplicated = buf.duplicate() + + then: + nettyBuf.refCnt() == 2 + + when: + buf.retain() + + then: + nettyBuf.refCnt() == 3 + + when: + buf.release() + + then: + nettyBuf.refCnt() == 2 + + when: + duplicated.release() + + then: + nettyBuf.refCnt() == 1 + + when: + buf.release() + + then: + nettyBuf.refCnt() == 0 + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/netty/NettyStreamFactoryFactorySpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/netty/NettyStreamFactoryFactorySpecification.groovy new file mode 100644 index 00000000000..a92e4f26ee6 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/netty/NettyStreamFactoryFactorySpecification.groovy @@ -0,0 +1,92 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.connection.netty + +import com.mongodb.ServerAddress +import com.mongodb.connection.SocketSettings +import com.mongodb.connection.SslSettings +import com.mongodb.connection.TransportSettings +import com.mongodb.internal.connection.DefaultInetAddressResolver +import io.netty.buffer.ByteBufAllocator +import io.netty.buffer.UnpooledByteBufAllocator +import io.netty.channel.nio.NioEventLoopGroup +import io.netty.channel.oio.OioEventLoopGroup +import io.netty.channel.socket.nio.NioSocketChannel +import io.netty.channel.socket.oio.OioSocketChannel +import io.netty.handler.ssl.SslContextBuilder +import spock.lang.Specification +import spock.lang.Unroll + +class NettyStreamFactoryFactorySpecification extends Specification { + + def 'should apply NettySettings'() { + given: + def nettySettings = TransportSettings.nettyBuilder() + .allocator(UnpooledByteBufAllocator.DEFAULT) + .socketChannelClass(OioSocketChannel) + .eventLoopGroup(new OioEventLoopGroup()) + .sslContext(SslContextBuilder.forClient().build()) + .build() + + when: + def factoryFactory = NettyStreamFactoryFactory.builder() + .inetAddressResolver(new DefaultInetAddressResolver()) + .applySettings(nettySettings) + .build() + + then: + factoryFactory.getAllocator() == nettySettings.getAllocator() + factoryFactory.getEventLoopGroup() == nettySettings.getEventLoopGroup() + factoryFactory.getSocketChannelClass() == nettySettings.getSocketChannelClass() + factoryFactory.getSslContext() == nettySettings.getSslContext() + } + + @Unroll + def 'should create the expected #description NettyStream'() { + given: + def factory = factoryFactory.create(socketSettings, sslSettings) + + when: + NettyStream stream = factory.create(serverAddress) + + then: + stream.getSettings() == socketSettings + stream.getSslSettings() == sslSettings + stream.getAddress() == serverAddress + stream.getAllocator() == allocator + stream.getSocketChannelClass() == socketChannelClass + stream.getWorkerGroup().getClass() == eventLoopGroupClass + + where: + description | factoryFactory | allocator | socketChannelClass | eventLoopGroupClass + 'default' | DEFAULT_FACTORY | ByteBufAllocator.DEFAULT | NioSocketChannel | NioEventLoopGroup + 'custom' | CUSTOM_FACTORY | UnpooledByteBufAllocator.DEFAULT | OioSocketChannel | OioEventLoopGroup + } + + SocketSettings socketSettings = SocketSettings.builder().build() + SslSettings sslSettings = SslSettings.builder().build() + ServerAddress serverAddress = new ServerAddress() + static final DEFAULT_FACTORY = NettyStreamFactoryFactory.builder() + .inetAddressResolver(new DefaultInetAddressResolver()) + .build() + static final CUSTOM_FACTORY = NettyStreamFactoryFactory.builder() + .allocator(UnpooledByteBufAllocator.DEFAULT) + .socketChannelClass(OioSocketChannel) + .eventLoopGroup(new OioEventLoopGroup()) + .inetAddressResolver(new DefaultInetAddressResolver()) + .build() +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/netty/NettyStreamFactorySpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/connection/netty/NettyStreamFactorySpecification.groovy new file mode 100644 index 00000000000..c60f9a838f3 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/netty/NettyStreamFactorySpecification.groovy @@ -0,0 +1,70 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection.netty + +import com.mongodb.ClusterFixture +import com.mongodb.connection.SocketSettings +import com.mongodb.connection.SslSettings +import io.netty.buffer.PooledByteBufAllocator +import io.netty.buffer.UnpooledByteBufAllocator +import io.netty.channel.nio.NioEventLoopGroup +import io.netty.channel.oio.OioEventLoopGroup +import io.netty.channel.socket.nio.NioSocketChannel +import io.netty.channel.socket.oio.OioSocketChannel +import spock.lang.Specification + +class NettyStreamFactorySpecification extends Specification { + + private static final SOCKET_SETTINGS = SocketSettings.builder().build() + private static final SSL_SETTINGS = SslSettings.builder().enabled(true).invalidHostNameAllowed(true).build() + private static final EVENT_LOOP_GROUP = new OioEventLoopGroup() + private static final SOCKET_CHANNEL_CLASS = OioSocketChannel + private static final ALLOCATOR = UnpooledByteBufAllocator.DEFAULT + + def cleanupSpec() { + EVENT_LOOP_GROUP.shutdownGracefully().awaitUninterruptibly() + } + + def 'should use arguments to create NettyStream'() { + when: + NettyStream stream = factory.create(ClusterFixture.getPrimary()) as NettyStream + + then: + stream.address == ClusterFixture.getPrimary() + stream.settings == SOCKET_SETTINGS + stream.sslSettings == SSL_SETTINGS + stream.socketChannelClass == socketChannelClass + stream.getWorkerGroup().class == eventLoopGroupClass + stream.allocator == allocator + + cleanup: + stream?.close() + if (stream.getWorkerGroup().class != eventLoopGroupClass) { + stream.getWorkerGroup().shutdownGracefully().awaitUninterruptibly() + } + + where: + eventLoopGroupClass | socketChannelClass | allocator | factory + NioEventLoopGroup | NioSocketChannel | PooledByteBufAllocator.DEFAULT | new NettyStreamFactory(SOCKET_SETTINGS, SSL_SETTINGS) + OioEventLoopGroup | NioSocketChannel | PooledByteBufAllocator.DEFAULT | new NettyStreamFactory(SOCKET_SETTINGS, SSL_SETTINGS, + EVENT_LOOP_GROUP) + OioEventLoopGroup | NioSocketChannel | ALLOCATOR | new NettyStreamFactory(SOCKET_SETTINGS, SSL_SETTINGS, + EVENT_LOOP_GROUP, ALLOCATOR) + OioEventLoopGroup | SOCKET_CHANNEL_CLASS | ALLOCATOR | new NettyStreamFactory(SOCKET_SETTINGS, SSL_SETTINGS, + EVENT_LOOP_GROUP, SOCKET_CHANNEL_CLASS, ALLOCATOR) + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/connection/tlschannel/util/UtilTest.java b/driver-core/src/test/unit/com/mongodb/internal/connection/tlschannel/util/UtilTest.java new file mode 100644 index 00000000000..b8e903095d0 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/connection/tlschannel/util/UtilTest.java @@ -0,0 +1,39 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.connection.tlschannel.util; + +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; + +final class UtilTest { + @Test + void getJavaMajorVersion() { + assertAll( + () -> assertEquals(8, Util.getJavaMajorVersion("1.8.0_72-ea")), + () -> assertEquals(9, Util.getJavaMajorVersion("9-ea")), + () -> assertEquals(9, Util.getJavaMajorVersion("9")), + () -> assertEquals(9, Util.getJavaMajorVersion("9.0.1")), + () -> assertEquals(17, Util.getJavaMajorVersion("17")), + () -> assertEquals(19, Util.getJavaMajorVersion("19-ea")), + () -> assertEquals(42, Util.getJavaMajorVersion("42.1.0-ea")) + ); + } + + private UtilTest() { + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/dns/DefaultDnsResolverTest.java b/driver-core/src/test/unit/com/mongodb/internal/dns/DefaultDnsResolverTest.java new file mode 100644 index 00000000000..b300df8e0ac --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/dns/DefaultDnsResolverTest.java @@ -0,0 +1,50 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.dns; + +import com.mongodb.MongoConfigurationException; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; + +import javax.naming.Context; + +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class DefaultDnsResolverTest { + private static final String TEST_HOST = "test1.test.build.10gen.cc"; + private static final String DEFAULT_PROVIDER_URL_VALUE = System.getProperty(Context.PROVIDER_URL); + + @AfterEach + public void resetDefaultProviderUrl() { + if (DEFAULT_PROVIDER_URL_VALUE != null) { + System.setProperty(Context.PROVIDER_URL, DEFAULT_PROVIDER_URL_VALUE); + } else { + // the property was unset before the test ran, so clear it to avoid leaking state into other tests + System.clearProperty(Context.PROVIDER_URL); + } + } + + @Test + public void nonDnsProviderUrlShouldBeIgnored() { + System.setProperty(Context.PROVIDER_URL, "file:///tmp/provider.txt"); + assertDoesNotThrow(() -> new DefaultDnsResolver().resolveHostFromSrvRecords(TEST_HOST, "mongodb")); + } + + @Test + public void dnsProviderUrlShouldNotBeIgnored() { + System.setProperty(Context.PROVIDER_URL, "dns:///mongodb.unknown.server.com"); + assertThrows(MongoConfigurationException.class, () -> new DefaultDnsResolver().resolveHostFromSrvRecords(TEST_HOST, "mongodb")); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/function/AsyncCallbackSupplierTest.java b/driver-core/src/test/unit/com/mongodb/internal/function/AsyncCallbackSupplierTest.java new file mode 100644 index 00000000000..2686beb477f --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/function/AsyncCallbackSupplierTest.java @@ -0,0 +1,374 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.mongodb.internal.function; + +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.async.function.AsyncCallbackSupplier; +import com.mongodb.lang.Nullable; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.function.Consumer; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNotSame; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.fail; + +final class AsyncCallbackSupplierTest { + private static final Object RESULT = new Object(); + + private RuntimeException exception = new RuntimeException(); + private Error exception2 = new Error(); + @Nullable + private Thread getThread; + @Nullable + private Thread finallyThread; + private Callback callback; + + @BeforeEach + void beforeEach() { + callback = new Callback(); + } + + @AfterEach + void afterEach() { + exception = new RuntimeException(); + exception2 = new Error(); + getThread = null; + finallyThread = null; + callback = new Callback(); + } + + @Test + void whenComplete() { + try { + new PredefinedResultAsyncCallbackSupplier(RESULT) + .whenComplete(() -> { + assertNull(finallyThread); + finallyThread = Thread.currentThread(); + }) + .get(callback); + } finally { + callback.assertResult(RESULT); + assertNotNull(finallyThread); + assertNotSame(Thread.currentThread(), finallyThread); + } + } + + @Test + void whenComplete2() { + try { + new PredefinedResultAsyncCallbackSupplier(exception) + .whenComplete(() -> { + assertNull(finallyThread); + finallyThread = Thread.currentThread(); + }) + .get(callback); + } finally { + callback.assertResult(exception); + assertNotNull(finallyThread); + assertNotSame(Thread.currentThread(), finallyThread); + } + } + + @Test + void whenCompleteThrow() { + try { + new PredefinedResultAsyncCallbackSupplier(RESULT) + .whenComplete(() -> { + assertNull(finallyThread); + finallyThread = Thread.currentThread(); + throw exception; + }) + .get(callback); + } finally { + callback.assertResult(exception); + assertNotNull(finallyThread); + assertNotSame(Thread.currentThread(), finallyThread); + } + } + + @Test + void whenCompleteThrow2() { + try { + new PredefinedResultAsyncCallbackSupplier(exception) + .whenComplete(() -> { + assertNull(finallyThread); + finallyThread = Thread.currentThread(); + throw exception2; + }) + .get(callback); + } finally { + callback.assertResult(exception); + assertEquals(1, exception.getSuppressed().length); + assertSame(exception2, exception.getSuppressed()[0]); + assertNotNull(finallyThread); + assertNotSame(Thread.currentThread(), finallyThread); + } + } + + @Test + void whenCompleteCallbackThrows() { + PredefinedResultAsyncCallbackSupplier asyncSupplier = new PredefinedResultAsyncCallbackSupplier(RESULT); + try { + asyncSupplier + .whenComplete(() -> { + assertNull(finallyThread); + finallyThread = Thread.currentThread(); + }) + .get((result, t) -> { + assertNull(getThread); + getThread = Thread.currentThread(); + throw exception; + }); + } finally { + 
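+ // join the supplier's private worker thread before asserting: the callback and the whenComplete + // action run on that thread, and the join makes their getThread/finallyThread writes visible here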
asyncSupplier.waitForCompletion(); + assertNotNull(getThread); + assertNotSame(Thread.currentThread(), getThread); + assertNotNull(finallyThread); + assertNotSame(Thread.currentThread(), finallyThread); + } + } + + @Test + void whenCompleteCallbackThrowsSync() { + try { + assertThrows(exception.getClass(), () -> ((AsyncCallbackSupplier<Object>) callback -> + callback.onResult(RESULT, null)) + .whenComplete(() -> { + assertNull(finallyThread); + finallyThread = Thread.currentThread(); + }) + .get((result, t) -> { + throw exception; + })); + } finally { + assertSame(Thread.currentThread(), finallyThread); + } + } + + @Test + void whenCompleteThrowCallbackThrows() { + PredefinedResultAsyncCallbackSupplier asyncSupplier = new PredefinedResultAsyncCallbackSupplier(RESULT); + try { + asyncSupplier + .whenComplete(() -> { + assertNull(finallyThread); + finallyThread = Thread.currentThread(); + throw exception; + }) + .get((result, t) -> { + assertNull(getThread); + getThread = Thread.currentThread(); + throw exception2; + }); + } finally { + asyncSupplier.waitForCompletion(); + assertNotNull(getThread); + assertNotSame(Thread.currentThread(), getThread); + assertNotNull(finallyThread); + assertNotSame(Thread.currentThread(), finallyThread); + } + } + + @Test + void whenCompleteThrowCallbackThrowsSync() { + try { + assertThrows(exception2.getClass(), () -> ((AsyncCallbackSupplier<Object>) callback -> + callback.onResult(RESULT, null)) + .whenComplete(() -> { + assertNull(finallyThread); + finallyThread = Thread.currentThread(); + throw exception; + }) + .get((result, t) -> { + assertNull(getThread); + getThread = Thread.currentThread(); + throw exception2; + })); + } finally { + assertSame(Thread.currentThread(), finallyThread); + assertSame(Thread.currentThread(), getThread); + } + } + + /** + * If {@link AsyncCallbackSupplier#get(SingleResultCallback)} does not throw an exception and also does not complete its + * callback, then it is impossible to execute the action supplied to {@link AsyncCallbackSupplier#whenComplete(Runnable)}.
+ */ + @Test + void whenCompleteGetThrows() { + PredefinedResultAsyncCallbackSupplier asyncSupplier = new PredefinedResultAsyncCallbackSupplier(callback -> { + throw exception; + }); + try { + asyncSupplier + .whenComplete(() -> { + assertNull(finallyThread); + finallyThread = Thread.currentThread(); + }) + .get((result, t) -> { + assertNull(getThread); + getThread = Thread.currentThread(); + }); + } finally { + asyncSupplier.waitForCompletion(); + assertNull(finallyThread); + assertNull(getThread); + } + } + + @Test + void whenCompleteGetThrowsSync() { + try { + assertThrows(exception.getClass(), () -> ((AsyncCallbackSupplier<Object>) callback -> { + throw exception; + }) + .whenComplete(() -> { + assertNull(finallyThread); + finallyThread = Thread.currentThread(); + }) + .get((result, t) -> { + assertNull(getThread); + getThread = Thread.currentThread(); + })); + } finally { + assertSame(Thread.currentThread(), finallyThread); + assertNull(getThread); + } + } + + @Test + void whenCompleteThrowGetThrowsSync() { + try { + assertThrows(exception.getClass(), () -> ((AsyncCallbackSupplier<Object>) callback -> { + throw exception; + }) + .whenComplete(() -> { + assertNull(finallyThread); + finallyThread = Thread.currentThread(); + throw exception2; + }) + .get((result, t) -> { + assertNull(getThread); + getThread = Thread.currentThread(); + })); + } finally { + assertSame(Thread.currentThread(), finallyThread); + assertNull(getThread); + assertEquals(1, exception.getSuppressed().length); + assertSame(exception2, exception.getSuppressed()[0]); + } + } + + private static final class Callback implements SingleResultCallback<Object> { + private final CompletableFuture<Object> result = new CompletableFuture<>(); + + @Override + public void onResult(@Nullable final Object result, @Nullable final Throwable t) { + if (t != null) { + this.result.completeExceptionally(t); + } else { + this.result.complete(result); + } + } + + void assertResult(final Object expectedResult) { + try { + assertSame(expectedResult, result.get()); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new RuntimeException(e); + } catch (ExecutionException e) { + fail(e); + } + } + + void assertResult(final Throwable expectedT) { + try { + result.get(3, TimeUnit.SECONDS); + fail(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new RuntimeException(e); + } catch (ExecutionException e) { + assertSame(expectedT, e.getCause()); + } catch (TimeoutException e) { + fail(e); + } + } + } + + private static final class PredefinedResultAsyncCallbackSupplier implements AsyncCallbackSupplier<Object> { + @Nullable + private final Object result; + @Nullable + private final Throwable t; + @Nullable + private final Consumer<SingleResultCallback<Object>> get; + @Nullable + private Thread thread; + + PredefinedResultAsyncCallbackSupplier(final Object result) { + this.result = result; + this.t = null; + get = null; + } + + PredefinedResultAsyncCallbackSupplier(final Throwable t) { + this.result = null; + this.t = t; + get = null; + } + + PredefinedResultAsyncCallbackSupplier(final Consumer<SingleResultCallback<Object>> asyncGet) { + this.result = null; + this.t = null; + this.get = asyncGet; + } + + @Override + public void get(final SingleResultCallback<Object> callback) { + thread = new Thread(() -> { + if (get == null) { + callback.onResult(result, t); + } else { + get.accept(callback); // delegate to the supplied asynchronous behavior + } + }); + thread.start(); + } + + void waitForCompletion() { + assertNotNull(thread); + try { + thread.join(TimeUnit.SECONDS.toMillis(3)); + } catch (InterruptedException e) { 
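+ // restore the thread's interrupt status before rethrowing, per the usual interruption-handling idiom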
Thread.currentThread().interrupt(); + throw new RuntimeException(e); + } + } + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/logging/LogMessageTest.java b/driver-core/src/test/unit/com/mongodb/internal/logging/LogMessageTest.java new file mode 100644 index 00000000000..ecf5a0ad014 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/logging/LogMessageTest.java @@ -0,0 +1,189 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.logging; + +import com.mongodb.connection.ClusterId; +import com.mongodb.lang.NonNull; +import com.mongodb.lang.Nullable; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Arrays; +import java.util.List; +import java.util.stream.Stream; + +import static com.mongodb.internal.logging.LogMessage.Entry.Name.COMMAND_NAME; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.DRIVER_CONNECTION_ID; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.DURATION_MS; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.OPERATION_ID; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.REPLY; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.REQUEST_ID; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.SERVER_CONNECTION_ID; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.SERVER_HOST; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.SERVER_PORT; +import static com.mongodb.internal.logging.LogMessage.Entry.Name.SERVICE_ID; + +class LogMessageTest { + + @ParameterizedTest + @MethodSource("provideExpectedWithEntries") + void shouldInterpolateMessage(final String format, final String expectedMessage, final List<LogMessage.Entry> entries) { + LogMessage.UnstructuredLogMessage unstructuredLogMessage = new LogMessage(LogMessage.Component.COMMAND, LogMessage.Level.DEBUG, + "Connection id", new ClusterId(), entries, format).toUnstructuredLogMessage(); + + String actualMessage = unstructuredLogMessage.interpolate(); + Assertions.assertEquals(expectedMessage, actualMessage); + } + + private static Stream<Arguments> provideExpectedWithEntries() { + + String format = "Command \"{}\" succeeded in {} ms using a connection with driver-generated ID {}" + + "[ and server-generated ID {}] to {}:{}[ with service ID {}]. The requestID is {} and the " + + "operation ID is {}. Command reply: {}"; + return Stream.of( + Arguments.of(format, "Command \"create\" succeeded in 5000 ms using a connection with driver-generated ID 1" + + " and server-generated ID 2 to localhost:8080 with service ID 3. The requestID is 333 and the " + + "operation ID is 444. 
Command reply: create", createEntries( + entry(COMMAND_NAME, "create"), + entry(DURATION_MS, 5000), + entry(DRIVER_CONNECTION_ID, 1), + entry(SERVER_CONNECTION_ID, 2), + entry(SERVER_HOST, "localhost"), + entry(SERVER_PORT, 8080), + entry(SERVICE_ID, 3), + entry(REQUEST_ID, 333), + entry(OPERATION_ID, 444), + entry(REPLY, "create") + )), + Arguments.of(format, "Command \"null\" succeeded in null ms using a connection with driver-generated ID null" + + " and server-generated ID 2 to localhost:8080 with service ID 3. The requestID is null and the " + + "operation ID is null. Command reply: null", createEntries( + entry(COMMAND_NAME, null), + entry(DURATION_MS, null), + entry(DRIVER_CONNECTION_ID, null), + entry(SERVER_CONNECTION_ID, 2), + entry(SERVER_HOST, "localhost"), + entry(SERVER_PORT, 8080), + entry(SERVICE_ID, 3), + entry(REQUEST_ID, null), + entry(OPERATION_ID, null), + entry(REPLY, null) + )), Arguments.of(format, "Command \"null\" succeeded in null ms using a connection with driver-generated ID null" + + " to localhost:8080 with service ID 3. The requestID is null and the " + + "operation ID is null. Command reply: null", createEntries( + entry(COMMAND_NAME, null), + entry(DURATION_MS, null), + entry(DRIVER_CONNECTION_ID, null), + entry(SERVER_CONNECTION_ID, null), + entry(SERVER_HOST, "localhost"), + entry(SERVER_PORT, 8080), + entry(SERVICE_ID, 3), + entry(REQUEST_ID, null), + entry(OPERATION_ID, null), + entry(REPLY, null) + )), + Arguments.of(format, "Command \"null\" succeeded in null ms using a connection with driver-generated ID null" + + " to localhost:8080. The requestID is null and the " + + "operation ID is null. Command reply: null", createEntries( + entry(COMMAND_NAME, null), + entry(DURATION_MS, null), + entry(DRIVER_CONNECTION_ID, null), + entry(SERVER_CONNECTION_ID, null), + entry(SERVER_HOST, "localhost"), + entry(SERVER_PORT, 8080), + entry(SERVICE_ID, null), + entry(REQUEST_ID, null), + entry(OPERATION_ID, null), + entry(REPLY, null) + )), + Arguments.of(format, "Command \"create\" succeeded in 5000 ms using a connection with driver-generated ID 1" + + " to localhost:8080. The requestID is 333 and the " + + "operation ID is 444. Command reply: create", createEntries( + entry(COMMAND_NAME, "create"), + entry(DURATION_MS, 5000), + entry(DRIVER_CONNECTION_ID, 1), + entry(SERVER_CONNECTION_ID, null), + entry(SERVER_HOST, "localhost"), + entry(SERVER_PORT, 8080), + entry(SERVICE_ID, null), + entry(REQUEST_ID, 333), + entry(OPERATION_ID, 444), + entry(REPLY, "create") + )), + Arguments.of("Command \"{}\" succeeded in {} ms using a connection with driver-generated ID {}" + + "[ and server-generated ID {}] to {}:{}[ with service ID {}]. The requestID is {} and the " + + "operation ID is {}. Command reply: {}. Command finished", + + "Command \"create\" succeeded in 5000 ms using a connection with driver-generated ID 1" + + " to localhost:8080. The requestID is 333 and the " + + "operation ID is 444. Command reply: create. Command finished", createEntries( + entry(COMMAND_NAME, "create"), + entry(DURATION_MS, 5000), + entry(DRIVER_CONNECTION_ID, 1), + entry(SERVER_CONNECTION_ID, null), + entry(SERVER_HOST, "localhost"), + entry(SERVER_PORT, 8080), + entry(SERVICE_ID, null), + entry(REQUEST_ID, 333), + entry(OPERATION_ID, 444), + entry(REPLY, "create") + )), + Arguments.of("Command \"{}\" succeeded in {} ms using a connection with driver-generated ID {}" + + "[ and server-generated ID {}] to {}:{}[ with service ID {} generated]. 
The requestID is {} and the " + + "operation ID is {}. Command reply: {}.", + + "Command \"create\" succeeded in 5000 ms using a connection with driver-generated ID 1" + + " to localhost:8080 with service ID 1 generated. The requestID is 333 and the " + + "operation ID is 444. Command reply: create.", createEntries( + entry(COMMAND_NAME, "create"), + entry(DURATION_MS, 5000), + entry(DRIVER_CONNECTION_ID, 1), + entry(SERVER_CONNECTION_ID, null), + entry(SERVER_HOST, "localhost"), + entry(SERVER_PORT, 8080), + entry(SERVICE_ID, 1), + entry(REQUEST_ID, 333), + entry(OPERATION_ID, 444), + entry(REPLY, "create") + )), + Arguments.of("Command succeeded.", "Command succeeded.", createEntries( + entry(COMMAND_NAME, "create"), + entry(DURATION_MS, 5000), + entry(DRIVER_CONNECTION_ID, 1), + entry(SERVER_CONNECTION_ID, null), + entry(SERVER_HOST, "localhost"), + entry(SERVER_PORT, 8080), + entry(SERVICE_ID, null), + entry(REQUEST_ID, 333), + entry(OPERATION_ID, 444), + entry(REPLY, "create") + )) + ); + } + + + private static LogMessage.Entry entry(final LogMessage.Entry.Name name, final @Nullable Object key) { + return new LogMessage.Entry(name, key); + } + + @NonNull + private static List createEntries(final LogMessage.Entry... entry) { + return Arrays.asList(entry); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/mockito/InsufficientStubbingDetector.java b/driver-core/src/test/unit/com/mongodb/internal/mockito/InsufficientStubbingDetector.java new file mode 100644 index 00000000000..e26f774b04e --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/mockito/InsufficientStubbingDetector.java @@ -0,0 +1,48 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.mockito; + +import com.mongodb.lang.Nullable; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.util.function.Consumer; + +import static com.mongodb.assertions.Assertions.fail; +import static java.lang.String.format; + +/** + * @see MongoMockito#mock(Class, Consumer) + */ +final class InsufficientStubbingDetector implements Answer { + private boolean enabled; + + InsufficientStubbingDetector() { + } + + @Nullable + @Override + public Void answer(final InvocationOnMock invocation) throws AssertionError { + if (enabled) { + throw fail(format("Insufficient stubbing. Unexpected invocation %s on the object %s.", invocation, invocation.getMock())); + } + return null; + } + + void enable() { + enabled = true; + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/mockito/InsufficientStubbingDetectorDemoTest.java b/driver-core/src/test/unit/com/mongodb/internal/mockito/InsufficientStubbingDetectorDemoTest.java new file mode 100644 index 00000000000..40d33c31288 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/mockito/InsufficientStubbingDetectorDemoTest.java @@ -0,0 +1,72 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.mockito; + +import com.mongodb.internal.binding.ReadBinding; +import com.mongodb.internal.operation.ListCollectionsOperation; +import org.bson.BsonDocument; +import org.bson.codecs.BsonDocumentCodec; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mockito; +import org.mockito.internal.stubbing.answers.ThrowsException; + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.when; + +final class InsufficientStubbingDetectorDemoTest { + + private ListCollectionsOperation operation; + + @BeforeEach + void beforeEach() { + operation = new ListCollectionsOperation<>("db", new BsonDocumentCodec()); + } + + @Test + void mockObjectWithDefaultAnswer() { + ReadBinding binding = Mockito.mock(ReadBinding.class); + assertThrows(NullPointerException.class, () -> operation.execute(binding)); + } + + @Test + void mockObjectWithThrowsException() { + ReadBinding binding = Mockito.mock(ReadBinding.class, + new ThrowsException(new AssertionError("Insufficient stubbing for " + ReadBinding.class))); + assertThrows(AssertionError.class, () -> operation.execute(binding)); + } + + @Test + void mockObjectWithInsufficientStubbingDetector() { + ReadBinding binding = MongoMockito.mock(ReadBinding.class); + assertThrows(AssertionError.class, () -> operation.execute(binding)); + } + + @Test + void stubbingWithThrowsException() { + ReadBinding binding = Mockito.mock(ReadBinding.class, + new ThrowsException(new AssertionError("Unfortunately, you cannot do stubbing"))); + assertThrows(AssertionError.class, () -> when(binding.getOperationContext()).thenReturn(OPERATION_CONTEXT)); + } + + @Test + void stubbingWithInsufficientStubbingDetector() { + MongoMockito.mock(ReadBinding.class, bindingMock -> + when(bindingMock.getOperationContext()).thenReturn(OPERATION_CONTEXT) + ); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/mockito/MongoMockito.java b/driver-core/src/test/unit/com/mongodb/internal/mockito/MongoMockito.java new file mode 100644 index 00000000000..7b6c08a2efb --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/mockito/MongoMockito.java @@ -0,0 +1,80 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.mongodb.internal.mockito;
+
+import com.mongodb.lang.Nullable;
+import org.mockito.Answers;
+import org.mockito.Mockito;
+import org.mockito.internal.stubbing.answers.ThrowsException;
+import org.mockito.stubbing.OngoingStubbing;
+
+import java.util.function.Consumer;
+
+import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.withSettings;
+
+/**
+ * Complements {@link Mockito}.
+ */
+public final class MongoMockito {
+    /**
+     * Is equivalent to calling {@link #mock(Class, Consumer)} with a {@code null} {@code tuner}.
+     */
+    public static <T> T mock(final Class<T> classToMock) {
+        return mock(classToMock, null);
+    }
+
+    /**
+     * This method is similar to {@link Mockito#mock(Class)} but changes the default behavior of the methods of a mock object
+     * such that insufficient stubbing is detected and reported. By default, Mockito uses {@link Answers#RETURNS_DEFAULTS}.
+     * While this answer has potential to save users some stubbing work, the provided convenience may not be worth the cost:
+     * if the default result (often {@code null} for reference types) is insufficient,
+     * one likely gets an unhelpful {@link NullPointerException}
+     * (see {@link InsufficientStubbingDetectorDemoTest#mockObjectWithDefaultAnswer()}),
+     * or a silent incorrect behavior with no clear indication of the mock object method that caused the problem.
+     * Furthermore, a working test that uses mock objects may be unwittingly broken when refactoring production code.
+     * While this particular issue is inherent to tests that use mock objects,
+     * broken tests not indicating clearly what is wrong make matters worse.
+     * <p>
+     * Mockito has {@link ThrowsException},
+     * and at first glance it may seem like using it may help detecting insufficient stubbing.
+     * It can point us to a line where the insufficiently stubbed method was called at, but it cannot tell us the name of that method
+     * (see {@link InsufficientStubbingDetectorDemoTest#mockObjectWithThrowsException()}).
+     * Moreover, a mock object created with {@link ThrowsException} as its default answer cannot be stubbed:
+     * stubbing requires calling methods of the mock object, but they all complete abruptly
+     * (see {@link InsufficientStubbingDetectorDemoTest#stubbingWithThrowsException()}).
+     * Therefore, {@link ThrowsException} is not suitable for detecting insufficient stubbing.
+     * <p>
+     * This method overcomes both of the aforementioned limitations by using {@link InsufficientStubbingDetector} as the default answer
+     * (see {@link InsufficientStubbingDetectorDemoTest#mockObjectWithInsufficientStubbingDetector()},
+     * {@link InsufficientStubbingDetectorDemoTest#stubbingWithInsufficientStubbingDetector()}).
+     * Note also that for convenience, {@link InsufficientStubbingDetector} stubs the {@link Object#toString()} method by using
+     * {@link OngoingStubbing#thenCallRealMethod()}, unless this stubbing is overwritten by the {@code tuner}.
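+     * <p>
+     * A minimal usage sketch (illustrative only; {@code ReadBinding} and the stubbed method mirror
+     * {@link InsufficientStubbingDetectorDemoTest}, and {@code operationContext} stands for whatever value the test needs):
+     * <pre>{@code
+     * ReadBinding binding = MongoMockito.mock(ReadBinding.class, bindingMock ->
+     *         when(bindingMock.getOperationContext()).thenReturn(operationContext));
+     * binding.getOperationContext(); // stubbed, returns operationContext
+     * binding.getReadPreference();   // not stubbed, fails fast with a descriptive AssertionError
+     * }</pre>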
+     */
+    public static <T> T mock(final Class<T> classToMock, @Nullable final Consumer<T> tuner) {
+        final InsufficientStubbingDetector insufficientStubbingDetector = new InsufficientStubbingDetector();
+        final T mock = Mockito.mock(classToMock, withSettings().defaultAnswer(insufficientStubbingDetector));
+        when(mock.toString()).thenCallRealMethod();
+        if (tuner != null) {
+            tuner.accept(mock);
+        }
+        insufficientStubbingDetector.enable();
+        return mock;
+    }
+
+    private MongoMockito() {
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncChangeStreamBatchCursorSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncChangeStreamBatchCursorSpecification.groovy
new file mode 100644
index 00000000000..998c0a28b6e
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncChangeStreamBatchCursorSpecification.groovy
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.operation
+
+import com.mongodb.MongoException
+import com.mongodb.async.FutureResultCallback
+import com.mongodb.internal.TimeoutContext
+import com.mongodb.internal.async.SingleResultCallback
+import com.mongodb.internal.binding.AsyncReadBinding
+import com.mongodb.internal.connection.OperationContext
+import org.bson.Document
+import spock.lang.Specification
+
+import static java.util.concurrent.TimeUnit.SECONDS
+
+class AsyncChangeStreamBatchCursorSpecification extends Specification {
+
+    def 'should call the underlying AsyncCommandBatchCursor'() {
+        given:
+        def changeStreamOpertation = Stub(ChangeStreamOperation)
+        def binding = Mock(AsyncReadBinding)
+        def operationContext = Mock(OperationContext)
+        def timeoutContext = Mock(TimeoutContext)
+        binding.getOperationContext() >> operationContext
+        operationContext.getTimeoutContext() >> timeoutContext
+        timeoutContext.hasTimeoutMS() >> hasTimeoutMS
+
+        def wrapped = Mock(AsyncCommandBatchCursor)
+        def callback = Stub(SingleResultCallback)
+        def cursor = new AsyncChangeStreamBatchCursor(changeStreamOpertation, wrapped, binding, null,
+                ServerVersionHelper.FOUR_DOT_FOUR_WIRE_VERSION)
+
+        when:
+        cursor.setBatchSize(10)
+
+        then:
+        1 * wrapped.setBatchSize(10)
+
+        when:
+        cursor.next(callback)
+
+        then:
+        1 * wrapped.next(_) >> { it[0].onResult([], null) }
+
+        when:
+        cursor.close()
+
+        then:
+        1 * wrapped.close()
+        1 * binding.release()
+
+        when:
+        cursor.close()
+
+        then:
+        0 * wrapped.close()
+        0 * binding.release()
+
+        where:
+        hasTimeoutMS << [true, false]
+    }
+
+    def 'should not close the cursor in next if the cursor was closed before next completed'() {
+        def changeStreamOpertation = Stub(ChangeStreamOperation)
+        def binding = Mock(AsyncReadBinding)
+        def operationContext = Mock(OperationContext)
+        def timeoutContext = Mock(TimeoutContext)
+        binding.getOperationContext() >> operationContext
+        operationContext.getTimeoutContext() >> timeoutContext
+        timeoutContext.hasTimeoutMS() >> hasTimeoutMS
+        def
wrapped = Mock(AsyncCommandBatchCursor) + def callback = Stub(SingleResultCallback) + def cursor = new AsyncChangeStreamBatchCursor(changeStreamOpertation, wrapped, binding, null, + ServerVersionHelper.FOUR_DOT_FOUR_WIRE_VERSION) + + when: + cursor.next(callback) + + then: + 1 * wrapped.next(_) >> { + // Simulate the user calling close while wrapped.next() is in flight + cursor.close() + it[0].onResult([], null) + } + + then: + noExceptionThrown() + + then: + cursor.isClosed() + + where: + hasTimeoutMS << [true, false] + } + + def 'should throw a MongoException when next/tryNext is called after the cursor is closed'() { + def changeStreamOpertation = Stub(ChangeStreamOperation) + def binding = Mock(AsyncReadBinding) + def operationContext = Mock(OperationContext) + def timeoutContext = Mock(TimeoutContext) + binding.getOperationContext() >> operationContext + operationContext.getTimeoutContext() >> timeoutContext + timeoutContext.hasTimeoutMS() >> hasTimeoutMS + def wrapped = Mock(AsyncCommandBatchCursor) + def cursor = new AsyncChangeStreamBatchCursor(changeStreamOpertation, wrapped, binding, null, + ServerVersionHelper.FOUR_DOT_FOUR_WIRE_VERSION) + + given: + cursor.close() + + when: + nextBatch(cursor) + + then: + def exception = thrown(MongoException) + exception.getMessage() == 'next() called after the cursor was closed.' + + where: + hasTimeoutMS << [true, false] + } + + List nextBatch(AsyncChangeStreamBatchCursor cursor) { + def futureResultCallback = new FutureResultCallback() + cursor.next(futureResultCallback) + futureResultCallback.get(1, SECONDS) + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncCommandBatchCursorSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncCommandBatchCursorSpecification.groovy new file mode 100644 index 00000000000..d2bcd0804bb --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncCommandBatchCursorSpecification.groovy @@ -0,0 +1,558 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation + +import com.mongodb.MongoClientSettings +import com.mongodb.MongoCommandException +import com.mongodb.MongoException +import com.mongodb.MongoNamespace +import com.mongodb.ServerAddress +import com.mongodb.ServerCursor +import com.mongodb.async.FutureResultCallback +import com.mongodb.client.cursor.TimeoutMode +import com.mongodb.connection.ConnectionDescription +import com.mongodb.connection.ServerConnectionState +import com.mongodb.connection.ServerDescription +import com.mongodb.connection.ServerType +import com.mongodb.connection.ServerVersion +import com.mongodb.internal.TimeoutContext +import com.mongodb.internal.TimeoutSettings +import com.mongodb.internal.async.SingleResultCallback +import com.mongodb.internal.binding.AsyncConnectionSource +import com.mongodb.internal.connection.AsyncConnection +import com.mongodb.internal.connection.OperationContext +import org.bson.BsonArray +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonInt64 +import org.bson.BsonString +import org.bson.Document +import org.bson.codecs.DocumentCodec +import spock.lang.Specification + +import java.util.concurrent.TimeUnit + +import static OperationUnitSpecification.getMaxWireVersionForServerVersion +import static com.mongodb.ReadPreference.primary +import static com.mongodb.internal.operation.CommandBatchCursorHelper.MESSAGE_IF_CLOSED_AS_CURSOR +import static com.mongodb.internal.operation.CommandBatchCursorHelper.MESSAGE_IF_CONCURRENT_OPERATION + +class AsyncCommandBatchCursorSpecification extends Specification { + + def 'should generate expected command with batchSize and maxTimeMS'() { + given: + def initialConnection = referenceCountedAsyncConnection() + def connection = referenceCountedAsyncConnection() + def connectionSource = getAsyncConnectionSource(connection) + def timeoutContext = connectionSource.getOperationContext().getTimeoutContext() + def firstBatch = createCommandResult([]) + def expectedCommand = new BsonDocument('getMore': new BsonInt64(CURSOR_ID)) + .append('collection', new BsonString(NAMESPACE.getCollectionName())) + if (batchSize != 0) { + expectedCommand.append('batchSize', new BsonInt32(batchSize)) + } + + def reply = getMoreResponse([], 0) + + when: + def cursor = new AsyncCommandBatchCursor(TimeoutMode.CURSOR_LIFETIME, firstBatch, batchSize, maxTimeMS, CODEC, + null, connectionSource, initialConnection) + then: + 1 * timeoutContext.setMaxTimeOverride(*_) + + when: + def batch = nextBatch(cursor) + + then: + 1 * connection.commandAsync(NAMESPACE.getDatabaseName(), expectedCommand, *_) >> { + it.last().onResult(reply, null) + } + batch.isEmpty() + + then: + cursor.isClosed() + + then: + cursor.close() + + then: + connection.getCount() == 0 + initialConnection.getCount() == 0 + connectionSource.getCount() == 0 + + where: + batchSize | maxTimeMS | expectedMaxTimeFieldValue + 0 | 0 | null + 2 | 0 | null + 0 | 100 | 100 + } + + def 'should close the cursor'() { + given: + def initialConnection = referenceCountedAsyncConnection() + def serverVersion = new ServerVersion([3, 6, 0]) + def connection = referenceCountedAsyncConnection(serverVersion) + def connectionSource = getAsyncConnectionSource(connection) + def cursor = new AsyncCommandBatchCursor(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) + + when: + cursor.close() + + then: + if (cursor.getServerCursor() != null) { + 1 * connection.commandAsync(NAMESPACE.databaseName, 
createKillCursorsDocument(cursor.getServerCursor()), _, primary(), *_) >> { + it.last().onResult(null, null) + } + } + + then: + connection.getCount() == 0 + initialConnection.getCount() == 0 + connectionSource.getCount() == 0 + + where: + firstBatch << [createCommandResult(), createCommandResult(FIRST_BATCH, 0)] + } + + def 'should return the expected results from next'() { + given: + def initialConnection = referenceCountedAsyncConnection() + def connection = referenceCountedAsyncConnection() + def connectionSource = getAsyncConnectionSource(connection) + + when: + def firstBatch = createCommandResult(FIRST_BATCH, 0) + def cursor = new AsyncCommandBatchCursor(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) + + then: + nextBatch(cursor) == FIRST_BATCH + + then: + connectionSource.getCount() == 0 + + then: + cursor.isClosed() + + when: + nextBatch(cursor) + + then: + def exception = thrown(IllegalStateException) + exception.getMessage() == MESSAGE_IF_CLOSED_AS_CURSOR + initialConnection.getCount() == 0 + connectionSource.getCount() == 0 + } + + def 'should handle getMore when there are empty results but there is a cursor'() { + given: + def initialConnection = referenceCountedAsyncConnection() + def connection = referenceCountedAsyncConnection() + def connectionSource = getAsyncConnectionSource(connection) + + when: + def firstBatch = createCommandResult([], CURSOR_ID) + def cursor = new AsyncCommandBatchCursor(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) + def batch = nextBatch(cursor) + + then: + 1 * connection.commandAsync(*_) >> { + connection.getCount() == 1 + connectionSource.getCount() == 1 + it.last().onResult(response, null) + } + + 1 * connection.commandAsync(*_) >> { + connection.getCount() == 1 + connectionSource.getCount() == 1 + it.last().onResult(response2, null) + } + + then: + batch == SECOND_BATCH + + when: + cursor.close() + + then: + 0 * connection._ + initialConnection.getCount() == 0 + connectionSource.getCount() == 0 + + where: + serverVersion | response | response2 + new ServerVersion([3, 6, 0]) | getMoreResponse([]) | getMoreResponse(SECOND_BATCH, 0) + } + + def 'should close cursor after getMore finishes if cursor was closed while getMore was in progress and getMore returns a response'() { + given: + def serverVersion = new ServerVersion([3, 6, 0]) + def initialConnection = referenceCountedAsyncConnection(serverVersion, 'connectionOri', serverType) + def connectionA = referenceCountedAsyncConnection(serverVersion, 'connectionA', serverType) + def connectionB = referenceCountedAsyncConnection(serverVersion, 'connectionB', serverType) + def connectionSource = getAsyncConnectionSource(serverType, connectionA, connectionB) + + def firstConnection = serverType == ServerType.LOAD_BALANCER ? initialConnection : connectionA + def secondConnection = serverType == ServerType.LOAD_BALANCER ? 
initialConnection : connectionB + def firstBatch = createCommandResult() + + when: + def cursor = new AsyncCommandBatchCursor(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) + def batch = nextBatch(cursor) + + then: + batch == FIRST_BATCH + + when: + nextBatch(cursor) + + then: + // simulate the user calling `close` while `getMore` is in flight + // in LB mode the same connection is used to execute both `getMore` and `killCursors` + 1 * firstConnection.commandAsync(*_) >> { + // `getMore` command + cursor.close() + ((SingleResultCallback) it.last()).onResult(getMoreResponse([], responseCursorId), null) + } + + then: + if (responseCursorId > 0) { + 1 * secondConnection.commandAsync(*_) >> { + // `killCursors` command + ((SingleResultCallback) it.last()).onResult(null, null) + } + } + + then: + noExceptionThrown() + + then: + connectionA.getCount() == 0 + connectionB.getCount() == 0 + initialConnection.getCount() == 0 + connectionSource.getCount() == 0 + cursor.isClosed() + + where: + serverType | responseCursorId + ServerType.LOAD_BALANCER | 42 + ServerType.LOAD_BALANCER | 0 + ServerType.STANDALONE | 42 + ServerType.STANDALONE | 0 + } + + def 'should throw concurrent operation assertion error'() { + given: + def serverVersion = new ServerVersion([3, 6, 0]) + def initialConnection = referenceCountedAsyncConnection(serverVersion, 'connectionOri') + def connectionA = referenceCountedAsyncConnection(serverVersion, 'connectionA') + def connectionB = referenceCountedAsyncConnection(serverVersion, 'connectionB') + def connectionSource = getAsyncConnectionSource(connectionA, connectionB) + + when: + def cursor = new AsyncCommandBatchCursor(TimeoutMode.CURSOR_LIFETIME, createCommandResult(FIRST_BATCH, 42), 0, 0, CODEC, + null, connectionSource, initialConnection) + def batch = nextBatch(cursor) + + then: + batch == FIRST_BATCH + + when: + nextBatch(cursor) + + then: + // simulate the user calling `cursor.next()` while `getMore` is in flight + 1 * connectionA.commandAsync(*_) >> { + // `getMore` command + nextBatch(cursor) + } + + then: + def exception = thrown(AssertionError) + exception.getMessage() == MESSAGE_IF_CONCURRENT_OPERATION + } + + def 'should close cursor after getMore finishes if cursor was closed while getMore was in progress and getMore throws exception'() { + given: + def serverVersion = new ServerVersion([4, 4, 0]) + def initialConnection = referenceCountedAsyncConnection(serverVersion, 'connectionOri', serverType) + def connectionA = referenceCountedAsyncConnection(serverVersion, 'connectionA', serverType) + def connectionB = referenceCountedAsyncConnection(serverVersion, 'connectionB', serverType) + def connectionSource = getAsyncConnectionSource(serverType, connectionA, connectionB) + + def firstConnection = serverType == ServerType.LOAD_BALANCER ? initialConnection : connectionA + def secondConnection = serverType == ServerType.LOAD_BALANCER ? 
initialConnection : connectionB + + def firstBatch = createCommandResult() + + when: + def cursor = new AsyncCommandBatchCursor(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) + def batch = nextBatch(cursor) + + then: + batch == FIRST_BATCH + + when: + nextBatch(cursor) + + then: + 1 * firstConnection.commandAsync(*_) >> { + // Simulate the user calling close while the getMore is throwing a MongoException + cursor.close() + ((SingleResultCallback) it.last()).onResult(null, MONGO_EXCEPTION) + } + + then: + 1 * secondConnection.commandAsync(*_) >> { + // `killCursors` command + ((SingleResultCallback) it.last()).onResult(null, null) + } + + then: + thrown(MongoException) + + then: + connectionA.getCount() == 0 + initialConnection.getCount() == 0 + cursor.isClosed() + + where: + serverType << [ServerType.LOAD_BALANCER, ServerType.STANDALONE] + } + + def 'should handle errors when calling close'() { + given: + def initialConnection = referenceCountedAsyncConnection() + def connectionSource = getAsyncConnectionSourceWithResult(ServerType.STANDALONE) { [null, MONGO_EXCEPTION] } + def firstBatch = createCommandResult() + def cursor = new AsyncCommandBatchCursor(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) + + when: + cursor.close() + + then: + cursor.isClosed() + initialConnection.getCount() == 0 + connectionSource.getCount() == 0 + } + + + def 'should handle errors when getting a connection for getMore'() { + given: + def initialConnection = referenceCountedAsyncConnection() + def connectionSource = getAsyncConnectionSourceWithResult(ServerType.STANDALONE) { [null, MONGO_EXCEPTION] } + + when: + def firstBatch = createCommandResult() + def cursor = new AsyncCommandBatchCursor(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) + + then: + nextBatch(cursor) + + when: + nextBatch(cursor) + + then: + thrown(MongoException) + + then: + initialConnection.getCount() == 0 + connectionSource.getCount() == 1 + } + + def 'should handle errors when calling getMore'() { + given: + def serverVersion = new ServerVersion([3, 6, 0]) + def initialConnection = referenceCountedAsyncConnection() + def connectionA = referenceCountedAsyncConnection(serverVersion, 'connectionA') + def connectionB = referenceCountedAsyncConnection(serverVersion, 'connectionB') + def connectionSource = getAsyncConnectionSource(connectionA, connectionB) + + when: + def firstBatch = createCommandResult() + def cursor = new AsyncCommandBatchCursor(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) + + then: + connectionSource.getCount() == 1 + + when: + nextBatch(cursor) + nextBatch(cursor) + + then: + 1 * connectionA.commandAsync(*_) >> { + connectionA.getCount() == 1 + connectionSource.getCount() == 1 + it.last().onResult(null, exception) + } + then: + thrown(MongoException) + + then: + connectionA.getCount() == 0 + connectionSource.getCount() == 1 + + when: + cursor.close() + + then: + 1 * connectionB.commandAsync(*_) >> { + connectionB.getCount() == 1 + connectionSource.getCount() == 1 + it.last().onResult(null, null) + } + + then: + connectionA.getCount() == 0 + connectionB.getCount() == 0 + initialConnection.getCount() == 0 + connectionSource.getCount() == 0 + + where: + exception << [COMMAND_EXCEPTION, MONGO_EXCEPTION] + } + + List nextBatch(AsyncCommandBatchCursor cursor) { + def futureResultCallback = new FutureResultCallback() + 
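// FutureResultCallback bridges the callback-based cursor API to a blocking call for test
// assertions: it completes when onResult fires, and get() rethrows any error that was
// delivered to the callback.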
cursor.next(futureResultCallback) + futureResultCallback.get() + } + + private static final MongoNamespace NAMESPACE = new MongoNamespace('db', 'coll') + private static final ServerAddress SERVER_ADDRESS = new ServerAddress() + private static final CURSOR_ID = 42 + private static final FIRST_BATCH = [new Document('_id', 1), new Document('_id', 2)] + private static final SECOND_BATCH = [new Document('_id', 3), new Document('_id', 4)] + private static final CODEC = new DocumentCodec() + private static final MONGO_EXCEPTION = new MongoException('error') + private static final COMMAND_EXCEPTION = new MongoCommandException(BsonDocument.parse('{"ok": false, "errmsg": "error"}'), + SERVER_ADDRESS) + + private static BsonDocument getMoreResponse(results, cursorId = CURSOR_ID) { + createCommandResult(results, cursorId, "nextBatch") + } + + private static BsonDocument createCommandResult(List results = FIRST_BATCH, Long cursorId = CURSOR_ID, + String fieldNameContainingBatch = "firstBatch") { + new BsonDocument("ok", new BsonInt32(1)) + .append("cursor", + new BsonDocument("ns", new BsonString(NAMESPACE.fullName)) + .append("id", new BsonInt64(cursorId)) + .append(fieldNameContainingBatch, new BsonArrayWrapper(results))) + } + + private static BsonDocument createKillCursorsDocument(ServerCursor serverCursor) { + new BsonDocument('killCursors', new BsonString(NAMESPACE.getCollectionName())) + .append('cursors', new BsonArray(Collections.singletonList(new BsonInt64(serverCursor.id)))) + } + + AsyncConnection referenceCountedAsyncConnection() { + referenceCountedAsyncConnection(new ServerVersion([3, 6, 0])) + } + + AsyncConnection referenceCountedAsyncConnection(ServerVersion serverVersion, String name = 'connection', + ServerType serverType = ServerType.STANDALONE) { + def released = false + def counter = 0 + def mock = Mock(AsyncConnection, name: name) { + _ * getDescription() >> Stub(ConnectionDescription) { + getMaxWireVersion() >> getMaxWireVersionForServerVersion(serverVersion.getVersionList()) + getServerAddress() >> SERVER_ADDRESS + getServerType() >> serverType + } + } + mock.retain() >> { + if (released) { + throw new IllegalStateException('Tried to retain AsyncConnection when already released') + } else { + counter += 1 + } + mock + } + mock.release() >> { + counter -= 1 + if (counter == 0) { + released = true + } else if (counter < 0) { + throw new IllegalStateException('Tried to release AsyncConnection below 0') + } + counter + } + mock.getCount() >> { counter } + mock + } + + AsyncConnectionSource getAsyncConnectionSource(AsyncConnection... connections) { + getAsyncConnectionSource(ServerType.STANDALONE, connections) + } + + AsyncConnectionSource getAsyncConnectionSource(ServerType serverType, AsyncConnection... 
connections) { + def index = -1 + getAsyncConnectionSourceWithResult(serverType) { index += 1; [connections.toList().get(index).retain(), null] } + } + + def getAsyncConnectionSourceWithResult(ServerType serverType, Closure connectionCallbackResults) { + def released = false + int counter = 0 + def mock = Mock(AsyncConnectionSource) + mock.getServerDescription() >> { + ServerDescription.builder() + .address(new ServerAddress()) + .type(serverType) + .state(ServerConnectionState.CONNECTED) + .build() + } + OperationContext operationContext = Mock(OperationContext) + def timeoutContext = Spy(new TimeoutContext(TimeoutSettings.create( + MongoClientSettings.builder().timeout(3, TimeUnit.SECONDS).build()))) + operationContext.getTimeoutContext() >> timeoutContext + mock.getOperationContext() >> operationContext + mock.getConnection(_) >> { + if (counter == 0) { + throw new IllegalStateException('Tried to use released AsyncConnectionSource') + } + def (result, error) = connectionCallbackResults() + it[0].onResult(result, error) + } + mock.retain() >> { + if (released) { + throw new IllegalStateException('Tried to retain AsyncConnectionSource when already released') + } else { + counter += 1 + } + mock + } + mock.release() >> { + counter -= 1 + if (counter == 0) { + released = true + } else if (counter < 0) { + throw new IllegalStateException('Tried to release AsyncConnectionSource below 0') + } + counter + } + mock.getCount() >> { counter } + mock + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncOperationHelperSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncOperationHelperSpecification.groovy new file mode 100644 index 00000000000..ba69097cffa --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncOperationHelperSpecification.groovy @@ -0,0 +1,160 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation + +import com.mongodb.MongoWriteConcernException +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import com.mongodb.connection.ConnectionDescription +import com.mongodb.connection.ServerDescription +import com.mongodb.connection.ServerType +import com.mongodb.internal.async.SingleResultCallback +import com.mongodb.internal.binding.AsyncConnectionSource +import com.mongodb.internal.binding.AsyncReadBinding +import com.mongodb.internal.binding.AsyncWriteBinding +import com.mongodb.internal.connection.AsyncConnection +import com.mongodb.internal.session.SessionContext +import com.mongodb.internal.validator.NoOpFieldNameValidator +import org.bson.BsonDocument +import org.bson.BsonNull +import org.bson.codecs.BsonDocumentCodec +import org.bson.codecs.Decoder +import spock.lang.Specification + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ReadPreference.primary +import static com.mongodb.internal.operation.AsyncOperationHelper.CommandReadTransformerAsync +import static com.mongodb.internal.operation.AsyncOperationHelper.executeCommandAsync +import static com.mongodb.internal.operation.AsyncOperationHelper.executeRetryableReadAsync +import static com.mongodb.internal.operation.AsyncOperationHelper.executeRetryableWriteAsync +import static com.mongodb.internal.operation.OperationUnitSpecification.getMaxWireVersionForServerVersion + +class AsyncOperationHelperSpecification extends Specification { + + def 'should retry with retryable exception async'() { + given: + def dbName = 'db' + def command = BsonDocument.parse('''{findAndModify: "coll", query: {a: 1}, new: false, update: {$inc: {a :1}}, txnNumber: 1}''') + def serverDescription = Stub(ServerDescription) + def connectionDescription = Stub(ConnectionDescription) { + getMaxWireVersion() >> getMaxWireVersionForServerVersion([4, 0, 0]) + getServerType() >> ServerType.REPLICA_SET_PRIMARY + } + def commandCreator = { csot, serverDesc, connectionDesc -> command } + def callback = new SingleResultCallback() { + def result + def throwable + @Override + void onResult(final Object result, final Throwable t) { + this.result = result + this.throwable = t + } + } + def decoder = new BsonDocumentCodec() + def results = [ + BsonDocument.parse('{ok: 1.0, writeConcernError: {code: 91, errmsg: "Replication is being shut down"}}'), + BsonDocument.parse('{ok: 1.0, writeConcernError: {code: -1, errmsg: "UnknownError"}}')] as Queue + + def connection = Mock(AsyncConnection) { + _ * getDescription() >> connectionDescription + } + + def operationContext = OPERATION_CONTEXT.withSessionContext( + Stub(SessionContext) { + hasSession() >> true + hasActiveTransaction() >> false + getReadConcern() >> ReadConcern.DEFAULT + }) + def connectionSource = Stub(AsyncConnectionSource) { + getConnection(_) >> { it[0].onResult(connection, null) } + getServerDescription() >> serverDescription + getOperationContext() >> operationContext + } + def asyncWriteBinding = Stub(AsyncWriteBinding) { + getWriteConnectionSource(_) >> { it[0].onResult(connectionSource, null) } + getOperationContext() >> operationContext + } + + when: + executeRetryableWriteAsync(asyncWriteBinding, dbName, primary(), + NoOpFieldNameValidator.INSTANCE, decoder, commandCreator, FindAndModifyHelper.asyncTransformer(), + { cmd -> cmd }, callback) + + then: + 2 * connection.commandAsync(dbName, command, _, primary(), decoder, *_) >> { it.last().onResult(results.poll(), null) } + + then: + callback.throwable 
instanceof MongoWriteConcernException + callback.throwable.writeConcernError.code == -1 + } + + def 'should set read preference to primary when using AsyncWriteBinding'() { + given: + def dbName = 'db' + def command = new BsonDocument() + def callback = Stub(SingleResultCallback) + def connection = Mock(AsyncConnection) + def connectionSource = Stub(AsyncConnectionSource) { + getConnection(_) >> { it[0].onResult(connection, null) } + } + def asyncWriteBinding = Stub(AsyncWriteBinding) { + getWriteConnectionSource(_) >> { it[0].onResult(connectionSource, null) } + } + def connectionDescription = Stub(ConnectionDescription) + + when: + executeCommandAsync(asyncWriteBinding, dbName, command, connection, { t, conn -> t }, callback) + + then: + _ * connection.getDescription() >> connectionDescription + 1 * connection.commandAsync(dbName, command, _, primary(), *_) >> { it.last().onResult(1, null) } +// 1 * connection.release() + } + + def 'should use the AsyncConnectionSource readPreference'() { + given: + def dbName = 'db' + def command = new BsonDocument('fakeCommandName', BsonNull.VALUE) + def commandCreator = { csot, serverDescription, connectionDescription -> command } + def decoder = Stub(Decoder) + def callback = Stub(SingleResultCallback) + def function = Stub(CommandReadTransformerAsync) + def connection = Mock(AsyncConnection) + def connectionSource = Stub(AsyncConnectionSource) { + getOperationContext() >> OPERATION_CONTEXT + getConnection(_) >> { it[0].onResult(connection, null) } + getReadPreference() >> readPreference + } + def asyncReadBinding = Stub(AsyncReadBinding) { + getOperationContext() >> OPERATION_CONTEXT + getReadConnectionSource(_) >> { it[0].onResult(connectionSource, null) } + } + def connectionDescription = Stub(ConnectionDescription) + + when: + executeRetryableReadAsync(asyncReadBinding, dbName, commandCreator, decoder, function, false, callback) + + then: + _ * connection.getDescription() >> connectionDescription + 1 * connection.commandAsync(dbName, command, _, readPreference, decoder, *_) >> { it.last().onResult(1, null) } + 1 * connection.release() + + where: + readPreference << [primary(), ReadPreference.secondary()] + } + +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncSingleBatchCursorTest.java b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncSingleBatchCursorTest.java new file mode 100644 index 00000000000..561a4cf9f31 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/AsyncSingleBatchCursorTest.java @@ -0,0 +1,84 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoException; +import com.mongodb.async.FutureResultCallback; +import org.bson.Document; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.ClusterFixture.TIMEOUT; +import static com.mongodb.internal.operation.AsyncSingleBatchCursor.createEmptyAsyncSingleBatchCursor; +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertIterableEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + + +class AsyncSingleBatchCursorTest { + + private static final List SINGLE_BATCH = asList(new Document("a", 1), new Document("b", 2)); + + @Test + @DisplayName("should work as expected") + void shouldWorkAsExpected() { + try (AsyncSingleBatchCursor cursor = new AsyncSingleBatchCursor<>(SINGLE_BATCH, 0)) { + + assertIterableEquals(SINGLE_BATCH, nextBatch(cursor)); + assertIterableEquals(emptyList(), nextBatch(cursor)); + assertTrue(cursor.isClosed()); + + assertThrows(MongoException.class, () -> nextBatch(cursor)); + } + } + + @Test + @DisplayName("should work as expected emptyCursor") + void shouldWorkAsExpectedEmptyCursor() { + try (AsyncSingleBatchCursor cursor = createEmptyAsyncSingleBatchCursor(0)) { + assertIterableEquals(emptyList(), nextBatch(cursor)); + assertTrue(cursor.isClosed()); + + assertThrows(MongoException.class, () -> nextBatch(cursor)); + } + } + + @Test + @DisplayName("should not support setting batch size") + void shouldNotSupportSettingBatchSize() { + try (AsyncSingleBatchCursor cursor = new AsyncSingleBatchCursor<>(SINGLE_BATCH, 0)) { + + assertEquals(0, cursor.getBatchSize()); + + cursor.setBatchSize(1); + assertEquals(0, cursor.getBatchSize()); + } + } + + List nextBatch(final AsyncSingleBatchCursor cursor) { + FutureResultCallback> futureResultCallback = new FutureResultCallback<>(); + cursor.next(futureResultCallback); + return futureResultCallback.get(TIMEOUT, TimeUnit.MILLISECONDS); + } + +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/BulkWriteBatchSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/BulkWriteBatchSpecification.groovy new file mode 100644 index 00000000000..2ccd3513cf7 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/BulkWriteBatchSpecification.groovy @@ -0,0 +1,394 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation + +import com.mongodb.MongoBulkWriteException +import com.mongodb.MongoNamespace +import com.mongodb.ReadConcern +import com.mongodb.ServerAddress +import com.mongodb.WriteConcern +import com.mongodb.bulk.BulkWriteInsert +import com.mongodb.bulk.BulkWriteResult +import com.mongodb.bulk.BulkWriteUpsert +import com.mongodb.client.model.Collation +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ConnectionDescription +import com.mongodb.connection.ConnectionId +import com.mongodb.connection.ServerDescription +import com.mongodb.connection.ServerId +import com.mongodb.connection.ServerType +import com.mongodb.internal.IgnorableRequestContext +import com.mongodb.internal.TimeoutContext +import com.mongodb.internal.TimeoutSettings +import com.mongodb.internal.bulk.DeleteRequest +import com.mongodb.internal.bulk.InsertRequest +import com.mongodb.internal.bulk.UpdateRequest +import com.mongodb.internal.bulk.WriteRequest +import com.mongodb.internal.connection.OperationContext +import com.mongodb.internal.connection.ReadConcernAwareNoOpSessionContext +import org.bson.BsonDocument +import org.bson.BsonInt32 +import spock.lang.Specification + +import static com.mongodb.connection.ServerConnectionState.CONNECTED +import static com.mongodb.internal.bulk.WriteRequest.Type.REPLACE +import static com.mongodb.internal.bulk.WriteRequest.Type.UPDATE + +class BulkWriteBatchSpecification extends Specification { + private static final TimeoutContext TIMEOUT_CONTEXT = new TimeoutContext(new TimeoutSettings(0, 0, 0, 0, 0)) + def namespace = new MongoNamespace('db.coll') + def serverDescription = ServerDescription.builder().address(new ServerAddress()).state(CONNECTED) + .logicalSessionTimeoutMinutes(30) + .build() + def connectionDescription = new ConnectionDescription( + new ConnectionId(new ServerId(new ClusterId(), serverDescription.getAddress())), 6, + ServerType.REPLICA_SET_PRIMARY, 1000, 16000, 48000, []) + def sessionContext = new ReadConcernAwareNoOpSessionContext(ReadConcern.DEFAULT) + def operationContext = new OperationContext(IgnorableRequestContext.INSTANCE, sessionContext, TIMEOUT_CONTEXT, null) + + def 'should split payloads by type when ordered'() { + when: + def bulkWriteBatch = BulkWriteBatch.createBulkWriteBatch(namespace, connectionDescription, true, + WriteConcern.ACKNOWLEDGED, null, false, getWriteRequests(), operationContext, null, null) + def payload = bulkWriteBatch.getPayload() + payload.setPosition(payload.size()) + + then: + payload.getPayloadName() == 'documents' + payload.getPayload() == getWriteRequestsAsDocuments()[0..0] + bulkWriteBatch.getCommand() == toBsonDocument('{ "insert" : "coll", "ordered" : true }') + bulkWriteBatch.hasAnotherBatch() + + when: + bulkWriteBatch = bulkWriteBatch.getNextBatch() + payload = bulkWriteBatch.getPayload() + payload.setPosition(payload.size()) + + then: + payload.getPayloadName() == 'updates' + payload.getPayload() == getWriteRequestsAsDocuments()[1..2] + bulkWriteBatch.getCommand() == toBsonDocument('{ "update" : "coll", "ordered" : true }') + bulkWriteBatch.hasAnotherBatch() + + when: + bulkWriteBatch = bulkWriteBatch.getNextBatch() + payload = bulkWriteBatch.getPayload() + payload.setPosition(payload.size()) + + then: + payload.getPayloadName() == 'documents' + payload.getPayload() == getWriteRequestsAsDocuments()[3..4] + bulkWriteBatch.getCommand() == toBsonDocument('{ "insert" : "coll", "ordered" : true }') + bulkWriteBatch.hasAnotherBatch() + + when: + bulkWriteBatch = 
bulkWriteBatch.getNextBatch() + payload = bulkWriteBatch.getPayload() + payload.setPosition(payload.size()) + + then: + payload.getPayloadName() == 'updates' + payload.getPayload() == getWriteRequestsAsDocuments()[5..5] + bulkWriteBatch.getCommand() == toBsonDocument('{ "update" : "coll", "ordered" : true }') + bulkWriteBatch.hasAnotherBatch() + + when: + bulkWriteBatch = bulkWriteBatch.getNextBatch() + payload = bulkWriteBatch.getPayload() + payload.setPosition(payload.size()) + + then: + payload.getPayloadName() == 'deletes' + payload.getPayload() == getWriteRequestsAsDocuments()[6..7] + bulkWriteBatch.getCommand() == toBsonDocument('{ "delete" : "coll", "ordered" : true }') + bulkWriteBatch.hasAnotherBatch() + + when: + bulkWriteBatch = bulkWriteBatch.getNextBatch() + payload = bulkWriteBatch.getPayload() + payload.setPosition(payload.size()) + + then: + payload.getPayloadName() == 'documents' + payload.getPayload() == getWriteRequestsAsDocuments()[8..8] + bulkWriteBatch.getCommand() == toBsonDocument('{ "insert" : "coll", "ordered" : true }') + bulkWriteBatch.hasAnotherBatch() + + when: + bulkWriteBatch = bulkWriteBatch.getNextBatch() + payload = bulkWriteBatch.getPayload() + payload.setPosition(payload.size()) + + then: + payload.getPayloadName() == 'deletes' + payload.getPayload() == getWriteRequestsAsDocuments()[9..9] + bulkWriteBatch.getCommand() == toBsonDocument('{ "delete" : "coll", "ordered" : true }') + !bulkWriteBatch.hasAnotherBatch() + } + + def 'should group payloads by type when unordered'() { + when: + def bulkWriteBatch = BulkWriteBatch.createBulkWriteBatch(namespace, connectionDescription, false, + WriteConcern.MAJORITY, true, false, getWriteRequests(), operationContext, null, null) + def payload = bulkWriteBatch.getPayload() + payload.setPosition(payload.size()) + + then: + payload.getPayloadName() == 'documents' + payload.getPayload() == [getWriteRequestsAsDocuments()[0], getWriteRequestsAsDocuments()[3], getWriteRequestsAsDocuments()[4], + getWriteRequestsAsDocuments()[8]] + bulkWriteBatch.hasAnotherBatch() + bulkWriteBatch.getCommand() == toBsonDocument('''{"insert": "coll", "ordered": false, + "writeConcern": {"w" : "majority"}, "bypassDocumentValidation" : true }''') + + when: + bulkWriteBatch = bulkWriteBatch.getNextBatch() + payload = bulkWriteBatch.getPayload() + payload.setPosition(payload.size()) + + then: + payload.getPayloadName() == 'updates' + payload.getPayload() == [getWriteRequestsAsDocuments()[1], getWriteRequestsAsDocuments()[2]] + bulkWriteBatch.hasAnotherBatch() + bulkWriteBatch.getCommand() == toBsonDocument('''{"update": "coll", "ordered": false, + "writeConcern": {"w" : "majority"}, "bypassDocumentValidation" : true }''') + + when: + bulkWriteBatch = bulkWriteBatch.getNextBatch() + payload = bulkWriteBatch.getPayload() + payload.setPosition(payload.size()) + + then: + payload.getPayloadName() == 'updates' + payload.getPayload() == [getWriteRequestsAsDocuments()[5]] + bulkWriteBatch.hasAnotherBatch() + bulkWriteBatch.getCommand() == toBsonDocument('''{"update": "coll", "ordered": false, + "writeConcern": {"w" : "majority"}, "bypassDocumentValidation" : true }''') + + when: + bulkWriteBatch = bulkWriteBatch.getNextBatch() + payload = bulkWriteBatch.getPayload() + payload.setPosition(payload.size()) + + then: + payload.getPayloadName() == 'deletes' + payload.getPayload() == [getWriteRequestsAsDocuments()[6], getWriteRequestsAsDocuments()[7], getWriteRequestsAsDocuments()[9]] + !bulkWriteBatch.hasAnotherBatch() + bulkWriteBatch.getCommand() == 
toBsonDocument('''{"delete": "coll", "ordered": false, + "writeConcern": {"w" : "majority"}, "bypassDocumentValidation" : true }''') + } + + def 'should split payloads if only payload partially processed'() { + when: + def bulkWriteBatch = BulkWriteBatch.createBulkWriteBatch(namespace, connectionDescription, false, + WriteConcern.ACKNOWLEDGED, null, false, getWriteRequests()[0..3], operationContext, null, null) + def payload = bulkWriteBatch.getPayload() + payload.setPosition(1) + + then: + payload.getPayloadName() == 'documents' + payload.getPayload() == [getWriteRequestsAsDocuments()[0], getWriteRequestsAsDocuments()[3]] + bulkWriteBatch.hasAnotherBatch() + + when: + bulkWriteBatch = bulkWriteBatch.getNextBatch() + payload = bulkWriteBatch.getPayload() + payload.setPosition(1) + + then: + payload.getPayloadName() == 'documents' + payload.getPayload() == [getWriteRequestsAsDocuments()[3]] + bulkWriteBatch.hasAnotherBatch() + + when: + bulkWriteBatch = bulkWriteBatch.getNextBatch() + payload = bulkWriteBatch.getPayload() + payload.setPosition(1) + + then: + payload.getPayloadName() == 'updates' + payload.getPayload() == [getWriteRequestsAsDocuments()[1], getWriteRequestsAsDocuments()[2]] + !bulkWriteBatch.hasAnotherBatch() + + when: + bulkWriteBatch = bulkWriteBatch.getNextBatch() + payload = bulkWriteBatch.getPayload() + payload.setPosition(1) + + then: + payload.getPayloadName() == 'updates' + payload.getPayload() == [getWriteRequestsAsDocuments()[2]] + !bulkWriteBatch.hasAnotherBatch() + } + + def 'should map all inserted ids'() { + when: + def bulkWriteBatch = BulkWriteBatch.createBulkWriteBatch(namespace, connectionDescription, false, + WriteConcern.ACKNOWLEDGED, null, false, + [new InsertRequest(toBsonDocument('{_id: 0}')), + new InsertRequest(toBsonDocument('{_id: 1}')), + new InsertRequest(toBsonDocument('{_id: 2}')) + ], + operationContext, null, null) + def payload = bulkWriteBatch.getPayload() + payload.setPosition(1) + payload.insertedIds.put(0, new BsonInt32(0)) + bulkWriteBatch.addResult(BsonDocument.parse('{"n": 1, "ok": 1.0}')) + + then: + bulkWriteBatch.getResult().inserts == [new BulkWriteInsert(0, new BsonInt32(0))] + + when: + bulkWriteBatch = bulkWriteBatch.getNextBatch() + payload = bulkWriteBatch.getPayload() + payload.setPosition(1) + payload.insertedIds.put(1, new BsonInt32(1)) + bulkWriteBatch.addResult(BsonDocument.parse('{"n": 1, "ok": 1.0}')) + + then: + bulkWriteBatch.getResult().inserts == [new BulkWriteInsert(0, new BsonInt32(0)), + new BulkWriteInsert(1, new BsonInt32(1))] + + when: + bulkWriteBatch = bulkWriteBatch.getNextBatch() + payload = bulkWriteBatch.getPayload() + payload.setPosition(1) + payload.insertedIds.put(2, new BsonInt32(2)) + bulkWriteBatch.addResult(BsonDocument.parse('{"n": 1, "ok": 1.0}')) + + then: + bulkWriteBatch.getResult().inserts == [new BulkWriteInsert(0, new BsonInt32(0)), + new BulkWriteInsert(1, new BsonInt32(1)), + new BulkWriteInsert(2, new BsonInt32(2))] + } + + def 'should not map inserted id with a write error'() { + given: + def bulkWriteBatch = BulkWriteBatch.createBulkWriteBatch(namespace, connectionDescription, false, + WriteConcern.ACKNOWLEDGED, null, false, + [new InsertRequest(toBsonDocument('{_id: 0}')), + new InsertRequest(toBsonDocument('{_id: 1}')), + new InsertRequest(toBsonDocument('{_id: 2}')) + ], + operationContext, null, null) + def payload = bulkWriteBatch.getPayload() + payload.setPosition(3) + payload.insertedIds.put(0, new BsonInt32(0)) + payload.insertedIds.put(1, new BsonInt32(1)) + 
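// index 1 is reported as a duplicate-key write error below, so its inserted id
// must be dropped from the result while the ids at indexes 0 and 2 are still mapped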
payload.insertedIds.put(2, new BsonInt32(2)) + + when: + bulkWriteBatch.addResult(toBsonDocument('''{"ok": 1, "n": 2, + "writeErrors": [{ "index" : 1, "code" : 11000, "errmsg": "duplicate key error"}] }''')) + bulkWriteBatch.getResult() + + then: + def ex = thrown(MongoBulkWriteException) + ex.getWriteResult().inserts == [new BulkWriteInsert(0, new BsonInt32(0)), + new BulkWriteInsert(2, new BsonInt32(2))] + } + + def 'should not retry when at least one write is not retryable'() { + when: + def bulkWriteBatch = BulkWriteBatch.createBulkWriteBatch(namespace, connectionDescription, false, + WriteConcern.ACKNOWLEDGED, null, true, + [new DeleteRequest(new BsonDocument()).multi(true), new InsertRequest(new BsonDocument())], operationContext, null, null) + + then: + !bulkWriteBatch.getRetryWrites() + } + + def 'should handle operation responses'() { + given: + def bulkWriteBatch = BulkWriteBatch.createBulkWriteBatch(namespace, connectionDescription, true, + WriteConcern.ACKNOWLEDGED, null, false, getWriteRequests()[1..1], operationContext, null, null) + def response = toBsonDocument('{ok: 1, n: 1, upserted: [{_id: 2, index: 0}]}') + + when: + bulkWriteBatch.addResult(response) + + then: + !bulkWriteBatch.hasErrors() + bulkWriteBatch.getResult() == BulkWriteResult.acknowledged(0, 0, 0, 0, [new BulkWriteUpsert(0, new BsonInt32(2))], []) + bulkWriteBatch.shouldProcessBatch() + } + + def 'should handle writeConcernError error responses'() { + given: + def bulkWriteBatch = BulkWriteBatch.createBulkWriteBatch(namespace, connectionDescription, true, + WriteConcern.ACKNOWLEDGED, null, false, getWriteRequests()[0..0], operationContext, null, null) + def writeConcernError = toBsonDocument('{n: 1, writeConcernError: {code: 75, errmsg: "wtimeout", errInfo: {wtimeout: "0"}}}') + + when: + bulkWriteBatch.addResult(writeConcernError) + + then: + bulkWriteBatch.hasErrors() + bulkWriteBatch.getError().getWriteErrors().isEmpty() + bulkWriteBatch.getError().getWriteConcernError() + bulkWriteBatch.shouldProcessBatch() + } + + def 'should handle writeErrors error responses'() { + given: + def bulkWriteBatch = BulkWriteBatch.createBulkWriteBatch(namespace, connectionDescription, true, + WriteConcern.ACKNOWLEDGED, null, false, getWriteRequests()[0..0], operationContext, null, null) + def writeError = toBsonDocument('''{"ok": 0, "n": 1, "code": 65, "errmsg": "bulk op errors", + "writeErrors": [{ "index" : 0, "code" : 100, "errmsg": "some error"}] }''') + + when: + bulkWriteBatch.addResult(writeError) + + then: + bulkWriteBatch.hasErrors() + bulkWriteBatch.getError().getWriteErrors().size() == 1 + !bulkWriteBatch.shouldProcessBatch() + } + + private static List getWriteRequests() { + [new InsertRequest(toBsonDocument('{_id: 1, x: 1}')), + new UpdateRequest(toBsonDocument('{ _id: 2}'), toBsonDocument('{$set: {x : 2}}'), UPDATE).upsert(true), + new UpdateRequest(toBsonDocument('{ _id: 3}'), toBsonDocument('{$set: {x : 3}}'), UPDATE), + new InsertRequest(toBsonDocument('{_id: 4, x: 4}')), + new InsertRequest(toBsonDocument('{_id: 5, x: 5}')), + new UpdateRequest(toBsonDocument('{ _id: 6}'), toBsonDocument('{_id: 6, x: 6}'), REPLACE) + .collation(Collation.builder().locale('en').build()), + new DeleteRequest(toBsonDocument('{_id: 7}')), + new DeleteRequest(toBsonDocument('{_id: 8}')), + new InsertRequest(toBsonDocument('{_id: 9, x: 9}')), + new DeleteRequest(toBsonDocument('{_id: 10}')).collation(Collation.builder().locale('de').build()) + ] + } + + private static List getWriteRequestsAsDocuments() { +
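// wire-format counterparts of getWriteRequests(): inserts pass through unchanged, updates map to + // documents with q, u, multi and upsert fields, deletes map to documents with q and limit fields, + // and any collation set on a request is carried over +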
['{_id: 1, x: 1}', + '{"q": { "_id" : 2}, "u": { "$set": {"x": 2}}, "multi": true, "upsert": true }', + '{"q": { "_id" : 3}, "u": { "$set": {"x": 3}}, "multi": true}', + '{"_id": 4, "x": 4}', + '{"_id": 5, "x": 5}', + '{"q": { "_id" : 6 }, "u": { "_id": 6, "x": 6 }, "collation": { "locale": "en" }}', + '{"q": { "_id" : 7 }, "limit": 0 }', + '{"q": { "_id" : 8 }, "limit": 0 }', + '{"_id": 9, "x": 9}', + '{"q": { "_id" : 10 }, "limit": 0, "collation" : { "locale" : "de" }}' + ].collect { toBsonDocument(it) } + } + + private static BsonDocument toBsonDocument(final String json) { + BsonDocument.parse(json) + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/ChangeStreamBatchCursorHelperTest.java b/driver-core/src/test/unit/com/mongodb/internal/operation/ChangeStreamBatchCursorHelperTest.java new file mode 100644 index 00000000000..1ddfbc0277b --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/ChangeStreamBatchCursorHelperTest.java @@ -0,0 +1,79 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoChangeStreamException; +import com.mongodb.MongoClientException; +import com.mongodb.MongoCommandException; +import com.mongodb.MongoCursorNotFoundException; +import com.mongodb.MongoInterruptedException; +import com.mongodb.MongoNotPrimaryException; +import com.mongodb.MongoServerUnavailableException; +import com.mongodb.MongoSocketException; +import com.mongodb.MongoSocketReadTimeoutException; +import com.mongodb.ServerAddress; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.junit.jupiter.api.Test; + +import java.io.IOException; + +import static com.mongodb.internal.operation.ChangeStreamBatchCursorHelper.RESUMABLE_CHANGE_STREAM_ERROR_LABEL; +import static com.mongodb.internal.operation.ChangeStreamBatchCursorHelper.RETRYABLE_SERVER_ERROR_CODES; +import static com.mongodb.internal.operation.ChangeStreamBatchCursorHelper.isResumableError; +import static com.mongodb.internal.operation.ServerVersionHelper.FOUR_DOT_FOUR_WIRE_VERSION; +import static com.mongodb.internal.operation.ServerVersionHelper.FOUR_DOT_TWO_WIRE_VERSION; +import static java.util.Collections.singletonList; +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class ChangeStreamBatchCursorHelperTest { + @Test + public void testIsResumableError() { + assertAll( + () -> assertFalse(isResumableError(new IllegalStateException(), FOUR_DOT_FOUR_WIRE_VERSION)), + () -> assertFalse(isResumableError(new MongoChangeStreamException(""), FOUR_DOT_FOUR_WIRE_VERSION)), + () -> assertFalse(isResumableError(new MongoInterruptedException("", new InterruptedException()), + FOUR_DOT_FOUR_WIRE_VERSION)), + + () -> assertTrue(isResumableError(new 
MongoNotPrimaryException(new BsonDocument(), new ServerAddress()), + FOUR_DOT_FOUR_WIRE_VERSION)), + () -> assertTrue(isResumableError(new MongoCursorNotFoundException(1L, new BsonDocument("ok", new BsonInt32(0)) + .append("code", new BsonInt32(43)), new ServerAddress()), FOUR_DOT_FOUR_WIRE_VERSION)), + () -> assertTrue(isResumableError(new MongoSocketException("", new ServerAddress()), FOUR_DOT_FOUR_WIRE_VERSION)), + () -> assertTrue(isResumableError(new MongoSocketReadTimeoutException("", new ServerAddress(), new IOException()), + FOUR_DOT_FOUR_WIRE_VERSION)), + () -> assertTrue(isResumableError(new MongoClientException(""), FOUR_DOT_FOUR_WIRE_VERSION)), + () -> assertTrue(isResumableError(new MongoServerUnavailableException(""), FOUR_DOT_FOUR_WIRE_VERSION)), + + () -> assertTrue(isResumableError(new MongoCommandException(new BsonDocument("ok", new BsonInt32(0)) + .append("code", new BsonInt32(1000)) + .append("errorLabels", new BsonArray(singletonList(new BsonString(RESUMABLE_CHANGE_STREAM_ERROR_LABEL)))), + new ServerAddress()), FOUR_DOT_FOUR_WIRE_VERSION)), + () -> assertFalse(isResumableError(new MongoCommandException(new BsonDocument("ok", new BsonInt32(0)) + .append("code", new BsonInt32(RETRYABLE_SERVER_ERROR_CODES.get(0))), + new ServerAddress()), FOUR_DOT_FOUR_WIRE_VERSION)), + + () -> assertTrue(isResumableError(new MongoCommandException(new BsonDocument("ok", new BsonInt32(0)) + .append("code", new BsonInt32(RETRYABLE_SERVER_ERROR_CODES.get(0))), + new ServerAddress()), FOUR_DOT_TWO_WIRE_VERSION)) + ); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/ChangeStreamBatchCursorSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/ChangeStreamBatchCursorSpecification.groovy new file mode 100644 index 00000000000..09c6ff221b6 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/ChangeStreamBatchCursorSpecification.groovy @@ -0,0 +1,70 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation + +import com.mongodb.internal.binding.ReadBinding +import org.bson.BsonDocument +import org.bson.BsonInt32 +import spock.lang.Specification + +import static java.util.Collections.emptyList + +class ChangeStreamBatchCursorSpecification extends Specification { + + def 'should call the underlying CommandBatchCursor'() { + given: + def changeStreamOperation = Stub(ChangeStreamOperation) + def binding = Stub(ReadBinding) + def wrapped = Mock(CommandBatchCursor) + def resumeToken = new BsonDocument('_id': new BsonInt32(1)) + def cursor = new ChangeStreamBatchCursor(changeStreamOperation, wrapped, binding, resumeToken, + ServerVersionHelper.FOUR_DOT_FOUR_WIRE_VERSION) + + when: + cursor.setBatchSize(10) + + then: + 1 * wrapped.setBatchSize(10) + + when: + cursor.tryNext() + + then: + 1 * wrapped.tryNext() + 1 * wrapped.getPostBatchResumeToken() + + when: + cursor.next() + + then: + 1 * wrapped.next() >> emptyList() + 1 * wrapped.getPostBatchResumeToken() + + when: + cursor.close() + + then: + 1 * wrapped.close() + + when: + cursor.close() + + then: + 0 * wrapped.close() + } + +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/ChangeStreamBatchCursorTest.java b/driver-core/src/test/unit/com/mongodb/internal/operation/ChangeStreamBatchCursorTest.java new file mode 100644 index 00000000000..48c3a50e79a --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/ChangeStreamBatchCursorTest.java @@ -0,0 +1,332 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.internal.operation; + +import com.mongodb.MongoException; +import com.mongodb.MongoNotPrimaryException; +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.ServerAddress; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.binding.ConnectionSource; +import com.mongodb.internal.binding.ReadBinding; +import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.connection.OperationContext; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.Document; +import org.bson.RawBsonDocument; +import org.bson.codecs.DocumentCodec; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static com.mongodb.internal.operation.CommandBatchCursorHelper.MESSAGE_IF_CLOSED_AS_CURSOR; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.clearInvocations; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +final class ChangeStreamBatchCursorTest { + + private static final List RESULT_FROM_NEW_CURSOR = new ArrayList<>(); + private final int maxWireVersion = ServerVersionHelper.SIX_DOT_ZERO_WIRE_VERSION; + private ServerDescription serverDescription; + private TimeoutContext timeoutContext; + private OperationContext operationContext; + private Connection connection; + private ConnectionSource connectionSource; + private ReadBinding readBinding; + private BsonDocument resumeToken; + private CommandBatchCursor commandBatchCursor; + private CommandBatchCursor newCommandBatchCursor; + private ChangeStreamBatchCursor newChangeStreamCursor; + private ChangeStreamOperation changeStreamOperation; + + @Test + @DisplayName("should return result on next") + void shouldReturnResultOnNext() { + when(commandBatchCursor.next()).thenReturn(RESULT_FROM_NEW_CURSOR); + ChangeStreamBatchCursor cursor = createChangeStreamCursor(); + + //when + List next = cursor.next(); + + //then + assertEquals(RESULT_FROM_NEW_CURSOR, next); + verify(timeoutContext, times(1)).resetTimeoutIfPresent(); + verify(commandBatchCursor, times(1)).next(); + verify(commandBatchCursor, atLeastOnce()).getPostBatchResumeToken(); + verifyNoMoreInteractions(commandBatchCursor); + verify(changeStreamOperation, times(1)).getDecoder(); + verifyNoMoreInteractions(changeStreamOperation); + } + + @Test + @DisplayName("should throw timeout exception without resume attempt on next") + void shouldThrowTimeoutExceptionWithoutResumeAttemptOnNext() { + when(commandBatchCursor.next()).thenThrow(new MongoOperationTimeoutException("timeout")); + ChangeStreamBatchCursor cursor = createChangeStreamCursor(); + //when + assertThrows(MongoOperationTimeoutException.class, cursor::next); + + //then + verify(timeoutContext, times(1)).resetTimeoutIfPresent(); + verify(commandBatchCursor, times(1)).next(); + 
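// the timeout is not treated as a resumable error, so no resume attempt is expected to follow +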
verify(commandBatchCursor, atLeastOnce()).getPostBatchResumeToken(); + verifyNoMoreInteractions(commandBatchCursor); + verifyNoResumeAttemptCalled(); + } + + @Test + @DisplayName("should perform resume attempt on next when resumable error is thrown") + void shouldPerformResumeAttemptOnNextWhenResumableErrorIsThrown() { + when(commandBatchCursor.next()).thenThrow(new MongoNotPrimaryException(new BsonDocument(), new ServerAddress())); + ChangeStreamBatchCursor cursor = createChangeStreamCursor(); + //when + List next = cursor.next(); + + //then + assertEquals(RESULT_FROM_NEW_CURSOR, next); + verify(timeoutContext, times(1)).resetTimeoutIfPresent(); + verify(commandBatchCursor, times(1)).next(); + verify(commandBatchCursor, atLeastOnce()).getPostBatchResumeToken(); + verifyResumeAttemptCalled(); + verify(changeStreamOperation, times(1)).getDecoder(); + verify(newCommandBatchCursor, times(1)).next(); + verify(newCommandBatchCursor, atLeastOnce()).getPostBatchResumeToken(); + verifyNoMoreInteractions(newCommandBatchCursor); + verifyNoMoreInteractions(changeStreamOperation); + } + + + @Test + @DisplayName("should resume only once on subsequent calls after timeout error") + void shouldResumeOnlyOnceOnSubsequentCallsAfterTimeoutError() { + when(commandBatchCursor.next()).thenThrow(new MongoOperationTimeoutException("timeout")); + ChangeStreamBatchCursor cursor = createChangeStreamCursor(); + //when + assertThrows(MongoOperationTimeoutException.class, cursor::next); + + //then + verify(timeoutContext, times(1)).resetTimeoutIfPresent(); + verify(commandBatchCursor, times(1)).next(); + verify(commandBatchCursor, atLeastOnce()).getPostBatchResumeToken(); + verifyNoMoreInteractions(commandBatchCursor); + verifyNoResumeAttemptCalled(); + clearInvocations(commandBatchCursor, newCommandBatchCursor, timeoutContext, changeStreamOperation, readBinding); + + //when the second next is called, a resume is attempted. + List next = cursor.next(); + + //then + assertEquals(Collections.emptyList(), next); + verify(timeoutContext, times(1)).resetTimeoutIfPresent(); + verify(commandBatchCursor, times(1)).close(); + verifyNoMoreInteractions(commandBatchCursor); + verify(changeStreamOperation).setChangeStreamOptionsForResume(resumeToken, maxWireVersion); + verify(changeStreamOperation, times(1)).getDecoder(); + verify(changeStreamOperation, times(1)).execute(readBinding); + verifyNoMoreInteractions(changeStreamOperation); + verify(newCommandBatchCursor, times(1)).next(); + verify(newCommandBatchCursor, atLeastOnce()).getPostBatchResumeToken(); + clearInvocations(commandBatchCursor, newCommandBatchCursor, timeoutContext, changeStreamOperation, readBinding); + + //when the third next is called, no resume is attempted.
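+ // the resume above already swapped in the new cursor, so this read must not touch the original cursor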
+ List next2 = cursor.next(); + + //then + assertEquals(Collections.emptyList(), next2); + verifyNoInteractions(commandBatchCursor); + verify(timeoutContext, times(1)).resetTimeoutIfPresent(); + verify(newCommandBatchCursor, times(1)).next(); + verify(newCommandBatchCursor, atLeastOnce()).getPostBatchResumeToken(); + verifyNoMoreInteractions(newCommandBatchCursor); + verify(changeStreamOperation, times(1)).getDecoder(); + verifyNoMoreInteractions(changeStreamOperation); + verifyNoInteractions(readBinding); + } + + @Test + @DisplayName("should propagate any errors that occur in the aggregate operation when creating a new change stream after the previous next timed out") + void shouldPropagateAnyErrorsOccurredInAggregateOperation() { + when(commandBatchCursor.next()).thenThrow(new MongoOperationTimeoutException("timeout")); + MongoNotPrimaryException resumableError = new MongoNotPrimaryException(new BsonDocument(), new ServerAddress()); + when(changeStreamOperation.execute(readBinding)).thenThrow(resumableError); + + ChangeStreamBatchCursor cursor = createChangeStreamCursor(); + //when + assertThrows(MongoOperationTimeoutException.class, cursor::next); + clearInvocations(commandBatchCursor, newCommandBatchCursor, timeoutContext, changeStreamOperation, readBinding); + assertThrows(MongoNotPrimaryException.class, cursor::next); + + //then + verify(timeoutContext, times(1)).resetTimeoutIfPresent(); + verifyResumeAttemptCalled(); + verifyNoMoreInteractions(changeStreamOperation); + verifyNoInteractions(newCommandBatchCursor); + } + + + @Test + @DisplayName("should perform a resume attempt in subsequent next call when previous resume attempt in next timed out") + void shouldResumeAfterTimeoutInAggregateOnNextCall() { + //given + ChangeStreamBatchCursor cursor = createChangeStreamCursor(); + + //first next operation times out on getMore + when(commandBatchCursor.next()).thenThrow(new MongoOperationTimeoutException("timeout during next call")); + assertThrows(MongoOperationTimeoutException.class, cursor::next); + clearInvocations(commandBatchCursor, newCommandBatchCursor, timeoutContext, changeStreamOperation, readBinding); + + //second next operation times out on resume attempt when creating change stream + when(changeStreamOperation.execute(readBinding)).thenThrow(new MongoOperationTimeoutException("timeout during resumption")); + assertThrows(MongoOperationTimeoutException.class, cursor::next); + clearInvocations(commandBatchCursor, newCommandBatchCursor, timeoutContext, changeStreamOperation); + + doReturn(newChangeStreamCursor).when(changeStreamOperation).execute(readBinding); + + //when the third operation succeeds in resuming and calling next + List next = cursor.next(); + + //then + assertEquals(RESULT_FROM_NEW_CURSOR, next); + verify(timeoutContext, times(1)).resetTimeoutIfPresent(); + + verifyResumeAttemptCalled(); + verify(changeStreamOperation, times(1)).getDecoder(); + verifyNoMoreInteractions(changeStreamOperation); + + verify(newCommandBatchCursor, times(1)).next(); + verify(newCommandBatchCursor, atLeastOnce()).getPostBatchResumeToken(); + verifyNoMoreInteractions(newCommandBatchCursor); + } + + @Test + @DisplayName("should close change stream when resume operation fails due to non-timeout error") + void shouldCloseChangeStreamWhenResumeOperationFailsDueToNonTimeoutError() { + //given + ChangeStreamBatchCursor cursor = createChangeStreamCursor(); + + //first next operation times out on getMore + when(commandBatchCursor.next()).thenThrow(new
MongoOperationTimeoutException("timeout during next call")); + assertThrows(MongoOperationTimeoutException.class, cursor::next); + clearInvocations(commandBatchCursor, newCommandBatchCursor, timeoutContext, changeStreamOperation, readBinding); + + //when second next operation errors on resume attempt when creating change stream + when(changeStreamOperation.execute(readBinding)).thenThrow(new MongoNotPrimaryException(new BsonDocument(), new ServerAddress())); + assertThrows(MongoNotPrimaryException.class, cursor::next); + + //then + verify(timeoutContext, times(1)).resetTimeoutIfPresent(); + verifyResumeAttemptCalled(); + verifyNoMoreInteractions(changeStreamOperation); + verifyNoInteractions(newCommandBatchCursor); + clearInvocations(commandBatchCursor, newCommandBatchCursor, timeoutContext, changeStreamOperation, readBinding); + + + //when third next operation errors with cursor closed exception + doThrow(new IllegalStateException(MESSAGE_IF_CLOSED_AS_CURSOR)).when(commandBatchCursor).next(); + MongoException mongoException = assertThrows(MongoException.class, cursor::next); + + //then + assertEquals(MESSAGE_IF_CLOSED_AS_CURSOR, mongoException.getMessage()); + verify(timeoutContext, times(1)).resetTimeoutIfPresent(); + verifyNoResumeAttemptCalled(); + } + + private ChangeStreamBatchCursor createChangeStreamCursor() { + ChangeStreamBatchCursor cursor = + new ChangeStreamBatchCursor<>(changeStreamOperation, commandBatchCursor, readBinding, null, maxWireVersion); + clearInvocations(commandBatchCursor, newCommandBatchCursor, timeoutContext, changeStreamOperation, readBinding); + return cursor; + } + + private void verifyNoResumeAttemptCalled() { + verifyNoInteractions(changeStreamOperation); + verifyNoInteractions(newCommandBatchCursor); + verifyNoInteractions(readBinding); + } + + + private void verifyResumeAttemptCalled() { + verify(commandBatchCursor, times(1)).close(); + verify(changeStreamOperation).setChangeStreamOptionsForResume(resumeToken, maxWireVersion); + verify(changeStreamOperation, times(1)).execute(readBinding); + verifyNoMoreInteractions(commandBatchCursor); + } + + @BeforeEach + @SuppressWarnings("unchecked") + void setUp() { + resumeToken = new BsonDocument("_id", new BsonInt32(1)); + serverDescription = mock(ServerDescription.class); + when(serverDescription.getMaxWireVersion()).thenReturn(maxWireVersion); + + timeoutContext = mock(TimeoutContext.class); + when(timeoutContext.hasTimeoutMS()).thenReturn(true); + doNothing().when(timeoutContext).resetTimeoutIfPresent(); + + operationContext = mock(OperationContext.class); + when(operationContext.getTimeoutContext()).thenReturn(timeoutContext); + connection = mock(Connection.class); + when(connection.command(any(), any(), any(), any(), any(), any())).thenReturn(null); + connectionSource = mock(ConnectionSource.class); + when(connectionSource.getConnection()).thenReturn(connection); + when(connectionSource.release()).thenReturn(1); + when(connectionSource.getServerDescription()).thenReturn(serverDescription); + + readBinding = mock(ReadBinding.class); + when(readBinding.getOperationContext()).thenReturn(operationContext); + when(readBinding.retain()).thenReturn(readBinding); + when(readBinding.release()).thenReturn(1); + when(readBinding.getReadConnectionSource()).thenReturn(connectionSource); + + + commandBatchCursor = mock(CommandBatchCursor.class); + when(commandBatchCursor.getPostBatchResumeToken()).thenReturn(resumeToken); + doNothing().when(commandBatchCursor).close(); + + newCommandBatchCursor = 
mock(CommandBatchCursor.class); + when(newCommandBatchCursor.getPostBatchResumeToken()).thenReturn(resumeToken); + when(newCommandBatchCursor.next()).thenReturn(RESULT_FROM_NEW_CURSOR); + doNothing().when(newCommandBatchCursor).close(); + + newChangeStreamCursor = mock(ChangeStreamBatchCursor.class); + when(newChangeStreamCursor.getWrapped()).thenReturn(newCommandBatchCursor); + + changeStreamOperation = mock(ChangeStreamOperation.class); + when(changeStreamOperation.getDecoder()).thenReturn(new DocumentCodec()); + doNothing().when(changeStreamOperation).setChangeStreamOptionsForResume(resumeToken, maxWireVersion); + when(changeStreamOperation.execute(readBinding)).thenReturn(newChangeStreamCursor); + } + +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/CommandBatchCursorSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/CommandBatchCursorSpecification.groovy new file mode 100644 index 00000000000..c95a119134a --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/CommandBatchCursorSpecification.groovy @@ -0,0 +1,608 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation + +import com.mongodb.MongoClientSettings +import com.mongodb.MongoCommandException +import com.mongodb.MongoException +import com.mongodb.MongoNamespace +import com.mongodb.MongoSocketException +import com.mongodb.MongoSocketOpenException +import com.mongodb.ServerAddress +import com.mongodb.ServerCursor +import com.mongodb.client.cursor.TimeoutMode +import com.mongodb.connection.ConnectionDescription +import com.mongodb.connection.ServerConnectionState +import com.mongodb.connection.ServerDescription +import com.mongodb.connection.ServerType +import com.mongodb.connection.ServerVersion +import com.mongodb.internal.TimeoutContext +import com.mongodb.internal.TimeoutSettings +import com.mongodb.internal.binding.ConnectionSource +import com.mongodb.internal.connection.Connection +import com.mongodb.internal.connection.OperationContext +import org.bson.BsonArray +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonInt64 +import org.bson.BsonString +import org.bson.Document +import org.bson.codecs.DocumentCodec +import spock.lang.Specification + +import java.util.concurrent.TimeUnit + +import static com.mongodb.ReadPreference.primary +import static com.mongodb.internal.operation.CommandBatchCursorHelper.MESSAGE_IF_CLOSED_AS_CURSOR +import static com.mongodb.internal.operation.CommandBatchCursorHelper.MESSAGE_IF_CONCURRENT_OPERATION +import static com.mongodb.internal.operation.OperationUnitSpecification.getMaxWireVersionForServerVersion + +class CommandBatchCursorSpecification extends Specification { + + def 'should generate expected command with batchSize and maxTimeMS'() { + given: + def initialConnection = referenceCountedConnection() + def connection = referenceCountedConnection() + def connectionSource = 
getConnectionSource(connection) + def timeoutContext = connectionSource.getOperationContext().getTimeoutContext() + + def firstBatch = createCommandResult([]) + def expectedCommand = new BsonDocument('getMore': new BsonInt64(CURSOR_ID)) + .append('collection', new BsonString(NAMESPACE.getCollectionName())) + if (batchSize != 0) { + expectedCommand.append('batchSize', new BsonInt32(batchSize)) + } + + def reply = getMoreResponse([], 0) + + when: + def cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, firstBatch, batchSize, maxTimeMS, CODEC, + null, connectionSource, initialConnection) + + then: + 1 * timeoutContext.setMaxTimeOverride(*_) + + when: + cursor.hasNext() + + then: + 1 * connection.command(NAMESPACE.getDatabaseName(), expectedCommand, *_) >> reply + + then: + !cursor.isClosed() + + when: + cursor.close() + + then: + connection.getCount() == 0 + initialConnection.getCount() == 0 + connectionSource.getCount() == 0 + + where: + batchSize | maxTimeMS | expectedMaxTimeFieldValue + 0 | 0 | null + 2 | 0 | null + 0 | 100 | 100 + } + + def 'should close the cursor'() { + given: + def initialConnection = referenceCountedConnection() + def serverVersion = new ServerVersion([3, 6, 0]) + def connection = referenceCountedConnection(serverVersion) + def connectionSource = getConnectionSource(connection) + def cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) + + when: + cursor.close() + + then: + if (cursor.getServerCursor() != null) { + 1 * connection.command(NAMESPACE.databaseName, createKillCursorsDocument(cursor.getServerCursor()), _, primary(), *_) + } + + then: + connection.getCount() == 0 + initialConnection.getCount() == 0 + connectionSource.getCount() == 0 + + where: + firstBatch << [createCommandResult(FIRST_BATCH, 42), createCommandResult(FIRST_BATCH, 0)] + } + + def 'should return the expected results from next'() { + given: + def initialConnection = referenceCountedConnection() + def connection = referenceCountedConnection() + def connectionSource = getConnectionSource(connection) + + when: + def firstBatch = createCommandResult(FIRST_BATCH, 0) + def cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) + + then: + cursor.next() == FIRST_BATCH + + then: + initialConnection.getCount() == 0 + connectionSource.getCount() == 0 + + then: + // Unlike the AsyncCommandBatchCursor - the cursor isn't automatically closed + !cursor.isClosed() + } + + def 'should handle getMore when there are empty results but there is a cursor'() { + given: + def initialConnection = referenceCountedConnection() + def connectionA = referenceCountedConnection(serverVersion, 'connectionA') + def connectionB = referenceCountedConnection(serverVersion, 'connectionB') + def connectionSource = getConnectionSource(connectionA, connectionB) + + when: + def firstBatch = createCommandResult([], CURSOR_ID) + def cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) + def batch = cursor.next() + + then: + 1 * connectionA.command(*_) >> { + connectionA.getCount() == 1 + connectionSource.getCount() == 1 + response + } + + 1 * connectionB.command(*_) >> { + connectionB.getCount() == 1 + connectionSource.getCount() == 1 + response2 + } + + then: + batch == SECOND_BATCH + + then: + connectionA.getCount() == 0 + connectionB.getCount() == 0 + initialConnection.getCount() == 0 + 
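// every reference-counted resource involved in the getMore exchange should be released back to zero +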
connectionSource.getCount() == 0 + + when: + cursor.close() + + then: + 0 * connectionA._ + 0 * connectionB._ + initialConnection.getCount() == 0 + connectionSource.getCount() == 0 + + where: + serverVersion | response | response2 + new ServerVersion([3, 6, 0]) | getMoreResponse([]) | getMoreResponse(SECOND_BATCH, 0) + } + + def 'should close cursor after getMore finishes if cursor was closed while getMore was in progress and getMore returns a response'() { + given: + def serverVersion = new ServerVersion([3, 6, 0]) + def initialConnection = referenceCountedConnection(serverVersion, 'connectionOri', serverType) + def connectionA = referenceCountedConnection(serverVersion, 'connectionA', serverType) + def connectionB = referenceCountedConnection(serverVersion, 'connectionB', serverType) + def connectionSource = getConnectionSource(serverType, connectionA, connectionB) + + def firstConnection = serverType == ServerType.LOAD_BALANCER ? initialConnection : connectionA + def secondConnection = serverType == ServerType.LOAD_BALANCER ? initialConnection : connectionB + + def firstBatch = createCommandResult() + + when: + def cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) + List batch = cursor.next() + + then: + batch == FIRST_BATCH + + when: + cursor.next() + + then: + // simulate the user calling `close` while `getMore` is in flight + // in LB mode the same connection is used to execute both `getMore` and `killCursors` + 1 * firstConnection.command(*_) >> { + // `getMore` command + cursor.close() + getMoreResponse([], responseCursorId) + } + + then: + if (responseCursorId > 0) { + 1 * secondConnection.command(*_) >> null + } + + then: + IllegalStateException e = thrown() + e.getMessage() == MESSAGE_IF_CLOSED_AS_CURSOR + + then: + connectionA.getCount() == 0 + connectionB.getCount() == 0 + initialConnection.getCount() == 0 + connectionSource.getCount() == 0 + cursor.isClosed() + + where: + serverType | responseCursorId + ServerType.LOAD_BALANCER | 42 + ServerType.LOAD_BALANCER | 0 + ServerType.STANDALONE | 42 + ServerType.STANDALONE | 0 + } + + def 'should throw concurrent operation illegal state exception'() { + given: + def serverVersion = new ServerVersion([3, 6, 0]) + def initialConnection = referenceCountedConnection(serverVersion, 'connectionOri') + def connectionA = referenceCountedConnection(serverVersion, 'connectionA') + def connectionB = referenceCountedConnection(serverVersion, 'connectionB') + def connectionSource = getConnectionSource(connectionA, connectionB) + + when: + def cursor = new CommandBatchCursor(TimeoutMode.CURSOR_LIFETIME, createCommandResult(FIRST_BATCH, 42), 0, 0, CODEC, + null, connectionSource, initialConnection) + def batch = cursor.next() + + then: + batch == FIRST_BATCH + + when: + cursor.next() + + then: + // simulate the user calling `cursor.next()` while `getMore` is in flight + 1 * connectionA.command(*_) >> { + // `getMore` command + cursor.next() + } + + then: + def exception = thrown(IllegalStateException) + exception.getMessage() == MESSAGE_IF_CONCURRENT_OPERATION + } + + def 'should close cursor after getMore finishes if cursor was closed while getMore was in progress and getMore throws exception'() { + given: + def serverVersion = new ServerVersion([4, 4, 0]) + def initialConnection = referenceCountedConnection(serverVersion, 'connectionOri', serverType) + def connectionA = referenceCountedConnection(serverVersion, 'connectionA', serverType) + def connectionB = 
referenceCountedConnection(serverVersion, 'connectionB', serverType) + def connectionSource = getConnectionSource(serverType, connectionA, connectionB) + + def firstConnection = serverType == ServerType.LOAD_BALANCER ? initialConnection : connectionA + def secondConnection = serverType == ServerType.LOAD_BALANCER ? initialConnection : connectionB + + def firstBatch = createCommandResult() + + when: + def cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) + def batch = cursor.next() + + then: + batch == FIRST_BATCH + + when: + cursor.next() + + then: + 1 * firstConnection.command(*_) >> { + // Simulate the user calling close while the getMore is throwing a MongoException + cursor.close() + throw MONGO_EXCEPTION + } + + then: + 1 * secondConnection.command(*_) >> { + // `killCursors` command + null + } + + then: + thrown(MongoException) + + then: + connectionA.getCount() == 0 + cursor.isClosed() + + where: + serverType << [ServerType.LOAD_BALANCER, ServerType.STANDALONE] + } + + def 'should handle errors when calling close'() { + given: + def initialConnection = referenceCountedConnection() + def connectionSource = getConnectionSourceWithResult(ServerType.STANDALONE) { throw MONGO_EXCEPTION } + def firstBatch = createCommandResult() + def cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) + + when: + cursor.close() + + then: + cursor.isClosed() + initialConnection.getCount() == 0 + connectionSource.getCount() == 0 + } + + + def 'should handle errors when getting a connection for getMore'() { + given: + def initialConnection = referenceCountedConnection() + def connection = referenceCountedConnection() + def connectionSource = getConnectionSourceWithResult(ServerType.STANDALONE) { throw MONGO_EXCEPTION } + + when: + def firstBatch = createCommandResult() + def cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) + + then: + cursor.next() + + when: + cursor.hasNext() + + then: + thrown(MongoException) + + then: + connection.getCount() == 0 + connectionSource.getCount() == 1 + } + + def 'should handle errors when calling getMore'() { + given: + def initialConnection = referenceCountedConnection() + def serverVersion = new ServerVersion([3, 6, 0]) + def connectionA = referenceCountedConnection(serverVersion, 'connectionA') + def connectionB = referenceCountedConnection(serverVersion, 'connectionB') + def connectionSource = getConnectionSource(connectionA, connectionB) + + when: + def firstBatch = createCommandResult() + def cursor = new CommandBatchCursor<>(TimeoutMode.CURSOR_LIFETIME, firstBatch, 0, 0, CODEC, + null, connectionSource, initialConnection) + + then: + connectionSource.getCount() == 1 + + when: + cursor.next() + cursor.next() + + then: + 1 * connectionA.command(*_) >> { + connectionA.getCount() == 1 + connectionSource.getCount() == 1 + throw exception + } + + then: + thrown(MongoException) + + then: + connectionA.getCount() == 0 + connectionSource.getCount() == 1 + + when: + cursor.close() + + then: + 1 * connectionB.command(*_) >> { + connectionB.getCount() == 1 + connectionSource.getCount() == 1 + null + } + + then: + connectionA.getCount() == 0 + connectionB.getCount() == 0 + initialConnection.getCount() == 0 + connectionSource.getCount() == 0 + + where: + exception << [COMMAND_EXCEPTION, MONGO_EXCEPTION] + } + + def 'should handle 
exceptions when closing'() { + given: + def initialConnection = referenceCountedConnection() + def connection = Mock(Connection) { + _ * getDescription() >> Stub(ConnectionDescription) { + getMaxWireVersion() >> 4 + } + _ * command(*_) >> { throw new MongoSocketException('No MongoD', SERVER_ADDRESS) } + } + def connectionSource = Stub(ConnectionSource) { + getServerApi() >> null + getConnection() >> { connection } + } + connectionSource.retain() >> connectionSource + + def initialResults = createCommandResult([]) + def cursor = new CommandBatchCursor(TimeoutMode.CURSOR_LIFETIME, initialResults, 2, 100, CODEC, + null, connectionSource, initialConnection) + + when: + cursor.close() + + then: + notThrown(MongoSocketException) + + when: + cursor.close() + + then: + notThrown(Exception) + } + + def 'should handle exceptions when killing cursor and a connection can not be obtained'() { + given: + def initialConnection = referenceCountedConnection() + def connectionSource = Stub(ConnectionSource) { + getConnection() >> { throw new MongoSocketOpenException("can't open socket", SERVER_ADDRESS, new IOException()) } + getServerApi() >> null + } + connectionSource.retain() >> connectionSource + + def initialResults = createCommandResult([]) + def cursor = new CommandBatchCursor(TimeoutMode.CURSOR_LIFETIME, initialResults, 2, 100, CODEC, + null, connectionSource, initialConnection) + + when: + cursor.close() + + then: + notThrown(MongoSocketException) + + when: + cursor.close() + + then: + notThrown(Exception) + } + + private static final MongoNamespace NAMESPACE = new MongoNamespace('db', 'coll') + private static final ServerAddress SERVER_ADDRESS = new ServerAddress() + private static final CURSOR_ID = 42 + private static final FIRST_BATCH = [new Document('_id', 1), new Document('_id', 2)] + private static final SECOND_BATCH = [new Document('_id', 3), new Document('_id', 4)] + private static final CODEC = new DocumentCodec() + private static final MONGO_EXCEPTION = new MongoException('error') + private static final COMMAND_EXCEPTION = new MongoCommandException(BsonDocument.parse('{"ok": false, "errmsg": "error"}'), + SERVER_ADDRESS) + + + private static BsonDocument getMoreResponse(results, cursorId = CURSOR_ID) { + createCommandResult(results, cursorId, "nextBatch") + } + + private static BsonDocument createCommandResult(List results = FIRST_BATCH, Long cursorId = CURSOR_ID, + String fieldNameContainingBatch = "firstBatch") { + new BsonDocument("ok", new BsonInt32(1)) + .append("cursor", + new BsonDocument("ns", new BsonString(NAMESPACE.fullName)) + .append("id", new BsonInt64(cursorId)) + .append(fieldNameContainingBatch, new BsonArrayWrapper(results))) + } + + private static BsonDocument createKillCursorsDocument(ServerCursor serverCursor) { + new BsonDocument('killCursors', new BsonString(NAMESPACE.getCollectionName())) + .append('cursors', new BsonArray(Collections.singletonList(new BsonInt64(serverCursor.id)))) + } + + Connection referenceCountedConnection() { + referenceCountedConnection(new ServerVersion([3, 6, 0])) + } + + Connection referenceCountedConnection(ServerVersion serverVersion, String name = 'connection', + ServerType serverType = ServerType.STANDALONE) { + def released = false + def counter = 0 + def mock = Mock(Connection, name: name) { + _ * getDescription() >> Stub(ConnectionDescription) { + getMaxWireVersion() >> getMaxWireVersionForServerVersion(serverVersion.getVersionList()) + getServerType() >> serverType + } + } + mock.retain() >> { + if (released) { + throw new 
IllegalStateException('Tried to retain Connection when already released') + } else { + counter += 1 + } + mock + } + mock.release() >> { + counter -= 1 + if (counter == 0) { + released = true + } else if (counter < 0) { + throw new IllegalStateException('Tried to release Connection below 0') + } + counter + } + mock.getCount() >> { counter } + mock + } + + ConnectionSource getConnectionSource(Connection... connections) { + getConnectionSource(ServerType.STANDALONE, connections) + } + + ConnectionSource getConnectionSource(ServerType serverType, Connection... connections) { + def index = -1 + getConnectionSourceWithResult(serverType) { index += 1; connections.toList().get(index).retain() } + } + + def getConnectionSourceWithResult(ServerType serverType, Closure connectionCallbackResults) { + def released = false + int counter = 0 + def mock = Mock(ConnectionSource) + mock.getServerDescription() >> { + ServerDescription.builder() + .address(new ServerAddress()) + .type(serverType) + .state(ServerConnectionState.CONNECTED) + .build() + } + OperationContext operationContext = Mock(OperationContext) + def timeoutContext = Spy(new TimeoutContext(TimeoutSettings.create( + MongoClientSettings.builder().timeout(3, TimeUnit.SECONDS).build()))) + operationContext.getTimeoutContext() >> timeoutContext + mock.getOperationContext() >> operationContext + mock.getConnection() >> { + if (counter == 0) { + throw new IllegalStateException('Tried to use released ConnectionSource') + } + connectionCallbackResults() + } + mock.retain() >> { + if (released) { + throw new IllegalStateException('Tried to retain ConnectionSource when already released') + } else { + counter += 1 + } + mock + } + mock.release() >> { + counter -= 1 + if (counter == 0) { + released = true + } else if (counter < 0) { + throw new IllegalStateException('Tried to release ConnectionSource below 0') + } + counter + } + mock.getCount() >> { counter } + mock + } + +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/CommandBatchCursorTest.java b/driver-core/src/test/unit/com/mongodb/internal/operation/CommandBatchCursorTest.java new file mode 100644 index 00000000000..c3bec291432 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/CommandBatchCursorTest.java @@ -0,0 +1,232 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation; + +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoNamespace; +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.MongoSocketException; +import com.mongodb.ServerAddress; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerType; +import com.mongodb.connection.ServerVersion; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.binding.ConnectionSource; +import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.connection.OperationContext; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonInt64; +import org.bson.BsonString; +import org.bson.Document; +import org.bson.codecs.Decoder; +import org.bson.codecs.DocumentCodec; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import java.time.Duration; + +import static com.mongodb.internal.operation.OperationUnitSpecification.getMaxWireVersionForServerVersion; +import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.argThat; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +class CommandBatchCursorTest { + + private static final MongoNamespace NAMESPACE = new MongoNamespace("test", "test"); + private static final BsonInt64 CURSOR_ID = new BsonInt64(1); + private static final BsonDocument COMMAND_CURSOR_DOCUMENT = new BsonDocument("ok", new BsonInt32(1)) + .append("cursor", + new BsonDocument("ns", new BsonString(NAMESPACE.getFullName())) + .append("id", CURSOR_ID) + .append("firstBatch", new BsonArrayWrapper<>(new BsonArray()))); + + private static final Decoder DOCUMENT_CODEC = new DocumentCodec(); + private static final Duration TIMEOUT = Duration.ofMillis(3_000); + + private Connection mockConnection; + private ConnectionDescription mockDescription; + private ConnectionSource connectionSource; + private OperationContext operationContext; + private TimeoutContext timeoutContext; + private ServerDescription serverDescription; + + @BeforeEach + void setUp() { + ServerVersion serverVersion = new ServerVersion(3, 6); + + mockConnection = mock(Connection.class, "connection"); + mockDescription = mock(ConnectionDescription.class); + when(mockDescription.getMaxWireVersion()).thenReturn(getMaxWireVersionForServerVersion(serverVersion.getVersionList())); + when(mockDescription.getServerType()).thenReturn(ServerType.LOAD_BALANCER); + when(mockConnection.getDescription()).thenReturn(mockDescription); + when(mockConnection.retain()).thenReturn(mockConnection); + + connectionSource = mock(ConnectionSource.class); + operationContext = mock(OperationContext.class); + timeoutContext = new 
TimeoutContext(TimeoutSettings.create( + MongoClientSettings.builder().timeout(TIMEOUT.toMillis(), MILLISECONDS).build())); + serverDescription = mock(ServerDescription.class); + when(operationContext.getTimeoutContext()).thenReturn(timeoutContext); + when(connectionSource.getOperationContext()).thenReturn(operationContext); + when(connectionSource.getConnection()).thenReturn(mockConnection); + when(connectionSource.getServerDescription()).thenReturn(serverDescription); + } + + + @Test + void shouldSkipKillsCursorsCommandWhenNetworkErrorOccurs() { + //given + when(mockConnection.command(eq(NAMESPACE.getDatabaseName()), any(), any(), any(), any(), any())).thenThrow( + new MongoSocketException("test", new ServerAddress())); + when(serverDescription.getType()).thenReturn(ServerType.LOAD_BALANCER); + + CommandBatchCursor commandBatchCursor = createBatchCursor(0); + //when + assertThrows(MongoSocketException.class, commandBatchCursor::next); + + //then + commandBatchCursor.close(); + verify(mockConnection, times(1)).command(eq(NAMESPACE.getDatabaseName()), any(), any(), any(), any(), any()); + } + + private CommandBatchCursor createBatchCursor(final long maxTimeMS) { + return new CommandBatchCursor<>( + TimeoutMode.CURSOR_LIFETIME, + COMMAND_CURSOR_DOCUMENT, + 0, + maxTimeMS, + DOCUMENT_CODEC, + null, + connectionSource, + mockConnection); + } + + @Test + void shouldNotSkipKillsCursorsCommandWhenTimeoutExceptionDoesNotHaveNetworkErrorCause() { + //given + when(mockConnection.command(eq(NAMESPACE.getDatabaseName()), any(), any(), any(), any(), any())).thenThrow( + new MongoOperationTimeoutException("test")); + when(serverDescription.getType()).thenReturn(ServerType.LOAD_BALANCER); + + CommandBatchCursor commandBatchCursor = createBatchCursor(0); + + //when + assertThrows(MongoOperationTimeoutException.class, commandBatchCursor::next); + + commandBatchCursor.close(); + + + //then + verify(mockConnection, times(2)).command(any(), + any(), any(), any(), any(), any()); + verify(mockConnection, times(1)).command(eq(NAMESPACE.getDatabaseName()), + argThat(bsonDocument -> bsonDocument.containsKey("getMore")), any(), any(), any(), any()); + verify(mockConnection, times(1)).command(eq(NAMESPACE.getDatabaseName()), + argThat(bsonDocument -> bsonDocument.containsKey("killCursors")), any(), any(), any(), any()); + } + + @Test + void shouldSkipKillsCursorsCommandWhenTimeoutExceptionHaveNetworkErrorCause() { + //given + when(mockConnection.command(eq(NAMESPACE.getDatabaseName()), any(), any(), any(), any(), any())).thenThrow( + new MongoOperationTimeoutException("test", new MongoSocketException("test", new ServerAddress()))); + when(serverDescription.getType()).thenReturn(ServerType.LOAD_BALANCER); + + CommandBatchCursor commandBatchCursor = createBatchCursor(0); + + //when + assertThrows(MongoOperationTimeoutException.class, commandBatchCursor::next); + commandBatchCursor.close(); + + //then + verify(mockConnection, times(1)).command(any(), + any(), any(), any(), any(), any()); + verify(mockConnection, times(1)).command(eq(NAMESPACE.getDatabaseName()), + argThat(bsonDocument -> bsonDocument.containsKey("getMore")), any(), any(), any(), any()); + verify(mockConnection, never()).command(eq(NAMESPACE.getDatabaseName()), + argThat(bsonDocument -> bsonDocument.containsKey("killCursors")), any(), any(), any(), any()); + } + + @Test + @SuppressWarnings("try") + void closeShouldResetTimeoutContextToDefaultMaxTime() { + long maxTimeMS = 10; + com.mongodb.assertions.Assertions.assertTrue(maxTimeMS < 
TIMEOUT.toMillis()); + try (CommandBatchCursor commandBatchCursor = createBatchCursor(maxTimeMS)) { + // verify that the `maxTimeMS` override was applied + timeoutContext.runMaxTimeMS(remainingMillis -> assertTrue(remainingMillis <= maxTimeMS)); + } catch (Exception e) { + throw new RuntimeException(e); + } + timeoutContext.runMaxTimeMS(remainingMillis -> { + // verify that the `maxTimeMS` override was reset + assertTrue(remainingMillis > maxTimeMS); + assertTrue(remainingMillis <= TIMEOUT.toMillis()); + }); + } + + @ParameterizedTest + @ValueSource(booleans = {false, true}) + void closeShouldNotResetOriginalTimeout(final boolean disableTimeoutResetWhenClosing) { + Duration thirdOfTimeout = TIMEOUT.dividedBy(3); + com.mongodb.assertions.Assertions.assertTrue(thirdOfTimeout.toMillis() > 0); + try (CommandBatchCursor commandBatchCursor = createBatchCursor(0)) { + if (disableTimeoutResetWhenClosing) { + commandBatchCursor.disableTimeoutResetWhenClosing(); + } + try { + Thread.sleep(thirdOfTimeout.toMillis()); + } catch (InterruptedException e) { + throw interruptAndCreateMongoInterruptedException(null, e); + } + when(mockConnection.release()).then(invocation -> { + Thread.sleep(thirdOfTimeout.toMillis()); + return null; + }); + } catch (Exception e) { + throw new RuntimeException(e); + } + verify(mockConnection, times(1)).release(); + // at this point at least (2 * thirdOfTimeout) have passed + com.mongodb.assertions.Assertions.assertNotNull(timeoutContext.getTimeout()).run( + MILLISECONDS, + com.mongodb.assertions.Assertions::fail, + remainingMillis -> { + // Verify that the original timeout has been left intact. + // If `close` had reset it, we would have observed more than `thirdOfTimeout` left. + assertTrue(remainingMillis <= thirdOfTimeout.toMillis()); + }, + Assertions::fail); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/CommandOperationHelperSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/CommandOperationHelperSpecification.groovy new file mode 100644 index 00000000000..38b3ad48f25 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/CommandOperationHelperSpecification.groovy @@ -0,0 +1,93 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + + package com.mongodb.internal.operation + + import com.mongodb.MongoCommandException + import com.mongodb.ServerAddress + import org.bson.BsonBoolean + import org.bson.BsonDocument + import org.bson.BsonInt32 + import org.bson.BsonString + import spock.lang.Specification + + import static com.mongodb.internal.operation.CommandOperationHelper.isNamespaceError + import static com.mongodb.internal.operation.CommandOperationHelper.rethrowIfNotNamespaceError + + class CommandOperationHelperSpecification extends Specification { + + def 'should be a namespace error if Throwable is a MongoCommandException and error code is 26'() { + expect: + isNamespaceError(new MongoCommandException(new BsonDocument('ok', BsonBoolean.FALSE) + .append('code', new BsonInt32(26)), + new ServerAddress())) + } + + def 'should be a namespace error if Throwable is a MongoCommandException and error message contains "ns not found"'() { + expect: + isNamespaceError(new MongoCommandException(new BsonDocument('ok', BsonBoolean.FALSE) + .append('errmsg', new BsonString('the ns not found here')), + new ServerAddress())) + } + + def 'should not be a namespace error if Throwable is a MongoCommandException and error message does not contain "ns not found"'() { + expect: + !isNamespaceError(new MongoCommandException(new BsonDocument('ok', BsonBoolean.FALSE) + .append('errmsg', new BsonString('some other error')), + new ServerAddress())) + } + + def 'should not be a namespace error if Throwable is not a MongoCommandException'() { + expect: + !isNamespaceError(new NullPointerException()) + } + + def 'should rethrow if not namespace error'() { + when: + rethrowIfNotNamespaceError(new MongoCommandException(new BsonDocument('ok', BsonBoolean.FALSE) + .append('errmsg', new BsonString('some other error')), + new ServerAddress())) + + then: + thrown(MongoCommandException) + + when: + rethrowIfNotNamespaceError(new MongoCommandException(new BsonDocument('ok', BsonBoolean.FALSE) + .append('errmsg', new BsonString('some other error')), + new ServerAddress()), 'some value') + + then: + thrown(MongoCommandException) + } + + def 'should not rethrow if namespace error'() { + when: + rethrowIfNotNamespaceError(new MongoCommandException(new BsonDocument('ok', BsonBoolean.FALSE) + .append('code', new BsonInt32(26)), + new ServerAddress())) + + then: + notThrown(MongoCommandException) + } + + def 'should return default value if not namespace error'() { + expect: + rethrowIfNotNamespaceError(new MongoCommandException(new BsonDocument('ok', BsonBoolean.FALSE) + .append('code', new BsonInt32(26)), + new ServerAddress()), 'some value') == 'some value' + } + +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/CommitTransactionOperationUnitSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/CommitTransactionOperationUnitSpecification.groovy new file mode 100644 index 00000000000..21ae1c4dfb9 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/CommitTransactionOperationUnitSpecification.groovy @@ -0,0 +1,74 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation + +import com.mongodb.MongoException +import com.mongodb.MongoTimeoutException +import com.mongodb.ReadConcern +import com.mongodb.WriteConcern +import com.mongodb.async.FutureResultCallback +import com.mongodb.internal.binding.AsyncWriteBinding +import com.mongodb.internal.binding.WriteBinding +import com.mongodb.internal.session.SessionContext + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT + +class CommitTransactionOperationUnitSpecification extends OperationUnitSpecification { + def 'should add UnknownTransactionCommitResult error label to MongoTimeoutException'() { + given: + def sessionContext = Stub(SessionContext) { + getReadConcern() >> ReadConcern.DEFAULT + hasActiveTransaction() >> true + } + def writeBinding = Stub(WriteBinding) { + getWriteConnectionSource() >> { throw new MongoTimeoutException('Time out!') } + getOperationContext() >> OPERATION_CONTEXT.withSessionContext(sessionContext) + } + def operation = new CommitTransactionOperation(WriteConcern.ACKNOWLEDGED) + + when: + operation.execute(writeBinding) + + then: + def e = thrown(MongoTimeoutException) + e.hasErrorLabel(MongoException.UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL) + } + + def 'should add UnknownTransactionCommitResult error label to MongoTimeoutException asynchronously'() { + given: + def sessionContext = Stub(SessionContext) { + getReadConcern() >> ReadConcern.DEFAULT + hasActiveTransaction() >> true + } + def writeBinding = Stub(AsyncWriteBinding) { + getWriteConnectionSource(_) >> { + it[0].onResult(null, new MongoTimeoutException('Time out!')) + } + getOperationContext() >> OPERATION_CONTEXT.withSessionContext(sessionContext) + } + def operation = new CommitTransactionOperation(WriteConcern.ACKNOWLEDGED) + def callback = new FutureResultCallback() + + when: + operation.executeAsync(writeBinding, callback) + callback.get() + + then: + def e = thrown(MongoTimeoutException) + e.hasErrorLabel(MongoException.UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL) + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/CursorResourceManagerTest.java b/driver-core/src/test/unit/com/mongodb/internal/operation/CursorResourceManagerTest.java new file mode 100644 index 00000000000..15a8bd972f1 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/CursorResourceManagerTest.java @@ -0,0 +1,59 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.internal.operation; + +import com.mongodb.MongoNamespace; +import com.mongodb.ServerCursor; +import com.mongodb.internal.binding.AsyncConnectionSource; +import com.mongodb.internal.binding.ReferenceCounted; +import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.mockito.MongoMockito; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.mockito.Mockito.when; + +final class CursorResourceManagerTest { + @Test + void doubleCloseExecutedConcurrentlyWithOperationBeingInProgressShouldNotFail() { + CursorResourceManager cursorResourceManager = new CursorResourceManager( + new MongoNamespace("db", "coll"), + MongoMockito.mock(AsyncConnectionSource.class, mock -> { + when(mock.retain()).thenReturn(mock); + when(mock.release()).thenReturn(1); + }), + null, + MongoMockito.mock(ServerCursor.class)) { + @Override + void markAsPinned(final ReferenceCounted connectionToPin, final Connection.PinningMode pinningMode) { + } + + @Override + void doClose() { + } + }; + cursorResourceManager.tryStartOperation(); + try { + assertDoesNotThrow(() -> { + cursorResourceManager.close(); + cursorResourceManager.close(); + cursorResourceManager.setServerCursor(null); + }); + } finally { + cursorResourceManager.endOperation(); + } + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/DeleteRequestSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/DeleteRequestSpecification.groovy new file mode 100644 index 00000000000..9ac19a8c245 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/DeleteRequestSpecification.groovy @@ -0,0 +1,72 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + + +package com.mongodb.internal.operation + +import com.mongodb.client.model.Collation +import com.mongodb.internal.bulk.DeleteRequest +import com.mongodb.internal.bulk.WriteRequest +import org.bson.BsonDocument +import org.bson.BsonInt32 +import spock.lang.Specification + +class DeleteRequestSpecification extends Specification { + + def 'should have correct type'() { + expect: + new DeleteRequest(new BsonDocument()).getType() == WriteRequest.Type.DELETE + } + + def 'should not allow null filter'() { + when: + new DeleteRequest(null) + + then: + thrown(IllegalArgumentException) + } + + def 'should set fields from constructor'() { + given: + def filter = new BsonDocument('_id', new BsonInt32(1)) + + when: + def removeRequest = new DeleteRequest(filter) + + then: + removeRequest.filter == filter + } + + def 'multi property should default to true'() { + expect: + new DeleteRequest(new BsonDocument()).multi + } + + def 'should set multi property'() { + expect: + !new DeleteRequest(new BsonDocument()).multi(false).isMulti() + } + + def 'should set collation property'() { + when: + def collation = Collation.builder().locale('en').build() + + then: + new DeleteRequest(new BsonDocument()).collation(null).getCollation() == null + new DeleteRequest(new BsonDocument()).collation(collation).getCollation() == collation + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/FindOperationUnitSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/FindOperationUnitSpecification.groovy new file mode 100644 index 00000000000..dd843985bbb --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/FindOperationUnitSpecification.groovy @@ -0,0 +1,143 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation + +import com.mongodb.MongoNamespace +import com.mongodb.ReadPreference +import org.bson.BsonBoolean +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonInt64 +import org.bson.BsonString +import org.bson.Document +import org.bson.codecs.BsonDocumentCodec +import org.bson.codecs.DocumentCodec + +import static com.mongodb.CursorType.TailableAwait + +class FindOperationUnitSpecification extends OperationUnitSpecification { + + def 'should find with correct command'() { + when: + def operation = new FindOperation(namespace, new BsonDocumentCodec()) + def expectedCommand = new BsonDocument('find', new BsonString(namespace.getCollectionName())) + + then: + testOperation(operation, [3, 2, 0], expectedCommand, async, commandResult) + // Overrides + when: + operation = new FindOperation(namespace, new BsonDocumentCodec()) + .filter(new BsonDocument('a', BsonBoolean.TRUE)) + .projection(new BsonDocument('x', new BsonInt32(1))) + .skip(2) + .limit(limit) + .batchSize(batchSize) + .cursorType(TailableAwait) + .noCursorTimeout(true) + .partial(true) + + .comment(new BsonString('my comment')) + .hint(BsonDocument.parse('{ hint : 1}')) + .min(BsonDocument.parse('{ abc: 99 }')) + .max(BsonDocument.parse('{ abc: 1000 }')) + .returnKey(true) + .showRecordId(true) + + if (allowDiskUse != null) { + operation.allowDiskUse(allowDiskUse) + } + + expectedCommand.append('filter', operation.getFilter()) + .append('projection', operation.getProjection()) + .append('skip', new BsonInt32(operation.getSkip())) + .append('tailable', BsonBoolean.TRUE) + .append('awaitData', BsonBoolean.TRUE) + .append('allowPartialResults', BsonBoolean.TRUE) + .append('noCursorTimeout', BsonBoolean.TRUE) + .append('comment', operation.getComment()) + .append('hint', operation.getHint()) + .append('min', operation.getMin()) + .append('max', operation.getMax()) + .append('returnKey', BsonBoolean.TRUE) + .append('showRecordId', BsonBoolean.TRUE) + + if (allowDiskUse != null) { + expectedCommand.append('allowDiskUse', new BsonBoolean(allowDiskUse)) + } + if (commandLimit != null) { + expectedCommand.append('limit', new BsonInt32(commandLimit)) + } + if (commandBatchSize != null) { + expectedCommand.append('batchSize', new BsonInt32(commandBatchSize)) + } + if (commandSingleBatch != null) { + expectedCommand.append('singleBatch', BsonBoolean.valueOf(commandSingleBatch)) + } + + then: + testOperation(operation, version, expectedCommand, async, commandResult) + + where: + async << [true] * 5 + [false] * 5 + [true] * 5 + [false] * 5 + limit << [100, -100, 100, 0, 100] * 4 + batchSize << [10, 10, -10, 10, 0] * 4 + commandLimit << [100, 100, 10, null, 100] * 4 + commandBatchSize << [10, null, null, 10, null] * 4 + commandSingleBatch << [null, true, true, null, null] * 4 + allowDiskUse << [null] * 10 + [true] * 10 + version << [[3, 2, 0]] * 10 + [[3, 4, 0]] * 10 + } + + def 'should find with correct command with effective batch size'() { + when: + def operation = new FindOperation(namespace, new BsonDocumentCodec()) + .batchSize(batchSize) + .limit(limit) + + def expectedCommand = new BsonDocument('find', new BsonString(namespace.getCollectionName())) + .append('batchSize', new BsonInt32(commandBatchSize)) + .append('limit', new BsonInt32(commandLimit)) + + then: + testOperation(operation, [7, 0, 0], expectedCommand, async, commandResult) + + where: + async << [true, true, false, false] + batchSize << [10, Integer.MAX_VALUE] * 2 + limit << [10, Integer.MAX_VALUE] * 2 + 
commandLimit << [10, Integer.MAX_VALUE] * 2 + commandBatchSize << [11, Integer.MAX_VALUE] * 2 + } + + def 'should use the readPreference to set secondaryOk for commands'() { + when: + def operation = new FindOperation(namespace, new DocumentCodec()) + + then: + testOperationSecondaryOk(operation, [3, 2, 0], readPreference, async, commandResult) + + where: + [async, readPreference] << [[true, false], [ReadPreference.primary(), ReadPreference.secondary()]].combinations() + } + + def namespace = new MongoNamespace('db', 'coll') + def decoder = new BsonDocumentCodec() + def readPreference = ReadPreference.secondary() + def commandResult = new BsonDocument('cursor', new BsonDocument('id', new BsonInt64(0)) + .append('ns', new BsonString('db.coll')) + .append('firstBatch', new BsonArrayWrapper([]))) +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/InsertRequestSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/InsertRequestSpecification.groovy new file mode 100644 index 00000000000..2b25fc31e46 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/InsertRequestSpecification.groovy @@ -0,0 +1,53 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + + +package com.mongodb.internal.operation + +import com.mongodb.internal.bulk.InsertRequest +import com.mongodb.internal.bulk.WriteRequest +import org.bson.BsonDocument +import org.bson.BsonInt32 +import spock.lang.Specification + +class InsertRequestSpecification extends Specification { + + def 'should have correct type'() { + expect: + new InsertRequest(new BsonDocument()).getType() == WriteRequest.Type.INSERT + } + + def 'should not allow null document'() { + when: + new InsertRequest(null) + + then: + thrown(IllegalArgumentException) + } + + + def 'should set fields from constructor'() { + given: + def document = new BsonDocument('_id', new BsonInt32(1)) + + when: + def insertRequest = new InsertRequest(document) + + then: + insertRequest.document == document + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/ListCollectionsOperationTest.java b/driver-core/src/test/unit/com/mongodb/internal/operation/ListCollectionsOperationTest.java new file mode 100644 index 00000000000..12a964db625 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/ListCollectionsOperationTest.java @@ -0,0 +1,170 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.operation; + +import com.mongodb.MongoNamespace; +import com.mongodb.ReadPreference; +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterId; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.connection.ServerConnectionState; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.ServerId; +import com.mongodb.connection.ServerType; +import com.mongodb.internal.binding.ConnectionSource; +import com.mongodb.internal.binding.ReadBinding; +import com.mongodb.internal.connection.Connection; +import com.mongodb.lang.Nullable; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonInt64; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.codecs.BsonDocumentCodec; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT; +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.internal.mockito.MongoMockito.mock; +import static java.util.Collections.emptyList; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.mockito.ArgumentCaptor.forClass; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +final class ListCollectionsOperationTest { + private ListCollectionsOperation<BsonDocument> operation; + private Mocks mocks; + + @BeforeEach + void beforeEach() { + MongoNamespace namespace = new MongoNamespace("db", "coll"); + operation = new ListCollectionsOperation<>(namespace.getDatabaseName(), new BsonDocumentCodec()); + mocks = mocks(namespace); + } + + @Test + void executedCommandIsCorrect() { + BsonDocument filter = new BsonDocument("key", new BsonString("value")); + boolean nameOnly = true; + boolean authorizedCollections = true; + int batchSize = 123; + BsonValue comment = new BsonString("comment"); + operation.filter(filter) + .nameOnly(nameOnly) + .authorizedCollections(authorizedCollections) + .batchSize(batchSize) + .comment(comment); + assertEquals( + new BsonDocument() + .append("listCollections", new BsonInt32(1)) + .append("filter", filter) + .append("nameOnly", new BsonBoolean(nameOnly)) + .append("authorizedCollections", new BsonBoolean(authorizedCollections)) + .append("cursor", new BsonDocument() + .append("batchSize", new BsonInt32(batchSize)) + ) + .append("comment", comment), + executeOperationAndCaptureCommand() + ); + } + + @Test + void authorizedCollectionsIsAbsentIfFalse() { + operation.authorizedCollections(false); + assertFalse(executeOperationAndCaptureCommand().containsKey("authorizedCollections")); + } + + @Test + void authorizedCollectionsIsFalseByDefault() { + assertFalse(executeOperationAndCaptureCommand().containsKey("authorizedCollections")); + } + + private BsonDocument executeOperationAndCaptureCommand() { + operation.execute(mocks.readBinding()); + ArgumentCaptor<BsonDocument> commandCaptor = forClass(BsonDocument.class); + verify(mocks.connection()).command(any(), commandCaptor.capture(), any(), any(), any(), any()); + return commandCaptor.getValue(); + } + + private static Mocks mocks(final MongoNamespace namespace) { + Mocks result = new Mocks(); + 
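// Assemble the mock chain: ReadBinding -> ConnectionSource -> Connection, with command(...) stubbed to return an empty-cursor reply. +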
result.readBinding(mock(ReadBinding.class, bindingMock -> { + when(bindingMock.getOperationContext()).thenReturn(OPERATION_CONTEXT); + ConnectionSource connectionSource = mock(ConnectionSource.class, connectionSourceMock -> { + when(connectionSourceMock.getOperationContext()).thenReturn(OPERATION_CONTEXT); + when(connectionSourceMock.release()).thenReturn(1); + ServerAddress serverAddress = new ServerAddress(); + result.connection(mock(Connection.class, connectionMock -> { + when(connectionMock.release()).thenReturn(1); + ConnectionDescription connectionDescription = new ConnectionDescription(new ServerId(new ClusterId(), serverAddress)); + when(connectionMock.getDescription()).thenReturn(connectionDescription); + when(connectionMock.command(any(), any(), any(), any(), any(), any())).thenReturn(cursorDoc(namespace)); + })); + when(connectionSourceMock.getConnection()).thenReturn(result.connection()); + ServerDescription serverDescription = ServerDescription.builder() + .address(serverAddress) + .type(ServerType.STANDALONE) + .state(ServerConnectionState.CONNECTED) + .build(); + when(connectionSourceMock.getServerDescription()).thenReturn(serverDescription); + when(connectionSourceMock.getReadPreference()).thenReturn(ReadPreference.primary()); + }); + when(bindingMock.getReadConnectionSource()).thenReturn(connectionSource); + })); + return result; + } + + private static BsonDocument cursorDoc(final MongoNamespace namespace) { + return new BsonDocument() + .append("cursor", new BsonDocument() + .append("firstBatch", new BsonArrayWrapper(emptyList())) + .append("ns", new BsonString(namespace.getFullName())) + .append("id", new BsonInt64(0)) + ); + } + + private static final class Mocks { + @Nullable + private ReadBinding readBinding; + @Nullable + private Connection connection; + + Mocks() { + } + + void readBinding(final ReadBinding readBinding) { + this.readBinding = readBinding; + } + + ReadBinding readBinding() { + return assertNotNull(readBinding); + } + + void connection(final Connection connection) { + this.connection = connection; + } + + Connection connection() { + return assertNotNull(connection); + } + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/OperationHelperSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/OperationHelperSpecification.groovy new file mode 100644 index 00000000000..fd9786e8dbf --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/OperationHelperSpecification.groovy @@ -0,0 +1,130 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation + +import com.mongodb.ServerAddress +import com.mongodb.client.model.Collation +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ConnectionDescription +import com.mongodb.connection.ConnectionId +import com.mongodb.connection.ServerDescription +import com.mongodb.connection.ServerId +import com.mongodb.internal.bulk.DeleteRequest +import com.mongodb.internal.bulk.UpdateRequest +import com.mongodb.internal.bulk.WriteRequest +import com.mongodb.internal.connection.AsyncConnection +import com.mongodb.internal.session.SessionContext +import org.bson.BsonArray +import org.bson.BsonDocument +import spock.lang.Specification + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.WriteConcern.ACKNOWLEDGED +import static com.mongodb.WriteConcern.UNACKNOWLEDGED +import static com.mongodb.connection.ServerConnectionState.CONNECTED +import static com.mongodb.connection.ServerType.REPLICA_SET_PRIMARY +import static com.mongodb.connection.ServerType.STANDALONE +import static com.mongodb.internal.operation.OperationHelper.canRetryRead +import static com.mongodb.internal.operation.OperationHelper.isRetryableWrite +import static com.mongodb.internal.operation.OperationHelper.validateWriteRequests + +class OperationHelperSpecification extends Specification { + + def 'should accept valid writeRequests'() { + when: + validateWriteRequests(connectionDescription, bypassDocumentValidation, writeRequests, writeConcern) + + then: + notThrown(IllegalArgumentException) + + when: + def asyncConnection = Stub(AsyncConnection) { + getDescription() >> connectionDescription + } + validateWriteRequests(asyncConnection.getDescription(), bypassDocumentValidation, writeRequests, writeConcern) + + then: + notThrown(IllegalArgumentException) + + where: + connectionDescription | bypassDocumentValidation | writeConcern | writeRequests + threeSixConnectionDescription | null | ACKNOWLEDGED | [new DeleteRequest(BsonDocument.parse('{a: "a"}'))] + threeSixConnectionDescription | null | UNACKNOWLEDGED | [new DeleteRequest(BsonDocument.parse('{a: "a"}'))] + threeSixConnectionDescription | null | ACKNOWLEDGED | [new DeleteRequest(BsonDocument.parse('{a: "a"}')) + .collation(enCollation)] + threeSixConnectionDescription | true | ACKNOWLEDGED | [new UpdateRequest(BsonDocument.parse('{a: "a"}'), + BsonDocument.parse('{$set: {a: "A"}}'), + WriteRequest.Type.REPLACE).collation(enCollation)] + } + + def 'should check if a valid retryable write'() { + given: + def activeTransactionSessionContext = Stub(SessionContext) { + hasSession() >> true + hasActiveTransaction() >> true + } + def noTransactionSessionContext = Stub(SessionContext) { + hasSession() >> true + hasActiveTransaction() >> false + } + + expect: + isRetryableWrite(retryWrites, writeConcern, connectionDescription, noTransactionSessionContext) == expected + !isRetryableWrite(retryWrites, writeConcern, connectionDescription, activeTransactionSessionContext) + + where: + retryWrites | writeConcern | connectionDescription | expected + false | ACKNOWLEDGED | threeSixConnectionDescription | false + true | UNACKNOWLEDGED | threeSixConnectionDescription | false + true | ACKNOWLEDGED | threeSixConnectionDescription | false + true | ACKNOWLEDGED | threeFourConnectionDescription | false + true | ACKNOWLEDGED | threeSixConnectionDescription | false + true | ACKNOWLEDGED | threeSixPrimaryConnectionDescription | true + } + + def 'should check if a valid retryable read'() { + given: + def 
activeTransactionSessionContext = Stub(SessionContext) { + hasSession() >> true + hasActiveTransaction() >> true + } + def noTransactionSessionContext = Stub(SessionContext) { + hasSession() >> true + hasActiveTransaction() >> false + } + + expect: + canRetryRead(retryableServerDescription, OPERATION_CONTEXT.withSessionContext(noTransactionSessionContext)) + !canRetryRead(retryableServerDescription, OPERATION_CONTEXT.withSessionContext(activeTransactionSessionContext)) + } + + + static ConnectionId connectionId = new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())) + static ConnectionDescription threeSixConnectionDescription = new ConnectionDescription(connectionId, 6, + STANDALONE, 1000, 100000, 100000, [], new BsonArray(), 30) + static ConnectionDescription threeSixPrimaryConnectionDescription = new ConnectionDescription(connectionId, 6, + REPLICA_SET_PRIMARY, 1000, 100000, 100000, [], new BsonArray(), 30) + static ConnectionDescription threeFourConnectionDescription = new ConnectionDescription(connectionId, 5, + STANDALONE, 1000, 100000, 100000, [], new BsonArray(), null) + + static ServerDescription retryableServerDescription = ServerDescription.builder().address(new ServerAddress()).state(CONNECTED) + .logicalSessionTimeoutMinutes(1).build() + static ServerDescription nonRetryableServerDescription = ServerDescription.builder().address(new ServerAddress()) + .state(CONNECTED).build() + + static Collation enCollation = Collation.builder().locale('en').build() +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/OperationUnitSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/OperationUnitSpecification.groovy new file mode 100644 index 00000000000..d298112656e --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/OperationUnitSpecification.groovy @@ -0,0 +1,234 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation + +import com.mongodb.MongoException +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import com.mongodb.async.FutureResultCallback +import com.mongodb.client.model.Collation +import com.mongodb.client.model.CollationAlternate +import com.mongodb.client.model.CollationCaseFirst +import com.mongodb.client.model.CollationMaxVariable +import com.mongodb.client.model.CollationStrength +import com.mongodb.connection.ConnectionDescription +import com.mongodb.internal.binding.AsyncConnectionSource +import com.mongodb.internal.binding.AsyncReadBinding +import com.mongodb.internal.binding.AsyncWriteBinding +import com.mongodb.internal.binding.ConnectionSource +import com.mongodb.internal.binding.ReadBinding +import com.mongodb.internal.binding.WriteBinding +import com.mongodb.internal.connection.AsyncConnection +import com.mongodb.internal.connection.Connection +import com.mongodb.internal.session.SessionContext +import org.bson.BsonDocument +import spock.lang.Shared +import spock.lang.Specification + +import java.util.concurrent.TimeUnit + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT + +class OperationUnitSpecification extends Specification { + + // Have to add to this map for every server release + private static final SERVER_TO_WIRE_VERSION_MAP = [ + [2, 6]: 2, + [3, 0]: 3, + [3, 2]: 4, + [3, 4]: 5, + [3, 6]: 6, + [4, 0]: 7, + [4, 1]: 8, + [4, 2]: 8, + [4, 4]: 9, + [5, 0]: 13, + [5, 1]: 14, + [5, 2]: 15, + [5, 3]: 16, + [6, 0]: 17, + [6, 1]: 18, + [6, 2]: 19, + [6, 3]: 20, + [7, 0]: 21, + [8, 0]: 25, + ] + + static Integer getMaxWireVersionForServerVersion(List serverVersion) { + def maxWireVersion = SERVER_TO_WIRE_VERSION_MAP[serverVersion.subList(0, 2)] + + if (maxWireVersion == null) { + throw new IllegalArgumentException('Unknown server version ' + serverVersion.subList(0, 2) + '. Check if it has been added ' + + 'to SERVER_TO_WIRE_VERSION_MAP') + } + + maxWireVersion + } + + void testOperation(operation, List serverVersion, BsonDocument expectedCommand, boolean async, BsonDocument result) { + def test = async ? this.&testAsyncOperation : this.&testSyncOperation + test(operation, serverVersion, result, true, expectedCommand) + } + + void testOperationSecondaryOk(operation, List serverVersion, ReadPreference readPreference, boolean async, result = null) { + def test = async ? this.&testAsyncOperation : this.&testSyncOperation + test(operation, serverVersion, result, false, null, true, readPreference) + } + + void testOperationThrows(operation, List serverVersion, boolean async) { + def test = async ? 
this.&testAsyncOperation : this.&testSyncOperation + test(operation, serverVersion, null, false) + } + + def testSyncOperation(operation, List serverVersion, result, Boolean checkCommand=true, + BsonDocument expectedCommand=null, + Boolean checkSecondaryOk=false, ReadPreference readPreference=ReadPreference.primary()) { + def operationContext = OPERATION_CONTEXT + .withSessionContext(Stub(SessionContext) { + hasActiveTransaction() >> false + getReadConcern() >> ReadConcern.DEFAULT + }) + + def connection = Mock(Connection) { + _ * getDescription() >> Stub(ConnectionDescription) { + getMaxWireVersion() >> getMaxWireVersionForServerVersion(serverVersion) + } + } + + def connectionSource = Stub(ConnectionSource) { + getConnection() >> connection + getReadPreference() >> readPreference + getOperationContext() >> operationContext + } + def readBinding = Stub(ReadBinding) { + getReadConnectionSource() >> connectionSource + getReadPreference() >> readPreference + getOperationContext() >> operationContext + } + def writeBinding = Stub(WriteBinding) { + getWriteConnectionSource() >> connectionSource + getOperationContext() >> operationContext + } + + if (checkCommand) { + 1 * connection.command(*_) >> { + assert(it[1] == expectedCommand) + result + } + } else if (checkSecondaryOk) { + 1 * connection.command(*_) >> { + assert(it[3] == readPreference) + result + } + } + + 0 * connection.command(*_) >> { + // Unexpected Command + result + } + + 1 * connection.release() + + if (operation instanceof ReadOperation) { + operation.execute(readBinding) + } else if (operation instanceof WriteOperation) { + operation.execute(writeBinding) + } + } + + def testAsyncOperation(operation, List serverVersion, result = null, + Boolean checkCommand=true, BsonDocument expectedCommand=null, + Boolean checkSecondaryOk=false, ReadPreference readPreference=ReadPreference.primary()) { + + def operationContext = OPERATION_CONTEXT + .withSessionContext(Stub(SessionContext) { + hasActiveTransaction() >> false + getReadConcern() >> ReadConcern.DEFAULT + }) + + def connection = Mock(AsyncConnection) { + _ * getDescription() >> Stub(ConnectionDescription) { + getMaxWireVersion() >> getMaxWireVersionForServerVersion(serverVersion) + } + } + + def connectionSource = Stub(AsyncConnectionSource) { + getConnection(_) >> { it[0].onResult(connection, null) } + getReadPreference() >> readPreference + getOperationContext() >> operationContext + } + def readBinding = Stub(AsyncReadBinding) { + getReadConnectionSource(_) >> { it[0].onResult(connectionSource, null) } + getReadPreference() >> readPreference + getOperationContext() >> operationContext + } + def writeBinding = Stub(AsyncWriteBinding) { + getWriteConnectionSource(_) >> { it[0].onResult(connectionSource, null) } + getOperationContext() >> operationContext + } + def callback = new FutureResultCallback() + + if (checkCommand) { + 1 * connection.commandAsync(*_) >> { + assert(it[1] == expectedCommand) + it.last().onResult(result, null) + } + } else if (checkSecondaryOk) { + 1 * connection.commandAsync(*_) >> { + assert(it[3] == readPreference) + it.last().onResult(result, null) + } + } + + 0 * connection.commandAsync(_, _, _, _, _, _, _, _) >> { + // Unexpected Command + it.last().onResult(result, null) + } + + 1 * connection.release() + + if (operation instanceof ReadOperation) { + operation.executeAsync(readBinding, callback) + } else if (operation instanceof WriteOperation) { + operation.executeAsync(writeBinding, callback) + } + try { + 
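// Wait for the async result; FutureResultCallback wraps failures in a MongoException whose cause is the real error, so rethrow the cause. +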
callback.get(1000, TimeUnit.MILLISECONDS) + } catch (MongoException e) { + throw e.cause + } + } + + @Shared + Collation defaultCollation = Collation.builder() + .locale('en') + .caseLevel(true) + .collationCaseFirst(CollationCaseFirst.OFF) + .collationStrength(CollationStrength.IDENTICAL) + .numericOrdering(true) + .collationAlternate(CollationAlternate.SHIFTED) + .collationMaxVariable(CollationMaxVariable.SPACE) + .normalization(true) + .backwards(true) + .build() + + @Shared + Collation caseInsensitiveCollation = Collation.builder() + .locale('en') + .collationStrength(CollationStrength.SECONDARY) + .build() +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/SingleBatchCursorTest.java b/driver-core/src/test/unit/com/mongodb/internal/operation/SingleBatchCursorTest.java new file mode 100644 index 00000000000..a71f067f5d6 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/SingleBatchCursorTest.java @@ -0,0 +1,93 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation; + +import com.mongodb.ServerAddress; +import org.bson.Document; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; + +import java.util.List; +import java.util.NoSuchElementException; + +import static com.mongodb.assertions.Assertions.assertFalse; +import static com.mongodb.assertions.Assertions.assertNull; +import static com.mongodb.internal.connection.tlschannel.util.Util.assertTrue; +import static com.mongodb.internal.operation.SingleBatchCursor.createEmptySingleBatchCursor; +import static java.util.Arrays.asList; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertIterableEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + + +class SingleBatchCursorTest { + + private static final List<Document> SINGLE_BATCH = asList(new Document("a", 1), new Document("b", 2)); + private static final ServerAddress SERVER_ADDRESS = new ServerAddress(); + + @Test + @DisplayName("should work as expected") + void shouldWorkAsExpected() { + + try (SingleBatchCursor<Document> cursor = new SingleBatchCursor<>(SINGLE_BATCH, 0, SERVER_ADDRESS)) { + assertEquals(SERVER_ADDRESS, cursor.getServerAddress()); + assertEquals(1, cursor.available()); + assertNull(cursor.getServerCursor()); + + assertTrue(cursor.hasNext()); + assertIterableEquals(SINGLE_BATCH, cursor.next()); + assertEquals(0, cursor.available()); + + assertFalse(cursor.hasNext()); + assertThrows(NoSuchElementException.class, cursor::next); + } + } + + @Test + @DisplayName("should work as expected emptyCursor") + void shouldWorkAsExpectedEmptyCursor() { + try (SingleBatchCursor<Document> cursor = createEmptySingleBatchCursor(SERVER_ADDRESS, 0)) { + assertEquals(SERVER_ADDRESS, cursor.getServerAddress()); + assertEquals(0, cursor.available()); + assertNull(cursor.getServerCursor()); + + assertFalse(cursor.hasNext()); + assertThrows(NoSuchElementException.class, 
cursor::next); + } + } + + @Test + @DisplayName("should work as expected with try methods") + void shouldWorkAsExpectedWithTryMethods() { + try (SingleBatchCursor<Document> cursor = new SingleBatchCursor<>(SINGLE_BATCH, 0, SERVER_ADDRESS)) { + assertIterableEquals(SINGLE_BATCH, cursor.tryNext()); + assertNull(cursor.tryNext()); + } + } + + @Test + @DisplayName("should not support setting batch size") + void shouldNotSupportSettingBatchSize() { + try (SingleBatchCursor<Document> cursor = new SingleBatchCursor<>(SINGLE_BATCH, 0, SERVER_ADDRESS)) { + assertEquals(0, cursor.getBatchSize()); + + cursor.setBatchSize(1); + assertEquals(0, cursor.getBatchSize()); + } + } + +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/SyncOperationHelperSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/SyncOperationHelperSpecification.groovy new file mode 100644 index 00000000000..df2d54bfb9d --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/SyncOperationHelperSpecification.groovy @@ -0,0 +1,152 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation + +import com.mongodb.MongoWriteConcernException +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import com.mongodb.connection.ConnectionDescription +import com.mongodb.connection.ServerDescription +import com.mongodb.connection.ServerType +import com.mongodb.internal.binding.ConnectionSource +import com.mongodb.internal.binding.ReadBinding +import com.mongodb.internal.binding.WriteBinding +import com.mongodb.internal.connection.Connection +import com.mongodb.internal.session.SessionContext +import com.mongodb.internal.validator.NoOpFieldNameValidator +import org.bson.BsonDocument +import org.bson.BsonNull +import org.bson.codecs.BsonDocumentCodec +import org.bson.codecs.Decoder +import spock.lang.Specification + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ReadPreference.primary +import static com.mongodb.internal.operation.OperationUnitSpecification.getMaxWireVersionForServerVersion +import static com.mongodb.internal.operation.SyncOperationHelper.CommandReadTransformer +import static com.mongodb.internal.operation.SyncOperationHelper.CommandWriteTransformer +import static com.mongodb.internal.operation.SyncOperationHelper.executeCommand +import static com.mongodb.internal.operation.SyncOperationHelper.executeRetryableRead +import static com.mongodb.internal.operation.SyncOperationHelper.executeRetryableWrite + +class SyncOperationHelperSpecification extends Specification { + + def 'should set read preference to primary when using WriteBinding'() { + given: + def dbName = 'db' + def command = new BsonDocument() + def decoder = Stub(Decoder) + def connection = Mock(Connection) + def function = Stub(CommandWriteTransformer) + def connectionSource = Stub(ConnectionSource) { + getConnection() >> connection + getOperationContext() >> 
OPERATION_CONTEXT + } + def writeBinding = Stub(WriteBinding) { + getWriteConnectionSource() >> connectionSource + getOperationContext() >> OPERATION_CONTEXT + } + def connectionDescription = Stub(ConnectionDescription) + + when: + executeCommand(writeBinding, dbName, command, decoder, function) + + then: + _ * connection.getDescription() >> connectionDescription + 1 * connection.command(dbName, command, _, primary(), decoder, OPERATION_CONTEXT) >> new BsonDocument() + 1 * connection.release() + } + + def 'should retry with retryable exception'() { + given: + def operationContext = OPERATION_CONTEXT + .withSessionContext(Stub(SessionContext) { + hasSession() >> true + hasActiveTransaction() >> false + getReadConcern() >> ReadConcern.DEFAULT + }) + def dbName = 'db' + def command = BsonDocument.parse('''{findAndModify: "coll", query: {a: 1}, new: false, update: {$inc: {a :1}}, txnNumber: 1}''') + def commandCreator = { csot, serverDescription, connectionDescription -> command } + def decoder = new BsonDocumentCodec() + def results = [ + BsonDocument.parse('{ok: 1.0, writeConcernError: {code: 91, errmsg: "Replication is being shut down"}}'), + BsonDocument.parse('{ok: 1.0, writeConcernError: {code: -1, errmsg: "UnknownError"}}')] as Queue + def connection = Mock(Connection) { + _ * release() + _ * getDescription() >> Stub(ConnectionDescription) { + getMaxWireVersion() >> getMaxWireVersionForServerVersion([4, 0, 0]) + getServerType() >> ServerType.REPLICA_SET_PRIMARY + } + } + def connectionSource = Stub(ConnectionSource) { + _ * getConnection() >> connection + _ * getServerDescription() >> Stub(ServerDescription) { + getLogicalSessionTimeoutMinutes() >> 1 + } + getOperationContext() >> operationContext + } + def writeBinding = Stub(WriteBinding) { + getWriteConnectionSource() >> connectionSource + getOperationContext() >> operationContext + } + + when: + executeRetryableWrite(writeBinding, dbName, primary(), + NoOpFieldNameValidator.INSTANCE, decoder, commandCreator, FindAndModifyHelper.transformer()) + { cmd -> cmd } + + then: + 2 * connection.command(dbName, command, _, primary(), decoder, operationContext) >> { results.poll() } + + then: + def ex = thrown(MongoWriteConcernException) + ex.writeConcernError.code == -1 + } + + def 'should use the ConnectionSource readPreference'() { + given: + def dbName = 'db' + def command = new BsonDocument('fakeCommandName', BsonNull.VALUE) + def commandCreator = { csot, serverDescription, connectionDescription -> command } + def decoder = Stub(Decoder) + def function = Stub(CommandReadTransformer) + def connection = Mock(Connection) + def connectionSource = Stub(ConnectionSource) { + getConnection() >> connection + getReadPreference() >> readPreference + getOperationContext() >> OPERATION_CONTEXT + } + def readBinding = Stub(ReadBinding) { + getReadConnectionSource() >> connectionSource + getOperationContext() >> OPERATION_CONTEXT + } + def connectionDescription = Stub(ConnectionDescription) + + when: + executeRetryableRead(readBinding, dbName, commandCreator, decoder, function, false) + + then: + _ * connection.getDescription() >> connectionDescription + 1 * connection.command(dbName, command, _, readPreference, decoder, OPERATION_CONTEXT) >> new BsonDocument() + 1 * connection.release() + + where: + readPreference << [primary(), ReadPreference.secondary()] + } + +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/UpdateRequestSpecification.groovy 
b/driver-core/src/test/unit/com/mongodb/internal/operation/UpdateRequestSpecification.groovy new file mode 100644 index 00000000000..7ab84bb670b --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/UpdateRequestSpecification.groovy @@ -0,0 +1,137 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.operation + +import com.mongodb.client.model.Collation +import com.mongodb.internal.bulk.UpdateRequest +import com.mongodb.internal.bulk.WriteRequest +import org.bson.BsonBoolean +import org.bson.BsonDocument +import org.bson.BsonInt32 +import spock.lang.Specification + +class UpdateRequestSpecification extends Specification { + + def 'should have correct type'() { + expect: + new UpdateRequest(new BsonDocument(), new BsonDocument(), WriteRequest.Type.UPDATE).getType() == WriteRequest.Type.UPDATE + new UpdateRequest(new BsonDocument(), new BsonDocument(), WriteRequest.Type.REPLACE).getType() == WriteRequest.Type.REPLACE + } + + def 'should throw if type is not update or replace'() { + when: + new UpdateRequest(new BsonDocument(), new BsonDocument(), WriteRequest.Type.INSERT) + + then: + thrown(IllegalArgumentException) + + when: + new UpdateRequest(new BsonDocument(), new BsonDocument(), WriteRequest.Type.DELETE) + + then: + thrown(IllegalArgumentException) + } + + def 'should not allow null filter'() { + when: + new UpdateRequest(null, new BsonDocument(), WriteRequest.Type.UPDATE) + + then: + thrown(IllegalArgumentException) + } + + def 'should not allow null update'() { + when: + new UpdateRequest(new BsonDocument(), null, WriteRequest.Type.UPDATE) + + then: + thrown(IllegalArgumentException) + } + + def 'should set fields from constructor'() { + given: + def filter = new BsonDocument('_id', new BsonInt32(1)) + def update = new BsonDocument('$set', new BsonDocument('x', BsonBoolean.TRUE)) + + when: + def updateRequest = new UpdateRequest(filter, update, WriteRequest.Type.UPDATE) + + then: + updateRequest.filter == filter + updateRequest.updateValue == update + } + + def 'multi property should default to true for updates and false for replaces'() { + expect: + new UpdateRequest(new BsonDocument(), new BsonDocument(), WriteRequest.Type.UPDATE).multi + !new UpdateRequest(new BsonDocument(), new BsonDocument(), WriteRequest.Type.REPLACE).multi + } + + def 'should set multi property'() { + expect: + !new UpdateRequest(new BsonDocument(), new BsonDocument(), WriteRequest.Type.UPDATE).multi(false).isMulti() + !new UpdateRequest(new BsonDocument(), new BsonDocument(), WriteRequest.Type.REPLACE).multi(false).isMulti() + } + + def 'should throw if multi set to true on a replace'() { + when: + new UpdateRequest(new BsonDocument(), new BsonDocument(), WriteRequest.Type.REPLACE).multi(true) + + then: + thrown(IllegalArgumentException) + } + + def 'upsert property should default to false'() { + expect: + !new UpdateRequest(new BsonDocument(), new BsonDocument(), WriteRequest.Type.UPDATE).upsert + } + + def 'should set upsert 
property'() { + expect: + new UpdateRequest(new BsonDocument(), new BsonDocument(), WriteRequest.Type.UPDATE).upsert(true).isUpsert() + } + + def 'should set collation property'() { + when: + def collation = Collation.builder().locale('en').build() + + then: + new UpdateRequest(new BsonDocument(), new BsonDocument(), type).collation(null).getCollation() == null + new UpdateRequest(new BsonDocument(), new BsonDocument(), type).collation(collation).getCollation() == collation + + where: + type << [WriteRequest.Type.UPDATE, WriteRequest.Type.REPLACE] + } + + def 'should set arrayFilters property'() { + expect: + new UpdateRequest(new BsonDocument(), new BsonDocument(), WriteRequest.Type.UPDATE).arrayFilters(arrayFilters) + .getArrayFilters() == arrayFilters + + where: + arrayFilters << [null, [], [new BsonDocument('a.b', new BsonInt32(42))]] + } + + def 'should set sort property'() { + expect: + new UpdateRequest(new BsonDocument(), new BsonDocument(), type).sort(sort).getSort() == sort + + where: + type << [WriteRequest.Type.UPDATE, WriteRequest.Type.REPLACE] + sort << [null, new BsonDocument('_id', new BsonInt32(1))] + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/WriteConcernHelperSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/operation/WriteConcernHelperSpecification.groovy new file mode 100644 index 00000000000..cbef728a564 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/WriteConcernHelperSpecification.groovy @@ -0,0 +1,52 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation + +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonString +import spock.lang.Specification + +import static com.mongodb.internal.operation.WriteConcernHelper.createWriteConcernError + +class WriteConcernHelperSpecification extends Specification { + + def 'should create write concern error'() { + when: + def writeConcernError = createWriteConcernError(new BsonDocument('code', new BsonInt32(42)) + .append('errmsg', new BsonString('a timeout')) + .append('errInfo', new BsonDocument('wtimeout', new BsonInt32(1)))) + + then: + writeConcernError.getCode() == 42 + writeConcernError.getCodeName() == '' + writeConcernError.getMessage() == 'a timeout' + writeConcernError.getDetails() == new BsonDocument('wtimeout', new BsonInt32(1)) + + when: + writeConcernError = createWriteConcernError(new BsonDocument('code', new BsonInt32(42)) + .append('codeName', new BsonString('TimeoutError')) + .append('errmsg', new BsonString('a timeout')) + .append('errInfo', new BsonDocument('wtimeout', new BsonInt32(1)))) + + then: + writeConcernError.getCode() == 42 + writeConcernError.getCodeName() == 'TimeoutError' + writeConcernError.getMessage() == 'a timeout' + writeConcernError.getDetails() == new BsonDocument('wtimeout', new BsonInt32(1)) + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/operation/WriteConcernHelperTest.java b/driver-core/src/test/unit/com/mongodb/internal/operation/WriteConcernHelperTest.java new file mode 100644 index 00000000000..2c7b71949c8 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/operation/WriteConcernHelperTest.java @@ -0,0 +1,61 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.operation; + +import com.mongodb.WriteConcern; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertEquals; + +class WriteConcernHelperTest { + + static WriteConcern[] shouldRemoveWtimeout() { + return new WriteConcern[]{ + WriteConcern.ACKNOWLEDGED, + WriteConcern.MAJORITY, + WriteConcern.W1, + WriteConcern.W2, + WriteConcern.W3, + WriteConcern.UNACKNOWLEDGED, + WriteConcern.JOURNALED, + + WriteConcern.ACKNOWLEDGED.withWTimeout(100, TimeUnit.MILLISECONDS), + WriteConcern.MAJORITY.withWTimeout(100, TimeUnit.MILLISECONDS), + WriteConcern.W1.withWTimeout(100, TimeUnit.MILLISECONDS), + WriteConcern.W2.withWTimeout(100, TimeUnit.MILLISECONDS), + WriteConcern.W3.withWTimeout(100, TimeUnit.MILLISECONDS), + WriteConcern.UNACKNOWLEDGED.withWTimeout(100, TimeUnit.MILLISECONDS), + WriteConcern.JOURNALED.withWTimeout(100, TimeUnit.MILLISECONDS), + }; + } + + @MethodSource + @ParameterizedTest + void shouldRemoveWtimeout(final WriteConcern writeConcern) { + // when + WriteConcern clonedWithoutTimeout = WriteConcernHelper.cloneWithoutTimeout(writeConcern); + + // then + assertEquals(writeConcern.getWObject(), clonedWithoutTimeout.getWObject()); + assertEquals(writeConcern.getJournal(), clonedWithoutTimeout.getJournal()); + assertNull(clonedWithoutTimeout.getWTimeout(TimeUnit.MILLISECONDS)); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/selector/AtMostTwoRandomServerSelectorTest.java b/driver-core/src/test/unit/com/mongodb/internal/selector/AtMostTwoRandomServerSelectorTest.java new file mode 100644 index 00000000000..1d174ccabe7 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/selector/AtMostTwoRandomServerSelectorTest.java @@ -0,0 +1,99 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.internal.selector; + +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ClusterType; +import com.mongodb.connection.ServerConnectionState; +import com.mongodb.connection.ServerDescription; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.stream.Stream; + +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static java.util.stream.Collectors.toList; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.params.provider.Arguments.arguments; + +final class AtMostTwoRandomServerSelectorTest { + @ParameterizedTest + @MethodSource("args") + void select( + final List<String> hosts, + final int numberOfSelectIterations, + final double expectedCount, + final double frequencyTolerance, + final int expectedSelectedSize) { + ClusterDescription clusterDescription = clusterDescription(hosts); + HashMap<ServerAddress, Integer> actualCounters = new HashMap<>(); + for (int i = 0; i < numberOfSelectIterations; i++) { + List<ServerDescription> selected = AtMostTwoRandomServerSelector.instance().select(clusterDescription); + assertEquals(expectedSelectedSize, selected.size(), selected::toString); + selected.forEach(serverDescription -> actualCounters.merge(serverDescription.getAddress(), 1, Integer::sum)); + } + actualCounters.forEach((serverAddress, counter) -> + assertEquals( + expectedCount / numberOfSelectIterations, + (double) counter / numberOfSelectIterations, + frequencyTolerance, + () -> String.format("serverAddress=%s, counter=%d, actualCounters=%s", serverAddress, counter, actualCounters))); + } + + private static Stream<Arguments> args() { + int smallNumberOfSelectIterations = 10; + int largeNumberOfSelectIterations = 2_000; + int maxSelectedSize = 2; + return Stream.of( + arguments(emptyList(), + smallNumberOfSelectIterations, 0, 0, 0), + arguments(singletonList("1"), + smallNumberOfSelectIterations, smallNumberOfSelectIterations, 0, 1), + arguments(asList("1", "2"), + smallNumberOfSelectIterations, smallNumberOfSelectIterations, 0, maxSelectedSize), + arguments(asList("1", "2", "3"), + largeNumberOfSelectIterations, (double) maxSelectedSize * largeNumberOfSelectIterations / 3, 0.05, maxSelectedSize), + arguments(asList("1", "2", "3", "4", "5", "6", "7"), + largeNumberOfSelectIterations, (double) maxSelectedSize * largeNumberOfSelectIterations / 7, 0.05, maxSelectedSize) + ); + } + + private static ClusterDescription clusterDescription(final List<String> hosts) { + return new ClusterDescription(ClusterConnectionMode.MULTIPLE, ClusterType.REPLICA_SET, serverDescriptions(hosts)); + } + + private static List<ServerDescription> serverDescriptions(final Collection<String> hosts) { + return hosts.stream() + .map(AtMostTwoRandomServerSelectorTest::serverDescription) + .collect(toList()); + } + + private static ServerDescription serverDescription(final String host) { + return ServerDescription.builder() + .state(ServerConnectionState.CONNECTED) + .ok(true) + .address(new ServerAddress(host)) + .build(); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/selector/LatencyMinimizingServerSelectorTest.java b/driver-core/src/test/unit/com/mongodb/internal/selector/LatencyMinimizingServerSelectorTest.java 
diff --git a/driver-core/src/test/unit/com/mongodb/internal/selector/LatencyMinimizingServerSelectorTest.java b/driver-core/src/test/unit/com/mongodb/internal/selector/LatencyMinimizingServerSelectorTest.java
new file mode 100644
index 00000000000..a9b34f4d74e
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/selector/LatencyMinimizingServerSelectorTest.java
@@ -0,0 +1,91 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.selector;
+
+import com.mongodb.ServerAddress;
+import com.mongodb.connection.ClusterDescription;
+import com.mongodb.connection.ServerDescription;
+import com.mongodb.connection.ServerType;
+import org.junit.Test;
+
+import java.net.UnknownHostException;
+import java.util.Arrays;
+import java.util.concurrent.TimeUnit;
+
+import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE;
+import static com.mongodb.connection.ClusterType.REPLICA_SET;
+import static com.mongodb.connection.ServerConnectionState.CONNECTED;
+import static org.junit.Assert.assertEquals;
+
+public class LatencyMinimizingServerSelectorTest {
+    @Test
+    public void testLatencyDifferentialMinimization() throws UnknownHostException {
+        LatencyMinimizingServerSelector selector = new LatencyMinimizingServerSelector(20, TimeUnit.MILLISECONDS);
+        ServerDescription primary = ServerDescription.builder()
+                .state(CONNECTED)
+                .address(new ServerAddress())
+                .ok(true)
+                .type(ServerType.REPLICA_SET_PRIMARY)
+                .roundTripTime(10, TimeUnit.MILLISECONDS)
+                .build();
+        ServerDescription secondaryOne = ServerDescription.builder()
+                .state(CONNECTED)
+                .address(new ServerAddress("localhost:27018"))
+                .ok(true)
+                .type(ServerType.REPLICA_SET_SECONDARY)
+                .roundTripTime(15, TimeUnit.MILLISECONDS)
+                .build();
+        ServerDescription secondaryTwo = ServerDescription.builder()
+                .state(CONNECTED)
+                .address(new ServerAddress("localhost:27019"))
+                .ok(true)
+                .type(ServerType.REPLICA_SET_SECONDARY)
+                .roundTripTime(31, TimeUnit.MILLISECONDS)
+                .build();
+        ServerDescription secondaryThree = ServerDescription.builder()
+                .state(CONNECTED)
+                .address(new ServerAddress("localhost:27020"))
+                .ok(true)
+                .type(ServerType.REPLICA_SET_SECONDARY)
+                .roundTripTime(30, TimeUnit.MILLISECONDS)
+                .build();
+        assertEquals(Arrays.asList(primary, secondaryOne, secondaryThree),
+                selector.select(new ClusterDescription(MULTIPLE, REPLICA_SET,
+                        Arrays.asList(primary, secondaryOne, secondaryTwo, secondaryThree))));
+    }
+
+    @Test
+    public void testZeroLatencyDifferentialTolerance() throws UnknownHostException {
+        LatencyMinimizingServerSelector selector = new LatencyMinimizingServerSelector(0, TimeUnit.NANOSECONDS);
+        ServerDescription primary = ServerDescription.builder()
+                .state(CONNECTED)
+                .address(new ServerAddress())
+                .ok(true)
+                .type(ServerType.REPLICA_SET_PRIMARY)
+                .roundTripTime(10, TimeUnit.NANOSECONDS)
+                .build();
+        ServerDescription secondaryOne = ServerDescription.builder()
+                .state(CONNECTED)
+                .address(new ServerAddress("localhost:27018"))
+                .ok(true)
+                .type(ServerType.REPLICA_SET_SECONDARY)
+                .roundTripTime(11, TimeUnit.NANOSECONDS)
+                .build();
+        assertEquals(Arrays.asList(primary), selector.select(new ClusterDescription(MULTIPLE, REPLICA_SET,
+                Arrays.asList(primary, secondaryOne))));
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/selector/MinimumOperationCountServerSelectorTest.java b/driver-core/src/test/unit/com/mongodb/internal/selector/MinimumOperationCountServerSelectorTest.java
new file mode 100644
index 00000000000..3a0d754cb97
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/selector/MinimumOperationCountServerSelectorTest.java
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.internal.selector;
+
+import com.mongodb.ServerAddress;
+import com.mongodb.connection.ClusterConnectionMode;
+import com.mongodb.connection.ClusterDescription;
+import com.mongodb.connection.ClusterType;
+import com.mongodb.connection.ServerConnectionState;
+import com.mongodb.connection.ServerDescription;
+import com.mongodb.internal.connection.Cluster;
+import com.mongodb.internal.connection.Server;
+import com.mongodb.internal.mockito.MongoMockito;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+
+import java.util.Collection;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Consumer;
+import java.util.stream.Stream;
+
+import static java.util.Collections.emptyList;
+import static java.util.Collections.emptyMap;
+import static java.util.Collections.singletonList;
+import static java.util.Collections.singletonMap;
+import static java.util.stream.Collectors.toList;
+import static java.util.stream.Collectors.toMap;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.params.provider.Arguments.arguments;
+import static org.mockito.Mockito.when;
+
+final class MinimumOperationCountServerSelectorTest {
+    @ParameterizedTest
+    @MethodSource("args")
+    void select(final Map<String, Integer> hostToOperationCount, final List<String> expectedHosts) {
+        ClusterDescriptionAndServersSnapshot pair = clusterDescriptionAndServersSnapshot(hostToOperationCount);
+        List<String> actualHosts = new MinimumOperationCountServerSelector(pair.getServersSnapshot())
+                .select(pair.getClusterDescription())
+                .stream()
+                .map(serverDescription -> serverDescription.getAddress().getHost())
+                .collect(toList());
+        assertEquals(expectedHosts, actualHosts, hostToOperationCount::toString);
+    }
+
+    private static Stream<Arguments> args() {
+        return Stream.of(
+                arguments(emptyMap(), emptyList()),
+                arguments(singletonMap("a", 0), singletonList("a")),
+                arguments(linkedMap(m -> {
+                    m.put("b", 0);
+                    m.put("a", 5);
+                }), singletonList("b")),
+                arguments(linkedMap(m -> {
+                    m.put("b", 2);
+                    m.put("a", 3);
+                    m.put("c", 2);
+                }), singletonList("b")),
+                arguments(linkedMap(m -> {
+                    m.put("b", 5);
+                    m.put("a", 5);
+                    m.put("e", 0);
+                    m.put("c", 5);
+                    m.put("d", 8);
+                }), singletonList("e"))
+        );
+    }
+
+    private static ClusterDescriptionAndServersSnapshot clusterDescriptionAndServersSnapshot(
+            final Map<String, Integer> hostToOperationCount) {
+        ClusterDescription clusterDescription = new ClusterDescription(
+                ClusterConnectionMode.MULTIPLE, ClusterType.REPLICA_SET, serverDescriptions(hostToOperationCount.keySet()));
+        Map<ServerAddress, Integer> serverAddressToOperationCount = hostToOperationCount.entrySet()
+                .stream().collect(toMap(entry -> new ServerAddress(entry.getKey()), Map.Entry::getValue));
+        Cluster.ServersSnapshot serversSnapshot = serverAddress -> {
+            int operationCount = serverAddressToOperationCount.get(serverAddress);
+            return MongoMockito.mock(Server.class, server ->
+                    when(server.operationCount()).thenReturn(operationCount));
+        };
+        return new ClusterDescriptionAndServersSnapshot(clusterDescription, serversSnapshot);
+    }
+
+    private static List<ServerDescription> serverDescriptions(final Collection<String> hosts) {
+        return hosts.stream()
+                .map(MinimumOperationCountServerSelectorTest::serverDescription)
+                .collect(toList());
+    }
+
+    private static ServerDescription serverDescription(final String host) {
+        return ServerDescription.builder()
+                .state(ServerConnectionState.CONNECTED)
+                .ok(true)
+                .address(new ServerAddress(host))
+                .build();
+    }
+
+    private static LinkedHashMap<String, Integer> linkedMap(final Consumer<Map<String, Integer>> filler) {
+        LinkedHashMap<String, Integer> result = new LinkedHashMap<>();
+        filler.accept(result);
+        return result;
+    }
+
+    private static final class ClusterDescriptionAndServersSnapshot {
+        private final ClusterDescription clusterDescription;
+        private final Cluster.ServersSnapshot serversSnapshot;
+
+        private ClusterDescriptionAndServersSnapshot(
+                final ClusterDescription clusterDescription,
+                final Cluster.ServersSnapshot serversSnapshot) {
+            this.clusterDescription = clusterDescription;
+            this.serversSnapshot = serversSnapshot;
+        }
+
+        ClusterDescription getClusterDescription() {
+            return clusterDescription;
+        }
+
+        Cluster.ServersSnapshot getServersSnapshot() {
+            return serversSnapshot;
+        }
+    }
+}
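MinimumOperationCountServerSelector pairs naturally with the at-most-two-random step above: among the candidates, keep the server whose pool has the fewest in-progress operations. The first-wins tie-breaking the LinkedHashMap-based cases rely on ("b" beats "c" when both sit at 2) can be sketched like this (illustrative only, not the driver's implementation):

    import java.util.List;
    import java.util.Optional;
    import java.util.function.ToIntFunction;

    final class MinimumOperationCountSketch {
        // Illustrative only: returns the first candidate with the lowest operation
        // count, matching the ordering behavior the cases above assert.
        static <T> Optional<T> leastLoaded(final List<T> candidates, final ToIntFunction<T> operationCount) {
            T best = null;
            for (T candidate : candidates) {
                if (best == null || operationCount.applyAsInt(candidate) < operationCount.applyAsInt(best)) {
                    best = candidate;
                }
            }
            return Optional.ofNullable(best);
        }
    }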
diff --git a/driver-core/src/test/unit/com/mongodb/internal/selector/PrimaryServerSelectorSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/selector/PrimaryServerSelectorSpecification.groovy
new file mode 100644
index 00000000000..30c7ec522f5
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/selector/PrimaryServerSelectorSpecification.groovy
@@ -0,0 +1,52 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.selector
+
+import com.mongodb.ServerAddress
+import com.mongodb.connection.ClusterDescription
+import com.mongodb.connection.ClusterType
+import com.mongodb.connection.ServerDescription
+import spock.lang.Specification
+import spock.lang.Unroll
+
+import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE
+import static com.mongodb.connection.ServerConnectionState.CONNECTED
+import static com.mongodb.connection.ServerType.REPLICA_SET_PRIMARY
+import static com.mongodb.connection.ServerType.REPLICA_SET_SECONDARY
+
+class PrimaryServerSelectorSpecification extends Specification {
+    private static final ServerDescription.Builder SERVER_DESCRIPTION_BUILDER = ServerDescription.builder()
+            .state(CONNECTED)
+            .address(new ServerAddress())
+            .ok(true)
+    private static final ServerDescription PRIMARY_SERVER = SERVER_DESCRIPTION_BUILDER.type(REPLICA_SET_PRIMARY).build()
+    private static final ServerDescription SECONDARY_SERVER = SERVER_DESCRIPTION_BUILDER.type(REPLICA_SET_SECONDARY).build()
+
+    @Unroll
+    def 'PrimaryServerSelector will choose primary server for #clusterDescription'() throws UnknownHostException {
+        expect:
+        PrimaryServerSelector selector = new PrimaryServerSelector()
+        expectedServerList == selector.select(clusterDescription)
+
+        where:
+        expectedServerList | clusterDescription
+        [PRIMARY_SERVER]   | new ClusterDescription(MULTIPLE, ClusterType.REPLICA_SET, [PRIMARY_SERVER])
+        [PRIMARY_SERVER]   | new ClusterDescription(MULTIPLE, ClusterType.REPLICA_SET, [PRIMARY_SERVER, SECONDARY_SERVER])
+        []                 | new ClusterDescription(MULTIPLE, ClusterType.REPLICA_SET, [SECONDARY_SERVER])
+    }
+
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/selector/ReadPreferenceServerSelectorSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/selector/ReadPreferenceServerSelectorSpecification.groovy
new file mode 100644
index 00000000000..26b47a7f00e
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/selector/ReadPreferenceServerSelectorSpecification.groovy
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.selector
+
+import com.mongodb.ServerAddress
+import com.mongodb.connection.ClusterDescription
+import com.mongodb.connection.ServerDescription
+import com.mongodb.connection.ServerType
+import spock.lang.Specification
+
+import static com.mongodb.ReadPreference.primary
+import static com.mongodb.ReadPreference.secondary
+import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE
+import static com.mongodb.connection.ClusterConnectionMode.SINGLE
+import static com.mongodb.connection.ClusterType.REPLICA_SET
+import static com.mongodb.connection.ServerConnectionState.CONNECTED
+
+class ReadPreferenceServerSelectorSpecification extends Specification {
+
+    def primary = ServerDescription.builder()
+            .state(CONNECTED)
+            .address(new ServerAddress())
+            .ok(true)
+            .type(ServerType.REPLICA_SET_PRIMARY)
+            .build()
+    def secondary = ServerDescription.builder()
+            .state(CONNECTED)
+            .address(new ServerAddress('localhost', 27018))
+            .ok(true)
+            .type(ServerType.REPLICA_SET_SECONDARY)
+            .build()
+
+    def 'constructor should throw if read preference is null'() {
+        when:
+        new ReadPreferenceServerSelector(null)
+
+        then:
+        thrown(IllegalArgumentException)
+    }
+
+    def 'should get read preference'() {
+        expect:
+        new ReadPreferenceServerSelector(primary()).readPreference == primary()
+    }
+
+    def 'should override toString'() {
+        expect:
+        new ReadPreferenceServerSelector(primary()).toString() == 'ReadPreferenceServerSelector{readPreference=primary}'
+    }
+
+    def 'should select server that matches read preference when connection mode is multiple'() {
+        expect:
+        new ReadPreferenceServerSelector(primary()).select(new ClusterDescription(MULTIPLE, REPLICA_SET, [primary, secondary])) ==
+                [primary]
+        new ReadPreferenceServerSelector(secondary()).select(new ClusterDescription(MULTIPLE, REPLICA_SET, [primary, secondary])) ==
+                [secondary]
+    }
+
+    def 'should select any ok server when connection mode is single'() {
+        expect:
+        new ReadPreferenceServerSelector(primary()).select(new ClusterDescription(SINGLE, REPLICA_SET, [secondary])) == [secondary]
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/selector/ReadPreferenceWithFallbackServerSelectorTest.java b/driver-core/src/test/unit/com/mongodb/internal/selector/ReadPreferenceWithFallbackServerSelectorTest.java
new file mode 100644
index 00000000000..ef796d71df7
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/selector/ReadPreferenceWithFallbackServerSelectorTest.java
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.mongodb.internal.selector; + +import com.mongodb.ReadPreference; +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ClusterType; +import com.mongodb.connection.ServerDescription; +import org.junit.jupiter.api.Test; + +import java.util.List; + +import static com.mongodb.connection.ServerConnectionState.CONNECTED; +import static com.mongodb.connection.ServerConnectionState.CONNECTING; +import static com.mongodb.connection.ServerDescription.builder; +import static com.mongodb.connection.ServerType.REPLICA_SET_PRIMARY; +import static com.mongodb.connection.ServerType.REPLICA_SET_SECONDARY; +import static com.mongodb.internal.operation.ServerVersionHelper.FIVE_DOT_ZERO_WIRE_VERSION; +import static com.mongodb.internal.operation.ServerVersionHelper.FOUR_DOT_FOUR_WIRE_VERSION; +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; +import static java.util.stream.Collectors.toList; +import static org.junit.jupiter.api.Assertions.assertEquals; + +public class ReadPreferenceWithFallbackServerSelectorTest { + + @Test + public void shouldSelectCorrectServersWhenAtLeastOneServerIsOlderThanMinimum() { + ReadPreferenceWithFallbackServerSelector selector = + new ReadPreferenceWithFallbackServerSelector( + ReadPreference.secondary(), FIVE_DOT_ZERO_WIRE_VERSION, ReadPreference.primary()); + + ClusterDescription clusterDescription = new ClusterDescription( + ClusterConnectionMode.MULTIPLE, + ClusterType.REPLICA_SET, + asList( + builder().ok(true).state(CONNECTED).type(REPLICA_SET_PRIMARY).address(new ServerAddress("localhost:27017")) + .maxWireVersion(FOUR_DOT_FOUR_WIRE_VERSION).build(), + builder().ok(true).state(CONNECTED).type(REPLICA_SET_SECONDARY).address(new ServerAddress("localhost:27018")) + .maxWireVersion(FIVE_DOT_ZERO_WIRE_VERSION).build())); + assertEquals(clusterDescription.getServerDescriptions().stream() + .filter(serverDescription -> serverDescription.getType() == REPLICA_SET_PRIMARY).collect(toList()), + selector.select(clusterDescription)); + assertEquals(ReadPreference.primary(), selector.getAppliedReadPreference()); + } + + @Test + public void shouldSelectCorrectServersWhenAllServersAreAtLeastMinimum() { + ReadPreferenceWithFallbackServerSelector selector = + new ReadPreferenceWithFallbackServerSelector( + ReadPreference.secondary(), FIVE_DOT_ZERO_WIRE_VERSION, ReadPreference.primary()); + + ClusterDescription clusterDescription = new ClusterDescription( + ClusterConnectionMode.MULTIPLE, + ClusterType.REPLICA_SET, + asList( + builder().ok(true).state(CONNECTED).type(REPLICA_SET_PRIMARY).address(new ServerAddress("localhost:27017")) + .maxWireVersion(FIVE_DOT_ZERO_WIRE_VERSION).build(), + builder().ok(true).state(CONNECTED).type(REPLICA_SET_SECONDARY).address(new ServerAddress("localhost:27018")) + .maxWireVersion(FIVE_DOT_ZERO_WIRE_VERSION).build())); + assertEquals(clusterDescription.getServerDescriptions().stream() + .filter(serverDescription -> serverDescription.getType() == REPLICA_SET_SECONDARY).collect(toList()), + selector.select(clusterDescription)); + assertEquals(ReadPreference.secondary(), selector.getAppliedReadPreference()); + } + + @Test + public void shouldSelectCorrectServersWhenNoServersHaveBeenDiscovered() { + ReadPreferenceWithFallbackServerSelector selector = + new ReadPreferenceWithFallbackServerSelector( + ReadPreference.secondary(), FIVE_DOT_ZERO_WIRE_VERSION, ReadPreference.primary()); + + 
ClusterDescription clusterDescription = new ClusterDescription(
+                ClusterConnectionMode.MULTIPLE,
+                ClusterType.REPLICA_SET,
+                asList(builder().ok(false).state(CONNECTING).address(new ServerAddress("localhost:27017")).build(),
+                        builder().ok(false).state(CONNECTING).address(new ServerAddress("localhost:27018")).build()));
+        assertEquals(emptyList(), selector.select(clusterDescription));
+        assertEquals(ReadPreference.secondary(), selector.getAppliedReadPreference());
+
+        // when there is one connecting server, and a primary and secondary with maxWireVersion >= minWireVersion, apply read preference
+        clusterDescription = new ClusterDescription(
+                ClusterConnectionMode.MULTIPLE,
+                ClusterType.REPLICA_SET,
+                asList(
+                        builder().ok(false).state(CONNECTING).address(new ServerAddress("localhost:27017")).build(),
+                        builder().ok(true).state(CONNECTED).type(REPLICA_SET_PRIMARY).address(new ServerAddress("localhost:27018"))
+                                .maxWireVersion(FIVE_DOT_ZERO_WIRE_VERSION)
+                                .build(),
+                        builder().ok(true).state(CONNECTED).type(REPLICA_SET_SECONDARY).address(new ServerAddress("localhost:27019"))
+                                .maxWireVersion(FIVE_DOT_ZERO_WIRE_VERSION)
+                                .build()));
+        List<ServerDescription> serverDescriptionList = selector.select(clusterDescription);
+        assertEquals(clusterDescription.getServerDescriptions().stream()
+                .filter(serverDescription -> serverDescription.getType() == REPLICA_SET_SECONDARY).collect(toList()),
+                serverDescriptionList);
+        assertEquals(ReadPreference.secondary(), selector.getAppliedReadPreference());
+    }
+}
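The three cases above fix the fallback rule: fall back only when some discovered, data-bearing server reports a wire version below the minimum; an undiscovered topology or an all-modern one keeps the requested preference. A condensed sketch of just that decision (illustrative only; the type parameter stands in for ReadPreference):

    import java.util.List;

    final class FallbackDecisionSketch {
        // Illustrative only: the fallback applies when any known server's max wire
        // version is below the minimum; an empty list (nothing discovered yet)
        // keeps the requested preference.
        static <P> P applied(final List<Integer> knownMaxWireVersions, final P requested, final P fallback,
                final int minWireVersion) {
            boolean anyTooOld = knownMaxWireVersions.stream().anyMatch(v -> v < minWireVersion);
            return anyTooOld ? fallback : requested;
        }
    }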
diff --git a/driver-core/src/test/unit/com/mongodb/internal/selector/ServerAddressSelectorTest.java b/driver-core/src/test/unit/com/mongodb/internal/selector/ServerAddressSelectorTest.java
new file mode 100644
index 00000000000..efa9c27c341
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/selector/ServerAddressSelectorTest.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.internal.selector;
+
+import com.mongodb.ServerAddress;
+import com.mongodb.connection.ClusterDescription;
+import com.mongodb.connection.ServerDescription;
+import com.mongodb.connection.ServerType;
+import org.junit.Test;
+
+import java.net.UnknownHostException;
+import java.util.Arrays;
+
+import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE;
+import static com.mongodb.connection.ClusterType.REPLICA_SET;
+import static com.mongodb.connection.ServerConnectionState.CONNECTED;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+public class ServerAddressSelectorTest {
+    @Test
+    public void testAll() throws UnknownHostException {
+        ServerAddressSelector selector = new ServerAddressSelector(new ServerAddress("localhost:27018"));
+
+        assertTrue(selector.toString().startsWith("ServerAddressSelector"));
+
+        assertEquals(new ServerAddress("localhost:27018"), selector.getServerAddress());
+
+        ServerDescription primary = ServerDescription.builder()
+                .state(CONNECTED)
+                .address(new ServerAddress())
+                .ok(true)
+                .type(ServerType.REPLICA_SET_PRIMARY)
+                .build();
+        ServerDescription secondary = ServerDescription.builder()
+                .state(CONNECTED)
+                .address(new ServerAddress("localhost:27018"))
+                .ok(true)
+                .type(ServerType.REPLICA_SET_SECONDARY)
+                .build();
+        assertEquals(Arrays.asList(secondary), selector.select(new ClusterDescription(MULTIPLE, REPLICA_SET,
+                Arrays.asList(primary, secondary))));
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/selector/WritableServerSelectorSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/selector/WritableServerSelectorSpecification.groovy
new file mode 100644
index 00000000000..579d90717ba
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/selector/WritableServerSelectorSpecification.groovy
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.mongodb.internal.selector + +import com.mongodb.ServerAddress +import com.mongodb.connection.ClusterDescription +import com.mongodb.connection.ClusterType +import com.mongodb.connection.ServerDescription +import spock.lang.Specification +import spock.lang.Unroll + +import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE +import static com.mongodb.connection.ClusterConnectionMode.SINGLE +import static com.mongodb.connection.ServerConnectionState.CONNECTED +import static com.mongodb.connection.ServerType.REPLICA_SET_PRIMARY +import static com.mongodb.connection.ServerType.REPLICA_SET_SECONDARY + +class WritableServerSelectorSpecification extends Specification { + private static final ServerDescription.Builder SERVER_DESCRIPTION_BUILDER = ServerDescription.builder() + .state(CONNECTED) + .address(new ServerAddress()) + .ok(true) + private static final ServerDescription PRIMARY_SERVER = SERVER_DESCRIPTION_BUILDER.type(REPLICA_SET_PRIMARY).build() + private static final ServerDescription SECONDARY_SERVER = SERVER_DESCRIPTION_BUILDER.type(REPLICA_SET_SECONDARY).build() + + @Unroll + def 'WritableServerSelector will choose primary server for #clusterDescription'() throws UnknownHostException { + expect: + WritableServerSelector selector = new WritableServerSelector() + expectedServerList == selector.select(clusterDescription) + + where: + expectedServerList | clusterDescription + [PRIMARY_SERVER] | new ClusterDescription(MULTIPLE, ClusterType.REPLICA_SET, [PRIMARY_SERVER]) + [PRIMARY_SERVER] | new ClusterDescription(MULTIPLE, ClusterType.REPLICA_SET, [PRIMARY_SERVER, SECONDARY_SERVER]) + [] | new ClusterDescription(MULTIPLE, ClusterType.REPLICA_SET, [SECONDARY_SERVER]) + } + + @Unroll + def 'WritableServerSelector will choose secondary server in single mode for #clusterDescription'() throws UnknownHostException { + expect: + WritableServerSelector selector = new WritableServerSelector() + expectedServerList == selector.select(clusterDescription) + + where: + expectedServerList | clusterDescription + [SECONDARY_SERVER] | new ClusterDescription(SINGLE, ClusterType.REPLICA_SET, [SECONDARY_SERVER]) + } + +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/session/BaseClientSessionImplTest.java b/driver-core/src/test/unit/com/mongodb/internal/session/BaseClientSessionImplTest.java new file mode 100644 index 00000000000..c7fc1d73e20 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/session/BaseClientSessionImplTest.java @@ -0,0 +1,56 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.session; + +import com.mongodb.ClientSessionOptions; +import com.mongodb.session.ClientSession; +import org.junit.jupiter.api.Test; + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT; +import static com.mongodb.ClusterFixture.getCluster; +import static org.junit.jupiter.api.Assertions.assertEquals; + +class BaseClientSessionImplTest { + + @Test + void shouldNotCheckoutServerSessionIfNeverRequested() { + ServerSessionPool serverSessionPool = new ServerSessionPool(getCluster(), OPERATION_CONTEXT); + ClientSession clientSession = new BaseClientSessionImpl(serverSessionPool, new Object(), ClientSessionOptions.builder().build()); + + assertEquals(0, serverSessionPool.getInUseCount()); + + clientSession.close(); + + assertEquals(0, serverSessionPool.getInUseCount()); + } + + @Test + void shouldDelayServerSessionCheckoutUntilRequested() { + ServerSessionPool serverSessionPool = new ServerSessionPool(getCluster(), OPERATION_CONTEXT); + ClientSession clientSession = new BaseClientSessionImpl(serverSessionPool, new Object(), ClientSessionOptions.builder().build()); + + assertEquals(0, serverSessionPool.getInUseCount()); + + clientSession.getServerSession(); + + assertEquals(1, serverSessionPool.getInUseCount()); + + clientSession.close(); + + assertEquals(0, serverSessionPool.getInUseCount()); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/session/ClientSessionContextSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/session/ClientSessionContextSpecification.groovy new file mode 100644 index 00000000000..bb8fb9a7394 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/session/ClientSessionContextSpecification.groovy @@ -0,0 +1,138 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.session + +import com.mongodb.ReadConcern +import com.mongodb.session.ClientSession +import com.mongodb.session.ServerSession +import org.bson.BsonBoolean +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonTimestamp +import spock.lang.Specification + +class ClientSessionContextSpecification extends Specification { + + class TestClientSessionContext extends ClientSessionContext { + + TestClientSessionContext(final ClientSession clientSession) { + super(clientSession) + } + + @Override + boolean hasActiveTransaction() { + false + } + + @Override + boolean isImplicitSession() { + throw new UnsupportedOperationException() + } + + @Override + boolean notifyMessageSent() { + false + } + + @Override + ReadConcern getReadConcern() { + ReadConcern.DEFAULT + } + } + + def 'should have session'() { + given: + def clientSession = Mock(ClientSession) + def context = new TestClientSessionContext(clientSession) + + expect: + context.hasSession() + } + + def 'should forward all methods to wrapped session'() { + given: + def expectedSessionId = new BsonDocument('id', new BsonInt32(1)) + def expectedClusterTime = new BsonDocument('x', BsonBoolean.TRUE) + def expectedOperationTime = new BsonTimestamp(42, 1) + def expectedTransactionNumber = 2 + + def serverSession = Mock(ServerSession) + def clientSession = Mock(ClientSession) { + _ * getServerSession() >> { + serverSession + } + } + def context = new TestClientSessionContext(clientSession) + + when: + def sessionId = context.getSessionId() + + then: + sessionId == expectedSessionId + 1 * serverSession.getIdentifier() >> { + expectedSessionId + } + + when: + context.isCausallyConsistent() + + then: + 1 * clientSession.isCausallyConsistent() + + when: + context.advanceClusterTime(expectedClusterTime) + + then: + 1 * clientSession.advanceClusterTime(expectedClusterTime) + + when: + context.getOperationTime() + + then: + 1 * clientSession.getOperationTime() + + when: + context.advanceOperationTime(expectedOperationTime) + + then: + 1 * clientSession.advanceOperationTime(expectedOperationTime) + + when: + context.advanceTransactionNumber() + + then: + 1 * serverSession.advanceTransactionNumber() + + when: + def transactionNumber = context.getTransactionNumber() + + then: + transactionNumber == expectedTransactionNumber + 1 * serverSession.getTransactionNumber() >> { + expectedTransactionNumber + } + + when: + def clusterTime = context.getClusterTime() + + then: + clusterTime == expectedClusterTime + 1 * clientSession.getClusterTime() >> { + expectedClusterTime + } + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/session/ServerSessionPoolSpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/session/ServerSessionPoolSpecification.groovy new file mode 100644 index 00000000000..19bfa994200 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/session/ServerSessionPoolSpecification.groovy @@ -0,0 +1,229 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.session + +import com.mongodb.ServerAddress +import com.mongodb.connection.ClusterDescription +import com.mongodb.connection.ClusterSettings +import com.mongodb.connection.ServerDescription +import com.mongodb.connection.ServerSettings +import com.mongodb.internal.connection.Cluster +import com.mongodb.internal.connection.Connection +import com.mongodb.internal.connection.Server +import com.mongodb.internal.connection.ServerTuple +import com.mongodb.internal.validator.NoOpFieldNameValidator +import org.bson.BsonArray +import org.bson.BsonBinarySubType +import org.bson.BsonDocument +import org.bson.codecs.BsonDocumentCodec +import spock.lang.Specification + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS +import static com.mongodb.ClusterFixture.getServerApi +import static com.mongodb.ReadPreference.primaryPreferred +import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE +import static com.mongodb.connection.ClusterType.REPLICA_SET +import static com.mongodb.connection.ServerConnectionState.CONNECTED +import static com.mongodb.connection.ServerConnectionState.CONNECTING +import static com.mongodb.connection.ServerType.REPLICA_SET_PRIMARY +import static com.mongodb.connection.ServerType.UNKNOWN +import static java.util.concurrent.TimeUnit.MINUTES + +class ServerSessionPoolSpecification extends Specification { + + def connectedDescription = new ClusterDescription(MULTIPLE, REPLICA_SET, + [ + ServerDescription.builder().ok(true) + .state(CONNECTED) + .address(new ServerAddress()) + .type(REPLICA_SET_PRIMARY) + .logicalSessionTimeoutMinutes(30) + .build() + ], ClusterSettings.builder().hosts([new ServerAddress()]).build(), ServerSettings.builder().build()) + + def unconnectedDescription = new ClusterDescription(MULTIPLE, REPLICA_SET, + [ + ServerDescription.builder().ok(true) + .state(CONNECTING) + .address(new ServerAddress()) + .type(UNKNOWN) + .logicalSessionTimeoutMinutes(null) + .build() + ], ClusterSettings.builder().hosts([new ServerAddress()]).build(), ServerSettings.builder().build()) + + def 'should get session'() { + given: + def cluster = Stub(Cluster) { + getCurrentDescription() >> connectedDescription + } + def pool = new ServerSessionPool(cluster, TIMEOUT_SETTINGS, getServerApi()) + + when: + def session = pool.get() + + then: + session != null + } + + def 'should throw IllegalStateException if pool is closed'() { + given: + def cluster = Stub(Cluster) { + getCurrentDescription() >> connectedDescription + } + def pool = new ServerSessionPool(cluster, TIMEOUT_SETTINGS, getServerApi()) + pool.close() + + when: + pool.get() + + then: + thrown(IllegalStateException) + } + + def 'should pool session'() { + given: + def cluster = Stub(Cluster) { + getCurrentDescription() >> connectedDescription + } + def pool = new ServerSessionPool(cluster, TIMEOUT_SETTINGS, getServerApi()) + def session = pool.get() + + when: + pool.release(session) + def pooledSession = pool.get() + + then: + session == pooledSession + } + + def 'should prune sessions when getting'() { + given: + def cluster = Mock(Cluster) { + getCurrentDescription() >> connectedDescription + } + def clock = Stub(ServerSessionPool.Clock) { + millis() >>> [0, MINUTES.toMillis(29) + 1, + ] + } + def pool = new ServerSessionPool(cluster, OPERATION_CONTEXT, clock) + def sessionOne = pool.get() + + when: + 
pool.release(sessionOne) + + then: + !sessionOne.closed + + when: + def sessionTwo = pool.get() + + then: + sessionTwo != sessionOne + sessionOne.closed + 0 * cluster.selectServer(_) + } + + def 'should not prune session when timeout is null'() { + given: + def cluster = Stub(Cluster) { + getCurrentDescription() >> unconnectedDescription + } + def clock = Stub(ServerSessionPool.Clock) { + millis() >>> [0, 0, 0] + } + def pool = new ServerSessionPool(cluster, OPERATION_CONTEXT, clock) + def session = pool.get() + + when: + pool.release(session) + def newSession = pool.get() + + then: + session == newSession + } + + def 'should initialize session'() { + given: + def cluster = Stub(Cluster) { + getCurrentDescription() >> connectedDescription + } + def clock = Stub(ServerSessionPool.Clock) { + millis() >> 42 + } + def pool = new ServerSessionPool(cluster, OPERATION_CONTEXT, clock) + + when: + def session = pool.get() as ServerSessionPool.ServerSessionImpl + + then: + session.lastUsedAtMillis == 42 + session.transactionNumber == 0 + def uuid = session.identifier.getBinary('id') + uuid != null + uuid.type == BsonBinarySubType.UUID_STANDARD.value + uuid.data.length == 16 + } + + def 'should advance transaction number'() { + given: + def cluster = Stub(Cluster) { + getCurrentDescription() >> connectedDescription + } + def clock = Stub(ServerSessionPool.Clock) { + millis() >> 42 + } + def pool = new ServerSessionPool(cluster, OPERATION_CONTEXT, clock) + + when: + def session = pool.get() as ServerSessionPool.ServerSessionImpl + + then: + session.transactionNumber == 0 + session.advanceTransactionNumber() == 1 + session.transactionNumber == 1 + } + + def 'should end pooled sessions when pool is closed'() { + given: + def connection = Mock(Connection) + def server = Stub(Server) { + getConnection(_) >> connection + } + def cluster = Mock(Cluster) { + getCurrentDescription() >> connectedDescription + } + def pool = new ServerSessionPool(cluster, TIMEOUT_SETTINGS, getServerApi()) + def sessions = [] + 10.times { sessions.add(pool.get()) } + + for (def cur : sessions) { + pool.release(cur) + } + + when: + pool.close() + + then: + 1 * cluster.selectServer(_, _) >> new ServerTuple(server, connectedDescription.serverDescriptions[0]) + 1 * connection.command('admin', + new BsonDocument('endSessions', new BsonArray(sessions*.getIdentifier())), + { it instanceof NoOpFieldNameValidator }, primaryPreferred(), + { it instanceof BsonDocumentCodec }, _) >> new BsonDocument() + 1 * connection.release() + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/thread/DaemonThreadFactorySpecification.groovy b/driver-core/src/test/unit/com/mongodb/internal/thread/DaemonThreadFactorySpecification.groovy new file mode 100644 index 00000000000..4f60bb22a82 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/thread/DaemonThreadFactorySpecification.groovy @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.internal.thread + +import spock.lang.Specification + +class DaemonThreadFactorySpecification extends Specification { + def 'should create daemon threads'() { + def factory = new DaemonThreadFactory() + + when: + def thread = factory.newThread { } + + then: + thread.isDaemon() + } +} diff --git a/driver-core/src/test/unit/com/mongodb/internal/time/TimePointTest.java b/driver-core/src/test/unit/com/mongodb/internal/time/TimePointTest.java new file mode 100644 index 00000000000..a1b3f37dd98 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/internal/time/TimePointTest.java @@ -0,0 +1,330 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.time; + +import com.mongodb.lang.Nullable; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.params.provider.ValueSource; + +import java.time.Duration; +import java.util.Collection; +import java.util.Objects; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.Condition; +import java.util.stream.Stream; + +import static com.mongodb.internal.time.Timeout.ZeroSemantics.ZERO_DURATION_MEANS_EXPIRED; +import static java.util.Arrays.asList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.MINUTES; +import static java.util.concurrent.TimeUnit.NANOSECONDS; +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.params.provider.Arguments.arguments; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.reset; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; + +public final class TimePointTest { + + private final AtomicLong currentNanos = new AtomicLong(); + private final TimePoint mockTimePoint = new TimePoint(0L) { + @Override + long currentNanos() { + return currentNanos.get(); + } + }; + + public static boolean isInfinite(final Timeout timeout) { + return timeout.call(NANOSECONDS, () -> true, (ns) -> false, () -> false); + } + + public static boolean hasExpired(final Timeout timeout) { + return timeout.call(NANOSECONDS, () -> false, (ns) -> false, () -> true); + } + + public static long 
remaining(final Timeout timeout, final TimeUnit unit) { + return timeout.checkedCall(unit, + () -> { + throw new AssertionError("Infinite TimePoints have infinite remaining time"); + }, + (time) -> time, + () -> 0L); + } + + // Timeout + + @Test + void timeoutExpiresIn() { + assertAll( + () -> assertThrows(AssertionError.class, () -> Timeout.expiresIn(-1000, MINUTES, ZERO_DURATION_MEANS_EXPIRED)), + () -> assertTrue(hasExpired(Timeout.expiresIn(0L, NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED))), + () -> assertFalse(isInfinite(Timeout.expiresIn(1L, NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED))), + () -> assertFalse(hasExpired(Timeout.expiresIn(1000, MINUTES, ZERO_DURATION_MEANS_EXPIRED)))); + } + + @Test + void timeoutInfinite() { + assertEquals(Timeout.infinite(), TimePoint.infinite()); + } + + @Test + void timeoutAwaitOnCondition() throws InterruptedException { + Condition condition = mock(Condition.class); + + Timeout.infinite().awaitOn(condition, () -> "ignored"); + verify(condition, times(1)).await(); + verifyNoMoreInteractions(condition); + + reset(condition); + + Timeout.expiresIn(100, NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED).awaitOn(condition, () -> "ignored"); + verify(condition, times(1)).awaitNanos(anyLong()); + verifyNoMoreInteractions(condition); + } + + @Test + void timeoutAwaitOnLatch() throws InterruptedException { + CountDownLatch latch = mock(CountDownLatch.class); + + Timeout.infinite().awaitOn(latch, () -> "ignored"); + verify(latch, times(1)).await(); + verifyNoMoreInteractions(latch); + + reset(latch); + + Timeout.expiresIn(100, NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED).awaitOn(latch, () -> "ignored"); + verify(latch, times(1)).await(anyLong(), any(TimeUnit.class)); + verifyNoMoreInteractions(latch); + } + + // TimePoint + + @Test + void now() { + TimePoint timePointLowerBound = TimePoint.at(System.nanoTime()); + TimePoint timePoint = TimePoint.now(); + TimePoint timePointUpperBound = TimePoint.at(System.nanoTime()); + assertTrue(timePoint.compareTo(timePointLowerBound) >= 0, "the point is too early"); + assertTrue(timePoint.compareTo(timePointUpperBound) <= 0, "the point is too late"); + } + + @Test + void infinite() { + TimePoint infinite = TimePoint.infinite(); + TimePoint now = TimePoint.now(); + assertEquals(0, infinite.compareTo(TimePoint.infinite())); + assertTrue(infinite.compareTo(now) > 0); + assertTrue(now.compareTo(infinite) < 0); + } + + @Test + void isInfinite() { + assertAll( + () -> assertTrue(isInfinite(Timeout.infinite())), + () -> assertFalse(isInfinite(TimePoint.now()))); + } + + @Test + void asTimeout() { + TimePoint t1 = TimePoint.now(); + assertSame(t1, t1.asTimeout()); + TimePoint t2 = TimePoint.infinite(); + assertSame(t2, t2.asTimeout()); + } + + + @Test + void remaining() { + assertAll( + () -> assertThrows(AssertionError.class, () -> remaining(TimePoint.infinite(), NANOSECONDS)), + () -> assertEquals(0, remaining(TimePoint.now(), NANOSECONDS)) + ); + Timeout earlier = TimePoint.at(System.nanoTime() - 100); + assertEquals(0, remaining(earlier, NANOSECONDS)); + assertTrue(hasExpired(earlier)); + + currentNanos.set(-100); + assertEquals(100, remaining(mockTimePoint, NANOSECONDS)); + currentNanos.set(-1000000); + assertEquals(1, remaining(mockTimePoint, MILLISECONDS)); + currentNanos.set(-1000000 + 1); + assertEquals(0, remaining(mockTimePoint, MILLISECONDS)); + } + + @ParameterizedTest + @ValueSource(longs = {1, 7, 10, 100, 1000}) + void remaining(final long durationNanos) { + TimePoint start = TimePoint.now(); + Timeout timeout = 
start.timeoutAfterOrInfiniteIfNegative(durationNanos, NANOSECONDS); + while (!hasExpired(timeout)) { + long remainingNanosUpperBound = Math.max(0, durationNanos - TimePoint.now().durationSince(start).toNanos()); + long remainingNanos = remaining(timeout, NANOSECONDS); + long remainingNanosLowerBound = Math.max(0, durationNanos - TimePoint.now().durationSince(start).toNanos()); + assertTrue(remainingNanos >= remainingNanosLowerBound, "remaining nanos is too low"); + assertTrue(remainingNanos <= remainingNanosUpperBound, "remaining nanos is too high"); + Thread.yield(); + } + assertTrue(TimePoint.now().durationSince(start).toNanos() >= durationNanos, "expired too early"); + } + + @Test + void elapsed() { + TimePoint timePoint = TimePoint.now(); + Duration elapsedLowerBound = TimePoint.now().durationSince(timePoint); + Duration elapsed = timePoint.elapsed(); + Duration elapsedUpperBound = TimePoint.now().durationSince(timePoint); + assertTrue(elapsed.compareTo(elapsedLowerBound) >= 0, "the elapsed is too low"); + assertTrue(elapsed.compareTo(elapsedUpperBound) <= 0, "the elapsed is too high"); + assertThrows(AssertionError.class, () -> TimePoint.infinite().elapsed()); + + currentNanos.set(100); + assertEquals(100, mockTimePoint.elapsed().toNanos()); + currentNanos.set(1000000); + assertEquals(1, mockTimePoint.elapsed().toMillis()); + currentNanos.set(1000000 - 1); + assertEquals(0, mockTimePoint.elapsed().toMillis()); + } + + @Test + void hasExpired() { + assertAll( + () -> assertFalse(hasExpired(Timeout.infinite())), + () -> assertTrue(hasExpired(TimePoint.now())), + () -> assertThrows(AssertionError.class, () -> Timeout.expiresIn(-1000, MINUTES, ZERO_DURATION_MEANS_EXPIRED)), + () -> assertFalse(hasExpired(Timeout.expiresIn(1000, MINUTES, ZERO_DURATION_MEANS_EXPIRED)))); + } + + @ParameterizedTest + @MethodSource("earlierNanosAndNanosArguments") + void durationSince(final Long earlierNanos, @Nullable final Long nanos) { + TimePoint earlierTimePoint = TimePoint.at(earlierNanos); + TimePoint timePoint = TimePoint.at(nanos); + + if (nanos == null) { + assertThrows(AssertionError.class, () -> timePoint.durationSince(earlierTimePoint)); + return; + } + + Duration expectedDuration = Duration.ofNanos(nanos - earlierNanos); + assertFalse(expectedDuration.isNegative()); + assertEquals(expectedDuration, timePoint.durationSince(earlierTimePoint)); + assertEquals(expectedDuration.negated(), earlierTimePoint.durationSince(timePoint)); + } + + @ParameterizedTest + @ValueSource(longs = {1, 7, Long.MAX_VALUE / 2, Long.MAX_VALUE - 1}) + void remainingNanos(final long durationNanos) { + TimePoint start = TimePoint.now(); + TimePoint timeout = start.add(Duration.ofNanos(durationNanos)); + assertEquals(durationNanos, timeout.durationSince(start).toNanos()); + assertEquals(Math.max(0, durationNanos - 1), timeout.durationSince(start.add(Duration.ofNanos(1))).toNanos()); + assertEquals(0, timeout.durationSince(start.add(Duration.ofNanos(durationNanos))).toNanos()); + assertEquals(-1, timeout.durationSince(start.add(Duration.ofNanos(durationNanos + 1))).toNanos()); + } + + @Test + void fromNowOrInfinite() { + TimePoint timePoint = TimePoint.now(); + assertAll( + () -> assertFalse(isInfinite(TimePoint.now().timeoutAfterOrInfiniteIfNegative(1L, NANOSECONDS))), + () -> assertEquals(timePoint, timePoint.timeoutAfterOrInfiniteIfNegative(0, NANOSECONDS)), + () -> assertNotEquals(TimePoint.infinite(), timePoint.timeoutAfterOrInfiniteIfNegative(1, NANOSECONDS)), + () -> assertNotEquals(timePoint, 
timePoint.timeoutAfterOrInfiniteIfNegative(1, NANOSECONDS)),
+                () -> assertNotEquals(TimePoint.infinite(), timePoint.timeoutAfterOrInfiniteIfNegative(Long.MAX_VALUE - 1, NANOSECONDS)));
+    }
+
+    @ParameterizedTest
+    @MethodSource("nanosAndDurationsArguments")
+    void add(final long nanos, final Duration duration) {
+        TimePoint timePoint = TimePoint.at(nanos);
+        assertEquals(duration, timePoint.add(duration).durationSince(timePoint));
+    }
+
+    private static Stream<Arguments> nanosAndDurationsArguments() {
+        Collection<Long> nanos = asList(Long.MIN_VALUE, Long.MIN_VALUE / 2, 0L, Long.MAX_VALUE / 2, Long.MAX_VALUE);
+        Collection<Long> durationsInNanos = asList(
+                // Using `-Long.MAX_VALUE` results in `ArithmeticException` in OpenJDK JDK 8 because of https://bugs.openjdk.org/browse/JDK-8146747.
+                // This was fixed in OpenJDK JDK 9.
+                -Long.MAX_VALUE / 2, 0L, Long.MAX_VALUE / 2, Long.MAX_VALUE);
+        return nanos.stream()
+                .flatMap(nano -> durationsInNanos.stream()
+                        .map(durationNanos -> arguments(nano, Duration.ofNanos(durationNanos))));
+    }
+
+    @ParameterizedTest
+    @MethodSource("earlierNanosAndNanosArguments")
+    void compareTo(final Long earlierNanos, final Long nanos) {
+        TimePoint earlierTimePoint = TimePoint.at(earlierNanos);
+        TimePoint timePoint = TimePoint.at(nanos);
+        if (Objects.equals(earlierNanos, nanos)) {
+            assertEquals(0, earlierTimePoint.compareTo(timePoint));
+            assertEquals(0, timePoint.compareTo(earlierTimePoint));
+            assertEquals(earlierTimePoint, timePoint);
+            assertEquals(timePoint, earlierTimePoint);
+        } else {
+            assertTrue(earlierTimePoint.compareTo(timePoint) < 0);
+            assertTrue(timePoint.compareTo(earlierTimePoint) > 0);
+            assertNotEquals(earlierTimePoint, timePoint);
+            assertNotEquals(timePoint, earlierTimePoint);
+        }
+    }
+
+    private static Stream<Arguments> earlierNanosAndNanosArguments() {
+        Collection<Long> earlierNanos = asList(Long.MIN_VALUE, Long.MIN_VALUE / 2, 0L, Long.MAX_VALUE / 2, Long.MAX_VALUE);
+        Collection<Long> durationsInNanos = asList(0L, 1L, Long.MAX_VALUE / 2, Long.MAX_VALUE, null);
+        return earlierNanos.stream()
+                .flatMap(earlier -> durationsInNanos.stream()
+                        .map(durationNanos -> arguments(earlier, durationNanos == null ? null : earlier + durationNanos)));
+    }
+
+    @ParameterizedTest
+    @MethodSource("durationArguments")
+    void convertsUnits(final long duration, final TimeUnit unit) {
+        TimePoint start = TimePoint.now();
+        TimePoint end = start.timeoutAfterOrInfiniteIfNegative(duration, unit);
+        if (duration < 0) {
+            assertTrue(isInfinite(end));
+        } else {
+            assertEquals(unit.toNanos(duration), end.durationSince(start).toNanos());
+        }
+    }
+
+    private static Stream<Arguments> durationArguments() {
+        return Stream.of(TimeUnit.values())
+                .flatMap(unit -> Stream.of(
+                        Arguments.of(-7, unit),
+                        Arguments.of(0, unit),
+                        Arguments.of(7, unit)));
+    }
+
+    private TimePointTest() {
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/internal/validator/ReplacingDocumentFieldNameValidatorTest.java b/driver-core/src/test/unit/com/mongodb/internal/validator/ReplacingDocumentFieldNameValidatorTest.java
new file mode 100644
index 00000000000..ff7ef713653
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/internal/validator/ReplacingDocumentFieldNameValidatorTest.java
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.validator; + +import org.junit.Test; + +import static com.mongodb.internal.validator.ReplacingDocumentFieldNameValidator.INSTANCE; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +public class ReplacingDocumentFieldNameValidatorTest { + @Test + public void testFieldValidationSuccess() { + assertTrue(INSTANCE.validate("ok")); + } + + @Test + public void testFieldNameStartsWithDollarValidation() { + assertFalse(INSTANCE.validate("$1")); + assertTrue(INSTANCE.validate("$db")); + assertTrue(INSTANCE.validate("$ref")); + assertTrue(INSTANCE.validate("$id")); + } + + @Test + public void testNestedDocumentsAreNotValidated() { + assertEquals(NoOpFieldNameValidator.class, INSTANCE.getValidatorForField("nested").getClass()); + } +} diff --git a/driver-core/src/test/unit/com/mongodb/logging/TestLoggingInterceptor.java b/driver-core/src/test/unit/com/mongodb/logging/TestLoggingInterceptor.java new file mode 100644 index 00000000000..785252b1902 --- /dev/null +++ b/driver-core/src/test/unit/com/mongodb/logging/TestLoggingInterceptor.java @@ -0,0 +1,72 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.mongodb.logging;
+
+import com.mongodb.internal.logging.LogMessage;
+import com.mongodb.internal.logging.LoggingInterceptor;
+import com.mongodb.internal.logging.StructuredLogger;
+import com.mongodb.lang.NonNull;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import static java.util.Objects.requireNonNull;
+
+public class TestLoggingInterceptor implements LoggingInterceptor, AutoCloseable {
+
+    private final List<LogMessage> messages = new ArrayList<>();
+    private final String applicationName;
+    private final LoggingFilter filter;
+
+    public TestLoggingInterceptor(final String applicationName, final LoggingFilter filter) {
+        this.applicationName = requireNonNull(applicationName);
+        this.filter = requireNonNull(filter);
+        StructuredLogger.addInterceptor(applicationName, this);
+    }
+
+    @Override
+    public synchronized void intercept(@NonNull final LogMessage message) {
+        if (filter.match(message)) {
+            messages.add(message);
+        }
+    }
+
+    @Override
+    public void close() {
+        StructuredLogger.removeInterceptor(applicationName);
+    }
+
+    public synchronized List<LogMessage> getMessages() {
+        return new ArrayList<>(messages);
+    }
+
+    public static final class LoggingFilter {
+        private final Map<LogMessage.Component, LogMessage.Level> filterConfig;
+
+        public LoggingFilter(final Map<LogMessage.Component, LogMessage.Level> filterConfig) {
+            this.filterConfig = filterConfig;
+        }
+
+        boolean match(final LogMessage message) {
+            LogMessage.Level expectedLevel = filterConfig.get(message.getComponent());
+            if (expectedLevel != null) {
+                return message.getLevel().compareTo(expectedLevel) <= 0;
+            }
+            return false;
+        }
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/selector/CompositeServerSelectorTest.java b/driver-core/src/test/unit/com/mongodb/selector/CompositeServerSelectorTest.java
new file mode 100644
index 00000000000..db20cac2fb1
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/selector/CompositeServerSelectorTest.java
@@ -0,0 +1,117 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.selector;
+
+import com.mongodb.ServerAddress;
+import com.mongodb.connection.ClusterDescription;
+import com.mongodb.connection.ClusterSettings;
+import com.mongodb.connection.ServerDescription;
+import com.mongodb.connection.ServerSettings;
+import com.mongodb.internal.selector.LatencyMinimizingServerSelector;
+import com.mongodb.internal.selector.ReadPreferenceServerSelector;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+import static com.mongodb.ReadPreference.secondary;
+import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE;
+import static com.mongodb.connection.ClusterType.REPLICA_SET;
+import static com.mongodb.connection.ServerConnectionState.CONNECTED;
+import static com.mongodb.connection.ServerType.REPLICA_SET_PRIMARY;
+import static com.mongodb.connection.ServerType.REPLICA_SET_SECONDARY;
+import static java.util.Arrays.asList;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+public class CompositeServerSelectorTest {
+    private CompositeServerSelector selector;
+    private ServerDescription second;
+    private ServerDescription first;
+    private ServerDescription third;
+
+    @Before
+    public void setUp() {
+        first = ServerDescription.builder()
+                .state(CONNECTED)
+                .address(new ServerAddress())
+                .ok(true)
+                .roundTripTime(5, MILLISECONDS)
+                .type(REPLICA_SET_PRIMARY)
+                .build();
+
+        second = ServerDescription.builder()
+                .state(CONNECTED)
+                .address(new ServerAddress("localhost:27018"))
+                .ok(true)
+                .roundTripTime(30, MILLISECONDS)
+                .type(REPLICA_SET_SECONDARY)
+                .build();
+
+        third = ServerDescription.builder()
+                .state(CONNECTED)
+                .address(new ServerAddress("localhost:27019"))
+                .ok(true)
+                .roundTripTime(35, MILLISECONDS)
+                .type(REPLICA_SET_SECONDARY)
+                .build();
+    }
+
+    @Test
+    public void shouldApplyServerSelectorsInOrder() {
+        selector = new CompositeServerSelector(asList(new ReadPreferenceServerSelector(secondary()),
+                new LatencyMinimizingServerSelector(15, MILLISECONDS)));
+        assertEquals(selector.select(new ClusterDescription(MULTIPLE, REPLICA_SET, asList(first, second, third))), asList(second, third));
+    }
+
+    @Test
+    public void shouldCollapseNestedComposite() {
+        CompositeServerSelector composedSelector =
+                new CompositeServerSelector(asList(new ReadPreferenceServerSelector(secondary()),
+                        new LatencyMinimizingServerSelector(15, MILLISECONDS)));
+        selector = new CompositeServerSelector(Arrays.asList(composedSelector));
+        assertEquals(selector.select(new ClusterDescription(MULTIPLE, REPLICA_SET, asList(first, second, third))), asList(second, third));
+    }
+
+    @Test
+    public void shouldPassOnClusterDescriptionWithCorrectServersAndSettings() {
+        TestServerSelector firstSelector = new TestServerSelector();
+        TestServerSelector secondSelector = new TestServerSelector();
+        CompositeServerSelector composedSelector = new CompositeServerSelector(asList(firstSelector, secondSelector));
+        composedSelector.select(new ClusterDescription(MULTIPLE, REPLICA_SET, asList(first, second, third),
+                ClusterSettings.builder().hosts(asList(new ServerAddress())).build(),
+                ServerSettings.builder().build()));
+        assertTrue(secondSelector.clusterDescription.getServerDescriptions().isEmpty());
+        assertNotNull(secondSelector.clusterDescription.getClusterSettings());
+        assertNotNull(secondSelector.clusterDescription.getServerSettings());
+    }
+
+    static class TestServerSelector implements ServerSelector {
+        private ClusterDescription clusterDescription;
+
+        @Override
+        public List<ServerDescription> select(final ClusterDescription clusterDescription) {
+            this.clusterDescription = clusterDescription;
+            return Collections.emptyList();
+        }
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/spock/Slow.java b/driver-core/src/test/unit/com/mongodb/spock/Slow.java
new file mode 100644
index 00000000000..fba0029931d
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/spock/Slow.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.spock;
+
+import java.lang.annotation.ElementType;
+import java.lang.annotation.Retention;
+import java.lang.annotation.RetentionPolicy;
+import java.lang.annotation.Target;
+
+@Retention(RetentionPolicy.RUNTIME)
+@Target({ElementType.METHOD, ElementType.TYPE})
+public @interface Slow {
+}
diff --git a/driver-core/src/test/unit/com/mongodb/testing/MongoAssertions.java b/driver-core/src/test/unit/com/mongodb/testing/MongoAssertions.java
new file mode 100644
index 00000000000..8f1bbf8df67
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/testing/MongoAssertions.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.testing;
+
+import org.junit.jupiter.api.function.Executable;
+import org.opentest4j.AssertionFailedError;
+
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+public final class MongoAssertions {
+
+    private MongoAssertions() {
+        //NOP
+    }
+
+    public static void assertCause(
+            final Class<? extends Throwable> expectedCause, final String expectedMessageFragment, final Executable e) {
+        Throwable cause = assertThrows(Throwable.class, e);
+        while (cause.getCause() != null) {
+            cause = cause.getCause();
+        }
+        if (!cause.getMessage().contains(expectedMessageFragment)) {
+            throw new AssertionFailedError("Unexpected message: " + cause.getMessage(), cause);
+        }
+        if (!expectedCause.isInstance(cause)) {
+            throw new AssertionFailedError("Unexpected cause: " + cause.getClass(), assertThrows(Throwable.class, e));
+        }
+    }
+}
diff --git a/driver-core/src/test/unit/com/mongodb/testing/MongoBaseInterfaceAssertions.java b/driver-core/src/test/unit/com/mongodb/testing/MongoBaseInterfaceAssertions.java
new file mode 100644
index 00000000000..0c0fe913123
--- /dev/null
+++ b/driver-core/src/test/unit/com/mongodb/testing/MongoBaseInterfaceAssertions.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.testing;
+
+import org.reflections.Reflections;
+
+import java.lang.reflect.Method;
+import java.lang.reflect.Modifier;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+public final class MongoBaseInterfaceAssertions {
+
+    private MongoBaseInterfaceAssertions() {
+        //NOP
+    }
+
+    public static <T> void assertSubtypeReturn(final Class<T> baseClass) {
+        Reflections reflections = new Reflections("com.mongodb");
+        Set<Class<? extends T>> subtypes = reflections.getSubTypesOf(baseClass).stream()
+                .filter(aClass -> Modifier.isPublic(aClass.getModifiers()))
+                .filter(aClass -> !aClass.getPackage().getName().contains(".internal"))
+                .collect(Collectors.toSet());
+
+        Method[] baseMethods = baseClass.getDeclaredMethods();
+
+        for (Class<? extends T> subtype : subtypes) {
+            for (Method baseMethod : baseMethods) {
+                Method method = assertDoesNotThrow(
+                        () -> subtype.getDeclaredMethod(baseMethod.getName(), baseMethod.getParameterTypes()),
+                        String.format(
+                                "`%s` does not override `%s`. The methods must be copied into the implementing class/interface.",
+                                subtype,
+                                baseMethod
+                        )
+                );
+
+                assertEquals(
+                        subtype,
+                        method.getReturnType(),
+                        String.format(
+                                "Method `%s` in `%s` does not return `%s`. " +
+                                        "The return type must match the defining class/interface.",
+                                method,
+                                subtype,
+                                subtype
+                        )
+                );
+            }
+        }
+    }
+}
diff --git a/driver-kotlin-coroutine/build.gradle.kts b/driver-kotlin-coroutine/build.gradle.kts
new file mode 100644
index 00000000000..02a2bf047aa
--- /dev/null
+++ b/driver-kotlin-coroutine/build.gradle.kts
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import ProjectExtensions.configureJarManifest
+import ProjectExtensions.configureMavenPublication
+
+plugins {
+    id("project.kotlin")
+    id("conventions.test-artifacts")
+    id("conventions.test-artifacts-runtime-dependencies")
+}
+
+base.archivesName.set("mongodb-driver-kotlin-coroutine")
+
+dependencies {
+    api(project(path = ":bson", configuration = "default"))
+    api(project(path = ":driver-reactive-streams", configuration = "default"))
+
+    implementation(platform(libs.kotlinx.coroutines.bom))
+    api(libs.kotlinx.coroutines.core)
+    implementation(libs.kotlinx.coroutines.reactive)
+    implementation(project(path = ":bson-kotlin", configuration = "default"))
+
+    integrationTestImplementation(libs.kotlinx.coroutines.test)
+    integrationTestImplementation(project(path = ":driver-sync", configuration = "default"))
+    integrationTestImplementation(project(path = ":bson", configuration = "testArtifacts"))
+    integrationTestImplementation(project(path = ":driver-sync", configuration = "testArtifacts"))
+    integrationTestImplementation(project(path = ":driver-core", configuration = "testArtifacts"))
+}
+
+configureMavenPublication {
+    pom {
+        name.set("MongoDB Kotlin Coroutine Driver")
+        description.set("The MongoDB Kotlin Coroutine Driver")
+    }
+}
+
+configureJarManifest {
+    attributes["Automatic-Module-Name"] = "org.mongodb.driver.kotlin.coroutine"
+    attributes["Bundle-SymbolicName"] = "org.mongodb.mongodb-driver-kotlin-coroutine"
+}
diff --git a/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/SmokeTests.kt b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/SmokeTests.kt
new file mode 100644
index 00000000000..db51912d17c
--- /dev/null
+++ b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/SmokeTests.kt
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.coroutine
+
+import com.mongodb.client.Fixture.getDefaultDatabaseName
+import com.mongodb.client.Fixture.getMongoClientSettings
+import kotlin.test.assertContentEquals
+import kotlin.test.assertEquals
+import kotlinx.coroutines.ExperimentalCoroutinesApi
+import kotlinx.coroutines.flow.map
+import kotlinx.coroutines.flow.toList
+import kotlinx.coroutines.flow.toSet
+import kotlinx.coroutines.runBlocking
+import kotlinx.coroutines.test.runTest
+import org.bson.Document
+import org.junit.jupiter.api.AfterAll
+import org.junit.jupiter.api.AfterEach
+import org.junit.jupiter.api.BeforeAll
+import org.junit.jupiter.api.DisplayName
+import org.junit.jupiter.api.Test
+
+@OptIn(ExperimentalCoroutinesApi::class)
+class SmokeTests {
+
+    @AfterEach
+    fun afterEach() {
+        runBlocking { database?.drop() }
+    }
+
+    @Test
+    @DisplayName("distinct and return nulls")
+    fun testDistinctNullable() = runTest {
+        collection!!.insertMany(
+            listOf(
+                Document.parse("{_id: 1, a: 0}"),
+                Document.parse("{_id: 2, a: 1}"),
+                Document.parse("{_id: 3, a: 0}"),
+                Document.parse("{_id: 4, a: null}")))
+
+        // nulls are auto excluded in reactive streams!
+        val actual = collection!!.distinct<Int>("a").toSet()
+        assertEquals(setOf(0, 1), actual)
+    }
+
+    @Test
+    @DisplayName("mapping can return nulls")
+    fun testMongoIterableMap() = runTest {
+        collection!!.insertMany(
+            listOf(
+                Document.parse("{_id: 1, a: 0}"),
+                Document.parse("{_id: 2, a: 1}"),
+                Document.parse("{_id: 3, a: 0}"),
+                Document.parse("{_id: 4, a: null}")))
+
+        val actual = collection!!.find().map { it["a"] as Int? }.toList()
+        assertContentEquals(listOf(0, 1, 0, null), actual)
+    }
+
+    companion object {
+
+        private var mongoClient: MongoClient? = null
+        private var database: MongoDatabase? = null
+        private var collection: MongoCollection<Document>? = null
+
+        @BeforeAll
+        @JvmStatic
+        internal fun beforeAll() {
+            runBlocking {
+                mongoClient = MongoClient.create(getMongoClientSettings())
+                database = mongoClient?.getDatabase(getDefaultDatabaseName())
+                database?.drop()
+                collection = database?.getCollection("SmokeTests")
+            }
+        }
+
+        @AfterAll
+        @JvmStatic
+        internal fun afterAll() {
+            runBlocking {
+                collection = null
+                database?.drop()
+                database = null
+                mongoClient?.close()
+                mongoClient = null
+            }
+        }
+    }
+}
diff --git a/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/UnifiedCrudTest.kt b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/UnifiedCrudTest.kt
new file mode 100644
index 00000000000..73f6c0cf6fa
--- /dev/null
+++ b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/UnifiedCrudTest.kt
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.coroutine
+
+import java.io.IOException
+import java.net.URISyntaxException
+import org.junit.jupiter.params.provider.Arguments
+
+internal class UnifiedCrudTest() : UnifiedTest() {
+    companion object {
+        @JvmStatic
+        @Throws(URISyntaxException::class, IOException::class)
+        fun data(): Collection<Arguments>? {
+            return getTestData("crud", true, Language.KOTLIN)
+        }
+    }
+}
diff --git a/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/UnifiedTest.kt b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/UnifiedTest.kt
new file mode 100644
index 00000000000..f676f93956f
--- /dev/null
+++ b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/UnifiedTest.kt
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.coroutine
+
+import com.mongodb.ClientEncryptionSettings
+import com.mongodb.MongoClientSettings
+import com.mongodb.client.MongoClient as JMongoClient
+import com.mongodb.client.MongoDatabase as JMongoDatabase
+import com.mongodb.client.gridfs.GridFSBucket
+import com.mongodb.client.unified.UnifiedTest as JUnifiedTest
+import com.mongodb.client.vault.ClientEncryption
+import com.mongodb.kotlin.client.coroutine.syncadapter.SyncMongoClient
+
+internal abstract class UnifiedTest() : JUnifiedTest() {
+
+    override fun createMongoClient(settings: MongoClientSettings): JMongoClient =
+        SyncMongoClient(MongoClient.create(settings))
+
+    override fun createGridFSBucket(database: JMongoDatabase?): GridFSBucket {
+        TODO("Not yet implemented - JAVA-4893")
+    }
+
+    override fun createClientEncryption(
+        keyVaultClient: JMongoClient?,
+        clientEncryptionSettings: ClientEncryptionSettings?
+    ): ClientEncryption {
+        TODO("Not yet implemented - JAVA-4896")
+    }
+
+    override fun isReactive(): Boolean = true
+
+    override fun getLanguage(): Language = Language.KOTLIN
+}
diff --git a/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncAggregateIterable.kt b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncAggregateIterable.kt
new file mode 100644
index 00000000000..439a0ccbb29
--- /dev/null
+++ b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncAggregateIterable.kt
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.coroutine.syncadapter
+
+import com.mongodb.ExplainVerbosity
+import com.mongodb.client.AggregateIterable as JAggregateIterable
+import com.mongodb.client.cursor.TimeoutMode
+import com.mongodb.client.model.Collation
+import com.mongodb.kotlin.client.coroutine.AggregateFlow
+import java.util.concurrent.TimeUnit
+import kotlinx.coroutines.runBlocking
+import org.bson.BsonValue
+import org.bson.Document
+import org.bson.conversions.Bson
+
+data class SyncAggregateIterable<T : Any>(val wrapped: AggregateFlow<T>) :
+    JAggregateIterable<T>, SyncMongoIterable<T>(wrapped) {
+    override fun batchSize(batchSize: Int): SyncAggregateIterable<T> = apply { wrapped.batchSize(batchSize) }
+    override fun toCollection() = runBlocking { wrapped.toCollection() }
+
+    override fun allowDiskUse(allowDiskUse: Boolean?): SyncAggregateIterable<T> = apply {
+        wrapped.allowDiskUse(allowDiskUse)
+    }
+
+    override fun maxTime(maxTime: Long, timeUnit: TimeUnit): SyncAggregateIterable<T> = apply {
+        wrapped.maxTime(maxTime, timeUnit)
+    }
+
+    override fun maxAwaitTime(maxAwaitTime: Long, timeUnit: TimeUnit): SyncAggregateIterable<T> = apply {
+        wrapped.maxAwaitTime(maxAwaitTime, timeUnit)
+    }
+
+    override fun bypassDocumentValidation(bypassDocumentValidation: Boolean?): SyncAggregateIterable<T> = apply {
+        wrapped.bypassDocumentValidation(bypassDocumentValidation)
+    }
+
+    override fun collation(collation: Collation?): SyncAggregateIterable<T> = apply { wrapped.collation(collation) }
+
+    override fun comment(comment: String?): SyncAggregateIterable<T> = apply { wrapped.comment(comment) }
+
+    override fun comment(comment: BsonValue?): SyncAggregateIterable<T> = apply { wrapped.comment(comment) }
+
+    override fun hint(hint: Bson?): SyncAggregateIterable<T> = apply { wrapped.hint(hint) }
+
+    override fun hintString(hint: String?): SyncAggregateIterable<T> = apply { wrapped.hintString(hint) }
+
+    override fun let(variables: Bson?): SyncAggregateIterable<T> = apply { wrapped.let(variables) }
+
+    override fun timeoutMode(timeoutMode: TimeoutMode): SyncAggregateIterable<T> = apply {
+        wrapped.timeoutMode(timeoutMode)
+    }
+
+    override fun explain(): Document = runBlocking { wrapped.explain() }
+
+    override fun explain(verbosity: ExplainVerbosity): Document = runBlocking { wrapped.explain(verbosity) }
+
+    override fun <E : Any> explain(explainResultClass: Class<E>): E = runBlocking {
+        wrapped.explain(explainResultClass)
+    }
+
+    override fun <E : Any> explain(explainResultClass: Class<E>, verbosity: ExplainVerbosity): E = runBlocking {
+        wrapped.explain(explainResultClass, verbosity)
+    }
+}
diff --git a/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncChangeStreamIterable.kt b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncChangeStreamIterable.kt
new file mode 100644
index 00000000000..3f5269cd10d
--- /dev/null
+++ b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncChangeStreamIterable.kt
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.coroutine.syncadapter
+
+import com.mongodb.client.ChangeStreamIterable as JChangeStreamIterable
+import com.mongodb.client.MongoIterable
+import com.mongodb.client.model.Collation
+import com.mongodb.client.model.changestream.ChangeStreamDocument
+import com.mongodb.client.model.changestream.FullDocument
+import com.mongodb.client.model.changestream.FullDocumentBeforeChange
+import com.mongodb.kotlin.client.coroutine.ChangeStreamFlow
+import java.util.concurrent.TimeUnit
+import kotlinx.coroutines.runBlocking
+import org.bson.BsonDocument
+import org.bson.BsonTimestamp
+import org.bson.BsonValue
+
+data class SyncChangeStreamIterable<T : Any>(val wrapped: ChangeStreamFlow<T>) :
+    JChangeStreamIterable<T>, SyncMongoIterable<ChangeStreamDocument<T>>(wrapped) {
+    override fun <TDocument : Any> withDocumentClass(clazz: Class<TDocument>): MongoIterable<TDocument> = runBlocking {
+        SyncMongoIterable(wrapped.withDocumentClass(clazz))
+    }
+
+    override fun batchSize(batchSize: Int): SyncChangeStreamIterable<T> = apply { wrapped.batchSize(batchSize) }
+    override fun collation(collation: Collation?): SyncChangeStreamIterable<T> = apply { wrapped.collation(collation) }
+    override fun comment(comment: BsonValue?): SyncChangeStreamIterable<T> = apply { wrapped.comment(comment) }
+    override fun comment(comment: String?): SyncChangeStreamIterable<T> = apply { wrapped.comment(comment) }
+    override fun cursor(): SyncMongoChangeStreamCursor<ChangeStreamDocument<T>> = SyncMongoChangeStreamCursor(wrapped)
+    override fun fullDocument(fullDocument: FullDocument): SyncChangeStreamIterable<T> = apply {
+        wrapped.fullDocument(fullDocument)
+    }
+    override fun fullDocumentBeforeChange(
+        fullDocumentBeforeChange: FullDocumentBeforeChange
+    ): SyncChangeStreamIterable<T> = apply { wrapped.fullDocumentBeforeChange(fullDocumentBeforeChange) }
+    override fun maxAwaitTime(maxAwaitTime: Long, timeUnit: TimeUnit): SyncChangeStreamIterable<T> = apply {
+        wrapped.maxAwaitTime(maxAwaitTime, timeUnit)
+    }
+    override fun resumeAfter(resumeToken: BsonDocument): SyncChangeStreamIterable<T> = apply {
+        wrapped.resumeAfter(resumeToken)
+    }
+    override fun showExpandedEvents(showExpandedEvents: Boolean): SyncChangeStreamIterable<T> = apply {
+        wrapped.showExpandedEvents(showExpandedEvents)
+    }
+    override fun startAfter(startAfter: BsonDocument): SyncChangeStreamIterable<T> = apply {
+        wrapped.startAfter(startAfter)
+    }
+    override fun startAtOperationTime(startAtOperationTime: BsonTimestamp): SyncChangeStreamIterable<T> = apply {
+        wrapped.startAtOperationTime(startAtOperationTime)
+    }
+}
diff --git a/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncClientSession.kt b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncClientSession.kt
new file mode 100644
index 00000000000..83ba91df16b
--- /dev/null
+++ b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncClientSession.kt
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.coroutine.syncadapter
+
+import com.mongodb.ClientSessionOptions
+import com.mongodb.ServerAddress
+import com.mongodb.TransactionOptions
+import com.mongodb.client.ClientSession as JClientSession
+import com.mongodb.client.TransactionBody
+import com.mongodb.internal.TimeoutContext
+import com.mongodb.kotlin.client.coroutine.ClientSession
+import com.mongodb.session.ServerSession
+import kotlinx.coroutines.runBlocking
+import org.bson.BsonDocument
+import org.bson.BsonTimestamp
+
+class SyncClientSession(internal val wrapped: ClientSession, private val originator: Any) : JClientSession {
+    override fun close(): Unit = wrapped.close()
+
+    override fun getPinnedServerAddress(): ServerAddress? = wrapped.pinnedServerAddress
+
+    override fun getTransactionContext(): Any? = wrapped.transactionContext
+
+    override fun setTransactionContext(address: ServerAddress, transactionContext: Any): Unit =
+        wrapped.setTransactionContext(address, transactionContext)
+
+    override fun clearTransactionContext(): Unit = wrapped.clearTransactionContext()
+
+    override fun getRecoveryToken(): BsonDocument? = wrapped.recoveryToken
+
+    override fun setRecoveryToken(recoveryToken: BsonDocument): Unit = wrapped.setRecoveryToken(recoveryToken)
+
+    override fun getOptions(): ClientSessionOptions = wrapped.options
+
+    override fun isCausallyConsistent(): Boolean = wrapped.isCausallyConsistent
+
+    override fun getOriginator(): Any = originator
+
+    override fun getServerSession(): ServerSession = wrapped.serverSession
+
+    override fun getOperationTime(): BsonTimestamp = wrapped.operationTime
+
+    override fun advanceOperationTime(operationTime: BsonTimestamp?): Unit = wrapped.advanceOperationTime(operationTime)
+
+    override fun advanceClusterTime(clusterTime: BsonDocument?): Unit = wrapped.advanceClusterTime(clusterTime)
+
+    override fun setSnapshotTimestamp(snapshotTimestamp: BsonTimestamp?) {
+        wrapped.snapshotTimestamp = snapshotTimestamp
+    }
+
+    override fun getSnapshotTimestamp(): BsonTimestamp? = wrapped.snapshotTimestamp
+
+    override fun getClusterTime(): BsonDocument = wrapped.clusterTime
+
+    override fun hasActiveTransaction(): Boolean = wrapped.hasActiveTransaction()
+
+    override fun notifyMessageSent(): Boolean = wrapped.notifyMessageSent()
+
+    override fun notifyOperationInitiated(operation: Any): Unit = wrapped.notifyOperationInitiated(operation)
+
+    override fun getTransactionOptions(): TransactionOptions = wrapped.getTransactionOptions()
+
+    override fun startTransaction(): Unit = wrapped.startTransaction()
+
+    override fun startTransaction(transactionOptions: TransactionOptions): Unit =
+        wrapped.startTransaction(transactionOptions)
+
+    override fun commitTransaction(): Unit = runBlocking { wrapped.commitTransaction() }
+
+    override fun abortTransaction(): Unit = runBlocking { wrapped.abortTransaction() }
+
+    override fun <T> withTransaction(transactionBody: TransactionBody<T>): T =
+        throw UnsupportedOperationException()
+
+    override fun <T> withTransaction(transactionBody: TransactionBody<T>, options: TransactionOptions): T =
+        throw UnsupportedOperationException()
+
+    override fun getTimeoutContext(): TimeoutContext? = wrapped.getTimeoutContext()
+}
diff --git a/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncDistinctIterable.kt b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncDistinctIterable.kt
new file mode 100644
index 00000000000..1c5a382c8da
--- /dev/null
+++ b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncDistinctIterable.kt
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.coroutine.syncadapter
+
+import com.mongodb.client.DistinctIterable as JDistinctIterable
+import com.mongodb.client.cursor.TimeoutMode
+import com.mongodb.client.model.Collation
+import com.mongodb.kotlin.client.coroutine.DistinctFlow
+import java.util.concurrent.TimeUnit
+import org.bson.BsonValue
+import org.bson.conversions.Bson
+
+data class SyncDistinctIterable<T : Any>(val wrapped: DistinctFlow<T>) :
+    JDistinctIterable<T>, SyncMongoIterable<T>(wrapped) {
+    override fun batchSize(batchSize: Int): SyncDistinctIterable<T> = apply { wrapped.batchSize(batchSize) }
+    override fun filter(filter: Bson?): SyncDistinctIterable<T> = apply { wrapped.filter(filter) }
+    override fun maxTime(maxTime: Long, timeUnit: TimeUnit): SyncDistinctIterable<T> = apply {
+        wrapped.maxTime(maxTime, timeUnit)
+    }
+    override fun collation(collation: Collation?): SyncDistinctIterable<T> = apply { wrapped.collation(collation) }
+    override fun comment(comment: String?): SyncDistinctIterable<T> = apply { wrapped.comment(comment) }
+    override fun comment(comment: BsonValue?): SyncDistinctIterable<T> = apply { wrapped.comment(comment) }
+    override fun hint(hint: Bson?): SyncDistinctIterable<T> = apply { wrapped.hint(hint) }
+    override fun hintString(hint: String?): SyncDistinctIterable<T> = apply { wrapped.hintString(hint) }
+    override fun timeoutMode(timeoutMode: TimeoutMode): SyncDistinctIterable<T> = apply {
+        wrapped.timeoutMode(timeoutMode)
+    }
+}
diff --git a/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncFindIterable.kt b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncFindIterable.kt
new file mode 100644
index 00000000000..6c500a9cf90
--- /dev/null
+++ b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncFindIterable.kt
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.coroutine.syncadapter
+
+import com.mongodb.CursorType
+import com.mongodb.ExplainVerbosity
+import com.mongodb.client.FindIterable as JFindIterable
+import com.mongodb.client.cursor.TimeoutMode
+import com.mongodb.client.model.Collation
+import com.mongodb.kotlin.client.coroutine.FindFlow
+import java.util.concurrent.TimeUnit
+import kotlinx.coroutines.runBlocking
+import org.bson.BsonValue
+import org.bson.Document
+import org.bson.conversions.Bson
+
+data class SyncFindIterable<T : Any>(val wrapped: FindFlow<T>) : JFindIterable<T>, SyncMongoIterable<T>(wrapped) {
+    override fun batchSize(batchSize: Int): SyncFindIterable<T> = apply { wrapped.batchSize(batchSize) }
+    override fun filter(filter: Bson?): SyncFindIterable<T> = apply { wrapped.filter(filter) }
+
+    override fun limit(limit: Int): SyncFindIterable<T> = apply { wrapped.limit(limit) }
+
+    override fun skip(skip: Int): SyncFindIterable<T> = apply { wrapped.skip(skip) }
+
+    override fun allowDiskUse(allowDiskUse: Boolean?): SyncFindIterable<T> = apply {
+        wrapped.allowDiskUse(allowDiskUse)
+    }
+
+    override fun maxTime(maxTime: Long, timeUnit: TimeUnit): SyncFindIterable<T> = apply {
+        wrapped.maxTime(maxTime, timeUnit)
+    }
+
+    override fun maxAwaitTime(maxAwaitTime: Long, timeUnit: TimeUnit): SyncFindIterable<T> = apply {
+        wrapped.maxAwaitTime(maxAwaitTime, timeUnit)
+    }
+
+    override fun projection(projection: Bson?): SyncFindIterable<T> = apply { wrapped.projection(projection) }
+
+    override fun sort(sort: Bson?): SyncFindIterable<T> = apply { wrapped.sort(sort) }
+
+    override fun noCursorTimeout(noCursorTimeout: Boolean): SyncFindIterable<T> = apply {
+        wrapped.noCursorTimeout(noCursorTimeout)
+    }
+
+    override fun partial(partial: Boolean): SyncFindIterable<T> = apply { wrapped.partial(partial) }
+
+    override fun cursorType(cursorType: CursorType): SyncFindIterable<T> = apply { wrapped.cursorType(cursorType) }
+
+    override fun collation(collation: Collation?): SyncFindIterable<T> = apply { wrapped.collation(collation) }
+
+    override fun comment(comment: String?): SyncFindIterable<T> = apply { wrapped.comment(comment) }
+
+    override fun comment(comment: BsonValue?): SyncFindIterable<T> = apply { wrapped.comment(comment) }
+
+    override fun hint(hint: Bson?): SyncFindIterable<T> = apply { wrapped.hint(hint) }
+
+    override fun hintString(hint: String?): SyncFindIterable<T> = apply { wrapped.hintString(hint) }
+
+    override fun let(variables: Bson?): SyncFindIterable<T> = apply { wrapped.let(variables) }
+    override fun max(max: Bson?): SyncFindIterable<T> = apply { wrapped.max(max) }
+
+    override fun min(min: Bson?): SyncFindIterable<T> = apply { wrapped.min(min) }
+
+    override fun returnKey(returnKey: Boolean): SyncFindIterable<T> = apply { wrapped.returnKey(returnKey) }
+
+    override fun showRecordId(showRecordId: Boolean): SyncFindIterable<T> = apply { wrapped.showRecordId(showRecordId) }
+    override fun timeoutMode(timeoutMode: TimeoutMode): SyncFindIterable<T> = apply { wrapped.timeoutMode(timeoutMode) }
+
+    override fun explain(): Document = runBlocking { wrapped.explain() }
+
+    override fun explain(verbosity: ExplainVerbosity): Document = runBlocking { wrapped.explain(verbosity) }
+
+    override fun <E : Any> explain(explainResultClass: Class<E>): E = runBlocking {
+        wrapped.explain(explainResultClass)
+    }
+
+    override fun <E : Any> explain(explainResultClass: Class<E>, verbosity: ExplainVerbosity): E = runBlocking {
+        wrapped.explain(explainResultClass, verbosity)
+    }
+}
diff --git a/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListCollectionNamesIterable.kt b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListCollectionNamesIterable.kt
new file mode 100644
index 00000000000..63a7af3f526
--- /dev/null
+++ b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListCollectionNamesIterable.kt
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.coroutine.syncadapter
+
+import com.mongodb.client.ListCollectionNamesIterable as JListCollectionNamesIterable
+import com.mongodb.kotlin.client.coroutine.ListCollectionNamesFlow
+import java.util.concurrent.TimeUnit
+import org.bson.BsonValue
+import org.bson.conversions.Bson
+
+data class SyncListCollectionNamesIterable(val wrapped: ListCollectionNamesFlow) :
+    JListCollectionNamesIterable, SyncMongoIterable<String>(wrapped) {
+
+    override fun batchSize(batchSize: Int): SyncListCollectionNamesIterable = apply { wrapped.batchSize(batchSize) }
+
+    override fun maxTime(maxTime: Long, timeUnit: TimeUnit): SyncListCollectionNamesIterable = apply {
+        wrapped.maxTime(maxTime, timeUnit)
+    }
+
+    override fun filter(filter: Bson?): SyncListCollectionNamesIterable = apply { wrapped.filter(filter) }
+
+    override fun comment(comment: String?): SyncListCollectionNamesIterable = apply { wrapped.comment(comment) }
+
+    override fun comment(comment: BsonValue?): SyncListCollectionNamesIterable = apply { wrapped.comment(comment) }
+
+    override fun authorizedCollections(authorizedCollections: Boolean): SyncListCollectionNamesIterable = apply {
+        wrapped.authorizedCollections(authorizedCollections)
+    }
+}
diff --git a/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListCollectionsIterable.kt b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListCollectionsIterable.kt
new file mode 100644
index 00000000000..ab1853c756d
--- /dev/null
+++ b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListCollectionsIterable.kt
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.coroutine.syncadapter
+
+import com.mongodb.client.ListCollectionsIterable as JListCollectionsIterable
+import com.mongodb.client.cursor.TimeoutMode
+import com.mongodb.kotlin.client.coroutine.ListCollectionsFlow
+import java.util.concurrent.TimeUnit
+import org.bson.BsonValue
+import org.bson.conversions.Bson
+
+data class SyncListCollectionsIterable<T : Any>(val wrapped: ListCollectionsFlow<T>) :
+    JListCollectionsIterable<T>, SyncMongoIterable<T>(wrapped) {
+
+    override fun batchSize(batchSize: Int): SyncListCollectionsIterable<T> = apply { wrapped.batchSize(batchSize) }
+    override fun maxTime(maxTime: Long, timeUnit: TimeUnit): SyncListCollectionsIterable<T> = apply {
+        wrapped.maxTime(maxTime, timeUnit)
+    }
+
+    override fun filter(filter: Bson?): SyncListCollectionsIterable<T> = apply { wrapped.filter(filter) }
+    override fun comment(comment: String?): SyncListCollectionsIterable<T> = apply { wrapped.comment(comment) }
+    override fun comment(comment: BsonValue?): SyncListCollectionsIterable<T> = apply { wrapped.comment(comment) }
+    override fun timeoutMode(timeoutMode: TimeoutMode): SyncListCollectionsIterable<T> = apply {
+        wrapped.timeoutMode(timeoutMode)
+    }
+}
diff --git a/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListDatabasesIterable.kt b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListDatabasesIterable.kt
new file mode 100644
index 00000000000..4563dfe4a4f
--- /dev/null
+++ b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListDatabasesIterable.kt
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.coroutine.syncadapter
+
+import com.mongodb.client.ListDatabasesIterable as JListDatabasesIterable
+import com.mongodb.client.cursor.TimeoutMode
+import com.mongodb.kotlin.client.coroutine.ListDatabasesFlow
+import java.util.concurrent.TimeUnit
+import org.bson.BsonValue
+import org.bson.conversions.Bson
+
+data class SyncListDatabasesIterable<T : Any>(val wrapped: ListDatabasesFlow<T>) :
+    JListDatabasesIterable<T>, SyncMongoIterable<T>(wrapped) {
+
+    override fun batchSize(batchSize: Int): SyncListDatabasesIterable<T> = apply { wrapped.batchSize(batchSize) }
+
+    override fun maxTime(maxTime: Long, timeUnit: TimeUnit): SyncListDatabasesIterable<T> = apply {
+        wrapped.maxTime(maxTime, timeUnit)
+    }
+
+    override fun filter(filter: Bson?): SyncListDatabasesIterable<T> = apply { wrapped.filter(filter) }
+
+    override fun nameOnly(nameOnly: Boolean?): SyncListDatabasesIterable<T> = apply { wrapped.nameOnly(nameOnly) }
+
+    override fun authorizedDatabasesOnly(authorizedDatabasesOnly: Boolean?): SyncListDatabasesIterable<T> = apply {
+        wrapped.authorizedDatabasesOnly(authorizedDatabasesOnly)
+    }
+
+    override fun comment(comment: String?): SyncListDatabasesIterable<T> = apply { wrapped.comment(comment) }
+
+    override fun comment(comment: BsonValue?): SyncListDatabasesIterable<T> = apply { wrapped.comment(comment) }
+    override fun timeoutMode(timeoutMode: TimeoutMode): SyncListDatabasesIterable<T> = apply {
+        wrapped.timeoutMode(timeoutMode)
+    }
+}
diff --git a/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListIndexesIterable.kt b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListIndexesIterable.kt
new file mode 100644
index 00000000000..0e329c7bcdd
--- /dev/null
+++ b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListIndexesIterable.kt
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.coroutine.syncadapter
+
+import com.mongodb.client.ListIndexesIterable as JListIndexesIterable
+import com.mongodb.client.cursor.TimeoutMode
+import com.mongodb.kotlin.client.coroutine.ListIndexesFlow
+import java.util.concurrent.TimeUnit
+import org.bson.BsonValue
+
+data class SyncListIndexesIterable<T : Any>(val wrapped: ListIndexesFlow<T>) :
+    JListIndexesIterable<T>, SyncMongoIterable<T>(wrapped) {
+    override fun batchSize(batchSize: Int): SyncListIndexesIterable<T> = apply { wrapped.batchSize(batchSize) }
+    override fun maxTime(maxTime: Long, timeUnit: TimeUnit): SyncListIndexesIterable<T> = apply {
+        wrapped.maxTime(maxTime, timeUnit)
+    }
+    override fun comment(comment: String?): SyncListIndexesIterable<T> = apply { wrapped.comment(comment) }
+    override fun comment(comment: BsonValue?): SyncListIndexesIterable<T> = apply { wrapped.comment(comment) }
+    override fun timeoutMode(timeoutMode: TimeoutMode): SyncListIndexesIterable<T> = apply {
+        wrapped.timeoutMode(timeoutMode)
+    }
+}
diff --git a/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListSearchIndexesIterable.kt b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListSearchIndexesIterable.kt
new file mode 100644
index 00000000000..a7df87779df
--- /dev/null
+++ b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncListSearchIndexesIterable.kt
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.coroutine.syncadapter
+
+import com.mongodb.ExplainVerbosity
+import com.mongodb.client.ListSearchIndexesIterable as JListSearchIndexesIterable
+import com.mongodb.client.cursor.TimeoutMode
+import com.mongodb.client.model.Collation
+import com.mongodb.kotlin.client.coroutine.ListSearchIndexesFlow
+import java.util.concurrent.TimeUnit
+import kotlinx.coroutines.runBlocking
+import org.bson.BsonValue
+import org.bson.Document
+
+internal class SyncListSearchIndexesIterable<T : Any>(val wrapped: ListSearchIndexesFlow<T>) :
+    JListSearchIndexesIterable<T>, SyncMongoIterable<T>(wrapped) {
+
+    override fun name(indexName: String): SyncListSearchIndexesIterable<T> = apply { wrapped.name(indexName) }
+
+    override fun batchSize(batchSize: Int): SyncListSearchIndexesIterable<T> = apply { wrapped.batchSize(batchSize) }
+
+    override fun allowDiskUse(allowDiskUse: Boolean?): SyncListSearchIndexesIterable<T> = apply {
+        wrapped.allowDiskUse(allowDiskUse)
+    }
+
+    override fun maxTime(maxTime: Long, timeUnit: TimeUnit): SyncListSearchIndexesIterable<T> = apply {
+        wrapped.maxTime(maxTime, timeUnit)
+    }
+
+    override fun collation(collation: Collation?): SyncListSearchIndexesIterable<T> = apply {
+        wrapped.collation(collation)
+    }
+
+    override fun comment(comment: String?): SyncListSearchIndexesIterable<T> = apply { wrapped.comment(comment) }
+    override fun comment(comment: BsonValue?): SyncListSearchIndexesIterable<T> = apply { wrapped.comment(comment) }
+    override fun timeoutMode(timeoutMode: TimeoutMode): SyncListSearchIndexesIterable<T> = apply {
+        wrapped.timeoutMode(timeoutMode)
+    }
+    override fun explain(): Document = runBlocking { wrapped.explain() }
+
+    override fun explain(verbosity: ExplainVerbosity): Document = runBlocking { wrapped.explain(verbosity) }
+
+    override fun <E : Any> explain(explainResultClass: Class<E>): E = runBlocking {
+        wrapped.explain(explainResultClass)
+    }
+
+    override fun <E : Any> explain(explainResultClass: Class<E>, verbosity: ExplainVerbosity): E = runBlocking {
+        wrapped.explain(explainResultClass, verbosity)
+    }
+}
diff --git a/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMapReduceIterable.kt b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMapReduceIterable.kt
new file mode 100644
index 00000000000..8e5fc82455a
--- /dev/null
+++ b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMapReduceIterable.kt
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +@file:Suppress("DEPRECATION") + +package com.mongodb.kotlin.client.coroutine.syncadapter + +import com.mongodb.client.MapReduceIterable as JMapReduceIterable +import com.mongodb.client.cursor.TimeoutMode +import com.mongodb.client.model.Collation +import com.mongodb.client.model.MapReduceAction +import com.mongodb.kotlin.client.coroutine.MapReduceFlow +import java.util.concurrent.TimeUnit +import kotlinx.coroutines.runBlocking +import org.bson.conversions.Bson + +data class SyncMapReduceIterable(val wrapped: MapReduceFlow) : + JMapReduceIterable, SyncMongoIterable(wrapped) { + override fun batchSize(batchSize: Int): SyncMapReduceIterable = apply { wrapped.batchSize(batchSize) } + override fun toCollection() = runBlocking { wrapped.toCollection() } + override fun collectionName(collectionName: String): SyncMapReduceIterable = apply { + wrapped.collectionName(collectionName) + } + + override fun finalizeFunction(finalizeFunction: String?): SyncMapReduceIterable = apply { + wrapped.finalizeFunction(finalizeFunction) + } + + override fun scope(scope: Bson?): SyncMapReduceIterable = apply { wrapped.scope(scope) } + override fun sort(sort: Bson?): SyncMapReduceIterable = apply { wrapped.sort(sort) } + override fun filter(filter: Bson?): SyncMapReduceIterable = apply { wrapped.filter(filter) } + override fun limit(limit: Int): SyncMapReduceIterable = apply { wrapped.limit(limit) } + override fun jsMode(jsMode: Boolean): SyncMapReduceIterable = apply { wrapped.jsMode(jsMode) } + override fun verbose(verbose: Boolean): SyncMapReduceIterable = apply { wrapped.verbose(verbose) } + + override fun maxTime(maxTime: Long, timeUnit: TimeUnit): SyncMapReduceIterable = apply { + wrapped.maxTime(maxTime, timeUnit) + } + override fun action(action: MapReduceAction): SyncMapReduceIterable = apply { wrapped.action(action) } + override fun databaseName(databaseName: String?): SyncMapReduceIterable = apply { + wrapped.databaseName(databaseName) + } + + override fun bypassDocumentValidation(bypassDocumentValidation: Boolean?): SyncMapReduceIterable = apply { + wrapped.bypassDocumentValidation(bypassDocumentValidation) + } + + override fun collation(collation: Collation?): SyncMapReduceIterable = apply { wrapped.collation(collation) } + override fun timeoutMode(timeoutMode: TimeoutMode): SyncMapReduceIterable = apply { + wrapped.timeoutMode(timeoutMode) + } +} diff --git a/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoChangeStreamCursor.kt b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoChangeStreamCursor.kt new file mode 100644 index 00000000000..5a4fb636f47 --- /dev/null +++ b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoChangeStreamCursor.kt @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.mongodb.kotlin.client.coroutine.syncadapter
+
+import com.mongodb.client.MongoChangeStreamCursor as JMongoChangeStreamCursor
+import kotlinx.coroutines.flow.Flow
+import org.bson.BsonDocument
+
+data class SyncMongoChangeStreamCursor<T>(val wrapped: Flow<T>) :
+    JMongoChangeStreamCursor<T>, SyncMongoCursor<T>(wrapped) {
+    override fun getResumeToken(): BsonDocument? = throw UnsupportedOperationException()
+}
diff --git a/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoClient.kt b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoClient.kt
new file mode 100644
index 00000000000..4a97557d14a
--- /dev/null
+++ b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoClient.kt
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.coroutine.syncadapter
+
+import com.mongodb.MongoDriverInformation
+import com.mongodb.client.MongoClient as JMongoClient
+import com.mongodb.connection.ClusterDescription
+import com.mongodb.kotlin.client.coroutine.MongoClient
+
+internal class SyncMongoClient(override val wrapped: MongoClient) : SyncMongoCluster(wrapped), JMongoClient {
+    override fun close(): Unit = wrapped.close()
+
+    override fun getClusterDescription(): ClusterDescription = wrapped.getClusterDescription()
+
+    override fun appendMetadata(mongoDriverInformation: MongoDriverInformation): Unit =
+        wrapped.appendMetadata(mongoDriverInformation)
+}
diff --git a/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCluster.kt b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCluster.kt
new file mode 100644
index 00000000000..01d0e27ff58
--- /dev/null
+++ b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCluster.kt
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.mongodb.kotlin.client.coroutine.syncadapter + +import com.mongodb.ClientSessionOptions +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import com.mongodb.WriteConcern +import com.mongodb.client.ChangeStreamIterable +import com.mongodb.client.ClientSession +import com.mongodb.client.ListDatabasesIterable +import com.mongodb.client.MongoCluster as JMongoCluster +import com.mongodb.client.MongoDatabase +import com.mongodb.client.MongoIterable +import com.mongodb.client.model.bulk.ClientBulkWriteOptions +import com.mongodb.client.model.bulk.ClientBulkWriteResult +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel +import com.mongodb.kotlin.client.coroutine.MongoCluster +import java.util.concurrent.TimeUnit +import kotlinx.coroutines.runBlocking +import org.bson.Document +import org.bson.codecs.configuration.CodecRegistry +import org.bson.conversions.Bson + +internal open class SyncMongoCluster(open val wrapped: MongoCluster) : JMongoCluster { + override fun getCodecRegistry(): CodecRegistry = wrapped.codecRegistry + + override fun getReadPreference(): ReadPreference = wrapped.readPreference + + override fun getWriteConcern(): WriteConcern = wrapped.writeConcern + + override fun getReadConcern(): ReadConcern = wrapped.readConcern + + override fun getTimeout(timeUnit: TimeUnit): Long? = wrapped.timeout(timeUnit) + + override fun withCodecRegistry(codecRegistry: CodecRegistry): SyncMongoCluster = + SyncMongoCluster(wrapped.withCodecRegistry(codecRegistry)) + + override fun withReadPreference(readPreference: ReadPreference): SyncMongoCluster = + SyncMongoCluster(wrapped.withReadPreference(readPreference)) + + override fun withReadConcern(readConcern: ReadConcern): SyncMongoCluster = + SyncMongoCluster(wrapped.withReadConcern(readConcern)) + + override fun withWriteConcern(writeConcern: WriteConcern): SyncMongoCluster = + SyncMongoCluster(wrapped.withWriteConcern(writeConcern)) + + override fun withTimeout(timeout: Long, timeUnit: TimeUnit): SyncMongoCluster = + SyncMongoCluster(wrapped.withTimeout(timeout, timeUnit)) + + override fun getDatabase(databaseName: String): MongoDatabase = SyncMongoDatabase(wrapped.getDatabase(databaseName)) + + override fun startSession(): ClientSession = SyncClientSession(runBlocking { wrapped.startSession() }, this) + + override fun startSession(options: ClientSessionOptions): ClientSession = + SyncClientSession(runBlocking { wrapped.startSession(options) }, this) + + override fun listDatabaseNames(): MongoIterable = SyncMongoIterable(wrapped.listDatabaseNames()) + + override fun listDatabaseNames(clientSession: ClientSession): MongoIterable = + SyncMongoIterable(wrapped.listDatabaseNames(clientSession.unwrapped())) + + override fun listDatabases(): ListDatabasesIterable = SyncListDatabasesIterable(wrapped.listDatabases()) + + override fun listDatabases(clientSession: ClientSession): ListDatabasesIterable = + SyncListDatabasesIterable(wrapped.listDatabases(clientSession.unwrapped())) + + override fun listDatabases(resultClass: Class): ListDatabasesIterable = + SyncListDatabasesIterable(wrapped.listDatabases(resultClass)) + + override fun listDatabases( + clientSession: ClientSession, + resultClass: Class + ): ListDatabasesIterable = + SyncListDatabasesIterable(wrapped.listDatabases(clientSession.unwrapped(), resultClass)) + + override fun watch(): ChangeStreamIterable = SyncChangeStreamIterable(wrapped.watch()) + + override fun watch(resultClass: Class): ChangeStreamIterable = + 
SyncChangeStreamIterable(wrapped.watch(resultClass = resultClass)) + + override fun watch(pipeline: MutableList): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(pipeline)) + + override fun watch(pipeline: MutableList, resultClass: Class): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(pipeline, resultClass)) + + override fun watch(clientSession: ClientSession): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped())) + + override fun watch(clientSession: ClientSession, resultClass: Class): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), resultClass = resultClass)) + + override fun watch(clientSession: ClientSession, pipeline: MutableList): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline)) + + override fun watch( + clientSession: ClientSession, + pipeline: MutableList, + resultClass: Class + ): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline, resultClass)) + + override fun bulkWrite(models: MutableList): ClientBulkWriteResult = runBlocking { + wrapped.bulkWrite(models) + } + + override fun bulkWrite( + models: MutableList, + options: ClientBulkWriteOptions + ): ClientBulkWriteResult = runBlocking { wrapped.bulkWrite(models, options) } + + override fun bulkWrite( + clientSession: ClientSession, + models: MutableList + ): ClientBulkWriteResult = runBlocking { wrapped.bulkWrite(clientSession.unwrapped(), models) } + + override fun bulkWrite( + clientSession: ClientSession, + models: MutableList, + options: ClientBulkWriteOptions + ): ClientBulkWriteResult = runBlocking { wrapped.bulkWrite(clientSession.unwrapped(), models, options) } + + private fun ClientSession.unwrapped() = (this as SyncClientSession).wrapped +} diff --git a/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCollection.kt b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCollection.kt new file mode 100644 index 00000000000..fa26fae86c1 --- /dev/null +++ b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCollection.kt @@ -0,0 +1,635 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
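The private `ClientSession.unwrapped()` extension above is the inverse of the wrapping done in `startSession`: the facade hands out SyncClientSession instances and casts them back to recover the coroutine session before delegating. A minimal sketch of that round trip, with hypothetical types:

class CoroutineSession
interface Session
class SyncSession(val wrapped: CoroutineSession) : Session

class SyncFacade {
    fun startSession(): Session = SyncSession(CoroutineSession())
    fun useSession(session: Session) {
        // Recover the wrapped object; only sessions this facade created can come back in.
        val coroutineSession = (session as SyncSession).wrapped
        println("delegating with $coroutineSession")
    }
}

fun main() {
    val facade = SyncFacade()
    facade.useSession(facade.startSession())
}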
+ */ +@file:Suppress("DEPRECATION") + +package com.mongodb.kotlin.client.coroutine.syncadapter + +import com.mongodb.MongoNamespace +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import com.mongodb.WriteConcern +import com.mongodb.bulk.BulkWriteResult +import com.mongodb.client.AggregateIterable +import com.mongodb.client.ChangeStreamIterable +import com.mongodb.client.ClientSession +import com.mongodb.client.DistinctIterable +import com.mongodb.client.FindIterable +import com.mongodb.client.ListIndexesIterable +import com.mongodb.client.ListSearchIndexesIterable +import com.mongodb.client.MapReduceIterable +import com.mongodb.client.MongoCollection as JMongoCollection +import com.mongodb.client.model.BulkWriteOptions +import com.mongodb.client.model.CountOptions +import com.mongodb.client.model.CreateIndexOptions +import com.mongodb.client.model.DeleteOptions +import com.mongodb.client.model.DropCollectionOptions +import com.mongodb.client.model.DropIndexOptions +import com.mongodb.client.model.EstimatedDocumentCountOptions +import com.mongodb.client.model.FindOneAndDeleteOptions +import com.mongodb.client.model.FindOneAndReplaceOptions +import com.mongodb.client.model.FindOneAndUpdateOptions +import com.mongodb.client.model.IndexModel +import com.mongodb.client.model.IndexOptions +import com.mongodb.client.model.InsertManyOptions +import com.mongodb.client.model.InsertOneOptions +import com.mongodb.client.model.RenameCollectionOptions +import com.mongodb.client.model.ReplaceOptions +import com.mongodb.client.model.SearchIndexModel +import com.mongodb.client.model.UpdateOptions +import com.mongodb.client.model.WriteModel +import com.mongodb.client.result.DeleteResult +import com.mongodb.client.result.InsertManyResult +import com.mongodb.client.result.InsertOneResult +import com.mongodb.client.result.UpdateResult +import com.mongodb.kotlin.client.coroutine.MongoCollection +import java.util.concurrent.TimeUnit +import kotlinx.coroutines.flow.toCollection +import kotlinx.coroutines.runBlocking +import org.bson.Document +import org.bson.codecs.configuration.CodecRegistry +import org.bson.conversions.Bson + +@Suppress("OVERRIDE_DEPRECATION") +data class SyncMongoCollection(val wrapped: MongoCollection) : JMongoCollection { + override fun getNamespace(): MongoNamespace = wrapped.namespace + + override fun getDocumentClass(): Class = wrapped.documentClass + + override fun getCodecRegistry(): CodecRegistry = wrapped.codecRegistry + + override fun getReadPreference(): ReadPreference = wrapped.readPreference + + override fun getWriteConcern(): WriteConcern = wrapped.writeConcern + + override fun getReadConcern(): ReadConcern = wrapped.readConcern + override fun getTimeout(timeUnit: TimeUnit): Long? 
= wrapped.timeout(timeUnit) + + override fun withDocumentClass(clazz: Class): SyncMongoCollection = + SyncMongoCollection(wrapped.withDocumentClass(clazz)) + + override fun withCodecRegistry(codecRegistry: CodecRegistry): SyncMongoCollection = + SyncMongoCollection(wrapped.withCodecRegistry(codecRegistry)) + + override fun withReadPreference(readPreference: ReadPreference): SyncMongoCollection = + SyncMongoCollection(wrapped.withReadPreference(readPreference)) + + override fun withWriteConcern(writeConcern: WriteConcern): SyncMongoCollection = + SyncMongoCollection(wrapped.withWriteConcern(writeConcern)) + + override fun withReadConcern(readConcern: ReadConcern): SyncMongoCollection = + SyncMongoCollection(wrapped.withReadConcern(readConcern)) + + override fun withTimeout(timeout: Long, timeUnit: TimeUnit): com.mongodb.client.MongoCollection = + SyncMongoCollection(wrapped.withTimeout(timeout, timeUnit)) + + override fun countDocuments(): Long = runBlocking { wrapped.countDocuments() } + + override fun countDocuments(filter: Bson): Long = runBlocking { wrapped.countDocuments(filter) } + + override fun countDocuments(filter: Bson, options: CountOptions): Long = runBlocking { + wrapped.countDocuments(filter, options) + } + + override fun countDocuments(clientSession: ClientSession): Long = runBlocking { + wrapped.countDocuments(clientSession.unwrapped()) + } + + override fun countDocuments(clientSession: ClientSession, filter: Bson): Long = runBlocking { + wrapped.countDocuments(clientSession.unwrapped(), filter) + } + + override fun countDocuments(clientSession: ClientSession, filter: Bson, options: CountOptions): Long = runBlocking { + wrapped.countDocuments(clientSession.unwrapped(), filter, options) + } + + override fun estimatedDocumentCount(): Long = runBlocking { wrapped.estimatedDocumentCount() } + + override fun estimatedDocumentCount(options: EstimatedDocumentCountOptions): Long = runBlocking { + wrapped.estimatedDocumentCount(options) + } + + override fun distinct(fieldName: String, resultClass: Class): DistinctIterable = + SyncDistinctIterable(wrapped.distinct(fieldName, resultClass = resultClass)) + + override fun distinct(fieldName: String, filter: Bson, resultClass: Class): DistinctIterable = + SyncDistinctIterable(wrapped.distinct(fieldName, filter, resultClass = resultClass)) + + override fun distinct( + clientSession: ClientSession, + fieldName: String, + resultClass: Class + ): DistinctIterable = + SyncDistinctIterable(wrapped.distinct(clientSession.unwrapped(), fieldName, resultClass = resultClass)) + + override fun distinct( + clientSession: ClientSession, + fieldName: String, + filter: Bson, + resultClass: Class + ): DistinctIterable = + SyncDistinctIterable(wrapped.distinct(clientSession.unwrapped(), fieldName, filter, resultClass)) + + override fun find(): FindIterable = SyncFindIterable(wrapped.find()) + + override fun find(resultClass: Class): FindIterable = + SyncFindIterable(wrapped.find(resultClass = resultClass)) + + override fun find(filter: Bson): FindIterable = SyncFindIterable(wrapped.find(filter)) + + override fun find(filter: Bson, resultClass: Class): FindIterable = + SyncFindIterable(wrapped.find(filter, resultClass)) + + override fun find(clientSession: ClientSession): FindIterable = + SyncFindIterable(wrapped.find(clientSession.unwrapped())) + + override fun find(clientSession: ClientSession, resultClass: Class): FindIterable = + SyncFindIterable(wrapped.find(clientSession.unwrapped(), resultClass = resultClass)) + + override fun find(clientSession: 
ClientSession, filter: Bson): FindIterable = + SyncFindIterable(wrapped.find(clientSession.unwrapped(), filter)) + + override fun find(clientSession: ClientSession, filter: Bson, resultClass: Class): FindIterable = + SyncFindIterable(wrapped.find(clientSession.unwrapped(), filter, resultClass)) + + override fun aggregate(pipeline: MutableList): AggregateIterable = + SyncAggregateIterable(wrapped.aggregate(pipeline)) + + override fun aggregate(pipeline: MutableList, resultClass: Class): AggregateIterable = + SyncAggregateIterable(wrapped.aggregate(pipeline, resultClass)) + + override fun aggregate(clientSession: ClientSession, pipeline: MutableList): AggregateIterable = + SyncAggregateIterable(wrapped.aggregate(clientSession.unwrapped(), pipeline)) + + override fun aggregate( + clientSession: ClientSession, + pipeline: MutableList, + resultClass: Class + ): AggregateIterable = SyncAggregateIterable(wrapped.aggregate(clientSession.unwrapped(), pipeline, resultClass)) + + override fun watch(): ChangeStreamIterable = SyncChangeStreamIterable(wrapped.watch()) + + override fun watch(resultClass: Class): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(resultClass = resultClass)) + + override fun watch(pipeline: MutableList): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(pipeline)) + + override fun watch(pipeline: MutableList, resultClass: Class): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(pipeline, resultClass)) + + override fun watch(clientSession: ClientSession): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped())) + + override fun watch(clientSession: ClientSession, resultClass: Class): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), resultClass = resultClass)) + + override fun watch(clientSession: ClientSession, pipeline: MutableList): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline)) + + override fun watch( + clientSession: ClientSession, + pipeline: MutableList, + resultClass: Class + ): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline, resultClass)) + + override fun mapReduce(mapFunction: String, reduceFunction: String): MapReduceIterable = + SyncMapReduceIterable(wrapped.mapReduce(mapFunction, reduceFunction)) + + override fun mapReduce( + mapFunction: String, + reduceFunction: String, + resultClass: Class + ): MapReduceIterable = SyncMapReduceIterable(wrapped.mapReduce(mapFunction, reduceFunction, resultClass)) + + override fun mapReduce( + clientSession: ClientSession, + mapFunction: String, + reduceFunction: String + ): MapReduceIterable = + SyncMapReduceIterable(wrapped.mapReduce(clientSession.unwrapped(), mapFunction, reduceFunction)) + + override fun mapReduce( + clientSession: ClientSession, + mapFunction: String, + reduceFunction: String, + resultClass: Class + ): MapReduceIterable = + SyncMapReduceIterable(wrapped.mapReduce(clientSession.unwrapped(), mapFunction, reduceFunction, resultClass)) + + override fun deleteOne(filter: Bson): DeleteResult = runBlocking { wrapped.deleteOne(filter) } + + override fun deleteOne(filter: Bson, options: DeleteOptions): DeleteResult = runBlocking { + wrapped.deleteOne(filter, options) + } + + override fun deleteOne(clientSession: ClientSession, filter: Bson): DeleteResult = runBlocking { + wrapped.deleteOne(clientSession.unwrapped(), filter) + } + + override fun deleteOne(clientSession: ClientSession, filter: 
Bson, options: DeleteOptions): DeleteResult = + runBlocking { + wrapped.deleteOne(clientSession.unwrapped(), filter, options) + } + + override fun deleteMany(filter: Bson): DeleteResult = runBlocking { wrapped.deleteMany(filter) } + + override fun deleteMany(filter: Bson, options: DeleteOptions): DeleteResult = runBlocking { + wrapped.deleteMany(filter, options) + } + + override fun deleteMany(clientSession: ClientSession, filter: Bson): DeleteResult = runBlocking { + wrapped.deleteMany(clientSession.unwrapped(), filter) + } + + override fun deleteMany(clientSession: ClientSession, filter: Bson, options: DeleteOptions): DeleteResult = + runBlocking { + wrapped.deleteMany(clientSession.unwrapped(), filter, options) + } + + override fun updateOne(filter: Bson, update: Bson): UpdateResult = runBlocking { wrapped.updateOne(filter, update) } + + override fun updateOne(filter: Bson, update: Bson, updateOptions: UpdateOptions): UpdateResult = runBlocking { + wrapped.updateOne(filter, update, updateOptions) + } + + override fun updateOne(clientSession: ClientSession, filter: Bson, update: Bson): UpdateResult = runBlocking { + wrapped.updateOne(clientSession.unwrapped(), filter, update) + } + + override fun updateOne( + clientSession: ClientSession, + filter: Bson, + update: Bson, + updateOptions: UpdateOptions + ): UpdateResult = runBlocking { wrapped.updateOne(clientSession.unwrapped(), filter, update, updateOptions) } + + override fun updateOne(filter: Bson, update: MutableList): UpdateResult = runBlocking { + wrapped.updateOne(filter, update) + } + + override fun updateOne(filter: Bson, update: MutableList, updateOptions: UpdateOptions): UpdateResult = + runBlocking { + wrapped.updateOne(filter, update, updateOptions) + } + + override fun updateOne(clientSession: ClientSession, filter: Bson, update: MutableList): UpdateResult = + runBlocking { + wrapped.updateOne(clientSession.unwrapped(), filter, update) + } + + override fun updateOne( + clientSession: ClientSession, + filter: Bson, + update: MutableList, + updateOptions: UpdateOptions + ): UpdateResult = runBlocking { wrapped.updateOne(clientSession.unwrapped(), filter, update, updateOptions) } + + override fun updateMany(filter: Bson, update: Bson): UpdateResult = runBlocking { + wrapped.updateMany(filter, update) + } + + override fun updateMany(filter: Bson, update: Bson, updateOptions: UpdateOptions): UpdateResult = runBlocking { + wrapped.updateMany(filter, update, updateOptions) + } + + override fun updateMany(clientSession: ClientSession, filter: Bson, update: Bson): UpdateResult = runBlocking { + wrapped.updateMany(clientSession.unwrapped(), filter, update) + } + + override fun updateMany( + clientSession: ClientSession, + filter: Bson, + update: Bson, + updateOptions: UpdateOptions + ): UpdateResult = runBlocking { wrapped.updateMany(clientSession.unwrapped(), filter, update, updateOptions) } + + override fun updateMany(filter: Bson, update: MutableList): UpdateResult = runBlocking { + wrapped.updateMany(filter, update) + } + + override fun updateMany(filter: Bson, update: MutableList, updateOptions: UpdateOptions): UpdateResult = + runBlocking { + wrapped.updateMany(filter, update, updateOptions) + } + + override fun updateMany(clientSession: ClientSession, filter: Bson, update: MutableList): UpdateResult = + runBlocking { + wrapped.updateMany(clientSession.unwrapped(), filter, update) + } + + override fun updateMany( + clientSession: ClientSession, + filter: Bson, + update: MutableList, + updateOptions: UpdateOptions + ): 
UpdateResult = runBlocking { wrapped.updateMany(clientSession.unwrapped(), filter, update, updateOptions) } + + override fun findOneAndDelete(filter: Bson): T? = runBlocking { wrapped.findOneAndDelete(filter) } + + override fun findOneAndDelete(filter: Bson, options: FindOneAndDeleteOptions): T? = runBlocking { + wrapped.findOneAndDelete(filter, options) + } + + override fun findOneAndDelete(clientSession: ClientSession, filter: Bson): T? = runBlocking { + wrapped.findOneAndDelete(clientSession.unwrapped(), filter) + } + + override fun findOneAndDelete(clientSession: ClientSession, filter: Bson, options: FindOneAndDeleteOptions): T? = + runBlocking { + wrapped.findOneAndDelete(clientSession.unwrapped(), filter, options) + } + + override fun findOneAndUpdate(filter: Bson, update: Bson): T? = runBlocking { + wrapped.findOneAndUpdate(filter, update) + } + + override fun findOneAndUpdate(filter: Bson, update: Bson, options: FindOneAndUpdateOptions): T? = runBlocking { + wrapped.findOneAndUpdate(filter, update, options) + } + + override fun findOneAndUpdate(clientSession: ClientSession, filter: Bson, update: Bson): T? = runBlocking { + wrapped.findOneAndUpdate(clientSession.unwrapped(), filter, update) + } + + override fun findOneAndUpdate( + clientSession: ClientSession, + filter: Bson, + update: Bson, + options: FindOneAndUpdateOptions + ): T? = runBlocking { wrapped.findOneAndUpdate(clientSession.unwrapped(), filter, update, options) } + + override fun findOneAndUpdate(filter: Bson, update: MutableList): T? = runBlocking { + wrapped.findOneAndUpdate(filter, update) + } + + override fun findOneAndUpdate(filter: Bson, update: MutableList, options: FindOneAndUpdateOptions): T? = + runBlocking { + wrapped.findOneAndUpdate(filter, update, options) + } + + override fun findOneAndUpdate(clientSession: ClientSession, filter: Bson, update: MutableList): T? = + runBlocking { + wrapped.findOneAndUpdate(clientSession.unwrapped(), filter, update) + } + + override fun findOneAndUpdate( + clientSession: ClientSession, + filter: Bson, + update: MutableList, + options: FindOneAndUpdateOptions + ): T? 
= runBlocking { wrapped.findOneAndUpdate(clientSession.unwrapped(), filter, update, options) } + + override fun drop() = runBlocking { wrapped.drop() } + + override fun drop(clientSession: ClientSession) = runBlocking { wrapped.drop(clientSession.unwrapped()) } + + override fun drop(dropCollectionOptions: DropCollectionOptions) = runBlocking { + wrapped.drop(dropCollectionOptions) + } + + override fun drop(clientSession: ClientSession, dropCollectionOptions: DropCollectionOptions) = runBlocking { + wrapped.drop(clientSession.unwrapped(), dropCollectionOptions) + } + + override fun createSearchIndex(name: String, definition: Bson) = runBlocking { + wrapped.createSearchIndex(name, definition) + } + + override fun createSearchIndex(definition: Bson) = runBlocking { wrapped.createSearchIndex(definition) } + + override fun createSearchIndexes(searchIndexModels: MutableList): MutableList = + runBlocking { + wrapped.createSearchIndexes(searchIndexModels).toCollection(mutableListOf()) + } + + override fun updateSearchIndex(indexName: String, definition: Bson) = runBlocking { + wrapped.updateSearchIndex(indexName, definition) + } + + override fun dropSearchIndex(indexName: String) = runBlocking { wrapped.dropSearchIndex(indexName) } + + override fun listSearchIndexes(): ListSearchIndexesIterable = + SyncListSearchIndexesIterable(wrapped.listSearchIndexes()) + + override fun listSearchIndexes(resultClass: Class): ListSearchIndexesIterable = + SyncListSearchIndexesIterable(wrapped.listSearchIndexes(resultClass = resultClass)) + + override fun createIndex(keys: Bson): String = runBlocking { wrapped.createIndex(keys) } + + override fun createIndex(keys: Bson, indexOptions: IndexOptions): String = runBlocking { + wrapped.createIndex(keys, indexOptions) + } + + override fun createIndex(clientSession: ClientSession, keys: Bson): String = runBlocking { + wrapped.createIndex(clientSession.unwrapped(), keys) + } + + override fun createIndex(clientSession: ClientSession, keys: Bson, indexOptions: IndexOptions): String = + runBlocking { + wrapped.createIndex(clientSession.unwrapped(), keys, indexOptions) + } + + override fun createIndexes(indexes: MutableList): MutableList = runBlocking { + wrapped.createIndexes(indexes).toCollection(mutableListOf()) + } + + override fun createIndexes( + indexes: MutableList, + createIndexOptions: CreateIndexOptions + ): MutableList = runBlocking { + wrapped.createIndexes(indexes, createIndexOptions).toCollection(mutableListOf()) + } + + override fun createIndexes(clientSession: ClientSession, indexes: MutableList): MutableList = + runBlocking { + wrapped.createIndexes(clientSession.unwrapped(), indexes).toCollection(mutableListOf()) + } + + override fun createIndexes( + clientSession: ClientSession, + indexes: MutableList, + createIndexOptions: CreateIndexOptions + ): MutableList = runBlocking { + wrapped.createIndexes(clientSession.unwrapped(), indexes, createIndexOptions).toCollection(mutableListOf()) + } + + override fun listIndexes(): ListIndexesIterable = SyncListIndexesIterable(wrapped.listIndexes()) + + override fun listIndexes(resultClass: Class): ListIndexesIterable = + SyncListIndexesIterable(wrapped.listIndexes(resultClass = resultClass)) + + override fun listIndexes(clientSession: ClientSession): ListIndexesIterable = + SyncListIndexesIterable(wrapped.listIndexes(clientSession.unwrapped())) + + override fun listIndexes(clientSession: ClientSession, resultClass: Class): ListIndexesIterable = + SyncListIndexesIterable(wrapped.listIndexes(clientSession.unwrapped(), 
resultClass)) + + override fun dropIndex(indexName: String) = runBlocking { wrapped.dropIndex(indexName) } + + override fun dropIndex(indexName: String, dropIndexOptions: DropIndexOptions) = runBlocking { + wrapped.dropIndex(indexName, dropIndexOptions) + } + + override fun dropIndex(keys: Bson) = runBlocking { wrapped.dropIndex(keys) } + + override fun dropIndex(keys: Bson, dropIndexOptions: DropIndexOptions) = runBlocking { + wrapped.dropIndex(keys, dropIndexOptions) + } + + override fun dropIndex(clientSession: ClientSession, indexName: String) = runBlocking { + wrapped.dropIndex(clientSession.unwrapped(), indexName) + } + + override fun dropIndex(clientSession: ClientSession, keys: Bson) = runBlocking { + wrapped.dropIndex(clientSession.unwrapped(), keys) + } + override fun dropIndex(clientSession: ClientSession, indexName: String, dropIndexOptions: DropIndexOptions) = + runBlocking { + wrapped.dropIndex(clientSession.unwrapped(), indexName, dropIndexOptions) + } + + override fun dropIndex(clientSession: ClientSession, keys: Bson, dropIndexOptions: DropIndexOptions) = runBlocking { + wrapped.dropIndex(clientSession.unwrapped(), keys, dropIndexOptions) + } + + override fun dropIndexes() = runBlocking { wrapped.dropIndexes() } + + override fun dropIndexes(clientSession: ClientSession) = runBlocking { + wrapped.dropIndexes(clientSession.unwrapped()) + } + + override fun dropIndexes(dropIndexOptions: DropIndexOptions) = runBlocking { wrapped.dropIndexes(dropIndexOptions) } + + override fun dropIndexes(clientSession: ClientSession, dropIndexOptions: DropIndexOptions) = runBlocking { + wrapped.dropIndexes(clientSession.unwrapped(), dropIndexOptions) + } + + override fun renameCollection(newCollectionNamespace: MongoNamespace) = runBlocking { + wrapped.renameCollection(newCollectionNamespace) + } + + override fun renameCollection( + newCollectionNamespace: MongoNamespace, + renameCollectionOptions: RenameCollectionOptions + ) = runBlocking { wrapped.renameCollection(newCollectionNamespace, renameCollectionOptions) } + + override fun renameCollection(clientSession: ClientSession, newCollectionNamespace: MongoNamespace) = runBlocking { + wrapped.renameCollection(clientSession.unwrapped(), newCollectionNamespace) + } + + override fun renameCollection( + clientSession: ClientSession, + newCollectionNamespace: MongoNamespace, + renameCollectionOptions: RenameCollectionOptions + ) = runBlocking { + wrapped.renameCollection(clientSession.unwrapped(), newCollectionNamespace, renameCollectionOptions) + } + + override fun findOneAndReplace( + clientSession: ClientSession, + filter: Bson, + replacement: T, + options: FindOneAndReplaceOptions + ): T? = runBlocking { wrapped.findOneAndReplace(clientSession.unwrapped(), filter, replacement, options) } + + override fun findOneAndReplace(clientSession: ClientSession, filter: Bson, replacement: T): T? = runBlocking { + wrapped.findOneAndReplace(clientSession.unwrapped(), filter, replacement) + } + + override fun findOneAndReplace(filter: Bson, replacement: T, options: FindOneAndReplaceOptions): T? = runBlocking { + wrapped.findOneAndReplace(filter, replacement, options) + } + + override fun findOneAndReplace(filter: Bson, replacement: T): T? 
= runBlocking { + wrapped.findOneAndReplace(filter, replacement) + } + + override fun replaceOne( + clientSession: ClientSession, + filter: Bson, + replacement: T, + replaceOptions: ReplaceOptions + ): UpdateResult = runBlocking { wrapped.replaceOne(clientSession.unwrapped(), filter, replacement, replaceOptions) } + + override fun replaceOne(clientSession: ClientSession, filter: Bson, replacement: T): UpdateResult = runBlocking { + wrapped.replaceOne(clientSession.unwrapped(), filter, replacement) + } + + override fun replaceOne(filter: Bson, replacement: T, replaceOptions: ReplaceOptions): UpdateResult = runBlocking { + wrapped.replaceOne(filter, replacement, replaceOptions) + } + + override fun replaceOne(filter: Bson, replacement: T): UpdateResult = runBlocking { + wrapped.replaceOne(filter, replacement) + } + + override fun insertMany( + clientSession: ClientSession, + documents: MutableList, + options: InsertManyOptions + ): InsertManyResult = runBlocking { wrapped.insertMany(clientSession.unwrapped(), documents, options) } + + override fun insertMany(clientSession: ClientSession, documents: MutableList): InsertManyResult = + runBlocking { + wrapped.insertMany(clientSession.unwrapped(), documents) + } + + override fun insertMany(documents: MutableList, options: InsertManyOptions): InsertManyResult = runBlocking { + wrapped.insertMany(documents, options) + } + + override fun insertMany(documents: MutableList): InsertManyResult = runBlocking { + wrapped.insertMany(documents) + } + + override fun insertOne(clientSession: ClientSession, document: T, options: InsertOneOptions): InsertOneResult = + runBlocking { + wrapped.insertOne(clientSession.unwrapped(), document, options) + } + + override fun insertOne(clientSession: ClientSession, document: T): InsertOneResult = runBlocking { + wrapped.insertOne(clientSession.unwrapped(), document) + } + + override fun insertOne(document: T, options: InsertOneOptions): InsertOneResult = runBlocking { + wrapped.insertOne(document, options) + } + + override fun insertOne(document: T): InsertOneResult = runBlocking { wrapped.insertOne(document) } + + override fun bulkWrite( + clientSession: ClientSession, + requests: MutableList>, + options: BulkWriteOptions + ): BulkWriteResult = runBlocking { wrapped.bulkWrite(clientSession.unwrapped(), requests, options) } + + override fun bulkWrite( + clientSession: ClientSession, + requests: MutableList> + ): BulkWriteResult = runBlocking { wrapped.bulkWrite(clientSession.unwrapped(), requests) } + + override fun bulkWrite(requests: MutableList>, options: BulkWriteOptions): BulkWriteResult = + runBlocking { + wrapped.bulkWrite(requests, options) + } + + override fun bulkWrite(requests: MutableList>): BulkWriteResult = runBlocking { + wrapped.bulkWrite(requests) + } + + private fun ClientSession.unwrapped() = (this as SyncClientSession).wrapped +} diff --git a/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCursor.kt b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCursor.kt new file mode 100644 index 00000000000..fd96b6028c9 --- /dev/null +++ b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoCursor.kt @@ -0,0 +1,46 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
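A hedged round trip through the sync collection surface (in this adapter layer `people` would come from SyncMongoDatabase.getCollection; the names are illustrative):

import com.mongodb.client.MongoCollection
import org.bson.Document

fun roundTrip(people: MongoCollection<Document>) {
    people.insertOne(Document("name", "Ada")) // blocks until the wrapped coroutine completes
    println(people.countDocuments(Document("name", "Ada"))) // 1 on a fresh collection
}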
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.coroutine.syncadapter
+
+import com.mongodb.ServerAddress
+import com.mongodb.ServerCursor
+import com.mongodb.client.MongoCursor
+import java.lang.UnsupportedOperationException
+import kotlinx.coroutines.flow.Flow
+import kotlinx.coroutines.flow.toList
+import kotlinx.coroutines.runBlocking
+
+open class SyncMongoCursor<T>(private val delegate: Flow<T>) : MongoCursor<T> {
+
+    val iterator: Iterator<T> by lazy { runBlocking { delegate.toList() }.iterator() }
+
+    override fun remove() {
+        TODO("Not yet implemented")
+    }
+
+    override fun hasNext(): Boolean = iterator.hasNext()
+    @Suppress("UNCHECKED_CAST") override fun next(): T & Any = iterator.next() as (T & Any)
+
+    override fun close() {}
+
+    override fun available(): Int = throw UnsupportedOperationException()
+
+    override fun tryNext(): T? = throw UnsupportedOperationException()
+
+    override fun getServerCursor(): ServerCursor? = throw UnsupportedOperationException()
+
+    override fun getServerAddress(): ServerAddress = throw UnsupportedOperationException()
+}
diff --git a/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoDatabase.kt b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoDatabase.kt
new file mode 100644
index 00000000000..ae83a1443b7
--- /dev/null
+++ b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoDatabase.kt
@@ -0,0 +1,214 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
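Note the cost model SyncMongoCursor implies: the wrapped Flow is collected exactly once, lazily on first access, and the whole result set is buffered in memory before iteration starts. A standalone sketch of the same strategy:

import kotlinx.coroutines.flow.flow
import kotlinx.coroutines.flow.toList
import kotlinx.coroutines.runBlocking

fun main() {
    val source = flow { repeat(3) { println("emitting $it"); emit(it) } }
    // As in SyncMongoCursor.iterator: nothing runs until the first hasNext()/next(),
    // then the flow is drained completely on the calling thread.
    val cursor: Iterator<Int> by lazy { runBlocking { source.toList() }.iterator() }
    println("cursor created, nothing emitted yet")
    while (cursor.hasNext()) println("got ${cursor.next()}")
}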
+ */ +package com.mongodb.kotlin.client.coroutine.syncadapter + +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import com.mongodb.WriteConcern +import com.mongodb.client.* +import com.mongodb.client.MongoDatabase as JMongoDatabase +import com.mongodb.client.model.CreateCollectionOptions +import com.mongodb.client.model.CreateViewOptions +import com.mongodb.kotlin.client.coroutine.MongoDatabase +import java.util.concurrent.TimeUnit +import kotlinx.coroutines.runBlocking +import org.bson.Document +import org.bson.codecs.configuration.CodecRegistry +import org.bson.conversions.Bson + +data class SyncMongoDatabase(val wrapped: MongoDatabase) : JMongoDatabase { + override fun getName(): String = wrapped.name + + override fun getCodecRegistry(): CodecRegistry = wrapped.codecRegistry + + override fun getReadPreference(): ReadPreference = wrapped.readPreference + + override fun getWriteConcern(): WriteConcern = wrapped.writeConcern + + override fun getReadConcern(): ReadConcern = wrapped.readConcern + + override fun getTimeout(timeUnit: TimeUnit): Long? = wrapped.timeout(timeUnit) + + override fun withCodecRegistry(codecRegistry: CodecRegistry): SyncMongoDatabase = + SyncMongoDatabase(wrapped.withCodecRegistry(codecRegistry)) + + override fun withReadPreference(readPreference: ReadPreference): SyncMongoDatabase = + SyncMongoDatabase(wrapped.withReadPreference(readPreference)) + + override fun withWriteConcern(writeConcern: WriteConcern): SyncMongoDatabase = + SyncMongoDatabase(wrapped.withWriteConcern(writeConcern)) + + override fun withReadConcern(readConcern: ReadConcern): SyncMongoDatabase = + SyncMongoDatabase(wrapped.withReadConcern(readConcern)) + + override fun withTimeout(timeout: Long, timeUnit: TimeUnit): SyncMongoDatabase = + SyncMongoDatabase(wrapped.withTimeout(timeout, timeUnit)) + + override fun getCollection(collectionName: String): MongoCollection = + SyncMongoCollection(wrapped.getCollection(collectionName, Document::class.java)) + + override fun getCollection(collectionName: String, documentClass: Class): MongoCollection = + SyncMongoCollection(wrapped.getCollection(collectionName, documentClass)) + + override fun runCommand(command: Bson): Document = runBlocking { wrapped.runCommand(command) } + + override fun runCommand(command: Bson, readPreference: ReadPreference): Document = runBlocking { + wrapped.runCommand(command, readPreference) + } + + override fun runCommand(command: Bson, resultClass: Class): T = runBlocking { + wrapped.runCommand(command, resultClass = resultClass) + } + + override fun runCommand(command: Bson, readPreference: ReadPreference, resultClass: Class): T = + runBlocking { + wrapped.runCommand(command, readPreference, resultClass) + } + + override fun runCommand(clientSession: ClientSession, command: Bson): Document = runBlocking { + wrapped.runCommand(clientSession.unwrapped(), command) + } + + override fun runCommand(clientSession: ClientSession, command: Bson, readPreference: ReadPreference): Document = + runBlocking { + wrapped.runCommand(clientSession.unwrapped(), command, readPreference) + } + + override fun runCommand(clientSession: ClientSession, command: Bson, resultClass: Class): T = + runBlocking { + wrapped.runCommand(clientSession.unwrapped(), command, resultClass = resultClass) + } + + override fun runCommand( + clientSession: ClientSession, + command: Bson, + readPreference: ReadPreference, + resultClass: Class + ): T = runBlocking { wrapped.runCommand(clientSession.unwrapped(), command, readPreference, resultClass) } + 
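A hedged sketch of exercising the database adapter directly (the URI and database name are placeholders; SyncMongoDatabase is the adapter defined here):

import com.mongodb.kotlin.client.coroutine.MongoClient
import org.bson.Document

fun main() {
    val client = MongoClient.create("mongodb://localhost:27017")
    try {
        val admin = SyncMongoDatabase(client.getDatabase("admin"))
        // Blocks on the suspend runCommand via runBlocking inside the adapter.
        println(admin.runCommand(Document("ping", 1)))
    } finally {
        client.close()
    }
}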
+ override fun drop() = runBlocking { wrapped.drop() } + + override fun drop(clientSession: ClientSession) = runBlocking { wrapped.drop(clientSession.unwrapped()) } + + override fun listCollectionNames(): ListCollectionNamesIterable = + SyncListCollectionNamesIterable(wrapped.listCollectionNames()) + + override fun listCollectionNames(clientSession: ClientSession): ListCollectionNamesIterable = + SyncListCollectionNamesIterable(wrapped.listCollectionNames(clientSession.unwrapped())) + + override fun listCollections(): ListCollectionsIterable = + SyncListCollectionsIterable(wrapped.listCollections()) + + override fun listCollections(resultClass: Class): ListCollectionsIterable = + SyncListCollectionsIterable(wrapped.listCollections(resultClass)) + + override fun listCollections(clientSession: ClientSession): ListCollectionsIterable = + SyncListCollectionsIterable(wrapped.listCollections(clientSession.unwrapped())) + + override fun listCollections( + clientSession: ClientSession, + resultClass: Class + ): ListCollectionsIterable = + SyncListCollectionsIterable(wrapped.listCollections(clientSession.unwrapped(), resultClass)) + + override fun createCollection(collectionName: String) { + runBlocking { wrapped.createCollection(collectionName) } + } + + override fun createCollection(collectionName: String, createCollectionOptions: CreateCollectionOptions) { + runBlocking { wrapped.createCollection(collectionName, createCollectionOptions) } + } + + override fun createCollection(clientSession: ClientSession, collectionName: String) { + runBlocking { wrapped.createCollection(clientSession.unwrapped(), collectionName) } + } + + override fun createCollection( + clientSession: ClientSession, + collectionName: String, + createCollectionOptions: CreateCollectionOptions + ) = runBlocking { wrapped.createCollection(clientSession.unwrapped(), collectionName, createCollectionOptions) } + + override fun createView(viewName: String, viewOn: String, pipeline: MutableList) = runBlocking { + wrapped.createView(viewName, viewOn, pipeline) + } + + override fun createView( + viewName: String, + viewOn: String, + pipeline: MutableList, + createViewOptions: CreateViewOptions + ) = runBlocking { wrapped.createView(viewName, viewOn, pipeline, createViewOptions) } + + override fun createView( + clientSession: ClientSession, + viewName: String, + viewOn: String, + pipeline: MutableList + ) = runBlocking { wrapped.createView(clientSession.unwrapped(), viewName, viewOn, pipeline) } + + override fun createView( + clientSession: ClientSession, + viewName: String, + viewOn: String, + pipeline: MutableList, + createViewOptions: CreateViewOptions + ) = runBlocking { wrapped.createView(clientSession.unwrapped(), viewName, viewOn, pipeline, createViewOptions) } + + override fun watch(): ChangeStreamIterable = SyncChangeStreamIterable(wrapped.watch()) + + override fun watch(resultClass: Class): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(resultClass = resultClass)) + + override fun watch(pipeline: MutableList): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(pipeline)) + + override fun watch(pipeline: MutableList, resultClass: Class): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(pipeline, resultClass)) + + override fun watch(clientSession: ClientSession): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped())) + + override fun watch(clientSession: ClientSession, resultClass: Class): ChangeStreamIterable = + 
SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), resultClass = resultClass)) + + override fun watch(clientSession: ClientSession, pipeline: MutableList): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline)) + + override fun watch( + clientSession: ClientSession, + pipeline: MutableList, + resultClass: Class + ): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline, resultClass)) + + override fun aggregate(pipeline: MutableList): AggregateIterable = + SyncAggregateIterable(wrapped.aggregate(pipeline)) + + override fun aggregate(pipeline: MutableList, resultClass: Class): AggregateIterable = + SyncAggregateIterable(wrapped.aggregate(pipeline, resultClass)) + + override fun aggregate(clientSession: ClientSession, pipeline: MutableList): AggregateIterable = + SyncAggregateIterable(wrapped.aggregate(clientSession.unwrapped(), pipeline)) + + override fun aggregate( + clientSession: ClientSession, + pipeline: MutableList, + resultClass: Class + ): AggregateIterable = SyncAggregateIterable(wrapped.aggregate(clientSession.unwrapped(), pipeline, resultClass)) + + private fun ClientSession.unwrapped() = (this as SyncClientSession).wrapped +} diff --git a/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoIterable.kt b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoIterable.kt new file mode 100644 index 00000000000..98ab0d93b75 --- /dev/null +++ b/driver-kotlin-coroutine/src/integrationTest/kotlin/com/mongodb/kotlin/client/coroutine/syncadapter/SyncMongoIterable.kt @@ -0,0 +1,51 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.kotlin.client.coroutine.syncadapter + +import com.mongodb.Function +import com.mongodb.client.MongoCursor +import com.mongodb.client.MongoIterable as JMongoIterable +import com.mongodb.client.cursor.TimeoutMode +import kotlinx.coroutines.flow.Flow +import kotlinx.coroutines.flow.firstOrNull +import kotlinx.coroutines.flow.map +import kotlinx.coroutines.flow.toList +import kotlinx.coroutines.runBlocking + +open class SyncMongoIterable(private val delegate: Flow) : JMongoIterable { + private var batchSize: Int? = null + private var timeoutMode: TimeoutMode? = null + + override fun iterator(): MongoCursor = cursor() + + override fun cursor(): MongoCursor = SyncMongoCursor(delegate) + + override fun first(): T? 
= runBlocking { delegate.firstOrNull() }
+
+    override fun batchSize(batchSize: Int): SyncMongoIterable<T> = apply {
+        this@SyncMongoIterable.batchSize = batchSize
+    }
+
+    @Suppress("UNCHECKED_CAST")
+    override fun <A : MutableCollection<in T>?> into(target: A): A & Any {
+        runBlocking { target?.addAll(delegate.toList()) }
+        return target as (A & Any)
+    }
+
+    @Suppress("UNCHECKED_CAST")
+    override fun <U> map(mapper: Function<T, U>): SyncMongoIterable<U> =
+        SyncMongoIterable(delegate.map { mapper.apply(it as (T & Any)) as (U & Any) })
+}
diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/AggregateFlow.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/AggregateFlow.kt
new file mode 100644
index 00000000000..a1debfd812f
--- /dev/null
+++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/AggregateFlow.kt
@@ -0,0 +1,226 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.coroutine
+
+import com.mongodb.ExplainVerbosity
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
+import com.mongodb.client.cursor.TimeoutMode
+import com.mongodb.client.model.Collation
+import com.mongodb.reactivestreams.client.AggregatePublisher
+import java.util.concurrent.TimeUnit
+import kotlinx.coroutines.flow.Flow
+import kotlinx.coroutines.flow.FlowCollector
+import kotlinx.coroutines.reactive.asFlow
+import kotlinx.coroutines.reactive.awaitFirstOrNull
+import kotlinx.coroutines.reactive.awaitSingle
+import org.bson.BsonValue
+import org.bson.Document
+import org.bson.conversions.Bson
+
+/**
+ * Flow implementation for aggregate operations.
+ *
+ * @param T The type of the result.
+ * @see [Aggregation command](https://www.mongodb.com/docs/manual/reference/command/aggregate)
+ */
+public class AggregateFlow<T : Any>(private val wrapped: AggregatePublisher<T>) : Flow<T> by wrapped.asFlow() {
+
+    /**
+     * Sets the number of documents to return per batch.
+     *
+     * @param batchSize the batch size
+     * @return this
+     * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize)
+     */
+    public fun batchSize(batchSize: Int): AggregateFlow<T> = apply { wrapped.batchSize(batchSize) }
+
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via
+     * [MongoCollection]
+     *
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun timeoutMode(timeoutMode: TimeoutMode): AggregateFlow<T> = apply { wrapped.timeoutMode(timeoutMode) }
+
+    /**
+     * Aggregates documents according to the specified aggregation pipeline, which must end with an `$out` or `$merge`
+     * stage. Calling this method is the preferred alternative to consuming this [AggregateFlow], because this method
+     * does what is explicitly requested without executing implicit operations.
+     *
+     * @throws IllegalStateException if the pipeline does not end with an `$out` or `$merge` stage
+     * @see [$out stage](https://www.mongodb.com/docs/manual/reference/operator/aggregation/out/)
+     * @see [$merge stage](https://www.mongodb.com/docs/manual/reference/operator/aggregation/merge/)
+     */
+    public suspend fun toCollection() {
+        wrapped.toCollection().awaitFirstOrNull()
+    }
+
+    /**
+     * Enables writing to temporary files. A null value indicates that it's unspecified.
+     *
+     * @param allowDiskUse true if writing to temporary files is enabled
+     * @return this
+     * @see [Aggregation command](https://www.mongodb.com/docs/manual/reference/command/aggregate/)
+     */
+    public fun allowDiskUse(allowDiskUse: Boolean?): AggregateFlow<T> = apply { wrapped.allowDiskUse(allowDiskUse) }
+
+    /**
+     * Sets the maximum execution time on the server for this operation.
+     *
+     * @param maxTime the max time
+     * @param timeUnit the time unit, defaults to Milliseconds
+     * @return this
+     * @see [Max Time](https://www.mongodb.com/docs/manual/reference/method/cursor.maxTimeMS/#cursor.maxTimeMS)
+     */
+    public fun maxTime(maxTime: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): AggregateFlow<T> = apply {
+        wrapped.maxTime(maxTime, timeUnit)
+    }
+
+    /**
+     * The maximum amount of time for the server to wait on new documents to satisfy a `$changeStream` aggregation.
+     *
+     * A zero value will be ignored.
+     *
+     * @param maxAwaitTime the max await time
+     * @param timeUnit the time unit to return the result in, defaults to Milliseconds
+     * @return the maximum await execution time in the given time unit
+     */
+    public fun maxAwaitTime(maxAwaitTime: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): AggregateFlow<T> = apply {
+        wrapped.maxAwaitTime(maxAwaitTime, timeUnit)
+    }
+
+    /**
+     * Sets the bypass document level validation flag.
+     *
+     * Note: This only applies when an $out or $merge stage is specified.
+     *
+     * @param bypassDocumentValidation If true, allows the write to opt-out of document level validation.
+     * @return this
+     * @see [Aggregation command](https://www.mongodb.com/docs/manual/reference/command/aggregate/)
+     */
+    public fun bypassDocumentValidation(bypassDocumentValidation: Boolean?): AggregateFlow<T> = apply {
+        wrapped.bypassDocumentValidation(bypassDocumentValidation)
+    }
+
+    /**
+     * Sets the collation options
+     *
+     * A null value represents the server default.
+     *
+     * @param collation the collation options to use
+     * @return this
+     */
+    public fun collation(collation: Collation?): AggregateFlow<T> = apply { wrapped.collation(collation) }
+
+    /**
+     * Sets the comment for this operation. A null value means no comment is set.
+     *
+     * @param comment the comment
+     * @return this
+     */
+    public fun comment(comment: String?): AggregateFlow<T> = apply { wrapped.comment(comment) }
+
+    /**
+     * Sets the comment for this operation. A null value means no comment is set.
+     *
+     * The comment can be any valid BSON type for server versions 4.4 and above. Server versions between 3.6 and 4.2
+     * only support string as comment, and providing a non-string type will result in a server-side error.
+     *
+     * @param comment the comment
+     * @return this
+     */
+    public fun comment(comment: BsonValue?): AggregateFlow<T> = apply { wrapped.comment(comment) }
+
+    /**
+     * Sets the hint for which index to use. A null value means no hint is set.
+     *
+     * @param hint the hint
+     * @return this
+     */
+    public fun hint(hint: Bson?): AggregateFlow<T> = apply { wrapped.hint(hint) }
+
+    /**
+     * Sets the hint to apply.
+     *
+     * Note: If [AggregateFlow.hint] is set that will be used instead of any hint string.
+     *
+     * @param hint the name of the index which should be used for the operation
+     * @return this
+     */
+    public fun hintString(hint: String?): AggregateFlow<T> = apply { wrapped.hintString(hint) }
+
+    /**
+     * Add top-level variables to the aggregation.
+     *
+     * For MongoDB 5.0+, the aggregate command accepts a `let` option. This option is a document consisting of zero or
+     * more fields representing variables that are accessible to the aggregation pipeline. The key is the name of the
+     * variable and the value is a constant in the aggregate expression language. Each parameter name is then usable to
+     * access the value of the corresponding expression with the "$$" syntax within aggregate expression contexts which
+     * may require the use of $expr or a pipeline.
+     *
+     * @param variables the variables
+     * @return this
+     */
+    public fun let(variables: Bson?): AggregateFlow<T> = apply { wrapped.let(variables) }
+
+    /**
+     * Explain the execution plan for this operation with the given verbosity level
+     *
+     * @param verbosity the verbosity of the explanation
+     * @return the execution plan
+     * @see [Explain command](https://www.mongodb.com/docs/manual/reference/command/explain/)
+     */
+    @JvmName("explainDocument")
+    public suspend fun explain(verbosity: ExplainVerbosity? = null): Document = explain(Document::class.java, verbosity)
+
+    /**
+     * Explain the execution plan for this operation with the given verbosity level
+     *
+     * @param R the type of the document class
+     * @param resultClass the result document type.
+     * @param verbosity the verbosity of the explanation
+     * @return the execution plan
+     * @see [Explain command](https://www.mongodb.com/docs/manual/reference/command/explain/)
+     */
+    public suspend fun <R : Any> explain(resultClass: Class<R>, verbosity: ExplainVerbosity? = null): R =
+        if (verbosity == null) wrapped.explain(resultClass).awaitSingle()
+        else wrapped.explain(resultClass, verbosity).awaitSingle()
+
+    /**
+     * Explain the execution plan for this operation with the given verbosity level
+     *
+     * @param R the type of the document class
+     * @param verbosity the verbosity of the explanation
+     * @return the execution plan
+     * @see [Explain command](https://www.mongodb.com/docs/manual/reference/command/explain/)
+     */
+    public suspend inline fun <reified R : Any> explain(verbosity: ExplainVerbosity? = null): R =
+        explain(R::class.java, verbosity)
+
+    /**
+     * Requests [AggregateFlow] to start streaming data according to the specified aggregation pipeline.
+     * - If the aggregation pipeline ends with an `$out` or `$merge` stage, then finds all documents in the affected
+     *   namespace and emits them. You may want to use [toCollection] instead.
+     * - Otherwise, emits no values.
+     */
+    public override suspend fun collect(collector: FlowCollector<T>): Unit = wrapped.asFlow().collect(collector)
+}
diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ChangeStreamFlow.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ChangeStreamFlow.kt
new file mode 100644
index 00000000000..55bfeb82060
--- /dev/null
+++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ChangeStreamFlow.kt
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
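A hedged sketch of the two consumption modes AggregateFlow documents above (database, collection, and pipeline names are placeholders): `toCollection()` for `$merge`-terminated pipelines, and `explain` for plan inspection:

import com.mongodb.kotlin.client.coroutine.MongoClient
import org.bson.Document

suspend fun archiveCompleted(client: MongoClient) {
    val orders = client.getDatabase("shop").getCollection("orders", Document::class.java)
    val pipeline = listOf(
        Document("\$match", Document("status", "complete")),
        Document("\$merge", Document("into", "ordersArchive")))
    // Runs the pipeline server-side without streaming results back.
    orders.aggregate(pipeline).toCollection()
    // Inspect the plan instead of executing for results.
    println(orders.aggregate(pipeline).explain<Document>())
}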
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.kotlin.client.coroutine + +import com.mongodb.client.model.Collation +import com.mongodb.client.model.changestream.ChangeStreamDocument +import com.mongodb.client.model.changestream.FullDocument +import com.mongodb.client.model.changestream.FullDocumentBeforeChange +import com.mongodb.reactivestreams.client.ChangeStreamPublisher +import java.util.concurrent.TimeUnit +import kotlinx.coroutines.flow.Flow +import kotlinx.coroutines.flow.FlowCollector +import kotlinx.coroutines.reactive.asFlow +import org.bson.BsonDocument +import org.bson.BsonTimestamp +import org.bson.BsonValue + +/** + * Flow implementation for change streams. + * + * Note: the [ChangeStreamDocument] class will not be applicable for all change stream outputs. If using custom + * pipelines that radically change the result, then the [withDocumentClass] method can be used to provide an alternative + * document format. + * + * @param T The type of the result. + */ +public class ChangeStreamFlow(private val wrapped: ChangeStreamPublisher) : Flow> { + + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size + * @return this + * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize) + */ + public fun batchSize(batchSize: Int): ChangeStreamFlow = apply { wrapped.batchSize(batchSize) } + + /** + * Sets the fullDocument value. + * + * @param fullDocument the fullDocument + * @return this + */ + public fun fullDocument(fullDocument: FullDocument): ChangeStreamFlow = apply { + wrapped.fullDocument(fullDocument) + } + + /** + * Sets the fullDocumentBeforeChange value. + * + * @param fullDocumentBeforeChange the fullDocumentBeforeChange + * @return this + */ + public fun fullDocumentBeforeChange(fullDocumentBeforeChange: FullDocumentBeforeChange): ChangeStreamFlow = + apply { + wrapped.fullDocumentBeforeChange(fullDocumentBeforeChange) + } + + /** + * Sets the logical starting point for the new change stream. + * + * @param resumeToken the resume token + * @return this + */ + public fun resumeAfter(resumeToken: BsonDocument): ChangeStreamFlow = apply { wrapped.resumeAfter(resumeToken) } + + /** + * Sets the maximum await execution time on the server for this operation. + * + * @param maxAwaitTime the max await time. A zero value will be ignored, and indicates that the driver should + * respect the server's default value + * @param timeUnit the time unit, which defaults to MILLISECONDS + * @return this + */ + public fun maxAwaitTime(maxAwaitTime: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): ChangeStreamFlow = + apply { + wrapped.maxAwaitTime(maxAwaitTime, timeUnit) + } + + /** + * Sets the collation options + * + * A null value represents the server default. + * + * @param collation the collation options to use + * @return this + */ + public fun collation(collation: Collation?): ChangeStreamFlow = apply { wrapped.collation(collation) } + + /** + * Returns a `MongoIterable` containing the results of the change stream based on the document class provided. 
+ * + * @param R the Mongo Iterable type + * @param resultClass the target document type of the iterable. + * @return the new Mongo Iterable + */ + public fun withDocumentClass(resultClass: Class): Flow = + wrapped.withDocumentClass(resultClass).asFlow() + + /** + * Returns a `MongoIterable` containing the results of the change stream based on the document class provided. + * + * @param R the Mongo Iterable type + * @return the new Mongo Iterable + */ + public inline fun withDocumentClass(): Flow = withDocumentClass(R::class.java) + + /** + * The change stream will only provide changes that occurred at or after the specified timestamp. + * + * Any command run against the server will return an operation time that can be used here. + * + * The default value is an operation time obtained from the server before the change stream was created. + * + * @param startAtOperationTime the start at operation time + * @return this + */ + public fun startAtOperationTime(startAtOperationTime: BsonTimestamp): ChangeStreamFlow = apply { + wrapped.startAtOperationTime(startAtOperationTime) + } + + /** + * Similar to `resumeAfter`, this option takes a resume token and starts a new change stream returning the first + * notification after the token. + * + * This will allow users to watch collections that have been dropped and recreated or newly renamed collections + * without missing any notifications. + * + * Note: The server will report an error if both `startAfter` and `resumeAfter` are specified. + * + * @param startAfter the startAfter resumeToken + * @return this + * @see [Start After](https://www.mongodb.com/docs/manual/changeStreams/#change-stream-start-after) + */ + public fun startAfter(startAfter: BsonDocument): ChangeStreamFlow = apply { wrapped.startAfter(startAfter) } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + */ + public fun comment(comment: String?): ChangeStreamFlow = apply { wrapped.comment(comment) } + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * The comment can be any valid BSON type for server versions 4.4 and above. Server versions between 3.6 and 4.2 + * only support string as comment, and providing a non-string type will result in a server-side error. + * + * @param comment the comment + */ + public fun comment(comment: BsonValue?): ChangeStreamFlow = apply { wrapped.comment(comment) } + + /** + * Sets whether to include expanded change stream events, which are: createIndexes, dropIndexes, modify, create, + * shardCollection, reshardCollection, refineCollectionShardKey. False by default. + * + * @param showExpandedEvents true to include expanded events + * @return this + */ + public fun showExpandedEvents(showExpandedEvents: Boolean): ChangeStreamFlow = apply { + wrapped.showExpandedEvents(showExpandedEvents) + } + public override suspend fun collect(collector: FlowCollector>): Unit = + wrapped.asFlow().collect(collector) +} diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ClientSession.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ClientSession.kt new file mode 100644 index 00000000000..6c53a1faf47 --- /dev/null +++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ClientSession.kt @@ -0,0 +1,239 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.kotlin.client.coroutine + +import com.mongodb.ClientSessionOptions +import com.mongodb.ServerAddress +import com.mongodb.TransactionOptions +import com.mongodb.internal.TimeoutContext +import com.mongodb.reactivestreams.client.ClientSession as reactiveClientSession +import com.mongodb.session.ClientSession as jClientSession +import com.mongodb.session.ServerSession +import java.util.concurrent.TimeUnit +import kotlinx.coroutines.reactive.awaitFirstOrNull +import org.bson.BsonDocument +import org.bson.BsonTimestamp + +/** A client session that supports transactions. */ +public class ClientSession(public val wrapped: reactiveClientSession) : jClientSession { + + public override fun close(): Unit = wrapped.close() + + /** + * Returns true if there is an active transaction on this session, and false otherwise + * + * @return true if there is an active transaction on this session + */ + public fun hasActiveTransaction(): Boolean = wrapped.hasActiveTransaction() + + /** + * Notify the client session that a message has been sent. + * + * For internal use only + * + * @return true if this is the first message sent, false otherwise + */ + public fun notifyMessageSent(): Boolean = wrapped.notifyMessageSent() + + /** + * Notify the client session that command execution is being initiated. This should be called before server + * selection occurs. + * + * For internal use only + * + * @param operation the operation + */ + public fun notifyOperationInitiated(operation: Any): Unit = wrapped.notifyOperationInitiated(operation) + + /** + * Get the server address of the pinned mongos on this session. For internal use only. + * + * @return the server address of the pinned mongos + */ + public override fun getPinnedServerAddress(): ServerAddress? = wrapped.pinnedServerAddress + + /** + * Gets the transaction context. + * + * For internal use only + * + * @return the transaction context + */ + public override fun getTransactionContext(): Any? = wrapped.transactionContext + + /** + * Sets the transaction context. + * + * For internal use only + * + * Implementations may place additional restrictions on the type of the transaction context + * + * @param address the server address + * @param transactionContext the transaction context + */ + public override fun setTransactionContext(address: ServerAddress, transactionContext: Any): Unit = + wrapped.setTransactionContext(address, transactionContext) + + /** + * Clears the transaction context. + * + * For internal use only + */ + public override fun clearTransactionContext(): Unit = wrapped.clearTransactionContext() + + /** + * Get the recovery token from the latest outcome in a sharded transaction. For internal use only. + * + * @return the recovery token @mongodb.server.release 4.2 + * @since 3.11 + */ + public override fun getRecoveryToken(): BsonDocument? = wrapped.recoveryToken + + /** + * Set the recovery token. For internal use only. 
+ *
+ * @param recoveryToken the recovery token
+ */
+ public override fun setRecoveryToken(recoveryToken: BsonDocument) {
+ wrapped.recoveryToken = recoveryToken
+ }
+
+ /**
+ * Get the options for this session.
+ *
+ * @return the options, which may not be null
+ */
+ public override fun getOptions(): ClientSessionOptions = wrapped.options
+
+ /**
+ * Returns true if operations in this session must be causally consistent
+ *
+ * @return whether operations in this session must be causally consistent.
+ */
+ public override fun isCausallyConsistent(): Boolean = wrapped.isCausallyConsistent
+
+ /**
+ * Gets the originator for the session.
+ *
+ * Important because sessions must only be used by their own originator.
+ *
+ * @return the session's originator
+ */
+ public override fun getOriginator(): Any = wrapped.originator
+
+ /** @return the server session */
+ public override fun getServerSession(): ServerSession = wrapped.serverSession
+
+ /**
+ * Gets the operation time of the last operation executed in this session.
+ *
+ * @return the operation time
+ */
+ public override fun getOperationTime(): BsonTimestamp = wrapped.operationTime
+
+ /**
+ * Set the operation time of the last operation executed in this session.
+ *
+ * @param operationTime the operation time
+ */
+ public override fun advanceOperationTime(operationTime: BsonTimestamp?): Unit =
+ wrapped.advanceOperationTime(operationTime)
+
+ /** @param clusterTime the cluster time to advance to */
+ public override fun advanceClusterTime(clusterTime: BsonDocument?): Unit = wrapped.advanceClusterTime(clusterTime)
+
+ /**
+ * For internal use only.
+ *
+ * @param snapshotTimestamp the snapshot timestamp
+ */
+ public override fun setSnapshotTimestamp(snapshotTimestamp: BsonTimestamp?) {
+ wrapped.snapshotTimestamp = snapshotTimestamp
+ }
+
+ /**
+ * For internal use only.
+ *
+ * @return the snapshot timestamp
+ */
+ public override fun getSnapshotTimestamp(): BsonTimestamp? = wrapped.snapshotTimestamp
+
+ /** @return the latest cluster time seen by this session */
+ public override fun getClusterTime(): BsonDocument = wrapped.clusterTime
+
+ /**
+ * Gets the transaction options. Only call this method if the session has an active transaction.
+ *
+ * @return the transaction options
+ */
+ public fun getTransactionOptions(): TransactionOptions = wrapped.transactionOptions
+
+ /**
+ * Start a transaction in the context of this session with default transaction options. A transaction cannot be
+ * started if there is already an active transaction on this session.
+ */
+ public fun startTransaction(): Unit = wrapped.startTransaction()
+
+ /**
+ * Start a transaction in the context of this session with the given transaction options. A transaction cannot be
+ * started if there is already an active transaction on this session.
+ *
+ * @param transactionOptions the options to apply to the transaction
+ */
+ public fun startTransaction(transactionOptions: TransactionOptions): Unit =
+ wrapped.startTransaction(transactionOptions)
+
+ /**
+ * Commit a transaction in the context of this session. A transaction can only be committed if one has first been
+ * started. This method suspends until the operation has completed on the server.
+ */
+ public suspend fun commitTransaction() {
+ wrapped.commitTransaction().awaitFirstOrNull()
+ }
+
+ /**
+ * Abort a transaction in the context of this session. A transaction can only be aborted if one has first been
+ * started.
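+ *
+ * A minimal sketch of the commit/abort pattern (the `client`, `collection`, and document values are assumed for
+ * illustration):
+ * ```
+ * client.startSession().use { session ->  // within a coroutine
+ *     session.startTransaction()
+ *     try {
+ *         collection.insertOne(session, Document("_id", 1)) // any write executed with the session
+ *         session.commitTransaction()
+ *     } catch (e: Exception) {
+ *         session.abortTransaction() // roll back on failure
+ *         throw e
+ *     }
+ * }
+ * ```
+ *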
+ * This method suspends until the operation has completed on the server.
+ */
+ public suspend fun abortTransaction() {
+ wrapped.abortTransaction().awaitFirstOrNull()
+ }
+
+ /**
+ * Gets the timeout context to use with this session:
+ * * `MongoClientSettings#getTimeoutMS`
+ * * `ClientSessionOptions#getDefaultTimeout`
+ *
+ * Note: For internal use only
+ *
+ * @return the timeout to use
+ * @since 5.2
+ */
+ public override fun getTimeoutContext(): TimeoutContext? = wrapped.timeoutContext
+}
+
+/**
+ * maxCommitTime extension function
+ *
+ * @param maxCommitTime time in milliseconds
+ * @return the options
+ */
+public fun TransactionOptions.Builder.maxCommitTime(maxCommitTime: Long): TransactionOptions.Builder =
+ this.apply { maxCommitTime(maxCommitTime, TimeUnit.MILLISECONDS) }
diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/DistinctFlow.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/DistinctFlow.kt
new file mode 100644
index 00000000000..10eef030429
--- /dev/null
+++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/DistinctFlow.kt
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.coroutine
+
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
+import com.mongodb.client.cursor.TimeoutMode
+import com.mongodb.client.model.Collation
+import com.mongodb.lang.Nullable
+import com.mongodb.reactivestreams.client.DistinctPublisher
+import java.util.concurrent.TimeUnit
+import kotlinx.coroutines.flow.Flow
+import kotlinx.coroutines.flow.FlowCollector
+import kotlinx.coroutines.reactive.asFlow
+import org.bson.BsonValue
+import org.bson.conversions.Bson
+
+/**
+ * Flow implementation for distinct operations.
+ *
+ * @param T The type of the result.
+ * @see [Distinct command](https://www.mongodb.com/docs/manual/reference/command/distinct/)
+ */
+public class DistinctFlow(private val wrapped: DistinctPublisher) : Flow by wrapped.asFlow() {
+
+ /**
+ * Sets the number of documents to return per batch.
+ *
+ * @param batchSize the batch size
+ * @return this
+ * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize)
+ */
+ public fun batchSize(batchSize: Int): DistinctFlow = apply { wrapped.batchSize(batchSize) }
+
+ /**
+ * Sets the timeoutMode for the cursor.
+ *
+ * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via
+ * [MongoCollection]
+ *
+ * @param timeoutMode the timeout mode
+ * @return this
+ * @since 5.2
+ */
+ @Alpha(Reason.CLIENT)
+ public fun timeoutMode(timeoutMode: TimeoutMode): DistinctFlow = apply { wrapped.timeoutMode(timeoutMode) }
+
+ /**
+ * Sets the query filter to apply to the query.
+ *
+ * @param filter the filter, which may be null.
+ * @return this + * @see [Filter results](https://www.mongodb.com/docs/manual/reference/method/db.collection.find/) + */ + public fun filter(filter: Bson?): DistinctFlow = apply { wrapped.filter(filter) } + + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time + * @param timeUnit the time unit, which defaults to Milliseconds + * @return this + */ + public fun maxTime(maxTime: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): DistinctFlow = apply { + wrapped.maxTime(maxTime, timeUnit) + } + + /** + * Sets the collation options + * + * A null value represents the server default. + * + * @param collation the collation options to use + * @return this + */ + public fun collation(collation: Collation?): DistinctFlow = apply { wrapped.collation(collation) } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + */ + public fun comment(comment: String?): DistinctFlow = apply { wrapped.comment(comment) } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + */ + public fun comment(comment: BsonValue?): DistinctFlow = apply { wrapped.comment(comment) } + + /** + * Sets the hint for which index to use. A null value means no hint is set. + * + * @param hint the hint + * @return this + */ + public fun hint(@Nullable hint: Bson?): DistinctFlow = apply { wrapped.hint(hint) } + + /** + * Sets the hint for which index to use. A null value means no hint is set. + * + * Note: If [DistinctFlow.hint] is set that will be used instead of any hint string. + * + * @param hint the name of the index which should be used for the operation + * @return this + */ + public fun hintString(@Nullable hint: String?): DistinctFlow = apply { wrapped.hintString(hint) } + + public override suspend fun collect(collector: FlowCollector): Unit = wrapped.asFlow().collect(collector) +} diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/FindFlow.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/FindFlow.kt new file mode 100644 index 00000000000..f0afb4e9937 --- /dev/null +++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/FindFlow.kt @@ -0,0 +1,306 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.mongodb.kotlin.client.coroutine
+
+import com.mongodb.CursorType
+import com.mongodb.ExplainVerbosity
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
+import com.mongodb.client.cursor.TimeoutMode
+import com.mongodb.client.model.Collation
+import com.mongodb.reactivestreams.client.FindPublisher
+import java.util.concurrent.TimeUnit
+import kotlinx.coroutines.flow.Flow
+import kotlinx.coroutines.flow.FlowCollector
+import kotlinx.coroutines.reactive.asFlow
+import kotlinx.coroutines.reactive.awaitSingle
+import org.bson.BsonValue
+import org.bson.Document
+import org.bson.conversions.Bson
+
+/**
+ * Flow implementation for find operations.
+ *
+ * @param T The type of the result.
+ * @see [Collection filter](https://www.mongodb.com/docs/manual/reference/method/db.collection.find/)
+ */
+public class FindFlow(private val wrapped: FindPublisher) : Flow by wrapped.asFlow() {
+
+ /**
+ * Sets the number of documents to return per batch.
+ *
+ * @param batchSize the batch size
+ * @return this
+ * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize)
+ */
+ public fun batchSize(batchSize: Int): FindFlow = apply { wrapped.batchSize(batchSize) }
+
+ /**
+ * Sets the timeoutMode for the cursor.
+ *
+ * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via
+ * [MongoCollection]
+ *
+ * If the `timeout` is set then:
+ * * For non-tailable cursors, the default value of timeoutMode is [TimeoutMode.CURSOR_LIFETIME]
+ * * For tailable cursors, the default value of timeoutMode is [TimeoutMode.ITERATION] and it's an error to
+ * configure it as [TimeoutMode.CURSOR_LIFETIME]
+ *
+ * @param timeoutMode the timeout mode
+ * @return this
+ * @since 5.2
+ */
+ @Alpha(Reason.CLIENT)
+ public fun timeoutMode(timeoutMode: TimeoutMode): FindFlow = apply { wrapped.timeoutMode(timeoutMode) }
+
+ /**
+ * Sets the query filter to apply to the query.
+ *
+ * @param filter the filter.
+ * @return this
+ * @see [Collection filter](https://www.mongodb.com/docs/manual/reference/method/db.collection.find/)
+ */
+ public fun filter(filter: Bson?): FindFlow = apply { wrapped.filter(filter) }
+
+ /**
+ * Sets the limit to apply.
+ *
+ * @param limit the limit, which may be 0
+ * @return this
+ * @see [Cursor limit](https://www.mongodb.com/docs/manual/reference/method/cursor.limit/#cursor.limit)
+ */
+ public fun limit(limit: Int): FindFlow = apply { wrapped.limit(limit) }
+
+ /**
+ * Sets the number of documents to skip.
+ *
+ * @param skip the number of documents to skip
+ * @return this
+ * @see [Cursor skip](https://www.mongodb.com/docs/manual/reference/method/cursor.skip/#cursor.skip)
+ */
+ public fun skip(skip: Int): FindFlow = apply { wrapped.skip(skip) }
+
+ /**
+ * Sets the maximum execution time on the server for this operation.
+ *
+ * @param maxTime the max time
+ * @param timeUnit the time unit, which defaults to Milliseconds
+ * @return this
+ */
+ public fun maxTime(maxTime: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): FindFlow = apply {
+ wrapped.maxTime(maxTime, timeUnit)
+ }
+
+ /**
+ * The maximum amount of time for the server to wait on new documents to satisfy a tailable cursor query. This only
+ * applies to a TAILABLE_AWAIT cursor. When the cursor is not a TAILABLE_AWAIT cursor, this option is ignored.
+ *
+ * On servers >= 3.2, this option will be specified on the getMore command as "maxTimeMS".
+ * The default is no value: no "maxTimeMS" is sent to the server with the getMore command.
+ *
+ * On servers < 3.2, this option is ignored.
+ *
+ * A zero value will be ignored, and indicates that the driver should respect the server's default value.
+ *
+ * @param maxAwaitTime the max await time
+ * @param timeUnit the time unit, which defaults to Milliseconds
+ * @return this
+ * @see [Max Time](https://www.mongodb.com/docs/manual/reference/method/cursor.maxTimeMS/#cursor.maxTimeMS)
+ */
+ public fun maxAwaitTime(maxAwaitTime: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): FindFlow = apply {
+ wrapped.maxAwaitTime(maxAwaitTime, timeUnit)
+ }
+
+ /**
+ * Sets a document describing the fields to return for all matching documents.
+ *
+ * @param projection the projection document.
+ * @return this
+ */
+ public fun projection(projection: Bson?): FindFlow = apply { wrapped.projection(projection) }
+
+ /**
+ * Sets the sort criteria to apply to the query.
+ *
+ * @param sort the sort criteria.
+ * @return this
+ * @see [Cursor sort](https://www.mongodb.com/docs/manual/reference/method/cursor.sort/)
+ */
+ public fun sort(sort: Bson?): FindFlow = apply { wrapped.sort(sort) }
+
+ /**
+ * The server normally times out idle cursors after an inactivity period (10 minutes) to prevent excess memory use.
+ * Set this option to prevent that.
+ *
+ * @param noCursorTimeout true if cursor timeout is disabled
+ * @return this
+ */
+ public fun noCursorTimeout(noCursorTimeout: Boolean): FindFlow = apply {
+ wrapped.noCursorTimeout(noCursorTimeout)
+ }
+
+ /**
+ * Get partial results from a sharded cluster if one or more shards are unreachable (instead of throwing an error).
+ *
+ * @param partial if partial results for sharded clusters is enabled
+ * @return this
+ */
+ public fun partial(partial: Boolean): FindFlow = apply { wrapped.partial(partial) }
+
+ /**
+ * Sets the cursor type.
+ *
+ * @param cursorType the cursor type
+ * @return this
+ */
+ public fun cursorType(cursorType: CursorType): FindFlow = apply { wrapped.cursorType(cursorType) }
+
+ /**
+ * Sets the collation options
+ *
+ * A null value represents the server default.
+ *
+ * @param collation the collation options to use
+ * @return this
+ */
+ public fun collation(collation: Collation?): FindFlow = apply { wrapped.collation(collation) }
+
+ /**
+ * Sets the comment for this operation. A null value means no comment is set.
+ *
+ * @param comment the comment
+ * @return this
+ */
+ public fun comment(comment: String?): FindFlow = apply { wrapped.comment(comment) }
+
+ /**
+ * Sets the comment for this operation. A null value means no comment is set.
+ *
+ * The comment can be any valid BSON type for server versions 4.4 and above. Server versions between 3.6 and 4.2
+ * only support string as comment, and providing a non-string type will result in a server-side error.
+ *
+ * @param comment the comment
+ * @return this
+ */
+ public fun comment(comment: BsonValue?): FindFlow = apply { wrapped.comment(comment) }
+
+ /**
+ * Sets the hint for which index to use. A null value means no hint is set.
+ *
+ * @param hint the hint
+ * @return this
+ */
+ public fun hint(hint: Bson?): FindFlow = apply { wrapped.hint(hint) }
+
+ /**
+ * Sets the hint to apply.
+ *
+ * Note: If [FindFlow.hint] is set that will be used instead of any hint string.
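+ *
+ * For example, forcing a named index (the `collection` handle and the index name `status_1` are illustrative
+ * assumptions):
+ * ```
+ * collection.find(Filters.eq("status", "A")).hintString("status_1")
+ * ```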
+ *
+ * @param hint the name of the index which should be used for the operation
+ * @return this
+ */
+ public fun hintString(hint: String?): FindFlow = apply { wrapped.hintString(hint) }
+
+ /**
+ * Add top-level variables to the operation. A null value means no variables are set.
+ *
+ * Allows for improved command readability by separating the variables from the query text.
+ *
+ * @param variables the variables for the find operation
+ * @return this
+ */
+ public fun let(variables: Bson?): FindFlow = apply { wrapped.let(variables) }
+
+ /**
+ * Sets the exclusive upper bound for a specific index. A null value means no max is set.
+ *
+ * @param max the max
+ * @return this
+ */
+ public fun max(max: Bson?): FindFlow = apply { wrapped.max(max) }
+
+ /**
+ * Sets the minimum inclusive lower bound for a specific index. A null value means no min is set.
+ *
+ * @param min the min
+ * @return this
+ */
+ public fun min(min: Bson?): FindFlow = apply { wrapped.min(min) }
+
+ /**
+ * Sets the returnKey. If true the find operation will return only the index keys in the resulting documents.
+ *
+ * @param returnKey the returnKey
+ * @return this
+ */
+ public fun returnKey(returnKey: Boolean): FindFlow = apply { wrapped.returnKey(returnKey) }
+
+ /**
+ * Sets the showRecordId. Set to true to add a field `$recordId` to the returned documents.
+ *
+ * @param showRecordId the showRecordId
+ * @return this
+ */
+ public fun showRecordId(showRecordId: Boolean): FindFlow = apply { wrapped.showRecordId(showRecordId) }
+
+ /**
+ * Enables writing to temporary files on the server. When set to true, the server can write temporary data to disk
+ * while executing the find operation.
+ *
+ * This option is sent only if the caller explicitly sets it to true.
+ *
+ * @param allowDiskUse the allowDiskUse
+ * @return this
+ */
+ public fun allowDiskUse(allowDiskUse: Boolean?): FindFlow = apply { wrapped.allowDiskUse(allowDiskUse) }
+
+ /**
+ * Explain the execution plan for this operation with the given verbosity level
+ *
+ * @param verbosity the verbosity of the explanation
+ * @return the execution plan
+ * @see [Explain command](https://www.mongodb.com/docs/manual/reference/command/explain/)
+ */
+ @JvmName("explainDocument")
+ public suspend fun explain(verbosity: ExplainVerbosity? = null): Document = explain(Document::class.java, verbosity)
+
+ /**
+ * Explain the execution plan for this operation with the given verbosity level
+ *
+ * @param R the type of the document class
+ * @param resultClass the result document type.
+ * @param verbosity the verbosity of the explanation
+ * @return the execution plan
+ * @see [Explain command](https://www.mongodb.com/docs/manual/reference/command/explain/)
+ */
+ public suspend fun explain(resultClass: Class, verbosity: ExplainVerbosity? = null): R =
+ if (verbosity == null) wrapped.explain(resultClass).awaitSingle()
+ else wrapped.explain(resultClass, verbosity).awaitSingle()
+
+ /**
+ * Explain the execution plan for this operation with the given verbosity level
+ *
+ * @param R the type of the document class
+ * @param verbosity the verbosity of the explanation
+ * @return the execution plan
+ * @see [Explain command](https://www.mongodb.com/docs/manual/reference/command/explain/)
+ */
+ public suspend inline fun explain(verbosity: ExplainVerbosity?
= null): R = + explain(R::class.java, verbosity) + + public override suspend fun collect(collector: FlowCollector): Unit = wrapped.asFlow().collect(collector) +} diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionNamesFlow.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionNamesFlow.kt new file mode 100644 index 00000000000..2dc64e870a7 --- /dev/null +++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionNamesFlow.kt @@ -0,0 +1,93 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.kotlin.client.coroutine + +import com.mongodb.reactivestreams.client.ListCollectionNamesPublisher +import java.util.concurrent.TimeUnit +import kotlinx.coroutines.flow.Flow +import kotlinx.coroutines.flow.FlowCollector +import kotlinx.coroutines.reactive.asFlow +import org.bson.BsonValue +import org.bson.conversions.Bson + +/** + * Flow for listing collection names. + * + * @see [List collections](https://www.mongodb.com/docs/manual/reference/command/listCollections/) + * @since 5.0 + */ +public class ListCollectionNamesFlow(private val wrapped: ListCollectionNamesPublisher) : + Flow by wrapped.asFlow() { + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time + * @param timeUnit the time unit, defaults to Milliseconds + * @return this + * @see [Max Time](https://www.mongodb.com/docs/manual/reference/operator/meta/maxTimeMS/) + */ + public fun maxTime(maxTime: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): ListCollectionNamesFlow = apply { + wrapped.maxTime(maxTime, timeUnit) + } + + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size + * @return this + * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize) + */ + public fun batchSize(batchSize: Int): ListCollectionNamesFlow = apply { wrapped.batchSize(batchSize) } + + /** + * Sets the query filter to apply to the returned database names. + * + * @param filter the filter, which may be null. + * @return this + */ + public fun filter(filter: Bson?): ListCollectionNamesFlow = apply { wrapped.filter(filter) } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + */ + public fun comment(comment: String?): ListCollectionNamesFlow = apply { wrapped.comment(comment) } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + */ + public fun comment(comment: BsonValue?): ListCollectionNamesFlow = apply { wrapped.comment(comment) } + + /** + * Sets the `authorizedCollections` field of the `listCollections` command. 
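+ *
+ * A sketch of listing names without the `listCollections` privilege (the `database` handle is an illustrative
+ * assumption):
+ * ```
+ * database.listCollectionNames().authorizedCollections(true).collect { name -> println(name) }
+ * ```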
+ * + * @param authorizedCollections If `true`, allows executing the `listCollections` command, which has the `nameOnly` + * field set to `true`, without having the + * [`listCollections` privilege](https://docs.mongodb.com/manual/reference/privilege-actions/#mongodb-authaction-listCollections) + * on the database resource. + * @return `this`. + */ + public fun authorizedCollections(authorizedCollections: Boolean): ListCollectionNamesFlow = apply { + wrapped.authorizedCollections(authorizedCollections) + } + + public override suspend fun collect(collector: FlowCollector): Unit = wrapped.asFlow().collect(collector) +} diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionsFlow.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionsFlow.kt new file mode 100644 index 00000000000..a6dfd770e08 --- /dev/null +++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionsFlow.kt @@ -0,0 +1,99 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.kotlin.client.coroutine + +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason +import com.mongodb.client.cursor.TimeoutMode +import com.mongodb.reactivestreams.client.ListCollectionsPublisher +import java.util.concurrent.TimeUnit +import kotlinx.coroutines.flow.Flow +import kotlinx.coroutines.flow.FlowCollector +import kotlinx.coroutines.reactive.asFlow +import org.bson.BsonValue +import org.bson.conversions.Bson + +/** + * Flow implementation for list collection operations. + * + * @param T The type of the result. + * @see [List collections](https://www.mongodb.com/docs/manual/reference/command/listCollections/) + */ +public class ListCollectionsFlow(private val wrapped: ListCollectionsPublisher) : + Flow by wrapped.asFlow() { + + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size + * @return this + * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize) + */ + public fun batchSize(batchSize: Int): ListCollectionsFlow = apply { wrapped.batchSize(batchSize) } + + /** + * Sets the timeoutMode for the cursor. + * + * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via + * [MongoCollection] + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun timeoutMode(timeoutMode: TimeoutMode): ListCollectionsFlow = apply { + wrapped.timeoutMode(timeoutMode) + } + + /** + * Sets the maximum execution time on the server for this operation. 
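+ *
+ * For example, capping the command at five seconds (the `database` handle is an illustrative assumption):
+ * ```
+ * database.listCollections().maxTime(5, TimeUnit.SECONDS)
+ * ```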
+ *
+ * @param maxTime the max time
+ * @param timeUnit the time unit, defaults to Milliseconds
+ * @return this
+ * @see [Max Time](https://www.mongodb.com/docs/manual/reference/operator/meta/maxTimeMS/)
+ */
+ public fun maxTime(maxTime: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): ListCollectionsFlow = apply {
+ wrapped.maxTime(maxTime, timeUnit)
+ }
+
+ /**
+ * Sets the query filter to apply to the returned collections.
+ *
+ * @param filter the filter, which may be null.
+ * @return this
+ */
+ public fun filter(filter: Bson?): ListCollectionsFlow = apply { wrapped.filter(filter) }
+
+ /**
+ * Sets the comment for this operation. A null value means no comment is set.
+ *
+ * @param comment the comment
+ * @return this
+ */
+ public fun comment(comment: String?): ListCollectionsFlow = apply { wrapped.comment(comment) }
+
+ /**
+ * Sets the comment for this operation. A null value means no comment is set.
+ *
+ * @param comment the comment
+ * @return this
+ */
+ public fun comment(comment: BsonValue?): ListCollectionsFlow = apply { wrapped.comment(comment) }
+
+ public override suspend fun collect(collector: FlowCollector): Unit = wrapped.asFlow().collect(collector)
+}
diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListDatabasesFlow.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListDatabasesFlow.kt
new file mode 100644
index 00000000000..473cde087b6
--- /dev/null
+++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListDatabasesFlow.kt
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.coroutine
+
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
+import com.mongodb.client.cursor.TimeoutMode
+import com.mongodb.reactivestreams.client.ListDatabasesPublisher
+import java.util.concurrent.TimeUnit
+import kotlinx.coroutines.flow.Flow
+import kotlinx.coroutines.flow.FlowCollector
+import kotlinx.coroutines.reactive.asFlow
+import org.bson.BsonValue
+import org.bson.conversions.Bson
+
+/**
+ * Flow implementation for list database operations.
+ *
+ * @param T The type of the result.
+ * @see [List databases](https://www.mongodb.com/docs/manual/reference/command/listDatabases/)
+ */
+public class ListDatabasesFlow(private val wrapped: ListDatabasesPublisher) : Flow by wrapped.asFlow() {
+
+ /**
+ * Sets the number of documents to return per batch.
+ *
+ * @param batchSize the batch size
+ * @return this
+ * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize)
+ */
+ public fun batchSize(batchSize: Int): ListDatabasesFlow = apply { wrapped.batchSize(batchSize) }
+
+ /**
+ * Sets the timeoutMode for the cursor.
+ * + * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via + * [MongoCollection] + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun timeoutMode(timeoutMode: TimeoutMode): ListDatabasesFlow = apply { wrapped.timeoutMode(timeoutMode) } + + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time + * @param timeUnit the time unit, defaults to Milliseconds + * @return this + * @see [Max Time](https://www.mongodb.com/docs/manual/reference/operator/meta/maxTimeMS/) + */ + public fun maxTime(maxTime: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): ListDatabasesFlow = apply { + wrapped.maxTime(maxTime, timeUnit) + } + + /** + * Sets the query filter to apply to the returned database names. + * + * @param filter the filter, which may be null. + * @return this + */ + public fun filter(filter: Bson?): ListDatabasesFlow = apply { wrapped.filter(filter) } + /** + * Sets the nameOnly flag that indicates whether the command should return just the database names or return the + * database names and size information. + * + * @param nameOnly the nameOnly flag, which may be null + * @return this + */ + public fun nameOnly(nameOnly: Boolean?): ListDatabasesFlow = apply { wrapped.nameOnly(nameOnly) } + + /** + * Sets the authorizedDatabasesOnly flag that indicates whether the command should return just the databases which + * the user is authorized to see. + * + * @param authorizedDatabasesOnly the authorizedDatabasesOnly flag, which may be null + * @return this + */ + public fun authorizedDatabasesOnly(authorizedDatabasesOnly: Boolean?): ListDatabasesFlow = apply { + wrapped.authorizedDatabasesOnly(authorizedDatabasesOnly) + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + */ + public fun comment(comment: String?): ListDatabasesFlow = apply { wrapped.comment(comment) } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + */ + public fun comment(comment: BsonValue?): ListDatabasesFlow = apply { wrapped.comment(comment) } + + public override suspend fun collect(collector: FlowCollector): Unit = wrapped.asFlow().collect(collector) +} diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListIndexesFlow.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListIndexesFlow.kt new file mode 100644 index 00000000000..b92453158a1 --- /dev/null +++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListIndexesFlow.kt @@ -0,0 +1,87 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.kotlin.client.coroutine + +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason +import com.mongodb.client.cursor.TimeoutMode +import com.mongodb.reactivestreams.client.ListIndexesPublisher +import java.util.concurrent.TimeUnit +import kotlinx.coroutines.flow.Flow +import kotlinx.coroutines.flow.FlowCollector +import kotlinx.coroutines.reactive.asFlow +import org.bson.BsonValue + +/** + * Flow implementation for list index operations. + * + * @param T The type of the result. + * @see [List indexes](https://www.mongodb.com/docs/manual/reference/command/listIndexes/) + */ +public class ListIndexesFlow(private val wrapped: ListIndexesPublisher) : Flow by wrapped.asFlow() { + + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size + * @return this + * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize) + */ + public fun batchSize(batchSize: Int): ListIndexesFlow = apply { wrapped.batchSize(batchSize) } + + /** + * Sets the timeoutMode for the cursor. + * + * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via + * [MongoCollection] + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun timeoutMode(timeoutMode: TimeoutMode): ListIndexesFlow = apply { wrapped.timeoutMode(timeoutMode) } + + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time + * @param timeUnit the time unit, defaults to Milliseconds + * @return this + * @see [Max Time](https://www.mongodb.com/docs/manual/reference/operator/meta/maxTimeMS/) + */ + public fun maxTime(maxTime: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): ListIndexesFlow = apply { + wrapped.maxTime(maxTime, timeUnit) + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + */ + public fun comment(comment: String?): ListIndexesFlow = apply { wrapped.comment(comment) } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + */ + public fun comment(comment: BsonValue?): ListIndexesFlow = apply { wrapped.comment(comment) } + + public override suspend fun collect(collector: FlowCollector): Unit = wrapped.asFlow().collect(collector) +} diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListSearchIndexesFlow.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListSearchIndexesFlow.kt new file mode 100644 index 00000000000..1c7fe4ded5e --- /dev/null +++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/ListSearchIndexesFlow.kt @@ -0,0 +1,157 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.kotlin.client.coroutine + +import com.mongodb.ExplainVerbosity +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason +import com.mongodb.client.cursor.TimeoutMode +import com.mongodb.client.model.Collation +import com.mongodb.reactivestreams.client.ListSearchIndexesPublisher +import java.util.concurrent.TimeUnit +import kotlinx.coroutines.flow.Flow +import kotlinx.coroutines.flow.FlowCollector +import kotlinx.coroutines.reactive.asFlow +import kotlinx.coroutines.reactive.awaitSingle +import org.bson.BsonValue +import org.bson.Document + +/** + * Flow implementation for list Atlas Search index operations. + * + * @param T The type of the result. + * @see [List Atlas Search indexes] + * (https://www.mongodb.com/docs/manual/reference/operator/aggregation/listSearchIndexes) + */ +public class ListSearchIndexesFlow(private val wrapped: ListSearchIndexesPublisher) : + Flow by wrapped.asFlow() { + + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size + * @return this + * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize) + */ + public fun batchSize(batchSize: Int): ListSearchIndexesFlow = apply { wrapped.batchSize(batchSize) } + + /** + * Sets the timeoutMode for the cursor. + * + * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via + * [MongoCollection] + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun timeoutMode(timeoutMode: TimeoutMode): ListSearchIndexesFlow = apply { + wrapped.timeoutMode(timeoutMode) + } + + /** + * Sets an Atlas Search index name for this operation. + * + * @param indexName Atlas Search index name. + * @return this. + */ + public fun name(indexName: String): ListSearchIndexesFlow = apply { wrapped.name(indexName) } + + /** + * Enables writing to temporary files. A null value indicates that it's unspecified. + * + * @param allowDiskUse true if writing to temporary files is enabled. + * @return this. + * @see [Aggregation command](https://www.mongodb.com/docs/manual/reference/command/aggregate/) + */ + public fun allowDiskUse(allowDiskUse: Boolean?): ListSearchIndexesFlow = apply { + wrapped.allowDiskUse(allowDiskUse) + } + + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time. + * @param timeUnit the time unit, defaults to Milliseconds. + * @return this. + */ + public fun maxTime(maxTime: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): ListSearchIndexesFlow = apply { + wrapped.maxTime(maxTime, timeUnit) + } + + /** + * Sets the collation options. + * + * A null value represents the server default. + * + * @param collation the collation options to use. + * @return this. + */ + public fun collation(collation: Collation?): ListSearchIndexesFlow = apply { wrapped.collation(collation) } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment. + * @return this. + */ + public fun comment(comment: String?): ListSearchIndexesFlow = apply { wrapped.comment(comment) } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment. + * @return this. 
+ */
+ public fun comment(comment: BsonValue?): ListSearchIndexesFlow = apply { wrapped.comment(comment) }
+
+ /**
+ * Explain the execution plan for this operation with the given verbosity level.
+ *
+ * @param verbosity the verbosity of the explanation.
+ * @return the execution plan.
+ * @see [Explain command](https://www.mongodb.com/docs/manual/reference/command/explain/)
+ */
+ @JvmName("explainDocument")
+ public suspend fun explain(verbosity: ExplainVerbosity? = null): Document = explain(Document::class.java, verbosity)
+
+ /**
+ * Explain the execution plan for this operation with the given verbosity level.
+ *
+ * @param R the type of the document class.
+ * @param resultClass the result document type.
+ * @param verbosity the verbosity of the explanation.
+ * @return the execution plan.
+ * @see [Explain command](https://www.mongodb.com/docs/manual/reference/command/explain/)
+ */
+ public suspend fun explain(resultClass: Class, verbosity: ExplainVerbosity? = null): R =
+ if (verbosity == null) wrapped.explain(resultClass).awaitSingle()
+ else wrapped.explain(resultClass, verbosity).awaitSingle()
+
+ /**
+ * Explain the execution plan for this operation with the given verbosity level.
+ *
+ * @param R the type of the document class.
+ * @param verbosity the verbosity of the explanation.
+ * @return the execution plan.
+ * @see [Explain command](https://www.mongodb.com/docs/manual/reference/command/explain/)
+ */
+ public suspend inline fun explain(verbosity: ExplainVerbosity? = null): R =
+ explain(R::class.java, verbosity)
+
+ public override suspend fun collect(collector: FlowCollector): Unit = wrapped.asFlow().collect(collector)
+}
diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MapReduceFlow.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MapReduceFlow.kt
new file mode 100644
index 00000000000..c43fe3842d5
--- /dev/null
+++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MapReduceFlow.kt
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@file:Suppress("DEPRECATION")
+
+package com.mongodb.kotlin.client.coroutine
+
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
+import com.mongodb.client.cursor.TimeoutMode
+import com.mongodb.client.model.Collation
+import com.mongodb.client.model.MapReduceAction
+import com.mongodb.reactivestreams.client.MapReducePublisher
+import java.util.concurrent.TimeUnit
+import kotlinx.coroutines.flow.Flow
+import kotlinx.coroutines.flow.FlowCollector
+import kotlinx.coroutines.reactive.asFlow
+import kotlinx.coroutines.reactive.awaitFirstOrNull
+import org.bson.conversions.Bson
+
+/**
+ * Flow implementation for map reduce operations.
+ *
+ * By default, the [MapReduceFlow] emits the results inline. You can write map-reduce output to a collection by using
+ * the [collectionName] and [toCollection] methods.
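+ *
+ * A sketch of an inline map-reduce (the JavaScript functions and the `collection` handle are illustrative
+ * assumptions):
+ * ```
+ * collection.mapReduce<Document>(
+ *         "function () { emit(this.category, 1) }", // map
+ *         "function (key, values) { return Array.sum(values) }") // reduce
+ *     .collect { result -> println(result.toJson()) }
+ * ```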
+ *
+ * Note: Starting in MongoDB 5.0, map-reduce is deprecated; prefer Aggregation instead
+ *
+ * @param T The type of the result.
+ * @see [Map Reduce](https://www.mongodb.com/docs/manual/reference/command/mapReduce/)
+ */
+@Deprecated("Map Reduce has been deprecated. Use Aggregation instead", replaceWith = ReplaceWith(""))
+public class MapReduceFlow(private val wrapped: MapReducePublisher) : Flow by wrapped.asFlow() {
+
+ /**
+ * Sets the number of documents to return per batch.
+ *
+ * @param batchSize the batch size
+ * @return this
+ * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize)
+ */
+ public fun batchSize(batchSize: Int): MapReduceFlow = apply { wrapped.batchSize(batchSize) }
+
+ /**
+ * Sets the timeoutMode for the cursor.
+ *
+ * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via
+ * [MongoCollection]
+ *
+ * @param timeoutMode the timeout mode
+ * @return this
+ * @since 5.2
+ */
+ @Alpha(Reason.CLIENT)
+ public fun timeoutMode(timeoutMode: TimeoutMode): MapReduceFlow = apply { wrapped.timeoutMode(timeoutMode) }
+
+ /**
+ * Aggregates documents to a collection according to the specified map-reduce function with the given options, which
+ * must not emit results inline. Calling this method is the preferred alternative to consuming this [MapReduceFlow],
+ * because this method does what is explicitly requested without executing implicit operations.
+ *
+ * @throws IllegalStateException if a collection name to write the results to has not been specified
+ * @see collectionName
+ */
+ public suspend fun toCollection() {
+ wrapped.toCollection().awaitFirstOrNull()
+ }
+
+ /**
+ * Sets the collectionName for the output of the MapReduce
+ *
+ * The default action is to replace the collection if it exists; to change this, use [action].
+ *
+ * @param collectionName the name of the collection that you want the map-reduce operation to write its output to.
+ * @return this
+ * @see toCollection
+ */
+ public fun collectionName(collectionName: String): MapReduceFlow = apply {
+ wrapped.collectionName(collectionName)
+ }
+
+ /**
+ * Sets the JavaScript function that follows the reduce method and modifies the output.
+ *
+ * @param finalizeFunction the JavaScript function that follows the reduce method and modifies the output.
+ * @return this
+ * @see
+ * [Requirements for the finalize Function](https://www.mongodb.com/docs/manual/reference/command/mapReduce/#mapreduce-finalize-cmd)
+ */
+ public fun finalizeFunction(finalizeFunction: String?): MapReduceFlow = apply {
+ wrapped.finalizeFunction(finalizeFunction)
+ }
+
+ /**
+ * Sets the global variables that are accessible in the map, reduce and finalize functions.
+ *
+ * @param scope the global variables that are accessible in the map, reduce and finalize functions.
+ * @return this
+ * @see [mapReduce command](https://www.mongodb.com/docs/manual/reference/command/mapReduce)
+ */
+ public fun scope(scope: Bson?): MapReduceFlow = apply { wrapped.scope(scope) }
+
+ /**
+ * Sets the sort criteria to apply to the query.
+ *
+ * @param sort the sort criteria
+ * @return this
+ * @see [Sort results](https://www.mongodb.com/docs/manual/reference/method/cursor.sort/)
+ */
+ public fun sort(sort: Bson?): MapReduceFlow = apply { wrapped.sort(sort) }
+
+ /**
+ * Sets the query filter to apply to the query.
+ *
+ * @param filter the filter to apply to the query.
+ * @return this
+ * @see [Filter results](https://www.mongodb.com/docs/manual/reference/method/db.collection.find/)
+ */
+ public fun filter(filter: Bson?): MapReduceFlow = apply { wrapped.filter(filter) }
+
+ /**
+ * Sets the limit to apply.
+ *
+ * @param limit the limit
+ * @return this
+ * @see [Cursor limit](https://www.mongodb.com/docs/manual/reference/method/cursor.limit/#cursor.limit)
+ */
+ public fun limit(limit: Int): MapReduceFlow = apply { wrapped.limit(limit) }
+
+ /**
+ * Sets the flag that specifies whether to convert intermediate data into BSON format between the execution of the
+ * map and reduce functions. Defaults to false.
+ *
+ * @param jsMode the flag that specifies whether to convert intermediate data into BSON format between the execution
+ * of the map and reduce functions
+ * @return this
+ * @see [mapReduce command](https://www.mongodb.com/docs/manual/reference/command/mapReduce)
+ */
+ public fun jsMode(jsMode: Boolean): MapReduceFlow = apply { wrapped.jsMode(jsMode) }
+
+ /**
+ * Sets whether to include the timing information in the result information.
+ *
+ * @param verbose whether to include the timing information in the result information.
+ * @return this
+ */
+ public fun verbose(verbose: Boolean): MapReduceFlow = apply { wrapped.verbose(verbose) }
+
+ /**
+ * Sets the maximum execution time on the server for this operation.
+ *
+ * @param maxTime the max time
+ * @param timeUnit the time unit, defaults to Milliseconds
+ * @return this
+ * @see [Max Time](https://www.mongodb.com/docs/manual/reference/method/cursor.maxTimeMS/#cursor.maxTimeMS)
+ */
+ public fun maxTime(maxTime: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): MapReduceFlow = apply {
+ wrapped.maxTime(maxTime, timeUnit)
+ }
+
+ /**
+ * Specify the `MapReduceAction` to be used when writing to a collection.
+ *
+ * @param action a [com.mongodb.client.model.MapReduceAction] to perform on the collection
+ * @return this
+ */
+ public fun action(action: MapReduceAction): MapReduceFlow = apply { wrapped.action(action) }
+
+ /**
+ * Sets the name of the database to output into.
+ *
+ * @param databaseName the name of the database to output into.
+ * @return this
+ * @see
+ * [output with an action](https://www.mongodb.com/docs/manual/reference/command/mapReduce/#output-to-a-collection-with-an-action)
+ */
+ public fun databaseName(databaseName: String?): MapReduceFlow = apply { wrapped.databaseName(databaseName) }
+
+ /**
+ * Sets the bypass document level validation flag.
+ *
+ * Note: This only applies when an $out or $merge stage is specified.
+ *
+ * @param bypassDocumentValidation If true, allows the write to opt-out of document level validation.
+ * @return this
+ * @see [Aggregation command](https://www.mongodb.com/docs/manual/reference/command/aggregate/)
+ */
+ public fun bypassDocumentValidation(bypassDocumentValidation: Boolean?): MapReduceFlow = apply {
+ wrapped.bypassDocumentValidation(bypassDocumentValidation)
+ }
+
+ /**
+ * Sets the collation options
+ *
+ * A null value represents the server default.
+ *
+ * @param collation the collation options to use
+ * @return this
+ */
+ public fun collation(collation: Collation?): MapReduceFlow = apply { wrapped.collation(collation) }
+
+ /**
+ * Requests [MapReduceFlow] to start streaming data according to the specified map-reduce function with the given
+ * options.
+ * - If the map-reduce operation produces results inline, then emits them.
diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoClient.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoClient.kt
new file mode 100644
index 00000000000..64832903b40
--- /dev/null
+++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoClient.kt
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.coroutine
+
+import com.mongodb.ClientSessionOptions
+import com.mongodb.ConnectionString
+import com.mongodb.MongoClientSettings
+import com.mongodb.MongoDriverInformation
+import com.mongodb.connection.ClusterDescription
+import com.mongodb.lang.Nullable
+import com.mongodb.reactivestreams.client.MongoClient as JMongoClient
+import com.mongodb.reactivestreams.client.MongoClients as JMongoClients
+import java.io.Closeable
+import java.util.concurrent.TimeUnit
+
+/**
+ * A client-side representation of a MongoDB cluster.
+ *
+ * Instances can represent either a standalone MongoDB instance, a replica set, or a sharded cluster. Instances of
+ * this class are responsible for maintaining an up-to-date state of the cluster, and possibly caching resources
+ * related to this, including background threads for monitoring, and connection pools.
+ *
+ * Instances of this class serve as factories for [MongoDatabase] instances. Instances of this class can be created via
+ * the [MongoClient.create] helpers.
+ *
+ * @see MongoClient.create
+ */
+public class MongoClient(private val wrapped: JMongoClient) : MongoCluster(wrapped), Closeable {
+
+ /**
+ * A factory for [MongoClient] instances.
+ *
+ * @see MongoClient
+ * @since 4.10
+ */
+ public companion object Factory {
+ /**
+ * Create a new client with the given connection string as if by a call to [create].
+ *
+ * @param connectionString the connection string
+ * @return the client
+ */
+ public fun create(connectionString: String): MongoClient = create(ConnectionString(connectionString))
+
+ /**
+ * Create a new client with the given connection string.
+ *
+ * @param connectionString the connection string, defaults to `mongodb://localhost`.
+ * @param mongoDriverInformation any driver information to associate with the MongoClient
+ * @return the client
+ */
+ public fun create(
+ connectionString: ConnectionString = ConnectionString("mongodb://localhost"),
+ @Nullable mongoDriverInformation: MongoDriverInformation? = null
+ ): MongoClient {
+ return create(
+ MongoClientSettings.builder().applyConnectionString(connectionString).build(), mongoDriverInformation)
+ }
+
+ /**
+ * Create a new client with the given client settings.
+ *
+ * For each of the settings classes configurable via [MongoClientSettings], the connection string is applied by
+ * calling the `applyConnectionString` method on an instance of the setting's builder class, building the setting,
+ * and adding it to an instance of [com.mongodb.MongoClientSettings.Builder].
+ *
+ * @param settings the client settings
+ * @param mongoDriverInformation any driver information to associate with the MongoClient
+ * @return the client
+ */
+ public fun create(
+ settings: MongoClientSettings,
+ @Nullable mongoDriverInformation: MongoDriverInformation? = null
+ ): MongoClient {
+ val builder =
+ if (mongoDriverInformation == null) MongoDriverInformation.builder()
+ else MongoDriverInformation.builder(mongoDriverInformation)
+ return MongoClient(
+ JMongoClients.create(
+ settings, builder.driverName("kotlin").driverPlatform("kotlin/${KotlinVersion.CURRENT}").build()))
+ }
+ }
+
+ public override fun close(): Unit = wrapped.close()
+
+ /**
+ * Gets the current cluster description.
+ *
+ * This method will not block, meaning that it may return a [ClusterDescription] whose `clusterType` is unknown and
+ * whose [com.mongodb.connection.ServerDescription]s are all in the connecting state. If the application requires
+ * notifications after the driver has connected to a member of the cluster, it should register a
+ * [com.mongodb.event.ClusterListener] via the [com.mongodb.connection.ClusterSettings] in
+ * [com.mongodb.MongoClientSettings].
+ *
+ * @return the current cluster description
+ * @see com.mongodb.connection.ClusterSettings.Builder.addClusterListener
+ * @see com.mongodb.MongoClientSettings.Builder.applyToClusterSettings
+ */
+ public fun getClusterDescription(): ClusterDescription = wrapped.clusterDescription
+
+ /**
+ * Appends the provided [MongoDriverInformation] to the existing metadata.
+ *
+ * This enables frameworks and libraries to include identifying metadata (e.g., name, version, platform) which might
+ * be visible in the MongoD/MongoS logs. This can assist with diagnostics by making client identity visible to the
+ * server.
+ *
+ * **Note:** Metadata is limited to 512 bytes; any excess will be truncated.
+ *
+ * @param mongoDriverInformation the driver information to append to the existing metadata
+ * @since 5.6
+ */
+ public fun appendMetadata(mongoDriverInformation: MongoDriverInformation): Unit =
+ wrapped.appendMetadata(mongoDriverInformation)
+}
+
+/**
+ * ClientSessionOptions.Builder.defaultTimeout extension function
+ *
+ * @param defaultTimeout time in milliseconds
+ * @return the options
+ */
+public fun ClientSessionOptions.Builder.defaultTimeout(defaultTimeout: Long): ClientSessionOptions.Builder =
+ this.apply { defaultTimeout(defaultTimeout, TimeUnit.MILLISECONDS) }
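A short usage sketch for the factory above (the connection string is illustrative). Since `MongoClient` implements `Closeable`, `use` releases the monitoring threads and connection pools deterministically:

```kotlin
import kotlinx.coroutines.flow.toList
import kotlinx.coroutines.runBlocking

fun main() = runBlocking {
    // Create a client, list database names via the Flow-based API, then close the client.
    MongoClient.create("mongodb://localhost:27017").use { client ->
        client.listDatabaseNames().toList().forEach(::println)
    }
}
```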
diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoCluster.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoCluster.kt
new file mode 100644
index 00000000000..cf25ac5e8bf
--- /dev/null
+++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoCluster.kt
@@ -0,0 +1,417 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.coroutine
+
+import com.mongodb.ClientBulkWriteException
+import com.mongodb.ClientSessionOptions
+import com.mongodb.MongoClientSettings
+import com.mongodb.MongoException
+import com.mongodb.ReadConcern
+import com.mongodb.ReadPreference
+import com.mongodb.WriteConcern
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
+import com.mongodb.client.model.bulk.ClientBulkWriteOptions
+import com.mongodb.client.model.bulk.ClientBulkWriteResult
+import com.mongodb.client.model.bulk.ClientNamespacedDeleteManyModel
+import com.mongodb.client.model.bulk.ClientNamespacedUpdateManyModel
+import com.mongodb.client.model.bulk.ClientNamespacedWriteModel
+import com.mongodb.reactivestreams.client.MongoCluster as JMongoCluster
+import java.util.concurrent.TimeUnit
+import kotlinx.coroutines.flow.Flow
+import kotlinx.coroutines.reactive.asFlow
+import kotlinx.coroutines.reactive.awaitSingle
+import org.bson.Document
+import org.bson.codecs.configuration.CodecRegistry
+import org.bson.conversions.Bson
+
+/**
+ * The client-side representation of the operations available on a MongoDB cluster.
+ *
+ * The originating [MongoClient] is responsible for the closing of resources. If the originating [MongoClient] is
+ * closed, then any operations will fail.
+ *
+ * @see MongoClient
+ * @since 5.2
+ */
+public open class MongoCluster protected constructor(private val wrapped: JMongoCluster) {
+
+ /** The codec registry. */
+ public val codecRegistry: CodecRegistry
+ get() = wrapped.codecRegistry
+
+ /** The read concern. */
+ public val readConcern: ReadConcern
+ get() = wrapped.readConcern
+
+ /** The read preference. */
+ public val readPreference: ReadPreference
+ get() = wrapped.readPreference
+
+ /** The write concern. */
+ public val writeConcern: WriteConcern
+ get() = wrapped.writeConcern
+
+ /**
+ * The time limit for the full execution of an operation.
+ *
+ * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`,
+ * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`.
+ * - `null` means that the timeout mechanism for operations will defer to using:
+ * - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to
+ * become available
+ * - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out.
+ * - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out.
+ * - `maxTimeMS`: The time limit for processing operations on a cursor. See:
+ * [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS).
+ * - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute.
+ * - `0` means infinite timeout.
+ * - `> 0` The time limit to use for the full execution of an operation.
+ *
+ * @param timeUnit the time unit, defaults to Milliseconds
+ * @return the optional timeout duration
+ */
+ @Alpha(Reason.CLIENT)
+ public fun timeout(timeUnit: TimeUnit = TimeUnit.MILLISECONDS): Long? = wrapped.getTimeout(timeUnit)
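As a sketch of how this accessor pairs with `withTimeout` (declared just below); the values are illustrative:

```kotlin
import java.util.concurrent.TimeUnit

fun timeoutDemo(cluster: MongoCluster) {
    // Derive a view whose operations must complete within two seconds overall.
    val timed = cluster.withTimeout(2, TimeUnit.SECONDS)
    println(timed.timeout(TimeUnit.MILLISECONDS)) // expected: 2000
}
```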
+
+ /**
+ * Create a new MongoCluster instance with a different codec registry.
+ *
+ * The [CodecRegistry] configured by this method is effectively treated by the driver as an instance of
+ * [org.bson.codecs.configuration.CodecProvider], which [CodecRegistry] extends. So there is no benefit to defining
+ * a class that implements [CodecRegistry]. Rather, an application should always create [CodecRegistry] instances
+ * using the factory methods in [org.bson.codecs.configuration.CodecRegistries].
+ *
+ * @param newCodecRegistry the new [org.bson.codecs.configuration.CodecRegistry] for the database
+ * @return a new MongoCluster instance with the different codec registry
+ * @see org.bson.codecs.configuration.CodecRegistries
+ */
+ public fun withCodecRegistry(newCodecRegistry: CodecRegistry): MongoCluster =
+ MongoCluster(wrapped.withCodecRegistry(newCodecRegistry))
+
+ /**
+ * Create a new MongoCluster instance with a different read preference.
+ *
+ * @param newReadPreference the new [ReadPreference] for the database
+ * @return a new MongoCluster instance with the different readPreference
+ */
+ public fun withReadPreference(newReadPreference: ReadPreference): MongoCluster =
+ MongoCluster(wrapped.withReadPreference(newReadPreference))
+
+ /**
+ * Create a new MongoCluster instance with a different read concern.
+ *
+ * @param newReadConcern the new [ReadConcern] for the database
+ * @return a new MongoCluster instance with the different ReadConcern
+ * @see [Read Concern](https://www.mongodb.com/docs/manual/reference/readConcern/)
+ */
+ public fun withReadConcern(newReadConcern: ReadConcern): MongoCluster =
+ MongoCluster(wrapped.withReadConcern(newReadConcern))
+
+ /**
+ * Create a new MongoCluster instance with a different write concern.
+ *
+ * @param newWriteConcern the new [WriteConcern] for the database
+ * @return a new MongoCluster instance with the different writeConcern
+ */
+ public fun withWriteConcern(newWriteConcern: WriteConcern): MongoCluster =
+ MongoCluster(wrapped.withWriteConcern(newWriteConcern))
+
+ /**
+ * Create a new MongoCluster instance with the set time limit for the full execution of an operation.
+ * - `0` means an infinite timeout
+ * - `> 0` The time limit to use for the full execution of an operation.
+ *
+ * @param timeout the timeout, which must be greater than or equal to 0
+ * @param timeUnit the time unit, defaults to Milliseconds
+ * @return a new MongoCluster instance with the set time limit for operations
+ * @see [MongoCluster.timeout]
+ * @since 5.2
+ */
+ @Alpha(Reason.CLIENT)
+ public fun withTimeout(timeout: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): MongoCluster =
+ MongoCluster(wrapped.withTimeout(timeout, timeUnit))
+
+ /**
+ * Gets a [MongoDatabase] instance for the given database name.
+ *
+ * @param databaseName the name of the database to retrieve
+ * @return a `MongoDatabase` representing the specified database
+ * @throws IllegalArgumentException if databaseName is invalid
+ * @see com.mongodb.MongoNamespace.checkDatabaseNameValidity
+ */
+ public fun getDatabase(databaseName: String): MongoDatabase = MongoDatabase(wrapped.getDatabase(databaseName))
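A small sketch combining the immutable `with*` derivations above with `getDatabase` (the database name is illustrative):

```kotlin
import com.mongodb.ReadPreference

fun analyticsDatabase(cluster: MongoCluster): MongoDatabase =
    // The original cluster view is untouched; the derived view prefers secondary reads.
    cluster.withReadPreference(ReadPreference.secondaryPreferred()).getDatabase("analytics")
```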
+
+ /**
+ * Creates a client session.
+ *
+ * Note: A ClientSession instance cannot be used concurrently in multiple operations.
+ *
+ * @param options the options for the client session
+ * @return the client session
+ */
+ public suspend fun startSession(
+ options: ClientSessionOptions = ClientSessionOptions.builder().build()
+ ): ClientSession = ClientSession(wrapped.startSession(options).awaitSingle())
+
+ /**
+ * Get a list of the database names
+ *
+ * @return an iterable containing all the names of all the databases
+ * @see [List Databases](https://www.mongodb.com/docs/manual/reference/command/listDatabases)
+ */
+ public fun listDatabaseNames(): Flow<String> = wrapped.listDatabaseNames().asFlow()
+
+ /**
+ * Get a list of the database names
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @return an iterable containing all the names of all the databases
+ * @see [List Databases](https://www.mongodb.com/docs/manual/reference/command/listDatabases)
+ */
+ public fun listDatabaseNames(clientSession: ClientSession): Flow<String> =
+ wrapped.listDatabaseNames(clientSession.wrapped).asFlow()
+
+ /**
+ * Gets the list of databases
+ *
+ * @return the list databases iterable interface
+ */
+ @JvmName("listDatabasesAsDocument")
+ public fun listDatabases(): ListDatabasesFlow<Document> = listDatabases(Document::class.java)
+
+ /**
+ * Gets the list of databases
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @return the list databases iterable interface
+ */
+ @JvmName("listDatabasesAsDocumentWithSession")
+ public fun listDatabases(clientSession: ClientSession): ListDatabasesFlow<Document> =
+ listDatabases(clientSession, Document::class.java)
+
+ /**
+ * Gets the list of databases
+ *
+ * @param T the type of the class to use
+ * @param resultClass the target document type of the iterable.
+ * @return the list databases iterable interface
+ */
+ public fun <T : Any> listDatabases(resultClass: Class<T>): ListDatabasesFlow<T> =
+ ListDatabasesFlow(wrapped.listDatabases(resultClass))
+
+ /**
+ * Gets the list of databases
+ *
+ * @param T the type of the class to use
+ * @param clientSession the client session with which to associate this operation
+ * @param resultClass the target document type of the iterable.
+ * @return the list databases iterable interface
+ */
+ public fun <T : Any> listDatabases(clientSession: ClientSession, resultClass: Class<T>): ListDatabasesFlow<T> =
+ ListDatabasesFlow(wrapped.listDatabases(clientSession.wrapped, resultClass))
+
+ /**
+ * Gets the list of databases
+ *
+ * @param T the type of the class to use
+ * @return the list databases iterable interface
+ */
+ public inline fun <reified T : Any> listDatabases(): ListDatabasesFlow<T> = listDatabases(T::class.java)
+
+ /**
+ * Gets the list of databases
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @param T the type of the class to use
+ * @return the list databases iterable interface
+ */
+ public inline fun <reified T : Any> listDatabases(clientSession: ClientSession): ListDatabasesFlow<T> =
+ listDatabases(clientSession, T::class.java)
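The listing APIs above in use, as a hedged sketch; `toList` comes from kotlinx-coroutines:

```kotlin
import kotlinx.coroutines.flow.toList
import org.bson.Document

suspend fun describeDatabases(cluster: MongoCluster) {
    // Names only (Flow<String>) ...
    cluster.listDatabaseNames().toList().forEach(::println)
    // ... or full metadata documents via the reified overload.
    cluster.listDatabases<Document>().toList().forEach { db -> println(db.toJson()) }
}
```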
+
+ /**
+ * Creates a change stream for this client.
+ *
+ * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+ * @return the change stream iterable
+ * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+ */
+ @JvmName("watchAsDocument")
+ public fun watch(pipeline: List<Bson> = emptyList()): ChangeStreamFlow<Document> =
+ watch(pipeline, Document::class.java)
+
+ /**
+ * Creates a change stream for this client.
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+ * @return the change stream iterable
+ * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+ */
+ @JvmName("watchAsDocumentWithSession")
+ public fun watch(clientSession: ClientSession, pipeline: List<Bson> = emptyList()): ChangeStreamFlow<Document> =
+ watch(clientSession, pipeline, Document::class.java)
+
+ /**
+ * Creates a change stream for this client.
+ *
+ * @param T the target document type of the iterable.
+ * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+ * @param resultClass the target document type of the iterable.
+ * @return the change stream iterable
+ * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+ */
+ public fun <T : Any> watch(pipeline: List<Bson> = emptyList(), resultClass: Class<T>): ChangeStreamFlow<T> =
+ ChangeStreamFlow(wrapped.watch(pipeline, resultClass))
+
+ /**
+ * Creates a change stream for this client.
+ *
+ * @param T the target document type of the iterable.
+ * @param clientSession the client session with which to associate this operation
+ * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+ * @param resultClass the target document type of the iterable.
+ * @return the change stream iterable
+ * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+ */
+ public fun <T : Any> watch(
+ clientSession: ClientSession,
+ pipeline: List<Bson> = emptyList(),
+ resultClass: Class<T>
+ ): ChangeStreamFlow<T> = ChangeStreamFlow(wrapped.watch(clientSession.wrapped, pipeline, resultClass))
+
+ /**
+ * Creates a change stream for this client.
+ *
+ * @param T the target document type of the iterable.
+ * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+ * @return the change stream iterable
+ * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+ */
+ public inline fun <reified T : Any> watch(pipeline: List<Bson> = emptyList()): ChangeStreamFlow<T> =
+ watch(pipeline, T::class.java)
+
+ /**
+ * Creates a change stream for this client.
+ *
+ * @param T the target document type of the iterable.
+ * @param clientSession the client session with which to associate this operation
+ * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+ * @return the change stream iterable
+ * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+ */
+ public inline fun <reified T : Any> watch(
+ clientSession: ClientSession,
+ pipeline: List<Bson> = emptyList()
+ ): ChangeStreamFlow<T> = watch(clientSession, pipeline, T::class.java)
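A sketch of consuming a cluster-wide change stream using the overloads above; the `$match` stage filters events server-side, and the event type is illustrative:

```kotlin
import com.mongodb.client.model.Aggregates
import com.mongodb.client.model.Filters
import kotlinx.coroutines.flow.collect
import org.bson.Document

suspend fun logInserts(cluster: MongoCluster) {
    val pipeline = listOf(Aggregates.match(Filters.eq("operationType", "insert")))
    // Suspends and emits one change event per matching operation until cancelled.
    cluster.watch<Document>(pipeline).collect { event -> println(event) }
}
```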
+
+ /**
+ * Executes a client-level bulk write operation. This method is functionally equivalent to
+ * [bulkWrite(models, options)][bulkWrite] with the
+ * [default options][ClientBulkWriteOptions.clientBulkWriteOptions].
+ *
+ * This operation supports [retryable writes][MongoClientSettings.getRetryWrites]. Depending on the number of
+ * `models`, encoded size of `models`, and the size limits in effect, executing this operation may require multiple
+ * `bulkWrite` commands. The eligibility for retries is determined per each `bulkWrite` command:
+ * [ClientNamespacedUpdateManyModel], [ClientNamespacedDeleteManyModel] in a command render it non-retryable.
+ *
+ * @param models The [individual write operations][ClientNamespacedWriteModel].
+ * @return The [ClientBulkWriteResult] if the operation is successful.
+ * @throws ClientBulkWriteException If and only if the operation is unsuccessful or partially unsuccessful, and
+ * there is at least one of the following pieces of information to report:
+ * [ClientBulkWriteException.getWriteConcernErrors], [ClientBulkWriteException.getWriteErrors],
+ * [ClientBulkWriteException.getPartialResult].
+ * @throws MongoException Only if the operation is unsuccessful.
+ * @see [BulkWrite command](https://www.mongodb.com/docs/manual/reference/command/bulkWrite/)
+ * @since 5.3
+ */
+ public suspend fun bulkWrite(models: List<ClientNamespacedWriteModel>): ClientBulkWriteResult =
+ wrapped.bulkWrite(models).awaitSingle()
+
+ /**
+ * Executes a client-level bulk write operation.
+ *
+ * This operation supports [retryable writes][MongoClientSettings.getRetryWrites]. Depending on the number of
+ * `models`, encoded size of `models`, and the size limits in effect, executing this operation may require multiple
+ * `bulkWrite` commands. The eligibility for retries is determined per each `bulkWrite` command:
+ * [ClientNamespacedUpdateManyModel], [ClientNamespacedDeleteManyModel] in a command render it non-retryable.
+ *
+ * @param models The [individual write operations][ClientNamespacedWriteModel].
+ * @param options The [options][ClientBulkWriteOptions].
+ * @return The [ClientBulkWriteResult] if the operation is successful.
+ * @throws ClientBulkWriteException If and only if the operation is unsuccessful or partially unsuccessful, and
+ * there is at least one of the following pieces of information to report:
+ * [ClientBulkWriteException.getWriteConcernErrors], [ClientBulkWriteException.getWriteErrors],
+ * [ClientBulkWriteException.getPartialResult].
+ * @throws MongoException Only if the operation is unsuccessful.
+ * @see [BulkWrite command](https://www.mongodb.com/docs/manual/reference/command/bulkWrite/)
+ * @since 5.3
+ */
+ public suspend fun bulkWrite(
+ models: List<ClientNamespacedWriteModel>,
+ options: ClientBulkWriteOptions
+ ): ClientBulkWriteResult = wrapped.bulkWrite(models, options).awaitSingle()
+
+ /**
+ * Executes a client-level bulk write operation. This method is functionally equivalent to
+ * [bulkWrite(clientSession, models, options)][bulkWrite] with the
+ * [default options][ClientBulkWriteOptions.clientBulkWriteOptions].
+ *
+ * This operation supports [retryable writes][MongoClientSettings.getRetryWrites]. Depending on the number of
+ * `models`, encoded size of `models`, and the size limits in effect, executing this operation may require multiple
+ * `bulkWrite` commands. The eligibility for retries is determined per each `bulkWrite` command:
+ * [ClientNamespacedUpdateManyModel], [ClientNamespacedDeleteManyModel] in a command render it non-retryable.
+ *
+ * @param clientSession The [client session][ClientSession] with which to associate this operation.
+ * @param models The [individual write operations][ClientNamespacedWriteModel].
+ * @return The [ClientBulkWriteResult] if the operation is successful.
+ * @throws ClientBulkWriteException If and only if the operation is unsuccessful or partially unsuccessful, and
+ * there is at least one of the following pieces of information to report:
+ * [ClientBulkWriteException.getWriteConcernErrors], [ClientBulkWriteException.getWriteErrors],
+ * [ClientBulkWriteException.getPartialResult].
+ * @throws MongoException Only if the operation is unsuccessful.
+ * @see [BulkWrite command](https://www.mongodb.com/docs/manual/reference/command/bulkWrite/)
+ * @since 5.3
+ */
+ public suspend fun bulkWrite(
+ clientSession: ClientSession,
+ models: List<ClientNamespacedWriteModel>
+ ): ClientBulkWriteResult = wrapped.bulkWrite(clientSession.wrapped, models).awaitSingle()
+
+ /**
+ * Executes a client-level bulk write operation.
+ *
+ * This operation supports [retryable writes][MongoClientSettings.getRetryWrites]. Depending on the number of
+ * `models`, encoded size of `models`, and the size limits in effect, executing this operation may require multiple
+ * `bulkWrite` commands. The eligibility for retries is determined per each `bulkWrite` command:
+ * [ClientNamespacedUpdateManyModel], [ClientNamespacedDeleteManyModel] in a command render it non-retryable.
+ *
+ * @param clientSession The [client session][ClientSession] with which to associate this operation.
+ * @param models The [individual write operations][ClientNamespacedWriteModel].
+ * @param options The [options][ClientBulkWriteOptions].
+ * @return The [ClientBulkWriteResult] if the operation is successful.
+ * @throws ClientBulkWriteException If and only if the operation is unsuccessful or partially unsuccessful, and
+ * there is at least one of the following pieces of information to report:
+ * [ClientBulkWriteException.getWriteConcernErrors], [ClientBulkWriteException.getWriteErrors],
+ * [ClientBulkWriteException.getPartialResult].
+ * @throws MongoException Only if the operation is unsuccessful.
+ * @see [BulkWrite command](https://www.mongodb.com/docs/manual/reference/command/bulkWrite/)
+ * @since 5.3
+ */
+ public suspend fun bulkWrite(
+ clientSession: ClientSession,
+ models: List<ClientNamespacedWriteModel>,
+ options: ClientBulkWriteOptions
+ ): ClientBulkWriteResult = wrapped.bulkWrite(clientSession.wrapped, models, options).awaitSingle()
+}
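A hedged sketch of the client-level bulk write introduced above. It assumes the `ClientNamespacedWriteModel` factory methods and `ClientBulkWriteResult` accessors from the core driver; the namespace and documents are illustrative:

```kotlin
import com.mongodb.MongoNamespace
import com.mongodb.client.model.bulk.ClientNamespacedWriteModel
import org.bson.Document

suspend fun seedPeople(cluster: MongoCluster) {
    val ns = MongoNamespace("test", "people")
    // Two inserts against the same namespace, executed as one client-level bulk write.
    val result = cluster.bulkWrite(
        listOf(
            ClientNamespacedWriteModel.insertOne(ns, Document("name", "Ada")),
            ClientNamespacedWriteModel.insertOne(ns, Document("name", "Alan"))))
    println("inserted=${result.insertedCount}")
}
```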
diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollection.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollection.kt
new file mode 100644
index 00000000000..5602b5ecd11
--- /dev/null
+++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollection.kt
@@ -0,0 +1,1649 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.coroutine
+
+import com.mongodb.MongoNamespace
+import com.mongodb.ReadConcern
+import com.mongodb.ReadPreference
+import com.mongodb.WriteConcern
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
+import com.mongodb.bulk.BulkWriteResult
+import com.mongodb.client.model.BulkWriteOptions
+import com.mongodb.client.model.CountOptions
+import com.mongodb.client.model.CreateIndexOptions
+import com.mongodb.client.model.DeleteOptions
+import com.mongodb.client.model.DropCollectionOptions
+import com.mongodb.client.model.DropIndexOptions
+import com.mongodb.client.model.EstimatedDocumentCountOptions
+import com.mongodb.client.model.FindOneAndDeleteOptions
+import com.mongodb.client.model.FindOneAndReplaceOptions
+import com.mongodb.client.model.FindOneAndUpdateOptions
+import com.mongodb.client.model.IndexModel
+import com.mongodb.client.model.IndexOptions
+import com.mongodb.client.model.InsertManyOptions
+import com.mongodb.client.model.InsertOneOptions
+import com.mongodb.client.model.RenameCollectionOptions
+import com.mongodb.client.model.ReplaceOptions
+import com.mongodb.client.model.SearchIndexModel
+import com.mongodb.client.model.UpdateOptions
+import com.mongodb.client.model.WriteModel
+import com.mongodb.client.result.DeleteResult
+import com.mongodb.client.result.InsertManyResult
+import com.mongodb.client.result.InsertOneResult
+import com.mongodb.client.result.UpdateResult
+import com.mongodb.reactivestreams.client.MongoCollection as JMongoCollection
+import java.util.concurrent.TimeUnit
+import kotlinx.coroutines.flow.Flow
+import kotlinx.coroutines.reactive.asFlow
+import kotlinx.coroutines.reactive.awaitFirstOrNull
+import kotlinx.coroutines.reactive.awaitSingle
+import org.bson.BsonDocument
+import org.bson.Document
+import org.bson.codecs.configuration.CodecRegistry
+import org.bson.conversions.Bson
+
+/**
+ * The MongoCollection representation.
+ *
+ * Note: Additions to this interface will not be considered to break binary compatibility.
+ *
+ * @param T The type that this collection will encode documents from and decode documents to.
+ */
+public class MongoCollection<T : Any>(private val wrapped: JMongoCollection<T>) {
+
+ /** The class of documents stored in this collection. */
+ public val documentClass: Class<T>
+ get() = wrapped.documentClass
+
+ /** The namespace of this collection. */
+ public val namespace: MongoNamespace
+ get() = wrapped.namespace
+
+ /** The codec registry for the collection. */
+ public val codecRegistry: CodecRegistry
+ get() = wrapped.codecRegistry
+
+ /** The read preference for the collection. */
+ public val readPreference: ReadPreference
+ get() = wrapped.readPreference
+
+ /** The read concern for the collection. */
+ public val readConcern: ReadConcern
+ get() = wrapped.readConcern
+
+ /** The write concern for the collection. */
+ public val writeConcern: WriteConcern
+ get() = wrapped.writeConcern
+
+ /**
+ * The time limit for the full execution of an operation.
+ *
+ * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`,
+ * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`.
+ * - `null` means that the timeout mechanism for operations will defer to using:
+ * - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to
+ * become available
+ * - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out.
+ * - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out.
+ * - `maxTimeMS`: The time limit for processing operations on a cursor. See:
+ * [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS).
+ * - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute.
+ * - `0` means infinite timeout.
+ * - `> 0` The time limit to use for the full execution of an operation.
+ *
+ * @param timeUnit the time unit, defaults to Milliseconds
+ * @return the optional timeout duration
+ * @since 5.2
+ */
+ @Alpha(Reason.CLIENT)
+ public fun timeout(timeUnit: TimeUnit = TimeUnit.MILLISECONDS): Long? = wrapped.getTimeout(timeUnit)
+
+ /**
+ * Create a new collection instance with a different default class to cast any documents returned from the database
+ * into.
+ *
+ * @param R the default class to cast any documents returned from the database into.
+ * @param resultClass the target document type for the collection.
+ * @return a new MongoCollection instance with the different default class
+ */
+ public fun <R : Any> withDocumentClass(resultClass: Class<R>): MongoCollection<R> =
+ MongoCollection(wrapped.withDocumentClass(resultClass))
+
+ /**
+ * Create a new collection instance with a different default class to cast any documents returned from the database
+ * into.
+ *
+ * @param R the default class to cast any documents returned from the database into.
+ * @return a new MongoCollection instance with the different default class
+ */
+ public inline fun <reified R : Any> withDocumentClass(): MongoCollection<R> = withDocumentClass(R::class.java)
+
+ /**
+ * Create a new collection instance with a different codec registry.
+ *
+ * The [CodecRegistry] configured by this method is effectively treated by the driver as an instance of
+ * [org.bson.codecs.configuration.CodecProvider], which [CodecRegistry] extends. So there is no benefit to defining
+ * a class that implements [CodecRegistry]. Rather, an application should always create [CodecRegistry] instances
+ * using the factory methods in [org.bson.codecs.configuration.CodecRegistries].
+ *
+ * @param newCodecRegistry the new [org.bson.codecs.configuration.CodecRegistry] for the collection
+ * @return a new MongoCollection instance with the different codec registry
+ * @see org.bson.codecs.configuration.CodecRegistries
+ */
+ public fun withCodecRegistry(newCodecRegistry: CodecRegistry): MongoCollection<T> =
+ MongoCollection(wrapped.withCodecRegistry(newCodecRegistry))
+
+ /**
+ * Create a new collection instance with a different read preference.
+ *
+ * @param newReadPreference the new [com.mongodb.ReadPreference] for the collection
+ * @return a new MongoCollection instance with the different readPreference
+ */
+ public fun withReadPreference(newReadPreference: ReadPreference): MongoCollection<T> =
+ MongoCollection(wrapped.withReadPreference(newReadPreference))
+
+ /**
+ * Create a new collection instance with a different read concern.
+ *
+ * @param newReadConcern the new [ReadConcern] for the collection
+ * @return a new MongoCollection instance with the different ReadConcern
+ * @see [Read Concern](https://www.mongodb.com/docs/manual/reference/readConcern/)
+ */
+ public fun withReadConcern(newReadConcern: ReadConcern): MongoCollection<T> =
+ MongoCollection(wrapped.withReadConcern(newReadConcern))
+
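A sketch of re-typing a collection with `withDocumentClass` from above; the `Person` data class is illustrative and assumes a suitable codec is registered (e.g. the driver's data-class codec):

```kotlin
import org.bson.Document

data class Person(val name: String, val age: Int)

fun typedView(collection: MongoCollection<Document>): MongoCollection<Person> =
    // Same underlying namespace; documents are now decoded into Person instances.
    collection.withDocumentClass<Person>()
```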
+ /**
+ * Create a new collection instance with a different write concern.
+ *
+ * @param newWriteConcern the new [com.mongodb.WriteConcern] for the collection
+ * @return a new MongoCollection instance with the different writeConcern
+ */
+ public fun withWriteConcern(newWriteConcern: WriteConcern): MongoCollection<T> =
+ MongoCollection(wrapped.withWriteConcern(newWriteConcern))
+
+ /**
+ * Create a new MongoCollection instance with the set time limit for the full execution of an operation.
+ * - `0` means an infinite timeout
+ * - `> 0` The time limit to use for the full execution of an operation.
+ *
+ * @param timeout the timeout, which must be greater than or equal to 0
+ * @param timeUnit the time unit, defaults to Milliseconds
+ * @return a new MongoCollection instance with the set time limit for operations
+ * @see [MongoCollection.timeout]
+ * @since 5.2
+ */
+ @Alpha(Reason.CLIENT)
+ public fun withTimeout(timeout: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): MongoCollection<T> =
+ MongoCollection(wrapped.withTimeout(timeout, timeUnit))
+
+ /**
+ * Counts the number of documents in the collection.
+ *
+ * Note: For a fast count of the total documents in a collection see [estimatedDocumentCount]. When migrating from
+ * `count()` to `countDocuments()` the following query operators must be replaced:
+ * ```
+ * +-------------+--------------------------------+
+ * | Operator | Replacement |
+ * +=============+================================+
+ * | $where | $expr |
+ * +-------------+--------------------------------+
+ * | $near | $geoWithin with $center |
+ * +-------------+--------------------------------+
+ * | $nearSphere | $geoWithin with $centerSphere |
+ * +-------------+--------------------------------+
+ * ```
+ *
+ * @param filter the query filter
+ * @param options the options describing the count
+ * @return the number of documents in the collection
+ */
+ public suspend fun countDocuments(filter: Bson = BsonDocument(), options: CountOptions = CountOptions()): Long =
+ wrapped.countDocuments(filter, options).awaitSingle()
+
+ /**
+ * Counts the number of documents in the collection according to the given options.
+ *
+ * Note: For a fast count of the total documents in a collection see [estimatedDocumentCount]. When migrating from
+ * `count()` to `countDocuments()` the following query operators must be replaced:
+ * ```
+ * +-------------+--------------------------------+
+ * | Operator | Replacement |
+ * +=============+================================+
+ * | $where | $expr |
+ * +-------------+--------------------------------+
+ * | $near | $geoWithin with $center |
+ * +-------------+--------------------------------+
+ * | $nearSphere | $geoWithin with $centerSphere |
+ * +-------------+--------------------------------+
+ * ```
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @param filter the query filter
+ * @param options the options describing the count
+ * @return the number of documents in the collection
+ */
+ public suspend fun countDocuments(
+ clientSession: ClientSession,
+ filter: Bson = BsonDocument(),
+ options: CountOptions = CountOptions()
+ ): Long = wrapped.countDocuments(clientSession.wrapped, filter, options).awaitSingle()
+
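The two counting styles side by side, as a sketch (the filter is illustrative); `estimatedDocumentCount` is documented just below:

```kotlin
import com.mongodb.client.model.Filters
import org.bson.Document

suspend fun countDemo(collection: MongoCollection<Document>) {
    val adults = collection.countDocuments(Filters.gte("age", 18)) // exact, runs a query
    val estimate = collection.estimatedDocumentCount() // fast, metadata-based
    println("$adults adults of roughly $estimate documents")
}
```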
+ /**
+ * Gets an estimate of the count of documents in a collection using collection metadata.
+ *
+ * Implementation note: this method is implemented using the MongoDB server's count command
+ *
+ * @param options the options describing the count
+ * @return the number of documents in the collection
+ * @see [Count behaviour](https://www.mongodb.com/docs/manual/reference/command/count/#behavior)
+ */
+ public suspend fun estimatedDocumentCount(
+ options: EstimatedDocumentCountOptions = EstimatedDocumentCountOptions()
+ ): Long = wrapped.estimatedDocumentCount(options).awaitSingle()
+
+ /**
+ * Gets the distinct values of the specified field name.
+ *
+ * @param R the target type of the iterable.
+ * @param fieldName the field name
+ * @param filter the query filter
+ * @param resultClass the target document type of the iterable.
+ * @return an iterable of distinct values
+ * @see [Distinct command](https://www.mongodb.com/docs/manual/reference/command/distinct/)
+ */
+ public fun <R : Any> distinct(
+ fieldName: String,
+ filter: Bson = BsonDocument(),
+ resultClass: Class<R>
+ ): DistinctFlow<R> = DistinctFlow(wrapped.distinct(fieldName, filter, resultClass))
+
+ /**
+ * Gets the distinct values of the specified field name.
+ *
+ * @param R the target type of the iterable.
+ * @param clientSession the client session with which to associate this operation
+ * @param fieldName the field name
+ * @param filter the query filter
+ * @param resultClass the target document type of the iterable.
+ * @return an iterable of distinct values
+ * @see [Distinct command](https://www.mongodb.com/docs/manual/reference/command/distinct/)
+ */
+ public fun <R : Any> distinct(
+ clientSession: ClientSession,
+ fieldName: String,
+ filter: Bson = BsonDocument(),
+ resultClass: Class<R>
+ ): DistinctFlow<R> = DistinctFlow(wrapped.distinct(clientSession.wrapped, fieldName, filter, resultClass))
+
+ /**
+ * Gets the distinct values of the specified field name.
+ *
+ * @param R the target type of the iterable.
+ * @param fieldName the field name
+ * @param filter the query filter
+ * @return an iterable of distinct values
+ * @see [Distinct command](https://www.mongodb.com/docs/manual/reference/command/distinct/)
+ */
+ public inline fun <reified R : Any> distinct(fieldName: String, filter: Bson = BsonDocument()): DistinctFlow<R> =
+ distinct(fieldName, filter, R::class.java)
+
+ /**
+ * Gets the distinct values of the specified field name.
+ *
+ * @param R the target type of the iterable.
+ * @param clientSession the client session with which to associate this operation
+ * @param fieldName the field name
+ * @param filter the query filter
+ * @return an iterable of distinct values
+ * @see [Distinct command](https://www.mongodb.com/docs/manual/reference/command/distinct/)
+ */
+ public inline fun <reified R : Any> distinct(
+ clientSession: ClientSession,
+ fieldName: String,
+ filter: Bson = BsonDocument()
+ ): DistinctFlow<R> = distinct(clientSession, fieldName, filter, R::class.java)
+
+ /**
+ * Finds all documents in the collection.
+ *
+ * @param filter the query filter
+ * @return the find iterable interface
+ * @see [Query Documents](https://www.mongodb.com/docs/manual/tutorial/query-documents/)
+ */
+ @JvmName("findAsT") public fun find(filter: Bson = BsonDocument()): FindFlow<T> = find(filter, documentClass)
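A find sketch exploiting the Flow nature of the returned `FindFlow` (filter and sort are illustrative; `sort` is assumed from the FindFlow builder API):

```kotlin
import com.mongodb.client.model.Filters
import com.mongodb.client.model.Sorts
import kotlinx.coroutines.flow.firstOrNull
import org.bson.Document

suspend fun youngestUnder30(collection: MongoCollection<Document>): Document? =
    // FindFlow is a Flow, so standard flow terminal operators apply.
    collection.find(Filters.lt("age", 30)).sort(Sorts.ascending("age")).firstOrNull()
```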
+
+ /**
+ * Finds all documents in the collection.
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @param filter the query filter
+ * @return the find iterable interface
+ * @see [Query Documents](https://www.mongodb.com/docs/manual/tutorial/query-documents/)
+ */
+ @JvmName("findAsTWithSession")
+ public fun find(clientSession: ClientSession, filter: Bson = BsonDocument()): FindFlow<T> =
+ find(clientSession, filter, documentClass)
+
+ /**
+ * Finds all documents in the collection.
+ *
+ * @param R the class to decode each document into
+ * @param filter the query filter
+ * @param resultClass the target document type of the iterable.
+ * @return the find iterable interface
+ * @see [Query Documents](https://www.mongodb.com/docs/manual/tutorial/query-documents/)
+ */
+ public fun <R : Any> find(filter: Bson = BsonDocument(), resultClass: Class<R>): FindFlow<R> =
+ FindFlow(wrapped.find(filter, resultClass))
+
+ /**
+ * Finds all documents in the collection.
+ *
+ * @param R the class to decode each document into
+ * @param clientSession the client session with which to associate this operation
+ * @param filter the query filter
+ * @param resultClass the target document type of the iterable.
+ * @return the find iterable interface
+ * @see [Query Documents](https://www.mongodb.com/docs/manual/tutorial/query-documents/)
+ */
+ public fun <R : Any> find(
+ clientSession: ClientSession,
+ filter: Bson = BsonDocument(),
+ resultClass: Class<R>
+ ): FindFlow<R> = FindFlow(wrapped.find(clientSession.wrapped, filter, resultClass))
+
+ /**
+ * Finds all documents in the collection.
+ *
+ * @param R the class to decode each document into
+ * @param filter the query filter
+ * @return the find iterable interface
+ * @see [Query Documents](https://www.mongodb.com/docs/manual/tutorial/query-documents/)
+ */
+ public inline fun <reified R : Any> find(filter: Bson = BsonDocument()): FindFlow<R> = find(filter, R::class.java)
+
+ /**
+ * Finds all documents in the collection.
+ *
+ * @param R the class to decode each document into
+ * @param clientSession the client session with which to associate this operation
+ * @param filter the query filter
+ * @return the find iterable interface
+ * @see [Query Documents](https://www.mongodb.com/docs/manual/tutorial/query-documents/)
+ */
+ public inline fun <reified R : Any> find(clientSession: ClientSession, filter: Bson = BsonDocument()): FindFlow<R> =
+ find(clientSession, filter, R::class.java)
+
+ /**
+ * Aggregates documents according to the specified aggregation pipeline.
+ *
+ * @param pipeline the aggregation pipeline
+ * @return an iterable containing the result of the aggregation operation
+ * @see [Aggregate Command](https://www.mongodb.com/docs/manual/reference/command/aggregate/#dbcmd.aggregate/)
+ */
+ @JvmName("aggregateAsT")
+ public fun aggregate(pipeline: List<Bson>): AggregateFlow<T> =
+ AggregateFlow(wrapped.aggregate(pipeline, documentClass))
+
+ /**
+ * Aggregates documents according to the specified aggregation pipeline.
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @param pipeline the aggregation pipeline
+ * @return an iterable containing the result of the aggregation operation
+ * @see [Aggregate Command](https://www.mongodb.com/docs/manual/reference/command/aggregate/#dbcmd.aggregate/)
+ */
+ @JvmName("aggregateAsTWithSession")
+ public fun aggregate(clientSession: ClientSession, pipeline: List<Bson>): AggregateFlow<T> =
+ AggregateFlow(wrapped.aggregate(clientSession.wrapped, pipeline, documentClass))
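An aggregation sketch for the overloads above; the `$group` stage and field names are illustrative:

```kotlin
import com.mongodb.client.model.Accumulators
import com.mongodb.client.model.Aggregates
import kotlinx.coroutines.flow.toList
import org.bson.Document

suspend fun totalsByRegion(collection: MongoCollection<Document>): List<Document> =
    collection
        // Group by region and sum the amount field; AggregateFlow is a Flow of results.
        .aggregate(listOf(Aggregates.group("\$region", Accumulators.sum("total", "\$amount"))))
        .toList()
```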
+
+ /**
+ * Aggregates documents according to the specified aggregation pipeline.
+ *
+ * @param R the class to decode each document into
+ * @param pipeline the aggregation pipeline
+ * @param resultClass the target document type of the iterable.
+ * @return an iterable containing the result of the aggregation operation
+ * @see [Aggregate Command](https://www.mongodb.com/docs/manual/reference/command/aggregate/#dbcmd.aggregate/)
+ */
+ public fun <R : Any> aggregate(pipeline: List<Bson>, resultClass: Class<R>): AggregateFlow<R> =
+ AggregateFlow(wrapped.aggregate(pipeline, resultClass))
+
+ /**
+ * Aggregates documents according to the specified aggregation pipeline.
+ *
+ * @param R the class to decode each document into
+ * @param clientSession the client session with which to associate this operation
+ * @param pipeline the aggregation pipeline
+ * @param resultClass the target document type of the iterable.
+ * @return an iterable containing the result of the aggregation operation
+ * @see [Aggregate Command](https://www.mongodb.com/docs/manual/reference/command/aggregate/#dbcmd.aggregate/)
+ */
+ public fun <R : Any> aggregate(
+ clientSession: ClientSession,
+ pipeline: List<Bson>,
+ resultClass: Class<R>
+ ): AggregateFlow<R> = AggregateFlow(wrapped.aggregate(clientSession.wrapped, pipeline, resultClass))
+
+ /**
+ * Aggregates documents according to the specified aggregation pipeline.
+ *
+ * @param R the class to decode each document into
+ * @param pipeline the aggregation pipeline
+ * @return an iterable containing the result of the aggregation operation
+ * @see [Aggregate Command](https://www.mongodb.com/docs/manual/reference/command/aggregate/#dbcmd.aggregate/)
+ */
+ public inline fun <reified R : Any> aggregate(pipeline: List<Bson>): AggregateFlow<R> =
+ aggregate(pipeline, R::class.java)
+
+ /**
+ * Aggregates documents according to the specified aggregation pipeline.
+ *
+ * @param R the class to decode each document into
+ * @param clientSession the client session with which to associate this operation
+ * @param pipeline the aggregation pipeline
+ * @return an iterable containing the result of the aggregation operation
+ * @see [Aggregate Command](https://www.mongodb.com/docs/manual/reference/command/aggregate/#dbcmd.aggregate/)
+ */
+ public inline fun <reified R : Any> aggregate(
+ clientSession: ClientSession,
+ pipeline: List<Bson>
+ ): AggregateFlow<R> = aggregate(clientSession, pipeline, R::class.java)
+
+ /**
+ * Creates a change stream for this collection.
+ *
+ * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+ * @return the change stream iterable
+ * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+ */
+ @JvmName("watchAsDocument")
+ public fun watch(pipeline: List<Bson> = emptyList()): ChangeStreamFlow<T> = watch(pipeline, documentClass)
+
+ /**
+ * Creates a change stream for this collection.
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+ * @return the change stream iterable
+ * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+ */
+ @JvmName("watchAsDocumentWithSession")
+ public fun watch(clientSession: ClientSession, pipeline: List<Bson> = emptyList()): ChangeStreamFlow<T> =
+ watch(clientSession, pipeline, documentClass)
+
+ /**
+ * Creates a change stream for this collection.
+ *
+ * @param R the target document type of the iterable.
+ * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+ * @param resultClass the target document type of the iterable.
+ * @return the change stream iterable
+ * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+ */
+ public fun <R : Any> watch(pipeline: List<Bson> = emptyList(), resultClass: Class<R>): ChangeStreamFlow<R> =
+ ChangeStreamFlow(wrapped.watch(pipeline, resultClass))
+
+ /**
+ * Creates a change stream for this collection.
+ *
+ * @param R the target document type of the iterable.
+ * @param clientSession the client session with which to associate this operation
+ * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+ * @param resultClass the target document type of the iterable.
+ * @return the change stream iterable
+ * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+ */
+ public fun <R : Any> watch(
+ clientSession: ClientSession,
+ pipeline: List<Bson> = emptyList(),
+ resultClass: Class<R>
+ ): ChangeStreamFlow<R> = ChangeStreamFlow(wrapped.watch(clientSession.wrapped, pipeline, resultClass))
+
+ /**
+ * Creates a change stream for this collection.
+ *
+ * @param R the target document type of the iterable.
+ * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+ * @return the change stream iterable
+ * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+ */
+ public inline fun <reified R : Any> watch(pipeline: List<Bson> = emptyList()): ChangeStreamFlow<R> =
+ watch(pipeline, R::class.java)
+
+ /**
+ * Creates a change stream for this collection.
+ *
+ * @param R the target document type of the iterable.
+ * @param clientSession the client session with which to associate this operation
+ * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+ * @return the change stream iterable
+ * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+ */
+ public inline fun <reified R : Any> watch(
+ clientSession: ClientSession,
+ pipeline: List<Bson> = emptyList()
+ ): ChangeStreamFlow<R> = watch(clientSession, pipeline, R::class.java)
+
+ /**
+ * Aggregates documents according to the specified map-reduce function.
+ *
+ * @param mapFunction A JavaScript function that associates or "maps" a value with a key and emits the key and value
+ * pair.
+ * @param reduceFunction A JavaScript function that "reduces" to a single object all the values associated with a
+ * particular key.
+ * @return an iterable containing the result of the map-reduce operation
+ * @see [map-reduce](https://www.mongodb.com/docs/manual/reference/command/mapReduce/)
+ */
+ @Suppress("DEPRECATION")
+ @Deprecated("Map Reduce has been deprecated. Use Aggregation instead", replaceWith = ReplaceWith(""))
+ @JvmName("mapReduceAsT")
+ public fun mapReduce(mapFunction: String, reduceFunction: String): MapReduceFlow<T> =
+ mapReduce(mapFunction, reduceFunction, documentClass)
+
+ /**
+ * Aggregates documents according to the specified map-reduce function.
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @param mapFunction A JavaScript function that associates or "maps" a value with a key and emits the key and value
+ * pair.
+ * @param reduceFunction A JavaScript function that "reduces" to a single object all the values associated with a
+ * particular key.
+ * @return an iterable containing the result of the map-reduce operation
+ * @see [map-reduce](https://www.mongodb.com/docs/manual/reference/command/mapReduce/)
+ */
+ @Suppress("DEPRECATION")
+ @Deprecated("Map Reduce has been deprecated. Use Aggregation instead", replaceWith = ReplaceWith(""))
+ @JvmName("mapReduceAsTWithSession")
+ public fun mapReduce(clientSession: ClientSession, mapFunction: String, reduceFunction: String): MapReduceFlow<T> =
+ mapReduce(clientSession, mapFunction, reduceFunction, documentClass)
+
+ /**
+ * Aggregates documents according to the specified map-reduce function.
+ *
+ * @param R the class to decode each resulting document into.
+ * @param mapFunction A JavaScript function that associates or "maps" a value with a key and emits the key and value
+ * pair.
+ * @param reduceFunction A JavaScript function that "reduces" to a single object all the values associated with a
+ * particular key.
+ * @param resultClass the target document type of the iterable.
+ * @return an iterable containing the result of the map-reduce operation
+ * @see [map-reduce](https://www.mongodb.com/docs/manual/reference/command/mapReduce/)
+ */
+ @Suppress("DEPRECATION")
+ @Deprecated("Map Reduce has been deprecated. Use Aggregation instead", replaceWith = ReplaceWith(""))
+ public fun <R : Any> mapReduce(
+ mapFunction: String,
+ reduceFunction: String,
+ resultClass: Class<R>
+ ): MapReduceFlow<R> = MapReduceFlow(wrapped.mapReduce(mapFunction, reduceFunction, resultClass))
+
+ /**
+ * Aggregates documents according to the specified map-reduce function.
+ *
+ * @param R the class to decode each resulting document into.
+ * @param clientSession the client session with which to associate this operation
+ * @param mapFunction A JavaScript function that associates or "maps" a value with a key and emits the key and value
+ * pair.
+ * @param reduceFunction A JavaScript function that "reduces" to a single object all the values associated with a
+ * particular key.
+ * @param resultClass the target document type of the iterable.
+ * @return an iterable containing the result of the map-reduce operation
+ * @see [map-reduce](https://www.mongodb.com/docs/manual/reference/command/mapReduce/)
+ */
+ @Suppress("DEPRECATION")
+ @Deprecated("Map Reduce has been deprecated. Use Aggregation instead", replaceWith = ReplaceWith(""))
+ public fun <R : Any> mapReduce(
+ clientSession: ClientSession,
+ mapFunction: String,
+ reduceFunction: String,
+ resultClass: Class<R>
+ ): MapReduceFlow<R> =
+ MapReduceFlow(wrapped.mapReduce(clientSession.wrapped, mapFunction, reduceFunction, resultClass))
+
+ /**
+ * Aggregates documents according to the specified map-reduce function.
+ *
+ * @param R the class to decode each resulting document into.
+ * @param mapFunction A JavaScript function that associates or "maps" a value with a key and emits the key and value
+ * pair.
+ * @param reduceFunction A JavaScript function that "reduces" to a single object all the values associated with a
+ * particular key.
+ * @return an iterable containing the result of the map-reduce operation
+ * @see [map-reduce](https://www.mongodb.com/docs/manual/reference/command/mapReduce/)
+ */
+ @Suppress("DEPRECATION")
+ @Deprecated("Map Reduce has been deprecated. Use Aggregation instead", replaceWith = ReplaceWith(""))
+ public inline fun <reified R : Any> mapReduce(mapFunction: String, reduceFunction: String): MapReduceFlow<R> =
+ mapReduce(mapFunction, reduceFunction, R::class.java)
+
+ /**
+ * Aggregates documents according to the specified map-reduce function.
+ *
+ * @param R the class to decode each resulting document into.
+ * @param clientSession the client session with which to associate this operation
+ * @param mapFunction A JavaScript function that associates or "maps" a value with a key and emits the key and value
+ * pair.
+ * @param reduceFunction A JavaScript function that "reduces" to a single object all the values associated with a
+ * particular key.
+ * @return an iterable containing the result of the map-reduce operation
+ * @see [map-reduce](https://www.mongodb.com/docs/manual/reference/command/mapReduce/)
+ */
+ @Suppress("DEPRECATION")
+ @Deprecated("Map Reduce has been deprecated. Use Aggregation instead", replaceWith = ReplaceWith(""))
+ public inline fun <reified R : Any> mapReduce(
+ clientSession: ClientSession,
+ mapFunction: String,
+ reduceFunction: String
+ ): MapReduceFlow<R> = mapReduce(clientSession, mapFunction, reduceFunction, R::class.java)
+
+ /**
+ * Inserts the provided document. If the document is missing an identifier, the driver should generate one.
+ *
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ *
+ * @param document the document to insert
+ * @param options the options to apply to the operation
+ * @return the insert one result
+ * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception
+ * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write
+ * concern
+ * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+ * @throws com.mongodb.MongoException if the write failed due to some other failure
+ */
+ public suspend fun insertOne(document: T, options: InsertOneOptions = InsertOneOptions()): InsertOneResult =
+ wrapped.insertOne(document, options).awaitSingle()
+
+ /**
+ * Inserts the provided document. If the document is missing an identifier, the driver should generate one.
+ *
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @param document the document to insert
+ * @param options the options to apply to the operation
+ * @return the insert one result
+ * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception
+ * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write
+ * concern
+ * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+ * @throws com.mongodb.MongoException if the write failed due to some other failure
+ */
+ public suspend fun insertOne(
+ clientSession: ClientSession,
+ document: T,
+ options: InsertOneOptions = InsertOneOptions()
+ ): InsertOneResult = wrapped.insertOne(clientSession.wrapped, document, options).awaitSingle()
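An insert sketch (document content is illustrative); the driver generates `_id` when none is supplied:

```kotlin
import org.bson.Document

suspend fun addPerson(collection: MongoCollection<Document>) {
    val result = collection.insertOne(Document("name", "Grace").append("age", 36))
    println(result.insertedId) // the driver-generated _id, since none was supplied
}
```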
+ *
+ * @param documents the documents to insert
+ * @param options the options to apply to the operation
+ * @return the insert many result
+ * @throws com.mongodb.MongoBulkWriteException if there's an exception in the bulk write operation
+ * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+ * @throws com.mongodb.MongoException if the write failed due to some other failure
+ * @throws IllegalArgumentException if the documents list is null or empty, or any of the documents in the list are
+ * null
+ */
+ public suspend fun insertMany(
+ documents: List<T>,
+ options: InsertManyOptions = InsertManyOptions()
+ ): InsertManyResult = wrapped.insertMany(documents, options).awaitSingle()
+
+ /**
+ * Inserts one or more documents. A call to this method is equivalent to a call to the `bulkWrite` method.
+ *
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @param documents the documents to insert
+ * @param options the options to apply to the operation
+ * @return the insert many result
+ * @throws com.mongodb.MongoBulkWriteException if there's an exception in the bulk write operation
+ * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+ * @throws com.mongodb.MongoException if the write failed due to some other failure
+ * @throws IllegalArgumentException if the documents list is null or empty, or any of the documents in the list are
+ * null
+ */
+ public suspend fun insertMany(
+ clientSession: ClientSession,
+ documents: List<T>,
+ options: InsertManyOptions = InsertManyOptions()
+ ): InsertManyResult = wrapped.insertMany(clientSession.wrapped, documents, options).awaitSingle()
+
+ /**
+ * Update a single document in the collection according to the specified arguments.
+ *
+ * Use this method to only update the corresponding fields in the document according to the update operators used in
+ * the update document. To replace the entire document with a new document, use the corresponding [replaceOne]
+ * method.
+ *
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ *
+ * @param filter a document describing the query filter, which may not be null.
+ * @param update a document describing the update, which may not be null. The update to apply must include at least
+ * one update operator.
+ * @param options the options to apply to the update operation
+ * @return the result of the update one operation
+ * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception
+ * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write
+ * concern
+ * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+ * @throws com.mongodb.MongoException if the write failed due to some other failure
+ * @see [Modify Documents](https://www.mongodb.com/docs/manual/tutorial/modify-documents/)
+ * @see [Update Operators](https://www.mongodb.com/docs/manual/reference/operator/update/)
+ * @see [Update Command Behaviors](https://www.mongodb.com/docs/manual/reference/command/update/)
+ * @see [replaceOne]
+ */
+ public suspend fun updateOne(filter: Bson, update: Bson, options: UpdateOptions = UpdateOptions()): UpdateResult =
+ wrapped.updateOne(filter, update, options).awaitSingle()
+
+ /**
+ * Update a single document in the collection according to the specified arguments.
+ *
+ * Use this method to only update the corresponding fields in the document according to the update operators used in
+ * the update document. To replace the entire document with a new document, use the corresponding [replaceOne]
+ * method.
+ *
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @param filter a document describing the query filter, which may not be null.
+ * @param update a document describing the update, which may not be null. The update to apply must include at least
+ * one update operator.
+ * @param options the options to apply to the update operation
+ * @return the result of the update one operation
+ * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception
+ * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write
+ * concern
+ * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+ * @throws com.mongodb.MongoException if the write failed due to some other failure
+ * @see [Modify Documents](https://www.mongodb.com/docs/manual/tutorial/modify-documents/)
+ * @see [Update Operators](https://www.mongodb.com/docs/manual/reference/operator/update/)
+ * @see [Update Command Behaviors](https://www.mongodb.com/docs/manual/reference/command/update/)
+ * @see com.mongodb.client.MongoCollection.replaceOne
+ */
+ public suspend fun updateOne(
+ clientSession: ClientSession,
+ filter: Bson,
+ update: Bson,
+ options: UpdateOptions = UpdateOptions()
+ ): UpdateResult = wrapped.updateOne(clientSession.wrapped, filter, update, options).awaitSingle()
+
+ /**
+ * Update a single document in the collection according to the specified arguments.
+ *
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ *
+ * @param filter a document describing the query filter, which may not be null.
+ * @param update a pipeline describing the update, which may not be null.
+ * @param options the options to apply to the update operation
+ * @return the result of the update one operation
+ * @throws com.mongodb.MongoWriteException if the write failed due to some other failure specific to the update command
+ * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern
+ * @throws com.mongodb.MongoException if the write failed due to some other failure
+ * @see [Modify Documents](https://www.mongodb.com/docs/manual/tutorial/modify-documents/)
+ * @see [Update Operators](https://www.mongodb.com/docs/manual/reference/operator/update/)
+ */
+ public suspend fun updateOne(
+ filter: Bson,
+ update: List<Bson>,
+ options: UpdateOptions = UpdateOptions()
+ ): UpdateResult = wrapped.updateOne(filter, update, options).awaitSingle()
+
+ /**
+ * Update a single document in the collection according to the specified arguments.
+ *
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @param filter a document describing the query filter, which may not be null.
+ * @param update a pipeline describing the update, which may not be null.
+ * @param options the options to apply to the update operation
+ * @return the result of the update one operation
+ * @throws com.mongodb.MongoWriteException if the write failed due to some other failure specific to the update command
+ * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern
+ * @throws com.mongodb.MongoException if the write failed due to some other failure
+ * @see [Modify Documents](https://www.mongodb.com/docs/manual/tutorial/modify-documents/)
+ * @see [Update Operators](https://www.mongodb.com/docs/manual/reference/operator/update/)
+ */
+ public suspend fun updateOne(
+ clientSession: ClientSession,
+ filter: Bson,
+ update: List<Bson>,
+ options: UpdateOptions = UpdateOptions()
+ ): UpdateResult = wrapped.updateOne(clientSession.wrapped, filter, update, options).awaitSingle()
+
+ /**
+ * Update all documents in the collection according to the specified arguments.
+ *
+ * @param filter a document describing the query filter, which may not be null.
+ * @param update a document describing the update, which may not be null. The update to apply must include only
+ * update operators.
+ * @param options the options to apply to the update operation
+ * @return the result of the update many operation
+ * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception
+ * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write
+ * concern
+ * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+ * @throws com.mongodb.MongoException if the write failed due to some other failure
+ * @see [Modify Documents](https://www.mongodb.com/docs/manual/tutorial/modify-documents/)
+ * @see [Update Operators](https://www.mongodb.com/docs/manual/reference/operator/update/)
+ */
+ public suspend fun updateMany(filter: Bson, update: Bson, options: UpdateOptions = UpdateOptions()): UpdateResult =
+ wrapped.updateMany(filter, update, options).awaitSingle()
+
+ /**
+ * Update all documents in the collection according to the specified arguments.
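+ *
+ * A minimal usage sketch (assuming a `MongoClient` named `client`, a `MongoCollection<Document>` named
+ * `collection`, builders imported from `com.mongodb.client.model`, and that the session is closeable via `use`):
+ * ```
+ * client.startSession().use { session ->
+ *     collection.updateMany(session, Filters.eq("status", "open"), Updates.set("status", "closed"))
+ * }
+ * ```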
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @param filter a document describing the query filter, which may not be null.
+ * @param update a document describing the update, which may not be null. The update to apply must include only
+ * update operators.
+ * @param options the options to apply to the update operation
+ * @return the result of the update many operation
+ * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception
+ * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write
+ * concern
+ * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+ * @throws com.mongodb.MongoException if the write failed due to some other failure
+ * @see [Modify Documents](https://www.mongodb.com/docs/manual/tutorial/modify-documents/)
+ * @see [Update Operators](https://www.mongodb.com/docs/manual/reference/operator/update/)
+ */
+ public suspend fun updateMany(
+ clientSession: ClientSession,
+ filter: Bson,
+ update: Bson,
+ options: UpdateOptions = UpdateOptions()
+ ): UpdateResult = wrapped.updateMany(clientSession.wrapped, filter, update, options).awaitSingle()
+
+ /**
+ * Update all documents in the collection according to the specified arguments.
+ *
+ * @param filter a document describing the query filter, which may not be null.
+ * @param update a pipeline describing the update, which may not be null.
+ * @param options the options to apply to the update operation
+ * @return the result of the update many operation
+ * @throws com.mongodb.MongoWriteException if the write failed due to some other failure specific to the update command
+ * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern
+ * @throws com.mongodb.MongoException if the write failed due to some other failure
+ * @see [Modify Documents](https://www.mongodb.com/docs/manual/tutorial/modify-documents/)
+ * @see [Update Operators](https://www.mongodb.com/docs/manual/reference/operator/update/)
+ */
+ public suspend fun updateMany(
+ filter: Bson,
+ update: List<Bson>,
+ options: UpdateOptions = UpdateOptions()
+ ): UpdateResult = wrapped.updateMany(filter, update, options).awaitSingle()
+
+ /**
+ * Update all documents in the collection according to the specified arguments.
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @param filter a document describing the query filter, which may not be null.
+ * @param update a pipeline describing the update, which may not be null.
+ * @param options the options to apply to the update operation
+ * @return the result of the update many operation
+ * @throws com.mongodb.MongoWriteException if the write failed due to some other failure specific to the update command
+ * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern
+ * @throws com.mongodb.MongoException if the write failed due to some other failure
+ * @see [Modify Documents](https://www.mongodb.com/docs/manual/tutorial/modify-documents/)
+ * @see [Update Operators](https://www.mongodb.com/docs/manual/reference/operator/update/)
+ */
+ public suspend fun updateMany(
+ clientSession: ClientSession,
+ filter: Bson,
+ update: List<Bson>,
+ options: UpdateOptions = UpdateOptions()
+ ): UpdateResult = wrapped.updateMany(clientSession.wrapped, filter, update, options).awaitSingle()
+
+ /**
+ * Replace a document in the collection according to the specified arguments.
+ *
+ * Use this method to replace a document using the specified replacement argument. To update the document with
+ * update operators, use the corresponding [updateOne] method.
+ *
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ *
+ * @param filter the query filter to apply the replace operation
+ * @param replacement the replacement document
+ * @param options the options to apply to the replace operation
+ * @return the result of the replace one operation
+ * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception
+ * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write
+ * concern
+ * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+ * @throws com.mongodb.MongoException if the write failed due to some other failure
+ * @see [Modify Documents](https://www.mongodb.com/docs/manual/tutorial/modify-documents/#replace-the-document/)
+ * @see [Update Command Behaviors](https://www.mongodb.com/docs/manual/reference/command/update/)
+ * @since 3.6
+ */
+ public suspend fun replaceOne(
+ filter: Bson,
+ replacement: T,
+ options: ReplaceOptions = ReplaceOptions()
+ ): UpdateResult = wrapped.replaceOne(filter, replacement, options).awaitSingle()
+
+ /**
+ * Replace a document in the collection according to the specified arguments.
+ *
+ * Use this method to replace a document using the specified replacement argument. To update the document with
+ * update operators, use the corresponding [updateOne] method.
+ *
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
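+ *
+ * A minimal usage sketch of the session-less variant (the session overload simply takes the session as its first
+ * argument; `id` and the `Filters` builder import are assumptions):
+ * ```
+ * val result = collection.replaceOne(
+ *     Filters.eq("_id", id),
+ *     Document("_id", id).append("name", "updated"),
+ *     ReplaceOptions().upsert(true))
+ * ```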
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @param filter the query filter to apply the replace operation
+ * @param replacement the replacement document
+ * @param options the options to apply to the replace operation
+ * @return the result of the replace one operation
+ * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception
+ * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write
+ * concern
+ * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+ * @throws com.mongodb.MongoException if the write failed due to some other failure
+ * @see [Modify Documents](https://www.mongodb.com/docs/manual/tutorial/modify-documents/#replace-the-document/)
+ * @see [Update Command Behaviors](https://www.mongodb.com/docs/manual/reference/command/update/)
+ * @since 3.6
+ */
+ public suspend fun replaceOne(
+ clientSession: ClientSession,
+ filter: Bson,
+ replacement: T,
+ options: ReplaceOptions = ReplaceOptions()
+ ): UpdateResult = wrapped.replaceOne(clientSession.wrapped, filter, replacement, options).awaitSingle()
+
+ /**
+ * Removes at most one document from the collection that matches the given filter.
+ *
+ * If no documents match, the collection is not modified.
+ *
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ *
+ * @param filter the query filter to apply the delete operation
+ * @param options the options to apply to the delete operation
+ * @return the result of the remove one operation
+ * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception
+ * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write
+ * concern
+ * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+ * @throws com.mongodb.MongoException if the write failed due to some other failure
+ */
+ public suspend fun deleteOne(filter: Bson, options: DeleteOptions = DeleteOptions()): DeleteResult =
+ wrapped.deleteOne(filter, options).awaitSingle()
+
+ /**
+ * Removes at most one document from the collection that matches the given filter.
+ *
+ * If no documents match, the collection is not modified.
+ *
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @param filter the query filter to apply the delete operation
+ * @param options the options to apply to the delete operation
+ * @return the result of the remove one operation
+ * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception
+ * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write
+ * concern
+ * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+ * @throws com.mongodb.MongoException if the write failed due to some other failure
+ */
+ public suspend fun deleteOne(
+ clientSession: ClientSession,
+ filter: Bson,
+ options: DeleteOptions = DeleteOptions()
+ ): DeleteResult = wrapped.deleteOne(clientSession.wrapped, filter, options).awaitSingle()
+
+ /**
+ * Removes all documents from the collection that match the given query filter.
+ *
+ * If no documents match, the collection is not modified.
+ *
+ * @param filter the query filter to apply the delete operation
+ * @param options the options to apply to the delete operation
+ * @return the result of the remove many operation
+ * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception
+ * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write
+ * concern
+ * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+ * @throws com.mongodb.MongoException if the write failed due to some other failure
+ */
+ public suspend fun deleteMany(filter: Bson, options: DeleteOptions = DeleteOptions()): DeleteResult =
+ wrapped.deleteMany(filter, options).awaitSingle()
+
+ /**
+ * Removes all documents from the collection that match the given query filter.
+ *
+ * If no documents match, the collection is not modified.
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @param filter the query filter to apply the delete operation
+ * @param options the options to apply to the delete operation
+ * @return the result of the remove many operation
+ * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception
+ * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write
+ * concern
+ * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+ * @throws com.mongodb.MongoException if the write failed due to some other failure
+ */
+ public suspend fun deleteMany(
+ clientSession: ClientSession,
+ filter: Bson,
+ options: DeleteOptions = DeleteOptions()
+ ): DeleteResult = wrapped.deleteMany(clientSession.wrapped, filter, options).awaitSingle()
+
+ /**
+ * Executes a mix of inserts, updates, replaces, and deletes.
+ *
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ * The eligibility for retryable write support for bulk operations is determined on the whole bulk write. If the
+ * `requests` contain any `UpdateManyModels` or `DeleteManyModels` then the bulk operation will not support
+ * retryable writes.
+ *
+ * @param requests the writes to execute
+ * @param options the options to apply to the bulk write operation
+ * @return the result of the bulk write
+ * @throws com.mongodb.MongoBulkWriteException if there's an exception in the bulk write operation
+ * @throws com.mongodb.MongoException if there's an exception running the operation
+ */
+ public suspend fun bulkWrite(
+ requests: List<WriteModel<out T>>,
+ options: BulkWriteOptions = BulkWriteOptions()
+ ): BulkWriteResult = wrapped.bulkWrite(requests, options).awaitSingle()
+
+ /**
+ * Executes a mix of inserts, updates, replaces, and deletes.
+ *
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ * The eligibility for retryable write support for bulk operations is determined on the whole bulk write. If the
+ * `requests` contain any `UpdateManyModels` or `DeleteManyModels` then the bulk operation will not support
+ * retryable writes.
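+ *
+ * A minimal usage sketch of the session-less variant (the session overload simply takes the session as its first
+ * argument; model and builder imports from `com.mongodb.client.model` are assumed):
+ * ```
+ * val result = collection.bulkWrite(
+ *     listOf(
+ *         InsertOneModel(Document("x", 1)),
+ *         UpdateOneModel(Filters.eq("x", 2), Updates.inc("x", 1)),
+ *         DeleteOneModel(Filters.eq("x", 3))))
+ * println("${result.insertedCount} inserted, ${result.modifiedCount} modified, ${result.deletedCount} deleted")
+ * ```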
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @param requests the writes to execute
+ * @param options the options to apply to the bulk write operation
+ * @return the result of the bulk write
+ * @throws com.mongodb.MongoBulkWriteException if there's an exception in the bulk write operation
+ * @throws com.mongodb.MongoException if there's an exception running the operation
+ */
+ public suspend fun bulkWrite(
+ clientSession: ClientSession,
+ requests: List<WriteModel<out T>>,
+ options: BulkWriteOptions = BulkWriteOptions()
+ ): BulkWriteResult = wrapped.bulkWrite(clientSession.wrapped, requests, options).awaitSingle()
+
+ /**
+ * Atomically find a document and remove it.
+ *
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ *
+ * @param filter the query filter to find the document with
+ * @param options the options to apply to the operation
+ * @return the document that was removed. If no documents matched the query filter, then null will be returned
+ */
+ public suspend fun findOneAndDelete(
+ filter: Bson,
+ options: FindOneAndDeleteOptions = FindOneAndDeleteOptions()
+ ): T? = wrapped.findOneAndDelete(filter, options).awaitFirstOrNull()
+
+ /**
+ * Atomically find a document and remove it.
+ *
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @param filter the query filter to find the document with
+ * @param options the options to apply to the operation
+ * @return the document that was removed. If no documents matched the query filter, then null will be returned
+ */
+ public suspend fun findOneAndDelete(
+ clientSession: ClientSession,
+ filter: Bson,
+ options: FindOneAndDeleteOptions = FindOneAndDeleteOptions()
+ ): T? = wrapped.findOneAndDelete(clientSession.wrapped, filter, options).awaitFirstOrNull()
+
+ /**
+ * Atomically find a document and update it.
+ *
+ * Use this method to only update the corresponding fields in the document according to the update operators used in
+ * the update document. To replace the entire document with a new document, use the corresponding
+ * [findOneAndReplace] method.
+ *
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ *
+ * @param filter a document describing the query filter, which may not be null.
+ * @param update a document describing the update, which may not be null. The update to apply must include at least
+ * one update operator.
+ * @param options the options to apply to the operation
+ * @return the document that was updated. Depending on the value of the `returnOriginal` property, this will either
+ * be the document as it was before the update or as it is after the update. If no documents matched the query
+ * filter, then null will be returned
+ * @see [Update Command Behaviors](https://www.mongodb.com/docs/manual/reference/command/update/)
+ * @see com.mongodb.client.MongoCollection.findOneAndReplace
+ */
+ public suspend fun findOneAndUpdate(
+ filter: Bson,
+ update: Bson,
+ options: FindOneAndUpdateOptions = FindOneAndUpdateOptions()
+ ): T? = wrapped.findOneAndUpdate(filter, update, options).awaitFirstOrNull()
+
+ /**
+ * Atomically find a document and update it.
+ *
+ * Use this method to only update the corresponding fields in the document according to the update operators used in
+ * the update document. To replace the entire document with a new document, use the corresponding
+ * [findOneAndReplace] method.
+ *
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @param filter a document describing the query filter, which may not be null.
+ * @param update a document describing the update, which may not be null. The update to apply must include at least
+ * one update operator.
+ * @param options the options to apply to the operation
+ * @return the document that was updated. Depending on the value of the `returnOriginal` property, this will either
+ * be the document as it was before the update or as it is after the update. If no documents matched the query
+ * filter, then null will be returned
+ * @see [Update Command Behaviors](https://www.mongodb.com/docs/manual/reference/command/update/)
+ * @see com.mongodb.client.MongoCollection.findOneAndReplace
+ */
+ public suspend fun findOneAndUpdate(
+ clientSession: ClientSession,
+ filter: Bson,
+ update: Bson,
+ options: FindOneAndUpdateOptions = FindOneAndUpdateOptions()
+ ): T? = wrapped.findOneAndUpdate(clientSession.wrapped, filter, update, options).awaitFirstOrNull()
+
+ /**
+ * Atomically find a document and update it.
+ *
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ *
+ * @param filter a document describing the query filter, which may not be null.
+ * @param update a pipeline describing the update, which may not be null.
+ * @param options the options to apply to the operation
+ * @return the document that was updated. Depending on the value of the `returnOriginal` property, this will either
+ * be the document as it was before the update or as it is after the update. If no documents matched the query
+ * filter, then null will be returned
+ */
+ public suspend fun findOneAndUpdate(
+ filter: Bson,
+ update: List<Bson>,
+ options: FindOneAndUpdateOptions = FindOneAndUpdateOptions()
+ ): T? = wrapped.findOneAndUpdate(filter, update, options).awaitFirstOrNull()
+
+ /**
+ * Atomically find a document and update it.
+ *
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @param filter a document describing the query filter, which may not be null.
+ * @param update a pipeline describing the update, which may not be null.
+ * @param options the options to apply to the operation
+ * @return the document that was updated. Depending on the value of the `returnOriginal` property, this will either
+ * be the document as it was before the update or as it is after the update. If no documents matched the query
+ * filter, then null will be returned
+ */
+ public suspend fun findOneAndUpdate(
+ clientSession: ClientSession,
+ filter: Bson,
+ update: List<Bson>,
+ options: FindOneAndUpdateOptions = FindOneAndUpdateOptions()
+ ): T? = wrapped.findOneAndUpdate(clientSession.wrapped, filter, update, options).awaitFirstOrNull()
+
+ /**
+ * Atomically find a document and replace it.
+ *
+ * Use this method to replace a document using the specified replacement argument.
To update the document with + * update operators, use the corresponding [findOneAndUpdate] method. + * + * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled. + * + * @param filter the query filter to apply the replace operation + * @param replacement the replacement document + * @param options the options to apply to the operation + * @return the document that was replaced. Depending on the value of the `returnOriginal` property, this will either + * be the document as it was before the update or as it is after the update. If no documents matched the query + * filter, then null will be returned + * @see [Update Command Behaviors](https://www.mongodb.com/docs/manual/reference/command/update/) + */ + public suspend fun findOneAndReplace( + filter: Bson, + replacement: T, + options: FindOneAndReplaceOptions = FindOneAndReplaceOptions() + ): T? = wrapped.findOneAndReplace(filter, replacement, options).awaitFirstOrNull() + + /** + * Atomically find a document and replace it. + * + * Use this method to replace a document using the specified replacement argument. To update the document with + * update operators, use the corresponding [findOneAndUpdate] method. + * + * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled. + * + * @param clientSession the client session with which to associate this operation + * @param filter the query filter to apply the replace operation + * @param replacement the replacement document + * @param options the options to apply to the operation + * @return the document that was replaced. Depending on the value of the `returnOriginal` property, this will either + * be the document as it was before the update or as it is after the update. If no documents matched the query + * filter, then null will be returned + * @see [Update Command Behaviors](https://www.mongodb.com/docs/manual/reference/command/update/) + */ + public suspend fun findOneAndReplace( + clientSession: ClientSession, + filter: Bson, + replacement: T, + options: FindOneAndReplaceOptions = FindOneAndReplaceOptions() + ): T? = wrapped.findOneAndReplace(clientSession.wrapped, filter, replacement, options).awaitFirstOrNull() + + /** + * Drops this collection from the Database. + * + * @param options various options for dropping the collection + * @see [Drop Collection](https://www.mongodb.com/docs/manual/reference/command/drop/) + */ + public suspend fun drop(options: DropCollectionOptions = DropCollectionOptions()) { + wrapped.drop(options).awaitFirstOrNull() + } + /** + * Drops this collection from the Database. + * + * @param clientSession the client session with which to associate this operation + * @param options various options for dropping the collection + * @see [Drop Collection](https://www.mongodb.com/docs/manual/reference/command/drop/) + */ + public suspend fun drop(clientSession: ClientSession, options: DropCollectionOptions = DropCollectionOptions()) { + wrapped.drop(clientSession.wrapped, options).awaitFirstOrNull() + } + + /** + * Create an Atlas Search index for the collection. + * + * @param indexName the name of the search index to create. + * @param definition the search index mapping definition. + * @return the search index name. 
+ * @see [Create search indexes](https://www.mongodb.com/docs/manual/reference/command/createSearchIndexes/) + */ + public suspend fun createSearchIndex(indexName: String, definition: Bson): String = + wrapped.createSearchIndex(indexName, definition).awaitSingle() + + /** + * Create an Atlas Search index with `default` name for the collection. + * + * @param definition the search index mapping definition. + * @return the search index name. + * @see [Create search indexes](https://www.mongodb.com/docs/manual/reference/command/createSearchIndexes/) + */ + public suspend fun createSearchIndex(definition: Bson): String = wrapped.createSearchIndex(definition).awaitSingle() + + /** + * Create one or more Atlas Search indexes for the collection. + * + *
+ * The name can be omitted for a single index, in which case a name will be `default`.
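+ *
+ * A minimal usage sketch (the mapping definition shape is an assumption; `toList` is from
+ * `kotlinx.coroutines.flow`):
+ * ```
+ * val names = collection.createSearchIndexes(
+ *     listOf(SearchIndexModel("default", Document("mappings", Document("dynamic", true)))))
+ *     .toList()
+ * ```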
+ *
+ * @param searchIndexModels the search index models.
+ * @return the search index names in the order specified by the given list of [SearchIndexModel]s.
+ * @see [Create search indexes](https://www.mongodb.com/docs/manual/reference/command/createSearchIndexes/)
+ */
+ public fun createSearchIndexes(searchIndexModels: List<SearchIndexModel>): Flow<String> =
+ wrapped.createSearchIndexes(searchIndexModels).asFlow()
+
+ /**
+ * Update an Atlas Search index in the collection.
+ *
+ * @param indexName the name of the search index to update.
+ * @param definition the search index mapping definition.
+ * @see [Update search index](https://www.mongodb.com/docs/manual/reference/command/updateSearchIndex/)
+ */
+ public suspend fun updateSearchIndex(indexName: String, definition: Bson) {
+ wrapped.updateSearchIndex(indexName, definition).awaitSingle()
+ }
+
+ /**
+ * Drop an Atlas Search index given its name.
+ *
+ * @param indexName the name of the search index to drop.
+ * @see [Drop search index](https://www.mongodb.com/docs/manual/reference/command/dropSearchIndex/)
+ */
+ public suspend fun dropSearchIndex(indexName: String) {
+ wrapped.dropSearchIndex(indexName).awaitSingle()
+ }
+
+ /**
+ * Get all the Atlas Search indexes in this collection.
+ *
+ * @return the list search indexes iterable interface.
+ * @see [List search indexes](https://www.mongodb.com/docs/manual/reference/operator/aggregation/listSearchIndexes)
+ */
+ @JvmName("listSearchIndexesAsDocument")
+ public fun listSearchIndexes(): ListSearchIndexesFlow<Document> = listSearchIndexes<Document>()
+
+ /**
+ * Get all the Atlas Search indexes in this collection.
+ *
+ * @param R the class to decode each document into.
+ * @param resultClass the target document type of the iterable.
+ * @return the list search indexes iterable interface.
+ * @see [List search indexes](https://www.mongodb.com/docs/manual/reference/operator/aggregation/listSearchIndexes)
+ */
+ public fun <R : Any> listSearchIndexes(resultClass: Class<R>): ListSearchIndexesFlow<R> =
+ ListSearchIndexesFlow(wrapped.listSearchIndexes(resultClass))
+
+ /**
+ * Get all the Atlas Search indexes in this collection.
+ *
+ * @param R the class to decode each document into.
+ * @return the list search indexes iterable interface.
+ * @see [List search indexes](https://www.mongodb.com/docs/manual/reference/operator/aggregation/listSearchIndexes)
+ */
+ public inline fun <reified R : Any> listSearchIndexes(): ListSearchIndexesFlow<R> = listSearchIndexes(R::class.java)
+
+ /**
+ * Create an index with the given keys and options.
+ *
+ * @param keys an object describing the index key(s), which may not be null.
+ * @param options the options for the index
+ * @return the index name
+ * @see [Create indexes](https://www.mongodb.com/docs/manual/reference/command/createIndexes/)
+ */
+ public suspend fun createIndex(keys: Bson, options: IndexOptions = IndexOptions()): String =
+ wrapped.createIndex(keys, options).awaitSingle()
+
+ /**
+ * Create an index with the given keys and options.
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @param keys an object describing the index key(s), which may not be null.
+ * @param options the options for the index
+ * @return the index name
+ * @see [Create indexes](https://www.mongodb.com/docs/manual/reference/command/createIndexes/)
+ */
+ public suspend fun createIndex(
+ clientSession: ClientSession,
+ keys: Bson,
+ options: IndexOptions = IndexOptions()
+ ): String = wrapped.createIndex(clientSession.wrapped, keys, options).awaitSingle()
+
+ /**
+ * Create multiple indexes.
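+ *
+ * A minimal usage sketch (assuming `IndexModel`, `Indexes`, and `IndexOptions` from `com.mongodb.client.model`):
+ * ```
+ * val names = collection.createIndexes(
+ *     listOf(
+ *         IndexModel(Indexes.ascending("email"), IndexOptions().unique(true)),
+ *         IndexModel(Indexes.descending("createdAt"))))
+ *     .toList()
+ * ```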
+ *
+ * @param indexes the list of indexes
+ * @param options options to use when creating indexes
+ * @return the list of index names
+ * @see [Create indexes](https://www.mongodb.com/docs/manual/reference/command/createIndexes/)
+ */
+ public fun createIndexes(
+ indexes: List<IndexModel>,
+ options: CreateIndexOptions = CreateIndexOptions()
+ ): Flow<String> = wrapped.createIndexes(indexes, options).asFlow()
+
+ /**
+ * Create multiple indexes.
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @param indexes the list of indexes
+ * @param options options to use when creating indexes
+ * @return the list of index names
+ * @see [Create indexes](https://www.mongodb.com/docs/manual/reference/command/createIndexes/)
+ */
+ public fun createIndexes(
+ clientSession: ClientSession,
+ indexes: List<IndexModel>,
+ options: CreateIndexOptions = CreateIndexOptions()
+ ): Flow<String> = wrapped.createIndexes(clientSession.wrapped, indexes, options).asFlow()
+
+ /**
+ * Get all the indexes in this collection.
+ *
+ * @return the list indexes iterable interface
+ * @see [List indexes](https://www.mongodb.com/docs/manual/reference/command/listIndexes/)
+ */
+ @JvmName("listIndexesAsDocument") public fun listIndexes(): ListIndexesFlow<Document> = listIndexes<Document>()
+
+ /**
+ * Get all the indexes in this collection.
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @return the list indexes iterable interface
+ * @see [List indexes](https://www.mongodb.com/docs/manual/reference/command/listIndexes/)
+ */
+ @JvmName("listIndexesAsDocumentWithSession")
+ public fun listIndexes(clientSession: ClientSession): ListIndexesFlow<Document> =
+ listIndexes<Document>(clientSession)
+
+ /**
+ * Get all the indexes in this collection.
+ *
+ * @param R the class to decode each document into
+ * @param resultClass the target document type of the iterable.
+ * @return the list indexes iterable interface
+ * @see [List indexes](https://www.mongodb.com/docs/manual/reference/command/listIndexes/)
+ */
+ public fun <R : Any> listIndexes(resultClass: Class<R>): ListIndexesFlow<R> =
+ ListIndexesFlow(wrapped.listIndexes(resultClass))
+
+ /**
+ * Get all the indexes in this collection.
+ *
+ * @param R the class to decode each document into
+ * @param clientSession the client session with which to associate this operation
+ * @param resultClass the target document type of the iterable.
+ * @return the list indexes iterable interface
+ * @see [List indexes](https://www.mongodb.com/docs/manual/reference/command/listIndexes/)
+ */
+ public fun <R : Any> listIndexes(clientSession: ClientSession, resultClass: Class<R>): ListIndexesFlow<R> =
+ ListIndexesFlow(wrapped.listIndexes(clientSession.wrapped, resultClass))
+
+ /**
+ * Get all the indexes in this collection.
+ *
+ * @param R the class to decode each document into
+ * @return the list indexes iterable interface
+ * @see [List indexes](https://www.mongodb.com/docs/manual/reference/command/listIndexes/)
+ */
+ public inline fun <reified R : Any> listIndexes(): ListIndexesFlow<R> = listIndexes(R::class.java)
+
+ /**
+ * Get all the indexes in this collection.
+ *
+ * @param R the class to decode each document into
+ * @param clientSession the client session with which to associate this operation
+ * @return the list indexes iterable interface
+ * @see [List indexes](https://www.mongodb.com/docs/manual/reference/command/listIndexes/)
+ */
+ public inline fun <reified R : Any> listIndexes(clientSession: ClientSession): ListIndexesFlow<R> =
+ listIndexes(clientSession, R::class.java)
+
+ /**
+ * Drops the index given its name.
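+ *
+ * A minimal usage sketch (the index name is an assumption; an overload below drops by key pattern instead):
+ * ```
+ * collection.dropIndex("email_1")
+ * ```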
+ * + * @param indexName the name of the index to remove + * @param options the options to use when dropping indexes + * @see [Drop indexes](https://www.mongodb.com/docs/manual/reference/command/dropIndexes/) + */ + public suspend fun dropIndex(indexName: String, options: DropIndexOptions = DropIndexOptions()) { + wrapped.dropIndex(indexName, options).awaitFirstOrNull() + } + + /** + * Drops the index given the keys used to create it. + * + * @param keys the keys of the index to remove + * @param options the options to use when dropping indexes + * @see [Drop indexes](https://www.mongodb.com/docs/manual/reference/command/dropIndexes/) + */ + public suspend fun dropIndex(keys: Bson, options: DropIndexOptions = DropIndexOptions()) { + wrapped.dropIndex(keys, options).awaitFirstOrNull() + } + + /** + * Drops the index given its name. + * + * @param clientSession the client session with which to associate this operation + * @param indexName the name of the index to remove + * @param options the options to use when dropping indexes + * @see [Drop indexes](https://www.mongodb.com/docs/manual/reference/command/dropIndexes/) + */ + public suspend fun dropIndex( + clientSession: ClientSession, + indexName: String, + options: DropIndexOptions = DropIndexOptions() + ) { + wrapped.dropIndex(clientSession.wrapped, indexName, options).awaitFirstOrNull() + } + + /** + * Drops the index given the keys used to create it. + * + * @param clientSession the client session with which to associate this operation + * @param keys the keys of the index to remove + * @param options the options to use when dropping indexes + * @see [Drop indexes](https://www.mongodb.com/docs/manual/reference/command/dropIndexes/) + */ + public suspend fun dropIndex( + clientSession: ClientSession, + keys: Bson, + options: DropIndexOptions = DropIndexOptions() + ) { + wrapped.dropIndex(clientSession.wrapped, keys, options).awaitFirstOrNull() + } + + /** + * Drop all the indexes on this collection, except for the default on `_id`. + * + * @param options the options to use when dropping indexes + * @see [Drop indexes](https://www.mongodb.com/docs/manual/reference/command/dropIndexes/) + */ + public suspend fun dropIndexes(options: DropIndexOptions = DropIndexOptions()) { + wrapped.dropIndexes(options).awaitFirstOrNull() + } + + /** + * Drop all the indexes on this collection, except for the default on `_id`. + * + * @param clientSession the client session with which to associate this operation + * @param options the options to use when dropping indexes + * @see [Drop indexes](https://www.mongodb.com/docs/manual/reference/command/dropIndexes/) + */ + public suspend fun dropIndexes(clientSession: ClientSession, options: DropIndexOptions = DropIndexOptions()) { + wrapped.dropIndexes(clientSession.wrapped, options).awaitFirstOrNull() + } + + /** + * Rename the collection with oldCollectionName to the newCollectionName. 
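+ *
+ * A minimal usage sketch (the namespace values are assumptions):
+ * ```
+ * collection.renameCollection(MongoNamespace("mydb", "newName"), RenameCollectionOptions().dropTarget(false))
+ * ```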
+ * + * @param newCollectionNamespace the name the collection will be renamed to + * @param options the options for renaming a collection + * @throws com.mongodb.MongoServerException if you provide a newCollectionName that is the name of an existing + * collection and dropTarget is false, or if the oldCollectionName is the name of a collection that doesn't exist + * @see [Rename collection](https://www.mongodb.com/docs/manual/reference/command/renameCollection/) + */ + public suspend fun renameCollection( + newCollectionNamespace: MongoNamespace, + options: RenameCollectionOptions = RenameCollectionOptions() + ) { + wrapped.renameCollection(newCollectionNamespace, options).awaitFirstOrNull() + } + + /** + * Rename the collection with oldCollectionName to the newCollectionName. + * + * @param clientSession the client session with which to associate this operation + * @param newCollectionNamespace the name the collection will be renamed to + * @param options the options for renaming a collection + * @throws com.mongodb.MongoServerException if you provide a newCollectionName that is the name of an existing + * collection and dropTarget is false, or if the oldCollectionName is the name of a collection that doesn't exist + * @see [Rename collection](https://www.mongodb.com/docs/manual/reference/command/renameCollection/) + * @since 3.6 + */ + public suspend fun renameCollection( + clientSession: ClientSession, + newCollectionNamespace: MongoNamespace, + options: RenameCollectionOptions = RenameCollectionOptions() + ) { + wrapped.renameCollection(clientSession.wrapped, newCollectionNamespace, options).awaitFirstOrNull() + } +} + +/** + * maxTime extension function + * + * @param maxTime time in milliseconds + * @return the options + */ +public fun CreateIndexOptions.maxTime(maxTime: Long): CreateIndexOptions = + this.apply { maxTime(maxTime, TimeUnit.MILLISECONDS) } +/** + * maxTime extension function + * + * @param maxTime time in milliseconds + * @return the options + */ +public fun CountOptions.maxTime(maxTime: Long): CountOptions = this.apply { maxTime(maxTime, TimeUnit.MILLISECONDS) } +/** + * maxTime extension function + * + * @param maxTime time in milliseconds + * @return the options + */ +public fun DropIndexOptions.maxTime(maxTime: Long): DropIndexOptions = + this.apply { maxTime(maxTime, TimeUnit.MILLISECONDS) } +/** + * maxTime extension function + * + * @param maxTime time in milliseconds + * @return the options + */ +public fun EstimatedDocumentCountOptions.maxTime(maxTime: Long): EstimatedDocumentCountOptions = + this.apply { maxTime(maxTime, TimeUnit.MILLISECONDS) } +/** + * maxTime extension function + * + * @param maxTime time in milliseconds + * @return the options + */ +public fun FindOneAndDeleteOptions.maxTime(maxTime: Long): FindOneAndDeleteOptions = + this.apply { maxTime(maxTime, TimeUnit.MILLISECONDS) } +/** + * maxTime extension function + * + * @param maxTime time in milliseconds + * @return the options + */ +public fun FindOneAndReplaceOptions.maxTime(maxTime: Long): FindOneAndReplaceOptions = + this.apply { maxTime(maxTime, TimeUnit.MILLISECONDS) } +/** + * maxTime extension function + * + * @param maxTime time in milliseconds + * @return the options + */ +public fun FindOneAndUpdateOptions.maxTime(maxTime: Long): FindOneAndUpdateOptions = + this.apply { maxTime(maxTime, TimeUnit.MILLISECONDS) } +/** + * expireAfter extension function + * + * @param expireAfter time in seconds + * @return the options + */ +public fun IndexOptions.expireAfter(expireAfter: Long): 
IndexOptions = + this.apply { expireAfter(expireAfter, TimeUnit.SECONDS) } diff --git a/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabase.kt b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabase.kt new file mode 100644 index 00000000000..007251bab31 --- /dev/null +++ b/driver-kotlin-coroutine/src/main/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabase.kt @@ -0,0 +1,599 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.kotlin.client.coroutine + +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import com.mongodb.WriteConcern +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason +import com.mongodb.client.model.CreateCollectionOptions +import com.mongodb.client.model.CreateViewOptions +import com.mongodb.reactivestreams.client.MongoDatabase as JMongoDatabase +import java.util.concurrent.TimeUnit +import kotlinx.coroutines.reactive.awaitFirstOrNull +import kotlinx.coroutines.reactive.awaitSingle +import org.bson.Document +import org.bson.codecs.configuration.CodecRegistry +import org.bson.conversions.Bson + +/** The MongoDatabase representation. */ +public class MongoDatabase(private val wrapped: JMongoDatabase) { + + /** The name of the database. */ + public val name: String + get() = wrapped.name + + /** The codec registry for the database. */ + public val codecRegistry: CodecRegistry + get() = wrapped.codecRegistry + + /** The read preference for the database. */ + public val readPreference: ReadPreference + get() = wrapped.readPreference + + /** + * The read concern for the database. + * + * @see [Read Concern](https://www.mongodb.com/docs/manual/reference/readConcern/) + */ + public val readConcern: ReadConcern + get() = wrapped.readConcern + + /** The write concern for the database. */ + public val writeConcern: WriteConcern + get() = wrapped.writeConcern + + /** + * The time limit for the full execution of an operation. + * + * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`, + * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`. + * - `null` means that the timeout mechanism for operations will defer to using: + * - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to + * become available + * - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out. + * - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out. + * - `maxTimeMS`: The time limit for processing operations on a cursor. See: + * [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS"). + * - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute. + * - `0` means infinite timeout. + * - `> 0` The time limit to use for the full execution of an operation. 
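+ *
+ * A minimal usage sketch (assuming a `MongoClient` named `client`):
+ * ```
+ * val database = client.getDatabase("test").withTimeout(5, TimeUnit.SECONDS)
+ * println(database.timeout())                 // 5000, milliseconds by default
+ * println(database.timeout(TimeUnit.SECONDS)) // 5
+ * ```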
+ *
+ * @return the optional timeout duration
+ * @since 5.2
+ */
+ @Alpha(Reason.CLIENT)
+ public fun timeout(timeUnit: TimeUnit = TimeUnit.MILLISECONDS): Long? = wrapped.getTimeout(timeUnit)
+
+ /**
+ * Create a new MongoDatabase instance with a different codec registry.
+ *
+ * The [CodecRegistry] configured by this method is effectively treated by the driver as an instance of
+ * [org.bson.codecs.configuration.CodecProvider], which [CodecRegistry] extends. So there is no benefit to defining
+ * a class that implements [CodecRegistry]. Rather, an application should always create [CodecRegistry] instances
+ * using the factory methods in [org.bson.codecs.configuration.CodecRegistries].
+ *
+ * @param newCodecRegistry the new [org.bson.codecs.configuration.CodecRegistry] for the database
+ * @return a new MongoDatabase instance with the different codec registry
+ * @see org.bson.codecs.configuration.CodecRegistries
+ */
+ public fun withCodecRegistry(newCodecRegistry: CodecRegistry): MongoDatabase =
+ MongoDatabase(wrapped.withCodecRegistry(newCodecRegistry))
+
+ /**
+ * Create a new MongoDatabase instance with a different read preference.
+ *
+ * @param newReadPreference the new [ReadPreference] for the database
+ * @return a new MongoDatabase instance with the different readPreference
+ */
+ public fun withReadPreference(newReadPreference: ReadPreference): MongoDatabase =
+ MongoDatabase(wrapped.withReadPreference(newReadPreference))
+
+ /**
+ * Create a new MongoDatabase instance with a different read concern.
+ *
+ * @param newReadConcern the new [ReadConcern] for the database
+ * @return a new MongoDatabase instance with the different ReadConcern
+ * @see [Read Concern](https://www.mongodb.com/docs/manual/reference/readConcern/)
+ */
+ public fun withReadConcern(newReadConcern: ReadConcern): MongoDatabase =
+ MongoDatabase(wrapped.withReadConcern(newReadConcern))
+
+ /**
+ * Create a new MongoDatabase instance with a different write concern.
+ *
+ * @param newWriteConcern the new [WriteConcern] for the database
+ * @return a new MongoDatabase instance with the different writeConcern
+ */
+ public fun withWriteConcern(newWriteConcern: WriteConcern): MongoDatabase =
+ MongoDatabase(wrapped.withWriteConcern(newWriteConcern))
+
+ /**
+ * Create a new MongoDatabase instance with the set time limit for the full execution of an operation.
+ * - `0` means an infinite timeout
+ * - `> 0` The time limit to use for the full execution of an operation.
+ *
+ * @param timeout the timeout, which must be greater than or equal to 0
+ * @param timeUnit the time unit, defaults to Milliseconds
+ * @return a new MongoDatabase instance with the set time limit for operations
+ * @see [MongoDatabase.timeout]
+ * @since 5.2
+ */
+ @Alpha(Reason.CLIENT)
+ public fun withTimeout(timeout: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): MongoDatabase =
+ MongoDatabase(wrapped.withTimeout(timeout, timeUnit))
+
+ /**
+ * Gets a collection.
+ *
+ * @param T the default class to convert documents returned from the collection into.
+ * @param collectionName the name of the collection to return
+ * @param resultClass the target document type for the collection
+ * @return the collection
+ */
+ public fun <T : Any> getCollection(collectionName: String, resultClass: Class<T>): MongoCollection<T> =
+ MongoCollection(wrapped.getCollection(collectionName, resultClass))
+
+ /**
+ * Gets a collection.
+ *
+ * @param T the default class to convert documents returned from the collection into.
+ * @param collectionName the name of the collection to return
+ * @return the collection
+ */
+ public inline fun <reified T : Any> getCollection(collectionName: String): MongoCollection<T> =
+ getCollection(collectionName, T::class.java)
+
+ /**
+ * Executes the given command in the context of the current database with the given read preference.
+ *
+ * @param command the command to be run
+ * @param readPreference the [ReadPreference] to be used when executing the command, defaults to
+ * [MongoDatabase.readPreference]
+ * @return the command result
+ */
+ @JvmName("runCommandDocument")
+ public suspend fun runCommand(command: Bson, readPreference: ReadPreference = this.readPreference): Document =
+ runCommand<Document>(command, readPreference)
+
+ /**
+ * Executes the given command in the context of the current database with the given read preference.
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @param command the command to be run
+ * @param readPreference the [ReadPreference] to be used when executing the command, defaults to
+ * [MongoDatabase.readPreference]
+ * @return the command result
+ */
+ @JvmName("runCommandDocumentWithSession")
+ public suspend fun runCommand(
+ clientSession: ClientSession,
+ command: Bson,
+ readPreference: ReadPreference = this.readPreference
+ ): Document = runCommand<Document>(clientSession, command, readPreference)
+
+ /**
+ * Executes the given command in the context of the current database with the given read preference.
+ *
+ * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and
+ * the `timeoutMS` setting has been set.
+ *
+ * @param T the class to decode each document into
+ * @param command the command to be run
+ * @param readPreference the [ReadPreference] to be used when executing the command, defaults to
+ * [MongoDatabase.readPreference]
+ * @param resultClass the target document class
+ * @return the command result
+ */
+ public suspend fun <T : Any> runCommand(
+ command: Bson,
+ readPreference: ReadPreference = this.readPreference,
+ resultClass: Class<T>
+ ): T = wrapped.runCommand(command, readPreference, resultClass).awaitSingle()
+
+ /**
+ * Executes the given command in the context of the current database with the given read preference.
+ *
+ * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and
+ * the `timeoutMS` setting has been set.
+ *
+ * @param T the class to decode each document into
+ * @param clientSession the client session with which to associate this operation
+ * @param command the command to be run
+ * @param readPreference the [ReadPreference] to be used when executing the command, defaults to
+ * [MongoDatabase.readPreference]
+ * @param resultClass the target document class
+ * @return the command result
+ */
+ public suspend fun <T : Any> runCommand(
+ clientSession: ClientSession,
+ command: Bson,
+ readPreference: ReadPreference = this.readPreference,
+ resultClass: Class<T>
+ ): T = wrapped.runCommand(clientSession.wrapped, command, readPreference, resultClass).awaitSingle()
+
+ /**
+ * Executes the given command in the context of the current database with the given read preference.
+ *
+ * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and
+ * the `timeoutMS` setting has been set.
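+ *
+ * A minimal usage sketch (assuming a surrounding coroutine):
+ * ```
+ * val reply = database.runCommand<Document>(Document("ping", 1))
+ * println(reply["ok"]) // 1.0 on success
+ * ```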
+ *
+ * @param T the class to decode each document into
+ * @param command the command to be run
+ * @param readPreference the [ReadPreference] to be used when executing the command, defaults to
+ * [MongoDatabase.readPreference]
+ * @return the command result
+ */
+ public suspend inline fun <reified T : Any> runCommand(
+ command: Bson,
+ readPreference: ReadPreference = this.readPreference
+ ): T = runCommand(command, readPreference, T::class.java)
+
+ /**
+ * Executes the given command in the context of the current database with the given read preference.
+ *
+ * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and
+ * the `timeoutMS` setting has been set.
+ *
+ * @param T the class to decode each document into
+ * @param clientSession the client session with which to associate this operation
+ * @param command the command to be run
+ * @param readPreference the [ReadPreference] to be used when executing the command, defaults to
+ * [MongoDatabase.readPreference]
+ * @return the command result
+ */
+ public suspend inline fun <reified T : Any> runCommand(
+ clientSession: ClientSession,
+ command: Bson,
+ readPreference: ReadPreference = this.readPreference
+ ): T = runCommand(clientSession, command, readPreference, T::class.java)
+
+ /**
+ * Drops this database.
+ *
+ * @see [Drop database](https://www.mongodb.com/docs/manual/reference/command/dropDatabase/#dbcmd.dropDatabase)
+ */
+ public suspend fun drop() {
+ wrapped.drop().awaitFirstOrNull()
+ }
+
+ /**
+ * Drops this database.
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @see [Drop database](https://www.mongodb.com/docs/manual/reference/command/dropDatabase/#dbcmd.dropDatabase)
+ */
+ public suspend fun drop(clientSession: ClientSession) {
+ wrapped.drop(clientSession.wrapped).awaitFirstOrNull()
+ }
+
+ /**
+ * Gets the names of all the collections in this database.
+ *
+ * @return an iterable containing all the names of all the collections in this database
+ * @see [listCollections](https://www.mongodb.com/docs/manual/reference/command/listCollections)
+ */
+ public fun listCollectionNames(): ListCollectionNamesFlow = ListCollectionNamesFlow(wrapped.listCollectionNames())
+
+ /**
+ * Gets the names of all the collections in this database.
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @return an iterable containing all the names of all the collections in this database
+ * @see [listCollections](https://www.mongodb.com/docs/manual/reference/command/listCollections)
+ */
+ public fun listCollectionNames(clientSession: ClientSession): ListCollectionNamesFlow =
+ ListCollectionNamesFlow(wrapped.listCollectionNames(clientSession.wrapped))
+
+ /**
+ * Gets all the collections in this database.
+ *
+ * @return the list collections iterable interface
+ * @see [listCollections](https://www.mongodb.com/docs/manual/reference/command/listCollections)
+ */
+ @JvmName("listCollectionsAsDocument")
+ public fun listCollections(): ListCollectionsFlow<Document> = listCollections<Document>()
+
+ /**
+ * Gets all the collections in this database.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @return the list collections iterable interface
+     * @see [listCollections](https://www.mongodb.com/docs/manual/reference/command/listCollections)
+     */
+    @JvmName("listCollectionsAsDocumentWithSession")
+    public fun listCollections(clientSession: ClientSession): ListCollectionsFlow<Document> =
+        listCollections<Document>(clientSession)
+
+    /**
+     * Gets all the collections in this database.
+     *
+     * @param T the type of the class to use
+     * @param resultClass the target document type of the iterable.
+     * @return the list collections iterable interface
+     * @see [listCollections](https://www.mongodb.com/docs/manual/reference/command/listCollections)
+     */
+    public fun <T : Any> listCollections(resultClass: Class<T>): ListCollectionsFlow<T> =
+        ListCollectionsFlow(wrapped.listCollections(resultClass))
+
+    /**
+     * Gets all the collections in this database.
+     *
+     * @param T the type of the class to use
+     * @param clientSession the client session with which to associate this operation
+     * @param resultClass the target document type of the iterable.
+     * @return the list collections iterable interface
+     * @see [listCollections](https://www.mongodb.com/docs/manual/reference/command/listCollections)
+     */
+    public fun <T : Any> listCollections(clientSession: ClientSession, resultClass: Class<T>): ListCollectionsFlow<T> =
+        ListCollectionsFlow(wrapped.listCollections(clientSession.wrapped, resultClass))
+
+    /**
+     * Gets all the collections in this database.
+     *
+     * @param T the type of the class to use
+     * @return the list collections iterable interface
+     * @see [listCollections](https://www.mongodb.com/docs/manual/reference/command/listCollections)
+     */
+    public inline fun <reified T : Any> listCollections(): ListCollectionsFlow<T> = listCollections(T::class.java)
+
+    /**
+     * Gets all the collections in this database.
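+     *
+     * For example, decoding into `BsonDocument` (illustrative; `database` and `session` are assumed to exist):
+     * ```
+     * val flow: ListCollectionsFlow<BsonDocument> = database.listCollections<BsonDocument>(session)
+     * ```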
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param T the type of the class to use
+     * @return the list collections iterable interface
+     * @see [listCollections](https://www.mongodb.com/docs/manual/reference/command/listCollections)
+     */
+    public inline fun <reified T : Any> listCollections(clientSession: ClientSession): ListCollectionsFlow<T> =
+        listCollections(clientSession, T::class.java)
+
+    /**
+     * Create a new collection with the selected options.
+     *
+     * @param collectionName the name for the new collection to create
+     * @param createCollectionOptions various options for creating the collection
+     * @see [Create Command](https://www.mongodb.com/docs/manual/reference/command/create)
+     */
+    public suspend fun createCollection(
+        collectionName: String,
+        createCollectionOptions: CreateCollectionOptions = CreateCollectionOptions()
+    ) {
+        wrapped.createCollection(collectionName, createCollectionOptions).awaitFirstOrNull()
+    }
+
+    /**
+     * Create a new collection with the selected options.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param collectionName the name for the new collection to create
+     * @param createCollectionOptions various options for creating the collection
+     * @see [Create Command](https://www.mongodb.com/docs/manual/reference/command/create)
+     */
+    public suspend fun createCollection(
+        clientSession: ClientSession,
+        collectionName: String,
+        createCollectionOptions: CreateCollectionOptions = CreateCollectionOptions()
+    ) {
+        wrapped.createCollection(clientSession.wrapped, collectionName, createCollectionOptions).awaitFirstOrNull()
+    }
+
+    /**
+     * Creates a view with the given name, backing collection/view name, aggregation pipeline, and options that define
+     * the view.
+     *
+     * @param viewName the name of the view to create
+     * @param viewOn the backing collection/view for the view
+     * @param pipeline the pipeline that defines the view
+     * @param createViewOptions various options for creating the view
+     * @see [Create Command](https://www.mongodb.com/docs/manual/reference/command/create)
+     */
+    public suspend fun createView(
+        viewName: String,
+        viewOn: String,
+        pipeline: List<Bson>,
+        createViewOptions: CreateViewOptions = CreateViewOptions()
+    ) {
+        wrapped.createView(viewName, viewOn, pipeline, createViewOptions).awaitFirstOrNull()
+    }
+
+    /**
+     * Creates a view with the given name, backing collection/view name, aggregation pipeline, and options that define
+     * the view.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param viewName the name of the view to create
+     * @param viewOn the backing collection/view for the view
+     * @param pipeline the pipeline that defines the view
+     * @param createViewOptions various options for creating the view
+     * @see [Create Command](https://www.mongodb.com/docs/manual/reference/command/create)
+     */
+    public suspend fun createView(
+        clientSession: ClientSession,
+        viewName: String,
+        viewOn: String,
+        pipeline: List<Bson>,
+        createViewOptions: CreateViewOptions = CreateViewOptions()
+    ) {
+        wrapped.createView(clientSession.wrapped, viewName, viewOn, pipeline, createViewOptions).awaitFirstOrNull()
+    }
+
+    /**
+     * Runs an aggregation framework pipeline on the database for pipeline stages that do not require an underlying
+     * collection, such as `$currentOp` and `$listLocalSessions`.
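+     *
+     * A sketch of a database-level stage (illustrative; assumes a `database` instance inside a coroutine):
+     * ```
+     * database.aggregate(listOf(Document("\$listLocalSessions", Document()))).collect { println(it) }
+     * ```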
+     *
+     * @param pipeline the aggregation pipeline
+     * @return an iterable containing the result of the aggregation operation
+     * @see [Aggregate Command](https://www.mongodb.com/docs/manual/reference/command/aggregate/#dbcmd.aggregate)
+     */
+    @JvmName("aggregateAsDocument")
+    public fun aggregate(pipeline: List<Bson>): AggregateFlow<Document> = aggregate<Document>(pipeline)
+
+    /**
+     * Runs an aggregation framework pipeline on the database for pipeline stages that do not require an underlying
+     * collection, such as `$currentOp` and `$listLocalSessions`.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param pipeline the aggregation pipeline
+     * @return an iterable containing the result of the aggregation operation
+     * @see [Aggregate Command](https://www.mongodb.com/docs/manual/reference/command/aggregate/#dbcmd.aggregate)
+     */
+    @JvmName("aggregateAsDocumentWithSession")
+    public fun aggregate(clientSession: ClientSession, pipeline: List<Bson>): AggregateFlow<Document> =
+        aggregate<Document>(clientSession, pipeline)
+
+    /**
+     * Runs an aggregation framework pipeline on the database for pipeline stages that do not require an underlying
+     * collection, such as `$currentOp` and `$listLocalSessions`.
+     *
+     * @param T the class to decode each document into
+     * @param pipeline the aggregation pipeline
+     * @param resultClass the target document type of the iterable.
+     * @return an iterable containing the result of the aggregation operation
+     * @see [Aggregate Command](https://www.mongodb.com/docs/manual/reference/command/aggregate/#dbcmd.aggregate)
+     */
+    public fun <T : Any> aggregate(pipeline: List<Bson>, resultClass: Class<T>): AggregateFlow<T> =
+        AggregateFlow(wrapped.aggregate(pipeline, resultClass))
+
+    /**
+     * Runs an aggregation framework pipeline on the database for pipeline stages that do not require an underlying
+     * collection, such as `$currentOp` and `$listLocalSessions`.
+     *
+     * @param T the class to decode each document into
+     * @param clientSession the client session with which to associate this operation
+     * @param pipeline the aggregation pipeline
+     * @param resultClass the target document type of the iterable.
+     * @return an iterable containing the result of the aggregation operation
+     * @see [Aggregate Command](https://www.mongodb.com/docs/manual/reference/command/aggregate/#dbcmd.aggregate)
+     */
+    public fun <T : Any> aggregate(
+        clientSession: ClientSession,
+        pipeline: List<Bson>,
+        resultClass: Class<T>
+    ): AggregateFlow<T> = AggregateFlow(wrapped.aggregate(clientSession.wrapped, pipeline, resultClass))
+
+    /**
+     * Runs an aggregation framework pipeline on the database for pipeline stages that do not require an underlying
+     * collection, such as `$currentOp` and `$listLocalSessions`.
+     *
+     * @param T the class to decode each document into
+     * @param pipeline the aggregation pipeline
+     * @return an iterable containing the result of the aggregation operation
+     * @see [Aggregate Command](https://www.mongodb.com/docs/manual/reference/command/aggregate/#dbcmd.aggregate)
+     */
+    public inline fun <reified T : Any> aggregate(pipeline: List<Bson>): AggregateFlow<T> =
+        aggregate(pipeline, T::class.java)
+
+    /**
+     * Runs an aggregation framework pipeline on the database for pipeline stages that do not require an underlying
+     * collection, such as `$currentOp` and `$listLocalSessions`.
+     *
+     * @param T the class to decode each document into
+     * @param clientSession the client session with which to associate this operation
+     * @param pipeline the aggregation pipeline
+     * @return an iterable containing the result of the aggregation operation
+     * @see [Aggregate Command](https://www.mongodb.com/docs/manual/reference/command/aggregate/#dbcmd.aggregate)
+     */
+    public inline fun <reified T : Any> aggregate(
+        clientSession: ClientSession,
+        pipeline: List<Bson>
+    ): AggregateFlow<T> = aggregate(clientSession, pipeline, T::class.java)
+
+    /**
+     * Creates a change stream for this database.
+     *
+     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+     * @return the change stream iterable
+     * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+     */
+    @JvmName("watchAsDocument")
+    public fun watch(pipeline: List<Bson> = emptyList()): ChangeStreamFlow<Document> = watch<Document>(pipeline)
+
+    /**
+     * Creates a change stream for this database.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+     * @return the change stream iterable
+     * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+     */
+    @JvmName("watchAsDocumentWithSession")
+    public fun watch(clientSession: ClientSession, pipeline: List<Bson> = emptyList()): ChangeStreamFlow<Document> =
+        watch<Document>(clientSession, pipeline)
+
+    /**
+     * Creates a change stream for this database.
+     *
+     * @param T the target document type of the iterable.
+     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+     * @param resultClass the target document type of the iterable.
+     * @return the change stream iterable
+     * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+     */
+    public fun <T : Any> watch(pipeline: List<Bson> = emptyList(), resultClass: Class<T>): ChangeStreamFlow<T> =
+        ChangeStreamFlow(wrapped.watch(pipeline, resultClass))
+
+    /**
+     * Creates a change stream for this database.
+     *
+     * @param T the target document type of the iterable.
+     * @param clientSession the client session with which to associate this operation
+     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+     * @param resultClass the target document type of the iterable.
+     * @return the change stream iterable
+     * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+     */
+    public fun <T : Any> watch(
+        clientSession: ClientSession,
+        pipeline: List<Bson> = emptyList(),
+        resultClass: Class<T>
+    ): ChangeStreamFlow<T> = ChangeStreamFlow(wrapped.watch(clientSession.wrapped, pipeline, resultClass))
+
+    /**
+     * Creates a change stream for this database.
+     *
+     * @param T the target document type of the iterable.
+     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+     * @return the change stream iterable
+     * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+     */
+    public inline fun <reified T : Any> watch(pipeline: List<Bson> = emptyList()): ChangeStreamFlow<T> =
+        watch(pipeline, T::class.java)
+
+    /**
+     * Creates a change stream for this database.
+     *
+     * @param T the target document type of the iterable.
+     * @param clientSession the client session with which to associate this operation
+     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+     * @return the change stream iterable
+     * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+     */
+    public inline fun <reified T : Any> watch(
+        clientSession: ClientSession,
+        pipeline: List<Bson> = emptyList()
+    ): ChangeStreamFlow<T> = watch(clientSession, pipeline, T::class.java)
+}
+
+/**
+ * expireAfter extension function
+ *
+ * @param maxTime time in seconds
+ * @return the options
+ */
+public fun CreateCollectionOptions.expireAfter(maxTime: Long): CreateCollectionOptions =
+    this.apply { expireAfter(maxTime, TimeUnit.SECONDS) }
diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/AggregateFlowTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/AggregateFlowTest.kt
new file mode 100644
index 00000000000..ab4b9694986
--- /dev/null
+++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/AggregateFlowTest.kt
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.coroutine
+
+import com.mongodb.ExplainVerbosity
+import com.mongodb.client.cursor.TimeoutMode
+import com.mongodb.client.model.Collation
+import com.mongodb.reactivestreams.client.AggregatePublisher
+import java.util.concurrent.TimeUnit
+import kotlin.reflect.full.declaredFunctions
+import kotlin.test.assertEquals
+import kotlinx.coroutines.runBlocking
+import org.bson.BsonDocument
+import org.bson.BsonString
+import org.bson.Document
+import org.junit.jupiter.api.Test
+import org.mockito.kotlin.doReturn
+import org.mockito.kotlin.mock
+import org.mockito.kotlin.times
+import org.mockito.kotlin.verify
+import org.mockito.kotlin.verifyNoMoreInteractions
+import org.mockito.kotlin.whenever
+import reactor.core.publisher.Mono
+
+class AggregateFlowTest {
+
+    @Test
+    fun shouldHaveTheSameMethods() {
+        val jAggregatePublisherFunctions =
+            AggregatePublisher::class.declaredFunctions.map { it.name }.toSet() - "first" - "subscribe"
+        val kAggregateFlowFunctions = AggregateFlow::class.declaredFunctions.map { it.name }.toSet() - "collect"
+
+        assertEquals(jAggregatePublisherFunctions, kAggregateFlowFunctions)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingMethods() {
+        val wrapped: AggregatePublisher<Document> = mock()
+        val flow = AggregateFlow(wrapped)
+
+        val batchSize = 10
+        val bson = BsonDocument()
+        val bsonComment = BsonString("a comment")
+        val collation = Collation.builder().locale("en").build()
+        val comment = "comment"
+        val hint = Document("h", 1)
+        val hintString = "hintString"
+        val verbosity = ExplainVerbosity.QUERY_PLANNER
+
+        flow.allowDiskUse(true)
+        flow.batchSize(batchSize)
+        flow.bypassDocumentValidation(true)
+        flow.collation(collation)
+        flow.comment(bsonComment)
+        flow.comment(comment)
+        flow.hint(hint)
+        flow.hintString(hintString)
+        flow.let(bson)
+        flow.maxAwaitTime(1)
+        flow.maxAwaitTime(1, TimeUnit.SECONDS)
+        flow.maxTime(1)
+        flow.maxTime(1, TimeUnit.SECONDS)
+        flow.timeoutMode(TimeoutMode.ITERATION)
+
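+        // Each fluent call above should delegate exactly once to the wrapped AggregatePublisher;
+        // the verifications below, plus the final verifyNoMoreInteractions, assert that delegation.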
verify(wrapped).allowDiskUse(true) + verify(wrapped).batchSize(batchSize) + verify(wrapped).bypassDocumentValidation(true) + verify(wrapped).collation(collation) + verify(wrapped).comment(bsonComment) + verify(wrapped).comment(comment) + verify(wrapped).hint(hint) + verify(wrapped).hintString(hintString) + verify(wrapped).maxAwaitTime(1, TimeUnit.MILLISECONDS) + verify(wrapped).maxAwaitTime(1, TimeUnit.SECONDS) + verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS) + verify(wrapped).maxTime(1, TimeUnit.SECONDS) + verify(wrapped).let(bson) + verify(wrapped).timeoutMode(TimeoutMode.ITERATION) + + whenever(wrapped.explain(Document::class.java)).doReturn(Mono.fromCallable { Document() }) + whenever(wrapped.explain(Document::class.java, verbosity)).doReturn(Mono.fromCallable { Document() }) + whenever(wrapped.explain(BsonDocument::class.java, verbosity)).doReturn(Mono.fromCallable { BsonDocument() }) + whenever(wrapped.toCollection()).doReturn(Mono.empty()) + + runBlocking { + flow.explain() + flow.explain(verbosity) + flow.explain(Document::class.java) + flow.explain(BsonDocument::class.java, verbosity) + flow.explain() + flow.explain(verbosity) + flow.toCollection() + } + + verify(wrapped, times(3)).explain(Document::class.java) + verify(wrapped, times(1)).explain(Document::class.java, verbosity) + verify(wrapped, times(2)).explain(BsonDocument::class.java, verbosity) + verify(wrapped).toCollection() + + verifyNoMoreInteractions(wrapped) + } +} diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ChangeStreamFlowTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ChangeStreamFlowTest.kt new file mode 100644 index 00000000000..47030468588 --- /dev/null +++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ChangeStreamFlowTest.kt @@ -0,0 +1,92 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.kotlin.client.coroutine + +import com.mongodb.client.model.Collation +import com.mongodb.client.model.changestream.FullDocument +import com.mongodb.client.model.changestream.FullDocumentBeforeChange +import com.mongodb.reactivestreams.client.ChangeStreamPublisher +import java.util.concurrent.TimeUnit +import kotlin.reflect.full.declaredFunctions +import kotlin.test.assertEquals +import kotlinx.coroutines.runBlocking +import org.bson.BsonDocument +import org.bson.BsonString +import org.bson.BsonTimestamp +import org.bson.Document +import org.junit.jupiter.api.Test +import org.mockito.kotlin.doReturn +import org.mockito.kotlin.mock +import org.mockito.kotlin.verify +import org.mockito.kotlin.verifyNoMoreInteractions +import org.mockito.kotlin.whenever + +class ChangeStreamFlowTest { + + @Test + fun shouldHaveTheSameMethods() { + val jChangeStreamPublisherFunctions = + ChangeStreamPublisher::class.declaredFunctions.map { it.name }.toSet() - "first" + val kChangeStreamFlowFunctions = ChangeStreamFlow::class.declaredFunctions.map { it.name }.toSet() - "collect" + + assertEquals(jChangeStreamPublisherFunctions, kChangeStreamFlowFunctions) + } + + @Test + fun shouldCallTheUnderlyingMethods() { + val wrapped: ChangeStreamPublisher = mock() + val flow = ChangeStreamFlow(wrapped) + + val batchSize = 10 + val bsonComment = BsonString("a comment") + val collation = Collation.builder().locale("en").build() + val comment = "comment" + val operationTime = BsonTimestamp(1) + val resumeToken = BsonDocument() + + flow.batchSize(batchSize) + flow.collation(collation) + flow.comment(comment) + flow.comment(bsonComment) + flow.fullDocument(FullDocument.UPDATE_LOOKUP) + flow.fullDocumentBeforeChange(FullDocumentBeforeChange.REQUIRED) + flow.maxAwaitTime(1) + flow.maxAwaitTime(1, TimeUnit.SECONDS) + flow.resumeAfter(resumeToken) + flow.showExpandedEvents(true) + flow.startAfter(resumeToken) + flow.startAtOperationTime(operationTime) + + verify(wrapped).batchSize(batchSize) + verify(wrapped).collation(collation) + verify(wrapped).comment(comment) + verify(wrapped).comment(bsonComment) + verify(wrapped).fullDocument(FullDocument.UPDATE_LOOKUP) + verify(wrapped).fullDocumentBeforeChange(FullDocumentBeforeChange.REQUIRED) + verify(wrapped).maxAwaitTime(1, TimeUnit.MILLISECONDS) + verify(wrapped).maxAwaitTime(1, TimeUnit.SECONDS) + verify(wrapped).resumeAfter(resumeToken) + verify(wrapped).showExpandedEvents(true) + verify(wrapped).startAfter(resumeToken) + verify(wrapped).startAtOperationTime(operationTime) + + whenever(wrapped.withDocumentClass(BsonDocument::class.java)).doReturn(mock()) + runBlocking { flow.withDocumentClass() } + verify(wrapped).withDocumentClass(BsonDocument::class.java) + + verifyNoMoreInteractions(wrapped) + } +} diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ClientSessionTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ClientSessionTest.kt new file mode 100644 index 00000000000..1c0ab88a744 --- /dev/null +++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ClientSessionTest.kt @@ -0,0 +1,80 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.kotlin.client.coroutine + +import com.mongodb.ClientSessionOptions +import com.mongodb.TransactionOptions +import com.mongodb.reactivestreams.client.ClientSession as JClientSession +import kotlin.reflect.full.functions +import kotlin.test.assertEquals +import kotlinx.coroutines.runBlocking +import org.junit.jupiter.api.Test +import org.mockito.kotlin.doReturn +import org.mockito.kotlin.mock +import org.mockito.kotlin.verify +import org.mockito.kotlin.verifyNoMoreInteractions +import org.mockito.kotlin.whenever +import reactor.core.publisher.Mono + +class ClientSessionTest { + + @Test + fun shouldHaveTheSameMethods() { + val jClientSessionFunctions = JClientSession::class.functions.map { it.name }.toSet() + val kClientSessionFunctions = ClientSession::class.functions.map { it.name }.toSet() + + assertEquals(jClientSessionFunctions, kClientSessionFunctions) + } + + @Test + fun shouldCallTheUnderlyingMethods() { + val wrapped: JClientSession = mock() + val session = ClientSession(wrapped) + + val transactionOptions = TransactionOptions.builder().maxCommitTime(10).build() + + whenever(wrapped.options).doReturn(ClientSessionOptions.builder().build()) + whenever(wrapped.serverSession).doReturn(mock()) + whenever(wrapped.isCausallyConsistent).doReturn(true) + whenever(wrapped.transactionOptions).doReturn(transactionOptions) + + session.options + session.serverSession + session.isCausallyConsistent + session.startTransaction() + session.startTransaction(transactionOptions) + session.getTransactionOptions() + + verify(wrapped).options + verify(wrapped).serverSession + verify(wrapped).isCausallyConsistent + verify(wrapped).startTransaction() + verify(wrapped).startTransaction(transactionOptions) + verify(wrapped).transactionOptions + + whenever(wrapped.abortTransaction()).doReturn(Mono.empty()) + whenever(wrapped.commitTransaction()).doReturn(Mono.empty()) + + runBlocking { + session.abortTransaction() + session.commitTransaction() + } + + verify(wrapped).abortTransaction() + verify(wrapped).commitTransaction() + verifyNoMoreInteractions(wrapped) + } +} diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/DistinctFlowTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/DistinctFlowTest.kt new file mode 100644 index 00000000000..571c6f579bb --- /dev/null +++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/DistinctFlowTest.kt @@ -0,0 +1,72 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.kotlin.client.coroutine + +import com.mongodb.client.cursor.TimeoutMode +import com.mongodb.client.model.Collation +import com.mongodb.reactivestreams.client.DistinctPublisher +import java.util.concurrent.TimeUnit +import kotlin.reflect.full.declaredFunctions +import kotlin.test.assertEquals +import org.bson.BsonDocument +import org.bson.BsonString +import org.bson.Document +import org.junit.jupiter.api.Test +import org.mockito.kotlin.mock +import org.mockito.kotlin.verify +import org.mockito.kotlin.verifyNoMoreInteractions + +class DistinctFlowTest { + @Test + fun shouldHaveTheSameMethods() { + val jDistinctPublisherFunctions = DistinctPublisher::class.declaredFunctions.map { it.name }.toSet() - "first" + val kDistinctFlowFunctions = DistinctFlow::class.declaredFunctions.map { it.name }.toSet() - "collect" + + assertEquals(jDistinctPublisherFunctions, kDistinctFlowFunctions) + } + + @Test + fun shouldCallTheUnderlyingMethods() { + val wrapped: DistinctPublisher = mock() + val flow = DistinctFlow(wrapped) + + val batchSize = 10 + val bsonComment = BsonString("a comment") + val collation = Collation.builder().locale("en").build() + val comment = "comment" + val filter = BsonDocument() + + flow.batchSize(batchSize) + flow.collation(collation) + flow.comment(bsonComment) + flow.comment(comment) + flow.filter(filter) + flow.maxTime(1) + flow.maxTime(1, TimeUnit.SECONDS) + flow.timeoutMode(TimeoutMode.ITERATION) + + verify(wrapped).batchSize(batchSize) + verify(wrapped).collation(collation) + verify(wrapped).comment(bsonComment) + verify(wrapped).comment(comment) + verify(wrapped).filter(filter) + verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS) + verify(wrapped).maxTime(1, TimeUnit.SECONDS) + verify(wrapped).timeoutMode(TimeoutMode.ITERATION) + + verifyNoMoreInteractions(wrapped) + } +} diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ExtensionMethodsTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ExtensionMethodsTest.kt new file mode 100644 index 00000000000..ae4f13639eb --- /dev/null +++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ExtensionMethodsTest.kt @@ -0,0 +1,58 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.kotlin.client.coroutine + +import io.github.classgraph.ClassGraph +import kotlin.test.assertEquals +import org.junit.jupiter.api.Test + +class ExtensionMethodsTest { + + @Test + fun shouldHaveTimeUnitExtensionsMethodsForOptionsClasses() { + + val extensionsAddedForClasses = + setOf( + "CountOptions", + "CreateCollectionOptions", + "CreateIndexOptions", + "ClientSessionOptions", + "DropIndexOptions", + "EstimatedDocumentCountOptions", + "FindOneAndDeleteOptions", + "FindOneAndReplaceOptions", + "FindOneAndUpdateOptions", + "IndexOptions", + "TransactionOptions", + "TimeSeriesOptions") + + ClassGraph().enableClassInfo().enableMethodInfo().acceptPackages("com.mongodb").scan().use { scanResult -> + val optionsClassesWithTimeUnit = + scanResult.allClasses + .filter { !it.packageName.contains("internal") } + .filter { it.simpleName.endsWith("Options") } + .filter { + it.methodInfo.any { m -> + m.parameterInfo.any { p -> p.typeDescriptor.toStringWithSimpleNames().equals("TimeUnit") } + } + } + .map { c -> c.simpleName } + .toSet() + + assertEquals(extensionsAddedForClasses, optionsClassesWithTimeUnit) + } + } +} diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/FindFlowTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/FindFlowTest.kt new file mode 100644 index 00000000000..450059c8211 --- /dev/null +++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/FindFlowTest.kt @@ -0,0 +1,129 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.kotlin.client.coroutine + +import com.mongodb.CursorType +import com.mongodb.ExplainVerbosity +import com.mongodb.client.cursor.TimeoutMode +import com.mongodb.client.model.Collation +import com.mongodb.reactivestreams.client.FindPublisher +import java.util.concurrent.TimeUnit +import kotlin.reflect.full.declaredFunctions +import kotlin.test.assertEquals +import kotlinx.coroutines.runBlocking +import org.bson.BsonDocument +import org.bson.BsonString +import org.bson.Document +import org.junit.jupiter.api.Test +import org.mockito.kotlin.* +import reactor.core.publisher.Mono + +class FindFlowTest { + @Test + fun shouldHaveTheSameMethods() { + val jFindPublisherFunctions = FindPublisher::class.declaredFunctions.map { it.name }.toSet() - "first" + val kFindFlowFunctions = FindFlow::class.declaredFunctions.map { it.name }.toSet() - "collect" + + assertEquals(jFindPublisherFunctions, kFindFlowFunctions) + } + + @Test + fun shouldCallTheUnderlyingMethods() { + val wrapped: FindPublisher = mock() + val flow = FindFlow(wrapped) + + val batchSize = 10 + val bson = BsonDocument() + val bsonComment = BsonString("a comment") + val collation = Collation.builder().locale("en").build() + val comment = "comment" + val filter = BsonDocument() + val hint = Document("h", 1) + val hintString = "hintString" + val verbosity = ExplainVerbosity.QUERY_PLANNER + + flow.allowDiskUse(true) + flow.batchSize(batchSize) + flow.collation(collation) + flow.comment(bsonComment) + flow.comment(comment) + flow.cursorType(CursorType.NonTailable) + flow.filter(filter) + flow.hint(hint) + flow.hintString(hintString) + flow.let(bson) + flow.limit(1) + flow.max(bson) + flow.maxAwaitTime(1) + flow.maxAwaitTime(1, TimeUnit.SECONDS) + flow.maxTime(1) + flow.maxTime(1, TimeUnit.SECONDS) + flow.min(bson) + flow.noCursorTimeout(true) + flow.partial(true) + flow.projection(bson) + flow.returnKey(true) + flow.showRecordId(true) + flow.skip(1) + flow.sort(bson) + flow.timeoutMode(TimeoutMode.ITERATION) + + verify(wrapped).allowDiskUse(true) + verify(wrapped).batchSize(batchSize) + verify(wrapped).collation(collation) + verify(wrapped).comment(bsonComment) + verify(wrapped).comment(comment) + verify(wrapped).cursorType(CursorType.NonTailable) + verify(wrapped).filter(filter) + verify(wrapped).hint(hint) + verify(wrapped).hintString(hintString) + verify(wrapped).let(bson) + verify(wrapped).limit(1) + verify(wrapped).max(bson) + verify(wrapped).maxAwaitTime(1, TimeUnit.MILLISECONDS) + verify(wrapped).maxAwaitTime(1, TimeUnit.SECONDS) + verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS) + verify(wrapped).maxTime(1, TimeUnit.SECONDS) + verify(wrapped).min(bson) + verify(wrapped).noCursorTimeout(true) + verify(wrapped).partial(true) + verify(wrapped).projection(bson) + verify(wrapped).returnKey(true) + verify(wrapped).showRecordId(true) + verify(wrapped).skip(1) + verify(wrapped).sort(bson) + verify(wrapped).timeoutMode(TimeoutMode.ITERATION) + + whenever(wrapped.explain(Document::class.java)).doReturn(Mono.fromCallable { Document() }) + whenever(wrapped.explain(Document::class.java, verbosity)).doReturn(Mono.fromCallable { Document() }) + whenever(wrapped.explain(BsonDocument::class.java, verbosity)).doReturn(Mono.fromCallable { BsonDocument() }) + + runBlocking { + flow.explain() + flow.explain(verbosity) + flow.explain(Document::class.java) + flow.explain(BsonDocument::class.java, verbosity) + flow.explain() + flow.explain(verbosity) + } + + verify(wrapped, times(3)).explain(Document::class.java) + verify(wrapped, 
times(1)).explain(Document::class.java, verbosity) + verify(wrapped, times(2)).explain(BsonDocument::class.java, verbosity) + + verifyNoMoreInteractions(wrapped) + } +} diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionNamesFlowTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionNamesFlowTest.kt new file mode 100644 index 00000000000..c2aa221c98e --- /dev/null +++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionNamesFlowTest.kt @@ -0,0 +1,70 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.kotlin.client.coroutine + +import com.mongodb.reactivestreams.client.ListCollectionNamesPublisher +import java.util.concurrent.TimeUnit +import kotlin.reflect.full.declaredFunctions +import kotlin.test.assertEquals +import org.bson.BsonDocument +import org.bson.BsonString +import org.junit.jupiter.api.Test +import org.mockito.kotlin.mock +import org.mockito.kotlin.verify +import org.mockito.kotlin.verifyNoMoreInteractions + +class ListCollectionNamesFlowTest { + @Test + fun shouldHaveTheSameMethods() { + val jListCollectionNamesPublisherFunctions = + ListCollectionNamesPublisher::class.declaredFunctions.map { it.name }.toSet() - "first" + val kListCollectionNamesFlowFunctions = + ListCollectionNamesFlow::class.declaredFunctions.map { it.name }.toSet() - "collect" + + assertEquals(jListCollectionNamesPublisherFunctions, kListCollectionNamesFlowFunctions) + } + + @Test + @Suppress("DEPRECATION") + fun shouldCallTheUnderlyingMethods() { + val wrapped: ListCollectionNamesPublisher = mock() + val flow = ListCollectionNamesFlow(wrapped) + + val batchSize = 10 + val bsonComment = BsonString("a comment") + val authorizedCollections = true + val comment = "comment" + val filter = BsonDocument() + + flow.batchSize(batchSize) + flow.authorizedCollections(authorizedCollections) + flow.comment(bsonComment) + flow.comment(comment) + flow.filter(filter) + flow.maxTime(1) + flow.maxTime(1, TimeUnit.SECONDS) + + verify(wrapped).batchSize(batchSize) + verify(wrapped).authorizedCollections(authorizedCollections) + verify(wrapped).comment(bsonComment) + verify(wrapped).comment(comment) + verify(wrapped).filter(filter) + verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS) + verify(wrapped).maxTime(1, TimeUnit.SECONDS) + + verifyNoMoreInteractions(wrapped) + } +} diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionsFlowTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionsFlowTest.kt new file mode 100644 index 00000000000..59c6f896c86 --- /dev/null +++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListCollectionsFlowTest.kt @@ -0,0 +1,70 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.kotlin.client.coroutine + +import com.mongodb.client.cursor.TimeoutMode +import com.mongodb.reactivestreams.client.ListCollectionsPublisher +import java.util.concurrent.TimeUnit +import kotlin.reflect.full.declaredFunctions +import kotlin.test.assertEquals +import org.bson.BsonDocument +import org.bson.BsonString +import org.bson.Document +import org.junit.jupiter.api.Test +import org.mockito.kotlin.mock +import org.mockito.kotlin.verify +import org.mockito.kotlin.verifyNoMoreInteractions + +class ListCollectionsFlowTest { + @Test + fun shouldHaveTheSameMethods() { + val jListCollectionsPublisherFunctions = + ListCollectionsPublisher::class.declaredFunctions.map { it.name }.toSet() - "first" + val kListCollectionsFlowFunctions = + ListCollectionsFlow::class.declaredFunctions.map { it.name }.toSet() - "collect" + + assertEquals(jListCollectionsPublisherFunctions, kListCollectionsFlowFunctions) + } + + @Test + fun shouldCallTheUnderlyingMethods() { + val wrapped: ListCollectionsPublisher = mock() + val flow = ListCollectionsFlow(wrapped) + + val batchSize = 10 + val bsonComment = BsonString("a comment") + val comment = "comment" + val filter = BsonDocument() + + flow.batchSize(batchSize) + flow.comment(bsonComment) + flow.comment(comment) + flow.filter(filter) + flow.maxTime(1) + flow.maxTime(1, TimeUnit.SECONDS) + flow.timeoutMode(TimeoutMode.ITERATION) + + verify(wrapped).batchSize(batchSize) + verify(wrapped).comment(bsonComment) + verify(wrapped).comment(comment) + verify(wrapped).filter(filter) + verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS) + verify(wrapped).maxTime(1, TimeUnit.SECONDS) + verify(wrapped).timeoutMode(TimeoutMode.ITERATION) + + verifyNoMoreInteractions(wrapped) + } +} diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListDatabasesFlowTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListDatabasesFlowTest.kt new file mode 100644 index 00000000000..eac18960b3f --- /dev/null +++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListDatabasesFlowTest.kt @@ -0,0 +1,73 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.kotlin.client.coroutine + +import com.mongodb.client.cursor.TimeoutMode +import com.mongodb.reactivestreams.client.ListDatabasesPublisher +import java.util.concurrent.TimeUnit +import kotlin.reflect.full.declaredFunctions +import kotlin.test.assertEquals +import org.bson.BsonDocument +import org.bson.BsonString +import org.bson.Document +import org.junit.jupiter.api.Test +import org.mockito.kotlin.mock +import org.mockito.kotlin.verify +import org.mockito.kotlin.verifyNoMoreInteractions + +class ListDatabasesFlowTest { + @Test + fun shouldHaveTheSameMethods() { + val jListDatabasesPublisherFunctions = + ListDatabasesPublisher::class.declaredFunctions.map { it.name }.toSet() - "first" + val kListDatabasesFlowFunctions = ListDatabasesFlow::class.declaredFunctions.map { it.name }.toSet() - "collect" + + assertEquals(jListDatabasesPublisherFunctions, kListDatabasesFlowFunctions) + } + + @Test + fun shouldCallTheUnderlyingMethods() { + val wrapped: ListDatabasesPublisher = mock() + val flow = ListDatabasesFlow(wrapped) + + val batchSize = 10 + val bsonComment = BsonString("a comment") + val comment = "comment" + val filter = BsonDocument() + + flow.authorizedDatabasesOnly(true) + flow.batchSize(batchSize) + flow.comment(bsonComment) + flow.comment(comment) + flow.filter(filter) + flow.maxTime(1) + flow.maxTime(1, TimeUnit.SECONDS) + flow.nameOnly(true) + flow.timeoutMode(TimeoutMode.ITERATION) + + verify(wrapped).authorizedDatabasesOnly(true) + verify(wrapped).batchSize(batchSize) + verify(wrapped).comment(bsonComment) + verify(wrapped).comment(comment) + verify(wrapped).filter(filter) + verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS) + verify(wrapped).maxTime(1, TimeUnit.SECONDS) + verify(wrapped).nameOnly(true) + verify(wrapped).timeoutMode(TimeoutMode.ITERATION) + + verifyNoMoreInteractions(wrapped) + } +} diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListIndexesFlowTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListIndexesFlowTest.kt new file mode 100644 index 00000000000..d84765d428b --- /dev/null +++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/ListIndexesFlowTest.kt @@ -0,0 +1,65 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.kotlin.client.coroutine + +import com.mongodb.client.cursor.TimeoutMode +import com.mongodb.reactivestreams.client.ListIndexesPublisher +import java.util.concurrent.TimeUnit +import kotlin.reflect.full.declaredFunctions +import kotlin.test.assertEquals +import org.bson.BsonString +import org.bson.Document +import org.junit.jupiter.api.Test +import org.mockito.kotlin.mock +import org.mockito.kotlin.verify +import org.mockito.kotlin.verifyNoMoreInteractions + +class ListIndexesFlowTest { + @Test + fun shouldHaveTheSameMethods() { + val jListIndexesPublisherFunctions = + ListIndexesPublisher::class.declaredFunctions.map { it.name }.toSet() - "first" + val kListIndexesFlowFunctions = ListIndexesFlow::class.declaredFunctions.map { it.name }.toSet() - "collect" + + assertEquals(jListIndexesPublisherFunctions, kListIndexesFlowFunctions) + } + + @Test + fun shouldCallTheUnderlyingMethods() { + val wrapped: ListIndexesPublisher = mock() + val flow = ListIndexesFlow(wrapped) + + val batchSize = 10 + val bsonComment = BsonString("a comment") + val comment = "comment" + + flow.batchSize(batchSize) + flow.comment(bsonComment) + flow.comment(comment) + flow.maxTime(1) + flow.maxTime(1, TimeUnit.SECONDS) + flow.timeoutMode(TimeoutMode.ITERATION) + + verify(wrapped).batchSize(batchSize) + verify(wrapped).comment(bsonComment) + verify(wrapped).comment(comment) + verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS) + verify(wrapped).maxTime(1, TimeUnit.SECONDS) + verify(wrapped).timeoutMode(TimeoutMode.ITERATION) + + verifyNoMoreInteractions(wrapped) + } +} diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MapReduceFlowTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MapReduceFlowTest.kt new file mode 100644 index 00000000000..3a38b7e6460 --- /dev/null +++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MapReduceFlowTest.kt @@ -0,0 +1,101 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +@file:Suppress("DEPRECATION") + +package com.mongodb.kotlin.client.coroutine + +import com.mongodb.client.cursor.TimeoutMode +import com.mongodb.client.model.Collation +import com.mongodb.client.model.MapReduceAction +import com.mongodb.reactivestreams.client.MapReducePublisher +import java.util.concurrent.TimeUnit +import kotlin.reflect.full.declaredFunctions +import kotlin.test.assertEquals +import kotlinx.coroutines.runBlocking +import org.bson.BsonDocument +import org.bson.Document +import org.junit.jupiter.api.Test +import org.mockito.kotlin.doReturn +import org.mockito.kotlin.mock +import org.mockito.kotlin.verify +import org.mockito.kotlin.verifyNoMoreInteractions +import org.mockito.kotlin.whenever +import reactor.core.publisher.Mono + +class MapReduceFlowTest { + @Test + fun shouldHaveTheSameMethods() { + val jMapReducePublisherFunctions = + MapReducePublisher::class.declaredFunctions.map { it.name }.toSet() - "first" - "subscribe" + val kMapReduceFlowFunctions = MapReduceFlow::class.declaredFunctions.map { it.name }.toSet() - "collect" + + assertEquals(jMapReducePublisherFunctions, kMapReduceFlowFunctions) + } + + @Test + fun shouldCallTheUnderlyingMethods() { + val wrapped: MapReducePublisher = mock() + val flow = MapReduceFlow(wrapped) + + val batchSize = 10 + val bson = BsonDocument() + val collation = Collation.builder().locale("en").build() + val collectionName = "coll" + val databaseName = "db" + val filter = BsonDocument() + val finalizeFunction = "finalize" + + flow.batchSize(batchSize) + flow.bypassDocumentValidation(true) + flow.collation(collation) + flow.collectionName(collectionName) + flow.databaseName(databaseName) + flow.filter(filter) + flow.finalizeFunction(finalizeFunction) + flow.jsMode(true) + flow.limit(1) + flow.maxTime(1) + flow.maxTime(1, TimeUnit.SECONDS) + flow.scope(bson) + flow.sort(bson) + flow.verbose(true) + flow.action(MapReduceAction.MERGE) + flow.timeoutMode(TimeoutMode.ITERATION) + + verify(wrapped).batchSize(batchSize) + verify(wrapped).bypassDocumentValidation(true) + verify(wrapped).collation(collation) + verify(wrapped).collectionName(collectionName) + verify(wrapped).databaseName(databaseName) + verify(wrapped).filter(filter) + verify(wrapped).finalizeFunction(finalizeFunction) + verify(wrapped).jsMode(true) + verify(wrapped).limit(1) + verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS) + verify(wrapped).maxTime(1, TimeUnit.SECONDS) + verify(wrapped).scope(bson) + verify(wrapped).sort(bson) + verify(wrapped).verbose(true) + verify(wrapped).action(MapReduceAction.MERGE) + verify(wrapped).timeoutMode(TimeoutMode.ITERATION) + + whenever(wrapped.toCollection()).doReturn(Mono.empty()) + runBlocking { flow.toCollection() } + verify(wrapped).toCollection() + + verifyNoMoreInteractions(wrapped) + } +} diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MockitoHelper.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MockitoHelper.kt new file mode 100644 index 00000000000..cd5d0f4d68a --- /dev/null +++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MockitoHelper.kt @@ -0,0 +1,53 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.kotlin.client.coroutine + +import org.assertj.core.api.Assertions.assertThat +import org.mockito.ArgumentMatcher +import org.mockito.ArgumentMatchers.argThat + +/** Mockito test helper object */ +object MockitoHelper { + + /** + * Deep reflection comparison for complex nested objects + * + * The usecase is to reflect complex objects that don't have an equals method and contain nested complex properties + * that also do not contain equals values + * + * Example: + * ``` + * verify(wrapped).createCollection(eq(name), deepRefEq(defaultOptions)) + * ``` + * + * @param T the type of the value + * @param value the value + * @return the value + * @see [org.mockito.kotlin.refEq] + */ + fun deepRefEq(value: T): T = argThat(DeepReflectionEqMatcher(value)) + + private class DeepReflectionEqMatcher(private val expected: T) : ArgumentMatcher { + override fun matches(argument: T): Boolean { + return try { + assertThat(argument).usingRecursiveComparison().isEqualTo(expected) + true + } catch (e: Throwable) { + false + } + } + } +} diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoClientTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoClientTest.kt new file mode 100644 index 00000000000..b1dc72e6a81 --- /dev/null +++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoClientTest.kt @@ -0,0 +1,214 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package com.mongodb.kotlin.client.coroutine
+
+import com.mongodb.ClientSessionOptions
+import com.mongodb.MongoDriverInformation
+import com.mongodb.MongoNamespace
+import com.mongodb.client.model.bulk.ClientBulkWriteOptions
+import com.mongodb.client.model.bulk.ClientNamespacedWriteModel
+import com.mongodb.reactivestreams.client.MongoClient as JMongoClient
+import kotlin.reflect.full.declaredFunctions
+import kotlin.test.assertEquals
+import kotlinx.coroutines.runBlocking
+import org.bson.BsonDocument
+import org.bson.Document
+import org.junit.jupiter.api.Test
+import org.mockito.Mock
+import org.mockito.kotlin.any
+import org.mockito.kotlin.doReturn
+import org.mockito.kotlin.mock
+import org.mockito.kotlin.refEq
+import org.mockito.kotlin.times
+import org.mockito.kotlin.verify
+import org.mockito.kotlin.verifyNoMoreInteractions
+import org.mockito.kotlin.whenever
+import reactor.core.publisher.Mono
+
+class MongoClientTest {
+
+    @Mock val wrapped: JMongoClient = mock()
+    @Mock val clientSession: ClientSession = ClientSession(mock())
+
+    @Test
+    fun shouldHaveTheSameMethods() {
+        val jMongoClientFunctions = JMongoClient::class.declaredFunctions.map { it.name }.toSet()
+        val kMongoClientFunctions = MongoClient::class.declaredFunctions.map { it.name }.toSet()
+
+        assertEquals(jMongoClientFunctions, kMongoClientFunctions)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingClose() {
+        val mongoClient = MongoClient(wrapped)
+        mongoClient.close()
+
+        verify(wrapped).close()
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingClusterDescription() {
+        val mongoClient = MongoClient(wrapped)
+        whenever(wrapped.clusterDescription).doReturn(mock())
+
+        mongoClient.getClusterDescription()
+
+        verify(wrapped).clusterDescription
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingAppendMetadata() {
+        val mongoClient = MongoClient(wrapped)
+
+        val mongoDriverInformation =
+            MongoDriverInformation.builder()
+                .driverName("kotlin")
+                .driverPlatform("kotlin/${KotlinVersion.CURRENT}")
+                .build()
+
+        mongoClient.appendMetadata(mongoDriverInformation)
+
+        verify(wrapped).appendMetadata(mongoDriverInformation)
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingGetDatabase() {
+        val mongoClient = MongoClient(wrapped)
+        whenever(wrapped.getDatabase(any())).doReturn(mock())
+
+        mongoClient.getDatabase("dbName")
+        verify(wrapped).getDatabase("dbName")
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingStartSession() {
+        val mongoClient = MongoClient(wrapped)
+        val defaultOptions = ClientSessionOptions.builder().build()
+        val options = ClientSessionOptions.builder().causallyConsistent(true).build()
+
+        whenever(wrapped.startSession(refEq(defaultOptions))).doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.startSession(options)).doReturn(Mono.fromCallable { mock() })
+
+        runBlocking {
+            mongoClient.startSession()
+            mongoClient.startSession(options)
+        }
+
+        verify(wrapped).startSession(refEq(defaultOptions))
+        verify(wrapped).startSession(options)
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingListDatabaseNames() {
+        val mongoClient = MongoClient(wrapped)
+        whenever(wrapped.listDatabaseNames()).doReturn(mock())
+        whenever(wrapped.listDatabaseNames(any())).doReturn(mock())
+
+        mongoClient.listDatabaseNames()
+        mongoClient.listDatabaseNames(clientSession)
+
+        verify(wrapped).listDatabaseNames()
+        verify(wrapped).listDatabaseNames(clientSession.wrapped)
+
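+        // Only the delegating calls above should have reached the wrapped client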
verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingListDatabases() { + val mongoClient = MongoClient(wrapped) + whenever(wrapped.listDatabases(Document::class.java)).doReturn(mock()) + whenever(wrapped.listDatabases(clientSession.wrapped, Document::class.java)).doReturn(mock()) + whenever(wrapped.listDatabases(clientSession.wrapped, BsonDocument::class.java)).doReturn(mock()) + + mongoClient.listDatabases() + mongoClient.listDatabases(clientSession) + mongoClient.listDatabases(Document::class.java) + mongoClient.listDatabases(clientSession, BsonDocument::class.java) + mongoClient.listDatabases() + mongoClient.listDatabases(clientSession) + + verify(wrapped, times(3)).listDatabases(Document::class.java) + verify(wrapped, times(1)).listDatabases(clientSession.wrapped, Document::class.java) + verify(wrapped, times(2)).listDatabases(clientSession.wrapped, BsonDocument::class.java) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingWatch() { + val mongoClient = MongoClient(wrapped) + val pipeline = listOf(Document(mapOf("a" to 1))) + + whenever(wrapped.watch(emptyList(), Document::class.java)).doReturn(mock()) + whenever(wrapped.watch(pipeline, Document::class.java)).doReturn(mock()) + whenever(wrapped.watch(clientSession.wrapped, emptyList(), Document::class.java)).doReturn(mock()) + whenever(wrapped.watch(clientSession.wrapped, pipeline, Document::class.java)).doReturn(mock()) + whenever(wrapped.watch(pipeline, BsonDocument::class.java)).doReturn(mock()) + whenever(wrapped.watch(clientSession.wrapped, emptyList(), Document::class.java)).doReturn(mock()) + whenever(wrapped.watch(clientSession.wrapped, pipeline, BsonDocument::class.java)).doReturn(mock()) + + mongoClient.watch() + mongoClient.watch(pipeline) + mongoClient.watch(clientSession) + mongoClient.watch(clientSession, pipeline) + + mongoClient.watch(resultClass = Document::class.java) + mongoClient.watch(pipeline, BsonDocument::class.java) + mongoClient.watch(clientSession = clientSession, resultClass = Document::class.java) + mongoClient.watch(clientSession, pipeline, BsonDocument::class.java) + + mongoClient.watch() + mongoClient.watch(pipeline) + mongoClient.watch(clientSession) + mongoClient.watch(clientSession, pipeline) + + verify(wrapped, times(3)).watch(emptyList(), Document::class.java) + verify(wrapped, times(1)).watch(pipeline, Document::class.java) + verify(wrapped, times(3)).watch(clientSession.wrapped, emptyList(), Document::class.java) + verify(wrapped, times(1)).watch(clientSession.wrapped, pipeline, Document::class.java) + verify(wrapped, times(2)).watch(pipeline, BsonDocument::class.java) + verify(wrapped, times(2)).watch(clientSession.wrapped, pipeline, BsonDocument::class.java) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingBulkWrite() { + val mongoClient = MongoClient(wrapped) + val requests = listOf(ClientNamespacedWriteModel.insertOne(MongoNamespace("test.test"), Document())) + val options = ClientBulkWriteOptions.clientBulkWriteOptions().bypassDocumentValidation(true) + + whenever(wrapped.bulkWrite(requests)).doReturn(Mono.fromCallable { mock() }) + whenever(wrapped.bulkWrite(requests, options)).doReturn(Mono.fromCallable { mock() }) + whenever(wrapped.bulkWrite(clientSession.wrapped, requests)).doReturn(Mono.fromCallable { mock() }) + whenever(wrapped.bulkWrite(clientSession.wrapped, requests, options)).doReturn(Mono.fromCallable { mock() }) + + runBlocking { + mongoClient.bulkWrite(requests) + mongoClient.bulkWrite(requests, 
+    @Test
+    fun shouldCallTheUnderlyingBulkWrite() {
+        val mongoClient = MongoClient(wrapped)
+        val requests = listOf(ClientNamespacedWriteModel.insertOne(MongoNamespace("test.test"), Document()))
+        val options = ClientBulkWriteOptions.clientBulkWriteOptions().bypassDocumentValidation(true)
+
+        whenever(wrapped.bulkWrite(requests)).doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.bulkWrite(requests, options)).doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.bulkWrite(clientSession.wrapped, requests)).doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.bulkWrite(clientSession.wrapped, requests, options)).doReturn(Mono.fromCallable { mock() })
+
+        runBlocking {
+            mongoClient.bulkWrite(requests)
+            mongoClient.bulkWrite(requests, options)
+            mongoClient.bulkWrite(clientSession, requests)
+            mongoClient.bulkWrite(clientSession, requests, options)
+        }
+
+        verify(wrapped).bulkWrite(requests)
+        verify(wrapped).bulkWrite(requests, options)
+        verify(wrapped).bulkWrite(clientSession.wrapped, requests)
+        verify(wrapped).bulkWrite(clientSession.wrapped, requests, options)
+        verifyNoMoreInteractions(wrapped)
+    }
+}
diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollectionTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollectionTest.kt
new file mode 100644
index 00000000000..7be5c068a84
--- /dev/null
+++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoCollectionTest.kt
@@ -0,0 +1,1020 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.coroutine
+
+import com.mongodb.CreateIndexCommitQuorum
+import com.mongodb.MongoNamespace
+import com.mongodb.ReadConcern
+import com.mongodb.ReadPreference
+import com.mongodb.WriteConcern
+import com.mongodb.client.model.BulkWriteOptions
+import com.mongodb.client.model.CountOptions
+import com.mongodb.client.model.CreateIndexOptions
+import com.mongodb.client.model.DeleteOptions
+import com.mongodb.client.model.DropCollectionOptions
+import com.mongodb.client.model.DropIndexOptions
+import com.mongodb.client.model.EstimatedDocumentCountOptions
+import com.mongodb.client.model.FindOneAndDeleteOptions
+import com.mongodb.client.model.FindOneAndReplaceOptions
+import com.mongodb.client.model.FindOneAndUpdateOptions
+import com.mongodb.client.model.IndexModel
+import com.mongodb.client.model.IndexOptions
+import com.mongodb.client.model.InsertManyOptions
+import com.mongodb.client.model.InsertOneModel
+import com.mongodb.client.model.InsertOneOptions
+import com.mongodb.client.model.RenameCollectionOptions
+import com.mongodb.client.model.ReplaceOptions
+import com.mongodb.client.model.UpdateOptions
+import com.mongodb.reactivestreams.client.MongoCollection as JMongoCollection
+import java.util.concurrent.TimeUnit
+import kotlin.reflect.full.declaredFunctions
+import kotlin.reflect.full.declaredMemberProperties
+import kotlin.test.assertEquals
+import kotlinx.coroutines.runBlocking
+import org.bson.BsonDocument
+import org.bson.Document
+import org.bson.codecs.configuration.CodecRegistry
+import org.junit.jupiter.api.Test
+import org.mockito.Mock
+import org.mockito.kotlin.doReturn
+import org.mockito.kotlin.eq
+import org.mockito.kotlin.mock
+import org.mockito.kotlin.refEq
+import org.mockito.kotlin.times
+import org.mockito.kotlin.verify
+import org.mockito.kotlin.verifyNoMoreInteractions
+import org.mockito.kotlin.whenever
+import reactor.core.publisher.Mono
+
+class MongoCollectionTest {
+
+    @Mock val wrapped: JMongoCollection<Document> = mock()
+    @Mock val clientSession: ClientSession = ClientSession(mock())
+
+    private val defaultFilter = BsonDocument()
+    private val filter = Document("a", 1)
+    private val pipeline = listOf(Document(mapOf("a" to 1)))
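+
+    // Reflection-based parity check: every function on the wrapped reactive-streams
+    // interface must have a counterpart here, with properties counted via their getters.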
Document("a", 1) + private val pipeline = listOf(Document(mapOf("a" to 1))) + + @Test + fun shouldHaveTheSameMethods() { + val jMongoCollectionFunctions = JMongoCollection::class.declaredFunctions.map { it.name }.toSet() + val kMongoCollectionFunctions = + MongoCollection::class + .declaredFunctions + .map { + if (it.name == "timeout") { + "getTimeout" + } else { + it.name + } + } + .toSet() + + MongoCollection::class + .declaredMemberProperties + .filterNot { it.name == "wrapped" } + .map { "get${it.name.replaceFirstChar{c -> c.uppercaseChar() }}" } + + assertEquals(jMongoCollectionFunctions, kMongoCollectionFunctions) + } + + @Test + fun shouldCallTheUnderlyingGetDocumentClass() { + val mongoCollection = MongoCollection(wrapped) + whenever(wrapped.documentClass).doReturn(Document::class.java) + + mongoCollection.documentClass + verify(wrapped).documentClass + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingGetNamespace() { + val mongoCollection = MongoCollection(wrapped) + whenever(wrapped.namespace).doReturn(MongoNamespace("a.b")) + + mongoCollection.namespace + verify(wrapped).namespace + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingGetCodecRegistry() { + val mongoCollection = MongoCollection(wrapped) + whenever(wrapped.codecRegistry).doReturn(mock()) + + mongoCollection.codecRegistry + verify(wrapped).codecRegistry + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingGetReadPreference() { + val mongoCollection = MongoCollection(wrapped) + whenever(wrapped.readPreference).doReturn(mock()) + + mongoCollection.readPreference + verify(wrapped).readPreference + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingGetReadConcern() { + val mongoCollection = MongoCollection(wrapped) + whenever(wrapped.readConcern).doReturn(ReadConcern.DEFAULT) + + mongoCollection.readConcern + verify(wrapped).readConcern + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingGetWriteConcern() { + val mongoCollection = MongoCollection(wrapped) + whenever(wrapped.writeConcern).doReturn(mock()) + + mongoCollection.writeConcern + verify(wrapped).writeConcern + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingWithDocumentClass() { + val mongoCollection = MongoCollection(wrapped) + whenever(wrapped.withDocumentClass(BsonDocument::class.java)).doReturn(mock()) + + mongoCollection.withDocumentClass() + verify(wrapped).withDocumentClass(BsonDocument::class.java) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingWithCodecRegistry() { + val mongoCollection = MongoCollection(wrapped) + val codecRegistry = mock() + whenever(wrapped.withCodecRegistry(codecRegistry)).doReturn(mock()) + + mongoCollection.withCodecRegistry(codecRegistry) + verify(wrapped).withCodecRegistry(codecRegistry) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingWithReadPreference() { + val mongoCollection = MongoCollection(wrapped) + val readPreference = ReadPreference.primaryPreferred() + whenever(wrapped.withReadPreference(readPreference)).doReturn(mock()) + + mongoCollection.withReadPreference(readPreference) + verify(wrapped).withReadPreference(readPreference) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingWithReadConcern() { + val mongoCollection = MongoCollection(wrapped) + val readConcern = ReadConcern.AVAILABLE + whenever(wrapped.withReadConcern(readConcern)).doReturn(mock()) + + 
+
+    @Test
+    fun shouldCallTheUnderlyingWithWriteConcern() {
+        val mongoCollection = MongoCollection(wrapped)
+        val writeConcern = WriteConcern.MAJORITY
+        whenever(wrapped.withWriteConcern(writeConcern)).doReturn(mock())
+
+        mongoCollection.withWriteConcern(writeConcern)
+        verify(wrapped).withWriteConcern(writeConcern)
+        verifyNoMoreInteractions(wrapped)
+    }
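+
+    // Default option instances are created inside the wrapper, so the stubs match them
+    // with refEq (reflective field equality) rather than reference equality.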
+    @Test
+    fun shouldCallTheUnderlyingCountDocuments() {
+        val mongoCollection = MongoCollection(wrapped)
+
+        val defaultOptions = CountOptions()
+
+        val options = CountOptions().comment("comment")
+
+        whenever(wrapped.countDocuments(eq(defaultFilter), refEq(defaultOptions))).doReturn(Mono.fromCallable { 1 })
+        whenever(wrapped.countDocuments(eq(filter), refEq(defaultOptions))).doReturn(Mono.fromCallable { 2 })
+        whenever(wrapped.countDocuments(eq(filter), eq(options))).doReturn(Mono.fromCallable { 3 })
+        whenever(wrapped.countDocuments(eq(clientSession.wrapped), eq(defaultFilter), refEq(defaultOptions)))
+            .doReturn(Mono.fromCallable { 4 })
+        whenever(wrapped.countDocuments(eq(clientSession.wrapped), eq(filter), refEq(defaultOptions)))
+            .doReturn(Mono.fromCallable { 5 })
+        whenever(wrapped.countDocuments(eq(clientSession.wrapped), eq(filter), eq(options)))
+            .doReturn(Mono.fromCallable { 6 })
+
+        runBlocking {
+            assertEquals(1, mongoCollection.countDocuments())
+            assertEquals(2, mongoCollection.countDocuments(filter))
+            assertEquals(3, mongoCollection.countDocuments(filter, options))
+            assertEquals(4, mongoCollection.countDocuments(clientSession))
+            assertEquals(5, mongoCollection.countDocuments(clientSession, filter))
+            assertEquals(6, mongoCollection.countDocuments(clientSession, filter, options))
+        }
+
+        verify(wrapped).countDocuments(eq(defaultFilter), refEq(defaultOptions))
+        verify(wrapped).countDocuments(eq(filter), refEq(defaultOptions))
+        verify(wrapped).countDocuments(eq(filter), eq(options))
+        verify(wrapped).countDocuments(eq(clientSession.wrapped), eq(defaultFilter), refEq(defaultOptions))
+        verify(wrapped).countDocuments(eq(clientSession.wrapped), eq(filter), refEq(defaultOptions))
+        verify(wrapped).countDocuments(eq(clientSession.wrapped), eq(filter), eq(options))
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingEstimatedDocumentCount() {
+        val mongoCollection = MongoCollection(wrapped)
+        val defaultOptions = EstimatedDocumentCountOptions()
+        val options = EstimatedDocumentCountOptions().comment("comment")
+
+        whenever(wrapped.estimatedDocumentCount(refEq(defaultOptions))).doReturn(Mono.fromCallable { 1 })
+        whenever(wrapped.estimatedDocumentCount(options)).doReturn(Mono.fromCallable { 2 })
+
+        runBlocking {
+            assertEquals(1, mongoCollection.estimatedDocumentCount())
+            assertEquals(2, mongoCollection.estimatedDocumentCount(options))
+        }
+
+        verify(wrapped).estimatedDocumentCount(refEq(defaultOptions))
+        verify(wrapped).estimatedDocumentCount(options)
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingDistinct() {
+        val mongoCollection = MongoCollection(wrapped)
+        val fieldName = "fieldName"
+
+        whenever(wrapped.distinct(fieldName, defaultFilter, Document::class.java)).doReturn(mock())
+        whenever(wrapped.distinct(fieldName, filter, Document::class.java)).doReturn(mock())
+        whenever(wrapped.distinct(clientSession.wrapped, fieldName, defaultFilter, Document::class.java))
+            .doReturn(mock())
+        whenever(wrapped.distinct(clientSession.wrapped, fieldName, filter, Document::class.java)).doReturn(mock())
+        whenever(wrapped.distinct(fieldName, defaultFilter, BsonDocument::class.java)).doReturn(mock())
+        whenever(wrapped.distinct(fieldName, filter, BsonDocument::class.java)).doReturn(mock())
+        whenever(wrapped.distinct(clientSession.wrapped, fieldName, defaultFilter, BsonDocument::class.java))
+            .doReturn(mock())
+        whenever(wrapped.distinct(clientSession.wrapped, fieldName, filter, BsonDocument::class.java)).doReturn(mock())
+
+        mongoCollection.distinct("fieldName", resultClass = Document::class.java)
+        mongoCollection.distinct("fieldName", filter, Document::class.java)
+        mongoCollection.distinct(clientSession, "fieldName", resultClass = Document::class.java)
+        mongoCollection.distinct(clientSession, "fieldName", filter, Document::class.java)
+
+        mongoCollection.distinct<BsonDocument>("fieldName")
+        mongoCollection.distinct<BsonDocument>("fieldName", filter)
+        mongoCollection.distinct<BsonDocument>(clientSession, "fieldName")
+        mongoCollection.distinct<BsonDocument>(clientSession, "fieldName", filter)
+
+        verify(wrapped).distinct(fieldName, defaultFilter, Document::class.java)
+        verify(wrapped).distinct(fieldName, filter, Document::class.java)
+        verify(wrapped).distinct(clientSession.wrapped, fieldName, defaultFilter, Document::class.java)
+        verify(wrapped).distinct(clientSession.wrapped, fieldName, filter, Document::class.java)
+
+        verify(wrapped).distinct(fieldName, defaultFilter, BsonDocument::class.java)
+        verify(wrapped).distinct(fieldName, filter, BsonDocument::class.java)
+        verify(wrapped).distinct(clientSession.wrapped, fieldName, defaultFilter, BsonDocument::class.java)
+        verify(wrapped).distinct(clientSession.wrapped, fieldName, filter, BsonDocument::class.java)
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingFind() {
+        val mongoCollection = MongoCollection(wrapped)
+
+        whenever(wrapped.documentClass).doReturn(Document::class.java)
+        whenever(wrapped.find(defaultFilter, Document::class.java)).doReturn(mock())
+        whenever(wrapped.find(filter, Document::class.java)).doReturn(mock())
+        whenever(wrapped.find(clientSession.wrapped, defaultFilter, Document::class.java)).doReturn(mock())
+        whenever(wrapped.find(clientSession.wrapped, filter, Document::class.java)).doReturn(mock())
+        whenever(wrapped.find(defaultFilter, BsonDocument::class.java)).doReturn(mock())
+        whenever(wrapped.find(filter, BsonDocument::class.java)).doReturn(mock())
+        whenever(wrapped.find(clientSession.wrapped, defaultFilter, BsonDocument::class.java)).doReturn(mock())
+        whenever(wrapped.find(clientSession.wrapped, filter, BsonDocument::class.java)).doReturn(mock())
+
+        mongoCollection.find()
+        mongoCollection.find(filter)
+        mongoCollection.find(clientSession)
+        mongoCollection.find(clientSession, filter)
+
+        mongoCollection.find(resultClass = Document::class.java)
+        mongoCollection.find(filter, resultClass = Document::class.java)
+        mongoCollection.find(clientSession, resultClass = Document::class.java)
+        mongoCollection.find(clientSession, filter, Document::class.java)
+
+        mongoCollection.find<BsonDocument>()
+        mongoCollection.find<BsonDocument>(filter)
+        mongoCollection.find<BsonDocument>(clientSession)
+        mongoCollection.find<BsonDocument>(clientSession, filter)
+
+        verify(wrapped, times(4)).documentClass
+        verify(wrapped, times(2)).find(defaultFilter, Document::class.java)
+        verify(wrapped, times(2)).find(filter, Document::class.java)
+        verify(wrapped, times(2)).find(clientSession.wrapped, defaultFilter, Document::class.java)
+        verify(wrapped, times(2)).find(clientSession.wrapped, filter, Document::class.java)
+        verify(wrapped, times(1)).find(defaultFilter, BsonDocument::class.java)
+        verify(wrapped, times(1)).find(filter, BsonDocument::class.java)
+        verify(wrapped, times(1)).find(clientSession.wrapped, defaultFilter, BsonDocument::class.java)
+        verify(wrapped, times(1)).find(clientSession.wrapped, filter, BsonDocument::class.java)
+        verifyNoMoreInteractions(wrapped)
+    }
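+
+    // Calls without an explicit result class delegate via the collection's
+    // documentClass; the <BsonDocument> calls exercise the reified-type overloads,
+    // which is why the verify counts differ between the two result classes.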
+    @Test
+    fun shouldCallTheUnderlyingAggregate() {
+        val mongoCollection = MongoCollection(wrapped)
+
+        whenever(wrapped.documentClass).doReturn(Document::class.java)
+        whenever(wrapped.aggregate(pipeline, Document::class.java)).doReturn(mock())
+        whenever(wrapped.aggregate(clientSession.wrapped, pipeline, Document::class.java)).doReturn(mock())
+        whenever(wrapped.aggregate(pipeline, BsonDocument::class.java)).doReturn(mock())
+        whenever(wrapped.aggregate(clientSession.wrapped, pipeline, BsonDocument::class.java)).doReturn(mock())
+
+        mongoCollection.aggregate(pipeline)
+        mongoCollection.aggregate(clientSession, pipeline)
+
+        mongoCollection.aggregate(pipeline, resultClass = Document::class.java)
+        mongoCollection.aggregate(clientSession, pipeline, Document::class.java)
+
+        mongoCollection.aggregate<BsonDocument>(pipeline)
+        mongoCollection.aggregate<BsonDocument>(clientSession, pipeline)
+
+        verify(wrapped, times(2)).documentClass
+        verify(wrapped, times(2)).aggregate(pipeline, Document::class.java)
+        verify(wrapped, times(2)).aggregate(clientSession.wrapped, pipeline, Document::class.java)
+        verify(wrapped, times(1)).aggregate(pipeline, BsonDocument::class.java)
+        verify(wrapped, times(1)).aggregate(clientSession.wrapped, pipeline, BsonDocument::class.java)
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingWatch() {
+        val mongoCollection = MongoCollection(wrapped)
+        val pipeline = listOf(Document(mapOf("a" to 1)))
+
+        whenever(wrapped.documentClass).doReturn(Document::class.java)
+        whenever(wrapped.watch(emptyList(), Document::class.java)).doReturn(mock())
+        whenever(wrapped.watch(pipeline, Document::class.java)).doReturn(mock())
+        whenever(wrapped.watch(clientSession.wrapped, emptyList(), Document::class.java)).doReturn(mock())
+        whenever(wrapped.watch(clientSession.wrapped, pipeline, Document::class.java)).doReturn(mock())
+        whenever(wrapped.watch(emptyList(), BsonDocument::class.java)).doReturn(mock())
+        whenever(wrapped.watch(pipeline, BsonDocument::class.java)).doReturn(mock())
+        whenever(wrapped.watch(clientSession.wrapped, emptyList(), BsonDocument::class.java)).doReturn(mock())
+        whenever(wrapped.watch(clientSession.wrapped, pipeline, BsonDocument::class.java)).doReturn(mock())
+
+        mongoCollection.watch()
+        mongoCollection.watch(pipeline)
+        mongoCollection.watch(clientSession)
+        mongoCollection.watch(clientSession, pipeline)
+
+        mongoCollection.watch(resultClass = Document::class.java)
+        mongoCollection.watch(pipeline, Document::class.java)
+        mongoCollection.watch(clientSession, resultClass = Document::class.java)
+        mongoCollection.watch(clientSession, pipeline, Document::class.java)
+
+        mongoCollection.watch<BsonDocument>()
+        mongoCollection.watch<BsonDocument>(pipeline)
+        mongoCollection.watch<BsonDocument>(clientSession)
+        mongoCollection.watch<BsonDocument>(clientSession, pipeline)
+
+        verify(wrapped, times(4)).documentClass
+        verify(wrapped, times(2)).watch(emptyList(), Document::class.java)
+        verify(wrapped, times(2)).watch(pipeline, Document::class.java)
+        verify(wrapped, times(2)).watch(clientSession.wrapped, emptyList(), Document::class.java)
+        verify(wrapped, times(2)).watch(clientSession.wrapped, pipeline, Document::class.java)
+        verify(wrapped, times(1)).watch(emptyList(), BsonDocument::class.java)
+        verify(wrapped, times(1)).watch(pipeline, BsonDocument::class.java)
+        verify(wrapped, times(1)).watch(clientSession.wrapped, emptyList(), BsonDocument::class.java)
+        verify(wrapped, times(1)).watch(clientSession.wrapped, pipeline, BsonDocument::class.java)
+        verifyNoMoreInteractions(wrapped)
+    }
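+
+    // mapReduce is deprecated upstream but still delegated by the wrapper, hence the
+    // @Suppress("DEPRECATION") on the test below.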
+    @Suppress("DEPRECATION")
+    @Test
+    fun shouldCallTheUnderlyingMapReduce() {
+        val mongoCollection = MongoCollection(wrapped)
+        val mapFunction = "mapper"
+        val reduceFunction = "reducer"
+
+        whenever(wrapped.documentClass).doReturn(Document::class.java)
+        whenever(wrapped.mapReduce(mapFunction, reduceFunction, Document::class.java)).doReturn(mock())
+        whenever(wrapped.mapReduce(clientSession.wrapped, mapFunction, reduceFunction, Document::class.java))
+            .doReturn(mock())
+        whenever(wrapped.mapReduce(mapFunction, reduceFunction, BsonDocument::class.java)).doReturn(mock())
+        whenever(wrapped.mapReduce(clientSession.wrapped, mapFunction, reduceFunction, BsonDocument::class.java))
+            .doReturn(mock())
+
+        mongoCollection.mapReduce(mapFunction, reduceFunction)
+        mongoCollection.mapReduce(clientSession, mapFunction, reduceFunction)
+
+        mongoCollection.mapReduce(mapFunction, reduceFunction, Document::class.java)
+        mongoCollection.mapReduce(clientSession, mapFunction, reduceFunction, Document::class.java)
+
+        mongoCollection.mapReduce<BsonDocument>(mapFunction, reduceFunction)
+        mongoCollection.mapReduce<BsonDocument>(clientSession, mapFunction, reduceFunction)
+
+        verify(wrapped, times(2)).documentClass
+        verify(wrapped, times(2)).mapReduce(mapFunction, reduceFunction, Document::class.java)
+        verify(wrapped, times(2)).mapReduce(clientSession.wrapped, mapFunction, reduceFunction, Document::class.java)
+        verify(wrapped, times(1)).mapReduce(mapFunction, reduceFunction, BsonDocument::class.java)
+        verify(wrapped, times(1))
+            .mapReduce(clientSession.wrapped, mapFunction, reduceFunction, BsonDocument::class.java)
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingInsertOne() {
+        val mongoCollection = MongoCollection(wrapped)
+        val value = Document("u", 1)
+        val defaultOptions = InsertOneOptions()
+        val options = InsertOneOptions().comment("comment")
+
+        whenever(wrapped.insertOne(eq(value), refEq(defaultOptions))).doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.insertOne(eq(value), eq(options))).doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.insertOne(eq(clientSession.wrapped), eq(value), refEq(defaultOptions)))
+            .doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.insertOne(eq(clientSession.wrapped), eq(value), eq(options)))
+            .doReturn(Mono.fromCallable { mock() })
+
+        runBlocking {
+            mongoCollection.insertOne(value)
+            mongoCollection.insertOne(value, options)
+            mongoCollection.insertOne(clientSession, value)
+            mongoCollection.insertOne(clientSession, value, options)
+        }
+
+        verify(wrapped).insertOne(eq(value), refEq(defaultOptions))
+        verify(wrapped).insertOne(eq(value), eq(options))
+        verify(wrapped).insertOne(eq(clientSession.wrapped), eq(value), refEq(defaultOptions))
+        verify(wrapped).insertOne(eq(clientSession.wrapped), eq(value), eq(options))
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingInsertMany() {
+        val mongoCollection = MongoCollection(wrapped)
+        val value = listOf(Document("u", 1))
+        val defaultOptions = InsertManyOptions()
+        val options = InsertManyOptions().comment("comment")
+
+        whenever(wrapped.insertMany(eq(value), refEq(defaultOptions))).doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.insertMany(eq(value), eq(options))).doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.insertMany(eq(clientSession.wrapped), eq(value), refEq(defaultOptions)))
+            .doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.insertMany(eq(clientSession.wrapped), eq(value), eq(options)))
+            .doReturn(Mono.fromCallable { mock() })
+
+        runBlocking {
+            mongoCollection.insertMany(value)
+            mongoCollection.insertMany(value, options)
+            mongoCollection.insertMany(clientSession, value)
+            mongoCollection.insertMany(clientSession, value, options)
+        }
+
+        verify(wrapped).insertMany(eq(value), refEq(defaultOptions))
+        verify(wrapped).insertMany(eq(value), eq(options))
+        verify(wrapped).insertMany(eq(clientSession.wrapped), eq(value), refEq(defaultOptions))
+        verify(wrapped).insertMany(eq(clientSession.wrapped), eq(value), eq(options))
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingBulkWrite() {
+        val mongoCollection = MongoCollection(wrapped)
+        val value = listOf(InsertOneModel(Document("u", 1)))
+        val defaultOptions = BulkWriteOptions()
+        val options = BulkWriteOptions().comment("comment")
+
+        whenever(wrapped.bulkWrite(eq(value), refEq(defaultOptions))).doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.bulkWrite(eq(value), eq(options))).doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.bulkWrite(eq(clientSession.wrapped), eq(value), refEq(defaultOptions)))
+            .doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.bulkWrite(eq(clientSession.wrapped), eq(value), eq(options)))
+            .doReturn(Mono.fromCallable { mock() })
+
+        runBlocking {
+            mongoCollection.bulkWrite(value)
+            mongoCollection.bulkWrite(value, options)
+            mongoCollection.bulkWrite(clientSession, value)
+            mongoCollection.bulkWrite(clientSession, value, options)
+        }
+
+        verify(wrapped).bulkWrite(eq(value), refEq(defaultOptions))
+        verify(wrapped).bulkWrite(eq(value), eq(options))
+        verify(wrapped).bulkWrite(eq(clientSession.wrapped), eq(value), refEq(defaultOptions))
+        verify(wrapped).bulkWrite(eq(clientSession.wrapped), eq(value), eq(options))
+        verifyNoMoreInteractions(wrapped)
+    }
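+
+    // updateOne and updateMany accept either a single Bson update or an aggregation
+    // pipeline (a List of Bson); both shapes are stubbed and verified separately.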
+    @Test
+    fun shouldCallTheUnderlyingUpdateOne() {
+        val mongoCollection = MongoCollection(wrapped)
+        val update = Document("u", 1)
+        val updates = listOf(update)
+        val defaultOptions = UpdateOptions()
+        val options = UpdateOptions().comment("comment")
+
+        whenever(wrapped.updateOne(eq(filter), eq(update), refEq(defaultOptions)))
+            .doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.updateOne(eq(filter), eq(update), eq(options))).doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.updateOne(eq(filter), eq(updates), refEq(defaultOptions)))
+            .doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.updateOne(eq(filter), eq(updates), eq(options))).doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.updateOne(eq(clientSession.wrapped), eq(filter), eq(update), refEq(defaultOptions)))
+            .doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.updateOne(eq(clientSession.wrapped), eq(filter), eq(update), eq(options)))
+            .doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.updateOne(eq(clientSession.wrapped), eq(filter), eq(updates), refEq(defaultOptions)))
+            .doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.updateOne(eq(clientSession.wrapped), eq(filter), eq(updates), eq(options)))
+            .doReturn(Mono.fromCallable { mock() })
+
+        runBlocking {
+            mongoCollection.updateOne(filter, update)
+            mongoCollection.updateOne(filter, update, options)
+            mongoCollection.updateOne(filter, updates)
+            mongoCollection.updateOne(filter, updates, options)
+            mongoCollection.updateOne(clientSession, filter, update)
+            mongoCollection.updateOne(clientSession, filter, update, options)
+            mongoCollection.updateOne(clientSession, filter, updates)
+            mongoCollection.updateOne(clientSession, filter, updates, options)
+        }
+
+        verify(wrapped).updateOne(eq(filter), eq(update), refEq(defaultOptions))
+        verify(wrapped).updateOne(eq(filter), eq(update), eq(options))
+        verify(wrapped).updateOne(eq(filter), eq(updates), refEq(defaultOptions))
+        verify(wrapped).updateOne(eq(filter), eq(updates), eq(options))
+        verify(wrapped).updateOne(eq(clientSession.wrapped), eq(filter), eq(update), refEq(defaultOptions))
+        verify(wrapped).updateOne(eq(clientSession.wrapped), eq(filter), eq(update), eq(options))
+        verify(wrapped).updateOne(eq(clientSession.wrapped), eq(filter), eq(updates), refEq(defaultOptions))
+        verify(wrapped).updateOne(eq(clientSession.wrapped), eq(filter), eq(updates), eq(options))
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingUpdateMany() {
+        val mongoCollection = MongoCollection(wrapped)
+        val update = Document("u", 1)
+        val updates = listOf(update)
+        val defaultOptions = UpdateOptions()
+        val options = UpdateOptions().comment("comment")
+
+        whenever(wrapped.updateMany(eq(filter), eq(update), refEq(defaultOptions)))
+            .doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.updateMany(eq(filter), eq(update), eq(options))).doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.updateMany(eq(filter), eq(updates), refEq(defaultOptions)))
+            .doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.updateMany(eq(filter), eq(updates), eq(options))).doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.updateMany(eq(clientSession.wrapped), eq(filter), eq(update), refEq(defaultOptions)))
+            .doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.updateMany(eq(clientSession.wrapped), eq(filter), eq(update), eq(options)))
+            .doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.updateMany(eq(clientSession.wrapped), eq(filter), eq(updates), refEq(defaultOptions)))
+            .doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.updateMany(eq(clientSession.wrapped), eq(filter), eq(updates), eq(options)))
+            .doReturn(Mono.fromCallable { mock() })
+
+        runBlocking {
+            mongoCollection.updateMany(filter, update)
+            mongoCollection.updateMany(filter, update, options)
+            mongoCollection.updateMany(filter, updates)
+            mongoCollection.updateMany(filter, updates, options)
+            mongoCollection.updateMany(clientSession, filter, update)
+            mongoCollection.updateMany(clientSession, filter, update, options)
+            mongoCollection.updateMany(clientSession, filter, updates)
+            mongoCollection.updateMany(clientSession, filter, updates, options)
+        }
+
+        verify(wrapped).updateMany(eq(filter), eq(update), refEq(defaultOptions))
+        verify(wrapped).updateMany(eq(filter), eq(update), eq(options))
+        verify(wrapped).updateMany(eq(filter), eq(updates), refEq(defaultOptions))
+        verify(wrapped).updateMany(eq(filter), eq(updates), eq(options))
+        verify(wrapped).updateMany(eq(clientSession.wrapped), eq(filter), eq(update), refEq(defaultOptions))
+        verify(wrapped).updateMany(eq(clientSession.wrapped), eq(filter), eq(update), eq(options))
+        verify(wrapped).updateMany(eq(clientSession.wrapped), eq(filter), eq(updates), refEq(defaultOptions))
+        verify(wrapped).updateMany(eq(clientSession.wrapped), eq(filter), eq(updates), eq(options))
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingReplaceOne() {
+        val mongoCollection = MongoCollection(wrapped)
+        val replacement = Document("u", 1)
+        val defaultOptions = ReplaceOptions()
+        val options = ReplaceOptions().comment("comment")
+
+        whenever(wrapped.replaceOne(eq(filter), eq(replacement), refEq(defaultOptions)))
+            .doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.replaceOne(eq(filter), eq(replacement), eq(options))).doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.replaceOne(eq(clientSession.wrapped), eq(filter), eq(replacement), refEq(defaultOptions)))
+            .doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.replaceOne(eq(clientSession.wrapped), eq(filter), eq(replacement), eq(options)))
+            .doReturn(Mono.fromCallable { mock() })
+
+        runBlocking {
+            mongoCollection.replaceOne(filter, replacement)
+            mongoCollection.replaceOne(filter, replacement, options)
+            mongoCollection.replaceOne(clientSession, filter, replacement)
+            mongoCollection.replaceOne(clientSession, filter, replacement, options)
+        }
+
+        verify(wrapped).replaceOne(eq(filter), eq(replacement), refEq(defaultOptions))
+        verify(wrapped).replaceOne(eq(filter), eq(replacement), eq(options))
+        verify(wrapped).replaceOne(eq(clientSession.wrapped), eq(filter), eq(replacement), refEq(defaultOptions))
+        verify(wrapped).replaceOne(eq(clientSession.wrapped), eq(filter), eq(replacement), eq(options))
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingDeleteOne() {
+        val mongoCollection = MongoCollection(wrapped)
+
+        val defaultOptions = DeleteOptions()
+        val options = DeleteOptions().comment("comment")
+
+        whenever(wrapped.deleteOne(eq(filter), refEq(defaultOptions))).doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.deleteOne(eq(filter), eq(options))).doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.deleteOne(eq(clientSession.wrapped), eq(filter), refEq(defaultOptions)))
+            .doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.deleteOne(eq(clientSession.wrapped), eq(filter), eq(options)))
+            .doReturn(Mono.fromCallable { mock() })
+
+        runBlocking {
+            mongoCollection.deleteOne(filter)
+            mongoCollection.deleteOne(filter, options)
+            mongoCollection.deleteOne(clientSession, filter)
+            mongoCollection.deleteOne(clientSession, filter, options)
+        }
+
+        verify(wrapped).deleteOne(eq(filter), refEq(defaultOptions))
+        verify(wrapped).deleteOne(eq(filter), eq(options))
+        verify(wrapped).deleteOne(eq(clientSession.wrapped), eq(filter), refEq(defaultOptions))
+        verify(wrapped).deleteOne(eq(clientSession.wrapped), eq(filter), eq(options))
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingDeleteMany() {
+        val mongoCollection = MongoCollection(wrapped)
+
+        val defaultOptions = DeleteOptions()
+        val options = DeleteOptions().comment("comment")
+
+        whenever(wrapped.deleteMany(eq(filter), refEq(defaultOptions))).doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.deleteMany(eq(filter), eq(options))).doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.deleteMany(eq(clientSession.wrapped), eq(filter), refEq(defaultOptions)))
+            .doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.deleteMany(eq(clientSession.wrapped), eq(filter), eq(options)))
+            .doReturn(Mono.fromCallable { mock() })
+
+        runBlocking {
+            mongoCollection.deleteMany(filter)
+            mongoCollection.deleteMany(filter, options)
+            mongoCollection.deleteMany(clientSession, filter)
+            mongoCollection.deleteMany(clientSession, filter, options)
+        }
+
+        verify(wrapped).deleteMany(eq(filter), refEq(defaultOptions))
+        verify(wrapped).deleteMany(eq(filter), eq(options))
+        verify(wrapped).deleteMany(eq(clientSession.wrapped), eq(filter), refEq(defaultOptions))
+        verify(wrapped).deleteMany(eq(clientSession.wrapped), eq(filter), eq(options))
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingFindOneAndDelete() {
+        val mongoCollection = MongoCollection(wrapped)
+
+        val defaultOptions = FindOneAndDeleteOptions()
+        val options = FindOneAndDeleteOptions().comment("comment")
+
+        whenever(wrapped.findOneAndDelete(eq(filter), refEq(defaultOptions))).doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.findOneAndDelete(eq(filter), eq(options))).doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.findOneAndDelete(eq(clientSession.wrapped), eq(filter), refEq(defaultOptions)))
+            .doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.findOneAndDelete(eq(clientSession.wrapped), eq(filter), eq(options)))
+            .doReturn(Mono.fromCallable { mock() })
+
+        runBlocking {
+            mongoCollection.findOneAndDelete(filter)
+            mongoCollection.findOneAndDelete(filter, options)
+            mongoCollection.findOneAndDelete(clientSession, filter)
+            mongoCollection.findOneAndDelete(clientSession, filter, options)
+        }
+
+        verify(wrapped).findOneAndDelete(eq(filter), refEq(defaultOptions))
+        verify(wrapped).findOneAndDelete(eq(filter), eq(options))
+        verify(wrapped).findOneAndDelete(eq(clientSession.wrapped), eq(filter), refEq(defaultOptions))
+        verify(wrapped).findOneAndDelete(eq(clientSession.wrapped), eq(filter), eq(options))
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingFindOneAndUpdate() {
+        val mongoCollection = MongoCollection(wrapped)
+        val update = Document("u", 1)
+        val updateList = listOf(update)
+        val defaultOptions = FindOneAndUpdateOptions()
+        val options = FindOneAndUpdateOptions().comment("comment")
+
+        whenever(wrapped.findOneAndUpdate(eq(filter), eq(update), refEq(defaultOptions)))
+            .doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.findOneAndUpdate(eq(filter), eq(update), eq(options))).doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.findOneAndUpdate(eq(filter), eq(updateList), refEq(defaultOptions)))
+            .doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.findOneAndUpdate(eq(filter), eq(updateList), eq(options)))
+            .doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.findOneAndUpdate(eq(clientSession.wrapped), eq(filter), eq(update), refEq(defaultOptions)))
+            .doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.findOneAndUpdate(eq(clientSession.wrapped), eq(filter), eq(update), eq(options)))
+            .doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.findOneAndUpdate(eq(clientSession.wrapped), eq(filter), eq(updateList), refEq(defaultOptions)))
+            .doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.findOneAndUpdate(eq(clientSession.wrapped), eq(filter), eq(updateList), eq(options)))
+            .doReturn(Mono.fromCallable { mock() })
+
+        runBlocking {
+            mongoCollection.findOneAndUpdate(filter, update)
+            mongoCollection.findOneAndUpdate(filter, update, options)
+            mongoCollection.findOneAndUpdate(filter, updateList)
+            mongoCollection.findOneAndUpdate(filter, updateList, options)
+            mongoCollection.findOneAndUpdate(clientSession, filter, update)
+            mongoCollection.findOneAndUpdate(clientSession, filter, update, options)
+            mongoCollection.findOneAndUpdate(clientSession, filter, updateList)
+            mongoCollection.findOneAndUpdate(clientSession, filter, updateList, options)
+        }
+
+        verify(wrapped).findOneAndUpdate(eq(filter), eq(update), refEq(defaultOptions))
+        verify(wrapped).findOneAndUpdate(eq(filter), eq(update), eq(options))
+        verify(wrapped).findOneAndUpdate(eq(filter), eq(updateList), refEq(defaultOptions))
+        verify(wrapped).findOneAndUpdate(eq(filter), eq(updateList), eq(options))
+        verify(wrapped).findOneAndUpdate(eq(clientSession.wrapped), eq(filter), eq(update), refEq(defaultOptions))
+        verify(wrapped).findOneAndUpdate(eq(clientSession.wrapped), eq(filter), eq(update), eq(options))
+        verify(wrapped).findOneAndUpdate(eq(clientSession.wrapped), eq(filter), eq(updateList), refEq(defaultOptions))
+        verify(wrapped).findOneAndUpdate(eq(clientSession.wrapped), eq(filter), eq(updateList), eq(options))
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingFindOneAndReplace() {
+        val mongoCollection = MongoCollection(wrapped)
+        val replacement = Document("u", 1)
+        val defaultOptions = FindOneAndReplaceOptions()
+        val options = FindOneAndReplaceOptions().comment("comment")
+
+        whenever(wrapped.findOneAndReplace(eq(filter), eq(replacement), refEq(defaultOptions)))
+            .doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.findOneAndReplace(eq(filter), eq(replacement), eq(options)))
+            .doReturn(Mono.fromCallable { mock() })
+        whenever(
+                wrapped.findOneAndReplace(
+                    eq(clientSession.wrapped), eq(filter), eq(replacement), refEq(defaultOptions)))
+            .doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.findOneAndReplace(eq(clientSession.wrapped), eq(filter), eq(replacement), eq(options)))
+            .doReturn(Mono.fromCallable { mock() })
+
+        runBlocking {
+            mongoCollection.findOneAndReplace(filter, replacement)
+            mongoCollection.findOneAndReplace(filter, replacement, options)
+            mongoCollection.findOneAndReplace(clientSession, filter, replacement)
+            mongoCollection.findOneAndReplace(clientSession, filter, replacement, options)
+        }
+
+        verify(wrapped).findOneAndReplace(eq(filter), eq(replacement), refEq(defaultOptions))
+        verify(wrapped).findOneAndReplace(eq(filter), eq(replacement), eq(options))
+        verify(wrapped).findOneAndReplace(eq(clientSession.wrapped), eq(filter), eq(replacement), refEq(defaultOptions))
+        verify(wrapped).findOneAndReplace(eq(clientSession.wrapped), eq(filter), eq(replacement), eq(options))
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingDrop() {
+        val mongoCollection = MongoCollection(wrapped)
+        val defaultOptions = DropCollectionOptions()
+        val options = DropCollectionOptions().encryptedFields(Document())
+
+        whenever(wrapped.drop(refEq(defaultOptions))).doReturn(Mono.empty())
+        whenever(wrapped.drop(options)).doReturn(Mono.empty())
+        whenever(wrapped.drop(eq(clientSession.wrapped), refEq(defaultOptions))).doReturn(Mono.empty())
+        whenever(wrapped.drop(clientSession.wrapped, options)).doReturn(Mono.empty())
+
+        runBlocking {
+            mongoCollection.drop()
+            mongoCollection.drop(options)
+            mongoCollection.drop(clientSession)
+            mongoCollection.drop(clientSession, options)
+        }
+
+        verify(wrapped).drop(refEq(defaultOptions))
+        verify(wrapped).drop(eq(options))
+        verify(wrapped).drop(eq(clientSession.wrapped), refEq(defaultOptions))
+        verify(wrapped).drop(eq(clientSession.wrapped), eq(options))
+        verifyNoMoreInteractions(wrapped)
+    }
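+
+    // createIndex resolves to the name of the created index; distinct stubbed strings
+    // prove that each overload reaches exactly the intended underlying call.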
+    @Test
+    fun shouldCallTheUnderlyingCreateIndex() {
+        val mongoCollection = MongoCollection(wrapped)
+        val key = Document()
+        val defaultOptions = IndexOptions()
+        val options = IndexOptions().name("name")
+
+        whenever(wrapped.createIndex(eq(key), refEq(defaultOptions))).doReturn(Mono.fromCallable { "1" })
+        whenever(wrapped.createIndex(eq(key), eq(options))).doReturn(Mono.fromCallable { "2" })
+        whenever(wrapped.createIndex(eq(clientSession.wrapped), eq(key), refEq(defaultOptions)))
+            .doReturn(Mono.fromCallable { "3" })
+        whenever(wrapped.createIndex(eq(clientSession.wrapped), eq(key), eq(options)))
+            .doReturn(Mono.fromCallable { "4" })
+
+        runBlocking {
+            assertEquals("1", mongoCollection.createIndex(key))
+            assertEquals("2", mongoCollection.createIndex(key, options))
+            assertEquals("3", mongoCollection.createIndex(clientSession, key))
+            assertEquals("4", mongoCollection.createIndex(clientSession, key, options))
+        }
+
+        verify(wrapped).createIndex(eq(key), refEq(defaultOptions))
+        verify(wrapped).createIndex(eq(key), eq(options))
+        verify(wrapped).createIndex(eq(clientSession.wrapped), eq(key), refEq(defaultOptions))
+        verify(wrapped).createIndex(eq(clientSession.wrapped), eq(key), eq(options))
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingCreateIndexes() {
+        val mongoCollection = MongoCollection(wrapped)
+        val indexes = listOf(IndexModel(Document()))
+        val defaultOptions = CreateIndexOptions()
+        val options = CreateIndexOptions().commitQuorum(CreateIndexCommitQuorum.MAJORITY)
+
+        whenever(wrapped.createIndexes(eq(indexes), refEq(defaultOptions))).doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.createIndexes(eq(indexes), eq(options))).doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.createIndexes(eq(clientSession.wrapped), eq(indexes), refEq(defaultOptions)))
+            .doReturn(Mono.fromCallable { mock() })
+        whenever(wrapped.createIndexes(eq(clientSession.wrapped), eq(indexes), eq(options)))
+            .doReturn(Mono.fromCallable { mock() })
+
+        runBlocking {
+            mongoCollection.createIndexes(indexes)
+            mongoCollection.createIndexes(indexes, options)
+            mongoCollection.createIndexes(clientSession, indexes)
+            mongoCollection.createIndexes(clientSession, indexes, options)
+        }
+
+        verify(wrapped).createIndexes(eq(indexes), refEq(defaultOptions))
+        verify(wrapped).createIndexes(eq(indexes), eq(options))
+        verify(wrapped).createIndexes(eq(clientSession.wrapped), eq(indexes), refEq(defaultOptions))
+        verify(wrapped).createIndexes(eq(clientSession.wrapped), eq(indexes), eq(options))
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingListIndexes() {
+        val mongoCollection = MongoCollection(wrapped)
+
+        whenever(wrapped.listIndexes(Document::class.java)).doReturn(mock())
+        whenever(wrapped.listIndexes(clientSession.wrapped, Document::class.java)).doReturn(mock())
+        whenever(wrapped.listIndexes(BsonDocument::class.java)).doReturn(mock())
+        whenever(wrapped.listIndexes(clientSession.wrapped, BsonDocument::class.java)).doReturn(mock())
+
+        mongoCollection.listIndexes()
+        mongoCollection.listIndexes(clientSession)
+
+        mongoCollection.listIndexes(resultClass = Document::class.java)
+        mongoCollection.listIndexes(clientSession, Document::class.java)
+
+        mongoCollection.listIndexes<BsonDocument>()
+        mongoCollection.listIndexes<BsonDocument>(clientSession)
+
+        verify(wrapped, times(2)).listIndexes(Document::class.java)
+        verify(wrapped, times(2)).listIndexes(clientSession.wrapped, Document::class.java)
+        verify(wrapped, times(1)).listIndexes(BsonDocument::class.java)
+        verify(wrapped, times(1)).listIndexes(clientSession.wrapped, BsonDocument::class.java)
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingDropIndex() {
+        val mongoCollection = MongoCollection(wrapped)
+        val indexName = "index"
+        val keys = Document()
+        val defaultOptions = DropIndexOptions()
+        val options = DropIndexOptions().maxTime(1, TimeUnit.MILLISECONDS)
+
+        whenever(wrapped.dropIndex(eq(indexName), refEq(defaultOptions))).doReturn(Mono.empty())
+        whenever(wrapped.dropIndex(eq(indexName), eq(options))).doReturn(Mono.empty())
+        whenever(wrapped.dropIndex(eq(keys), refEq(defaultOptions))).doReturn(Mono.empty())
+        whenever(wrapped.dropIndex(eq(keys), eq(options))).doReturn(Mono.empty())
+        whenever(wrapped.dropIndex(eq(clientSession.wrapped), eq(indexName), refEq(defaultOptions)))
+            .doReturn(Mono.empty())
+        whenever(wrapped.dropIndex(eq(clientSession.wrapped), eq(indexName), eq(options))).doReturn(Mono.empty())
+        whenever(wrapped.dropIndex(eq(clientSession.wrapped), eq(keys), refEq(defaultOptions))).doReturn(Mono.empty())
+        whenever(wrapped.dropIndex(eq(clientSession.wrapped), eq(keys), eq(options))).doReturn(Mono.empty())
+
+        runBlocking {
+            mongoCollection.dropIndex(indexName)
+            mongoCollection.dropIndex(indexName, options)
+            mongoCollection.dropIndex(keys)
+            mongoCollection.dropIndex(keys, options)
+            mongoCollection.dropIndex(clientSession, indexName)
+            mongoCollection.dropIndex(clientSession, indexName, options)
+            mongoCollection.dropIndex(clientSession, keys)
+            mongoCollection.dropIndex(clientSession, keys, options)
+        }
+
+        verify(wrapped).dropIndex(eq(indexName), refEq(defaultOptions))
+        verify(wrapped).dropIndex(eq(indexName), eq(options))
+        verify(wrapped).dropIndex(eq(keys), refEq(defaultOptions))
+        verify(wrapped).dropIndex(eq(keys), eq(options))
+        verify(wrapped).dropIndex(eq(clientSession.wrapped), eq(indexName), refEq(defaultOptions))
+        verify(wrapped).dropIndex(eq(clientSession.wrapped), eq(indexName), eq(options))
+        verify(wrapped).dropIndex(eq(clientSession.wrapped), eq(keys), refEq(defaultOptions))
+        verify(wrapped).dropIndex(eq(clientSession.wrapped), eq(keys), eq(options))
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingDropIndexes() {
+        val mongoCollection = MongoCollection(wrapped)
+        val defaultOptions = DropIndexOptions()
+        val options = DropIndexOptions().maxTime(1, TimeUnit.MILLISECONDS)
+
+        whenever(wrapped.dropIndexes(refEq(defaultOptions))).doReturn(Mono.empty())
+        whenever(wrapped.dropIndexes(options)).doReturn(Mono.empty())
+        whenever(wrapped.dropIndexes(eq(clientSession.wrapped), refEq(defaultOptions))).doReturn(Mono.empty())
+        whenever(wrapped.dropIndexes(clientSession.wrapped, options)).doReturn(Mono.empty())
+
+        runBlocking {
+            mongoCollection.dropIndexes()
+            mongoCollection.dropIndexes(options)
+            mongoCollection.dropIndexes(clientSession)
+            mongoCollection.dropIndexes(clientSession, options)
+        }
+
+        verify(wrapped).dropIndexes(refEq(defaultOptions))
+        verify(wrapped).dropIndexes(eq(options))
+        verify(wrapped).dropIndexes(eq(clientSession.wrapped), refEq(defaultOptions))
+        verify(wrapped).dropIndexes(eq(clientSession.wrapped), eq(options))
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingRenameCollection() {
+        val mongoCollection = MongoCollection(wrapped)
+        val mongoNamespace = MongoNamespace("db", "coll")
+        val defaultOptions = RenameCollectionOptions()
+        val options = RenameCollectionOptions().dropTarget(true)
+
+        whenever(wrapped.renameCollection(eq(mongoNamespace), refEq(defaultOptions))).doReturn(Mono.empty())
+        whenever(wrapped.renameCollection(eq(mongoNamespace), eq(options))).doReturn(Mono.empty())
+        whenever(wrapped.renameCollection(eq(clientSession.wrapped), eq(mongoNamespace), refEq(defaultOptions)))
+            .doReturn(Mono.empty())
+        whenever(wrapped.renameCollection(eq(clientSession.wrapped), eq(mongoNamespace), eq(options)))
+            .doReturn(Mono.empty())
+
+        runBlocking {
+            mongoCollection.renameCollection(mongoNamespace)
+            mongoCollection.renameCollection(mongoNamespace, options)
+            mongoCollection.renameCollection(clientSession, mongoNamespace)
+            mongoCollection.renameCollection(clientSession, mongoNamespace, options)
+        }
+
+        verify(wrapped).renameCollection(eq(mongoNamespace), refEq(defaultOptions))
+        verify(wrapped).renameCollection(eq(mongoNamespace), eq(options))
+        verify(wrapped).renameCollection(eq(clientSession.wrapped), eq(mongoNamespace), refEq(defaultOptions))
+        verify(wrapped).renameCollection(eq(clientSession.wrapped), eq(mongoNamespace), eq(options))
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldProvideExtensionFunctionsForTimeBasedOptions() {
+        val oneThousand = 1000L
+
+        assertEquals(1, CreateIndexOptions().maxTime(oneThousand).getMaxTime(TimeUnit.SECONDS))
+        assertEquals(1, CountOptions().maxTime(oneThousand).getMaxTime(TimeUnit.SECONDS))
+        assertEquals(1, DropIndexOptions().maxTime(oneThousand).getMaxTime(TimeUnit.SECONDS))
+        assertEquals(1, EstimatedDocumentCountOptions().maxTime(oneThousand).getMaxTime(TimeUnit.SECONDS))
+        assertEquals(1, FindOneAndDeleteOptions().maxTime(oneThousand).getMaxTime(TimeUnit.SECONDS))
+        assertEquals(1, FindOneAndReplaceOptions().maxTime(oneThousand).getMaxTime(TimeUnit.SECONDS))
+        assertEquals(1, FindOneAndUpdateOptions().maxTime(oneThousand).getMaxTime(TimeUnit.SECONDS))
+        assertEquals(oneThousand, IndexOptions().expireAfter(oneThousand).getExpireAfter(TimeUnit.SECONDS))
+    }
+}
diff --git a/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabaseTest.kt b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabaseTest.kt
new file mode 100644
index 00000000000..031e2e6d1ef
--- /dev/null
+++ b/driver-kotlin-coroutine/src/test/kotlin/com/mongodb/kotlin/client/coroutine/MongoDatabaseTest.kt
@@ -0,0 +1,417 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.coroutine
+
+import com.mongodb.ReadConcern
+import com.mongodb.ReadPreference
+import com.mongodb.WriteConcern
+import com.mongodb.client.model.Collation
+import com.mongodb.client.model.CreateCollectionOptions
+import com.mongodb.client.model.CreateViewOptions
+import com.mongodb.client.model.ValidationAction
+import com.mongodb.client.model.ValidationOptions
+import com.mongodb.kotlin.client.coroutine.MockitoHelper.deepRefEq
+import com.mongodb.reactivestreams.client.MongoDatabase as JMongoDatabase
+import java.util.concurrent.TimeUnit
+import kotlin.reflect.full.declaredFunctions
+import kotlin.reflect.full.declaredMemberProperties
+import kotlin.test.assertEquals
+import kotlinx.coroutines.runBlocking
+import org.bson.BsonDocument
+import org.bson.Document
+import org.bson.codecs.configuration.CodecRegistry
+import org.junit.jupiter.api.Test
+import org.mockito.Mock
+import org.mockito.kotlin.doReturn
+import org.mockito.kotlin.eq
+import org.mockito.kotlin.mock
+import org.mockito.kotlin.refEq
+import org.mockito.kotlin.times
+import org.mockito.kotlin.verify
+import org.mockito.kotlin.verifyNoMoreInteractions
+import org.mockito.kotlin.whenever
+import reactor.core.publisher.Mono
+
+class MongoDatabaseTest {
+
+    @Mock val wrapped: JMongoDatabase = mock()
+    @Mock val clientSession: ClientSession = ClientSession(mock())
+
+    @Test
+    fun shouldHaveTheSameMethods() {
+        val jMongoDatabaseFunctions = JMongoDatabase::class.declaredFunctions.map { it.name }.toSet()
+        val kMongoDatabaseFunctions =
+            MongoDatabase::class
+                .declaredFunctions
+                .map {
+                    if (it.name == "timeout") {
+                        "getTimeout"
+                    } else {
+                        it.name
+                    }
+                }
+                .toSet() +
+                MongoDatabase::class
+                    .declaredMemberProperties
+                    .filterNot { it.name == "wrapped" }
+                    .map { "get${it.name.replaceFirstChar{c -> c.uppercaseChar() }}" }
+
+        assertEquals(jMongoDatabaseFunctions, kMongoDatabaseFunctions)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingGetName() {
+        val mongoDatabase = MongoDatabase(wrapped)
+        whenever(wrapped.name).doReturn("name")
+
+        mongoDatabase.name
+        verify(wrapped).name
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingGetCodecRegistry() {
+        val mongoDatabase = MongoDatabase(wrapped)
+        whenever(wrapped.codecRegistry).doReturn(mock())
+
+        mongoDatabase.codecRegistry
+        verify(wrapped).codecRegistry
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingGetReadPreference() {
+        val mongoDatabase = MongoDatabase(wrapped)
+        whenever(wrapped.readPreference).doReturn(mock())
+
+        mongoDatabase.readPreference
+        verify(wrapped).readPreference
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingGetReadConcern() {
+        val mongoDatabase = MongoDatabase(wrapped)
+        whenever(wrapped.readConcern).doReturn(ReadConcern.DEFAULT)
+
+        mongoDatabase.readConcern
+        verify(wrapped).readConcern
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingGetWriteConcern() {
+        val mongoDatabase = MongoDatabase(wrapped)
+        whenever(wrapped.writeConcern).doReturn(mock())
+
+        mongoDatabase.writeConcern
+        verify(wrapped).writeConcern
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingWithCodecRegistry() {
+        val mongoDatabase = MongoDatabase(wrapped)
+        val codecRegistry = mock<CodecRegistry>()
+        whenever(wrapped.withCodecRegistry(codecRegistry)).doReturn(mock())
+
+        mongoDatabase.withCodecRegistry(codecRegistry)
+        verify(wrapped).withCodecRegistry(codecRegistry)
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingWithReadPreference() {
+        val mongoDatabase = MongoDatabase(wrapped)
+        val readPreference = ReadPreference.primaryPreferred()
+        whenever(wrapped.withReadPreference(readPreference)).doReturn(mock())
+
+        mongoDatabase.withReadPreference(readPreference)
+        verify(wrapped).withReadPreference(readPreference)
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingWithReadConcern() {
+        val mongoDatabase = MongoDatabase(wrapped)
+        val readConcern = ReadConcern.AVAILABLE
+        whenever(wrapped.withReadConcern(readConcern)).doReturn(mock())
+
+        mongoDatabase.withReadConcern(readConcern)
+        verify(wrapped).withReadConcern(readConcern)
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingWithWriteConcern() {
+        val mongoDatabase = MongoDatabase(wrapped)
+        val writeConcern = WriteConcern.MAJORITY
+        whenever(wrapped.withWriteConcern(writeConcern)).doReturn(mock())
+
+        mongoDatabase.withWriteConcern(writeConcern)
+        verify(wrapped).withWriteConcern(writeConcern)
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingGetCollection() {
+        val mongoDatabase = MongoDatabase(wrapped)
+        whenever(wrapped.getCollection("collectionName", Document::class.java)).doReturn(mock())
+
+        mongoDatabase.getCollection<Document>("collectionName")
+        verify(wrapped).getCollection("collectionName", Document::class.java)
+        verifyNoMoreInteractions(wrapped)
+    }
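+
+    // Overloads without an explicit ReadPreference default to the database's own
+    // readPreference, which is why the verify below expects six accesses of that getter.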
+    @Test
+    fun shouldCallTheUnderlyingRunCommand() {
+        val mongoDatabase = MongoDatabase(wrapped)
+        val command = Document(mapOf("a" to 1))
+        val primary = ReadPreference.primary()
+        val primaryPreferred = ReadPreference.primaryPreferred()
+
+        whenever(wrapped.readPreference).doReturn(primary)
+        whenever(wrapped.runCommand(command, primary, Document::class.java)).doReturn(Mono.fromCallable { Document() })
+        whenever(wrapped.runCommand(clientSession.wrapped, command, primary, Document::class.java))
+            .doReturn(Mono.fromCallable { Document() })
+        whenever(wrapped.runCommand(command, primary, BsonDocument::class.java))
+            .doReturn(Mono.fromCallable { BsonDocument() })
+        whenever(wrapped.runCommand(clientSession.wrapped, command, primary, BsonDocument::class.java))
+            .doReturn(Mono.fromCallable { BsonDocument() })
+        whenever(wrapped.runCommand(command, primaryPreferred, BsonDocument::class.java))
+            .doReturn(Mono.fromCallable { BsonDocument() })
+        whenever(wrapped.runCommand(clientSession.wrapped, command, primaryPreferred, BsonDocument::class.java))
+            .doReturn(Mono.fromCallable { BsonDocument() })
+
+        runBlocking {
+            mongoDatabase.runCommand(command)
+            mongoDatabase.runCommand(command, primary)
+            mongoDatabase.runCommand(command, resultClass = Document::class.java)
+            mongoDatabase.runCommand(command, primary, Document::class.java)
+
+            mongoDatabase.runCommand(clientSession, command)
+            mongoDatabase.runCommand(clientSession, command, primary)
+            mongoDatabase.runCommand(clientSession, command, resultClass = Document::class.java)
+            mongoDatabase.runCommand(clientSession, command, primary, Document::class.java)
+
+            mongoDatabase.runCommand<BsonDocument>(command)
+            mongoDatabase.runCommand<BsonDocument>(command, primaryPreferred)
+            mongoDatabase.runCommand<BsonDocument>(clientSession, command)
+            mongoDatabase.runCommand<BsonDocument>(clientSession, command, primaryPreferred)
+        }
+
+        verify(wrapped, times(6)).readPreference
+        verify(wrapped, times(4)).runCommand(command, primary, Document::class.java)
+        verify(wrapped, times(4)).runCommand(clientSession.wrapped, command, primary, Document::class.java)
+        verify(wrapped, times(1)).runCommand(command, primary, BsonDocument::class.java)
+        verify(wrapped, times(1)).runCommand(clientSession.wrapped, command, primary, BsonDocument::class.java)
+        verify(wrapped, times(1)).runCommand(command, primaryPreferred, BsonDocument::class.java)
+        verify(wrapped, times(1)).runCommand(clientSession.wrapped, command, primaryPreferred, BsonDocument::class.java)
+
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingDrop() {
+        val mongoDatabase = MongoDatabase(wrapped)
+
+        whenever(wrapped.drop()).doReturn(Mono.empty())
+        whenever(wrapped.drop(clientSession.wrapped)).doReturn(Mono.empty())
+
+        runBlocking {
+            mongoDatabase.drop()
+            mongoDatabase.drop(clientSession)
+        }
+
+        verify(wrapped).drop()
+        verify(wrapped).drop(clientSession.wrapped)
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingListCollectionNames() {
+        val mongoDatabase = MongoDatabase(wrapped)
+        whenever(wrapped.listCollectionNames()).doReturn(mock())
+        whenever(wrapped.listCollectionNames(clientSession.wrapped)).doReturn(mock())
+
+        mongoDatabase.listCollectionNames()
+        mongoDatabase.listCollectionNames(clientSession)
+
+        verify(wrapped).listCollectionNames()
+        verify(wrapped).listCollectionNames(clientSession.wrapped)
+        verifyNoMoreInteractions(wrapped)
+    }
+
+    @Test
+    fun shouldCallTheUnderlyingListCollections() {
+        val mongoDatabase = MongoDatabase(wrapped)
+        whenever(wrapped.listCollections(Document::class.java)).doReturn(mock())
+        whenever(wrapped.listCollections(BsonDocument::class.java)).doReturn(mock())
+        whenever(wrapped.listCollections(clientSession.wrapped, Document::class.java)).doReturn(mock())
+        whenever(wrapped.listCollections(clientSession.wrapped, BsonDocument::class.java)).doReturn(mock())
+
+        mongoDatabase.listCollections()
+        mongoDatabase.listCollections(clientSession)
+
+        mongoDatabase.listCollections(resultClass = Document::class.java)
+        mongoDatabase.listCollections(clientSession, Document::class.java)
+
+        mongoDatabase.listCollections<BsonDocument>()
+        mongoDatabase.listCollections<BsonDocument>(clientSession)
+
+        verify(wrapped, times(2)).listCollections(Document::class.java)
+        verify(wrapped, times(2)).listCollections(clientSession.wrapped, Document::class.java)
+        verify(wrapped, times(1)).listCollections(BsonDocument::class.java)
+        verify(wrapped, times(1)).listCollections(clientSession.wrapped, BsonDocument::class.java)
+        verifyNoMoreInteractions(wrapped)
+    }
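+
+    // CreateCollectionOptions nests further option objects (e.g. ValidationOptions),
+    // so the default instance is matched with deepRefEq instead of plain refEq.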
verify(wrapped).createCollection(eq(clientSession.wrapped), eq(name), deepRefEq(defaultOptions)) + verify(wrapped).createCollection(eq(clientSession.wrapped), eq(name2), eq(options)) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingCreateView() { + val mongoDatabase = MongoDatabase(wrapped) + val viewName = "view" + val viewOn = "coll" + val pipeline = listOf(Document(mapOf("a" to 1))) + val defaultOptions = CreateViewOptions() + val options = CreateViewOptions().collation(Collation.builder().backwards(true).build()) + + whenever(wrapped.createView(eq(viewName), eq(viewOn), eq(pipeline), refEq(defaultOptions))) + .doReturn(Mono.empty()) + whenever(wrapped.createView(eq(viewName), eq(viewOn), eq(pipeline), eq(options))).doReturn(Mono.empty()) + whenever( + wrapped.createView( + eq(clientSession.wrapped), eq(viewName), eq(viewOn), eq(pipeline), refEq(defaultOptions))) + .doReturn(Mono.empty()) + whenever(wrapped.createView(eq(clientSession.wrapped), eq(viewName), eq(viewOn), eq(pipeline), eq(options))) + .doReturn(Mono.empty()) + + runBlocking { + mongoDatabase.createView(viewName, viewOn, pipeline) + mongoDatabase.createView(viewName, viewOn, pipeline, options) + mongoDatabase.createView(clientSession, viewName, viewOn, pipeline) + mongoDatabase.createView(clientSession, viewName, viewOn, pipeline, options) + } + + verify(wrapped).createView(eq(viewName), eq(viewOn), eq(pipeline), refEq(defaultOptions)) + verify(wrapped).createView(eq(viewName), eq(viewOn), eq(pipeline), eq(options)) + verify(wrapped) + .createView(eq(clientSession.wrapped), eq(viewName), eq(viewOn), eq(pipeline), refEq(defaultOptions)) + verify(wrapped).createView(eq(clientSession.wrapped), eq(viewName), eq(viewOn), eq(pipeline), eq(options)) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingAggregate() { + val mongoDatabase = MongoDatabase(wrapped) + val pipeline = listOf(Document(mapOf("a" to 1))) + + whenever(wrapped.aggregate(pipeline, Document::class.java)).doReturn(mock()) + whenever(wrapped.aggregate(clientSession.wrapped, pipeline, Document::class.java)).doReturn(mock()) + whenever(wrapped.aggregate(pipeline, BsonDocument::class.java)).doReturn(mock()) + whenever(wrapped.aggregate(clientSession.wrapped, pipeline, BsonDocument::class.java)).doReturn(mock()) + + mongoDatabase.aggregate(pipeline) + mongoDatabase.aggregate(clientSession, pipeline) + + mongoDatabase.aggregate(pipeline, resultClass = Document::class.java) + mongoDatabase.aggregate(clientSession, pipeline, Document::class.java) + + mongoDatabase.aggregate(pipeline) + mongoDatabase.aggregate(clientSession, pipeline) + + verify(wrapped, times(2)).aggregate(pipeline, Document::class.java) + verify(wrapped, times(2)).aggregate(clientSession.wrapped, pipeline, Document::class.java) + verify(wrapped, times(1)).aggregate(pipeline, BsonDocument::class.java) + verify(wrapped, times(1)).aggregate(clientSession.wrapped, pipeline, BsonDocument::class.java) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingWatch() { + val mongoDatabase = MongoDatabase(wrapped) + val pipeline = listOf(Document(mapOf("a" to 1))) + + whenever(wrapped.watch(emptyList(), Document::class.java)).doReturn(mock()) + whenever(wrapped.watch(pipeline, Document::class.java)).doReturn(mock()) + whenever(wrapped.watch(clientSession.wrapped, emptyList(), Document::class.java)).doReturn(mock()) + whenever(wrapped.watch(clientSession.wrapped, pipeline, Document::class.java)).doReturn(mock()) + 
whenever(wrapped.watch(emptyList(), BsonDocument::class.java)).doReturn(mock()) + whenever(wrapped.watch(pipeline, BsonDocument::class.java)).doReturn(mock()) + whenever(wrapped.watch(clientSession.wrapped, emptyList(), BsonDocument::class.java)).doReturn(mock()) + whenever(wrapped.watch(clientSession.wrapped, pipeline, BsonDocument::class.java)).doReturn(mock()) + + mongoDatabase.watch() + mongoDatabase.watch(pipeline) + mongoDatabase.watch(clientSession) + mongoDatabase.watch(clientSession, pipeline) + + mongoDatabase.watch(resultClass = Document::class.java) + mongoDatabase.watch(pipeline, Document::class.java) + mongoDatabase.watch(clientSession, resultClass = Document::class.java) + mongoDatabase.watch(clientSession, pipeline, Document::class.java) + + mongoDatabase.watch() + mongoDatabase.watch(pipeline) + mongoDatabase.watch(clientSession) + mongoDatabase.watch(clientSession, pipeline) + + verify(wrapped, times(2)).watch(emptyList(), Document::class.java) + verify(wrapped, times(2)).watch(pipeline, Document::class.java) + verify(wrapped, times(2)).watch(clientSession.wrapped, emptyList(), Document::class.java) + verify(wrapped, times(2)).watch(clientSession.wrapped, pipeline, Document::class.java) + verify(wrapped, times(1)).watch(emptyList(), BsonDocument::class.java) + verify(wrapped, times(1)).watch(pipeline, BsonDocument::class.java) + verify(wrapped, times(1)).watch(clientSession.wrapped, emptyList(), BsonDocument::class.java) + verify(wrapped, times(1)).watch(clientSession.wrapped, pipeline, BsonDocument::class.java) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldProvideExtensionFunctionsForTimeBasedOptions() { + val oneThousand = 1000L + + assertEquals(oneThousand, CreateCollectionOptions().expireAfter(oneThousand).getExpireAfter(TimeUnit.SECONDS)) + } +} diff --git a/driver-kotlin-extensions/build.gradle.kts b/driver-kotlin-extensions/build.gradle.kts new file mode 100644 index 00000000000..0bd9405f27e --- /dev/null +++ b/driver-kotlin-extensions/build.gradle.kts @@ -0,0 +1,56 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+import ProjectExtensions.configureJarManifest
+import ProjectExtensions.configureMavenPublication
+import org.jetbrains.kotlin.gradle.tasks.KotlinCompile
+
+plugins { id("project.kotlin") }
+
+base.archivesName.set("mongodb-driver-kotlin-extensions")
+
+dependencies {
+ api(project(path = ":driver-core", configuration = "default"))
+
+ // Some extensions require higher-level APIs, such as MongoCollection, which are defined in the sync &
+ // coroutine Kotlin drivers
+ optionalImplementation(project(path = ":driver-kotlin-sync", configuration = "default"))
+ optionalImplementation(project(path = ":driver-kotlin-coroutine", configuration = "default"))
+
+ testImplementation(platform(libs.kotlinx.serialization))
+ testImplementation(libs.kotlinx.serialization.core)
+}
+
+configureMavenPublication {
+ pom {
+ name.set("MongoDB Kotlin Driver Extensions")
+ description.set("The MongoDB Kotlin Driver Extensions")
+ }
+}
+
+configureJarManifest {
+ attributes["Automatic-Module-Name"] = "org.mongodb.driver.kotlin.extensions"
+ attributes["Bundle-SymbolicName"] = "org.mongodb.mongodb-driver-kotlin-extensions"
+}
+
+tasks.withType<KotlinCompile> {
+ kotlinOptions {
+ freeCompilerArgs =
+ listOf(
+ // Adds OnlyInputTypes support
+ "-Xallow-kotlin-package",
+ )
+ }
+} diff --git a/driver-kotlin-extensions/src/main/kotlin/com/mongodb/kotlin/client/model/Accumulators.kt b/driver-kotlin-extensions/src/main/kotlin/com/mongodb/kotlin/client/model/Accumulators.kt new file mode 100644 index 00000000000..2edbd35341d --- /dev/null +++ b/driver-kotlin-extensions/src/main/kotlin/com/mongodb/kotlin/client/model/Accumulators.kt @@ -0,0 +1,503 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ * Copyright (C) 2016/2022 Litote
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @custom-license-header
+ */
+package com.mongodb.kotlin.client.model
+
+import com.mongodb.client.model.Accumulators
+import com.mongodb.client.model.BsonField
+import com.mongodb.client.model.QuantileMethod
+import kotlin.reflect.KProperty
+import org.bson.conversions.Bson
+
+/**
+ * Accumulators extension methods to improve Kotlin interop
+ *
+ * @since 5.3
+ */
+@Suppress("TooManyFunctions")
+public object Accumulators {
+ /**
+ * Gets a field name for a $group operation representing the sum of the values of the given expression when applied
+ * to all members of the group.
+ *
+ * @param property the data class property
+ * @param expression the expression
+ * @param <TExpression> the expression type
+ * @return the field @mongodb.driver.manual reference/operator/aggregation/sum/ $sum
+ */
+ public fun <TExpression> sum(property: KProperty<*>, expression: TExpression): BsonField =
+ Accumulators.sum(property.path(), expression)
+
+ /**
+ * Gets a field name for a $group operation representing the average of the values of the given expression when
+ * applied to all members of the group.
+ * + * @param property the data class property + * @param expression the expression + * @param the expression type + * @return the field @mongodb.driver.manual reference/operator/aggregation/avg/ $avg + */ + public fun avg(property: KProperty<*>, expression: TExpression): BsonField = + Accumulators.avg(property.path(), expression) + + /** + * Returns a combination of a computed field and an accumulator that generates a BSON {@link org.bson.BsonType#ARRAY + * Array} containing computed values from the given {@code inExpression} based on the provided {@code pExpression}, + * which represents an array of percentiles of interest within a group, where each element is a numeric value + * between 0.0 and 1.0 (inclusive). + * + * @param property The data class property computed by the accumulator. + * @param inExpression The input expression. + * @param pExpression The expression representing a percentiles of interest. + * @param method The method to be used for computing the percentiles. + * @param The type of the input expression. + * @param The type of the percentile expression. + * @return The requested {@link BsonField}. @mongodb.driver.manual reference/operator/aggregation/percentile/ + * $percentile @mongodb.server.release 7.0 + */ + public fun percentile( + property: KProperty<*>, + inExpression: InExpression, + pExpression: PExpression, + method: QuantileMethod + ): BsonField = Accumulators.percentile(property.path(), inExpression, pExpression, method) + + /** + * Returns a combination of a computed field and an accumulator that generates a BSON {@link + * org.bson.BsonType#DOUBLE Double } representing the median value computed from the given {@code inExpression} + * within a group. + * + * @param property The data class property computed by the accumulator. + * @param inExpression The input expression. + * @param method The method to be used for computing the median. + * @param The type of the input expression. + * @return The requested {@link BsonField}. @mongodb.driver.manual reference/operator/aggregation/median/ + * $median @mongodb.server.release 7.0 + */ + public fun median( + property: KProperty<*>, + inExpression: InExpression, + method: QuantileMethod + ): BsonField = Accumulators.median(property.path(), inExpression, method) + + /** + * Gets a field name for a $group operation representing the value of the given expression when applied to the first + * member of the group. + * + * @param property The data class property + * @param expression the expression + * @param the expression type + * @return the field @mongodb.driver.manual reference/operator/aggregation/first/ $first + */ + public fun first(property: KProperty<*>, expression: TExpression): BsonField = + Accumulators.first(property.path(), expression) + + /** + * Returns a combination of a computed field and an accumulator that produces a BSON {@link org.bson.BsonType#ARRAY + * Array} of values of the given {@code inExpression} computed for the first {@code N} elements within a presorted + * group, where {@code N} is the positive integral value of the {@code nExpression}. + * + * @param property The data class property computed by the accumulator. + * @param inExpression The input expression. + * @param nExpression The expression limiting the number of produced values. + * @param The type of the input expression. + * @param The type of the limiting expression. + * @return The requested {@link BsonField}. 
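As a usage aside for the property-based accumulators above: the `KProperty` argument only supplies the output field name (resolved through the internal `path()` helper), while the expression still addresses input fields with `$`-prefixed strings. A minimal sketch, with both data classes invented for illustration:

```kotlin
import com.mongodb.client.model.Aggregates
import com.mongodb.kotlin.client.model.Accumulators

data class Sale(val region: String, val amount: Int) // illustrative input shape
data class RegionTotal(val total: Int)               // illustrative result shape

// Equivalent to Accumulators.sum("total", "$amount") in the Java builder, but
// renaming RegionTotal::total now breaks the pipeline at compile time.
val groupStage = Aggregates.group("\$region", Accumulators.sum(RegionTotal::total, "\$amount"))
```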
@mongodb.driver.manual reference/operator/aggregation/firstN/ + * $firstN @mongodb.server.release 5.2 + */ + public fun firstN( + property: KProperty<*>, + inExpression: InExpression, + nExpression: NExpression + ): BsonField = Accumulators.firstN(property.path(), inExpression, nExpression) + + /** + * Returns a combination of a computed field and an accumulator that produces a value of the given {@code + * outExpression} computed for the top element within a group sorted according to the provided {@code sortBy} + * specification. + * + * @param property The data class property computed by the accumulator. + * @param sortBy The {@linkplain Sorts sort specification}. The syntax is identical to the one expected by {@link + * Aggregates#sort(Bson)}. + * @param outExpression The output expression. + * @param The type of the output expression. + * @return The requested {@link BsonField}. @mongodb.driver.manual reference/operator/aggregation/top/ + * $top @mongodb.server.release 5.2 + */ + public fun top(property: KProperty<*>, sortBy: Bson, outExpression: OutExpression): BsonField = + Accumulators.top(property.path(), sortBy, outExpression) + + /** + * Returns a combination of a computed field and an accumulator that produces a BSON {@link org.bson.BsonType#ARRAY + * Array} of values of the given {@code outExpression} computed for the top {@code N} elements within a group sorted + * according to the provided {@code sortBy} specification, where {@code N} is the positive integral value of the + * {@code nExpression}. + * + * @param property The data class property computed by the accumulator. + * @param sortBy The {@linkplain Sorts sort specification}. The syntax is identical to the one expected by {@link + * Aggregates#sort(Bson)}. + * @param outExpression The output expression. + * @param nExpression The expression limiting the number of produced values. + * @param The type of the output expression. + * @param The type of the limiting expression. + * @return The requested {@link BsonField}. @mongodb.driver.manual reference/operator/aggregation/topN/ $topN + * @since 4.7 @mongodb.server.release 5.2 + */ + public fun topN( + property: KProperty<*>, + sortBy: Bson, + outExpression: OutExpression, + nExpression: NExpression + ): BsonField = Accumulators.topN(property.path(), sortBy, outExpression, nExpression) + + /** + * Gets a field name for a $group operation representing the value of the given expression when applied to the last + * member of the group. + * + * @param property The data class property + * @param expression the expression + * @param the expression type + * @return the field @mongodb.driver.manual reference/operator/aggregation/last/ $last + */ + public fun last(property: KProperty<*>, expression: TExpression): BsonField = + Accumulators.last(property.path(), expression) + + /** + * Returns a combination of a computed field and an accumulator that produces a BSON {@link org.bson.BsonType#ARRAY + * Array} of values of the given {@code inExpression} computed for the last {@code N} elements within a presorted + * group, where {@code N} is the positive integral value of the {@code nExpression}. + * + * @param property The data class property computed by the accumulator. + * @param inExpression The input expression. + * @param nExpression The expression limiting the number of produced values. + * @param The type of the input expression. + * @param The type of the limiting expression. + * @return The requested {@link BsonField}. 
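To make the sorted `top`/`topN` family concrete, a hedged sketch; the `Leaders` data class and the field names are illustrative only:

```kotlin
import com.mongodb.client.model.Sorts
import com.mongodb.kotlin.client.model.Accumulators

data class Leaders(val best: List<Int>) // illustrative result shape

// Keeps the three highest values of the input documents' score field per group;
// the sortBy argument uses the same syntax as Aggregates.sort(Bson).
val bestThree = Accumulators.topN(Leaders::best, Sorts.descending("score"), "\$score", 3)
```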
@mongodb.driver.manual reference/operator/aggregation/lastN/ + * $lastN @mongodb.server.release 5.2 + */ + public fun lastN( + property: KProperty<*>, + inExpression: InExpression, + nExpression: NExpression + ): BsonField = Accumulators.lastN(property.path(), inExpression, nExpression) + + /** + * Returns a combination of a computed field and an accumulator that produces a value of the given {@code + * outExpression} computed for the bottom element within a group sorted according to the provided {@code sortBy} + * specification. + * + * @param property The data class property computed by the accumulator. + * @param sortBy The {@linkplain Sorts sort specification}. The syntax is identical to the one expected by {@link + * Aggregates#sort(Bson)}. + * @param outExpression The output expression. + * @param The type of the output expression. + * @return The requested {@link BsonField}. @mongodb.driver.manual reference/operator/aggregation/bottom/ + * $bottom @mongodb.server.release 5.2 + */ + public fun bottom(property: KProperty<*>, sortBy: Bson, outExpression: OutExpression): BsonField = + Accumulators.bottom(property.path(), sortBy, outExpression) + + /** + * Returns a combination of a computed field and an accumulator that produces a BSON {@link org.bson.BsonType#ARRAY + * Array} of values of the given {@code outExpression} computed for the bottom {@code N} elements within a group + * sorted according to the provided {@code sortBy} specification, where {@code N} is the positive integral value of + * the {@code nExpression}. + * + * @param property The data class property computed by the accumulator. + * @param sortBy The {@linkplain Sorts sort specification}. The syntax is identical to the one expected by {@link + * Aggregates#sort(Bson)}. + * @param outExpression The output expression. + * @param nExpression The expression limiting the number of produced values. + * @param The type of the output expression. + * @param The type of the limiting expression. + * @return The requested {@link BsonField}. @mongodb.driver.manual reference/operator/aggregation/bottomN/ $bottomN + * @since 4.7 @mongodb.server.release 5.2 + */ + public fun bottomN( + property: KProperty<*>, + sortBy: Bson, + outExpression: OutExpression, + nExpression: NExpression + ): BsonField = Accumulators.bottomN(property.path(), sortBy, outExpression, nExpression) + + /** + * Gets a field name for a $group operation representing the maximum of the values of the given expression when + * applied to all members of the group. + * + * @param property The data class property + * @param expression the expression + * @param the expression type + * @return the field @mongodb.driver.manual reference/operator/aggregation/max/ $max + */ + public fun max(property: KProperty<*>, expression: TExpression): BsonField = + Accumulators.max(property.path(), expression) + + /** + * Returns a combination of a computed field and an accumulator that produces a BSON {@link org.bson.BsonType#ARRAY + * Array} of {@code N} largest values of the given {@code inExpression}, where {@code N} is the positive integral + * value of the {@code nExpression}. + * + * @param property The data class property computed by the accumulator. + * @param inExpression The input expression. + * @param nExpression The expression limiting the number of produced values. + * @param The type of the input expression. + * @param The type of the limiting expression. + * @return The requested {@link BsonField}. 
@mongodb.driver.manual reference/operator/aggregation/maxN/ $maxN
+ * @since 4.7 @mongodb.server.release 5.2
+ */
+ public fun <InExpression, NExpression> maxN(
+ property: KProperty<*>,
+ inExpression: InExpression,
+ nExpression: NExpression
+ ): BsonField = Accumulators.maxN(property.path(), inExpression, nExpression)
+
+ /**
+ * Gets a field name for a $group operation representing the minimum of the values of the given expression when
+ * applied to all members of the group.
+ *
+ * @param property The data class property
+ * @param expression the expression
+ * @param <TExpression> the expression type
+ * @return the field @mongodb.driver.manual reference/operator/aggregation/min/ $min
+ */
+ public fun <TExpression> min(property: KProperty<*>, expression: TExpression): BsonField =
+ Accumulators.min(property.path(), expression)
+
+ /**
+ * Returns a combination of a computed field and an accumulator that produces a BSON {@link org.bson.BsonType#ARRAY
+ * Array} of {@code N} smallest values of the given {@code inExpression}, where {@code N} is the positive integral
+ * value of the {@code nExpression}.
+ *
+ * @param property The data class property computed by the accumulator.
+ * @param inExpression The input expression.
+ * @param nExpression The expression limiting the number of produced values.
+ * @param <InExpression> The type of the input expression.
+ * @param <NExpression> The type of the limiting expression.
+ * @return The requested {@link BsonField}. @mongodb.driver.manual reference/operator/aggregation/minN/
+ * $minN @mongodb.server.release 5.2
+ */
+ public fun <InExpression, NExpression> minN(
+ property: KProperty<*>,
+ inExpression: InExpression,
+ nExpression: NExpression
+ ): BsonField = Accumulators.minN(property.path(), inExpression, nExpression)
+
+ /**
+ * Gets a field name for a $group operation representing an array of all values that result from applying an
+ * expression to each document in a group of documents that share the same group by key.
+ *
+ * @param property The data class property
+ * @param expression the expression
+ * @param <TExpression> the expression type
+ * @return the field @mongodb.driver.manual reference/operator/aggregation/push/ $push
+ */
+ public fun <TExpression> push(property: KProperty<*>, expression: TExpression): BsonField =
+ Accumulators.push(property.path(), expression)
+
+ /**
+ * Gets a field name for a $group operation representing all unique values that result from applying the given
+ * expression to each document in a group of documents that share the same group by key.
+ *
+ * @param property The data class property
+ * @param expression the expression
+ * @param <TExpression> the expression type
+ * @return the field @mongodb.driver.manual reference/operator/aggregation/addToSet/ $addToSet
+ */
+ public fun <TExpression> addToSet(property: KProperty<*>, expression: TExpression): BsonField =
+ Accumulators.addToSet(property.path(), expression)
+
+ /**
+ * Gets a field name for a $group operation representing the result of merging the fields of the documents. If
+ * documents to merge include the same field name, the field, in the resulting document, has the value from the last
+ * document merged for the field.
+ *
+ * @param property The data class property
+ * @param expression the expression
+ * @param <TExpression> the expression type
+ * @return the field @mongodb.driver.manual reference/operator/aggregation/mergeObjects/ $mergeObjects
+ */
+ public fun <TExpression> mergeObjects(property: KProperty<*>, expression: TExpression): BsonField =
+ Accumulators.mergeObjects(property.path(), expression)
+
+ /**
+ * Gets a field name for a $group operation representing the population standard deviation of the values of the
+ * given expression when applied to all members of the group.
+ *
+ * <p>Use if the values encompass the entire population of data you want to represent and do not wish to generalize
+ * about a larger population.</p>
+ * + * @param property The data class property + * @param expression the expression + * @param the expression type + * @return the field @mongodb.driver.manual reference/operator/aggregation/stdDevPop/ + * $stdDevPop @mongodb.server.release 3.2 + */ + public fun stdDevPop(property: KProperty<*>, expression: TExpression): BsonField = + Accumulators.stdDevPop(property.path(), expression) + + /** + * Gets a field name for a $group operation representing the sample standard deviation of the values of the given + * expression when applied to all members of the group. + * + *
+ * <p>Use if the values encompass a sample of a population of data from which to generalize about the
+ * population.</p>
+ * + * @param property the data class property + * @param expression the expression + * @param the expression type + * @return the field @mongodb.driver.manual reference/operator/aggregation/stdDevSamp/ + * $stdDevSamp @mongodb.server.release 3.2 + */ + public fun stdDevSamp(property: KProperty<*>, expression: TExpression): BsonField = + Accumulators.stdDevSamp(property.path(), expression) + + /** + * Creates an $accumulator pipeline stage + * + * @param property the data class property + * @param initFunction a function used to initialize the state + * @param accumulateFunction a function used to accumulate documents + * @param mergeFunction a function used to merge two internal states, e.g. accumulated on different shards or + * threads. It returns the resulting state of the accumulator. + * @return the $accumulator pipeline stage @mongodb.driver.manual reference/operator/aggregation/accumulator/ + * $accumulator @mongodb.server.release 4.4 + */ + public fun accumulator( + property: KProperty, + initFunction: String, + accumulateFunction: String, + mergeFunction: String + ): BsonField = Accumulators.accumulator(property.path(), initFunction, accumulateFunction, mergeFunction) + + /** + * Creates an $accumulator pipeline stage + * + * @param property the data class property + * @param initFunction a function used to initialize the state + * @param accumulateFunction a function used to accumulate documents + * @param mergeFunction a function used to merge two internal states, e.g. accumulated on different shards or + * threads. It returns the resulting state of the accumulator. + * @param finalizeFunction a function used to finalize the state and return the result (may be null) + * @return the $accumulator pipeline stage @mongodb.driver.manual reference/operator/aggregation/accumulator/ + * $accumulator @mongodb.server.release 4.4 + */ + public fun accumulator( + property: KProperty, + initFunction: String, + accumulateFunction: String, + mergeFunction: String, + finalizeFunction: String? + ): BsonField = + Accumulators.accumulator(property.path(), initFunction, accumulateFunction, mergeFunction, finalizeFunction) + + /** + * Creates an $accumulator pipeline stage + * + * @param property the data class property + * @param initFunction a function used to initialize the state + * @param initArgs init function’s arguments (may be null) + * @param accumulateFunction a function used to accumulate documents + * @param accumulateArgs additional accumulate function’s arguments (may be null). The first argument to the + * function is ‘state’. + * @param mergeFunction a function used to merge two internal states, e.g. accumulated on different shards or + * threads. It returns the resulting state of the accumulator. + * @param finalizeFunction a function used to finalize the state and return the result (may be null) + * @return the $accumulator pipeline stage @mongodb.driver.manual reference/operator/aggregation/accumulator/ + * $accumulator @mongodb.server.release 4.4 + */ + @Suppress("LongParameterList") + public fun accumulator( + property: KProperty, + initFunction: String, + initArgs: List?, + accumulateFunction: String, + accumulateArgs: List?, + mergeFunction: String, + finalizeFunction: String? 
+ ): BsonField = + Accumulators.accumulator( + property.path(), + initFunction, + initArgs, + accumulateFunction, + accumulateArgs, + mergeFunction, + finalizeFunction) + + /** + * Creates an $accumulator pipeline stage + * + * @param property the data class property + * @param initFunction a function used to initialize the state + * @param accumulateFunction a function used to accumulate documents + * @param mergeFunction a function used to merge two internal states, e.g. accumulated on different shards or + * threads. It returns the resulting state of the accumulator. + * @param finalizeFunction a function used to finalize the state and return the result (may be null) + * @param lang a language specifier + * @return the $accumulator pipeline stage @mongodb.driver.manual reference/operator/aggregation/accumulator/ + * $accumulator @mongodb.server.release 4.4 + */ + @Suppress("LongParameterList") + public fun accumulator( + property: KProperty, + initFunction: String, + accumulateFunction: String, + mergeFunction: String, + finalizeFunction: String?, + lang: String + ): BsonField = + Accumulators.accumulator( + property.path(), initFunction, accumulateFunction, mergeFunction, finalizeFunction, lang) + + /** + * Creates an $accumulator pipeline stage + * + * @param property The data class property. + * @param initFunction a function used to initialize the state + * @param initArgs init function’s arguments (may be null) + * @param accumulateFunction a function used to accumulate documents + * @param accumulateArgs additional accumulate function’s arguments (may be null). The first argument to the + * function is ‘state’. + * @param mergeFunction a function used to merge two internal states, e.g. accumulated on different shards or + * threads. It returns the resulting state of the accumulator. + * @param finalizeFunction a function used to finalize the state and return the result (may be null) + * @param lang a language specifier + * @return the $accumulator pipeline stage @mongodb.driver.manual reference/operator/aggregation/accumulator/ + * $accumulator @mongodb.server.release 4.4 + */ + @Suppress("LongParameterList") + public fun accumulator( + property: KProperty, + initFunction: String, + initArgs: List?, + accumulateFunction: String, + accumulateArgs: List?, + mergeFunction: String, + finalizeFunction: String?, + lang: String + ): BsonField = + Accumulators.accumulator( + property.path(), + initFunction, + initArgs, + accumulateFunction, + accumulateArgs, + mergeFunction, + finalizeFunction, + lang) +} diff --git a/driver-kotlin-extensions/src/main/kotlin/com/mongodb/kotlin/client/model/Aggregates.kt b/driver-kotlin-extensions/src/main/kotlin/com/mongodb/kotlin/client/model/Aggregates.kt new file mode 100644 index 00000000000..395ce98d88d --- /dev/null +++ b/driver-kotlin-extensions/src/main/kotlin/com/mongodb/kotlin/client/model/Aggregates.kt @@ -0,0 +1,244 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * Copyright (C) 2016/2022 Litote + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * @custom-license-header + */ +package com.mongodb.kotlin.client.model + +import com.mongodb.client.model.Aggregates +import com.mongodb.client.model.GraphLookupOptions +import com.mongodb.client.model.MergeOptions +import com.mongodb.client.model.UnwindOptions +import com.mongodb.client.model.densify.DensifyOptions +import com.mongodb.client.model.densify.DensifyRange +import com.mongodb.kotlin.client.model.Projections.projection +import kotlin.reflect.KProperty +import kotlin.reflect.KProperty1 +import org.bson.conversions.Bson + +/** + * Aggregates extension methods to improve Kotlin interop + * + * @since 5.3 + */ +public object Aggregates { + /** + * Creates a $count pipeline stage using the named field to store the result + * + * @param property the data class field in which to store the count + * @return the $count pipeline stage @mongodb.driver.manual reference/operator/aggregation/count/ $count + */ + public fun count(property: KProperty): Bson = Aggregates.count(property.path()) + + /** + * Creates a $lookup pipeline stage, joining the current collection with the one specified in from using the given + * pipeline. If the first stage in the pipeline is a {@link Aggregates#documents(List) $documents} stage, then the + * {@code from} collection is ignored. + * + * @param from the collection in the same database to perform the join with. + * @param localField the data class field from the local collection to match values against. + * @param foreignField the data class field in the from collection to match values against. + * @param pipeline the pipeline to run on the joined collection. + * @param as the name of the new array field to add to the input documents. + * @return the $lookup pipeline stage @mongodb.driver.manual reference/operator/aggregation/lookup/ + * $lookup @mongodb.server.release 3.6 + */ + public fun lookup( + from: com.mongodb.kotlin.client.MongoCollection, + localField: KProperty1, + foreignField: KProperty1, + newAs: String + ): Bson = Aggregates.lookup(from.namespace.collectionName, localField.path(), foreignField.path(), newAs) + + /** + * Creates a $lookup pipeline stage, joining the current collection with the one specified in from using the given + * pipeline. If the first stage in the pipeline is a {@link Aggregates#documents(List) $documents} stage, then the + * {@code from} collection is ignored. + * + * @param from the collection in the same database to perform the join with. + * @param localField the data class field from the local collection to match values against. + * @param foreignField the data class field in the from collection to match values against. + * @param pipeline the pipeline to run on the joined collection. + * @param as the name of the new array field to add to the input documents. 
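As a usage sketch for these typed `lookup` overloads (the `customers` handle and both data classes are assumptions for illustration):

```kotlin
import com.mongodb.kotlin.client.MongoCollection
import com.mongodb.kotlin.client.model.Aggregates

data class Order(val customerId: Int) // illustrative
data class Customer(val id: Int)      // illustrative

// Joins on Order::customerId == Customer::id and stores matches in "customer";
// only the collection name is read from the passed-in handle.
fun lookupStage(customers: MongoCollection<Customer>) =
    Aggregates.lookup(customers, Order::customerId, Customer::id, "customer")
```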
+ * @return the $lookup pipeline stage @mongodb.driver.manual reference/operator/aggregation/lookup/ + * $lookup @mongodb.server.release 3.6 + */ + public fun lookup( + from: com.mongodb.kotlin.client.coroutine.MongoCollection, + localField: KProperty1, + foreignField: KProperty1, + newAs: String + ): Bson = Aggregates.lookup(from.namespace.collectionName, localField.path(), foreignField.path(), newAs) + + /** + * Creates a graphLookup pipeline stage for the specified filter + * + * @param the expression type + * @param from the collection to query + * @param startWith the expression to start the graph lookup with + * @param connectFromField the data class from field + * @param connectToField the data class to field + * @param fieldAs name of field in output document + * @param options optional values for the graphLookup + * @return the $graphLookup pipeline stage @mongodb.driver.manual reference/operator/aggregation/graphLookup/ + * $graphLookup @mongodb.server.release 3.4 + */ + @Suppress("LongParameterList") + public fun graphLookup( + from: com.mongodb.kotlin.client.MongoCollection, + startWith: TExpression, + connectFromField: KProperty1, + connectToField: KProperty1, + fieldAs: String, + options: GraphLookupOptions = GraphLookupOptions() + ): Bson = + Aggregates.graphLookup( + from.namespace.collectionName, startWith, connectFromField.path(), connectToField.path(), fieldAs, options) + + /** + * Creates a graphLookup pipeline stage for the specified filter + * + * @param the expression type + * @param from the collection to query + * @param startWith the expression to start the graph lookup with + * @param connectFromField the data class from field + * @param connectToField the data class to field + * @param fieldAs name of field in output document + * @param options optional values for the graphLookup + * @return the $graphLookup pipeline stage @mongodb.driver.manual reference/operator/aggregation/graphLookup/ + * $graphLookup @mongodb.server.release 3.4 + */ + @Suppress("LongParameterList") + public fun graphLookup( + from: com.mongodb.kotlin.client.coroutine.MongoCollection, + startWith: TExpression, + connectFromField: KProperty1, + connectToField: KProperty1, + fieldAs: String, + options: GraphLookupOptions = GraphLookupOptions() + ): Bson = + Aggregates.graphLookup( + from.namespace.collectionName, startWith, connectFromField.path(), connectToField.path(), fieldAs, options) + + /** + * Creates a $unionWith pipeline stage. + * + * @param collection the collection in the same database to perform the union with. + * @param pipeline the pipeline to run on the union. + * @return the $unionWith pipeline stage @mongodb.driver.manual reference/operator/aggregation/unionWith/ + * $unionWith @mongodb.server.release 4.4 + */ + public fun unionWith(collection: com.mongodb.kotlin.client.MongoCollection<*>, pipeline: List): Bson = + Aggregates.unionWith(collection.namespace.collectionName, pipeline) + + /** + * Creates a $unionWith pipeline stage. + * + * @param collection the collection in the same database to perform the union with. + * @param pipeline the pipeline to run on the union. 
+ * @return the $unionWith pipeline stage @mongodb.driver.manual reference/operator/aggregation/unionWith/ + * $unionWith @mongodb.server.release 4.4 + */ + public fun unionWith( + collection: com.mongodb.kotlin.client.coroutine.MongoCollection<*>, + pipeline: List + ): Bson = Aggregates.unionWith(collection.namespace.collectionName, pipeline) + + /** + * Creates a $unwind pipeline stage for the specified field name, which must be prefixed by a {@code '$'} sign. + * + * @param property the data class field name + * @param unwindOptions options for the unwind pipeline stage + * @return the $unwind pipeline stage @mongodb.driver.manual reference/operator/aggregation/unwind/ $unwind + */ + public fun unwind(property: KProperty?>, unwindOptions: UnwindOptions = UnwindOptions()): Bson { + return if (unwindOptions == UnwindOptions()) { + Aggregates.unwind(property.projection) + } else { + Aggregates.unwind(property.projection, unwindOptions) + } + } + + /** + * Creates a $out pipeline stage that writes into the specified collection + * + * @param collection the collection + * @return the $out pipeline stage @mongodb.driver.manual reference/operator/aggregation/out/ $out + */ + public fun out(collection: com.mongodb.kotlin.client.MongoCollection<*>): Bson = + Aggregates.out(collection.namespace.collectionName) + + /** + * Creates a $out pipeline stage that writes into the specified collection + * + * @param collection the collection + * @return the $out pipeline stage @mongodb.driver.manual reference/operator/aggregation/out/ $out + */ + public fun out(collection: com.mongodb.kotlin.client.coroutine.MongoCollection<*>): Bson = + Aggregates.out(collection.namespace.collectionName) + + /** + * Creates a $merge pipeline stage that merges into the specified collection + * + * @param collection the collection to merge into + * @param options the merge options + * @return the $merge pipeline stage @mongodb.driver.manual reference/operator/aggregation/merge/ + * $merge @mongodb.server.release 4.2 + */ + public fun merge( + collection: com.mongodb.kotlin.client.MongoCollection<*>, + options: MergeOptions = MergeOptions() + ): Bson = Aggregates.merge(collection.namespace.collectionName, options) + + /** + * Creates a $merge pipeline stage that merges into the specified collection + * + * @param collection the collection to merge into + * @param options the merge options + * @return the $merge pipeline stage @mongodb.driver.manual reference/operator/aggregation/merge/ + * $merge @mongodb.server.release 4.2 + */ + public fun merge( + collection: com.mongodb.kotlin.client.coroutine.MongoCollection<*>, + options: MergeOptions = MergeOptions() + ): Bson = Aggregates.merge(collection.namespace.collectionName, options) + + /** + * Creates a `$densify` pipeline stage, which adds documents to a sequence of documents where certain values in the + * `field` are missing. + * + * @param field The field to densify. + * @param range The range. + * @return The requested pipeline stage. @mongodb.driver.manual reference/operator/aggregation/densify/ + * $densify @mongodb.driver.manual core/document/#dot-notation Dot notation @mongodb.server.release 5.1 + */ + public fun densify(property: KProperty, range: DensifyRange): Bson = + Aggregates.densify(property.path(), range) + + /** + * Creates a {@code $densify} pipeline stage, which adds documents to a sequence of documents where certain values + * in the {@code field} are missing. + * + * @param field The field to densify. + * @param range The range. 
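A short sketch of the collection-typed `out`/`merge` overloads above; the `archive` handle is an assumption for illustration:

```kotlin
import com.mongodb.kotlin.client.MongoCollection
import com.mongodb.kotlin.client.model.Aggregates

// With the default MergeOptions() this reduces to merging into the collection
// name taken from archive.namespace; no other state of the handle is used.
fun mergeStage(archive: MongoCollection<*>) = Aggregates.merge(archive)
```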
+ * @param options The densify options. Specifying {@link DensifyOptions#densifyOptions()} is equivalent to calling + * {@link #densify(String, DensifyRange)}. + * @return The requested pipeline stage. @mongodb.driver.manual reference/operator/aggregation/densify/ + * $densify @mongodb.driver.manual core/document/#dot-notation Dot notation @mongodb.server.release 5.1 + */ + public fun densify(property: KProperty, range: DensifyRange, options: DensifyOptions): Bson = + Aggregates.densify(property.path(), range, options) +} diff --git a/driver-kotlin-extensions/src/main/kotlin/com/mongodb/kotlin/client/model/Filters.kt b/driver-kotlin-extensions/src/main/kotlin/com/mongodb/kotlin/client/model/Filters.kt new file mode 100644 index 00000000000..3faef6a8458 --- /dev/null +++ b/driver-kotlin-extensions/src/main/kotlin/com/mongodb/kotlin/client/model/Filters.kt @@ -0,0 +1,1219 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * Copyright (C) 2016/2022 Litote + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @custom-license-header + */ +@file:Suppress("TooManyFunctions") + +package com.mongodb.kotlin.client.model + +import com.mongodb.client.model.Filters +import com.mongodb.client.model.TextSearchOptions +import com.mongodb.client.model.geojson.Geometry +import com.mongodb.client.model.geojson.Point +import java.util.regex.Pattern +import kotlin.internal.OnlyInputTypes +import kotlin.reflect.KProperty +import org.bson.BsonType +import org.bson.conversions.Bson + +/** + * Filters extension methods to improve Kotlin interop + * + * @since 5.3 + */ +public object Filters { + + /** + * Creates a filter that matches all documents where the value of the property equals the specified value. Note that + * this doesn't actually generate an $eq operator, as the query language doesn't require it. + * + * @param value the value, which may be null + * @param the value type + * @return the filter + */ + @JvmSynthetic + @JvmName("eqExt") + public infix fun <@OnlyInputTypes T> KProperty.eq(value: T?): Bson = Filters.eq(path(), value) + + /** + * Creates a filter that matches all documents where the value of the property equals the specified value. Note that + * this doesn't actually generate an $eq operator, as the query language doesn't require it. + * + * @param property the data class property + * @param value the value, which may be null + * @param the value type + * @return the filter + */ + public fun <@OnlyInputTypes T> eq(property: KProperty, value: T?): Bson = property.eq(value) + + /** + * Creates a filter that matches all documents where the value of the property does not equal the specified value. + * + * @param value the value + * @param the value type + * @return the filter + */ + @JvmSynthetic + @JvmName("neExt") + public infix fun <@OnlyInputTypes T> KProperty.ne(value: T?): Bson = Filters.ne(path(), value) + + /** + * Creates a filter that matches all documents where the value of the property does not equal the specified value. 
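To see the infix filter form in action, a small sketch (the `Person` class is illustrative). Because of `@OnlyInputTypes`, a mismatched comparison such as `Person::age ne "13"` is rejected at compile time:

```kotlin
import com.mongodb.kotlin.client.model.Filters.eq
import com.mongodb.kotlin.client.model.Filters.ne

data class Person(val name: String, val age: Int) // illustrative

// Renders the same BSON as Filters.eq("name", "Ada") / Filters.ne("age", 13).
val byName = Person::name eq "Ada"
val notThirteen = Person::age ne 13
```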
+ * + * @param property the data class property + * @param value the value + * @param the value type + * @return the filter + */ + public fun <@OnlyInputTypes T> ne(property: KProperty, value: T?): Bson = property.ne(value) + + /** + * Creates a filter that matches all documents where the value of the given property is less than the specified + * value. + * + * @param value the value + * @param the value type + * @return the filter + */ + @JvmSynthetic + @JvmName("ltExt") + public infix fun <@OnlyInputTypes T> KProperty.lt(value: T): Bson = Filters.lt(path(), value) + + /** + * Creates a filter that matches all documents where the value of the given property is less than the specified + * value. + * + * @param property the data class property + * @param value the value + * @param the value type + * @return the filter + */ + public fun <@OnlyInputTypes T> lt(property: KProperty, value: T): Bson = property.lt(value) + + /** + * Creates a filter that matches all documents where the value of the given property is less than or equal to the + * specified value. + * + * @param value the value + * @param the value type + * @return the filter + */ + @JvmSynthetic + @JvmName("lteExt") + public infix fun <@OnlyInputTypes T> KProperty.lte(value: T): Bson = Filters.lte(path(), value) + + /** + * Creates a filter that matches all documents where the value of the given property is less than or equal to the + * specified value. + * + * @param property the data class property + * @param value the value + * @param the value type + * @return the filter + */ + public fun <@OnlyInputTypes T> lte(property: KProperty, value: T): Bson = property.lte(value) + + /** + * Creates a filter that matches all documents where the value of the given property is greater than the specified + * value. + * + * @param value the value + * @param the value type + * @return the filter + */ + @JvmSynthetic + @JvmName("gtExt") + public infix fun <@OnlyInputTypes T> KProperty.gt(value: T): Bson = Filters.gt(path(), value) + + /** + * Creates a filter that matches all documents where the value of the given property is greater than the specified + * value. + * + * @param property the data class property + * @param value the value + * @param the value type + * @return the filter + */ + public fun <@OnlyInputTypes T> gt(property: KProperty, value: T): Bson = property.gt(value) + + /** + * Creates a filter that matches all documents where the value of the given property is greater than or equal to the + * specified value. + * + * @param value the value + * @param the value type + * @return the filter + */ + @JvmSynthetic + @JvmName("gteExt") + public infix fun <@OnlyInputTypes T> KProperty.gte(value: T): Bson = Filters.gte(path(), value) + + /** + * Creates a filter that matches all documents where the value of the given property is greater than or equal to the + * specified value. + * + * @param property the data class property + * @param value the value + * @param the value type + * @return the filter + */ + public fun <@OnlyInputTypes T> gte(property: KProperty, value: T): Bson = property.gte(value) + + /** + * Creates a filter that matches all documents where the value of a property equals any value in the list of + * specified values. 
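One Kotlin-specific wrinkle for the `in`/`nin` variants defined just below: `in` is a hard keyword, so both the import and the infix call have to escape it with backticks. A small sketch (the data class is illustrative):

```kotlin
import com.mongodb.kotlin.client.model.Filters.`in`

data class Person(val name: String) // illustrative

// Renders as {"name": {"$in": ["Ada", "Grace"]}}.
val filter = Person::name `in` listOf("Ada", "Grace")
```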
+ * + * @param values the list of values + * @param the value type + * @return the filter + */ + @Suppress("FunctionNaming") + @JvmSynthetic + @JvmName("inExt") + public infix fun <@OnlyInputTypes T> KProperty.`in`(values: Iterable): Bson = Filters.`in`(path(), values) + + /** + * Creates a filter that matches all documents where the value of a property equals any value in the list of + * specified values. + * + * @param property the data class property + * @param values the list of values + * @param the value type + * @return the filter + */ + @Suppress("FunctionNaming") + public fun <@OnlyInputTypes T> `in`(property: KProperty, values: Iterable): Bson = property.`in`(values) + + /** + * Creates a filter that matches all documents where the value of a property equals any value in the list of + * specified values. + * + * @param values the list of values + * @param the value type + * @return the filter + */ + @Suppress("FunctionNaming") + @JvmSynthetic + @JvmName("inIterableExt") + public infix fun <@OnlyInputTypes T> KProperty?>.`in`(values: Iterable): Bson = + Filters.`in`(path(), values) + + /** + * Creates a filter that matches all documents where the value of a property equals any value in the list of + * specified values. + * + * @param property the data class property + * @param values the list of values + * @param the value type + * @return the filter + */ + @Suppress("FunctionNaming") + @JvmSynthetic + @JvmName("inIterable") + public fun <@OnlyInputTypes T> `in`(property: KProperty?>, values: Iterable): Bson = + property.`in`(values) + + /** + * Creates a filter that matches all documents where the value of a property does not equal any of the specified + * values or does not exist. + * + * @param values the list of values + * @param the value type + * @return the filter + */ + @JvmSynthetic + @JvmName("ninExt") + public infix fun <@OnlyInputTypes T> KProperty.nin(values: Iterable): Bson = Filters.nin(path(), values) + + /** + * Creates a filter that matches all documents where the value of a property does not equal any of the specified + * values or does not exist. + * + * @param property the data class property + * @param values the list of values + * @param the value type + * @return the filter + */ + public fun <@OnlyInputTypes T> nin(property: KProperty, values: Iterable): Bson = property.nin(values) + + /** + * Creates a filter that matches all documents where the value of a property does not equal any of the specified + * values or does not exist. + * + * @param values the list of values + * @param the value type + * @return the filter + */ + @JvmSynthetic + @JvmName("ninIterableExt") + public infix fun <@OnlyInputTypes T> KProperty?>.nin(values: Iterable): Bson = + Filters.nin(path(), values) + + /** + * Creates a filter that matches all documents where the value of a property does not equal any of the specified + * values or does not exist. + * + * @param property the data class property + * @param values the list of values + * @param the value type + * @return the filter + */ + @JvmSynthetic + @JvmName("ninIterable") + public fun <@OnlyInputTypes T> nin(property: KProperty?>, values: Iterable): Bson = + property.nin(values) + + /** + * Creates a filter that performs a logical AND of the provided list of filters. Note that this will only generate + * an "$and" operator if absolutely necessary, as the query language implicitly ands together all the keys. 
In other
+ * words, a query expression like:
+ * ```and(eq("x", 1), lt("y", 3))```
+ *
+ * will generate a MongoDB query like: `{x : 1, y : {$lt : 3}}`
+ *
+ * @param filters the list of filters to and together
+ * @return the filter
+ */
+ public fun and(filters: Iterable<Bson?>): Bson = Filters.and(filters)
+
+ /**
+ * Creates a filter that performs a logical AND of the provided list of filters. Note that this will only generate
+ * an "$and" operator if absolutely necessary, as the query language implicitly ands together all the keys. In other
+ * words, a query expression like:
+ * ```and(eq("x", 1), lt("y", 3))```
+ *
+ * will generate a MongoDB query like: `{x : 1, y : {$lt : 3}}`
+ *
+ * @param filters the list of filters to and together
+ * @return the filter
+ */
+ public fun and(vararg filters: Bson?): Bson = and(filters.toList())
+
+ /**
+ * Creates a filter that performs a logical OR of the provided list of filters.
+ *
+ * @param filters the list of filters to or together
+ * @return the filter
+ */
+ public fun or(filters: Iterable<Bson?>): Bson = Filters.or(filters)
+
+ /**
+ * Creates a filter that performs a logical OR of the provided list of filters.
+ *
+ * @param filters the list of filters to or together
+ * @return the filter
+ */
+ public fun or(vararg filters: Bson?): Bson = or(filters.toList())
+
+ /**
+ * Creates a filter that matches all documents that do not match the passed in filter. Requires the property to be
+ * passed as part of the value passed in and lifts it to create a valid "$not" query:
+ * ```not(eq("x", 1))```
+ *
+ * will generate a MongoDB query like: `{x : {$not: {$eq : 1}}}`
+ *
+ * @param filter the filter to negate
+ * @return the filter
+ */
+ public fun not(filter: Bson): Bson = Filters.not(filter)
+
+ /**
+ * Creates a filter that performs a logical NOR operation on all the specified filters.
+ *
+ * @param filters the list of values
+ * @return the filter
+ */
+ public fun nor(vararg filters: Bson): Bson = Filters.nor(*filters)
+
+ /**
+ * Creates a filter that performs a logical NOR operation on all the specified filters.
+ *
+ * @param filters the list of values
+ * @return the filter
+ */
+ public fun nor(filters: Iterable<Bson>): Bson = Filters.nor(filters)
+
+ /**
+ * Creates a filter that matches all documents that contain the given property.
+ *
+ * @return the filter
+ */
+ @JvmSynthetic @JvmName("existsExt") public fun KProperty<*>.exists(): Bson = Filters.exists(path())
+
+ /**
+ * Creates a filter that matches all documents that contain the given property.
+ *
+ * @param property the data class property
+ * @return the filter
+ */
+ public fun exists(property: KProperty<*>): Bson = Filters.exists(property.path())
+
+ /**
+ * Creates a filter that matches all documents that either contain or do not contain the given property, depending
+ * on the value of the exists parameter.
+ *
+ * @param exists true to check for existence, false to check for absence
+ * @return the filter
+ */
+ @JvmSynthetic
+ @JvmName("existsExt")
+ public infix fun KProperty<*>.exists(exists: Boolean): Bson = Filters.exists(path(), exists)
+
+ /**
+ * Creates a filter that matches all documents that either contain or do not contain the given property, depending
+ * on the value of the exists parameter.
+ * + * @param property the data class property + * @param exists true to check for existence, false to check for absence + * @return the filter + */ + public fun exists(property: KProperty, exists: Boolean): Bson = property.exists(exists) + + /** + * Creates a filter that matches all documents where the value of the property is of the specified BSON type. + * + * @param type the BSON type + * @return the filter + */ + @JvmSynthetic + @JvmName("typeExt") + public infix fun KProperty.type(type: BsonType): Bson = Filters.type(path(), type) + + /** + * Creates a filter that matches all documents where the value of the property is of the specified BSON type. + * + * @param property the data class property + * @param type the BSON type + * @return the filter + */ + public fun type(property: KProperty, type: BsonType): Bson = property.type(type) + + /** + * Creates a filter that matches all documents where the value of a property divided by a divisor has the specified + * remainder (i.e. perform a modulo operation to select documents). + * + * @param divisor the modulus + * @param remainder the remainder + * @return the filter + */ + @JvmSynthetic + @JvmName("modExt") + public fun KProperty.mod(divisor: Long, remainder: Long): Bson = Filters.mod(path(), divisor, remainder) + + /** + * Creates a filter that matches all documents where the value of a property divided by a divisor has the specified + * remainder (i.e. perform a modulo operation to select documents). + * + * @param property the data class property + * @param divisor the modulus + * @param remainder the remainder + * @return the filter + */ + public fun mod(property: KProperty, divisor: Long, remainder: Long): Bson = property.mod(divisor, remainder) + + /** + * Creates a filter that matches all documents where the value of the property matches the given regular expression + * pattern. + * + * @param pattern the pattern + * @return the filter + */ + @JvmSynthetic + @JvmName("regexExt") + public infix fun KProperty.regex(pattern: String): Bson = Filters.regex(path(), pattern) + + /** + * Creates a filter that matches all documents where the value of the property matches the given regular expression + * pattern. + * + * @param property the data class property + * @param pattern the pattern + * @return the filter + */ + public fun regex(property: KProperty, pattern: String): Bson = property.regex(pattern) + + /** + * Creates a filter that matches all documents where the value of the property matches the given regular expression + * pattern. + * + * @param pattern the pattern + * @return the filter + */ + @JvmSynthetic + @JvmName("regexExt") + public infix fun KProperty.regex(pattern: Pattern): Bson = Filters.regex(path(), pattern) + + /** + * Creates a filter that matches all documents where the value of the property matches the given regular expression + * pattern. + * + * @param property the data class property + * @param pattern the pattern + * @return the filter + */ + public fun regex(property: KProperty, pattern: Pattern): Bson = property.regex(pattern) + + /** + * Creates a filter that matches all documents where the value of the option matches the given regular expression + * pattern with the given options applied. 
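As a sketch of the regex overloads defined here and continued below (the data class is illustrative): the `kotlin.text.Regex` variant converts through `toPattern()`, so compiled flags such as `IGNORE_CASE` carry over to the resulting filter:

```kotlin
import com.mongodb.kotlin.client.model.Filters.regex

data class Person(val name: String) // illustrative

// Case-insensitive match on the name field via a kotlin.text.Regex.
val filter = Person::name regex Regex("^ada$", RegexOption.IGNORE_CASE)
```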
+ * + * @param pattern the pattern + * @param options the options + * @return the filter + */ + @JvmSynthetic + @JvmName("regexExt") + public fun KProperty.regex(pattern: String, options: String): Bson = + Filters.regex(path(), pattern, options) + + /** + * Creates a filter that matches all documents where the value of the option matches the given regular expression + * pattern with the given options applied. + * + * @param property the data class property + * @param pattern the pattern + * @param options the options + * @return the filter + */ + public fun regex(property: KProperty, pattern: String, options: String): Bson = + property.regex(pattern, options) + + /** + * Creates a filter that matches all documents where the value of the property matches the given regular expression + * pattern. + * + * @param regex the regex + * @return the filter + */ + @JvmSynthetic + @JvmName("regexExt") + public infix fun KProperty.regex(regex: Regex): Bson = Filters.regex(path(), regex.toPattern()) + + /** + * Creates a filter that matches all documents where the value of the property matches the given regular expression + * pattern. + * + * @param property the data class property + * @param regex the regex + * @return the filter + */ + public fun regex(property: KProperty, regex: Regex): Bson = property.regex(regex.toPattern()) + + /** + * Creates a filter that matches all documents where the value of the property matches the given regular expression + * pattern. + * + * @param pattern the pattern + * @return the filter + */ + @JvmSynthetic + @JvmName("regexIterableExt") + public infix fun KProperty>.regex(pattern: String): Bson = Filters.regex(path(), pattern) + + /** + * Creates a filter that matches all documents where the value of the property matches the given regular expression + * pattern. + * + * @param property the data class property + * @param pattern the pattern + * @return the filter + */ + @JvmSynthetic + @JvmName("regexIterable") + public fun regex(property: KProperty>, pattern: String): Bson = property.regex(pattern) + + /** + * Creates a filter that matches all documents where the value of the property matches the given regular expression + * pattern. + * + * @param pattern the pattern + * @return the filter + */ + @JvmSynthetic + @JvmName("regexIterableExt") + public infix fun KProperty>.regex(pattern: Pattern): Bson = Filters.regex(path(), pattern) + + /** + * Creates a filter that matches all documents where the value of the property matches the given regular expression + * pattern. + * + * @param property the data class property + * @param pattern the pattern + * @return the filter + */ + @JvmSynthetic + @JvmName("regexIterable") + public fun regex(property: KProperty>, pattern: Pattern): Bson = property.regex(pattern) + + /** + * Creates a filter that matches all documents where the value of the option matches the given regular expression + * pattern with the given options applied. + * + * @param regex the regex pattern + * @param options the options + * @return the filter + */ + @JvmSynthetic + @JvmName("regexIterableExt") + public fun KProperty>.regex(regex: String, options: String): Bson = + Filters.regex(path(), regex, options) + + /** + * Creates a filter that matches all documents where the value of the option matches the given regular expression + * pattern with the given options applied. 
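+ * For example, matching any element of a string-array property, as a sketch (the `Article` data class is
+ * hypothetical):
+ * ```
+ * data class Article(val keywords: List<String>)
+ * // matches documents where some keyword starts with "mongo", ignoring case
+ * val filter = regex(Article::keywords, "^mongo", "i")
+ * ```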
+ *
+ * @param property the data class property
+ * @param regex the regex pattern
+ * @param options the options
+ * @return the filter
+ */
+ @JvmSynthetic
+ @JvmName("regexIterable")
+ public fun regex(property: KProperty<Iterable<String?>?>, regex: String, options: String): Bson =
+ property.regex(regex, options)
+
+ /**
+ * Creates a filter that matches all documents where the value of the property matches the given regular expression
+ * pattern.
+ *
+ * @param regex the regex
+ * @return the filter
+ */
+ @JvmSynthetic
+ @JvmName("regexIterableExt")
+ public infix fun KProperty<Iterable<String?>?>.regex(regex: Regex): Bson = Filters.regex(path(), regex.toPattern())
+
+ /**
+ * Creates a filter that matches all documents where the value of the property matches the given regular expression
+ * pattern.
+ *
+ * @param property the data class property
+ * @param regex the regex
+ * @return the filter
+ */
+ @JvmSynthetic
+ @JvmName("regexIterable")
+ public fun regex(property: KProperty<Iterable<String?>?>, regex: Regex): Bson = property.regex(regex.toPattern())
+
+ /**
+ * Creates a filter that matches all documents matching the given search term with the given text search
+ * options.
+ *
+ * @param search the search term
+ * @param textSearchOptions the text search options to use
+ * @return the filter
+ */
+ public fun text(search: String, textSearchOptions: TextSearchOptions = TextSearchOptions()): Bson =
+ Filters.text(search, textSearchOptions)
+
+ /**
+ * Creates a filter that matches all documents for which the given expression is true.
+ *
+ * @param javaScriptExpression the JavaScript expression
+ * @return the filter
+ */
+ public fun where(javaScriptExpression: String): Bson = Filters.where(javaScriptExpression)
+
+ /**
+ * Allows the use of aggregation expressions within the query language.
+ *
+ * @param expression the aggregation expression
+ * @param <T> the expression type
+ * @return the filter
+ */
+ public fun <T> expr(expression: T): Bson = Filters.expr(expression)
+
+ /**
+ * Creates a filter that matches all documents where the value of a property is an array that contains all the
+ * specified values.
+ *
+ * @param values the list of values
+ * @param <T> the value type
+ * @return the filter
+ */
+ @JvmSynthetic
+ @JvmName("allExt")
+ public infix fun <@OnlyInputTypes T> KProperty<Iterable<T>?>.all(values: Iterable<T>): Bson =
+ Filters.all(path(), values)
+
+ /**
+ * Creates a filter that matches all documents where the value of a property is an array that contains all the
+ * specified values.
+ *
+ * @param property the data class property
+ * @param values the list of values
+ * @param <T> the value type
+ * @return the filter
+ */
+ public fun <@OnlyInputTypes T> all(property: KProperty<Iterable<T>?>, values: Iterable<T>): Bson =
+ property.all(values)
+
+ /**
+ * Creates a filter that matches all documents where the value of a property is an array that contains all the
+ * specified values.
+ *
+ * @param values the list of values
+ * @param <T> the value type
+ * @return the filter
+ */
+ @JvmSynthetic
+ @JvmName("allvargsExt")
+ public fun <@OnlyInputTypes T> KProperty<Iterable<T>?>.all(vararg values: T): Bson = Filters.all(path(), *values)
+
+ /**
+ * Creates a filter that matches all documents where the value of a property is an array that contains all the
+ * specified values.
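+ * A usage sketch (the `Post` data class is hypothetical):
+ * ```
+ * data class Post(val tags: List<String>)
+ * // matches posts tagged with both "kotlin" and "mongodb"
+ * val filter = all(Post::tags, "kotlin", "mongodb")
+ * ```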
+ * + * @param property the data class property + * @param values the list of values + * @param the value type + * @return the filter + */ + public fun <@OnlyInputTypes T> all(property: KProperty?>, vararg values: T): Bson = + property.all(*values) + + /** + * Creates a filter that matches all documents containing a property that is an array where at least one member of + * the array matches the given filter. + * + * @param filter the filter to apply to each element + * @return the filter + */ + @JvmSynthetic + @JvmName("elemMatchExt") + public infix fun KProperty?>.elemMatch(filter: Bson): Bson = Filters.elemMatch(path(), filter) + + /** + * Creates a filter that matches all documents containing a property that is an array where at least one member of + * the array matches the given filter. + * + * @param property the data class property + * @param filter the filter to apply to each element + * @return the filter + */ + public fun elemMatch(property: KProperty?>, filter: Bson): Bson = property.elemMatch(filter) + + /** + * Creates a filter that matches all documents where the value of a property is an array of the specified size. + * + * @param size the size of the array + * @return the filter + */ + @JvmSynthetic + @JvmName("sizeExt") + public infix fun KProperty.size(size: Int): Bson = Filters.size(path(), size) + + /** + * Creates a filter that matches all documents where the value of a property is an array of the specified size. + * + * @param property the data class property + * @param size the size of the array + * @return the filter + */ + public fun size(property: KProperty, size: Int): Bson = property.size(size) + + /** + * Creates a filter that matches all documents where all of the bit positions are clear in the property. + * + * @param bitmask the bitmask + * @return the filter + */ + @JvmSynthetic + @JvmName("bitsAllClearExt") + public infix fun KProperty.bitsAllClear(bitmask: Long): Bson = Filters.bitsAllClear(path(), bitmask) + + /** + * Creates a filter that matches all documents where all of the bit positions are clear in the property. + * + * @param property the data class property + * @param bitmask the bitmask + * @return the filter + */ + public fun bitsAllClear(property: KProperty, bitmask: Long): Bson = property.bitsAllClear(bitmask) + + /** + * Creates a filter that matches all documents where all of the bit positions are set in the property. + * + * @param bitmask the bitmask + * @return the filter + */ + @JvmSynthetic + @JvmName("bitsAllSetExt") + public infix fun KProperty.bitsAllSet(bitmask: Long): Bson = Filters.bitsAllSet(path(), bitmask) + + /** + * Creates a filter that matches all documents where all of the bit positions are set in the property. + * + * @param property the data class property + * @param bitmask the bitmask + * @return the filter + */ + public fun bitsAllSet(property: KProperty, bitmask: Long): Bson = property.bitsAllSet(bitmask) + + /** + * Creates a filter that matches all documents where any of the bit positions are clear in the property. + * + * @param bitmask the bitmask + * @return the filter + */ + @JvmSynthetic + @JvmName("bitsAnyClearExt") + public infix fun KProperty.bitsAnyClear(bitmask: Long): Bson = Filters.bitsAnyClear(path(), bitmask) + + /** + * Creates a filter that matches all documents where any of the bit positions are clear in the property. 
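+ * A usage sketch (the `Flags` data class is hypothetical):
+ * ```
+ * data class Flags(val permissions: Long)
+ * // matches documents where bit 0 or bit 1 of "permissions" is clear
+ * val filter = bitsAnyClear(Flags::permissions, 0b11L)
+ * ```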
+ * + * @param property the data class property + * @param bitmask the bitmask + * @return the filter + */ + public fun bitsAnyClear(property: KProperty, bitmask: Long): Bson = property.bitsAnyClear(bitmask) + + /** + * Creates a filter that matches all documents where any of the bit positions are set in the property. + * + * @param bitmask the bitmask + * @return the filter + */ + @JvmSynthetic + @JvmName("bitsAnySetExt") + public infix fun KProperty.bitsAnySet(bitmask: Long): Bson = Filters.bitsAnySet(path(), bitmask) + + /** + * Creates a filter that matches all documents where any of the bit positions are set in the property. + * + * @param property the data class property + * @param bitmask the bitmask + * @return the filter + */ + public fun bitsAnySet(property: KProperty, bitmask: Long): Bson = property.bitsAnySet(bitmask) + + /** + * Creates a filter that matches all documents containing a property with geospatial data that exists entirely + * within the specified shape. + * + * @param geometry the bounding GeoJSON geometry object + * @return the filter + */ + @JvmSynthetic + @JvmName("geoWithinExt") + public infix fun KProperty.geoWithin(geometry: Geometry): Bson = Filters.geoWithin(path(), geometry) + + /** + * Creates a filter that matches all documents containing a property with geospatial data that exists entirely + * within the specified shape. + * + * @param property the data class property + * @param geometry the bounding GeoJSON geometry object + * @return the filter + */ + public fun geoWithin(property: KProperty, geometry: Geometry): Bson = property.geoWithin(geometry) + + /** + * Creates a filter that matches all documents containing a property with geospatial data that exists entirely + * within the specified shape. + * + * @param geometry the bounding GeoJSON geometry object + * @return the filter + */ + @JvmSynthetic + @JvmName("geoWithinExt") + public infix fun KProperty.geoWithin(geometry: Bson): Bson = Filters.geoWithin(path(), geometry) + + /** + * Creates a filter that matches all documents containing a property with geospatial data that exists entirely + * within the specified shape. + * + * @param property the data class property + * @param geometry the bounding GeoJSON geometry object + * @return the filter + */ + public fun geoWithin(property: KProperty, geometry: Bson): Bson = property.geoWithin(geometry) + + /** + * Creates a filter that matches all documents containing a property with grid coordinates data that exist entirely + * within the specified box. + * + * @param lowerLeftX the lower left x coordinate of the box + * @param lowerLeftY the lower left y coordinate of the box + * @param upperRightX the upper left x coordinate of the box + * @param upperRightY the upper left y coordinate of the box + * @return the filter + */ + @JvmSynthetic + @JvmName("geoWithinBoxExt") + public fun KProperty.geoWithinBox( + lowerLeftX: Double, + lowerLeftY: Double, + upperRightX: Double, + upperRightY: Double + ): Bson = Filters.geoWithinBox(path(), lowerLeftX, lowerLeftY, upperRightX, upperRightY) + + /** + * Creates a filter that matches all documents containing a property with grid coordinates data that exist entirely + * within the specified box. 
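+ * A usage sketch with legacy coordinate pairs (the `Place` data class is hypothetical):
+ * ```
+ * data class Place(val location: List<Double>)
+ * // matches locations inside the box from (0, 0) to (10, 10)
+ * val filter = geoWithinBox(Place::location, 0.0, 0.0, 10.0, 10.0)
+ * ```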
+ * + * @param property the data class property + * @param lowerLeftX the lower left x coordinate of the box + * @param lowerLeftY the lower left y coordinate of the box + * @param upperRightX the upper left x coordinate of the box + * @param upperRightY the upper left y coordinate of the box + * @return the filter + */ + public fun geoWithinBox( + property: KProperty, + lowerLeftX: Double, + lowerLeftY: Double, + upperRightX: Double, + upperRightY: Double + ): Bson = property.geoWithinBox(lowerLeftX, lowerLeftY, upperRightX, upperRightY) + + /** + * Creates a filter that matches all documents containing a property with grid coordinates data that exist entirely + * within the specified polygon. + * + * @param points a list of pairs of x, y coordinates. Any extra dimensions are ignored + * @return the filter + */ + @JvmSynthetic + @JvmName("geoWithinPolygonExt") + public infix fun KProperty.geoWithinPolygon(points: List>): Bson = + Filters.geoWithinPolygon(path(), points) + + /** + * Creates a filter that matches all documents containing a property with grid coordinates data that exist entirely + * within the specified polygon. + * + * @param property the data class property + * @param points a list of pairs of x, y coordinates. Any extra dimensions are ignored + * @return the filter + */ + public fun geoWithinPolygon(property: KProperty, points: List>): Bson = + property.geoWithinPolygon(points) + + /** + * Creates a filter that matches all documents containing a property with grid coordinates data that exist entirely + * within the specified circle. + * + * @param x the x coordinate of the circle + * @param y the y coordinate of the circle + * @param radius the radius of the circle, as measured in the units used by the coordinate system + * @return the filter + */ + @JvmSynthetic + @JvmName("geoWithinCenterExt") + public fun KProperty.geoWithinCenter(x: Double, y: Double, radius: Double): Bson = + Filters.geoWithinCenter(path(), x, y, radius) + + /** + * Creates a filter that matches all documents containing a property with grid coordinates data that exist entirely + * within the specified circle. + * + * @param property the data class property + * @param x the x coordinate of the circle + * @param y the y coordinate of the circle + * @param radius the radius of the circle, as measured in the units used by the coordinate system + * @return the filter + */ + public fun geoWithinCenter(property: KProperty, x: Double, y: Double, radius: Double): Bson = + property.geoWithinCenter(x, y, radius) + + /** + * Creates a filter that matches all documents containing a property with geospatial data (GeoJSON or legacy + * coordinate pairs) that exist entirely within the specified circle, using spherical geometry. If using longitude + * and latitude, specify longitude first. + * + * @param x the x coordinate of the circle + * @param y the y coordinate of the circle + * @param radius the radius of the circle, in radians + * @return the filter + */ + @JvmSynthetic + @JvmName("geoWithinCenterSphereExt") + public fun KProperty.geoWithinCenterSphere(x: Double, y: Double, radius: Double): Bson = + Filters.geoWithinCenterSphere(path(), x, y, radius) + + /** + * Creates a filter that matches all documents containing a property with geospatial data (GeoJSON or legacy + * coordinate pairs) that exist entirely within the specified circle, using spherical geometry. If using longitude + * and latitude, specify longitude first. 
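+ * Since the radius is expressed in radians, divide a distance by the Earth's approximate radius (6378.1 km) to
+ * convert it. A usage sketch (the `Cafe` data class is hypothetical):
+ * ```
+ * data class Cafe(val location: List<Double>)
+ * // matches cafes within roughly 1 km of (-73.98, 40.75)
+ * val filter = geoWithinCenterSphere(Cafe::location, -73.98, 40.75, 1.0 / 6378.1)
+ * ```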
+ * + * @param property the data class property + * @param x the x coordinate of the circle + * @param y the y coordinate of the circle + * @param radius the radius of the circle, in radians + * @return the filter + */ + public fun geoWithinCenterSphere(property: KProperty, x: Double, y: Double, radius: Double): Bson = + property.geoWithinCenterSphere(x, y, radius) + + /** + * Creates a filter that matches all documents containing a property with geospatial data that intersects with the + * specified shape. + * + * @param geometry the bounding GeoJSON geometry object + * @return the filter + */ + @JvmSynthetic + @JvmName("geoIntersectsExt") + public infix fun KProperty.geoIntersects(geometry: Geometry): Bson = Filters.geoIntersects(path(), geometry) + + /** + * Creates a filter that matches all documents containing a property with geospatial data that intersects with the + * specified shape. + * + * @param property the data class property + * @param geometry the bounding GeoJSON geometry object + * @return the filter + */ + public fun geoIntersects(property: KProperty, geometry: Geometry): Bson = property.geoIntersects(geometry) + + /** + * Creates a filter that matches all documents containing a property with geospatial data that intersects with the + * specified shape. + * + * @param geometry the bounding GeoJSON geometry object + * @return the filter + */ + @JvmSynthetic + @JvmName("geoIntersectsExt") + public infix fun KProperty.geoIntersects(geometry: Bson): Bson = Filters.geoIntersects(path(), geometry) + + /** + * Creates a filter that matches all documents containing a property with geospatial data that intersects with the + * specified shape. + * + * @param property the data class property + * @param geometry the bounding GeoJSON geometry object + * @return the filter + */ + public fun geoIntersects(property: KProperty, geometry: Bson): Bson = property.geoIntersects(geometry) + + /** + * Creates a filter that matches all documents containing a property with geospatial data that is near the specified + * GeoJSON point. + * + * @param geometry the bounding GeoJSON geometry object + * @param maxDistance the maximum distance from the point, in meters + * @param minDistance the minimum distance from the point, in meters + * @return the filter + */ + @JvmSynthetic + @JvmName("nearExt") + public fun KProperty.near(geometry: Point, maxDistance: Double? = null, minDistance: Double? = null): Bson = + Filters.near(path(), geometry, maxDistance, minDistance) + + /** + * Creates a filter that matches all documents containing a property with geospatial data that is near the specified + * GeoJSON point. + * + * @param property the data class property + * @param geometry the bounding GeoJSON geometry object + * @param maxDistance the maximum distance from the point, in meters + * @param minDistance the minimum distance from the point, in meters + * @return the filter + */ + public fun near( + property: KProperty, + geometry: Point, + maxDistance: Double? = null, + minDistance: Double? = null + ): Bson = property.near(geometry, maxDistance, minDistance) + + /** + * Creates a filter that matches all documents containing a property with geospatial data that is near the specified + * GeoJSON point. 
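+ * A usage sketch with the point expressed as Bson (the `Cafe` data class is hypothetical):
+ * ```
+ * data class Cafe(val location: org.bson.Document)
+ * val point = org.bson.Document("type", "Point").append("coordinates", listOf(-73.98, 40.75))
+ * // matches cafes within 500 meters of the point
+ * val filter = Cafe::location.near(point, maxDistance = 500.0)
+ * ```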
+ * + * @param geometry the bounding GeoJSON geometry object + * @param maxDistance the maximum distance from the point, in meters + * @param minDistance the minimum distance from the point, in meters + * @return the filter + */ + @JvmSynthetic + @JvmName("nearExt") + public fun KProperty.near(geometry: Bson, maxDistance: Double? = null, minDistance: Double? = null): Bson = + Filters.near(path(), geometry, maxDistance, minDistance) + + /** + * Creates a filter that matches all documents containing a property with geospatial data that is near the specified + * GeoJSON point. + * + * @param property the data class property + * @param geometry the bounding GeoJSON geometry object + * @param maxDistance the maximum distance from the point, in meters + * @param minDistance the minimum distance from the point, in meters + * @return the filter + */ + public fun near( + property: KProperty, + geometry: Bson, + maxDistance: Double? = null, + minDistance: Double? = null + ): Bson = property.near(geometry, maxDistance, minDistance) + + /** + * Creates a filter that matches all documents containing a property with geospatial data that is near the specified + * point. + * + * @param x the x coordinate + * @param y the y coordinate + * @param maxDistance the maximum distance from the point, in radians + * @param minDistance the minimum distance from the point, in radians + * @return the filter + */ + @JvmSynthetic + @JvmName("nearExt") + public fun KProperty.near( + x: Double, + y: Double, + maxDistance: Double? = null, + minDistance: Double? = null + ): Bson = Filters.near(path(), x, y, maxDistance, minDistance) + + /** + * Creates a filter that matches all documents containing a property with geospatial data that is near the specified + * point. + * + * @param property the data class property + * @param x the x coordinate + * @param y the y coordinate + * @param maxDistance the maximum distance from the point, in radians + * @param minDistance the minimum distance from the point, in radians + * @return the filter + */ + public fun near( + property: KProperty, + x: Double, + y: Double, + maxDistance: Double? = null, + minDistance: Double? = null + ): Bson = property.near(x, y, maxDistance, minDistance) + + /** + * Creates a filter that matches all documents containing a property with geospatial data that is near the specified + * GeoJSON point using spherical geometry. + * + * @param geometry the bounding GeoJSON geometry object + * @param maxDistance the maximum distance from the point, in meters + * @param minDistance the minimum distance from the point, in meters + * @return the filter + */ + @JvmSynthetic + @JvmName("nearSphereExt") + public fun KProperty.nearSphere( + geometry: Bson, + maxDistance: Double? = null, + minDistance: Double? = null + ): Bson = Filters.nearSphere(path(), geometry, maxDistance, minDistance) + + /** + * Creates a filter that matches all documents containing a property with geospatial data that is near the specified + * GeoJSON point using spherical geometry. + * + * @param property the data class property + * @param geometry the bounding GeoJSON geometry object + * @param maxDistance the maximum distance from the point, in meters + * @param minDistance the minimum distance from the point, in meters + * @return the filter + */ + public fun nearSphere( + property: KProperty, + geometry: Bson, + maxDistance: Double? = null, + minDistance: Double? 
= null + ): Bson = property.nearSphere(geometry, maxDistance, minDistance) + + /** + * Creates a filter that matches all documents containing a property with geospatial data that is near the specified + * GeoJSON point using spherical geometry. + * + * @param geometry the bounding GeoJSON geometry object + * @param maxDistance the maximum distance from the point, in meters + * @param minDistance the minimum distance from the point, in meters + * @return the filter + */ + @JvmSynthetic + @JvmName("nearSphereExt") + public fun KProperty.nearSphere( + geometry: Point, + maxDistance: Double? = null, + minDistance: Double? = null + ): Bson = Filters.nearSphere(path(), geometry, maxDistance, minDistance) + + /** + * Creates a filter that matches all documents containing a property with geospatial data that is near the specified + * GeoJSON point using spherical geometry. + * + * @param property the data class property + * @param geometry the bounding GeoJSON geometry object + * @param maxDistance the maximum distance from the point, in meters + * @param minDistance the minimum distance from the point, in meters + * @return the filter + */ + public fun nearSphere( + property: KProperty, + geometry: Point, + maxDistance: Double? = null, + minDistance: Double? = null + ): Bson = property.nearSphere(geometry, maxDistance, minDistance) + + /** + * Creates a filter that matches all documents containing a property with geospatial data that is near the specified + * point using spherical geometry. + * + * @param x the x coordinate + * @param y the y coordinate + * @param maxDistance the maximum distance from the point, in radians + * @param minDistance the minimum distance from the point, in radians + * @return the filter + */ + @JvmSynthetic + @JvmName("nearSphereExt") + public fun KProperty.nearSphere( + x: Double, + y: Double, + maxDistance: Double? = null, + minDistance: Double? = null + ): Bson = Filters.nearSphere(path(), x, y, maxDistance, minDistance) + + /** + * Creates a filter that matches all documents containing a property with geospatial data that is near the specified + * point using spherical geometry. + * + * @param property the data class property + * @param x the x coordinate + * @param y the y coordinate + * @param maxDistance the maximum distance from the point, in radians + * @param minDistance the minimum distance from the point, in radians + * @return the filter + */ + public fun nearSphere( + property: KProperty, + x: Double, + y: Double, + maxDistance: Double? = null, + minDistance: Double? = null + ): Bson = property.nearSphere(x, y, maxDistance, minDistance) + + /** + * Creates a filter that matches all documents that validate against the given JSON schema document. + * + * @param schema the JSON schema to validate against + * @return the filter + */ + public fun jsonSchema(schema: Bson): Bson = Filters.jsonSchema(schema) +} diff --git a/driver-kotlin-extensions/src/main/kotlin/com/mongodb/kotlin/client/model/Indexes.kt b/driver-kotlin-extensions/src/main/kotlin/com/mongodb/kotlin/client/model/Indexes.kt new file mode 100644 index 00000000000..e87dad6400c --- /dev/null +++ b/driver-kotlin-extensions/src/main/kotlin/com/mongodb/kotlin/client/model/Indexes.kt @@ -0,0 +1,106 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * Copyright (C) 2016/2022 Litote + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @custom-license-header + */ +package com.mongodb.kotlin.client.model + +import com.mongodb.client.model.Indexes +import kotlin.reflect.KProperty +import org.bson.conversions.Bson + +/** + * Indexes extension methods to improve Kotlin interop + * + * @since 5.3 + */ +public object Indexes { + /** + * Create an index key for an ascending index on the given fields. + * + * @param properties the properties, which must contain at least one + * @return the index specification @mongodb.driver.manual core/indexes indexes + */ + public fun ascending(vararg properties: KProperty<*>): Bson = Indexes.ascending(properties.map { it.path() }) + + /** + * Create an index key for an ascending index on the given fields. + * + * @param properties the properties, which must contain at least one + * @return the index specification @mongodb.driver.manual core/indexes indexes + */ + public fun ascending(properties: Iterable>): Bson = Indexes.ascending(properties.map { it.path() }) + + /** + * Create an index key for a descending index on the given fields. + * + * @param properties the properties, which must contain at least one + * @return the index specification @mongodb.driver.manual core/indexes indexes + */ + public fun descending(vararg properties: KProperty<*>): Bson = Indexes.descending(properties.map { it.path() }) + + /** + * Create an index key for a descending index on the given fields. + * + * @param properties the properties, which must contain at least one + * @return the index specification @mongodb.driver.manual core/indexes indexes + */ + public fun descending(properties: Iterable>): Bson = Indexes.descending(properties.map { it.path() }) + + /** + * Create an index key for an 2dsphere index on the given fields. + * + * @param properties the properties, which must contain at least one + * @return the index specification @mongodb.driver.manual core/2dsphere 2dsphere Index + */ + public fun geo2dsphere(vararg properties: KProperty<*>): Bson = Indexes.geo2dsphere(properties.map { it.path() }) + + /** + * Create an index key for an 2dsphere index on the given fields. + * + * @param properties the properties, which must contain at least one + * @return the index specification @mongodb.driver.manual core/2dsphere 2dsphere Index + */ + public fun geo2dsphere(properties: Iterable>): Bson = Indexes.geo2dsphere(properties.map { it.path() }) + + /** + * Create an index key for a text index on the given property. + * + * @param property the property to create a text index on + * @return the index specification @mongodb.driver.manual core/text text index + */ + public fun text(property: KProperty): Bson = Indexes.text(property.path()) + + /** + * Create an index key for a hashed index on the given property. + * + * @param property the property to create a hashed index on + * @return the index specification @mongodb.driver.manual core/hashed hashed index + */ + public fun hashed(property: KProperty): Bson = Indexes.hashed(property.path()) + + /** + * Create an index key for a 2d index on the given field. + * + *
+ * <p><strong>Note:</strong> A 2d index is for data stored as points on a two-dimensional plane. The 2d index is
+ * intended for legacy coordinate pairs used in MongoDB 2.2 and earlier.</p>
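+ * A usage sketch (the `Venue` data class and `collection` value are hypothetical):
+ * ```
+ * data class Venue(val coordinates: List<Double>)
+ * collection.createIndex(Indexes.geo2d(Venue::coordinates))
+ * ```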
+ * + * @param property the property to create a 2d index on + * @return the index specification @mongodb.driver.manual core/2d 2d index + */ + public fun geo2d(property: KProperty): Bson = Indexes.geo2d(property.path()) +} diff --git a/driver-kotlin-extensions/src/main/kotlin/com/mongodb/kotlin/client/model/Projections.kt b/driver-kotlin-extensions/src/main/kotlin/com/mongodb/kotlin/client/model/Projections.kt new file mode 100644 index 00000000000..d8f09d73be1 --- /dev/null +++ b/driver-kotlin-extensions/src/main/kotlin/com/mongodb/kotlin/client/model/Projections.kt @@ -0,0 +1,347 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * Copyright (C) 2016/2022 Litote + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @custom-license-header + */ +package com.mongodb.kotlin.client.model + +import com.mongodb.annotations.Beta +import com.mongodb.annotations.Reason +import com.mongodb.client.model.Aggregates +import com.mongodb.client.model.Projections +import kotlin.reflect.KProperty +import kotlin.reflect.KProperty1 +import org.bson.conversions.Bson + +/** + * Projection extension methods to improve Kotlin interop + * + * @since 5.3 + */ +public object Projections { + + /** The projection of the property. This is used in an aggregation pipeline to reference a property from a path. */ + public val KProperty.projection: String + get() = path().projection + + /** The projection of the property. */ + public val String.projection: String + get() = "\$$this" + + /** In order to write `$p.p2` */ + @JvmSynthetic public infix fun KProperty1.projectionWith(p2: String): String = "$projection.$p2" + + /** + * Creates a projection of a property whose value is computed from the given expression. Projection with an + * expression can be used in the following contexts: + *
+ * <ul>
+ *   <li>$project aggregation pipeline stage.</li>
+ *   <li>Starting from MongoDB 4.4, it's also accepted in various find-related methods within the {@code
+ *     MongoCollection}-based API where projection is supported, for example:
+ *     <ul>
+ *       <li>{@code find()}</li>
+ *       <li>{@code findOneAndReplace()}</li>
+ *       <li>{@code findOneAndUpdate()}</li>
+ *       <li>{@code findOneAndDelete()}</li>
+ *     </ul>
+ *   </li>
+ * </ul>
+ * + * @param expression the expression + * @param the expression type + * @return the projection + * @see #computedSearchMeta(String) + * @see Aggregates#project(Bson) + */ + @JvmSynthetic + @JvmName("computedFromExt") + public infix fun KProperty.computed(expression: Any): Bson = + Projections.computed(path(), (expression as? KProperty<*>)?.projection ?: expression) + + /** + * Creates a projection of a property whose value is computed from the given expression. Projection with an + * expression can be used in the following contexts: + *
+ * <ul>
+ *   <li>$project aggregation pipeline stage.</li>
+ *   <li>Starting from MongoDB 4.4, it's also accepted in various find-related methods within the {@code
+ *     MongoCollection}-based API where projection is supported, for example:
+ *     <ul>
+ *       <li>{@code find()}</li>
+ *       <li>{@code findOneAndReplace()}</li>
+ *       <li>{@code findOneAndUpdate()}</li>
+ *       <li>{@code findOneAndDelete()}</li>
+ *     </ul>
+ *   </li>
+ * </ul>
+ * + * @param property the data class property + * @param expression the expression + * @param the expression type + * @return the projection + * @see #computedSearchMeta(String) + * @see Aggregates#project(Bson) + */ + public fun computed(property: KProperty, expression: Any): Bson = property.computed(expression) + + /** + * Creates a projection of a String whose value is computed from the given expression. Projection with an expression + * can be used in the following contexts: + *
+ * <ul>
+ *   <li>$project aggregation pipeline stage.</li>
+ *   <li>Starting from MongoDB 4.4, it's also accepted in various find-related methods within the {@code
+ *     MongoCollection}-based API where projection is supported, for example:
+ *     <ul>
+ *       <li>{@code find()}</li>
+ *       <li>{@code findOneAndReplace()}</li>
+ *       <li>{@code findOneAndUpdate()}</li>
+ *       <li>{@code findOneAndDelete()}</li>
+ *     </ul>
+ *   </li>
+ * </ul>
+ * + * @param expression the expression + * @return the projection + * @see #computedSearchMeta(String) + * @see Aggregates#project(Bson) + */ + @JvmSynthetic + @JvmName("computedFromExt") + public infix fun String.computed(expression: Any): Bson = + @Suppress("UNCHECKED_CAST") + Projections.computed(this, (expression as? KProperty)?.projection ?: expression) + + /** + * Creates a projection of a String whose value is computed from the given expression. Projection with an expression + * can be used in the following contexts: + *
+ * <ul>
+ *   <li>$project aggregation pipeline stage.</li>
+ *   <li>Starting from MongoDB 4.4, it's also accepted in various find-related methods within the {@code
+ *     MongoCollection}-based API where projection is supported, for example:
+ *     <ul>
+ *       <li>{@code find()}</li>
+ *       <li>{@code findOneAndReplace()}</li>
+ *       <li>{@code findOneAndUpdate()}</li>
+ *       <li>{@code findOneAndDelete()}</li>
+ *     </ul>
+ *   </li>
+ * </ul>
+ * + * @param property the data class property + * @param expression the expression + * @return the projection + * @see #computedSearchMeta(String) + * @see Aggregates#project(Bson) + */ + public fun computed(property: String, expression: Any): Bson = property.computed(expression) + + /** + * Creates a projection of a property whose value is equal to the {@code $$SEARCH_META} variable. for use with + * {@link Aggregates#search(SearchOperator, SearchOptions)} / {@link Aggregates#search(SearchCollector, + * SearchOptions)}. Calling this method is equivalent to calling {@link #computed(String, Object)} with {@code + * "$$SEARCH_META"} as the second argument. + * + * @param property the data class property + * @return the projection + * @see #computed(String) + * @see Aggregates#project(Bson) + */ + @JvmSynthetic + @JvmName("computedSearchMetaExt") + public fun KProperty.computedSearchMeta(): Bson = Projections.computedSearchMeta(path()) + + /** + * Creates a projection of a property whose value is equal to the {@code $$SEARCH_META} variable. for use with + * {@link Aggregates#search(SearchOperator, SearchOptions)} / {@link Aggregates#search(SearchCollector, + * SearchOptions)}. Calling this method is equivalent to calling {@link #computed(String, Object)} with {@code + * "$$SEARCH_META"} as the second argument. + * + * @param property the data class property + * @return the projection + * @see #computed(String) + * @see Aggregates#project(Bson) + */ + public fun computedSearchMeta(property: KProperty): Bson = property.computedSearchMeta() + + /** + * Creates a projection that includes all of the given properties. + * + * @param properties the field names + * @return the projection + */ + public fun include(vararg properties: KProperty<*>): Bson = include(properties.asList()) + + /** + * Creates a projection that includes all of the given properties. + * + * @param properties the field names + * @return the projection + */ + public fun include(properties: Iterable>): Bson = Projections.include(properties.map { it.path() }) + + /** + * Creates a projection that excludes all of the given properties. + * + * @param properties the field names + * @return the projection + */ + public fun exclude(vararg properties: KProperty<*>): Bson = exclude(properties.asList()) + + /** + * Creates a projection that excludes all of the given properties. + * + * @param properties the field names + * @return the projection + */ + public fun exclude(properties: Iterable>): Bson = Projections.exclude(properties.map { it.path() }) + + /** + * Creates a projection that excludes the _id field. This suppresses the automatic inclusion of _id that is the + * default, even when other fields are explicitly included. + * + * @return the projection + */ + public fun excludeId(): Bson = Projections.excludeId() + + /** + * Creates a projection that includes for the given property only the first element of an array that matches the + * query filter. This is referred to as the positional $ operator. + * + * @return the projection @mongodb.driver.manual reference/operator/projection/positional/#projection Project the + * first matching element ($ operator) + */ + public val KProperty.elemMatch: Bson + get() = Projections.elemMatch(path()) + + /** + * Creates a projection that includes for the given property only the first element of the array value of that field + * that matches the given query filter. 
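+ * A usage sketch (the data classes are hypothetical; the inner filter uses the Java `Filters` builder):
+ * ```
+ * data class Grade(val score: Int)
+ * data class Student(val grades: List<Grade>)
+ * // include only the first element of "grades" whose score is at least 90
+ * val projection = Student::grades elemMatch com.mongodb.client.model.Filters.gte("score", 90)
+ * ```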
+ * + * @param filter the filter to apply + * @return the projection @mongodb.driver.manual reference/operator/projection/elemMatch elemMatch + */ + @JvmSynthetic + @JvmName("elemMatchProjExt") + public infix fun KProperty.elemMatch(filter: Bson): Bson = Projections.elemMatch(path(), filter) + + /** + * Creates a projection that includes for the given property only the first element of the array value of that field + * that matches the given query filter. + * + * @param property the data class property + * @param filter the filter to apply + * @return the projection @mongodb.driver.manual reference/operator/projection/elemMatch elemMatch + */ + public fun elemMatch(property: KProperty, filter: Bson): Bson = property.elemMatch(filter) + + /** + * Creates a $meta projection for the given property + * + * @param metaFieldName the meta field name + * @return the projection @mongodb.driver.manual reference/operator/aggregation/meta/ + * @see #metaTextScore(String) + * @see #metaSearchScore(String) + * @see #metaVectorSearchScore(String) + * @see #metaSearchHighlights(String) + */ + @JvmSynthetic + @JvmName("metaExt") + public infix fun KProperty.meta(metaFieldName: String): Bson = Projections.meta(path(), metaFieldName) + + /** + * Creates a $meta projection for the given property + * + * @param property the data class property + * @param metaFieldName the meta field name + * @return the projection @mongodb.driver.manual reference/operator/aggregation/meta/ + * @see #metaTextScore(String) + * @see #metaSearchScore(String) + * @see #metaVectorSearchScore(String) + * @see #metaSearchHighlights(String) + */ + public fun meta(property: KProperty, metaFieldName: String): Bson = property.meta(metaFieldName) + + /** + * Creates a textScore projection for the given property, for use with text queries. Calling this method is + * equivalent to calling {@link #meta(String)} with {@code "textScore"} as the argument. + * + * @return the projection + * @see Filters#text(String, TextSearchOptions) @mongodb.driver.manual + * reference/operator/aggregation/meta/#text-score-metadata--meta---textscore- textScore + */ + public fun KProperty.metaTextScore(): Bson = Projections.metaTextScore(path()) + + /** + * Creates a searchScore projection for the given property, for use with {@link Aggregates#search(SearchOperator, + * SearchOptions)} / {@link Aggregates#search(SearchCollector, SearchOptions)}. Calling this method is equivalent to + * calling {@link #meta(String, String)} with {@code "searchScore"} as the argument. + * + * @return the projection @mongodb.atlas.manual atlas-search/scoring/ Scoring + */ + public fun KProperty.metaSearchScore(): Bson = Projections.metaSearchScore(path()) + + /** + * Creates a vectorSearchScore projection for the given property, for use with {@link + * Aggregates#vectorSearch(FieldSearchPath, Iterable, String, long, VectorSearchOptions)} . Calling this method is + * equivalent to calling {@link #meta(String, String)} with {@code "vectorSearchScore"} as the argument. + * + * @return the projection @mongodb.atlas.manual atlas-search/scoring/ Scoring @mongodb.server.release 6.0.10 + */ + @Beta(Reason.SERVER) + public fun KProperty.metaVectorSearchScore(): Bson = Projections.metaVectorSearchScore(path()) + + /** + * Creates a searchHighlights projection for the given property, for use with {@link + * Aggregates#search(SearchOperator, SearchOptions)} / {@link Aggregates#search(SearchCollector, SearchOptions)}. 
+ * Calling this method is equivalent to calling {@link #meta(String, String)} with {@code "searchHighlights"} as the + * argument. + * + * @return the projection + * @see com.mongodb.client.model.search.SearchHighlight @mongodb.atlas.manual atlas-search/highlighting/ + * Highlighting + */ + public fun KProperty.metaSearchHighlights(): Bson = Projections.metaSearchHighlights(path()) + + /** + * Creates a projection to the given property of a slice of the array value of that field. + * + * @param limit the number of elements to project. + * @return the projection @mongodb.driver.manual reference/operator/projection/slice Slice + */ + public infix fun KProperty.slice(limit: Int): Bson = Projections.slice(path(), limit) + + /** + * Creates a projection to the given property of a slice of the array value of that field. + * + * @param skip the number of elements to skip before applying the limit + * @param limit the number of elements to project + * @return the projection @mongodb.driver.manual reference/operator/projection/slice Slice + */ + public fun KProperty.slice(skip: Int, limit: Int): Bson = Projections.slice(path(), skip, limit) + + /** + * Creates a projection that combines the list of projections into a single one. If there are duplicate keys, the + * last one takes precedence. + * + * @param projections the list of projections to combine + * @return the combined projection + */ + public fun fields(vararg projections: Bson): Bson = Projections.fields(*projections) + + /** + * Creates a projection that combines the list of projections into a single one. If there are duplicate keys, the + * last one takes precedence. + * + * @param projections the list of projections to combine + * @return the combined projection @mongodb.driver.manual + */ + public fun fields(projections: List): Bson = Projections.fields(projections) +} diff --git a/driver-kotlin-extensions/src/main/kotlin/com/mongodb/kotlin/client/model/Properties.kt b/driver-kotlin-extensions/src/main/kotlin/com/mongodb/kotlin/client/model/Properties.kt new file mode 100644 index 00000000000..97ebae27d63 --- /dev/null +++ b/driver-kotlin-extensions/src/main/kotlin/com/mongodb/kotlin/client/model/Properties.kt @@ -0,0 +1,139 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * Copyright (C) 2016/2022 Litote + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * @custom-license-header + */ +package com.mongodb.kotlin.client.model + +import com.mongodb.kotlin.client.property.KCollectionSimplePropertyPath +import com.mongodb.kotlin.client.property.KMapSimplePropertyPath +import com.mongodb.kotlin.client.property.KPropertyPath +import com.mongodb.kotlin.client.property.KPropertyPath.Companion.CustomProperty +import java.util.concurrent.ConcurrentHashMap +import kotlin.reflect.KProperty +import kotlin.reflect.KProperty1 +import kotlin.reflect.full.findParameterByName +import kotlin.reflect.full.primaryConstructor +import kotlin.reflect.jvm.internal.ReflectProperties.lazySoft +import kotlin.reflect.jvm.javaField +import org.bson.codecs.pojo.annotations.BsonId +import org.bson.codecs.pojo.annotations.BsonProperty + +private val pathCache: MutableMap by lazySoft { ConcurrentHashMap() } + +/** Returns a composed property. For example Friend::address / Address::postalCode = "address.postalCode". */ +public operator fun KProperty1.div(p2: KProperty1): KProperty1 = + KPropertyPath(this, p2) + +/** + * Returns a composed property without type checks. For example Friend::address % Address::postalCode = + * "address.postalCode". + */ +public operator fun KProperty1.rem(p2: KProperty1): KProperty1 = + KPropertyPath(this, p2) + +/** + * Returns a collection composed property. For example Friend::addresses / Address::postalCode = "addresses.postalCode". + */ +@JvmName("divCol") +public operator fun KProperty1?>.div(p2: KProperty1): KProperty1 = + KPropertyPath(this, p2) + +/** Returns a map composed property. For example Friend::addresses / Address::postalCode = "addresses.postalCode". */ +@JvmName("divMap") +public operator fun KProperty1?>.div( + p2: KProperty1 +): KProperty1 = KPropertyPath(this, p2) + +/** + * Returns a mongo path of a property. + * + * The path name is computed by checking the following and picking the first value to exist: + * - SerialName annotation value + * - BsonId annotation use '_id' + * - BsonProperty annotation + * - Property name + */ +public fun KProperty.path(): String { + return if (this is KPropertyPath<*, T>) { + this.name + } else { + pathCache.computeIfAbsent(hashCode()) { + // Check serial name - Note kotlinx.serialization.SerialName may not be on the class + // path + val serialName = + annotations.firstOrNull { it.annotationClass.qualifiedName == "kotlinx.serialization.SerialName" } + var path = + serialName?.annotationClass?.members?.firstOrNull { it.name == "value" }?.call(serialName) as String? + + // If no path (serialName) then check for BsonId / BsonProperty + if (path == null) { + val originator = if (this is CustomProperty<*, *>) this.previous.property else this + // If this property is calculated (doesn't have a backing field) ex + // "(Student::grades / Grades::score).posOp then + // originator.javaField will NPE. + // Only read various annotations on a declared property with a backing field + if (originator.javaField != null) { + val constructorProperty = + originator.javaField!!.declaringClass.kotlin.primaryConstructor?.findParameterByName(this.name) + + // Prefer BsonId annotation over BsonProperty + path = constructorProperty?.annotations?.filterIsInstance()?.firstOrNull()?.let { "_id" } + path = + path ?: constructorProperty?.annotations?.filterIsInstance()?.firstOrNull()?.value + } + path = path ?: this.name + } + path + } + } +} + +/** Returns a collection property. 
*/ +public val KProperty1?>.colProperty: KCollectionSimplePropertyPath + get() = KCollectionSimplePropertyPath(null, this) + +/** In order to write array indexed expressions (like `accesses.0.timestamp`). */ +public fun KProperty1?>.pos(position: Int): KPropertyPath = + colProperty.pos(position) + +/** Returns a map property. */ +public val KProperty1?>.mapProperty: KMapSimplePropertyPath + get() = KMapSimplePropertyPath(null, this) + +@Suppress("MaxLineLength") +/** + * [The positional array operator $ (projection or update)](https://docs.mongodb.com/manual/reference/operator/update/positional/) + */ +public val KProperty1?>.posOp: KPropertyPath + get() = colProperty.posOp + +@Suppress("MaxLineLength") +/** [The all positional operator $[]](https://docs.mongodb.com/manual/reference/operator/update/positional-all/) */ +public val KProperty1?>.allPosOp: KPropertyPath + get() = colProperty.allPosOp + +@Suppress("MaxLineLength") +/** + * [The filtered positional operator $[\]](https://docs.mongodb.com/manual/reference/operator/update/positional-filtered/) + */ +public fun KProperty1?>.filteredPosOp(identifier: String): KPropertyPath = + colProperty.filteredPosOp(identifier) + +/** Key projection of map. Sample: `p.keyProjection(Locale.ENGLISH) / Gift::amount` */ +@Suppress("UNCHECKED_CAST") +public fun KProperty1?>.keyProjection(key: K): KPropertyPath = + mapProperty.keyProjection(key) as KPropertyPath diff --git a/driver-kotlin-extensions/src/main/kotlin/com/mongodb/kotlin/client/model/Sorts.kt b/driver-kotlin-extensions/src/main/kotlin/com/mongodb/kotlin/client/model/Sorts.kt new file mode 100644 index 00000000000..4464026d39e --- /dev/null +++ b/driver-kotlin-extensions/src/main/kotlin/com/mongodb/kotlin/client/model/Sorts.kt @@ -0,0 +1,71 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * Copyright (C) 2016/2022 Litote + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @custom-license-header + */ +package com.mongodb.kotlin.client.model + +import com.mongodb.client.model.Sorts +import kotlin.reflect.KProperty +import org.bson.conversions.Bson + +/** + * Sorts extension methods to improve Kotlin interop + * + * @since 5.3 + */ +public object Sorts { + + /** + * Create a sort specification for an ascending sort on the given properties. + * + * @param properties the properties, which must contain at least one + * @return the sort specification @mongodb.driver.manual reference/operator/meta/orderby Sort + */ + public fun ascending(vararg properties: KProperty<*>): Bson = ascending(properties.asList()) + + /** + * Create a sort specification for an ascending sort on the given properties. + * + * @param properties the properties, which must contain at least one + * @return the sort specification @mongodb.driver.manual reference/operator/meta/orderby Sort + */ + public fun ascending(properties: List>): Bson = Sorts.ascending(properties.map { it.path() }) + + /** + * Create a sort specification for a descending sort on the given properties. 
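+ * A usage sketch (the `Person` data class is hypothetical):
+ * ```
+ * data class Person(val age: Int, val name: String)
+ * // sorts by age, then name, both descending
+ * val sort = Sorts.descending(Person::age, Person::name)
+ * ```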
+ * + * @param properties the properties, which must contain at least one + * @return the sort specification @mongodb.driver.manual reference/operator/meta/orderby Sort + */ + public fun descending(vararg properties: KProperty<*>): Bson = descending(properties.asList()) + + /** + * Create a sort specification for a descending sort on the given properties. + * + * @param properties the properties, which must contain at least one + * @return the sort specification @mongodb.driver.manual reference/operator/meta/orderby Sort + */ + public fun descending(properties: List>): Bson = Sorts.descending(properties.map { it.path() }) + + /** + * Create a sort specification for the text score meta projection on the given property. + * + * @param property the data class property + * @return the sort specification @mongodb.driver.manual reference/operator/getProjection/meta/#sort textScore + */ + public fun metaTextScore(property: KProperty): Bson = Sorts.metaTextScore(property.path()) +} diff --git a/driver-kotlin-extensions/src/main/kotlin/com/mongodb/kotlin/client/model/Updates.kt b/driver-kotlin-extensions/src/main/kotlin/com/mongodb/kotlin/client/model/Updates.kt new file mode 100644 index 00000000000..4bb272b9fb5 --- /dev/null +++ b/driver-kotlin-extensions/src/main/kotlin/com/mongodb/kotlin/client/model/Updates.kt @@ -0,0 +1,506 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * Copyright (C) 2016/2022 Litote + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @custom-license-header + */ +package com.mongodb.kotlin.client.model + +import com.mongodb.client.model.PushOptions +import com.mongodb.client.model.UpdateOptions +import com.mongodb.client.model.Updates +import kotlin.internal.OnlyInputTypes +import kotlin.reflect.KProperty +import org.bson.conversions.Bson + +/** + * Updates extension methods to improve Kotlin interop + * + * @since 5.3 + */ +@Suppress("TooManyFunctions") +public object Updates { + + /** + * Creates an update that sets the value of the property to the given value. + * + * @param value the value + * @param the value type + * @return the update @mongodb.driver.manual reference/operator/update/set/ $set + */ + @JvmSynthetic + @JvmName("setExt") + public infix fun <@OnlyInputTypes T> KProperty.set(value: T?): Bson = Updates.set(path(), value) + + /** + * Creates an update that sets the value of the property to the given value. + * + * @param property the data class property + * @param value the value + * @param the value type + * @return the update @mongodb.driver.manual reference/operator/update/set/ $set + */ + public fun <@OnlyInputTypes T> set(property: KProperty, value: T?): Bson = property.set(value) + + /** + * Combine a list of updates into a single update. + * + * @param updates the list of updates + * @return a combined update + */ + public fun combine(vararg updates: Bson): Bson = Updates.combine(*updates) + + /** + * Combine a list of updates into a single update. 
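+ * A usage sketch (the `Person` data class and values are hypothetical; `set` is the infix update above):
+ * ```
+ * data class Person(val name: String, val age: Int)
+ * val update = combine(listOf(Person::name set "Ada", Person::age set 36))
+ * ```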
+ * + * @param updates the list of updates + * @return a combined update + */ + public fun combine(updates: List): Bson = Updates.combine(updates) + + /** + * Creates an update that deletes the property with the given name. + * + * @param property the property + * @return the update @mongodb.driver.manual reference/operator/update/unset/ $unset + */ + public fun unset(property: KProperty): Bson = Updates.unset(property.path()) + + /** + * Creates an update that sets the value of the property to the given value, but only if the update is an upsert + * that results in an insert of a document. + * + * @param value the value + * @param the value type + * @return the update @mongodb.driver.manual reference/operator/update/setOnInsert/ $setOnInsert + * @see UpdateOptions#upsert(boolean) + */ + @JvmSynthetic + @JvmName("setOnInsertExt") + public infix fun <@OnlyInputTypes T> KProperty.setOnInsert(value: T?): Bson = Updates.setOnInsert(path(), value) + + /** + * Creates an update that sets the value of the property to the given value, but only if the update is an upsert + * that results in an insert of a document. + * + * @param property the property + * @param value the value + * @param the value type + * @return the update @mongodb.driver.manual reference/operator/update/setOnInsert/ $setOnInsert + * @see UpdateOptions#upsert(boolean) + */ + public fun <@OnlyInputTypes T> setOnInsert(property: KProperty, value: T?): Bson = property.setOnInsert(value) + + /** + * Creates an update that renames a field. + * + * @param newProperty the new property + * @return the update @mongodb.driver.manual reference/operator/update/rename/ $rename + */ + @JvmSynthetic + @JvmName("renameExt") + public infix fun <@OnlyInputTypes T> KProperty.rename(newProperty: KProperty): Bson = + Updates.rename(path(), newProperty.path()) + + /** + * Creates an update that renames a field. + * + * @param property the property + * @param newProperty the new property + * @return the update @mongodb.driver.manual reference/operator/update/rename/ $rename + */ + public fun <@OnlyInputTypes T> rename(property: KProperty, newProperty: KProperty): Bson = + property.rename(newProperty) + + /** + * Creates an update that increments the value of the property by the given value. + * + * @param number the value + * @return the update @mongodb.driver.manual reference/operator/update/inc/ $inc + */ + @JvmSynthetic + @JvmName("incExt") + public infix fun KProperty.inc(number: Number): Bson = Updates.inc(path(), number) + + /** + * Creates an update that increments the value of the property by the given value. + * + * @param property the property + * @param number the value + * @return the update @mongodb.driver.manual reference/operator/update/inc/ $inc + */ + public fun inc(property: KProperty, number: Number): Bson = property.inc(number) + + /** + * Creates an update that multiplies the value of the property by the given number. + * + * @param number the non-null number + * @return the update @mongodb.driver.manual reference/operator/update/mul/ $mul + */ + @JvmSynthetic + @JvmName("mulExt") + public infix fun KProperty.mul(number: Number): Bson = Updates.mul(path(), number) + + /** + * Creates an update that multiplies the value of the property by the given number. 
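+ * For example, a 10% price increase, as a sketch (the `Item` data class is hypothetical):
+ * ```
+ * data class Item(val price: Double)
+ * // multiplies "price" by 1.1
+ * val update = mul(Item::price, 1.1)
+ * ```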
+     *
+     * @param property the property
+     * @param number the non-null number
+     * @return the update @mongodb.driver.manual reference/operator/update/mul/ $mul
+     */
+    public fun mul(property: KProperty<Number?>, number: Number): Bson = property.mul(number)
+
+    /**
+     * Creates an update that sets the value of the property if the given value is less than the current value of the
+     * property.
+     *
+     * @param value the value
+     * @param <T> the value type
+     * @return the update @mongodb.driver.manual reference/operator/update/min/ $min
+     */
+    @JvmSynthetic
+    @JvmName("minExt")
+    public infix fun <@OnlyInputTypes T> KProperty<T?>.min(value: T): Bson = Updates.min(path(), value)
+
+    /**
+     * Creates an update that sets the value of the property if the given value is less than the current value of the
+     * property.
+     *
+     * @param property the property
+     * @param value the value
+     * @param <T> the value type
+     * @return the update @mongodb.driver.manual reference/operator/update/min/ $min
+     */
+    public fun <@OnlyInputTypes T> min(property: KProperty<T?>, value: T): Bson = property.min(value)
+
+    /**
+     * Creates an update that sets the value of the property if the given value is greater than the current value of
+     * the property.
+     *
+     * @param value the value
+     * @param <T> the value type
+     * @return the update @mongodb.driver.manual reference/operator/update/max/ $max
+     */
+    @JvmSynthetic
+    @JvmName("maxExt")
+    public infix fun <@OnlyInputTypes T> KProperty<T?>.max(value: T): Bson = Updates.max(path(), value)
+
+    /**
+     * Creates an update that sets the value of the property if the given value is greater than the current value of
+     * the property.
+     *
+     * @param property the property
+     * @param value the value
+     * @param <T> the value type
+     * @return the update @mongodb.driver.manual reference/operator/update/max/ $max
+     */
+    public fun <@OnlyInputTypes T> max(property: KProperty<T?>, value: T): Bson = property.max(value)
+
+    /**
+     * Creates an update that sets the value of the property to the current date as a BSON date.
+     *
+     * @param property the property
+     * @return the update @mongodb.driver.manual reference/operator/update/currentDate/
+     *   $currentDate @mongodb.driver.manual reference/bson-types/#date Date
+     */
+    public fun <T> currentDate(property: KProperty<T>): Bson = Updates.currentDate(property.path())
+
+    /**
+     * Creates an update that sets the value of the property to the current date as a BSON timestamp.
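+     *
+     * A brief usage sketch (illustrative only; `AuditEntry` is a hypothetical data class with a `modifiedAt`
+     * property):
+     * ```
+     * // Renders as {"$currentDate": {"modifiedAt": {"$type": "timestamp"}}}
+     * val update = currentTimestamp(AuditEntry::modifiedAt)
+     * ```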
+     *
+     * @param property the property
+     * @return the update @mongodb.driver.manual reference/operator/update/currentDate/
+     *   $currentDate @mongodb.driver.manual reference/bson-types/#document-bson-type-timestamp Timestamp
+     */
+    public fun <T> currentTimestamp(property: KProperty<T>): Bson = Updates.currentTimestamp(property.path())
+
+    /**
+     * Creates an update that adds the given value to the array value of the property, unless the value is already
+     * present, in which case it does nothing.
+     *
+     * @param value the value
+     * @param <T> the value type
+     * @return the update @mongodb.driver.manual reference/operator/update/addToSet/ $addToSet
+     */
+    @JvmSynthetic
+    @JvmName("addToSetExt")
+    public infix fun <@OnlyInputTypes T> KProperty<Iterable<T>?>.addToSet(value: T): Bson =
+        Updates.addToSet(path(), value)
+
+    /**
+     * Creates an update that adds the given value to the array value of the property, unless the value is already
+     * present, in which case it does nothing.
+     *
+     * @param property the property
+     * @param value the value
+     * @param <T> the value type
+     * @return the update @mongodb.driver.manual reference/operator/update/addToSet/ $addToSet
+     */
+    public fun <@OnlyInputTypes T> addToSet(property: KProperty<Iterable<T>?>, value: T): Bson =
+        property.addToSet(value)
+
+    /**
+     * Creates an update that adds each of the given values to the array value of the property, unless the value is
+     * already present, in which case it does nothing.
+     *
+     * @param property the property
+     * @param values the values
+     * @param <T> the value type
+     * @return the update @mongodb.driver.manual reference/operator/update/addToSet/ $addToSet
+     */
+    public fun <@OnlyInputTypes T> addEachToSet(property: KProperty<Iterable<T>?>, values: List<T>): Bson =
+        Updates.addEachToSet(property.path(), values)
+
+    /**
+     * Creates an update that adds the given value to the array value of the property.
+     *
+     * @param property the property
+     * @param value the value
+     * @param <T> the value type
+     * @return the update @mongodb.driver.manual reference/operator/update/push/ $push
+     */
+    public fun <@OnlyInputTypes T> push(property: KProperty<Iterable<T>?>, value: T): Bson =
+        Updates.push(property.path(), value)
+
+    /**
+     * Creates an update that adds each of the given values to the array value of the property, applying the given
+     * options for positioning the pushed values, and then slicing and/or sorting the array.
+     *
+     * @param property the property
+     * @param values the values
+     * @param options the non-null push options
+     * @param <T> the value type
+     * @return the update @mongodb.driver.manual reference/operator/update/push/ $push
+     */
+    public fun <@OnlyInputTypes T> pushEach(
+        property: KProperty<Iterable<T>?>,
+        values: List<T>,
+        options: PushOptions = PushOptions()
+    ): Bson = Updates.pushEach(property.path(), values, options)
+
+    /**
+     * Creates an update that removes all instances of the given value from the array value of the property.
+     *
+     * @param value the value
+     * @param <T> the value type
+     * @return the update @mongodb.driver.manual reference/operator/update/pull/ $pull
+     */
+    @JvmSynthetic
+    @JvmName("pullExt")
+    public infix fun <@OnlyInputTypes T> KProperty<Iterable<T>?>.pull(value: T?): Bson = Updates.pull(path(), value)
+
+    /**
+     * Creates an update that removes all instances of the given value from the array value of the property.
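+     *
+     * A brief usage sketch (illustrative only; `Person` is a hypothetical data class with a `scores: List<Int>`
+     * property):
+     * ```
+     * val grow = push(Person::scores, 90)  // {"$push": {"scores": 90}}
+     * val shrink = Person::scores pull 10  // {"$pull": {"scores": 10}}
+     * ```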
+     *
+     * @param property the property
+     * @param value the value
+     * @param <T> the value type
+     * @return the update @mongodb.driver.manual reference/operator/update/pull/ $pull
+     */
+    public fun <@OnlyInputTypes T> pull(property: KProperty<Iterable<T>?>, value: T?): Bson = property.pull(value)
+
+    /**
+     * Creates an update that removes from the array value of the property all elements that match the given filter.
+     *
+     * @param filter the query filter
+     * @return the update @mongodb.driver.manual reference/operator/update/pull/ $pull
+     */
+    @JvmSynthetic
+    @JvmName("pullByFilterExt")
+    public infix fun KProperty<*>.pullByFilter(filter: Bson): Bson = Updates.pull(path(), filter)
+
+    /**
+     * Creates an update that removes from the array value of the property all elements that match the given filter.
+     *
+     * @param property the property
+     * @param filter the query filter
+     * @return the update @mongodb.driver.manual reference/operator/update/pull/ $pull
+     */
+    public fun pullByFilter(property: KProperty<*>, filter: Bson): Bson = property.pullByFilter(filter)
+
+    /**
+     * Creates an update that removes from an array all elements that match the given filter.
+     *
+     * @param filter the query filter
+     * @return the update @mongodb.driver.manual reference/operator/update/pull/ $pull
+     */
+    public fun pullByFilter(filter: Bson): Bson = Updates.pullByFilter(filter)
+
+    /**
+     * Creates an update that removes all instances of the given values from the array value of the property.
+     *
+     * @param values the values
+     * @param <T> the value type
+     * @return the update @mongodb.driver.manual reference/operator/update/pullAll/ $pullAll
+     */
+    @JvmSynthetic
+    @JvmName("pullAllExt")
+    public infix fun <@OnlyInputTypes T> KProperty<Iterable<T>?>.pullAll(values: List<T>?): Bson =
+        Updates.pullAll(path(), values ?: emptyList())
+
+    /**
+     * Creates an update that removes all instances of the given values from the array value of the property.
+     *
+     * @param property the property
+     * @param values the values
+     * @param <T> the value type
+     * @return the update @mongodb.driver.manual reference/operator/update/pullAll/ $pullAll
+     */
+    public fun <@OnlyInputTypes T> pullAll(property: KProperty<Iterable<T>?>, values: List<T>?): Bson =
+        property.pullAll(values ?: emptyList())
+
+    /**
+     * Creates an update that pops the first element of an array that is the value of the property.
+     *
+     * @param property the property
+     * @return the update @mongodb.driver.manual reference/operator/update/pop/ $pop
+     */
+    public fun <T> popFirst(property: KProperty<T>): Bson = Updates.popFirst(property.path())
+
+    /**
+     * Creates an update that pops the last element of an array that is the value of the property.
+     *
+     * @param property the property
+     * @return the update @mongodb.driver.manual reference/operator/update/pop/ $pop
+     */
+    public fun <T> popLast(property: KProperty<T>): Bson = Updates.popLast(property.path())
+
+    /**
+     * Creates an update that performs a bitwise and between the given integer value and the integral value of the
+     * property.
+     *
+     * @param value the value
+     * @return the update @mongodb.driver.manual reference/operator/update/bit/ $bit
+     */
+    @JvmSynthetic
+    @JvmName("bitwiseAndExt")
+    public infix fun KProperty<Number?>.bitwiseAnd(value: Int): Bson = Updates.bitwiseAnd(path(), value)
+
+    /**
+     * Creates an update that performs a bitwise and between the given integer value and the integral value of the
+     * property.
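+     *
+     * A brief usage sketch (illustrative only; `Device` is a hypothetical data class with a `flags: Int` property):
+     * ```
+     * // Renders as {"$bit": {"flags": {"and": 6}}}
+     * val update = Device::flags bitwiseAnd 0b0110
+     * ```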
+     *
+     * @param property the property
+     * @param value the value
+     * @return the update @mongodb.driver.manual reference/operator/update/bit/ $bit
+     */
+    public fun bitwiseAnd(property: KProperty<Number?>, value: Int): Bson = property.bitwiseAnd(value)
+
+    /**
+     * Creates an update that performs a bitwise and between the given long value and the integral value of the
+     * property.
+     *
+     * @param value the value
+     * @return the update @mongodb.driver.manual reference/operator/update/bit/ $bit
+     */
+    @JvmSynthetic
+    @JvmName("bitwiseAndExt")
+    public infix fun KProperty<Number?>.bitwiseAnd(value: Long): Bson = Updates.bitwiseAnd(path(), value)
+
+    /**
+     * Creates an update that performs a bitwise and between the given long value and the integral value of the
+     * property.
+     *
+     * @param property the property
+     * @param value the value
+     * @return the update @mongodb.driver.manual reference/operator/update/bit/ $bit
+     */
+    public fun bitwiseAnd(property: KProperty<Number?>, value: Long): Bson = property.bitwiseAnd(value)
+
+    /**
+     * Creates an update that performs a bitwise or between the given integer value and the integral value of the
+     * property.
+     *
+     * @param value the value
+     * @return the update @mongodb.driver.manual reference/operator/update/bit/ $bit
+     */
+    @JvmSynthetic
+    @JvmName("bitwiseOrExt")
+    public infix fun KProperty<Number?>.bitwiseOr(value: Int): Bson = Updates.bitwiseOr(path(), value)
+
+    /**
+     * Creates an update that performs a bitwise or between the given integer value and the integral value of the
+     * property.
+     *
+     * @param property the property
+     * @param value the value
+     * @return the update @mongodb.driver.manual reference/operator/update/bit/ $bit
+     */
+    public fun bitwiseOr(property: KProperty<Number?>, value: Int): Bson =
+        Updates.bitwiseOr(property.path(), value)
+
+    /**
+     * Creates an update that performs a bitwise or between the given long value and the integral value of the property.
+     *
+     * @param value the value
+     * @return the update @mongodb.driver.manual reference/operator/update/bit/ $bit
+     */
+    @JvmSynthetic
+    @JvmName("bitwiseOrExt")
+    public infix fun KProperty<Number?>.bitwiseOr(value: Long): Bson = Updates.bitwiseOr(path(), value)
+
+    /**
+     * Creates an update that performs a bitwise or between the given long value and the integral value of the property.
+     *
+     * @param property the property
+     * @param value the value
+     * @return the update @mongodb.driver.manual reference/operator/update/bit/ $bit
+     */
+    public fun bitwiseOr(property: KProperty<Number?>, value: Long): Bson = property.bitwiseOr(value)
+
+    /**
+     * Creates an update that performs a bitwise xor between the given integer value and the integral value of the
+     * property.
+     *
+     * @param value the value
+     * @return the update @mongodb.driver.manual reference/operator/update/bit/ $bit
+     */
+    @JvmSynthetic
+    @JvmName("bitwiseXorExt")
+    public infix fun KProperty<Number?>.bitwiseXor(value: Int): Bson = Updates.bitwiseXor(path(), value)
+
+    /**
+     * Creates an update that performs a bitwise xor between the given integer value and the integral value of the
+     * property.
+     *
+     * @param property the property
+     * @param value the value
+     * @return the update @mongodb.driver.manual reference/operator/update/bit/ $bit
+     */
+    public fun bitwiseXor(property: KProperty<Number?>, value: Int): Bson =
+        Updates.bitwiseXor(property.path(), value)
+
+    /**
+     * Creates an update that performs a bitwise xor between the given long value and the integral value of the
+     * property.
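+     *
+     * A brief usage sketch (illustrative only; `Device` is a hypothetical data class with a `mask: Long` property):
+     * ```
+     * // Renders as {"$bit": {"mask": {"xor": 255}}}
+     * val update = Device::mask bitwiseXor 0xFFL
+     * ```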
+     *
+     * @param value the value
+     * @return the update @mongodb.driver.manual reference/operator/update/bit/ $bit
+     */
+    @JvmSynthetic
+    @JvmName("bitwiseXorExt")
+    public infix fun KProperty<Number?>.bitwiseXor(value: Long): Bson = Updates.bitwiseXor(path(), value)
+
+    /**
+     * Creates an update that performs a bitwise xor between the given long value and the integral value of the
+     * property.
+     *
+     * @param property the property
+     * @param value the value
+     * @return the update @mongodb.driver.manual reference/operator/update/bit/ $bit
+     */
+    public fun bitwiseXor(property: KProperty<Number?>, value: Long): Bson =
+        Updates.bitwiseXor(property.path(), value)
+}
diff --git a/driver-kotlin-extensions/src/main/kotlin/com/mongodb/kotlin/client/property/KPropertyPath.kt b/driver-kotlin-extensions/src/main/kotlin/com/mongodb/kotlin/client/property/KPropertyPath.kt
new file mode 100644
index 00000000000..1aaa3f622e9
--- /dev/null
+++ b/driver-kotlin-extensions/src/main/kotlin/com/mongodb/kotlin/client/property/KPropertyPath.kt
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ * Copyright (C) 2016/2022 Litote
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @custom-license-header
+ */
+package com.mongodb.kotlin.client.property
+
+import com.mongodb.annotations.Sealed
+import com.mongodb.kotlin.client.model.path
+import java.util.Objects
+import kotlin.reflect.KParameter
+import kotlin.reflect.KProperty1
+import kotlin.reflect.KType
+import kotlin.reflect.KTypeParameter
+import kotlin.reflect.KVisibility
+
+/**
+ * A property path, operations on which take one receiver as a parameter.
+ *
+ * @param T the type of the receiver which should be used to obtain the value of the property.
+ * @param R the type of the property.
+ */
+@Sealed
+public open class KPropertyPath<T, R>(
+    private val previous: KPropertyPath<T, *>?,
+    internal val property: KProperty1<*, R?>
+) : KProperty1<T, R> {
+
+    @Suppress("UNCHECKED_CAST")
+    internal constructor(
+        previous: KProperty1<*, Any?>,
+        property: KProperty1<*, R?>
+    ) : this(
+        if (previous is KPropertyPath<*, *>) {
+            previous as KPropertyPath<T, *>?
+        } else {
+            KPropertyPath(null as (KPropertyPath<T, *>?), previous)
+        },
+        property)
+
+    private val path: String by lazy { "${previous?.path?.let { "$it." } ?: ""}${property.path()}" }
+
+    override val name: String
+        get() = path
+
+    override val annotations: List<Annotation>
+        get() = unSupportedOperation()
+    override val getter: KProperty1.Getter<T, R>
+        get() = unSupportedOperation()
+    override val isAbstract: Boolean
+        get() = unSupportedOperation()
+    override val isConst: Boolean
+        get() = unSupportedOperation()
+    override val isFinal: Boolean
+        get() = unSupportedOperation()
+    override val isLateinit: Boolean
+        get() = unSupportedOperation()
+    override val isOpen: Boolean
+        get() = unSupportedOperation()
+    override val isSuspend: Boolean
+        get() = unSupportedOperation()
+    override val parameters: List<KParameter>
+        get() = unSupportedOperation()
+    override val returnType: KType
+        get() = unSupportedOperation()
+    override val typeParameters: List<KTypeParameter>
+        get() = unSupportedOperation()
+    override val visibility: KVisibility?
+        get() = unSupportedOperation()
+    override fun invoke(p1: T): R = unSupportedOperation()
+    override fun call(vararg args: Any?): R = unSupportedOperation()
+    override fun callBy(args: Map<KParameter, Any?>): R = unSupportedOperation()
+    override fun get(receiver: T): R = unSupportedOperation()
+    override fun getDelegate(receiver: T): Any? = unSupportedOperation()
+    override fun hashCode(): Int = Objects.hash(previous, property, name)
+    override fun equals(other: Any?): Boolean {
+        if (this === other) return true
+        if (javaClass != other?.javaClass) return false
+        other as KPropertyPath<*, *>
+        return Objects.equals(previous, other.previous) &&
+            Objects.equals(property, other.property) &&
+            Objects.equals(name, other.name)
+    }
+
+    public companion object {
+
+        private fun unSupportedOperation(): Nothing = throw UnsupportedOperationException()
+
+        internal class CustomProperty<T, R>(val previous: KPropertyPath<*, T>, path: String) : KProperty1<T, R> {
+            override val annotations: List<Annotation>
+                get() = emptyList()
+
+            override val getter: KProperty1.Getter<T, R>
+                get() = unSupportedOperation()
+            override val isAbstract: Boolean
+                get() = previous.isAbstract
+            override val isConst: Boolean
+                get() = previous.isConst
+            override val isFinal: Boolean
+                get() = previous.isFinal
+            override val isLateinit: Boolean
+                get() = previous.isLateinit
+            override val isOpen: Boolean
+                get() = previous.isOpen
+            override val isSuspend: Boolean
+                get() = previous.isSuspend
+            override val name: String = path
+            override val parameters: List<KParameter>
+                get() = previous.parameters
+            override val returnType: KType
+                get() = unSupportedOperation()
+            override val typeParameters: List<KTypeParameter>
+                get() = previous.typeParameters
+            override val visibility: KVisibility?
+                get() = previous.visibility
+            override fun call(vararg args: Any?): R = unSupportedOperation()
+            override fun callBy(args: Map<KParameter, Any?>): R = unSupportedOperation()
+            override fun get(receiver: T): R = unSupportedOperation()
+            override fun getDelegate(receiver: T): Any? = unSupportedOperation()
+            override fun invoke(p1: T): R = unSupportedOperation()
+            override fun hashCode(): Int = Objects.hash(previous, name)
+            override fun equals(other: Any?): Boolean {
+                if (this === other) return true
+                if (javaClass != other?.javaClass) return false
+                other as CustomProperty<*, *>
+                return Objects.equals(previous, other.previous) && Objects.equals(name, other.name)
+            }
+        }
+
+        /** Provides "fake" property with custom name. */
+        public fun <T, R> customProperty(previous: KPropertyPath<*, T>, path: String): KProperty1<T, R> =
+            CustomProperty(previous, path)
+    }
+}
+
+/** Base class for collection property path. */
+public open class KCollectionPropertyPath<T, R, MEMBER : KPropertyPath<T, R?>>(
+    previous: KPropertyPath<T, *>?,
+    property: KProperty1<*, Iterable<R>?>
+) : KPropertyPath<T, Iterable<R>?>(previous, property) {
+
+    /** To be overridden to return the right type.
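+     *
+     * For illustration, a sketch of the member paths these operators produce (assuming a hypothetical `Person`
+     * class with a `scores: List<Int>` property):
+     * ```
+     * val scores = KCollectionSimplePropertyPath<Person, Int>(null, Person::scores)
+     * scores.posOp.path()                 // "scores.$"
+     * scores.filteredPosOp("elem").path() // "scores.$[elem]"
+     * scores.pos(0).path()                // "scores.0"
+     * ```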
+     */
+    @Suppress("UNCHECKED_CAST")
+    public open fun memberWithAdditionalPath(additionalPath: String): MEMBER =
+        KPropertyPath(
+            this as KProperty1<T, Iterable<R>?>, customProperty(this as KPropertyPath<*, T>, additionalPath))
+            as MEMBER
+
+    /** [The positional array operator $](https://docs.mongodb.com/manual/reference/operator/update/positional/) */
+    public val posOp: MEMBER
+        get() = memberWithAdditionalPath("\$")
+
+    /** [The all positional operator $[]](https://docs.mongodb.com/manual/reference/operator/update/positional-all/) */
+    public val allPosOp: MEMBER
+        get() = memberWithAdditionalPath("\$[]")
+
+    /**
+     * [The filtered positional operator $[\]]
+     * (https://docs.mongodb.com/manual/reference/operator/update/positional-filtered/)
+     */
+    public fun filteredPosOp(identifier: String): MEMBER = memberWithAdditionalPath("\$[$identifier]")
+
+    /** In order to write array indexed expressions (like `accesses.0.timestamp`) */
+    public fun pos(position: Int): MEMBER = memberWithAdditionalPath(position.toString())
+}
+
+/** A property path for a collection property. */
+public class KCollectionSimplePropertyPath<T, R>(
+    previous: KPropertyPath<T, *>?,
+    property: KProperty1<*, Iterable<R>?>
+) : KCollectionPropertyPath<T, R, KPropertyPath<T, R?>>(previous, property)
+
+/** Base class for map property path. */
+public open class KMapPropertyPath<T, K, R, MEMBER : KPropertyPath<T, R?>>(
+    previous: KPropertyPath<T, *>?,
+    property: KProperty1<*, Map<out K, R>?>
+) : KPropertyPath<T, Map<out K?, R>?>(previous, property) {
+
+    /** To be overridden to return the right type. */
+    @Suppress("UNCHECKED_CAST")
+    public open fun memberWithAdditionalPath(additionalPath: String): MEMBER =
+        KPropertyPath(
+            this as KProperty1<T, Map<out K?, R>?>, customProperty(this as KPropertyPath<*, T>, additionalPath))
+            as MEMBER
+
+    /** Key projection of map. Sample: `Restaurant::localeMap.keyProjection(Locale.ENGLISH).path()` */
+    public fun keyProjection(key: K): MEMBER = memberWithAdditionalPath(key.toString())
+}
+
+/** A property path for a map property. */
+public class KMapSimplePropertyPath<T, K, R>(previous: KPropertyPath<T, *>?, property: KProperty1<*, Map<out K, R>?>) :
+    KMapPropertyPath<T, K, R, KPropertyPath<T, R?>>(previous, property)
diff --git a/driver-kotlin-extensions/src/main/kotlin/kotlin/internal/OnlyInputTypes.kt b/driver-kotlin-extensions/src/main/kotlin/kotlin/internal/OnlyInputTypes.kt
new file mode 100644
index 00000000000..7526cd9c370
--- /dev/null
+++ b/driver-kotlin-extensions/src/main/kotlin/kotlin/internal/OnlyInputTypes.kt
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kotlin.internal
+
+/**
+ * Workaround to expose the `kotlin.internal` OnlyInputTypes annotation.
+ *
+ * Enables compile-time type checking, so that captured types cannot be expanded.
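+ *
+ * For illustration (a hypothetical signature in the style of this driver's extension functions):
+ * ```
+ * public fun <@OnlyInputTypes T> eq(property: KProperty<T?>, value: T?): Bson = TODO()
+ *
+ * eq(Person::name, "Ada") // compiles: T is inferred as String
+ * eq(Person::name, 42)    // rejected: T may not be widened to Any
+ * ```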
+ * + * See: https://youtrack.jetbrains.com/issue/KT-13198/ + */ +@Target(AnnotationTarget.TYPE_PARAMETER) +@Retention(AnnotationRetention.BINARY) +internal annotation class OnlyInputTypes diff --git a/driver-kotlin-extensions/src/test/kotlin/com/mongodb/kotlin/client/model/AggregatesTest.kt b/driver-kotlin-extensions/src/test/kotlin/com/mongodb/kotlin/client/model/AggregatesTest.kt new file mode 100644 index 00000000000..f0fd6d7a1a4 --- /dev/null +++ b/driver-kotlin-extensions/src/test/kotlin/com/mongodb/kotlin/client/model/AggregatesTest.kt @@ -0,0 +1,355 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * Copyright (C) 2016/2022 Litote + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * @custom-license-header + */ +package com.mongodb.kotlin.client.model + +import com.mongodb.MongoNamespace +import com.mongodb.client.model.Aggregates +import com.mongodb.client.model.Aggregates.project +import com.mongodb.client.model.GraphLookupOptions +import com.mongodb.client.model.MergeOptions +import com.mongodb.client.model.QuantileMethod +import com.mongodb.client.model.UnwindOptions +import com.mongodb.client.model.densify.DensifyOptions +import com.mongodb.client.model.densify.DensifyRange +import com.mongodb.kotlin.client.MongoCollection +import com.mongodb.kotlin.client.model.Accumulators.accumulator +import com.mongodb.kotlin.client.model.Accumulators.addToSet +import com.mongodb.kotlin.client.model.Accumulators.avg +import com.mongodb.kotlin.client.model.Accumulators.bottom +import com.mongodb.kotlin.client.model.Accumulators.bottomN +import com.mongodb.kotlin.client.model.Accumulators.first +import com.mongodb.kotlin.client.model.Accumulators.firstN +import com.mongodb.kotlin.client.model.Accumulators.last +import com.mongodb.kotlin.client.model.Accumulators.lastN +import com.mongodb.kotlin.client.model.Accumulators.max +import com.mongodb.kotlin.client.model.Accumulators.maxN +import com.mongodb.kotlin.client.model.Accumulators.median +import com.mongodb.kotlin.client.model.Accumulators.mergeObjects +import com.mongodb.kotlin.client.model.Accumulators.min +import com.mongodb.kotlin.client.model.Accumulators.minN +import com.mongodb.kotlin.client.model.Accumulators.percentile +import com.mongodb.kotlin.client.model.Accumulators.push +import com.mongodb.kotlin.client.model.Accumulators.stdDevPop +import com.mongodb.kotlin.client.model.Accumulators.stdDevSamp +import com.mongodb.kotlin.client.model.Accumulators.sum +import com.mongodb.kotlin.client.model.Accumulators.top +import com.mongodb.kotlin.client.model.Accumulators.topN +import com.mongodb.kotlin.client.model.Aggregates.count +import com.mongodb.kotlin.client.model.Aggregates.densify +import com.mongodb.kotlin.client.model.Aggregates.graphLookup +import com.mongodb.kotlin.client.model.Aggregates.lookup +import com.mongodb.kotlin.client.model.Aggregates.merge +import com.mongodb.kotlin.client.model.Aggregates.out +import com.mongodb.kotlin.client.model.Aggregates.unionWith +import 
com.mongodb.kotlin.client.model.Aggregates.unwind
+import com.mongodb.kotlin.client.model.Projections.excludeId
+import com.mongodb.kotlin.client.model.Projections.projection
+import com.mongodb.kotlin.client.model.Sorts.ascending
+import kotlin.test.Test
+import kotlin.test.assertEquals
+import org.bson.BsonDocument
+import org.bson.conversions.Bson
+import org.junit.jupiter.api.BeforeAll
+import org.mockito.Mock
+import org.mockito.Mockito.verify
+import org.mockito.kotlin.doReturn
+import org.mockito.kotlin.mock
+import org.mockito.kotlin.whenever
+
+class AggregatesTest {
+
+    companion object {
+        @Mock internal val wrappedEmployee: com.mongodb.client.MongoCollection<Employee> = mock()
+        @Mock
+        internal val wrappedEmployeeCoroutine: com.mongodb.reactivestreams.client.MongoCollection<Employee> = mock()
+        @Mock internal val wrappedCustomer: com.mongodb.client.MongoCollection<Customer> = mock()
+        @Mock
+        internal val wrappedCustomerCoroutine: com.mongodb.reactivestreams.client.MongoCollection<Customer> = mock()
+
+        lateinit var employeeCollection: MongoCollection<Employee>
+        lateinit var employeeCollectionCoroutine: com.mongodb.kotlin.client.coroutine.MongoCollection<Employee>
+        lateinit var customerCollection: MongoCollection<Customer>
+        lateinit var customerCollectionCoroutine: com.mongodb.kotlin.client.coroutine.MongoCollection<Customer>
+
+        @JvmStatic
+        @BeforeAll
+        internal fun setUpMocks() {
+            employeeCollection = MongoCollection(wrappedEmployee)
+            employeeCollectionCoroutine = com.mongodb.kotlin.client.coroutine.MongoCollection(wrappedEmployeeCoroutine)
+
+            customerCollection = MongoCollection(wrappedCustomer)
+            customerCollectionCoroutine = com.mongodb.kotlin.client.coroutine.MongoCollection(wrappedCustomerCoroutine)
+
+            whenever(wrappedEmployee.namespace).doReturn(MongoNamespace("db", Employee::class.simpleName!!))
+            whenever(wrappedEmployeeCoroutine.namespace).doReturn(MongoNamespace("db", Employee::class.simpleName!!))
+            whenever(wrappedCustomer.namespace).doReturn(MongoNamespace("db", Customer::class.simpleName!!))
+            whenever(wrappedCustomerCoroutine.namespace).doReturn(MongoNamespace("db", Customer::class.simpleName!!))
+
+            employeeCollection.namespace
+            verify(wrappedEmployee).namespace
+            assertEquals(Employee::class.simpleName, employeeCollection.namespace.collectionName)
+
+            employeeCollectionCoroutine.namespace
+            verify(wrappedEmployeeCoroutine).namespace
+            assertEquals(Employee::class.simpleName, employeeCollectionCoroutine.namespace.collectionName)
+
+            customerCollection.namespace
+            verify(wrappedCustomer).namespace
+            assertEquals(Customer::class.simpleName, customerCollection.namespace.collectionName)
+
+            customerCollectionCoroutine.namespace
+            verify(wrappedCustomerCoroutine).namespace
+            assertEquals(Customer::class.simpleName, customerCollectionCoroutine.namespace.collectionName)
+        }
+    }
+
+    @Test
+    fun count() {
+        assertEquals(""" {${'$'}count: "name"}""", count(Person::name))
+    }
+
+    @Test
+    fun lookup() {
+        assertEquals(
+            """ {"${'$'}lookup":
+                {"from": "Customer", "localField": "customerId", "foreignField": "customerId", "as": "invoice"}}""",
+            lookup(customerCollection, Order::customerId, Customer::customerId, "invoice"))
+        assertEquals(
+            Aggregates.lookup("Customer", "customerId", "customerId", "invoice"),
+            lookup(customerCollection, Order::customerId, Customer::customerId, "invoice"))
+
+        assertEquals(
+            """ {"${'$'}lookup":
+                {"from": "Customer", "localField": "customerId", "foreignField": "customerId", "as": "invoice"}}""",
+            lookup(customerCollectionCoroutine, Order::customerId, Customer::customerId, "invoice"))
+        assertEquals(
Aggregates.lookup("Customer", "customerId", "customerId", "invoice"), + lookup(customerCollectionCoroutine, Order::customerId, Customer::customerId, "invoice")) + } + + @Test + fun graphLookup() { + assertEquals( + """ {"${'$'}graphLookup": + {"from": "Employee", "startWith": "${'$'}id", "connectFromField": "id", "connectToField": + "reportsTo", "as": "subordinates", "maxDepth": 1}} """, + graphLookup( + from = employeeCollection, + startWith = Employee::id.projection, + connectFromField = Employee::id, + connectToField = Employee::reportsTo, + fieldAs = "subordinates", + options = GraphLookupOptions().maxDepth(1))) + + assertEquals( + """ {"${'$'}graphLookup": + {"from": "Employee", "startWith": "${'$'}id", "connectFromField": "id", "connectToField": + "reportsTo", "as": "subordinates", "maxDepth": 1}} """, + graphLookup( + from = employeeCollectionCoroutine, + startWith = Employee::id.projection, + connectFromField = Employee::id, + connectToField = Employee::reportsTo, + fieldAs = "subordinates", + options = GraphLookupOptions().maxDepth(1))) + } + + @Test + fun unionWith() { + assertEquals( + """ {"${'$'}unionWith": {"coll": "Customer", "pipeline": [{"${'$'}project": {"_id": 0}}]}} """, + unionWith(collection = customerCollection, pipeline = listOf(project(excludeId())))) + + assertEquals( + """ {"${'$'}unionWith": {"coll": "Customer", "pipeline": [{"${'$'}project": {"_id": 0}}]}} """, + unionWith(collection = customerCollectionCoroutine, pipeline = listOf(project(excludeId())))) + } + + @Test + fun unwind() { + assertEquals(UnwindOptions(), UnwindOptions()) + assertEquals(""" {"${'$'}unwind": "${'$'}address"} """, unwind(Person::address)) + + assertEquals( + """ {"${'$'}unwind": + {"path": "${'$'}address", "preserveNullAndEmptyArrays": true, "includeArrayIndex": "idx"}} """, + unwind(Person::address, UnwindOptions().includeArrayIndex("idx").preserveNullAndEmptyArrays(true))) + } + + @Test + fun out() { + assertEquals(""" {"${'$'}out": "Employee"} """, out(employeeCollection)) + assertEquals(""" {"${'$'}out": "Employee"} """, out(employeeCollectionCoroutine)) + } + + @Test + fun merge() { + assertEquals(""" {"${'$'}merge": {"into": "Customer"}} """, merge(customerCollection)) + assertEquals(""" {"${'$'}merge": {"into": "Customer"}} """, merge(customerCollectionCoroutine)) + + assertEquals( + """ {"${'$'}merge": {"into": "Customer", "on": "ssn"}} """, + merge(customerCollection, MergeOptions().uniqueIdentifier("ssn"))) + assertEquals( + """ {"${'$'}merge": {"into": "Customer", "on": "ssn"}} """, + merge(customerCollectionCoroutine, MergeOptions().uniqueIdentifier("ssn"))) + } + + @Test + fun densify() { + assertEquals( + """ {"${'$'}densify": { + "field": "email", + "range": { "bounds": "full", "step": 1 } + }} """, + densify(Customer::email, DensifyRange.fullRangeWithStep(1))) + + assertEquals( + """ {"${'$'}densify": { + "field": "email", + "range": { "bounds": "full", "step": 1 }, + "partitionByFields": ["foo"] + }} """, + densify( + Customer::email, + range = DensifyRange.fullRangeWithStep(1), + options = DensifyOptions.densifyOptions().partitionByFields("foo"))) + } + + @Test + @Suppress("LongMethod") + fun accumulators() { + assertEquals(com.mongodb.client.model.Accumulators.sum("age", 1), sum(Person::age, 1)) + + assertEquals(com.mongodb.client.model.Accumulators.avg("age", 1), avg(Person::age, 1)) + + assertEquals( + com.mongodb.client.model.Accumulators.percentile("age", 1, 2, QuantileMethod.approximate()), + percentile(Person::age, 1, 2, QuantileMethod.approximate())) + + 
assertEquals( + com.mongodb.client.model.Accumulators.median("age", 1, QuantileMethod.approximate()), + median(Person::age, 1, QuantileMethod.approximate())) + + assertEquals(com.mongodb.client.model.Accumulators.first("age", 1), first(Person::age, 1)) + + assertEquals(com.mongodb.client.model.Accumulators.firstN("age", 1, 2), firstN(Person::age, 1, 2)) + + assertEquals( + com.mongodb.client.model.Accumulators.top("age", com.mongodb.client.model.Sorts.ascending("name"), 1), + top(Person::age, ascending(Person::name), 1)) + + assertEquals( + com.mongodb.client.model.Accumulators.topN("age", com.mongodb.client.model.Sorts.ascending("name"), 1, 2), + topN(Person::age, ascending(Person::name), 1, 2)) + + assertEquals(com.mongodb.client.model.Accumulators.last("age", 1), last(Person::age, 1)) + + assertEquals(com.mongodb.client.model.Accumulators.lastN("age", 1, 2), lastN(Person::age, 1, 2)) + + assertEquals( + com.mongodb.client.model.Accumulators.bottom("age", com.mongodb.client.model.Sorts.ascending("name"), 1), + bottom(Person::age, ascending(Person::name), 1)) + + assertEquals( + com.mongodb.client.model.Accumulators.bottomN( + "age", com.mongodb.client.model.Sorts.ascending("name"), 1, 2), + bottomN(Person::age, ascending(Person::name), 1, 2)) + + assertEquals(com.mongodb.client.model.Accumulators.max("age", 1), max(Person::age, 1)) + + assertEquals(com.mongodb.client.model.Accumulators.maxN("age", 1, 2), maxN(Person::age, 1, 2)) + + assertEquals(com.mongodb.client.model.Accumulators.min("age", 1), min(Person::age, 1)) + + assertEquals(com.mongodb.client.model.Accumulators.minN("age", 1, 2), minN(Person::age, 1, 2)) + + assertEquals(com.mongodb.client.model.Accumulators.push("age", 1), push(Person::age, 1)) + + assertEquals(com.mongodb.client.model.Accumulators.addToSet("age", 1), addToSet(Person::age, 1)) + + assertEquals(com.mongodb.client.model.Accumulators.mergeObjects("age", 1), mergeObjects(Person::age, 1)) + + assertEquals( + com.mongodb.client.model.Accumulators.accumulator( + "age", "initFunction", "accumulateFunction", "mergeFunction"), + accumulator(Person::age, "initFunction", "accumulateFunction", "mergeFunction")) + + assertEquals( + com.mongodb.client.model.Accumulators.accumulator( + "age", "initFunction", "accumulateFunction", "mergeFunction", "finalizeFunction"), + accumulator(Person::age, "initFunction", "accumulateFunction", "mergeFunction", "finalizeFunction")) + + assertEquals( + com.mongodb.client.model.Accumulators.accumulator( + "age", + "initFunction", + listOf("a", "b"), + "accumulateFunction", + listOf("c", "d"), + "mergeFunction", + "finalizeFunction"), + accumulator( + Person::age, + "initFunction", + listOf("a", "b"), + "accumulateFunction", + listOf("c", "d"), + "mergeFunction", + "finalizeFunction")) + + assertEquals( + com.mongodb.client.model.Accumulators.accumulator( + "age", "initFunction", "accumulateFunction", "mergeFunction", "finalizeFunction", "Kotlin"), + accumulator( + Person::age, "initFunction", "accumulateFunction", "mergeFunction", "finalizeFunction", "Kotlin")) + + assertEquals( + com.mongodb.client.model.Accumulators.accumulator( + "age", + "initFunction", + listOf("a", "b"), + "accumulateFunction", + listOf("c", "d"), + "mergeFunction", + "finalizeFunction", + "Kotlin"), + accumulator( + Person::age, + "initFunction", + listOf("a", "b"), + "accumulateFunction", + listOf("c", "d"), + "mergeFunction", + "finalizeFunction", + "Kotlin")) + + assertEquals(com.mongodb.client.model.Accumulators.stdDevPop("age", 1), stdDevPop(Person::age, 1)) + + 
assertEquals(com.mongodb.client.model.Accumulators.stdDevSamp("age", 1), stdDevSamp(Person::age, 1))
+    }
+
+    data class Person(val name: String, val age: Int, val address: List<String>?, val results: List<Int>)
+    data class Employee(val id: String, val name: String, val reportsTo: String)
+    data class Order(val id: String, val orderId: String, val customerId: Int, val amount: Int)
+    data class Customer(val id: String, val customerId: Int, val name: String, val email: String?)
+
+    private fun assertEquals(expected: String, result: Bson) =
+        assertEquals(BsonDocument.parse(expected), result.toBsonDocument())
+}
diff --git a/driver-kotlin-extensions/src/test/kotlin/com/mongodb/kotlin/client/model/ExtensionsApiTest.kt b/driver-kotlin-extensions/src/test/kotlin/com/mongodb/kotlin/client/model/ExtensionsApiTest.kt
new file mode 100644
index 00000000000..5bad9104408
--- /dev/null
+++ b/driver-kotlin-extensions/src/test/kotlin/com/mongodb/kotlin/client/model/ExtensionsApiTest.kt
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.model
+
+import io.github.classgraph.ClassGraph
+import kotlin.test.Test
+import kotlin.test.assertTrue
+
+class ExtensionsApiTest {
+
+    @Test
+    fun shouldHaveAllFiltersExtensions() {
+        val kotlinExtensions: Set<String> = getKotlinExtensions("Filters")
+        val javaMethods: Set<String> = getJavaMethods("Filters")
+
+        val notImplemented = javaMethods subtract kotlinExtensions
+        assertTrue(notImplemented.isEmpty(), "Some possible Filters were not implemented: $notImplemented")
+    }
+
+    @Test
+    fun shouldHaveAllProjectionsExtensions() {
+        val kotlinExtensions: Set<String> = getKotlinExtensions("Projections")
+        val javaMethods: Set<String> = getJavaMethods("Projections")
+
+        val notImplemented = javaMethods subtract kotlinExtensions
+        assertTrue(notImplemented.isEmpty(), "Some possible Projections were not implemented: $notImplemented")
+    }
+
+    @Test
+    fun shouldHaveAllUpdatesExtensions() {
+        val kotlinExtensions: Set<String> = getKotlinExtensions("Updates")
+        val javaMethods: Set<String> = getJavaMethods("Updates")
+
+        val notImplemented = javaMethods subtract kotlinExtensions
+        assertTrue(notImplemented.isEmpty(), "Some possible Updates were not implemented: $notImplemented")
+    }
+
+    @Test
+    fun shouldHaveAllIndexesExtensions() {
+        val kotlinExtensions: Set<String> = getKotlinExtensions("Indexes")
+        val javaMethods: Set<String> = getJavaMethods("Indexes")
+        val notImplemented = javaMethods subtract kotlinExtensions
+        assertTrue(notImplemented.isEmpty(), "Some possible Indexes were not implemented: $notImplemented")
+    }
+
+    @Test
+    fun shouldHaveAllSortsExtensions() {
+        val kotlinExtensions: Set<String> = getKotlinExtensions("Sorts")
+        val javaMethods: Set<String> = getJavaMethods("Sorts")
+
+        val notImplemented = javaMethods subtract kotlinExtensions
+        assertTrue(notImplemented.isEmpty(), "Some possible Sorts were not implemented: $notImplemented")
+    }
+
+    @Test
+    fun shouldHaveAllAggregatesExtensions() {
+        val kotlinExtensions: Set<String> =
getKotlinExtensions("Aggregates")
+        val javaMethods: Set<String> = getJavaMethods("Aggregates")
+
+        val notImplemented = javaMethods subtract kotlinExtensions
+        assertTrue(notImplemented.isEmpty(), "Some possible Aggregates were not implemented: $notImplemented")
+    }
+
+    @Test
+    fun shouldHaveAllAccumulatorsExtensions() {
+        val kotlinExtensions: Set<String> = getKotlinExtensions("Accumulators")
+        val javaMethods: Set<String> = getJavaMethods("Accumulators")
+
+        val notImplemented = javaMethods subtract kotlinExtensions
+        assertTrue(notImplemented.isEmpty(), "Some possible Accumulators were not implemented: $notImplemented")
+    }
+
+    private fun getKotlinExtensions(className: String): Set<String> {
+        return ClassGraph()
+            .enableClassInfo()
+            .enableMethodInfo()
+            .acceptPackages("com.mongodb.kotlin.client.model")
+            .scan()
+            .use { result ->
+                result.allClasses
+                    .filter { it.simpleName == className }
+                    .asSequence()
+                    .flatMap { it.methodInfo }
+                    .filter { it.isPublic }
+                    .map { it.name }
+                    .filter { !it.contains("$") }
+                    .toSet()
+            }
+    }
+
+    private fun getJavaMethods(className: String): Set<String> {
+        return ClassGraph().enableClassInfo().enableMethodInfo().acceptPackages("com.mongodb.client.model").scan().use {
+            it.getClassInfo("com.mongodb.client.model.$className")
+                .methodInfo
+                .filter { methodInfo ->
+                    methodInfo.isPublic &&
+                        methodInfo.parameterInfo.isNotEmpty() &&
+                        methodInfo.parameterInfo[0]
+                            .typeDescriptor
+                            .toStringWithSimpleNames()
+                            .equals("String") // only methods starting
+                    // with a String (property name)
+                }
+                .map { m -> m.name }
+                .toSet()
+        }
+    }
+}
diff --git a/driver-kotlin-extensions/src/test/kotlin/com/mongodb/kotlin/client/model/FiltersTest.kt b/driver-kotlin-extensions/src/test/kotlin/com/mongodb/kotlin/client/model/FiltersTest.kt
new file mode 100644
index 00000000000..0ab3d83936a
--- /dev/null
+++ b/driver-kotlin-extensions/src/test/kotlin/com/mongodb/kotlin/client/model/FiltersTest.kt
@@ -0,0 +1,667 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ * Copyright (C) 2016/2022 Litote
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @custom-license-header
+ */
+package com.mongodb.kotlin.client.model
+
+import com.mongodb.MongoClientSettings
+import com.mongodb.client.model.TextSearchOptions
+import com.mongodb.client.model.geojson.Point
+import com.mongodb.client.model.geojson.Polygon
+import com.mongodb.client.model.geojson.Position
+import com.mongodb.kotlin.client.model.Filters.all
+import com.mongodb.kotlin.client.model.Filters.and
+import com.mongodb.kotlin.client.model.Filters.bitsAllClear
+import com.mongodb.kotlin.client.model.Filters.bitsAllSet
+import com.mongodb.kotlin.client.model.Filters.bitsAnyClear
+import com.mongodb.kotlin.client.model.Filters.bitsAnySet
+import com.mongodb.kotlin.client.model.Filters.elemMatch
+import com.mongodb.kotlin.client.model.Filters.eq
+import com.mongodb.kotlin.client.model.Filters.exists
+import com.mongodb.kotlin.client.model.Filters.expr
+import com.mongodb.kotlin.client.model.Filters.geoIntersects
+import com.mongodb.kotlin.client.model.Filters.geoWithin
+import com.mongodb.kotlin.client.model.Filters.geoWithinBox
+import com.mongodb.kotlin.client.model.Filters.geoWithinCenter
+import com.mongodb.kotlin.client.model.Filters.geoWithinCenterSphere
+import com.mongodb.kotlin.client.model.Filters.geoWithinPolygon
+import com.mongodb.kotlin.client.model.Filters.gt
+import com.mongodb.kotlin.client.model.Filters.gte
+import com.mongodb.kotlin.client.model.Filters.`in`
+import com.mongodb.kotlin.client.model.Filters.jsonSchema
+import com.mongodb.kotlin.client.model.Filters.lt
+import com.mongodb.kotlin.client.model.Filters.lte
+import com.mongodb.kotlin.client.model.Filters.mod
+import com.mongodb.kotlin.client.model.Filters.ne
+import com.mongodb.kotlin.client.model.Filters.near
+import com.mongodb.kotlin.client.model.Filters.nearSphere
+import com.mongodb.kotlin.client.model.Filters.nin
+import com.mongodb.kotlin.client.model.Filters.nor
+import com.mongodb.kotlin.client.model.Filters.not
+import com.mongodb.kotlin.client.model.Filters.or
+import com.mongodb.kotlin.client.model.Filters.regex
+import com.mongodb.kotlin.client.model.Filters.size
+import com.mongodb.kotlin.client.model.Filters.text
+import com.mongodb.kotlin.client.model.Filters.type
+import com.mongodb.kotlin.client.model.Filters.where
+import kotlin.test.Test
+import kotlin.test.assertEquals
+import org.bson.BsonDocument
+import org.bson.BsonType
+import org.bson.conversions.Bson
+
+class FiltersTest {
+
+    data class Person(val name: String, val age: Int, val address: List<String>, val results: List<Int>)
+    val person = Person("Ada", 20, listOf("St James Square", "London", "W1"), listOf(1, 2, 3))
+
+    @Test
+    fun testEqSupport() {
+        val expected = BsonDocument.parse("""{"name": "Ada"}""")
+        val bson = eq(Person::name, person.name)
+        assertEquals(expected, bson.document)
+
+        val kmongoDsl = Person::name eq person.name
+        assertEquals(expected, kmongoDsl.document)
+    }
+
+    @Test
+    fun testNeSupport() {
+        val expected = BsonDocument.parse("""{"age": {"${'$'}ne": 20 }}""")
+        val bson = ne(Person::age, person.age)
+        assertEquals(expected, bson.document)
+
+        val kmongoDsl = Person::age ne person.age
+        assertEquals(expected, kmongoDsl.document)
+    }
+
+    @Test
+    fun testNotSupport() {
+        val expected = BsonDocument.parse("""{"age": {${'$'}not: {${'$'}eq: 20 }}}""")
+        val bson = not(eq(Person::age, person.age))
+        assertEquals(expected, bson.document)
+
+        val kmongoDsl = not(Person::age eq person.age)
+        assertEquals(expected, kmongoDsl.document)
+    }
+
+    @Test
+    fun testGtSupport() {
+        val expected =
BsonDocument.parse("""{"age": {"${'$'}gt": 20}}""") + val bson = gt(Person::age, person.age) + assertEquals(expected, bson.document) + + val kmongoDsl = Person::age gt 20 + assertEquals(expected, kmongoDsl.document) + } + + @Test + fun testGteSupport() { + val expected = BsonDocument.parse("""{"age": {"${'$'}gte": 20}}""") + val bson = gte(Person::age, person.age) + assertEquals(expected, bson.document) + + val kmongoDsl = Person::age gte 20 + assertEquals(expected, kmongoDsl.document) + } + + @Test + fun testLtSupport() { + val expected = BsonDocument.parse("""{"age": {"${'$'}lt": 20}}""") + val bson = lt(Person::age, person.age) + assertEquals(expected, bson.document) + + val kmongoDsl = Person::age lt 20 + assertEquals(expected, kmongoDsl.document) + } + + @Test + fun testLteSupport() { + val expected = BsonDocument.parse("""{"age": {"${'$'}lte": 20}}""") + val bson = lte(Person::age, person.age) + assertEquals(expected, bson.document) + + val kmongoDsl = Person::age lte 20 + assertEquals(expected, kmongoDsl.document) + } + + @Test + fun testExistsSupport() { + val expected = BsonDocument.parse("""{"age": {"${'$'}exists": true}}""") + + var bson = exists(Person::age) + assertEquals(expected, bson.document) + + var kmongoDsl = Person::age.exists() + assertEquals(expected, kmongoDsl.document) + + bson = exists(Person::age, true) + assertEquals(expected, bson.document) + + kmongoDsl = Person::age exists true + assertEquals(expected, kmongoDsl.document) + } + + @Test + fun testOrSupport() { + val expected = BsonDocument.parse("""{${'$'}or: [{"name": "Ada"}, {"age": 20 }]}""") + val bson = or(eq(Person::name, person.name), eq(Person::age, person.age)) + assertEquals(expected, bson.document) + + val kmongoDsl = or(Person::name eq person.name, Person::age eq person.age) + assertEquals(expected, kmongoDsl.document) + } + + @Test + fun testNorSupport() { + val expected = BsonDocument.parse("""{${'$'}nor: [{"name": "Ada"}, {"age": 20 }]}""") + var bson = nor(eq(Person::name, person.name), eq(Person::age, person.age)) + assertEquals(expected, bson.document) + + var kmongoDsl = nor(Person::name eq person.name, Person::age eq person.age) + assertEquals(expected, kmongoDsl.document) + + // List api + bson = nor(listOf(eq(Person::name, person.name), eq(Person::age, person.age))) + assertEquals(expected, bson.document) + + kmongoDsl = nor(listOf(Person::name eq person.name, Person::age eq person.age)) + assertEquals(expected, kmongoDsl.document) + } + + @Test + fun testAndSupport() { + val expected = BsonDocument.parse("""{${'$'}and: [{"name": "Ada"}, {"age": 20 }]}""") + val bson = and(eq(Person::name, person.name), eq(Person::age, person.age)) + assertEquals(expected, bson.document) + + val kmongoDsl = and(Person::name.eq(person.name), Person::age.eq(person.age)) + assertEquals(expected, kmongoDsl.document) + } + + @Test + fun testAllSupport() { + val expected = BsonDocument.parse("""{"address": {${'$'}all: ["a", "b", "c"]}}""") + var bson = all(Person::address, "a", "b", "c") + assertEquals(expected, bson.document) + + var kmongoDsl = Person::address.all("a", "b", "c") + assertEquals(expected, kmongoDsl.document) + + bson = all(Person::address, listOf("a", "b", "c")) + assertEquals(expected, bson.document) + + kmongoDsl = Person::address.all(listOf("a", "b", "c")) + assertEquals(expected, kmongoDsl.document) + } + + @Test + fun testElemMatchSupport() { + val expected = + BsonDocument.parse( + """{"results": {"${'$'}elemMatch": + |{"${'$'}and": [{"age": {"${'$'}gt": 1}}, {"age": {"${'$'}lt": 
10}}]}}}""" + .trimMargin()) + val bson = elemMatch(Person::results, and(gt(Person::age, 1), lt(Person::age, 10))) + assertEquals(expected, bson.document) + + val kmongoDsl = Person::results elemMatch and(gt(Person::age, 1), lt(Person::age, 10)) + assertEquals(expected, kmongoDsl.document) + } + + @Test + fun testInSupport() { + // List of values + var expected = BsonDocument.parse("""{"results": {"${'$'}in": [1, 2, 3]}}""") + var bson = `in`(Person::results, person.results) + assertEquals(expected, bson.document) + + var kmongoDsl = Person::results.`in`(person.results) + assertEquals(expected, kmongoDsl.document) + + // Alternative implementations + expected = BsonDocument.parse("""{"name": {"${'$'}in": ["Abe", "Ada", "Asda"]}}""") + bson = `in`(Person::name, listOf("Abe", "Ada", "Asda")) + assertEquals(expected, bson.document) + + kmongoDsl = Person::name.`in`(listOf("Abe", "Ada", "Asda")) + assertEquals(expected, kmongoDsl.document) + } + + @Test + fun testNinSupport() { + // List of values + var expected = BsonDocument.parse("""{"results": {"${'$'}nin": [1, 2, 3]}}""") + var bson = nin(Person::results, person.results) + assertEquals(expected, bson.document) + + var kmongoDsl = Person::results.nin(person.results) + assertEquals(expected, kmongoDsl.document) + + // Alternative implementations + expected = BsonDocument.parse("""{"name": {"${'$'}nin": ["Abe", "Ada", "Asda"]}}""") + bson = nin(Person::name, listOf("Abe", "Ada", "Asda")) + assertEquals(expected, bson.document) + + kmongoDsl = Person::name.nin(listOf("Abe", "Ada", "Asda")) + assertEquals(expected, kmongoDsl.document) + } + + @Test + fun testModSupport() { + val expected = + BsonDocument.parse( + """{"age": {"${'$'}mod": [{ "${'$'}numberLong" : "20" }, { "${'$'}numberLong" : "0" }]}}""") + val bson = mod(Person::age, person.age.toLong(), 0) + assertEquals(expected, bson.document) + + val kmongoDsl = Person::age.mod(person.age.toLong(), 0) + assertEquals(expected, kmongoDsl.document) + } + + @Test + fun testSizeSupport() { + val expected = BsonDocument.parse("""{"results": {"${'$'}size": 3}}""") + val bson = size(Person::results, 3) + assertEquals(expected, bson.document) + + val kmongoDsl = Person::results.size(3) + assertEquals(expected, kmongoDsl.document) + } + + @Test + fun testBitsAllClearSupport() { + val expected = BsonDocument.parse("""{"results": {"${'$'}bitsAllClear": { "${'$'}numberLong" : "3" }}}""") + val bson = bitsAllClear(Person::results, 3) + assertEquals(expected, bson.document) + + val kmongoDsl = Person::results bitsAllClear 3 + assertEquals(expected, kmongoDsl.document) + } + + @Test + fun testBitsSetClearSupport() { + // List of values + val expected = BsonDocument.parse("""{"results": {"${'$'}bitsAllSet": { "${'$'}numberLong" : "3" }}}""") + val bson = bitsAllSet(Person::results, 3) + assertEquals(expected, bson.document) + + val kmongoDsl = Person::results bitsAllSet 3 + assertEquals(expected, kmongoDsl.document) + } + + @Test + fun testBitsAnyClearSupport() { + val expected = BsonDocument.parse("""{"results": {"${'$'}bitsAnyClear": { "${'$'}numberLong" : "3" }}}""") + val bson = bitsAnyClear(Person::results, 3) + assertEquals(expected, bson.document) + + val kmongoDsl = Person::results bitsAnyClear 3 + assertEquals(expected, kmongoDsl.document) + } + + @Test + fun testBitsAnySetSupport() { + val expected = BsonDocument.parse("""{"results": {"${'$'}bitsAnySet": { "${'$'}numberLong" : "3" }}}""") + val bson = bitsAnySet(Person::results, 3) + assertEquals(expected, bson.document) + + val kmongoDsl = 
Person::results bitsAnySet 3 + assertEquals(expected, kmongoDsl.document) + } + + @Test + fun testTypeSupport() { + val expected = BsonDocument.parse("""{"results": {"${'$'}type": 5}}""") + val bson = type(Person::results, BsonType.BINARY) + assertEquals(expected, bson.document) + + val kmongoDsl = Person::results type BsonType.BINARY + assertEquals(expected, kmongoDsl.document) + } + + @Test + fun testTextSupport() { + var expected = BsonDocument.parse("""{${'$'}text: {${'$'}search: "mongoDB for GIANT ideas"}}""") + var bson = text("mongoDB for GIANT ideas") + assertEquals(expected, bson.document) + + expected = + BsonDocument.parse("""{${'$'}text: {${'$'}search: "mongoDB for GIANT ideas", ${'$'}language: "english"}}""") + bson = text("mongoDB for GIANT ideas", TextSearchOptions().language("english")) + assertEquals(expected, bson.document) + } + + @Test + fun testRegexSupport() { + val pattern = "acme.*corp" + var expected = BsonDocument.parse("""{"name": {"${'$'}regex": "$pattern", ${'$'}options : ""}}}""") + var bson = regex(Person::name, pattern) + assertEquals(expected, bson.document) + + bson = regex(Person::name, pattern.toRegex()) + assertEquals(expected, bson.document) + + bson = regex(Person::name, pattern.toRegex().toPattern()) + assertEquals(expected, bson.document) + + var kmongoDsl = Person::name.regex(pattern) + assertEquals(expected, kmongoDsl.document) + + kmongoDsl = Person::name.regex(pattern.toRegex()) + assertEquals(expected, kmongoDsl.document) + + kmongoDsl = Person::name.regex(pattern.toRegex().toPattern()) + assertEquals(expected, kmongoDsl.document) + + // With options + val options = "iu" + expected = BsonDocument.parse("""{"name": {"${'$'}regex": "$pattern", ${'$'}options : "$options"}}}""") + bson = regex(Person::name, pattern, options) + assertEquals(expected, bson.document) + + bson = regex(Person::name, pattern.toRegex(RegexOption.IGNORE_CASE)) + assertEquals(expected, bson.document) + + bson = regex(Person::name, pattern.toRegex(RegexOption.IGNORE_CASE).toPattern()) + assertEquals(expected, bson.document) + + kmongoDsl = Person::name.regex(pattern, options) + assertEquals(expected, kmongoDsl.document) + + kmongoDsl = Person::name.regex(pattern.toRegex(RegexOption.IGNORE_CASE)) + assertEquals(expected, kmongoDsl.document) + + kmongoDsl = Person::name.regex(pattern.toRegex(RegexOption.IGNORE_CASE).toPattern()) + assertEquals(expected, kmongoDsl.document) + + // Iterable + expected = BsonDocument.parse("""{"address": {"${'$'}regex": "$pattern", ${'$'}options : ""}}}""") + bson = regex(Person::address, pattern) + assertEquals(expected, bson.document) + + bson = regex(Person::address, pattern.toRegex()) + assertEquals(expected, bson.document) + + bson = regex(Person::address, pattern.toRegex().toPattern()) + assertEquals(expected, bson.document) + + kmongoDsl = Person::address.regex(pattern) + assertEquals(expected, kmongoDsl.document) + + kmongoDsl = Person::address.regex(pattern.toRegex()) + assertEquals(expected, kmongoDsl.document) + + kmongoDsl = Person::address.regex(pattern.toRegex().toPattern()) + assertEquals(expected, kmongoDsl.document) + + expected = BsonDocument.parse("""{"address": {"${'$'}regex": "$pattern", ${'$'}options : "$options"}}}""") + bson = regex(Person::address, pattern, options) + assertEquals(expected, bson.document) + + bson = regex(Person::address, pattern.toRegex(RegexOption.IGNORE_CASE)) + assertEquals(expected, bson.document) + + bson = regex(Person::address, pattern.toRegex(RegexOption.IGNORE_CASE).toPattern()) + 
assertEquals(expected, bson.document)
+
+        kmongoDsl = Person::address.regex(pattern, options)
+        assertEquals(expected, kmongoDsl.document)
+
+        kmongoDsl = Person::address.regex(pattern.toRegex(RegexOption.IGNORE_CASE))
+        assertEquals(expected, kmongoDsl.document)
+
+        kmongoDsl = Person::address.regex(pattern.toRegex(RegexOption.IGNORE_CASE).toPattern())
+        assertEquals(expected, kmongoDsl.document)
+    }
+
+    @Test
+    fun testWhereSupport() {
+        val expected = BsonDocument.parse("""{${'$'}where: "this.address.0 == this.address.1"}""")
+        val bson = where("this.address.0 == this.address.1")
+
+        assertEquals(expected, bson.document)
+    }
+
+    @Test
+    fun testExprSupport() {
+        val expected = BsonDocument.parse("""{${'$'}expr: {"name": "Ada"}}""")
+        val bson = expr(Person::name eq person.name)
+
+        assertEquals(expected, bson.document)
+    }
+
+    @Test
+    fun testGeoWithinSupport() {
+        val geometry =
+            """{"${'$'}geometry": {"type": "Polygon",
+            | "coordinates": [[[1.0, 2.0], [2.0, 3.0], [3.0, 4.0], [1.0, 2.0]]]}}"""
+                .trimMargin()
+        val expected = BsonDocument.parse("""{"address": {"${'$'}geoWithin": $geometry}}""")
+        val polygon = Polygon(listOf(Position(1.0, 2.0), Position(2.0, 3.0), Position(3.0, 4.0), Position(1.0, 2.0)))
+        var bson = geoWithin(Person::address, polygon)
+        assertEquals(expected, bson.document)
+
+        var kmongoDsl = Person::address geoWithin polygon
+        assertEquals(expected, kmongoDsl.document)
+
+        // Using Bson
+        val bsonGeometry = BsonDocument.parse(geometry).getDocument("${'$'}geometry")
+        bson = geoWithin(Person::address, bsonGeometry)
+        assertEquals(expected, bson.document)
+
+        kmongoDsl = Person::address geoWithin bsonGeometry
+        assertEquals(expected, kmongoDsl.document)
+    }
+
+    @Test
+    fun testGeoWithinBoxSupport() {
+        val expected =
+            BsonDocument.parse("""{"address": {"${'$'}geoWithin": {"${'$'}box": [[1.0, 2.0], [3.0, 4.0]]}}}""")
+        val bson = geoWithinBox(Person::address, 1.0, 2.0, 3.0, 4.0)
+        assertEquals(expected, bson.document)
+
+        val kmongoDsl = Person::address.geoWithinBox(1.0, 2.0, 3.0, 4.0)
+        assertEquals(expected, kmongoDsl.document)
+    }
+
+    @Test
+    fun testGeoWithinPolygonSupport() {
+        val expected =
+            BsonDocument.parse(
+                """{"address": {"${'$'}geoWithin": {"${'$'}polygon": [[0.0, 0.0], [1.0, 2.0], [2.0, 0.0]]}}}""")
+        val bson = geoWithinPolygon(Person::address, listOf(listOf(0.0, 0.0), listOf(1.0, 2.0), listOf(2.0, 0.0)))
+        assertEquals(expected, bson.document)
+
+        val kmongoDsl = Person::address.geoWithinPolygon(listOf(listOf(0.0, 0.0), listOf(1.0, 2.0), listOf(2.0, 0.0)))
+        assertEquals(expected, kmongoDsl.document)
+    }
+
+    @Test
+    fun testGeoWithinCenterSupport() {
+        val expected =
+            BsonDocument.parse("""{"address": {"${'$'}geoWithin": {"${'$'}center": [[1.0, 2.0], 30.0]}}}""")
+        val bson = geoWithinCenter(Person::address, 1.0, 2.0, 30.0)
+        assertEquals(expected, bson.document)
+
+        val kmongoDsl = Person::address.geoWithinCenter(1.0, 2.0, 30.0)
+        assertEquals(expected, kmongoDsl.document)
+    }
+
+    @Test
+    fun testGeoWithinCenterSphereSupport() {
+        val expected =
+            BsonDocument.parse("""{"address": {"${'$'}geoWithin": {"${'$'}centerSphere": [[1.0, 2.0], 30.0]}}}""")
+        val bson = geoWithinCenterSphere(Person::address, 1.0, 2.0, 30.0)
+        assertEquals(expected, bson.document)
+
+        val kmongoDsl = Person::address.geoWithinCenterSphere(1.0, 2.0, 30.0)
+        assertEquals(expected, kmongoDsl.document)
+    }
+
+    @Test
+    fun testGeoIntersectsSupport() {
+        val geometry =
+            """{"${'$'}geometry": {"type": "Polygon",
+            | "coordinates": [[[1.0, 2.0], [2.0, 3.0], [3.0, 4.0], [1.0, 2.0]]]}}"""
+                .trimMargin()
+        val expected = BsonDocument.parse("""{"address": {"${'$'}geoIntersects": $geometry}}""")
+        val polygon = Polygon(listOf(Position(1.0, 2.0), Position(2.0, 3.0), Position(3.0, 4.0), Position(1.0, 2.0)))
+        var bson = geoIntersects(Person::address, polygon)
+        assertEquals(expected, bson.document)
+
+        var kmongoDsl = Person::address.geoIntersects(polygon)
+        assertEquals(expected, kmongoDsl.document)
+
+        // Using Bson
+        val bsonGeometry = BsonDocument.parse(geometry).getDocument("${'$'}geometry")
+        bson = geoIntersects(Person::address, bsonGeometry)
+        assertEquals(expected, bson.document)
+
+        kmongoDsl = Person::address.geoIntersects(bsonGeometry)
+        assertEquals(expected, kmongoDsl.document)
+    }
+
+    @Test
+    fun testNearSupport() {
+        var geometry = """{"${'$'}geometry": {"type": "Point", "coordinates": [1.0, 2.0]}}"""
+        var expected = BsonDocument.parse("""{"address": {"${'$'}near": $geometry}}""")
+        val point = Point(Position(1.0, 2.0))
+        var bson = near(Person::address, point)
+        assertEquals(expected, bson.document)
+
+        var kmongoDsl = Person::address.near(point)
+        assertEquals(expected, kmongoDsl.document)
+
+        // Using Bson
+        var bsonGeometry = BsonDocument.parse(geometry).getDocument("${'$'}geometry")
+        bson = near(Person::address, bsonGeometry)
+        assertEquals(expected, bson.document)
+
+        kmongoDsl = Person::address.near(bsonGeometry)
+        assertEquals(expected, kmongoDsl.document)
+
+        // Using short api
+        expected = BsonDocument.parse("""{"address": {"${'$'}near": [1.0, 2.0]}}""")
+        bson = near(Person::address, 1.0, 2.0)
+        assertEquals(expected, bson.document)
+
+        kmongoDsl = Person::address.near(1.0, 2.0)
+        assertEquals(expected, kmongoDsl.document)
+
+        // With optionals
+        geometry =
+            """{"${'$'}geometry": {"type": "Point", "coordinates": [1.0, 2.0]},
+            |"${'$'}maxDistance": 10.0, "${'$'}minDistance": 1.0}"""
+                .trimMargin()
+        expected = BsonDocument.parse("""{"address": {"${'$'}near": $geometry}}""")
+        bson = near(Person::address, point, 10.0, 1.0)
+        assertEquals(expected, bson.document)
+
+        kmongoDsl = Person::address.near(point, 10.0, 1.0)
+        assertEquals(expected, kmongoDsl.document)
+
+        // Using Bson
+        bsonGeometry = BsonDocument.parse(geometry).getDocument("${'$'}geometry")
+        bson = near(Person::address, bsonGeometry, 10.0, 1.0)
+        assertEquals(expected, bson.document)
+
+        kmongoDsl = Person::address.near(bsonGeometry, 10.0, 1.0)
+        assertEquals(expected, kmongoDsl.document)
+
+        // Using short api
+        expected =
+            BsonDocument.parse(
+                """{"address": {"${'$'}near": [1.0, 2.0], "${'$'}maxDistance": 10.0, "${'$'}minDistance": 1.0}}""")
+        bson = near(Person::address, 1.0, 2.0, 10.0, 1.0)
+        assertEquals(expected, bson.document)
+
+        kmongoDsl = Person::address.near(1.0, 2.0, 10.0, 1.0)
+        assertEquals(expected, kmongoDsl.document)
+    }
+
+    @Test
+    fun testNearSphereSupport() {
+        var geometry = """{"${'$'}geometry": {"type": "Point", "coordinates": [1.0, 2.0]}}"""
+        var expected = BsonDocument.parse("""{"address": {"${'$'}nearSphere": $geometry}}""")
+        val point = Point(Position(1.0, 2.0))
+        var bson = nearSphere(Person::address, point)
+        assertEquals(expected, bson.document)
+
+        var kmongoDsl = Person::address.nearSphere(point)
+        assertEquals(expected, kmongoDsl.document)
+
+        // Using Bson
+        var bsonGeometry = BsonDocument.parse(geometry).getDocument("${'$'}geometry")
+        bson = nearSphere(Person::address, bsonGeometry)
+        assertEquals(expected, bson.document)
+
+        kmongoDsl = Person::address.nearSphere(bsonGeometry)
+        assertEquals(expected, kmongoDsl.document)
+
+        // Using short api
+        expected = BsonDocument.parse("""{"address": {"${'$'}nearSphere": [1.0, 2.0]}}""")
+        bson = nearSphere(Person::address, 1.0, 2.0)
+        assertEquals(expected, bson.document)
+
+        kmongoDsl = Person::address.nearSphere(1.0, 2.0)
+        assertEquals(expected, kmongoDsl.document)
+
+        // With optionals
+        geometry =
+            """{"${'$'}geometry": {"type": "Point", "coordinates": [1.0, 2.0]},
+            |"${'$'}maxDistance": 10.0, "${'$'}minDistance": 1.0}"""
+                .trimMargin()
+        expected = BsonDocument.parse("""{"address": {"${'$'}nearSphere": $geometry}}""")
+        bson = nearSphere(Person::address, point, 10.0, 1.0)
+        assertEquals(expected, bson.document)
+
+        kmongoDsl = Person::address.nearSphere(point, 10.0, 1.0)
+        assertEquals(expected, kmongoDsl.document)
+
+        // Using Bson
+        bsonGeometry = BsonDocument.parse(geometry).getDocument("${'$'}geometry")
+        bson = nearSphere(Person::address, bsonGeometry, 10.0, 1.0)
+        assertEquals(expected, bson.document)
+
+        kmongoDsl = Person::address.nearSphere(bsonGeometry, 10.0, 1.0)
+        assertEquals(expected, kmongoDsl.document)
+
+        // Using short api
+        expected =
+            BsonDocument.parse(
+                """{"address": {"${'$'}nearSphere": [1.0, 2.0],
+                |"${'$'}maxDistance": 10.0, "${'$'}minDistance": 1.0}}"""
+                    .trimMargin())
+        bson = nearSphere(Person::address, 1.0, 2.0, 10.0, 1.0)
+        assertEquals(expected, bson.document)
+
+        kmongoDsl = Person::address.nearSphere(1.0, 2.0, 10.0, 1.0)
+        assertEquals(expected, kmongoDsl.document)
+    }
+
+    @Test
+    fun testJsonSchemaSupport() {
+        val expected = BsonDocument.parse("""{"${'$'}jsonSchema": {"bsonType": "object"}}""")
+
+        val bson = jsonSchema(BsonDocument.parse("""{"bsonType": "object"}"""))
+        assertEquals(expected, bson.document)
+    }
+
+    private val Bson.document: BsonDocument
+        get() = toBsonDocument(BsonDocument::class.java, MongoClientSettings.getDefaultCodecRegistry())
+}
diff --git a/driver-kotlin-extensions/src/test/kotlin/com/mongodb/kotlin/client/model/IndexesTest.kt b/driver-kotlin-extensions/src/test/kotlin/com/mongodb/kotlin/client/model/IndexesTest.kt
new file mode 100644
index 00000000000..6c4646dd7f6
--- /dev/null
+++ b/driver-kotlin-extensions/src/test/kotlin/com/mongodb/kotlin/client/model/IndexesTest.kt
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ * Copyright (C) 2016/2022 Litote
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @custom-license-header
+ */
+package com.mongodb.kotlin.client.model
+
+import com.mongodb.client.model.Indexes
+import com.mongodb.client.model.Indexes.compoundIndex
+import com.mongodb.kotlin.client.model.Indexes.ascending
+import com.mongodb.kotlin.client.model.Indexes.descending
+import com.mongodb.kotlin.client.model.Indexes.geo2d
+import com.mongodb.kotlin.client.model.Indexes.geo2dsphere
+import com.mongodb.kotlin.client.model.Indexes.hashed
+import com.mongodb.kotlin.client.model.Indexes.text
+import kotlin.test.Test
+import kotlin.test.assertEquals
+import org.bson.BsonDocument
+import org.bson.conversions.Bson
+
+class IndexesTest {
+
+    @Test
+    fun `ascending index`() {
+        assertEquals(""" {name: 1} """, ascending(Person::name))
+        assertEquals(""" {name: 1, age: 1} """, ascending(Person::name, Person::age))
+        assertEquals(""" {name: 1, age: 1} """, ascending(listOf(Person::name, Person::age)))
+    }
+
+    @Test
+    fun `descending index`() {
+        assertEquals(""" {name: -1} """, descending(Person::name))
+        assertEquals(""" {name: -1, age: -1} """, descending(Person::name, Person::age))
+        assertEquals(""" {name: -1, age: -1} """, descending(listOf(Person::name, Person::age)))
+    }
+
+    @Test
+    fun `geo2dsphere index`() {
+        assertEquals(""" {name: "2dsphere"} """, geo2dsphere(Person::name))
+        assertEquals(""" {name: "2dsphere", age: "2dsphere"} """, geo2dsphere(Person::name, Person::age))
+        assertEquals(""" {name: "2dsphere", age: "2dsphere"} """, geo2dsphere(listOf(Person::name, Person::age)))
+    }
+
+    @Test
+    fun `geo2d index`() {
+        assertEquals(""" {name: "2d"} """, geo2d(Person::name))
+    }
+
+    @Test
+    fun `text helper`() {
+        assertEquals(""" {name: "text"} """, text(Person::name))
+        assertEquals(""" { "${'$'}**" : "text"} """, Indexes.text())
+    }
+
+    @Test
+    fun `hashed index`() {
+        assertEquals(""" {name: "hashed"} """, hashed(Person::name))
+    }
+
+    @Test
+    fun `compound index`() {
+        assertEquals(""" {name : 1, age : -1} """, compoundIndex(ascending(Person::name), descending(Person::age)))
+    }
+
+    @Test
+    fun `should test equals on CompoundIndex`() {
+        assertEquals(
+            compoundIndex(ascending(Person::name), descending(Person::age)),
+            compoundIndex(ascending(Person::name), descending(Person::age)))
+
+        assertEquals(
+            compoundIndex(listOf(ascending(Person::name), descending(Person::age))),
+            compoundIndex(listOf(ascending(Person::name), descending(Person::age))))
+    }
+
+    // Utils
+    private data class Person(val name: String, val age: Int)
+
+    private fun assertEquals(expected: String, result: Bson) =
+        assertEquals(BsonDocument.parse(expected), result.toBsonDocument())
+}
diff --git a/driver-kotlin-extensions/src/test/kotlin/com/mongodb/kotlin/client/model/KPropertiesTest.kt b/driver-kotlin-extensions/src/test/kotlin/com/mongodb/kotlin/client/model/KPropertiesTest.kt
new file mode 100644
index 00000000000..51e09675d72
--- /dev/null
+++ b/driver-kotlin-extensions/src/test/kotlin/com/mongodb/kotlin/client/model/KPropertiesTest.kt
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ * Copyright (C) 2016/2022 Litote
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.model
+
+import java.util.Locale
+import kotlin.test.Test
+import kotlin.test.assertEquals
+import kotlinx.serialization.SerialName
+import org.bson.codecs.pojo.annotations.BsonId
+import org.bson.codecs.pojo.annotations.BsonProperty
+import org.junit.jupiter.api.assertThrows
+
+class KPropertiesTest {
+
+    data class Restaurant(
+        @BsonId val a: String,
+        @BsonProperty("b") val bsonProperty: String,
+        @SerialName("c") val serialName: String,
+        val name: String,
+        val stringList: List<String>,
+        val localeMap: Map<Locale, Review>,
+        val reviews: List<Review>,
+        @BsonProperty("nested") val subDocument: Restaurant?
+    )
+
+    data class Review(
+        @BsonProperty("prop") val bsonProperty: String,
+        @SerialName("rating") val score: String,
+        val name: String,
+        @BsonProperty("old") val previous: Review?,
+        @BsonProperty("nested") val misc: List<String>
+    )
+
+    @Test
+    fun testPath() {
+        assertEquals("_id", Restaurant::a.path())
+        assertEquals("b", Restaurant::bsonProperty.path())
+        assertEquals("c", Restaurant::serialName.path())
+        assertEquals("name", Restaurant::name.path())
+        assertEquals("stringList", Restaurant::stringList.path())
+        assertEquals("localeMap", Restaurant::localeMap.path())
+        assertEquals("nested", Restaurant::subDocument.path())
+        assertEquals("reviews", Restaurant::reviews.path())
+
+        assertEquals("prop", Review::bsonProperty.path())
+        assertEquals("rating", Review::score.path())
+        assertEquals("name", Review::name.path())
+        assertEquals("old", Review::previous.path())
+    }
+
+    @Test
+    fun testDivOperator() {
+        assertEquals("nested._id", (Restaurant::subDocument / Restaurant::a).path())
+        assertEquals("nested.b", (Restaurant::subDocument / Restaurant::bsonProperty).path())
+        assertEquals("nested.c", (Restaurant::subDocument / Restaurant::serialName).path())
+        assertEquals("nested.name", (Restaurant::subDocument / Restaurant::name).path())
+        assertEquals("nested.stringList", (Restaurant::subDocument / Restaurant::stringList).path())
+        assertEquals("nested.localeMap", (Restaurant::subDocument / Restaurant::localeMap).path())
+        assertEquals("nested.nested", (Restaurant::subDocument / Restaurant::subDocument).path())
+    }
+
+    @Test
+    fun testRemOperator() {
+        assertEquals("nested.prop", (Restaurant::subDocument % Review::bsonProperty).path())
+        assertEquals("nested.rating", (Restaurant::subDocument % Review::score).path())
+        assertEquals("nested.name", (Restaurant::subDocument % Review::name).path())
+        assertEquals("nested.old", (Restaurant::subDocument % Review::previous).path())
+    }
+
+    @Test
+    fun testArrayPositionalOperator() {
+        assertEquals("reviews.\$", Restaurant::reviews.posOp.path())
+        assertEquals("reviews.rating", (Restaurant::reviews / Review::score).path())
+        assertEquals("reviews.nested.\$", (Restaurant::reviews / Review::misc).posOp.path())
+        assertEquals("reviews.\$.rating", (Restaurant::reviews.posOp / Review::score).path())
+    }
+
+    @Test
+    fun testArrayAllPositionalOperator() {
+        assertEquals("reviews.\$[]", Restaurant::reviews.allPosOp.path())
+        assertEquals("reviews.\$[].rating", (Restaurant::reviews.allPosOp / Review::score).path())
+        assertEquals("reviews.nested.\$[]", (Restaurant::reviews / Review::misc).allPosOp.path())
+    }
+
+    @Test
+    fun testArrayFilteredPositionalOperator() {
+        assertEquals("reviews.\$[elem]", Restaurant::reviews.filteredPosOp("elem").path())
+        assertEquals("reviews.\$[elem].rating", (Restaurant::reviews.filteredPosOp("elem") / Review::score).path())
+    }
+
+    @Test
+    fun testMapProjection() {
+        assertEquals("localeMap", Restaurant::localeMap.path())
+        assertEquals("localeMap.rating", (Restaurant::localeMap / Review::score).path())
+        assertEquals("localeMap.en", Restaurant::localeMap.keyProjection(Locale.ENGLISH).path())
+        assertEquals(
+            "localeMap.en.rating", (Restaurant::localeMap.keyProjection(Locale.ENGLISH) / Review::score).path())
+    }
+
+    @Test
+    fun testArrayIndexProperty() {
+        assertEquals("reviews.1.rating", (Restaurant::reviews.pos(1) / Review::score).path())
+    }
+
+    @Test
+    fun testKPropertyPath() {
+        val property = (Restaurant::subDocument / Restaurant::a)
+        assertThrows<UnsupportedOperationException> { property.annotations }
+        assertThrows<UnsupportedOperationException> { property.isAbstract }
+        assertThrows<UnsupportedOperationException> { property.isConst }
+        assertThrows<UnsupportedOperationException> { property.isFinal }
+        assertThrows<UnsupportedOperationException> { property.isLateinit }
+        assertThrows<UnsupportedOperationException> { property.isOpen }
+        assertThrows<UnsupportedOperationException> { property.isSuspend }
+        assertThrows<UnsupportedOperationException> { property.parameters }
+        assertThrows<UnsupportedOperationException> { property.returnType }
+        assertThrows<UnsupportedOperationException> { property.typeParameters }
+        assertThrows<UnsupportedOperationException> { property.visibility }
+
+        val restaurant = Restaurant("a", "b", "c", "name", listOf(), mapOf(), listOf(), null)
+        assertThrows<UnsupportedOperationException> { property.getter }
+        assertThrows<UnsupportedOperationException> { property.invoke(restaurant) }
+        assertThrows<UnsupportedOperationException> { property.call() }
+        assertThrows<UnsupportedOperationException> { property.callBy(mapOf()) }
+        assertThrows<UnsupportedOperationException> { property.get(restaurant) }
+        assertThrows<UnsupportedOperationException> { property.getDelegate(restaurant) }
+    }
+
+    @Test
+    fun testNoCacheCollisions() {
+        for (i in 1.rangeTo(25_000)) {
+            assertEquals("reviews.$i", Restaurant::reviews.pos(i).path())
+            assertEquals("reviews.$[identifier$i]", Restaurant::reviews.filteredPosOp("identifier$i").path())
+            assertEquals("localeMap.$i", Restaurant::localeMap.keyProjection(i).path())
+
+            val x = i / 2
+            assertEquals(
+                "reviews.$[identifier$x].rating",
+                (Restaurant::reviews.filteredPosOp("identifier$x") / Review::score).path())
+            assertEquals("reviews.$x.rating", (Restaurant::reviews.pos(x) / Review::score).path())
+            assertEquals("localeMap.$x.rating", (Restaurant::localeMap.keyProjection(x) / Review::score).path())
+        }
+    }
+}
diff --git a/driver-kotlin-extensions/src/test/kotlin/com/mongodb/kotlin/client/model/ProjectionTest.kt b/driver-kotlin-extensions/src/test/kotlin/com/mongodb/kotlin/client/model/ProjectionTest.kt
new file mode 100644
index 00000000000..2033713d4dc
--- /dev/null
+++ b/driver-kotlin-extensions/src/test/kotlin/com/mongodb/kotlin/client/model/ProjectionTest.kt
@@ -0,0 +1,239 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ * Copyright (C) 2016/2022 Litote
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @custom-license-header
+ */
+package com.mongodb.kotlin.client.model
+
+import com.mongodb.client.model.Aggregates.project
+import com.mongodb.client.model.Projections.excludeId
+import com.mongodb.client.model.Projections.fields
+import com.mongodb.kotlin.client.model.Filters.and
+import com.mongodb.kotlin.client.model.Filters.eq
+import com.mongodb.kotlin.client.model.Filters.gt
+import com.mongodb.kotlin.client.model.Projections.computed
+import com.mongodb.kotlin.client.model.Projections.computedSearchMeta
+import com.mongodb.kotlin.client.model.Projections.elemMatch
+import com.mongodb.kotlin.client.model.Projections.exclude
+import com.mongodb.kotlin.client.model.Projections.include
+import com.mongodb.kotlin.client.model.Projections.meta
+import com.mongodb.kotlin.client.model.Projections.metaSearchHighlights
+import com.mongodb.kotlin.client.model.Projections.metaSearchScore
+import com.mongodb.kotlin.client.model.Projections.metaTextScore
+import com.mongodb.kotlin.client.model.Projections.metaVectorSearchScore
+import com.mongodb.kotlin.client.model.Projections.projection
+import com.mongodb.kotlin.client.model.Projections.projectionWith
+import com.mongodb.kotlin.client.model.Projections.slice
+import kotlin.test.Test
+import kotlin.test.assertEquals
+import org.bson.BsonDocument
+import org.bson.conversions.Bson
+
+class ProjectionTest {
+
+    @Test
+    fun projection() {
+        assertEquals("\$name", Person::name.projection)
+        assertEquals("\$name.foo", Student::name.projectionWith("foo"))
+    }
+
+    @Test
+    fun include() {
+        assertEquals(
+            """{"name": 1, "age": 1, "results": 1, "address": 1}""",
+            include(Person::name, Person::age, Person::results, Person::address))
+
+        assertEquals("""{"name": 1, "age": 1}""", include(listOf(Person::name, Person::age)))
+        assertEquals("""{"name": 1, "age": 1}""", include(listOf(Person::name, Person::age, Person::name)))
+    }
+
+    @Test
+    fun exclude() {
+        assertEquals(
+            """{"name": 0, "age": 0, "results": 0, "address": 0}""",
+            exclude(Person::name, Person::age, Person::results, Person::address))
+        assertEquals("""{"name": 0, "age": 0}""", exclude(listOf(Person::name, Person::age)))
+        assertEquals("""{"name": 0, "age": 0}""", exclude(listOf(Person::name, Person::age, Person::name)))
+
+        assertEquals("""{"name": 0, "age": 0}""", exclude(listOf(Person::name, Person::age, Person::name)))
+
+        assertEquals("""{"_id": 0}""", excludeId())
+        assertEquals(
+            "Projections{projections=[{\"_id\": 0}, {\"name\": 1}]}",
+            fields(excludeId(), include(Person::name)).toString())
+    }
+
+    @Test
+    fun firstElem() {
+        assertEquals(""" {"name.${'$'}" : 1} """, Person::name.elemMatch)
+    }
+
+    @Test
+    fun elemMatch() {
+        val expected =
+            """
+            {"grades": {"${'$'}elemMatch": {"${'$'}and": [{"subject": "Math"}, {"score": {"${'$'}gt": 80}}]}}}
+            """
+        assertEquals(expected, Student::grades.elemMatch(and((Grade::subject eq "Math"), (Grade::score gt 80))))
+
+        assertEquals(Student::grades elemMatch (Grade::score gt 80), elemMatch(Student::grades, Grade::score gt 80))
+
+        // Should create string representation for elemMatch with filter
+        assertEquals(
+            "ElemMatch Projection{fieldName='grades'," +
+                " filter=And Filter{filters=[Filter{fieldName='score', value=90}, " +
+                "Filter{fieldName='subject', value=Math}]}}",
+            Student::grades.elemMatch(and(Grade::score eq 90, Grade::subject eq "Math")).toString())
+    }
+
+    @Test
+    fun slice() {
+        var expected = """
+            {"grades": {"${'$'}slice": -1}}
+        """
+
+        assertEquals(expected, Student::grades.slice(-1))
+
+        // skip one, limit to two
+        expected = """
+            {"grades": {"${'$'}slice": [1, 2]}}
+        """
+
+        assertEquals(expected, Student::grades.slice(1, 2))
+
+        // Combining projection
+        expected = """
+            {"name": 0, "grades": {"${'$'}slice": [2, 1]}}
+        """
+
+        assertEquals(expected, fields(exclude(Student::name), Student::grades.slice(2, 1)))
+    }
+
+    @Test
+    fun meta() {
+        var expected = """
+            {"score": {"${'$'}meta": "textScore"}}
+        """
+        assertEquals(expected, Grade::score.metaTextScore())
+
+        // combining
+        expected =
+            """
+            {"_id": 0, "score": {"${'$'}meta": "textScore"}, "grades": {"${'$'}elemMatch": {"score": {"${'$'}gt": 87}}}}
+            """
+        assertEquals(
+            expected, fields(excludeId(), Grade::score.metaTextScore(), Student::grades.elemMatch(Grade::score gt 87)))
+
+        expected = """
+            {"score": {"${'$'}meta": "searchScore"}}
+        """
+        assertEquals(expected, Grade::score.metaSearchScore())
+
+        expected = """
+            {"score": {"${'$'}meta": "searchHighlights"}}
+        """
+        assertEquals(expected, Grade::score.metaSearchHighlights())
+
+        expected = """
+            {"score": {"${'$'}meta": "vectorSearchScore"}}
+        """
+        assertEquals(expected, Grade::score.metaVectorSearchScore())
+        assertEquals(expected, Grade::score.meta("vectorSearchScore"))
+
+        expected = """
+            {"_id": 0, "score": {"${'$'}meta": "vectorSearchScore"}}
+        """
+        assertEquals(expected, fields(excludeId(), Grade::score meta "vectorSearchScore"))
+
+        assertEquals(Grade::score meta "vectorSearchScore", meta(Grade::score, "vectorSearchScore"))
+    }
+
+    @Test
+    fun `computed projection`() {
+        assertEquals(""" {"c": "${'$'}y"} """, "c" computed "\$y")
+
+        assertEquals(computed(Grade::score, Student::age), Grade::score computed Student::age)
+
+        assertEquals(
+            """{"${'$'}project": {"c": "${'$'}name", "score": "${'$'}age"}}""",
+            project(fields("c" computed Student::name, Grade::score computed Student::age)))
+
+        assertEquals(
+            fields("c" computed Student::name, Grade::score computed Student::age),
+            fields(computed("c", Student::name), Grade::score computed Student::age))
+
+        assertEquals(
+            """{"${'$'}project": {"c": "${'$'}name", "score": "${'$'}age"}}""",
+            project(fields("c" computed Student::name, Grade::score computed Student::age)))
+
+        assertEquals(
+            fields(listOf("c" computed Student::name, Grade::score computed Student::age)),
+            fields("c" computed Student::name, Grade::score computed Student::age))
+
+        // combine fields
+        assertEquals("{name : 1, age : 1, _id : 0}", fields(include(Student::name, Student::age), excludeId()))
+
+        assertEquals("{name : 1, age : 1, _id : 0}", fields(include(listOf(Student::name, Student::age)), excludeId()))
+
+        assertEquals("{name : 1, age : 0}", fields(include(Student::name, Student::age), exclude(Student::age)))
+
+        // computedSearchMeta
+        assertEquals("""{"name": "${'$'}${'$'}SEARCH_META"}""", Student::name.computedSearchMeta())
+        assertEquals(Student::name.computedSearchMeta(), computedSearchMeta(Student::name))
+
+        // Should create string representation for include and exclude
+        assertEquals("""{"age": 1, "name": 1}""", include(Student::name, Student::age, Student::name).toString())
+        assertEquals("""{"age": 0, "name": 0}""", exclude(Student::name, Student::age, Student::name).toString())
+        assertEquals("""{"_id": 0}""", excludeId().toString())
+
+        // Should create string representation for computed
+        assertEquals(
+            "Expression{name='c', expression=\$y}",
+            com.mongodb.client.model.Projections.computed("c", "\$y").toString())
+        assertEquals("Expression{name='c', expression=\$y}", ("c" computed "\$y").toString())
+        assertEquals("Expression{name='name', expression=\$y}", (Student::name computed "\$y").toString())
+    }
+
+    @Test
+    fun `array projection`() {
+        assertEquals("{ \"grades.${'$'}\": 1 }", include(Student::grades.posOp))
+        assertEquals("{ \"grades.comments.${'$'}\": 1 }", include((Student::grades / Grade::comments).posOp))
+    }
+
+    @Test
+    fun `projection in aggregation`() {
+        // Field Reference in Aggregation
+        assertEquals(
+            """ {"${'$'}project": {"score": "${'$'}grades.score"}} """,
+            project((Grade::score computed (Student::grades.projectionWith("score")))))
+
+        assertEquals(
+            "{\"${'$'}project\": {\"My Score\": \"${'$'}grades.score\"}}",
+            project("My Score" computed (Student::grades / Grade::score)))
+
+        assertEquals("{\"${'$'}project\": {\"My Age\": \"${'$'}age\"}}", project("My Age" computed Student::age))
+
+        assertEquals(
+            "{\"${'$'}project\": {\"My Age\": \"${'$'}age\"}}", project("My Age" computed Student::age.projection))
+    }
+
+    private data class Person(val name: String, val age: Int, val address: List<String>, val results: List<Int>)
+    private data class Student(val name: String, val age: Int, val grades: List<Grade>)
+    private data class Grade(val subject: String, val score: Int, val comments: List<String>)
+
+    private fun assertEquals(expected: String, result: Bson) =
+        assertEquals(BsonDocument.parse(expected), result.toBsonDocument())
+}
diff --git a/driver-kotlin-extensions/src/test/kotlin/com/mongodb/kotlin/client/model/SortsTest.kt b/driver-kotlin-extensions/src/test/kotlin/com/mongodb/kotlin/client/model/SortsTest.kt
new file mode 100644
index 00000000000..2a8ec84530f
--- /dev/null
+++ b/driver-kotlin-extensions/src/test/kotlin/com/mongodb/kotlin/client/model/SortsTest.kt
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ * Copyright (C) 2016/2022 Litote
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @custom-license-header
+ */
+package com.mongodb.kotlin.client.model
+
+import com.mongodb.client.model.Sorts.orderBy
+import com.mongodb.kotlin.client.model.Sorts.ascending
+import com.mongodb.kotlin.client.model.Sorts.descending
+import com.mongodb.kotlin.client.model.Sorts.metaTextScore
+import kotlin.test.Test
+import kotlin.test.assertEquals
+import org.bson.BsonDocument
+import org.bson.conversions.Bson
+
+class SortsTest {
+
+    @Test
+    fun ascending() {
+        assertEquals(""" {name : 1} """, ascending(Person::name))
+        assertEquals(""" {name : 1, age: 1} """, ascending(Person::name, Person::age))
+        assertEquals(""" {name : 1, age: 1} """, ascending(listOf(Person::name, Person::age)))
+    }
+
+    @Test
+    fun descending() {
+        assertEquals(""" {name : -1} """, descending(Person::name))
+        assertEquals(""" {name : -1, age: -1} """, descending(Person::name, Person::age))
+        assertEquals(""" {name : -1, age: -1} """, descending(listOf(Person::name, Person::age)))
+    }
+
+    @Test
+    fun metaTextScore() {
+        assertEquals(""" {name : {${'$'}meta : "textScore"}} """, metaTextScore(Person::name))
+    }
+
+    @Test
+    fun orderBy() {
+        assertEquals(""" {name : 1, age : -1} """, orderBy(ascending(Person::name), descending(Person::age)))
+        assertEquals(""" {name : 1, age : -1} """, orderBy(listOf(ascending(Person::name), descending(Person::age))))
+        assertEquals(
+            """ {name : -1, age : -1} """,
+            orderBy(ascending(Person::name), descending(Person::age), descending(Person::name)))
+        assertEquals(
+            """ {name : 1, age : 1, results: -1, address: -1} """,
+            orderBy(ascending(Person::name, Person::age), descending(Person::results, Person::address)))
+    }
+
+    @Test
+    fun `should create string representation for compound sorts`() {
+        assertEquals(
+            """Compound Sort{sorts=[{"name": 1, "age": 1}, {"results": -1, "address": -1}]}""",
+            orderBy(ascending(Person::name, Person::age), descending(Person::results, Person::address)).toString())
+    }
+
+    private data class Person(val name: String, val age: Int, val address: List<String>, val results: List<Int>)
+    private fun assertEquals(expected: String, result: Bson) =
+        assertEquals(BsonDocument.parse(expected), result.toBsonDocument())
+}
diff --git a/driver-kotlin-extensions/src/test/kotlin/com/mongodb/kotlin/client/model/UpdatesTest.kt b/driver-kotlin-extensions/src/test/kotlin/com/mongodb/kotlin/client/model/UpdatesTest.kt
new file mode 100644
index 00000000000..9b07d6b8345
--- /dev/null
+++ b/driver-kotlin-extensions/src/test/kotlin/com/mongodb/kotlin/client/model/UpdatesTest.kt
@@ -0,0 +1,287 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ * Copyright (C) 2016/2022 Litote
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * @custom-license-header
+ */
+package com.mongodb.kotlin.client.model
+
+import com.mongodb.client.model.PushOptions
+import com.mongodb.kotlin.client.model.Filters.gte
+import com.mongodb.kotlin.client.model.Updates.addEachToSet
+import com.mongodb.kotlin.client.model.Updates.addToSet
+import com.mongodb.kotlin.client.model.Updates.bitwiseAnd
+import com.mongodb.kotlin.client.model.Updates.bitwiseOr
+import com.mongodb.kotlin.client.model.Updates.bitwiseXor
+import com.mongodb.kotlin.client.model.Updates.combine
+import com.mongodb.kotlin.client.model.Updates.currentDate
+import com.mongodb.kotlin.client.model.Updates.currentTimestamp
+import com.mongodb.kotlin.client.model.Updates.inc
+import com.mongodb.kotlin.client.model.Updates.max
+import com.mongodb.kotlin.client.model.Updates.min
+import com.mongodb.kotlin.client.model.Updates.mul
+import com.mongodb.kotlin.client.model.Updates.popFirst
+import com.mongodb.kotlin.client.model.Updates.popLast
+import com.mongodb.kotlin.client.model.Updates.pull
+import com.mongodb.kotlin.client.model.Updates.pullAll
+import com.mongodb.kotlin.client.model.Updates.pullByFilter
+import com.mongodb.kotlin.client.model.Updates.push
+import com.mongodb.kotlin.client.model.Updates.pushEach
+import com.mongodb.kotlin.client.model.Updates.rename
+import com.mongodb.kotlin.client.model.Updates.set
+import com.mongodb.kotlin.client.model.Updates.setOnInsert
+import com.mongodb.kotlin.client.model.Updates.unset
+import java.time.Instant
+import java.util.Date
+import kotlin.test.Test
+import kotlin.test.assertEquals
+import org.bson.BsonDocument
+import org.bson.Document
+import org.bson.codecs.configuration.CodecRegistries
+import org.bson.codecs.configuration.CodecRegistries.fromProviders
+import org.bson.codecs.configuration.CodecRegistry
+import org.bson.codecs.pojo.PojoCodecProvider
+import org.bson.conversions.Bson
+
+class UpdatesTest {
+    @Test
+    fun `should render ${'$'}set`() {
+        assertEquals(""" {"${'$'}set": {"name": "foo"}} """, set(Person::name, "foo"))
+        assertEquals(""" {"${'$'}set": {"name": null}} """, set(Person::name, null))
+
+        assertEquals(set(Person::name, "foo"), Person::name set "foo")
+        assertEquals(set(Person::name, null), Person::name set null)
+    }
+
+    @Test
+    fun `should render ${'$'}setOnInsert`() {
+        assertEquals(""" {${'$'}setOnInsert : { age : 42} } """, setOnInsert(Person::age, 42))
+        assertEquals(setOnInsert(Person::age, 42), Person::age setOnInsert 42)
+
+        assertEquals(""" {${'$'}setOnInsert : { age : null} } """, setOnInsert(Person::age, null))
+        assertEquals(setOnInsert(Person::age, null), Person::age setOnInsert null)
+
+        assertEquals(
+            """ {"${'$'}setOnInsert": {"name": "foo", "age": 42}}""",
+            combine(listOf(setOnInsert(Person::name, "foo"), setOnInsert(Person::age, 42))))
+    }
+
+    @Test
+    fun `should render ${'$'}unset`() {
+        assertEquals(""" {"${'$'}unset": {"name": ""}} """, unset(Person::name))
+    }
+
+    @Test
+    fun `should render ${'$'}rename`() {
+        assertEquals(""" {${'$'}rename : { "age" : "score"} } """, rename(Person::age, Grade::score))
+    }
+
+    @Test
+    fun `should render ${'$'}inc`() {
+        assertEquals(""" {${'$'}inc : { age : 1} } """, inc(Person::age, 1))
+        assertEquals(inc(Person::age, 1), Person::age inc 1)
+
+        assertEquals(""" {${'$'}inc : { age : {${'$'}numberLong : "42"}} } """, inc(Person::age, 42L))
+        assertEquals(""" {${'$'}inc : { age : 3.14 } } """, inc(Person::age, 3.14))
+    }
+
+    @Test
+    fun `should render ${'$'}mul`() {
+        assertEquals(""" {${'$'}mul : { "age" : 1} } """, mul(Person::age, 1))
+        assertEquals(""" {${'$'}mul : { "age" : 1} } """, Person::age mul 1)
+
+        assertEquals(""" {${'$'}mul : { "age" : {${'$'}numberLong : "5"}} } """, mul(Person::age, 5L))
+        assertEquals(""" {${'$'}mul : { "age" : 3.14} } """, mul(Person::age, 3.14))
+    }
+
+    @Test
+    fun `should render ${'$'}min`() {
+        assertEquals(""" {${'$'}min : { age : 42} } """, min(Person::age, 42))
+        assertEquals(""" {${'$'}min : { age : 42} } """, Person::age min 42)
+    }
+
+    @Test
+    fun `should render max`() {
+        assertEquals(""" {${'$'}max : { age : 42} } """, max(Person::age, 42))
+        assertEquals(""" {${'$'}max : { age : 42} } """, Person::age max 42)
+    }
+
+    @Test
+    fun `should render ${'$'}currentDate`() {
+        assertEquals(""" {${'$'}currentDate : { date : true} } """, currentDate(Person::date))
+        assertEquals(
+            """ {${'$'}currentDate : { date : {${'$'}type : "timestamp"}} } """, currentTimestamp(Person::date))
+    }
+
+    @Test
+    fun `should render ${'$'}addToSet`() {
+        assertEquals(""" {${'$'}addToSet : { results : 1} } """, addToSet(Person::results, 1))
+        assertEquals(""" {${'$'}addToSet : { results : 1} } """, Person::results addToSet 1)
+        assertEquals(
+            """ {"${'$'}addToSet": {"results": {"${'$'}each": [1, 2, 3]}}} """,
+            addEachToSet(Person::results, listOf(1, 2, 3)))
+    }
+
+    @Test
+    fun `should render ${'$'}push`() {
+        assertEquals(""" {${'$'}push : { results : 1} } """, push(Person::results, 1))
+        assertEquals(
+            """ {"${'$'}push": {"results": {"${'$'}each": [1, 2, 3]}}} """,
+            pushEach(Person::results, listOf(1, 2, 3), options = PushOptions()))
+
+        assertEquals(
+            """ {"${'$'}push": {"grades": {"${'$'}each":
+                |[{"comments": [], "score": 11, "subject": "Science"}],
+                | "${'$'}position": 0, "${'$'}slice": 3, "${'$'}sort": {"score": -1}}}} """
+                .trimMargin(),
+            pushEach(
+                Student::grades,
+                listOf(Grade("Science", 11, emptyList())),
+                options = PushOptions().position(0).slice(3).sortDocument(Document("score", -1))))
+
+        assertEquals(
+            """ {${'$'}push : { results : { ${'$'}each :
+                |[89, 65], ${'$'}position : 0, ${'$'}slice : 3, ${'$'}sort : -1 } } } """
+                .trimMargin(),
+            pushEach(Person::results, listOf(89, 65), options = PushOptions().position(0).slice(3).sort(-1)))
+    }
+
+    @Test
+    fun `should render ${'$'}pull`() {
+        assertEquals(""" {${'$'}pull : { address : "foo"} } """, pull(Person::address, "foo"))
+        assertEquals(""" {${'$'}pull : { address : "foo"} } """, Person::address pull "foo")
+
+        assertEquals(""" {${'$'}pull : { score : { ${'$'}gte : 5 }} } """, pullByFilter(Grade::score gte 5))
+        assertEquals(
+            """ {"${'$'}pull": {"grades": {"score": {"${'$'}gte": 5}}}} """,
+            pullByFilter(Student::grades, Grade::score gte 5))
+        assertEquals(
+            """ {"${'$'}pull": {"grades": {"score": {"${'$'}gte": 5}}}} """,
+            Student::grades pullByFilter (Grade::score gte 5))
+    }
+
+    @Test
+    fun `should render ${'$'}pullAll`() {
+        assertEquals(""" {${'$'}pullAll : { results : []} } """, pullAll(Person::results, emptyList()))
+        assertEquals(""" {${'$'}pullAll : { results : []} } """, Person::results pullAll emptyList())
+        assertEquals(""" {${'$'}pullAll : { results : [1,2,3]} } """, pullAll(Person::results, listOf(1, 2, 3)))
+    }
+
+    @Test
+    fun `should render ${'$'}pop`() {
+        assertEquals(""" {${'$'}pop : { address : -1} } """, popFirst(Person::address))
+        assertEquals(""" {${'$'}pop : { address : 1} } """, popLast(Person::address))
+    }
+
+    @Test
+    fun `should render ${'$'}bit`() {
+        assertEquals(""" {${'$'}bit : { "score" : {and : 5} } } """, bitwiseAnd(Grade::score, 5))
+        assertEquals(""" {${'$'}bit : { "score" : {and : 5} } } """, Grade::score bitwiseAnd 5)
+        assertEquals(
+            """ {${'$'}bit : { "score" : {and : {${'$'}numberLong : "5"}} } } """, bitwiseAnd(Grade::score, 5L))
+        assertEquals(
+            """ {${'$'}bit : { "score" : {and : {${'$'}numberLong : "5"}} } } """, Grade::score bitwiseAnd (5L))
+        assertEquals(""" {${'$'}bit : { "score" : {or : 5} } } """, bitwiseOr(Grade::score, 5))
+        assertEquals(""" {${'$'}bit : { "score" : {or : 5} } } """, Grade::score bitwiseOr 5)
+        assertEquals(""" {${'$'}bit : { "score" : {or : {${'$'}numberLong : "5"}} } } """, bitwiseOr(Grade::score, 5L))
+        assertEquals(""" {${'$'}bit : { "score" : {or : {${'$'}numberLong : "5"}} } } """, Grade::score bitwiseOr 5L)
+        assertEquals(""" {${'$'}bit : { "score" : {xor : 5} } } """, bitwiseXor(Grade::score, 5))
+        assertEquals(""" {${'$'}bit : { "score" : {xor : 5} } } """, Grade::score bitwiseXor 5)
+        assertEquals(""" {${'$'}bit : { "score" : {xor : {${'$'}numberLong : "5"}} } } """, Grade::score bitwiseXor 5L)
+        assertEquals(
+            """ {${'$'}bit : { "score" : {xor : {${'$'}numberLong : "5"}} } } """, bitwiseXor(Grade::score, 5L))
+    }
+
+    @Test
+    fun `should combine updates`() {
+        assertEquals(""" {${'$'}set : { name : "foo"} } """, combine(set(Person::name, "foo")))
+        assertEquals(
+            """ {${'$'}set : { name : "foo", age: 42} } """, combine(set(Person::name, "foo"), set(Person::age, 42)))
+        assertEquals(
+            """ {${'$'}set : { name : "bar"} } """, combine(set(Person::name, "foo"), set(Person::name, "bar")))
+        assertEquals(
+            """ {"${'$'}set": {"name": "foo", "date": {"${'$'}date": "1970-01-01T00:00:00Z"}},
+                | "${'$'}inc": {"age": 3, "floatField": 3.14}} """
+                .trimMargin(),
+            combine(
+                set(Person::name, "foo"),
+                inc(Person::age, 3),
+                set(Person::date, Date.from(Instant.EPOCH)),
+                inc(Person::floatField, 3.14)))
+
+        assertEquals(""" {${'$'}set : { "name" : "foo"} } """, combine(combine(set(Person::name, "foo"))))
+        assertEquals(
+            """ {${'$'}set : { "name" : "foo", "age": 42} } """,
+            combine(combine(set(Person::name, "foo"), set(Person::age, 42))))
+        assertEquals(
+            """ {${'$'}set : { "name" : "bar"} } """,
+            combine(combine(set(Person::name, "foo"), set(Person::name, "bar"))))
+
+        assertEquals(
+            """ {"${'$'}set": {"name": "bar"}, "${'$'}inc": {"age": 3, "floatField": 3.14}} """,
+            combine(
+                combine(
+                    set(Person::name, "foo"),
+                    inc(Person::age, 3),
+                    set(Person::name, "bar"),
+                    inc(Person::floatField, 3.14))))
+    }
+
+    @Test
+    fun `should create string representation for simple updates`() {
+        assertEquals(
+            """Update{fieldName='name', operator='${'$'}set', value=foo}""", set(Person::name, "foo").toString())
+    }
+
+    @Test
+    fun `should create string representation for with each update`() {
+        assertEquals(
+            """Each Update{fieldName='results', operator='${'$'}addToSet', values=[1, 2, 3]}""",
+            addEachToSet(Person::results, listOf(1, 2, 3)).toString())
+    }
+
+    @Test
+    fun `should test equals for SimpleBsonKeyValue`() {
+        assertEquals(setOnInsert(Person::name, "foo"), setOnInsert(Person::name, "foo"))
+        assertEquals(setOnInsert(Person::name, null), setOnInsert(Person::name, null))
+    }
+
+    @Test
+    fun `should test hashCode for SimpleBsonKeyValue`() {
+        assertEquals(setOnInsert(Person::name, "foo").hashCode(), setOnInsert(Person::name, "foo").hashCode())
+        assertEquals(setOnInsert(Person::name, null).hashCode(), setOnInsert(Person::name, null).hashCode())
+    }
+
+    // Utils
+    private data class Person(
+        val name: String,
+        val age: Int,
+        val address: List<String>,
+        val results: List<Int>,
+        val date: Date,
+        val floatField: Float
+    )
+
+    private data class Student(val name: String, val grade: Int, val grades: List<Grade>)
+    data class Grade(val subject: String, val score: Int?, val comments: List<String>)
+
+    private val defaultsAndPojoCodecRegistry: CodecRegistry =
+        CodecRegistries.fromRegistries(
+            Bson.DEFAULT_CODEC_REGISTRY, fromProviders(PojoCodecProvider.builder().automatic(true).build()))
+
+    private fun assertEquals(expected: String, result: Bson) =
+        assertEquals(
+            BsonDocument.parse(expected), result.toBsonDocument(BsonDocument::class.java, defaultsAndPojoCodecRegistry))
+}
diff --git a/driver-kotlin-sync/build.gradle.kts b/driver-kotlin-sync/build.gradle.kts
new file mode 100644
index 00000000000..5da1a5eec26
--- /dev/null
+++ b/driver-kotlin-sync/build.gradle.kts
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import ProjectExtensions.configureJarManifest
+import ProjectExtensions.configureMavenPublication
+
+plugins {
+    id("project.kotlin")
+    id("conventions.test-artifacts")
+    id("conventions.test-artifacts-runtime-dependencies")
+}
+
+base.archivesName.set("mongodb-driver-kotlin-sync")
+
+dependencies {
+    api(project(path = ":bson", configuration = "default"))
+    api(project(path = ":driver-sync", configuration = "default"))
+    implementation(project(path = ":bson-kotlin", configuration = "default"))
+
+    integrationTestImplementation(project(path = ":bson", configuration = "testArtifacts"))
+    integrationTestImplementation(project(path = ":driver-sync", configuration = "testArtifacts"))
+    integrationTestImplementation(project(path = ":driver-core", configuration = "testArtifacts"))
+}
+
+configureMavenPublication {
+    pom {
+        name.set("MongoDB Kotlin Driver")
+        description.set("The MongoDB Kotlin Driver")
+    }
+}
+
+configureJarManifest {
+    attributes["Automatic-Module-Name"] = "org.mongodb.driver.kotlin.sync"
+    attributes["Bundle-SymbolicName"] = "org.mongodb.mongodb-driver-kotlin-sync"
+}
diff --git a/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/SmokeTests.kt b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/SmokeTests.kt
new file mode 100644
index 00000000000..3fc601d6425
--- /dev/null
+++ b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/SmokeTests.kt
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client
+
+import com.mongodb.client.Fixture.getDefaultDatabaseName
+import com.mongodb.client.Fixture.getMongoClientSettings
+import kotlin.test.assertContentEquals
+import org.bson.Document
+import org.junit.jupiter.api.AfterAll
+import org.junit.jupiter.api.AfterEach
+import org.junit.jupiter.api.Assertions.assertEquals
+import org.junit.jupiter.api.BeforeAll
+import org.junit.jupiter.api.DisplayName
+import org.junit.jupiter.api.Test
+
+class SmokeTests {
+
+    @AfterEach
+    fun afterEach() {
+        database?.drop()
+    }
+
+    @Test
+    @DisplayName("distinct and return nulls")
+    fun testDistinctNullable() {
+        collection!!.insertMany(
+            listOf(
+                Document.parse("{_id: 1, a: 0}"),
+                Document.parse("{_id: 2, a: 1}"),
+                Document.parse("{_id: 3, a: 0}"),
+                Document.parse("{_id: 4, a: null}")))
+
+        val actual = collection!!.distinct<Int>("a").toList().toSet()
+        assertEquals(setOf(null, 0, 1), actual)
+    }
+
+    @Test
+    @DisplayName("mapping can return nulls")
+    fun testMongoIterableMap() {
+        collection!!.insertMany(
+            listOf(
+                Document.parse("{_id: 1, a: 0}"),
+                Document.parse("{_id: 2, a: 1}"),
+                Document.parse("{_id: 3, a: 0}"),
+                Document.parse("{_id: 4, a: null}")))
+
+        val actual = collection!!.find().map { it["a"] }.toList()
+        assertContentEquals(listOf(0, 1, 0, null), actual)
+    }
+
+    companion object {
+
+        private var mongoClient: MongoClient? = null
+        private var database: MongoDatabase? = null
+        private var collection: MongoCollection<Document>? = null
+
+        @BeforeAll
+        @JvmStatic
+        internal fun beforeAll() {
+            mongoClient = MongoClient.create(getMongoClientSettings())
+            database = mongoClient?.getDatabase(getDefaultDatabaseName())
+            database?.drop()
+            collection = database?.getCollection("SmokeTests")
+        }
+
+        @AfterAll
+        @JvmStatic
+        internal fun afterAll() {
+            collection = null
+            database?.drop()
+            database = null
+            mongoClient?.close()
+            mongoClient = null
+        }
+    }
+}
diff --git a/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/UnifiedCrudTest.kt b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/UnifiedCrudTest.kt
new file mode 100644
index 00000000000..7296ab204bb
--- /dev/null
+++ b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/UnifiedCrudTest.kt
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client
+
+import java.io.IOException
+import java.net.URISyntaxException
+import org.junit.jupiter.params.provider.Arguments
+
+internal class UnifiedCrudTest() : UnifiedTest() {
+    companion object {
+        @JvmStatic
+        @Throws(URISyntaxException::class, IOException::class)
+        fun data(): Collection<Arguments>? {
+            return getTestData("crud", false, Language.KOTLIN)
+        }
+    }
+}
diff --git a/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/UnifiedTest.kt b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/UnifiedTest.kt
new file mode 100644
index 00000000000..7c115efc6f8
--- /dev/null
+++ b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/UnifiedTest.kt
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client
+
+import com.mongodb.ClientEncryptionSettings
+import com.mongodb.MongoClientSettings
+import com.mongodb.client.MongoClient as JMongoClient
+import com.mongodb.client.MongoDatabase as JMongoDatabase
+import com.mongodb.client.gridfs.GridFSBucket
+import com.mongodb.client.unified.UnifiedTest as JUnifiedTest
+import com.mongodb.client.vault.ClientEncryption
+import com.mongodb.kotlin.client.syncadapter.SyncMongoClient
+
+internal abstract class UnifiedTest() : JUnifiedTest() {
+
+    override fun createMongoClient(settings: MongoClientSettings): JMongoClient =
+        SyncMongoClient(MongoClient.create(settings))
+
+    override fun createGridFSBucket(database: JMongoDatabase?): GridFSBucket {
+        TODO("Not yet implemented - JAVA-4893")
+    }
+
+    override fun createClientEncryption(
+        keyVaultClient: JMongoClient?,
+        clientEncryptionSettings: ClientEncryptionSettings?
+    ): ClientEncryption {
+        TODO("Not yet implemented - JAVA-4896")
+    }
+
+    override fun getLanguage(): Language = Language.KOTLIN
+}
diff --git a/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncAggregateIterable.kt b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncAggregateIterable.kt
new file mode 100644
index 00000000000..b563c67c368
--- /dev/null
+++ b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncAggregateIterable.kt
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.syncadapter
+
+import com.mongodb.ExplainVerbosity
+import com.mongodb.client.AggregateIterable as JAggregateIterable
+import com.mongodb.client.cursor.TimeoutMode
+import com.mongodb.client.model.Collation
+import com.mongodb.kotlin.client.AggregateIterable
+import java.util.concurrent.TimeUnit
+import org.bson.BsonValue
+import org.bson.Document
+import org.bson.conversions.Bson
+
+internal class SyncAggregateIterable<T : Any>(val wrapped: AggregateIterable<T>) :
+    JAggregateIterable<T>, SyncMongoIterable<T>(wrapped) {
+    override fun batchSize(batchSize: Int): SyncAggregateIterable<T> = apply { wrapped.batchSize(batchSize) }
+    override fun timeoutMode(timeoutMode: TimeoutMode): SyncAggregateIterable<T> = apply {
+        wrapped.timeoutMode(timeoutMode)
+    }
+
+    override fun toCollection() = wrapped.toCollection()
+
+    override fun allowDiskUse(allowDiskUse: Boolean?): SyncAggregateIterable<T> = apply {
+        wrapped.allowDiskUse(allowDiskUse)
+    }
+
+    override fun maxTime(maxTime: Long, timeUnit: TimeUnit): SyncAggregateIterable<T> = apply {
+        wrapped.maxTime(maxTime, timeUnit)
+    }
+
+    override fun maxAwaitTime(maxAwaitTime: Long, timeUnit: TimeUnit): SyncAggregateIterable<T> = apply {
+        wrapped.maxAwaitTime(maxAwaitTime, timeUnit)
+    }
+
+    override fun bypassDocumentValidation(bypassDocumentValidation: Boolean?): SyncAggregateIterable<T> = apply {
+        wrapped.bypassDocumentValidation(bypassDocumentValidation)
+    }
+
+    override fun collation(collation: Collation?): SyncAggregateIterable<T> = apply { wrapped.collation(collation) }
+
+    override fun comment(comment: String?): SyncAggregateIterable<T> = apply { wrapped.comment(comment) }
+
+    override fun comment(comment: BsonValue?): SyncAggregateIterable<T> = apply { wrapped.comment(comment) }
+
+    override fun hint(hint: Bson?): SyncAggregateIterable<T> = apply { wrapped.hint(hint) }
+
+    override fun hintString(hint: String?): SyncAggregateIterable<T> = apply { wrapped.hintString(hint) }
+
+    override fun let(variables: Bson?): SyncAggregateIterable<T> = apply { wrapped.let(variables) }
+
+    override fun explain(): Document = wrapped.explain()
+
+    override fun explain(verbosity: ExplainVerbosity): Document = wrapped.explain(verbosity)
+
+    override fun <E : Any> explain(explainResultClass: Class<E>): E = wrapped.explain(explainResultClass)
+
+    override fun <E : Any> explain(explainResultClass: Class<E>, verbosity: ExplainVerbosity): E =
+        wrapped.explain(explainResultClass, verbosity)
+}
diff --git a/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncChangeStreamIterable.kt b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncChangeStreamIterable.kt
new file mode 100644
index 00000000000..0d579e68006
--- /dev/null
+++ b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncChangeStreamIterable.kt
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.syncadapter
+
+import com.mongodb.client.ChangeStreamIterable as JChangeStreamIterable
+import com.mongodb.client.MongoIterable
+import com.mongodb.client.model.Collation
+import com.mongodb.client.model.changestream.ChangeStreamDocument
+import com.mongodb.client.model.changestream.FullDocument
+import com.mongodb.client.model.changestream.FullDocumentBeforeChange
+import com.mongodb.kotlin.client.ChangeStreamIterable
+import java.util.concurrent.TimeUnit
+import org.bson.BsonDocument
+import org.bson.BsonTimestamp
+import org.bson.BsonValue
+
+internal class SyncChangeStreamIterable<T : Any>(val wrapped: ChangeStreamIterable<T>) :
+    JChangeStreamIterable<T>, SyncMongoIterable<ChangeStreamDocument<T>>(wrapped) {
+    override fun <TDocument : Any> withDocumentClass(clazz: Class<TDocument>): MongoIterable<TDocument> =
+        SyncMongoIterable(wrapped.withDocumentClass(clazz))
+    override fun batchSize(batchSize: Int): SyncChangeStreamIterable<T> = apply { wrapped.batchSize(batchSize) }
+    override fun collation(collation: Collation?): SyncChangeStreamIterable<T> = apply { wrapped.collation(collation) }
+    override fun comment(comment: BsonValue?): SyncChangeStreamIterable<T> = apply { wrapped.comment(comment) }
+    override fun comment(comment: String?): SyncChangeStreamIterable<T> = apply { wrapped.comment(comment) }
+    override fun cursor(): SyncMongoChangeStreamCursor<ChangeStreamDocument<T>> =
+        SyncMongoChangeStreamCursor(wrapped.cursor())
+    override fun fullDocument(fullDocument: FullDocument): SyncChangeStreamIterable<T> = apply {
+        wrapped.fullDocument(fullDocument)
+    }
+    override fun fullDocumentBeforeChange(
+        fullDocumentBeforeChange: FullDocumentBeforeChange
+    ): SyncChangeStreamIterable<T> = apply { wrapped.fullDocumentBeforeChange(fullDocumentBeforeChange) }
+    override fun maxAwaitTime(maxAwaitTime: Long, timeUnit: TimeUnit): SyncChangeStreamIterable<T> = apply {
+        wrapped.maxAwaitTime(maxAwaitTime, timeUnit)
+    }
+    override fun resumeAfter(resumeToken: BsonDocument): SyncChangeStreamIterable<T> = apply {
+        wrapped.resumeAfter(resumeToken)
+    }
+    override fun showExpandedEvents(showExpandedEvents: Boolean): SyncChangeStreamIterable<T> = apply {
+        wrapped.showExpandedEvents(showExpandedEvents)
+    }
+    override fun startAfter(startAfter: BsonDocument): SyncChangeStreamIterable<T> = apply {
+        wrapped.startAfter(startAfter)
+    }
+    override fun startAtOperationTime(startAtOperationTime: BsonTimestamp): SyncChangeStreamIterable<T> = apply {
+        wrapped.startAtOperationTime(startAtOperationTime)
+    }
+}
diff --git a/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncClientSession.kt b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncClientSession.kt
new file mode 100644
index 00000000000..64cd27b776f
--- /dev/null
+++ b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncClientSession.kt
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.mongodb.kotlin.client.syncadapter + +import com.mongodb.ClientSessionOptions +import com.mongodb.ServerAddress +import com.mongodb.TransactionOptions +import com.mongodb.client.ClientSession as JClientSession +import com.mongodb.client.TransactionBody +import com.mongodb.internal.TimeoutContext +import com.mongodb.kotlin.client.ClientSession +import com.mongodb.session.ServerSession +import org.bson.BsonDocument +import org.bson.BsonTimestamp + +internal class SyncClientSession(internal val wrapped: ClientSession, private val originator: Any) : JClientSession { + private val delegate: JClientSession = wrapped.wrapped + + override fun close(): Unit = delegate.close() + + override fun getPinnedServerAddress(): ServerAddress? = delegate.pinnedServerAddress + + override fun getTransactionContext(): Any? = delegate.transactionContext + + override fun setTransactionContext(address: ServerAddress, transactionContext: Any): Unit = + delegate.setTransactionContext(address, transactionContext) + + override fun clearTransactionContext(): Unit = delegate.clearTransactionContext() + + override fun getRecoveryToken(): BsonDocument? = delegate.recoveryToken + + override fun setRecoveryToken(recoveryToken: BsonDocument): Unit { + delegate.recoveryToken = recoveryToken + } + + override fun getOptions(): ClientSessionOptions = delegate.options + + override fun isCausallyConsistent(): Boolean = delegate.isCausallyConsistent + + override fun getOriginator(): Any = originator + + override fun getServerSession(): ServerSession = delegate.serverSession + + override fun getOperationTime(): BsonTimestamp = delegate.operationTime + + override fun advanceOperationTime(operationTime: BsonTimestamp?): Unit = + delegate.advanceOperationTime(operationTime) + + override fun advanceClusterTime(clusterTime: BsonDocument?): Unit = delegate.advanceClusterTime(clusterTime) + + override fun setSnapshotTimestamp(snapshotTimestamp: BsonTimestamp?) { + delegate.snapshotTimestamp = snapshotTimestamp + } + + override fun getSnapshotTimestamp(): BsonTimestamp? 
+internal class SyncClientSession(internal val wrapped: ClientSession, private val originator: Any) : JClientSession {
+    private val delegate: JClientSession = wrapped.wrapped
+
+    override fun close(): Unit = delegate.close()
+
+    override fun getPinnedServerAddress(): ServerAddress? = delegate.pinnedServerAddress
+
+    override fun getTransactionContext(): Any? = delegate.transactionContext
+
+    override fun setTransactionContext(address: ServerAddress, transactionContext: Any): Unit =
+        delegate.setTransactionContext(address, transactionContext)
+
+    override fun clearTransactionContext(): Unit = delegate.clearTransactionContext()
+
+    override fun getRecoveryToken(): BsonDocument? = delegate.recoveryToken
+
+    override fun setRecoveryToken(recoveryToken: BsonDocument): Unit {
+        delegate.recoveryToken = recoveryToken
+    }
+
+    override fun getOptions(): ClientSessionOptions = delegate.options
+
+    override fun isCausallyConsistent(): Boolean = delegate.isCausallyConsistent
+
+    override fun getOriginator(): Any = originator
+
+    override fun getServerSession(): ServerSession = delegate.serverSession
+
+    override fun getOperationTime(): BsonTimestamp = delegate.operationTime
+
+    override fun advanceOperationTime(operationTime: BsonTimestamp?): Unit =
+        delegate.advanceOperationTime(operationTime)
+
+    override fun advanceClusterTime(clusterTime: BsonDocument?): Unit = delegate.advanceClusterTime(clusterTime)
+
+    override fun setSnapshotTimestamp(snapshotTimestamp: BsonTimestamp?) {
+        delegate.snapshotTimestamp = snapshotTimestamp
+    }
+
+    override fun getSnapshotTimestamp(): BsonTimestamp? = delegate.snapshotTimestamp
+
+    override fun getClusterTime(): BsonDocument = delegate.clusterTime
+
+    override fun hasActiveTransaction(): Boolean = delegate.hasActiveTransaction()
+
+    override fun notifyMessageSent(): Boolean = delegate.notifyMessageSent()
+
+    override fun notifyOperationInitiated(operation: Any): Unit = delegate.notifyOperationInitiated(operation)
+
+    override fun getTransactionOptions(): TransactionOptions = delegate.transactionOptions
+
+    override fun startTransaction(): Unit = delegate.startTransaction()
+
+    override fun startTransaction(transactionOptions: TransactionOptions): Unit =
+        delegate.startTransaction(transactionOptions)
+
+    override fun commitTransaction(): Unit = delegate.commitTransaction()
+
+    override fun abortTransaction(): Unit = delegate.abortTransaction()
+
+    override fun <T> withTransaction(transactionBody: TransactionBody<T>): T =
+        throw UnsupportedOperationException()
+
+    override fun <T> withTransaction(transactionBody: TransactionBody<T>, options: TransactionOptions): T =
+        throw UnsupportedOperationException()
+
+    override fun getTimeoutContext(): TimeoutContext = throw UnsupportedOperationException()
+}
diff --git a/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncDistinctIterable.kt b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncDistinctIterable.kt
new file mode 100644
index 00000000000..92e52ce39da
--- /dev/null
+++ b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncDistinctIterable.kt
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.syncadapter
+
+import com.mongodb.client.DistinctIterable as JDistinctIterable
+import com.mongodb.client.cursor.TimeoutMode
+import com.mongodb.client.model.Collation
+import com.mongodb.kotlin.client.DistinctIterable
+import java.util.concurrent.TimeUnit
+import org.bson.BsonValue
+import org.bson.conversions.Bson
+
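+// Java-facing facade over the Kotlin driver's DistinctIterable: each option
+// setter forwards to the wrapped iterable and returns this adapter, preserving
+// the Java driver's fluent chaining.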
+internal class SyncDistinctIterable<T : Any>(val wrapped: DistinctIterable<T>) :
+    JDistinctIterable<T>, SyncMongoIterable<T>(wrapped) {
+    override fun batchSize(batchSize: Int): SyncDistinctIterable<T> = apply { wrapped.batchSize(batchSize) }
+    override fun timeoutMode(timeoutMode: TimeoutMode): SyncDistinctIterable<T> = apply {
+        wrapped.timeoutMode(timeoutMode)
+    }
+    override fun filter(filter: Bson?): SyncDistinctIterable<T> = apply { wrapped.filter(filter) }
+    override fun maxTime(maxTime: Long, timeUnit: TimeUnit): SyncDistinctIterable<T> = apply {
+        wrapped.maxTime(maxTime, timeUnit)
+    }
+    override fun collation(collation: Collation?): SyncDistinctIterable<T> = apply { wrapped.collation(collation) }
+    override fun comment(comment: String?): SyncDistinctIterable<T> = apply { wrapped.comment(comment) }
+    override fun comment(comment: BsonValue?): SyncDistinctIterable<T> = apply { wrapped.comment(comment) }
+    override fun hint(hint: Bson?): SyncDistinctIterable<T> = apply { wrapped.hint(hint) }
+    override fun hintString(hint: String?): SyncDistinctIterable<T> = apply { wrapped.hintString(hint) }
+}
diff --git a/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncFindIterable.kt b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncFindIterable.kt
new file mode 100644
index 00000000000..81247aeb2a0
--- /dev/null
+++ b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncFindIterable.kt
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.syncadapter
+
+import com.mongodb.CursorType
+import com.mongodb.ExplainVerbosity
+import com.mongodb.client.FindIterable as JFindIterable
+import com.mongodb.client.cursor.TimeoutMode
+import com.mongodb.client.model.Collation
+import com.mongodb.kotlin.client.FindIterable
+import java.util.concurrent.TimeUnit
+import org.bson.BsonValue
+import org.bson.Document
+import org.bson.conversions.Bson
+
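+// Java-facing facade over the Kotlin driver's FindIterable: query options are
+// forwarded one-to-one and the explain() overloads return the server's reply
+// unchanged. A hypothetical chain such as
+//   collection.find(filter).sort(sort).limit(10).explain()
+// therefore behaves the same through this adapter as against the Java driver.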
+internal class SyncFindIterable<T : Any>(val wrapped: FindIterable<T>) :
+    JFindIterable<T>, SyncMongoIterable<T>(wrapped) {
+    override fun batchSize(batchSize: Int): SyncFindIterable<T> = apply { wrapped.batchSize(batchSize) }
+    override fun timeoutMode(timeoutMode: TimeoutMode): SyncFindIterable<T> = apply { wrapped.timeoutMode(timeoutMode) }
+    override fun filter(filter: Bson?): SyncFindIterable<T> = apply { wrapped.filter(filter) }
+
+    override fun limit(limit: Int): SyncFindIterable<T> = apply { wrapped.limit(limit) }
+
+    override fun skip(skip: Int): SyncFindIterable<T> = apply { wrapped.skip(skip) }
+
+    override fun allowDiskUse(allowDiskUse: Boolean?): SyncFindIterable<T> = apply {
+        wrapped.allowDiskUse(allowDiskUse)
+    }
+
+    override fun maxTime(maxTime: Long, timeUnit: TimeUnit): SyncFindIterable<T> = apply {
+        wrapped.maxTime(maxTime, timeUnit)
+    }
+
+    override fun maxAwaitTime(maxAwaitTime: Long, timeUnit: TimeUnit): SyncFindIterable<T> = apply {
+        wrapped.maxAwaitTime(maxAwaitTime, timeUnit)
+    }
+
+    override fun projection(projection: Bson?): SyncFindIterable<T> = apply { wrapped.projection(projection) }
+
+    override fun sort(sort: Bson?): SyncFindIterable<T> = apply { wrapped.sort(sort) }
+
+    override fun noCursorTimeout(noCursorTimeout: Boolean): SyncFindIterable<T> = apply {
+        wrapped.noCursorTimeout(noCursorTimeout)
+    }
+
+    override fun partial(partial: Boolean): SyncFindIterable<T> = apply { wrapped.partial(partial) }
+
+    override fun cursorType(cursorType: CursorType): SyncFindIterable<T> = apply { wrapped.cursorType(cursorType) }
+
+    override fun collation(collation: Collation?): SyncFindIterable<T> = apply { wrapped.collation(collation) }
+
+    override fun comment(comment: String?): SyncFindIterable<T> = apply { wrapped.comment(comment) }
+
+    override fun comment(comment: BsonValue?): SyncFindIterable<T> = apply { wrapped.comment(comment) }
+
+    override fun hint(hint: Bson?): SyncFindIterable<T> = apply { wrapped.hint(hint) }
+
+    override fun hintString(hint: String?): SyncFindIterable<T> = apply { wrapped.hintString(hint) }
+
+    override fun let(variables: Bson?): SyncFindIterable<T> = apply { wrapped.let(variables) }
+    override fun max(max: Bson?): SyncFindIterable<T> = apply { wrapped.max(max) }
+
+    override fun min(min: Bson?): SyncFindIterable<T> = apply { wrapped.min(min) }
+
+    override fun returnKey(returnKey: Boolean): SyncFindIterable<T> = apply { wrapped.returnKey(returnKey) }
+
+    override fun showRecordId(showRecordId: Boolean): SyncFindIterable<T> = apply { wrapped.showRecordId(showRecordId) }
+
+    override fun explain(): Document = wrapped.explain()
+
+    override fun explain(verbosity: ExplainVerbosity): Document = wrapped.explain(verbosity)
+
+    override fun <E : Any> explain(explainResultClass: Class<E>): E = wrapped.explain(explainResultClass)
+
+    override fun <E : Any> explain(explainResultClass: Class<E>, verbosity: ExplainVerbosity): E =
+        wrapped.explain(explainResultClass, verbosity)
+}
diff --git a/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListCollectionNamesIterable.kt b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListCollectionNamesIterable.kt
new file mode 100644
index 00000000000..45f910664a7
--- /dev/null
+++ b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListCollectionNamesIterable.kt
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.syncadapter
+
+import com.mongodb.client.ListCollectionNamesIterable as JListCollectionsIterable
+import com.mongodb.kotlin.client.ListCollectionNamesIterable
+import java.util.concurrent.TimeUnit
+import org.bson.BsonValue
+import org.bson.conversions.Bson
+
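+// listCollectionNames yields plain collection-name strings, so this adapter
+// extends SyncMongoIterable<String> and forwards only the options the Java
+// ListCollectionNamesIterable interface defines.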
+internal class SyncListCollectionNamesIterable(val wrapped: ListCollectionNamesIterable) :
+    JListCollectionsIterable, SyncMongoIterable<String>(wrapped) {
+
+    override fun batchSize(batchSize: Int): SyncListCollectionNamesIterable = apply { wrapped.batchSize(batchSize) }
+
+    override fun maxTime(maxTime: Long, timeUnit: TimeUnit): SyncListCollectionNamesIterable = apply {
+        wrapped.maxTime(maxTime, timeUnit)
+    }
+
+    override fun filter(filter: Bson?): SyncListCollectionNamesIterable = apply { wrapped.filter(filter) }
+
+    override fun comment(comment: String?): SyncListCollectionNamesIterable = apply { wrapped.comment(comment) }
+    override fun comment(comment: BsonValue?): SyncListCollectionNamesIterable = apply { wrapped.comment(comment) }
+
+    override fun authorizedCollections(authorizedCollections: Boolean): SyncListCollectionNamesIterable = apply {
+        wrapped.authorizedCollections(authorizedCollections)
+    }
+}
diff --git a/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListCollectionsIterable.kt b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListCollectionsIterable.kt
new file mode 100644
index 00000000000..f38e7eed5e7
--- /dev/null
+++ b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListCollectionsIterable.kt
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.syncadapter
+
+import com.mongodb.client.ListCollectionsIterable as JListCollectionsIterable
+import com.mongodb.client.cursor.TimeoutMode
+import com.mongodb.kotlin.client.ListCollectionsIterable
+import java.util.concurrent.TimeUnit
+import org.bson.BsonValue
+import org.bson.conversions.Bson
+
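+// Java-facing facade over the Kotlin driver's ListCollectionsIterable;
+// batchSize, timeoutMode, maxTime, filter and comment all forward to the
+// wrapped iterable.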
+internal class SyncListCollectionsIterable<T : Any>(val wrapped: ListCollectionsIterable<T>) :
+    JListCollectionsIterable<T>, SyncMongoIterable<T>(wrapped) {
+
+    override fun batchSize(batchSize: Int): SyncListCollectionsIterable<T> = apply { wrapped.batchSize(batchSize) }
+    override fun timeoutMode(timeoutMode: TimeoutMode): SyncListCollectionsIterable<T> = apply {
+        wrapped.timeoutMode(timeoutMode)
+    }
+
+    override fun maxTime(maxTime: Long, timeUnit: TimeUnit): SyncListCollectionsIterable<T> = apply {
+        wrapped.maxTime(maxTime, timeUnit)
+    }
+
+    override fun filter(filter: Bson?): SyncListCollectionsIterable<T> = apply { wrapped.filter(filter) }
+    override fun comment(comment: String?): SyncListCollectionsIterable<T> = apply { wrapped.comment(comment) }
+    override fun comment(comment: BsonValue?): SyncListCollectionsIterable<T> = apply { wrapped.comment(comment) }
+}
diff --git a/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListDatabasesIterable.kt b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListDatabasesIterable.kt
new file mode 100644
index 00000000000..34874827826
--- /dev/null
+++ b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListDatabasesIterable.kt
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.syncadapter
+
+import com.mongodb.client.ListDatabasesIterable as JListDatabasesIterable
+import com.mongodb.client.cursor.TimeoutMode
+import com.mongodb.kotlin.client.ListDatabasesIterable
+import java.util.concurrent.TimeUnit
+import org.bson.BsonValue
+import org.bson.conversions.Bson
+
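+// Java-facing facade over the Kotlin driver's ListDatabasesIterable; note the
+// nullable Boolean options (nameOnly, authorizedDatabasesOnly), which mirror the
+// Java interface and are passed through unchanged.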
+internal class SyncListDatabasesIterable<T : Any>(val wrapped: ListDatabasesIterable<T>) :
+    JListDatabasesIterable<T>, SyncMongoIterable<T>(wrapped) {
+
+    override fun batchSize(batchSize: Int): SyncListDatabasesIterable<T> = apply { wrapped.batchSize(batchSize) }
+    override fun timeoutMode(timeoutMode: TimeoutMode): SyncListDatabasesIterable<T> = apply {
+        wrapped.timeoutMode(timeoutMode)
+    }
+
+    override fun maxTime(maxTime: Long, timeUnit: TimeUnit): SyncListDatabasesIterable<T> = apply {
+        wrapped.maxTime(maxTime, timeUnit)
+    }
+
+    override fun filter(filter: Bson?): SyncListDatabasesIterable<T> = apply { wrapped.filter(filter) }
+
+    override fun nameOnly(nameOnly: Boolean?): SyncListDatabasesIterable<T> = apply { wrapped.nameOnly(nameOnly) }
+
+    override fun authorizedDatabasesOnly(authorizedDatabasesOnly: Boolean?): SyncListDatabasesIterable<T> = apply {
+        wrapped.authorizedDatabasesOnly(authorizedDatabasesOnly)
+    }
+
+    override fun comment(comment: String?): SyncListDatabasesIterable<T> = apply { wrapped.comment(comment) }
+
+    override fun comment(comment: BsonValue?): SyncListDatabasesIterable<T> = apply { wrapped.comment(comment) }
+}
diff --git a/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListIndexesIterable.kt b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListIndexesIterable.kt
new file mode 100644
index 00000000000..56e5fec91cd
--- /dev/null
+++ b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListIndexesIterable.kt
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.syncadapter
+
+import com.mongodb.client.ListIndexesIterable as JListIndexesIterable
+import com.mongodb.client.cursor.TimeoutMode
+import com.mongodb.kotlin.client.ListIndexesIterable
+import java.util.concurrent.TimeUnit
+import org.bson.BsonValue
+
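+// Java-facing facade over the Kotlin driver's ListIndexesIterable; the smallest
+// of the iterable adapters, forwarding only batchSize, timeoutMode, maxTime and
+// comment.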
+internal class SyncListIndexesIterable<T : Any>(val wrapped: ListIndexesIterable<T>) :
+    JListIndexesIterable<T>, SyncMongoIterable<T>(wrapped) {
+    override fun batchSize(batchSize: Int): SyncListIndexesIterable<T> = apply { wrapped.batchSize(batchSize) }
+    override fun timeoutMode(timeoutMode: TimeoutMode): SyncListIndexesIterable<T> = apply {
+        wrapped.timeoutMode(timeoutMode)
+    }
+    override fun maxTime(maxTime: Long, timeUnit: TimeUnit): SyncListIndexesIterable<T> = apply {
+        wrapped.maxTime(maxTime, timeUnit)
+    }
+    override fun comment(comment: String?): SyncListIndexesIterable<T> = apply { wrapped.comment(comment) }
+    override fun comment(comment: BsonValue?): SyncListIndexesIterable<T> = apply { wrapped.comment(comment) }
+}
diff --git a/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListSearchIndexesIterable.kt b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListSearchIndexesIterable.kt
new file mode 100644
index 00000000000..b0e6d522b7e
--- /dev/null
+++ b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncListSearchIndexesIterable.kt
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.syncadapter
+
+import com.mongodb.ExplainVerbosity
+import com.mongodb.client.ListSearchIndexesIterable as JListSearchIndexesIterable
+import com.mongodb.client.cursor.TimeoutMode
+import com.mongodb.client.model.Collation
+import com.mongodb.kotlin.client.ListSearchIndexesIterable
+import java.util.concurrent.TimeUnit
+import org.bson.BsonValue
+import org.bson.Document
+
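+// Java-facing facade over the Kotlin driver's ListSearchIndexesIterable.
+// allowDiskUse and collation are declared with the fully qualified Java return
+// type, presumably to avoid clashing with the imported Kotlin interface of the
+// same name.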
+internal class SyncListSearchIndexesIterable<T : Any>(val wrapped: ListSearchIndexesIterable<T>) :
+    JListSearchIndexesIterable<T>, SyncMongoIterable<T>(wrapped) {
+    override fun batchSize(batchSize: Int): SyncListSearchIndexesIterable<T> = apply { wrapped.batchSize(batchSize) }
+    override fun timeoutMode(timeoutMode: TimeoutMode): SyncListSearchIndexesIterable<T> = apply {
+        wrapped.timeoutMode(timeoutMode)
+    }
+    override fun name(indexName: String): SyncListSearchIndexesIterable<T> = apply { wrapped.name(indexName) }
+
+    override fun allowDiskUse(allowDiskUse: Boolean?): com.mongodb.client.ListSearchIndexesIterable<T> = apply {
+        wrapped.allowDiskUse(allowDiskUse)
+    }
+
+    override fun maxTime(maxTime: Long, timeUnit: TimeUnit): SyncListSearchIndexesIterable<T> = apply {
+        wrapped.maxTime(maxTime, timeUnit)
+    }
+
+    override fun collation(collation: Collation?): com.mongodb.client.ListSearchIndexesIterable<T> = apply {
+        wrapped.collation(collation)
+    }
+
+    override fun comment(comment: String?): SyncListSearchIndexesIterable<T> = apply { wrapped.comment(comment) }
+    override fun comment(comment: BsonValue?): SyncListSearchIndexesIterable<T> = apply { wrapped.comment(comment) }
+    override fun explain(): Document = wrapped.explain()
+
+    override fun explain(verbosity: ExplainVerbosity): Document = wrapped.explain(verbosity)
+
+    override fun <E : Any> explain(explainResultClass: Class<E>): E = wrapped.explain(explainResultClass)
+
+    override fun <E : Any> explain(explainResultClass: Class<E>, verbosity: ExplainVerbosity): E =
+        wrapped.explain(explainResultClass, verbosity)
+}
diff --git a/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoChangeStreamCursor.kt b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoChangeStreamCursor.kt
new file mode 100644
index 00000000000..1ddfe72a866
--- /dev/null
+++ b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoChangeStreamCursor.kt
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.syncadapter
+
+import com.mongodb.client.MongoChangeStreamCursor as JMongoChangeStreamCursor
+import com.mongodb.kotlin.client.MongoChangeStreamCursor
+import org.bson.BsonDocument
+
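+// Change stream cursors add resume-token access on top of the plain cursor
+// contract, so this adapter only surfaces getResumeToken and inherits everything
+// else from SyncMongoCursor.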
+internal class SyncMongoChangeStreamCursor<T>(val wrapped: MongoChangeStreamCursor<T>) :
+    JMongoChangeStreamCursor<T>, SyncMongoCursor<T>(wrapped) {
+    override fun getResumeToken(): BsonDocument? = wrapped.resumeToken
+}
diff --git a/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoClient.kt b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoClient.kt
new file mode 100644
index 00000000000..02c58833df5
--- /dev/null
+++ b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoClient.kt
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.syncadapter
+
+import com.mongodb.MongoDriverInformation
+import com.mongodb.client.MongoClient as JMongoClient
+import com.mongodb.connection.ClusterDescription
+import com.mongodb.kotlin.client.MongoClient
+
+internal class SyncMongoClient(override val wrapped: MongoClient) : SyncMongoCluster(wrapped), JMongoClient {
+    override fun close(): Unit = wrapped.close()
+
+    override fun getClusterDescription(): ClusterDescription = wrapped.clusterDescription
+    override fun appendMetadata(mongoDriverInformation: MongoDriverInformation): Unit =
+        wrapped.appendMetadata(mongoDriverInformation)
+}
diff --git a/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCluster.kt b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCluster.kt
new file mode 100644
index 00000000000..b86f2447a17
--- /dev/null
+++ b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCluster.kt
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.syncadapter
+
+import com.mongodb.ClientSessionOptions
+import com.mongodb.ReadConcern
+import com.mongodb.ReadPreference
+import com.mongodb.WriteConcern
+import com.mongodb.client.ChangeStreamIterable
+import com.mongodb.client.ClientSession
+import com.mongodb.client.ListDatabasesIterable
+import com.mongodb.client.MongoCluster as JMongoCluster
+import com.mongodb.client.MongoDatabase
+import com.mongodb.client.MongoIterable
+import com.mongodb.client.model.bulk.ClientBulkWriteOptions
+import com.mongodb.client.model.bulk.ClientBulkWriteResult
+import com.mongodb.client.model.bulk.ClientNamespacedWriteModel
+import com.mongodb.kotlin.client.MongoCluster
+import java.util.concurrent.TimeUnit
+import org.bson.Document
+import org.bson.codecs.configuration.CodecRegistry
+import org.bson.conversions.Bson
+
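+// Central adapter: implements the Java driver's MongoCluster against the Kotlin
+// driver's MongoCluster. The with*() methods wrap a reconfigured Kotlin cluster
+// in a fresh adapter, sessions are wrapped in SyncClientSession, and the private
+// ClientSession.unwrapped() helper recovers the Kotlin session needed by the
+// watch, listDatabases and bulkWrite overloads. Hypothetical usage from a
+// Java-driver-shaped test (uri is an assumed connection string):
+//   val client: com.mongodb.client.MongoClient = SyncMongoClient(MongoClient.create(uri))
+//   client.getDatabase("test").getCollection("coll").countDocuments()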
+internal open class SyncMongoCluster(open val wrapped: MongoCluster) : JMongoCluster {
+    override fun getCodecRegistry(): CodecRegistry = wrapped.codecRegistry
+
+    override fun getReadPreference(): ReadPreference = wrapped.readPreference
+
+    override fun getWriteConcern(): WriteConcern = wrapped.writeConcern
+
+    override fun getReadConcern(): ReadConcern = wrapped.readConcern
+
+    override fun getTimeout(timeUnit: TimeUnit): Long? = wrapped.timeout(timeUnit)
+
+    override fun withCodecRegistry(codecRegistry: CodecRegistry): SyncMongoCluster =
+        SyncMongoCluster(wrapped.withCodecRegistry(codecRegistry))
+
+    override fun withReadPreference(readPreference: ReadPreference): SyncMongoCluster =
+        SyncMongoCluster(wrapped.withReadPreference(readPreference))
+
+    override fun withReadConcern(readConcern: ReadConcern): SyncMongoCluster =
+        SyncMongoCluster(wrapped.withReadConcern(readConcern))
+
+    override fun withWriteConcern(writeConcern: WriteConcern): SyncMongoCluster =
+        SyncMongoCluster(wrapped.withWriteConcern(writeConcern))
+
+    override fun withTimeout(timeout: Long, timeUnit: TimeUnit): SyncMongoCluster =
+        SyncMongoCluster(wrapped.withTimeout(timeout, timeUnit))
+
+    override fun getDatabase(databaseName: String): MongoDatabase = SyncMongoDatabase(wrapped.getDatabase(databaseName))
+
+    override fun startSession(): ClientSession = SyncClientSession(wrapped.startSession(), this)
+
+    override fun startSession(options: ClientSessionOptions): ClientSession =
+        SyncClientSession(wrapped.startSession(options), this)
+
+    override fun listDatabaseNames(): MongoIterable<String> = SyncMongoIterable(wrapped.listDatabaseNames())
+
+    override fun listDatabaseNames(clientSession: ClientSession): MongoIterable<String> =
+        SyncMongoIterable(wrapped.listDatabaseNames(clientSession.unwrapped()))
+
+    override fun listDatabases(): ListDatabasesIterable<Document> = SyncListDatabasesIterable(wrapped.listDatabases())
+
+    override fun listDatabases(clientSession: ClientSession): ListDatabasesIterable<Document> =
+        SyncListDatabasesIterable(wrapped.listDatabases(clientSession.unwrapped()))
+
+    override fun <T : Any> listDatabases(resultClass: Class<T>): ListDatabasesIterable<T> =
+        SyncListDatabasesIterable(wrapped.listDatabases(resultClass))
+
+    override fun <T : Any> listDatabases(
+        clientSession: ClientSession,
+        resultClass: Class<T>
+    ): ListDatabasesIterable<T> =
+        SyncListDatabasesIterable(wrapped.listDatabases(clientSession.unwrapped(), resultClass))
+
+    override fun watch(): ChangeStreamIterable<Document> = SyncChangeStreamIterable(wrapped.watch())
+
+    override fun <T : Any> watch(resultClass: Class<T>): ChangeStreamIterable<T> =
+        SyncChangeStreamIterable(wrapped.watch(resultClass = resultClass))
+
+    override fun watch(pipeline: MutableList<Bson>): ChangeStreamIterable<Document> =
+        SyncChangeStreamIterable(wrapped.watch(pipeline))
+
+    override fun <T : Any> watch(pipeline: MutableList<Bson>, resultClass: Class<T>): ChangeStreamIterable<T> =
+        SyncChangeStreamIterable(wrapped.watch(pipeline, resultClass))
+
+    override fun watch(clientSession: ClientSession): ChangeStreamIterable<Document> =
+        SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped()))
+
+    override fun <T : Any> watch(clientSession: ClientSession, resultClass: Class<T>): ChangeStreamIterable<T> =
+        SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), resultClass = resultClass))
+
+    override fun watch(clientSession: ClientSession, pipeline: MutableList<Bson>): ChangeStreamIterable<Document> =
+        SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline))
+
+    override fun <T : Any> watch(
+        clientSession: ClientSession,
+        pipeline: MutableList<Bson>,
+        resultClass: Class<T>
+    ): ChangeStreamIterable<T> =
+        SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline, resultClass))
+
+    override fun bulkWrite(models: MutableList<ClientNamespacedWriteModel>): ClientBulkWriteResult =
+        wrapped.bulkWrite(models)
+
+    override fun bulkWrite(
+        models: MutableList<ClientNamespacedWriteModel>,
+        options: ClientBulkWriteOptions
+    ): ClientBulkWriteResult = wrapped.bulkWrite(models, options)
+
+    override fun bulkWrite(
+        clientSession: ClientSession,
+        models: MutableList<ClientNamespacedWriteModel>
+    ): ClientBulkWriteResult = wrapped.bulkWrite(clientSession.unwrapped(), models)
+
+    override fun bulkWrite(
+        clientSession: ClientSession,
+        models: MutableList<ClientNamespacedWriteModel>,
+        options: ClientBulkWriteOptions
+    ): ClientBulkWriteResult = wrapped.bulkWrite(clientSession.unwrapped(), models, options)
+
+    private fun ClientSession.unwrapped() = (this as SyncClientSession).wrapped
+}
diff --git a/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCollection.kt b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCollection.kt
new file mode 100644
index 00000000000..51c3a7db7e1
--- /dev/null
+++ b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCollection.kt
@@ -0,0 +1,530 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@file:Suppress("DEPRECATION")
+
+package com.mongodb.kotlin.client.syncadapter
+
+import com.mongodb.MongoNamespace
+import com.mongodb.ReadConcern
+import com.mongodb.ReadPreference
+import com.mongodb.WriteConcern
+import com.mongodb.bulk.BulkWriteResult
+import com.mongodb.client.AggregateIterable
+import com.mongodb.client.ChangeStreamIterable
+import com.mongodb.client.ClientSession
+import com.mongodb.client.DistinctIterable
+import com.mongodb.client.FindIterable
+import com.mongodb.client.ListIndexesIterable
+import com.mongodb.client.ListSearchIndexesIterable
+import com.mongodb.client.MapReduceIterable
+import com.mongodb.client.MongoCollection as JMongoCollection
+import com.mongodb.client.model.BulkWriteOptions
+import com.mongodb.client.model.CountOptions
+import com.mongodb.client.model.CreateIndexOptions
+import com.mongodb.client.model.DeleteOptions
+import com.mongodb.client.model.DropCollectionOptions
+import com.mongodb.client.model.DropIndexOptions
+import com.mongodb.client.model.EstimatedDocumentCountOptions
+import com.mongodb.client.model.FindOneAndDeleteOptions
+import com.mongodb.client.model.FindOneAndReplaceOptions
+import com.mongodb.client.model.FindOneAndUpdateOptions
+import com.mongodb.client.model.IndexModel
+import com.mongodb.client.model.IndexOptions
+import com.mongodb.client.model.InsertManyOptions
+import com.mongodb.client.model.InsertOneOptions
+import com.mongodb.client.model.RenameCollectionOptions
+import com.mongodb.client.model.ReplaceOptions
+import com.mongodb.client.model.SearchIndexModel
+import com.mongodb.client.model.UpdateOptions
+import com.mongodb.client.model.WriteModel
+import com.mongodb.client.result.DeleteResult
+import com.mongodb.client.result.InsertManyResult
+import com.mongodb.client.result.InsertOneResult
+import com.mongodb.client.result.UpdateResult
+import com.mongodb.kotlin.client.MongoCollection
+import java.lang.UnsupportedOperationException
+import java.util.concurrent.TimeUnit
+import org.bson.Document
+import org.bson.codecs.configuration.CodecRegistry
+import org.bson.conversions.Bson
+
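+// The largest adapter: implements the Java driver's MongoCollection against the
+// Kotlin driver's MongoCollection. CRUD calls forward directly, iterables come
+// back rewrapped in their Sync* counterparts, and the deprecated mapReduce
+// overloads throw UnsupportedOperationException since the Kotlin driver has no
+// MapReduce support.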
+@Suppress("OVERRIDE_DEPRECATION")
+internal class SyncMongoCollection<T : Any>(val wrapped: MongoCollection<T>) : JMongoCollection<T> {
+    override fun getNamespace(): MongoNamespace = wrapped.namespace
+
+    override fun getDocumentClass(): Class<T> = wrapped.documentClass
+
+    override fun getCodecRegistry(): CodecRegistry = wrapped.codecRegistry
+
+    override fun getReadPreference(): ReadPreference = wrapped.readPreference
+
+    override fun getWriteConcern(): WriteConcern = wrapped.writeConcern
+
+    override fun getReadConcern(): ReadConcern = wrapped.readConcern
+    override fun getTimeout(timeUnit: TimeUnit): Long? = wrapped.timeout(timeUnit)
+
+    override fun <R : Any> withDocumentClass(clazz: Class<R>): SyncMongoCollection<R> =
+        SyncMongoCollection(wrapped.withDocumentClass(clazz))
+
+    override fun withCodecRegistry(codecRegistry: CodecRegistry): SyncMongoCollection<T> =
+        SyncMongoCollection(wrapped.withCodecRegistry(codecRegistry))
+
+    override fun withReadPreference(readPreference: ReadPreference): SyncMongoCollection<T> =
+        SyncMongoCollection(wrapped.withReadPreference(readPreference))
+
+    override fun withWriteConcern(writeConcern: WriteConcern): SyncMongoCollection<T> =
+        SyncMongoCollection(wrapped.withWriteConcern(writeConcern))
+
+    override fun withReadConcern(readConcern: ReadConcern): SyncMongoCollection<T> =
+        SyncMongoCollection(wrapped.withReadConcern(readConcern))
+
+    override fun withTimeout(timeout: Long, timeUnit: TimeUnit): com.mongodb.client.MongoCollection<T> =
+        SyncMongoCollection(wrapped.withTimeout(timeout, timeUnit))
+
+    override fun countDocuments(): Long = wrapped.countDocuments()
+
+    override fun countDocuments(filter: Bson): Long = wrapped.countDocuments(filter)
+
+    override fun countDocuments(filter: Bson, options: CountOptions): Long = wrapped.countDocuments(filter, options)
+
+    override fun countDocuments(clientSession: ClientSession): Long = wrapped.countDocuments(clientSession.unwrapped())
+
+    override fun countDocuments(clientSession: ClientSession, filter: Bson): Long =
+        wrapped.countDocuments(clientSession.unwrapped(), filter)
+
+    override fun countDocuments(clientSession: ClientSession, filter: Bson, options: CountOptions): Long =
+        wrapped.countDocuments(clientSession.unwrapped(), filter, options)
+
+    override fun estimatedDocumentCount(): Long = wrapped.estimatedDocumentCount()
+
+    override fun estimatedDocumentCount(options: EstimatedDocumentCountOptions): Long =
+        wrapped.estimatedDocumentCount(options)
+
+    override fun <R : Any> distinct(fieldName: String, resultClass: Class<R>): DistinctIterable<R> =
+        SyncDistinctIterable(wrapped.distinct(fieldName, resultClass = resultClass))
+
+    override fun <R : Any> distinct(fieldName: String, filter: Bson, resultClass: Class<R>): DistinctIterable<R> =
+        SyncDistinctIterable(wrapped.distinct(fieldName, filter, resultClass = resultClass))
+
+    override fun <R : Any> distinct(
+        clientSession: ClientSession,
+        fieldName: String,
+        resultClass: Class<R>
+    ): DistinctIterable<R> =
+        SyncDistinctIterable(wrapped.distinct(clientSession.unwrapped(), fieldName, resultClass = resultClass))
+
+    override fun <R : Any> distinct(
+        clientSession: ClientSession,
+        fieldName: String,
+        filter: Bson,
+        resultClass: Class<R>
+    ): DistinctIterable<R> =
+        SyncDistinctIterable(wrapped.distinct(clientSession.unwrapped(), fieldName, filter, resultClass))
+
+    override fun find(): FindIterable<T> = SyncFindIterable(wrapped.find())
+
+    override fun <R : Any> find(resultClass: Class<R>): FindIterable<R> =
+        SyncFindIterable(wrapped.find(resultClass = resultClass))
+
+    override fun find(filter: Bson): FindIterable<T> = SyncFindIterable(wrapped.find(filter))
+
+    override fun <R : Any> find(filter: Bson, resultClass: Class<R>): FindIterable<R> =
+        SyncFindIterable(wrapped.find(filter, resultClass))
+
+    override fun find(clientSession: ClientSession): FindIterable<T> =
+        SyncFindIterable(wrapped.find(clientSession.unwrapped()))
+
+    override fun <R : Any> find(clientSession: ClientSession, resultClass: Class<R>): FindIterable<R> =
+        SyncFindIterable(wrapped.find(clientSession.unwrapped(), resultClass = resultClass))
+
+    override fun find(clientSession: ClientSession, filter: Bson): FindIterable<T> =
+        SyncFindIterable(wrapped.find(clientSession.unwrapped(), filter))
+
+    override fun <R : Any> find(clientSession: ClientSession, filter: Bson, resultClass: Class<R>): FindIterable<R> =
+        SyncFindIterable(wrapped.find(clientSession.unwrapped(), filter, resultClass))
+
+    override fun aggregate(pipeline: MutableList<Bson>): AggregateIterable<T> =
+        SyncAggregateIterable(wrapped.aggregate(pipeline))
+
+    override fun <R : Any> aggregate(pipeline: MutableList<Bson>, resultClass: Class<R>): AggregateIterable<R> =
+        SyncAggregateIterable(wrapped.aggregate(pipeline, resultClass))
+
+    override fun aggregate(clientSession: ClientSession, pipeline: MutableList<Bson>): AggregateIterable<T> =
+        SyncAggregateIterable(wrapped.aggregate(clientSession.unwrapped(), pipeline))
+
+    override fun <R : Any> aggregate(
+        clientSession: ClientSession,
+        pipeline: MutableList<Bson>,
+        resultClass: Class<R>
+    ): AggregateIterable<R> = SyncAggregateIterable(wrapped.aggregate(clientSession.unwrapped(), pipeline, resultClass))
+
+    override fun watch(): ChangeStreamIterable<T> = SyncChangeStreamIterable(wrapped.watch())
+
+    override fun <R : Any> watch(resultClass: Class<R>): ChangeStreamIterable<R> =
+        SyncChangeStreamIterable(wrapped.watch(resultClass = resultClass))
+
+    override fun watch(pipeline: MutableList<Bson>): ChangeStreamIterable<T> =
+        SyncChangeStreamIterable(wrapped.watch(pipeline))
+
+    override fun <R : Any> watch(pipeline: MutableList<Bson>, resultClass: Class<R>): ChangeStreamIterable<R> =
+        SyncChangeStreamIterable(wrapped.watch(pipeline, resultClass))
+
+    override fun watch(clientSession: ClientSession): ChangeStreamIterable<T> =
+        SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped()))
+
+    override fun <R : Any> watch(clientSession: ClientSession, resultClass: Class<R>): ChangeStreamIterable<R> =
+        SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), resultClass = resultClass))
+
+    override fun watch(clientSession: ClientSession, pipeline: MutableList<Bson>): ChangeStreamIterable<T> =
+        SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline))
+
+    override fun <R : Any> watch(
+        clientSession: ClientSession,
+        pipeline: MutableList<Bson>,
+        resultClass: Class<R>
+    ): ChangeStreamIterable<R> =
+        SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline, resultClass))
+
+    override fun mapReduce(mapFunction: String, reduceFunction: String): MapReduceIterable<T> =
+        throw UnsupportedOperationException("No MapReduce support")
+
+    override fun <R : Any> mapReduce(
+        mapFunction: String,
+        reduceFunction: String,
+        resultClass: Class<R>
+    ): MapReduceIterable<R> = throw UnsupportedOperationException("No MapReduce support")
+
+    override fun mapReduce(
+        clientSession: ClientSession,
+        mapFunction: String,
+        reduceFunction: String
+    ): MapReduceIterable<T> = throw UnsupportedOperationException("No MapReduce support")
+
+    override fun <R : Any> mapReduce(
+        clientSession: ClientSession,
+        mapFunction: String,
+        reduceFunction: String,
+        resultClass: Class<R>
+    ): MapReduceIterable<R> = throw UnsupportedOperationException("No MapReduce support")
+
+    override fun deleteOne(filter: Bson): DeleteResult = wrapped.deleteOne(filter)
+
+    override fun deleteOne(filter: Bson, options: DeleteOptions): DeleteResult = wrapped.deleteOne(filter, options)
+
+    override fun deleteOne(clientSession: ClientSession, filter: Bson): DeleteResult =
+        wrapped.deleteOne(clientSession.unwrapped(), filter)
+
+    override fun deleteOne(clientSession: ClientSession, filter: Bson, options: DeleteOptions): DeleteResult =
+        wrapped.deleteOne(clientSession.unwrapped(), filter, options)
+
+    override fun deleteMany(filter: Bson): DeleteResult = wrapped.deleteMany(filter)
+
+    override fun deleteMany(filter: Bson, options: DeleteOptions): DeleteResult = wrapped.deleteMany(filter, options)
+
+    override fun deleteMany(clientSession: ClientSession, filter: Bson): DeleteResult =
+        wrapped.deleteMany(clientSession.unwrapped(), filter)
+
+    override fun deleteMany(clientSession: ClientSession, filter: Bson, options: DeleteOptions): DeleteResult =
+        wrapped.deleteMany(clientSession.unwrapped(), filter, options)
+
+    override fun updateOne(filter: Bson, update: Bson): UpdateResult = wrapped.updateOne(filter, update)
+
+    override fun updateOne(filter: Bson, update: Bson, updateOptions: UpdateOptions): UpdateResult =
+        wrapped.updateOne(filter, update, updateOptions)
+
+    override fun updateOne(clientSession: ClientSession, filter: Bson, update: Bson): UpdateResult =
+        wrapped.updateOne(clientSession.unwrapped(), filter, update)
+
+    override fun updateOne(
+        clientSession: ClientSession,
+        filter: Bson,
+        update: Bson,
+        updateOptions: UpdateOptions
+    ): UpdateResult = wrapped.updateOne(clientSession.unwrapped(), filter, update, updateOptions)
+
+    override fun updateOne(filter: Bson, update: MutableList<Bson>): UpdateResult =
+        wrapped.updateOne(filter, update)
+
+    override fun updateOne(filter: Bson, update: MutableList<Bson>, updateOptions: UpdateOptions): UpdateResult =
+        wrapped.updateOne(filter, update, updateOptions)
+
+    override fun updateOne(clientSession: ClientSession, filter: Bson, update: MutableList<Bson>): UpdateResult =
+        wrapped.updateOne(clientSession.unwrapped(), filter, update)
+
+    override fun updateOne(
+        clientSession: ClientSession,
+        filter: Bson,
+        update: MutableList<Bson>,
+        updateOptions: UpdateOptions
+    ): UpdateResult = wrapped.updateOne(clientSession.unwrapped(), filter, update, updateOptions)
+
+    override fun updateMany(filter: Bson, update: Bson): UpdateResult = wrapped.updateMany(filter, update)
+
+    override fun updateMany(filter: Bson, update: Bson, updateOptions: UpdateOptions): UpdateResult =
+        wrapped.updateMany(filter, update, updateOptions)
+
+    override fun updateMany(clientSession: ClientSession, filter: Bson, update: Bson): UpdateResult =
+        wrapped.updateMany(clientSession.unwrapped(), filter, update)
+
+    override fun updateMany(
+        clientSession: ClientSession,
+        filter: Bson,
+        update: Bson,
+        updateOptions: UpdateOptions
+    ): UpdateResult = wrapped.updateMany(clientSession.unwrapped(), filter, update, updateOptions)
+
+    override fun updateMany(filter: Bson, update: MutableList<Bson>): UpdateResult =
+        wrapped.updateMany(filter, update)
+
+    override fun updateMany(filter: Bson, update: MutableList<Bson>, updateOptions: UpdateOptions): UpdateResult =
+        wrapped.updateMany(filter, update, updateOptions)
+
+    override fun updateMany(clientSession: ClientSession, filter: Bson, update: MutableList<Bson>): UpdateResult =
+        wrapped.updateMany(clientSession.unwrapped(), filter, update)
+
+    override fun updateMany(
+        clientSession: ClientSession,
+        filter: Bson,
+        update: MutableList<Bson>,
+        updateOptions: UpdateOptions
+    ): UpdateResult = wrapped.updateMany(clientSession.unwrapped(), filter, update, updateOptions)
+
+    override fun findOneAndDelete(filter: Bson): T? = wrapped.findOneAndDelete(filter)
+
+    override fun findOneAndDelete(filter: Bson, options: FindOneAndDeleteOptions): T? =
+        wrapped.findOneAndDelete(filter, options)
+
+    override fun findOneAndDelete(clientSession: ClientSession, filter: Bson): T? =
+        wrapped.findOneAndDelete(clientSession.unwrapped(), filter)
+
+    override fun findOneAndDelete(clientSession: ClientSession, filter: Bson, options: FindOneAndDeleteOptions): T? =
+        wrapped.findOneAndDelete(clientSession.unwrapped(), filter, options)
+
+    override fun findOneAndUpdate(filter: Bson, update: Bson): T? = wrapped.findOneAndUpdate(filter, update)
+
+    override fun findOneAndUpdate(filter: Bson, update: Bson, options: FindOneAndUpdateOptions): T? =
+        wrapped.findOneAndUpdate(filter, update, options)
+
+    override fun findOneAndUpdate(clientSession: ClientSession, filter: Bson, update: Bson): T? =
+        wrapped.findOneAndUpdate(clientSession.unwrapped(), filter, update)
+
+    override fun findOneAndUpdate(
+        clientSession: ClientSession,
+        filter: Bson,
+        update: Bson,
+        options: FindOneAndUpdateOptions
+    ): T? = wrapped.findOneAndUpdate(clientSession.unwrapped(), filter, update, options)
+
+    override fun findOneAndUpdate(filter: Bson, update: MutableList<Bson>): T? =
+        wrapped.findOneAndUpdate(filter, update)
+
+    override fun findOneAndUpdate(filter: Bson, update: MutableList<Bson>, options: FindOneAndUpdateOptions): T? =
+        wrapped.findOneAndUpdate(filter, update, options)
+
+    override fun findOneAndUpdate(clientSession: ClientSession, filter: Bson, update: MutableList<Bson>): T? =
+        wrapped.findOneAndUpdate(clientSession.unwrapped(), filter, update)
+
+    override fun findOneAndUpdate(
+        clientSession: ClientSession,
+        filter: Bson,
+        update: MutableList<Bson>,
+        options: FindOneAndUpdateOptions
+    ): T? = wrapped.findOneAndUpdate(clientSession.unwrapped(), filter, update, options)
+
+    override fun drop() = wrapped.drop()
+
+    override fun drop(clientSession: ClientSession) = wrapped.drop(clientSession.unwrapped())
+
+    override fun drop(dropCollectionOptions: DropCollectionOptions) = wrapped.drop(dropCollectionOptions)
+
+    override fun drop(clientSession: ClientSession, dropCollectionOptions: DropCollectionOptions) =
+        wrapped.drop(clientSession.unwrapped(), dropCollectionOptions)
+
+    override fun createSearchIndex(name: String, definition: Bson) = wrapped.createSearchIndex(name, definition)
+
+    override fun createSearchIndex(definition: Bson) = wrapped.createSearchIndex(definition)
+
+    override fun createSearchIndexes(searchIndexModels: MutableList<SearchIndexModel>): MutableList<String> =
+        wrapped.createSearchIndexes(searchIndexModels).toCollection(mutableListOf())
+
+    override fun updateSearchIndex(indexName: String, definition: Bson) =
+        wrapped.updateSearchIndex(indexName, definition)
+
+    override fun dropSearchIndex(indexName: String) = wrapped.dropSearchIndex(indexName)
+
+    override fun listSearchIndexes(): ListSearchIndexesIterable<Document> =
+        SyncListSearchIndexesIterable(wrapped.listSearchIndexes())
+
+    override fun <R : Any> listSearchIndexes(resultClass: Class<R>): ListSearchIndexesIterable<R> =
+        SyncListSearchIndexesIterable(wrapped.listSearchIndexes(resultClass = resultClass))
+
+    override fun createIndex(keys: Bson): String = wrapped.createIndex(keys)
+
+    override fun createIndex(keys: Bson, indexOptions: IndexOptions): String = wrapped.createIndex(keys, indexOptions)
+
+    override fun createIndex(clientSession: ClientSession, keys: Bson): String =
+        wrapped.createIndex(clientSession.unwrapped(), keys)
+
+    override fun createIndex(clientSession: ClientSession, keys: Bson, indexOptions: IndexOptions): String =
+        wrapped.createIndex(clientSession.unwrapped(), keys, indexOptions)
+
+    override fun createIndexes(indexes: MutableList<IndexModel>): MutableList<String> =
+        wrapped.createIndexes(indexes).toMutableList()
+
+    override fun createIndexes(
+        indexes: MutableList<IndexModel>,
+        createIndexOptions: CreateIndexOptions
+    ): MutableList<String> = wrapped.createIndexes(indexes, createIndexOptions).toMutableList()
+
+    override fun createIndexes(clientSession: ClientSession, indexes: MutableList<IndexModel>): MutableList<String> =
+        wrapped.createIndexes(clientSession.unwrapped(), indexes).toMutableList()
+
+    override fun createIndexes(
+        clientSession: ClientSession,
+        indexes: MutableList<IndexModel>,
+        createIndexOptions: CreateIndexOptions
+    ): MutableList<String> =
+        wrapped.createIndexes(clientSession.unwrapped(), indexes, createIndexOptions).toMutableList()
+
+    override fun listIndexes(): ListIndexesIterable<Document> = SyncListIndexesIterable(wrapped.listIndexes())
+
+    override fun <R : Any> listIndexes(resultClass: Class<R>): ListIndexesIterable<R> =
+        SyncListIndexesIterable(wrapped.listIndexes(resultClass = resultClass))
+
+    override fun listIndexes(clientSession: ClientSession): ListIndexesIterable<Document> =
+        SyncListIndexesIterable(wrapped.listIndexes(clientSession.unwrapped()))
+
+    override fun <R : Any> listIndexes(clientSession: ClientSession, resultClass: Class<R>): ListIndexesIterable<R> =
+        SyncListIndexesIterable(wrapped.listIndexes(clientSession.unwrapped(), resultClass))
+
+    override fun dropIndex(indexName: String) = wrapped.dropIndex(indexName)
+
+    override fun dropIndex(indexName: String, dropIndexOptions: DropIndexOptions) =
+        wrapped.dropIndex(indexName, dropIndexOptions)
+
+    override fun dropIndex(keys: Bson) = wrapped.dropIndex(keys)
+
+    override fun dropIndex(keys: Bson, dropIndexOptions: DropIndexOptions) = wrapped.dropIndex(keys, dropIndexOptions)
+
+    override fun dropIndex(clientSession: ClientSession, indexName: String) =
+        wrapped.dropIndex(clientSession.unwrapped(), indexName)
+
+    override fun dropIndex(clientSession: ClientSession, keys: Bson) =
+        wrapped.dropIndex(clientSession.unwrapped(), keys)
+    override fun dropIndex(clientSession: ClientSession, indexName: String, dropIndexOptions: DropIndexOptions) =
+        wrapped.dropIndex(clientSession.unwrapped(), indexName, dropIndexOptions)
+
+    override fun dropIndex(clientSession: ClientSession, keys: Bson, dropIndexOptions: DropIndexOptions) =
+        wrapped.dropIndex(clientSession.unwrapped(), keys, dropIndexOptions)
+
+    override fun dropIndexes() = wrapped.dropIndexes()
+
+    override fun dropIndexes(clientSession: ClientSession) = wrapped.dropIndexes(clientSession.unwrapped())
+
+    override fun dropIndexes(dropIndexOptions: DropIndexOptions) = wrapped.dropIndexes(dropIndexOptions)
+
+    override fun dropIndexes(clientSession: ClientSession, dropIndexOptions: DropIndexOptions) =
+        wrapped.dropIndexes(clientSession.unwrapped(), dropIndexOptions)
+
+    override fun renameCollection(newCollectionNamespace: MongoNamespace) =
+        wrapped.renameCollection(newCollectionNamespace)
+
+    override fun renameCollection(
+        newCollectionNamespace: MongoNamespace,
+        renameCollectionOptions: RenameCollectionOptions
+    ) = wrapped.renameCollection(newCollectionNamespace, renameCollectionOptions)
+
+    override fun renameCollection(clientSession: ClientSession, newCollectionNamespace: MongoNamespace) =
+        wrapped.renameCollection(clientSession.unwrapped(), newCollectionNamespace)
+
+    override fun renameCollection(
+        clientSession: ClientSession,
+        newCollectionNamespace: MongoNamespace,
+        renameCollectionOptions: RenameCollectionOptions
+    ) = wrapped.renameCollection(clientSession.unwrapped(), newCollectionNamespace, renameCollectionOptions)
+    override fun findOneAndReplace(
+        clientSession: ClientSession,
+        filter: Bson,
+        replacement: T,
+        options: FindOneAndReplaceOptions
+    ): T? = wrapped.findOneAndReplace(clientSession.unwrapped(), filter, replacement, options)
+
+    override fun findOneAndReplace(clientSession: ClientSession, filter: Bson, replacement: T): T? =
+        wrapped.findOneAndReplace(clientSession.unwrapped(), filter, replacement)
+
+    override fun findOneAndReplace(filter: Bson, replacement: T, options: FindOneAndReplaceOptions): T? =
+        wrapped.findOneAndReplace(filter, replacement, options)
+
+    override fun findOneAndReplace(filter: Bson, replacement: T): T? = wrapped.findOneAndReplace(filter, replacement)
+
+    override fun replaceOne(
+        clientSession: ClientSession,
+        filter: Bson,
+        replacement: T,
+        replaceOptions: ReplaceOptions
+    ): UpdateResult = wrapped.replaceOne(clientSession.unwrapped(), filter, replacement, replaceOptions)
+
+    override fun replaceOne(clientSession: ClientSession, filter: Bson, replacement: T): UpdateResult =
+        wrapped.replaceOne(clientSession.unwrapped(), filter, replacement)
+
+    override fun replaceOne(filter: Bson, replacement: T, replaceOptions: ReplaceOptions): UpdateResult =
+        wrapped.replaceOne(filter, replacement, replaceOptions)
+
+    override fun replaceOne(filter: Bson, replacement: T): UpdateResult = wrapped.replaceOne(filter, replacement)
+
+    override fun insertMany(
+        clientSession: ClientSession,
+        documents: MutableList<out T>,
+        options: InsertManyOptions
+    ): InsertManyResult = wrapped.insertMany(clientSession.unwrapped(), documents, options)
+
+    override fun insertMany(clientSession: ClientSession, documents: MutableList<out T>): InsertManyResult =
+        wrapped.insertMany(clientSession.unwrapped(), documents)
+
+    override fun insertMany(documents: MutableList<out T>, options: InsertManyOptions): InsertManyResult =
+        wrapped.insertMany(documents, options)
+
+    override fun insertMany(documents: MutableList<out T>): InsertManyResult = wrapped.insertMany(documents)
+
+    override fun insertOne(clientSession: ClientSession, document: T, options: InsertOneOptions): InsertOneResult =
+        wrapped.insertOne(clientSession.unwrapped(), document, options)
+
+    override fun insertOne(clientSession: ClientSession, document: T): InsertOneResult =
+        wrapped.insertOne(clientSession.unwrapped(), document)
+
+    override fun insertOne(document: T, options: InsertOneOptions): InsertOneResult =
+        wrapped.insertOne(document, options)
+
+    override fun insertOne(document: T): InsertOneResult = wrapped.insertOne(document)
+
+    override fun bulkWrite(
+        clientSession: ClientSession,
+        requests: MutableList<out WriteModel<out T>>,
+        options: BulkWriteOptions
+    ): BulkWriteResult = wrapped.bulkWrite(clientSession.unwrapped(), requests, options)
+
+    override fun bulkWrite(
+        clientSession: ClientSession,
+        requests: MutableList<out WriteModel<out T>>
+    ): BulkWriteResult = wrapped.bulkWrite(clientSession.unwrapped(), requests)
+
+    override fun bulkWrite(requests: MutableList<out WriteModel<out T>>, options: BulkWriteOptions): BulkWriteResult =
+        wrapped.bulkWrite(requests, options)
+
+    override fun bulkWrite(requests: MutableList<out WriteModel<out T>>): BulkWriteResult = wrapped.bulkWrite(requests)
+
+    private fun ClientSession.unwrapped() = (this as SyncClientSession).wrapped
+}
diff --git a/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCursor.kt b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCursor.kt
new file mode 100644
index 00000000000..d115e979521
--- /dev/null
+++ b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoCursor.kt
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.syncadapter
+
+import com.mongodb.ServerAddress
+import com.mongodb.ServerCursor
+import com.mongodb.client.MongoCursor as JMongoCursor
+import com.mongodb.kotlin.client.MongoCursor
+
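+// Base cursor adapter over the Kotlin driver's MongoCursor. Iteration, tryNext
+// and server metadata delegate directly; remove() is left unimplemented.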
+internal open class SyncMongoCursor<T>(private val delegate: MongoCursor<T>) : JMongoCursor<T> {
+    override fun remove() {
+        TODO("Not yet implemented")
+    }
+
+    override fun hasNext(): Boolean = delegate.hasNext()
+    override fun next(): T = delegate.next()
+
+    override fun close() = delegate.close()
+
+    override fun available(): Int = delegate.available
+
+    override fun tryNext(): T? = delegate.tryNext()
+
+    override fun getServerCursor(): ServerCursor? = delegate.serverCursor
+
+    override fun getServerAddress(): ServerAddress = delegate.serverAddress
+}
diff --git a/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoDatabase.kt b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoDatabase.kt
new file mode 100644
index 00000000000..1111ee282ca
--- /dev/null
+++ b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoDatabase.kt
@@ -0,0 +1,199 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.syncadapter
+
+import com.mongodb.ReadConcern
+import com.mongodb.ReadPreference
+import com.mongodb.WriteConcern
+import com.mongodb.client.*
+import com.mongodb.client.MongoDatabase as JMongoDatabase
+import com.mongodb.client.model.CreateCollectionOptions
+import com.mongodb.client.model.CreateViewOptions
+import com.mongodb.kotlin.client.MongoDatabase
+import java.util.concurrent.TimeUnit
+import org.bson.Document
+import org.bson.codecs.configuration.CodecRegistry
+import org.bson.conversions.Bson
+
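+// Java-facing MongoDatabase adapter. getCollection defaults to Document when no
+// class is given, runCommand and the create/list operations forward to the
+// wrapped Kotlin database, and collections come back wrapped in
+// SyncMongoCollection.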
= wrapped.timeout(timeUnit) + + override fun withCodecRegistry(codecRegistry: CodecRegistry): SyncMongoDatabase = + SyncMongoDatabase(wrapped.withCodecRegistry(codecRegistry)) + + override fun withReadPreference(readPreference: ReadPreference): SyncMongoDatabase = + SyncMongoDatabase(wrapped.withReadPreference(readPreference)) + + override fun withWriteConcern(writeConcern: WriteConcern): SyncMongoDatabase = + SyncMongoDatabase(wrapped.withWriteConcern(writeConcern)) + + override fun withReadConcern(readConcern: ReadConcern): SyncMongoDatabase = + SyncMongoDatabase(wrapped.withReadConcern(readConcern)) + + override fun withTimeout(timeout: Long, timeUnit: TimeUnit): SyncMongoDatabase = + SyncMongoDatabase(wrapped.withTimeout(timeout, timeUnit)) + + override fun getCollection(collectionName: String): MongoCollection = + SyncMongoCollection(wrapped.getCollection(collectionName, Document::class.java)) + + override fun getCollection(collectionName: String, documentClass: Class): MongoCollection = + SyncMongoCollection(wrapped.getCollection(collectionName, documentClass)) + + override fun runCommand(command: Bson): Document = wrapped.runCommand(command) + + override fun runCommand(command: Bson, readPreference: ReadPreference): Document = + wrapped.runCommand(command, readPreference) + + override fun runCommand(command: Bson, resultClass: Class): T = + wrapped.runCommand(command, resultClass = resultClass) + + override fun runCommand(command: Bson, readPreference: ReadPreference, resultClass: Class): T = + wrapped.runCommand(command, readPreference, resultClass) + + override fun runCommand(clientSession: ClientSession, command: Bson): Document = + wrapped.runCommand(clientSession.unwrapped(), command) + + override fun runCommand(clientSession: ClientSession, command: Bson, readPreference: ReadPreference): Document = + wrapped.runCommand(clientSession.unwrapped(), command, readPreference) + + override fun runCommand(clientSession: ClientSession, command: Bson, resultClass: Class): T = + wrapped.runCommand(clientSession.unwrapped(), command, resultClass = resultClass) + + override fun runCommand( + clientSession: ClientSession, + command: Bson, + readPreference: ReadPreference, + resultClass: Class + ): T = wrapped.runCommand(clientSession.unwrapped(), command, readPreference, resultClass) + + override fun drop() = wrapped.drop() + + override fun drop(clientSession: ClientSession) = wrapped.drop(clientSession.unwrapped()) + + override fun listCollectionNames(): ListCollectionNamesIterable = + SyncListCollectionNamesIterable(wrapped.listCollectionNames()) + + override fun listCollectionNames(clientSession: ClientSession): ListCollectionNamesIterable = + SyncListCollectionNamesIterable(wrapped.listCollectionNames(clientSession.unwrapped())) + + override fun listCollections(): ListCollectionsIterable = + SyncListCollectionsIterable(wrapped.listCollections()) + + override fun listCollections(resultClass: Class): ListCollectionsIterable = + SyncListCollectionsIterable(wrapped.listCollections(resultClass)) + + override fun listCollections(clientSession: ClientSession): ListCollectionsIterable = + SyncListCollectionsIterable(wrapped.listCollections(clientSession.unwrapped())) + + override fun listCollections( + clientSession: ClientSession, + resultClass: Class + ): ListCollectionsIterable = + SyncListCollectionsIterable(wrapped.listCollections(clientSession.unwrapped(), resultClass)) + + override fun createCollection(collectionName: String) = wrapped.createCollection(collectionName) + + override fun 
createCollection(collectionName: String, createCollectionOptions: CreateCollectionOptions) = + wrapped.createCollection(collectionName, createCollectionOptions) + + override fun createCollection(clientSession: ClientSession, collectionName: String) = + wrapped.createCollection(clientSession.unwrapped(), collectionName) + + override fun createCollection( + clientSession: ClientSession, + collectionName: String, + createCollectionOptions: CreateCollectionOptions + ) = wrapped.createCollection(clientSession.unwrapped(), collectionName, createCollectionOptions) + + override fun createView(viewName: String, viewOn: String, pipeline: MutableList) = + wrapped.createView(viewName, viewOn, pipeline) + + override fun createView( + viewName: String, + viewOn: String, + pipeline: MutableList, + createViewOptions: CreateViewOptions + ) = wrapped.createView(viewName, viewOn, pipeline, createViewOptions) + + override fun createView( + clientSession: ClientSession, + viewName: String, + viewOn: String, + pipeline: MutableList + ) = wrapped.createView(clientSession.unwrapped(), viewName, viewOn, pipeline) + + override fun createView( + clientSession: ClientSession, + viewName: String, + viewOn: String, + pipeline: MutableList, + createViewOptions: CreateViewOptions + ) = wrapped.createView(clientSession.unwrapped(), viewName, viewOn, pipeline, createViewOptions) + + override fun watch(): ChangeStreamIterable = SyncChangeStreamIterable(wrapped.watch()) + + override fun watch(resultClass: Class): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(resultClass = resultClass)) + + override fun watch(pipeline: MutableList): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(pipeline)) + + override fun watch(pipeline: MutableList, resultClass: Class): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(pipeline, resultClass)) + + override fun watch(clientSession: ClientSession): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped())) + + override fun watch(clientSession: ClientSession, resultClass: Class): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), resultClass = resultClass)) + + override fun watch(clientSession: ClientSession, pipeline: MutableList): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline)) + + override fun watch( + clientSession: ClientSession, + pipeline: MutableList, + resultClass: Class + ): ChangeStreamIterable = + SyncChangeStreamIterable(wrapped.watch(clientSession.unwrapped(), pipeline, resultClass)) + + override fun aggregate(pipeline: MutableList): AggregateIterable = + SyncAggregateIterable(wrapped.aggregate(pipeline)) + + override fun aggregate(pipeline: MutableList, resultClass: Class): AggregateIterable = + SyncAggregateIterable(wrapped.aggregate(pipeline, resultClass)) + + override fun aggregate(clientSession: ClientSession, pipeline: MutableList): AggregateIterable = + SyncAggregateIterable(wrapped.aggregate(clientSession.unwrapped(), pipeline)) + + override fun aggregate( + clientSession: ClientSession, + pipeline: MutableList, + resultClass: Class + ): AggregateIterable = SyncAggregateIterable(wrapped.aggregate(clientSession.unwrapped(), pipeline, resultClass)) + + private fun ClientSession.unwrapped() = (this as SyncClientSession).wrapped +} diff --git a/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoIterable.kt 
b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoIterable.kt
new file mode 100644
index 00000000000..7a7f7af5221
--- /dev/null
+++ b/driver-kotlin-sync/src/integrationTest/kotlin/com/mongodb/kotlin/client/syncadapter/SyncMongoIterable.kt
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client.syncadapter
+
+import com.mongodb.Function
+import com.mongodb.client.MongoCursor
+import com.mongodb.client.MongoIterable as JMongoIterable
+import com.mongodb.kotlin.client.MongoIterable
+
+internal open class SyncMongoIterable<T>(val delegate: MongoIterable<T>) : JMongoIterable<T> {
+    override fun iterator(): MongoCursor<T> = cursor()
+
+    override fun cursor(): MongoCursor<T> = SyncMongoCursor(delegate.cursor())
+
+    override fun first(): T? = delegate.firstOrNull()
+
+    override fun batchSize(batchSize: Int): SyncMongoIterable<T> = apply { delegate.batchSize(batchSize) }
+
+    @Suppress("UNCHECKED_CAST")
+    override fun <A : MutableCollection<in T>?> into(target: A): A & Any {
+        delegate.forEach { target?.add(it) }
+        return target as (A & Any)
+    }
+
+    override fun <U> map(mapper: Function<T, U>): SyncMongoIterable<U> =
+        SyncMongoIterable(delegate.map { mapper.apply(it) })
+}
diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/AggregateIterable.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/AggregateIterable.kt
new file mode 100644
index 00000000000..49130b82c60
--- /dev/null
+++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/AggregateIterable.kt
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client
+
+import com.mongodb.ExplainVerbosity
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
+import com.mongodb.client.AggregateIterable as JAggregateIterable
+import com.mongodb.client.cursor.TimeoutMode
+import com.mongodb.client.model.Collation
+import java.util.concurrent.TimeUnit
+import org.bson.BsonValue
+import org.bson.Document
+import org.bson.conversions.Bson
+
+/**
+ * Iterable like implementation for aggregate operations.
+ *
+ * @param T The type of the result.
+ * @see [Aggregation command](https://www.mongodb.com/docs/manual/reference/command/aggregate)
+ */
+public class AggregateIterable<T : Any>(private val wrapped: JAggregateIterable<T>) : MongoIterable<T>(wrapped) {
+
+    public override fun batchSize(batchSize: Int): AggregateIterable<T> {
+        super.batchSize(batchSize)
+        return this
+    }
+
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via
+     * [MongoCollection]
+     *
+     * If the `timeout` is set then:
+     * * For non-tailable cursors, the default value of timeoutMode is [TimeoutMode.CURSOR_LIFETIME]
+     * * For tailable cursors, the default value of timeoutMode is [TimeoutMode.ITERATION] and it's an error to
+     *   configure it as: [TimeoutMode.CURSOR_LIFETIME]
+     *
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun timeoutMode(timeoutMode: TimeoutMode): AggregateIterable<T> {
+        wrapped.timeoutMode(timeoutMode)
+        return this
+    }
+
+    /**
+     * Aggregates documents according to the specified aggregation pipeline, which must end with an `$out` or `$merge`
+     * stage. This method is the preferred alternative to [cursor], because this method does what is explicitly
+     * requested without executing implicit operations.
+     *
+     * @throws IllegalStateException if the pipeline does not end with an `$out` or `$merge` stage
+     * @see [$out stage](https://www.mongodb.com/docs/manual/reference/operator/aggregation/out/)
+     * @see [$merge stage](https://www.mongodb.com/docs/manual/reference/operator/aggregation/merge/)
+     */
+    public fun toCollection(): Unit = wrapped.toCollection()
+
+    /**
+     * Aggregates documents according to the specified aggregation pipeline.
+     * - If the aggregation pipeline ends with an `$out` or `$merge` stage, then finds all documents in the affected
+     *   namespace and returns a [MongoCursor] over them. You may want to use [toCollection] instead.
+     * - Otherwise, returns a [MongoCursor] producing no elements.
+     */
+    public override fun cursor(): MongoCursor<T> = super.cursor()
+
+    /**
+     * Enables writing to temporary files. A null value indicates that it's unspecified.
+     *
+     * @param allowDiskUse true if writing to temporary files is enabled
+     * @return this
+     * @see [Aggregation command](https://www.mongodb.com/docs/manual/reference/command/aggregate/)
+     */
+    public fun allowDiskUse(allowDiskUse: Boolean?): AggregateIterable<T> = apply { wrapped.allowDiskUse(allowDiskUse) }
+
+    /**
+     * Sets the maximum execution time on the server for this operation.
+     *
+     * @param maxTime the max time
+     * @param timeUnit the time unit, defaults to Milliseconds
+     * @return this
+     * @see [Max Time](https://www.mongodb.com/docs/manual/reference/method/cursor.maxTimeMS/#cursor.maxTimeMS)
+     */
+    public fun maxTime(maxTime: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): AggregateIterable<T> = apply {
+        wrapped.maxTime(maxTime, timeUnit)
+    }
+
+    /**
+     * The maximum amount of time for the server to wait on new documents to satisfy a `$changeStream` aggregation.
+     *
+     * A zero value will be ignored.
+     *
+     * @param maxAwaitTime the max await time
+     * @param timeUnit the time unit to return the result in, defaults to Milliseconds
+     * @return the maximum await execution time in the given time unit
+     */
+    public fun maxAwaitTime(maxAwaitTime: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): AggregateIterable<T> =
+        apply {
+            wrapped.maxAwaitTime(maxAwaitTime, timeUnit)
+        }
+
+    /**
+     * Sets the bypass document level validation flag.
+     *
+     * Note: This only applies when an $out or $merge stage is specified.
+     *
+     * @param bypassDocumentValidation If true, allows the write to opt-out of document level validation.
+     * @return this
+     * @see [Aggregation command](https://www.mongodb.com/docs/manual/reference/command/aggregate/)
+     */
+    public fun bypassDocumentValidation(bypassDocumentValidation: Boolean?): AggregateIterable<T> = apply {
+        wrapped.bypassDocumentValidation(bypassDocumentValidation)
+    }
+
+    /**
+     * Sets the collation options
+     *
+     * A null value represents the server default.
+     *
+     * @param collation the collation options to use
+     * @return this
+     */
+    public fun collation(collation: Collation?): AggregateIterable<T> = apply { wrapped.collation(collation) }
+
+    /**
+     * Sets the comment for this operation. A null value means no comment is set.
+     *
+     * @param comment the comment
+     * @return this
+     */
+    public fun comment(comment: String?): AggregateIterable<T> = apply { wrapped.comment(comment) }
+
+    /**
+     * Sets the comment for this operation. A null value means no comment is set.
+     *
+     * The comment can be any valid BSON type for server versions 4.4 and above. Server versions between 3.6 and 4.2
+     * only support string as comment, and providing a non-string type will result in a server-side error.
+     *
+     * @param comment the comment
+     * @return this
+     */
+    public fun comment(comment: BsonValue?): AggregateIterable<T> = apply { wrapped.comment(comment) }
+
+    /**
+     * Sets the hint for which index to use. A null value means no hint is set.
+     *
+     * @param hint the hint
+     * @return this
+     */
+    public fun hint(hint: Bson?): AggregateIterable<T> = apply { wrapped.hint(hint) }
+
+    /**
+     * Sets the hint to apply.
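[Editorial sketch, not part of the patch: the options above compose fluently before iteration. The collection, field names, and pipeline here are assumed for illustration.]

    import com.mongodb.client.model.Accumulators
    import com.mongodb.client.model.Aggregates
    import com.mongodb.client.model.Filters
    import com.mongodb.kotlin.client.MongoCollection
    import java.util.concurrent.TimeUnit
    import org.bson.Document

    fun rollupByCity(collection: MongoCollection<Document>) {
        collection.aggregate(
            listOf(
                Aggregates.match(Filters.eq("status", "active")),
                Aggregates.group("\$city", Accumulators.sum("count", 1))))
            .allowDiskUse(true) // permit temporary files for large groupings
            .maxTime(5, TimeUnit.SECONDS) // server-side execution budget
            .comment("city-rollup") // visible in logs and the profiler
            .cursor()
            .use { while (it.hasNext()) println(it.next()) }
    }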
+     *
+     * Note: If [AggregateIterable.hint] is set that will be used instead of any hint string.
+     *
+     * @param hint the name of the index which should be used for the operation
+     * @return this
+     */
+    public fun hintString(hint: String?): AggregateIterable<T> = apply { wrapped.hintString(hint) }
+
+    /**
+     * Add top-level variables to the aggregation.
+     *
+     * For MongoDB 5.0+, the aggregate command accepts a `let` option. This option is a document consisting of zero or
+     * more fields representing variables that are accessible to the aggregation pipeline. The key is the name of the
+     * variable and the value is a constant in the aggregate expression language. Each parameter name is then usable to
+     * access the value of the corresponding expression with the "$$" syntax within aggregate expression contexts which
+     * may require the use of $expr or a pipeline.
+     *
+     * @param variables the variables
+     * @return this
+     */
+    public fun let(variables: Bson?): AggregateIterable<T> = apply { wrapped.let(variables) }
+
+    /**
+     * Explain the execution plan for this operation with the given verbosity level
+     *
+     * @param verbosity the verbosity of the explanation
+     * @return the execution plan
+     * @see [Explain command](https://www.mongodb.com/docs/manual/reference/command/explain/)
+     */
+    public fun explain(verbosity: ExplainVerbosity? = null): Document = explain(Document::class.java, verbosity)
+
+    /**
+     * Explain the execution plan for this operation with the given verbosity level
+     *
+     * @param R the type of the document class
+     * @param resultClass the result document type.
+     * @param verbosity the verbosity of the explanation
+     * @return the execution plan
+     * @see [Explain command](https://www.mongodb.com/docs/manual/reference/command/explain/)
+     */
+    public fun <R : Any> explain(resultClass: Class<R>, verbosity: ExplainVerbosity? = null): R =
+        if (verbosity == null) wrapped.explain(resultClass) else wrapped.explain(resultClass, verbosity)
+
+    /**
+     * Explain the execution plan for this operation with the given verbosity level
+     *
+     * @param R the type of the document class
+     * @param verbosity the verbosity of the explanation
+     * @return the execution plan
+     * @see [Explain command](https://www.mongodb.com/docs/manual/reference/command/explain/)
+     */
+    public inline fun <reified R : Any> explain(verbosity: ExplainVerbosity? = null): R =
+        explain(R::class.java, verbosity)
+}
diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ChangeStreamIterable.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ChangeStreamIterable.kt
new file mode 100644
index 00000000000..cf7cc35b0b0
--- /dev/null
+++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ChangeStreamIterable.kt
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.mongodb.kotlin.client + +import com.mongodb.client.ChangeStreamIterable as JChangeStreamIterable +import com.mongodb.client.model.Collation +import com.mongodb.client.model.changestream.ChangeStreamDocument +import com.mongodb.client.model.changestream.FullDocument +import com.mongodb.client.model.changestream.FullDocumentBeforeChange +import java.util.concurrent.TimeUnit +import org.bson.BsonDocument +import org.bson.BsonTimestamp +import org.bson.BsonValue + +/** + * Iterable like implementation for change streams. + * + * Note: the [ChangeStreamDocument] class will not be applicable for all change stream outputs. If using custom + * pipelines that radically change the result, then the [withDocumentClass] method can be used to provide an alternative + * document format. + * + * @param T The type of the result. + */ +public class ChangeStreamIterable(private val wrapped: JChangeStreamIterable) : + MongoIterable>(wrapped) { + + public override fun batchSize(batchSize: Int): ChangeStreamIterable { + super.batchSize(batchSize) + return this + } + + /** + * Returns a cursor used for iterating over elements of type {@code ChangeStreamDocument}. The cursor has a + * covariant return type to additionally provide a method to access the resume token in change stream batches. + * + * @return the change stream cursor + */ + public override fun cursor(): MongoChangeStreamCursor> = + MongoChangeStreamCursorImpl(wrapped.cursor()) + + /** + * Sets the fullDocument value. + * + * @param fullDocument the fullDocument + * @return this + */ + public fun fullDocument(fullDocument: FullDocument): ChangeStreamIterable = apply { + wrapped.fullDocument(fullDocument) + } + + /** + * Sets the fullDocumentBeforeChange value. + * + * @param fullDocumentBeforeChange the fullDocumentBeforeChange + * @return this + */ + public fun fullDocumentBeforeChange(fullDocumentBeforeChange: FullDocumentBeforeChange): ChangeStreamIterable = + apply { + wrapped.fullDocumentBeforeChange(fullDocumentBeforeChange) + } + + /** + * Sets the logical starting point for the new change stream. + * + * @param resumeToken the resume token + * @return this + */ + public fun resumeAfter(resumeToken: BsonDocument): ChangeStreamIterable = apply { + wrapped.resumeAfter(resumeToken) + } + + /** + * Sets the maximum await execution time on the server for this operation. + * + * @param maxAwaitTime the max await time. A zero value will be ignored, and indicates that the driver should + * respect the server's default value + * @param timeUnit the time unit, which defaults to MILLISECONDS + * @return this + */ + public fun maxAwaitTime(maxAwaitTime: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): ChangeStreamIterable = + apply { + wrapped.maxAwaitTime(maxAwaitTime, timeUnit) + } + + /** + * Sets the collation options + * + * A null value represents the server default. + * + * @param collation the collation options to use + * @return this + */ + public fun collation(collation: Collation?): ChangeStreamIterable = apply { wrapped.collation(collation) } + + /** + * Returns a `MongoIterable` containing the results of the change stream based on the document class provided. + * + * @param R the Mongo Iterable type + * @param resultClass the target document type of the iterable. 
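[Editorial sketch, not part of the patch: resuming a change stream with the options documented above. The collection and the token-persistence strategy are assumed.]

    import com.mongodb.client.model.changestream.FullDocument
    import com.mongodb.kotlin.client.MongoCollection
    import org.bson.BsonDocument
    import org.bson.Document

    fun tailInserts(collection: MongoCollection<Document>) {
        var lastToken: BsonDocument? = null // persist this somewhere durable in real code
        collection.watch()
            .fullDocument(FullDocument.UPDATE_LOOKUP)
            .cursor()
            .use { cursor ->
                while (cursor.hasNext()) {
                    val event = cursor.next()
                    println(event.fullDocument)
                    lastToken = event.resumeToken
                }
            }
        // After a restart, resume from the saved token:
        lastToken?.let { token -> collection.watch().resumeAfter(token) }
    }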
+ * @return the new Mongo Iterable + */ + public fun withDocumentClass(resultClass: Class): MongoIterable = + MongoIterable(wrapped.withDocumentClass(resultClass)) + + /** + * Returns a `MongoIterable` containing the results of the change stream based on the document class provided. + * + * @param R the Mongo Iterable type + * @return the new Mongo Iterable + */ + public inline fun withDocumentClass(): MongoIterable = withDocumentClass(R::class.java) + + /** + * The change stream will only provide changes that occurred at or after the specified timestamp. + * + * Any command run against the server will return an operation time that can be used here. + * + * The default value is an operation time obtained from the server before the change stream was created. + * + * @param startAtOperationTime the start at operation time + * @return this + */ + public fun startAtOperationTime(startAtOperationTime: BsonTimestamp): ChangeStreamIterable = apply { + wrapped.startAtOperationTime(startAtOperationTime) + } + + /** + * Similar to `resumeAfter`, this option takes a resume token and starts a new change stream returning the first + * notification after the token. + * + * This will allow users to watch collections that have been dropped and recreated or newly renamed collections + * without missing any notifications. + * + * Note: The server will report an error if both `startAfter` and `resumeAfter` are specified. + * + * @param startAfter the startAfter resumeToken + * @return this + * @see [Start After](https://www.mongodb.com/docs/manual/changeStreams/#change-stream-start-after) + */ + public fun startAfter(startAfter: BsonDocument): ChangeStreamIterable = apply { wrapped.startAfter(startAfter) } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + */ + public fun comment(comment: String?): ChangeStreamIterable = apply { wrapped.comment(comment) } + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * The comment can be any valid BSON type for server versions 4.4 and above. Server versions between 3.6 and 4.2 + * only support string as comment, and providing a non-string type will result in a server-side error. + * + * @param comment the comment + */ + public fun comment(comment: BsonValue?): ChangeStreamIterable = apply { wrapped.comment(comment) } + + /** + * Sets whether to include expanded change stream events, which are: createIndexes, dropIndexes, modify, create, + * shardCollection, reshardCollection, refineCollectionShardKey. False by default. + * + * @param showExpandedEvents true to include expanded events + * @return this + */ + public fun showExpandedEvents(showExpandedEvents: Boolean): ChangeStreamIterable = apply { + wrapped.showExpandedEvents(showExpandedEvents) + } +} diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ClientSession.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ClientSession.kt new file mode 100644 index 00000000000..9103689b251 --- /dev/null +++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ClientSession.kt @@ -0,0 +1,98 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client
+
+import com.mongodb.ClientSessionOptions
+import com.mongodb.TransactionOptions
+import com.mongodb.client.ClientSession as JClientSession
+import java.io.Closeable
+import java.util.concurrent.TimeUnit
+
+/** A client session that supports transactions. */
+public class ClientSession(public val wrapped: JClientSession) : Closeable {
+
+    public override fun close(): Unit = wrapped.close()
+
+    /** The options for this session. */
+    public val options: ClientSessionOptions
+        get() = wrapped.options
+
+    /** Returns true if operations in this session must be causally consistent */
+    public val isCausallyConsistent: Boolean
+        get() = wrapped.isCausallyConsistent
+
+    /** Returns true if there is an active transaction on this session, and false otherwise */
+    public val hasActiveTransaction: Boolean
+        get() = wrapped.hasActiveTransaction()
+
+    /**
+     * Gets the transaction options.
+     *
+     * Only call this method if the session has an active transaction
+     */
+    public val transactionOptions: TransactionOptions
+        get() = wrapped.transactionOptions
+
+    /**
+     * Start a transaction in the context of this session with default transaction options. A transaction can not be
+     * started if there is already an active transaction on this session.
+     */
+    public fun startTransaction(): Unit = wrapped.startTransaction()
+
+    /**
+     * Start a transaction in the context of this session with the given transaction options. A transaction can not be
+     * started if there is already an active transaction on this session.
+     *
+     * @param transactionOptions the options to apply to the transaction
+     */
+    public fun startTransaction(transactionOptions: TransactionOptions): Unit =
+        wrapped.startTransaction(transactionOptions)
+
+    /**
+     * Commit a transaction in the context of this session. A transaction can only be committed if one has first been
+     * started.
+     */
+    public fun commitTransaction(): Unit = wrapped.commitTransaction()
+
+    /**
+     * Abort a transaction in the context of this session.
+     *
+     * A transaction can only be aborted if one has first been started.
+     */
+    public fun abortTransaction(): Unit = wrapped.abortTransaction()
+
+    /**
+     * Execute the given function within a transaction.
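[Editorial sketch, not part of the patch: running writes inside `withTransaction`. The client and collection names are assumed; the body's return value becomes the call's result.]

    import com.mongodb.kotlin.client.MongoClient
    import com.mongodb.kotlin.client.MongoCollection
    import org.bson.Document

    fun insertAtomically(client: MongoClient, orders: MongoCollection<Document>) {
        client.startSession().use { session ->
            val outcome =
                session.withTransaction({
                    orders.insertOne(session, Document("sku", "abc123"))
                    orders.insertOne(session, Document("sku", "def456"))
                    "committed" // withTransaction returns the body's result
                })
            println(outcome)
        }
    }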
+     *
+     * @param T the return type of the transaction body
+     * @param transactionBody the body of the transaction
+     * @param options the transaction options
+     * @return the return value of the transaction body
+     */
+    public fun <T> withTransaction(
+        transactionBody: () -> T,
+        options: TransactionOptions = TransactionOptions.builder().build()
+    ): T = wrapped.withTransaction(transactionBody, options)
+}
+
+/**
+ * maxCommitTime extension function
+ *
+ * @param maxCommitTime time in milliseconds
+ * @return the options
+ */
+public fun TransactionOptions.Builder.maxCommitTime(maxCommitTime: Long): TransactionOptions.Builder =
+    this.apply { maxCommitTime(maxCommitTime, TimeUnit.MILLISECONDS) }
diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/DistinctIterable.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/DistinctIterable.kt
new file mode 100644
index 00000000000..98cfd590104
--- /dev/null
+++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/DistinctIterable.kt
@@ -0,0 +1,121 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client
+
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
+import com.mongodb.client.DistinctIterable as JDistinctIterable
+import com.mongodb.client.cursor.TimeoutMode
+import com.mongodb.client.model.Collation
+import com.mongodb.lang.Nullable
+import java.util.concurrent.TimeUnit
+import org.bson.BsonValue
+import org.bson.conversions.Bson
+
+/**
+ * Iterable like implementation for distinct operations.
+ *
+ * @param T The type of the result.
+ * @see [Distinct command](https://www.mongodb.com/docs/manual/reference/command/distinct/)
+ */
+public class DistinctIterable<T : Any>(private val wrapped: JDistinctIterable<T>) : MongoIterable<T>(wrapped) {
+
+    /**
+     * Sets the number of documents to return per batch.
+     *
+     * @param batchSize the batch size
+     * @return this
+     * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize)
+     */
+    public override fun batchSize(batchSize: Int): DistinctIterable<T> = apply { wrapped.batchSize(batchSize) }
+
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via
+     * [MongoCollection]
+     *
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun timeoutMode(timeoutMode: TimeoutMode): DistinctIterable<T> = apply { wrapped.timeoutMode(timeoutMode) }
+
+    /**
+     * Sets the query filter to apply to the query.
+     *
+     * @param filter the filter, which may be null.
+     * @return this
+     * @see [Filter results](https://www.mongodb.com/docs/manual/reference/method/db.collection.find/)
+     */
+    public fun filter(filter: Bson?): DistinctIterable<T> = apply { wrapped.filter(filter) }
+
+    /**
+     * Sets the maximum execution time on the server for this operation.
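[Editorial sketch, not part of the patch: a filtered distinct query. It assumes the Kotlin collection API exposes a reified `distinct` overload; the collection and field names are illustrative.]

    import com.mongodb.client.model.Filters
    import com.mongodb.kotlin.client.MongoCollection
    import java.util.concurrent.TimeUnit
    import org.bson.Document

    fun activeCities(users: MongoCollection<Document>) {
        users.distinct<String>("city", Filters.eq("active", true))
            .maxTime(2, TimeUnit.SECONDS) // bound the server-side execution time
            .cursor()
            .use { while (it.hasNext()) println(it.next()) }
    }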
+     *
+     * @param maxTime the max time
+     * @param timeUnit the time unit, which defaults to Milliseconds
+     * @return this
+     */
+    public fun maxTime(maxTime: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): DistinctIterable<T> = apply {
+        wrapped.maxTime(maxTime, timeUnit)
+    }
+
+    /**
+     * Sets the collation options
+     *
+     * A null value represents the server default.
+     *
+     * @param collation the collation options to use
+     * @return this
+     */
+    public fun collation(collation: Collation?): DistinctIterable<T> = apply { wrapped.collation(collation) }
+
+    /**
+     * Sets the comment for this operation. A null value means no comment is set.
+     *
+     * @param comment the comment
+     * @return this
+     */
+    public fun comment(comment: String?): DistinctIterable<T> = apply { wrapped.comment(comment) }
+
+    /**
+     * Sets the comment for this operation. A null value means no comment is set.
+     *
+     * @param comment the comment
+     * @return this
+     */
+    public fun comment(comment: BsonValue?): DistinctIterable<T> = apply { wrapped.comment(comment) }
+
+    /**
+     * Sets the hint for which index to use. A null value means no hint is set.
+     *
+     * @param hint the hint
+     * @return this
+     */
+    public fun hint(@Nullable hint: Bson?): DistinctIterable<T> = apply { wrapped.hint(hint) }
+
+    /**
+     * Sets the hint for which index to use. A null value means no hint is set.
+     *
+     * Note: If [DistinctIterable.hint] is set that will be used instead of any hint string.
+     *
+     * @param hint the name of the index which should be used for the operation
+     * @return this
+     */
+    public fun hintString(@Nullable hint: String?): DistinctIterable<T> = apply { wrapped.hintString(hint) }
+}
diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/FindIterable.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/FindIterable.kt
new file mode 100644
index 00000000000..81e1bb51864
--- /dev/null
+++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/FindIterable.kt
@@ -0,0 +1,297 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client
+
+import com.mongodb.CursorType
+import com.mongodb.ExplainVerbosity
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
+import com.mongodb.client.FindIterable as JFindIterable
+import com.mongodb.client.cursor.TimeoutMode
+import com.mongodb.client.model.Collation
+import java.util.concurrent.TimeUnit
+import org.bson.BsonValue
+import org.bson.Document
+import org.bson.conversions.Bson
+
+/**
+ * Iterable like implementation for find operations.
+ *
+ * @param T The type of the result.
+ * @see [Collection filter](https://www.mongodb.com/docs/manual/reference/method/db.collection.find/)
+ */
+public class FindIterable<T : Any>(private val wrapped: JFindIterable<T>) : MongoIterable<T>(wrapped) {
+
+    public override fun batchSize(batchSize: Int): FindIterable<T> {
+        super.batchSize(batchSize)
+        return this
+    }
+
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via
+     * [MongoCollection]
+     *
+     * If the `timeout` is set then:
+     * * For non-tailable cursors, the default value of timeoutMode is [TimeoutMode.CURSOR_LIFETIME]
+     * * For tailable cursors, the default value of timeoutMode is [TimeoutMode.ITERATION] and it's an error to
+     *   configure it as: [TimeoutMode.CURSOR_LIFETIME]
+     *
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun timeoutMode(timeoutMode: TimeoutMode): FindIterable<T> {
+        wrapped.timeoutMode(timeoutMode)
+        return this
+    }
+
+    /**
+     * Sets the query filter to apply to the query.
+     *
+     * @param filter the filter.
+     * @return this
+     * @see [Collection filter](https://www.mongodb.com/docs/manual/reference/method/db.collection.find/)
+     */
+    public fun filter(filter: Bson?): FindIterable<T> = apply { wrapped.filter(filter) }
+
+    /**
+     * Sets the limit to apply.
+     *
+     * @param limit the limit, which may be 0
+     * @return this
+     * @see [Cursor limit](https://www.mongodb.com/docs/manual/reference/method/cursor.limit/#cursor.limit)
+     */
+    public fun limit(limit: Int): FindIterable<T> = apply { wrapped.limit(limit) }
+
+    /**
+     * Sets the number of documents to skip.
+     *
+     * @param skip the number of documents to skip
+     * @return this
+     * @see [Cursor skip](https://www.mongodb.com/docs/manual/reference/method/cursor.skip/#cursor.skip)
+     */
+    public fun skip(skip: Int): FindIterable<T> = apply { wrapped.skip(skip) }
+
+    /**
+     * Sets the maximum execution time on the server for this operation.
+     *
+     * @param maxTime the max time
+     * @param timeUnit the time unit, which defaults to Milliseconds
+     * @return this
+     */
+    public fun maxTime(maxTime: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): FindIterable<T> = apply {
+        wrapped.maxTime(maxTime, timeUnit)
+    }
+
+    /**
+     * The maximum amount of time for the server to wait on new documents to satisfy a tailable cursor query. This only
+     * applies to a TAILABLE_AWAIT cursor. When the cursor is not a TAILABLE_AWAIT cursor, this option is ignored.
+     *
+     * On servers >= 3.2, this option will be specified on the getMore command as "maxTimeMS". The default is no value:
+     * no "maxTimeMS" is sent to the server with the getMore command.
+     *
+     * On servers < 3.2, this option is ignored, and indicates that the driver should respect the server's default
+     * value
+     *
+     * A zero value will be ignored.
+     *
+     * @param maxAwaitTime the max await time
+     * @param timeUnit the time unit to return results in, which defaults to Milliseconds
+     * @return the maximum await execution time in the given time unit
+     * @see [Max Time](https://www.mongodb.com/docs/manual/reference/method/cursor.maxTimeMS/#cursor.maxTimeMS)
+     */
+    public fun maxAwaitTime(maxAwaitTime: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): FindIterable<T> = apply {
+        wrapped.maxAwaitTime(maxAwaitTime, timeUnit)
+    }
+
+    /**
+     * Sets a document describing the fields to return for all matching documents.
+     *
+     * @param projection the projection document.
+     * @return this
+     */
+    public fun projection(projection: Bson?): FindIterable<T> = apply { wrapped.projection(projection) }
+
+    /**
+     * Sets the sort criteria to apply to the query.
+     *
+     * @param sort the sort criteria.
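[Editorial sketch, not part of the patch: a paged query combining the filter, sort, skip, limit, and maxTime options above. Collection and field names are assumed.]

    import com.mongodb.client.model.Filters
    import com.mongodb.client.model.Sorts
    import com.mongodb.kotlin.client.MongoCollection
    import java.util.concurrent.TimeUnit
    import org.bson.Document

    fun thirdPage(users: MongoCollection<Document>) {
        users.find(Filters.eq("active", true))
            .sort(Sorts.descending("createdAt"))
            .skip(40) // page 3 at 20 documents per page
            .limit(20)
            .maxTime(2, TimeUnit.SECONDS)
            .cursor()
            .use { while (it.hasNext()) println(it.next()) }
    }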
+     * @return this
+     * @see [Cursor sort](https://www.mongodb.com/docs/manual/reference/method/cursor.sort/)
+     */
+    public fun sort(sort: Bson?): FindIterable<T> = apply { wrapped.sort(sort) }
+
+    /**
+     * The server normally times out idle cursors after an inactivity period (10 minutes) to prevent excess memory use.
+     * Set this option to prevent that.
+     *
+     * @param noCursorTimeout true if cursor timeout is disabled
+     * @return this
+     */
+    public fun noCursorTimeout(noCursorTimeout: Boolean): FindIterable<T> = apply {
+        wrapped.noCursorTimeout(noCursorTimeout)
+    }
+
+    /**
+     * Get partial results from a sharded cluster if one or more shards are unreachable (instead of throwing an
+     * error).
+     *
+     * @param partial if partial results for sharded clusters is enabled
+     * @return this
+     */
+    public fun partial(partial: Boolean): FindIterable<T> = apply { wrapped.partial(partial) }
+
+    /**
+     * Sets the cursor type.
+     *
+     * @param cursorType the cursor type
+     * @return this
+     */
+    public fun cursorType(cursorType: CursorType): FindIterable<T> = apply { wrapped.cursorType(cursorType) }
+
+    /**
+     * Sets the collation options
+     *
+     * A null value represents the server default.
+     *
+     * @param collation the collation options to use
+     * @return this
+     */
+    public fun collation(collation: Collation?): FindIterable<T> = apply { wrapped.collation(collation) }
+
+    /**
+     * Sets the comment for this operation. A null value means no comment is set.
+     *
+     * @param comment the comment
+     * @return this
+     */
+    public fun comment(comment: String?): FindIterable<T> = apply { wrapped.comment(comment) }
+
+    /**
+     * Sets the comment for this operation. A null value means no comment is set.
+     *
+     * The comment can be any valid BSON type for server versions 4.4 and above. Server versions between 3.6 and 4.2
+     * only support string as comment, and providing a non-string type will result in a server-side error.
+     *
+     * @param comment the comment
+     * @return this
+     */
+    public fun comment(comment: BsonValue?): FindIterable<T> = apply { wrapped.comment(comment) }
+
+    /**
+     * Sets the hint for which index to use. A null value means no hint is set.
+     *
+     * @param hint the hint
+     * @return this
+     */
+    public fun hint(hint: Bson?): FindIterable<T> = apply { wrapped.hint(hint) }
+
+    /**
+     * Sets the hint to apply.
+     *
+     * Note: If [FindIterable.hint] is set that will be used instead of any hint string.
+     *
+     * @param hint the name of the index which should be used for the operation
+     * @return this
+     */
+    public fun hintString(hint: String?): FindIterable<T> = apply { wrapped.hintString(hint) }
+
+    /**
+     * Add top-level variables to the operation. A null value means no variables are set.
+     *
+     * Allows for improved command readability by separating the variables from the query text.
+     *
+     * @param variables for find operation
+     * @return this
+     */
+    public fun let(variables: Bson?): FindIterable<T> = apply { wrapped.let(variables) }
+
+    /**
+     * Sets the exclusive upper bound for a specific index. A null value means no max is set.
+     *
+     * @param max the max
+     * @return this
+     */
+    public fun max(max: Bson?): FindIterable<T> = apply { wrapped.max(max) }
+
+    /**
+     * Sets the minimum inclusive lower bound for a specific index. A null value means no min is set.
+     *
+     * @param min the min
+     * @return this
+     */
+    public fun min(min: Bson?): FindIterable<T> = apply { wrapped.min(min) }
+
+    /**
+     * Sets the returnKey. If true the find operation will return only the index keys in the resulting documents.
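[Editorial sketch, not part of the patch: `min`/`max` set index-key bounds, so the index is pinned with `hint`. An `{ age: 1 }` index and the field names are assumed.]

    import com.mongodb.client.model.Indexes
    import com.mongodb.kotlin.client.MongoCollection
    import org.bson.Document

    fun boundedScan(users: MongoCollection<Document>) {
        users.find()
            .hint(Indexes.ascending("age"))
            .min(Document("age", 18)) // inclusive lower bound on the index keys
            .max(Document("age", 65)) // exclusive upper bound on the index keys
            .cursor()
            .use { while (it.hasNext()) println(it.next()) }
    }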
+     *
+     * @param returnKey the returnKey
+     * @return this
+     */
+    public fun returnKey(returnKey: Boolean): FindIterable<T> = apply { wrapped.returnKey(returnKey) }
+
+    /**
+     * Sets the showRecordId. Set to true to add a field `$recordId` to the returned documents.
+     *
+     * @param showRecordId the showRecordId
+     * @return this
+     */
+    public fun showRecordId(showRecordId: Boolean): FindIterable<T> = apply { wrapped.showRecordId(showRecordId) }
+
+    /**
+     * Enables writing to temporary files on the server. When set to true, the server can write temporary data to disk
+     * while executing the find operation.
+     *
+     * This option is sent only if the caller explicitly sets it to true.
+     *
+     * @param allowDiskUse the allowDiskUse
+     * @return this
+     */
+    public fun allowDiskUse(allowDiskUse: Boolean?): FindIterable<T> = apply { wrapped.allowDiskUse(allowDiskUse) }
+
+    /**
+     * Explain the execution plan for this operation with the given verbosity level
+     *
+     * @param verbosity the verbosity of the explanation
+     * @return the execution plan
+     * @see [Explain command](https://www.mongodb.com/docs/manual/reference/command/explain/)
+     */
+    public fun explain(verbosity: ExplainVerbosity? = null): Document = explain(Document::class.java, verbosity)
+
+    /**
+     * Explain the execution plan for this operation with the given verbosity level
+     *
+     * @param R the type of the document class
+     * @param resultClass the result document type.
+     * @param verbosity the verbosity of the explanation
+     * @return the execution plan
+     * @see [Explain command](https://www.mongodb.com/docs/manual/reference/command/explain/)
+     */
+    public fun <R : Any> explain(resultClass: Class<R>, verbosity: ExplainVerbosity? = null): R =
+        if (verbosity == null) wrapped.explain(resultClass) else wrapped.explain(resultClass, verbosity)
+
+    /**
+     * Explain the execution plan for this operation with the given verbosity level
+     *
+     * @param R the type of the document class
+     * @param verbosity the verbosity of the explanation
+     * @return the execution plan
+     * @see [Explain command](https://www.mongodb.com/docs/manual/reference/command/explain/)
+     */
+    public inline fun <reified R : Any> explain(verbosity: ExplainVerbosity? = null): R =
+        explain(R::class.java, verbosity)
+}
diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListCollectionNamesIterable.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListCollectionNamesIterable.kt
new file mode 100644
index 00000000000..33053dfc876
--- /dev/null
+++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListCollectionNamesIterable.kt
@@ -0,0 +1,88 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client
+
+import com.mongodb.client.ListCollectionNamesIterable as JListCollectionNamesIterable
+import java.util.concurrent.TimeUnit
+import org.bson.BsonValue
+import org.bson.conversions.Bson
+
+/**
+ * Iterable for listing collection names.
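[Editorial sketch, not part of the patch: reading a query plan with the explain overloads above. QUERY_PLANNER inspects the plan without executing the query; names are assumed.]

    import com.mongodb.ExplainVerbosity
    import com.mongodb.client.model.Filters
    import com.mongodb.kotlin.client.MongoCollection
    import org.bson.Document

    fun showPlan(users: MongoCollection<Document>) {
        val plan: Document = users.find(Filters.eq("active", true)).explain(ExplainVerbosity.QUERY_PLANNER)
        println(plan.toJson())
    }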
+ * + * @see [List collections](https://www.mongodb.com/docs/manual/reference/command/listCollections/) + * @since 5.0 + */ +public class ListCollectionNamesIterable(private val wrapped: JListCollectionNamesIterable) : + MongoIterable(wrapped) { + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time + * @param timeUnit the time unit, defaults to Milliseconds + * @return this + * @see [Max Time](https://www.mongodb.com/docs/manual/reference/operator/meta/maxTimeMS/) + */ + public fun maxTime(maxTime: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): ListCollectionNamesIterable = apply { + wrapped.maxTime(maxTime, timeUnit) + } + + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size + * @return this + * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize) + */ + public override fun batchSize(batchSize: Int): ListCollectionNamesIterable = apply { wrapped.batchSize(batchSize) } + + /** + * Sets the query filter to apply to the returned database names. + * + * @param filter the filter, which may be null. + * @return this + */ + public fun filter(filter: Bson?): ListCollectionNamesIterable = apply { wrapped.filter(filter) } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + */ + public fun comment(comment: String?): ListCollectionNamesIterable = apply { wrapped.comment(comment) } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + */ + public fun comment(comment: BsonValue?): ListCollectionNamesIterable = apply { wrapped.comment(comment) } + + /** + * Sets the `authorizedCollections` field of the `listCollections` command. + * + * @param authorizedCollections If `true`, allows executing the `listCollections` command, which has the `nameOnly` + * field set to `true`, without having the + * [`listCollections` privilege](https://docs.mongodb.com/manual/reference/privilege-actions/#mongodb-authaction-listCollections) + * on the database resource. + * @return `this`. + */ + public fun authorizedCollections(authorizedCollections: Boolean): ListCollectionNamesIterable = apply { + wrapped.authorizedCollections(authorizedCollections) + } +} diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListCollectionsIterable.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListCollectionsIterable.kt new file mode 100644 index 00000000000..43b2a9ba510 --- /dev/null +++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListCollectionsIterable.kt @@ -0,0 +1,91 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.kotlin.client + +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason +import com.mongodb.client.ListCollectionsIterable as JListCollectionsIterable +import com.mongodb.client.cursor.TimeoutMode +import java.util.concurrent.TimeUnit +import org.bson.BsonValue +import org.bson.conversions.Bson + +/** + * Iterable like implementation for list collection operations. + * + * @param T The type of the result. + * @see [List collections](https://www.mongodb.com/docs/manual/reference/command/listCollections/) + */ +public class ListCollectionsIterable(private val wrapped: JListCollectionsIterable) : + MongoIterable(wrapped) { + + public override fun batchSize(batchSize: Int): ListCollectionsIterable { + super.batchSize(batchSize) + return this + } + + /** + * Sets the timeoutMode for the cursor. + * + * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via + * [MongoCollection] + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun timeoutMode(timeoutMode: TimeoutMode): ListCollectionsIterable { + wrapped.timeoutMode(timeoutMode) + return this + } + + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time + * @param timeUnit the time unit, defaults to Milliseconds + * @return this + * @see [Max Time](https://www.mongodb.com/docs/manual/reference/operator/meta/maxTimeMS/) + */ + public fun maxTime(maxTime: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): ListCollectionsIterable = apply { + wrapped.maxTime(maxTime, timeUnit) + } + + /** + * Sets the query filter to apply to the returned database names. + * + * @param filter the filter, which may be null. + * @return this + */ + public fun filter(filter: Bson?): ListCollectionsIterable = apply { wrapped.filter(filter) } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + */ + public fun comment(comment: String?): ListCollectionsIterable = apply { wrapped.comment(comment) } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + */ + public fun comment(comment: BsonValue?): ListCollectionsIterable = apply { wrapped.comment(comment) } +} diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListDatabasesIterable.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListDatabasesIterable.kt new file mode 100644 index 00000000000..dd9e1e0bcc8 --- /dev/null +++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListDatabasesIterable.kt @@ -0,0 +1,110 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.kotlin.client + +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason +import com.mongodb.client.ListDatabasesIterable as JListDatabasesIterable +import com.mongodb.client.cursor.TimeoutMode +import java.util.concurrent.TimeUnit +import org.bson.BsonValue +import org.bson.conversions.Bson + +/** + * Iterable like implementation for list database operations. + * + * @param T The type of the result. + * @see [List databases](https://www.mongodb.com/docs/manual/reference/command/listDatabases/) + */ +public class ListDatabasesIterable(private val wrapped: JListDatabasesIterable) : + MongoIterable(wrapped) { + + public override fun batchSize(batchSize: Int): ListDatabasesIterable { + super.batchSize(batchSize) + return this + } + + /** + * Sets the timeoutMode for the cursor. + * + * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via + * [MongoCollection] + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun timeoutMode(timeoutMode: TimeoutMode): ListDatabasesIterable { + wrapped.timeoutMode(timeoutMode) + return this + } + + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time + * @param timeUnit the time unit, defaults to Milliseconds + * @return this + * @see [Max Time](https://www.mongodb.com/docs/manual/reference/operator/meta/maxTimeMS/) + */ + public fun maxTime(maxTime: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): ListDatabasesIterable = apply { + wrapped.maxTime(maxTime, timeUnit) + } + + /** + * Sets the query filter to apply to the returned database names. + * + * @param filter the filter, which may be null. + * @return this + */ + public fun filter(filter: Bson?): ListDatabasesIterable = apply { wrapped.filter(filter) } + /** + * Sets the nameOnly flag that indicates whether the command should return just the database names or return the + * database names and size information. + * + * @param nameOnly the nameOnly flag, which may be null + * @return this + */ + public fun nameOnly(nameOnly: Boolean?): ListDatabasesIterable = apply { wrapped.nameOnly(nameOnly) } + + /** + * Sets the authorizedDatabasesOnly flag that indicates whether the command should return just the databases which + * the user is authorized to see. + * + * @param authorizedDatabasesOnly the authorizedDatabasesOnly flag, which may be null + * @return this + */ + public fun authorizedDatabasesOnly(authorizedDatabasesOnly: Boolean?): ListDatabasesIterable = apply { + wrapped.authorizedDatabasesOnly(authorizedDatabasesOnly) + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + */ + public fun comment(comment: String?): ListDatabasesIterable = apply { wrapped.comment(comment) } + + /** + * Sets the comment for this operation. A null value means no comment is set. 
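[Editorial sketch, not part of the patch: combining the `nameOnly` and `authorizedDatabasesOnly` flags documented above; the client is assumed.]

    import com.mongodb.kotlin.client.MongoClient

    fun printVisibleDatabases(client: MongoClient) {
        client.listDatabases()
            .nameOnly(true) // names only, no size statistics
            .authorizedDatabasesOnly(true) // restrict to databases the user may see
            .cursor()
            .use { while (it.hasNext()) println(it.next().getString("name")) }
    }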
+ * + * @param comment the comment + * @return this + */ + public fun comment(comment: BsonValue?): ListDatabasesIterable = apply { wrapped.comment(comment) } +} diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListIndexesIterable.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListIndexesIterable.kt new file mode 100644 index 00000000000..cc4449384b8 --- /dev/null +++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListIndexesIterable.kt @@ -0,0 +1,81 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.kotlin.client + +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason +import com.mongodb.client.ListIndexesIterable as JListIndexesIterable +import com.mongodb.client.cursor.TimeoutMode +import java.util.concurrent.TimeUnit +import org.bson.BsonValue + +/** + * Iterable like implementation for list index operations. + * + * @param T The type of the result. + * @see [List indexes](https://www.mongodb.com/docs/manual/reference/command/listIndexes/) + */ +public class ListIndexesIterable(private val wrapped: JListIndexesIterable) : MongoIterable(wrapped) { + + public override fun batchSize(batchSize: Int): ListIndexesIterable { + super.batchSize(batchSize) + return this + } + + /** + * Sets the timeoutMode for the cursor. + * + * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via + * [MongoCollection] + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun timeoutMode(timeoutMode: TimeoutMode): ListIndexesIterable { + wrapped.timeoutMode(timeoutMode) + return this + } + + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time + * @param timeUnit the time unit, defaults to Milliseconds + * @return this + * @see [Max Time](https://www.mongodb.com/docs/manual/reference/operator/meta/maxTimeMS/) + */ + public fun maxTime(maxTime: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): ListIndexesIterable = apply { + wrapped.maxTime(maxTime, timeUnit) + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + */ + public fun comment(comment: String?): ListIndexesIterable = apply { wrapped.comment(comment) } + + /** + * Sets the comment for this operation. A null value means no comment is set. 
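[Editorial sketch, not part of the patch: enumerating index definitions with a server-side time budget; the collection is assumed.]

    import com.mongodb.kotlin.client.MongoCollection
    import java.util.concurrent.TimeUnit
    import org.bson.Document

    fun printIndexes(collection: MongoCollection<Document>) {
        collection.listIndexes()
            .maxTime(1, TimeUnit.SECONDS) // bound the server-side execution time
            .cursor()
            .use { while (it.hasNext()) println(it.next().toJson()) }
    }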
+     *
+     * @param comment the comment
+     * @return this
+     */
+    public fun comment(comment: BsonValue?): ListIndexesIterable<T> = apply { wrapped.comment(comment) }
+}
diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListSearchIndexesIterable.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListSearchIndexesIterable.kt
new file mode 100644
index 00000000000..aa0dc1664bd
--- /dev/null
+++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/ListSearchIndexesIterable.kt
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client
+
+import com.mongodb.ExplainVerbosity
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
+import com.mongodb.client.ListSearchIndexesIterable as JListSearchIndexesIterable
+import com.mongodb.client.cursor.TimeoutMode
+import com.mongodb.client.model.Collation
+import java.util.concurrent.TimeUnit
+import org.bson.BsonValue
+import org.bson.Document
+
+/**
+ * Iterable-like implementation for list Atlas Search index operations.
+ *
+ * @param T The type of the result.
+ * @see [List indexes](https://www.mongodb.com/docs/manual/reference/command/listIndexes/)
+ */
+public class ListSearchIndexesIterable<T : Any>(private val wrapped: JListSearchIndexesIterable<T>) :
+    MongoIterable<T>(wrapped) {
+
+    public override fun batchSize(batchSize: Int): ListSearchIndexesIterable<T> {
+        super.batchSize(batchSize)
+        return this
+    }
+
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * Requires the `timeout` to be set, either in the [com.mongodb.MongoClientSettings], via [MongoDatabase] or via
+     * [MongoCollection].
+     *
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun timeoutMode(timeoutMode: TimeoutMode): ListSearchIndexesIterable<T> {
+        wrapped.timeoutMode(timeoutMode)
+        return this
+    }
+
+    /**
+     * Sets an Atlas Search index name for this operation.
+     *
+     * @param indexName Atlas Search index name.
+     * @return this.
+     */
+    public fun name(indexName: String): ListSearchIndexesIterable<T> = apply { wrapped.name(indexName) }
+
+    /**
+     * Enables writing to temporary files. A null value indicates that it's unspecified.
+     *
+     * @param allowDiskUse true if writing to temporary files is enabled.
+     * @return this.
+     * @see [Aggregation command](https://www.mongodb.com/docs/manual/reference/command/aggregate/)
+     */
+    public fun allowDiskUse(allowDiskUse: Boolean?): ListSearchIndexesIterable<T> = apply {
+        wrapped.allowDiskUse(allowDiskUse)
+    }
+
+    /**
+     * Sets the maximum execution time on the server for this operation.
+     *
+     * @param maxTime the max time.
+     * @param timeUnit the time unit, defaults to Milliseconds.
+     * @return this.
+     * @see [Max Time](https://www.mongodb.com/docs/manual/reference/method/cursor.maxTimeMS/#cursor.maxTimeMS)
+     */
+    public fun maxTime(maxTime: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): ListSearchIndexesIterable<T> =
+        apply {
+            wrapped.maxTime(maxTime, timeUnit)
+        }
+
+    /**
+     * Sets the collation options.
+     *
+     * A null value represents the server default.
+     *
+     * @param collation the collation options to use.
+     * @return this.
+     */
+    public fun collation(collation: Collation?): ListSearchIndexesIterable<T> = apply { wrapped.collation(collation) }
+
+    /**
+     * Sets the comment for this operation. A null value means no comment is set.
+     *
+     * @param comment the comment.
+     * @return this.
+     */
+    public fun comment(comment: String?): ListSearchIndexesIterable<T> = apply { wrapped.comment(comment) }
+
+    /**
+     * Sets the comment for this operation. A null value means no comment is set.
+     *
+     * @param comment the comment.
+     * @return this.
+     */
+    public fun comment(comment: BsonValue?): ListSearchIndexesIterable<T> = apply { wrapped.comment(comment) }
+
+    /**
+     * Explain the execution plan for this operation with the given verbosity level.
+     *
+     * @param verbosity the verbosity of the explanation.
+     * @return the execution plan.
+     * @see [Explain command](https://www.mongodb.com/docs/manual/reference/command/explain/)
+     */
+    public fun explain(verbosity: ExplainVerbosity? = null): Document = explain(Document::class.java, verbosity)
+
+    /**
+     * Explain the execution plan for this operation with the given verbosity level.
+     *
+     * @param R the type of the document class.
+     * @param resultClass the result document type.
+     * @param verbosity the verbosity of the explanation.
+     * @return the execution plan.
+     * @see [Explain command](https://www.mongodb.com/docs/manual/reference/command/explain/)
+     */
+    public fun <R : Any> explain(resultClass: Class<R>, verbosity: ExplainVerbosity? = null): R =
+        if (verbosity == null) wrapped.explain(resultClass) else wrapped.explain(resultClass, verbosity)
+
+    /**
+     * Explain the execution plan for this operation with the given verbosity level.
+     *
+     * @param R the type of the document class.
+     * @param verbosity the verbosity of the explanation.
+     * @return the execution plan.
+     * @see [Explain command](https://www.mongodb.com/docs/manual/reference/command/explain/)
+     */
+    public inline fun <reified R : Any> explain(verbosity: ExplainVerbosity? = null): R =
+        explain(R::class.java, verbosity)
+}
diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoClient.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoClient.kt
new file mode 100644
index 00000000000..c71e59520b6
--- /dev/null
+++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoClient.kt
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client
+
+import com.mongodb.ClientSessionOptions
+import com.mongodb.ConnectionString
+import com.mongodb.MongoClientSettings
+import com.mongodb.MongoDriverInformation
+import com.mongodb.client.MongoClient as JMongoClient
+import com.mongodb.client.MongoClients as JMongoClients
+import com.mongodb.connection.ClusterDescription
+import java.io.Closeable
+import java.util.concurrent.TimeUnit
+
+/**
+ * A client-side representation of a MongoDB cluster.
+ *
+ * Instances can represent either a standalone MongoDB instance, a replica set, or a sharded cluster. Instances of
+ * this class are responsible for maintaining an up-to-date state of the cluster, and they may cache resources
+ * related to this, including background threads for monitoring and connection pools.
+ *
+ * Instances of this class serve as factories for [MongoDatabase] instances. Instances of this class can be created
+ * via the [MongoClient.create] helpers.
+ *
+ * @see MongoClient.create
+ */
+public class MongoClient(private val wrapped: JMongoClient) : MongoCluster(wrapped), Closeable {
+
+    /**
+     * A factory for [MongoClient] instances.
+     *
+     * @see MongoClient
+     */
+    public companion object Factory {
+        /**
+         * Create a new client with the given connection string as if by a call to [create].
+         *
+         * @param connectionString the connection string
+         * @return the client
+         */
+        public fun create(connectionString: String): MongoClient = create(ConnectionString(connectionString))
+
+        /**
+         * Create a new client with the given connection string.
+         *
+         * @param connectionString the connection string, defaults to `mongodb://localhost`.
+         * @param mongoDriverInformation any driver information to associate with the MongoClient
+         * @return the client
+         */
+        public fun create(
+            connectionString: ConnectionString = ConnectionString("mongodb://localhost"),
+            mongoDriverInformation: MongoDriverInformation? = null
+        ): MongoClient {
+            return create(
+                MongoClientSettings.builder().applyConnectionString(connectionString).build(), mongoDriverInformation)
+        }
+
+        /**
+         * Create a new client with the given client settings.
+         *
+         * For each of the settings classes configurable via [MongoClientSettings], the connection string is applied
+         * by calling the `applyConnectionString` method on an instance of the setting's builder class, building the
+         * setting, and adding it to an instance of [com.mongodb.MongoClientSettings.Builder].
+         *
+         * @param settings the client settings
+         * @param mongoDriverInformation any driver information to associate with the MongoClient
+         * @return the client
+         */
+        public fun create(
+            settings: MongoClientSettings,
+            mongoDriverInformation: MongoDriverInformation? = null
+        ): MongoClient {
+            val builder =
+                if (mongoDriverInformation == null) MongoDriverInformation.builder()
+                else MongoDriverInformation.builder(mongoDriverInformation)
+            return MongoClient(
+                JMongoClients.create(
+                    settings, builder.driverName("kotlin").driverPlatform("kotlin/${KotlinVersion.CURRENT}").build()))
+        }
+    }
+
+    public override fun close(): Unit = wrapped.close()
+
+    /**
+     * Gets the current cluster description.
+     *
+     * This method will not block, meaning that it may return a [ClusterDescription] whose `clusterType` is unknown
+     * and whose [com.mongodb.connection.ServerDescription]s are all in the connecting state.
If the application requires + * notifications after the driver has connected to a member of the cluster, it should register a + * [com.mongodb.event.ClusterListener] via the [com.mongodb.connection.ClusterSettings] in + * [com.mongodb.MongoClientSettings]. + * + * @return the current cluster description + * @see com.mongodb.connection.ClusterSettings.Builder.addClusterListener + * @see com.mongodb.MongoClientSettings.Builder.applyToClusterSettings + */ + public val clusterDescription: ClusterDescription + get() = wrapped.clusterDescription + + /** + * Appends the provided [MongoDriverInformation] to the existing metadata. + * + * This enables frameworks and libraries to include identifying metadata (e.g., name, version, platform) which might + * be visible in the MongoD/MongoS logs. This can assist with diagnostics by making client identity visible to the + * server. + * + * **Note:** Metadata is limited to 512 bytes; any excess will be truncated. + * + * @param mongoDriverInformation the driver information to append to the existing metadata + * @since 5.6 + */ + public fun appendMetadata(mongoDriverInformation: MongoDriverInformation): Unit = + wrapped.appendMetadata(mongoDriverInformation) +} + +/** + * ClientSessionOptions.Builder.defaultTimeout extension function + * + * @param defaultTimeout time in milliseconds + * @return the options + */ +public fun ClientSessionOptions.Builder.defaultTimeout(defaultTimeout: Long): ClientSessionOptions.Builder = + this.apply { defaultTimeout(defaultTimeout, TimeUnit.MILLISECONDS) } diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCluster.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCluster.kt new file mode 100644 index 00000000000..1961989aaa2 --- /dev/null +++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCluster.kt @@ -0,0 +1,412 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.kotlin.client + +import com.mongodb.ClientBulkWriteException +import com.mongodb.ClientSessionOptions +import com.mongodb.MongoClientSettings +import com.mongodb.MongoException +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import com.mongodb.WriteConcern +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason +import com.mongodb.client.MongoCluster as JMongoCluster +import com.mongodb.client.model.bulk.ClientBulkWriteOptions +import com.mongodb.client.model.bulk.ClientBulkWriteResult +import com.mongodb.client.model.bulk.ClientNamespacedDeleteManyModel +import com.mongodb.client.model.bulk.ClientNamespacedUpdateManyModel +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel +import java.util.concurrent.TimeUnit +import org.bson.Document +import org.bson.codecs.configuration.CodecRegistry +import org.bson.conversions.Bson + +/** + * The client-side representation of a MongoDB cluster operations. 
+ * + * The originating [MongoClient] is responsible for the closing of resources. If the originator [MongoClient] is closed, + * then any operations will fail. + * + * @see MongoClient + * @since 5.2 + */ +public open class MongoCluster protected constructor(private val wrapped: JMongoCluster) { + + /** The codec registry. */ + public val codecRegistry: CodecRegistry + get() = wrapped.codecRegistry + + /** The read concern. */ + public val readConcern: ReadConcern + get() = wrapped.readConcern + + /** The read preference. */ + public val readPreference: ReadPreference + get() = wrapped.readPreference + + /** The write concern. */ + public val writeConcern: WriteConcern + get() = wrapped.writeConcern + + /** + * The time limit for the full execution of an operation. + * + * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`, + * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`. + * - `null` means that the timeout mechanism for operations will defer to using: + * - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to + * become available + * - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out. + * - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out. + * - `maxTimeMS`: The time limit for processing operations on a cursor. See: + * [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS"). + * - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute. + * - `0` means infinite timeout. + * - `> 0` The time limit to use for the full execution of an operation. + * + * @return the optional timeout duration + */ + @Alpha(Reason.CLIENT) + public fun timeout(timeUnit: TimeUnit = TimeUnit.MILLISECONDS): Long? = wrapped.getTimeout(timeUnit) + + /** + * Create a new MongoCluster instance with a different codec registry. + * + * The [CodecRegistry] configured by this method is effectively treated by the driver as an instance of + * [org.bson.codecs.configuration.CodecProvider], which [CodecRegistry] extends. So there is no benefit to defining + * a class that implements [CodecRegistry]. Rather, an application should always create [CodecRegistry] instances + * using the factory methods in [org.bson.codecs.configuration.CodecRegistries]. + * + * @param newCodecRegistry the new [org.bson.codecs.configuration.CodecRegistry] for the database + * @return a new MongoCluster instance with the different codec registry + * @see org.bson.codecs.configuration.CodecRegistries + */ + public fun withCodecRegistry(newCodecRegistry: CodecRegistry): MongoCluster = + MongoCluster(wrapped.withCodecRegistry(newCodecRegistry)) + + /** + * Create a new MongoCluster instance with a different read preference. + * + * @param newReadPreference the new [ReadPreference] for the database + * @return a new MongoCluster instance with the different readPreference + */ + public fun withReadPreference(newReadPreference: ReadPreference): MongoCluster = + MongoCluster(wrapped.withReadPreference(newReadPreference)) + + /** + * Create a new MongoCluster instance with a different read concern. 
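+ *
+ * A minimal sketch (assumes a `client` in scope):
+ * ```
+ * val majorityReads = client.withReadConcern(ReadConcern.MAJORITY)
+ * ```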
+     *
+     * @param newReadConcern the new [ReadConcern] for the database
+     * @return a new MongoCluster instance with the different ReadConcern
+     * @see [Read Concern](https://www.mongodb.com/docs/manual/reference/readConcern/)
+     */
+    public fun withReadConcern(newReadConcern: ReadConcern): MongoCluster =
+        MongoCluster(wrapped.withReadConcern(newReadConcern))
+
+    /**
+     * Create a new MongoCluster instance with a different write concern.
+     *
+     * @param newWriteConcern the new [WriteConcern] for the database
+     * @return a new MongoCluster instance with the different writeConcern
+     */
+    public fun withWriteConcern(newWriteConcern: WriteConcern): MongoCluster =
+        MongoCluster(wrapped.withWriteConcern(newWriteConcern))
+
+    /**
+     * Create a new MongoCluster instance with the set time limit for the full execution of an operation.
+     * - `0` means an infinite timeout
+     * - `> 0` The time limit to use for the full execution of an operation.
+     *
+     * @param timeout the timeout, which must be greater than or equal to 0
+     * @param timeUnit the time unit, defaults to Milliseconds
+     * @return a new MongoCluster instance with the set time limit for operations
+     * @see [MongoDatabase.timeout]
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun withTimeout(timeout: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): MongoCluster =
+        MongoCluster(wrapped.withTimeout(timeout, timeUnit))
+
+    /**
+     * Gets a [MongoDatabase] instance for the given database name.
+     *
+     * @param databaseName the name of the database to retrieve
+     * @return a `MongoDatabase` representing the specified database
+     * @throws IllegalArgumentException if databaseName is invalid
+     * @see com.mongodb.MongoNamespace.checkDatabaseNameValidity
+     */
+    public fun getDatabase(databaseName: String): MongoDatabase = MongoDatabase(wrapped.getDatabase(databaseName))
+
+    /**
+     * Creates a client session.
+     *
+     * Note: A ClientSession instance cannot be used concurrently in multiple operations.
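+     *
+     * A minimal usage sketch (assumes a `client` in scope and that the returned [ClientSession] is
+     * [java.io.Closeable], so `use` closes it even if the block throws):
+     * ```
+     * client.startSession().use { session ->
+     *     // run the operations that should share this session, passing `session` to each of them
+     * }
+     * ```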
+     *
+     * @param options the options for the client session
+     * @return the client session
+     */
+    public fun startSession(options: ClientSessionOptions = ClientSessionOptions.builder().build()): ClientSession =
+        ClientSession(wrapped.startSession(options))
+
+    /**
+     * Get a list of the database names
+     *
+     * @return an iterable containing all the names of all the databases
+     * @see [List Databases](https://www.mongodb.com/docs/manual/reference/command/listDatabases)
+     */
+    public fun listDatabaseNames(): MongoIterable<String> = MongoIterable(wrapped.listDatabaseNames())
+
+    /**
+     * Get a list of the database names
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @return an iterable containing all the names of all the databases
+     * @see [List Databases](https://www.mongodb.com/docs/manual/reference/command/listDatabases)
+     */
+    public fun listDatabaseNames(clientSession: ClientSession): MongoIterable<String> =
+        MongoIterable(wrapped.listDatabaseNames(clientSession.wrapped))
+
+    /**
+     * Gets the list of databases
+     *
+     * @return the list databases iterable interface
+     */
+    @JvmName("listDatabasesAsDocument")
+    public fun listDatabases(): ListDatabasesIterable<Document> = listDatabases(Document::class.java)
+
+    /**
+     * Gets the list of databases
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @return the list databases iterable interface
+     */
+    @JvmName("listDatabasesAsDocumentWithSession")
+    public fun listDatabases(clientSession: ClientSession): ListDatabasesIterable<Document> =
+        listDatabases(clientSession, Document::class.java)
+
+    /**
+     * Gets the list of databases
+     *
+     * @param T the type of the class to use
+     * @param resultClass the target document type of the iterable.
+     * @return the list databases iterable interface
+     */
+    public fun <T : Any> listDatabases(resultClass: Class<T>): ListDatabasesIterable<T> =
+        ListDatabasesIterable(wrapped.listDatabases(resultClass))
+
+    /**
+     * Gets the list of databases
+     *
+     * @param T the type of the class to use
+     * @param clientSession the client session with which to associate this operation
+     * @param resultClass the target document type of the iterable.
+     * @return the list databases iterable interface
+     */
+    public fun <T : Any> listDatabases(clientSession: ClientSession, resultClass: Class<T>): ListDatabasesIterable<T> =
+        ListDatabasesIterable(wrapped.listDatabases(clientSession.wrapped, resultClass))
+
+    /**
+     * Gets the list of databases
+     *
+     * @param T the type of the class to use
+     * @return the list databases iterable interface
+     */
+    public inline fun <reified T : Any> listDatabases(): ListDatabasesIterable<T> = listDatabases(T::class.java)
+
+    /**
+     * Gets the list of databases
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param T the type of the class to use
+     * @return the list databases iterable interface
+     */
+    public inline fun <reified T : Any> listDatabases(clientSession: ClientSession): ListDatabasesIterable<T> =
+        listDatabases(clientSession, T::class.java)
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+     * @return the change stream iterable
+     * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+     */
+    @JvmName("watchAsDocument")
+    public fun watch(pipeline: List<Bson> = emptyList()): ChangeStreamIterable<Document> =
+        watch(pipeline, Document::class.java)
+
+    /**
+     * Creates a change stream for this client.
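+     *
+     * For example, a sketch that filters the stream server-side to insert events only (assumes a `client` and a
+     * `session` in scope; `Aggregates` and `Filters` are the standard builders from `com.mongodb.client.model`):
+     * ```
+     * val inserts = client.watch(session, listOf(Aggregates.match(Filters.eq("operationType", "insert"))))
+     * ```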
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+     * @return the change stream iterable
+     * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+     */
+    @JvmName("watchAsDocumentWithSession")
+    public fun watch(clientSession: ClientSession, pipeline: List<Bson> = emptyList()): ChangeStreamIterable<Document> =
+        watch(clientSession, pipeline, Document::class.java)
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param T the target document type of the iterable.
+     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+     * @param resultClass the target document type of the iterable.
+     * @return the change stream iterable
+     * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+     */
+    public fun <T : Any> watch(pipeline: List<Bson> = emptyList(), resultClass: Class<T>): ChangeStreamIterable<T> =
+        ChangeStreamIterable(wrapped.watch(pipeline, resultClass))
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param T the target document type of the iterable.
+     * @param clientSession the client session with which to associate this operation
+     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+     * @param resultClass the target document type of the iterable.
+     * @return the change stream iterable
+     * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+     */
+    public fun <T : Any> watch(
+        clientSession: ClientSession,
+        pipeline: List<Bson> = emptyList(),
+        resultClass: Class<T>
+    ): ChangeStreamIterable<T> = ChangeStreamIterable(wrapped.watch(clientSession.wrapped, pipeline, resultClass))
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param T the target document type of the iterable.
+     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+     * @return the change stream iterable
+     * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+     */
+    public inline fun <reified T : Any> watch(pipeline: List<Bson> = emptyList()): ChangeStreamIterable<T> =
+        watch(pipeline, T::class.java)
+
+    /**
+     * Creates a change stream for this client.
+     *
+     * @param T the target document type of the iterable.
+     * @param clientSession the client session with which to associate this operation
+     * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline.
+     * @return the change stream iterable
+     * @see [Change Streams](https://dochub.mongodb.org/changestreams)
+     */
+    public inline fun <reified T : Any> watch(
+        clientSession: ClientSession,
+        pipeline: List<Bson> = emptyList()
+    ): ChangeStreamIterable<T> = watch(clientSession, pipeline, T::class.java)
+
+    /**
+     * Executes a client-level bulk write operation. This method is functionally equivalent to
+     * [bulkWrite(models, options)][bulkWrite] with the
+     * [default options][ClientBulkWriteOptions.clientBulkWriteOptions].
+     *
+     * This operation supports [retryable writes][MongoClientSettings.getRetryWrites]. Depending on the number of
+     * `models`, encoded size of `models`, and the size limits in effect, executing this operation may require multiple
+     * `bulkWrite` commands. The eligibility for retries is determined per each `bulkWrite` command:
+     * [ClientNamespacedUpdateManyModel], [ClientNamespacedDeleteManyModel] in a command render it non-retryable.
+     *
+     * @param models The [individual write operations][ClientNamespacedWriteModel].
+ * @return The [ClientBulkWriteResult] if the operation is successful. + * @throws ClientBulkWriteException If and only if the operation is unsuccessful or partially unsuccessful, and + * there is at least one of the following pieces of information to report: + * [ClientBulkWriteException.getWriteConcernErrors], [ClientBulkWriteException.getWriteErrors], + * [ClientBulkWriteException.getPartialResult]. + * @throws MongoException Only if the operation is unsuccessful. + * @see [BulkWrite command](https://www.mongodb.com/docs/manual/reference/command/bulkWrite/) + * @since 5.3 + */ + public fun bulkWrite(models: List): ClientBulkWriteResult = wrapped.bulkWrite(models) + + /** + * Executes a client-level bulk write operation. + * + * This operation supports [retryable writes][MongoClientSettings.getRetryWrites]. Depending on the number of + * `models`, encoded size of `models`, and the size limits in effect, executing this operation may require multiple + * `bulkWrite` commands. The eligibility for retries is determined per each `bulkWrite` command: + * [ClientNamespacedUpdateManyModel], [ClientNamespacedDeleteManyModel] in a command render it non-retryable. + * + * @param models The [individual write operations][ClientNamespacedWriteModel]. + * @param options The [options][ClientBulkWriteOptions]. + * @return The [ClientBulkWriteResult] if the operation is successful. + * @throws ClientBulkWriteException If and only if the operation is unsuccessful or partially unsuccessful, and + * there is at least one of the following pieces of information to report: + * [ClientBulkWriteException.getWriteConcernErrors], [ClientBulkWriteException.getWriteErrors], + * [ClientBulkWriteException.getPartialResult]. + * @throws MongoException Only if the operation is unsuccessful. + * @see [BulkWrite command](https://www.mongodb.com/docs/manual/reference/command/bulkWrite/) + * @since 5.3 + */ + public fun bulkWrite( + models: List, + options: ClientBulkWriteOptions + ): ClientBulkWriteResult = wrapped.bulkWrite(models, options) + + /** + * Executes a client-level bulk write operation. This method is functionally equivalent to + * [bulkWrite(clientSession, models, options)][bulkWrite] with the + * [default options][ClientBulkWriteOptions.clientBulkWriteOptions]. + * + * This operation supports [retryable writes][MongoClientSettings.getRetryWrites]. Depending on the number of + * `models`, encoded size of `models`, and the size limits in effect, executing this operation may require multiple + * `bulkWrite` commands. The eligibility for retries is determined per each `bulkWrite` command: + * [ClientNamespacedUpdateManyModel], [ClientNamespacedDeleteManyModel] in a command render it non-retryable. + * + * @param clientSession The [client session][ClientSession] with which to associate this operation. + * @param models The [individual write operations][ClientNamespacedWriteModel]. + * @return The [ClientBulkWriteResult] if the operation is successful. + * @throws ClientBulkWriteException If and only if the operation is unsuccessful or partially unsuccessful, and + * there is at least one of the following pieces of information to report: + * [ClientBulkWriteException.getWriteConcernErrors], [ClientBulkWriteException.getWriteErrors], + * [ClientBulkWriteException.getPartialResult]. + * @throws MongoException Only if the operation is unsuccessful. 
+ * @see [BulkWrite command](https://www.mongodb.com/docs/manual/reference/command/bulkWrite/) + * @since 5.3 + */ + public fun bulkWrite( + clientSession: ClientSession, + models: List + ): ClientBulkWriteResult = wrapped.bulkWrite(clientSession.wrapped, models) + + /** + * Executes a client-level bulk write operation. + * + * This operation supports [retryable writes][com.mongodb.MongoClientSettings.getRetryWrites]. Depending on the + * number of `models`, encoded size of `models`, and the size limits in effect, executing this operation may require + * multiple `bulkWrite` commands. The eligibility for retries is determined per each `bulkWrite` command: + * [ClientNamespacedUpdateManyModel], [ClientNamespacedDeleteManyModel] in a command render it non-retryable. + * + * @param clientSession The [client session][ClientSession] with which to associate this operation. + * @param models The [individual write operations][ClientNamespacedWriteModel]. + * @param options The [options][ClientBulkWriteOptions]. + * @return The [ClientBulkWriteResult] if the operation is successful. + * @throws ClientBulkWriteException If and only if the operation is unsuccessful or partially unsuccessful, and + * there is at least one of the following pieces of information to report: + * [ClientBulkWriteException.getWriteConcernErrors], [ClientBulkWriteException.getWriteErrors], + * [ClientBulkWriteException.getPartialResult]. + * @throws MongoException Only if the operation is unsuccessful. + * @see [BulkWrite command](https://www.mongodb.com/docs/manual/reference/command/bulkWrite/) + * @since 5.3 + */ + public fun bulkWrite( + clientSession: ClientSession, + models: List, + options: ClientBulkWriteOptions + ): ClientBulkWriteResult = wrapped.bulkWrite(clientSession.wrapped, models, options) +} diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCollection.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCollection.kt new file mode 100644 index 00000000000..9521c502460 --- /dev/null +++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCollection.kt @@ -0,0 +1,1506 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.kotlin.client + +import com.mongodb.MongoNamespace +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import com.mongodb.WriteConcern +import com.mongodb.annotations.Alpha +import com.mongodb.annotations.Reason +import com.mongodb.bulk.BulkWriteResult +import com.mongodb.client.MongoCollection as JMongoCollection +import com.mongodb.client.model.BulkWriteOptions +import com.mongodb.client.model.CountOptions +import com.mongodb.client.model.CreateIndexOptions +import com.mongodb.client.model.DeleteOptions +import com.mongodb.client.model.DropCollectionOptions +import com.mongodb.client.model.DropIndexOptions +import com.mongodb.client.model.EstimatedDocumentCountOptions +import com.mongodb.client.model.FindOneAndDeleteOptions +import com.mongodb.client.model.FindOneAndReplaceOptions +import com.mongodb.client.model.FindOneAndUpdateOptions +import com.mongodb.client.model.IndexModel +import com.mongodb.client.model.IndexOptions +import com.mongodb.client.model.InsertManyOptions +import com.mongodb.client.model.InsertOneOptions +import com.mongodb.client.model.RenameCollectionOptions +import com.mongodb.client.model.ReplaceOptions +import com.mongodb.client.model.SearchIndexModel +import com.mongodb.client.model.UpdateOptions +import com.mongodb.client.model.WriteModel +import com.mongodb.client.result.DeleteResult +import com.mongodb.client.result.InsertManyResult +import com.mongodb.client.result.InsertOneResult +import com.mongodb.client.result.UpdateResult +import java.util.concurrent.TimeUnit +import org.bson.BsonDocument +import org.bson.Document +import org.bson.codecs.configuration.CodecRegistry +import org.bson.conversions.Bson + +/** + * The MongoCollection representation. + * + * Note: Additions to this interface will not be considered to break binary compatibility. + * + * @param T The type of documents the collection will encode documents from and decode documents to. + * @property wrapped the underlying sync MongoCollection + */ +public class MongoCollection(private val wrapped: JMongoCollection) { + + /** The class of documents stored in this collection. */ + public val documentClass: Class + get() = wrapped.documentClass + + /** The namespace of this collection. */ + public val namespace: MongoNamespace + get() = wrapped.namespace + + /** The codec registry for the collection. */ + public val codecRegistry: CodecRegistry + get() = wrapped.codecRegistry + + /** the read preference for the collection. */ + public val readPreference: ReadPreference + get() = wrapped.readPreference + + /** The read concern for the collection. */ + public val readConcern: ReadConcern + get() = wrapped.readConcern + + /** The write concern for the collection. */ + public val writeConcern: WriteConcern + get() = wrapped.writeConcern + + /** + * The time limit for the full execution of an operation. + * + * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`, + * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`. + * - `null` means that the timeout mechanism for operations will defer to using: + * - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to + * become available + * - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out. + * - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out. + * - `maxTimeMS`: The time limit for processing operations on a cursor. 
See: + * [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS"). + * - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute. + * - `0` means infinite timeout. + * - `> 0` The time limit to use for the full execution of an operation. + * + * @return the optional timeout duration + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun timeout(timeUnit: TimeUnit = TimeUnit.MILLISECONDS): Long? = wrapped.getTimeout(timeUnit) + + /** + * Create a new collection instance with a different default class to cast any documents returned from the database + * into. + * + * @param R the default class to cast any documents returned from the database into. + * @param resultClass the target document type for the collection. + * @return a new MongoCollection instance with the different default class + */ + public fun withDocumentClass(resultClass: Class): MongoCollection = + MongoCollection(wrapped.withDocumentClass(resultClass)) + + /** + * Create a new collection instance with a different default class to cast any documents returned from the database + * into. + * + * @param R the default class to cast any documents returned from the database into. + * @return a new MongoCollection instance with the different default class + */ + public inline fun withDocumentClass(): MongoCollection = withDocumentClass(R::class.java) + + /** + * Create a new collection instance with a different codec registry. + * + * The [CodecRegistry] configured by this method is effectively treated by the driver as an instance of + * [org.bson.codecs.configuration.CodecProvider], which [CodecRegistry] extends. So there is no benefit to defining + * a class that implements [CodecRegistry]. Rather, an application should always create [CodecRegistry] instances + * using the factory methods in [org.bson.codecs.configuration.CodecRegistries]. + * + * @param newCodecRegistry the new [org.bson.codecs.configuration.CodecRegistry] for the collection + * @return a new MongoCollection instance with the different codec registry + * @see org.bson.codecs.configuration.CodecRegistries + */ + public fun withCodecRegistry(newCodecRegistry: CodecRegistry): MongoCollection = + MongoCollection(wrapped.withCodecRegistry(newCodecRegistry)) + + /** + * Create a new collection instance with a different read preference. + * + * @param newReadPreference the new [com.mongodb.ReadPreference] for the collection + * @return a new MongoCollection instance with the different readPreference + */ + public fun withReadPreference(newReadPreference: ReadPreference): MongoCollection = + MongoCollection(wrapped.withReadPreference(newReadPreference)) + + /** + * Create a new collection instance with a different read concern. + * + * @param newReadConcern the new [ReadConcern] for the collection + * @return a new MongoCollection instance with the different ReadConcern + * @see [Read Concern](https://www.mongodb.com/docs/manual/reference/readConcern/) + */ + public fun withReadConcern(newReadConcern: ReadConcern): MongoCollection = + MongoCollection(wrapped.withReadConcern(newReadConcern)) + + /** + * Create a new collection instance with a different write concern. 
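+ *
+ * A minimal sketch (assumes a `collection` in scope):
+ * ```
+ * val majorityWrites = collection.withWriteConcern(WriteConcern.MAJORITY)
+ * ```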
+ * + * @param newWriteConcern the new [com.mongodb.WriteConcern] for the collection + * @return a new MongoCollection instance with the different writeConcern + */ + public fun withWriteConcern(newWriteConcern: WriteConcern): MongoCollection = + MongoCollection(wrapped.withWriteConcern(newWriteConcern)) + + /** + * Create a new MongoCollection instance with the set time limit for the full execution of an operation. + * - `0` means an infinite timeout + * - `> 0` The time limit to use for the full execution of an operation. + * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit, defaults to Milliseconds + * @return a new MongoCollection instance with the set time limit for operations + * @see [MongoCollection.timeout] + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + public fun withTimeout(timeout: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): MongoCollection = + MongoCollection(wrapped.withTimeout(timeout, timeUnit)) + + /** + * Counts the number of documents in the collection. + * + * Note: For a fast count of the total documents in a collection see [estimatedDocumentCount]. When migrating from + * `count()` to `countDocuments()` the following query operators must be replaced: + * ``` + * +-------------+--------------------------------+ + * | Operator | Replacement | + * +=============+================================+ + * | $where | $expr | + * +-------------+--------------------------------+ + * | $near | $geoWithin with $center | + * +-------------+--------------------------------+ + * | $nearSphere | $geoWithin with $centerSphere | + * +-------------+--------------------------------+ + * ``` + * + * @return the number of documents in the collection + */ + public fun countDocuments(filter: Bson = BsonDocument(), options: CountOptions = CountOptions()): Long = + wrapped.countDocuments(filter, options) + + /** + * Counts the number of documents in the collection according to the given options. + * + * Note: For a fast count of the total documents in a collection see [estimatedDocumentCount]. When migrating from + * `count()` to `countDocuments()` the following query operators must be replaced: + * ``` + * +-------------+--------------------------------+ + * | Operator | Replacement | + * +=============+================================+ + * | $where | $expr | + * +-------------+--------------------------------+ + * | $near | $geoWithin with $center | + * +-------------+--------------------------------+ + * | $nearSphere | $geoWithin with $centerSphere | + * +-------------+--------------------------------+ + * ``` + * + * @param clientSession the client session with which to associate this operation + * @param filter the query filter + * @param options the options describing the count + * @return the number of documents in the collection + */ + public fun countDocuments( + clientSession: ClientSession, + filter: Bson = BsonDocument(), + options: CountOptions = CountOptions() + ): Long = wrapped.countDocuments(clientSession.wrapped, filter, options) + + /** + * Gets an estimate of the count of documents in a collection using collection metadata. 
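+ *
+ * A minimal sketch of the trade-off (assumes a `collection` in scope): the estimate is fast because it reads
+ * collection metadata only, while [countDocuments] is exact:
+ * ```
+ * val approximate = collection.estimatedDocumentCount()
+ * val exact = collection.countDocuments(Filters.exists("email"))
+ * ```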
+ * + * Implementation note: this method is implemented using the MongoDB server's count command + * + * @param options the options describing the count + * @return the number of documents in the collection + * @see [Count behaviour](https://www.mongodb.com/docs/manual/reference/command/count/#behavior) + */ + public fun estimatedDocumentCount(options: EstimatedDocumentCountOptions = EstimatedDocumentCountOptions()): Long = + wrapped.estimatedDocumentCount(options) + + /** + * Gets the distinct values of the specified field name. + * + * @param R the target type of the iterable. + * @param fieldName the field name + * @param filter the query filter + * @param resultClass the target document type of the iterable. + * @return an iterable of distinct values + * @see [Distinct command](https://www.mongodb.com/docs/manual/reference/command/distinct/) + */ + public fun distinct( + fieldName: String, + filter: Bson = BsonDocument(), + resultClass: Class + ): DistinctIterable = DistinctIterable(wrapped.distinct(fieldName, filter, resultClass)) + + /** + * Gets the distinct values of the specified field name. + * + * @param R the target type of the iterable. + * @param clientSession the client session with which to associate this operation + * @param fieldName the field name + * @param filter the query filter + * @param resultClass the target document type of the iterable. + * @return an iterable of distinct values + * @see [Distinct command](https://www.mongodb.com/docs/manual/reference/command/distinct/) + */ + public fun distinct( + clientSession: ClientSession, + fieldName: String, + filter: Bson = BsonDocument(), + resultClass: Class + ): DistinctIterable = DistinctIterable(wrapped.distinct(clientSession.wrapped, fieldName, filter, resultClass)) + + /** + * Gets the distinct values of the specified field name. + * + * @param R the target type of the iterable. + * @param fieldName the field name + * @param filter the query filter + * @return an iterable of distinct values + * @see [Distinct command](https://www.mongodb.com/docs/manual/reference/command/distinct/) + */ + public inline fun distinct( + fieldName: String, + filter: Bson = BsonDocument() + ): DistinctIterable = distinct(fieldName, filter, R::class.java) + + /** + * Gets the distinct values of the specified field name. + * + * @param R the target type of the iterable. + * @param clientSession the client session with which to associate this operation + * @param fieldName the field name + * @param filter the query filter + * @return an iterable of distinct values + * @see [Distinct command](https://www.mongodb.com/docs/manual/reference/command/distinct/) + */ + public inline fun distinct( + clientSession: ClientSession, + fieldName: String, + filter: Bson = BsonDocument() + ): DistinctIterable = distinct(clientSession, fieldName, filter, R::class.java) + + /** + * Finds all documents in the collection. + * + * @param filter the query filter + * @return the find iterable interface + * @see [Query Documents](https://www.mongodb.com/docs/manual/tutorial/query-documents/) + */ + @JvmName("findAsT") public fun find(filter: Bson = BsonDocument()): FindIterable = find(filter, documentClass) + + /** + * Finds all documents in the collection. 
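+ *
+ * A minimal sketch (assumes a `collection` and a `session` in scope; `Filters` is the standard builder from
+ * `com.mongodb.client.model`):
+ * ```
+ * val activeUsers = collection.find(session, Filters.eq("status", "active")).batchSize(64)
+ * ```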
+ * + * @param clientSession the client session with which to associate this operation + * @param filter the query filter + * @return the find iterable interface + * @see [Query Documents](https://www.mongodb.com/docs/manual/tutorial/query-documents/) + */ + @JvmName("findAsTWithSession") + public fun find(clientSession: ClientSession, filter: Bson = BsonDocument()): FindIterable = + find(clientSession, filter, documentClass) + + /** + * Finds all documents in the collection. + * + * @param R the class to decode each document into + * @param filter the query filter + * @param resultClass the target document type of the iterable. + * @return the find iterable interface + * @see [Query Documents](https://www.mongodb.com/docs/manual/tutorial/query-documents/) + */ + public fun find(filter: Bson = BsonDocument(), resultClass: Class): FindIterable = + FindIterable(wrapped.find(filter, resultClass)) + + /** + * Finds all documents in the collection. + * + * @param R the class to decode each document into + * @param clientSession the client session with which to associate this operation + * @param filter the query filter + * @param resultClass the target document type of the iterable. + * @return the find iterable interface + * @see [Query Documents](https://www.mongodb.com/docs/manual/tutorial/query-documents/) + */ + public fun find( + clientSession: ClientSession, + filter: Bson = BsonDocument(), + resultClass: Class + ): FindIterable = FindIterable(wrapped.find(clientSession.wrapped, filter, resultClass)) + + /** + * Finds all documents in the collection. + * + * @param R the class to decode each document into + * @param filter the query filter + * @return the find iterable interface + * @see [Query Documents](https://www.mongodb.com/docs/manual/tutorial/query-documents/) + */ + public inline fun find(filter: Bson = BsonDocument()): FindIterable = + find(filter, R::class.java) + + /** + * Finds all documents in the collection. + * + * @param R the class to decode each document into + * @param clientSession the client session with which to associate this operation + * @param filter the query filter + * @return the find iterable interface + * @see [Query Documents](https://www.mongodb.com/docs/manual/tutorial/query-documents/) + */ + public inline fun find( + clientSession: ClientSession, + filter: Bson = BsonDocument() + ): FindIterable = find(clientSession, filter, R::class.java) + + /** + * Aggregates documents according to the specified aggregation pipeline. + * + * @param pipeline the aggregation pipeline + * @return an iterable containing the result of the aggregation operation + * @see [Aggregate Command](https://www.mongodb.com/docs/manual/reference/command/aggregate/#dbcmd.aggregate/) + */ + @JvmName("aggregateAsT") + public fun aggregate(pipeline: List): AggregateIterable = + AggregateIterable(wrapped.aggregate(pipeline, documentClass)) + + /** + * Aggregates documents according to the specified aggregation pipeline. 
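+ *
+ * A minimal pipeline sketch (assumes a `collection` and a `session` in scope; `Aggregates`, `Accumulators` and
+ * `Filters` are the standard builders from `com.mongodb.client.model`):
+ * ```
+ * val totalsByCity = collection.aggregate(
+ *     session,
+ *     listOf(
+ *         Aggregates.match(Filters.eq("status", "active")),
+ *         Aggregates.group("\$city", Accumulators.sum("total", 1))))
+ * ```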
+ * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline + * @return an iterable containing the result of the aggregation operation + * @see [Aggregate Command](https://www.mongodb.com/docs/manual/reference/command/aggregate/#dbcmd.aggregate/) + */ + @JvmName("aggregateAsTWithSession") + public fun aggregate(clientSession: ClientSession, pipeline: List): AggregateIterable = + AggregateIterable(wrapped.aggregate(clientSession.wrapped, pipeline, documentClass)) + + /** + * Aggregates documents according to the specified aggregation pipeline. + * + * @param R the class to decode each document into + * @param pipeline the aggregation pipeline + * @param resultClass the target document type of the iterable. + * @return an iterable containing the result of the aggregation operation + * @see [Aggregate Command](https://www.mongodb.com/docs/manual/reference/command/aggregate/#dbcmd.aggregate/) + */ + public fun aggregate(pipeline: List, resultClass: Class): AggregateIterable = + AggregateIterable(wrapped.aggregate(pipeline, resultClass)) + + /** + * Aggregates documents according to the specified aggregation pipeline. + * + * @param R the class to decode each document into + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline + * @param resultClass the target document type of the iterable. + * @return an iterable containing the result of the aggregation operation + * @see [Aggregate Command](https://www.mongodb.com/docs/manual/reference/command/aggregate/#dbcmd.aggregate/) + */ + public fun aggregate( + clientSession: ClientSession, + pipeline: List, + resultClass: Class + ): AggregateIterable = AggregateIterable(wrapped.aggregate(clientSession.wrapped, pipeline, resultClass)) + + /** + * Aggregates documents according to the specified aggregation pipeline. + * + * @param R the class to decode each document into + * @param pipeline the aggregation pipeline + * @return an iterable containing the result of the aggregation operation + * @see [Aggregate Command](https://www.mongodb.com/docs/manual/reference/command/aggregate/#dbcmd.aggregate/) + */ + public inline fun aggregate(pipeline: List): AggregateIterable = + aggregate(pipeline, R::class.java) + + /** + * Aggregates documents according to the specified aggregation pipeline. + * + * @param R the class to decode each document into + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline + * @return an iterable containing the result of the aggregation operation + * @see [Aggregate Command](https://www.mongodb.com/docs/manual/reference/command/aggregate/#dbcmd.aggregate/) + */ + public inline fun aggregate( + clientSession: ClientSession, + pipeline: List + ): AggregateIterable = aggregate(clientSession, pipeline, R::class.java) + + /** + * Creates a change stream for this collection. + * + * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. + * @return the change stream iterable + * @see [Change Streams](https://dochub.mongodb.org/changestreams] + */ + @JvmName("watchAsDocument") + public fun watch(pipeline: List = emptyList()): ChangeStreamIterable = watch(pipeline, documentClass) + + /** + * Creates a change stream for this collection. 
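+ *
+ * For example, reacting only to insert events (a sketch; assumes a `collection` and a `session` in scope):
+ * ```
+ * val inserts = collection.watch(session, listOf(Aggregates.match(Filters.eq("operationType", "insert"))))
+ * ```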
+ * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. + * @return the change stream iterable + * @see [Change Streams](https://dochub.mongodb.org/changestreams] + */ + @JvmName("watchAsDocumentWithSession") + public fun watch(clientSession: ClientSession, pipeline: List = emptyList()): ChangeStreamIterable = + watch(clientSession, pipeline, documentClass) + + /** + * Creates a change stream for this collection. + * + * @param R the target document type of the iterable. + * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. + * @param resultClass the target document type of the iterable. + * @return the change stream iterable + * @see [Change Streams](https://dochub.mongodb.org/changestreams] + */ + public fun watch(pipeline: List = emptyList(), resultClass: Class): ChangeStreamIterable = + ChangeStreamIterable(wrapped.watch(pipeline, resultClass)) + + /** + * Creates a change stream for this collection. + * + * @param R the target document type of the iterable. + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. + * @param resultClass the target document type of the iterable. + * @return the change stream iterable + * @see [Change Streams](https://dochub.mongodb.org/changestreams] + */ + public fun watch( + clientSession: ClientSession, + pipeline: List = emptyList(), + resultClass: Class + ): ChangeStreamIterable = ChangeStreamIterable(wrapped.watch(clientSession.wrapped, pipeline, resultClass)) + + /** + * Creates a change stream for this collection. + * + * @param R the target document type of the iterable. + * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. + * @return the change stream iterable + * @see [Change Streams](https://dochub.mongodb.org/changestreams] + */ + public inline fun watch(pipeline: List = emptyList()): ChangeStreamIterable = + watch(pipeline, R::class.java) + + /** + * Creates a change stream for this collection. + * + * @param R the target document type of the iterable. + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. + * @return the change stream iterable + * @see [Change Streams](https://dochub.mongodb.org/changestreams] + */ + public inline fun watch( + clientSession: ClientSession, + pipeline: List = emptyList() + ): ChangeStreamIterable = watch(clientSession, pipeline, R::class.java) + + /** + * Inserts the provided document. If the document is missing an identifier, the driver should generate one. + * + * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled. 
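+ *
+ * A minimal sketch (assumes a `collection` of [Document] in scope; the driver generates the missing `_id`):
+ * ```
+ * val result = collection.insertOne(Document("name", "Ada").append("born", 1815))
+ * println(result.insertedId)
+ * ```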
+ * + * @param document the document to insert + * @param options the options to apply to the operation + * @return the insert one result + * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception + * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write + * concern + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + */ + public fun insertOne(document: T, options: InsertOneOptions = InsertOneOptions()): InsertOneResult = + wrapped.insertOne(document, options) + + /** + * Inserts the provided document. If the document is missing an identifier, the driver should generate one. + * + * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled. + * + * @param clientSession the client session with which to associate this operation + * @param document the document to insert + * @param options the options to apply to the operation + * @return the insert one result + * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception + * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write + * concern + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + */ + public fun insertOne( + clientSession: ClientSession, + document: T, + options: InsertOneOptions = InsertOneOptions() + ): InsertOneResult = wrapped.insertOne(clientSession.wrapped, document, options) + + /** + * Inserts one or more documents. A call to this method is equivalent to a call to the `bulkWrite` method + * + * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled. + * + * @param documents the documents to insert + * @param options the options to apply to the operation + * @return the insert many result + * @throws com.mongodb.MongoBulkWriteException if there's an exception in the bulk write operation + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + * @throws IllegalArgumentException if the documents list is null or empty, or any of the documents in the list are + * null + */ + public fun insertMany(documents: List, options: InsertManyOptions = InsertManyOptions()): InsertManyResult = + wrapped.insertMany(documents, options) + + /** + * Inserts one or more documents. A call to this method is equivalent to a call to the `bulkWrite` method + * + * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled. 
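+ *
+ * For example, an unordered batch insert (a sketch; assumes a `collection` of [Document] and a `session` in scope):
+ * ```
+ * collection.insertMany(
+ *     session,
+ *     listOf(Document("_id", 1), Document("_id", 2)),
+ *     InsertManyOptions().ordered(false))
+ * ```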
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param documents the documents to insert
+     * @param options the options to apply to the operation
+     * @return the insert many result
+     * @throws com.mongodb.MongoBulkWriteException if there's an exception in the bulk write operation
+     * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+     * @throws com.mongodb.MongoException if the write failed due to some other failure
+     * @throws IllegalArgumentException if the documents list is null or empty, or any of the documents in the list are
+     * null
+     */
+    public fun insertMany(
+        clientSession: ClientSession,
+        documents: List<T>,
+        options: InsertManyOptions = InsertManyOptions()
+    ): InsertManyResult = wrapped.insertMany(clientSession.wrapped, documents, options)
+
+    /**
+     * Update a single document in the collection according to the specified arguments.
+     *
+     * Use this method to only update the corresponding fields in the document according to the update operators used
+     * in the update document. To replace the entire document with a new document, use the corresponding [replaceOne]
+     * method.
+     *
+     * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+     *
+     * @param filter a document describing the query filter, which may not be null.
+     * @param update a document describing the update, which may not be null. The update to apply must include at least
+     * one update operator.
+     * @param options the options to apply to the update operation
+     * @return the result of the update one operation
+     * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception
+     * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write
+     * concern
+     * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+     * @throws com.mongodb.MongoException if the write failed due to some other failure
+     * @see [Modify Documents](https://www.mongodb.com/docs/manual/tutorial/modify-documents/)
+     * @see [Update Operators](https://www.mongodb.com/docs/manual/reference/operator/update/)
+     * @see [Update Command Behaviors](https://www.mongodb.com/docs/manual/reference/command/update/)
+     * @see [replaceOne]
+     */
+    public fun updateOne(filter: Bson, update: Bson, options: UpdateOptions = UpdateOptions()): UpdateResult =
+        wrapped.updateOne(filter, update, options)
+
+    /**
+     * Update a single document in the collection according to the specified arguments.
+     *
+     * Use this method to only update the corresponding fields in the document according to the update operators used
+     * in the update document. To replace the entire document with a new document, use the corresponding [replaceOne]
+     * method.
+     *
+     * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param filter a document describing the query filter, which may not be null.
+     * @param update a document describing the update, which may not be null. The update to apply must include at least
+     * one update operator.
+     * @param options the options to apply to the update operation
+     * @return the result of the update one operation
+     * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception
+     * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write
+     * concern
+     * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+     * @throws com.mongodb.MongoException if the write failed due to some other failure
+     * @see [Modify Documents](https://www.mongodb.com/docs/manual/tutorial/modify-documents/)
+     * @see [Update Operators](https://www.mongodb.com/docs/manual/reference/operator/update/)
+     * @see [Update Command](https://www.mongodb.com/docs/manual/reference/command/update/)
+     * @see com.mongodb.client.MongoCollection.replaceOne
+     */
+    public fun updateOne(
+        clientSession: ClientSession,
+        filter: Bson,
+        update: Bson,
+        options: UpdateOptions = UpdateOptions()
+    ): UpdateResult = wrapped.updateOne(clientSession.wrapped, filter, update, options)
+
+    /**
+     * Update a single document in the collection according to the specified arguments.
+     *
+     * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+     *
+     * @param filter a document describing the query filter, which may not be null.
+     * @param update a pipeline describing the update, which may not be null.
+     * @param options the options to apply to the update operation
+     * @return the result of the update one operation
+     * @throws com.mongodb.MongoWriteException if the write failed due to some failure specific to the update command
+     * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern
+     * @throws com.mongodb.MongoException if the write failed due to some other failure
+     * @see [Modify Documents](https://www.mongodb.com/docs/manual/tutorial/modify-documents/)
+     * @see [Update Operators](https://www.mongodb.com/docs/manual/reference/operator/update/)
+     */
+    public fun updateOne(filter: Bson, update: List<Bson>, options: UpdateOptions = UpdateOptions()): UpdateResult =
+        wrapped.updateOne(filter, update, options)
+
+    /**
+     * Update a single document in the collection according to the specified arguments.
+     *
+     * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param filter a document describing the query filter, which may not be null.
+     * @param update a pipeline describing the update, which may not be null.
+     * @param options the options to apply to the update operation
+     * @return the result of the update one operation
+     * @throws com.mongodb.MongoWriteException if the write failed due to some failure specific to the update command
+     * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern
+     * @throws com.mongodb.MongoException if the write failed due to some other failure
+     * @see [Modify Documents](https://www.mongodb.com/docs/manual/tutorial/modify-documents/)
+     * @see [Update Operators](https://www.mongodb.com/docs/manual/reference/operator/update/)
+     */
+    public fun updateOne(
+        clientSession: ClientSession,
+        filter: Bson,
+        update: List<Bson>,
+        options: UpdateOptions = UpdateOptions()
+    ): UpdateResult = wrapped.updateOne(clientSession.wrapped, filter, update, options)
+
+    /**
+     * Update all documents in the collection according to the specified arguments.
+     *
+     * @param filter a document describing the query filter, which may not be null.
+     * @param update a document describing the update, which may not be null. The update to apply must include only
+     * update operators.
+     * @param options the options to apply to the update operation
+     * @return the result of the update many operation
+     * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception
+     * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write
+     * concern
+     * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+     * @throws com.mongodb.MongoException if the write failed due to some other failure
+     * @see [Modify Documents](https://www.mongodb.com/docs/manual/tutorial/modify-documents/)
+     * @see [Update Operators](https://www.mongodb.com/docs/manual/reference/operator/update/)
+     */
+    public fun updateMany(filter: Bson, update: Bson, options: UpdateOptions = UpdateOptions()): UpdateResult =
+        wrapped.updateMany(filter, update, options)
+
+    /**
+     * Update all documents in the collection according to the specified arguments.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param filter a document describing the query filter, which may not be null.
+     * @param update a document describing the update, which may not be null. The update to apply must include only
+     * update operators.
+     * @param options the options to apply to the update operation
+     * @return the result of the update many operation
+     * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception
+     * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write
+     * concern
+     * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+     * @throws com.mongodb.MongoException if the write failed due to some other failure
+     * @see [Modify Documents](https://www.mongodb.com/docs/manual/tutorial/modify-documents/)
+     * @see [Update Operators](https://www.mongodb.com/docs/manual/reference/operator/update/)
+     */
+    public fun updateMany(
+        clientSession: ClientSession,
+        filter: Bson,
+        update: Bson,
+        options: UpdateOptions = UpdateOptions()
+    ): UpdateResult = wrapped.updateMany(clientSession.wrapped, filter, update, options)
+
+    /**
+     * Update all documents in the collection according to the specified arguments.
+     *
+     * @param filter a document describing the query filter, which may not be null.
+     * @param update a pipeline describing the update, which may not be null.
+     * @param options the options to apply to the update operation
+     * @return the result of the update many operation
+     * @throws com.mongodb.MongoWriteException if the write failed due to some failure specific to the update command
+     * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern
+     * @throws com.mongodb.MongoException if the write failed due to some other failure
+     * @see [Modify Documents](https://www.mongodb.com/docs/manual/tutorial/modify-documents/)
+     * @see [Update Operators](https://www.mongodb.com/docs/manual/reference/operator/update/)
+     */
+    public fun updateMany(filter: Bson, update: List<Bson>, options: UpdateOptions = UpdateOptions()): UpdateResult =
+        wrapped.updateMany(filter, update, options)
+
+    /**
+     * Update all documents in the collection according to the specified arguments.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param filter a document describing the query filter, which may not be null.
+     * @param update a pipeline describing the update, which may not be null.
+     * @param options the options to apply to the update operation
+     * @return the result of the update many operation
+     * @throws com.mongodb.MongoWriteException if the write failed due to some failure specific to the update command
+     * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern
+     * @throws com.mongodb.MongoException if the write failed due to some other failure
+     * @see [Modify Documents](https://www.mongodb.com/docs/manual/tutorial/modify-documents/)
+     * @see [Update Operators](https://www.mongodb.com/docs/manual/reference/operator/update/)
+     */
+    public fun updateMany(
+        clientSession: ClientSession,
+        filter: Bson,
+        update: List<Bson>,
+        options: UpdateOptions = UpdateOptions()
+    ): UpdateResult = wrapped.updateMany(clientSession.wrapped, filter, update, options)
+
+    /**
+     * Replace a document in the collection according to the specified arguments.
+     *
+     * Use this method to replace a document using the specified replacement argument. To update the document with
+     * update operators, use the corresponding [updateOne] method.
+     *
+     * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+     *
+     * @param filter the query filter to apply to the replace operation
+     * @param replacement the replacement document
+     * @param options the options to apply to the replace operation
+     * @return the result of the replace one operation
+     * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception
+     * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write
+     * concern
+     * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+     * @throws com.mongodb.MongoException if the write failed due to some other failure
+     * @see [Modify Documents](https://www.mongodb.com/docs/manual/tutorial/modify-documents/#replace-the-document)
+     * @see [Update Command Behaviors](https://www.mongodb.com/docs/manual/reference/command/update/)
+     */
+    public fun replaceOne(filter: Bson, replacement: T, options: ReplaceOptions = ReplaceOptions()): UpdateResult =
+        wrapped.replaceOne(filter, replacement, options)
+
+    /**
+     * Replace a document in the collection according to the specified arguments.
+     *
+     * Use this method to replace a document using the specified replacement argument. To update the document with
+     * update operators, use the corresponding [updateOne] method.
+     *
+     * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param filter the query filter to apply to the replace operation
+     * @param replacement the replacement document
+     * @param options the options to apply to the replace operation
+     * @return the result of the replace one operation
+     * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception
+     * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write
+     * concern
+     * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+     * @throws com.mongodb.MongoException if the write failed due to some other failure
+     * @see [Modify Documents](https://www.mongodb.com/docs/manual/tutorial/modify-documents/#replace-the-document)
+     * @see [Update Command Behaviors](https://www.mongodb.com/docs/manual/reference/command/update/)
+     */
+    public fun replaceOne(
+        clientSession: ClientSession,
+        filter: Bson,
+        replacement: T,
+        options: ReplaceOptions = ReplaceOptions()
+    ): UpdateResult = wrapped.replaceOne(clientSession.wrapped, filter, replacement, options)
+
+    /**
+     * Removes at most one document from the collection that matches the given filter.
+     *
+     * If no documents match, the collection is not modified.
+     *
+     * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+     *
+     * @param filter the query filter to apply to the delete operation
+     * @param options the options to apply to the delete operation
+     * @return the result of the remove one operation
+     * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception
+     * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write
+     * concern
+     * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+     * @throws com.mongodb.MongoException if the write failed due to some other failure
+     */
+    public fun deleteOne(filter: Bson, options: DeleteOptions = DeleteOptions()): DeleteResult =
+        wrapped.deleteOne(filter, options)
+
+    /**
+     * Removes at most one document from the collection that matches the given filter.
+     *
+     * If no documents match, the collection is not modified.
+     *
+     * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
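+     *
+     * A hedged sketch using a session (assumes `session` came from `client.startSession()` and `Filters` is
+     * `com.mongodb.client.model.Filters`):
+     * ```
+     * val result = collection.deleteOne(session, Filters.eq("name", "Ada Lovelace"))
+     * println(result.deletedCount)
+     * ```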
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param filter the query filter to apply to the delete operation
+     * @param options the options to apply to the delete operation
+     * @return the result of the remove one operation
+     * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception
+     * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write
+     * concern
+     * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+     * @throws com.mongodb.MongoException if the write failed due to some other failure
+     */
+    public fun deleteOne(
+        clientSession: ClientSession,
+        filter: Bson,
+        options: DeleteOptions = DeleteOptions()
+    ): DeleteResult = wrapped.deleteOne(clientSession.wrapped, filter, options)
+
+    /**
+     * Removes all documents from the collection that match the given query filter.
+     *
+     * If no documents match, the collection is not modified.
+     *
+     * @param filter the query filter to apply to the delete operation
+     * @param options the options to apply to the delete operation
+     * @return the result of the remove many operation
+     * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception
+     * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write
+     * concern
+     * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+     * @throws com.mongodb.MongoException if the write failed due to some other failure
+     */
+    public fun deleteMany(filter: Bson, options: DeleteOptions = DeleteOptions()): DeleteResult =
+        wrapped.deleteMany(filter, options)
+
+    /**
+     * Removes all documents from the collection that match the given query filter.
+     *
+     * If no documents match, the collection is not modified.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param filter the query filter to apply to the delete operation
+     * @param options the options to apply to the delete operation
+     * @return the result of the remove many operation
+     * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception
+     * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write
+     * concern
+     * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+     * @throws com.mongodb.MongoException if the write failed due to some other failure
+     */
+    public fun deleteMany(
+        clientSession: ClientSession,
+        filter: Bson,
+        options: DeleteOptions = DeleteOptions()
+    ): DeleteResult = wrapped.deleteMany(clientSession.wrapped, filter, options)
+
+    /**
+     * Executes a mix of inserts, updates, replaces, and deletes.
+     *
+     * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+     * The eligibility for retryable write support for bulk operations is determined on the whole bulk write. If the
+     * `requests` contain any `UpdateManyModels` or `DeleteManyModels` then the bulk operation will not support
+     * retryable writes.
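+     *
+     * A sketch of a mixed batch (model classes from `com.mongodb.client.model`):
+     * ```
+     * val result = collection.bulkWrite(
+     *     listOf(
+     *         InsertOneModel(Document("x", 1)),
+     *         DeleteOneModel(Filters.eq("x", 2))
+     *     )
+     * )
+     * println(result.insertedCount)
+     * ```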
+     *
+     * @param requests the writes to execute
+     * @param options the options to apply to the bulk write operation
+     * @return the result of the bulk write
+     * @throws com.mongodb.MongoBulkWriteException if there's an exception in the bulk write operation
+     * @throws com.mongodb.MongoException if there's an exception running the operation
+     */
+    public fun bulkWrite(
+        requests: List<WriteModel<out T>>,
+        options: BulkWriteOptions = BulkWriteOptions()
+    ): BulkWriteResult = wrapped.bulkWrite(requests, options)
+
+    /**
+     * Executes a mix of inserts, updates, replaces, and deletes.
+     *
+     * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+     * The eligibility for retryable write support for bulk operations is determined on the whole bulk write. If the
+     * `requests` contain any `UpdateManyModels` or `DeleteManyModels` then the bulk operation will not support
+     * retryable writes.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param requests the writes to execute
+     * @param options the options to apply to the bulk write operation
+     * @return the result of the bulk write
+     * @throws com.mongodb.MongoBulkWriteException if there's an exception in the bulk write operation
+     * @throws com.mongodb.MongoException if there's an exception running the operation
+     */
+    public fun bulkWrite(
+        clientSession: ClientSession,
+        requests: List<WriteModel<out T>>,
+        options: BulkWriteOptions = BulkWriteOptions()
+    ): BulkWriteResult = wrapped.bulkWrite(clientSession.wrapped, requests, options)
+
+    /**
+     * Atomically find a document and remove it.
+     *
+     * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+     *
+     * @param filter the query filter to find the document with
+     * @param options the options to apply to the operation
+     * @return the document that was removed. If no documents matched the query filter, then null will be returned
+     */
+    public fun findOneAndDelete(filter: Bson, options: FindOneAndDeleteOptions = FindOneAndDeleteOptions()): T? =
+        wrapped.findOneAndDelete(filter, options)
+
+    /**
+     * Atomically find a document and remove it.
+     *
+     * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param filter the query filter to find the document with
+     * @param options the options to apply to the operation
+     * @return the document that was removed. If no documents matched the query filter, then null will be returned
+     */
+    public fun findOneAndDelete(
+        clientSession: ClientSession,
+        filter: Bson,
+        options: FindOneAndDeleteOptions = FindOneAndDeleteOptions()
+    ): T? = wrapped.findOneAndDelete(clientSession.wrapped, filter, options)
+
+    /**
+     * Atomically find a document and update it.
+     *
+     * Use this method to only update the corresponding fields in the document according to the update operators used
+     * in the update document. To replace the entire document with a new document, use the corresponding
+     * [findOneAndReplace] method.
+     *
+     * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+     *
+     * @param filter a document describing the query filter, which may not be null.
+     * @param update a document describing the update, which may not be null. The update to apply must include at least
+     * one update operator.
+     * @param options the options to apply to the operation
+     * @return the document that was updated. Depending on the value of the `returnOriginal` property, this will either
+     * be the document as it was before the update or as it is after the update. If no documents matched the query
+     * filter, then null will be returned
+     * @see [Update Command Behaviors](https://www.mongodb.com/docs/manual/reference/command/update/)
+     * @see com.mongodb.client.MongoCollection.findOneAndReplace
+     */
+    public fun findOneAndUpdate(
+        filter: Bson,
+        update: Bson,
+        options: FindOneAndUpdateOptions = FindOneAndUpdateOptions()
+    ): T? = wrapped.findOneAndUpdate(filter, update, options)
+
+    /**
+     * Atomically find a document and update it.
+     *
+     * Use this method to only update the corresponding fields in the document according to the update operators used
+     * in the update document. To replace the entire document with a new document, use the corresponding
+     * [findOneAndReplace] method.
+     *
+     * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param filter a document describing the query filter, which may not be null.
+     * @param update a document describing the update, which may not be null. The update to apply must include at least
+     * one update operator.
+     * @param options the options to apply to the operation
+     * @return the document that was updated. Depending on the value of the `returnOriginal` property, this will either
+     * be the document as it was before the update or as it is after the update. If no documents matched the query
+     * filter, then null will be returned
+     * @see [Update Command Behaviors](https://www.mongodb.com/docs/manual/reference/command/update/)
+     * @see com.mongodb.client.MongoCollection.findOneAndReplace
+     */
+    public fun findOneAndUpdate(
+        clientSession: ClientSession,
+        filter: Bson,
+        update: Bson,
+        options: FindOneAndUpdateOptions = FindOneAndUpdateOptions()
+    ): T? = wrapped.findOneAndUpdate(clientSession.wrapped, filter, update, options)
+
+    /**
+     * Atomically find a document and update it.
+     *
+     * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+     *
+     * @param filter a document describing the query filter, which may not be null.
+     * @param update a pipeline describing the update, which may not be null.
+     * @param options the options to apply to the operation
+     * @return the document that was updated. Depending on the value of the `returnOriginal` property, this will either
+     * be the document as it was before the update or as it is after the update. If no documents matched the query
+     * filter, then null will be returned
+     */
+    public fun findOneAndUpdate(
+        filter: Bson,
+        update: List<Bson>,
+        options: FindOneAndUpdateOptions = FindOneAndUpdateOptions()
+    ): T? = wrapped.findOneAndUpdate(filter, update, options)
+
+    /**
+     * Atomically find a document and update it.
+     *
+     * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param filter a document describing the query filter, which may not be null.
+     * @param update a pipeline describing the update, which may not be null.
+     * @param options the options to apply to the operation
+     * @return the document that was updated.
+     * Depending on the value of the `returnOriginal` property, this will either be the document as it was before the
+     * update or as it is after the update. If no documents matched the query filter, then null will be returned
+     */
+    public fun findOneAndUpdate(
+        clientSession: ClientSession,
+        filter: Bson,
+        update: List<Bson>,
+        options: FindOneAndUpdateOptions = FindOneAndUpdateOptions()
+    ): T? = wrapped.findOneAndUpdate(clientSession.wrapped, filter, update, options)
+
+    /**
+     * Atomically find a document and replace it.
+     *
+     * Use this method to replace a document using the specified replacement argument. To update the document with
+     * update operators, use the corresponding [findOneAndUpdate] method.
+     *
+     * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+     *
+     * @param filter the query filter to apply to the replace operation
+     * @param replacement the replacement document
+     * @param options the options to apply to the operation
+     * @return the document that was replaced. Depending on the value of the `returnOriginal` property, this will either
+     * be the document as it was before the update or as it is after the update. If no documents matched the query
+     * filter, then null will be returned
+     * @see [Update Command Behaviors](https://www.mongodb.com/docs/manual/reference/command/update/)
+     */
+    public fun findOneAndReplace(
+        filter: Bson,
+        replacement: T,
+        options: FindOneAndReplaceOptions = FindOneAndReplaceOptions()
+    ): T? = wrapped.findOneAndReplace(filter, replacement, options)
+
+    /**
+     * Atomically find a document and replace it.
+     *
+     * Use this method to replace a document using the specified replacement argument. To update the document with
+     * update operators, use the corresponding [findOneAndUpdate] method.
+     *
+     * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param filter the query filter to apply to the replace operation
+     * @param replacement the replacement document
+     * @param options the options to apply to the operation
+     * @return the document that was replaced. Depending on the value of the `returnOriginal` property, this will either
+     * be the document as it was before the update or as it is after the update. If no documents matched the query
+     * filter, then null will be returned
+     * @see [Update Command Behaviors](https://www.mongodb.com/docs/manual/reference/command/update/)
+     */
+    public fun findOneAndReplace(
+        clientSession: ClientSession,
+        filter: Bson,
+        replacement: T,
+        options: FindOneAndReplaceOptions = FindOneAndReplaceOptions()
+    ): T? = wrapped.findOneAndReplace(clientSession.wrapped, filter, replacement, options)
+
+    /**
+     * Drops this collection from the Database.
+     *
+     * @param options various options for dropping the collection
+     * @see [Drop Collection](https://www.mongodb.com/docs/manual/reference/command/drop/)
+     */
+    public fun drop(options: DropCollectionOptions = DropCollectionOptions()): Unit = wrapped.drop(options)
+
+    /**
+     * Drops this collection from the Database.
+ * + * @param clientSession the client session with which to associate this operation + * @param options various options for dropping the collection + * @see [Drop Collection](https://www.mongodb.com/docs/manual/reference/command/drop/) + */ + public fun drop(clientSession: ClientSession, options: DropCollectionOptions = DropCollectionOptions()): Unit = + wrapped.drop(clientSession.wrapped, options) + + /** + * Create an Atlas Search index for the collection. + * + * @param indexName the name of the search index to create. + * @param definition the search index mapping definition. + * @return the search index name. + * @see [Create search indexes](https://www.mongodb.com/docs/manual/reference/command/createSearchIndexes/) + */ + public fun createSearchIndex(indexName: String, definition: Bson): String = + wrapped.createSearchIndex(indexName, definition) + + /** + * Create an Atlas Search index with `default` name for the collection. + * + * @param definition the search index mapping definition. + * @return the search index name. + * @see [Create search indexes](https://www.mongodb.com/docs/manual/reference/command/createSearchIndexes/) + */ + public fun createSearchIndex(definition: Bson): String = wrapped.createSearchIndex(definition) + + /** + * Create one or more Atlas Search indexes for the collection. + * + *

+     * The name can be omitted for a single index, in which case the name will be `default`.
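+     *
+     * A sketch (assumes Atlas Search is available; `SearchIndexModel` is from `com.mongodb.client.model`):
+     * ```
+     * val names = collection.createSearchIndexes(
+     *     listOf(SearchIndexModel("titleIndex", Document("mappings", Document("dynamic", true))))
+     * )
+     * ```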

+     *
+     * @param searchIndexModels the search index models.
+     * @return the search index names in the order specified by the given list of [SearchIndexModel]s.
+     * @see [Create search indexes](https://www.mongodb.com/docs/manual/reference/command/createSearchIndexes/)
+     */
+    public fun createSearchIndexes(searchIndexModels: List<SearchIndexModel>): List<String> =
+        wrapped.createSearchIndexes(searchIndexModels)
+
+    /**
+     * Update an Atlas Search index in the collection.
+     *
+     * @param indexName the name of the search index to update.
+     * @param definition the search index mapping definition.
+     * @see [Update search index](https://www.mongodb.com/docs/manual/reference/command/updateSearchIndex/)
+     */
+    public fun updateSearchIndex(indexName: String, definition: Bson) {
+        wrapped.updateSearchIndex(indexName, definition)
+    }
+
+    /**
+     * Drop an Atlas Search index given its name.
+     *
+     * @param indexName the name of the search index to drop.
+     * @see [Drop search index](https://www.mongodb.com/docs/manual/reference/command/dropSearchIndex/)
+     */
+    public fun dropSearchIndex(indexName: String) {
+        wrapped.dropSearchIndex(indexName)
+    }
+
+    /**
+     * Get all the Atlas Search indexes in this collection.
+     *
+     * @return the list search indexes iterable interface.
+     * @see [List search indexes](https://www.mongodb.com/docs/manual/reference/operator/aggregation/listSearchIndexes)
+     */
+    @JvmName("listSearchIndexesAsDocument")
+    public fun listSearchIndexes(): ListSearchIndexesIterable<Document> = listSearchIndexes<Document>()
+
+    /**
+     * Get all the Atlas Search indexes in this collection.
+     *
+     * @param R the class to decode each document into.
+     * @param resultClass the target document type of the iterable.
+     * @return the list search indexes iterable interface.
+     * @see [List search indexes](https://www.mongodb.com/docs/manual/reference/operator/aggregation/listSearchIndexes)
+     */
+    public fun <R : Any> listSearchIndexes(resultClass: Class<R>): ListSearchIndexesIterable<R> =
+        ListSearchIndexesIterable(wrapped.listSearchIndexes(resultClass))
+
+    /**
+     * Get all the Atlas Search indexes in this collection.
+     *
+     * @param R the class to decode each document into.
+     * @return the list search indexes iterable interface.
+     * @see [List Atlas Search indexes](https://www.mongodb.com/docs/manual/reference/operator/aggregation/listSearchIndexes)
+     */
+    public inline fun <reified R : Any> listSearchIndexes(): ListSearchIndexesIterable<R> =
+        listSearchIndexes(R::class.java)
+
+    /**
+     * Create an index with the given keys and options.
+     *
+     * @param keys an object describing the index key(s), which may not be null.
+     * @param options the options for the index
+     * @return the index name
+     * @see [Create indexes](https://www.mongodb.com/docs/manual/reference/command/createIndexes/)
+     */
+    public fun createIndex(keys: Bson, options: IndexOptions = IndexOptions()): String =
+        wrapped.createIndex(keys, options)
+
+    /**
+     * Create an index with the given keys and options.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param keys an object describing the index key(s), which may not be null.
+     * @param options the options for the index
+     * @return the index name
+     * @see [Create indexes](https://www.mongodb.com/docs/manual/reference/command/createIndexes/)
+     */
+    public fun createIndex(clientSession: ClientSession, keys: Bson, options: IndexOptions = IndexOptions()): String =
+        wrapped.createIndex(clientSession.wrapped, keys, options)
+
+    /**
+     * Create multiple indexes.
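+     *
+     * For example (a hedged sketch; `Indexes` is `com.mongodb.client.model.Indexes`):
+     * ```
+     * val names = collection.createIndexes(
+     *     listOf(IndexModel(Indexes.ascending("name")), IndexModel(Indexes.descending("createdAt")))
+     * )
+     * ```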
+     *
+     * @param indexes the list of indexes
+     * @param options options to use when creating indexes
+     * @return the list of index names
+     * @see [Create indexes](https://www.mongodb.com/docs/manual/reference/command/createIndexes/)
+     */
+    public fun createIndexes(
+        indexes: List<IndexModel>,
+        options: CreateIndexOptions = CreateIndexOptions()
+    ): List<String> = wrapped.createIndexes(indexes, options)
+
+    /**
+     * Create multiple indexes.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param indexes the list of indexes
+     * @param options options to use when creating indexes
+     * @return the list of index names
+     * @see [Create indexes](https://www.mongodb.com/docs/manual/reference/command/createIndexes/)
+     */
+    public fun createIndexes(
+        clientSession: ClientSession,
+        indexes: List<IndexModel>,
+        options: CreateIndexOptions = CreateIndexOptions()
+    ): List<String> = wrapped.createIndexes(clientSession.wrapped, indexes, options)
+
+    /**
+     * Get all the indexes in this collection.
+     *
+     * @return the list indexes iterable interface
+     * @see [List indexes](https://www.mongodb.com/docs/manual/reference/command/listIndexes/)
+     */
+    @JvmName("listIndexesAsDocument")
+    public fun listIndexes(): ListIndexesIterable<Document> = listIndexes<Document>()
+
+    /**
+     * Get all the indexes in this collection.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @return the list indexes iterable interface
+     * @see [List indexes](https://www.mongodb.com/docs/manual/reference/command/listIndexes/)
+     */
+    @JvmName("listIndexesAsDocumentWithSession")
+    public fun listIndexes(clientSession: ClientSession): ListIndexesIterable<Document> =
+        listIndexes<Document>(clientSession)
+
+    /**
+     * Get all the indexes in this collection.
+     *
+     * @param R the class to decode each document into
+     * @param resultClass the target document type of the iterable.
+     * @return the list indexes iterable interface
+     * @see [List indexes](https://www.mongodb.com/docs/manual/reference/command/listIndexes/)
+     */
+    public fun <R : Any> listIndexes(resultClass: Class<R>): ListIndexesIterable<R> =
+        ListIndexesIterable(wrapped.listIndexes(resultClass))
+
+    /**
+     * Get all the indexes in this collection.
+     *
+     * @param R the class to decode each document into
+     * @param clientSession the client session with which to associate this operation
+     * @param resultClass the target document type of the iterable.
+     * @return the list indexes iterable interface
+     * @see [List indexes](https://www.mongodb.com/docs/manual/reference/command/listIndexes/)
+     */
+    public fun <R : Any> listIndexes(clientSession: ClientSession, resultClass: Class<R>): ListIndexesIterable<R> =
+        ListIndexesIterable(wrapped.listIndexes(clientSession.wrapped, resultClass))
+
+    /**
+     * Get all the indexes in this collection.
+     *
+     * @param R the class to decode each document into
+     * @return the list indexes iterable interface
+     * @see [List indexes](https://www.mongodb.com/docs/manual/reference/command/listIndexes/)
+     */
+    public inline fun <reified R : Any> listIndexes(): ListIndexesIterable<R> = listIndexes(R::class.java)
+
+    /**
+     * Get all the indexes in this collection.
+     *
+     * @param R the class to decode each document into
+     * @param clientSession the client session with which to associate this operation
+     * @return the list indexes iterable interface
+     * @see [List indexes](https://www.mongodb.com/docs/manual/reference/command/listIndexes/)
+     */
+    public inline fun <reified R : Any> listIndexes(clientSession: ClientSession): ListIndexesIterable<R> =
+        listIndexes(clientSession, R::class.java)
+
+    /**
+     * Drops the index given its name.
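+     *
+     * For instance (hedged; uses an index name of the form returned by `createIndex`):
+     * ```
+     * collection.dropIndex("name_1")
+     * ```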
+ * + * @param indexName the name of the index to remove + * @param options the options to use when dropping indexes + * @see [Drop indexes](https://www.mongodb.com/docs/manual/reference/command/dropIndexes/) + */ + public fun dropIndex(indexName: String, options: DropIndexOptions = DropIndexOptions()): Unit = + wrapped.dropIndex(indexName, options) + + /** + * Drops the index given the keys used to create it. + * + * @param keys the keys of the index to remove + * @param options the options to use when dropping indexes + * @see [Drop indexes](https://www.mongodb.com/docs/manual/reference/command/dropIndexes/) + */ + public fun dropIndex(keys: Bson, options: DropIndexOptions = DropIndexOptions()): Unit = + wrapped.dropIndex(keys, options) + + /** + * Drops the index given its name. + * + * @param clientSession the client session with which to associate this operation + * @param indexName the name of the index to remove + * @param options the options to use when dropping indexes + * @see [Drop indexes](https://www.mongodb.com/docs/manual/reference/command/dropIndexes/) + */ + public fun dropIndex( + clientSession: ClientSession, + indexName: String, + options: DropIndexOptions = DropIndexOptions() + ): Unit = wrapped.dropIndex(clientSession.wrapped, indexName, options) + + /** + * Drops the index given the keys used to create it. + * + * @param clientSession the client session with which to associate this operation + * @param keys the keys of the index to remove + * @param options the options to use when dropping indexes + * @see [Drop indexes](https://www.mongodb.com/docs/manual/reference/command/dropIndexes/) + */ + public fun dropIndex( + clientSession: ClientSession, + keys: Bson, + options: DropIndexOptions = DropIndexOptions() + ): Unit = wrapped.dropIndex(clientSession.wrapped, keys, options) + + /** + * Drop all the indexes on this collection, except for the default on `_id`. + * + * @param options the options to use when dropping indexes + * @see [Drop indexes](https://www.mongodb.com/docs/manual/reference/command/dropIndexes/) + */ + public fun dropIndexes(options: DropIndexOptions = DropIndexOptions()): Unit = wrapped.dropIndexes(options) + + /** + * Drop all the indexes on this collection, except for the default on `_id`. + * + * @param clientSession the client session with which to associate this operation + * @param options the options to use when dropping indexes + * @see [Drop indexes](https://www.mongodb.com/docs/manual/reference/command/dropIndexes/) + */ + public fun dropIndexes(clientSession: ClientSession, options: DropIndexOptions = DropIndexOptions()): Unit = + wrapped.dropIndexes(clientSession.wrapped, options) + + /** + * Rename the collection with oldCollectionName to the newCollectionName. + * + * @param newCollectionNamespace the name the collection will be renamed to + * @param options the options for renaming a collection + * @throws com.mongodb.MongoServerException if you provide a newCollectionName that is the name of an existing + * collection and dropTarget is false, or if the oldCollectionName is the name of a collection that doesn't exist + * @see [Rename collection](https://www.mongodb.com/docs/manual/reference/command/renameCollection/) + */ + public fun renameCollection( + newCollectionNamespace: MongoNamespace, + options: RenameCollectionOptions = RenameCollectionOptions() + ): Unit = wrapped.renameCollection(newCollectionNamespace, options) + + /** + * Rename the collection with oldCollectionName to the newCollectionName. 
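+     *
+     * A brief sketch (hypothetical target namespace; `MongoNamespace` takes a database and a collection name):
+     * ```
+     * collection.renameCollection(MongoNamespace("mydb", "newName"))
+     * ```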
+ * + * @param clientSession the client session with which to associate this operation + * @param newCollectionNamespace the name the collection will be renamed to + * @param options the options for renaming a collection + * @throws com.mongodb.MongoServerException if you provide a newCollectionName that is the name of an existing + * collection and dropTarget is false, or if the oldCollectionName is the name of a collection that doesn't exist + * @see [Rename collection](https://www.mongodb.com/docs/manual/reference/command/renameCollection/) + */ + public fun renameCollection( + clientSession: ClientSession, + newCollectionNamespace: MongoNamespace, + options: RenameCollectionOptions = RenameCollectionOptions() + ): Unit = wrapped.renameCollection(clientSession.wrapped, newCollectionNamespace, options) +} + +/** + * maxTime extension function + * + * @param maxTime time in milliseconds + * @return the options + */ +public fun CreateIndexOptions.maxTime(maxTime: Long): CreateIndexOptions = + this.apply { maxTime(maxTime, TimeUnit.MILLISECONDS) } +/** + * maxTime extension function + * + * @param maxTime time in milliseconds + * @return the options + */ +public fun CountOptions.maxTime(maxTime: Long): CountOptions = this.apply { maxTime(maxTime, TimeUnit.MILLISECONDS) } +/** + * maxTime extension function + * + * @param maxTime time in milliseconds + * @return the options + */ +public fun DropIndexOptions.maxTime(maxTime: Long): DropIndexOptions = + this.apply { maxTime(maxTime, TimeUnit.MILLISECONDS) } +/** + * maxTime extension function + * + * @param maxTime time in milliseconds + * @return the options + */ +public fun EstimatedDocumentCountOptions.maxTime(maxTime: Long): EstimatedDocumentCountOptions = + this.apply { maxTime(maxTime, TimeUnit.MILLISECONDS) } +/** + * maxTime extension function + * + * @param maxTime time in milliseconds + * @return the options + */ +public fun FindOneAndDeleteOptions.maxTime(maxTime: Long): FindOneAndDeleteOptions = + this.apply { maxTime(maxTime, TimeUnit.MILLISECONDS) } +/** + * maxTime extension function + * + * @param maxTime time in milliseconds + * @return the options + */ +public fun FindOneAndReplaceOptions.maxTime(maxTime: Long): FindOneAndReplaceOptions = + this.apply { maxTime(maxTime, TimeUnit.MILLISECONDS) } +/** + * maxTime extension function + * + * @param maxTime time in milliseconds + * @return the options + */ +public fun FindOneAndUpdateOptions.maxTime(maxTime: Long): FindOneAndUpdateOptions = + this.apply { maxTime(maxTime, TimeUnit.MILLISECONDS) } +/** + * expireAfter extension function + * + * @param expireAfter time in seconds + * @return the options + */ +public fun IndexOptions.expireAfter(expireAfter: Long): IndexOptions = + this.apply { expireAfter(expireAfter, TimeUnit.SECONDS) } diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCursor.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCursor.kt new file mode 100644 index 00000000000..714e82fa78e --- /dev/null +++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoCursor.kt @@ -0,0 +1,143 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client
+
+import com.mongodb.ServerAddress
+import com.mongodb.ServerCursor
+import com.mongodb.client.MongoChangeStreamCursor as JMongoChangeStreamCursor
+import com.mongodb.client.MongoCursor as JMongoCursor
+import java.io.Closeable
+import org.bson.BsonDocument
+
+/**
+ * The Mongo Cursor interface implementing the iterator protocol.
+ *
+ * An application should ensure that a cursor is closed in all circumstances, e.g. using a `use` statement:
+ * ```
+ * collection.find().cursor().use { c ->
+ *     while (c.hasNext()) {
+ *         println(c.next())
+ *     }
+ * }
+ * ```
+ *
+ * @param T The type of documents the cursor contains
+ */
+public sealed interface MongoCursor<T : Any> : Iterator<T>, Closeable {
+
+    /**
+     * Gets the number of results available locally without blocking, which may be 0.
+     *
+     * If the cursor is known to be exhausted, returns 0. If the cursor is closed before it's been exhausted, it may
+     * return a non-zero value.
+     */
+    public val available: Int
+
+    /**
+     * A special [next] case that returns the next element in the iteration if available, or null.
+     *
+     * Tailable cursors are an example where this is useful. A call to [tryNext] may return null, but in the future
+     * calling [tryNext] would return a new element if a document had been added to the capped collection.
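+     *
+     * A polling sketch under stated assumptions (a capped collection and a tailable cursor via `CursorType`):
+     * ```
+     * collection.find().cursorType(CursorType.TailableAwait).cursor().use { c ->
+     *     while (true) {
+     *         val doc = c.tryNext() ?: continue // null until a new document arrives
+     *         println(doc)
+     *     }
+     * }
+     * ```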

+     *
+     * @return the next element in the iteration if available or null.
+     * @see [Tailable Cursor](https://www.mongodb.com/docs/manual/reference/glossary/#term-tailable-cursor)
+     */
+    public fun tryNext(): T?
+
+    /** @return the ServerCursor if available */
+    public val serverCursor: ServerCursor?
+
+    /** @return the ServerAddress */
+    public val serverAddress: ServerAddress
+}
+
+/**
+ * The Mongo Cursor interface for change streams implementing the iterator protocol.
+ *
+ * An application should ensure that a cursor is closed in all circumstances, e.g. using a `use` statement:
+ * ```
+ * collection.watch().cursor().use { c ->
+ *     while (c.hasNext()) {
+ *         println(c.next())
+ *     }
+ * }
+ * ```
+ *
+ * A [com.mongodb.MongoOperationTimeoutException] does not invalidate the [MongoChangeStreamCursor], but is immediately
+ * propagated to the caller. Subsequent method calls will attempt to resume operation by establishing a new change
+ * stream on the server, without performing a `getMore` request first.
+ *
+ * If a [com.mongodb.MongoOperationTimeoutException] occurs before any events are received, it indicates that the
+ * server has timed out before it could finish processing the existing oplog. In such cases, it is recommended to close
+ * the current stream and recreate it with a higher timeout setting.
+ *
+ * @param T The type of documents the cursor contains
+ */
+public sealed interface MongoChangeStreamCursor<T : Any> : MongoCursor<T> {
+    /**
+     * Returns the resume token. If a batch has been iterated to the last change stream document in the batch and a
+     * postBatchResumeToken is included in the document, the postBatchResumeToken will be returned. Otherwise, the
+     * resume token contained in the last change stream document will be returned.
+     *
+     * @return the resume token, which can be null if the cursor has either not been iterated yet, or the cursor is
+     * closed.
+     */
+    public val resumeToken: BsonDocument?
+}
+
+internal class MongoCursorImpl<T : Any>(private val wrapped: JMongoCursor<T>) : MongoCursor<T> {
+
+    override fun hasNext(): Boolean = wrapped.hasNext()
+
+    override fun next(): T = wrapped.next()
+
+    override fun close(): Unit = wrapped.close()
+
+    override val available: Int
+        get() = wrapped.available()
+
+    override fun tryNext(): T? = wrapped.tryNext()
+
+    override val serverCursor: ServerCursor?
+        get() = wrapped.serverCursor
+
+    override val serverAddress: ServerAddress
+        get() = wrapped.serverAddress
+}
+
+internal class MongoChangeStreamCursorImpl<T : Any>(private val wrapped: JMongoChangeStreamCursor<T>) :
+    MongoChangeStreamCursor<T> {
+
+    override fun hasNext(): Boolean = wrapped.hasNext()
+
+    override fun next(): T = wrapped.next()
+
+    override fun close(): Unit = wrapped.close()
+
+    override val available: Int
+        get() = wrapped.available()
+
+    override fun tryNext(): T? = wrapped.tryNext()
+
+    override val serverCursor: ServerCursor?
+        get() = wrapped.serverCursor
+
+    override val serverAddress: ServerAddress
+        get() = wrapped.serverAddress
+
+    override val resumeToken: BsonDocument?
+        get() = wrapped.resumeToken
+}
diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoDatabase.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoDatabase.kt
new file mode 100644
index 00000000000..d59ba628008
--- /dev/null
+++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoDatabase.kt
@@ -0,0 +1,592 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.kotlin.client
+
+import com.mongodb.ReadConcern
+import com.mongodb.ReadPreference
+import com.mongodb.WriteConcern
+import com.mongodb.annotations.Alpha
+import com.mongodb.annotations.Reason
+import com.mongodb.client.MongoDatabase as JMongoDatabase
+import com.mongodb.client.model.CreateCollectionOptions
+import com.mongodb.client.model.CreateViewOptions
+import java.util.concurrent.TimeUnit
+import org.bson.Document
+import org.bson.codecs.configuration.CodecRegistry
+import org.bson.conversions.Bson
+
+/** The MongoDatabase representation. */
+public class MongoDatabase(private val wrapped: JMongoDatabase) {
+
+    /** The name of the database. */
+    public val name: String
+        get() = wrapped.name
+
+    /** The codec registry for the database. */
+    public val codecRegistry: CodecRegistry
+        get() = wrapped.codecRegistry
+
+    /** The read preference for the database. */
+    public val readPreference: ReadPreference
+        get() = wrapped.readPreference
+
+    /**
+     * The read concern for the database.
+     *
+     * @see [Read Concern](https://www.mongodb.com/docs/manual/reference/readConcern/)
+     */
+    public val readConcern: ReadConcern
+        get() = wrapped.readConcern
+
+    /** The write concern for the database. */
+    public val writeConcern: WriteConcern
+        get() = wrapped.writeConcern
+
+    /**
+     * The time limit for the full execution of an operation.
+     *
+     * If not null, the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`,
+     * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`.
+     * - `null` means that the timeout mechanism for operations will defer to using:
+     *     - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to
+     *       become available
+     *     - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out.
+     *     - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out.
+     *     - `maxTimeMS`: The time limit for processing operations on a cursor. See:
+     *       [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS).
+     *     - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute.
+     * - `0` means infinite timeout.
+     * - `> 0` The time limit to use for the full execution of an operation.
+     *
+     * @return the optional timeout duration
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun timeout(timeUnit: TimeUnit = TimeUnit.MILLISECONDS): Long? = wrapped.getTimeout(timeUnit)
+
+    /**
+     * Create a new MongoDatabase instance with a different codec registry.
+     *
+     * The [CodecRegistry] configured by this method is effectively treated by the driver as an instance of
+     * [org.bson.codecs.configuration.CodecProvider], which [CodecRegistry] extends. So there is no benefit to defining
+     * a class that implements [CodecRegistry].
+     * Rather, an application should always create [CodecRegistry] instances using the factory methods in
+     * [org.bson.codecs.configuration.CodecRegistries].
+     *
+     * @param newCodecRegistry the new [org.bson.codecs.configuration.CodecRegistry] for the database
+     * @return a new MongoDatabase instance with the different codec registry
+     * @see org.bson.codecs.configuration.CodecRegistries
+     */
+    public fun withCodecRegistry(newCodecRegistry: CodecRegistry): MongoDatabase =
+        MongoDatabase(wrapped.withCodecRegistry(newCodecRegistry))
+
+    /**
+     * Create a new MongoDatabase instance with a different read preference.
+     *
+     * @param newReadPreference the new [ReadPreference] for the database
+     * @return a new MongoDatabase instance with the different readPreference
+     */
+    public fun withReadPreference(newReadPreference: ReadPreference): MongoDatabase =
+        MongoDatabase(wrapped.withReadPreference(newReadPreference))
+
+    /**
+     * Create a new MongoDatabase instance with a different read concern.
+     *
+     * @param newReadConcern the new [ReadConcern] for the database
+     * @return a new MongoDatabase instance with the different ReadConcern
+     * @see [Read Concern](https://www.mongodb.com/docs/manual/reference/readConcern/)
+     */
+    public fun withReadConcern(newReadConcern: ReadConcern): MongoDatabase =
+        MongoDatabase(wrapped.withReadConcern(newReadConcern))
+
+    /**
+     * Create a new MongoDatabase instance with a different write concern.
+     *
+     * @param newWriteConcern the new [WriteConcern] for the database
+     * @return a new MongoDatabase instance with the different writeConcern
+     */
+    public fun withWriteConcern(newWriteConcern: WriteConcern): MongoDatabase =
+        MongoDatabase(wrapped.withWriteConcern(newWriteConcern))
+
+    /**
+     * Create a new MongoDatabase instance with the set time limit for the full execution of an operation.
+     * - `0` means an infinite timeout
+     * - `> 0` The time limit to use for the full execution of an operation.
+     *
+     * @param timeout the timeout, which must be greater than or equal to 0
+     * @param timeUnit the time unit, defaults to Milliseconds
+     * @return a new MongoDatabase instance with the set time limit for operations
+     * @see [MongoDatabase.timeout]
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    public fun withTimeout(timeout: Long, timeUnit: TimeUnit = TimeUnit.MILLISECONDS): MongoDatabase =
+        MongoDatabase(wrapped.withTimeout(timeout, timeUnit))
+
+    /**
+     * Gets a collection.
+     *
+     * @param T the default class to convert documents returned from the collection into.
+     * @param collectionName the name of the collection to return
+     * @param resultClass the target document type for the collection
+     * @return the collection
+     */
+    public fun <T : Any> getCollection(collectionName: String, resultClass: Class<T>): MongoCollection<T> =
+        MongoCollection(wrapped.getCollection(collectionName, resultClass))
+
+    /**
+     * Gets a collection.
+     *
+     * @param T the default class to convert documents returned from the collection into.
+     * @param collectionName the name of the collection to return
+     * @return the collection
+     */
+    public inline fun <reified T : Any> getCollection(collectionName: String): MongoCollection<T> =
+        getCollection(collectionName, T::class.java)
+
+    /**
+     * Executes the given command in the context of the current database with the given read preference.
+     *
+     * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field
+     * and the `timeoutMS` setting has been set.
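+     *
+     * A quick sketch (hedged; `ping` is a standard server command):
+     * ```
+     * val reply = database.runCommand(Document("ping", 1))
+     * println(reply.toJson())
+     * ```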
+     *
+     * @param command the command to be run
+     * @param readPreference the [ReadPreference] to be used when executing the command, defaults to
+     * [MongoDatabase.readPreference]
+     * @return the command result
+     */
+    public fun runCommand(command: Bson, readPreference: ReadPreference = this.readPreference): Document =
+        runCommand<Document>(command, readPreference)
+
+    /**
+     * Executes the given command in the context of the current database with the given read preference.
+     *
+     * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field
+     * and the `timeoutMS` setting has been set.
+     *
+     * @param clientSession the client session with which to associate this operation
+     * @param command the command to be run
+     * @param readPreference the [ReadPreference] to be used when executing the command, defaults to
+     * [MongoDatabase.readPreference]
+     * @return the command result
+     */
+    public fun runCommand(
+        clientSession: ClientSession,
+        command: Bson,
+        readPreference: ReadPreference = this.readPreference
+    ): Document = runCommand<Document>(clientSession, command, readPreference)
+
+    /**
+     * Executes the given command in the context of the current database with the given read preference.
+     *
+     * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field
+     * and the `timeoutMS` setting has been set.
+     *
+     * @param T the class to decode each document into
+     * @param command the command to be run
+     * @param readPreference the [ReadPreference] to be used when executing the command, defaults to
+     * [MongoDatabase.readPreference]
+     * @param resultClass the target document class
+     * @return the command result
+     */
+    public fun <T : Any> runCommand(
+        command: Bson,
+        readPreference: ReadPreference = this.readPreference,
+        resultClass: Class<T>
+    ): T = wrapped.runCommand(command, readPreference, resultClass)
+
+    /**
+     * Executes the given command in the context of the current database with the given read preference.
+     *
+     * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field
+     * and the `timeoutMS` setting has been set.
+     *
+     * @param T the class to decode each document into
+     * @param clientSession the client session with which to associate this operation
+     * @param command the command to be run
+     * @param readPreference the [ReadPreference] to be used when executing the command, defaults to
+     * [MongoDatabase.readPreference]
+     * @param resultClass the target document class
+     * @return the command result
+     */
+    public fun <T : Any> runCommand(
+        clientSession: ClientSession,
+        command: Bson,
+        readPreference: ReadPreference = this.readPreference,
+        resultClass: Class<T>
+    ): T = wrapped.runCommand(clientSession.wrapped, command, readPreference, resultClass)
+
+    /**
+     * Executes the given command in the context of the current database with the given read preference.
+     *
+     * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field
+     * and the `timeoutMS` setting has been set.
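+     *
+     * A typed sketch (hedged; decodes the reply into `BsonDocument`, for which a default codec exists):
+     * ```
+     * val status = database.runCommand<BsonDocument>(BsonDocument("hello", BsonInt32(1)))
+     * ```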
+ * + * @param T the class to decode each document into + * @param command the command to be run + * @param readPreference the [ReadPreference] to be used when executing the command, defaults to + * [MongoDatabase.readPreference] + * @return the command result + */ + public inline fun <reified T : Any> runCommand( + command: Bson, + readPreference: ReadPreference = this.readPreference + ): T = runCommand(command, readPreference, T::class.java) + + /** + * Executes the given command in the context of the current database with the given read preference. + * + * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and + * the `timeoutMS` setting has been set. + * + * @param T the class to decode each document into + * @param clientSession the client session with which to associate this operation + * @param command the command to be run + * @param readPreference the [ReadPreference] to be used when executing the command, defaults to + * [MongoDatabase.readPreference] + * @return the command result + */ + public inline fun <reified T : Any> runCommand( + clientSession: ClientSession, + command: Bson, + readPreference: ReadPreference = this.readPreference + ): T = runCommand(clientSession, command, readPreference, T::class.java) + + /** + * Drops this database. + * + * @see [Drop database](https://www.mongodb.com/docs/manual/reference/command/dropDatabase/#dbcmd.dropDatabase) + */ + public fun drop(): Unit = wrapped.drop() + + /** + * Drops this database. + * + * @param clientSession the client session with which to associate this operation + * @see [Drop database](https://www.mongodb.com/docs/manual/reference/command/dropDatabase/#dbcmd.dropDatabase) + */ + public fun drop(clientSession: ClientSession): Unit = wrapped.drop(clientSession.wrapped) + + /** + * Gets the names of all the collections in this database. + * + * @return an iterable containing all the names of all the collections in this database + * @see [listCollections](https://www.mongodb.com/docs/manual/reference/command/listCollections) + */ + public fun listCollectionNames(): ListCollectionNamesIterable = + ListCollectionNamesIterable(wrapped.listCollectionNames()) + + /** + * Gets the names of all the collections in this database. + * + * @param clientSession the client session with which to associate this operation + * @return an iterable containing all the names of all the collections in this database + * @see [listCollections](https://www.mongodb.com/docs/manual/reference/command/listCollections) + */ + public fun listCollectionNames(clientSession: ClientSession): ListCollectionNamesIterable = + ListCollectionNamesIterable(wrapped.listCollectionNames(clientSession.wrapped)) + + /** + * Gets all the collections in this database. + * + * @return the list collections iterable interface + * @see [listCollections](https://www.mongodb.com/docs/manual/reference/command/listCollections) + */ + @JvmName("listCollectionsAsDocument") + public fun listCollections(): ListCollectionsIterable<Document> = listCollections<Document>()
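A minimal sketch of the `runCommand` overloads above (`ping` is a standard server command; the wrapper function is illustrative):

```kotlin
import org.bson.BsonDocument
import org.bson.BsonInt32
import org.bson.Document

fun pingExample(database: MongoDatabase) {
    // The plain overload decodes the server reply into a Document:
    val reply: Document = database.runCommand(BsonDocument("ping", BsonInt32(1)))

    // The reified overload decodes into any supported result class instead:
    val rawReply: BsonDocument = database.runCommand<BsonDocument>(BsonDocument("ping", BsonInt32(1)))
}
```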
+ + /** + * Gets all the collections in this database. + * + * @param clientSession the client session with which to associate this operation + * @return the list collections iterable interface + * @see [listCollections](https://www.mongodb.com/docs/manual/reference/command/listCollections) + */ + @JvmName("listCollectionsAsDocumentWithSession") + public fun listCollections(clientSession: ClientSession): ListCollectionsIterable<Document> = + listCollections<Document>(clientSession) + + /** + * Gets all the collections in this database. + * + * @param T the type of the class to use + * @param resultClass the target document type of the iterable. + * @return the list collections iterable interface + * @see [listCollections](https://www.mongodb.com/docs/manual/reference/command/listCollections) + */ + public fun <T : Any> listCollections(resultClass: Class<T>): ListCollectionsIterable<T> = + ListCollectionsIterable(wrapped.listCollections(resultClass)) + + /** + * Gets all the collections in this database. + * + * @param T the type of the class to use + * @param clientSession the client session with which to associate this operation + * @param resultClass the target document type of the iterable. + * @return the list collections iterable interface + * @see [listCollections](https://www.mongodb.com/docs/manual/reference/command/listCollections) + */ + public fun <T : Any> listCollections( + clientSession: ClientSession, + resultClass: Class<T> + ): ListCollectionsIterable<T> = ListCollectionsIterable(wrapped.listCollections(clientSession.wrapped, resultClass)) + + /** + * Gets all the collections in this database. + * + * @param T the type of the class to use + * @return the list collections iterable interface + * @see [listCollections](https://www.mongodb.com/docs/manual/reference/command/listCollections) + */ + public inline fun <reified T : Any> listCollections(): ListCollectionsIterable<T> = listCollections(T::class.java) + + /** + * Gets all the collections in this database.
+ * + * @param clientSession the client session with which to associate this operation + * @param T the type of the class to use + * @return the list collections iterable interface + * @see [listCollections](https://www.mongodb.com/docs/manual/reference/command/listCollections) + */ + public inline fun <reified T : Any> listCollections(clientSession: ClientSession): ListCollectionsIterable<T> = + listCollections(clientSession, T::class.java) + + /** + * Create a new collection with the selected options + * + * @param collectionName the name for the new collection to create + * @param createCollectionOptions various options for creating the collection + * @see [Create Command](https://www.mongodb.com/docs/manual/reference/command/create) + */ + public fun createCollection( + collectionName: String, + createCollectionOptions: CreateCollectionOptions = CreateCollectionOptions() + ): Unit = wrapped.createCollection(collectionName, createCollectionOptions) + + /** + * Create a new collection with the selected options + * + * @param clientSession the client session with which to associate this operation + * @param collectionName the name for the new collection to create + * @param createCollectionOptions various options for creating the collection + * @see [Create Command](https://www.mongodb.com/docs/manual/reference/command/create) + */ + public fun createCollection( + clientSession: ClientSession, + collectionName: String, + createCollectionOptions: CreateCollectionOptions = CreateCollectionOptions() + ): Unit = wrapped.createCollection(clientSession.wrapped, collectionName, createCollectionOptions) + + /** + * Creates a view with the given name, backing collection/view name, aggregation pipeline, and options that define + * the view. + * + * @param viewName the name of the view to create + * @param viewOn the backing collection/view for the view + * @param pipeline the pipeline that defines the view + * @param createViewOptions various options for creating the view + * @see [Create Command](https://www.mongodb.com/docs/manual/reference/command/create) + */ + public fun createView( + viewName: String, + viewOn: String, + pipeline: List<Bson>, + createViewOptions: CreateViewOptions = CreateViewOptions() + ): Unit = wrapped.createView(viewName, viewOn, pipeline, createViewOptions) + + /** + * Creates a view with the given name, backing collection/view name, aggregation pipeline, and options that define + * the view. + * + * @param clientSession the client session with which to associate this operation + * @param viewName the name of the view to create + * @param viewOn the backing collection/view for the view + * @param pipeline the pipeline that defines the view + * @param createViewOptions various options for creating the view + * @see [Create Command](https://www.mongodb.com/docs/manual/reference/command/create) + */ + public fun createView( + clientSession: ClientSession, + viewName: String, + viewOn: String, + pipeline: List<Bson>, + createViewOptions: CreateViewOptions = CreateViewOptions() + ): Unit = wrapped.createView(clientSession.wrapped, viewName, viewOn, pipeline, createViewOptions)
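A sketch of defining a view with `createView` (the view and collection names, and the filter, are illustrative assumptions):

```kotlin
import com.mongodb.client.model.Aggregates
import com.mongodb.client.model.Filters

fun createRecentMoviesView(database: MongoDatabase) {
    // A view is a read-only, pipeline-defined "virtual" collection backed by another collection.
    database.createView(
        viewName = "recentMovies",
        viewOn = "movies",
        pipeline = listOf(Aggregates.match(Filters.gte("year", 2000))))
}
```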
+ + /** + * Runs an aggregation framework pipeline on the database for pipeline stages that do not require an underlying + * collection, such as `$currentOp` and `$listLocalSessions`. + * + * @param pipeline the aggregation pipeline + * @return an iterable containing the result of the aggregation operation + * @see [Aggregate Command](https://www.mongodb.com/docs/manual/reference/command/aggregate/#dbcmd.aggregate) + */ + @JvmName("aggregateAsDocument") + public fun aggregate(pipeline: List<Bson>): AggregateIterable<Document> = aggregate<Document>(pipeline) + + /** + * Runs an aggregation framework pipeline on the database for pipeline stages that do not require an underlying + * collection, such as `$currentOp` and `$listLocalSessions`. + * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline + * @return an iterable containing the result of the aggregation operation + * @see [Aggregate Command](https://www.mongodb.com/docs/manual/reference/command/aggregate/#dbcmd.aggregate) + */ + @JvmName("aggregateAsDocumentWithSession") + public fun aggregate(clientSession: ClientSession, pipeline: List<Bson>): AggregateIterable<Document> = + aggregate<Document>(clientSession, pipeline) + + /** + * Runs an aggregation framework pipeline on the database for pipeline stages that do not require an underlying + * collection, such as `$currentOp` and `$listLocalSessions`. + * + * @param T the class to decode each document into + * @param pipeline the aggregation pipeline + * @param resultClass the target document type of the iterable. + * @return an iterable containing the result of the aggregation operation + * @see [Aggregate Command](https://www.mongodb.com/docs/manual/reference/command/aggregate/#dbcmd.aggregate) + */ + public fun <T : Any> aggregate(pipeline: List<Bson>, resultClass: Class<T>): AggregateIterable<T> = + AggregateIterable(wrapped.aggregate(pipeline, resultClass)) + + /** + * Runs an aggregation framework pipeline on the database for pipeline stages that do not require an underlying + * collection, such as `$currentOp` and `$listLocalSessions`. + * + * @param T the class to decode each document into + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline + * @param resultClass the target document type of the iterable. + * @return an iterable containing the result of the aggregation operation + * @see [Aggregate Command](https://www.mongodb.com/docs/manual/reference/command/aggregate/#dbcmd.aggregate) + */ + public fun <T : Any> aggregate( + clientSession: ClientSession, + pipeline: List<Bson>, + resultClass: Class<T> + ): AggregateIterable<T> = AggregateIterable(wrapped.aggregate(clientSession.wrapped, pipeline, resultClass)) + + /** + * Runs an aggregation framework pipeline on the database for pipeline stages that do not require an underlying + * collection, such as `$currentOp` and `$listLocalSessions`. + * + * @param T the class to decode each document into + * @param pipeline the aggregation pipeline + * @return an iterable containing the result of the aggregation operation + * @see [Aggregate Command](https://www.mongodb.com/docs/manual/reference/command/aggregate/#dbcmd.aggregate) + */ + public inline fun <reified T : Any> aggregate(pipeline: List<Bson>): AggregateIterable<T> = + aggregate(pipeline, T::class.java)
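A sketch of a database-level aggregation (assuming a client handle; `$listLocalSessions` must be the first stage and runs against the `admin` database):

```kotlin
import org.bson.Document

fun printLocalSessions(client: MongoClient) {
    val admin = client.getDatabase("admin")
    admin.aggregate<Document>(listOf(Document("\$listLocalSessions", Document())))
        .forEach { println(it.toJson()) }
}
```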
+ + /** + * Runs an aggregation framework pipeline on the database for pipeline stages that do not require an underlying + * collection, such as `$currentOp` and `$listLocalSessions`. + * + * @param T the class to decode each document into + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline + * @return an iterable containing the result of the aggregation operation + * @see [Aggregate Command](https://www.mongodb.com/docs/manual/reference/command/aggregate/#dbcmd.aggregate) + */ + public inline fun <reified T : Any> aggregate( + clientSession: ClientSession, + pipeline: List<Bson> + ): AggregateIterable<T> = aggregate(clientSession, pipeline, T::class.java) + + /** + * Creates a change stream for this database. + * + * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. + * @return the change stream iterable + * @see [Change Streams](https://dochub.mongodb.org/changestreams) + */ + @JvmName("watchAsDocument") + public fun watch(pipeline: List<Bson> = emptyList()): ChangeStreamIterable<Document> = watch<Document>(pipeline) + + /** + * Creates a change stream for this database. + * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. + * @return the change stream iterable + * @see [Change Streams](https://dochub.mongodb.org/changestreams) + */ + @JvmName("watchAsDocumentWithSession") + public fun watch(clientSession: ClientSession, pipeline: List<Bson> = emptyList()): ChangeStreamIterable<Document> = + watch<Document>(clientSession, pipeline) + + /** + * Creates a change stream for this database. + * + * @param T the target document type of the iterable. + * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. + * @param resultClass the target document type of the iterable. + * @return the change stream iterable + * @see [Change Streams](https://dochub.mongodb.org/changestreams) + */ + public fun <T : Any> watch(pipeline: List<Bson> = emptyList(), resultClass: Class<T>): ChangeStreamIterable<T> = + ChangeStreamIterable(wrapped.watch(pipeline, resultClass)) + + /** + * Creates a change stream for this database. + * + * @param T the target document type of the iterable. + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. + * @param resultClass the target document type of the iterable. + * @return the change stream iterable + * @see [Change Streams](https://dochub.mongodb.org/changestreams) + */ + public fun <T : Any> watch( + clientSession: ClientSession, + pipeline: List<Bson> = emptyList(), + resultClass: Class<T> + ): ChangeStreamIterable<T> = ChangeStreamIterable(wrapped.watch(clientSession.wrapped, pipeline, resultClass)) + + /** + * Creates a change stream for this database. + * + * @param T the target document type of the iterable. + * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. + * @return the change stream iterable + * @see [Change Streams](https://dochub.mongodb.org/changestreams) + */ + public inline fun <reified T : Any> watch(pipeline: List<Bson> = emptyList()): ChangeStreamIterable<T> = + watch(pipeline, T::class.java)
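A sketch of consuming a database change stream via `watch` (blocking iteration; the one-event limit is an illustrative assumption):

```kotlin
import com.mongodb.client.model.changestream.FullDocument
import org.bson.Document

fun watchDatabase(database: MongoDatabase) {
    database.watch<Document>()
        .fullDocument(FullDocument.UPDATE_LOOKUP) // include the post-image for update events
        .use { events -> events.take(1).forEach(::println) } // blocks until an event arrives
}
```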
+ + /** + * Creates a change stream for this database. + * + * @param T the target document type of the iterable. + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream, defaults to an empty pipeline. + * @return the change stream iterable + * @see [Change Streams](https://dochub.mongodb.org/changestreams) + */ + public inline fun <reified T : Any> watch( + clientSession: ClientSession, + pipeline: List<Bson> = emptyList() + ): ChangeStreamIterable<T> = watch(clientSession, pipeline, T::class.java) +} + +/** + * expireAfter extension function + * + * @param maxTime time in seconds + * @return the options + */ +public fun CreateCollectionOptions.expireAfter(maxTime: Long): CreateCollectionOptions = + this.apply { expireAfter(maxTime, TimeUnit.SECONDS) } diff --git a/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoIterable.kt b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoIterable.kt new file mode 100644 index 00000000000..b3c37d05d43 --- /dev/null +++ b/driver-kotlin-sync/src/main/kotlin/com/mongodb/kotlin/client/MongoIterable.kt @@ -0,0 +1,90 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.kotlin.client + +import com.mongodb.MongoClientException +import com.mongodb.client.MongoIterable as JMongoIterable + +/** + * The MongoIterable is the results from an operation, such as a query. + * + * @param T The type that this iterable will decode documents to. + */ +public open class MongoIterable<T : Any>(private val delegate: JMongoIterable<T>) { + + /** + * Returns a cursor used for iterating over elements of type `T`. The cursor is primarily used for change streams. + * + * Note: Care must be taken to ensure the returned [MongoCursor] is closed after use. + * + * @return a cursor + */ + public open fun cursor(): MongoCursor<T> = MongoCursorImpl(delegate.cursor()) + + /** @return the first item or null */ + public fun firstOrNull(): T? = delegate.first() + + /** @return the first item or throw a [MongoClientException] if no results are available */ + public fun first(): T = firstOrNull() ?: throw MongoClientException("No results available") + + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size + * @return this + * @see [Batch Size](https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize) + */ + public open fun batchSize(batchSize: Int): MongoIterable<T> = apply { delegate.batchSize(batchSize) } + + /** + * Creates a new cursor and treats it as a [Sequence], invokes the given [consumer] function and closes the cursor + * down correctly whether an exception is thrown or not. + * + * This allows the [MongoIterable] to be safely treated as a lazily evaluated sequence. + * + * Note: Sequence filters and aggregations have a performance cost; it is best to use server-side filters and + * aggregations where available. + * + * @param R the result type + * @param consumer the sequence consumer + * @return the result of the consumer + */ + public fun <R> use(consumer: (Sequence<T>) -> R): R = cursor().use { consumer.invoke(it.asSequence()) }
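A sketch of the `use` pattern described above (the `"title"` field and ten-item cap are illustrative assumptions):

```kotlin
import org.bson.Document

fun firstTenTitles(collection: MongoCollection<Document>): List<String> =
    // `use` opens the cursor, exposes it as a lazy Sequence, and closes it whether
    // or not the consumer throws; terminal operations must run inside the block.
    collection.find().use { seq ->
        seq.take(10).mapNotNull { it.getString("title") }.toList()
    }
```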
+ + /** + * Maps this iterable from the source document type to the target document type. + * + * @param R the result document type + * @param transform a function that maps from the source to the target document type + * @return an iterable which maps T to R + */ + public fun <R : Any> map(transform: (T) -> R): MongoIterable<R> = MongoIterable(delegate.map(transform)) + + /** Performs the given [action] on each element and safely closes the cursor. */ + public fun forEach(action: (T) -> Unit): Unit = use { it.forEach(action) } + + /** + * Appends all elements to the given [destination] collection. + * + * @param C the type of the collection + * @param destination the destination collection + * @return the collection + */ + public fun <C : MutableCollection<in T>> toCollection(destination: C): C = use { it.toCollection(destination) } + + /** @return a [List] containing all elements. */ + public fun toList(): List<T> = toCollection(ArrayList<T>()).toList() +}
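And a short sketch of `map` composing with `toList` (the field name is an illustrative assumption):

```kotlin
import org.bson.Document

fun titleLengths(collection: MongoCollection<Document>): List<Int> =
    // map decorates the underlying iterable; the transform is applied lazily as
    // documents are pulled from the cursor, and toList then drains it safely.
    collection.find()
        .map { it.getString("title")?.length ?: 0 }
        .toList()
```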
diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/AggregateIterableTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/AggregateIterableTest.kt new file mode 100644 index 00000000000..83ca9bce0ad --- /dev/null +++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/AggregateIterableTest.kt @@ -0,0 +1,109 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.kotlin.client + +import com.mongodb.ExplainVerbosity +import com.mongodb.client.AggregateIterable as JAggregateIterable +import com.mongodb.client.cursor.TimeoutMode +import com.mongodb.client.model.Collation +import java.util.concurrent.TimeUnit +import kotlin.reflect.full.declaredFunctions +import kotlin.test.assertEquals +import org.bson.BsonDocument +import org.bson.BsonString +import org.bson.Document +import org.junit.jupiter.api.Test +import org.mockito.kotlin.doReturn +import org.mockito.kotlin.mock +import org.mockito.kotlin.times +import org.mockito.kotlin.verify +import org.mockito.kotlin.verifyNoMoreInteractions +import org.mockito.kotlin.whenever + +class AggregateIterableTest { + + @Test + fun shouldHaveTheSameMethods() { + val jAggregateIterableFunctions = + JAggregateIterable::class.declaredFunctions.map { it.name }.toSet() - "iterator" + val kAggregateIterableFunctions = AggregateIterable::class.declaredFunctions.map { it.name }.toSet() + + assertEquals(jAggregateIterableFunctions, kAggregateIterableFunctions) + } + + @Test + fun shouldCallTheUnderlyingMethods() { + val wrapped: JAggregateIterable<Document> = mock() + val iterable = AggregateIterable(wrapped) + + val batchSize = 10 + val bson = BsonDocument() + val bsonComment = BsonString("a comment") + val collation = Collation.builder().locale("en").build() + val comment = "comment" + val hint = Document("h", 1) + val hintString = "hintString" + val verbosity = ExplainVerbosity.QUERY_PLANNER + + whenever(wrapped.explain(Document::class.java)).doReturn(mock()) + whenever(wrapped.explain(Document::class.java, verbosity)).doReturn(mock()) + whenever(wrapped.explain(BsonDocument::class.java, verbosity)).doReturn(mock()) + + iterable.allowDiskUse(true) + iterable.batchSize(batchSize) + iterable.bypassDocumentValidation(true) + iterable.collation(collation) + iterable.comment(bsonComment) + iterable.comment(comment) + iterable.explain() + iterable.explain(verbosity) + iterable.explain(Document::class.java) + iterable.explain(BsonDocument::class.java, verbosity) + iterable.explain<Document>() + iterable.explain<BsonDocument>(verbosity) + iterable.hint(hint) + iterable.hintString(hintString) + iterable.let(bson) + iterable.maxAwaitTime(1) + iterable.maxAwaitTime(1, TimeUnit.SECONDS) + iterable.maxTime(1) + iterable.maxTime(1, TimeUnit.SECONDS) + iterable.timeoutMode(TimeoutMode.ITERATION) + + verify(wrapped).allowDiskUse(true) + verify(wrapped).batchSize(batchSize) + verify(wrapped).bypassDocumentValidation(true) + verify(wrapped).collation(collation) + verify(wrapped).comment(bsonComment) + verify(wrapped).comment(comment) + verify(wrapped, times(3)).explain(Document::class.java) + verify(wrapped, times(1)).explain(Document::class.java, verbosity) + verify(wrapped, times(2)).explain(BsonDocument::class.java, verbosity) + verify(wrapped).hint(hint) + verify(wrapped).hintString(hintString) + verify(wrapped).maxAwaitTime(1, TimeUnit.MILLISECONDS) + verify(wrapped).maxAwaitTime(1, TimeUnit.SECONDS) + verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS) + verify(wrapped).maxTime(1, TimeUnit.SECONDS) + verify(wrapped).let(bson) + verify(wrapped).timeoutMode(TimeoutMode.ITERATION) + + iterable.toCollection() + verify(wrapped).toCollection() + + verifyNoMoreInteractions(wrapped) + } +} diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ChangeStreamIterableTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ChangeStreamIterableTest.kt new file mode 100644 index 00000000000..53d56485c21 --- /dev/null +++
b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ChangeStreamIterableTest.kt @@ -0,0 +1,93 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.kotlin.client + +import com.mongodb.client.ChangeStreamIterable as JChangeStreamIterable +import com.mongodb.client.model.Collation +import com.mongodb.client.model.changestream.FullDocument +import com.mongodb.client.model.changestream.FullDocumentBeforeChange +import java.util.concurrent.TimeUnit +import kotlin.reflect.full.declaredFunctions +import kotlin.test.assertEquals +import org.bson.BsonDocument +import org.bson.BsonString +import org.bson.BsonTimestamp +import org.bson.Document +import org.junit.jupiter.api.Test +import org.mockito.kotlin.doReturn +import org.mockito.kotlin.mock +import org.mockito.kotlin.verify +import org.mockito.kotlin.verifyNoMoreInteractions +import org.mockito.kotlin.whenever + +class ChangeStreamIterableTest { + + @Test + fun shouldHaveTheSameMethods() { + val jChangeStreamIterableFunctions = JChangeStreamIterable::class.declaredFunctions.map { it.name }.toSet() + val kChangeStreamIterableFunctions = ChangeStreamIterable::class.declaredFunctions.map { it.name }.toSet() + + assertEquals(jChangeStreamIterableFunctions, kChangeStreamIterableFunctions) + } + + @Test + fun shouldCallTheUnderlyingMethods() { + val wrapped: JChangeStreamIterable<Document> = mock() + val iterable = ChangeStreamIterable(wrapped) + + val batchSize = 10 + val bsonComment = BsonString("a comment") + val collation = Collation.builder().locale("en").build() + val comment = "comment" + val operationTime = BsonTimestamp(1) + val resumeToken = BsonDocument() + + whenever(wrapped.withDocumentClass(BsonDocument::class.java)).doReturn(mock()) + whenever(wrapped.cursor()).doReturn(mock()) + + iterable.batchSize(batchSize) + iterable.collation(collation) + iterable.comment(comment) + iterable.comment(bsonComment) + iterable.cursor() + iterable.fullDocument(FullDocument.UPDATE_LOOKUP) + iterable.fullDocumentBeforeChange(FullDocumentBeforeChange.REQUIRED) + iterable.maxAwaitTime(1) + iterable.maxAwaitTime(1, TimeUnit.SECONDS) + iterable.resumeAfter(resumeToken) + iterable.showExpandedEvents(true) + iterable.startAfter(resumeToken) + iterable.startAtOperationTime(operationTime) + iterable.withDocumentClass<BsonDocument>() + + verify(wrapped).batchSize(batchSize) + verify(wrapped).collation(collation) + verify(wrapped).comment(comment) + verify(wrapped).comment(bsonComment) + verify(wrapped).cursor() + verify(wrapped).fullDocument(FullDocument.UPDATE_LOOKUP) + verify(wrapped).fullDocumentBeforeChange(FullDocumentBeforeChange.REQUIRED) + verify(wrapped).maxAwaitTime(1, TimeUnit.MILLISECONDS) + verify(wrapped).maxAwaitTime(1, TimeUnit.SECONDS) + verify(wrapped).resumeAfter(resumeToken) + verify(wrapped).showExpandedEvents(true) + verify(wrapped).startAfter(resumeToken) + verify(wrapped).startAtOperationTime(operationTime) + verify(wrapped).withDocumentClass(BsonDocument::class.java) + +
verifyNoMoreInteractions(wrapped) + } +} diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ClientSessionTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ClientSessionTest.kt new file mode 100644 index 00000000000..c3c4772f9d6 --- /dev/null +++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ClientSessionTest.kt @@ -0,0 +1,99 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.kotlin.client + +import com.mongodb.ClientSessionOptions +import com.mongodb.TransactionOptions +import com.mongodb.client.ClientSession as JClientSession +import kotlin.reflect.full.declaredMemberProperties +import kotlin.reflect.full.functions +import kotlin.test.assertEquals +import org.junit.jupiter.api.Test +import org.mockito.kotlin.doReturn +import org.mockito.kotlin.mock +import org.mockito.kotlin.verify +import org.mockito.kotlin.verifyNoMoreInteractions +import org.mockito.kotlin.whenever + +class ClientSessionTest { + + @Test + fun shouldHaveTheSameMethods() { + val internalFunctions = + setOf( + "advanceClusterTime", + "advanceOperationTime", + "clearTransactionContext", + "getClusterTime", + "getOperationTime", + "getOriginator", + "getPinnedServerAddress", + "getRecoveryToken", + "getServerSession", + "getSnapshotTimestamp", + "getTransactionContext", + "getTimeoutContext", + "notifyMessageSent", + "notifyOperationInitiated", + "setRecoveryToken", + "setSnapshotTimestamp", + "setTransactionContext") + + val jClientSessionFunctions = JClientSession::class.functions.map { it.name }.toSet() - internalFunctions + val kClientSessionFunctions = + ClientSession::class.functions.map { it.name }.toSet() + + ClientSession::class + .declaredMemberProperties + .filterNot { it.name == "wrapped" } + .map { + if (it.name.startsWith("is") || it.name.startsWith("has")) it.name + else "get${it.name.replaceFirstChar { c -> c.uppercaseChar()}}" + } + + assertEquals(jClientSessionFunctions, kClientSessionFunctions) + } + + @Test + fun shouldCallTheUnderlyingMethods() { + val wrapped: JClientSession = mock() + val session = ClientSession(wrapped) + + val transactionOptions = TransactionOptions.builder().maxCommitTime(10).build() + + whenever(wrapped.options).doReturn(ClientSessionOptions.builder().build()) + whenever(wrapped.isCausallyConsistent).doReturn(true) + whenever(wrapped.transactionOptions).doReturn(transactionOptions) + + session.options + session.isCausallyConsistent + session.startTransaction() + session.startTransaction(transactionOptions) + session.transactionOptions + + verify(wrapped).options + verify(wrapped).isCausallyConsistent + verify(wrapped).startTransaction() + verify(wrapped).startTransaction(transactionOptions) + verify(wrapped).transactionOptions + + session.abortTransaction() + session.commitTransaction() + + verify(wrapped).abortTransaction() + verify(wrapped).commitTransaction() + verifyNoMoreInteractions(wrapped) + } +} diff --git 
a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/DistinctIterableTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/DistinctIterableTest.kt new file mode 100644 index 00000000000..91f5e9b6f44 --- /dev/null +++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/DistinctIterableTest.kt @@ -0,0 +1,73 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.kotlin.client + +import com.mongodb.client.DistinctIterable as JDistinctIterable +import com.mongodb.client.cursor.TimeoutMode +import com.mongodb.client.model.Collation +import java.util.concurrent.TimeUnit +import kotlin.reflect.full.declaredFunctions +import kotlin.test.assertEquals +import org.bson.BsonDocument +import org.bson.BsonString +import org.bson.Document +import org.junit.jupiter.api.Test +import org.mockito.kotlin.mock +import org.mockito.kotlin.verify +import org.mockito.kotlin.verifyNoMoreInteractions + +class DistinctIterableTest { + @Test + fun shouldHaveTheSameMethods() { + val jDistinctIterableFunctions = + JDistinctIterable::class.declaredFunctions.map { it.name }.toSet() + "timeoutMode" + val kDistinctIterableFunctions = DistinctIterable::class.declaredFunctions.map { it.name }.toSet() + + assertEquals(jDistinctIterableFunctions, kDistinctIterableFunctions) + } + + @Test + fun shouldCallTheUnderlyingMethods() { + val wrapped: JDistinctIterable<Document> = mock() + val iterable = DistinctIterable(wrapped) + + val batchSize = 10 + val bsonComment = BsonString("a comment") + val collation = Collation.builder().locale("en").build() + val comment = "comment" + val filter = BsonDocument() + + iterable.batchSize(batchSize) + iterable.collation(collation) + iterable.comment(bsonComment) + iterable.comment(comment) + iterable.filter(filter) + iterable.maxTime(1) + iterable.maxTime(1, TimeUnit.SECONDS) + iterable.timeoutMode(TimeoutMode.ITERATION) + + verify(wrapped).batchSize(batchSize) + verify(wrapped).collation(collation) + verify(wrapped).comment(bsonComment) + verify(wrapped).comment(comment) + verify(wrapped).filter(filter) + verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS) + verify(wrapped).maxTime(1, TimeUnit.SECONDS) + verify(wrapped).timeoutMode(TimeoutMode.ITERATION) + + verifyNoMoreInteractions(wrapped) + } +} diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ExtensionMethodsTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ExtensionMethodsTest.kt new file mode 100644 index 00000000000..29374ff5c6b --- /dev/null +++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ExtensionMethodsTest.kt @@ -0,0 +1,58 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.kotlin.client + +import io.github.classgraph.ClassGraph +import kotlin.test.assertEquals +import org.junit.jupiter.api.Test + +class ExtensionMethodsTest { + + @Test + fun shouldHaveTimeUnitExtensionsMethodsForOptionsClasses() { + + val extensionsAddedForClasses = + setOf( + "CountOptions", + "CreateCollectionOptions", + "CreateIndexOptions", + "ClientSessionOptions", + "DropIndexOptions", + "EstimatedDocumentCountOptions", + "FindOneAndDeleteOptions", + "FindOneAndReplaceOptions", + "FindOneAndUpdateOptions", + "IndexOptions", + "TransactionOptions", + "TimeSeriesOptions") + + ClassGraph().enableClassInfo().enableMethodInfo().acceptPackages("com.mongodb").scan().use { scanResult -> + val optionsClassesWithTimeUnit = + scanResult.allClasses + .filter { !it.packageName.contains("internal") } + .filter { it.simpleName.endsWith("Options") } + .filter { + it.methodInfo.any { m -> + m.parameterInfo.any { p -> p.typeDescriptor.toStringWithSimpleNames().equals("TimeUnit") } + } + } + .map { c -> c.simpleName } + .toSet() + + assertEquals(extensionsAddedForClasses, optionsClassesWithTimeUnit) + } + } +} diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/FindIterableTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/FindIterableTest.kt new file mode 100644 index 00000000000..0f4b2725b2e --- /dev/null +++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/FindIterableTest.kt @@ -0,0 +1,123 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.kotlin.client + +import com.mongodb.CursorType +import com.mongodb.ExplainVerbosity +import com.mongodb.client.FindIterable as JFindIterable +import com.mongodb.client.cursor.TimeoutMode +import com.mongodb.client.model.Collation +import java.util.concurrent.TimeUnit +import kotlin.reflect.full.declaredFunctions +import kotlin.test.assertEquals +import org.bson.BsonDocument +import org.bson.BsonString +import org.bson.Document +import org.junit.jupiter.api.Test +import org.mockito.kotlin.* + +class FindIterableTest { + @Test + fun shouldHaveTheSameMethods() { + val jFindIterableFunctions = JFindIterable::class.declaredFunctions.map { it.name }.toSet() + "timeoutMode" + val kFindIterableFunctions = FindIterable::class.declaredFunctions.map { it.name }.toSet() + + assertEquals(jFindIterableFunctions, kFindIterableFunctions) + } + + @Test + fun shouldCallTheUnderlyingMethods() { + val wrapped: JFindIterable<Document> = mock() + val iterable = FindIterable(wrapped) + + val batchSize = 10 + val bson = BsonDocument() + val bsonComment = BsonString("a comment") + val collation = Collation.builder().locale("en").build() + val comment = "comment" + val filter = BsonDocument() + val hint = Document("h", 1) + val hintString = "hintString" + val verbosity = ExplainVerbosity.QUERY_PLANNER + + whenever(wrapped.explain(Document::class.java)).doReturn(mock()) + whenever(wrapped.explain(Document::class.java, verbosity)).doReturn(mock()) + whenever(wrapped.explain(BsonDocument::class.java, verbosity)).doReturn(mock()) + + iterable.allowDiskUse(true) + iterable.batchSize(batchSize) + iterable.collation(collation) + iterable.comment(bsonComment) + iterable.comment(comment) + iterable.cursorType(CursorType.NonTailable) + iterable.explain() + iterable.explain(verbosity) + iterable.explain(Document::class.java) + iterable.explain(BsonDocument::class.java, verbosity) + iterable.explain<Document>() + iterable.explain<BsonDocument>(verbosity) + iterable.filter(filter) + iterable.hint(hint) + iterable.hintString(hintString) + iterable.let(bson) + iterable.limit(1) + iterable.max(bson) + iterable.maxAwaitTime(1) + iterable.maxAwaitTime(1, TimeUnit.SECONDS) + iterable.maxTime(1) + iterable.maxTime(1, TimeUnit.SECONDS) + iterable.min(bson) + iterable.noCursorTimeout(true) + iterable.partial(true) + iterable.projection(bson) + iterable.returnKey(true) + iterable.showRecordId(true) + iterable.skip(1) + iterable.sort(bson) + iterable.timeoutMode(TimeoutMode.ITERATION) + + verify(wrapped).allowDiskUse(true) + verify(wrapped).batchSize(batchSize) + verify(wrapped).collation(collation) + verify(wrapped).comment(bsonComment) + verify(wrapped).comment(comment) + verify(wrapped).cursorType(CursorType.NonTailable) + verify(wrapped, times(3)).explain(Document::class.java) + verify(wrapped, times(1)).explain(Document::class.java, verbosity) + verify(wrapped, times(2)).explain(BsonDocument::class.java, verbosity) + verify(wrapped).filter(filter) + verify(wrapped).hint(hint) + verify(wrapped).hintString(hintString) + verify(wrapped).let(bson) + verify(wrapped).limit(1) + verify(wrapped).max(bson) + verify(wrapped).maxAwaitTime(1, TimeUnit.MILLISECONDS) + verify(wrapped).maxAwaitTime(1, TimeUnit.SECONDS) + verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS) + verify(wrapped).maxTime(1, TimeUnit.SECONDS) + verify(wrapped).min(bson) + verify(wrapped).noCursorTimeout(true) + verify(wrapped).partial(true) + verify(wrapped).projection(bson) + verify(wrapped).returnKey(true) + verify(wrapped).showRecordId(true) + verify(wrapped).skip(1) +
verify(wrapped).sort(bson) + verify(wrapped).timeoutMode(TimeoutMode.ITERATION) + + verifyNoMoreInteractions(wrapped) + } +} diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListCollectionNamesIterableTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListCollectionNamesIterableTest.kt new file mode 100644 index 00000000000..c5466a62e60 --- /dev/null +++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListCollectionNamesIterableTest.kt @@ -0,0 +1,69 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.kotlin.client + +import com.mongodb.client.ListCollectionNamesIterable as JListCollectionNamesIterable +import java.util.concurrent.TimeUnit +import kotlin.reflect.full.declaredFunctions +import kotlin.test.assertEquals +import org.bson.BsonDocument +import org.bson.BsonString +import org.junit.jupiter.api.Test +import org.mockito.kotlin.mock +import org.mockito.kotlin.verify +import org.mockito.kotlin.verifyNoMoreInteractions + +class ListCollectionNamesIterableTest { + @Test + fun shouldHaveTheSameMethods() { + val jListCollectionNamesIterableFunctions = + JListCollectionNamesIterable::class.declaredFunctions.map { it.name }.toSet() + val kListCollectionNamesIterableFunctions = + ListCollectionNamesIterable::class.declaredFunctions.map { it.name }.toSet() + + assertEquals(jListCollectionNamesIterableFunctions, kListCollectionNamesIterableFunctions) + } + + @Test + fun shouldCallTheUnderlyingMethods() { + val wrapped: JListCollectionNamesIterable = mock() + val iterable = ListCollectionNamesIterable(wrapped) + + val batchSize = 10 + val authorizedCollections = true + val bsonComment = BsonString("a comment") + val comment = "comment" + val filter = BsonDocument() + + iterable.batchSize(batchSize) + iterable.authorizedCollections(authorizedCollections) + iterable.comment(bsonComment) + iterable.comment(comment) + iterable.filter(filter) + iterable.maxTime(1) + iterable.maxTime(1, TimeUnit.SECONDS) + + verify(wrapped).batchSize(batchSize) + verify(wrapped).authorizedCollections(authorizedCollections) + verify(wrapped).comment(bsonComment) + verify(wrapped).comment(comment) + verify(wrapped).filter(filter) + verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS) + verify(wrapped).maxTime(1, TimeUnit.SECONDS) + + verifyNoMoreInteractions(wrapped) + } +} diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListCollectionsIterableTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListCollectionsIterableTest.kt new file mode 100644 index 00000000000..26dd071768c --- /dev/null +++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListCollectionsIterableTest.kt @@ -0,0 +1,69 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.kotlin.client + +import com.mongodb.client.ListCollectionsIterable as JListCollectionsIterable +import com.mongodb.client.cursor.TimeoutMode +import java.util.concurrent.TimeUnit +import kotlin.reflect.full.declaredFunctions +import kotlin.test.assertEquals +import org.bson.BsonDocument +import org.bson.BsonString +import org.bson.Document +import org.junit.jupiter.api.Test +import org.mockito.kotlin.mock +import org.mockito.kotlin.verify +import org.mockito.kotlin.verifyNoMoreInteractions + +class ListCollectionsIterableTest { + @Test + fun shouldHaveTheSameMethods() { + val jListCollectionsIterableFunctions = + JListCollectionsIterable::class.declaredFunctions.map { it.name }.toSet() + val kListCollectionsIterableFunctions = ListCollectionsIterable::class.declaredFunctions.map { it.name }.toSet() + + assertEquals(jListCollectionsIterableFunctions, kListCollectionsIterableFunctions) + } + + @Test + fun shouldCallTheUnderlyingMethods() { + val wrapped: JListCollectionsIterable<Document> = mock() + val iterable = ListCollectionsIterable(wrapped) + + val batchSize = 10 + val bsonComment = BsonString("a comment") + val comment = "comment" + val filter = BsonDocument() + + iterable.batchSize(batchSize) + iterable.comment(bsonComment) + iterable.comment(comment) + iterable.filter(filter) + iterable.maxTime(1) + iterable.maxTime(1, TimeUnit.SECONDS) + iterable.timeoutMode(TimeoutMode.ITERATION) + + verify(wrapped).batchSize(batchSize) + verify(wrapped).comment(bsonComment) + verify(wrapped).comment(comment) + verify(wrapped).filter(filter) + verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS) + verify(wrapped).maxTime(1, TimeUnit.SECONDS) + verify(wrapped).timeoutMode(TimeoutMode.ITERATION) + + verifyNoMoreInteractions(wrapped) + } +} diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListDatabasesIterableTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListDatabasesIterableTest.kt new file mode 100644 index 00000000000..a1c95cad1a0 --- /dev/null +++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListDatabasesIterableTest.kt @@ -0,0 +1,73 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.mongodb.kotlin.client + +import com.mongodb.client.ListDatabasesIterable as JListDatabasesIterable +import com.mongodb.client.cursor.TimeoutMode +import java.util.concurrent.TimeUnit +import kotlin.reflect.full.declaredFunctions +import kotlin.test.assertEquals +import org.bson.BsonDocument +import org.bson.BsonString +import org.bson.Document +import org.junit.jupiter.api.Test +import org.mockito.kotlin.mock +import org.mockito.kotlin.verify +import org.mockito.kotlin.verifyNoMoreInteractions + +class ListDatabasesIterableTest { + @Test + fun shouldHaveTheSameMethods() { + val jListDatabasesIterableFunctions = + JListDatabasesIterable::class.declaredFunctions.map { it.name }.toSet() + "timeoutMode" + val kListDatabasesIterableFunctions = ListDatabasesIterable::class.declaredFunctions.map { it.name }.toSet() + + assertEquals(jListDatabasesIterableFunctions, kListDatabasesIterableFunctions) + } + + @Test + fun shouldCallTheUnderlyingMethods() { + val wrapped: JListDatabasesIterable<Document> = mock() + val iterable = ListDatabasesIterable(wrapped) + + val batchSize = 10 + val bsonComment = BsonString("a comment") + val comment = "comment" + val filter = BsonDocument() + + iterable.authorizedDatabasesOnly(true) + iterable.batchSize(batchSize) + iterable.comment(bsonComment) + iterable.comment(comment) + iterable.filter(filter) + iterable.maxTime(1) + iterable.maxTime(1, TimeUnit.SECONDS) + iterable.nameOnly(true) + iterable.timeoutMode(TimeoutMode.ITERATION) + + verify(wrapped).authorizedDatabasesOnly(true) + verify(wrapped).batchSize(batchSize) + verify(wrapped).comment(bsonComment) + verify(wrapped).comment(comment) + verify(wrapped).filter(filter) + verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS) + verify(wrapped).maxTime(1, TimeUnit.SECONDS) + verify(wrapped).nameOnly(true) + verify(wrapped).timeoutMode(TimeoutMode.ITERATION) + + verifyNoMoreInteractions(wrapped) + } +} diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListIndexesIterableTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListIndexesIterableTest.kt new file mode 100644 index 00000000000..08bd5b4e685 --- /dev/null +++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/ListIndexesIterableTest.kt @@ -0,0 +1,65 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.mongodb.kotlin.client + +import com.mongodb.client.ListIndexesIterable as JListIndexesIterable +import com.mongodb.client.cursor.TimeoutMode +import java.util.concurrent.TimeUnit +import kotlin.reflect.full.declaredFunctions +import kotlin.test.assertEquals +import org.bson.BsonString +import org.bson.Document +import org.junit.jupiter.api.Test +import org.mockito.kotlin.mock +import org.mockito.kotlin.verify +import org.mockito.kotlin.verifyNoMoreInteractions + +class ListIndexesIterableTest { + @Test + fun shouldHaveTheSameMethods() { + val jListIndexesIterableFunctions = + JListIndexesIterable::class.declaredFunctions.map { it.name }.toSet() + "timeoutMode" + val kListIndexesIterableFunctions = ListIndexesIterable::class.declaredFunctions.map { it.name }.toSet() + + assertEquals(jListIndexesIterableFunctions, kListIndexesIterableFunctions) + } + + @Test + fun shouldCallTheUnderlyingMethods() { + val wrapped: JListIndexesIterable<Document> = mock() + val iterable = ListIndexesIterable(wrapped) + + val batchSize = 10 + val bsonComment = BsonString("a comment") + val comment = "comment" + + iterable.batchSize(batchSize) + iterable.comment(bsonComment) + iterable.comment(comment) + iterable.maxTime(1) + iterable.maxTime(1, TimeUnit.SECONDS) + iterable.timeoutMode(TimeoutMode.ITERATION) + + verify(wrapped).batchSize(batchSize) + verify(wrapped).comment(bsonComment) + verify(wrapped).comment(comment) + verify(wrapped).maxTime(1, TimeUnit.MILLISECONDS) + verify(wrapped).maxTime(1, TimeUnit.SECONDS) + verify(wrapped).timeoutMode(TimeoutMode.ITERATION) + + verifyNoMoreInteractions(wrapped) + } +} diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MockitoHelper.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MockitoHelper.kt new file mode 100644 index 00000000000..838a26f5dff --- /dev/null +++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MockitoHelper.kt @@ -0,0 +1,53 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.mongodb.kotlin.client + +import org.assertj.core.api.Assertions.assertThat +import org.mockito.ArgumentMatcher +import org.mockito.ArgumentMatchers.argThat + +/** Mockito test helper object */ +object MockitoHelper { + + /** + * Deep reflection comparison for complex nested objects + * + * The use case is comparing complex objects that don't have an `equals` method and contain nested complex + * properties that also lack `equals` implementations + * + * Example: + * ``` + * verify(wrapped).createCollection(eq(name), deepRefEq(defaultOptions)) + * ``` + * + * @param T the type of the value + * @param value the value + * @return the value + * @see [org.mockito.kotlin.refEq] + */ + fun <T> deepRefEq(value: T): T = argThat(DeepReflectionEqMatcher(value)) + + private class DeepReflectionEqMatcher<T>(private val expected: T) : ArgumentMatcher<T> { + override fun matches(argument: T): Boolean { + return try { + assertThat(argument).usingRecursiveComparison().isEqualTo(expected) + true + } catch (e: Throwable) { + false + } + } + } +} diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoChangeStreamCursorTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoChangeStreamCursorTest.kt new file mode 100644 index 00000000000..7bd6008df7c --- /dev/null +++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoChangeStreamCursorTest.kt @@ -0,0 +1,72 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.mongodb.kotlin.client + +import com.mongodb.ServerAddress +import com.mongodb.ServerCursor +import com.mongodb.client.MongoChangeStreamCursor as JMongoChangeStreamCursor +import kotlin.reflect.full.declaredFunctions +import kotlin.reflect.full.declaredMemberProperties +import kotlin.test.assertEquals +import org.bson.Document +import org.junit.jupiter.api.Test +import org.mockito.kotlin.doReturn +import org.mockito.kotlin.mock +import org.mockito.kotlin.verify +import org.mockito.kotlin.verifyNoMoreInteractions +import org.mockito.kotlin.whenever + +class MongoChangeStreamCursorTest { + @Test + fun shouldHaveTheSameMethods() { + val jMongoChangeStreamCursorFunctions = + JMongoChangeStreamCursor::class.declaredFunctions.map { it.name }.toSet() + val kMongoChangeStreamCursorFunctions = + MongoChangeStreamCursor::class.declaredFunctions.map { it.name }.toSet() + + MongoChangeStreamCursor::class + .declaredMemberProperties + .filterNot { it.name == "wrapped" } + .map { "get${it.name.replaceFirstChar{c -> c.uppercaseChar() }}" } + + assertEquals(jMongoChangeStreamCursorFunctions, kMongoChangeStreamCursorFunctions) + } + + @Test + fun shouldCallTheUnderlyingMethods() { + val wrapped: JMongoChangeStreamCursor<Document> = mock() + val cursor = MongoChangeStreamCursorImpl(wrapped) + + whenever(wrapped.resumeToken).doReturn(mock()) + whenever(wrapped.serverCursor).doReturn(ServerCursor(1, ServerAddress())) + whenever(wrapped.serverAddress).doReturn(mock()) + + cursor.serverCursor + cursor.serverAddress + cursor.hasNext() + cursor.tryNext() + cursor.available + cursor.resumeToken + + verify(wrapped).serverCursor + verify(wrapped).serverAddress + verify(wrapped).hasNext() + verify(wrapped).tryNext() + verify(wrapped).available() + verify(wrapped).resumeToken + + verifyNoMoreInteractions(wrapped) + } +} diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoClientTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoClientTest.kt new file mode 100644 index 00000000000..a6f67b22ce7 --- /dev/null +++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoClientTest.kt @@ -0,0 +1,215 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.mongodb.kotlin.client + +import com.mongodb.ClientSessionOptions +import com.mongodb.MongoDriverInformation +import com.mongodb.MongoNamespace +import com.mongodb.client.MongoClient as JMongoClient +import com.mongodb.client.model.bulk.ClientBulkWriteOptions +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel +import kotlin.reflect.full.declaredFunctions +import kotlin.reflect.full.declaredMemberProperties +import kotlin.test.assertEquals +import org.bson.BsonDocument +import org.bson.Document +import org.junit.jupiter.api.Test +import org.mockito.Mock +import org.mockito.kotlin.any +import org.mockito.kotlin.doReturn +import org.mockito.kotlin.mock +import org.mockito.kotlin.refEq +import org.mockito.kotlin.times +import org.mockito.kotlin.verify +import org.mockito.kotlin.verifyNoMoreInteractions +import org.mockito.kotlin.whenever + +class MongoClientTest { + + @Mock val wrapped: JMongoClient = mock() + @Mock val clientSession: ClientSession = ClientSession(mock()) + + @Test + fun shouldHaveTheSameMethods() { + val jMongoClientFunctions = JMongoClient::class.declaredFunctions.map { it.name }.toSet() + + val kMongoClientFunctions = + MongoClient::class.declaredFunctions.map { it.name }.toSet() + + MongoClient::class + .declaredMemberProperties + .filterNot { it.name == "wrapped" } + .map { "get${it.name.replaceFirstChar{c -> c.uppercaseChar() }}" } + + assertEquals(jMongoClientFunctions, kMongoClientFunctions) + } + + @Test + fun shouldCallTheUnderlyingClose() { + val mongoClient = MongoClient(wrapped) + mongoClient.close() + + verify(wrapped).close() + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingClusterDescription() { + val mongoClient = MongoClient(wrapped) + whenever(wrapped.clusterDescription).doReturn(mock()) + + mongoClient.clusterDescription + + verify(wrapped).clusterDescription + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingAppendMetadata() { + val mongoClient = MongoClient(wrapped) + + val mongoDriverInformation = + MongoDriverInformation.builder() + .driverName("kotlin") + .driverPlatform("kotlin/${KotlinVersion.CURRENT}") + .build() + + mongoClient.appendMetadata(mongoDriverInformation) + + verify(wrapped).appendMetadata(mongoDriverInformation) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingGetDatabase() { + val mongoClient = MongoClient(wrapped) + whenever(wrapped.getDatabase(any())).doReturn(mock()) + + mongoClient.getDatabase("dbName") + verify(wrapped).getDatabase("dbName") + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingStartSession() { + val mongoClient = MongoClient(wrapped) + val defaultOptions = ClientSessionOptions.builder().build() + val options = ClientSessionOptions.builder().causallyConsistent(true).build() + + whenever(wrapped.startSession(refEq(defaultOptions))).doReturn(mock()) + whenever(wrapped.startSession(options)).doReturn(mock()) + + mongoClient.startSession() + mongoClient.startSession(options) + + verify(wrapped).startSession(refEq(defaultOptions)) + verify(wrapped).startSession(options) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingListDatabaseNames() { + val mongoClient = MongoClient(wrapped) + whenever(wrapped.listDatabaseNames()).doReturn(mock()) + whenever(wrapped.listDatabaseNames(any())).doReturn(mock()) + + mongoClient.listDatabaseNames() + mongoClient.listDatabaseNames(clientSession) + + verify(wrapped).listDatabaseNames() +
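// The session overload should delegate with the unwrapped Java ClientSession. +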
verify(wrapped).listDatabaseNames(clientSession.wrapped) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingListDatabases() { + val mongoClient = MongoClient(wrapped) + whenever(wrapped.listDatabases(Document::class.java)).doReturn(mock()) + whenever(wrapped.listDatabases(clientSession.wrapped, Document::class.java)).doReturn(mock()) + whenever(wrapped.listDatabases(clientSession.wrapped, BsonDocument::class.java)).doReturn(mock()) + + mongoClient.listDatabases() + mongoClient.listDatabases(clientSession) + mongoClient.listDatabases(Document::class.java) + mongoClient.listDatabases(clientSession, BsonDocument::class.java) + mongoClient.listDatabases<Document>() + mongoClient.listDatabases<BsonDocument>(clientSession) + + verify(wrapped, times(3)).listDatabases(Document::class.java) + verify(wrapped, times(1)).listDatabases(clientSession.wrapped, Document::class.java) + verify(wrapped, times(2)).listDatabases(clientSession.wrapped, BsonDocument::class.java) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingWatch() { + val mongoClient = MongoClient(wrapped) + val pipeline = listOf(Document(mapOf("a" to 1))) + + whenever(wrapped.watch(emptyList(), Document::class.java)).doReturn(mock()) + whenever(wrapped.watch(pipeline, Document::class.java)).doReturn(mock()) + whenever(wrapped.watch(clientSession.wrapped, emptyList(), Document::class.java)).doReturn(mock()) + whenever(wrapped.watch(clientSession.wrapped, pipeline, Document::class.java)).doReturn(mock()) + whenever(wrapped.watch(pipeline, BsonDocument::class.java)).doReturn(mock()) + whenever(wrapped.watch(clientSession.wrapped, pipeline, BsonDocument::class.java)).doReturn(mock()) + + mongoClient.watch() + mongoClient.watch(pipeline) + mongoClient.watch(clientSession) + mongoClient.watch(clientSession, pipeline) + + mongoClient.watch(resultClass = Document::class.java) + mongoClient.watch(pipeline, BsonDocument::class.java) + mongoClient.watch(clientSession = clientSession, resultClass = Document::class.java) + mongoClient.watch(clientSession, pipeline, BsonDocument::class.java) + + mongoClient.watch<Document>() + mongoClient.watch<BsonDocument>(pipeline) + mongoClient.watch<Document>(clientSession) + mongoClient.watch<BsonDocument>(clientSession, pipeline) + + verify(wrapped, times(3)).watch(emptyList(), Document::class.java) + verify(wrapped, times(1)).watch(pipeline, Document::class.java) + verify(wrapped, times(3)).watch(clientSession.wrapped, emptyList(), Document::class.java) + verify(wrapped, times(1)).watch(clientSession.wrapped, pipeline, Document::class.java) + verify(wrapped, times(2)).watch(pipeline, BsonDocument::class.java) + verify(wrapped, times(2)).watch(clientSession.wrapped, pipeline, BsonDocument::class.java) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingBulkWrite() { + val mongoClient = MongoClient(wrapped) + val requests = listOf(ClientNamespacedWriteModel.insertOne(MongoNamespace("test.test"), Document())) + val options = ClientBulkWriteOptions.clientBulkWriteOptions().bypassDocumentValidation(true) + + whenever(wrapped.bulkWrite(requests)).doReturn(mock()) + whenever(wrapped.bulkWrite(requests, options)).doReturn(mock()) + whenever(wrapped.bulkWrite(clientSession.wrapped, requests)).doReturn(mock()) + whenever(wrapped.bulkWrite(clientSession.wrapped, requests, options)).doReturn(mock()) + + mongoClient.bulkWrite(requests) + mongoClient.bulkWrite(requests, options) +
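// The session variants below forward clientSession.wrapped together with the write models. +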
mongoClient.bulkWrite(clientSession, requests) + mongoClient.bulkWrite(clientSession, requests, options) + + verify(wrapped).bulkWrite(requests) + verify(wrapped).bulkWrite(requests, options) + verify(wrapped).bulkWrite(clientSession.wrapped, requests) + verify(wrapped).bulkWrite(clientSession.wrapped, requests, options) + verifyNoMoreInteractions(wrapped) + } +} diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoCollectionTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoCollectionTest.kt new file mode 100644 index 00000000000..e27b7852bba --- /dev/null +++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoCollectionTest.kt @@ -0,0 +1,891 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.kotlin.client + +import com.mongodb.CreateIndexCommitQuorum +import com.mongodb.MongoNamespace +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import com.mongodb.WriteConcern +import com.mongodb.client.MongoCollection as JMongoCollection +import com.mongodb.client.model.BulkWriteOptions +import com.mongodb.client.model.CountOptions +import com.mongodb.client.model.CreateIndexOptions +import com.mongodb.client.model.DeleteOptions +import com.mongodb.client.model.DropCollectionOptions +import com.mongodb.client.model.DropIndexOptions +import com.mongodb.client.model.EstimatedDocumentCountOptions +import com.mongodb.client.model.FindOneAndDeleteOptions +import com.mongodb.client.model.FindOneAndReplaceOptions +import com.mongodb.client.model.FindOneAndUpdateOptions +import com.mongodb.client.model.IndexModel +import com.mongodb.client.model.IndexOptions +import com.mongodb.client.model.InsertManyOptions +import com.mongodb.client.model.InsertOneModel +import com.mongodb.client.model.InsertOneOptions +import com.mongodb.client.model.RenameCollectionOptions +import com.mongodb.client.model.ReplaceOptions +import com.mongodb.client.model.UpdateOptions +import java.util.concurrent.TimeUnit +import kotlin.reflect.full.declaredFunctions +import kotlin.reflect.full.declaredMemberProperties +import kotlin.test.assertContentEquals +import kotlin.test.assertEquals +import org.bson.BsonDocument +import org.bson.Document +import org.bson.codecs.configuration.CodecRegistry +import org.junit.jupiter.api.Test +import org.mockito.Mock +import org.mockito.kotlin.doReturn +import org.mockito.kotlin.eq +import org.mockito.kotlin.mock +import org.mockito.kotlin.refEq +import org.mockito.kotlin.times +import org.mockito.kotlin.verify +import org.mockito.kotlin.verifyNoMoreInteractions +import org.mockito.kotlin.whenever + +class MongoCollectionTest { + + @Mock val wrapped: JMongoCollection<Document> = mock() + @Mock val clientSession: ClientSession = ClientSession(mock()) + + private val defaultFilter = BsonDocument() + private val filter = Document("a", 1) + private val pipeline = listOf(Document(mapOf("a" to 1))) + + @Test + fun shouldHaveTheSameMethods() { + val
jMongoCollectionFunctions = JMongoCollection::class.declaredFunctions.map { it.name }.toSet() - "mapReduce" + val kMongoCollectionFunctions = + MongoCollection::class + .declaredFunctions + .map { + if (it.name == "timeout") { + "getTimeout" + } else { + it.name + } + } + .toSet() + + MongoCollection::class + .declaredMemberProperties + .filterNot { it.name == "wrapped" } + .map { "get${it.name.replaceFirstChar { c -> c.uppercaseChar() }}" } + + assertEquals(jMongoCollectionFunctions, kMongoCollectionFunctions) + } + + @Test + fun shouldCallTheUnderlyingGetDocumentClass() { + val mongoCollection = MongoCollection(wrapped) + whenever(wrapped.documentClass).doReturn(Document::class.java) + + mongoCollection.documentClass + verify(wrapped).documentClass + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingGetNamespace() { + val mongoCollection = MongoCollection(wrapped) + whenever(wrapped.namespace).doReturn(MongoNamespace("a.b")) + + mongoCollection.namespace + verify(wrapped).namespace + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingGetCodecRegistry() { + val mongoCollection = MongoCollection(wrapped) + whenever(wrapped.codecRegistry).doReturn(mock()) + + mongoCollection.codecRegistry + verify(wrapped).codecRegistry + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingGetReadPreference() { + val mongoCollection = MongoCollection(wrapped) + whenever(wrapped.readPreference).doReturn(mock()) + + mongoCollection.readPreference + verify(wrapped).readPreference + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingGetReadConcern() { + val mongoCollection = MongoCollection(wrapped) + whenever(wrapped.readConcern).doReturn(ReadConcern.DEFAULT) + + mongoCollection.readConcern + verify(wrapped).readConcern + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingGetWriteConcern() { + val mongoCollection = MongoCollection(wrapped) + whenever(wrapped.writeConcern).doReturn(mock()) + + mongoCollection.writeConcern + verify(wrapped).writeConcern + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingWithDocumentClass() { + val mongoCollection = MongoCollection(wrapped) + whenever(wrapped.withDocumentClass(BsonDocument::class.java)).doReturn(mock()) + + mongoCollection.withDocumentClass<BsonDocument>() + verify(wrapped).withDocumentClass(BsonDocument::class.java) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingWithCodecRegistry() { + val mongoCollection = MongoCollection(wrapped) + val codecRegistry = mock<CodecRegistry>() + whenever(wrapped.withCodecRegistry(codecRegistry)).doReturn(mock()) + + mongoCollection.withCodecRegistry(codecRegistry) + verify(wrapped).withCodecRegistry(codecRegistry) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingWithReadPreference() { + val mongoCollection = MongoCollection(wrapped) + val readPreference = ReadPreference.primaryPreferred() + whenever(wrapped.withReadPreference(readPreference)).doReturn(mock()) + + mongoCollection.withReadPreference(readPreference) + verify(wrapped).withReadPreference(readPreference) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingWithReadConcern() { + val mongoCollection = MongoCollection(wrapped) + val readConcern = ReadConcern.AVAILABLE + whenever(wrapped.withReadConcern(readConcern)).doReturn(mock()) + + mongoCollection.withReadConcern(readConcern) + verify(wrapped).withReadConcern(readConcern) +
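// No further interactions should reach the wrapped collection; the Kotlin API is a thin delegate. +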
verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingWithWriteConcern() { + val mongoCollection = MongoCollection(wrapped) + val writeConcern = WriteConcern.MAJORITY + whenever(wrapped.withWriteConcern(writeConcern)).doReturn(mock()) + + mongoCollection.withWriteConcern(writeConcern) + verify(wrapped).withWriteConcern(writeConcern) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingCountDocuments() { + val mongoCollection = MongoCollection(wrapped) + + val defaultOptions = CountOptions() + + val options = CountOptions().comment("comment") + + whenever(wrapped.countDocuments(eq(defaultFilter), refEq(defaultOptions))).doReturn(1) + whenever(wrapped.countDocuments(eq(filter), refEq(defaultOptions))).doReturn(2) + whenever(wrapped.countDocuments(eq(filter), eq(options))).doReturn(3) + whenever(wrapped.countDocuments(eq(clientSession.wrapped), eq(defaultFilter), refEq(defaultOptions))) + .doReturn(4) + whenever(wrapped.countDocuments(eq(clientSession.wrapped), eq(filter), refEq(defaultOptions))).doReturn(5) + whenever(wrapped.countDocuments(eq(clientSession.wrapped), eq(filter), eq(options))).doReturn(6) + + assertEquals(1, mongoCollection.countDocuments()) + assertEquals(2, mongoCollection.countDocuments(filter)) + assertEquals(3, mongoCollection.countDocuments(filter, options)) + assertEquals(4, mongoCollection.countDocuments(clientSession)) + assertEquals(5, mongoCollection.countDocuments(clientSession, filter)) + assertEquals(6, mongoCollection.countDocuments(clientSession, filter, options)) + + verify(wrapped).countDocuments(eq(defaultFilter), refEq(defaultOptions)) + verify(wrapped).countDocuments(eq(filter), refEq(defaultOptions)) + verify(wrapped).countDocuments(eq(filter), eq(options)) + verify(wrapped).countDocuments(eq(clientSession.wrapped), eq(defaultFilter), refEq(defaultOptions)) + verify(wrapped).countDocuments(eq(clientSession.wrapped), eq(filter), refEq(defaultOptions)) + verify(wrapped).countDocuments(eq(clientSession.wrapped), eq(filter), eq(options)) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingEstimatedDocumentCount() { + val mongoCollection = MongoCollection(wrapped) + val defaultOptions = EstimatedDocumentCountOptions() + val options = EstimatedDocumentCountOptions().comment("comment") + + whenever(wrapped.estimatedDocumentCount(refEq(defaultOptions))).doReturn(1) + whenever(wrapped.estimatedDocumentCount(options)).doReturn(2) + + assertEquals(1, mongoCollection.estimatedDocumentCount()) + assertEquals(2, mongoCollection.estimatedDocumentCount(options)) + + verify(wrapped).estimatedDocumentCount(refEq(defaultOptions)) + verify(wrapped).estimatedDocumentCount(options) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingDistinct() { + val mongoCollection = MongoCollection(wrapped) + val fieldName = "fieldName" + + whenever(wrapped.distinct(fieldName, defaultFilter, Document::class.java)).doReturn(mock()) + whenever(wrapped.distinct(fieldName, filter, Document::class.java)).doReturn(mock()) + whenever(wrapped.distinct(clientSession.wrapped, fieldName, defaultFilter, Document::class.java)) + .doReturn(mock()) + whenever(wrapped.distinct(clientSession.wrapped, fieldName, filter, Document::class.java)).doReturn(mock()) + whenever(wrapped.distinct(fieldName, defaultFilter, BsonDocument::class.java)).doReturn(mock()) + whenever(wrapped.distinct(fieldName, filter, BsonDocument::class.java)).doReturn(mock()) + whenever(wrapped.distinct(clientSession.wrapped, fieldName, 
defaultFilter, BsonDocument::class.java)) + .doReturn(mock()) + whenever(wrapped.distinct(clientSession.wrapped, fieldName, filter, BsonDocument::class.java)).doReturn(mock()) + + mongoCollection.distinct("fieldName", resultClass = Document::class.java) + mongoCollection.distinct("fieldName", filter, Document::class.java) + mongoCollection.distinct(clientSession, "fieldName", resultClass = Document::class.java) + mongoCollection.distinct(clientSession, "fieldName", filter, Document::class.java) + + mongoCollection.distinct<BsonDocument>("fieldName") + mongoCollection.distinct<BsonDocument>("fieldName", filter) + mongoCollection.distinct<BsonDocument>(clientSession, "fieldName") + mongoCollection.distinct<BsonDocument>(clientSession, "fieldName", filter) + + verify(wrapped).distinct(fieldName, defaultFilter, Document::class.java) + verify(wrapped).distinct(fieldName, filter, Document::class.java) + verify(wrapped).distinct(clientSession.wrapped, fieldName, defaultFilter, Document::class.java) + verify(wrapped).distinct(clientSession.wrapped, fieldName, filter, Document::class.java) + + verify(wrapped).distinct(fieldName, defaultFilter, BsonDocument::class.java) + verify(wrapped).distinct(fieldName, filter, BsonDocument::class.java) + verify(wrapped).distinct(clientSession.wrapped, fieldName, defaultFilter, BsonDocument::class.java) + verify(wrapped).distinct(clientSession.wrapped, fieldName, filter, BsonDocument::class.java) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingFind() { + val mongoCollection = MongoCollection(wrapped) + + whenever(wrapped.documentClass).doReturn(Document::class.java) + whenever(wrapped.find(defaultFilter, Document::class.java)).doReturn(mock()) + whenever(wrapped.find(filter, Document::class.java)).doReturn(mock()) + whenever(wrapped.find(clientSession.wrapped, defaultFilter, Document::class.java)).doReturn(mock()) + whenever(wrapped.find(clientSession.wrapped, filter, Document::class.java)).doReturn(mock()) + whenever(wrapped.find(defaultFilter, BsonDocument::class.java)).doReturn(mock()) + whenever(wrapped.find(filter, BsonDocument::class.java)).doReturn(mock()) + whenever(wrapped.find(clientSession.wrapped, defaultFilter, BsonDocument::class.java)).doReturn(mock()) + whenever(wrapped.find(clientSession.wrapped, filter, BsonDocument::class.java)).doReturn(mock()) + + mongoCollection.find() + mongoCollection.find(filter) + mongoCollection.find(clientSession) + mongoCollection.find(clientSession, filter) + + mongoCollection.find(resultClass = Document::class.java) + mongoCollection.find(filter, resultClass = Document::class.java) + mongoCollection.find(clientSession, resultClass = Document::class.java) + mongoCollection.find(clientSession, filter, Document::class.java) + + mongoCollection.find<BsonDocument>() + mongoCollection.find<BsonDocument>(filter) + mongoCollection.find<BsonDocument>(clientSession) + mongoCollection.find<BsonDocument>(clientSession, filter) + + verify(wrapped, times(4)).documentClass + verify(wrapped, times(2)).find(defaultFilter, Document::class.java) + verify(wrapped, times(2)).find(filter, Document::class.java) + verify(wrapped, times(2)).find(clientSession.wrapped, defaultFilter, Document::class.java) + verify(wrapped, times(2)).find(clientSession.wrapped, filter, Document::class.java) + verify(wrapped, times(1)).find(defaultFilter, BsonDocument::class.java) + verify(wrapped, times(1)).find(filter, BsonDocument::class.java) + verify(wrapped, times(1)).find(clientSession.wrapped, defaultFilter, BsonDocument::class.java) + verify(wrapped, times(1)).find(clientSession.wrapped, filter, BsonDocument::class.java) +
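// Only the four default find() calls read documentClass to resolve the collection's document type; the explicit and reified variants do not consult it. +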
verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingAggregate() { + val mongoCollection = MongoCollection(wrapped) + + whenever(wrapped.documentClass).doReturn(Document::class.java) + whenever(wrapped.aggregate(pipeline, Document::class.java)).doReturn(mock()) + whenever(wrapped.aggregate(clientSession.wrapped, pipeline, Document::class.java)).doReturn(mock()) + whenever(wrapped.aggregate(pipeline, BsonDocument::class.java)).doReturn(mock()) + whenever(wrapped.aggregate(clientSession.wrapped, pipeline, BsonDocument::class.java)).doReturn(mock()) + + mongoCollection.aggregate(pipeline) + mongoCollection.aggregate(clientSession, pipeline) + + mongoCollection.aggregate(pipeline, resultClass = Document::class.java) + mongoCollection.aggregate(clientSession, pipeline, Document::class.java) + + mongoCollection.aggregate<BsonDocument>(pipeline) + mongoCollection.aggregate<BsonDocument>(clientSession, pipeline) + + verify(wrapped, times(2)).documentClass + verify(wrapped, times(2)).aggregate(pipeline, Document::class.java) + verify(wrapped, times(2)).aggregate(clientSession.wrapped, pipeline, Document::class.java) + verify(wrapped, times(1)).aggregate(pipeline, BsonDocument::class.java) + verify(wrapped, times(1)).aggregate(clientSession.wrapped, pipeline, BsonDocument::class.java) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingWatch() { + val mongoCollection = MongoCollection(wrapped) + val pipeline = listOf(Document(mapOf("a" to 1))) + + whenever(wrapped.documentClass).doReturn(Document::class.java) + whenever(wrapped.watch(emptyList(), Document::class.java)).doReturn(mock()) + whenever(wrapped.watch(pipeline, Document::class.java)).doReturn(mock()) + whenever(wrapped.watch(clientSession.wrapped, emptyList(), Document::class.java)).doReturn(mock()) + whenever(wrapped.watch(clientSession.wrapped, pipeline, Document::class.java)).doReturn(mock()) + whenever(wrapped.watch(emptyList(), BsonDocument::class.java)).doReturn(mock()) + whenever(wrapped.watch(pipeline, BsonDocument::class.java)).doReturn(mock()) + whenever(wrapped.watch(clientSession.wrapped, emptyList(), BsonDocument::class.java)).doReturn(mock()) + whenever(wrapped.watch(clientSession.wrapped, pipeline, BsonDocument::class.java)).doReturn(mock()) + + mongoCollection.watch() + mongoCollection.watch(pipeline) + mongoCollection.watch(clientSession) + mongoCollection.watch(clientSession, pipeline) + + mongoCollection.watch(resultClass = Document::class.java) + mongoCollection.watch(pipeline, Document::class.java) + mongoCollection.watch(clientSession, resultClass = Document::class.java) + mongoCollection.watch(clientSession, pipeline, Document::class.java) + + mongoCollection.watch<BsonDocument>() + mongoCollection.watch<BsonDocument>(pipeline) + mongoCollection.watch<BsonDocument>(clientSession) + mongoCollection.watch<BsonDocument>(clientSession, pipeline) + + verify(wrapped, times(4)).documentClass + verify(wrapped, times(2)).watch(emptyList(), Document::class.java) + verify(wrapped, times(2)).watch(pipeline, Document::class.java) + verify(wrapped, times(2)).watch(clientSession.wrapped, emptyList(), Document::class.java) + verify(wrapped, times(2)).watch(clientSession.wrapped, pipeline, Document::class.java) + verify(wrapped, times(1)).watch(emptyList(), BsonDocument::class.java) + verify(wrapped, times(1)).watch(pipeline, BsonDocument::class.java) + verify(wrapped, times(1)).watch(clientSession.wrapped, emptyList(), BsonDocument::class.java) + verify(wrapped, times(1)).watch(clientSession.wrapped, pipeline, BsonDocument::class.java) +
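// The reified watch<BsonDocument> round maps straight to the BsonDocument stubs without touching documentClass. +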
verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingInsertOne() { + val mongoCollection = MongoCollection(wrapped) + val value = Document("u", 1) + val defaultOptions = InsertOneOptions() + val options = InsertOneOptions().comment("comment") + + whenever(wrapped.insertOne(eq(value), refEq(defaultOptions))).doReturn(mock()) + whenever(wrapped.insertOne(eq(value), eq(options))).doReturn(mock()) + whenever(wrapped.insertOne(eq(clientSession.wrapped), eq(value), refEq(defaultOptions))).doReturn(mock()) + whenever(wrapped.insertOne(eq(clientSession.wrapped), eq(value), eq(options))).doReturn(mock()) + + mongoCollection.insertOne(value) + mongoCollection.insertOne(value, options) + mongoCollection.insertOne(clientSession, value) + mongoCollection.insertOne(clientSession, value, options) + + verify(wrapped).insertOne(eq(value), refEq(defaultOptions)) + verify(wrapped).insertOne(eq(value), eq(options)) + verify(wrapped).insertOne(eq(clientSession.wrapped), eq(value), refEq(defaultOptions)) + verify(wrapped).insertOne(eq(clientSession.wrapped), eq(value), eq(options)) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingInsertMany() { + val mongoCollection = MongoCollection(wrapped) + val value = listOf(Document("u", 1)) + val defaultOptions = InsertManyOptions() + val options = InsertManyOptions().comment("comment") + + whenever(wrapped.insertMany(eq(value), refEq(defaultOptions))).doReturn(mock()) + whenever(wrapped.insertMany(eq(value), eq(options))).doReturn(mock()) + whenever(wrapped.insertMany(eq(clientSession.wrapped), eq(value), refEq(defaultOptions))).doReturn(mock()) + whenever(wrapped.insertMany(eq(clientSession.wrapped), eq(value), eq(options))).doReturn(mock()) + + mongoCollection.insertMany(value) + mongoCollection.insertMany(value, options) + mongoCollection.insertMany(clientSession, value) + mongoCollection.insertMany(clientSession, value, options) + + verify(wrapped).insertMany(eq(value), refEq(defaultOptions)) + verify(wrapped).insertMany(eq(value), eq(options)) + verify(wrapped).insertMany(eq(clientSession.wrapped), eq(value), refEq(defaultOptions)) + verify(wrapped).insertMany(eq(clientSession.wrapped), eq(value), eq(options)) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingBulkWrite() { + val mongoCollection = MongoCollection(wrapped) + val value = listOf(InsertOneModel(Document("u", 1))) + val defaultOptions = BulkWriteOptions() + val options = BulkWriteOptions().comment("comment") + + whenever(wrapped.bulkWrite(eq(value), refEq(defaultOptions))).doReturn(mock()) + whenever(wrapped.bulkWrite(eq(value), eq(options))).doReturn(mock()) + whenever(wrapped.bulkWrite(eq(clientSession.wrapped), eq(value), refEq(defaultOptions))).doReturn(mock()) + whenever(wrapped.bulkWrite(eq(clientSession.wrapped), eq(value), eq(options))).doReturn(mock()) + + mongoCollection.bulkWrite(value) + mongoCollection.bulkWrite(value, options) + mongoCollection.bulkWrite(clientSession, value) + mongoCollection.bulkWrite(clientSession, value, options) + + verify(wrapped).bulkWrite(eq(value), refEq(defaultOptions)) + verify(wrapped).bulkWrite(eq(value), eq(options)) + verify(wrapped).bulkWrite(eq(clientSession.wrapped), eq(value), refEq(defaultOptions)) + verify(wrapped).bulkWrite(eq(clientSession.wrapped), eq(value), eq(options)) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingUpdateOne() { + val mongoCollection = MongoCollection(wrapped) + val update = Document("u", 1) + val updates = 
listOf(update) + val defaultOptions = UpdateOptions() + val options = UpdateOptions().comment("comment") + + whenever(wrapped.updateOne(eq(filter), eq(update), refEq(defaultOptions))).doReturn(mock()) + whenever(wrapped.updateOne(eq(filter), eq(update), eq(options))).doReturn(mock()) + whenever(wrapped.updateOne(eq(filter), eq(updates), refEq(defaultOptions))).doReturn(mock()) + whenever(wrapped.updateOne(eq(filter), eq(updates), eq(options))).doReturn(mock()) + whenever(wrapped.updateOne(eq(clientSession.wrapped), eq(filter), eq(update), refEq(defaultOptions))) + .doReturn(mock()) + whenever(wrapped.updateOne(eq(clientSession.wrapped), eq(filter), eq(update), eq(options))).doReturn(mock()) + whenever(wrapped.updateOne(eq(clientSession.wrapped), eq(filter), eq(updates), refEq(defaultOptions))) + .doReturn(mock()) + whenever(wrapped.updateOne(eq(clientSession.wrapped), eq(filter), eq(updates), eq(options))).doReturn(mock()) + + mongoCollection.updateOne(filter, update) + mongoCollection.updateOne(filter, update, options) + mongoCollection.updateOne(filter, updates) + mongoCollection.updateOne(filter, updates, options) + mongoCollection.updateOne(clientSession, filter, update) + mongoCollection.updateOne(clientSession, filter, update, options) + mongoCollection.updateOne(clientSession, filter, updates) + mongoCollection.updateOne(clientSession, filter, updates, options) + + verify(wrapped).updateOne(eq(filter), eq(update), refEq(defaultOptions)) + verify(wrapped).updateOne(eq(filter), eq(update), eq(options)) + verify(wrapped).updateOne(eq(filter), eq(updates), refEq(defaultOptions)) + verify(wrapped).updateOne(eq(filter), eq(updates), eq(options)) + verify(wrapped).updateOne(eq(clientSession.wrapped), eq(filter), eq(update), refEq(defaultOptions)) + verify(wrapped).updateOne(eq(clientSession.wrapped), eq(filter), eq(update), eq(options)) + verify(wrapped).updateOne(eq(clientSession.wrapped), eq(filter), eq(updates), refEq(defaultOptions)) + verify(wrapped).updateOne(eq(clientSession.wrapped), eq(filter), eq(updates), eq(options)) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingUpdateMany() { + val mongoCollection = MongoCollection(wrapped) + val update = Document("u", 1) + val updates = listOf(update) + val defaultOptions = UpdateOptions() + val options = UpdateOptions().comment("comment") + + whenever(wrapped.updateMany(eq(filter), eq(update), refEq(defaultOptions))).doReturn(mock()) + whenever(wrapped.updateMany(eq(filter), eq(update), eq(options))).doReturn(mock()) + whenever(wrapped.updateMany(eq(filter), eq(updates), refEq(defaultOptions))).doReturn(mock()) + whenever(wrapped.updateMany(eq(filter), eq(updates), eq(options))).doReturn(mock()) + whenever(wrapped.updateMany(eq(clientSession.wrapped), eq(filter), eq(update), refEq(defaultOptions))) + .doReturn(mock()) + whenever(wrapped.updateMany(eq(clientSession.wrapped), eq(filter), eq(update), eq(options))).doReturn(mock()) + whenever(wrapped.updateMany(eq(clientSession.wrapped), eq(filter), eq(updates), refEq(defaultOptions))) + .doReturn(mock()) + whenever(wrapped.updateMany(eq(clientSession.wrapped), eq(filter), eq(updates), eq(options))).doReturn(mock()) + + mongoCollection.updateMany(filter, update) + mongoCollection.updateMany(filter, update, options) + mongoCollection.updateMany(filter, updates) + mongoCollection.updateMany(filter, updates, options) + mongoCollection.updateMany(clientSession, filter, update) + mongoCollection.updateMany(clientSession, filter, update, options) + 
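// The list-based calls below exercise the aggregation-pipeline update overloads. +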
mongoCollection.updateMany(clientSession, filter, updates) + mongoCollection.updateMany(clientSession, filter, updates, options) + + verify(wrapped).updateMany(eq(filter), eq(update), refEq(defaultOptions)) + verify(wrapped).updateMany(eq(filter), eq(update), eq(options)) + verify(wrapped).updateMany(eq(filter), eq(updates), refEq(defaultOptions)) + verify(wrapped).updateMany(eq(filter), eq(updates), eq(options)) + verify(wrapped).updateMany(eq(clientSession.wrapped), eq(filter), eq(update), refEq(defaultOptions)) + verify(wrapped).updateMany(eq(clientSession.wrapped), eq(filter), eq(update), eq(options)) + verify(wrapped).updateMany(eq(clientSession.wrapped), eq(filter), eq(updates), refEq(defaultOptions)) + verify(wrapped).updateMany(eq(clientSession.wrapped), eq(filter), eq(updates), eq(options)) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingReplaceOne() { + val mongoCollection = MongoCollection(wrapped) + val replacement = Document("u", 1) + val defaultOptions = ReplaceOptions() + val options = ReplaceOptions().comment("comment") + + whenever(wrapped.replaceOne(eq(filter), eq(replacement), refEq(defaultOptions))).doReturn(mock()) + whenever(wrapped.replaceOne(eq(filter), eq(replacement), eq(options))).doReturn(mock()) + whenever(wrapped.replaceOne(eq(clientSession.wrapped), eq(filter), eq(replacement), refEq(defaultOptions))) + .doReturn(mock()) + whenever(wrapped.replaceOne(eq(clientSession.wrapped), eq(filter), eq(replacement), eq(options))) + .doReturn(mock()) + + mongoCollection.replaceOne(filter, replacement) + mongoCollection.replaceOne(filter, replacement, options) + mongoCollection.replaceOne(clientSession, filter, replacement) + mongoCollection.replaceOne(clientSession, filter, replacement, options) + + verify(wrapped).replaceOne(eq(filter), eq(replacement), refEq(defaultOptions)) + verify(wrapped).replaceOne(eq(filter), eq(replacement), eq(options)) + verify(wrapped).replaceOne(eq(clientSession.wrapped), eq(filter), eq(replacement), refEq(defaultOptions)) + verify(wrapped).replaceOne(eq(clientSession.wrapped), eq(filter), eq(replacement), eq(options)) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingDeleteOne() { + val mongoCollection = MongoCollection(wrapped) + + val defaultOptions = DeleteOptions() + val options = DeleteOptions().comment("comment") + + whenever(wrapped.deleteOne(eq(filter), refEq(defaultOptions))).doReturn(mock()) + whenever(wrapped.deleteOne(eq(filter), eq(options))).doReturn(mock()) + whenever(wrapped.deleteOne(eq(clientSession.wrapped), eq(filter), refEq(defaultOptions))).doReturn(mock()) + whenever(wrapped.deleteOne(eq(clientSession.wrapped), eq(filter), eq(options))).doReturn(mock()) + + mongoCollection.deleteOne(filter) + mongoCollection.deleteOne(filter, options) + mongoCollection.deleteOne(clientSession, filter) + mongoCollection.deleteOne(clientSession, filter, options) + + verify(wrapped).deleteOne(eq(filter), refEq(defaultOptions)) + verify(wrapped).deleteOne(eq(filter), eq(options)) + verify(wrapped).deleteOne(eq(clientSession.wrapped), eq(filter), refEq(defaultOptions)) + verify(wrapped).deleteOne(eq(clientSession.wrapped), eq(filter), eq(options)) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingDeleteMany() { + val mongoCollection = MongoCollection(wrapped) + + val defaultOptions = DeleteOptions() + val options = DeleteOptions().comment("comment") + + whenever(wrapped.deleteMany(eq(filter), refEq(defaultOptions))).doReturn(mock()) + 
whenever(wrapped.deleteMany(eq(filter), eq(options))).doReturn(mock()) + whenever(wrapped.deleteMany(eq(clientSession.wrapped), eq(filter), refEq(defaultOptions))).doReturn(mock()) + whenever(wrapped.deleteMany(eq(clientSession.wrapped), eq(filter), eq(options))).doReturn(mock()) + + mongoCollection.deleteMany(filter) + mongoCollection.deleteMany(filter, options) + mongoCollection.deleteMany(clientSession, filter) + mongoCollection.deleteMany(clientSession, filter, options) + + verify(wrapped).deleteMany(eq(filter), refEq(defaultOptions)) + verify(wrapped).deleteMany(eq(filter), eq(options)) + verify(wrapped).deleteMany(eq(clientSession.wrapped), eq(filter), refEq(defaultOptions)) + verify(wrapped).deleteMany(eq(clientSession.wrapped), eq(filter), eq(options)) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingFindOneAndDelete() { + val mongoCollection = MongoCollection(wrapped) + + val defaultOptions = FindOneAndDeleteOptions() + val options = FindOneAndDeleteOptions().comment("comment") + + whenever(wrapped.findOneAndDelete(eq(filter), refEq(defaultOptions))).doReturn(mock()) + whenever(wrapped.findOneAndDelete(eq(filter), eq(options))).doReturn(mock()) + whenever(wrapped.findOneAndDelete(eq(clientSession.wrapped), eq(filter), refEq(defaultOptions))) + .doReturn(mock()) + whenever(wrapped.findOneAndDelete(eq(clientSession.wrapped), eq(filter), eq(options))).doReturn(mock()) + + mongoCollection.findOneAndDelete(filter) + mongoCollection.findOneAndDelete(filter, options) + mongoCollection.findOneAndDelete(clientSession, filter) + mongoCollection.findOneAndDelete(clientSession, filter, options) + + verify(wrapped).findOneAndDelete(eq(filter), refEq(defaultOptions)) + verify(wrapped).findOneAndDelete(eq(filter), eq(options)) + verify(wrapped).findOneAndDelete(eq(clientSession.wrapped), eq(filter), refEq(defaultOptions)) + verify(wrapped).findOneAndDelete(eq(clientSession.wrapped), eq(filter), eq(options)) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingFindOneAndUpdate() { + val mongoCollection = MongoCollection(wrapped) + val update = Document("u", 1) + val updateList = listOf(update) + val defaultOptions = FindOneAndUpdateOptions() + val options = FindOneAndUpdateOptions().comment("comment") + + whenever(wrapped.findOneAndUpdate(eq(filter), eq(update), refEq(defaultOptions))).doReturn(mock()) + whenever(wrapped.findOneAndUpdate(eq(filter), eq(update), eq(options))).doReturn(mock()) + whenever(wrapped.findOneAndUpdate(eq(filter), eq(updateList), refEq(defaultOptions))).doReturn(mock()) + whenever(wrapped.findOneAndUpdate(eq(filter), eq(updateList), eq(options))).doReturn(mock()) + whenever(wrapped.findOneAndUpdate(eq(clientSession.wrapped), eq(filter), eq(update), refEq(defaultOptions))) + .doReturn(mock()) + whenever(wrapped.findOneAndUpdate(eq(clientSession.wrapped), eq(filter), eq(update), eq(options))) + .doReturn(mock()) + whenever(wrapped.findOneAndUpdate(eq(clientSession.wrapped), eq(filter), eq(updateList), refEq(defaultOptions))) + .doReturn(mock()) + whenever(wrapped.findOneAndUpdate(eq(clientSession.wrapped), eq(filter), eq(updateList), eq(options))) + .doReturn(mock()) + + mongoCollection.findOneAndUpdate(filter, update) + mongoCollection.findOneAndUpdate(filter, update, options) + mongoCollection.findOneAndUpdate(filter, updateList) + mongoCollection.findOneAndUpdate(filter, updateList, options) + mongoCollection.findOneAndUpdate(clientSession, filter, update) + mongoCollection.findOneAndUpdate(clientSession, filter, 
update, options) + mongoCollection.findOneAndUpdate(clientSession, filter, updateList) + mongoCollection.findOneAndUpdate(clientSession, filter, updateList, options) + + verify(wrapped).findOneAndUpdate(eq(filter), eq(update), refEq(defaultOptions)) + verify(wrapped).findOneAndUpdate(eq(filter), eq(update), eq(options)) + verify(wrapped).findOneAndUpdate(eq(filter), eq(updateList), refEq(defaultOptions)) + verify(wrapped).findOneAndUpdate(eq(filter), eq(updateList), eq(options)) + verify(wrapped).findOneAndUpdate(eq(clientSession.wrapped), eq(filter), eq(update), refEq(defaultOptions)) + verify(wrapped).findOneAndUpdate(eq(clientSession.wrapped), eq(filter), eq(update), eq(options)) + verify(wrapped).findOneAndUpdate(eq(clientSession.wrapped), eq(filter), eq(updateList), refEq(defaultOptions)) + verify(wrapped).findOneAndUpdate(eq(clientSession.wrapped), eq(filter), eq(updateList), eq(options)) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingFindOneAndReplace() { + val mongoCollection = MongoCollection(wrapped) + val replacement = Document("u", 1) + val defaultOptions = FindOneAndReplaceOptions() + val options = FindOneAndReplaceOptions().comment("comment") + + whenever(wrapped.findOneAndReplace(eq(filter), eq(replacement), refEq(defaultOptions))).doReturn(mock()) + whenever(wrapped.findOneAndReplace(eq(filter), eq(replacement), eq(options))).doReturn(mock()) + whenever( + wrapped.findOneAndReplace( + eq(clientSession.wrapped), eq(filter), eq(replacement), refEq(defaultOptions))) + .doReturn(mock()) + whenever(wrapped.findOneAndReplace(eq(clientSession.wrapped), eq(filter), eq(replacement), eq(options))) + .doReturn(mock()) + + mongoCollection.findOneAndReplace(filter, replacement) + mongoCollection.findOneAndReplace(filter, replacement, options) + mongoCollection.findOneAndReplace(clientSession, filter, replacement) + mongoCollection.findOneAndReplace(clientSession, filter, replacement, options) + + verify(wrapped).findOneAndReplace(eq(filter), eq(replacement), refEq(defaultOptions)) + verify(wrapped).findOneAndReplace(eq(filter), eq(replacement), eq(options)) + verify(wrapped).findOneAndReplace(eq(clientSession.wrapped), eq(filter), eq(replacement), refEq(defaultOptions)) + verify(wrapped).findOneAndReplace(eq(clientSession.wrapped), eq(filter), eq(replacement), eq(options)) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingDrop() { + val mongoCollection = MongoCollection(wrapped) + val defaultOptions = DropCollectionOptions() + val options = DropCollectionOptions().encryptedFields(Document()) + + mongoCollection.drop() + mongoCollection.drop(options) + mongoCollection.drop(clientSession) + mongoCollection.drop(clientSession, options) + + verify(wrapped).drop(refEq(defaultOptions)) + verify(wrapped).drop(eq(options)) + verify(wrapped).drop(eq(clientSession.wrapped), refEq(defaultOptions)) + verify(wrapped).drop(eq(clientSession.wrapped), eq(options)) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingCreateIndex() { + val mongoCollection = MongoCollection(wrapped) + val key = Document() + val defaultOptions = IndexOptions() + val options = IndexOptions().name("name") + + whenever(wrapped.createIndex(eq(key), refEq(defaultOptions))).doReturn("1") + whenever(wrapped.createIndex(eq(key), eq(options))).doReturn("2") + whenever(wrapped.createIndex(eq(clientSession.wrapped), eq(key), refEq(defaultOptions))).doReturn("3") + whenever(wrapped.createIndex(eq(clientSession.wrapped), eq(key), 
eq(options))).doReturn("4") + + assertEquals("1", mongoCollection.createIndex(key)) + assertEquals("2", mongoCollection.createIndex(key, options)) + assertEquals("3", mongoCollection.createIndex(clientSession, key)) + assertEquals("4", mongoCollection.createIndex(clientSession, key, options)) + + verify(wrapped).createIndex(eq(key), refEq(defaultOptions)) + verify(wrapped).createIndex(eq(key), eq(options)) + verify(wrapped).createIndex(eq(clientSession.wrapped), eq(key), refEq(defaultOptions)) + verify(wrapped).createIndex(eq(clientSession.wrapped), eq(key), eq(options)) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingCreateIndexes() { + val mongoCollection = MongoCollection(wrapped) + val indexes = listOf(IndexModel(Document())) + val defaultOptions = CreateIndexOptions() + val options = CreateIndexOptions().commitQuorum(CreateIndexCommitQuorum.MAJORITY) + + whenever(wrapped.createIndexes(eq(indexes), refEq(defaultOptions))).doReturn(listOf("1")) + whenever(wrapped.createIndexes(eq(indexes), eq(options))).doReturn(listOf("2")) + whenever(wrapped.createIndexes(eq(clientSession.wrapped), eq(indexes), refEq(defaultOptions))) + .doReturn(listOf("3")) + whenever(wrapped.createIndexes(eq(clientSession.wrapped), eq(indexes), eq(options))).doReturn(listOf("4")) + + assertContentEquals(listOf("1"), mongoCollection.createIndexes(indexes)) + assertContentEquals(listOf("2"), mongoCollection.createIndexes(indexes, options)) + assertContentEquals(listOf("3"), mongoCollection.createIndexes(clientSession, indexes)) + assertContentEquals(listOf("4"), mongoCollection.createIndexes(clientSession, indexes, options)) + + verify(wrapped).createIndexes(eq(indexes), refEq(defaultOptions)) + verify(wrapped).createIndexes(eq(indexes), eq(options)) + verify(wrapped).createIndexes(eq(clientSession.wrapped), eq(indexes), refEq(defaultOptions)) + verify(wrapped).createIndexes(eq(clientSession.wrapped), eq(indexes), eq(options)) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingListIndexes() { + val mongoCollection = MongoCollection(wrapped) + + whenever(wrapped.listIndexes(Document::class.java)).doReturn(mock()) + whenever(wrapped.listIndexes(clientSession.wrapped, Document::class.java)).doReturn(mock()) + whenever(wrapped.listIndexes(BsonDocument::class.java)).doReturn(mock()) + whenever(wrapped.listIndexes(clientSession.wrapped, BsonDocument::class.java)).doReturn(mock()) + + mongoCollection.listIndexes() + mongoCollection.listIndexes(clientSession) + + mongoCollection.listIndexes(resultClass = Document::class.java) + mongoCollection.listIndexes(clientSession, Document::class.java) + + mongoCollection.listIndexes<BsonDocument>() + mongoCollection.listIndexes<BsonDocument>(clientSession) + + verify(wrapped, times(2)).listIndexes(Document::class.java) + verify(wrapped, times(2)).listIndexes(clientSession.wrapped, Document::class.java) + verify(wrapped, times(1)).listIndexes(BsonDocument::class.java) + verify(wrapped, times(1)).listIndexes(clientSession.wrapped, BsonDocument::class.java) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingDropIndex() { + val mongoCollection = MongoCollection(wrapped) + val indexName = "index" + val keys = Document() + val defaultOptions = DropIndexOptions() + val options = DropIndexOptions().maxTime(1, TimeUnit.MILLISECONDS) + + mongoCollection.dropIndex(indexName) + mongoCollection.dropIndex(indexName, options) + mongoCollection.dropIndex(keys) + mongoCollection.dropIndex(keys, options) +
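// The same name- and keys-based overloads are repeated below with an explicit session. +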
mongoCollection.dropIndex(clientSession, indexName) + mongoCollection.dropIndex(clientSession, indexName, options) + mongoCollection.dropIndex(clientSession, keys) + mongoCollection.dropIndex(clientSession, keys, options) + + verify(wrapped).dropIndex(eq(indexName), refEq(defaultOptions)) + verify(wrapped).dropIndex(eq(indexName), eq(options)) + verify(wrapped).dropIndex(eq(keys), refEq(defaultOptions)) + verify(wrapped).dropIndex(eq(keys), eq(options)) + verify(wrapped).dropIndex(eq(clientSession.wrapped), eq(indexName), refEq(defaultOptions)) + verify(wrapped).dropIndex(eq(clientSession.wrapped), eq(indexName), eq(options)) + verify(wrapped).dropIndex(eq(clientSession.wrapped), eq(keys), refEq(defaultOptions)) + verify(wrapped).dropIndex(eq(clientSession.wrapped), eq(keys), eq(options)) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingDropIndexes() { + val mongoCollection = MongoCollection(wrapped) + val defaultOptions = DropIndexOptions() + val options = DropIndexOptions().maxTime(1, TimeUnit.MILLISECONDS) + + mongoCollection.dropIndexes() + mongoCollection.dropIndexes(options) + mongoCollection.dropIndexes(clientSession) + mongoCollection.dropIndexes(clientSession, options) + + verify(wrapped).dropIndexes(refEq(defaultOptions)) + verify(wrapped).dropIndexes(eq(options)) + verify(wrapped).dropIndexes(eq(clientSession.wrapped), refEq(defaultOptions)) + verify(wrapped).dropIndexes(eq(clientSession.wrapped), eq(options)) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingRenameCollection() { + val mongoCollection = MongoCollection(wrapped) + val mongoNamespace = MongoNamespace("db", "coll") + val defaultOptions = RenameCollectionOptions() + val options = RenameCollectionOptions().dropTarget(true) + + mongoCollection.renameCollection(mongoNamespace) + mongoCollection.renameCollection(mongoNamespace, options) + mongoCollection.renameCollection(clientSession, mongoNamespace) + mongoCollection.renameCollection(clientSession, mongoNamespace, options) + + verify(wrapped).renameCollection(eq(mongoNamespace), refEq(defaultOptions)) + verify(wrapped).renameCollection(eq(mongoNamespace), eq(options)) + verify(wrapped).renameCollection(eq(clientSession.wrapped), eq(mongoNamespace), refEq(defaultOptions)) + verify(wrapped).renameCollection(eq(clientSession.wrapped), eq(mongoNamespace), eq(options)) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldProvideExtensionFunctionsForTimeBasedOptions() { + val oneThousand = 1000L + + assertEquals(1, CreateIndexOptions().maxTime(oneThousand).getMaxTime(TimeUnit.SECONDS)) + assertEquals(1, CountOptions().maxTime(oneThousand).getMaxTime(TimeUnit.SECONDS)) + assertEquals(1, DropIndexOptions().maxTime(oneThousand).getMaxTime(TimeUnit.SECONDS)) + assertEquals(1, EstimatedDocumentCountOptions().maxTime(oneThousand).getMaxTime(TimeUnit.SECONDS)) + assertEquals(1, FindOneAndDeleteOptions().maxTime(oneThousand).getMaxTime(TimeUnit.SECONDS)) + assertEquals(1, FindOneAndReplaceOptions().maxTime(oneThousand).getMaxTime(TimeUnit.SECONDS)) + assertEquals(1, FindOneAndUpdateOptions().maxTime(oneThousand).getMaxTime(TimeUnit.SECONDS)) + assertEquals(oneThousand, IndexOptions().expireAfter(oneThousand).getExpireAfter(TimeUnit.SECONDS)) + } +} diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoCursorTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoCursorTest.kt new file mode 100644 index 00000000000..4548f4c70a7 --- /dev/null +++ 
b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoCursorTest.kt @@ -0,0 +1,77 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.kotlin.client + +import com.mongodb.ServerAddress +import com.mongodb.ServerCursor +import com.mongodb.client.MongoCursor as JMongoCursor +import kotlin.reflect.full.declaredFunctions +import kotlin.reflect.full.declaredMemberProperties +import kotlin.test.assertEquals +import org.bson.Document +import org.junit.jupiter.api.Test +import org.mockito.kotlin.doReturn +import org.mockito.kotlin.mock +import org.mockito.kotlin.verify +import org.mockito.kotlin.verifyNoMoreInteractions +import org.mockito.kotlin.whenever + +class MongoCursorTest { + @Test + fun shouldHaveTheSameMethods() { + val jMongoCursorFunctions = + JMongoCursor::class + .declaredFunctions + .map { it.name } + // exclude since this method has a default implementation in MongoCursor interface + .filterNot { it == "forEachRemaining" } + .toSet() + val kMongoCursorFunctions = + MongoCursorImpl::class.declaredFunctions.map { it.name }.toSet() + + MongoCursorImpl::class + .declaredMemberProperties + .filterNot { it.name == "wrapped" } + .map { + if (it.name == "available") it.name + else "get${it.name.replaceFirstChar{c -> c.uppercaseChar() }}" + } + + assertEquals(jMongoCursorFunctions, kMongoCursorFunctions) + } + + @Test + fun shouldCallTheUnderlyingMethods() { + val wrapped: JMongoCursor<Document> = mock() + val cursor = MongoCursorImpl(wrapped) + + whenever(wrapped.serverCursor).doReturn(ServerCursor(1, ServerAddress())) + whenever(wrapped.serverAddress).doReturn(mock()) + + cursor.serverCursor + cursor.serverAddress + cursor.hasNext() + cursor.tryNext() + cursor.available + + verify(wrapped).serverCursor + verify(wrapped).serverAddress + verify(wrapped).hasNext() + verify(wrapped).tryNext() + verify(wrapped).available() + + verifyNoMoreInteractions(wrapped) + } +} diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoDatabaseTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoDatabaseTest.kt new file mode 100644 index 00000000000..1a7bc1d25c2 --- /dev/null +++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoDatabaseTest.kt @@ -0,0 +1,384 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ +package com.mongodb.kotlin.client + +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import com.mongodb.WriteConcern +import com.mongodb.client.MongoDatabase as JMongoDatabase +import com.mongodb.client.model.Collation +import com.mongodb.client.model.CreateCollectionOptions +import com.mongodb.client.model.CreateViewOptions +import com.mongodb.client.model.ValidationAction +import com.mongodb.client.model.ValidationOptions +import com.mongodb.kotlin.client.MockitoHelper.deepRefEq +import java.util.concurrent.TimeUnit +import kotlin.reflect.full.declaredFunctions +import kotlin.reflect.full.declaredMemberProperties +import kotlin.test.assertEquals +import org.bson.BsonDocument +import org.bson.Document +import org.bson.codecs.configuration.CodecRegistry +import org.junit.jupiter.api.Test +import org.mockito.Mock +import org.mockito.kotlin.doReturn +import org.mockito.kotlin.eq +import org.mockito.kotlin.mock +import org.mockito.kotlin.refEq +import org.mockito.kotlin.times +import org.mockito.kotlin.verify +import org.mockito.kotlin.verifyNoMoreInteractions +import org.mockito.kotlin.whenever + +class MongoDatabaseTest { + + @Mock val wrapped: JMongoDatabase = mock() + @Mock val clientSession: ClientSession = ClientSession(mock()) + + @Test + fun shouldHaveTheSameMethods() { + val jMongoDatabaseFunctions = JMongoDatabase::class.declaredFunctions.map { it.name }.toSet() + val kMongoDatabaseFunctions = + MongoDatabase::class + .declaredFunctions + .map { + if (it.name == "timeout") { + "getTimeout" + } else { + it.name + } + } + .toSet() + + MongoDatabase::class + .declaredMemberProperties + .filterNot { it.name == "wrapped" } + .map { "get${it.name.replaceFirstChar { c -> c.uppercaseChar() }}" } + + assertEquals(jMongoDatabaseFunctions, kMongoDatabaseFunctions) + } + + @Test + fun shouldCallTheUnderlyingGetNamespace() { + val mongoDatabase = MongoDatabase(wrapped) + whenever(wrapped.name).doReturn("name") + + mongoDatabase.name + verify(wrapped).name + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingGetCodecRegistry() { + val mongoDatabase = MongoDatabase(wrapped) + whenever(wrapped.codecRegistry).doReturn(mock()) + + mongoDatabase.codecRegistry + verify(wrapped).codecRegistry + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingGetReadPreference() { + val mongoDatabase = MongoDatabase(wrapped) + whenever(wrapped.readPreference).doReturn(mock()) + + mongoDatabase.readPreference + verify(wrapped).readPreference + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingGetReadConcern() { + val mongoDatabase = MongoDatabase(wrapped) + whenever(wrapped.readConcern).doReturn(ReadConcern.DEFAULT) + + mongoDatabase.readConcern + verify(wrapped).readConcern + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingGetWriteConcern() { + val mongoDatabase = MongoDatabase(wrapped) + whenever(wrapped.writeConcern).doReturn(mock()) + + mongoDatabase.writeConcern + verify(wrapped).writeConcern + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingWithCodecRegistry() { + val mongoDatabase = MongoDatabase(wrapped) + val codecRegistry = mock<CodecRegistry>() + whenever(wrapped.withCodecRegistry(codecRegistry)).doReturn(mock()) + + mongoDatabase.withCodecRegistry(codecRegistry) + verify(wrapped).withCodecRegistry(codecRegistry) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingWithReadPreference() { + val mongoDatabase = MongoDatabase(wrapped) + val
readPreference = ReadPreference.primaryPreferred() + whenever(wrapped.withReadPreference(readPreference)).doReturn(mock()) + + mongoDatabase.withReadPreference(readPreference) + verify(wrapped).withReadPreference(readPreference) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingWithReadConcern() { + val mongoDatabase = MongoDatabase(wrapped) + val readConcern = ReadConcern.AVAILABLE + whenever(wrapped.withReadConcern(readConcern)).doReturn(mock()) + + mongoDatabase.withReadConcern(readConcern) + verify(wrapped).withReadConcern(readConcern) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingWithWriteConcern() { + val mongoDatabase = MongoDatabase(wrapped) + val writeConcern = WriteConcern.MAJORITY + whenever(wrapped.withWriteConcern(writeConcern)).doReturn(mock()) + + mongoDatabase.withWriteConcern(writeConcern) + verify(wrapped).withWriteConcern(writeConcern) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingGetCollection() { + val mongoDatabase = MongoDatabase(wrapped) + whenever(wrapped.getCollection("collectionName", Document::class.java)).doReturn(mock()) + + mongoDatabase.getCollection<Document>("collectionName") + verify(wrapped).getCollection("collectionName", Document::class.java) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingRunCommand() { + val mongoDatabase = MongoDatabase(wrapped) + val command = Document(mapOf("a" to 1)) + val primary = ReadPreference.primary() + val primaryPreferred = ReadPreference.primaryPreferred() + + whenever(wrapped.readPreference).doReturn(primary) + whenever(wrapped.runCommand(command, primary, Document::class.java)).doReturn(mock()) + whenever(wrapped.runCommand(clientSession.wrapped, command, primary, Document::class.java)).doReturn(mock()) + whenever(wrapped.runCommand(command, primary, BsonDocument::class.java)).doReturn(mock()) + whenever(wrapped.runCommand(clientSession.wrapped, command, primary, BsonDocument::class.java)).doReturn(mock()) + whenever(wrapped.runCommand(command, primaryPreferred, BsonDocument::class.java)).doReturn(mock()) + whenever(wrapped.runCommand(clientSession.wrapped, command, primaryPreferred, BsonDocument::class.java)) + .doReturn(mock()) + + mongoDatabase.runCommand(command) + mongoDatabase.runCommand(command, primary) + mongoDatabase.runCommand(command, resultClass = Document::class.java) + mongoDatabase.runCommand(command, primary, Document::class.java) + + mongoDatabase.runCommand(clientSession, command) + mongoDatabase.runCommand(clientSession, command, primary) + mongoDatabase.runCommand(clientSession, command, resultClass = Document::class.java) + mongoDatabase.runCommand(clientSession, command, primary, Document::class.java) + + mongoDatabase.runCommand<BsonDocument>(command) + mongoDatabase.runCommand<BsonDocument>(command, primaryPreferred) + mongoDatabase.runCommand<BsonDocument>(clientSession, command) + mongoDatabase.runCommand<BsonDocument>(clientSession, command, primaryPreferred) + + verify(wrapped, times(6)).readPreference + verify(wrapped, times(4)).runCommand(command, primary, Document::class.java) + verify(wrapped, times(4)).runCommand(clientSession.wrapped, command, primary, Document::class.java) + verify(wrapped, times(1)).runCommand(command, primary, BsonDocument::class.java) + verify(wrapped, times(1)).runCommand(clientSession.wrapped, command, primary, BsonDocument::class.java) + verify(wrapped, times(1)).runCommand(command, primaryPreferred, BsonDocument::class.java) + verify(wrapped, times(1)).runCommand(clientSession.wrapped, command,
primaryPreferred, BsonDocument::class.java) + + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingDrop() { + val mongoDatabase = MongoDatabase(wrapped) + + mongoDatabase.drop() + mongoDatabase.drop(clientSession) + + verify(wrapped).drop() + verify(wrapped).drop(clientSession.wrapped) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingListCollectionNames() { + val mongoDatabase = MongoDatabase(wrapped) + whenever(wrapped.listCollectionNames()).doReturn(mock()) + whenever(wrapped.listCollectionNames(clientSession.wrapped)).doReturn(mock()) + + mongoDatabase.listCollectionNames() + mongoDatabase.listCollectionNames(clientSession) + + verify(wrapped).listCollectionNames() + verify(wrapped).listCollectionNames(clientSession.wrapped) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingListCollections() { + val mongoDatabase = MongoDatabase(wrapped) + whenever(wrapped.listCollections(Document::class.java)).doReturn(mock()) + whenever(wrapped.listCollections(BsonDocument::class.java)).doReturn(mock()) + whenever(wrapped.listCollections(clientSession.wrapped, Document::class.java)).doReturn(mock()) + whenever(wrapped.listCollections(clientSession.wrapped, BsonDocument::class.java)).doReturn(mock()) + + mongoDatabase.listCollections() + mongoDatabase.listCollections(clientSession) + + mongoDatabase.listCollections(resultClass = Document::class.java) + mongoDatabase.listCollections(clientSession, Document::class.java) + + mongoDatabase.listCollections<BsonDocument>() + mongoDatabase.listCollections<BsonDocument>(clientSession) + + verify(wrapped, times(2)).listCollections(Document::class.java) + verify(wrapped, times(2)).listCollections(clientSession.wrapped, Document::class.java) + verify(wrapped, times(1)).listCollections(BsonDocument::class.java) + verify(wrapped, times(1)).listCollections(clientSession.wrapped, BsonDocument::class.java) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingCreateCollection() { + val mongoDatabase = MongoDatabase(wrapped) + val name = "coll" + val name2 = "coll2" + val defaultOptions = CreateCollectionOptions() + val options = + CreateCollectionOptions().validationOptions(ValidationOptions().validationAction(ValidationAction.WARN)) + + mongoDatabase.createCollection(name) + mongoDatabase.createCollection(name2, options) + mongoDatabase.createCollection(clientSession, name) + mongoDatabase.createCollection(clientSession, name2, options) + + verify(wrapped).createCollection(eq(name), deepRefEq(defaultOptions)) + verify(wrapped).createCollection(eq(name2), eq(options)) + verify(wrapped).createCollection(eq(clientSession.wrapped), eq(name), deepRefEq(defaultOptions)) + verify(wrapped).createCollection(eq(clientSession.wrapped), eq(name2), eq(options)) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingCreateView() { + val mongoDatabase = MongoDatabase(wrapped) + val viewName = "view" + val viewOn = "coll" + val pipeline = listOf(Document(mapOf("a" to 1))) + val defaultOptions = CreateViewOptions() + val options = CreateViewOptions().collation(Collation.builder().backwards(true).build()) + + mongoDatabase.createView(viewName, viewOn, pipeline) + mongoDatabase.createView(viewName, viewOn, pipeline, options) + mongoDatabase.createView(clientSession, viewName, viewOn, pipeline) + mongoDatabase.createView(clientSession, viewName, viewOn, pipeline, options) + + verify(wrapped).createView(eq(viewName), eq(viewOn), eq(pipeline), refEq(defaultOptions)) +
verify(wrapped).createView(eq(viewName), eq(viewOn), eq(pipeline), eq(options)) + verify(wrapped) + .createView(eq(clientSession.wrapped), eq(viewName), eq(viewOn), eq(pipeline), refEq(defaultOptions)) + verify(wrapped).createView(eq(clientSession.wrapped), eq(viewName), eq(viewOn), eq(pipeline), eq(options)) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingAggregate() { + val mongoDatabase = MongoDatabase(wrapped) + val pipeline = listOf(Document(mapOf("a" to 1))) + + whenever(wrapped.aggregate(pipeline, Document::class.java)).doReturn(mock()) + whenever(wrapped.aggregate(clientSession.wrapped, pipeline, Document::class.java)).doReturn(mock()) + whenever(wrapped.aggregate(pipeline, BsonDocument::class.java)).doReturn(mock()) + whenever(wrapped.aggregate(clientSession.wrapped, pipeline, BsonDocument::class.java)).doReturn(mock()) + + mongoDatabase.aggregate(pipeline) + mongoDatabase.aggregate(clientSession, pipeline) + + mongoDatabase.aggregate(pipeline, resultClass = Document::class.java) + mongoDatabase.aggregate(clientSession, pipeline, Document::class.java) + + mongoDatabase.aggregate<BsonDocument>(pipeline) + mongoDatabase.aggregate<BsonDocument>(clientSession, pipeline) + + verify(wrapped, times(2)).aggregate(pipeline, Document::class.java) + verify(wrapped, times(2)).aggregate(clientSession.wrapped, pipeline, Document::class.java) + verify(wrapped, times(1)).aggregate(pipeline, BsonDocument::class.java) + verify(wrapped, times(1)).aggregate(clientSession.wrapped, pipeline, BsonDocument::class.java) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldCallTheUnderlyingWatch() { + val mongoDatabase = MongoDatabase(wrapped) + val pipeline = listOf(Document(mapOf("a" to 1))) + + whenever(wrapped.watch(emptyList(), Document::class.java)).doReturn(mock()) + whenever(wrapped.watch(pipeline, Document::class.java)).doReturn(mock()) + whenever(wrapped.watch(clientSession.wrapped, emptyList(), Document::class.java)).doReturn(mock()) + whenever(wrapped.watch(clientSession.wrapped, pipeline, Document::class.java)).doReturn(mock()) + whenever(wrapped.watch(emptyList(), BsonDocument::class.java)).doReturn(mock()) + whenever(wrapped.watch(pipeline, BsonDocument::class.java)).doReturn(mock()) + whenever(wrapped.watch(clientSession.wrapped, emptyList(), BsonDocument::class.java)).doReturn(mock()) + whenever(wrapped.watch(clientSession.wrapped, pipeline, BsonDocument::class.java)).doReturn(mock()) + + mongoDatabase.watch() + mongoDatabase.watch(pipeline) + mongoDatabase.watch(clientSession) + mongoDatabase.watch(clientSession, pipeline) + + mongoDatabase.watch(resultClass = Document::class.java) + mongoDatabase.watch(pipeline, Document::class.java) + mongoDatabase.watch(clientSession, resultClass = Document::class.java) + mongoDatabase.watch(clientSession, pipeline, Document::class.java) + + mongoDatabase.watch<BsonDocument>() + mongoDatabase.watch<BsonDocument>(pipeline) + mongoDatabase.watch<BsonDocument>(clientSession) + mongoDatabase.watch<BsonDocument>(clientSession, pipeline) + + verify(wrapped, times(2)).watch(emptyList(), Document::class.java) + verify(wrapped, times(2)).watch(pipeline, Document::class.java) + verify(wrapped, times(2)).watch(clientSession.wrapped, emptyList(), Document::class.java) + verify(wrapped, times(2)).watch(clientSession.wrapped, pipeline, Document::class.java) + verify(wrapped, times(1)).watch(emptyList(), BsonDocument::class.java) + verify(wrapped, times(1)).watch(pipeline, BsonDocument::class.java) + verify(wrapped, times(1)).watch(clientSession.wrapped, emptyList(), BsonDocument::class.java) + verify(wrapped,
times(1)).watch(clientSession.wrapped, pipeline, BsonDocument::class.java) + verifyNoMoreInteractions(wrapped) + } + + @Test + fun shouldProvideExtensionFunctionsForTimeBasedOptions() { + val oneThousand = 1000L + + assertEquals(oneThousand, CreateCollectionOptions().expireAfter(oneThousand).getExpireAfter(TimeUnit.SECONDS)) + } +} diff --git a/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoIterableTest.kt b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoIterableTest.kt new file mode 100644 index 00000000000..ab16dd08b24 --- /dev/null +++ b/driver-kotlin-sync/src/test/kotlin/com/mongodb/kotlin/client/MongoIterableTest.kt @@ -0,0 +1,126 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.kotlin.client + +import com.mongodb.Function +import com.mongodb.client.MongoCursor as JMongoCursor +import com.mongodb.client.MongoIterable as JMongoIterable +import kotlin.test.assertContentEquals +import org.bson.Document +import org.junit.jupiter.api.Test +import org.mockito.ArgumentMatchers +import org.mockito.kotlin.doReturn +import org.mockito.kotlin.mock +import org.mockito.kotlin.times +import org.mockito.kotlin.verify +import org.mockito.kotlin.verifyNoMoreInteractions +import org.mockito.kotlin.whenever + +class MongoIterableTest { + + @Suppress("UNCHECKED_CAST") + @Test + fun shouldCallTheUnderlyingMethods() { + val delegate: JMongoIterable<Document> = mock() + val cursor: JMongoCursor<Document> = mock() + val iterable = MongoIterable(delegate) + + val batchSize = 10 + val documents = listOf(Document("a", 1), Document("b", 2), Document("c", 3)) + val transform: (Document) -> String = { it.toJson() } + val transformClass: Class<Function<Document, String>> = + Function::class.java as Class<Function<Document, String>> + + whenever(cursor.hasNext()).thenReturn(true, true, true, false, true, true, true, false, true, true, true, false) + whenever(cursor.next()) + .thenReturn( + documents[0], + documents[1], + documents[2], + documents[0], + documents[1], + documents[2], + documents[0], + documents[1], + documents[2]) + whenever(delegate.cursor()).doReturn(cursor) + whenever(delegate.first()).doReturn(documents[0]) + + whenever(delegate.map(ArgumentMatchers.any(transformClass))).doReturn(mock()) + + iterable.batchSize(batchSize) + iterable.cursor() + iterable.first() + iterable.firstOrNull() + iterable.forEach { it.toString() } + iterable.toCollection(mutableListOf()) + iterable.use { it.take(2) } + iterable.map(transform) + + verify(delegate, times(1)).batchSize(batchSize) + verify(delegate, times(4)).cursor() + verify(delegate, times(2)).first() + verify(delegate, times(1)).map(ArgumentMatchers.any(transformClass)) + + verifyNoMoreInteractions(delegate) + } + + @Test + fun shouldCloseTheUnderlyingCursorWhenUsingUse() { + val delegate: JMongoIterable<Document> = mock() + val cursor: JMongoCursor<Document> = mock() + val iterable = MongoIterable(delegate) + + val documents = listOf(Document("a", 1), Document("b", 2), Document("c", 3)) + +
whenever(cursor.hasNext()).thenReturn(true, true, true, false) + whenever(cursor.next()).thenReturn(documents[0], documents[1], documents[2]) + whenever(delegate.cursor()).doReturn(cursor) + + assertContentEquals(documents.subList(0, 2), iterable.use { it.take(2) }.toList()) + + verify(delegate, times(1)).cursor() + verify(cursor, times(2)).hasNext() + verify(cursor, times(2)).next() + verify(cursor, times(1)).close() + + verifyNoMoreInteractions(delegate) + verifyNoMoreInteractions(cursor) + } + + @Test + fun shouldCloseTheUnderlyingCursorWhenUsingToList() { + val delegate: JMongoIterable<Document> = mock() + val cursor: JMongoCursor<Document> = mock() + val iterable = MongoIterable(delegate) + + val documents = listOf(Document("a", 1), Document("b", 2), Document("c", 3)) + + whenever(cursor.hasNext()).thenReturn(true, true, true, false) + whenever(cursor.next()).thenReturn(documents[0], documents[1], documents[2]) + whenever(delegate.cursor()).doReturn(cursor) + + assertContentEquals(documents, iterable.toList()) + + verify(delegate, times(1)).cursor() + verify(cursor, times(4)).hasNext() + verify(cursor, times(3)).next() + verify(cursor, times(1)).close() + + verifyNoMoreInteractions(delegate) + verifyNoMoreInteractions(cursor) + } +} diff --git a/driver-lambda/build.gradle.kts b/driver-lambda/build.gradle.kts new file mode 100644 index 00000000000..d5cd57cfaa9 --- /dev/null +++ b/driver-lambda/build.gradle.kts @@ -0,0 +1,64 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
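The two cursor tests above pin down the resource contract of the Kotlin MongoIterable wrapper: both early termination via `use { it.take(2) }` and a full drain via `toList()` must close the underlying cursor exactly once. For comparison, here is a minimal sketch of the same guarantee written directly against the Java sync API that these wrappers delegate to; the `collection` parameter and its contents are assumed, and the class name is illustrative:

    import com.mongodb.client.MongoCollection;
    import com.mongodb.client.MongoCursor;
    import org.bson.Document;

    class CursorContractSketch {
        // Early termination: try-with-resources closes the cursor even though we
        // stop after two documents, mirroring the Kotlin `use { it.take(2) }` test.
        static void printFirstTwo(final MongoCollection<Document> collection) {
            try (MongoCursor<Document> cursor = collection.find().cursor()) {
                for (int i = 0; i < 2 && cursor.hasNext(); i++) {
                    System.out.println(cursor.next().toJson());
                }
            } // cursor.close() is guaranteed to run here
        }
    }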
+ */ + +import com.github.jengelman.gradle.plugins.shadow.tasks.ShadowJar + +plugins { + id("application") + id("java-library") + id("project.base") + alias(libs.plugins.shadow) +} + +application { + mainClass = "com.mongodb.lambdatest.LambdaTestApp" +} + +sourceSets { + main { + java { setSrcDirs(listOf("src/main")) } + resources { setSrcDirs(listOf("src/resources")) } + } +} + +dependencies { + implementation(project(":driver-sync")) + implementation(project(":bson")) + + implementation(libs.aws.lambda.core) + implementation(libs.aws.lambda.events) + implementation(platform(libs.junit.bom)) + implementation(libs.bundles.junit) +} + +tasks.withType<Test>().configureEach { + enabled = false +} + +java { + sourceCompatibility = JavaVersion.VERSION_11 + targetCompatibility = JavaVersion.VERSION_11 +} + +tasks.withType<Jar> { + manifest { + attributes["Main-Class"] = "com.mongodb.lambdatest.LambdaTestApp" + } +} +tasks.withType<ShadowJar> { + archiveBaseName.set("lambdatest") + archiveVersion.set("") +} diff --git a/driver-lambda/samconfig.toml b/driver-lambda/samconfig.toml new file mode 100644 index 00000000000..332bf47b0b0 --- /dev/null +++ b/driver-lambda/samconfig.toml @@ -0,0 +1,31 @@ +# More information about the configuration file can be found here: +# https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-config.html +version = 0.1 + +[default] +[default.global.parameters] +stack_name = "lambdatest" + +[default.build.parameters] +cached = true +parallel = false + +[default.validate.parameters] +lint = true + +[default.deploy.parameters] +capabilities = "CAPABILITY_IAM" +confirm_changeset = false # headless +resolve_s3 = true + +[default.package.parameters] +resolve_s3 = true + +[default.sync.parameters] +watch = true + +[default.local_start_api.parameters] +warm_containers = "EAGER" + +[default.local_start_lambda.parameters] +warm_containers = "EAGER" diff --git a/driver-lambda/src/main/com/mongodb/lambdatest/LambdaTestApp.java b/driver-lambda/src/main/com/mongodb/lambdatest/LambdaTestApp.java new file mode 100644 index 00000000000..c2643375160 --- /dev/null +++ b/driver-lambda/src/main/com/mongodb/lambdatest/LambdaTestApp.java @@ -0,0 +1,175 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.lambdatest; + +import com.amazonaws.services.lambda.runtime.Context; +import com.amazonaws.services.lambda.runtime.RequestHandler; +import com.amazonaws.services.lambda.runtime.events.APIGatewayProxyRequestEvent; +import com.amazonaws.services.lambda.runtime.events.APIGatewayProxyResponseEvent; +import com.mongodb.ConnectionString; +import com.mongodb.MongoClientSettings; +import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoClients; +import com.mongodb.client.MongoCollection; +import com.mongodb.event.CommandFailedEvent; +import com.mongodb.event.CommandListener; +import com.mongodb.event.CommandSucceededEvent; +import com.mongodb.event.ConnectionClosedEvent; +import com.mongodb.event.ConnectionCreatedEvent; +import com.mongodb.event.ConnectionPoolListener; +import com.mongodb.event.ServerHeartbeatFailedEvent; +import com.mongodb.event.ServerHeartbeatStartedEvent; +import com.mongodb.event.ServerHeartbeatSucceededEvent; +import com.mongodb.event.ServerMonitorListener; +import com.mongodb.lang.NonNull; +import org.bson.BsonDocument; +import org.bson.BsonInt64; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.Document; + +import java.io.PrintWriter; +import java.io.StringWriter; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.CopyOnWriteArrayList; + +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * Test App for AWS lambda functions + */ +public class LambdaTestApp implements RequestHandler<APIGatewayProxyRequestEvent, APIGatewayProxyResponseEvent> { + private final MongoClient mongoClient; + private long openConnections = 0; + private long totalHeartbeatCount = 0; + private long totalHeartbeatDurationMs = 0; + private long totalCommandCount = 0; + private long totalCommandDurationMs = 0; + private final CopyOnWriteArrayList<Throwable> failedAssertions = new CopyOnWriteArrayList<>(); + + public LambdaTestApp() { + String connectionString = System.getenv("MONGODB_URI"); + + MongoClientSettings settings = MongoClientSettings.builder() + .applyConnectionString(new ConnectionString(connectionString)) + .addCommandListener(new CommandListener() { + @Override + public void commandSucceeded(@NonNull final CommandSucceededEvent event) { + totalCommandCount++; + totalCommandDurationMs += event.getElapsedTime(MILLISECONDS); + } + @Override + public void commandFailed(@NonNull final CommandFailedEvent event) { + totalCommandCount++; + totalCommandDurationMs += event.getElapsedTime(MILLISECONDS); + } + }) + .applyToServerSettings(builder -> builder.addServerMonitorListener(new ServerMonitorListener() { + @Override + public void serverHearbeatStarted(@NonNull final ServerHeartbeatStartedEvent event) { + checkAssertion(() -> assertFalse(event.isAwaited(), event::toString)); + } + + @Override + public void serverHeartbeatSucceeded(@NonNull final ServerHeartbeatSucceededEvent event) { + checkAssertion(() -> assertFalse(event.isAwaited(), event::toString)); + totalHeartbeatCount++; + totalHeartbeatDurationMs += event.getElapsedTime(MILLISECONDS); + } + @Override + public void serverHeartbeatFailed(@NonNull final ServerHeartbeatFailedEvent event) { + checkAssertion(() -> assertFalse(event.isAwaited(), event::toString)); + totalHeartbeatCount++; + totalHeartbeatDurationMs += event.getElapsedTime(MILLISECONDS); + } + })) + .applyToConnectionPoolSettings(builder -> builder.addConnectionPoolListener(new ConnectionPoolListener() { + @Override +
public void connectionCreated(@NonNull final ConnectionCreatedEvent event) { + openConnections++; + } + @Override + public void connectionClosed(@NonNull final ConnectionClosedEvent event) { + openConnections--; + } + })) + .build(); + mongoClient = MongoClients.create(settings); + } + + public APIGatewayProxyResponseEvent handleRequest(final APIGatewayProxyRequestEvent input, final Context context) { + try { + MongoCollection<Document> collection = mongoClient + .getDatabase("lambdaTest") + .getCollection("test"); + BsonValue id = collection.insertOne(new Document("n", 1)).getInsertedId(); + collection.deleteOne(new Document("_id", id)); + + assertTrue(failedAssertions.isEmpty(), failedAssertions.toString()); + BsonDocument responseBody = getBsonDocument(); + + return templateResponse() + .withStatusCode(200) + .withBody(responseBody.toJson()); + + } catch (Throwable e) { + StringWriter sw = new StringWriter(); + e.printStackTrace(new PrintWriter(sw)); + BsonDocument responseBody = new BsonDocument() + .append("throwable", new BsonString(e.getMessage())) + .append("stacktrace", new BsonString(sw.toString())); + return templateResponse() + .withBody(responseBody.toJson()) + .withStatusCode(500); + } + } + + private BsonDocument getBsonDocument() { + BsonDocument responseBody = new BsonDocument() + .append("totalCommandDurationMs", new BsonInt64(totalCommandDurationMs)) + .append("totalCommandCount", new BsonInt64(totalCommandCount)) + .append("totalHeartbeatDurationMs", new BsonInt64(totalHeartbeatDurationMs)) + .append("totalHeartbeatCount", new BsonInt64(totalHeartbeatCount)) + .append("openConnections", new BsonInt64(openConnections)); + + totalCommandDurationMs = 0; + totalCommandCount = 0; + totalHeartbeatCount = 0; + totalHeartbeatDurationMs = 0; + + return responseBody; + } + + private APIGatewayProxyResponseEvent templateResponse() { + Map<String, String> headers = new HashMap<>(); + headers.put("Content-Type", "application/json"); + headers.put("X-Custom-Header", "application/json"); + return new APIGatewayProxyResponseEvent() + .withHeaders(headers); + } + + private void checkAssertion(final Runnable assertion) { + try { + assertion.run(); + } catch (Throwable t) { + failedAssertions.add(t); + } + } +} diff --git a/driver-lambda/template.yaml b/driver-lambda/template.yaml new file mode 100644 index 00000000000..9441f804f20 --- /dev/null +++ b/driver-lambda/template.yaml @@ -0,0 +1,49 @@ +AWSTemplateFormatVersion: '2010-09-09' +Transform: AWS::Serverless-2016-10-31 +Description: > + Java driver lambda function test +Parameters: + MongoDbUri: + Type: String + Description: The MongoDB connection string.
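Before deploying with the template below, the handler can be smoke-tested locally. A hypothetical harness, not part of this patch: it assumes MONGODB_URI is set in the environment (the LambdaTestApp constructor reads it) and relies on the fact that handleRequest never touches either of its arguments' contents:

    import com.amazonaws.services.lambda.runtime.events.APIGatewayProxyRequestEvent;
    import com.amazonaws.services.lambda.runtime.events.APIGatewayProxyResponseEvent;
    import com.mongodb.lambdatest.LambdaTestApp;

    class LocalSmokeTest {
        public static void main(final String[] args) {
            // Requires MONGODB_URI in the environment; the constructor opens the client.
            LambdaTestApp app = new LambdaTestApp();
            // An empty event and a null Context are enough, since the handler ignores both.
            APIGatewayProxyResponseEvent response = app.handleRequest(new APIGatewayProxyRequestEvent(), null);
            System.out.println(response.getStatusCode() + " " + response.getBody());
        }
    }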
+ +Globals: + Function: + Timeout: 30 + MemorySize: 128 + Tracing: Active + Api: + TracingEnabled: false + +Resources: + MongoDBFunction: + Type: AWS::Serverless::Function + Metadata: + SkipBuild: True + Properties: + CodeUri: build/libs/lambdatest-all.jar + Handler: com.mongodb.lambdatest.LambdaTestApp::handleRequest + Runtime: java11 + Environment: + Variables: + MONGODB_URI: !Ref MongoDbUri + JAVA_TOOL_OPTIONS: -XX:+TieredCompilation -XX:TieredStopAtLevel=1 + Architectures: + - x86_64 + MemorySize: 512 + Events: + LambdaTest: + Type: Api + Properties: + Path: /mongodb + Method: get +Outputs: + LambdaTestApi: + Description: API Gateway endpoint URL for Prod stage for Lambda Test function + Value: !Sub "https://${ServerlessRestApi}.execute-api.${AWS::Region}.amazonaws.com/Prod/mongodb/" + MongoDBFunction: + Description: Lambda Test Lambda Function ARN + Value: !GetAtt MongoDBFunction.Arn + MongoDBFunctionIamRole: + Description: Implicit IAM Role created for Lambda Test function + Value: !GetAtt MongoDBFunctionRole.Arn diff --git a/driver-legacy/build.gradle.kts b/driver-legacy/build.gradle.kts new file mode 100644 index 00000000000..2855bce4cdf --- /dev/null +++ b/driver-legacy/build.gradle.kts @@ -0,0 +1,48 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import ProjectExtensions.configureJarManifest +import ProjectExtensions.configureMavenPublication + +plugins { + id("project.java") + id("conventions.test-artifacts") + id("conventions.test-artifacts-runtime-dependencies") + id("conventions.test-include-optionals") + id("conventions.testing-junit") + id("conventions.testing-spock-exclude-slow") +} + +base.archivesName.set("mongodb-driver-legacy") + +dependencies { + api(project(path = ":bson", configuration = "default")) + api(project(path = ":driver-core", configuration = "default")) + api(project(path = ":driver-sync", configuration = "default")) + + testImplementation(project(path = ":bson", configuration = "testArtifacts")) + testImplementation(project(path = ":driver-core", configuration = "testArtifacts")) + testImplementation(project(path = ":driver-sync", configuration = "testArtifacts")) +} + +configureMavenPublication { + pom { + name.set("The Legacy MongoDB Driver") + description.set("The Legacy MongoDB Driver") + } +} + +// Disable the manifest for driver-legacy as it's not a valid OSGI package +configureJarManifest { attributes["-nomanifest"] = true } diff --git a/driver-legacy/src/examples/tour/Decimal128LegacyAPIQuickTour.java b/driver-legacy/src/examples/tour/Decimal128LegacyAPIQuickTour.java new file mode 100644 index 00000000000..fbf036e6326 --- /dev/null +++ b/driver-legacy/src/examples/tour/Decimal128LegacyAPIQuickTour.java @@ -0,0 +1,78 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tour; + +import com.mongodb.BasicDBObject; +import com.mongodb.DB; +import com.mongodb.DBCollection; +import com.mongodb.DBObject; +import com.mongodb.MongoClient; +import com.mongodb.MongoClientURI; +import com.mongodb.QueryBuilder; +import org.bson.types.Decimal128; + +import java.math.BigDecimal; + +/** + * Decimal128 Quick Tour for the Legacy API + */ +@SuppressWarnings("deprecation") +public class Decimal128LegacyAPIQuickTour { + /** + * Run this main method to see the output of this quick example. + * + * @param args takes an optional single argument for the connection string + */ + public static void main(final String[] args) { + MongoClient mongoClient; + + if (args.length == 0) { + // connect to the local database server + mongoClient = new MongoClient(); + } else { + mongoClient = new MongoClient(new MongoClientURI(args[0])); + } + + // get handle to "mydb" database + DB database = mongoClient.getDB("mydb"); + + + // get a handle to the "test" collection + DBCollection collection = database.getCollection("test"); + + // drop all the data in it + collection.drop(); + + // make a document and insert it + BasicDBObject doc = new BasicDBObject("name", "MongoDB") + .append("amount1", Decimal128.parse(".10")) + .append("amount2", new Decimal128(42L)) + .append("amount3", new Decimal128(new BigDecimal(".200"))); + + collection.insert(doc); + + + DBObject first = collection.findOne(QueryBuilder.start("amount1").is(new Decimal128(new BigDecimal(".10"))).get()); + + Decimal128 amount3 = (Decimal128) first.get("amount3"); + BigDecimal amount3AsBigDecimal = amount3.bigDecimalValue(); + + System.out.println(amount3); + System.out.println(amount3AsBigDecimal); + + } +} diff --git a/driver-legacy/src/examples/tour/package-info.java b/driver-legacy/src/examples/tour/package-info.java new file mode 100644 index 00000000000..ebd69343c4a --- /dev/null +++ b/driver-legacy/src/examples/tour/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains the quick tour examples + */ +package tour; diff --git a/driver-legacy/src/main/com/mongodb/AcknowledgedBulkWriteResult.java b/driver-legacy/src/main/com/mongodb/AcknowledgedBulkWriteResult.java new file mode 100644 index 00000000000..d7b727d073a --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/AcknowledgedBulkWriteResult.java @@ -0,0 +1,120 @@ +/* + * Copyright 2008-present MongoDB, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import java.util.Collections; +import java.util.List; + +import static com.mongodb.assertions.Assertions.notNull; + +class AcknowledgedBulkWriteResult extends BulkWriteResult { + private final int insertedCount; + private final int matchedCount; + private final int removedCount; + private final int modifiedCount; + private final List<BulkWriteUpsert> upserts; + + AcknowledgedBulkWriteResult(final int insertedCount, final int matchedCount, final int removedCount, + final Integer modifiedCount, final List<BulkWriteUpsert> upserts) { + this.insertedCount = insertedCount; + this.matchedCount = matchedCount; + this.removedCount = removedCount; + this.modifiedCount = notNull("modifiedCount", modifiedCount); + this.upserts = Collections.unmodifiableList(notNull("upserts", upserts)); + } + + @Override + public boolean isAcknowledged() { + return true; + } + + @Override + public int getInsertedCount() { + return insertedCount; + } + + @Override + public int getMatchedCount() { + return matchedCount; + } + + @Override + public int getRemovedCount() { + return removedCount; + } + + @Override + public int getModifiedCount() { + return modifiedCount; + } + + @Override + public List<BulkWriteUpsert> getUpserts() { + return upserts; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + AcknowledgedBulkWriteResult that = (AcknowledgedBulkWriteResult) o; + + if (insertedCount != that.insertedCount) { + return false; + } + if (matchedCount != that.matchedCount) { + return false; + } + if (removedCount != that.removedCount) { + return false; + } + if (modifiedCount != that.modifiedCount) { + return false; + } + if (!upserts.equals(that.upserts)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = insertedCount; + result = 31 * result + matchedCount; + result = 31 * result + removedCount; + result = 31 * result + modifiedCount; + result = 31 * result + upserts.hashCode(); + return result; + } + + @Override + public String toString() { + return "AcknowledgedBulkWriteResult{" + + "insertedCount=" + insertedCount + + ", matchedCount=" + matchedCount + + ", removedCount=" + removedCount + + ", modifiedCount=" + modifiedCount + + ", upserts=" + upserts + + '}'; + } +} diff --git a/driver-legacy/src/main/com/mongodb/AggregationOptions.java b/driver-legacy/src/main/com/mongodb/AggregationOptions.java new file mode 100644 index 00000000000..7e2ed155058 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/AggregationOptions.java @@ -0,0 +1,212 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + + +import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.client.model.Collation; +import com.mongodb.lang.Nullable; + +import java.util.concurrent.TimeUnit; + +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +/** + * The options to apply to an aggregate operation. + * + * @mongodb.driver.manual reference/command/aggregate/ aggregate + * @since 2.12 + */ +public class AggregationOptions { + private final Integer batchSize; + private final Boolean allowDiskUse; + private final long maxTimeMS; + private final Boolean bypassDocumentValidation; + private final Collation collation; + + AggregationOptions(final Builder builder) { + batchSize = builder.batchSize; + allowDiskUse = builder.allowDiskUse; + maxTimeMS = builder.maxTimeMS; + bypassDocumentValidation = builder.bypassDocumentValidation; + collation = builder.collation; + } + + /** + * If true, this enables external sort capabilities, otherwise $sort produces an error if the operation consumes 10 percent or more of + * RAM. + * + * @return true if aggregation stages can write data to temporary files + */ + @Nullable + public Boolean getAllowDiskUse() { + return allowDiskUse; + } + + /** + * The size of batches to use when iterating over results. + * + * @return the batch size + */ + @Nullable + public Integer getBatchSize() { + return batchSize; + } + + /** + * Gets the maximum execution time for the aggregation command. + * + * @param timeUnit the time unit for the result + * @return the max time + * @since 2.12 + */ + public long getMaxTime(final TimeUnit timeUnit) { + return timeUnit.convert(maxTimeMS, MILLISECONDS); + } + + /** + * Gets whether to bypass document validation, or null if unspecified. The default is null. + * + * @return whether to bypass document validation, or null if unspecified. + * @since 2.14 + * @mongodb.server.release 3.2 + */ + @Nullable + public Boolean getBypassDocumentValidation() { + return bypassDocumentValidation; + } + + /** + * Returns the collation options + * + * @return the collation options + * @since 3.4 + * @mongodb.server.release 3.4 + */ + @Nullable + public Collation getCollation() { + return collation; + } + + @Override + public String toString() { + return "AggregationOptions{" + + "batchSize=" + batchSize + + ", allowDiskUse=" + allowDiskUse + + ", maxTimeMS=" + maxTimeMS + + ", bypassDocumentValidation=" + bypassDocumentValidation + + ", collation=" + collation + + "}"; + } + + /** + * Creates a new Builder for {@code AggregationOptions}. + * + * @return a new empty builder. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Builder for creating {@code AggregationOptions}. + * + * @mongodb.server.release 2.2 + * @mongodb.driver.manual reference/command/aggregate/ aggregate + */ + @NotThreadSafe + public static class Builder { + private Integer batchSize; + private Boolean allowDiskUse; + private long maxTimeMS; + private Boolean bypassDocumentValidation; + private Collation collation; + + private Builder() { + } + + /** + * Sets the size of batches to use when iterating over results. 
Can be null. + * + * @param size the batch size to apply to the cursor + * @return {@code this} so calls can be chained + */ + public Builder batchSize(@Nullable final Integer size) { + batchSize = size; + return this; + } + + /** + * Set whether to enable external sort capabilities. If set to false, $sort produces an error if the operation consumes 10 percent + * or more RAM. + * + * @param allowDiskUse whether or not aggregation stages can write data to temporary files + * @return {@code this} so calls can be chained + */ + public Builder allowDiskUse(@Nullable final Boolean allowDiskUse) { + this.allowDiskUse = allowDiskUse; + return this; + } + + /** + * Sets the maximum execution time for the aggregation command. + * + * @param maxTime the max time + * @param timeUnit the time unit + * @return {@code this} so calls can be chained + */ + public Builder maxTime(final long maxTime, final TimeUnit timeUnit) { + maxTimeMS = MILLISECONDS.convert(maxTime, timeUnit); + return this; + } + + /** + * Sets whether to bypass document validation. + * + * @param bypassDocumentValidation whether to bypass document validation, or null if unspecified + * @return this + * @since 2.14 + * @mongodb.server.release 3.2 + */ + public Builder bypassDocumentValidation(@Nullable final Boolean bypassDocumentValidation) { + this.bypassDocumentValidation = bypassDocumentValidation; + return this; + } + + /** + * Sets the collation + * + * @param collation the collation + * @return this + * @since 3.4 + * @mongodb.server.release 3.4 + */ + public Builder collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + /** + * Return the options based on this builder. + * + * @return the aggregation options + */ + public AggregationOptions build() { + return new AggregationOptions(this); + } + } +} diff --git a/driver-legacy/src/main/com/mongodb/BulkUpdateRequestBuilder.java b/driver-legacy/src/main/com/mongodb/BulkUpdateRequestBuilder.java new file mode 100644 index 00000000000..e2b6825fdd2 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/BulkUpdateRequestBuilder.java @@ -0,0 +1,116 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.client.model.Collation; +import com.mongodb.lang.Nullable; +import org.bson.codecs.Encoder; + +import java.util.List; + +/** + * A builder for a single update request. 
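To illustrate the builder defined above, a minimal sketch of constructing AggregationOptions; the values are arbitrary, and any boxed option left unset simply stays null, i.e. unspecified:

    import com.mongodb.AggregationOptions;
    import java.util.concurrent.TimeUnit;

    class AggregationOptionsSketch {
        static AggregationOptions cursorFriendlyOptions() {
            return AggregationOptions.builder()
                    .batchSize(100)               // size of each cursor batch
                    .allowDiskUse(true)           // let $sort spill to temporary files
                    .maxTime(5, TimeUnit.SECONDS) // stored internally in milliseconds
                    .build();                     // bypassDocumentValidation and collation stay unspecified
        }
    }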
+ * + * @mongodb.driver.manual /reference/command/update + * @since 2.12 + */ +public class BulkUpdateRequestBuilder { + private final BulkWriteOperation bulkWriteOperation; + private final DBObject query; + private final boolean upsert; + private final Encoder<DBObject> queryCodec; + private final Encoder<DBObject> replacementCodec; + private Collation collation; + private final List<? extends DBObject> arrayFilters; + + BulkUpdateRequestBuilder(final BulkWriteOperation bulkWriteOperation, final DBObject query, final boolean upsert, + final Encoder<DBObject> queryCodec, final Encoder<DBObject> replacementCodec, + @Nullable final Collation collation, @Nullable final List<? extends DBObject> arrayFilters) { + this.bulkWriteOperation = bulkWriteOperation; + this.query = query; + this.upsert = upsert; + this.queryCodec = queryCodec; + this.replacementCodec = replacementCodec; + this.collation = collation; + this.arrayFilters = arrayFilters; + } + + /** + * Returns the collation + * + * @return the collation + * @since 3.4 + * @mongodb.server.release 3.4 + */ + @Nullable + public Collation getCollation() { + return collation; + } + + /** + * Sets the collation + * + * @param collation the collation + * @return this + * @since 3.4 + * @mongodb.server.release 3.4 + */ + @Nullable + public BulkUpdateRequestBuilder collation(final Collation collation) { + this.collation = collation; + return this; + } + + /** + * Gets the array filters to apply to the update operation + * @return the array filters, which may be null + * @since 3.6 + * @mongodb.server.release 3.6 + */ + @Nullable + public List<? extends DBObject> getArrayFilters() { + return arrayFilters; + } + + /** + * Adds a request to replace one document in the collection that matches the query with which this builder was created. + * + * @param document the replacement document, which must be structured just as a document you would insert. It can not contain any + * update operators. + */ + public void replaceOne(final DBObject document) { + bulkWriteOperation.addRequest(new ReplaceRequest(query, document, upsert, queryCodec, replacementCodec, collation)); + } + + /** + * Adds a request to update all documents in the collection that match the query with which this builder was created. + * + * @param update the update criteria + */ + public void update(final DBObject update) { + bulkWriteOperation.addRequest(new UpdateRequest(query, update, true, upsert, queryCodec, collation, arrayFilters)); + } + + /** + * Adds a request to update one document in the collection that matches the query with which this builder was created. + * + * @param update the update criteria + */ + public void updateOne(final DBObject update) { + bulkWriteOperation.addRequest(new UpdateRequest(query, update, false, upsert, queryCodec, collation, arrayFilters)); + } +} diff --git a/driver-legacy/src/main/com/mongodb/BulkWriteError.java b/driver-legacy/src/main/com/mongodb/BulkWriteError.java new file mode 100644 index 00000000000..d896f849056 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/BulkWriteError.java @@ -0,0 +1,129 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * Represents an error for an item included in a bulk write operation, e.g. a duplicate key error + * + * @mongodb.driver.manual reference/method/BulkWriteResult/#BulkWriteResult.writeErrors BulkWriteResult.writeErrors + * @since 2.12 + */ +public class BulkWriteError { + private final int index; + private final int code; + private final String message; + private final DBObject details; + + /** + * Constructs a new instance. + * + * @param code the error code + * @param message the error message + * @param details details about the error + * @param index the index of the item in the bulk write operation that had this error + */ + public BulkWriteError(final int code, final String message, final DBObject details, final int index) { + this.code = code; + this.message = notNull("message", message); + this.details = notNull("details", details); + this.index = index; + } + + /** + * Gets the code associated with this error. + * + * @return the code + */ + public int getCode() { + return code; + } + + /** + * Gets the message associated with this error. + * + * @return the message + */ + public String getMessage() { + return message; + } + + /** + * Gets the details associated with this error. This document will not be null, but may be empty. + * + * @return the details + */ + public DBObject getDetails() { + return details; + } + + /** + * The index of the item in the bulk write operation with this error. + * + * @return the index + */ + public int getIndex() { + return index; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BulkWriteError that = (BulkWriteError) o; + + if (code != that.code) { + return false; + } + if (index != that.index) { + return false; + } + if (!details.equals(that.details)) { + return false; + } + if (!message.equals(that.message)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = index; + result = 31 * result + code; + result = 31 * result + message.hashCode(); + result = 31 * result + details.hashCode(); + return result; + } + + @Override + public String toString() { + return "BulkWriteError{" + + "index=" + index + + ", code=" + code + + ", message='" + message + '\'' + + ", details=" + details + + '}'; + } +} diff --git a/driver-legacy/src/main/com/mongodb/BulkWriteException.java b/driver-legacy/src/main/com/mongodb/BulkWriteException.java new file mode 100644 index 00000000000..427c20db702 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/BulkWriteException.java @@ -0,0 +1,122 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
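A minimal sketch of how callers typically consume these per-item error objects once a bulk write fails, using only the accessors defined on BulkWriteError above and on BulkWriteException below (the class and method names here are illustrative):

    import com.mongodb.BulkWriteError;
    import com.mongodb.BulkWriteException;

    class BulkErrorReporting {
        // Log each per-item failure; the details document is never null but may be empty.
        static void report(final BulkWriteException e) {
            for (BulkWriteError error : e.getWriteErrors()) {
                System.err.printf("item %d failed with code %d: %s%n",
                        error.getIndex(), error.getCode(), error.getMessage());
            }
            // The write concern error may be null, in which case the list above is non-empty.
            if (e.getWriteConcernError() != null) {
                System.err.println("write concern error: " + e.getWriteConcernError());
            }
        }
    }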
+ */ + +package com.mongodb; + +import com.mongodb.lang.Nullable; + +import java.util.List; +import java.util.Objects; + +/** + * An exception that represents all errors associated with a bulk write operation. + * + * @mongodb.driver.manual reference/method/BulkWriteResult/#BulkWriteResult.writeErrors BulkWriteResult.writeErrors + * @since 2.12 + * @serial exclude + */ +public class BulkWriteException extends MongoServerException { + private static final long serialVersionUID = -1505950263354313025L; + + private final BulkWriteResult writeResult; + private final List<BulkWriteError> writeErrors; + private final ServerAddress serverAddress; + private final WriteConcernError writeConcernError; + + /** + * Constructs a new instance. + * + * @param writeResult the write result + * @param writeErrors the list of write errors + * @param writeConcernError the write concern error + * @param serverAddress the server address. + */ + BulkWriteException(final BulkWriteResult writeResult, final List<BulkWriteError> writeErrors, + @Nullable final WriteConcernError writeConcernError, final ServerAddress serverAddress) { + super("Bulk write operation error on MongoDB server " + serverAddress + ". " + + (writeErrors.isEmpty() ? "" : "Write errors: " + writeErrors + ". ") + + (writeConcernError == null ? "" : "Write concern error: " + writeConcernError + ". "), serverAddress); + this.writeResult = writeResult; + this.writeErrors = writeErrors; + this.writeConcernError = writeConcernError; + this.serverAddress = serverAddress; + } + + /** + * The result of all successfully processed write operations. This will never be null. + * + * @return the bulk write result + */ + public BulkWriteResult getWriteResult() { + return writeResult; + } + + /** + * The list of errors, which will not be null, but may be empty (if the write concern error is not null). + * + * @return the list of errors + */ + public List<BulkWriteError> getWriteErrors() { + return writeErrors; + } + + /** + * The write concern error, which may be null (in which case the list of errors will not be empty). + * + * @return the write concern error + */ + @Nullable + public WriteConcernError getWriteConcernError() { + return writeConcernError; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BulkWriteException that = (BulkWriteException) o; + + if (!writeErrors.equals(that.writeErrors)) { + return false; + } + if (!serverAddress.equals(that.serverAddress)) { + return false; + } + if (!Objects.equals(writeConcernError, that.writeConcernError)) { + return false; + } + if (!writeResult.equals(that.writeResult)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = writeResult.hashCode(); + result = 31 * result + writeErrors.hashCode(); + result = 31 * result + serverAddress.hashCode(); + result = 31 * result + (writeConcernError != null ? writeConcernError.hashCode() : 0); + return result; + } +} diff --git a/driver-legacy/src/main/com/mongodb/BulkWriteHelper.java b/driver-legacy/src/main/com/mongodb/BulkWriteHelper.java new file mode 100644 index 00000000000..f2200fbcb76 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/BulkWriteHelper.java @@ -0,0 +1,75 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonDocumentReader; +import org.bson.codecs.Decoder; +import org.bson.codecs.DecoderContext; + +import java.util.ArrayList; +import java.util.List; + +final class BulkWriteHelper { + + static BulkWriteResult translateBulkWriteResult(final com.mongodb.bulk.BulkWriteResult bulkWriteResult, + final Decoder<DBObject> decoder) { + if (bulkWriteResult.wasAcknowledged()) { + return new AcknowledgedBulkWriteResult(bulkWriteResult.getInsertedCount(), bulkWriteResult.getMatchedCount(), + bulkWriteResult.getDeletedCount(), bulkWriteResult.getModifiedCount(), + translateBulkWriteUpserts(bulkWriteResult.getUpserts(), decoder)); + } else { + return new UnacknowledgedBulkWriteResult(); + } + } + + static List<BulkWriteUpsert> translateBulkWriteUpserts(final List<com.mongodb.bulk.BulkWriteUpsert> upserts, + final Decoder<DBObject> decoder) { + List<BulkWriteUpsert> retVal = new ArrayList<>(upserts.size()); + for (com.mongodb.bulk.BulkWriteUpsert cur : upserts) { + retVal.add(new com.mongodb.BulkWriteUpsert(cur.getIndex(), getUpsertedId(cur, decoder))); + } + return retVal; + } + + private static Object getUpsertedId(final com.mongodb.bulk.BulkWriteUpsert cur, final Decoder<DBObject> decoder) { + return decoder.decode(new BsonDocumentReader(new BsonDocument("_id", cur.getId())), DecoderContext.builder().build()).get("_id"); + } + + static BulkWriteException translateBulkWriteException(final MongoBulkWriteException e, final Decoder<DBObject> decoder) { + return new BulkWriteException(translateBulkWriteResult(e.getWriteResult(), decoder), translateWriteErrors(e.getWriteErrors()), + translateWriteConcernError(e.getWriteConcernError()), e.getServerAddress()); + } + + @Nullable + static WriteConcernError translateWriteConcernError(@Nullable final com.mongodb.bulk.WriteConcernError writeConcernError) { + return writeConcernError == null ? null : new WriteConcernError(writeConcernError.getCode(), writeConcernError.getMessage(), + DBObjects.toDBObject(writeConcernError.getDetails())); + } + + static List<BulkWriteError> translateWriteErrors(final List<com.mongodb.bulk.BulkWriteError> errors) { + List<BulkWriteError> retVal = new ArrayList<>(errors.size()); + for (com.mongodb.bulk.BulkWriteError cur : errors) { + retVal.add(new BulkWriteError(cur.getCode(), cur.getMessage(), DBObjects.toDBObject(cur.getDetails()), cur.getIndex())); + } + return retVal; + } + + private BulkWriteHelper() { + } +} diff --git a/driver-legacy/src/main/com/mongodb/BulkWriteOperation.java b/driver-legacy/src/main/com/mongodb/BulkWriteOperation.java new file mode 100644 index 00000000000..24f7230673c --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/BulkWriteOperation.java @@ -0,0 +1,144 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.lang.Nullable; +import org.bson.types.ObjectId; + +import java.util.ArrayList; +import java.util.List; + +import static com.mongodb.assertions.Assertions.isTrue; + +/** + * A bulk write operation. A bulk write operation consists of an ordered or unordered collection of write requests, + * which can be any combination of inserts, updates, replaces, or removes. + * + * @see DBCollection#initializeOrderedBulkOperation() + * @see com.mongodb.DBCollection#initializeUnorderedBulkOperation() + * + * @mongodb.driver.manual /reference/command/delete/ Delete + * @mongodb.driver.manual /reference/command/update/ Update + * @mongodb.driver.manual /reference/command/insert/ Insert + * @since 2.12 + */ +public class BulkWriteOperation { + private static final String ID_FIELD_NAME = "_id"; + private final boolean ordered; + private final DBCollection collection; + private final List<WriteRequest> requests = new ArrayList<>(); + private Boolean bypassDocumentValidation; + private boolean closed; + + BulkWriteOperation(final boolean ordered, final DBCollection collection) { + this.ordered = ordered; + this.collection = collection; + } + + /** + * Returns true if this is building an ordered bulk write request. + * + * @return whether this is building an ordered bulk write operation + * @see DBCollection#initializeOrderedBulkOperation() + * @see DBCollection#initializeUnorderedBulkOperation() + */ + public boolean isOrdered() { + return ordered; + } + + /** + * Gets whether to bypass document validation, or null if unspecified. The default is null. + * + * @return whether to bypass document validation, or null if unspecified. + * @since 2.14 + * @mongodb.server.release 3.2 + */ + @Nullable + public Boolean getBypassDocumentValidation() { + return bypassDocumentValidation; + } + + /** + * Sets whether to bypass document validation. + * + * @param bypassDocumentValidation whether to bypass document validation, or null if unspecified + * @since 2.14 + * @mongodb.server.release 3.2 + */ + public void setBypassDocumentValidation(@Nullable final Boolean bypassDocumentValidation) { + this.bypassDocumentValidation = bypassDocumentValidation; + } + + /** + * Add an insert request to the bulk operation + * + * @param document the document to insert + */ + public void insert(final DBObject document) { + isTrue("already executed", !closed); + if (document.get(ID_FIELD_NAME) == null) { + document.put(ID_FIELD_NAME, new ObjectId()); + } + addRequest(new InsertRequest(document, collection.getObjectCodec())); + } + + /** + * Start building a write request to add to the bulk write operation. The returned builder can be used to create an update, replace, + * or remove request with the given query.
+ * + * @param query the query for an update, replace or remove request + * @return a builder for a single write request + */ + public BulkWriteRequestBuilder find(final DBObject query) { + isTrue("already executed", !closed); + return new BulkWriteRequestBuilder(this, query, collection.getDefaultDBObjectCodec(), collection.getObjectCodec()); + } + + /** + * Execute the bulk write operation with the default write concern of the collection from which this came. Note that the + * continueOnError property of the write concern is ignored. + * + * @return the result of the bulk write operation. + * @throws com.mongodb.BulkWriteException if the write failed due to some failure specific to the write command + * @throws MongoException if the operation failed for some other reason + */ + public BulkWriteResult execute() { + isTrue("already executed", !closed); + closed = true; + return collection.executeBulkWriteOperation(ordered, bypassDocumentValidation, requests); + } + + /** + * Execute the bulk write operation with the given write concern. Note that the continueOnError property of the write concern is + * ignored. + * + * @param writeConcern the write concern to apply to the bulk operation. + * @return the result of the bulk write operation. + * @throws com.mongodb.BulkWriteException if the write failed due to some failure specific to the write command + * @throws MongoException if the operation failed for some other reason + */ + public BulkWriteResult execute(final WriteConcern writeConcern) { + isTrue("already executed", !closed); + closed = true; + return collection.executeBulkWriteOperation(ordered, bypassDocumentValidation, requests, writeConcern); + } + + void addRequest(final WriteRequest request) { + isTrue("already executed", !closed); + requests.add(request); + } +} diff --git a/driver-legacy/src/main/com/mongodb/BulkWriteRequestBuilder.java b/driver-legacy/src/main/com/mongodb/BulkWriteRequestBuilder.java new file mode 100644 index 00000000000..98866e45a8b --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/BulkWriteRequestBuilder.java @@ -0,0 +1,138 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.client.model.Collation; +import com.mongodb.lang.Nullable; +import org.bson.codecs.Encoder; + +import java.util.List; + +/** + * A builder for a single write request.
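Putting the pieces together, a minimal sketch of the intended call pattern: requests accumulate on the operation, the find(...) builder narrows each write, and nothing is sent to the server until execute(). The collection and its data are assumed:

    import com.mongodb.BasicDBObject;
    import com.mongodb.BulkWriteOperation;
    import com.mongodb.BulkWriteResult;
    import com.mongodb.DBCollection;

    class BulkWriteSketch {
        static void run(final DBCollection collection) {
            // An ordered operation stops at the first failing request;
            // an unordered one attempts every request.
            BulkWriteOperation bulk = collection.initializeOrderedBulkOperation();
            bulk.insert(new BasicDBObject("_id", 1).append("x", 1));
            bulk.find(new BasicDBObject("_id", 2)).upsert().updateOne(
                    new BasicDBObject("$set", new BasicDBObject("x", 2)));
            bulk.find(new BasicDBObject("x", new BasicDBObject("$lt", 0))).remove();
            BulkWriteResult result = bulk.execute(); // the operation is now closed
            System.out.println("inserted: " + result.getInsertedCount());
        }
    }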
+ * + * @mongodb.driver.manual reference/command/delete/ Delete + * @mongodb.driver.manual reference/command/update/ Update + * @mongodb.driver.manual reference/command/insert/ Insert + * @since 2.12 + */ +public class BulkWriteRequestBuilder { + private final BulkWriteOperation bulkWriteOperation; + private final DBObject query; + private final Encoder codec; + private final Encoder replacementCodec; + private Collation collation; + + BulkWriteRequestBuilder(final BulkWriteOperation bulkWriteOperation, final DBObject query, final Encoder queryCodec, + final Encoder replacementCodec) { + this.bulkWriteOperation = bulkWriteOperation; + this.query = query; + this.codec = queryCodec; + this.replacementCodec = replacementCodec; + } + + /** + * Returns the collation + * + * @return the collation + * @since 3.4 + * @mongodb.server.release 3.4 + */ + @Nullable + public Collation getCollation() { + return collation; + } + + /** + * Sets the collation + * + * @param collation the collation + * @return this + * @since 3.4 + * @mongodb.server.release 3.4 + */ + @Nullable + public BulkWriteRequestBuilder collation(final Collation collation) { + this.collation = collation; + return this; + } + + /** + * Adds a request to remove all documents in the collection that match the query with which this builder was created. + */ + public void remove() { + bulkWriteOperation.addRequest(new RemoveRequest(query, true, codec, collation)); + } + + /** + * Adds a request to remove one document in the collection that matches the query with which this builder was created. + */ + public void removeOne() { + bulkWriteOperation.addRequest(new RemoveRequest(query, false, codec, collation)); + } + + /** + * Adds a request to replace one document in the collection that matches the query with which this builder was created. + * + * @param document the replacement document, which must be structured just as a document you would insert. It can not contain any + * update operators. + */ + public void replaceOne(final DBObject document) { + new BulkUpdateRequestBuilder(bulkWriteOperation, query, false, codec, replacementCodec, collation, null).replaceOne(document); + } + + /** + * Adds a request to update all documents in the collection that match the query with which this builder was created. + * + * @param update the update criteria + */ + public void update(final DBObject update) { + new BulkUpdateRequestBuilder(bulkWriteOperation, query, false, codec, replacementCodec, collation, null).update(update); + } + + /** + * Adds a request to update one document in the collection that matches the query with which this builder was created. + * + * @param update the update criteria + */ + public void updateOne(final DBObject update) { + new BulkUpdateRequestBuilder(bulkWriteOperation, query, false, codec, replacementCodec, collation, null).updateOne(update); + } + + /** + * Specifies that the request being built should be an upsert. + * + * @return a new builder that allows only update and replace, since upsert does not apply to remove. + * @mongodb.driver.manual tutorial/modify-documents/#upsert-option Upsert + */ + public BulkUpdateRequestBuilder upsert() { + return new BulkUpdateRequestBuilder(bulkWriteOperation, query, true, codec, replacementCodec, collation, null); + } + + /** + * Specifies that the request being built should use the given array filters for an update. 
Note that this option only applies to + * update operations and will be ignored for replace operations + * + * @param arrayFilters the array filters to apply to the update operation + * @return a new builder that allows only update and replace, since upsert does not apply to remove. + * @since 3.6 + * @mongodb.server.release 3.6 + */ + public BulkUpdateRequestBuilder arrayFilters(final List arrayFilters) { + return new BulkUpdateRequestBuilder(bulkWriteOperation, query, false, codec, replacementCodec, collation, arrayFilters); + } +} diff --git a/driver-legacy/src/main/com/mongodb/BulkWriteResult.java b/driver-legacy/src/main/com/mongodb/BulkWriteResult.java new file mode 100644 index 00000000000..ceab7be4f34 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/BulkWriteResult.java @@ -0,0 +1,86 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import java.util.List; + +/** + * The result of a successful bulk write operation. + * + * @mongodb.driver.manual reference/command/delete/#delete-command-output Delete Result + * @mongodb.driver.manual reference/command/update/#delete-command-output Delete Result + * @mongodb.driver.manual reference/command/insert/#delete-command-output Delete Result + * @since 2.12 + */ +public abstract class BulkWriteResult { + + /** + * Returns true if the write was acknowledged. + * + * @return true if the write was acknowledged + * @see WriteConcern#UNACKNOWLEDGED + */ + public abstract boolean isAcknowledged(); + + /** + * Returns the number of documents inserted by the write operation. + * + * @return the number of documents inserted by the write operation + * @throws java.lang.UnsupportedOperationException if the write was unacknowledged. + * @see WriteConcern#UNACKNOWLEDGED + */ + public abstract int getInsertedCount(); + + /** + * Returns the number of documents matched by updates or replacements in the write operation. This will include documents that matched + * the query but where the modification didn't result in any actual change to the document; for example, if you set the value of some + * field, and the field already has that value, that will still count as an update. + * + * @return the number of documents matched by updates in the write operation + * @throws java.lang.UnsupportedOperationException if the write was unacknowledged. + * @see WriteConcern#UNACKNOWLEDGED + */ + public abstract int getMatchedCount(); + + /** + * Returns the number of documents removed by the write operation. + * + * @return the number of documents removed by the write operation + * @throws java.lang.UnsupportedOperationException if the write was unacknowledged. + * @see WriteConcern#UNACKNOWLEDGED + */ + public abstract int getRemovedCount(); + + /** + *
<p>Returns the number of documents modified by the write operation. This only applies to updates or replacements, and will only
+ * count documents that were actually changed; for example, if you set the value of some field, and the field already has that value,
+ * that will not count as a modification.</p>
+ * + * @return the number of documents modified by the write operation + * @see WriteConcern#UNACKNOWLEDGED + */ + public abstract int getModifiedCount(); + + /** + * Gets an unmodifiable list of upserted items, or the empty list if there were none. + * + * @return a list of upserted items, or the empty list if there were none. + * @throws java.lang.UnsupportedOperationException if the write was unacknowledged. + * @see WriteConcern#UNACKNOWLEDGED + */ + public abstract List getUpserts(); +} diff --git a/driver-legacy/src/main/com/mongodb/BulkWriteUpsert.java b/driver-legacy/src/main/com/mongodb/BulkWriteUpsert.java new file mode 100644 index 00000000000..d2370a1a11a --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/BulkWriteUpsert.java @@ -0,0 +1,96 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +/** + * Represents an upsert request in a bulk write operation that resulted in an insert. It contains the index of the upsert request in the + * operation and the value of the _id field of the inserted document. + * + * @since 2.12 + * @see BulkWriteRequestBuilder#upsert() + * + * @mongodb.driver.manual reference/command/update/#update.upserted Bulk Upsert + */ +public class BulkWriteUpsert { + private final int index; + private final Object id; + + /** + * Constructs an instance. + * + * @param index the index of the item that was upserted + * @param id the value of the _id of the upserted item + */ + public BulkWriteUpsert(final int index, final Object id) { + this.index = index; + this.id = id; + } + + /** + * Gets the index of the upserted item based on the order it was added to the bulk write operation. + * + * @return the index + */ + public int getIndex() { + return index; + } + + /** + * Gets the id of the upserted item. + * + * @return the id + */ + public Object getId() { + return id; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + BulkWriteUpsert that = (BulkWriteUpsert) o; + + if (index != that.index) { + return false; + } + if (!id.equals(that.id)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = index; + result = 31 * result + id.hashCode(); + return result; + } + + @Override + public String toString() { + return "BulkWriteUpsert{" + + "index=" + index + + ", id=" + id + + '}'; + } +} diff --git a/driver-legacy/src/main/com/mongodb/CommandResult.java b/driver-legacy/src/main/com/mongodb/CommandResult.java new file mode 100644 index 00000000000..70ddf5bd847 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/CommandResult.java @@ -0,0 +1,113 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonDocumentReader; +import org.bson.codecs.Decoder; +import org.bson.codecs.DecoderContext; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A simple wrapper to hold the result of a command. All the fields from the response document have been added to this result. + * + * @mongodb.driver.manual reference/command/ Database Commands + */ +public class CommandResult extends BasicDBObject { + private static final long serialVersionUID = 5907909423864204060L; + + /** + * The response document. + */ + private final BsonDocument response; + /** + * The server address. + */ + private final ServerAddress address; + + CommandResult(final BsonDocument response, final Decoder decoder) { + this(response, decoder, null); + } + + CommandResult(final BsonDocument response, final Decoder decoder, @Nullable final ServerAddress address) { + this.address = address; + this.response = notNull("response", response); + putAll(decoder.decode(new BsonDocumentReader(response), DecoderContext.builder().build())); + } + + /** + * Gets the "ok" field, which is whether this command executed correctly or not. + * + * @return true if the command executed without error. + */ + public boolean ok() { + Object okValue = get("ok"); + if (okValue instanceof Boolean) { + return (Boolean) okValue; + } else if (okValue instanceof Number) { + return ((Number) okValue).intValue() == 1; + } else { + return false; + } + } + + /** + * Gets the error message associated with a failed command. + * + * @return The error message or null + */ + @Nullable + public String getErrorMessage() { + Object foo = get("errmsg"); + if (foo == null) { + return null; + } + return foo.toString(); + } + + /** + * Utility method to create an exception from a failed command. + * + * @return The mongo exception, or null if the command was successful. + */ + @Nullable + public MongoException getException() { + if (!ok()) { + return createException(); + } + + return null; + } + + /** + * Throws a {@code CommandFailureException} if the command failed. Otherwise, returns normally. + * + * @throws MongoException with the exception from the failed command + * @see #ok() + */ + public void throwOnError() { + if (!ok()) { + throw createException(); + } + } + + private MongoException createException() { + return new MongoCommandException(response, address); + } +} diff --git a/driver-legacy/src/main/com/mongodb/CompoundDBObjectCodec.java b/driver-legacy/src/main/com/mongodb/CompoundDBObjectCodec.java new file mode 100644 index 00000000000..4d0f0ecff97 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/CompoundDBObjectCodec.java @@ -0,0 +1,63 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.codecs.Codec; +import org.bson.codecs.Decoder; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.Encoder; +import org.bson.codecs.EncoderContext; + +class CompoundDBObjectCodec implements Codec { + + private final Encoder encoder; + private final Decoder decoder; + + CompoundDBObjectCodec(final Encoder encoder, final Decoder decoder) { + this.encoder = encoder; + this.decoder = decoder; + } + + CompoundDBObjectCodec(final Codec codec) { + this(codec, codec); + } + + @Override + public DBObject decode(final BsonReader reader, final DecoderContext decoderContext) { + return decoder.decode(reader, decoderContext); + } + + @Override + public void encode(final BsonWriter writer, final DBObject value, final EncoderContext encoderContext) { + encoder.encode(writer, value, encoderContext); + } + + @Override + public Class getEncoderClass() { + return DBObject.class; + } + + public Encoder getEncoder() { + return encoder; + } + + public Decoder getDecoder() { + return decoder; + } +} diff --git a/driver-legacy/src/main/com/mongodb/Cursor.java b/driver-legacy/src/main/com/mongodb/Cursor.java new file mode 100644 index 00000000000..67bbf8bf4ef --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/Cursor.java @@ -0,0 +1,65 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.lang.Nullable; + +import java.io.Closeable; +import java.util.Iterator; + +/** + * Interface for providing consistent behaviour between different Cursor implementations. + * + * @mongodb.driver.manual core/cursors/ Cursors + * @since 2.12 + */ +public interface Cursor extends Iterator, Closeable { + + /** + * Gets the number of results available locally without blocking, which may be 0. + * + *
<p>
+ * If the cursor is known to be exhausted, returns 0. If the cursor is closed before it's been exhausted, it may return a non-zero
+ * value.
+ * </p>
+ * + * @return the number of results available locally without blocking + * @since 4.5 + */ + int available(); + + /** + * Gets the server's identifier for this Cursor. + * + * @return the cursor's ID, or 0 if there is no active cursor. + */ + long getCursorId(); + + /** + * Gets the address of the server that data is pulled from. Note that this information may not be available until hasNext() or + * next() is called. + * + * @return the address of the server that data is pulled from, or null if a cursor is no longer established + */ + @Nullable + ServerAddress getServerAddress(); + + /** + * Terminates this cursor on the server. + */ + void close(); +} diff --git a/driver-legacy/src/main/com/mongodb/DB.java b/driver-legacy/src/main/com/mongodb/DB.java new file mode 100644 index 00000000000..b5aa60a58e1 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/DB.java @@ -0,0 +1,596 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.client.internal.MongoIterableImpl; +import com.mongodb.client.internal.OperationExecutor; +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.DBCreateViewOptions; +import com.mongodb.client.model.ValidationAction; +import com.mongodb.client.model.ValidationLevel; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.operation.CommandReadOperation; +import com.mongodb.internal.operation.CreateCollectionOperation; +import com.mongodb.internal.operation.CreateViewOperation; +import com.mongodb.internal.operation.DropDatabaseOperation; +import com.mongodb.internal.operation.ListCollectionsOperation; +import com.mongodb.internal.operation.ReadOperationCursor; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonDocumentWrapper; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.Codec; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; + +import static com.mongodb.DBCollection.createWriteConcernException; +import static com.mongodb.MongoNamespace.checkDatabaseNameValidity; +import static com.mongodb.ReadPreference.primary; +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A thread-safe client view of a logical database in a MongoDB cluster. A DB instance can be achieved from a {@link MongoClient} instance + * using code like: + *
* <pre>
+ * {@code
+ * MongoClient mongoClient = new MongoClient();
+ * DB db = mongoClient.getDB("<db name>");
+ * }</pre>
+ * + * See {@link MongoClient#getDB(String)} for further information about the effective deprecation of this class. + * + * @mongodb.driver.manual reference/glossary/#term-database Database + * @see MongoClient + */ +@ThreadSafe +@SuppressWarnings("deprecation") +public class DB { + private final MongoClient mongo; + private final String name; + private final OperationExecutor executor; + private final ConcurrentHashMap collectionCache; + private final Codec commandCodec; + private volatile ReadPreference readPreference; + private volatile WriteConcern writeConcern; + private volatile ReadConcern readConcern; + + DB(final MongoClient mongo, final String name, final OperationExecutor executor) { + checkDatabaseNameValidity(name); + this.mongo = mongo; + this.name = name; + this.executor = executor; + this.collectionCache = new ConcurrentHashMap<>(); + this.commandCodec = new DBObjectCodec(mongo.getCodecRegistry()); + } + + /** + * Gets the MongoClient instance + * + * @return the MongoClient instance that this database was constructed from + * @throws IllegalStateException if this DB was not created from a MongoClient instance + * @since 3.9 + */ + public MongoClient getMongoClient() { + return mongo; + } + + /** + * Sets the read preference for this database. Will be used as default for read operations from any collection in this database. See the + * documentation for {@link ReadPreference} for more information. + * + * @param readPreference {@code ReadPreference} to use + * @mongodb.driver.manual core/read-preference/ Read Preference + */ + public void setReadPreference(final ReadPreference readPreference) { + this.readPreference = readPreference; + } + + /** + * Sets the write concern for this database. It will be used for write operations to any collection in this database. See the + * documentation for {@link WriteConcern} for more information. + * + * @param writeConcern {@code WriteConcern} to use + * @mongodb.driver.manual core/write-concern/ Write Concern + */ + public void setWriteConcern(final WriteConcern writeConcern) { + this.writeConcern = writeConcern; + } + + /** + * Gets the read preference for this database. + * + * @return {@code ReadPreference} to be used for read operations, if not specified explicitly + * @mongodb.driver.manual core/read-preference/ Read Preference + */ + public ReadPreference getReadPreference() { + return readPreference != null ? readPreference : mongo.getReadPreference(); + } + + /** + * Gets the write concern for this database. + * + * @return {@code WriteConcern} to be used for write operations, if not specified explicitly + * @mongodb.driver.manual core/write-concern/ Write Concern + */ + public WriteConcern getWriteConcern() { + return writeConcern != null ? writeConcern : mongo.getWriteConcern(); + } + + /** + * Sets the read concern for this database. + * + * @param readConcern the read concern to use for this collection + * @since 3.3 + * @mongodb.server.release 3.2 + * @mongodb.driver.manual reference/readConcern/ Read Concern + */ + public void setReadConcern(final ReadConcern readConcern) { + this.readConcern = readConcern; + } + + /** + * Get the read concern for this database. + * + * @return the {@link com.mongodb.ReadConcern} + * @since 3.3 + * @mongodb.server.release 3.2 + * @mongodb.driver.manual reference/readConcern/ Read Concern + */ + public ReadConcern getReadConcern() { + return readConcern != null ? readConcern : mongo.getReadConcern(); + } + + /** + * Gets a collection with a given name. 
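+ * <p>For example (the collection name is illustrative):</p>
+ * <pre>
+ * {@code
+ * DBCollection coll = db.getCollection("people");
+ * }</pre>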
+ * + * @param name the name of the collection to return + * @return the collection + * @throws IllegalArgumentException if the name is invalid + * @see MongoNamespace#checkCollectionNameValidity(String) + */ + public DBCollection getCollection(final String name) { + DBCollection collection = collectionCache.get(name); + if (collection != null) { + return collection; + } + + collection = new DBCollection(name, this, executor); + if (mongo.getMongoClientOptions().getDbDecoderFactory() != DefaultDBDecoder.FACTORY) { + collection.setDBDecoderFactory(mongo.getMongoClientOptions().getDbDecoderFactory()); + } + if (mongo.getMongoClientOptions().getDbEncoderFactory() != DefaultDBEncoder.FACTORY) { + collection.setDBEncoderFactory(mongo.getMongoClientOptions().getDbEncoderFactory()); + } + DBCollection old = collectionCache.putIfAbsent(name, collection); + return old != null ? old : collection; + } + + /** + * Drops this database. Removes all data on disk. Use with caution. + * + * @throws MongoException if the operation failed + * @mongodb.driver.manual reference/command/dropDatabase/ Drop Database + */ + public void dropDatabase() { + try { + getExecutor().execute(new DropDatabaseOperation(getName(), getWriteConcern()), getReadConcern()); + } catch (MongoWriteConcernException e) { + throw createWriteConcernException(e); + } + } + + /** + * Returns the name of this database. + * + * @return the name + */ + public String getName() { + return name; + } + + /** + * Returns a set containing the names of all collections in this database. + * + * @return the names of collections in this database + * @throws MongoException if the operation failed + * @mongodb.driver.manual reference/method/db.getCollectionNames/ getCollectionNames() + */ + public Set getCollectionNames() { + List collectionNames = + new MongoIterableImpl(null, executor, ReadConcern.DEFAULT, primary(), + mongo.getMongoClientOptions().getRetryReads(), DB.this.getTimeoutSettings()) { + @Override + public ReadOperationCursor asReadOperation() { + return new ListCollectionsOperation<>(name, commandCodec).nameOnly(true); + } + + @Override + protected OperationExecutor getExecutor() { + return executor; + } + }.map(result -> (String) result.get("name")).into(new ArrayList<>()); + Collections.sort(collectionNames); + return new LinkedHashSet<>(collectionNames); + } + + /** + *
<p>Creates a collection with a given name and options. If the collection already exists,
+ * this throws a {@code CommandFailureException}.</p>
+ *
+ * <p>Possible options:</p>
+ * <ul>
+ *     <li><b>capped</b> ({@code boolean}) - Enables a collection cap. False by default. If enabled,
+ *     you must specify a size parameter.</li>
+ *     <li><b>size</b> ({@code int}) - If capped is true, size specifies a maximum size in bytes for the capped collection. When
+ *     capped is false, you may use size to preallocate space.</li>
+ *     <li><b>max</b> ({@code int}) - Optional. Specifies a maximum "cap" in number of documents for capped collections. You must
+ *     also specify size when specifying max.</li>
+ * </ul>
+ * <p>Note that if the {@code options} parameter is {@code null}, the creation will be deferred to when the collection is written
+ * to.</p>
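+ * <p>For example, a sketch that creates a capped collection (the name and limits are illustrative):</p>
+ * <pre>
+ * {@code
+ * DBObject options = new BasicDBObject("capped", true).append("size", 4194304).append("max", 5000);
+ * DBCollection events = db.createCollection("events", options);
+ * }</pre>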
+ * + * @param collectionName the name of the collection to return + * @param options options + * @return the collection + * @throws MongoCommandException if the server is unable to create the collection + * @throws WriteConcernException if the {@code WriteConcern} specified on this {@code DB} could not be satisfied + * @throws MongoException for all other failures + * @mongodb.driver.manual reference/method/db.createCollection/ createCollection() + */ + public DBCollection createCollection(final String collectionName, @Nullable final DBObject options) { + if (options != null) { + try { + executor.execute(getCreateCollectionOperation(collectionName, options), getReadConcern()); + } catch (MongoWriteConcernException e) { + throw createWriteConcernException(e); + } + } + return getCollection(collectionName); + } + + /** + * Creates a view with the given name, backing collection/view name, and aggregation pipeline that defines the view. + * + * @param viewName the name of the view to create + * @param viewOn the backing collection/view for the view + * @param pipeline the pipeline that defines the view + * @return the view as a DBCollection + * @throws MongoCommandException if the server is unable to create the collection + * @throws WriteConcernException if the {@code WriteConcern} specified on this {@code DB} could not be satisfied + * @throws MongoException for all other failures + * @since 3.4 + * @mongodb.server.release 3.4 + * @mongodb.driver.manual reference/command/create Create Command + */ + public DBCollection createView(final String viewName, final String viewOn, final List pipeline) { + return createView(viewName, viewOn, pipeline, new DBCreateViewOptions()); + } + + /** + * Creates a view with the given name, backing collection/view name, aggregation pipeline, and options that defines the view. 
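+ * <p>For example, a sketch in which the view name, backing collection, and pipeline are all illustrative:</p>
+ * <pre>
+ * {@code
+ * List<DBObject> pipeline = Collections.<DBObject>singletonList(
+ *         new BasicDBObject("$match", new BasicDBObject("status", "active")));
+ * DBCollection view = db.createView("activePeople", "people", pipeline, new DBCreateViewOptions());
+ * }</pre>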
+ * + * @param viewName the name of the view to create + * @param viewOn the backing collection/view for the view + * @param pipeline the pipeline that defines the view + * @param options the options for creating the view + * @return the view as a DBCollection + * @throws MongoCommandException if the server is unable to create the collection + * @throws WriteConcernException if the {@code WriteConcern} specified on this {@code DB} could not be satisfied + * @throws MongoException for all other failures + * @since 3.4 + * @mongodb.server.release 3.4 + * @mongodb.driver.manual reference/command/create Create Command + */ + public DBCollection createView(final String viewName, final String viewOn, final List pipeline, + final DBCreateViewOptions options) { + try { + notNull("options", options); + DBCollection view = getCollection(viewName); + executor.execute(new CreateViewOperation(name, viewName, viewOn, + view.preparePipeline(pipeline), writeConcern) + .collation(options.getCollation()), getReadConcern()); + return view; + } catch (MongoWriteConcernException e) { + throw createWriteConcernException(e); + } + } + + + private CreateCollectionOperation getCreateCollectionOperation(final String collectionName, final DBObject options) { + if (options.get("size") != null && !(options.get("size") instanceof Number)) { + throw new IllegalArgumentException("'size' should be Number"); + } + if (options.get("max") != null && !(options.get("max") instanceof Number)) { + throw new IllegalArgumentException("'max' should be Number"); + } + if (options.get("capped") != null && !(options.get("capped") instanceof Boolean)) { + throw new IllegalArgumentException("'capped' should be Boolean"); + } + if (options.get("autoIndexId") != null && !(options.get("autoIndexId") instanceof Boolean)) { + throw new IllegalArgumentException("'autoIndexId' should be Boolean"); + } + if (options.get("storageEngine") != null && !(options.get("storageEngine") instanceof DBObject)) { + throw new IllegalArgumentException("'storageEngine' should be DBObject"); + } + if (options.get("indexOptionDefaults") != null && !(options.get("indexOptionDefaults") instanceof DBObject)) { + throw new IllegalArgumentException("'indexOptionDefaults' should be DBObject"); + } + if (options.get("validator") != null && !(options.get("validator") instanceof DBObject)) { + throw new IllegalArgumentException("'validator' should be DBObject"); + } + if (options.get("validationLevel") != null && !(options.get("validationLevel") instanceof String)) { + throw new IllegalArgumentException("'validationLevel' should be String"); + } + if (options.get("validationAction") != null && !(options.get("validationAction") instanceof String)) { + throw new IllegalArgumentException("'validationAction' should be String"); + } + + boolean capped = false; + boolean autoIndex = true; + long sizeInBytes = 0; + long maxDocuments = 0; + BsonDocument storageEngineOptions = null; + BsonDocument indexOptionDefaults = null; + BsonDocument validator = null; + ValidationLevel validationLevel = null; + ValidationAction validationAction = null; + + if (options.get("capped") != null) { + capped = (Boolean) options.get("capped"); + } + if (options.get("size") != null) { + sizeInBytes = ((Number) options.get("size")).longValue(); + } + if (options.get("autoIndexId") != null) { + autoIndex = (Boolean) options.get("autoIndexId"); + } + if (options.get("max") != null) { + maxDocuments = ((Number) options.get("max")).longValue(); + } + if (options.get("storageEngine") != null) { + 
storageEngineOptions = wrap((DBObject) options.get("storageEngine")); + } + if (options.get("indexOptionDefaults") != null) { + indexOptionDefaults = wrap((DBObject) options.get("indexOptionDefaults")); + } + if (options.get("validator") != null) { + validator = wrap((DBObject) options.get("validator")); + } + if (options.get("validationLevel") != null) { + validationLevel = ValidationLevel.fromString((String) options.get("validationLevel")); + } + if (options.get("validationAction") != null) { + validationAction = ValidationAction.fromString((String) options.get("validationAction")); + } + Collation collation = DBObjectCollationHelper.createCollationFromOptions(options); + return new CreateCollectionOperation(getName(), collectionName, + getWriteConcern()) + .capped(capped) + .collation(collation) + .sizeInBytes(sizeInBytes) + .autoIndex(autoIndex) + .maxDocuments(maxDocuments) + .storageEngineOptions(storageEngineOptions) + .indexOptionDefaults(indexOptionDefaults) + .validator(validator) + .validationLevel(validationLevel) + .validationAction(validationAction); + } + + /** + * Executes a database command. This method constructs a simple DBObject using {@code command} as the field name and {@code true} as its + * value, and calls {@link DB#command(DBObject, ReadPreference) } with the default read preference for the database. + * + * @param command command to execute + * @return result of command from the database + * @throws MongoException if the command failed + * @mongodb.driver.manual tutorial/use-database-commands Commands + */ + public CommandResult command(final String command) { + return command(new BasicDBObject(command, Boolean.TRUE), getReadPreference()); + } + + /** + * Executes a database command. This method calls {@link DB#command(DBObject, ReadPreference) } with the default read preference for the + * database. + * + * @param command {@code DBObject} representation of the command to be executed + * @return result of the command execution + * @throws MongoException if the command failed + * @mongodb.driver.manual tutorial/use-database-commands Commands + */ + public CommandResult command(final DBObject command) { + return command(command, getReadPreference()); + } + + /** + * Executes a database command. This method calls {@link DB#command(DBObject, ReadPreference, DBEncoder) } with the default read + * preference for the database. + * + * @param command {@code DBObject} representation of the command to be executed + * @param encoder {@link DBEncoder} to be used for command encoding + * @return result of the command execution + * @throws MongoException if the command failed + * @mongodb.driver.manual tutorial/use-database-commands Commands + */ + public CommandResult command(final DBObject command, final DBEncoder encoder) { + return command(command, getReadPreference(), encoder); + } + + /** + * Executes a database command with the selected readPreference, and encodes the command using the given encoder. 
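+ * <p>For example, a sketch issuing a simple command with an explicit read preference; passing a null encoder falls back to the
+ * default (the command document is illustrative):</p>
+ * <pre>
+ * {@code
+ * CommandResult result = db.command(new BasicDBObject("dbStats", 1), ReadPreference.primary(), null);
+ * result.throwOnError();
+ * }</pre>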
+ * + * @param command The {@code DBObject} representation the command to be executed + * @param readPreference Where to execute the command - this will only be applied for a subset of commands + * @param encoder The DBEncoder that knows how to serialise the command + * @return The result of executing the command, success or failure + * @mongodb.driver.manual tutorial/use-database-commands Commands + * @since 2.12 + */ + public CommandResult command(final DBObject command, final ReadPreference readPreference, @Nullable final DBEncoder encoder) { + try { + return executeCommand(wrap(command, encoder), getCommandReadPreference(command, readPreference)); + } catch (MongoCommandException ex) { + return new CommandResult(ex.getResponse(), getDefaultDBObjectCodec(), ex.getServerAddress()); + } + } + + /** + * Executes the command against the database with the given read preference. + * + * @param command The {@code DBObject} representation the command to be executed + * @param readPreference Where to execute the command - this will only be applied for a subset of commands + * @return The result of executing the command, success or failure + * @mongodb.driver.manual tutorial/use-database-commands Commands + * @since 2.12 + */ + public CommandResult command(final DBObject command, final ReadPreference readPreference) { + return command(command, readPreference, null); + } + + /** + * Executes a database command. This method constructs a simple {@link DBObject} and calls {@link DB#command(DBObject, ReadPreference) + * }. + * + * @param command The name of the command to be executed + * @param readPreference Where to execute the command - this will only be applied for a subset of commands + * @return The result of the command execution + * @throws MongoException if the command failed + * @mongodb.driver.manual tutorial/use-database-commands Commands + * @since 2.12 + */ + public CommandResult command(final String command, final ReadPreference readPreference) { + return command(new BasicDBObject(command, true), readPreference); + } + + /** + * Gets another database on same server + * + * @param name name of the database + * @return the DB for the given name + */ + @SuppressWarnings("deprecation") // The old API (i.e. DB) will use deprecated methods. + public DB getSisterDB(final String name) { + return mongo.getDB(name); + } + + /** + * Checks to see if a collection with a given name exists on a server. 
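+ * <p>For example (the collection name is illustrative):</p>
+ * <pre>
+ * {@code
+ * if (!db.collectionExists("people")) {
+ *     db.createCollection("people", null);
+ * }
+ * }</pre>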
+ * + * @param collectionName a name of the collection to test for existence + * @return {@code false} if no collection by that name exists, {@code true} if a match to an existing collection was found + * @throws MongoException if the operation failed + */ + public boolean collectionExists(final String collectionName) { + Set collectionNames = getCollectionNames(); + for (final String name : collectionNames) { + if (name.equalsIgnoreCase(collectionName)) { + return true; + } + } + return false; + } + + @Override + public String toString() { + return "DB{name='" + name + '\'' + '}'; + } + + CommandResult executeCommand(final BsonDocument commandDocument, final ReadPreference readPreference) { + return new CommandResult(executor.execute( + new CommandReadOperation<>(getName(), commandDocument, + new BsonDocumentCodec()), readPreference, getReadConcern(), null), getDefaultDBObjectCodec()); + } + + OperationExecutor getExecutor() { + return executor; + } + TimeoutSettings getTimeoutSettings() { + return mongo.getTimeoutSettings(); + } + + private BsonDocument wrap(final DBObject document) { + return new BsonDocumentWrapper<>(document, commandCodec); + } + + private BsonDocument wrap(final DBObject document, @Nullable final DBEncoder encoder) { + if (encoder == null) { + return wrap(document); + } else { + return new BsonDocumentWrapper<>(document, new DBEncoderAdapter(encoder)); + } + } + + /** + * Determines the read preference that should be used for the given command. + * + * @param command the {@link DBObject} representing the command + * @param requestedPreference the preference requested by the client. + * @return the read preference to use for the given command. It will never return {@code null}. + * @see com.mongodb.ReadPreference + */ + ReadPreference getCommandReadPreference(final DBObject command, @Nullable final ReadPreference requestedPreference) { + String comString = command.keySet().iterator().next().toLowerCase(); + boolean primaryRequired = !OBEDIENT_COMMANDS.contains(comString); + + if (primaryRequired) { + return primary(); + } else if (requestedPreference == null) { + return primary(); + } else { + return requestedPreference; + } + } + + Codec getDefaultDBObjectCodec() { + return new DBObjectCodec(getMongoClient().getCodecRegistry(), + DBObjectCodec.getDefaultBsonTypeClassMap(), + new DBCollectionObjectFactory()) + .withUuidRepresentation(getMongoClient().getMongoClientOptions().getUuidRepresentation()); + } + + @Nullable + Long getTimeoutMS() { + return mongo.getMongoClientOptions().getTimeout(); + } + + private static final Set OBEDIENT_COMMANDS = new HashSet<>(); + + static { + OBEDIENT_COMMANDS.add("aggregate"); + OBEDIENT_COMMANDS.add("collstats"); + OBEDIENT_COMMANDS.add("count"); + OBEDIENT_COMMANDS.add("dbstats"); + OBEDIENT_COMMANDS.add("distinct"); + OBEDIENT_COMMANDS.add("geonear"); + OBEDIENT_COMMANDS.add("geosearch"); + OBEDIENT_COMMANDS.add("geowalk"); + OBEDIENT_COMMANDS.add("group"); + OBEDIENT_COMMANDS.add("listcollections"); + OBEDIENT_COMMANDS.add("listindexes"); + OBEDIENT_COMMANDS.add("parallelcollectionscan"); + OBEDIENT_COMMANDS.add("text"); + } +} diff --git a/driver-legacy/src/main/com/mongodb/DBCallback.java b/driver-legacy/src/main/com/mongodb/DBCallback.java new file mode 100644 index 00000000000..c59fef983ee --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/DBCallback.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import org.bson.BSONCallback; + +/** + * The DB callback interface. + */ +public interface DBCallback extends BSONCallback { +} diff --git a/driver-legacy/src/main/com/mongodb/DBCallbackFactory.java b/driver-legacy/src/main/com/mongodb/DBCallbackFactory.java new file mode 100644 index 00000000000..04b71b6c549 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/DBCallbackFactory.java @@ -0,0 +1,32 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +/** + * Factory for creating concrete implementations of DBCallback. + */ +public interface DBCallbackFactory { + + /** + * Creates a DBCallback for the given collection. + * + * @param collection a DBCollection for the DBCallback + * @return a new DBCallback that operates on the collection. + */ + DBCallback create(DBCollection collection); + +} diff --git a/driver-legacy/src/main/com/mongodb/DBCollection.java b/driver-legacy/src/main/com/mongodb/DBCollection.java new file mode 100644 index 00000000000..7e460af74ba --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/DBCollection.java @@ -0,0 +1,2218 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb; + +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.client.internal.MongoBatchCursorAdapter; +import com.mongodb.client.internal.MongoIterableImpl; +import com.mongodb.client.internal.OperationExecutor; +import com.mongodb.client.model.DBCollectionCountOptions; +import com.mongodb.client.model.DBCollectionDistinctOptions; +import com.mongodb.client.model.DBCollectionFindAndModifyOptions; +import com.mongodb.client.model.DBCollectionFindOptions; +import com.mongodb.client.model.DBCollectionRemoveOptions; +import com.mongodb.client.model.DBCollectionUpdateOptions; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.bulk.DeleteRequest; +import com.mongodb.internal.bulk.IndexRequest; +import com.mongodb.internal.bulk.InsertRequest; +import com.mongodb.internal.bulk.UpdateRequest; +import com.mongodb.internal.bulk.WriteRequest.Type; +import com.mongodb.internal.connection.PowerOfTwoBufferPool; +import com.mongodb.internal.operation.AggregateOperation; +import com.mongodb.internal.operation.AggregateToCollectionOperation; +import com.mongodb.internal.operation.BatchCursor; +import com.mongodb.internal.operation.CountOperation; +import com.mongodb.internal.operation.CreateIndexesOperation; +import com.mongodb.internal.operation.DistinctOperation; +import com.mongodb.internal.operation.DropCollectionOperation; +import com.mongodb.internal.operation.DropIndexOperation; +import com.mongodb.internal.operation.FindAndDeleteOperation; +import com.mongodb.internal.operation.FindAndReplaceOperation; +import com.mongodb.internal.operation.FindAndUpdateOperation; +import com.mongodb.internal.operation.ListIndexesOperation; +import com.mongodb.internal.operation.MapReduceBatchCursor; +import com.mongodb.internal.operation.MapReduceStatistics; +import com.mongodb.internal.operation.MapReduceToCollectionOperation; +import com.mongodb.internal.operation.MapReduceWithInlineResultsOperation; +import com.mongodb.internal.operation.MixedBulkWriteOperation; +import com.mongodb.internal.operation.ReadOperationCursor; +import com.mongodb.internal.operation.RenameCollectionOperation; +import com.mongodb.internal.operation.WriteOperation; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonDocumentReader; +import org.bson.BsonDocumentWrapper; +import org.bson.BsonInt32; +import org.bson.BsonJavaScript; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.BsonValueCodec; +import org.bson.codecs.Codec; +import org.bson.codecs.Decoder; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.Encoder; +import org.bson.types.ObjectId; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +import static com.mongodb.BulkWriteHelper.translateBulkWriteResult; +import static com.mongodb.LegacyMixedBulkWriteOperation.createBulkWriteOperationForDelete; +import static com.mongodb.LegacyMixedBulkWriteOperation.createBulkWriteOperationForInsert; +import static com.mongodb.LegacyMixedBulkWriteOperation.createBulkWriteOperationForReplace; +import static com.mongodb.LegacyMixedBulkWriteOperation.createBulkWriteOperationForUpdate; +import static com.mongodb.MongoNamespace.checkCollectionNameValidity; +import static com.mongodb.ReadPreference.primary; +import static 
com.mongodb.ReadPreference.primaryPreferred; +import static com.mongodb.TimeoutSettingsHelper.createTimeoutSettings; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.Locks.withLock; +import static com.mongodb.internal.bulk.WriteRequest.Type.UPDATE; +import static java.lang.String.format; +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +/** + * Implementation of a database collection. A typical invocation sequence is thus: + *
* <pre>
+ * {@code
+ * MongoClient mongoClient = new MongoClient(new ServerAddress("localhost", 27017));
+ * DB db = mongoClient.getDB("mydb");
+ * DBCollection collection = db.getCollection("test"); }
+ * </pre>
+ * To get a collection to use, just specify the name of the collection to the getCollection(String collectionName) method: + *
* <pre>
+ * {@code
+ * DBCollection coll = db.getCollection("testCollection"); }
+ * </pre>
+ * Once you have the collection object, you can insert documents into the collection: + *
* <pre>
+ * {@code
+ * BasicDBObject doc = new BasicDBObject("name", "MongoDB").append("type", "database")
+ *                                                         .append("count", 1)
+ *                                                         .append("info", new BasicDBObject("x", 203).append("y", 102));
+ * coll.insert(doc); }
+ * </pre>
+ * To show that the document we inserted in the previous step is there, we can do a simple findOne() operation to get the first document in + * the collection: + *
* <pre>
+ * {@code
+ * DBObject myDoc = coll.findOne();
+ * System.out.println(myDoc); }
+ * </pre>
+ * + * See {@link MongoClient#getDB(String)} for further information about the effective deprecation of this class. + * + * @mongodb.driver.manual reference/glossary/#term-collection Collection + */ +@ThreadSafe +@SuppressWarnings({"rawtypes", "deprecation"}) +public class DBCollection { + /** + * The name of the field that uniquely identifies each document in a collection. + */ + public static final String ID_FIELD_NAME = "_id"; + private final String name; + private final DB database; + private final OperationExecutor executor; + private final boolean retryWrites; + private final boolean retryReads; + private volatile ReadPreference readPreference; + private volatile WriteConcern writeConcern; + private volatile ReadConcern readConcern; + private final Lock factoryAndCodecLock = new ReentrantLock(); + private DBEncoderFactory encoderFactory; + private DBDecoderFactory decoderFactory; + private volatile DBCollectionObjectFactory objectFactory; + private volatile CompoundDBObjectCodec objectCodec; + + + /** + * Constructs new {@code DBCollection} instance. This operation not reflected on the server. + * @param name the name of the collection + * @param database the database to which this collections belongs to + */ + DBCollection(final String name, final DB database, final OperationExecutor executor) { + checkCollectionNameValidity(name); + this.name = name; + this.database = database; + this.executor = executor; + this.objectFactory = new DBCollectionObjectFactory(); + this.objectCodec = new CompoundDBObjectCodec(getDefaultDBObjectCodec()); + this.retryWrites = database.getMongoClient().getMongoClientOptions().getRetryWrites(); + this.retryReads = database.getMongoClient().getMongoClientOptions().getRetryReads(); + } + + /** + * Initializes a new collection. No operation is actually performed on the database. + * + * @param database database in which to create the collection + * @param name the name of the collection + */ + protected DBCollection(final DB database, final String name) { + this(name, database, database.getExecutor()); + } + + /** + * Insert a document into a collection. If the collection does not exists on the server, then it will be created. If the new document + * does not contain an '_id' field, it will be added. + * + * @param document {@code DBObject} to be inserted + * @param writeConcern {@code WriteConcern} to be used during operation + * @return the result of the operation + * @throws com.mongodb.DuplicateKeyException if the write failed to a duplicate unique key + * @throws com.mongodb.WriteConcernException if the write failed due some other failure specific to the insert command + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws MongoException if the operation failed for some other reason + * @mongodb.driver.manual tutorial/insert-documents/ Insert Documents + */ + public WriteResult insert(final DBObject document, final WriteConcern writeConcern) { + return insert(asList(document), writeConcern); + } + + /** + * Insert documents into a collection. If the collection does not exists on the server, then it will be created. If the new document + * does not contain an '_id' field, it will be added. Collection wide {@code WriteConcern} will be used. 
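+ * <p>For example (field names and values are illustrative):</p>
+ * <pre>
+ * {@code
+ * coll.insert(new BasicDBObject("name", "Alice"), new BasicDBObject("name", "Bob"));
+ * }</pre>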
+ * + * @param documents {@code DBObject}'s to be inserted + * @return the result of the operation + * @throws com.mongodb.DuplicateKeyException if the write failed to a duplicate unique key + * @throws com.mongodb.WriteConcernException if the write failed due some other failure specific to the insert command + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws MongoException if the operation failed for some other reason + * @mongodb.driver.manual tutorial/insert-documents/ Insert Documents + */ + public WriteResult insert(final DBObject... documents) { + return insert(asList(documents), getWriteConcern()); + } + + /** + * Insert documents into a collection. If the collection does not exists on the server, then it will be created. If the new document + * does not contain an '_id' field, it will be added. + * + * @param documents {@code DBObject}'s to be inserted + * @param writeConcern {@code WriteConcern} to be used during operation + * @return the result of the operation + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + * @mongodb.driver.manual tutorial/insert-documents/ Insert Documents + */ + public WriteResult insert(final WriteConcern writeConcern, final DBObject... documents) { + return insert(documents, writeConcern); + } + + /** + * Insert documents into a collection. If the collection does not exists on the server, then it will be created. If the new document + * does not contain an '_id' field, it will be added. + * + * @param documents {@code DBObject}'s to be inserted + * @param writeConcern {@code WriteConcern} to be used during operation + * @return the result of the operation + * @throws com.mongodb.DuplicateKeyException if the write failed to a duplicate unique key + * @throws com.mongodb.WriteConcernException if the write failed due some other failure specific to the insert command + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws MongoException if the operation failed for some other reason + * @mongodb.driver.manual tutorial/insert-documents/ Insert Documents + */ + public WriteResult insert(final DBObject[] documents, final WriteConcern writeConcern) { + return insert(asList(documents), writeConcern); + } + + /** + * Insert documents into a collection. If the collection does not exists on the server, then it will be created. If the new document + * does not contain an '_id' field, it will be added. + * + * @param documents list of {@code DBObject} to be inserted + * @return the result of the operation + * @throws com.mongodb.DuplicateKeyException if the write failed to a duplicate unique key + * @throws com.mongodb.WriteConcernException if the write failed due some other failure specific to the insert command + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws MongoException if the operation failed for some other reason + * @mongodb.driver.manual tutorial/insert-documents/ Insert Documents + */ + public WriteResult insert(final List documents) { + return insert(documents, getWriteConcern()); + } + + /** + * Insert documents into a collection. If the collection does not exists on the server, then it will be created. If the new document + * does not contain an '_id' field, it will be added. 
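+ * <p>For example, a sketch with an explicit write concern (the documents are illustrative):</p>
+ * <pre>
+ * {@code
+ * List<DBObject> documents = Arrays.<DBObject>asList(new BasicDBObject("x", 1), new BasicDBObject("x", 2));
+ * coll.insert(documents, WriteConcern.ACKNOWLEDGED);
+ * }</pre>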
+    /**
+     * Insert documents into a collection. If the collection does not exist on the server, then it will be created. If the new document
+     * does not contain an '_id' field, it will be added.
+     *
+     * @param documents     list of {@code DBObject}s to be inserted
+     * @param aWriteConcern {@code WriteConcern} to be used during operation
+     * @return the result of the operation
+     * @throws com.mongodb.DuplicateKeyException if the write failed due to a duplicate unique key
+     * @throws com.mongodb.WriteConcernException if the write failed due to some other failure specific to the insert command
+     * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+     * @throws MongoException if the operation failed for some other reason
+     * @mongodb.driver.manual tutorial/insert-documents/ Insert Documents
+     */
+    public WriteResult insert(final List<? extends DBObject> documents, final WriteConcern aWriteConcern) {
+        return insert(documents, aWriteConcern, null);
+    }
+
+    /**
+     * Insert documents into a collection. If the collection does not exist on the server, then it will be created. If the new document
+     * does not contain an '_id' field, it will be added.
+     *
+     * @param documents     {@code DBObject}s to be inserted
+     * @param aWriteConcern {@code WriteConcern} to be used during operation
+     * @param encoder       {@code DBEncoder} to be used
+     * @return the result of the operation
+     * @throws com.mongodb.DuplicateKeyException if the write failed due to a duplicate unique key
+     * @throws com.mongodb.WriteConcernException if the write failed due to some other failure specific to the insert command
+     * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+     * @throws MongoException if the operation failed for some other reason
+     * @mongodb.driver.manual tutorial/insert-documents/ Insert Documents
+     */
+    public WriteResult insert(final DBObject[] documents, final WriteConcern aWriteConcern, final DBEncoder encoder) {
+        return insert(asList(documents), aWriteConcern, encoder);
+    }
+
+    /**
+     * Insert documents into a collection. If the collection does not exist on the server, then it will be created. If the new document
+     * does not contain an '_id' field, it will be added.
+     *
+     * @param documents     a list of {@code DBObject}s to be inserted
+     * @param aWriteConcern {@code WriteConcern} to be used during operation
+     * @param dbEncoder     {@code DBEncoder} to be used
+     * @return the result of the operation
+     * @throws com.mongodb.DuplicateKeyException if the write failed due to a duplicate unique key
+     * @throws com.mongodb.WriteConcernException if the write failed due to some other failure specific to the insert command
+     * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+     * @throws MongoException if the operation failed for some other reason
+     * @mongodb.driver.manual tutorial/insert-documents/ Insert Documents
+     */
+    public WriteResult insert(final List<? extends DBObject> documents, final WriteConcern aWriteConcern,
+                              @Nullable final DBEncoder dbEncoder) {
+        return insert(documents, new InsertOptions().writeConcern(aWriteConcern).dbEncoder(dbEncoder));
+    }
+
+    /**
+     * <p>Insert documents into a collection. If the collection does not exist on the server, then it will be created. If the new
+     * document does not contain an '_id' field, it will be added.</p>
+     *
+     * <p>If the value of the continueOnError property of the given {@code InsertOptions} is true,
+     * that value will override the value of the continueOnError property of the given {@code WriteConcern}. Otherwise,
+     * the value of the continueOnError property of the given {@code WriteConcern} will take effect.</p>
+     *
+     * @param documents     a list of {@code DBObject}s to be inserted
+     * @param insertOptions the options to use for the insert
+     * @return the result of the operation
+     * @throws com.mongodb.DuplicateKeyException if the write failed due to a duplicate unique key
+     * @throws com.mongodb.WriteConcernException if the write failed due to some other failure specific to the insert command
+     * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+     * @throws MongoException if the operation failed for some other reason
+     * @mongodb.driver.manual tutorial/insert-documents/ Insert Documents
+     */
+    public WriteResult insert(final List<? extends DBObject> documents, final InsertOptions insertOptions) {
+        WriteConcern writeConcern = insertOptions.getWriteConcern() != null ? insertOptions.getWriteConcern() : getWriteConcern();
+        Encoder<DBObject> encoder = toEncoder(insertOptions.getDbEncoder());
+
+        List<InsertRequest> insertRequestList = new ArrayList<>(documents.size());
+        for (DBObject cur : documents) {
+            if (cur.get(ID_FIELD_NAME) == null) {
+                cur.put(ID_FIELD_NAME, new ObjectId());
+            }
+            insertRequestList.add(new InsertRequest(new BsonDocumentWrapper<>(cur, encoder)));
+        }
+        return insert(insertRequestList, writeConcern, insertOptions.isContinueOnError(), insertOptions.getBypassDocumentValidation());
+    }
+
+    private Encoder<DBObject> toEncoder(@Nullable final DBEncoder dbEncoder) {
+        return dbEncoder != null ? new DBEncoderAdapter(dbEncoder) : objectCodec;
+    }
+
+    private WriteResult insert(final List<InsertRequest> insertRequestList, final WriteConcern writeConcern,
+                               final boolean continueOnError, @Nullable final Boolean bypassDocumentValidation) {
+        return executeWriteOperation(createBulkWriteOperationForInsert(getNamespace(),
+                !continueOnError, writeConcern, retryWrites, insertRequestList).bypassDocumentValidation(bypassDocumentValidation));
+    }
+
+    WriteResult executeWriteOperation(final LegacyMixedBulkWriteOperation operation) {
+        return translateWriteResult(executor.execute(operation, getReadConcern()));
+    }
+
+    private WriteResult translateWriteResult(final WriteConcernResult writeConcernResult) {
+        if (!writeConcernResult.wasAcknowledged()) {
+            return WriteResult.unacknowledged();
+        }
+
+        return translateWriteResult(writeConcernResult.getCount(), writeConcernResult.isUpdateOfExisting(),
+                writeConcernResult.getUpsertedId());
+    }
+
+    private WriteResult translateWriteResult(final int count, final boolean isUpdateOfExisting, @Nullable final BsonValue upsertedId) {
+        Object newUpsertedId = upsertedId == null
+                ? null
+                : getObjectCodec().decode(new BsonDocumentReader(new BsonDocument("_id", upsertedId)),
+                        DecoderContext.builder().build())
+                        .get("_id");
+        return new WriteResult(count, isUpdateOfExisting, newUpsertedId);
+    }
+
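The options-based implementation above is also where continueOnError is translated into the ordered/unordered flag on the bulk write. A sketch of its effect, assuming the same hypothetical client setup; with an acknowledged write concern the duplicate key is still reported once the whole batch has been attempted:

```java
import java.util.Arrays;

import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.DBObject;
import com.mongodb.DuplicateKeyException;
import com.mongodb.InsertOptions;
import com.mongodb.MongoClient;

public class ContinueOnErrorExample {
    public static void main(final String[] args) {
        MongoClient client = new MongoClient("localhost");
        try {
            DBCollection books = client.getDB("test").getCollection("books");
            DBObject first = new BasicDBObject("_id", 1);
            DBObject dup = new BasicDBObject("_id", 1); // duplicate key
            DBObject last = new BasicDBObject("_id", 2);
            try {
                // continueOnError(true) maps to an unordered bulk write, so the
                // server keeps inserting after the duplicate instead of aborting.
                books.insert(Arrays.asList(first, dup, last),
                        new InsertOptions().continueOnError(true));
            } catch (DuplicateKeyException e) {
                // The failure is still surfaced, but '_id: 2' was inserted.
            }
        } finally {
            client.close();
        }
    }
}
```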
+    /**
+     * Update an existing document or insert a document depending on the parameter. If the document does not contain an '_id' field, then
+     * the method performs an insert with the specified fields in the document as well as an '_id' field with a unique objectId value. If
+     * the document contains an '_id' field, then the method performs an upsert querying the collection on the '_id' field:
+     * <ul>
+     *     <li>If a document does not exist with the specified '_id' value, the method performs an insert with the specified fields in
+     *     the document.</li>
+     *     <li>If a document exists with the specified '_id' value, the method performs an update,
+     *     replacing all fields in the existing record with the fields from the document.</li>
+     * </ul>
+ * + * @param document {@link DBObject} to save to the collection. + * @return the result of the operation + * @throws com.mongodb.DuplicateKeyException if the write failed to a duplicate unique key + * @throws com.mongodb.WriteConcernException if the write failed due some other failure specific to the insert or update command + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws MongoException if the operation failed for some other reason + * @mongodb.driver.manual tutorial/modify-documents/#modify-a-document-with-save-method Save + */ + public WriteResult save(final DBObject document) { + return save(document, getWriteConcern()); + } + + /** + * Update an existing document or insert a document depending on the parameter. If the document does not contain an '_id' field, then + * the method performs an insert with the specified fields in the document as well as an '_id' field with a unique objectId value. If + * the document contains an '_id' field, then the method performs an upsert querying the collection on the '_id' field: + *
+     * <ul>
+     *     <li>If a document does not exist with the specified '_id' value, the method performs an insert with the specified fields in
+     *     the document.</li>
+     *     <li>If a document exists with the specified '_id' value, the method performs an update,
+     *     replacing all fields in the existing record with the fields from the document.</li>
+     * </ul>
+ * + * @param document {@link DBObject} to save to the collection. + * @param writeConcern {@code WriteConcern} to be used during operation + * @return the result of the operation + * @throws com.mongodb.DuplicateKeyException if the write failed to a duplicate unique key + * @throws com.mongodb.WriteConcernException if the write failed due some other failure specific to the insert or update command + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws MongoException if the operation failed for some other reason + * @mongodb.driver.manual tutorial/modify-documents/#modify-a-document-with-save-method Save + */ + public WriteResult save(final DBObject document, final WriteConcern writeConcern) { + Object id = document.get(ID_FIELD_NAME); + if (id == null) { + return insert(document, writeConcern); + } else { + return replaceOrInsert(document, id, writeConcern); + } + } + + private WriteResult replaceOrInsert(final DBObject obj, final Object id, final WriteConcern writeConcern) { + DBObject filter = new BasicDBObject(ID_FIELD_NAME, id); + + UpdateRequest replaceRequest = new UpdateRequest(wrap(filter), wrap(obj, objectCodec), + Type.REPLACE).upsert(true); + + return executeWriteOperation(createBulkWriteOperationForReplace(getNamespace(), false, + writeConcern, retryWrites, singletonList(replaceRequest))); + } + + /** + * Modify an existing document or documents in collection. The query parameter employs the same query selectors, as used in {@code + * find()}. + * + * @param query the selection criteria for the update + * @param update the modifications to apply + * @param upsert when true, inserts a document if no document matches the update query criteria + * @param multi when true, updates all documents in the collection that match the update query criteria, otherwise only updates + * one + * @param aWriteConcern {@code WriteConcern} to be used during operation + * @return the result of the operation + * @throws com.mongodb.DuplicateKeyException if the write failed to a duplicate unique key + * @throws com.mongodb.WriteConcernException if the write failed due some other failure specific to the update command + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws MongoException if the operation failed for some other reason + * @mongodb.driver.manual tutorial/modify-documents/ Modify Documents + */ + public WriteResult update(final DBObject query, final DBObject update, final boolean upsert, final boolean multi, + final WriteConcern aWriteConcern) { + return update(query, update, upsert, multi, aWriteConcern, null); + } + + /** + * Modify an existing document or documents in collection. By default the method updates a single document. The query parameter employs + * the same query selectors, as used in {@code find()}. 
+ * + * @param query the selection criteria for the update + * @param update the modifications to apply + * @param upsert when true, inserts a document if no document matches the update query criteria + * @param multi when true, updates all documents in the collection that match the update query criteria, otherwise only updates + * one + * @param concern {@code WriteConcern} to be used during operation + * @param encoder {@code DBEncoder} to be used + * @return the result of the operation + * @throws com.mongodb.DuplicateKeyException if the write failed to a duplicate unique key + * @throws com.mongodb.WriteConcernException if the write failed due some other failure specific to the update command + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws MongoException if the operation failed for some other reason + * @mongodb.driver.manual tutorial/modify-documents/ Modify Documents + */ + public WriteResult update(final DBObject query, final DBObject update, final boolean upsert, final boolean multi, + final WriteConcern concern, @Nullable final DBEncoder encoder) { + return update(query, update, upsert, multi, concern, null, encoder); + } + + /** + * Modify an existing document or documents in collection. By default the method updates a single document. The query parameter employs + * the same query selectors, as used in {@link DBCollection#find(DBObject)}. + * + * @param query the selection criteria for the update + * @param update the modifications to apply + * @param upsert when true, inserts a document if no document matches the update query criteria + * @param multi when true, updates all documents in the collection that match the update query criteria, otherwise only updates one + * @param concern {@code WriteConcern} to be used during operation + * @param bypassDocumentValidation whether to bypass document validation. + * @param encoder the DBEncoder to use + * @return the result of the operation + * @throws com.mongodb.DuplicateKeyException if the write failed to a duplicate unique key + * @throws com.mongodb.WriteConcernException if the write failed due some other failure specific to the update command + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws MongoException if the operation failed for some other reason + * @mongodb.driver.manual tutorial/modify-documents/ Modify + * @since 2.14 + */ + public WriteResult update(final DBObject query, final DBObject update, final boolean upsert, final boolean multi, + final WriteConcern concern, @Nullable final Boolean bypassDocumentValidation, + @Nullable final DBEncoder encoder) { + return update(query, update, new DBCollectionUpdateOptions().upsert(upsert).multi(multi) + .writeConcern(concern).bypassDocumentValidation(bypassDocumentValidation).encoder(encoder)); + } + + /** + * Modify an existing document or documents in collection. The query parameter employs the same query selectors, as used in {@code + * find()}. 
+ * + * @param query the selection criteria for the update + * @param update the modifications to apply + * @param upsert when true, inserts a document if no document matches the update query criteria + * @param multi when true, updates all documents in the collection that match the update query criteria, otherwise only updates one + * @return the result of the operation + * @throws com.mongodb.DuplicateKeyException if the write failed to a duplicate unique key + * @throws com.mongodb.WriteConcernException if the write failed due some other failure specific to the update command + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws MongoException if the operation failed for some other reason + * @mongodb.driver.manual tutorial/modify-documents/ Modify Documents + */ + public WriteResult update(final DBObject query, final DBObject update, final boolean upsert, final boolean multi) { + return update(query, update, upsert, multi, getWriteConcern()); + } + + /** + * Modify an existing document. The query parameter employs the same query selectors, as used in {@code find()}. + * + * @param query the selection criteria for the update + * @param update the modifications to apply + * @return the result of the operation + * @throws com.mongodb.DuplicateKeyException if the write failed to a duplicate unique key + * @throws com.mongodb.WriteConcernException if the write failed due some other failure specific to the update command + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws MongoException if the operation failed for some other reason + * @mongodb.driver.manual tutorial/modify-documents/ Modify Documents + */ + public WriteResult update(final DBObject query, final DBObject update) { + return update(query, update, false, false); + } + + /** + * Modify documents in collection. The query parameter employs the same query selectors, as used in {@code find()}. + * + * @param query the selection criteria for the update + * @param update the modifications to apply + * @return the result of the operation + * @throws com.mongodb.DuplicateKeyException if the write failed to a duplicate unique key + * @throws com.mongodb.WriteConcernException if the write failed due some other failure specific to the update command + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws MongoException if the operation failed for some other reason + * @mongodb.driver.manual tutorial/modify-documents/ Modify Documents + */ + public WriteResult updateMulti(final DBObject query, final DBObject update) { + return update(query, update, false, true); + } + + /** + * Modify an existing document or documents in collection. 
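The single-document and multi-document convenience overloads above differ only in the `multi` flag. A minimal sketch, assuming the same illustrative client and collection names:

```java
import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.MongoClient;
import com.mongodb.WriteResult;

public class UpdateExample {
    public static void main(final String[] args) {
        MongoClient client = new MongoClient("localhost");
        try {
            DBCollection books = client.getDB("test").getCollection("books");
            // Single-document update: $set leaves the rest of the document intact.
            WriteResult one = books.update(
                    new BasicDBObject("title", "Dune"),
                    new BasicDBObject("$set", new BasicDBObject("checkedOut", true)));
            // Multi-document update across everything matching the filter.
            WriteResult many = books.updateMulti(
                    new BasicDBObject("year", new BasicDBObject("$lt", 1970)),
                    new BasicDBObject("$set", new BasicDBObject("classic", true)));
            System.out.println(one.getN() + " / " + many.getN() + " documents updated");
        } finally {
            client.close();
        }
    }
}
```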
+ * + * @param query the selection criteria for the update + * @param update the modifications to apply + * @param options the options to apply to the update operation + * @return the result of the operation + * @throws com.mongodb.DuplicateKeyException if the write failed to a duplicate unique key + * @throws com.mongodb.WriteConcernException if the write failed due some other failure specific to the update command + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws MongoException if the operation failed for some other reason + * @mongodb.driver.manual tutorial/modify-documents/ Modify + * @since 3.4 + */ + public WriteResult update(final DBObject query, final DBObject update, final DBCollectionUpdateOptions options) { + notNull("query", query); + notNull("update", update); + notNull("options", options); + WriteConcern optionsWriteConcern = options.getWriteConcern(); + WriteConcern writeConcern = optionsWriteConcern != null ? optionsWriteConcern : getWriteConcern(); + Type updateType = (!update.keySet().isEmpty() && update.keySet().iterator().next().startsWith("$")) + ? UPDATE : Type.REPLACE; + UpdateRequest updateRequest = new UpdateRequest(wrap(query), wrap(update, options.getEncoder()), updateType) + .upsert(options.isUpsert()).multi(options.isMulti()) + .collation(options.getCollation()) + .arrayFilters(wrapAllowNull(options.getArrayFilters(), options.getEncoder())); + LegacyMixedBulkWriteOperation operation = (updateType == UPDATE + ? createBulkWriteOperationForUpdate(getNamespace(), true, writeConcern, retryWrites, + singletonList(updateRequest)) + : createBulkWriteOperationForReplace(getNamespace(), true, writeConcern, retryWrites, + singletonList(updateRequest))) + .bypassDocumentValidation(options.getBypassDocumentValidation()); + return executeWriteOperation(operation); + } + + /** + * Remove documents from a collection. + * + * @param query the deletion criteria using query operators. Omit the query parameter or pass an empty document to delete all documents + * in the collection. + * @return the result of the operation + * @throws com.mongodb.WriteConcernException if the write failed due some other failure specific to the delete command + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws MongoException if the operation failed for some other reason + * @mongodb.driver.manual tutorial/remove-documents/ Remove Documents + */ + public WriteResult remove(final DBObject query) { + return remove(query, getWriteConcern()); + } + + /** + * Remove documents from a collection. + * + * @param query the deletion criteria using query operators. Omit the query parameter or pass an empty document to delete all + * documents in the collection. + * @param writeConcern {@code WriteConcern} to be used during operation + * @return the result of the operation + * @throws com.mongodb.WriteConcernException if the write failed due some other failure specific to the delete command + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws MongoException if the operation failed for some other reason + * @mongodb.driver.manual tutorial/remove-documents/ Remove Documents + */ + public WriteResult remove(final DBObject query, final WriteConcern writeConcern) { + return remove(query, new DBCollectionRemoveOptions().writeConcern(writeConcern)); + } + + /** + * Remove documents from a collection. 
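Note how the options-based `update` implementation above inspects the first key of the update document: a leading `$` routes to the update path, anything else to a full replace. A sketch of the update path with an upsert (names are illustrative):

```java
import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.MongoClient;
import com.mongodb.client.model.DBCollectionUpdateOptions;

public class UpdateOptionsExample {
    public static void main(final String[] args) {
        MongoClient client = new MongoClient("localhost");
        try {
            DBCollection counters = client.getDB("test").getCollection("counters");
            // First key starts with '$', so this takes the update (not replace) branch.
            counters.update(
                    new BasicDBObject("_id", "pageviews"),
                    new BasicDBObject("$inc", new BasicDBObject("n", 1)),
                    new DBCollectionUpdateOptions().upsert(true));
        } finally {
            client.close();
        }
    }
}
```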
+ * + * @param query the deletion criteria using query operators. Omit the query parameter or pass an empty document to delete all + * documents in the collection. + * @param writeConcern {@code WriteConcern} to be used during operation + * @param encoder {@code DBEncoder} to be used + * @return the result of the operation + * @throws com.mongodb.WriteConcernException if the write failed due some other failure specific to the delete command + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws MongoException if the operation failed for some other reason + * @mongodb.driver.manual tutorial/remove-documents/ Remove Documents + */ + public WriteResult remove(final DBObject query, final WriteConcern writeConcern, final DBEncoder encoder) { + return remove(query, new DBCollectionRemoveOptions().writeConcern(writeConcern).encoder(encoder)); + } + + /** + * Remove documents from a collection. + * + * @param query the deletion criteria using query operators. Omit the query parameter or pass an empty document to delete all + * documents in the collection. + * @param options the options to apply to the delete operation + * @return the result of the operation + * @throws com.mongodb.WriteConcernException if the write failed due some other failure specific to the delete command + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws MongoException if the operation failed for some other reason + * @mongodb.driver.manual tutorial/remove-documents/ Remove Documents + * @since 3.4 + */ + public WriteResult remove(final DBObject query, final DBCollectionRemoveOptions options) { + notNull("query", query); + notNull("options", options); + WriteConcern optionsWriteConcern = options.getWriteConcern(); + WriteConcern writeConcern = optionsWriteConcern != null ? optionsWriteConcern : getWriteConcern(); + DeleteRequest deleteRequest = new DeleteRequest(wrap(query, options.getEncoder())).collation(options.getCollation()); + return executeWriteOperation(createBulkWriteOperationForDelete(getNamespace(), false, + writeConcern, retryWrites, singletonList(deleteRequest))); + } + + /** + * Select documents in collection and get a cursor to the selected documents. + * + * @param query the selection criteria using query operators. Omit the query parameter or pass an empty document to return all documents + * in the collection. + * @return A cursor to the documents that match the query criteria + * @mongodb.driver.manual tutorial/query-documents/ Querying + */ + public DBCursor find(final DBObject query) { + return new DBCursor(this, query, null, getReadPreference()); + } + + /** + * Select documents in collection and get a cursor to the selected documents. + * + * @param query the selection criteria using query operators. Omit the query parameter or pass an empty document to return all + * documents in the collection. + * @param projection specifies which fields MongoDB will return from the documents in the result set. + * @return A cursor to the documents that match the query criteria + * @mongodb.driver.manual tutorial/query-documents/ Querying + */ + public DBCursor find(final DBObject query, final DBObject projection) { + return new DBCursor(this, query, projection, getReadPreference()); + } + + /** + * Select all documents in collection and get a cursor to the selected documents. 
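A short sketch of the `remove` and cursor-returning `find` methods above, assuming the same hypothetical setup; closing the cursor releases any server-side resources:

```java
import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.DBCursor;
import com.mongodb.MongoClient;

public class RemoveAndFindExample {
    public static void main(final String[] args) {
        MongoClient client = new MongoClient("localhost");
        try {
            DBCollection books = client.getDB("test").getCollection("books");
            books.remove(new BasicDBObject("discontinued", true)); // delete matching docs
            DBCursor cursor = books.find(new BasicDBObject("year", new BasicDBObject("$gte", 2000)));
            try {
                while (cursor.hasNext()) {
                    System.out.println(cursor.next());
                }
            } finally {
                cursor.close(); // always release the server-side cursor
            }
        } finally {
            client.close();
        }
    }
}
```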
+ * + * @return A cursor to the documents that match the query criteria + * @mongodb.driver.manual tutorial/query-documents/ Querying + */ + public DBCursor find() { + return find(new BasicDBObject()); + } + + /** + * Select documents in collection and get a cursor to the selected documents. + * + * @param query the selection criteria using query operators. Omit the query parameter or pass an empty document to return all + * documents in the collection. + * @param options the options for the find operation. + * @return A cursor to the documents that match the query criteria + * @mongodb.driver.manual tutorial/query-documents/ Querying + * @since 3.4 + */ + public DBCursor find(@Nullable final DBObject query, final DBCollectionFindOptions options) { + return new DBCursor(this, query, options); + } + + /** + * Get a single document from collection. + * + * @return A document that satisfies the query specified as the argument to this method. + * @mongodb.driver.manual tutorial/query-documents/ Querying + */ + @Nullable + public DBObject findOne() { + return findOne(new BasicDBObject()); + } + + /** + * Get a single document from collection. + * + * @param query the selection criteria using query operators. + * @return A document that satisfies the query specified as the argument to this method. + * @mongodb.driver.manual tutorial/query-documents/ Querying + */ + @Nullable + public DBObject findOne(final DBObject query) { + return findOne(query, null, null, getReadPreference()); + } + + /** + * Get a single document from collection. + * + * @param query the selection criteria using query operators. + * @param projection specifies which fields MongoDB will return from the documents in the result set. + * @return A document that satisfies the query specified as the argument to this method. + * @mongodb.driver.manual tutorial/query-documents/ Querying + */ + @Nullable + public DBObject findOne(final DBObject query, final DBObject projection) { + return findOne(query, projection, null, getReadPreference()); + } + + /** + * Get a single document from collection. + * + * @param query the selection criteria using query operators. + * @param projection specifies which fields MongoDB will return from the documents in the result set. + * @param sort A document whose fields specify the attributes on which to sort the result set. + * @return A document that satisfies the query specified as the argument to this method. + * @mongodb.driver.manual tutorial/query-documents/ Querying + */ + @Nullable + public DBObject findOne(final DBObject query, final DBObject projection, final DBObject sort) { + return findOne(query, projection, sort, getReadPreference()); + } + + /** + * Get a single document from collection. + * + * @param query the selection criteria using query operators. + * @param projection specifies which fields MongoDB will return from the documents in the result set. + * @param readPreference {@link ReadPreference} to be used for this operation + * @return A document that satisfies the query specified as the argument to this method. + * @mongodb.driver.manual tutorial/query-documents/ Querying + */ + @Nullable + public DBObject findOne(final DBObject query, final DBObject projection, final ReadPreference readPreference) { + return findOne(query, projection, null, readPreference); + } + + /** + * Get a single document from collection. + * + * @param query the selection criteria using query operators. 
+ * @param projection specifies which projection MongoDB will return from the documents in the result set. + * @param sort A document whose fields specify the attributes on which to sort the result set. + * @param readPreference {@code ReadPreference} to be used for this operation + * @return A document that satisfies the query specified as the argument to this method. + * @mongodb.driver.manual tutorial/query-documents/ Querying + */ + @Nullable + public DBObject findOne(@Nullable final DBObject query, @Nullable final DBObject projection, @Nullable final DBObject sort, + final ReadPreference readPreference) { + return findOne(query != null ? query : new BasicDBObject(), + new DBCollectionFindOptions().projection(projection).sort(sort).readPreference(readPreference)); + } + + /** + * Get a single document from collection by '_id'. + * + * @param id value of '_id' field of a document we are looking for + * @return A document with '_id' provided as the argument to this method. + * @mongodb.driver.manual tutorial/query-documents/ Querying + */ + @Nullable + public DBObject findOne(final Object id) { + return findOne(new BasicDBObject("_id", id), new DBCollectionFindOptions()); + } + + /** + * Get a single document from collection by '_id'. + * + * @param id value of '_id' field of a document we are looking for + * @param projection specifies which projection MongoDB will return from the documents in the result set. + * @return A document that satisfies the query specified as the argument to this method. + * @mongodb.driver.manual tutorial/query-documents/ Querying + */ + @Nullable + public DBObject findOne(final Object id, final DBObject projection) { + return findOne(new BasicDBObject("_id", id), new DBCollectionFindOptions().projection(projection)); + } + + /** + * Get a single document from collection. + * + * @param query the selection criteria using query operators. + * @param findOptions the options for the find operation. + * @return A document that satisfies the query specified as the argument to this method. + * @mongodb.driver.manual tutorial/query-documents/ Querying + * @since 3.4 + */ + @Nullable + public DBObject findOne(@Nullable final DBObject query, final DBCollectionFindOptions findOptions) { + return find(query, findOptions).one(); + } + + /** + * Same as {@link #getCount()} + * + * @return the number of documents in collection + * @throws MongoException if the operation failed + * @mongodb.driver.manual reference/command/count/ Count + */ + public long count() { + return getCount(new BasicDBObject(), new DBCollectionCountOptions()); + } + + /** + * Get the count of documents in collection that would match a criteria. + * + * @param query specifies the selection criteria + * @return the number of documents that matches selection criteria + * @throws MongoException if the operation failed + * @mongodb.driver.manual reference/command/count/ Count + */ + public long count(@Nullable final DBObject query) { + return getCount(query, new DBCollectionCountOptions()); + } + + /** + * Get the count of documents in collection that would match a criteria. 
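The `findOne` and `count` helpers above return a single decoded document (or null) and a server-side count respectively. A minimal sketch under the same illustrative assumptions:

```java
import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.DBObject;
import com.mongodb.MongoClient;

public class FindOneAndCountExample {
    public static void main(final String[] args) {
        MongoClient client = new MongoClient("localhost");
        try {
            DBCollection books = client.getDB("test").getCollection("books");
            DBObject byId = books.findOne(42);              // shorthand for {_id: 42}; null if absent
            DBObject newest = books.findOne(new BasicDBObject(),
                    null, new BasicDBObject("year", -1));   // null projection, sort descending
            long classics = books.count(new BasicDBObject("year", new BasicDBObject("$lt", 1970)));
            System.out.println(byId + " / " + newest + " / " + classics);
        } finally {
            client.close();
        }
    }
}
```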
+     *
+     * @param query          specifies the selection criteria
+     * @param readPreference {@link ReadPreference} to be used for this operation
+     * @return the number of documents that matches selection criteria
+     * @throws MongoException if the operation failed
+     * @mongodb.driver.manual reference/command/count/ Count
+     */
+    public long count(@Nullable final DBObject query, final ReadPreference readPreference) {
+        return getCount(query, new DBCollectionCountOptions().readPreference(readPreference));
+    }
+
+    /**
+     * Get the count of documents in collection that would match a criteria.
+     *
+     * @param query   specifies the selection criteria
+     * @param options the options for the count operation.
+     * @return the number of documents that matches selection criteria
+     * @throws MongoException if the operation failed
+     * @mongodb.driver.manual reference/command/count/ Count
+     * @since 3.4
+     */
+    public long count(@Nullable final DBObject query, final DBCollectionCountOptions options) {
+        return getCount(query, options);
+    }
+
+    /**
+     * Get the count of documents in collection.
+     *
+     * @return the number of documents in collection
+     * @throws MongoException if the operation failed
+     * @mongodb.driver.manual reference/command/count/ Count
+     */
+    public long getCount() {
+        return getCount(new BasicDBObject(), new DBCollectionCountOptions());
+    }
+
+    /**
+     * Get the count of documents in collection that would match a criteria.
+     *
+     * @param query specifies the selection criteria
+     * @return the number of documents that matches selection criteria
+     * @throws MongoException if the operation failed
+     * @mongodb.driver.manual reference/command/count/ Count
+     */
+    public long getCount(@Nullable final DBObject query) {
+        return getCount(query, new DBCollectionCountOptions());
+    }
+
+    /**
+     * Get the count of documents in collection that would match a criteria.
+     *
+     * @param query   specifies the selection criteria
+     * @param options the options for the count operation.
+     * @return the number of documents that matches selection criteria
+     * @throws MongoException if the operation failed
+     * @mongodb.driver.manual reference/command/count/ Count
+     * @since 3.4
+     */
+    public long getCount(@Nullable final DBObject query, final DBCollectionCountOptions options) {
+        notNull("countOptions", options);
+        CountOperation operation = new CountOperation(
+                getNamespace())
+                .skip(options.getSkip())
+                .limit(options.getLimit())
+                .collation(options.getCollation())
+                .retryReads(retryReads);
+        if (query != null) {
+            operation.filter(wrap(query));
+        }
+        DBObject hint = options.getHint();
+        if (hint != null) {
+            operation.hint(wrap(hint));
+        } else {
+            String hintString = options.getHintString();
+            if (hintString != null) {
+                operation.hint(new BsonString(hintString));
+            }
+        }
+        ReadPreference optionsReadPreference = options.getReadPreference();
+        ReadConcern optionsReadConcern = options.getReadConcern();
+        return getExecutor(createTimeoutSettings(getTimeoutSettings(), options))
+                .execute(operation, optionsReadPreference != null ? optionsReadPreference : getReadPreference(),
+                        optionsReadConcern != null ? optionsReadConcern : getReadConcern(), null);
+    }
+
+    /**
+     * Change the name of an existing collection.
+     *
+     * @param newName specifies the new name of the collection
+     * @return the collection with new name
+     * @throws MongoException if newName is the name of an existing collection.
+ * @mongodb.driver.manual reference/command/renameCollection/ Rename Collection + */ + public DBCollection rename(final String newName) { + return rename(newName, false); + } + + /** + * Change the name of an existing collection. + * + * @param newName specifies the new name of the collection + * @param dropTarget If {@code true}, mongod will drop the collection with the target name if it exists + * @return the collection with new name + * @throws MongoException if target is the name of an existing collection and {@code dropTarget=false}. + * @mongodb.driver.manual reference/command/renameCollection/ Rename Collection + */ + public DBCollection rename(final String newName, final boolean dropTarget) { + try { + executor.execute(new RenameCollectionOperation(getNamespace(), + new MongoNamespace(getNamespace().getDatabaseName(), newName), getWriteConcern()) + .dropTarget(dropTarget), getReadConcern()); + return getDB().getCollection(newName); + } catch (MongoWriteConcernException e) { + throw createWriteConcernException(e); + } + } + + /** + * Find the distinct values for a specified field across a collection and returns the results in an array. + * + * @param fieldName Specifies the field for which to return the distinct values. + * @return a List of the distinct values + * @mongodb.driver.manual reference/command/distinct Distinct Command + */ + public List distinct(final String fieldName) { + return distinct(fieldName, getReadPreference()); + } + + /** + * Find the distinct values for a specified field across a collection and returns the results in an array. + * + * @param fieldName Specifies the field for which to return the distinct values + * @param readPreference {@link ReadPreference} to be used for this operation + * @return a List of the distinct values + * @mongodb.driver.manual reference/command/distinct Distinct Command + */ + public List distinct(final String fieldName, final ReadPreference readPreference) { + return distinct(fieldName, new BasicDBObject(), readPreference); + } + + /** + * Find the distinct values for a specified field across a collection and returns the results in an array. + * + * @param fieldName Specifies the field for which to return the distinct values + * @param query specifies the selection query to determine the subset of documents from which to retrieve the distinct values + * @return an array of the distinct values + * @mongodb.driver.manual reference/command/distinct Distinct Command + */ + public List distinct(final String fieldName, final DBObject query) { + return distinct(fieldName, query, getReadPreference()); + } + + /** + * Find the distinct values for a specified field across a collection and returns the results in an array. + * + * @param fieldName Specifies the field for which to return the distinct values + * @param query specifies the selection query to determine the subset of documents from which to retrieve the distinct values + * @param readPreference {@link ReadPreference} to be used for this operation + * @return A {@code List} of the distinct values + * @mongodb.driver.manual reference/command/distinct Distinct Command + */ + public List distinct(final String fieldName, final DBObject query, final ReadPreference readPreference) { + return distinct(fieldName, new DBCollectionDistinctOptions().filter(query).readPreference(readPreference)); + } + + /** + * Find the distinct values for a specified field across a collection and returns the results in an array. 
+     *
+     * @param fieldName Specifies the field for which to return the distinct values
+     * @param options   the options to apply for this operation
+     * @return A {@code List} of the distinct values
+     * @mongodb.driver.manual reference/command/distinct Distinct Command
+     * @since 3.4
+     */
+    public List distinct(final String fieldName, final DBCollectionDistinctOptions options) {
+        notNull("fieldName", fieldName);
+        return new MongoIterableImpl<BsonValue>(null, executor,
+                options.getReadConcern() != null ? options.getReadConcern() : getReadConcern(),
+                options.getReadPreference() != null ? options.getReadPreference() : getReadPreference(),
+                retryReads, DBCollection.this.getTimeoutSettings()) {
+            @Override
+            public ReadOperation<BatchCursor<BsonValue>> asReadOperation() {
+                return new DistinctOperation<>(getNamespace(), fieldName, new BsonValueCodec())
+                        .filter(wrapAllowNull(options.getFilter()))
+                        .collation(options.getCollation())
+                        .retryReads(retryReads);
+            }
+
+            @Override
+            protected OperationExecutor getExecutor() {
+                return executor;
+            }
+
+        }.map(bsonValue -> {
+            if (bsonValue == null) {
+                return null;
+            }
+            BsonDocument document = new BsonDocument("value", bsonValue);
+            DBObject obj = getDefaultDBObjectCodec().decode(new BsonDocumentReader(document), DecoderContext.builder().build());
+            return obj.get("value");
+        }).into(new ArrayList<>());
+    }
+
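The implementation above maps each distinct BSON value back through the collection's default codec, which is why the overloads return a raw `List`. A short usage sketch (names illustrative):

```java
import java.util.List;

import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.MongoClient;

public class DistinctExample {
    public static void main(final String[] args) {
        MongoClient client = new MongoClient("localhost");
        try {
            DBCollection books = client.getDB("test").getCollection("books");
            // Distinct 'author' values among post-2000 books; the raw List mirrors
            // the untyped signatures above.
            List authors = books.distinct("author",
                    new BasicDBObject("year", new BasicDBObject("$gt", 2000)));
            System.out.println(authors);
        } finally {
            client.close();
        }
    }
}
```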
+    /**
+     * Allows you to run map-reduce aggregation operations over a collection.
+     *
+     * @param map          a JavaScript function that associates or "maps" a value with a key and emits the key and value pair.
+     * @param reduce       a JavaScript function that "reduces" to a single object all the values associated with a particular key.
+     * @param outputTarget specifies the location of the result of the map-reduce operation.
+     * @param query        specifies the selection criteria using query operators for determining the documents input to the map function.
+     * @return a MapReduceOutput which contains the results of this map-reduce operation
+     * @mongodb.driver.manual core/map-reduce/ Map-Reduce
+     */
+    public MapReduceOutput mapReduce(final String map, final String reduce, final String outputTarget,
+                                     final DBObject query) {
+        MapReduceCommand command = new MapReduceCommand(this, map, reduce, outputTarget, MapReduceCommand.OutputType.REDUCE, query);
+        return mapReduce(command);
+    }
+
+    /**
+     * Allows you to run map-reduce aggregation operations over a collection and save the results to a named collection.
+     *
+     * @param map          a JavaScript function that associates or "maps" a value with a key and emits the key and value pair.
+     * @param reduce       a JavaScript function that "reduces" to a single object all the values associated with a particular key.
+     * @param outputTarget specifies the location of the result of the map-reduce operation.
+     * @param outputType   specifies the type of job output
+     * @param query        specifies the selection criteria using query operators for determining the documents input to the map function.
+     * @return a MapReduceOutput which contains the results of this map-reduce operation
+     * @mongodb.driver.manual core/map-reduce/ Map-Reduce
+     */
+    public MapReduceOutput mapReduce(final String map, final String reduce, final String outputTarget,
+                                     final MapReduceCommand.OutputType outputType, final DBObject query) {
+        MapReduceCommand command = new MapReduceCommand(this, map, reduce, outputTarget, outputType, query);
+        return mapReduce(command);
+    }
+
+    /**
+     * Allows you to run map-reduce aggregation operations over a collection and save the results to a named collection.
+     *
+     * @param map            a JavaScript function that associates or "maps" a value with a key and emits the key and value pair.
+     * @param reduce         a JavaScript function that "reduces" to a single object all the values associated with a particular key.
+     * @param outputTarget   specifies the location of the result of the map-reduce operation.
+     * @param outputType     specifies the type of job output
+     * @param query          specifies the selection criteria using query operators for determining the documents input to the map
+     *                       function.
+     * @param readPreference the read preference specifying where to run the query. Only applied for the Inline output type
+     * @return a MapReduceOutput which contains the results of this map-reduce operation
+     * @mongodb.driver.manual core/map-reduce/ Map-Reduce
+     */
+    public MapReduceOutput mapReduce(final String map, final String reduce, final String outputTarget,
+                                     final MapReduceCommand.OutputType outputType, final DBObject query,
+                                     final ReadPreference readPreference) {
+        MapReduceCommand command = new MapReduceCommand(this, map, reduce, outputTarget, outputType, query);
+        command.setReadPreference(readPreference);
+        return mapReduce(command);
+    }
+
+    /**
+     * Allows you to run map-reduce aggregation operations over a collection.
+     *
+     * @param command specifies the details of the map-reduce operation to perform
+     * @return a MapReduceOutput containing the results of the map-reduce operation
+     * @mongodb.driver.manual core/map-reduce/ Map-Reduce
+     */
+    public MapReduceOutput mapReduce(final MapReduceCommand command) {
+        ReadPreference readPreference = command.getReadPreference() == null ? getReadPreference() : command.getReadPreference();
+        Map<String, Object> scope = command.getScope();
+        Boolean jsMode = command.getJsMode();
+        if (command.getOutputType() == MapReduceCommand.OutputType.INLINE) {
+
+            MapReduceWithInlineResultsOperation<DBObject> operation = new MapReduceWithInlineResultsOperation<>(
+                    getNamespace(), new BsonJavaScript(command.getMap()),
+                    new BsonJavaScript(command.getReduce()), getDefaultDBObjectCodec())
+                    .filter(wrapAllowNull(command.getQuery()))
+                    .limit(command.getLimit())
+                    .jsMode(jsMode != null && jsMode)
+                    .sort(wrapAllowNull(command.getSort()))
+                    .verbose(command.isVerbose())
+                    .collation(command.getCollation());
+
+            if (scope != null) {
+                operation.scope(wrap(new BasicDBObject(scope)));
+            }
+            if (command.getFinalize() != null) {
+                operation.finalizeFunction(new BsonJavaScript(command.getFinalize()));
+            }
+            MapReduceBatchCursor<DBObject> executionResult =
+                    getExecutor(createTimeoutSettings(getTimeoutSettings(), command))
+                            .execute(operation, readPreference, getReadConcern(), null);
+            return new MapReduceOutput(command.toDBObject(), executionResult);
+        } else {
+            String action;
+            switch (command.getOutputType()) {
+                case REPLACE:
+                    action = "replace";
+                    break;
+                case MERGE:
+                    action = "merge";
+                    break;
+                case REDUCE:
+                    action = "reduce";
+                    break;
+                default:
+                    throw new IllegalArgumentException("Unexpected output type");
+            }
+
+            MapReduceToCollectionOperation operation =
+                    new MapReduceToCollectionOperation(
+                            getNamespace(), new BsonJavaScript(command.getMap()), new BsonJavaScript(command.getReduce()),
+                            command.getOutputTarget(), getWriteConcern())
+                            .filter(wrapAllowNull(command.getQuery()))
+                            .limit(command.getLimit())
+                            .jsMode(jsMode != null && jsMode)
+                            .sort(wrapAllowNull(command.getSort()))
+                            .verbose(command.isVerbose())
+                            .action(action)
+                            .databaseName(command.getOutputDB())
+                            .bypassDocumentValidation(command.getBypassDocumentValidation())
+                            .collation(command.getCollation());
+
+            if (scope != null) {
+                operation.scope(wrap(new BasicDBObject(scope)));
+            }
+            if (command.getFinalize() != null) {
+                operation.finalizeFunction(new BsonJavaScript(command.getFinalize()));
+            }
+            try {
+                MapReduceStatistics mapReduceStatistics = executor.execute(operation, getReadConcern());
+                DBCollection mapReduceOutputCollection = getMapReduceOutputCollection(command);
+                DBCursor executionResult = mapReduceOutputCollection.find();
+                return new MapReduceOutput(command.toDBObject(), executionResult, mapReduceStatistics, mapReduceOutputCollection);
+            } catch (MongoWriteConcernException e) {
+                throw createWriteConcernException(e);
+            }
+        }
+    }
+
+    private DBCollection getMapReduceOutputCollection(final MapReduceCommand command) {
+        String requestedDatabaseName = command.getOutputDB();
+        DB database = requestedDatabaseName != null
+                ? getDB().getSisterDB(requestedDatabaseName)
+                : getDB();
+        return database.getCollection(command.getOutputTargetNonNull());
+    }
+
+    /**
+     * Method implements the aggregation framework.
+     *
+     * @param pipeline operations to be performed in the aggregation pipeline
+     * @param options  options to apply to the aggregation
+     * @return the aggregation operation's result set
+     * @mongodb.driver.manual core/aggregation-pipeline/ Aggregation
+     * @mongodb.server.release 2.2
+     */
+    public Cursor aggregate(final List<? extends DBObject> pipeline, final AggregationOptions options) {
+        return aggregate(pipeline, options, getReadPreference());
+    }
+
+    /**
+     * Method implements the aggregation framework.
+     *
+     * @param pipeline       operations to be performed in the aggregation pipeline
+     * @param options        options to apply to the aggregation
+     * @param readPreference {@link ReadPreference} to be used for this operation
+     * @return the aggregation operation's result set
+     * @mongodb.driver.manual core/aggregation-pipeline/ Aggregation
+     * @mongodb.server.release 2.2
+     */
+    public Cursor aggregate(final List<? extends DBObject> pipeline, final AggregationOptions options,
+                            final ReadPreference readPreference) {
+        Cursor result;
+        notNull("options", options);
+        List<BsonDocument> stages = preparePipeline(pipeline);
+
+        BsonValue outCollection = stages.get(stages.size() - 1).get("$out");
+
+        if (outCollection != null) {
+            AggregateToCollectionOperation operation =
+                    new AggregateToCollectionOperation(
+                            getNamespace(), stages, getReadConcern(), getWriteConcern())
+                            .allowDiskUse(options.getAllowDiskUse())
+                            .bypassDocumentValidation(options.getBypassDocumentValidation())
+                            .collation(options.getCollation());
+            try {
+                getExecutor(createTimeoutSettings(getTimeoutSettings(), options))
+                        .execute(operation, getReadPreference(), getReadConcern(), null);
+                result = new DBCursor(database.getCollection(outCollection.asString().getValue()), new BasicDBObject(),
+                        new DBCollectionFindOptions().readPreference(primary()).collation(options.getCollation()));
+            } catch (MongoWriteConcernException e) {
+                throw createWriteConcernException(e);
+            }
+        } else {
+            AggregateOperation<DBObject> operation = new AggregateOperation<>(
+                    getNamespace(), stages,
+                    getDefaultDBObjectCodec())
+                    .allowDiskUse(options.getAllowDiskUse())
+                    .batchSize(options.getBatchSize())
+                    .collation(options.getCollation())
+                    .retryReads(retryReads);
+            BatchCursor<DBObject> cursor1 =
+                    getExecutor(createTimeoutSettings(getTimeoutSettings(), options))
+                            .execute(operation, readPreference, getReadConcern(), null);
+            result = new MongoCursorAdapter(new MongoBatchCursorAdapter<>(cursor1));
+        }
+        return result;
+    }
+
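The aggregate implementation above takes one of two paths: a trailing `$out` stage routes through the to-collection operation and then reads the target collection back, while any other pipeline returns a live cursor. A minimal cursor-path sketch (client, database, and pipeline contents are illustrative):

```java
import java.util.Arrays;

import com.mongodb.AggregationOptions;
import com.mongodb.BasicDBObject;
import com.mongodb.Cursor;
import com.mongodb.DBCollection;
import com.mongodb.DBObject;
import com.mongodb.MongoClient;

public class AggregateExample {
    public static void main(final String[] args) {
        MongoClient client = new MongoClient("localhost");
        try {
            DBCollection books = client.getDB("test").getCollection("books");
            DBObject match = new BasicDBObject("$match",
                    new BasicDBObject("year", new BasicDBObject("$gte", 1950)));
            DBObject group = new BasicDBObject("$group",
                    new BasicDBObject("_id", "$author").append("count", new BasicDBObject("$sum", 1)));
            // No trailing $out stage, so this takes the cursor-returning branch.
            Cursor cursor = books.aggregate(Arrays.asList(match, group),
                    AggregationOptions.builder().build());
            while (cursor.hasNext()) {
                System.out.println(cursor.next());
            }
        } finally {
            client.close();
        }
    }
}
```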
+    /**
+     * Return the explain plan for the aggregation pipeline.
+     *
+     * @param pipeline the aggregation pipeline to explain
+     * @param options  the options to apply to the aggregation
+     * @return the command result. The explain output may change from release to release, so it is best simply to log it.
+     * @mongodb.driver.manual core/aggregation-pipeline/ Aggregation
+     * @mongodb.driver.manual reference/operator/meta/explain/ Explain query
+     * @mongodb.server.release 3.6
+     */
+    public CommandResult explainAggregate(final List<? extends DBObject> pipeline, final AggregationOptions options) {
+        AggregateOperation<BsonDocument> operation = new AggregateOperation<>(
+                getNamespace(),
+                preparePipeline(pipeline), new BsonDocumentCodec())
+                .allowDiskUse(options.getAllowDiskUse())
+                .collation(options.getCollation())
+                .retryReads(retryReads);
+        return new CommandResult(executor.execute(
+                operation.asExplainableOperation(ExplainVerbosity.QUERY_PLANNER, new BsonDocumentCodec()),
+                primaryPreferred(), getReadConcern(), null), getDefaultDBObjectCodec());
+    }
+
+    List<BsonDocument> preparePipeline(final List<? extends DBObject> pipeline) {
+        List<BsonDocument> stages = new ArrayList<>();
+        for (final DBObject op : pipeline) {
+            stages.add(wrap(op));
+        }
+
+        return stages;
+    }
+
+    /**
+     * Get the name of a collection.
+     *
+     * @return the name of a collection
+     */
+    public String getName() {
+        return name;
+    }
+
+    /**
+     * Get the full name of a collection, with the database name as a prefix.
+     *
+     * @return the name of a collection
+     * @mongodb.driver.manual reference/glossary/#term-namespace Namespace
+     */
+    public String getFullName() {
+        return getNamespace().getFullName();
+    }
+
+    /**
+     * Find a collection that is prefixed with this collection's name. A typical use of this might be
+     * <pre>{@code
+     *    DBCollection users = mongo.getCollection( "wiki" ).getCollection( "users" );
+     * }</pre>
+     * which is equivalent to
+     * <pre>{@code
+     *   DBCollection users = mongo.getCollection( "wiki.users" );
+     * }</pre>
+ * + * @param name the name of the collection to find + * @return the matching collection + */ + public DBCollection getCollection(final String name) { + return database.getCollection(getName() + "." + name); + } + + /** + * Forces creation of an ascending index on a field with the default options. + * + * @param name name of field to index on + * @throws MongoException if the operation failed + * @mongodb.driver.manual /administration/indexes-creation/ Index Creation Tutorials + */ + public void createIndex(final String name) { + createIndex(new BasicDBObject(name, 1)); + } + + /** + * Forces creation of an index on a set of fields, if one does not already exist. + * + * @param keys a document that contains pairs with the name of the field or fields to index and order of the index + * @param name an identifier for the index. If null or empty, the default name will be used. + * @throws MongoException if the operation failed + * @mongodb.driver.manual /administration/indexes-creation/ Index Creation Tutorials + */ + public void createIndex(final DBObject keys, final String name) { + createIndex(keys, name, false); + } + + /** + * Forces creation of an index on a set of fields, if one does not already exist. + * + * @param keys a document that contains pairs with the name of the field or fields to index and order of the index + * @param name an identifier for the index. If null or empty, the default name will be used. + * @param unique if the index should be unique + * @throws MongoException if the operation failed + * @mongodb.driver.manual /administration/indexes-creation/ Index Creation Tutorials + */ + public void createIndex(final DBObject keys, @Nullable final String name, final boolean unique) { + DBObject options = new BasicDBObject(); + if (name != null && name.length() > 0) { + options.put("name", name); + } + if (unique) { + options.put("unique", Boolean.TRUE); + } + createIndex(keys, options); + } + + /** + * Creates an index on the field specified, if that index does not already exist. + * + * @param keys a document that contains pairs with the name of the field or fields to index and order of the index + * @mongodb.driver.manual /administration/indexes-creation/ Index Creation Tutorials + */ + public void createIndex(final DBObject keys) { + createIndex(keys, new BasicDBObject()); + } + + /** + * Creates an index on the field specified, if that index does not already exist. + * + *

+     * <p>Prior to MongoDB 3.0 the dropDups option could be used with unique indexes allowing documents with duplicate values to be
+     * dropped when building the index. Later versions of MongoDB will silently ignore this setting.</p>

+ * + * @param keys a document that contains pairs with the name of the field or fields to index and order of the index + * @param options a document that controls the creation of the index. + * @mongodb.driver.manual /administration/indexes-creation/ Index Creation Tutorials + */ + public void createIndex(final DBObject keys, final DBObject options) { + try { + executor.execute(createIndexOperation(keys, options), getReadConcern()); + } catch (MongoWriteConcernException e) { + throw createWriteConcernException(e); + } + } + + /** + * Atomically modify and return a single document. By default, the returned document does not include the modifications made on the + * update. + * + * @param query specifies the selection criteria for the modification + * @param sort determines which document the operation will modify if the query selects multiple documents + * @param update the modifications to apply + * @return pre-modification document + * @throws WriteConcernException if the write failed due some other failure specific to the update command + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws MongoException if the operation failed for some other reason + * @mongodb.driver.manual reference/command/findAndModify/ Find and Modify + */ + @Nullable + public DBObject findAndModify(@Nullable final DBObject query, @Nullable final DBObject sort, final DBObject update) { + return findAndModify(query, null, sort, false, update, false, false); + } + + /** + * Atomically modify and return a single document. By default, the returned document does not include the modifications made on the + * update. + * + * @param query specifies the selection criteria for the modification + * @param update the modifications to apply + * @return the document as it was before the modifications + * @throws WriteConcernException if the write failed due some other failure specific to the update command + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws MongoException if the operation failed for some other reason + * @mongodb.driver.manual reference/command/findAndModify/ Find and Modify + */ + @Nullable + public DBObject findAndModify(@Nullable final DBObject query, final DBObject update) { + return findAndModify(query, null, null, false, update, false, false); + } + + /** + * Atomically remove and return a single document. The returned document is the original document before removal. + * + * @param query specifies the selection criteria for the modification + * @return the document as it was before the modifications + * @throws WriteConcernException if the write failed due some other failure specific to the update command + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws MongoException if the operation failed for some other reason + * @mongodb.driver.manual reference/command/findAndModify/ Find and Modify + */ + @Nullable + public DBObject findAndRemove(@Nullable final DBObject query) { + return findAndModify(query, null, null, true, null, false, false); + } + + /** + * Atomically modify and return a single document. By default, the returned document does not include the modifications made on the + * update. 
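A minimal sketch of the `createIndex` overloads above (assuming the same hypothetical client; collection and index names are illustrative):

```java
import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.MongoClient;

public class CreateIndexExample {
    public static void main(final String[] args) {
        MongoClient client = new MongoClient("localhost");
        try {
            DBCollection users = client.getDB("test").getCollection("users");
            users.createIndex("lastName"); // ascending single-field index
            users.createIndex(new BasicDBObject("email", 1),
                    // unique index with an explicit name via the options document
                    new BasicDBObject("unique", true).append("name", "email_unique"));
        } finally {
            client.close();
        }
    }
}
```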
+ * + * @param query specifies the selection criteria for the modification + * @param fields a subset of fields to return + * @param sort determines which document the operation will modify if the query selects multiple documents + * @param remove when {@code true}, removes the selected document + * @param returnNew when true, returns the modified document rather than the original + * @param update the modifications to apply + * @param upsert when true, operation creates a new document if the query returns no documents + * @return the document as it was before the modifications, unless {@code returnNew} is true, in which case it returns the document + * after the changes were made + * @throws WriteConcernException if the write failed due some other failure specific to the update command + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws MongoException if the operation failed for some other reason + * @mongodb.driver.manual reference/command/findAndModify/ Find and Modify + */ + @Nullable + public DBObject findAndModify(@Nullable final DBObject query, @Nullable final DBObject fields, @Nullable final DBObject sort, + final boolean remove, @Nullable final DBObject update, + final boolean returnNew, final boolean upsert) { + return findAndModify(query, fields, sort, remove, update, returnNew, upsert, 0L, MILLISECONDS); + } + + /** + * Atomically modify and return a single document. By default, the returned document does not include the modifications made on the + * update. + * + * @param query specifies the selection criteria for the modification + * @param fields a subset of fields to return + * @param sort determines which document the operation will modify if the query selects multiple documents + * @param remove when true, removes the selected document + * @param returnNew when true, returns the modified document rather than the original + * @param update the modifications to apply + * @param upsert when true, operation creates a new document if the query returns no documents + * @param writeConcern the write concern to apply to this operation + * @return the document as it was before the modifications, unless {@code returnNew} is true, in which case it returns the document + * after the changes were made + * @throws WriteConcernException if the write failed due some other failure specific to the update command + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws MongoException if the operation failed for some other reason + * @since 2.14 + * @mongodb.driver.manual reference/command/findAndModify/ Find and Modify + */ + @Nullable + public DBObject findAndModify(@Nullable final DBObject query, @Nullable final DBObject fields, @Nullable final DBObject sort, + final boolean remove, final DBObject update, final boolean returnNew, + final boolean upsert, final WriteConcern writeConcern){ + return findAndModify(query, fields, sort, remove, update, returnNew, upsert, 0L, MILLISECONDS, writeConcern); + } + + /** + * Atomically modify and return a single document. By default, the returned document does not include the modifications made on the + * update. 
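The seven-argument overload above is the classic work-queue primitive: filter, sort, and update happen atomically in one round trip. A hedged sketch (collection name and job schema are illustrative):

```java
import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.DBObject;
import com.mongodb.MongoClient;

public class FindAndModifyExample {
    public static void main(final String[] args) {
        MongoClient client = new MongoClient("localhost");
        try {
            DBCollection jobs = client.getDB("test").getCollection("jobs");
            // Atomically claim the oldest queued job.
            DBObject claimed = jobs.findAndModify(
                    new BasicDBObject("state", "queued"),                             // query
                    null,                                                             // fields: all
                    new BasicDBObject("created", 1),                                  // sort: oldest first
                    false,                                                            // remove
                    new BasicDBObject("$set", new BasicDBObject("state", "running")), // update
                    true,                                                             // returnNew
                    false);                                                           // upsert
            System.out.println("claimed: " + claimed);
        } finally {
            client.close();
        }
    }
}
```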
+ * + * @param query specifies the selection criteria for the modification + * @param fields a subset of fields to return + * @param sort determines which document the operation will modify if the query selects multiple documents + * @param remove when true, removes the selected document + * @param returnNew when true, returns the modified document rather than the original + * @param update the modifications to apply + * @param upsert when true, operation creates a new document if the query returns no documents + * @param maxTime the maximum time that the server will allow this operation to execute before killing it. + * @param maxTimeUnit the unit that maxTime is specified in + * @return the document as it was before the modifications, unless {@code returnNew} is true, in which case it returns the document + * after the changes were made + * @throws WriteConcernException if the write failed due some other failure specific to the update command + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws MongoException if the operation failed for some other reason + * @mongodb.driver.manual reference/command/findAndModify/ Find and Modify + * @since 2.12.0 + */ + @Nullable + public DBObject findAndModify(@Nullable final DBObject query, @Nullable final DBObject fields, @Nullable final DBObject sort, + final boolean remove, @Nullable final DBObject update, + final boolean returnNew, final boolean upsert, + final long maxTime, final TimeUnit maxTimeUnit) { + return findAndModify(query, fields, sort, remove, update, returnNew, upsert, maxTime, maxTimeUnit, getWriteConcern()); + + } + + /** + * Atomically modify and return a single document. By default, the returned document does not include the modifications made on the + * update. + * + * @param query specifies the selection criteria for the modification + * @param fields a subset of fields to return + * @param sort determines which document the operation will modify if the query selects multiple documents + * @param remove when {@code true}, removes the selected document + * @param returnNew when true, returns the modified document rather than the original + * @param update performs an update of the selected document + * @param upsert when true, operation creates a new document if the query returns no documents + * @param maxTime the maximum time that the server will allow this operation to execute before killing it + * @param maxTimeUnit the unit that maxTime is specified in + * @param writeConcern the write concern to apply to this operation + * @return the document as it was before the modifications, unless {@code returnNew} is true, in which case it returns the document + * after the changes were made + * @throws WriteConcernException if the write failed due some other failure specific to the update command + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws MongoException if the operation failed for some other reason + * @mongodb.driver.manual reference/command/findAndModify/ Find and Modify + * @since 2.14.0 + */ + @Nullable + public DBObject findAndModify(@Nullable final DBObject query, @Nullable final DBObject fields, @Nullable final DBObject sort, + final boolean remove, @Nullable final DBObject update, + final boolean returnNew, final boolean upsert, + final long maxTime, final TimeUnit maxTimeUnit, + final WriteConcern writeConcern) { + return findAndModify(query != null ? 
query : new BasicDBObject(), new DBCollectionFindAndModifyOptions() + .projection(fields) + .sort(sort) + .remove(remove) + .update(update) + .returnNew(returnNew) + .upsert(upsert) + .maxTime(maxTime, maxTimeUnit) + .writeConcern(writeConcern)); + } + + /** + * Atomically modify and return a single document. By default, the returned document does not include the modifications made on the + * update. + * + * @param query specifies the selection criteria for the modification + * @param fields a subset of fields to return + * @param sort determines which document the operation will modify if the query selects multiple documents + * @param remove when {@code true}, removes the selected document + * @param returnNew when true, returns the modified document rather than the original + * @param update performs an update of the selected document + * @param upsert when true, operation creates a new document if the query returns no documents + * @param bypassDocumentValidation whether to bypass document validation. + * @param maxTime the maximum time that the server will allow this operation to execute before killing it + * @param maxTimeUnit the unit that maxTime is specified in + * @return the document as it was before the modifications, unless {@code returnNew} is true, in which case it returns the document + * after the changes were made + * @throws WriteConcernException if the write failed due some other failure specific to the update command + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws MongoException if the operation failed for some other reason + * @mongodb.driver.manual reference/command/findAndModify/ Find and Modify + * @since 2.14.0 + */ + @Nullable + public DBObject findAndModify(final DBObject query, final DBObject fields, final DBObject sort, + final boolean remove, @Nullable final DBObject update, + final boolean returnNew, final boolean upsert, + final boolean bypassDocumentValidation, + final long maxTime, final TimeUnit maxTimeUnit) { + return findAndModify(query, fields, sort, remove, update, returnNew, upsert, bypassDocumentValidation, maxTime, maxTimeUnit, + getWriteConcern()); + } + + /** + * Atomically modify and return a single document. By default, the returned document does not include the modifications made on the + * update. + * + * @param query specifies the selection criteria for the modification + * @param fields a subset of fields to return + * @param sort determines which document the operation will modify if the query selects multiple documents + * @param remove when {@code true}, removes the selected document + * @param returnNew when true, returns the modified document rather than the original + * @param update performs an update of the selected document + * @param upsert when true, operation creates a new document if the query returns no documents + * @param bypassDocumentValidation whether to bypass document validation. 
+     * @param maxTime the maximum time that the server will allow this operation to execute before killing it
+     * @param maxTimeUnit the unit that maxTime is specified in
+     * @param writeConcern the write concern to apply to this operation
+     * @return the document as it was before the modifications, unless {@code returnNew} is true, in which case it returns the document
+     * after the changes were made
+     * @throws WriteConcernException if the write failed due to some other failure specific to the update command
+     * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+     * @throws MongoException if the operation failed for some other reason
+     * @mongodb.driver.manual reference/command/findAndModify/ Find and Modify
+     * @since 2.14.0
+     */
+    public DBObject findAndModify(@Nullable final DBObject query, @Nullable final DBObject fields, @Nullable final DBObject sort,
+                                  final boolean remove, @Nullable final DBObject update,
+                                  final boolean returnNew, final boolean upsert,
+                                  final boolean bypassDocumentValidation,
+                                  final long maxTime, final TimeUnit maxTimeUnit,
+                                  final WriteConcern writeConcern) {
+        return findAndModify(query != null ? query : new BasicDBObject(), new DBCollectionFindAndModifyOptions()
+                .projection(fields)
+                .sort(sort)
+                .remove(remove)
+                .update(update)
+                .returnNew(returnNew)
+                .upsert(upsert)
+                .bypassDocumentValidation(bypassDocumentValidation)
+                .maxTime(maxTime, maxTimeUnit)
+                .writeConcern(writeConcern));
+    }
+
+    /**
+     * Atomically modify and return a single document. By default, the returned document does not include the modifications made on the
+     * update.
+     *
+     * @param query specifies the selection criteria for the modification
+     * @param options the options regarding the find and modify operation
+     * @return the document as it was before the modifications, unless {@code options.returnNew} is true, in which case it returns the
+     * document after the changes were made
+     * @throws WriteConcernException if the write failed due to some other failure specific to the update command
+     * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+     * @throws MongoException if the operation failed for some other reason
+     * @mongodb.driver.manual reference/command/findAndModify/ Find and Modify
+     * @since 3.4
+     */
+    public DBObject findAndModify(final DBObject query, final DBCollectionFindAndModifyOptions options) {
+        notNull("query", query);
+        notNull("options", options);
+        WriteConcern optionsWriteConcern = options.getWriteConcern();
+        WriteConcern writeConcern = optionsWriteConcern != null ?
optionsWriteConcern : getWriteConcern(); + WriteOperation operation; + if (options.isRemove()) { + operation = new FindAndDeleteOperation<>(getNamespace(), writeConcern, retryWrites, objectCodec) + .filter(wrapAllowNull(query)) + .projection(wrapAllowNull(options.getProjection())) + .sort(wrapAllowNull(options.getSort())) + .collation(options.getCollation()); + } else { + DBObject update = options.getUpdate(); + if (update == null) { + throw new IllegalArgumentException("update can not be null unless it's a remove"); + } + if (!update.keySet().isEmpty() && update.keySet().iterator().next().charAt(0) == '$') { + operation = new FindAndUpdateOperation<>(getNamespace(), writeConcern, retryWrites, + objectCodec, wrap(update)) + .filter(wrap(query)) + .projection(wrapAllowNull(options.getProjection())) + .sort(wrapAllowNull(options.getSort())) + .returnOriginal(!options.returnNew()) + .upsert(options.isUpsert()) + .bypassDocumentValidation(options.getBypassDocumentValidation()) + .collation(options.getCollation()) + .arrayFilters(wrapAllowNull(options.getArrayFilters(), (Encoder) null)); + } else { + operation = new FindAndReplaceOperation<>(getNamespace(), writeConcern, retryWrites, + objectCodec, wrap(update)) + .filter(wrap(query)) + .projection(wrapAllowNull(options.getProjection())) + .sort(wrapAllowNull(options.getSort())) + .returnOriginal(!options.returnNew()) + .upsert(options.isUpsert()) + .bypassDocumentValidation(options.getBypassDocumentValidation()) + .collation(options.getCollation()); + } + } + + try { + return getExecutor(createTimeoutSettings(getTimeoutSettings(), options)).execute(operation, getReadConcern()); + } catch (MongoWriteConcernException e) { + throw createWriteConcernException(e); + } + } + + /** + * Returns the database this collection is a member of. + * + * @return this collection's database + * @mongodb.driver.manual reference/glossary/#term-database Database + */ + public DB getDB() { + return database; + } + + /** + * Get the {@link WriteConcern} for this collection. + * + * @return the default write concern for this collection + * @mongodb.driver.manual core/write-concern/ Write Concern + */ + public WriteConcern getWriteConcern() { + if (writeConcern != null) { + return writeConcern; + } + return database.getWriteConcern(); + } + + /** + * Set the {@link WriteConcern} for this collection. Will be used for writes to this collection. Overrides any setting of write concern + * at the DB level. + * + * @param writeConcern WriteConcern to use + * @mongodb.driver.manual core/write-concern/ Write Concern + */ + public void setWriteConcern(final WriteConcern writeConcern) { + this.writeConcern = writeConcern; + } + + /** + * Gets the {@link ReadPreference}. + * + * @return the default read preference for this collection + * @mongodb.driver.manual core/read-preference/ Read Preference + */ + public ReadPreference getReadPreference() { + if (readPreference != null) { + return readPreference; + } + return database.getReadPreference(); + } + + /** + * Sets the {@link ReadPreference} for this collection. Will be used as default for reads from this collection; overrides DB and + * Connection level settings. See the documentation for {@link ReadPreference} for more information. + * + * @param preference ReadPreference to use + * @mongodb.driver.manual core/read-preference/ Read Preference + */ + public void setReadPreference(final ReadPreference preference) { + this.readPreference = preference; + } + + /** + * Sets the read concern for this collection. 
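All of the boolean-flag overloads above funnel into this options-based variant. A minimal usage sketch (the `counters` collection and field names are hypothetical):

```java
// Atomically increment a sequence field and read back the post-update document;
// upsert(true) creates the counter document on first use.
DBObject query = new BasicDBObject("_id", "invoiceNumber");
DBObject increment = new BasicDBObject("$inc", new BasicDBObject("seq", 1L));
DBObject updated = counters.findAndModify(query, new DBCollectionFindAndModifyOptions()
        .update(increment)   // keys start with '$', so this takes the find-and-update path
        .returnNew(true)     // return the document after the update, not the original
        .upsert(true));
```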
+     *
+     * @param readConcern the read concern to use for this collection
+     * @since 3.3
+     * @mongodb.server.release 3.2
+     * @mongodb.driver.manual reference/readConcern/ Read Concern
+     */
+    public void setReadConcern(final ReadConcern readConcern) {
+        this.readConcern = readConcern;
+    }
+
+    /**
+     * Get the read concern for this collection.
+     *
+     * @return the {@link com.mongodb.ReadConcern}
+     * @since 3.3
+     * @mongodb.server.release 3.2
+     * @mongodb.driver.manual reference/readConcern/ Read Concern
+     */
+    public ReadConcern getReadConcern() {
+        if (readConcern != null) {
+            return readConcern;
+        }
+        return database.getReadConcern();
+    }
+
+    /**
+     * Drops (deletes) this collection from the database. Use with care.
+     *
+     * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception
+     * @throws MongoException if the operation failed
+     * @mongodb.driver.manual reference/command/drop/ Drop Command
+     */
+    public void drop() {
+        try {
+            executor.execute(new DropCollectionOperation(getNamespace(),
+                    getWriteConcern()), getReadConcern());
+        } catch (MongoWriteConcernException e) {
+            throw createWriteConcernException(e);
+        }
+    }
+
+    /**
+     * Get the decoder factory for this collection. A null return value means that the default from MongoClientOptions is being used.
+     *
+     * @return the factory
+     */
+    public DBDecoderFactory getDBDecoderFactory() {
+        return withLock(factoryAndCodecLock, () -> decoderFactory);
+    }
+
+    /**
+     * Set a custom decoder factory for this collection. Set to null to use the default from MongoClientOptions.
+     *
+     * @param factory the factory to set.
+     */
+    public void setDBDecoderFactory(@Nullable final DBDecoderFactory factory) {
+        withLock(factoryAndCodecLock, () -> {
+            this.decoderFactory = factory;
+
+            // Are we using the default factory?
+            // If yes then we can use CollectibleDBObjectCodec directly, otherwise it will be wrapped.
+            Decoder<DBObject> decoder = (factory == null || factory == DefaultDBDecoder.FACTORY)
+                    ? getDefaultDBObjectCodec()
+                    : new DBDecoderAdapter(factory.create(), this, PowerOfTwoBufferPool.DEFAULT);
+            this.objectCodec = new CompoundDBObjectCodec(objectCodec.getEncoder(), decoder);
+        });
+    }
+
+    /**
+     * Get the encoder factory for this collection. A null return value means that the default from MongoClientOptions is being used.
+     *
+     * @return the factory
+     */
+    public DBEncoderFactory getDBEncoderFactory() {
+        return withLock(factoryAndCodecLock, () -> encoderFactory);
+    }
+
+    /**
+     * Set a custom encoder factory for this collection. Set to null to use the default from MongoClientOptions.
+     *
+     * @param factory the factory to set.
+     */
+    public void setDBEncoderFactory(@Nullable final DBEncoderFactory factory) {
+        withLock(factoryAndCodecLock, () -> {
+            this.encoderFactory = factory;
+
+            // Are we using the default factory?
+            // If yes then we can use CollectibleDBObjectCodec directly, otherwise it will be wrapped.
+            Encoder<DBObject> encoder = (factory == null || factory == DefaultDBEncoder.FACTORY)
+                    ? getDefaultDBObjectCodec()
+                    : new DBEncoderFactoryAdapter(encoderFactory);
+            this.objectCodec = new CompoundDBObjectCodec(encoder, objectCodec.getDecoder());
+        });
+    }
+
+    /**
+     * Return a list of the indexes for this collection.
Each object in the list is the "info document" from MongoDB.
+     *
+     * @return list of index documents
+     * @throws MongoException if the operation failed
+     * @mongodb.driver.manual core/indexes/ Indexes
+     */
+    public List<DBObject> getIndexInfo() {
+        return new MongoIterableImpl<DBObject>(null, executor, ReadConcern.DEFAULT, primary(), retryReads,
+                DBCollection.this.getTimeoutSettings()) {
+            @Override
+            public ReadOperation<BatchCursor<DBObject>> asReadOperation() {
+                return new ListIndexesOperation<>(getNamespace(), getDefaultDBObjectCodec())
+                        .retryReads(retryReads);
+            }
+
+            @Override
+            public OperationExecutor getExecutor() {
+                return executor;
+            }
+        }.into(new ArrayList<>());
+    }
+
+    /**
+     * Drops an index from this collection. The DBObject index parameter must match the specification of the index to drop, i.e. correct
+     * key name and type must be specified.
+     *
+     * @param index the specification of the index to drop
+     * @throws MongoException if the index does not exist and the server version is less than 8.3
+     * @mongodb.driver.manual core/indexes/ Indexes
+     */
+    public void dropIndex(final DBObject index) {
+        try {
+            executor.execute(new DropIndexOperation(getNamespace(), wrap(index),
+                    getWriteConcern()), getReadConcern());
+        } catch (MongoWriteConcernException e) {
+            throw createWriteConcernException(e);
+        }
+    }
+
+    /**
+     * Drops the index with the given name from this collection.
+     *
+     * @param indexName name of index to drop
+     * @throws MongoException if the index does not exist and the server version is less than 8.3
+     * @mongodb.driver.manual core/indexes/ Indexes
+     */
+    public void dropIndex(final String indexName) {
+        try {
+            executor.execute(new DropIndexOperation(getNamespace(), indexName,
+                    getWriteConcern()), getReadConcern());
+        } catch (MongoWriteConcernException e) {
+            throw createWriteConcernException(e);
+        }
+    }
+
+    /**
+     * Drop all indexes on this collection. The default index on the _id field will not be deleted.
+     *
+     * @mongodb.driver.manual core/indexes/ Indexes
+     */
+    public void dropIndexes() {
+        dropIndex("*");
+    }
+
+    /**
+     * Drops the index with the given name from this collection. This method is exactly the same as {@code dropIndex(indexName)}.
+     *
+     * @param indexName name of index to drop
+     * @throws MongoException if the index does not exist and the server version is less than 8.3
+     * @mongodb.driver.manual core/indexes/ Indexes
+     */
+    public void dropIndexes(final String indexName) {
+        dropIndex(indexName);
+    }
+
+    /**
+     * Gets the default class for objects in the collection.
+     *
+     * @return the class
+     */
+    public Class getObjectClass() {
+        return objectFactory.getClassForPath(Collections.emptyList());
+    }
+
+    /**
+     * Sets a default class for objects in this collection; null resets the class to nothing.
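As a usage aside for the index methods above, a short sketch (the index name "age_1" is hypothetical):

```java
// List this collection's indexes by name, then drop one of them.
for (DBObject indexInfo : collection.getIndexInfo()) {
    System.out.println(indexInfo.get("name"));   // e.g. "_id_"
}
collection.dropIndex("age_1");   // assumes an index with this name exists
```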
+ * + * @param aClass the class + */ + public void setObjectClass(final Class aClass) { + setObjectFactory(objectFactory.update(aClass)); + } + + /** + * Sets the internal class for the given path in the document hierarchy + * + * @param path the path to map the given Class to + * @param aClass the Class to map the given path to + */ + public void setInternalClass(final String path, final Class aClass) { + setObjectFactory(objectFactory.update(aClass, asList(path.split("\\.")))); + } + + @Override + public String toString() { + return "DBCollection{database=" + database + ", name='" + name + '\'' + '}'; + } + + DBObjectFactory getObjectFactory() { + return withLock(factoryAndCodecLock, () -> objectFactory); + } + + void setObjectFactory(final DBCollectionObjectFactory factory) { + withLock(factoryAndCodecLock, () -> { + this.objectFactory = factory; + this.objectCodec = new CompoundDBObjectCodec(objectCodec.getEncoder(), getDefaultDBObjectCodec()); + }); + } + + /** + *

<p>Creates a builder for an ordered bulk write operation, consisting of an ordered collection of write requests, which can be any
+ * combination of inserts, updates, replaces, or removes. Write requests included in the bulk operations will be executed in order, and
+ * will halt on the first failure.</p>
+ *
+ * <p>Note: While this bulk write operation will execute on MongoDB 2.4 servers and below, the writes will be performed one at a time,
+ * as that is the only way to preserve the semantics of the value returned from execution or the exception thrown.</p>
+ *
+ * <p>Note: While a bulk write operation with a mix of inserts, updates, replaces, and removes is supported, the implementation will
+ * batch up consecutive requests of the same type and send them to the server one at a time. For example, if a bulk write operation
+ * consists of 10 inserts followed by 5 updates, followed by 10 more inserts, it will result in three round trips to the server.</p>
+ * + * @return the builder + * @mongodb.driver.manual reference/method/db.collection.initializeOrderedBulkOp/ initializeOrderedBulkOp() + * @since 2.12 + */ + public BulkWriteOperation initializeOrderedBulkOperation() { + return new BulkWriteOperation(true, this); + } + + /** + *

<p>Creates a builder for an unordered bulk operation, consisting of an unordered collection of write requests, which can be any
+ * combination of inserts, updates, replaces, or removes. Write requests included in the bulk operation will be executed in an undefined
+ * order, and all requests will be executed even if some fail.</p>
+ *
+ * <p>Note: While this bulk write operation will execute on MongoDB 2.4 servers and below, the writes will be performed one at a time,
+ * as that is the only way to preserve the semantics of the value returned from execution or the exception thrown.</p>
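Both builders are driven the same way; a minimal sketch using the ordered variant (document contents are hypothetical):

```java
// Queue a mix of write requests, then send them as one bulk operation.
BulkWriteOperation bulk = collection.initializeOrderedBulkOperation();
bulk.insert(new BasicDBObject("_id", 1));
bulk.find(new BasicDBObject("_id", 2)).upsert().updateOne(
        new BasicDBObject("$set", new BasicDBObject("x", 3)));
BulkWriteResult result = bulk.execute();   // halts on the first failure, per the ordered semantics
```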

+ * + * @return the builder + * @since 2.12 + * @mongodb.driver.manual reference/method/db.collection.initializeUnorderedBulkOp/ initializeUnorderedBulkOp() + */ + public BulkWriteOperation initializeUnorderedBulkOperation() { + return new BulkWriteOperation(false, this); + } + + BulkWriteResult executeBulkWriteOperation(final boolean ordered, final Boolean bypassDocumentValidation, + final List writeRequests) { + return executeBulkWriteOperation(ordered, bypassDocumentValidation, writeRequests, getWriteConcern()); + } + + BulkWriteResult executeBulkWriteOperation(final boolean ordered, final Boolean bypassDocumentValidation, + final List writeRequests, + final WriteConcern writeConcern) { + try { + return translateBulkWriteResult(executor.execute(new MixedBulkWriteOperation( + getNamespace(), translateWriteRequestsToNew(writeRequests), ordered, writeConcern, false) + .bypassDocumentValidation(bypassDocumentValidation), getReadConcern()), getObjectCodec()); + } catch (MongoBulkWriteException e) { + throw BulkWriteHelper.translateBulkWriteException(e, MongoClient.getDefaultCodecRegistry().get(DBObject.class)); + } + } + + private List translateWriteRequestsToNew(final List writeRequests) { + List retVal = new ArrayList<>(writeRequests.size()); + for (WriteRequest cur : writeRequests) { + retVal.add(cur.toNew(this)); + } + return retVal; + } + + Codec getDefaultDBObjectCodec() { + return new DBObjectCodec(getDB().getMongoClient().getCodecRegistry(), + DBObjectCodec.getDefaultBsonTypeClassMap(), + getObjectFactory()) + .withUuidRepresentation(getDB().getMongoClient().getMongoClientOptions().getUuidRepresentation()); + } + + private T convertOptionsToType(final DBObject options, final String field, final Class clazz) { + return convertToType(clazz, options.get(field), format("'%s' should be of class %s", field, clazz.getSimpleName())); + } + + @SuppressWarnings("unchecked") + private T convertToType(final Class clazz, final Object value, final String errorMessage) { + Object transformedValue = value; + if (clazz == Boolean.class) { + if (value instanceof Boolean) { + transformedValue = value; + } else if (value instanceof Number) { + transformedValue = ((Number) value).doubleValue() != 0; + } + } else if (clazz == Double.class) { + if (value instanceof Number) { + transformedValue = ((Number) value).doubleValue(); + } + } else if (clazz == Integer.class) { + if (value instanceof Number) { + transformedValue = ((Number) value).intValue(); + } + } else if (clazz == Long.class) { + if (value instanceof Number) { + transformedValue = ((Number) value).longValue(); + } + } + + if (!clazz.isAssignableFrom(transformedValue.getClass())) { + throw new IllegalArgumentException(errorMessage); + } + return (T) transformedValue; + } + + + private CreateIndexesOperation createIndexOperation(final DBObject key, final DBObject options) { + IndexRequest request = new IndexRequest(wrap(key)); + if (options.containsField("name")) { + request.name(convertOptionsToType(options, "name", String.class)); + } + if (options.containsField("background")) { + request.background(convertOptionsToType(options, "background", Boolean.class)); + } + if (options.containsField("unique")) { + request.unique(convertOptionsToType(options, "unique", Boolean.class)); + } + if (options.containsField("sparse")) { + request.sparse(convertOptionsToType(options, "sparse", Boolean.class)); + } + if (options.containsField("expireAfterSeconds")) { + request.expireAfter(convertOptionsToType(options, "expireAfterSeconds", Long.class), 
TimeUnit.SECONDS); + } + if (options.containsField("v")) { + request.version(convertOptionsToType(options, "v", Integer.class)); + } + if (options.containsField("weights")) { + request.weights(wrap(convertOptionsToType(options, "weights", DBObject.class))); + } + if (options.containsField("default_language")) { + request.defaultLanguage(convertOptionsToType(options, "default_language", String.class)); + } + if (options.containsField("language_override")) { + request.languageOverride(convertOptionsToType(options, "language_override", String.class)); + } + if (options.containsField("textIndexVersion")) { + request.textVersion(convertOptionsToType(options, "textIndexVersion", Integer.class)); + } + if (options.containsField("2dsphereIndexVersion")) { + request.sphereVersion(convertOptionsToType(options, "2dsphereIndexVersion", Integer.class)); + } + if (options.containsField("bits")) { + request.bits(convertOptionsToType(options, "bits", Integer.class)); + } + if (options.containsField("min")) { + request.min(convertOptionsToType(options, "min", Double.class)); + } + if (options.containsField("max")) { + request.max(convertOptionsToType(options, "max", Double.class)); + } + if (options.containsField("dropDups")) { + request.dropDups(convertOptionsToType(options, "dropDups", Boolean.class)); + } + if (options.containsField("storageEngine")) { + request.storageEngine(wrap(convertOptionsToType(options, "storageEngine", DBObject.class))); + } + if (options.containsField("partialFilterExpression")) { + request.partialFilterExpression(wrap(convertOptionsToType(options, "partialFilterExpression", DBObject.class))); + } + if (options.containsField("collation")) { + request.collation(DBObjectCollationHelper.createCollationFromOptions(options)); + } + return new CreateIndexesOperation(getNamespace(), singletonList(request), writeConcern); + } + + Codec getObjectCodec() { + return objectCodec; + } + + OperationExecutor getExecutor() { + return executor; + } + + MongoNamespace getNamespace() { + return new MongoNamespace(getDB().getName(), getName()); + } + + @Nullable + BsonDocument wrapAllowNull(@Nullable final DBObject document) { + if (document == null) { + return null; + } + return wrap(document); + } + + @Nullable + List wrapAllowNull(@Nullable final List documentList, @Nullable final DBEncoder encoder) { + return wrapAllowNull(documentList, encoder == null ? null : new DBEncoderAdapter(encoder)); + } + + @Nullable + List wrapAllowNull(@Nullable final List documentList, @Nullable final Encoder encoder) { + if (documentList == null) { + return null; + } + List wrappedDocumentList = new ArrayList<>(documentList.size()); + for (DBObject cur : documentList) { + wrappedDocumentList.add(encoder == null ? 
wrap(cur) : wrap(cur, encoder)); + } + return wrappedDocumentList; + } + + + BsonDocument wrap(final DBObject document) { + return new BsonDocumentWrapper<>(document, getDefaultDBObjectCodec()); + } + + BsonDocument wrap(final DBObject document, @Nullable final DBEncoder encoder) { + if (encoder == null) { + return wrap(document); + } else { + return new BsonDocumentWrapper<>(document, new DBEncoderAdapter(encoder)); + } + } + + BsonDocument wrap(final DBObject document, @Nullable final Encoder encoder) { + if (encoder == null) { + return wrap(document); + } else { + return new BsonDocumentWrapper<>(document, encoder); + } + } + + TimeoutSettings getTimeoutSettings(){ + return database.getTimeoutSettings(); + } + + static WriteConcernException createWriteConcernException(final MongoWriteConcernException e) { + return new WriteConcernException(new BsonDocument("code", new BsonInt32(e.getWriteConcernError().getCode())) + .append("errmsg", new BsonString(e.getWriteConcernError().getMessage())), + e.getServerAddress(), + e.getWriteResult()); + } + + private OperationExecutor getExecutor(final TimeoutSettings timeoutSettings) { + return executor.withTimeoutSettings(timeoutSettings); + } + +} diff --git a/driver-legacy/src/main/com/mongodb/DBCollectionObjectFactory.java b/driver-legacy/src/main/com/mongodb/DBCollectionObjectFactory.java new file mode 100644 index 00000000000..e777b4862e8 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/DBCollectionObjectFactory.java @@ -0,0 +1,83 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.mongodb;
+
+import com.mongodb.annotations.Immutable;
+
+import java.lang.reflect.InvocationTargetException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+@Immutable
+final class DBCollectionObjectFactory implements DBObjectFactory {
+
+    private final Map<List<String>, Class<? extends DBObject>> pathToClassMap;
+
+    DBCollectionObjectFactory() {
+        this(Collections.emptyMap());
+    }
+
+    private DBCollectionObjectFactory(final Map<List<String>, Class<? extends DBObject>> pathToClassMap) {
+        this.pathToClassMap = pathToClassMap;
+    }
+
+    @Override
+    public DBObject getInstance() {
+        return getInstance(Collections.emptyList());
+    }
+
+    @Override
+    public DBObject getInstance(final List<String> path) {
+        Class<? extends DBObject> aClass = getClassForPath(path);
+        try {
+            return aClass.getDeclaredConstructor().newInstance();
+        } catch (InstantiationException | IllegalAccessException | NoSuchMethodException e) {
+            throw createInternalException(aClass, e);
+        } catch (InvocationTargetException e) {
+            throw createInternalException(aClass, e.getTargetException());
+        }
+    }
+
+    public DBCollectionObjectFactory update(final Class<? extends DBObject> aClass) {
+        return new DBCollectionObjectFactory(updatePathToClassMap(aClass, Collections.emptyList()));
+    }
+
+    public DBCollectionObjectFactory update(final Class<? extends DBObject> aClass, final List<String> path) {
+        return new DBCollectionObjectFactory(updatePathToClassMap(aClass, path));
+    }
+
+    private Map<List<String>, Class<? extends DBObject>> updatePathToClassMap(final Class<? extends DBObject> aClass,
+                                                                              final List<String> path) {
+        Map<List<String>, Class<? extends DBObject>> map = new HashMap<>(pathToClassMap);
+        if (aClass != null) {
+            map.put(path, aClass);
+        } else {
+            map.remove(path);
+        }
+        return map;
+    }
+
+    Class<? extends DBObject> getClassForPath(final List<String> path) {
+        return pathToClassMap.getOrDefault(path, BasicDBObject.class);
+    }
+
+    private MongoInternalException createInternalException(final Class<? extends DBObject> aClass, final Throwable e) {
+        throw new MongoInternalException("Can't instantiate class " + aClass, e);
+    }
+}
diff --git a/driver-legacy/src/main/com/mongodb/DBCursor.java b/driver-legacy/src/main/com/mongodb/DBCursor.java
new file mode 100644
index 00000000000..9b91bad5984
--- /dev/null
+++ b/driver-legacy/src/main/com/mongodb/DBCursor.java
@@ -0,0 +1,881 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.mongodb; + +import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.client.MongoCursor; +import com.mongodb.client.internal.MongoBatchCursorAdapter; +import com.mongodb.client.internal.OperationExecutor; +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.DBCollectionCountOptions; +import com.mongodb.client.model.DBCollectionFindOptions; +import com.mongodb.internal.connection.PowerOfTwoBufferPool; +import com.mongodb.internal.operation.FindOperation; +import com.mongodb.lang.Nullable; +import org.bson.BsonString; +import org.bson.codecs.Decoder; + +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.MongoClient.getDefaultCodecRegistry; +import static com.mongodb.TimeoutSettingsHelper.createTimeoutSettings; +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +/** + *

<p>An iterator over database results. Doing a {@code find()} query on a collection returns a {@code DBCursor}.</p>
+ *
+ * <p>An application should ensure that a cursor is closed in all circumstances, e.g. using a try-with-resources statement:</p>
+ *
+ * <pre>
+ *    try (DBCursor cursor = collection.find(query)) {
+ *        while (cursor.hasNext()) {
+ *            System.out.println(cursor.next());
+ *        }
+ *    }
+ * </pre>
+ * + *

<p>Warning: Calling {@code toArray} or {@code length} on a DBCursor will irrevocably turn it into an array. This means that, if
+ * the cursor was iterating over ten million results (which it was lazily fetching from the database), suddenly there will be a ten-million
+ * element array in memory. Before converting to an array, make sure that there are a reasonable number of results using {@code skip()} and
+ * {@code limit()}.</p>
+ *
+ * <p>For example, to get an array of the 1000-1100th elements of a cursor, use</p>
+ *
+ * <pre>{@code
+ *    List<DBObject> obj = collection.find(query).skip(1000).limit(100).toArray();
+ * }</pre>
+ * + * See {@link MongoClient#getDB(String)} for further information about the effective deprecation of this class. + * + * @mongodb.driver.manual core/read-operations Read Operations + */ +@NotThreadSafe +public class DBCursor implements Cursor, Iterable { + private final DBCollection collection; + private final DBObject filter; + private final DBCollectionFindOptions findOptions; + private final OperationExecutor executor; + private final boolean retryReads; + private DBDecoderFactory decoderFactory; + private Decoder decoder; + private IteratorOrArray iteratorOrArray; + private DBObject currentObject; + private int numSeen; + private boolean closed; + private final List all = new ArrayList<>(); + private MongoCursor cursor; + private DBCursorCleaner optionalCleaner; + + /** + * Initializes a new database cursor. + * + * @param collection collection to use + * @param query the query filter to apply + * @param fields keys to return from the query + * @param readPreference the read preference for this query + */ + public DBCursor(final DBCollection collection, final DBObject query, @Nullable final DBObject fields, + @Nullable final ReadPreference readPreference) { + this(collection, query, fields, readPreference, true); + } + + /** + * Initializes a new database cursor. + * + * @param collection collection to use + * @param query the query filter to apply + * @param fields keys to return from the query + * @param readPreference the read preference for this query + * @param retryReads true if reads should be retried + */ + public DBCursor(final DBCollection collection, final DBObject query, @Nullable final DBObject fields, + @Nullable final ReadPreference readPreference, final boolean retryReads) { + this(collection, query, new DBCollectionFindOptions().projection(fields).readPreference(readPreference), retryReads); + } + + DBCursor(final DBCollection collection, @Nullable final DBObject filter, final DBCollectionFindOptions findOptions) { + this(collection, filter, findOptions, true); + } + + DBCursor(final DBCollection collection, @Nullable final DBObject filter, final DBCollectionFindOptions findOptions, + final boolean retryReads) { + this(collection, filter, findOptions, collection.getExecutor(), collection.getDBDecoderFactory(), + collection.getObjectCodec(), retryReads); + } + + private DBCursor(final DBCollection collection, @Nullable final DBObject filter, final DBCollectionFindOptions findOptions, + final OperationExecutor executor, final DBDecoderFactory decoderFactory, final Decoder decoder, + final boolean retryReads) { + this.collection = notNull("collection", collection); + this.filter = filter; + this.executor = notNull("executor", executor); + this.findOptions = notNull("findOptions", findOptions.copy()); + this.decoderFactory = decoderFactory; + this.decoder = notNull("decoder", decoder); + this.retryReads = retryReads; + } + + /** + * Creates a copy of an existing database cursor. The new cursor is an iterator, even if the original was an array. + * + * @return the new cursor + */ + public DBCursor copy() { + return new DBCursor(collection, filter, findOptions, executor, decoderFactory, decoder, retryReads); + } + + /** + * Checks if there is another object available. + * + *

<p>Note: Automatically turns cursors of type Tailable to TailableAwait. For non-blocking tailable cursors see
+ * {@link #tryNext}.</p>

+ * + * @return true if there is another object available + * @mongodb.driver.manual /core/cursors/#cursor-batches Cursor Batches + */ + @Override + public boolean hasNext() { + if (closed) { + throw new IllegalStateException("Cursor has been closed"); + } + + if (cursor == null) { + FindOperation operation = getQueryOperation(decoder); + if (operation.getCursorType() == CursorType.Tailable) { + operation.cursorType(CursorType.TailableAwait); + } + initializeCursor(operation); + } + + boolean hasNext = cursor.hasNext(); + if (cursor.getServerCursor() == null) { + clearCursorOnCleaner(); + } + return hasNext; + } + + /** + * Returns the object the cursor is at and moves the cursor ahead by one. + * + *

<p>Note: Automatically turns cursors of type Tailable to TailableAwait. For non-blocking tailable cursors see
+ * {@link #tryNext}.</p>

+ * + * @return the next element + * @mongodb.driver.manual /core/cursors/#cursor-batches Cursor Batches + */ + @Override + public DBObject next() { + checkIteratorOrArray(IteratorOrArray.ITERATOR); + if (!hasNext()) { + throw new NoSuchElementException(); + } + + return nextInternal(); + } + + /** + * Gets the number of results available locally without blocking, which may be 0. + * + *

+ * <p>If the cursor is known to be exhausted, returns 0. If the cursor is closed before it's been exhausted, it may return a non-zero
+ * value.</p>

+ *
+ * @return the number of results available locally without blocking
+ * @since 4.5
+ */
+ public int available() {
+     return cursor != null ? cursor.available() : 0;
+ }
+
+ /**
+ * Non-blocking check for tailable cursors to see if another object is available.
+ *
+ *

<p>Returns the object the cursor is at and moves the cursor ahead by one, or
+ * returns null if no document is available.</p>
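A sketch of the intended polling loop (the capped collection and the `process` handler are hypothetical):

```java
// Poll a tailable cursor without blocking: tryNext() returns null as soon as
// no further document is currently available, rather than waiting.
DBCursor tailable = cappedCollection.find().cursorType(CursorType.Tailable);
DBObject doc;
while ((doc = tailable.tryNext()) != null) {
    process(doc);
}
```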

+ * + * @return the next element or null + * @throws MongoException if failed + * @throws IllegalArgumentException if the cursor is not tailable + * @mongodb.driver.manual /core/cursors/#cursor-batches Cursor Batches + */ + @Nullable + public DBObject tryNext() { + if (cursor == null) { + FindOperation operation = getQueryOperation(decoder); + if (!operation.getCursorType().isTailable()) { + throw new IllegalArgumentException("Can only be used with a tailable cursor"); + } + initializeCursor(operation); + } + DBObject next = cursor.tryNext(); + if (cursor.getServerCursor() == null) { + clearCursorOnCleaner(); + } + return currentObject(next); + } + + + /** + * Returns the element the cursor is at. + * + * @return the current element + */ + public DBObject curr() { + return currentObject; + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + + /** + * Gets the query limit. + * + * @return the limit, or 0 if no limit is set + */ + public int getLimit() { + return findOptions.getLimit(); + } + + /** + * Gets the batch size. + * + * @return the batch size + */ + public int getBatchSize() { + return findOptions.getBatchSize(); + } + + /** + * Adds a comment to the query to identify queries in the database profiler output. + * + * @param comment the comment that is to appear in the profiler output + * @return {@code this} so calls can be chained + * @mongodb.driver.manual reference/operator/meta/comment/ $comment + * @since 2.12 + */ + public DBCursor comment(final String comment) { + findOptions.comment(comment); + return this; + } + + /** + * Specifies an exclusive upper limit for the index to use in a query. + * + * @param max a document specifying the fields, and the upper bound values for those fields + * @return {@code this} so calls can be chained + * @mongodb.driver.manual reference/operator/meta/max/ $max + * @since 2.12 + */ + public DBCursor max(final DBObject max) { + findOptions.max(max); + return this; + } + + /** + * Specifies an inclusive lower limit for the index to use in a query. + * + * @param min a document specifying the fields, and the lower bound values for those fields + * @return {@code this} so calls can be chained + * @mongodb.driver.manual reference/operator/meta/min/ $min + * @since 2.12 + */ + public DBCursor min(final DBObject min) { + findOptions.min(min); + return this; + } + + /** + * Forces the cursor to only return fields included in the index. + * + * @return {@code this} so calls can be chained + * @mongodb.driver.manual reference/operator/meta/returnKey/ $returnKey + * @since 2.12 + */ + public DBCursor returnKey() { + findOptions.returnKey(true); + return this; + } + + /** + * Informs the database of indexed fields of the collection in order to improve performance. + * + * @param indexKeys a {@code DBObject} with fields and direction + * @return same DBCursor for chaining operations + * @mongodb.driver.manual reference/operator/meta/hint/ $hint + */ + public DBCursor hint(final DBObject indexKeys) { + findOptions.hint(indexKeys); + return this; + } + + /** + * Informs the database of an indexed field of the collection in order to improve performance. + * + * @param indexName the name of an index + * @return same DBCursor for chaining operations + * @since 4.4 + * @mongodb.driver.manual reference/operator/meta/hint/ $hint + */ + public DBCursor hint(final String indexName) { + findOptions.hintString(indexName); + return this; + } + + /** + * Set the maximum execution time for operations on this cursor. 
+ * + * @param maxTime the maximum time that the server will allow the query to run, before killing the operation. + * @param timeUnit the time unit + * @return same DBCursor for chaining operations + * @mongodb.driver.manual reference/operator/meta/maxTimeMS/ $maxTimeMS + * @since 2.12.0 + */ + public DBCursor maxTime(final long maxTime, final TimeUnit timeUnit) { + findOptions.maxTime(maxTime, timeUnit); + return this; + } + + /** + * Returns an object containing basic information about the execution of the query that created this cursor. This creates a {@code + * DBObject} with a number of fields, including but not limited to: + *
    + *
<ul>
+ *     <li>cursor: cursor type</li>
+ *     <li>nScanned: number of records examined by the database for this query</li>
+ *     <li>n: the number of records that the database returned</li>
+ *     <li>millis: how long it took the database to execute the query</li>
+ * </ul>
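For example, the explain output might be dumped while tuning a query (a sketch; the query is hypothetical):

```java
DBObject explainDoc = collection.find(new BasicDBObject("age", 50)).explain();
System.out.println(explainDoc);   // inspect fields such as "millis" described above
```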
+ * + * @return a {@code DBObject} containing the explain output for this DBCursor's query + * @throws MongoException if the operation failed + * @mongodb.driver.manual reference/command/explain Explain Output + * @mongodb.server.release 3.0 + */ + public DBObject explain() { + return executor.execute( + getQueryOperation(collection.getObjectCodec()) + .asExplainableOperation(null, getDefaultCodecRegistry().get(DBObject.class)), getReadPreference(), getReadConcern(), null); + } + + /** + * Sets the cursor type. + * + * @param cursorType the cursor type, which may not be null + * @return this + * @since 3.9 + */ + public DBCursor cursorType(final CursorType cursorType) { + findOptions.cursorType(cursorType); + return this; + } + + /** + * The server normally times out idle cursors after an inactivity period (10 minutes) + * to prevent excess memory use. Set this option to prevent that. + * + * @param noCursorTimeout true if cursor timeout is disabled + * @return this + * @since 3.9 + */ + public DBCursor noCursorTimeout(final boolean noCursorTimeout) { + findOptions.noCursorTimeout(noCursorTimeout); + return this; + } + + /** + * Get partial results from a sharded cluster if one or more shards are unreachable (instead of throwing an error). + * + * @param partial if partial results for sharded clusters is enabled + * @return this + * @since 3.9 + */ + public DBCursor partial(final boolean partial) { + findOptions.partial(partial); + return this; + } + + private FindOperation getQueryOperation(final Decoder decoder) { + return new FindOperation<>( + collection.getNamespace(), decoder) + .filter(collection.wrapAllowNull(filter)) + .batchSize(findOptions.getBatchSize()) + .skip(findOptions.getSkip()) + .limit(findOptions.getLimit()) + .projection(collection.wrapAllowNull(findOptions.getProjection())) + .sort(collection.wrapAllowNull(findOptions.getSort())) + .collation(findOptions.getCollation()) + .comment(findOptions.getComment() != null + ? new BsonString(findOptions.getComment()) : null) + .hint(findOptions.getHint() != null + ? collection.wrapAllowNull(findOptions.getHint()) + : (findOptions.getHintString() != null + ? new BsonString(findOptions.getHintString()) : null)) + .min(collection.wrapAllowNull(findOptions.getMin())) + .max(collection.wrapAllowNull(findOptions.getMax())) + .cursorType(findOptions.getCursorType()) + .noCursorTimeout(findOptions.isNoCursorTimeout()) + .partial(findOptions.isPartial()) + .returnKey(findOptions.isReturnKey()) + .showRecordId(findOptions.isShowRecordId()) + .retryReads(retryReads); + } + + /** + * Sorts this cursor's elements. This method must be called before getting any object from the cursor. + * + * @param orderBy the fields by which to sort + * @return a cursor pointing to the first element of the sorted results + */ + public DBCursor sort(final DBObject orderBy) { + findOptions.sort(orderBy); + return this; + } + + /** + * Limits the number of elements returned. Note: parameter {@code limit} should be positive, although a negative value is + * supported for legacy reason. Passing a negative value will call {@link DBCursor#batchSize(int)} which is the preferred method. + * + * @param limit the number of elements to return + * @return a cursor to iterate the results + * @mongodb.driver.manual reference/method/cursor.limit Limit + */ + public DBCursor limit(final int limit) { + findOptions.limit(limit); + return this; + } + + /** + *

<p>Limits the number of elements returned in one batch. A cursor typically fetches a batch of result objects and stores them
+ * locally.</p>

+ * + *

<p>If {@code batchSize} is positive, it represents the size of each batch of objects retrieved. It can be adjusted to optimize
+ * performance and limit data transfer.</p>

+ * + *

<p>If {@code batchSize} is negative, it limits the number of objects returned to those that fit within the maximum batch size limit
+ * (usually 4MB), and the cursor will be closed. For example, if {@code batchSize} is -10, then the server will return a maximum of 10
+ * documents, and as many as can fit in 4MB, then close the cursor. Note that this feature is different from limit() in that documents
+ * must fit within a maximum size, and it removes the need to send a request to close the cursor server-side.</p>
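A sketch contrasting the two signs (query and sizes are illustrative):

```java
// Positive: iterate the full result set, fetching 200 documents per batch.
DBCursor paged = collection.find(query).batchSize(200);

// Negative: at most 10 documents in a single batch, after which the server
// closes the cursor itself, saving the client a kill-cursors round trip.
DBCursor capped = collection.find(query).batchSize(-10);
```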

+ * + * @param numberOfElements the number of elements to return in a batch + * @return {@code this} so calls can be chained + */ + public DBCursor batchSize(final int numberOfElements) { + findOptions.batchSize(numberOfElements); + return this; + } + + /** + * Discards a given number of elements at the beginning of the cursor. + * + * @param numberOfElements the number of elements to skip + * @return a cursor pointing to the new first element of the results + * @throws IllegalStateException if the cursor has started to be iterated through + */ + public DBCursor skip(final int numberOfElements) { + findOptions.skip(numberOfElements); + return this; + } + + @Override + public long getCursorId() { + if (cursor != null) { + ServerCursor serverCursor = cursor.getServerCursor(); + if (serverCursor == null) { + return 0; + } + return serverCursor.getId(); + } else { + return 0; + } + } + + /** + * Returns the number of objects through which the cursor has iterated. + * + * @return the number of objects seen + */ + public int numSeen() { + return numSeen; + } + + @Override + public void close() { + closed = true; + if (cursor != null) { + cursor.close(); + cursor = null; + clearCursorOnCleaner(); + } + + currentObject = null; + } + + /** + *

<p>Creates a copy of this cursor object that can be iterated. Note: you can iterate the DBCursor itself without calling this method;
+ * no actual data is copied.</p>

+ * + *

<p>Note that use of this method does not let you close the underlying cursor in the case of either an exception or an early
+ * break. The preferred method of iteration is to use DBCursor as an Iterator, so that you can call close() on it in a finally
+ * block.</p>

+ * + * @return an iterator + */ + @Override + public Iterator iterator() { + return this.copy(); + } + + /** + * Converts this cursor to an array. + * + * @return an array of elements + * @throws MongoException if failed + */ + public List toArray() { + return toArray(Integer.MAX_VALUE); + } + + /** + * Converts this cursor to an array. + * + * @param max the maximum number of objects to return + * @return an array of objects + * @throws MongoException if failed + */ + public List toArray(final int max) { + checkIteratorOrArray(IteratorOrArray.ARRAY); + fillArray(max - 1); + return all; + } + + /** + * Counts the number of objects matching the query. This does not take limit/skip into consideration, and does initiate a call to the + * server. + * + * @return the number of objects + * @throws MongoException if the operation failed + * @see DBCursor#size + */ + public int count() { + DBCollectionCountOptions countOptions = getDbCollectionCountOptions(); + return (int) collection.getCount(getQuery(), countOptions); + } + + /** + * Returns the first document that matches the query. + * + * @return the first matching document + * @since 2.12 + */ + @Nullable + public DBObject one() { + try (DBCursor findOneCursor = copy().limit(-1)) { + return findOneCursor.hasNext() ? findOneCursor.next() : null; + } + } + + /** + * Pulls back all items into an array and returns the number of objects. Note: this can be resource intensive. + * + * @return the number of elements in the array + * @throws MongoException if failed + * @see #count() + * @see #size() + */ + public int length() { + checkIteratorOrArray(IteratorOrArray.ARRAY); + fillArray(Integer.MAX_VALUE); + return all.size(); + } + + /** + * For testing only! Iterates cursor and counts objects + * + * @return num objects + * @throws MongoException if failed + * @see #count() + */ + public int itcount() { + int n = 0; + while (this.hasNext()) { + this.next(); + n++; + } + return n; + } + + /** + * Counts the number of objects matching the query this does take limit/skip into consideration + * + * @return the number of objects + * @throws MongoException if the operation failed + * @see #count() + */ + public int size() { + DBCollectionCountOptions countOptions = getDbCollectionCountOptions().skip(findOptions.getSkip()).limit(findOptions.getLimit()); + return (int) collection.getCount(getQuery(), countOptions); + } + + /** + * Gets the fields to be returned. + * + * @return the field selector that cursor used + */ + @Nullable + public DBObject getKeysWanted() { + return findOptions.getProjection(); + } + + /** + * Gets the query. + * + * @return the query that cursor used + */ + public DBObject getQuery() { + return filter; + } + + /** + * Gets the collection. + * + * @return the collection that data is pulled from + */ + public DBCollection getCollection() { + return collection; + } + + @Override + @Nullable + public ServerAddress getServerAddress() { + if (cursor != null) { + return cursor.getServerAddress(); + } else { + return null; + } + } + + /** + * Sets the read preference for this cursor. See the documentation for {@link ReadPreference} for more information. + * + * @param readPreference read preference to use + * @return {@code this} so calls can be chained + */ + public DBCursor setReadPreference(final ReadPreference readPreference) { + findOptions.readPreference(readPreference); + return this; + } + + /** + * Gets the default read preference. 
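One distinction among the methods above that is easy to miss: count() ignores the cursor's skip/limit settings while size() applies them. A sketch (the figure of 1000 matching documents is hypothetical):

```java
DBCursor cursor = collection.find(query).skip(100).limit(50);
int total = cursor.count();    // all documents matching the query, e.g. 1000
int window = cursor.size();    // matches after skip/limit are applied, here at most 50
```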
+ * + * @return the readPreference used by this cursor + */ + public ReadPreference getReadPreference() { + ReadPreference readPreference = findOptions.getReadPreference(); + if (readPreference != null) { + return readPreference; + } + return collection.getReadPreference(); + } + + + /** + * Sets the read concern for this collection. + * + * @param readConcern the read concern to use for this collection + * @since 3.2 + * @mongodb.server.release 3.2 + * @mongodb.driver.manual reference/readConcern/ Read Concern + */ + DBCursor setReadConcern(@Nullable final ReadConcern readConcern) { + findOptions.readConcern(readConcern); + return this; + } + + /** + * Get the read concern for this collection. + * + * @return the {@link com.mongodb.ReadConcern} + * @since 3.2 + * @mongodb.server.release 3.2 + * @mongodb.driver.manual reference/readConcern/ Read Concern + */ + ReadConcern getReadConcern() { + ReadConcern readConcern = findOptions.getReadConcern(); + if (readConcern != null) { + return readConcern; + } + return collection.getReadConcern(); + } + + /** + * Returns the collation options + * + * @return the collation options + * @since 3.4 + * @mongodb.server.release 3.4 + */ + @Nullable + public Collation getCollation() { + return findOptions.getCollation(); + } + + /** + * Sets the collation options + * + *

<p>A null value represents the server default.</p>
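A sketch of supplying a collation (the locale and strength are illustrative; Collation and CollationStrength come from com.mongodb.client.model):

```java
// Request case-insensitive matching via a secondary-strength "en" collation.
cursor.setCollation(Collation.builder()
        .locale("en")
        .collationStrength(CollationStrength.SECONDARY)
        .build());
```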

+ * @param collation the collation options to use + * @return this + * @since 3.4 + * @mongodb.server.release 3.4 + */ + public DBCursor setCollation(@Nullable final Collation collation) { + findOptions.collation(collation); + return this; + } + + /** + * Sets the factory that will be used create a {@code DBDecoder} that will be used to decode BSON documents into DBObject instances. + * + * @param factory the DBDecoderFactory + * @return {@code this} so calls can be chained + */ + public DBCursor setDecoderFactory(final DBDecoderFactory factory) { + this.decoderFactory = factory; + + //Not creating new CompoundDBObjectCodec because we don't care about encoder. + this.decoder = new DBDecoderAdapter(factory.create(), collection, PowerOfTwoBufferPool.DEFAULT); + return this; + } + + /** + * Gets the decoder factory that creates the decoder this cursor will use to decode objects from MongoDB. + * + * @return the decoder factory. + */ + public DBDecoderFactory getDecoderFactory() { + return decoderFactory; + } + + @Override + public String toString() { + return "DBCursor{" + + "collection=" + collection + + ", find=" + findOptions + + (cursor != null ? (", cursor=" + cursor.getServerCursor()) : "") + + '}'; + } + + private void initializeCursor(final FindOperation operation) { + cursor = + new MongoBatchCursorAdapter<>(executor + .withTimeoutSettings(createTimeoutSettings(collection.getTimeoutSettings(), findOptions)) + .execute(operation, getReadPreference(), getReadConcern(), null)); + ServerCursor serverCursor = cursor.getServerCursor(); + if (isCursorFinalizerEnabled() && serverCursor != null) { + optionalCleaner = DBCursorCleaner.create(collection.getDB().getMongoClient(), collection.getNamespace(), + serverCursor); + } + } + + private void clearCursorOnCleaner() { + if (optionalCleaner != null) { + optionalCleaner.clearCursor(); + } + } + + private boolean isCursorFinalizerEnabled() { + return collection.getDB().getMongoClient().getMongoClientOptions().isCursorFinalizerEnabled(); + } + + private void checkIteratorOrArray(final IteratorOrArray expected) { + if (iteratorOrArray == null) { + iteratorOrArray = expected; + return; + } + + if (expected == iteratorOrArray) { + return; + } + + throw new IllegalArgumentException("Can't switch cursor access methods"); + } + + private void fillArray(final int n) { + checkIteratorOrArray(IteratorOrArray.ARRAY); + while (n >= all.size() && hasNext()) { + all.add(nextInternal()); + } + } + + private DBObject nextInternal() { + if (iteratorOrArray == null) { + checkIteratorOrArray(IteratorOrArray.ITERATOR); + } + + DBObject next = cursor.next(); + if (cursor.getServerCursor() == null) { + clearCursorOnCleaner(); + } + return currentObjectNonNull(next); + } + + @Nullable + private DBObject currentObject(@Nullable final DBObject newCurrentObject){ + if (newCurrentObject != null) { + currentObject = newCurrentObject; + numSeen++; + + DBObject projection = findOptions.getProjection(); + if (projection != null && !(projection.keySet().isEmpty())) { + currentObject.markAsPartialObject(); + } + } + return newCurrentObject; + } + + private DBObject currentObjectNonNull(final DBObject newCurrentObject){ + currentObject = newCurrentObject; + numSeen++; + + DBObject projection = findOptions.getProjection(); + if (projection != null && !(projection.keySet().isEmpty())) { + currentObject.markAsPartialObject(); + } + return newCurrentObject; + } + + private enum IteratorOrArray { + ITERATOR, + ARRAY + } + + private DBCollectionCountOptions 
getDbCollectionCountOptions() { + return new DBCollectionCountOptions() + .readPreference(getReadPreference()) + .readConcern(getReadConcern()) + .collation(getCollation()) + .maxTime(findOptions.getMaxTime(MILLISECONDS), MILLISECONDS) + .hint(findOptions.getHint()) + .hintString(findOptions.getHintString()); + } +} diff --git a/driver-legacy/src/main/com/mongodb/DBCursorCleaner.java b/driver-legacy/src/main/com/mongodb/DBCursorCleaner.java new file mode 100644 index 00000000000..9a7b6322def --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/DBCursorCleaner.java @@ -0,0 +1,66 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +/** + * A cleaner for abandoned {@link DBCursor} instances. + * + * @see DBCursor + */ +abstract class DBCursorCleaner { + // Should be true on Java 9+ + private static final boolean CLEANER_IS_AVAILABLE; + + static { + boolean cleanerIsAvailable = false; + try { + Class.forName("java.lang.ref.Cleaner"); + cleanerIsAvailable = true; + } catch (ClassNotFoundException ignored) { + } + CLEANER_IS_AVAILABLE = cleanerIsAvailable; + } + + /** + * Create a new instance. + * + *

+ * <p>The implementation of this method ensures that a {@code java.lang.ref.Cleaner}-based implementation is used when
+ * the runtime is Java 9+. Otherwise a {@link Object#finalize}-based implementation is used.</p>
+ *
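The Java 9+ path relies on the standard java.lang.ref.Cleaner pattern, roughly as sketched below (an illustrative stand-in, not the driver's actual Java9DBCursorCleaner):

```java
import java.lang.ref.Cleaner;

class CursorTracker {
    private static final Cleaner CLEANER = Cleaner.create();
    private final Cleaner.Cleanable cleanable;

    CursorTracker(final Object cursor, final Runnable killCursorsAction) {
        // The action must not capture 'cursor' itself, or the cursor would never
        // become phantom reachable and the action would never run.
        this.cleanable = CLEANER.register(cursor, killCursorsAction);
    }

    void clearCursor() {
        cleanable.clean();   // idempotent: the action runs at most once
    }
}
```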

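+     *
+     * <p>A sketch of the intended lifecycle, as wired up by {@code DBCursor} (illustrative):</p>
+     * <pre>{@code
+     * DBCursorCleaner cleaner = DBCursorCleaner.create(mongoClient, namespace, serverCursor);
+     * // ... the cursor is iterated ...
+     * cleaner.clearCursor(); // exhausted or explicitly closed: nothing left to clean up
+     * }</pre>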
+     *
+     * @param mongoClient the client from which the {@link DBCursor} came
+     * @param namespace the namespace of the cursor
+     * @param serverCursor the server cursor
+     * @return the cleaner
+     */
+    @SuppressWarnings("deprecation")
+    static DBCursorCleaner create(final MongoClient mongoClient, final MongoNamespace namespace,
+                                  final ServerCursor serverCursor) {
+        if (CLEANER_IS_AVAILABLE) {
+            return new Java9DBCursorCleaner(mongoClient, namespace, serverCursor);
+        } else {
+            return new Java8DBCursorCleaner(mongoClient, namespace, serverCursor);
+        }
+    }
+
+    /**
+     * {@link DBCursor} should call this method when the cursor has been exhausted or explicitly closed by the
+     * application.
+     */
+    abstract void clearCursor();
+}
diff --git a/driver-legacy/src/main/com/mongodb/DBDecoder.java b/driver-legacy/src/main/com/mongodb/DBDecoder.java
new file mode 100644
index 00000000000..69ecb8fe535
--- /dev/null
+++ b/driver-legacy/src/main/com/mongodb/DBDecoder.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb;
+
+import org.bson.BSONDecoder;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * An interface for decoders of BSON into instances of DBObject that belong to a DBCollection.
+ */
+public interface DBDecoder extends BSONDecoder {
+
+    /**
+     * Get a callback for the given collection.
+     *
+     * @param collection the collection
+     * @return the callback
+     */
+    DBCallback getDBCallback(DBCollection collection);
+
+    /**
+     * Decode a single DBObject belonging to the given collection from the given input stream.
+     *
+     * @param input the input stream
+     * @param collection the collection
+     * @return the DBObject
+     * @throws IOException if an exception occurs while decoding from the {@code InputStream}
+     */
+    DBObject decode(InputStream input, DBCollection collection) throws IOException;
+
+    /**
+     * Decode a single DBObject belonging to the given collection from the given array of bytes.
+     *
+     * @param bytes the byte array
+     * @param collection the collection
+     * @return the DBObject
+     */
+    DBObject decode(byte[] bytes, DBCollection collection);
+}
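DBDecoder and the DBDecoderFactory defined below let callers swap a cursor's decoding strategy. A minimal sketch, assuming a collection is in scope; the tracing wrapper is invented for illustration and is not part of this patch:

```java
// Illustrative only: wrap DefaultDBDecoder with a trace and plug it into a cursor
// via DBCursor.setDecoderFactory (shown earlier in this patch).
DBDecoderFactory tracingFactory = () -> new DefaultDBDecoder() {
    @Override
    public DBObject decode(final byte[] bytes, final DBCollection collection) {
        System.out.println("decoding " + bytes.length + " bytes"); // invented trace output
        return super.decode(bytes, collection);
    }
};
DBCursor cursor = collection.find().setDecoderFactory(tracingFactory);
```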
diff --git a/driver-legacy/src/main/com/mongodb/DBDecoderAdapter.java b/driver-legacy/src/main/com/mongodb/DBDecoderAdapter.java
new file mode 100644
index 00000000000..dd761234df9
--- /dev/null
+++ b/driver-legacy/src/main/com/mongodb/DBDecoderAdapter.java
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb;
+
+import com.mongodb.internal.connection.BufferProvider;
+import com.mongodb.internal.connection.ByteBufferBsonOutput;
+import org.bson.BsonBinaryWriter;
+import org.bson.BsonReader;
+import org.bson.codecs.Decoder;
+import org.bson.codecs.DecoderContext;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+
+class DBDecoderAdapter implements Decoder<DBObject> {
+    private final DBDecoder decoder;
+    private final DBCollection collection;
+    private final BufferProvider bufferProvider;
+
+    DBDecoderAdapter(final DBDecoder decoder, final DBCollection collection, final BufferProvider bufferProvider) {
+        this.decoder = decoder;
+        this.collection = collection;
+        this.bufferProvider = bufferProvider;
+    }
+
+    @Override
+    public DBObject decode(final BsonReader reader, final DecoderContext decoderContext) {
+        ByteBufferBsonOutput bsonOutput = new ByteBufferBsonOutput(bufferProvider);
+        BsonBinaryWriter binaryWriter = new BsonBinaryWriter(bsonOutput);
+        try {
+            binaryWriter.pipe(reader);
+            BufferExposingByteArrayOutputStream byteArrayOutputStream =
+                    new BufferExposingByteArrayOutputStream(binaryWriter.getBsonOutput().getSize());
+            bsonOutput.pipe(byteArrayOutputStream);
+            return decoder.decode(byteArrayOutputStream.getInternalBytes(), collection);
+        } catch (IOException e) {
+            // impossible with a byte array output stream
+            throw new MongoInternalException("An unlikely IOException thrown.", e);
+        } finally {
+            binaryWriter.close();
+            bsonOutput.close();
+        }
+    }
+
+    // Just so we don't have to copy the buffer
+    private static class BufferExposingByteArrayOutputStream extends ByteArrayOutputStream {
+        BufferExposingByteArrayOutputStream(final int size) {
+            super(size);
+        }
+
+        byte[] getInternalBytes() {
+            return buf;
+        }
+    }
+}
diff --git a/driver-legacy/src/main/com/mongodb/DBDecoderFactory.java b/driver-legacy/src/main/com/mongodb/DBDecoderFactory.java
new file mode 100644
index 00000000000..ece26b4dc40
--- /dev/null
+++ b/driver-legacy/src/main/com/mongodb/DBDecoderFactory.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb;
+
+/**
+ * Creates concrete DBDecoder instances.
+ */
+public interface DBDecoderFactory {
+    /**
+     * Creates an instance.
+     *
+     * @return the concrete implementation of {@code DBDecoder}.
+     */
+    DBDecoder create();
+}
diff --git a/driver-legacy/src/main/com/mongodb/DBEncoder.java b/driver-legacy/src/main/com/mongodb/DBEncoder.java
new file mode 100644
index 00000000000..79ceba31c52
--- /dev/null
+++ b/driver-legacy/src/main/com/mongodb/DBEncoder.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb;
+
+import org.bson.BSONObject;
+import org.bson.io.OutputBuffer;
+
+/**
+ * An interface for encoders of BSONObject to BSON.
+ */
+public interface DBEncoder {
+    /**
+     * Encode the BSONObject.
+     *
+     * @param outputBuffer the OutputBuffer to write to
+     * @param document the BSONObject to write
+     * @return the number of bytes in the encoding
+     */
+    int writeObject(OutputBuffer outputBuffer, BSONObject document);
+}
diff --git a/driver-legacy/src/main/com/mongodb/DBEncoderAdapter.java b/driver-legacy/src/main/com/mongodb/DBEncoderAdapter.java
new file mode 100644
index 00000000000..9ae51678ec4
--- /dev/null
+++ b/driver-legacy/src/main/com/mongodb/DBEncoderAdapter.java
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb;
+
+import org.bson.BsonBinaryReader;
+import org.bson.BsonWriter;
+import org.bson.ByteBufNIO;
+import org.bson.codecs.Encoder;
+import org.bson.codecs.EncoderContext;
+import org.bson.io.BasicOutputBuffer;
+import org.bson.io.ByteBufferBsonInput;
+
+import static com.mongodb.assertions.Assertions.notNull;
+import static java.nio.ByteBuffer.wrap;
+
+class DBEncoderAdapter implements Encoder<DBObject> {
+
+    private final DBEncoder encoder;
+
+    DBEncoderAdapter(final DBEncoder encoder) {
+        this.encoder = notNull("encoder", encoder);
+    }
+
+    // TODO: this can be optimized to reduce copying of buffers. For that we'd need an InputBuffer that could iterate
+    // over an array of ByteBuffer instances from a PooledByteBufferOutputBuffer
+    @Override
+    public void encode(final BsonWriter writer, final DBObject document, final EncoderContext encoderContext) {
+        try (BasicOutputBuffer buffer = new BasicOutputBuffer()) {
+            encoder.writeObject(buffer, document);
+            try (BsonBinaryReader reader = new BsonBinaryReader(new ByteBufferBsonInput(new ByteBufNIO(wrap(buffer.toByteArray()))))) {
+                writer.pipe(reader);
+            }
+        }
+    }
+
+    @Override
+    public Class<DBObject> getEncoderClass() {
+        return DBObject.class;
+    }
+}
diff --git a/driver-legacy/src/main/com/mongodb/DBEncoderFactory.java b/driver-legacy/src/main/com/mongodb/DBEncoderFactory.java
new file mode 100644
index 00000000000..a793d3eb7c7
--- /dev/null
+++ b/driver-legacy/src/main/com/mongodb/DBEncoderFactory.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +/** + * Creates concrete DBEncoder instances. + */ +public interface DBEncoderFactory { + /** + * Creates an instance. + * + * @return the concrete implementation of {@code DBEncoder}. + */ + DBEncoder create(); +} diff --git a/driver-legacy/src/main/com/mongodb/DBEncoderFactoryAdapter.java b/driver-legacy/src/main/com/mongodb/DBEncoderFactoryAdapter.java new file mode 100644 index 00000000000..8cf828621a3 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/DBEncoderFactoryAdapter.java @@ -0,0 +1,40 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import org.bson.BsonWriter; +import org.bson.codecs.Encoder; +import org.bson.codecs.EncoderContext; + +class DBEncoderFactoryAdapter implements Encoder { + + private final DBEncoderFactory encoderFactory; + + DBEncoderFactoryAdapter(final DBEncoderFactory encoderFactory) { + this.encoderFactory = encoderFactory; + } + + @Override + public void encode(final BsonWriter writer, final DBObject value, final EncoderContext encoderContext) { + new DBEncoderAdapter(encoderFactory.create()).encode(writer, value, encoderContext); + } + + @Override + public Class getEncoderClass() { + return DBObject.class; + } +} diff --git a/driver-legacy/src/main/com/mongodb/DBObjectCollationHelper.java b/driver-legacy/src/main/com/mongodb/DBObjectCollationHelper.java new file mode 100644 index 00000000000..d41454a516b --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/DBObjectCollationHelper.java @@ -0,0 +1,118 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb; + +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.CollationAlternate; +import com.mongodb.client.model.CollationCaseFirst; +import com.mongodb.client.model.CollationMaxVariable; +import com.mongodb.client.model.CollationStrength; +import com.mongodb.lang.Nullable; + +final class DBObjectCollationHelper { + + @Nullable + static Collation createCollationFromOptions(final DBObject options) { + if (options.get("collation") == null) { + return null; + } else if (!(options.get("collation") instanceof DBObject)) { + throw new IllegalArgumentException("collation options should be a document"); + } else { + Collation.Builder builder = Collation.builder(); + DBObject collation = (DBObject) options.get("collation"); + + if (collation.get("locale") == null) { + throw new IllegalArgumentException("'locale' is required when providing collation options"); + } else { + Object locale = collation.get("locale"); + if (!(locale instanceof String)) { + throw new IllegalArgumentException("collation 'locale' should be a String"); + } else { + builder.locale((String) locale); + } + } + if (collation.get("caseLevel") != null){ + Object caseLevel = collation.get("caseLevel"); + if (!(caseLevel instanceof Boolean)) { + throw new IllegalArgumentException("collation 'caseLevel' should be a Boolean"); + } else { + builder.caseLevel((Boolean) caseLevel); + } + } + if (collation.get("caseFirst") != null) { + Object caseFirst = collation.get("caseFirst"); + if (!(caseFirst instanceof String)) { + throw new IllegalArgumentException("collation 'caseFirst' should be a String"); + } else { + builder.collationCaseFirst(CollationCaseFirst.fromString((String) caseFirst)); + } + } + if (collation.get("strength") != null) { + Object strength = collation.get("strength"); + if (!(strength instanceof Integer)) { + throw new IllegalArgumentException("collation 'strength' should be an Integer"); + } else { + builder.collationStrength(CollationStrength.fromInt((Integer) strength)); + } + } + if (collation.get("numericOrdering") != null) { + Object numericOrdering = collation.get("numericOrdering"); + if (!(numericOrdering instanceof Boolean)) { + throw new IllegalArgumentException("collation 'numericOrdering' should be a Boolean"); + } else { + builder.numericOrdering((Boolean) numericOrdering); + } + } + if (collation.get("alternate") != null) { + Object alternate = collation.get("alternate"); + if (!(alternate instanceof String)) { + throw new IllegalArgumentException("collation 'alternate' should be a String"); + } else { + builder.collationAlternate(CollationAlternate.fromString((String) alternate)); + } + } + if (collation.get("maxVariable") != null) { + Object maxVariable = collation.get("maxVariable"); + if (!(maxVariable instanceof String)) { + throw new IllegalArgumentException("collation 'maxVariable' should be a String"); + } else { + builder.collationMaxVariable(CollationMaxVariable.fromString((String) maxVariable)); + } + } + if (collation.get("normalization") != null) { + Object normalization = collation.get("normalization"); + if (!(normalization instanceof Boolean)) { + throw new IllegalArgumentException("collation 'normalization' should be a Boolean"); + } else { + builder.normalization((Boolean) normalization); + } + } + if (collation.get("backwards") != null) { + Object backwards = collation.get("backwards"); + if (!(backwards instanceof Boolean)) { + throw new IllegalArgumentException("collation 'backwards' should be a Boolean"); + } else { + 
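+ // "backwards" makes secondary differences such as diacritics compare from the end of
+ // the string, as in some French dictionary orderings.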
builder.backwards((Boolean) backwards); + } + } + return builder.build(); + } + } + + private DBObjectCollationHelper() { + } +} diff --git a/driver-legacy/src/main/com/mongodb/DBObjects.java b/driver-legacy/src/main/com/mongodb/DBObjects.java new file mode 100644 index 00000000000..94f2308324f --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/DBObjects.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import org.bson.BsonDocument; +import org.bson.BsonDocumentReader; +import org.bson.codecs.DecoderContext; + +final class DBObjects { + public static DBObject toDBObject(final BsonDocument document) { + return MongoClient.getDefaultCodecRegistry().get(DBObject.class).decode(new BsonDocumentReader(document), + DecoderContext.builder().build()); + } + + private DBObjects() { + } +} diff --git a/driver-legacy/src/main/com/mongodb/DefaultDBCallback.java b/driver-legacy/src/main/com/mongodb/DefaultDBCallback.java new file mode 100644 index 00000000000..ddea4f9268e --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/DefaultDBCallback.java @@ -0,0 +1,81 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import org.bson.BSONObject; +import org.bson.BasicBSONCallback; +import org.bson.types.BasicBSONList; +import org.bson.types.ObjectId; + +import java.util.Collections; +import java.util.Iterator; +import java.util.List; + +/** + * An implementation of DBCallback that decodes into a DBObject. + */ +public class DefaultDBCallback extends BasicBSONCallback implements DBCallback { + + private final DBObjectFactory objectFactory; + + /** + * Creates a new DefaultDBCallback. If the Collection is null, it uses {@link DBCollectionObjectFactory} to create documents, otherwise + * it uses the collection's object factory. + * + * @param collection an optionally null Collection that the documents created by this callback belong to. + */ + public DefaultDBCallback(final DBCollection collection) { + if (collection != null) { + this.objectFactory = collection.getObjectFactory(); + } else { + this.objectFactory = new DBCollectionObjectFactory(); + } + } + + @Override + public BSONObject create() { + return objectFactory.getInstance(); + } + + @Override + public BSONObject create(final boolean array, final List path) { + return array ? new BasicDBList() : objectFactory.getInstance(path != null ? 
path : Collections.emptyList()); + } + + @Override + public void gotDBRef(final String name, final String namespace, final ObjectId id) { + _put(name, new DBRef(namespace, id)); + } + + @Override + public Object objectDone() { + String name = curName(); + BSONObject document = (BSONObject) super.objectDone(); + if (!(document instanceof BasicBSONList)) { + Iterator iterator = document.keySet().iterator(); + if (iterator.hasNext() && iterator.next().equals("$ref") && iterator.hasNext() && iterator.next().equals("$id")) { + _put(name, new DBRef((String) document.get("$db"), (String) document.get("$ref"), document.get("$id"))); + } + } + return document; + } + + /** + * The {@code DBCallbackFactory} for {@code DefaultDBCallback} instances. + */ + public static final DBCallbackFactory FACTORY = collection -> new DefaultDBCallback(collection); +} diff --git a/driver-legacy/src/main/com/mongodb/DefaultDBDecoder.java b/driver-legacy/src/main/com/mongodb/DefaultDBDecoder.java new file mode 100644 index 00000000000..c8b9144fafa --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/DefaultDBDecoder.java @@ -0,0 +1,57 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import org.bson.BasicBSONDecoder; + +import java.io.IOException; +import java.io.InputStream; + +/** + * An implementation of DBDecoder + */ +public class DefaultDBDecoder extends BasicBSONDecoder implements DBDecoder { + + @Override + public DBCallback getDBCallback(final DBCollection collection) { + return new DefaultDBCallback(collection); + } + + @Override + public DBObject decode(final InputStream input, final DBCollection collection) throws IOException { + DBCallback callback = getDBCallback(collection); + decode(input, callback); + return (DBObject) callback.get(); + } + + @Override + public DBObject decode(final byte[] bytes, final DBCollection collection) { + DBCallback callback = getDBCallback(collection); + decode(bytes, callback); + return (DBObject) callback.get(); + } + + @Override + public String toString() { + return String.format("DBDecoder{class=%s}", getClass().getName()); + } + + /** + * The {@code DBDecoderFactory} for {@code DefaultDBDecoder} instances. + */ + public static final DBDecoderFactory FACTORY = () -> new DefaultDBDecoder(); +} diff --git a/driver-legacy/src/main/com/mongodb/DefaultDBEncoder.java b/driver-legacy/src/main/com/mongodb/DefaultDBEncoder.java new file mode 100644 index 00000000000..8697243c93e --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/DefaultDBEncoder.java @@ -0,0 +1,70 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import org.bson.BSONObject; +import org.bson.BasicBSONEncoder; +import org.bson.io.OutputBuffer; + +/** + * The default BSON encoder for BSONObject instances. + */ +public class DefaultDBEncoder extends BasicBSONEncoder implements DBEncoder { + + @Override + public int writeObject(final OutputBuffer outputBuffer, final BSONObject document) { + set(outputBuffer); + int x = putObject(document); + done(); + return x; + } + + @Override + protected boolean putSpecial(final String name, final Object value) { + if (value instanceof DBRef) { + putDBRef(name, (DBRef) value); + return true; + } else { + return false; + } + } + + /** + * Deals with encoding database references. + * + * @param name the name of the field in the document + * @param ref the database reference object + */ + protected void putDBRef(final String name, final DBRef ref) { + BasicDBObject dbRefDocument = new BasicDBObject("$ref", ref.getCollectionName()).append("$id", ref.getId()); + if (ref.getDatabaseName() != null) { + dbRefDocument.put("$db", ref.getDatabaseName()); + } + + putObject(name, dbRefDocument); + } + + @Override + public String toString() { + return String.format("DBEncoder{class=%s}", getClass().getName()); + } + + /** + * The {@code DBEncoderFactory} for {@code DefaultDBEncoder} instances. + */ + public static final DBEncoderFactory FACTORY = () -> new DefaultDBEncoder(); +} diff --git a/driver-legacy/src/main/com/mongodb/InsertOptions.java b/driver-legacy/src/main/com/mongodb/InsertOptions.java new file mode 100644 index 00000000000..2235d90ea7e --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/InsertOptions.java @@ -0,0 +1,121 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.lang.Nullable; + +/** + * Options related to insertion of documents into MongoDB. The setter methods return {@code this} so that a chaining style can be used. + * + * @since 2.13 + * @mongodb.driver.manual tutorial/insert-documents/ Insert Tutorial + */ +public final class InsertOptions { + private WriteConcern writeConcern; + private boolean continueOnError; + private DBEncoder dbEncoder; + private Boolean bypassDocumentValidation; + + /** + * Set the write concern to use for the insert. + * + * @param writeConcern the write concern + * @return this + */ + public InsertOptions writeConcern(@Nullable final WriteConcern writeConcern) { + this.writeConcern = writeConcern; + return this; + } + + /** + * Set whether documents will continue to be inserted after a failure to insert one. 
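+ * <p>An illustrative call, assuming {@code collection} and {@code documents} exist:</p>
+ * <pre>{@code
+ * collection.insert(documents, new InsertOptions()
+ *         .continueOnError(true)
+ *         .writeConcern(WriteConcern.ACKNOWLEDGED));
+ * }</pre>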
+ * + * @param continueOnError whether to continue on error + * @return this + */ + public InsertOptions continueOnError(final boolean continueOnError) { + this.continueOnError = continueOnError; + return this; + } + + /** + * Set the encoder to use for the documents. + * + * @param dbEncoder the encoder + * @return this + */ + public InsertOptions dbEncoder(@Nullable final DBEncoder dbEncoder) { + this.dbEncoder = dbEncoder; + return this; + } + + /** + * The write concern to use for the insertion. By default the write concern configured for the DBCollection instance will be used. + * + * @return the write concern, or null if the default will be used. + */ + @Nullable + public WriteConcern getWriteConcern() { + return writeConcern; + } + + /** + * Whether documents will continue to be inserted after a failure to insert one (most commonly due to a duplicate key error). Note that + * this only is relevant for multi-document inserts. The default value is false. + * + * @return whether insertion will continue on error. + */ + public boolean isContinueOnError() { + return continueOnError; + } + + /** + * The encoder to use for the documents. By default the codec configured for the DBCollection instance will be used. + * + * @return the encoder, or null if the default will be used + */ + @Nullable + public DBEncoder getDbEncoder() { + return dbEncoder; + } + + + /** + * Gets whether to bypass document validation, or null if unspecified. The default is null. + * + * @return whether to bypass document validation, or null if unspecified. + * @since 2.14 + * @mongodb.server.release 3.2 + */ + @Nullable + public Boolean getBypassDocumentValidation() { + return bypassDocumentValidation; + } + + /** + * Sets whether to bypass document validation. + * + * @param bypassDocumentValidation whether to bypass document validation, or null if unspecified + * @return this + * @since 2.14 + * @mongodb.server.release 3.2 + */ + public InsertOptions bypassDocumentValidation(@Nullable final Boolean bypassDocumentValidation) { + this.bypassDocumentValidation = bypassDocumentValidation; + return this; + } +} diff --git a/driver-legacy/src/main/com/mongodb/InsertRequest.java b/driver-legacy/src/main/com/mongodb/InsertRequest.java new file mode 100644 index 00000000000..b7a7efe426d --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/InsertRequest.java @@ -0,0 +1,39 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb; + +import org.bson.BsonDocumentWrapper; +import org.bson.codecs.Encoder; + +class InsertRequest extends WriteRequest { + private final DBObject document; + private final Encoder codec; + + InsertRequest(final DBObject document, final Encoder codec) { + this.document = document; + this.codec = codec; + } + + public DBObject getDocument() { + return document; + } + + @Override + com.mongodb.internal.bulk.WriteRequest toNew(final DBCollection dbCollection) { + return new com.mongodb.internal.bulk.InsertRequest(new BsonDocumentWrapper<>(document, codec)); + } +} diff --git a/driver-legacy/src/main/com/mongodb/Java8DBCursorCleaner.java b/driver-legacy/src/main/com/mongodb/Java8DBCursorCleaner.java new file mode 100644 index 00000000000..195620eb45b --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/Java8DBCursorCleaner.java @@ -0,0 +1,51 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import static com.mongodb.assertions.Assertions.assertNotNull; + +/** + * A {@link Object#finalize()}-based implementation of {@link DBCursorCleaner}. + * {@link DBCursorCleaner#create(MongoClient, MongoNamespace, ServerCursor)} is responsible for ensuring + * that this class is only used if the {@code java.lang.ref.Cleaner} class is not available + * (i.e. the runtime is Java 8). + */ +@SuppressWarnings("deprecation") +final class Java8DBCursorCleaner extends DBCursorCleaner { + private final MongoClient mongoClient; + private final MongoNamespace namespace; + private volatile ServerCursor serverCursor; + + Java8DBCursorCleaner(final MongoClient mongoClient, final MongoNamespace namespace, + final ServerCursor serverCursor) { + this.mongoClient = assertNotNull(mongoClient); + this.namespace = assertNotNull(namespace); + this.serverCursor = assertNotNull(serverCursor); + } + + @Override + void clearCursor() { + serverCursor = null; + } + + @Override + protected void finalize() { + if (serverCursor != null) { + mongoClient.addOrphanedCursor(serverCursor, namespace); + } + } +} diff --git a/driver-legacy/src/main/com/mongodb/Java9DBCursorCleaner.java b/driver-legacy/src/main/com/mongodb/Java9DBCursorCleaner.java new file mode 100644 index 00000000000..a526cdfb352 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/Java9DBCursorCleaner.java @@ -0,0 +1,94 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb; + +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; + +import static com.mongodb.assertions.Assertions.assertNotNull; + +/** + * A {@code java.lang.ref.Cleaner}-based implementation of {@link DBCursorCleaner}. The implementation + * is reflection-based so that it will compile with Java 8 even though {@code java.lang.ref.Cleaner} was introduced in + * Java 9. {@link DBCursorCleaner#create(MongoClient, MongoNamespace, ServerCursor)} is responsible for ensuring that + * this class is only used if the {@code java.lang.ref.Cleaner} class is available (i.e. the runtime is Java 9+). + */ +final class Java9DBCursorCleaner extends DBCursorCleaner { + // Actual type is java.lang.ref.Cleaner + private static final Object CLEANER; + // Actual method is Cleaner#register(Object, Runnable) + private static final Method REGISTER_METHOD; + // Actual method is Cleanable#clean + private static final Method CLEAN_METHOD; + + static { + try { + Class cleanerClass = Class.forName("java.lang.ref.Cleaner"); + CLEANER = cleanerClass.getMethod("create").invoke(null); + REGISTER_METHOD = cleanerClass.getMethod("register", Object.class, Runnable.class); + CLEAN_METHOD = Class.forName("java.lang.ref.Cleaner$Cleanable").getMethod("clean"); + } catch (IllegalAccessException | InvocationTargetException | NoSuchMethodException | ClassNotFoundException e) { + throw new MongoInternalException("Unexpected exception", e); + } + } + + private final CleanerState cleanerState; + // Actual type is java.lang.ref.Cleaner$Cleanable + private final Object cleanable; + + Java9DBCursorCleaner(final MongoClient mongoClient, final MongoNamespace namespace, + final ServerCursor serverCursor) { + cleanerState = new CleanerState(mongoClient, namespace, serverCursor); + try { + cleanable = REGISTER_METHOD.invoke(CLEANER, this, cleanerState); + } catch (IllegalAccessException | InvocationTargetException e) { + throw new MongoInternalException("Unexpected exception", e); + } + } + + @Override + void clearCursor() { + cleanerState.clear(); + try { + CLEAN_METHOD.invoke(cleanable); + } catch (IllegalAccessException | InvocationTargetException e) { + throw new MongoInternalException("Unexpected exception", e); + } + } + + private static class CleanerState implements Runnable { + private final MongoClient mongoClient; + private final MongoNamespace namespace; + private volatile ServerCursor serverCursor; + + CleanerState(final MongoClient mongoClient, final MongoNamespace namespace, final ServerCursor serverCursor) { + this.mongoClient = assertNotNull(mongoClient); + this.namespace = assertNotNull(namespace); + this.serverCursor = assertNotNull(serverCursor); + } + + public void run() { + if (serverCursor != null) { + mongoClient.addOrphanedCursor(serverCursor, namespace); + } + } + + public void clear() { + serverCursor = null; + } + } +} diff --git a/driver-legacy/src/main/com/mongodb/LazyDBCallback.java b/driver-legacy/src/main/com/mongodb/LazyDBCallback.java new file mode 100644 index 00000000000..612da12c285 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/LazyDBCallback.java @@ -0,0 +1,59 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.lang.Nullable; +import org.bson.LazyBSONCallback; +import org.bson.types.ObjectId; + +import java.util.Iterator; +import java.util.List; + +/** + * A {@code BSONCallback} for the creation of {@code LazyDBObject} and {@code LazyDBList} instances. + */ +public class LazyDBCallback extends LazyBSONCallback implements DBCallback { + + /** + * Construct an instance. + * + * @param collection the {@code DBCollection} containing the document. This parameter is no longer used. + */ + public LazyDBCallback(@Nullable final DBCollection collection) { + } + + @Override + public Object createObject(final byte[] bytes, final int offset) { + LazyDBObject document = new LazyDBObject(bytes, offset, this); + Iterator iterator = document.keySet().iterator(); + if (iterator.hasNext() && iterator.next().equals("$ref") && iterator.hasNext() && iterator.next().equals("$id")) { + return new DBRef((String) document.get("$db"), (String) document.get("$ref"), document.get("$id")); + } + return document; + } + + @Override + @SuppressWarnings("rawtypes") + public List createArray(final byte[] bytes, final int offset) { + return new LazyDBList(bytes, offset, this); + } + + @Override + public Object createDBRef(final String ns, final ObjectId id) { + return new DBRef(ns, id); + } +} diff --git a/driver-legacy/src/main/com/mongodb/LazyDBDecoder.java b/driver-legacy/src/main/com/mongodb/LazyDBDecoder.java new file mode 100644 index 00000000000..a68eaccda99 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/LazyDBDecoder.java @@ -0,0 +1,62 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.lang.Nullable; +import org.bson.LazyBSONDecoder; + +import java.io.IOException; +import java.io.InputStream; + +/** + * A decoder for {@code LazyDBObject} instances. 
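+ * <p>A typical opt-in, assuming a {@code collection} is in scope (sketch):</p>
+ * <pre>{@code
+ * DBCursor cursor = collection.find().setDecoderFactory(LazyDBDecoder.FACTORY);
+ * }</pre>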
+ */ +public class LazyDBDecoder extends LazyBSONDecoder implements DBDecoder { + + @Override + public DBCallback getDBCallback(@Nullable final DBCollection collection) { + // callback doesn't do anything special, could be unique per decoder + // but have to create per collection due to DBRef, at least + return new LazyDBCallback(collection); + } + + @Override + public DBObject readObject(final InputStream in) throws IOException { + DBCallback dbCallback = getDBCallback(null); + decode(in, dbCallback); + return (DBObject) dbCallback.get(); + } + + @Override + public DBObject decode(final InputStream input, final DBCollection collection) throws IOException { + DBCallback callback = getDBCallback(collection); + decode(input, callback); + return (DBObject) callback.get(); + } + + @Override + public DBObject decode(final byte[] bytes, final DBCollection collection) { + DBCallback callback = getDBCallback(collection); + decode(bytes, callback); + return (DBObject) callback.get(); + } + + /** + * The {@code DBDecoderFactory} for {@code LazyDBDecoder} instances. + */ + public static final DBDecoderFactory FACTORY = () -> new LazyDBDecoder(); +} diff --git a/driver-legacy/src/main/com/mongodb/LazyDBEncoder.java b/driver-legacy/src/main/com/mongodb/LazyDBEncoder.java new file mode 100644 index 00000000000..3917edb8af7 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/LazyDBEncoder.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import org.bson.BSONObject; +import org.bson.io.OutputBuffer; + +import java.io.IOException; + +/** + * Encoder that only knows how to encode BSONObject instances of type LazyDBObject. + */ +public class LazyDBEncoder implements DBEncoder { + + @Override + public int writeObject(final OutputBuffer outputBuffer, final BSONObject document) { + if (!(document instanceof LazyDBObject)) { + throw new IllegalArgumentException("LazyDBEncoder can only encode BSONObject instances of type LazyDBObject"); + } + + LazyDBObject lazyDBObject = (LazyDBObject) document; + + try { + return lazyDBObject.pipe(outputBuffer); + } catch (IOException e) { + throw new MongoException("Exception serializing a LazyDBObject", e); + } + } +} diff --git a/driver-legacy/src/main/com/mongodb/LazyDBList.java b/driver-legacy/src/main/com/mongodb/LazyDBList.java new file mode 100644 index 00000000000..627e9405568 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/LazyDBList.java @@ -0,0 +1,59 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import org.bson.LazyBSONCallback; +import org.bson.LazyBSONList; + +/** + * A {@code LazyDBObject} representing a BSON array. + */ +public class LazyDBList extends LazyBSONList implements DBObject { + + private boolean isPartial; + + /** + * Construct an instance with the given raw bytes and offset. + * + * @param bytes the raw BSON bytes + * @param callback the callback to use to create nested values + */ + public LazyDBList(final byte[] bytes, final LazyBSONCallback callback) { + super(bytes, callback); + } + + /** + * Construct an instance with the given raw bytes and offset. + * + * @param bytes the raw BSON bytes + * @param offset the offset into the raw bytes + * @param callback the callback to use to create nested values + */ + public LazyDBList(final byte[] bytes, final int offset, final LazyBSONCallback callback) { + super(bytes, offset, callback); + } + + @Override + public void markAsPartialObject() { + isPartial = true; + } + + @Override + public boolean isPartialObject() { + return isPartial; + } +} diff --git a/driver-legacy/src/main/com/mongodb/LazyDBObject.java b/driver-legacy/src/main/com/mongodb/LazyDBObject.java new file mode 100644 index 00000000000..451da42ed16 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/LazyDBObject.java @@ -0,0 +1,79 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.annotations.Immutable; +import org.bson.LazyBSONCallback; +import org.bson.LazyBSONObject; +import org.bson.codecs.EncoderContext; +import org.bson.json.JsonWriter; +import org.bson.json.JsonWriterSettings; + +import java.io.StringWriter; + +/** + * An immutable {@code DBObject} backed by a byte buffer that lazily provides keys and values on request. This is useful for transferring + * BSON documents between servers when you don't want to pay the performance penalty of encoding or decoding them fully. + */ +@Immutable +public class LazyDBObject extends LazyBSONObject implements DBObject { + + private boolean isPartial = false; + + /** + * Construct an instance. + * + * @param bytes the raw bytes + * @param callback the callback to use to construct nested values + */ + public LazyDBObject(final byte[] bytes, final LazyBSONCallback callback) { + super(bytes, callback); + } + + /** + * Construct an instance. 
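+ * <p>For example (sketch; assumes {@code bytes} holds one complete BSON document):</p>
+ * <pre>{@code
+ * DBObject document = new LazyDBObject(bytes, new LazyDBCallback(null));
+ * }</pre>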
+ * + * @param bytes the raw bytes + * @param offset the offset into the raw bytes + * @param callback the callback to use to construct nested values + */ + public LazyDBObject(final byte[] bytes, final int offset, final LazyBSONCallback callback) { + super(bytes, offset, callback); + } + + @Override + public void markAsPartialObject() { + isPartial = true; + } + + @Override + public boolean isPartialObject() { + return isPartial; + } + + /** + * Returns a JSON serialization of this object + * + * @return JSON serialization + */ + public String toString() { + JsonWriter writer = new JsonWriter(new StringWriter(), JsonWriterSettings.builder().build()); + DBObjectCodec.getDefaultRegistry().get(LazyDBObject.class).encode(writer, this, + EncoderContext.builder().isEncodingCollectibleDocument(true).build()); + return writer.getWriter().toString(); + } +} diff --git a/driver-legacy/src/main/com/mongodb/LegacyMixedBulkWriteOperation.java b/driver-legacy/src/main/com/mongodb/LegacyMixedBulkWriteOperation.java new file mode 100644 index 00000000000..95990833f00 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/LegacyMixedBulkWriteOperation.java @@ -0,0 +1,201 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.bulk.BulkWriteError; +import com.mongodb.bulk.BulkWriteResult; +import com.mongodb.bulk.WriteConcernError; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncWriteBinding; +import com.mongodb.internal.binding.WriteBinding; +import com.mongodb.internal.bulk.DeleteRequest; +import com.mongodb.internal.bulk.InsertRequest; +import com.mongodb.internal.bulk.UpdateRequest; +import com.mongodb.internal.bulk.WriteRequest; +import com.mongodb.internal.operation.MixedBulkWriteOperation; +import com.mongodb.internal.operation.WriteOperation; +import com.mongodb.lang.Nullable; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; + +import java.util.List; + +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.bulk.WriteRequest.Type.DELETE; +import static com.mongodb.internal.bulk.WriteRequest.Type.INSERT; +import static com.mongodb.internal.bulk.WriteRequest.Type.REPLACE; +import static com.mongodb.internal.bulk.WriteRequest.Type.UPDATE; + + +/** + * Operation for bulk writes for the legacy API. 
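+ * <p>A sketch of the insert path; {@code namespace}, {@code insertRequests} and the
+ * {@code binding} are supplied by the caller:</p>
+ * <pre>{@code
+ * WriteConcernResult result = LegacyMixedBulkWriteOperation
+ *         .createBulkWriteOperationForInsert(namespace, true, WriteConcern.ACKNOWLEDGED, false, insertRequests)
+ *         .execute(binding);
+ * }</pre>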
+ */ +final class LegacyMixedBulkWriteOperation implements WriteOperation { + private final MixedBulkWriteOperation wrappedOperation; + private final WriteRequest.Type type; + private Boolean bypassDocumentValidation; + + static LegacyMixedBulkWriteOperation createBulkWriteOperationForInsert(final MongoNamespace namespace, final boolean ordered, + final WriteConcern writeConcern, final boolean retryWrites, final List insertRequests) { + return new LegacyMixedBulkWriteOperation(namespace, ordered, writeConcern, retryWrites, insertRequests, INSERT); + } + + static LegacyMixedBulkWriteOperation createBulkWriteOperationForUpdate(final MongoNamespace namespace, final boolean ordered, + final WriteConcern writeConcern, final boolean retryWrites, final List updateRequests) { + assertTrue(updateRequests.stream().allMatch(updateRequest -> updateRequest.getType() == UPDATE)); + return new LegacyMixedBulkWriteOperation(namespace, ordered, writeConcern, retryWrites, updateRequests, UPDATE); + } + + static LegacyMixedBulkWriteOperation createBulkWriteOperationForReplace(final MongoNamespace namespace, final boolean ordered, + final WriteConcern writeConcern, final boolean retryWrites, final List replaceRequests) { + assertTrue(replaceRequests.stream().allMatch(updateRequest -> updateRequest.getType() == REPLACE)); + return new LegacyMixedBulkWriteOperation(namespace, ordered, writeConcern, retryWrites, replaceRequests, REPLACE); + } + + static LegacyMixedBulkWriteOperation createBulkWriteOperationForDelete(final MongoNamespace namespace, final boolean ordered, + final WriteConcern writeConcern, final boolean retryWrites, final List deleteRequests) { + return new LegacyMixedBulkWriteOperation(namespace, ordered, writeConcern, retryWrites, deleteRequests, DELETE); + } + + private LegacyMixedBulkWriteOperation(final MongoNamespace namespace, final boolean ordered, final WriteConcern writeConcern, + final boolean retryWrites, final List writeRequests, final WriteRequest.Type type) { + notNull("writeRequests", writeRequests); + isTrueArgument("writeRequests is not an empty list", !writeRequests.isEmpty()); + this.type = type; + this.wrappedOperation = new MixedBulkWriteOperation(namespace, writeRequests, ordered, writeConcern, retryWrites); + } + + List getWriteRequests() { + return wrappedOperation.getWriteRequests(); + } + + LegacyMixedBulkWriteOperation bypassDocumentValidation(@Nullable final Boolean bypassDocumentValidation) { + this.bypassDocumentValidation = bypassDocumentValidation; + return this; + } + + @Override + public String getCommandName() { + return wrappedOperation.getCommandName(); + } + + @Override + public WriteConcernResult execute(final WriteBinding binding) { + try { + BulkWriteResult result = wrappedOperation.bypassDocumentValidation(bypassDocumentValidation).execute(binding); + if (result.wasAcknowledged()) { + return translateBulkWriteResult(result); + } else { + return WriteConcernResult.unacknowledged(); + } + } catch (MongoBulkWriteException e) { + throw convertBulkWriteException(e); + } + } + + @Override + public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback callback) { + throw new UnsupportedOperationException("This operation is sync only"); + } + + private MongoException convertBulkWriteException(final MongoBulkWriteException e) { + BulkWriteError lastError = getLastError(e); + if (lastError != null) { + if (ErrorCategory.fromErrorCode(lastError.getCode()) == ErrorCategory.DUPLICATE_KEY) { + return new 
DuplicateKeyException(manufactureGetLastErrorResponse(e), e.getServerAddress(), + translateBulkWriteResult(e.getWriteResult())); + } else { + return new WriteConcernException(manufactureGetLastErrorResponse(e), e.getServerAddress(), + translateBulkWriteResult(e.getWriteResult())); + } + } else { + return new WriteConcernException(manufactureGetLastErrorResponse(e), e.getServerAddress(), + translateBulkWriteResult(e.getWriteResult())); + } + } + + private BsonDocument manufactureGetLastErrorResponse(final MongoBulkWriteException e) { + BsonDocument response = new BsonDocument(); + addBulkWriteResultToResponse(e.getWriteResult(), response); + + WriteConcernError writeConcernError = e.getWriteConcernError(); + if (writeConcernError != null) { + response.putAll(writeConcernError.getDetails()); + } + + BulkWriteError lastError = getLastError(e); + if (lastError != null) { + response.put("err", new BsonString(lastError.getMessage())); + response.put("code", new BsonInt32(lastError.getCode())); + response.putAll(lastError.getDetails()); + + } else if (writeConcernError != null) { + response.put("err", new BsonString(writeConcernError.getMessage())); + response.put("code", new BsonInt32(writeConcernError.getCode())); + } + return response; + } + + private void addBulkWriteResultToResponse(final BulkWriteResult bulkWriteResult, final BsonDocument response) { + response.put("ok", new BsonInt32(1)); + if (type == INSERT) { + response.put("n", new BsonInt32(0)); + } else if (type == DELETE) { + response.put("n", new BsonInt32(bulkWriteResult.getDeletedCount())); + } else if (type == UPDATE || type == REPLACE) { + response.put("n", new BsonInt32(bulkWriteResult.getMatchedCount() + bulkWriteResult.getUpserts().size())); + if (bulkWriteResult.getUpserts().isEmpty()) { + response.put("updatedExisting", BsonBoolean.TRUE); + } else { + response.put("updatedExisting", BsonBoolean.FALSE); + response.put("upserted", bulkWriteResult.getUpserts().get(0).getId()); + } + } + } + + private WriteConcernResult translateBulkWriteResult(final BulkWriteResult bulkWriteResult) { + return WriteConcernResult.acknowledged(getCount(bulkWriteResult), getUpdatedExisting(bulkWriteResult), + bulkWriteResult.getUpserts().isEmpty() + ? null : bulkWriteResult.getUpserts().get(0).getId()); + } + + private int getCount(final BulkWriteResult bulkWriteResult) { + int count = 0; + if (type == UPDATE || type == REPLACE) { + count = bulkWriteResult.getMatchedCount() + bulkWriteResult.getUpserts().size(); + } else if (type == DELETE) { + count = bulkWriteResult.getDeletedCount(); + } + return count; + } + + private boolean getUpdatedExisting(final BulkWriteResult bulkWriteResult) { + if (type == UPDATE || type == REPLACE) { + return bulkWriteResult.getMatchedCount() > 0; + } + return false; + } + + @Nullable + private BulkWriteError getLastError(final MongoBulkWriteException e) { + return e.getWriteErrors().isEmpty() ? null : e.getWriteErrors().get(e.getWriteErrors().size() - 1); + } +} diff --git a/driver-legacy/src/main/com/mongodb/MapReduceCommand.java b/driver-legacy/src/main/com/mongodb/MapReduceCommand.java new file mode 100644 index 00000000000..d812d6a12af --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/MapReduceCommand.java @@ -0,0 +1,462 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.client.model.Collation; +import com.mongodb.lang.Nullable; + +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +/** + * This class groups the argument for a map/reduce operation and can build the underlying command object + * + * @mongodb.driver.manual applications/map-reduce Map-Reduce + * @deprecated Superseded by aggregate + */ +@Deprecated +public class MapReduceCommand { + + private final String mapReduce; + private final String map; + private final String reduce; + private String finalize; + private ReadPreference readPreference; + private final OutputType outputType; + private final String outputCollection; + private String outputDB; + private final DBObject query; + private DBObject sort; + private int limit; + private long maxTimeMS; + private Map scope; + private Boolean jsMode; + private Boolean verbose; + private Boolean bypassDocumentValidation; + private Collation collation; + + /** + * Represents the command for a map reduce operation Runs the command in REPLACE output type to a named collection + * + * @param inputCollection collection to use as the source documents to perform the map reduce operation. + * @param map a JavaScript function that associates or "maps" a value with a key and emits the key and value pair. + * @param reduce a JavaScript function that "reduces" to a single object all the values associated with a particular key. + * @param outputCollection optional - leave null if want to get the result inline + * @param type the type of output + * @param query specifies the selection criteria using query operators for determining the documents input to the map + * function. + * @mongodb.driver.manual reference/command/mapReduce/ Map Reduce Command + */ + public MapReduceCommand(final DBCollection inputCollection, final String map, final String reduce, + @Nullable final String outputCollection, final OutputType type, final DBObject query) { + this.mapReduce = inputCollection.getName(); + this.map = map; + this.reduce = reduce; + this.outputCollection = outputCollection; + this.outputType = type; + this.query = query; + this.outputDB = null; + this.verbose = true; + } + + /** + * Sets the verbosity of the MapReduce job, defaults to 'true' + * + * @param verbose The verbosity level. + */ + public void setVerbose(final Boolean verbose) { + this.verbose = verbose; + } + + /** + * Gets the verbosity of the MapReduce job. + * + * @return the verbosity level. 
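+ * (When {@code true}, the map-reduce result also includes timing information.)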
+ */ + public Boolean isVerbose() { + return verbose; + } + + /** + * Get the name of the collection the MapReduce will read from + * + * @return name of the collection the MapReduce will read from + */ + public String getInput() { + return mapReduce; + } + + + /** + * Get the map function, as a JS String + * + * @return the map function (as a JS String) + */ + public String getMap() { + return map; + } + + /** + * Gets the reduce function, as a JS String + * + * @return the reduce function (as a JS String) + */ + public String getReduce() { + return reduce; + } + + /** + * Gets the output target (name of the collection to save to). This value is nullable only if OutputType is set to INLINE + * + * @return The outputCollection + */ + @Nullable + public String getOutputTarget() { + return outputCollection; + } + + + /** + * Gets the OutputType for this instance. + * + * @return The outputType. + */ + public OutputType getOutputType() { + return outputType; + } + + + /** + * Gets the Finalize JS Function + * + * @return The finalize function (as a JS String). + */ + @Nullable + public String getFinalize() { + return finalize; + } + + /** + * Sets the Finalize JS Function + * + * @param finalize The finalize function (as a JS String) + */ + public void setFinalize(@Nullable final String finalize) { + this.finalize = finalize; + } + + /** + * Gets the query to run for this MapReduce job + * + * @return The query object + */ + @Nullable + public DBObject getQuery() { + return query; + } + + /** + * Gets the (optional) sort specification object + * + * @return the Sort DBObject + */ + @Nullable + public DBObject getSort() { + return sort; + } + + /** + * Sets the (optional) sort specification object + * + * @param sort The sort specification object + */ + public void setSort(@Nullable final DBObject sort) { + this.sort = sort; + } + + /** + * Gets the (optional) limit on input + * + * @return The limit specification object + */ + public int getLimit() { + return limit; + } + + /** + * Sets the (optional) limit on input + * + * @param limit The limit specification object + */ + public void setLimit(final int limit) { + this.limit = limit; + } + + /** + * Gets the max execution time for this command, in the given time unit. + * + * @param timeUnit the time unit to return the value in. + * @return the maximum execution time + * @since 2.12.0 + */ + public long getMaxTime(final TimeUnit timeUnit) { + return timeUnit.convert(maxTimeMS, MILLISECONDS); + } + + /** + * Sets the max execution time for this command, in the given time unit. + * + * @param maxTime the maximum execution time.
+ * @param timeUnit the time unit that maxTime is specified in + * @since 2.12.0 + */ + public void setMaxTime(final long maxTime, final TimeUnit timeUnit) { + this.maxTimeMS = MILLISECONDS.convert(maxTime, timeUnit); + } + + /** + * Gets the (optional) JavaScript scope + * + * @return The JavaScript scope + */ + @Nullable + public Map<String, Object> getScope() { + return scope; + } + + /** + * Sets the (optional) JavaScript scope + * + * @param scope The JavaScript scope + */ + public void setScope(@Nullable final Map<String, Object> scope) { + this.scope = scope; + } + + /** + * Gets the (optional) JavaScript mode + * + * @return The JavaScript mode + * @since 2.13 + */ + @Nullable + public Boolean getJsMode() { + return jsMode; + } + + /** + * Sets the (optional) JavaScript Mode + * + * @param jsMode Specifies whether to convert intermediate data into BSON format between the execution of the map and reduce functions + * @since 2.13 + */ + public void setJsMode(@Nullable final Boolean jsMode) { + this.jsMode = jsMode; + } + + /** + * Gets the (optional) database name where the output collection should reside + * + * @return the name of the database the result is stored in, or null. + */ + @Nullable + public String getOutputDB() { + return this.outputDB; + } + + /** + * Sets the (optional) database name where the output collection should reside + * + * @param outputDB the name of the database to send the Map Reduce output to + */ + public void setOutputDB(@Nullable final String outputDB) { + this.outputDB = outputDB; + } + + /** + * Gets whether to bypass document validation, or null if unspecified. The default is null. + * + * @return whether to bypass document validation, or null if unspecified. + * @since 2.14 + * @mongodb.server.release 3.2 + */ + @Nullable + public Boolean getBypassDocumentValidation() { + return bypassDocumentValidation; + } + + /** + * Sets whether to bypass document validation. + * + * @param bypassDocumentValidation whether to bypass document validation, or null if unspecified + * @since 2.14 + * @mongodb.server.release 3.2 + */ + public void setBypassDocumentValidation(@Nullable final Boolean bypassDocumentValidation) { + this.bypassDocumentValidation = bypassDocumentValidation; + } + + /** + * Turns this command into a DBObject representation of this map reduce command. + * + * @return a DBObject that contains the MongoDB document representation of this command.
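As a sketch of what this produces, consider the following; the collection name, JavaScript function bodies, and query are illustrative only, and a DB handle named db is assumed:

    // Hypothetical map/reduce job: sum quantities per SKU for matching documents
    DBCollection input = db.getCollection("lineItems");            // assumed collection
    MapReduceCommand cmd = new MapReduceCommand(
            input,
            "function() { emit(this.sku, this.qty); }",             // map
            "function(key, values) { return Array.sum(values); }",  // reduce
            "totals",                                                // output collection
            MapReduceCommand.OutputType.REPLACE,
            new BasicDBObject("status", "A"));                       // query
    cmd.setLimit(1000);
    DBObject commandDoc = cmd.toDBObject();
    // commandDoc is roughly: { mapreduce: "lineItems", map: "...", reduce: "...",
    //   verbose: true, out: { replace: "totals" }, query: { status: "A" }, limit: 1000 }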
+ */ + public DBObject toDBObject() { + BasicDBObject cmd = new BasicDBObject(); + + cmd.put("mapreduce", mapReduce); + cmd.put("map", map); + cmd.put("reduce", reduce); + + if (verbose != null) { + cmd.put("verbose", verbose); + } + + BasicDBObject out = new BasicDBObject(); + switch (outputType) { + case INLINE: + out.put("inline", 1); + break; + case REPLACE: + out.put("replace", outputCollection); + break; + case MERGE: + out.put("merge", outputCollection); + break; + case REDUCE: + out.put("reduce", outputCollection); + break; + default: + throw new IllegalArgumentException("Unexpected output type"); + } + if (outputDB != null) { + out.put("db", outputDB); + } + cmd.put("out", out); + + if (query != null) { + cmd.put("query", query); + } + + if (finalize != null) { + cmd.put("finalize", finalize); + } + + if (sort != null) { + cmd.put("sort", sort); + } + + if (limit > 0) { + cmd.put("limit", limit); + } + + if (scope != null) { + cmd.put("scope", scope); + } + + if (jsMode != null) { + cmd.put("jsMode", jsMode); + } + + if (maxTimeMS != 0) { + cmd.put("maxTimeMS", maxTimeMS); + } + + return cmd; + } + + /** + * Sets the read preference for this command. See the + * documentation for {@link ReadPreference} for more information. + * + * @param preference Read Preference to use + */ + public void setReadPreference(@Nullable final ReadPreference preference) { + this.readPreference = preference; + } + + /** + * Gets the read preference + * + * @return the readPreference + */ + @Nullable + public ReadPreference getReadPreference() { + return readPreference; + } + + /** + * Returns the collation + * + * @return the collation + * @since 3.4 + * @mongodb.server.release 3.4 + */ + @Nullable + public Collation getCollation() { + return collation; + } + + /** + * Sets the collation options + * + * @param collation the collation options + * @since 3.4 + * @mongodb.server.release 3.4 + */ + public void setCollation(final Collation collation) { + this.collation = collation; + } + + @Override + public String toString() { + return toDBObject().toString(); + } + + String getOutputTargetNonNull() { + if (outputCollection == null) { + throw new MongoInternalException("outputCollection can not be null in this context"); + } + return outputCollection; + } + + /** + * Represents the different options available for outputting the results of a map-reduce operation. + * + * @mongodb.driver.manual reference/command/mapReduce/#mapreduce-out-cmd Output options + */ + public enum OutputType { + /** + * Save the job output to a collection, replacing its previous content + */ + REPLACE, + /** + * Merge the job output with the existing contents of outputTarget collection + */ + MERGE, + /** + * Reduce the job output with the existing contents of outputTarget collection + */ + REDUCE, + /** + * Return results inline, no result is written to the DB server + */ + INLINE + } + +} diff --git a/driver-legacy/src/main/com/mongodb/MapReduceOutput.java b/driver-legacy/src/main/com/mongodb/MapReduceOutput.java new file mode 100644 index 00000000000..dd20b801cc1 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/MapReduceOutput.java @@ -0,0 +1,179 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.internal.operation.MapReduceBatchCursor; +import com.mongodb.internal.operation.MapReduceStatistics; +import com.mongodb.lang.Nullable; + +import java.util.ArrayList; +import java.util.List; + +/** + * Represents the result of a map/reduce operation. Users should interact with the results of the map reduce via the results() method, or + * by interacting directly with the collection the results were input into. + * + * @mongodb.driver.manual applications/map-reduce Map-Reduce + * @deprecated Superseded by aggregate + */ +@Deprecated +public class MapReduceOutput { + + private final DBCollection collection; + private final DBObject command; + private final List<DBObject> inlineResults; + private final MapReduceStatistics mapReduceStatistics; + private final DBCursor resultsFromCollection; + + /** + * Constructor for use with inline map reduce. Collection will always be null. + */ + MapReduceOutput(final DBObject command, final MapReduceBatchCursor<DBObject> results) { + + this.command = command; + this.mapReduceStatistics = results.getStatistics(); + + this.collection = null; + this.resultsFromCollection = null; + this.inlineResults = new ArrayList<>(); + while (results.hasNext()) { + this.inlineResults.addAll(results.next()); + } + results.close(); + } + + /** + * Constructor for use when the map reduce output was put into a collection + */ + MapReduceOutput(final DBObject command, final DBCursor resultsFromCollection, final MapReduceStatistics mapReduceStatistics, + final DBCollection outputCollection) { + this.command = command; + this.inlineResults = null; + this.mapReduceStatistics = mapReduceStatistics; + + this.collection = outputCollection; + this.resultsFromCollection = resultsFromCollection; + } + + /** + * Returns an iterable containing the results of the operation. + * + * @return the results in iterable form + */ + public Iterable<DBObject> results() { + if (inlineResults != null) { + return inlineResults; + } else { + return resultsFromCollection; + } + } + + /** + * Drops the collection that holds the results. Does nothing if the map-reduce returned the results inline. + */ + public void drop() { + if (collection != null) { + collection.drop(); + } + } + + /** + * Gets the collection that holds the results (Will return null if results are Inline). + * + * @return the collection or null + */ + @Nullable + public DBCollection getOutputCollection() { + return collection; + } + + /** + * Get the original command that was sent to the database. + * + * @return a DBObject containing the values of the original map-reduce command. + */ + public DBObject getCommand() { + return command; + } + + @Override + public String toString() { + return "MapReduceOutput{" + + "collection=" + collection + + ", command=" + command + + ", inlineResults=" + inlineResults + + ", resultsFromCollection=" + resultsFromCollection + + '}'; + } + + /** + * Get the name of the collection that the results of the map reduce were saved into. If the map reduce was an inline operation (i.e.
+ * the results were returned directly from calling the map reduce) this will return null. + * + * @return the name of the collection that the map reduce results are stored in + */ + @Nullable + public final String getCollectionName() { + return collection == null ? null : collection.getName(); + } + + /** + * Get the name of the database that the results of the map reduce were saved into. If the map reduce was an inline operation (i.e. + * the results were returned directly from calling the map reduce) this will return null. + * + * @return the name of the database that holds the collection that the map reduce results are stored in + */ + public String getDatabaseName() { + return collection.getDB().getName(); + } + + /** + * Get the amount of time, in milliseconds, that it took to run this map reduce. + * + * @return an int representing the number of milliseconds it took to run the map reduce operation + */ + public int getDuration() { + return mapReduceStatistics.getDuration(); + } + + /** + * Get the number of documents that were input into the map reduce operation + * + * @return the number of documents that were read while processing this map reduce + */ + public int getInputCount() { + return mapReduceStatistics.getInputCount(); + } + + /** + * Get the number of documents generated as a result of this map reduce + * + * @return the number of documents output by the map reduce + */ + public int getOutputCount() { + return mapReduceStatistics.getOutputCount(); + } + + /** + * Get the number of messages emitted from the provided map function. + * + * @return the number of items emitted from the map function + */ + public int getEmitCount() { + return mapReduceStatistics.getEmitCount(); + } +} diff --git a/driver-legacy/src/main/com/mongodb/MongoClient.java b/driver-legacy/src/main/com/mongodb/MongoClient.java new file mode 100644 index 00000000000..09d58e1b493 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/MongoClient.java @@ -0,0 +1,899 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb; + +import com.mongodb.client.ChangeStreamIterable; +import com.mongodb.client.ClientSession; +import com.mongodb.client.ListDatabasesIterable; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.MongoIterable; +import com.mongodb.client.internal.Clusters; +import com.mongodb.client.internal.MongoClientImpl; +import com.mongodb.client.internal.OperationExecutor; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ClusterSettings; +import com.mongodb.event.ClusterListener; +import com.mongodb.internal.IgnorableRequestContext; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.binding.ConnectionSource; +import com.mongodb.internal.binding.ReadWriteBinding; +import com.mongodb.internal.binding.SingleServerBinding; +import com.mongodb.internal.connection.Cluster; +import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.connection.NoOpSessionContext; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.connection.StreamFactoryFactory; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.internal.session.ServerSessionPool; +import com.mongodb.internal.thread.DaemonThreadFactory; +import com.mongodb.internal.validator.NoOpFieldNameValidator; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonInt64; +import org.bson.BsonString; +import org.bson.Document; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.io.Closeable; +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.connection.ServerAddressHelper.createServerAddress; +import static com.mongodb.internal.connection.ServerAddressHelper.getInetAddressResolver; +import static com.mongodb.internal.connection.StreamFactoryHelper.getSyncStreamFactoryFactory; +import static java.lang.String.format; +import static java.util.Collections.singletonList; +import static java.util.concurrent.TimeUnit.SECONDS; + +/** + *

+ * <p>A MongoDB client with internal connection pooling. For most applications, you should have one MongoClient instance for the entire
+ * JVM.</p>
+ *
+ * <p>The following are equivalent, and all connect to the local database running on the default port:</p>
+ * <pre>
+ * new MongoClient()
+ * new MongoClient("mongodb://localhost")
+ * new MongoClient("mongodb://localhost:27017");
+ * new MongoClient(MongoClientSettings.builder()
+ *   .applyConnectionString("mongodb://localhost")
+ *   .build())
+ * </pre>
+ *
+ * <pre>
+ * new MongoClient("mongodb://localhost:27017,localhost:27018,localhost:27019")
+ * new MongoClient(MongoClientSettings.builder()
+ *   .applyConnectionString("mongodb://localhost:27017,localhost:27018,localhost:27019")
+ *   .build())
+ * </pre>
+ *
+ * <p>You can connect to a sharded cluster using the same constructor invocations. MongoClient will auto-detect whether the servers are a
+ * list of replica set members or a list of mongos servers.</p>
+ *
+ * <p>By default, all read and write operations will be made on the primary, but it's possible to read from secondaries by changing the read
+ * preference:</p>
+ * <pre>
+ * new MongoClient("mongodb://localhost:27017,localhost:27018,localhost:27019?readPreference=primary")
+ * new MongoClient(MongoClientSettings.builder()
+ *   .applyConnectionString("mongodb://localhost:27017,localhost:27018,localhost:27019/?readPreference=primary")
+ *   .build())
+ * new MongoClient(MongoClientSettings.builder()
+ *   .applyConnectionString("mongodb://localhost:27017,localhost:27018,localhost:27019")
+ *   .readPreference(ReadPreference.primary())
+ *   .build())
+ * </pre>
+ *
+ * <p>By default, all write operations will wait for acknowledgment by the server, as the default write concern is {@code
+ * WriteConcern.ACKNOWLEDGED}. It's possible to change this with a setting:</p>
+ * <pre>
+ * new MongoClient("mongodb://localhost:27017,localhost:27018,localhost:27019?w=majority")
+ * new MongoClient(MongoClientSettings.builder()
+ *   .applyConnectionString("mongodb://localhost:27017,localhost:27018,localhost:27019/?w=majority")
+ *   .build())
+ * new MongoClient(MongoClientSettings.builder()
+ *   .applyConnectionString("mongodb://localhost:27017,localhost:27018,localhost:27019")
+ *   .writeConcern(WriteConcern.MAJORITY)
+ *   .build())
+ * </pre>
+ *
+ * <p>In general, users of this class will pick up all of the default options specified in {@code MongoClientSettings}.</p>
+ *
+ * @see ConnectionString
+ * @see MongoClientSettings
+ * @since 2.10.0
+ */
+public class MongoClient implements Closeable {
+    private static final Logger LOGGER = Loggers.getLogger("client");
+
+    private final ConcurrentMap<String, DB> dbCache = new ConcurrentHashMap<>();
+
+    private final MongoClientOptions options;
+
+    private final ConcurrentLinkedQueue<ServerCursorAndNamespace> orphanedCursors = new ConcurrentLinkedQueue<>();
+    private final ExecutorService cursorCleaningService;
+    private final MongoClientImpl delegate;
+    private final AtomicBoolean closed;
+
+    /**
+     * Gets the default codec registry. It includes the following providers:
+     *

+     * <ul>
+     *     <li>{@link org.bson.codecs.ValueCodecProvider}</li>
+     *     <li>{@link org.bson.codecs.BsonValueCodecProvider}</li>
+     *     <li>{@link com.mongodb.DBRefCodecProvider}</li>
+     *     <li>{@link com.mongodb.DBObjectCodecProvider}</li>
+     *     <li>{@link org.bson.codecs.DocumentCodecProvider}</li>
+     *     <li>{@link org.bson.codecs.CollectionCodecProvider}</li>
+     *     <li>{@link org.bson.codecs.IterableCodecProvider}</li>
+     *     <li>{@link org.bson.codecs.MapCodecProvider}</li>
+     *     <li>{@link com.mongodb.client.model.geojson.codecs.GeoJsonCodecProvider}</li>
+     *     <li>{@link com.mongodb.client.gridfs.codecs.GridFSFileCodecProvider}</li>
+     *     <li>{@link org.bson.codecs.jsr310.Jsr310CodecProvider}</li>
+     *     <li>{@link org.bson.codecs.JsonObjectCodecProvider}</li>
+     *     <li>{@link org.bson.codecs.BsonCodecProvider}</li>
+     * </ul>
+     *
+     * @return the default codec registry
+     * @see MongoClientOptions#getCodecRegistry()
+     * @see MongoClientSettings#getDefaultCodecRegistry()
+     * @since 3.0
+     */
+    public static CodecRegistry getDefaultCodecRegistry() {
+        return com.mongodb.MongoClientSettings.getDefaultCodecRegistry();
+    }
+
+    /**
+     * Creates an instance based on a (single) MongoDB server ({@code "mongodb://127.0.0.1:27017"}).
+     */
+    public MongoClient() {
+        this(new ConnectionString("mongodb://127.0.0.1"));
+    }
+
+    /**
+     * Creates a MongoClient instance based on a connection string.
+     *
+     * @param connectionString server to connect to in connection string format. For backwards compatibility, the
+     *                         {@code "mongodb://"} prefix can be omitted
+     * @see ConnectionString
+     */
+    public MongoClient(final String connectionString) {
+        this(connectionString.contains("://")
+                ? new ConnectionString(connectionString) : new ConnectionString("mongodb://" + connectionString));
+    }
+
+    /**
+     * Create a new client with the given connection string.
+     *
+     * <p>

+ * For each of the settings classes configurable via {@link MongoClientSettings}, the connection string is applied by calling the + * {@code applyConnectionString} method on an instance of the setting's builder class, building the setting, and adding it to an instance of + * {@link com.mongodb.MongoClientSettings.Builder}. + * </p>

+ * + * @param connectionString the connection string + * @see com.mongodb.MongoClientSettings.Builder#applyConnectionString(ConnectionString) + * @since 4.2 + */ + public MongoClient(final ConnectionString connectionString) { + this(connectionString, null); + } + + /** + * Create a new client with the given connection string. + * + *

+ * For each of the settings classes configurable via {@link MongoClientSettings}, the connection string is applied by calling the + * {@code applyConnectionString} method on an instance of the setting's builder class, building the setting, and adding it to an instance of + * {@link com.mongodb.MongoClientSettings.Builder}. + *

+ * + *

Note: Intended for driver and library authors to associate extra driver metadata with the connections.

+ * + * @param connectionString the connection string + * @param mongoDriverInformation any driver information to associate with the MongoClient + * @since 4.2 + */ + public MongoClient(final ConnectionString connectionString, + @Nullable final MongoDriverInformation mongoDriverInformation) { + this(MongoClientSettings.builder().applyConnectionString(connectionString).build(), mongoDriverInformation); + } + + /** + * Creates a new client with the given client settings. + * + * @param settings the settings + * @since 4.2 + */ + public MongoClient(final MongoClientSettings settings) { + this(settings, null); + } + + /** + * Creates a new client with the given client settings. + * + *

Note: Intended for driver and library authors to associate extra driver metadata with the connections.
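A minimal sketch of that usage, with an invented wrapper-library name and version:

    // Sketch: a wrapping library attaching its own metadata to the client
    MongoClientSettings settings = MongoClientSettings.builder()
            .applyConnectionString(new ConnectionString("mongodb://localhost:27017"))
            .build();
    MongoDriverInformation driverInfo = MongoDriverInformation.builder()
            .driverName("my-wrapper")   // hypothetical library name
            .driverVersion("1.0.0")     // hypothetical version
            .build();
    MongoClient client = new MongoClient(settings, driverInfo);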

+ * + * @param settings the settings + * @param mongoDriverInformation any driver information to associate with the MongoClient + * @since 4.2 + */ + public MongoClient(final MongoClientSettings settings, @Nullable final MongoDriverInformation mongoDriverInformation) { + this(settings, null, mongoDriverInformation); + } + + private MongoClient(final MongoClientSettings settings, + @Nullable final MongoClientOptions options, + @Nullable final MongoDriverInformation mongoDriverInformation) { + notNull("settings", settings); + + MongoDriverInformation wrappedMongoDriverInformation = wrapMongoDriverInformation(mongoDriverInformation); + + StreamFactoryFactory syncStreamFactoryFactory = getSyncStreamFactoryFactory( + settings.getTransportSettings(), + getInetAddressResolver(settings)); + + Cluster cluster = Clusters.createCluster( + settings, + wrappedMongoDriverInformation, + syncStreamFactoryFactory); + + delegate = new MongoClientImpl(cluster, settings, wrappedMongoDriverInformation, syncStreamFactoryFactory); + this.options = options != null ? options : MongoClientOptions.builder(settings).build(); + cursorCleaningService = this.options.isCursorFinalizerEnabled() ? createCursorCleaningService() : null; + this.closed = new AtomicBoolean(); + + BsonDocument clientMetadataDocument = delegate.getCluster().getClientMetadata().getBsonDocument(); + LOGGER.info(format("MongoClient with metadata %s created with settings %s", clientMetadataDocument.toJson(), settings)); + } + + private static MongoDriverInformation wrapMongoDriverInformation(@Nullable final MongoDriverInformation mongoDriverInformation) { + return (mongoDriverInformation == null ? MongoDriverInformation.builder() : MongoDriverInformation.builder(mongoDriverInformation)) + .driverName("legacy").build(); + } + + + /** + * Creates an instance based on a (single) mongodb node (default port). + * + * @param host server to connect to in format host[:port] + * @param options default query options + */ + public MongoClient(final String host, final MongoClientOptions options) { + this(createServerAddress(host), options); + } + + /** + * Creates an instance based on a (single) mongodb node. + * + * @param host the database's host address + * @param port the port on which the database is running + */ + public MongoClient(final String host, final int port) { + this(createServerAddress(host, port)); + } + + /** + * Creates an instance based on a (single) mongodb node + * + * @param addr the database address + * @see com.mongodb.ServerAddress + */ + public MongoClient(final ServerAddress addr) { + this(addr, MongoClientOptions.builder().build()); + } + + /** + * Creates an instance based on a (single) mongo node using a given ServerAddress and default options. + * + * @param addr the database address + * @param options default options + * @see com.mongodb.ServerAddress + */ + public MongoClient(final ServerAddress addr, final MongoClientOptions options) { + this(addr, null, options); + } + + /** + * Creates an instance based on a (single) mongo node using a given server address, credential, and options + * + * @param addr the database address + * @param credential the credential used to authenticate all connections + * @param options default options + * @see com.mongodb.ServerAddress + * @since 3.6 + */ + public MongoClient(final ServerAddress addr, @Nullable final MongoCredential credential, final MongoClientOptions options) { + this(addr, credential, options, null); + } + + /** + *

Creates an instance based on a list of replica set members or mongos servers. For a replica set it will discover all members. + * For a list with a single seed, the driver will still discover all members of the replica set. For a direct + * connection to a replica set member, with no discovery, use the {@link #MongoClient(ServerAddress)} constructor instead.

+ * + *

When there is more than one server to choose from based on the type of request (read or write) and the read preference (if it's a + * read request), the driver will randomly select a server to send a request. This applies to both replica sets and sharded clusters. + * The servers to randomly select from are further limited by the local threshold. See + * {@link MongoClientOptions#getLocalThreshold()}

+ * + * @param seeds Put as many servers as you can in the list and the system will figure out the rest. This can either be a list of mongod + * servers in the same replica set or a list of mongos servers in the same sharded cluster. + * @see MongoClientOptions#getLocalThreshold() + */ + public MongoClient(final List seeds) { + this(seeds, MongoClientOptions.builder().build()); + } + + /** + *

Construct an instance based on a list of replica set members or mongos servers. For a replica set it will discover all members. + * For a list with a single seed, the driver will still discover all members of the replica set. For a direct + * connection to a replica set member, with no discovery, use the {@link #MongoClient(ServerAddress, MongoClientOptions)} constructor + * instead.

+ * + *

When there is more than one server to choose from based on the type of request (read or write) and the read preference (if it's a + * read request), the driver will randomly select a server to send a request. This applies to both replica sets and sharded clusters. + * The servers to randomly select from are further limited by the local threshold. See + * {@link MongoClientOptions#getLocalThreshold()}

+ * + * @param seeds Put as many servers as you can in the list and the system will figure out the rest. This can either be a list of + * mongod servers in the same replica set or a list of mongos servers in the same sharded cluster. + * @param options the options + * @see MongoClientOptions#getLocalThreshold() + */ + public MongoClient(final List seeds, final MongoClientOptions options) { + this(seeds, null, options); + } + + /** + *

Creates an instance based on a list of replica set members or mongos servers. For a replica set it will discover all members. + * For a list with a single seed, the driver will still discover all members of the replica set. For a direct + * connection to a replica set member, with no discovery, use the + * {@link #MongoClient(ServerAddress, MongoCredential, MongoClientOptions)} constructor instead.

+ * + *

When there is more than one server to choose from based on the type of request (read or write) and the read preference (if it's a + * read request), the driver will randomly select a server to send a request. This applies to both replica sets and sharded clusters. + * The servers to randomly select from are further limited by the local threshold. See + * {@link MongoClientOptions#getLocalThreshold()}
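For example, a seed-list connection might look like the following sketch (host names are illustrative; java.util.Arrays and java.util.List imports assumed):

    // Sketch: explicit seed list; the driver discovers the rest of the topology
    List<ServerAddress> seeds = Arrays.asList(
            new ServerAddress("rs1.example.com", 27017),
            new ServerAddress("rs2.example.com", 27017),
            new ServerAddress("rs3.example.com", 27017));
    MongoClient client = new MongoClient(seeds, MongoClientOptions.builder().build());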

+ * + * @param seeds Put as many servers as you can in the list and the system will figure out the rest. This can either be a list of + * mongod servers in the same replica set or a list of mongos servers in the same sharded cluster. + * @param credential the credential used to authenticate all connections + * @param options the options + * @see MongoClientOptions#getLocalThreshold() + * @since 3.6 + */ + public MongoClient(final List<ServerAddress> seeds, @Nullable final MongoCredential credential, final MongoClientOptions options) { + this(seeds, credential, options, null); + } + + /** + * Creates an instance described by a URI. If only one address is used it will only connect to that node, otherwise it will discover all + * nodes. + * + * @param uri the URI + * @throws MongoException if there's a failure + */ + public MongoClient(final MongoClientURI uri) { + this(uri, null); + } + + /** + * Creates an instance described by a URI. + * + *

Note: Intended for driver and library authors to associate extra driver metadata with the connections.

+ * + * @param uri the URI + * @param mongoDriverInformation any driver information to associate with the MongoClient + * @throws MongoException if there's a failure + * @since 3.4 + */ + public MongoClient(final MongoClientURI uri, @Nullable final MongoDriverInformation mongoDriverInformation) { + this(uri.getOptions().asMongoClientSettings( + uri.getProxied().isSrvProtocol() + ? null : uri.getProxied().getHosts().stream().map(ServerAddress::new).collect(Collectors.toList()), + uri.getProxied().isSrvProtocol() + ? uri.getProxied().getHosts().get(0) : null, + getClusterConnectionMode(uri.getProxied()), + uri.getCredentials()), + uri.getOptions(), + mongoDriverInformation); + } + + private static ClusterConnectionMode getClusterConnectionMode(final ConnectionString connectionString) { + return ClusterSettings.builder().applyConnectionString(connectionString).build().getMode(); + } + + /** + * Creates a MongoClient to a single node using a given ServerAddress. + * + *

Note: Intended for driver and library authors to associate extra driver metadata with the connections.

+ * + * @param addr the database address + * @param credential the credential used to authenticate all connections + * @param options default options + * @param mongoDriverInformation any driver information to associate with the MongoClient + * @see com.mongodb.ServerAddress + * @since 3.6 + */ + public MongoClient(final ServerAddress addr, @Nullable final MongoCredential credential, final MongoClientOptions options, + @Nullable final MongoDriverInformation mongoDriverInformation) { + this(options.asMongoClientSettings(singletonList(addr), null, ClusterConnectionMode.SINGLE, credential), options, + mongoDriverInformation); + } + + /** + * Creates a MongoClient + * + *

Note: Intended for driver and library authors to associate extra driver metadata with the connections.

+ * + * @param seeds Put as many servers as you can in the list and the system will figure out the rest. This can either + * be a list of mongod servers in the same replica set or a list of mongos servers in the same sharded + * cluster. + * @param credential the credential used to authenticate all connections + * @param options the options + * @param mongoDriverInformation any driver information to associate with the MongoClient + * @since 3.6 + */ + public MongoClient(final List seeds, @Nullable final MongoCredential credential, final MongoClientOptions options, + @Nullable final MongoDriverInformation mongoDriverInformation) { + this(options.asMongoClientSettings(seeds, null, ClusterConnectionMode.MULTIPLE, credential), options, mongoDriverInformation); + } + + /** + * Gets the options that this client uses to connect to server. + * + *

Note: {@link MongoClientOptions} is immutable.

+ * + * @return the options + */ + public MongoClientOptions getMongoClientOptions() { + return options; + } + + /** + * Gets the credential that this client authenticates all connections with + * + * @return the credential, which may be null in unsecured deployments + * @since 3.9 + */ + @Nullable + public MongoCredential getCredential() { + return delegate.getSettings().getCredential(); + } + + /** + * Get a list of the database names + * + * @return an iterable containing all the names of all the databases + * @mongodb.driver.manual reference/command/listDatabases List Databases + * @since 3.0 + */ + public MongoIterable listDatabaseNames() { + return delegate.listDatabaseNames(); + } + + /** + * Get a list of the database names + * + * @param clientSession the client session with which to associate this operation + * @return an iterable containing all the names of all the databases + * @mongodb.server.release 3.6 + * @mongodb.driver.manual reference/command/listDatabases List Databases + * @since 3.6 + */ + public MongoIterable listDatabaseNames(final ClientSession clientSession) { + return delegate.listDatabaseNames(clientSession); + } + + /** + * Gets the list of databases + * + * @return the list of databases + * @since 3.0 + */ + public ListDatabasesIterable listDatabases() { + return delegate.listDatabases(); + } + + /** + * Gets the list of databases + * + * @param clazz the class to cast the database documents to + * @param the type of the class to use instead of {@code Document}. + * @return the list of databases + * @since 3.0 + */ + public ListDatabasesIterable listDatabases(final Class clazz) { + return delegate.listDatabases(clazz); + } + + /** + * Gets the list of databases + * + * @param clientSession the client session with which to associate this operation + * @return the list of databases + * @mongodb.server.release 3.6 + * @since 3.6 + */ + public ListDatabasesIterable listDatabases(final ClientSession clientSession) { + return delegate.listDatabases(clientSession); + } + + /** + * Gets the list of databases + * + * @param clientSession the client session with which to associate this operation + * @param clazz the class to cast the database documents to + * @param the type of the class to use instead of {@code Document}. + * @return the list of databases + * @mongodb.server.release 3.6 + * @since 3.6 + */ + public ListDatabasesIterable listDatabases(final ClientSession clientSession, final Class clazz) { + return delegate.listDatabases(clientSession, clazz); + } + + + /** + * @param databaseName the name of the database to retrieve + * @return a {@code MongoDatabase} representing the specified database + * @throws IllegalArgumentException if databaseName is invalid + * @see MongoNamespace#checkDatabaseNameValidity(String) + */ + public MongoDatabase getDatabase(final String databaseName) { + return delegate.getDatabase(databaseName); + } + + /** + * Creates a client session with default session options. + * + * @return the client session + * @throws MongoClientException if the MongoDB cluster to which this client is connected does not support sessions + * @mongodb.server.release 3.6 + * @since 3.8 + */ + public ClientSession startSession() { + return delegate.startSession(); + } + + /** + * Creates a client session. 
+ * + * @param options the options for the client session + * @return the client session + * @throws MongoClientException if the MongoDB cluster to which this client is connected does not support sessions + * @mongodb.server.release 3.6 + * @since 3.6 + */ + public ClientSession startSession(final ClientSessionOptions options) { + return delegate.startSession(options); + } + + /** + * Creates a change stream for this client. + * + * @return the change stream iterable + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.8 + */ + public ChangeStreamIterable watch() { + return delegate.watch(); + } + + /** + * Creates a change stream for this client. + * + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.8 + */ + public ChangeStreamIterable watch(final Class resultClass) { + return delegate.watch(resultClass); + } + + /** + * Creates a change stream for this client. + * + * @param pipeline the aggregation pipeline to apply to the change stream + * @return the change stream iterable + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.8 + */ + public ChangeStreamIterable watch(final List pipeline) { + return delegate.watch(pipeline); + } + + /** + * Creates a change stream for this client. + * + * @param pipeline the aggregation pipeline to apply to the change stream + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.8 + */ + public ChangeStreamIterable watch(final List pipeline, final Class resultClass) { + return delegate.watch(pipeline, resultClass); + } + + /** + * Creates a change stream for this client. + * + * @param clientSession the client session with which to associate this operation + * @return the change stream iterable + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.8 + */ + public ChangeStreamIterable watch(final ClientSession clientSession) { + return delegate.watch(clientSession); + } + + /** + * Creates a change stream for this client. + * + * @param clientSession the client session with which to associate this operation + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.8 + */ + public ChangeStreamIterable watch(final ClientSession clientSession, final Class resultClass) { + return delegate.watch(clientSession, resultClass); + } + + /** + * Creates a change stream for this client. + * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream + * @return the change stream iterable + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.8 + */ + public ChangeStreamIterable watch(final ClientSession clientSession, final List pipeline) { + return delegate.watch(clientSession, pipeline); + } + + /** + * Creates a change stream for this client. 
+ * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.8 + */ + public ChangeStreamIterable watch(final ClientSession clientSession, final List pipeline, + final Class resultClass) { + return delegate.watch(clientSession, pipeline, resultClass); + } + + /** + * Gets the current cluster description. + * + *

+ * This method will not block, meaning that it may return a {@link ClusterDescription} whose {@code clusterType} is unknown + * and whose {@link com.mongodb.connection.ServerDescription}s are all in the connecting state. If the application requires + * notifications after the driver has connected to a member of the cluster, it should register a {@link ClusterListener} via + * the {@link ClusterSettings} in {@link com.mongodb.MongoClientSettings}. + *
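A minimal sketch of such a registration (the listener body is illustrative, assuming the listener interface's default methods):

    // Sketch: observe topology changes instead of polling getClusterDescription()
    MongoClientSettings settings = MongoClientSettings.builder()
            .applyToClusterSettings(builder -> builder.addClusterListener(new ClusterListener() {
                @Override
                public void clusterDescriptionChanged(final ClusterDescriptionChangedEvent event) {
                    System.out.println("Cluster type is now " + event.getNewDescription().getType());
                }
            }))
            .build();
    MongoClient client = new MongoClient(settings);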

+ * + * @return the current cluster description + * @see ClusterSettings.Builder#addClusterListener(ClusterListener) + * @see com.mongodb.MongoClientSettings.Builder#applyToClusterSettings(com.mongodb.Block) + * @since 4.2 + */ + public ClusterDescription getClusterDescription() { + return delegate.getClusterDescription(); + } + + /** + * Gets the write concern + * + * @return the write concern + */ + public WriteConcern getWriteConcern() { + return options.getWriteConcern(); + } + + /** + * Gets the read concern + * + * @return the read concern + */ + public ReadConcern getReadConcern() { + return options.getReadConcern(); + } + + /** + * Gets the default read preference + * + * @return the default read preference + */ + public ReadPreference getReadPreference() { + return options.getReadPreference(); + } + + /** + * Gets a database object. Users should use {@link com.mongodb.MongoClient#getDatabase(String)} instead. + * + *

+ * The {@link DB} class has been superseded by {@link com.mongodb.client.MongoDatabase}. The deprecation of this method effectively + * deprecates the {@link DB}, {@link DBCollection}, and {@link DBCursor} classes, among others; but in order to give users time to + * migrate to the new API without experiencing a huge number of compiler warnings, those classes have not yet been formally + * deprecated. + *
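For instance, both APIs can be used side by side during a migration, as in this sketch (database name illustrative):

    // Sketch: legacy and modern handles from the same client instance
    MongoClient client = new MongoClient();
    DB legacyDb = client.getDB("shop");                   // legacy API; instances are cached per name
    MongoDatabase modernDb = client.getDatabase("shop");  // preferred API for new code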

+ * + * @param dbName the name of the database to retrieve + * @return a DB representing the specified database + * @throws IllegalArgumentException if the name is invalid + * @see MongoNamespace#checkDatabaseNameValidity(String) + * @deprecated This method is not currently scheduled for removal, but prefer {@link com.mongodb.MongoClient#getDatabase(String)} for + * new code. Note that {@link DB} and {@link com.mongodb.client.MongoDatabase} can be used together in the same application, with the + * same instance. + */ + @Deprecated // NOT CURRENTLY INTENDED FOR REMOVAL + public DB getDB(final String dbName) { + DB db = dbCache.get(dbName); + if (db != null) { + return db; + } + + db = new DB(this, dbName, getOperationExecutor()); + DB temp = dbCache.putIfAbsent(dbName, db); + if (temp != null) { + return temp; + } + return db; + } + + /** + * Drops the database if it exists. + * + * @param dbName name of database to drop + * @throws MongoException if the operation fails + */ + public void dropDatabase(final String dbName) { + getDB(dbName).dropDatabase(); + } + + /** + * Closes all resources associated with this instance, in particular any open network connections. Once called, this instance and any + * databases obtained from it can no longer be used. + */ + public void close() { + if (!closed.getAndSet(true)) { + delegate.close(); + if (cursorCleaningService != null) { + cursorCleaningService.shutdownNow(); + } + } + } + + @Override + public String toString() { + return "MongoClient{" + + "options=" + options + + '}'; + } + + Cluster getCluster() { + return delegate.getCluster(); + } + + CodecRegistry getCodecRegistry() { + return delegate.getCodecRegistry(); + } + + ServerSessionPool getServerSessionPool() { + return delegate.getServerSessionPool(); + } + + @Nullable + ExecutorService getCursorCleaningService() { + return cursorCleaningService; + } + + void addOrphanedCursor(final ServerCursor serverCursor, final MongoNamespace namespace) { + orphanedCursors.add(new ServerCursorAndNamespace(serverCursor, namespace)); + } + + // Leave as package-protected so that unit tests can spy on it. 
+ OperationExecutor getOperationExecutor() { + return delegate.getOperationExecutor(); + } + + MongoClientImpl getDelegate() { + return delegate; + } + + TimeoutSettings getTimeoutSettings() { + return delegate.getTimeoutSettings(); + } + + private ExecutorService createCursorCleaningService() { + ScheduledExecutorService newTimer = Executors.newSingleThreadScheduledExecutor(new DaemonThreadFactory("CleanCursors")); + newTimer.scheduleAtFixedRate(this::cleanCursors, 1, 1, SECONDS); + return newTimer; + } + + private void cleanCursors() { + try { + ServerCursorAndNamespace cur; + while ((cur = orphanedCursors.poll()) != null) { + ReadWriteBinding binding = new SingleServerBinding(delegate.getCluster(), cur.serverCursor.getAddress(), + new OperationContext(IgnorableRequestContext.INSTANCE, NoOpSessionContext.INSTANCE, + new TimeoutContext(getTimeoutSettings()), options.getServerApi())); + try { + ConnectionSource source = binding.getReadConnectionSource(); + try { + Connection connection = source.getConnection(); + try { + BsonDocument killCursorsCommand = new BsonDocument("killCursors", new BsonString(cur.namespace.getCollectionName())) + .append("cursors", new BsonArray(singletonList(new BsonInt64(cur.serverCursor.getId())))); + connection.command(cur.namespace.getDatabaseName(), killCursorsCommand, NoOpFieldNameValidator.INSTANCE, + ReadPreference.primary(), new BsonDocumentCodec(), source.getOperationContext()); + } finally { + connection.release(); + } + } finally { + source.release(); + } + } finally { + binding.release(); + } + } + } catch (Throwable t) { + LOGGER.error(this + " stopped cleaning cursors. You may want to recreate the MongoClient", t); + throw t; + } + } + + private static class ServerCursorAndNamespace { + private final ServerCursor serverCursor; + private final MongoNamespace namespace; + + ServerCursorAndNamespace(final ServerCursor serverCursor, final MongoNamespace namespace) { + this.serverCursor = serverCursor; + this.namespace = namespace; + } + } +} diff --git a/driver-legacy/src/main/com/mongodb/MongoClientOptions.java b/driver-legacy/src/main/com/mongodb/MongoClientOptions.java new file mode 100644 index 00000000000..1f19fba3484 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/MongoClientOptions.java @@ -0,0 +1,1394 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb; + +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Immutable; +import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.annotations.Reason; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ConnectionPoolSettings; +import com.mongodb.event.ClusterListener; +import com.mongodb.event.CommandListener; +import com.mongodb.event.ConnectionCreatedEvent; +import com.mongodb.event.ConnectionPoolListener; +import com.mongodb.event.ConnectionReadyEvent; +import com.mongodb.event.ServerListener; +import com.mongodb.event.ServerMonitorListener; +import com.mongodb.lang.Nullable; +import com.mongodb.selector.ServerSelector; +import org.bson.UuidRepresentation; +import org.bson.codecs.configuration.CodecRegistry; + +import javax.net.ssl.SSLContext; +import java.util.List; +import java.util.Objects; +import java.util.Optional; + +import static com.mongodb.assertions.Assertions.notNull; +import static java.lang.Math.toIntExact; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +/** + *

Various settings to control the behavior of a {@code MongoClient}.

+ * + * @see MongoClient + * @since 2.10.0 + */ +@Immutable +public class MongoClientOptions { + private final MongoClientSettings wrapped; + private final DBDecoderFactory dbDecoderFactory; + private final DBEncoderFactory dbEncoderFactory; + private final boolean cursorFinalizerEnabled; + + private MongoClientOptions(final Builder builder) { + wrapped = builder.wrapped.build(); + dbDecoderFactory = builder.dbDecoderFactory; + dbEncoderFactory = builder.dbEncoderFactory; + cursorFinalizerEnabled = builder.cursorFinalizerEnabled; + } + + /** + * Creates a builder instance. + * + * @return a builder + * @since 3.0.0 + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Creates a builder instance. + * + * @param options existing MongoClientOptions to default the builder settings on. + * @return a builder + * @since 3.0.0 + */ + public static Builder builder(final MongoClientOptions options) { + return new Builder(options); + } + + /** + * Creates a builder instance from a {@code MongoClientSettings} instance. + * + * @param settings the settings to from which to initialize the builder + * @return a builder + * @since 4.2 + */ + public static Builder builder(final MongoClientSettings settings) { + return new Builder(settings); + } + + /** + * Translate this instance into {@link MongoClientSettings}. + * + * @param hosts the seed list of hosts to connect to, which must be null if srvHost is not + * @param srvHost the SRV host name, which must be null if hosts is not + * @param clusterConnectionMode the connection mode + * @param credential the credential, which may be null + * @return the settings + * @see MongoClientSettings + * @since 4.2 + */ + public MongoClientSettings asMongoClientSettings(@Nullable final List hosts, + @Nullable final String srvHost, + final ClusterConnectionMode clusterConnectionMode, + @Nullable final MongoCredential credential) { + MongoClientSettings.Builder mongoClientSettingsBuilder = MongoClientSettings.builder(wrapped); + + Optional.ofNullable(credential).ifPresent(mongoClientSettingsBuilder::credential); + mongoClientSettingsBuilder.applyToClusterSettings(builder -> { + builder.mode(clusterConnectionMode); + if (srvHost != null) { + builder.srvHost(srvHost); + } + if (hosts != null) { + builder.hosts(hosts); + } + }); + return mongoClientSettingsBuilder.build(); + } + + /** + * Gets the logical name of the application using this MongoClient. The application name may be used by the client to identify + * the application to the server, for use in server logs, slow query logs, and profile collection. + * + *

Default is null.

+ * + * @return the application name, which may be null + * @mongodb.server.release 3.4 + * @since 3.4 + */ + @Nullable + public String getApplicationName() { + return wrapped.getApplicationName(); + } + + /** + * Gets the compressors to use for compressing messages to the server. The driver will use the first compressor in the list + * that the server is configured to support. + * + *

Default is the empty list.

+ * + * @return the compressors + * @mongodb.server.release 3.4 + * @since 3.6 + */ + public List getCompressorList() { + return wrapped.getCompressorList(); + } + + /** + *

The maximum number of connections allowed per host for this MongoClient instance. Those connections will be kept in a pool when + * idle. Once the pool is exhausted, any operation requiring a connection will block waiting for an available connection.

+ * + *

Default is 100.
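A sketch of tuning the pool through the builder (values are illustrative):

    // Sketch: size the per-host pool and bound the wait for a free connection
    MongoClientOptions options = MongoClientOptions.builder()
            .connectionsPerHost(200)      // maximum pooled connections per host
            .minConnectionsPerHost(10)    // floor the pool maintains over time
            .maxWaitTime(60_000)          // milliseconds to wait for an available connection
            .build();
    MongoClient client = new MongoClient(new ServerAddress("localhost"), options);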

+ * + * @return the maximum size of the connection pool per host; if 0, then there is no limit. + */ + public int getConnectionsPerHost() { + return wrapped.getConnectionPoolSettings().getMaxSize(); + } + + /** + *

The minimum number of connections per host for this MongoClient instance. Those connections will be kept in a pool when idle, and + * the pool will ensure over time that it contains at least this minimum number.

+ * + *

Default is 0.

+ * + * @return the minimum size of the connection pool per host + */ + public int getMinConnectionsPerHost() { + return wrapped.getConnectionPoolSettings().getMinSize(); + } + + /** + *

Gets the server selection timeout in milliseconds, which defines how long the driver will wait for server selection to + * succeed before throwing an exception.

+ * + *

Default is 30,000 milliseconds. A value of 0 means that it will timeout immediately if no server is available. A negative value + * means to wait indefinitely.
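For example, to fail fast when no suitable server is available (a sketch; the value is illustrative):

    // Sketch: give up on server selection after 5 seconds instead of 30
    MongoClientOptions options = MongoClientOptions.builder()
            .serverSelectionTimeout(5_000)   // milliseconds
            .build();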

+ * + * @return the server selection timeout in milliseconds. + */ + public int getServerSelectionTimeout() { + return toIntExact(wrapped.getClusterSettings().getServerSelectionTimeout(MILLISECONDS)); + } + + /** + *

The maximum wait time in milliseconds that a thread may wait for a connection to become available.

+ * + *

Default is 120,000 milliseconds. A value of 0 means that it will not wait. A negative value means to wait indefinitely.

+ * + * @return the maximum wait time. + */ + public int getMaxWaitTime() { + return toIntExact(wrapped.getConnectionPoolSettings().getMaxWaitTime(MILLISECONDS)); + } + + /** + * The maximum idle time of a pooled connection. A zero value indicates no limit to the idle time. A pooled connection that has + * exceeded its idle time will be closed and replaced when necessary by a new connection. + * + *

Default is 0, indicating no limit to the idle time.

+ * + * @return the maximum idle time, in milliseconds + * @since 2.12 + */ + public int getMaxConnectionIdleTime() { + return toIntExact(wrapped.getConnectionPoolSettings().getMaxConnectionIdleTime(MILLISECONDS)); + } + + /** + * The maximum life time of a pooled connection. A zero value indicates no limit to the life time. A pooled connection that has + * exceeded its life time will be closed and replaced when necessary by a new connection. + * + *

Default is 0, indicating no limit to the life time.

+ * + * @return the maximum life time, in milliseconds + * @since 2.12 + */ + public int getMaxConnectionLifeTime() { + return toIntExact(wrapped.getConnectionPoolSettings().getMaxConnectionLifeTime(MILLISECONDS)); + } + + /** + * The maximum number of connections a pool may be establishing concurrently. + * Establishment of a connection is a part of its life cycle + * starting after a {@link ConnectionCreatedEvent} and ending before a {@link ConnectionReadyEvent}. + *

+ * Default is 2.
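A sketch of raising that bound via the builder method referenced below (value illustrative):

    // Sketch: permit more concurrent connection establishment during traffic bursts
    MongoClientOptions options = MongoClientOptions.builder()
            .maxConnecting(8)   // default is 2
            .build();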

+ * + * @return The maximum number of connections a pool may be establishing concurrently. + * @see Builder#maxConnecting(int) + * @see ConnectionPoolSettings#getMaxConnecting() + * @since 4.4 + */ + public int getMaxConnecting() { + return wrapped.getConnectionPoolSettings().getMaxConnecting(); + } + + /** + * Returns the period of time to wait before running the first maintenance job on each connection pool. + *

+ * Default is 0 ms.

+ * + * @return the time period to wait in milliseconds + * @see ConnectionPoolSettings#getMaintenanceInitialDelay + * @since 4.7 + */ + public long getMaintenanceInitialDelay() { + return wrapped.getConnectionPoolSettings().getMaintenanceInitialDelay(MILLISECONDS); + } + + /** + * Returns the time period between runs of the maintenance job on each connection pool. + *
+ * Default is 60,000 ms.
+ *
+ * @return the time period between runs of the maintenance job in milliseconds
+ * @see ConnectionPoolSettings#getMaintenanceFrequency
+ * @since 4.7
+ */
+ public long getMaintenanceFrequency() {
+ return wrapped.getConnectionPoolSettings().getMaintenanceFrequency(MILLISECONDS);
+ }
+
+ /**
+ *
+ * The connection timeout in milliseconds. A value of 0 means no timeout. It is used solely when establishing a new connection
+ * {@link java.net.Socket#connect(java.net.SocketAddress, int) }
+ *
+ * Default is 10,000 milliseconds.
+ * + * @return the socket connect timeout + */ + public int getConnectTimeout() { + return wrapped.getSocketSettings().getConnectTimeout(MILLISECONDS); + } + + /** + *
+ * The socket timeout in milliseconds. It is used for I/O socket read operations
+ * {@link java.net.Socket#setSoTimeout(int)}
+ *
+ * Default is 0 and means no timeout.
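+ *
+ * For example (an illustrative sketch; values are arbitrary):
+ * <pre>{@code
+ * MongoClientOptions options = new MongoClientOptions.Builder()
+ *         .connectTimeout(10000) // give up establishing a connection after 10 seconds
+ *         .socketTimeout(60000)  // fail reads that stall for more than a minute
+ *         .build();
+ * }</pre>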
+ * + * @return the socket timeout, in milliseconds + */ + public int getSocketTimeout() { + return wrapped.getSocketSettings().getReadTimeout(MILLISECONDS); + } + + /** + * Gets the heartbeat frequency. This is the frequency that the driver will attempt to determine the current state of each server in the + * cluster. + * + *
+ * Default is 10,000 milliseconds.
+ * + * @return the heartbeat frequency, in milliseconds + * @since 2.12 + */ + public int getHeartbeatFrequency() { + return toIntExact(wrapped.getServerSettings().getHeartbeatFrequency(MILLISECONDS)); + } + + /** + * Gets the minimum heartbeat frequency. In the event that the driver has to frequently re-check a server's availability, + * it will wait at least this long since the previous check to avoid wasted effort. + * + *
+ * Default is 500 milliseconds.
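+ *
+ * For example (illustrative values), monitoring server state more aggressively than the defaults:
+ * <pre>{@code
+ * MongoClientOptions options = new MongoClientOptions.Builder()
+ *         .heartbeatFrequency(5000)
+ *         .minHeartbeatFrequency(250)
+ *         .build();
+ * }</pre>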
+ * + * @return the minimum heartbeat frequency, in milliseconds + * @since 2.13 + */ + public int getMinHeartbeatFrequency() { + return toIntExact(wrapped.getServerSettings().getMinHeartbeatFrequency(MILLISECONDS)); + } + + /** + *
+ * Gets the connect timeout for connections used for the cluster heartbeat.
+ *
+ * Default is 20,000 milliseconds.
+ * + * @return the heartbeat connect timeout, in milliseconds + * @since 2.12 + */ + public int getHeartbeatConnectTimeout() { + return wrapped.getHeartbeatSocketSettings().getConnectTimeout(MILLISECONDS); + } + + /** + * Gets the socket timeout for connections used for the cluster heartbeat. + * + *
+ * Default is 20,000 milliseconds.
+ * + * @return the heartbeat socket timeout, in milliseconds + * @since 2.12 + */ + public int getHeartbeatSocketTimeout() { + return wrapped.getHeartbeatSocketSettings().getReadTimeout(MILLISECONDS); + } + + /** + *
+ * Gets the local threshold. When choosing among multiple MongoDB servers to send a request, the MongoClient will only
+ * send that request to a server whose ping time is less than or equal to the server with the fastest ping time plus the local
+ * threshold.
+ *
+ * For example, let's say that the client is choosing a server to send a query when the read preference is {@code
+ * ReadPreference.secondary()}, and that there are three secondaries, server1, server2, and server3, whose ping times are 10, 15, and 16
+ * milliseconds, respectively. With a local threshold of 5 milliseconds, the client will send the query to either
+ * server1 or server2 (randomly selecting between the two).
+ *
+ * Default is 15 milliseconds.
+ * + * @return the local threshold, in milliseconds + * @mongodb.driver.manual reference/program/mongos/#cmdoption--localThreshold Local Threshold + * @since 2.13.0 + */ + public int getLocalThreshold() { + return toIntExact(wrapped.getClusterSettings().getLocalThreshold(MILLISECONDS)); + } + + /** + *
+ * Gets the required replica set name. With this option set, the MongoClient instance will
+ *
+ * <ol>
+ * <li>Connect in replica set mode, and discover all members of the set based on the given servers</li>
+ * <li>Make sure that the set name reported by all members matches the required set name.</li>
+ * <li>Refuse to service any requests if any member of the seed list is not part of a replica set with the required name.</li>
+ * </ol>
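+ *
+ * For example (the set name is illustrative):
+ * <pre>{@code
+ * MongoClientOptions options = new MongoClientOptions.Builder()
+ *         .requiredReplicaSetName("rs0")
+ *         .build();
+ * }</pre>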
+ * + * @return the required replica set name + * @since 2.12 + */ + @Nullable + public String getRequiredReplicaSetName() { + return wrapped.getClusterSettings().getRequiredReplicaSetName(); + } + + /** + * Whether to use SSL. + * + *
+ * Default is {@code false}.
+ * + * @return true if SSL should be used + * @since 3.0 + */ + public boolean isSslEnabled() { + return wrapped.getSslSettings().isEnabled(); + } + + /** + * Returns whether invalid host names should be allowed if SSL is enabled. Take care before setting this to + * true, as it makes the application susceptible to man-in-the-middle attacks. + * + *
+ * Default is {@code false}.
+ * + * @return true if invalid host names are allowed. + */ + public boolean isSslInvalidHostNameAllowed() { + return wrapped.getSslSettings().isInvalidHostNameAllowed(); + } + + /** + * Returns the SSLContext. This property is ignored when either sslEnabled is false or socketFactory is non-null. + * + * @return the configured SSLContext, which may be null. In that case {@code SSLContext.getDefault()} will be used when SSL is enabled. + * @since 3.5 + */ + @Nullable + public SSLContext getSslContext() { + return wrapped.getSslSettings().getContext(); + } + + /** + *
+ * The read preference to use for queries, map-reduce, aggregation, and count.
+ *
+ * Default is {@code ReadPreference.primary()}.
+ * + * @return the read preference + * @see com.mongodb.ReadPreference#primary() + */ + public ReadPreference getReadPreference() { + return wrapped.getReadPreference(); + } + + /** + *
+ * The write concern to use.
+ *
+ * Default is {@code WriteConcern.ACKNOWLEDGED}.
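+ *
+ * For example (an illustrative sketch), combining a write concern with a read preference:
+ * <pre>{@code
+ * MongoClientOptions options = new MongoClientOptions.Builder()
+ *         .writeConcern(WriteConcern.MAJORITY)
+ *         .readPreference(ReadPreference.secondaryPreferred())
+ *         .build();
+ * }</pre>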
+ * + * @return the write concern + * @see WriteConcern#ACKNOWLEDGED + */ + public WriteConcern getWriteConcern() { + return wrapped.getWriteConcern(); + } + + /** + * Returns true if writes should be retried if they fail due to a network error or other retryable error. + * + *
+ * Starting with the 3.11.0 release, the default value is true.
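+ *
+ * For example, retryable writes can also be disabled through the connection string (the host name is illustrative):
+ * <pre>{@code
+ * MongoClientURI uri = new MongoClientURI("mongodb://host1/?retryWrites=false");
+ * }</pre>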
+ * + * @return the retryWrites value + * @mongodb.server.release 3.6 + * @since 3.6 + */ + public boolean getRetryWrites() { + return wrapped.getRetryWrites(); + } + + /** + * Returns true if reads should be retried if they fail due to a network error or other retryable error. + * + * @return the retryReads value + * @mongodb.server.release 3.6 + * @since 3.11 + */ + public boolean getRetryReads() { + return wrapped.getRetryReads(); + } + + /** + *
+ * The read concern to use.
+ * + * @return the read concern + * @mongodb.server.release 3.2 + * @mongodb.driver.manual reference/readConcern/ Read Concern + * @since 3.2 + */ + public ReadConcern getReadConcern() { + return wrapped.getReadConcern(); + } + + /** + *
+ * The codec registry to use. By default, a {@code MongoClient} will be able to encode and decode instances of {@code
+ * Document}.
+ *
+ * Note that instances of {@code DB} and {@code DBCollection} do not use the registry, so it's not necessary to include a codec for
+ * DBObject in the registry.
+ * + * @return the codec registry + * @see MongoClient#getDatabase + * @since 3.0 + */ + public CodecRegistry getCodecRegistry() { + return wrapped.getCodecRegistry(); + } + + /** + * Gets the UUID representation to use when encoding instances of {@link java.util.UUID} and when decoding BSON binary values with + * subtype of 3. + * + *
+ * The default is {@link UuidRepresentation#UNSPECIFIED}. If your application stores UUID values in MongoDB, you must set this
+ * value to the desired representation. New applications should prefer {@link UuidRepresentation#STANDARD}, while existing Java
+ * applications should prefer {@link UuidRepresentation#JAVA_LEGACY}. Applications wishing to interoperate with existing Python or
+ * .NET applications should prefer {@link UuidRepresentation#PYTHON_LEGACY} or {@link UuidRepresentation#C_SHARP_LEGACY},
+ * respectively. Applications that do not store UUID values in MongoDB don't need to set this value.
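+ *
+ * For example (an illustrative sketch), a new application opting in to the standard representation:
+ * <pre>{@code
+ * MongoClientOptions options = new MongoClientOptions.Builder()
+ *         .uuidRepresentation(UuidRepresentation.STANDARD)
+ *         .build();
+ * }</pre>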
+ * + * @return the UUID representation, which may not be null + * @since 3.12 + */ + public UuidRepresentation getUuidRepresentation() { + return wrapped.getUuidRepresentation(); + } + + /** + * Gets the maximum number of hosts to connect to when using SRV protocol. + * + * @return the maximum number of hosts to connect to when using SRV protocol. Defaults to null. + * @since 4.5 + */ + @Nullable + public Integer getSrvMaxHosts() { + return wrapped.getClusterSettings().getSrvMaxHosts(); + } + + /** + * Gets the SRV service name. + * + *
+ * The SRV resource record (RFC 2782)
+ * service name, which is limited to 15 characters
+ * (RFC 6335 section 5.1).
+ * If specified, it is combined with the single host name as follows: {@code _srvServiceName._tcp.hostName}.
+ * The combined string is an SRV resource record
+ * name (RFC 1035 section 2.3.1), which is limited to 255
+ * characters (RFC 1035 section 2.3.4).
+ * + * @return the SRV service name, which defaults to {@code "mongodb"} + * @since 4.5 + */ + public String getSrvServiceName() { + return wrapped.getClusterSettings().getSrvServiceName(); + } + + /** + * Gets the server API to use when sending commands to the server. + * + * @return the server API, which may be null + * @since 4.3 + */ + @Nullable + public ServerApi getServerApi() { + return wrapped.getServerApi(); + } + + /** + * The time limit for the full execution of an operation in Milliseconds. + * + *
+ * If set, the following deprecated options will be ignored:
+ * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}
+ *
+ * <ul>
+ * <li>{@code null} means that the timeout mechanism for operations will defer to using:
+ * <ul>
+ * <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+ * available</li>
+ * <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+ * <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+ * <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+ * See: cursor.maxTimeMS.</li>
+ * <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+ * See: {@link TransactionOptions#getMaxCommitTime}.</li>
+ * </ul>
+ * </li>
+ * <li>{@code 0} means infinite timeout.</li>
+ * <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+ * </ul>
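+ *
+ * For example (illustrative; the bound is arbitrary), limiting the full execution of every operation to ten seconds:
+ * <pre>{@code
+ * MongoClientOptions options = new MongoClientOptions.Builder()
+ *         .timeout(10000)
+ *         .build();
+ * }</pre>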
+ * + * @return the timeout in milliseconds + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + @Nullable + public Long getTimeout() { + return wrapped.getTimeout(MILLISECONDS); + } + + /** + * Gets the server selector. + * + *
+ * The server selector augments the normal server selection rules applied by the driver when determining
+ * which server to send an operation to. At the point that it's called by the driver, the
+ * {@link com.mongodb.connection.ClusterDescription} which is passed to it contains a list of
+ * {@link com.mongodb.connection.ServerDescription} instances which satisfy either the configured {@link ReadPreference} for any
+ * read operation or ones that can take writes (e.g. a standalone, mongos, or replica set primary).
+ *
+ * The server selector can then filter the {@code ServerDescription} list using whatever criteria are required by the
+ * application.
+ *
+ * After this selector executes, two additional selectors are applied by the driver:
+ *
+ * <ul>
+ * <li>select from within the latency window</li>
+ * <li>select a random server from those remaining</li>
+ * </ul>
+ *
+ * To skip the latency window selector, an application can:
+ *
+ * <ul>
+ * <li>configure the local threshold to a sufficiently high value so that it doesn't exclude any servers</li>
+ * <li>return a list containing a single server from this selector (which will also make the random member selector a no-op)</li>
+ * </ul>
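+ *
+ * For example (an illustrative sketch; it assumes {@code ServerSelector} can be implemented as a lambda over
+ * {@code ClusterDescription.getServerDescriptions()}), a pass-through selector that keeps every eligible server:
+ * <pre>{@code
+ * ServerSelector selector = clusterDescription -> clusterDescription.getServerDescriptions();
+ * MongoClientOptions options = new MongoClientOptions.Builder()
+ *         .serverSelector(selector)
+ *         .build();
+ * }</pre>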
+ * + * @return the server selector, which may be null + * @since 3.6 + */ + @Nullable + public ServerSelector getServerSelector() { + return wrapped.getClusterSettings().getServerSelector(); + } + + /** + * Gets the list of added {@code ClusterListener}. The default is an empty list. + * + * @return the unmodifiable list of cluster listeners + * @since 3.3 + */ + public List getClusterListeners() { + return wrapped.getClusterSettings().getClusterListeners(); + } + + /** + * Gets the list of added {@code CommandListener}. + * + *
+ * Default is an empty list.
+ * + * @return the unmodifiable list of command listeners + * @since 3.1 + */ + public List getCommandListeners() { + return wrapped.getCommandListeners(); + } + + /** + * Gets the list of added {@code ConnectionPoolListener}. The default is an empty list. + * + * @return the unmodifiable list of connection pool listeners + * @since 3.5 + */ + public List getConnectionPoolListeners() { + return wrapped.getConnectionPoolSettings().getConnectionPoolListeners(); + } + + /** + * Gets the list of added {@code ServerListener}. The default is an empty list. + * + * @return the unmodifiable list of server listeners + * @since 3.3 + */ + public List getServerListeners() { + return wrapped.getServerSettings().getServerListeners(); + } + + /** + * Gets the list of added {@code ServerMonitorListener}. The default is an empty list. + * + * @return the unmodifiable list of server monitor listeners + * @since 3.3 + */ + public List getServerMonitorListeners() { + return wrapped.getServerSettings().getServerMonitorListeners(); + } + + /** + * Override the decoder factory. + * + *
+ * Default is for the standard Mongo Java driver configuration.
+ * + * @return the decoder factory + */ + public DBDecoderFactory getDbDecoderFactory() { + return dbDecoderFactory; + } + + /** + * Override the encoder factory. + * + *
+ * Default is for the standard Mongo Java driver configuration.
+ * + * @return the encoder factory + */ + public DBEncoderFactory getDbEncoderFactory() { + return dbEncoderFactory; + } + + /** + *
+ * Gets whether there is a finalize method created that cleans up instances of DBCursor that the client does not close. If you are
+ * careful to always call the close method of DBCursor, then this can safely be set to false.
+ *
+ * Default is true.
+ * + * @return whether finalizers are enabled on cursors + * @see DBCursor + * @see com.mongodb.DBCursor#close() + */ + public boolean isCursorFinalizerEnabled() { + return cursorFinalizerEnabled; + } + + /** + * Gets the auto-encryption settings + * + * @return the auto-encryption settings, which may be null + * @since 3.11 + */ + @Nullable + public AutoEncryptionSettings getAutoEncryptionSettings() { + return wrapped.getAutoEncryptionSettings(); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + MongoClientOptions that = (MongoClientOptions) o; + return wrapped.equals(that.wrapped) + && cursorFinalizerEnabled == that.cursorFinalizerEnabled + && dbDecoderFactory.equals(that.dbDecoderFactory) + && dbEncoderFactory.equals(that.dbEncoderFactory); + } + + @Override + public int hashCode() { + return Objects.hash(wrapped, dbDecoderFactory, dbEncoderFactory, cursorFinalizerEnabled); + } + + @Override + public String toString() { + return "MongoClientOptions{" + + "wrapped=" + wrapped + + ", dbDecoderFactory=" + dbDecoderFactory + + ", dbEncoderFactory=" + dbEncoderFactory + + ", cursorFinalizerEnabled=" + cursorFinalizerEnabled + + '}'; + } + + /** + * A builder for MongoClientOptions so that MongoClientOptions can be immutable, and to support easier construction through chaining. + * + * @since 2.10.0 + */ + @NotThreadSafe + public static class Builder { + private final MongoClientSettings.Builder wrapped; + private DBDecoderFactory dbDecoderFactory = DefaultDBDecoder.FACTORY; + private DBEncoderFactory dbEncoderFactory = DefaultDBEncoder.FACTORY; + private boolean cursorFinalizerEnabled = true; + + /** + * Creates a Builder for MongoClientOptions. + */ + public Builder() { + wrapped = MongoClientSettings.builder(); + } + + /** + * Creates a Builder from an existing MongoClientOptions. + * + * @param options create a builder from existing options + */ + public Builder(final MongoClientOptions options) { + wrapped = MongoClientSettings.builder(options.wrapped); + dbDecoderFactory = options.dbDecoderFactory; + dbEncoderFactory = options.dbEncoderFactory; + cursorFinalizerEnabled = options.cursorFinalizerEnabled; + } + + Builder(final MongoClientSettings settings) { + wrapped = MongoClientSettings.builder(settings); + } + + /** + * Sets the logical name of the application using this MongoClient. The application name may be used by the client to identify + * the application to the server, for use in server logs, slow query logs, and profile collection. + * + * @param applicationName the logical name of the application using this MongoClient. It may be null. + * The UTF-8 encoding may not exceed 128 bytes. + * @return {@code this} + * @mongodb.server.release 3.4 + * @see #getApplicationName() + * @since 3.4 + */ + public Builder applicationName(@Nullable final String applicationName) { + wrapped.applicationName(applicationName); + return this; + } + + /** + * Sets the compressors to use for compressing messages to the server. The driver will use the first compressor in the list + * that the server is configured to support. + * + * @param compressorList the list of compressors to request + * @return {@code this} + * @mongodb.server.release 3.4 + * @see #getCompressorList() + * @since 3.6 + */ + public Builder compressorList(final List compressorList) { + wrapped.compressorList(compressorList); + return this; + } + + /** + * Sets the minimum number of connections per host. 
+ * + * @param minConnectionsPerHost minimum number of connections + * @return {@code this} + * @throws IllegalArgumentException if {@code minConnectionsPerHost < 0} + * @see MongoClientOptions#getMinConnectionsPerHost() + * @since 2.12 + */ + public Builder minConnectionsPerHost(final int minConnectionsPerHost) { + wrapped.applyToConnectionPoolSettings(builder -> builder.minSize(minConnectionsPerHost)); + return this; + } + + /** + * Sets the maximum number of connections per host. + * + * @param connectionsPerHost the maximum size of the connection pool per host; if 0, then there is no limit. + * @return {@code this} + * @throws IllegalArgumentException if {@code connectionsPerHost < 0} + * @see MongoClientOptions#getConnectionsPerHost() + */ + public Builder connectionsPerHost(final int connectionsPerHost) { + wrapped.applyToConnectionPoolSettings(builder -> builder.maxSize(connectionsPerHost)); + return this; + } + + /** + *
+ * Sets the server selection timeout in milliseconds, which defines how long the driver will wait for server selection to
+ * succeed before throwing an exception.
+ *
+ * A value of 0 means that it will timeout immediately if no server is available. A negative value means to wait
+ * indefinitely.
+ * + * @param serverSelectionTimeout the server selection timeout, in milliseconds + * @return {@code this} + * @see com.mongodb.MongoClientOptions#getServerSelectionTimeout() + */ + public Builder serverSelectionTimeout(final int serverSelectionTimeout) { + wrapped.applyToClusterSettings(builder -> builder.serverSelectionTimeout(serverSelectionTimeout, MILLISECONDS)); + return this; + } + + /** + * Sets the maximum time that a thread will block waiting for a connection. + * + * @param maxWaitTime the maximum wait time, in milliseconds + * @return {@code this} + * @see MongoClientOptions#getMaxWaitTime() + */ + public Builder maxWaitTime(final int maxWaitTime) { + wrapped.applyToConnectionPoolSettings(builder -> builder.maxWaitTime(maxWaitTime, MILLISECONDS)); + return this; + } + + /** + * Sets the maximum idle time for a pooled connection. + * + * @param maxConnectionIdleTime the maximum idle time, in milliseconds, which must be >= 0. + * A zero value indicates no limit to the life time. + * @return {@code this} + * @throws IllegalArgumentException if {@code maxConnectionIdleTime < 0} + * @see com.mongodb.MongoClientOptions#getMaxConnectionIdleTime() + * @since 2.12 + */ + public Builder maxConnectionIdleTime(final int maxConnectionIdleTime) { + wrapped.applyToConnectionPoolSettings(builder -> builder.maxConnectionIdleTime(maxConnectionIdleTime, MILLISECONDS)); + return this; + } + + /** + * Sets the maximum life time for a pooled connection. + * + * @param maxConnectionLifeTime the maximum life time, in milliseconds, which must be >= 0. + * A zero value indicates no limit to the life time. + * @return {@code this} + * @throws IllegalArgumentException if {@code maxConnectionLifeTime < 0} + * @see com.mongodb.MongoClientOptions#getMaxConnectionIdleTime() + * @since 2.12 + */ + public Builder maxConnectionLifeTime(final int maxConnectionLifeTime) { + wrapped.applyToConnectionPoolSettings(builder -> builder.maxConnectionLifeTime(maxConnectionLifeTime, MILLISECONDS)); + return this; + } + + /** + * Sets the maximum number of connections a pool may be establishing concurrently. + * + * @param maxConnecting The maximum number of connections a pool may be establishing concurrently. Must be positive. + * @return {@code this}. + * @see MongoClientOptions#getMaxConnecting() + * @since 4.4 + */ + public Builder maxConnecting(final int maxConnecting) { + wrapped.applyToConnectionPoolSettings(builder -> builder.maxConnecting(maxConnecting)); + return this; + } + + /** + * The period of time to wait before running the first maintenance job on each connection pool. + * + * @param maintenanceInitialDelay the time period to wait in milliseconds + * @return {@code this}. + * @see ConnectionPoolSettings.Builder#maintenanceInitialDelay + * @since 4.7 + */ + public Builder maintenanceInitialDelay(final long maintenanceInitialDelay) { + wrapped.applyToConnectionPoolSettings(builder -> builder.maintenanceInitialDelay(maintenanceInitialDelay, MILLISECONDS)); + return this; + } + + /** + * The time period between runs of the maintenance job on each connection pool. 
+ * + * @param maintenanceFrequency the time period between runs of the maintenance job in milliseconds + * @return {@code this} + * @see ConnectionPoolSettings.Builder#maintenanceFrequency + * @since 4.7 + */ + public Builder maintenanceFrequency(final long maintenanceFrequency) { + wrapped.applyToConnectionPoolSettings(builder -> builder.maintenanceFrequency(maintenanceFrequency, MILLISECONDS)); + return this; + } + + /** + * Sets the connection timeout. + * + * @param connectTimeout the connection timeout, in milliseconds, which must be > 0 + * @return {@code this} + * @throws IllegalArgumentException if {@code connectTimeout <= 0} + * @see com.mongodb.MongoClientOptions#getConnectTimeout() + */ + public Builder connectTimeout(final int connectTimeout) { + wrapped.applyToSocketSettings(builder -> builder.connectTimeout(connectTimeout, MILLISECONDS)); + return this; + } + + /** + * Sets the socket timeout. + * + * @param socketTimeout the socket timeout, in milliseconds + * @return {@code this} + * @see com.mongodb.MongoClientOptions#getSocketTimeout() + */ + public Builder socketTimeout(final int socketTimeout) { + wrapped.applyToSocketSettings(builder -> builder.readTimeout(socketTimeout, MILLISECONDS)); + return this; + } + + /** + * Sets whether to use SSL. + * + * @param sslEnabled set to true if using SSL + * @return {@code this} + * @see MongoClientOptions#isSslEnabled() + * @since 3.0 + */ + public Builder sslEnabled(final boolean sslEnabled) { + wrapped.applyToSslSettings(builder -> builder.enabled(sslEnabled)); + return this; + } + + /** + * Define whether invalid host names should be allowed. Defaults to false. Take care before setting this to true, as it makes + * the application susceptible to man-in-the-middle attacks. + * + * @param sslInvalidHostNameAllowed whether invalid host names are allowed in SSL certificates. + * @return this + */ + public Builder sslInvalidHostNameAllowed(final boolean sslInvalidHostNameAllowed) { + wrapped.applyToSslSettings(builder -> builder.invalidHostNameAllowed(sslInvalidHostNameAllowed)); + return this; + } + + /** + * Sets the SSLContext to be used with SSL is enabled. This property is ignored when either sslEnabled is false or socketFactory is + * non-null. + * + * @param sslContext the SSLContext to be used for SSL connections + * @return {@code this} + * @since 3.5 + */ + public Builder sslContext(final SSLContext sslContext) { + wrapped.applyToSslSettings(builder -> builder.context(sslContext)); + return this; + } + + /** + * Sets the read preference. + * + * @param readPreference read preference + * @return {@code this} + * @see MongoClientOptions#getReadPreference() + */ + public Builder readPreference(final ReadPreference readPreference) { + wrapped.readPreference(readPreference); + return this; + } + + /** + * Sets the write concern. + * + * @param writeConcern the write concern + * @return {@code this} + * @see MongoClientOptions#getWriteConcern() + */ + public Builder writeConcern(final WriteConcern writeConcern) { + wrapped.writeConcern(writeConcern); + return this; + } + + /** + * Sets whether writes should be retried if they fail due to a network error. + * + *
+ * Starting with the 3.11.0 release, the default value is true.
+ * + * @param retryWrites sets if writes should be retried if they fail due to a network error. + * @return {@code this} + * @mongodb.server.release 3.6 + * @see #getRetryWrites() + * @since 3.6 + */ + public Builder retryWrites(final boolean retryWrites) { + wrapped.retryWrites(retryWrites); + return this; + } + + /** + * Sets whether reads should be retried if they fail due to a network error. + * + * @param retryReads sets if reads should be retried if they fail due to a network error. + * @return {@code this} + * @mongodb.server.release 3.6 + * @see #getRetryReads() + * @since 3.11 + */ + public Builder retryReads(final boolean retryReads) { + wrapped.retryReads(retryReads); + return this; + } + + /** + * Sets the read concern. + * + * @param readConcern the read concern. + * @return this + * @mongodb.server.release 3.2 + * @mongodb.driver.manual reference/readConcern/ Read Concern + * @see MongoClientOptions#getReadConcern() + * @since 3.2 + */ + public Builder readConcern(final ReadConcern readConcern) { + wrapped.readConcern(readConcern); + return this; + } + + /** + * Sets the codec registry + * + *
+ * Note that instances of {@code DB} and {@code DBCollection} do not use the registry, so it's not necessary to include a
+ * codec for DBObject in the registry.
+ * + * @param codecRegistry the codec registry + * @return {@code this} + * @see MongoClientOptions#getCodecRegistry() + * @since 3.0 + */ + public Builder codecRegistry(final CodecRegistry codecRegistry) { + wrapped.codecRegistry(codecRegistry); + return this; + } + + /** + * Sets the UUID representation to use when encoding instances of {@link java.util.UUID} and when decoding BSON binary values with + * subtype of 3. + * + *
+ * See {@link #getUuidRepresentation()} for recommendations on setting this value.
+ * + * @param uuidRepresentation the UUID representation, which may not be null + * @return this + * @since 3.12 + */ + public Builder uuidRepresentation(final UuidRepresentation uuidRepresentation) { + wrapped.uuidRepresentation(uuidRepresentation); + return this; + } + + /** + * Sets the server API to use when sending commands to the server. + *
+ * This is required for some MongoDB deployments.
+ * + * @param serverApi the server API, which may not be null + * @return this + * @since 4.3 + */ + public Builder serverApi(final ServerApi serverApi) { + wrapped.serverApi(serverApi); + return this; + } + + /** + * Sets a server selector that augments the normal server selection rules applied by the driver when determining + * which server to send an operation to. See {@link #getServerSelector()} for further details. + * + * @param serverSelector the server selector + * @return this + * @see #getServerSelector() + * @since 3.6 + */ + public Builder serverSelector(final ServerSelector serverSelector) { + wrapped.applyToClusterSettings(builder -> builder.serverSelector(serverSelector)); + return this; + } + + /** + * Adds the given command listener. + * + * @param commandListener the non-null command listener + * @return this + * @since 3.1 + */ + public Builder addCommandListener(final CommandListener commandListener) { + wrapped.addCommandListener(commandListener); + return this; + } + + /** + * Adds the given connection pool listener. + * + * @param connectionPoolListener the non-null connection pool listener + * @return this + * @since 3.5 + */ + public Builder addConnectionPoolListener(final ConnectionPoolListener connectionPoolListener) { + wrapped.applyToConnectionPoolSettings(builder -> builder.addConnectionPoolListener(connectionPoolListener)); + return this; + } + + /** + * Adds the given cluster listener. + * + * @param clusterListener the non-null cluster listener + * @return this + * @since 3.3 + */ + public Builder addClusterListener(final ClusterListener clusterListener) { + wrapped.applyToClusterSettings(builder -> builder.addClusterListener(clusterListener)); + return this; + } + + /** + * Adds the given server listener. + * + * @param serverListener the non-null server listener + * @return this + * @since 3.3 + */ + public Builder addServerListener(final ServerListener serverListener) { + wrapped.applyToServerSettings(builder -> builder.addServerListener(serverListener)); + return this; + } + + /** + * Adds the given server monitor listener. + * + * @param serverMonitorListener the non-null server monitor listener + * @return this + * @since 3.3 + */ + public Builder addServerMonitorListener(final ServerMonitorListener serverMonitorListener) { + wrapped.applyToServerSettings(builder -> builder.addServerMonitorListener(serverMonitorListener)); + return this; + } + + /** + * Sets whether cursor finalizers are enabled. + * + * @param cursorFinalizerEnabled whether cursor finalizers are enabled. + * @return {@code this} + * @see MongoClientOptions#isCursorFinalizerEnabled() + */ + public Builder cursorFinalizerEnabled(final boolean cursorFinalizerEnabled) { + this.cursorFinalizerEnabled = cursorFinalizerEnabled; + return this; + } + + /** + * Sets the decoder factory. + * + * @param dbDecoderFactory the decoder factory + * @return {@code this} + * @see MongoClientOptions#getDbDecoderFactory() + */ + public Builder dbDecoderFactory(final DBDecoderFactory dbDecoderFactory) { + this.dbDecoderFactory = notNull("dbDecoderFactory", dbDecoderFactory); + return this; + } + + /** + * Sets the encoder factory. + * + * @param dbEncoderFactory the encoder factory + * @return {@code this} + * @see MongoClientOptions#getDbEncoderFactory() + */ + public Builder dbEncoderFactory(final DBEncoderFactory dbEncoderFactory) { + this.dbEncoderFactory = notNull("dbEncoderFactory", dbEncoderFactory); + return this; + } + + /** + * Sets the heartbeat frequency. 
This is the frequency that the driver will attempt to determine the current state of each server in + * the cluster. The default value is 10,000 milliseconds + * + * @param heartbeatFrequency the heartbeat frequency for the cluster, in milliseconds, which must be > 0 + * @return {@code this} + * @throws IllegalArgumentException if heartbeatFrequency is not > 0 + * @see MongoClientOptions#getHeartbeatFrequency() + * @since 2.12 + */ + public Builder heartbeatFrequency(final int heartbeatFrequency) { + wrapped.applyToServerSettings(builder -> builder.heartbeatFrequency(heartbeatFrequency, MILLISECONDS)); + return this; + } + + /** + * Sets the minimum heartbeat frequency. In the event that the driver has to frequently re-check a server's availability, + * it will wait at least this long since the previous check to avoid wasted effort. The default value is 500 milliseconds. + * + * @param minHeartbeatFrequency the minimum heartbeat frequency, in milliseconds, which must be > 0 + * @return {@code this} + * @throws IllegalArgumentException if {@code minHeartbeatFrequency <= 0} + * @see MongoClientOptions#getMinHeartbeatFrequency() + * @since 2.13 + */ + public Builder minHeartbeatFrequency(final int minHeartbeatFrequency) { + wrapped.applyToServerSettings(builder -> builder.minHeartbeatFrequency(minHeartbeatFrequency, MILLISECONDS)); + return this; + } + + /** + * Sets the connect timeout for connections used for the cluster heartbeat. + * + * @param connectTimeout the connection timeout, in milliseconds + * @return {@code this} + * @see MongoClientOptions#getHeartbeatConnectTimeout() + * @since 2.12 + */ + public Builder heartbeatConnectTimeout(final int connectTimeout) { + wrapped.heartbeatConnectTimeoutMS(connectTimeout); + return this; + } + + /** + * Sets the socket timeout for connections used for the cluster heartbeat. + * + * @param socketTimeout the socket timeout, in milliseconds + * @return {@code this} + * @see MongoClientOptions#getHeartbeatSocketTimeout() + * @since 2.12 + */ + public Builder heartbeatSocketTimeout(final int socketTimeout) { + wrapped.heartbeatSocketTimeoutMS(socketTimeout); + return this; + } + + /** + * Sets the local threshold. + * + * @param localThreshold the acceptable latency difference, in milliseconds, which must be >= 0 + * @return {@code this} + * @throws IllegalArgumentException if {@code localThreshold < 0} + * @see com.mongodb.MongoClientOptions#getLocalThreshold() + * @since 2.13.0 + */ + public Builder localThreshold(final int localThreshold) { + wrapped.applyToClusterSettings(builder -> builder.localThreshold(localThreshold, MILLISECONDS)); + return this; + } + + /** + * Sets the required replica set name for the cluster. + * + * @param requiredReplicaSetName the required replica set name for the replica set. + * @return this + * @see MongoClientOptions#getRequiredReplicaSetName() + * @since 2.12 + */ + public Builder requiredReplicaSetName(final String requiredReplicaSetName) { + wrapped.applyToClusterSettings(builder -> builder.requiredReplicaSetName(requiredReplicaSetName)); + return this; + } + + /** + * Set options for auto-encryption. + * + * @param autoEncryptionSettings auto encryption settings + * @return this + * @since 3.11 + */ + public Builder autoEncryptionSettings(final AutoEncryptionSettings autoEncryptionSettings) { + wrapped.autoEncryptionSettings(autoEncryptionSettings); + return this; + } + + + /** + * Sets the maximum number of hosts to connect to when using SRV protocol. 
+ * + * @param srvMaxHosts the maximum number of hosts to connect to when using SRV protocol + * @return this + * @since 4.5 + */ + public Builder srvMaxHosts(final Integer srvMaxHosts) { + wrapped.applyToClusterSettings(builder -> builder.srvMaxHosts(srvMaxHosts)); + return this; + } + + /** + * Sets the SRV service name. + * + *
+ * The SRV resource record (RFC 2782)
+ * service name, which is limited to 15 characters
+ * (RFC 6335 section 5.1).
+ * If specified, it is combined with the single host name as follows: {@code _srvServiceName._tcp.hostName}.
+ * The combined string is an SRV resource record
+ * name (RFC 1035 section 2.3.1), which is limited to 255
+ * characters (RFC 1035 section 2.3.4).
+ * + * @param srvServiceName the SRV service name + * @return this + * @since 4.5 + */ + public Builder srvServiceName(final String srvServiceName) { + wrapped.applyToClusterSettings(builder -> builder.srvServiceName(srvServiceName)); + return this; + } + + /** + * Sets the time limit, in milliseconds for the full execution of an operation. + * + *
+ * <ul>
+ * <li>{@code null} means that the timeout mechanism for operations will defer to using:
+ * <ul>
+ * <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+ * available</li>
+ * <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+ * <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+ * <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+ * See: cursor.maxTimeMS.</li>
+ * <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+ * See: {@link TransactionOptions#getMaxCommitTime}.</li>
+ * </ul>
+ * </li>
+ * <li>{@code 0} means infinite timeout.</li>
+ * <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+ * </ul>
+ * + * @param timeoutMS the timeout in milliseconds + * @return this + * @since 5.2 + * @see #getTimeout + */ + @Alpha(Reason.CLIENT) + public Builder timeout(final long timeoutMS) { + wrapped.timeout(timeoutMS, MILLISECONDS); + return this; + } + + /** + * Build an instance of MongoClientOptions. + * + * @return the options from this builder + */ + public MongoClientOptions build() { + return new MongoClientOptions(this); + } + + } +} diff --git a/driver-legacy/src/main/com/mongodb/MongoClientURI.java b/driver-legacy/src/main/com/mongodb/MongoClientURI.java new file mode 100644 index 00000000000..e471bbf1686 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/MongoClientURI.java @@ -0,0 +1,526 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.lang.Nullable; +import org.bson.UuidRepresentation; + +import java.util.List; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * Represents a URI + * which can be used to create a MongoClient instance. The URI describes the hosts to + * be used and options. + *
+ * The format of the URI is:
+ * <pre>
+ *   mongodb://[username:password@]host1[:port1][,host2[:port2],...[,hostN[:portN]]][/[database.collection][?options]]
+ * </pre>
+ *
+ * <ul>
+ * <li>{@code mongodb://} is a required prefix to identify that this is a string in the standard connection format.</li>
+ * <li>{@code username:password@} are optional. If given, the driver will attempt to login to a database after
+ * connecting to a database server. For some authentication mechanisms, only the username is specified and the password is not,
+ * in which case the ":" after the username is left off as well</li>
+ * <li>{@code host1} is the only required part of the connection string. It identifies a server address to connect to.
+ * Support for Unix domain sockets was added in 3.7. Note: The path must be urlencoded, e.g. {@code mongodb://%2Ftmp%2Fmongodb-27017.sock},
+ * and the {@code jnr.unixsocket} library installed.</li>
+ * <li>{@code :portX} is optional and defaults to :27017 if not provided.</li>
+ * <li>{@code /database} is the name of the database to login to and thus is only relevant if the
+ * {@code username:password@} syntax is used. If not specified the "admin" database will be used by default.</li>
+ * <li>{@code ?options} are connection options. Options are name=value pairs and the pairs
+ * are separated by "&". For backwards compatibility, ";" is accepted as a separator in addition to "&",
+ * but should be considered as deprecated.</li>
+ * </ul>
+ * An alternative format, using the mongodb+srv protocol, is:
+ * <pre>
+ *   mongodb+srv://[username:password@]host[/[database][?options]]
+ * </pre>
+ *
+ * <ul>
+ * <li>{@code mongodb+srv://} is a required prefix for this format.</li>
+ * <li>{@code username:password@} are optional. If given, the driver will attempt to login to a database after
+ * connecting to a database server. For some authentication mechanisms, only the username is specified and the password is not,
+ * in which case the ":" after the username is left off as well</li>
+ * <li>{@code host} is the only required part of the URI. It identifies a single host name for which SRV records are looked up
+ * from a Domain Name Server after prefixing the host name with, by default, {@code "_mongodb._tcp"} ({@code "mongodb"} is the default SRV
+ * service name, but can be replaced via the {@code srvServiceName} query parameter). The host/port for each SRV record becomes the
+ * seed list used to connect, as if each one were provided as host/port pair in a URI using the normal mongodb protocol.</li>
+ * <li>{@code /database} is the name of the database to login to and thus is only relevant if the
+ * {@code username:password@} syntax is used. If not specified the "admin" database will be used by default.</li>
+ * <li>{@code ?options} are connection options. Options are name=value pairs and the pairs
+ * are separated by "&". For backwards compatibility, ";" is accepted as a separator in addition to "&",
+ * but should be considered as deprecated. Additionally with the mongodb+srv protocol, TXT records are looked up from a Domain Name
+ * Server for the given host, and the text value of each one is prepended to any options on the URI itself. Because the last specified
+ * value for any option wins, that means that options provided on the URI will override any that are provided via TXT records.</li>
+ * </ul>
+ * The following options are supported (case insensitive):
+ *
+ * Server Selection Configuration:
+ * <ul>
+ * <li>{@code serverSelectionTimeoutMS=ms}: How long the driver will wait for server selection to succeed before throwing an exception.</li>
+ * <li>{@code localThresholdMS=ms}: When choosing among multiple MongoDB servers to send a request, the driver will only
+ * send that request to a server whose ping time is less than or equal to the server with the fastest ping time plus the local
+ * threshold.</li>
+ * </ul>
+ * Server Monitoring Configuration:
+ * <ul>
+ * <li>{@code heartbeatFrequencyMS=ms}: The frequency that the driver will attempt to determine the current state of each server in the
+ * cluster.</li>
+ * </ul>
+ * Replica set configuration:
+ * <ul>
+ * <li>{@code replicaSet=name}: Implies that the hosts given are a seed list, and the driver will attempt to find
+ * all members of the set.</li>
+ * </ul>
+ * Connection Configuration:
+ * <ul>
+ * <li>{@code ssl=true|false}: Whether to connect using TLS.</li>
+ * <li>{@code tls=true|false}: Whether to connect using TLS. Supersedes the ssl option</li>
+ * <li>{@code tlsInsecure=true|false}: If connecting with TLS, this option enables insecure TLS connections. Currently this has the
+ * same effect as setting tlsAllowInvalidHostnames to true. Other mechanisms for relaxing TLS security constraints must be handled in
+ * the application by customizing the {@link javax.net.ssl.SSLContext}</li>
+ * <li>{@code sslInvalidHostNameAllowed=true|false}: Whether to allow invalid host names for TLS connections.</li>
+ * <li>{@code tlsAllowInvalidHostnames=true|false}: Whether to allow invalid host names for TLS connections. Supersedes the
+ * sslInvalidHostNameAllowed option</li>
+ * <li>{@code connectTimeoutMS=ms}: How long a connection can take to be opened before timing out.</li>
+ * <li>{@code socketTimeoutMS=ms}: How long a receive on a socket can take before timing out.
+ * This option is the same as {@link MongoClientOptions#getSocketTimeout()}.
+ * Deprecated, use {@code timeoutMS} instead.</li>
+ * <li>{@code maxIdleTimeMS=ms}: Maximum idle time of a pooled connection. A connection that exceeds this limit will be closed</li>
+ * <li>{@code maxLifeTimeMS=ms}: Maximum life time of a pooled connection. A connection that exceeds this limit will be closed</li>
+ * </ul>
+ * SRV configuration:
+ * <ul>
+ * <li>{@code srvServiceName=string}: The SRV service name. See {@link MongoClientOptions#getSrvServiceName()} for details.</li>
+ * <li>{@code srvMaxHosts=number}: The maximum number of hosts from the SRV record to connect to.</li>
+ * </ul>
+ * Connection pool configuration:
+ * <ul>
+ * <li>{@code maxPoolSize=n}: The maximum number of connections in the connection pool.</li>
+ * <li>{@code maxConnecting=n}: The maximum number of connections a pool may be establishing concurrently.</li>
+ * <li>{@code waitQueueTimeoutMS=ms}: The maximum wait time in milliseconds that a thread may wait for a connection to
+ * become available. Deprecated, use {@code timeoutMS} instead.</li>
+ * </ul>
+ * Write concern configuration:
+ * <ul>
+ * <li>{@code safe=true|false}
+ * <ul>
+ * <li>{@code true}: the driver ensures that all writes are acknowledged by the MongoDB server, or else throws an exception.
+ * (see also {@code w} and {@code wtimeoutMS}).</li>
+ * <li>{@code false}: the driver does not ensure that all writes are acknowledged by the MongoDB server.</li>
+ * </ul>
+ * </li>
+ * <li>{@code journal=true|false}
+ * <ul>
+ * <li>{@code true}: the driver waits for the server to group commit to the journal file on disk.</li>
+ * <li>{@code false}: the driver does not wait for the server to group commit to the journal file on disk.</li>
+ * </ul>
+ * </li>
+ * <li>{@code w=wValue}
+ * <ul>
+ * <li>The driver adds { w : wValue } to all write commands. Implies {@code safe=true}.</li>
+ * <li>wValue is typically a number, but can be any string in order to allow for specifications like
+ * {@code "majority"}</li>
+ * </ul>
+ * </li>
+ * <li>{@code wtimeoutMS=ms}. Deprecated, use {@code timeoutMS} instead.
+ * <ul>
+ * <li>The driver adds { wtimeout : ms } to all write commands. Implies {@code safe=true}.</li>
+ * <li>Used in combination with {@code w}</li>
+ * </ul>
+ * </li>
+ * <li>{@code retryWrites=true|false}. If true the driver will retry supported write operations if they fail due to a network error.
+ * Defaults to true.</li>
+ * <li>{@code retryReads=true|false}. If true the driver will retry supported read operations if they fail due to a network error.
+ * Defaults to true.</li>
+ * </ul>
+ * Read preference configuration:
+ * <ul>
+ * <li>{@code readPreference=enum}: The read preference for this connection.
+ * <ul>
+ * <li>Enumerated values:
+ * <ul>
+ * <li>{@code primary}</li>
+ * <li>{@code primaryPreferred}</li>
+ * <li>{@code secondary}</li>
+ * <li>{@code secondaryPreferred}</li>
+ * <li>{@code nearest}</li>
+ * </ul>
+ * </li>
+ * </ul>
+ * </li>
+ * <li>{@code readPreferenceTags=string}. A representation of a tag set as a comma-separated list of colon-separated
+ * key-value pairs, e.g. {@code "dc:ny,rack:1"}. Spaces are stripped from beginning and end of all keys and values.
+ * To specify a list of tag sets, use multiple readPreferenceTags,
+ * e.g. {@code readPreferenceTags=dc:ny,rack:1;readPreferenceTags=dc:ny;readPreferenceTags=}
+ * <ul>
+ * <li>Note the empty value for the last one, which means match any secondary as a last resort.</li>
+ * <li>Order matters when using multiple readPreferenceTags.</li>
+ * </ul>
+ * </li>
+ * <li>{@code maxStalenessSeconds=seconds}. The maximum staleness in seconds. For use with any non-primary read preference, the driver
+ * estimates the staleness of each secondary, based on lastWriteDate values provided in server hello responses, and selects only those
+ * secondaries whose staleness is less than or equal to maxStalenessSeconds. Not providing the parameter or explicitly setting it to -1
+ * indicates that there should be no max staleness check. The maximum staleness feature is designed to prevent badly-lagging servers from
+ * being selected. The staleness estimate is imprecise and shouldn't be used to try to select "up-to-date" secondaries. The minimum value
+ * is either 90 seconds, or the heartbeat frequency plus 10 seconds, whichever is greatest.</li>
+ * </ul>
+ * Authentication configuration:
+ * <ul>
+ * <li>{@code authMechanism=MONGO-CR|GSSAPI|PLAIN|MONGODB-X509}: The authentication mechanism to use if a credential was supplied.
+ * The default is unspecified, in which case the client will pick the most secure mechanism available based on the server version. For the
+ * GSSAPI and MONGODB-X509 mechanisms, no password is accepted, only the username.</li>
+ * <li>{@code authSource=string}: The source of the authentication credentials. This is typically the database in which
+ * the credentials have been created. The value defaults to the database specified in the path portion of the URI.
+ * If the database is specified in neither place, the default value is "admin". This option is only respected when using the MONGO-CR
+ * mechanism (the default).</li>
+ * <li>{@code gssapiServiceName=string}: This option only applies to the GSSAPI mechanism and is used to alter the service name.</li>
+ * </ul>
+ * Server Handshake configuration:
+ * <ul>
+ * <li>{@code appName=string}: Sets the logical name of the application. The application name may be used by the client to identify
+ * the application to the server, for use in server logs, slow query logs, and profile collection.</li>
+ * </ul>
+ * Compressor configuration:
+ * <ul>
+ * <li>{@code compressors=string}: A comma-separated list of compressors to request from the server. The supported compressors
+ * currently are 'zlib', 'snappy' and 'zstd'.</li>
+ * <li>{@code zlibCompressionLevel=integer}: Integer value from -1 to 9 representing the zlib compression level. Lower values will make
+ * compression faster, while higher values will make compression better.</li>
+ * </ul>
+ * General configuration:
+ * <ul>
+ * <li>{@code retryWrites=true|false}. If true the driver will retry supported write operations if they fail due to a network error.
+ * Defaults to true.</li>
+ * <li>{@code retryReads=true|false}. If true the driver will retry supported read operations if they fail due to a network error.
+ * Defaults to true.</li>
+ * <li>{@code uuidRepresentation=unspecified|standard|javaLegacy|csharpLegacy|pythonLegacy}. See
+ * {@link MongoClientOptions#getUuidRepresentation()} for documentation of semantics of this parameter. Defaults to "javaLegacy", but
+ * will change to "unspecified" in the next major release.</li>
+ * <li>{@code directConnection=true|false}. If true the driver will set the connection to be a direct connection to the host.</li>
+ * </ul>
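+ *
+ * For example (an illustrative URI; hosts, credentials, and options are arbitrary):
+ * <pre>{@code
+ * MongoClientURI uri = new MongoClientURI(
+ *         "mongodb://user:pwd@host1:27017,host2:27017/mydb?replicaSet=rs0&w=majority");
+ * MongoClientOptions options = uri.getOptions();
+ * }</pre>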
+ * + * @mongodb.driver.manual reference/connection-string Connection String URI Format + * @see MongoClientOptions for the default values for all options + * @since 2.10.0 + */ +public class MongoClientURI { + private final ConnectionString proxied; + private final MongoClientOptions.Builder builder; + + /** + * Creates a MongoClientURI from the given string. + * + * @param uri the URI + */ + public MongoClientURI(final String uri) { + this(uri, new MongoClientOptions.Builder()); + } + + /** + * Creates a MongoClientURI from the given URI string, and MongoClientOptions.Builder. The builder can be configured with default + * options, which may be overridden by options specified in the URI string. + * + *
+ * The {@code MongoClientURI} takes ownership of the {@code MongoClientOptions.Builder} instance that is passed to this constructor,
+ * and may modify it.
+ * + * @param uri the URI + * @param builder a non-null Builder, which may be modified within this constructor, + * @since 2.11.0 + */ + public MongoClientURI(final String uri, final MongoClientOptions.Builder builder) { + this.builder = notNull("builder", builder); + proxied = new ConnectionString(uri); + } + + ConnectionString getProxied() { + return proxied; + } + + // --------------------------------- + + /** + * Gets the username + * + * @return the username + */ + @Nullable + public String getUsername() { + return proxied.getUsername(); + } + + /** + * Gets the password + * + * @return the password + */ + @Nullable + public char[] getPassword() { + return proxied.getPassword(); + } + + /** + * Gets the list of hosts + * + * @return the host list + */ + public List getHosts() { + return proxied.getHosts(); + } + + /** + * Gets the database name + * + * @return the database name + */ + @Nullable + public String getDatabase() { + return proxied.getDatabase(); + } + + + /** + * Gets the collection name + * + * @return the collection name + */ + @Nullable + public String getCollection() { + return proxied.getCollection(); + } + + /** + * Get the unparsed URI. + * + * @return the URI + */ + public String getURI() { + return proxied.getConnectionString(); + } + + /** + * Gets the credentials. + * + * @return the credentials + */ + @Nullable + public MongoCredential getCredentials() { + return proxied.getCredential(); + } + + /** + * Gets the maximum number of hosts to connect to when using SRV protocol. + * + * @return the maximum number of hosts to connect to when using SRV protocol. Defaults to null. + * @since 4.5 + */ + @Nullable + public Integer getSrvMaxHosts() { + return proxied.getSrvMaxHosts(); + } + + /** + * Gets the SRV service name. + * + * @return the SRV service name. Defaults to null in the connection string, but defaults to {@code "mongodb"} in + * {@link MongoClientOptions}. + * @since 4.5 + * @see MongoClientOptions#getSrvServiceName() + */ + @Nullable + public String getSrvServiceName() { + return proxied.getSrvServiceName(); + } + + /** + * Gets the options + * + * @return the MongoClientOptions based on this URI. 
+ */ + public MongoClientOptions getOptions() { + ReadPreference readPreference = proxied.getReadPreference(); + if (readPreference != null) { + builder.readPreference(readPreference); + } + ReadConcern readConcern = proxied.getReadConcern(); + if (readConcern != null) { + builder.readConcern(readConcern); + } + WriteConcern writeConcern = proxied.getWriteConcern(); + if (writeConcern != null) { + builder.writeConcern(writeConcern); + } + + Boolean retryWritesValue = proxied.getRetryWritesValue(); + if (retryWritesValue != null) { + builder.retryWrites(retryWritesValue); + } + + Boolean retryReads = proxied.getRetryReads(); + if (retryReads != null) { + builder.retryReads(retryReads); + } + + Integer maxConnectionPoolSize = proxied.getMaxConnectionPoolSize(); + if (maxConnectionPoolSize != null) { + builder.connectionsPerHost(maxConnectionPoolSize); + } + Integer integer = proxied.getMinConnectionPoolSize(); + if (integer != null) { + builder.minConnectionsPerHost(integer); + } + Integer maxWaitTime = proxied.getMaxWaitTime(); + if (maxWaitTime != null) { + builder.maxWaitTime(maxWaitTime); + } + Integer maxConnectionIdleTime = proxied.getMaxConnectionIdleTime(); + if (maxConnectionIdleTime != null) { + builder.maxConnectionIdleTime(maxConnectionIdleTime); + } + Integer maxConnectionLifeTime = proxied.getMaxConnectionLifeTime(); + if (maxConnectionLifeTime != null) { + builder.maxConnectionLifeTime(maxConnectionLifeTime); + } + Integer maxConnecting = proxied.getMaxConnecting(); + if (maxConnecting != null) { + builder.maxConnecting(maxConnecting); + } + Integer socketTimeout = proxied.getSocketTimeout(); + if (socketTimeout != null) { + builder.socketTimeout(socketTimeout); + } + Integer connectTimeout = proxied.getConnectTimeout(); + if (connectTimeout != null) { + builder.connectTimeout(connectTimeout); + } + String requiredReplicaSetName = proxied.getRequiredReplicaSetName(); + if (requiredReplicaSetName != null) { + builder.requiredReplicaSetName(requiredReplicaSetName); + } + Boolean sslEnabled = proxied.getSslEnabled(); + if (sslEnabled != null) { + builder.sslEnabled(sslEnabled); + } + Boolean sslInvalidHostnameAllowed = proxied.getSslInvalidHostnameAllowed(); + if (sslInvalidHostnameAllowed != null) { + builder.sslInvalidHostNameAllowed(sslInvalidHostnameAllowed); + } + Integer serverSelectionTimeout = proxied.getServerSelectionTimeout(); + if (serverSelectionTimeout != null) { + builder.serverSelectionTimeout(serverSelectionTimeout); + } + Integer localThreshold = proxied.getLocalThreshold(); + if (localThreshold != null) { + builder.localThreshold(localThreshold); + } + Integer heartbeatFrequency = proxied.getHeartbeatFrequency(); + if (heartbeatFrequency != null) { + builder.heartbeatFrequency(heartbeatFrequency); + } + String applicationName = proxied.getApplicationName(); + if (applicationName != null) { + builder.applicationName(applicationName); + } + if (!proxied.getCompressorList().isEmpty()) { + builder.compressorList(proxied.getCompressorList()); + } + UuidRepresentation uuidRepresentation = proxied.getUuidRepresentation(); + if (uuidRepresentation != null) { + builder.uuidRepresentation(uuidRepresentation); + } + Integer srvMaxHosts = proxied.getSrvMaxHosts(); + if (srvMaxHosts != null) { + builder.srvMaxHosts(srvMaxHosts); + } + String srvServiceName = proxied.getSrvServiceName(); + if (srvServiceName != null) { + builder.srvServiceName(srvServiceName); + } + Long timeout = proxied.getTimeout(); + if (timeout != null) { + builder.timeout(timeout); + } + return 
+
+    @Override
+    public boolean equals(final Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+
+        MongoClientURI that = (MongoClientURI) o;
+
+        if (!getHosts().equals(that.getHosts())) {
+            return false;
+        }
+        String database = getDatabase();
+        if (database != null ? !database.equals(that.getDatabase()) : that.getDatabase() != null) {
+            return false;
+        }
+        String collection = getCollection();
+        if (collection != null ? !collection.equals(that.getCollection()) : that.getCollection() != null) {
+            return false;
+        }
+        MongoCredential credentials = getCredentials();
+        if (credentials != null ? !credentials.equals(that.getCredentials()) : that.getCredentials() != null) {
+            return false;
+        }
+        if (!getOptions().equals(that.getOptions())) {
+            return false;
+        }
+
+        return true;
+    }
+
+    @Override
+    public int hashCode() {
+        int result = getOptions().hashCode();
+        result = 31 * result + getHosts().hashCode();
+
+        MongoCredential credentials = getCredentials();
+        result = 31 * result + (credentials != null ? credentials.hashCode() : 0);
+
+        String database = getDatabase();
+        result = 31 * result + (database != null ? database.hashCode() : 0);
+
+        String collection = getCollection();
+        result = 31 * result + (collection != null ? collection.hashCode() : 0);
+
+        return result;
+    }
+
+    @Override
+    public String toString() {
+        return proxied.toString();
+    }
+}
diff --git a/driver-legacy/src/main/com/mongodb/MongoCursorAdapter.java b/driver-legacy/src/main/com/mongodb/MongoCursorAdapter.java
new file mode 100644
index 00000000000..bec238f64cb
--- /dev/null
+++ b/driver-legacy/src/main/com/mongodb/MongoCursorAdapter.java
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb;
+
+import com.mongodb.client.MongoCursor;
+
+class MongoCursorAdapter implements Cursor {
+    private final MongoCursor<DBObject> cursor;
+
+    MongoCursorAdapter(final MongoCursor<DBObject> cursor) {
+        this.cursor = cursor;
+    }
+
+    @Override
+    public int available() {
+        return cursor.available();
+    }
+
+    @Override
+    public long getCursorId() {
+        ServerCursor serverCursor = cursor.getServerCursor();
+        if (serverCursor == null) {
+            return 0;
+        }
+        return serverCursor.getId();
+    }
+
+    @Override
+    public ServerAddress getServerAddress() {
+        return cursor.getServerAddress();
+    }
+
+    @Override
+    public void close() {
+        cursor.close();
+    }
+
+    @Override
+    public boolean hasNext() {
+        return cursor.hasNext();
+    }
+
+    @Override
+    public DBObject next() {
+        return cursor.next();
+    }
+
+    @Override
+    public void remove() {
+        cursor.remove();
+    }
+}
diff --git a/driver-legacy/src/main/com/mongodb/QueryBuilder.java b/driver-legacy/src/main/com/mongodb/QueryBuilder.java
new file mode 100644
index 00000000000..8d4f7376014
--- /dev/null
+++ b/driver-legacy/src/main/com/mongodb/QueryBuilder.java
@@ -0,0 +1,508 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.lang.Nullable; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.regex.Pattern; + +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.Arrays.asList; + +/** + * Utility for creating DBObject queries + * + * @mongodb.driver.manual tutorial/query-documents/ Querying + */ +@SuppressWarnings("rawtypes") +public class QueryBuilder { + + /** + * Creates a builder with an empty query + */ + public QueryBuilder() { + _query = new BasicDBObject(); + } + + /** + * Returns a new QueryBuilder. + * + * @return a builder + */ + public static QueryBuilder start() { + return new QueryBuilder(); + } + + /** + * Creates a new query with a document key + * + * @param key MongoDB document key + * @return {@code this} + */ + public static QueryBuilder start(final String key) { + return (new QueryBuilder()).put(key); + } + + /** + * Adds a new key to the query if not present yet. Sets this key as the current key. + * + * @param key MongoDB document key + * @return {@code this} + */ + public QueryBuilder put(final String key) { + _currentKey = key; + if (_query.get(key) == null) { + _query.put(_currentKey, new NullObject()); + } + return this; + } + + /** + * Equivalent to {@code QueryBuilder.put(key)}. Intended for compound query chains to be more readable, e.g. 
{@code + * QueryBuilder.start("a").greaterThan(1).and("b").lessThan(3) } + * + * @param key MongoDB document key + * @return {@code this} + */ + public QueryBuilder and(final String key) { + return put(key); + } + + /** + * Equivalent to the $gt operator + * + * @param object Value to query + * @return {@code this} + */ + public QueryBuilder greaterThan(final Object object) { + addOperand(QueryOperators.GT, object); + return this; + } + + /** + * Equivalent to the $gte operator + * + * @param object Value to query + * @return {@code this} + */ + public QueryBuilder greaterThanEquals(final Object object) { + addOperand(QueryOperators.GTE, object); + return this; + } + + /** + * Equivalent to the $lt operand + * + * @param object Value to query + * @return {@code this} + */ + public QueryBuilder lessThan(final Object object) { + addOperand(QueryOperators.LT, object); + return this; + } + + /** + * Equivalent to the $lte operand + * + * @param object Value to query + * @return {@code this} + */ + public QueryBuilder lessThanEquals(final Object object) { + addOperand(QueryOperators.LTE, object); + return this; + } + + /** + * Equivalent of the find({key:value}) + * + * @param object Value to query + * @return {@code this} + */ + public QueryBuilder is(final Object object) { + addOperand(null, object); + return this; + } + + /** + * Equivalent of the $ne operand + * + * @param object Value to query + * @return {@code this} + */ + public QueryBuilder notEquals(final Object object) { + addOperand(QueryOperators.NE, object); + return this; + } + + /** + * Equivalent of the $in operand + * + * @param object Value to query + * @return {@code this} + */ + public QueryBuilder in(final Object object) { + addOperand(QueryOperators.IN, object); + return this; + } + + /** + * Equivalent of the $nin operand + * + * @param object Value to query + * @return {@code this} + */ + public QueryBuilder notIn(final Object object) { + addOperand(QueryOperators.NIN, object); + return this; + } + + /** + * Equivalent of the $mod operand + * + * @param object Value to query + * @return {@code this} + */ + public QueryBuilder mod(final Object object) { + addOperand(QueryOperators.MOD, object); + return this; + } + + /** + * Equivalent of the $all operand + * + * @param object Value to query + * @return {@code this} + */ + public QueryBuilder all(final Object object) { + addOperand(QueryOperators.ALL, object); + return this; + } + + /** + * Equivalent of the $size operand + * + * @param object Value to query + * @return {@code this} + */ + public QueryBuilder size(final Object object) { + addOperand(QueryOperators.SIZE, object); + return this; + } + + /** + * Equivalent of the $exists operand + * + * @param object Value to query + * @return {@code this} + */ + public QueryBuilder exists(final Object object) { + addOperand(QueryOperators.EXISTS, object); + return this; + } + + /** + * Passes a regular expression for a query + * + * @param regex Regex pattern object + * @return {@code this} + */ + public QueryBuilder regex(final Pattern regex) { + addOperand(null, regex); + return this; + } + + /** + * Equivalent to the $elemMatch operand + * + * @param match the object to match + * @return {@code this} + */ + public QueryBuilder elemMatch(final DBObject match) { + addOperand(QueryOperators.ELEM_MATCH, match); + return this; + } + + + /** + * Equivalent of the $within operand, used for geospatial operation + * + * @param x x coordinate + * @param y y coordinate + * @param radius radius + * @return {@code this} + */ + 
+    public QueryBuilder withinCenter(final double x, final double y, final double radius) {
+        addOperand(QueryOperators.WITHIN,
+                   new BasicDBObject(QueryOperators.CENTER, asList(asList(x, y), radius)));
+        return this;
+    }
+
+    /**
+     * Equivalent of the $near operand
+     *
+     * @param x x coordinate
+     * @param y y coordinate
+     * @return {@code this}
+     */
+    public QueryBuilder near(final double x, final double y) {
+        addOperand(QueryOperators.NEAR,
+                   asList(x, y));
+        return this;
+    }
+
+    /**
+     * Equivalent of the $near operand
+     *
+     * @param x           x coordinate
+     * @param y           y coordinate
+     * @param maxDistance max distance
+     * @return {@code this}
+     */
+    public QueryBuilder near(final double x, final double y, final double maxDistance) {
+        addOperand(QueryOperators.NEAR,
+                   asList(x, y));
+        addOperand(QueryOperators.MAX_DISTANCE,
+                   maxDistance);
+        return this;
+    }
+
+    /**
+     * Equivalent of the $nearSphere operand
+     *
+     * @param longitude coordinate in decimal degrees
+     * @param latitude  coordinate in decimal degrees
+     * @return {@code this}
+     */
+    public QueryBuilder nearSphere(final double longitude, final double latitude) {
+        addOperand(QueryOperators.NEAR_SPHERE,
+                   asList(longitude, latitude));
+        return this;
+    }
+
+    /**
+     * Equivalent of the $nearSphere operand
+     *
+     * @param longitude   coordinate in decimal degrees
+     * @param latitude    coordinate in decimal degrees
+     * @param maxDistance max spherical distance
+     * @return {@code this}
+     */
+    public QueryBuilder nearSphere(final double longitude, final double latitude, final double maxDistance) {
+        addOperand(QueryOperators.NEAR_SPHERE,
+                   asList(longitude, latitude));
+        addOperand(QueryOperators.MAX_DISTANCE,
+                   maxDistance);
+        return this;
+    }
+
+    /**
+     * Equivalent of the $centerSphere operand mostly intended for queries up to a few hundred miles or km.
+     *
+     * @param longitude   coordinate in decimal degrees
+     * @param latitude    coordinate in decimal degrees
+     * @param maxDistance max spherical distance
+     * @return {@code this}
+     */
+    public QueryBuilder withinCenterSphere(final double longitude, final double latitude, final double maxDistance) {
+        addOperand(QueryOperators.WITHIN,
+                   new BasicDBObject(QueryOperators.CENTER_SPHERE,
+                                     asList(asList(longitude, latitude), maxDistance)));
+        return this;
+    }
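To make the chaining contract concrete, a brief illustrative sketch using the operators above; the field names, values, and coordinates are hypothetical, and the resulting documents are shown in the comments:

    import com.mongodb.DBObject;
    import com.mongodb.QueryBuilder;

    public class QueryBuilderExample {
        public static void main(final String[] args) {
            // { "age" : { "$gte" : 18 , "$lt" : 65 } , "status" : "ACTIVE" }
            DBObject query = QueryBuilder.start("age").greaterThanEquals(18).lessThan(65)
                                         .and("status").is("ACTIVE")
                                         .get();
            System.out.println(query);

            // { "loc" : { "$near" : [ -73.99 , 40.73 ] , "$maxDistance" : 0.05 } }
            DBObject nearQuery = QueryBuilder.start("loc").near(-73.99, 40.73, 0.05).get();
            System.out.println(nearQuery);
        }
    }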
+    /**
+     * Equivalent to a $within operand, based on a bounding box represented by two corners
+     *
+     * @param x  the x coordinate of the first box corner.
+     * @param y  the y coordinate of the first box corner.
+     * @param x2 the x coordinate of the second box corner.
+     * @param y2 the y coordinate of the second box corner.
+     * @return {@code this}
+     */
+    public QueryBuilder withinBox(final double x, final double y, final double x2, final double y2) {
+        addOperand(QueryOperators.WITHIN,
+                   new BasicDBObject(QueryOperators.BOX, new Object[]{new Double[]{x, y}, new Double[]{x2, y2}}));
+        return this;
+    }
+
+    /**
+     * Equivalent to a $within operand, based on a bounding polygon represented by an array of points
+     *
+     * @param points an array of Double[] defining the vertices of the search area
+     * @return {@code this}
+     */
+    public QueryBuilder withinPolygon(final List<Double[]> points) {
+        notNull("points", points);
+        if (points.size() < 3) {
+            throw new IllegalArgumentException("Polygon insufficient number of vertices defined");
+        }
+        addOperand(QueryOperators.WITHIN,
+                   new BasicDBObject(QueryOperators.POLYGON, convertToListOfLists(points)));
+        return this;
+    }
+
+    private List<List<Double>> convertToListOfLists(final List<Double[]> points) {
+        List<List<Double>> listOfLists = new ArrayList<>(points.size());
+        for (Double[] cur : points) {
+            List<Double> list = new ArrayList<>(cur.length);
+            Collections.addAll(list, cur);
+            listOfLists.add(list);
+        }
+        return listOfLists;
+    }
+
+    /**
+     * Equivalent to a $text operand.
+     *
+     * @param search the search terms to apply to the text index.
+     * @return {@code this}
+     */
+    public QueryBuilder text(final String search) {
+        return text(search, null);
+    }
+
+    /**
+     * Equivalent to a $text operand.
+     *
+     * @param search   the search terms to apply to the text index.
+     * @param language the language to use.
+     * @return {@code this}
+     */
+    public QueryBuilder text(final String search, @Nullable final String language) {
+        if (_currentKey != null) {
+            throw new QueryBuilderException("The text operand may only occur at the top-level of a query. It does"
+                                            + " not apply to a specific element, but rather to a document as a whole.");
+        }
+
+        put(QueryOperators.TEXT);
+        addOperand(QueryOperators.SEARCH, search);
+        if (language != null) {
+            addOperand(QueryOperators.LANGUAGE, language);
+        }
+
+        return this;
+    }
+
+    /**
+     * Equivalent to $not meta operator. Must be followed by an operand, not a value, e.g. {@code
+     * QueryBuilder.start("val").not().mod(Arrays.asList(10, 1))}
+     *
+     * @return {@code this}
+     */
+    public QueryBuilder not() {
+        _hasNot = true;
+        return this;
+    }
+
+    /**
+     * Equivalent to an $or operand
+     *
+     * @param ors the list of conditions to or together
+     * @return {@code this}
+     */
+    @SuppressWarnings("unchecked")
+    public QueryBuilder or(final DBObject... ors) {
+        List l = (List) _query.get(QueryOperators.OR);
+        if (l == null) {
+            l = new ArrayList();
+            _query.put(QueryOperators.OR, l);
+        }
+        Collections.addAll(l, ors);
+        return this;
+    }
+
+    /**
+     * Equivalent to an $and operand
+     *
+     * @param ands the list of conditions to and together
+     * @return {@code this}
+     */
+    @SuppressWarnings("unchecked")
+    public QueryBuilder and(final DBObject...
ands) { + List l = (List) _query.get(QueryOperators.AND); + if (l == null) { + l = new ArrayList(); + _query.put(QueryOperators.AND, l); + } + Collections.addAll(l, ands); + return this; + } + + /** + * Creates a {@code DBObject} query to be used for the driver's find operations + * + * @return {@code this} + * @throws RuntimeException if a key does not have a matching operand + */ + public DBObject get() { + for (final String key : _query.keySet()) { + if (_query.get(key) instanceof NullObject) { + throw new QueryBuilderException("No operand for key:" + key); + } + } + return _query; + } + + private void addOperand(@Nullable final String op, final Object value) { + Object valueToPut = value; + if (op == null) { + if (_hasNot) { + valueToPut = new BasicDBObject(QueryOperators.NOT, valueToPut); + _hasNot = false; + } + _query.put(_currentKey, valueToPut); + return; + } + + Object storedValue = _query.get(_currentKey); + BasicDBObject operand; + if (!(storedValue instanceof DBObject)) { + operand = new BasicDBObject(); + if (_hasNot) { + DBObject notOperand = new BasicDBObject(QueryOperators.NOT, operand); + _query.put(_currentKey, notOperand); + _hasNot = false; + } else { + _query.put(_currentKey, operand); + } + } else { + operand = (BasicDBObject) _query.get(_currentKey); + if (operand.get(QueryOperators.NOT) != null) { + operand = (BasicDBObject) operand.get(QueryOperators.NOT); + } + } + operand.put(op, valueToPut); + } + + @SuppressWarnings("serial") + static class QueryBuilderException extends RuntimeException { + QueryBuilderException(final String message) { + super(message); + } + } + + private static class NullObject { + } + + private final DBObject _query; + private String _currentKey; + private boolean _hasNot; + +} diff --git a/driver-legacy/src/main/com/mongodb/QueryOperators.java b/driver-legacy/src/main/com/mongodb/QueryOperators.java new file mode 100644 index 00000000000..40e1b230588 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/QueryOperators.java @@ -0,0 +1,162 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +/** + * MongoDB keywords for various query operations. 
+ * + * @mongodb.driver.manual reference/operator/query/ Query Operators + */ +public class QueryOperators { + /** + * OR + */ + public static final String OR = "$or"; + /** + * AND + */ + public static final String AND = "$and"; + + /** + * GT + */ + public static final String GT = "$gt"; + /** + * GTE + */ + public static final String GTE = "$gte"; + /** + * LT + */ + public static final String LT = "$lt"; + /** + * LTE + */ + public static final String LTE = "$lte"; + + /** + * NE + */ + public static final String NE = "$ne"; + /** + * IN + */ + public static final String IN = "$in"; + /** + * NIN + */ + public static final String NIN = "$nin"; + /** + * MOD + */ + public static final String MOD = "$mod"; + /** + * ALL + */ + public static final String ALL = "$all"; + /** + * SIZE + */ + public static final String SIZE = "$size"; + /** + * EXISTS + */ + public static final String EXISTS = "$exists"; + /** + * ELEM_MATCH + */ + public static final String ELEM_MATCH = "$elemMatch"; + + // (to be implemented in QueryBuilder) + + /** + * WHERE + */ + public static final String WHERE = "$where"; + /** + * NOR + */ + public static final String NOR = "$nor"; + /** + * TYPE + */ + public static final String TYPE = "$type"; + /** + * NOT + */ + public static final String NOT = "$not"; + + // geo operators + + /** + * WITHIN + */ + public static final String WITHIN = "$within"; + /** + * NEAR + */ + public static final String NEAR = "$near"; + /** + * NEAR_SPHERE + */ + public static final String NEAR_SPHERE = "$nearSphere"; + /** + * BOX + */ + public static final String BOX = "$box"; + /** + * CENTER + */ + public static final String CENTER = "$center"; + /** + * POLYGON + */ + public static final String POLYGON = "$polygon"; + /** + * CENTER_SPHERE + */ + public static final String CENTER_SPHERE = "$centerSphere"; + + // (to be implemented in QueryBuilder) + + /** + * MAX_DISTANCE + */ + public static final String MAX_DISTANCE = "$maxDistance"; + /** + * UNIQUE_DOCS + */ + public static final String UNIQUE_DOCS = "$uniqueDocs"; + + // text operators + + /** + * TEXT + */ + public static final String TEXT = "$text"; + /** + * SEARCH + */ + public static final String SEARCH = "$search"; + /** + * LANGUAGE + */ + public static final String LANGUAGE = "$language"; + + private QueryOperators() { + } +} diff --git a/driver-legacy/src/main/com/mongodb/RemoveRequest.java b/driver-legacy/src/main/com/mongodb/RemoveRequest.java new file mode 100644 index 00000000000..16620926f3f --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/RemoveRequest.java @@ -0,0 +1,49 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb; + +import com.mongodb.client.model.Collation; +import com.mongodb.internal.bulk.DeleteRequest; +import org.bson.BsonDocumentWrapper; +import org.bson.codecs.Encoder; + +class RemoveRequest extends WriteRequest { + private final DBObject query; + private final boolean multi; + private final Encoder codec; + private final Collation collation; + + RemoveRequest(final DBObject query, final boolean multi, final Encoder codec, final Collation collation) { + this.query = query; + this.multi = multi; + this.codec = codec; + this.collation = collation; + } + + public DBObject getQuery() { + return query; + } + + public boolean isMulti() { + return multi; + } + + @Override + com.mongodb.internal.bulk.WriteRequest toNew(final DBCollection dbCollection) { + return new DeleteRequest(new BsonDocumentWrapper<>(query, this.codec)).multi(isMulti()).collation(collation); + } +} diff --git a/driver-legacy/src/main/com/mongodb/ReplaceRequest.java b/driver-legacy/src/main/com/mongodb/ReplaceRequest.java new file mode 100644 index 00000000000..8427471af18 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/ReplaceRequest.java @@ -0,0 +1,66 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.client.model.Collation; +import com.mongodb.internal.bulk.UpdateRequest; +import org.bson.BsonDocumentWrapper; +import org.bson.codecs.Encoder; + +class ReplaceRequest extends WriteRequest { + private final DBObject query; + private final DBObject document; + private final boolean upsert; + private final Encoder codec; + private final Encoder replacementCodec; + private final Collation collation; + + ReplaceRequest(final DBObject query, final DBObject document, final boolean upsert, final Encoder codec, + final Encoder replacementCodec, final Collation collation) { + this.query = query; + this.document = document; + this.upsert = upsert; + this.codec = codec; + this.replacementCodec = replacementCodec; + this.collation = collation; + } + + public DBObject getQuery() { + return query; + } + + public DBObject getDocument() { + return document; + } + + public boolean isUpsert() { + return upsert; + } + + public Collation getCollation() { + return collation; + } + + @Override + com.mongodb.internal.bulk.WriteRequest toNew(final DBCollection dbCollection) { + return new UpdateRequest(new BsonDocumentWrapper<>(query, codec), + new BsonDocumentWrapper<>(document, replacementCodec), + com.mongodb.internal.bulk.WriteRequest.Type.REPLACE) + .upsert(isUpsert()) + .collation(getCollation()); + } +} diff --git a/driver-legacy/src/main/com/mongodb/TimeoutSettingsHelper.java b/driver-legacy/src/main/com/mongodb/TimeoutSettingsHelper.java new file mode 100644 index 00000000000..e47dd7bd32b --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/TimeoutSettingsHelper.java @@ -0,0 +1,60 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.client.model.DBCollectionCountOptions; +import com.mongodb.client.model.DBCollectionFindAndModifyOptions; +import com.mongodb.client.model.DBCollectionFindOptions; +import com.mongodb.internal.TimeoutSettings; + +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +final class TimeoutSettingsHelper { + + private TimeoutSettingsHelper() { + } + + static TimeoutSettings createTimeoutSettings(final TimeoutSettings timeoutSettings, final long maxTimeMS) { + return timeoutSettings.withMaxTimeMS(maxTimeMS); + } + + static TimeoutSettings createTimeoutSettings(final TimeoutSettings timeoutSettings, final long maxTimeMS, final long maxAwaitTimeMS) { + return timeoutSettings.withMaxTimeAndMaxAwaitTimeMS(maxTimeMS, maxAwaitTimeMS); + } + + static TimeoutSettings createTimeoutSettings(final TimeoutSettings timeoutSettings, final AggregationOptions options) { + return createTimeoutSettings(timeoutSettings, options.getMaxTime(MILLISECONDS)); + } + + static TimeoutSettings createTimeoutSettings(final TimeoutSettings timeoutSettings, final DBCollectionCountOptions options) { + return createTimeoutSettings(timeoutSettings, options.getMaxTime(MILLISECONDS)); + } + + static TimeoutSettings createTimeoutSettings(final TimeoutSettings timeoutSettings, final DBCollectionFindOptions options) { + return timeoutSettings.withMaxTimeAndMaxAwaitTimeMS(options.getMaxTime(MILLISECONDS), options.getMaxAwaitTime(MILLISECONDS)); + } + + static TimeoutSettings createTimeoutSettings(final TimeoutSettings timeoutSettings, final DBCollectionFindAndModifyOptions options) { + return createTimeoutSettings(timeoutSettings, options.getMaxTime(MILLISECONDS)); + } + + @SuppressWarnings("deprecation") + static TimeoutSettings createTimeoutSettings(final TimeoutSettings timeoutSettings, final MapReduceCommand options) { + return createTimeoutSettings(timeoutSettings, options.getMaxTime(MILLISECONDS)); + } + +} diff --git a/driver-legacy/src/main/com/mongodb/UnacknowledgedBulkWriteResult.java b/driver-legacy/src/main/com/mongodb/UnacknowledgedBulkWriteResult.java new file mode 100644 index 00000000000..47f3888e515 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/UnacknowledgedBulkWriteResult.java @@ -0,0 +1,83 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb; + +import java.util.List; + +class UnacknowledgedBulkWriteResult extends BulkWriteResult { + + UnacknowledgedBulkWriteResult() { + } + + @Override + public boolean isAcknowledged() { + return false; + } + + @Override + public int getInsertedCount() { + throw getUnacknowledgedWriteException(); + } + + @Override + public int getMatchedCount() { + throw getUnacknowledgedWriteException(); + } + + @Override + public int getRemovedCount() { + throw getUnacknowledgedWriteException(); + } + + @Override + public int getModifiedCount() { + throw getUnacknowledgedWriteException(); + } + + @Override + public List getUpserts() { + throw getUnacknowledgedWriteException(); + } + + private UnsupportedOperationException getUnacknowledgedWriteException() { + return new UnsupportedOperationException("Can not get information about an unacknowledged write"); + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + return 0; + } + + @Override + public String toString() { + return "UnacknowledgedBulkWriteResult{" + + '}'; + } +} + diff --git a/driver-legacy/src/main/com/mongodb/UpdateRequest.java b/driver-legacy/src/main/com/mongodb/UpdateRequest.java new file mode 100644 index 00000000000..d1dc03b5b76 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/UpdateRequest.java @@ -0,0 +1,79 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb; + +import com.mongodb.client.model.Collation; +import org.bson.BsonDocumentWrapper; +import org.bson.codecs.Encoder; + +import java.util.List; + +class UpdateRequest extends WriteRequest { + private final DBObject query; + private final DBObject update; + private final boolean multi; + private final boolean upsert; + private final Encoder codec; + private final Collation collation; + private final List arrayFilters; + + UpdateRequest(final DBObject query, final DBObject update, final boolean multi, final boolean upsert, + final Encoder codec, final Collation collation, final List arrayFilters) { + this.query = query; + this.update = update; + this.multi = multi; + this.upsert = upsert; + this.codec = codec; + this.collation = collation; + this.arrayFilters = arrayFilters; + } + + public DBObject getQuery() { + return query; + } + + public DBObject getUpdate() { + return update; + } + + public boolean isUpsert() { + return upsert; + } + + public boolean isMulti() { + return multi; + } + + public Collation getCollation() { + return collation; + } + + public List getArrayFilters() { + return arrayFilters; + } + + @Override + com.mongodb.internal.bulk.WriteRequest toNew(final DBCollection dbCollection) { + return new com.mongodb.internal.bulk.UpdateRequest(new BsonDocumentWrapper<>(query, codec), + new BsonDocumentWrapper<>(update, codec), + com.mongodb.internal.bulk.WriteRequest.Type.UPDATE) + .upsert(isUpsert()) + .multi(isMulti()) + .collation(getCollation()) + .arrayFilters(dbCollection.wrapAllowNull(arrayFilters, codec)); + } +} diff --git a/driver-legacy/src/main/com/mongodb/WriteConcernError.java b/driver-legacy/src/main/com/mongodb/WriteConcernError.java new file mode 100644 index 00000000000..c5d96d8a10b --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/WriteConcernError.java @@ -0,0 +1,112 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * An error representing a failure by the server to apply the requested write concern to the bulk operation. + * + * @since 2.12 + * @mongodb.driver.manual core/write-concern/ Write Concern + */ +public class WriteConcernError { + private final int code; + private final String message; + private final DBObject details; + + /** + * Constructs a new instance. + * + * @param code the error code + * @param message the error message + * @param details any details + */ + public WriteConcernError(final int code, final String message, final DBObject details) { + this.code = code; + this.message = notNull("message", message); + this.details = notNull("details", details); + } + + /** + * Gets the code associated with this error. + * + * @return the code + */ + public int getCode() { + return code; + } + + /** + * Gets the message associated with this error. 
+ * + * @return the message + */ + public String getMessage() { + return message; + } + + /** + * Gets the details associated with this error. This document will not be null, but may be empty. + * + * @return the details + */ + public DBObject getDetails() { + return details; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + WriteConcernError that = (WriteConcernError) o; + + if (code != that.code) { + return false; + } + if (!details.equals(that.details)) { + return false; + } + if (!message.equals(that.message)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = code; + result = 31 * result + message.hashCode(); + result = 31 * result + details.hashCode(); + return result; + } + + @Override + public String toString() { + return "BulkWriteConcernError{" + + "code=" + code + + ", message='" + message + '\'' + + ", details=" + details + + '}'; + } +} diff --git a/driver-legacy/src/main/com/mongodb/WriteRequest.java b/driver-legacy/src/main/com/mongodb/WriteRequest.java new file mode 100644 index 00000000000..500e5ce950c --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/WriteRequest.java @@ -0,0 +1,21 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +abstract class WriteRequest { + abstract com.mongodb.internal.bulk.WriteRequest toNew(DBCollection dbCollection); +} diff --git a/driver-legacy/src/main/com/mongodb/WriteResult.java b/driver-legacy/src/main/com/mongodb/WriteResult.java new file mode 100644 index 00000000000..1d2010e46a4 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/WriteResult.java @@ -0,0 +1,135 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + + +import com.mongodb.lang.Nullable; + +/** + * This class lets you access the results of the previous acknowledged write. If the write was unacknowledged, all property access + * methods will throw {@link UnsupportedOperationException}. + * + * @see WriteConcern#UNACKNOWLEDGED + */ +public class WriteResult { + + private final boolean acknowledged; + private final int n; + private final boolean updateOfExisting; + private final Object upsertedId; + + /** + * Gets an instance representing an unacknowledged write. 
+ * + * @return an instance representing an unacknowledged write + * @since 3.0 + */ + public static WriteResult unacknowledged() { + return new WriteResult(); + } + + /** + * Construct a new instance. + * + * @param n the number of existing documents affected by this operation + * @param updateOfExisting true if the operation was an update and an existing document was updated + * @param upsertedId the _id of a document that was upserted by this operation, which may be null + */ + public WriteResult(final int n, final boolean updateOfExisting, @Nullable final Object upsertedId) { + this.acknowledged = true; + this.n = n; + this.updateOfExisting = updateOfExisting; + this.upsertedId = upsertedId; + } + + WriteResult() { + acknowledged = false; + n = 0; + updateOfExisting = false; + upsertedId = null; + } + + /** + * Returns true if the write was acknowledged. + * + * @return true if the write was acknowledged + * @see WriteConcern#UNACKNOWLEDGED + * @since 3.0 + */ + public boolean wasAcknowledged() { + return acknowledged; + } + + /** + * Gets the "n" field, which contains the number of documents affected in the write operation. + * + * @return the value of the "n" field + * @throws UnsupportedOperationException if the write was unacknowledged + * @see WriteConcern#UNACKNOWLEDGED + */ + public int getN() { + throwIfUnacknowledged("n"); + return n; + } + + /** + * Gets the _id value of an upserted document that resulted from this write. + * + * @return the value of the _id of an upserted document, which may be null + * @throws UnsupportedOperationException if the write was unacknowledged + * @since 2.12 + */ + @Nullable + public Object getUpsertedId() { + throwIfUnacknowledged("upsertedId"); + return upsertedId; + } + + + /** + * Returns true if this write resulted in an update of an existing document. + * + * @return whether the write resulted in an update of an existing document. + * @throws UnsupportedOperationException if the write was unacknowledged + * @since 2.12 + */ + public boolean isUpdateOfExisting() { + throwIfUnacknowledged("updateOfExisting"); + return updateOfExisting; + } + + @Override + public String toString() { + if (acknowledged) { + return "WriteResult{" + + "n=" + n + + ", updateOfExisting=" + updateOfExisting + + ", upsertedId=" + upsertedId + + '}'; + } else { + return "WriteResult{acknowledged=false}"; + } + } + + private void throwIfUnacknowledged(final String property) { + if (!acknowledged) { + throw new UnsupportedOperationException("Cannot get " + property + " property for an unacknowledged write"); + } + } +} + + diff --git a/driver-legacy/src/main/com/mongodb/client/jndi/MongoClientFactory.java b/driver-legacy/src/main/com/mongodb/client/jndi/MongoClientFactory.java new file mode 100644 index 00000000000..d287865b8c0 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/client/jndi/MongoClientFactory.java @@ -0,0 +1,106 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.mongodb.client.jndi;
+
+import com.mongodb.MongoClient;
+import com.mongodb.MongoClientURI;
+import com.mongodb.MongoException;
+import com.mongodb.internal.diagnostics.logging.Logger;
+import com.mongodb.internal.diagnostics.logging.Loggers;
+
+import javax.naming.Context;
+import javax.naming.Name;
+import javax.naming.RefAddr;
+import javax.naming.Reference;
+import javax.naming.spi.ObjectFactory;
+import java.util.Enumeration;
+import java.util.Hashtable;
+
+import static java.lang.String.format;
+
+/**
+ * An ObjectFactory for MongoClient instances.
+ *
+ * @since 3.3
+ */
+public class MongoClientFactory implements ObjectFactory {
+
+    private static final Logger LOGGER = Loggers.getLogger("client.jndi");
+
+    private static final String CONNECTION_STRING = "connectionString";
+
+    /**
+     * This implementation will create instances of {@link MongoClient} based on a connection string conforming to the format specified in
+     * {@link MongoClientURI}.
+     * <p>
+     * The connection string is specified in one of two ways:
+     * <ul>
+     * <li>As the {@code String} value of a property in the {@code environment} parameter with a key of {@code "connectionString"}</li>
+     * <li>As the {@code String} value of a {@link RefAddr} with type {@code "connectionString"} in an {@code obj} parameter
+     * of type {@link Reference}</li>
+     * </ul>
+     *
+     * Specification of the connection string in the {@code environment} parameter takes precedence over specification in the {@code obj}
+     * parameter. The {@code name} and {@code nameCtx} parameters are ignored.
+     * <p>
+     * If a non-empty connection string is not specified in either of these two ways, a {@link MongoException} is thrown.
+     *
+     * @return an instance of {@link MongoClient} based on the specified connection string
+     * @throws MongoException if a non-empty connection string is not specified in either the environment or the obj parameter
+     *
+     * Note: Not all options that can be specified via {@link com.mongodb.MongoClientOptions} can be specified via the connection string.
+     */
+    @Override
+    public Object getObjectInstance(final Object obj, final Name name, final Context nameCtx, final Hashtable<?, ?> environment) {
+
+        // Some app servers, e.g. Wildfly, use the environment to pass location information to an ObjectFactory
+        String connectionString = null;
+
+        if (environment.get(CONNECTION_STRING) instanceof String) {
+            connectionString = (String) environment.get(CONNECTION_STRING);
+        }
+
+        if (connectionString == null || connectionString.isEmpty()) {
+            LOGGER.debug(format("No '%s' property in environment. Casting 'obj' to javax.naming.Reference to look for a "
+                                + "javax.naming.RefAddr with type equal to '%s'", CONNECTION_STRING, CONNECTION_STRING));
+
+            // Some app servers, e.g. Tomcat, pass obj as an instance of javax.naming.Reference and pass location information in a
+            // javax.naming.RefAddr
+            if (obj instanceof Reference) {
+                Enumeration<RefAddr> props = ((Reference) obj).getAll();
+
+                while (props.hasMoreElements()) {
+                    RefAddr addr = props.nextElement();
+                    if (addr != null) {
+                        if (CONNECTION_STRING.equals(addr.getType())) {
+                            if (addr.getContent() instanceof String) {
+                                connectionString = (String) addr.getContent();
+                                break;
+                            }
+                        }
+                    }
+                }
+            }
+        }
+
+        if (connectionString == null || connectionString.isEmpty()) {
+            throw new MongoException(format("Could not locate '%s' in either environment or obj", CONNECTION_STRING));
+        }
+
+        MongoClientURI uri = new MongoClientURI(connectionString);
+
+        return new MongoClient(uri);
+    }
+}
diff --git a/driver-legacy/src/main/com/mongodb/client/jndi/package-info.java b/driver-legacy/src/main/com/mongodb/client/jndi/package-info.java
new file mode 100644
index 00000000000..02358c8003b
--- /dev/null
+++ b/driver-legacy/src/main/com/mongodb/client/jndi/package-info.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package contains a JNDI ObjectFactory implementation.
+ *
+ * @since 3.4
+ */
+package com.mongodb.client.jndi;
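As a usage illustration for the factory above, a hedged programmatic sketch; the connection string is hypothetical, and the nulls rely on the documented fact that the name and nameCtx parameters are ignored:

    import com.mongodb.MongoClient;
    import com.mongodb.client.jndi.MongoClientFactory;

    import javax.naming.Reference;
    import javax.naming.StringRefAddr;
    import java.util.Hashtable;

    public class MongoClientFactoryExample {
        public static void main(final String[] args) {
            // Tomcat-style: the connection string arrives as a RefAddr on a Reference.
            Reference ref = new Reference(MongoClient.class.getName(),
                    new StringRefAddr("connectionString", "mongodb://localhost:27017/test"));

            // Wildfly-style would instead put "connectionString" into this environment map.
            Hashtable<String, String> env = new Hashtable<>();

            MongoClient client = (MongoClient) new MongoClientFactory().getObjectInstance(ref, null, null, env);
            client.close();
        }
    }

And, ahead of the options classes that follow, a small sketch of how such an options object is consumed by the legacy collection API; the collection name and filter are hypothetical:

    import com.mongodb.BasicDBObject;
    import com.mongodb.DBCollection;
    import com.mongodb.MongoClient;
    import com.mongodb.ReadPreference;
    import com.mongodb.client.model.DBCollectionCountOptions;

    import java.util.concurrent.TimeUnit;

    public class CountOptionsExample {
        public static void main(final String[] args) {
            try (MongoClient client = new MongoClient()) {
                DBCollection users = client.getDB("test").getCollection("users");
                DBCollectionCountOptions options = new DBCollectionCountOptions()
                        .limit(100)
                        .skip(10)
                        .maxTime(2, TimeUnit.SECONDS)
                        .readPreference(ReadPreference.secondaryPreferred());
                System.out.println(users.getCount(new BasicDBObject("active", true), options));
            }
        }
    }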
diff --git a/driver-legacy/src/main/com/mongodb/client/model/DBCollectionCountOptions.java b/driver-legacy/src/main/com/mongodb/client/model/DBCollectionCountOptions.java
new file mode 100644
index 00000000000..660d0013c96
--- /dev/null
+++ b/driver-legacy/src/main/com/mongodb/client/model/DBCollectionCountOptions.java
@@ -0,0 +1,254 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model;
+
+import com.mongodb.DBObject;
+import com.mongodb.ReadConcern;
+import com.mongodb.ReadPreference;
+import com.mongodb.lang.Nullable;
+
+import java.util.concurrent.TimeUnit;
+
+import static com.mongodb.assertions.Assertions.isTrue;
+import static com.mongodb.assertions.Assertions.notNull;
+
+/**
+ * The options for a count operation.
+ *
+ * @since 3.4
+ * @mongodb.driver.manual reference/command/count/ Count
+ */
+public class DBCollectionCountOptions {
+    private DBObject hint;
+    private String hintString;
+    private int limit;
+    private int skip;
+    private long maxTimeMS;
+    private ReadPreference readPreference;
+    private ReadConcern readConcern;
+    private Collation collation;
+
+    /**
+     * Construct a new instance
+     */
+    public DBCollectionCountOptions() {
+    }
+
+    /**
+     * Gets the hint to apply.
+     *
+     * @return the hint, which should describe an existing index
+     */
+    @Nullable
+    public DBObject getHint() {
+        return hint;
+    }
+
+    /**
+     * Gets the hint string to apply.
+     *
+     * @return the hint string, which should be the name of an existing index
+     */
+    @Nullable
+    public String getHintString() {
+        return hintString;
+    }
+
+    /**
+     * Sets the hint to apply.
+     *
+     * @param hint a document describing the index which should be used for this operation.
+     * @return this
+     */
+    public DBCollectionCountOptions hint(@Nullable final DBObject hint) {
+        this.hint = hint;
+        return this;
+    }
+
+    /**
+     * Sets the hint string to apply.
+     *
+     * @param hint the name of the index which should be used for the operation
+     * @return this
+     */
+    public DBCollectionCountOptions hintString(@Nullable final String hint) {
+        this.hintString = hint;
+        return this;
+    }
+
+    /**
+     * Gets the limit to apply. The default is 0, which means there is no limit.
+     *
+     * @return the limit
+     * @mongodb.driver.manual reference/method/cursor.limit/#cursor.limit Limit
+     */
+    public int getLimit() {
+        return limit;
+    }
+
+    /**
+     * Sets the limit to apply.
+     *
+     * @param limit the limit
+     * @return this
+     * @mongodb.driver.manual reference/method/cursor.limit/#cursor.limit Limit
+     */
+    public DBCollectionCountOptions limit(final int limit) {
+        this.limit = limit;
+        return this;
+    }
+
+    /**
+     * Gets the number of documents to skip. The default is 0.
+     *
+     * @return the number of documents to skip
+     * @mongodb.driver.manual reference/method/cursor.skip/#cursor.skip Skip
+     */
+    public int getSkip() {
+        return skip;
+    }
+
+    /**
+     * Sets the number of documents to skip.
+     *
+     * @param skip the number of documents to skip
+     * @return this
+     * @mongodb.driver.manual reference/method/cursor.skip/#cursor.skip Skip
+     */
+    public DBCollectionCountOptions skip(final int skip) {
+        this.skip = skip;
+        return this;
+    }
+
+    /**
+     * Sets the limit to apply.
+     *
+     * @param limit the limit
+     * @return this
+     * @mongodb.driver.manual reference/method/cursor.limit/#cursor.limit Limit
+     */
+    public DBCollectionCountOptions limit(final long limit) {
+        isTrue("limit is too large", limit <= Integer.MAX_VALUE);
+        this.limit = (int) limit;
+        return this;
+    }
+
+    /**
+     * Sets the number of documents to skip.
+ * + * @param skip the number of documents to skip + * @return this + * @mongodb.driver.manual reference/method/cursor.skip/#cursor.skip Skip + */ + public DBCollectionCountOptions skip(final long skip) { + isTrue("skip is too large", skip <= Integer.MAX_VALUE); + this.skip = (int) skip; + return this; + } + + /** + * Gets the maximum execution time on the server for this operation. The default is 0, which places no limit on the execution time. + * + * @param timeUnit the time unit to return the result in + * @return the maximum execution time in the given time unit + */ + public long getMaxTime(final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS); + } + + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time + * @param timeUnit the time unit, which may not be null + * @return this + */ + public DBCollectionCountOptions maxTime(final long maxTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); + return this; + } + + /** + * Returns the readPreference + * + * @return the readPreference + */ + @Nullable + public ReadPreference getReadPreference() { + return readPreference; + } + + /** + * Sets the readPreference + * + * @param readPreference the readPreference + * @return this + */ + public DBCollectionCountOptions readPreference(@Nullable final ReadPreference readPreference) { + this.readPreference = readPreference; + return this; + } + + /** + * Returns the readConcern + * + * @return the readConcern + * @mongodb.server.release 3.2 + */ + @Nullable + public ReadConcern getReadConcern() { + return readConcern; + } + + /** + * Sets the readConcern + * + * @param readConcern the readConcern + * @return this + * @mongodb.server.release 3.2 + */ + public DBCollectionCountOptions readConcern(@Nullable final ReadConcern readConcern) { + this.readConcern = readConcern; + return this; + } + + /** + * Returns the collation options + * + * @return the collation options + * @mongodb.server.release 3.4 + */ + @Nullable + public Collation getCollation() { + return collation; + } + + /** + * Sets the collation + * + * @param collation the collation + * @return this + * @mongodb.server.release 3.4 + */ + public DBCollectionCountOptions collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } +} + diff --git a/driver-legacy/src/main/com/mongodb/client/model/DBCollectionDistinctOptions.java b/driver-legacy/src/main/com/mongodb/client/model/DBCollectionDistinctOptions.java new file mode 100644 index 00000000000..42e43221500 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/client/model/DBCollectionDistinctOptions.java @@ -0,0 +1,130 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model; + +import com.mongodb.DBObject; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.lang.Nullable; + +/** + * The options for a distinct operation. + * + * @since 3.4 + * @mongodb.driver.manual reference/command/count/ Count + */ +public class DBCollectionDistinctOptions { + private DBObject filter; + private ReadPreference readPreference; + private ReadConcern readConcern; + private Collation collation; + + /** + * Construct a new instance + */ + public DBCollectionDistinctOptions() { + } + + /** + * Gets the selection query to determine the subset of documents from which to retrieve the distinct values + * + * @return the query + */ + @Nullable + public DBObject getFilter() { + return filter; + } + + /** + * Sets the selection query to determine the subset of documents from which to retrieve the distinct values. + * + * @param filter the selection query to determine the subset of documents from which to retrieve the distinct values + * @return this + */ + public DBCollectionDistinctOptions filter(@Nullable final DBObject filter) { + this.filter = filter; + return this; + } + + /** + * Returns the readPreference + * + * @return the readPreference + */ + @Nullable + public ReadPreference getReadPreference() { + return readPreference; + } + + /** + * Sets the readPreference + * + * @param readPreference the readPreference + * @return this + */ + public DBCollectionDistinctOptions readPreference(@Nullable final ReadPreference readPreference) { + this.readPreference = readPreference; + return this; + } + + /** + * Returns the readConcern + * + * @return the readConcern + * @mongodb.server.release 3.2 + */ + @Nullable + public ReadConcern getReadConcern() { + return readConcern; + } + + /** + * Sets the readConcern + * + * @param readConcern the readConcern + * @return this + * @mongodb.server.release 3.2 + */ + public DBCollectionDistinctOptions readConcern(@Nullable final ReadConcern readConcern) { + this.readConcern = readConcern; + return this; + } + + /** + * Returns the collation options + * + * @return the collation options + * @mongodb.server.release 3.4 + */ + @Nullable + public Collation getCollation() { + return collation; + } + + /** + * Sets the collation + * + * @param collation the collation + * @return this + * @mongodb.server.release 3.4 + */ + public DBCollectionDistinctOptions collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } +} + diff --git a/driver-legacy/src/main/com/mongodb/client/model/DBCollectionFindAndModifyOptions.java b/driver-legacy/src/main/com/mongodb/client/model/DBCollectionFindAndModifyOptions.java new file mode 100644 index 00000000000..b8c3bb95c96 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/client/model/DBCollectionFindAndModifyOptions.java @@ -0,0 +1,292 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.mongodb.client.model;
+
+import com.mongodb.DBObject;
+import com.mongodb.WriteConcern;
+import com.mongodb.lang.Nullable;
+
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import static com.mongodb.assertions.Assertions.isTrueArgument;
+import static com.mongodb.assertions.Assertions.notNull;
+
+/**
+ * The options for find and modify operations.
+ *
+ * @since 3.4
+ */
+public final class DBCollectionFindAndModifyOptions {
+    private DBObject projection;
+    private DBObject sort;
+    private boolean remove;
+    private DBObject update;
+    private boolean upsert;
+    private boolean returnNew;
+    private Boolean bypassDocumentValidation;
+    private long maxTimeMS;
+    private WriteConcern writeConcern;
+    private Collation collation;
+    private List arrayFilters;
+
+    /**
+     * Construct a new instance
+     */
+    public DBCollectionFindAndModifyOptions() {
+    }
+
+    /**
+     * Returns the projection
+     *
+     * @return the projection
+     */
+    @Nullable
+    public DBObject getProjection() {
+        return projection;
+    }
+
+    /**
+     * Sets the projection
+     *
+     * @param projection the projection
+     * @return this
+     */
+    public DBCollectionFindAndModifyOptions projection(@Nullable final DBObject projection) {
+        this.projection = projection;
+        return this;
+    }
+
+    /**
+     * Returns the sort
+     *
+     * @return the sort
+     */
+    @Nullable
+    public DBObject getSort() {
+        return sort;
+    }
+
+    /**
+     * Sets the sort
+     *
+     * @param sort the sort
+     * @return this
+     */
+    public DBCollectionFindAndModifyOptions sort(@Nullable final DBObject sort) {
+        this.sort = sort;
+        return this;
+    }
+
+    /**
+     * Returns the remove
+     *
+     * @return the remove
+     */
+    public boolean isRemove() {
+        return remove;
+    }
+
+    /**
+     * Sets the remove
+     *
+     * @param remove the remove
+     * @return this
+     */
+    public DBCollectionFindAndModifyOptions remove(final boolean remove) {
+        this.remove = remove;
+        return this;
+    }
+
+    /**
+     * Returns the update
+     *
+     * @return the update
+     */
+    @Nullable
+    public DBObject getUpdate() {
+        return update;
+    }
+
+    /**
+     * Sets the update
+     *
+     * @param update the update
+     * @return this
+     */
+    public DBCollectionFindAndModifyOptions update(@Nullable final DBObject update) {
+        this.update = update;
+        return this;
+    }
+
+    /**
+     * Returns the upsert
+     *
+     * @return the upsert
+     */
+    public boolean isUpsert() {
+        return upsert;
+    }
+
+    /**
+     * Sets the upsert
+     *
+     * @param upsert the upsert
+     * @return this
+     */
+    public DBCollectionFindAndModifyOptions upsert(final boolean upsert) {
+        this.upsert = upsert;
+        return this;
+    }
+
+    /**
+     * Returns the returnNew
+     *
+     * @return the returnNew
+     */
+    public boolean returnNew() {
+        return returnNew;
+    }
+
+    /**
+     * Sets the returnNew
+     *
+     * @param returnNew the returnNew
+     * @return this
+     */
+    public DBCollectionFindAndModifyOptions returnNew(final boolean returnNew) {
+        this.returnNew = returnNew;
+        return this;
+    }
+
+    /**
+     * Returns the bypassDocumentValidation
+     *
+     * @return the bypassDocumentValidation
+     */
+    public Boolean getBypassDocumentValidation() {
+        return bypassDocumentValidation;
+    }
+
+    /**
+     * Sets the bypassDocumentValidation
+     *
+     * @param bypassDocumentValidation the bypassDocumentValidation
+     * @return this
+     */
+    public DBCollectionFindAndModifyOptions bypassDocumentValidation(final Boolean bypassDocumentValidation) {
+        this.bypassDocumentValidation = bypassDocumentValidation;
+        return this;
+    }
+
+    /**
+     * Gets the maximum execution time on the server for this operation. The default is 0, which places no limit on the execution time.
+ * + * @param timeUnit the time unit to return the result in + * @return the maximum execution time in the given time unit + * @mongodb.driver.manual reference/method/cursor.maxTimeMS/#cursor.maxTimeMS Max Time + */ + public long getMaxTime(final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS); + } + + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time + * @param timeUnit the time unit, which may not be null + * @return this + * @mongodb.driver.manual reference/method/cursor.maxTimeMS/#cursor.maxTimeMS Max Time + */ + public DBCollectionFindAndModifyOptions maxTime(final long maxTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + isTrueArgument("maxTime > = 0", maxTime >= 0); + this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); + return this; + } + + /** + * Returns the writeConcern + * + * @return the writeConcern + * @mongodb.server.release 3.2 + */ + @Nullable + public WriteConcern getWriteConcern() { + return writeConcern; + } + + /** + * Sets the writeConcern + * + * @param writeConcern the writeConcern + * @return this + * @mongodb.server.release 3.2 + */ + public DBCollectionFindAndModifyOptions writeConcern(@Nullable final WriteConcern writeConcern) { + this.writeConcern = writeConcern; + return this; + } + + /** + * Returns the collation options + * + * @return the collation options + * @mongodb.server.release 3.4 + */ + public Collation getCollation() { + return collation; + } + + /** + * Sets the collation + * + * @param collation the collation + * @return this + * @mongodb.server.release 3.4 + */ + public DBCollectionFindAndModifyOptions collation(final Collation collation) { + this.collation = collation; + return this; + } + + /** + * Sets the array filters option + * + * @param arrayFilters the array filters, which may be null + * @return this + * @since 3.6 + * @mongodb.server.release 3.6 + */ + public DBCollectionFindAndModifyOptions arrayFilters(final List arrayFilters) { + this.arrayFilters = arrayFilters; + return this; + } + + /** + * Returns the array filters option + * + * @return the array filters, which may be null + * @since 3.6 + * @mongodb.server.release 3.6 + */ + @Nullable + public List getArrayFilters() { + return arrayFilters; + } +} diff --git a/driver-legacy/src/main/com/mongodb/client/model/DBCollectionFindOptions.java b/driver-legacy/src/main/com/mongodb/client/model/DBCollectionFindOptions.java new file mode 100644 index 00000000000..256419315f7 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/client/model/DBCollectionFindOptions.java @@ -0,0 +1,564 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model; + +import com.mongodb.CursorType; +import com.mongodb.DBObject; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.lang.Nullable; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; + +/** + * The options to apply to a find operation (also commonly referred to as a query). + * + * @since 3.4 + * @mongodb.driver.manual tutorial/query-documents/ Find + * @mongodb.driver.manual ../meta-driver/latest/legacy/mongodb-wire-protocol/#op-query OP_QUERY + */ +public final class DBCollectionFindOptions { + private int batchSize; + private int limit; + private DBObject projection; + private long maxTimeMS; + private long maxAwaitTimeMS; + private int skip; + private DBObject sort; + private CursorType cursorType = CursorType.NonTailable; + private boolean noCursorTimeout; + private boolean partial; + private ReadPreference readPreference; + private ReadConcern readConcern; + private Collation collation; + private String comment; + private DBObject hint; + private String hintString; + private DBObject max; + private DBObject min; + private boolean returnKey; + private boolean showRecordId; + + /** + * Construct a new instance + */ + public DBCollectionFindOptions() { + } + + /** + * Copy this DBCollectionFindOptions instance into a new instance. + * + * @return the new DBCollectionFindOptions with the same settings as this instance. + */ + public DBCollectionFindOptions copy() { + DBCollectionFindOptions copiedOptions = new DBCollectionFindOptions(); + copiedOptions.batchSize(batchSize); + copiedOptions.limit(limit); + copiedOptions.projection(projection); + copiedOptions.maxTime(maxTimeMS, TimeUnit.MILLISECONDS); + copiedOptions.maxAwaitTime(maxAwaitTimeMS, TimeUnit.MILLISECONDS); + copiedOptions.skip(skip); + copiedOptions.sort(sort); + copiedOptions.cursorType(cursorType); + copiedOptions.noCursorTimeout(noCursorTimeout); + copiedOptions.partial(partial); + copiedOptions.readPreference(readPreference); + copiedOptions.readConcern(readConcern); + copiedOptions.collation(collation); + copiedOptions.comment(comment); + copiedOptions.hint(hint); + copiedOptions.hintString(hintString); + copiedOptions.max(max); + copiedOptions.min(min); + copiedOptions.returnKey(returnKey); + copiedOptions.showRecordId(showRecordId); + return copiedOptions; + } + + /** + * Gets the limit to apply. The default is null. + * + * @return the limit + * @mongodb.driver.manual reference/method/cursor.limit/#cursor.limit Limit + */ + public int getLimit() { + return limit; + } + + /** + * Sets the limit to apply. + * + * @param limit the limit + * @return this + * @mongodb.driver.manual reference/method/cursor.limit/#cursor.limit Limit + */ + public DBCollectionFindOptions limit(final int limit) { + this.limit = limit; + return this; + } + + /** + * Gets the number of documents to skip. The default is 0. + * + * @return the number of documents to skip + * @mongodb.driver.manual reference/method/cursor.skip/#cursor.skip Skip + */ + public int getSkip() { + return skip; + } + + /** + * Sets the number of documents to skip. + * + * @param skip the number of documents to skip + * @return this + * @mongodb.driver.manual reference/method/cursor.skip/#cursor.skip Skip + */ + public DBCollectionFindOptions skip(final int skip) { + this.skip = skip; + return this; + } + + /** + * Gets the maximum execution time on the server for this operation. 
The default is 0, which places no limit on the execution time.
+     *
+     * @param timeUnit the time unit to return the result in
+     * @return the maximum execution time in the given time unit
+     * @mongodb.driver.manual reference/method/cursor.maxTimeMS/#cursor.maxTimeMS Max Time
+     */
+    public long getMaxTime(final TimeUnit timeUnit) {
+        notNull("timeUnit", timeUnit);
+        return timeUnit.convert(maxTimeMS, TimeUnit.MILLISECONDS);
+    }
+
+    /**
+     * Sets the maximum execution time on the server for this operation.
+     *
+     * @param maxTime the max time
+     * @param timeUnit the time unit, which may not be null
+     * @return this
+     * @mongodb.driver.manual reference/method/cursor.maxTimeMS/#cursor.maxTimeMS Max Time
+     */
+    public DBCollectionFindOptions maxTime(final long maxTime, final TimeUnit timeUnit) {
+        notNull("timeUnit", timeUnit);
+        isTrueArgument("maxTime >= 0", maxTime >= 0);
+        this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit);
+        return this;
+    }
+
+    /**
+     * The maximum amount of time for the server to wait on new documents to satisfy a tailable cursor
+     * query. This only applies to a TAILABLE_AWAIT cursor. When the cursor is not a TAILABLE_AWAIT cursor,
+     * this option is ignored.
+     * <p>
+     * On servers &gt;= 3.2, this option will be specified on the getMore command as "maxTimeMS". The default
+     * is no value: no "maxTimeMS" is sent to the server with the getMore command.
+     * <p>
+     * On servers &lt; 3.2, this option is ignored, and indicates that the driver should respect the server's default value.
+     * <p>
+ * A zero value will be ignored. + * + * @param timeUnit the time unit to return the result in + * @return the maximum await execution time in the given time unit + * @mongodb.driver.manual reference/method/cursor.maxTimeMS/#cursor.maxTimeMS Max Time + */ + public long getMaxAwaitTime(final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + return timeUnit.convert(maxAwaitTimeMS, TimeUnit.MILLISECONDS); + } + + /** + * Sets the maximum await execution time on the server for this operation. + * + * @param maxAwaitTime the max await time. A zero value will be ignored, and indicates that the driver should respect the server's + * default value + * @param timeUnit the time unit, which may not be null + * @return this + * @mongodb.driver.manual reference/method/cursor.maxTimeMS/#cursor.maxTimeMS Max Time + */ + public DBCollectionFindOptions maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + isTrueArgument("maxAwaitTime > = 0", maxAwaitTime >= 0); + this.maxAwaitTimeMS = TimeUnit.MILLISECONDS.convert(maxAwaitTime, timeUnit); + return this; + } + + /** + * Gets the number of documents to return per batch. Default to 0, which indicates that the server chooses an appropriate batch + * size. + * + * @return the batch size + * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size + */ + public int getBatchSize() { + return batchSize; + } + + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size + * @return this + * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size + */ + public DBCollectionFindOptions batchSize(final int batchSize) { + this.batchSize = batchSize; + return this; + } + + /** + * Gets a document describing the fields to return for all matching documents. + * + * @return the project document, which may be null + * @mongodb.driver.manual reference/method/db.collection.find/ Projection + */ + @Nullable + public DBObject getProjection() { + return projection; + } + + /** + * Sets a document describing the fields to return for all matching documents. + * + * @param projection the project document, which may be null. + * @return this + * @mongodb.driver.manual reference/method/db.collection.find/ Projection + */ + public DBCollectionFindOptions projection(@Nullable final DBObject projection) { + this.projection = projection; + return this; + } + + /** + * Gets the sort criteria to apply to the query. The default is null, which means that the documents will be returned in an undefined + * order. + * + * @return a document describing the sort criteria + * @mongodb.driver.manual reference/method/cursor.sort/ Sort + */ + @Nullable + public DBObject getSort() { + return sort; + } + + /** + * Sets the sort criteria to apply to the query. + * + * @param sort the sort criteria, which may be null. + * @return this + * @mongodb.driver.manual reference/method/cursor.sort/ Sort + */ + public DBCollectionFindOptions sort(@Nullable final DBObject sort) { + this.sort = sort; + return this; + } + + /** + * The server normally times out idle cursors after an inactivity period (10 minutes) + * to prevent excess memory use. If true, that timeout is disabled. + * + * @return true if cursor timeout is disabled + */ + public boolean isNoCursorTimeout() { + return noCursorTimeout; + } + + /** + * The server normally times out idle cursors after an inactivity period (10 minutes) + * to prevent excess memory use. Set this option to prevent that. 
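+     * <p>
+     * A hypothetical configuration chaining this with other options (the {@code createdAt} field name is illustrative):
+     * <pre>{@code
+     * DBCollectionFindOptions options = new DBCollectionFindOptions()
+     *         .limit(100)
+     *         .sort(new BasicDBObject("createdAt", -1))
+     *         .noCursorTimeout(true);
+     * }</pre>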
+ * + * @param noCursorTimeout true if cursor timeout is disabled + * @return this + */ + public DBCollectionFindOptions noCursorTimeout(final boolean noCursorTimeout) { + this.noCursorTimeout = noCursorTimeout; + return this; + } + + /** + * Get partial results from a sharded cluster if one or more shards are unreachable (instead of throwing an error). + * + * @return if partial results for sharded clusters is enabled + */ + public boolean isPartial() { + return partial; + } + + /** + * Get partial results from a sharded cluster if one or more shards are unreachable (instead of throwing an error). + * + * @param partial if partial results for sharded clusters is enabled + * @return this + */ + public DBCollectionFindOptions partial(final boolean partial) { + this.partial = partial; + return this; + } + + /** + * Get the cursor type. + * + * @return the cursor type + */ + public CursorType getCursorType() { + return cursorType; + } + + /** + * Sets the cursor type. + * + * @param cursorType the cursor type + * @return this + */ + public DBCollectionFindOptions cursorType(final CursorType cursorType) { + this.cursorType = notNull("cursorType", cursorType); + return this; + } + + /** + * Returns the readPreference + * + * @return the readPreference + */ + @Nullable + public ReadPreference getReadPreference() { + return readPreference; + } + + /** + * Sets the readPreference + * + * @param readPreference the readPreference + * @return this + */ + public DBCollectionFindOptions readPreference(@Nullable final ReadPreference readPreference) { + this.readPreference = readPreference; + return this; + } + + /** + * Returns the readConcern + * + * @return the readConcern + * @mongodb.server.release 3.2 + */ + @Nullable + public ReadConcern getReadConcern() { + return readConcern; + } + + /** + * Sets the readConcern + * + * @param readConcern the readConcern + * @return this + * @mongodb.server.release 3.2 + */ + public DBCollectionFindOptions readConcern(@Nullable final ReadConcern readConcern) { + this.readConcern = readConcern; + return this; + } + + /** + * Returns the collation options + * + * @return the collation options + * @mongodb.server.release 3.4 + */ + @Nullable + public Collation getCollation() { + return collation; + } + + /** + * Sets the collation + * + * @param collation the collation + * @return this + * @mongodb.server.release 3.4 + */ + public DBCollectionFindOptions collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + /** + * Returns the comment to send with the query. The default is not to include a comment with the query. + * + * @return the comment + * @since 3.9 + */ + @Nullable + public String getComment() { + return comment; + } + + /** + * Sets the comment to the query. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 3.9 + */ + public DBCollectionFindOptions comment(@Nullable final String comment) { + this.comment = comment; + return this; + } + + /** + * Returns the hint for which index to use. The default is not to set a hint. + * + * @return the hint + * @since 3.9 + */ + @Nullable + public DBObject getHint() { + return hint; + } + + /** + * Returns the hint string for the name of the index to use. The default is not to set a hint. + * + * @return the hint string + * @since 4.4 + */ + @Nullable + public String getHintString() { + return hintString; + } + + /** + * Sets the hint for which index to use. A null value means no hint is set. 
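+     * <p>
+     * For example, to force an assumed ascending index on {@code age} (illustrative):
+     * <pre>{@code
+     * options.hint(new BasicDBObject("age", 1));
+     * }</pre>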
+     *
+     * @param hint the hint
+     * @return this
+     * @since 3.9
+     */
+    public DBCollectionFindOptions hint(@Nullable final DBObject hint) {
+        this.hint = hint;
+        return this;
+    }
+
+    /**
+     * Sets the hint for the name of the index to use. A null value means no hint is set.
+     *
+     * @param hintString the hint string
+     * @return this
+     * @since 4.4
+     */
+    public DBCollectionFindOptions hintString(@Nullable final String hintString) {
+        this.hintString = hintString;
+        return this;
+    }
+
+    /**
+     * Returns the exclusive upper bound for a specific index. By default there is no max bound.
+     *
+     * @return the max
+     * @since 3.9
+     */
+    @Nullable
+    public DBObject getMax() {
+        return max;
+    }
+
+    /**
+     * Sets the exclusive upper bound for a specific index. A null value means no max is set.
+     *
+     * @param max the max
+     * @return this
+     * @since 3.9
+     */
+    public DBCollectionFindOptions max(@Nullable final DBObject max) {
+        this.max = max;
+        return this;
+    }
+
+    /**
+     * Returns the inclusive lower bound for a specific index. By default there is no min bound.
+     *
+     * @return the min
+     * @since 3.9
+     */
+    @Nullable
+    public DBObject getMin() {
+        return min;
+    }
+
+    /**
+     * Sets the inclusive lower bound for a specific index. A null value means no min is set.
+     *
+     * @param min the min
+     * @return this
+     * @since 3.9
+     */
+    public DBCollectionFindOptions min(@Nullable final DBObject min) {
+        this.min = min;
+        return this;
+    }
+
+    /**
+     * Returns the returnKey. If true the find operation will return only the index keys in the resulting documents.
+     * <p>
+ * Default value is false. If returnKey is true and the find command does not use an index, the returned documents will be empty. + * + * @return the returnKey + * @since 3.9 + */ + public boolean isReturnKey() { + return returnKey; + } + + /** + * Sets the returnKey. If true the find operation will return only the index keys in the resulting documents. + * + * @param returnKey the returnKey + * @return this + * @since 3.9 + */ + public DBCollectionFindOptions returnKey(final boolean returnKey) { + this.returnKey = returnKey; + return this; + } + + /** + * Returns the showRecordId. + *
<p>
+ * Determines whether to return the record identifier for each document. If true, adds a field $recordId to the returned documents. + * The default is false. + * + * @return the showRecordId + * @since 3.9 + */ + public boolean isShowRecordId() { + return showRecordId; + } + + /** + * Sets the showRecordId. Set to true to add a field {@code $recordId} to the returned documents. + * + * @param showRecordId the showRecordId + * @return this + * @since 3.9 + */ + public DBCollectionFindOptions showRecordId(final boolean showRecordId) { + this.showRecordId = showRecordId; + return this; + } +} diff --git a/driver-legacy/src/main/com/mongodb/client/model/DBCollectionRemoveOptions.java b/driver-legacy/src/main/com/mongodb/client/model/DBCollectionRemoveOptions.java new file mode 100644 index 00000000000..7b31523c647 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/client/model/DBCollectionRemoveOptions.java @@ -0,0 +1,104 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.DBEncoder; +import com.mongodb.WriteConcern; +import com.mongodb.lang.Nullable; + +/** + * The options to apply when removing documents from the DBCollection + * + * @since 3.4 + * @mongodb.driver.manual tutorial/remove-documents/ Remove Documents + */ +public final class DBCollectionRemoveOptions { + private Collation collation; + private WriteConcern writeConcern; + private DBEncoder encoder; + + /** + * Construct a new instance + */ + public DBCollectionRemoveOptions() { + } + + /** + * Returns the collation options + * + * @return the collation options + * @mongodb.server.release 3.4 + */ + @Nullable + public Collation getCollation() { + return collation; + } + + /** + * Sets the collation + * + * @param collation the collation + * @return this + * @mongodb.server.release 3.4 + */ + public DBCollectionRemoveOptions collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + /** + * The write concern to use for the insertion. By default the write concern configured for the DBCollection instance will be used. + * + * @return the write concern, or null if the default will be used. 
+ */ + @Nullable + public WriteConcern getWriteConcern() { + return writeConcern; + } + + /** + * Sets the write concern + * + * @param writeConcern the write concern + * @return this + */ + public DBCollectionRemoveOptions writeConcern(@Nullable final WriteConcern writeConcern) { + this.writeConcern = writeConcern; + return this; + } + + /** + * Returns the encoder + * + * @return the encoder + */ + @Nullable + public DBEncoder getEncoder() { + return encoder; + } + + /** + * Sets the encoder + * + * @param encoder the encoder + * @return this + */ + public DBCollectionRemoveOptions encoder(@Nullable final DBEncoder encoder) { + this.encoder = encoder; + return this; + } +} diff --git a/driver-legacy/src/main/com/mongodb/client/model/DBCollectionUpdateOptions.java b/driver-legacy/src/main/com/mongodb/client/model/DBCollectionUpdateOptions.java new file mode 100644 index 00000000000..230daba7930 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/client/model/DBCollectionUpdateOptions.java @@ -0,0 +1,201 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +import com.mongodb.DBEncoder; +import com.mongodb.DBObject; +import com.mongodb.WriteConcern; +import com.mongodb.lang.Nullable; + +import java.util.List; + +/** + * The options to apply when updating documents in the DBCollection + * + * @since 3.4 + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + * @mongodb.driver.manual reference/command/update/ Update Command + */ +public class DBCollectionUpdateOptions { + private boolean upsert; + private Boolean bypassDocumentValidation; + private boolean multi; + private Collation collation; + private List arrayFilters; + private WriteConcern writeConcern; + private DBEncoder encoder; + + /** + * Construct a new instance + */ + public DBCollectionUpdateOptions() { + } + + /** + * Returns true if a new document should be inserted if there are no matches to the query filter. The default is false. + * + * @return true if a new document should be inserted if there are no matches to the query filter + */ + public boolean isUpsert() { + return upsert; + } + + /** + * Set to true if a new document should be inserted if there are no matches to the query filter. + * + * @param isUpsert true if a new document should be inserted if there are no matches to the query filter + * @return this + */ + public DBCollectionUpdateOptions upsert(final boolean isUpsert) { + this.upsert = isUpsert; + return this; + } + + /** + * Gets the bypass document level validation flag + * + * @return the bypass document level validation flag + * @mongodb.server.release 3.2 + */ + @Nullable + public Boolean getBypassDocumentValidation() { + return bypassDocumentValidation; + } + + /** + * Sets the bypass document level validation flag. + * + * @param bypassDocumentValidation If true, allows the write to opt-out of document level validation. 
+     * @return this
+     * @mongodb.server.release 3.2
+     */
+    public DBCollectionUpdateOptions bypassDocumentValidation(@Nullable final Boolean bypassDocumentValidation) {
+        this.bypassDocumentValidation = bypassDocumentValidation;
+        return this;
+    }
+
+    /**
+     * Sets whether all documents matching the query filter will be updated.
+     *
+     * @param multi true if all documents matching the query filter will be updated
+     * @return this
+     */
+    public DBCollectionUpdateOptions multi(final boolean multi) {
+        this.multi = multi;
+        return this;
+    }
+
+    /**
+     * Gets whether all documents matching the query filter will be updated. The default is false.
+     *
+     * @return whether all documents matching the query filter will be updated
+     */
+    public boolean isMulti() {
+        return multi;
+    }
+
+    /**
+     * Returns the collation options
+     *
+     * @return the collation options
+     * @mongodb.server.release 3.4
+     */
+    @Nullable
+    public Collation getCollation() {
+        return collation;
+    }
+
+    /**
+     * Sets the collation
+     *
+     * @param collation the collation
+     * @return this
+     * @mongodb.server.release 3.4
+     */
+    public DBCollectionUpdateOptions collation(@Nullable final Collation collation) {
+        this.collation = collation;
+        return this;
+    }
+
+    /**
+     * Sets the array filters option
+     *
+     * @param arrayFilters the array filters, which may be null
+     * @return this
+     * @since 3.6
+     * @mongodb.server.release 3.6
+     */
+    public DBCollectionUpdateOptions arrayFilters(@Nullable final List<? extends DBObject> arrayFilters) {
+        this.arrayFilters = arrayFilters;
+        return this;
+    }
+
+    /**
+     * Returns the array filters option
+     *
+     * @return the array filters, which may be null
+     * @since 3.6
+     * @mongodb.server.release 3.6
+     */
+    @Nullable
+    public List<? extends DBObject> getArrayFilters() {
+        return arrayFilters;
+    }
+
+    /**
+     * The write concern to use for the update. By default the write concern configured for the DBCollection instance will be used.
+     *
+     * @return the write concern, or null if the default will be used.
+     */
+    @Nullable
+    public WriteConcern getWriteConcern() {
+        return writeConcern;
+    }
+
+    /**
+     * Sets the write concern
+     *
+     * @param writeConcern the write concern
+     * @return this
+     */
+    public DBCollectionUpdateOptions writeConcern(@Nullable final WriteConcern writeConcern) {
+        this.writeConcern = writeConcern;
+        return this;
+    }
+
+    /**
+     * Returns the encoder
+     *
+     * @return the encoder
+     */
+    @Nullable
+    public DBEncoder getEncoder() {
+        return encoder;
+    }
+
+    /**
+     * Sets the encoder
+     *
+     * @param encoder the encoder
+     * @return this
+     */
+    public DBCollectionUpdateOptions encoder(@Nullable final DBEncoder encoder) {
+        this.encoder = encoder;
+        return this;
+    }
+}
diff --git a/driver-legacy/src/main/com/mongodb/client/model/DBCreateViewOptions.java b/driver-legacy/src/main/com/mongodb/client/model/DBCreateViewOptions.java
new file mode 100644
index 00000000000..e9b6c2f9752
--- /dev/null
+++ b/driver-legacy/src/main/com/mongodb/client/model/DBCreateViewOptions.java
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model; + +/** + * The options to apply when creating a view + * + * @since 3.4 + * @mongodb.server.release 3.4 + * @mongodb.driver.manual reference/command/create Create Command + */ +public class DBCreateViewOptions { + private Collation collation; + + /** + * Returns the collation options + * + * @return the collation options + */ + public Collation getCollation() { + return collation; + } + + /** + * Sets the collation + * + * @param collation the collation + * @return this + */ + public DBCreateViewOptions collation(final Collation collation) { + this.collation = collation; + return this; + } +} diff --git a/driver-legacy/src/main/com/mongodb/gridfs/GridFS.java b/driver-legacy/src/main/com/mongodb/gridfs/GridFS.java new file mode 100644 index 00000000000..e17b8c25dee --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/gridfs/GridFS.java @@ -0,0 +1,436 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.gridfs; + +import com.mongodb.BasicDBObject; +import com.mongodb.DB; +import com.mongodb.DBCollection; +import com.mongodb.DBCursor; +import com.mongodb.DBObject; +import com.mongodb.MongoException; +import com.mongodb.lang.Nullable; +import org.bson.types.ObjectId; + +import java.io.ByteArrayInputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** + *
<p>
+ * Implementation of GridFS - a specification for storing and retrieving files that exceed the BSON-document size limit of 16MB.
+ * <p>
+ * Instead of storing a file in a single document, GridFS divides a file into parts, or chunks, and stores each of those chunks as a
+ * separate document. By default GridFS limits chunk size to 255k. GridFS uses two collections to store files. One collection stores the
+ * file chunks, and the other stores file metadata.
+ * <p>
+ * When you query a GridFS store for a file, the driver or client will reassemble the chunks as needed. You can perform range queries on
+ * files stored through GridFS. You also can access information from arbitrary sections of files, which allows you to "skip" into the
+ * middle of a video or audio file.
+ * <p>
+ * GridFS is useful not only for storing files that exceed 16MB but also for storing any files for which you want access without having
+ * to load the entire file into memory. For more information on the indications of GridFS, see MongoDB official documentation.
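+ * <p>
+ * A brief usage sketch (the database handle and file name are illustrative):
+ * <pre>{@code
+ * GridFS gridFs = new GridFS(database, "pictures");
+ * GridFSInputFile file = gridFs.createFile(new File("/tmp/photo.png"));
+ * file.save();
+ * }</pre>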
+ * + * @mongodb.driver.manual core/gridfs/ GridFS + */ +public class GridFS { + + /** + * File's chunk size + */ + public static final int DEFAULT_CHUNKSIZE = 255 * 1024; + /** + * Bucket to use for the collection namespaces + */ + public static final String DEFAULT_BUCKET = "fs"; + + private final DB database; + private final String bucketName; + + private final DBCollection filesCollection; + private final DBCollection chunksCollection; + + /** + * Creates a GridFS instance for the default bucket "fs" in the given database. Set the preferred WriteConcern on the give DB with + * DB.setWriteConcern + * + * @param db database to work with + * @throws com.mongodb.MongoException if there's a failure + * @see com.mongodb.WriteConcern + */ + public GridFS(final DB db) { + this(db, DEFAULT_BUCKET); + } + + /** + * Creates a GridFS instance for the specified bucket in the given database. Set the preferred WriteConcern on the give DB with + * DB.setWriteConcern + * + * @param db database to work with + * @param bucket bucket to use in the given database + * @throws com.mongodb.MongoException if there's a failure + * @see com.mongodb.WriteConcern + */ + public GridFS(final DB db, final String bucket) { + this.database = db; + this.bucketName = bucket; + + this.filesCollection = database.getCollection(bucketName + ".files"); + this.chunksCollection = database.getCollection(bucketName + ".chunks"); + + // ensure standard indexes as long as collections are small + try { + if (filesCollection.count() < 1000) { + filesCollection.createIndex(new BasicDBObject("filename", 1).append("uploadDate", 1)); + } + if (chunksCollection.count() < 1000) { + chunksCollection.createIndex(new BasicDBObject("files_id", 1).append("n", 1), + new BasicDBObject("unique", true)); + } + } catch (MongoException e) { + //TODO: Logging + } + + filesCollection.setObjectClass(GridFSDBFile.class); + } + + /** + * Gets the list of files stored in this gridfs, sorted by filename. + * + * @return cursor of file objects + */ + public DBCursor getFileList() { + return filesCollection.find().sort(new BasicDBObject("filename", 1)); + } + + /** + * Gets a filtered list of files stored in this gridfs, sorted by filename. + * + * @param query filter to apply + * @return cursor of file objects + */ + public DBCursor getFileList(final DBObject query) { + return filesCollection.find(query).sort(new BasicDBObject("filename", 1)); + } + + /** + * Gets a sorted, filtered list of files stored in this gridfs. + * + * @param query filter to apply + * @param sort sorting to apply + * @return cursor of file objects + */ + public DBCursor getFileList(final DBObject query, final DBObject sort) { + return filesCollection.find(query).sort(sort); + } + + /** + * Finds one file matching the given objectId. Equivalent to findOne(objectId). + * + * @param objectId the objectId of the file stored on a server + * @return a gridfs file + * @throws com.mongodb.MongoException if the operation fails + */ + @Nullable + public GridFSDBFile find(final ObjectId objectId) { + return findOne(objectId); + } + + /** + * Finds one file matching the given objectId. + * + * @param objectId the objectId of the file stored on a server + * @return a gridfs file + * @throws com.mongodb.MongoException if the operation fails + */ + @Nullable + public GridFSDBFile findOne(final ObjectId objectId) { + return findOne(new BasicDBObject("_id", objectId)); + } + + /** + * Finds one file matching the given filename. 
+ * + * @param filename the name of the file stored on a server + * @return the gridfs db file + * @throws com.mongodb.MongoException if the operation fails + */ + @Nullable + public GridFSDBFile findOne(final String filename) { + return findOne(new BasicDBObject("filename", filename)); + } + + /** + * Finds one file matching the given query. + * + * @param query filter to apply + * @return a gridfs file + * @throws com.mongodb.MongoException if the operation fails + */ + @Nullable + public GridFSDBFile findOne(final DBObject query) { + return injectGridFSInstance(filesCollection.findOne(query)); + } + + /** + * Finds a list of files matching the given filename. + * + * @param filename the filename to look for + * @return list of gridfs files + * @throws com.mongodb.MongoException if the operation fails + */ + public List find(final String filename) { + return find(new BasicDBObject("filename", filename)); + } + + /** + * Finds a list of files matching the given filename. + * + * @param filename the filename to look for + * @param sort the fields to sort with + * @return list of gridfs files + * @throws com.mongodb.MongoException if the operation fails + */ + public List find(final String filename, final DBObject sort) { + return find(new BasicDBObject("filename", filename), sort); + } + + /** + * Finds a list of files matching the given query. + * + * @param query the filter to apply + * @return list of gridfs files + * @throws com.mongodb.MongoException if the operation fails + */ + public List find(final DBObject query) { + return find(query, null); + } + + /** + * Finds a list of files matching the given query. + * + * @param query the filter to apply + * @param sort the fields to sort with + * @return list of gridfs files + * @throws com.mongodb.MongoException if the operation fails + */ + public List find(final DBObject query, @Nullable final DBObject sort) { + List files = new ArrayList<>(); + + DBCursor cursor = filesCollection.find(query); + if (sort != null) { + cursor.sort(sort); + } + + try { + while (cursor.hasNext()) { + files.add(injectGridFSInstance(cursor.next())); + } + } finally { + cursor.close(); + } + return Collections.unmodifiableList(files); + } + + @Nullable + private GridFSDBFile injectGridFSInstance(@Nullable final Object o) { + if (o == null) { + return null; + } + + if (!(o instanceof GridFSDBFile)) { + throw new IllegalArgumentException("somehow didn't get a GridFSDBFile"); + } + + GridFSDBFile f = (GridFSDBFile) o; + f.fs = this; + return f; + } + + /** + * Removes the file matching the given id. + * + * @param id the id of the file to be removed + * @throws com.mongodb.MongoException if the operation fails + */ + public void remove(final ObjectId id) { + if (id == null) { + throw new IllegalArgumentException("file id can not be null"); + } + + filesCollection.remove(new BasicDBObject("_id", id)); + chunksCollection.remove(new BasicDBObject("files_id", id)); + } + + /** + * Removes all files matching the given filename. + * + * @param filename the name of the file to be removed + * @throws com.mongodb.MongoException if the operation fails + */ + public void remove(final String filename) { + if (filename == null) { + throw new IllegalArgumentException("filename can not be null"); + } + + remove(new BasicDBObject("filename", filename)); + } + + /** + * Removes all files matching the given query. 
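+     * <p>
+     * For example, to remove every file larger than an assumed 16MB threshold ({@code gridFs} is illustrative):
+     * <pre>{@code
+     * gridFs.remove(new BasicDBObject("length", new BasicDBObject("$gt", 16 * 1024 * 1024)));
+     * }</pre>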
+ * + * @param query filter to apply + * @throws com.mongodb.MongoException if the operation fails + */ + public void remove(final DBObject query) { + if (query == null) { + throw new IllegalArgumentException("query can not be null"); + } + + for (final GridFSDBFile f : find(query)) { + f.remove(); + } + } + + /** + * Creates a file entry. After calling this method, you have to call {@link com.mongodb.gridfs.GridFSInputFile#save()}. + * + * @param data the file's data + * @return a gridfs input file + */ + public GridFSInputFile createFile(final byte[] data) { + return createFile(new ByteArrayInputStream(data), true); + } + + /** + * Creates a file entry. After calling this method, you have to call {@link com.mongodb.gridfs.GridFSInputFile#save()}. + * + * @param file the file object + * @return a GridFS input file + * @throws IOException if there are problems reading {@code file} + */ + public GridFSInputFile createFile(final File file) throws IOException { + return createFile(new FileInputStream(file), file.getName(), true); + } + + /** + * Creates a file entry. After calling this method, you have to call {@link com.mongodb.gridfs.GridFSInputFile#save()}. + * + * @param in an inputstream containing the file's data + * @return a gridfs input file + */ + public GridFSInputFile createFile(final InputStream in) { + return createFile(in, null); + } + + /** + * Creates a file entry. After calling this method, you have to call {@link com.mongodb.gridfs.GridFSInputFile#save()}. + * + * @param in an inputstream containing the file's data + * @param closeStreamOnPersist indicate the passed in input stream should be closed once the data chunk persisted + * @return a gridfs input file + */ + public GridFSInputFile createFile(final InputStream in, final boolean closeStreamOnPersist) { + return createFile(in, null, closeStreamOnPersist); + } + + /** + * Creates a file entry. After calling this method, you have to call {@link com.mongodb.gridfs.GridFSInputFile#save()}. + * + * @param in an inputstream containing the file's data + * @param filename the file name as stored in the db + * @return a gridfs input file + */ + public GridFSInputFile createFile(final InputStream in, @Nullable final String filename) { + return new GridFSInputFile(this, in, filename); + } + + /** + * Creates a file entry. After calling this method, you have to call {@link com.mongodb.gridfs.GridFSInputFile#save()}. + * + * @param in an inputstream containing the file's data + * @param filename the file name as stored in the db + * @param closeStreamOnPersist indicate the passed in input stream should be closed once the data chunk persisted + * @return a gridfs input file + */ + public GridFSInputFile createFile(final InputStream in, @Nullable final String filename, final boolean closeStreamOnPersist) { + return new GridFSInputFile(this, in, filename, closeStreamOnPersist); + } + + /** + * Creates a file entry. + * + * @param filename the file name as stored in the db + * @return a gridfs input file + * @see GridFS#createFile() + */ + public GridFSInputFile createFile(final String filename) { + return new GridFSInputFile(this, filename); + } + + /** + * This method creates an empty {@link GridFSInputFile} instance. On this instance an {@link java.io.OutputStream} can be obtained using + * the {@link GridFSInputFile#getOutputStream()} method. You can still call {@link GridFSInputFile#setContentType(String)} and {@link + * GridFSInputFile#setFilename(String)}. 
The file will be completely written and closed after calling the {@link + * java.io.OutputStream#close()} method on the output stream. + * + * @return GridFS file handle instance. + */ + public GridFSInputFile createFile() { + return new GridFSInputFile(this); + } + + /** + * Gets the bucket name used in the collection's namespace. Default value is 'fs'. + * + * @return the name of the file bucket + */ + public String getBucketName() { + return bucketName; + } + + /** + * Gets the database used. + * + * @return the database + */ + public DB getDB() { + return database; + } + + /** + * Gets the {@link DBCollection} in which the file's metadata is stored. + * + * @return the collection + */ + protected DBCollection getFilesCollection() { + return filesCollection; + } + + /** + * Gets the {@link DBCollection} in which the binary chunks are stored. + * + * @return the collection + */ + protected DBCollection getChunksCollection() { + return chunksCollection; + } + +} diff --git a/driver-legacy/src/main/com/mongodb/gridfs/GridFSDBFile.java b/driver-legacy/src/main/com/mongodb/gridfs/GridFSDBFile.java new file mode 100644 index 00000000000..f236b9b0cad --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/gridfs/GridFSDBFile.java @@ -0,0 +1,196 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.gridfs; + +import com.mongodb.BasicDBObject; +import com.mongodb.DBObject; +import com.mongodb.MongoException; + +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + +/** + * This class enables retrieving a GridFS file metadata and content. Operations include: + *
<ul>
+ *   <li>Writing data to a file on disk or an OutputStream</li>
+ *   <li>Creating an {@code InputStream} to stream the data into</li>
+ * </ul>
+ * + * @mongodb.driver.manual core/gridfs/ GridFS + */ +public class GridFSDBFile extends GridFSFile { + /** + * Returns an InputStream from which data can be read. + * + * @return the input stream + */ + public InputStream getInputStream() { + return new GridFSInputStream(); + } + + /** + * Writes the file's data to a file on disk. + * + * @param filename the file name on disk + * @return number of bytes written + * @throws IOException if there are problems writing to the file + */ + public long writeTo(final String filename) throws IOException { + return writeTo(new File(filename)); + } + + /** + * Writes the file's data to a file on disk. + * + * @param file the File object + * @return number of bytes written + * @throws IOException if there are problems writing to the {@code file} + */ + public long writeTo(final File file) throws IOException { + try (FileOutputStream out = new FileOutputStream(file)) { + return writeTo(out); + } + } + + /** + * Writes the file's data to an OutputStream. + * + * @param out the OutputStream + * @return number of bytes written + * @throws IOException if there are problems writing to {@code out} + */ + public long writeTo(final OutputStream out) throws IOException { + int nc = numChunks(); + for (int i = 0; i < nc; i++) { + out.write(getChunk(i)); + } + return length; + } + + private byte[] getChunk(final int chunkNumber) { + if (fs == null) { + throw new IllegalStateException("No GridFS instance defined!"); + } + + DBObject chunk = fs.getChunksCollection().findOne(new BasicDBObject("files_id", id).append("n", chunkNumber)); + if (chunk == null) { + throw new MongoException("Can't find a chunk! file id: " + id + " chunk: " + chunkNumber); + } + + return (byte[]) chunk.get("data"); + } + + /** + * Removes file from GridFS i.e. removes documents from files and chunks collections. + */ + void remove() { + fs.getFilesCollection().remove(new BasicDBObject("_id", id)); + fs.getChunksCollection().remove(new BasicDBObject("files_id", id)); + } + + private class GridFSInputStream extends InputStream { + + private final int numberOfChunks; + private int currentChunkId = -1; + private int offset = 0; + private byte[] buffer = null; + + GridFSInputStream() { + this.numberOfChunks = numChunks(); + } + + @Override + public int available() { + if (buffer == null) { + return 0; + } + return buffer.length - offset; + } + + @Override + public int read() { + byte[] b = new byte[1]; + int res = read(b); + if (res < 0) { + return -1; + } + return b[0] & 0xFF; + } + + @Override + public int read(final byte[] b) { + return read(b, 0, b.length); + } + + @Override + public int read(final byte[] b, final int off, final int len) { + + if (buffer == null || offset >= buffer.length) { + if (currentChunkId + 1 >= numberOfChunks) { + return -1; + } + + buffer = getChunk(++currentChunkId); + offset = 0; + } + + int r = Math.min(len, buffer.length - offset); + System.arraycopy(buffer, offset, b, off, r); + offset += r; + return r; + } + + /** + * Will smartly skip over chunks without fetching them if possible. 
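+         * <p>
+         * The target chunk index is computed from the absolute offset in the file, so intermediate chunks are
+         * never fetched; only the destination chunk is read (or none at all when skipping past the end).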
+ */ + @Override + public long skip(final long bytesToSkip) throws IOException { + if (bytesToSkip <= 0) { + return 0; + } + + if (currentChunkId == numberOfChunks) { + //We're actually skipping over the back end of the file, short-circuit here + //Don't count those extra bytes to skip in with the return value + return 0; + } + + // offset in the whole file + long offsetInFile = 0; + if (currentChunkId >= 0) { + offsetInFile = currentChunkId * chunkSize + offset; + } + if (bytesToSkip + offsetInFile >= length) { + currentChunkId = numberOfChunks; + buffer = null; + return length - offsetInFile; + } + + int temp = currentChunkId; + currentChunkId = (int) ((bytesToSkip + offsetInFile) / chunkSize); + if (temp != currentChunkId) { + buffer = getChunk(currentChunkId); + } + offset = (int) ((bytesToSkip + offsetInFile) % chunkSize); + + return bytesToSkip; + } + } +} diff --git a/driver-legacy/src/main/com/mongodb/gridfs/GridFSFile.java b/driver-legacy/src/main/com/mongodb/gridfs/GridFSFile.java new file mode 100644 index 00000000000..80e6a7c18fa --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/gridfs/GridFSFile.java @@ -0,0 +1,285 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.gridfs; + +import com.mongodb.BasicDBObject; +import com.mongodb.DBObject; +import com.mongodb.DBObjectCodecProvider; +import com.mongodb.MongoException; +import org.bson.BSONObject; +import org.bson.codecs.BsonValueCodecProvider; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.ValueCodecProvider; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.json.JsonWriter; +import org.bson.json.JsonWriterSettings; + +import java.io.StringWriter; +import java.util.Collections; +import java.util.Date; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static java.util.Arrays.asList; +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; + +/** + * The abstract class representing a GridFS file. + * + * @mongodb.driver.manual core/gridfs/ GridFS + */ +public abstract class GridFSFile implements DBObject { + + private static final CodecRegistry DEFAULT_REGISTRY = + fromProviders(asList(new ValueCodecProvider(), new BsonValueCodecProvider(), new DBObjectCodecProvider())); + + private static final Set VALID_FIELDS = Collections.unmodifiableSet(new HashSet<>(asList("_id", + "filename", + "contentType", + "length", + "chunkSize", + "uploadDate", + "aliases"))); + + final DBObject extra = new BasicDBObject(); + + GridFS fs; + Object id; + String filename; + String contentType; + long length; + long chunkSize; + Date uploadDate; + + /** + * Saves the file entry to the files collection + * + * @throws MongoException if there's a failure + */ + public void save() { + if (fs == null) { + throw new MongoException("need fs"); + } + fs.getFilesCollection().save(this); + } + + /** + * Returns the number of chunks that store the file data. 
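+     * <p>
+     * This is {@code ceil(length / chunkSize)}: for example, a 600k file stored with the default 255k chunk
+     * size occupies three chunks.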
+ * + * @return number of chunks + */ + public int numChunks() { + double d = length; + d = d / chunkSize; + return (int) Math.ceil(d); + } + + /** + * Gets the id. + * + * @return the id of the file. + */ + public Object getId() { + return id; + } + + /** + * Gets the filename. + * + * @return the name of the file + */ + public String getFilename() { + return filename; + } + + /** + * Gets the content type. + * + * @return the content type + */ + public String getContentType() { + return contentType; + } + + /** + * Gets the file's length. + * + * @return the length of the file + */ + public long getLength() { + return length; + } + + /** + * Gets the size of a chunk. + * + * @return the chunkSize + */ + public long getChunkSize() { + return chunkSize; + } + + /** + * Gets the upload date. + * + * @return the date + */ + public Date getUploadDate() { + return uploadDate; + } + + /** + * Gets the aliases from the metadata. note: to set aliases, call {@link #put(String, Object)} with {@code "aliases" , List}. + * + * @return list of aliases + */ + @SuppressWarnings("unchecked") + public List getAliases() { + return (List) extra.get("aliases"); + } + + /** + * Gets the file metadata. + * + * @return the metadata + */ + public DBObject getMetaData() { + return (DBObject) extra.get("metadata"); + } + + /** + * Gets the file metadata. + * + * @param metadata metadata to be set + */ + public void setMetaData(final DBObject metadata) { + extra.put("metadata", metadata); + } + + @Override + public Object put(final String key, final Object v) { + if (key == null) { + throw new RuntimeException("key should never be null"); + } else if (key.equals("_id")) { + id = v; + } else if (key.equals("filename")) { + filename = v == null ? null : v.toString(); + } else if (key.equals("contentType")) { + contentType = (String) v; + } else if (key.equals("length")) { + length = ((Number) v).longValue(); + } else if (key.equals("chunkSize")) { + chunkSize = ((Number) v).longValue(); + } else if (key.equals("uploadDate")) { + uploadDate = (Date) v; + } else { + extra.put(key, v); + } + return v; + } + + @Override + public Object get(final String key) { + if (key == null) { + throw new IllegalArgumentException("Key should never be null"); + } else if (key.equals("_id")) { + return id; + } else if (key.equals("filename")) { + return filename; + } else if (key.equals("contentType")) { + return contentType; + } else if (key.equals("length")) { + return length; + } else if (key.equals("chunkSize")) { + return chunkSize; + } else if (key.equals("uploadDate")) { + return uploadDate; + } + return extra.get(key); + } + + @Override + public boolean containsField(final String s) { + return keySet().contains(s); + } + + @Override + public Set keySet() { + Set keys = new HashSet<>(); + keys.addAll(VALID_FIELDS); + keys.addAll(extra.keySet()); + return keys; + } + + @Override + public boolean isPartialObject() { + return false; + } + + @Override + public void markAsPartialObject() { + throw new MongoException("Can't load partial GridFSFile file"); + } + + @Override + public String toString() { + JsonWriter writer = new JsonWriter(new StringWriter(), JsonWriterSettings.builder().build()); + DEFAULT_REGISTRY.get(GridFSFile.class).encode(writer, this, + EncoderContext.builder().isEncodingCollectibleDocument(true).build()); + return writer.getWriter().toString(); + } + + /** + * Sets the GridFS associated with this file. 
+ * + * @param fs gridFS instance + */ + protected void setGridFS(final GridFS fs) { + this.fs = fs; + } + + /** + * Gets the GridFS associated with this file + * + * @return gridFS instance + */ + protected GridFS getGridFS() { + return this.fs; + } + + @Override + public void putAll(final BSONObject o) { + throw new UnsupportedOperationException(); + } + + @Override + public void putAll(@SuppressWarnings("rawtypes") final Map m) { + throw new UnsupportedOperationException(); + } + + @Override + public Map toMap() { + throw new UnsupportedOperationException(); + } + + @Override + public Object removeField(final String key) { + throw new UnsupportedOperationException(); + } +} diff --git a/driver-legacy/src/main/com/mongodb/gridfs/GridFSInputFile.java b/driver-legacy/src/main/com/mongodb/gridfs/GridFSInputFile.java new file mode 100644 index 00000000000..c25eb81520b --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/gridfs/GridFSInputFile.java @@ -0,0 +1,370 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.gridfs; + +import com.mongodb.BasicDBObject; +import com.mongodb.DBObject; +import com.mongodb.MongoException; +import com.mongodb.lang.Nullable; +import org.bson.types.ObjectId; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.Date; + +/** + *
<p>
+ * This class represents a GridFS file to be written to the database. Operations include:
+ * <ul>
+ *   <li>Writing data obtained from an InputStream</li>
+ *   <li>Getting an OutputStream to stream the data out to</li>
+ * </ul>
+ * + * @mongodb.driver.manual core/gridfs/ GridFS + */ +public class GridFSInputFile extends GridFSFile { + + private final InputStream inputStream; + private final boolean closeStreamOnPersist; + private boolean savedChunks = false; + private byte[] buffer = null; + private int currentChunkNumber = 0; + private int currentBufferPosition = 0; + private long totalBytes = 0; + private OutputStream outputStream = null; + + /** + * Default constructor setting the GridFS file name and providing an input stream containing data to be written to the file. + * + * @param gridFS The GridFS connection handle. + * @param inputStream Stream used for reading data from. + * @param filename Name of the file to be created. + * @param closeStreamOnPersist indicate the passed in input stream should be closed once the data chunk persisted + */ + protected GridFSInputFile(final GridFS gridFS, @Nullable final InputStream inputStream, @Nullable final String filename, + final boolean closeStreamOnPersist) { + this.fs = gridFS; + this.inputStream = inputStream; + this.filename = filename; + this.closeStreamOnPersist = closeStreamOnPersist; + + this.id = new ObjectId(); + this.chunkSize = GridFS.DEFAULT_CHUNKSIZE; + this.uploadDate = new Date(); + this.buffer = new byte[(int) chunkSize]; + } + + /** + * Default constructor setting the GridFS file name and providing an input stream containing data to be written to the file. + * + * @param gridFS The GridFS connection handle. + * @param inputStream Stream used for reading data from. + * @param filename Name of the file to be created. + */ + protected GridFSInputFile(final GridFS gridFS, @Nullable final InputStream inputStream, @Nullable final String filename) { + this(gridFS, inputStream, filename, false); + } + + /** + * Constructor that only provides a file name, but does not rely on the presence of an {@link java.io.InputStream}. An {@link + * java.io.OutputStream} can later be obtained for writing using the {@link #getOutputStream()} method. + * + * @param gridFS The GridFS connection handle. + * @param filename Name of the file to be created. + */ + protected GridFSInputFile(final GridFS gridFS, final String filename) { + this(gridFS, null, filename); + } + + /** + * Minimal constructor that does not rely on the presence of an {@link java.io.InputStream}. An {@link java.io.OutputStream} can later + * be obtained for writing using the {@link #getOutputStream()} method. + * + * @param gridFS The GridFS connection handle. + */ + protected GridFSInputFile(final GridFS gridFS) { + this(gridFS, null, null); + } + + /** + * Sets the ID of this GridFS file. + * + * @param id the file's ID. + */ + public void setId(final Object id) { + this.id = id; + } + + /** + * Sets the file name on the GridFS entry. + * + * @param filename File name. + */ + public void setFilename(final String filename) { + this.filename = filename; + } + + /** + * Sets the content type (MIME type) on the GridFS entry. + * + * @param contentType Content type. + */ + public void setContentType(final String contentType) { + this.contentType = contentType; + } + + /** + * Set the chunk size. This must be called before saving any data. + * + * @param chunkSize The size in bytes. + */ + public void setChunkSize(final long chunkSize) { + if (outputStream != null || savedChunks) { + return; + } + this.chunkSize = chunkSize; + buffer = new byte[(int) this.chunkSize]; + } + + /** + * Calls {@link GridFSInputFile#save(long)} with the existing chunk size. 
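+     * <p>
+     * A typical flow (the {@code gridFs} handle and names are illustrative):
+     * <pre>{@code
+     * GridFSInputFile file = gridFs.createFile(inputStream, "archive.bin");
+     * file.setContentType("application/octet-stream");
+     * file.save();
+     * }</pre>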
+ * + * @throws MongoException if there's a problem saving the file. + */ + @Override + public void save() { + save(chunkSize); + } + + /** + * This method first calls saveChunks(long) if the file data has not been saved yet, and then persists the file entry to GridFS. + * + * @param chunkSize Size of chunks for file in bytes. + * @throws MongoException if there's a problem saving the file. + */ + public void save(final long chunkSize) { + if (outputStream != null) { + throw new MongoException("cannot mix OutputStream and regular save()"); + } + + // the chunk size is only changed if chunks are actually saved here; + // otherwise the file document and the already-written chunks could disagree + if (!savedChunks) { + try { + saveChunks(chunkSize); + } catch (IOException ioe) { + throw new MongoException("couldn't save chunks", ioe); + } + } + + super.save(); + } + + /** + * Saves all data from the configured {@link java.io.InputStream} into chunks in GridFS. + * + * @return Number of the next chunk. + * @throws IOException on problems reading the new entry's {@link java.io.InputStream}. + * @throws MongoException if there's a failure + * @see com.mongodb.gridfs.GridFSInputFile#saveChunks(long) + */ + public int saveChunks() throws IOException { + return saveChunks(chunkSize); + } + + /** + * Saves all data from the configured {@link java.io.InputStream} into chunks in GridFS. A non-default chunk size can be + * specified. This method does NOT save the file object itself; call save() to do so. + * + * @param chunkSize Size of chunks for file in bytes. + * @return Number of the next chunk. + * @throws IOException on problems reading the new entry's {@link java.io.InputStream}. + * @throws MongoException if there's a failure + */ + public int saveChunks(final long chunkSize) throws IOException { + if (outputStream != null) { + throw new MongoException("Cannot mix OutputStream and regular save()"); + } + if (savedChunks) { + throw new MongoException("Chunks already saved!"); + } + + if (chunkSize <= 0) { + throw new MongoException("chunkSize must be greater than zero"); + } + + if (this.chunkSize != chunkSize) { + this.chunkSize = chunkSize; + buffer = new byte[(int) this.chunkSize]; + } + + int bytesRead = 0; + while (bytesRead >= 0) { + currentBufferPosition = 0; + bytesRead = _readStream2Buffer(); + dumpBuffer(true); + } + + // only finish the data, do not write the file document, in case the caller wants to change metadata first + finishData(); + return currentChunkNumber; + } + + /** + * Once this {@link java.io.OutputStream} has been retrieved, this object accepts data written successively to the stream. To + * completely persist this GridFS object, you must finally call the {@link java.io.OutputStream#close()} method on the output + * stream. Note that calling the save() and saveChunks() methods will throw an exception once you have obtained the OutputStream.
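+ * <p>A sketch of the streaming path (an editor's illustration; assumes {@code file} was created without an input stream and {@code payload} is a byte array):</p>
+ * <pre>{@code
+ * GridFSInputFile file = fs.createFile("streamed.bin");
+ * try (OutputStream out = file.getOutputStream()) {
+ *     out.write(payload);  // buffered and flushed in chunkSize pieces
+ * }                        // close() writes the final partial chunk and persists the file
+ * }</pre>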
+ * + * @return Writable stream object. + */ + public OutputStream getOutputStream() { + if (outputStream == null) { + outputStream = new GridFSOutputStream(); + } + return outputStream; + } + + /** + * Dumps a new chunk into the chunks collection. Depending on the flag, a partially filled buffer (the final chunk) is also written + * immediately. + * + * @param writePartial whether to write a partially filled buffer as the final chunk + * @throws MongoException if there's a failure + */ + private void dumpBuffer(final boolean writePartial) { + if ((currentBufferPosition < chunkSize) && !writePartial) { + // Bail out, chunk not complete yet + return; + } + if (currentBufferPosition == 0) { + // chunk is empty, may be last chunk + return; + } + + byte[] writeBuffer = buffer; + if (currentBufferPosition != chunkSize) { + writeBuffer = new byte[currentBufferPosition]; + System.arraycopy(buffer, 0, writeBuffer, 0, currentBufferPosition); + } + + DBObject chunk = createChunk(id, currentChunkNumber, writeBuffer); + + fs.getChunksCollection().save(chunk); + + currentChunkNumber++; + totalBytes += writeBuffer.length; + currentBufferPosition = 0; + } + + /** + * Creates a new chunk of this file. Can be overridden if input files need to be split into chunks using a different mechanism. + * + * @param id the file ID + * @param currentChunkNumber the sequence number of this chunk + * @param writeBuffer the byte array containing the data for this chunk + * @return a DBObject representing this chunk. + */ + protected DBObject createChunk(final Object id, final int currentChunkNumber, final byte[] writeBuffer) { + return new BasicDBObject("files_id", id) + .append("n", currentChunkNumber) + .append("data", writeBuffer); + } + + /** + * Reads a full buffer from the {@link java.io.InputStream}. + * + * @return Number of bytes read from stream. + * @throws IOException if the reading from the stream fails. + */ + private int _readStream2Buffer() throws IOException { + int bytesRead = 0; + while (currentBufferPosition < chunkSize && bytesRead >= 0) { + bytesRead = inputStream.read(buffer, currentBufferPosition, (int) chunkSize - currentBufferPosition); + if (bytesRead > 0) { + currentBufferPosition += bytesRead; + } else if (bytesRead == 0) { + throw new RuntimeException("unexpected 0-byte read from the input stream"); + } + } + return bytesRead; + } + + /** + * Marks the data as fully written. This needs to be called before super.save(). + */ + private void finishData() { + if (!savedChunks) { + length = totalBytes; + savedChunks = true; + try { + if (inputStream != null && closeStreamOnPersist) { + inputStream.close(); + } + } catch (IOException e) { + // ignore + } + } + } + + /** + * An output stream implementation that can be used to successively write to a GridFS file. + */ + private class GridFSOutputStream extends OutputStream { + + @Override + public void write(final int b) throws IOException { + byte[] byteArray = new byte[1]; + byteArray[0] = (byte) (b & 0xff); + write(byteArray, 0, 1); + } + + @Override + public void write(final byte[] b, final int off, final int len) throws IOException { + int offset = off; + int length = len; + int toCopy = 0; + while (length > 0) { + toCopy = length; + if (toCopy > chunkSize - currentBufferPosition) { + toCopy = (int) chunkSize - currentBufferPosition; + } + System.arraycopy(b, offset, buffer, currentBufferPosition, toCopy); + currentBufferPosition += toCopy; + offset += toCopy; + length -= toCopy; + if (currentBufferPosition == chunkSize) { + dumpBuffer(false); + } + } + } + + /** + * Flushes any remaining buffered data as a final chunk and marks the data as complete; closing the stream also persists the + * GridFS file.
+ */ + @Override + public void close() { + // write last buffer if needed + dumpBuffer(true); + // finish stream + finishData(); + // save file obj + GridFSInputFile.super.save(); + } + } +} diff --git a/driver-legacy/src/main/com/mongodb/gridfs/package-info.java b/driver-legacy/src/main/com/mongodb/gridfs/package-info.java new file mode 100644 index 00000000000..ceb09602e81 --- /dev/null +++ b/driver-legacy/src/main/com/mongodb/gridfs/package-info.java @@ -0,0 +1,24 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Contains the classes for supporting MongoDB's specification for storing very large files, GridFS. + * @mongodb.driver.manual core/gridfs/ GridFS + */ +@NonNullApi +package com.mongodb.gridfs; + +import com.mongodb.lang.NonNullApi; diff --git a/driver-legacy/src/test/functional/com/mongodb/ClassA.java b/driver-legacy/src/test/functional/com/mongodb/ClassA.java new file mode 100644 index 00000000000..c22f30ca8ee --- /dev/null +++ b/driver-legacy/src/test/functional/com/mongodb/ClassA.java @@ -0,0 +1,21 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +public class ClassA extends BasicDBObject { + private static final long serialVersionUID = -5066012099738511289L; +} diff --git a/driver-legacy/src/test/functional/com/mongodb/ClassB.java b/driver-legacy/src/test/functional/com/mongodb/ClassB.java new file mode 100644 index 00000000000..b489ed5e055 --- /dev/null +++ b/driver-legacy/src/test/functional/com/mongodb/ClassB.java @@ -0,0 +1,21 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb; + +public class ClassB extends BasicDBObject { + private static final long serialVersionUID = -270548788202734185L; +} diff --git a/driver-legacy/src/test/functional/com/mongodb/ConnectivityTest.java b/driver-legacy/src/test/functional/com/mongodb/ConnectivityTest.java new file mode 100644 index 00000000000..9bc228e5c0b --- /dev/null +++ b/driver-legacy/src/test/functional/com/mongodb/ConnectivityTest.java @@ -0,0 +1,41 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.List; + +import static com.mongodb.client.ConnectivityTestHelper.LEGACY_HELLO_COMMAND; +import static com.mongodb.client.Fixture.getMongoClientSettings; + +public class ConnectivityTest { + + // the test succeeds if no exception is thrown, and fails otherwise + @ParameterizedTest(name = "{1}") + @MethodSource("com.mongodb.client.ConnectivityTestHelper#getConnectivityTestArguments") + public void testConnectivity(final ConnectionString connectionString, @SuppressWarnings("unused") final List<String> hosts) { + try (MongoClient client = new MongoClient(getMongoClientSettings(connectionString).build())) { + // test that a command that doesn't require auth completes normally + client.getDatabase("admin").runCommand(LEGACY_HELLO_COMMAND); + + // test that a command that requires auth completes normally + client.getDatabase("test").getCollection("test").estimatedDocumentCount(); + } + } +} diff --git a/driver-legacy/src/test/functional/com/mongodb/DBCollectionAggregationTest.java b/driver-legacy/src/test/functional/com/mongodb/DBCollectionAggregationTest.java new file mode 100644 index 00000000000..5ca589c54df --- /dev/null +++ b/driver-legacy/src/test/functional/com/mongodb/DBCollectionAggregationTest.java @@ -0,0 +1,197 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb; + +import org.junit.Test; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static com.mongodb.ClusterFixture.clusterIsType; +import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint; +import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint; +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet; +import static com.mongodb.ClusterFixture.isSharded; +import static com.mongodb.connection.ClusterType.REPLICA_SET; +import static java.util.Arrays.asList; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.hamcrest.CoreMatchers.is; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeThat; +import static org.junit.Assume.assumeTrue; + +public class DBCollectionAggregationTest extends DatabaseTestCase { + + @Test + public void testAggregationCursor() { + List<DBObject> pipeline = prepareData(); + + verify(pipeline, AggregationOptions.builder() + .batchSize(1) + .allowDiskUse(true) + .build()); + + verify(pipeline, AggregationOptions.builder() + .batchSize(1) + .build()); + } + + @Test + public void testDollarOut() { + String aggCollection = "aggCollection"; + database.getCollection(aggCollection) + .drop(); + assertEquals(0, database.getCollection(aggCollection) + .count()); + + List<DBObject> pipeline = new ArrayList<>(prepareData()); + pipeline.add(new BasicDBObject("$out", aggCollection)); + verify(pipeline, AggregationOptions.builder() + .build()); + assertEquals(2, database.getCollection(aggCollection) + .count()); + } + + @Test + public void testDollarOutOnSecondary() throws InterruptedException { + assumeTrue(clusterIsType(REPLICA_SET)); + + List<DBObject> pipeline = new ArrayList<>(prepareData()); + pipeline.add(new BasicDBObject("$out", "aggCollection")); + AggregationOptions options = AggregationOptions.builder() + .build(); + ServerAddress serverAddress = verify(pipeline, options, ReadPreference.secondary(), collection); + assertEquals(2, database.getCollection("aggCollection") + .count()); + assertEquals(Fixture.getPrimary(), serverAddress); + } + + public List<DBObject> prepareData() { + collection.remove(new BasicDBObject()); + + DBObject foo = new BasicDBObject("name", "foo").append("count", 5); + DBObject bar = new BasicDBObject("name", "bar").append("count", 2); + DBObject baz = new BasicDBObject("name", "foo").append("count", 7); + collection.insert(foo, bar, baz); + + DBObject projection = new BasicDBObject("name", 1).append("count", 1); + + DBObject group = new BasicDBObject().append("_id", "$name") + .append("docsPerName", new BasicDBObject("$sum", 1)) + .append("countPerName", new BasicDBObject("$sum", "$count")); + return asList(new BasicDBObject("$project", projection), new BasicDBObject("$group", group)); + } + + @Test + public void testExplain() { + List<DBObject> pipeline = new ArrayList<>(prepareData()); + CommandResult out = collection.explainAggregate(pipeline, AggregationOptions.builder().build()); + assertTrue(out.keySet().iterator().hasNext()); + } + + @Test(expected = IllegalArgumentException.class) + public void testNullOptions() { + collection.aggregate(new ArrayList<>(), null); + } + + @Test + public void testMaxTime() { + assumeThat(isSharded(), is(false)); + enableMaxTimeFailPoint(); + DBCollection collection = database.getCollection("testMaxTime"); + try { + collection.aggregate(prepareData(),
AggregationOptions.builder().maxTime(1, SECONDS).build()); + fail("Should have thrown"); + } catch (MongoExecutionTimeoutException e) { + assertEquals(50, e.getCode()); + } finally { + disableMaxTimeFailPoint(); + } + } + + @Test + public void testWriteConcern() { + assumeThat(isDiscoverableReplicaSet(), is(true)); + DBCollection collection = database.getCollection("testWriteConcern"); + collection.setWriteConcern(new WriteConcern(5)); + try { + collection.aggregate(asList(new BasicDBObject("$out", "copy")), AggregationOptions.builder().build()); + fail("Should have thrown"); + } catch (WriteConcernException e) { + assertEquals(100, e.getCode()); + } + } + + @Test + public void testAvailable() { + prepareData(); + Cursor cursor = collection.aggregate(asList(new BasicDBObject("$match", new BasicDBObject())), + AggregationOptions.builder().build()); + + assertEquals(3, cursor.available()); + + cursor.next(); + assertEquals(2, cursor.available()); + + cursor.next(); + assertEquals(1, cursor.available()); + } + + + private void verify(final List<DBObject> pipeline, final AggregationOptions options) { + verify(pipeline, options, ReadPreference.primary()); + } + + private void verify(final List<DBObject> pipeline, final AggregationOptions options, final ReadPreference readPreference) { + verify(pipeline, options, readPreference, collection); + } + + private ServerAddress verify(final List<DBObject> pipeline, final AggregationOptions options, final ReadPreference readPreference, + final DBCollection collection) { + Cursor cursor = collection.aggregate(pipeline, options, readPreference); + ServerAddress serverAddress; + Map<String, DBObject> results; + try { + results = new HashMap<>(); + while (cursor.hasNext()) { + DBObject next = cursor.next(); + results.put((String) next.get("_id"), next); + } + } finally { + serverAddress = cursor.getServerAddress(); + cursor.close(); + } + + + DBObject fooResult = results.get("foo"); + assertNotNull(fooResult); + assertEquals(2, fooResult.get("docsPerName")); + assertEquals(12, fooResult.get("countPerName")); + + DBObject barResult = results.get("bar"); + assertNotNull(barResult); + assertEquals(1, barResult.get("docsPerName")); + assertEquals(2, barResult.get("countPerName")); + + return serverAddress; + } +} diff --git a/driver-legacy/src/test/functional/com/mongodb/DBCollectionFunctionalSpecification.groovy b/driver-legacy/src/test/functional/com/mongodb/DBCollectionFunctionalSpecification.groovy new file mode 100644 index 00000000000..9df5058866c --- /dev/null +++ b/driver-legacy/src/test/functional/com/mongodb/DBCollectionFunctionalSpecification.groovy @@ -0,0 +1,624 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb + +import com.mongodb.client.model.Collation +import com.mongodb.client.model.CollationAlternate +import com.mongodb.client.model.CollationCaseFirst +import com.mongodb.client.model.CollationMaxVariable +import com.mongodb.client.model.CollationStrength +import com.mongodb.client.model.DBCollectionCountOptions +import com.mongodb.client.model.DBCollectionFindAndModifyOptions +import com.mongodb.client.model.DBCollectionRemoveOptions +import com.mongodb.client.model.DBCollectionUpdateOptions +import org.bson.BsonDocument +import org.bson.BsonDocumentWrapper +import spock.lang.IgnoreIf +import spock.lang.Unroll + +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet +import static com.mongodb.ClusterFixture.serverVersionAtLeast +import static org.hamcrest.Matchers.contains +import static org.hamcrest.Matchers.containsInAnyOrder +import static spock.util.matcher.HamcrestSupport.that + +@SuppressWarnings('DuplicateMapLiteral') +class DBCollectionFunctionalSpecification extends FunctionalSpecification { + private idOfExistingDocument + + def setupSpec() { + Map.metaClass.bitwiseNegate = { new BasicDBObject(delegate) } + } + + def setup() { + def existingDocument = ~['a': ~[:], + 'b': ~[:]] + collection.insert(existingDocument) + idOfExistingDocument = existingDocument.get('_id') + collection.setObjectClass(BasicDBObject) + } + + def 'should update a document'() { + when: + collection.update(new BasicDBObject('_id', 1), new BasicDBObject('$set', new BasicDBObject('x', 1)), true, false) + + then: + collection.findOne(new BasicDBObject('_id', 1)) == new BasicDBObject('_id', 1).append('x', 1) + + when: + collection.update(new BasicDBObject('_id', 2), new BasicDBObject('$set', new BasicDBObject('x', 1))) + + then: + collection.findOne(new BasicDBObject('_id', 2)) == null + } + + def 'should update multiple documents'() { + given: + collection.insert([new BasicDBObject('x', 1), new BasicDBObject('x', 1)]) + + when: + collection.update(new BasicDBObject('x', 1), new BasicDBObject('$set', new BasicDBObject('x', 2)), false, true) + + then: + collection.count(new BasicDBObject('x', 2)) == 2 + } + + def 'should replace a document'() { + when: + collection.update(new BasicDBObject('_id', 1), new BasicDBObject('_id', 1).append('x', 1), true, false) + + then: + collection.findOne(new BasicDBObject('_id', 1)) == new BasicDBObject('_id', 1).append('x', 1) + + when: + collection.update(new BasicDBObject('_id', 2), new BasicDBObject('_id', 2).append('x', 1)) + + then: + collection.findOne(new BasicDBObject('_id', 2)) == null + } + + def 'should drop collection that exists'() { + given: + collection.insert(~['name': 'myName']) + + when: + collection.drop() + + then: + !(this.collectionName in database.getCollectionNames()) + } + + def 'should not error when dropping a collection that does not exist'() { + given: + !(this.collectionName in database.getCollectionNames()) + + when: + collection.drop() + + then: + notThrown(MongoException) + } + + def 'should use top-level class for findAndModify'() { + given: + collection.setObjectClass(ClassA) + + when: + DBObject document = collection.findAndModify(null, ~['_id': idOfExistingDocument, 'c': 1]) + + then: + document instanceof ClassA + } + + def 'should use internal classes for findAndModify'() { + given: + collection.setInternalClass('a', ClassA) + collection.setInternalClass('b', ClassB) + + when: + DBObject document = collection.findAndModify(null, ~['_id': idOfExistingDocument, 'c': 1]) + + then: + document.get('a') 
instanceof ClassA + document.get('b') instanceof ClassB + } + + def 'should support index options'() { + given: + def options = ~[ + 'sparse' : true, + 'background' : true, + 'expireAfterSeconds': 42 + ] + + when: + collection.createIndex(~['y': 1], options) + + then: + collection.getIndexInfo().size() == 2 + + DBObject document = collection.getIndexInfo()[1] + document.get('expireAfterSeconds') == 42 + document.get('background') == true + } + + def 'drop index should not fail if collection does not exist'() { + given: + collection.drop() + + expect: + collection.dropIndex('indexOnCollectionThatDoesNotExist') + } + + @IgnoreIf({ serverVersionAtLeast(8, 3) }) + def 'drop index should error if index does not exist'() { + given: + collection.createIndex(new BasicDBObject('x', 1)) + + when: + collection.dropIndex('y_1') + + then: + def exception = thrown(MongoCommandException) + exception.getErrorMessage().contains('index not found') + } + + @IgnoreIf({ serverVersionAtLeast(8, 3) }) + def 'should throw Exception if dropping an index with an incorrect type'() { + given: + BasicDBObject index = new BasicDBObject('x', 1) + collection.createIndex(index) + + when: + collection.dropIndex(new BasicDBObject('x', '2d')) + + then: + def exception = thrown(MongoCommandException) + exception.getErrorMessage().contains('can\'t find index') + } + + def 'should drop nested index'() { + given: + collection.save(new BasicDBObject('x', new BasicDBObject('y', 1))) + BasicDBObject index = new BasicDBObject('x.y', 1) + collection.createIndex(index) + assert collection.indexInfo.size() == 2 + + when: + collection.dropIndex(index) + + then: + collection.indexInfo.size() == 1 + } + + def 'should drop all indexes except the default index on _id'() { + given: + collection.createIndex(new BasicDBObject('x', 1)) + collection.createIndex(new BasicDBObject('x.y', 1)) + assert collection.indexInfo.size() == 3 + + when: + collection.dropIndexes() + + then: + collection.indexInfo.size() == 1 + } + + def 'should drop unique index'() { + given: + BasicDBObject index = new BasicDBObject('x', 1) + collection.createIndex(index, new BasicDBObject('unique', true)) + + when: + collection.dropIndex(index) + + then: + collection.indexInfo.size() == 1 + } + + def 'should use compound index for min query'() { + given: + collection.createIndex(new BasicDBObject('a', 1).append('_id', 1)) + + when: + def cursor = collection.find().min(new BasicDBObject('a', 1).append('_id', idOfExistingDocument)) + + then: + cursor.size() == 1 + } + + def 'should be able to rename a collection'() { + given: + assert database.getCollectionNames().contains(collectionName) + String newCollectionName = 'someNewName' + + when: + collection.rename(newCollectionName) + + then: + !database.getCollectionNames().contains(collectionName) + + database.getCollection(newCollectionName) != null + database.getCollectionNames().contains(newCollectionName) + } + + def 'should be able to rename collection to an existing collection name and replace it when drop is true'() { + given: + String existingCollectionName = 'anExistingCollection' + String originalCollectionName = 'someOriginalCollection' + + DBCollection originalCollection = database.getCollection(originalCollectionName) + String keyInOriginalCollection = 'someKey' + String valueInOriginalCollection = 'someValue' + originalCollection.insert(new BasicDBObject(keyInOriginalCollection, valueInOriginalCollection)) + + DBCollection existingCollection = database.getCollection(existingCollectionName) + String 
keyInExistingCollection = 'aDifferentDocument' + existingCollection.insert(new BasicDBObject(keyInExistingCollection, 'withADifferentValue')) + + assert database.getCollectionNames().contains(originalCollectionName) + assert database.getCollectionNames().contains(existingCollectionName) + + when: + originalCollection.rename(existingCollectionName, true) + + then: + !database.getCollectionNames().contains(originalCollectionName) + database.getCollectionNames().contains(existingCollectionName) + + DBCollection replacedCollection = database.getCollection(existingCollectionName) + replacedCollection.findOne().get(keyInExistingCollection) == null + replacedCollection.findOne().get(keyInOriginalCollection).toString() == valueInOriginalCollection + } + + def 'should return a list of all the values of a given field without duplicates'() { + given: + collection.drop() + (0..99).each { collection.save(~['_id': it, 'x' : it % 10]) } + assert collection.count() == 100 + + when: + List distinctValuesOfFieldX = collection.distinct('x') + + then: + distinctValuesOfFieldX.size() == 10 + that distinctValuesOfFieldX, contains(0, 1, 2, 3, 4, 5, 6, 7, 8, 9) + } + + def 'should query database for values and return a list of all the distinct values of a given field that match the filter'() { + given: + collection.drop() + (0..99).each { collection.save(~['_id': it, 'x' : it % 10, 'isOddNumber': it % 2]) } + assert collection.count() == 100 + + when: + List distinctValuesOfFieldX = collection.distinct('x', ~['isOddNumber': 1]) + + then: + distinctValuesOfFieldX.size() == 5 + that distinctValuesOfFieldX, contains(1, 3, 5, 7, 9) + } + + def 'should return distinct values of differing types '() { + given: + collection.drop() + def documents = [~['id' : null], ~['id' :'a'], ~['id' : 1], + ~['id' : ~['b': 'c']], ~['id' : ~['list': [2, 'd', ~['e': 3]]]]] + + collection.insert(documents) + + when: + List distinctValues = collection.distinct('id') + + then: + distinctValues.size() == 5 + that distinctValues, containsInAnyOrder(null, 'a', 1, ~['b': 'c'], ~['list': [2, 'd', ~['e': 3]]]) + } + + + def 'should return null when findOne finds nothing'() { + expect: + collection.findOne([field: 'That Does Not Exist']) == null + } + + def 'should return null when findOne finds nothing and a projection field is specified'() { + given: + collection.drop() + + expect: + collection.findOne(null, [_id: true] as BasicDBObject) == null + } + + @Unroll + def 'should return #result when performing findOne with #criteria'() { + given: + collection.insert([_id: 100, x: 1, y: 2] as BasicDBObject) + collection.insert([_id: 123, x: 2, z: 2] as BasicDBObject) + + expect: + result == collection.findOne(criteria) + + where: + criteria | result + 123 | [_id: 123, x: 2, z: 2] + [x: 1] as BasicDBObject | [_id: 100, x: 1, y: 2] + } + + @Unroll + def 'should return #result when performing findOne with #criteria and projection #projection'() { + given: + collection.insert([_id: 100, x: 1, y: 2] as BasicDBObject) + collection.insert([_id: 123, x: 2, z: 2] as BasicDBObject) + + expect: + result == collection.findOne(criteria, projection) + + where: + criteria | projection | result + 123 | [x: 1] as BasicDBObject | [_id: 123, x: 2] + [x: 1] as BasicDBObject | [y: 1] as BasicDBObject | [_id: 100, y: 2] + } + + @Unroll + def 'should sort with #sortBy and filter with #criteria before selecting first result'() { + given: + collection.drop() + collection.insert([_id: 1, x: 100, y: 'abc'] as BasicDBObject) + collection.insert([_id: 2, x: 200, y: 'abc'] as 
BasicDBObject) + collection.insert([_id: 3, x: 1, y: 'abc'] as BasicDBObject) + collection.insert([_id: 4, x: -100, y: 'xyz'] as BasicDBObject) + collection.insert([_id: 5, x: -50, y: 'zzz'] as BasicDBObject) + collection.insert([_id: 6, x: 9, y: 'aaa'] as BasicDBObject) + + expect: + collection.findOne(criteria, null, sortBy)['_id'] == expectedId + + where: + criteria | sortBy | expectedId + new BasicDBObject() | [x: 1] as BasicDBObject | 4 + new BasicDBObject() | [x: -1] as BasicDBObject | 2 + [x: 1] as BasicDBObject | [x: 1, y: 1] as BasicDBObject | 3 + QueryBuilder.start('x').lessThan(2).get() | [y: -1] as BasicDBObject | 5 + } + + @IgnoreIf({ !isDiscoverableReplicaSet() }) + def 'should throw WriteConcernException on write concern error for rename'() { + given: + assert database.getCollectionNames().contains(collectionName) + collection.setWriteConcern(new WriteConcern(5)) + + when: + collection.rename('someOtherNewName') + + then: + def e = thrown(WriteConcernException) + e.getErrorCode() == 100 + + cleanup: + collection.setWriteConcern(null) + } + + @IgnoreIf({ !isDiscoverableReplicaSet() }) + def 'should throw WriteConcernException on write concern error for drop'() { + given: + assert database.getCollectionNames().contains(collectionName) + collection.setWriteConcern(new WriteConcern(5)) + + when: + collection.drop() + + then: + def e = thrown(WriteConcernException) + e.getErrorCode() == 100 + + cleanup: + collection.setWriteConcern(null) + } + + @IgnoreIf({ !isDiscoverableReplicaSet() }) + def 'should throw WriteConcernException on write concern error for createIndex'() { + given: + assert database.getCollectionNames().contains(collectionName) + collection.setWriteConcern(new WriteConcern(5)) + + when: + collection.createIndex(new BasicDBObject('somekey', 1)) + + then: + def e = thrown(WriteConcernException) + e.getErrorCode() == 100 + + cleanup: + collection.setWriteConcern(null) + } + + @IgnoreIf({ !isDiscoverableReplicaSet() }) + def 'should throw WriteConcernException on write concern error for dropIndex'() { + given: + assert database.getCollectionNames().contains(collectionName) + collection.createIndex(new BasicDBObject('somekey', 1)) + collection.setWriteConcern(new WriteConcern(5)) + + when: + collection.dropIndex(new BasicDBObject('somekey', 1)) + + then: + def e = thrown(WriteConcernException) + e.getErrorCode() == 100 + + cleanup: + collection.setWriteConcern(null) + } + + def 'should support creating an index with collation options'() { + given: + def collation = Collation.builder() + .locale('en') + .caseLevel(true) + .collationCaseFirst(CollationCaseFirst.OFF) + .collationStrength(CollationStrength.IDENTICAL) + .numericOrdering(true) + .normalization(false) + .collationAlternate(CollationAlternate.SHIFTED) + .collationMaxVariable(CollationMaxVariable.SPACE) + .backwards(true) + .build() + + def options = BasicDBObject.parse('''{ collation: { locale: "en", caseLevel: true, caseFirst: "off", strength: 5, + numericOrdering: true, alternate: "shifted", maxVariable: "space", backwards: true }}''') + + when: + collection.drop() + collection.createIndex(~['y': 1], new BasicDBObject(options)) + + then: + collection.getIndexInfo().size() == 2 + + when: + BsonDocument indexCollation = new BsonDocumentWrapper(collection.getIndexInfo()[1].get('collation'), + collection.getDefaultDBObjectCodec()) + indexCollation.remove('version') + + then: + indexCollation == collation.asDocument() + } + + def 'should find with collation'() { + given: + def document = 
BasicDBObject.parse('{_id: 1, str: "foo"}') + collection.insert(document) + + when: + def result = collection.find(BasicDBObject.parse('{str: "FOO"}')) + + then: + !result.hasNext() + + when: + result = collection.find(BasicDBObject.parse('{str: "FOO"}')).setCollation(caseInsensitive) + + then: + result.hasNext() + ++result == document + } + + def 'should aggregate with collation'() { + given: + def document = BasicDBObject.parse('{_id: 1, str: "foo"}') + collection.insert(document) + + when: + def result = collection.aggregate([BasicDBObject.parse('{ $match: { str: "FOO"}}')], AggregationOptions.builder().build()) + + then: + !result.hasNext() + + when: + result = collection.aggregate([BasicDBObject.parse('{ $match: { str: "FOO"}}')], + AggregationOptions.builder().collation(caseInsensitive).build()) + + then: + result.hasNext() + ++result == document + } + + def 'should count with collation'() { + given: + collection.insert(BasicDBObject.parse('{_id: 1, str: "foo"}')) + + when: + def result = collection.count(BasicDBObject.parse('{str: "FOO"}')) + + then: + result == 0L + + when: + result = collection.count(BasicDBObject.parse('{str: "FOO"}'), new DBCollectionCountOptions().collation(caseInsensitive)) + + then: + result == 1L + } + + def 'should update with collation'() { + given: + collection.insert(BasicDBObject.parse('{_id: 1, str: "foo"}')) + + when: + def result = collection.update(BasicDBObject.parse('{str: "FOO"}'), BasicDBObject.parse('{str: "bar"}')) + + then: + result.getN() == 0 + + when: + result = collection.update(BasicDBObject.parse('{str: "FOO"}'), BasicDBObject.parse('{str: "bar"}'), + new DBCollectionUpdateOptions().collation(caseInsensitive)) + + then: + result.getN() == 1 + } + + def 'should remove with collation'() { + given: + collection.insert(BasicDBObject.parse('{_id: 1, str: "foo"}')) + + when: + def result = collection.remove(BasicDBObject.parse('{str: "FOO"}')) + + then: + result.getN() == 0 + + when: + result = collection.remove(BasicDBObject.parse('{str: "FOO"}'), new DBCollectionRemoveOptions().collation(caseInsensitive)) + + then: + result.getN() == 1 + } + + def 'should find and modify with collation'() { + given: + def document = BasicDBObject.parse('{_id: 1, str: "foo"}') + collection.insert(document) + + when: + def result = collection.findAndModify(BasicDBObject.parse('{str: "FOO"}'), + new DBCollectionFindAndModifyOptions().update(BasicDBObject.parse('{_id: 1, str: "BAR"}'))) + + then: + result == null + + when: + result = collection.findAndModify(BasicDBObject.parse('{str: "FOO"}'), + new DBCollectionFindAndModifyOptions().update(BasicDBObject.parse('{_id: 1, str: "BAR"}')).collation(caseInsensitive)) + + then: + result == document + } + + def 'should drop compound index by key'() { + given: + def indexKeys = new BasicDBObject('x', 1).append('y', -1) + collection.createIndex(indexKeys) + + when: + collection.dropIndex(indexKeys) + + then: + collection.getIndexInfo().size() == 1 + } + + def caseInsensitive = Collation.builder().locale('en').collationStrength(CollationStrength.SECONDARY).build() +} diff --git a/driver-legacy/src/test/functional/com/mongodb/DBCollectionOldTest.java b/driver-legacy/src/test/functional/com/mongodb/DBCollectionOldTest.java new file mode 100644 index 00000000000..fa2b31faf39 --- /dev/null +++ b/driver-legacy/src/test/functional/com/mongodb/DBCollectionOldTest.java @@ -0,0 +1,310 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import org.bson.types.ObjectId; +import org.junit.Test; + +import java.util.Arrays; +import java.util.List; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.core.Is.is; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +// DBCollection is tested a lot; however, little analysis has been done to see if the tests overlap. Some of the tests in this class +// almost certainly overlap with either DBCollectionTest or DBCollectionSpecification. If they do not overlap, +// they should be moved into one of those test files and this test class ultimately removed. +public class DBCollectionOldTest extends DatabaseTestCase { + @Test + public void testMultiInsert() { + DBCollection c = collection; + + DBObject obj = c.findOne(); + assertNull(obj); + + DBObject inserted1 = BasicDBObjectBuilder.start().add("x", 1).add("y", 2).get(); + DBObject inserted2 = BasicDBObjectBuilder.start().add("x", 3).add("y", 3).get(); + c.insert(inserted1, inserted2); + assertThat(collection.count(), is(2L)); + } + + @Test(expected = DuplicateKeyException.class) + public void testDuplicateKeyException() { + DBCollection c = collection; + + DBObject obj = new BasicDBObject(); + c.insert(obj, WriteConcern.ACKNOWLEDGED); + c.insert(obj, WriteConcern.ACKNOWLEDGED); + } + + @Test + public void testDropIndividualIndexes() { + DBCollection c = database.getCollection("dropindex2"); + c.drop(); + + c.save(new BasicDBObject("x", 1)); + assertEquals(1, c.getIndexInfo().size()); + + c.createIndex(new BasicDBObject("x", 1)); + assertEquals(2, c.getIndexInfo().size()); + + c.createIndex(new BasicDBObject("y", 1)); + assertEquals(3, c.getIndexInfo().size()); + + c.createIndex(new BasicDBObject("z", 1)); + assertEquals(4, c.getIndexInfo().size()); + + c.dropIndex("y_1"); + assertEquals(3, c.getIndexInfo().size()); + + c.dropIndex(new BasicDBObject("x", 1)); + assertEquals(2, c.getIndexInfo().size()); + + c.dropIndexes("z_1"); + assertEquals(1, c.getIndexInfo().size()); + } + + @Test + public void shouldDropCompoundIndexes1() { + DBCollection c = database.getCollection("dropindex3"); + c.drop(); + + BasicDBObject newDoc = new BasicDBObject("x", "some value").append("y", "another value"); + + c.save(newDoc); + assertEquals(1, c.getIndexInfo().size()); + + BasicDBObject indexFields = new BasicDBObject("x", 1).append("y", 1); + c.createIndex(indexFields); + assertEquals(2, c.getIndexInfo().size()); + + c.dropIndex(indexFields); + assertEquals(1, c.getIndexInfo().size()); + } + + @Test + public void shouldDropCompoundIndexes2() { + DBCollection c = database.getCollection("dropindex4"); + c.drop(); + + BasicDBObject newDoc = new BasicDBObject("x", "some value").append("y", "another value"); + + c.save(newDoc); + assertEquals(1, c.getIndexInfo().size()); + +
BasicDBObject indexFields = new BasicDBObject("x", 1).append("y", 1); + c.createIndex(indexFields); + assertEquals(2, c.getIndexInfo().size()); + + c.dropIndex("x_1_y_1"); + assertEquals(1, c.getIndexInfo().size()); + } + + @Test + public void shouldDropCompoundGeoIndexes() { + DBCollection c = database.getCollection("dropindex5"); + c.drop(); + + BasicDBObject newDoc = new BasicDBObject("x", "some value").append("y", "another value"); + + c.save(newDoc); + assertEquals(1, c.getIndexInfo().size()); + + BasicDBObject indexFields = new BasicDBObject("x", "2d").append("y", 1); + c.createIndex(indexFields); + assertEquals(2, c.getIndexInfo().size()); + + c.dropIndex("x_2d_y_1"); + assertEquals(1, c.getIndexInfo().size()); + } + + @Test + public void shouldDropGeoIndexes() { + DBCollection c = database.getCollection("dropindex6"); + c.drop(); + + c.save(new BasicDBObject("x", 1)); + assertEquals(1, c.getIndexInfo().size()); + + BasicDBObject indexFields = new BasicDBObject("x", "2d"); + c.createIndex(indexFields); + assertEquals(2, c.getIndexInfo().size()); + + c.createIndex(new BasicDBObject("y", "2d")); + assertEquals(3, c.getIndexInfo().size()); + + c.createIndex(new BasicDBObject("z", "2d")); + assertEquals(4, c.getIndexInfo().size()); + + c.dropIndex("y_2d"); + assertEquals(3, c.getIndexInfo().size()); + + c.dropIndex(indexFields); + assertEquals(2, c.getIndexInfo().size()); + + c.dropIndexes("z_2d"); + assertEquals(1, c.getIndexInfo().size()); + + } + + @Test + public void testEnsureIndex() { + collection.save(new BasicDBObject("x", 1)); + assertEquals(1, collection.getIndexInfo().size()); + + collection.createIndex(new BasicDBObject("x", 1), new BasicDBObject("unique", true)); + assertEquals(2, collection.getIndexInfo().size()); + assertEquals(Boolean.TRUE, collection.getIndexInfo().get(1).get("unique")); + } + + @Test + public void testEnsureNestedIndex() { + DBCollection c = collection; + + BasicDBObject newDoc = new BasicDBObject("x", new BasicDBObject("y", 1)); + c.save(newDoc); + + assertEquals(1, c.getIndexInfo().size()); + c.createIndex(new BasicDBObject("x.y", 1), new BasicDBObject("name", "nestedIdx1").append("unique", false)); + assertEquals(2, c.getIndexInfo().size()); + } + + @Test + public void shouldSupportIndexAliases() { + // given + collection.save(new BasicDBObject("x", 1)); + assertEquals(1, collection.getIndexInfo().size()); + + // when + String indexAlias = "indexAlias"; + collection.createIndex(new BasicDBObject("x", 1), new BasicDBObject("name", indexAlias)); + + // then + assertEquals(2, collection.getIndexInfo().size()); + assertEquals(indexAlias, collection.getIndexInfo().get(1).get("name")); + } + + @Test(expected = DuplicateKeyException.class) + public void testIndexExceptions() { + DBCollection c = collection; + + c.insert(new BasicDBObject("x", 1)); + c.insert(new BasicDBObject("x", 1)); + + c.createIndex(new BasicDBObject("y", 1)); + c.createIndex(new BasicDBObject("y", 1)); // make sure this doesn't throw + + c.createIndex(new BasicDBObject("x", 1), new BasicDBObject("unique", true)); + } + + @Test + public void testWriteResultOnUnacknowledgedUpdate(){ + collection.insert(new BasicDBObject("_id", 1)); + WriteResult res = collection.update(new BasicDBObject("_id", 1), new BasicDBObject("$inc", new BasicDBObject("x", 1)), + false, false, WriteConcern.UNACKNOWLEDGED); + try { + res.getN(); + fail(); + } catch (UnsupportedOperationException e) { + // expected + } + try { + res.getUpsertedId(); + fail(); + } catch (UnsupportedOperationException e) { + // 
expected + } + + try { + res.isUpdateOfExisting(); + fail(); + } catch (UnsupportedOperationException e) { + // expected + } + } + + @Test + public void testWriteResultOnUpdate(){ + collection.insert(new BasicDBObject("_id", 1)); + WriteResult res = collection.update(new BasicDBObject("_id", 1), new BasicDBObject("$inc", new BasicDBObject("x", 1))); + assertEquals(1, res.getN()); + assertTrue(res.isUpdateOfExisting()); + assertNull(res.getUpsertedId()); + } + + @Test + public void testWriteResultOnUpsert(){ + ObjectId id = new ObjectId(); + collection.insert(new BasicDBObject("_id", 1)); + WriteResult res = collection.update(new BasicDBObject("_id", id), new BasicDBObject("$inc", new BasicDBObject("x", 1)), true, + false); + assertEquals(1, res.getN()); + assertFalse(res.isUpdateOfExisting()); + assertEquals(id, res.getUpsertedId()); + } + + @Test + public void testWriteResultOnRemove() { + collection.insert(new BasicDBObject("_id", 1)); + collection.insert(new BasicDBObject("_id", 2)); + WriteResult res = collection.remove(new BasicDBObject()); + assertEquals(2, res.getN()); + assertFalse(res.isUpdateOfExisting()); + assertNull(res.getUpsertedId()); + } + + @Test + public void testMultiInsertNoContinue() { + List<DBObject> documents = Arrays.asList(new BasicDBObject("_id", 1).append("x", 1).append("y", 2), + new BasicDBObject("_id", 1).append("x", 3).append("y", 4), + new BasicDBObject("x", 5).append("y", 6)); + try { + collection.insert(documents, WriteConcern.ACKNOWLEDGED); + fail("Insert should have failed"); + } catch (MongoException e) { + assertEquals(11000, e.getCode()); + } + assertEquals(1, collection.count()); + + try { + collection.insert(documents, new InsertOptions()); + fail("Insert should have failed"); + } catch (MongoException e) { + assertEquals(11000, e.getCode()); + } + assertEquals(1, collection.count()); + } + + @Test + public void testMultiInsertWithContinue() { + List<DBObject> documents = Arrays.asList(new BasicDBObject("_id", 1).append("x", 1).append("y", 2), + new BasicDBObject("_id", 1).append("x", 3).append("y", 4), + new BasicDBObject("x", 5).append("y", 6)); + try { + collection.insert(documents, new InsertOptions().continueOnError(true)); + fail("Insert should have failed"); + } catch (MongoException e) { + assertEquals(11000, e.getCode()); + } + assertEquals(2, collection.count()); + } +} diff --git a/driver-legacy/src/test/functional/com/mongodb/DBCollectionSpecification.groovy b/driver-legacy/src/test/functional/com/mongodb/DBCollectionSpecification.groovy new file mode 100644 index 00000000000..98cb8282c17 --- /dev/null +++ b/driver-legacy/src/test/functional/com/mongodb/DBCollectionSpecification.groovy @@ -0,0 +1,858 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb + +import com.mongodb.client.internal.TestOperationExecutor +import com.mongodb.client.model.Collation +import com.mongodb.client.model.CollationAlternate +import com.mongodb.client.model.CollationCaseFirst +import com.mongodb.client.model.CollationMaxVariable +import com.mongodb.client.model.CollationStrength +import com.mongodb.client.model.DBCollectionCountOptions +import com.mongodb.client.model.DBCollectionDistinctOptions +import com.mongodb.client.model.DBCollectionFindAndModifyOptions +import com.mongodb.client.model.DBCollectionFindOptions +import com.mongodb.client.model.DBCollectionRemoveOptions +import com.mongodb.client.model.DBCollectionUpdateOptions +import com.mongodb.internal.bulk.DeleteRequest +import com.mongodb.internal.bulk.IndexRequest +import com.mongodb.internal.bulk.InsertRequest +import com.mongodb.internal.bulk.UpdateRequest +import com.mongodb.internal.operation.AggregateOperation +import com.mongodb.internal.operation.AggregateToCollectionOperation +import com.mongodb.internal.operation.BatchCursor +import com.mongodb.internal.operation.CountOperation +import com.mongodb.internal.operation.CreateIndexesOperation +import com.mongodb.internal.operation.DistinctOperation +import com.mongodb.internal.operation.FindAndDeleteOperation +import com.mongodb.internal.operation.FindAndReplaceOperation +import com.mongodb.internal.operation.FindAndUpdateOperation +import com.mongodb.internal.operation.FindOperation +import com.mongodb.internal.operation.MapReduceBatchCursor +import com.mongodb.internal.operation.MapReduceStatistics +import com.mongodb.internal.operation.MapReduceToCollectionOperation +import com.mongodb.internal.operation.MapReduceWithInlineResultsOperation +import com.mongodb.internal.operation.MixedBulkWriteOperation +import org.bson.BsonBinary +import org.bson.BsonDocument +import org.bson.BsonDocumentWrapper +import org.bson.BsonInt32 +import org.bson.BsonJavaScript +import org.bson.UuidRepresentation +import org.bson.codecs.BsonDocumentCodec +import org.bson.codecs.BsonValueCodec +import org.bson.codecs.UuidCodec +import spock.lang.Specification + +import java.util.concurrent.TimeUnit + +import static com.mongodb.Fixture.getMongoClient +import static com.mongodb.CustomMatchers.isTheSameAs +import static com.mongodb.LegacyMixedBulkWriteOperation.createBulkWriteOperationForDelete +import static com.mongodb.LegacyMixedBulkWriteOperation.createBulkWriteOperationForUpdate +import static java.util.Arrays.asList +import static org.bson.codecs.configuration.CodecRegistries.fromCodecs +import static spock.util.matcher.HamcrestSupport.expect + +class DBCollectionSpecification extends Specification { + + private static final DEFAULT_DBOBJECT_CODEC_FACTORY = new DBObjectCodec(MongoClient.getDefaultCodecRegistry(), + DBObjectCodec.getDefaultBsonTypeClassMap(), + new DBCollectionObjectFactory()) + + def 'should throw IllegalArgumentException if name is invalid'() { + when: + new DB(getMongoClient(), 'myDatabase', new TestOperationExecutor([])).getCollection('') + + then: + thrown(IllegalArgumentException) + } + + def 'should use MongoClient CodecRegistry'() { + given: + def mongoClient = Stub(MongoClient) { + getCodecRegistry() >> fromCodecs(new UuidCodec(UuidRepresentation.STANDARD)) + getReadConcern() >> ReadConcern.DEFAULT + getWriteConcern() >> WriteConcern.ACKNOWLEDGED + getMongoClientOptions() >> MongoClientOptions.builder().build() + } + def executor = new TestOperationExecutor([WriteConcernResult.unacknowledged()]) + def db = new
DB(mongoClient, 'myDatabase', executor) + def collection = db.getCollection('test') + def uuid = UUID.fromString('01020304-0506-0708-090a-0b0c0d0e0f10') + + when: + collection.insert(new BasicDBObject('_id', uuid)) + def operation = executor.writeOperation as LegacyMixedBulkWriteOperation + + then: + (operation.writeRequests[0] as InsertRequest).document.getBinary('_id') == new BsonBinary(uuid, UuidRepresentation.STANDARD) + } + + def 'should get and set read concern'() { + when: + def db = new DB(getMongoClient(), 'myDatabase', new TestOperationExecutor([])) + db.setReadConcern(ReadConcern.MAJORITY) + def collection = db.getCollection('test') + + then: + collection.readConcern == ReadConcern.MAJORITY + + when: + collection.setReadConcern(ReadConcern.LOCAL) + + then: + collection.readConcern == ReadConcern.LOCAL + + when: + collection.setReadConcern(null) + + then: + collection.readConcern == ReadConcern.MAJORITY + } + + def 'should use CreateIndexOperation properly'() { + given: + def executor = new TestOperationExecutor([null, null, null]) + def collection = new DB(getMongoClient(), 'myDatabase', executor).getCollection('test') + def keys = new BasicDBObject('a', 1) + + when: + collection.createIndex(keys) + def request = (executor.getWriteOperation() as CreateIndexesOperation).requests[0] + + then: + expect request, isTheSameAs(new IndexRequest(new BsonDocument('a', new BsonInt32(1)))) + + when: + def storageEngine = '{ wiredTiger: { configString: "block_compressor=zlib" }}' + def partialFilterExpression = '{ a: { $gte: 10 } }' + def collation = Collation.builder() + .locale('en') + .caseLevel(true) + .collationCaseFirst(CollationCaseFirst.OFF) + .collationStrength(CollationStrength.IDENTICAL) + .numericOrdering(true) + .collationAlternate(CollationAlternate.SHIFTED) + .collationMaxVariable(CollationMaxVariable.SPACE) + .backwards(true) + .build() + collection.createIndex(keys, new BasicDBObject(['background': true, 'unique': true, 'sparse': true, 'name': 'aIndex', + 'expireAfterSeconds': 100, 'v': 1, 'weights': new BasicDBObject(['a': 1000]), + 'default_language': 'es', 'language_override': 'language', 'textIndexVersion': 1, + '2dsphereIndexVersion': 1, 'bits': 1, 'min': new Double(-180.0), + 'max' : new Double(180.0), 'dropDups': true, + 'storageEngine': BasicDBObject.parse(storageEngine), + 'partialFilterExpression': BasicDBObject.parse(partialFilterExpression), + 'collation': BasicDBObject.parse(collation.asDocument().toJson())])) + + request = (executor.getWriteOperation() as CreateIndexesOperation).requests[0] + + then: + expect request, isTheSameAs(new IndexRequest(new BsonDocument('a', new BsonInt32(1))) + .background(true) + .unique(true) + .sparse(true) + .name('aIndex') + .expireAfter(100, TimeUnit.SECONDS) + .version(1) + .weights(new BsonDocument('a', new BsonInt32(1000))) + .defaultLanguage('es') + .languageOverride('language') + .textVersion(1) + .sphereVersion(1) + .bits(1) + .min(-180.0) + .max(180.0) + .dropDups(true) + .storageEngine(BsonDocument.parse(storageEngine)) + .partialFilterExpression(BsonDocument.parse(partialFilterExpression)) + .collation(collation)) + } + + def 'should support boolean index options that are numbers'() { + given: + def executor = new TestOperationExecutor([null, null]) + def collection = new DB(getMongoClient(), 'myDatabase', executor).getCollection('test') + def options = new BasicDBObject('sparse', value) + + when: + collection.createIndex(new BasicDBObject('y', 1), options) + + then: + def operation = executor.getWriteOperation() as 
CreateIndexesOperation + operation.requests[0].sparse == expectedValue + + where: + value | expectedValue + 0 | false + 0F | false + 0D | false + 1 | true + -1 | true + 4L | true + 4.3F | true + 4.0D | true + } + + def 'should support integer index options that are numbers'() { + given: + def executor = new TestOperationExecutor([null, null]) + def collection = new DB(getMongoClient(), 'myDatabase', executor).getCollection('test') + def options = new BasicDBObject('expireAfterSeconds', integerValue) + + when: + collection.createIndex(new BasicDBObject('y', 1), options) + + then: + def operation = executor.getWriteOperation() as CreateIndexesOperation + operation.requests[0].getExpireAfter(TimeUnit.SECONDS) == integerValue + + where: + integerValue << [4, 4L, (double) 4.0] + } + + def 'should support double index options that are numbers'() { + given: + def executor = new TestOperationExecutor([null, null]) + def collection = new DB(getMongoClient(), 'myDatabase', executor).getCollection('test') + def options = new BasicDBObject('max', doubleValue) + + when: + collection.createIndex(new BasicDBObject('y', '2d'), options) + + then: + def operation = executor.getWriteOperation() as CreateIndexesOperation + operation.requests[0].max == doubleValue + + where: + doubleValue << [4, 4L, (double) 4.0] + } + + def 'should throw IllegalArgumentException for unsupported option value type'() { + given: + def executor = new TestOperationExecutor([null, null]) + def collection = new DB(getMongoClient(), 'myDatabase', executor).getCollection('test') + def options = new BasicDBObject('sparse', 'true') + + + when: + collection.createIndex(new BasicDBObject('y', '1'), options) + + then: + thrown(IllegalArgumentException) + } + + def 'find should create the correct FindOperation'() { + given: + def cursor = Stub(BatchCursor) { + hasNext() >> false + getServerCursor() >> new ServerCursor(12L, new ServerAddress()) + } + def executor = new TestOperationExecutor([cursor, cursor, cursor]) + def db = new DB(getMongoClient(), 'myDatabase', executor) + def collection = db.getCollection('test') + + when: + collection.find().iterator().hasNext() + + then: + expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), + collection.getObjectCodec()) + .filter(new BsonDocument()) + .retryReads(true)) + + when: // Inherits from DB + db.setReadConcern(ReadConcern.MAJORITY) + collection.find().iterator().hasNext() + + then: + expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), + collection.getObjectCodec()) + .filter(new BsonDocument()) + .retryReads(true)) + + when: + collection.setReadConcern(ReadConcern.LOCAL) + collection.find(new BasicDBObject(), new DBCollectionFindOptions().collation(collation)).iterator().hasNext() + + then: + expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), + collection.getObjectCodec()) + .filter(new BsonDocument()) + .collation(collation) + .retryReads(true)) + } + + def 'findOne should create the correct FindOperation'() { + given: + def dbObject = new BasicDBObject('_id', 1) + def cursor = Stub(BatchCursor) { + next() >> [dbObject] + hasNext() >> true + getServerCursor() >> new ServerCursor(12L, new ServerAddress()) + } + def executor = new TestOperationExecutor([cursor, cursor, cursor]) + def db = new DB(getMongoClient(), 'myDatabase', executor) + def collection = db.getCollection('test') + + when: + collection.findOne() + + then: + expect executor.getReadOperation(), 
+                collection.getObjectCodec())
+                .filter(new BsonDocument())
+                .limit(-1)
+                .retryReads(true))
+
+        when: // Inherits from DB
+        db.setReadConcern(ReadConcern.MAJORITY)
+        collection.findOne()
+
+        then:
+        expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(),
+                collection.getObjectCodec())
+                .filter(new BsonDocument())
+                .limit(-1)
+                .retryReads(true))
+
+        when:
+        collection.setReadConcern(ReadConcern.LOCAL)
+        collection.findOne(new BasicDBObject(), new DBCollectionFindOptions().collation(collation))
+
+        then:
+        expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(),
+                collection.getObjectCodec())
+                .filter(new BsonDocument())
+                .limit(-1)
+                .collation(collation)
+                .retryReads(true))
+    }
+
+    def 'findAndRemove should create the correct FindAndDeleteOperation'() {
+        given:
+        def query = new BasicDBObject()
+        def cannedResult = BasicDBObject.parse('{value: {}}')
+        def executor = new TestOperationExecutor([cannedResult, cannedResult, cannedResult])
+        def db = new DB(getMongoClient(), 'myDatabase', executor)
+        def retryWrites = db.getMongoClient().getMongoClientOptions().getRetryWrites()
+        def collection = db.getCollection('test')
+
+        when:
+        collection.findAndRemove(query)
+
+        then:
+        expect executor.getWriteOperation(), isTheSameAs(new FindAndDeleteOperation(collection.getNamespace(),
+                WriteConcern.ACKNOWLEDGED, retryWrites, collection.getObjectCodec()).filter(new BsonDocument()))
+    }
+
+    def 'findAndModify should create the correct FindAndUpdateOperation'() {
+        given:
+        def query = new BasicDBObject()
+        def updateJson = '{$set: {a : 1}}'
+        def update = BasicDBObject.parse(updateJson)
+        def bsonUpdate = BsonDocument.parse(updateJson)
+        def cannedResult = BasicDBObject.parse('{value: {}}')
+        def executor = new TestOperationExecutor([cannedResult, cannedResult, cannedResult])
+        def db = new DB(getMongoClient(), 'myDatabase', executor)
+        def retryWrites = db.getMongoClient().getMongoClientOptions().getRetryWrites()
+        def collection = db.getCollection('test')
+
+        when:
+        collection.findAndModify(query, update)
+
+        then:
+        expect executor.getWriteOperation(), isTheSameAs(new FindAndUpdateOperation(collection.getNamespace(),
+                WriteConcern.ACKNOWLEDGED, retryWrites, collection.getObjectCodec(), bsonUpdate)
+                .filter(new BsonDocument()))
+
+        when: // With options
+        collection.findAndModify(query, new DBCollectionFindAndModifyOptions().update(update).collation(collation)
+                .arrayFilters(dbObjectArrayFilters).writeConcern(WriteConcern.W3))
+
+        then:
+        expect executor.getWriteOperation(), isTheSameAs(new FindAndUpdateOperation(collection.getNamespace(), WriteConcern.W3,
+                retryWrites, collection.getObjectCodec(), bsonUpdate)
+                .filter(new BsonDocument())
+                .collation(collation)
+                .arrayFilters(bsonDocumentWrapperArrayFilters))
+
+        where:
+        dbObjectArrayFilters << [null, [], [new BasicDBObject('i.b', 1)]]
+        bsonDocumentWrapperArrayFilters << [null, [], [new BsonDocumentWrapper(new BasicDBObject('i.b', 1),
+                DEFAULT_DBOBJECT_CODEC_FACTORY)]]
+    }
+
+    def 'findAndModify should create the correct FindAndReplaceOperation'() {
+        given:
+        def query = new BasicDBObject()
+        def replacementJson = '{a : 1}'
+        def replace = BasicDBObject.parse(replacementJson)
+        def bsonReplace = BsonDocument.parse(replacementJson)
+        def cannedResult = BasicDBObject.parse('{value: {}}')
+        def executor = new TestOperationExecutor([cannedResult, cannedResult, cannedResult])
+        def db = new DB(getMongoClient(), 'myDatabase', executor)
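+        // a replacement document without update operators maps to FindAndReplaceOperation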
+        def retryWrites = db.getMongoClient().getMongoClientOptions().getRetryWrites()
+        def collection = db.getCollection('test')
+
+        when:
+        collection.findAndModify(query, replace)
+
+        then:
+        expect executor.getWriteOperation(), isTheSameAs(new FindAndReplaceOperation(collection.getNamespace(),
+                WriteConcern.ACKNOWLEDGED, retryWrites, collection.getObjectCodec(), bsonReplace)
+                .filter(new BsonDocument()))
+
+        when: // With options
+        collection.findAndModify(query, new DBCollectionFindAndModifyOptions().update(replace).collation(collation)
+                .writeConcern(WriteConcern.W3))
+
+        then:
+        expect executor.getWriteOperation(), isTheSameAs(new FindAndReplaceOperation(collection.getNamespace(), WriteConcern.W3,
+                retryWrites, collection.getObjectCodec(), bsonReplace)
+                .filter(new BsonDocument())
+                .collation(collation))
+    }
+
+    def 'count should create the correct CountOperation'() {
+        given:
+        def executor = new TestOperationExecutor([42L, 42L, 42L])
+        def db = new DB(getMongoClient(), 'myDatabase', executor)
+        def collection = db.getCollection('test')
+
+        when:
+        collection.count()
+
+        then:
+        expect executor.getReadOperation(), isTheSameAs(new CountOperation(collection.getNamespace())
+                .filter(new BsonDocument()).retryReads(true))
+
+        when: // Inherits from DB
+        db.setReadConcern(ReadConcern.MAJORITY)
+        collection.count()
+
+        then:
+        expect executor.getReadOperation(), isTheSameAs(new CountOperation(collection.getNamespace())
+                .filter(new BsonDocument()).retryReads(true))
+        executor.getReadConcern() == ReadConcern.MAJORITY
+
+        when:
+        collection.setReadConcern(ReadConcern.LOCAL)
+        collection.count(new BasicDBObject(), new DBCollectionCountOptions().collation(collation))
+
+        then:
+        expect executor.getReadOperation(), isTheSameAs(new CountOperation(collection.getNamespace())
+                .filter(new BsonDocument()).retryReads(true)
+                .collation(collation))
+        executor.getReadConcern() == ReadConcern.LOCAL
+    }
+
+    def 'distinct should create the correct DistinctOperation'() {
+        given:
+        def cursor = Stub(BatchCursor) {
+            def count = 0
+            next() >> {
+                count++
+                [new BsonInt32(1), new BsonInt32(2)]
+            }
+            hasNext() >> {
+                count == 0
+            }
+        }
+        def executor = new TestOperationExecutor([cursor, cursor, cursor])
+        def db = new DB(getMongoClient(), 'myDatabase', executor)
+        def collection = db.getCollection('test')
+
+        when:
+        def distinctFieldValues = collection.distinct('field1')
+
+        then:
+        distinctFieldValues == [1, 2]
+        expect executor.getReadOperation(), isTheSameAs(new DistinctOperation(collection.getNamespace(), 'field1',
+                new BsonValueCodec()).filter(new BsonDocument()).retryReads(true))
+        executor.getReadConcern() == ReadConcern.DEFAULT
+
+        when: // Inherits from DB
+        db.setReadConcern(ReadConcern.MAJORITY)
+        collection.distinct('field1')
+
+        then:
+        expect executor.getReadOperation(), isTheSameAs(new DistinctOperation(collection.getNamespace(), 'field1',
+                new BsonValueCodec())
+                .filter(new BsonDocument()).retryReads(true))
+        executor.getReadConcern() == ReadConcern.MAJORITY
+
+        when:
+        collection.setReadConcern(ReadConcern.LOCAL)
+        collection.distinct('field1', new DBCollectionDistinctOptions().collation(collation))
+
+        then:
+        expect executor.getReadOperation(), isTheSameAs(new DistinctOperation(collection.getNamespace(), 'field1',
+                new BsonValueCodec()).collation(collation).retryReads(true))
+        executor.getReadConcern() == ReadConcern.LOCAL
+    }
+
+    def 'mapReduce should create the correct MapReduceWithInlineResultsOperation'() {
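+        // INLINE output is executed as a read operation; REPLACE output (next test) goes through the write path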
+        given:
+        def cursor = Stub(MapReduceBatchCursor) {
+            next() >> { }
+            hasNext() >> false
+        }
+        def executor = new TestOperationExecutor([cursor, cursor, cursor])
+        def db = new DB(getMongoClient(), 'myDatabase', executor)
+        def collection = db.getCollection('test')
+
+        when:
+        collection.mapReduce('map', 'reduce', null, MapReduceCommand.OutputType.INLINE, new BasicDBObject())
+
+        then:
+        expect executor.getReadOperation(), isTheSameAs(
+                new MapReduceWithInlineResultsOperation(collection.getNamespace(), new BsonJavaScript('map'),
+                        new BsonJavaScript('reduce'), collection.getDefaultDBObjectCodec())
+                        .verbose(true)
+                        .filter(new BsonDocument()))
+        executor.getReadConcern() == ReadConcern.DEFAULT
+
+        when: // Inherits from DB
+        db.setReadConcern(ReadConcern.LOCAL)
+        collection.mapReduce('map', 'reduce', null, MapReduceCommand.OutputType.INLINE, new BasicDBObject())
+
+        then:
+        expect executor.getReadOperation(), isTheSameAs(
+                new MapReduceWithInlineResultsOperation(collection.getNamespace(), new BsonJavaScript('map'),
+                        new BsonJavaScript('reduce'), collection.getDefaultDBObjectCodec())
+                        .verbose(true)
+                        .filter(new BsonDocument()))
+        executor.getReadConcern() == ReadConcern.LOCAL
+
+        when:
+        collection.setReadConcern(ReadConcern.MAJORITY)
+        def mapReduceCommand = new MapReduceCommand(collection, 'map', 'reduce', null, MapReduceCommand.OutputType.INLINE,
+                new BasicDBObject())
+        mapReduceCommand.setCollation(collation)
+        collection.mapReduce(mapReduceCommand)
+
+        then:
+        expect executor.getReadOperation(), isTheSameAs(
+                new MapReduceWithInlineResultsOperation(collection.getNamespace(), new BsonJavaScript('map'),
+                        new BsonJavaScript('reduce'), collection.getDefaultDBObjectCodec())
+                        .verbose(true)
+                        .filter(new BsonDocument())
+                        .collation(collation))
+        executor.getReadConcern() == ReadConcern.MAJORITY
+    }
+
+    def 'mapReduce should create the correct MapReduceToCollectionOperation'() {
+        given:
+        def stats = Stub(MapReduceStatistics)
+        def executor = new TestOperationExecutor([stats, stats, stats])
+        def db = new DB(getMongoClient(), 'myDatabase', executor)
+        def collection = db.getCollection('test')
+
+        when:
+        collection.mapReduce('map', 'reduce', 'myColl', MapReduceCommand.OutputType.REPLACE, new BasicDBObject())
+
+        then:
+        expect executor.getWriteOperation(), isTheSameAs(
+                new MapReduceToCollectionOperation(collection.getNamespace(), new BsonJavaScript('map'),
+                        new BsonJavaScript('reduce'), 'myColl', collection.getWriteConcern())
+                        .verbose(true)
+                        .filter(new BsonDocument()))
+
+        when: // Inherits from DB
+        collection.mapReduce('map', 'reduce', 'myColl', MapReduceCommand.OutputType.REPLACE, new BasicDBObject())
+
+        then:
+        expect executor.getWriteOperation(), isTheSameAs(
+                new MapReduceToCollectionOperation(collection.getNamespace(), new BsonJavaScript('map'),
+                        new BsonJavaScript('reduce'), 'myColl', collection.getWriteConcern())
+                        .verbose(true)
+                        .filter(new BsonDocument()))
+
+        when:
+        def mapReduceCommand = new MapReduceCommand(collection, 'map', 'reduce', 'myColl', MapReduceCommand.OutputType.REPLACE,
+                new BasicDBObject())
+        mapReduceCommand.setCollation(collation)
+        collection.mapReduce(mapReduceCommand)
+
+        then:
+        expect executor.getWriteOperation(), isTheSameAs(
+                new MapReduceToCollectionOperation(collection.getNamespace(), new BsonJavaScript('map'),
+                        new BsonJavaScript('reduce'), 'myColl', collection.getWriteConcern())
+                        .verbose(true)
+                        .filter(new BsonDocument())
+                        .collation(collation))
+    }
+
+    def 'aggregate should create the correct AggregateOperation'() {
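+        // without a $out stage the pipeline runs as a plain read; the $out variant below becomes an AggregateToCollectionOperation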
+        given:
+        def cursor = Stub(MapReduceBatchCursor) {
+            next() >> { }
+            hasNext() >> false
+        }
+        def executor = new TestOperationExecutor([cursor, cursor, cursor])
+        def db = new DB(getMongoClient(), 'myDatabase', executor)
+        def collection = db.getCollection('test')
+        def pipeline = [BasicDBObject.parse('{$match: {}}')]
+        def bsonPipeline = [BsonDocument.parse('{$match: {}}')]
+
+        when:
+        collection.aggregate(pipeline, AggregationOptions.builder().build())
+
+        then:
+        expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(),
+                bsonPipeline, collection.getDefaultDBObjectCodec()).retryReads(true))
+        executor.getReadConcern() == ReadConcern.DEFAULT
+
+        when: // Inherits from DB
+        db.setReadConcern(ReadConcern.MAJORITY)
+        collection.aggregate(pipeline, AggregationOptions.builder().build())
+
+        then:
+        expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(),
+                bsonPipeline, collection.getDefaultDBObjectCodec()).retryReads(true))
+        executor.getReadConcern() == ReadConcern.MAJORITY
+
+        when:
+        collection.setReadConcern(ReadConcern.LOCAL)
+        collection.aggregate(pipeline, AggregationOptions.builder().collation(collation).build())
+
+        then:
+        expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(),
+                bsonPipeline, collection.getDefaultDBObjectCodec()).collation(collation).retryReads(true))
+        executor.getReadConcern() == ReadConcern.LOCAL
+    }
+
+    def 'aggregate should create the correct AggregateToCollectionOperation'() {
+        given:
+        def executor = new TestOperationExecutor([null, null, null])
+        def db = new DB(getMongoClient(), 'myDatabase', executor)
+        def collection = db.getCollection('test')
+        def pipeline = [BasicDBObject.parse('{$match: {}}'), BasicDBObject.parse('{$out: "myColl"}')]
+        def bsonPipeline = [BsonDocument.parse('{$match: {}}'), BsonDocument.parse('{$out: "myColl"}')]
+
+        when:
+        collection.aggregate(pipeline, AggregationOptions.builder().build())
+
+        then:
+        expect executor.getReadOperation(), isTheSameAs(new AggregateToCollectionOperation(collection.getNamespace(),
+                bsonPipeline, collection.getReadConcern(), collection.getWriteConcern()))
+
+        when: // Inherits from DB
+        collection.aggregate(pipeline, AggregationOptions.builder().build())
+
+        then:
+        expect executor.getReadOperation(), isTheSameAs(new AggregateToCollectionOperation(collection.getNamespace(),
+                bsonPipeline, collection.getReadConcern(), collection.getWriteConcern()))
+
+        when:
+        collection.aggregate(pipeline, AggregationOptions.builder().collation(collation).build())
+
+        then:
+        expect executor.getReadOperation(), isTheSameAs(new AggregateToCollectionOperation(collection.getNamespace(),
+                bsonPipeline, collection.getReadConcern(), collection.getWriteConcern()).collation(collation))
+    }
+
+    def 'explainAggregate should create the correct AggregateOperation'() {
+        given:
+        def result = BsonDocument.parse('{ok: 1}')
+        def executor = new TestOperationExecutor([result, result, result])
+        def db = new DB(getMongoClient(), 'myDatabase', executor)
+        def collection = db.getCollection('test')
+        def options = AggregationOptions.builder().collation(collation).build()
+        def pipeline = [BasicDBObject.parse('{$match: {}}')]
+        def bsonPipeline = [BsonDocument.parse('{$match: {}}')]
+
+        when:
+        collection.explainAggregate(pipeline, options)
+
+        then:
+        expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(),
+                bsonPipeline, collection.getDefaultDBObjectCodec()).retryReads(true).collation(collation)
+                .asExplainableOperation(ExplainVerbosity.QUERY_PLANNER, new BsonDocumentCodec()))
+
+        when: // Inherits from DB
+        db.setReadConcern(ReadConcern.MAJORITY)
+        collection.explainAggregate(pipeline, options)
+
+        then:
+        expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(),
+                bsonPipeline, collection.getDefaultDBObjectCodec()).retryReads(true).collation(collation)
+                .asExplainableOperation(ExplainVerbosity.QUERY_PLANNER, new BsonDocumentCodec()))
+
+        when:
+        collection.setReadConcern(ReadConcern.LOCAL)
+        collection.explainAggregate(pipeline, options)
+
+        then:
+        expect executor.getReadOperation(), isTheSameAs(new AggregateOperation(collection.getNamespace(),
+                bsonPipeline, collection.getDefaultDBObjectCodec()).retryReads(true).collation(collation)
+                .asExplainableOperation(ExplainVerbosity.QUERY_PLANNER, new BsonDocumentCodec()))
+    }
+
+    def 'update should create the correct UpdateOperation'() {
+        given:
+        def result = Stub(WriteConcernResult)
+        def executor = new TestOperationExecutor([result, result, result])
+        def db = new DB(getMongoClient(), 'myDatabase', executor)
+        def retryWrites = db.getMongoClient().getMongoClientOptions().getRetryWrites()
+        def collection = db.getCollection('test')
+        def query = '{a: 1}'
+        def update = '{$set: {a: 2}}'
+
+        when:
+        def updateRequest = new UpdateRequest(BsonDocument.parse(query), BsonDocument.parse(update),
+                com.mongodb.internal.bulk.WriteRequest.Type.UPDATE).multi(false)
+        collection.update(BasicDBObject.parse(query), BasicDBObject.parse(update))
+
+        then:
+        expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForUpdate(collection.getNamespace(),
+                true, WriteConcern.ACKNOWLEDGED, retryWrites, asList(updateRequest)))
+
+        when: // Inherits from DB
+        db.setWriteConcern(WriteConcern.W3)
+        collection.update(BasicDBObject.parse(query), BasicDBObject.parse(update))
+
+        then:
+        expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForUpdate(collection.getNamespace(),
+                true, WriteConcern.W3, retryWrites, asList(updateRequest)))
+
+        when:
+        collection.setWriteConcern(WriteConcern.W1)
+        updateRequest.collation(collation)
+        collection.update(BasicDBObject.parse(query), BasicDBObject.parse(update),
+                new DBCollectionUpdateOptions().collation(collation).arrayFilters(dbObjectArrayFilters))
+
+        then:
+        expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForUpdate(collection.getNamespace(),
+                true, WriteConcern.W1, retryWrites, asList(updateRequest.arrayFilters(bsonDocumentWrapperArrayFilters))))
+
+        where:
+        dbObjectArrayFilters << [null, [], [new BasicDBObject('i.b', 1)]]
+        bsonDocumentWrapperArrayFilters << [null, [], [new BsonDocumentWrapper(new BasicDBObject('i.b', 1),
+                DEFAULT_DBOBJECT_CODEC_FACTORY)]]
+    }
+
+    def 'remove should create the correct DeleteOperation'() {
+        given:
+        def result = Stub(WriteConcernResult)
+        def executor = new TestOperationExecutor([result, result, result])
+        def db = new DB(getMongoClient(), 'myDatabase', executor)
+        def retryWrites = db.getMongoClient().getMongoClientOptions().getRetryWrites()
+        def collection = db.getCollection('test')
+        def query = '{a: 1}'
+
+        when:
+        def deleteRequest = new DeleteRequest(BsonDocument.parse(query))
+        collection.remove(BasicDBObject.parse(query))
+
+        then:
+        expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForDelete(collection.getNamespace(),
+                false, WriteConcern.ACKNOWLEDGED, retryWrites, asList(deleteRequest)))
+
+        when: // Inherits from DB
+        db.setWriteConcern(WriteConcern.W3)
+        collection.remove(BasicDBObject.parse(query))
+
+        then:
+        expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForDelete(collection.getNamespace(),
+                false, WriteConcern.W3, retryWrites, asList(deleteRequest)))
+
+        when:
+        collection.setWriteConcern(WriteConcern.W1)
+        deleteRequest.collation(collation)
+        collection.remove(BasicDBObject.parse(query), new DBCollectionRemoveOptions().collation(collation))
+
+        then:
+        expect executor.getWriteOperation(), isTheSameAs(createBulkWriteOperationForDelete(collection.getNamespace(),
+                false, WriteConcern.W1, retryWrites, asList(deleteRequest)))
+    }
+
+    def 'should create the correct MixedBulkWriteOperation'() {
+        given:
+        def result = Stub(com.mongodb.bulk.BulkWriteResult)
+        def executor = new TestOperationExecutor([result, result, result])
+        def db = new DB(getMongoClient(), 'myDatabase', executor)
+        def collection = db.getCollection('test')
+        def query = '{a: 1}'
+        def update = '{$set: {level: 1}}'
+        def insertedDocument = new BasicDBObject('_id', 1)
+        def insertRequest = new InsertRequest(new BsonDocumentWrapper(insertedDocument, collection.getDefaultDBObjectCodec()))
+        def updateRequest = new UpdateRequest(BsonDocument.parse(query), BsonDocument.parse(update),
+                com.mongodb.internal.bulk.WriteRequest.Type.UPDATE).multi(false).collation(collation)
+                .arrayFilters(bsonDocumentWrapperArrayFilters)
+        def deleteRequest = new DeleteRequest(BsonDocument.parse(query)).multi(false).collation(frenchCollation)
+        def writeRequests = asList(insertRequest, updateRequest, deleteRequest)
+
+        when:
+        def bulk = {
+            def bulkOp = ordered ? collection.initializeOrderedBulkOperation() : collection.initializeUnorderedBulkOperation()
+            bulkOp.insert(insertedDocument)
+            bulkOp.find(BasicDBObject.parse(query)).collation(collation).arrayFilters(dbObjectArrayFilters)
+                    .updateOne(BasicDBObject.parse(update))
+            bulkOp.find(BasicDBObject.parse(query)).collation(frenchCollation).removeOne()
+            bulkOp
+        }
+        bulk().execute()
+
+        then:
+        expect executor.getWriteOperation(), isTheSameAs(new MixedBulkWriteOperation(collection.getNamespace(),
+                writeRequests, ordered, WriteConcern.ACKNOWLEDGED, false))
+
+        when: // Inherits from DB
+        db.setWriteConcern(WriteConcern.W3)
+        bulk().execute()
+
+        then:
+        expect executor.getWriteOperation(), isTheSameAs(new MixedBulkWriteOperation(collection.getNamespace(),
+                writeRequests, ordered, WriteConcern.W3, false))
+
+        when:
+        collection.setWriteConcern(WriteConcern.W1)
+        bulk().execute()
+
+        then:
+        expect executor.getWriteOperation(), isTheSameAs(new MixedBulkWriteOperation(collection.getNamespace(),
+                writeRequests, ordered, WriteConcern.W1, false))
+
+        where:
+        ordered << [true, false, true]
+        dbObjectArrayFilters << [null, [], [new BasicDBObject('i.b', 1)]]
+        bsonDocumentWrapperArrayFilters << [null, [], [new BsonDocumentWrapper(new BasicDBObject('i.b', 1),
+                DEFAULT_DBOBJECT_CODEC_FACTORY)]]
+    }
+
+    def collation = Collation.builder()
+            .locale('en')
+            .caseLevel(true)
+            .collationCaseFirst(CollationCaseFirst.OFF)
+            .collationStrength(CollationStrength.IDENTICAL)
+            .numericOrdering(true)
+            .collationAlternate(CollationAlternate.SHIFTED)
+            .collationMaxVariable(CollationMaxVariable.SPACE)
+            .backwards(true)
+            .build()
+
+    def frenchCollation = Collation.builder().locale('fr').build()
+}
diff --git a/driver-legacy/src/test/functional/com/mongodb/DBCollectionTest.java b/driver-legacy/src/test/functional/com/mongodb/DBCollectionTest.java
new file mode 100644
index 00000000000..c4a238ed73b
--- /dev/null
+++ b/driver-legacy/src/test/functional/com/mongodb/DBCollectionTest.java
@@ -0,0 +1,1343 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb;
+
+import com.mongodb.client.model.DBCollectionCountOptions;
+import org.bson.BSONObject;
+import org.bson.BsonBinarySubType;
+import org.bson.BsonBinaryWriter;
+import org.bson.BsonObjectId;
+import org.bson.Document;
+import org.bson.io.OutputBuffer;
+import org.bson.types.BSONTimestamp;
+import org.bson.types.Binary;
+import org.bson.types.Code;
+import org.bson.types.CodeWScope;
+import org.bson.types.MaxKey;
+import org.bson.types.MinKey;
+import org.bson.types.ObjectId;
+import org.junit.Test;
+import org.junit.jupiter.api.Tag;
+
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Date;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
+import java.util.regex.Pattern;
+
+import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint;
+import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint;
+import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet;
+import static com.mongodb.ClusterFixture.isSharded;
+import static com.mongodb.DBObjectMatchers.hasSubdocument;
+import static java.util.Arrays.asList;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.hamcrest.CoreMatchers.hasItem;
+import static org.hamcrest.CoreMatchers.instanceOf;
+import static org.hamcrest.CoreMatchers.is;
+import static org.hamcrest.CoreMatchers.notNullValue;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.junit.Assert.assertArrayEquals;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeThat;
+
+public class DBCollectionTest extends DatabaseTestCase {
+
+    @Test
+    public void shouldCreateIdOnInsertIfThereIsNone() {
+        BasicDBObject document = new BasicDBObject();
+        collection.insert(document);
+        assertEquals(ObjectId.class, document.get("_id").getClass());
+        assertEquals(document, collection.findOne());
+    }
+
+    @Test
+    public void shouldCreateIdOnInsertIfTheValueIsNull() {
+        BasicDBObject document = new BasicDBObject("_id", null);
+        collection.insert(document);
+        assertEquals(ObjectId.class, document.get("_id").getClass());
+        assertEquals(document, collection.findOne());
+    }
+
+    @Test
+    public void saveShouldInsertADocumentWithNullId() {
+        BasicDBObject document = new BasicDBObject("_id", null);
+        collection.save(document);
+        assertEquals(ObjectId.class, document.get("_id").getClass());
document.get("_id").getClass()); + assertEquals(document, collection.findOne()); + } + + @Test + public void saveShouldInsertADocumentWithNewObjectId() { + ObjectId newObjectId = new ObjectId(); + BasicDBObject document = new BasicDBObject("_id", newObjectId); + collection.save(document); + assertEquals(newObjectId, document.get("_id")); + assertEquals(document, collection.findOne()); + } + + @Test + public void testDefaultSettings() { + assertNull(collection.getDBDecoderFactory()); + assertNull(collection.getDBEncoderFactory()); + assertEquals(BasicDBObject.class, collection.getObjectClass()); + assertEquals(ReadPreference.primary(), collection.getReadPreference()); + assertEquals(WriteConcern.ACKNOWLEDGED, collection.getWriteConcern()); + } + + @Test + public void insertEmptyListShouldThrowIllegalArgumentException() { + try { + collection.insert(Collections.emptyList()); + fail("Should throw IllegalArgumentException"); + } catch (IllegalArgumentException e) { + // empty + } + } + + @Test + public void testInsert() { + WriteResult res = collection.insert(new BasicDBObject("_id", 1).append("x", 2)); + assertNotNull(res); + assertEquals(1L, collection.count()); + assertEquals(new BasicDBObject("_id", 1).append("x", 2), collection.findOne()); + } + + @Test + public void testFindWithNullQuery() { + collection.insert(new BasicDBObject("_id", 1).append("x", 2)); + assertEquals(new BasicDBObject("_id", 1).append("x", 2), collection.find(null).next()); + } + + @Test + public void testInsertDuplicateKeyException() { + DBObject doc = new BasicDBObject("_id", 1); + collection.insert(doc, WriteConcern.ACKNOWLEDGED); + try { + collection.insert(doc, WriteConcern.ACKNOWLEDGED); + fail("should throw DuplicateKey exception"); + } catch (DuplicateKeyException e) { + assertThat(e.getCode(), is(11000)); + } + } + + @Test + public void testSaveDuplicateKeyException() { + collection.createIndex(new BasicDBObject("x", 1), new BasicDBObject("unique", true)); + collection.save(new BasicDBObject("x", 1), WriteConcern.ACKNOWLEDGED); + try { + collection.save(new BasicDBObject("x", 1), WriteConcern.ACKNOWLEDGED); + fail("should throw DuplicateKey exception"); + } catch (DuplicateKeyException e) { + assertThat(e.getCode(), is(11000)); + } + } + + @Test + public void testSaveWithIdDefined() { + DBObject document = new BasicDBObject("_id", new ObjectId()).append("a", Math.random()); + collection.save(document); + assertThat(collection.count(), is(1L)); + assertEquals(document, collection.findOne()); + } + + @Test + public void testUpdate() { + WriteResult res = collection.update(new BasicDBObject("_id", 1), + new BasicDBObject("$set", new BasicDBObject("x", 2)), + true, false); + assertNotNull(res); + assertEquals(1L, collection.count()); + assertEquals(new BasicDBObject("_id", 1).append("x", 2), collection.findOne()); + } + + @Test + public void testObjectClass() { + collection.setObjectClass(MyDBObject.class); + collection.insert(new BasicDBObject("_id", 1)); + DBObject obj = collection.findOne(); + assertEquals(MyDBObject.class, obj.getClass()); + } + + @Test + public void testDotInDBObjectSucceeds() { + collection.save(new BasicDBObject("x.y", 1)); + collection.save(new BasicDBObject("x", new BasicDBObject("a.b", 1))); + + Map map = new HashMap<>(); + map.put("a.b", 1); + collection.save(new BasicDBObject("x", map)); + } + + @Test + public void testInsertWithDBEncoder() { + List objects = new ArrayList<>(); + objects.add(new BasicDBObject("a", 1)); + collection.insert(objects, WriteConcern.ACKNOWLEDGED, new 
+        assertEquals(MyEncoder.getConstantObject(), collection.findOne());
+    }
+
+    @Test
+    public void testInsertWithDBEncoderFactorySet() {
+        collection.setDBEncoderFactory(new MyEncoderFactory());
+        List<DBObject> objects = new ArrayList<>();
+        objects.add(new BasicDBObject("a", 1));
+        collection.insert(objects, WriteConcern.ACKNOWLEDGED, null);
+        assertEquals(MyEncoder.getConstantObject(), collection.findOne());
+        collection.setDBEncoderFactory(null);
+    }
+
+    @Test(expected = MongoCommandException.class)
+    public void testCreateIndexWithInvalidIndexType() {
+        DBObject index = new BasicDBObject("x", "funny");
+        collection.createIndex(index);
+    }
+
+    @Test
+    public void testCreateIndexByName() {
+        collection.createIndex("x");
+        assertEquals(2, collection.getIndexInfo().size());
+
+        assertNotNull(getIndexInfoForNameStartingWith("x"));
+    }
+
+    @Test
+    public void testCreateIndexAsAscending() {
+        collection.createIndex(new BasicDBObject("x", 1));
+        assertEquals(2, collection.getIndexInfo().size());
+
+        assertNotNull(getIndexInfoForNameStartingWith("x_1"));
+    }
+
+    @Test
+    public void testCreateIndexAsDescending() {
+        DBObject index = new BasicDBObject("x", -1);
+        collection.createIndex(index);
+
+        DBObject indexInfo = getIndexInfoForNameStartingWith("x_-1");
+        assertEquals(indexInfo.get("key"), index);
+    }
+
+    @Test
+    public void testCreateIndexByKeysName() {
+        collection.createIndex(new BasicDBObject("x", 1), "zulu");
+        assertEquals(2, collection.getIndexInfo().size());
+        DBObject indexInfo = getIndexInfoForNameStartingWith("zulu");
+
+        assertEquals("zulu", indexInfo.get("name"));
+    }
+
+    @Test
+    public void testCreateIndexByKeysNameUnique() {
+        collection.createIndex(new BasicDBObject("x", 1), "zulu", true);
+        assertEquals(2, collection.getIndexInfo().size());
+        DBObject indexInfo = getIndexInfoForNameStartingWith("zulu");
+
+        assertEquals("zulu", indexInfo.get("name"));
+        assertTrue((Boolean) indexInfo.get("unique"));
+    }
+
+    @Test
+    public void testCreateIndexAs2d() {
+        DBObject index = new BasicDBObject("x", "2d");
+        collection.createIndex(index);
+
+        DBObject indexInfo = getIndexInfoForNameStartingWith("x");
+        assertEquals(indexInfo.get("key"), index);
+    }
+
+    @Test
+    public void testCreateIndexAs2dsphere() {
+        // when
+        DBObject index = new BasicDBObject("x", "2dsphere");
+        collection.createIndex(index);
+
+        // then
+        DBObject indexInfo = getIndexInfoForNameStartingWith("x");
+        assertEquals(indexInfo.get("key"), index);
+    }
+
+    @Test
+    public void testCreateIndexAsText() {
+        DBObject index = new BasicDBObject("x", "text");
+        collection.createIndex(index);
+
+        DBObject indexInfo = getIndexInfoForNameStartingWith("x");
+        assertEquals(indexInfo.get("name"), "x_text");
+        assertThat(indexInfo.get("weights"), notNullValue());
+    }
+
+    @Test
+    public void testRemoveWithDBEncoder() {
+        DBObject document = new BasicDBObject("x", 1);
+        collection.insert(document);
+        collection.insert(MyEncoder.getConstantObject());
+        collection.remove(new BasicDBObject("x", 1), WriteConcern.ACKNOWLEDGED, new MyEncoder());
+
+        assertEquals(1, collection.count());
+        assertEquals(document, collection.findOne());
+    }
+
+    @Test
+    public void testCount() {
+        for (int i = 0; i < 10; i++) {
+            collection.insert(new BasicDBObject("_id", i));
+        }
+        assertEquals(10, collection.getCount());
+        assertEquals(5, collection.getCount(new BasicDBObject("_id", new BasicDBObject("$lt", 5))));
+        assertEquals(4, collection.getCount(new BasicDBObject("_id", new BasicDBObject("$lt", 5)),
+                new DBCollectionCountOptions().limit(100).skip(1)));
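+        // _id < 5 matches five documents: skipping one leaves 4, and a limit of 4 caps the count at 4 below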
+        assertEquals(4, collection.getCount(new BasicDBObject("_id", new BasicDBObject("$lt", 5)),
+                new DBCollectionCountOptions().limit(4)));
+    }
+
+    @Test
+    public void testUpdateWithDBEncoder() {
+        DBObject document = new BasicDBObject("_id", 1).append("x", 1);
+        collection.insert(document);
+        collection.update(new BasicDBObject("x", 1),
+                new BasicDBObject("y", false),
+                true,
+                false,
+                WriteConcern.ACKNOWLEDGED,
+                new MyEncoder());
+
+        assertEquals(1, collection.count());
+        assertThat(collection.find(), hasItem(MyEncoder.getConstantObject()));
+    }
+
+    @Test
+    public void testSaveWithDBEncoder() {
+        try {
+            DBObject document = new BasicDBObject("_id", 1).append("x", 1);
+            collection.setDBEncoderFactory(new MyEncoderFactory());
+            collection.save(document);
+
+            assertEquals(1, collection.count());
+            assertThat(collection.find(), hasItem(MyEncoder.getConstantObject()));
+
+            collection.save(document);
+
+            assertEquals(1, collection.count());
+            assertThat(collection.find(), hasItem(MyEncoder.getConstantObject()));
+        } finally {
+            collection.setDBEncoderFactory(null);
+        }
+    }
+
+    @Test
+    public void testFindAndRemove() {
+        DBObject doc = new BasicDBObject("_id", 1).append("x", true);
+        collection.insert(doc);
+        DBObject newDoc = collection.findAndRemove(new BasicDBObject("_id", 1));
+        assertEquals(doc, newDoc);
+        assertEquals(0, collection.count());
+    }
+
+    @Test
+    public void testFindAndReplace() {
+        DBObject doc1 = new BasicDBObject("_id", 1).append("x", true);
+        DBObject doc2 = new BasicDBObject("_id", 1).append("y", false);
+
+        collection.insert(doc1);
+        DBObject newDoc = collection.findAndModify(new BasicDBObject("x", true), doc2);
+        assertEquals(doc1, newDoc);
+        assertEquals(doc2, collection.findOne());
+    }
+
+    @Test
+    public void testFindAndReplaceOrInsert() {
+        collection.insert(new BasicDBObject("_id", 1).append("p", "abc"));
+
+        DBObject doc = new BasicDBObject("_id", 2).append("p", "foo");
+
+        DBObject newDoc = collection.findAndModify(new BasicDBObject("p", "bar"),
+                null,
+                null,
+                false,
+                doc,
+                false,
+                true);
+        assertNull(newDoc);
+        assertEquals(doc, collection.findOne(null, null, new BasicDBObject("_id", -1)));
+    }
+
+    @Test
+    public void testFindAndUpdate() {
+        collection.insert(new BasicDBObject("_id", 1).append("x", true));
+        DBObject newDoc = collection.findAndModify(new BasicDBObject("x", true),
+                new BasicDBObject("$set", new BasicDBObject("x", false)));
+        assertNotNull(newDoc);
+        assertEquals(new BasicDBObject("_id", 1).append("x", true), newDoc);
+        assertEquals(new BasicDBObject("_id", 1).append("x", false), collection.findOne());
+    }
+
+    @Test
+    public void findAndUpdateAndReturnNew() {
+        DBObject newDoc = collection.findAndModify(new BasicDBObject("x", true),
+                null,
+                null,
+                false,
+                new BasicDBObject("$set", new BasicDBObject("x", false)),
+                true,
+                true);
+        assertNotNull(newDoc);
+        assertThat(newDoc, hasSubdocument(new BasicDBObject("x", false)));
+    }
+
+    @Test(expected = MongoExecutionTimeoutException.class)
+    public void testFindAndUpdateTimeout() {
+        assumeThat(ClusterFixture.isAuthenticated(), is(false));
+        collection.insert(new BasicDBObject("_id", 1));
+        enableMaxTimeFailPoint();
+        try {
+            collection.findAndModify(new BasicDBObject("_id", 1), null, null, false,
+                    new BasicDBObject("$set", new BasicDBObject("x", 1)), false, false, 1, TimeUnit.SECONDS);
+        } finally {
+            disableMaxTimeFailPoint();
+        }
+    }
+
+    @Test(expected = MongoExecutionTimeoutException.class)
+    public void testFindAndReplaceTimeout() {
+        assumeThat(isSharded(), is(false));
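+        // the failpoint forces operations that specify maxTimeMS to fail with a server-side timeout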
+        collection.insert(new BasicDBObject("_id", 1));
+        enableMaxTimeFailPoint();
+        try {
+            collection.findAndModify(new BasicDBObject("_id", 1), null, null, false, new BasicDBObject("x", 1), false, false,
+                    1, TimeUnit.SECONDS);
+        } finally {
+            disableMaxTimeFailPoint();
+        }
+    }
+
+    @Test(expected = MongoExecutionTimeoutException.class)
+    public void testFindAndRemoveTimeout() {
+        assumeThat(isSharded(), is(false));
+        collection.insert(new BasicDBObject("_id", 1));
+        enableMaxTimeFailPoint();
+        try {
+            collection.findAndModify(new BasicDBObject("_id", 1), null, null, true, null, false, false, 1, TimeUnit.SECONDS);
+        } finally {
+            disableMaxTimeFailPoint();
+        }
+    }
+
+    @Test
+    @Tag("Slow")
+    public void testFindAndReplaceA16MDocument() {
+        BasicDBObject documentWithJustId = new BasicDBObject("_id", 42);
+        DBObject foundDocument = collection.findAndModify(documentWithJustId, new BasicDBObject("_id", 1), null, false,
+                new BasicDBObject("_id", 42).append("b", new byte[16 * 1024 * 1024 - 30]), true, true);
+        assertEquals(documentWithJustId, foundDocument);
+    }
+
+    @Test
+    public void testGenericBinary() {
+        byte[] data = {1, 2, 3};
+        collection.insert(new BasicDBObject("binary", new Binary(data)));
+        assertArrayEquals(data, (byte[]) collection.findOne().get("binary"));
+    }
+
+    @Test
+    public void testOtherBinary() {
+        byte[] data = {1, 2, 3};
+        Binary binaryValue = new Binary(BsonBinarySubType.USER_DEFINED, data);
+        collection.insert(new BasicDBObject("binary", binaryValue));
+        assertEquals(binaryValue, collection.findOne().get("binary"));
+    }
+
+    @Test
+    public void testUUID() {
+        UUID uuid = UUID.randomUUID();
+        collection.insert(new BasicDBObject("uuid", uuid));
+        assertEquals(uuid, collection.findOne().get("uuid"));
+    }
+
+    @Test
+    public void testDotKeysArraySucceeds() {
+        DBObject obj = new BasicDBObject("x", 1).append("y", 2)
+                .append("array", new Object[]{new BasicDBObject("foo.bar", "baz")});
+        collection.insert(obj);
+    }
+
+    @Test
+    public void testDotKeysListSucceeds() {
+        DBObject obj = new BasicDBObject("x", 1).append("y", 2)
+                .append("array", asList(new BasicDBObject("foo.bar", "baz")));
+        collection.insert(obj);
+    }
+
+    @Test
+    public void testDotKeysMapInArraySucceeds() {
+        Map<String, Integer> map = new HashMap<>(1);
+        map.put("foo.bar", 2);
+        DBObject obj = new BasicDBObject("x", 1).append("y", 2).append("array", new Object[]{map});
+        collection.insert(obj);
+    }
+
+    @Test
+    public void testPathToClassMapDecoding() {
+        collection.setObjectClass(TopLevelDBObject.class);
+        collection.setInternalClass("a", NestedOneDBObject.class);
+        collection.setInternalClass("a.b", NestedTwoDBObject.class);
+
+        DBObject doc = new TopLevelDBObject()
+                .append("a", new NestedOneDBObject().append("b", asList(new NestedTwoDBObject()))
+                        .append("c", new BasicDBObject()));
+        collection.save(doc);
+        DBObject found = collection.findOne();
+        assertEquals(doc, found);
+    }
+
+    @Test
+    public void shouldAcceptDocumentsWithAllValidValueTypes() {
+        BasicDBObject doc = new BasicDBObject();
+        doc.append("_id", new ObjectId());
+        doc.append("bool", true);
+        doc.append("int", 3);
+        doc.append("short", (short) 4);
+        doc.append("long", 5L);
+        doc.append("str", "Hello MongoDB");
+        doc.append("float", 6.0f);
+        doc.append("double", 1.1);
+        doc.append("date", new Date());
+        doc.append("ts", new BSONTimestamp(5, 1));
+        doc.append("pattern", Pattern.compile(".*"));
+        doc.append("minKey", new MinKey());
+        doc.append("maxKey", new MaxKey());
+        doc.append("js", new Code("code"));
doc.append("jsWithScope", new CodeWScope("code", new BasicDBObject())); + doc.append("null", null); + doc.append("uuid", UUID.randomUUID()); + doc.append("db ref", new com.mongodb.DBRef("test", new ObjectId())); + doc.append("binary", new Binary((byte) 42, new byte[]{10, 11, 12})); + doc.append("byte array", new byte[]{1, 2, 3}); + doc.append("int array", new int[]{4, 5, 6}); + doc.append("list", asList(7, 8, 9)); + doc.append("doc list", asList(new Document("x", 1), new Document("x", 2))); + + collection.insert(doc); + DBObject found = collection.findOne(); + assertNotNull(found); + assertEquals(ObjectId.class, found.get("_id").getClass()); + assertEquals(Boolean.class, found.get("bool").getClass()); + assertEquals(Integer.class, found.get("int").getClass()); + assertEquals(Integer.class, found.get("short").getClass()); + assertEquals(Long.class, found.get("long").getClass()); + assertEquals(String.class, found.get("str").getClass()); + assertEquals(Double.class, found.get("float").getClass()); + assertEquals(Double.class, found.get("double").getClass()); + assertEquals(Date.class, found.get("date").getClass()); + assertEquals(BSONTimestamp.class, found.get("ts").getClass()); + assertEquals(Pattern.class, found.get("pattern").getClass()); + assertEquals(MinKey.class, found.get("minKey").getClass()); + assertEquals(MaxKey.class, found.get("maxKey").getClass()); + assertEquals(Code.class, found.get("js").getClass()); + assertEquals(CodeWScope.class, found.get("jsWithScope").getClass()); + assertNull(found.get("null")); + assertEquals(UUID.class, found.get("uuid").getClass()); + assertEquals(DBRef.class, found.get("db ref").getClass()); + assertEquals(Binary.class, found.get("binary").getClass()); + assertEquals(byte[].class, found.get("byte array").getClass()); + assertTrue(found.get("int array") instanceof List); + assertTrue(found.get("list") instanceof List); + assertTrue(found.get("doc list") instanceof List); + } + + + @Test + public void testCompoundCodecWithDefaultValues() { + assertThat(collection.getObjectCodec(), instanceOf(CompoundDBObjectCodec.class)); + CompoundDBObjectCodec codec = (CompoundDBObjectCodec) collection.getObjectCodec(); + assertThat(codec.getDecoder(), instanceOf(DBObjectCodec.class)); + assertThat(codec.getEncoder(), instanceOf(DBObjectCodec.class)); + } + + @Test + public void testCompoundCodecWithCustomEncoderFactory() { + collection.setDBEncoderFactory(() -> new DefaultDBEncoder()); + assertThat(collection.getObjectCodec(), instanceOf(CompoundDBObjectCodec.class)); + CompoundDBObjectCodec codec = (CompoundDBObjectCodec) collection.getObjectCodec(); + assertThat(codec.getEncoder(), instanceOf(DBEncoderFactoryAdapter.class)); + } + + @Test + public void testCompoundCodecWithCustomDecoderFactory() { + collection.setDBDecoderFactory(() -> new DefaultDBDecoder()); + assertThat(collection.getObjectCodec(), instanceOf(CompoundDBObjectCodec.class)); + CompoundDBObjectCodec codec = (CompoundDBObjectCodec) collection.getObjectCodec(); + assertThat(codec.getDecoder(), instanceOf(DBDecoderAdapter.class)); + } + + @Test + public void testBulkWriteOperation() { + // given + collection.insert(Arrays.asList(new BasicDBObject("_id", 3), + new BasicDBObject("_id", 4), + new BasicDBObject("_id", 5), + new BasicDBObject("_id", 6).append("z", 1), + new BasicDBObject("_id", 7).append("z", 1), + new BasicDBObject("_id", 8).append("z", 2), + new BasicDBObject("_id", 9).append("z", 2))); + + // when + BulkWriteOperation bulkWriteOperation = 
+        bulkWriteOperation.insert(new BasicDBObject("_id", 0));
+        ObjectId upsertOneId = new ObjectId();
+        ObjectId upsertTwoId = new ObjectId();
+        bulkWriteOperation.find(new BasicDBObject("_id", upsertOneId)).upsert()
+                .updateOne(new BasicDBObject("$set", new BasicDBObject("x", 2)));
+        bulkWriteOperation.find(new BasicDBObject("_id", upsertTwoId)).upsert()
+                .replaceOne(new BasicDBObject("_id", upsertTwoId).append("y", 2));
+        bulkWriteOperation.find(new BasicDBObject("_id", 3)).removeOne();
+        bulkWriteOperation.find(new BasicDBObject("_id", 4)).updateOne(new BasicDBObject("$set", new BasicDBObject("x", 1)));
+        bulkWriteOperation.find(new BasicDBObject("_id", 5)).replaceOne(new BasicDBObject("_id", 5).append("y", 1));
+        bulkWriteOperation.find(new BasicDBObject("z", 1)).remove();
+        bulkWriteOperation.find(new BasicDBObject("z", 2)).update(new BasicDBObject("$set", new BasicDBObject("z", 3)));
+
+        BulkWriteResult result = bulkWriteOperation.execute();
+
+        // then
+        assertTrue(bulkWriteOperation.isOrdered());
+        assertTrue(result.isAcknowledged());
+        assertEquals(1, result.getInsertedCount());
+        assertEquals(4, result.getMatchedCount());
+        assertEquals(3, result.getRemovedCount());
+        assertEquals(4, result.getModifiedCount());
+        assertEquals(asList(new BulkWriteUpsert(1, upsertOneId),
+                new BulkWriteUpsert(2, upsertTwoId)),
+                result.getUpserts());
+
+        assertEquals(Arrays.asList(new BasicDBObject("_id", 0),
+                new BasicDBObject("_id", 4).append("x", 1),
+                new BasicDBObject("_id", 5).append("y", 1),
+                new BasicDBObject("_id", 8).append("z", 3),
+                new BasicDBObject("_id", 9).append("z", 3),
+                new BasicDBObject("_id", upsertOneId).append("x", 2),
+                new BasicDBObject("_id", upsertTwoId).append("y", 2)),
+                collection.find().sort(new BasicDBObject("_id", 1)).toArray());
+
+        // when
+        try {
+            bulkWriteOperation.insert(new BasicDBObject());
+            fail();
+        } catch (IllegalStateException e) {
+            // then should throw
+        }
+
+        // when
+        try {
+            bulkWriteOperation.find(new BasicDBObject());
+            fail();
+        } catch (IllegalStateException e) {
+            // then should throw
+        }
+
+        // when
+        try {
+            bulkWriteOperation.execute();
+            fail();
+        } catch (IllegalStateException e) {
+            // then should throw
+        }
+
+        // when
+        try {
+            bulkWriteOperation.execute(WriteConcern.ACKNOWLEDGED);
+            fail();
+        } catch (IllegalStateException e) {
+            // then should throw
+        }
+    }
+
+    @Test
+    public void testOrderedBulkWriteOperation() {
+        // given
+        collection.insert(new BasicDBObject("_id", 1));
+
+        // when
+        BulkWriteOperation bulkWriteOperation = collection.initializeOrderedBulkOperation();
+        bulkWriteOperation.insert(new BasicDBObject("_id", 0));
+        bulkWriteOperation.insert(new BasicDBObject("_id", 1));
+        bulkWriteOperation.insert(new BasicDBObject("_id", 2));
+
+        try {
+            bulkWriteOperation.execute();
+            fail();
+        } catch (BulkWriteException e) {
+            assertEquals(1, e.getWriteErrors().size());
+        }
+
+        assertEquals(Arrays.asList(new BasicDBObject("_id", 0), new BasicDBObject("_id", 1)),
+                collection.find().sort(new BasicDBObject("_id", 1)).toArray());
+    }
+
+    @Test
+    public void testUnorderedBulkWriteOperation() {
+        // given
+        collection.insert(new BasicDBObject("_id", 1));
+
+        // when
+        BulkWriteOperation bulkWriteOperation = collection.initializeUnorderedBulkOperation();
+        bulkWriteOperation.insert(new BasicDBObject("_id", 0));
+        bulkWriteOperation.insert(new BasicDBObject("_id", 1));
+        bulkWriteOperation.insert(new BasicDBObject("_id", 2));
+
+        try {
+            bulkWriteOperation.execute();
+            fail();
+        } catch (BulkWriteException e) {
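+            // unordered execution continues past the duplicate-key error, so _id 0 and 2 are still inserted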
+            assertEquals(1, e.getWriteErrors().size());
+        }
+
+        assertEquals(Arrays.asList(new BasicDBObject("_id", 0), new BasicDBObject("_id", 1), new BasicDBObject("_id", 2)),
+                collection.find().sort(new BasicDBObject("_id", 1)).toArray());
+    }
+
+    @Test
+    public void bulkWriteOperationShouldGenerateIdsForInserts() {
+        // when
+        BulkWriteOperation bulkWriteOperation = collection.initializeOrderedBulkOperation();
+        BasicDBObject document = new BasicDBObject();
+        bulkWriteOperation.insert(document);
+        bulkWriteOperation.execute();
+
+        // then
+        assertTrue(document.containsField("_id"));
+        assertTrue(document.get("_id") instanceof ObjectId);
+    }
+
+    @Test(expected = BulkWriteException.class)
+    public void testBulkWriteException() {
+        // given
+        collection.insert(new BasicDBObject("_id", 1));
+
+        // when
+        BulkWriteOperation bulkWriteOperation = collection.initializeOrderedBulkOperation();
+        bulkWriteOperation.insert(new BasicDBObject("_id", 1));
+        bulkWriteOperation.execute();
+    }
+
+    @Test
+    public void testWriteConcernExceptionOnInsert() throws UnknownHostException {
+        assumeThat(isDiscoverableReplicaSet(), is(true));
+        try {
+            WriteResult res = collection.insert(new BasicDBObject(), new WriteConcern(5).withWTimeout(1, MILLISECONDS));
+            fail("Write should have failed but succeeded with result " + res);
+        } catch (WriteConcernException e) {
+            assertEquals(0, e.getWriteConcernResult().getCount());
+        }
+    }
+
+    @Test
+    public void testWriteConcernExceptionOnUpdate() throws UnknownHostException {
+        assumeThat(isDiscoverableReplicaSet(), is(true));
+        ObjectId id = new ObjectId();
+        collection.insert(new BasicDBObject("_id", id));
+        try {
+            WriteResult res = collection.update(new BasicDBObject("_id", id), new BasicDBObject("$set", new BasicDBObject("x", 1)),
+                    false, false, new WriteConcern(5).withWTimeout(1, MILLISECONDS));
+            fail("Write should have failed but succeeded with result " + res);
+        } catch (WriteConcernException e) {
+            assertEquals(1, e.getWriteConcernResult().getCount());
+            assertTrue(e.getWriteConcernResult().isUpdateOfExisting());
+            assertNull(e.getWriteConcernResult().getUpsertedId());
+        }
+    }
+
+    @Test
+    public void testWriteConcernExceptionOnFindAndModify() throws UnknownHostException {
+        assumeThat(isDiscoverableReplicaSet(), is(true));
+
+        ObjectId id = new ObjectId();
+        WriteConcern writeConcern = new WriteConcern(5, 1);
+
+        // FindAndUpdateOperation path
+        try {
+            collection.findAndModify(new BasicDBObject("_id", id), null, null, false,
+                    new BasicDBObject("$set", new BasicDBObject("x", 1)),
+                    true, true, writeConcern);
+            fail("Expected findAndModify to error");
+        } catch (WriteConcernException e) {
+            assertNotNull(e.getServerAddress());
+            assertNotNull(e.getErrorMessage());
+            assertTrue(e.getCode() > 0);
+            assertTrue(e.getWriteConcernResult().wasAcknowledged());
+            assertEquals(1, e.getWriteConcernResult().getCount());
+            assertFalse(e.getWriteConcernResult().isUpdateOfExisting());
+            assertEquals(new BsonObjectId(id), e.getWriteConcernResult().getUpsertedId());
+        }
+
+        // FindAndReplaceOperation path
+        try {
+            collection.findAndModify(new BasicDBObject("_id", id), null, null, false,
+                    new BasicDBObject("x", 1),
+                    true, true, writeConcern);
+            fail("Expected findAndModify to error");
+        } catch (WriteConcernException e) {
+            assertNotNull(e.getServerAddress());
+            assertNotNull(e.getErrorMessage());
+            assertTrue(e.getCode() > 0);
+            assertTrue(e.getWriteConcernResult().wasAcknowledged());
+            assertEquals(1, e.getWriteConcernResult().getCount());
+            assertTrue(e.getWriteConcernResult().isUpdateOfExisting());
+            assertNull(e.getWriteConcernResult().getUpsertedId());
+        }
+
+        // FindAndDeleteOperation path
+        try {
+            collection.findAndModify(new BasicDBObject("_id", id), null, null, true,
+                    null,
+                    false, false, writeConcern);
+            fail("Expected findAndModify to error");
+        } catch (WriteConcernException e) {
+            assertNotNull(e.getServerAddress());
+            assertNotNull(e.getErrorMessage());
+            assertTrue(e.getCode() > 0);
+            assertTrue(e.getWriteConcernResult().wasAcknowledged());
+            assertEquals(1, e.getWriteConcernResult().getCount());
+            assertFalse(e.getWriteConcernResult().isUpdateOfExisting());
+            assertNull(e.getWriteConcernResult().getUpsertedId());
+        }
+    }
+
+    @Test
+    public void testWriteConcernExceptionOnUpsert() throws UnknownHostException {
+        assumeThat(isDiscoverableReplicaSet(), is(true));
+        ObjectId id = new ObjectId();
+        try {
+            WriteResult res = collection.update(new BasicDBObject("_id", id), new BasicDBObject("$set", new BasicDBObject("x", 1)),
+                    true, false, new WriteConcern(5).withWTimeout(1, MILLISECONDS));
+            fail("Write should have failed but succeeded with result " + res);
+        } catch (WriteConcernException e) {
+            assertEquals(1, e.getWriteConcernResult().getCount());
+            assertFalse(e.getWriteConcernResult().isUpdateOfExisting());
+            assertEquals(new BsonObjectId(id), e.getWriteConcernResult().getUpsertedId());
+        }
+    }
+
+    @Test
+    public void testWriteConcernExceptionOnRemove() throws UnknownHostException {
+        assumeThat(isDiscoverableReplicaSet(), is(true));
+        try {
+            collection.insert(new BasicDBObject());
+            WriteResult res = collection.remove(new BasicDBObject(), new WriteConcern(5).withWTimeout(1, MILLISECONDS));
+            fail("Write should have failed but succeeded with result " + res);
+        } catch (WriteConcernException e) {
+            assertEquals(1, e.getWriteConcernResult().getCount());
+        }
+    }
+
+    @Test
+    public void testBulkWriteConcernException() throws UnknownHostException {
+        assumeThat(isDiscoverableReplicaSet(), is(true));
+        try {
+            BulkWriteOperation bulkWriteOperation = collection.initializeUnorderedBulkOperation();
+            bulkWriteOperation.insert(new BasicDBObject());
+            BulkWriteResult res = bulkWriteOperation.execute(new WriteConcern(5).withWTimeout(1, MILLISECONDS));
+            fail("Write should have failed but succeeded with result " + res);
+        } catch (BulkWriteException e) {
+            assertNotNull(e.getWriteConcernError()); // unclear what else we can reliably assert here
+        }
+    }
+
+    @Test
+    public void testBypassDocumentValidationForInserts() {
+        // given
+        DBObject options = new BasicDBObject("validator", QueryBuilder.start("level").greaterThanEquals(10).get());
+        DBCollection c = database.createCollection(collectionName, options);
+
+        try {
+            c.insert(Collections.singletonList(new BasicDBObject("level", 9)));
+            fail();
+        } catch (MongoException e) {
+            // success
+        }
+
+        try {
+            c.insert(Collections.singletonList(new BasicDBObject("level", 9)),
+                    new InsertOptions().bypassDocumentValidation(false));
+            fail();
+        } catch (MongoException e) {
+            // success
+        }
+
+        try {
+            c.insert(Collections.singletonList(new BasicDBObject("level", 9)),
+                    new InsertOptions().bypassDocumentValidation(true));
+        } catch (MongoException e) {
+            fail();
+        }
+
+        // should fail if write concern is unacknowledged
+        try {
+            c.insert(Collections.singletonList(new BasicDBObject("level", 9)),
+                    new InsertOptions()
+                            .bypassDocumentValidation(true)
+                            .writeConcern(WriteConcern.UNACKNOWLEDGED));
+            fail();
+        } catch (MongoException e) {
+            // success
+        }
+    }
+
+    @Test
+    public void testBypassDocumentValidationForUpdates() {
+        // given
+        DBObject options = new BasicDBObject("validator", QueryBuilder.start("level").greaterThanEquals(10).get());
+        DBCollection c = database.createCollection(collectionName, options);
+
+        try {
+            c.update(new BasicDBObject("_id", 1), new BasicDBObject("_id", 1).append("level", 9), true, false,
+                    WriteConcern.ACKNOWLEDGED, null);
+            fail();
+        } catch (MongoException e) {
+            // success
+        }
+
+        try {
+            c.update(new BasicDBObject("_id", 1), new BasicDBObject("_id", 1).append("level", 9), true, false,
+                    WriteConcern.ACKNOWLEDGED, false, null);
+            fail();
+        } catch (MongoException e) {
+            // success
+        }
+
+        try {
+            c.update(new BasicDBObject("_id", 1), new BasicDBObject("_id", 1).append("level", 9), true, false,
+                    WriteConcern.ACKNOWLEDGED, true, null);
+        } catch (MongoException e) {
+            fail();
+        }
+
+        try {
+            c.update(new BasicDBObject("_id", 1), new BasicDBObject("$set", new BasicDBObject("level", 9)), true, false,
+                    WriteConcern.ACKNOWLEDGED, true, null);
+        } catch (MongoException e) {
+            fail();
+        }
+
+        // should fail if write concern is unacknowledged
+        try {
+            c.update(new BasicDBObject("_id", 1), new BasicDBObject("_id", 1).append("level", 9), true, false,
+                    WriteConcern.UNACKNOWLEDGED, true, null);
+            fail();
+        } catch (MongoException e) {
+            // success
+        }
+    }
+
+    @Test
+    public void testBypassDocumentValidationForFindAndModify() {
+        // given
+        DBObject options = new BasicDBObject("validator", QueryBuilder.start("level").greaterThanEquals(10).get());
+        DBCollection c = database.createCollection(collectionName, options);
+        c.insert(new BasicDBObject("_id", 1).append("level", 11));
+
+        try {
+            c.findAndModify(new BasicDBObject("_id", 1), new BasicDBObject("_id", 1).append("level", 9));
+            fail();
+        } catch (MongoException e) {
+            // success
+        }
+
+        try {
+            c.findAndModify(new BasicDBObject("_id", 1), null, null, false, new BasicDBObject("_id", 1).append("level", 9),
+                    false, false, false, 0, TimeUnit.SECONDS);
+            fail();
+        } catch (MongoException e) {
+            // success
+        }
+
+        try {
+            c.findAndModify(new BasicDBObject("_id", 1), null, null, false, new BasicDBObject("_id", 1).append("level", 9),
+                    false, false, true, 0, TimeUnit.SECONDS);
+        } catch (MongoException e) {
+            fail();
+        }
+
+        try {
+            c.findAndModify(new BasicDBObject("_id", 1), null, null, false, new BasicDBObject("$set", new BasicDBObject("level", 9)),
+                    false, false, true, 0, TimeUnit.SECONDS);
+        } catch (MongoException e) {
+            fail();
+        }
+    }
+
+    @Test
+    public void testBypassDocumentValidationForBulkInsert() {
+        // given
+        DBObject options = new BasicDBObject("validator", QueryBuilder.start("level").greaterThanEquals(10).get());
+        DBCollection c = database.createCollection(collectionName, options);
+
+        try {
+            BulkWriteOperation bulk = c.initializeOrderedBulkOperation();
+            bulk.insert(new BasicDBObject("level", 9));
+            bulk.execute();
+            fail();
+        } catch (MongoException e) {
+            // success
+        }
+
+        try {
+            BulkWriteOperation bulk = c.initializeOrderedBulkOperation();
+            bulk.setBypassDocumentValidation(false);
+            bulk.insert(new BasicDBObject("level", 9));
+            bulk.execute();
+            fail();
+        } catch (MongoException e) {
+            // success
+        }
+
+        try {
+            BulkWriteOperation bulk = c.initializeOrderedBulkOperation();
+            bulk.setBypassDocumentValidation(true);
+            bulk.insert(new BasicDBObject("level", 9));
+            bulk.execute();
+        } catch (MongoException e) {
+            fail();
+        }
+
+        try {
+            BulkWriteOperation bulk = c.initializeOrderedBulkOperation();
+            bulk.setBypassDocumentValidation(true);
+            bulk.insert(new BasicDBObject("level", 9));
+            bulk.execute(WriteConcern.ACKNOWLEDGED);
+        } catch (MongoException e) {
+            fail();
+        }
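+
+        // an unordered bulk operation honors bypassDocumentValidation in the same way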
BasicDBObject("level", 9)); + bulk.execute(WriteConcern.ACKNOWLEDGED); + } catch (MongoException e) { + fail(); + } + + try { + BulkWriteOperation bulk = c.initializeUnorderedBulkOperation(); + bulk.setBypassDocumentValidation(true); + bulk.insert(new BasicDBObject("level", 9)); + bulk.execute(WriteConcern.ACKNOWLEDGED); + } catch (MongoException e) { + fail(); + } + + // should fail if write concern is unacknowledged + try { + BulkWriteOperation bulk = c.initializeUnorderedBulkOperation(); + bulk.setBypassDocumentValidation(true); + bulk.insert(new BasicDBObject("level", 9)); + bulk.execute(WriteConcern.UNACKNOWLEDGED); + fail(); + } catch (MongoException e) { + // success + } + } + + @Test + public void testBypassDocumentValidationForBulkUpdate() { + //given + DBObject options = new BasicDBObject("validator", QueryBuilder.start("level").greaterThanEquals(10).get()); + DBCollection c = database.createCollection(collectionName, options); + + try { + BulkWriteOperation bulk = c.initializeOrderedBulkOperation(); + bulk.find(new BasicDBObject("_id", 1)).upsert().update(new BasicDBObject("$set", new BasicDBObject("level", 9))); + bulk.execute(); + fail(); + } catch (MongoException e) { + // success + } + + try { + BulkWriteOperation bulk = c.initializeOrderedBulkOperation(); + bulk.setBypassDocumentValidation(false); + bulk.find(new BasicDBObject("_id", 1)).upsert().update(new BasicDBObject("$set", new BasicDBObject("level", 9))); + bulk.execute(); + fail(); + } catch (MongoException e) { + // success + } + + try { + BulkWriteOperation bulk = c.initializeOrderedBulkOperation(); + bulk.setBypassDocumentValidation(true); + bulk.find(new BasicDBObject("_id", 1)).upsert().update(new BasicDBObject("$set", new BasicDBObject("level", 9))); + bulk.execute(); + } catch (MongoException e) { + fail(); + } + + try { + BulkWriteOperation bulk = c.initializeOrderedBulkOperation(); + bulk.setBypassDocumentValidation(true); + bulk.find(new BasicDBObject("_id", 1)).upsert().update(new BasicDBObject("$set", new BasicDBObject("level", 9))); + bulk.execute(WriteConcern.ACKNOWLEDGED); + } catch (MongoException e) { + fail(); + } + + try { + BulkWriteOperation bulk = c.initializeUnorderedBulkOperation(); + bulk.setBypassDocumentValidation(true); + bulk.find(new BasicDBObject("_id", 1)).upsert().update(new BasicDBObject("$set", new BasicDBObject("level", 9))); + bulk.execute(WriteConcern.ACKNOWLEDGED); + } catch (MongoException e) { + fail(); + } + + // should fail if write concern is unacknowledged + try { + BulkWriteOperation bulk = c.initializeUnorderedBulkOperation(); + bulk.setBypassDocumentValidation(true); + bulk.find(new BasicDBObject("_id", 1)).upsert().update(new BasicDBObject("$set", new BasicDBObject("level", 9))); + bulk.execute(WriteConcern.UNACKNOWLEDGED); + fail(); + } catch (MongoException e) { + // success + } + } + + @Test + public void testBypassDocumentValidationForBulkReplace() { + //given + DBObject options = new BasicDBObject("validator", QueryBuilder.start("level").greaterThanEquals(10).get()); + DBCollection c = database.createCollection(collectionName, options); + + try { + BulkWriteOperation bulk = c.initializeOrderedBulkOperation(); + bulk.find(new BasicDBObject("_id", 1)).upsert().replaceOne(new BasicDBObject("level", 9)); + bulk.execute(); + fail(); + } catch (MongoException e) { + // success + } + + try { + BulkWriteOperation bulk = c.initializeOrderedBulkOperation(); + bulk.setBypassDocumentValidation(false); + bulk.find(new BasicDBObject("_id", 1)).upsert().replaceOne(new 
BasicDBObject("level", 9)); + bulk.execute(); + fail(); + } catch (MongoException e) { + // success + } + + try { + BulkWriteOperation bulk = c.initializeOrderedBulkOperation(); + bulk.setBypassDocumentValidation(true); + bulk.find(new BasicDBObject("_id", 1)).upsert().replaceOne(new BasicDBObject("level", 9)); + bulk.execute(); + } catch (MongoException e) { + fail(); + } + + try { + BulkWriteOperation bulk = c.initializeOrderedBulkOperation(); + bulk.setBypassDocumentValidation(true); + bulk.find(new BasicDBObject("_id", 1)).upsert().replaceOne(new BasicDBObject("level", 9)); + bulk.execute(WriteConcern.ACKNOWLEDGED); + } catch (MongoException e) { + fail(); + } + + try { + BulkWriteOperation bulk = c.initializeUnorderedBulkOperation(); + bulk.setBypassDocumentValidation(true); + bulk.find(new BasicDBObject("_id", 1)).upsert().replaceOne(new BasicDBObject("level", 9)); + bulk.execute(WriteConcern.ACKNOWLEDGED); + } catch (MongoException e) { + fail(); + } + + // should fail if write concern is unacknowledged + try { + BulkWriteOperation bulk = c.initializeUnorderedBulkOperation(); + bulk.setBypassDocumentValidation(true); + bulk.find(new BasicDBObject("_id", 1)).upsert().replaceOne(new BasicDBObject("level", 9)); + bulk.execute(WriteConcern.UNACKNOWLEDGED); + fail(); + } catch (MongoException e) { + // success + } + } + + @Test + public void testBypassDocumentValidationForAggregateDollarOut() { + //given + DBObject options = new BasicDBObject("validator", QueryBuilder.start("level").greaterThanEquals(10).get()); + DBCollection cOut = database.createCollection(collectionName + ".out", options); + DBCollection c = collection; + + c.insert(new BasicDBObject("level", 9)); + + try { + c.aggregate(Collections.singletonList(new BasicDBObject("$out", cOut.getName())), + AggregationOptions.builder().build()); + fail(); + } catch (MongoException e) { + // success + } + + try { + c.aggregate(Collections.singletonList(new BasicDBObject("$out", cOut.getName())), + AggregationOptions.builder() + .bypassDocumentValidation(false) + .build()); + fail(); + } catch (MongoException e) { + // success + } + + try { + c.aggregate(Collections.singletonList(new BasicDBObject("$out", cOut.getName())), + AggregationOptions.builder() + .bypassDocumentValidation(true) + .build()); + } catch (MongoException e) { + fail(); + } + + try { + c.aggregate(Collections.singletonList(new BasicDBObject("$match", new BasicDBObject("_id", 1))), + AggregationOptions.builder() + .bypassDocumentValidation(true) + .build()); + } catch (MongoException e) { + fail(); + } + } + + @SuppressWarnings("deprecation") + @Test + public void testBypassDocumentValidationForNonInlineMapReduce() { + //given + DBObject options = new BasicDBObject("validator", QueryBuilder.start("level").greaterThanEquals(10).get()); + DBCollection cOut = database.createCollection(collectionName + ".out", options); + DBCollection c = collection; + + c.insert(new BasicDBObject("level", 9)); + + String map = "function() { emit(this.level, this._id); }"; + String reduce = "function(level, _id) { return 1; }"; + try { + MapReduceCommand mapReduceCommand = new MapReduceCommand(c, map, reduce, cOut.getName(), MapReduceCommand.OutputType.REPLACE, + new BasicDBObject()); + c.mapReduce(mapReduceCommand); + fail(); + } catch (MongoException e) { + // success + } + + try { + MapReduceCommand mapReduceCommand = new MapReduceCommand(c, map, reduce, cOut.getName(), MapReduceCommand.OutputType.REPLACE, + new BasicDBObject()); + mapReduceCommand.setBypassDocumentValidation(false); 
+ c.mapReduce(mapReduceCommand); + fail(); + } catch (MongoException e) { + // success + } + + try { + MapReduceCommand mapReduceCommand = new MapReduceCommand(c, map, reduce, cOut.getName(), MapReduceCommand.OutputType.REPLACE, + new BasicDBObject()); + mapReduceCommand.setBypassDocumentValidation(true); + c.mapReduce(mapReduceCommand); + } catch (MongoException e) { + fail(); + } + + try { + MapReduceCommand mapReduceCommand = new MapReduceCommand(c, map, reduce, null, MapReduceCommand.OutputType.INLINE, + new BasicDBObject()); + mapReduceCommand.setBypassDocumentValidation(true); + c.mapReduce(mapReduceCommand); + } catch (MongoException e) { + fail(); + } + } + + + public static class MyDBObject extends BasicDBObject { + private static final long serialVersionUID = 3352369936048544621L; + } + + public static class MyEncoder implements DBEncoder { + @Override + public int writeObject(final OutputBuffer outputBuffer, final BSONObject document) { + int start = outputBuffer.getPosition(); + try (BsonBinaryWriter bsonWriter = new BsonBinaryWriter(outputBuffer)) { + bsonWriter.writeStartDocument(); + bsonWriter.writeInt32("_id", 1); + bsonWriter.writeString("s", "foo"); + bsonWriter.writeEndDocument(); + return outputBuffer.getPosition() - start; + } + } + + public static DBObject getConstantObject() { + return new BasicDBObject() + .append("_id", 1) + .append("s", "foo"); + } + } + + public static class MyEncoderFactory implements DBEncoderFactory { + @Override + public DBEncoder create() { + return new MyEncoder(); + } + } + + public static class TopLevelDBObject extends BasicDBObject { + private static final long serialVersionUID = 7029929727222305692L; + } + + public static class NestedOneDBObject extends BasicDBObject { + private static final long serialVersionUID = -5821458746671670383L; + } + + public static class NestedTwoDBObject extends BasicDBObject { + private static final long serialVersionUID = 5243874721805359328L; + } + + private DBObject getIndexInfoForNameStartingWith(final String field) { + for (DBObject indexInfo : collection.getIndexInfo()) { + if (((String) indexInfo.get("name")).startsWith(field)) { + return indexInfo; + } + } + throw new IllegalArgumentException("No index for field " + field); + } +} diff --git a/driver-legacy/src/test/functional/com/mongodb/DBCursorFunctionalSpecification.groovy b/driver-legacy/src/test/functional/com/mongodb/DBCursorFunctionalSpecification.groovy new file mode 100644 index 00000000000..a527fa8cdba --- /dev/null +++ b/driver-legacy/src/test/functional/com/mongodb/DBCursorFunctionalSpecification.groovy @@ -0,0 +1,304 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb + +import com.mongodb.client.internal.TestOperationExecutor +import com.mongodb.client.model.Collation +import com.mongodb.client.model.CollationStrength +import com.mongodb.internal.operation.BatchCursor +import spock.lang.Subject + +class DBCursorFunctionalSpecification extends FunctionalSpecification { + + def cursorMap = [a: 1] + + @Subject + private DBCursor dbCursor + + def setup() { + collection.insert(new BasicDBObject('a', 1)) + } + + def 'should use provided decoder factory'() { + given: + DBDecoder decoder = Mock() + DBDecoderFactory factory = Mock() + factory.create() >> decoder + + when: + dbCursor = collection.find() + dbCursor.setDecoderFactory(factory) + dbCursor.next() + + then: + 1 * decoder.decode(_ as byte[], collection) + } + + def 'should use provided hints for queries mongod > 3.0'() { + given: + collection.createIndex(new BasicDBObject('a', 1)) + + when: + dbCursor = collection.find().hint(new BasicDBObject('a', 1)) + def explainPlan = dbCursor.explain() + + then: + getKeyPattern(explainPlan) == cursorMap + + when: + dbCursor = collection.find().hint(new BasicDBObject('a', 1)) + explainPlan = dbCursor.explain() + + then: + getKeyPattern(explainPlan) == cursorMap + } + + def 'should use provided hint for count'() { + expect: + collection.createIndex(new BasicDBObject('a', 1)) + collection.find().hint('a_1').count() == 1 + collection.find().hint(new BasicDBObject('a', 1)).count() == 1 + } + + def 'should use provided hints for find'() { + given: + collection.createIndex(new BasicDBObject('a', 1)) + + when: + dbCursor = collection.find().hint(new BasicDBObject('a', 1)) + + then: + dbCursor.one() + + when: + dbCursor = collection.find().hint('a_1') + + then: + dbCursor.one() + } + + def 'should use provided hints for count'() { + when: + collection.insert(new BasicDBObject('a', 2)) + + then: + collection.find().count() == 2 + + when: + collection.createIndex(new BasicDBObject('a', 1)) + + then: + collection.find(new BasicDBObject('a', 1)).hint('_id_').count() == 1 + collection.find().hint('_id_').count() == 2 + + when: + collection.createIndex(new BasicDBObject('x', 1), new BasicDBObject('sparse', true)) + + then: + collection.find(new BasicDBObject('a', 1)).hint('x_1').count() == 0 + collection.find().hint('a_1').count() == 2 + } + + def 'should throw with bad hint'() { + when: + collection.find(new BasicDBObject('a', 1)).hint('BAD HINT').count() + + then: + thrown(MongoException) + + when: + collection.find(new BasicDBObject('a', 1)).hint('BAD HINT').one() + + then: + thrown(MongoException) + + when: + collection.find(new BasicDBObject('a', 1)).hint(new BasicDBObject('BAD HINT', 1)).one() + + then: + thrown(MongoException) + } + + def 'should return results in the order they are on disk when natural sort applied'() { + given: + collection.insert(new BasicDBObject('name', 'Chris')) + collection.insert(new BasicDBObject('name', 'Adam')) + collection.insert(new BasicDBObject('name', 'Bob')) + + when: + dbCursor = collection.find(new BasicDBObject('name', new BasicDBObject('$exists', true))) + .sort(new BasicDBObject('$natural', 1)) + + then: + dbCursor*.get('name') == ['Chris', 'Adam', 'Bob'] + } + + def 'should return results in the reverse order they are on disk when natural sort of minus one applied'() { + given: + collection.insert(new BasicDBObject('name', 'Chris')) + collection.insert(new BasicDBObject('name', 'Adam')) + collection.insert(new BasicDBObject('name', 'Bob')) + + when: + dbCursor = collection.find(new BasicDBObject('name', 
new BasicDBObject('$exists', true))) + .sort(new BasicDBObject('$natural', -1)) + + then: + dbCursor*.get('name') == ['Bob', 'Adam', 'Chris'] + } + + def 'should sort in reverse order'() { + given: + def range = 1..10 + for (i in range) { + collection.insert(new BasicDBObject('x', i)) + } + + when: + def cursor = collection.find() + .sort(new BasicDBObject('x', -1)) + + then: + cursor.next().get('x') == 10 + } + + def 'should sort in order'() { + given: + def range = 80..89 + for (i in range) { + def document = new BasicDBObject('x', i) + collection.insert(document) + } + + when: + def cursor = collection.find(new BasicDBObject('x', new BasicDBObject('$exists', true))) + .sort(new BasicDBObject('x', 1)) + + then: + cursor.next().get('x') == 80 + } + + def 'should sort on two fields'() { + given: + collection.insert(new BasicDBObject('_id', 1).append('name', 'Chris')) + collection.insert(new BasicDBObject('_id', 2).append('name', 'Adam')) + collection.insert(new BasicDBObject('_id', 3).append('name', 'Bob')) + collection.insert(new BasicDBObject('_id', 5).append('name', 'Adam')) + collection.insert(new BasicDBObject('_id', 4).append('name', 'Adam')) + + when: + dbCursor = collection.find(new BasicDBObject('name', new BasicDBObject('$exists', true))) + .sort(new BasicDBObject('name', 1).append('_id', 1)) + + then: + dbCursor.collect { it -> [it.get('name'), it.get('_id')] } == [['Adam', 2], ['Adam', 4], ['Adam', 5], ['Bob', 3], ['Chris', 1]] + } + + // Spock bug as MongoCursor does implement closeable + @SuppressWarnings('CloseWithoutCloseable') + def 'DBCursor options should set the correct read preference'() { + given: + def tailableCursor = + new BatchCursor() { + @Override + List tryNext() { null } + + @Override + void close() { } + + @Override + boolean hasNext() { true } + + @Override + List next() { null } + + @Override + int available() { + 0 + } + + @Override + void setBatchSize(final int batchSize) { } + + @Override + int getBatchSize() { 0 } + + @Override + void remove() { } + + @Override + ServerCursor getServerCursor() { null } + + @Override + ServerAddress getServerAddress() { null } + } + + def executor = new TestOperationExecutor([tailableCursor, tailableCursor, tailableCursor, tailableCursor]) + def collection = new DBCollection('collectionName', database, executor) + + when: + collection.find().hasNext() + + then: + executor.getReadPreference() == ReadPreference.primary() + + when: + collection.find().cursorType(CursorType.Tailable).tryNext() + + then: + executor.getReadPreference() == ReadPreference.primary() + + when: + collection.find().cursorType(CursorType.Tailable).setReadPreference(ReadPreference.secondaryPreferred()).tryNext() + + then: + executor.getReadPreference() == ReadPreference.secondaryPreferred() + } + + def 'should support collation'() { + when: + def document = BasicDBObject.parse('{_id: 1, str: "foo"}') + collection.insert(document) + dbCursor = collection.find(BasicDBObject.parse('{str: "FOO"}')).setCollation(caseInsensitiveCollation) + + then: + dbCursor.count() == 1 + + then: + dbCursor.one() == document + + then: + ++dbCursor.iterator() == document + } + + def caseInsensitiveCollation = Collation.builder().locale('en').collationStrength(CollationStrength.SECONDARY).build() + + static DBObject getKeyPattern(DBObject explainPlan) { + if (explainPlan.queryPlanner.winningPlan.queryPlan?.inputStage != null) { + return explainPlan.queryPlanner.winningPlan.queryPlan.inputStage.keyPattern + } else if (explainPlan.queryPlanner.winningPlan.inputStage != null) 
{ + return explainPlan.queryPlanner.winningPlan.inputStage.keyPattern + } else if (explainPlan.queryPlanner.winningPlan.shards != null) { + def winningPlan = explainPlan.queryPlanner.winningPlan.shards[0].winningPlan + if (winningPlan.queryPlan?.inputStage != null) { + return winningPlan.queryPlan.inputStage.keyPattern + } else if (winningPlan.inputStage != null) { + return winningPlan.inputStage.keyPattern + } + } + } +} diff --git a/driver-legacy/src/test/functional/com/mongodb/DBCursorOldTest.java b/driver-legacy/src/test/functional/com/mongodb/DBCursorOldTest.java new file mode 100644 index 00000000000..c697480b7db --- /dev/null +++ b/driver-legacy/src/test/functional/com/mongodb/DBCursorOldTest.java @@ -0,0 +1,410 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.concurrent.Callable; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeoutException; + +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +public class DBCursorOldTest extends DatabaseTestCase { + + @Test + public void testGetServerAddressLoop() { + insertTestData(collection, 10); + + DBCursor cur = collection.find(); + + while (cur.hasNext()) { + cur.next(); + assertNotNull(cur.getServerAddress()); + } + } + + @Test + public void testGetServerAddressQuery() { + insertTestData(collection, 10); + + DBCursor cur = collection.find(); + cur.hasNext(); + assertNotNull(cur.getServerAddress()); + } + + @Test + public void testGetServerAddressQuery1() { + insertTestData(collection, 10); + + DBCursor cur = collection.find(new BasicDBObject("x", 9)); + cur.hasNext(); + assertNotNull(cur.getServerAddress()); + } + + @Test + public void testCount() { + assertEquals(collection.find().count(), 0); + BasicDBObject obj = new BasicDBObject(); + obj.put("x", "foo"); + collection.insert(obj); + + assertEquals(1, collection.find().count()); + } + + @Test + public void testTailable() { + DBCollection c = database.getCollection("tail1"); + c.drop(); + database.createCollection("tail1", new BasicDBObject("capped", true).append("size", 10000)); + + DBObject firstDBObject = new BasicDBObject("x", 1); + DBObject secondDBObject = new BasicDBObject("x", 2); + + DBCursor cur = c.find() + .sort(new BasicDBObject("$natural", 1)) + .cursorType(CursorType.Tailable); + c.save(firstDBObject, WriteConcern.ACKNOWLEDGED); + + assertEquals(firstDBObject, cur.tryNext()); + assertEquals(firstDBObject, cur.curr()); + 
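// numSeen() reports how many documents the cursor has returned so far +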
assertEquals(1, cur.numSeen());
+
+        assertNull(cur.tryNext());
+        assertEquals(firstDBObject, cur.curr());
+        assertEquals(1, cur.numSeen());
+
+        c.save(secondDBObject, WriteConcern.ACKNOWLEDGED);
+        assertEquals(secondDBObject, cur.tryNext());
+        assertEquals(secondDBObject, cur.curr());
+        assertEquals(2, cur.numSeen());
+
+        assertNull(cur.tryNext());
+        assertEquals(secondDBObject, cur.curr());
+        assertEquals(2, cur.numSeen());
+
+        cur.close();
+    }
+
+    @Test
+    public void testTailableImplicitAwaitOnHasNext() throws ExecutionException, TimeoutException, InterruptedException {
+        DBCollection c = database.getCollection("tail1");
+        c.drop();
+        database.createCollection("tail1", new BasicDBObject("capped", true).append("size", 10000));
+        for (int i = 0; i < 10; i++) {
+            c.save(new BasicDBObject("x", i), WriteConcern.ACKNOWLEDGED);
+        }
+
+        DBCursor cur = c.find()
+                .sort(new BasicDBObject("$natural", 1))
+                .cursorType(CursorType.Tailable);
+
+        CountDownLatch latch = new CountDownLatch(1);
+        Callable<Integer> callable = () -> {
+            // the following call will block on the last hasNext
+            int i = 0;
+            while (cur.hasNext()) {
+                DBObject obj = cur.next();
+                i++;
+                if (i == 10) {
+                    latch.countDown();
+                } else if (i > 10) {
+                    return (Integer) obj.get("x");
+                }
+            }
+
+            return null;
+        };
+
+        ExecutorService es = Executors.newSingleThreadExecutor();
+        Future<Integer> future = es.submit(callable);
+
+        latch.await(5, SECONDS);
+
+        // this doc should unblock thread
+        c.save(new BasicDBObject("x", 10), WriteConcern.ACKNOWLEDGED);
+        assertEquals(10, (long) future.get(5, SECONDS));
+
+        cur.close();
+    }
+
+    @Test
+    public void testTailableImplicitAwaitOnNext() throws ExecutionException, TimeoutException, InterruptedException {
+        DBCollection c = database.getCollection("tail1");
+        c.drop();
+        database.createCollection("tail1", new BasicDBObject("capped", true).append("size", 10000));
+        for (int i = 0; i < 10; i++) {
+            c.save(new BasicDBObject("x", i), WriteConcern.ACKNOWLEDGED);
+        }
+
+        DBCursor cur = c.find()
+                .sort(new BasicDBObject("$natural", 1))
+                .cursorType(CursorType.Tailable);
+
+        CountDownLatch latch = new CountDownLatch(1);
+        Callable<Integer> callable = () -> {
+            // the following call will block on the last next
+            int i = 0;
+            while (i < 11) {
+                DBObject obj = cur.next();
+                i++;
+                if (i == 10) {
+                    latch.countDown();
+                } else if (i > 10) {
+                    return (Integer) obj.get("x");
+                }
+            }
+            return null;
+        };
+
+        ExecutorService es = Executors.newSingleThreadExecutor();
+        Future<Integer> future = es.submit(callable);
+
+        latch.await(5, SECONDS);
+
+        // this doc should unblock thread
+        c.save(new BasicDBObject("x", 10), WriteConcern.ACKNOWLEDGED);
+        assertEquals(10, (long) future.get(5, SECONDS));
+
+        cur.close();
+    }
+
+    @Test
+    public void shouldSupportTryNextOnTailableCursors() {
+        DBCollection c = database.getCollection("tail1");
+        c.drop();
+        database.createCollection("tail1", new BasicDBObject("capped", true).append("size", 10000));
+
+        c.save(new BasicDBObject("x", 1), WriteConcern.ACKNOWLEDGED);
+
+        try (DBCursor cur = c.find()
+                .sort(new BasicDBObject("$natural",
1)) + .cursorType(CursorType.TailableAwait)) { + cur.tryNext(); + } catch (IllegalArgumentException e) { + fail(); + } + } + + @Test(expected = IllegalArgumentException.class) + public void shouldThrowExceptionOnTryNextForNonTailableCursors() { + DBCollection c = database.getCollection("tail1"); + c.drop(); + database.createCollection("tail1", new BasicDBObject("capped", true).append("size", 10000)); + + c.save(new BasicDBObject("x", 1), WriteConcern.ACKNOWLEDGED); + DBCursor cur = c.find() + .sort(new BasicDBObject("$natural", 1)); + + cur.tryNext(); + } + + @Test + public void testBatchWithLimit() { + insertTestData(collection, 100); + + assertEquals(50, collection.find().limit(50).itcount()); + assertEquals(50, collection.find().batchSize(5).limit(50).itcount()); + } + + @Test + public void testUpsert() { + collection.update(new BasicDBObject("page", "/"), new BasicDBObject("$inc", new BasicDBObject("count", 1)), true, false); + collection.update(new BasicDBObject("page", "/"), new BasicDBObject("$inc", new BasicDBObject("count", 1)), true, false); + + assertEquals(1, collection.getCount()); + assertEquals(2, collection.findOne().get("count")); + } + + @Test + public void testLimitAndBatchSize() { + insertTestData(collection, 1000); + + DBObject q = BasicDBObjectBuilder.start().push("x").add("$lt", 200).get(); + + DBCursor cur = collection.find(q); + assertEquals(0, cur.getCursorId()); + assertEquals(200, cur.itcount()); + + cur = collection.find(q).limit(50); + assertEquals(0, cur.getCursorId()); + assertEquals(50, cur.itcount()); + + cur = collection.find(q).batchSize(50); + assertEquals(0, cur.getCursorId()); + assertEquals(200, cur.itcount()); + + cur = collection.find(q).batchSize(100).limit(50); + assertEquals(0, cur.getCursorId()); + assertEquals(50, cur.itcount()); + + cur = collection.find(q).batchSize(-40); + assertEquals(0, cur.getCursorId()); + assertEquals(40, cur.itcount()); + + cur = collection.find(q).limit(-100); + assertEquals(0, cur.getCursorId()); + assertEquals(100, cur.itcount()); + + cur = collection.find(q).batchSize(-40).limit(20); + assertEquals(0, cur.getCursorId()); + assertEquals(20, cur.itcount()); + + cur = collection.find(q).batchSize(-20).limit(100); + assertEquals(0, cur.getCursorId()); + assertEquals(20, cur.itcount()); + } + + @Test + public void testCurrentObjectAndNumSeen() { + for (int i = 1; i < 100; i++) { + collection.save(new BasicDBObject("x", i)); + } + + DBCursor cur = collection.find().sort(new BasicDBObject("x", 1)); + while (cur.hasNext()) { + DBObject current = cur.next(); + int num = (Integer) current.get("x"); + + assertEquals(current, cur.curr()); + assertEquals(num, cur.numSeen()); + } + } + + @Test + public void testSort() { + for (int i = 0; i < 1000; i++) { + collection.save(new BasicDBObject("x", i).append("y", 1000 - i)); + } + + //x ascending + DBCursor cur = collection.find().sort(new BasicDBObject("x", 1)); + int curmax = -100; + while (cur.hasNext()) { + int val = (Integer) cur.next().get("x"); + assertTrue(val > curmax); + curmax = val; + } + + //x desc + cur = collection.find().sort(new BasicDBObject("x", -1)); + curmax = 9999; + while (cur.hasNext()) { + int val = (Integer) cur.next().get("x"); + assertTrue(val < curmax); + curmax = val; + } + + //query and sort + cur = collection.find(QueryBuilder.start("x").greaterThanEquals(500).get()).sort(new BasicDBObject("y", 1)); + assertEquals(500, cur.count()); + curmax = -100; + while (cur.hasNext()) { + int val = (Integer) cur.next().get("y"); + assertTrue(val > curmax); + 
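// remember this value so the next result is compared with its predecessor +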
curmax = val; + } + } + + @Test(expected = NoSuchElementException.class) + public void testShouldThrowNoSuchElementException() { + DBCursor cursor = collection.find(); + cursor.next(); + } + + private void insertTestData(final DBCollection dbCollection, final int numberOfDocuments) { + List documents = new ArrayList<>(); + for (int i = 0; i < numberOfDocuments; i++) { + documents.add(new BasicDBObject("x", i)); + } + dbCollection.insert(documents); + } + + //TODO: why is this commented out? + // @Test + // public void testHasFinalizer() throws UnknownHostException, UnknownHostException { + // DBCollection c = database.getCollection("HasFinalizerTest"); + // c.drop(); + // + // for (int i = 0; i < 1000; i++) { + // c.save(new BasicDBObject("_id", i), WriteConcern.ACKNOWLEDGED); + // } + // + // // finalizer is on by default so after calling hasNext should report that it has one + // DBCursor cursor = c.find(); + // assertFalse(cursor.hasFinalizer()); + // cursor.hasNext(); + // assertTrue(cursor.hasFinalizer()); + // cursor.close(); + // + // // no finalizer if there is no cursor, as there should not be for a query with only one result + // cursor = c.find(new BasicDBObject("_id", 1)); + // cursor.hasNext(); + // assertFalse(cursor.hasFinalizer()); + // cursor.close(); + // + // // no finalizer if there is no cursor, as there should not be for a query with negative batch size + // cursor = c.find(); + // cursor.batchSize(-1); + // cursor.hasNext(); + // assertFalse(cursor.hasFinalizer()); + // cursor.close(); + // + // // finally, no finalizer if disabled in mongo options + // MongoClientOptions mongoOptions = MongoClientOptions.builder().build(); + // mongoOptions.cursorFinalizerEnabled = false; + // MongoClient m = new MongoClient("127.0.0.1", mongoOptions); + // try { + // c = m.getDB(cleanupDB).getCollection("HasFinalizerTest"); + // cursor = c.find(); + // cursor.hasNext(); + // assertFalse(cursor.hasFinalizer()); + // cursor.close(); + // } finally { + // m.close(); + // } + // } +} diff --git a/driver-legacy/src/test/functional/com/mongodb/DBCursorTest.java b/driver-legacy/src/test/functional/com/mongodb/DBCursorTest.java new file mode 100644 index 00000000000..6c4f622507b --- /dev/null +++ b/driver-legacy/src/test/functional/com/mongodb/DBCursorTest.java @@ -0,0 +1,523 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.mongodb;
+
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+import java.util.concurrent.TimeUnit;
+
+import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint;
+import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint;
+import static com.mongodb.ClusterFixture.isSharded;
+import static org.hamcrest.CoreMatchers.not;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeThat;
+
+public class DBCursorTest extends DatabaseTestCase {
+    private static final int NUMBER_OF_DOCUMENTS = 10;
+    private DBCursor cursor;
+
+    @Before
+    public void setUp() {
+        super.setUp();
+        for (int i = 0; i < NUMBER_OF_DOCUMENTS; i++) {
+            collection.insert(new BasicDBObject("_id", i).append("x", i));
+        }
+        collection.createIndex(new BasicDBObject("x", 1));
+
+        cursor = collection.find();
+    }
+
+    @After
+    public void after() {
+        if (cursor != null) {
+            cursor.close();
+        }
+    }
+
+    @Test
+    public void testAvailable() {
+        cursor.batchSize(3);
+        assertEquals(0, cursor.available());
+
+        cursor.next();
+        assertEquals(2, cursor.available());
+
+        cursor.next();
+        assertEquals(1, cursor.available());
+
+        cursor.next();
+        assertEquals(0, cursor.available());
+
+        cursor.next();
+        assertEquals(2, cursor.available());
+
+        cursor.close();
+        assertEquals(0, cursor.available());
+    }
+
+    @Test
+    public void testNextHasNext() {
+        cursor.sort(new BasicDBObject("_id", 1));
+        int i = 0;
+        while (cursor.hasNext()) {
+            DBObject cur = cursor.next();
+            assertEquals(i, cur.get("_id"));
+            i++;
+        }
+
+        try {
+            cursor.next();
+            fail();
+        } catch (NoSuchElementException e) {
+            // all good
+        }
+    }
+
+    @Test
+    public void testCurr() {
+        assertNull(cursor.curr());
+        DBObject next = cursor.next();
+        assertEquals(next, cursor.curr());
+        next = cursor.next();
+        assertEquals(next, cursor.curr());
+    }
+
+    @Test
+    public void testMarkPartial() {
+        DBCursor markPartialCursor = collection.find(new BasicDBObject(), new BasicDBObject("_id", 1));
+        assertTrue(markPartialCursor.next().isPartialObject());
+    }
+
+    @Test
+    public void testMarkPartialForEmptyObjects() {
+        DBCursor cursor = collection.find(new BasicDBObject(), new BasicDBObject("_id", 0));
+        for (final DBObject document : cursor) {
+            assertTrue(document.isPartialObject());
+        }
+    }
+
+    @Test
+    public void testIterator() {
+        cursor.sort(new BasicDBObject("_id", 1));
+        Iterator<DBObject> iter = cursor.iterator();
+        int i = 0;
+        while (iter.hasNext()) {
+            DBObject cur = iter.next();
+            assertEquals(i, cur.get("_id"));
+            i++;
+        }
+    }
+
+    @Test
+    public void testCopy() {
+        DBCursor cursorCopy = cursor.copy();
+        assertEquals(cursor.getCollection(), cursorCopy.getCollection());
+        assertEquals(cursor.getQuery(), cursorCopy.getQuery());
+    }
+
+    @Test(expected = UnsupportedOperationException.class)
+    public void testRemove() {
+        cursor.remove();
+    }
+
+    @Test
+    public void testLimit() {
+        DBCollection c = collection;
+        collection.drop();
+        for (int i = 0; i < 100; i++) {
+            c.save(new BasicDBObject("x", i));
+        }
+
+        DBCursor dbCursor = c.find();
+        try {
+            assertEquals(0, dbCursor.getLimit());
+            assertEquals(0, dbCursor.getBatchSize());
+            assertEquals(100, dbCursor.toArray().size());
+        } finally {
+            dbCursor.close();
+        }
+
+        dbCursor = c.find().limit(50);
+        try {
+            assertEquals(50, dbCursor.getLimit());
+            assertEquals(50, dbCursor.toArray().size());
+        } finally {
+            dbCursor.close();
+        }
+
+        // a negative limit returns at most that many documents in a single batch
+        dbCursor = c.find().limit(-50);
+        try {
+            assertEquals(-50, dbCursor.getLimit());
+            assertEquals(50, dbCursor.toArray().size());
+        } finally {
+            dbCursor.close();
+        }
+    }
+
+    @Test
+    public void testBatchSize() {
+        DBCollection c = collection;
+        collection.drop();
+        for (int i = 0; i < 100; i++) {
+            c.save(new BasicDBObject("x", i));
+        }
+
+        DBCursor dbCursor = c.find().batchSize(0);
+        try {
+            assertEquals(0, dbCursor.getBatchSize());
+            assertEquals(100, dbCursor.toArray().size());
+        } finally {
+            dbCursor.close();
+        }
+
+        dbCursor = c.find().batchSize(50);
+        try {
+            assertEquals(50, dbCursor.getBatchSize());
+            assertEquals(100, dbCursor.toArray().size());
+        } finally {
+            dbCursor.close();
+        }
+
+        // a negative batch size likewise closes the cursor after a single batch
+        dbCursor = c.find().batchSize(-50);
+        try {
+            assertEquals(-50, dbCursor.getBatchSize());
+            assertEquals(50, dbCursor.toArray().size());
+        } finally {
+            dbCursor.close();
+        }
+    }
+
+    @Test
+    public void testSkip() {
+        try (DBCursor cursor = collection.find().skip(2)) {
+            assertEquals(8, cursor.toArray().size());
+        }
+    }
+
+    @Test
+    public void testGetCursorId() {
+        DBCursor cursor = collection.find().batchSize(2);
+        try {
+            assertEquals(0, cursor.getCursorId());
+            cursor.hasNext();
+            assertThat(cursor.getCursorId(), is(not(0L)));
+        } finally {
+            cursor.close();
+        }
+
+        cursor = collection.find();
+        try {
+            assertEquals(0, cursor.getCursorId());
+            cursor.hasNext();
+            assertThat(cursor.getCursorId(), is(0L));
+        } finally {
+            cursor.close();
+        }
+    }
+
+    @Test
+    public void getNumSeen() {
+        DBCursor cursor = collection.find();
+        assertEquals(0, cursor.numSeen());
+        cursor.hasNext();
+        assertEquals(0, cursor.numSeen());
+        cursor.next();
+        assertEquals(1, cursor.numSeen());
+        cursor.next();
+        assertEquals(2, cursor.numSeen());
+    }
+
+    @Test
+    public void testLength() {
+        assertEquals(NUMBER_OF_DOCUMENTS, cursor.length());
+    }
+
+    @Test
+    public void testToArray() {
+        assertEquals(NUMBER_OF_DOCUMENTS, cursor.toArray().size());
+        assertEquals(NUMBER_OF_DOCUMENTS, cursor.toArray().size());
+    }
+
+    @Test
+    public void testToArrayWithMax() {
+        assertEquals(9, cursor.toArray(9).size());
+        assertEquals(NUMBER_OF_DOCUMENTS, cursor.toArray().size());
+    }
+
+    @Test
+    public void testIterationCount() {
+        assertEquals(NUMBER_OF_DOCUMENTS, cursor.itcount());
+    }
+
+    @Test
+    public void testCount() {
+        assertEquals(NUMBER_OF_DOCUMENTS, cursor.count());
+        assertEquals(1, collection.find(new BasicDBObject("_id", 1)).count());
+    }
+
+    @Test
+    public void testGetKeysWanted() {
+        assertNull(cursor.getKeysWanted());
+        DBObject keys = new BasicDBObject("x", 1);
+        DBCursor cursorWithKeys = collection.find(new BasicDBObject(), keys);
+        assertEquals(keys, cursorWithKeys.getKeysWanted());
+    }
+
+    @Test
+    public void testGetQuery() {
+        assertEquals(new BasicDBObject(), cursor.getQuery());
+        DBObject query = new BasicDBObject("x", 1);
+        DBCursor cursorWithQuery = collection.find(query);
+        assertEquals(query, cursorWithQuery.getQuery());
+    }
+
+    @Test
+    public void testReadPreference() {
+        assertEquals(ReadPreference.primary(), cursor.getReadPreference());
+        cursor.setReadPreference(ReadPreference.secondary());
+        assertEquals(ReadPreference.secondary(), cursor.getReadPreference());
+    }
+
+    @Test
+    public void testConstructor() {
+        DBObject query = new BasicDBObject("x", 1);
+        DBObject keys = new BasicDBObject("x", 1).append("y", 1);
+        DBCursor local = new DBCursor(collection, query,
keys, ReadPreference.secondary()); + assertEquals(ReadPreference.secondary(), local.getReadPreference()); + assertEquals(query, local.getQuery()); + assertEquals(keys, local.getKeysWanted()); + } + + @Test + public void testMax() { + collection.createIndex(new BasicDBObject("x", 1)); + countResults(new DBCursor(collection, new BasicDBObject(), new BasicDBObject(), ReadPreference.primary()) + .max(new BasicDBObject("x", 4)) + .hint(new BasicDBObject("x", 1)), 4); + countResults(new DBCursor(collection, new BasicDBObject(), new BasicDBObject(), ReadPreference.primary()) + .max(new BasicDBObject("x", 4)) + .hint(new BasicDBObject("x", 1)), 4); + } + + @Test + public void testMin() { + collection.createIndex(new BasicDBObject("x", 1)); + countResults(new DBCursor(collection, new BasicDBObject(), new BasicDBObject(), ReadPreference.primary()) + .min(new BasicDBObject("x", 4)) + .hint(new BasicDBObject("x", 1)), 6); + countResults(new DBCursor(collection, new BasicDBObject(), new BasicDBObject(), ReadPreference.primary()) + .min(new BasicDBObject("x", 4)) + .hint(new BasicDBObject("x", 1)), 6); + } + + @Test + public void testReturnKey() { + DBCursor cursor = new DBCursor(collection, new BasicDBObject(), new BasicDBObject(), ReadPreference.primary()) + .returnKey(); + try { + while (cursor.hasNext()) { + assertNull(cursor.next() + .get("_id")); + } + } finally { + cursor.close(); + } + cursor = new DBCursor(collection, new BasicDBObject(), new BasicDBObject(), ReadPreference.primary()) + .returnKey(); + try { + while (cursor.hasNext()) { + assertNull(cursor.next() + .get("_id")); + } + } finally { + cursor.close(); + } + } + + private void countResults(final DBCursor cursor, final int expected) { + int count = 0; + while (cursor.hasNext()) { + cursor.next(); + count++; + } + cursor.close(); + assertEquals(expected, count); + } + + @Test + public void testSettingACommentInsertsCommentIntoProfileCollectionWhenProfilingIsTurnedOn() { + assumeThat(isSharded(), is(false)); + + // given + String expectedComment = "test comment"; + + DBCollection profileCollection = database.getCollection("system.profile"); + profileCollection.drop(); + + database.command(new BasicDBObject("profile", 2)); + + try { + // when + DBCursor cursor = new DBCursor(collection, new BasicDBObject(), new BasicDBObject(), ReadPreference.primary()) + .comment(expectedComment); + while (cursor.hasNext()) { + cursor.next(); + } + + // then + assertEquals(1, profileCollection.count()); + + DBObject profileDocument = profileCollection.findOne(); + assertEquals(expectedComment, ((DBObject) profileDocument.get("command")).get("comment")); + } finally { + database.command(new BasicDBObject("profile", 0)); + profileCollection.drop(); + } + } + + @Test + public void testShouldReturnOnlyTheFieldThatWasInTheIndexUsedForTheFindWhenReturnKeyIsUsed() { + // Given + // put some documents into the collection + for (int i = 0; i < NUMBER_OF_DOCUMENTS; i++) { + collection.insert(new BasicDBObject("y", i).append("someOtherKey", "someOtherValue")); + } + //set an index on the field "y" + collection.createIndex(new BasicDBObject("y", 1)); + + // When + // find a document by using a search on the field in the index + DBCursor cursor = collection.find(new BasicDBObject("y", 7)) + .returnKey(); + + // Then + DBObject foundItem = cursor.next(); + assertThat("There should only be one field in the resulting document", foundItem.keySet().size(), is(1)); + assertThat("This should be the 'y' field with its value", (Integer) foundItem.get("y"), is(7)); + } + + 
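// The maxTime tests below enable a server-side fail point that forces operations to exceed their time limit; error code 50 signals that timeout +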
@Test
+    public void testMaxTimeForIterator() {
+        enableMaxTimeFailPoint();
+        DBCursor cursor = new DBCursor(collection, new BasicDBObject("x", 1), new BasicDBObject(), ReadPreference.primary());
+        cursor.maxTime(1, TimeUnit.SECONDS);
+        try {
+            cursor.hasNext();
+            fail("Should have thrown");
+        } catch (MongoExecutionTimeoutException e) {
+            assertEquals(50, e.getCode());
+        } finally {
+            disableMaxTimeFailPoint();
+        }
+    }
+
+    @Test
+    public void testMaxTimeForIterable() {
+        assumeThat(isSharded(), is(false));
+        enableMaxTimeFailPoint();
+        DBCursor cursor = new DBCursor(collection, new BasicDBObject("x", 1), new BasicDBObject(), ReadPreference.primary());
+        cursor.maxTime(1, TimeUnit.SECONDS);
+        try {
+            cursor.iterator().hasNext();
+            fail("Should have thrown");
+        } catch (MongoExecutionTimeoutException e) {
+            assertEquals(50, e.getCode());
+        } finally {
+            disableMaxTimeFailPoint();
+        }
+    }
+
+    @Test
+    public void testMaxTimeForOne() {
+        assumeThat(isSharded(), is(false));
+        enableMaxTimeFailPoint();
+        DBCursor cursor = new DBCursor(collection, new BasicDBObject("x", 1), new BasicDBObject(), ReadPreference.primary());
+        cursor.maxTime(1, TimeUnit.SECONDS);
+        try {
+            cursor.one();
+            fail("Should have thrown");
+        } catch (MongoExecutionTimeoutException e) {
+            assertEquals(50, e.getCode());
+        } finally {
+            disableMaxTimeFailPoint();
+        }
+    }
+
+    @Test
+    public void testMaxTimeForCount() {
+        assumeThat(isSharded(), is(false));
+        enableMaxTimeFailPoint();
+        DBCursor cursor = new DBCursor(collection, new BasicDBObject("x", 1), new BasicDBObject(), ReadPreference.primary());
+        cursor.maxTime(1, TimeUnit.SECONDS);
+        try {
+            cursor.count();
+            fail("Should have thrown");
+        } catch (MongoExecutionTimeoutException e) {
+            assertEquals(50, e.getCode());
+        } finally {
+            disableMaxTimeFailPoint();
+        }
+    }
+
+    @Test
+    public void testMaxTimeForSize() {
+        assumeThat(isSharded(), is(false));
+        enableMaxTimeFailPoint();
+        DBCursor cursor = new DBCursor(collection, new BasicDBObject("x", 1), new BasicDBObject(), ReadPreference.primary());
+        cursor.maxTime(1, TimeUnit.SECONDS);
+        try {
+            cursor.size();
+            fail("Should have thrown");
+        } catch (MongoExecutionTimeoutException e) {
+            assertEquals(50, e.getCode());
+        } finally {
+            disableMaxTimeFailPoint();
+        }
+    }
+
+    @Test
+    public void testPropertyMutability() {
+        DBCursor cursor = new DBCursor(collection, new BasicDBObject("x", 1), new BasicDBObject("y", 1), ReadPreference.primary());
+        cursor.getQuery().put("z", 2);
+        cursor.getKeysWanted().put("v", 1);
+        assertTrue(cursor.getQuery().containsField("z"));
+        assertTrue(cursor.getKeysWanted().containsField("v"));
+    }
+
+    @Test
+    public void testClose() {
+        cursor.next();
+        cursor.close();
+        try {
+            cursor.next();
+            fail();
+        } catch (IllegalStateException e) {
+            // all good
+        }
+    }
+}
diff --git a/driver-legacy/src/test/functional/com/mongodb/DBFunctionalSpecification.groovy b/driver-legacy/src/test/functional/com/mongodb/DBFunctionalSpecification.groovy
new file mode 100644
index 00000000000..2e0f3936a9e
--- /dev/null
+++ b/driver-legacy/src/test/functional/com/mongodb/DBFunctionalSpecification.groovy
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb
+
+import org.bson.BsonDocument
+import spock.lang.IgnoreIf
+
+import static com.mongodb.ClusterFixture.configureFailPoint
+import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet
+
+class DBFunctionalSpecification extends FunctionalSpecification {
+
+    @IgnoreIf({ !isDiscoverableReplicaSet() })
+    def 'should throw WriteConcernException on write concern error for drop'() {
+        given:
+        database.createCollection('ctest', new BasicDBObject())
+
+        def w = 2
+        database.setWriteConcern(new WriteConcern(w))
+        configureFailPoint(BsonDocument.parse('{ configureFailPoint: "failCommand", ' +
+                'mode : {times : 1}, ' +
+                'data : {failCommands : ["dropDatabase"], ' +
+                'writeConcernError : {code : 100, errmsg : "failed"}}}'))
+
+        when:
+        database.dropDatabase()
+
+        then:
+        def e = thrown(WriteConcernException)
+        e.getErrorCode() == 100
+
+        cleanup:
+        database.setWriteConcern(null)
+    }
+
+    @IgnoreIf({ !isDiscoverableReplicaSet() })
+    def 'should throw WriteConcernException on write concern error for create collection'() {
+        given:
+        database.setWriteConcern(new WriteConcern(5))
+
+        when:
+        database.createCollection('ctest', new BasicDBObject())
+
+        then:
+        def e = thrown(WriteConcernException)
+        e.getErrorCode() == 100
+
+        cleanup:
+        database.setWriteConcern(null)
+    }
+
+    @IgnoreIf({ !isDiscoverableReplicaSet() })
+    def 'should throw WriteConcernException on write concern error for create view'() {
+        given:
+        database.setWriteConcern(new WriteConcern(5))
+
+        when:
+        database.createView('view1', 'collection1', [])
+
+        then:
+        def e = thrown(WriteConcernException)
+        e.getErrorCode() == 100
+
+        cleanup:
+        database.setWriteConcern(null)
+    }
+
+
+    def 'should execute command with custom encoder'() {
+        when:
+        CommandResult commandResult = database.command(new BasicDBObject('isMaster', 1), DefaultDBEncoder.FACTORY.create())
+
+        then:
+        commandResult.ok()
+    }
+}
diff --git a/driver-legacy/src/test/functional/com/mongodb/DBObjectCodecTest.java b/driver-legacy/src/test/functional/com/mongodb/DBObjectCodecTest.java
new file mode 100644
index 00000000000..309f7e1a111
--- /dev/null
+++ b/driver-legacy/src/test/functional/com/mongodb/DBObjectCodecTest.java
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.mongodb; + +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonDocumentWriter; +import org.bson.BsonInt32; +import org.bson.BsonNull; +import org.bson.BsonObjectId; +import org.bson.LazyBSONCallback; +import org.bson.codecs.BsonValueCodecProvider; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.ValueCodecProvider; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import static java.util.Arrays.asList; +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +public class DBObjectCodecTest extends DatabaseTestCase { + + @Test + public void testDBListEncoding() { + BasicDBList list = new BasicDBList(); + list.add(new BasicDBObject("a", 1).append("b", true)); + list.add(new BasicDBObject("c", "string").append("d", 0.1)); + collection.save(new BasicDBObject("l", list)); + assertEquals(list, collection.findOne().get("l")); + } + + @Test + public void shouldNotGenerateIdIfPresent() { + DBObjectCodec dbObjectCodec = new DBObjectCodec(fromProviders(asList(new ValueCodecProvider(), new DBObjectCodecProvider(), + new BsonValueCodecProvider()))); + DBObject document = new BasicDBObject("_id", 1); + assertTrue(dbObjectCodec.documentHasId(document)); + document = dbObjectCodec.generateIdIfAbsentFromDocument(document); + assertTrue(dbObjectCodec.documentHasId(document)); + assertEquals(new BsonInt32(1), dbObjectCodec.getDocumentId(document)); + } + + @Test + public void shouldGenerateIdIfAbsent() { + DBObjectCodec dbObjectCodec = new DBObjectCodec(fromProviders(asList(new ValueCodecProvider(), new DBObjectCodecProvider(), + new BsonValueCodecProvider()))); + DBObject document = new BasicDBObject(); + assertFalse(dbObjectCodec.documentHasId(document)); + document = dbObjectCodec.generateIdIfAbsentFromDocument(document); + assertTrue(dbObjectCodec.documentHasId(document)); + assertEquals(BsonObjectId.class, dbObjectCodec.getDocumentId(document).getClass()); + } + + @Test + public void shouldRespectEncodeIdFirstPropertyInEncoderContext() { + DBObjectCodec dbObjectCodec = new DBObjectCodec(fromProviders(asList(new ValueCodecProvider(), new DBObjectCodecProvider(), + new BsonValueCodecProvider()))); + // given + DBObject doc = new BasicDBObject("x", 2).append("_id", 2); + + // when + BsonDocument encodedDocument = new BsonDocument(); + dbObjectCodec.encode(new BsonDocumentWriter(encodedDocument), + doc, + EncoderContext.builder().isEncodingCollectibleDocument(true).build()); + + // then + assertEquals(new ArrayList<>(encodedDocument.keySet()), asList("_id", "x")); + + // when + encodedDocument.clear(); + dbObjectCodec.encode(new BsonDocumentWriter(encodedDocument), + doc, + EncoderContext.builder().isEncodingCollectibleDocument(false).build()); + + // then + assertEquals(new ArrayList<>(encodedDocument.keySet()), asList("x", "_id")); + } + + @Test + public void shouldEncodeNull() { + DBObjectCodec dbObjectCodec = new DBObjectCodec(fromProviders(asList(new ValueCodecProvider(), new DBObjectCodecProvider(), + new BsonValueCodecProvider()))); + + DBObject doc = new BasicDBObject("null", null); + + BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument()); + dbObjectCodec.encode(writer, doc, EncoderContext.builder().build()); + + assertEquals(new BsonDocument("null", 
BsonNull.VALUE), writer.getDocument());
+    }
+
+    @Test
+    public void shouldEncodeNestedMapsListsAndDocuments() {
+        byte[] zeroOneDocumentBytes = {19, 0, 0, 0, 16, 48, 0, 0, 0, 0, 0, 16, 49, 0, 1, 0, 0, 0, 0};  // {"0" : 0, "1" : 1}
+        Map<String, Integer> zeroOneMap = new HashMap<>();
+        zeroOneMap.put("0", 0);
+        zeroOneMap.put("1", 1);
+        DBObject zeroOneDBObject = new BasicDBObject();
+        zeroOneDBObject.putAll(zeroOneMap);
+        DBObject zeroOneDBList = new BasicDBList();
+        zeroOneDBList.putAll(zeroOneMap);
+        List<Integer> zeroOneList = asList(0, 1);
+
+        DBObjectCodec dbObjectCodec = new DBObjectCodec(fromProviders(asList(new ValueCodecProvider(), new DBObjectCodecProvider(),
+                new BsonValueCodecProvider())));
+
+        DBObject doc = new BasicDBObject()
+                .append("map", zeroOneMap)
+                .append("dbDocument", zeroOneDBObject)
+                .append("dbList", zeroOneDBList)
+                .append("list", zeroOneList)
+                .append("array", new int[] {0, 1})
+                .append("lazyDoc", new LazyDBObject(zeroOneDocumentBytes, new LazyBSONCallback()))
+                .append("lazyArray", new LazyDBList(zeroOneDocumentBytes, new LazyBSONCallback()));
+
+        BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument());
+        dbObjectCodec.encode(writer, doc, EncoderContext.builder().build());
+
+        BsonDocument zeroOneBsonDocument = new BsonDocument().append("0", new BsonInt32(0)).append("1", new BsonInt32(1));
+        BsonArray zeroOneBsonArray = new BsonArray(asList(new BsonInt32(0), new BsonInt32(1)));
+
+        assertEquals(new BsonDocument("map", zeroOneBsonDocument)
+                .append("dbDocument", zeroOneBsonDocument)
+                .append("dbList", zeroOneBsonArray)
+                .append("list", zeroOneBsonArray)
+                .append("array", zeroOneBsonArray)
+                .append("lazyDoc", zeroOneBsonDocument)
+                .append("lazyArray", zeroOneBsonArray), writer.getDocument());
+    }
+
+    @Test
+    public void shouldEncodeIterableMapAsMap() {
+        IterableMap iterableMap = new IterableMap();
+        iterableMap.put("first", 1);
+
+        DBObjectCodec dbObjectCodec = new DBObjectCodec(fromProviders(asList(new ValueCodecProvider(), new DBObjectCodecProvider(),
+                new BsonValueCodecProvider())));
+
+        DBObject doc = new BasicDBObject("map", iterableMap);
+
+        BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument());
+        dbObjectCodec.encode(writer, doc, EncoderContext.builder().build());
+
+        assertEquals(new BsonDocument("map", new BsonDocument("first", new BsonInt32(1))), writer.getDocument());
+    }
+
+    static class IterableMap extends HashMap<String, Integer> implements Iterable<Integer> {
+        private static final long serialVersionUID = -5090421898469363392L;
+
+        @Override
+        public Iterator<Integer> iterator() {
+            return values().iterator();
+        }
+    }
+}
diff --git a/driver-legacy/src/test/functional/com/mongodb/DBRefTest.java b/driver-legacy/src/test/functional/com/mongodb/DBRefTest.java
new file mode 100644
index 00000000000..7e9e8dfec55
--- /dev/null
+++ b/driver-legacy/src/test/functional/com/mongodb/DBRefTest.java
@@ -0,0 +1,136 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb;
+
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+
+public class DBRefTest extends DatabaseTestCase {
+
+    @SuppressWarnings("unchecked")
+    @Test
+    public void testRefListRoundTrip() {
+        DBCollection a = database.getCollection("reflistfield");
+        List<DBRef> refs = new ArrayList<>();
+        refs.add(new DBRef("other", 12));
+        refs.add(new DBRef("other", 14));
+        refs.add(new DBRef("other", 16));
+        a.save(new BasicDBObject("refs", refs));
+
+        DBObject loaded = a.findOne();
+        assertNotNull(loaded);
+        List<DBRef> refsLoaded = (List<DBRef>) loaded.get("refs");
+        assertNotNull(refsLoaded);
+        assertEquals(3, refsLoaded.size());
+        assertEquals(DBRef.class, refsLoaded.get(0).getClass());
+        assertEquals(12, refsLoaded.get(0).getId());
+        assertEquals(14, refsLoaded.get(1).getId());
+        assertEquals(16, refsLoaded.get(2).getId());
+    }
+
+    @Test
+    public void testRoundTrip() {
+        DBCollection a = database.getCollection("refroundtripa");
+        DBCollection b = database.getCollection("refroundtripb");
+        a.drop();
+        b.drop();
+
+        a.save(new BasicDBObject("_id", 17).append("n", 111));
+        b.save(new BasicDBObject("n", 12).append("l", new DBRef("refroundtripa", 17)));
+
+        assertEquals(12, b.findOne().get("n"));
+        assertEquals(DBRef.class, b.findOne().get("l").getClass());
+    }
+
+    @Test
+    public void testFindByDBRef() {
+        DBRef ref = new DBRef("fake", 17);
+
+        collection.save(new BasicDBObject("n", 12).append("l", ref));
+
+        assertEquals(12, collection.findOne().get("n"));
+        assertEquals(DBRef.class, collection.findOne().get("l").getClass());
+
+        DBObject loaded = collection.findOne(new BasicDBObject("l", ref));
+        assertEquals(12, loaded.get("n"));
+        assertEquals(DBRef.class, loaded.get("l").getClass());
+        assertEquals(ref.getId(), ((DBRef) loaded.get("l")).getId());
+        assertEquals(ref.getCollectionName(), ((DBRef) loaded.get("l")).getCollectionName());
+    }
+
+    @Test
+    public void testGetEntityWithSingleDBRefWithCompoundId() {
+        DBCollection a = database.getCollection("a");
+        a.drop();
+
+        BasicDBObject compoundId = new BasicDBObject("name", "someName").append("email", "test@example.com");
+        BasicDBObject entity = new BasicDBObject("_id", "testId").append("ref", new DBRef("fake", compoundId));
+        a.save(entity);
+
+        DBObject fetched = a.findOne(new BasicDBObject("_id", "testId"));
+
+        assertNotNull(fetched);
+        assertFalse(fetched.containsField("$id"));
+        assertEquals(fetched, entity);
+    }
+
+    @Test
+    public void testGetEntityWithArrayOfDBRefsWithCompoundIds() {
+        DBCollection a = database.getCollection("a");
+        a.drop();
+
+        BasicDBObject compoundId1 = new BasicDBObject("name", "someName").append("email", "test@example.com");
+        BasicDBObject compoundId2 = new BasicDBObject("name", "someName2").append("email", "test2@example.com");
+        BasicDBList listOfRefs = new BasicDBList();
+        listOfRefs.add(new DBRef("fake", compoundId1));
+        listOfRefs.add(new DBRef("fake", compoundId2));
+        BasicDBObject entity = new BasicDBObject("_id", "testId").append("refs", listOfRefs);
+        a.save(entity);
+
+        DBObject fetched = a.findOne(new BasicDBObject("_id", "testId"));
+
+        assertNotNull(fetched);
+        assertEquals(fetched, entity);
+    }
+
+    @Test
+    public void testGetEntityWithMapOfDBRefsWithCompoundIds() {
+        DBCollection base = database.getCollection("basecollection");
+        base.drop();
+
+        BasicDBObject compoundId1 = new BasicDBObject("name", "someName").append("email",
"test@example.com"); + BasicDBObject compoundId2 = new BasicDBObject("name", "someName2").append("email", "test2@example.com"); + BasicDBObject mapOfRefs = new BasicDBObject().append("someName", new DBRef("compoundkeys", compoundId1)) + .append("someName2", new DBRef("compoundkeys", compoundId2)); + BasicDBObject entity = new BasicDBObject("_id", "testId").append("refs", mapOfRefs); + base.save(entity); + + DBObject fetched = base.findOne(new BasicDBObject("_id", "testId")); + + assertNotNull(fetched); + DBObject fetchedRefs = (DBObject) fetched.get("refs"); + assertFalse(fetchedRefs.keySet().contains("$id")); + assertEquals(fetched, entity); + } +} diff --git a/driver-legacy/src/test/functional/com/mongodb/DBTest.java b/driver-legacy/src/test/functional/com/mongodb/DBTest.java new file mode 100644 index 00000000000..4ce9b3f760b --- /dev/null +++ b/driver-legacy/src/test/functional/com/mongodb/DBTest.java @@ -0,0 +1,361 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.CollationAlternate; +import com.mongodb.client.model.CollationCaseFirst; +import com.mongodb.client.model.CollationMaxVariable; +import com.mongodb.client.model.CollationStrength; +import com.mongodb.internal.operation.ListCollectionsOperation; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.UuidRepresentation; +import org.bson.codecs.BsonDocumentCodec; +import org.junit.Test; + +import java.util.Locale; +import java.util.UUID; + +import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint; +import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint; +import static com.mongodb.ClusterFixture.getBinding; +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet; +import static com.mongodb.ClusterFixture.isSharded; +import static com.mongodb.DBObjectMatchers.hasFields; +import static com.mongodb.DBObjectMatchers.hasSubdocument; +import static com.mongodb.Fixture.getDefaultDatabaseName; +import static com.mongodb.Fixture.getMongoClient; +import static com.mongodb.ReadPreference.secondary; +import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static java.util.Collections.singletonList; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.hasItem; +import static org.hamcrest.CoreMatchers.hasItems; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.CoreMatchers.sameInstance; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeThat; +import static org.junit.Assume.assumeTrue; + +@SuppressWarnings("deprecation") +public class DBTest extends DatabaseTestCase { + static final String LEGACY_HELLO = "isMaster"; + static 
final String LEGACY_HELLO_LOWER = LEGACY_HELLO.toLowerCase(Locale.ROOT); + @Test + public void shouldGetDefaultWriteConcern() { + assertEquals(WriteConcern.ACKNOWLEDGED, database.getWriteConcern()); + } + + @Test + public void shouldGetDefaultReadPreference() { + assertEquals(ReadPreference.primary(), database.getReadPreference()); + } + + @Test + public void shouldGetMongoClient() { + assertEquals(getMongoClient(), database.getMongoClient()); + } + + @Test + public void shouldReturnCachedCollectionObjectIfExists() { + DBCollection collection1 = database.getCollection("test"); + DBCollection collection2 = database.getCollection("test"); + assertThat("Checking that references are equal", collection1, sameInstance(collection2)); + } + + @Test + public void shouldDropItself() { + // when + String databaseName = "drop-test-" + System.nanoTime(); + DB db = getMongoClient().getDB(databaseName); + db.createCollection(collectionName, new BasicDBObject()); + + // then + assertThat(getMongoClient().listDatabaseNames(), hasItem(databaseName)); + + // when + db.dropDatabase(); + + // then + assertThat(getMongoClient().listDatabaseNames(), not(hasItem(databaseName))); + } + + @Test + public void shouldGetCollectionNames() { + database.dropDatabase(); + + String[] collectionNames = {"c1", "c2", "c3"}; + + for (final String name : collectionNames) { + database.createCollection(name, new BasicDBObject()); + } + + assertThat(database.getCollectionNames(), hasItems(collectionNames)); + } + + @Test + public void shouldDeferCollectionCreationIfOptionsIsNull() { + collection.drop(); + database.createCollection(collectionName, null); + assertFalse(database.getCollectionNames().contains(collectionName)); + } + + @Test + public void shouldCreateCappedCollection() { + collection.drop(); + database.createCollection(collectionName, new BasicDBObject("capped", true) + .append("size", 242880)); + assertTrue(isCapped(database.getCollection(collectionName))); + } + + @Test + public void shouldCreateCappedCollectionWithMaxNumberOfDocuments() { + collection.drop(); + DBCollection cappedCollectionWithMax = database.createCollection(collectionName, new BasicDBObject("capped", true) + .append("size", 242880) + .append("max", 10)); + + assertThat(storageStats(cappedCollectionWithMax), hasSubdocument(new BasicDBObject("capped", true).append("max", 10))); + + for (int i = 0; i < 11; i++) { + cappedCollectionWithMax.insert(new BasicDBObject("x", i)); + } + assertThat(cappedCollectionWithMax.find().count(), is(10)); + } + + @Test + public void shouldCreateUncappedCollection() { + collection.drop(); + BasicDBObject creationOptions = new BasicDBObject("capped", false); + database.createCollection(collectionName, creationOptions); + + assertFalse(isCapped(database.getCollection(collectionName))); + } + + @Test(expected = MongoCommandException.class) + public void shouldThrowErrorIfCreatingACappedCollectionWithANegativeSize() { + collection.drop(); + DBObject creationOptions = BasicDBObjectBuilder.start().add("capped", true) + .add("size", -20).get(); + database.createCollection(collectionName, creationOptions); + } + + @Test + public void shouldCreateCollectionWithTheSetCollation() { + // Given + collection.drop(); + Collation collation = Collation.builder() + .locale("en") + .caseLevel(true) + .collationCaseFirst(CollationCaseFirst.OFF) + .collationStrength(CollationStrength.IDENTICAL) + .numericOrdering(true) + .collationAlternate(CollationAlternate.SHIFTED) + .collationMaxVariable(CollationMaxVariable.SPACE) + .backwards(true) + 
.build(); + + DBObject options = BasicDBObject.parse("{ collation: { locale: 'en', caseLevel: true, caseFirst: 'off', strength: 5," + + "numericOrdering: true, alternate: 'shifted', maxVariable: 'space', backwards: true }}"); + + // When + database.createCollection(collectionName, options); + BsonDocument collectionCollation = getCollectionInfo(collectionName).getDocument("options").getDocument("collation"); + + // Then + BsonDocument collationDocument = collation.asDocument(); + for (String key: collationDocument.keySet()) { + assertEquals(collationDocument.get(key), collectionCollation.get(key)); + } + + // When - collation set on the database + database.getCollection(collectionName).drop(); + database.createCollection(collectionName, new BasicDBObject("collation", BasicDBObject.parse(collation.asDocument().toJson()))); + collectionCollation = getCollectionInfo(collectionName).getDocument("options").getDocument("collation"); + + // Then + collationDocument = collation.asDocument(); + for (String key: collationDocument.keySet()) { + assertEquals(collationDocument.get(key), collectionCollation.get(key)); + } + } + + @Test(expected = DuplicateKeyException.class) + public void shouldGetDuplicateKeyException() { + DBObject doc = new BasicDBObject("_id", 1); + collection.insert(doc); + collection.insert(doc, WriteConcern.ACKNOWLEDGED); + } + + @Test + public void shouldExecuteCommand() { + CommandResult commandResult = database.command(new BasicDBObject(LEGACY_HELLO, 1)); + assertThat(commandResult, hasFields(new String[]{LEGACY_HELLO_LOWER, "maxBsonObjectSize", "ok"})); + } + + @Test(expected = MongoExecutionTimeoutException.class) + public void shouldTimeOutCommand() { + assumeThat(isSharded(), is(false)); + enableMaxTimeFailPoint(); + try { + database.command(new BasicDBObject(LEGACY_HELLO, 1).append("maxTimeMS", 1)); + } finally { + disableMaxTimeFailPoint(); + } + } + + @Test + public void shouldExecuteCommandWithReadPreference() { + assumeTrue(isDiscoverableReplicaSet()); + CommandResult commandResult = database.command(new BasicDBObject("dbStats", 1).append("scale", 1), secondary()); + assertThat(commandResult, hasFields(new String[]{"collections", "avgObjSize", "indexes", "db", "indexSize", "storageSize"})); + } + + @Test + public void shouldNotThrowAnExceptionOnCommandFailure() { + CommandResult commandResult = database.command(new BasicDBObject("nonExistentCommand", 1)); + assertThat(commandResult, hasFields(new String[]{"ok", "errmsg"})); + } + + @Test(expected = IllegalArgumentException.class) + public void shouldThrowAnExceptionWhenDBNameContainsSpaces() { + getMongoClient().getDB("foo bar"); + } + + @Test(expected = IllegalArgumentException.class) + public void shouldThrowAnExceptionWhenDBNameIsEmpty() { + getMongoClient().getDB(""); + } + + @Test + public void shouldIgnoreCaseWhenCheckingIfACollectionExists() { + // Given + database.getCollection("foo1").drop(); + assertFalse(database.collectionExists("foo1")); + + // When + database.createCollection("foo1", new BasicDBObject()); + + // Then + assertTrue(database.collectionExists("foo1")); + assertTrue(database.collectionExists("FOO1")); + assertTrue(database.collectionExists("fOo1")); + + // Finally + database.getCollection("foo1").drop(); + } + + @Test + public void shouldReturnFailureWithErrorMessageWhenExecutingInvalidCommand() { + assumeTrue(!isSharded()); + + // When + CommandResult commandResult = database.command(new BasicDBObject("NotRealCommandName", 1)); + + // Then + assertThat(commandResult.ok(), is(false)); + 
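// the errmsg in the command result should identify the unrecognized command + 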
assertThat(commandResult.getErrorMessage(), containsString("no such")); + } + + @Test + public void shouldReturnOKWhenASimpleCommandExecutesSuccessfully() { + // When + CommandResult commandResult = database.command(new BasicDBObject(LEGACY_HELLO, 1)); + + // Then + assertThat(commandResult.ok(), is(true)); + assertThat((Boolean) commandResult.get(LEGACY_HELLO_LOWER), is(true)); + } + + @Test + public void shouldRunCommandAgainstSecondaryWhenOnlySecondaryReadPreferenceSpecified() { + assumeTrue(isDiscoverableReplicaSet()); + + // When + CommandResult commandResult = database.command(new BasicDBObject("dbstats", 1), secondary()); + + // Then + assertThat(commandResult.ok(), is(true)); + assertThat((String) commandResult.get("serverUsed"), not(containsString(":27017"))); + } + + @Test + public void shouldRunStringCommandAgainstSecondaryWhenSecondaryReadPreferenceSpecified() { + assumeTrue(isDiscoverableReplicaSet()); + + // When + CommandResult commandResult = database.command("dbstats", secondary()); + + // Then + assertThat(commandResult.ok(), is(true)); + assertThat((String) commandResult.get("serverUsed"), not(containsString(":27017"))); + } + + @Test + public void shouldRunCommandAgainstSecondaryWhenOnlySecondaryReadPreferenceSpecifiedAlongWithEncoder() { + assumeTrue(isDiscoverableReplicaSet()); + + // When + CommandResult commandResult = database.command(new BasicDBObject("dbstats", 1), secondary(), DefaultDBEncoder.FACTORY.create()); + + // Then + assertThat(commandResult.ok(), is(true)); + assertThat((String) commandResult.get("serverUsed"), not(containsString(":27017"))); + } + + @Test + public void shouldApplyUuidRepresentationToCommandEncodingAndDecoding() { + try (MongoClient client = new MongoClient(getMongoClientSettingsBuilder() + .uuidRepresentation(UuidRepresentation.STANDARD) + .build())) { + // given + UUID id = UUID.randomUUID(); + DB db = client.getDB(getDefaultDatabaseName()); + db.getCollection(collectionName).insert(new BasicDBObject("_id", id)); + + // when + DBObject reply = db.command(new BasicDBObject("findAndModify", collectionName) + .append("query", new BasicDBObject("_id", id)) + .append("remove", true)); + + // then + assertThat((UUID) ((DBObject) reply.get("value")).get("_id"), is(id)); + } + } + + BsonDocument getCollectionInfo(final String collectionName) { + return new ListCollectionsOperation<>(getDefaultDatabaseName(), new BsonDocumentCodec()) + .filter(new BsonDocument("name", new BsonString(collectionName))).execute(getBinding()).next().get(0); + } + + private boolean isCapped(final DBCollection collection) { + return Boolean.TRUE.equals(storageStats(collection).get("capped")); + } + + private DBObject storageStats(final DBCollection collection) { + try (Cursor cursor = collection.aggregate(singletonList( + new BasicDBObject("$collStats", new BasicDBObject("storageStats", new BasicDBObject()))), + AggregationOptions.builder().build())) { + return (DBObject) cursor.next().get("storageStats"); + } + } +} diff --git a/driver-legacy/src/test/functional/com/mongodb/DatabaseTestCase.java b/driver-legacy/src/test/functional/com/mongodb/DatabaseTestCase.java new file mode 100644 index 00000000000..2dc6f79ac31 --- /dev/null +++ b/driver-legacy/src/test/functional/com/mongodb/DatabaseTestCase.java @@ -0,0 +1,56 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import org.junit.After; +import org.junit.Before; + +import static com.mongodb.Fixture.getDefaultDatabaseName; +import static com.mongodb.Fixture.getMongoClient; +import static com.mongodb.Fixture.getServerSessionPoolInUseCount; + +@SuppressWarnings("deprecation") // This is for testing the old API, so it will use deprecated methods +public class DatabaseTestCase { + //For ease of use and readability, in this specific case we'll allow protected variables + //CHECKSTYLE:OFF + protected DB database; + protected DBCollection collection; + protected String collectionName; + //CHECKSTYLE:ON + + @Before + public void setUp() { + database = getMongoClient().getDB(getDefaultDatabaseName()); + + //create a brand new collection for each test + collectionName = getClass().getName() + System.nanoTime(); + collection = database.getCollection(collectionName); + } + + @After + public void tearDown() { + collection.drop(); + + if (getServerSessionPoolInUseCount() != 0) { + throw new IllegalStateException("Server session in use count is " + getServerSessionPoolInUseCount()); + } + } + + public MongoClient getClient() { + return getMongoClient(); + } +} diff --git a/driver-legacy/src/test/functional/com/mongodb/DefaultDBDecoderTest.java b/driver-legacy/src/test/functional/com/mongodb/DefaultDBDecoderTest.java new file mode 100644 index 00000000000..b42b09109dc --- /dev/null +++ b/driver-legacy/src/test/functional/com/mongodb/DefaultDBDecoderTest.java @@ -0,0 +1,53 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb; + +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + +public class DefaultDBDecoderTest extends DatabaseTestCase { + + @Test + public void testDecodingDBRef() { + DBObject dbObject = new BasicDBObject("r", new DBRef("test", 1)); + byte[] bytes = {37, 0, 0, 0, 3, 114, 0, 29, 0, 0, 0, 2, 36, 114, 101, 102, 0, 5, 0, 0, 0, 116, 101, 115, 116, 0, 16, 36, 105, 100, + 0, 1, 0, 0, 0, 0, 0}; + DBObject o = new DefaultDBDecoder().decode(bytes, collection); + assertEquals(dbObject, o); + } + + @Test + public void testTypeMapping() { + collection.setObjectClass(MyDBObject.class); + collection.setInternalClass("a", AnotherDBObject.class); + byte[] bytes = {20, 0, 0, 0, 3, 97, 0, 12, 0, 0, 0, 16, 105, 0, 1, 0, 0, 0, 0, 0}; + DBObject object = new DefaultDBDecoder().decode(bytes, collection); + assertEquals(MyDBObject.class, object.getClass()); + assertEquals(AnotherDBObject.class, object.get("a").getClass()); + } + + @SuppressWarnings("serial") + public static class MyDBObject extends BasicDBObject { + + } + + @SuppressWarnings("serial") + public static class AnotherDBObject extends BasicDBObject { + + } +} diff --git a/driver-legacy/src/test/functional/com/mongodb/ExplicitUuidCodecUuidRepresentationTest.java b/driver-legacy/src/test/functional/com/mongodb/ExplicitUuidCodecUuidRepresentationTest.java new file mode 100644 index 00000000000..2c9b5a7e56f --- /dev/null +++ b/driver-legacy/src/test/functional/com/mongodb/ExplicitUuidCodecUuidRepresentationTest.java @@ -0,0 +1,59 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb; + +import com.mongodb.client.AbstractExplicitUuidCodecUuidRepresentationTest; +import com.mongodb.client.MongoDatabase; +import org.bson.BsonBinarySubType; +import org.bson.UuidRepresentation; +import org.bson.codecs.UuidCodec; +import org.bson.codecs.configuration.CodecRegistry; +import org.junit.After; + +import java.util.UUID; + +public class ExplicitUuidCodecUuidRepresentationTest extends AbstractExplicitUuidCodecUuidRepresentationTest { + private MongoClient mongoClient; + + public ExplicitUuidCodecUuidRepresentationTest(final UuidRepresentation uuidRepresentationForClient, + final UuidRepresentation uuidRepresentationForExplicitEncoding, + final BsonBinarySubType subType, + final UuidCodec uuidCodec, final UUID uuid, final byte[] encodedValue, + final byte[] standardEncodedValue) { + super(uuidRepresentationForClient, uuidRepresentationForExplicitEncoding, subType, uuidCodec, uuid, encodedValue, + standardEncodedValue); + } + + @Override + protected void createMongoClient(final UuidRepresentation uuidRepresentation, final CodecRegistry codecRegistry) { + mongoClient = new com.mongodb.MongoClient(Fixture.getMongoClientURI(MongoClientOptions.builder(Fixture.getOptions()) + .uuidRepresentation(uuidRepresentation) + .codecRegistry(codecRegistry))); + } + + @Override + protected MongoDatabase getDatabase(final String databaseName) { + return mongoClient.getDatabase(databaseName); + } + + @After + public void cleanUp() { + if (mongoClient != null) { + mongoClient.close(); + } + } +} diff --git a/driver-legacy/src/test/functional/com/mongodb/Fixture.java b/driver-legacy/src/test/functional/com/mongodb/Fixture.java new file mode 100644 index 00000000000..53c92a2b445 --- /dev/null +++ b/driver-legacy/src/test/functional/com/mongodb/Fixture.java @@ -0,0 +1,108 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.connection.ServerDescription; +import org.bson.UuidRepresentation; + +import java.util.List; + +import static com.mongodb.ClusterFixture.getClusterDescription; +import static com.mongodb.ClusterFixture.getServerApi; +import static com.mongodb.internal.connection.ClusterDescriptionHelper.getPrimaries; + +/** + * Helper class for the acceptance tests. 
+ */ +public final class Fixture { + private static final String DEFAULT_DATABASE_NAME = "JavaDriverTest"; + + private static MongoClient mongoClient; + private static MongoClientURI mongoClientURI; + private static DB defaultDatabase; + + private Fixture() { + } + + public static synchronized com.mongodb.MongoClient getMongoClient() { + if (mongoClient == null) { + MongoClientOptions.Builder builder = MongoClientOptions.builder().uuidRepresentation(UuidRepresentation.STANDARD); + if (getServerApi() != null) { + builder.serverApi(getServerApi()); + } + MongoClientURI mongoURI = new MongoClientURI(getMongoClientURIString(), builder); + mongoClient = new MongoClient(mongoURI); + Runtime.getRuntime().addShutdownHook(new ShutdownHook()); + } + return mongoClient; + } + + public static long getServerSessionPoolInUseCount() { + return getMongoClient().getServerSessionPool().getInUseCount(); + } + + public static String getDefaultDatabaseName() { + return DEFAULT_DATABASE_NAME; + } + + static class ShutdownHook extends Thread { + @Override + public void run() { + synchronized (Fixture.class) { + if (mongoClient != null) { + if (defaultDatabase != null) { + defaultDatabase.dropDatabase(); + } + mongoClient.close(); + mongoClient = null; + } + } + } + } + + public static synchronized String getMongoClientURIString() { + return ClusterFixture.getConnectionString().getConnectionString(); + } + + public static synchronized MongoClientURI getMongoClientURI() { + if (mongoClientURI == null) { + mongoClientURI = getMongoClientURI(MongoClientOptions.builder()); + } + return mongoClientURI; + } + + public static synchronized MongoClientURI getMongoClientURI(final MongoClientOptions.Builder builder) { + if (getServerApi() != null) { + builder.serverApi(getServerApi()); + } + return new MongoClientURI(getMongoClientURIString(), builder); + } + + public static MongoClientOptions getOptions() { + return getMongoClientURI().getOptions(); + } + + public static ServerAddress getPrimary() throws InterruptedException { + getMongoClient(); + List<ServerDescription> serverDescriptions = getPrimaries(getClusterDescription(mongoClient.getCluster())); + while (serverDescriptions.isEmpty()) { + Thread.sleep(100); + serverDescriptions = getPrimaries(getClusterDescription(mongoClient.getCluster())); + } + return serverDescriptions.get(0).getAddress(); + } +} diff --git a/driver-legacy/src/test/functional/com/mongodb/FunctionalSpecification.groovy b/driver-legacy/src/test/functional/com/mongodb/FunctionalSpecification.groovy new file mode 100644 index 00000000000..6ffbb8bcce7 --- /dev/null +++ b/driver-legacy/src/test/functional/com/mongodb/FunctionalSpecification.groovy @@ -0,0 +1,51 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb + +import spock.lang.Specification + +import static com.mongodb.Fixture.getDefaultDatabaseName +import static com.mongodb.Fixture.getMongoClient +import static com.mongodb.Fixture.getServerSessionPoolInUseCount + +class FunctionalSpecification extends Specification { + protected DB database + protected DBCollection collection + + def setup() { + database = getMongoClient().getDB(getDefaultDatabaseName()) + collection = database.getCollection(getClass().getName()) + collection.drop() + } + + def cleanup() { + if (collection != null) { + collection.drop() + } + if (getServerSessionPoolInUseCount() != 0) { + throw new IllegalStateException('Server session in use count is ' + getServerSessionPoolInUseCount()) + } + } + + String getDatabaseName() { + getDefaultDatabaseName() + } + + String getCollectionName() { + collection.getName() + } +} diff --git a/driver-legacy/src/test/functional/com/mongodb/LegacyMixedBulkWriteOperationSpecification.groovy b/driver-legacy/src/test/functional/com/mongodb/LegacyMixedBulkWriteOperationSpecification.groovy new file mode 100644 index 00000000000..42854387e4a --- /dev/null +++ b/driver-legacy/src/test/functional/com/mongodb/LegacyMixedBulkWriteOperationSpecification.groovy @@ -0,0 +1,310 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb + +import com.mongodb.internal.bulk.DeleteRequest +import com.mongodb.internal.bulk.InsertRequest +import com.mongodb.internal.bulk.UpdateRequest +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonObjectId +import org.bson.Document +import org.bson.codecs.BsonDocumentCodec +import org.bson.codecs.DocumentCodec +import org.bson.types.ObjectId +import spock.lang.IgnoreIf + +import static com.mongodb.ClusterFixture.getBinding +import static com.mongodb.ClusterFixture.getSingleConnectionBinding +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet +import static com.mongodb.LegacyMixedBulkWriteOperation.createBulkWriteOperationForDelete +import static com.mongodb.LegacyMixedBulkWriteOperation.createBulkWriteOperationForInsert +import static com.mongodb.LegacyMixedBulkWriteOperation.createBulkWriteOperationForReplace +import static com.mongodb.LegacyMixedBulkWriteOperation.createBulkWriteOperationForUpdate +import static com.mongodb.WriteConcern.ACKNOWLEDGED +import static com.mongodb.WriteConcern.UNACKNOWLEDGED +import static com.mongodb.internal.bulk.WriteRequest.Type.REPLACE +import static com.mongodb.internal.bulk.WriteRequest.Type.UPDATE +import static java.util.Arrays.asList + +class LegacyMixedBulkWriteOperationSpecification extends OperationFunctionalSpecification { + + def 'should throw IllegalArgumentException for empty list of requests'() { + when: + createBulkWriteOperationForInsert(getNamespace(), true, ACKNOWLEDGED, true, []) + + then: + thrown(IllegalArgumentException) + } + + def 'should return correct result for insert'() { + given: + def inserts = [new InsertRequest(new BsonDocument('_id', new BsonInt32(1))), + new InsertRequest(new BsonDocument('_id', new BsonInt32(2)))] + def operation = createBulkWriteOperationForInsert(getNamespace(), true, ACKNOWLEDGED, false, inserts) + + when: + def result = execute(operation) + + then: + result.wasAcknowledged() + result.count == 0 + result.upsertedId == null + !result.isUpdateOfExisting() + + inserts*.getDocument() == getCollectionHelper().find(new BsonDocumentCodec()) + } + + def 'should insert a single document'() { + given: + def insert = new InsertRequest(new BsonDocument('_id', new BsonInt32(1))) + def operation = createBulkWriteOperationForInsert(getNamespace(), true, ACKNOWLEDGED, false, asList(insert)) + + when: + execute(operation) + + then: + asList(insert.getDocument()) == getCollectionHelper().find(new BsonDocumentCodec()) + } + + def 'should execute unacknowledged write'() { + given: + def binding = getSingleConnectionBinding() + def operation = createBulkWriteOperationForInsert(getNamespace(), true, UNACKNOWLEDGED, false, + [new InsertRequest(new BsonDocument('_id', new BsonInt32(1))), + new InsertRequest(new BsonDocument('_id', new BsonInt32(2)))]) + + when: + def result = execute(operation, binding) + + then: + !result.wasAcknowledged() + getCollectionHelper().count(binding) == 2 + + cleanup: + binding?.release() + } + + def 'should continue on error when continuing on error'() { + given: + def documents = [ + new InsertRequest(new BsonDocument('_id', new BsonInt32(1))), + new InsertRequest(new BsonDocument('_id', new BsonInt32(1))), + new InsertRequest(new BsonDocument('_id', new BsonInt32(2))), + ] + def operation = createBulkWriteOperationForInsert(getNamespace(), false, ACKNOWLEDGED, false, documents) + + when: + execute(operation) + + then: + thrown(DuplicateKeyException) + getCollectionHelper().count() == 2 + } + + def 'should not continue on 
error when not continuing on error'() { + given: + def documents = [ + new InsertRequest(new BsonDocument('_id', new BsonInt32(1))), + new InsertRequest(new BsonDocument('_id', new BsonInt32(1))), + new InsertRequest(new BsonDocument('_id', new BsonInt32(2))), + ] + def operation = createBulkWriteOperationForInsert(getNamespace(), true, ACKNOWLEDGED, false, documents) + + when: + execute(operation) + + then: + thrown(DuplicateKeyException) + getCollectionHelper().count() == 1 + } + + @IgnoreIf({ !isDiscoverableReplicaSet() }) + def 'should support retryable writes'() { + given: + def insert = new InsertRequest(new BsonDocument('_id', new BsonInt32(1))) + def operation = createBulkWriteOperationForInsert(getNamespace(), true, ACKNOWLEDGED, true, asList(insert)) + + when: + executeWithSession(operation, false) + + then: + asList(insert.getDocument()) == getCollectionHelper().find(new BsonDocumentCodec()) + } + + def 'should remove a document'() { + given: + getCollectionHelper().insertDocuments(new DocumentCodec(), new Document('_id', 1)) + def operation = createBulkWriteOperationForDelete(getNamespace(), true, ACKNOWLEDGED, false, + [new DeleteRequest(new BsonDocument('_id', new BsonInt32(1)))]) + + when: + def result = execute(operation) + + then: + result.wasAcknowledged() + result.count == 1 + result.upsertedId == null + !result.isUpdateOfExisting() + getCollectionHelper().count() == 0 + } + + def 'should return correct result for replace'() { + given: + def replacement = new UpdateRequest(new BsonDocument(), new BsonDocument('_id', new BsonInt32(1)), REPLACE) + def operation = createBulkWriteOperationForReplace(getNamespace(), true, ACKNOWLEDGED, + false, asList(replacement)) + + when: + def result = execute(operation) + + then: + result.wasAcknowledged() + result.count == 0 + result.upsertedId == null + !result.isUpdateOfExisting() + } + + def 'should replace a single document'() { + given: + def insert = new InsertRequest(new BsonDocument('_id', new BsonInt32(1))) + createBulkWriteOperationForInsert(getNamespace(), true, ACKNOWLEDGED, false, asList(insert)) + .execute(getBinding()) + + def replacement = new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)), + new BsonDocument('_id', new BsonInt32(1)).append('x', new BsonInt32(1)), REPLACE) + def operation = createBulkWriteOperationForReplace(getNamespace(), true, ACKNOWLEDGED, + false, asList(replacement)) + + when: + def result = execute(operation) + + then: + result.wasAcknowledged() + result.count == 1 + result.upsertedId == null + result.isUpdateOfExisting() + asList(replacement.getUpdateValue()) == getCollectionHelper().find(new BsonDocumentCodec()) + getCollectionHelper().find().get(0).keySet().iterator().next() == '_id' + } + + def 'should upsert a single document'() { + given: + def replacement = new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)), + new BsonDocument('_id', new BsonInt32(1)).append('x', new BsonInt32(1)), REPLACE) + .upsert(true) + def operation = createBulkWriteOperationForReplace(getNamespace(), true, ACKNOWLEDGED, + false, asList(replacement)) + + when: + execute(operation) + + then: + asList(replacement.getUpdateValue()) == getCollectionHelper().find(new BsonDocumentCodec()) + } + + def 'should update nothing if no documents match'() { + given: + def operation = createBulkWriteOperationForUpdate(getNamespace(), true, ACKNOWLEDGED, + false, asList(new UpdateRequest(new BsonDocument('x', new BsonInt32(1)), + new BsonDocument('$set', new BsonDocument('y', new BsonInt32(2))), 
UPDATE).multi(false))) + + when: + WriteConcernResult result = execute(operation) + + then: + result.wasAcknowledged() + result.count == 0 + result.upsertedId == null + !result.isUpdateOfExisting() + getCollectionHelper().count() == 0 + } + + def 'when multi is false should update one matching document'() { + given: + getCollectionHelper().insertDocuments(new DocumentCodec(), + new Document('x', 1), + new Document('x', 1)) + def operation = createBulkWriteOperationForUpdate(getNamespace(), true, ACKNOWLEDGED, false, + asList(new UpdateRequest(new BsonDocument('x', new BsonInt32(1)), + new BsonDocument('$set', new BsonDocument('y', new BsonInt32(2))), UPDATE).multi(false))) + + when: + WriteConcernResult result = execute(operation) + + then: + result.wasAcknowledged() + result.count == 1 + result.upsertedId == null + result.isUpdateOfExisting() + getCollectionHelper().count(new Document('y', 2)) == 1 + } + + def 'when multi is true should update all matching documents'() { + given: + getCollectionHelper().insertDocuments(new DocumentCodec(), + new Document('x', 1), + new Document('x', 1)) + def operation = createBulkWriteOperationForUpdate(getNamespace(), true, ACKNOWLEDGED, false, + asList(new UpdateRequest(new BsonDocument('x', new BsonInt32(1)), + new BsonDocument('$set', new BsonDocument('y', new BsonInt32(2))), UPDATE).multi(true))) + + when: + WriteConcernResult result = execute(operation) + + then: + result.wasAcknowledged() + result.count == 2 + result.upsertedId == null + result.isUpdateOfExisting() + getCollectionHelper().count(new Document('y', 2)) == 2 + } + + def 'when upsert is true should insert a document if there are no matching documents'() { + given: + def operation = createBulkWriteOperationForUpdate(getNamespace(), true, ACKNOWLEDGED, false, + asList(new UpdateRequest(new BsonDocument('_id', new BsonInt32(1)), + new BsonDocument('$set', new BsonDocument('y', new BsonInt32(2))), UPDATE).upsert(true))) + + when: + WriteConcernResult result = execute(operation) + + then: + result.wasAcknowledged() + result.count == 1 + result.upsertedId == new BsonInt32(1) + !result.isUpdateOfExisting() + getCollectionHelper().count(new Document('y', 2)) == 1 + } + + def 'should return correct result for upsert'() { + given: + def id = new ObjectId() + def operation = createBulkWriteOperationForUpdate(getNamespace(), true, ACKNOWLEDGED, false, + asList(new UpdateRequest(new BsonDocument('_id', new BsonObjectId(id)), + new BsonDocument('$set', new BsonDocument('x', new BsonInt32(1))), UPDATE).upsert(true))) + + when: + WriteConcernResult result = execute(operation) + + then: + result.wasAcknowledged() + result.count == 1 + result.upsertedId == new BsonObjectId(id) + !result.isUpdateOfExisting() + } +} diff --git a/driver-legacy/src/test/functional/com/mongodb/MapReduceOutputSpecification.groovy b/driver-legacy/src/test/functional/com/mongodb/MapReduceOutputSpecification.groovy new file mode 100644 index 00000000000..08e21d79306 --- /dev/null +++ b/driver-legacy/src/test/functional/com/mongodb/MapReduceOutputSpecification.groovy @@ -0,0 +1,158 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb + +import com.mongodb.internal.operation.MapReduceBatchCursor +import com.mongodb.internal.operation.MapReduceStatistics +import spock.lang.Subject + +@SuppressWarnings('deprecated') +class MapReduceOutputSpecification extends FunctionalSpecification { + //example response: +// CommandResult{ +// address=localhost:27017, +// response={ 'result' : { 'db':'output-1383912431569888000', +// 'collection' : 'jmr1_out' +// }, +// 'timeMillis' : 2774, +// 'timing' : { 'mapTime' : 0, +// 'emitLoop' : 2755, +// 'reduceTime' : 15, +// 'mode' : 'mixed', +// 'total' : 2774 }, +// 'counts' : { 'input' : 3, +// 'emit' : 6, +// 'reduce' : 2, +// 'output' : 4 }, +// 'ok' : 1.0 }, +// elapsedNanoseconds=2777341000} + + + def 'should return the name of the collection the results are contained in if it is not inline'() { + given: + def expectedCollectionName = 'collectionForResults' + def outputCollection = database.getCollection(expectedCollectionName) + def results = outputCollection.find() + + @Subject + def mapReduceOutput = new MapReduceOutput(new BasicDBObject(), results, null, outputCollection) + + when: + def collectionName = mapReduceOutput.getCollectionName() + + then: + collectionName != null + collectionName == expectedCollectionName + } + + def 'should return null for the name of the collection if it is inline'() { + given: + MapReduceBatchCursor mongoCursor = Mock() + + @Subject + def mapReduceOutput = new MapReduceOutput(new BasicDBObject(), mongoCursor) + + when: + def collectionName = mapReduceOutput.getCollectionName() + + then: + collectionName == null + } + + def 'should return the name of the database the results are contained in if it is not inline'() { + given: + def expectedDatabaseName = databaseName + def expectedCollectionName = 'collectionForResults' + def outputCollection = database.getCollection(expectedCollectionName) + def results = outputCollection.find() + + @Subject + def mapReduceOutput = new MapReduceOutput(new BasicDBObject(), results, null, outputCollection) + + when: + def databaseName = mapReduceOutput.getDatabaseName() + + then: + databaseName != null + databaseName == expectedDatabaseName + } + + def 'should return the duration for a map-reduce into a collection'() { + given: + def expectedDuration = 2774 + + MapReduceStatistics mapReduceStats = Mock() + mapReduceStats.getDuration() >> expectedDuration + + @Subject + def mapReduceOutput = new MapReduceOutput(new BasicDBObject(), null, mapReduceStats, null) + + expect: + mapReduceOutput.getDuration() == expectedDuration + } + + def 'should return the duration for an inline map-reduce'() { + given: + def expectedDuration = 2774 + + MapReduceBatchCursor mongoCursor = Mock() + mongoCursor.getStatistics() >> new MapReduceStatistics(5, 10, 5, expectedDuration) + + @Subject + def mapReduceOutput = new MapReduceOutput(new BasicDBObject(), mongoCursor) + + expect: + mapReduceOutput.getDuration() == expectedDuration + } + + def 'should return the count values for a map-reduce into a collection'() { + given: + def expectedInputCount = 3 + def expectedOutputCount = 4 + def 
expectedEmitCount = 6 + + MapReduceStatistics mapReduceStats = new MapReduceStatistics(expectedInputCount, expectedOutputCount, expectedEmitCount, 5) + + @Subject + def mapReduceOutput = new MapReduceOutput(new BasicDBObject(), null, mapReduceStats, null) + + expect: + mapReduceOutput.getInputCount() == expectedInputCount + mapReduceOutput.getOutputCount() == expectedOutputCount + mapReduceOutput.getEmitCount() == expectedEmitCount + } + + def 'should return the count values for an inline map-reduce output'() { + given: + def expectedInputCount = 3 + def expectedOutputCount = 4 + def expectedEmitCount = 6 + def expectedDuration = 10 + + MapReduceBatchCursor mapReduceCursor = Mock() + mapReduceCursor.getStatistics() >> new MapReduceStatistics(expectedInputCount, expectedOutputCount, expectedEmitCount, + expectedDuration) + + @Subject + def mapReduceOutput = new MapReduceOutput(new BasicDBObject(), mapReduceCursor) + + expect: + mapReduceOutput.getInputCount() == expectedInputCount + mapReduceOutput.getOutputCount() == expectedOutputCount + mapReduceOutput.getEmitCount() == expectedEmitCount + } +} diff --git a/driver-legacy/src/test/functional/com/mongodb/MapReduceTest.java b/driver-legacy/src/test/functional/com/mongodb/MapReduceTest.java new file mode 100644 index 00000000000..f10a2fd6e93 --- /dev/null +++ b/driver-legacy/src/test/functional/com/mongodb/MapReduceTest.java @@ -0,0 +1,366 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb; + +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.Ignore; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static com.mongodb.ClusterFixture.disableMaxTimeFailPoint; +import static com.mongodb.ClusterFixture.enableMaxTimeFailPoint; +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet; +import static com.mongodb.ClusterFixture.isSharded; +import static com.mongodb.ClusterFixture.serverVersionLessThan; +import static com.mongodb.DBObjectMatchers.hasFields; +import static com.mongodb.DBObjectMatchers.hasSubdocument; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.hamcrest.CoreMatchers.allOf; +import static org.hamcrest.CoreMatchers.everyItem; +import static org.hamcrest.CoreMatchers.hasItem; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.isA; +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeThat; + +@SuppressWarnings("deprecation") +public class MapReduceTest extends DatabaseTestCase { + + private static final String MR_DATABASE = "output-" + System.nanoTime(); + private static final String DEFAULT_COLLECTION = "jmr1_out"; + private static final String DEFAULT_MAP = "function(){ for ( var i=0; i<this.x.length; i++ ){ emit( this.x[i] , 1 ); } }"; + private static final String DEFAULT_REDUCE = "function(key,values){ var sum=0; for( var i=0; i<values.length; i++ ) sum += values[i]; return sum;}"; + + @Before + @Override + public void setUp() { + super.setUp(); + collection.save(new BasicDBObject("x", new String[]{"a", "b"}).append("s", 1)); + collection.save(new BasicDBObject("x", new String[]{"b", "c"}).append("s", 2)); + collection.save(new BasicDBObject("x", new String[]{"c", "d"}).append("s", 3)); + database.getCollection(DEFAULT_COLLECTION).drop(); + } + + @AfterClass + public static void teardownTestSuite() { + Fixture.getMongoClient().dropDatabase(MR_DATABASE); + } + + @Test + public void testMapReduce() { + MapReduceOutput output = collection.mapReduce(DEFAULT_MAP, + DEFAULT_REDUCE, + DEFAULT_COLLECTION, + MapReduceCommand.OutputType.REPLACE, + new BasicDBObject()); + + Map<String, Integer> map = new HashMap<>(); + for (final DBObject r : output.results()) { + map.put(r.get("_id").toString(), ((Number) (r.get("value"))).intValue()); + } + + assertEquals(4, map.size()); + assertEquals(1, map.get("a").intValue()); + assertEquals(2, map.get("b").intValue()); + assertEquals(2, map.get("c").intValue()); + assertEquals(1, map.get("d").intValue()); + } + + @Test + public void testMapReduceWithOutputToAnotherDatabase() { + MapReduceCommand command = new MapReduceCommand(collection, + DEFAULT_MAP, + DEFAULT_REDUCE, + DEFAULT_COLLECTION, + MapReduceCommand.OutputType.REPLACE, + new BasicDBObject()); + command.setOutputDB(MR_DATABASE); + getClient().getDatabase(MR_DATABASE).createCollection(DEFAULT_COLLECTION); + MapReduceOutput output = collection.mapReduce(command); + + @SuppressWarnings("deprecation") + DB db = database.getMongoClient().getDB(MR_DATABASE); + assertTrue(db.collectionExists(DEFAULT_COLLECTION)); + assertEquals(toList(output.results()), toList(db.getCollection(DEFAULT_COLLECTION).find())); + } + + + @Test + public void testMapReduceInlineWScope() { + MapReduceCommand command = new MapReduceCommand(collection, + "function(){ for (var i=0; i<this.x.length; i++){ if(this.x[i] != exclude) emit( this.x[i] , 1 ); } }", + DEFAULT_REDUCE, + null, + MapReduceCommand.OutputType.INLINE, + null); + Map<String, Object> scope = new HashMap<>(); + scope.put("exclude", "a"); + command.setScope(scope); + + List<DBObject> resultsAsList = toList(collection.mapReduce(command).results()); + + assertThat(resultsAsList, not(hasItem(hasSubdocument(new BasicDBObject("_id", "a"))))); + assertThat(resultsAsList, hasItem(hasSubdocument(new BasicDBObject("_id", "b")))); + } + + @Test + public void testOutputCollection() { + String anotherCollectionName = "anotherCollection" + System.nanoTime(); + MapReduceOutput output = collection.mapReduce(DEFAULT_MAP, + DEFAULT_REDUCE, + anotherCollectionName, null); + + assertEquals(database.getCollection(anotherCollectionName).getFullName(), output.getOutputCollection().getFullName()); + 
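// the named output collection is created by the map-reduce and removed again via MapReduceOutput.drop() + 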
assertTrue(database.collectionExists(anotherCollectionName)); + + output.drop(); + + assertFalse(database.collectionExists(anotherCollectionName)); + } + + + @Test + public void testOutputTypeMerge() { + + database.getCollection(DEFAULT_COLLECTION).insert(new BasicDBObject("z", 10)); + + MapReduceOutput output = collection.mapReduce(DEFAULT_MAP, + DEFAULT_REDUCE, + DEFAULT_COLLECTION, + MapReduceCommand.OutputType.MERGE, + null); + + List<DBObject> documents = toList(output.results()); + + assertThat(documents, hasItem(hasSubdocument(new BasicDBObject("z", 10)))); + assertThat(documents, hasItem(hasSubdocument(new BasicDBObject("_id", "a").append("value", 1.0)))); + } + + @Test + public void testOutputTypeReduce() { + //TODO: what exactly is this testing? + collection.mapReduce(DEFAULT_MAP, + DEFAULT_REDUCE, + DEFAULT_COLLECTION, + MapReduceCommand.OutputType.REDUCE, + null); + } + + @Test + public void testMapReduceWithFinalize() { + MapReduceCommand command = new MapReduceCommand(collection, + DEFAULT_MAP, + DEFAULT_REDUCE, + DEFAULT_COLLECTION, + MapReduceCommand.OutputType.REPLACE, new BasicDBObject() + ); + command.setFinalize("function(key,reducedValue){ return reducedValue*5; }"); + + List<DBObject> output = toList(collection.mapReduce(command).results()); + + assertThat(output, hasItem(hasSubdocument(new BasicDBObject("_id", "b").append("value", 10.0)))); + } + + @Test + public void testMapReduceWithQuery() { + MapReduceCommand command = new MapReduceCommand(collection, + DEFAULT_MAP, + DEFAULT_REDUCE, + DEFAULT_COLLECTION, + MapReduceCommand.OutputType.REPLACE, + new BasicDBObject("x", "a")); + + MapReduceOutput output = collection.mapReduce(command); + + Map<String, Object> map = toMap(output.results()); + assertEquals(2, map.size()); + assertEquals(1.0, map.get("a")); + assertEquals(1.0, map.get("b")); + } + + @Test + @Ignore("Not sure about the behavior of sort") + public void testMapReduceWithSort() { + collection.createIndex(new BasicDBObject("s", 1)); + + MapReduceCommand command = new MapReduceCommand(collection, + DEFAULT_MAP, + DEFAULT_REDUCE, + DEFAULT_COLLECTION, + MapReduceCommand.OutputType.REPLACE, + new BasicDBObject("x", "a")); + + command.setSort(new BasicDBObject("s", -1)); + command.setLimit(1); + + MapReduceOutput output = collection.mapReduce(command); + + Map<String, Object> map = toMap(output.results()); + assertEquals(2, map.size()); + assertEquals(1.0, map.get("c")); + assertEquals(1.0, map.get("d")); + } + + @Test + public void testMapReduceWithLimit() { + MapReduceCommand command = new MapReduceCommand(collection, + DEFAULT_MAP, + DEFAULT_REDUCE, + DEFAULT_COLLECTION, + MapReduceCommand.OutputType.INLINE, + new BasicDBObject()); + + command.setLimit(1); + + MapReduceOutput output = collection.mapReduce(command); + + Map<String, Object> map = toMap(output.results()); + assertEquals(2, map.size()); + assertEquals(1.0, map.get("a")); + assertEquals(1.0, map.get("b")); + } + + @Test + public void shouldReturnStatisticsForInlineMapReduce() { + MapReduceCommand command = new MapReduceCommand(collection, + DEFAULT_MAP, + DEFAULT_REDUCE, + DEFAULT_COLLECTION, + MapReduceCommand.OutputType.INLINE, + new BasicDBObject()); + + //when + MapReduceOutput output = collection.mapReduce(command); + + //then + if (serverVersionLessThan(4, 4)) { + assertThat(output.getEmitCount(), is(6)); + assertThat(output.getInputCount(), is(3)); + assertThat(output.getOutputCount(), is(4)); + } else { + assertThat(output.getEmitCount(), is(0)); + assertThat(output.getInputCount(), is(0)); + assertThat(output.getOutputCount(), is(0)); + } + } + + @Test 
+ public void shouldReturnStatisticsForMapReduceIntoACollection() { + MapReduceCommand command = new MapReduceCommand(collection, + DEFAULT_MAP, + DEFAULT_REDUCE, + DEFAULT_COLLECTION, + MapReduceCommand.OutputType.REPLACE, + new BasicDBObject()); + + //when + MapReduceOutput output = collection.mapReduce(command); + + //then + if (serverVersionLessThan(4, 4)) { + assertThat(output.getDuration(), is(greaterThanOrEqualTo(0))); + assertThat(output.getEmitCount(), is(6)); + assertThat(output.getInputCount(), is(3)); + assertThat(output.getOutputCount(), is(4)); + } else { + assertThat(output.getDuration(), is(0)); + assertThat(output.getEmitCount(), is(0)); + assertThat(output.getInputCount(), is(0)); + assertThat(output.getOutputCount(), is(0)); + } + } + + + //TODO: test read preferences - always go to primary for non-inline. Presumably do whatever if inline + + private List<DBObject> toList(final Iterable<DBObject> results) { + List<DBObject> resultsAsList = new ArrayList<>(); + for (final DBObject result : results) { + resultsAsList.add(result); + } + return resultsAsList; + } + + private Map<String, Object> toMap(final Iterable<DBObject> result) { + Map<String, Object> map = new HashMap<>(); + for (final DBObject document : result) { + map.put((String) document.get("_id"), document.get("value")); + } + return map; + } +} diff --git a/driver-legacy/src/test/functional/com/mongodb/MongoClientListenerRegistrationSpecification.groovy b/driver-legacy/src/test/functional/com/mongodb/MongoClientListenerRegistrationSpecification.groovy new file mode 100644 index 00000000000..46e40ed87aa --- /dev/null +++ b/driver-legacy/src/test/functional/com/mongodb/MongoClientListenerRegistrationSpecification.groovy @@ -0,0 +1,185 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb + +import com.mongodb.event.ClusterListener +import com.mongodb.event.CommandListener +import com.mongodb.event.ConnectionPoolListener +import com.mongodb.event.ServerListener +import com.mongodb.event.ServerMonitorListener +import org.bson.Document + +import java.util.concurrent.CountDownLatch +import java.util.concurrent.TimeUnit + +import static Fixture.mongoClientURI + +class MongoClientListenerRegistrationSpecification extends FunctionalSpecification { + + def 'should register event listeners'() { + given: + def clusterListener = Mock(ClusterListener) { + (1.._) * _ + } + def commandListener = Mock(CommandListener) { + (1.._) * _ + } + def connectionPoolListener = Mock(ConnectionPoolListener) { + (1.._) * _ + } + def serverListener = Mock(ServerListener) { + (1.._) * _ + } + when: + def optionBuilder = MongoClientOptions.builder(mongoClientURI.options) + .addClusterListener(clusterListener) + .addCommandListener(commandListener) + .addConnectionPoolListener(connectionPoolListener) + .addServerListener(serverListener) + def client = new MongoClient(getMongoClientURI(optionBuilder)) + + then: + client.getDatabase('admin').runCommand(new Document('ping', 1)) + + cleanup: + client?.close() + } + + def 'should register single command listener'() { + given: + def first = Mock(CommandListener) + def optionsBuilder = MongoClientOptions.builder(mongoClientURI.options) + .addCommandListener(first) + def client = new MongoClient(getMongoClientURI(optionsBuilder)) + + when: + client.getDatabase('admin').runCommand(new Document('ping', 1)) + + then: + 1 * first.commandStarted(_) + 1 * first.commandSucceeded(_) + + cleanup: + client?.close() + } + + def 'should register multiple command listeners'() { + given: + def first = Mock(CommandListener) + def second = Mock(CommandListener) + def optionsBuilder = MongoClientOptions.builder(mongoClientURI.options) + .addCommandListener(first) + .addCommandListener(second) + def client = new MongoClient(getMongoClientURI(optionsBuilder)) + + when: + client.getDatabase('admin').runCommand(new Document('ping', 1)) + + then: + 1 * first.commandStarted(_) + 1 * second.commandStarted(_) + 1 * first.commandSucceeded(_) + 1 * second.commandSucceeded(_) + + cleanup: + client?.close() + } + + def 'should register single listeners for monitor events'() { + given: + def latch = new CountDownLatch(1) + def clusterListener = Mock(ClusterListener) { + 1 * clusterOpening(_) + } + def serverListener = Mock(ServerListener) { + (1.._) * serverOpening(_) + } + def serverMonitorListener = Mock(ServerMonitorListener) { + (1.._) * serverHeartbeatStarted(_) >> { + if (latch.count > 0) { + latch.countDown() + } + } + } + + def optionsBuilder = MongoClientOptions.builder(mongoClientURI.options) + .heartbeatFrequency(1) + .addClusterListener(clusterListener) + .addServerListener(serverListener) + .addServerMonitorListener(serverMonitorListener) + def client = new MongoClient(getMongoClientURI(optionsBuilder)) + + when: + def finished = latch.await(5, TimeUnit.SECONDS) + + then: + finished + + cleanup: + client?.close() + } + + def 'should register multiple listeners for monitor events'() { + given: + def latch = new CountDownLatch(2) + def clusterListener = Mock(ClusterListener) { + 1 * clusterOpening(_) + } + def serverListener = Mock(ServerListener) { + (1.._) * serverOpening(_) + } + def serverMonitorListener = Mock(ServerMonitorListener) { + (1.._) * serverHeartbeatStarted(_) >> { + if (latch.count > 0) { + latch.countDown() + } + } + } + def 
clusterListenerTwo = Mock(ClusterListener) { + 1 * clusterOpening(_) + } + def serverListenerTwo = Mock(ServerListener) { + (1.._) * serverOpening(_) + } + def serverMonitorListenerTwo = Mock(ServerMonitorListener) { + (1.._) * serverHeartbeatStarted(_) >> { + if (latch.count > 0) { + latch.countDown() + } + } + } + + def optionsBuilder = MongoClientOptions.builder(mongoClientURI.options) + .heartbeatFrequency(1) + .addClusterListener(clusterListener) + .addServerListener(serverListener) + .addServerMonitorListener(serverMonitorListener) + .addClusterListener(clusterListenerTwo) + .addServerListener(serverListenerTwo) + .addServerMonitorListener(serverMonitorListenerTwo) + def client = new MongoClient(getMongoClientURI(optionsBuilder)) + + when: + def finished = latch.await(5, TimeUnit.SECONDS) + + then: + finished + + cleanup: + client?.close() + } +} diff --git a/driver-legacy/src/test/functional/com/mongodb/MongoClientSessionSpecification.groovy b/driver-legacy/src/test/functional/com/mongodb/MongoClientSessionSpecification.groovy new file mode 100644 index 00000000000..333a9b95bd8 --- /dev/null +++ b/driver-legacy/src/test/functional/com/mongodb/MongoClientSessionSpecification.groovy @@ -0,0 +1,361 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb + +import com.mongodb.client.MongoCollection +import com.mongodb.client.model.Filters +import com.mongodb.event.CommandStartedEvent +import com.mongodb.internal.connection.TestCommandListener +import com.mongodb.spock.Slow +import org.bson.BsonBinarySubType +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonTimestamp +import org.bson.Document +import org.bson.types.ObjectId +import org.junit.Assert +import spock.lang.IgnoreIf + +import java.util.concurrent.TimeUnit + +import static Fixture.getDefaultDatabaseName +import static Fixture.getMongoClientURI +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet +import static com.mongodb.Fixture.getMongoClient +import static com.mongodb.Fixture.getOptions + +class MongoClientSessionSpecification extends FunctionalSpecification { + + def 'should throw IllegalArgumentException if options are null'() { + when: + getMongoClient().startSession(null) + + then: + thrown(IllegalArgumentException) + } + + def 'should create session with correct defaults'() { + given: + def clientSession = getMongoClient().startSession() + + expect: + clientSession.getOriginator() == getMongoClient().getDelegate() + clientSession.isCausallyConsistent() + clientSession.getOptions() == ClientSessionOptions.builder() + .defaultTransactionOptions(TransactionOptions.builder() + .readConcern(ReadConcern.DEFAULT) + .writeConcern(WriteConcern.ACKNOWLEDGED) + .readPreference(ReadPreference.primary()) + .build()) + .build() + clientSession.getClusterTime() == null + clientSession.getOperationTime() == null + clientSession.getServerSession() != null + + cleanup: + clientSession?.close() + } + + def 'cluster time should advance'() { + given: + def firstOperationTime = new BsonTimestamp(42, 1) + def secondOperationTime = new BsonTimestamp(52, 1) + def thirdOperationTime = new BsonTimestamp(22, 1) + def firstClusterTime = new BsonDocument('clusterTime', firstOperationTime) + def secondClusterTime = new BsonDocument('clusterTime', secondOperationTime) + def olderClusterTime = new BsonDocument('clusterTime', thirdOperationTime) + + when: + def clientSession = getMongoClient().startSession(ClientSessionOptions.builder().build()) + + then: + clientSession.getClusterTime() == null + + when: + clientSession.advanceClusterTime(null) + + then: + clientSession.getClusterTime() == null + + when: + clientSession.advanceClusterTime(firstClusterTime) + + then: + clientSession.getClusterTime() == firstClusterTime + + when: + clientSession.advanceClusterTime(secondClusterTime) + + then: + clientSession.getClusterTime() == secondClusterTime + + when: + clientSession.advanceClusterTime(olderClusterTime) + + then: + clientSession.getClusterTime() == secondClusterTime + + cleanup: + clientSession?.close() + } + + def 'operation time should advance'() { + given: + def firstOperationTime = new BsonTimestamp(42, 1) + def secondOperationTime = new BsonTimestamp(52, 1) + def olderOperationTime = new BsonTimestamp(22, 1) + + when: + def clientSession = getMongoClient().startSession(ClientSessionOptions.builder().build()) + + then: + clientSession.getOperationTime() == null + + when: + clientSession.advanceOperationTime(null) + + then: + clientSession.getOperationTime() == null + + when: + clientSession.advanceOperationTime(firstOperationTime) + + then: + clientSession.getOperationTime() == firstOperationTime + + when: + clientSession.advanceOperationTime(secondOperationTime) + + then: + clientSession.getOperationTime() == secondOperationTime + 
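// advancing with an older operation time must not move the session's operation time backwards + 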
+ when: + clientSession.advanceOperationTime(olderOperationTime) + + then: + clientSession.getOperationTime() == secondOperationTime + + cleanup: + clientSession?.close() + } + + def 'methods that use the session should throw if the session is closed'() { + given: + def options = ClientSessionOptions.builder().build() + def clientSession = getMongoClient().startSession(options) + clientSession.close() + + when: + clientSession.getServerSession() + + then: + thrown(IllegalStateException) + + when: + clientSession.advanceOperationTime(new BsonTimestamp(42, 0)) + + then: + thrown(IllegalStateException) + + when: + clientSession.advanceClusterTime(new BsonDocument()) + + then: + thrown(IllegalStateException) + + cleanup: + clientSession?.close() + } + + def 'informational methods should not throw if the session is closed'() { + given: + def options = ClientSessionOptions.builder().build() + def clientSession = getMongoClient().startSession(options) + clientSession.close() + + when: + clientSession.getOptions() + clientSession.isCausallyConsistent() + clientSession.getClusterTime() + clientSession.getOperationTime() + + then: + noExceptionThrown() + + cleanup: + clientSession?.close() + } + + def 'should apply causally consistent session option to client session'() { + when: + def clientSession = getMongoClient().startSession(ClientSessionOptions.builder() + .causallyConsistent(causallyConsistent) + .build()) + + then: + clientSession != null + clientSession.isCausallyConsistent() == causallyConsistent + + cleanup: + clientSession?.close() + + where: + causallyConsistent << [true, false] + } + + def 'client session should have server session with valid identifier'() { + given: + def clientSession = getMongoClient().startSession(ClientSessionOptions.builder().build()) + + when: + def identifier = clientSession.getServerSession().identifier + + then: + identifier.size() == 1 + identifier.containsKey('id') + identifier.get('id').isBinary() + identifier.getBinary('id').getType() == BsonBinarySubType.UUID_STANDARD.value + identifier.getBinary('id').data.length == 16 + + cleanup: + clientSession?.close() + } + + def 'should use a default session'() { + given: + def commandListener = new TestCommandListener() + def optionsBuilder = MongoClientOptions.builder(getOptions()) + .addCommandListener(commandListener) + def client = new MongoClient(getMongoClientURI(optionsBuilder)) + + when: + client.getDatabase('admin').runCommand(new BsonDocument('ping', new BsonInt32(1))) + + then: + commandListener.events.size() == 2 + def pingCommandStartedEvent = commandListener.events.get(0) as CommandStartedEvent + pingCommandStartedEvent.command.containsKey('lsid') + + cleanup: + client?.close() + } + + // This test attempts to demonstrate that causal consistency works correctly by inserting a document and then immediately + // searching for that document on a secondary by its _id and failing the test if the document is not found. Without causal consistency + // enabled the expectation is that eventually that test would fail since generally the find will execute on the secondary before + // the secondary has a chance to replicate the document. + // This test is inherently racy as it's possible that the server _does_ replicate fast enough and therefore the test passes anyway + // even if causal consistency was not actually in effect.
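+ // Editorial sketch (illustrative, not part of the original patch): with causal consistency
+ // enabled, the driver sends the session's last observed operationTime with each read, so
+ // the secondary find issued by this test carries a read concern shaped roughly like:
+ //   readConcern: { level: '<level>', afterClusterTime: <operationTime of the insert> }
+ // and the secondary delays the query until its oplog has replicated past that timestamp.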
For that reason the test iterates a number of times in order to increase + // confidence that it's really causal consistency that is causing the test to succeed + @Slow + def 'should find inserted document on a secondary when causal consistency is enabled'() { + given: + def collection = getMongoClient().getDatabase(getDefaultDatabaseName()).getCollection(getCollectionName()) + + expect: + def clientSession = getMongoClient().startSession(ClientSessionOptions.builder() + .causallyConsistent(true) + .build()) + try { + for (int i = 0; i < 16; i++) { + Document document = new Document('_id', i) + collection.insertOne(clientSession, document) + Document foundDocument = collection + .withReadPreference(ReadPreference.secondaryPreferred()) // read from secondary if available + .withReadConcern(readConcern) + .find(clientSession, document) + .maxTime(30, TimeUnit.SECONDS) // to avoid the test running forever in case replication is broken + .first() + if (foundDocument == null) { + Assert.fail('Should have found recently inserted document on secondary with causal consistency enabled') + } + } + } finally { + clientSession.close() + } + + where: + readConcern << [ReadConcern.DEFAULT, ReadConcern.LOCAL, ReadConcern.MAJORITY] + } + + + def 'should not use an implicit session for an unacknowledged write'() { + given: + def commandListener = new TestCommandListener() + def optionsBuilder = MongoClientOptions.builder(getOptions()) + .addCommandListener(commandListener) + def mongoClientURI = getMongoClientURI(optionsBuilder) + def client = new MongoClient(mongoClientURI) + def collection = client.getDatabase(getDatabaseName()).getCollection(getCollectionName()) + def id = new ObjectId() + + when: + collection.withWriteConcern(WriteConcern.UNACKNOWLEDGED).insertOne(new Document('_id', id)) + + then: + def insertEvent = commandListener.events.get(0) as CommandStartedEvent + !insertEvent.command.containsKey('lsid') + + cleanup: + waitForInsertAcknowledgement(collection, id) + client?.close() + } + + def 'should throw exception if unacknowledged write used with explicit session'() { + given: + def session = getMongoClient().startSession() + + when: + getMongoClient().getDatabase(getDatabaseName()).getCollection(getCollectionName()) + .withWriteConcern(WriteConcern.UNACKNOWLEDGED) + .insertOne(session, new Document()) + + then: + thrown(MongoClientException) + + cleanup: + session?.close() + } + + @IgnoreIf({ !isDiscoverableReplicaSet() }) + def 'should ignore unacknowledged write concern when in a transaction'() { + given: + def collection = getMongoClient().getDatabase(getDatabaseName()).getCollection(getCollectionName()) + collection.insertOne(new Document()) + + def session = getMongoClient().startSession() + session.startTransaction() + + when: + collection.withWriteConcern(WriteConcern.UNACKNOWLEDGED) + .insertOne(session, new Document()) + + then: + noExceptionThrown() + + cleanup: + session.close() + } + + void waitForInsertAcknowledgement(MongoCollection collection, ObjectId id) { + Document document = collection.find(Filters.eq(id)).first() + while (document == null) { + Thread.sleep(1) + document = collection.find(Filters.eq(id)).first() + } + } +} diff --git a/driver-legacy/src/test/functional/com/mongodb/MongoClientsSpecification.groovy b/driver-legacy/src/test/functional/com/mongodb/MongoClientsSpecification.groovy new file mode 100644 index 00000000000..307f8e9440c --- /dev/null +++ b/driver-legacy/src/test/functional/com/mongodb/MongoClientsSpecification.groovy @@ -0,0 +1,81 @@ +/* + * 
Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb + +import com.mongodb.connection.ClusterDescription +import com.mongodb.connection.ServerDescription +import com.mongodb.event.CommandFailedEvent +import com.mongodb.event.CommandListener +import com.mongodb.event.CommandStartedEvent +import com.mongodb.event.CommandSucceededEvent +import spock.lang.IgnoreIf + +import static Fixture.getDefaultDatabaseName +import static Fixture.getMongoClientURI +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet +import static com.mongodb.Fixture.getOptions + +class MongoClientsSpecification extends FunctionalSpecification { + + @IgnoreIf({ !isDiscoverableReplicaSet() }) + def 'should use server selector from MongoClientOptions'() { + given: + def expectedWinningAddresses = [] as Set + def actualWinningAddresses = [] as Set + def optionsBuilder = MongoClientOptions.builder(getOptions()) + // select the suitable server with the highest port number + .serverSelector { ClusterDescription clusterDescription -> + def highestPortServer + for (ServerDescription cur : clusterDescription.getServerDescriptions()) { + if (highestPortServer == null || cur.address.port > highestPortServer.address.port) { + highestPortServer = cur + } + } + if (highestPortServer == null) { + return [] + } + expectedWinningAddresses.add(highestPortServer.address) + [highestPortServer] + }.addCommandListener(new CommandListener() { + // record each address actually used + @Override + void commandStarted(final CommandStartedEvent event) { + actualWinningAddresses.add(event.connectionDescription.connectionId.serverId.address) + } + + @Override + void commandSucceeded(final CommandSucceededEvent event) { + } + + @Override + void commandFailed(final CommandFailedEvent event) { + } + }) + + def client = new MongoClient(getMongoClientURI(optionsBuilder)) + def collection = client.getDatabase(getDefaultDatabaseName()).getCollection(getCollectionName()) + .withReadPreference(ReadPreference.nearest()) + + when: + for (int i = 0; i < 10; i++) { + collection.countDocuments() + } + + then: + expectedWinningAddresses.containsAll(actualWinningAddresses) + } +} diff --git a/driver-legacy/src/test/functional/com/mongodb/QueryBuilderTest.java b/driver-legacy/src/test/functional/com/mongodb/QueryBuilderTest.java new file mode 100644 index 00000000000..9c70d0dc8a2 --- /dev/null +++ b/driver-legacy/src/test/functional/com/mongodb/QueryBuilderTest.java @@ -0,0 +1,439 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import com.mongodb.QueryBuilder.QueryBuilderException; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.regex.Pattern; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +public class QueryBuilderTest extends DatabaseTestCase { + + @Test + public void elemMatchTest() { + DBObject query = QueryBuilder.start("array").elemMatch( + QueryBuilder.start("x").is(1).and("y").is(2).get()).get(); + DBObject expected = new BasicDBObject("array", new BasicDBObject("$elemMatch", + new BasicDBObject("x", 1).append("y", 2))); + assertEquals(expected, query); + // TODO: add integration test + } + + @Test + public void notTest() { + Pattern pattern = Pattern.compile("\\w*"); + DBObject query = QueryBuilder.start("x").not().regex(pattern).get(); + DBObject expected = new BasicDBObject("x", new BasicDBObject("$not", pattern)); + assertEquals(expected, query); + + query = QueryBuilder.start("x").not().regex(pattern).and("y").is("foo").get(); + expected = new BasicDBObject("x", new BasicDBObject("$not", pattern)).append("y", "foo"); + assertEquals(expected, query); + + query = QueryBuilder.start("x").not().greaterThan(2).get(); + expected = new BasicDBObject("x", new BasicDBObject("$not", new BasicDBObject("$gt", 2))); + assertEquals(expected, query); + + query = QueryBuilder.start("x").not().greaterThan(2).and("y").is("foo").get(); + expected = new BasicDBObject("x", new BasicDBObject("$not", new BasicDBObject("$gt", 2))).append("y", "foo"); + assertEquals(expected, query); + + + query = QueryBuilder.start("x").not().greaterThan(2).lessThan(0).get(); + expected = new BasicDBObject("x", new BasicDBObject("$not", new BasicDBObject("$gt", 2).append("$lt", 0))); + assertEquals(expected, query); + + } + + @Test + public void greaterThanTest() { + String key = "x"; + saveTestDocument(collection, key, 0); + + DBObject queryTrue = QueryBuilder.start(key).greaterThan(-1).get(); + assertTrue(testQuery(collection, queryTrue)); + + DBObject queryFalse = QueryBuilder.start(key).greaterThan(0).get(); + assertFalse(testQuery(collection, queryFalse)); + } + + @Test + public void greaterThanEqualsTest() { + String key = "x"; + saveTestDocument(collection, key, 0); + + DBObject queryTrue = QueryBuilder.start(key).greaterThanEquals(0).get(); + assertTrue(testQuery(collection, queryTrue)); + + DBObject queryTrue2 = QueryBuilder.start(key).greaterThanEquals(-1).get(); + assertTrue(testQuery(collection, queryTrue2)); + + DBObject queryFalse = QueryBuilder.start(key).greaterThanEquals(1).get(); + assertFalse(testQuery(collection, queryFalse)); + + } + + @Test + public void lessThanTest() { + String key = "x"; + saveTestDocument(collection, key, 0); + + DBObject queryTrue = QueryBuilder.start(key).lessThan(1).get(); + assertTrue(testQuery(collection, queryTrue)); + + DBObject queryFalse = QueryBuilder.start(key).lessThan(0).get(); + assertFalse(testQuery(collection, queryFalse)); + + } + + @Test + public void lessThanEqualsTest() { + String key = "x"; + saveTestDocument(collection, key, 0); + + DBObject queryTrue = QueryBuilder.start(key).lessThanEquals(1).get(); + assertTrue(testQuery(collection, queryTrue)); + + DBObject queryTrue2 = QueryBuilder.start(key).lessThanEquals(0).get(); + assertTrue(testQuery(collection, queryTrue2)); + + DBObject queryFalse = 
QueryBuilder.start(key).lessThanEquals(-1).get(); + assertFalse(testQuery(collection, queryFalse)); + } + + @Test + public void isTest() { + String key = "x"; + saveTestDocument(collection, key, "test"); + + DBObject queryTrue = QueryBuilder.start(key).is("test").get(); + assertTrue(testQuery(collection, queryTrue)); + + DBObject queryFalse = QueryBuilder.start(key).is("test1").get(); + assertFalse(testQuery(collection, queryFalse)); + } + + @Test + public void notEqualsTest() { + String key = "x"; + saveTestDocument(collection, key, "test"); + + DBObject queryTrue = QueryBuilder.start(key).notEquals("test1").get(); + assertTrue(testQuery(collection, queryTrue)); + + DBObject queryFalse = QueryBuilder.start(key).notEquals("test").get(); + assertFalse(testQuery(collection, queryFalse)); + + } + + @Test + public void inTest() { + String key = "x"; + saveTestDocument(collection, key, 1); + + DBObject queryTrue = QueryBuilder.start(key).in(Arrays.asList(1, 2, 3)).get(); + assertTrue(testQuery(collection, queryTrue)); + + DBObject queryFalse = QueryBuilder.start(key).in(Arrays.asList(2, 3, 4)).get(); + assertFalse(testQuery(collection, queryFalse)); + } + + @Test + public void notInTest() { + String key = "x"; + saveTestDocument(collection, key, 1); + + DBObject queryTrue = QueryBuilder.start(key).notIn(Arrays.asList(2, 3, 4)).get(); + assertTrue(testQuery(collection, queryTrue)); + + DBObject queryFalse = QueryBuilder.start(key).notIn(Arrays.asList(1, 2, 3)).get(); + assertFalse(testQuery(collection, queryFalse)); + } + + @Test + public void modTest() { + String key = "x"; + saveTestDocument(collection, key, 9); + + DBObject queryTrue = QueryBuilder.start(key).mod(Arrays.asList(2, 1)).get(); + assertTrue(testQuery(collection, queryTrue)); + + DBObject queryFalse = QueryBuilder.start(key).mod(Arrays.asList(2, 0)).get(); + assertFalse(testQuery(collection, queryFalse)); + } + + @Test + public void allTest() { + String key = "x"; + saveTestDocument(collection, key, Arrays.asList(1, 2, 3)); + + DBObject query = QueryBuilder.start(key).all(Arrays.asList(1, 2, 3)).get(); + assertTrue(testQuery(collection, query)); + + DBObject queryFalse = QueryBuilder.start(key).all(Arrays.asList(2, 3, 4)).get(); + assertFalse(testQuery(collection, queryFalse)); + } + + @Test + public void sizeTest() { + String key = "x"; + saveTestDocument(collection, key, Arrays.asList(1, 2, 3)); + + DBObject queryTrue = QueryBuilder.start(key).size(3).get(); + assertTrue(testQuery(collection, queryTrue)); + + DBObject queryFalse = QueryBuilder.start(key).size(4).get(); + assertFalse(testQuery(collection, queryFalse)); + + DBObject queryFalse2 = QueryBuilder.start(key).size(2).get(); + assertFalse(testQuery(collection, queryFalse2)); + } + + @Test + public void existsTest() { + String key = "x"; + saveTestDocument(collection, key, "test"); + + DBObject queryTrue = QueryBuilder.start(key).exists(true).get(); + assertTrue(testQuery(collection, queryTrue)); + + DBObject queryFalse = QueryBuilder.start(key).exists(false).get(); + assertFalse(testQuery(collection, queryFalse)); + } + + @Test + public void regexTest() { + String key = "x"; + saveTestDocument(collection, key, "test"); + + DBObject queryTrue = QueryBuilder.start(key).regex(Pattern.compile("\\w*")).get(); + assertTrue(testQuery(collection, queryTrue)); + } + + @Test + public void rangeChainTest() { + String key = "x"; + saveTestDocument(collection, key, 2); + + DBObject queryTrue = QueryBuilder.start(key).greaterThan(0).lessThan(3).get(); + 
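+ // Editorial note (not part of the original patch): chaining comparison operators on the
+ // same key merges them into a single operator document, so queryTrue above is equivalent
+ // to new BasicDBObject("x", new BasicDBObject("$gt", 0).append("$lt", 3)).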
assertTrue(testQuery(collection, queryTrue)); + } + + @Test + public void compoundChainTest() { + String key = "x"; + String key2 = "y"; + String value = key; + DBObject testDocument = new BasicDBObject(); + testDocument.put(key, value); + testDocument.put(key2, 9); + collection.save(testDocument); + + DBObject queryTrue = QueryBuilder.start(key).is(value).and(key2).mod(Arrays.asList(2, 1)).get(); + assertTrue(testQuery(collection, queryTrue)); + } + + @Test + public void arrayChainTest() { + String key = "x"; + saveTestDocument(collection, key, Arrays.asList(1, 2, 3)); + + DBObject queryTrue = QueryBuilder.start(key).all(Arrays.asList(1, 2, 3)).size(3).get(); + assertTrue(testQuery(collection, queryTrue)); + } + + @Test + public void nearTest() { + String key = "loc"; + BasicDBObject geoSpatialIndex = new BasicDBObject(); + geoSpatialIndex.put(key, "2d"); + collection.createIndex(geoSpatialIndex); + + Double[] coordinates = {(double) 50, (double) 30}; + saveTestDocument(collection, key, coordinates); + + DBObject queryTrue = QueryBuilder.start(key).near(45, 45).get(); + DBObject expected = new BasicDBObject(key, new BasicDBObject("$near", Arrays.asList(45.0, 45.0))); + assertEquals(expected, queryTrue); + assertTrue(testQuery(collection, queryTrue)); + + queryTrue = QueryBuilder.start(key).near(45, 45, 16).get(); + expected = new BasicDBObject(key, new BasicDBObject("$near", Arrays.asList(45.0, 45.0)) + .append("$maxDistance", 16.0)); + assertEquals(expected, queryTrue); + assertTrue(testQuery(collection, queryTrue)); + + queryTrue = QueryBuilder.start(key).nearSphere(45, 45).get(); + expected = new BasicDBObject(key, new BasicDBObject("$nearSphere", Arrays.asList(45.0, 45.0))); + assertEquals(expected, queryTrue); + assertTrue(testQuery(collection, queryTrue)); + + queryTrue = QueryBuilder.start(key).nearSphere(45, 45, 0.5).get(); + expected = new BasicDBObject(key, new BasicDBObject("$nearSphere", Arrays.asList(45.0, 45.0)) + .append("$maxDistance", 0.5)); + assertEquals(expected, queryTrue); + assertTrue(testQuery(collection, queryTrue)); + + queryTrue = QueryBuilder.start(key).withinCenterSphere(50, 30, 0.5).get(); + assertTrue(testQuery(collection, queryTrue)); + + ArrayList<Double[]> points = new ArrayList<>(); + points.add(new Double[]{(double) 30, (double) 30}); + points.add(new Double[]{(double) 70, (double) 30}); + points.add(new Double[]{(double) 70, (double) 30}); + queryTrue = QueryBuilder.start(key).withinPolygon(points).get(); + assertTrue(testQuery(collection, queryTrue)); + } + + @Test(expected = IllegalArgumentException.class) + public void shouldThrowExceptionNoPolygonGivenForWithinQuery() { + QueryBuilder.start("loc").withinPolygon(null); + } + + @Test(expected = IllegalArgumentException.class) + public void shouldThrowExceptionIfNoVerticesDefinedForPolygon() { + QueryBuilder.start("loc").withinPolygon(new ArrayList<>()); + } + + @Test(expected = IllegalArgumentException.class) + public void shouldThrowExceptionIfInsufficientVerticesDefinedForPolygon() { + QueryBuilder.start("loc").withinPolygon(Arrays.asList(new Double[]{30.0, 30.0})); + } + + @Test + public void textTest() { + BasicDBObject enableTextCommand = new BasicDBObject("setParameter", 1).append("textSearchEnabled", true); + database.getSisterDB("admin").command(enableTextCommand); + DBCollection collection = database.getCollection("text-test"); + BasicDBObject textIndex = new BasicDBObject("comments", "text"); + collection.createIndex(textIndex); + + BasicDBObject doc = new BasicDBObject("comments", "Lorem ipsum dolor sit amet, consectetur adipiscing elit.") + .append("meaning", 42); + collection.save(doc); + + DBObject queryTrue = QueryBuilder.start().text("dolor").get(); + DBObject expected = new BasicDBObject("$text", new BasicDBObject("$search", "dolor")); + assertEquals(expected, queryTrue); + assertTrue(testQuery(collection, queryTrue)); + + queryTrue = QueryBuilder.start().text("dolor", "english").get(); + expected = new BasicDBObject("$text", new BasicDBObject("$search", "dolor").append("$language", "english")); + assertEquals(expected, queryTrue); + assertTrue(testQuery(collection, queryTrue)); + + queryTrue = QueryBuilder.start().and( + QueryBuilder.start().text("dolor").get(), + QueryBuilder.start("meaning").greaterThan(21).get()).get(); + expected = new BasicDBObject("$and", + Arrays.asList( + new BasicDBObject("$text", new BasicDBObject("$search", "dolor")), + new BasicDBObject("meaning", new BasicDBObject("$gt", 21)))); + assertEquals(expected, queryTrue); + assertTrue(testQuery(collection, queryTrue)); + } + + @Test(expected = QueryBuilderException.class) + public void shouldThrowAnExceptionIfTextIsNotAtTheTopLevelOfTheQuery() { + QueryBuilder.start("x").text("funny"); + } + + @Test + public void failureTest() { + boolean thrown = false; + try { + QueryBuilder.start("x").get(); + } catch (QueryBuilderException e) { + thrown = true; + } + assertTrue(thrown); + + boolean thrown2 = false; + try { + QueryBuilder.start("x").exists(true).and("y").get(); + } catch (QueryBuilderException e) { + thrown2 = true; + } + assertTrue(thrown2); + + boolean thrown3 = false; + try { + QueryBuilder.start("x").and("y").get(); + } catch (QueryBuilderException e) { + thrown3 = true; + } + assertTrue(thrown3); + } + + @Test + public void testOr() { + collection.drop(); + collection.insert(new BasicDBObject("a", 1)); + collection.insert(new BasicDBObject("b", 1)); + + DBObject q = QueryBuilder.start().or(new BasicDBObject("a", 1), new BasicDBObject("b", 1)).get(); + + assertEquals(2, collection.find(q).count()); + } + + @Test + public void testAnd() { + collection.drop(); + collection.insert(new BasicDBObject("a", 1).append("b", 1)); + collection.insert(new BasicDBObject("b", 1)); + + DBObject q = QueryBuilder.start().and(new BasicDBObject("a", 1), new BasicDBObject("b", 1)).get(); + + assertEquals(1, collection.find(q).count()); + } + + @Test + public void testMultipleAnd() { + collection.drop(); + collection.insert(new BasicDBObject("a", 1).append("b", 1)); + collection.insert(new BasicDBObject("b", 1)); + + DBObject q = QueryBuilder.start().and(new BasicDBObject("a", 1), new BasicDBObject("b", 1)).get(); + + assertEquals(1, collection.find(q).count()); + } + + /** + * Convenience method that creates a new MongoDB Document with a key-value pair and saves it inside the specified collection + * + * @param collection Collection to save the new document to + * @param key key of the field to be inserted to the new document + * @param value value of the field to be inserted to the new document + */ + private void saveTestDocument(final DBCollection collection, final String key, final Object value) { + DBObject testDocument = new BasicDBObject(); + testDocument.put(key, value); + collection.save(testDocument); + } + + private boolean testQuery(final DBCollection collection, final DBObject query) { + DBCursor cursor = collection.find(query); + return cursor.hasNext(); + } +} + diff --git a/driver-legacy/src/test/functional/com/mongodb/QueryTest.java b/driver-legacy/src/test/functional/com/mongodb/QueryTest.java new
file mode 100644 index 00000000000..d58111f46e6 --- /dev/null +++ b/driver-legacy/src/test/functional/com/mongodb/QueryTest.java @@ -0,0 +1,47 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import org.junit.Ignore; +import org.junit.Test; + +import static org.hamcrest.core.Is.is; +import static org.hamcrest.MatcherAssert.assertThat; + +public class QueryTest extends DatabaseTestCase { + + @Test + public void shouldBeAbleToUseOldQueryBuilderWithNewFilterMethod() { + // given + collection.insert(new BasicDBObject("name", "Bob")); + + //when + DBObject filter = QueryBuilder.start("name").is("Bob").get(); + DBCursor dbCursor = collection.find(filter); + + //then + assertThat(dbCursor.next().get("name").toString(), is("Bob")); + } + + @Test + @Ignore + public void shouldBeAbleToQueryWithJSONString() { + // JSON.parse(jsonString); + // collection.find(JSON.parse(jsonString)) + } + +} diff --git a/driver-legacy/src/test/functional/com/mongodb/UuidRepresentationTest.java b/driver-legacy/src/test/functional/com/mongodb/UuidRepresentationTest.java new file mode 100644 index 00000000000..ce1bf9903a8 --- /dev/null +++ b/driver-legacy/src/test/functional/com/mongodb/UuidRepresentationTest.java @@ -0,0 +1,59 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb; + +import com.mongodb.client.AbstractUuidRepresentationTest; +import com.mongodb.client.MongoDatabase; +import org.bson.BsonBinarySubType; +import org.bson.UuidRepresentation; +import org.bson.codecs.configuration.CodecRegistry; +import org.junit.After; + +import java.util.UUID; + +import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; + +public class UuidRepresentationTest extends AbstractUuidRepresentationTest { + + private MongoClient mongoClient; + + public UuidRepresentationTest(final UuidRepresentation uuidRepresentation, final BsonBinarySubType subType, + final UUID uuid, final byte[] encodedValue, final byte[] standardEncodedValue) { + super(uuidRepresentation, subType, uuid, encodedValue, standardEncodedValue); + } + + + @Override + protected void createMongoClient(final UuidRepresentation uuidRepresentation, final CodecRegistry codecRegistry) { + mongoClient = new MongoClient(getMongoClientSettingsBuilder() + .uuidRepresentation(uuidRepresentation) + .codecRegistry(codecRegistry) + .build()); + } + + @Override + protected MongoDatabase getDatabase(final String databaseName) { + return mongoClient.getDatabase(databaseName); + } + + @After + public void cleanUp() { + if (mongoClient != null) { + mongoClient.close(); + } + } +} diff --git a/driver-legacy/src/test/functional/com/mongodb/client/LegacyDatabaseTestCase.java b/driver-legacy/src/test/functional/com/mongodb/client/LegacyDatabaseTestCase.java new file mode 100644 index 00000000000..e8421bccd58 --- /dev/null +++ b/driver-legacy/src/test/functional/com/mongodb/client/LegacyDatabaseTestCase.java @@ -0,0 +1,86 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client; + +import com.mongodb.MongoClient; +import com.mongodb.MongoNamespace; +import com.mongodb.client.test.CollectionHelper; +import com.mongodb.internal.connection.ServerHelper; +import org.bson.BsonDocument; +import org.bson.BsonDocumentWrapper; +import org.bson.Document; +import org.bson.codecs.DocumentCodec; +import org.junit.After; +import org.junit.Before; + +import static com.mongodb.Fixture.getDefaultDatabaseName; +import static com.mongodb.Fixture.getMongoClient; +import static com.mongodb.Fixture.getPrimary; +import static com.mongodb.Fixture.getServerSessionPoolInUseCount; + +public class LegacyDatabaseTestCase { + //For ease of use and readability, in this specific case we'll allow protected variables + //CHECKSTYLE:OFF + protected MongoClient client; + protected MongoDatabase database; + protected MongoCollection<Document> collection; + //CHECKSTYLE:ON + + @Before + public void setUp() { + client = getMongoClient(); + database = client.getDatabase(getDefaultDatabaseName()); + collection = database.getCollection(getClass().getName()); + collection.drop(); + } + + @After + public void tearDown() { + if (collection != null) { + collection.drop(); + } + try { + ServerHelper.checkPool(getPrimary()); + } catch (InterruptedException e) { + // ignore + } + + if (getServerSessionPoolInUseCount() != 0) { + throw new IllegalStateException("Server session in use count is " + getServerSessionPoolInUseCount()); + } + } + + protected String getDatabaseName() { + return database.getName(); + } + + protected String getCollectionName() { + return collection.getNamespace().getCollectionName(); + } + + protected MongoNamespace getNamespace() { + return collection.getNamespace(); + } + + protected CollectionHelper<Document> getCollectionHelper() { + return new CollectionHelper<>(new DocumentCodec(), getNamespace()); + } + + protected BsonDocument wrap(final Document document) { + return new BsonDocumentWrapper<>(document, new DocumentCodec()); + } +} diff --git a/driver-legacy/src/test/functional/com/mongodb/client/jndi/MongoClientFactorySpecification.groovy b/driver-legacy/src/test/functional/com/mongodb/client/jndi/MongoClientFactorySpecification.groovy new file mode 100644 index 00000000000..4a39ed7d246 --- /dev/null +++ b/driver-legacy/src/test/functional/com/mongodb/client/jndi/MongoClientFactorySpecification.groovy @@ -0,0 +1,71 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.client.jndi + +import com.mongodb.Fixture +import com.mongodb.FunctionalSpecification +import com.mongodb.MongoClient +import com.mongodb.MongoException + +import javax.naming.Reference +import javax.naming.StringRefAddr + +class MongoClientFactorySpecification extends FunctionalSpecification { + def mongoClientFactory = new MongoClientFactory() + + def 'should create MongoClient from environment'() { + given: + def environment = new Hashtable() + environment.put('connectionString', Fixture.getMongoClientURIString()) + + when: + MongoClient client = mongoClientFactory.getObjectInstance(null, null, null, environment) + + then: + client != null + + cleanup: + client?.close() + } + + def 'should create MongoClient from obj that is of type Reference'() { + given: + def environment = new Hashtable() + def reference = new Reference(null, new StringRefAddr('connectionString', Fixture.getMongoClientURIString())) + + when: + MongoClient client = mongoClientFactory.getObjectInstance(reference, null, null, environment) + + then: + client != null + + cleanup: + client?.close() + } + + def 'should throw if no connection string is provided'() { + given: + def environment = new Hashtable() + + when: + mongoClientFactory.getObjectInstance(null, null, null, environment) + + then: + thrown(MongoException) + } +} + diff --git a/driver-legacy/src/test/functional/com/mongodb/gridfs/GridFSTest.java b/driver-legacy/src/test/functional/com/mongodb/gridfs/GridFSTest.java new file mode 100644 index 00000000000..b7a2d495f75 --- /dev/null +++ b/driver-legacy/src/test/functional/com/mongodb/gridfs/GridFSTest.java @@ -0,0 +1,348 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.gridfs; + +import com.mongodb.BasicDBObject; +import com.mongodb.DBObject; +import com.mongodb.DatabaseTestCase; +import com.mongodb.MongoException; +import org.bson.types.ObjectId; +import org.junit.Before; +import org.junit.Test; +import org.junit.jupiter.api.Tag; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.URI; +import java.util.Scanner; + +import static java.nio.charset.Charset.defaultCharset; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +public class GridFSTest extends DatabaseTestCase { + private GridFS gridFS; + + @Before + public void setUp() { + super.setUp(); + gridFS = new GridFS(database); + } + + @Test + public void testSmall() throws Exception { + testInOut("this is a simple test"); + } + + @Test + public void testBig() throws Exception { + int target = GridFS.DEFAULT_CHUNKSIZE * 3; + StringBuilder buf = new StringBuilder(target); + while (buf.length() < target) { + buf.append("asdasdkjasldkjasldjlasjdlajsdljasldjlasjdlkasjdlaskjdlaskjdlsakjdlaskjdasldjsad"); + } + String s = buf.toString(); + testInOut(s); + } + + void testOutStream(final String s) throws Exception { + + int[] start = getCurrentCollectionCounts(); + + GridFSInputFile in = gridFS.createFile(); + OutputStream writeStream = in.getOutputStream(); + writeStream.write(s.getBytes(defaultCharset()), 0, s.length()); + writeStream.close(); + GridFSDBFile out = gridFS.findOne(new BasicDBObject("_id", in.getId())); + assert (out.getId().equals(in.getId())); + assert (out.getChunkSize() == (long) GridFS.DEFAULT_CHUNKSIZE); + + ByteArrayOutputStream bout = new ByteArrayOutputStream(); + out.writeTo(bout); + String outString = new String(bout.toByteArray(), defaultCharset()); + assert (outString.equals(s)); + + out.remove(); + int[] end = getCurrentCollectionCounts(); + assertEquals(start[0], end[0]); + assertEquals(start[1], end[1]); + } + + @Test + public void testOutStreamSmall() throws Exception { + testOutStream("this is a simple test"); + } + + @Test + public void testOutStreamBig() throws Exception { + int target = (int) (GridFS.DEFAULT_CHUNKSIZE * 3.5); + StringBuilder buf = new StringBuilder(target); + while (buf.length() < target) { + buf.append("asdasdkjasldkjasldjlasjdlajsdljasldjlasjdlkasjdlaskjdlaskjdlsakjdlaskjdasldjsad"); + } + String s = buf.toString(); + testOutStream(s); + } + + @Test + public void testOutStreamBigAligned() throws Exception { + int target = (GridFS.DEFAULT_CHUNKSIZE * 4); + StringBuilder buf = new StringBuilder(target); + while (buf.length() < target) { + buf.append("a"); + } + String s = buf.toString(); + testOutStream(s); + } + + @Test + public void testCreateFileWithFile() throws Exception { + URI fileURI = GridFSTest.class.getResource("/GridFSLegacy/GridFSTestFile.txt").toURI(); + GridFSInputFile in = gridFS.createFile(new File(fileURI)); + in.save(); + + String expectedString = new Scanner(new File(fileURI)).useDelimiter("\\Z").next(); + + GridFSDBFile out = gridFS.findOne(new BasicDBObject("_id", in.getId())); + ByteArrayOutputStream bout = new ByteArrayOutputStream(); + out.writeTo(bout); + String outString = new String(bout.toByteArray(), defaultCharset()).trim(); + + assertEquals(expectedString, outString); + } + 
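+ // Editorial sketch (hypothetical helper, not part of the original patch): the chunk
+ // arithmetic these GridFS tests rely on. GridFS stores ceil(length / chunkSize)
+ // documents in the chunks collection plus one metadata document in the files
+ // collection, which is what getCurrentCollectionCounts() observes.
+ private static long expectedChunkCount(final long fileLength, final long chunkSize) {
+     return (fileLength + chunkSize - 1) / chunkSize; // integer ceiling division
+ }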
+ @Test + public void testMetadata() throws Exception { + + GridFSInputFile in = gridFS.createFile("foo".getBytes(defaultCharset())); + in.put("meta", 5); + in.save(); + GridFSDBFile out = gridFS.findOne(new BasicDBObject("_id", in.getId())); + assertEquals(5, out.get("meta")); + } + + @Test + public void testFind() throws Exception { + + GridFSInputFile in = gridFS.createFile(new ByteArrayInputStream("foo".getBytes(defaultCharset())), "testFind"); + in.save(); + assertNotNull(gridFS.find((ObjectId) in.getId())); + assertNotNull(gridFS.findOne((ObjectId) in.getId())); + assertNotNull(gridFS.findOne("testFind")); + assertNotNull(gridFS.findOne(new BasicDBObject("_id", in.getId()))); + } + + @Test + public void testBadChunkSize() throws Exception { + byte[] randomBytes = new byte[256]; + GridFSInputFile inputFile = gridFS.createFile(randomBytes); + inputFile.setFilename("bad_chunk_size.bin"); + try { + inputFile.save(0); + fail("should have received an exception about a chunk size being zero"); + } catch (MongoException e) { + //We expect this exception to complain about the chunksize + assertTrue(e.toString().contains("chunkSize must be greater than zero")); + } + } + + @Test + @Tag("Slow") + public void testMultipleChunks() throws Exception { + int fileSize = 1024 * 128; + byte[] randomBytes = new byte[fileSize]; + for (int idx = 0; idx < fileSize; ++idx) { + randomBytes[idx] = (byte) (256 * Math.random()); + } + + GridFSInputFile inputFile = gridFS.createFile(randomBytes); + inputFile.setFilename("bad_chunk_size.bin"); + + //For good measure let's save and restore the bytes + inputFile.save(1024); + GridFSDBFile savedFile = gridFS.findOne(new BasicDBObject("_id", inputFile.getId())); + ByteArrayOutputStream savedFileByteStream = new ByteArrayOutputStream(); + savedFile.writeTo(savedFileByteStream); + byte[] savedFileBytes = savedFileByteStream.toByteArray(); + + assertArrayEquals(randomBytes, savedFileBytes); + } + + @Test + @Tag("Slow") + public void getBigChunkSize() throws Exception { + GridFSInputFile file = gridFS.createFile("512kb_bucket"); + file.setChunkSize(file.getChunkSize() * 2); + OutputStream os = file.getOutputStream(); + for (int i = 0; i < 1024; i++) { + os.write(new byte[GridFS.DEFAULT_CHUNKSIZE / 1024 + 1]); + } + os.close(); + } + + + @Test + public void testInputStreamSkipping() throws Exception { + //int chunkSize = 5; + int chunkSize = GridFS.DEFAULT_CHUNKSIZE; + int fileSize = (int) (7.25 * chunkSize); + + byte[] fileBytes = new byte[fileSize]; + for (int idx = 0; idx < fileSize; ++idx) { + fileBytes[idx] = (byte) (idx % 251); + } + //Don't want chunks to be aligned at byte position 0 + + GridFSInputFile inputFile = gridFS.createFile(fileBytes); + inputFile.setFilename("input_stream_skipping.bin"); + inputFile.save(chunkSize); + + GridFSDBFile savedFile = gridFS.findOne(new BasicDBObject("_id", inputFile.getId())); + InputStream inputStream = savedFile.getInputStream(); + + //Quick run-through, make sure the file is as expected + for (int idx = 0; idx < fileSize; ++idx) { + assertEquals((byte) (idx % 251), (byte) inputStream.read()); + } + + inputStream = savedFile.getInputStream(); + + long skipped = inputStream.skip(1); + assertEquals(1, skipped); + int position = 1; + assertEquals((byte) (position++ % 251), (byte) inputStream.read()); + + skipped = inputStream.skip(chunkSize); + assertEquals(chunkSize, skipped); + position += chunkSize; + assertEquals((byte) (position++ % 251), (byte) inputStream.read()); + + skipped = inputStream.skip(-1); + assertEquals(0, 
skipped); + skipped = inputStream.skip(0); + assertEquals(0, skipped); + + skipped = inputStream.skip(3 * chunkSize); + assertEquals(3 * chunkSize, skipped); + position += 3 * chunkSize; + assertEquals((byte) (position++ % 251), (byte) inputStream.read()); + + //Make sure skipping works when we skip to an exact chunk boundary + long toSkip = inputStream.available(); + skipped = inputStream.skip(toSkip); + assertEquals(toSkip, skipped); + position += toSkip; + assertEquals((byte) (position++ % 251), (byte) inputStream.read()); + + skipped = inputStream.skip(2L * fileSize); + assertEquals(fileSize - position, skipped); + assertEquals(-1, inputStream.read()); + } + + @Test + public void testCustomFileID() throws IOException { + // given + int id = 1; + gridFS.remove(new BasicDBObject("_id", id)); + + int chunkSize = 10; + int fileSize = (int) (3.25 * chunkSize); + + byte[] fileBytes = new byte[fileSize]; + for (int idx = 0; idx < fileSize; ++idx) { + fileBytes[idx] = (byte) (idx % 251); + } + + // when + GridFSInputFile inputFile = gridFS.createFile(fileBytes); + inputFile.setId(id); + inputFile.setFilename("custom_file_id.bin"); + inputFile.save(chunkSize); + assertEquals(id, inputFile.getId()); + + // then + GridFSDBFile savedFile = gridFS.findOne(new BasicDBObject("_id", id)); + InputStream inputStream = savedFile.getInputStream(); + + for (int idx = 0; idx < fileSize; ++idx) { + assertEquals((byte) (idx % 251), (byte) inputStream.read()); + } + + // finally + gridFS.remove(new BasicDBObject("_id", id)); + } + + void testInOut(final String s) throws Exception { + + int[] start = getCurrentCollectionCounts(); + + GridFSInputFile in = gridFS.createFile(s.getBytes(defaultCharset())); + in.save(); + GridFSDBFile out = gridFS.findOne(new BasicDBObject("_id", in.getId())); + assert (out.getId().equals(in.getId())); + assert (out.getChunkSize() == (long) GridFS.DEFAULT_CHUNKSIZE); + + ByteArrayOutputStream bout = new ByteArrayOutputStream(); + out.writeTo(bout); + String outString = new String(bout.toByteArray(), defaultCharset()); + assert (outString.equals(s)); + + out.remove(); + int[] end = getCurrentCollectionCounts(); + assertEquals(start[0], end[0]); + assertEquals(start[1], end[1]); + } + + int[] getCurrentCollectionCounts() { + int[] i = new int[2]; + i[0] = gridFS.getFilesCollection().find().count(); + i[1] = gridFS.getChunksCollection().find().count(); + return i; + } + + @Test(expected = IllegalArgumentException.class) + public void testRemoveWhenObjectIdIsNull() { + ObjectId objectId = null; + gridFS.remove(objectId); + } + + @Test(expected = IllegalArgumentException.class) + public void testRemoveWhenFileNameIsNull() { + String fileName = null; + gridFS.remove(fileName); + } + + @Test(expected = IllegalArgumentException.class) + public void testRemoveWhenQueryIsNull() { + DBObject dbObjectQuery = null; + gridFS.remove(dbObjectQuery); + } + + @Test + public void testToStringOverride() { + GridFSInputFile in = gridFS.createFile(); + String json = in.toString(); + assertTrue(json.startsWith("{") && json.endsWith("}")); + } +} diff --git a/driver-legacy/src/test/resources/GridFSLegacy/GridFSTestFile.txt b/driver-legacy/src/test/resources/GridFSLegacy/GridFSTestFile.txt new file mode 100644 index 00000000000..ee31b9ced1d --- /dev/null +++ b/driver-legacy/src/test/resources/GridFSLegacy/GridFSTestFile.txt @@ -0,0 +1 @@ +GridFS Test File diff --git a/driver-legacy/src/test/unit/com/mongodb/AggregationOptionsSpecification.groovy 
b/driver-legacy/src/test/unit/com/mongodb/AggregationOptionsSpecification.groovy new file mode 100644 index 00000000000..940a86cef81 --- /dev/null +++ b/driver-legacy/src/test/unit/com/mongodb/AggregationOptionsSpecification.groovy @@ -0,0 +1,37 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb + +import spock.lang.Specification + +import static java.util.concurrent.TimeUnit.MILLISECONDS + +class AggregationOptionsSpecification extends Specification { + + def "should return new options with the same property values"() { + when: + def options = AggregationOptions.builder() + .allowDiskUse(true) + .batchSize(3) + .maxTime(42, MILLISECONDS) + .build() + then: + options.allowDiskUse + options.batchSize == 3 + options.getMaxTime(MILLISECONDS) == 42 + } +} diff --git a/driver-legacy/src/test/unit/com/mongodb/CommandResultTest.java b/driver-legacy/src/test/unit/com/mongodb/CommandResultTest.java new file mode 100644 index 00000000000..abb9cc712b4 --- /dev/null +++ b/driver-legacy/src/test/unit/com/mongodb/CommandResultTest.java @@ -0,0 +1,107 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb; + +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonDouble; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.codecs.Codec; +import org.junit.Test; + +import java.net.UnknownHostException; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +public class CommandResultTest { + + private static final Codec<DBObject> DECODER = MongoClientSettings.getDefaultCodecRegistry().get(DBObject.class); + + @Test + public void shouldBeOkWhenOkFieldIsTrue() throws UnknownHostException { + CommandResult commandResult = + new CommandResult( + new BsonDocument("ok", BsonBoolean.TRUE), DECODER); + assertTrue(commandResult.ok()); + } + + @Test + public void shouldNotBeOkWithNoOkField() throws UnknownHostException { + CommandResult commandResult = new CommandResult(new BsonDocument(), DECODER); + assertFalse(commandResult.ok()); + } + + @Test + public void shouldNotBeOkWhenOkFieldIsFalse() throws UnknownHostException { + CommandResult commandResult = new CommandResult(new BsonDocument(), DECODER); + commandResult.put("ok", false); + assertFalse(commandResult.ok()); + } + + @Test + public void shouldBeOkWhenOkFieldIsOne() throws UnknownHostException { + CommandResult commandResult = new CommandResult(new BsonDocument("ok", new BsonDouble(1.0)), DECODER); + assertTrue(commandResult.ok()); + } + + @Test + public void shouldNotBeOkWhenOkFieldIsZero() throws UnknownHostException { + CommandResult commandResult = new CommandResult(new BsonDocument("ok", new BsonDouble(0.0)), DECODER); + assertFalse(commandResult.ok()); + } + + @Test + public void shouldNotHaveExceptionWhenOkIsTrue() throws UnknownHostException { + CommandResult commandResult = new CommandResult(new BsonDocument("ok", new BsonBoolean(true)), DECODER); + assertNull(commandResult.getException()); + } + + @Test + public void shouldNotBeOkWhenOkFieldTypeIsNotBooleanOrNumber() throws UnknownHostException { + CommandResult commandResult = new CommandResult(new BsonDocument("ok", new BsonString("1")), DECODER); + assertFalse(commandResult.ok()); + } + + @Test + public void testNullErrorCode() throws UnknownHostException { + try { + new CommandResult(new BsonDocument("ok", new BsonInt32(0)), DECODER, new ServerAddress()) + .throwOnError(); + fail("Should throw"); + } catch (MongoCommandException e) { + assertEquals(-1, e.getCode()); + } + } + + @Test + public void testCommandFailure() throws UnknownHostException { + try { + new CommandResult(new BsonDocument("ok", new BsonInt32(0)) + .append("errmsg", new BsonString("ns not found")) + .append("code", new BsonInt32(5000)), DECODER, new ServerAddress()) + .throwOnError(); + fail("Should throw"); + } catch (MongoCommandException e) { + assertEquals(5000, e.getCode()); + } + } +} diff --git a/driver-legacy/src/test/unit/com/mongodb/DBCollectionObjectFactoryTest.java b/driver-legacy/src/test/unit/com/mongodb/DBCollectionObjectFactoryTest.java new file mode 100644 index 00000000000..99848ff136b --- /dev/null +++ b/driver-legacy/src/test/unit/com/mongodb/DBCollectionObjectFactoryTest.java @@ -0,0 +1,146 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb; + +import org.bson.BSONObject; +import org.hamcrest.Matchers; +import org.junit.Before; +import org.junit.Test; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.MatcherAssert.assertThat; + +public class DBCollectionObjectFactoryTest { + + private DBCollectionObjectFactory factory; + + @Before + public void setUp() { + factory = new DBCollectionObjectFactory(); + } + + @Test + public void testDefaultTopLevelClass() { + assertThat(factory.getInstance(), instanceOf(DBObject.class)); + } + + @Test + public void testDefaultInternalClass() { + assertThat(factory.getInstance(asList("a", "b", "c")), instanceOf(DBObject.class)); + } + + @Test + public void testTopLevelClassWhenSet() { + factory = factory.update(TopLevelDBObject.class); + assertThat(factory.getInstance(), instanceOf(TopLevelDBObject.class)); + } + + @Test + public void testEmptyPath() { + factory = factory.update(TopLevelDBObject.class); + assertThat(factory.getInstance(Collections.emptyList()), instanceOf(TopLevelDBObject.class)); + } + + @Test + public void testInternalClassWhenTopLevelSet() { + factory = factory.update(TopLevelDBObject.class); + assertThat(factory.getInstance(asList("a", "b", "c")), instanceOf(DBObject.class)); + } + + @Test + public void testSeveralInternalClassesSet() { + factory = factory.update(NestedOneDBObject.class, asList("a", "b")); + factory = factory.update(NestedTwoDBObject.class, asList("a", "c")); + + assertThat(factory.getInstance(asList("a", "b")), instanceOf(NestedOneDBObject.class)); + assertThat(factory.getInstance(asList("a", "c")), instanceOf(NestedTwoDBObject.class)); + } + + @Test + public void testThatNullObjectClassRevertsToDefault() { + factory = factory.update(MyDBObject.class, singletonList("a")).update(null); + assertThat(factory.getInstance(), Matchers.instanceOf(BasicDBObject.class)); + assertThat(factory.getInstance(singletonList("a")), instanceOf(MyDBObject.class)); + + factory = factory.update(null, singletonList("a")); + assertThat(factory.getInstance(), Matchers.instanceOf(BasicDBObject.class)); + assertThat(factory.getInstance(singletonList("a")), instanceOf(BasicDBObject.class)); + } + + public static class TopLevelDBObject extends BasicDBObject { + private static final long serialVersionUID = 7029929727222305692L; + } + + public static class NestedOneDBObject extends BasicDBObject { + private static final long serialVersionUID = -5821458746671670383L; + } + + public static class NestedTwoDBObject extends BasicDBObject { + private static final long serialVersionUID = 5243874721805359328L; + } + + @SuppressWarnings("rawtypes") + public static class MyDBObject extends HashMap implements DBObject { + + private static final long serialVersionUID = -8540791504402368127L; + + @Override + public void markAsPartialObject() { + } + + @Override + public boolean isPartialObject() { + return false; + } + + @Override + public void 
putAll(final BSONObject o) { + throw new UnsupportedOperationException(); + } + + @Override + public void putAll(final Map m) { + throw new UnsupportedOperationException(); + } + + @Override + public Object get(final String key) { + throw new UnsupportedOperationException(); + } + + @Override + public Map toMap() { + throw new UnsupportedOperationException(); + } + + @Override + public Object removeField(final String key) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean containsField(final String s) { + throw new UnsupportedOperationException(); + } + } +} diff --git a/driver-legacy/src/test/unit/com/mongodb/DBCursorSpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/DBCursorSpecification.groovy new file mode 100644 index 00000000000..59dceb6478a --- /dev/null +++ b/driver-legacy/src/test/unit/com/mongodb/DBCursorSpecification.groovy @@ -0,0 +1,317 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb + +import com.mongodb.client.internal.TestOperationExecutor +import com.mongodb.client.model.Collation +import com.mongodb.client.model.DBCollectionFindOptions +import com.mongodb.internal.operation.BatchCursor +import com.mongodb.internal.operation.CountOperation +import com.mongodb.internal.operation.FindOperation +import org.bson.BsonDocument +import org.bson.BsonString +import spock.lang.Specification + +import java.util.concurrent.TimeUnit + +import static Fixture.getMongoClient +import static com.mongodb.CustomMatchers.isTheSameAs +import static spock.util.matcher.HamcrestSupport.expect + +class DBCursorSpecification extends Specification { + + def 'should get and set read preference'() { + when: + def collection = new DB(getMongoClient(), 'myDatabase', new TestOperationExecutor([])).getCollection('test') + collection.setReadPreference(ReadPreference.nearest()) + def cursor = new DBCursor(collection, new BasicDBObject(), new BasicDBObject(), ReadPreference.nearest()) + + then: + cursor.readPreference == ReadPreference.nearest() + + when: + cursor.setReadPreference(ReadPreference.secondary()) + + then: + cursor.readPreference == ReadPreference.secondary() + + when: + cursor.setReadPreference(null) + + then: + cursor.readPreference == ReadPreference.nearest() + } + + def 'should get and set read concern'() { + when: + def collection = new DB(getMongoClient(), 'myDatabase', new TestOperationExecutor([])).getCollection('test') + collection.setReadConcern(ReadConcern.MAJORITY) + def cursor = new DBCursor(collection, new BasicDBObject(), new BasicDBObject(), ReadPreference.primary()) + + then: + cursor.readConcern == ReadConcern.MAJORITY + + when: + cursor.setReadConcern(ReadConcern.LOCAL) + + then: + cursor.readConcern == ReadConcern.LOCAL + + when: + cursor.setReadConcern(null) + + then: + cursor.readConcern == ReadConcern.MAJORITY + } + + def 'should get and set collation'() { + when: + def collection = new DB(getMongoClient(), 'myDatabase', new 
TestOperationExecutor([])).getCollection('test') + def collation = Collation.builder().locale('en').build() + def cursor = new DBCursor(collection, new BasicDBObject(), new BasicDBObject(), ReadPreference.primary()) + + then: + cursor.getCollation() == null + + when: + cursor.setCollation(collation) + + then: + cursor.getCollation() == collation + + when: + cursor.setCollation(null) + + then: + cursor.getCollation() == null + } + + def 'should copy as expected'() { + when: + def collection = new DB(getMongoClient(), 'myDatabase', new TestOperationExecutor([])).getCollection('test') + def cursor = new DBCursor(collection, new BasicDBObject(), new BasicDBObject(), ReadPreference.nearest()) + .setReadConcern(ReadConcern.LOCAL) + .setCollation(Collation.builder().locale('en').build()) + + then: + expect(cursor, isTheSameAs(cursor.copy())) + } + + def 'find should create the correct FindOperation'() { + given: + def executor = new TestOperationExecutor([stubBatchCursor()]) + def collection = new DB(getMongoClient(), 'myDatabase', executor).getCollection('test') + def cursor = new DBCursor(collection, new BasicDBObject(), new BasicDBObject(), ReadPreference.primary()) + cursor.setReadConcern(ReadConcern.MAJORITY) + + when: + cursor.toArray() + + then: + expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), + collection.getObjectCodec()) + .filter(new BsonDocument()) + .projection(new BsonDocument()) + .retryReads(true)) + } + + + def 'one should create the correct FindOperation'() { + given: + def executor = new TestOperationExecutor([stubBatchCursor()]) + def collection = new DB(getMongoClient(), 'myDatabase', executor).getCollection('test') + def cursor = new DBCursor(collection, new BasicDBObject(), new BasicDBObject(), ReadPreference.primary()) + cursor.setReadConcern(ReadConcern.MAJORITY) + + when: + cursor.one() + + then: + expect executor.getReadOperation(), isTheSameAs( + new FindOperation(collection.getNamespace(), collection.getObjectCodec()) + .limit(-1) + .filter(new BsonDocument()) + .projection(new BsonDocument()) + .retryReads(true) + ) + } + + def 'DBCursor methods should be used to create the expected operation'() { + given: + def executor = new TestOperationExecutor([stubBatchCursor()]) + def collection = new DB(getMongoClient(), 'myDatabase', executor).getCollection('test') + def collation = Collation.builder().locale('en').build() + def cursorType = CursorType.NonTailable + def filter = new BasicDBObject() + def sort = BasicDBObject.parse('{a: 1}') + def bsonFilter = new BsonDocument() + def bsonSort = BsonDocument.parse(sort.toJson()) + def readConcern = ReadConcern.LOCAL + def readPreference = ReadPreference.nearest() + def findOptions = new DBCollectionFindOptions() + def cursor = new DBCursor(collection, filter, findOptions) + .setReadConcern(readConcern) + .setReadPreference(readPreference) + .setCollation(collation) + .batchSize(1) + .cursorType(cursorType) + .limit(1) + .maxTime(100, TimeUnit.MILLISECONDS) + .noCursorTimeout(true) + .partial(true) + .skip(1) + .sort(sort) + + when: + cursor.toArray() + + then: + expect executor.getReadOperation(), isTheSameAs( + new FindOperation(collection.getNamespace(), collection.getObjectCodec()) + .batchSize(1) + .collation(collation) + .cursorType(cursorType) + .filter(bsonFilter) + .limit(1) + .noCursorTimeout(true) + .partial(true) + .skip(1) + .sort(bsonSort) + .retryReads(true) + ) + + executor.getReadPreference() == readPreference + executor.getReadConcern() == readConcern + } + + def 
'DBCollectionFindOptions should be used to create the expected operation'() { + given: + def executor = new TestOperationExecutor([stubBatchCursor()]) + def collection = new DB(getMongoClient(), 'myDatabase', executor).getCollection('test') + def collation = Collation.builder().locale('en').build() + def cursorType = CursorType.NonTailable + def filter = new BasicDBObject() + def projection = BasicDBObject.parse('{a: 1, _id: 0}') + def sort = BasicDBObject.parse('{a: 1}') + def bsonFilter = new BsonDocument() + def bsonProjection = BsonDocument.parse(projection.toJson()) + def bsonSort = BsonDocument.parse(sort.toJson()) + def comment = 'comment' + def hint = BasicDBObject.parse('{x : 1}') + def min = BasicDBObject.parse('{y : 1}') + def max = BasicDBObject.parse('{y : 100}') + def bsonHint = BsonDocument.parse(hint.toJson()) + def bsonMin = BsonDocument.parse(min.toJson()) + def bsonMax = BsonDocument.parse(max.toJson()) + def readConcern = ReadConcern.LOCAL + def readPreference = ReadPreference.nearest() + def findOptions = new DBCollectionFindOptions() + .batchSize(1) + .collation(collation) + .cursorType(cursorType) + .limit(1) + .maxAwaitTime(1001, TimeUnit.MILLISECONDS) + .maxTime(101, TimeUnit.MILLISECONDS) + .noCursorTimeout(true) + .partial(true) + .projection(projection) + .readConcern(readConcern) + .readPreference(readPreference) + .skip(1) + .sort(sort) + .comment(comment) + .hint(hint) + .max(max) + .min(min) + .returnKey(true) + .showRecordId(true) + + def cursor = new DBCursor(collection, filter, findOptions) + + when: + cursor.toArray() + + then: + expect executor.getReadOperation(), isTheSameAs(new FindOperation(collection.getNamespace(), collection.getObjectCodec()) + .batchSize(1) + .collation(collation) + .cursorType(cursorType) + .filter(bsonFilter) + .limit(1) + .noCursorTimeout(true) + .partial(true) + .projection(bsonProjection) + .skip(1) + .sort(bsonSort) + .comment(new BsonString(comment)) + .hint(bsonHint) + .max(bsonMax) + .min(bsonMin) + .returnKey(true) + .showRecordId(true) + .retryReads(true) + ) + + executor.getReadPreference() == findOptions.getReadPreference() + } + + def 'count should create the correct CountOperation'() { + def executor = new TestOperationExecutor([42L]) + def collection = new DB(getMongoClient(), 'myDatabase', executor).getCollection('test') + def cursor = new DBCursor(collection, new BasicDBObject(), new BasicDBObject(), ReadPreference.primary()) + cursor.setReadConcern(ReadConcern.MAJORITY) + + when: + def result = cursor.count() + + then: + result == 42 + expect executor.getReadOperation(), isTheSameAs(new CountOperation(collection.getNamespace()) + .filter(new BsonDocument()).retryReads(true)) + executor.getReadConcern() == ReadConcern.MAJORITY + } + + def 'size should create the correct CountOperation'() { + def executor = new TestOperationExecutor([42L]) + def collection = new DB(getMongoClient(), 'myDatabase', executor).getCollection('test') + def cursor = new DBCursor(collection, new BasicDBObject(), new BasicDBObject(), ReadPreference.primary()) + cursor.setReadConcern(ReadConcern.MAJORITY) + + when: + def result = cursor.size() + + then: + result == 42 + expect executor.getReadOperation(), isTheSameAs(new CountOperation(collection.getNamespace()) + .filter(new BsonDocument()).retryReads(true)) + executor.getReadConcern() == ReadConcern.MAJORITY + } + + private stubBatchCursor() { + Stub(BatchCursor) { + def count = 0 + next() >> { + count++ + [new BasicDBObject('_id', 1)] + } + hasNext() >> { + count == 0 + } + 
getServerCursor() >> new ServerCursor(12L, new ServerAddress()) + } + } +} diff --git a/driver-legacy/src/test/unit/com/mongodb/DBEncoderDecoderDBRefSpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/DBEncoderDecoderDBRefSpecification.groovy new file mode 100644 index 00000000000..c7e64686ee7 --- /dev/null +++ b/driver-legacy/src/test/unit/com/mongodb/DBEncoderDecoderDBRefSpecification.groovy @@ -0,0 +1,64 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb + +import org.bson.BSONDecoder +import org.bson.BasicBSONDecoder +import org.bson.io.BasicOutputBuffer +import org.bson.io.OutputBuffer +import spock.lang.Specification + +class DBEncoderDecoderDBRefSpecification extends Specification { + + def 'should encode and decode DBRefs'() { + given: + DBRef reference = new DBRef('coll', 'hello world') + DBObject document = new BasicDBObject('!', reference) + OutputBuffer buffer = new BasicOutputBuffer() + + when: + DefaultDBEncoder.FACTORY.create().writeObject(buffer, document) + DefaultDBCallback callback = new DefaultDBCallback(null) + BSONDecoder decoder = new BasicBSONDecoder() + decoder.decode(buffer.toByteArray(), callback) + DBRef decoded = ((DBObject) callback.get()).get('!') + + then: + decoded.databaseName == null + decoded.collectionName == 'coll' + decoded.id == 'hello world' + } + + def 'should encode and decode DBRefs with a database name'() { + given: + DBRef reference = new DBRef('db', 'coll', 'hello world') + DBObject document = new BasicDBObject('!', reference) + OutputBuffer buffer = new BasicOutputBuffer() + + when: + DefaultDBEncoder.FACTORY.create().writeObject(buffer, document) + DefaultDBCallback callback = new DefaultDBCallback(null) + BSONDecoder decoder = new BasicBSONDecoder() + decoder.decode(buffer.toByteArray(), callback) + DBRef decoded = ((DBObject) callback.get()).get('!') + + then: + decoded.databaseName == 'db' + decoded.collectionName == 'coll' + decoded.id == 'hello world' + } +} diff --git a/driver-legacy/src/test/unit/com/mongodb/DBObjectCollationHelperSpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/DBObjectCollationHelperSpecification.groovy new file mode 100644 index 00000000000..914f7243b4c --- /dev/null +++ b/driver-legacy/src/test/unit/com/mongodb/DBObjectCollationHelperSpecification.groovy @@ -0,0 +1,73 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb + +import com.mongodb.client.model.Collation +import com.mongodb.client.model.CollationAlternate +import com.mongodb.client.model.CollationCaseFirst +import com.mongodb.client.model.CollationMaxVariable +import com.mongodb.client.model.CollationStrength +import spock.lang.Specification + +class DBObjectCollationHelperSpecification extends Specification { + + def 'should create the expected collation'() { + expect: + DBObjectCollationHelper.createCollationFromOptions(new BasicDBObject('collation', BasicDBObject.parse(options))) == collation + + where: + collation | options + Collation.builder().locale('en').build() | '{locale: "en"}' + Collation.builder() + .locale('en') + .caseLevel(true) + .collationCaseFirst(CollationCaseFirst.OFF) + .collationStrength(CollationStrength.IDENTICAL) + .numericOrdering(true) + .collationAlternate(CollationAlternate.SHIFTED) + .collationMaxVariable(CollationMaxVariable.SPACE) + .normalization(true) + .backwards(true) + .build() | '''{locale: "en", caseLevel: true, caseFirst: "off", strength: 5, + numericOrdering: true, alternate: "shifted", + maxVariable: "space", normalization: true, backwards: true}''' + } + + def 'should return null if no options are set'() { + expect: + DBObjectCollationHelper.createCollationFromOptions(new BasicDBObject()) == null + } + + def 'should throw an exception if the collation options are invalid'() { + when: + DBObjectCollationHelper.createCollationFromOptions(new BasicDBObject('collation', BasicDBObject.parse(options))) + + then: + thrown(IllegalArgumentException) + + where: + options << ['{}', + '{locale: true}', + '{ locale: "en", caseLevel: "true"}', + '{ locale: "en", caseFirst: false}', + '{ locale: "en", strength: true }', + '{ locale: "en", numericOrdering: 1}', + '{ locale: "en", alternate: true}', + '{ locale: "en", maxVariable: true}', + '{ locale: "en", normalization: 1}', + '{ locale: "en", backwards: 1}'] + } +} diff --git a/driver-legacy/src/test/unit/com/mongodb/DBObjectMatchers.java b/driver-legacy/src/test/unit/com/mongodb/DBObjectMatchers.java new file mode 100644 index 00000000000..f82cb75bcda --- /dev/null +++ b/driver-legacy/src/test/unit/com/mongodb/DBObjectMatchers.java @@ -0,0 +1,89 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb; + +import org.hamcrest.Description; +import org.hamcrest.Factory; +import org.hamcrest.Matcher; +import org.hamcrest.TypeSafeMatcher; + +public final class DBObjectMatchers { + + private DBObjectMatchers() { } + + @Factory + public static Matcher hasFields(final String[] fields) { + return new HasFieldsMatcher(fields); + } + + @Factory + public static Matcher hasSubdocument(final DBObject subdocument) { + return new HasSubdocumentMatcher(subdocument); + } + + public static class HasFieldsMatcher extends TypeSafeMatcher { + + private final String[] fieldNames; + + public HasFieldsMatcher(final String[] fieldNames) { + this.fieldNames = fieldNames; + } + + @Override + protected boolean matchesSafely(final DBObject item) { + for (final String fieldName : fieldNames) { + if (!item.containsField(fieldName)) { + return false; + } + } + return true; + } + + @Override + public void describeTo(final Description description) { + description.appendText(" has fields ") + .appendValue(fieldNames); + } + } + + public static class HasSubdocumentMatcher extends TypeSafeMatcher { + private final DBObject document; + + public HasSubdocumentMatcher(final DBObject document) { + this.document = document; + } + + @Override + protected boolean matchesSafely(final DBObject item) { + for (final String key : document.keySet()) { + if (document.get(key) != null && item.get(key) == null) { + return false; + } + if (document.get(key) != null && !document.get(key).equals(item.get(key))) { + return false; + } + } + return true; + } + + @Override + public void describeTo(final Description description) { + description.appendText(" has subdocument ") + .appendValue(document); + } + } +} diff --git a/driver-legacy/src/test/unit/com/mongodb/DBSpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/DBSpecification.groovy new file mode 100644 index 00000000000..5f0c81f28cc --- /dev/null +++ b/driver-legacy/src/test/unit/com/mongodb/DBSpecification.groovy @@ -0,0 +1,269 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb + +import com.mongodb.client.internal.TestOperationExecutor +import com.mongodb.client.model.Collation +import com.mongodb.client.model.CollationAlternate +import com.mongodb.client.model.CollationCaseFirst +import com.mongodb.client.model.CollationMaxVariable +import com.mongodb.client.model.CollationStrength +import com.mongodb.client.model.DBCreateViewOptions +import com.mongodb.client.model.ValidationAction +import com.mongodb.client.model.ValidationLevel +import com.mongodb.internal.operation.BatchCursor +import com.mongodb.internal.operation.CreateCollectionOperation +import com.mongodb.internal.operation.CreateViewOperation +import com.mongodb.internal.operation.ListCollectionsOperation +import org.bson.BsonBoolean +import org.bson.BsonDocument +import org.bson.BsonDouble +import spock.lang.Specification + +import static Fixture.getMongoClient +import static com.mongodb.ClusterFixture.serverVersionLessThan +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS +import static com.mongodb.CustomMatchers.isTheSameAs +import static com.mongodb.MongoClientSettings.getDefaultCodecRegistry +import static org.junit.Assume.assumeTrue +import static spock.util.matcher.HamcrestSupport.expect + +class DBSpecification extends Specification { + + def 'should throw IllegalArgumentException if name is invalid'() { + when: + new DB(getMongoClient(), 'a.b', new TestOperationExecutor([])) + + then: + thrown(IllegalArgumentException) + } + + def 'should get and set read concern'() { + when: + def db = new DB(getMongoClient(), 'test', new TestOperationExecutor([])) + + then: + db.readConcern == ReadConcern.DEFAULT + + when: + db.setReadConcern(ReadConcern.MAJORITY) + + then: + db.readConcern == ReadConcern.MAJORITY + + when: + db.setReadConcern(null) + + then: + db.readConcern == ReadConcern.DEFAULT + } + + def 'should execute CreateCollectionOperation'() { + given: + def mongo = Stub(MongoClient) + mongo.mongoClientOptions >> MongoClientOptions.builder().build() + mongo.codecRegistry >> getDefaultCodecRegistry() + mongo.timeoutSettings >> TIMEOUT_SETTINGS + def executor = new TestOperationExecutor([1L, 2L, 3L]) + def db = new DB(mongo, 'test', executor) + db.setReadConcern(ReadConcern.MAJORITY) + db.setWriteConcern(WriteConcern.MAJORITY) + + when: + db.createCollection('ctest', new BasicDBObject()) + + then: + def operation = executor.getWriteOperation() as CreateCollectionOperation + expect operation, isTheSameAs(new CreateCollectionOperation('test', 'ctest', db.getWriteConcern())) + executor.getReadConcern() == ReadConcern.MAJORITY + + when: + def options = new BasicDBObject() + .append('size', 100000) + .append('max', 2000) + .append('capped', true) + .append('autoIndexId', true) + .append('storageEngine', BasicDBObject.parse('{ wiredTiger: {}}')) + .append('indexOptionDefaults', BasicDBObject.parse('{storageEngine: { mmapv1: {}}}')) + .append('validator', BasicDBObject.parse('{level : { $gte: 10 } }')) + .append('validationLevel', ValidationLevel.MODERATE.getValue()) + .append('validationAction', ValidationAction.WARN.getValue()) + + + db.createCollection('ctest', options) + operation = executor.getWriteOperation() as CreateCollectionOperation + + then: + expect operation, isTheSameAs(new CreateCollectionOperation('test', 'ctest', db.getWriteConcern()) + .sizeInBytes(100000) + .maxDocuments(2000) + .capped(true) + .autoIndex(true) + .storageEngineOptions(BsonDocument.parse('{ wiredTiger: {}}')) + .indexOptionDefaults(BsonDocument.parse('{storageEngine: { mmapv1:
{}}}')) + .validator(BsonDocument.parse('{level : { $gte: 10 } }')) + .validationLevel(ValidationLevel.MODERATE) + .validationAction(ValidationAction.WARN)) + executor.getReadConcern() == ReadConcern.MAJORITY + + when: + def collation = Collation.builder() + .locale('en') + .caseLevel(true) + .collationCaseFirst(CollationCaseFirst.OFF) + .collationStrength(CollationStrength.IDENTICAL) + .numericOrdering(true) + .collationAlternate(CollationAlternate.SHIFTED) + .collationMaxVariable(CollationMaxVariable.SPACE) + .backwards(true) + .build() + + db.createCollection('ctest', new BasicDBObject('collation', BasicDBObject.parse(collation.asDocument().toJson()))) + operation = executor.getWriteOperation() as CreateCollectionOperation + + then: + expect operation, isTheSameAs(new CreateCollectionOperation('test', 'ctest', db.getWriteConcern()) + .collation(collation)) + executor.getReadConcern() == ReadConcern.MAJORITY + } + + def 'should execute CreateViewOperation'() { + given: + def mongo = Stub(MongoClient) { + getCodecRegistry() >> MongoClient.defaultCodecRegistry + } + mongo.mongoClientOptions >> MongoClientOptions.builder().build() + mongo.timeoutSettings >> TIMEOUT_SETTINGS + def executor = new TestOperationExecutor([1L, 2L, 3L]) + + def databaseName = 'test' + def viewName = 'view1' + def viewOn = 'collection1' + def pipeline = [new BasicDBObject('$match', new BasicDBObject('x', true))] + def writeConcern = WriteConcern.JOURNALED + def collation = Collation.builder().locale('en').build() + + def db = new DB(mongo, databaseName, executor) + db.setWriteConcern(writeConcern) + db.setReadConcern(ReadConcern.MAJORITY) + + when: + db.createView(viewName, viewOn, pipeline) + + then: + def operation = executor.getWriteOperation() as CreateViewOperation + expect operation, isTheSameAs(new CreateViewOperation(databaseName, viewName, viewOn, + [new BsonDocument('$match', new BsonDocument('x', BsonBoolean.TRUE))], writeConcern)) + executor.getReadConcern() == ReadConcern.MAJORITY + + when: + db.createView(viewName, viewOn, pipeline, new DBCreateViewOptions().collation(collation)) + operation = executor.getWriteOperation() as CreateViewOperation + + then: + expect operation, isTheSameAs(new CreateViewOperation(databaseName, viewName, viewOn, + [new BsonDocument('$match', new BsonDocument('x', BsonBoolean.TRUE))], writeConcern).collation(collation)) + executor.getReadConcern() == ReadConcern.MAJORITY + } + + def 'should execute ListCollectionsOperation'() { + given: + def mongo = Stub(MongoClient) + mongo.mongoClientOptions >> MongoClientOptions.builder().build() + mongo.timeoutSettings >> TIMEOUT_SETTINGS + def executor = new TestOperationExecutor([Stub(BatchCursor), Stub(BatchCursor)]) + + def databaseName = 'test' + + def db = new DB(mongo, databaseName, executor) + + when: + db.getCollectionNames() + def operation = executor.getReadOperation() as ListCollectionsOperation + + then: + expect operation, isTheSameAs(new ListCollectionsOperation(databaseName, + new DBObjectCodec(getDefaultCodecRegistry())) + .nameOnly(true)) + + when: + db.collectionExists('someCollection') + operation = executor.getReadOperation() as ListCollectionsOperation + + then: + expect operation, isTheSameAs(new ListCollectionsOperation(databaseName, + new DBObjectCodec(getDefaultCodecRegistry())) + .nameOnly(true)) + } + + def 'should use provided read preference for obedient commands'() { + if (cmd.get('collStats') != null) { + assumeTrue(serverVersionLessThan(6, 2)) + } + given: + def mongo = Stub(MongoClient) + 
mongo.mongoClientOptions >> MongoClientOptions.builder().build() + mongo.codecRegistry >> getDefaultCodecRegistry() + def executor = new TestOperationExecutor([new BsonDocument('ok', new BsonDouble(1.0))]) + def database = new DB(mongo, 'test', executor) + database.setReadPreference(ReadPreference.secondary()) + database.setReadConcern(ReadConcern.MAJORITY) + + when: + database.command(cmd) + + then: + executor.getReadPreference() == expectedReadPreference + executor.getReadConcern() == ReadConcern.MAJORITY + + where: + expectedReadPreference | cmd + ReadPreference.secondary() | new BasicDBObject('listCollections', 1) + ReadPreference.secondary() | new BasicDBObject('collStats', 1) + ReadPreference.secondary() | new BasicDBObject('dbStats', 1) + ReadPreference.secondary() | new BasicDBObject('distinct', 1) + ReadPreference.secondary() | new BasicDBObject('geoNear', 1) + ReadPreference.secondary() | new BasicDBObject('geoSearch', 1) + ReadPreference.secondary() | new BasicDBObject('group', 1) + ReadPreference.secondary() | new BasicDBObject('listIndexes', 1) + ReadPreference.secondary() | new BasicDBObject('parallelCollectionScan', 1) + ReadPreference.secondary() | new BasicDBObject('text', 1) + } + + def 'should use primary read preference for non-obedient commands'() { + given: + def mongo = Stub(MongoClient) + mongo.mongoClientOptions >> MongoClientOptions.builder().build() + mongo.codecRegistry >> getDefaultCodecRegistry() + def executor = new TestOperationExecutor([new BsonDocument('ok', new BsonDouble(1.0))]) + def database = new DB(mongo, 'test', executor) + database.setReadPreference(ReadPreference.secondary()) + database.setReadConcern(ReadConcern.MAJORITY) + + when: + database.command(cmd) + + then: + executor.getReadPreference() == expectedReadPreference + executor.getReadConcern() == ReadConcern.MAJORITY + + where: + expectedReadPreference | cmd + ReadPreference.primary() | new BasicDBObject('command', 1) + } +} diff --git a/driver-legacy/src/test/unit/com/mongodb/LazyDBEncoderTest.java b/driver-legacy/src/test/unit/com/mongodb/LazyDBEncoderTest.java new file mode 100644 index 00000000000..7ce00ed6962 --- /dev/null +++ b/driver-legacy/src/test/unit/com/mongodb/LazyDBEncoderTest.java @@ -0,0 +1,41 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb; + +import org.bson.io.BasicOutputBuffer; +import org.bson.io.OutputBuffer; +import org.junit.Test; + +import static org.junit.Assert.assertArrayEquals; + +public class LazyDBEncoderTest { + + @Test + public void testEncodingObject() { + byte[] bytes = {12, 0, 0, 0, 16, 97, 0, 1, 0, 0, 0, 0}; + DBObject document = new LazyDBObject(bytes, new LazyDBCallback(null)); + OutputBuffer buffer = new BasicOutputBuffer(); + new LazyDBEncoder().writeObject(buffer, document); + assertArrayEquals(bytes, buffer.toByteArray()); + } + + @Test(expected = IllegalArgumentException.class) + public void testEncodingRegularDBObjectWithLazyDBEncoder() { + DBObject document = new BasicDBObject(); + new LazyDBEncoder().writeObject(new BasicOutputBuffer(), document); + } +} diff --git a/driver-legacy/src/test/unit/com/mongodb/LazyDBObjectSpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/LazyDBObjectSpecification.groovy new file mode 100644 index 00000000000..53775b7cbd6 --- /dev/null +++ b/driver-legacy/src/test/unit/com/mongodb/LazyDBObjectSpecification.groovy @@ -0,0 +1,313 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb + +import org.bson.BSONEncoder +import org.bson.io.BasicOutputBuffer +import org.bson.io.OutputBuffer +import org.bson.types.BSONTimestamp +import org.bson.types.Binary +import org.bson.types.Code +import org.bson.types.MaxKey +import org.bson.types.MinKey +import org.bson.types.ObjectId +import org.bson.types.Symbol +import spock.lang.Specification + +import static java.util.regex.Pattern.CASE_INSENSITIVE +import static java.util.regex.Pattern.compile + +class LazyDBObjectSpecification extends Specification { + BSONEncoder encoder = new DefaultDBEncoder() + OutputBuffer buf = new BasicOutputBuffer() + ByteArrayOutputStream bios + LazyDBDecoder lazyDBDecoder + DefaultDBDecoder defaultDBDecoder + + def setup() { + encoder.set(buf) + bios = new ByteArrayOutputStream() + lazyDBDecoder = new LazyDBDecoder() + defaultDBDecoder = new DefaultDBDecoder() + } + + def 'should lazily decode a DBRef'() { + given: + byte[] bytes = [ + 44, 0, 0, 0, 3, 102, 0, 36, 0, 0, 0, 2, 36, 114, 101, 102, + 0, 4, 0, 0, 0, 97, 46, 98, 0, 7, 36, 105, 100, 0, 18, 52, + 86, 120, -112, 18, 52, 86, 120, -112, 18, 52, 0, 0, + ] + + when: + LazyDBObject document = new LazyDBObject(bytes, new LazyDBCallback(null)) + + then: + document['f'] instanceof DBRef + document['f'] == new DBRef('a.b', new ObjectId('123456789012345678901234')) + } + + def 'should lazily decode a DBRef with $db'() { + given: + byte[] bytes = [ + 58, 0, 0, 0, 3, 102, 0, 50, 0, 0, 0, 2, 36, 114, 101, 102, + 0, 4, 0, 0, 0, 97, 46, 98, 0, 7, 36, 105, 100, 0, 18, 52, + 86, 120, -112, 18, 52, 86, 120, -112, 18, 52, + 2, 36, 100, 98, 0, 5, 0, 0, 0, 109, 121, 100, 98, 0, 0, 0 + ] + + when: + LazyDBObject document = new LazyDBObject(bytes, new LazyDBCallback(null)) + + then: + document['f'] instanceof DBRef + document['f'] == new DBRef('mydb', 
'a.b', new ObjectId('123456789012345678901234')) + } + + def testToString() throws IOException { + given: + DBObject origDoc = new BasicDBObject('x', true) + encoder.putObject(origDoc) + buf.pipe(bios) + + when: + DBObject doc = lazyDBDecoder.decode(new ByteArrayInputStream(bios.toByteArray()), (DBCollection) null) + + then: + doc.toString() == '{"x": true}' + } + + def testDecodeAllTypes() throws IOException { + given: + DBObject origDoc = getTestDocument() + encoder.putObject(origDoc) + buf.pipe(bios) + + when: + DBObject doc = defaultDBDecoder.decode(new ByteArrayInputStream(bios.toByteArray()), (DBCollection) null) + + then: + assertDocsSame(origDoc, doc) + } + + def testLazyDecodeAllTypes() throws InterruptedException, IOException { + given: + DBObject origDoc = getTestDocument() + encoder.putObject(origDoc) + buf.pipe(bios) + + when: + DBObject doc = lazyDBDecoder.decode(new ByteArrayInputStream(bios.toByteArray()), (DBCollection) null) + + then: + assertDocsSame(origDoc, doc) + } + + def testMissingKey() throws IOException { + given: + encoder.putObject(getSimpleTestDocument()) + buf.pipe(bios) + + when: + DBObject decodedObj = lazyDBDecoder.decode(new ByteArrayInputStream(bios.toByteArray()), (DBCollection) null) + + then: + decodedObj['missingKey'] == null + } + + def testKeySet() throws IOException { + given: + DBObject obj = getSimpleTestDocument() + encoder.putObject(obj) + buf.pipe(bios) + + when: + DBObject decodedObj = lazyDBDecoder.decode(new ByteArrayInputStream(bios.toByteArray()), (DBCollection) null) + + then: + decodedObj != null + decodedObj instanceof LazyDBObject + Set keySet = decodedObj.keySet() + + keySet.size() == 6 + !keySet.isEmpty() + + keySet.toArray().length == 6 + + def typedArray = keySet.toArray(new String[0]) + typedArray.length == 6 + + def array = keySet.toArray(new String[7]) + array.length == 7 + array[6] == null + + keySet.contains('first') + !keySet.contains('x') + + keySet.containsAll(['first', 'second', '_id', 'third', 'fourth', 'fifth']) + !keySet.containsAll(['first', 'notFound']) + + obj['_id'] == decodedObj['_id'] + obj['first'] == decodedObj['first'] + obj['second'] == decodedObj['second'] + obj['third'] == decodedObj['third'] + obj['fourth'] == decodedObj['fourth'] + obj['fifth'] == decodedObj['fifth'] + } + + def testEntrySet() throws IOException { + given: + DBObject obj = getSimpleTestDocument() + encoder.putObject(obj) + buf.pipe(bios) + + when: + DBObject decodedObj = lazyDBDecoder.decode(new ByteArrayInputStream(bios.toByteArray()), (DBCollection) null) + + then: + Set> entrySet = decodedObj.entrySet() + entrySet.size() == 6 + !entrySet.isEmpty() + + entrySet.toArray().length == 6 // kind of a lame test + + Map.Entry[] typedArray = entrySet.toArray(new Map.Entry[entrySet.size()]) + typedArray.length == 6 + + def array = entrySet.toArray(new Map.Entry[7]) + array.length == 7 + array[6] == null + } + + def testPipe() throws IOException { + given: + DBObject obj = getSimpleTestDocument() + encoder.putObject(obj) + buf.pipe(bios) + + when: + LazyDBObject lazyDBObj = lazyDBDecoder.decode(new ByteArrayInputStream(bios.toByteArray()), (DBCollection) null) + bios.reset() + int byteCount = lazyDBObj.pipe(bios) + + then: + lazyDBObj.getBSONSize() == byteCount + + when: + LazyDBObject lazyDBObjectFromPipe = lazyDBDecoder.decode(new ByteArrayInputStream(bios.toByteArray()), (DBCollection) null) + + then: + lazyDBObj == lazyDBObjectFromPipe + } + + def testLazyDBEncoder() throws IOException { + // this is all set up just to get a lazy db object 
that can be encoded + given: + DBObject obj = getSimpleTestDocument() + encoder.putObject(obj) + buf.pipe(bios) + LazyDBObject lazyDBObj = (LazyDBObject) lazyDBDecoder.decode( + new ByteArrayInputStream(bios.toByteArray()), (DBCollection) null) + + // now to the actual test + when: + BasicOutputBuffer outputBuffer = new BasicOutputBuffer() + int size = new LazyDBEncoder().writeObject(outputBuffer, lazyDBObj) + + then: + lazyDBObj.getBSONSize() == size + lazyDBObj.getBSONSize() == outputBuffer.size() + + // this is just asserting that the encoder actually piped the correct bytes + ByteArrayOutputStream baos = new ByteArrayOutputStream() + lazyDBObj.pipe(baos) + baos.toByteArray() == outputBuffer.toByteArray() + } + + def getSimpleTestDocument() { + [_id : new ObjectId(), + first : 1, + second: 'str1', + third : true, + fourth : null, + fifth: [firstNested: 1] as BasicDBObject] as BasicDBObject + } + + def getTestDocument() { + [_id : new ObjectId(), + null : null, + max : new MaxKey(), + min : new MinKey(), + booleanTrue : true, + booleanFalse : false, + int1 : 1, + int1500 : 1500, + int3753 : 3753, + tsp : new BSONTimestamp(), + date : new Date(), + long5 : 5L, + long3254525 : 3254525L, + float324_582 : 324.582f, + double245_6289: 245.6289 as double, + oid : new ObjectId(), + // Symbol wonky + symbol : new Symbol('foobar'), + // Code wonky + code : new Code('var x = 12345;'), + // TODO - Shell doesn't work with Code W/ Scope, return to this test later + /* + b.append( "code_scoped", new CodeWScope( "return x * 500;", test_doc ) );*/ + str : 'foobarbaz', + ref : new DBRef('testRef', new ObjectId()), + object : ['abc', '12345'] as BasicDBObject, + array : ['foo', 'bar', 'baz', 'x', 'y', 'z'], + binary : new Binary('scott'.getBytes()), + regex : compile('^test.*regex.*xyz$', CASE_INSENSITIVE)] as BasicDBObject + } + + void assertDocsSame(final DBObject origDoc, final DBObject doc) { + assert doc['str'] == origDoc['str'] + assert doc['_id'] == origDoc['_id'] + assert doc['null'] == null + assert doc['max'] == origDoc['max'] + assert doc['min'] == origDoc['min'] + assert doc['booleanTrue'] + assert !doc['booleanFalse'] + assert doc['int1'] == origDoc['int1'] + assert doc['int1500'] == origDoc['int1500'] + assert doc['int3753'] == origDoc['int3753'] + assert doc['tsp'] == origDoc['tsp'] + assert doc['date'] == origDoc['date'] + assert doc['long5'] == 5L + assert doc['long3254525'] == 3254525L + assert doc['float324_582'] == 324.5820007324219f + assert doc['double245_6289'] == 245.6289 as double + assert doc['oid'] == origDoc['oid'] + assert doc['str'] == 'foobarbaz' + assert doc['ref'] == origDoc['ref'] + assert doc['object']['abc'] == origDoc['object']['abc'] + assert doc['array'][0] == 'foo' + assert doc['array'][1] == 'bar' + assert doc['array'][2] == 'baz' + assert doc['array'][3] == 'x' + assert doc['array'][4] == 'y' + assert doc['array'][5] == 'z' + assert doc['binary'] == origDoc['binary'].getData() + assert doc['regex'].pattern() == origDoc['regex'].pattern() + assert doc['regex'].flags() == origDoc['regex'].flags() + } +} diff --git a/driver-legacy/src/test/unit/com/mongodb/MapReduceCommandSpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/MapReduceCommandSpecification.groovy new file mode 100644 index 00000000000..1a739508526 --- /dev/null +++ b/driver-legacy/src/test/unit/com/mongodb/MapReduceCommandSpecification.groovy @@ -0,0 +1,111 @@ +/* + * Copyright 2008-present MongoDB, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb + +import spock.lang.Shared +import spock.lang.Specification +import spock.lang.Unroll + +import static com.mongodb.MapReduceCommand.OutputType +import static com.mongodb.ReadPreference.primary +import static java.util.concurrent.TimeUnit.SECONDS + +class MapReduceCommandSpecification extends Specification { + @Shared + private MapReduceCommand cmd + private static final String COLLECTION_NAME = 'collectionName' + + def mapReduceCommand() { + def collection = Mock(DBCollection) { + getName() >> { COLLECTION_NAME } + } + new MapReduceCommand(collection, 'map', 'reduce', 'test', OutputType.REDUCE, new BasicDBObject()) + } + + def sort() { ['a': 1] as BasicDBObject } + + def scope() { ['a': 'b'] } + + def setupSpec() { cmd = mapReduceCommand() } + + @Unroll + def 'should have the correct default for #field'() throws Exception { + expect: + value == expected + + where: + field | value | expected + 'finalize' | cmd.getFinalize() | null + 'input' | cmd.getInput() | COLLECTION_NAME + 'jsMode' | cmd.getJsMode() | null + 'limit' | cmd.getLimit() | 0 + 'map' | cmd.getMap() | 'map' + 'maxTime' | cmd.getMaxTime(SECONDS) | 0 + 'output db' | cmd.getOutputDB() | null + 'output target' | cmd.getOutputTarget() | 'test' + 'output type' | cmd.getOutputType() | OutputType.REDUCE + 'query' | cmd.getQuery() | new BasicDBObject() + 'readPreference' | cmd.getReadPreference() | null + 'reduce' | cmd.getReduce() | 'reduce' + 'scope' | cmd.getScope() | null + 'sort' | cmd.getSort() | null + 'verbose' | cmd.isVerbose() | true + } + + @Unroll + def 'should be able to change the default for #field'() throws Exception { + expect: + value == expected + + where: + field | change | value | expected + 'finalize' | cmd.setFinalize('final') | cmd.getFinalize() | 'final' + 'jsMode' | cmd.setJsMode(true) | cmd.getJsMode() | true + 'limit' | cmd.setLimit(100) | cmd.getLimit() | 100 + 'maxTime' | cmd.setMaxTime(1, SECONDS) | cmd.getMaxTime(SECONDS) | 1 + 'output db' | cmd.setOutputDB('outDB') | cmd.getOutputDB() | 'outDB' + 'readPreference' | cmd.setReadPreference(primary()) | cmd.getReadPreference() | primary() + 'scope' | cmd.setScope(scope()) | cmd.getScope() | scope() + 'sort' | cmd.setSort(sort()) | cmd.getSort() | sort() + 'verbose' | cmd.setVerbose(false) | cmd.isVerbose() | false + } + + def 'should produce the expected DBObject when changed'() throws Exception { + given: + cmd.with { + setFinalize('final') + setJsMode(true) + setLimit(100) + setMaxTime(1, SECONDS) + setOutputDB('outDB') + setReadPreference(primary()) + setScope(scope()) + setSort(sort()) + setVerbose(false) + } + + when: + def expected = [mapreduce: COLLECTION_NAME, map: 'map', reduce: 'reduce', verbose: false, + out : [reduce: 'test', db: 'outDB'] as BasicDBObject, query: [:] as BasicDBObject, + finalize : 'final', sort: sort(), limit: 100, scope: scope(), jsMode: true, + maxTimeMS: 1000] as BasicDBObject + + then: + cmd.toDBObject() == expected + } + +} diff --git 
a/driver-legacy/src/test/unit/com/mongodb/MongoClientOptionsSpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/MongoClientOptionsSpecification.groovy new file mode 100644 index 00000000000..ae1d332674c --- /dev/null +++ b/driver-legacy/src/test/unit/com/mongodb/MongoClientOptionsSpecification.groovy @@ -0,0 +1,672 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb + +import com.mongodb.connection.ClusterSettings +import com.mongodb.event.ClusterListener +import com.mongodb.event.CommandListener +import com.mongodb.event.ConnectionPoolListener +import com.mongodb.event.ServerListener +import com.mongodb.event.ServerMonitorListener +import com.mongodb.selector.ServerSelector +import org.bson.UuidRepresentation +import org.bson.codecs.configuration.CodecRegistry +import spock.lang.Specification + +import javax.net.ssl.SSLContext +import java.util.concurrent.TimeUnit + +import static com.mongodb.CustomMatchers.isTheSameAs +import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE +import static com.mongodb.connection.ClusterConnectionMode.SINGLE +import static spock.util.matcher.HamcrestSupport.expect + +class MongoClientOptionsSpecification extends Specification { + + def 'should set the correct default values'() { + given: + def options = new MongoClientOptions.Builder().build() + + expect: + options.getApplicationName() == null + options.getWriteConcern() == WriteConcern.ACKNOWLEDGED + options.getRetryWrites() + options.getRetryReads() + options.getCodecRegistry() == MongoClientSettings.defaultCodecRegistry + options.getUuidRepresentation() == UuidRepresentation.UNSPECIFIED + options.getMinConnectionsPerHost() == 0 + options.getConnectionsPerHost() == 100 + options.getMaxConnecting() == 2 + options.getTimeout() == null + options.getConnectTimeout() == 10000 + options.getReadPreference() == ReadPreference.primary() + options.getServerSelector() == null + !options.isSslEnabled() + !options.isSslInvalidHostNameAllowed() + options.getSslContext() == null + options.getDbDecoderFactory() == DefaultDBDecoder.FACTORY + options.getDbEncoderFactory() == DefaultDBEncoder.FACTORY + options.getLocalThreshold() == 15 + options.isCursorFinalizerEnabled() + options.getHeartbeatFrequency() == 10000 + options.getMinHeartbeatFrequency() == 500 + options.getServerSelectionTimeout() == 30000 + + options.getCommandListeners() == [] + options.getClusterListeners() == [] + options.getConnectionPoolListeners() == [] + options.getServerListeners() == [] + options.getServerMonitorListeners() == [] + + options.compressorList == [] + options.getAutoEncryptionSettings() == null + options.getServerApi() == null + + options.getSrvMaxHosts() == null + options.getSrvServiceName() == 'mongodb' + } + + def 'should handle illegal arguments'() { + given: + def builder = new MongoClientOptions.Builder() + + when: + builder.dbDecoderFactory(null) + then: + thrown(IllegalArgumentException) + + when: + 
builder.dbEncoderFactory(null) + then: + thrown(IllegalArgumentException) + } + + def 'should build with set options'() { + given: + def encoderFactory = new MyDBEncoderFactory() + def serverSelector = Mock(ServerSelector) + def commandListener = Mock(CommandListener) + def clusterListener = Mock(ClusterListener) + def serverListener = Mock(ServerListener) + def serverMonitorListener = Mock(ServerMonitorListener) + def autoEncryptionSettings = AutoEncryptionSettings.builder() + .keyVaultNamespace('admin.keys') + .kmsProviders(['local': ['key': new byte[64]]]) + .build() + def codecRegistry = Mock(CodecRegistry) + def serverApi = ServerApi.builder().version(ServerApiVersion.V1).build() + + when: + def options = MongoClientOptions.builder() + .applicationName('appName') + .readPreference(ReadPreference.secondary()) + .retryWrites(true) + .retryReads(false) + .writeConcern(WriteConcern.JOURNALED) + .readConcern(ReadConcern.MAJORITY) + .minConnectionsPerHost(30) + .connectionsPerHost(500) + .timeout(10_000) + .connectTimeout(100) + .socketTimeout(700) + .serverSelector(serverSelector) + .serverSelectionTimeout(150) + .maxWaitTime(200) + .maxConnectionIdleTime(300) + .maxConnectionLifeTime(400) + .maxConnecting(1) + .maintenanceInitialDelay(100) + .maintenanceFrequency(100) + .sslEnabled(true) + .sslInvalidHostNameAllowed(true) + .sslContext(SSLContext.getDefault()) + .dbDecoderFactory(LazyDBDecoder.FACTORY) + .heartbeatFrequency(5) + .minHeartbeatFrequency(11) + .heartbeatConnectTimeout(15) + .heartbeatSocketTimeout(20) + .localThreshold(25) + .requiredReplicaSetName('test') + .cursorFinalizerEnabled(false) + .dbEncoderFactory(encoderFactory) + .compressorList([MongoCompressor.createZlibCompressor()]) + .autoEncryptionSettings(autoEncryptionSettings) + .codecRegistry(codecRegistry) + .addCommandListener(commandListener) + .addClusterListener(clusterListener) + .addServerListener(serverListener) + .addServerMonitorListener(serverMonitorListener) + .uuidRepresentation(UuidRepresentation.C_SHARP_LEGACY) + .serverApi(serverApi) + .build() + + then: + options.getApplicationName() == 'appName' + options.getReadPreference() == ReadPreference.secondary() + options.getWriteConcern() == WriteConcern.JOURNALED + options.getReadConcern() == ReadConcern.MAJORITY + options.getServerSelector() == serverSelector + options.getRetryWrites() + !options.getRetryReads() + options.getServerSelectionTimeout() == 150 + options.getTimeout() == 10_000 + options.getMaxWaitTime() == 200 + options.getMaxConnectionIdleTime() == 300 + options.getMaxConnectionLifeTime() == 400 + options.getMaxConnecting() == 1 + options.getMaintenanceInitialDelay() == 100 + options.getMaintenanceFrequency() == 100 + options.getMinConnectionsPerHost() == 30 + options.getConnectionsPerHost() == 500 + options.getConnectTimeout() == 100 + options.getSocketTimeout() == 700 + options.isSslEnabled() + options.isSslInvalidHostNameAllowed() + options.getSslContext() == SSLContext.getDefault() + options.getDbDecoderFactory() == LazyDBDecoder.FACTORY + options.getDbEncoderFactory() == encoderFactory + options.getHeartbeatFrequency() == 5 + options.getMinHeartbeatFrequency() == 11 + options.getHeartbeatConnectTimeout() == 15 + options.getHeartbeatSocketTimeout() == 20 + options.getLocalThreshold() == 25 + options.getRequiredReplicaSetName() == 'test' + !options.isCursorFinalizerEnabled() + options.compressorList == [MongoCompressor.createZlibCompressor()] + options.getAutoEncryptionSettings() == autoEncryptionSettings + options.getClusterListeners() == 
[clusterListener] + options.getCommandListeners() == [commandListener] + options.getServerListeners() == [serverListener] + options.getServerMonitorListeners() == [serverMonitorListener] + options.getUuidRepresentation() == UuidRepresentation.C_SHARP_LEGACY + options.getServerApi() == serverApi + + when: + def credential = MongoCredential.createCredential('user1', 'app1', 'pwd'.toCharArray()) + def settings = options.asMongoClientSettings([new ServerAddress('host1')], null, SINGLE, + credential) + + then: + settings.credential == credential + settings.readPreference == ReadPreference.secondary() + settings.applicationName == 'appName' + settings.writeConcern == WriteConcern.JOURNALED + settings.retryWrites + !settings.retryReads + settings.autoEncryptionSettings == autoEncryptionSettings + settings.codecRegistry == codecRegistry + settings.commandListeners == [commandListener] + settings.compressorList == [MongoCompressor.createZlibCompressor()] + settings.readConcern == ReadConcern.MAJORITY + settings.uuidRepresentation == UuidRepresentation.C_SHARP_LEGACY + settings.serverApi == serverApi + settings.getTimeout(TimeUnit.MILLISECONDS) == 10_000 + + when: + def optionsFromSettings = MongoClientOptions.builder(settings).build() + + then: + optionsFromSettings.getApplicationName() == 'appName' + optionsFromSettings.getReadPreference() == ReadPreference.secondary() + optionsFromSettings.getWriteConcern() == WriteConcern.JOURNALED + optionsFromSettings.getReadConcern() == ReadConcern.MAJORITY + optionsFromSettings.getServerSelector() == serverSelector + optionsFromSettings.getRetryWrites() + !optionsFromSettings.getRetryReads() + optionsFromSettings.getServerSelectionTimeout() == 150 + optionsFromSettings.getMaxWaitTime() == 200 + optionsFromSettings.getMaxConnectionIdleTime() == 300 + optionsFromSettings.getMaxConnectionLifeTime() == 400 + optionsFromSettings.getMaxConnecting() == settings.connectionPoolSettings.maxConnecting + optionsFromSettings.getMaintenanceInitialDelay() == 100 + optionsFromSettings.getMaintenanceInitialDelay() == + settings.connectionPoolSettings.getMaintenanceInitialDelay(TimeUnit.MILLISECONDS) + optionsFromSettings.getMaintenanceFrequency() == 100 + optionsFromSettings.getMaintenanceFrequency() == + settings.connectionPoolSettings.getMaintenanceFrequency(TimeUnit.MILLISECONDS) + optionsFromSettings.getMinConnectionsPerHost() == 30 + optionsFromSettings.getConnectionsPerHost() == 500 + optionsFromSettings.getConnectTimeout() == 100 + optionsFromSettings.getSocketTimeout() == 700 + optionsFromSettings.isSslEnabled() + optionsFromSettings.isSslInvalidHostNameAllowed() + optionsFromSettings.getSslContext() == SSLContext.getDefault() + optionsFromSettings.getHeartbeatFrequency() == 5 + optionsFromSettings.getMinHeartbeatFrequency() == 11 + optionsFromSettings.getHeartbeatConnectTimeout() == 15 + optionsFromSettings.getHeartbeatSocketTimeout() == 20 + optionsFromSettings.getLocalThreshold() == 25 + optionsFromSettings.getRequiredReplicaSetName() == 'test' + optionsFromSettings.compressorList == [MongoCompressor.createZlibCompressor()] + optionsFromSettings.getAutoEncryptionSettings() == autoEncryptionSettings + optionsFromSettings.getClusterListeners() == [clusterListener] + optionsFromSettings.getCommandListeners() == [commandListener] + optionsFromSettings.getServerListeners() == [serverListener] + optionsFromSettings.getServerMonitorListeners() == [serverMonitorListener] + optionsFromSettings.getUuidRepresentation()
== UuidRepresentation.C_SHARP_LEGACY + optionsFromSettings.getServerApi() == serverApi + } + + def 'should create settings with SRV protocol'() { + when: + MongoClientOptions.builder().build().asMongoClientSettings(null, 'test3.test.build.10gen.cc', SINGLE, null) + + then: + thrown(IllegalArgumentException) + + when: + MongoClientOptions.builder().build().asMongoClientSettings([new ServerAddress('host1'), new ServerAddress('host2')], + 'test3.test.build.10gen.cc', MULTIPLE, null) + + then: + thrown(IllegalArgumentException) + + when: + MongoClientOptions.builder().build().asMongoClientSettings(null, 'test3.test.build.10gen.cc:27018', MULTIPLE, null) + + then: + thrown(IllegalArgumentException) + + when: + MongoClientOptions.builder().build().asMongoClientSettings(null, 'test3.test.build.10gen.cc:27017', + MULTIPLE, null) + + then: + thrown(IllegalArgumentException) + + when: + def settings = MongoClientOptions.builder().build().asMongoClientSettings(null, 'test3.test.build.10gen.cc', + MULTIPLE, null) + + then: + settings.clusterSettings == ClusterSettings.builder().srvHost('test3.test.build.10gen.cc').build() + when: + def options = MongoClientOptions.builder() + .srvServiceName('test') + .srvMaxHosts(4) + .build() + settings = options.asMongoClientSettings(null, 'test3.test.build.10gen.cc', + MULTIPLE, null) + + then: + settings.clusterSettings == ClusterSettings.builder().srvHost('test3.test.build.10gen.cc') + .srvServiceName('test') + .srvMaxHosts(4) + .build() + options.getSrvServiceName() == 'test' + options.getSrvMaxHosts() == 4 + } + + def 'should be easy to create new options from existing'() { + when: + def options = MongoClientOptions.builder() + .applicationName('appName') + .readPreference(ReadPreference.secondary()) + .retryReads(true) + .uuidRepresentation(UuidRepresentation.STANDARD) + .writeConcern(WriteConcern.JOURNALED) + .minConnectionsPerHost(30) + .connectionsPerHost(500) + .timeout(10_000) + .connectTimeout(100) + .socketTimeout(700) + .serverSelectionTimeout(150) + .maxWaitTime(200) + .maxConnectionIdleTime(300) + .maxConnectionLifeTime(400) + .maxConnecting(1) + .sslEnabled(true) + .sslInvalidHostNameAllowed(true) + .sslContext(SSLContext.getDefault()) + .dbDecoderFactory(LazyDBDecoder.FACTORY) + .heartbeatFrequency(5) + .minHeartbeatFrequency(11) + .heartbeatConnectTimeout(15) + .heartbeatSocketTimeout(20) + .localThreshold(25) + .requiredReplicaSetName('test') + .cursorFinalizerEnabled(false) + .dbEncoderFactory(new MyDBEncoderFactory()) + .addCommandListener(Mock(CommandListener)) + .addConnectionPoolListener(Mock(ConnectionPoolListener)) + .addClusterListener(Mock(ClusterListener)) + .addServerListener(Mock(ServerListener)) + .addServerMonitorListener(Mock(ServerMonitorListener)) + .compressorList([MongoCompressor.createZlibCompressor()]) + .autoEncryptionSettings(AutoEncryptionSettings.builder() + .keyVaultNamespace('admin.keys') + .kmsProviders(['local': ['key': new byte[64]]]) + .build()) + .serverApi(ServerApi.builder().version(ServerApiVersion.V1).build()) + .build() + + then: + expect options, isTheSameAs(MongoClientOptions.builder(options).build()) + } + + def 'applicationName can be 128 bytes when encoded as UTF-8'() { + given: + def applicationName = 'a' * 126 + '\u00A0' + + when: + def options = MongoClientOptions.builder().applicationName(applicationName).build() + + then: + options.applicationName == applicationName + } + + def 'should throw IllegalArgumentException if applicationName exceeds 128 bytes when encoded as UTF-8'() { + given: + def 
applicationName = 'a' * 127 + '\u00A0' + + when: + MongoClientOptions.builder().applicationName(applicationName) + + then: + thrown(IllegalArgumentException) + } + + def 'should add command listeners'() { + given: + CommandListener commandListenerOne = Mock(CommandListener) + CommandListener commandListenerTwo = Mock(CommandListener) + CommandListener commandListenerThree = Mock(CommandListener) + + when: + def options = MongoClientOptions.builder() + .build() + + then: + options.commandListeners.size() == 0 + + when: + options = MongoClientOptions.builder() + .addCommandListener(commandListenerOne) + .build() + + then: + options.commandListeners.size() == 1 + options.commandListeners[0].is commandListenerOne + + when: + options = MongoClientOptions.builder() + .addCommandListener(commandListenerOne) + .addCommandListener(commandListenerTwo) + .build() + + then: + options.commandListeners.size() == 2 + options.commandListeners[0].is commandListenerOne + options.commandListeners[1].is commandListenerTwo + + when: + def copiedOptions = MongoClientOptions.builder(options).addCommandListener(commandListenerThree).build() + + then: + copiedOptions.commandListeners.size() == 3 + copiedOptions.commandListeners[0].is commandListenerOne + copiedOptions.commandListeners[1].is commandListenerTwo + copiedOptions.commandListeners[2].is commandListenerThree + options.commandListeners.size() == 2 + options.commandListeners[0].is commandListenerOne + options.commandListeners[1].is commandListenerTwo + } + + def 'should add connection pool listeners'() { + given: + ConnectionPoolListener connectionPoolListenerOne = Mock(ConnectionPoolListener) + ConnectionPoolListener connectionPoolListenerTwo = Mock(ConnectionPoolListener) + ConnectionPoolListener connectionPoolListenerThree = Mock(ConnectionPoolListener) + + when: + def options = MongoClientOptions.builder() + .build() + + then: + options.connectionPoolListeners.size() == 0 + + when: + options = MongoClientOptions.builder() + .addConnectionPoolListener(connectionPoolListenerOne) + .build() + + then: + options.connectionPoolListeners.size() == 1 + options.connectionPoolListeners[0].is connectionPoolListenerOne + + when: + options = MongoClientOptions.builder() + .addConnectionPoolListener(connectionPoolListenerOne) + .addConnectionPoolListener(connectionPoolListenerTwo) + .build() + + then: + options.connectionPoolListeners.size() == 2 + options.connectionPoolListeners[0].is connectionPoolListenerOne + options.connectionPoolListeners[1].is connectionPoolListenerTwo + + when: + def copiedOptions = MongoClientOptions.builder(options).addConnectionPoolListener(connectionPoolListenerThree).build() + + then: + copiedOptions.connectionPoolListeners.size() == 3 + copiedOptions.connectionPoolListeners[0].is connectionPoolListenerOne + copiedOptions.connectionPoolListeners[1].is connectionPoolListenerTwo + copiedOptions.connectionPoolListeners[2].is connectionPoolListenerThree + options.connectionPoolListeners.size() == 2 + options.connectionPoolListeners[0].is connectionPoolListenerOne + options.connectionPoolListeners[1].is connectionPoolListenerTwo + } + + def 'should add cluster listeners'() { + given: + ClusterListener clusterListenerOne = Mock(ClusterListener) + ClusterListener clusterListenerTwo = Mock(ClusterListener) + ClusterListener clusterListenerThree = Mock(ClusterListener) + + when: + def options = MongoClientOptions.builder() + .build() + + then: + options.clusterListeners.size() == 0 + + when: + options = MongoClientOptions.builder() + 
.addClusterListener(clusterListenerOne) + .build() + + then: + options.clusterListeners.size() == 1 + options.clusterListeners[0].is clusterListenerOne + + when: + options = MongoClientOptions.builder() + .addClusterListener(clusterListenerOne) + .addClusterListener(clusterListenerTwo) + .build() + + then: + options.clusterListeners.size() == 2 + options.clusterListeners[0].is clusterListenerOne + options.clusterListeners[1].is clusterListenerTwo + + when: + def copiedOptions = MongoClientOptions.builder(options).addClusterListener(clusterListenerThree).build() + + then: + copiedOptions.clusterListeners.size() == 3 + copiedOptions.clusterListeners[0].is clusterListenerOne + copiedOptions.clusterListeners[1].is clusterListenerTwo + copiedOptions.clusterListeners[2].is clusterListenerThree + options.clusterListeners.size() == 2 + options.clusterListeners[0].is clusterListenerOne + options.clusterListeners[1].is clusterListenerTwo + } + + def 'should add server listeners'() { + given: + ServerListener serverListenerOne = Mock(ServerListener) + ServerListener serverListenerTwo = Mock(ServerListener) + ServerListener serverListenerThree = Mock(ServerListener) + + when: + def options = MongoClientOptions.builder() + .build() + + then: + options.serverListeners.size() == 0 + + when: + options = MongoClientOptions.builder() + .addServerListener(serverListenerOne) + .build() + + then: + options.serverListeners.size() == 1 + options.serverListeners[0].is serverListenerOne + + when: + options = MongoClientOptions.builder() + .addServerListener(serverListenerOne) + .addServerListener(serverListenerTwo) + .build() + + then: + options.serverListeners.size() == 2 + options.serverListeners[0].is serverListenerOne + options.serverListeners[1].is serverListenerTwo + + when: + def copiedOptions = MongoClientOptions.builder(options).addServerListener(serverListenerThree).build() + + then: + copiedOptions.serverListeners.size() == 3 + copiedOptions.serverListeners[0].is serverListenerOne + copiedOptions.serverListeners[1].is serverListenerTwo + copiedOptions.serverListeners[2].is serverListenerThree + options.serverListeners.size() == 2 + options.serverListeners[0].is serverListenerOne + options.serverListeners[1].is serverListenerTwo + } + + def 'should add server monitor listeners'() { + given: + ServerMonitorListener serverMonitorListenerOne = Mock(ServerMonitorListener) + ServerMonitorListener serverMonitorListenerTwo = Mock(ServerMonitorListener) + ServerMonitorListener serverMonitorListenerThree = Mock(ServerMonitorListener) + + when: + def options = MongoClientOptions.builder() + .build() + + then: + options.serverMonitorListeners.size() == 0 + + when: + options = MongoClientOptions.builder() + .addServerMonitorListener(serverMonitorListenerOne) + .build() + + then: + options.serverMonitorListeners.size() == 1 + options.serverMonitorListeners[0].is serverMonitorListenerOne + + when: + options = MongoClientOptions.builder() + .addServerMonitorListener(serverMonitorListenerOne) + .addServerMonitorListener(serverMonitorListenerTwo) + .build() + + then: + options.serverMonitorListeners.size() == 2 + options.serverMonitorListeners[0].is serverMonitorListenerOne + options.serverMonitorListeners[1].is serverMonitorListenerTwo + + when: + def copiedOptions = MongoClientOptions.builder(options).addServerMonitorListener(serverMonitorListenerThree).build() + + then: + copiedOptions.serverMonitorListeners.size() == 3 + copiedOptions.serverMonitorListeners[0].is serverMonitorListenerOne + 
copiedOptions.serverMonitorListeners[1].is serverMonitorListenerTwo + copiedOptions.serverMonitorListeners[2].is serverMonitorListenerThree + options.serverMonitorListeners.size() == 2 + options.serverMonitorListeners[0].is serverMonitorListenerOne + options.serverMonitorListeners[1].is serverMonitorListenerTwo + } + + def 'builder should copy all values from the existing MongoClientOptions'() { + given: + def options = MongoClientOptions.builder() + .applicationName('appName') + .readPreference(ReadPreference.secondary()) + .writeConcern(WriteConcern.JOURNALED) + .retryWrites(true) + .retryReads(true) + .uuidRepresentation(UuidRepresentation.STANDARD) + .minConnectionsPerHost(30) + .connectionsPerHost(500) + .timeout(10_000) + .connectTimeout(100) + .socketTimeout(700) + .serverSelectionTimeout(150) + .maxWaitTime(200) + .maxConnectionIdleTime(300) + .maxConnectionLifeTime(400) + .maxConnecting(1) + .sslEnabled(true) + .sslInvalidHostNameAllowed(true) + .sslContext(SSLContext.getDefault()) + .dbDecoderFactory(LazyDBDecoder.FACTORY) + .heartbeatFrequency(5) + .minHeartbeatFrequency(11) + .heartbeatConnectTimeout(15) + .heartbeatSocketTimeout(20) + .localThreshold(25) + .requiredReplicaSetName('test') + .cursorFinalizerEnabled(false) + .dbEncoderFactory(new MyDBEncoderFactory()) + .addCommandListener(Mock(CommandListener)) + .addClusterListener(Mock(ClusterListener)) + .addConnectionPoolListener(Mock(ConnectionPoolListener)) + .addServerListener(Mock(ServerListener)) + .addServerMonitorListener(Mock(ServerMonitorListener)) + .compressorList([MongoCompressor.createZlibCompressor()]) + .autoEncryptionSettings(null) + .build() + + when: + def copy = MongoClientOptions.builder(options).build() + + then: + copy == options + } + + def 'should allow 0 (infinite) connectionsPerHost'() { + expect: + MongoClientOptions.builder().connectionsPerHost(0).build().getConnectionsPerHost() == 0 + } + + private static class MyDBEncoderFactory implements DBEncoderFactory { + @Override + DBEncoder create() { + new DefaultDBEncoder() + } + } +} diff --git a/driver-legacy/src/test/unit/com/mongodb/MongoClientSpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/MongoClientSpecification.groovy new file mode 100644 index 00000000000..1389a41c760 --- /dev/null +++ b/driver-legacy/src/test/unit/com/mongodb/MongoClientSpecification.groovy @@ -0,0 +1,368 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb + +import com.mongodb.client.internal.MongoClientImpl +import com.mongodb.client.internal.MongoDatabaseImpl +import com.mongodb.client.internal.TestOperationExecutor +import com.mongodb.client.model.geojson.MultiPolygon +import com.mongodb.connection.ClusterSettings +import com.mongodb.internal.connection.ClientMetadata +import com.mongodb.internal.connection.Cluster +import org.bson.BsonDocument +import org.bson.Document +import org.bson.codecs.UuidCodec +import org.bson.codecs.ValueCodecProvider +import org.bson.codecs.configuration.CodecRegistry +import org.bson.json.JsonObject +import spock.lang.Specification + +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS +import static com.mongodb.CustomMatchers.isTheSameAs +import static com.mongodb.MongoClientSettings.getDefaultCodecRegistry +import static com.mongodb.MongoCredential.createMongoX509Credential +import static com.mongodb.ReadPreference.secondary +import static com.mongodb.connection.ClusterConnectionMode.MULTIPLE +import static com.mongodb.connection.ClusterConnectionMode.SINGLE +import static java.util.concurrent.TimeUnit.MILLISECONDS +import static org.bson.UuidRepresentation.C_SHARP_LEGACY +import static org.bson.UuidRepresentation.STANDARD +import static org.bson.codecs.configuration.CodecRegistries.fromProviders +import static spock.util.matcher.HamcrestSupport.expect + +class MongoClientSpecification extends Specification { + + private static CodecRegistry codecRegistry = fromProviders(new ValueCodecProvider()) + + def 'default codec registry should contain all supported providers'() { + given: + def codecRegistry = getDefaultCodecRegistry() + + expect: + codecRegistry.get(BsonDocument) + codecRegistry.get(BasicDBObject) + codecRegistry.get(Document) + codecRegistry.get(Integer) + codecRegistry.get(MultiPolygon) + codecRegistry.get(Collection) + codecRegistry.get(Iterable) + codecRegistry.get(JsonObject) + } + + def 'should construct with correct settings'() { + expect: + client.delegate.settings.clusterSettings == clusterSettings + client.credential == credential + + cleanup: + client?.close() + + where: + client | clusterSettings | credential + new MongoClient() | + ClusterSettings.builder().build() | + null + new MongoClient('host:27018') | + ClusterSettings.builder().hosts([new ServerAddress('host:27018')]).mode(SINGLE).build() | + null + new MongoClient('host', 27018) | + ClusterSettings.builder().hosts([new ServerAddress('host:27018')]).mode(SINGLE).build() | + null + new MongoClient('mongodb://host:27018') | + ClusterSettings.builder().hosts([new ServerAddress('host:27018')]).mode(SINGLE).build() | + null + new MongoClient('mongodb://user:pwd@host:27018') | + ClusterSettings.builder().hosts([new ServerAddress('host:27018')]).mode(SINGLE).build() | + MongoCredential.createCredential('user', 'admin', 'pwd'.toCharArray()) + new MongoClient('mongodb+srv://test3.test.build.10gen.cc') | + ClusterSettings.builder().srvHost('test3.test.build.10gen.cc').mode(MULTIPLE).build() | + null + new MongoClient('mongodb+srv://user:pwd@test3.test.build.10gen.cc') | + ClusterSettings.builder().srvHost('test3.test.build.10gen.cc').mode(MULTIPLE).build() | + MongoCredential.createCredential('user', 'admin', 'pwd'.toCharArray()) + new MongoClient(new ConnectionString('mongodb://host:27018')) | + ClusterSettings.builder().hosts([new ServerAddress('host:27018')]).mode(SINGLE).build() | + null + new MongoClient(new ConnectionString('mongodb://user:pwd@host:27018')) | + ClusterSettings.builder().hosts([new 
ServerAddress('host:27018')]).mode(SINGLE).build() | + MongoCredential.createCredential('user', 'admin', 'pwd'.toCharArray()) + new MongoClient(new ConnectionString('mongodb+srv://test3.test.build.10gen.cc')) | + ClusterSettings.builder().srvHost('test3.test.build.10gen.cc').mode(MULTIPLE).build() | + null + new MongoClient(MongoClientSettings.builder() + .applyConnectionString(new ConnectionString('mongodb://host:27018')).build()) | + ClusterSettings.builder().hosts([new ServerAddress('host:27018')]).mode(SINGLE).build() | + null + new MongoClient(new MongoClientURI('mongodb://host:27018')) | + ClusterSettings.builder().hosts([new ServerAddress('host:27018')]).mode(SINGLE).build() | + null + new MongoClient(new MongoClientURI('mongodb://host:27018/?replicaSet=rs0')) | + ClusterSettings.builder().hosts([new ServerAddress('host:27018')]).mode(MULTIPLE).requiredReplicaSetName('rs0') + .build() | + null + new MongoClient(new MongoClientURI('mongodb://user:pwd@host:27018')) | + ClusterSettings.builder().hosts([new ServerAddress('host:27018')]).mode(SINGLE).build() | + MongoCredential.createCredential('user', 'admin', 'pwd'.toCharArray()) + new MongoClient(new MongoClientURI('mongodb://host1:27018,host2:27018')) | + ClusterSettings.builder().hosts([new ServerAddress('host1:27018'), new ServerAddress('host2:27018')]) + .mode(MULTIPLE).build() | + null + new MongoClient(new MongoClientURI('mongodb+srv://test3.test.build.10gen.cc')) | + ClusterSettings.builder().srvHost('test3.test.build.10gen.cc').mode(MULTIPLE).build() | + null + new MongoClient('host:27018', MongoClientOptions.builder().serverSelectionTimeout(5).build()) | + ClusterSettings.builder().hosts([new ServerAddress('host:27018')]).mode(SINGLE) + .serverSelectionTimeout(5, MILLISECONDS).build() | + null + new MongoClient(new ServerAddress('host:27018')) | + ClusterSettings.builder().hosts([new ServerAddress('host:27018')]).mode(SINGLE).build() | + null + new MongoClient(new ServerAddress('host:27018'), MongoClientOptions.builder().serverSelectionTimeout(5).build()) | + ClusterSettings.builder().hosts([new ServerAddress('host:27018')]).mode(SINGLE) + .serverSelectionTimeout(5, MILLISECONDS).build() | + null + new MongoClient(new ServerAddress('host:27018'), createMongoX509Credential(), + MongoClientOptions.builder().serverSelectionTimeout(5).build()) | + ClusterSettings.builder().hosts([new ServerAddress('host:27018')]).mode(SINGLE) + .serverSelectionTimeout(5, MILLISECONDS).build() | + createMongoX509Credential() + new MongoClient([new ServerAddress('host:27018')]) | + ClusterSettings.builder().hosts([new ServerAddress('host:27018')]).mode(MULTIPLE).build() | + null + new MongoClient([new ServerAddress('host:27018')], MongoClientOptions.builder().serverSelectionTimeout(5).build()) | + ClusterSettings.builder().hosts([new ServerAddress('host:27018')]).mode(MULTIPLE) + .serverSelectionTimeout(5, MILLISECONDS).build() | + null + new MongoClient([new ServerAddress('host:27018')], createMongoX509Credential(), + MongoClientOptions.builder().serverSelectionTimeout(5).build()) | + ClusterSettings.builder().hosts([new ServerAddress('host:27018')]).mode(MULTIPLE) + .serverSelectionTimeout(5, MILLISECONDS).build() | + createMongoX509Credential() + } + + def 'should wrap MongoDBDriverInformation with legacy information'() { + expect: + client.delegate.mongoDriverInformation.driverNames == mongoDriverInformation.driverNames + client.delegate.mongoDriverInformation.driverPlatforms == mongoDriverInformation.driverPlatforms + 
client.delegate.mongoDriverInformation.driverVersions == mongoDriverInformation.driverVersions + + cleanup: + client?.close() + + where: + client | mongoDriverInformation + new MongoClient() | + MongoDriverInformation.builder().driverName('legacy').build() + new MongoClient('host:27018') | + MongoDriverInformation.builder().driverName('legacy').build() + new MongoClient('host', 27018) | + MongoDriverInformation.builder().driverName('legacy').build() + new MongoClient('mongodb://host:27018') | + MongoDriverInformation.builder().driverName('legacy').build() + new MongoClient(new ConnectionString('mongodb://host:27018')) | + MongoDriverInformation.builder().driverName('legacy').build() + new MongoClient(new ConnectionString('mongodb://host:27018'), + MongoDriverInformation.builder().driverName('test').driverPlatform('osx').driverVersion('1.0').build()) | + MongoDriverInformation.builder( + MongoDriverInformation.builder().driverName('test').driverPlatform('osx').driverVersion('1.0').build()) + .driverName('legacy').build() + new MongoClient(MongoClientSettings.builder() + .applyConnectionString(new ConnectionString('mongodb://host:27018')).build()) | + MongoDriverInformation.builder().driverName('legacy').build() + new MongoClient(MongoClientSettings.builder() + .applyConnectionString(new ConnectionString('mongodb://host:27018')).build(), + MongoDriverInformation.builder().driverName('test').driverPlatform('osx').driverVersion('1.0').build()) | + MongoDriverInformation.builder( + MongoDriverInformation.builder().driverName('test').driverPlatform('osx').driverVersion('1.0').build()) + .driverName('legacy').build() + new MongoClient(new MongoClientURI('mongodb://host:27018')) | + MongoDriverInformation.builder().driverName('legacy').build() + new MongoClient(new MongoClientURI('mongodb://host:27018'), + MongoDriverInformation.builder().driverName('test').driverPlatform('osx').driverVersion('1.0').build()) | + MongoDriverInformation.builder( + MongoDriverInformation.builder().driverName('test').driverPlatform('osx').driverVersion('1.0').build()) + .driverName('legacy').build() + new MongoClient('host:27018', MongoClientOptions.builder().serverSelectionTimeout(5).build()) | + MongoDriverInformation.builder().driverName('legacy').build() + new MongoClient(new ServerAddress('host:27018')) | + MongoDriverInformation.builder().driverName('legacy').build() + new MongoClient(new ServerAddress('host:27018'), MongoClientOptions.builder().serverSelectionTimeout(5).build()) | + MongoDriverInformation.builder().driverName('legacy').build() + new MongoClient(new ServerAddress('host:27018'), createMongoX509Credential(), + MongoClientOptions.builder().serverSelectionTimeout(5).build()) | + MongoDriverInformation.builder().driverName('legacy').build() + new MongoClient(new ServerAddress('host:27018'), createMongoX509Credential(), + MongoClientOptions.builder().serverSelectionTimeout(5).build(), + MongoDriverInformation.builder().driverName('test').driverPlatform('osx').driverVersion('1.0').build()) | + MongoDriverInformation.builder( + MongoDriverInformation.builder().driverName('test').driverPlatform('osx').driverVersion('1.0').build()) + .driverName('legacy').build() + new MongoClient([new ServerAddress('host:27018')]) | + MongoDriverInformation.builder().driverName('legacy').build() + new MongoClient([new ServerAddress('host:27018')], MongoClientOptions.builder().serverSelectionTimeout(5).build()) | + MongoDriverInformation.builder().driverName('legacy').build() + new MongoClient([new 
ServerAddress('host:27018')], createMongoX509Credential(), + MongoClientOptions.builder().serverSelectionTimeout(5).build()) | + MongoDriverInformation.builder().driverName('legacy').build() + new MongoClient([new ServerAddress('host:27018')], createMongoX509Credential(), + MongoClientOptions.builder().serverSelectionTimeout(5).build(), + MongoDriverInformation.builder().driverName('test').driverPlatform('osx').driverVersion('1.0').build()) | + MongoDriverInformation.builder( + MongoDriverInformation.builder().driverName('test').driverPlatform('osx').driverVersion('1.0').build()) + .driverName('legacy').build() + } + + def 'should preserve original options'() { + given: + def options = MongoClientOptions.builder().cursorFinalizerEnabled(false).build() + + when: + def client = new MongoClient('localhost', options) + + then: + client.mongoClientOptions == options + + cleanup: + client?.close() + } + + def 'should preserve original options from MongoClientURI'() { + given: + def builder = MongoClientOptions.builder().cursorFinalizerEnabled(false) + + when: + def client = new MongoClient(new MongoClientURI('mongodb://localhost', builder)) + + then: + client.mongoClientOptions == builder.build() + + cleanup: + client?.close() + } + + def 'should manage cursor cleaning service if enabled'() { + when: + def client = new MongoClient('localhost', MongoClientOptions.builder().cursorFinalizerEnabled(true).build()) + + then: + client.cursorCleaningService != null + + when: + client.close() + + then: + client.cursorCleaningService.isShutdown() + } + + def 'should not create cursor cleaning service if disabled'() { + when: + def client = new MongoClient('localhost', MongoClientOptions.builder().cursorFinalizerEnabled(false).build()) + + then: + client.cursorCleaningService == null + + cleanup: + client?.close() + } + + def 'should get specified options'() { + when: + def options = MongoClientOptions.builder().cursorFinalizerEnabled(false).build() + def client = new MongoClient('localhost', options) + + then: + client.mongoClientOptions == options + + cleanup: + client?.close() + } + + def 'should get options from specified settings'() { + when: + def settings = MongoClientSettings.builder().writeConcern(WriteConcern.MAJORITY).build() + def client = new MongoClient(settings) + + then: + client.mongoClientOptions == MongoClientOptions.builder(settings).build() + + cleanup: + client?.close() + } + + def 'should validate the ChangeStreamIterable pipeline data correctly'() { + given: + def executor = new TestOperationExecutor([]) + + def clusterStub = Stub(Cluster) + clusterStub.getClientMetadata() >> new ClientMetadata("test", MongoDriverInformation.builder().build()) + + def client = new MongoClientImpl(clusterStub, null, MongoClientSettings.builder().build(), null, executor) + + when: + client.watch((Class) null) + + then: + thrown(IllegalArgumentException) + + when: + client.watch([null]).into([]) + + then: + thrown(IllegalArgumentException) + } + + def 'should pass the correct settings to getDatabase'() { + given: + def options = MongoClientOptions.builder() + .readPreference(secondary()) + .writeConcern(WriteConcern.MAJORITY) +
.readConcern(ReadConcern.MAJORITY) + .retryWrites(true) + .codecRegistry(codecRegistry) + .uuidRepresentation(STANDARD) + .build() + def client = new MongoClient('localhost', options) + + when: + def database = client.getDatabase('name') + + then: + expect database, isTheSameAs(new MongoDatabaseImpl('name', client.getCodecRegistry(), secondary(), + WriteConcern.MAJORITY, true, true, ReadConcern.MAJORITY, STANDARD, null, + TIMEOUT_SETTINGS.withMaxWaitTimeMS(120_000), client.getOperationExecutor())) + } + + def 'should create registry reflecting UuidRepresentation'() { + given: + def options = MongoClientOptions.builder() + .codecRegistry(codecRegistry) + .uuidRepresentation(C_SHARP_LEGACY) + .build() + + when: + def client = new MongoClient('localhost', options) + + then: + (client.getCodecRegistry().get(UUID) as UuidCodec).getUuidRepresentation() == C_SHARP_LEGACY + + cleanup: + client?.close() + } +} diff --git a/driver-legacy/src/test/unit/com/mongodb/MongoClientURISpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/MongoClientURISpecification.groovy new file mode 100644 index 00000000000..241ac958c8a --- /dev/null +++ b/driver-legacy/src/test/unit/com/mongodb/MongoClientURISpecification.groovy @@ -0,0 +1,449 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb + +import com.mongodb.connection.ClusterConnectionMode +import org.bson.UuidRepresentation +import spock.lang.Specification +import spock.lang.Unroll + +import javax.net.ssl.SSLContext + +import static com.mongodb.MongoCredential.createCredential +import static com.mongodb.MongoCredential.createGSSAPICredential +import static com.mongodb.MongoCredential.createMongoX509Credential +import static com.mongodb.MongoCredential.createPlainCredential +import static com.mongodb.MongoCredential.createScramSha1Credential +import static com.mongodb.ReadPreference.secondaryPreferred +import static java.util.Arrays.asList +import static java.util.concurrent.TimeUnit.MILLISECONDS + +class MongoClientURISpecification extends Specification { + + def 'should not throw an Exception if URI contains an unknown option'() { + when: + new MongoClientURI('mongodb://localhost/?unknownOption=5') + + then: + notThrown(IllegalArgumentException) + } + + @Unroll + def 'should parse #uri into correct components'() { + expect: + uri.getHosts().size() == num + uri.getHosts() == hosts + uri.getDatabase() == database + uri.getCollection() == collection + uri.getUsername() == username + uri.getPassword() == password + + where: + uri | num | hosts | database | collection | username | password + new MongoClientURI('mongodb://db.example.com') | 1 | ['db.example.com'] | null | null | null | null + new MongoClientURI('mongodb://10.0.0.1') | 1 | ['10.0.0.1'] | null | null | null | null + new MongoClientURI('mongodb://[::1]') | 1 | ['[::1]'] | null | null | null | null + new MongoClientURI('mongodb://foo/bar') | 1 | ['foo'] | 'bar' | null | null | null + new MongoClientURI('mongodb://10.0.0.1/bar') | 1 | ['10.0.0.1'] | 'bar' | null | null | null + new MongoClientURI('mongodb://[::1]/bar') | 1 | ['[::1]'] | 'bar' | null | null | null + new MongoClientURI('mongodb://localhost/' + + 'test.my.coll') | 1 | ['localhost'] | 'test' | 'my.coll' | null | null + new MongoClientURI('mongodb://foo/bar.goo') | 1 | ['foo'] | 'bar' | 'goo' | null | null + new MongoClientURI('mongodb://user:pass@' + + 'host/bar') | 1 | ['host'] | 'bar' | null | 'user' | 'pass' as char[] + new MongoClientURI('mongodb://user:pass@' + + 'host:27011/bar') | 1 | ['host:27011'] | 'bar' | null | 'user' | 'pass' as char[] + new MongoClientURI('mongodb://user:pass@' + + '10.0.0.1:27011/bar') | 1 | ['10.0.0.1:27011'] | 'bar' | null | 'user' | 'pass' as char[] + new MongoClientURI('mongodb://user:pass@' + + '[::1]:27011/bar') | 1 | ['[::1]:27011'] | 'bar' | null | 'user' | 'pass' as char[] + new MongoClientURI('mongodb://user:pass@' + + 'host:7,' + + 'host2:8,' + + 'host3:9/bar') | 3 | ['host2:8', + 'host3:9', + 'host:7'] | 'bar' | null | 'user' | 'pass' as char[] + new MongoClientURI('mongodb://user:pass@' + + '10.0.0.1:7,' + + '[::1]:8,' + + 'host3:9/bar') | 3 | ['10.0.0.1:7', + '[::1]:8', + 'host3:9'] | 'bar' | null | 'user' | 'pass' as char[] + } + + def 'should correctly parse different write concerns'() { + expect: + uri.getOptions().getWriteConcern() == writeConcern + + where: + uri | writeConcern + new MongoClientURI('mongodb://localhost') | WriteConcern.ACKNOWLEDGED + new MongoClientURI('mongodb://localhost/?safe=true') | WriteConcern.ACKNOWLEDGED + new MongoClientURI('mongodb://localhost/?safe=false') | WriteConcern.UNACKNOWLEDGED + new MongoClientURI('mongodb://localhost/?wTimeout=5') | WriteConcern.ACKNOWLEDGED + .withWTimeout(5, MILLISECONDS) + new MongoClientURI('mongodb://localhost/?journal=true') | 
WriteConcern.ACKNOWLEDGED.withJournal(true) + new MongoClientURI('mongodb://localhost/?w=2&wtimeoutMS=5&fsync=true&journal=true') | new WriteConcern(2, 5).withJournal(true) + new MongoClientURI('mongodb://localhost/?w=majority&wtimeoutMS=5&j=true') | new WriteConcern('majority') + .withWTimeout(5, MILLISECONDS).withJournal(true) + } + + @Unroll + def 'should correctly parse legacy wtimeout write concerns'() { + expect: + uri.getOptions().getWriteConcern() == writeConcern + + where: + uri | writeConcern + new MongoClientURI('mongodb://localhost') | WriteConcern.ACKNOWLEDGED + new MongoClientURI('mongodb://localhost/?wTimeout=5') | WriteConcern.ACKNOWLEDGED + .withWTimeout(5, MILLISECONDS) + new MongoClientURI('mongodb://localhost/?w=2&wtimeout=5&j=true') | new WriteConcern(2, 5).withJournal(true) + new MongoClientURI('mongodb://localhost/?w=majority&wtimeout=5&j=true') | new WriteConcern('majority') + .withWTimeout(5, MILLISECONDS).withJournal(true) + new MongoClientURI('mongodb://localhost/?wTimeout=1&wtimeoutMS=5') | WriteConcern.ACKNOWLEDGED + .withWTimeout(5, MILLISECONDS) + } + + def 'should correctly parse URI options'() { + given: + def uri = new MongoClientURI('mongodb://localhost/?minPoolSize=5&maxPoolSize=10&waitQueueTimeoutMS=150&' + + 'maxIdleTimeMS=200&maxLifeTimeMS=300&maxConnecting=1&replicaSet=test&' + + 'connectTimeoutMS=2500&socketTimeoutMS=5500&' + + 'safe=false&w=1&wtimeout=2500&ssl=true&readPreference=secondary&' + + 'sslInvalidHostNameAllowed=true&' + + 'serverSelectionTimeoutMS=25000&' + + 'localThresholdMS=30&' + + 'heartbeatFrequencyMS=20000&' + + 'retryWrites=true&' + + 'retryReads=true&' + + 'uuidRepresentation=csharpLegacy&' + + 'appName=app1&' + + 'timeoutMS=10000') + + when: + def options = uri.getOptions() + + then: + options.getWriteConcern() == new WriteConcern(1, 2500) + options.getReadPreference() == ReadPreference.secondary() + options.getConnectionsPerHost() == 10 + options.getMinConnectionsPerHost() == 5 + options.getMaxWaitTime() == 150 + options.getMaxConnectionIdleTime() == 200 + options.getMaxConnectionLifeTime() == 300 + options.getMaxConnecting() == 1 + options.getTimeout() == 10_000 + options.getSocketTimeout() == 5500 + options.getConnectTimeout() == 2500 + options.getRequiredReplicaSetName() == 'test' + options.isSslEnabled() + options.isSslInvalidHostNameAllowed() + options.getServerSelectionTimeout() == 25000 + options.getLocalThreshold() == 30 + options.getHeartbeatFrequency() == 20000 + options.getRetryWrites() + options.getRetryReads() + options.getUuidRepresentation() == UuidRepresentation.C_SHARP_LEGACY + options.getApplicationName() == 'app1' + } + + def 'should have correct defaults for options'() { + when: + MongoClientOptions options = new MongoClientURI('mongodb://localhost').getOptions() + + then: + options.getConnectionsPerHost() == 100 + options.getMaxConnecting() == 2 + options.getTimeout() == null + options.getMaxWaitTime() == 120000 + options.getConnectTimeout() == 10000 + options.getSocketTimeout() == 0 + options.getReadPreference() == ReadPreference.primary() + options.getRequiredReplicaSetName() == null + !options.isSslEnabled() + options.getRetryWrites() + options.getRetryReads() + options.getUuidRepresentation() == UuidRepresentation.UNSPECIFIED + } + + def 'should apply default uri to options'() { + given: + def optionsBuilder = MongoClientOptions.builder() + .applicationName('appName') + .readPreference(ReadPreference.secondary()) + .retryWrites(true) + .retryReads(true) + .writeConcern(WriteConcern.JOURNALED)
+ .minConnectionsPerHost(30) + .connectionsPerHost(500) + .timeout(10_000) + .connectTimeout(100) + .socketTimeout(700) + .serverSelectionTimeout(150) + .maxWaitTime(200) + .maxConnectionIdleTime(300) + .maxConnectionLifeTime(400) + .maxConnecting(1) + .sslEnabled(true) + .sslInvalidHostNameAllowed(true) + .sslContext(SSLContext.getDefault()) + .heartbeatFrequency(5) + .minHeartbeatFrequency(11) + .heartbeatConnectTimeout(15) + .heartbeatSocketTimeout(20) + .localThreshold(25) + .requiredReplicaSetName('test') + .compressorList([MongoCompressor.createZlibCompressor()]) + .uuidRepresentation(UuidRepresentation.C_SHARP_LEGACY) + + when: + def options = new MongoClientURI('mongodb://localhost', optionsBuilder).getOptions() + + then: + options.getApplicationName() == 'appName' + options.getReadPreference() == ReadPreference.secondary() + options.getWriteConcern() == WriteConcern.JOURNALED + options.getRetryWrites() + options.getRetryReads() + options.getTimeout() == 10_000 + options.getServerSelectionTimeout() == 150 + options.getMaxWaitTime() == 200 + options.getMaxConnectionIdleTime() == 300 + options.getMaxConnectionLifeTime() == 400 + options.getMaxConnecting() == 1 + options.getMinConnectionsPerHost() == 30 + options.getConnectionsPerHost() == 500 + options.getConnectTimeout() == 100 + options.getSocketTimeout() == 700 + options.isSslEnabled() + options.isSslInvalidHostNameAllowed() + options.getHeartbeatFrequency() == 5 + options.getMinHeartbeatFrequency() == 11 + options.getHeartbeatConnectTimeout() == 15 + options.getHeartbeatSocketTimeout() == 20 + options.getLocalThreshold() == 25 + options.getRequiredReplicaSetName() == 'test' + options.asMongoClientSettings(null, null, ClusterConnectionMode.SINGLE, null) + .getServerSettings().getHeartbeatFrequency(MILLISECONDS) == 5 + options.asMongoClientSettings(null, null, ClusterConnectionMode.SINGLE, null) + .getServerSettings().getMinHeartbeatFrequency(MILLISECONDS) == 11 + options.compressorList == [MongoCompressor.createZlibCompressor()] + options.getUuidRepresentation() == UuidRepresentation.C_SHARP_LEGACY + } + + @Unroll + def 'should support all credential types'() { + expect: + uri.credentials == credentialList + + where: + uri | credentialList + new MongoClientURI('mongodb://jeff:123@localhost') | createCredential('jeff', 'admin', '123'.toCharArray()) + new MongoClientURI('mongodb://jeff:123@localhost/?' + + 'authMechanism=MONGODB-CR') | createCredential('jeff', 'admin', '123'.toCharArray()) + new MongoClientURI('mongodb://jeff:123@localhost/?' + + 'authMechanism=MONGODB-CR' + + '&authSource=test') | createCredential('jeff', 'test', '123'.toCharArray()) + new MongoClientURI('mongodb://jeff:123@localhost/?' + + 'authMechanism=SCRAM-SHA-1') | createScramSha1Credential('jeff', 'admin', '123'.toCharArray()) + new MongoClientURI('mongodb://jeff:123@localhost/?' + + 'authMechanism=SCRAM-SHA-1' + + '&authSource=test') | createScramSha1Credential('jeff', 'test', '123'.toCharArray()) + new MongoClientURI('mongodb://jeff@localhost/?' + + 'authMechanism=GSSAPI') | createGSSAPICredential('jeff') + new MongoClientURI('mongodb://jeff:123@localhost/?' + + 'authMechanism=PLAIN') | createPlainCredential('jeff', '$external', '123'.toCharArray()) + new MongoClientURI('mongodb://jeff@localhost/?' + + 'authMechanism=MONGODB-X509') | createMongoX509Credential('jeff') + new MongoClientURI('mongodb://jeff@localhost/?' 
+ + 'authMechanism=GSSAPI' + + '&gssapiServiceName=foo') | createGSSAPICredential('jeff').withMechanismProperty('SERVICE_NAME', 'foo') + } + + @Unroll + def 'should correctly parse read preference for #readPreference'() { + expect: + uri.getOptions().getReadPreference() == readPreference + + where: + uri | readPreference + new MongoClientURI('mongodb://localhost/' + + '?readPreference=secondaryPreferred') | secondaryPreferred() + new MongoClientURI('mongodb://localhost/' + + '?readPreference=secondaryPreferred' + + '&readPreferenceTags=dc:ny,rack:1' + + '&readPreferenceTags=dc:ny' + + '&readPreferenceTags=') | secondaryPreferred([new TagSet(asList(new Tag('dc', 'ny'), + new Tag('rack', '1'))), + new TagSet(asList(new Tag('dc', 'ny'))), + new TagSet()]) + } + + def 'should apply SRV parameters'() { + when: + def uri = new MongoClientURI('mongodb+srv://test3.test.build.10gen.cc/?srvMaxHosts=4&srvServiceName=test') + + then: + uri.getSrvMaxHosts() == 4 + uri.getSrvServiceName() == 'test' + + when: + def options = uri.getOptions() + + then: + options.getSrvMaxHosts() == 4 + options.getSrvServiceName() == 'test' + } + + def 'should respect MongoClientOptions builder'() { + given: + def uri = new MongoClientURI('mongodb://localhost/', MongoClientOptions.builder().connectionsPerHost(200)) + + when: + def options = uri.getOptions() + + then: + options.getConnectionsPerHost() == 200 + } + + def 'should override MongoClientOptions builder'() { + given: + def uri = new MongoClientURI('mongodb://localhost/?maxPoolSize=250', MongoClientOptions.builder().connectionsPerHost(200)) + + when: + def options = uri.getOptions() + + then: + options.getConnectionsPerHost() == 250 + } + + def 'should be equal to another MongoClientURI with the same string values'() { + expect: + uri1 == uri2 + uri1.hashCode() == uri2.hashCode() + + where: + uri1 | uri2 + new MongoClientURI('mongodb://user:pass@host1:1/') | new MongoClientURI('mongodb://user:pass@host1:1/') + new MongoClientURI('mongodb://user:pass@host1:1,host2:2,' + + 'host3:3/bar') | new MongoClientURI('mongodb://user:pass@host3:3,host1:1,' + + 'host2:2/bar') + new MongoClientURI('mongodb://localhost/' + + '?readPreference=secondaryPreferred' + + '&readPreferenceTags=dc:ny,rack:1' + + '&readPreferenceTags=dc:ny' + + '&readPreferenceTags=') | new MongoClientURI('mongodb://localhost/' + + '?readPreference=secondaryPreferred' + + '&readPreferenceTags=dc:ny, rack:1' + + '&readPreferenceTags=dc:ny' + + '&readPreferenceTags=') + new MongoClientURI('mongodb://localhost/?readPreference=' + + 'secondaryPreferred') | new MongoClientURI('mongodb://localhost/?readPreference=' + + 'secondaryPreferred') + new MongoClientURI('mongodb://ross:123@localhost/?' + + 'authMechanism=SCRAM-SHA-1') | new MongoClientURI('mongodb://ross:123@localhost/?'
+ + 'authMechanism=SCRAM-SHA-1') + new MongoClientURI('mongodb://localhost/db.coll' + + '?minPoolSize=5;' + + 'maxPoolSize=10;' + + 'waitQueueTimeoutMS=150;' + + 'maxIdleTimeMS=200;' + + 'maxLifeTimeMS=300;replicaSet=test;' + + 'maxConnecting=1;' + + 'connectTimeoutMS=2500;' + + 'socketTimeoutMS=5500;' + + 'safe=false;w=1;wtimeout=2500;' + + 'fsync=true;readPreference=primary;' + + 'ssl=true') | new MongoClientURI('mongodb://localhost/db.coll?minPoolSize=5;' + + 'maxPoolSize=10;' + + 'waitQueueTimeoutMS=150;' + + 'maxIdleTimeMS=200&maxLifeTimeMS=300;' + + 'maxConnecting=1;' + + '&replicaSet=test;connectTimeoutMS=2500;' + + 'socketTimeoutMS=5500&safe=false&w=1;' + + 'wtimeout=2500;fsync=true' + + '&readPreference=primary;ssl=true') + } + + def 'should not be equal to another MongoClientURI with different string values'() { + expect: + uri1 != uri2 + uri1.hashCode() != uri2.hashCode() + + where: + uri1 | uri2 + new MongoClientURI('mongodb://user:pass@host1:1/') | new MongoClientURI('mongodb://user:pass@host1:2/') + new MongoClientURI('mongodb://user:pass@host1:1,host2:2,' + + 'host3:3/bar') | new MongoClientURI('mongodb://user:pass@host1:1,host2:2,' + + 'host4:4/bar') + new MongoClientURI('mongodb://localhost/?readPreference=' + + 'secondaryPreferred') | new MongoClientURI('mongodb://localhost/?readPreference=' + + 'secondary') + new MongoClientURI('mongodb://localhost/' + + '?readPreference=secondaryPreferred' + + '&readPreferenceTags=dc:ny,rack:1' + + '&readPreferenceTags=dc:ny' + + '&readPreferenceTags=' + + '&maxConnecting=1') | new MongoClientURI('mongodb://localhost/' + + '?readPreference=secondaryPreferred' + + '&readPreferenceTags=dc:ny' + + '&readPreferenceTags=dc:ny, rack:1' + + '&readPreferenceTags=' + + '&maxConnecting=2') + new MongoClientURI('mongodb://ross:123@localhost/?' + + 'authMechanism=SCRAM-SHA-1') | new MongoClientURI('mongodb://ross:123@localhost/?' + + 'authMechanism=GSSAPI') + } + + def 'should be equal to another MongoClientURI with options'() { + when: + MongoClientURI uri1 = new MongoClientURI('mongodb://user:pass@host1:1,host2:2,host3:3/bar?' + + 'maxPoolSize=10;waitQueueTimeoutMS=150;' + + 'minPoolSize=7;maxIdleTimeMS=1000;maxLifeTimeMS=2000;maxConnecting=1;' + + 'replicaSet=test;' + + 'connectTimeoutMS=2500;socketTimeoutMS=5500;autoConnectRetry=true;' + + 'readPreference=secondaryPreferred;safe=false;w=1;wtimeout=2600') + + MongoClientOptions.Builder builder = MongoClientOptions.builder() + .connectionsPerHost(10) + .maxWaitTime(150) + .minConnectionsPerHost(7) + .maxConnectionIdleTime(1000) + .maxConnectionLifeTime(2000) + .maxConnecting(1) + .requiredReplicaSetName('test') + .connectTimeout(2500) + .socketTimeout(5500) + .readPreference(secondaryPreferred()) + .writeConcern(new WriteConcern(1, 2600)) + + MongoClientOptions options = builder.build() + + then: + uri1.getOptions() == options + + when: + MongoClientURI uri2 = new MongoClientURI('mongodb://user:pass@host3:3,host1:1,host2:2/bar?', builder) + + then: + uri1 == uri2 + uri1.hashCode() == uri2.hashCode() + } +} diff --git a/driver-legacy/src/test/unit/com/mongodb/client/model/DBCollectionCountOptionsSpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/client/model/DBCollectionCountOptionsSpecification.groovy new file mode 100644 index 00000000000..920065e2439 --- /dev/null +++ b/driver-legacy/src/test/unit/com/mongodb/client/model/DBCollectionCountOptionsSpecification.groovy @@ -0,0 +1,73 @@ +/* + * Copyright 2008-present MongoDB, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model + +import com.mongodb.BasicDBObject +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import spock.lang.Specification + +import java.util.concurrent.TimeUnit + +class DBCollectionCountOptionsSpecification extends Specification { + + def 'should have the expected default values'() { + when: + def options = new DBCollectionCountOptions() + + then: + options.getCollation() == null + options.getHint() == null + options.getHintString() == null + options.getLimit() == 0 + options.getMaxTime(TimeUnit.MILLISECONDS) == 0 + options.getReadConcern() == null + options.getReadPreference() == null + options.getSkip() == 0 + } + + def 'should set and return the expected values'() { + given: + def collation = Collation.builder().locale('en').build() + def readConcern = ReadConcern.LOCAL + def readPreference = ReadPreference.nearest() + def hint = BasicDBObject.parse('{a: 1}') + def hintString = 'a_1' + + when: + def options = new DBCollectionCountOptions() + .collation(collation) + .hint(hint) + .hintString(hintString) + .limit(1) + .maxTime(1, TimeUnit.MILLISECONDS) + .readConcern(readConcern) + .readPreference(readPreference) + .skip(1) + + then: + options.getCollation() == collation + options.getHint() == hint + options.getHintString() == hintString + options.getLimit() == 1 + options.getMaxTime(TimeUnit.MILLISECONDS) == 1 + options.getReadConcern() == readConcern + options.getReadPreference() == readPreference + options.getSkip() == 1 + } + +} diff --git a/driver-legacy/src/test/unit/com/mongodb/client/model/DBCollectionDistinctOptionsSpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/client/model/DBCollectionDistinctOptionsSpecification.groovy new file mode 100644 index 00000000000..c25c2c44875 --- /dev/null +++ b/driver-legacy/src/test/unit/com/mongodb/client/model/DBCollectionDistinctOptionsSpecification.groovy @@ -0,0 +1,58 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model + +import com.mongodb.BasicDBObject +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import spock.lang.Specification + +class DBCollectionDistinctOptionsSpecification extends Specification { + + def 'should have the expected default values'() { + when: + def options = new DBCollectionDistinctOptions() + + then: + options.getCollation() == null + options.getFilter() == null + options.getReadConcern() == null + options.getReadPreference() == null + } + + def 'should set and return the expected values'() { + given: + def collation = Collation.builder().locale('en').build() + def readConcern = ReadConcern.LOCAL + def readPreference = ReadPreference.nearest() + def filter = BasicDBObject.parse('{a: 1}') + + when: + def options = new DBCollectionDistinctOptions() + .collation(collation) + .filter(filter) + .readConcern(readConcern) + .readPreference(readPreference) + + then: + options.getCollation() == collation + options.getFilter() == filter + options.getReadConcern() == readConcern + options.getReadPreference() == readPreference + } + +} diff --git a/driver-legacy/src/test/unit/com/mongodb/client/model/DBCollectionFindAndModifyOptionsSpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/client/model/DBCollectionFindAndModifyOptionsSpecification.groovy new file mode 100644 index 00000000000..0dc42c85815 --- /dev/null +++ b/driver-legacy/src/test/unit/com/mongodb/client/model/DBCollectionFindAndModifyOptionsSpecification.groovy @@ -0,0 +1,82 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.model + +import com.mongodb.BasicDBObject +import com.mongodb.WriteConcern +import spock.lang.Specification + +import java.util.concurrent.TimeUnit + +class DBCollectionFindAndModifyOptionsSpecification extends Specification { + + def 'should have the expected default values'() { + when: + def options = new DBCollectionFindAndModifyOptions() + + then: + !options.isRemove() + !options.isUpsert() + !options.returnNew() + options.getBypassDocumentValidation() == null + options.getCollation() == null + options.getArrayFilters() == null + options.getMaxTime(TimeUnit.MILLISECONDS) == 0 + options.getProjection() == null + options.getSort() == null + options.getUpdate() == null + options.getWriteConcern() == null + } + + def 'should set and return the expected values'() { + given: + def collation = Collation.builder().locale('en').build() + def writeConcern = WriteConcern.MAJORITY + def projection = BasicDBObject.parse('{a: 1, _id: 0}') + def sort = BasicDBObject.parse('{a: 1}') + def update = BasicDBObject.parse('{$set: {a: 2}}') + def arrayFilters = [new BasicDBObject('i.b', 1)] + + when: + def options = new DBCollectionFindAndModifyOptions() + .bypassDocumentValidation(true) + .collation(collation) + .maxTime(1, TimeUnit.MILLISECONDS) + .projection(projection) + .remove(true) + .returnNew(true) + .sort(sort) + .update(update) + .upsert(true) + .arrayFilters(arrayFilters) + .writeConcern(writeConcern) + + then: + options.getBypassDocumentValidation() + options.getCollation() == collation + options.getMaxTime(TimeUnit.MILLISECONDS) == 1 + options.getProjection() == projection + options.getSort() == sort + options.getUpdate() == update + options.getWriteConcern() == writeConcern + options.isRemove() + options.isUpsert() + options.returnNew() + options.getArrayFilters() == arrayFilters + } + +} diff --git a/driver-legacy/src/test/unit/com/mongodb/client/model/DBCollectionFindOptionsSpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/client/model/DBCollectionFindOptionsSpecification.groovy new file mode 100644 index 00000000000..71127c6e9e3 --- /dev/null +++ b/driver-legacy/src/test/unit/com/mongodb/client/model/DBCollectionFindOptionsSpecification.groovy @@ -0,0 +1,180 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.client.model + +import com.mongodb.BasicDBObject +import com.mongodb.CursorType +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import spock.lang.Specification + +import java.util.concurrent.TimeUnit + +class DBCollectionFindOptionsSpecification extends Specification { + + def 'should have the expected default values'() { + when: + def options = new DBCollectionFindOptions() + + then: + !options.isNoCursorTimeout() + !options.isPartial() + options.getBatchSize() == 0 + options.getCollation() == null + options.getCursorType() == CursorType.NonTailable + options.getLimit() == 0 + options.getMaxAwaitTime(TimeUnit.MILLISECONDS) == 0 + options.getMaxTime(TimeUnit.MILLISECONDS) == 0 + options.getProjection() == null + options.getReadConcern() == null + options.getReadPreference() == null + options.getSkip() == 0 + options.getSort() == null + options.getComment() == null + options.getHint() == null + options.getHintString() == null + options.getMax() == null + options.getMin() == null + !options.isReturnKey() + !options.isShowRecordId() + } + + def 'should set and return the expected values'() { + given: + def collation = Collation.builder().locale('en').build() + def projection = BasicDBObject.parse('{a: 1, _id: 0}') + def sort = BasicDBObject.parse('{a: 1}') + def cursorType = CursorType.TailableAwait + def readConcern = ReadConcern.LOCAL + def readPreference = ReadPreference.nearest() + def comment = 'comment' + def hint = BasicDBObject.parse('{x : 1}') + def hintString = 'a_1' + def min = BasicDBObject.parse('{y : 1}') + def max = BasicDBObject.parse('{y : 100}') + + when: + def options = new DBCollectionFindOptions() + .batchSize(1) + .collation(collation) + .cursorType(cursorType) + .limit(1) + .maxAwaitTime(1, TimeUnit.MILLISECONDS) + .maxTime(1, TimeUnit.MILLISECONDS) + .noCursorTimeout(true) + .partial(true) + .projection(projection) + .readConcern(readConcern) + .readPreference(readPreference) + .skip(1) + .sort(sort) + .comment(comment) + .hint(hint) + .hintString(hintString) + .max(max) + .min(min) + .returnKey(true) + .showRecordId(true) + + then: + options.getBatchSize() == 1 + options.getCollation() == collation + options.getCursorType() == cursorType + options.getLimit() == 1 + options.getMaxAwaitTime(TimeUnit.MILLISECONDS) == 1 + options.getMaxTime(TimeUnit.MILLISECONDS) == 1 + options.getProjection() == projection + options.getReadConcern() == readConcern + options.getReadPreference() == readPreference + options.getSkip() == 1 + options.getSort() == sort + options.isNoCursorTimeout() + options.isPartial() + options.getComment() == comment + options.getHint() == hint + options.getHintString() == hintString + options.getMax() == max + options.getMin() == min + options.isReturnKey() + options.isShowRecordId() + } + + def 'it should copy and return the expected values'() { + given: + def collation = Collation.builder().locale('en').build() + def projection = BasicDBObject.parse('{a: 1, _id: 0}') + def sort = BasicDBObject.parse('{a: 1}') + def cursorType = CursorType.TailableAwait + def readConcern = ReadConcern.LOCAL + def readPreference = ReadPreference.nearest() + def comment = 'comment' + def hint = BasicDBObject.parse('{x : 1}') + def hintString = 'a_1' + def min = BasicDBObject.parse('{y : 1}') + def max = BasicDBObject.parse('{y : 100}') + + when: + def original = new DBCollectionFindOptions() + .batchSize(1) + .collation(collation) + .cursorType(cursorType) + .limit(1) + .maxAwaitTime(1, TimeUnit.MILLISECONDS) + .maxTime(1, 
TimeUnit.MILLISECONDS) + .noCursorTimeout(true) + .partial(true) + .projection(projection) + .readConcern(readConcern) + .readPreference(readPreference) + .skip(1) + .sort(sort) + .comment(comment) + .hint(hint) + .hintString(hintString) + .max(max) + .min(min) + .returnKey(true) + .showRecordId(true) + + def options = original.copy() + + then: + original != options + + options.getBatchSize() == 1 + options.getCollation() == collation + options.getCursorType() == cursorType + options.getLimit() == 1 + options.getMaxAwaitTime(TimeUnit.MILLISECONDS) == 1 + options.getMaxTime(TimeUnit.MILLISECONDS) == 1 + options.getProjection() == projection + options.getReadConcern() == readConcern + options.getReadPreference() == readPreference + options.getSkip() == 1 + options.getSort() == sort + options.isNoCursorTimeout() + options.isPartial() + options.getComment() == comment + options.getHint() == hint + options.getHintString() == hintString + options.getMax() == max + options.getMin() == min + options.isReturnKey() + options.isShowRecordId() + } + +} diff --git a/driver-legacy/src/test/unit/com/mongodb/client/model/DBCollectionRemoveOptionsSpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/client/model/DBCollectionRemoveOptionsSpecification.groovy new file mode 100644 index 00000000000..32c3300853c --- /dev/null +++ b/driver-legacy/src/test/unit/com/mongodb/client/model/DBCollectionRemoveOptionsSpecification.groovy @@ -0,0 +1,53 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model + +import com.mongodb.DefaultDBEncoder +import com.mongodb.WriteConcern +import spock.lang.Specification + +class DBCollectionRemoveOptionsSpecification extends Specification { + + def 'should have the expected default values'() { + when: + def options = new DBCollectionRemoveOptions() + + then: + options.getCollation() == null + options.getEncoder() == null + options.getWriteConcern() == null + } + + def 'should set and return the expected values'() { + given: + def collation = Collation.builder().locale('en').build() + def writeConcern = WriteConcern.MAJORITY + def encoder = new DefaultDBEncoder() + + when: + def options = new DBCollectionRemoveOptions() + .collation(collation) + .encoder(encoder) + .writeConcern(writeConcern) + + then: + options.getCollation() == collation + options.getEncoder() == encoder + options.getWriteConcern() == writeConcern + } + +} diff --git a/driver-legacy/src/test/unit/com/mongodb/client/model/DBCollectionUpdateOptionsSpecification.groovy b/driver-legacy/src/test/unit/com/mongodb/client/model/DBCollectionUpdateOptionsSpecification.groovy new file mode 100644 index 00000000000..a755a533740 --- /dev/null +++ b/driver-legacy/src/test/unit/com/mongodb/client/model/DBCollectionUpdateOptionsSpecification.groovy @@ -0,0 +1,63 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model + +import com.mongodb.BasicDBObject +import com.mongodb.DefaultDBEncoder +import com.mongodb.WriteConcern +import spock.lang.Specification + +class DBCollectionUpdateOptionsSpecification extends Specification { + + def 'should have the expected default values'() { + when: + def options = new DBCollectionUpdateOptions() + + then: + !options.isMulti() + !options.isUpsert() + options.getArrayFilters() == null + options.getBypassDocumentValidation() == null + options.getEncoder() == null + options.getWriteConcern() == null + } + + def 'should set and return the expected values'() { + given: + def writeConcern = WriteConcern.MAJORITY + def encoder = new DefaultDBEncoder() + def arrayFilters = [new BasicDBObject('i.b', 1)] + + when: + def options = new DBCollectionUpdateOptions() + .bypassDocumentValidation(true) + .encoder(encoder) + .multi(true) + .upsert(true) + .arrayFilters(arrayFilters) + .writeConcern(writeConcern) + + then: + options.getBypassDocumentValidation() + options.getEncoder() == encoder + options.getWriteConcern() == writeConcern + options.isMulti() + options.isUpsert() + options.getArrayFilters() == arrayFilters + } + +} diff --git a/driver-reactive-streams/build.gradle.kts b/driver-reactive-streams/build.gradle.kts new file mode 100644 index 00000000000..f1c758b31da --- /dev/null +++ b/driver-reactive-streams/build.gradle.kts @@ -0,0 +1,79 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
diff --git a/driver-reactive-streams/build.gradle.kts b/driver-reactive-streams/build.gradle.kts
new file mode 100644
index 00000000000..f1c758b31da
--- /dev/null
+++ b/driver-reactive-streams/build.gradle.kts
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+import ProjectExtensions.configureJarManifest
+import ProjectExtensions.configureMavenPublication
+
+plugins {
+    id("project.java")
+    id("conventions.test-artifacts")
+    id("conventions.test-artifacts-runtime-dependencies")
+    id("conventions.test-include-optionals")
+    id("conventions.testing-mockito")
+    id("conventions.testing-junit")
+    id("conventions.testing-spock-exclude-slow")
+}
+
+base.archivesName.set("mongodb-driver-reactivestreams")
+
+dependencies {
+    api(project(path = ":bson", configuration = "default"))
+    api(project(path = ":driver-core", configuration = "default"))
+    api(libs.reactive.streams)
+    implementation(platform(libs.project.reactor.bom))
+    implementation(libs.project.reactor.core)
+    compileOnly(project(path = ":mongodb-crypt", configuration = "default"))
+
+    testImplementation(libs.project.reactor.test)
+    testImplementation(project(path = ":driver-sync", configuration = "default"))
+    testImplementation(project(path = ":bson", configuration = "testArtifacts"))
+    testImplementation(project(path = ":driver-core", configuration = "testArtifacts"))
+    testImplementation(project(path = ":driver-sync", configuration = "testArtifacts"))
+
+    // Reactive Streams TCK testing
+    testImplementation(libs.reactive.streams.tck)
+}
+
+configureMavenPublication {
+    pom {
+        name.set("The MongoDB Reactive Streams Driver")
+        description.set("A Reactive Streams implementation of the MongoDB Java driver")
+    }
+}
+
+configureJarManifest {
+    attributes["Automatic-Module-Name"] = "org.mongodb.driver.reactivestreams"
+    attributes["Bundle-SymbolicName"] = "org.mongodb.driver-reactivestreams"
+    attributes["Import-Package"] =
+        listOf(
+            "com.mongodb.crypt.capi.*;resolution:=optional",
+            "com.mongodb.internal.crypt.capi.*;resolution:=optional",
+            "*" // import everything that is not excluded or modified above
+        )
+            .joinToString(",")
+}
+
+sourceSets { test { java { setSrcDirs(listOf("src/test/tck")) } } }
+
+// Reactive Streams TCK uses TestNG
+tasks.register("tckTest", Test::class) {
+    useTestNG()
+    maxParallelForks = 1
+    isScanForTestClasses = false
+
+    binaryResultsDirectory.set(layout.buildDirectory.dir("$name-results/binary"))
+    reports.html.outputLocation.set(layout.buildDirectory.dir("reports/$name"))
+    reports.junitXml.outputLocation.set(layout.buildDirectory.dir("reports/$name-results"))
+}
diff --git a/driver-reactive-streams/src/examples/reactivestreams/documentation/DocumentationSamples.java b/driver-reactive-streams/src/examples/reactivestreams/documentation/DocumentationSamples.java
new file mode 100644
index 00000000000..1085d00faaf
--- /dev/null
+++ b/driver-reactive-streams/src/examples/reactivestreams/documentation/DocumentationSamples.java
@@ -0,0 +1,698 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package reactivestreams.documentation;
+
+import com.mongodb.client.result.DeleteResult;
+import com.mongodb.client.result.InsertManyResult;
+import com.mongodb.client.result.InsertOneResult;
+import com.mongodb.client.result.UpdateResult;
+import com.mongodb.reactivestreams.client.FindPublisher;
+import com.mongodb.reactivestreams.client.MongoCollection;
+import org.bson.BsonType;
+import org.bson.Document;
+import org.junit.Before;
+import org.junit.Test;
+import reactivestreams.helpers.SubscriberHelpers.ObservableSubscriber;
+import reactivestreams.helpers.SubscriberHelpers.OperationSubscriber;
+
+// imports required for filters, projections and updates
+import static com.mongodb.client.Fixture.getDefaultDatabaseName;
+import static com.mongodb.client.model.Filters.all;
+import static com.mongodb.client.model.Filters.and;
+import static com.mongodb.client.model.Filters.elemMatch;
+import static com.mongodb.client.model.Filters.eq;
+import static com.mongodb.client.model.Filters.exists;
+import static com.mongodb.client.model.Filters.gt;
+import static com.mongodb.client.model.Filters.in;
+import static com.mongodb.client.model.Filters.lt;
+import static com.mongodb.client.model.Filters.lte;
+import static com.mongodb.client.model.Filters.or;
+import static com.mongodb.client.model.Filters.regex;
+import static com.mongodb.client.model.Filters.size;
+import static com.mongodb.client.model.Filters.type;
+import static com.mongodb.client.model.Projections.exclude;
+import static com.mongodb.client.model.Projections.excludeId;
+import static com.mongodb.client.model.Projections.fields;
+import static com.mongodb.client.model.Projections.include;
+import static com.mongodb.client.model.Projections.slice;
+import static com.mongodb.client.model.Updates.combine;
+import static com.mongodb.client.model.Updates.currentDate;
+import static com.mongodb.client.model.Updates.set;
+// end required filters, projections and updates imports
+
+import static com.mongodb.reactivestreams.client.Fixture.getMongoClient;
+import static java.util.Arrays.asList;
+import static java.util.Collections.singletonList;
+
+
+public final class DocumentationSamples {
+
+    private final MongoCollection<Document> collection =
+            getMongoClient().getDatabase(getDefaultDatabaseName()).getCollection("inventory");
+
+    @Before
+    public void setup() {
+        ObservableSubscriber<Void> dropSubscriber = new OperationSubscriber<>();
+        collection.drop().subscribe(dropSubscriber);
+        dropSubscriber.await();
+    }
+
+    @Test
+    public void testInsert() {
+
+        // Start Example 1
+        Document canvas = new Document("item", "canvas")
+                .append("qty", 100)
+                .append("tags", singletonList("cotton"));
+
+        Document size = new Document("h", 28)
+                .append("w", 35.5)
+                .append("uom", "cm");
+        canvas.put("size", size);
+
+        ObservableSubscriber<InsertOneResult> insertOneSubscriber = new OperationSubscriber<>();
+        collection.insertOne(canvas)
+                .subscribe(insertOneSubscriber);
+        insertOneSubscriber.await();
+        // End Example 1
+
+        // Start Example 2
+        FindPublisher<Document> findPublisher = collection.find(eq("item", "canvas"));
+        // End Example 2
+
+        ObservableSubscriber<Document> findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 3
+        Document journal = new Document("item", "journal")
+                .append("qty", 25)
+                .append("tags", asList("blank", "red"));
+
+        Document journalSize = new Document("h", 14)
+                .append("w", 21)
+                .append("uom", "cm");
+        journal.put("size", journalSize);
+
+        Document mat = new Document("item", "mat")
+                .append("qty", 85)
+                .append("tags", singletonList("gray"));
+
+        Document matSize = new Document("h", 27.9)
+                .append("w", 35.5)
+                .append("uom", "cm");
+        mat.put("size", matSize);
+
+        Document mousePad = new Document("item", "mousePad")
+                .append("qty", 25)
+                .append("tags", asList("gel", "blue"));
+
+        Document mousePadSize = new Document("h", 19)
+                .append("w", 22.85)
+                .append("uom", "cm");
+        mousePad.put("size", mousePadSize);
+
+        ObservableSubscriber<InsertManyResult> insertManySubscriber = new OperationSubscriber<>();
+        collection.insertMany(asList(journal, mat, mousePad))
+                .subscribe(insertManySubscriber);
+        insertManySubscriber.await();
+        // End Example 3
+
+        ObservableSubscriber<Long> countSubscriber = new OperationSubscriber<>();
+        collection.countDocuments().subscribe(countSubscriber);
+        countSubscriber.await();
+    }
+
+    @Test
+    public void testQueryingAtTheTopLevel() {
+        // Start Example 6
+        ObservableSubscriber<InsertManyResult> insertManySubscriber = new OperationSubscriber<>();
+        collection.insertMany(asList(
+                Document.parse("{ item: 'journal', qty: 25, size: { h: 14, w: 21, uom: 'cm' }, status: 'A' }"),
+                Document.parse("{ item: 'notebook', qty: 50, size: { h: 8.5, w: 11, uom: 'in' }, status: 'A' }"),
+                Document.parse("{ item: 'paper', qty: 100, size: { h: 8.5, w: 11, uom: 'in' }, status: 'D' }"),
+                Document.parse("{ item: 'planner', qty: 75, size: { h: 22.85, w: 30, uom: 'cm' }, status: 'D' }"),
+                Document.parse("{ item: 'postcard', qty: 45, size: { h: 10, w: 15.25, uom: 'cm' }, status: 'A' }"))
+        ).subscribe(insertManySubscriber);
+        insertManySubscriber.await();
+        // End Example 6
+
+        ObservableSubscriber<Long> countSubscriber = new OperationSubscriber<>();
+        collection.countDocuments()
+                .subscribe(countSubscriber);
+        countSubscriber.await();
+
+        // Start Example 7
+        FindPublisher<Document> findPublisher = collection.find(new Document());
+        // End Example 7
+
+        ObservableSubscriber<Document> findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 8
+        findPublisher = collection.find();
+        // End Example 8
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 9
+        findPublisher = collection.find(eq("status", "D"));
+        // End Example 9
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 10
+        findPublisher = collection.find(in("status", "A", "D"));
+        // End Example 10
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 11
+        findPublisher = collection.find(and(eq("status", "A"), lt("qty", 30)));
+        // End Example 11
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 12
+        findPublisher = collection.find(or(eq("status", "A"), lt("qty", 30)));
+        // End Example 12
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 13
+        findPublisher = collection.find(
+                and(eq("status", "A"),
+                        or(lt("qty", 30), regex("item", "^p")))
+        );
+        // End Example 13
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+    }
+
+    @Test
+    public void testQueryingEmbeddedDocuments() {
+        // Start Example 14
+        ObservableSubscriber<InsertManyResult> insertManySubscriber = new OperationSubscriber<>();
+        collection.insertMany(asList(
+                Document.parse("{ item: 'journal', qty: 25, size: { h: 14, w: 21, uom: 'cm' }, status: 'A' }"),
+                Document.parse("{ item: 'notebook', qty: 50, size: { h: 8.5, w: 11, uom: 'in' }, status: 'A' }"),
+                Document.parse("{ item: 'paper', qty: 100, size: { h: 8.5, w: 11, uom: 'in' }, status: 'D' }"),
+                Document.parse("{ item: 'planner', qty: 75, size: { h: 22.85, w: 30, uom: 'cm' }, status: 'D' }"),
+                Document.parse("{ item: 'postcard', qty: 45, size: { h: 10, w: 15.25, uom: 'cm' }, status: 'A' }"))
+        ).subscribe(insertManySubscriber);
+        insertManySubscriber.await();
+        // End Example 14
+
+        ObservableSubscriber<Long> countSubscriber = new OperationSubscriber<>();
+        collection.countDocuments().subscribe(countSubscriber);
+        countSubscriber.await();
+
+        // Start Example 15
+        FindPublisher<Document> findPublisher = collection.find(eq("size", Document.parse("{ h: 14, w: 21, uom: 'cm' }")));
+        // End Example 15
+
+        ObservableSubscriber<Document> findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 16
+        findPublisher = collection.find(eq("size", Document.parse("{ w: 21, h: 14, uom: 'cm' }")));
+        // End Example 16
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 17
+        findPublisher = collection.find(eq("size.uom", "in"));
+        // End Example 17
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 18
+        findPublisher = collection.find(lt("size.h", 15));
+        // End Example 18
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 19
+        findPublisher = collection.find(and(
+                lt("size.h", 15),
+                eq("size.uom", "in"),
+                eq("status", "D")
+        ));
+        // End Example 19
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+    }
+
+    @Test
+    public void testQueryingArrayValues() {
+
+        // Start Example 20
+        ObservableSubscriber<InsertManyResult> insertManySubscriber = new OperationSubscriber<>();
+        collection.insertMany(asList(
+                Document.parse("{ item: 'journal', qty: 25, tags: ['blank', 'red'], dim_cm: [ 14, 21 ] }"),
+                Document.parse("{ item: 'notebook', qty: 50, tags: ['red', 'blank'], dim_cm: [ 14, 21 ] }"),
+                Document.parse("{ item: 'paper', qty: 100, tags: ['red', 'blank', 'plain'], dim_cm: [ 14, 21 ] }"),
+                Document.parse("{ item: 'planner', qty: 75, tags: ['blank', 'red'], dim_cm: [ 22.85, 30 ] }"),
+                Document.parse("{ item: 'postcard', qty: 45, tags: ['blue'], dim_cm: [ 10, 15.25 ] }"))
+        ).subscribe(insertManySubscriber);
+        insertManySubscriber.await();
+        // End Example 20
+
+        ObservableSubscriber<Long> countSubscriber = new OperationSubscriber<>();
+        collection.countDocuments().subscribe(countSubscriber);
+        countSubscriber.await();
+
+        // Start Example 21
+        FindPublisher<Document> findPublisher = collection.find(eq("tags", asList("red", "blank")));
+        // End Example 21
+
+        ObservableSubscriber<Document> findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 22
+        findPublisher = collection.find(all("tags", asList("red", "blank")));
+        // End Example 22
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 23
+        findPublisher = collection.find(eq("tags", "red"));
+        // End Example 23
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 24
+        findPublisher = collection.find(gt("dim_cm", 25));
+        // End Example 24
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 25
+        findPublisher = collection.find(and(gt("dim_cm", 15), lt("dim_cm", 20)));
+        // End Example 25
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 26
+        findPublisher = collection.find(elemMatch("dim_cm", Document.parse("{ $gt: 22, $lt: 30 }")));
+        // End Example 26
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 27
+        findPublisher = collection.find(gt("dim_cm.1", 25));
+        // End Example 27
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 28
+        findPublisher = collection.find(size("tags", 3));
+        // End Example 28
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+    }
+
+    @Test
+    public void testQueryingArraysContainingDocuments() {
+
+        // Start Example 29
+        ObservableSubscriber<InsertManyResult> insertManySubscriber = new OperationSubscriber<>();
+        collection.insertMany(asList(
+                Document.parse("{ item: 'journal', instock: [ { warehouse: 'A', qty: 5 }, { warehouse: 'C', qty: 15 } ] }"),
+                Document.parse("{ item: 'notebook', instock: [ { warehouse: 'C', qty: 5 } ] }"),
+                Document.parse("{ item: 'paper', instock: [ { warehouse: 'A', qty: 60 }, { warehouse: 'B', qty: 15 } ] }"),
+                Document.parse("{ item: 'planner', instock: [ { warehouse: 'A', qty: 40 }, { warehouse: 'B', qty: 5 } ] }"),
+                Document.parse("{ item: 'postcard', instock: [ { warehouse: 'B', qty: 15 }, { warehouse: 'C', qty: 35 } ] }"))
+        ).subscribe(insertManySubscriber);
+        insertManySubscriber.await();
+        // End Example 29
+
+        ObservableSubscriber<Long> countSubscriber = new OperationSubscriber<>();
+        collection.countDocuments().subscribe(countSubscriber);
+        countSubscriber.await();
+
+        // Start Example 30
+        FindPublisher<Document> findPublisher = collection.find(eq("instock", Document.parse("{ warehouse: 'A', qty: 5 }")));
+        // End Example 30
+
+        ObservableSubscriber<Document> findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 31
+        findPublisher = collection.find(eq("instock", Document.parse("{ qty: 5, warehouse: 'A' }")));
+        // End Example 31
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 32
+        findPublisher = collection.find(lte("instock.0.qty", 20));
+        // End Example 32
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 33
+        findPublisher = collection.find(lte("instock.qty", 20));
+        // End Example 33
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 34
+        findPublisher = collection.find(elemMatch("instock", Document.parse("{ qty: 5, warehouse: 'A' }")));
+        // End Example 34
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 35
+        findPublisher = collection.find(elemMatch("instock", Document.parse("{ qty: { $gt: 10, $lte: 20 } }")));
+        // End Example 35
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 36
+        findPublisher = collection.find(and(gt("instock.qty", 10), lte("instock.qty", 20)));
+        // End Example 36
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 37
+        findPublisher = collection.find(and(eq("instock.qty", 5), eq("instock.warehouse", "A")));
+        // End Example 37
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+    }
+
+    @Test
+    public void testQueryingNullAndMissingFields() {
+
+        // Start Example 38
+        ObservableSubscriber<InsertManyResult> insertManySubscriber = new OperationSubscriber<>();
+        collection.insertMany(asList(
+                Document.parse("{'_id': 1, 'item': null}"),
+                Document.parse("{'_id': 2}"))
+        ).subscribe(insertManySubscriber);
+        insertManySubscriber.await();
+        // End Example 38
+
+        ObservableSubscriber<Long> countSubscriber = new OperationSubscriber<>();
+        collection.countDocuments().subscribe(countSubscriber);
+        countSubscriber.await();
+
+        // Start Example 39
+        FindPublisher<Document> findPublisher = collection.find(eq("item", null));
+        // End Example 39
+
+        ObservableSubscriber<Document> findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 40
+        findPublisher = collection.find(type("item", BsonType.NULL));
+        // End Example 40
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 41
+        findPublisher = collection.find(exists("item", false));
+        // End Example 41
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+    }
+
+    @Test
+    public void testProjectingFields() {
+
+        // Start Example 42
+        ObservableSubscriber<InsertManyResult> insertManySubscriber = new OperationSubscriber<>();
+        collection.insertMany(asList(
+                Document.parse("{ item: 'journal', status: 'A', size: { h: 14, w: 21, uom: 'cm' }, instock: [ { warehouse: 'A', qty: 5 }]}"),
+                Document.parse("{ item: 'notebook', status: 'A', size: { h: 8.5, w: 11, uom: 'in' }, instock: [ { warehouse: 'C', qty: 5}]}"),
+                Document.parse("{ item: 'paper', status: 'D', size: { h: 8.5, w: 11, uom: 'in' }, instock: [ { warehouse: 'A', qty: 60 }]}"),
+                Document.parse("{ item: 'planner', status: 'D', size: { h: 22.85, w: 30, uom: 'cm' }, instock: [ { warehouse: 'A', qty: 40}]}"),
+                Document.parse("{ item: 'postcard', status: 'A', size: { h: 10, w: 15.25, uom: 'cm' }, "
+                        + "instock: [ { warehouse: 'B', qty: 15 }, { warehouse: 'C', qty: 35 } ] }"))
+        ).subscribe(insertManySubscriber);
+        insertManySubscriber.await();
+        // End Example 42
+
+        ObservableSubscriber<Long> countSubscriber = new OperationSubscriber<>();
+        collection.countDocuments().subscribe(countSubscriber);
+        countSubscriber.await();
+
+        // Start Example 43
+        FindPublisher<Document> findPublisher = collection.find(eq("status", "A"));
+        // End Example 43
+
+        ObservableSubscriber<Document> findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 44
+        findPublisher = collection.find(eq("status", "A")).projection(include("item", "status"));
+        // End Example 44
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 45
+        findPublisher = collection.find(eq("status", "A"))
+                .projection(fields(include("item", "status"), excludeId()));
+        // End Example 45
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 46
+        findPublisher = collection.find(eq("status", "A")).projection(exclude("item", "status"));
+        // End Example 46
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 47
+        findPublisher = collection.find(eq("status", "A")).projection(include("item", "status", "size.uom"));
+        // End Example 47
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 48
+        findPublisher = collection.find(eq("status", "A")).projection(exclude("size.uom"));
+        // End Example 48
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 49
+        findPublisher = collection.find(eq("status", "A")).projection(include("item", "status", "instock.qty"));
+        // End Example 49
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 50
+        findPublisher = collection.find(eq("status", "A"))
+                .projection(fields(include("item", "status"), slice("instock", -1)));
+        // End Example 50
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+    }
+
+    @Test
+    public void testUpdates() {
+        // Start Example 51
+        ObservableSubscriber<InsertManyResult> insertManySubscriber = new OperationSubscriber<>();
+        collection.insertMany(asList(
+                Document.parse("{ item: 'canvas', qty: 100, size: { h: 28, w: 35.5, uom: 'cm' }, status: 'A' }"),
+                Document.parse("{ item: 'journal', qty: 25, size: { h: 14, w: 21, uom: 'cm' }, status: 'A' }"),
+                Document.parse("{ item: 'mat', qty: 85, size: { h: 27.9, w: 35.5, uom: 'cm' }, status: 'A' }"),
+                Document.parse("{ item: 'mousepad', qty: 25, size: { h: 19, w: 22.85, uom: 'cm' }, status: 'P' }"),
+                Document.parse("{ item: 'notebook', qty: 50, size: { h: 8.5, w: 11, uom: 'in' }, status: 'P' }"),
+                Document.parse("{ item: 'paper', qty: 100, size: { h: 8.5, w: 11, uom: 'in' }, status: 'D' }"),
+                Document.parse("{ item: 'planner', qty: 75, size: { h: 22.85, w: 30, uom: 'cm' }, status: 'D' }"),
+                Document.parse("{ item: 'postcard', qty: 45, size: { h: 10, w: 15.25, uom: 'cm' }, status: 'A' }"),
+                Document.parse("{ item: 'sketchbook', qty: 80, size: { h: 14, w: 21, uom: 'cm' }, status: 'A' }"),
+                Document.parse("{ item: 'sketch pad', qty: 95, size: { h: 22.85, w: 30.5, uom: 'cm' }, status: 'A' }"))
+        ).subscribe(insertManySubscriber);
+        insertManySubscriber.await();
+        // End Example 51
+
+        ObservableSubscriber<Long> countSubscriber = new OperationSubscriber<>();
+        collection.countDocuments().subscribe(countSubscriber);
+        countSubscriber.await();
+
+        // Start Example 52
+        ObservableSubscriber<UpdateResult> updateSubscriber = new OperationSubscriber<>();
+        collection.updateOne(eq("item", "paper"),
+                combine(set("size.uom", "cm"), set("status", "P"), currentDate("lastModified")))
+                .subscribe(updateSubscriber);
+        updateSubscriber.await();
+        // End Example 52
+
+        FindPublisher<Document> findPublisher = collection.find(eq("item", "paper"));
+
+        ObservableSubscriber<Document> findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 53
+        updateSubscriber = new OperationSubscriber<>();
+        collection.updateMany(lt("qty", 50),
+                combine(set("size.uom", "in"), set("status", "P"), currentDate("lastModified")))
+                .subscribe(updateSubscriber);
+        updateSubscriber.await();
+        // End Example 53
+
+        findPublisher = collection.find(lt("qty", 50));
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+
+        // Start Example 54
+        updateSubscriber = new OperationSubscriber<>();
+        collection.replaceOne(eq("item", "paper"),
+                Document.parse("{ item: 'paper', instock: [ { warehouse: 'A', qty: 60 }, { warehouse: 'B', qty: 40 } ] }"))
+                .subscribe(updateSubscriber);
+        updateSubscriber.await();
+        // End Example 54
+
+        findPublisher = collection.find(eq("item", "paper")).projection(excludeId());
+
+        findSubscriber = new OperationSubscriber<>();
+        findPublisher.subscribe(findSubscriber);
+        findSubscriber.await();
+    }
+
+    @Test
+    public void testDeletions() {
+
+        // Start Example 55
+        ObservableSubscriber<InsertManyResult> insertManySubscriber = new OperationSubscriber<>();
+        collection.insertMany(asList(
+                Document.parse("{ item: 'journal', qty: 25, size: { h: 14, w: 21, uom: 'cm' }, status: 'A' }"),
+                Document.parse("{ item: 'notebook', qty: 50, size: { h: 8.5, w: 11, uom: 'in' }, status: 'A' }"),
+                Document.parse("{ item: 'paper', qty: 100, size: { h: 8.5, w: 11, uom: 'in' }, status: 'D' }"),
+                Document.parse("{ item: 'planner', qty: 75, size: { h: 22.85, w: 30, uom: 'cm' }, status: 'D' }"),
+                Document.parse("{ item: 'postcard', qty: 45, size: { h: 10, w: 15.25, uom: 'cm' }, status: 'A' }"))
+        ).subscribe(insertManySubscriber);
+        insertManySubscriber.await();
+        // End Example 55
+
+        ObservableSubscriber<Long> countSubscriber = new OperationSubscriber<>();
+        collection.countDocuments().subscribe(countSubscriber);
+        countSubscriber.await();
+
+        // Start Example 57
+        ObservableSubscriber<DeleteResult> deleteSubscriber = new OperationSubscriber<>();
+        collection.deleteMany(eq("status", "A"))
+                .subscribe(deleteSubscriber);
+        deleteSubscriber.await();
+        // End Example 57
+
+        countSubscriber = new OperationSubscriber<>();
+        collection.countDocuments().subscribe(countSubscriber);
+        countSubscriber.await();
+
+        // Start Example 58
+        deleteSubscriber = new OperationSubscriber<>();
+        collection.deleteOne(eq("status", "D"))
+                .subscribe(deleteSubscriber);
+        deleteSubscriber.await();
+        // End Example 58
+
+        countSubscriber = new OperationSubscriber<>();
+        collection.countDocuments().subscribe(countSubscriber);
+        countSubscriber.await();
+
+        // Start Example 56
+        deleteSubscriber = new OperationSubscriber<>();
+        collection.deleteMany(new Document())
+                .subscribe(deleteSubscriber);
+        deleteSubscriber.await();
+        // End Example 56
+
+        countSubscriber = new OperationSubscriber<>();
+        collection.countDocuments().subscribe(countSubscriber);
+        countSubscriber.await();
+    }
+
+}
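Editor's note: every sample above follows the same subscribe-then-await pattern built on the helpers this patch adds. A condensed sketch of that pattern, outside the diff, may help reviewers; it uses only classes defined in this patch plus standard driver entry points, and the database name and filter are illustrative only.

    import com.mongodb.reactivestreams.client.MongoClient;
    import com.mongodb.reactivestreams.client.MongoClients;
    import com.mongodb.reactivestreams.client.MongoCollection;
    import org.bson.Document;
    import reactivestreams.helpers.SubscriberHelpers.ObservableSubscriber;
    import reactivestreams.helpers.SubscriberHelpers.OperationSubscriber;

    import static com.mongodb.client.model.Filters.eq;

    public final class FindPatternSketch {
        public static void main(final String[] args) {
            try (MongoClient client = MongoClients.create()) {
                MongoCollection<Document> inventory =
                        client.getDatabase("test").getCollection("inventory");

                // The three-step pattern used throughout DocumentationSamples:
                // create a subscriber, subscribe it to the publisher, then
                // block until the stream completes (or rethrows its error).
                ObservableSubscriber<Document> subscriber = new OperationSubscriber<>();
                inventory.find(eq("status", "A")).subscribe(subscriber);
                subscriber.await().getReceived().forEach(doc -> System.out.println(doc.toJson()));
            }
        }
    }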
diff --git a/driver-reactive-streams/src/examples/reactivestreams/gridfs/GridFSTour.java b/driver-reactive-streams/src/examples/reactivestreams/gridfs/GridFSTour.java
new file mode 100644
index 00000000000..3a9dea940c9
--- /dev/null
+++ b/driver-reactive-streams/src/examples/reactivestreams/gridfs/GridFSTour.java
@@ -0,0 +1,148 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package reactivestreams.gridfs;
+
+
+import com.mongodb.client.gridfs.model.GridFSDownloadOptions;
+import com.mongodb.client.gridfs.model.GridFSFile;
+import com.mongodb.client.gridfs.model.GridFSUploadOptions;
+import com.mongodb.reactivestreams.client.MongoClient;
+import com.mongodb.reactivestreams.client.MongoClients;
+import com.mongodb.reactivestreams.client.MongoDatabase;
+import com.mongodb.reactivestreams.client.gridfs.GridFSBucket;
+import com.mongodb.reactivestreams.client.gridfs.GridFSBuckets;
+import org.bson.Document;
+import org.bson.types.ObjectId;
+import org.reactivestreams.Publisher;
+import reactivestreams.helpers.SubscriberHelpers.ConsumerSubscriber;
+import reactivestreams.helpers.SubscriberHelpers.ObservableSubscriber;
+import reactivestreams.helpers.SubscriberHelpers.OperationSubscriber;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.nio.Buffer;
+import java.nio.ByteBuffer;
+import java.nio.charset.StandardCharsets;
+
+import static com.mongodb.client.model.Filters.eq;
+import static reactivestreams.helpers.PublisherHelpers.toPublisher;
+
+/**
+ * The GridFS code example; see: https://mongodb.github.io/mongo-java-driver/3.1/driver/reference/gridfs
+ */
+public final class GridFSTour {
+
+    /**
+     * Run this main method to see the output of this quick example.
+     *
+     * @param args takes an optional single argument for the connection string
+     * @throws FileNotFoundException if the sample file cannot be found
+     * @throws IOException if there was an exception closing an input stream
+     */
+    public static void main(final String[] args) throws FileNotFoundException, IOException {
+        MongoClient mongoClient;
+
+        if (args.length == 0) {
+            // connect to the local database server
+            mongoClient = MongoClients.create();
+        } else {
+            mongoClient = MongoClients.create(args[0]);
+        }
+
+        // get handle to "mydb" database
+        MongoDatabase database = mongoClient.getDatabase("mydb");
+        ObservableSubscriber<Void> dropSubscriber = new OperationSubscriber<>();
+        database.drop().subscribe(dropSubscriber);
+        dropSubscriber.await();
+
+        GridFSBucket gridFSBucket = GridFSBuckets.create(database);
+
+        /*
+         * UploadFromPublisher Example
+         */
+        // Get the input publisher
+        Publisher<ByteBuffer> publisherToUploadFrom = toPublisher(ByteBuffer.wrap("MongoDB Tutorial..".getBytes(StandardCharsets.UTF_8)));
+
+        // Create some custom options
+        GridFSUploadOptions options = new GridFSUploadOptions()
+                .chunkSizeBytes(1024)
+                .metadata(new Document("type", "presentation"));
+
+        ObservableSubscriber<ObjectId> uploadSubscriber = new OperationSubscriber<>();
+        gridFSBucket.uploadFromPublisher("mongodb-tutorial", publisherToUploadFrom, options).subscribe(uploadSubscriber);
+        ObjectId fileId = uploadSubscriber.get().get(0);
+
+        /*
+         * Find documents
+         */
+        System.out.println("File names:");
+        ConsumerSubscriber<GridFSFile> filesSubscriber = new ConsumerSubscriber<>(gridFSFile ->
+                System.out.println(" - " + gridFSFile.getFilename()));
+        gridFSBucket.find().subscribe(filesSubscriber);
+        filesSubscriber.await();
+
+        /*
+         * Find documents with a filter
+         */
+        filesSubscriber = new ConsumerSubscriber<>(gridFSFile -> System.out.println("Found: " + gridFSFile.getFilename()));
+        gridFSBucket.find(eq("metadata.contentType", "image/png")).subscribe(filesSubscriber);
+        filesSubscriber.await();
+
+        /*
+         * DownloadToPublisher
+         */
+        ObservableSubscriber<ByteBuffer> downloadSubscriber = new OperationSubscriber<>();
+        gridFSBucket.downloadToPublisher(fileId).subscribe(downloadSubscriber);
+        Integer size = downloadSubscriber.get().stream().map(Buffer::limit).reduce(0, Integer::sum);
+        System.out.println("downloaded file size: " + size);
+
+        /*
+         * DownloadToPublisher by name
+         */
+        GridFSDownloadOptions downloadOptions = new GridFSDownloadOptions().revision(0);
+        downloadSubscriber = new OperationSubscriber<>();
+        gridFSBucket.downloadToPublisher("mongodb-tutorial", downloadOptions).subscribe(downloadSubscriber);
+        size = downloadSubscriber.get().stream().map(Buffer::limit).reduce(0, Integer::sum);
+        System.out.println("downloaded file size: " + size);
+
+        /*
+         * Rename
+         */
+        OperationSubscriber<Void> successSubscriber = new OperationSubscriber<>();
+        gridFSBucket.rename(fileId, "mongodbTutorial").subscribe(successSubscriber);
+        successSubscriber.await();
+        System.out.println("Renamed file");
+
+        /*
+         * Delete
+         */
+        successSubscriber = new OperationSubscriber<>();
+        gridFSBucket.delete(fileId).subscribe(successSubscriber);
+        successSubscriber.await();
+        System.out.println("Deleted file");
+
+        // Final cleanup
+        successSubscriber = new OperationSubscriber<>();
+        database.drop().subscribe(successSubscriber);
+        successSubscriber.await();
+        System.out.println("Finished");
+    }
+
+    private GridFSTour() {
+    }
+}
diff --git a/driver-reactive-streams/src/examples/reactivestreams/gridfs/package-info.java b/driver-reactive-streams/src/examples/reactivestreams/gridfs/package-info.java
new file mode 100644
index 00000000000..4b5b1c7ad22
--- /dev/null
+++ b/driver-reactive-streams/src/examples/reactivestreams/gridfs/package-info.java
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/**
+ * This package contains the gridfs tour example.
+ */
+package reactivestreams.gridfs;
diff --git a/driver-reactive-streams/src/examples/reactivestreams/helpers/PublisherHelpers.java b/driver-reactive-streams/src/examples/reactivestreams/helpers/PublisherHelpers.java
new file mode 100644
index 00000000000..e7cad6131a7
--- /dev/null
+++ b/driver-reactive-streams/src/examples/reactivestreams/helpers/PublisherHelpers.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/*
+ * Copyright 2015 MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package reactivestreams.helpers;
+
+import org.reactivestreams.Publisher;
+import reactor.core.publisher.Flux;
+
+import java.nio.ByteBuffer;
+
+import static java.util.Arrays.asList;
+
+/**
+ * Publisher helper for the Quick Tour.
+ */
+public final class PublisherHelpers {
+
+    /**
+     * Creates a {@code Publisher} from the given ByteBuffers.
+     *
+     * @param byteBuffers the byte buffers
+     * @return a {@code Publisher}
+     */
+    public static Publisher<ByteBuffer> toPublisher(final ByteBuffer... byteBuffers) {
+        return Flux.fromIterable(asList(byteBuffers));
+    }
+
+    private PublisherHelpers() {
+    }
+}
diff --git a/driver-reactive-streams/src/examples/reactivestreams/helpers/SubscriberHelpers.java b/driver-reactive-streams/src/examples/reactivestreams/helpers/SubscriberHelpers.java
new file mode 100644
index 00000000000..29c6e9f3735
--- /dev/null
+++ b/driver-reactive-streams/src/examples/reactivestreams/helpers/SubscriberHelpers.java
@@ -0,0 +1,282 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+/*
+ * Copyright 2015 MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package reactivestreams.helpers;
+
+import com.mongodb.MongoTimeoutException;
+import org.bson.Document;
+import org.reactivestreams.Subscriber;
+import org.reactivestreams.Subscription;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Consumer;
+
+import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException;
+
+/**
+ * Subscriber helper implementations for the Quick Tour.
+ */
+public final class SubscriberHelpers {
+
+    /**
+     * A Subscriber that stores the publisher's results and provides a latch so callers can block until completion.
+     *
+     * @param <T> the publisher's result type
+     */
+    public abstract static class ObservableSubscriber<T> implements Subscriber<T> {
+        private final List<T> received;
+        private final List<RuntimeException> errors;
+        private final CountDownLatch latch;
+        private volatile Subscription subscription;
+        private volatile boolean completed;
+
+        /**
+         * Construct an instance
+         */
+        public ObservableSubscriber() {
+            this.received = new ArrayList<>();
+            this.errors = new ArrayList<>();
+            this.latch = new CountDownLatch(1);
+        }
+
+        @Override
+        public void onSubscribe(final Subscription s) {
+            subscription = s;
+        }
+
+        @Override
+        public void onNext(final T t) {
+            received.add(t);
+        }
+
+        @Override
+        public void onError(final Throwable t) {
+            if (t instanceof RuntimeException) {
+                errors.add((RuntimeException) t);
+            } else {
+                errors.add(new RuntimeException("Unexpected exception", t));
+            }
+            onComplete();
+        }
+
+        @Override
+        public void onComplete() {
+            completed = true;
+            latch.countDown();
+        }
+
+        /**
+         * Gets the subscription
+         *
+         * @return the subscription
+         */
+        public Subscription getSubscription() {
+            return subscription;
+        }
+
+        /**
+         * Get received elements
+         *
+         * @return the list of received elements
+         */
+        public List<T> getReceived() {
+            return received;
+        }
+
+        /**
+         * Get error from subscription
+         *
+         * @return the error, which may be null
+         */
+        public RuntimeException getError() {
+            if (errors.size() > 0) {
+                return errors.get(0);
+            }
+            return null;
+        }
+
+        /**
+         * Get received elements.
+         *
+         * @return the list of received elements
+         */
+        public List<T> get() {
+            return await().getReceived();
+        }
+
+        /**
+         * Get received elements.
+         *
+         * @param timeout how long to wait
+         * @param unit the time unit
+         * @return the list of received elements
+         */
+        public List<T> get(final long timeout, final TimeUnit unit) {
+            return await(timeout, unit).getReceived();
+        }
+
+        /**
+         * Get the first received element.
+         *
+         * @return the first received element
+         */
+        public T first() {
+            List<T> received = await().getReceived();
+            return received.size() > 0 ? received.get(0) : null;
+        }
+
+        /**
+         * Await completion or error
+         *
+         * @return this
+         */
+        public ObservableSubscriber<T> await() {
+            return await(60, TimeUnit.SECONDS);
+        }
+
+        /**
+         * Await completion or error
+         *
+         * @param timeout how long to wait
+         * @param unit the time unit
+         * @return this
+         */
+        public ObservableSubscriber<T> await(final long timeout, final TimeUnit unit) {
+            subscription.request(Integer.MAX_VALUE);
+            try {
+                if (!latch.await(timeout, unit)) {
+                    throw new MongoTimeoutException("Publisher onComplete timed out");
+                }
+            } catch (InterruptedException e) {
+                throw interruptAndCreateMongoInterruptedException("Interrupted waiting for observation", e);
+            }
+            if (!errors.isEmpty()) {
+                throw errors.get(0);
+            }
+            return this;
+        }
+    }
+
+    /**
+     * A Subscriber that immediately requests Integer.MAX_VALUE in onSubscribe
+     *
+     * @param <T> the publisher's result type
+     */
+    public static class OperationSubscriber<T> extends ObservableSubscriber<T> {
+
+        @Override
+        public void onSubscribe(final Subscription s) {
+            super.onSubscribe(s);
+            s.request(Integer.MAX_VALUE);
+        }
+    }
+
+    /**
+     * A Subscriber that prints a message including the received items on completion
+     *
+     * @param <T> the publisher's result type
+     */
+    public static class PrintSubscriber<T> extends OperationSubscriber<T> {
+        private final String message;
+
+        /**
+         * A Subscriber that outputs a message onComplete.
+         *
+         * @param message the message to output onComplete
+         */
+        public PrintSubscriber(final String message) {
+            this.message = message;
+        }
+
+        @Override
+        public void onComplete() {
+            System.out.printf(message + "%n", getReceived());
+            super.onComplete();
+        }
+    }
+
+    /**
+     * A Subscriber that prints the json version of each document
+     */
+    public static class PrintDocumentSubscriber extends ConsumerSubscriber<Document> {
+        /**
+         * Construct a new instance
+         */
+        public PrintDocumentSubscriber() {
+            super(t -> System.out.println(t.toJson()));
+        }
+    }
+
+    /**
+     * A Subscriber that prints the toString version of each element
+     *
+     * @param <T> the type of the element
+     */
+    public static class PrintToStringSubscriber<T> extends ConsumerSubscriber<T> {
+        /**
+         * Construct a new instance
+         */
+        public PrintToStringSubscriber() {
+            super(System.out::println);
+        }
+    }
+
+    /**
+     * A Subscriber that processes a consumer for each element
+     *
+     * @param <T> the type of the element
+     */
+    public static class ConsumerSubscriber<T> extends OperationSubscriber<T> {
+        private final Consumer<T> consumer;
+
+        /**
+         * Construct a new instance
+         *
+         * @param consumer the consumer
+         */
+        public ConsumerSubscriber(final Consumer<T> consumer) {
+            this.consumer = consumer;
+        }
+
+        @Override
+        public void onNext(final T document) {
+            super.onNext(document);
+            consumer.accept(document);
+        }
+    }
+
+    private SubscriberHelpers() {
+    }
+}
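Editor's note: since these helpers are the backbone of every example in this patch, a short usage sketch may help reviewers. It relies only on the classes defined above; the server address and collection name are assumptions for illustration.

    import com.mongodb.reactivestreams.client.MongoClient;
    import com.mongodb.reactivestreams.client.MongoClients;
    import org.bson.Document;
    import reactivestreams.helpers.SubscriberHelpers.ConsumerSubscriber;
    import reactivestreams.helpers.SubscriberHelpers.PrintDocumentSubscriber;

    public final class SubscriberHelpersSketch {
        public static void main(final String[] args) {
            try (MongoClient client = MongoClients.create()) {
                // PrintDocumentSubscriber prints each received document as JSON.
                PrintDocumentSubscriber printSubscriber = new PrintDocumentSubscriber();
                client.getDatabase("test").getCollection("restaurants")
                        .find().first().subscribe(printSubscriber);
                printSubscriber.await();

                // ConsumerSubscriber runs an arbitrary callback per element; await()
                // rethrows the first error the publisher signalled, if any.
                ConsumerSubscriber<Document> nameSubscriber =
                        new ConsumerSubscriber<>(doc -> System.out.println(doc.getString("name")));
                client.getDatabase("test").getCollection("restaurants").find().subscribe(nameSubscriber);
                nameSubscriber.await();
            }
        }
    }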
diff --git a/driver-reactive-streams/src/examples/reactivestreams/helpers/package-info.java b/driver-reactive-streams/src/examples/reactivestreams/helpers/package-info.java
new file mode 100644
index 00000000000..3096835ab2f
--- /dev/null
+++ b/driver-reactive-streams/src/examples/reactivestreams/helpers/package-info.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package reactivestreams.helpers;
diff --git a/driver-reactive-streams/src/examples/reactivestreams/primer/AggregatePrimer.java b/driver-reactive-streams/src/examples/reactivestreams/primer/AggregatePrimer.java
new file mode 100644
index 00000000000..353d9caa693
--- /dev/null
+++ b/driver-reactive-streams/src/examples/reactivestreams/primer/AggregatePrimer.java
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package reactivestreams.primer;
+
+// @import: start
+import com.mongodb.reactivestreams.client.AggregatePublisher;
+import org.bson.Document;
+import org.junit.Test;
+import reactivestreams.helpers.SubscriberHelpers.ObservableSubscriber;
+import reactivestreams.helpers.SubscriberHelpers.PrintDocumentSubscriber;
+
+import static java.util.Arrays.asList;
+import static java.util.Collections.singletonList;
+// @import: end
+
+public class AggregatePrimer extends PrimerTestCase {
+
+    @Test
+    public void groupDocumentsByAFieldAndCalculateCount() {
+
+        // @begin: group-documents-by-a-field-and-calculate-count
+        // @code: start
+        AggregatePublisher<Document> publisher = db.getCollection("restaurants").aggregate(singletonList(
+                new Document("$group", new Document("_id", "$borough").append("count", new Document("$sum", 1)))));
+        // @code: end
+
+        // @pre: Iterate the results and apply a block to each resulting document
+        // @code: start
+        ObservableSubscriber<Document> aggregateSubscriber = new PrintDocumentSubscriber();
+        publisher.subscribe(aggregateSubscriber);
+        aggregateSubscriber.await();
+        // @code: end
+
+        /*
+        // @results: start
+        { "_id" : "Missing", "count" : 51 }
+        { "_id" : "Staten Island", "count" : 969 }
+        { "_id" : "Manhattan", "count" : 10259 }
+        { "_id" : "Brooklyn", "count" : 6086 }
+        { "_id" : "Queens", "count" : 5656 }
+        { "_id" : "Bronx", "count" : 2338 }
+        // @results: end
+        */
+
+        // @end: group-documents-by-a-field-and-calculate-count
+    }
+
+    @Test
+    public void filterAndGroupDocuments() {
+
+        // @begin: filter-and-group-documents
+        // @code: start
+        AggregatePublisher<Document> publisher = db.getCollection("restaurants").aggregate(asList(
+                new Document("$match", new Document("borough", "Queens").append("cuisine", "Brazilian")),
+                new Document("$group", new Document("_id", "$address.zipcode").append("count", new Document("$sum", 1)))));
+        // @code: end
+
+        // @pre: Iterate the results and apply a block to each resulting document
+        // @code: start
+        ObservableSubscriber<Document> aggregateSubscriber = new PrintDocumentSubscriber();
+        publisher.subscribe(aggregateSubscriber);
+        aggregateSubscriber.await();
+        // @code: end
+
+        /*
+        // @results: start
+        { "_id" : "11377", "count" : 1 }
+        { "_id" : "11368", "count" : 1 }
+        { "_id" : "11101", "count" : 2 }
+        { "_id" : "11106", "count" : 3 }
+        { "_id" : "11103", "count" : 1 }
+        // @results: end
+        */
+
+        // @end: filter-and-group-documents
+    }
+}
diff --git a/driver-reactive-streams/src/examples/reactivestreams/primer/IndexesPrimer.java b/driver-reactive-streams/src/examples/reactivestreams/primer/IndexesPrimer.java
new file mode 100644
index 00000000000..c437ae2b6c3
--- /dev/null
+++ b/driver-reactive-streams/src/examples/reactivestreams/primer/IndexesPrimer.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package reactivestreams.primer;
+
+
+// @imports: start
+import org.bson.Document;
+import org.junit.Test;
+import reactivestreams.helpers.SubscriberHelpers.ObservableSubscriber;
+import reactivestreams.helpers.SubscriberHelpers.PrintSubscriber;
+// @imports: end
+
+public class IndexesPrimer extends PrimerTestCase {
+
+    @Test
+    public void singleFieldIndex() {
+
+        // @begin: single-field-index
+        // @code: start
+        ObservableSubscriber<String> indexSubscriber = new PrintSubscriber<>("Index created: %s");
+        db.getCollection("restaurants").createIndex(new Document("cuisine", 1))
+                .subscribe(indexSubscriber);
+        indexSubscriber.await();
+        // @code: end
+
+        // @post: The method does not return a result.
+        // @end: single-field-index
+    }
+
+    @Test
+    public void createCompoundIndex() {
+        // @begin: create-compound-index
+        // @code: start
+        ObservableSubscriber<String> indexSubscriber = new PrintSubscriber<>("Index created: %s");
+        db.getCollection("restaurants").createIndex(new Document("cuisine", 1).append("address.zipcode", 1))
+                .subscribe(indexSubscriber);
+        indexSubscriber.await();
+        // @code: end
+
+        // @post: The method does not return a result.
+        // @end: create-compound-index
+    }
+}
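Editor's note: one way to verify the effect of the createIndex calls above is to list the collection's indexes with the same subscriber helpers. A minimal sketch, assuming a local server and the same "restaurants" collection used by the primers:

    import com.mongodb.reactivestreams.client.MongoClient;
    import com.mongodb.reactivestreams.client.MongoClients;
    import reactivestreams.helpers.SubscriberHelpers.PrintDocumentSubscriber;

    public final class ListIndexesSketch {
        public static void main(final String[] args) {
            try (MongoClient client = MongoClients.create()) {
                // Each returned document describes one index, including the
                // generated name (e.g. "cuisine_1") that createIndex(...) emits.
                PrintDocumentSubscriber subscriber = new PrintDocumentSubscriber();
                client.getDatabase("test").getCollection("restaurants")
                        .listIndexes().subscribe(subscriber);
                subscriber.await();
            }
        }
    }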
+ * + */ + +package reactivestreams.primer; + +// @imports: start +import com.mongodb.client.result.InsertOneResult; +import org.bson.Document; +import org.junit.Test; +import reactivestreams.helpers.SubscriberHelpers.ObservableSubscriber; +import reactivestreams.helpers.SubscriberHelpers.OperationSubscriber; + +import java.text.DateFormat; +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.Locale; + +import static java.util.Arrays.asList; +// @imports: end + +public class InsertPrimer extends PrimerTestCase { + + @Test + public void insertADocument() throws ParseException { + + // @begin: insert-a-document + // @code: start + DateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'", Locale.ENGLISH); + + ObservableSubscriber insertOneSubscriber = new OperationSubscriber<>(); + db.getCollection("restaurants").insertOne( + new Document("address", + new Document() + .append("street", "2 Avenue") + .append("zipcode", "10075") + .append("building", "1480") + .append("coord", asList(-73.9557413, 40.7720266))) + .append("borough", "Manhattan") + .append("cuisine", "Italian") + .append("grades", asList( + new Document() + .append("date", format.parse("2014-10-01T00:00:00Z")) + .append("grade", "A") + .append("score", 11), + new Document() + .append("date", format.parse("2014-01-16T00:00:00Z")) + .append("grade", "B") + .append("score", 17))) + .append("name", "Vella") + .append("restaurant_id", "41704620")) + .subscribe(insertOneSubscriber); + insertOneSubscriber.await(); + // @code: end + + // @post: The method does not return a result + // @end: insert-a-document + } +} diff --git a/driver-reactive-streams/src/examples/reactivestreams/primer/PrimerTestCase.java b/driver-reactive-streams/src/examples/reactivestreams/primer/PrimerTestCase.java new file mode 100644 index 00000000000..30087835f3c --- /dev/null +++ b/driver-reactive-streams/src/examples/reactivestreams/primer/PrimerTestCase.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package reactivestreams.primer; + + +import com.mongodb.reactivestreams.client.MongoDatabase; + +import static com.mongodb.reactivestreams.client.Fixture.getMongoClient; + +public class PrimerTestCase { + MongoDatabase db = getMongoClient().getDatabase("test"); +} diff --git a/driver-reactive-streams/src/examples/reactivestreams/primer/QueryPrimer.java b/driver-reactive-streams/src/examples/reactivestreams/primer/QueryPrimer.java new file mode 100644 index 00000000000..4dae74d1302 --- /dev/null +++ b/driver-reactive-streams/src/examples/reactivestreams/primer/QueryPrimer.java @@ -0,0 +1,243 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package reactivestreams.primer;
+
+// @imports: start
+import com.mongodb.reactivestreams.client.FindPublisher;
+import org.bson.Document;
+import org.junit.Test;
+import reactivestreams.helpers.SubscriberHelpers.ObservableSubscriber;
+import reactivestreams.helpers.SubscriberHelpers.PrintDocumentSubscriber;
+
+import static com.mongodb.client.model.Filters.and;
+import static com.mongodb.client.model.Filters.eq;
+import static com.mongodb.client.model.Filters.gt;
+import static com.mongodb.client.model.Filters.lt;
+import static com.mongodb.client.model.Filters.or;
+import static com.mongodb.client.model.Sorts.ascending;
+import static java.util.Arrays.asList;
+// @imports: end
+
+
+public class QueryPrimer extends PrimerTestCase {
+
+    @Test
+    public void queryAll() {
+        // @begin: query-all
+        // @code: start
+        FindPublisher<Document> publisher = db.getCollection("restaurants").find();
+        // @code: end
+
+        // @pre: Iterate the results and apply a block to each resulting document
+        // @code: start
+        ObservableSubscriber<Document> documentSubscriber = new PrintDocumentSubscriber();
+        publisher.subscribe(documentSubscriber);
+        documentSubscriber.await();
+        // @code: end
+        // @end: query-all
+    }
+
+
+    @Test
+    public void logicalAnd() {
+
+        // @begin: logical-and
+        // @code: start
+        FindPublisher<Document> publisher = db.getCollection("restaurants").find(
+                new Document("cuisine", "Italian").append("address.zipcode", "10075"));
+        // @code: end
+
+        // @pre: Iterate the results and apply a block to each resulting document
+        // @code: start
+        ObservableSubscriber<Document> documentSubscriber = new PrintDocumentSubscriber();
+        publisher.subscribe(documentSubscriber);
+        documentSubscriber.await();
+        // @code: end
+
+        // @pre: To simplify building queries, the Java driver provides static helpers
+        // @code: start
+        db.getCollection("restaurants").find(and(eq("cuisine", "Italian"), eq("address.zipcode", "10075")));
+        // @code: end
+
+        // @end: logical-and
+    }
+
+    @Test
+    public void logicalOr() {
+
+        // @begin: logical-or
+        // @code: start
+        FindPublisher<Document> publisher = db.getCollection("restaurants").find(
+                new Document("$or", asList(new Document("cuisine", "Italian"),
+                        new Document("address.zipcode", "10075"))));
+        // @code: end
+
+        // @pre: Iterate the results and apply a block to each resulting document
+        // @code: start
+        ObservableSubscriber<Document> documentSubscriber = new PrintDocumentSubscriber();
+        publisher.subscribe(documentSubscriber);
+        documentSubscriber.await();
+        // @code: end
+
+        // @pre: To simplify building queries, the Java driver provides static helpers
+        // @code: start
+        db.getCollection("restaurants").find(or(eq("cuisine", "Italian"), eq("address.zipcode", "10075")));
+        // @code: end
+
+        // @end: logical-or
+    }
+
+    @Test
+    public void queryTopLevelField() {
+        // @begin: query-top-level-field
+        // @code: start
+        FindPublisher<Document> publisher = db.getCollection("restaurants").find(
+                new Document("borough", "Manhattan"));
+        // @code: end
+
+        // @pre: Iterate the results and apply a block to each resulting document
+        // @code: start
+        ObservableSubscriber<Document> documentSubscriber = new PrintDocumentSubscriber();
+        publisher.subscribe(documentSubscriber);
+        documentSubscriber.await();
+
+        // @code: end
+
+        // @pre: To simplify building queries, the Java driver provides static helpers
+        // @code: start
+        db.getCollection("restaurants").find(eq("borough", "Manhattan"));
+        // @code: end
+        // @end: query-top-level-field
+    }
+
+    @Test
+    public void queryEmbeddedDocument() {
+        // @begin: query-embedded-document
+        // @code: start
+        FindPublisher<Document> publisher = db.getCollection("restaurants").find(
+                new Document("address.zipcode", "10075"));
+        // @code: end
+
+        // @pre: Iterate the results and apply a block to each resulting document
+        // @code: start
+        ObservableSubscriber<Document> documentSubscriber = new PrintDocumentSubscriber();
+        publisher.subscribe(documentSubscriber);
+        documentSubscriber.await();
+
+        // @code: end
+
+        // @pre: To simplify building queries, the Java driver provides static helpers
+        // @code: start
+        db.getCollection("restaurants").find(eq("address.zipcode", "10075"));
+        // @code: end
+        // @end: query-embedded-document
+    }
+
+    @Test
+    public void queryFieldInArray() {
+        // @begin: query-field-in-array
+        // @code: start
+        FindPublisher<Document> publisher = db.getCollection("restaurants").find(
+                new Document("grades.grade", "B"));
+        // @code: end
+
+        // @pre: Iterate the results and apply a block to each resulting document
+        // @code: start
+        ObservableSubscriber<Document> documentSubscriber = new PrintDocumentSubscriber();
+        publisher.subscribe(documentSubscriber);
+        documentSubscriber.await();
+
+        // @code: end
+
+        // @pre: To simplify building queries, the Java driver provides static helpers
+        // @code: start
+        db.getCollection("restaurants").find(eq("grades.grade", "B"));
+        // @code: end
+        // @end: query-field-in-array
+    }
+
+    @Test
+    public void greaterThan() {
+        // @begin: greater-than
+        // @code: start
+        FindPublisher<Document> publisher = db.getCollection("restaurants").find(
+                new Document("grades.score", new Document("$gt", 30)));
+        // @code: end
+
+        // @pre: Iterate the results and apply a block to each resulting document
+        // @code: start
+        ObservableSubscriber<Document> documentSubscriber = new PrintDocumentSubscriber();
+        publisher.subscribe(documentSubscriber);
+        documentSubscriber.await();
+
+        // @code: end
+
+        // @pre: To simplify building queries, the Java driver provides static helpers
+        // @code: start
+        db.getCollection("restaurants").find(gt("grades.score", 30));
+        // @code: end
+        // @end: greater-than
+    }
+
+    @Test
+    public void lessThan() {
+        // @begin: less-than
+        // @code: start
+        FindPublisher<Document> publisher = db.getCollection("restaurants").find(
+                new Document("grades.score", new Document("$lt", 10)));
+        // @code: end
+
+        // @pre: Iterate the results and apply a block to each resulting document
+        // @code: start
+        ObservableSubscriber<Document> documentSubscriber = new PrintDocumentSubscriber();
+        publisher.subscribe(documentSubscriber);
+        documentSubscriber.await();
+
+        // @code: end
+
+        // @pre: To simplify building queries, the Java driver provides static helpers
+        // @code: start
+        db.getCollection("restaurants").find(lt("grades.score", 10));
+        // @code: end
+        // @end: less-than
+    }
+
+
+    @Test
+    public void sort() {
+        // @begin: sort
+        // @code: start
+        FindPublisher<Document> publisher = db.getCollection("restaurants").find()
+                .sort(new Document("borough", 1).append("address.zipcode", 1));
+        // @code: end
+
+        // @pre: Iterate the results and apply a block to each resulting document
+        // @code: start
+        ObservableSubscriber<Document> documentSubscriber = new PrintDocumentSubscriber();
+        publisher.subscribe(documentSubscriber);
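+        // (Editorial note: await() on the SubscriberHelpers.ObservableSubscriber blocks
+        // until the Publisher completes or signals an error; it is used this same way
+        // throughout these primer examples.)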
+        documentSubscriber.await();
+
+        // @code: end
+
+        // @pre: To simplify sorting fields, the Java driver provides static helpers
+        // @code: start
+        db.getCollection("restaurants").find().sort(ascending("borough", "address.zipcode"));
+        // @code: end
+        // @end: sort
+    }
+}
diff --git a/driver-reactive-streams/src/examples/reactivestreams/primer/RemovePrimer.java b/driver-reactive-streams/src/examples/reactivestreams/primer/RemovePrimer.java
new file mode 100644
index 00000000000..e9e4e46b511
--- /dev/null
+++ b/driver-reactive-streams/src/examples/reactivestreams/primer/RemovePrimer.java
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package reactivestreams.primer;
+
+// @import: start
+
+import com.mongodb.client.result.DeleteResult;
+import org.bson.Document;
+import org.junit.Test;
+import reactivestreams.helpers.SubscriberHelpers.ObservableSubscriber;
+import reactivestreams.helpers.SubscriberHelpers.OperationSubscriber;
+import reactivestreams.helpers.SubscriberHelpers.PrintSubscriber;
+// @import: end
+
+public class RemovePrimer extends PrimerTestCase {
+
+    @Test
+    public void removeMatchingDocuments() {
+        // @begin: remove-matching-documents
+        ObservableSubscriber<DeleteResult> deleteSubscriber = new PrintSubscriber<>("Delete complete: %s");
+        db.getCollection("restaurants").deleteMany(new Document("borough", "Manhattan"))
+                .subscribe(deleteSubscriber);
+        deleteSubscriber.await();
+
+        /*
+        // @post: start
+        The deleteMany operation returns a ``DeleteResult`` which contains information about the operation.
+        The ``getDeletedCount`` method returns the number of documents deleted.
+        // @post: end
+        */
+        // @end: remove-matching-documents
+    }
+
+    @Test
+    public void removeAllDocuments() {
+        // @begin: remove-all-documents
+        ObservableSubscriber<DeleteResult> deleteSubscriber = new PrintSubscriber<>("Delete complete: %s");
+        db.getCollection("restaurants").deleteMany(new Document())
+                .subscribe(deleteSubscriber);
+        deleteSubscriber.await();
+
+        /*
+        // @post: start
+        The deleteMany operation returns a ``DeleteResult`` which contains information about the operation.
+        The ``getDeletedCount`` method returns the number of documents deleted.
+        // @post: end
+        */
+        // @end: remove-all-documents
+    }
+
+    @Test
+    public void dropCollection() {
+        // @begin: drop-collection
+        ObservableSubscriber<Void> successSubscriber = new OperationSubscriber<>();
+        db.getCollection("restaurants").drop()
+                .subscribe(successSubscriber);
+        successSubscriber.await();
+        // @end: drop-collection
+    }
+}
diff --git a/driver-reactive-streams/src/examples/reactivestreams/primer/UpdatePrimer.java b/driver-reactive-streams/src/examples/reactivestreams/primer/UpdatePrimer.java
new file mode 100644
index 00000000000..3c1c6a5650d
--- /dev/null
+++ b/driver-reactive-streams/src/examples/reactivestreams/primer/UpdatePrimer.java
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package reactivestreams.primer;
+
+// @import: start
+import com.mongodb.client.result.UpdateResult;
+import org.bson.Document;
+import org.junit.Test;
+import reactivestreams.helpers.SubscriberHelpers.ObservableSubscriber;
+import reactivestreams.helpers.SubscriberHelpers.PrintSubscriber;
+// @import: end
+
+public class UpdatePrimer extends PrimerTestCase {
+
+    @Test
+    public void updateTopLevelFields() {
+        // @begin: update-top-level-fields
+        ObservableSubscriber<UpdateResult> updateSubscriber = new PrintSubscriber<>("Update complete: %s");
+        db.getCollection("restaurants").updateOne(new Document("name", "Juni"),
+                new Document("$set", new Document("cuisine", "American (New)"))
+                        .append("$currentDate", new Document("lastModified", true)))
+                .subscribe(updateSubscriber);
+        updateSubscriber.await();
+
+        /*
+        // @post: start
+        The updateOne operation returns an ``UpdateResult`` which contains information about the operation.
+        The ``getModifiedCount`` method returns the number of documents modified.
+        // @post: end
+        */
+        // @end: update-top-level-fields
+    }
+
+    @Test
+    public void updateEmbeddedField() {
+        // @begin: update-embedded-field
+        ObservableSubscriber<UpdateResult> updateSubscriber = new PrintSubscriber<>("Update complete: %s");
+        db.getCollection("restaurants").updateOne(new Document("restaurant_id", "41156888"),
+                new Document("$set", new Document("address.street", "East 31st Street")))
+                .subscribe(updateSubscriber);
+        updateSubscriber.await();
+
+        /*
+        // @post: start
+        The updateOne operation returns an ``UpdateResult`` which contains information about the operation.
+        The ``getModifiedCount`` method returns the number of documents modified.
+        // @post: end
+        */
+        // @end: update-embedded-field
+    }
+
+
+    @Test
+    public void updateMultipleDocuments() {
+
+        // @begin: update-multiple-documents
+        ObservableSubscriber<UpdateResult> updateSubscriber = new PrintSubscriber<>("Update complete: %s");
+        db.getCollection("restaurants").updateMany(new Document("address.zipcode", "10016").append("cuisine", "Other"),
+                new Document("$set", new Document("cuisine", "Category To Be Determined"))
+                        .append("$currentDate", new Document("lastModified", true)))
+                .subscribe(updateSubscriber);
+        updateSubscriber.await();
+
+        /*
+        // @post: start
+        The updateMany operation returns an ``UpdateResult`` which contains information about the operation.
+        The ``getModifiedCount`` method returns the number of documents modified.
+        // @post: end
+        */
+        // @end: update-multiple-documents
+    }
+}
diff --git a/driver-reactive-streams/src/examples/reactivestreams/tour/Address.java b/driver-reactive-streams/src/examples/reactivestreams/tour/Address.java
new file mode 100644
index 00000000000..26ec811e2e1
--- /dev/null
+++ b/driver-reactive-streams/src/examples/reactivestreams/tour/Address.java
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package reactivestreams.tour; + +/** + * The Address POJO + */ +public final class Address { + + private String street; + private String city; + private String zip; + + /** + * Construct a new instance + */ + public Address() { + } + + /** + * Construct a new instance + * + * @param street the street + * @param city the city + * @param zip the zip / postal code + */ + public Address(final String street, final String city, final String zip) { + this.street = street; + this.city = city; + this.zip = zip; + } + + /** + * Returns the street + * + * @return the street + */ + public String getStreet() { + return street; + } + + /** + * Sets the street + * + * @param street the street + */ + public void setStreet(final String street) { + this.street = street; + } + + /** + * Returns the city + * + * @return the city + */ + public String getCity() { + return city; + } + + /** + * Sets the city + * + * @param city the city + */ + public void setCity(final String city) { + this.city = city; + } + + /** + * Returns the zip + * + * @return the zip + */ + public String getZip() { + return zip; + } + + /** + * Sets the zip + * + * @param zip the zip + */ + public void setZip(final String zip) { + this.zip = zip; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Address address = (Address) o; + + if (getStreet() != null ? !getStreet().equals(address.getStreet()) : address.getStreet() != null) { + return false; + } + if (getCity() != null ? !getCity().equals(address.getCity()) : address.getCity() != null) { + return false; + } + if (getZip() != null ? !getZip().equals(address.getZip()) : address.getZip() != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = getStreet() != null ? getStreet().hashCode() : 0; + result = 31 * result + (getCity() != null ? getCity().hashCode() : 0); + result = 31 * result + (getZip() != null ? getZip().hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "Address{" + + "street='" + street + "'" + + ", city='" + city + "'" + + ", zip='" + zip + "'" + + "}"; + } +} diff --git a/driver-reactive-streams/src/examples/reactivestreams/tour/ClientSideEncryptionAutoEncryptionSettingsTour.java b/driver-reactive-streams/src/examples/reactivestreams/tour/ClientSideEncryptionAutoEncryptionSettingsTour.java new file mode 100644 index 00000000000..e1b3e24f00a --- /dev/null +++ b/driver-reactive-streams/src/examples/reactivestreams/tour/ClientSideEncryptionAutoEncryptionSettingsTour.java @@ -0,0 +1,135 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package reactivestreams.tour; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.ConnectionString; +import com.mongodb.MongoClientSettings; +import com.mongodb.client.model.vault.DataKeyOptions; +import com.mongodb.client.result.InsertOneResult; +import com.mongodb.reactivestreams.client.MongoClient; +import com.mongodb.reactivestreams.client.MongoClients; +import com.mongodb.reactivestreams.client.MongoCollection; +import com.mongodb.reactivestreams.client.vault.ClientEncryption; +import com.mongodb.reactivestreams.client.vault.ClientEncryptions; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.Document; +import reactivestreams.helpers.SubscriberHelpers.ObservableSubscriber; +import reactivestreams.helpers.SubscriberHelpers.OperationSubscriber; +import reactivestreams.helpers.SubscriberHelpers.PrintDocumentSubscriber; + +import java.security.SecureRandom; +import java.util.Base64; +import java.util.HashMap; +import java.util.Map; + +/** + * ClientSideEncryption AutoEncryptionSettings tour + */ +public class ClientSideEncryptionAutoEncryptionSettingsTour { + + /** + * Run this main method to see the output of this quick example. + *
+     * Requires the mongodb-crypt library in the class path and mongocryptd on the system path.
+     *
+     * @param args ignored args
+     */
+    public static void main(final String[] args) {
+
+        // This would have to be the same master key as was used to create the encryption key
+        byte[] localMasterKey = new byte[96];
+        new SecureRandom().nextBytes(localMasterKey);
+
+        Map<String, Map<String, Object>> kmsProviders = new HashMap<String, Map<String, Object>>() {{
+            put("local", new HashMap<String, Object>() {{
+                put("key", localMasterKey);
+            }});
+        }};
+
+        MongoClientSettings commonClientSettings = (
+                args.length == 0
+                        ? MongoClientSettings.builder()
+                        : MongoClientSettings.builder().applyConnectionString(new ConnectionString(args[0])))
+                .build();
+        String keyVaultNamespace = "admin.datakeys";
+        ClientEncryptionSettings clientEncryptionSettings = ClientEncryptionSettings.builder()
+                .keyVaultMongoClientSettings(commonClientSettings)
+                .keyVaultNamespace(keyVaultNamespace)
+                .kmsProviders(kmsProviders)
+                .build();
+
+        ClientEncryption clientEncryption = ClientEncryptions.create(clientEncryptionSettings);
+
+        ObservableSubscriber<BsonBinary> dataKeySubscriber = new OperationSubscriber<>();
+        clientEncryption.createDataKey("local", new DataKeyOptions()).subscribe(dataKeySubscriber);
+        dataKeySubscriber.await();
+        String base64DataKeyId = Base64.getEncoder().encodeToString(dataKeySubscriber.getReceived().get(0).getData());
+
+        final String dbName = "test";
+        final String collName = "coll";
+        AutoEncryptionSettings autoEncryptionSettings = AutoEncryptionSettings.builder()
+                .keyVaultNamespace(keyVaultNamespace)
+                .kmsProviders(kmsProviders)
+                .schemaMap(new HashMap<String, BsonDocument>() {{
+                    put(dbName + "." + collName,
+                            // Need a schema that references the new data key
+                            BsonDocument.parse("{"
+                                    + "  properties: {"
+                                    + "    encryptedField: {"
+                                    + "      encrypt: {"
+                                    + "        keyId: [{"
+                                    + "          \"$binary\": {"
+                                    + "            \"base64\": \"" + base64DataKeyId + "\","
+                                    + "            \"subType\": \"04\""
+                                    + "          }"
+                                    + "        }],"
+                                    + "        bsonType: \"string\","
+                                    + "        algorithm: \"AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic\""
+                                    + "      }"
+                                    + "    }"
+                                    + "  },"
+                                    + "  \"bsonType\": \"object\""
+                                    + "}"));
+                }}).build();
+
+        MongoClientSettings clientSettings = MongoClientSettings.builder(commonClientSettings)
+                .autoEncryptionSettings(autoEncryptionSettings)
+                .build();
+
+        MongoClient mongoClient = MongoClients.create(clientSettings);
+        MongoCollection<Document> collection = mongoClient.getDatabase("test").getCollection("coll");
+
+        ObservableSubscriber<Void> successSubscriber = new OperationSubscriber<>();
+        collection.drop().subscribe(successSubscriber);
+        successSubscriber.await();
+
+        ObservableSubscriber<InsertOneResult> insertOneSubscriber = new OperationSubscriber<>();
+        collection.insertOne(new Document("encryptedField", "123456789")).subscribe(insertOneSubscriber);
+        insertOneSubscriber.await();
+
+        ObservableSubscriber<Document> documentSubscriber = new PrintDocumentSubscriber();
+        collection.find().first().subscribe(documentSubscriber);
+        documentSubscriber.await();
+
+        // release resources
+        mongoClient.close();
+    }
+}
diff --git a/driver-reactive-streams/src/examples/reactivestreams/tour/ClientSideEncryptionExplicitEncryptionAndDecryptionTour.java b/driver-reactive-streams/src/examples/reactivestreams/tour/ClientSideEncryptionExplicitEncryptionAndDecryptionTour.java
new file mode 100644
index 00000000000..14d4e156668
--- /dev/null
+++ b/driver-reactive-streams/src/examples/reactivestreams/tour/ClientSideEncryptionExplicitEncryptionAndDecryptionTour.java
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package reactivestreams.tour; + +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.ConnectionString; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoNamespace; +import com.mongodb.client.model.Filters; +import com.mongodb.client.model.IndexOptions; +import com.mongodb.client.model.Indexes; +import com.mongodb.client.model.vault.DataKeyOptions; +import com.mongodb.client.model.vault.EncryptOptions; +import com.mongodb.client.result.InsertOneResult; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.client.vault.ClientEncryptions; +import com.mongodb.reactivestreams.client.MongoClient; +import com.mongodb.reactivestreams.client.MongoClients; +import com.mongodb.reactivestreams.client.MongoCollection; +import org.bson.BsonBinary; +import org.bson.BsonString; +import org.bson.Document; +import org.bson.types.Binary; +import reactivestreams.helpers.SubscriberHelpers.ObservableSubscriber; +import reactivestreams.helpers.SubscriberHelpers.OperationSubscriber; + +import java.security.SecureRandom; +import java.util.HashMap; +import java.util.Map; + +/** + * ClientSideEncryption Simple tour + */ +public class ClientSideEncryptionExplicitEncryptionAndDecryptionTour { + + /** + * Run this main method to see the output of this quick example. + *
+     * Requires the mongodb-crypt library in the class path and mongocryptd on the system path.
+     * Assumes the schema has already been created in MongoDB.
+     *
+     * @param args ignored args
+     */
+    public static void main(final String[] args) {
+
+        // This would have to be the same master key as was used to create the encryption key
+        byte[] localMasterKey = new byte[96];
+        new SecureRandom().nextBytes(localMasterKey);
+
+        Map<String, Map<String, Object>> kmsProviders = new HashMap<String, Map<String, Object>>() {{
+            put("local", new HashMap<String, Object>() {{
+                put("key", localMasterKey);
+            }});
+        }};
+
+        MongoNamespace keyVaultNamespace = new MongoNamespace("encryption.testKeyVault");
+        MongoClientSettings commonClientSettings = (
+                args.length == 0
+                        ? MongoClientSettings.builder()
+                        : MongoClientSettings.builder().applyConnectionString(new ConnectionString(args[0])))
+                .build();
+        MongoClient mongoClient = MongoClients.create(commonClientSettings);
+
+        // Set up the key vault for this example
+        MongoCollection<Document> keyVaultCollection = mongoClient.getDatabase(keyVaultNamespace.getDatabaseName())
+                .getCollection(keyVaultNamespace.getCollectionName());
+
+        ObservableSubscriber<Void> successSubscriber = new OperationSubscriber<>();
+        keyVaultCollection.drop().subscribe(successSubscriber);
+        successSubscriber.await();
+
+        // Ensure that two data keys cannot share the same keyAltName.
+        ObservableSubscriber<String> indexSubscriber = new OperationSubscriber<>();
+        keyVaultCollection.createIndex(Indexes.ascending("keyAltNames"),
+                new IndexOptions().unique(true)
+                        .partialFilterExpression(Filters.exists("keyAltNames")))
+                .subscribe(indexSubscriber);
+        indexSubscriber.await();
+
+        MongoCollection<Document> collection = mongoClient.getDatabase("test").getCollection("coll");
+        successSubscriber = new OperationSubscriber<>();
+        collection.drop().subscribe(successSubscriber);
+        successSubscriber.await();
+
+        // Create the ClientEncryption instance
+        ClientEncryptionSettings clientEncryptionSettings = ClientEncryptionSettings.builder()
+                .keyVaultMongoClientSettings(commonClientSettings)
+                .keyVaultNamespace(keyVaultNamespace.getFullName())
+                .kmsProviders(kmsProviders)
+                .build();
+
+        ClientEncryption clientEncryption = ClientEncryptions.create(clientEncryptionSettings);
+
+        BsonBinary dataKeyId = clientEncryption.createDataKey("local", new DataKeyOptions());
+
+        // Explicitly encrypt a field
+        BsonBinary encryptedFieldValue = clientEncryption.encrypt(new BsonString("123456789"),
+                new EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic").keyId(dataKeyId));
+
+        ObservableSubscriber<InsertOneResult> insertOneSubscriber = new OperationSubscriber<>();
+        collection.insertOne(new Document("encryptedField", encryptedFieldValue)).subscribe(insertOneSubscriber);
+        insertOneSubscriber.await();
+
+        ObservableSubscriber<Document> documentSubscriber = new OperationSubscriber<>();
+        collection.find().first().subscribe(documentSubscriber);
+
+        Document doc = documentSubscriber.get().get(0);
+        System.out.println(doc.toJson());
+
+        // Explicitly decrypt the field
+        Binary encryptedField = doc.get("encryptedField", Binary.class);
+        BsonString decryptedField = clientEncryption.decrypt(new BsonBinary(encryptedField.getType(), encryptedField.getData())).asString();
+        System.out.println(decryptedField.getValue());
+
+        // release resources
+        clientEncryption.close();
+        mongoClient.close();
+    }
+}
diff --git a/driver-reactive-streams/src/examples/reactivestreams/tour/ClientSideEncryptionExplicitEncryptionOnlyTour.java b/driver-reactive-streams/src/examples/reactivestreams/tour/ClientSideEncryptionExplicitEncryptionOnlyTour.java
new file
mode 100644 index 00000000000..ecd680d9ed8 --- /dev/null +++ b/driver-reactive-streams/src/examples/reactivestreams/tour/ClientSideEncryptionExplicitEncryptionOnlyTour.java @@ -0,0 +1,136 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package reactivestreams.tour; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.ConnectionString; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoNamespace; +import com.mongodb.client.model.Filters; +import com.mongodb.client.model.IndexOptions; +import com.mongodb.client.model.Indexes; +import com.mongodb.client.model.vault.DataKeyOptions; +import com.mongodb.client.model.vault.EncryptOptions; +import com.mongodb.client.result.InsertOneResult; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.client.vault.ClientEncryptions; +import com.mongodb.reactivestreams.client.MongoClient; +import com.mongodb.reactivestreams.client.MongoClients; +import com.mongodb.reactivestreams.client.MongoCollection; +import org.bson.BsonBinary; +import org.bson.BsonString; +import org.bson.Document; +import reactivestreams.helpers.SubscriberHelpers.ObservableSubscriber; +import reactivestreams.helpers.SubscriberHelpers.OperationSubscriber; + +import java.security.SecureRandom; +import java.util.HashMap; +import java.util.Map; + +/** + * ClientSideEncryption Simple tour + */ +public class ClientSideEncryptionExplicitEncryptionOnlyTour { + + /** + * Run this main method to see the output of this quick example. + *
+     * Requires the mongodb-crypt library in the class path and mongocryptd on the system path.
+     * Assumes the schema has already been created in MongoDB.
+     *
+     * @param args ignored args
+     */
+    public static void main(final String[] args) {
+
+        // This would have to be the same master key as was used to create the encryption key
+        byte[] localMasterKey = new byte[96];
+        new SecureRandom().nextBytes(localMasterKey);
+
+        Map<String, Map<String, Object>> kmsProviders = new HashMap<String, Map<String, Object>>() {{
+            put("local", new HashMap<String, Object>() {{
+                put("key", localMasterKey);
+            }});
+        }};
+
+        MongoNamespace keyVaultNamespace = new MongoNamespace("encryption.testKeyVault");
+        MongoClientSettings commonClientSettings = (
+                args.length == 0
+                        ? MongoClientSettings.builder()
+                        : MongoClientSettings.builder().applyConnectionString(new ConnectionString(args[0])))
+                .build();
+        MongoClientSettings clientSettings = MongoClientSettings.builder(commonClientSettings)
+                .autoEncryptionSettings(AutoEncryptionSettings.builder()
+                        .keyVaultNamespace(keyVaultNamespace.getFullName())
+                        .kmsProviders(kmsProviders)
+                        .bypassAutoEncryption(true)
+                        .build())
+                .build();
+        MongoClient mongoClient = MongoClients.create(clientSettings);
+
+        // Set up the key vault for this example
+        MongoCollection<Document> keyVaultCollection = mongoClient.getDatabase(keyVaultNamespace.getDatabaseName())
+                .getCollection(keyVaultNamespace.getCollectionName());
+
+        ObservableSubscriber<Void> successSubscriber = new OperationSubscriber<>();
+        keyVaultCollection.drop().subscribe(successSubscriber);
+        successSubscriber.await();
+
+        // Ensure that two data keys cannot share the same keyAltName.
+        ObservableSubscriber<String> indexSubscriber = new OperationSubscriber<>();
+        keyVaultCollection.createIndex(Indexes.ascending("keyAltNames"),
+                new IndexOptions().unique(true)
+                        .partialFilterExpression(Filters.exists("keyAltNames")))
+                .subscribe(indexSubscriber);
+        indexSubscriber.await();
+
+        MongoCollection<Document> collection = mongoClient.getDatabase("test").getCollection("coll");
+        successSubscriber = new OperationSubscriber<>();
+        collection.drop().subscribe(successSubscriber);
+        successSubscriber.await();
+
+        // Create the ClientEncryption instance
+        ClientEncryptionSettings clientEncryptionSettings = ClientEncryptionSettings.builder()
+                .keyVaultMongoClientSettings(commonClientSettings)
+                .keyVaultNamespace(keyVaultNamespace.getFullName())
+                .kmsProviders(kmsProviders)
+                .build();
+
+        ClientEncryption clientEncryption = ClientEncryptions.create(clientEncryptionSettings);
+
+        BsonBinary dataKeyId = clientEncryption.createDataKey("local", new DataKeyOptions());
+
+        // Explicitly encrypt a field
+        BsonBinary encryptedFieldValue = clientEncryption.encrypt(new BsonString("123456789"),
+                new EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic").keyId(dataKeyId));
+
+        ObservableSubscriber<InsertOneResult> insertOneSubscriber = new OperationSubscriber<>();
+        collection.insertOne(new Document("encryptedField", encryptedFieldValue)).subscribe(insertOneSubscriber);
+        insertOneSubscriber.await();
+
+        ObservableSubscriber<Document> documentSubscriber = new OperationSubscriber<>();
+        collection.find().first().subscribe(documentSubscriber);
+
+        Document doc = documentSubscriber.get().get(0);
+        System.out.println(doc.toJson());
+
+        // release resources
+        clientEncryption.close();
+        mongoClient.close();
+    }
+}
diff --git a/driver-reactive-streams/src/examples/reactivestreams/tour/ClientSideEncryptionQueryableEncryptionTour.java b/driver-reactive-streams/src/examples/reactivestreams/tour/ClientSideEncryptionQueryableEncryptionTour.java
new file mode 100644
index 00000000000..f847d346783 --- /dev/null +++ b/driver-reactive-streams/src/examples/reactivestreams/tour/ClientSideEncryptionQueryableEncryptionTour.java @@ -0,0 +1,179 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package reactivestreams.tour; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.ConnectionString; +import com.mongodb.MongoClientSettings; +import com.mongodb.client.model.Filters; +import com.mongodb.client.model.vault.DataKeyOptions; +import com.mongodb.client.result.InsertOneResult; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ClusterType; +import com.mongodb.reactivestreams.client.MongoClient; +import com.mongodb.reactivestreams.client.MongoClients; +import com.mongodb.reactivestreams.client.MongoCollection; +import com.mongodb.reactivestreams.client.MongoDatabase; +import com.mongodb.reactivestreams.client.vault.ClientEncryption; +import com.mongodb.reactivestreams.client.vault.ClientEncryptions; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.BsonType; + +import java.security.SecureRandom; +import java.util.Base64; +import java.util.HashMap; +import java.util.Map; + +import static java.lang.String.format; +import static reactivestreams.helpers.SubscriberHelpers.ObservableSubscriber; +import static reactivestreams.helpers.SubscriberHelpers.OperationSubscriber; + +/** + * ClientSideEncryption Queryable Encryption tour + */ +public class ClientSideEncryptionQueryableEncryptionTour { + + /** + * Run this main method to test queryable encryption. + *
+     * Requires the latest mongodb-crypt library in the class path.
+     *
+     * @param args ignored args
+     */
+    public static void main(final String[] args) {
+        String uri = args.length == 0 ? "mongodb://localhost:27017,localhost:27018,localhost:27019/" : args[0];
+        ConnectionString connectionString = new ConnectionString(uri);
+
+        // This would have to be the same master key as was used to create the encryption key
+        byte[] localMasterKey = new byte[96];
+        new SecureRandom().nextBytes(localMasterKey);
+
+        Map<String, Map<String, Object>> kmsProviders = new HashMap<String, Map<String, Object>>() {{
+            put("local", new HashMap<String, Object>() {{
+                put("key", localMasterKey);
+            }});
+        }};
+
+        MongoClient mongoClient = MongoClients.create(connectionString);
+        ObservableSubscriber<Void> successSubscriber = new OperationSubscriber<>();
+        mongoClient.getDatabase("keyvault").getCollection("datakeys").drop().subscribe(successSubscriber);
+        successSubscriber.await();
+
+        successSubscriber = new OperationSubscriber<>();
+        mongoClient.getDatabase("docsExamples").drop().subscribe(successSubscriber);
+        successSubscriber.await();
+
+        ClusterDescription clusterDescription = mongoClient.getClusterDescription();
+        ClusterType clusterType = clusterDescription.getType();
+        if (clusterType.equals(ClusterType.STANDALONE) || clusterType.equals(ClusterType.UNKNOWN)) {
+            System.out.println("Requires a replica set or sharded cluster");
+            return;
+        }
+        if (clusterDescription.getServerDescriptions().get(0).getMaxWireVersion() < 17) {
+            System.out.println("Requires MongoDB 6.0 or greater");
+            return;
+        }
+
+        String keyVaultNamespace = "keyvault.datakeys";
+        ClientEncryptionSettings clientEncryptionSettings = ClientEncryptionSettings.builder()
+                .keyVaultMongoClientSettings(MongoClientSettings.builder()
+                        .applyConnectionString(connectionString)
+                        .build())
+                .keyVaultNamespace(keyVaultNamespace)
+                .kmsProviders(kmsProviders)
+                .build();
+
+        ClientEncryption clientEncryption = ClientEncryptions.create(clientEncryptionSettings);
+
+        ObservableSubscriber<BsonBinary> keySubscriber1 = new OperationSubscriber<>();
+        ObservableSubscriber<BsonBinary> keySubscriber2 = new OperationSubscriber<>();
+        clientEncryption.createDataKey("local", new DataKeyOptions()).subscribe(keySubscriber1);
+        clientEncryption.createDataKey("local", new DataKeyOptions()).subscribe(keySubscriber2);
+
+        BsonBinary dataKeyId1 = keySubscriber1.first();
+        BsonBinary dataKeyId2 = keySubscriber2.first();
+        String base64DataKeyId1 = Base64.getEncoder().encodeToString(dataKeyId1.getData());
+        String base64DataKeyId2 = Base64.getEncoder().encodeToString(dataKeyId2.getData());
+
+        // Create an encryptedFieldsMap with an indexed and unindexed field.
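+        // (Editorial note, not part of the original example: only 'encryptedIndexed'
+        // below declares a 'queries' option, so only that field supports server-side
+        // equality queries; 'encryptedUnindexed' is still stored encrypted and is
+        // decrypted on read, but it cannot appear in a query filter.)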
+        Map<String, BsonDocument> encryptedFieldsMap = new HashMap<>();
+        encryptedFieldsMap.put("docsExamples.encrypted", BsonDocument.parse("{"
+                + "fields: ["
+                + "{'path': 'encryptedIndexed', 'bsonType': 'string', 'queries': {'queryType': 'equality'}, 'keyId': "
+                + "{'$binary': {'base64' : '" + base64DataKeyId1 + "', 'subType': '" + dataKeyId1.asBinary().getType() + "'}}},"
+                + "{'path': 'encryptedUnindexed', 'bsonType': 'string', 'keyId': "
+                + "{'$binary': {'base64' : '" + base64DataKeyId2 + "', 'subType': '" + dataKeyId2.asBinary().getType() + "'}}}"
+                + "]"
+                + "}"));
+
+        AutoEncryptionSettings autoEncryptionSettings = AutoEncryptionSettings.builder()
+                .keyVaultNamespace(keyVaultNamespace)
+                .kmsProviders(kmsProviders)
+                .encryptedFieldsMap(encryptedFieldsMap)
+                .build();
+
+        MongoClient encryptedClient =
+                MongoClients.create(MongoClientSettings.builder()
+                        .applyConnectionString(connectionString)
+                        .autoEncryptionSettings(autoEncryptionSettings).build());
+
+        // Create an FLE 2 collection.
+        MongoDatabase docsExamplesDatabase = encryptedClient.getDatabase("docsExamples");
+        successSubscriber = new OperationSubscriber<>();
+        docsExamplesDatabase.createCollection("encrypted").subscribe(successSubscriber);
+        successSubscriber.await();
+        MongoCollection<BsonDocument> encryptedCollection = docsExamplesDatabase.getCollection("encrypted", BsonDocument.class);
+
+        // Auto encrypt an insert and find with "Indexed" and "Unindexed" encrypted fields.
+        String indexedValue = "indexedValue";
+        String unindexedValue = "unindexedValue";
+
+        OperationSubscriber<InsertOneResult> insertSubscriber = new OperationSubscriber<>();
+        encryptedCollection.insertOne(BsonDocument.parse(format("{'_id': 1, 'encryptedIndexed': '%s', 'encryptedUnindexed': '%s'}",
+                indexedValue, unindexedValue)))
+                .subscribe(insertSubscriber);
+        insertSubscriber.await();
+
+        OperationSubscriber<BsonDocument> findSubscriber = new OperationSubscriber<>();
+        encryptedCollection.find(Filters.eq("encryptedIndexed", "indexedValue")).first().subscribe(findSubscriber);
+        BsonDocument findResult = findSubscriber.first();
+        assert findResult != null;
+        assert findResult.getString("encryptedIndexed").equals(new BsonString(indexedValue));
+        assert findResult.getString("encryptedUnindexed").equals(new BsonString(unindexedValue));
+
+
+        // Find documents without decryption.
+        MongoCollection<BsonDocument> unencryptedCollection = mongoClient.getDatabase("docsExamples")
+                .getCollection("encrypted", BsonDocument.class);
+
+        findSubscriber = new OperationSubscriber<>();
+        unencryptedCollection.find(Filters.eq("_id", 1)).first().subscribe(findSubscriber);
+        findResult = findSubscriber.first();
+
+        assert findResult != null;
+        assert findResult.get("encryptedIndexed").getBsonType().equals(BsonType.BINARY);
+        assert findResult.get("encryptedUnindexed").getBsonType().equals(BsonType.BINARY);
+
+        // release resources
+        clientEncryption.close();
+        encryptedClient.close();
+        mongoClient.close();
+    }
+}
diff --git a/driver-reactive-streams/src/examples/reactivestreams/tour/ClientSideEncryptionSimpleTour.java b/driver-reactive-streams/src/examples/reactivestreams/tour/ClientSideEncryptionSimpleTour.java
new file mode 100644
index 00000000000..a9246a8d45d
--- /dev/null
+++ b/driver-reactive-streams/src/examples/reactivestreams/tour/ClientSideEncryptionSimpleTour.java
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package reactivestreams.tour; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.ConnectionString; +import com.mongodb.MongoClientSettings; +import com.mongodb.client.result.InsertOneResult; +import com.mongodb.reactivestreams.client.MongoClient; +import com.mongodb.reactivestreams.client.MongoClients; +import com.mongodb.reactivestreams.client.MongoCollection; +import org.bson.Document; +import reactivestreams.helpers.SubscriberHelpers.ObservableSubscriber; +import reactivestreams.helpers.SubscriberHelpers.OperationSubscriber; +import reactivestreams.helpers.SubscriberHelpers.PrintDocumentSubscriber; + +import java.security.SecureRandom; +import java.util.HashMap; +import java.util.Map; + +/** + * ClientSideEncryption Simple tour + */ +public class ClientSideEncryptionSimpleTour { + + /** + * Run this main method to see the output of this quick example. + *
+     * Requires the mongodb-crypt library in the class path and mongocryptd on the system path.
+     * Assumes the schema has already been created in MongoDB.
+     *
+     * @param args ignored args
+     */
+    public static void main(final String[] args) {
+
+        // This would have to be the same master key as was used to create the encryption key
+        byte[] localMasterKey = new byte[96];
+        new SecureRandom().nextBytes(localMasterKey);
+
+        Map<String, Map<String, Object>> kmsProviders = new HashMap<String, Map<String, Object>>() {{
+            put("local", new HashMap<String, Object>() {{
+                put("key", localMasterKey);
+            }});
+        }};
+
+        String keyVaultNamespace = "admin.datakeys";
+
+        AutoEncryptionSettings autoEncryptionSettings = AutoEncryptionSettings.builder()
+                .keyVaultNamespace(keyVaultNamespace)
+                .kmsProviders(kmsProviders)
+                .build();
+
+        MongoClientSettings clientSettings = (
+                args.length == 0
+                        ? MongoClientSettings.builder()
+                        : MongoClientSettings.builder().applyConnectionString(new ConnectionString(args[0])))
+                .autoEncryptionSettings(autoEncryptionSettings)
+                .build();
+
+        MongoClient mongoClient = MongoClients.create(clientSettings);
+        MongoCollection<Document> collection = mongoClient.getDatabase("test").getCollection("coll");
+
+        ObservableSubscriber<Void> successSubscriber = new OperationSubscriber<>();
+        collection.drop().subscribe(successSubscriber);
+        successSubscriber.await();
+
+        ObservableSubscriber<InsertOneResult> insertOneSubscriber = new OperationSubscriber<>();
+        collection.insertOne(new Document("encryptedField", "123456789")).subscribe(insertOneSubscriber);
+        insertOneSubscriber.await();
+
+        ObservableSubscriber<Document> documentSubscriber = new PrintDocumentSubscriber();
+        collection.find().first().subscribe(documentSubscriber);
+        documentSubscriber.await();
+
+        // release resources
+        mongoClient.close();
+    }
+}
diff --git a/driver-reactive-streams/src/examples/reactivestreams/tour/Person.java b/driver-reactive-streams/src/examples/reactivestreams/tour/Person.java
new file mode 100644
index 00000000000..4a856692b3f
--- /dev/null
+++ b/driver-reactive-streams/src/examples/reactivestreams/tour/Person.java
@@ -0,0 +1,167 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package reactivestreams.tour; + +import org.bson.types.ObjectId; + +/** + * The Person Pojo + */ +public final class Person { + private ObjectId id; + private String name; + private int age; + private Address address; + + /** + * Construct a new instance + */ + public Person() { + } + + /** + * Construct a new instance + * + * @param name the name + * @param age the age + * @param address the address + */ + public Person(final String name, final int age, final Address address) { + this.name = name; + this.age = age; + this.address = address; + } + + /** + * Returns the id + * + * @return the id + */ + public ObjectId getId() { + return id; + } + + /** + * Sets the id + * + * @param id the id + */ + public void setId(final ObjectId id) { + this.id = id; + } + + /** + * Returns the name + * + * @return the name + */ + public String getName() { + return name; + } + + /** + * Sets the name + * + * @param name the name + */ + public void setName(final String name) { + this.name = name; + } + + /** + * Returns the age + * + * @return the age + */ + public int getAge() { + return age; + } + + /** + * Sets the age + * + * @param age the age + */ + public void setAge(final int age) { + this.age = age; + } + + /** + * Returns the address + * + * @return the address + */ + public Address getAddress() { + return address; + } + + /** + * Sets the address + * + * @param address the address + */ + public void setAddress(final Address address) { + this.address = address; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Person person = (Person) o; + + if (getAge() != person.getAge()) { + return false; + } + if (getId() != null ? !getId().equals(person.getId()) : person.getId() != null) { + return false; + } + if (getName() != null ? !getName().equals(person.getName()) : person.getName() != null) { + return false; + } + if (getAddress() != null ? !getAddress().equals(person.getAddress()) : person.getAddress() != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = getId() != null ? getId().hashCode() : 0; + result = 31 * result + (getName() != null ? getName().hashCode() : 0); + result = 31 * result + getAge(); + result = 31 * result + (getAddress() != null ? getAddress().hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "Person{" + + "id='" + id + "'" + + ", name='" + name + "'" + + ", age=" + age + + ", address=" + address + + "}"; + } +} diff --git a/driver-reactive-streams/src/examples/reactivestreams/tour/PojoQuickTour.java b/driver-reactive-streams/src/examples/reactivestreams/tour/PojoQuickTour.java new file mode 100644 index 00000000000..682ab0ce517 --- /dev/null +++ b/driver-reactive-streams/src/examples/reactivestreams/tour/PojoQuickTour.java @@ -0,0 +1,154 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ */
+
+package reactivestreams.tour;
+
+import com.mongodb.client.result.DeleteResult;
+import com.mongodb.client.result.InsertManyResult;
+import com.mongodb.client.result.InsertOneResult;
+import com.mongodb.client.result.UpdateResult;
+import com.mongodb.reactivestreams.client.MongoClient;
+import com.mongodb.reactivestreams.client.MongoClients;
+import com.mongodb.reactivestreams.client.MongoCollection;
+import com.mongodb.reactivestreams.client.MongoDatabase;
+import org.bson.codecs.configuration.CodecRegistry;
+import org.bson.codecs.pojo.PojoCodecProvider;
+import reactivestreams.helpers.SubscriberHelpers.ObservableSubscriber;
+import reactivestreams.helpers.SubscriberHelpers.OperationSubscriber;
+import reactivestreams.helpers.SubscriberHelpers.PrintToStringSubscriber;
+
+import java.util.List;
+
+import static com.mongodb.client.model.Filters.eq;
+import static com.mongodb.client.model.Filters.gt;
+import static com.mongodb.client.model.Filters.not;
+import static com.mongodb.client.model.Updates.combine;
+import static com.mongodb.client.model.Updates.set;
+import static java.util.Arrays.asList;
+import static org.bson.codecs.configuration.CodecRegistries.fromProviders;
+import static org.bson.codecs.configuration.CodecRegistries.fromRegistries;
+
+/**
+ * The POJO QuickTour code example
+ */
+public class PojoQuickTour {
+    /**
+     * Run this main method to see the output of this quick example.
+     *
+     * @param args takes an optional single argument for the connection string
+     */
+    public static void main(final String[] args) {
+        MongoClient mongoClient;
+
+        if (args.length == 0) {
+            // connect to the local database server
+            mongoClient = MongoClients.create();
+        } else {
+            mongoClient = MongoClients.create(args[0]);
+        }
+
+        // create codec registry for POJOs
+        CodecRegistry pojoCodecRegistry = fromRegistries(MongoClients.getDefaultCodecRegistry(),
+                fromProviders(PojoCodecProvider.builder().automatic(true).build()));
+
+        // get handle to "mydb" database
+        MongoDatabase database = mongoClient.getDatabase("mydb").withCodecRegistry(pojoCodecRegistry);
+
+        // get a handle to the "people" collection
+        MongoCollection<Person> collection = database.getCollection("people", Person.class);
+
+        // drop all the data in it
+        ObservableSubscriber<Void> successSubscriber = new OperationSubscriber<>();
+        collection.drop().subscribe(successSubscriber);
+        successSubscriber.await();
+
+        // make a document and insert it
+        Person ada = new Person("Ada Byron", 20, new Address("St James Square", "London", "W1"));
+        System.out.println("Original Person Model: " + ada);
+
+        ObservableSubscriber<InsertOneResult> insertOneSubscriber = new OperationSubscriber<>();
+        collection.insertOne(ada).subscribe(insertOneSubscriber);
+        insertOneSubscriber.await();
+
+        // get it (it's the only one in there, since we dropped the rest earlier on)
+        ObservableSubscriber<Person> personSubscriber = new PrintToStringSubscriber<>();
+        collection.find().first().subscribe(personSubscriber);
+        personSubscriber.await();
+
+
+        // now, let's add some more people so we can explore queries and cursors
+        List<Person> people = asList(
+                new Person("Charles Babbage", 45, new Address("5 Devonshire Street", "London", "W11")),
+                new Person("Alan Turing", 28, new Address("Bletchley Hall", "Bletchley Park", "MK12")),
+                new Person("Timothy Berners-Lee", 61, new Address("Colehill", "Wimborne", null))
+        );
+
+        ObservableSubscriber<InsertManyResult> insertManySubscriber = new OperationSubscriber<>();
+        collection.insertMany(people).subscribe(insertManySubscriber);
+        insertManySubscriber.await();
+
+        // get all the documents in the collection and print them out
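+        // (Editorial note: a subscriber that has already completed cannot be reused,
+        // which is why a fresh PrintToStringSubscriber is created for each query below.)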
+        personSubscriber = new PrintToStringSubscriber<>();
+        collection.find().subscribe(personSubscriber);
+        personSubscriber.await();
+
+        // now use a query to get 1 document out
+        personSubscriber = new PrintToStringSubscriber<>();
+        collection.find(eq("address.city", "Wimborne")).first().subscribe(personSubscriber);
+        personSubscriber.await();
+
+        // now let's find everyone over 30
+        personSubscriber = new PrintToStringSubscriber<>();
+        collection.find(gt("age", 30)).subscribe(personSubscriber);
+        personSubscriber.await();
+
+        // Update One
+        ObservableSubscriber<UpdateResult> updateSubscriber = new OperationSubscriber<>();
+        collection.updateOne(eq("name", "Ada Byron"), combine(set("age", 23), set("name", "Ada Lovelace")))
+                .subscribe(updateSubscriber);
+        updateSubscriber.await();
+
+        // Update Many
+        updateSubscriber = new OperationSubscriber<>();
+        collection.updateMany(not(eq("zip", null)), set("zip", null))
+                .subscribe(updateSubscriber);
+        updateSubscriber.await();
+
+        // Replace One
+        updateSubscriber = new OperationSubscriber<>();
+        collection.replaceOne(eq("name", "Ada Lovelace"), ada).subscribe(updateSubscriber);
+        updateSubscriber.await();
+
+        // Delete One
+        ObservableSubscriber<DeleteResult> deleteSubscriber = new OperationSubscriber<>();
+        collection.deleteOne(eq("address.city", "Wimborne")).subscribe(deleteSubscriber);
+        deleteSubscriber.await();
+
+        // Delete Many
+        deleteSubscriber = new OperationSubscriber<>();
+        collection.deleteMany(eq("address.city", "London")).subscribe(deleteSubscriber);
+        deleteSubscriber.await();
+
+        // Clean up
+        successSubscriber = new OperationSubscriber<>();
+        database.drop().subscribe(successSubscriber);
+        successSubscriber.await();
+
+        // release resources
+        mongoClient.close();
+    }
+}
diff --git a/driver-reactive-streams/src/examples/reactivestreams/tour/QuickTour.java b/driver-reactive-streams/src/examples/reactivestreams/tour/QuickTour.java
new file mode 100644
index 00000000000..5953f75cdbb
--- /dev/null
+++ b/driver-reactive-streams/src/examples/reactivestreams/tour/QuickTour.java
@@ -0,0 +1,193 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package reactivestreams.tour; + +import com.mongodb.client.result.DeleteResult; +import com.mongodb.client.result.InsertManyResult; +import com.mongodb.client.result.InsertOneResult; +import com.mongodb.client.result.UpdateResult; +import com.mongodb.reactivestreams.client.MongoClient; +import com.mongodb.reactivestreams.client.MongoClients; +import com.mongodb.reactivestreams.client.MongoCollection; +import com.mongodb.reactivestreams.client.MongoDatabase; +import org.bson.Document; +import reactivestreams.helpers.SubscriberHelpers.ObservableSubscriber; +import reactivestreams.helpers.SubscriberHelpers.OperationSubscriber; +import reactivestreams.helpers.SubscriberHelpers.PrintDocumentSubscriber; +import reactivestreams.helpers.SubscriberHelpers.PrintSubscriber; + +import java.util.ArrayList; +import java.util.List; + +import static com.mongodb.client.model.Accumulators.sum; +import static com.mongodb.client.model.Aggregates.group; +import static com.mongodb.client.model.Aggregates.match; +import static com.mongodb.client.model.Aggregates.project; +import static com.mongodb.client.model.Filters.and; +import static com.mongodb.client.model.Filters.eq; +import static com.mongodb.client.model.Filters.exists; +import static com.mongodb.client.model.Filters.gt; +import static com.mongodb.client.model.Filters.gte; +import static com.mongodb.client.model.Filters.lt; +import static com.mongodb.client.model.Filters.lte; +import static com.mongodb.client.model.Projections.excludeId; +import static com.mongodb.client.model.Sorts.descending; +import static com.mongodb.client.model.Updates.inc; +import static com.mongodb.client.model.Updates.set; +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; + +/** + * The QuickTour code example + */ +public class QuickTour { + /** + * Run this main method to see the output of this quick example. 
+     *
+     * @param args takes an optional single argument for the connection string
+     */
+    public static void main(final String[] args) {
+        MongoClient mongoClient;
+
+        if (args.length == 0) {
+            // connect to the local database server
+            mongoClient = MongoClients.create();
+        } else {
+            mongoClient = MongoClients.create(args[0]);
+        }
+
+        // get handle to "mydb" database
+        MongoDatabase database = mongoClient.getDatabase("mydb");
+
+        // get a handle to the "test" collection
+        MongoCollection<Document> collection = database.getCollection("test");
+
+        // drop all the data in it
+        ObservableSubscriber<Void> successSubscriber = new OperationSubscriber<>();
+        collection.drop().subscribe(successSubscriber);
+        successSubscriber.await();
+
+        // make a document and insert it
+        Document doc = new Document("name", "MongoDB")
+                .append("type", "database")
+                .append("count", 1)
+                .append("info", new Document("x", 203).append("y", 102));
+
+        ObservableSubscriber<InsertOneResult> insertOneSubscriber = new OperationSubscriber<>();
+        collection.insertOne(doc).subscribe(insertOneSubscriber);
+        insertOneSubscriber.await();
+
+        // get it (it's the only one in there, since we dropped the rest earlier on)
+        ObservableSubscriber<Document> documentSubscriber = new PrintDocumentSubscriber();
+        collection.find().first().subscribe(documentSubscriber);
+        documentSubscriber.await();
+
+        // now, let's add lots of little documents to the collection so we can explore queries and cursors
+        List<Document> documents = new ArrayList<>();
+        for (int i = 0; i < 100; i++) {
+            documents.add(new Document("i", i));
+        }
+
+        ObservableSubscriber<InsertManyResult> insertManySubscriber = new OperationSubscriber<>();
+        collection.insertMany(documents).subscribe(insertManySubscriber);
+        insertManySubscriber.await();
+
+        // find first
+        documentSubscriber = new PrintDocumentSubscriber();
+        collection.find().first().subscribe(documentSubscriber);
+        documentSubscriber.await();
+
+        // let's get all the documents in the collection and print them out
+        documentSubscriber = new PrintDocumentSubscriber();
+        collection.find().subscribe(documentSubscriber);
+        documentSubscriber.await();
+
+        // Query Filters
+
+        // now use a query to get 1 document out
+        documentSubscriber = new PrintDocumentSubscriber();
+        collection.find(eq("i", 71)).first().subscribe(documentSubscriber);
+        documentSubscriber.await();
+
+        // now use a range query to get a larger subset
+        documentSubscriber = new PrintDocumentSubscriber();
+        collection.find(gt("i", 50)).subscribe(documentSubscriber);
+        documentSubscriber.await();
+
+        // range query with multiple constraints
+        documentSubscriber = new PrintDocumentSubscriber();
+        collection.find(and(gt("i", 50), lte("i", 100))).subscribe(documentSubscriber);
+        documentSubscriber.await();
+
+        // Sorting
+        documentSubscriber = new PrintDocumentSubscriber();
+        collection.find(exists("i")).sort(descending("i")).first().subscribe(documentSubscriber);
+        documentSubscriber.await();
+
+        // Projection
+        documentSubscriber = new PrintDocumentSubscriber();
+        collection.find().projection(excludeId()).first().subscribe(documentSubscriber);
+        documentSubscriber.await();
+
+        // Aggregation
+        documentSubscriber = new PrintDocumentSubscriber();
+        collection.aggregate(asList(
+                match(gt("i", 0)),
+                project(Document.parse("{ITimes10: {$multiply: ['$i', 10]}}")))
+        ).subscribe(documentSubscriber);
+        documentSubscriber.await();
+
+        documentSubscriber = new PrintDocumentSubscriber();
+        collection.aggregate(singletonList(group(null, sum("total", "$i"))))
+                .first().subscribe(documentSubscriber);
+        documentSubscriber.await();
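+        // (Editorial addition, not in the original tour: a simple count over the same
+        // range filter; countDocuments returns a Publisher<Long>, consumed with the
+        // same OperationSubscriber helper used above.)
+        ObservableSubscriber<Long> countSubscriber = new OperationSubscriber<>();
+        collection.countDocuments(gt("i", 50)).subscribe(countSubscriber);
+        countSubscriber.await();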
+
+        // Update One
+        ObservableSubscriber<UpdateResult> updateSubscriber = new OperationSubscriber<>();
+        collection.updateOne(eq("i", 10), set("i", 110)).subscribe(updateSubscriber);
+        updateSubscriber.await();
+
+        // Update Many
+        updateSubscriber = new OperationSubscriber<>();
+        collection.updateMany(lt("i", 100), inc("i", 100)).subscribe(updateSubscriber);
+        updateSubscriber.await();
+
+        // Delete One
+        ObservableSubscriber<DeleteResult> deleteSubscriber = new OperationSubscriber<>();
+        collection.deleteOne(eq("i", 110)).subscribe(deleteSubscriber);
+        deleteSubscriber.await();
+
+        // Delete Many
+        deleteSubscriber = new OperationSubscriber<>();
+        collection.deleteMany(gte("i", 100)).subscribe(deleteSubscriber);
+        deleteSubscriber.await();
+
+        // Create Index
+        OperationSubscriber<String> createIndexSubscriber = new PrintSubscriber<>("Create Index Result: %s");
+        collection.createIndex(new Document("i", 1)).subscribe(createIndexSubscriber);
+        createIndexSubscriber.await();
+
+        // Clean up
+        successSubscriber = new OperationSubscriber<>();
+        collection.drop().subscribe(successSubscriber);
+        successSubscriber.await();
+
+        // release resources
+        mongoClient.close();
+    }
+}
diff --git a/driver-reactive-streams/src/examples/reactivestreams/tour/package-info.java b/driver-reactive-streams/src/examples/reactivestreams/tour/package-info.java
new file mode 100644
index 00000000000..580c9ebfbec
--- /dev/null
+++ b/driver-reactive-streams/src/examples/reactivestreams/tour/package-info.java
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package reactivestreams.tour;
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/AggregatePublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/AggregatePublisher.java
new file mode 100644
index 00000000000..4f18fe272bc
--- /dev/null
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/AggregatePublisher.java
@@ -0,0 +1,275 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import com.mongodb.ExplainVerbosity;
+import com.mongodb.MongoNamespace;
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
+import com.mongodb.client.cursor.TimeoutMode;
+import com.mongodb.client.model.Aggregates;
+import com.mongodb.client.model.Collation;
+import com.mongodb.client.model.MergeOptions;
+import com.mongodb.lang.Nullable;
+import org.bson.BsonValue;
+import org.bson.Document;
+import org.bson.conversions.Bson;
+import org.reactivestreams.Publisher;
+import org.reactivestreams.Subscriber;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Publisher for aggregate.
+ *
+ * @param <TResult> The type of the result.
+ * @since 1.0
+ */
+public interface AggregatePublisher<TResult> extends Publisher<TResult> {
+
+    /**
+     * Enables writing to temporary files. A null value indicates that it's unspecified.
+     *
+     * @param allowDiskUse true if writing to temporary files is enabled
+     * @return this
+     * @mongodb.driver.manual reference/command/aggregate/ Aggregation
+     */
+    AggregatePublisher<TResult> allowDiskUse(@Nullable Boolean allowDiskUse);
+
+    /**
+     * Sets the maximum execution time on the server for this operation.
+     *
+     * @param maxTime  the max time
+     * @param timeUnit the time unit, which may not be null
+     * @return this
+     * @mongodb.driver.manual reference/method/cursor.maxTimeMS/#cursor.maxTimeMS Max Time
+     */
+    AggregatePublisher<TResult> maxTime(long maxTime, TimeUnit timeUnit);
+
+    /**
+     * The maximum amount of time for the server to wait on new documents to satisfy a {@code $changeStream} aggregation.
+     *
+     * <p>A zero value will be ignored.</p>
+     *
+     * @param maxAwaitTime the max await time
+     * @param timeUnit     the time unit, which may not be null
+     * @return this
+     * @mongodb.server.release 3.6
+     * @since 1.6
+     */
+    AggregatePublisher<TResult> maxAwaitTime(long maxAwaitTime, TimeUnit timeUnit);
+
+    /**
+     * Sets the bypass document level validation flag.
+     *
+     * <p>Note: This only applies when an {@code $out} stage is specified.</p>
+     *
+     * @param bypassDocumentValidation If true, allows the write to opt-out of document level validation.
+     * @return this
+     * @since 1.2
+     * @mongodb.driver.manual reference/command/aggregate/ Aggregation
+     * @mongodb.server.release 3.2
+     */
+    AggregatePublisher<TResult> bypassDocumentValidation(@Nullable Boolean bypassDocumentValidation);
+
+    /**
+     * Aggregates documents according to the specified aggregation pipeline, which must end with an
+     * {@link Aggregates#out(String, String) $out} or {@link Aggregates#merge(MongoNamespace, MergeOptions) $merge} stage.
+     * Calling this method and then {@linkplain Publisher#subscribe(Subscriber) subscribing} to the returned {@link Publisher}
+     * is the preferred alternative to {@linkplain #subscribe(Subscriber) subscribing} to this {@link AggregatePublisher},
+     * because this method does what is explicitly requested without executing implicit operations.
+     *
+     * @throws IllegalStateException if the pipeline does not end with an {@code $out} or {@code $merge} stage
+     * @return an empty publisher that indicates when the operation has completed
+     * @mongodb.driver.manual aggregation/ Aggregation
+     */
+    Publisher<Void> toCollection();
+
+    /**
+     * Requests {@link AggregatePublisher} to start streaming data according to the specified aggregation pipeline.
+     * <ul>
+     * <li>
+     * If the aggregation pipeline ends with an {@link Aggregates#out(String, String) $out} or
+     * {@link Aggregates#merge(MongoNamespace, MergeOptions) $merge} stage,
+     * then {@linkplain MongoCollection#find() finds all} documents in the affected namespace and produces them.
+     * You may want to use {@link #toCollection()} instead.</li>
+     * <li>
+     * Otherwise, produces no elements.</li>
+     * </ul>
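+     * <p>Illustrative usage for a pipeline ending in {@code $out} (the {@code collection} and {@code subscriber}
+     * names are assumptions for this sketch, not part of the API):</p>
+     * <pre>{@code
+     * // preferred: runs the $out stage without the implicit "find and produce" step
+     * collection.aggregate(asList(match(gt("i", 0)), out("targetCollection")))
+     *           .toCollection()
+     *           .subscribe(subscriber);
+     * }</pre>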
+     */
+    void subscribe(Subscriber<? super TResult> s);
+
+    /**
+     * Sets the collation options
+     *
+     * <p>A null value represents the server default.</p>
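+     * <p>For example (an illustrative, case-insensitive English collation; {@code CollationStrength} lives in
+     * {@code com.mongodb.client.model}):</p>
+     * <pre>{@code
+     * publisher.collation(Collation.builder().locale("en").collationStrength(CollationStrength.SECONDARY).build());
+     * }</pre>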
+     * @param collation the collation options to use
+     * @return this
+     * @since 1.3
+     * @mongodb.server.release 3.4
+     */
+    AggregatePublisher<TResult> collation(@Nullable Collation collation);
+
+    /**
+     * Sets the comment for this operation. A null value means no comment is set.
+     *
+     * @param comment the comment
+     * @return this
+     * @mongodb.server.release 3.6
+     * @since 1.7
+     */
+    AggregatePublisher<TResult> comment(@Nullable String comment);
+
+    /**
+     * Sets the comment for this operation. A null value means no comment is set.
+     *
+     * <p>The comment can be any valid BSON type for server versions 4.4 and above.
+     * Server versions between 3.6 and 4.2 only support string as comment,
+     * and providing a non-string type will result in a server-side error.</p>
+     *
+     * @param comment the comment
+     * @return this
+     * @since 4.6
+     * @mongodb.server.release 3.6
+     */
+    AggregatePublisher<TResult> comment(@Nullable BsonValue comment);
+
+    /**
+     * Sets the hint for which index to use. A null value means no hint is set.
+     *
+     * @param hint the hint
+     * @return this
+     * @mongodb.server.release 3.6
+     * @since 1.7
+     */
+    AggregatePublisher<TResult> hint(@Nullable Bson hint);
+
+    /**
+     * Sets the hint for which index to use. A null value means no hint is set.
+     *
+     * <p>Note: If {@link AggregatePublisher#hint(Bson)} is set, that hint will be used instead of any hint string.</p>
+     *
+     * @param hint the name of the index which should be used for the operation
+     * @return this
+     * @since 4.4
+     */
+    AggregatePublisher<TResult> hintString(@Nullable String hint);
+
+    /**
+     * Add top-level variables to the aggregation.
+     *
+     * <p>For MongoDB 5.0+, the aggregate command accepts a {@code let} option. This option is a document consisting of zero or more
+     * fields representing variables that are accessible to the aggregation pipeline. The key is the name of the variable and the value is
+     * a constant in the aggregate expression language. Each parameter name is then usable to access the value of the corresponding
+     * expression with the "$$" syntax within aggregate expression contexts which may require the use of $expr or a pipeline.</p>
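+     * <p>Illustrative sketch ({@code collection} and {@code subscriber} are assumed names): the variable below is read in the
+     * pipeline as {@code $$targetTotal}.</p>
+     * <pre>{@code
+     * collection.aggregate(singletonList(match(expr(Document.parse("{$gt: ['$total', '$$targetTotal']}")))))
+     *           .let(new Document("targetTotal", 100))
+     *           .subscribe(subscriber);
+     * }</pre>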
+     *
+     * @param variables the variables
+     * @return this
+     * @since 4.3
+     * @mongodb.server.release 5.0
+     */
+    AggregatePublisher<TResult> let(@Nullable Bson variables);
+
+    /**
+     * Sets the number of documents to return per batch.
+     *
+     * <p>Overrides the {@link org.reactivestreams.Subscription#request(long)} value for setting the batch size, allowing for fine-grained
+     * control over the underlying cursor.</p>
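+     * <p>For example (illustrative; {@code pipeline} and {@code subscriber} are assumed): fetch results from the server
+     * 64 at a time, independent of the demand signalled downstream.</p>
+     * <pre>{@code
+     * collection.aggregate(pipeline).batchSize(64).subscribe(subscriber);
+     * }</pre>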
+     *
+     * @param batchSize the batch size
+     * @return this
+     * @since 1.8
+     * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size
+     */
+    AggregatePublisher<TResult> batchSize(int batchSize);
+
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * <p>
+     * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     * via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
+     * <p>If the {@code timeout} is set then:</p>
+     * <ul>
+     *  <li>For non-tailable cursors, the default value of timeoutMode is {@link TimeoutMode#CURSOR_LIFETIME}</li>
+     *  <li>For tailable cursors, the default value of timeoutMode is {@link TimeoutMode#ITERATION} and it is an error
+     *  to configure it as {@link TimeoutMode#CURSOR_LIFETIME}</li>
+     * </ul>
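+     * <p>Illustrative sketch, assuming a client-wide CSOT {@code timeout} has been configured (the
+     * {@code MongoClientSettings.Builder#timeout} call shown reflects that assumption; other names are placeholders):</p>
+     * <pre>{@code
+     * MongoClientSettings settings = MongoClientSettings.builder()
+     *         .applyConnectionString(new ConnectionString("mongodb://localhost"))
+     *         .timeout(5, TimeUnit.SECONDS)
+     *         .build();
+     * // with the timeout set, the whole lifetime of the cursor is bounded by it:
+     * collection.aggregate(pipeline).timeoutMode(TimeoutMode.CURSOR_LIFETIME).subscribe(subscriber);
+     * }</pre>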
+ * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + AggregatePublisher timeoutMode(TimeoutMode timeoutMode); + + /** + * Helper to return a publisher limited to the first result. + * + * @return a Publisher which will contain a single item. + * @since 1.8 + */ + Publisher first(); + + /** + * Explain the execution plan for this operation with the server's default verbosity level + * + * @return the execution plan + * @since 4.2 + * @mongodb.driver.manual reference/command/explain/ + * @mongodb.server.release 3.6 + */ + Publisher explain(); + + /** + * Explain the execution plan for this operation with the given verbosity level + * + * @param verbosity the verbosity of the explanation + * @return the execution plan + * @since 4.2 + * @mongodb.driver.manual reference/command/explain/ + * @mongodb.server.release 3.6 + */ + Publisher explain(ExplainVerbosity verbosity); + + /** + * Explain the execution plan for this operation with the server's default verbosity level + * + * @param the type of the document class + * @param explainResultClass the document class to decode into + * @return the execution plan + * @since 4.2 + * @mongodb.driver.manual reference/command/explain/ + * @mongodb.server.release 3.6 + */ + Publisher explain(Class explainResultClass); + + /** + * Explain the execution plan for this operation with the given verbosity level + * + * @param the type of the document class + * @param explainResultClass the document class to decode into + * @param verbosity the verbosity of the explanation + * @return the execution plan + * @since 4.2 + * @mongodb.driver.manual reference/command/explain/ + * @mongodb.server.release 3.6 + */ + Publisher explain(Class explainResultClass, ExplainVerbosity verbosity); +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ChangeStreamPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ChangeStreamPublisher.java new file mode 100644 index 00000000000..27dc7b58238 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ChangeStreamPublisher.java @@ -0,0 +1,181 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.changestream.ChangeStreamDocument; +import com.mongodb.client.model.changestream.FullDocument; +import com.mongodb.client.model.changestream.FullDocumentBeforeChange; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonTimestamp; +import org.bson.BsonValue; +import org.reactivestreams.Publisher; + +import java.util.concurrent.TimeUnit; + +/** + * Iterable for change streams. + * + * @param The type of the result. + * @mongodb.server.release 3.6 + * @since 1.6 + */ +public interface ChangeStreamPublisher extends Publisher> { + /** + * Sets the fullDocument value. 
+     *
+     * @param fullDocument the fullDocument
+     * @return this
+     */
+    ChangeStreamPublisher<TResult> fullDocument(FullDocument fullDocument);
+
+    /**
+     * Sets the fullDocumentBeforeChange value.
+     *
+     * @param fullDocumentBeforeChange the fullDocumentBeforeChange
+     * @return this
+     * @since 4.7
+     * @mongodb.server.release 6.0
+     */
+    ChangeStreamPublisher<TResult> fullDocumentBeforeChange(FullDocumentBeforeChange fullDocumentBeforeChange);
+
+    /**
+     * Sets the logical starting point for the new change stream.
+     *
+     * @param resumeToken the resume token
+     * @return this
+     */
+    ChangeStreamPublisher<TResult> resumeAfter(BsonDocument resumeToken);
+
+    /**
+     * The change stream will only provide changes that occurred after the specified timestamp.
+     *
+     * <p>Any command run against the server will return an operation time that can be used here.</p>
+     *
+     * <p>The default value is an operation time obtained from the server before the change stream was created.</p>
+     *
+     * @param startAtOperationTime the start at operation time
+     * @since 1.9
+     * @return this
+     * @mongodb.server.release 4.0
+     * @mongodb.driver.manual reference/method/db.runCommand/
+     */
+    ChangeStreamPublisher<TResult> startAtOperationTime(BsonTimestamp startAtOperationTime);
+
+    /**
+     * Similar to {@code resumeAfter}, this option takes a resume token and starts a
+     * new change stream returning the first notification after the token.
+     *
+     * <p>This will allow users to watch collections that have been dropped and recreated
+     * or newly renamed collections without missing any notifications.</p>
+     *
+     * <p>Note: The server will report an error if both {@code startAfter} and {@code resumeAfter} are specified.</p>
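+     * <p>Illustrative resume flow ({@code previousEvent} is assumed to be a {@link ChangeStreamDocument}
+     * captured from an earlier stream; {@code collection} and {@code subscriber} are placeholders):</p>
+     * <pre>{@code
+     * BsonDocument lastSeenToken = previousEvent.getResumeToken();
+     * collection.watch().startAfter(lastSeenToken).subscribe(subscriber);
+     * }</pre>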
+     *
+     * @param startAfter the startAfter resumeToken
+     * @return this
+     * @since 1.12
+     * @mongodb.server.release 4.2
+     * @mongodb.driver.manual changeStreams/#change-stream-start-after
+     */
+    ChangeStreamPublisher<TResult> startAfter(BsonDocument startAfter);
+
+    /**
+     * Sets the maximum await execution time on the server for this operation.
+     *
+     * @param maxAwaitTime the max await time. A zero value will be ignored, and indicates that the driver should respect the server's
+     *                     default value
+     * @param timeUnit     the time unit, which may not be null
+     * @return this
+     */
+    ChangeStreamPublisher<TResult> maxAwaitTime(long maxAwaitTime, TimeUnit timeUnit);
+
+    /**
+     * Sets the collation options
+     *
+     * <p>A null value represents the server default.</p>
+     * @param collation the collation options to use
+     * @return this
+     */
+    ChangeStreamPublisher<TResult> collation(@Nullable Collation collation);
+
+    /**
+     * Returns a {@code MongoIterable} containing the results of the change stream based on the document class provided.
+     *
+     * @param clazz the class to use for the raw result.
+     * @param <TDocument> the result type
+     * @return the new Mongo Iterable
+     */
+    <TDocument> Publisher<TDocument> withDocumentClass(Class<TDocument> clazz);
+
+    /**
+     * Sets the number of documents to return per batch.
+     *
+     * <p>Overrides the {@link org.reactivestreams.Subscription#request(long)} value for setting the batch size, allowing for fine-grained
+     * control over the underlying cursor.</p>
+     *
+     * @param batchSize the batch size
+     * @return this
+     * @since 1.8
+     * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size
+     */
+    ChangeStreamPublisher<TResult> batchSize(int batchSize);
+
+    /**
+     * Helper to return a publisher limited to the first result.
+     *
+     * @return a Publisher which will contain a single item.
+     * @since 1.8
+     */
+    Publisher<ChangeStreamDocument<TResult>> first();
+
+    /**
+     * Sets the comment for this operation. A null value means no comment is set.
+     *
+     * @param comment the comment
+     * @return this
+     * @since 4.6
+     * @mongodb.server.release 3.6
+     */
+    ChangeStreamPublisher<TResult> comment(@Nullable String comment);
+
+    /**
+     * Sets the comment for this operation. A null value means no comment is set.
+     *
+     * <p>The comment can be any valid BSON type for server versions 4.4 and above.
+     * Server versions between 3.6 and 4.2 only support string as comment,
+     * and providing a non-string type will result in a server-side error.</p>
+     *
+     * @param comment the comment
+     * @return this
+     * @since 4.6
+     * @mongodb.server.release 3.6
+     */
+    ChangeStreamPublisher<TResult> comment(@Nullable BsonValue comment);
+
+    /**
+     * Sets whether to include expanded change stream events, which are:
+     * createIndexes, dropIndexes, modify, create, shardCollection,
+     * reshardCollection, refineCollectionShardKey. False by default.
+     *
+     * @param showExpandedEvents true to include expanded events
+     * @return this
+     * @since 4.7
+     * @mongodb.server.release 6.0
+     */
+    ChangeStreamPublisher<TResult> showExpandedEvents(boolean showExpandedEvents);
+}
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ClientSession.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ClientSession.java
new file mode 100644
index 00000000000..3d9354e9ae9
--- /dev/null
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ClientSession.java
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import com.mongodb.TransactionOptions;
+import org.reactivestreams.Publisher;
+
+/**
+ * A client session that supports transactions.
+ *
+ * @since 1.9
+ */
+public interface ClientSession extends com.mongodb.session.ClientSession {
+    /**
+     * Returns true if there is an active transaction on this session, and false otherwise
+     *
+     * @return true if there is an active transaction on this session
+     * @mongodb.server.release 4.0
+     */
+    boolean hasActiveTransaction();
+
+    /**
+     * Notify the client session that a message has been sent.
+     *
+     * <p>
+     * For internal use only
+     * </p>
+ * + * @return true if this is the first message sent, false otherwise + * @since 4.0 + */ + boolean notifyMessageSent(); + + /** + * Notify the client session that command execution is being initiated. This should be called before server selection occurs. + *
+     * <p>
+     * For internal use only
+     * </p>
+     * @param operation the operation
+     */
+    void notifyOperationInitiated(Object operation);
+
+    /**
+     * Gets the transaction options. Only call this method if the session has an active transaction.
+     *
+     * @return the transaction options
+     */
+    TransactionOptions getTransactionOptions();
+
+    /**
+     * Start a transaction in the context of this session with default transaction options. A transaction cannot be started if there is
+     * already an active transaction on this session.
+     *
+     * @mongodb.server.release 4.0
+     */
+    void startTransaction();
+
+    /**
+     * Start a transaction in the context of this session with the given transaction options. A transaction cannot be started if there is
+     * already an active transaction on this session.
+     *
+     * @param transactionOptions the options to apply to the transaction
+     *
+     * @mongodb.server.release 4.0
+     */
+    void startTransaction(TransactionOptions transactionOptions);
+
+    /**
+     * Commit a transaction in the context of this session. A transaction can only be committed if one has first been started.
+     *
+     * @return an empty publisher that indicates when the operation has completed
+     * @mongodb.server.release 4.0
+     */
+    Publisher<Void> commitTransaction();
+
+    /**
+     * Abort a transaction in the context of this session. A transaction can only be aborted if one has first been started.
+     *
+     * @return an empty publisher that indicates when the operation has completed
+     * @mongodb.server.release 4.0
+     */
+    Publisher<Void> abortTransaction();
+}
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/DistinctPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/DistinctPublisher.java
new file mode 100644
index 00000000000..f91b2a50722
--- /dev/null
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/DistinctPublisher.java
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
+import com.mongodb.client.cursor.TimeoutMode;
+import com.mongodb.client.model.Collation;
+import com.mongodb.lang.Nullable;
+import org.bson.BsonValue;
+import org.bson.conversions.Bson;
+import org.reactivestreams.Publisher;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Iterable for distinct.
+ *
+ * @param <TResult> The type of the result.
+ * @since 1.0
+ */
+public interface DistinctPublisher<TResult> extends Publisher<TResult> {
+
+    /**
+     * Sets the query filter to apply to the query.
+     *
+     * @param filter the filter, which may be null.
+     * @return this
+     * @mongodb.driver.manual reference/method/db.collection.find/ Filter
+     */
+    DistinctPublisher<TResult> filter(@Nullable Bson filter);
+
+    /**
+     * Sets the maximum execution time on the server for this operation.
+     *
+     * @param maxTime  the max time
+     * @param timeUnit the time unit, which may not be null
+     * @return this
+     */
+    DistinctPublisher<TResult> maxTime(long maxTime, TimeUnit timeUnit);
+
+    /**
+     * Sets the collation options
+     *
+     * <p>A null value represents the server default.</p>
+     * @param collation the collation options to use
+     * @return this
+     * @since 1.3
+     * @mongodb.server.release 3.4
+     */
+    DistinctPublisher<TResult> collation(@Nullable Collation collation);
+
+    /**
+     * Sets the number of documents to return per batch.
+     *
+     * <p>Overrides the {@link org.reactivestreams.Subscription#request(long)} value for setting the batch size, allowing for fine-grained
+     * control over the underlying cursor.</p>
+     *
+     * @param batchSize the batch size
+     * @return this
+     * @since 1.8
+     * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size
+     */
+    DistinctPublisher<TResult> batchSize(int batchSize);
+
+    /**
+     * Sets the comment for this operation. A null value means no comment is set.
+     *
+     * @param comment the comment
+     * @return this
+     * @since 4.6
+     * @mongodb.server.release 4.4
+     */
+    DistinctPublisher<TResult> comment(@Nullable String comment);
+
+    /**
+     * Sets the comment for this operation. A null value means no comment is set.
+     *
+     * @param comment the comment
+     * @return this
+     * @since 4.6
+     * @mongodb.server.release 4.4
+     */
+    DistinctPublisher<TResult> comment(@Nullable BsonValue comment);
+
+    /**
+     * Sets the hint for which index to use. A null value means no hint is set.
+     *
+     * @param hint the hint
+     * @return this
+     * @since 5.3
+     */
+    DistinctPublisher<TResult> hint(@Nullable Bson hint);
+
+    /**
+     * Sets the hint for which index to use. A null value means no hint is set.
+     *
+     * @param hint the name of the index which should be used for the operation
+     * @return this
+     * @since 5.3
+     */
+    DistinctPublisher<TResult> hintString(@Nullable String hint);
+
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * <p>
+     * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     * via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
+ * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + DistinctPublisher timeoutMode(TimeoutMode timeoutMode); + + /** + * Helper to return a publisher limited to the first result. + * + * @return a Publisher which will contain a single item. + * @since 1.8 + */ + Publisher first(); + +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/FindPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/FindPublisher.java new file mode 100644 index 00000000000..1128c87bd02 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/FindPublisher.java @@ -0,0 +1,341 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.CursorType; +import com.mongodb.ExplainVerbosity; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.Projections; +import com.mongodb.lang.Nullable; +import org.bson.BsonValue; +import org.bson.Document; +import org.bson.conversions.Bson; +import org.reactivestreams.Publisher; + +import java.util.concurrent.TimeUnit; + +/** + * Publisher interface for find. + * + * @param The type of the result. + * @since 1.0 + */ +public interface FindPublisher extends Publisher { + + /** + * Helper to return a publisher limited to the first result. + * + * @return a Publisher which will contain a single item. + */ + Publisher first(); + + /** + * Sets the query filter to apply to the query. + * + * @param filter the filter, which may be null. + * @return this + * @mongodb.driver.manual reference/method/db.collection.find/ Filter + */ + FindPublisher filter(@Nullable Bson filter); + + /** + * Sets the limit to apply. + * + * @param limit the limit + * @return this + * @mongodb.driver.manual reference/method/cursor.limit/#cursor.limit Limit + */ + FindPublisher limit(int limit); + /** + * Sets the number of documents to skip. + * + * @param skip the number of documents to skip + * @return this + * @mongodb.driver.manual reference/method/cursor.skip/#cursor.skip Skip + */ + FindPublisher skip(int skip); + + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time + * @param timeUnit the time unit, which may not be null + * @return this + * @mongodb.driver.manual reference/method/cursor.maxTimeMS/#cursor.maxTimeMS Max Time + */ + FindPublisher maxTime(long maxTime, TimeUnit timeUnit); + + /** + * The maximum amount of time for the server to wait on new documents to satisfy a tailable cursor + * query. This only applies to a TAILABLE_AWAIT cursor. When the cursor is not a TAILABLE_AWAIT cursor, + * this option is ignored. + *
+     * <p>
+     * On servers >= 3.2, this option will be specified on the getMore command as "maxTimeMS". The default
+     * is no value: no "maxTimeMS" is sent to the server with the getMore command.
+     * </p>
+     * <p>
+     * On servers < 3.2, this option is ignored, and indicates that the driver should respect the server's default value
+     * </p>
+     * <p>A zero value will be ignored.</p>
+     *
+     * @param maxAwaitTime the max await time
+     * @param timeUnit     the time unit, which may not be null
+     * @return this
+     * @mongodb.driver.manual reference/method/cursor.maxTimeMS/#cursor.maxTimeMS Max Time
+     * @since 1.2
+     */
+    FindPublisher<TResult> maxAwaitTime(long maxAwaitTime, TimeUnit timeUnit);
+
+    /**
+     * Sets a document describing the fields to return for all matching documents.
+     *
+     * @param projection the projection document, which may be null.
+     * @return this
+     * @mongodb.driver.manual reference/method/db.collection.find/ Projection
+     * @see Projections
+     */
+    FindPublisher<TResult> projection(@Nullable Bson projection);
+
+    /**
+     * Sets the sort criteria to apply to the query.
+     *
+     * @param sort the sort criteria, which may be null.
+     * @return this
+     * @mongodb.driver.manual reference/method/cursor.sort/ Sort
+     */
+    FindPublisher<TResult> sort(@Nullable Bson sort);
+
+    /**
+     * The server normally times out idle cursors after an inactivity period (10 minutes)
+     * to prevent excess memory use. Set this option to prevent that.
+     *
+     * @param noCursorTimeout true if cursor timeout is disabled
+     * @return this
+     */
+    FindPublisher<TResult> noCursorTimeout(boolean noCursorTimeout);
+
+    /**
+     * Get partial results from a sharded cluster if one or more shards are unreachable (instead of throwing an error).
+     *
+     * @param partial if partial results for sharded clusters is enabled
+     * @return this
+     */
+    FindPublisher<TResult> partial(boolean partial);
+
+    /**
+     * Sets the cursor type.
+     *
+     * @param cursorType the cursor type
+     * @return this
+     */
+    FindPublisher<TResult> cursorType(CursorType cursorType);
+
+    /**
+     * Sets the collation options
+     *
+     * <p>A null value represents the server default.</p>
+     * @param collation the collation options to use
+     * @return this
+     * @since 1.3
+     * @mongodb.server.release 3.4
+     */
+    FindPublisher<TResult> collation(@Nullable Collation collation);
+
+    /**
+     * Sets the comment to the query. A null value means no comment is set.
+     *
+     * @param comment the comment
+     * @return this
+     * @since 1.6
+     */
+    FindPublisher<TResult> comment(@Nullable String comment);
+
+    /**
+     * Sets the comment for this operation. A null value means no comment is set.
+     *
+     * <p>The comment can be any valid BSON type for server versions 4.4 and above.
+     * Server versions between 3.6 and 4.2 only support string as comment,
+     * and providing a non-string type will result in a server-side error.</p>
+     *
+     * @param comment the comment
+     * @return this
+     * @since 4.6
+     * @mongodb.server.release 3.6
+     */
+    FindPublisher<TResult> comment(@Nullable BsonValue comment);
+
+    /**
+     * Sets the hint for which index to use. A null value means no hint is set.
+     *
+     * @param hint the hint
+     * @return this
+     * @since 1.6
+     */
+    FindPublisher<TResult> hint(@Nullable Bson hint);
+
+    /**
+     * Sets the hint for which index to use. A null value means no hint is set.
+     *
+     * @param hint the name of the index which should be used for the operation
+     * @return this
+     * @since 1.13
+     */
+    FindPublisher<TResult> hintString(@Nullable String hint);
+
+    /**
+     * Add top-level variables to the operation. A null value means no variables are set.
+     *
+     * <p>Allows for improved command readability by separating the variables from the query text.</p>
+     *
+     * @param variables for find operation or null
+     * @return this
+     * @mongodb.driver.manual reference/command/find/
+     * @mongodb.server.release 5.0
+     * @since 4.6
+     */
+    FindPublisher<TResult> let(@Nullable Bson variables);
+
+    /**
+     * Sets the exclusive upper bound for a specific index. A null value means no max is set.
+     *
+     * @param max the max
+     * @return this
+     * @since 1.6
+     */
+    FindPublisher<TResult> max(@Nullable Bson max);
+
+    /**
+     * Sets the minimum inclusive lower bound for a specific index. A null value means no min is set.
+     *
+     * @param min the min
+     * @return this
+     * @since 1.6
+     */
+    FindPublisher<TResult> min(@Nullable Bson min);
+
+    /**
+     * Sets the returnKey. If true the find operation will return only the index keys in the resulting documents.
+     *
+     * @param returnKey the returnKey
+     * @return this
+     * @since 1.6
+     */
+    FindPublisher<TResult> returnKey(boolean returnKey);
+
+    /**
+     * Sets the showRecordId. Set to true to add a field {@code $recordId} to the returned documents.
+     *
+     * @param showRecordId the showRecordId
+     * @return this
+     * @since 1.6
+     */
+    FindPublisher<TResult> showRecordId(boolean showRecordId);
+
+    /**
+     * Sets the number of documents to return per batch.
+     *
+     * <p>Overrides the {@link org.reactivestreams.Subscription#request(long)} value for setting the batch size, allowing for fine-grained
+     * control over the underlying cursor.</p>
+     *
+     * @param batchSize the batch size
+     * @return this
+     * @since 1.8
+     * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size
+     */
+    FindPublisher<TResult> batchSize(int batchSize);
+
+    /**
+     * Enables writing to temporary files on the server. When set to true, the server
+     * can write temporary data to disk while executing the find operation.
+     *
+     * <p>This option is sent only if the caller explicitly sets it to true.</p>
+     *
+     * @param allowDiskUse the allowDiskUse
+     * @return this
+     * @since 4.1
+     * @mongodb.server.release 4.4
+     */
+    FindPublisher<TResult> allowDiskUse(@Nullable Boolean allowDiskUse);
+
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * <p>
+     * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     * via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
+     * <p>If the {@code timeout} is set then:</p>
+     * <ul>
+     *  <li>For non-tailable cursors, the default value of timeoutMode is {@link TimeoutMode#CURSOR_LIFETIME}</li>
+     *  <li>For tailable cursors, the default value of timeoutMode is {@link TimeoutMode#ITERATION} and it is an error
+     *  to configure it as {@link TimeoutMode#CURSOR_LIFETIME}</li>
+     * </ul>
+ * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + FindPublisher timeoutMode(TimeoutMode timeoutMode); + + /** + * Explain the execution plan for this operation with the server's default verbosity level + * + * @return the execution plan + * @since 4.2 + * @mongodb.driver.manual reference/command/explain/ + * @mongodb.server.release 3.2 + */ + Publisher explain(); + + /** + * Explain the execution plan for this operation with the given verbosity level + * + * @param verbosity the verbosity of the explanation + * @return the execution plan + * @since 4.2 + * @mongodb.driver.manual reference/command/explain/ + * @mongodb.server.release 3.2 + */ + Publisher explain(ExplainVerbosity verbosity); + + /** + * Explain the execution plan for this operation with the server's default verbosity level + * + * @param the type of the document class + * @param explainResultClass the document class to decode into + * @return the execution plan + * @since 4.2 + * @mongodb.driver.manual reference/command/explain/ + * @mongodb.server.release 3.2 + */ + Publisher explain(Class explainResultClass); + + /** + * Explain the execution plan for this operation with the given verbosity level + * + * @param the type of the document class + * @param explainResultClass the document class to decode into + * @param verbosity the verbosity of the explanation + * @return the execution plan + * @since 4.2 + * @mongodb.driver.manual reference/command/explain/ + * @mongodb.server.release 3.2 + */ + Publisher explain(Class explainResultClass, ExplainVerbosity verbosity); +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListCollectionNamesPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListCollectionNamesPublisher.java new file mode 100644 index 00000000000..a28fcff1030 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListCollectionNamesPublisher.java @@ -0,0 +1,100 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.lang.Nullable; +import org.bson.BsonValue; +import org.bson.conversions.Bson; +import org.reactivestreams.Publisher; + +import java.util.concurrent.TimeUnit; + +/** + * Publisher for listing collection names. + * + * @since 5.0 + * @mongodb.driver.manual reference/command/listCollections/ listCollections + */ +public interface ListCollectionNamesPublisher extends Publisher { + /** + * Sets the query filter to apply to the query. + * + * @param filter the filter, which may be null. + * @return this + * @mongodb.driver.manual reference/method/db.collection.find/ Filter + */ + ListCollectionNamesPublisher filter(@Nullable Bson filter); + + /** + * Sets the maximum execution time on the server for this operation. 
+ * + * @param maxTime the max time + * @param timeUnit the time unit, which may not be null + * @return this + * @mongodb.driver.manual reference/operator/meta/maxTimeMS/ Max Time + */ + ListCollectionNamesPublisher maxTime(long maxTime, TimeUnit timeUnit); + + /** + * Sets the number of documents to return per batch. + * + *
+     * <p>Overrides the {@link org.reactivestreams.Subscription#request(long)} value for setting the batch size, allowing for fine-grained
+     * control over the underlying cursor.</p>
+ * + * @param batchSize the batch size + * @return this + * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size + */ + ListCollectionNamesPublisher batchSize(int batchSize); + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @mongodb.server.release 4.4 + */ + ListCollectionNamesPublisher comment(@Nullable String comment); + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @mongodb.server.release 4.4 + */ + ListCollectionNamesPublisher comment(@Nullable BsonValue comment); + + /** + * Helper to return a publisher limited to the first result. + * + * @return a Publisher which will contain a single item. + */ + Publisher first(); + + /** + * Sets the {@code authorizedCollections} field of the {@code listCollections} command. + * + * @param authorizedCollections If {@code true}, allows executing the {@code listCollections} command, + * which has the {@code nameOnly} field set to {@code true}, without having the + * + * {@code listCollections} privilege on the database resource. + * @return {@code this}. + * @mongodb.server.release 4.0 + */ + ListCollectionNamesPublisher authorizedCollections(boolean authorizedCollections); +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListCollectionsPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListCollectionsPublisher.java new file mode 100644 index 00000000000..50808928172 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListCollectionsPublisher.java @@ -0,0 +1,111 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.lang.Nullable; +import org.bson.BsonValue; +import org.bson.conversions.Bson; +import org.reactivestreams.Publisher; + +import java.util.concurrent.TimeUnit; + +/** + * Publisher interface for ListCollections. + * + * @param The type of the result. + * @since 1.0 + * @mongodb.driver.manual reference/command/listCollections/ listCollections + */ +public interface ListCollectionsPublisher extends Publisher { + + /** + * Sets the query filter to apply to the query. + * + * @param filter the filter, which may be null. + * @return this + * @mongodb.driver.manual reference/method/db.collection.find/ Filter + */ + ListCollectionsPublisher filter(@Nullable Bson filter); + + /** + * Sets the maximum execution time on the server for this operation. 
+ * + * @param maxTime the max time + * @param timeUnit the time unit, which may not be null + * @return this + * @mongodb.driver.manual reference/operator/meta/maxTimeMS/ Max Time + */ + ListCollectionsPublisher maxTime(long maxTime, TimeUnit timeUnit); + + /** + * Sets the number of documents to return per batch. + * + *
+     * <p>Overrides the {@link org.reactivestreams.Subscription#request(long)} value for setting the batch size, allowing for fine-grained
+     * control over the underlying cursor.</p>
+ * + * @param batchSize the batch size + * @return this + * @since 1.8 + * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size + */ + ListCollectionsPublisher batchSize(int batchSize); + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + ListCollectionsPublisher comment(@Nullable String comment); + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + ListCollectionsPublisher comment(@Nullable BsonValue comment); + + /** + * Sets the timeoutMode for the cursor. + * + *
+     * <p>
+     * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     * via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
+ * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + ListCollectionsPublisher timeoutMode(TimeoutMode timeoutMode); + + /** + * Helper to return a publisher limited to the first result. + * + * @return a Publisher which will contain a single item. + * @since 1.8 + */ + Publisher first(); +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListDatabasesPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListDatabasesPublisher.java new file mode 100644 index 00000000000..0dea2b0e219 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListDatabasesPublisher.java @@ -0,0 +1,134 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +package com.mongodb.reactivestreams.client; + +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.lang.Nullable; +import org.bson.BsonValue; +import org.bson.conversions.Bson; +import org.reactivestreams.Publisher; + +import java.util.concurrent.TimeUnit; + +/** + * Publisher interface for ListDatabases. + * + * @param The type of the result. + * @since 1.0 + */ +public interface ListDatabasesPublisher extends Publisher { + + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time + * @param timeUnit the time unit, which may not be null + * @return this + * @mongodb.driver.manual reference/operator/meta/maxTimeMS/ Max Time + */ + ListDatabasesPublisher maxTime(long maxTime, TimeUnit timeUnit); + + /** + * Sets the query filter to apply to the returned database names. + * + * @param filter the filter, which may be null. + * @return this + * @mongodb.server.release 3.4.2 + * @since 1.7 + */ + ListDatabasesPublisher filter(@Nullable Bson filter); + + /** + * Sets the nameOnly flag that indicates whether the command should return just the database names or return the database names and + * size information. + * + * @param nameOnly the nameOnly flag, which may be null + * @return this + * @mongodb.server.release 3.4.3 + * @since 1.7 + */ + ListDatabasesPublisher nameOnly(@Nullable Boolean nameOnly); + + /** + * Sets the authorizedDatabasesOnly flag that indicates whether the command should return just the databases which the user + * is authorized to see. + * + * @param authorizedDatabasesOnly the authorizedDatabasesOnly flag, which may be null + * @return this + * @since 4.1 + * @mongodb.server.release 4.0 + */ + ListDatabasesPublisher authorizedDatabasesOnly(@Nullable Boolean authorizedDatabasesOnly); + + /** + * Sets the number of documents to return per batch. + * + *
+     * <p>Overrides the {@link org.reactivestreams.Subscription#request(long)} value for setting the batch size, allowing for fine-grained
+     * control over the underlying cursor.</p>
+ * + * @param batchSize the batch size + * @return this + * @since 1.8 + * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size + */ + ListDatabasesPublisher batchSize(int batchSize); + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + ListDatabasesPublisher comment(@Nullable String comment); + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + ListDatabasesPublisher comment(@Nullable BsonValue comment); + + /** + * Sets the timeoutMode for the cursor. + * + *
+     * <p>
+     * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     * via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
+ * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + ListDatabasesPublisher timeoutMode(TimeoutMode timeoutMode); + + /** + * Helper to return a publisher limited to the first result. + * + * @return a Publisher which will contain a single item. + * @since 1.8 + */ + Publisher first(); +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListIndexesPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListIndexesPublisher.java new file mode 100644 index 00000000000..f2abb11a9bb --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListIndexesPublisher.java @@ -0,0 +1,100 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.lang.Nullable; +import org.bson.BsonValue; +import org.reactivestreams.Publisher; + +import java.util.concurrent.TimeUnit; + +/** + * Publisher interface for ListIndexes. + * + * @param The type of the result. + * @since 1.0 + */ +public interface ListIndexesPublisher extends Publisher { + + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time + * @param timeUnit the time unit, which may not be null + * @return this + * @mongodb.driver.manual reference/operator/meta/maxTimeMS/ Max Time + */ + ListIndexesPublisher maxTime(long maxTime, TimeUnit timeUnit); + + /** + * Sets the number of documents to return per batch. + * + *
+     * <p>Overrides the {@link org.reactivestreams.Subscription#request(long)} value for setting the batch size, allowing for fine-grained
+     * control over the underlying cursor.</p>
+ * + * @param batchSize the batch size + * @return this + * @since 1.8 + * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size + */ + ListIndexesPublisher batchSize(int batchSize); + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + ListIndexesPublisher comment(@Nullable String comment); + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + ListIndexesPublisher comment(@Nullable BsonValue comment); + + /** + * Sets the timeoutMode for the cursor. + * + *
+     * <p>
+     * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     * via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
+ * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + ListIndexesPublisher timeoutMode(TimeoutMode timeoutMode); + + /** + * Helper to return a publisher limited to the first result. + * + * @return a Publisher which will contain a single item. + * @since 1.8 + */ + Publisher first(); +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListSearchIndexesPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListSearchIndexesPublisher.java new file mode 100644 index 00000000000..0f4c7d798b3 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ListSearchIndexesPublisher.java @@ -0,0 +1,162 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.ExplainVerbosity; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Evolving; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.Collation; +import com.mongodb.lang.Nullable; +import org.bson.BsonValue; +import org.bson.Document; +import org.reactivestreams.Publisher; + +import java.util.concurrent.TimeUnit; + +/** + * A specific {@link Publisher} interface for listing Atlas Search indexes. + * + * @param The type of the result. + * @since 4.11 + * @mongodb.server.release 6.0 + */ +@Evolving +public interface ListSearchIndexesPublisher extends Publisher { + + /** + * Sets the index name for this operation. + * + * @param indexName the index name. + * @return this + */ + ListSearchIndexesPublisher name(String indexName); + + /** + * Enables writing to temporary files. A null value indicates that it's unspecified. + * + * @param allowDiskUse true if writing to temporary files is enabled + * @return this + * @mongodb.driver.manual reference/command/aggregate/ Aggregation + */ + ListSearchIndexesPublisher allowDiskUse(@Nullable Boolean allowDiskUse); + + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size + * @return this + * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size + */ + ListSearchIndexesPublisher batchSize(int batchSize); + + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time + * @param timeUnit the time unit, which may not be null + * @return this + * @mongodb.driver.manual reference/method/cursor.maxTimeMS/#cursor.maxTimeMS Max Time + */ + ListSearchIndexesPublisher maxTime(long maxTime, TimeUnit timeUnit); + + /** + * Sets the collation options + * + *
+     * <p>A null value represents the server default.</p>
+ * + * @param collation the collation options to use + * @return this + */ + ListSearchIndexesPublisher collation(@Nullable Collation collation); + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + */ + ListSearchIndexesPublisher comment(@Nullable String comment); + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + */ + ListSearchIndexesPublisher comment(@Nullable BsonValue comment); + + /** + * Sets the timeoutMode for the cursor. + * + *
+     * <p>
+     * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     * via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
+ * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + ListSearchIndexesPublisher timeoutMode(TimeoutMode timeoutMode); + + /** + * Helper to return a publisher limited to the first result. + * + * @return a Publisher which will contain a single item. + */ + Publisher first(); + + /** + * Explain the execution plan for this operation with the server's default verbosity level + * + * @return the execution plan + * @mongodb.driver.manual reference/command/explain/ + */ + Publisher explain(); + + /** + * Explain the execution plan for this operation with the given verbosity level + * + * @param verbosity the verbosity of the explanation + * @return the execution plan + * @mongodb.driver.manual reference/command/explain/ + */ + Publisher explain(ExplainVerbosity verbosity); + + /** + * Explain the execution plan for this operation with the server's default verbosity level + * + * @param the type of the document class + * @param explainResultClass the document class to decode into + * @return the execution plan + * @mongodb.driver.manual reference/command/explain/ + */ + Publisher explain(Class explainResultClass); + + /** + * Explain the execution plan for this operation with the given verbosity level + * + * @param the type of the document class + * @param explainResultClass the document class to decode into + * @param verbosity the verbosity of the explanation + * @return the execution plan + * @mongodb.driver.manual reference/command/explain/ + */ + Publisher explain(Class explainResultClass, ExplainVerbosity verbosity); +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MapReducePublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MapReducePublisher.java new file mode 100644 index 00000000000..d104f77ee5f --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MapReducePublisher.java @@ -0,0 +1,235 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.Collation; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; + +import java.util.concurrent.TimeUnit; + +/** + * Publisher for map reduce. + *
+ * <p>
+ * By default, the {@code MapReducePublisher} produces the results inline. You can write map-reduce output to a collection by using the
+ * {@link #collectionName(String)} and {@link #toCollection()} methods.
+ * </p>
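+ * <p>
+ * As an illustrative sketch only (the {@code collection} and {@code resultSubscriber} names are assumed, not part of this API):
+ * </p>
+ * <pre>{@code
+ *  // count documents per category with map-reduce, consuming the inline results
+ *  collection.mapReduce(
+ *          "function() { emit(this.category, 1); }",
+ *          "function(key, values) { return Array.sum(values); }")
+ *      .subscribe(resultSubscriber);
+ * }</pre>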
+ *
+ * @param <TResult> The type of the result.
+ * @since 1.0
+ * @deprecated Superseded by aggregate
+ */
+@Deprecated
+public interface MapReducePublisher<TResult> extends Publisher<TResult> {
+
+    /**
+     * Sets the collectionName for the output of the MapReduce
+     *
+     * <p>
+     * The default action is to replace the collection if it exists; to change this, use {@link #action}.
+     * </p>
+ * + * @param collectionName the name of the collection that you want the map-reduce operation to write its output. + * @return this + * @see #toCollection() + */ + MapReducePublisher collectionName(String collectionName); + + /** + * Sets the JavaScript function that follows the reduce method and modifies the output. + * + * @param finalizeFunction the JavaScript function that follows the reduce method and modifies the output. + * @return this + * @mongodb.driver.manual reference/command/mapReduce/#mapreduce-finalize-cmd Requirements for the finalize Function + */ + MapReducePublisher finalizeFunction(@Nullable String finalizeFunction); + + /** + * Sets the global variables that are accessible in the map, reduce and finalize functions. + * + * @param scope the global variables that are accessible in the map, reduce and finalize functions. + * @return this + * @mongodb.driver.manual reference/command/mapReduce mapReduce + */ + MapReducePublisher scope(@Nullable Bson scope); + + /** + * Sets the sort criteria to apply to the query. + * + * @param sort the sort criteria, which may be null. + * @return this + * @mongodb.driver.manual reference/method/cursor.sort/ Sort + */ + MapReducePublisher sort(@Nullable Bson sort); + + /** + * Sets the query filter to apply to the query. + * + * @param filter the filter to apply to the query. + * @return this + * @mongodb.driver.manual reference/method/db.collection.find/ Filter + */ + MapReducePublisher filter(@Nullable Bson filter); + + /** + * Sets the limit to apply. + * + * @param limit the limit, which may be null + * @return this + * @mongodb.driver.manual reference/method/cursor.limit/#cursor.limit Limit + */ + MapReducePublisher limit(int limit); + + /** + * Sets the flag that specifies whether to convert intermediate data into BSON format between the execution of the map and reduce + * functions. Defaults to false. + * + * @param jsMode the flag that specifies whether to convert intermediate data into BSON format between the execution of the map and + * reduce functions + * @return jsMode + * @mongodb.driver.manual reference/command/mapReduce mapReduce + */ + MapReducePublisher jsMode(boolean jsMode); + + /** + * Sets whether to include the timing information in the result information. + * + * @param verbose whether to include the timing information in the result information. + * @return this + */ + MapReducePublisher verbose(boolean verbose); + + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time + * @param timeUnit the time unit, which may not be null + * @return this + * @mongodb.driver.manual reference/method/cursor.maxTimeMS/#cursor.maxTimeMS Max Time + */ + MapReducePublisher maxTime(long maxTime, TimeUnit timeUnit); + + /** + * Specify the {@code MapReduceAction} to be used when writing to a collection. + * + * @param action an {@link com.mongodb.client.model.MapReduceAction} to perform on the collection + * @return this + */ + MapReducePublisher action(com.mongodb.client.model.MapReduceAction action); + + /** + * Sets the name of the database to output into. + * + * @param databaseName the name of the database to output into. + * @return this + * @mongodb.driver.manual reference/command/mapReduce/#output-to-a-collection-with-an-action output with an action + */ + MapReducePublisher databaseName(@Nullable String databaseName); + + /** + * Sets the bypass document level validation flag. + * + *
+     * <p>
+     * Note: This only applies when an $out stage is specified.
+     * </p>
+     *
+     * @param bypassDocumentValidation If true, allows the write to opt-out of document level validation.
+     * @return this
+     * @since 1.2
+     * @mongodb.driver.manual reference/command/aggregate/ Aggregation
+     * @mongodb.server.release 3.2
+     */
+    MapReducePublisher<TResult> bypassDocumentValidation(@Nullable Boolean bypassDocumentValidation);
+
+    /**
+     * Aggregates documents to a collection according to the specified map-reduce function with the given options, which must not produce
+     * results inline. Calling this method and then {@linkplain Publisher#subscribe(Subscriber) subscribing} to the returned
+     * {@link Publisher} is the preferred alternative to {@linkplain #subscribe(Subscriber) subscribing} to this {@link MapReducePublisher},
+     * because this method does what is explicitly requested without executing implicit operations.
+     *
+     * @return an empty publisher that indicates when the operation has completed
+     * @throws IllegalStateException if a {@linkplain #collectionName(String) collection name} to write the results to has not been specified
+     * @see #collectionName(String)
+     * @mongodb.driver.manual aggregation/ Aggregation
+     */
+    Publisher<Void> toCollection();
+
+    /**
+     * Requests {@link MapReducePublisher} to start streaming data according to the specified map-reduce function with the given options.
+     * <ul>
+     *     <li>
+     *     If the aggregation produces results inline, then {@linkplain MongoCollection#find() finds all} documents in the
+     *     affected namespace and produces them. You may want to use {@link #toCollection()} instead.
+     *     </li>
+     *     <li>
+     *     Otherwise, produces no elements.
+     *     </li>
+     * </ul>
+     */
+    void subscribe(Subscriber<? super TResult> s);
+
+    /**
+     * Sets the collation options
+     *
+     * <p>A null value represents the server default.</p>
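+     * <p>
+     * For example, a sketch of a case-insensitive collation ({@code publisher} is an assumed name):
+     * </p>
+     * <pre>{@code
+     *  publisher.collation(Collation.builder()
+     *      .locale("en")
+     *      .collationStrength(CollationStrength.SECONDARY)
+     *      .build());
+     * }</pre>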
+     * @param collation the collation options to use
+     * @return this
+     * @since 1.3
+     * @mongodb.server.release 3.4
+     */
+    MapReducePublisher<TResult> collation(@Nullable Collation collation);
+
+    /**
+     * Sets the number of documents to return per batch.
+     *
+     * <p>
+     * Overrides the {@link org.reactivestreams.Subscription#request(long)} value for setting the batch size, allowing for fine-grained
+     * control over the underlying cursor.
+     * </p>
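+     * <p>
+     * For example, a sketch that fetches results in batches of 100 ({@code publisher} and {@code resultSubscriber} are assumed names):
+     * </p>
+     * <pre>{@code
+     *  publisher.batchSize(100).subscribe(resultSubscriber);
+     * }</pre>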
+     *
+     * @param batchSize the batch size
+     * @return this
+     * @since 1.8
+     * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size
+     */
+    MapReducePublisher<TResult> batchSize(int batchSize);
+
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     * <p>
+     * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+     * via {@link MongoDatabase} or via {@link MongoCollection}
+     * </p>
+     * <p>
+     * If the {@code timeout} is set then:
+     * <ul>
+     *     <li>For non-tailable cursors, the default value of timeoutMode is {@link TimeoutMode#CURSOR_LIFETIME}</li>
+     *     <li>For tailable cursors, the default value of timeoutMode is {@link TimeoutMode#ITERATION} and it is an error
+     *     to configure it as: {@link TimeoutMode#CURSOR_LIFETIME}</li>
+     * </ul>
+     * </p>
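+     * <p>
+     * For example, a sketch that applies the timeout per batch rather than to the cursor lifetime
+     * ({@code publisher} is an assumed name, and a client-level {@code timeout} is assumed to be configured):
+     * </p>
+     * <pre>{@code
+     *  publisher.timeoutMode(TimeoutMode.ITERATION);
+     * }</pre>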
+ * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + MapReducePublisher timeoutMode(TimeoutMode timeoutMode); + + /** + * Helper to return a publisher limited to the first result. + * + * @return a Publisher which will contain a single item. + * @since 1.8 + */ + Publisher first(); +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoClient.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoClient.java new file mode 100644 index 00000000000..87a3148b8b2 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoClient.java @@ -0,0 +1,76 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.MongoDriverInformation; +import com.mongodb.annotations.Immutable; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ClusterSettings; +import com.mongodb.event.ClusterListener; + +import java.io.Closeable; + +/** + * A client-side representation of a MongoDB cluster. Instances can represent either a standalone MongoDB instance, a replica set, + * or a sharded cluster. Instance of this class are responsible for maintaining an up-to-date state of the cluster, + * and possibly cache resources related to this, including background threads for monitoring, and connection pools. + *
+ * <p>
+ * Instances of this class serve as factories for {@code MongoDatabase} instances.
+ * </p>
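+ * <p>
+ * A minimal usage sketch (the connection string is a placeholder):
+ * </p>
+ * <pre>{@code
+ *  MongoClient client = MongoClients.create("mongodb://localhost");
+ *  MongoDatabase database = client.getDatabase("test");
+ * }</pre>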
+ * + * @since 1.0 + */ +@Immutable +public interface MongoClient extends MongoCluster, Closeable { + + /** + * Close the client, which will close all underlying cached resources, including, for example, + * sockets and background monitoring threads. + */ + void close(); + + /** + * Gets the current cluster description. + * + *
+     * <p>
+     * This method will not block, meaning that it may return a {@link ClusterDescription} whose {@code clusterType} is unknown
+     * and whose {@link com.mongodb.connection.ServerDescription}s are all in the connecting state. If the application requires
+     * notifications after the driver has connected to a member of the cluster, it should register a {@link ClusterListener} via
+     * the {@link ClusterSettings} in {@link com.mongodb.MongoClientSettings}.
+     * </p>
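+     * <p>
+     * A sketch of registering such a listener at construction time ({@code myClusterListener} is an assumed name):
+     * </p>
+     * <pre>{@code
+     *  MongoClientSettings settings = MongoClientSettings.builder()
+     *      .applyToClusterSettings(builder -> builder.addClusterListener(myClusterListener))
+     *      .build();
+     *  MongoClient client = MongoClients.create(settings);
+     * }</pre>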
+ * + * @return the current cluster description + * @see ClusterSettings.Builder#addClusterListener(ClusterListener) + * @see com.mongodb.MongoClientSettings.Builder#applyToClusterSettings(com.mongodb.Block) + * @since 4.1 + */ + ClusterDescription getClusterDescription(); + + /** + * Appends the provided {@link MongoDriverInformation} to the existing metadata. + * + *
+     * <p>
+     * This enables frameworks and libraries to include identifying metadata (e.g., name, version, platform) which might be visible in
+     * the MongoD/MongoS logs. This can assist with diagnostics by making client identity visible to the server.
+     * </p>
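+     * <p>
+     * For example, a sketch for a hypothetical wrapping library:
+     * </p>
+     * <pre>{@code
+     *  client.appendMetadata(MongoDriverInformation.builder()
+     *      .driverName("my-library")   // placeholder name
+     *      .driverVersion("1.0.0")     // placeholder version
+     *      .build());
+     * }</pre>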
+ * Note: Metadata is limited to 512 bytes; any excess will be truncated. + * + * @param mongoDriverInformation the driver information to append to the existing metadata + * @since 5.6 + */ + void appendMetadata(MongoDriverInformation mongoDriverInformation); +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoClients.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoClients.java new file mode 100644 index 00000000000..57ee076039e --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoClients.java @@ -0,0 +1,163 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.ConnectionString; +import com.mongodb.MongoClientException; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoDriverInformation; +import com.mongodb.connection.SocketSettings; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.connection.Cluster; +import com.mongodb.internal.connection.DefaultClusterFactory; +import com.mongodb.internal.connection.InternalConnectionPoolSettings; +import com.mongodb.internal.connection.StreamFactory; +import com.mongodb.internal.connection.StreamFactoryFactory; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.internal.MongoClientImpl; +import com.mongodb.spi.dns.InetAddressResolver; +import org.bson.codecs.configuration.CodecRegistry; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.connection.ServerAddressHelper.getInetAddressResolver; +import static com.mongodb.internal.connection.StreamFactoryHelper.getAsyncStreamFactoryFactory; +import static com.mongodb.internal.event.EventListenerHelper.getCommandListener; + + +/** + * A factory for MongoClient instances. + * + */ +public final class MongoClients { + + /** + * Creates a new client with the default connection string "mongodb://localhost". + * + * @return the client + */ + public static MongoClient create() { + return create(new ConnectionString("mongodb://localhost")); + } + + /** + * Create a new client with the given connection string. + * + * @param connectionString the connection + * @return the client + */ + public static MongoClient create(final String connectionString) { + return create(new ConnectionString(connectionString)); + } + + /** + * Create a new client with the given connection string. + * + * @param connectionString the settings + * @return the client + */ + public static MongoClient create(final ConnectionString connectionString) { + return create(connectionString, null); + } + + /** + * Create a new client with the given connection string. + * + *
+     * <p>Note: Intended for driver and library authors to associate extra driver metadata with the connections.</p>
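+     * <p>
+     * For example, a sketch for a wrapping library ({@code "my-wrapper"} is a placeholder):
+     * </p>
+     * <pre>{@code
+     *  MongoClient client = MongoClients.create(
+     *      new ConnectionString("mongodb://localhost"),
+     *      MongoDriverInformation.builder().driverName("my-wrapper").build());
+     * }</pre>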
+ * + * @param connectionString the settings + * @param mongoDriverInformation any driver information to associate with the MongoClient + * @return the client + * @since 1.3 + */ + public static MongoClient create(final ConnectionString connectionString, + @Nullable final MongoDriverInformation mongoDriverInformation) { + return create(MongoClientSettings.builder().applyConnectionString(connectionString).build(), mongoDriverInformation); + } + + /** + * Create a new client with the given client settings. + * + * @param settings the settings + * @return the client + * @since 1.8 + */ + public static MongoClient create(final MongoClientSettings settings) { + return create(settings, null); + } + + /** + * Creates a new client with the given client settings. + * + *
+     * <p>Note: Intended for driver and library authors to associate extra driver metadata with the connections.</p>
+ * + * @param settings the settings + * @param mongoDriverInformation any driver information to associate with the MongoClient + * @return the client + * @since 1.8 + */ + public static MongoClient create(final MongoClientSettings settings, @Nullable final MongoDriverInformation mongoDriverInformation) { + if (settings.getSocketSettings().getProxySettings().isProxyEnabled()) { + throw new MongoClientException("Proxy is not supported for reactive clients"); + } + InetAddressResolver inetAddressResolver = getInetAddressResolver(settings); + StreamFactoryFactory streamFactoryFactory = getAsyncStreamFactoryFactory(settings, inetAddressResolver); + StreamFactory streamFactory = getStreamFactory(streamFactoryFactory, settings, false); + StreamFactory heartbeatStreamFactory = getStreamFactory(streamFactoryFactory, settings, true); + MongoDriverInformation wrappedMongoDriverInformation = wrapMongoDriverInformation(mongoDriverInformation); + Cluster cluster = createCluster(settings, wrappedMongoDriverInformation, streamFactory, heartbeatStreamFactory); + return new MongoClientImpl(settings, wrappedMongoDriverInformation, cluster, streamFactoryFactory); + } + + /** + * Gets the default codec registry. + * + * @return the default codec registry + * @see com.mongodb.MongoClientSettings#getCodecRegistry() + * @since 1.4 + */ + public static CodecRegistry getDefaultCodecRegistry() { + return MongoClientSettings.getDefaultCodecRegistry(); + } + + private static Cluster createCluster(final MongoClientSettings settings, + @Nullable final MongoDriverInformation mongoDriverInformation, + final StreamFactory streamFactory, final StreamFactory heartbeatStreamFactory) { + notNull("settings", settings); + return new DefaultClusterFactory().createCluster(settings.getClusterSettings(), settings.getServerSettings(), + settings.getConnectionPoolSettings(), InternalConnectionPoolSettings.builder().prestartAsyncWorkManager(true).build(), + TimeoutSettings.create(settings), streamFactory, TimeoutSettings.createHeartbeatSettings(settings), heartbeatStreamFactory, + settings.getCredential(), settings.getLoggerSettings(), getCommandListener(settings.getCommandListeners()), + settings.getApplicationName(), mongoDriverInformation, settings.getCompressorList(), settings.getServerApi(), + settings.getDnsClient()); + } + + private static MongoDriverInformation wrapMongoDriverInformation(@Nullable final MongoDriverInformation mongoDriverInformation) { + return (mongoDriverInformation == null ? MongoDriverInformation.builder() : MongoDriverInformation.builder(mongoDriverInformation)) + .driverName("reactive-streams").build(); + } + + private static StreamFactory getStreamFactory( + final StreamFactoryFactory streamFactoryFactory, final MongoClientSettings settings, + final boolean isHeartbeat) { + SocketSettings socketSettings = isHeartbeat + ? settings.getHeartbeatSocketSettings() : settings.getSocketSettings(); + return streamFactoryFactory.create(socketSettings, settings.getSslSettings()); + } + + private MongoClients() { + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCluster.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCluster.java new file mode 100644 index 00000000000..0e2a11d9926 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCluster.java @@ -0,0 +1,487 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.ClientBulkWriteException; +import com.mongodb.ClientSessionOptions; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoException; +import com.mongodb.MongoNamespace; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Immutable; +import com.mongodb.annotations.Reason; +import com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; +import com.mongodb.client.model.bulk.ClientNamespacedDeleteManyModel; +import com.mongodb.client.model.bulk.ClientNamespacedUpdateManyModel; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; +import com.mongodb.lang.Nullable; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; +import org.reactivestreams.Publisher; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * The client-side representation of a MongoDB cluster operations. + * + *
+ * <p>
+ * The originating {@link MongoClient} is responsible for the closing of resources.
+ * If the originating {@link MongoClient} is closed, then any cluster operations will fail.
+ * </p>
+ * + * @see MongoClient + * @since 5.2 + */ +@Immutable +public interface MongoCluster { + + /** + * Get the codec registry for the MongoCluster. + * + * @return the {@link CodecRegistry} + * @since 5.2 + */ + CodecRegistry getCodecRegistry(); + + /** + * Get the read preference for the MongoCluster. + * + * @return the {@link ReadPreference} + * @since 5.2 + */ + ReadPreference getReadPreference(); + + /** + * Get the write concern for the MongoCluster. + * + * @return the {@link WriteConcern} + * @since 5.2 + */ + WriteConcern getWriteConcern(); + + /** + * Get the read concern for the MongoCluster. + * + * @return the {@link ReadConcern} + * @since 5.2 + * @mongodb.driver.manual reference/readConcern/ Read Concern + */ + ReadConcern getReadConcern(); + + /** + * The time limit for the full execution of an operation. + * + *
+     * <p>
+     * If not null, the following deprecated options will be ignored:
+     * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}
+     * </p>
+     * <ul>
+     *     <li>
+     *     {@code null} means that the timeout mechanism for operations will defer to using:
+     *     <ul>
+     *         <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+     *         available</li>
+     *         <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+     *         <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+     *         <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+     *         See: cursor.maxTimeMS.</li>
+     *         <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+     *         See: {@link com.mongodb.TransactionOptions#getMaxCommitTime}.</li>
+     *     </ul>
+     *     </li>
+     *     <li>{@code 0} means infinite timeout.</li>
+     *     <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
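+     * <p>
+     * For example ({@code cluster} is an assumed name):
+     * </p>
+     * <pre>{@code
+     *  Long timeoutMs = cluster.getTimeout(TimeUnit.MILLISECONDS); // null when no client-wide timeout is configured
+     * }</pre>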
+ * + * @param timeUnit the time unit + * @return the timeout in the given time unit + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + @Nullable + Long getTimeout(TimeUnit timeUnit); + + /** + * Create a new MongoCluster instance with a different codec registry. + * + *
+     * <p>
+     * The {@link CodecRegistry} configured by this method is effectively treated by the driver as an instance of
+     * {@link org.bson.codecs.configuration.CodecProvider}, which {@link CodecRegistry} extends. So there is no benefit to defining
+     * a class that implements {@link CodecRegistry}. Rather, an application should always create {@link CodecRegistry} instances
+     * using the factory methods in {@link org.bson.codecs.configuration.CodecRegistries}.
+     * </p>
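+     * <p>
+     * For example, a sketch that adds POJO support on top of the default registry (one common pattern, not the only one):
+     * </p>
+     * <pre>{@code
+     *  CodecRegistry pojoCodecRegistry = CodecRegistries.fromRegistries(
+     *      MongoClientSettings.getDefaultCodecRegistry(),
+     *      CodecRegistries.fromProviders(PojoCodecProvider.builder().automatic(true).build()));
+     *  MongoCluster clusterWithPojos = cluster.withCodecRegistry(pojoCodecRegistry);
+     * }</pre>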
+ * + * @param codecRegistry the new {@link CodecRegistry} for the database + * @return a new MongoCluster instance with the different codec registry + * @see org.bson.codecs.configuration.CodecRegistries + * @since 5.2 + */ + MongoCluster withCodecRegistry(CodecRegistry codecRegistry); + + /** + * Create a new MongoCluster instance with a different read preference. + * + * @param readPreference the new {@link ReadPreference} for the database + * @return a new MongoCluster instance with the different readPreference + * @since 5.2 + */ + MongoCluster withReadPreference(ReadPreference readPreference); + + /** + * Create a new MongoCluster instance with a different write concern. + * + * @param writeConcern the new {@link WriteConcern} for the database + * @return a new MongoCluster instance with the different writeConcern + * @since 5.2 + */ + MongoCluster withWriteConcern(WriteConcern writeConcern); + + /** + * Create a new MongoCluster instance with a different read concern. + * + * @param readConcern the new {@link ReadConcern} for the database + * @return a new MongoCluster instance with the different ReadConcern + * @since 5.2 + * @mongodb.driver.manual reference/readConcern/ Read Concern + */ + MongoCluster withReadConcern(ReadConcern readConcern); + + /** + * Create a new MongoCluster instance with the set time limit for the full execution of an operation. + * + *
+     * <ul>
+     *     <li>{@code 0} means infinite timeout.</li>
+     *     <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
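+     * <p>
+     * For example, a sketch that limits every operation to two seconds ({@code cluster} is an assumed name):
+     * </p>
+     * <pre>{@code
+     *  MongoCluster clusterWithTimeout = cluster.withTimeout(2, TimeUnit.SECONDS);
+     * }</pre>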
+ * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit + * @return a new MongoCluster instance with the set time limit for the full execution of an operation. + * @since 5.2 + * @see #getTimeout + */ + @Alpha(Reason.CLIENT) + MongoCluster withTimeout(long timeout, TimeUnit timeUnit); + + /** + * Gets a {@link MongoDatabase} instance for the given database name. + * + * @param databaseName the name of the database to retrieve + * @return a {@code MongoDatabase} representing the specified database + * @throws IllegalArgumentException if databaseName is invalid + * @see MongoNamespace#checkDatabaseNameValidity(String) + */ + MongoDatabase getDatabase(String databaseName); + + /** + * Creates a client session with default options. + * + *
+     * <p>Note: A ClientSession instance cannot be used concurrently in multiple operations.</p>
+ * + * @return the client session + * @mongodb.server.release 3.6 + */ + Publisher startSession(); + + /** + * Creates a client session. + * + *
+     * <p>Note: A ClientSession instance cannot be used concurrently in multiple operations.</p>
+ * + * @param options the options for the client session + * @return the client session + * @mongodb.server.release 3.6 + */ + Publisher startSession(ClientSessionOptions options); + + /** + * Get a list of the database names + * + * @return an iterable containing all the names of all the databases + * @mongodb.driver.manual reference/command/listDatabases List Databases + */ + Publisher listDatabaseNames(); + + /** + * Get a list of the database names + * + * @param clientSession the client session with which to associate this operation + * @return an iterable containing all the names of all the databases + * @mongodb.driver.manual reference/command/listDatabases List Databases + * @mongodb.server.release 3.6 + */ + Publisher listDatabaseNames(ClientSession clientSession); + + /** + * Gets the list of databases + * + * @return the list databases iterable interface + */ + ListDatabasesPublisher listDatabases(); + + /** + * Gets the list of databases + * + * @param clientSession the client session with which to associate this operation + * @return the list databases iterable interface + * @mongodb.driver.manual reference/command/listDatabases List Databases + * @mongodb.server.release 3.6 + */ + ListDatabasesPublisher listDatabases(ClientSession clientSession); + + /** + * Gets the list of databases + * + * @param resultClass the class to cast the database documents to + * @param the type of the class to use instead of {@code Document}. + * @return the list databases iterable interface + */ + ListDatabasesPublisher listDatabases(Class resultClass); + + /** + * Gets the list of databases + * + * @param clientSession the client session with which to associate this operation + * @param resultClass the class to cast the database documents to + * @param the type of the class to use instead of {@code Document}. + * @return the list databases iterable interface + * @mongodb.driver.manual reference/command/listDatabases List Databases + * @mongodb.server.release 3.6 + */ + ListDatabasesPublisher listDatabases(ClientSession clientSession, Class resultClass); + + /** + * Creates a change stream for this client. + * + * @return the change stream iterable + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.8 + * @mongodb.server.release 4.0 + */ + ChangeStreamPublisher watch(); + + /** + * Creates a change stream for this client. + * + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.8 + * @mongodb.server.release 4.0 + */ + ChangeStreamPublisher watch(Class resultClass); + + /** + * Creates a change stream for this client. + * + * @param pipeline the aggregation pipeline to apply to the change stream. + * @return the change stream iterable + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.8 + * @mongodb.server.release 4.0 + */ + ChangeStreamPublisher watch(List pipeline); + + /** + * Creates a change stream for this client. + * + * @param pipeline the aggregation pipeline to apply to the change stream + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.8 + * @mongodb.server.release 4.0 + */ + ChangeStreamPublisher watch(List pipeline, Class resultClass); + + /** + * Creates a change stream for this client. 
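+     * <p>
+     * For example, a sketch ({@code clientSession} and {@code changeSubscriber} are assumed names):
+     * </p>
+     * <pre>{@code
+     *  cluster.watch(clientSession)
+     *      .fullDocument(FullDocument.UPDATE_LOOKUP)
+     *      .subscribe(changeSubscriber);
+     * }</pre>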
+ * + * @param clientSession the client session with which to associate this operation + * @return the change stream iterable + * @since 3.8 + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + */ + ChangeStreamPublisher watch(ClientSession clientSession); + + /** + * Creates a change stream for this client. + * + * @param clientSession the client session with which to associate this operation + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @since 3.8 + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + */ + ChangeStreamPublisher watch(ClientSession clientSession, Class resultClass); + + /** + * Creates a change stream for this client. + * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream. + * @return the change stream iterable + * @since 3.8 + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + */ + ChangeStreamPublisher watch(ClientSession clientSession, List pipeline); + + /** + * Creates a change stream for this client. + * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @since 3.8 + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + */ + ChangeStreamPublisher watch(ClientSession clientSession, List pipeline, Class resultClass); + + /** + * Executes a client-level bulk write operation. + * This method is functionally equivalent to {@link #bulkWrite(List, ClientBulkWriteOptions)} + * with the {@linkplain ClientBulkWriteOptions#clientBulkWriteOptions() default options}. + *
+     * <p>
+     * This operation supports {@linkplain MongoClientSettings#getRetryWrites() retryable writes}.
+     * Depending on the number of {@code models}, encoded size of {@code models}, and the size limits in effect,
+     * executing this operation may require multiple {@code bulkWrite} commands.
+     * The eligibility for retries is determined per each {@code bulkWrite} command:
+     * {@link ClientNamespacedUpdateManyModel}, {@link ClientNamespacedDeleteManyModel} in a command render it non-retryable.
+     * </p>
+ * + * @param models The {@linkplain ClientNamespacedWriteModel individual write operations}. + * @return The {@link Publisher} signalling at most one element {@link ClientBulkWriteResult} if the operation is successful, + * or the following errors: + *
+     * <ul>
+     *     <li>
+     *     {@link ClientBulkWriteException} - If and only if the operation is unsuccessful or partially unsuccessful,
+     *     and there is at least one of the following pieces of information to report:
+     *     {@link ClientBulkWriteException#getWriteConcernErrors()}, {@link ClientBulkWriteException#getWriteErrors()},
+     *     {@link ClientBulkWriteException#getPartialResult()}.
+     *     </li>
+     *     <li>
+     *     {@link MongoException} - Only if the operation is unsuccessful.
+     *     </li>
+     * </ul>
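+     * <p>
+     * A sketch of a mixed client-level bulk write (the namespace, documents, and {@code resultSubscriber} are assumed):
+     * </p>
+     * <pre>{@code
+     *  MongoNamespace people = new MongoNamespace("test", "people");
+     *  cluster.bulkWrite(Arrays.asList(
+     *          ClientNamespacedWriteModel.insertOne(people, new Document("name", "Ada")),
+     *          ClientNamespacedWriteModel.deleteOne(people, Filters.eq("name", "Charles"))))
+     *      .subscribe(resultSubscriber);
+     * }</pre>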
+ * @since 5.3 + * @mongodb.server.release 8.0 + * @mongodb.driver.manual reference/command/bulkWrite/ bulkWrite + */ + Publisher bulkWrite(List models); + + /** + * Executes a client-level bulk write operation. + *
+     * <p>
+     * This operation supports {@linkplain MongoClientSettings#getRetryWrites() retryable writes}.
+     * Depending on the number of {@code models}, encoded size of {@code models}, and the size limits in effect,
+     * executing this operation may require multiple {@code bulkWrite} commands.
+     * The eligibility for retries is determined per each {@code bulkWrite} command:
+     * {@link ClientNamespacedUpdateManyModel}, {@link ClientNamespacedDeleteManyModel} in a command render it non-retryable.
+     * </p>
+ * + * @param models The {@linkplain ClientNamespacedWriteModel individual write operations}. + * @param options The options. + * @return The {@link Publisher} signalling at most one element {@link ClientBulkWriteResult} if the operation is successful, + * or the following errors: + *
+     * <ul>
+     *     <li>
+     *     {@link ClientBulkWriteException} - If and only if the operation is unsuccessful or partially unsuccessful,
+     *     and there is at least one of the following pieces of information to report:
+     *     {@link ClientBulkWriteException#getWriteConcernErrors()}, {@link ClientBulkWriteException#getWriteErrors()},
+     *     {@link ClientBulkWriteException#getPartialResult()}.
+     *     </li>
+     *     <li>
+     *     {@link MongoException} - Only if the operation is unsuccessful.
+     *     </li>
+     * </ul>
+ * @since 5.3 + * @mongodb.server.release 8.0 + * @mongodb.driver.manual reference/command/bulkWrite/ bulkWrite + */ + Publisher bulkWrite( + List models, + ClientBulkWriteOptions options); + + /** + * Executes a client-level bulk write operation. + * This method is functionally equivalent to {@link #bulkWrite(ClientSession, List, ClientBulkWriteOptions)} + * with the {@linkplain ClientBulkWriteOptions#clientBulkWriteOptions() default options}. + *
+     * <p>
+     * This operation supports {@linkplain MongoClientSettings#getRetryWrites() retryable writes}.
+     * Depending on the number of {@code models}, encoded size of {@code models}, and the size limits in effect,
+     * executing this operation may require multiple {@code bulkWrite} commands.
+     * The eligibility for retries is determined per each {@code bulkWrite} command:
+     * {@link ClientNamespacedUpdateManyModel}, {@link ClientNamespacedDeleteManyModel} in a command render it non-retryable.
+     * </p>
+ * + * @param clientSession The {@linkplain ClientSession client session} with which to associate this operation. + * @param models The {@linkplain ClientNamespacedWriteModel individual write operations}. + * @return The {@link Publisher} signalling at most one element {@link ClientBulkWriteResult} if the operation is successful, + * or the following errors: + *
+     * <ul>
+     *     <li>
+     *     {@link ClientBulkWriteException} - If and only if the operation is unsuccessful or partially unsuccessful,
+     *     and there is at least one of the following pieces of information to report:
+     *     {@link ClientBulkWriteException#getWriteConcernErrors()}, {@link ClientBulkWriteException#getWriteErrors()},
+     *     {@link ClientBulkWriteException#getPartialResult()}.
+     *     </li>
+     *     <li>
+     *     {@link MongoException} - Only if the operation is unsuccessful.
+     *     </li>
+     * </ul>
+ * @since 5.3 + * @mongodb.server.release 8.0 + * @mongodb.driver.manual reference/command/bulkWrite/ bulkWrite + */ + Publisher bulkWrite( + ClientSession clientSession, + List models); + + /** + * Executes a client-level bulk write operation. + *
+     * <p>
+     * This operation supports {@linkplain MongoClientSettings#getRetryWrites() retryable writes}.
+     * Depending on the number of {@code models}, encoded size of {@code models}, and the size limits in effect,
+     * executing this operation may require multiple {@code bulkWrite} commands.
+     * The eligibility for retries is determined per each {@code bulkWrite} command:
+     * {@link ClientNamespacedUpdateManyModel}, {@link ClientNamespacedDeleteManyModel} in a command render it non-retryable.
+     * </p>
+ * + * @param clientSession The {@linkplain ClientSession client session} with which to associate this operation. + * @param models The {@linkplain ClientNamespacedWriteModel individual write operations}. + * @param options The options. + * @return The {@link Publisher} signalling at most one element {@link ClientBulkWriteResult} if the operation is successful, + * or the following errors: + *
+     * <ul>
+     *     <li>
+     *     {@link ClientBulkWriteException} - If and only if the operation is unsuccessful or partially unsuccessful,
+     *     and there is at least one of the following pieces of information to report:
+     *     {@link ClientBulkWriteException#getWriteConcernErrors()}, {@link ClientBulkWriteException#getWriteErrors()},
+     *     {@link ClientBulkWriteException#getPartialResult()}.
+     *     </li>
+     *     <li>
+     *     {@link MongoException} - Only if the operation is unsuccessful.
+     *     </li>
+     * </ul>
+ * @since 5.3 + * @mongodb.server.release 8.0 + * @mongodb.driver.manual reference/command/bulkWrite/ bulkWrite + */ + Publisher bulkWrite( + ClientSession clientSession, + List models, + ClientBulkWriteOptions options); +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCollection.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCollection.java new file mode 100644 index 00000000000..821c7723a74 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoCollection.java @@ -0,0 +1,1847 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.MongoNamespace; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.bulk.BulkWriteResult; +import com.mongodb.client.model.BulkWriteOptions; +import com.mongodb.client.model.CountOptions; +import com.mongodb.client.model.CreateIndexOptions; +import com.mongodb.client.model.DeleteOptions; +import com.mongodb.client.model.DropCollectionOptions; +import com.mongodb.client.model.DropIndexOptions; +import com.mongodb.client.model.EstimatedDocumentCountOptions; +import com.mongodb.client.model.FindOneAndDeleteOptions; +import com.mongodb.client.model.FindOneAndReplaceOptions; +import com.mongodb.client.model.FindOneAndUpdateOptions; +import com.mongodb.client.model.IndexModel; +import com.mongodb.client.model.IndexOptions; +import com.mongodb.client.model.InsertManyOptions; +import com.mongodb.client.model.InsertOneOptions; +import com.mongodb.client.model.RenameCollectionOptions; +import com.mongodb.client.model.ReplaceOptions; +import com.mongodb.client.model.SearchIndexModel; +import com.mongodb.client.model.UpdateOptions; +import com.mongodb.client.model.WriteModel; +import com.mongodb.client.result.DeleteResult; +import com.mongodb.client.result.InsertManyResult; +import com.mongodb.client.result.InsertOneResult; +import com.mongodb.client.result.UpdateResult; +import com.mongodb.lang.Nullable; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; +import org.reactivestreams.Publisher; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * The MongoCollection interface. + * + *
+ * <p>Note: Additions to this interface will not be considered to break binary compatibility.</p>
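+ * <p>
+ * A minimal usage sketch ({@code database} and {@code docSubscriber} are assumed names):
+ * </p>
+ * <pre>{@code
+ *  MongoCollection<Document> collection = database.getCollection("people");
+ *  collection.find(Filters.eq("name", "Ada")).first().subscribe(docSubscriber);
+ * }</pre>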
+ * + * @param The type that this collection will encode documents from and decode documents to. + * @since 1.0 + */ +@ThreadSafe +public interface MongoCollection { + + /** + * Gets the namespace of this collection. + * + * @return the namespace + */ + MongoNamespace getNamespace(); + + /** + * Get the class of documents stored in this collection. + * + * @return the class + */ + Class getDocumentClass(); + + /** + * Get the codec registry for the MongoCollection. + * + * @return the {@link org.bson.codecs.configuration.CodecRegistry} + */ + CodecRegistry getCodecRegistry(); + + /** + * Get the read preference for the MongoCollection. + * + * @return the {@link com.mongodb.ReadPreference} + */ + ReadPreference getReadPreference(); + + /** + * Get the write concern for the MongoCollection. + * + * @return the {@link com.mongodb.WriteConcern} + */ + WriteConcern getWriteConcern(); + + /** + * Get the read concern for the MongoCollection. + * + * @return the {@link com.mongodb.ReadConcern} + * @mongodb.server.release 3.2 + * @since 1.2 + */ + ReadConcern getReadConcern(); + + /** + * The time limit for the full execution of an operation. + * + *
+     * <p>
+     * If not null, the following deprecated options will be ignored:
+     * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}
+     * </p>
+     * <ul>
+     *     <li>
+     *     {@code null} means that the timeout mechanism for operations will defer to using:
+     *     <ul>
+     *         <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+     *         available</li>
+     *         <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+     *         <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+     *         <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+     *         See: cursor.maxTimeMS.</li>
+     *         <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+     *         See: {@link com.mongodb.TransactionOptions#getMaxCommitTime}.</li>
+     *     </ul>
+     *     </li>
+     *     <li>{@code 0} means infinite timeout.</li>
+     *     <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
+     *
+     * @param timeUnit the time unit
+     * @return the timeout in the given time unit
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    @Nullable
+    Long getTimeout(TimeUnit timeUnit);
+
+    /**
+     * Create a new MongoCollection instance with a different default class to cast any documents returned from the database into.
+     *
+     * @param clazz the default class to cast any documents returned from the database into.
+     * @param <NewTDocument> The type that the new collection will encode documents from and decode documents to
+     * @return a new MongoCollection instance with the different default class
+     */
+    <NewTDocument> MongoCollection<NewTDocument> withDocumentClass(Class<NewTDocument> clazz);
+
+    /**
+     * Create a new MongoCollection instance with a different codec registry.
+     *
+     * <p>
+     * The {@link CodecRegistry} configured by this method is effectively treated by the driver as an instance of
+     * {@link org.bson.codecs.configuration.CodecProvider}, which {@link CodecRegistry} extends. So there is no benefit to defining
+     * a class that implements {@link CodecRegistry}. Rather, an application should always create {@link CodecRegistry} instances
+     * using the factory methods in {@link org.bson.codecs.configuration.CodecRegistries}.
+     * </p>
+ * + * @param codecRegistry the new {@link org.bson.codecs.configuration.CodecRegistry} for the collection + * @return a new MongoCollection instance with the different codec registry + * @see org.bson.codecs.configuration.CodecRegistries + */ + MongoCollection withCodecRegistry(CodecRegistry codecRegistry); + + /** + * Create a new MongoCollection instance with a different read preference. + * + * @param readPreference the new {@link com.mongodb.ReadPreference} for the collection + * @return a new MongoCollection instance with the different readPreference + */ + MongoCollection withReadPreference(ReadPreference readPreference); + + /** + * Create a new MongoCollection instance with a different write concern. + * + * @param writeConcern the new {@link com.mongodb.WriteConcern} for the collection + * @return a new MongoCollection instance with the different writeConcern + */ + MongoCollection withWriteConcern(WriteConcern writeConcern); + + /** + * Create a new MongoCollection instance with a different read concern. + * + * @param readConcern the new {@link ReadConcern} for the collection + * @return a new MongoCollection instance with the different ReadConcern + * @mongodb.server.release 3.2 + * @since 1.2 + */ + MongoCollection withReadConcern(ReadConcern readConcern); + + /** + * Create a new MongoCollection instance with the set time limit for the full execution of an operation. + * + *
+     * <ul>
+     *     <li>{@code 0} means infinite timeout.</li>
+     *     <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+     * </ul>
+ * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit + * @return a new MongoCollection instance with the set time limit for the full execution of an operation + * @since 5.2 + * @see #getTimeout + */ + @Alpha(Reason.CLIENT) + MongoCollection withTimeout(long timeout, TimeUnit timeUnit); + + /** + * Gets an estimate of the count of documents in a collection using collection metadata. + * + *
+     * <p>
+     * Implementation note: this method is implemented using the MongoDB server's count command
+     * </p>
+ * + * @return a publisher with a single element indicating the estimated number of documents + * @since 1.9 + * @mongodb.driver.manual manual/reference/command/count/#behavior + */ + Publisher estimatedDocumentCount(); + + /** + * Gets an estimate of the count of documents in a collection using collection metadata. + * + *
+     * <p>
+     * Implementation note: this method is implemented using the MongoDB server's count command
+     * </p>
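+     * <p>
+     * For example, a sketch that bounds the metadata read ({@code countSubscriber} is an assumed name):
+     * </p>
+     * <pre>{@code
+     *  collection.estimatedDocumentCount(new EstimatedDocumentCountOptions().maxTime(1, TimeUnit.SECONDS))
+     *      .subscribe(countSubscriber);
+     * }</pre>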
+ * + * @param options the options describing the count + * @return a publisher with a single element indicating the estimated number of documents + * @since 1.9 + * @mongodb.driver.manual manual/reference/command/count/#behavior + */ + Publisher estimatedDocumentCount(EstimatedDocumentCountOptions options); + + /** + * Counts the number of documents in the collection. + * + *
+     * <p>
+     * Note: For a fast count of the total documents in a collection see {@link #estimatedDocumentCount()}.<br>
+     * Note: When migrating from {@code count()} to {@code countDocuments()} the following query operators must be replaced:
+     * </p>
+     * <pre>
+     *
+     *  +-------------+--------------------------------+
+     *  | Operator    | Replacement                    |
+     *  +=============+================================+
+     *  | $where      |  $expr                         |
+     *  +-------------+--------------------------------+
+     *  | $near       |  $geoWithin with $center       |
+     *  +-------------+--------------------------------+
+     *  | $nearSphere |  $geoWithin with $centerSphere |
+     *  +-------------+--------------------------------+
+     * </pre>
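+     * <p>
+     * For example, a sketch of a {@code $where} comparison rewritten with {@code $expr}
+     * (field names and {@code countSubscriber} are assumed):
+     * </p>
+     * <pre>{@code
+     *  // count() with {$where: "this.spent > this.budget"} becomes:
+     *  collection.countDocuments(Filters.expr(
+     *          new Document("$gt", Arrays.asList("$spent", "$budget"))))
+     *      .subscribe(countSubscriber);
+     * }</pre>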
+ * + * @return a publisher with a single element indicating the number of documents + * @since 1.9 + */ + Publisher countDocuments(); + + /** + * Counts the number of documents in the collection according to the given options. + * + *
+     * <p>
+     * Note: For a fast count of the total documents in a collection see {@link #estimatedDocumentCount()}.<br>
+     * Note: When migrating from {@code count()} to {@code countDocuments()} the following query operators must be replaced:
+     * </p>
+     * <pre>
+     *
+     *  +-------------+--------------------------------+
+     *  | Operator    | Replacement                    |
+     *  +=============+================================+
+     *  | $where      |  $expr                         |
+     *  +-------------+--------------------------------+
+     *  | $near       |  $geoWithin with $center       |
+     *  +-------------+--------------------------------+
+     *  | $nearSphere |  $geoWithin with $centerSphere |
+     *  +-------------+--------------------------------+
+     * </pre>
+ * + * @param filter the query filter + * @return a publisher with a single element indicating the number of documents + * @since 1.9 + */ + Publisher countDocuments(Bson filter); + + /** + * Counts the number of documents in the collection according to the given options. + * + *
+     * <p>
+     * Note: For a fast count of the total documents in a collection see {@link #estimatedDocumentCount()}.<br>
+     * Note: When migrating from {@code count()} to {@code countDocuments()} the following query operators must be replaced:
+     * </p>
+     * <pre>
+     *
+     *  +-------------+--------------------------------+
+     *  | Operator    | Replacement                    |
+     *  +=============+================================+
+     *  | $where      |  $expr                         |
+     *  +-------------+--------------------------------+
+     *  | $near       |  $geoWithin with $center       |
+     *  +-------------+--------------------------------+
+     *  | $nearSphere |  $geoWithin with $centerSphere |
+     *  +-------------+--------------------------------+
+     * </pre>
+ * + * @param filter the query filter + * @param options the options describing the count + * @return a publisher with a single element indicating the number of documents + * @since 1.9 + */ + Publisher countDocuments(Bson filter, CountOptions options); + + /** + * Counts the number of documents in the collection. + * + *
+     * <p>
+     * Note: For a fast count of the total documents in a collection see {@link #estimatedDocumentCount()}.<br>
+     * Note: When migrating from {@code count()} to {@code countDocuments()} the following query operators must be replaced:
+     * </p>
+     * <pre>
+     *
+     *  +-------------+--------------------------------+
+     *  | Operator    | Replacement                    |
+     *  +=============+================================+
+     *  | $where      |  $expr                         |
+     *  +-------------+--------------------------------+
+     *  | $near       |  $geoWithin with $center       |
+     *  +-------------+--------------------------------+
+     *  | $nearSphere |  $geoWithin with $centerSphere |
+     *  +-------------+--------------------------------+
+     * </pre>
+ * + * @param clientSession the client session with which to associate this operation + * @return a publisher with a single element indicating the number of documents + * @mongodb.server.release 3.6 + * @since 1.9 + */ + Publisher countDocuments(ClientSession clientSession); + + /** + * Counts the number of documents in the collection according to the given options. + * + *
+     * <p>
+     * Note: For a fast count of the total documents in a collection see {@link #estimatedDocumentCount()}.
+     * </p>
+ * + * @param clientSession the client session with which to associate this operation + * @param filter the query filter + * @return a publisher with a single element indicating the number of documents + * @mongodb.server.release 3.6 + * @since 1.9 + */ + Publisher countDocuments(ClientSession clientSession, Bson filter); + + /** + * Counts the number of documents in the collection according to the given options. + * + *
+     * <p>
+     * Note: For a fast count of the total documents in a collection see {@link #estimatedDocumentCount()}.<br>
+     * Note: When migrating from {@code count()} to {@code countDocuments()} the following query operators must be replaced:
+     * </p>
+     * <pre>
+     *
+     *  +-------------+--------------------------------+
+     *  | Operator    | Replacement                    |
+     *  +=============+================================+
+     *  | $where      |  $expr                         |
+     *  +-------------+--------------------------------+
+     *  | $near       |  $geoWithin with $center       |
+     *  +-------------+--------------------------------+
+     *  | $nearSphere |  $geoWithin with $centerSphere |
+     *  +-------------+--------------------------------+
+     * </pre>
+ * + * @param clientSession the client session with which to associate this operation + * @param filter the query filter + * @param options the options describing the count + * @return a publisher with a single element indicating the number of documents + * @mongodb.server.release 3.6 + * @since 1.9 + */ + Publisher countDocuments(ClientSession clientSession, Bson filter, CountOptions options); + + /** + * Gets the distinct values of the specified field name. + * + * @param fieldName the field name + * @param resultClass the default class to cast any distinct items into. + * @param the target type of the iterable. + * @return a publisher emitting the sequence of distinct values + * @mongodb.driver.manual reference/command/distinct/ Distinct + */ + DistinctPublisher distinct(String fieldName, Class resultClass); + + /** + * Gets the distinct values of the specified field name. + * + * @param fieldName the field name + * @param filter the query filter + * @param resultClass the default class to cast any distinct items into. + * @param the target type of the iterable. + * @return an iterable of distinct values + * @mongodb.driver.manual reference/command/distinct/ Distinct + */ + DistinctPublisher distinct(String fieldName, Bson filter, Class resultClass); + + /** + * Gets the distinct values of the specified field name. + * + * @param clientSession the client session with which to associate this operation + * @param fieldName the field name + * @param resultClass the default class to cast any distinct items into. + * @param the target type of the iterable. + * @return a publisher emitting the sequence of distinct values + * @mongodb.driver.manual reference/command/distinct/ Distinct + * @mongodb.server.release 3.6 + * @since 1.7 + */ + DistinctPublisher distinct(ClientSession clientSession, String fieldName, Class resultClass); + + /** + * Gets the distinct values of the specified field name. + * + * @param clientSession the client session with which to associate this operation + * @param fieldName the field name + * @param filter the query filter + * @param resultClass the default class to cast any distinct items into. + * @param the target type of the iterable. + * @return an iterable of distinct values + * @mongodb.driver.manual reference/command/distinct/ Distinct + * @mongodb.server.release 3.6 + * @since 1.7 + */ + DistinctPublisher distinct(ClientSession clientSession, String fieldName, Bson filter, Class resultClass); + + /** + * Finds all documents in the collection. + * + * @return the fluent find interface + * @mongodb.driver.manual tutorial/query-documents/ Find + */ + FindPublisher find(); + + /** + * Finds all documents in the collection. + * + * @param clazz the class to decode each document into + * @param the target document type of the iterable. + * @return the fluent find interface + * @mongodb.driver.manual tutorial/query-documents/ Find + */ + FindPublisher find(Class clazz); + + /** + * Finds all documents in the collection. + * + * @param filter the query filter + * @return the fluent find interface + * @mongodb.driver.manual tutorial/query-documents/ Find + */ + FindPublisher find(Bson filter); + + /** + * Finds all documents in the collection. + * + * @param filter the query filter + * @param clazz the class to decode each document into + * @param the target document type of the iterable. 
+ * @return the fluent find interface + * @mongodb.driver.manual tutorial/query-documents/ Find + */ + FindPublisher find(Bson filter, Class clazz); + + /** + * Finds all documents in the collection. + * + * @param clientSession the client session with which to associate this operation + * @return the fluent find interface + * @mongodb.driver.manual tutorial/query-documents/ Find + * @mongodb.server.release 3.6 + * @since 1.7 + */ + FindPublisher find(ClientSession clientSession); + + /** + * Finds all documents in the collection. + * + * @param clientSession the client session with which to associate this operation + * @param clazz the class to decode each document into + * @param the target document type of the iterable. + * @return the fluent find interface + * @mongodb.driver.manual tutorial/query-documents/ Find + * @mongodb.server.release 3.6 + * @since 1.7 + */ + FindPublisher find(ClientSession clientSession, Class clazz); + + /** + * Finds all documents in the collection. + * + * @param clientSession the client session with which to associate this operation + * @param filter the query filter + * @return the fluent find interface + * @mongodb.driver.manual tutorial/query-documents/ Find + * @mongodb.server.release 3.6 + * @since 1.7 + */ + FindPublisher find(ClientSession clientSession, Bson filter); + + /** + * Finds all documents in the collection. + * + * @param clientSession the client session with which to associate this operation + * @param filter the query filter + * @param clazz the class to decode each document into + * @param the target document type of the iterable. + * @return the fluent find interface + * @mongodb.driver.manual tutorial/query-documents/ Find + * @mongodb.server.release 3.6 + * @since 1.7 + */ + FindPublisher find(ClientSession clientSession, Bson filter, Class clazz); + + /** + * Aggregates documents according to the specified aggregation pipeline. + * + * @param pipeline the aggregate pipeline + * @return a publisher containing the result of the aggregation operation + * @mongodb.driver.manual aggregation/ Aggregation + */ + AggregatePublisher aggregate(List pipeline); + + /** + * Aggregates documents according to the specified aggregation pipeline. + * + * @param pipeline the aggregate pipeline + * @param clazz the class to decode each document into + * @param the target document type of the iterable. + * @return a publisher containing the result of the aggregation operation + * @mongodb.driver.manual aggregation/ Aggregation + */ + AggregatePublisher aggregate(List pipeline, Class clazz); + + /** + * Aggregates documents according to the specified aggregation pipeline. + * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregate pipeline + * @return a publisher containing the result of the aggregation operation + * @mongodb.driver.manual aggregation/ Aggregation + * @mongodb.server.release 3.6 + * @since 1.7 + */ + AggregatePublisher aggregate(ClientSession clientSession, List pipeline); + + /** + * Aggregates documents according to the specified aggregation pipeline. + * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregate pipeline + * @param clazz the class to decode each document into + * @param the target document type of the iterable. 
+ * @return a publisher containing the result of the aggregation operation + * @mongodb.driver.manual aggregation/ Aggregation + * @mongodb.server.release 3.6 + * @since 1.7 + */ + AggregatePublisher aggregate(ClientSession clientSession, List pipeline, Class clazz); + + /** + * Creates a change stream for this collection. + * + * @return the change stream iterable + * @mongodb.driver.manual reference/operator/aggregation/changeStream $changeStream + * @mongodb.server.release 3.6 + * @since 1.7 + */ + ChangeStreamPublisher watch(); + + /** + * Creates a change stream for this collection. + * + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @mongodb.driver.manual reference/operator/aggregation/changeStream $changeStream + * @mongodb.server.release 3.6 + * @since 1.7 + */ + ChangeStreamPublisher watch(Class resultClass); + + /** + * Creates a change stream for this collection. + * + * @param pipeline the aggregation pipeline to apply to the change stream + * @return the change stream iterable + * @mongodb.driver.manual reference/operator/aggregation/changeStream $changeStream + * @since 1.6 + */ + ChangeStreamPublisher watch(List pipeline); + + /** + * Creates a change stream for this collection. + * + * @param pipeline the aggregation pipeline to apply to the change stream + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @mongodb.driver.manual reference/operator/aggregation/changeStream $changeStream + * @since 1.6 + */ + ChangeStreamPublisher watch(List pipeline, Class resultClass); + + /** + * Creates a change stream for this collection. + * + * @param clientSession the client session with which to associate this operation + * @return the change stream iterable + * @mongodb.driver.manual reference/operator/aggregation/changeStream $changeStream + * @mongodb.server.release 3.6 + * @since 1.7 + */ + ChangeStreamPublisher watch(ClientSession clientSession); + + /** + * Creates a change stream for this collection. + * + * @param clientSession the client session with which to associate this operation + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @mongodb.driver.manual reference/operator/aggregation/changeStream $changeStream + * @mongodb.server.release 3.6 + * @since 1.7 + */ + ChangeStreamPublisher watch(ClientSession clientSession, Class resultClass); + + /** + * Creates a change stream for this collection. + * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream + * @return the change stream iterable + * @mongodb.driver.manual reference/operator/aggregation/changeStream $changeStream + * @mongodb.server.release 3.6 + * @since 1.7 + */ + ChangeStreamPublisher watch(ClientSession clientSession, List pipeline); + + /** + * Creates a change stream for this collection. + * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. 
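Similarly for the watch methods: a change stream publisher stays open and emits ChangeStreamDocument events as they occur, so the sketch below subscribes without blocking (assumes a replica set or sharded cluster, which change streams require; java.util.Collections assumed imported):

    Flux.from(collection.watch(Collections.singletonList(
                    Aggregates.match(Filters.eq("operationType", "insert")))))
            .doOnNext(event -> System.out.println(event.getFullDocument()))
            .subscribe();   // runs until the subscription is cancelled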
+ * @return the change stream iterable
+ * @mongodb.driver.manual reference/operator/aggregation/changeStream $changeStream
+ * @mongodb.server.release 3.6
+ * @since 1.7
+ */
+ <TResult> ChangeStreamPublisher<TResult> watch(ClientSession clientSession, List<? extends Bson> pipeline, Class<TResult> resultClass);
+
+ /**
+ * Aggregates documents according to the specified map-reduce function.
+ *
+ * @param mapFunction A JavaScript function that associates or "maps" a value with a key and emits the key and value pair.
+ * @param reduceFunction A JavaScript function that "reduces" to a single object all the values associated with a particular key.
+ * @return a publisher containing the result of the map-reduce operation
+ * @mongodb.driver.manual reference/command/mapReduce/ map-reduce
+ * @deprecated Superseded by aggregate
+ */
+ @Deprecated
+ MapReducePublisher<TDocument> mapReduce(String mapFunction, String reduceFunction);
+
+ /**
+ * Aggregates documents according to the specified map-reduce function.
+ *
+ * @param mapFunction A JavaScript function that associates or "maps" a value with a key and emits the key and value pair.
+ * @param reduceFunction A JavaScript function that "reduces" to a single object all the values associated with a particular key.
+ * @param clazz the class to decode each resulting document into.
+ * @param <TResult> the target document type of the iterable.
+ * @return a publisher containing the result of the map-reduce operation
+ * @mongodb.driver.manual reference/command/mapReduce/ map-reduce
+ * @deprecated Superseded by aggregate
+ */
+ @Deprecated
+ <TResult> MapReducePublisher<TResult> mapReduce(String mapFunction, String reduceFunction, Class<TResult> clazz);
+
+ /**
+ * Aggregates documents according to the specified map-reduce function.
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @param mapFunction A JavaScript function that associates or "maps" a value with a key and emits the key and value pair.
+ * @param reduceFunction A JavaScript function that "reduces" to a single object all the values associated with a particular key.
+ * @return a publisher containing the result of the map-reduce operation
+ * @mongodb.driver.manual reference/command/mapReduce/ map-reduce
+ * @mongodb.server.release 3.6
+ * @since 1.7
+ * @deprecated Superseded by aggregate
+ */
+ @Deprecated
+ MapReducePublisher<TDocument> mapReduce(ClientSession clientSession, String mapFunction, String reduceFunction);
+
+ /**
+ * Aggregates documents according to the specified map-reduce function.
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @param mapFunction A JavaScript function that associates or "maps" a value with a key and emits the key and value pair.
+ * @param reduceFunction A JavaScript function that "reduces" to a single object all the values associated with a particular key.
+ * @param clazz the class to decode each resulting document into.
+ * @param <TResult> the target document type of the iterable.
+ * @return a publisher containing the result of the map-reduce operation
+ * @mongodb.driver.manual reference/command/mapReduce/ map-reduce
+ * @mongodb.server.release 3.6
+ * @since 1.7
+ * @deprecated Superseded by aggregate
+ */
+ @Deprecated
+ <TResult> MapReducePublisher<TResult> mapReduce(ClientSession clientSession, String mapFunction, String reduceFunction,
+ Class<TResult> clazz);
+
+ /**
+ * Executes a mix of inserts, updates, replaces, and deletes.
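A sketch of such a mixed batch using the WriteModel subclasses from com.mongodb.client.model (same setup assumptions as the earlier sketches; values are illustrative):

    Mono.from(collection.bulkWrite(
                    Arrays.asList(
                            new InsertOneModel<>(new Document("name", "Cafe A")),
                            new UpdateOneModel<>(Filters.eq("name", "Cafe B"), Updates.set("status", "A")),
                            new DeleteManyModel<>(Filters.eq("status", "closed"))),
                    new BulkWriteOptions().ordered(false)))   // unordered: keep going past individual failures
            .doOnNext(result -> System.out.println("inserted: " + result.getInsertedCount()))
            .block();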
+ * + * @param requests the writes to execute + * @return a publisher with a single element the BulkWriteResult + */ + Publisher bulkWrite(List> requests); + + /** + * Executes a mix of inserts, updates, replaces, and deletes. + * + * @param requests the writes to execute + * @param options the options to apply to the bulk write operation + * @return a publisher with a single element the BulkWriteResult + */ + Publisher bulkWrite(List> requests, BulkWriteOptions options); + + /** + * Executes a mix of inserts, updates, replaces, and deletes. + * + * @param clientSession the client session with which to associate this operation + * @param requests the writes to execute + * @return a publisher with a single element the BulkWriteResult + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher bulkWrite(ClientSession clientSession, List> requests); + + /** + * Executes a mix of inserts, updates, replaces, and deletes. + * + * @param clientSession the client session with which to associate this operation + * @param requests the writes to execute + * @param options the options to apply to the bulk write operation + * @return a publisher with a single element the BulkWriteResult + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher bulkWrite(ClientSession clientSession, List> requests, + BulkWriteOptions options); + + /** + * Inserts the provided document. If the document is missing an identifier, the driver should generate one. + * + * @param document the document to insert + * @return a publisher with a single element with the InsertOneResult or with either a + * com.mongodb.DuplicateKeyException or com.mongodb.MongoException + */ + Publisher insertOne(TDocument document); + + /** + * Inserts the provided document. If the document is missing an identifier, the driver should generate one. + * + * @param document the document to insert + * @param options the options to apply to the operation + * @return a publisher with a single element with the InsertOneResult or with either a + * com.mongodb.DuplicateKeyException or com.mongodb.MongoException + * @since 1.2 + */ + Publisher insertOne(TDocument document, InsertOneOptions options); + + /** + * Inserts the provided document. If the document is missing an identifier, the driver should generate one. + * + * @param clientSession the client session with which to associate this operation + * @param document the document to insert + * @return a publisher with a single element with the InsertOneResult or with either a + * com.mongodb.DuplicateKeyException or com.mongodb.MongoException + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher insertOne(ClientSession clientSession, TDocument document); + + /** + * Inserts the provided document. If the document is missing an identifier, the driver should generate one. + * + * @param clientSession the client session with which to associate this operation + * @param document the document to insert + * @param options the options to apply to the operation + * @return a publisher with a single element with the InsertOneResult or with either a + * com.mongodb.DuplicateKeyException or com.mongodb.MongoException + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher insertOne(ClientSession clientSession, TDocument document, InsertOneOptions options); + + /** + * Inserts a batch of documents. 
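For example (a sketch; with ordered(false) the remaining documents are still attempted if one insert fails):

    Mono.from(collection.insertMany(
                    Arrays.asList(new Document("name", "a"), new Document("name", "b")),
                    new InsertManyOptions().ordered(false)))
            .doOnNext(result -> System.out.println(result.getInsertedIds()))
            .block();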
+ * + * @param documents the documents to insert + * @return a publisher with a single element with the InsertManyResult or with either a + * com.mongodb.DuplicateKeyException or com.mongodb.MongoException + */ + Publisher insertMany(List documents); + + /** + * Inserts a batch of documents. + * + * @param documents the documents to insert + * @param options the options to apply to the operation + * @return a publisher with a single element with the InsertManyResult or with either a + * com.mongodb.DuplicateKeyException or com.mongodb.MongoException + */ + Publisher insertMany(List documents, InsertManyOptions options); + + /** + * Inserts a batch of documents. + * + * @param clientSession the client session with which to associate this operation + * @param documents the documents to insert + * @return a publisher with a single element with the InsertManyResult or with either a + * com.mongodb.DuplicateKeyException or com.mongodb.MongoException + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher insertMany(ClientSession clientSession, List documents); + + /** + * Inserts a batch of documents. + * + * @param clientSession the client session with which to associate this operation + * @param documents the documents to insert + * @param options the options to apply to the operation + * @return a publisher with a single element with the InsertManyResult or with either a + * com.mongodb.DuplicateKeyException or com.mongodb.MongoException + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher insertMany(ClientSession clientSession, List documents, InsertManyOptions options); + + /** + * Removes at most one document from the collection that matches the given filter. If no documents match, the collection is not + * modified. + * + * @param filter the query filter to apply the delete operation + * @return a publisher with a single element the DeleteResult or with an com.mongodb.MongoException + */ + Publisher deleteOne(Bson filter); + + /** + * Removes at most one document from the collection that matches the given filter. If no documents match, the collection is not + * modified. + * + * @param filter the query filter to apply the delete operation + * @param options the options to apply to the delete operation + * @return a publisher with a single element the DeleteResult or with an com.mongodb.MongoException + * @since 1.5 + */ + Publisher deleteOne(Bson filter, DeleteOptions options); + + /** + * Removes at most one document from the collection that matches the given filter. If no documents match, the collection is not + * modified. + * + * @param clientSession the client session with which to associate this operation + * @param filter the query filter to apply the delete operation + * @return a publisher with a single element the DeleteResult or with an com.mongodb.MongoException + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher deleteOne(ClientSession clientSession, Bson filter); + + /** + * Removes at most one document from the collection that matches the given filter. If no documents match, the collection is not + * modified. 
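A sketch of the delete publishers; the emitted DeleteResult carries the acknowledged deletion count (setup as above):

    Mono.from(collection.deleteMany(Filters.eq("status", "closed")))
            .doOnNext(result -> System.out.println("deleted: " + result.getDeletedCount()))
            .block();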
+ * + * @param clientSession the client session with which to associate this operation + * @param filter the query filter to apply the delete operation + * @param options the options to apply to the delete operation + * @return a publisher with a single element the DeleteResult or with an com.mongodb.MongoException + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher deleteOne(ClientSession clientSession, Bson filter, DeleteOptions options); + + /** + * Removes all documents from the collection that match the given query filter. If no documents match, the collection is not modified. + * + * @param filter the query filter to apply the delete operation + * @return a publisher with a single element the DeleteResult or with an com.mongodb.MongoException + */ + Publisher deleteMany(Bson filter); + + /** + * Removes all documents from the collection that match the given query filter. If no documents match, the collection is not modified. + * + * @param filter the query filter to apply the delete operation + * @param options the options to apply to the delete operation + * @return a publisher with a single element the DeleteResult or with an com.mongodb.MongoException + * @since 1.5 + */ + Publisher deleteMany(Bson filter, DeleteOptions options); + + /** + * Removes all documents from the collection that match the given query filter. If no documents match, the collection is not modified. + * + * @param clientSession the client session with which to associate this operation + * @param filter the query filter to apply the delete operation + * @return a publisher with a single element the DeleteResult or with an com.mongodb.MongoException + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher deleteMany(ClientSession clientSession, Bson filter); + + /** + * Removes all documents from the collection that match the given query filter. If no documents match, the collection is not modified. + * + * @param clientSession the client session with which to associate this operation + * @param filter the query filter to apply the delete operation + * @param options the options to apply to the delete operation + * @return a publisher with a single element the DeleteResult or with an com.mongodb.MongoException + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher deleteMany(ClientSession clientSession, Bson filter, DeleteOptions options); + + /** + * Replace a document in the collection according to the specified arguments. + * + * @param filter the query filter to apply the replace operation + * @param replacement the replacement document + * @return a publisher with a single element the UpdateResult + * @mongodb.driver.manual tutorial/modify-documents/#replace-the-document Replace + */ + Publisher replaceOne(Bson filter, TDocument replacement); + + /** + * Replace a document in the collection according to the specified arguments. + * + * @param filter the query filter to apply the replace operation + * @param replacement the replacement document + * @param options the options to apply to the replace operation + * @return a publisher with a single element the UpdateResult + * @mongodb.driver.manual tutorial/modify-documents/#replace-the-document Replace + * @since 1.8 + */ + Publisher replaceOne(Bson filter, TDocument replacement, ReplaceOptions options); + + /** + * Replace a document in the collection according to the specified arguments. 
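For example, with upsert enabled the replacement is inserted when nothing matches the filter (a sketch with illustrative values):

    Mono.from(collection.replaceOne(
                    Filters.eq("_id", 1),
                    new Document("_id", 1).append("name", "replacement"),
                    new ReplaceOptions().upsert(true)))
            .doOnNext(result -> System.out.println("matched: " + result.getMatchedCount()))
            .block();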
+ * + * @param clientSession the client session with which to associate this operation + * @param filter the query filter to apply the replace operation + * @param replacement the replacement document + * @return a publisher with a single element the UpdateResult + * @mongodb.driver.manual tutorial/modify-documents/#replace-the-document Replace + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher replaceOne(ClientSession clientSession, Bson filter, TDocument replacement); + + /** + * Replace a document in the collection according to the specified arguments. + * + * @param clientSession the client session with which to associate this operation + * @param filter the query filter to apply the replace operation + * @param replacement the replacement document + * @param options the options to apply to the replace operation + * @return a publisher with a single element the UpdateResult + * @mongodb.driver.manual tutorial/modify-documents/#replace-the-document Replace + * @mongodb.server.release 3.6 + * @since 1.8 + */ + Publisher replaceOne(ClientSession clientSession, Bson filter, TDocument replacement, ReplaceOptions options); + + /** + * Update a single document in the collection according to the specified arguments. + * + * @param filter a document describing the query filter, which may not be null. + * @param update a document describing the update, which may not be null. The update to apply must include only update operators. + * @return a publisher with a single element the UpdateResult + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + */ + Publisher updateOne(Bson filter, Bson update); + + /** + * Update a single document in the collection according to the specified arguments. + * + * @param filter a document describing the query filter, which may not be null. + * @param update a document describing the update, which may not be null. The update to apply must include only update operators. + * @param options the options to apply to the update operation + * @return a publisher with a single element the UpdateResult + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + */ + Publisher updateOne(Bson filter, Bson update, UpdateOptions options); + + /** + * Update a single document in the collection according to the specified arguments. + * + * @param clientSession the client session with which to associate this operation + * @param filter a document describing the query filter, which may not be null. + * @param update a document describing the update, which may not be null. The update to apply must include only update operators. + * @return a publisher with a single element the UpdateResult + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher updateOne(ClientSession clientSession, Bson filter, Bson update); + + /** + * Update a single document in the collection according to the specified arguments. + * + * @param clientSession the client session with which to associate this operation + * @param filter a document describing the query filter, which may not be null. + * @param update a document describing the update, which may not be null. The update to apply must include only update operators. 
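A sketch of an operator-style update built with the Updates helper (same setup assumptions):

    Mono.from(collection.updateOne(
                    Filters.eq("name", "Cafe A"),
                    Updates.combine(Updates.set("status", "A"), Updates.inc("visits", 1)),
                    new UpdateOptions().upsert(true)))
            .doOnNext(result -> System.out.println("modified: " + result.getModifiedCount()))
            .block();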
+ * @param options the options to apply to the update operation + * @return a publisher with a single element the UpdateResult + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher updateOne(ClientSession clientSession, Bson filter, Bson update, UpdateOptions options); + + /** + * Update a single document in the collection according to the specified arguments. + * + *

<p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>

+ * @param filter a document describing the query filter, which may not be null. + * @param update a pipeline describing the update, which may not be null. + * @return a publisher with a single element the UpdateResult + * @since 1.12 + * @mongodb.server.release 4.2 + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + */ + Publisher updateOne(Bson filter, List update); + + /** + * Update a single document in the collection according to the specified arguments. + * + *

<p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>

+ * @param filter a document describing the query filter, which may not be null. + * @param update a pipeline describing the update, which may not be null. + * @param options the options to apply to the update operation + * @return a publisher with a single element the UpdateResult + * @since 1.12 + * @mongodb.server.release 4.2 + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + */ + Publisher updateOne(Bson filter, List update, UpdateOptions options); + + /** + * Update a single document in the collection according to the specified arguments. + * + *

<p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>

+ * @param clientSession the client session with which to associate this operation + * @param filter a document describing the query filter, which may not be null. + * @param update a pipeline describing the update, which may not be null. + * @return a publisher with a single element the UpdateResult + * @since 1.12 + * @mongodb.server.release 4.2 + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + */ + Publisher updateOne(ClientSession clientSession, Bson filter, List update); + + /** + * Update a single document in the collection according to the specified arguments. + * + *

<p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>

+ * @param clientSession the client session with which to associate this operation + * @param filter a document describing the query filter, which may not be null. + * @param update a pipeline describing the update, which may not be null. + * @param options the options to apply to the update operation + * @return a publisher with a single element the UpdateResult + * @since 1.12 + * @mongodb.server.release 4.2 + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + */ + Publisher updateOne(ClientSession clientSession, Bson filter, List update, UpdateOptions options); + + /** + * Update all documents in the collection according to the specified arguments. + * + * @param filter a document describing the query filter, which may not be null. + * @param update a document describing the update, which may not be null. The update to apply must include only update operators. + * @return a publisher with a single element the UpdateResult + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + */ + Publisher updateMany(Bson filter, Bson update); + + /** + * Update all documents in the collection according to the specified arguments. + * + * @param filter a document describing the query filter, which may not be null. + * @param update a document describing the update, which may not be null. The update to apply must include only update operators. + * @param options the options to apply to the update operation + * @return a publisher with a single element the UpdateResult + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + */ + Publisher updateMany(Bson filter, Bson update, UpdateOptions options); + + /** + * Update all documents in the collection according to the specified arguments. + * + * @param clientSession the client session with which to associate this operation + * @param filter a document describing the query filter, which may not be null. + * @param update a document describing the update, which may not be null. The update to apply must include only update operators. + * @return a publisher with a single element the UpdateResult + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher updateMany(ClientSession clientSession, Bson filter, Bson update); + + /** + * Update all documents in the collection according to the specified arguments. + * + * @param clientSession the client session with which to associate this operation + * @param filter a document describing the query filter, which may not be null. + * @param update a document describing the update, which may not be null. The update to apply must include only update operators. + * @param options the options to apply to the update operation + * @return a publisher with a single element the UpdateResult + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher updateMany(ClientSession clientSession, Bson filter, Bson update, UpdateOptions options); + + /** + * Update all documents in the collection according to the specified arguments. + * + * @param filter a document describing the query filter, which may not be null. 
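In the pipeline form documented here the update is a list of aggregation stages rather than an operator document; a sketch with a raw $set stage (requires MongoDB 4.2+, as the tags note):

    Mono.from(collection.updateMany(
                    Filters.eq("status", "A"),
                    Collections.singletonList(new Document("$set",
                            new Document("grade", new Document("$toUpper", "$grade"))))))
            .doOnNext(result -> System.out.println("modified: " + result.getModifiedCount()))
            .block();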
+ * @param update a pipeline describing the update, which may not be null. + * @return a publisher with a single element the UpdateResult + * @since 1.12 + * @mongodb.server.release 4.2 + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + */ + Publisher updateMany(Bson filter, List update); + + /** + * Update all documents in the collection according to the specified arguments. + * + * @param filter a document describing the query filter, which may not be null. + * @param update a pipeline describing the update, which may not be null. + * @param options the options to apply to the update operation + * @return a publisher with a single element the UpdateResult + * @since 1.12 + * @mongodb.server.release 4.2 + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + */ + Publisher updateMany(Bson filter, List update, UpdateOptions options); + + /** + * Update all documents in the collection according to the specified arguments. + * + * @param clientSession the client session with which to associate this operation + * @param filter a document describing the query filter, which may not be null. + * @param update a pipeline describing the update, which may not be null. + * @return a publisher with a single element the UpdateResult + * @since 1.12 + * @mongodb.server.release 4.2 + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + */ + Publisher updateMany(ClientSession clientSession, Bson filter, List update); + + /** + * Update all documents in the collection according to the specified arguments. + * + * @param clientSession the client session with which to associate this operation + * @param filter a document describing the query filter, which may not be null. + * @param update a pipeline describing the update, which may not be null. + * @param options the options to apply to the update operation + * @return a publisher with a single element the UpdateResult + * @since 1.12 + * @mongodb.server.release 4.2 + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + */ + Publisher updateMany(ClientSession clientSession, Bson filter, List update, UpdateOptions options); + + /** + * Atomically find a document and remove it. + * + * @param filter the query filter to find the document with + * @return a publisher with a single element the document that was removed. If no documents matched the query filter, then null will be + * returned + */ + Publisher findOneAndDelete(Bson filter); + + /** + * Atomically find a document and remove it. + * + * @param filter the query filter to find the document with + * @param options the options to apply to the operation + * @return a publisher with a single element the document that was removed. If no documents matched the query filter, then null will be + * returned + */ + Publisher findOneAndDelete(Bson filter, FindOneAndDeleteOptions options); + + /** + * Atomically find a document and remove it. + * + * @param clientSession the client session with which to associate this operation + * @param filter the query filter to find the document with + * @return a publisher with a single element the document that was removed. 
If no documents matched the query filter, then null will be + * returned + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher findOneAndDelete(ClientSession clientSession, Bson filter); + + /** + * Atomically find a document and remove it. + * + * @param clientSession the client session with which to associate this operation + * @param filter the query filter to find the document with + * @param options the options to apply to the operation + * @return a publisher with a single element the document that was removed. If no documents matched the query filter, then null will be + * returned + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher findOneAndDelete(ClientSession clientSession, Bson filter, FindOneAndDeleteOptions options); + + /** + * Atomically find a document and replace it. + * + * @param filter the query filter to apply the replace operation + * @param replacement the replacement document + * @return a publisher with a single element the document that was replaced. Depending on the value of the {@code returnOriginal} + * property, this will either be the document as it was before the update or as it is after the update. If no documents matched the + * query filter, then null will be returned + */ + Publisher findOneAndReplace(Bson filter, TDocument replacement); + + /** + * Atomically find a document and replace it. + * + * @param filter the query filter to apply the replace operation + * @param replacement the replacement document + * @param options the options to apply to the operation + * @return a publisher with a single element the document that was replaced. Depending on the value of the {@code returnOriginal} + * property, this will either be the document as it was before the update or as it is after the update. If no documents matched the + * query filter, then null will be returned + */ + Publisher findOneAndReplace(Bson filter, TDocument replacement, FindOneAndReplaceOptions options); + + /** + * Atomically find a document and replace it. + * + * @param clientSession the client session with which to associate this operation + * @param filter the query filter to apply the replace operation + * @param replacement the replacement document + * @return a publisher with a single element the document that was replaced. Depending on the value of the {@code returnOriginal} + * property, this will either be the document as it was before the update or as it is after the update. If no documents matched the + * query filter, then null will be returned + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher findOneAndReplace(ClientSession clientSession, Bson filter, TDocument replacement); + + /** + * Atomically find a document and replace it. + * + * @param clientSession the client session with which to associate this operation + * @param filter the query filter to apply the replace operation + * @param replacement the replacement document + * @param options the options to apply to the operation + * @return a publisher with a single element the document that was replaced. Depending on the value of the {@code returnOriginal} + * property, this will either be the document as it was before the update or as it is after the update. If no documents matched the + * query filter, then null will be returned + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher findOneAndReplace(ClientSession clientSession, Bson filter, TDocument replacement, + FindOneAndReplaceOptions options); + + /** + * Atomically find a document and update it. 
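A sketch of this findOneAnd* family; ReturnDocument.AFTER asks for the post-update document instead of the default pre-update one, and an empty publisher (a null below) signals that nothing matched:

    Document updated = Mono.from(collection.findOneAndUpdate(
                    Filters.eq("name", "Cafe A"),
                    Updates.inc("visits", 1),
                    new FindOneAndUpdateOptions().returnDocument(ReturnDocument.AFTER)))
            .block();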
+ * + * @param filter a document describing the query filter, which may not be null. + * @param update a document describing the update, which may not be null. The update to apply must include only update operators. + * @return a publisher with a single element the document that was updated before the update was applied. If no documents matched the + * query filter, then null will be returned + */ + Publisher findOneAndUpdate(Bson filter, Bson update); + + /** + * Atomically find a document and update it. + * + * @param filter a document describing the query filter, which may not be null. + * @param update a document describing the update, which may not be null. The update to apply must include only update operators. + * @param options the options to apply to the operation + * @return a publisher with a single element the document that was updated. Depending on the value of the {@code returnOriginal} + * property, this will either be the document as it was before the update or as it is after the update. If no documents matched the + * query filter, then null will be returned + */ + Publisher findOneAndUpdate(Bson filter, Bson update, FindOneAndUpdateOptions options); + + /** + * Atomically find a document and update it. + * + * @param clientSession the client session with which to associate this operation + * @param filter a document describing the query filter, which may not be null. + * @param update a document describing the update, which may not be null. The update to apply must include only update operators. + * @return a publisher with a single element the document that was updated before the update was applied. If no documents matched the + * query filter, then null will be returned + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher findOneAndUpdate(ClientSession clientSession, Bson filter, Bson update); + + /** + * Atomically find a document and update it. + * + * @param clientSession the client session with which to associate this operation + * @param filter a document describing the query filter, which may not be null. + * @param update a document describing the update, which may not be null. The update to apply must include only update operators. + * @param options the options to apply to the operation + * @return a publisher with a single element the document that was updated. Depending on the value of the {@code returnOriginal} + * property, this will either be the document as it was before the update or as it is after the update. If no documents matched the + * query filter, then null will be returned + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher findOneAndUpdate(ClientSession clientSession, Bson filter, Bson update, FindOneAndUpdateOptions options); + + /** + * Atomically find a document and update it. + * + *

<p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>

+ * @param filter a document describing the query filter, which may not be null. + * @param update a pipeline describing the update, which may not be null. + * @return a publisher with a single element the document that was updated. Depending on the value of the {@code returnOriginal} + * property, this will either be the document as it was before the update or as it is after the update. If no documents matched the + * query filter, then null will be returned + * @since 1.12 + * @mongodb.server.release 4.2 + */ + Publisher findOneAndUpdate(Bson filter, List update); + + /** + * Atomically find a document and update it. + * + *

<p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>

+ * @param filter a document describing the query filter, which may not be null. + * @param update a pipeline describing the update, which may not be null. + * @param options the options to apply to the operation + * @return a publisher with a single element the document that was updated. Depending on the value of the {@code returnOriginal} + * property, this will either be the document as it was before the update or as it is after the update. If no documents matched the + * query filter, then null will be returned + * @since 1.12 + * @mongodb.server.release 4.2 + */ + Publisher findOneAndUpdate(Bson filter, List update, FindOneAndUpdateOptions options); + + /** + * Atomically find a document and update it. + * + *

<p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>

+ * @param clientSession the client session with which to associate this operation + * @param filter a document describing the query filter, which may not be null. + * @param update a pipeline describing the update, which may not be null. + * @return a publisher with a single element the document that was updated. Depending on the value of the {@code returnOriginal} + * property, this will either be the document as it was before the update or as it is after the update. If no documents matched the + * query filter, then null will be returned + * @since 1.12 + * @mongodb.server.release 4.2 + */ + Publisher findOneAndUpdate(ClientSession clientSession, Bson filter, List update); + + /** + * Atomically find a document and update it. + * + *

<p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>

+ * @param clientSession the client session with which to associate this operation + * @param filter a document describing the query filter, which may not be null. + * @param update a pipeline describing the update, which may not be null. + * @param options the options to apply to the operation + * @return a publisher with a single element the document that was updated. Depending on the value of the {@code returnOriginal} + * property, this will either be the document as it was before the update or as it is after the update. If no documents matched the + * query filter, then null will be returned + * @since 1.12 + * @mongodb.server.release 4.2 + */ + Publisher findOneAndUpdate(ClientSession clientSession, Bson filter, List update, + FindOneAndUpdateOptions options); + + /** + * Drops this collection from the Database. + * + * @return an empty publisher that indicates when the operation has completed + * @mongodb.driver.manual reference/command/drop/ Drop Collection + */ + Publisher drop(); + + /** + * Drops this collection from the Database. + * + * @param clientSession the client session with which to associate this operation + * @return an empty publisher that indicates when the operation has completed + * @mongodb.driver.manual reference/command/drop/ Drop Collection + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher drop(ClientSession clientSession); + + /** + * Drops this collection from the Database. + * + * @param dropCollectionOptions various options for dropping the collection + * @return an empty publisher that indicates when the operation has completed + * @mongodb.driver.manual reference/command/drop/ Drop Collection + * @since 4.7 + * @mongodb.server.release 6.0 + */ + Publisher drop(DropCollectionOptions dropCollectionOptions); + + /** + * Drops this collection from the Database. + * + * @param clientSession the client session with which to associate this operation + * @param dropCollectionOptions various options for dropping the collection + * @return an empty publisher that indicates when the operation has completed + * @mongodb.driver.manual reference/command/drop/ Drop Collection + * @since 4.7 + * @mongodb.server.release 6.0 + */ + Publisher drop(ClientSession clientSession, DropCollectionOptions dropCollectionOptions); + + + /** + * Create an Atlas Search index for the collection. + * + * @param indexName the name of the search index to create. + * @param definition Atlas Search index mapping definition. + * @return a {@link Publisher} with search index name. + * @mongodb.server.release 6.0 + * @mongodb.driver.manual reference/command/createSearchIndexes/ Create Search indexes + * @since 4.11 + */ + Publisher createSearchIndex(String indexName, Bson definition); + + /** + * Create an Atlas Search index with {@code "default"} name for the collection. + * + * @param definition Atlas Search index mapping definition. + * @return a {@link Publisher} with search index name. + * @mongodb.server.release 6.0 + * @mongodb.driver.manual reference/command/createSearchIndexes/ Create Search indexes + * @since 4.11 + */ + Publisher createSearchIndex(Bson definition); + + /** + * Create one or more Atlas Search indexes for the collection. + *

<p>
+ * The name can be omitted for a single index, in which case a name will be {@code "default"}.
+ * </p>

+ * + * @param searchIndexModels the search index models. + * @return a {@link Publisher} with the search index names in the order specified by the given list {@link SearchIndexModel}s. + * @mongodb.server.release 6.0 + * @mongodb.driver.manual reference/command/createSearchIndexes/ Create Search indexes + * @since 4.11 + */ + Publisher createSearchIndexes(List searchIndexModels); + /** + * Update an Atlas Search index in the collection. + * + * @param indexName the name of the search index to update. + * @param definition Atlas Search index mapping definition. + * @return an empty publisher that indicates when the operation has completed. + * @mongodb.server.release 6.0 + * @mongodb.driver.manual reference/command/updateSearchIndex/ Update Search index + * @since 4.11 + */ + Publisher updateSearchIndex(String indexName, Bson definition); + /** + * Drop an Atlas Search index given its name. + * + * @param indexName the name of the search index to drop. + * @return an empty publisher that indicates when the operation has completed. + * @mongodb.server.release 6.0 + * @mongodb.driver.manual reference/command/dropSearchIndex/ Drop Search index + * @since 4.11 + */ + Publisher dropSearchIndex(String indexName); + + /** + * Get all Atlas Search indexes in this collection. + * + * @return the fluent list search indexes interface. + * @since 4.11 + * @mongodb.server.release 6.0 + */ + ListSearchIndexesPublisher listSearchIndexes(); + + /** + * Get all Atlas Search indexes in this collection. + * + * @param resultClass the class to decode each document into. + * @param the target document type of the iterable. + * @return the fluent list search indexes interface. + * @since 4.11 + * @mongodb.server.release 6.0 + */ + ListSearchIndexesPublisher listSearchIndexes(Class resultClass); + + /** + * Creates an index. + * + * @param key an object describing the index key(s), which may not be null. + * @return an empty publisher that indicates when the operation has completed + * @mongodb.driver.manual reference/method/db.collection.ensureIndex Ensure Index + */ + Publisher createIndex(Bson key); + + /** + * Creates an index. + * + * @param key an object describing the index key(s), which may not be null. + * @param options the options for the index + * @return an empty publisher that indicates when the operation has completed + * @mongodb.driver.manual reference/method/db.collection.ensureIndex Ensure Index + */ + Publisher createIndex(Bson key, IndexOptions options); + + /** + * Creates an index. + * + * @param clientSession the client session with which to associate this operation + * @param key an object describing the index key(s), which may not be null. + * @return an empty publisher that indicates when the operation has completed + * @mongodb.driver.manual reference/method/db.collection.ensureIndex Ensure Index + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher createIndex(ClientSession clientSession, Bson key); + + /** + * Creates an index. + * + * @param clientSession the client session with which to associate this operation + * @param key an object describing the index key(s), which may not be null. + * @param options the options for the index + * @return an empty publisher that indicates when the operation has completed + * @mongodb.driver.manual reference/method/db.collection.ensureIndex Ensure Index + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher createIndex(ClientSession clientSession, Bson key, IndexOptions options); + + /** + * Create multiple indexes. 
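Index creation follows the same publisher pattern; a sketch covering a plain index, a compound index with options, and an Atlas Search index (the mapping document is illustrative, and search indexes require an Atlas deployment):

    // createIndexes emits the created index names.
    Flux.from(collection.createIndexes(Arrays.asList(
                    new IndexModel(Indexes.ascending("name")),
                    new IndexModel(Indexes.compoundIndex(Indexes.ascending("status"), Indexes.descending("stars")),
                            new IndexOptions().unique(true)))))
            .doOnNext(System.out::println)
            .blockLast();

    // Atlas Search index: a name plus a mapping definition.
    Mono.from(collection.createSearchIndex("default",
                    new Document("mappings", new Document("dynamic", true))))
            .block();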
+ * + * @param indexes the list of indexes + * @return an empty publisher that indicates when the operation has completed + * @mongodb.driver.manual reference/command/createIndexes Create indexes + */ + Publisher createIndexes(List indexes); + + /** + * Create multiple indexes. + * + * @param indexes the list of indexes + * @param createIndexOptions options to use when creating indexes + * @return an empty publisher that indicates when the operation has completed + * @mongodb.driver.manual reference/command/createIndexes Create indexes + * @since 1.7 + */ + Publisher createIndexes(List indexes, CreateIndexOptions createIndexOptions); + + /** + * Create multiple indexes. + * + * @param clientSession the client session with which to associate this operation + * @param indexes the list of indexes + * @return an empty publisher that indicates when the operation has completed + * @mongodb.driver.manual reference/command/createIndexes Create indexes + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher createIndexes(ClientSession clientSession, List indexes); + + /** + * Create multiple indexes. + * + * @param clientSession the client session with which to associate this operation + * @param indexes the list of indexes + * @param createIndexOptions options to use when creating indexes + * @return an empty publisher that indicates when the operation has completed + * @mongodb.driver.manual reference/command/createIndexes Create indexes + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher createIndexes(ClientSession clientSession, List indexes, CreateIndexOptions createIndexOptions); + + /** + * Get all the indexes in this collection. + * + * @return the fluent list indexes interface + * @mongodb.driver.manual reference/command/listIndexes/ listIndexes + */ + ListIndexesPublisher listIndexes(); + + /** + * Get all the indexes in this collection. + * + * @param clazz the class to decode each document into + * @param the target document type of the iterable. + * @return the fluent list indexes interface + * @mongodb.driver.manual reference/command/listIndexes/ listIndexes + */ + ListIndexesPublisher listIndexes(Class clazz); + + /** + * Get all the indexes in this collection. + * + * @param clientSession the client session with which to associate this operation + * @return the fluent list indexes interface + * @mongodb.driver.manual reference/command/listIndexes/ listIndexes + * @mongodb.server.release 3.6 + * @since 1.7 + */ + ListIndexesPublisher listIndexes(ClientSession clientSession); + + /** + * Get all the indexes in this collection. + * + * @param clientSession the client session with which to associate this operation + * @param clazz the class to decode each document into + * @param the target document type of the iterable. + * @return the fluent list indexes interface + * @mongodb.driver.manual reference/command/listIndexes/ listIndexes + * @mongodb.server.release 3.6 + * @since 1.7 + */ + ListIndexesPublisher listIndexes(ClientSession clientSession, Class clazz); + + /** + * Drops the given index. + * + * @param indexName the name of the index to remove + * @return an empty publisher that indicates when the operation has completed + * @mongodb.driver.manual reference/command/dropIndexes/ Drop Indexes + */ + Publisher dropIndex(String indexName); + + /** + * Drops the index given the keys used to create it. 
+ * + * @param keys the keys of the index to remove + * @return an empty publisher that indicates when the operation has completed + * @mongodb.driver.manual reference/command/dropIndexes/ Drop indexes + */ + Publisher dropIndex(Bson keys); + + /** + * Drops the given index. + * + * @param indexName the name of the index to remove + * @param dropIndexOptions options to use when dropping indexes + * @return an empty publisher that indicates when the operation has completed + * @mongodb.driver.manual reference/command/dropIndexes/ Drop Indexes + * @since 1.7 + */ + Publisher dropIndex(String indexName, DropIndexOptions dropIndexOptions); + + /** + * Drops the index given the keys used to create it. + * + * @param keys the keys of the index to remove + * @param dropIndexOptions options to use when dropping indexes + * @return an empty publisher that indicates when the operation has completed + * @mongodb.driver.manual reference/command/dropIndexes/ Drop indexes + * @since 1.7 + */ + Publisher dropIndex(Bson keys, DropIndexOptions dropIndexOptions); + + /** + * Drops the given index. + * + * @param clientSession the client session with which to associate this operation + * @param indexName the name of the index to remove + * @return an empty publisher that indicates when the operation has completed + * @mongodb.driver.manual reference/command/dropIndexes/ Drop Indexes + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher dropIndex(ClientSession clientSession, String indexName); + + /** + * Drops the index given the keys used to create it. + * + * @param clientSession the client session with which to associate this operation + * @param keys the keys of the index to remove + * @return an empty publisher that indicates when the operation has completed + * @mongodb.driver.manual reference/command/dropIndexes/ Drop indexes + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher dropIndex(ClientSession clientSession, Bson keys); + + /** + * Drops the given index. + * + * @param clientSession the client session with which to associate this operation + * @param indexName the name of the index to remove + * @param dropIndexOptions options to use when dropping indexes + * @return an empty publisher that indicates when the operation has completed + * @mongodb.driver.manual reference/command/dropIndexes/ Drop Indexes + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher dropIndex(ClientSession clientSession, String indexName, DropIndexOptions dropIndexOptions); + + /** + * Drops the index given the keys used to create it. + * + * @param clientSession the client session with which to associate this operation + * @param keys the keys of the index to remove + * @param dropIndexOptions options to use when dropping indexes + * @return an empty publisher that indicates when the operation has completed + * @mongodb.driver.manual reference/command/dropIndexes/ Drop indexes + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher dropIndex(ClientSession clientSession, Bson keys, DropIndexOptions dropIndexOptions); + + /** + * Drop all the indexes on this collection, except for the default on _id. + * + * @return an empty publisher that indicates when the operation has completed + * @mongodb.driver.manual reference/command/dropIndexes/ Drop Indexes + */ + Publisher dropIndexes(); + + /** + * Drop all the indexes on this collection, except for the default on _id. 
+ * + * @param dropIndexOptions options to use when dropping indexes + * @return an empty publisher that indicates when the operation has completed + * @mongodb.driver.manual reference/command/dropIndexes/ Drop Indexes + * @since 1.7 + */ + Publisher dropIndexes(DropIndexOptions dropIndexOptions); + + /** + * Drop all the indexes on this collection, except for the default on _id. + * + * @param clientSession the client session with which to associate this operation + * @return an empty publisher that indicates when the operation has completed + * @mongodb.driver.manual reference/command/dropIndexes/ Drop Indexes + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher dropIndexes(ClientSession clientSession); + + /** + * Drop all the indexes on this collection, except for the default on _id. + * + * @param clientSession the client session with which to associate this operation + * @param dropIndexOptions options to use when dropping indexes + * @return an empty publisher that indicates when the operation has completed + * @mongodb.driver.manual reference/command/dropIndexes/ Drop Indexes + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher dropIndexes(ClientSession clientSession, DropIndexOptions dropIndexOptions); + + /** + * Rename the collection with oldCollectionName to the newCollectionName. + * + * @param newCollectionNamespace the namespace the collection will be renamed to + * @return an empty publisher that indicates when the operation has completed + * @mongodb.driver.manual reference/commands/renameCollection Rename collection + */ + Publisher renameCollection(MongoNamespace newCollectionNamespace); + + /** + * Rename the collection with oldCollectionName to the newCollectionName. + * + * @param newCollectionNamespace the name the collection will be renamed to + * @param options the options for renaming a collection + * @return an empty publisher that indicates when the operation has completed + * @mongodb.driver.manual reference/commands/renameCollection Rename collection + */ + Publisher renameCollection(MongoNamespace newCollectionNamespace, RenameCollectionOptions options); + + + /** + * Rename the collection with oldCollectionName to the newCollectionName. + * + * @param clientSession the client session with which to associate this operation + * @param newCollectionNamespace the namespace the collection will be renamed to + * @return an empty publisher that indicates when the operation has completed + * @mongodb.driver.manual reference/commands/renameCollection Rename collection + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher renameCollection(ClientSession clientSession, MongoNamespace newCollectionNamespace); + + /** + * Rename the collection with oldCollectionName to the newCollectionName. 
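And for renameCollection: the target is a full namespace (database plus collection name), and dropTarget controls whether an existing target collection may be overwritten (a sketch):

    Mono.from(collection.renameCollection(
                    new MongoNamespace("test", "restaurantsArchive"),
                    new RenameCollectionOptions().dropTarget(true)))
            .block();   // completes empty once the rename is applied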
+ * + * @param clientSession the client session with which to associate this operation + * @param newCollectionNamespace the name the collection will be renamed to + * @param options the options for renaming a collection + * @return an empty publisher that indicates when the operation has completed + * @mongodb.driver.manual reference/commands/renameCollection Rename collection + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher renameCollection(ClientSession clientSession, MongoNamespace newCollectionNamespace, + RenameCollectionOptions options); +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoDatabase.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoDatabase.java new file mode 100644 index 00000000000..b479ece08c5 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/MongoDatabase.java @@ -0,0 +1,630 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.CreateViewOptions; +import com.mongodb.lang.Nullable; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; +import org.reactivestreams.Publisher; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * The MongoDatabase interface. + * + *

+ * <p>Note: Additions to this interface will not be considered to break binary compatibility.</p>
+ * @since 1.0 + */ +@ThreadSafe +public interface MongoDatabase { + /** + * Gets the name of the database. + * + * @return the database name + */ + String getName(); + + /** + * Get the codec registry for the MongoDatabase. + * + * @return the {@link org.bson.codecs.configuration.CodecRegistry} + */ + CodecRegistry getCodecRegistry(); + + /** + * Get the read preference for the MongoDatabase. + * + * @return the {@link com.mongodb.ReadPreference} + */ + ReadPreference getReadPreference(); + + /** + * Get the write concern for the MongoDatabase. + * + * @return the {@link com.mongodb.WriteConcern} + */ + WriteConcern getWriteConcern(); + + /** + * Get the read concern for the MongoCollection. + * + * @return the {@link com.mongodb.ReadConcern} + * @since 1.2 + * @mongodb.server.release 3.2 + */ + ReadConcern getReadConcern(); + + /** + * The time limit for the full execution of an operation. + * + *

+ * <p>If not null the following deprecated options will be ignored:
+ * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p>
+ * + *
+ * <ul>
+ *     <li>{@code null} means that the timeout mechanism for operations will defer to using:
+ *         <ul>
+ *             <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+ *             available</li>
+ *             <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+ *             <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+ *             <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+ *             See: cursor.maxTimeMS.</li>
+ *             <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+ *             See: {@link com.mongodb.TransactionOptions#getMaxCommitTime}.</li>
+ *         </ul>
+ *     </li>
+ *     <li>{@code 0} means infinite timeout.</li>
+ *     <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+ * </ul>
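+ *
+ * <p>A usage sketch (not part of the interface contract; {@code database} is assumed to be an existing {@code MongoDatabase}
+ * instance):</p>
+ * <pre>
+ *  {@code
+ *      // returns null when no timeout has been configured for this MongoDatabase
+ *      Long timeoutMs = database.getTimeout(TimeUnit.MILLISECONDS);
+ *  }
+ * </pre>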
+ * + * @param timeUnit the time unit + * @return the timeout in the given time unit + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + @Nullable + Long getTimeout(TimeUnit timeUnit); + + /** + * Create a new MongoDatabase instance with a different codec registry. + * + *

+ * <p>The {@link CodecRegistry} configured by this method is effectively treated by the driver as an instance of
+ * {@link org.bson.codecs.configuration.CodecProvider}, which {@link CodecRegistry} extends. So there is no benefit to defining
+ * a class that implements {@link CodecRegistry}. Rather, an application should always create {@link CodecRegistry} instances
+ * using the factory methods in {@link org.bson.codecs.configuration.CodecRegistries}.</p>
+ * + * @param codecRegistry the new {@link org.bson.codecs.configuration.CodecRegistry} for the collection + * @return a new MongoDatabase instance with the different codec registry + * @see org.bson.codecs.configuration.CodecRegistries + */ + MongoDatabase withCodecRegistry(CodecRegistry codecRegistry); + + /** + * Create a new MongoDatabase instance with a different read preference. + * + * @param readPreference the new {@link com.mongodb.ReadPreference} for the collection + * @return a new MongoDatabase instance with the different readPreference + */ + MongoDatabase withReadPreference(ReadPreference readPreference); + + /** + * Create a new MongoDatabase instance with a different write concern. + * + * @param writeConcern the new {@link com.mongodb.WriteConcern} for the collection + * @return a new MongoDatabase instance with the different writeConcern + */ + MongoDatabase withWriteConcern(WriteConcern writeConcern); + + /** + * Create a new MongoDatabase instance with a different read concern. + * + * @param readConcern the new {@link ReadConcern} for the collection + * @return a new MongoDatabase instance with the different ReadConcern + * @since 1.2 + * @mongodb.server.release 3.2 + */ + MongoDatabase withReadConcern(ReadConcern readConcern); + + /** + * Create a new MongoDatabase instance with the set time limit for the full execution of an operation. + * + *
+ * <ul>
+ *     <li>{@code 0} means infinite timeout.</li>
+ *     <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+ * </ul>
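+ *
+ * <p>For example (a sketch; {@code database} is assumed to be an existing {@code MongoDatabase} instance):</p>
+ * <pre>
+ *  {@code
+ *      MongoDatabase timeoutAware = database.withTimeout(2, TimeUnit.SECONDS);
+ *      Long timeoutMs = timeoutAware.getTimeout(TimeUnit.MILLISECONDS); // 2000
+ *  }
+ * </pre>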
+ * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit + * @return a new MongoDatabase instance with the set time limit for the full execution of an operation. + * @since 5.2 + * @see #getTimeout + */ + @Alpha(Reason.CLIENT) + MongoDatabase withTimeout(long timeout, TimeUnit timeUnit); + + /** + * Gets a collection. + * + * @param collectionName the name of the collection to return + * @return the collection + */ + MongoCollection getCollection(String collectionName); + + /** + * Gets a collection, with a specific default document class. + * + * @param collectionName the name of the collection to return + * @param clazz the default class to cast any documents returned from the database into. + * @param the type of the class to use instead of {@code Document}. + * @return the collection + */ + MongoCollection getCollection(String collectionName, Class clazz); + + /** + * Executes command in the context of the current database. + * + *

+ * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+ * {@code timeoutMS} setting has been set.</p>
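+ *
+ * <p>A usage sketch (assuming {@code database} is an existing {@code MongoDatabase}; the {@code ping} command is used purely as an
+ * illustration):</p>
+ * <pre>
+ *  {@code
+ *      Publisher<Document> result = database.runCommand(new Document("ping", 1));
+ *  }
+ * </pre>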
+ * + * @param command the command to be run + * @return a publisher containing the command result + */ + Publisher runCommand(Bson command); + + /** + * Executes command in the context of the current database. + * + *

+ * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+ * {@code timeoutMS} setting has been set.</p>
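+ *
+ * <p>For example, running a command against a secondary when one is available (a sketch; {@code database} is assumed to exist):</p>
+ * <pre>
+ *  {@code
+ *      database.runCommand(new Document("dbStats", 1), ReadPreference.secondaryPreferred());
+ *  }
+ * </pre>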
+ * + * @param command the command to be run + * @param readPreference the {@link com.mongodb.ReadPreference} to be used when executing the command + * @return a publisher containing the command result + */ + Publisher runCommand(Bson command, ReadPreference readPreference); + + /** + * Executes command in the context of the current database. + * + *

+ * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+ * {@code timeoutMS} setting has been set.</p>
+ * + * @param command the command to be run + * @param clazz the default class to cast any documents returned from the database into. + * @param the type of the class to use instead of {@code Document}. + * @return a publisher containing the command result + */ + Publisher runCommand(Bson command, Class clazz); + + /** + * Executes command in the context of the current database. + * + *

+ * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+ * {@code timeoutMS} setting has been set.</p>
+ * + * @param command the command to be run + * @param readPreference the {@link com.mongodb.ReadPreference} to be used when executing the command + * @param clazz the default class to cast any documents returned from the database into. + * @param the type of the class to use instead of {@code Document}. + * @return a publisher containing the command result + */ + Publisher runCommand(Bson command, ReadPreference readPreference, Class clazz); + + /** + * Executes command in the context of the current database. + * + *

+ * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+ * {@code timeoutMS} setting has been set.</p>
+ * + * @param clientSession the client session with which to associate this operation + * @param command the command to be run + * @return a publisher containing the command result + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher runCommand(ClientSession clientSession, Bson command); + + /** + * Executes command in the context of the current database. + * + *

+ * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+ * {@code timeoutMS} setting has been set.</p>
+ * + * @param clientSession the client session with which to associate this operation + * @param command the command to be run + * @param readPreference the {@link com.mongodb.ReadPreference} to be used when executing the command + * @return a publisher containing the command result + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher runCommand(ClientSession clientSession, Bson command, ReadPreference readPreference); + + /** + * Executes command in the context of the current database. + * + *

+ * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+ * {@code timeoutMS} setting has been set.</p>
+ * + * @param clientSession the client session with which to associate this operation + * @param command the command to be run + * @param clazz the default class to cast any documents returned from the database into. + * @param the type of the class to use instead of {@code Document}. + * @return a publisher containing the command result + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher runCommand(ClientSession clientSession, Bson command, Class clazz); + + /** + * Executes command in the context of the current database. + * + *

+ * <p>Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the
+ * {@code timeoutMS} setting has been set.</p>
+ * + * @param clientSession the client session with which to associate this operation + * @param command the command to be run + * @param readPreference the {@link com.mongodb.ReadPreference} to be used when executing the command + * @param clazz the default class to cast any documents returned from the database into. + * @param the type of the class to use instead of {@code Document}. + * @return a publisher containing the command result + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher runCommand(ClientSession clientSession, Bson command, ReadPreference readPreference, Class clazz); + + /** + * Drops this database. + * + * @return a publisher identifying when the database has been dropped + * @mongodb.driver.manual reference/commands/dropDatabase/#dbcmd.dropDatabase Drop database + */ + Publisher drop(); + + /** + * Drops this database. + * + * @param clientSession the client session with which to associate this operation + * @return a publisher identifying when the database has been dropped + * @mongodb.driver.manual reference/commands/dropDatabase/#dbcmd.dropDatabase Drop database + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher drop(ClientSession clientSession); + + /** + * Gets the names of all the collections in this database. + * + * @return a publisher with all the names of all the collections in this database + * @mongodb.driver.manual reference/command/listCollections listCollections + */ + ListCollectionNamesPublisher listCollectionNames(); + + /** + * Gets the names of all the collections in this database. + * + * @param clientSession the client session with which to associate this operation + * @return a publisher with all the names of all the collections in this database + * @mongodb.driver.manual reference/command/listCollections listCollections + * @mongodb.server.release 3.6 + * @since 1.7 + */ + ListCollectionNamesPublisher listCollectionNames(ClientSession clientSession); + + /** + * Finds all the collections in this database. + * + * @return the fluent list collections interface + * @mongodb.driver.manual reference/command/listCollections listCollections + */ + ListCollectionsPublisher listCollections(); + + /** + * Finds all the collections in this database. + * + * @param clazz the class to decode each document into + * @param the target document type of the iterable. + * @return the fluent list collections interface + * @mongodb.driver.manual reference/command/listCollections listCollections + */ + ListCollectionsPublisher listCollections(Class clazz); + + /** + * Finds all the collections in this database. + * + * @param clientSession the client session with which to associate this operation + * @return the fluent list collections interface + * @mongodb.driver.manual reference/command/listCollections listCollections + * @mongodb.server.release 3.6 + * @since 1.7 + */ + ListCollectionsPublisher listCollections(ClientSession clientSession); + + /** + * Finds all the collections in this database. + * + * @param clientSession the client session with which to associate this operation + * @param clazz the class to decode each document into + * @param the target document type of the iterable. + * @return the fluent list collections interface + * @mongodb.driver.manual reference/command/listCollections listCollections + * @mongodb.server.release 3.6 + * @since 1.7 + */ + ListCollectionsPublisher listCollections(ClientSession clientSession, Class clazz); + + /** + * Create a new collection with the given name. 
+ * + * @param collectionName the name for the new collection to create + * @return a publisher identifying when the collection has been created + * @mongodb.driver.manual reference/commands/create Create Command + */ + Publisher createCollection(String collectionName); + + /** + * Create a new collection with the selected options + * + * @param collectionName the name for the new collection to create + * @param options various options for creating the collection + * @return a publisher identifying when the collection has been created + * @mongodb.driver.manual reference/commands/create Create Command + */ + Publisher createCollection(String collectionName, CreateCollectionOptions options); + + /** + * Create a new collection with the given name. + * + * @param clientSession the client session with which to associate this operation + * @param collectionName the name for the new collection to create + * @return a publisher identifying when the collection has been created + * @mongodb.driver.manual reference/commands/create Create Command + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher createCollection(ClientSession clientSession, String collectionName); + + /** + * Create a new collection with the selected options + * + * @param clientSession the client session with which to associate this operation + * @param collectionName the name for the new collection to create + * @param options various options for creating the collection + * @return a publisher identifying when the collection has been created + * @mongodb.driver.manual reference/commands/create Create Command + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher createCollection(ClientSession clientSession, String collectionName, CreateCollectionOptions options); + + /** + * Creates a view with the given name, backing collection/view name, and aggregation pipeline that defines the view. + * + * @param viewName the name of the view to create + * @param viewOn the backing collection/view for the view + * @param pipeline the pipeline that defines the view + * @return an observable identifying when the collection view has been created + * @since 1.3 + * @mongodb.server.release 3.4 + * @mongodb.driver.manual reference/command/create Create Command + */ + Publisher createView(String viewName, String viewOn, List pipeline); + + /** + * Creates a view with the given name, backing collection/view name, aggregation pipeline, and options that defines the view. + * + * @param viewName the name of the view to create + * @param viewOn the backing collection/view for the view + * @param pipeline the pipeline that defines the view + * @param createViewOptions various options for creating the view + * @return an observable identifying when the collection view has been created + * @since 1.3 + * @mongodb.server.release 3.4 + * @mongodb.driver.manual reference/command/create Create Command + */ + Publisher createView(String viewName, String viewOn, List pipeline, CreateViewOptions createViewOptions); + + /** + * Creates a view with the given name, backing collection/view name, and aggregation pipeline that defines the view. 
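+ *
+ * <p>A usage sketch (assuming {@code database} and {@code clientSession} already exist; the view name, backing collection and
+ * pipeline are illustrative):</p>
+ * <pre>
+ *  {@code
+ *      database.createView(clientSession, "activeUsers", "users",
+ *              Collections.singletonList(Aggregates.match(Filters.eq("active", true))));
+ *  }
+ * </pre>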
+ * + * @param clientSession the client session with which to associate this operation + * @param viewName the name of the view to create + * @param viewOn the backing collection/view for the view + * @param pipeline the pipeline that defines the view + * @return an observable identifying when the collection view has been created + * @mongodb.driver.manual reference/command/create Create Command + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher createView(ClientSession clientSession, String viewName, String viewOn, List pipeline); + + /** + * Creates a view with the given name, backing collection/view name, aggregation pipeline, and options that defines the view. + * + * @param clientSession the client session with which to associate this operation + * @param viewName the name of the view to create + * @param viewOn the backing collection/view for the view + * @param pipeline the pipeline that defines the view + * @param createViewOptions various options for creating the view + * @return an observable identifying when the collection view has been created + * @mongodb.driver.manual reference/command/create Create Command + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher createView(ClientSession clientSession, String viewName, String viewOn, List pipeline, + CreateViewOptions createViewOptions); + + /** + * Creates a change stream for this database. + * + * @return the change stream iterable + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 1.9 + * @mongodb.server.release 4.0 + */ + ChangeStreamPublisher watch(); + + /** + * Creates a change stream for this database. + * + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 1.9 + * @mongodb.server.release 4.0 + */ + ChangeStreamPublisher watch(Class resultClass); + + /** + * Creates a change stream for this database. + * + * @param pipeline the aggregation pipeline to apply to the change stream. + * @return the change stream iterable + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 1.9 + * @mongodb.server.release 4.0 + */ + ChangeStreamPublisher watch(List pipeline); + + /** + * Creates a change stream for this database. + * + * @param pipeline the aggregation pipeline to apply to the change stream + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 1.9 + * @mongodb.server.release 4.0 + */ + ChangeStreamPublisher watch(List pipeline, Class resultClass); + + /** + * Creates a change stream for this database. + * + * @param clientSession the client session with which to associate this operation + * @return the change stream iterable + * @since 1.9 + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + */ + ChangeStreamPublisher watch(ClientSession clientSession); + + /** + * Creates a change stream for this database. + * + * @param clientSession the client session with which to associate this operation + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. 
+ * @return the change stream iterable + * @since 1.9 + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + */ + ChangeStreamPublisher watch(ClientSession clientSession, Class resultClass); + + /** + * Creates a change stream for this database. + * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream. + * @return the change stream iterable + * @since 1.9 + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + */ + ChangeStreamPublisher watch(ClientSession clientSession, List pipeline); + + /** + * Creates a change stream for this database. + * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @since 1.9 + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + */ + ChangeStreamPublisher watch(ClientSession clientSession, List pipeline, Class resultClass); + + /** + * Runs an aggregation framework pipeline on the database for pipeline stages + * that do not require an underlying collection, such as {@code $currentOp} and {@code $listLocalSessions}. + * + * @param pipeline the aggregation pipeline + * @return an iterable containing the result of the aggregation operation + * @since 1.11 + * @mongodb.driver.manual reference/command/aggregate/#dbcmd.aggregate Aggregate Command + * @mongodb.server.release 3.6 + */ + AggregatePublisher aggregate(List pipeline); + + /** + * Runs an aggregation framework pipeline on the database for pipeline stages + * that do not require an underlying collection, such as {@code $currentOp} and {@code $listLocalSessions}. + * + * @param pipeline the aggregation pipeline + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return an iterable containing the result of the aggregation operation + * @since 1.11 + * @mongodb.driver.manual reference/command/aggregate/#dbcmd.aggregate Aggregate Command + * @mongodb.server.release 3.6 + */ + AggregatePublisher aggregate(List pipeline, Class resultClass); + + /** + * Runs an aggregation framework pipeline on the database for pipeline stages + * that do not require an underlying collection, such as {@code $currentOp} and {@code $listLocalSessions}. + * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline + * @return an iterable containing the result of the aggregation operation + * @since 1.11 + * @mongodb.driver.manual reference/command/aggregate/#dbcmd.aggregate Aggregate Command + * @mongodb.server.release 3.6 + */ + AggregatePublisher aggregate(ClientSession clientSession, List pipeline); + + /** + * Runs an aggregation framework pipeline on the database for pipeline stages + * that do not require an underlying collection, such as {@code $currentOp} and {@code $listLocalSessions}. + * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. 
+ * @return an iterable containing the result of the aggregation operation + * @since 1.11 + * @mongodb.driver.manual reference/command/aggregate/#dbcmd.aggregate Aggregate Command + * @mongodb.server.release 3.6 + */ + AggregatePublisher aggregate(ClientSession clientSession, List pipeline, Class resultClass); + +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ReactiveContextProvider.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ReactiveContextProvider.java new file mode 100644 index 00000000000..6bbb3d37a1a --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/ReactiveContextProvider.java @@ -0,0 +1,38 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.ContextProvider; +import com.mongodb.RequestContext; +import com.mongodb.annotations.ThreadSafe; +import org.reactivestreams.Subscriber; + +/** + * A {@code ContextProvider} for reactive clients. + * + * @since 4.4 + */ +@ThreadSafe +public interface ReactiveContextProvider extends ContextProvider { + /** + * Get the request context from the subscriber. + * + * @param subscriber the subscriber for the operation + * @return the request context + */ + RequestContext getContext(Subscriber subscriber); +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/gridfs/GridFSBucket.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/gridfs/GridFSBucket.java new file mode 100644 index 00000000000..78a3f5357fc --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/gridfs/GridFSBucket.java @@ -0,0 +1,530 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.gridfs; + +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.client.gridfs.model.GridFSDownloadOptions; +import com.mongodb.client.gridfs.model.GridFSUploadOptions; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.ClientSession; +import org.bson.BsonValue; +import org.bson.conversions.Bson; +import org.bson.types.ObjectId; +import org.reactivestreams.Publisher; + +import java.nio.ByteBuffer; +import java.util.concurrent.TimeUnit; + +/** + * Represents a GridFS Bucket + * + * @since 1.3 + */ +@ThreadSafe +public interface GridFSBucket { + + /** + * The bucket name. + * + * @return the bucket name + */ + String getBucketName(); + + /** + * Sets the chunk size in bytes. Defaults to 255. + * + * @return the chunk size in bytes. + */ + int getChunkSizeBytes(); + + /** + * Get the write concern for the GridFSBucket. + * + * @return the {@link com.mongodb.WriteConcern} + */ + WriteConcern getWriteConcern(); + + /** + * Get the read preference for the GridFSBucket. + * + * @return the {@link com.mongodb.ReadPreference} + */ + ReadPreference getReadPreference(); + + /** + * Get the read concern for the GridFSBucket. + * + * @return the {@link com.mongodb.ReadConcern} + * @mongodb.server.release 3.2 + * @mongodb.driver.manual reference/readConcern/ Read Concern + */ + ReadConcern getReadConcern(); + + /** + * The time limit for the full execution of an operation. + * + *

+ * <p>If not null the following deprecated options will be ignored:
+ * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}</p>
+ * + *
+ * <ul>
+ *     <li>{@code null} means that the timeout mechanism for operations will defer to using:
+ *         <ul>
+ *             <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+ *             available</li>
+ *             <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+ *             <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+ *             <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+ *             See: cursor.maxTimeMS.</li>
+ *             <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+ *             See: {@link com.mongodb.TransactionOptions#getMaxCommitTime}.</li>
+ *         </ul>
+ *     </li>
+ *     <li>{@code 0} means infinite timeout.</li>
+ *     <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+ * </ul>
+ * + * @param timeUnit the time unit + * @return the timeout in the given time unit + * @since 4.x + */ + @Alpha(Reason.CLIENT) + @Nullable + Long getTimeout(TimeUnit timeUnit); + + /** + * Create a new GridFSBucket instance with a new chunk size in bytes. + * + * @param chunkSizeBytes the new chunk size in bytes. + * @return a new GridFSBucket instance with the different chunk size in bytes + */ + GridFSBucket withChunkSizeBytes(int chunkSizeBytes); + + /** + * Create a new GridFSBucket instance with a different read preference. + * + * @param readPreference the new {@link ReadPreference} for the database + * @return a new GridFSBucket instance with the different readPreference + */ + GridFSBucket withReadPreference(ReadPreference readPreference); + + /** + * Create a new GridFSBucket instance with a different write concern. + * + * @param writeConcern the new {@link WriteConcern} for the database + * @return a new GridFSBucket instance with the different writeConcern + */ + GridFSBucket withWriteConcern(WriteConcern writeConcern); + + /** + * Create a new MongoDatabase instance with a different read concern. + * + * @param readConcern the new {@link ReadConcern} for the database + * @return a new GridFSBucket instance with the different ReadConcern + * @mongodb.server.release 3.2 + * @mongodb.driver.manual reference/readConcern/ Read Concern + */ + GridFSBucket withReadConcern(ReadConcern readConcern); + + /** + * Create a new GridFSBucket instance with the set time limit for the full execution of an operation. + * + *
+ * <ul>
+ *     <li>{@code 0} means infinite timeout.</li>
+ *     <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+ * </ul>
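+ *
+ * <p>For example (a sketch; {@code gridFSBucket} is assumed to be an existing {@code GridFSBucket} instance):</p>
+ * <pre>
+ *  {@code
+ *      GridFSBucket timeoutAware = gridFSBucket.withTimeout(5, TimeUnit.SECONDS);
+ *  }
+ * </pre>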
+ * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit + * @return a new GridFSBucket instance with the set time limit for the full execution of an operation + * @since 4.x + * @see #getTimeout + */ + @Alpha(Reason.CLIENT) + GridFSBucket withTimeout(long timeout, TimeUnit timeUnit); + + /** + * Uploads the contents of the given {@code Publisher} to a GridFS bucket. + *

+ * <p>
+ * Reads the contents of the user file from the {@code source} and uploads it as chunks in the chunks collection. After all the
+ * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection.
+ * </p>
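+ *
+ * <p>A usage sketch (assuming {@code gridFSBucket} is an existing {@code GridFSBucket} and {@code source} is a
+ * {@code Publisher<ByteBuffer>} supplying the file's bytes):</p>
+ * <pre>
+ *  {@code
+ *      GridFSUploadPublisher<ObjectId> uploader = gridFSBucket.uploadFromPublisher("mongodb.png", source);
+ *  }
+ * </pre>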
+ * + * @param filename the filename + * @param source the Publisher providing the file data + * @return a Publisher with a single element, the ObjectId of the uploaded file. + * @since 1.13 + */ + GridFSUploadPublisher uploadFromPublisher(String filename, Publisher source); + + /** + * Uploads the contents of the given {@code Publisher} to a GridFS bucket. + *

+ * <p>
+ * Reads the contents of the user file from the {@code source} and uploads it as chunks in the chunks collection. After all the
+ * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection.
+ * </p>
+ * + * @param filename the filename + * @param source the Publisher providing the file data + * @param options the GridFSUploadOptions + * @return a Publisher with a single element, the ObjectId of the uploaded file. + * @since 1.13 + */ + GridFSUploadPublisher uploadFromPublisher(String filename, Publisher source, GridFSUploadOptions options); + + /** + * Uploads the contents of the given {@code Publisher} to a GridFS bucket. + *

+ * <p>
+ * Reads the contents of the user file from the {@code source} and uploads it as chunks in the chunks collection. After all the
+ * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection.
+ * </p>
+ * + * @param id the custom id value of the file + * @param filename the filename + * @param source the Publisher providing the file data + * @return a Publisher with a single element, representing when the successful upload of the source. + * @since 1.13 + */ + GridFSUploadPublisher uploadFromPublisher(BsonValue id, String filename, Publisher source); + + /** + * Uploads the contents of the given {@code Publisher} to a GridFS bucket. + *

+ * <p>
+ * Reads the contents of the user file from the {@code source} and uploads it as chunks in the chunks collection. After all the
+ * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection.
+ * </p>
+ * + * @param id the custom id value of the file + * @param filename the filename + * @param source the Publisher providing the file data + * @param options the GridFSUploadOptions + * @return a Publisher with a single element, representing when the successful upload of the source. + * @since 1.13 + */ + GridFSUploadPublisher uploadFromPublisher(BsonValue id, String filename, Publisher source, + GridFSUploadOptions options); + + /** + * Uploads the contents of the given {@code Publisher} to a GridFS bucket. + *

+ * <p>
+ * Reads the contents of the user file from the {@code source} and uploads it as chunks in the chunks collection. After all the
+ * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection.
+ * </p>
+ * + * @param clientSession the client session with which to associate this operation + * @param filename the filename + * @param source the Publisher providing the file data + * @return a Publisher with a single element, the ObjectId of the uploaded file. + * @mongodb.server.release 3.6 + * @since 1.13 + */ + GridFSUploadPublisher uploadFromPublisher(ClientSession clientSession, String filename, Publisher source); + + /** + * Uploads the contents of the given {@code Publisher} to a GridFS bucket. + *

+ * <p>
+ * Reads the contents of the user file from the {@code source} and uploads it as chunks in the chunks collection. After all the
+ * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection.
+ * </p>
+ * + * @param clientSession the client session with which to associate this operation + * @param filename the filename + * @param source the Publisher providing the file data + * @param options the GridFSUploadOptions + * @return a Publisher with a single element, the ObjectId of the uploaded file. + * @mongodb.server.release 3.6 + * @since 1.13 + */ + GridFSUploadPublisher uploadFromPublisher(ClientSession clientSession, String filename, Publisher source, + GridFSUploadOptions options); + + /** + * Uploads the contents of the given {@code Publisher} to a GridFS bucket. + *

+ * <p>
+ * Reads the contents of the user file from the {@code source} and uploads it as chunks in the chunks collection. After all the
+ * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection.
+ * </p>
+ * + * @param clientSession the client session with which to associate this operation + * @param id the custom id value of the file + * @param filename the filename + * @param source the Publisher providing the file data + * @return a Publisher with a single element, representing when the successful upload of the source. + * @mongodb.server.release 3.6 + * @since 1.13 + */ + GridFSUploadPublisher uploadFromPublisher(ClientSession clientSession, BsonValue id, String filename, + Publisher source); + + /** + * Uploads the contents of the given {@code Publisher} to a GridFS bucket. + *

+ * <p>
+ * Reads the contents of the user file from the {@code source} and uploads it as chunks in the chunks collection. After all the
+ * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection.
+ * </p>
+ * + * @param clientSession the client session with which to associate this operation + * @param id the custom id value of the file + * @param filename the filename + * @param source the Publisher providing the file data + * @param options the GridFSUploadOptions + * @return a Publisher with a single element, representing when the successful upload of the source. + * @mongodb.server.release 3.6 + * @since 1.13 + */ + GridFSUploadPublisher uploadFromPublisher(ClientSession clientSession, BsonValue id, String filename, + Publisher source, GridFSUploadOptions options); + + /** + * Downloads the contents of the stored file specified by {@code id} into the {@code Publisher}. + * + * @param id the ObjectId of the file to be written to the destination Publisher + * @return a Publisher with a single element, representing the amount of data written + * @since 1.13 + */ + GridFSDownloadPublisher downloadToPublisher(ObjectId id); + + /** + * Downloads the contents of the stored file specified by {@code id} into the {@code Publisher}. + * + * @param id the custom id of the file, to be written to the destination Publisher + * @return a Publisher with a single element, representing the amount of data written + * @since 1.13 + */ + GridFSDownloadPublisher downloadToPublisher(BsonValue id); + + /** + * Downloads the contents of the stored file specified by {@code filename} into the {@code Publisher}. + * + * @param filename the name of the file to be downloaded + * @return a Publisher with a single element, representing the amount of data written + * @since 1.13 + */ + GridFSDownloadPublisher downloadToPublisher(String filename); + + /** + * Downloads the contents of the stored file specified by {@code filename} and by the revision in {@code options} into the + * {@code Publisher}. + * + * @param filename the name of the file to be downloaded + * @param options the download options + * @return a Publisher with a single element, representing the amount of data written + * @since 1.13 + */ + GridFSDownloadPublisher downloadToPublisher(String filename, GridFSDownloadOptions options); + + /** + * Downloads the contents of the stored file specified by {@code id} into the {@code Publisher}. + * + * @param clientSession the client session with which to associate this operation + * @param id the ObjectId of the file to be written to the destination Publisher + * @return a Publisher with a single element, representing the amount of data written + * @mongodb.server.release 3.6 + * @since 1.13 + */ + GridFSDownloadPublisher downloadToPublisher(ClientSession clientSession, ObjectId id); + + /** + * Downloads the contents of the stored file specified by {@code id} into the {@code Publisher}. + * + * @param clientSession the client session with which to associate this operation + * @param id the custom id of the file, to be written to the destination Publisher + * @return a Publisher with a single element, representing the amount of data written + * @mongodb.server.release 3.6 + * @since 1.13 + */ + GridFSDownloadPublisher downloadToPublisher(ClientSession clientSession, BsonValue id); + + /** + * Downloads the contents of the latest version of the stored file specified by {@code filename} into the {@code Publisher}. 
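+ *
+ * <p>A usage sketch (assuming {@code gridFSBucket} and {@code clientSession} already exist):</p>
+ * <pre>
+ *  {@code
+ *      GridFSDownloadPublisher download = gridFSBucket.downloadToPublisher(clientSession, "mongodb.png");
+ *  }
+ * </pre>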
+ * + * @param clientSession the client session with which to associate this operation + * @param filename the name of the file to be downloaded + * @return a Publisher with a single element, representing the amount of data written + * @mongodb.server.release 3.6 + * @since 1.13 + */ + GridFSDownloadPublisher downloadToPublisher(ClientSession clientSession, String filename); + + /** + * Downloads the contents of the stored file specified by {@code filename} and by the revision in {@code options} into the + * {@code Publisher}. + * + * @param clientSession the client session with which to associate this operation + * @param filename the name of the file to be downloaded + * @param options the download options + * @return a Publisher with a single element, representing the amount of data written + * @mongodb.server.release 3.6 + * @since 1.13 + */ + GridFSDownloadPublisher downloadToPublisher(ClientSession clientSession, String filename, GridFSDownloadOptions options); + + /** + * Finds all documents in the files collection. + * + * @return the GridFS find iterable interface + * @mongodb.driver.manual tutorial/query-documents/ Find + */ + GridFSFindPublisher find(); + + /** + * Finds all documents in the collection that match the filter. + *

+     * <p>
+     * Below is an example of filtering against the filename and some nested metadata that can also be stored along with the file data:
+     * </p>
+     * <pre>
+     *  {@code
+     *      Filters.and(Filters.eq("filename", "mongodb.png"), Filters.eq("metadata.contentType", "image/png"));
+     *  }
+     * </pre>
+ * + * @param filter the query filter + * @return the GridFS find iterable interface + * @see com.mongodb.client.model.Filters + */ + GridFSFindPublisher find(Bson filter); + + /** + * Finds all documents in the files collection. + * + * @param clientSession the client session with which to associate this operation + * @return the GridFS find iterable interface + * @mongodb.driver.manual tutorial/query-documents/ Find + * @mongodb.server.release 3.6 + * @since 1.7 + */ + GridFSFindPublisher find(ClientSession clientSession); + + /** + * Finds all documents in the collection that match the filter. + *

+     * <p>
+     * Below is an example of filtering against the filename and some nested metadata that can also be stored along with the file data:
+     * </p>
+     * <pre>
+     *  {@code
+     *      Filters.and(Filters.eq("filename", "mongodb.png"), Filters.eq("metadata.contentType", "image/png"));
+     *  }
+     * </pre>
+ * + * @param clientSession the client session with which to associate this operation + * @param filter the query filter + * @return the GridFS find iterable interface + * @see com.mongodb.client.model.Filters + * @mongodb.server.release 3.6 + * @since 1.7 + */ + GridFSFindPublisher find(ClientSession clientSession, Bson filter); + + /** + * Given a {@code id}, delete this stored file's files collection document and associated chunks from a GridFS bucket. + * + * @param id the ObjectId of the file to be deleted + * @return a publisher with a single element, representing that the file has been deleted + */ + Publisher delete(ObjectId id); + + /** + * Given a {@code id}, delete this stored file's files collection document and associated chunks from a GridFS bucket. + * + * @param id the ObjectId of the file to be deleted + * @return a publisher with a single element, representing that the file has been deleted + */ + Publisher delete(BsonValue id); + + /** + * Given a {@code id}, delete this stored file's files collection document and associated chunks from a GridFS bucket. + * + * @param clientSession the client session with which to associate this operation + * @param id the ObjectId of the file to be deleted + * @return a publisher with a single element, representing that the file has been deleted + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher delete(ClientSession clientSession, ObjectId id); + + /** + * Given a {@code id}, delete this stored file's files collection document and associated chunks from a GridFS bucket. + * + * @param clientSession the client session with which to associate this operation + * @param id the ObjectId of the file to be deleted + * @return a publisher with a single element, representing that the file has been deleted + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher delete(ClientSession clientSession, BsonValue id); + + /** + * Renames the stored file with the specified {@code id}. + * + * @param id the id of the file in the files collection to rename + * @param newFilename the new filename for the file + * @return a publisher with a single element, representing that the file has been renamed + */ + Publisher rename(ObjectId id, String newFilename); + + /** + * Renames the stored file with the specified {@code id}. + * + * @param id the id of the file in the files collection to rename + * @param newFilename the new filename for the file + * @return a publisher with a single element, representing that the file has been renamed + */ + Publisher rename(BsonValue id, String newFilename); + + /** + * Renames the stored file with the specified {@code id}. + * + * @param clientSession the client session with which to associate this operation + * @param id the id of the file in the files collection to rename + * @param newFilename the new filename for the file + * @return a publisher with a single element, representing that the file has been renamed + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher rename(ClientSession clientSession, ObjectId id, String newFilename); + + /** + * Renames the stored file with the specified {@code id}. 
+ * + * @param clientSession the client session with which to associate this operation + * @param id the id of the file in the files collection to rename + * @param newFilename the new filename for the file + * @return a publisher with a single element, representing that the file has been renamed + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher rename(ClientSession clientSession, BsonValue id, String newFilename); + + /** + * Drops the data associated with this bucket from the database. + * + * @return a publisher with a single element, representing that the collections have been dropped + */ + Publisher drop(); + + /** + * Drops the data associated with this bucket from the database. + * + * @param clientSession the client session with which to associate this operation + * @return a publisher with a single element, representing that the collections have been dropped + * @mongodb.server.release 3.6 + * @since 1.7 + */ + Publisher drop(ClientSession clientSession); +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/gridfs/GridFSBuckets.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/gridfs/GridFSBuckets.java new file mode 100644 index 00000000000..ffea19a8708 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/gridfs/GridFSBuckets.java @@ -0,0 +1,65 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.gridfs; + +import com.mongodb.reactivestreams.client.MongoDatabase; +import com.mongodb.reactivestreams.client.internal.MongoDatabaseImpl; +import com.mongodb.reactivestreams.client.internal.gridfs.GridFSBucketImpl; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + * A factory for GridFSBucket instances. + * + *

+ * <p>Requires the concrete {@link MongoDatabaseImpl} implementation of the MongoDatabase interface.</p>
+ * + * @since 1.3 + */ +public final class GridFSBuckets { + + /** + * Create a new GridFS bucket with the default {@code 'fs'} bucket name + * + *

+     * <p>Requires the concrete {@link MongoDatabaseImpl} implementation of the MongoDatabase interface.</p>
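+     *
+     * <p>A usage sketch (assuming {@code database} is an existing {@code MongoDatabase} instance):</p>
+     * <pre>
+     *  {@code
+     *      GridFSBucket bucket = GridFSBuckets.create(database);
+     *  }
+     * </pre>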
+ * + * @param database the database instance to use with GridFS. + * @return the GridFSBucket + */ + public static GridFSBucket create(final MongoDatabase database) { + notNull("database", database); + return new GridFSBucketImpl(database); + } + + /** + * Create a new GridFS bucket with a custom bucket name + * + *

+     * <p>Requires the concrete {@link MongoDatabaseImpl} implementation of the MongoDatabase interface.</p>
+ * + * @param database the database instance to use with GridFS + * @param bucketName the custom bucket name to use + * @return the GridFSBucket + */ + public static GridFSBucket create(final MongoDatabase database, final String bucketName) { + notNull("database", database); + notNull("bucketName", bucketName); + return new GridFSBucketImpl(database, bucketName); + } + + private GridFSBuckets() { + } +} + diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/gridfs/GridFSDownloadPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/gridfs/GridFSDownloadPublisher.java new file mode 100644 index 00000000000..ac4f024d23c --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/gridfs/GridFSDownloadPublisher.java @@ -0,0 +1,56 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.gridfs; + +import com.mongodb.client.gridfs.model.GridFSFile; +import org.reactivestreams.Publisher; + +import java.nio.ByteBuffer; + +/** + * A GridFS Publisher for downloading data from GridFS + * + *

+ * <p>Provides the {@code GridFSFile} for the file being downloaded, as well as a way to control the batch size.</p>
+ * + * @since 1.13 + */ +public interface GridFSDownloadPublisher extends Publisher { + + /** + * Gets the corresponding {@link GridFSFile} for the file being downloaded + * + * @return a Publisher with a single element, the corresponding GridFSFile for the file being downloaded + */ + Publisher getGridFSFile(); + + /** + * The preferred number of bytes per {@code ByteBuffer} returned by the {@code Publisher}. + * + *

+ * <p>Allows for larger than chunk size ByteBuffers. The actual chunk size of the data stored in MongoDB is the smallest allowable
+ * {@code ByteBuffer} size.</p>
+ * + *

+ * <p>Can be used to control the memory consumption of this {@code Publisher}. The smaller the bufferSizeBytes, the lower the memory
+ * consumption and the higher the latency.</p>
+ * + *

+ * <p>Note: Must be set before the Publisher is subscribed to.</p>
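+ *
+ * <p>For example (a sketch; {@code gridFSBucket} is assumed to be an existing {@code GridFSBucket}):</p>
+ * <pre>
+ *  {@code
+ *      gridFSBucket.downloadToPublisher("mongodb.png").bufferSizeBytes(1024 * 1024); // roughly 1 MB per ByteBuffer
+ *  }
+ * </pre>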
+ * + * @param bufferSizeBytes the preferred buffer size in bytes to use per {@code ByteBuffer} in the {@code Publisher}, defaults to chunk + * size. + * @return this + */ + GridFSDownloadPublisher bufferSizeBytes(int bufferSizeBytes); +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/gridfs/GridFSFindPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/gridfs/GridFSFindPublisher.java new file mode 100644 index 00000000000..75f7a7cac10 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/gridfs/GridFSFindPublisher.java @@ -0,0 +1,127 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.gridfs; + +import com.mongodb.client.gridfs.model.GridFSFile; +import com.mongodb.client.model.Collation; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; +import org.reactivestreams.Publisher; + +import java.util.concurrent.TimeUnit; + +/** + * Iterable for the GridFS Files Collection. + * + * @since 1.3 + */ +public interface GridFSFindPublisher extends Publisher { + + /** + * Helper to return a publisher limited first from the query. + * + * @return a publisher with a single element + */ + Publisher first(); + + /** + * Sets the query filter to apply to the query. + *

+     * <p>
+     * Below is an example of filtering against the filename and some nested metadata that can also be stored along with the file data:
+     * </p>
+     * <pre>
+     *  {@code
+     *      Filters.and(Filters.eq("filename", "mongodb.png"), Filters.eq("metadata.contentType", "image/png"));
+     *  }
+     * </pre>
+ * + * @param filter the filter, which may be null. + * @return this + * @mongodb.driver.manual reference/method/db.collection.find/ Filter + * @see com.mongodb.client.model.Filters + */ + GridFSFindPublisher filter(@Nullable Bson filter); + + /** + * Sets the limit to apply. + * + * @param limit the limit, which may be null + * @return this + * @mongodb.driver.manual reference/method/cursor.limit/#cursor.limit Limit + */ + GridFSFindPublisher limit(int limit); + + /** + * Sets the number of documents to skip. + * + * @param skip the number of documents to skip + * @return this + * @mongodb.driver.manual reference/method/cursor.skip/#cursor.skip Skip + */ + GridFSFindPublisher skip(int skip); + + /** + * Sets the sort criteria to apply to the query. + * + * @param sort the sort criteria, which may be null. + * @return this + * @mongodb.driver.manual reference/method/cursor.sort/ Sort + */ + GridFSFindPublisher sort(@Nullable Bson sort); + + /** + * The server normally times out idle cursors after an inactivity period (10 minutes) + * to prevent excess memory use. Set this option to prevent that. + * + * @param noCursorTimeout true if cursor timeout is disabled + * @return this + */ + GridFSFindPublisher noCursorTimeout(boolean noCursorTimeout); + + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time + * @param timeUnit the time unit, which may not be null + * @return this + * @mongodb.driver.manual reference/method/cursor.maxTimeMS/#cursor.maxTimeMS Max Time + */ + GridFSFindPublisher maxTime(long maxTime, TimeUnit timeUnit); + + /** + * Sets the collation options + * + *

+ * <p>A null value represents the server default.</p>
+ * @param collation the collation options to use + * @return this + * @since 1.3 + * @mongodb.server.release 3.4 + */ + GridFSFindPublisher collation(@Nullable Collation collation); + + /** + * Sets the number of documents to return per batch. + * + *

+ * <p>Overrides the {@link org.reactivestreams.Subscription#request(long)} value for setting the batch size, allowing for fine-grained
+ * control over the underlying cursor.</p>
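+ *
+ * <p>For example (a sketch; {@code gridFSBucket} is assumed to be an existing {@code GridFSBucket}):</p>
+ * <pre>
+ *  {@code
+ *      gridFSBucket.find().batchSize(50);
+ *  }
+ * </pre>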
+ * + * @param batchSize the batch size + * @return this + * @since 1.8 + * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size + */ + GridFSFindPublisher batchSize(int batchSize); +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/gridfs/GridFSUploadPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/gridfs/GridFSUploadPublisher.java new file mode 100644 index 00000000000..3be3d1ce23e --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/gridfs/GridFSUploadPublisher.java @@ -0,0 +1,50 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.gridfs; + +import org.bson.BsonValue; +import org.bson.types.ObjectId; +import org.reactivestreams.Publisher; + +/** + * A GridFS {@code Publisher} for uploading data into GridFS + * + *
<p>Provides the {@code id} for the file to be uploaded. Cancelling the subscription to this publisher will cause any uploaded data + * to be cleaned up and removed.</p>
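+ * <p>A minimal usage sketch (assumes a configured {@code GridFSBucket} named {@code bucket} and a
+ * {@code Publisher<ByteBuffer>} named {@code source}; both names are assumptions of this example):</p>
+ * <pre>{@code
+ * GridFSUploadPublisher<ObjectId> uploadPublisher = bucket.uploadFromPublisher("myFile", source);
+ * ObjectId id = uploadPublisher.getObjectId(); // the id is assigned before the upload completes
+ * uploadPublisher.subscribe(subscriber);       // subscribing starts the upload
+ * }</pre>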
+ * + * @param the result type of the publisher + * @since 1.13 + */ +public interface GridFSUploadPublisher extends Publisher { + + /** + * Gets the {@link ObjectId} for the file to be uploaded + *
<p>
+ * Throws a {@link com.mongodb.MongoGridFSException} if the file id is not an ObjectId. + * + * @return the ObjectId for the file to be uploaded + */ + ObjectId getObjectId(); + + /** + * The {@link BsonValue} id for this file. + * + * @return the id for this file + */ + BsonValue getId(); + +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/gridfs/package-info.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/gridfs/package-info.java new file mode 100644 index 00000000000..ec7ebdb1258 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/gridfs/package-info.java @@ -0,0 +1,21 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Contains the classes for supporting MongoDB's specification for storing very large files, GridFS. + * @mongodb.driver.manual core/gridfs/ GridFS + */ +package com.mongodb.reactivestreams.client.gridfs; diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/AggregatePublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/AggregatePublisherImpl.java new file mode 100644 index 00000000000..e37f536a4f7 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/AggregatePublisherImpl.java @@ -0,0 +1,264 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.ExplainVerbosity; +import com.mongodb.MongoNamespace; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.Collation; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.client.model.AggregationLevel; +import com.mongodb.internal.client.model.FindOptions; +import com.mongodb.internal.operation.Operations; +import com.mongodb.internal.operation.ReadOperationCursor; +import com.mongodb.internal.operation.ReadOperationExplainable; +import com.mongodb.internal.operation.ReadOperationSimple; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.AggregatePublisher; +import com.mongodb.reactivestreams.client.ClientSession; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.Document; +import org.bson.conversions.Bson; +import org.reactivestreams.Publisher; + +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; + +import static com.mongodb.assertions.Assertions.notNull; + +final class AggregatePublisherImpl extends BatchCursorPublisher implements AggregatePublisher { + private final List pipeline; + private final AggregationLevel aggregationLevel; + private Boolean allowDiskUse; + private long maxTimeMS; + private long maxAwaitTimeMS; + private Boolean bypassDocumentValidation; + private Collation collation; + private BsonValue comment; + private Bson hint; + private String hintString; + private Bson variables; + + AggregatePublisherImpl( + @Nullable final ClientSession clientSession, + final MongoOperationPublisher mongoOperationPublisher, + final List pipeline, + final AggregationLevel aggregationLevel) { + super(clientSession, mongoOperationPublisher); + this.pipeline = notNull("pipeline", pipeline); + this.aggregationLevel = notNull("aggregationLevel", aggregationLevel); + } + + @Override + public AggregatePublisher allowDiskUse(@Nullable final Boolean allowDiskUse) { + this.allowDiskUse = allowDiskUse; + return this; + } + + @Override + public AggregatePublisher batchSize(final int batchSize) { + super.batchSize(batchSize); + return this; + } + + @Override + public AggregatePublisher timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + return this; + } + + @Override + public AggregatePublisher maxTime(final long maxTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); + return this; + } + + @Override + public AggregatePublisher maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) { + this.maxAwaitTimeMS = validateMaxAwaitTime(maxAwaitTime, timeUnit); + return this; + } + + @Override + public AggregatePublisher bypassDocumentValidation(@Nullable final Boolean bypassDocumentValidation) { + this.bypassDocumentValidation = bypassDocumentValidation; + return this; + } + + @Override + public AggregatePublisher collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + @Override + public AggregatePublisher comment(@Nullable final String comment) { + this.comment = comment != null ? 
new BsonString(comment) : null; + return this; + } + + @Override + public AggregatePublisher comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + @Override + public AggregatePublisher hint(@Nullable final Bson hint) { + this.hint = hint; + return this; + } + + @Override + public AggregatePublisher hintString(@Nullable final String hint) { + this.hintString = hint; + return this; + } + + @Override + public AggregatePublisher let(@Nullable final Bson variables) { + this.variables = variables; + return this; + } + + @Override + public Publisher toCollection() { + BsonDocument lastPipelineStage = getLastPipelineStage(); + if (lastPipelineStage == null || !lastPipelineStage.containsKey("$out") && !lastPipelineStage.containsKey("$merge")) { + throw new IllegalStateException("The last stage of the aggregation pipeline must be $out or $merge"); + } + return getMongoOperationPublisher().createReadOperationMono( + operations -> operations.createTimeoutSettings(maxTimeMS, maxAwaitTimeMS), + this::getAggregateToCollectionOperation, getClientSession()); + } + + @Override + public Publisher explain() { + return publishExplain(Document.class, null); + } + + @Override + public Publisher explain(final ExplainVerbosity verbosity) { + return publishExplain(Document.class, notNull("verbosity", verbosity)); + } + + @Override + public Publisher explain(final Class explainResultClass) { + return publishExplain(explainResultClass, null); + } + + @Override + public Publisher explain(final Class explainResultClass, final ExplainVerbosity verbosity) { + return publishExplain(explainResultClass, notNull("verbosity", verbosity)); + } + + private Publisher publishExplain(final Class explainResultClass, @Nullable final ExplainVerbosity verbosity) { + notNull("explainDocumentClass", explainResultClass); + return getMongoOperationPublisher().createReadOperationMono( + Operations::getTimeoutSettings, + () -> asAggregateOperation(1).asExplainableOperation(verbosity, + getCodecRegistry().get(explainResultClass)), getClientSession()); + } + + @Override + ReadOperationCursor asReadOperation(final int initialBatchSize) { + MongoNamespace outNamespace = getOutNamespace(); + + if (outNamespace != null) { + ReadOperationSimple aggregateToCollectionOperation = getAggregateToCollectionOperation(); + + FindOptions findOptions = new FindOptions().collation(collation).comment(comment).batchSize(initialBatchSize); + + ReadOperationCursor findOperation = + getOperations().find(outNamespace, new BsonDocument(), getDocumentClass(), findOptions); + + return new VoidReadOperationThenCursorReadOperation<>(aggregateToCollectionOperation, findOperation); + } else { + return asAggregateOperation(initialBatchSize); + } + } + + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (operations -> operations.createTimeoutSettings(maxTimeMS, maxAwaitTimeMS)); + } + + private ReadOperationExplainable asAggregateOperation(final int initialBatchSize) { + return getOperations() + .aggregate(pipeline, getDocumentClass(), getTimeoutMode(), + initialBatchSize, collation, hint, hintString, comment, variables, allowDiskUse, aggregationLevel); + } + + private ReadOperationSimple getAggregateToCollectionOperation() { + return getOperations().aggregateToCollection(pipeline, getTimeoutMode(), allowDiskUse, bypassDocumentValidation, + collation, hint, hintString, comment, variables, aggregationLevel); + } + + @Nullable + private BsonDocument getLastPipelineStage() { + if (pipeline.isEmpty()) { + return 
null; + } else { + Bson lastStage = notNull("last pipeline stage", pipeline.get(pipeline.size() - 1)); + return lastStage.toBsonDocument(getDocumentClass(), getCodecRegistry()); + } + } + + @Nullable + private MongoNamespace getOutNamespace() { + BsonDocument lastPipelineStage = getLastPipelineStage(); + if (lastPipelineStage == null) { + return null; + } + String databaseName = getNamespace().getDatabaseName(); + if (lastPipelineStage.containsKey("$out")) { + if (lastPipelineStage.get("$out").isString()) { + return new MongoNamespace(databaseName, lastPipelineStage.getString("$out").getValue()); + } else if (lastPipelineStage.get("$out").isDocument()) { + BsonDocument outDocument = lastPipelineStage.getDocument("$out"); + if (!outDocument.containsKey("db") || !outDocument.containsKey("coll")) { + throw new IllegalStateException("Cannot return a cursor when the value for $out stage is not a namespace document"); + } + return new MongoNamespace(outDocument.getString("db").getValue(), outDocument.getString("coll").getValue()); + } else { + throw new IllegalStateException("Cannot return a cursor when the value for $out stage " + + "is not a string or namespace document"); + } + } else if (lastPipelineStage.containsKey("$merge")) { + if (lastPipelineStage.isString("$merge")) { + return new MongoNamespace(databaseName, lastPipelineStage.getString("$merge").getValue()); + } else if (lastPipelineStage.isDocument("$merge")) { + BsonDocument mergeDocument = lastPipelineStage.getDocument("$merge"); + if (mergeDocument.isDocument("into")) { + BsonDocument intoDocument = mergeDocument.getDocument("into"); + return new MongoNamespace(intoDocument.getString("db", new BsonString(databaseName)).getValue(), + intoDocument.getString("coll").getValue()); + } else if (mergeDocument.isString("into")) { + return new MongoNamespace(databaseName, mergeDocument.getString("into").getValue()); + } + } else { + throw new IllegalStateException("Cannot return a cursor when the value for $merge stage is not a string or a document"); + } + } + + return null; + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursor.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursor.java new file mode 100644 index 00000000000..56e1ad54a15 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursor.java @@ -0,0 +1,70 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.internal.async.AsyncBatchCursor; +import org.reactivestreams.Publisher; +import reactor.core.publisher.Mono; + +import java.util.List; +import java.util.function.Supplier; + + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
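+ * <p>A minimal sketch of how a wrapped cursor is typically driven ({@code cursor} and
+ * {@code process} are illustrative assumptions): each call to {@code next()} yields a single
+ * batch, so a full drain resubscribes until {@code isClosed()} reports true.</p>
+ * <pre>{@code
+ * Mono.from(cursor.next())                                   // fetch one batch
+ *     .doOnNext(batch -> batch.forEach(doc -> process(doc)))
+ *     .doFinally(signal -> cursor.close())                   // release the server-side cursor
+ *     .subscribe();
+ * }</pre>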
+ */ +public class BatchCursor implements AutoCloseable { + + private final AsyncBatchCursor wrapped; + + public BatchCursor(final AsyncBatchCursor wrapped) { + this.wrapped = wrapped; + } + + public Publisher> next() { + return next(() -> false); + } + + public Publisher> next(final Supplier hasBeenCancelled) { + return Mono.create(sink -> wrapped.next( + (result, t) -> { + if (!hasBeenCancelled.get()) { + if (t != null) { + sink.error(t); + } else { + sink.success(result); + } + } + })); + } + + public void setBatchSize(final int batchSize) { + wrapped.setBatchSize(batchSize); + } + + public int getBatchSize() { + return wrapped.getBatchSize(); + } + + public boolean isClosed() { + return wrapped.isClosed(); + } + + public void close() { + wrapped.close(); + } + +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorFlux.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorFlux.java new file mode 100644 index 00000000000..119598a265b --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorFlux.java @@ -0,0 +1,132 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.internal; + +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import reactor.core.publisher.Flux; +import reactor.core.publisher.FluxSink; +import reactor.core.publisher.Mono; + +import java.util.Objects; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicLong; + +class BatchCursorFlux implements Publisher { + + private final BatchCursorPublisher batchCursorPublisher; + private final AtomicBoolean inProgress = new AtomicBoolean(false); + private final AtomicLong demandDelta = new AtomicLong(0); + private volatile BatchCursor batchCursor; + private FluxSink sink; + + BatchCursorFlux(final BatchCursorPublisher batchCursorPublisher) { + this.batchCursorPublisher = batchCursorPublisher; + } + + @Override + public void subscribe(final Subscriber subscriber) { + Flux.create(sink -> { + this.sink = sink; + sink.onRequest(demand -> { + if (calculateDemand(demand) > 0 && inProgress.compareAndSet(false, true)) { + if (batchCursor == null) { + int batchSize = calculateBatchSize(sink.requestedFromDownstream()); + batchCursorPublisher.batchCursor(batchSize) + .contextWrite(sink.contextView()) + .subscribe(bc -> { + batchCursor = bc; + inProgress.set(false); + + // Handle any cancelled subscriptions that happen during the time it takes to get the batchCursor + if (sink.isCancelled()) { + closeCursor(); + } else { + recurseCursor(); + } + }, sink::error); + } else { + inProgress.set(false); + recurseCursor(); + } + } + }); + sink.onCancel(this::closeCursor); + sink.onDispose(this::closeCursor); + }, FluxSink.OverflowStrategy.BUFFER) + .subscribe(subscriber); + } + + private void closeCursor() { + if (batchCursor != null) { + batchCursor.close(); + } + } + + private void recurseCursor(){ + if (!sink.isCancelled() && sink.requestedFromDownstream() > 0 && inProgress.compareAndSet(false, true)) { + if (batchCursor.isClosed()) { + sink.complete(); + } else { + batchCursor.setBatchSize(calculateBatchSize(sink.requestedFromDownstream())); + Mono.from(batchCursor.next(() -> sink.isCancelled())) + .contextWrite(sink.contextView()) + .doOnCancel(this::closeCursor) + .subscribe(results -> { + if (!results.isEmpty()) { + results + .stream() + .filter(Objects::nonNull) + .forEach(sink::next); + calculateDemand(-results.size()); + } + if (batchCursor.isClosed()) { + sink.complete(); + } else { + inProgress.set(false); + recurseCursor(); + } + }, + e -> { + try { + closeCursor(); + } finally { + sink.error(e); + } + }); + } + } + } + + long calculateDemand(final long demand) { + return demandDelta.accumulateAndGet(demand, (originalValue, update) -> { + long newValue = originalValue + update; + return update > 0 && newValue < originalValue ? 
Long.MAX_VALUE : newValue; + }); + } + + int calculateBatchSize(final long demand) { + Integer setBatchSize = batchCursorPublisher.getBatchSize(); + if (setBatchSize != null) { + return setBatchSize; + } else if (demand > Integer.MAX_VALUE) { + return Integer.MAX_VALUE; + } + return Math.max(2, (int) demand); + } + +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorPublisher.java new file mode 100644 index 00000000000..1d68eb0e56a --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/BatchCursorPublisher.java @@ -0,0 +1,163 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.MongoNamespace; +import com.mongodb.ReadPreference; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.async.AsyncBatchCursor; +import com.mongodb.internal.operation.Operations; +import com.mongodb.internal.operation.ReadOperation; +import com.mongodb.internal.operation.ReadOperationCursor; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.ClientSession; +import org.bson.codecs.configuration.CodecRegistry; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import reactor.core.publisher.Mono; + +import java.util.concurrent.TimeUnit; +import java.util.function.Function; +import java.util.function.Supplier; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; + +@VisibleForTesting(otherwise = VisibleForTesting.AccessModifier.PROTECTED) +public abstract class BatchCursorPublisher implements Publisher { + private final ClientSession clientSession; + private final MongoOperationPublisher mongoOperationPublisher; + private Integer batchSize; + private TimeoutMode timeoutMode; + + BatchCursorPublisher(@Nullable final ClientSession clientSession, final MongoOperationPublisher mongoOperationPublisher) { + this(clientSession, mongoOperationPublisher, null); + } + + BatchCursorPublisher(@Nullable final ClientSession clientSession, final MongoOperationPublisher mongoOperationPublisher, + @Nullable final Integer batchSize) { + this.clientSession = clientSession; + this.mongoOperationPublisher = notNull("mongoOperationPublisher", mongoOperationPublisher); + this.batchSize = batchSize; + } + + abstract ReadOperationCursor asReadOperation(int initialBatchSize); + abstract Function, TimeoutSettings> getTimeoutSettings(); + + ReadOperationCursor asReadOperationFirst() { + return asReadOperation(1); + } + + @Nullable + ClientSession getClientSession() { + return clientSession; + } + + 
MongoOperationPublisher getMongoOperationPublisher() { + return mongoOperationPublisher; + } + + Operations getOperations() { + return mongoOperationPublisher.getOperations(); + } + + MongoNamespace getNamespace() { + return assertNotNull(mongoOperationPublisher.getNamespace()); + } + + ReadPreference getReadPreference() { + return mongoOperationPublisher.getReadPreference(); + } + + CodecRegistry getCodecRegistry() { + return mongoOperationPublisher.getCodecRegistry(); + } + + boolean getRetryReads() { + return mongoOperationPublisher.getRetryReads(); + } + + Class getDocumentClass() { + return mongoOperationPublisher.getDocumentClass(); + } + + + @Nullable + public Integer getBatchSize() { + return batchSize; + } + + public Publisher batchSize(final int batchSize) { + this.batchSize = batchSize; + return this; + } + + public Publisher timeoutMode(final TimeoutMode timeoutMode) { + if (mongoOperationPublisher.getTimeoutSettings().getTimeoutMS() == null) { + throw new IllegalArgumentException("TimeoutMode requires timeoutMS to be set."); + } + this.timeoutMode = timeoutMode; + return this; + } + + @Nullable + public TimeoutMode getTimeoutMode() { + return timeoutMode; + } + + public Publisher first() { + return batchCursor(this::asReadOperationFirst) + .flatMap(batchCursor -> { + batchCursor.setBatchSize(1); + return Mono.from(batchCursor.next()) + .doOnTerminate(batchCursor::close) + .flatMap(results -> { + if (results == null || results.isEmpty()) { + return Mono.empty(); + } + return Mono.fromCallable(() -> results.get(0)); + }); + }); + } + + @Override + public void subscribe(final Subscriber subscriber) { + new BatchCursorFlux<>(this).subscribe(subscriber); + } + + public Mono> batchCursor(final int initialBatchSize) { + return batchCursor(() -> asReadOperation(initialBatchSize)); + } + + Mono> batchCursor(final Supplier>> supplier) { + return mongoOperationPublisher.createReadOperationMono(getTimeoutSettings(), supplier, clientSession).map(BatchCursor::new); + } + + protected long validateMaxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + Long timeoutMS = mongoOperationPublisher.getTimeoutSettings().getTimeoutMS(); + long maxAwaitTimeMS = TimeUnit.MILLISECONDS.convert(maxAwaitTime, timeUnit); + + isTrueArgument("maxAwaitTimeMS must be less than timeoutMS", timeoutMS == null || timeoutMS == 0 + || timeoutMS > maxAwaitTimeMS); + + return maxAwaitTimeMS; + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImpl.java new file mode 100644 index 00000000000..b00dfb85952 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImpl.java @@ -0,0 +1,184 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.changestream.ChangeStreamDocument; +import com.mongodb.client.model.changestream.FullDocument; +import com.mongodb.client.model.changestream.FullDocumentBeforeChange; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.client.model.changestream.ChangeStreamLevel; +import com.mongodb.internal.operation.Operations; +import com.mongodb.internal.operation.ReadOperationCursor; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.ChangeStreamPublisher; +import com.mongodb.reactivestreams.client.ClientSession; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.BsonTimestamp; +import org.bson.BsonValue; +import org.bson.codecs.Codec; +import org.bson.conversions.Bson; +import org.reactivestreams.Publisher; + +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; + +import static com.mongodb.assertions.Assertions.notNull; + + +final class ChangeStreamPublisherImpl extends BatchCursorPublisher> + implements ChangeStreamPublisher { + + private final List pipeline; + private final Codec> codec; + private final ChangeStreamLevel changeStreamLevel; + + private FullDocument fullDocument = FullDocument.DEFAULT; + private FullDocumentBeforeChange fullDocumentBeforeChange = FullDocumentBeforeChange.DEFAULT; + private BsonDocument resumeToken; + private BsonDocument startAfter; + private long maxAwaitTimeMS; + private Collation collation; + private BsonValue comment; + private BsonTimestamp startAtOperationTime; + private boolean showExpandedEvents; + + ChangeStreamPublisherImpl( + @Nullable final ClientSession clientSession, + final MongoOperationPublisher mongoOperationPublisher, + final Class innerResultClass, + final List pipeline, + final ChangeStreamLevel changeStreamLevel) { + this(clientSession, mongoOperationPublisher, + ChangeStreamDocument.createCodec(notNull("innerResultClass", innerResultClass), + mongoOperationPublisher.getCodecRegistry()), + notNull("pipeline", pipeline), notNull("changeStreamLevel", changeStreamLevel)); + } + + private ChangeStreamPublisherImpl( + @Nullable final ClientSession clientSession, + final MongoOperationPublisher mongoOperationPublisher, + final Codec> codec, + final List pipeline, + final ChangeStreamLevel changeStreamLevel) { + super(clientSession, mongoOperationPublisher.withDocumentClass(codec.getEncoderClass())); + this.pipeline = pipeline; + this.codec = codec; + this.changeStreamLevel = changeStreamLevel; + } + + @Override + public ChangeStreamPublisher fullDocument(final FullDocument fullDocument) { + this.fullDocument = notNull("fullDocument", fullDocument); + return this; + } + + @Override + public ChangeStreamPublisher fullDocumentBeforeChange(final FullDocumentBeforeChange fullDocumentBeforeChange) { + this.fullDocumentBeforeChange = notNull("fullDocumentBeforeChange", fullDocumentBeforeChange); + return this; + } + + @Override + public ChangeStreamPublisher resumeAfter(final BsonDocument resumeAfter) { + this.resumeToken = notNull("resumeAfter", resumeAfter); + return this; + } + + @Override + public ChangeStreamPublisher batchSize(final int batchSize) { + super.batchSize(batchSize); + return this; + } + + @Override + public ChangeStreamPublisher comment(@Nullable final String comment) { + this.comment = comment == null ? 
null : new BsonString(comment); + return this; + } + + + @Override + public ChangeStreamPublisher comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + @Override + public ChangeStreamPublisher maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) { + this.maxAwaitTimeMS = validateMaxAwaitTime(maxAwaitTime, timeUnit); + return this; + } + + @Override + public ChangeStreamPublisher collation(@Nullable final Collation collation) { + this.collation = notNull("collation", collation); + return this; + } + + @Override + public Publisher withDocumentClass(final Class clazz) { + return new BatchCursorPublisher(getClientSession(), getMongoOperationPublisher().withDocumentClass(clazz), + getBatchSize()) { + @Override + ReadOperationCursor asReadOperation(final int initialBatchSize) { + return createChangeStreamOperation(getMongoOperationPublisher().getCodecRegistry().get(clazz), initialBatchSize); + } + + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (operations -> operations.createTimeoutSettings(0, maxAwaitTimeMS)); + } + }; + } + + @Override + public ChangeStreamPublisher showExpandedEvents(final boolean showExpandedEvents) { + this.showExpandedEvents = showExpandedEvents; + return this; + } + + @Override + public ChangeStreamPublisher startAtOperationTime(final BsonTimestamp startAtOperationTime) { + this.startAtOperationTime = notNull("startAtOperationTime", startAtOperationTime); + return this; + } + + @Override + public ChangeStreamPublisherImpl startAfter(final BsonDocument startAfter) { + this.startAfter = notNull("startAfter", startAfter); + return this; + } + + @Override + ReadOperationCursor> asReadOperation(final int initialBatchSize) { + return createChangeStreamOperation(codec, initialBatchSize); + } + + + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (operations -> operations.createTimeoutSettings(0, maxAwaitTimeMS)); + } + + private ReadOperationCursor createChangeStreamOperation(final Codec codec, final int initialBatchSize) { + return getOperations().changeStream(fullDocument, fullDocumentBeforeChange, pipeline, codec, changeStreamLevel, initialBatchSize, + collation, comment, resumeToken, startAtOperationTime, startAfter, showExpandedEvents); + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionBinding.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionBinding.java new file mode 100644 index 00000000000..2e87b3bccf8 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionBinding.java @@ -0,0 +1,266 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.connection.ClusterType; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.async.function.AsyncCallbackSupplier; +import com.mongodb.internal.binding.AbstractReferenceCounted; +import com.mongodb.internal.binding.AsyncClusterAwareReadWriteBinding; +import com.mongodb.internal.binding.AsyncConnectionSource; +import com.mongodb.internal.binding.AsyncReadWriteBinding; +import com.mongodb.internal.binding.TransactionContext; +import com.mongodb.internal.connection.AsyncConnection; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.session.ClientSessionContext; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.ClientSession; +import org.bson.BsonTimestamp; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.connection.ClusterType.LOAD_BALANCED; +import static com.mongodb.connection.ClusterType.SHARDED; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class ClientSessionBinding extends AbstractReferenceCounted implements AsyncReadWriteBinding { + private final AsyncClusterAwareReadWriteBinding wrapped; + private final ClientSession session; + private final boolean ownsSession; + private final OperationContext operationContext; + + public ClientSessionBinding(final ClientSession session, final boolean ownsSession, final AsyncClusterAwareReadWriteBinding wrapped) { + this.wrapped = notNull("wrapped", wrapped).retain(); + this.ownsSession = ownsSession; + this.session = notNull("session", session); + this.operationContext = wrapped.getOperationContext().withSessionContext(new AsyncClientSessionContext(session)); + } + + @Override + public ReadPreference getReadPreference() { + return wrapped.getReadPreference(); + } + + @Override + public OperationContext getOperationContext() { + return operationContext; + } + + @Override + public void getReadConnectionSource(final SingleResultCallback callback) { + getConnectionSource(wrapped::getReadConnectionSource, callback); + } + + @Override + public void getReadConnectionSource(final int minWireVersion, final ReadPreference fallbackReadPreference, + final SingleResultCallback callback) { + getConnectionSource(wrappedConnectionSourceCallback -> + wrapped.getReadConnectionSource(minWireVersion, fallbackReadPreference, wrappedConnectionSourceCallback), + callback); + } + + public void getWriteConnectionSource(final SingleResultCallback callback) { + getConnectionSource(wrapped::getWriteConnectionSource, callback); + } + + private void getConnectionSource(final AsyncCallbackSupplier connectionSourceSupplier, + final SingleResultCallback callback) { + WrappingCallback wrappingCallback = new WrappingCallback(callback); + + if (!session.hasActiveTransaction()) { + connectionSourceSupplier.get(wrappingCallback); + return; + } + if (TransactionContext.get(session) == null) { + connectionSourceSupplier.get((source, t) -> { + if (t != null) { + wrappingCallback.onResult(null, t); + } else { + ClusterType clusterType = assertNotNull(source).getServerDescription().getClusterType(); + if (clusterType == SHARDED || clusterType == LOAD_BALANCED) { + TransactionContext transactionContext = new TransactionContext<>(clusterType); + session.setTransactionContext(source.getServerDescription().getAddress(), transactionContext); + transactionContext.release(); // The session is responsible for retaining a reference to the context + } + wrappingCallback.onResult(source, null); + } + }); + } else { + wrapped.getConnectionSource(assertNotNull(session.getPinnedServerAddress()), wrappingCallback); + } + } + + @Override + public AsyncReadWriteBinding retain() { + super.retain(); + return this; + } + + @Override + public int release() { + int count = super.release(); + if (count == 0) { + wrapped.release(); + if (ownsSession) { + session.close(); + } + } + return count; + } + + private class SessionBindingAsyncConnectionSource implements AsyncConnectionSource { + private AsyncConnectionSource wrapped; + + SessionBindingAsyncConnectionSource(final AsyncConnectionSource wrapped) { + this.wrapped = wrapped; + ClientSessionBinding.this.retain(); + } + + @Override + public ServerDescription getServerDescription() { + return wrapped.getServerDescription(); + } + + @Override + public OperationContext getOperationContext() { + return operationContext; + } + + @Override + public ReadPreference getReadPreference() { + return wrapped.getReadPreference(); + } + + @Override + public void getConnection(final SingleResultCallback 
<AsyncConnection>
callback) { + TransactionContext transactionContext = TransactionContext.get(session); + if (transactionContext != null && transactionContext.isConnectionPinningRequired()) { + AsyncConnection pinnedConnection = transactionContext.getPinnedConnection(); + if (pinnedConnection == null) { + wrapped.getConnection((connection, t) -> { + if (t != null) { + callback.onResult(null, t); + } else { + transactionContext.pinConnection(assertNotNull(connection), AsyncConnection::markAsPinned); + callback.onResult(connection, null); + } + }); + } else { + callback.onResult(pinnedConnection.retain(), null); + } + } else { + wrapped.getConnection(callback); + } + } + + @Override + public AsyncConnectionSource retain() { + wrapped = wrapped.retain(); + return this; + } + + @Override + public int getCount() { + return wrapped.getCount(); + } + + @Override + public int release() { + int count = wrapped.release(); + if (count == 0) { + ClientSessionBinding.this.release(); + } + return count; + } + } + + private final class AsyncClientSessionContext extends ClientSessionContext { + + private final ClientSession clientSession; + + AsyncClientSessionContext(final ClientSession clientSession) { + super(clientSession); + this.clientSession = clientSession; + } + + + @Override + public boolean isImplicitSession() { + return ownsSession; + } + + @Override + public boolean notifyMessageSent() { + return clientSession.notifyMessageSent(); + } + + @Override + public boolean isSnapshot() { + Boolean snapshot = clientSession.getOptions().isSnapshot(); + return snapshot != null && snapshot; + } + + @Override + public void setSnapshotTimestamp(@Nullable final BsonTimestamp snapshotTimestamp) { + clientSession.setSnapshotTimestamp(snapshotTimestamp); + } + + @Override + @Nullable + public BsonTimestamp getSnapshotTimestamp() { + return clientSession.getSnapshotTimestamp(); + } + + @Override + public boolean hasActiveTransaction() { + return clientSession.hasActiveTransaction(); + } + + @Override + public ReadConcern getReadConcern() { + if (clientSession.hasActiveTransaction()) { + return assertNotNull(clientSession.getTransactionOptions().getReadConcern()); + } else if (isSnapshot()) { + return ReadConcern.SNAPSHOT; + } else { + return wrapped.getOperationContext().getSessionContext().getReadConcern(); + } + } + } + + private class WrappingCallback implements SingleResultCallback { + private final SingleResultCallback callback; + + WrappingCallback(final SingleResultCallback callback) { + this.callback = callback; + } + + @Override + public void onResult(@Nullable final AsyncConnectionSource result, @Nullable final Throwable t) { + if (t != null) { + callback.onResult(null, t); + } else { + callback.onResult(new SessionBindingAsyncConnectionSource(assertNotNull(result)), null); + } + } + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionHelper.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionHelper.java new file mode 100644 index 00000000000..30714a6a576 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionHelper.java @@ -0,0 +1,67 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.ClientSessionOptions; +import com.mongodb.TransactionOptions; +import com.mongodb.internal.session.ServerSessionPool; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.ClientSession; +import reactor.core.publisher.Mono; + +import static com.mongodb.assertions.Assertions.isTrue; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class ClientSessionHelper { + private final MongoClientImpl mongoClient; + private final ServerSessionPool serverSessionPool; + + public ClientSessionHelper(final MongoClientImpl mongoClient, final ServerSessionPool serverSessionPool) { + this.mongoClient = mongoClient; + this.serverSessionPool = serverSessionPool; + } + + Mono withClientSession(@Nullable final ClientSession clientSessionFromOperation, final OperationExecutor executor) { + if (clientSessionFromOperation != null) { + isTrue("ClientSession from same MongoClient", clientSessionFromOperation.getOriginator() == mongoClient); + return Mono.fromCallable(() -> clientSessionFromOperation); + } else { + return createClientSessionMono(ClientSessionOptions.builder().causallyConsistent(false).build(), executor); + } + + } + + Mono createClientSessionMono(final ClientSessionOptions options, final OperationExecutor executor) { + return Mono.fromCallable(() -> createClientSession(options, executor)); + } + + ClientSession createClientSession(final ClientSessionOptions options, final OperationExecutor executor) { + ClientSessionOptions mergedOptions = ClientSessionOptions.builder(options) + .defaultTransactionOptions( + TransactionOptions.merge( + options.getDefaultTransactionOptions(), + TransactionOptions.builder() + .readConcern(mongoClient.getSettings().getReadConcern()) + .writeConcern(mongoClient.getSettings().getWriteConcern()) + .readPreference(mongoClient.getSettings().getReadPreference()) + .build())) + .build(); + return new ClientSessionPublisherImpl(serverSessionPool, mongoClient, mergedOptions, executor); + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionPublisherImpl.java new file mode 100644 index 00000000000..52f33ec25cc --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ClientSessionPublisherImpl.java @@ -0,0 +1,236 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.ClientSessionOptions; +import com.mongodb.MongoClientException; +import com.mongodb.MongoException; +import com.mongodb.MongoInternalException; +import com.mongodb.ReadConcern; +import com.mongodb.TransactionOptions; +import com.mongodb.WriteConcern; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.operation.AbortTransactionOperation; +import com.mongodb.internal.operation.CommitTransactionOperation; +import com.mongodb.internal.operation.ReadOperation; +import com.mongodb.internal.operation.WriteConcernHelper; +import com.mongodb.internal.operation.WriteOperation; +import com.mongodb.internal.session.BaseClientSessionImpl; +import com.mongodb.internal.session.ServerSessionPool; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.ClientSession; +import org.reactivestreams.Publisher; +import reactor.core.publisher.Mono; +import reactor.core.publisher.MonoSink; + +import static com.mongodb.MongoException.TRANSIENT_TRANSACTION_ERROR_LABEL; +import static com.mongodb.MongoException.UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL; +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.assertions.Assertions.notNull; + +final class ClientSessionPublisherImpl extends BaseClientSessionImpl implements ClientSession { + + private final MongoClientImpl mongoClient; + private final OperationExecutor executor; + private TransactionState transactionState = TransactionState.NONE; + private boolean messageSentInCurrentTransaction; + private boolean commitInProgress; + private TransactionOptions transactionOptions; + + + ClientSessionPublisherImpl(final ServerSessionPool serverSessionPool, final MongoClientImpl mongoClient, + final ClientSessionOptions options, final OperationExecutor executor) { + super(serverSessionPool, mongoClient, options); + this.executor = executor; + this.mongoClient = mongoClient; + } + + @Override + public boolean hasActiveTransaction() { + return transactionState == TransactionState.IN || (transactionState == TransactionState.COMMITTED && commitInProgress); + } + + @Override + public boolean notifyMessageSent() { + if (hasActiveTransaction()) { + boolean firstMessageInCurrentTransaction = !messageSentInCurrentTransaction; + messageSentInCurrentTransaction = true; + return firstMessageInCurrentTransaction; + } else { + if (transactionState == TransactionState.COMMITTED || transactionState == TransactionState.ABORTED) { + cleanupTransaction(TransactionState.NONE); + } + return false; + } + } + + @Override + public void notifyOperationInitiated(final Object operation) { + assertTrue(operation instanceof ReadOperation || operation instanceof WriteOperation); + if (!(hasActiveTransaction() || operation instanceof CommitTransactionOperation)) { + assertTrue(getPinnedServerAddress() == null + || (transactionState != TransactionState.ABORTED && transactionState != TransactionState.NONE)); + clearTransactionContext(); + } + } + + @Override + public TransactionOptions getTransactionOptions() { + isTrue("in transaction", transactionState == TransactionState.IN || transactionState == TransactionState.COMMITTED); + return transactionOptions; + } + + @Override + public void startTransaction() { + startTransaction(TransactionOptions.builder().build()); + } + + @Override + public void startTransaction(final 
TransactionOptions transactionOptions) { + notNull("transactionOptions", transactionOptions); + + Boolean snapshot = getOptions().isSnapshot(); + if (snapshot != null && snapshot) { + throw new IllegalArgumentException("Transactions are not supported in snapshot sessions"); + } + if (transactionState == TransactionState.IN) { + throw new IllegalStateException("Transaction already in progress"); + } + if (transactionState == TransactionState.COMMITTED) { + cleanupTransaction(TransactionState.IN); + } else { + transactionState = TransactionState.IN; + } + getServerSession().advanceTransactionNumber(); + this.transactionOptions = TransactionOptions.merge(transactionOptions, getOptions().getDefaultTransactionOptions()); + + TimeoutContext timeoutContext = createTimeoutContext(); + WriteConcern writeConcern = getWriteConcern(timeoutContext); + if (writeConcern == null) { + throw new MongoInternalException("Invariant violated. Transaction options write concern can not be null"); + } + if (!writeConcern.isAcknowledged()) { + throw new MongoClientException("Transactions do not support unacknowledged write concern"); + } + clearTransactionContext(); + setTimeoutContext(timeoutContext); + } + + @Nullable + private WriteConcern getWriteConcern(@Nullable final TimeoutContext timeoutContext) { + WriteConcern writeConcern = transactionOptions.getWriteConcern(); + if (hasTimeoutMS(timeoutContext) && hasWTimeoutMS(writeConcern)) { + return WriteConcernHelper.cloneWithoutTimeout(writeConcern); + } + return writeConcern; + } + + @Override + public Publisher commitTransaction() { + if (transactionState == TransactionState.ABORTED) { + throw new IllegalStateException("Cannot call commitTransaction after calling abortTransaction"); + } + if (transactionState == TransactionState.NONE) { + throw new IllegalStateException("There is no transaction started"); + } + if (!messageSentInCurrentTransaction) { + cleanupTransaction(TransactionState.COMMITTED); + return Mono.create(MonoSink::success); + } else { + ReadConcern readConcern = transactionOptions.getReadConcern(); + if (readConcern == null) { + throw new MongoInternalException("Invariant violated. 
Transaction options read concern can not be null"); + } + boolean alreadyCommitted = commitInProgress || transactionState == TransactionState.COMMITTED; + commitInProgress = true; + resetTimeout(); + TimeoutContext timeoutContext = getTimeoutContext(); + WriteConcern writeConcern = assertNotNull(getWriteConcern(timeoutContext)); + return executor + .execute( + new CommitTransactionOperation(writeConcern, alreadyCommitted) + .recoveryToken(getRecoveryToken()), readConcern, this) + .doOnTerminate(() -> { + commitInProgress = false; + transactionState = TransactionState.COMMITTED; + }) + .doOnError(MongoException.class, this::clearTransactionContextOnError); + } + } + + @Override + public Publisher abortTransaction() { + if (transactionState == TransactionState.ABORTED) { + throw new IllegalStateException("Cannot call abortTransaction twice"); + } + if (transactionState == TransactionState.COMMITTED) { + throw new IllegalStateException("Cannot call abortTransaction after calling commitTransaction"); + } + if (transactionState == TransactionState.NONE) { + throw new IllegalStateException("There is no transaction started"); + } + if (!messageSentInCurrentTransaction) { + cleanupTransaction(TransactionState.ABORTED); + return Mono.create(MonoSink::success); + } else { + ReadConcern readConcern = transactionOptions.getReadConcern(); + if (readConcern == null) { + throw new MongoInternalException("Invariant violated. Transaction options read concern can not be null"); + } + + resetTimeout(); + TimeoutContext timeoutContext = getTimeoutContext(); + WriteConcern writeConcern = assertNotNull(getWriteConcern(timeoutContext)); + return executor + .execute(new AbortTransactionOperation(writeConcern) + .recoveryToken(getRecoveryToken()), readConcern, this) + .onErrorResume(Throwable.class, (e) -> Mono.empty()) + .doOnTerminate(() -> { + clearTransactionContext(); + cleanupTransaction(TransactionState.ABORTED); + }); + } + } + + private void clearTransactionContextOnError(final MongoException e) { + if (e.hasErrorLabel(TRANSIENT_TRANSACTION_ERROR_LABEL) || e.hasErrorLabel(UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL)) { + clearTransactionContext(); + } + } + + @Override + public void close() { + if (transactionState == TransactionState.IN) { + Mono.from(abortTransaction()).doFinally(it -> super.close()).subscribe(); + } else { + super.close(); + } + } + + private void cleanupTransaction(final TransactionState nextState) { + messageSentInCurrentTransaction = false; + transactionOptions = null; + transactionState = nextState; + setTimeoutContext(null); + } + + private TimeoutContext createTimeoutContext() { + return new TimeoutContext(getTimeoutSettings(transactionOptions, executor.getTimeoutSettings())); + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/DistinctPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/DistinctPublisherImpl.java new file mode 100644 index 00000000000..e17b37bf959 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/DistinctPublisherImpl.java @@ -0,0 +1,120 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.Collation; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.operation.Operations; +import com.mongodb.internal.operation.ReadOperationCursor; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.ClientSession; +import com.mongodb.reactivestreams.client.DistinctPublisher; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.conversions.Bson; + +import java.util.concurrent.TimeUnit; +import java.util.function.Function; + +import static com.mongodb.assertions.Assertions.notNull; + +final class DistinctPublisherImpl extends BatchCursorPublisher implements DistinctPublisher { + + private final String fieldName; + private Bson filter; + private long maxTimeMS; + private Collation collation; + private BsonValue comment; + private Bson hint; + private String hintString; + + DistinctPublisherImpl( + @Nullable final ClientSession clientSession, + final MongoOperationPublisher mongoOperationPublisher, + final String fieldName, final Bson filter) { + super(clientSession, mongoOperationPublisher); + this.fieldName = notNull("fieldName", fieldName); + this.filter = notNull("filter", filter); + } + + @Override + public DistinctPublisher filter(@Nullable final Bson filter) { + this.filter = filter; + return this; + } + + @Override + public DistinctPublisher maxTime(final long maxTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); + return this; + } + + @Override + public DistinctPublisher batchSize(final int batchSize) { + super.batchSize(batchSize); + return this; + } + + @Override + public DistinctPublisher collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + @Override + public DistinctPublisher comment(@Nullable final String comment) { + this.comment = comment != null ? new BsonString(comment) : null; + return this; + } + + @Override + public DistinctPublisher comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + @Override + public DistinctPublisher hint(@Nullable final Bson hint) { + this.hint = hint; + return this; + } + + @Override + public DistinctPublisher hintString(@Nullable final String hint) { + this.hintString = hint; + return this; + } + + @Override + public DistinctPublisher timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + return this; + } + + @Override + ReadOperationCursor asReadOperation(final int initialBatchSize) { + // initialBatchSize is ignored for distinct operations. 
+ return getOperations().distinct(fieldName, filter, getDocumentClass(), collation, comment, hint, hintString); + } + + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (operations -> operations.createTimeoutSettings(maxTimeMS)); + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/FindPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/FindPublisherImpl.java new file mode 100644 index 00000000000..e223b9daea9 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/FindPublisherImpl.java @@ -0,0 +1,237 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.CursorType; +import com.mongodb.ExplainVerbosity; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.Collation; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.client.model.FindOptions; +import com.mongodb.internal.operation.Operations; +import com.mongodb.internal.operation.ReadOperationCursor; +import com.mongodb.internal.operation.ReadOperationExplainable; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.ClientSession; +import com.mongodb.reactivestreams.client.FindPublisher; +import org.bson.BsonValue; +import org.bson.Document; +import org.bson.conversions.Bson; +import org.reactivestreams.Publisher; + +import java.util.concurrent.TimeUnit; +import java.util.function.Function; + +import static com.mongodb.assertions.Assertions.notNull; + +final class FindPublisherImpl extends BatchCursorPublisher implements FindPublisher { + private final FindOptions findOptions; + + private Bson filter; + + FindPublisherImpl(@Nullable final ClientSession clientSession, final MongoOperationPublisher mongoOperationPublisher, + final Bson filter) { + super(clientSession, mongoOperationPublisher); + this.filter = notNull("filter", filter); + this.findOptions = new FindOptions(); + } + + @Override + public FindPublisher filter(@Nullable final Bson filter) { + this.filter = filter; + return this; + } + + @Override + public FindPublisher limit(final int limit) { + findOptions.limit(limit); + return this; + } + + @Override + public FindPublisher skip(final int skip) { + findOptions.skip(skip); + return this; + } + + @Override + public FindPublisher maxTime(final long maxTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + findOptions.maxTime(maxTime, timeUnit); + return this; + } + + @Override + public FindPublisher maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) { + validateMaxAwaitTime(maxAwaitTime, timeUnit); + findOptions.maxAwaitTime(maxAwaitTime, timeUnit); + return this; + } + + @Override + public FindPublisher batchSize(final int batchSize) { + super.batchSize(batchSize); + findOptions.batchSize(batchSize); + return this; + } + + @Override + public 
FindPublisher collation(@Nullable final Collation collation) { + findOptions.collation(collation); + return this; + } + + @Override + public FindPublisher projection(@Nullable final Bson projection) { + findOptions.projection(projection); + return this; + } + + @Override + public FindPublisher sort(@Nullable final Bson sort) { + findOptions.sort(sort); + return this; + } + + @Override + public FindPublisher noCursorTimeout(final boolean noCursorTimeout) { + findOptions.noCursorTimeout(noCursorTimeout); + return this; + } + + @Override + public FindPublisher partial(final boolean partial) { + findOptions.partial(partial); + return this; + } + + @Override + public FindPublisher cursorType(final CursorType cursorType) { + findOptions.cursorType(cursorType); + return this; + } + + @Override + public FindPublisher comment(@Nullable final String comment) { + findOptions.comment(comment); + return this; + } + + @Override + public FindPublisher comment(@Nullable final BsonValue comment) { + findOptions.comment(comment); + return this; + } + + @Override + public FindPublisher hint(@Nullable final Bson hint) { + findOptions.hint(hint); + return this; + } + + @Override + public FindPublisher hintString(@Nullable final String hint) { + findOptions.hintString(hint); + return this; + } + + @Override + public FindPublisher let(@Nullable final Bson variables) { + findOptions.let(variables); + return this; + } + + @Override + public FindPublisher max(@Nullable final Bson max) { + findOptions.max(max); + return this; + } + + @Override + public FindPublisher min(@Nullable final Bson min) { + findOptions.min(min); + return this; + } + + @Override + public FindPublisher returnKey(final boolean returnKey) { + findOptions.returnKey(returnKey); + return this; + } + + @Override + public FindPublisher showRecordId(final boolean showRecordId) { + findOptions.showRecordId(showRecordId); + return this; + } + + @Override + public FindPublisher allowDiskUse(@Nullable final Boolean allowDiskUse) { + findOptions.allowDiskUse(allowDiskUse); + return this; + } + + @Override + public FindPublisher timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + findOptions.timeoutMode(timeoutMode); + return this; + } + + @Override + public Publisher explain() { + return publishExplain(Document.class, null); + } + + @Override + public Publisher explain(final ExplainVerbosity verbosity) { + return publishExplain(Document.class, notNull("verbosity", verbosity)); + } + + @Override + public Publisher explain(final Class explainResultClass) { + return publishExplain(explainResultClass, null); + } + + @Override + public Publisher explain(final Class explainResultClass, final ExplainVerbosity verbosity) { + return publishExplain(explainResultClass, notNull("verbosity", verbosity)); + } + + private Publisher publishExplain(final Class explainResultClass, @Nullable final ExplainVerbosity verbosity) { + notNull("explainDocumentClass", explainResultClass); + return getMongoOperationPublisher().createReadOperationMono( + getTimeoutSettings(), + () -> asReadOperation(0) + .asExplainableOperation(verbosity, getCodecRegistry().get(explainResultClass)), getClientSession()); + } + + @Override + ReadOperationExplainable asReadOperation(final int initialBatchSize) { + return getOperations().find(filter, getDocumentClass(), findOptions.withBatchSize(initialBatchSize)); + } + + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (operations -> operations.createTimeoutSettings(findOptions)); + } + + @Override + 
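+ // Serves first(): findFirst limits the query to a single document, so the server returns at most one result and leaves no cursor open.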
ReadOperationCursor asReadOperationFirst() { + return getOperations().findFirst(filter, getDocumentClass(), findOptions); + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListCollectionNamesPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListCollectionNamesPublisherImpl.java new file mode 100644 index 00000000000..f07379d568c --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListCollectionNamesPublisherImpl.java @@ -0,0 +1,99 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.ListCollectionNamesPublisher; +import org.bson.BsonValue; +import org.bson.Document; +import org.bson.conversions.Bson; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; + +public final class ListCollectionNamesPublisherImpl implements ListCollectionNamesPublisher { + private final ListCollectionsPublisherImpl wrapped; + private final Flux wrappedWithMapping; + + ListCollectionNamesPublisherImpl(final ListCollectionsPublisherImpl wrapped) { + this.wrapped = wrapped; + wrappedWithMapping = Flux.from(wrapped).map(ListCollectionNamesPublisherImpl::name); + } + + @Override + public ListCollectionNamesPublisher maxTime(final long maxTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + wrapped.maxTime(maxTime, timeUnit); + return this; + } + + @Override + public ListCollectionNamesPublisher batchSize(final int batchSize) { + wrapped.batchSize(batchSize); + return this; + } + + @Override + public ListCollectionNamesPublisher filter(@Nullable final Bson filter) { + wrapped.filter(filter); + return this; + } + + @Override + public ListCollectionNamesPublisher comment(@Nullable final String comment) { + wrapped.comment(comment); + return this; + } + + @Override + public ListCollectionNamesPublisher comment(@Nullable final BsonValue comment) { + wrapped.comment(comment); + return this; + } + + @Override + public ListCollectionNamesPublisher authorizedCollections(final boolean authorizedCollections) { + wrapped.authorizedCollections(authorizedCollections); + return this; + } + + @Override + public Publisher first() { + return Mono.fromDirect(wrapped.first()).map(ListCollectionNamesPublisherImpl::name); + } + + @Override + public void subscribe(final Subscriber subscriber) { + wrappedWithMapping.subscribe(subscriber); + } + + @VisibleForTesting(otherwise = PRIVATE) + public BatchCursorPublisher getWrapped() { + return wrapped; + } + + private static String 
name(final Document collectionDoc) { + return collectionDoc.getString("name"); + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImpl.java new file mode 100644 index 00000000000..1f1e5f9d00a --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImpl.java @@ -0,0 +1,107 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.ReadConcern; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.operation.Operations; +import com.mongodb.internal.operation.ReadOperationCursor; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.ClientSession; +import com.mongodb.reactivestreams.client.ListCollectionNamesPublisher; +import com.mongodb.reactivestreams.client.ListCollectionsPublisher; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.conversions.Bson; + +import java.util.concurrent.TimeUnit; +import java.util.function.Function; + +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +final class ListCollectionsPublisherImpl extends BatchCursorPublisher implements ListCollectionsPublisher { + + private final boolean collectionNamesOnly; + private boolean authorizedCollections; + private Bson filter; + private long maxTimeMS; + private BsonValue comment; + + ListCollectionsPublisherImpl( + @Nullable final ClientSession clientSession, + final MongoOperationPublisher mongoOperationPublisher, + final boolean collectionNamesOnly) { + super(clientSession, mongoOperationPublisher.withReadConcern(ReadConcern.DEFAULT)); + this.collectionNamesOnly = collectionNamesOnly; + } + + public ListCollectionsPublisher maxTime(final long maxTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + this.maxTimeMS = MILLISECONDS.convert(maxTime, timeUnit); + return this; + } + + public ListCollectionsPublisher batchSize(final int batchSize) { + super.batchSize(batchSize); + return this; + } + + public ListCollectionsPublisher filter(@Nullable final Bson filter) { + this.filter = filter; + return this; + } + + @Override + public ListCollectionsPublisher comment(@Nullable final String comment) { + this.comment = comment != null ? 
new BsonString(comment) : null; + return this; + } + + @Override + public ListCollectionsPublisher comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + + @SuppressWarnings("ReactiveStreamsUnusedPublisher") + @Override + public ListCollectionsPublisher timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + return this; + } + + /** + * @see ListCollectionNamesPublisher#authorizedCollections(boolean) + */ + void authorizedCollections(final boolean authorizedCollections) { + this.authorizedCollections = authorizedCollections; + } + + + ReadOperationCursor asReadOperation(final int initialBatchSize) { + return getOperations().listCollections(getNamespace().getDatabaseName(), getDocumentClass(), filter, collectionNamesOnly, + authorizedCollections, initialBatchSize, comment, getTimeoutMode()); + } + + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (operations -> operations.createTimeoutSettings(maxTimeMS)); + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImpl.java new file mode 100644 index 00000000000..5c7b408ef3f --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImpl.java @@ -0,0 +1,103 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.operation.Operations; +import com.mongodb.internal.operation.ReadOperationCursor; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.ClientSession; +import com.mongodb.reactivestreams.client.ListDatabasesPublisher; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.conversions.Bson; + +import java.util.concurrent.TimeUnit; +import java.util.function.Function; + +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +final class ListDatabasesPublisherImpl<T> extends BatchCursorPublisher<T> implements ListDatabasesPublisher<T> { + + private long maxTimeMS; + private Bson filter; + private Boolean nameOnly; + private Boolean authorizedDatabasesOnly; + private BsonValue comment; + + ListDatabasesPublisherImpl( + @Nullable final ClientSession clientSession, + final MongoOperationPublisher<T> mongoOperationPublisher) { + super(clientSession, mongoOperationPublisher); + } + + public ListDatabasesPublisher<T> maxTime(final long maxTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + this.maxTimeMS = MILLISECONDS.convert(maxTime, timeUnit); + return this; + } + + public ListDatabasesPublisher<T> batchSize(final int batchSize) { + super.batchSize(batchSize); + return this; + } + + public ListDatabasesPublisher<T> filter(@Nullable final Bson filter) { + this.filter = filter; + return this; + } + + public ListDatabasesPublisher<T> nameOnly(@Nullable final Boolean nameOnly) { + this.nameOnly = nameOnly; + return this; + } + + public ListDatabasesPublisher<T> authorizedDatabasesOnly(@Nullable final Boolean authorizedDatabasesOnly) { + this.authorizedDatabasesOnly = authorizedDatabasesOnly; + return this; + } + + @Override + public ListDatabasesPublisher<T> comment(@Nullable final String comment) { + this.comment = comment != null ? new BsonString(comment) : null; + return this; + } + + @Override + public ListDatabasesPublisher<T> comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + @Override + public ListDatabasesPublisher<T> timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + return this; + } + + @Override + Function<Operations<?>, TimeoutSettings> getTimeoutSettings() { + return (operations -> operations.createTimeoutSettings(maxTimeMS)); + } + + ReadOperationCursor<T> asReadOperation(final int initialBatchSize) { + // initialBatchSize is ignored: listDatabases returns its results in a single response document, not a cursor. + return getOperations().listDatabases(getDocumentClass(), filter, nameOnly, authorizedDatabasesOnly, comment); + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImpl.java new file mode 100644 index 00000000000..851c56fcc54 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImpl.java @@ -0,0 +1,83 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.operation.Operations; +import com.mongodb.internal.operation.ReadOperationCursor; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.ClientSession; +import com.mongodb.reactivestreams.client.ListIndexesPublisher; +import org.bson.BsonString; +import org.bson.BsonValue; + +import java.util.concurrent.TimeUnit; +import java.util.function.Function; + +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +final class ListIndexesPublisherImpl extends BatchCursorPublisher implements ListIndexesPublisher { + + private long maxTimeMS; + private BsonValue comment; + + ListIndexesPublisherImpl( + @Nullable final ClientSession clientSession, + final MongoOperationPublisher mongoOperationPublisher) { + super(clientSession, mongoOperationPublisher); + } + + public ListIndexesPublisher maxTime(final long maxTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + this.maxTimeMS = MILLISECONDS.convert(maxTime, timeUnit); + return this; + } + + public ListIndexesPublisher batchSize(final int batchSize) { + super.batchSize(batchSize); + return this; + } + @Override + public ListIndexesPublisher comment(@Nullable final String comment) { + this.comment = comment != null ? new BsonString(comment) : null; + return this; + } + + @Override + public ListIndexesPublisher comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + @SuppressWarnings("ReactiveStreamsUnusedPublisher") + @Override + public ListIndexesPublisher timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + return this; + } + + ReadOperationCursor asReadOperation(final int initialBatchSize) { + return getOperations().listIndexes(getDocumentClass(), initialBatchSize, comment, getTimeoutMode()); + } + + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (operations -> operations.createTimeoutSettings(maxTimeMS)); + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListSearchIndexesPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListSearchIndexesPublisherImpl.java new file mode 100644 index 00000000000..c4c703e9774 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ListSearchIndexesPublisherImpl.java @@ -0,0 +1,144 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.ExplainVerbosity; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.Collation; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.operation.Operations; +import com.mongodb.internal.operation.ReadOperationExplainable; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.ListSearchIndexesPublisher; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.Document; +import org.reactivestreams.Publisher; + +import java.util.concurrent.TimeUnit; +import java.util.function.Function; + +import static com.mongodb.assertions.Assertions.notNull; + +final class ListSearchIndexesPublisherImpl extends BatchCursorPublisher implements ListSearchIndexesPublisher { + @Nullable + private Boolean allowDiskUse; + private long maxTimeMS; + @Nullable + private Collation collation; + @Nullable + private BsonValue comment; + @Nullable + private String indexName; + + ListSearchIndexesPublisherImpl( + final MongoOperationPublisher mongoOperationPublisher) { + super(null, mongoOperationPublisher); + } + + @Override + public ListSearchIndexesPublisher name(final String indexName) { + this.indexName = notNull("indexName", indexName); + return this; + } + + @Override + public ListSearchIndexesPublisher allowDiskUse(@Nullable final Boolean allowDiskUse) { + this.allowDiskUse = allowDiskUse; + return this; + } + + @Override + public ListSearchIndexesPublisher batchSize(final int batchSize) { + super.batchSize(batchSize); + return this; + } + + @Override + public ListSearchIndexesPublisher maxTime(final long maxTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); + return this; + } + + @Override + public ListSearchIndexesPublisher collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + @Override + public ListSearchIndexesPublisher comment(@Nullable final String comment) { + this.comment = comment != null ? 
new BsonString(comment) : null; + return this; + } + + @Override + public ListSearchIndexesPublisher timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + return this; + } + + @Override + public ListSearchIndexesPublisher comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + @Override + public Publisher explain() { + return publishExplain(Document.class, null); + } + + @Override + public Publisher explain(final ExplainVerbosity verbosity) { + notNull("verbosity", verbosity); + + return publishExplain(Document.class, verbosity); + } + + @Override + public Publisher explain(final Class explainResultClass) { + notNull("explainResultClass", explainResultClass); + return publishExplain(explainResultClass, null); + } + + @Override + public Publisher explain(final Class explainResultClass, final ExplainVerbosity verbosity) { + notNull("verbosity", verbosity); + notNull("explainResultClass", explainResultClass); + return publishExplain(explainResultClass, verbosity); + } + + private Publisher publishExplain(final Class explainResultClass, @Nullable final ExplainVerbosity verbosity) { + return getMongoOperationPublisher().createReadOperationMono( + (operations -> operations.createTimeoutSettings(maxTimeMS)), + () -> asReadOperation(1).asExplainableOperation(verbosity, + getCodecRegistry().get(explainResultClass)), getClientSession()); + } + + @Override + ReadOperationExplainable asReadOperation(final int initialBatchSize) { + return getOperations().listSearchIndexes(getDocumentClass(), indexName, initialBatchSize, collation, comment, allowDiskUse); + } + + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (operations -> operations.createTimeoutSettings(maxTimeMS)); + } + +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MapReducePublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MapReducePublisherImpl.java new file mode 100644 index 00000000000..27e69762a09 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MapReducePublisherImpl.java @@ -0,0 +1,275 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.MongoNamespace; +import com.mongodb.ReadPreference; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.Collation; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.async.AsyncBatchCursor; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncReadBinding; +import com.mongodb.internal.binding.AsyncWriteBinding; +import com.mongodb.internal.binding.WriteBinding; +import com.mongodb.internal.client.model.FindOptions; +import com.mongodb.internal.operation.MapReduceAsyncBatchCursor; +import com.mongodb.internal.operation.MapReduceBatchCursor; +import com.mongodb.internal.operation.MapReduceStatistics; +import com.mongodb.internal.operation.Operations; +import com.mongodb.internal.operation.ReadOperation; +import com.mongodb.internal.operation.ReadOperationCursor; +import com.mongodb.internal.operation.WriteOperation; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.ClientSession; +import org.bson.BsonDocument; +import org.bson.conversions.Bson; +import org.reactivestreams.Publisher; + +import java.util.concurrent.TimeUnit; +import java.util.function.Function; + +import static com.mongodb.ReadPreference.primary; +import static com.mongodb.assertions.Assertions.notNull; + +@SuppressWarnings("deprecation") +final class MapReducePublisherImpl extends BatchCursorPublisher implements com.mongodb.reactivestreams.client.MapReducePublisher { + + private final String mapFunction; + private final String reduceFunction; + + private boolean inline = true; + private String collectionName; + private String finalizeFunction; + private Bson scope; + private Bson filter; + private Bson sort; + private int limit; + private boolean jsMode; + private boolean verbose = true; + private long maxTimeMS; + private com.mongodb.client.model.MapReduceAction action = com.mongodb.client.model.MapReduceAction.REPLACE; + private String databaseName; + private Boolean bypassDocumentValidation; + private Collation collation; + + MapReducePublisherImpl( + @Nullable final ClientSession clientSession, + final MongoOperationPublisher mongoOperationPublisher, + final String mapFunction, + final String reduceFunction) { + super(clientSession, mongoOperationPublisher); + this.mapFunction = notNull("mapFunction", mapFunction); + this.reduceFunction = notNull("reduceFunction", reduceFunction); + } + + @Override + public com.mongodb.reactivestreams.client.MapReducePublisher collectionName(final String collectionName) { + this.collectionName = notNull("collectionName", collectionName); + this.inline = false; + return this; + } + + @Override + public com.mongodb.reactivestreams.client.MapReducePublisher finalizeFunction(@Nullable final String finalizeFunction) { + this.finalizeFunction = finalizeFunction; + return this; + } + + @Override + public com.mongodb.reactivestreams.client.MapReducePublisher scope(@Nullable final Bson scope) { + this.scope = scope; + return this; + } + + @Override + public com.mongodb.reactivestreams.client.MapReducePublisher sort(@Nullable final Bson sort) { + this.sort = sort; + return this; + } + + @Override + public com.mongodb.reactivestreams.client.MapReducePublisher filter(@Nullable final Bson filter) { + this.filter = filter; + return this; + } + + @Override + public com.mongodb.reactivestreams.client.MapReducePublisher limit(final int limit) { + this.limit = limit; + return this; + } + + @Override + 
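+ // jsMode: when true, the server keeps intermediate values as JavaScript objects between the map and reduce phases instead of converting them to BSON (a documented mapReduce option).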
public com.mongodb.reactivestreams.client.MapReducePublisher jsMode(final boolean jsMode) { + this.jsMode = jsMode; + return this; + } + + @Override + public com.mongodb.reactivestreams.client.MapReducePublisher verbose(final boolean verbose) { + this.verbose = verbose; + return this; + } + + @Override + public com.mongodb.reactivestreams.client.MapReducePublisher maxTime(final long maxTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); + return this; + } + + @Override + public com.mongodb.reactivestreams.client.MapReducePublisher action(final com.mongodb.client.model.MapReduceAction action) { + this.action = action; + return this; + } + + @Override + public com.mongodb.reactivestreams.client.MapReducePublisher databaseName(@Nullable final String databaseName) { + this.databaseName = databaseName; + return this; + } + + @Override + public com.mongodb.reactivestreams.client.MapReducePublisher batchSize(final int batchSize) { + super.batchSize(batchSize); + return this; + } + + @Override + public com.mongodb.reactivestreams.client.MapReducePublisher bypassDocumentValidation( + @Nullable final Boolean bypassDocumentValidation) { + this.bypassDocumentValidation = bypassDocumentValidation; + return this; + } + + @Override + public com.mongodb.reactivestreams.client.MapReducePublisher timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + return this; + } + + @Override + public Publisher toCollection() { + if (inline) { + throw new IllegalStateException("The options must specify a non-inline result"); + } + return getMongoOperationPublisher().createWriteOperationMono( + (operations -> operations.createTimeoutSettings(maxTimeMS)), + this::createMapReduceToCollectionOperation, + getClientSession()); + } + + @Override + public com.mongodb.reactivestreams.client.MapReducePublisher collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + @Override + ReadPreference getReadPreference() { + if (inline) { + return super.getReadPreference(); + } else { + return primary(); + } + } + + @Override + Function, TimeoutSettings> getTimeoutSettings() { + return (operations -> operations.createTimeoutSettings(maxTimeMS)); + } + + @Override + public ReadOperationCursor asReadOperation(final int initialBatchSize) { + if (inline) { + // initialBatchSize is ignored for map reduce operations. + return createMapReduceInlineOperation(); + } else { + return new VoidWriteOperationThenCursorReadOperation<>(createMapReduceToCollectionOperation(), + createFindOperation(initialBatchSize)); + } + } + + private WrappedMapReduceReadOperation createMapReduceInlineOperation() { + return new WrappedMapReduceReadOperation<>(getOperations().mapReduce(mapFunction, reduceFunction, finalizeFunction, + getDocumentClass(), filter, limit, jsMode, scope, sort, verbose, collation)); + } + + private WrappedMapReduceWriteOperation createMapReduceToCollectionOperation() { + return new WrappedMapReduceWriteOperation( + getOperations().mapReduceToCollection(databaseName, collectionName, mapFunction, reduceFunction, finalizeFunction, filter, + limit, jsMode, scope, sort, verbose, action, bypassDocumentValidation, collation)); + } + + private ReadOperationCursor createFindOperation(final int initialBatchSize) { + String dbName = databaseName != null ? 
databaseName : getNamespace().getDatabaseName(); + FindOptions findOptions = new FindOptions().collation(collation).batchSize(initialBatchSize); + return getOperations().find(new MongoNamespace(dbName, collectionName), new BsonDocument(), getDocumentClass(), findOptions); + } + + // this could be inlined, but giving it a name so that it's unit-testable + static class WrappedMapReduceReadOperation implements ReadOperationCursorAsyncOnly { + private final ReadOperation, MapReduceAsyncBatchCursor> operation; + + WrappedMapReduceReadOperation(final ReadOperation, MapReduceAsyncBatchCursor> operation) { + this.operation = operation; + } + + ReadOperation, MapReduceAsyncBatchCursor> getOperation() { + return operation; + } + + @Override + public String getCommandName() { + return operation.getCommandName(); + } + + @Override + public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { + operation.executeAsync(binding, callback::onResult); + } + } + + static class WrappedMapReduceWriteOperation implements WriteOperation { + private final WriteOperation operation; + + WrappedMapReduceWriteOperation(final WriteOperation operation) { + this.operation = operation; + } + + WriteOperation getOperation() { + return operation; + } + + @Override + public String getCommandName() { + return operation.getCommandName(); + } + + @Override + public Void execute(final WriteBinding binding) { + throw new UnsupportedOperationException("This operation is async only"); + } + + @Override + public void executeAsync(final AsyncWriteBinding binding, final SingleResultCallback callback) { + operation.executeAsync(binding, (result, t) -> callback.onResult(null, t)); + } + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClientImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClientImpl.java new file mode 100644 index 00000000000..07a17badcd7 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClientImpl.java @@ -0,0 +1,336 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.ClientSessionOptions; +import com.mongodb.ContextProvider; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoDriverInformation; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.connection.ClientMetadata; +import com.mongodb.internal.connection.Cluster; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.internal.session.ServerSessionPool; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.ChangeStreamPublisher; +import com.mongodb.reactivestreams.client.ClientSession; +import com.mongodb.reactivestreams.client.ListDatabasesPublisher; +import com.mongodb.reactivestreams.client.MongoClient; +import com.mongodb.reactivestreams.client.MongoCluster; +import com.mongodb.reactivestreams.client.MongoDatabase; +import com.mongodb.reactivestreams.client.ReactiveContextProvider; +import com.mongodb.reactivestreams.client.internal.crypt.Crypt; +import com.mongodb.reactivestreams.client.internal.crypt.Crypts; +import org.bson.BsonDocument; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; +import org.reactivestreams.Publisher; + +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import static com.mongodb.assertions.Assertions.notNull; +import static java.lang.String.format; +import static org.bson.codecs.configuration.CodecRegistries.withUuidRepresentation; + + +/** + * The internal MongoClient implementation. + * + *

<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class MongoClientImpl implements MongoClient { + + private static final Logger LOGGER = Loggers.getLogger("client"); + private final MongoClientSettings settings; + private final AutoCloseable externalResourceCloser; + + private final MongoClusterImpl delegate; + private final AtomicBoolean closed; + + public MongoClientImpl(final MongoClientSettings settings, final MongoDriverInformation mongoDriverInformation, final Cluster cluster, + @Nullable final AutoCloseable externalResourceCloser) { + this(settings, mongoDriverInformation, cluster, null, externalResourceCloser); + } + + public MongoClientImpl(final MongoClientSettings settings, final MongoDriverInformation mongoDriverInformation, final Cluster cluster, + @Nullable final OperationExecutor executor) { + this(settings, mongoDriverInformation, cluster, executor, null); + } + + private MongoClientImpl(final MongoClientSettings settings, final MongoDriverInformation mongoDriverInformation, final Cluster cluster, + @Nullable final OperationExecutor executor, @Nullable final AutoCloseable externalResourceCloser) { + notNull("settings", settings); + notNull("cluster", cluster); + + TimeoutSettings timeoutSettings = TimeoutSettings.create(settings); + ServerSessionPool serverSessionPool = new ServerSessionPool(cluster, timeoutSettings, settings.getServerApi()); + ClientSessionHelper clientSessionHelper = new ClientSessionHelper(this, serverSessionPool); + + AutoEncryptionSettings autoEncryptSettings = settings.getAutoEncryptionSettings(); + Crypt crypt = autoEncryptSettings != null ? Crypts.createCrypt(settings, autoEncryptSettings) : null; + ContextProvider contextProvider = settings.getContextProvider(); + if (contextProvider != null && !(contextProvider instanceof ReactiveContextProvider)) { + throw new IllegalArgumentException("The contextProvider must be an instance of " + + ReactiveContextProvider.class.getName() + " when using the Reactive Streams driver"); + } + OperationExecutor operationExecutor = executor != null ? 
executor + : new OperationExecutorImpl(this, clientSessionHelper, timeoutSettings, (ReactiveContextProvider) contextProvider); + MongoOperationPublisher mongoOperationPublisher = new MongoOperationPublisher<>(Document.class, + withUuidRepresentation(settings.getCodecRegistry(), + settings.getUuidRepresentation()), + settings.getReadPreference(), + settings.getReadConcern(), settings.getWriteConcern(), + settings.getRetryWrites(), settings.getRetryReads(), + settings.getUuidRepresentation(), + settings.getAutoEncryptionSettings(), + timeoutSettings, + operationExecutor); + + this.delegate = new MongoClusterImpl(cluster, crypt, operationExecutor, serverSessionPool, clientSessionHelper, + mongoOperationPublisher); + this.externalResourceCloser = externalResourceCloser; + this.settings = settings; + this.closed = new AtomicBoolean(); + + BsonDocument clientMetadataDocument = delegate.getCluster().getClientMetadata().getBsonDocument(); + LOGGER.info(format("MongoClient with metadata %s created with settings %s", clientMetadataDocument.toJson(), settings)); + } + + Cluster getCluster() { + return delegate.getCluster(); + } + + public ServerSessionPool getServerSessionPool() { + return delegate.getServerSessionPool(); + } + + MongoOperationPublisher getMongoOperationPublisher() { + return delegate.getMongoOperationPublisher(); + } + + @Nullable + Crypt getCrypt() { + return delegate.getCrypt(); + } + + public MongoClientSettings getSettings() { + return settings; + } + + @Override + public void close() { + if (!closed.getAndSet(true)) { + Crypt crypt = getCrypt(); + if (crypt != null) { + crypt.close(); + } + getServerSessionPool().close(); + getCluster().close(); + if (externalResourceCloser != null) { + try { + externalResourceCloser.close(); + } catch (Exception e) { + LOGGER.warn("Exception closing resource", e); + } + } + } + } + + @Override + public Publisher listDatabaseNames() { + return delegate.listDatabaseNames(); + } + + @Override + public Publisher listDatabaseNames(final ClientSession clientSession) { + return delegate.listDatabaseNames(clientSession); + } + + @Override + public ListDatabasesPublisher listDatabases() { + return delegate.listDatabases(); + } + + @Override + public ListDatabasesPublisher listDatabases(final Class clazz) { + return delegate.listDatabases(clazz); + } + + @Override + public ListDatabasesPublisher listDatabases(final ClientSession clientSession) { + return delegate.listDatabases(clientSession); + } + + @Override + public ListDatabasesPublisher listDatabases(final ClientSession clientSession, final Class clazz) { + return delegate.listDatabases(clientSession, clazz); + } + + @Override + public ChangeStreamPublisher watch() { + return delegate.watch(); + } + + @Override + public ChangeStreamPublisher watch(final Class resultClass) { + return delegate.watch(resultClass); + } + + @Override + public ChangeStreamPublisher watch(final List pipeline) { + return delegate.watch(pipeline); + } + + @Override + public ChangeStreamPublisher watch(final List pipeline, final Class resultClass) { + return delegate.watch(pipeline, resultClass); + } + + @Override + public ChangeStreamPublisher watch(final ClientSession clientSession) { + return delegate.watch(clientSession); + } + + @Override + public ChangeStreamPublisher watch(final ClientSession clientSession, final Class resultClass) { + return delegate.watch(clientSession, resultClass); + } + + @Override + public ChangeStreamPublisher watch(final ClientSession clientSession, final List pipeline) { + return 
delegate.watch(clientSession, pipeline); + } + + @Override + public ChangeStreamPublisher watch( + final ClientSession clientSession, final List pipeline, final Class resultClass) { + return delegate.watch(clientSession, pipeline, resultClass); + } + + @Override + public Publisher bulkWrite(final List models) { + return delegate.bulkWrite(models); + } + + @Override + public Publisher bulkWrite(final List models, + final ClientBulkWriteOptions options) { + return delegate.bulkWrite(models, options); + } + + @Override + public Publisher bulkWrite(final ClientSession clientSession, + final List models) { + return delegate.bulkWrite(clientSession, models); + } + + @Override + public Publisher bulkWrite(final ClientSession clientSession, + final List models, + final ClientBulkWriteOptions options) { + return delegate.bulkWrite(clientSession, models, options); + } + + @Override + public Publisher startSession() { + return delegate.startSession(); + } + + @Override + public Publisher startSession(final ClientSessionOptions options) { + return delegate.startSession(options); + } + + @Override + public CodecRegistry getCodecRegistry() { + return delegate.getCodecRegistry(); + } + + @Override + public ReadPreference getReadPreference() { + return delegate.getReadPreference(); + } + + @Override + public WriteConcern getWriteConcern() { + return delegate.getWriteConcern(); + } + + @Override + public ReadConcern getReadConcern() { + return delegate.getReadConcern(); + } + + @Override + public Long getTimeout(final TimeUnit timeUnit) { + return null; + } + + @Override + public MongoCluster withCodecRegistry(final CodecRegistry codecRegistry) { + return delegate.withCodecRegistry(codecRegistry); + } + + @Override + public MongoCluster withReadPreference(final ReadPreference readPreference) { + return delegate.withReadPreference(readPreference); + } + + @Override + public MongoCluster withWriteConcern(final WriteConcern writeConcern) { + return delegate.withWriteConcern(writeConcern); + } + + @Override + public MongoCluster withReadConcern(final ReadConcern readConcern) { + return delegate.withReadConcern(readConcern); + } + + @Override + public MongoCluster withTimeout(final long timeout, final TimeUnit timeUnit) { + return delegate.withTimeout(timeout, timeUnit); + } + + @Override + public MongoDatabase getDatabase(final String name) { + return delegate.getDatabase(name); + } + + @Override + public ClusterDescription getClusterDescription() { + return getCluster().getCurrentDescription(); + } + + @Override + public void appendMetadata(final MongoDriverInformation mongoDriverInformation) { + ClientMetadata clientMetadata = getCluster().getClientMetadata(); + clientMetadata.append(mongoDriverInformation); + LOGGER.info(format("MongoClient metadata has been updated to %s", clientMetadata.getBsonDocument())); + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClusterImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClusterImpl.java new file mode 100644 index 00000000000..04028ecc684 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoClusterImpl.java @@ -0,0 +1,280 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.ClientSessionOptions; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.client.model.changestream.ChangeStreamLevel; +import com.mongodb.internal.connection.Cluster; +import com.mongodb.internal.session.ServerSessionPool; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.ChangeStreamPublisher; +import com.mongodb.reactivestreams.client.ClientSession; +import com.mongodb.reactivestreams.client.ListDatabasesPublisher; +import com.mongodb.reactivestreams.client.MongoCluster; +import com.mongodb.reactivestreams.client.MongoDatabase; +import com.mongodb.reactivestreams.client.internal.crypt.Crypt; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; +import org.reactivestreams.Publisher; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +final class MongoClusterImpl implements MongoCluster { + + private final Cluster cluster; + private final Crypt crypt; + private final OperationExecutor operationExecutor; + private final ServerSessionPool serverSessionPool; + private final ClientSessionHelper clientSessionHelper; + private final MongoOperationPublisher mongoOperationPublisher; + + MongoClusterImpl(final Cluster cluster, @Nullable final Crypt crypt, final OperationExecutor operationExecutor, + final ServerSessionPool serverSessionPool, final ClientSessionHelper clientSessionHelper, + final MongoOperationPublisher mongoOperationPublisher) { + + this.cluster = cluster; + this.crypt = crypt; + this.operationExecutor = operationExecutor; + this.serverSessionPool = serverSessionPool; + this.clientSessionHelper = clientSessionHelper; + this.mongoOperationPublisher = mongoOperationPublisher; + } + + @Override + public CodecRegistry getCodecRegistry() { + return mongoOperationPublisher.getCodecRegistry(); + } + + @Override + public ReadPreference getReadPreference() { + return mongoOperationPublisher.getReadPreference(); + } + + @Override + public WriteConcern getWriteConcern() { + return mongoOperationPublisher.getWriteConcern(); + } + + @Override + public ReadConcern getReadConcern() { + return mongoOperationPublisher.getReadConcern(); + } + + @Override + public Long getTimeout(final TimeUnit timeUnit) { + Long timeoutMS = mongoOperationPublisher.getTimeoutMS(); + return timeoutMS != null ? 
MILLISECONDS.convert(timeoutMS, timeUnit) : null; + } + + @Override + public MongoCluster withCodecRegistry(final CodecRegistry codecRegistry) { + return new MongoClusterImpl(cluster, crypt, operationExecutor, serverSessionPool, clientSessionHelper, + mongoOperationPublisher.withCodecRegistry(codecRegistry)); + } + + @Override + public MongoCluster withReadPreference(final ReadPreference readPreference) { + return new MongoClusterImpl(cluster, crypt, operationExecutor, serverSessionPool, clientSessionHelper, + mongoOperationPublisher.withReadPreference(readPreference)); + } + + @Override + public MongoCluster withWriteConcern(final WriteConcern writeConcern) { + return new MongoClusterImpl(cluster, crypt, operationExecutor, serverSessionPool, clientSessionHelper, + mongoOperationPublisher.withWriteConcern(writeConcern)); + } + + @Override + public MongoCluster withReadConcern(final ReadConcern readConcern) { + return new MongoClusterImpl(cluster, crypt, operationExecutor, serverSessionPool, clientSessionHelper, + mongoOperationPublisher.withReadConcern(readConcern)); + } + + @Override + public MongoCluster withTimeout(final long timeout, final TimeUnit timeUnit) { + return new MongoClusterImpl(cluster, crypt, operationExecutor, serverSessionPool, clientSessionHelper, + mongoOperationPublisher.withTimeout(timeout, timeUnit)); + } + + public Cluster getCluster() { + return cluster; + } + + @Nullable + public Crypt getCrypt() { + return crypt; + } + + public ClientSessionHelper getClientSessionHelper() { + return clientSessionHelper; + } + + public ServerSessionPool getServerSessionPool() { + return serverSessionPool; + } + + public MongoOperationPublisher getMongoOperationPublisher() { + return mongoOperationPublisher; + } + + public TimeoutSettings getTimeoutSettings() { + return mongoOperationPublisher.getTimeoutSettings(); + } + + @Override + public Publisher startSession() { + return startSession(ClientSessionOptions.builder().build()); + } + + @Override + public Publisher startSession(final ClientSessionOptions options) { + notNull("options", options); + return Mono.fromCallable(() -> clientSessionHelper.createClientSession(options, operationExecutor)); + } + + + @Override + public MongoDatabase getDatabase(final String name) { + return new MongoDatabaseImpl(mongoOperationPublisher.withDatabase(name)); + } + + @Override + public Publisher listDatabaseNames() { + return Flux.from(listDatabases().nameOnly(true)).map(d -> d.getString("name")); + } + + @Override + public Publisher listDatabaseNames(final ClientSession clientSession) { + return Flux.from(listDatabases(clientSession).nameOnly(true)).map(d -> d.getString("name")); + } + + @Override + public ListDatabasesPublisher listDatabases() { + return listDatabases(Document.class); + } + + @Override + public ListDatabasesPublisher listDatabases(final Class clazz) { + return new ListDatabasesPublisherImpl<>(null, mongoOperationPublisher.withDocumentClass(clazz)); + } + + @Override + public ListDatabasesPublisher listDatabases(final ClientSession clientSession) { + return listDatabases(clientSession, Document.class); + } + + @Override + public ListDatabasesPublisher listDatabases(final ClientSession clientSession, final Class clazz) { + return new ListDatabasesPublisherImpl<>(notNull("clientSession", clientSession), mongoOperationPublisher.withDocumentClass(clazz)); + } + + @Override + public ChangeStreamPublisher watch() { + return watch(Collections.emptyList()); + } + + @Override + public ChangeStreamPublisher watch(final Class 
resultClass) { + return watch(Collections.emptyList(), resultClass); + } + + @Override + public ChangeStreamPublisher watch(final List pipeline) { + return watch(pipeline, Document.class); + } + + @Override + public ChangeStreamPublisher watch(final List pipeline, final Class resultClass) { + return new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher.withDatabase("admin"), + resultClass, pipeline, ChangeStreamLevel.CLIENT); + } + + @Override + public ChangeStreamPublisher watch(final ClientSession clientSession) { + return watch(clientSession, Collections.emptyList(), Document.class); + } + + @Override + public ChangeStreamPublisher watch(final ClientSession clientSession, final Class resultClass) { + return watch(clientSession, Collections.emptyList(), resultClass); + } + + @Override + public ChangeStreamPublisher watch(final ClientSession clientSession, final List pipeline) { + return watch(clientSession, pipeline, Document.class); + } + + @Override + public ChangeStreamPublisher watch(final ClientSession clientSession, final List pipeline, + final Class resultClass) { + return new ChangeStreamPublisherImpl<>(notNull("clientSession", clientSession), mongoOperationPublisher.withDatabase("admin"), + resultClass, pipeline, ChangeStreamLevel.CLIENT); + } + + @Override + public Publisher bulkWrite(final List clientWriteModels) { + notNull("clientWriteModels", clientWriteModels); + isTrueArgument("`clientWriteModels` must not be empty", !clientWriteModels.isEmpty()); + return mongoOperationPublisher.clientBulkWrite(null, clientWriteModels, null); + } + + @Override + public Publisher bulkWrite(final List clientWriteModels, + final ClientBulkWriteOptions options) { + notNull("clientWriteModels", clientWriteModels); + isTrueArgument("`clientWriteModels` must not be empty", !clientWriteModels.isEmpty()); + notNull("options", options); + return mongoOperationPublisher.clientBulkWrite(null, clientWriteModels, options); + } + + @Override + public Publisher bulkWrite(final ClientSession clientSession, + final List clientWriteModels) { + notNull("clientSession", clientSession); + notNull("clientWriteModels", clientWriteModels); + isTrueArgument("`clientWriteModels` must not be empty", !clientWriteModels.isEmpty()); + return mongoOperationPublisher.clientBulkWrite(clientSession, clientWriteModels, null); + } + + @Override + public Publisher bulkWrite(final ClientSession clientSession, + final List clientWriteModels, + final ClientBulkWriteOptions options) { + notNull("clientSession", clientSession); + notNull("clientWriteModels", clientWriteModels); + isTrueArgument("`clientWriteModels` must not be empty", !clientWriteModels.isEmpty()); + notNull("options", options); + return mongoOperationPublisher.clientBulkWrite(clientSession, clientWriteModels, options); + } + +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoCollectionImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoCollectionImpl.java new file mode 100644 index 00000000000..0ac3d6a2e39 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoCollectionImpl.java @@ -0,0 +1,859 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.MongoNamespace; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.bulk.BulkWriteResult; +import com.mongodb.client.model.BulkWriteOptions; +import com.mongodb.client.model.CountOptions; +import com.mongodb.client.model.CreateIndexOptions; +import com.mongodb.client.model.DeleteOptions; +import com.mongodb.client.model.DropCollectionOptions; +import com.mongodb.client.model.DropIndexOptions; +import com.mongodb.client.model.EstimatedDocumentCountOptions; +import com.mongodb.client.model.FindOneAndDeleteOptions; +import com.mongodb.client.model.FindOneAndReplaceOptions; +import com.mongodb.client.model.FindOneAndUpdateOptions; +import com.mongodb.client.model.IndexModel; +import com.mongodb.client.model.IndexOptions; +import com.mongodb.client.model.InsertManyOptions; +import com.mongodb.client.model.InsertOneOptions; +import com.mongodb.client.model.RenameCollectionOptions; +import com.mongodb.client.model.ReplaceOptions; +import com.mongodb.client.model.SearchIndexModel; +import com.mongodb.client.model.UpdateOptions; +import com.mongodb.client.model.WriteModel; +import com.mongodb.client.result.DeleteResult; +import com.mongodb.client.result.InsertManyResult; +import com.mongodb.client.result.InsertOneResult; +import com.mongodb.client.result.UpdateResult; +import com.mongodb.internal.client.model.AggregationLevel; +import com.mongodb.internal.client.model.changestream.ChangeStreamLevel; +import com.mongodb.reactivestreams.client.AggregatePublisher; +import com.mongodb.reactivestreams.client.ChangeStreamPublisher; +import com.mongodb.reactivestreams.client.ClientSession; +import com.mongodb.reactivestreams.client.DistinctPublisher; +import com.mongodb.reactivestreams.client.FindPublisher; +import com.mongodb.reactivestreams.client.ListIndexesPublisher; +import com.mongodb.reactivestreams.client.ListSearchIndexesPublisher; +import com.mongodb.reactivestreams.client.MongoCollection; +import org.bson.BsonDocument; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; +import org.reactivestreams.Publisher; + +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.assertions.Assertions.notNullElements; + + +final class MongoCollectionImpl implements MongoCollection { + private final MongoOperationPublisher mongoOperationPublisher; + + MongoCollectionImpl(final MongoOperationPublisher mongoOperationPublisher) { + this.mongoOperationPublisher = notNull("mongoOperationPublisher", mongoOperationPublisher); + } + + @Override + public MongoNamespace getNamespace() { + return assertNotNull(mongoOperationPublisher.getNamespace()); + } + + @Override + public Class getDocumentClass() { + return mongoOperationPublisher.getDocumentClass(); + } + + @Override + public CodecRegistry 
getCodecRegistry() { + return mongoOperationPublisher.getCodecRegistry(); + } + + @Override + public ReadPreference getReadPreference() { + return mongoOperationPublisher.getReadPreference(); + } + + @Override + public WriteConcern getWriteConcern() { + return mongoOperationPublisher.getWriteConcern(); + } + + @Override + public ReadConcern getReadConcern() { + return mongoOperationPublisher.getReadConcern(); + } + + @Override + public Long getTimeout(final TimeUnit timeUnit) { + Long timeoutMS = mongoOperationPublisher.getTimeoutMS(); + return (timeoutMS != null) ? notNull("timeUnit", timeUnit).convert(timeoutMS, TimeUnit.MILLISECONDS) : null; + } + + MongoOperationPublisher getPublisherHelper() { + return mongoOperationPublisher; + } + + @Override + public MongoCollection withDocumentClass(final Class newDocumentClass) { + return new MongoCollectionImpl<>(mongoOperationPublisher.withDocumentClass(newDocumentClass)); + } + + @Override + public MongoCollection withCodecRegistry(final CodecRegistry codecRegistry) { + return new MongoCollectionImpl<>(mongoOperationPublisher.withCodecRegistry(codecRegistry)); + } + + @Override + public MongoCollection withReadPreference(final ReadPreference readPreference) { + return new MongoCollectionImpl<>(mongoOperationPublisher.withReadPreference(readPreference)); + } + + @Override + public MongoCollection withWriteConcern(final WriteConcern writeConcern) { + return new MongoCollectionImpl<>(mongoOperationPublisher.withWriteConcern(writeConcern)); + } + + @Override + public MongoCollection withReadConcern(final ReadConcern readConcern) { + return new MongoCollectionImpl<>(mongoOperationPublisher.withReadConcern(readConcern)); + } + + @Override + public MongoCollection withTimeout(final long timeout, final TimeUnit timeUnit) { + return new MongoCollectionImpl<>(mongoOperationPublisher.withTimeout(timeout, timeUnit)); + } + + @Override + public Publisher estimatedDocumentCount() { + return estimatedDocumentCount(new EstimatedDocumentCountOptions()); + } + + @Override + public Publisher estimatedDocumentCount(final EstimatedDocumentCountOptions options) { + return mongoOperationPublisher.estimatedDocumentCount(options); + } + + @Override + public Publisher countDocuments() { + return countDocuments(new BsonDocument()); + } + + @Override + public Publisher countDocuments(final Bson filter) { + return countDocuments(filter, new CountOptions()); + } + + @Override + public Publisher countDocuments(final Bson filter, final CountOptions options) { + return mongoOperationPublisher.countDocuments(null, filter, options); + } + + @Override + public Publisher countDocuments(final ClientSession clientSession) { + return countDocuments(clientSession, new BsonDocument()); + } + + @Override + public Publisher countDocuments(final ClientSession clientSession, final Bson filter) { + return countDocuments(clientSession, filter, new CountOptions()); + } + + @Override + public Publisher countDocuments(final ClientSession clientSession, final Bson filter, final CountOptions options) { + return mongoOperationPublisher.countDocuments(notNull("clientSession", clientSession), filter, options); + } + + @Override + public DistinctPublisher distinct(final String fieldName, final Class resultClass) { + return distinct(fieldName, new BsonDocument(), resultClass); + } + + @Override + public DistinctPublisher distinct(final String fieldName, final Bson filter, final Class resultClass) { + return new DistinctPublisherImpl<>(null, mongoOperationPublisher.withDocumentClass(resultClass), 
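/*
 * Illustrative note (not part of this patch): the withXxx methods above never mutate this
 * collection; each returns a new MongoCollectionImpl around a reconfigured publisher.
 * Sketch, assuming "coll" is a MongoCollection<Document>:
 *
 *   MongoCollection<BsonDocument> raw = coll
 *           .withDocumentClass(BsonDocument.class)
 *           .withReadConcern(ReadConcern.MAJORITY);   // "coll" itself is unchanged
 */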
fieldName, filter); + } + + @Override + public DistinctPublisher distinct(final ClientSession clientSession, final String fieldName, + final Class resultClass) { + return distinct(clientSession, fieldName, new BsonDocument(), resultClass); + } + + @Override + public DistinctPublisher distinct(final ClientSession clientSession, final String fieldName, final Bson filter, + final Class resultClass) { + return new DistinctPublisherImpl<>(notNull("clientSession", clientSession), + mongoOperationPublisher.withDocumentClass(resultClass), fieldName, filter); + } + + @Override + public FindPublisher find() { + return find(new BsonDocument(), getDocumentClass()); + } + + @Override + public FindPublisher find(final Class resultClass) { + return find(new BsonDocument(), resultClass); + } + + @Override + public FindPublisher find(final Bson filter) { + return find(filter, getDocumentClass()); + } + + @Override + public FindPublisher find(final Bson filter, final Class resultClass) { + return new FindPublisherImpl<>(null, mongoOperationPublisher.withDocumentClass(resultClass), filter); + } + + @Override + public FindPublisher find(final ClientSession clientSession) { + return find(clientSession, new BsonDocument(), getDocumentClass()); + } + + @Override + public FindPublisher find(final ClientSession clientSession, final Class resultClass) { + return find(clientSession, new BsonDocument(), resultClass); + } + + @Override + public FindPublisher find(final ClientSession clientSession, final Bson filter) { + return find(clientSession, filter, getDocumentClass()); + } + + @Override + public FindPublisher find(final ClientSession clientSession, final Bson filter, final Class resultClass) { + return new FindPublisherImpl<>(notNull("clientSession", clientSession), + mongoOperationPublisher.withDocumentClass(resultClass), filter); + } + + @Override + public AggregatePublisher aggregate(final List pipeline) { + return aggregate(pipeline, getDocumentClass()); + } + + @Override + public AggregatePublisher aggregate(final List pipeline, final Class resultClass) { + return new AggregatePublisherImpl<>(null, mongoOperationPublisher.withDocumentClass(resultClass), pipeline, + AggregationLevel.COLLECTION); + } + + @Override + public AggregatePublisher aggregate(final ClientSession clientSession, final List pipeline) { + return aggregate(clientSession, pipeline, getDocumentClass()); + } + + @Override + public AggregatePublisher aggregate(final ClientSession clientSession, final List pipeline, + final Class resultClass) { + return new AggregatePublisherImpl<>(notNull("clientSession", clientSession), + mongoOperationPublisher.withDocumentClass(resultClass), pipeline, AggregationLevel.COLLECTION); + } + + @Override + public ChangeStreamPublisher watch() { + return watch(Document.class); + } + + @Override + public ChangeStreamPublisher watch(final Class resultClass) { + return watch(Collections.emptyList(), resultClass); + } + + @Override + public ChangeStreamPublisher watch(final List pipeline) { + return watch(pipeline, Document.class); + } + + @Override + public ChangeStreamPublisher watch(final List pipeline, final Class resultClass) { + return new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher, resultClass, pipeline, + ChangeStreamLevel.COLLECTION); + } + + @Override + public ChangeStreamPublisher watch(final ClientSession clientSession) { + return watch(clientSession, Document.class); + } + + @Override + public ChangeStreamPublisher watch(final ClientSession clientSession, final Class resultClass) { + 
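/*
 * Illustrative note (not part of this patch): the publishers built above (distinct, find,
 * aggregate, watch) are cold -- no command is sent until something subscribes.
 * Sketch using Reactor, assuming "coll" is a MongoCollection<Document>:
 *
 *   Flux.from(coll.find(Filters.eq("status", "A")).limit(10))
 *       .collectList()
 *       .block();   // block() only for the demo; real code stays reactive
 */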
return watch(clientSession, Collections.emptyList(), resultClass); + } + + @Override + public ChangeStreamPublisher watch(final ClientSession clientSession, final List pipeline) { + return watch(clientSession, pipeline, Document.class); + } + + @Override + public ChangeStreamPublisher watch(final ClientSession clientSession, final List pipeline, + final Class resultClass) { + return new ChangeStreamPublisherImpl<>(notNull("clientSession", clientSession), mongoOperationPublisher, resultClass, + pipeline, ChangeStreamLevel.COLLECTION); + } + + @SuppressWarnings("deprecation") + @Override + public com.mongodb.reactivestreams.client.MapReducePublisher mapReduce(final String mapFunction, final String reduceFunction) { + return mapReduce(mapFunction, reduceFunction, getDocumentClass()); + } + + @SuppressWarnings("deprecation") + @Override + public com.mongodb.reactivestreams.client.MapReducePublisher mapReduce(final String mapFunction, + final String reduceFunction, final Class resultClass) { + return new MapReducePublisherImpl<>(null, mongoOperationPublisher.withDocumentClass(resultClass), mapFunction, + reduceFunction); + } + + @SuppressWarnings("deprecation") + @Override + public com.mongodb.reactivestreams.client.MapReducePublisher mapReduce(final ClientSession clientSession, final String mapFunction, + final String reduceFunction) { + return mapReduce(clientSession, mapFunction, reduceFunction, getDocumentClass()); + } + + @SuppressWarnings("deprecation") + @Override + public com.mongodb.reactivestreams.client.MapReducePublisher mapReduce(final ClientSession clientSession, + final String mapFunction, final String reduceFunction, final Class resultClass) { + return new MapReducePublisherImpl<>(notNull("clientSession", clientSession), + mongoOperationPublisher.withDocumentClass(resultClass), mapFunction, reduceFunction); + } + + @Override + public Publisher bulkWrite(final List> requests) { + return bulkWrite(requests, new BulkWriteOptions()); + } + + @Override + public Publisher bulkWrite(final List> requests, + final BulkWriteOptions options) { + return mongoOperationPublisher.bulkWrite(null, requests, options); + } + + @Override + public Publisher bulkWrite(final ClientSession clientSession, + final List> requests) { + return bulkWrite(clientSession, requests, new BulkWriteOptions()); + } + + @Override + public Publisher bulkWrite(final ClientSession clientSession, + final List> requests, + final BulkWriteOptions options) { + return mongoOperationPublisher.bulkWrite(notNull("clientSession", clientSession), requests, options); + } + + @Override + public Publisher insertOne(final T document) { + return insertOne(document, new InsertOneOptions()); + } + + @Override + public Publisher insertOne(final T document, final InsertOneOptions options) { + return mongoOperationPublisher.insertOne(null, document, options); + } + + @Override + public Publisher insertOne(final ClientSession clientSession, final T document) { + return insertOne(clientSession, document, new InsertOneOptions()); + } + + @Override + public Publisher insertOne(final ClientSession clientSession, final T document, + final InsertOneOptions options) { + return mongoOperationPublisher.insertOne(notNull("clientSession", clientSession), document, options); + } + + @Override + public Publisher insertMany(final List documents) { + return insertMany(documents, new InsertManyOptions()); + } + + @Override + public Publisher insertMany(final List documents, final InsertManyOptions options) { + return 
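/*
 * Illustrative note (not part of this patch): bulkWrite above takes a heterogeneous list
 * of WriteModel instances executed as one batch. Sketch, assuming "coll" is a
 * MongoCollection<Document>:
 *
 *   List<WriteModel<Document>> ops = Arrays.asList(
 *           new InsertOneModel<>(new Document("_id", 1)),
 *           new DeleteManyModel<>(Filters.lt("score", 10)));
 *   Mono.from(coll.bulkWrite(ops, new BulkWriteOptions().ordered(false))).block();
 */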
mongoOperationPublisher.insertMany(null, documents, options); + } + + @Override + public Publisher insertMany(final ClientSession clientSession, final List documents) { + return insertMany(clientSession, documents, new InsertManyOptions()); + } + + @Override + public Publisher insertMany(final ClientSession clientSession, final List documents, + final InsertManyOptions options) { + return mongoOperationPublisher.insertMany(notNull("clientSession", clientSession), documents, options); + } + + @Override + public Publisher deleteOne(final Bson filter) { + return deleteOne(filter, new DeleteOptions()); + } + + @Override + public Publisher deleteOne(final Bson filter, final DeleteOptions options) { + return mongoOperationPublisher.deleteOne(null, filter, options); + } + + @Override + public Publisher deleteOne(final ClientSession clientSession, final Bson filter) { + return deleteOne(clientSession, filter, new DeleteOptions()); + } + + @Override + public Publisher deleteOne(final ClientSession clientSession, final Bson filter, final DeleteOptions options) { + return mongoOperationPublisher.deleteOne(notNull("clientSession", clientSession), filter, options); + } + + @Override + public Publisher deleteMany(final Bson filter) { + return deleteMany(filter, new DeleteOptions()); + } + + @Override + public Publisher deleteMany(final Bson filter, final DeleteOptions options) { + return mongoOperationPublisher.deleteMany(null, filter, options); + } + + @Override + public Publisher deleteMany(final ClientSession clientSession, final Bson filter) { + return deleteMany(clientSession, filter, new DeleteOptions()); + } + + @Override + public Publisher deleteMany(final ClientSession clientSession, final Bson filter, final DeleteOptions options) { + return mongoOperationPublisher.deleteMany(notNull("clientSession", clientSession), filter, options); + } + + @Override + public Publisher replaceOne(final Bson filter, final T replacement) { + return replaceOne(filter, replacement, new ReplaceOptions()); + } + + @Override + public Publisher replaceOne(final Bson filter, final T replacement, final ReplaceOptions options) { + return mongoOperationPublisher.replaceOne(null, filter, replacement, options); + } + + @Override + public Publisher replaceOne(final ClientSession clientSession, final Bson filter, final T replacement) { + return replaceOne(clientSession, filter, replacement, new ReplaceOptions()); + } + + @Override + public Publisher replaceOne(final ClientSession clientSession, final Bson filter, final T replacement, + final ReplaceOptions options) { + return mongoOperationPublisher.replaceOne(notNull("clientSession", clientSession), filter, replacement, options); + } + + @Override + public Publisher updateOne(final Bson filter, final Bson update) { + return updateOne(filter, update, new UpdateOptions()); + } + + @Override + public Publisher updateOne(final Bson filter, final Bson update, final UpdateOptions options) { + return mongoOperationPublisher.updateOne(null, filter, update, options); + } + + @Override + public Publisher updateOne(final ClientSession clientSession, final Bson filter, final Bson update) { + return updateOne(clientSession, filter, update, new UpdateOptions()); + } + + @Override + public Publisher updateOne(final ClientSession clientSession, final Bson filter, final Bson update, + final UpdateOptions options) { + return mongoOperationPublisher.updateOne(notNull("clientSession", clientSession), filter, update, options); + } + + @Override + public Publisher updateOne(final Bson filter, 
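/*
 * Illustrative note (not part of this patch): every no-options overload above forwards to
 * its options-taking sibling with a freshly constructed defaults object, so options
 * instances are never shared between calls. Sketch:
 *
 *   Mono.from(coll.deleteOne(Filters.eq("_id", 42))).block();
 *   // equivalent to: coll.deleteOne(Filters.eq("_id", 42), new DeleteOptions())
 */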
final List update) { + return updateOne(filter, update, new UpdateOptions()); + } + + @Override + public Publisher updateOne(final Bson filter, final List update, final UpdateOptions options) { + return mongoOperationPublisher.updateOne(null, filter, update, options); + } + + @Override + public Publisher updateOne(final ClientSession clientSession, final Bson filter, final List update) { + return updateOne(clientSession, filter, update, new UpdateOptions()); + } + + @Override + public Publisher updateOne(final ClientSession clientSession, final Bson filter, final List update, + final UpdateOptions options) { + return mongoOperationPublisher.updateOne(notNull("clientSession", clientSession), filter, update, options); + } + + @Override + public Publisher updateMany(final Bson filter, final Bson update) { + return updateMany(filter, update, new UpdateOptions()); + } + + @Override + public Publisher updateMany(final Bson filter, final Bson update, final UpdateOptions options) { + return mongoOperationPublisher.updateMany(null, filter, update, options); + } + + @Override + public Publisher updateMany(final ClientSession clientSession, final Bson filter, final Bson update) { + return updateMany(clientSession, filter, update, new UpdateOptions()); + } + + @Override + public Publisher updateMany(final ClientSession clientSession, final Bson filter, final Bson update, + final UpdateOptions options) { + return mongoOperationPublisher.updateMany(notNull("clientSession", clientSession), filter, update, options); + } + + @Override + public Publisher updateMany(final Bson filter, final List update) { + return updateMany(filter, update, new UpdateOptions()); + } + + @Override + public Publisher updateMany(final Bson filter, final List update, final UpdateOptions options) { + return mongoOperationPublisher.updateMany(null, filter, update, options); + } + + @Override + public Publisher updateMany(final ClientSession clientSession, final Bson filter, final List update) { + return updateMany(clientSession, filter, update, new UpdateOptions()); + } + + @Override + public Publisher updateMany(final ClientSession clientSession, final Bson filter, final List update, + final UpdateOptions options) { + return mongoOperationPublisher.updateMany(notNull("clientSession", clientSession), filter, update, options); + } + + @Override + public Publisher findOneAndDelete(final Bson filter) { + return findOneAndDelete(filter, new FindOneAndDeleteOptions()); + } + + @Override + public Publisher findOneAndDelete(final Bson filter, final FindOneAndDeleteOptions options) { + return mongoOperationPublisher.findOneAndDelete(null, filter, options); + } + + @Override + public Publisher findOneAndDelete(final ClientSession clientSession, final Bson filter) { + return findOneAndDelete(clientSession, filter, new FindOneAndDeleteOptions()); + } + + @Override + public Publisher findOneAndDelete(final ClientSession clientSession, final Bson filter, + final FindOneAndDeleteOptions options) { + return mongoOperationPublisher.findOneAndDelete(notNull("clientSession", clientSession), filter, options); + } + + @Override + public Publisher findOneAndReplace(final Bson filter, final T replacement) { + return findOneAndReplace(filter, replacement, new FindOneAndReplaceOptions()); + } + + @Override + public Publisher findOneAndReplace(final Bson filter, final T replacement, final FindOneAndReplaceOptions options) { + return mongoOperationPublisher.findOneAndReplace(null, filter, replacement, options); + } + + @Override + public Publisher 
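/*
 * Illustrative note (not part of this patch): the List<Bson> update overloads above run
 * the update as an aggregation pipeline (MongoDB 4.2+), which may read other fields of
 * the matched document. Sketch, assuming "coll" is a MongoCollection<Document>:
 *
 *   Mono.from(coll.updateMany(
 *           Filters.exists("price"),
 *           Collections.singletonList(new Document("$set", new Document("priceWithTax",
 *                   new Document("$multiply", Arrays.asList("$price", 1.2)))))))
 *       .block();
 */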
findOneAndReplace(final ClientSession clientSession, final Bson filter, final T replacement) { + return findOneAndReplace(clientSession, filter, replacement, new FindOneAndReplaceOptions()); + } + + @Override + public Publisher findOneAndReplace(final ClientSession clientSession, final Bson filter, final T replacement, + final FindOneAndReplaceOptions options) { + return mongoOperationPublisher.findOneAndReplace(notNull("clientSession", clientSession), filter, replacement, options); + } + + @Override + public Publisher findOneAndUpdate(final Bson filter, final Bson update) { + return findOneAndUpdate(filter, update, new FindOneAndUpdateOptions()); + } + + @Override + public Publisher findOneAndUpdate(final Bson filter, final Bson update, final FindOneAndUpdateOptions options) { + return mongoOperationPublisher.findOneAndUpdate(null, filter, update, options); + } + + @Override + public Publisher findOneAndUpdate(final ClientSession clientSession, final Bson filter, final Bson update) { + return findOneAndUpdate(clientSession, filter, update, new FindOneAndUpdateOptions()); + } + + @Override + public Publisher findOneAndUpdate(final ClientSession clientSession, final Bson filter, final Bson update, + final FindOneAndUpdateOptions options) { + return mongoOperationPublisher.findOneAndUpdate(notNull("clientSession", clientSession), filter, update, options); + } + + @Override + public Publisher findOneAndUpdate(final Bson filter, final List update) { + return findOneAndUpdate(filter, update, new FindOneAndUpdateOptions()); + } + + @Override + public Publisher findOneAndUpdate(final Bson filter, final List update, + final FindOneAndUpdateOptions options) { + return mongoOperationPublisher.findOneAndUpdate(null, filter, update, options); + } + + @Override + public Publisher findOneAndUpdate(final ClientSession clientSession, final Bson filter, + final List update) { + return findOneAndUpdate(clientSession, filter, update, new FindOneAndUpdateOptions()); + } + + @Override + public Publisher findOneAndUpdate(final ClientSession clientSession, final Bson filter, + final List update, final FindOneAndUpdateOptions options) { + return mongoOperationPublisher.findOneAndUpdate(notNull("clientSession", clientSession), filter, update, options); + } + + @Override + public Publisher drop() { + return mongoOperationPublisher.dropCollection(null, new DropCollectionOptions()); + } + + @Override + public Publisher drop(final ClientSession clientSession) { + return mongoOperationPublisher.dropCollection(notNull("clientSession", clientSession), new DropCollectionOptions()); + } + + @Override + public Publisher drop(final DropCollectionOptions dropCollectionOptions) { + return mongoOperationPublisher.dropCollection(null, dropCollectionOptions); + } + + @Override + public Publisher drop(final ClientSession clientSession, final DropCollectionOptions dropCollectionOptions) { + return mongoOperationPublisher.dropCollection(notNull("clientSession", clientSession), dropCollectionOptions); + } + + @Override + public Publisher createSearchIndex(final String indexName, final Bson definition) { + notNull("indexName", indexName); + notNull("definition", definition); + + return mongoOperationPublisher.createSearchIndex(indexName, definition); + } + + @Override + public Publisher createSearchIndex(final Bson definition) { + notNull("definition", definition); + + return mongoOperationPublisher.createSearchIndex(null, definition); + } + + @Override + public Publisher createSearchIndexes(final List searchIndexModels) { + 
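/*
 * Illustrative note (not part of this patch): findOneAndUpdate above emits the document
 * itself rather than an UpdateResult, and by default that is the pre-update image.
 * Sketch, assuming "coll" is a MongoCollection<Document>:
 *
 *   Document updated = Mono.from(coll.findOneAndUpdate(
 *           Filters.eq("_id", 1),
 *           Updates.inc("visits", 1),
 *           new FindOneAndUpdateOptions().returnDocument(ReturnDocument.AFTER))).block();
 */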
notNullElements("searchIndexModels", searchIndexModels); + + return mongoOperationPublisher.createSearchIndexes(searchIndexModels); + } + + @Override + public Publisher updateSearchIndex(final String indexName, final Bson definition) { + notNull("indexName", indexName); + notNull("definition", definition); + + return mongoOperationPublisher.updateSearchIndex(indexName, definition); + } + + @Override + public Publisher dropSearchIndex(final String indexName) { + notNull("name", indexName); + return mongoOperationPublisher.dropSearchIndex(indexName); + } + + @Override + public ListSearchIndexesPublisher listSearchIndexes() { + return listSearchIndexes(Document.class); + } + + @Override + public ListSearchIndexesPublisher listSearchIndexes(final Class resultClass) { + notNull("resultClass", resultClass); + + return new ListSearchIndexesPublisherImpl<>(mongoOperationPublisher + .withReadConcern(ReadConcern.DEFAULT) + .withDocumentClass(resultClass)); + } + + @Override + public Publisher createIndex(final Bson key) { + return createIndex(key, new IndexOptions()); + } + + @Override + public Publisher createIndex(final Bson key, final IndexOptions options) { + return mongoOperationPublisher.createIndex(null, key, options); + } + + @Override + public Publisher createIndex(final ClientSession clientSession, final Bson key) { + return createIndex(clientSession, key, new IndexOptions()); + } + + @Override + public Publisher createIndex(final ClientSession clientSession, final Bson key, final IndexOptions options) { + return mongoOperationPublisher.createIndex(notNull("clientSession", clientSession), key, options); + } + + @Override + public Publisher createIndexes(final List indexes) { + return createIndexes(indexes, new CreateIndexOptions()); + } + + @Override + public Publisher createIndexes(final List indexes, final CreateIndexOptions options) { + return mongoOperationPublisher.createIndexes(null, indexes, options); + } + + @Override + public Publisher createIndexes(final ClientSession clientSession, final List indexes) { + return createIndexes(clientSession, indexes, new CreateIndexOptions()); + } + + @Override + public Publisher createIndexes(final ClientSession clientSession, final List indexes, + final CreateIndexOptions options) { + return mongoOperationPublisher.createIndexes(notNull("clientSession", clientSession), indexes, options); + } + + @Override + public ListIndexesPublisher listIndexes() { + return listIndexes(Document.class); + } + + @Override + public ListIndexesPublisher listIndexes(final Class resultClass) { + return new ListIndexesPublisherImpl<>(null, mongoOperationPublisher.withDocumentClass(resultClass)); + } + + @Override + public ListIndexesPublisher listIndexes(final ClientSession clientSession) { + return listIndexes(clientSession, Document.class); + } + + @Override + public ListIndexesPublisher listIndexes(final ClientSession clientSession, final Class resultClass) { + return new ListIndexesPublisherImpl<>(notNull("clientSession", clientSession), + mongoOperationPublisher.withDocumentClass(resultClass)); + } + + @Override + public Publisher dropIndex(final String indexName) { + return dropIndex(indexName, new DropIndexOptions()); + } + + @Override + public Publisher dropIndex(final Bson keys) { + return dropIndex(keys, new DropIndexOptions()); + } + + @Override + public Publisher dropIndex(final String indexName, final DropIndexOptions dropIndexOptions) { + return mongoOperationPublisher.dropIndex(null, indexName, dropIndexOptions); + } + + @Override + public Publisher 
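/*
 * Illustrative note (not part of this patch): createIndex above completes with the name
 * of the created index. Sketch, assuming "coll" is a MongoCollection<Document>:
 *
 *   String name = Mono.from(coll.createIndex(
 *           Indexes.ascending("email"),
 *           new IndexOptions().unique(true))).block();   // e.g. "email_1"
 */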
dropIndex(final Bson keys, final DropIndexOptions dropIndexOptions) { + return mongoOperationPublisher.dropIndex(null, keys, dropIndexOptions); + } + + @Override + public Publisher dropIndex(final ClientSession clientSession, final String indexName) { + return dropIndex(clientSession, indexName, new DropIndexOptions()); + } + + @Override + public Publisher dropIndex(final ClientSession clientSession, final Bson keys) { + return dropIndex(clientSession, keys, new DropIndexOptions()); + } + + @Override + public Publisher dropIndex(final ClientSession clientSession, final String indexName, + final DropIndexOptions options) { + return mongoOperationPublisher.dropIndex(notNull("clientSession", clientSession), indexName, options); + } + + @Override + public Publisher dropIndex(final ClientSession clientSession, final Bson keys, final DropIndexOptions options) { + return mongoOperationPublisher.dropIndex(notNull("clientSession", clientSession), keys, options); + } + + @Override + public Publisher dropIndexes() { + return dropIndexes(new DropIndexOptions()); + } + + @Override + public Publisher dropIndexes(final DropIndexOptions options) { + return mongoOperationPublisher.dropIndexes(null, options); + } + + @Override + public Publisher dropIndexes(final ClientSession clientSession) { + return dropIndexes(clientSession, new DropIndexOptions()); + } + + @Override + public Publisher dropIndexes(final ClientSession clientSession, final DropIndexOptions options) { + return mongoOperationPublisher.dropIndexes(notNull("clientSession", clientSession), options); + } + + @Override + public Publisher renameCollection(final MongoNamespace newCollectionNamespace) { + return renameCollection(newCollectionNamespace, new RenameCollectionOptions()); + } + + @Override + public Publisher renameCollection(final MongoNamespace newCollectionNamespace, final RenameCollectionOptions options) { + return mongoOperationPublisher.renameCollection(null, newCollectionNamespace, options); + } + + @Override + public Publisher renameCollection(final ClientSession clientSession, final MongoNamespace newCollectionNamespace) { + return renameCollection(clientSession, newCollectionNamespace, new RenameCollectionOptions()); + } + + @Override + public Publisher renameCollection(final ClientSession clientSession, final MongoNamespace newCollectionNamespace, + final RenameCollectionOptions options) { + return mongoOperationPublisher.renameCollection(notNull("clientSession", clientSession), newCollectionNamespace, options); + } + +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoDatabaseImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoDatabaseImpl.java new file mode 100644 index 00000000000..f8709f12ad8 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoDatabaseImpl.java @@ -0,0 +1,327 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.MongoNamespace; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.CreateViewOptions; +import com.mongodb.internal.client.model.AggregationLevel; +import com.mongodb.internal.client.model.changestream.ChangeStreamLevel; +import com.mongodb.reactivestreams.client.AggregatePublisher; +import com.mongodb.reactivestreams.client.ChangeStreamPublisher; +import com.mongodb.reactivestreams.client.ClientSession; +import com.mongodb.reactivestreams.client.ListCollectionNamesPublisher; +import com.mongodb.reactivestreams.client.ListCollectionsPublisher; +import com.mongodb.reactivestreams.client.MongoCollection; +import com.mongodb.reactivestreams.client.MongoDatabase; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; +import org.reactivestreams.Publisher; + +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.MongoNamespace.checkDatabaseNameValidity; +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + + +/** + * The internal MongoDatabase implementation. + * + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class MongoDatabaseImpl implements MongoDatabase { + private final MongoOperationPublisher mongoOperationPublisher; + + MongoDatabaseImpl(final MongoOperationPublisher mongoOperationPublisher) { + this.mongoOperationPublisher = notNull("publisherHelper", mongoOperationPublisher); + checkDatabaseNameValidity(getName()); + } + + @Override + public String getName() { + return assertNotNull(mongoOperationPublisher.getNamespace()).getDatabaseName(); + } + + @Override + public CodecRegistry getCodecRegistry() { + return mongoOperationPublisher.getCodecRegistry(); + } + + @Override + public ReadPreference getReadPreference() { + return mongoOperationPublisher.getReadPreference(); + } + + @Override + public WriteConcern getWriteConcern() { + return mongoOperationPublisher.getWriteConcern(); + } + + @Override + public ReadConcern getReadConcern() { + return mongoOperationPublisher.getReadConcern(); + } + + @Override + public Long getTimeout(final TimeUnit timeUnit) { + Long timeoutMS = mongoOperationPublisher.getTimeoutSettings().getTimeoutMS(); + return timeoutMS == null ? null : notNull("timeUnit", timeUnit).convert(timeoutMS, MILLISECONDS); + } + + MongoOperationPublisher getMongoOperationPublisher() { + return mongoOperationPublisher; + } + + @Override + public MongoDatabase withCodecRegistry(final CodecRegistry codecRegistry) { + return new MongoDatabaseImpl(mongoOperationPublisher.withCodecRegistry(codecRegistry)); + } + + @Override + public MongoDatabase withReadPreference(final ReadPreference readPreference) { + return new MongoDatabaseImpl(mongoOperationPublisher.withReadPreference(readPreference)); + } + + @Override + public MongoDatabase withWriteConcern(final WriteConcern writeConcern) { + return new MongoDatabaseImpl(mongoOperationPublisher.withWriteConcern(writeConcern)); + } + + @Override + public MongoDatabase withReadConcern(final ReadConcern readConcern) { + return new MongoDatabaseImpl(mongoOperationPublisher.withReadConcern(readConcern)); + } + + @Override + public MongoDatabase withTimeout(final long timeout, final TimeUnit timeUnit) { + return new MongoDatabaseImpl(mongoOperationPublisher.withTimeout(timeout, timeUnit)); + } + + @Override + public MongoCollection getCollection(final String collectionName) { + return getCollection(collectionName, Document.class); + } + + @Override + public MongoCollection getCollection(final String collectionName, final Class clazz) { + return new MongoCollectionImpl<>( + mongoOperationPublisher.withNamespaceAndDocumentClass(new MongoNamespace(getName(), collectionName), clazz)); + } + + @Override + public Publisher runCommand(final Bson command) { + return runCommand(command, Document.class); + } + + @Override + public Publisher runCommand(final Bson command, final ReadPreference readPreference) { + return runCommand(command, readPreference, Document.class); + } + + @Override + public Publisher runCommand(final Bson command, final Class clazz) { + return runCommand(command, ReadPreference.primary(), clazz); + } + + @Override + public Publisher runCommand(final Bson command, final ReadPreference readPreference, final Class clazz) { + return mongoOperationPublisher.runCommand(null, command, readPreference, clazz); + } + + @Override + public Publisher runCommand(final ClientSession clientSession, final Bson command) { + return runCommand(clientSession, command, Document.class); + } + + @Override + public Publisher runCommand(final ClientSession clientSession, final Bson command, final ReadPreference readPreference) { + return 
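/*
 * Illustrative note (not part of this patch): getCollection above is where a database
 * hands out collection views; the optional second argument picks the class documents
 * decode to. Sketch, assuming "db" is a reactive MongoDatabase:
 *
 *   MongoCollection<BsonDocument> events = db.getCollection("events", BsonDocument.class);
 */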
runCommand(clientSession, command, readPreference, Document.class); + } + + @Override + public Publisher runCommand(final ClientSession clientSession, final Bson command, final Class clazz) { + return runCommand(clientSession, command, ReadPreference.primary(), clazz); + } + + @Override + public Publisher runCommand(final ClientSession clientSession, final Bson command, + final ReadPreference readPreference, final Class clazz) { + return mongoOperationPublisher.runCommand(notNull("clientSession", clientSession), command, readPreference, clazz); + } + + @Override + public Publisher drop() { + return mongoOperationPublisher.dropDatabase(null); + } + + @Override + public Publisher drop(final ClientSession clientSession) { + return mongoOperationPublisher.dropDatabase(notNull("clientSession", clientSession)); + } + + @Override + public ListCollectionNamesPublisher listCollectionNames() { + return new ListCollectionNamesPublisherImpl(new ListCollectionsPublisherImpl<>(null, mongoOperationPublisher, true)); + } + + @Override + public ListCollectionNamesPublisher listCollectionNames(final ClientSession clientSession) { + return new ListCollectionNamesPublisherImpl( + new ListCollectionsPublisherImpl<>(notNull("clientSession", clientSession), mongoOperationPublisher, true)); + } + + @Override + public ListCollectionsPublisher listCollections() { + return listCollections(Document.class); + } + + @Override + public ListCollectionsPublisher listCollections(final Class clazz) { + return new ListCollectionsPublisherImpl<>(null, mongoOperationPublisher.withDocumentClass(clazz), false); + } + + @Override + public ListCollectionsPublisher listCollections(final ClientSession clientSession) { + return listCollections(clientSession, Document.class); + } + + @Override + public ListCollectionsPublisher listCollections(final ClientSession clientSession, final Class clazz) { + return new ListCollectionsPublisherImpl<>(notNull("clientSession", clientSession), + mongoOperationPublisher.withDocumentClass(clazz), false); + } + + @Override + public Publisher createCollection(final String collectionName) { + return createCollection(collectionName, new CreateCollectionOptions()); + } + + @Override + public Publisher createCollection(final String collectionName, final CreateCollectionOptions options) { + return mongoOperationPublisher.createCollection(null, + notNull("collectionName", collectionName), notNull("options", options)); + } + + @Override + public Publisher createCollection(final ClientSession clientSession, final String collectionName) { + return createCollection(clientSession, collectionName, new CreateCollectionOptions()); + } + + @Override + public Publisher createCollection(final ClientSession clientSession, final String collectionName, + final CreateCollectionOptions options) { + return mongoOperationPublisher.createCollection(notNull("clientSession", clientSession), + notNull("collectionName", collectionName), notNull("options", options)); + } + + @Override + public Publisher createView(final String viewName, final String viewOn, final List pipeline) { + return createView(viewName, viewOn, pipeline, new CreateViewOptions()); + } + + @Override + public Publisher createView(final String viewName, final String viewOn, final List pipeline, + final CreateViewOptions options) { + return mongoOperationPublisher.createView(null, viewName, viewOn, pipeline, options); + } + + @Override + public Publisher createView(final ClientSession clientSession, final String viewName, final String viewOn, + final List 
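/*
 * Illustrative note (not part of this patch): runCommand above is the escape hatch for
 * commands without a typed helper. Sketch, assuming "db" is a reactive MongoDatabase:
 *
 *   Document reply = Mono.from(db.runCommand(new Document("ping", 1))).block();
 *   // reply.get("ok") is 1.0 when the server is reachable
 */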
pipeline) { + return createView(clientSession, viewName, viewOn, pipeline, new CreateViewOptions()); + } + + @Override + public Publisher createView(final ClientSession clientSession, final String viewName, final String viewOn, + final List pipeline, final CreateViewOptions options) { + return mongoOperationPublisher.createView(notNull("clientSession", clientSession), viewName, viewOn, pipeline, options); + } + + @Override + public ChangeStreamPublisher watch() { + return watch(Collections.emptyList()); + } + + @Override + public ChangeStreamPublisher watch(final Class resultClass) { + return watch(Collections.emptyList(), resultClass); + } + + @Override + public ChangeStreamPublisher watch(final List pipeline) { + return watch(pipeline, Document.class); + } + + @Override + public ChangeStreamPublisher watch(final List pipeline, final Class resultClass) { + return new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher, resultClass, pipeline, ChangeStreamLevel.DATABASE); + } + + @Override + public ChangeStreamPublisher watch(final ClientSession clientSession) { + return watch(clientSession, Collections.emptyList(), Document.class); + } + + @Override + public ChangeStreamPublisher watch(final ClientSession clientSession, final Class resultClass) { + return watch(clientSession, Collections.emptyList(), resultClass); + } + + @Override + public ChangeStreamPublisher watch(final ClientSession clientSession, final List pipeline) { + return watch(clientSession, pipeline, Document.class); + } + + @Override + public ChangeStreamPublisher watch(final ClientSession clientSession, final List pipeline, + final Class resultClass) { + return new ChangeStreamPublisherImpl<>(notNull("clientSession", clientSession), mongoOperationPublisher, + resultClass, pipeline, ChangeStreamLevel.DATABASE); + } + + @Override + public AggregatePublisher aggregate(final List pipeline) { + return aggregate(pipeline, Document.class); + } + + @Override + public AggregatePublisher aggregate(final List pipeline, final Class resultClass) { + return new AggregatePublisherImpl<>(null, mongoOperationPublisher.withDocumentClass(resultClass), pipeline, + AggregationLevel.DATABASE); + } + + @Override + public AggregatePublisher aggregate(final ClientSession clientSession, final List pipeline) { + return aggregate(clientSession, pipeline, Document.class); + } + + @Override + public AggregatePublisher aggregate(final ClientSession clientSession, final List pipeline, + final Class resultClass) { + return new AggregatePublisherImpl<>(notNull("clientSession", clientSession), + mongoOperationPublisher.withDocumentClass(resultClass), pipeline, AggregationLevel.DATABASE); + } + +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoOperationPublisher.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoOperationPublisher.java new file mode 100644 index 00000000000..84c810f1b5e --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/MongoOperationPublisher.java @@ -0,0 +1,610 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.MongoBulkWriteException; +import com.mongodb.MongoClientException; +import com.mongodb.MongoException; +import com.mongodb.MongoNamespace; +import com.mongodb.MongoWriteConcernException; +import com.mongodb.MongoWriteException; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.WriteConcernResult; +import com.mongodb.WriteError; +import com.mongodb.bulk.BulkWriteResult; +import com.mongodb.bulk.WriteConcernError; +import com.mongodb.client.model.BulkWriteOptions; +import com.mongodb.client.model.CountOptions; +import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.CreateIndexOptions; +import com.mongodb.client.model.CreateViewOptions; +import com.mongodb.client.model.DeleteOptions; +import com.mongodb.client.model.DropCollectionOptions; +import com.mongodb.client.model.DropIndexOptions; +import com.mongodb.client.model.EstimatedDocumentCountOptions; +import com.mongodb.client.model.FindOneAndDeleteOptions; +import com.mongodb.client.model.FindOneAndReplaceOptions; +import com.mongodb.client.model.FindOneAndUpdateOptions; +import com.mongodb.client.model.IndexModel; +import com.mongodb.client.model.IndexOptions; +import com.mongodb.client.model.InsertManyOptions; +import com.mongodb.client.model.InsertOneOptions; +import com.mongodb.client.model.RenameCollectionOptions; +import com.mongodb.client.model.ReplaceOptions; +import com.mongodb.client.model.SearchIndexModel; +import com.mongodb.client.model.UpdateOptions; +import com.mongodb.client.model.WriteModel; +import com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; +import com.mongodb.client.result.DeleteResult; +import com.mongodb.client.result.InsertManyResult; +import com.mongodb.client.result.InsertOneResult; +import com.mongodb.client.result.UpdateResult; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.bulk.WriteRequest; +import com.mongodb.internal.operation.IndexHelper; +import com.mongodb.internal.operation.Operations; +import com.mongodb.internal.operation.ReadOperation; +import com.mongodb.internal.operation.WriteOperation; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.ClientSession; +import org.bson.BsonDocument; +import org.bson.BsonValue; +import org.bson.UuidRepresentation; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; +import org.reactivestreams.Publisher; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import reactor.core.publisher.MonoSink; + +import java.util.HashMap; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; +import java.util.function.Supplier; + +import static 
com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.Collections.singletonList; +import static org.bson.codecs.configuration.CodecRegistries.withUuidRepresentation; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class MongoOperationPublisher { + + private final Operations operations; + private final UuidRepresentation uuidRepresentation; + @Nullable + private final AutoEncryptionSettings autoEncryptionSettings; + private final OperationExecutor executor; + + MongoOperationPublisher( + final Class documentClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, + final ReadConcern readConcern, final WriteConcern writeConcern, final boolean retryWrites, final boolean retryReads, + final UuidRepresentation uuidRepresentation, @Nullable final AutoEncryptionSettings autoEncryptionSettings, + final TimeoutSettings timeoutSettings, final OperationExecutor executor) { + this(new MongoNamespace("_ignored", "_ignored"), documentClass, + codecRegistry, readPreference, readConcern, writeConcern, retryWrites, retryReads, + uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor); + } + + MongoOperationPublisher( + final MongoNamespace namespace, final Class documentClass, final CodecRegistry codecRegistry, + final ReadPreference readPreference, final ReadConcern readConcern, final WriteConcern writeConcern, + final boolean retryWrites, final boolean retryReads, final UuidRepresentation uuidRepresentation, + @Nullable final AutoEncryptionSettings autoEncryptionSettings, final TimeoutSettings timeoutSettings, + final OperationExecutor executor) { + this.operations = new Operations<>(namespace, notNull("documentClass", documentClass), + notNull("readPreference", readPreference), notNull("codecRegistry", codecRegistry), + notNull("readConcern", readConcern), notNull("writeConcern", writeConcern), + retryWrites, retryReads, timeoutSettings); + this.uuidRepresentation = notNull("uuidRepresentation", uuidRepresentation); + this.autoEncryptionSettings = autoEncryptionSettings; + this.executor = notNull("executor", executor); + } + + MongoNamespace getNamespace() { + return assertNotNull(operations.getNamespace()); + } + + ReadPreference getReadPreference() { + return operations.getReadPreference(); + } + + CodecRegistry getCodecRegistry() { + return operations.getCodecRegistry(); + } + + ReadConcern getReadConcern() { + return operations.getReadConcern(); + } + + WriteConcern getWriteConcern() { + return operations.getWriteConcern(); + } + + public boolean getRetryWrites() { + return operations.isRetryWrites(); + } + + public boolean getRetryReads() { + return operations.isRetryReads(); + } + + @Nullable + public Long getTimeoutMS() { + return getTimeoutSettings().getTimeoutMS(); + } + + public TimeoutSettings getTimeoutSettings() { + return operations.getTimeoutSettings(); + } + + Class getDocumentClass() { + return operations.getDocumentClass(); + } + + public Operations getOperations() { + return operations; + } + + MongoOperationPublisher withDatabase(final String name) { + return withDatabaseAndDocumentClass(name, getDocumentClass()); + } + + MongoOperationPublisher withDatabaseAndDocumentClass(final String name, final Class documentClass) { + return withNamespaceAndDocumentClass(new MongoNamespace(notNull("name", name), "_ignored"), + notNull("documentClass", documentClass)); + } + + MongoOperationPublisher withNamespace(final MongoNamespace namespace) { + return withNamespaceAndDocumentClass(namespace, getDocumentClass()); + } + + MongoOperationPublisher withDocumentClass(final Class documentClass) { + return withNamespaceAndDocumentClass(getNamespace(), documentClass); + } + + @SuppressWarnings("unchecked") + MongoOperationPublisher 
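/*
 * Illustrative note (not part of this patch): the first constructor above installs the
 * placeholder namespace "_ignored._ignored" for client-level publishers; the withDatabase
 * and withNamespaceAndDocumentClass copies then rebase it as callers descend from client
 * to database to collection. Hypothetical internal sketch:
 *
 *   MongoOperationPublisher<Document> dbLevel = clientLevel.withDatabase("admin");
 *   // same executor and settings; only the namespace (and possibly document class) change
 */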
withNamespaceAndDocumentClass(final MongoNamespace namespace, final Class documentClass) { + if (getNamespace().equals(namespace) && getDocumentClass().equals(documentClass)) { + return (MongoOperationPublisher) this; + } + return new MongoOperationPublisher<>(notNull("namespace", namespace), notNull("documentClass", documentClass), + getCodecRegistry(), getReadPreference(), getReadConcern(), getWriteConcern(), getRetryWrites(), getRetryReads(), + uuidRepresentation, autoEncryptionSettings, getTimeoutSettings(), executor); + } + + MongoOperationPublisher withCodecRegistry(final CodecRegistry codecRegistry) { + return new MongoOperationPublisher<>(getNamespace(), getDocumentClass(), + withUuidRepresentation(notNull("codecRegistry", codecRegistry), uuidRepresentation), + getReadPreference(), getReadConcern(), getWriteConcern(), getRetryWrites(), getRetryReads(), + uuidRepresentation, autoEncryptionSettings, getTimeoutSettings(), executor); + } + + MongoOperationPublisher withReadPreference(final ReadPreference readPreference) { + if (getReadPreference().equals(readPreference)) { + return this; + } + return new MongoOperationPublisher<>(getNamespace(), getDocumentClass(), getCodecRegistry(), + notNull("readPreference", readPreference), getReadConcern(), getWriteConcern(), getRetryWrites(), getRetryReads(), + uuidRepresentation, autoEncryptionSettings, getTimeoutSettings(), executor); + } + + MongoOperationPublisher withWriteConcern(final WriteConcern writeConcern) { + if (getWriteConcern().equals(writeConcern)) { + return this; + } + return new MongoOperationPublisher<>(getNamespace(), getDocumentClass(), getCodecRegistry(), getReadPreference(), getReadConcern(), + notNull("writeConcern", writeConcern), getRetryWrites(), getRetryReads(), uuidRepresentation, autoEncryptionSettings, + getTimeoutSettings(), executor); + } + + MongoOperationPublisher withReadConcern(final ReadConcern readConcern) { + if (getReadConcern().equals(readConcern)) { + return this; + } + return new MongoOperationPublisher<>(getNamespace(), getDocumentClass(), + getCodecRegistry(), getReadPreference(), notNull("readConcern", readConcern), + getWriteConcern(), getRetryWrites(), getRetryReads(), uuidRepresentation, + autoEncryptionSettings, getTimeoutSettings(), executor); + } + + MongoOperationPublisher withTimeout(final long timeout, final TimeUnit timeUnit) { + TimeoutSettings timeoutSettings = getTimeoutSettings().withTimeout(timeout, timeUnit); + if (Objects.equals(getTimeoutSettings(), timeoutSettings)) { + return this; + } + return new MongoOperationPublisher<>(getNamespace(), getDocumentClass(), + getCodecRegistry(), getReadPreference(), getReadConcern(), + getWriteConcern(), getRetryWrites(), getRetryReads(), uuidRepresentation, + autoEncryptionSettings, timeoutSettings, executor); + } + + Publisher dropDatabase(@Nullable final ClientSession clientSession) { + return createWriteOperationMono(operations::getTimeoutSettings, operations::dropDatabase, clientSession); + } + + Publisher createCollection( + @Nullable final ClientSession clientSession, final String collectionName, final CreateCollectionOptions options) { + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.createCollection(collectionName, options, autoEncryptionSettings), clientSession); + } + + Publisher createView( + @Nullable final ClientSession clientSession, final String viewName, final String viewOn, + final List pipeline, final CreateViewOptions options) { + return createWriteOperationMono( + 
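/*
 * Illustrative note (not part of this patch): most withXxx copies above short-circuit and
 * return "this" when the requested value equals the current one, so redundant
 * reconfiguration allocates nothing. Hypothetical internal sketch:
 *
 *   assert publisher.withReadConcern(publisher.getReadConcern()) == publisher;
 */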
operations::getTimeoutSettings, + () -> operations.createView(viewName, viewOn, pipeline, options), clientSession); + } + + public Publisher runCommand( + @Nullable final ClientSession clientSession, final Bson command, + final ReadPreference readPreference, final Class clazz) { + if (clientSession != null && clientSession.hasActiveTransaction() && !readPreference.equals(ReadPreference.primary())) { + return Mono.error(new MongoClientException("Read preference in a transaction must be primary")); + } + return createReadOperationMono( + operations::getTimeoutSettings, + () -> operations.commandRead(command, clazz), clientSession, notNull("readPreference", readPreference)); + } + + + Publisher estimatedDocumentCount(final EstimatedDocumentCountOptions options) { + return createReadOperationMono( + (operations -> operations.createTimeoutSettings(options)), + () -> operations.estimatedDocumentCount(notNull("options", options)), null); + } + + Publisher countDocuments(@Nullable final ClientSession clientSession, final Bson filter, final CountOptions options) { + return createReadOperationMono( + (operations -> operations.createTimeoutSettings(options)), + () -> operations.countDocuments(notNull("filter", filter), notNull("options", options) + ), clientSession); + } + + Publisher bulkWrite( + @Nullable final ClientSession clientSession, + final List> requests, final BulkWriteOptions options) { + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.bulkWrite(notNull("requests", requests), notNull("options", options)), clientSession); + } + + Publisher clientBulkWrite( + @Nullable final ClientSession clientSession, + final List clientWriteModels, + @Nullable final ClientBulkWriteOptions options) { + isTrue("`autoEncryptionSettings` is null, as bulkWrite does not currently support automatic encryption", autoEncryptionSettings == null); + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.clientBulkWriteOperation(clientWriteModels, options), clientSession); + } + + Publisher insertOne(@Nullable final ClientSession clientSession, final T document, final InsertOneOptions options) { + return createSingleWriteRequestMono(() -> operations.insertOne(notNull("document", document), + notNull("options", options)), + clientSession, WriteRequest.Type.INSERT) + .map(INSERT_ONE_RESULT_MAPPER); + } + + Publisher insertMany( + @Nullable final ClientSession clientSession, final List documents, + final InsertManyOptions options) { + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.insertMany(notNull("documents", documents), notNull("options", options)), clientSession) + .map(INSERT_MANY_RESULT_MAPPER); + } + + Publisher deleteOne(@Nullable final ClientSession clientSession, final Bson filter, final DeleteOptions options) { + return createSingleWriteRequestMono(() -> operations.deleteOne(notNull("filter", filter), notNull("options", options)), + clientSession, WriteRequest.Type.DELETE) + .map(DELETE_RESULT_MAPPER); + } + + Publisher deleteMany(@Nullable final ClientSession clientSession, final Bson filter, final DeleteOptions options) { + return createSingleWriteRequestMono(() -> operations.deleteMany(notNull("filter", filter), notNull("options", options)), + clientSession, WriteRequest.Type.DELETE) + .map(DELETE_RESULT_MAPPER); + } + + Publisher replaceOne( + @Nullable final ClientSession clientSession, final Bson filter, final T replacement, + final ReplaceOptions options) { + return 
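/*
 * Illustrative note (not part of this patch): runCommand above rejects a non-primary read
 * preference while a transaction is active, surfacing the error through the publisher
 * instead of throwing. Sketch, assuming "db" and a "session" with an active transaction:
 *
 *   Mono.from(db.runCommand(session, new Document("ping", 1), ReadPreference.secondary()))
 *       .onErrorResume(MongoClientException.class, e -> Mono.empty())   // "must be primary"
 *       .block();
 */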
createSingleWriteRequestMono(() -> operations.replaceOne(notNull("filter", filter), + notNull("replacement", replacement), + notNull("options", options)), + clientSession, WriteRequest.Type.REPLACE) + .map(UPDATE_RESULT_MAPPER); + } + + Publisher updateOne( + @Nullable final ClientSession clientSession, final Bson filter, final Bson update, + final UpdateOptions options) { + return createSingleWriteRequestMono(() -> operations.updateOne(notNull("filter", filter), + notNull("update", update), + notNull("options", options)), + clientSession, WriteRequest.Type.UPDATE) + .map(UPDATE_RESULT_MAPPER); + } + + Publisher updateOne( + @Nullable final ClientSession clientSession, final Bson filter, final List update, + final UpdateOptions options) { + return createSingleWriteRequestMono(() -> operations.updateOne(notNull("filter", filter), + notNull("update", update), + notNull("options", options)), + clientSession, WriteRequest.Type.UPDATE) + .map(UPDATE_RESULT_MAPPER); + } + + Publisher updateMany( + @Nullable final ClientSession clientSession, final Bson filter, final Bson update, + final UpdateOptions options) { + return createSingleWriteRequestMono(() -> operations.updateMany(notNull("filter", filter), + notNull("update", update), + notNull("options", options)), + clientSession, WriteRequest.Type.UPDATE) + .map(UPDATE_RESULT_MAPPER); + } + + Publisher updateMany( + @Nullable final ClientSession clientSession, final Bson filter, final List update, + final UpdateOptions options) { + return createSingleWriteRequestMono(() -> operations.updateMany(notNull("filter", filter), + notNull("update", update), + notNull("options", options)), + clientSession, WriteRequest.Type.UPDATE) + .map(UPDATE_RESULT_MAPPER); + } + + Publisher findOneAndDelete(@Nullable final ClientSession clientSession, final Bson filter, final FindOneAndDeleteOptions options) { + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.findOneAndDelete(notNull("filter", filter), notNull("options", options)), clientSession); + } + + Publisher findOneAndReplace( + @Nullable final ClientSession clientSession, final Bson filter, final T replacement, + final FindOneAndReplaceOptions options) { + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.findOneAndReplace(notNull("filter", filter), + notNull("replacement", replacement), + notNull("options", options)), + clientSession); + } + + Publisher findOneAndUpdate( + @Nullable final ClientSession clientSession, final Bson filter, final Bson update, + final FindOneAndUpdateOptions options) { + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.findOneAndUpdate(notNull("filter", filter), + notNull("update", update), + notNull("options", options)), + clientSession); + } + + Publisher findOneAndUpdate( + @Nullable final ClientSession clientSession, final Bson filter, + final List update, final FindOneAndUpdateOptions options) { + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.findOneAndUpdate(notNull("filter", filter), + notNull("update", update), + notNull("options", options)), + clientSession); + } + + Publisher dropCollection(@Nullable final ClientSession clientSession, final DropCollectionOptions dropCollectionOptions) { + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.dropCollection(dropCollectionOptions, autoEncryptionSettings), clientSession); + } + + Publisher createIndex(@Nullable final ClientSession 
clientSession, final Bson key, final IndexOptions options) { + return createIndexes(clientSession, singletonList(new IndexModel(notNull("key", key), options)), new CreateIndexOptions()); + } + + + Publisher createIndexes( + @Nullable final ClientSession clientSession, final List indexes, + final CreateIndexOptions options) { + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.createIndexes(notNull("indexes", indexes), notNull("options", options)), clientSession) + .thenMany(Flux.fromIterable(IndexHelper.getIndexNames(indexes, getCodecRegistry()))); + } + + Publisher createSearchIndex(@Nullable final String indexName, final Bson definition) { + SearchIndexModel searchIndexModel = + indexName == null ? new SearchIndexModel(definition) : new SearchIndexModel(indexName, definition); + + return createSearchIndexes(singletonList(searchIndexModel)); + } + + Publisher createSearchIndexes(final List indexes) { + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.createSearchIndexes(indexes), null) + .thenMany(Flux.fromIterable(IndexHelper.getSearchIndexNames(indexes))); + } + + + public Publisher updateSearchIndex(final String name, final Bson definition) { + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.updateSearchIndex(name, definition), null); + } + + + public Publisher dropSearchIndex(final String indexName) { + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.dropSearchIndex(indexName), null); + } + + Publisher dropIndex(@Nullable final ClientSession clientSession, final String indexName, final DropIndexOptions options) { + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.dropIndex(notNull("indexName", indexName), notNull("options", options)), + clientSession); + } + + Publisher dropIndex(@Nullable final ClientSession clientSession, final Bson keys, final DropIndexOptions options) { + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.dropIndex(notNull("keys", keys), notNull("options", options)), + clientSession); + } + + Publisher dropIndexes(@Nullable final ClientSession clientSession, final DropIndexOptions options) { + return dropIndex(clientSession, "*", options); + } + + Publisher renameCollection( + @Nullable final ClientSession clientSession, final MongoNamespace newCollectionNamespace, + final RenameCollectionOptions options) { + return createWriteOperationMono( + operations::getTimeoutSettings, + () -> operations.renameCollection(notNull("newCollectionNamespace", newCollectionNamespace), + notNull("options", options)), + clientSession); + } + + + Mono createReadOperationMono(final Function, TimeoutSettings> timeoutSettingsFunction, + final Supplier> operationSupplier, @Nullable final ClientSession clientSession) { + return createReadOperationMono(() -> timeoutSettingsFunction.apply(operations), operationSupplier, clientSession, getReadPreference()); + } + + + Mono createReadOperationMono(final Supplier timeoutSettingsSupplier, + final Supplier> operationSupplier, @Nullable final ClientSession clientSession, + final ReadPreference readPreference) { + ReadOperation readOperation = operationSupplier.get(); + return getExecutor(timeoutSettingsSupplier.get()) + .execute(readOperation, readPreference, getReadConcern(), clientSession); + } + + Mono createWriteOperationMono(final Function, TimeoutSettings> timeoutSettingsFunction, + final Supplier> operationSupplier, 
@Nullable final ClientSession clientSession) { + return createWriteOperationMono(() -> timeoutSettingsFunction.apply(operations), operationSupplier, clientSession); + } + + Mono createWriteOperationMono(final Supplier timeoutSettingsSupplier, + final Supplier> operationSupplier, @Nullable final ClientSession clientSession) { + WriteOperation writeOperation = operationSupplier.get(); + return getExecutor(timeoutSettingsSupplier.get()) + .execute(writeOperation, getReadConcern(), clientSession); + } + + private Mono createSingleWriteRequestMono( + final Supplier> operation, + @Nullable final ClientSession clientSession, + final WriteRequest.Type type) { + return createWriteOperationMono(operations::getTimeoutSettings, operation, clientSession) + .onErrorMap(MongoBulkWriteException.class, e -> { + MongoException exception; + WriteConcernError writeConcernError = e.getWriteConcernError(); + if (e.getWriteErrors().isEmpty() && writeConcernError != null) { + WriteConcernResult writeConcernResult; + if (type == WriteRequest.Type.INSERT) { + writeConcernResult = WriteConcernResult.acknowledged(e.getWriteResult().getInsertedCount(), false, null); + } else if (type == WriteRequest.Type.DELETE) { + writeConcernResult = WriteConcernResult.acknowledged(e.getWriteResult().getDeletedCount(), false, null); + } else { + writeConcernResult = WriteConcernResult + .acknowledged(e.getWriteResult().getMatchedCount() + e.getWriteResult().getUpserts().size(), + e.getWriteResult().getMatchedCount() > 0, + e.getWriteResult().getUpserts().isEmpty() + ? null : e.getWriteResult().getUpserts().get(0).getId()); + } + exception = new MongoWriteConcernException(writeConcernError, writeConcernResult, e.getServerAddress(), + e.getErrorLabels()); + } else if (!e.getWriteErrors().isEmpty()) { + exception = new MongoWriteException(new WriteError(e.getWriteErrors().get(0)), e.getServerAddress(), + e.getErrorLabels()); + } else { + exception = new MongoWriteException(new WriteError(-1, "Unknown write error", new BsonDocument()), + e.getServerAddress(), e.getErrorLabels()); + } + + return exception; + }); + } + + private OperationExecutor getExecutor(final TimeoutSettings timeoutSettings) { + return executor.withTimeoutSettings(timeoutSettings); + } + + private static final Function INSERT_ONE_RESULT_MAPPER = result -> { + if (result.wasAcknowledged()) { + BsonValue insertedId = result.getInserts().isEmpty() ? null : result.getInserts().get(0).getId(); + return InsertOneResult.acknowledged(insertedId); + } else { + return InsertOneResult.unacknowledged(); + } + }; + private static final Function INSERT_MANY_RESULT_MAPPER = result -> { + if (result.wasAcknowledged()) { + return InsertManyResult.acknowledged(result.getInserts().stream() + .collect(HashMap::new, (m, v) -> m.put(v.getIndex(), v.getId()), HashMap::putAll)); + } else { + return InsertManyResult.unacknowledged(); + } + }; + private static final Function DELETE_RESULT_MAPPER = result -> { + if (result.wasAcknowledged()) { + return DeleteResult.acknowledged(result.getDeletedCount()); + } else { + return DeleteResult.unacknowledged(); + } + }; + private static final Function UPDATE_RESULT_MAPPER = result -> { + if (result.wasAcknowledged()) { + BsonValue upsertedId = result.getUpserts().isEmpty() ? 
null : result.getUpserts().get(0).getId(); + return UpdateResult.acknowledged(result.getMatchedCount(), (long) result.getModifiedCount(), upsertedId); + } else { + return UpdateResult.unacknowledged(); + } + }; + + public static SingleResultCallback sinkToCallback(final MonoSink sink) { + return (result, t) -> { + if (t != null) { + sink.error(t); + } else if (result == null) { + sink.success(); + } else { + sink.success(result); + } + }; + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutor.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutor.java new file mode 100644 index 00000000000..cd666720f33 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutor.java @@ -0,0 +1,73 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.operation.ReadOperation; +import com.mongodb.internal.operation.WriteOperation; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.ClientSession; +import reactor.core.publisher.Mono; + +/** + * An interface describing the execution of a read or a write operation. + * + *

<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public interface OperationExecutor { + + /** + * Execute the read operation with the given read preference. + * + * @param operation the read operation. + * @param readPreference the read preference. + * @param readConcern the read concern + * @param session the session to associate this operation with + * @param <T> the operations result type. + */ + <T> Mono<T> execute(ReadOperation<T> operation, ReadPreference readPreference, ReadConcern readConcern, + @Nullable ClientSession session); + + /** + * Execute the write operation. + * + * @param operation the write operation. + * @param session the session to associate this operation with + * @param readConcern the read concern + * @param <T> the operations result type. + */ + <T> Mono<T> execute(WriteOperation<T> operation, ReadConcern readConcern, @Nullable ClientSession session); + + /** + * Create a new OperationExecutor with specific timeout settings. + * + * @param timeoutSettings the TimeoutSettings to use for the operations + * @return a new operation executor with the given timeout settings + * @since 5.2 + */ + OperationExecutor withTimeoutSettings(TimeoutSettings timeoutSettings); + + /** + * Returns the current timeout settings. + * + * @return the timeout settings + * @since 5.2 + */ + TimeoutSettings getTimeoutSettings(); +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutorImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutorImpl.java new file mode 100644 index 00000000000..56b0526e4cb --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/OperationExecutorImpl.java @@ -0,0 +1,224 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
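Both execute variants above ultimately complete a Reactor MonoSink from the driver's (result, throwable)-style callbacks, which is what sinkToCallback in MongoOperationPublisher encapsulates. A minimal, self-contained sketch of that bridge pattern, assuming a hypothetical AsyncOp callback interface (reactor-core is the only dependency):

    import reactor.core.publisher.Mono;

    public final class CallbackBridgeSketch {

        // Hypothetical stand-in for a driver-style asynchronous operation.
        @FunctionalInterface
        interface AsyncOp<T> {
            void execute(java.util.function.BiConsumer<T, Throwable> callback);
        }

        static <T> Mono<T> toMono(final AsyncOp<T> op) {
            return Mono.create(sink -> op.execute((result, t) -> {
                if (t != null) {
                    sink.error(t);        // surface the failure
                } else if (result == null) {
                    sink.success();       // complete empty, as sinkToCallback does for null results
                } else {
                    sink.success(result); // complete with the value
                }
            }));
        }

        public static void main(final String[] args) {
            toMono((AsyncOp<String>) cb -> cb.accept("ok", null)).subscribe(System.out::println); // prints "ok"
        }
    }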
+ */ +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.MongoClientException; +import com.mongodb.MongoException; +import com.mongodb.MongoInternalException; +import com.mongodb.MongoQueryException; +import com.mongodb.MongoSocketException; +import com.mongodb.MongoTimeoutException; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.RequestContext; +import com.mongodb.internal.IgnorableRequestContext; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.binding.AsyncClusterAwareReadWriteBinding; +import com.mongodb.internal.binding.AsyncClusterBinding; +import com.mongodb.internal.binding.AsyncReadWriteBinding; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.connection.ReadConcernAwareNoOpSessionContext; +import com.mongodb.internal.operation.OperationHelper; +import com.mongodb.internal.operation.ReadOperation; +import com.mongodb.internal.operation.WriteOperation; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.ClientSession; +import com.mongodb.reactivestreams.client.ReactiveContextProvider; +import com.mongodb.reactivestreams.client.internal.crypt.Crypt; +import com.mongodb.reactivestreams.client.internal.crypt.CryptBinding; +import org.reactivestreams.Subscriber; +import reactor.core.publisher.Mono; + +import java.util.Objects; + +import static com.mongodb.MongoException.TRANSIENT_TRANSACTION_ERROR_LABEL; +import static com.mongodb.MongoException.UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL; +import static com.mongodb.ReadPreference.primary; +import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.TimeoutContext.createTimeoutContext; +import static com.mongodb.reactivestreams.client.internal.MongoOperationPublisher.sinkToCallback; + +/** + *

<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class OperationExecutorImpl implements OperationExecutor { + + private final MongoClientImpl mongoClient; + private final ClientSessionHelper clientSessionHelper; + @Nullable + private final ReactiveContextProvider contextProvider; + private final TimeoutSettings timeoutSettings; + + OperationExecutorImpl(final MongoClientImpl mongoClient, final ClientSessionHelper clientSessionHelper, + final TimeoutSettings timeoutSettings, @Nullable final ReactiveContextProvider contextProvider) { + this.mongoClient = mongoClient; + this.clientSessionHelper = clientSessionHelper; + this.timeoutSettings = timeoutSettings; + this.contextProvider = contextProvider; + } + + @Override + public Mono execute(final ReadOperation operation, final ReadPreference readPreference, final ReadConcern readConcern, + @Nullable final ClientSession session) { + isTrue("open", !mongoClient.getCluster().isClosed()); + notNull("operation", operation); + notNull("readPreference", readPreference); + notNull("readConcern", readConcern); + + if (session != null) { + session.notifyOperationInitiated(operation); + } + + return Mono.from(subscriber -> + clientSessionHelper.withClientSession(session, this) + .map(clientSession -> getReadWriteBinding(getContext(subscriber), + readPreference, readConcern, clientSession, session == null, operation.getCommandName())) + .flatMap(binding -> { + if (session != null && session.hasActiveTransaction() && !binding.getReadPreference().equals(primary())) { + binding.release(); + return Mono.error(new MongoClientException("Read preference in a transaction must be primary")); + } else { + return Mono.create(sink -> operation.executeAsync(binding, (result, t) -> { + try { + binding.release(); + } finally { + sinkToCallback(sink).onResult(result, t); + } + })).doOnError((t) -> { + Throwable exceptionToHandle = t instanceof MongoException ? OperationHelper.unwrap((MongoException) t) : t; + labelException(session, exceptionToHandle); + unpinServerAddressOnTransientTransactionError(session, exceptionToHandle); + }); + } + }).subscribe(subscriber) + ); + } + + @Override + public Mono execute(final WriteOperation operation, final ReadConcern readConcern, + @Nullable final ClientSession session) { + isTrue("open", !mongoClient.getCluster().isClosed()); + notNull("operation", operation); + notNull("readConcern", readConcern); + + if (session != null) { + session.notifyOperationInitiated(operation); + } + + return Mono.from(subscriber -> + clientSessionHelper.withClientSession(session, this) + .map(clientSession -> getReadWriteBinding(getContext(subscriber), + primary(), readConcern, clientSession, session == null, operation.getCommandName())) + .flatMap(binding -> + Mono.create(sink -> operation.executeAsync(binding, (result, t) -> { + try { + binding.release(); + } finally { + sinkToCallback(sink).onResult(result, t); + } + })).doOnError((t) -> { + Throwable exceptionToHandle = t instanceof MongoException ? 
OperationHelper.unwrap((MongoException) t) : t; + labelException(session, exceptionToHandle); + unpinServerAddressOnTransientTransactionError(session, exceptionToHandle); + }) + ).subscribe(subscriber) + ); + } + + @Override + public OperationExecutor withTimeoutSettings(final TimeoutSettings newTimeoutSettings) { + if (Objects.equals(timeoutSettings, newTimeoutSettings)) { + return this; + } + return new OperationExecutorImpl(mongoClient, clientSessionHelper, newTimeoutSettings, contextProvider); + } + + @Override + public TimeoutSettings getTimeoutSettings() { + return timeoutSettings; + } + + private RequestContext getContext(final Subscriber subscriber) { + RequestContext context = null; + if (contextProvider != null) { + context = contextProvider.getContext(subscriber); + } + return context == null ? IgnorableRequestContext.INSTANCE : context; + } + + private void labelException(@Nullable final ClientSession session, @Nullable final Throwable t) { + if (session != null && session.hasActiveTransaction() + && (t instanceof MongoSocketException || t instanceof MongoTimeoutException + || (t instanceof MongoQueryException && ((MongoQueryException) t).getErrorCode() == 91)) + && !((MongoException) t).hasErrorLabel(UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL)) { + ((MongoException) t).addLabel(TRANSIENT_TRANSACTION_ERROR_LABEL); + } + } + + private void unpinServerAddressOnTransientTransactionError(@Nullable final ClientSession session, + @Nullable final Throwable throwable) { + if (session != null && throwable instanceof MongoException + && ((MongoException) throwable).hasErrorLabel(TRANSIENT_TRANSACTION_ERROR_LABEL)) { + session.clearTransactionContext(); + } + } + + private AsyncReadWriteBinding getReadWriteBinding(final RequestContext requestContext, + final ReadPreference readPreference, final ReadConcern readConcern, final ClientSession session, + final boolean ownsSession, final String commandName) { + notNull("readPreference", readPreference); + AsyncClusterAwareReadWriteBinding readWriteBinding = new AsyncClusterBinding(mongoClient.getCluster(), + getReadPreferenceForBinding(readPreference, session), readConcern, + getOperationContext(requestContext, session, readConcern, commandName)); + + Crypt crypt = mongoClient.getCrypt(); + if (crypt != null) { + readWriteBinding = new CryptBinding(readWriteBinding, crypt); + } + + AsyncClusterAwareReadWriteBinding asyncReadWriteBinding = readWriteBinding; + if (session != null) { + return new ClientSessionBinding(session, ownsSession, asyncReadWriteBinding); + } else { + return asyncReadWriteBinding; + } + } + + private OperationContext getOperationContext(final RequestContext requestContext, final ClientSession session, + final ReadConcern readConcern, final String commandName) { + return new OperationContext( + requestContext, + new ReadConcernAwareNoOpSessionContext(readConcern), + createTimeoutContext(session, timeoutSettings), + mongoClient.getSettings().getServerApi(), + commandName); + } + + private ReadPreference getReadPreferenceForBinding(final ReadPreference readPreference, @Nullable final ClientSession session) { + if (session == null) { + return readPreference; + } + if (session.hasActiveTransaction()) { + ReadPreference readPreferenceForBinding = session.getTransactionOptions().getReadPreference(); + if (readPreferenceForBinding == null) { + throw new MongoInternalException("Invariant violated. 
Transaction options read preference cannot be null"); + } + return readPreferenceForBinding; + } + return readPreference; + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ReadOperationCursorAsyncOnly.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ReadOperationCursorAsyncOnly.java new file mode 100644 index 00000000000..c1a28e1849e --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/ReadOperationCursorAsyncOnly.java @@ -0,0 +1,29 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.internal.binding.ReadBinding; +import com.mongodb.internal.operation.BatchCursor; +import com.mongodb.internal.operation.ReadOperationCursor; + +public interface ReadOperationCursorAsyncOnly<T> extends ReadOperationCursor<T> { + + default BatchCursor<T> execute(final ReadBinding binding) { + throw new UnsupportedOperationException("This operation is async only"); + } + +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/TimeoutHelper.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/TimeoutHelper.java new file mode 100644 index 00000000000..bc4da3026a9 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/TimeoutHelper.java @@ -0,0 +1,108 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.MongoCollection; +import com.mongodb.reactivestreams.client.MongoDatabase; +import reactor.core.publisher.Mono; + +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +/** + *
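labelException in the executor above attaches TRANSIENT_TRANSACTION_ERROR_LABEL to qualifying failures inside a transaction. A hedged usage sketch of how application code might key a retry off that label; the transaction body here is an arbitrary placeholder Mono, and a real retry would also need a fresh transaction attempt per subscription:

    import com.mongodb.MongoException;
    import reactor.core.publisher.Mono;
    import reactor.util.retry.Retry;

    import static com.mongodb.MongoException.TRANSIENT_TRANSACTION_ERROR_LABEL;

    public final class TransientTransactionRetrySketch {

        // Retry up to 3 times, but only when the driver has labelled the error as transient.
        static <T> Mono<T> withTransientRetry(final Mono<T> transactionBody) {
            return transactionBody.retryWhen(Retry.max(3)
                    .filter(t -> t instanceof MongoException
                            && ((MongoException) t).hasErrorLabel(TRANSIENT_TRANSACTION_ERROR_LABEL)));
        }
    }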

<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class TimeoutHelper { + private static final String DEFAULT_TIMEOUT_MESSAGE = "Operation exceeded the timeout limit."; + + private TimeoutHelper() { + //NOP + } + + public static MongoCollection collectionWithTimeout(final MongoCollection collection, + @Nullable final Timeout timeout) { + return collectionWithTimeout(collection, timeout, DEFAULT_TIMEOUT_MESSAGE); + } + + public static MongoCollection collectionWithTimeout(final MongoCollection collection, + @Nullable final Timeout timeout, + final String message) { + if (timeout != null) { + return timeout.call(MILLISECONDS, + () -> collection.withTimeout(0, MILLISECONDS), + ms -> collection.withTimeout(ms, MILLISECONDS), + () -> TimeoutContext.throwMongoTimeoutException(message)); + } + return collection; + } + + public static Mono> collectionWithTimeoutMono(final MongoCollection collection, + @Nullable final Timeout timeout) { + try { + return Mono.just(collectionWithTimeout(collection, timeout)); + } catch (MongoOperationTimeoutException e) { + return Mono.error(e); + } + } + + public static Mono> collectionWithTimeoutDeferred(final MongoCollection collection, + @Nullable final Timeout timeout) { + return Mono.defer(() -> collectionWithTimeoutMono(collection, timeout)); + } + + + public static MongoDatabase databaseWithTimeout(final MongoDatabase database, + @Nullable final Timeout timeout) { + return databaseWithTimeout(database, DEFAULT_TIMEOUT_MESSAGE, timeout); + } + + public static MongoDatabase databaseWithTimeout(final MongoDatabase database, + final String message, + @Nullable final Timeout timeout) { + if (timeout != null) { + return timeout.call(MILLISECONDS, + () -> database.withTimeout(0, MILLISECONDS), + ms -> database.withTimeout(ms, MILLISECONDS), + () -> TimeoutContext.throwMongoTimeoutException(message)); + } + return database; + } + + private static Mono databaseWithTimeoutMono(final MongoDatabase database, + final String message, + @Nullable final Timeout timeout) { + try { + return Mono.just(databaseWithTimeout(database, message, timeout)); + } catch (MongoOperationTimeoutException e) { + return Mono.error(e); + } + } + + public static Mono databaseWithTimeoutDeferred(final MongoDatabase database, + @Nullable final Timeout timeout) { + return databaseWithTimeoutDeferred(database, DEFAULT_TIMEOUT_MESSAGE, timeout); + } + + public static Mono databaseWithTimeoutDeferred(final MongoDatabase database, + final String message, + @Nullable final Timeout timeout) { + return Mono.defer(() -> databaseWithTimeoutMono(database, message, timeout)); + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/VoidReadOperationThenCursorReadOperation.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/VoidReadOperationThenCursorReadOperation.java new file mode 100644 index 00000000000..e74949432b9 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/VoidReadOperationThenCursorReadOperation.java @@ -0,0 +1,58 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
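collectionWithTimeout and databaseWithTimeout above branch on a Timeout in three ways: no deadline configured, time remaining, or already expired. A pure-Java sketch of that three-way contract over a plain nanoTime deadline; DeadlineSketch is a hypothetical stand-in for the driver's internal Timeout, not its real API:

    import java.util.concurrent.TimeUnit;
    import java.util.function.LongFunction;
    import java.util.function.Supplier;

    public final class DeadlineSketch {

        private final long deadlineNanos; // Long.MAX_VALUE means "no deadline"

        DeadlineSketch(final long deadlineNanos) {
            this.deadlineNanos = deadlineNanos;
        }

        <T> T call(final Supplier<T> ifInfinite, final LongFunction<T> ifRemainingMs, final Supplier<T> ifExpired) {
            if (deadlineNanos == Long.MAX_VALUE) {
                return ifInfinite.get(); // no timeout configured
            }
            long remainingMs = TimeUnit.NANOSECONDS.toMillis(deadlineNanos - System.nanoTime());
            return remainingMs > 0 ? ifRemainingMs.apply(remainingMs) : ifExpired.get();
        }
    }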
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.internal.async.AsyncBatchCursor; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncReadBinding; +import com.mongodb.internal.operation.ReadOperationCursor; +import com.mongodb.internal.operation.ReadOperationSimple; + +class VoidReadOperationThenCursorReadOperation<T> implements ReadOperationCursorAsyncOnly<T> { + private final ReadOperationSimple<Void> readOperation; + private final ReadOperationCursor<T> cursorReadOperation; + + VoidReadOperationThenCursorReadOperation(final ReadOperationSimple<Void> readOperation, + final ReadOperationCursor<T> cursorReadOperation) { + this.readOperation = readOperation; + this.cursorReadOperation = cursorReadOperation; + } + + public ReadOperationSimple<Void> getReadOperation() { + return readOperation; + } + + public ReadOperationCursor<T> getCursorReadOperation() { + return cursorReadOperation; + } + + @Override + public String getCommandName() { + return readOperation.getCommandName(); + } + + @Override + public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback<AsyncBatchCursor<T>> callback) { + readOperation.executeAsync(binding, (result, t) -> { + if (t != null) { + callback.onResult(null, t); + } else { + cursorReadOperation.executeAsync(binding, callback); + } + }); + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/VoidWriteOperationThenCursorReadOperation.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/VoidWriteOperationThenCursorReadOperation.java new file mode 100644 index 00000000000..428ad21ca26 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/VoidWriteOperationThenCursorReadOperation.java @@ -0,0 +1,51 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
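VoidReadOperationThenCursorReadOperation above chains two callback-style steps: if the first fails, its error is handed to the caller unchanged; otherwise the second step receives the original callback. A self-contained sketch of that idiom with hypothetical Callback and AsyncStep types standing in for the driver's internal interfaces:

    public final class ThenSketch {

        interface Callback<T> { void onResult(T result, Throwable t); }

        interface AsyncStep<T> { void execute(Callback<T> callback); }

        // Run "first"; on failure short-circuit to the caller, otherwise hand the original callback to "second".
        static <T> AsyncStep<T> voidThen(final AsyncStep<Void> first, final AsyncStep<T> second) {
            return callback -> first.execute((ignored, t) -> {
                if (t != null) {
                    callback.onResult(null, t);
                } else {
                    second.execute(callback);
                }
            });
        }
    }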
+ */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.internal.async.AsyncBatchCursor; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncReadBinding; +import com.mongodb.internal.binding.AsyncWriteBinding; +import com.mongodb.internal.operation.ReadOperationCursor; +import com.mongodb.internal.operation.WriteOperation; + +class VoidWriteOperationThenCursorReadOperation implements ReadOperationCursorAsyncOnly { + private final WriteOperation writeOperation; + private final ReadOperationCursor cursorReadOperation; + + VoidWriteOperationThenCursorReadOperation(final WriteOperation writeOperation, + final ReadOperationCursor cursorReadOperation) { + this.writeOperation = writeOperation; + this.cursorReadOperation = cursorReadOperation; + } + + @Override + public String getCommandName() { + return writeOperation.getCommandName(); + } + + @Override + public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { + writeOperation.executeAsync((AsyncWriteBinding) binding, (result, t) -> { + if (t != null) { + callback.onResult(null, t); + } else { + cursorReadOperation.executeAsync(binding, callback); + } + }); + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CollectionInfoRetriever.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CollectionInfoRetriever.java new file mode 100644 index 00000000000..786055b1886 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CollectionInfoRetriever.java @@ -0,0 +1,42 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.internal.crypt; + +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.MongoClient; +import org.bson.BsonDocument; +import reactor.core.publisher.Flux; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.databaseWithTimeoutDeferred; + +class CollectionInfoRetriever { + + private static final String TIMEOUT_ERROR_MESSAGE = "Collection information retrieval exceeded the timeout limit."; + + private final MongoClient client; + + CollectionInfoRetriever(final MongoClient client) { + this.client = notNull("client", client); + } + + public Flux<BsonDocument> filter(final String databaseName, final BsonDocument filter, @Nullable final Timeout operationTimeout) { + return databaseWithTimeoutDeferred(client.getDatabase(databaseName), TIMEOUT_ERROR_MESSAGE, operationTimeout) + .flatMapMany(database -> Flux.from(database.listCollections(BsonDocument.class).filter(filter))); + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CommandMarker.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CommandMarker.java new file mode 100644 index 00000000000..443ebbe14bd --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CommandMarker.java @@ -0,0 +1,119 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
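CollectionInfoRetriever above drives the public reactive API to fetch collection metadata. A hedged usage sketch of the same listCollections-with-filter call from application code; the connection string, database, and collection names are placeholders:

    import com.mongodb.reactivestreams.client.MongoClient;
    import com.mongodb.reactivestreams.client.MongoClients;
    import org.bson.BsonDocument;
    import org.bson.BsonString;
    import reactor.core.publisher.Flux;

    public final class ListCollectionsSketch {
        public static void main(final String[] args) {
            try (MongoClient client = MongoClients.create("mongodb://localhost")) {
                // Stream collection metadata whose name matches the filter.
                Flux.from(client.getDatabase("test")
                                .listCollections(BsonDocument.class)
                                .filter(new BsonDocument("name", new BsonString("coll"))))
                        .doOnNext(System.out::println)
                        .blockLast(); // block only for this demo; real reactive code would compose instead
            }
        }
    }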
+ */ + +package com.mongodb.reactivestreams.client.internal.crypt; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.MongoClientException; +import com.mongodb.MongoException; +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.internal.crypt.capi.MongoCrypt; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.MongoClient; +import com.mongodb.reactivestreams.client.MongoClients; +import com.mongodb.reactivestreams.client.MongoDatabase; +import org.bson.RawBsonDocument; +import reactor.core.publisher.Mono; + +import java.io.Closeable; +import java.util.Map; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.internal.capi.MongoCryptHelper.createMongocryptdClientSettings; +import static com.mongodb.internal.capi.MongoCryptHelper.createProcessBuilder; +import static com.mongodb.internal.capi.MongoCryptHelper.isMongocryptdSpawningDisabled; +import static com.mongodb.internal.capi.MongoCryptHelper.startProcess; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.databaseWithTimeoutDeferred; + +@SuppressWarnings("UseOfProcessBuilder") +class CommandMarker implements Closeable { + private static final String TIMEOUT_ERROR_MESSAGE = "Command marker exceeded the timeout limit."; + @Nullable + private final MongoClient client; + @Nullable + private final ProcessBuilder processBuilder; + + /** + * The command marker + * + *

+ * <p>If the extraOptions.cryptSharedLibRequired option is true then the driver MUST NOT attempt to spawn or connect to mongocryptd. + * <p> + * If the following conditions are met: + * <ul> + *   <li>The user's MongoClient is configured for client-side encryption (i.e. bypassAutoEncryption is not false)</li> + *   <li>The user has not disabled mongocryptd spawning (i.e. by setting extraOptions.mongocryptdBypassSpawn to true)</li> + *   <li>The crypt shared library is unavailable.</li> + *   <li>The extraOptions.cryptSharedLibRequired option is false.</li> + * </ul>
+ * Then mongocryptd MUST be spawned by the driver. + */ + CommandMarker( + final MongoCrypt mongoCrypt, + final AutoEncryptionSettings settings) { + + if (isMongocryptdSpawningDisabled(mongoCrypt.getCryptSharedLibVersionString(), settings)) { + processBuilder = null; + client = null; + } else { + Map extraOptions = settings.getExtraOptions(); + boolean mongocryptdBypassSpawn = (boolean) extraOptions.getOrDefault("mongocryptdBypassSpawn", false); + if (!mongocryptdBypassSpawn) { + processBuilder = createProcessBuilder(extraOptions); + startProcess(processBuilder); + } else { + processBuilder = null; + } + client = MongoClients.create(createMongocryptdClientSettings((String) extraOptions.get("mongocryptdURI"))); + } + } + + Mono mark(final String databaseName, final RawBsonDocument command, @Nullable final Timeout operationTimeout) { + if (client != null) { + return runCommand(databaseName, command, operationTimeout) + .onErrorResume(Throwable.class, e -> { + if (processBuilder == null || e instanceof MongoOperationTimeoutException) { + throw MongoException.fromThrowable(e); + } + return Mono.fromRunnable(() -> startProcess(processBuilder)).then(runCommand(databaseName, command, operationTimeout)); + }) + .onErrorMap(t -> new MongoClientException("Exception in encryption library: " + t.getMessage(), t)); + } else { + return Mono.fromCallable(() -> command); + } + } + + private Mono runCommand(final String databaseName, final RawBsonDocument command, @Nullable final Timeout operationTimeout) { + assertNotNull(client); + MongoDatabase mongoDatabase = client.getDatabase(databaseName) + .withReadConcern(ReadConcern.DEFAULT) + .withReadPreference(ReadPreference.primary()); + + return databaseWithTimeoutDeferred(mongoDatabase, TIMEOUT_ERROR_MESSAGE, operationTimeout) + .flatMap(database -> Mono.from(database.runCommand(command, RawBsonDocument.class))); + } + + @Override + public void close() { + if (client != null) { + client.close(); + } + } + +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypt.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypt.java new file mode 100644 index 00000000000..61ccaa320fe --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypt.java @@ -0,0 +1,382 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
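CommandMarker.mark above recovers from a first failure by respawning mongocryptd and retrying the command once. A minimal Reactor sketch of that recover-then-retry shape; the respawn runnable and the attempt Mono are placeholders, and the attempt must be a cold Mono so that resubscribing re-executes it:

    import reactor.core.publisher.Mono;

    public final class RetryAfterSpawnSketch {

        // Try the attempt once; on any error run the respawn side effect, then subscribe to the attempt again.
        static <T> Mono<T> runWithRespawn(final Mono<T> attempt, final Runnable respawn) {
            return attempt.onErrorResume(e -> Mono.fromRunnable(respawn).then(attempt));
        }
    }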
+ */ + +package com.mongodb.reactivestreams.client.internal.crypt; + +import com.mongodb.MongoClientException; +import com.mongodb.MongoException; +import com.mongodb.MongoInternalException; +import com.mongodb.client.model.vault.DataKeyOptions; +import com.mongodb.client.model.vault.EncryptOptions; +import com.mongodb.client.model.vault.RewrapManyDataKeyOptions; +import com.mongodb.crypt.capi.MongoCryptException; +import com.mongodb.internal.capi.MongoCryptHelper; +import com.mongodb.internal.crypt.capi.MongoCrypt; +import com.mongodb.internal.crypt.capi.MongoCryptContext; +import com.mongodb.internal.crypt.capi.MongoDataKeyOptions; +import com.mongodb.internal.crypt.capi.MongoKeyDecryptor; +import com.mongodb.internal.crypt.capi.MongoRewrapManyDataKeyOptions; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.MongoClient; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.BsonValue; +import org.bson.RawBsonDocument; +import reactor.core.publisher.Mono; +import reactor.core.publisher.MonoSink; + +import java.io.Closeable; +import java.util.Map; +import java.util.function.Supplier; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.client.vault.EncryptOptionsHelper.asMongoExplicitEncryptOptions; +import static com.mongodb.internal.crypt.capi.MongoCryptContext.State; + +/** + *

<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class Crypt implements Closeable { + private static final RawBsonDocument EMPTY_RAW_BSON_DOCUMENT = RawBsonDocument.parse("{}"); + private static final Logger LOGGER = Loggers.getLogger("client"); + private final MongoCrypt mongoCrypt; + private final Map> kmsProviders; + private final Map>> kmsProviderPropertySuppliers; + private final CollectionInfoRetriever collectionInfoRetriever; + private final CommandMarker commandMarker; + private final KeyRetriever keyRetriever; + private final KeyManagementService keyManagementService; + private final boolean bypassAutoEncryption; + @Nullable + private final MongoClient collectionInfoRetrieverClient; + @Nullable + private final MongoClient keyVaultClient; + + /** + * Create an instance to use for explicit encryption and decryption, and data key creation. + * + * @param mongoCrypt the mongoCrypt wrapper + * @param keyRetriever the key retriever + * @param keyManagementService the key management service + * @param kmsProviders the KMS provider credentials + * @param kmsProviderPropertySuppliers the KMS provider property providers + */ + Crypt(final MongoCrypt mongoCrypt, + final KeyRetriever keyRetriever, + final KeyManagementService keyManagementService, + final Map> kmsProviders, + final Map>> kmsProviderPropertySuppliers) { + this(mongoCrypt, keyRetriever, keyManagementService, kmsProviders, kmsProviderPropertySuppliers, + false, null, null, null, null); + } + + /** + * Create an instance to use for auto-encryption and auto-decryption. + * + * @param mongoCrypt the mongoCrypt wrapper + * @param keyRetriever the key retriever + * @param keyManagementService the key management service + * @param kmsProviders the KMS provider credentials + * @param kmsProviderPropertySuppliers the KMS provider property providers + * @param bypassAutoEncryption the bypass auto encryption flag + * @param collectionInfoRetriever the collection info retriever + * @param commandMarker the command marker + * @param collectionInfoRetrieverClient the collection info retriever mongo client + * @param keyVaultClient the key vault mongo client + */ + Crypt(final MongoCrypt mongoCrypt, + final KeyRetriever keyRetriever, + final KeyManagementService keyManagementService, + final Map> kmsProviders, + final Map>> kmsProviderPropertySuppliers, + final boolean bypassAutoEncryption, + @Nullable final CollectionInfoRetriever collectionInfoRetriever, + @Nullable final CommandMarker commandMarker, + @Nullable final MongoClient collectionInfoRetrieverClient, + @Nullable final MongoClient keyVaultClient) { + this.mongoCrypt = mongoCrypt; + this.keyRetriever = keyRetriever; + this.keyManagementService = keyManagementService; + this.kmsProviders = kmsProviders; + this.kmsProviderPropertySuppliers = kmsProviderPropertySuppliers; + this.bypassAutoEncryption = bypassAutoEncryption; + this.collectionInfoRetriever = collectionInfoRetriever; + this.commandMarker = commandMarker; + this.collectionInfoRetrieverClient = collectionInfoRetrieverClient; + this.keyVaultClient = keyVaultClient; + } + + /** + * Encrypt the given command + * + * @param databaseName the namespace + * @param command the unencrypted command + */ + public Mono encrypt(final String databaseName, final RawBsonDocument command, @Nullable final Timeout operationTimeout) { + notNull("databaseName", databaseName); + notNull("command", command); + + if (bypassAutoEncryption) { + return Mono.fromCallable(() -> command); + } + return executeStateMachine(() -> mongoCrypt.createEncryptionContext(databaseName, command), 
databaseName, operationTimeout); + } + + /** + * Decrypt the given command response + * + * @param commandResponse the encrypted command response + */ + public Mono decrypt(final RawBsonDocument commandResponse, @Nullable final Timeout operationTimeout) { + notNull("commandResponse", commandResponse); + return executeStateMachine(() -> mongoCrypt.createDecryptionContext(commandResponse), operationTimeout) + .onErrorMap(this::wrapInClientException); + } + + /** + * Create a data key. + * + * @param kmsProvider the KMS provider to create the data key for + * @param options the data key options + */ + public Mono createDataKey(final String kmsProvider, final DataKeyOptions options, @Nullable final Timeout operationTimeout) { + notNull("kmsProvider", kmsProvider); + notNull("options", options); + return executeStateMachine(() -> + mongoCrypt.createDataKeyContext(kmsProvider, + MongoDataKeyOptions.builder() + .keyAltNames(options.getKeyAltNames()) + .masterKey(options.getMasterKey()) + .keyMaterial(options.getKeyMaterial()) + .build()), operationTimeout); + } + + /** + * Encrypt the given value with the given options + * + * @param value the value to encrypt + * @param options the options + */ + public Mono encryptExplicitly(final BsonValue value, final EncryptOptions options, @Nullable final Timeout operationTimeout) { + return executeStateMachine(() -> + mongoCrypt.createExplicitEncryptionContext(new BsonDocument("v", value), asMongoExplicitEncryptOptions(options)), + operationTimeout) + .map(result -> result.getBinary("v")); + } + + /** + * Encrypts a Match Expression or Aggregate Expression to query a range index. + * + * @param expression the Match Expression or Aggregate Expression + * @param options the options + * @return the encrypted expression + * @since 4.9 + * @mongodb.server.release 6.2 + */ + public Mono encryptExpression(final BsonDocument expression, final EncryptOptions options, @Nullable final Timeout operationTimeout) { + return executeStateMachine(() -> + mongoCrypt.createEncryptExpressionContext(new BsonDocument("v", expression), asMongoExplicitEncryptOptions(options)), operationTimeout + ).map(result -> result.getDocument("v")); + } + + /** + * Decrypt the given encrypted value. 
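executeStateMachineWithSink below re-enters itself after each asynchronous step until the context reaches READY or DONE. A pure-Java sketch of that drive-loop shape with hypothetical states and a synchronous stand-in context (not libmongocrypt's real state set or API):

    public final class StateMachineSketch {

        enum State { NEED_KEYS, READY, DONE }

        // Hypothetical stand-in for MongoCryptContext; the driver's real steps are asynchronous.
        interface Ctx {
            State state();
            void fetchKeys();
            String finish();
        }

        static String drive(final Ctx ctx) {
            while (true) {
                switch (ctx.state()) {
                    case NEED_KEYS:
                        ctx.fetchKeys();
                        break; // loop and re-inspect the state, as executeStateMachineWithSink re-enters itself
                    case READY:
                        return ctx.finish();
                    case DONE:
                        return "{}"; // mirrors the EMPTY_RAW_BSON_DOCUMENT terminal case
                    default:
                        throw new IllegalStateException("Unsupported state: " + ctx.state());
                }
            }
        }
    }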
+ * + * @param value the encrypted value + */ + public Mono decryptExplicitly(final BsonBinary value, @Nullable final Timeout operationTimeout) { + return executeStateMachine(() -> mongoCrypt.createExplicitDecryptionContext(new BsonDocument("v", value)), operationTimeout) + .map(result -> result.get("v")); + } + + /** + * Rewrap data key + * @param filter the filter + * @param options the rewrap many data key options + * @return the decrypted value + */ + public Mono rewrapManyDataKey(final BsonDocument filter, final RewrapManyDataKeyOptions options, @Nullable final Timeout operationTimeout) { + return executeStateMachine(() -> + mongoCrypt.createRewrapManyDatakeyContext(filter, + MongoRewrapManyDataKeyOptions + .builder() + .provider(options.getProvider()) + .masterKey(options.getMasterKey()) + .build()), operationTimeout + ); + } + + + @Override + @SuppressWarnings("try") + public void close() { + //noinspection EmptyTryBlock + try (MongoCrypt ignored = this.mongoCrypt; + CommandMarker ignored1 = this.commandMarker; + MongoClient ignored2 = this.collectionInfoRetrieverClient; + MongoClient ignored3 = this.keyVaultClient; + KeyManagementService ignored4 = this.keyManagementService + ) { + // just using try-with-resources to ensure they all get closed, even in the case of exceptions + } + } + + private Mono executeStateMachine(final Supplier cryptContextSupplier, + @Nullable final Timeout operationTimeout) { + return executeStateMachine(cryptContextSupplier, null, operationTimeout); + } + + private Mono executeStateMachine(final Supplier cryptContextSupplier, + @Nullable final String databaseName, @Nullable final Timeout operationTimeout) { + try { + MongoCryptContext cryptContext = cryptContextSupplier.get(); + return Mono.create(sink -> executeStateMachineWithSink(cryptContext, databaseName, sink, operationTimeout)) + .onErrorMap(this::wrapInClientException) + .doFinally(s -> cryptContext.close()); + } catch (MongoCryptException e) { + return Mono.error(wrapInClientException(e)); + } + } + + private void executeStateMachineWithSink(final MongoCryptContext cryptContext, @Nullable final String databaseName, + final MonoSink sink, @Nullable final Timeout operationTimeout) { + State state = cryptContext.getState(); + switch (state) { + case NEED_MONGO_COLLINFO: + collInfo(cryptContext, databaseName, sink, operationTimeout); + break; + case NEED_MONGO_MARKINGS: + mark(cryptContext, databaseName, sink, operationTimeout); + break; + case NEED_KMS_CREDENTIALS: + fetchCredentials(cryptContext, databaseName, sink, operationTimeout); + break; + case NEED_MONGO_KEYS: + fetchKeys(cryptContext, databaseName, sink, operationTimeout); + break; + case NEED_KMS: + decryptKeys(cryptContext, databaseName, sink, operationTimeout); + break; + case READY: + sink.success(cryptContext.finish()); + break; + case DONE: + sink.success(EMPTY_RAW_BSON_DOCUMENT); + break; + default: + sink.error(new MongoInternalException("Unsupported encryptor state + " + state)); + } + } + + private void fetchCredentials(final MongoCryptContext cryptContext, @Nullable final String databaseName, + final MonoSink sink, @Nullable final Timeout operationTimeout) { + try { + cryptContext.provideKmsProviderCredentials(MongoCryptHelper.fetchCredentials(kmsProviders, kmsProviderPropertySuppliers)); + executeStateMachineWithSink(cryptContext, databaseName, sink, operationTimeout); + } catch (Exception e) { + sink.error(e); + } + } + + private void collInfo(final MongoCryptContext cryptContext, + @Nullable final String databaseName, + final 
MonoSink sink, @Nullable final Timeout operationTimeout) { + if (collectionInfoRetriever == null) { + sink.error(new IllegalStateException("Missing collection Info retriever")); + } else if (databaseName == null) { + sink.error(new IllegalStateException("Missing database name")); + } else { + collectionInfoRetriever.filter(databaseName, cryptContext.getMongoOperation(), operationTimeout) + .contextWrite(sink.contextView()) + .doOnNext(result -> cryptContext.addMongoOperationResult(result)) + .doOnComplete(() -> { + cryptContext.completeMongoOperation(); + executeStateMachineWithSink(cryptContext, databaseName, sink, operationTimeout); + }) + .doOnError(t -> sink.error(MongoException.fromThrowableNonNull(t))) + .subscribe(); + } + } + + private void mark(final MongoCryptContext cryptContext, + @Nullable final String databaseName, + final MonoSink sink, + @Nullable final Timeout operationTimeout) { + if (commandMarker == null) { + sink.error(wrapInClientException(new MongoInternalException("Missing command marker"))); + } else if (databaseName == null) { + sink.error(wrapInClientException(new IllegalStateException("Missing database name"))); + } else { + commandMarker.mark(databaseName, cryptContext.getMongoOperation(), operationTimeout) + .contextWrite(sink.contextView()) + .doOnSuccess(result -> { + cryptContext.addMongoOperationResult(result); + cryptContext.completeMongoOperation(); + executeStateMachineWithSink(cryptContext, databaseName, sink, operationTimeout); + }) + .doOnError(e -> sink.error(wrapInClientException(e))) + .subscribe(); + } + } + + private void fetchKeys(final MongoCryptContext cryptContext, + @Nullable final String databaseName, + final MonoSink sink, + @Nullable final Timeout operationTimeout) { + keyRetriever.find(cryptContext.getMongoOperation(), operationTimeout) + .contextWrite(sink.contextView()) + .doOnSuccess(results -> { + for (BsonDocument result : results) { + cryptContext.addMongoOperationResult(result); + } + cryptContext.completeMongoOperation(); + executeStateMachineWithSink(cryptContext, databaseName, sink, operationTimeout); + }) + .doOnError(t -> sink.error(MongoException.fromThrowableNonNull(t))) + .subscribe(); + } + + private void decryptKeys(final MongoCryptContext cryptContext, + @Nullable final String databaseName, + final MonoSink sink, + @Nullable final Timeout operationTimeout) { + MongoKeyDecryptor keyDecryptor = cryptContext.nextKeyDecryptor(); + if (keyDecryptor != null) { + keyManagementService.decryptKey(keyDecryptor, operationTimeout) + .contextWrite(sink.contextView()) + .doOnSuccess(r -> decryptKeys(cryptContext, databaseName, sink, operationTimeout)) + .doOnError(e -> sink.error(wrapInClientException(e))) + .subscribe(); + } else { + Mono.fromRunnable(cryptContext::completeKeyDecryptors) + .contextWrite(sink.contextView()) + .doOnSuccess(r -> executeStateMachineWithSink(cryptContext, databaseName, sink, operationTimeout)) + .doOnError(e -> sink.error(wrapInClientException(e))) + .subscribe(); + } + } + + private Throwable wrapInClientException(final Throwable t) { + if (t instanceof MongoClientException) { + return t; + } + return new MongoClientException("Exception in encryption library: " + t.getMessage(), t); + } + +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CryptBinding.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CryptBinding.java new file mode 100644 index 00000000000..1dcc8a07d62 --- /dev/null +++ 
b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CryptBinding.java @@ -0,0 +1,163 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal.crypt; + +import com.mongodb.ReadPreference; +import com.mongodb.ServerAddress; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncClusterAwareReadWriteBinding; +import com.mongodb.internal.binding.AsyncConnectionSource; +import com.mongodb.internal.connection.AsyncConnection; +import com.mongodb.internal.connection.OperationContext; + +/** + *

<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class CryptBinding implements AsyncClusterAwareReadWriteBinding { + + private final AsyncClusterAwareReadWriteBinding wrapped; + private final Crypt crypt; + + public CryptBinding(final AsyncClusterAwareReadWriteBinding wrapped, final Crypt crypt) { + this.wrapped = wrapped; + this.crypt = crypt; + } + + @Override + public ReadPreference getReadPreference() { + return wrapped.getReadPreference(); + } + + @Override + public void getWriteConnectionSource(final SingleResultCallback callback) { + wrapped.getWriteConnectionSource((result, t) -> { + if (t != null) { + callback.onResult(null, t); + } else { + callback.onResult(new CryptConnectionSource(result), null); + } + }); + } + + @Override + public OperationContext getOperationContext() { + return wrapped.getOperationContext(); + } + + @Override + public void getReadConnectionSource(final SingleResultCallback callback) { + wrapped.getReadConnectionSource((result, t) -> { + if (t != null) { + callback.onResult(null, t); + } else { + callback.onResult(new CryptConnectionSource(result), null); + } + }); + } + + @Override + public void getReadConnectionSource(final int minWireVersion, final ReadPreference fallbackReadPreference, + final SingleResultCallback callback) { + wrapped.getReadConnectionSource(minWireVersion, fallbackReadPreference, (result, t) -> { + if (t != null) { + callback.onResult(null, t); + } else { + callback.onResult(new CryptConnectionSource(result), null); + } + }); + } + + + @Override + public void getConnectionSource(final ServerAddress serverAddress, final SingleResultCallback callback) { + wrapped.getConnectionSource(serverAddress, (result, t) -> { + if (t != null) { + callback.onResult(null, t); + } else { + callback.onResult(new CryptConnectionSource(result), null); + } + }); + } + + @Override + public int getCount() { + return wrapped.getCount(); + } + + @Override + public AsyncClusterAwareReadWriteBinding retain() { + wrapped.retain(); + return this; + } + + @Override + public int release() { + return wrapped.release(); + } + + private class CryptConnectionSource implements AsyncConnectionSource { + private final AsyncConnectionSource wrapped; + + CryptConnectionSource(final AsyncConnectionSource wrapped) { + this.wrapped = wrapped; + CryptBinding.this.retain(); + } + + @Override + public ServerDescription getServerDescription() { + return wrapped.getServerDescription(); + } + + @Override + public OperationContext getOperationContext() { + return wrapped.getOperationContext(); + } + + @Override + public ReadPreference getReadPreference() { + return wrapped.getReadPreference(); + } + + @Override + public void getConnection(final SingleResultCallback callback) { + wrapped.getConnection((result, t) -> { + if (t != null) { + callback.onResult(null, t); + } else { + callback.onResult(new CryptConnection(result, crypt), null); + } + }); + } + + @Override + public int getCount() { + return wrapped.getCount(); + } + + @Override + public AsyncConnectionSource retain() { + wrapped.retain(); + return this; + } + + @Override + public int release() { + return wrapped.release(); + } + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CryptConnection.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CryptConnection.java new file mode 100644 index 00000000000..c05bfb663f2 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/CryptConnection.java @@ -0,0 +1,171 @@ +/* + * Copyright 
2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal.crypt; + +import com.mongodb.ReadPreference; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.connection.AsyncConnection; +import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.connection.MessageSequences; +import com.mongodb.internal.connection.MessageSequences.EmptyMessageSequences; +import com.mongodb.internal.connection.MessageSettings; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.connection.SplittablePayload; +import com.mongodb.internal.connection.SplittablePayloadBsonWriter; +import com.mongodb.internal.time.Timeout; +import com.mongodb.internal.validator.MappedFieldNameValidator; +import com.mongodb.lang.Nullable; +import org.bson.BsonBinaryReader; +import org.bson.BsonBinaryWriter; +import org.bson.BsonBinaryWriterSettings; +import org.bson.BsonDocument; +import org.bson.BsonWriter; +import org.bson.BsonWriterSettings; +import org.bson.FieldNameValidator; +import org.bson.RawBsonDocument; +import org.bson.codecs.BsonValueCodecProvider; +import org.bson.codecs.Codec; +import org.bson.codecs.Decoder; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.RawBsonDocumentCodec; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.io.BasicOutputBuffer; +import reactor.core.publisher.Mono; + +import java.util.HashMap; +import java.util.Map; +import java.util.function.Function; + +import static com.mongodb.assertions.Assertions.fail; +import static com.mongodb.reactivestreams.client.internal.MongoOperationPublisher.sinkToCallback; +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; + +class CryptConnection implements AsyncConnection { + private static final CodecRegistry REGISTRY = fromProviders(new BsonValueCodecProvider()); + private static final int MAX_SPLITTABLE_DOCUMENT_SIZE = 2097152; + + private final AsyncConnection wrapped; + private final Crypt crypt; + + CryptConnection(final AsyncConnection wrapped, final Crypt crypt) { + this.wrapped = wrapped; + this.crypt = crypt; + } + + @Override + public int getCount() { + return wrapped.getCount(); + } + + @Override + public CryptConnection retain() { + wrapped.retain(); + return this; + } + + @Override + public int release() { + return wrapped.release(); + } + + @Override + public ConnectionDescription getDescription() { + return wrapped.getDescription(); + } + + @Override + public void commandAsync(final String database, final BsonDocument command, final FieldNameValidator fieldNameValidator, + @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, + final OperationContext operationContext, final SingleResultCallback callback) { + commandAsync(database, command, fieldNameValidator, readPreference, commandResultDecoder, + 
operationContext, true, EmptyMessageSequences.INSTANCE, callback); + } + + @Override + public void commandAsync(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, + @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, + final OperationContext operationContext, final boolean responseExpected, final MessageSequences sequences, + final SingleResultCallback callback) { + + try { + SplittablePayload payload = null; + FieldNameValidator payloadFieldNameValidator = null; + if (sequences instanceof SplittablePayload) { + payload = (SplittablePayload) sequences; + payloadFieldNameValidator = payload.getFieldNameValidator(); + } else if (!(sequences instanceof EmptyMessageSequences)) { + fail(sequences.toString()); + } + BasicOutputBuffer bsonOutput = new BasicOutputBuffer(); + BsonBinaryWriter bsonBinaryWriter = new BsonBinaryWriter( + new BsonWriterSettings(), new BsonBinaryWriterSettings(getDescription().getMaxDocumentSize()), + bsonOutput, getFieldNameValidator(payload, commandFieldNameValidator, payloadFieldNameValidator)); + BsonWriter writer = payload == null + ? bsonBinaryWriter + : new SplittablePayloadBsonWriter(bsonBinaryWriter, bsonOutput, createSplittablePayloadMessageSettings(), payload, + MAX_SPLITTABLE_DOCUMENT_SIZE); + + Timeout operationTimeout = operationContext.getTimeoutContext().getTimeout(); + + getEncoder(command).encode(writer, command, EncoderContext.builder().build()); + crypt.encrypt(database, new RawBsonDocument(bsonOutput.getInternalBuffer(), 0, bsonOutput.getSize()), operationTimeout) + .flatMap((Function>) encryptedCommand -> + Mono.create(sink -> wrapped.commandAsync(database, encryptedCommand, commandFieldNameValidator, readPreference, + new RawBsonDocumentCodec(), operationContext, responseExpected, EmptyMessageSequences.INSTANCE, sinkToCallback(sink)))) + .flatMap(rawBsonDocument -> crypt.decrypt(rawBsonDocument, operationTimeout)) + .map(decryptedResponse -> + commandResultDecoder.decode(new BsonBinaryReader(decryptedResponse.getByteBuffer().asNIO()), + DecoderContext.builder().build()) + ) + .subscribe(decryptedResult -> callback.onResult(decryptedResult, null), e -> callback.onResult(null, e)); + } catch (Throwable t) { + callback.onResult(null, t); + } + } + + @SuppressWarnings("unchecked") + private Codec getEncoder(final BsonDocument command) { + return (Codec) REGISTRY.get(command.getClass()); + } + + private FieldNameValidator getFieldNameValidator(@Nullable final SplittablePayload payload, + final FieldNameValidator commandFieldNameValidator, + @Nullable final FieldNameValidator payloadFieldNameValidator) { + if (payload == null) { + return commandFieldNameValidator; + } + + Map rootMap = new HashMap<>(); + rootMap.put(payload.getPayloadName(), payloadFieldNameValidator); + return new MappedFieldNameValidator(commandFieldNameValidator, rootMap); + } + + private MessageSettings createSplittablePayloadMessageSettings() { + return MessageSettings.builder() + .maxBatchCount(getDescription().getMaxBatchCount()) + .maxMessageSize(getDescription().getMaxMessageSize()) + .maxDocumentSize(getDescription().getMaxDocumentSize()) + .build(); + } + + @Override + public void markAsPinned(final Connection.PinningMode pinningMode) { + wrapped.markAsPinned(pinningMode); + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypts.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypts.java new file mode 
100644 index 00000000000..b06af01d476 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/Crypts.java @@ -0,0 +1,96 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal.crypt; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.MongoClientException; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoNamespace; +import com.mongodb.internal.crypt.capi.MongoCrypt; +import com.mongodb.internal.crypt.capi.MongoCrypts; +import com.mongodb.reactivestreams.client.MongoClient; +import com.mongodb.reactivestreams.client.MongoClients; + +import javax.net.ssl.SSLContext; +import java.security.NoSuchAlgorithmException; +import java.util.Map; + +import static com.mongodb.internal.capi.MongoCryptHelper.createMongoCryptOptions; + +/** + *

<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class Crypts { + + private Crypts() { + } + + public static Crypt createCrypt(final MongoClientSettings mongoClientSettings, final AutoEncryptionSettings autoEncryptionSettings) { + MongoClient sharedInternalClient = null; + MongoClientSettings keyVaultMongoClientSettings = autoEncryptionSettings.getKeyVaultMongoClientSettings(); + if (keyVaultMongoClientSettings == null || !autoEncryptionSettings.isBypassAutoEncryption()) { + MongoClientSettings defaultInternalMongoClientSettings = MongoClientSettings.builder(mongoClientSettings) + .applyToConnectionPoolSettings(builder -> builder.minSize(0)) + .autoEncryptionSettings(null) + .build(); + sharedInternalClient = MongoClients.create(defaultInternalMongoClientSettings); + } + MongoClient keyVaultClient = keyVaultMongoClientSettings == null + ? sharedInternalClient : MongoClients.create(keyVaultMongoClientSettings); + MongoCrypt mongoCrypt = MongoCrypts.create(createMongoCryptOptions(autoEncryptionSettings)); + return new Crypt( + mongoCrypt, + createKeyRetriever(keyVaultClient, autoEncryptionSettings.getKeyVaultNamespace()), + createKeyManagementService(autoEncryptionSettings.getKmsProviderSslContextMap()), + autoEncryptionSettings.getKmsProviders(), + autoEncryptionSettings.getKmsProviderPropertySuppliers(), + autoEncryptionSettings.isBypassAutoEncryption(), + autoEncryptionSettings.isBypassAutoEncryption() ? null : new CollectionInfoRetriever(sharedInternalClient), + new CommandMarker(mongoCrypt, autoEncryptionSettings), + sharedInternalClient, + keyVaultClient); + } + + public static Crypt create(final MongoClient keyVaultClient, final ClientEncryptionSettings settings) { + return new Crypt(MongoCrypts.create(createMongoCryptOptions(settings)), + createKeyRetriever(keyVaultClient, settings.getKeyVaultNamespace()), + createKeyManagementService(settings.getKmsProviderSslContextMap()), + settings.getKmsProviders(), + settings.getKmsProviderPropertySuppliers() + ); + } + + private static KeyRetriever createKeyRetriever(final MongoClient keyVaultClient, + final String keyVaultNamespaceString) { + return new KeyRetriever(keyVaultClient, new MongoNamespace(keyVaultNamespaceString)); + } + + private static KeyManagementService createKeyManagementService(final Map kmsProviderSslContextMap) { + return new KeyManagementService(kmsProviderSslContextMap, 10000); + } + + private static SSLContext getSslContext() { + SSLContext sslContext; + try { + sslContext = SSLContext.getDefault(); + } catch (NoSuchAlgorithmException e) { + throw new MongoClientException("Unable to create default SSLContext", e); + } + return sslContext; + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/KeyManagementService.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/KeyManagementService.java new file mode 100644 index 00000000000..b82dd590618 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/KeyManagementService.java @@ -0,0 +1,202 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
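For context, a hedged sketch of what drives the Crypts.createCrypt factory above: applications opt in through the public AutoEncryptionSettings/MongoClientSettings API, at which point the driver builds the Crypt machinery internally. The connection string, key-vault namespace, and local master key below are illustrative assumptions, not part of this patch.

```java
import com.mongodb.AutoEncryptionSettings;
import com.mongodb.ConnectionString;
import com.mongodb.MongoClientSettings;
import com.mongodb.reactivestreams.client.MongoClient;
import com.mongodb.reactivestreams.client.MongoClients;

import java.security.SecureRandom;
import java.util.HashMap;
import java.util.Map;

public final class AutoEncryptionExample {
    public static void main(final String[] args) {
        // The "local" KMS provider expects a 96-byte master key.
        byte[] localMasterKey = new byte[96];
        new SecureRandom().nextBytes(localMasterKey);

        Map<String, Object> localProvider = new HashMap<>();
        localProvider.put("key", localMasterKey);
        Map<String, Map<String, Object>> kmsProviders = new HashMap<>();
        kmsProviders.put("local", localProvider);

        AutoEncryptionSettings autoEncryptionSettings = AutoEncryptionSettings.builder()
                .keyVaultNamespace("encryption.__keyVault") // illustrative namespace
                .kmsProviders(kmsProviders)
                .build();

        MongoClientSettings settings = MongoClientSettings.builder()
                .applyConnectionString(new ConnectionString("mongodb://localhost")) // illustrative URI
                .autoEncryptionSettings(autoEncryptionSettings) // triggers Crypts.createCrypt internally
                .build();
        try (MongoClient client = MongoClients.create(settings)) {
            // Reads and writes on this client are transparently encrypted and decrypted.
        }
    }
}
```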
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal.crypt; + +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.MongoSocketException; +import com.mongodb.MongoSocketReadTimeoutException; +import com.mongodb.MongoSocketWriteTimeoutException; +import com.mongodb.ServerAddress; +import com.mongodb.connection.AsyncCompletionHandler; +import com.mongodb.connection.SocketSettings; +import com.mongodb.connection.SslSettings; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.connection.AsynchronousChannelStream; +import com.mongodb.internal.connection.DefaultInetAddressResolver; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.connection.Stream; +import com.mongodb.internal.connection.StreamFactory; +import com.mongodb.internal.connection.TlsChannelStreamFactoryFactory; +import com.mongodb.internal.crypt.capi.MongoKeyDecryptor; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import org.bson.ByteBuf; +import org.bson.ByteBufNIO; +import com.mongodb.lang.NonNull; +import reactor.core.publisher.Mono; +import reactor.core.publisher.MonoSink; + +import javax.net.ssl.SSLContext; +import java.io.Closeable; +import java.nio.channels.CompletionHandler; +import java.nio.channels.InterruptedByTimeoutException; +import java.util.List; +import java.util.Map; + +import static java.util.Collections.singletonList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.bson.assertions.Assertions.assertTrue; + +class KeyManagementService implements Closeable { + private static final Logger LOGGER = Loggers.getLogger("client"); + private static final String TIMEOUT_ERROR_MESSAGE = "KMS key decryption exceeded the timeout limit."; + private final Map kmsProviderSslContextMap; + private final int timeoutMillis; + private final TlsChannelStreamFactoryFactory tlsChannelStreamFactoryFactory; + + KeyManagementService(final Map kmsProviderSslContextMap, final int timeoutMillis) { + assertTrue("timeoutMillis > 0", timeoutMillis > 0); + this.kmsProviderSslContextMap = kmsProviderSslContextMap; + this.tlsChannelStreamFactoryFactory = new TlsChannelStreamFactoryFactory(new DefaultInetAddressResolver()); + this.timeoutMillis = timeoutMillis; + } + + public void close() { + tlsChannelStreamFactoryFactory.close(); + } + + Mono decryptKey(final MongoKeyDecryptor keyDecryptor, @Nullable final Timeout operationTimeout) { + SocketSettings socketSettings = SocketSettings.builder() + .connectTimeout(timeoutMillis, MILLISECONDS) + .readTimeout(timeoutMillis, MILLISECONDS) + .build(); + StreamFactory streamFactory = tlsChannelStreamFactoryFactory.create(socketSettings, + SslSettings.builder().enabled(true).context(kmsProviderSslContextMap.get(keyDecryptor.getKmsProvider())).build()); + + ServerAddress serverAddress = new ServerAddress(keyDecryptor.getHostName()); + + LOGGER.info("Connecting to KMS server at " + 
serverAddress); + + return Mono.create(sink -> { + Stream stream = streamFactory.create(serverAddress); + OperationContext operationContext = createOperationContext(operationTimeout, socketSettings); + stream.openAsync(operationContext, new AsyncCompletionHandler() { + @Override + public void completed(@Nullable final Void ignored) { + streamWrite(stream, keyDecryptor, operationContext, sink); + } + + @Override + public void failed(final Throwable t) { + stream.close(); + handleError(t, operationContext, sink); + } + }); + }).onErrorMap(this::unWrapException); + } + + private void streamWrite(final Stream stream, final MongoKeyDecryptor keyDecryptor, + final OperationContext operationContext, final MonoSink sink) { + List byteBufs = singletonList(new ByteBufNIO(keyDecryptor.getMessage())); + stream.writeAsync(byteBufs, operationContext, new AsyncCompletionHandler() { + @Override + public void completed(@Nullable final Void aVoid) { + streamRead(stream, keyDecryptor, operationContext, sink); + } + + @Override + public void failed(final Throwable t) { + stream.close(); + handleError(t, operationContext, sink); + } + }); + } + + private void streamRead(final Stream stream, final MongoKeyDecryptor keyDecryptor, + final OperationContext operationContext, final MonoSink sink) { + int bytesNeeded = keyDecryptor.bytesNeeded(); + if (bytesNeeded > 0) { + AsynchronousChannelStream asyncStream = (AsynchronousChannelStream) stream; + ByteBuf buffer = asyncStream.getBuffer(bytesNeeded); + long readTimeoutMS = operationContext.getTimeoutContext().getReadTimeoutMS(); + asyncStream.getChannel().read(buffer.asNIO(), readTimeoutMS, MILLISECONDS, null, + new CompletionHandler() { + + @Override + public void completed(final Integer integer, final Void aVoid) { + buffer.flip(); + try { + keyDecryptor.feed(buffer.asNIO()); + buffer.release(); + streamRead(stream, keyDecryptor, operationContext, sink); + } catch (Throwable t) { + sink.error(t); + } + } + + @Override + public void failed(final Throwable t, final Void aVoid) { + buffer.release(); + stream.close(); + handleError(t, operationContext, sink); + } + }); + } else { + stream.close(); + sink.success(); + } + } + + private static void handleError(final Throwable t, final OperationContext operationContext, final MonoSink sink) { + if (isTimeoutException(t) && operationContext.getTimeoutContext().hasTimeoutMS()) { + sink.error(TimeoutContext.createMongoTimeoutException(TIMEOUT_ERROR_MESSAGE, t)); + } else { + sink.error(t); + } + } + + private OperationContext createOperationContext(@Nullable final Timeout operationTimeout, final SocketSettings socketSettings) { + TimeoutSettings timeoutSettings; + if (operationTimeout == null) { + timeoutSettings = createTimeoutSettings(socketSettings, null); + } else { + timeoutSettings = operationTimeout.call(MILLISECONDS, + () -> { + throw new AssertionError("operationTimeout cannot be infinite"); + }, + (ms) -> createTimeoutSettings(socketSettings, ms), + () -> { + throw new MongoOperationTimeoutException(TIMEOUT_ERROR_MESSAGE); + }); + } + return OperationContext.simpleOperationContext(new TimeoutContext(timeoutSettings)); + } + + @NonNull + private static TimeoutSettings createTimeoutSettings(final SocketSettings socketSettings, + @Nullable final Long ms) { + return new TimeoutSettings( + 0, + socketSettings.getConnectTimeout(MILLISECONDS), + socketSettings.getReadTimeout(MILLISECONDS), + ms, + 0); + } + + private Throwable unWrapException(final Throwable t) { + return t instanceof MongoSocketException ? 
t.getCause() : t; + } + + private static boolean isTimeoutException(final Throwable t) { + return t instanceof MongoSocketReadTimeoutException + || t instanceof MongoSocketWriteTimeoutException + || t instanceof InterruptedByTimeoutException; + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/KeyRetriever.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/KeyRetriever.java new file mode 100644 index 00000000000..23e3a06eff0 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/KeyRetriever.java @@ -0,0 +1,54 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal.crypt; + +import com.mongodb.MongoNamespace; +import com.mongodb.ReadConcern; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.MongoClient; +import com.mongodb.reactivestreams.client.MongoCollection; +import org.bson.BsonDocument; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.util.List; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeout; + +class KeyRetriever { + private static final String TIMEOUT_ERROR_MESSAGE = "Key retrieval exceeded the timeout limit."; + private final MongoClient client; + private final MongoNamespace namespace; + + KeyRetriever(final MongoClient client, final MongoNamespace namespace) { + this.client = notNull("client", client); + this.namespace = notNull("namespace", namespace); + } + + public Mono> find(final BsonDocument keyFilter, @Nullable final Timeout operationTimeout) { + return Flux.defer(() -> { + MongoCollection collection = client.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName(), BsonDocument.class); + + return collectionWithTimeout(collection, operationTimeout, TIMEOUT_ERROR_MESSAGE) + .withReadConcern(ReadConcern.MAJORITY) + .find(keyFilter); + }).collectList(); + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/package-info.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/package-info.java new file mode 100644 index 00000000000..3311b31a9d8 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/crypt/package-info.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
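As a hedged, blocking analogue of the streamWrite/streamRead state machine above: a MongoKeyDecryptor round trip writes getMessage() to the KMS host, then feeds response bytes back until bytesNeeded() drops to zero. MongoKeyDecryptor is internal API, and the socket setup here (default SSLSocketFactory, caller-supplied port) is an assumption for illustration only.

```java
import com.mongodb.internal.crypt.capi.MongoKeyDecryptor;

import javax.net.ssl.SSLSocket;
import javax.net.ssl.SSLSocketFactory;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;

final class BlockingKmsExchange {
    // Drives one KMS round trip: write the request, then feed response bytes
    // into the decryptor until it has everything it needs.
    static void exchange(final MongoKeyDecryptor keyDecryptor, final int port) throws IOException {
        try (SSLSocket socket = (SSLSocket) SSLSocketFactory.getDefault()
                .createSocket(keyDecryptor.getHostName(), port)) { // port is an assumption
            OutputStream out = socket.getOutputStream();
            ByteBuffer message = keyDecryptor.getMessage();
            byte[] request = new byte[message.remaining()];
            message.get(request);
            out.write(request);
            out.flush();

            InputStream in = socket.getInputStream();
            while (keyDecryptor.bytesNeeded() > 0) {
                byte[] chunk = new byte[keyDecryptor.bytesNeeded()];
                int read = in.read(chunk);
                if (read == -1) {
                    throw new IOException("KMS stream ended before the response was complete");
                }
                keyDecryptor.feed(ByteBuffer.wrap(chunk, 0, read));
            }
        } // the reactive version above instead closes the Stream and signals the MonoSink
    }
}
```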
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. + */ +@Internal +@NonNullApi +package com.mongodb.reactivestreams.client.internal.crypt; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSBucketImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSBucketImpl.java new file mode 100644 index 00000000000..948c666489c --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSBucketImpl.java @@ -0,0 +1,336 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal.gridfs; + +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.client.gridfs.model.GridFSDownloadOptions; +import com.mongodb.client.gridfs.model.GridFSFile; +import com.mongodb.client.gridfs.model.GridFSUploadOptions; +import com.mongodb.internal.time.Timeout; +import com.mongodb.reactivestreams.client.ClientSession; +import com.mongodb.reactivestreams.client.MongoClients; +import com.mongodb.reactivestreams.client.MongoCollection; +import com.mongodb.reactivestreams.client.MongoDatabase; +import com.mongodb.reactivestreams.client.gridfs.GridFSBucket; +import com.mongodb.reactivestreams.client.gridfs.GridFSDownloadPublisher; +import com.mongodb.reactivestreams.client.gridfs.GridFSFindPublisher; +import com.mongodb.reactivestreams.client.gridfs.GridFSUploadPublisher; +import org.bson.BsonDocument; +import org.bson.BsonObjectId; +import org.bson.BsonValue; +import org.bson.Document; +import org.bson.conversions.Bson; +import org.bson.types.ObjectId; +import org.reactivestreams.Publisher; + +import java.nio.ByteBuffer; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.reactivestreams.client.internal.gridfs.GridFSPublisherCreator.createDeletePublisher; +import static com.mongodb.reactivestreams.client.internal.gridfs.GridFSPublisherCreator.createDropPublisher; +import static com.mongodb.reactivestreams.client.internal.gridfs.GridFSPublisherCreator.createGridFSDownloadPublisher; +import static com.mongodb.reactivestreams.client.internal.gridfs.GridFSPublisherCreator.createGridFSFindPublisher; +import static 
com.mongodb.reactivestreams.client.internal.gridfs.GridFSPublisherCreator.createGridFSUploadPublisher; +import static com.mongodb.reactivestreams.client.internal.gridfs.GridFSPublisherCreator.createRenamePublisher; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.bson.codecs.configuration.CodecRegistries.fromRegistries; + + +/** + * The internal GridFSBucket implementation. + * + *

<p>This class is not part of the public API and may be removed or changed at any time</p>
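A hedged usage sketch for the implementation below: applications obtain it through the public GridFSBuckets factory and reconfigure it with the immutable withX methods. The connection string, database, and bucket names are illustrative.

```java
import com.mongodb.WriteConcern;
import com.mongodb.reactivestreams.client.MongoClients;
import com.mongodb.reactivestreams.client.MongoDatabase;
import com.mongodb.reactivestreams.client.gridfs.GridFSBucket;
import com.mongodb.reactivestreams.client.gridfs.GridFSBuckets;

final class BucketSetupExample {
    static GridFSBucket createBucket() {
        MongoDatabase database = MongoClients.create("mongodb://localhost").getDatabase("files");
        return GridFSBuckets.create(database, "media")    // defaults to the "fs" bucket when omitted
                .withChunkSizeBytes(1024 * 1024)          // 1 MiB chunks instead of the 255 KiB default
                .withWriteConcern(WriteConcern.MAJORITY); // each withX returns a new configured view
    }
}
```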
+ */ +public final class GridFSBucketImpl implements GridFSBucket { + private static final int DEFAULT_CHUNKSIZE_BYTES = 255 * 1024; + private final String bucketName; + private final int chunkSizeBytes; + private final MongoCollection filesCollection; + private final MongoCollection chunksCollection; + + public GridFSBucketImpl(final MongoDatabase database) { + this(database, "fs"); + } + + public GridFSBucketImpl(final MongoDatabase database, final String bucketName) { + this(notNull("bucketName", bucketName), DEFAULT_CHUNKSIZE_BYTES, + getFilesCollection(notNull("database", database), bucketName), + getChunksCollection(database, bucketName)); + } + + private GridFSBucketImpl(final String bucketName, final int chunkSizeBytes, final MongoCollection filesCollection, + final MongoCollection chunksCollection) { + this.bucketName = notNull("bucketName", bucketName); + this.chunkSizeBytes = chunkSizeBytes; + this.filesCollection = notNull("filesCollection", filesCollection); + this.chunksCollection = notNull("chunksCollection", chunksCollection); + } + + private static MongoCollection getFilesCollection(final MongoDatabase database, final String bucketName) { + return database.getCollection(bucketName + ".files", GridFSFile.class).withCodecRegistry( + fromRegistries(database.getCodecRegistry(), MongoClients.getDefaultCodecRegistry()) + ); + } + + private static MongoCollection getChunksCollection(final MongoDatabase database, final String bucketName) { + return database.getCollection(bucketName + ".chunks").withCodecRegistry(MongoClients.getDefaultCodecRegistry()); + } + + @Override + public String getBucketName() { + return bucketName; + } + + @Override + public int getChunkSizeBytes() { + return chunkSizeBytes; + } + + @Override + public ReadPreference getReadPreference() { + return filesCollection.getReadPreference(); + } + + @Override + public WriteConcern getWriteConcern() { + return filesCollection.getWriteConcern(); + } + + @Override + public ReadConcern getReadConcern() { + return filesCollection.getReadConcern(); + } + + @Override + public Long getTimeout(final TimeUnit timeUnit) { + Long timeoutMS = filesCollection.getTimeout(MILLISECONDS); + return timeoutMS == null ? 
null : notNull("timeUnit", timeUnit).convert(timeoutMS, MILLISECONDS); + } + + @Override + public GridFSBucket withChunkSizeBytes(final int chunkSizeBytes) { + return new GridFSBucketImpl(bucketName, chunkSizeBytes, filesCollection, chunksCollection); + } + + @Override + public GridFSBucket withReadPreference(final ReadPreference readPreference) { + notNull("readPreference", readPreference); + return new GridFSBucketImpl(bucketName, chunkSizeBytes, filesCollection.withReadPreference(readPreference), + chunksCollection.withReadPreference(readPreference)); + } + + @Override + public GridFSBucket withWriteConcern(final WriteConcern writeConcern) { + notNull("writeConcern", writeConcern); + return new GridFSBucketImpl(bucketName, chunkSizeBytes, filesCollection.withWriteConcern(writeConcern), + chunksCollection.withWriteConcern(writeConcern)); + } + + @Override + public GridFSBucket withReadConcern(final ReadConcern readConcern) { + notNull("readConcern", readConcern); + return new GridFSBucketImpl(bucketName, chunkSizeBytes, filesCollection.withReadConcern(readConcern), + chunksCollection.withReadConcern(readConcern)); + } + + @Override + public GridFSBucket withTimeout(final long timeout, final TimeUnit timeUnit) { + return new GridFSBucketImpl(bucketName, chunkSizeBytes, filesCollection.withTimeout(timeout, timeUnit), + chunksCollection.withTimeout(timeout, timeUnit)); + } + + @Override + public GridFSUploadPublisher uploadFromPublisher(final String filename, final Publisher source) { + return uploadFromPublisher(filename, source, new GridFSUploadOptions()); + } + + @Override + public GridFSUploadPublisher uploadFromPublisher(final String filename, + final Publisher source, + final GridFSUploadOptions options) { + return createGridFSUploadPublisher(chunkSizeBytes, filesCollection, chunksCollection, + null, new BsonObjectId(), filename, options, source).withObjectId(); + } + + @Override + public GridFSUploadPublisher uploadFromPublisher(final BsonValue id, final String filename, final Publisher source) { + return uploadFromPublisher(id, filename, source, new GridFSUploadOptions()); + } + + @Override + public GridFSUploadPublisher uploadFromPublisher(final BsonValue id, final String filename, final Publisher source, + final GridFSUploadOptions options) { + return createGridFSUploadPublisher(chunkSizeBytes, filesCollection, chunksCollection, null, id, + filename, options, source); + } + + @Override + public GridFSUploadPublisher uploadFromPublisher(final ClientSession clientSession, final String filename, + final Publisher source) { + return uploadFromPublisher(clientSession, filename, source, new GridFSUploadOptions()); + } + + @Override + public GridFSUploadPublisher uploadFromPublisher(final ClientSession clientSession, final String filename, + final Publisher source, final GridFSUploadOptions options) { + return createGridFSUploadPublisher(chunkSizeBytes, filesCollection, chunksCollection, + notNull("clientSession", clientSession), new BsonObjectId(), filename, options, source) + .withObjectId(); + } + + @Override + public GridFSUploadPublisher uploadFromPublisher(final ClientSession clientSession, final BsonValue id, + final String filename, final Publisher source) { + return uploadFromPublisher(clientSession, id, filename, source, new GridFSUploadOptions()); + } + + @Override + public GridFSUploadPublisher uploadFromPublisher(final ClientSession clientSession, final BsonValue id, + final String filename, + final Publisher source, + final GridFSUploadOptions options) { + return 
createGridFSUploadPublisher(chunkSizeBytes, filesCollection, chunksCollection, + notNull("clientSession", clientSession), id, filename, options, source); + } + + @Override + public GridFSDownloadPublisher downloadToPublisher(final ObjectId id) { + return downloadToPublisher(new BsonObjectId(id)); + } + + @Override + public GridFSDownloadPublisher downloadToPublisher(final BsonValue id) { + + Function findPublisherCreator = + operationTimeout -> createGridFSFindPublisher(filesCollection, null, new BsonDocument("_id", id), operationTimeout); + return createGridFSDownloadPublisher(chunksCollection, null, findPublisherCreator); + } + + @Override + public GridFSDownloadPublisher downloadToPublisher(final String filename) { + return downloadToPublisher(filename, new GridFSDownloadOptions()); + } + + @Override + public GridFSDownloadPublisher downloadToPublisher(final String filename, final GridFSDownloadOptions options) { + Function findPublisherCreator = + operationTimeout -> createGridFSFindPublisher(filesCollection, null, filename, options, operationTimeout); + return createGridFSDownloadPublisher(chunksCollection, null, findPublisherCreator); + } + + @Override + public GridFSDownloadPublisher downloadToPublisher(final ClientSession clientSession, final ObjectId id) { + return downloadToPublisher(clientSession, new BsonObjectId(id)); + } + + @Override + public GridFSDownloadPublisher downloadToPublisher(final ClientSession clientSession, final BsonValue id) { + Function findPublisherCreator = + operationTimeout -> createGridFSFindPublisher(filesCollection, clientSession, new BsonDocument("_id", id), operationTimeout); + return createGridFSDownloadPublisher(chunksCollection, notNull("clientSession", clientSession), findPublisherCreator); + } + + @Override + public GridFSDownloadPublisher downloadToPublisher(final ClientSession clientSession, final String filename) { + return downloadToPublisher(clientSession, filename, new GridFSDownloadOptions()); + } + + @Override + public GridFSDownloadPublisher downloadToPublisher(final ClientSession clientSession, + final String filename, + final GridFSDownloadOptions options) { + Function findPublisherCreator = + operationTimeout -> createGridFSFindPublisher(filesCollection, clientSession, filename, + options, operationTimeout); + + return createGridFSDownloadPublisher(chunksCollection, notNull("clientSession", clientSession), findPublisherCreator); + } + + @Override + public GridFSFindPublisher find() { + return createGridFSFindPublisher(filesCollection, null, null); + } + + @Override + public GridFSFindPublisher find(final Bson filter) { + return createGridFSFindPublisher(filesCollection, null, notNull("filter", filter)); + } + + @Override + public GridFSFindPublisher find(final ClientSession clientSession) { + return createGridFSFindPublisher(filesCollection, notNull("clientSession", clientSession), null); + } + + @Override + public GridFSFindPublisher find(final ClientSession clientSession, final Bson filter) { + return createGridFSFindPublisher(filesCollection, notNull("clientSession", clientSession), notNull("filter", filter)); + } + + @Override + public Publisher delete(final ObjectId id) { + return delete(new BsonObjectId(id)); + } + + @Override + public Publisher delete(final BsonValue id) { + return createDeletePublisher(filesCollection, chunksCollection, null, id); + } + + @Override + public Publisher delete(final ClientSession clientSession, final ObjectId id) { + return delete(clientSession, new BsonObjectId(id)); + } + + @Override + public 
Publisher delete(final ClientSession clientSession, final BsonValue id) { + return createDeletePublisher(filesCollection, chunksCollection, notNull("clientSession", clientSession), id); + } + + @Override + public Publisher rename(final ObjectId id, final String newFilename) { + return rename(new BsonObjectId(id), newFilename); + } + + @Override + public Publisher rename(final BsonValue id, final String newFilename) { + return createRenamePublisher(filesCollection, null, id, newFilename); + } + + @Override + public Publisher rename(final ClientSession clientSession, final ObjectId id, final String newFilename) { + return rename(clientSession, new BsonObjectId(id), newFilename); + } + + @Override + public Publisher rename(final ClientSession clientSession, final BsonValue id, final String newFilename) { + return createRenamePublisher(filesCollection, notNull("clientSession", clientSession), id, newFilename); + } + + @Override + public Publisher drop() { + return createDropPublisher(filesCollection, chunksCollection, null); + } + + @Override + public Publisher drop(final ClientSession clientSession) { + return createDropPublisher(filesCollection, chunksCollection, notNull("clientSession", clientSession)); + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSDownloadPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSDownloadPublisherImpl.java new file mode 100644 index 00000000000..bedc6552957 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSDownloadPublisherImpl.java @@ -0,0 +1,142 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal.gridfs; + +import com.mongodb.MongoGridFSException; +import com.mongodb.client.gridfs.model.GridFSFile; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.ClientSession; +import com.mongodb.reactivestreams.client.FindPublisher; +import com.mongodb.reactivestreams.client.MongoCollection; +import com.mongodb.reactivestreams.client.gridfs.GridFSDownloadPublisher; +import com.mongodb.reactivestreams.client.gridfs.GridFSFindPublisher; +import org.bson.Document; +import org.bson.types.Binary; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.nio.ByteBuffer; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.TimeoutContext.startTimeout; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeout; +import static java.lang.String.format; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +/** + *

<p>This class is not part of the public API and may be removed or changed at any time</p>
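The publisher below validates every chunk's index and payload length. A worked example of that arithmetic, assuming a 614400-byte (600 KiB) file stored with the default 255 KiB chunk size:

```java
final class ChunkMathExample {
    public static void main(final String[] args) {
        long length = 600L * 1024;   // 614400-byte file (assumed for the example)
        int chunkSize = 255 * 1024;  // 261120 bytes, the driver default
        int numberOfChunks = (int) Math.ceil((double) length / chunkSize);       // 3
        long lastChunkLength = length - (numberOfChunks - 1) * (long) chunkSize; // 92160 bytes
        // Chunks 0 and 1 must be exactly 261120 bytes and chunk 2 exactly 92160 bytes;
        // any other length makes the download publisher raise MongoGridFSException.
        System.out.println(numberOfChunks + " chunks, last one " + lastChunkLength + " bytes");
    }
}
```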
+ */ +public class GridFSDownloadPublisherImpl implements GridFSDownloadPublisher { + private static final String TIMEOUT_ERROR_MESSAGE = "Finding chunks exceeded the timeout limit."; + private final ClientSession clientSession; + private final Function gridFSFileMono; + private final MongoCollection chunksCollection; + private Integer bufferSizeBytes; + + private volatile GridFSFile fileInfo; + @Nullable + private final Long timeoutMs; + + public GridFSDownloadPublisherImpl(@Nullable final ClientSession clientSession, + final Function gridFSFilePublisherCreator, + final MongoCollection chunksCollection) { + this.clientSession = clientSession; + this.gridFSFileMono = notNull("gridFSFilePublisherCreator", gridFSFilePublisherCreator); + this.chunksCollection = notNull("chunksCollection", chunksCollection); + this.timeoutMs = chunksCollection.getTimeout(MILLISECONDS); + } + + @Override + public Publisher getGridFSFile() { + if (fileInfo != null) { + return Mono.fromCallable(() -> fileInfo); + } + return Mono.from(gridFSFileMono.apply(startTimeout(timeoutMs))) + .doOnNext(gridFSFile -> fileInfo = gridFSFile); + } + + @Override + public GridFSDownloadPublisher bufferSizeBytes(final int bufferSizeBytes) { + this.bufferSizeBytes = bufferSizeBytes; + return this; + } + + @Override + public void subscribe(final Subscriber subscriber) { + Flux.defer(()-> { + Timeout operationTimeout = startTimeout(timeoutMs); + return Mono.from(gridFSFileMono.apply(operationTimeout)) + .doOnSuccess(gridFSFile -> { + if (gridFSFile == null) { + throw new MongoGridFSException("File not found"); + } + fileInfo = gridFSFile; + }).flatMapMany((Function>) gridFSFile -> getChunkPublisher(gridFSFile, operationTimeout)); + }).subscribe(subscriber); + } + + private Flux getChunkPublisher(final GridFSFile gridFSFile, @Nullable final Timeout timeout) { + Document filter = new Document("files_id", gridFSFile.getId()); + FindPublisher chunkPublisher; + if (clientSession != null) { + chunkPublisher = collectionWithTimeout(chunksCollection, timeout, TIMEOUT_ERROR_MESSAGE).find(clientSession, filter); + } else { + chunkPublisher = collectionWithTimeout(chunksCollection, timeout, TIMEOUT_ERROR_MESSAGE).find(filter); + } + + AtomicInteger chunkCounter = new AtomicInteger(0); + int numberOfChunks = (int) Math.ceil((double) gridFSFile.getLength() / gridFSFile.getChunkSize()); + Flux byteBufferFlux = Flux.from(chunkPublisher.sort(new Document("n", 1))) + .map(chunk -> { + int expectedChunkIndex = chunkCounter.getAndAdd(1); + if (chunk == null || chunk.getInteger("n") != expectedChunkIndex) { + throw new MongoGridFSException(format("Could not find file chunk for files_id: %s at chunk index %s.", + gridFSFile.getId(), expectedChunkIndex)); + } else if (!(chunk.get("data") instanceof Binary)) { + throw new MongoGridFSException("Unexpected data format for the chunk"); + } + + byte[] data = chunk.get("data", Binary.class).getData(); + + long expectedDataLength = 0; + if (numberOfChunks > 0) { + expectedDataLength = expectedChunkIndex + 1 == numberOfChunks + ? gridFSFile.getLength() - (expectedChunkIndex * (long) gridFSFile.getChunkSize()) + : gridFSFile.getChunkSize(); + } + + if (data.length != expectedDataLength) { + throw new MongoGridFSException(format("Chunk size data length is not the expected size. 
" + + "The size was %s for file_id: %s chunk index %s it should be " + + "%s bytes.", + data.length, gridFSFile.getId(), expectedChunkIndex, expectedDataLength)); + } + return ByteBuffer.wrap(data); + }).doOnComplete(() -> { + if (chunkCounter.get() < numberOfChunks) { + throw new MongoGridFSException(format("Could not find file chunk for files_id: %s at chunk index %s.", + gridFSFile.getId(), chunkCounter.get())); + } + }); + return bufferSizeBytes == null ? byteBufferFlux : new ResizingByteBufferFlux(byteBufferFlux, bufferSizeBytes); + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSFindPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSFindPublisherImpl.java new file mode 100644 index 00000000000..41ee872c05e --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSFindPublisherImpl.java @@ -0,0 +1,99 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal.gridfs; + +import com.mongodb.client.gridfs.model.GridFSFile; +import com.mongodb.client.model.Collation; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.FindPublisher; +import com.mongodb.reactivestreams.client.gridfs.GridFSFindPublisher; +import org.bson.conversions.Bson; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + *

<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class GridFSFindPublisherImpl implements GridFSFindPublisher { + private final FindPublisher wrapped; + + GridFSFindPublisherImpl(final FindPublisher wrapped) { + this.wrapped = notNull("GridFSFindIterable", wrapped); + } + + @Override + public Publisher first() { + return wrapped.first(); + } + + @Override + public GridFSFindPublisher sort(@Nullable final Bson sort) { + wrapped.sort(sort); + return this; + } + + @Override + public GridFSFindPublisher skip(final int skip) { + wrapped.skip(skip); + return this; + } + + @Override + public GridFSFindPublisher limit(final int limit) { + wrapped.limit(limit); + return this; + } + + @Override + public GridFSFindPublisher filter(@Nullable final Bson filter) { + wrapped.filter(filter); + return this; + } + + @Override + public GridFSFindPublisher maxTime(final long maxTime, final TimeUnit timeUnit) { + wrapped.maxTime(maxTime, timeUnit); + return this; + } + + @Override + public GridFSFindPublisher noCursorTimeout(final boolean noCursorTimeout) { + wrapped.noCursorTimeout(noCursorTimeout); + return this; + } + + @Override + public GridFSFindPublisher collation(@Nullable final Collation collation) { + wrapped.collation(collation); + return this; + } + + @Override + public GridFSFindPublisher batchSize(final int batchSize) { + wrapped.batchSize(batchSize); + return this; + } + + @Override + public void subscribe(final Subscriber s) { + wrapped.subscribe(s); + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSPublisherCreator.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSPublisherCreator.java new file mode 100644 index 00000000000..166abca6a0b --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSPublisherCreator.java @@ -0,0 +1,255 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
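Tying the find and download publishers above together, a hedged consumer-side sketch; the file name and buffer size are assumptions:

```java
import com.mongodb.reactivestreams.client.gridfs.GridFSBucket;
import reactor.core.publisher.Flux;

import java.nio.ByteBuffer;

final class DownloadExample {
    // Streams the newest "report.pdf", re-chunked into 1 MiB buffers by
    // bufferSizeBytes rather than delivered at the stored chunk size.
    static Flux<ByteBuffer> download(final GridFSBucket bucket) {
        return Flux.from(bucket.downloadToPublisher("report.pdf")
                .bufferSizeBytes(1024 * 1024));
    }
}
```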
+ */ + +package com.mongodb.reactivestreams.client.internal.gridfs; + +import com.mongodb.MongoGridFSException; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.gridfs.model.GridFSDownloadOptions; +import com.mongodb.client.gridfs.model.GridFSFile; +import com.mongodb.client.gridfs.model.GridFSUploadOptions; +import com.mongodb.client.result.UpdateResult; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.ClientSession; +import com.mongodb.reactivestreams.client.FindPublisher; +import com.mongodb.reactivestreams.client.MongoCollection; +import com.mongodb.reactivestreams.client.gridfs.GridFSFindPublisher; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.Document; +import org.bson.conversions.Bson; +import org.reactivestreams.Publisher; +import reactor.core.publisher.Mono; + +import java.nio.ByteBuffer; +import java.util.function.Function; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeout; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeoutMono; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeoutDeferred; +import static java.lang.String.format; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +/** + *

<p>This class is not part of the public API and may be removed or changed at any time</p>
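The filename-based createGridFSFindPublisher overload below turns a GridFSDownloadOptions revision into a skip plus an uploadDate sort. A hedged illustration of that mapping (the file name is an assumption):

```java
import com.mongodb.client.gridfs.model.GridFSDownloadOptions;
import com.mongodb.reactivestreams.client.gridfs.GridFSBucket;
import com.mongodb.reactivestreams.client.gridfs.GridFSDownloadPublisher;

final class RevisionExample {
    // revision  0 -> skip 0, ascending uploadDate  (the original file)
    // revision  2 -> skip 2, ascending uploadDate  (the third oldest)
    // revision -1 -> skip 0, descending uploadDate (the most recent, the default)
    // revision -3 -> skip 2, descending uploadDate (the third most recent)
    static GridFSDownloadPublisher thirdMostRecent(final GridFSBucket bucket) {
        return bucket.downloadToPublisher("report.pdf",
                new GridFSDownloadOptions().revision(-3));
    }
}
```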
+ */ +public final class GridFSPublisherCreator { + + private GridFSPublisherCreator() { + } + + public static GridFSUploadPublisherImpl createGridFSUploadPublisher( + final int chunkSizeBytes, final MongoCollection filesCollection, final MongoCollection chunksCollection, + @Nullable final ClientSession clientSession, final BsonValue id, final String filename, final GridFSUploadOptions options, + final Publisher source) { + notNull("filesCollection", filesCollection); + notNull("chunksCollection", chunksCollection); + notNull("id", id); + notNull("filename", filename); + notNull("options", options); + Integer chunkSize = options.getChunkSizeBytes(); + if (chunkSize == null) { + chunkSize = chunkSizeBytes; + } + return new GridFSUploadPublisherImpl(clientSession, filesCollection, chunksCollection, id, filename, chunkSize, + options.getMetadata(), source); + } + + public static GridFSDownloadPublisherImpl createGridFSDownloadPublisher( + final MongoCollection chunksCollection, + @Nullable final ClientSession clientSession, + final Function publisher) { + notNull("chunksCollection", chunksCollection); + notNull("gridFSFileMono", publisher); + return new GridFSDownloadPublisherImpl(clientSession, publisher, chunksCollection); + } + + public static GridFSFindPublisher createGridFSFindPublisher( + final MongoCollection filesCollection, + @Nullable final ClientSession clientSession, + @Nullable final Bson filter) { + notNull("filesCollection", filesCollection); + return new GridFSFindPublisherImpl(createFindPublisher(filesCollection, clientSession, filter)); + } + + public static GridFSFindPublisher createGridFSFindPublisher( + final MongoCollection filesCollection, + @Nullable final ClientSession clientSession, + @Nullable final Bson filter, + @Nullable final Timeout operationTimeout) { + notNull("filesCollection", filesCollection); + return new GridFSFindPublisherImpl(createFindPublisher(filesCollection, clientSession, filter, operationTimeout)); + } + + public static GridFSFindPublisher createGridFSFindPublisher( + final MongoCollection filesCollection, + @Nullable final ClientSession clientSession, + final String filename, + final GridFSDownloadOptions options, + @Nullable final Timeout operationTimeout) { + notNull("filesCollection", filesCollection); + notNull("filename", filename); + notNull("options", options); + + int revision = options.getRevision(); + int skip; + int sort; + if (revision >= 0) { + skip = revision; + sort = 1; + } else { + skip = (-revision) - 1; + sort = -1; + } + + return createGridFSFindPublisher(filesCollection, clientSession, new Document("filename", filename), operationTimeout).skip(skip) + .sort(new Document("uploadDate", sort)); + } + + public static FindPublisher createFindPublisher( + final MongoCollection filesCollection, + @Nullable final ClientSession clientSession, + @Nullable final Bson filter, + @Nullable final Timeout operationTimeout) { + notNull("filesCollection", filesCollection); + FindPublisher publisher; + if (clientSession == null) { + publisher = collectionWithTimeout(filesCollection, operationTimeout).find(); + } else { + publisher = collectionWithTimeout(filesCollection, operationTimeout).find(clientSession); + } + + if (filter != null) { + publisher = publisher.filter(filter); + } + if (operationTimeout != null) { + publisher.timeoutMode(TimeoutMode.CURSOR_LIFETIME); + } + return publisher; + } + + public static FindPublisher createFindPublisher( + final MongoCollection filesCollection, + @Nullable final ClientSession clientSession, + 
@Nullable final Bson filter) { + notNull("filesCollection", filesCollection); + FindPublisher publisher; + if (clientSession == null) { + publisher = filesCollection.find(); + } else { + publisher = filesCollection.find(clientSession); + } + if (filter != null) { + publisher = publisher.filter(filter); + } + if (filesCollection.getTimeout(MILLISECONDS) != null) { + publisher.timeoutMode(TimeoutMode.CURSOR_LIFETIME); + } + return publisher; + } + + public static Publisher createDeletePublisher(final MongoCollection filesCollection, + final MongoCollection chunksCollection, + @Nullable final ClientSession clientSession, + final BsonValue id) { + notNull("filesCollection", filesCollection); + notNull("chunksCollection", chunksCollection); + notNull("id", id); + BsonDocument filter = new BsonDocument("_id", id); + + return Mono.defer(()-> { + Timeout operationTimeout = startTimeout(filesCollection.getTimeout(MILLISECONDS)); + return collectionWithTimeoutMono(filesCollection, operationTimeout) + .flatMap(wrappedCollection -> { + if (clientSession == null) { + return Mono.from(wrappedCollection.deleteOne(filter)); + } else { + return Mono.from(wrappedCollection.deleteOne(clientSession, filter)); + } + }).flatMap(deleteResult -> { + if (deleteResult.wasAcknowledged() && deleteResult.getDeletedCount() == 0) { + return Mono.error(new MongoGridFSException(format("No file found with the ObjectId: %s", id))); + } + return collectionWithTimeoutMono(chunksCollection, operationTimeout); + }).flatMap(wrappedCollection -> { + if (clientSession == null) { + return Mono.from(wrappedCollection.deleteMany(new BsonDocument("files_id", id))); + } else { + return Mono.from(wrappedCollection.deleteMany(clientSession, new BsonDocument("files_id", id))); + } + }).then(); + }); + } + + public static Publisher createRenamePublisher(final MongoCollection filesCollection, + @Nullable final ClientSession clientSession, + final BsonValue id, + final String newFilename) { + notNull("filesCollection", filesCollection); + notNull("id", id); + notNull("newFilename", newFilename); + BsonDocument filter = new BsonDocument("_id", id); + BsonDocument update = new BsonDocument("$set", + new BsonDocument("filename", new BsonString(newFilename))); + Publisher publisher; + if (clientSession == null) { + publisher = filesCollection.updateOne(filter, update); + } else { + publisher = filesCollection.updateOne(clientSession, filter, update); + } + + return Mono.from(publisher).flatMap(updateResult -> { + if (updateResult.wasAcknowledged() && updateResult.getModifiedCount() == 0) { + throw new MongoGridFSException(format("No file found with the ObjectId: %s", id)); + } + return Mono.empty(); + }); + } + + public static Publisher createDropPublisher(final MongoCollection filesCollection, + final MongoCollection chunksCollection, + @Nullable final ClientSession clientSession) { + + return Mono.defer(() -> { + Timeout operationTimeout = startTimeout(filesCollection.getTimeout(MILLISECONDS)); + return collectionWithTimeoutMono(filesCollection, operationTimeout) + .flatMap(wrappedCollection -> { + if (clientSession == null) { + return Mono.from(wrappedCollection.drop()); + } else { + return Mono.from(wrappedCollection.drop(clientSession)); + } + }).then(collectionWithTimeoutDeferred(chunksCollection, operationTimeout)) + .flatMap(wrappedCollection -> { + if (clientSession == null) { + return Mono.from(wrappedCollection.drop()); + } else { + return Mono.from(wrappedCollection.drop(clientSession)); + } + + }); + }); + } + + @Nullable + private 
static Timeout startTimeout(@Nullable final Long timeoutMs) { + return timeoutMs == null ? null : TimeoutContext.startTimeout(timeoutMs); + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSUploadPublisherImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSUploadPublisherImpl.java new file mode 100644 index 00000000000..7d9a46cdf3f --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/GridFSUploadPublisherImpl.java @@ -0,0 +1,286 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal.gridfs; + +import com.mongodb.MongoGridFSException; +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.client.gridfs.model.GridFSFile; +import com.mongodb.client.result.DeleteResult; +import com.mongodb.client.result.InsertOneResult; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.ClientSession; +import com.mongodb.reactivestreams.client.MongoCollection; +import com.mongodb.reactivestreams.client.gridfs.GridFSUploadPublisher; +import org.bson.BsonValue; +import org.bson.Document; +import org.bson.types.Binary; +import org.bson.types.ObjectId; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.nio.ByteBuffer; +import java.util.Date; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; + +import static com.mongodb.ReadPreference.primary; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeout; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeoutDeferred; +import static java.time.Duration.ofMillis; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
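+ * <p>
+ * Editor's note (illustration only, not part of the patch): this publisher backs the
+ * {@code GridFSBucket.uploadFromPublisher} helpers. A minimal usage sketch, assuming a database named {@code "test"}:
+ * <pre>{@code
+ * GridFSBucket bucket = GridFSBuckets.create(client.getDatabase("test"));
+ * Publisher<ByteBuffer> source = Flux.just(ByteBuffer.wrap("hello gridfs".getBytes(StandardCharsets.UTF_8)));
+ * // The publisher is cold: nothing is written until it is subscribed to; Mono.from(...) subscribes once.
+ * ObjectId id = Mono.from(bucket.uploadFromPublisher("greeting.txt", source)).block();
+ * }</pre>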
+ */
+public final class GridFSUploadPublisherImpl implements GridFSUploadPublisher<Void> {
+
+    private static final String TIMEOUT_ERROR_MESSAGE = "Saving chunks exceeded the timeout limit.";
+    private static final Document PROJECTION = new Document("_id", 1);
+    private static final Document FILES_INDEX = new Document("filename", 1).append("uploadDate", 1);
+    private static final Document CHUNKS_INDEX = new Document("files_id", 1).append("n", 1);
+    private final ClientSession clientSession;
+    private final MongoCollection<GridFSFile> filesCollection;
+    private final MongoCollection<Document> chunksCollection;
+    private final BsonValue fileId;
+    private final String filename;
+    private final int chunkSizeBytes;
+    private final Document metadata;
+    private final Publisher<ByteBuffer> source;
+    @Nullable
+    private final Long timeoutMs;
+
+    public GridFSUploadPublisherImpl(@Nullable final ClientSession clientSession,
+                                     final MongoCollection<GridFSFile> filesCollection,
+                                     final MongoCollection<Document> chunksCollection,
+                                     final BsonValue fileId,
+                                     final String filename,
+                                     final int chunkSizeBytes,
+                                     @Nullable final Document metadata,
+                                     final Publisher<ByteBuffer> source) {
+        this.clientSession = clientSession;
+        this.filesCollection = notNull("files collection", filesCollection);
+        this.chunksCollection = notNull("chunks collection", chunksCollection);
+        this.fileId = notNull("File Id", fileId);
+        this.filename = notNull("filename", filename);
+        this.chunkSizeBytes = chunkSizeBytes;
+        this.metadata = metadata;
+        this.source = source;
+        this.timeoutMs = filesCollection.getTimeout(MILLISECONDS);
+    }
+
+    @Override
+    public ObjectId getObjectId() {
+        if (!fileId.isObjectId()) {
+            throw new MongoGridFSException("Custom id type used for this GridFS upload stream");
+        }
+        return fileId.asObjectId().getValue();
+    }
+
+    @Override
+    public BsonValue getId() {
+        return fileId;
+    }
+
+    @Override
+    public void subscribe(final Subscriber<? super Void> s) {
+        Mono.deferContextual(ctx -> {
+            AtomicBoolean terminated = new AtomicBoolean(false);
+            Timeout timeout = TimeoutContext.startTimeout(timeoutMs);
+            return createCheckAndCreateIndexesMono(timeout)
+                    .then(createSaveChunksMono(terminated, timeout))
+                    .flatMap(lengthInBytes -> createSaveFileDataMono(terminated, lengthInBytes, timeout))
+                    .onErrorResume(originalError ->
+                            createCancellationMono(terminated, timeout)
+                                    .onErrorMap(cancellationError -> {
+                                        // Timeout exception might occur during cancellation. It gets suppressed.
+                                        originalError.addSuppressed(cancellationError);
+                                        return originalError;
+                                    })
+                                    .then(Mono.error(originalError)))
+                    .doOnCancel(() -> createCancellationMono(terminated, timeout).contextWrite(ctx).subscribe())
+                    .then();
+        }).subscribe(s);
+    }
+
+    public GridFSUploadPublisher<ObjectId> withObjectId() {
+        GridFSUploadPublisherImpl wrapped = this;
+        return new GridFSUploadPublisher<ObjectId>() {
+
+            @Override
+            public ObjectId getObjectId() {
+                return wrapped.getObjectId();
+            }
+
+            @Override
+            public BsonValue getId() {
+                return wrapped.getId();
+            }
+
+            @Override
+            public void subscribe(final Subscriber<? super ObjectId> subscriber) {
+                Mono.from(wrapped)
+                        .thenReturn(getObjectId())
+                        .subscribe(subscriber);
+            }
+        };
+    }
+
+    private Mono<Void> createCheckAndCreateIndexesMono(@Nullable final Timeout timeout) {
+        return collectionWithTimeoutDeferred(filesCollection.withDocumentClass(Document.class).withReadPreference(primary()), timeout)
+                .map(collection -> clientSession != null ?
collection.find(clientSession) : collection.find())
+                .flatMap(findPublisher -> Mono.from(findPublisher.projection(PROJECTION).first()))
+                .switchIfEmpty(Mono.defer(() ->
+                        checkAndCreateIndex(filesCollection.withReadPreference(primary()), FILES_INDEX, timeout)
+                                .then(checkAndCreateIndex(chunksCollection.withReadPreference(primary()), CHUNKS_INDEX, timeout))
+                                .then(Mono.empty())
+                ))
+                .then();
+    }
+
+    private Mono<Boolean> hasIndex(final MongoCollection<?> collection, final Document index, @Nullable final Timeout timeout) {
+        return collectionWithTimeoutDeferred(collection, timeout)
+                .map(wrappedCollection -> {
+                    if (clientSession != null) {
+                        return wrappedCollection.listIndexes(clientSession);
+                    } else {
+                        return wrappedCollection.listIndexes();
+                    }
+                }).flatMapMany(Flux::from)
+                .collectList()
+                .map(indexes -> {
+                    boolean hasIndex = false;
+                    for (Document result : indexes) {
+                        Document indexDoc = result.get("key", new Document());
+                        for (final Map.Entry<String, Object> entry : indexDoc.entrySet()) {
+                            if (entry.getValue() instanceof Number) {
+                                entry.setValue(((Number) entry.getValue()).intValue());
+                            }
+                        }
+                        if (indexDoc.equals(index)) {
+                            hasIndex = true;
+                            break;
+                        }
+                    }
+                    return hasIndex;
+                });
+    }
+
+    private Mono<Void> checkAndCreateIndex(final MongoCollection<?> collection, final Document index, @Nullable final Timeout timeout) {
+        return hasIndex(collection, index, timeout).flatMap(hasIndex -> {
+            if (!hasIndex) {
+                return createIndexMono(collection, index, timeout).flatMap(s -> Mono.empty());
+            } else {
+                return Mono.empty();
+            }
+        });
+    }
+
+    private Mono<String> createIndexMono(final MongoCollection<?> collection, final Document index, @Nullable final Timeout timeout) {
+        return collectionWithTimeoutDeferred(collection, timeout).flatMap(wrappedCollection ->
+                Mono.from(clientSession == null ? wrappedCollection.createIndex(index) : wrappedCollection.createIndex(clientSession, index))
+        );
+    }
+
+    private Mono<Long> createSaveChunksMono(final AtomicBoolean terminated, @Nullable final Timeout timeout) {
+        return new ResizingByteBufferFlux(source, chunkSizeBytes)
+                .takeUntilOther(createMonoTimer(timeout))
+                .index()
+                .flatMap(indexAndBuffer -> {
+                    if (terminated.get()) {
+                        return Mono.empty();
+                    }
+                    Long index = indexAndBuffer.getT1();
+                    ByteBuffer byteBuffer = indexAndBuffer.getT2();
+                    byte[] byteArray = new byte[byteBuffer.remaining()];
+                    if (byteBuffer.hasArray()) {
+                        System.arraycopy(byteBuffer.array(), byteBuffer.position(), byteArray, 0, byteBuffer.remaining());
+                    } else {
+                        byteBuffer.mark();
+                        byteBuffer.get(byteArray);
+                        byteBuffer.reset();
+                    }
+                    Binary data = new Binary(byteArray);
+
+                    Document chunkDocument = new Document("files_id", fileId)
+                            .append("n", index.intValue())
+                            .append("data", data);
+
+                    Publisher<InsertOneResult> insertOnePublisher = clientSession == null
+                            ? collectionWithTimeout(chunksCollection, timeout, TIMEOUT_ERROR_MESSAGE).insertOne(chunkDocument)
+                            : collectionWithTimeout(chunksCollection, timeout, TIMEOUT_ERROR_MESSAGE)
+                            .insertOne(clientSession, chunkDocument);
+
+                    return Mono.from(insertOnePublisher).thenReturn(data.length());
+                })
+                .reduce(0L, Long::sum);
+    }
+
+    /**
+     * Creates a Mono that emits a {@link MongoOperationTimeoutException} after the specified timeout.
+     *
+     * @param timeout - remaining timeout.
+     * @return Mono that emits a {@link MongoOperationTimeoutException}.
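+     * <p>
+     * Editor's note (illustration only, not part of the patch): this timer is raced against the chunk
+     * source via {@code takeUntilOther} in {@code createSaveChunksMono}. The underlying Reactor pattern,
+     * sketched with a hypothetical fixed 5-second budget and a {@code chunkSource} flux:
+     * <pre>{@code
+     * Mono<Long> timer = Mono.delay(Duration.ofSeconds(5))
+     *         .then(Mono.error(new MongoOperationTimeoutException("timed out")));
+     * Flux<ByteBuffer> bounded = chunkSource.takeUntilOther(timer); // fails if the source stalls
+     * }</pre>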
+     */
+    private static Mono<Long> createMonoTimer(final @Nullable Timeout timeout) {
+        return Timeout.nullAsInfinite(timeout).call(MILLISECONDS,
+                () -> Mono.never(),
+                (ms) -> Mono.delay(ofMillis(ms)).then(createTimeoutMonoError()),
+                () -> createTimeoutMonoError());
+    }
+
+    private static Mono<Long> createTimeoutMonoError() {
+        return Mono.error(TimeoutContext.createMongoTimeoutException(
+                "GridFS waiting for data from the source Publisher exceeded the timeout limit."));
+    }
+
+    private Mono<InsertOneResult> createSaveFileDataMono(final AtomicBoolean terminated,
+                                                         final long lengthInBytes,
+                                                         @Nullable final Timeout timeout) {
+        Mono<MongoCollection<GridFSFile>> filesCollectionMono = collectionWithTimeoutDeferred(filesCollection, timeout);
+        if (terminated.compareAndSet(false, true)) {
+            GridFSFile gridFSFile = new GridFSFile(fileId, filename, lengthInBytes, chunkSizeBytes, new Date(), metadata);
+            if (clientSession != null) {
+                return filesCollectionMono.flatMap(collection -> Mono.from(collection.insertOne(clientSession, gridFSFile)));
+            } else {
+                return filesCollectionMono.flatMap(collection -> Mono.from(collection.insertOne(gridFSFile)));
+            }
+        } else {
+            return Mono.empty();
+        }
+    }
+
+    private Mono<DeleteResult> createCancellationMono(final AtomicBoolean terminated, @Nullable final Timeout timeout) {
+        Mono<MongoCollection<Document>> chunksCollectionMono = collectionWithTimeoutDeferred(chunksCollection, timeout);
+        if (terminated.compareAndSet(false, true)) {
+            if (clientSession != null) {
+                return chunksCollectionMono.flatMap(collection -> Mono.from(collection
+                        .deleteMany(clientSession, new Document("files_id", fileId))));
+            } else {
+                return chunksCollectionMono.flatMap(collection -> Mono.from(collection
+                        .deleteMany(new Document("files_id", fileId))));
+            }
+        } else {
+            return Mono.empty();
+        }
+    }
+}
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/ResizingByteBufferFlux.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/ResizingByteBufferFlux.java
new file mode 100644
index 00000000000..1fde4883a8a
--- /dev/null
+++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/ResizingByteBufferFlux.java
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.internal.gridfs;
+
+import com.mongodb.lang.NonNull;
+import org.reactivestreams.Publisher;
+import org.reactivestreams.Subscription;
+import reactor.core.CoreSubscriber;
+import reactor.core.publisher.BaseSubscriber;
+import reactor.core.publisher.Flux;
+import reactor.core.publisher.FluxSink;
+
+import java.nio.Buffer;
+import java.nio.ByteBuffer;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static com.mongodb.assertions.Assertions.isTrue;
+import static com.mongodb.assertions.Assertions.notNull;
+
+class ResizingByteBufferFlux extends Flux<ByteBuffer> {
+
+    private final Publisher<ByteBuffer> source;
+    private final int outputByteBufferSize;
+
+    ResizingByteBufferFlux(final Publisher<ByteBuffer> source, final int outputByteBufferSize) {
+        notNull("source must not be null", source);
+        isTrue("'outputByteBufferSize' must be a positive number", outputByteBufferSize > 0);
+        this.source = source;
+        this.outputByteBufferSize = outputByteBufferSize;
+    }
+
+    @Override
+    public void subscribe(final CoreSubscriber<? super ByteBuffer> actual) {
+        Flux.<ByteBuffer>push(sink -> {
+
+            BaseSubscriber<ByteBuffer> subscriber = new BaseSubscriber<ByteBuffer>() {
+                private volatile ByteBuffer remainder;
+                private final AtomicLong requested = new AtomicLong();
+                private final AtomicBoolean startedProcessing = new AtomicBoolean();
+                private volatile boolean finished = false;
+
+                @Override
+                protected void hookOnSubscribe(final Subscription subscription) {
+                    sink.onCancel(() -> upstream().cancel());
+                    sink.onRequest(l -> {
+                        requested.addAndGet(l);
+                        if (startedProcessing.compareAndSet(false, true)) {
+                            upstream().request(1);
+                        }
+                    });
+                }
+
+                @Override
+                protected void hookOnNext(@NonNull final ByteBuffer value) {
+                    if (remainder == null || remainder.remaining() == 0) {
+                        remainder = value;
+                    } else {
+                        byte[] byteArray = new byte[remainder.remaining() + value.remaining()];
+                        ByteBuffer newBuffer = ByteBuffer.wrap(byteArray);
+                        copyByteBuffer(remainder, newBuffer);
+                        copyByteBuffer(value, newBuffer);
+                        ((Buffer) newBuffer).flip();
+                        remainder = newBuffer;
+                    }
+
+                    while (remainder != null && remainder.remaining() >= outputByteBufferSize) {
+                        int newLimit = remainder.position() + outputByteBufferSize;
+                        ByteBuffer next = remainder.duplicate();
+                        ((Buffer) next).limit(newLimit);
+                        requested.decrementAndGet();
+                        sink.next(next);
+                        ((Buffer) remainder).position(newLimit);
+                    }
+
+                    if (requested.get() > 0) {
+                        upstream().request(1);
+                    }
+                }
+
+                @Override
+                protected void hookOnComplete() {
+                    if (!finished) {
+                        finished = true;
+                        if (remainder != null && remainder.remaining() > 0) {
+                            sink.next(remainder);
+                        }
+                        sink.complete();
+                    }
+                }
+
+                @Override
+                protected void hookOnError(@NonNull final Throwable throwable) {
+                    sink.error(throwable);
+                }
+
+                private void copyByteBuffer(final ByteBuffer original, final ByteBuffer destination) {
+                    if (original.hasArray() && destination.hasArray()) {
+                        System.arraycopy(original.array(), original.position(), destination.array(), destination.position(),
+                                original.remaining());
+                        ((Buffer) destination).position(destination.position() + original.remaining());
+                    } else {
+                        destination.put(original);
+                    }
+                }
+            };
+
+            source.subscribe(subscriber);
+        }, FluxSink.OverflowStrategy.BUFFER)
+                .subscribe(actual);
+    }
+}
diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/package-info.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/package-info.java
new file mode 100644
index
00000000000..d2582b398da --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/gridfs/package-info.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. + */ +@Internal +@NonNullApi +package com.mongodb.reactivestreams.client.internal.gridfs; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/package-info.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/package-info.java new file mode 100644 index 00000000000..b683433d231 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/package-info.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. + */ + +@Internal +@NonNullApi +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/vault/ClientEncryptionImpl.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/vault/ClientEncryptionImpl.java new file mode 100644 index 00000000000..5ae7f4815e5 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/vault/ClientEncryptionImpl.java @@ -0,0 +1,331 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.internal.vault; + +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.MongoConfigurationException; +import com.mongodb.MongoNamespace; +import com.mongodb.MongoUpdatedEncryptedFieldsException; +import com.mongodb.ReadConcern; +import com.mongodb.WriteConcern; +import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.CreateEncryptedCollectionParams; +import com.mongodb.client.model.Filters; +import com.mongodb.client.model.UpdateOneModel; +import com.mongodb.client.model.Updates; +import com.mongodb.client.model.vault.DataKeyOptions; +import com.mongodb.client.model.vault.EncryptOptions; +import com.mongodb.client.model.vault.RewrapManyDataKeyOptions; +import com.mongodb.client.model.vault.RewrapManyDataKeyResult; +import com.mongodb.client.result.DeleteResult; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.FindPublisher; +import com.mongodb.reactivestreams.client.MongoClient; +import com.mongodb.reactivestreams.client.MongoClients; +import com.mongodb.reactivestreams.client.MongoCollection; +import com.mongodb.reactivestreams.client.MongoDatabase; +import com.mongodb.reactivestreams.client.internal.crypt.Crypt; +import com.mongodb.reactivestreams.client.internal.crypt.Crypts; +import com.mongodb.reactivestreams.client.vault.ClientEncryption; +import org.bson.BsonArray; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.BsonNull; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; +import org.reactivestreams.Publisher; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.util.List; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; +import static com.mongodb.internal.capi.MongoCryptHelper.validateRewrapManyDataKeyOptions; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeout; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.databaseWithTimeout; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.databaseWithTimeoutDeferred; +import static java.lang.String.format; +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.bson.internal.BsonUtil.mutableDeepCopy; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
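+ * <p>
+ * Editor's note (illustration only, not part of the patch): instances are normally obtained through
+ * {@code ClientEncryptions.create}. A minimal sketch, assuming {@code localMasterKey} holds a 96-byte local KMS key:
+ * <pre>{@code
+ * Map<String, Map<String, Object>> kmsProviders =
+ *         singletonMap("local", singletonMap("key", (Object) localMasterKey));
+ * ClientEncryptionSettings settings = ClientEncryptionSettings.builder()
+ *         .keyVaultMongoClientSettings(MongoClientSettings.builder()
+ *                 .applyConnectionString(new ConnectionString("mongodb://localhost")).build())
+ *         .keyVaultNamespace("encryption.__keyVault")
+ *         .kmsProviders(kmsProviders)
+ *         .build();
+ * ClientEncryption clientEncryption = ClientEncryptions.create(settings);
+ * BsonBinary keyId = Mono.from(clientEncryption.createDataKey("local")).block();
+ * }</pre>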
+ */ +public class ClientEncryptionImpl implements ClientEncryption { + private static final String TIMEOUT_ERROR_MESSAGE_CREATE_DATA_KEY = "Creating data key exceeded the timeout limit."; + private static final String TIMEOUT_ERROR_MESSAGE_REWRAP_DATA_KEY = "Rewrapping data key exceeded the timeout limit."; + private static final String TIMEOUT_ERROR_MESSAGE_CREATE_COLLECTION = "Encryption collection creation exceeded the timeout limit."; + private final Crypt crypt; + private final ClientEncryptionSettings options; + private final MongoClient keyVaultClient; + private final MongoCollection collection; + + public ClientEncryptionImpl(final ClientEncryptionSettings options) { + this(MongoClients.create(options.getKeyVaultMongoClientSettings()), options); + } + + @VisibleForTesting(otherwise = PRIVATE) + public ClientEncryptionImpl(final MongoClient keyVaultClient, final ClientEncryptionSettings options) { + this.keyVaultClient = keyVaultClient; + this.crypt = Crypts.create(keyVaultClient, options); + this.options = options; + MongoNamespace namespace = new MongoNamespace(options.getKeyVaultNamespace()); + this.collection = getVaultCollection(keyVaultClient, options, namespace); + } + + private static MongoCollection getVaultCollection(final MongoClient keyVaultClient, + final ClientEncryptionSettings options, + final MongoNamespace namespace) { + MongoCollection vaultCollection = keyVaultClient.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName(), BsonDocument.class) + .withWriteConcern(WriteConcern.MAJORITY) + .withReadConcern(ReadConcern.MAJORITY); + + Long timeoutMs = options.getTimeout(MILLISECONDS); + if (timeoutMs != null){ + vaultCollection = vaultCollection.withTimeout(timeoutMs, MILLISECONDS); + } + return vaultCollection; + } + + @Override + public Publisher createDataKey(final String kmsProvider) { + return createDataKey(kmsProvider, new DataKeyOptions()); + } + + @Override + public Publisher createDataKey(final String kmsProvider, final DataKeyOptions dataKeyOptions) { + return Mono.defer(() -> { + Timeout operationTimeout = startTimeout(); + return createDataKey(kmsProvider, dataKeyOptions, operationTimeout); + }); + } + + public Mono createDataKey(final String kmsProvider, final DataKeyOptions dataKeyOptions, @Nullable final Timeout operationTimeout) { + return crypt.createDataKey(kmsProvider, dataKeyOptions, operationTimeout) + .flatMap(dataKeyDocument -> { + MongoNamespace namespace = new MongoNamespace(options.getKeyVaultNamespace()); + + MongoCollection vaultCollection = keyVaultClient + .getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName(), BsonDocument.class) + .withWriteConcern(WriteConcern.MAJORITY); + return Mono.from(collectionWithTimeout(vaultCollection, operationTimeout, TIMEOUT_ERROR_MESSAGE_CREATE_DATA_KEY) + .insertOne(dataKeyDocument)) + .map(i -> dataKeyDocument.getBinary("_id")); + }); + } + + @Override + public Publisher encrypt(final BsonValue value, final EncryptOptions options) { + notNull("value", value); + notNull("options", options); + + return Mono.defer(() -> crypt.encryptExplicitly(value, options, startTimeout())); + } + + @Override + public Publisher encryptExpression(final Bson expression, final EncryptOptions options) { + return Mono.defer(() -> crypt.encryptExpression( + expression.toBsonDocument(BsonDocument.class, collection.getCodecRegistry()), + options, + startTimeout())); + } + + @Override + public Publisher decrypt(final BsonBinary value) { + notNull("value", value); + 
return Mono.defer(() -> crypt.decryptExplicitly(value, startTimeout())); + } + + @Override + public Publisher deleteKey(final BsonBinary id) { + return collection.deleteOne(Filters.eq("_id", id)); + } + + @Override + public Publisher getKey(final BsonBinary id) { + return collection.find(Filters.eq("_id", id)).first(); + } + + @Override + public FindPublisher getKeys() { + return collection.find(); + } + + @Override + public Publisher addKeyAltName(final BsonBinary id, final String keyAltName) { + return collection.findOneAndUpdate(Filters.eq("_id", id), Updates.addToSet("keyAltNames", keyAltName)); + } + + @Override + public Publisher removeKeyAltName(final BsonBinary id, final String keyAltName) { + BsonDocument updateDocument = new BsonDocument() + .append("$set", new BsonDocument() + .append("keyAltNames", new BsonDocument() + .append("$cond", new BsonArray(asList( + new BsonDocument() + .append("$eq", new BsonArray(asList( + new BsonString("$keyAltNames"), + new BsonArray(singletonList(new BsonString(keyAltName)))))), + new BsonString("$$REMOVE"), + new BsonDocument() + .append("$filter", new BsonDocument() + .append("input", new BsonString("$keyAltNames")) + .append("cond", new BsonDocument() + .append("$ne", new BsonArray(asList( + new BsonString("$$this"), + new BsonString(keyAltName)))))) + ))) + ) + ); + return collection.findOneAndUpdate(Filters.eq("_id", id), singletonList(updateDocument)); + } + + @Override + public Publisher getKeyByAltName(final String keyAltName) { + return collection.find(Filters.eq("keyAltNames", keyAltName)).first(); + } + + @Override + public Publisher rewrapManyDataKey(final Bson filter) { + return rewrapManyDataKey(filter, new RewrapManyDataKeyOptions()); + } + + @Override + public Publisher rewrapManyDataKey(final Bson filter, final RewrapManyDataKeyOptions options) { + return Mono.fromRunnable(() -> validateRewrapManyDataKeyOptions(options)) + .then(Mono.defer(()-> { + Timeout operationTimeout = startTimeout(); + return crypt.rewrapManyDataKey(filter.toBsonDocument(BsonDocument.class, collection.getCodecRegistry()), options, operationTimeout) + .flatMap(results -> { + if (results.isEmpty()) { + return Mono.fromCallable(RewrapManyDataKeyResult::new); + } + List> updateModels = results.getArray("v", new BsonArray()).stream().map(v -> { + BsonDocument updateDocument = v.asDocument(); + return new UpdateOneModel(Filters.eq(updateDocument.get("_id")), + Updates.combine( + Updates.set("masterKey", updateDocument.get("masterKey")), + Updates.set("keyMaterial", updateDocument.get("keyMaterial")), + Updates.currentDate("updateDate")) + ); + }).collect(Collectors.toList()); + return Mono.from(collectionWithTimeout(collection, operationTimeout, TIMEOUT_ERROR_MESSAGE_REWRAP_DATA_KEY) + .bulkWrite(updateModels)).map(RewrapManyDataKeyResult::new); + }); + })); + } + + @Override + public Publisher createEncryptedCollection(final MongoDatabase database, final String collectionName, + final CreateCollectionOptions createCollectionOptions, final CreateEncryptedCollectionParams createEncryptedCollectionParams) { + notNull("collectionName", collectionName); + notNull("createCollectionOptions", createCollectionOptions); + notNull("createEncryptedCollectionParams", createEncryptedCollectionParams); + MongoNamespace namespace = new MongoNamespace(database.getName(), collectionName); + Bson rawEncryptedFields = createCollectionOptions.getEncryptedFields(); + if (rawEncryptedFields == null) { + throw new MongoConfigurationException(format("`encryptedFields` is not 
configured for the collection %s.", namespace)); + } + CodecRegistry codecRegistry = options.getKeyVaultMongoClientSettings().getCodecRegistry(); + BsonDocument encryptedFields = rawEncryptedFields.toBsonDocument(BsonDocument.class, codecRegistry); + BsonValue fields = encryptedFields.get("fields"); + if (fields != null && fields.isArray()) { + String kmsProvider = createEncryptedCollectionParams.getKmsProvider(); + DataKeyOptions dataKeyOptions = new DataKeyOptions(); + BsonDocument masterKey = createEncryptedCollectionParams.getMasterKey(); + if (masterKey != null) { + dataKeyOptions.masterKey(masterKey); + } + String keyIdBsonKey = "keyId"; + return Mono.defer(() -> { + Timeout operationTimeout = startTimeout(); + // `Mono.defer` results in `maybeUpdatedEncryptedFields` and `dataKeyMightBeCreated` (mutable state) + // being created once per `Subscriber`, which allows the produced `Mono` to support multiple `Subscribers`. + BsonDocument maybeUpdatedEncryptedFields = mutableDeepCopy(encryptedFields); + AtomicBoolean dataKeyMightBeCreated = new AtomicBoolean(); + Iterable> publishersOfUpdatedFields = () -> maybeUpdatedEncryptedFields.get("fields").asArray() + .stream() + .filter(BsonValue::isDocument) + .map(BsonValue::asDocument) + .filter(field -> field.containsKey(keyIdBsonKey)) + .filter(field -> Objects.equals(field.get(keyIdBsonKey), BsonNull.VALUE)) + // here we rely on the `createDataKey` publisher being cold, i.e., doing nothing until it is subscribed to + .map(field -> Mono.fromDirect(createDataKey(kmsProvider, dataKeyOptions, operationTimeout)) + // This is the closest we can do with reactive streams to setting the `dataKeyMightBeCreated` flag + // immediately before calling `createDataKey`. + .doOnSubscribe(subscription -> dataKeyMightBeCreated.set(true)) + .doOnNext(dataKeyId -> field.put(keyIdBsonKey, dataKeyId)) + .map(dataKeyId -> field) + ) + .iterator(); + // `Flux.concat` ensures that data keys are created / fields are updated sequentially one by one + Flux publisherOfUpdatedFields = Flux.concat(publishersOfUpdatedFields); + return publisherOfUpdatedFields + // All write actions in `doOnNext` above happen-before the completion (`onComplete`/`onError`) signals + // for this publisher, because all signals are serial. `thenEmpty` further guarantees that the completion signal + // for this publisher happens-before the `onSubscribe` signal for the publisher passed to it + // (the next publisher, which creates a collection). + // `defer` defers calling `createCollection` until the next publisher is subscribed to. + // Therefore, all write actions in `doOnNext` above happen-before the invocation of `createCollection`, + // which means `createCollection` is guaranteed to observe all those write actions, i.e., + // it is guaranteed to observe the updated document via the `maybeUpdatedEncryptedFields` reference. + // + // Similarly, the `Subscriber` of the returned `Publisher` is guaranteed to observe all those write actions + // via the `maybeUpdatedEncryptedFields` reference, which is emitted as a result of `thenReturn`. 
+ .thenEmpty(Mono.defer(() -> Mono.fromDirect(databaseWithTimeout(database, + TIMEOUT_ERROR_MESSAGE_CREATE_COLLECTION, operationTimeout) + .createCollection(collectionName, new CreateCollectionOptions(createCollectionOptions) + .encryptedFields(maybeUpdatedEncryptedFields)))) + ) + .onErrorMap(e -> dataKeyMightBeCreated.get(), e -> + new MongoUpdatedEncryptedFieldsException(maybeUpdatedEncryptedFields, + format("Failed to create %s.", namespace), e) + ) + .thenReturn(maybeUpdatedEncryptedFields); + }); + } else { + return databaseWithTimeoutDeferred(database, startTimeout()) + .flatMap(wrappedDatabase -> Mono.fromDirect(wrappedDatabase + .createCollection(collectionName, createCollectionOptions))) + .thenReturn(encryptedFields); + } + } + + @Override + public void close() { + keyVaultClient.close(); + crypt.close(); + } + + @Nullable + private Timeout startTimeout() { + return TimeoutContext.startTimeout(options.getTimeout(MILLISECONDS)); + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/vault/package-info.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/vault/package-info.java new file mode 100644 index 00000000000..c4f3d32544a --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/internal/vault/package-info.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. + */ +@Internal +@NonNullApi +package com.mongodb.reactivestreams.client.internal.vault; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/package-info.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/package-info.java new file mode 100644 index 00000000000..80d98a2346a --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/package-info.java @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains classes for the reactive stream client implementation. + *
+ * <p>
+ * All API {@link org.reactivestreams.Publisher}s are + * cold, + * meaning that nothing happens until they are subscribed to. + * So just creating a {@link org.reactivestreams.Publisher} won’t cause any network IO. + * It’s not until {@link org.reactivestreams.Publisher#subscribe(org.reactivestreams.Subscriber)} is called that the driver executes the + * operation. + *
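+ * <p>
+ * Editor's note (illustration only, not part of the patch), a minimal sketch of the cold behaviour,
+ * assuming a {@code collection} and a {@code subscriber}:
+ * <pre>{@code
+ * Publisher<Document> first = collection.find().first(); // no network I/O has happened yet
+ * first.subscribe(subscriber);                           // the query is executed now
+ * }</pre>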
+ * <p>
+ * All API {@link org.reactivestreams.Publisher}s are unicast. + * Each {@link org.reactivestreams.Subscription} to a {@link org.reactivestreams.Publisher} relates to a single MongoDB operation and its + * {@link org.reactivestreams.Subscriber} will receive its own specific set of results. + */ + +@NonNullApi +package com.mongodb.reactivestreams.client; + +import com.mongodb.lang.NonNullApi; diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/vault/ClientEncryption.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/vault/ClientEncryption.java new file mode 100644 index 00000000000..38ad1e618e6 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/vault/ClientEncryption.java @@ -0,0 +1,220 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.vault; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.MongoUpdatedEncryptedFieldsException; +import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.CreateEncryptedCollectionParams; +import com.mongodb.client.model.vault.DataKeyOptions; +import com.mongodb.client.model.vault.EncryptOptions; +import com.mongodb.client.model.vault.RewrapManyDataKeyOptions; +import com.mongodb.client.model.vault.RewrapManyDataKeyResult; +import com.mongodb.client.result.DeleteResult; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.FindPublisher; +import com.mongodb.reactivestreams.client.MongoDatabase; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.BsonValue; +import org.bson.conversions.Bson; +import org.reactivestreams.Publisher; + +import java.io.Closeable; + +/** + * The Key vault. + *
+ * <p>Used to create data encryption keys, and to explicitly encrypt and decrypt values when auto-encryption is not an option.</p>
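+ * <p>
+ * Editor's note (illustration only, not part of the patch): an explicit encrypt/decrypt round trip,
+ * assuming a {@code clientEncryption} instance and an existing data key id {@code keyId}:
+ * <pre>{@code
+ * EncryptOptions options = new EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic").keyId(keyId);
+ * BsonBinary encrypted = Mono.from(clientEncryption.encrypt(new BsonString("secret"), options)).block();
+ * BsonValue decrypted = Mono.from(clientEncryption.decrypt(encrypted)).block(); // BsonString("secret")
+ * }</pre>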
+ * @since 1.12 + */ +public interface ClientEncryption extends Closeable { + + /** + * Create a data key with the given KMS provider. + * + *
+ * <p>Creates a new key document and inserts into the key vault collection.</p>
+     *
+     * @param kmsProvider the KMS provider
+     * @return a Publisher containing the identifier for the created data key
+     */
+    Publisher<BsonBinary> createDataKey(String kmsProvider);
+
+    /**
+     * Create a data key with the given KMS provider and options.
+     *
+     * <p>Creates a new key document and inserts into the key vault collection.</p>
+     *
+     * @param kmsProvider the KMS provider
+     * @param dataKeyOptions the options for data key creation
+     * @return a Publisher containing the identifier for the created data key
+     */
+    Publisher<BsonBinary> createDataKey(String kmsProvider, DataKeyOptions dataKeyOptions);
+
+    /**
+     * Encrypt the given value with the given options.
+     * <p>The driver may throw an exception for prohibited BSON value types</p>
+     *
+     * @param value the value to encrypt
+     * @param options the options for data encryption
+     * @return a Publisher containing the encrypted value, a BSON binary of subtype 6
+     */
+    Publisher<BsonBinary> encrypt(BsonValue value, EncryptOptions options);
+
+    /**
+     * Encrypts a Match Expression or Aggregate Expression to query a range index.
+     * <p>
+     * The expression is expected to be in one of the following forms:
+     * <ul>
+     *     <li>A Match Expression of this form:
+     *     {@code {$and: [{<field>: {$gt: <value1>}}, {<field>: {$lt: <value2>}}]}}</li>
+     *     <li>An Aggregate Expression of this form:
+     *     {@code {$and: [{$gt: [<fieldpath>, <value1>]}, {$lt: [<fieldpath>, <value2>]}] }}</li>
+     * </ul>
+ * {@code $gt} may also be {@code $gte}. {@code $lt} may also be {@code $lte}. + * + *
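+     * <p>
+     * Editor's note (illustration only, not part of the patch): a sketch of encrypting a range Match
+     * Expression, assuming an {@code EncryptOptions} value already configured with a key id,
+     * {@code queryType("range")} and matching {@code RangeOptions}:
+     * <pre>{@code
+     * BsonDocument expression = BsonDocument.parse("{$and: [{age: {$gt: 18}}, {age: {$lt: 65}}]}");
+     * BsonDocument encrypted = Mono.from(clientEncryption.encryptExpression(expression, options)).block();
+     * }</pre>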
+     * <p>Only supported when queryType is "range" and algorithm is "Range".
+     *
+     * @param expression the Match Expression or Aggregate Expression
+     * @param options the options
+     * @return a Publisher containing the queryable encrypted range expression
+     * @since 4.9
+     * @mongodb.server.release 8.0
+     * @mongodb.driver.manual /core/queryable-encryption/ queryable encryption
+     * @mongodb.driver.manual reference/operator/aggregation/match/ $match
+     */
+    Publisher<BsonDocument> encryptExpression(Bson expression, EncryptOptions options);
+
+    /**
+     * Decrypt the given value.
+     *
+     * @param value the value to decrypt, which must be of subtype 6
+     * @return a Publisher containing the decrypted value
+     */
+    Publisher<BsonValue> decrypt(BsonBinary value);
+
+    /**
+     * Removes the key document with the given data key from the key vault collection.
+     * @param id the data key UUID (BSON binary subtype 0x04)
+     * @return a Publisher containing the delete result
+     * @since 4.7
+     */
+    Publisher<DeleteResult> deleteKey(BsonBinary id);
+
+    /**
+     * Finds a single key document with the given UUID (BSON binary subtype 0x04).
+     *
+     * @param id the data key UUID (BSON binary subtype 0x04)
+     * @return a Publisher containing the single key document or an empty publisher if there is no match
+     * @since 4.7
+     */
+    @Nullable
+    Publisher<BsonDocument> getKey(BsonBinary id);
+
+    /**
+     * Finds all documents in the key vault collection.
+     * @return a find publisher for the documents in the key vault collection
+     * @since 4.7
+     */
+    FindPublisher<BsonDocument> getKeys();
+
+    /**
+     * Adds a keyAltName to the keyAltNames array of the key document in the key vault collection with the given UUID.
+     *
+     * @param id the data key UUID (BSON binary subtype 0x04)
+     * @param keyAltName the alternative key name to add to the keyAltNames array
+     * @return a Publisher containing the previous version of the key document or an empty publisher if no match
+     * @since 4.7
+     */
+    Publisher<BsonDocument> addKeyAltName(BsonBinary id, String keyAltName);
+
+    /**
+     * Removes a keyAltName from the keyAltNames array of the key document in the key vault collection with the given id.
+     *
+     * @param id the data key UUID (BSON binary subtype 0x04)
+     * @param keyAltName the alternative key name
+     * @return a Publisher containing the previous version of the key document or an empty publisher if there is no match
+     * @since 4.7
+     */
+    Publisher<BsonDocument> removeKeyAltName(BsonBinary id, String keyAltName);
+
+    /**
+     * Returns a key document in the key vault collection with the given keyAltName.
+     *
+     * @param keyAltName the alternative key name
+     * @return a Publisher containing the matching key document or an empty publisher if there is no match
+     * @since 4.7
+     */
+    Publisher<BsonDocument> getKeyByAltName(String keyAltName);
+
+    /**
+     * Decrypts multiple data keys and (re-)encrypts them with the current masterKey.
+     *
+     * @param filter the filter
+     * @return a Publisher containing the result
+     * @since 4.7
+     */
+    Publisher<RewrapManyDataKeyResult> rewrapManyDataKey(Bson filter);
+
+    /**
+     * Decrypts multiple data keys and (re-)encrypts them with a new masterKey, or with their current masterKey if a new one is not given.
+     *
+     * @param filter the filter
+     * @param options the options
+     * @return a Publisher containing the result
+     * @since 4.7
+     */
+    Publisher<RewrapManyDataKeyResult> rewrapManyDataKey(Bson filter, RewrapManyDataKeyOptions options);
+
+    /**
+     * {@linkplain MongoDatabase#createCollection(String, CreateCollectionOptions) Create} a new collection with encrypted fields,
+     * automatically {@linkplain #createDataKey(String, DataKeyOptions) creating}
+     * new data encryption keys when needed based on the configured
+     * {@link CreateCollectionOptions#getEncryptedFields() encryptedFields}, which must be specified.
+     * This method does not modify the configured {@code encryptedFields} when creating new data keys,
+     * instead it creates a new configuration if needed.
+     *
+     * @param database The database to use for creating the collection.
+     * @param collectionName The name for the collection to create.
+     * @param createCollectionOptions Options for creating the collection.
+     * @param createEncryptedCollectionParams Auxiliary parameters for creating an encrypted collection.
+     * @return A publisher of the (potentially updated) {@code encryptedFields} configuration that was used to create the
+     * collection. A user may use this document to configure {@link AutoEncryptionSettings#getEncryptedFieldsMap()}.
+     * <p>
+     * {@linkplain org.reactivestreams.Subscriber#onError(Throwable) Signals} {@link MongoUpdatedEncryptedFieldsException}
+     * if an exception happens after creating at least one data key. This exception makes the updated {@code encryptedFields}
+     * {@linkplain MongoUpdatedEncryptedFieldsException#getEncryptedFields() available} to the caller.</p>
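+     * <p>
+     * Editor's note (illustration only, not part of the patch): a consumption sketch, assuming a
+     * {@code clientEncryption}, a {@code database}, and {@code encryptedFields} whose {@code keyId}s are
+     * {@code null} placeholders ({@code cleanUpDataKeys} is a hypothetical application helper):
+     * <pre>{@code
+     * Mono.from(clientEncryption.createEncryptedCollection(database, "users",
+     *                 new CreateCollectionOptions().encryptedFields(encryptedFields),
+     *                 new CreateEncryptedCollectionParams("local")))
+     *         .doOnError(MongoUpdatedEncryptedFieldsException.class,
+     *                 e -> cleanUpDataKeys(e.getEncryptedFields()))
+     *         .block();
+     * }</pre>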
+ * + * @since 4.9 + * @mongodb.server.release 7.0 + * @mongodb.driver.manual reference/command/create Create Command + */ + Publisher createEncryptedCollection(MongoDatabase database, String collectionName, + CreateCollectionOptions createCollectionOptions, CreateEncryptedCollectionParams createEncryptedCollectionParams); + + @Override + void close(); +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/vault/ClientEncryptions.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/vault/ClientEncryptions.java new file mode 100644 index 00000000000..269e8f37ded --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/vault/ClientEncryptions.java @@ -0,0 +1,41 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.vault; + +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.reactivestreams.client.internal.vault.ClientEncryptionImpl; + +/** + * Factory for ClientEncryption implementations. + * + * @since 1.12 + */ +public final class ClientEncryptions { + + /** + * Create a key vault with the given options. + * + * @param options the key vault options + * @return the key vault + */ + public static ClientEncryption create(final ClientEncryptionSettings options) { + return new ClientEncryptionImpl(options); + } + + private ClientEncryptions() { + } +} diff --git a/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/vault/package-info.java b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/vault/package-info.java new file mode 100644 index 00000000000..8089a4d17a2 --- /dev/null +++ b/driver-reactive-streams/src/main/com/mongodb/reactivestreams/client/vault/package-info.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + * This package contains the Key Vault API + * + * @since 1.12 + */ +@NonNullApi +package com.mongodb.reactivestreams.client.vault; + +import com.mongodb.lang.NonNullApi; diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/internal/connection/OidcAuthenticationAsyncProseTests.java b/driver-reactive-streams/src/test/functional/com/mongodb/internal/connection/OidcAuthenticationAsyncProseTests.java new file mode 100644 index 00000000000..de111926d79 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/internal/connection/OidcAuthenticationAsyncProseTests.java @@ -0,0 +1,68 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.MongoClientSettings; +import com.mongodb.client.MongoClient; +import com.mongodb.reactivestreams.client.MongoClients; +import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; +import org.junit.jupiter.api.Test; +import reactivestreams.helpers.SubscriberHelpers; + +import java.util.concurrent.TimeUnit; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static util.ThreadTestHelpers.executeAll; + +public class OidcAuthenticationAsyncProseTests extends OidcAuthenticationProseTests { + + @Override + protected MongoClient createMongoClient(final MongoClientSettings settings) { + return new SyncMongoClient(settings); + } + + @Test + public void testNonblockingCallbacks() { + // not a prose spec test + delayNextFind(); + + int simulatedDelayMs = 100; + TestCallback requestCallback = createCallback().setDelayMs(simulatedDelayMs); + + MongoClientSettings clientSettings = createSettings(getOidcUri(), requestCallback); + + try (com.mongodb.reactivestreams.client.MongoClient client = MongoClients.create(clientSettings)) { + executeAll(2, () -> { + SubscriberHelpers.OperationSubscriber subscriber = new SubscriberHelpers.OperationSubscriber<>(); + long t1 = System.nanoTime(); + client.getDatabase("test") + .getCollection("test") + .find() + .first() + .subscribe(subscriber); + long elapsedMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - t1); + + assertTrue(elapsedMs < simulatedDelayMs); + subscriber.get(); + }); + + // ensure both callbacks have been tested + assertEquals(1, requestCallback.getInvocations()); + } + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/AbstractClientMetadataProseTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/AbstractClientMetadataProseTest.java new file mode 100644 index 00000000000..8aa4db32e79 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/AbstractClientMetadataProseTest.java @@ -0,0 +1,34 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoDriverInformation; +import com.mongodb.client.AbstractClientMetadataProseTest; +import com.mongodb.client.MongoClient; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; + +/** + * See spec + */ +class ClientMetadataProseTest extends AbstractClientMetadataProseTest { + + protected MongoClient createMongoClient(@Nullable final MongoDriverInformation mongoDriverInformation, final MongoClientSettings mongoClientSettings) { + return new SyncMongoClient(mongoClientSettings, mongoDriverInformation); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/AsyncTransportSettingsTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/AsyncTransportSettingsTest.java new file mode 100644 index 00000000000..55db414588c --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/AsyncTransportSettingsTest.java @@ -0,0 +1,76 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.MongoClientSettings; +import com.mongodb.client.MongoClient; +import com.mongodb.connection.AsyncTransportSettings; +import com.mongodb.connection.TransportSettings; +import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; + +class AsyncTransportSettingsTest { + + @Test + void testAsyncTransportSettings() { + ExecutorService executorService = spy(Executors.newFixedThreadPool(5)); + AsyncTransportSettings asyncTransportSettings = TransportSettings.asyncBuilder() + .executorService(executorService) + .build(); + MongoClientSettings mongoClientSettings = getMongoClientSettingsBuilder() + .transportSettings(asyncTransportSettings) + .build(); + + try (MongoClient client = new SyncMongoClient(mongoClientSettings)) { + client.listDatabases().first(); + } + verify(executorService, atLeastOnce()).execute(any()); + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + @SuppressWarnings("try") + void testExternalExecutorWasShutDown(final boolean tlsEnabled) throws InterruptedException { + ExecutorService executorService = Executors.newFixedThreadPool(5); + AsyncTransportSettings asyncTransportSettings = TransportSettings.asyncBuilder() + .executorService(executorService) + .build(); + MongoClientSettings mongoClientSettings = getMongoClientSettingsBuilder() + .applyToSslSettings(builder -> builder.enabled(tlsEnabled)) + .transportSettings(asyncTransportSettings) + .build(); + + try (MongoClient ignored = new SyncMongoClient(mongoClientSettings)) { + // ignored + } + + assertTrue(executorService.awaitTermination(100, TimeUnit.MILLISECONDS)); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/AtlasSearchIndexManagementProseTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/AtlasSearchIndexManagementProseTest.java new file mode 100644 index 00000000000..f795b3c4035 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/AtlasSearchIndexManagementProseTest.java @@ -0,0 +1,32 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client; + + +import com.mongodb.MongoClientSettings; +import com.mongodb.client.AbstractAtlasSearchIndexManagementProseTest; +import com.mongodb.client.MongoClient; +import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; + +/** + * See Search Index Management Tests + */ +public class AtlasSearchIndexManagementProseTest extends AbstractAtlasSearchIndexManagementProseTest { + protected MongoClient createMongoClient(final MongoClientSettings settings) { + return new SyncMongoClient(settings); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/BatchCursorPublisherErrorTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/BatchCursorPublisherErrorTest.java new file mode 100644 index 00000000000..ce15fe3d1e4 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/BatchCursorPublisherErrorTest.java @@ -0,0 +1,95 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.client.model.Aggregates; +import com.mongodb.client.model.Filters; +import org.bson.Document; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.DynamicTest; +import org.junit.jupiter.api.TestFactory; +import org.reactivestreams.Publisher; +import reactor.core.publisher.Mono; + +import java.util.List; +import java.util.stream.Collectors; + +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; +import static com.mongodb.reactivestreams.client.Fixture.drop; +import static com.mongodb.reactivestreams.client.Fixture.getDefaultDatabase; +import static com.mongodb.reactivestreams.client.Fixture.getMongoClient; +import static java.lang.String.format; +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static java.util.stream.IntStream.rangeClosed; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.DynamicTest.dynamicTest; + +public class BatchCursorPublisherErrorTest { + + private MongoCollection collection; + + @BeforeEach + public void setup() { + collection = getDefaultDatabase().getCollection("changeStreamsCancellationTest"); + Mono.from(collection.insertMany(rangeClosed(1, 11) + .boxed() + .map(i -> Document.parse(format("{a: %s}", i))) + .collect(Collectors.toList())) + ).block(TIMEOUT_DURATION); + } + + @AfterEach + public void tearDown() { + if (collection != null) { + drop(collection.getNamespace()); + } + } + + @SuppressWarnings("deprecation") + @TestFactory + @DisplayName("test batch cursors close the cursor if onNext throws an error") + List testBatchCursorThrowsAnError() { + return asList( + dynamicTest("Aggregate Publisher", + () -> 
+                        () -> assertErrorHandling(collection.aggregate(singletonList(Aggregates.match(Filters.gt("a", 5)))))),
+                dynamicTest("Distinct Publisher", () -> assertErrorHandling(collection.distinct("a", Integer.class))),
+                dynamicTest("Find Publisher", () -> assertErrorHandling(collection.find())),
+                dynamicTest("List Collections Publisher", () -> assertErrorHandling(getDefaultDatabase().listCollections())),
+                dynamicTest("List Collection Names Publisher", () -> assertErrorHandling(getDefaultDatabase().listCollectionNames())),
+                dynamicTest("List Databases Publisher", () -> assertErrorHandling(getMongoClient().listDatabaseNames())),
+                dynamicTest("List Indexes Publisher", () -> assertErrorHandling(collection.listIndexes())),
+                dynamicTest("Map Reduce Publisher", () -> assertErrorHandling(collection.mapReduce(
+                        "function () { emit('a', this.a) }",
+                        "function (k, v) { return Array.sum(v)}")))
+        );
+    }
+
+    <T> void assertErrorHandling(final Publisher<T> publisher) {
+        TestSubscriber<T> subscriber = new TestSubscriber<>();
+        subscriber.doOnSubscribe(sub -> sub.request(5));
+        subscriber.doOnNext(t -> {
+            throw new RuntimeException("Some user error");
+        });
+        publisher.subscribe(subscriber);
+        assertDoesNotThrow(Fixture::waitForLastServerSessionPoolRelease);
+        subscriber.assertNoTerminalEvent();
+    }
+
+}
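
TestSubscriber is a helper class in this test module; its shape is roughly that of a Reactive Streams Subscriber with bounded demand whose onNext callback fails. A self-contained, hypothetical equivalent (not the actual helper) for readers without the test sources:

    import org.reactivestreams.Subscriber;
    import org.reactivestreams.Subscription;

    // Hypothetical stand-in for the TestSubscriber used above:
    // requests a bounded number of items and fails from onNext.
    final class BoundedFailingSubscriber<T> implements Subscriber<T> {
        @Override
        public void onSubscribe(final Subscription s) {
            s.request(5); // bounded demand, as in the test
        }

        @Override
        public void onNext(final T item) {
            // Simulates a user callback failing; the publisher is expected
            // to clean up: close the cursor and release the server session.
            throw new RuntimeException("Some user error");
        }

        @Override
        public void onError(final Throwable t) {
        }

        @Override
        public void onComplete() {
        }
    }
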
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ChangeStreamsCancellationTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ChangeStreamsCancellationTest.java
new file mode 100644
index 00000000000..a41c818ceea
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ChangeStreamsCancellationTest.java
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import com.mongodb.client.model.changestream.ChangeStreamDocument;
+import org.bson.Document;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import reactor.core.publisher.Mono;
+
+import static com.mongodb.ClusterFixture.TIMEOUT_DURATION;
+import static com.mongodb.reactivestreams.client.Fixture.drop;
+import static com.mongodb.reactivestreams.client.Fixture.getDefaultDatabase;
+import static com.mongodb.reactivestreams.client.Fixture.isReplicaSet;
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
+public class ChangeStreamsCancellationTest {
+
+    private MongoCollection<Document> collection;
+
+    @BeforeEach
+    public void setup() {
+        assumeTrue(isReplicaSet());
+        collection = getDefaultDatabase().getCollection("changeStreamsCancellationTest");
+    }
+
+    @AfterEach
+    public void tearDown() {
+        if (collection != null) {
+            drop(collection.getNamespace());
+        }
+    }
+
+    @Test
+    public void testCancelReleasesSessions() {
+        Mono.from(collection.insertOne(new Document())).block(TIMEOUT_DURATION);
+
+        TestSubscriber<ChangeStreamDocument<Document>> subscriber = new TestSubscriber<>();
+        subscriber.doOnSubscribe(sub -> {
+            sub.request(Integer.MAX_VALUE);
+            new Thread(() -> {
+                try {
+                    Thread.sleep(1000);
+                } catch (InterruptedException e) {
+                    Thread.currentThread().interrupt();
+                    throw new RuntimeException("Sleep interrupted");
+                }
+                sub.cancel();
+            }).start();
+        });
+        collection.watch().subscribe(subscriber);
+
+        assertDoesNotThrow(Fixture::waitForLastServerSessionPoolRelease);
+    }
+
+}
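
The cancellation pattern in testCancelReleasesSessions, i.e. requesting, waiting, then cancelling the change stream subscription, can also be expressed with Reactor, which this test module already uses. A sketch under the same assumptions (a replica set and a Document-typed collection); the class and method names are illustrative:

    import com.mongodb.reactivestreams.client.MongoCollection;
    import org.bson.Document;
    import reactor.core.Disposable;
    import reactor.core.publisher.Flux;

    final class ChangeStreamCancelExample {
        static void watchBriefly(final MongoCollection<Document> collection) throws InterruptedException {
            Disposable subscription = Flux.from(collection.watch())
                    .doOnNext(change -> System.out.println(change.getOperationType()))
                    .subscribe();
            Thread.sleep(1000);
            subscription.dispose(); // cancels upstream, releasing the server session
        }
    }
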
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientEncryptionCustomEndpointTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientEncryptionCustomEndpointTest.java
new file mode 100644
index 00000000000..f69aba18940
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientEncryptionCustomEndpointTest.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.client.AbstractClientEncryptionCustomEndpointTest;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.reactivestreams.client.syncadapter.SyncClientEncryption;
+import com.mongodb.reactivestreams.client.vault.ClientEncryptions;
+import org.bson.BsonDocument;
+
+public class ClientEncryptionCustomEndpointTest extends AbstractClientEncryptionCustomEndpointTest {
+    public ClientEncryptionCustomEndpointTest(final String name, final String provider, final BsonDocument masterKey,
+            final boolean testInvalidClientEncryption, final Class<? extends RuntimeException> exceptionClass,
+            final Class<? extends RuntimeException> wrappedExceptionClass, final String messageContainedInException) {
+        super(name, provider, masterKey, testInvalidClientEncryption, exceptionClass, wrappedExceptionClass, messageContainedInException);
+    }
+
+    @Override
+    public ClientEncryption getClientEncryption(final ClientEncryptionSettings settings) {
+        return new SyncClientEncryption(ClientEncryptions.create(settings));
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientEncryptionDataKeyAndDoubleEncryptionTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientEncryptionDataKeyAndDoubleEncryptionTest.java
new file mode 100644
index 00000000000..81e1380f307
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientEncryptionDataKeyAndDoubleEncryptionTest.java
@@ -0,0 +1,244 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import com.mongodb.AutoEncryptionSettings;
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.MongoClientException;
+import com.mongodb.client.model.vault.DataKeyOptions;
+import com.mongodb.client.model.vault.EncryptOptions;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.client.vault.ClientEncryptions;
+import com.mongodb.internal.connection.TestCommandListener;
+import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient;
+import org.bson.BsonBinary;
+import org.bson.BsonDocument;
+import org.bson.BsonString;
+import org.bson.Document;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+
+import static com.mongodb.ClusterFixture.getEnv;
+import static com.mongodb.ClusterFixture.hasEncryptionTestsEnabled;
+import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder;
+import static com.mongodb.client.model.Filters.eq;
+import static java.lang.String.format;
+import static java.util.Arrays.asList;
+import static java.util.Collections.singletonList;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assume.assumeTrue;
+
+@RunWith(Parameterized.class)
+public class ClientEncryptionDataKeyAndDoubleEncryptionTest {
+
+    private final String providerName;
+
+    private SyncMongoClient client;
+    private SyncMongoClient clientEncrypted;
+    private ClientEncryption clientEncryption;
+    private TestCommandListener commandListener;
+
+    public ClientEncryptionDataKeyAndDoubleEncryptionTest(final String providerName) {
+        this.providerName = providerName;
+    }
+
+
+    @Before
+    public void setUp() {
+        assumeTrue("Has encryption tests", hasEncryptionTestsEnabled());
+
+        // Step 1: create unencrypted client
+        commandListener = new TestCommandListener();
+        client = new SyncMongoClient(getMongoClientSettingsBuilder().addCommandListener(commandListener));
+        client.getDatabase("keyvault").getCollection("datakeys").drop();
+        client.getDatabase("db").getCollection("coll").drop();
+
+        // Step 2: Create encrypted client and client encryption
+        Map<String, Map<String, Object>> kmsProviders = new HashMap<String, Map<String, Object>>() {{
+            put("aws", new HashMap<String, Object>() {{
+                put("accessKeyId", getEnv("AWS_ACCESS_KEY_ID"));
+                put("secretAccessKey", getEnv("AWS_SECRET_ACCESS_KEY"));
+            }});
+            put("azure", new HashMap<String, Object>() {{
+                put("tenantId", getEnv("AZURE_TENANT_ID"));
+                put("clientId", getEnv("AZURE_CLIENT_ID"));
+                put("clientSecret", getEnv("AZURE_CLIENT_SECRET"));
+            }});
+            put("gcp", new HashMap<String, Object>() {{
+                put("email", getEnv("GCP_EMAIL"));
+                put("privateKey", getEnv("GCP_PRIVATE_KEY"));
+            }});
+            put("local", new HashMap<String, Object>() {{
+                put("key", "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBM" +
+                        "UN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk");
+            }});
+        }};
+
+        HashMap<String, BsonDocument> schemaMap = new HashMap<String, BsonDocument>() {{
+            put("db.coll", BsonDocument.parse("{" +
+                    "  \"bsonType\": \"object\"," +
+                    "  \"properties\": {" +
+                    "    \"encrypted_placeholder\": {" +
+                    "      \"encrypt\": {" +
+                    "        \"keyId\": \"/placeholder\"," +
+                    "        \"bsonType\": \"string\"," +
+                    "        \"algorithm\": \"AEAD_AES_256_CBC_HMAC_SHA_512-Random\"" +
+                    "      }" +
+                    "    }" +
+                    "  }" +
+                    "}"));
+        }};
+
+        String keyVaultNamespace = "keyvault.datakeys";
+        clientEncrypted = new SyncMongoClient(getMongoClientSettingsBuilder()
+                .autoEncryptionSettings(AutoEncryptionSettings.builder()
+                        .keyVaultNamespace(keyVaultNamespace)
+                        .kmsProviders(kmsProviders)
+                        .schemaMap(schemaMap)
+                        .build()));
+
+        clientEncryption = ClientEncryptions.create(
+                ClientEncryptionSettings
+                        .builder()
+                        .keyVaultMongoClientSettings(getMongoClientSettingsBuilder().addCommandListener(commandListener).build())
+                        .keyVaultNamespace(keyVaultNamespace)
+                        .kmsProviders(kmsProviders)
+                        .build());
+    }
+
+    @Test
+    public void testProvider() {
+        String keyAltName = format("%s_altname", providerName);
+        BsonBinary dataKeyId = clientEncryption.createDataKey(providerName,
+                new DataKeyOptions().keyAltNames(singletonList(keyAltName)).masterKey(getMasterKey()));
+        assertEquals(4, dataKeyId.getType());
+
+        ArrayList<Document> dataKeys = client
+                .getDatabase("keyvault")
+                .getCollection("datakeys")
+                .find(eq("_id", dataKeyId))
+                .into(new ArrayList<>());
+        assertEquals(1, dataKeys.size());
+
+        Document dataKey = dataKeys.get(0);
+        assertEquals(providerName, dataKey.get("masterKey", new Document()).get("provider", ""));
+
+        String insertWriteConcern = commandListener.getCommandStartedEvent("insert")
+                .getCommand()
+                .getDocument("writeConcern", new BsonDocument())
+                .getString("w", new BsonString(""))
+                .getValue();
+        assertEquals("majority", insertWriteConcern);
+
+        String stringToEncrypt = format("hello %s", providerName);
+        BsonBinary encrypted = clientEncryption.encrypt(new BsonString(stringToEncrypt),
+                new EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic")
+                        .keyId(dataKeyId));
+        assertEquals(6, encrypted.getType());
+
+        Document insertDocument = new Document("_id", providerName);
+        insertDocument.put("value", encrypted);
+        clientEncrypted.getDatabase("db").getCollection("coll").insertOne(insertDocument);
+        Document decryptedDocument = clientEncrypted.getDatabase("db")
+                .getCollection("coll")
+                .find(eq("_id", providerName))
+                .first();
+        assertNotNull(decryptedDocument);
+        assertEquals(stringToEncrypt, decryptedDocument.get("value", ""));
+
+        BsonBinary encryptedKeyAltName = clientEncryption.encrypt(new BsonString(stringToEncrypt),
+                new EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic")
+                        .keyAltName(keyAltName));
+        assertEquals(encrypted, encryptedKeyAltName);
+
+        assertThrows(MongoClientException.class, () ->
+                clientEncrypted
+                        .getDatabase("db")
+                        .getCollection("coll")
+                        .insertOne(new Document("encrypted_placeholder", encrypted))
+        );
+    }
+
+    private BsonDocument getMasterKey() {
+        switch (providerName) {
+            case "aws":
+                return BsonDocument.parse("{" +
+                        "  \"region\": \"us-east-1\"," +
+                        "  \"key\": \"arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0\"" +
+                        "}");
+            case "azure":
+                return BsonDocument.parse("{" +
+                        "  \"keyVaultEndpoint\": \"key-vault-csfle.vault.azure.net\"," +
+                        "  \"keyName\": \"key-name-csfle\"" +
+                        "}");
+            case "gcp":
+                return BsonDocument.parse("{" +
+                        "  \"projectId\": \"devprod-drivers\"," +
+                        "  \"location\": \"global\", " +
+                        "  \"keyRing\": \"key-ring-csfle\"," +
+                        "  \"keyName\": \"key-name-csfle\"" +
+                        "}");
+            default:
+                return new BsonDocument();
+        }
+    }
+
+
+    @Parameterized.Parameters(name = "providerName: {0}")
+    public static Collection<Object[]> data() {
+        return asList(new Object[]{"aws"}, new Object[]{"azure"}, new Object[]{"gcp"}, new Object[]{"local"});
+    }
+
+
+    @After
+    public void after() {
+        if (client != null) {
+            try {
+                client.close();
+            } catch (Exception e) {
+                // ignore
+            }
+        }
+
+        if (clientEncrypted != null) {
+            try {
+                clientEncrypted.close();
+            } catch (Exception e) {
+                // ignore
+            }
+        }
+
+        if (clientEncryption != null) {
+            try {
+                clientEncryption.close();
+            } catch (Exception e) {
+                // ignore
+            }
+        }
+    }
+
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientEncryptionRewrapManyDataKeyProseTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientEncryptionRewrapManyDataKeyProseTest.java
new file mode 100644
index 00000000000..3947a5718fd
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientEncryptionRewrapManyDataKeyProseTest.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.client.AbstractClientEncryptionRewrapManyDataKeyProseTest;
+import com.mongodb.client.MongoClient;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.client.vault.ClientEncryptions;
+import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient;
+
+public class ClientEncryptionRewrapManyDataKeyProseTest extends AbstractClientEncryptionRewrapManyDataKeyProseTest {
+
+    @Override
+    protected MongoClient createMongoClient(final MongoClientSettings settings) {
+        return new SyncMongoClient(settings);
+    }
+
+    @Override
+    public ClientEncryption getClientEncryption(final ClientEncryptionSettings settings) {
+        return ClientEncryptions.create(settings);
+    }
+}
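
The data-key-and-double-encryption test above drives the explicit encryption API end to end: create a data key, encrypt a value under it, and let an auto-encrypting client decrypt on read. A condensed sketch of that round trip against the synchronous vault API, assuming the supplied settings contain a "local" KMS provider (the class name and the "secret" value are illustrative):

    import com.mongodb.ClientEncryptionSettings;
    import com.mongodb.client.model.vault.DataKeyOptions;
    import com.mongodb.client.model.vault.EncryptOptions;
    import com.mongodb.client.vault.ClientEncryption;
    import com.mongodb.client.vault.ClientEncryptions;
    import org.bson.BsonBinary;
    import org.bson.BsonString;
    import org.bson.BsonValue;

    final class ExplicitEncryptionExample {
        static void roundTrip(final ClientEncryptionSettings settings) {
            try (ClientEncryption clientEncryption = ClientEncryptions.create(settings)) {
                // 1. create a data key in the key vault
                BsonBinary keyId = clientEncryption.createDataKey("local", new DataKeyOptions());
                // 2. explicitly encrypt a value with that key
                BsonBinary ciphertext = clientEncryption.encrypt(new BsonString("secret"),
                        new EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic").keyId(keyId));
                // 3. explicitly decrypt it again
                BsonValue plaintext = clientEncryption.decrypt(ciphertext);
                // plaintext is BsonString("secret")
            }
        }
    }
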
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientEncryptionTextExplicitEncryptionTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientEncryptionTextExplicitEncryptionTest.java
new file mode 100644
index 00000000000..2c7eda55aae
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientEncryptionTextExplicitEncryptionTest.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.client.AbstractClientEncryptionTextExplicitEncryptionTest;
+import com.mongodb.client.MongoClient;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.reactivestreams.client.syncadapter.SyncClientEncryption;
+import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient;
+import com.mongodb.reactivestreams.client.vault.ClientEncryptions;
+
+public class ClientEncryptionTextExplicitEncryptionTest extends AbstractClientEncryptionTextExplicitEncryptionTest {
+    @Override
+    protected MongoClient createMongoClient(final MongoClientSettings settings) {
+        return new SyncMongoClient(settings);
+    }
+
+    @Override
+    protected ClientEncryption createClientEncryption(final ClientEncryptionSettings settings) {
+        return new SyncClientEncryption(ClientEncryptions.create(settings));
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryption25LookupProseTests.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryption25LookupProseTests.java
new file mode 100644
index 00000000000..ba1a7ce6803
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryption25LookupProseTests.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.client.MongoClient;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.reactivestreams.client.syncadapter.SyncClientEncryption;
+import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient;
+import com.mongodb.reactivestreams.client.vault.ClientEncryptions;
+
+public class ClientSideEncryption25LookupProseTests extends com.mongodb.client.ClientSideEncryption25LookupProseTests {
+
+    @Override
+    protected MongoClient createMongoClient(final MongoClientSettings settings) {
+        return new SyncMongoClient(settings);
+    }
+
+    @Override
+    protected ClientEncryption createClientEncryption(final ClientEncryptionSettings settings) {
+        return new SyncClientEncryption(ClientEncryptions.create(settings));
+    }
+
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionAutoDataKeysTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionAutoDataKeysTest.java
new file mode 100644
index 00000000000..e8e18b57c3f
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionAutoDataKeysTest.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.client.AbstractClientSideEncryptionAutoDataKeysTest;
+import com.mongodb.client.MongoClient;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.reactivestreams.client.syncadapter.SyncClientEncryption;
+import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient;
+import com.mongodb.reactivestreams.client.vault.ClientEncryptions;
+
+final class ClientSideEncryptionAutoDataKeysTest extends AbstractClientSideEncryptionAutoDataKeysTest {
+    @Override
+    protected MongoClient createMongoClient(final MongoClientSettings settings) {
+        return new SyncMongoClient(settings);
+    }
+
+    @Override
+    protected ClientEncryption createClientEncryption(final ClientEncryptionSettings settings) {
+        return new SyncClientEncryption(ClientEncryptions.create(settings));
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionAwsCredentialFromEnvironmentTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionAwsCredentialFromEnvironmentTest.java
new file mode 100644
index 00000000000..734d8db71bc
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionAwsCredentialFromEnvironmentTest.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.client.AbstractClientSideEncryptionAwsCredentialFromEnvironmentTest;
+import com.mongodb.client.MongoClient;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.reactivestreams.client.syncadapter.SyncClientEncryption;
+import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient;
+import com.mongodb.reactivestreams.client.vault.ClientEncryptions;
+
+public class ClientSideEncryptionAwsCredentialFromEnvironmentTest extends AbstractClientSideEncryptionAwsCredentialFromEnvironmentTest {
+    @Override
+    protected ClientEncryption createClientEncryption(final ClientEncryptionSettings settings) {
+        return new SyncClientEncryption(ClientEncryptions.create(settings));
+    }
+
+    @Override
+    protected MongoClient createMongoClient(final MongoClientSettings settings) {
+        return new SyncMongoClient(settings);
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionBsonSizeLimitsSpecification.groovy b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionBsonSizeLimitsSpecification.groovy
new file mode 100644
index 00000000000..874f2204c6d
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionBsonSizeLimitsSpecification.groovy
@@ -0,0 +1,161 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client
+
+import com.mongodb.AutoEncryptionSettings
+import com.mongodb.ClientEncryptionSettings
+import com.mongodb.MongoNamespace
+import com.mongodb.MongoWriteException
+import com.mongodb.WriteConcern
+import com.mongodb.client.test.CollectionHelper
+import com.mongodb.internal.connection.TestCommandListener
+import com.mongodb.reactivestreams.client.vault.ClientEncryption
+import com.mongodb.reactivestreams.client.vault.ClientEncryptions
+import org.bson.BsonDocument
+import org.bson.BsonString
+import org.bson.codecs.BsonDocumentCodec
+import reactor.core.publisher.Mono
+
+import static com.mongodb.ClusterFixture.TIMEOUT_DURATION
+import static com.mongodb.reactivestreams.client.Fixture.drop
+import static com.mongodb.reactivestreams.client.Fixture.getDefaultDatabaseName
+import static com.mongodb.reactivestreams.client.Fixture.getMongoClientBuilderFromConnectionString
+import static java.util.Collections.singletonMap
+import static org.junit.Assume.assumeTrue
+import static util.JsonPoweredTestHelper.getTestDocument
+
+class ClientSideEncryptionBsonSizeLimitsSpecification extends FunctionalSpecification {
+
+    private final MongoNamespace keyVaultNamespace = new MongoNamespace('test.datakeys')
+    private final MongoNamespace autoEncryptingCollectionNamespace = new MongoNamespace(getDefaultDatabaseName(),
+            'ClientSideEncryptionProseTestSpecification')
+    private final TestCommandListener commandListener = new TestCommandListener()
+
+    private MongoClient autoEncryptingClient
+    private ClientEncryption clientEncryption
+    private MongoCollection<BsonDocument> autoEncryptingDataCollection
+
+    def setup() {
+        assumeTrue('Key vault tests disabled',
+                !System.getProperty('AWS_ACCESS_KEY_ID', '').isEmpty())
+        drop(keyVaultNamespace)
+        drop(autoEncryptingCollectionNamespace)
+
+        new CollectionHelper<>(new BsonDocumentCodec(), keyVaultNamespace).insertDocuments(
+                [getTestDocument('client-side-encryption/limits/limits-key.json')],
+                WriteConcern.MAJORITY)
+
+        def providerProperties =
+                ['local': ['key': Base64.getDecoder().decode('Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN' +
+                        '3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk')]
+                ]
+
+        autoEncryptingClient = MongoClients.create(getMongoClientBuilderFromConnectionString()
+                .autoEncryptionSettings(AutoEncryptionSettings.builder()
+                        .keyVaultNamespace(keyVaultNamespace.fullName)
+                        .kmsProviders(providerProperties)
+                        .schemaMap(singletonMap(autoEncryptingCollectionNamespace.fullName,
+                                getTestDocument('client-side-encryption/limits/limits-schema.json')))
+                        .build())
+                .addCommandListener(commandListener)
+                .build())
+
+        autoEncryptingDataCollection = autoEncryptingClient.getDatabase(autoEncryptingCollectionNamespace.databaseName)
+                .getCollection(autoEncryptingCollectionNamespace.collectionName, BsonDocument)
+
+        clientEncryption = ClientEncryptions.create(ClientEncryptionSettings.builder()
+                .keyVaultMongoClientSettings(getMongoClientBuilderFromConnectionString().build())
+                .keyVaultNamespace(keyVaultNamespace.fullName)
+                .kmsProviders(providerProperties)
+                .build())
+    }
+
+    def 'test BSON size limits'() {
+        when:
+        Mono.from(autoEncryptingDataCollection.insertOne(
+                new BsonDocument('_id', new BsonString('over_2mib_under_16mib'))
+                        .append('unencrypted', new BsonString('a' * 2097152)))).block(TIMEOUT_DURATION)
+
+        then:
+        noExceptionThrown()
+
+        when:
+        Mono.from(autoEncryptingDataCollection.insertOne(getTestDocument('client-side-encryption/limits/limits-doc.json')
+                .append('_id', new BsonString('encryption_exceeds_2mib'))
+                .append('unencrypted', new BsonString('a' * (2097152 - 2000))))
+        ).block(TIMEOUT_DURATION)
+
+        then:
+        noExceptionThrown()
+
+        when:
+        commandListener.reset()
+        Mono.from(autoEncryptingDataCollection.insertMany(
+                [
+                        new BsonDocument('_id', new BsonString('over_2mib_1'))
+                                .append('unencrypted', new BsonString('a' * 2097152)),
+                        new BsonDocument('_id', new BsonString('over_2mib_2'))
+                                .append('unencrypted', new BsonString('a' * 2097152))
+                ])).block(TIMEOUT_DURATION)
+
+        then:
+        noExceptionThrown()
+        countStartedEvents('insert') == 2
+
+        when:
+        commandListener.reset()
+        Mono.from(autoEncryptingDataCollection.insertMany(
+                [
+                        getTestDocument('client-side-encryption/limits/limits-doc.json')
+                                .append('_id', new BsonString('encryption_exceeds_2mib_1'))
+                                .append('unencrypted', new BsonString('a' * (2097152 - 2000))),
+                        getTestDocument('client-side-encryption/limits/limits-doc.json')
+                                .append('_id', new BsonString('encryption_exceeds_2mib_2'))
+                                .append('unencrypted', new BsonString('a' * (2097152 - 2000))),
+                ])).block(TIMEOUT_DURATION)
+
+        then:
+        noExceptionThrown()
+        countStartedEvents('insert') == 2
+
+        when:
+        Mono.from(autoEncryptingDataCollection.insertOne(
+                new BsonDocument('_id', new BsonString('under_16mib'))
+                        .append('unencrypted', new BsonString('a' * (16777216 - 2000))))).block(TIMEOUT_DURATION)
+
+        then:
+        noExceptionThrown()
+
+        when:
+        Mono.from(autoEncryptingDataCollection.insertOne(getTestDocument('client-side-encryption/limits/limits-doc.json')
+                .append('_id', new BsonString('encryption_exceeds_16mib'))
+                .append('unencrypted', new BsonString('a' * (16777216 - 2000))))).block(TIMEOUT_DURATION)
+
+        then:
+        thrown(MongoWriteException)
+    }
+
+    private int countStartedEvents(String name) {
+        int count = 0
+        for (def cur : commandListener.commandStartedEvents) {
+            if (cur.commandName == name) {
+                count++
+            }
+        }
+        count
+    }
+}
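
The Groovy specification above asserts batch splitting indirectly, by counting started "insert" commands through the internal TestCommandListener. The same counting can be done with the public CommandListener API; a sketch under the assumption that overriding only commandStarted suffices (the other listener methods have default implementations in recent driver versions), with an illustrative class name:

    import com.mongodb.event.CommandListener;
    import com.mongodb.event.CommandStartedEvent;

    import java.util.concurrent.atomic.AtomicInteger;

    // Counts started "insert" commands, e.g. to observe the driver
    // splitting an oversized auto-encrypted batch into several commands.
    final class InsertCountingListener implements CommandListener {
        private final AtomicInteger insertCount = new AtomicInteger();

        @Override
        public void commandStarted(final CommandStartedEvent event) {
            if ("insert".equals(event.getCommandName())) {
                insertCount.incrementAndGet();
            }
        }

        int insertCount() {
            return insertCount.get();
        }
    }

Registered via MongoClientSettings.builder().addCommandListener(new InsertCountingListener()), this would report 2 for the oversized insertMany cases in the specification.
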
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionBypassAutoEncryptionTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionBypassAutoEncryptionTest.java
new file mode 100644
index 00000000000..0b1c26fcec3
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionBypassAutoEncryptionTest.java
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import com.mongodb.AutoEncryptionSettings;
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.MongoNamespace;
+import com.mongodb.client.model.vault.DataKeyOptions;
+import com.mongodb.client.model.vault.EncryptOptions;
+import com.mongodb.client.result.InsertOneResult;
+import com.mongodb.reactivestreams.client.vault.ClientEncryption;
+import com.mongodb.reactivestreams.client.vault.ClientEncryptions;
+import org.bson.BsonBinary;
+import org.bson.BsonString;
+import org.bson.Document;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.security.SecureRandom;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static reactivestreams.helpers.SubscriberHelpers.ObservableSubscriber;
+import static reactivestreams.helpers.SubscriberHelpers.OperationSubscriber;
+
+public class ClientSideEncryptionBypassAutoEncryptionTest {
+    private MongoClient clientEncrypted;
+    private ClientEncryption clientEncryption;
+
+    @Before
+    public void setUp() throws Throwable {
+        byte[] localMasterKey = new byte[96];
+        new SecureRandom().nextBytes(localMasterKey);
+
+        Map<String, Map<String, Object>> kmsProviders = new HashMap<String, Map<String, Object>>() {{
+            put("local", new HashMap<String, Object>() {{
+                put("key", localMasterKey);
+            }});
+        }};
+
+
+        MongoNamespace keyVaultNamespace = new MongoNamespace(Fixture.getDefaultDatabaseName(), "testKeyVault");
+
+        Fixture.dropDatabase(Fixture.getDefaultDatabaseName());
+
+        ClientEncryptionSettings clientEncryptionSettings = ClientEncryptionSettings.builder()
+                .keyVaultMongoClientSettings(Fixture.getMongoClientSettings())
+                .keyVaultNamespace(keyVaultNamespace.getFullName())
+                .kmsProviders(kmsProviders)
+                .build();
+
+        clientEncryption = ClientEncryptions.create(clientEncryptionSettings);
+
+        AutoEncryptionSettings autoEncryptionSettings = AutoEncryptionSettings.builder()
+                .keyVaultNamespace(keyVaultNamespace.getFullName())
+                .kmsProviders(kmsProviders)
+                .bypassAutoEncryption(true)
+                .build();
+
+        MongoClientSettings clientSettings = Fixture.getMongoClientSettingsBuilder()
+                .autoEncryptionSettings(autoEncryptionSettings)
+                .build();
+        clientEncrypted = MongoClients.create(clientSettings);
+    }
+
+    @Test
+    public void shouldAutoDecryptManuallyEncryptedData() {
+        String fieldValue = "123456789";
+
+        ObservableSubscriber<BsonBinary> binarySubscriber = new OperationSubscriber<>();
+        clientEncryption.createDataKey("local", new DataKeyOptions()).subscribe(binarySubscriber);
+        BsonBinary dataKeyId = binarySubscriber.get().get(0);
+
+        binarySubscriber = new OperationSubscriber<>();
+        clientEncryption.encrypt(new BsonString(fieldValue),
+                new EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic").keyId(dataKeyId))
+                .subscribe(binarySubscriber);
+        BsonBinary encryptedFieldValue = binarySubscriber.get().get(0);
+
+        MongoCollection<Document> collection = clientEncrypted.getDatabase(Fixture.getDefaultDatabaseName()).getCollection("test");
+
+        ObservableSubscriber<InsertOneResult> insertSubscriber = new OperationSubscriber<>();
+        collection.insertOne(new Document("encryptedField", encryptedFieldValue)).subscribe(insertSubscriber);
+        insertSubscriber.await();
+
+        ObservableSubscriber<Document> resultSubscriber = new OperationSubscriber<>();
+        collection.find().first().subscribe(resultSubscriber);
+
+        assertEquals(fieldValue, resultSubscriber.get().get(0).getString("encryptedField"));
+    }
+
+    @After
+    public void after() throws Throwable {
+        if (clientEncrypted != null) {
+            Fixture.dropDatabase(Fixture.getDefaultDatabaseName());
+            clientEncrypted.close();
+        }
+        if (clientEncryption != null) {
+            clientEncryption.close();
+        }
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionCorpusTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionCorpusTest.java
new file mode 100644
index 00000000000..1d98ede1ead
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionCorpusTest.java
@@ -0,0 +1,318 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import com.mongodb.AutoEncryptionSettings;
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.MongoException;
+import com.mongodb.WriteConcern;
+import com.mongodb.client.model.vault.EncryptOptions;
+import com.mongodb.reactivestreams.client.vault.ClientEncryption;
+import com.mongodb.reactivestreams.client.vault.ClientEncryptions;
+import org.bson.BsonBinary;
+import org.bson.BsonBinarySubType;
+import org.bson.BsonDocument;
+import org.bson.BsonString;
+import org.bson.BsonValue;
+import org.bson.UuidRepresentation;
+import org.bson.codecs.UuidCodec;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import reactor.core.publisher.Mono;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.util.Arrays;
+import java.util.Base64;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+
+import static com.mongodb.ClusterFixture.TIMEOUT_DURATION;
+import static com.mongodb.ClusterFixture.getEnv;
+import static com.mongodb.ClusterFixture.hasEncryptionTestsEnabled;
+import static com.mongodb.reactivestreams.client.Fixture.getMongoClientBuilderFromConnectionString;
+import static com.mongodb.reactivestreams.client.Fixture.getMongoClientSettings;
+import static org.bson.codecs.configuration.CodecRegistries.fromCodecs;
+import static org.bson.codecs.configuration.CodecRegistries.fromRegistries;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assume.assumeTrue;
+import static util.JsonPoweredTestHelper.getTestDocument;
+
+// See https://github.com/mongodb/specifications/tree/master/source/client-side-encryption/corpus
+@RunWith(Parameterized.class)
+public class ClientSideEncryptionCorpusTest {
+    private final boolean useLocalSchema;
+    private MongoClient client;
+    private MongoClient autoEncryptingClient;
+    private ClientEncryption clientEncryption;
+
+    public ClientSideEncryptionCorpusTest(final boolean useLocalSchema) {
+        this.useLocalSchema = useLocalSchema;
+    }
+
+    @Before
+    public void setUp() throws IOException, URISyntaxException {
assumeTrue("Corpus tests disabled", hasEncryptionTestsEnabled()); + + MongoClientSettings clientSettings = getMongoClientBuilderFromConnectionString() + .codecRegistry(fromRegistries(fromCodecs(new UuidCodec(UuidRepresentation.STANDARD)), + MongoClientSettings.getDefaultCodecRegistry())).build(); + + // Step 1: create unencrypted client + client = MongoClients.create(clientSettings); + MongoDatabase db = client.getDatabase("db"); + + // Step 2: Drop and recreate db.coll with schema + BsonDocument schemaDocument = bsonDocumentFromPath("corpus-schema.json"); + + Mono.from(db.getCollection("coll").drop()).block(TIMEOUT_DURATION); + + Mono.from(db.runCommand(new BsonDocument("create", new BsonString("coll")) + .append("validator", new BsonDocument("$jsonSchema", schemaDocument)))) + .block(TIMEOUT_DURATION); + + // Step 3: Drop and create keyvault.datakeys + MongoDatabase keyVaultDatabase = client.getDatabase("keyvault"); + MongoCollection dataKeysCollection = keyVaultDatabase.getCollection("datakeys", BsonDocument.class) + .withWriteConcern(WriteConcern.MAJORITY); + + Mono.from(dataKeysCollection.drop()).block(TIMEOUT_DURATION); + + Mono.from(dataKeysCollection.insertOne(bsonDocumentFromPath("corpus-key-aws.json"))).block(TIMEOUT_DURATION); + Mono.from(dataKeysCollection.insertOne(bsonDocumentFromPath("corpus-key-azure.json"))).block(TIMEOUT_DURATION); + Mono.from(dataKeysCollection.insertOne(bsonDocumentFromPath("corpus-key-gcp.json"))).block(TIMEOUT_DURATION); + Mono.from(dataKeysCollection.insertOne(bsonDocumentFromPath("corpus-key-kmip.json"))).block(TIMEOUT_DURATION); + Mono.from(dataKeysCollection.insertOne(bsonDocumentFromPath("corpus-key-local.json"))).block(TIMEOUT_DURATION); + + // Step 4: Configure our objects + Map> kmsProviders = new HashMap>() {{ + put("aws", new HashMap() {{ + put("accessKeyId", getEnv("AWS_ACCESS_KEY_ID")); + put("secretAccessKey", getEnv("AWS_SECRET_ACCESS_KEY")); + }}); + put("azure", new HashMap() {{ + put("tenantId", getEnv("AZURE_TENANT_ID")); + put("clientId", getEnv("AZURE_CLIENT_ID")); + put("clientSecret", getEnv("AZURE_CLIENT_SECRET")); + }}); + put("gcp", new HashMap() {{ + put("email", getEnv("GCP_EMAIL")); + put("privateKey", getEnv("GCP_PRIVATE_KEY")); + }}); + put("kmip", new HashMap() {{ + put("endpoint", getEnv("org.mongodb.test.kmipEndpoint", "localhost:5698")); + }}); + put("local", new HashMap() {{ + put("key", "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBM" + + "UN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk"); + }}); + }}; + + HashMap schemaMap = new HashMap<>(); + schemaMap.put("db.coll", schemaDocument); + + AutoEncryptionSettings.Builder autoEncryptionSettingsBuilder = AutoEncryptionSettings.builder() + .keyVaultNamespace("keyvault.datakeys") + .kmsProviders(kmsProviders); + + if (useLocalSchema) { + autoEncryptionSettingsBuilder.schemaMap(schemaMap); + } + + clientSettings = getMongoClientBuilderFromConnectionString() + .codecRegistry(fromRegistries( + fromCodecs(new UuidCodec(UuidRepresentation.STANDARD)), MongoClientSettings.getDefaultCodecRegistry())) + .autoEncryptionSettings(autoEncryptionSettingsBuilder.build()) + .build(); + autoEncryptingClient = MongoClients.create(clientSettings); + + ClientEncryptionSettings clientEncryptionSettings = ClientEncryptionSettings.builder(). + keyVaultMongoClientSettings(getMongoClientSettings()). + kmsProviders(kmsProviders). 
+                keyVaultNamespace("keyvault.datakeys").build();
+        clientEncryption = ClientEncryptions.create(clientEncryptionSettings);
+    }
+
+    @Test
+    public void testCorpus() throws IOException, URISyntaxException {
+
+        // Step 5: Iterate over corpus
+        BsonDocument corpus = bsonDocumentFromPath("corpus.json");
+        BsonDocument corpusCopied = new BsonDocument();
+        for (String field : corpus.keySet()) {
+            if (!corpus.get(field).isDocument()) {
+                corpusCopied.append(field, corpus.get(field));
+                continue;
+            }
+
+            BsonDocument fieldDocument = corpus.getDocument(field).clone();
+            String kms = fieldDocument.getString("kms").getValue();
+            String abbreviatedAlgorithmName = fieldDocument.getString("algo").getValue();
+            String method = fieldDocument.getString("method").getValue();
+            String identifier = fieldDocument.getString("identifier").getValue();
+            boolean allowed = fieldDocument.getBoolean("allowed").getValue();
+            BsonValue value = fieldDocument.get("value");
+
+            byte[] awsKeyId = Base64.getDecoder().decode("AWSAAAAAAAAAAAAAAAAAAA==");
+            byte[] azureKeyId = Base64.getDecoder().decode("AZUREAAAAAAAAAAAAAAAAA==");
+            byte[] gcpKeyId = Base64.getDecoder().decode("GCPAAAAAAAAAAAAAAAAAAA==");
+            byte[] kmipKeyId = Base64.getDecoder().decode("KMIPAAAAAAAAAAAAAAAAAA==");
+            byte[] localKeyId = Base64.getDecoder().decode("LOCALAAAAAAAAAAAAAAAAA==");
+
+            if (method.equals("auto")) {
+                corpusCopied.append(field, corpus.get(field));
+                continue;
+            }
+
+            if (!method.equals("explicit")) {
+                throw new UnsupportedOperationException("Unsupported method: " + method);
+            }
+
+            String fullAlgorithmName = "AEAD_AES_256_CBC_HMAC_SHA_512-";
+            if (abbreviatedAlgorithmName.equals("rand")) {
+                fullAlgorithmName += "Random";
+            } else if (abbreviatedAlgorithmName.equals("det")) {
+                fullAlgorithmName += "Deterministic";
+            } else {
+                throw new UnsupportedOperationException("Unsupported algorithm: " + abbreviatedAlgorithmName);
+            }
+
+            EncryptOptions opts = new EncryptOptions(fullAlgorithmName);
+            if (identifier.equals("id")) {
+                switch (kms) {
+                    case "aws":
+                        opts.keyId(new BsonBinary(BsonBinarySubType.UUID_STANDARD, awsKeyId));
+                        break;
+                    case "azure":
+                        opts.keyId(new BsonBinary(BsonBinarySubType.UUID_STANDARD, azureKeyId));
+                        break;
+                    case "gcp":
+                        opts.keyId(new BsonBinary(BsonBinarySubType.UUID_STANDARD, gcpKeyId));
+                        break;
+                    case "kmip":
+                        opts.keyId(new BsonBinary(BsonBinarySubType.UUID_STANDARD, kmipKeyId));
+                        break;
+                    case "local":
+                        opts.keyId(new BsonBinary(BsonBinarySubType.UUID_STANDARD, localKeyId));
+                        break;
+                    default:
+                        throw new UnsupportedOperationException("Unsupported provider: " + kms);
+                }
+            } else if (identifier.equals("altname")) {
+                opts.keyAltName(kms);
+            } else {
+                throw new UnsupportedOperationException("Unsupported identifier: " + identifier);
+            }
+
+            try {
+                BsonValue encryptedValue = Mono.from(clientEncryption.encrypt(value, opts)).block(TIMEOUT_DURATION);
+                fieldDocument.put("value", encryptedValue);
+                corpusCopied.append(field, fieldDocument);
+            } catch (MongoException e) {
+                if (allowed) {
+                    throw e;
+                }
+                corpusCopied.append(field, fieldDocument);
+            }
+        }
+
+        // Step 6: insert corpusCopied
+        MongoCollection<BsonDocument> encryptedCollection = autoEncryptingClient.getDatabase("db")
+                .getCollection("coll", BsonDocument.class);
+        Mono.from(encryptedCollection.insertOne(corpusCopied)).block(TIMEOUT_DURATION);
+
+        // Step 7: check the auto decrypted document
+        BsonDocument corpusDecrypted = Mono.from(encryptedCollection.find(new BsonDocument()).first()).block(TIMEOUT_DURATION);
+        assertEquals(corpus, corpusDecrypted);
+
+        // Step 8: check the document with an unencrypted client
+        MongoCollection<BsonDocument> coll = client.getDatabase("db").getCollection("coll", BsonDocument.class);
+        BsonDocument corpusEncryptedActual = Mono.from(coll.find(new BsonDocument()).first()).block(TIMEOUT_DURATION);
+        BsonDocument corpusEncryptedExpected = bsonDocumentFromPath("corpus-encrypted.json");
+
+        for (String field : corpusEncryptedExpected.keySet()) {
+            if (field.equals("_id") || field.equals("altname_aws") || field.equals("altname_local")) {
+                continue;
+            }
+
+            boolean allowed = corpusEncryptedActual.getDocument(field).getBoolean("allowed").getValue();
+            String algorithm = corpusEncryptedActual.getDocument(field).getString("algo").getValue();
+            BsonValue actualValue = corpusEncryptedActual.getDocument(field).get("value");
+            BsonValue expectedValue = corpusEncryptedExpected.getDocument(field).get("value");
+
+            if (algorithm.equals("det")) {
+                assertEquals(actualValue, expectedValue);
+            } else if (algorithm.equals("rand")) {
+                if (allowed) {
+                    assertNotEquals(actualValue, expectedValue);
+                }
+            } else {
+                throw new UnsupportedOperationException("Unsupported algorithm type: " + algorithm);
+            }
+
+            if (allowed) {
+                BsonValue decrypted = Mono.from(clientEncryption.decrypt(actualValue.asBinary())).block(TIMEOUT_DURATION);
+                BsonValue expectedDecrypted = Mono.from(clientEncryption.decrypt(expectedValue.asBinary())).block(TIMEOUT_DURATION);
+                assertEquals("Values should be equal for field " + field, expectedDecrypted, decrypted);
+            } else {
+                assertEquals("Values should be equal for field " + field, expectedValue, actualValue);
+            }
+        }
+    }
+
+    private static BsonDocument bsonDocumentFromPath(final String path) {
+        return getTestDocument("client-side-encryption/corpus/" + path);
+    }
+
+    @Parameterized.Parameters(name = "useLocalSchema: {0}")
+    public static Collection<Object[]> data() {
+        return Arrays.asList(new Object[]{true}, new Object[]{false});
+    }
+
+    @After
+    public void after() {
+        if (client != null) {
+            try {
+                client.close();
+            } catch (Exception e) {
+                // ignore
+            }
+        }
+
+        if (autoEncryptingClient != null) {
+            try {
+                autoEncryptingClient.close();
+            } catch (Exception e) {
+                // ignore
+            }
+        }
+
+        if (clientEncryption != null) {
+            try {
+                clientEncryption.close();
+            } catch (Exception e) {
+                // ignore
+            }
+        }
+    }
+}
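
The corpus test builds its codec registry so that UUIDs are encoded with the STANDARD representation, which matches the UUID_STANDARD binary subtype used for the corpus key ids. Registries are consulted in order, so placing the UuidCodec first lets it take precedence over the default UUID handling. The construction in isolation (the holder class name is illustrative):

    import com.mongodb.MongoClientSettings;
    import org.bson.UuidRepresentation;
    import org.bson.codecs.UuidCodec;
    import org.bson.codecs.configuration.CodecRegistry;

    import static org.bson.codecs.configuration.CodecRegistries.fromCodecs;
    import static org.bson.codecs.configuration.CodecRegistries.fromRegistries;

    final class UuidRegistryExample {
        // STANDARD UuidCodec first, driver defaults as the fallback
        static final CodecRegistry REGISTRY = fromRegistries(
                fromCodecs(new UuidCodec(UuidRepresentation.STANDARD)),
                MongoClientSettings.getDefaultCodecRegistry());
    }
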
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionDeadlockTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionDeadlockTest.java
new file mode 100644
index 00000000000..182c1a4c3f1
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionDeadlockTest.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import com.mongodb.MongoClientSettings;
+import com.mongodb.client.AbstractClientSideEncryptionDeadlockTest;
+import com.mongodb.client.MongoClient;
+import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient;
+
+public class ClientSideEncryptionDeadlockTest extends AbstractClientSideEncryptionDeadlockTest {
+    @Override
+    protected MongoClient createMongoClient(final MongoClientSettings settings) {
+        return new SyncMongoClient(settings);
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionDecryptionEventsTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionDecryptionEventsTest.java
new file mode 100644
index 00000000000..f784a0ba73f
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionDecryptionEventsTest.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.client.AbstractClientSideEncryptionDecryptionEventsTest;
+import com.mongodb.client.MongoClient;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.reactivestreams.client.syncadapter.SyncClientEncryption;
+import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient;
+import com.mongodb.reactivestreams.client.vault.ClientEncryptions;
+
+public class ClientSideEncryptionDecryptionEventsTest extends AbstractClientSideEncryptionDecryptionEventsTest {
+    @Override
+    protected MongoClient createMongoClient(final MongoClientSettings settings) {
+        return new SyncMongoClient(settings);
+    }
+
+    @Override
+    protected ClientEncryption createClientEncryption(final ClientEncryptionSettings settings) {
+        return new SyncClientEncryption(ClientEncryptions.create(settings));
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionExternalKeyVaultTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionExternalKeyVaultTest.java
new file mode 100644
index 00000000000..acd87ecc83e
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionExternalKeyVaultTest.java
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import com.mongodb.AutoEncryptionSettings;
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.MongoCredential;
+import com.mongodb.MongoSecurityException;
+import com.mongodb.WriteConcern;
+import com.mongodb.client.model.vault.EncryptOptions;
+import com.mongodb.reactivestreams.client.vault.ClientEncryption;
+import com.mongodb.reactivestreams.client.vault.ClientEncryptions;
+import org.bson.BsonBinary;
+import org.bson.BsonBinarySubType;
+import org.bson.BsonDocument;
+import org.bson.BsonString;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import reactor.core.publisher.Mono;
+
+import java.util.Arrays;
+import java.util.Base64;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+
+import static com.mongodb.ClusterFixture.TIMEOUT_DURATION;
+import static com.mongodb.ClusterFixture.isClientSideEncryptionTest;
+import static com.mongodb.reactivestreams.client.Fixture.getMongoClient;
+import static com.mongodb.reactivestreams.client.Fixture.getMongoClientBuilderFromConnectionString;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assume.assumeTrue;
+import static util.JsonPoweredTestHelper.getTestDocument;
+
+@RunWith(Parameterized.class)
+public class ClientSideEncryptionExternalKeyVaultTest {
+    private MongoClient clientEncrypted;
+    private ClientEncryption clientEncryption;
+    private final boolean withExternalKeyVault;
+
+    public ClientSideEncryptionExternalKeyVaultTest(final boolean withExternalKeyVault) {
+        this.withExternalKeyVault = withExternalKeyVault;
+    }
+
+    @Before
+    public void setUp() throws Throwable {
+        assumeTrue("Encryption test with external keyVault is disabled", isClientSideEncryptionTest());
+
+        /* Step 1: get unencrypted client and recreate keys collection */
+        MongoClient client = getMongoClient();
+        MongoDatabase keyvaultDatabase = client.getDatabase("keyvault");
+        MongoCollection<BsonDocument> datakeys = keyvaultDatabase.getCollection("datakeys", BsonDocument.class)
+                .withWriteConcern(WriteConcern.MAJORITY);
+        Mono.from(datakeys.drop()).block(TIMEOUT_DURATION);
+
+        Mono.from(datakeys.insertOne(bsonDocumentFromPath("external-key.json"))).block(TIMEOUT_DURATION);
+
+        /* Step 2: create encryption objects. */
+        Map<String, Map<String, Object>> kmsProviders = new HashMap<>();
+        Map<String, Object> localMasterkey = new HashMap<>();
+        Map<String, BsonDocument> schemaMap = new HashMap<>();
+
+        byte[] localMasterkeyBytes = Base64.getDecoder().decode("Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBM" +
+                "UN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk");
+        localMasterkey.put("key", localMasterkeyBytes);
+        kmsProviders.put("local", localMasterkey);
+        schemaMap.put("db.coll", bsonDocumentFromPath("external-schema.json"));
+
+        AutoEncryptionSettings.Builder autoEncryptionSettingsBuilder = AutoEncryptionSettings.builder()
+                .keyVaultNamespace("keyvault.datakeys")
+                .kmsProviders(kmsProviders)
+                .schemaMap(schemaMap);
+
+        MongoClientSettings externalClientSettings = null;
+        if (withExternalKeyVault) {
+            externalClientSettings = getMongoClientBuilderFromConnectionString()
+                    .credential(MongoCredential.createCredential("fake-user", "admin", "fake-pwd".toCharArray()))
+                    .build();
+            autoEncryptionSettingsBuilder.keyVaultMongoClientSettings(externalClientSettings);
+        }
+
+        AutoEncryptionSettings autoEncryptionSettings = autoEncryptionSettingsBuilder.build();
+
+        MongoClientSettings clientSettings = getMongoClientBuilderFromConnectionString()
+                .autoEncryptionSettings(autoEncryptionSettings)
+                .build();
+        clientEncrypted = MongoClients.create(clientSettings);
+
+        ClientEncryptionSettings.Builder clientEncryptionSettingsBuilder = ClientEncryptionSettings.builder().
+                keyVaultMongoClientSettings(getMongoClientBuilderFromConnectionString().build())
+                .kmsProviders(kmsProviders)
+                .keyVaultNamespace("keyvault.datakeys");
+
+        if (withExternalKeyVault) {
+            clientEncryptionSettingsBuilder.keyVaultMongoClientSettings(externalClientSettings);
+        }
+
+        ClientEncryptionSettings clientEncryptionSettings = clientEncryptionSettingsBuilder.build();
+        clientEncryption = ClientEncryptions.create(clientEncryptionSettings);
+    }
+
+    @Test
+    public void testExternal() {
+        boolean authExceptionThrown = false;
+        MongoCollection<BsonDocument> coll = clientEncrypted
+                .getDatabase("db")
+                .getCollection("coll", BsonDocument.class);
+        try {
+            Mono.from(coll.insertOne(new BsonDocument().append("encrypted", new BsonString("test")))).block(TIMEOUT_DURATION);
+        } catch (MongoSecurityException mse) {
+            authExceptionThrown = true;
+        }
+        assertEquals(authExceptionThrown, withExternalKeyVault);
+
+        EncryptOptions encryptOptions = new EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic")
+                .keyId(new BsonBinary(BsonBinarySubType.UUID_STANDARD, Base64.getDecoder().decode("LOCALAAAAAAAAAAAAAAAAA==")));
+        authExceptionThrown = false;
+        try {
+            Mono.from(clientEncryption.encrypt(new BsonString("test"), encryptOptions)).block(TIMEOUT_DURATION);
+        } catch (MongoSecurityException mse) {
+            authExceptionThrown = true;
+        }
+        assertEquals(authExceptionThrown, withExternalKeyVault);
+    }
+
+    private static BsonDocument bsonDocumentFromPath(final String path) {
+        return getTestDocument("client-side-encryption/external/" + path);
+    }
+
+    @Parameterized.Parameters(name = "withExternalKeyVault: {0}")
+    public static Collection<Object[]> data() {
+        return Arrays.asList(new Object[]{true}, new Object[]{false});
+    }
+
+    @After
+    public void after() {
+        if (clientEncrypted != null) {
+            try {
+                clientEncrypted.close();
+            } catch (Exception e) {
+                // ignore
+            }
+        }
+        if (clientEncryption != null) {
+            try {
+                clientEncryption.close();
+            } catch (Exception e) {
+                // ignore
+            }
+        }
+    }
+}
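
In the external-key-vault variant above, the key vault collection is reached through a separately configured client, supplied via keyVaultMongoClientSettings. A sketch of that wiring with a placeholder connection string (the URI and the holder class are assumptions, not part of this patch):

    import com.mongodb.AutoEncryptionSettings;
    import com.mongodb.ConnectionString;
    import com.mongodb.MongoClientSettings;

    import java.util.Map;

    final class ExternalKeyVaultExample {
        static AutoEncryptionSettings settings(final Map<String, Map<String, Object>> kmsProviders) {
            // The key vault may live on a different deployment, with
            // different credentials, than the data-bearing cluster.
            MongoClientSettings keyVaultClientSettings = MongoClientSettings.builder()
                    .applyConnectionString(new ConnectionString("mongodb://keyvault.example.com"))
                    .build();
            return AutoEncryptionSettings.builder()
                    .keyVaultNamespace("keyvault.datakeys")
                    .kmsProviders(kmsProviders)
                    .keyVaultMongoClientSettings(keyVaultClientSettings)
                    .build();
        }
    }
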
b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionKmsTlsTest.java new file mode 100644 index 00000000000..9ffe2643507 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionKmsTlsTest.java @@ -0,0 +1,30 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.client.AbstractClientSideEncryptionKmsTlsTest; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.reactivestreams.client.syncadapter.SyncClientEncryption; +import com.mongodb.reactivestreams.client.vault.ClientEncryptions; + +public class ClientSideEncryptionKmsTlsTest extends AbstractClientSideEncryptionKmsTlsTest { + @Override + public ClientEncryption getClientEncryption(final ClientEncryptionSettings settings) { + return new SyncClientEncryption(ClientEncryptions.create(settings)); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionNotCreateMongocryptdClientTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionNotCreateMongocryptdClientTest.java new file mode 100644 index 00000000000..3e58f6a5b36 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionNotCreateMongocryptdClientTest.java @@ -0,0 +1,29 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.MongoClientSettings; +import com.mongodb.client.AbstractClientSideEncryptionNotCreateMongocryptdClientTest; +import com.mongodb.client.MongoClient; +import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; + +final class ClientSideEncryptionNotCreateMongocryptdClientTest extends AbstractClientSideEncryptionNotCreateMongocryptdClientTest { + @Override + protected MongoClient createMongoClient(final MongoClientSettings settings) { + return new SyncMongoClient(settings); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionNotSpawnMongocryptdTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionNotSpawnMongocryptdTest.java new file mode 100644 index 00000000000..7b1a50bc2b7 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionNotSpawnMongocryptdTest.java @@ -0,0 +1,29 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.MongoClientSettings; +import com.mongodb.client.AbstractClientSideEncryptionNotSpawnMongocryptdTest; +import com.mongodb.client.MongoClient; +import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; + +final class ClientSideEncryptionNotSpawnMongocryptdTest extends AbstractClientSideEncryptionNotSpawnMongocryptdTest { + @Override + protected MongoClient createMongoClient(final MongoClientSettings settings) { + return new SyncMongoClient(settings); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionOnDemandCredentialsTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionOnDemandCredentialsTest.java new file mode 100644 index 00000000000..fbaacc50b02 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionOnDemandCredentialsTest.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.client.AbstractClientSideEncryptionOnDemandCredentialsTest; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.reactivestreams.client.syncadapter.SyncClientEncryption; +import com.mongodb.reactivestreams.client.vault.ClientEncryptions; + +public class ClientSideEncryptionOnDemandCredentialsTest extends AbstractClientSideEncryptionOnDemandCredentialsTest { + + @Override + public ClientEncryption getClientEncryption(final ClientEncryptionSettings settings) { + return new SyncClientEncryption(ClientEncryptions.create(settings)); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionRangeDefaultExplicitEncryptionTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionRangeDefaultExplicitEncryptionTest.java new file mode 100644 index 00000000000..ec7aa9e8c20 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionRangeDefaultExplicitEncryptionTest.java @@ -0,0 +1,33 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * + */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.client.AbstractClientSideEncryptionRangeDefaultExplicitEncryptionTest; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.reactivestreams.client.syncadapter.SyncClientEncryption; +import com.mongodb.reactivestreams.client.vault.ClientEncryptions; + +public class ClientSideEncryptionRangeDefaultExplicitEncryptionTest extends AbstractClientSideEncryptionRangeDefaultExplicitEncryptionTest { + + @Override + protected ClientEncryption createClientEncryption(final ClientEncryptionSettings settings) { + return new SyncClientEncryption(ClientEncryptions.create(settings)); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionRangeExplicitEncryptionTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionRangeExplicitEncryptionTest.java new file mode 100644 index 00000000000..ac8c08add0a --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionRangeExplicitEncryptionTest.java @@ -0,0 +1,39 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * + */ +package com.mongodb.reactivestreams.client; + +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.MongoClientSettings; +import com.mongodb.client.AbstractClientSideEncryptionRangeExplicitEncryptionTest; +import com.mongodb.client.MongoClient; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.reactivestreams.client.syncadapter.SyncClientEncryption; +import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; +import com.mongodb.reactivestreams.client.vault.ClientEncryptions; + +public class ClientSideEncryptionRangeExplicitEncryptionTest extends AbstractClientSideEncryptionRangeExplicitEncryptionTest { + @Override + protected MongoClient createMongoClient(final MongoClientSettings settings) { + return new SyncMongoClient(settings); + } + + @Override + protected ClientEncryption createClientEncryption(final ClientEncryptionSettings settings) { + return new SyncClientEncryption(ClientEncryptions.create(settings)); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionSessionTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionSessionTest.java new file mode 100644 index 00000000000..a036f94161b --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionSessionTest.java @@ -0,0 +1,153 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoNamespace; +import com.mongodb.WriteConcern; +import com.mongodb.client.test.CollectionHelper; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.codecs.BsonDocumentCodec; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import reactor.core.publisher.Mono; + +import java.util.Arrays; +import java.util.Base64; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; + +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; +import static com.mongodb.ClusterFixture.isClientSideEncryptionTest; +import static com.mongodb.ClusterFixture.isStandalone; +import static com.mongodb.reactivestreams.client.Fixture.getDefaultDatabaseName; +import static com.mongodb.reactivestreams.client.Fixture.getMongoClient; +import static com.mongodb.reactivestreams.client.Fixture.getMongoClientBuilderFromConnectionString; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeFalse; +import static org.junit.Assume.assumeTrue; +import static util.JsonPoweredTestHelper.getTestDocument; + +@RunWith(Parameterized.class) +public class ClientSideEncryptionSessionTest { + private static final String COLLECTION_NAME = "clientSideEncryptionSessionsTest"; + + private MongoClient client = getMongoClient(); + private MongoClient clientEncrypted; + private final boolean useTransaction; + + @Parameterized.Parameters(name = "useTransaction: {0}") + public static Collection<Object[]> data() { + return Arrays.asList(new Object[]{true}, new Object[]{false}); + } + + public ClientSideEncryptionSessionTest(final boolean useTransaction) { + this.useTransaction = useTransaction; + } + + @Before + public void setUp() throws Throwable { + assumeTrue(isClientSideEncryptionTest()); + assumeFalse(isStandalone()); + + /* Step 1: get unencrypted client and recreate keys collection */ + client = getMongoClient(); + MongoDatabase keyVaultDatabase = client.getDatabase("keyvault"); + MongoCollection<BsonDocument> dataKeys = keyVaultDatabase.getCollection("datakeys", BsonDocument.class) + .withWriteConcern(WriteConcern.MAJORITY); + Mono.from(dataKeys.drop()).block(TIMEOUT_DURATION); + + Mono.from(dataKeys.insertOne(bsonDocumentFromPath("external-key.json"))).block(TIMEOUT_DURATION); + + /* Step 2: create encryption objects. */ + Map<String, Map<String, Object>> kmsProviders = new HashMap<>(); + Map<String, Object> localMasterkey = new HashMap<>(); + Map<String, BsonDocument> schemaMap = new HashMap<>(); + + byte[] localMasterKeyBytes = Base64.getDecoder().decode("Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBM" + + "UN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk"); + localMasterkey.put("key", localMasterKeyBytes); + kmsProviders.put("local", localMasterkey); + schemaMap.put(getDefaultDatabaseName() + "." + COLLECTION_NAME, bsonDocumentFromPath("external-schema.json")); + + MongoClientSettings clientSettings = getMongoClientBuilderFromConnectionString() + .autoEncryptionSettings(AutoEncryptionSettings.builder() + .keyVaultNamespace("keyvault.datakeys") + .kmsProviders(kmsProviders) + .schemaMap(schemaMap).build()) + .build(); + clientEncrypted = MongoClients.create(clientSettings); + + CollectionHelper<BsonDocument> collectionHelper = + new CollectionHelper<>(new BsonDocumentCodec(), new MongoNamespace(getDefaultDatabaseName(), COLLECTION_NAME)); + collectionHelper.drop(); + collectionHelper.create(); + } + + @After + public void after() { + if (clientEncrypted != null) { + try { + clientEncrypted.close(); + } catch (Exception e) { + // ignore + } + } + } + + @Test + public void testWithExplicitSession() throws Throwable { + BsonString unencryptedValue = new BsonString("test"); + + try (ClientSession clientSession = Mono.from(clientEncrypted.startSession()).block(TIMEOUT_DURATION)) { + assertNotNull(clientSession); + if (useTransaction) { + clientSession.startTransaction(); + } + MongoCollection<BsonDocument> autoEncryptedCollection = clientEncrypted.getDatabase(getDefaultDatabaseName()) + .getCollection(COLLECTION_NAME, BsonDocument.class); + + Mono.from(autoEncryptedCollection.insertOne(clientSession, new BsonDocument().append("encrypted", new BsonString("test")))) + .block(TIMEOUT_DURATION); + + BsonDocument unencryptedDocument = Mono.from(autoEncryptedCollection.find(clientSession).first()).block(TIMEOUT_DURATION); + assertEquals(unencryptedValue, unencryptedDocument.getString("encrypted")); + + if (useTransaction) { + Mono.from(clientSession.commitTransaction()).block(TIMEOUT_DURATION); + } + } + + MongoCollection<BsonDocument> encryptedCollection = client.getDatabase(getDefaultDatabaseName()) + .getCollection(COLLECTION_NAME, BsonDocument.class); + BsonDocument encryptedDocument = Mono.from(encryptedCollection.find().first()).block(TIMEOUT_DURATION); + assertTrue(encryptedDocument.isBinary("encrypted")); + assertEquals(6, encryptedDocument.getBinary("encrypted").getType()); // BSON binary subtype 6 (encrypted value) + } + + private static BsonDocument bsonDocumentFromPath(final String path) { + return getTestDocument("client-side-encryption/external/" + path); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionUniqueIndexKeyAltNamesTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionUniqueIndexKeyAltNamesTest.java new file mode 100644 index 00000000000..ec0acafc2b2 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionUniqueIndexKeyAltNamesTest.java @@ -0,0 +1,38 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.MongoClientSettings; +import com.mongodb.client.AbstractClientSideEncryptionUniqueIndexKeyAltNamesTest; +import com.mongodb.client.MongoClient; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.reactivestreams.client.syncadapter.SyncClientEncryption; +import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; +import com.mongodb.reactivestreams.client.vault.ClientEncryptions; + +public class ClientSideEncryptionUniqueIndexKeyAltNamesTest extends AbstractClientSideEncryptionUniqueIndexKeyAltNamesTest { + @Override + protected MongoClient createMongoClient(final MongoClientSettings settings) { + return new SyncMongoClient(settings); + } + + @Override + protected ClientEncryption createClientEncryption(final ClientEncryptionSettings settings) { + return new SyncClientEncryption(ClientEncryptions.create(settings)); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionViewAreProhibitedTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionViewAreProhibitedTest.java new file mode 100644 index 00000000000..5fa53430abd --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideEncryptionViewAreProhibitedTest.java @@ -0,0 +1,96 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoException; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import reactor.core.publisher.Mono; + +import java.util.Base64; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; +import static com.mongodb.ClusterFixture.isClientSideEncryptionTest; +import static com.mongodb.reactivestreams.client.Fixture.getMongoClient; +import static com.mongodb.reactivestreams.client.Fixture.getMongoClientBuilderFromConnectionString; +import static junit.framework.TestCase.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +public class ClientSideEncryptionViewAreProhibitedTest { + private MongoClient clientEncrypted; + + @Before + public void setUp() { + assumeTrue("Client side encryption tests are disabled", isClientSideEncryptionTest()); + + MongoClient client = getMongoClient(); + + MongoDatabase db = client.getDatabase("db"); + Mono.from(db.getCollection("view").drop()).block(TIMEOUT_DURATION); + + Mono.from(db.createView("view", "coll", Collections.emptyList())).block(TIMEOUT_DURATION); + + Map<String, Map<String, Object>> kmsProviders = new HashMap<>(); + Map<String, Object> localMasterkey = new HashMap<>(); + + byte[] localMasterkeyBytes = Base64.getDecoder().decode("Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBM" + + "UN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk"); + localMasterkey.put("key", localMasterkeyBytes); + kmsProviders.put("local", localMasterkey); + + AutoEncryptionSettings.Builder autoEncryptionSettingsBuilder = AutoEncryptionSettings.builder() + .keyVaultNamespace("admin.datakeys") + .kmsProviders(kmsProviders); + + AutoEncryptionSettings autoEncryptionSettings = autoEncryptionSettingsBuilder.build(); + + MongoClientSettings.Builder clientSettingsBuilder = getMongoClientBuilderFromConnectionString(); + MongoClientSettings clientSettings = clientSettingsBuilder + .autoEncryptionSettings(autoEncryptionSettings) + .build(); + clientEncrypted = MongoClients.create(clientSettings); + } + + @Test + public void shouldThrowError() throws Throwable { + MongoCollection<BsonDocument> coll = clientEncrypted + .getDatabase("db") + .getCollection("view", BsonDocument.class); + try { + Mono.from(coll.insertOne(new BsonDocument().append("encrypted", new BsonString("test")))).block(TIMEOUT_DURATION); + fail(); + } catch (MongoException me) { + assertTrue(me.getMessage().contains("cannot auto encrypt a view")); + } + } + + @After + public void after() { + if (clientEncrypted != null) { + clientEncrypted.close(); + } + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideExplicitEncryptionTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideExplicitEncryptionTest.java new file mode 100644 index 00000000000..df41b967482 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideExplicitEncryptionTest.java @@ -0,0 +1,40 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.MongoClientSettings; +import com.mongodb.client.AbstractClientSideEncryptionExplicitEncryptionTest; +import com.mongodb.client.MongoClient; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.reactivestreams.client.syncadapter.SyncClientEncryption; +import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; +import com.mongodb.reactivestreams.client.vault.ClientEncryptions; + +public class ClientSideExplicitEncryptionTest extends AbstractClientSideEncryptionExplicitEncryptionTest { + + @Override + protected MongoClient createMongoClient(final MongoClientSettings settings) { + return new SyncMongoClient(settings); + } + + @Override + protected ClientEncryption createClientEncryption(final ClientEncryptionSettings settings) { + return new SyncClientEncryption(ClientEncryptions.create(settings)); + } + +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideOperationTimeoutProseTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideOperationTimeoutProseTest.java new file mode 100644 index 00000000000..2ddad42c153 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ClientSideOperationTimeoutProseTest.java @@ -0,0 +1,517 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.ClusterFixture; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoCommandException; +import com.mongodb.MongoNamespace; +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.client.AbstractClientSideOperationsTimeoutProseTest; +import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.changestream.FullDocument; +import com.mongodb.event.CommandFailedEvent; +import com.mongodb.event.CommandStartedEvent; +import com.mongodb.reactivestreams.client.gridfs.GridFSBucket; +import com.mongodb.reactivestreams.client.gridfs.GridFSBuckets; +import com.mongodb.reactivestreams.client.syncadapter.SyncGridFSBucket; +import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; +import org.bson.BsonDocument; +import org.bson.BsonTimestamp; +import org.bson.Document; +import org.bson.types.ObjectId; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Hooks; +import reactor.test.StepVerifier; + +import java.nio.ByteBuffer; +import java.time.Duration; +import java.time.Instant; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.stream.Collectors; + +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet; +import static com.mongodb.ClusterFixture.serverVersionAtLeast; +import static com.mongodb.ClusterFixture.sleep; +import static java.util.Collections.singletonList; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + + +/** + * See https://github.com/mongodb/specifications/blob/master/source/client-side-operations-timeout/tests/README.md#prose-tests + */ +public final class ClientSideOperationTimeoutProseTest extends AbstractClientSideOperationsTimeoutProseTest { + private MongoClient wrapped; + + @Override + protected com.mongodb.client.MongoClient createMongoClient(final MongoClientSettings mongoClientSettings) { + SyncMongoClient client = new SyncMongoClient(mongoClientSettings); + wrapped = client.getWrapped(); + return client; + } + + private static MongoClient createReactiveClient(final MongoClientSettings.Builder builder) { + return MongoClients.create(builder.build()); + } + + @Override + protected com.mongodb.client.gridfs.GridFSBucket createGridFsBucket(final com.mongodb.client.MongoDatabase mongoDatabase, + final String bucketName) { + return new SyncGridFSBucket(GridFSBuckets.create(wrapped.getDatabase(mongoDatabase.getName()), bucketName)); + } + + private GridFSBucket createReaciveGridFsBucket(final MongoDatabase mongoDatabase, final String bucketName) { + return GridFSBuckets.create(mongoDatabase, bucketName); + } + + @Override + protected boolean isAsync() { + return true; + } + + @DisplayName("6. 
GridFS Upload - uploads via openUploadStream can be timed out") + @Test + @Override + public void testGridFSUploadViaOpenUploadStreamTimeout() { + assumeTrue(serverVersionAtLeast(4, 4)); + long rtt = ClusterFixture.getPrimaryRTT(); + + //given + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"" + FAIL_COMMAND_NAME + "\"," + + " mode: { times: 1 }," + + " data: {" + + " failCommands: [\"insert\"]," + + " blockConnection: true," + + " blockTimeMS: " + (rtt + 405) + + " }" + + "}"); + + try (MongoClient client = createReactiveClient(getMongoClientSettingsBuilder() + .timeout(rtt + 400, TimeUnit.MILLISECONDS))) { + MongoDatabase database = client.getDatabase(gridFsFileNamespace.getDatabaseName()); + GridFSBucket gridFsBucket = createReaciveGridFsBucket(database, GRID_FS_BUCKET_NAME); + + TestEventPublisher<ByteBuffer> eventPublisher = new TestEventPublisher<>(); + TestSubscriber<ObjectId> testSubscriber = new TestSubscriber<>(); + + gridFsBucket.uploadFromPublisher("filename", eventPublisher.getEventStream()) + .subscribe(testSubscriber); + + //when + eventPublisher.sendEvent(ByteBuffer.wrap(new byte[]{0x12})); + testSubscriber.requestMore(1); + /* + By prose spec definition we have to close the GridFSUploadStream when we have no more data to submit and want to flush internal buffers. + However, in Reactive Streams that is equivalent to propagating a complete signal from the source publisher. + */ + eventPublisher.complete(); + + //then + testSubscriber.assertTerminalEvent(); + + List<Throwable> onErrorEvents = testSubscriber.getOnErrorEvents(); + assertEquals(1, onErrorEvents.size()); + + Throwable commandError = onErrorEvents.get(0); + assertInstanceOf(MongoOperationTimeoutException.class, commandError); + + CommandFailedEvent chunkInsertFailedEvent = commandListener.getCommandFailedEvent("insert"); + assertNotNull(chunkInsertFailedEvent); + assertEquals(commandError, chunkInsertFailedEvent.getThrowable()); + } + } + + @DisplayName("6. GridFS Upload - Aborting an upload stream can be timed out") + @Test + @Override + public void testAbortingGridFsUploadStreamTimeout() throws ExecutionException, InterruptedException, TimeoutException { + assumeTrue(serverVersionAtLeast(4, 4)); + long rtt = ClusterFixture.getPrimaryRTT(); + + //given + CompletableFuture<Throwable> droppedErrorFuture = new CompletableFuture<>(); + Hooks.onErrorDropped(droppedErrorFuture::complete); + + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"" + FAIL_COMMAND_NAME + "\"," + + " mode: { times: 1 }," + + " data: {" + + " failCommands: [\"delete\"]," + + " blockConnection: true," + + " blockTimeMS: " + (rtt + 405) + + " }" + + "}"); + + try (MongoClient client = createReactiveClient(getMongoClientSettingsBuilder() + .timeout(rtt + 400, TimeUnit.MILLISECONDS))) { + MongoDatabase database = client.getDatabase(gridFsFileNamespace.getDatabaseName()); + GridFSBucket gridFsBucket = createReaciveGridFsBucket(database, GRID_FS_BUCKET_NAME); + + TestEventPublisher<ByteBuffer> eventPublisher = new TestEventPublisher<>(); + TestSubscriber<ObjectId> testSubscriber = new TestSubscriber<>(); + + gridFsBucket.uploadFromPublisher("filename", eventPublisher.getEventStream()) + .subscribe(testSubscriber); + + //when + eventPublisher.sendEvent(ByteBuffer.wrap(new byte[]{0x01, 0x02, 0x03, 0x04})); + testSubscriber.requestMore(1); + /* + By prose spec definition we have to abort the GridFSUploadStream. + However, in Reactive Streams that is equivalent to cancelling the subscription, which propagates a cancellation signal.
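+ Cancelling the subscription makes the driver abort the upload and clean up any chunks already written, which is what issues the "delete" command that the fail point above blocks for longer than the configured timeout.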
+ */ + testSubscriber.cancelSubscription(); + + //then + Throwable droppedError = droppedErrorFuture.get(TIMEOUT_DURATION.toMillis(), TimeUnit.MILLISECONDS); + Throwable commandError = droppedError.getCause(); + assertInstanceOf(MongoOperationTimeoutException.class, commandError); + + CommandFailedEvent deleteFailedEvent = commandListener.getCommandFailedEvent("delete"); + assertNotNull(deleteFailedEvent); + + assertEquals(commandError, deleteFailedEvent.getThrowable()); + // When the subscription is cancelled, we should not receive any more events. + testSubscriber.assertNoTerminalEvent(); + } + } + + /** + * Not a prose spec test. However, it is an additional test case for better coverage. + * A resumable error (code 7 with the ResumableChangeStreamError label) on the first getMore makes the publisher kill the cursor and retry with a fresh aggregate; the single timeoutMS budget must cover that whole resume attempt, so the second getMore fails with MongoOperationTimeoutException. + */ + @DisplayName("TimeoutMS applies to full resume attempt in a next call") + @Test + public void testTimeoutMSAppliesToFullResumeAttemptInNextCall() { + assumeTrue(serverVersionAtLeast(4, 4)); + assumeTrue(isDiscoverableReplicaSet()); + + //given + long rtt = ClusterFixture.getPrimaryRTT(); + try (MongoClient client = createReactiveClient(getMongoClientSettingsBuilder() + .timeout(rtt + 500, TimeUnit.MILLISECONDS))) { + + MongoNamespace namespace = generateNamespace(); + MongoCollection<Document> collection = client.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName()).withReadPreference(ReadPreference.primary()); + + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"failCommand\"," + + " mode: { times: 1}," + + " data: {" + + " failCommands: [\"getMore\" ]," + + " errorCode: 7," + + " errorLabels: [\"ResumableChangeStreamError\" ]" + + " }" + + "}"); + + //when + ChangeStreamPublisher<Document> documentChangeStreamPublisher = collection.watch( + singletonList(Document.parse("{ '$match': {'operationType': 'insert'}}"))); + + Assertions.assertThrows(MongoOperationTimeoutException.class, + () -> Flux.from(documentChangeStreamPublisher).blockFirst(TIMEOUT_DURATION)); + //then + sleep(200); //let the publisher invalidate the cursor after the error. + List<CommandStartedEvent> commandStartedEvents = commandListener.getCommandStartedEvents(); + + List<String> expectedCommandNames = Arrays.asList("aggregate", "getMore", "killCursors", "aggregate", "getMore", "killCursors"); + assertCommandStartedEventsInOrder(expectedCommandNames, commandStartedEvents); + + List<CommandFailedEvent> commandFailedEvents = commandListener.getCommandFailedEvents(); + assertEquals(2, commandFailedEvents.size()); + + CommandFailedEvent firstGetMoreFailedEvent = commandFailedEvents.get(0); + assertEquals("getMore", firstGetMoreFailedEvent.getCommandName()); + assertInstanceOf(MongoCommandException.class, firstGetMoreFailedEvent.getThrowable()); + + CommandFailedEvent secondGetMoreFailedEvent = commandFailedEvents.get(1); + assertEquals("getMore", secondGetMoreFailedEvent.getCommandName()); + assertInstanceOf(MongoOperationTimeoutException.class, secondGetMoreFailedEvent.getThrowable()); + } + } + + /** + * Not a prose spec test. However, it is an additional test case for better coverage.
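+ * Here the fail point blocks the initial aggregate for longer than timeoutMS, so the operation must fail before any cursor is created: exactly one command is started and it fails with MongoOperationTimeoutException.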
+ */ + @DisplayName("TimeoutMS applied to initial aggregate") + @Test + public void testTimeoutMSAppliedToInitialAggregate() { + assumeTrue(serverVersionAtLeast(4, 4)); + assumeTrue(isDiscoverableReplicaSet()); + + //given + long rtt = ClusterFixture.getPrimaryRTT(); + try (MongoClient client = createReactiveClient(getMongoClientSettingsBuilder() + .timeout(rtt + 200, TimeUnit.MILLISECONDS))) { + + MongoNamespace namespace = generateNamespace(); + MongoCollection collection = client.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName()).withReadPreference(ReadPreference.primary()); + ChangeStreamPublisher documentChangeStreamPublisher = collection.watch( + singletonList(Document.parse("{ '$match': {'operationType': 'insert'}}"))) + .fullDocument(FullDocument.UPDATE_LOOKUP); + + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"failCommand\"," + + " mode: { times: 1}," + + " data: {" + + " failCommands: [\"aggregate\" ]," + + " blockConnection: true," + + " blockTimeMS: " + (rtt + 201) + + " }" + + "}"); + + //when + Assertions.assertThrows(MongoOperationTimeoutException.class, + () -> Flux.from(documentChangeStreamPublisher).blockFirst(TIMEOUT_DURATION)); + + //We do not expect cursor to have been created. However, publisher closes cursor asynchronously, thus we give it some time + // to make sure that cursor has not been closed (which would indicate that it was created). + sleep(200); + + //then + List commandStartedEvents = commandListener.getCommandStartedEvents(); + assertEquals(1, commandStartedEvents.size()); + assertEquals("aggregate", commandStartedEvents.get(0).getCommandName()); + assertOnlyOneCommandTimeoutFailure("aggregate"); + } + } + + /** + * Not a prose spec test. However, it is additional test case for better coverage. 
+ */ + @DisplayName("TimeoutMS is refreshed for getMore if maxAwaitTimeMS is not set") + @Test + public void testTimeoutMsRefreshedForGetMoreWhenMaxAwaitTimeMsNotSet() { + assumeTrue(serverVersionAtLeast(4, 4)); + assumeTrue(isDiscoverableReplicaSet()); + + //given + BsonTimestamp startTime = new BsonTimestamp((int) Instant.now().getEpochSecond(), 0); + collectionHelper.create(namespace.getCollectionName(), new CreateCollectionOptions()); + sleep(2000); + + + long rtt = ClusterFixture.getPrimaryRTT(); + try (MongoClient client = createReactiveClient(getMongoClientSettingsBuilder() + .timeout(rtt + 300, TimeUnit.MILLISECONDS))) { + + MongoCollection collection = client.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName()).withReadPreference(ReadPreference.primary()); + + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"failCommand\"," + + " mode: { times: 3}," + + " data: {" + + " failCommands: [\"getMore\", \"aggregate\"]," + + " blockConnection: true," + + " blockTimeMS: " + (rtt + 200) + + " }" + + "}"); + + collectionHelper.insertDocuments(WriteConcern.MAJORITY, + BsonDocument.parse("{x: 1}"), + BsonDocument.parse("{x: 2}"), + + BsonDocument.parse("{x: 3}"), + BsonDocument.parse("{x: 4}"), + + BsonDocument.parse("{x: 5}"), + BsonDocument.parse("{x: 6}")); + + //when + ChangeStreamPublisher documentChangeStreamPublisher = collection.watch() + .startAtOperationTime(startTime); + StepVerifier.create(documentChangeStreamPublisher, 2) + //then + .expectNextCount(2) + .thenAwait(Duration.ofMillis(300)) + .thenRequest(2) + .expectNextCount(2) + .thenAwait(Duration.ofMillis(300)) + .thenRequest(2) + .expectNextCount(2) + .thenAwait(Duration.ofMillis(300)) + .thenRequest(2) + .expectError(MongoOperationTimeoutException.class) + .verify(); + + sleep(500); //let publisher invalidate the cursor after the error. + + List commandStartedEvents = commandListener.getCommandStartedEvents(); + List expectedCommandNames = Arrays.asList("aggregate", "getMore", "getMore", "getMore", "killCursors"); + assertCommandStartedEventsInOder(expectedCommandNames, commandStartedEvents); + assertOnlyOneCommandTimeoutFailure("getMore"); + } + } + + /** + * Not a prose spec test. However, it is additional test case for better coverage. 
+ */ + @DisplayName("TimeoutMS is refreshed for getMore if maxAwaitTimeMS is set") + @Test + public void testTimeoutMsRefreshedForGetMoreWhenMaxAwaitTimeMsSet() { + assumeTrue(serverVersionAtLeast(4, 4)); + assumeTrue(isDiscoverableReplicaSet()); + + //given + BsonTimestamp startTime = new BsonTimestamp((int) Instant.now().getEpochSecond(), 0); + collectionHelper.create(namespace.getCollectionName(), new CreateCollectionOptions()); + sleep(2000); + + long rtt = ClusterFixture.getPrimaryRTT(); + try (MongoClient client = createReactiveClient(getMongoClientSettingsBuilder() + .timeout(rtt + 300, TimeUnit.MILLISECONDS))) { + + MongoCollection collection = client.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName()) + .withReadPreference(ReadPreference.primary()); + + collectionHelper.runAdminCommand("{" + + " configureFailPoint: \"failCommand\"," + + " mode: { times: 2}," + + " data: {" + + " failCommands: [\"aggregate\", \"getMore\"]," + + " blockConnection: true," + + " blockTimeMS: " + (rtt + 200) + + " }" + + "}"); + + + collectionHelper.insertDocuments(WriteConcern.MAJORITY, + BsonDocument.parse("{x: 1}"), + BsonDocument.parse("{x: 2}"), + + BsonDocument.parse("{x: 3}"), + BsonDocument.parse("{x: 4}")); + + //when + ChangeStreamPublisher documentChangeStreamPublisher = collection.watch() + .maxAwaitTime(1, TimeUnit.MILLISECONDS) + .startAtOperationTime(startTime); + StepVerifier.create(documentChangeStreamPublisher, 2) + //then + .expectNextCount(2) + .thenAwait(Duration.ofMillis(600)) + .thenRequest(2) + .expectNextCount(2) + .thenCancel() + .verify(); + + sleep(500); //let publisher invalidate the cursor after the error. + + List commandStartedEvents = commandListener.getCommandStartedEvents(); + List expectedCommandNames = Arrays.asList("aggregate", "getMore", "killCursors"); + assertCommandStartedEventsInOder(expectedCommandNames, commandStartedEvents); + } + } + + /** + * Not a prose spec test. However, it is additional test case for better coverage. + */ + @DisplayName("TimeoutMS is honored for next operation when several getMore executed internally") + @Test + public void testTimeoutMsISHonoredForNnextOperationWhenSeveralGetMoreExecutedInternally() { + assumeTrue(serverVersionAtLeast(4, 4)); + assumeTrue(isDiscoverableReplicaSet()); + + //given + long rtt = ClusterFixture.getPrimaryRTT(); + try (MongoClient client = createReactiveClient(getMongoClientSettingsBuilder() + .timeout(rtt + 2500, TimeUnit.MILLISECONDS))) { + + MongoCollection collection = client.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName()).withReadPreference(ReadPreference.primary()); + + //when + ChangeStreamPublisher documentChangeStreamPublisher = collection.watch(); + StepVerifier.create(documentChangeStreamPublisher, 2) + //then + .expectError(MongoOperationTimeoutException.class) + .verify(); + + sleep(200); //let publisher invalidate the cursor after the error. + + List commandStartedEvents = commandListener.getCommandStartedEvents(); + assertCommandStartedEventsInOder(Arrays.asList("aggregate", "getMore", "getMore", "getMore", "killCursors"), + commandStartedEvents); + assertOnlyOneCommandTimeoutFailure("getMore"); + } + } + + private static void assertCommandStartedEventsInOder(final List expectedCommandNames, + final List commandStartedEvents) { + assertEquals(expectedCommandNames.size(), commandStartedEvents.size(), "Expected: " + expectedCommandNames + ". 
Actual: " + + commandStartedEvents.stream() + .map(CommandStartedEvent::getCommand) + .map(BsonDocument::toJson) + .collect(Collectors.toList())); + + for (int i = 0; i < expectedCommandNames.size(); i++) { + CommandStartedEvent commandStartedEvent = commandStartedEvents.get(i); + + assertEquals(expectedCommandNames.get(i), commandStartedEvent.getCommandName()); + } + } + + private void assertOnlyOneCommandTimeoutFailure(final String command) { + List commandFailedEvents = commandListener.getCommandFailedEvents(); + assertEquals(1, commandFailedEvents.size()); + + CommandFailedEvent failedAggregateCommandEvent = commandFailedEvents.get(0); + assertEquals(command, commandFailedEvents.get(0).getCommandName()); + assertInstanceOf(MongoOperationTimeoutException.class, failedAggregateCommandEvent.getThrowable()); + } + + @Override + @BeforeEach + public void setUp() { + super.setUp(); + SyncMongoClient.enableSleepAfterSessionClose(postSessionCloseSleep()); + } + + @Override + @AfterEach + public void tearDown() throws InterruptedException { + super.tearDown(); + SyncMongoClient.disableSleep(); + } + + @Override + protected int postSessionCloseSleep() { + return 256; + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ConnectivityTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ConnectivityTest.java new file mode 100644 index 00000000000..1a0500c7980 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ConnectivityTest.java @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.ConnectionString; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import reactor.core.publisher.Mono; + +import java.util.List; + +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; +import static com.mongodb.client.ConnectivityTestHelper.LEGACY_HELLO_COMMAND; +import static com.mongodb.reactivestreams.client.Fixture.getMongoClientSettingsBuilder; + +public class ConnectivityTest { + // the test succeeds if no exception is thrown, and fails otherwise + @ParameterizedTest(name = "{1}") + @MethodSource("com.mongodb.client.ConnectivityTestHelper#getConnectivityTestArguments") + public void testConnectivity(final ConnectionString connectionString, @SuppressWarnings("unused") final List<String> hosts) { + try (MongoClient client = MongoClients.create(getMongoClientSettingsBuilder(connectionString).build())) { + // test that a command that doesn't require auth completes normally + Mono.from(client.getDatabase("admin").runCommand(LEGACY_HELLO_COMMAND)).block(TIMEOUT_DURATION); + + // test that a command that requires auth completes normally + Mono.from(client.getDatabase("test").getCollection("test").estimatedDocumentCount()).block(TIMEOUT_DURATION); + } + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ContextProviderTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ContextProviderTest.java new file mode 100644 index 00000000000..90529171219 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ContextProviderTest.java @@ -0,0 +1,227 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.ContextProvider; +import com.mongodb.RequestContext; +import com.mongodb.WriteConcern; +import com.mongodb.event.CommandFailedEvent; +import com.mongodb.event.CommandListener; +import com.mongodb.event.CommandStartedEvent; +import com.mongodb.event.CommandSucceededEvent; +import com.mongodb.lang.Nullable; +import org.bson.Document; +import org.junit.jupiter.api.Test; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import static com.mongodb.ClusterFixture.getDefaultDatabaseName; +import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static com.mongodb.client.model.Updates.inc; +import static java.util.Arrays.asList; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.Mockito.mock; + +public class ContextProviderTest { + + @Test + public void shouldThrowIfContextProviderIsNotReactiveContextProvider() { + assertThrows(IllegalArgumentException.class, () -> MongoClients.create(getMongoClientSettingsBuilder() + .contextProvider(new ContextProvider() {}) + .build())); + } + + @Test + public void shouldPropagateExceptionFromContextProvider() { + try (MongoClient client = MongoClients.create(getMongoClientSettingsBuilder() + .contextProvider((ReactiveContextProvider) subscriber -> { + throw new RuntimeException(); + }) + .build())) { + + assertThrows(RuntimeException.class, () -> Mono.from(client.listDatabaseNames()).block()); + } + } + + @Test + public void contextShouldBeNullByDefaultInCommandEvents() { + + TestCommandListener commandListener = new TestCommandListener(null); + try (MongoClient client = MongoClients.create(getMongoClientSettingsBuilder() + .addCommandListener(commandListener) + .build())) { + + // given + MongoCollection<Document> collection = client.getDatabase(getDefaultDatabaseName()) + .getCollection("ContextProviderTest"); + Mono.from(collection.drop()).block(); + Mono.from(collection.insertMany(asList(new Document(), new Document(), new Document(), new Document()))).block(); + commandListener.reset(); + + // when + Mono.from(collection.countDocuments()).block(); + + // then + assertEquals(1, commandListener.numCommandStartedEventsWithExpectedContext); + assertEquals(1, commandListener.numCommandSucceededEventsWithExpectedContext); + } + } + + @Test + public void contextShouldBeAvailableInCommandEvents() { + RequestContext requestContext = mock(RequestContext.class); + + TestCommandListener commandListener = new TestCommandListener(requestContext); + try (MongoClient client = MongoClients.create(getMongoClientSettingsBuilder() + .contextProvider((ReactiveContextProvider) subscriber -> requestContext) + .addCommandListener(commandListener) + .build())) { + + // given + MongoCollection<Document> collection = client.getDatabase(getDefaultDatabaseName()) + .getCollection("ContextProviderTest"); + Mono.from(collection.drop()).block(); + Mono.from(collection.insertMany(asList(new Document(), new Document(), new Document(), new Document()))).block(); + commandListener.reset(); + + // when + Mono.from(collection.countDocuments()).block(); + + // then + assertEquals(1, commandListener.numCommandStartedEventsWithExpectedContext); + assertEquals(1, commandListener.numCommandSucceededEventsWithExpectedContext); + + // given + commandListener.reset(); + Document document = new Document(); + + // when + Mono.from(collection.insertOne(document)).block(); + + // then + assertEquals(1, commandListener.numCommandStartedEventsWithExpectedContext); + assertEquals(1, commandListener.numCommandSucceededEventsWithExpectedContext); + + // given + commandListener.reset(); + + // when + Mono.from(collection.updateOne(document, inc("x", 1))).block(); + + // then + assertEquals(1, commandListener.numCommandStartedEventsWithExpectedContext); + assertEquals(1, commandListener.numCommandSucceededEventsWithExpectedContext); + + // given + commandListener.reset(); + Document documentTwo = new Document(); + + // when + Mono.from(collection.withWriteConcern(WriteConcern.UNACKNOWLEDGED).insertOne(documentTwo)).block(); + + // then + assertEquals(1, commandListener.numCommandStartedEventsWithExpectedContext); + assertEquals(1, commandListener.numCommandSucceededEventsWithExpectedContext); + + // given + commandListener.reset(); + + // when + Mono.from(collection.withWriteConcern(WriteConcern.UNACKNOWLEDGED).updateOne(documentTwo, inc("x", 1))) + .block(); + + // then + assertEquals(1, commandListener.numCommandStartedEventsWithExpectedContext); + assertEquals(1, commandListener.numCommandSucceededEventsWithExpectedContext); + + // given + commandListener.reset(); + + // when + Mono.from(collection.withWriteConcern(WriteConcern.UNACKNOWLEDGED).deleteOne(documentTwo)).block(); + + // then + assertEquals(1, commandListener.numCommandStartedEventsWithExpectedContext); + assertEquals(1, commandListener.numCommandSucceededEventsWithExpectedContext); + + // given + commandListener.reset(); + + // when + Flux<Document> findFlux = Flux.from(collection.find().batchSize(4)); + findFlux.blockLast(); + + // then + assertEquals(2, commandListener.numCommandStartedEventsWithExpectedContext); + assertEquals(2, commandListener.numCommandSucceededEventsWithExpectedContext); + + // given + commandListener.reset(); + + // when + try { + Mono.from(client.getDatabase("admin").runCommand(new Document("notRealCommand", 1))).block(); + fail(); + } catch (Exception e) { + // then + assertEquals(1, commandListener.numCommandStartedEventsWithExpectedContext); + assertEquals(1, commandListener.numCommandFailedEventsWithExpectedContext); + } + } + } + + private static final class TestCommandListener implements CommandListener { + private int numCommandStartedEventsWithExpectedContext; + private int numCommandSucceededEventsWithExpectedContext; + private int numCommandFailedEventsWithExpectedContext; + private final RequestContext expectedContext; + + private TestCommandListener(@Nullable final RequestContext expectedContext) { + this.expectedContext = expectedContext; + } + + public void reset() { + numCommandStartedEventsWithExpectedContext = 0; + numCommandSucceededEventsWithExpectedContext = 0; + numCommandFailedEventsWithExpectedContext = 0; + } + + @Override + public void commandStarted(final CommandStartedEvent event) { + if (event.getRequestContext() == expectedContext) { + numCommandStartedEventsWithExpectedContext++; + } + } + + @Override + public void commandSucceeded(final CommandSucceededEvent event) { + if (event.getRequestContext() == expectedContext) { + numCommandSucceededEventsWithExpectedContext++; + } + } + + @Override + public void commandFailed(final CommandFailedEvent event) { + if (event.getRequestContext() == expectedContext) { + numCommandFailedEventsWithExpectedContext++; + } + } + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/CrudProseTest.java
b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/CrudProseTest.java new file mode 100644 index 00000000000..07afb94c8db --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/CrudProseTest.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.reactivestreams.client; + +import com.mongodb.MongoClientSettings; +import com.mongodb.client.MongoClient; +import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; + +/** + * See the CRUD Prose Tests in the MongoDB specifications repository. + */ +final class CrudProseTest extends com.mongodb.client.CrudProseTest { + @Override + protected MongoClient createMongoClient(final MongoClientSettings.Builder mongoClientSettingsBuilder) { + return new SyncMongoClient(mongoClientSettingsBuilder); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/DatabaseTestCase.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/DatabaseTestCase.java new file mode 100644 index 00000000000..4a604e4ca61 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/DatabaseTestCase.java @@ -0,0 +1,50 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.reactivestreams.client; + +import org.bson.Document; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; + +import static com.mongodb.ClusterFixture.getDefaultDatabaseName; +import static com.mongodb.reactivestreams.client.Fixture.drop; +import static com.mongodb.reactivestreams.client.Fixture.getMongoClient; + +public class DatabaseTestCase { + //For ease of use and readability, in this specific case we'll allow protected variables + //CHECKSTYLE:OFF + protected MongoClient client; + protected MongoDatabase database; + protected MongoCollection<Document> collection; + //CHECKSTYLE:ON + + @BeforeEach + public void setUp() { + client = getMongoClient(); + database = client.getDatabase(getDefaultDatabaseName()); + collection = database.getCollection(getClass().getName()); + drop(collection.getNamespace()); + } + + @AfterEach + public void tearDown() { + if (collection != null) { + drop(collection.getNamespace()); + } + } + +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/DnsConfigurationTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/DnsConfigurationTest.java new file mode 100644 index 00000000000..4b09d23b4a1 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/DnsConfigurationTest.java @@ -0,0 +1,29 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.MongoClientSettings; +import com.mongodb.client.AbstractDnsConfigurationTest; +import com.mongodb.client.MongoClient; +import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; + +public class DnsConfigurationTest extends AbstractDnsConfigurationTest { + @Override + protected MongoClient createMongoClient(final MongoClientSettings settings) { + return new SyncMongoClient(settings); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ExplainTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ExplainTest.java new file mode 100644 index 00000000000..3bc193fb2b0 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ExplainTest.java @@ -0,0 +1,47 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.MongoClientSettings; +import com.mongodb.client.AbstractExplainTest; +import com.mongodb.client.MongoClient; +import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; +import org.junit.Test; + +import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT_PROVIDER; +import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.assertContextPassedThrough; + +public class ExplainTest extends AbstractExplainTest { + @Override + protected MongoClient createMongoClient(final MongoClientSettings settings) { + return new SyncMongoClient(MongoClientSettings.builder(settings).contextProvider(CONTEXT_PROVIDER)); + } + + @Test + @Override + public void testExplainOfFind() { + super.testExplainOfFind(); + assertContextPassedThrough(); + } + + @Test + @Override + public void testExplainOfAggregateWithNewResponseStructure() { + super.testExplainOfAggregateWithNewResponseStructure(); + assertContextPassedThrough(); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/Fixture.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/Fixture.java new file mode 100644 index 00000000000..2881b47e38e --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/Fixture.java @@ -0,0 +1,208 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.ClusterFixture; +import com.mongodb.ConnectionString; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoCommandException; +import com.mongodb.MongoNamespace; +import com.mongodb.MongoTimeoutException; +import com.mongodb.connection.ClusterType; +import com.mongodb.connection.ServerVersion; +import com.mongodb.reactivestreams.client.internal.MongoClientImpl; +import org.bson.Document; +import org.bson.conversions.Bson; +import reactor.core.publisher.Mono; + +import java.util.Arrays; +import java.util.List; + +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; +import static com.mongodb.ClusterFixture.getServerApi; +import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException; +import static java.lang.Thread.sleep; + +/** + * Helper class for asynchronous tests. 
+ */ +public final class Fixture { + private static MongoClientImpl mongoClient; + private static ServerVersion serverVersion; + private static ClusterType clusterType; + + private Fixture() { + } + + public static synchronized MongoClient getMongoClient() { + if (mongoClient == null) { + mongoClient = (MongoClientImpl) MongoClients.create(getMongoClientSettings()); + serverVersion = getServerVersion(); + clusterType = getClusterType(); + Runtime.getRuntime().addShutdownHook(new ShutdownHook()); + } + return mongoClient; + } + + public static MongoClientSettings getMongoClientSettings() { + return getMongoClientSettingsBuilder().build(); + } + + public static MongoClientSettings.Builder getMongoClientSettingsBuilder() { + return getMongoClientSettingsBuilder(ClusterFixture.getConnectionString()); + } + + public static MongoClientSettings.Builder getMongoClientSettingsBuilder(final ConnectionString connectionString) { + MongoClientSettings.Builder builder = MongoClientSettings.builder(); + if (getServerApi() != null) { + builder.serverApi(getServerApi()); + } + return builder.applyConnectionString(connectionString); + } + + public static String getDefaultDatabaseName() { + return ClusterFixture.getDefaultDatabaseName(); + } + + public static MongoDatabase getDefaultDatabase() { + return getMongoClient().getDatabase(getDefaultDatabaseName()); + } + + public static MongoCollection<Document> initializeCollection(final MongoNamespace namespace) { + MongoDatabase database = getMongoClient().getDatabase(namespace.getDatabaseName()); + try { + Mono.from(database.runCommand(new Document("drop", namespace.getCollectionName()))).block(TIMEOUT_DURATION); + } catch (MongoCommandException e) { + if (!e.getErrorMessage().contains("ns not found")) { + throw e; + } + } catch (Throwable t) { + throw new RuntimeException(t); + } + return database.getCollection(namespace.getCollectionName()); + } + + public static void dropDatabase(final String name) { + if (name == null) { + return; + } + try { + Mono.from(getMongoClient().getDatabase(name).runCommand(new Document("dropDatabase", 1))).block(TIMEOUT_DURATION); + } catch (MongoCommandException e) { + if (!e.getErrorMessage().contains("ns not found")) { + throw e; + } + } catch (Throwable t) { + throw new RuntimeException(t); + } + } + + public static void drop(final MongoNamespace namespace) { + try { + Mono.from(getMongoClient().getDatabase(namespace.getDatabaseName()) + .runCommand(new Document("drop", namespace.getCollectionName()))).block(TIMEOUT_DURATION); + } catch (MongoCommandException e) { + if (!e.getErrorMessage().contains("ns not found")) { + throw e; + } + } catch (Throwable t) { + throw new RuntimeException(t); + } + } + + public static synchronized void waitForLastServerSessionPoolRelease() { + if (mongoClient != null) { + long startTime = System.currentTimeMillis(); + long sessionInUseCount = getSessionInUseCount(); + while (sessionInUseCount > 0) { + try { + if (System.currentTimeMillis() > startTime + TIMEOUT_DURATION.toMillis()) { + throw new MongoTimeoutException("Timed out waiting for server session pool in use count to drop to 0.
Now at: " + + sessionInUseCount); + } + sleep(10); + sessionInUseCount = getSessionInUseCount(); + } catch (InterruptedException e) { + throw interruptAndCreateMongoInterruptedException("Interrupted", e); + } + } + } + } + + private static long getSessionInUseCount() { + return mongoClient.getServerSessionPool().getInUseCount(); + } + + public static boolean serverVersionAtLeast(final int majorVersion, final int minorVersion) { + getMongoClient(); + return serverVersion.compareTo(new ServerVersion(Arrays.asList(majorVersion, minorVersion, 0))) >= 0; + } + + public static boolean isReplicaSet() { + getMongoClient(); + return clusterType == ClusterType.REPLICA_SET; + } + + public static synchronized ConnectionString getConnectionString() { + return ClusterFixture.getConnectionString(); + } + + public static MongoClientSettings.Builder getMongoClientBuilderFromConnectionString() { + MongoClientSettings.Builder builder = MongoClientSettings.builder() + .applyConnectionString(getConnectionString()); + if (getServerApi() != null) { + builder.serverApi(getServerApi()); + } + return builder; + } + + @SuppressWarnings("unchecked") + private static ServerVersion getServerVersion() { + Document response = runAdminCommand(new Document("buildInfo", 1)); + List versionArray = (List) response.get("versionArray"); + return new ServerVersion(versionArray.subList(0, 3)); + } + + private static ClusterType getClusterType() { + Document response = runAdminCommand(new Document("ismaster", 1)); + if (response.containsKey("setName")) { + return ClusterType.REPLICA_SET; + } else if ("isdbgrid".equals(response.getString("msg"))) { + return ClusterType.SHARDED; + } else { + return ClusterType.STANDALONE; + } + } + + private static Document runAdminCommand(final Bson command) { + return Mono.from(getMongoClient().getDatabase("admin") + .runCommand(command)).block(TIMEOUT_DURATION); + } + + static class ShutdownHook extends Thread { + @Override + public void run() { + try { + dropDatabase(getDefaultDatabaseName()); + } catch (Exception e) { + // ignore + } + mongoClient.close(); + mongoClient = null; + } + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/FunctionalSpecification.groovy b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/FunctionalSpecification.groovy new file mode 100644 index 00000000000..5729cfed52f --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/FunctionalSpecification.groovy @@ -0,0 +1,67 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client + +import com.mongodb.MongoNamespace +import org.bson.Document +import spock.lang.Specification + +import static Fixture.dropDatabase +import static Fixture.initializeCollection +import static com.mongodb.ClusterFixture.getDefaultDatabaseName +import static com.mongodb.reactivestreams.client.Fixture.drop +import static com.mongodb.reactivestreams.client.Fixture.getDefaultDatabase +import static com.mongodb.reactivestreams.client.Fixture.waitForLastServerSessionPoolRelease + +class FunctionalSpecification extends Specification { + protected MongoDatabase database + protected MongoCollection<Document> collection + + def setupSpec() { + dropDatabase(getDefaultDatabaseName()) + } + + def cleanupSpec() { + dropDatabase(getDefaultDatabaseName()) + } + + def setup() { + database = getDefaultDatabase() + collection = initializeCollection(new MongoNamespace(database.getName(), getClass().getName())) + drop(collection.getNamespace()) + } + + def cleanup() { + if (collection != null) { + drop(collection.getNamespace()) + } + + waitForLastServerSessionPoolRelease() + } + + String getDatabaseName() { + database.getName() + } + + String getCollectionName() { + collection.getNamespace().getCollectionName() + } + + MongoNamespace getNamespace() { + new MongoNamespace(getDatabaseName(), getCollectionName()) + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/MongoClientListenerRegistrationSpecification.groovy b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/MongoClientListenerRegistrationSpecification.groovy new file mode 100644 index 00000000000..fc995d14274 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/MongoClientListenerRegistrationSpecification.groovy @@ -0,0 +1,93 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.reactivestreams.client + +import com.mongodb.event.ClusterListener +import com.mongodb.event.CommandListener +import com.mongodb.event.ConnectionPoolListener +import com.mongodb.event.ServerListener +import com.mongodb.event.ServerMonitorListener +import org.bson.Document +import reactor.core.publisher.Mono +import spock.lang.Ignore + +import java.util.concurrent.TimeUnit + +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION + +class MongoClientListenerRegistrationSpecification extends FunctionalSpecification { + + @Ignore + def 'should register event listeners'() { + given: + def clusterListener = Mock(ClusterListener) { + (1.._) * _ + } + def commandListener = Mock(CommandListener) { + (1.._) * _ + } + def connectionPoolListener = Mock(ConnectionPoolListener) { + (1.._) * _ + } + def serverListener = Mock(ServerListener) { + (1.._) * _ + } + def serverMonitorListener = Mock(ServerMonitorListener) { + (1.._) * _ + } + + when: + def builder = Fixture.mongoClientBuilderFromConnectionString + builder.applyToClusterSettings { it.addClusterListener(clusterListener) } + .applyToConnectionPoolSettings { it.addConnectionPoolListener(connectionPoolListener) } + .applyToServerSettings { + it.addServerListener(serverListener) + it.heartbeatFrequency(1, TimeUnit.MILLISECONDS) + it.addServerMonitorListener(serverMonitorListener) + } + .addCommandListener(commandListener) + def settings = builder.build() + def client = MongoClients.create(settings) + + then: + Mono.from(client.getDatabase('admin').runCommand(new Document('ping', 1))).block(TIMEOUT_DURATION) + + cleanup: + client?.close() + } + + def 'should register multiple command listeners'() { + given: + def first = Mock(CommandListener) + def second = Mock(CommandListener) + def client = MongoClients.create(Fixture.mongoClientBuilderFromConnectionString + .addCommandListener(first).addCommandListener(second).build()) + + when: + Mono.from(client.getDatabase('admin').runCommand(new Document('ping', 1))).block(TIMEOUT_DURATION) + + then: + 1 * first.commandStarted(_) + 1 * second.commandStarted(_) + 1 * first.commandSucceeded(_) + 1 * second.commandSucceeded(_) + + cleanup: + client?.close() + } + +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/MongoClientSessionSpecification.groovy b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/MongoClientSessionSpecification.groovy new file mode 100644 index 00000000000..ae35b20cb3b --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/MongoClientSessionSpecification.groovy @@ -0,0 +1,334 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client + +import com.mongodb.ClientSessionOptions +import com.mongodb.MongoClientException +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import com.mongodb.TransactionOptions +import com.mongodb.WriteConcern +import com.mongodb.event.CommandStartedEvent +import com.mongodb.internal.connection.TestCommandListener +import com.mongodb.spock.Slow +import org.bson.BsonBinarySubType +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonTimestamp +import org.bson.Document +import org.junit.Assert +import reactor.core.publisher.Mono +import spock.lang.IgnoreIf + +import java.util.concurrent.TimeUnit + +import static Fixture.getMongoClient +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet +import static com.mongodb.reactivestreams.client.Fixture.getDefaultDatabase +import static com.mongodb.reactivestreams.client.Fixture.getMongoClientBuilderFromConnectionString + +class MongoClientSessionSpecification extends FunctionalSpecification { + + def 'should throw IllegalArgumentException if options are null'() { + when: + getMongoClient().startSession(null) + + then: + thrown(IllegalArgumentException) + } + + def 'should create session with correct defaults'() { + when: + def options = ClientSessionOptions.builder().build() + def clientSession = startSession(options) + + then: + clientSession != null + clientSession.getOriginator() == getMongoClient() + clientSession.isCausallyConsistent() + clientSession.getOptions() == ClientSessionOptions.builder() + .defaultTransactionOptions(TransactionOptions.builder() + .readConcern(ReadConcern.DEFAULT) + .writeConcern(WriteConcern.ACKNOWLEDGED) + .readPreference(ReadPreference.primary()) + .build()) + .build() + clientSession.getClusterTime() == null + clientSession.getOperationTime() == null + clientSession.getServerSession() != null + + cleanup: + clientSession.close() + } + + def 'cluster time should advance'() { + given: + def firstOperationTime = new BsonTimestamp(42, 1) + def secondOperationTime = new BsonTimestamp(52, 1) + def thirdOperationTime = new BsonTimestamp(22, 1) + def firstClusterTime = new BsonDocument('clusterTime', firstOperationTime) + def secondClusterTime = new BsonDocument('clusterTime', secondOperationTime) + def olderClusterTime = new BsonDocument('clusterTime', thirdOperationTime) + + when: + def clientSession = startSession(ClientSessionOptions.builder().build()) + + then: + clientSession.getClusterTime() == null + + when: + clientSession.advanceClusterTime(null) + + then: + clientSession.getClusterTime() == null + + when: + clientSession.advanceClusterTime(firstClusterTime) + + then: + clientSession.getClusterTime() == firstClusterTime + + when: + clientSession.advanceClusterTime(secondClusterTime) + + then: + clientSession.getClusterTime() == secondClusterTime + + when: + clientSession.advanceClusterTime(olderClusterTime) + + then: + clientSession.getClusterTime() == secondClusterTime + + cleanup: + clientSession.close() + } + + def 'operation time should advance'() { + given: + def firstOperationTime = new BsonTimestamp(42, 1) + def secondOperationTime = new BsonTimestamp(52, 1) + def olderOperationTime = new BsonTimestamp(22, 1) + + when: + def clientSession = startSession(ClientSessionOptions.builder().build()) + + then: + clientSession.getOperationTime() == null + + when: + clientSession.advanceOperationTime(null) + + then: + clientSession.getOperationTime() == null 
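+ // operation time is monotonic: a newer timestamp advances it, while an older one is ignored, as the assertions below verify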
+ + when: + clientSession.advanceOperationTime(firstOperationTime) + + then: + clientSession.getOperationTime() == firstOperationTime + + when: + clientSession.advanceOperationTime(secondOperationTime) + + then: + clientSession.getOperationTime() == secondOperationTime + + when: + clientSession.advanceOperationTime(olderOperationTime) + + then: + clientSession.getOperationTime() == secondOperationTime + + cleanup: + clientSession.close() + } + + def 'methods that use the session should throw if the session is closed'() { + given: + def options = ClientSessionOptions.builder().build() + def clientSession = startSession(options) + clientSession.close() + + when: + clientSession.getServerSession() + + then: + thrown(IllegalStateException) + + when: + clientSession.advanceOperationTime(new BsonTimestamp(42, 0)) + + then: + thrown(IllegalStateException) + + when: + clientSession.advanceClusterTime(new BsonDocument()) + + then: + thrown(IllegalStateException) + + cleanup: + clientSession.close() + } + + def 'informational methods should not throw if the session is closed'() { + given: + def options = ClientSessionOptions.builder().build() + def clientSession = startSession(options) + clientSession.close() + + when: + clientSession.getOptions() + clientSession.isCausallyConsistent() + clientSession.getClusterTime() + clientSession.getOperationTime() + + then: + true + } + + def 'should apply causally consistent session option to client session'() { + when: + def clientSession = startSession(ClientSessionOptions.builder().causallyConsistent(causallyConsistent).build()) + + then: + clientSession != null + clientSession.isCausallyConsistent() == causallyConsistent + + cleanup: + clientSession.close() + + where: + causallyConsistent << [true, false] + } + + def 'client session should have server session with valid identifier'() { + given: + def clientSession = startSession(ClientSessionOptions.builder().build()) + + when: + def identifier = clientSession.getServerSession().identifier + + then: + identifier.size() == 1 + identifier.containsKey('id') + identifier.get('id').isBinary() + identifier.getBinary('id').getType() == BsonBinarySubType.UUID_STANDARD.value + identifier.getBinary('id').data.length == 16 + + cleanup: + clientSession.close() + } + + def 'should use a default session'() { + given: + def commandListener = new TestCommandListener() + def options = getMongoClientBuilderFromConnectionString().addCommandListener(commandListener).build() + def client = MongoClients.create(options) + + when: + Mono.from(client.getDatabase('admin').runCommand(new BsonDocument('ping', new BsonInt32(1)))).block(TIMEOUT_DURATION) + + then: + commandListener.events.size() == 2 + def pingCommandStartedEvent = commandListener.events.get(0) as CommandStartedEvent + pingCommandStartedEvent.command.containsKey('lsid') + + cleanup: + client?.close() + } + + def 'should throw exception if unacknowledged write used with explicit session'() { + given: + def session = Mono.from(getMongoClient().startSession()).block(TIMEOUT_DURATION) + + when: + Mono.from(getMongoClient().getDatabase(getDatabaseName()).getCollection(getCollectionName()) + .withWriteConcern(WriteConcern.UNACKNOWLEDGED) + .insertOne(session, new Document())) + .block(TIMEOUT_DURATION) + + then: + thrown(MongoClientException) + + cleanup: + session?.close() + } + + + @IgnoreIf({ !isDiscoverableReplicaSet() }) + def 'should ignore unacknowledged write concern when in a transaction'() { + given: + def collection = 
getMongoClient().getDatabase(getDatabaseName()).getCollection(getCollectionName()) + Mono.from(collection.insertOne(new Document())).block(TIMEOUT_DURATION) + + def session = Mono.from(getMongoClient().startSession()).block(TIMEOUT_DURATION) + session.startTransaction() + + when: + Mono.from(collection.withWriteConcern(WriteConcern.UNACKNOWLEDGED).insertOne(session, new Document())).block(TIMEOUT_DURATION) + + then: + noExceptionThrown() + + cleanup: + session.close() + } + + // This test attempts to demonstrate that causal consistency works correctly by inserting a document and then immediately + // searching for that document on a secondary by its _id and failing the test if the document is not found. Without causal consistency + // enabled, the expectation is that the test would eventually fail, since generally the find will execute on the secondary before + // the secondary has a chance to replicate the document. + // This test is inherently racy, as it's possible that the server _does_ replicate fast enough and therefore the test passes anyway + // even if causal consistency was not actually in effect. For that reason the test iterates a number of times in order to increase + // confidence that it's really causal consistency that is causing the test to succeed. + @Slow + def 'should find inserted document on a secondary when causal consistency is enabled'() { + given: + def collection = getDefaultDatabase().getCollection(getCollectionName()) + + expect: + def clientSession = startSession(ClientSessionOptions.builder().causallyConsistent(true).build()) + try { + for (int i = 0; i < 16; i++) { + Document document = new Document('_id', i) + Mono.from(collection.insertOne(clientSession, document)).block(TIMEOUT_DURATION) + Document foundDocument = Mono.from(collection + .withReadPreference(ReadPreference.secondaryPreferred()) // read from secondary if available + .withReadConcern(readConcern) + .find(clientSession, document) + .maxTime(30, TimeUnit.SECONDS) // to avoid the test running forever in case replication is broken + .first() + ).block(TIMEOUT_DURATION) + if (foundDocument == null) { + Assert.fail('Should have found recently inserted document on secondary with causal consistency enabled') + } + } + } finally { + clientSession.close() + } + + where: + readConcern << [ReadConcern.DEFAULT, ReadConcern.LOCAL, ReadConcern.MAJORITY] + } + + static ClientSession startSession(ClientSessionOptions options) { + Mono.from(getMongoClient().startSession(options)).block(TIMEOUT_DURATION) + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/MongoClientsSpecification.groovy b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/MongoClientsSpecification.groovy new file mode 100644 index 00000000000..39bb47395c4 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/MongoClientsSpecification.groovy @@ -0,0 +1,235 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client + +import com.mongodb.MongoClientSettings +import com.mongodb.MongoCompressor +import com.mongodb.MongoCredential +import com.mongodb.ReadConcern +import com.mongodb.ServerAddress +import com.mongodb.WriteConcern +import com.mongodb.connection.TransportSettings +import com.mongodb.reactivestreams.client.internal.MongoClientImpl +import org.bson.Document +import reactor.core.publisher.Mono +import spock.lang.IgnoreIf +import spock.lang.Unroll + +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION +import static com.mongodb.ClusterFixture.connectionString +import static com.mongodb.ClusterFixture.getCredential +import static com.mongodb.ClusterFixture.getSslSettings +import static com.mongodb.ClusterFixture.getServerApi +import static com.mongodb.ReadPreference.primary +import static com.mongodb.ReadPreference.secondaryPreferred +import static java.util.concurrent.TimeUnit.MILLISECONDS + +@IgnoreIf({ getServerApi() != null }) +class MongoClientsSpecification extends FunctionalSpecification { + + def 'should connect'() { + given: + def connectionString = 'mongodb://' + if (getCredential() != null) { + connectionString += (getCredential().getUserName() + ':' + String.valueOf(getCredential().getPassword()) + '@') + } + connectionString += getConnectionString().getHosts()[0] + '/?' + connectionString += 'ssl=' + getSslSettings().isEnabled() + '&' + connectionString += 'sslInvalidHostNameAllowed=' + getSslSettings().isInvalidHostNameAllowed() + + when: + def client = MongoClients.create(connectionString) + Mono.from(client.getDatabase('admin').runCommand(new Document('ping', 1))).block(TIMEOUT_DURATION) + + then: + noExceptionThrown() + + cleanup: + client?.close() + } + + def 'should apply connection string to cluster settings'() { + when: + def client = MongoClients.create('mongodb://localhost,localhost:27018/') as MongoClientImpl + + then: + client.settings.clusterSettings.hosts == [new ServerAddress('localhost'), new ServerAddress('localhost:27018')] + + cleanup: + client?.close() + } + + def 'should apply connection string to credential list'() { + when: + def client = MongoClients.create('mongodb://u:p@localhost/') as MongoClientImpl + + then: + client.settings.credential == MongoCredential.createCredential('u', 'admin', 'p'.toCharArray()) + + cleanup: + client?.close() + } + + def 'should apply connection string to server settings'() { + when: + def client = MongoClients.create('mongodb://localhost/?heartbeatFrequencyMS=50') as MongoClientImpl + + then: + client.settings.serverSettings.getHeartbeatFrequency(MILLISECONDS) == 50 + + cleanup: + client?.close() + } + + def 'should apply connection string to connection pool settings'() { + when: + def client = MongoClients.create('mongodb://localhost/?maxIdleTimeMS=200&maxLifeTimeMS=300') as MongoClientImpl + + then: + client.settings.connectionPoolSettings.getMaxConnectionIdleTime(MILLISECONDS) == 200 + client.settings.connectionPoolSettings.getMaxConnectionLifeTime(MILLISECONDS) == 300 + + cleanup: + client?.close() + } + + def 'should apply connection string to ssl settings'() { + when: + def client = MongoClients.create('mongodb://localhost/?ssl=true&sslInvalidHostNameAllowed=true') as MongoClientImpl + + then: + client.settings.sslSettings.enabled + client.settings.sslSettings.invalidHostNameAllowed + + cleanup: + client?.close() + } + + def 'should apply connection string to
socket settings'() { + when: + def client = MongoClients.create('mongodb://localhost/?connectTimeoutMS=300') as MongoClientImpl + + then: + client.settings.socketSettings.getConnectTimeout(MILLISECONDS) == 300 + + cleanup: + client?.close() + } + + @Unroll + def 'should apply read preference from connection string to settings'() { + when: + def client = MongoClients.create(uri) as MongoClientImpl + + then: + client.settings.getReadPreference() == readPreference + + cleanup: + client?.close() + + where: + uri | readPreference + 'mongodb://localhost/' | primary() + 'mongodb://localhost/?readPreference=secondaryPreferred' | secondaryPreferred() + } + + @Unroll + def 'should apply read concern from connection string to settings'() { + when: + def client = MongoClients.create(uri) as MongoClientImpl + + then: + client.settings.getReadConcern() == readConcern + + cleanup: + client?.close() + + where: + uri | readConcern + 'mongodb://localhost/' | ReadConcern.DEFAULT + 'mongodb://localhost/?readConcernLevel=local' | ReadConcern.LOCAL + } + + @Unroll + def 'should apply write concern from connection string to settings'() { + when: + def client = MongoClients.create(uri) as MongoClientImpl + + then: + client.settings.getWriteConcern() == writeConcern + + cleanup: + client?.close() + + where: + uri | writeConcern + 'mongodb://localhost' | WriteConcern.ACKNOWLEDGED + 'mongodb://localhost/?w=majority' | WriteConcern.MAJORITY + } + + @Unroll + def 'should apply application name from connection string to settings'() { + when: + def client = MongoClients.create(uri) as MongoClientImpl + + then: + client.settings.getApplicationName() == applicationName + + cleanup: + client?.close() + + where: + uri | applicationName + 'mongodb://localhost' | null + 'mongodb://localhost/?appname=app1' | 'app1' + } + + @Unroll + def 'should apply compressors from connection string to settings'() { + when: + def client = MongoClients.create(uri) as MongoClientImpl + + then: + client.settings.getCompressorList() == compressorList + + cleanup: + client?.close() + + where: + uri | compressorList + 'mongodb://localhost' | [] + 'mongodb://localhost/?compressors=zlib' | [MongoCompressor.createZlibCompressor()] + 'mongodb://localhost/?compressors=zstd' | [MongoCompressor.createZstdCompressor()] + } + + def 'should create client with transport settings'() { + given: + def nettySettings = TransportSettings.nettyBuilder().build() + def settings = MongoClientSettings.builder() + .transportSettings(nettySettings) + .build() + + when: + def client = MongoClients.create(settings) + + then: + true + + cleanup: + client?.close() + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/MongoCollectionTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/MongoCollectionTest.java new file mode 100644 index 00000000000..022955a6cf5 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/MongoCollectionTest.java @@ -0,0 +1,50 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.client.AbstractMongoCollectionTest; +import com.mongodb.client.MongoDatabase; +import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; +import org.junit.jupiter.api.AfterAll; + +import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; + +public class MongoCollectionTest extends AbstractMongoCollectionTest { + + private static com.mongodb.client.MongoClient mongoClient; + + @Override + protected MongoDatabase getDatabase(final String databaseName) { + return createMongoClient().getDatabase(databaseName); + } + + private com.mongodb.client.MongoClient createMongoClient() { + if (mongoClient == null) { + mongoClient = new SyncMongoClient(getMongoClientSettingsBuilder()); + } + return mongoClient; + } + + + @AfterAll + public static void closeClient() { + if (mongoClient != null) { + mongoClient.close(); + mongoClient = null; + } + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/MongoWriteConcernWithResponseExceptionTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/MongoWriteConcernWithResponseExceptionTest.java new file mode 100644 index 00000000000..09aafc02d46 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/MongoWriteConcernWithResponseExceptionTest.java @@ -0,0 +1,30 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; +import org.junit.Test; + +/** + * See {@link com.mongodb.client.MongoWriteConcernWithResponseExceptionTest}. + */ +public class MongoWriteConcernWithResponseExceptionTest { + @Test + public void doesNotLeak() throws InterruptedException { + com.mongodb.client.MongoWriteConcernWithResponseExceptionTest.doesNotLeak(SyncMongoClient::new); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/NettySettingsSmokeTestSpecification.groovy b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/NettySettingsSmokeTestSpecification.groovy new file mode 100644 index 00000000000..7e35e9a183a --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/NettySettingsSmokeTestSpecification.groovy @@ -0,0 +1,60 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client + +import com.mongodb.MongoClientSettings +import com.mongodb.connection.TransportSettings +import io.netty.channel.oio.OioEventLoopGroup +import io.netty.channel.socket.oio.OioSocketChannel +import org.bson.Document +import reactor.core.publisher.Mono + +import static Fixture.getMongoClientBuilderFromConnectionString +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION + +@SuppressWarnings('deprecation') +class NettySettingsSmokeTestSpecification extends FunctionalSpecification { + + private MongoClient mongoClient + + def 'should allow a custom Event Loop Group and Socket Channel'() { + given: + def eventLoopGroup = new OioEventLoopGroup() + def nettySettings = TransportSettings.nettyBuilder() + .eventLoopGroup(eventLoopGroup) + .socketChannelClass(OioSocketChannel) + .build() + MongoClientSettings settings = getMongoClientBuilderFromConnectionString() + .transportSettings(nettySettings).build() + def document = new Document('a', 1) + + when: + mongoClient = MongoClients.create(settings) + def collection = mongoClient.getDatabase(databaseName).getCollection(collectionName) + + + then: + Mono.from(collection.insertOne(document)).block(TIMEOUT_DURATION) + + then: 'The count is one' + Mono.from(collection.countDocuments()).block(TIMEOUT_DURATION) == 1 + + cleanup: + mongoClient?.close() + } + +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ReactiveContextProviderTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ReactiveContextProviderTest.java new file mode 100644 index 00000000000..730ac2ea001 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ReactiveContextProviderTest.java @@ -0,0 +1,126 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client; + + +import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.model.Aggregates; +import com.mongodb.client.model.Filters; +import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; +import org.bson.BsonDocument; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.DynamicTest; +import org.junit.jupiter.api.TestFactory; + +import java.util.List; +import java.util.stream.Collectors; + +import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static com.mongodb.reactivestreams.client.Fixture.getDefaultDatabaseName; +import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT_PROVIDER; +import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.assertContextPassedThrough; +import static java.lang.String.format; +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static java.util.stream.IntStream.rangeClosed; +import static org.junit.jupiter.api.DynamicTest.dynamicTest; + + +public class ReactiveContextProviderTest { + private MongoClient mongoClient; + + @BeforeEach + public void setup() { + getCollection().insertMany(rangeClosed(1, 11) + .boxed() + .map(i -> BsonDocument.parse(format("{a: %s}", i))) + .collect(Collectors.toList())); + } + + @AfterEach + public void tearDown() { + if (mongoClient != null) { + getCollection().drop(); + mongoClient.close(); + mongoClient = null; + } + } + + + @SuppressWarnings("deprecation") + @TestFactory + @DisplayName("test context passed through when using first") + List<DynamicTest> testMongoIterableFirstPassesTheContext() { + return asList( + dynamicTest("Aggregate Publisher", () -> { + getCollection().aggregate(singletonList(Aggregates.match(Filters.gt("a", 5)))).first(); + assertContextPassedThrough(); + }), + dynamicTest("Distinct Publisher", () -> { + getCollection().distinct("a", Integer.class).first(); + assertContextPassedThrough(); + }), + dynamicTest("Find Publisher", () -> { + getCollection().find().first(); + assertContextPassedThrough(); + }), + dynamicTest("List Collections Publisher", () -> { + getDatabase().listCollections().first(); + assertContextPassedThrough(); + }), + dynamicTest("List Collection Names Publisher", () -> { + getDatabase().listCollectionNames().first(); + assertContextPassedThrough(); + }), + dynamicTest("List Databases Publisher", () -> { + getMongoClient().listDatabases().first(); + assertContextPassedThrough(); + }), + dynamicTest("List Indexes Publisher", () -> { + getCollection().listIndexes().first(); + assertContextPassedThrough(); + }), + dynamicTest("Map Reduce Publisher", () -> { + getCollection().mapReduce( + "function () { emit('a', this.a) }", + "function (k, v) { return Array.sum(v)}").first(); + assertContextPassedThrough(); + }) + ); + } + + private MongoClient getMongoClient() { + if (mongoClient == null) { + mongoClient = new SyncMongoClient(getMongoClientSettingsBuilder() + .contextProvider(CONTEXT_PROVIDER)); + } + return mongoClient; + } + + private MongoDatabase getDatabase() { + return getMongoClient().getDatabase(getDefaultDatabaseName()); + } + + private MongoCollection<BsonDocument> getCollection() { + return getDatabase().getCollection("contextViewRegressionTest", BsonDocument.class); + } + +} diff --git
a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ReactiveInitialDnsSeedlistDiscoveryTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ReactiveInitialDnsSeedlistDiscoveryTest.java new file mode 100644 index 00000000000..03fb7a6060a --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ReactiveInitialDnsSeedlistDiscoveryTest.java @@ -0,0 +1,44 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.MongoClientSettings; +import com.mongodb.client.InitialDnsSeedlistDiscoveryTest; +import com.mongodb.client.MongoClient; +import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; +import org.bson.BsonDocument; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.List; + +// See https://github.com/mongodb/specifications/tree/master/source/initial-dns-seedlist-discovery/tests +@RunWith(Parameterized.class) +public class ReactiveInitialDnsSeedlistDiscoveryTest extends InitialDnsSeedlistDiscoveryTest { + + public ReactiveInitialDnsSeedlistDiscoveryTest(final String filename, final String parentDirectory, final String uri, + final List<String> seeds, final Integer numSeeds, final List<String> hosts, final Integer numHosts, + final BsonDocument options, final BsonDocument parsedOptions, + final boolean isError, final boolean executePingCommand) { + super(filename, parentDirectory, uri, seeds, numSeeds, hosts, numHosts, options, parsedOptions, isError, executePingCommand); + } + + @Override + public MongoClient createMongoClient(final MongoClientSettings settings) { + return new SyncMongoClient(settings); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ReadConcernTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ReadConcernTest.java new file mode 100644 index 00000000000..2040e295d9a --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ReadConcernTest.java @@ -0,0 +1,73 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.ReadConcern; +import com.mongodb.event.CommandStartedEvent; +import com.mongodb.internal.connection.TestCommandListener; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import reactor.core.publisher.Mono; + +import java.util.List; + +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; +import static com.mongodb.client.CommandMonitoringTestHelper.assertEventsEquality; +import static com.mongodb.reactivestreams.client.Fixture.getDefaultDatabaseName; +import static com.mongodb.reactivestreams.client.Fixture.getMongoClientBuilderFromConnectionString; +import static java.util.Collections.singletonList; + +public class ReadConcernTest { + private TestCommandListener commandListener; + private MongoClient mongoClient; + + @Before + public void setUp() { + commandListener = new TestCommandListener(); + mongoClient = MongoClients.create(getMongoClientBuilderFromConnectionString() + .addCommandListener(commandListener) + .build()); + } + + @After + public void tearDown() { + if (mongoClient != null) { + mongoClient.close(); + } + } + + @Test + public void shouldIncludeReadConcernInCommand() throws InterruptedException { + + Mono.from(mongoClient.getDatabase(getDefaultDatabaseName()).getCollection("test") + .withReadConcern(ReadConcern.LOCAL) + .find()) + .block(TIMEOUT_DURATION); + + List<CommandStartedEvent> events = commandListener.getCommandStartedEvents(); + + BsonDocument commandDocument = new BsonDocument("find", new BsonString("test")) + .append("readConcern", ReadConcern.LOCAL.asDocument()) + .append("filter", new BsonDocument()); + + assertEventsEquality(singletonList(new CommandStartedEvent(null, 1, 1, null, getDefaultDatabaseName(), "find", commandDocument)), + events); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/RetryableReadsProseTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/RetryableReadsProseTest.java new file mode 100644 index 00000000000..22b7f7645e1 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/RetryableReadsProseTest.java @@ -0,0 +1,78 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.client.MongoCursor; +import com.mongodb.client.RetryableWritesProseTest; +import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; +import org.bson.Document; +import org.junit.jupiter.api.Test; + +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; + +import static com.mongodb.client.model.Filters.eq; + +/** + * See + * Retryable Reads Tests. + */ +final class RetryableReadsProseTest { + /** + * See + * + * PoolClearedError Retryability Test.
+ */ + @Test + void poolClearedExceptionMustBeRetryable() throws InterruptedException, ExecutionException, TimeoutException { + RetryableWritesProseTest.poolClearedExceptionMustBeRetryable( + SyncMongoClient::new, + mongoCollection -> mongoCollection.find(eq(0)).iterator().hasNext(), "find", false); + } + + /** + * See + * + * Retryable Reads Are Retried on a Different mongos When One is Available. + */ + @Test + void retriesOnDifferentMongosWhenAvailable() { + RetryableWritesProseTest.retriesOnDifferentMongosWhenAvailable( + SyncMongoClient::new, + mongoCollection -> { + try (MongoCursor<Document> cursor = mongoCollection.find().iterator()) { + return cursor.hasNext(); + } + }, "find", false); + } + + /** + * See + * + * Retryable Reads Are Retried on the Same mongos When No Others are Available. + */ + @Test + void retriesOnSameMongosWhenAnotherNotAvailable() { + RetryableWritesProseTest.retriesOnSameMongosWhenAnotherNotAvailable( + SyncMongoClient::new, + mongoCollection -> { + try (MongoCursor<Document> cursor = mongoCollection.find().iterator()) { + return cursor.hasNext(); + } + }, "find", false); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/RetryableWritesProseTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/RetryableWritesProseTest.java new file mode 100644 index 00000000000..51a37ad1e35 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/RetryableWritesProseTest.java @@ -0,0 +1,83 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.client.test.CollectionHelper; +import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; +import org.bson.Document; +import org.bson.codecs.DocumentCodec; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; + +/** + * See + * Retryable Write Prose Tests. + */ +public class RetryableWritesProseTest extends DatabaseTestCase { + private CollectionHelper<Document> collectionHelper; + + @BeforeEach + @Override + public void setUp() { + super.setUp(); + + collectionHelper = new CollectionHelper<>(new DocumentCodec(), collection.getNamespace()); + collectionHelper.create(); + } + + /** + * Prose test #2. + */ + @Test + public void poolClearedExceptionMustBeRetryable() throws InterruptedException, ExecutionException, TimeoutException { + com.mongodb.client.RetryableWritesProseTest.poolClearedExceptionMustBeRetryable( + SyncMongoClient::new, + mongoCollection -> mongoCollection.insertOne(new Document()), "insert", true); + } + + /** + * Prose test #3.
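+ * Verifies that the original error is propagated if no writes were performed.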
+ */ + @Test + public void originalErrorMustBePropagatedIfNoWritesPerformed() throws InterruptedException { + com.mongodb.client.RetryableWritesProseTest.originalErrorMustBePropagatedIfNoWritesPerformed( + SyncMongoClient::new); + } + + /** + * Prose test #4. + */ + @Test + public void retriesOnDifferentMongosWhenAvailable() { + com.mongodb.client.RetryableWritesProseTest.retriesOnDifferentMongosWhenAvailable( + SyncMongoClient::new, + mongoCollection -> mongoCollection.insertOne(new Document()), "insert", true); + } + + /** + * Prose test #5. + */ + @Test + public void retriesOnSameMongosWhenAnotherNotAvailable() { + com.mongodb.client.RetryableWritesProseTest.retriesOnSameMongosWhenAnotherNotAvailable( + SyncMongoClient::new, + mongoCollection -> mongoCollection.insertOne(new Document()), "insert", true); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ServerSelectionProseTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ServerSelectionProseTest.java new file mode 100644 index 00000000000..54b6ba699bb --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/ServerSelectionProseTest.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.reactivestreams.client; + +import com.mongodb.MongoClientSettings; +import com.mongodb.client.AbstractServerSelectionProseTest; +import com.mongodb.client.MongoClient; +import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; + +final class ServerSelectionProseTest extends AbstractServerSelectionProseTest { + protected MongoClient createClient(final MongoClientSettings settings) { + return new SyncMongoClient(settings); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/SessionsProseTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/SessionsProseTest.java new file mode 100644 index 00000000000..21d475095b5 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/SessionsProseTest.java @@ -0,0 +1,29 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.MongoClientSettings; +import com.mongodb.client.AbstractSessionsProseTest; +import com.mongodb.client.MongoClient; +import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; + +public class SessionsProseTest extends AbstractSessionsProseTest { + @Override + protected MongoClient getMongoClient(final MongoClientSettings settings) { + return new SyncMongoClient(settings); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/SmokeTestSpecification.groovy b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/SmokeTestSpecification.groovy new file mode 100644 index 00000000000..de7c4e4d0ef --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/SmokeTestSpecification.groovy @@ -0,0 +1,244 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client + +import com.mongodb.MongoDriverInformation +import com.mongodb.MongoNamespace +import com.mongodb.client.model.IndexModel +import com.mongodb.client.result.InsertOneResult +import com.mongodb.internal.diagnostics.logging.Loggers +import org.bson.BsonInt32 +import org.bson.Document +import org.bson.RawBsonDocument +import reactor.core.publisher.Flux +import spock.lang.IgnoreIf + +import static Fixture.getMongoClient +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION +import static com.mongodb.ClusterFixture.getConnectionString +import static com.mongodb.reactivestreams.client.Fixture.isReplicaSet + +class SmokeTestSpecification extends FunctionalSpecification { + + private static final LOGGER = Loggers.getLogger('smokeTest') + + def 'should handle common scenarios without error'() { + given: + def mongoClient = getMongoClient() + def database = mongoClient.getDatabase(databaseName) + def document = new Document('_id', 1) + def updatedDocument = new Document('_id', 1).append('a', 1) + + when: + run('clean up old database', mongoClient.getDatabase(databaseName).&drop) + def names = run('get database names', mongoClient.&listDatabaseNames) + + then: 'Get Database Names' + !names.contains(null) + + then: + run('Create a collection and the created database is in the list', database.&createCollection, collectionName) == [] + + when: + def updatedNames = run('get database names', mongoClient.&listDatabaseNames) + + then: 'The database names should contain the database and be one bigger than before' + updatedNames.contains(databaseName) + updatedNames.size() == names.size() + 1 + + when: + def collectionNames = run('The collection name should be in the collection names list', database.&listCollectionNames) + + then: + !collectionNames.contains(null) + collectionNames.contains(collectionName) + + then: + run('The count is zero', collection.&countDocuments)[0] == 0 + + then: + run('find first should return nothing if no documents', collection.find().&first) == 
[] + + then: + run('find should return an empty list', collection.&find) == [] + + then: + run('Insert a document', collection.&insertOne, document)[0] == InsertOneResult.acknowledged(new BsonInt32(1)) + + then: + run('The count is one', collection.&countDocuments)[0] == 1 + + then: + run('find that document', collection.find().&first)[0] == document + + then: + run('update that document', collection.&updateOne, document, new Document('$set', new Document('a', 1)))[0].wasAcknowledged() + + then: + run('find the updated document', collection.find().&first)[0] == updatedDocument + + then: + run('aggregate the collection', collection.&aggregate, [new Document('$match', new Document('a', 1))])[0] == updatedDocument + + then: + run('remove a document', collection.&deleteOne, new Document())[0].getDeletedCount() == 1 + + then: + run('The count is zero', collection.&countDocuments)[0] == 0 + + then: + run('create an index', collection.&createIndex, new Document('test', 1))[0] == 'test_1' + + then: + def indexNames = run('has the newly created index', collection.&listIndexes)*.name + + then: + indexNames.containsAll('_id_', 'test_1') + + then: + run('create multiple indexes', collection.&createIndexes, [new IndexModel(new Document('multi', 1))])[0] == 'multi_1' + + then: + def indexNamesUpdated = run('has the newly created index', collection.&listIndexes)*.name + + then: + indexNamesUpdated.containsAll('_id_', 'test_1', 'multi_1') + + then: + run('drop the index', collection.&dropIndex, 'multi_1') == [] + + then: + run('has two indexes left, "_id" and "test_1"', collection.&listIndexes).size() == 2 + + then: + run('drop the index', collection.&dropIndex, 'test_1') == [] + + then: + run('has a single index left "_id" ', collection.&listIndexes).size() == 1 + + then: + def newCollectionName = 'new' + collectionName.capitalize() + run('can rename the collection', collection.&renameCollection, new MongoNamespace(databaseName, newCollectionName)) == [] + + then: + !run('the new collection name is in the collection names list', database.&listCollectionNames).contains(collectionName) + run('get collection names', database.&listCollectionNames).contains(newCollectionName) + + when: + collection = database.getCollection(newCollectionName) + + then: + run('drop the collection', collection.&drop) == [] + + then: + run('there are no indexes', collection.&listIndexes).size() == 0 + + then: + !run('the collection name is no longer in the collectionNames list', database.&listCollectionNames).contains(collectionName) + } + + @IgnoreIf({ !isReplicaSet() }) + def 'should commit a transaction'() { + given: + run('create collection', database.&createCollection, collection.namespace.collectionName) + + when: + ClientSession session = run('start a session', getMongoClient().&startSession)[0] as ClientSession + session.startTransaction() + run('insert a document', collection.&insertOne, session, new Document('_id', 1)) + run('commit a transaction', session.&commitTransaction) + + then: + run('The count is one', collection.&countDocuments)[0] == 1 + + cleanup: + session?.close() + } + + @IgnoreIf({ !isReplicaSet() }) + def 'should abort a transaction'() { + given: + run('create collection', database.&createCollection, collection.namespace.collectionName) + + when: + ClientSession session = run('start a session', getMongoClient().&startSession)[0] as ClientSession + session.startTransaction() + run('insert a document', collection.&insertOne, session, new Document('_id', 1)) + run('abort a transaction', session.&abortTransaction)
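+ // aborting the transaction discards the staged insert, so the count asserted below stays at zero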
+ + then: + run('The count is zero', collection.&countDocuments)[0] == 0 + + cleanup: + session?.close() + } + + def 'should not leak exceptions when a client is closed'() { + given: + def mongoClient = MongoClients.create(getConnectionString()) + + when: + mongoClient.close() + run('get database names', mongoClient.&listDatabaseNames) + + then: + thrown(IllegalStateException) + } + + def 'should accept custom MongoDriverInformation'() { + when: + def driverInformation = MongoDriverInformation.builder().driverName('test').driverVersion('1.2.0').build() + + then: + def client = MongoClients.create(getConnectionString(), driverInformation) + + cleanup: + client?.close() + } + + @SuppressWarnings('BusyWait') + def 'should visit all documents from a cursor with multiple batches'() { + given: + def total = 1000 + def documents = (1..total).collect { new Document('_id', it) } + run('Insert 1000 documents', collection.&insertMany, documents) + + when: + def counted = Flux.from(collection.find(new Document()).sort(new Document('_id', 1)).batchSize(10)) + .collectList().block(TIMEOUT_DURATION).size() + + then: + counted == documents.size() + } + + def 'should bulk insert RawBsonDocuments'() { + given: + def docs = [RawBsonDocument.parse('{a: 1}'), RawBsonDocument.parse('{a: 2}')] + + when: + def result = run('Insert RawBsonDocuments', collection.withDocumentClass(RawBsonDocument).&insertMany, docs) + + then: + result.insertedIds.head() == [0:null, 1:null] + } + + def run(String log, operation, ... args) { + LOGGER.debug(log) + Flux.from(operation.call(args)).collectList().block(TIMEOUT_DURATION) + } + +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/TestEventPublisher.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/TestEventPublisher.java new file mode 100644 index 00000000000..b8a40529dcd --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/TestEventPublisher.java @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.reactivestreams.client; + +import reactor.core.publisher.Flux; +import reactor.core.publisher.Sinks; + +public class TestEventPublisher { + private final Sinks.Many sink; + + public TestEventPublisher() { + this.sink = Sinks.many().unicast().onBackpressureBuffer(); + } + + // Method to send events + public void sendEvent(final T event) { + sink.tryEmitNext(event); + } + + public Flux getEventStream() { + return sink.asFlux(); + } + + public long currentSubscriberCount() { + return sink.currentSubscriberCount(); + } + + public void complete() { + sink.tryEmitComplete(); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/TestSubscriber.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/TestSubscriber.java new file mode 100644 index 00000000000..05411729ba7 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/TestSubscriber.java @@ -0,0 +1,240 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client; + +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; +import java.util.function.Supplier; + +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; + +public class TestSubscriber implements Subscriber { + private final CountDownLatch latch = new CountDownLatch(1); + private final ArrayList onNextEvents = new ArrayList<>(); + private final ArrayList onErrorEvents = new ArrayList<>(); + private final ArrayList onCompleteEvents = new ArrayList<>(); + + private Consumer doOnSubscribe = sub -> {}; + private Consumer doOnNext = r -> {}; + + private Subscription subscription; + + public TestSubscriber() { + } + + @Override + public void onSubscribe(final Subscription subscription) { + this.subscription = subscription; + doOnSubscribe.accept(subscription); + } + + public void doOnSubscribe(final Consumer doOnSubscribe) { + this.doOnSubscribe = doOnSubscribe; + } + + public void doOnNext(final Consumer doOnNext) { + this.doOnNext = doOnNext; + } + + /** + * Provides the Subscriber with a new item to observe. + *
+ * The {@code Publisher} may call this method 0 or more times. + * + * The {@code Publisher} will not call this method again after it calls either {@link #onComplete} or + * {@link #onError}. + *
+ * @param result the item emitted by the {@code Publisher} + */ + @Override + public void onNext(final T result) { + doOnNext.accept(result); + onNextEvents.add(result); + } + + /** + * Notifies the Subscriber that the {@code Publisher} has experienced an error condition. + * + * If the {@code Publisher} calls this method, it will not thereafter call {@link #onNext} or + * {@link #onComplete}. + * + * @param e the exception encountered by the {@code Publisher} + */ + @Override + public void onError(final Throwable e) { + try { + onErrorEvents.add(e); + } finally { + latch.countDown(); + } + } + + /** + * Notifies the Subscriber that the {@code Publisher} has finished sending push-based notifications. + * + * The {@code Publisher} will not call this method if it calls {@link #onError}.
+ */ + @Override + public void onComplete() { + try { + onCompleteEvents.add(null); + } finally { + latch.countDown(); + } + } + + /** + * Allow calling {@link Subscription#request(long)} from unit tests. + * + * @param n the maximum number of items you want the {@code Publisher} to emit to the Subscriber at this time, or + * {@code Long.MAX_VALUE} if you want the {@code Publisher} to emit items at its own pace + */ + public void requestMore(final long n) { + subscription.request(n); + } + + /** + * Get the {@link Throwable}s this {@code Subscriber} was notified of via {@link #onError} as a + * {@link List}. + * + * @return a list of the Throwables that were passed to this Subscriber's {@link #onError} method + */ + public List getOnErrorEvents() { + return onErrorEvents; + } + + /** + * Get the sequence of items observed by this {@link Subscriber}, as an ordered {@link List}. + * + * @return a list of items observed by this Subscriber, in the order in which they were observed + */ + public List getOnNextEvents() { + return onNextEvents; + } + + public void cancelSubscription() { + subscription.cancel(); + } + + /** + * Assert that a particular sequence of items was received by this {@link Subscriber} in order. + * + * @param items the sequence of items expected to have been observed + * @throws AssertionError if the sequence of items observed does not exactly match {@code items} + */ + public void assertReceivedOnNext(final List items) { + if (!waitFor(() -> getOnNextEvents().size() == items.size())) { + throw new AssertionError("Number of items does not match. Provided: " + items.size() + " Actual: " + getOnNextEvents().size()); + } + + for (int i = 0; i < items.size(); i++) { + if (items.get(i) == null) { + // check for null equality + if (onNextEvents.get(i) != null) { + throw new AssertionError("Value at index: " + i + " expected to be [null] but was: [" + getOnNextEvents().get(i) + "]"); + } + } else if (!items.get(i).equals(getOnNextEvents().get(i))) { + throw new AssertionError("Value at index: " + i + " expected to be [" + + items.get(i) + "] (" + items.get(i).getClass().getSimpleName() + ") but was: [" + getOnNextEvents().get(i) + + "] (" + getOnNextEvents().get(i).getClass().getSimpleName() + ")"); + + } + } + } + + /** + * Assert that a single terminal event occurred, either {@link #onComplete} or {@link #onError}. + * + * @throws AssertionError if not exactly one terminal event notification was received + */ + public void assertTerminalEvent() { + try { + //noinspection ResultOfMethodCallIgnored + latch.await(TIMEOUT_DURATION.toMillis(), TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + + if (onErrorEvents.size() > 1) { + throw new AssertionError("Too many onError events: " + onErrorEvents.size()); + } + + if (onCompleteEvents.size() > 1) { + throw new AssertionError("Too many onCompleted events: " + onCompleteEvents.size()); + } + + if (onCompleteEvents.size() == 1 && onErrorEvents.size() == 1) { + throw new AssertionError("Received both an onError and onCompleted. Should be one or the other."); + } + + if (onCompleteEvents.size() == 0 && onErrorEvents.size() == 0) { + throw new AssertionError("No terminal events received."); + } + } + + /** + * Assert that no terminal event occurred, either {@link #onComplete} or {@link #onError}.
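+ * Unlike {@link #assertTerminalEvent}, this method does not block waiting for a terminal event.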
+ * + * @throws AssertionError if a terminal event notification was received + */ + public void assertNoTerminalEvent() { + // either an onComplete or an onError notification counts as a terminal event + if (onCompleteEvents.size() != 0 || onErrorEvents.size() != 0) { + throw new AssertionError("Terminal events received."); + } + } + + /** + * Assert that this {@link Subscriber} has received no {@code onError} notifications. + * + * @throws AssertionError if this {@link Subscriber} has received one or more {@link #onError} notifications + */ + public void assertNoErrors() { + if (onErrorEvents.size() > 0) { + throw new AssertionError("Unexpected onError events: " + getOnErrorEvents().size(), getOnErrorEvents().get(0)); + } + } + + // polls with quadratically growing sleep intervals until the check passes or the timeout elapses + private boolean waitFor(final Supplier<Boolean> check) { + int retry = 0; + long totalSleepTimeMS = 0; + while (totalSleepTimeMS < TIMEOUT_DURATION.toMillis()) { + retry++; + if (check.get()) { + return true; + } + long sleepTimeMS = 100 + (100 * (long) Math.pow(retry, 2)); + totalSleepTimeMS += sleepTimeMS; + try { + Thread.sleep(sleepTimeMS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + return false; + } + } + return false; + } + +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/WriteConcernProseTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/WriteConcernProseTest.java new file mode 100644 index 00000000000..417a10f4037 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/WriteConcernProseTest.java @@ -0,0 +1,98 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.MongoNamespace; +import com.mongodb.MongoWriteConcernException; +import com.mongodb.client.test.CollectionHelper; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.Document; +import org.bson.codecs.DocumentCodec; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import reactor.core.publisher.Mono; + +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet; +import static com.mongodb.reactivestreams.client.Fixture.getDefaultDatabaseName; +import static java.lang.String.format; +import static java.util.Arrays.asList; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +// See https://github.com/mongodb/specifications/tree/master/source/change-streams/tests/README.md#prose-tests +public class WriteConcernProseTest extends DatabaseTestCase { + private BsonDocument failPointDocument; + private CollectionHelper collectionHelper; + + @BeforeEach + @Override + public void setUp() { + assumeTrue(canRunTests()); + super.setUp(); + collectionHelper = new CollectionHelper<>(new DocumentCodec(), new MongoNamespace(getDefaultDatabaseName(), "test")); + } + + // Ensure that the WriteConcernError errInfo object is propagated. + @Test + public void testWriteConcernErrInfoIsPropagated() { + try { + setFailPoint(); + insertOneDocument(); + } catch (MongoWriteConcernException e) { + assertEquals(e.getWriteConcernError().getCode(), 100); + assertEquals("UnsatisfiableWriteConcern", e.getWriteConcernError().getCodeName()); + assertEquals(e.getWriteConcernError().getDetails(), new BsonDocument("writeConcern", + new BsonDocument("w", new BsonInt32(2)) + .append("wtimeout", new BsonInt32(0)) + .append("provenance", new BsonString("clientSupplied")))); + } catch (Exception ex) { + fail(format("Incorrect exception thrown in test: %s", ex.getClass())); + } finally { + disableFailPoint(); + } + } + + private void insertOneDocument() { + Mono.from(collection.insertOne(Document.parse("{ x: 1 }"))).block(TIMEOUT_DURATION); + } + + private void setFailPoint() { + failPointDocument = new BsonDocument("configureFailPoint", new BsonString("failCommand")) + .append("mode", new BsonDocument("times", new BsonInt32(1))) + .append("data", new BsonDocument("failCommands", new BsonArray(asList(new BsonString("insert")))) + .append("writeConcernError", new BsonDocument("code", new BsonInt32(100)) + .append("codeName", new BsonString("UnsatisfiableWriteConcern")) + .append("errmsg", new BsonString("Not enough data-bearing nodes")) + .append("errInfo", new BsonDocument("writeConcern", new BsonDocument("w", new BsonInt32(2)) + .append("wtimeout", new BsonInt32(0)) + .append("provenance", new BsonString("clientSupplied")))))); + collectionHelper.runAdminCommand(failPointDocument); + } + + private void disableFailPoint() { + collectionHelper.runAdminCommand(failPointDocument.append("mode", new BsonString("off"))); + } + + private boolean canRunTests() { + return isDiscoverableReplicaSet(); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/X509AuthenticationTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/X509AuthenticationTest.java new file mode 100644 index 
00000000000..8299ab15142 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/X509AuthenticationTest.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client; + +import com.mongodb.MongoClientSettings; +import com.mongodb.client.auth.AbstractX509AuthenticationTest; +import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; + +public class X509AuthenticationTest extends AbstractX509AuthenticationTest { + @Override + protected com.mongodb.client.MongoClient createMongoClient(final MongoClientSettings mongoClientSettings) { + return new SyncMongoClient(mongoClientSettings); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/csot/ClientSideOperationsEncryptionTimeoutProseTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/csot/ClientSideOperationsEncryptionTimeoutProseTest.java new file mode 100644 index 00000000000..877f3774015 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/csot/ClientSideOperationsEncryptionTimeoutProseTest.java @@ -0,0 +1,38 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.csot; + + +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.MongoClientSettings; +import com.mongodb.client.MongoClient; +import com.mongodb.client.csot.AbstractClientSideOperationsEncryptionTimeoutProseTest; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.reactivestreams.client.syncadapter.SyncClientEncryption; +import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient; +import com.mongodb.reactivestreams.client.vault.ClientEncryptions; + +public class ClientSideOperationsEncryptionTimeoutProseTest extends AbstractClientSideOperationsEncryptionTimeoutProseTest { + public ClientEncryption createClientEncryption(final ClientEncryptionSettings.Builder builder) { + return new SyncClientEncryption(ClientEncryptions.create(builder.build())); + } + + @Override + protected MongoClient createMongoClient(final MongoClientSettings.Builder builder) { + return new SyncMongoClient(builder); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/gridfs/GridFSPublisherSpecification.groovy b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/gridfs/GridFSPublisherSpecification.groovy new file mode 100644 index 00000000000..e5d2b0ed1bb --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/gridfs/GridFSPublisherSpecification.groovy @@ -0,0 +1,515 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.gridfs + +import com.mongodb.MongoGridFSException +import com.mongodb.client.gridfs.model.GridFSFile +import com.mongodb.client.gridfs.model.GridFSUploadOptions +import com.mongodb.reactivestreams.client.FunctionalSpecification +import com.mongodb.reactivestreams.client.MongoClients +import com.mongodb.reactivestreams.client.MongoCollection +import com.mongodb.reactivestreams.client.MongoDatabase +import org.bson.BsonDocument +import org.bson.BsonString +import org.bson.Document +import org.bson.UuidRepresentation +import org.bson.codecs.UuidCodec +import org.bson.types.ObjectId +import org.reactivestreams.Publisher +import org.reactivestreams.Subscriber +import org.reactivestreams.Subscription +import reactor.core.publisher.Flux +import reactor.core.publisher.Mono +import spock.lang.Unroll + +import java.nio.ByteBuffer +import java.nio.channels.Channels +import java.nio.channels.WritableByteChannel +import java.security.SecureRandom +import java.time.Duration + +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION +import static com.mongodb.client.model.Filters.eq +import static com.mongodb.client.model.Updates.unset +import static com.mongodb.reactivestreams.client.Fixture.getDefaultDatabaseName +import static com.mongodb.reactivestreams.client.Fixture.getMongoClient +import static com.mongodb.reactivestreams.client.Fixture.getMongoClientBuilderFromConnectionString +import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry +import static java.util.Arrays.asList +import static org.bson.codecs.configuration.CodecRegistries.fromCodecs +import static org.bson.codecs.configuration.CodecRegistries.fromRegistries + +class GridFSPublisherSpecification extends FunctionalSpecification { + protected MongoDatabase mongoDatabase + protected MongoCollection filesCollection + protected MongoCollection chunksCollection + protected GridFSBucket gridFSBucket + def singleChunkString = 'GridFS' + def multiChunkString = singleChunkString.padLeft(1024 * 255 * 5) + + def setup() { + mongoDatabase = getMongoClient().getDatabase(getDefaultDatabaseName()) + filesCollection = mongoDatabase.getCollection('fs.files', GridFSFile) + chunksCollection = mongoDatabase.getCollection('fs.chunks') + run(filesCollection.&drop) + run(chunksCollection.&drop) + gridFSBucket = GridFSBuckets.create(mongoDatabase) + } + + def cleanup() { + if (filesCollection != null) { + run(filesCollection.&drop) + run(chunksCollection.&drop) + } + } + + @Unroll + def 'should round trip a #description'() { + given: + def content = multiChunk ? 
multiChunkString : singleChunkString + def contentBytes = content as byte[] + def expectedLength = contentBytes.length + + when: + def fileId = run(gridFSBucket.&uploadFromPublisher, 'myFile', createPublisher(ByteBuffer.wrap(contentBytes))) + + then: + run(filesCollection.&countDocuments) == 1 + run(chunksCollection.&countDocuments) == chunkCount + + when: + def fileInfo = run(gridFSBucket.find().filter(eq('_id', fileId)).&first) + + then: + fileInfo.getId().getValue() == fileId + fileInfo.getChunkSize() == gridFSBucket.getChunkSizeBytes() + fileInfo.getLength() == expectedLength + fileInfo.getMetadata() == null + + when: + def data = runAndCollect(gridFSBucket.&downloadToPublisher, fileId) + + then: + concatByteBuffers(data) == contentBytes + + where: + description | multiChunk | chunkCount + 'a small file' | false | 1 + 'a large file' | true | 5 + } + + def 'should round trip with small chunks'() { + given: + def contentSize = 1024 * 10 + def chunkSize = 10 + def contentBytes = new byte[contentSize] + new SecureRandom().nextBytes(contentBytes) + def options = new GridFSUploadOptions().chunkSizeBytes(chunkSize) + + when: + def fileId = run(gridFSBucket.&uploadFromPublisher, 'myFile', + createPublisher(ByteBuffer.wrap(contentBytes)), options) + + then: + run(filesCollection.&countDocuments) == 1 + run(chunksCollection.&countDocuments) == contentSize / chunkSize + + when: + def data = runAndCollect(gridFSBucket.&downloadToPublisher, fileId) + + then: + concatByteBuffers(data) == contentBytes + } + + def 'should respect the outer subscription request amount'() { + given: + def contentBytes = multiChunkString.getBytes() + def options = new GridFSUploadOptions().chunkSizeBytes(contentBytes.length) + + when: + def fileId = Mono.from(gridFSBucket.uploadFromPublisher('myFile', + createPublisher(ByteBuffer.wrap(contentBytes), ByteBuffer.wrap(contentBytes), + ByteBuffer.wrap(contentBytes)), options)).block(TIMEOUT_DURATION) + + then: + run(filesCollection.&countDocuments) == 1 + run(chunksCollection.&countDocuments) == 3 + + when: + def data = Mono.from(gridFSBucket.downloadToPublisher(fileId as ObjectId).bufferSizeBytes(contentBytes.length * 3)) + .block(TIMEOUT_DURATION) + + then: + data.array() == concatByteBuffers([ByteBuffer.wrap(contentBytes), ByteBuffer.wrap(contentBytes), + ByteBuffer.wrap(contentBytes)]) + } + + def 'should upload from the source publisher when it contains multiple parts and the total size is smaller than chunksize'() { + given: + def contentBytes = singleChunkString.getBytes() + + when: + def fileId = Mono.from(gridFSBucket.uploadFromPublisher('myFile', + createPublisher(ByteBuffer.wrap(contentBytes), ByteBuffer.wrap(contentBytes)))).block(TIMEOUT_DURATION) + + then: + run(filesCollection.&countDocuments) == 1 + run(chunksCollection.&countDocuments) == 1 + + when: + def data = Mono.from(gridFSBucket.downloadToPublisher(fileId as ObjectId)).block(TIMEOUT_DURATION) + + then: + data.array() == concatByteBuffers([ByteBuffer.wrap(contentBytes), ByteBuffer.wrap(contentBytes)]) + } + + def 'should round trip with data larger than the internal bufferSize'() { + given: + def contentSize = 1024 * 1024 * 5 + def chunkSize = 1024 * 1024 + def contentBytes = new byte[contentSize] + new SecureRandom().nextBytes(contentBytes) + def options = new GridFSUploadOptions().chunkSizeBytes(chunkSize) + + when: + def fileId = run(gridFSBucket.&uploadFromPublisher, 'myFile', createPublisher(ByteBuffer.wrap(contentBytes)), options) + + then: + run(filesCollection.&countDocuments) == 1 + 
run(chunksCollection.&countDocuments) == contentSize / chunkSize + + when: + def data = runAndCollect(gridFSBucket.&downloadToPublisher, fileId) + + then: + concatByteBuffers(data) == contentBytes + } + + def 'should handle custom ids'() { + def contentBytes = multiChunkString.getBytes() + def fileId = new BsonString('myFile') + + when: + run(gridFSBucket.&uploadFromPublisher, fileId, 'myFile', createPublisher(ByteBuffer.wrap(contentBytes))) + def data = runAndCollect(gridFSBucket.&downloadToPublisher, fileId) + + then: + concatByteBuffers(data) == contentBytes + + when: + run(gridFSBucket.&rename, fileId, 'newName') + data = runAndCollect(gridFSBucket.&downloadToPublisher, 'newName') + + then: + concatByteBuffers(data) == contentBytes + + when: + run(gridFSBucket.&delete, fileId) + + then: + run(filesCollection.&countDocuments) == 0 + run(chunksCollection.&countDocuments) == 0 + } + + def 'should throw a chunk not found error when there are no chunks'() { + given: + def contentSize = 1024 * 1024 + def contentBytes = new byte[contentSize] + new SecureRandom().nextBytes(contentBytes) + + when: + def fileId = run(gridFSBucket.&uploadFromPublisher, 'myFile', createPublisher(ByteBuffer.wrap(contentBytes))) + run(chunksCollection.&deleteMany, eq('files_id', fileId)) + run(gridFSBucket.&downloadToPublisher, fileId) + + then: + thrown(MongoGridFSException) + } + + def 'should round trip with a byteBuffer size of 4096'() { + given: + def contentSize = 1024 * 1024 + def contentBytes = new byte[contentSize] + new SecureRandom().nextBytes(contentBytes) + def options = new GridFSUploadOptions().chunkSizeBytes(1024) + + when: + def fileId = run(gridFSBucket.&uploadFromPublisher, 'myFile', createPublisher(ByteBuffer.wrap(contentBytes)), options) + + then: + run(filesCollection.&countDocuments) == 1 + run(chunksCollection.&countDocuments) == 1024 + + when: + def fileInfo = run(gridFSBucket.find().filter(eq('_id', fileId)).&first) + + then: + fileInfo.getObjectId() == fileId + fileInfo.getChunkSize() == 1024 + fileInfo.getLength() == contentSize + fileInfo.getMetadata() == null + + when: + def data = runAndCollect(gridFSBucket.downloadToPublisher(fileId).&bufferSizeBytes, 4096) + + then: + data.size() == 256 + concatByteBuffers(data) == contentBytes + } + + def 'should handle uploading publisher erroring'() { + given: + def errorMessage = 'Failure Propagated' + def source = new Publisher() { + @Override + void subscribe(final Subscriber s) { + s.onError(new IllegalArgumentException(errorMessage)) + } + } + when: + run(gridFSBucket.&uploadFromPublisher, 'myFile', source) + + then: + IllegalArgumentException ex = thrown() + ex.getMessage() == errorMessage + } + + def 'should use custom uploadOptions when uploading'() { + given: + def chunkSize = 20 + def metadata = new Document('archived', false) + def options = new GridFSUploadOptions() + .chunkSizeBytes(chunkSize) + .metadata(metadata) + def content = 'qwerty' * 1024 + def contentBytes = content as byte[] + def expectedLength = contentBytes.length as Long + def expectedNoChunks = Math.ceil((expectedLength as double) / chunkSize) as int + + + when: + def fileId = run(gridFSBucket.&uploadFromPublisher, 'myFile', createPublisher(ByteBuffer.wrap(contentBytes)), options) + + then: + run(filesCollection.&countDocuments) == 1 + run(chunksCollection.&countDocuments) == expectedNoChunks + + when: + def fileInfo = run(gridFSBucket.find().filter(eq('_id', fileId)).&first) + + then: + fileInfo.getId().getValue() == fileId + fileInfo.getChunkSize() == 
options.getChunkSizeBytes() + fileInfo.getLength() == expectedLength + fileInfo.getMetadata() == options.getMetadata() + + when: + def data = runAndCollect(gridFSBucket.&downloadToPublisher, fileId) + + then: + concatByteBuffers(data) == contentBytes + } + + def 'should be able to open by name'() { + given: + def content = 'Hello GridFS' + def contentBytes = content as byte[] + def filename = 'myFile' + run(gridFSBucket.&uploadFromPublisher, filename, createPublisher(ByteBuffer.wrap(contentBytes))) + + + when: + def data = runAndCollect(gridFSBucket.&downloadToPublisher, filename) + + then: + concatByteBuffers(data) == contentBytes + } + + def 'should be able to handle missing file'() { + when: + def filename = 'myFile' + run(gridFSBucket.&downloadToPublisher, filename) + + then: + thrown(MongoGridFSException) + } + + def 'should create the indexes as expected'() { + when: + def filesIndexKey = Document.parse('{ filename: 1, uploadDate: 1 }') + def chunksIndexKey = Document.parse('{ files_id: 1, n: 1 }') + + then: + !runAndCollect(filesCollection.&listIndexes)*.get('key').contains(filesIndexKey) + !runAndCollect(chunksCollection.&listIndexes)*.get('key').contains(chunksIndexKey) + + when: + run(gridFSBucket.&uploadFromPublisher, 'myFile', createPublisher(ByteBuffer.wrap(multiChunkString.getBytes()))) + + then: + runAndCollect(filesCollection.&listIndexes)*.get('key').contains(Document.parse('{ filename: 1, uploadDate: 1 }')) + runAndCollect(chunksCollection.&listIndexes)*.get('key').contains(Document.parse('{ files_id: 1, n: 1 }')) + } + + def 'should not create indexes if the files collection is not empty'() { + when: + run(filesCollection.withDocumentClass(Document).&insertOne, new Document('filename', 'bad file')) + def contentBytes = 'Hello GridFS' as byte[] + + then: + runAndCollect(filesCollection.&listIndexes).size() == 1 + runAndCollect(chunksCollection.&listIndexes).size() == 0 + + when: + run(gridFSBucket.&uploadFromPublisher, 'myFile', createPublisher(ByteBuffer.wrap(contentBytes))) + + then: + runAndCollect(filesCollection.&listIndexes).size() == 1 + runAndCollect(chunksCollection.&listIndexes).size() == 1 + } + + def 'should use the user provided codec registries for encoding / decoding data'() { + given: + def client = MongoClients.create(getMongoClientBuilderFromConnectionString() + .uuidRepresentation(UuidRepresentation.STANDARD) + .codecRegistry(fromRegistries(fromCodecs(new UuidCodec(UuidRepresentation.STANDARD)), getDefaultCodecRegistry())) + .build()) + def database = client.getDatabase(getDefaultDatabaseName()) + + def uuid = UUID.randomUUID() + def fileMeta = new Document('uuid', uuid) + def gridFSBucket = GridFSBuckets.create(database) + + when: + def fileId = run(gridFSBucket.&uploadFromPublisher, 'myFile', createPublisher(ByteBuffer.wrap(multiChunkString.getBytes())), + new GridFSUploadOptions().metadata(fileMeta)) + + def file = run(gridFSBucket.find(new Document('_id', fileId)).&first) + + then: + file.getMetadata() == fileMeta + + when: + def fileAsDocument = run(filesCollection.find(BsonDocument).&first) + + then: + fileAsDocument.getDocument('metadata').getBinary('uuid').getType() == 4 as byte + + cleanup: + client?.close() + } + + def 'should handle missing file name data when downloading'() { + given: + def contentBytes = multiChunkString.getBytes() + + when: + def fileId = run(gridFSBucket.&uploadFromPublisher, 'myFile', createPublisher(ByteBuffer.wrap(contentBytes))) + + then: + run(filesCollection.&countDocuments) == 1 + + when: + // Remove filename + 
run(filesCollection.&updateOne, eq('_id', fileId), unset('filename')) + def data = runAndCollect(gridFSBucket.&downloadToPublisher, fileId) + + then: + concatByteBuffers(data) == contentBytes + } + + def 'should cleanup when unsubscribing'() { + given: + def contentSize = 1024 + def contentBytes = new byte[contentSize] + new SecureRandom().nextBytes(contentBytes) + def options = new GridFSUploadOptions().chunkSizeBytes(1024) + def data = (0..1024).collect { ByteBuffer.wrap(contentBytes) } + def publisher = createPublisher(*data).delayElements(Duration.ofMillis(1000)) + def subscriber = new Subscriber() { + Subscription subscription + + @Override + void onSubscribe(final Subscription s) { + subscription = s + } + + @Override + void onNext(final ObjectId o) { + } + + @Override + void onError(final Throwable t) { + } + + @Override + void onComplete() { + } + } + + when: + gridFSBucket.uploadFromPublisher('myFile', publisher, options) + .subscribe(subscriber) + subscriber.subscription.request(1) + + then: + retry(10) { run(chunksCollection.&countDocuments) > 0 } + run(filesCollection.&countDocuments) == 0 + + when: + subscriber.subscription.cancel() + + then: + retry(50) { run(chunksCollection.&countDocuments) == 0 } + run(filesCollection.&countDocuments) == 0 + } + + def retry(Integer times, Closure closure) { + def result = closure.call() + if (!result && times > 0) { + sleep(250) + retry(times - 1, closure) + } else { + assert result + return result + } + } + + def run(Closure operation, ... args) { + Mono.from(operation.call(args)).block(TIMEOUT_DURATION) + } + + def runAndCollect(Closure operation, ... args) { + Flux.from(operation.call(args)).collectList().block(TIMEOUT_DURATION) + } + + byte[] concatByteBuffers(List buffers) { + ByteArrayOutputStream outputStream = new ByteArrayOutputStream() + WritableByteChannel channel = Channels.newChannel(outputStream) + for (ByteBuffer buffer : buffers) { + channel.write(buffer) + } + outputStream.close() + channel.close() + outputStream.toByteArray() + } + + def createPublisher(final ByteBuffer... byteBuffers) { + Flux.fromIterable(asList(byteBuffers)) + } +} + diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorFluxTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorFluxTest.java new file mode 100644 index 00000000000..49d416da22e --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorFluxTest.java @@ -0,0 +1,429 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoCursorNotFoundException; +import com.mongodb.client.model.changestream.ChangeStreamDocument; +import com.mongodb.client.result.InsertOneResult; +import com.mongodb.event.CommandEvent; +import com.mongodb.event.CommandStartedEvent; +import com.mongodb.internal.connection.TestCommandListener; +import com.mongodb.reactivestreams.client.FindPublisher; +import com.mongodb.reactivestreams.client.MongoClient; +import com.mongodb.reactivestreams.client.MongoClients; +import com.mongodb.reactivestreams.client.MongoCollection; +import com.mongodb.reactivestreams.client.TestSubscriber; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.BsonTimestamp; +import org.bson.BsonValue; +import org.bson.Document; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.reactivestreams.Publisher; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Hooks; +import reactor.core.publisher.Mono; + +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; +import static com.mongodb.ClusterFixture.getDefaultDatabaseName; +import static com.mongodb.reactivestreams.client.Fixture.drop; +import static com.mongodb.reactivestreams.client.Fixture.getMongoClientBuilderFromConnectionString; +import static com.mongodb.reactivestreams.client.Fixture.isReplicaSet; +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertIterableEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assumptions.assumeTrue; +import static org.mockito.Mockito.when; + +@ExtendWith(MockitoExtension.class) +public class BatchCursorFluxTest { + + private MongoClient client; + private TestCommandListener commandListener; + private MongoCollection collection; + + @Mock + private BatchCursorPublisher batchCursorPublisher; + + @BeforeEach + public void setUp() { + commandListener = new TestCommandListener(singletonList("commandStartedEvent"), asList("insert", "killCursors")); + MongoClientSettings mongoClientSettings = getMongoClientBuilderFromConnectionString().addCommandListener(commandListener).build(); + client = MongoClients.create(mongoClientSettings); + collection = client.getDatabase(getDefaultDatabaseName()).getCollection(getClass().getName()); + drop(collection.getNamespace()); + } + + @AfterEach + public void tearDown() { + try { + if (collection != null) { + drop(collection.getNamespace()); + } + } finally { + if (client != null) { + client.close(); + } + } + } + + @Test + public void testBatchCursorRespectsTheSetBatchSize() { + List docs = createDocs(20); + Mono.from(collection.insertMany(docs)).block(TIMEOUT_DURATION); + + TestSubscriber 
subscriber = new TestSubscriber<>(); + collection.find().batchSize(5).subscribe(subscriber); + assertCommandNames(emptyList()); + + subscriber.requestMore(5); + subscriber.assertReceivedOnNext(docs.subList(0, 5)); + assertCommandNames(singletonList("find")); + + subscriber.requestMore(5); + subscriber.assertReceivedOnNext(docs.subList(0, 10)); + assertCommandNames(asList("find", "getMore")); + + subscriber.requestMore(10); + subscriber.assertReceivedOnNext(docs); + subscriber.assertNoTerminalEvent(); + assertCommandNames(asList("find", "getMore", "getMore", "getMore")); + + subscriber.requestMore(1); + subscriber.assertNoErrors(); + subscriber.assertTerminalEvent(); + assertCommandNames(asList("find", "getMore", "getMore", "getMore", "getMore")); + } + + @Test + public void testBatchCursorSupportsBatchSizeZero() { + List docs = createDocs(200); + Mono.from(collection.insertMany(docs)).block(TIMEOUT_DURATION); + + TestSubscriber subscriber = new TestSubscriber<>(); + collection.find().batchSize(0).subscribe(subscriber); + assertCommandNames(emptyList()); + + subscriber.requestMore(100); + subscriber.assertReceivedOnNext(docs.subList(0, 100)); + assertCommandNames(singletonList("find")); + + subscriber.requestMore(101); + subscriber.assertReceivedOnNext(docs); + subscriber.assertNoErrors(); + subscriber.assertTerminalEvent(); + assertCommandNames(asList("find", "getMore")); + } + + @Test + public void testBatchCursorConsumesBatchesThenGetMores() { + List docs = createDocs(99); + Mono.from(collection.insertMany(docs)).block(TIMEOUT_DURATION); + + TestSubscriber subscriber = new TestSubscriber<>(); + collection.find().batchSize(50).subscribe(subscriber); + assertCommandNames(emptyList()); + + subscriber.requestMore(25); + subscriber.assertReceivedOnNext(docs.subList(0, 25)); + assertCommandNames(singletonList("find")); + + subscriber.requestMore(25); + subscriber.assertReceivedOnNext(docs.subList(0, 50)); + assertCommandNames(singletonList("find")); + + subscriber.requestMore(25); + subscriber.assertReceivedOnNext(docs.subList(0, 75)); + subscriber.assertNoTerminalEvent(); + assertCommandNames(asList("find", "getMore")); + + subscriber.requestMore(25); + subscriber.assertReceivedOnNext(docs); + subscriber.assertNoErrors(); + subscriber.assertTerminalEvent(); + assertCommandNames(asList("find", "getMore")); + } + + @Test + public void testBatchCursorDynamicBatchSize() { + List docs = createDocs(200); + Mono.from(collection.insertMany(docs)).block(TIMEOUT_DURATION); + + TestSubscriber subscriber = new TestSubscriber<>(); + FindPublisher findPublisher = collection.find(); + findPublisher.subscribe(subscriber); + assertCommandNames(emptyList()); + + subscriber.requestMore(20); + subscriber.assertReceivedOnNext(docs.subList(0, 20)); + assertCommandNames(singletonList("find")); + + subscriber.requestMore(150); + subscriber.assertReceivedOnNext(docs.subList(0, 170)); + assertCommandNames(asList("find", "getMore")); + + subscriber.requestMore(40); + subscriber.assertReceivedOnNext(docs); + subscriber.assertNoErrors(); + subscriber.assertTerminalEvent(); + assertCommandNames(asList("find", "getMore", "getMore")); + } + + @Test + public void testBatchCursorCompletesAsExpectedWithLimit() { + List docs = createDocs(100); + Mono.from(collection.insertMany(docs)).block(TIMEOUT_DURATION); + + TestSubscriber subscriber = new TestSubscriber<>(); + FindPublisher findPublisher = collection.find().limit(100); + findPublisher.subscribe(subscriber); + assertCommandNames(emptyList()); + + 
subscriber.requestMore(101); + subscriber.assertReceivedOnNext(docs); + subscriber.assertNoErrors(); + subscriber.assertTerminalEvent(); + assertCommandNames(singletonList("find")); + } + + @Test + public void testBatchCursorDynamicBatchSizeOnReuse() { + List docs = createDocs(200); + Mono.from(collection.insertMany(docs)).block(TIMEOUT_DURATION); + + TestSubscriber subscriber = new TestSubscriber<>(); + FindPublisher findPublisher = collection.find(); + findPublisher.subscribe(subscriber); + assertCommandNames(emptyList()); + + subscriber.requestMore(100); + subscriber.assertReceivedOnNext(docs.subList(0, 100)); + assertCommandNames(singletonList("find")); + + subscriber.requestMore(200); + subscriber.assertReceivedOnNext(docs); + subscriber.assertNoErrors(); + subscriber.assertTerminalEvent(); + assertCommandNames(asList("find", "getMore")); + + commandListener.reset(); + subscriber = new TestSubscriber<>(); + findPublisher.subscribe(subscriber); + assertCommandNames(emptyList()); + + subscriber.requestMore(Long.MAX_VALUE); + subscriber.assertNoErrors(); + subscriber.assertReceivedOnNext(docs); + subscriber.assertTerminalEvent(); + assertCommandNames(singletonList("find")); + } + + @Test + public void testCalculateDemand() { + BatchCursorFlux batchCursorFlux = new BatchCursorFlux<>(batchCursorPublisher); + + assertAll("Calculating demand", + () -> assertEquals(0, batchCursorFlux.calculateDemand(0)), + () -> assertEquals(10, batchCursorFlux.calculateDemand(10)), + () -> assertEquals(0, batchCursorFlux.calculateDemand(-10)), + () -> assertEquals(Integer.MAX_VALUE, batchCursorFlux.calculateDemand(Integer.MAX_VALUE)), + () -> assertEquals(Long.MAX_VALUE, batchCursorFlux.calculateDemand(Long.MAX_VALUE)), + () -> assertEquals(Long.MAX_VALUE, batchCursorFlux.calculateDemand(1)), + () -> assertEquals(0, batchCursorFlux.calculateDemand(-Long.MAX_VALUE)) + ); + } + + @Test + public void testCalculateBatchSize() { + BatchCursorFlux batchCursorFlux = new BatchCursorFlux<>(batchCursorPublisher); + + when(batchCursorPublisher.getBatchSize()).thenReturn(null); + assertAll("Calculating batch size with dynamic batch size", + () -> assertEquals(2, batchCursorFlux.calculateBatchSize(1)), + () -> assertEquals(1000, batchCursorFlux.calculateBatchSize(1000)), + () -> assertEquals(Integer.MAX_VALUE, batchCursorFlux.calculateBatchSize(Integer.MAX_VALUE)), + () -> assertEquals(Integer.MAX_VALUE, batchCursorFlux.calculateBatchSize(Long.MAX_VALUE)) + ); + + + when(batchCursorPublisher.getBatchSize()).thenReturn(10); + assertAll("Calculating batch size with set batch size", + () -> assertEquals(10, batchCursorFlux.calculateBatchSize(100)), + () -> assertEquals(10, batchCursorFlux.calculateBatchSize(Long.MAX_VALUE)), + () -> assertEquals(10, batchCursorFlux.calculateBatchSize(1)) + ); + + } + + @Test + @DisplayName("ChangeStreamPublisher for a collection must complete after dropping the collection") + void changeStreamPublisherCompletesAfterDroppingCollection() { + assumeTrue(isReplicaSet()); + TestSubscriber> subscriber = new TestSubscriber<>(); + subscriber.doOnSubscribe(subscription -> { + subscription.request(Long.MAX_VALUE); + }); + collection.watch() + .startAtOperationTime(ensureExists(client, collection)) + .subscribe(subscriber); + Mono.from(collection.drop()).block(TIMEOUT_DURATION); + subscriber.assertTerminalEvent(); + subscriber.assertNoErrors(); + } + + @Test + @DisplayName("Ensure BatchCursor does not drop an error") + public void testBatchCursorDoesNotDropAnError() { + try { + AtomicBoolean 
errorDropped = new AtomicBoolean(); + Hooks.onErrorDropped(t -> errorDropped.set(true)); + Mono.from(collection.insertMany(createDocs(200))).block(); + + Flux.fromStream(IntStream.range(1, 200).boxed()) + .flatMap(i -> + Flux.fromIterable(asList(1, 2)) + .flatMap(x -> Flux.from(collection.find())) + .take(1) + + ) + .collectList() + .block(TIMEOUT_DURATION); + + assertFalse(errorDropped.get()); + } finally { + Hooks.resetOnErrorDropped(); + } + } + + @Test + @DisplayName("Ensure no NPE is thrown on null in result set") + public void testNoNPEOnNull() { + try { + AtomicBoolean errorDropped = new AtomicBoolean(); + Hooks.onErrorDropped(t -> errorDropped.set(true)); + + Document doc = new Document("x", null); + Document doc2 = new Document("x", "hello"); + + Mono.from(collection.insertMany(Arrays.asList(doc, doc2))).block(); + + TestSubscriber subscriber = new TestSubscriber<>(); + + collection.distinct("x", String.class).subscribe(subscriber); + + subscriber.requestMore(1); + + subscriber.assertReceivedOnNext(Arrays.asList("hello")); + + assertFalse(errorDropped.get()); + } finally { + Hooks.resetOnErrorDropped(); + } + } + + @Test + @DisplayName("Ensure BatchCursor reports cursor errors") + @SuppressWarnings("OptionalGetWithoutIsPresent") + public void testBatchCursorReportsCursorErrors() { + List docs = createDocs(200); + Mono.from(collection.insertMany(docs)).block(TIMEOUT_DURATION); + + TestSubscriber subscriber = new TestSubscriber<>(); + FindPublisher findPublisher = collection.find().batchSize(50); + findPublisher.subscribe(subscriber); + assertCommandNames(emptyList()); + + subscriber.requestMore(100); + subscriber.assertReceivedOnNext(docs.subList(0, 100)); + assertCommandNames(asList("find", "getMore")); + + BsonDocument getMoreCommand = commandListener.getCommandStartedEvents().stream() + .filter(e -> e.getCommandName().equals("getMore")) + .map(CommandStartedEvent::getCommand) + .findFirst() + .get(); + + Mono.from(client.getDatabase(getDefaultDatabaseName()).runCommand( + new BsonDocument("killCursors", new BsonString(collection.getNamespace().getCollectionName())) + .append("cursors", new BsonArray(singletonList(getMoreCommand.getNumber("getMore")))) + )).block(TIMEOUT_DURATION); + + subscriber.requestMore(200); + List onErrorEvents = subscriber.getOnErrorEvents(); + subscriber.assertTerminalEvent(); + assertEquals(1, onErrorEvents.size()); + assertEquals(MongoCursorNotFoundException.class, onErrorEvents.get(0).getClass()); + } + + private void assertCommandNames(final List commandNames) { + assertIterableEquals(commandNames, + commandListener.getCommandStartedEvents().stream().map(CommandEvent::getCommandName).collect(Collectors.toList())); + } + + private List createDocs(final int amount) { + return IntStream.rangeClosed(1, amount) + .boxed() + .map(i -> new Document("_id", i)) + .collect(Collectors.toList()); + } + + /** + * This method ensures that the server considers the specified {@code collection} existing for the purposes of, e.g., + * {@link MongoCollection#drop()}, instead of replying with + * {@code "errmsg": "ns not found", "code": 26, "codeName": "NamespaceNotFound"}. + * + * @return {@code operationTime} starting at which the {@code collection} is guaranteed to exist. 
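+ * The timestamp is obtained by inserting a placeholder document and then removing it with a {@code delete} command, whose reply carries the {@code operationTime}.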
+ */ + private static BsonTimestamp ensureExists(final MongoClient client, final MongoCollection collection) { + BsonValue insertedId = Mono.from(collection.insertOne(Document.parse("{}"))) + .map(InsertOneResult::getInsertedId) + .block(TIMEOUT_DURATION); + BsonArray deleteStatements = new BsonArray(); + deleteStatements.add(new BsonDocument() + .append("q", new BsonDocument() + .append("_id", insertedId)) + .append("limit", new BsonInt32(1))); + Publisher deletePublisher = client.getDatabase(collection.getNamespace().getDatabaseName()) + .runCommand(new BsonDocument() + .append("delete", new BsonString(collection.getNamespace().getCollectionName())) + .append("deletes", deleteStatements)); + BsonTimestamp operationTime = Mono.from(deletePublisher) + .map(doc -> doc.get("operationTime", BsonTimestamp.class)) + .block(TIMEOUT_DURATION); + assertNotNull(operationTime); + return operationTime; + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorPublisherTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorPublisherTest.java new file mode 100644 index 00000000000..b8f88596691 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/BatchCursorPublisherTest.java @@ -0,0 +1,227 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.mongodb.reactivestreams.client.internal;
+
+import com.mongodb.ReadConcern;
+import com.mongodb.ReadPreference;
+import com.mongodb.internal.TimeoutSettings;
+import com.mongodb.internal.async.AsyncBatchCursor;
+import com.mongodb.internal.async.SingleResultCallback;
+import com.mongodb.internal.operation.Operations;
+import com.mongodb.internal.operation.ReadOperationCursor;
+import org.bson.Document;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.Mock;
+import org.mockito.Mockito;
+import org.mockito.junit.jupiter.MockitoExtension;
+import reactor.core.publisher.Mono;
+import reactor.test.StepVerifier;
+
+import java.util.ArrayList;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Queue;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Function;
+import java.util.stream.IntStream;
+
+import static com.mongodb.reactivestreams.client.internal.TestHelper.OPERATION_EXECUTOR;
+import static com.mongodb.reactivestreams.client.internal.TestHelper.OPERATION_PUBLISHER;
+import static java.lang.String.format;
+import static java.util.Collections.emptyList;
+import static java.util.Collections.singletonList;
+import static java.util.stream.Collectors.groupingBy;
+import static java.util.stream.Collectors.toList;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
+
+@SuppressWarnings("unchecked")
+@ExtendWith(MockitoExtension.class)
+public class BatchCursorPublisherTest {
+
+    private static final String ERROR_CREATING_CURSOR = "Error creating cursor";
+    private static final String ERROR_RETURNING_RESULTS = "Error returning results";
+
+    @Mock
+    private ReadOperationCursor<Document> readOperation;
+    @Mock
+    private AsyncBatchCursor<Document> batchCursor;
+
+    @Test
+    public void testBatchCursor() {
+        List<Document> documents = IntStream.range(1, 20).boxed().map(i -> Document.parse(format("{_id: %s}", i))).collect(toList());
+
+        StepVerifier.create(createVerifiableBatchCursor(documents))
+                .expectNext(documents.toArray(new Document[0]))
+                .expectComplete()
+                .verify();
+    }
+
+    @Test
+    public void testBatchCursorRespectsBatchSize() {
+        List<Document> documents = IntStream.range(1, 11).boxed().map(i -> Document.parse(format("{_id: %s}", i))).collect(toList());
+
+        StepVerifier.Step<Document> verifier = StepVerifier.create(createVerifiableBatchCursor(documents, 2));
+        createBatches(documents, 2).forEach(b -> verifier.expectNext(b.toArray(new Document[0])));
+        verifier.expectComplete()
+                .verify();
+
+        StepVerifier.create(createVerifiableBatchCursor(documents, 2), 1)
+                .expectNext(documents.get(0))
+                .thenRequest(8)
+                .expectNextCount(8)
+                .thenRequest(1)
+                .expectNext(documents.get(documents.size() - 1))
+                .expectComplete()
+                .verify();
+    }
+
+    @Test
+    public void testBatchCursorFirst() {
+        List<Document> documents = IntStream.range(1, 11).boxed().map(i -> Document.parse(format("{_id: %s}", i))).collect(toList());
+        StepVerifier.create(createVerifiableBatchCursor(documents).first())
+                .expectNext(documents.get(0))
+                .expectComplete()
+                .verify();
+    }
+
+    @Test
+    public void testBatchCursorFirstEmpty() {
+        StepVerifier.create(createVerifiableBatchCursor(emptyList()).first())
+                .expectComplete()
+                .verify();
+    }
+
+    @Test
+    public void testBatchCursorError() {
+        StepVerifier.create(createVerifiableBatchCursorError())
+                .expectErrorMessage(ERROR_CREATING_CURSOR)
+                .verify();
+    }
+
+    @Test
+    public void testBatchCursorOnNextError() {
+        List<Document> documents = IntStream.range(1, 11).boxed().map(i -> Document.parse(format("{_id: %s}", i))).collect(toList());
+
+        StepVerifier.create(createVerifiableBatchCursorError(documents))
+                .expectNext(documents.toArray(new Document[0]))
+                .expectErrorMessage(ERROR_RETURNING_RESULTS)
+                .verify();
+    }
+
+    @Test
+    public void testCancellingSubscriptionBatchCursor() {
+        List<Document> documents = IntStream.range(1, 11).boxed().map(i -> Document.parse(format("{_id: %s}", i))).collect(toList());
+
+        StepVerifier.create(createVerifiableBatchCursor(documents, 2), 1)
+                .expectNext(documents.get(0))
+                .thenRequest(2)
+                .expectNext(documents.get(1), documents.get(2))
+                .thenCancel()
+                .verifyThenAssertThat()
+                .hasDiscarded(documents.get(3));
+    }
+
+    BatchCursorPublisher<Document> createVerifiableBatchCursor(final List<Document> expected) {
+        return createVerifiableBatchCursor(expected, 0);
+    }
+
+    BatchCursorPublisher<Document> createVerifiableBatchCursor(final List<Document> expected, final int batchSize) {
+        return createVerifiableBatchCursor(expected, batchSize, false, false);
+    }
+
+    BatchCursorPublisher<Document> createVerifiableBatchCursorError() {
+        return createVerifiableBatchCursor(emptyList(), 0, true, false);
+    }
+
+    BatchCursorPublisher<Document> createVerifiableBatchCursorError(final List<Document> expected) {
+        return createVerifiableBatchCursor(expected, 0, false, true);
+    }
+
+    List<List<Document>> createBatches(final List<Document> expected, final int batchSize) {
+        if (batchSize == 0) {
+            return singletonList(expected);
+        }
+        AtomicInteger counter = new AtomicInteger();
+        return new ArrayList<>(expected.stream().collect(groupingBy(it -> counter.getAndIncrement() / batchSize)).values());
+    }
+
+    BatchCursorPublisher<Document> createVerifiableBatchCursor(final List<Document> expected, final int batchSize,
+            final boolean errorCreatingCursor, final boolean errorOnEmpty) {
+
+        BatchCursorPublisher<Document> publisher = new BatchCursorPublisher<Document>(
+                null, OPERATION_PUBLISHER) {
+            @Override
+            ReadOperationCursor<Document> asReadOperation(final int initialBatchSize) {
+                return readOperation;
+            }
+
+            @Override
+            Function<Operations<?>, TimeoutSettings> getTimeoutSettings() {
+                return (Operations::getTimeoutSettings);
+            }
+        };
+
+        OperationExecutor executor = OPERATION_EXECUTOR;
+
+        if (batchSize > 0) {
+            publisher.batchSize(batchSize);
+        }
+
+        if (errorCreatingCursor) {
+            Mockito.doAnswer(invocation -> Mono.fromCallable(() -> {
+                throw new Exception(ERROR_CREATING_CURSOR);
+            }))
+                    .when(executor)
+                    .execute(eq(readOperation),
+                            eq(ReadPreference.primary()),
+                            eq(ReadConcern.DEFAULT),
+                            eq(null));
+        } else {
+            Mockito.doAnswer(invocation -> Mono.fromCallable(() -> batchCursor))
+                    .when(executor)
+                    .execute(eq(readOperation),
+                            eq(ReadPreference.primary()),
+                            eq(ReadConcern.DEFAULT),
+                            eq(null));
+
+            Queue<List<Document>> queuedResults = new LinkedList<>(createBatches(expected, batchSize));
+            AtomicBoolean isClosed = new AtomicBoolean(false);
+            Mockito.lenient().doAnswer(i -> isClosed.get()).when(batchCursor).isClosed();
+            Mockito.doAnswer(invocation -> {
+                List<Document> next = queuedResults.poll();
+                if (queuedResults.isEmpty()) {
+                    if (!errorOnEmpty) {
+                        isClosed.set(true);
+                    } else if (next == null) {
+                        invocation.getArgument(0, SingleResultCallback.class)
+                                .onResult(null, new Exception(ERROR_RETURNING_RESULTS));
+                        return null;
+                    }
+                }
+                invocation.getArgument(0, SingleResultCallback.class).onResult(next, null);
+                return null;
+            }).when(batchCursor).next(any(SingleResultCallback.class));
+        }
+
+        return publisher;
+    }
+
+}
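The demand-driven assertions in this harness lean on StepVerifier's request model: the second argument to StepVerifier.create is the initial request, and thenRequest raises demand incrementally. A self-contained Reactor-only sketch of those semantics, with no driver types involved:

    import reactor.core.publisher.Flux;
    import reactor.test.StepVerifier;

    public final class StepVerifierDemandDemo {
        public static void main(String[] args) {
            Flux<Integer> numbers = Flux.range(1, 5);

            // Start with demand 1; further elements only arrive once the
            // verifier explicitly requests more.
            StepVerifier.create(numbers, 1)
                    .expectNext(1)          // consumes the single requested element
                    .thenRequest(3)         // raise demand; 2, 3 and 4 may now flow
                    .expectNext(2, 3, 4)
                    .thenRequest(1)
                    .expectNext(5)
                    .verifyComplete();
        }
    }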
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/ConnectionsSurvivePrimaryStepDownProseTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/ConnectionsSurvivePrimaryStepDownProseTest.java
new file mode 100644
index 00000000000..6e9cb8f7167
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/internal/ConnectionsSurvivePrimaryStepDownProseTest.java
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.internal;
+
+import com.mongodb.MongoClientSettings;
+import com.mongodb.MongoException;
+import com.mongodb.MongoNamespace;
+import com.mongodb.MongoNotPrimaryException;
+import com.mongodb.WriteConcern;
+import com.mongodb.client.test.CollectionHelper;
+import com.mongodb.event.ConnectionCreatedEvent;
+import com.mongodb.event.ConnectionPoolClearedEvent;
+import com.mongodb.internal.connection.TestConnectionPoolListener;
+import com.mongodb.reactivestreams.client.MongoClient;
+import com.mongodb.reactivestreams.client.MongoClients;
+import com.mongodb.reactivestreams.client.MongoCollection;
+import com.mongodb.reactivestreams.client.MongoDatabase;
+import org.bson.Document;
+import org.bson.codecs.DocumentCodec;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import reactor.core.publisher.Mono;
+
+import java.util.List;
+
+import static com.mongodb.ClusterFixture.TIMEOUT_DURATION;
+import static com.mongodb.ClusterFixture.getDefaultDatabaseName;
+import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet;
+import static com.mongodb.reactivestreams.client.Fixture.getMongoClientSettings;
+import static java.util.Arrays.asList;
+import static java.util.Collections.singletonList;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeTrue;
+
+// See https://github.com/mongodb/specifications/tree/master/source/connections-survive-step-down/tests
+@SuppressWarnings("deprecation")
+public class ConnectionsSurvivePrimaryStepDownProseTest {
+    private static final String COLLECTION_NAME = "step-down";
+
+    private TestConnectionPoolListener connectionPoolListener;
+    private CollectionHelper<Document> collectionHelper;
+    private MongoClient client;
+    private MongoCollection<Document> collection;
+
+    @Before
+    public void setUp() {
+        assumeTrue(isDiscoverableReplicaSet());
+        connectionPoolListener = new TestConnectionPoolListener();
+        MongoClientSettings settings = MongoClientSettings.builder(getMongoClientSettings()).retryWrites(false)
+                .applyToConnectionPoolSettings(builder -> builder.addConnectionPoolListener(connectionPoolListener)).build();
+
+        collectionHelper = new CollectionHelper<>(new DocumentCodec(),
+                new MongoNamespace(getDefaultDatabaseName(), COLLECTION_NAME));
+        client = MongoClients.create(settings);
+        MongoDatabase database = client.getDatabase(getDefaultDatabaseName());
+        collection = client.getDatabase(getDefaultDatabaseName()).getCollection(COLLECTION_NAME);
+
+        Mono.from(collection.withWriteConcern(WriteConcern.MAJORITY).drop()).block(TIMEOUT_DURATION);
+        Mono.from(database.withWriteConcern(WriteConcern.MAJORITY).createCollection(COLLECTION_NAME)).block(TIMEOUT_DURATION);
+    }
+
+    @After
+    public void tearDown() {
+        if (client != null) {
+            collectionHelper.runAdminCommand("{configureFailPoint: 'failCommand', mode: 'off'}");
+            try {
+                Mono.from(client.getDatabase(getDefaultDatabaseName()).drop()).block(TIMEOUT_DURATION);
+            } catch (MongoNotPrimaryException e) {
+                // GetMore will use the same connection so won't force a server description update
+                Mono.from(client.getDatabase(getDefaultDatabaseName()).drop()).block(TIMEOUT_DURATION);
+            }
+            client.close();
+        }
+    }
+
+    @Test
+    public void testGetMoreIteration() {
+        List<Document> documents = asList(Document.parse("{_id: 1}"), Document.parse("{_id: 2}"), Document.parse("{_id: 3}"),
+                Document.parse("{_id: 4}"), Document.parse("{_id: 5}"));
+        Mono.from(collection.withWriteConcern(WriteConcern.MAJORITY).insertMany(documents)).block(TIMEOUT_DURATION);
+
+        int connectionCount = connectionPoolListener.countEvents(ConnectionCreatedEvent.class);
+
+        BatchCursor<Document> cursor = ((FindPublisherImpl<Document>) collection.find().batchSize(2)).batchCursor(2)
+                .block(TIMEOUT_DURATION);
+        assertNotNull(cursor);
+        assertEquals(asList(documents.get(0), documents.get(1)), Mono.from(cursor.next()).block(TIMEOUT_DURATION));
+
+        collectionHelper.runAdminCommand("{replSetStepDown: 5, force: true}");
+
+        assertEquals(asList(documents.get(2), documents.get(3)), Mono.from(cursor.next()).block(TIMEOUT_DURATION));
+        assertEquals(singletonList(documents.get(4)), Mono.from(cursor.next()).block(TIMEOUT_DURATION));
+        assertEquals(connectionCount, connectionPoolListener.countEvents(ConnectionCreatedEvent.class));
+    }
+
+    @Test
+    public void testNotPrimaryKeepConnectionPool() {
+        collectionHelper.runAdminCommand("{configureFailPoint: 'failCommand', mode: {times: 1}, "
+                + "data: {failCommands: ['insert'], errorCode: 10107}}");
+        int connectionCount = connectionPoolListener.countEvents(ConnectionCreatedEvent.class);
+
+        try {
+            Mono.from(collection.insertOne(new Document())).block(TIMEOUT_DURATION);
+            fail();
+        } catch (MongoException e) {
+            assertEquals(10107, e.getCode());
+        }
+
+        Mono.from(collection.insertOne(new Document())).block(TIMEOUT_DURATION);
+        assertEquals(connectionCount, connectionPoolListener.countEvents(ConnectionCreatedEvent.class));
+    }
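The fail-point tests in this class encode the error classification from the connections-survive-step-down specification: on MongoDB 4.2+, a "not primary" error (code 10107) leaves the connection pool intact, while the shutdown errors InterruptedAtShutdown (11600) and ShutdownInProgress (91) must clear it, which is what the ConnectionPoolClearedEvent counts assert. A small sketch of that classification; the class and method names are illustrative, not driver internals:

    // Mirrors the error-code table the three fail-point tests exercise,
    // assuming the 4.2+ behavior described in the specification.
    final class StepDownErrors {
        static final int NOT_WRITABLE_PRIMARY = 10107;
        static final int INTERRUPTED_AT_SHUTDOWN = 11600;
        static final int SHUTDOWN_IN_PROGRESS = 91;

        // A "not primary" error is recoverable after re-election, so the pool
        // is kept; a shutdown error means the node is going away, so the pool
        // must be cleared (surfacing as a ConnectionPoolClearedEvent).
        static boolean shouldClearPool(final int serverErrorCode) {
            return serverErrorCode == INTERRUPTED_AT_SHUTDOWN
                    || serverErrorCode == SHUTDOWN_IN_PROGRESS;
        }
    }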
+
+    @Test
+    public void testInterruptedAtShutdownResetsConnectionPool() {
+        collectionHelper.runAdminCommand("{configureFailPoint: 'failCommand', mode: {times: 1}, "
+                + "data: {failCommands: ['insert'], errorCode: 11600}}");
+        int connectionCount = connectionPoolListener.countEvents(ConnectionCreatedEvent.class);
+
+        try {
+            Mono.from(collection.insertOne(new Document())).block(TIMEOUT_DURATION);
+        } catch (MongoException e) {
+            assertEquals(11600, e.getCode());
+        }
+        assertEquals(1, connectionPoolListener.countEvents(ConnectionPoolClearedEvent.class));
+        Mono.from(collection.insertOne(new Document())).block(TIMEOUT_DURATION);
+        assertEquals(connectionCount + 1, connectionPoolListener.countEvents(ConnectionCreatedEvent.class));
+    }
+
+    @Test
+    public void testShutdownInProgressResetsConnectionPool() {
+        collectionHelper.runAdminCommand("{configureFailPoint: 'failCommand', mode: {times: 1}, "
+                + "data: {failCommands: ['insert'], errorCode: 91}}");
+        int connectionCount = connectionPoolListener.countEvents(ConnectionCreatedEvent.class);
+
+        try {
+            Mono.from(collection.insertOne(new Document())).block(TIMEOUT_DURATION);
+        } catch (MongoException e) {
+            assertEquals(91, e.getCode());
+        }
+        assertEquals(1, connectionPoolListener.countEvents(ConnectionPoolClearedEvent.class));
+
+        Mono.from(collection.insertOne(new Document())).block(TIMEOUT_DURATION);
+        assertEquals(connectionCount + 1, connectionPoolListener.countEvents(ConnectionCreatedEvent.class));
+    }
+
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/ContextHelper.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/ContextHelper.java
new file mode 100644
index 00000000000..fd9e534b190
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/ContextHelper.java
@@ -0,0 +1,134 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.reactivestreams.client.syncadapter;
+
+import com.mongodb.ContextProvider;
+import com.mongodb.RequestContext;
+import com.mongodb.reactivestreams.client.ReactiveContextProvider;
+import org.bson.BsonArray;
+import org.bson.BsonDocument;
+import org.bson.BsonValue;
+import reactor.core.CoreSubscriber;
+import reactor.util.context.Context;
+
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.stream.Stream;
+
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+public final class ContextHelper {
+    static final ResettableRequestContext REQUEST_CONTEXT = new ResettableRequestContext();
+    static final Context CONTEXT = Context.of("A", "B");
+
+    public static final ContextProvider CONTEXT_PROVIDER = (ReactiveContextProvider) subscriber -> {
+        if (subscriber instanceof CoreSubscriber) {
+            ((CoreSubscriber<?>) subscriber)
+                    .currentContext()
+                    .stream()
+                    .forEach(e -> REQUEST_CONTEXT.put(e.getKey(), e.getValue()));
+        }
+        return REQUEST_CONTEXT;
+    };
+
+    public static void assertContextPassedThrough() {
+        boolean contextWasSet = REQUEST_CONTEXT.size() > 0;
+        REQUEST_CONTEXT.reset();
+        assertTrue(contextWasSet, "Test failed to pass through the context as expected");
+    }
+
+    public static void assertContextPassedThrough(final BsonDocument unifiedTestDefinition) {
+        boolean shouldBeSuccessful = unifiedTestDefinition.getArray("operations", new BsonArray())
+                .stream()
+                .anyMatch(v -> {
+                    BsonValue result = v.asDocument().get("result", new BsonDocument());
+                    return !result.isDocument() || result.asDocument().containsKey("errorContains");
+                });
+        if (shouldBeSuccessful) {
+            assertContextPassedThrough();
+        }
+    }
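ContextHelper bridges Reactor's subscriber Context into the driver's RequestContext: CONTEXT_PROVIDER copies every entry visible via currentContext() at the moment the driver subscribes. The entries are visible because contextWrite propagates upstream from the blocking call site. A minimal Reactor-only sketch of that propagation, reusing the key "A" from CONTEXT for illustration:

    import reactor.core.publisher.Mono;
    import reactor.util.context.Context;

    public final class ContextPropagationDemo {
        public static void main(String[] args) {
            Mono<String> source = Mono.deferContextual(ctx ->
                    // Reads the entry a downstream contextWrite() made visible.
                    Mono.just("A=" + ctx.get("A")));

            // contextWrite enriches the subscriber context and propagates
            // *upstream*, which is why the sync adapters attach it at the
            // blocking call site rather than at assembly time.
            String result = source.contextWrite(Context.of("A", "B")).block();
            System.out.println(result); // prints A=B
        }
    }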
+
+    @SuppressWarnings("unchecked")
+    static class ResettableRequestContext implements RequestContext {
+
+        private final Map<Object, Object> context = new ConcurrentHashMap<>();
+
+        @Override
+        public <T> T get(final Object key) {
+            return (T) context.get(key);
+        }
+
+        @Override
+        public <T> T get(final Class<T> key) {
+            return (T) context.get(key);
+        }
+
+        @Override
+        public <T> T getOrDefault(final Object key, final T defaultValue) {
+            return (T) context.getOrDefault(key, defaultValue);
+        }
+
+        @Override
+        public <T> Optional<T> getOrEmpty(final Object key) {
+            return (Optional<T>) context.compute(key, (k, v) -> v == null ? Optional.empty() : Optional.of(((T) v)));
+        }
+
+        @Override
+        public boolean hasKey(final Object key) {
+            return context.containsKey(key);
+        }
+
+        @Override
+        public boolean isEmpty() {
+            return context.isEmpty();
+        }
+
+        @Override
+        public void put(final Object key, final Object value) {
+            context.put(key, value);
+        }
+
+        @Override
+        public void putNonNull(final Object key, final Object valueOrNull) {
+            context.put(key, valueOrNull);
+        }
+
+        @Override
+        public void delete(final Object key) {
+            context.remove(key);
+        }
+
+        @Override
+        public int size() {
+            return context.size();
+        }
+
+        @Override
+        public Stream<Map.Entry<Object, Object>> stream() {
+            return context.entrySet().stream();
+        }
+
+        void reset() {
+            context.clear();
+        }
+    }
+
+    private ContextHelper() {
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncAggregateIterable.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncAggregateIterable.java
new file mode 100644
index 00000000000..6b81b1f42af
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncAggregateIterable.java
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package com.mongodb.reactivestreams.client.syncadapter; + +import com.mongodb.ExplainVerbosity; +import com.mongodb.client.AggregateIterable; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.Collation; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.AggregatePublisher; +import org.bson.BsonValue; +import org.bson.Document; +import org.bson.conversions.Bson; +import reactor.core.publisher.Mono; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; +import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT; +import static java.util.Objects.requireNonNull; + +class SyncAggregateIterable extends SyncMongoIterable implements AggregateIterable { + private final AggregatePublisher wrapped; + + SyncAggregateIterable(final AggregatePublisher wrapped) { + super(wrapped); + this.wrapped = wrapped; + } + + @Override + public void toCollection() { + Mono.from(wrapped.toCollection()).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public AggregateIterable allowDiskUse(@Nullable final Boolean allowDiskUse) { + wrapped.allowDiskUse(allowDiskUse); + return this; + } + + @Override + public AggregateIterable batchSize(final int batchSize) { + wrapped.batchSize(batchSize); + super.batchSize(batchSize); + return this; + } + + @Override + public AggregateIterable maxTime(final long maxTime, final TimeUnit timeUnit) { + wrapped.maxTime(maxTime, timeUnit); + return this; + } + + @Override + public AggregateIterable maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) { + wrapped.maxAwaitTime(maxAwaitTime, timeUnit); + return this; + } + + @Override + public AggregateIterable bypassDocumentValidation(@Nullable final Boolean bypassDocumentValidation) { + wrapped.bypassDocumentValidation(bypassDocumentValidation); + return this; + } + + @Override + public AggregateIterable collation(@Nullable final Collation collation) { + wrapped.collation(collation); + return this; + } + + @Override + public AggregateIterable comment(@Nullable final String comment) { + wrapped.comment(comment); + return this; + } + + @Override + public AggregateIterable comment(@Nullable final BsonValue comment) { + wrapped.comment(comment); + return this; + } + + @Override + public AggregateIterable hint(@Nullable final Bson hint) { + wrapped.hint(hint); + return this; + } + + @Override + public AggregateIterable hintString(final String hint) { + wrapped.hintString(hint); + return this; + } + + @Override + public AggregateIterable let(final Bson variables) { + wrapped.let(variables); + return this; + } + + @Override + public AggregateIterable timeoutMode(final TimeoutMode timeoutMode) { + wrapped.timeoutMode(timeoutMode); + return this; + } + + @Override + public Document explain() { + return requireNonNull(Mono.from(wrapped.explain()).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public Document explain(final ExplainVerbosity verbosity) { + return requireNonNull(Mono.from(wrapped.explain(verbosity)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public E explain(final Class explainResultClass) { + return requireNonNull(Mono.from(wrapped.explain(explainResultClass)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public E explain(final Class explainResultClass, final ExplainVerbosity verbosity) { + return requireNonNull(Mono.from(wrapped.explain(explainResultClass, 
verbosity)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncChangeStreamIterable.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncChangeStreamIterable.java new file mode 100644 index 00000000000..d078e176bc2 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncChangeStreamIterable.java @@ -0,0 +1,168 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.syncadapter; + +import com.mongodb.ServerAddress; +import com.mongodb.ServerCursor; +import com.mongodb.client.ChangeStreamIterable; +import com.mongodb.client.MongoChangeStreamCursor; +import com.mongodb.client.MongoCursor; +import com.mongodb.client.MongoIterable; +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.changestream.ChangeStreamDocument; +import com.mongodb.client.model.changestream.FullDocument; +import com.mongodb.client.model.changestream.FullDocumentBeforeChange; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.ChangeStreamPublisher; +import org.bson.BsonDocument; +import org.bson.BsonTimestamp; +import org.bson.BsonValue; + +import java.util.concurrent.TimeUnit; + +class SyncChangeStreamIterable extends SyncMongoIterable> implements ChangeStreamIterable { + private final ChangeStreamPublisher wrapped; + @Nullable + private Integer batchSize; + + SyncChangeStreamIterable(final ChangeStreamPublisher wrapped) { + super(wrapped); + this.wrapped = wrapped; + } + + public MongoChangeStreamCursor> cursor() { + MongoCursor> wrapped = super.cursor(); + return new MongoChangeStreamCursor>() { + @Override + public BsonDocument getResumeToken() { + throw new UnsupportedOperationException(); + } + + @Override + public void close() { + wrapped.close(); + } + + @Override + public boolean hasNext() { + return wrapped.hasNext(); + } + + @Override + public ChangeStreamDocument next() { + return wrapped.next(); + } + + @Override + public int available() { + return wrapped.available(); + } + + @Override + public ChangeStreamDocument tryNext() { + return wrapped.tryNext(); + } + + @Override + public ServerCursor getServerCursor() { + return wrapped.getServerCursor(); + } + + @Override + public ServerAddress getServerAddress() { + return wrapped.getServerAddress(); + } + }; + } + + @Override + public ChangeStreamIterable fullDocument(final FullDocument fullDocument) { + wrapped.fullDocument(fullDocument); + return this; + } + + @Override + public ChangeStreamIterable fullDocumentBeforeChange(final FullDocumentBeforeChange fullDocumentBeforeChange) { + wrapped.fullDocumentBeforeChange(fullDocumentBeforeChange); + return this; + } + + @Override + public ChangeStreamIterable resumeAfter(final BsonDocument resumeToken) { + wrapped.resumeAfter(resumeToken); + return 
this; + } + + @Override + public ChangeStreamIterable batchSize(final int batchSize) { + wrapped.batchSize(batchSize); + this.batchSize = batchSize; + super.batchSize(batchSize); + return this; + } + + @Override + public ChangeStreamIterable maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) { + wrapped.maxAwaitTime(maxAwaitTime, timeUnit); + return this; + } + + @Override + public ChangeStreamIterable collation(@Nullable final Collation collation) { + wrapped.collation(collation); + return this; + } + + @Override + public MongoIterable withDocumentClass(final Class clazz) { + SyncMongoIterable result = new SyncMongoIterable<>(wrapped.withDocumentClass(clazz)); + if (batchSize != null) { + result.batchSize(batchSize); + } + return result; + } + + @Override + public ChangeStreamIterable startAtOperationTime(final BsonTimestamp startAtOperationTime) { + wrapped.startAtOperationTime(startAtOperationTime); + return this; + } + + @Override + public ChangeStreamIterable startAfter(final BsonDocument startAfter) { + wrapped.startAfter(startAfter); + return this; + } + + @Override + public ChangeStreamIterable comment(@Nullable final String comment) { + wrapped.comment(comment); + return this; + } + + @Override + public ChangeStreamIterable comment(@Nullable final BsonValue comment) { + wrapped.comment(comment); + return this; + } + + @Override + public ChangeStreamIterable showExpandedEvents(final boolean showExpandedEvents) { + wrapped.showExpandedEvents(showExpandedEvents); + return this; + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncClientEncryption.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncClientEncryption.java new file mode 100644 index 00000000000..44f1d1b687e --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncClientEncryption.java @@ -0,0 +1,132 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.syncadapter; + +import com.mongodb.MongoUpdatedEncryptedFieldsException; +import com.mongodb.client.FindIterable; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.CreateEncryptedCollectionParams; +import com.mongodb.client.model.vault.DataKeyOptions; +import com.mongodb.client.model.vault.EncryptOptions; +import com.mongodb.client.model.vault.RewrapManyDataKeyOptions; +import com.mongodb.client.model.vault.RewrapManyDataKeyResult; +import com.mongodb.client.result.DeleteResult; +import com.mongodb.client.vault.ClientEncryption; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.BsonValue; +import org.bson.conversions.Bson; +import reactor.core.publisher.Mono; + +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; +import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT; +import static java.util.Objects.requireNonNull; +import static org.bson.assertions.Assertions.fail; + +public class SyncClientEncryption implements ClientEncryption { + + private final com.mongodb.reactivestreams.client.vault.ClientEncryption wrapped; + + public SyncClientEncryption(final com.mongodb.reactivestreams.client.vault.ClientEncryption wrapped) { + this.wrapped = wrapped; + } + + @Override + public BsonBinary createDataKey(final String kmsProvider) { + return requireNonNull(Mono.from(wrapped.createDataKey(kmsProvider, new DataKeyOptions())).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public BsonBinary createDataKey(final String kmsProvider, final DataKeyOptions dataKeyOptions) { + return requireNonNull(Mono.from(wrapped.createDataKey(kmsProvider, dataKeyOptions)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public BsonBinary encrypt(final BsonValue value, final EncryptOptions options) { + return requireNonNull(Mono.from(wrapped.encrypt(value, options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public BsonDocument encryptExpression(final Bson expression, final EncryptOptions options) { + return requireNonNull(Mono.from(wrapped.encryptExpression(expression, options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public BsonValue decrypt(final BsonBinary value) { + return requireNonNull(Mono.from(wrapped.decrypt(value)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public DeleteResult deleteKey(final BsonBinary id) { + return requireNonNull(Mono.from(wrapped.deleteKey(id)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public BsonDocument getKey(final BsonBinary id) { + return Mono.from(wrapped.getKey(id)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public FindIterable getKeys() { + return new SyncFindIterable<>(wrapped.getKeys()); + } + + @Override + public BsonDocument addKeyAltName(final BsonBinary id, final String keyAltName) { + return Mono.from(wrapped.addKeyAltName(id, keyAltName)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public BsonDocument removeKeyAltName(final BsonBinary id, final String keyAltName) { + return Mono.from(wrapped.removeKeyAltName(id, keyAltName)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public BsonDocument getKeyByAltName(final String keyAltName) { + return Mono.from(wrapped.getKeyByAltName(keyAltName)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public 
RewrapManyDataKeyResult rewrapManyDataKey(final Bson filter) { + return requireNonNull(Mono.from(wrapped.rewrapManyDataKey(filter)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public RewrapManyDataKeyResult rewrapManyDataKey(final Bson filter, final RewrapManyDataKeyOptions options) { + return requireNonNull(Mono.from(wrapped.rewrapManyDataKey(filter, options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public BsonDocument createEncryptedCollection(final MongoDatabase database, final String collectionName, + final CreateCollectionOptions createCollectionOptions, final CreateEncryptedCollectionParams createEncryptedCollectionParams) + throws MongoUpdatedEncryptedFieldsException { + if (database instanceof SyncMongoDatabase) { + com.mongodb.reactivestreams.client.MongoDatabase reactiveDatabase = ((SyncMongoDatabase) database).getWrapped(); + return requireNonNull(Mono.fromDirect(wrapped.createEncryptedCollection( + reactiveDatabase, collectionName, createCollectionOptions, createEncryptedCollectionParams)) + .contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } else { + throw fail(database.getClass().toString()); + } + } + + @Override + public void close() { + wrapped.close(); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncClientSession.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncClientSession.java new file mode 100644 index 00000000000..494e5f8c74e --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncClientSession.java @@ -0,0 +1,198 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.syncadapter; + +import com.mongodb.ClientSessionOptions; +import com.mongodb.ServerAddress; +import com.mongodb.TransactionOptions; +import com.mongodb.client.ClientSession; +import com.mongodb.client.TransactionBody; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.lang.Nullable; +import com.mongodb.session.ServerSession; +import org.bson.BsonDocument; +import org.bson.BsonTimestamp; +import reactor.core.publisher.Mono; + +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; +import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException; +import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT; +import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.getSleepAfterSessionClose; + +class SyncClientSession implements ClientSession { + private final com.mongodb.reactivestreams.client.ClientSession wrapped; + private final Object originator; + + SyncClientSession(final com.mongodb.reactivestreams.client.ClientSession wrapped, final Object originator) { + this.wrapped = wrapped; + this.originator = originator; + } + + public com.mongodb.reactivestreams.client.ClientSession getWrapped() { + return wrapped; + } + + @Override + public ServerAddress getPinnedServerAddress() { + return wrapped.getPinnedServerAddress(); + } + + @Override + public Object getTransactionContext() { + return wrapped.getTransactionContext(); + } + + @Override + public void setTransactionContext(final ServerAddress address, final Object transactionContext) { + wrapped.setTransactionContext(address, transactionContext); + } + + @Override + public void clearTransactionContext() { + wrapped.clearTransactionContext(); + } + + @Override + public BsonDocument getRecoveryToken() { + return wrapped.getRecoveryToken(); + } + + @Override + public void setRecoveryToken(final BsonDocument recoveryToken) { + wrapped.setRecoveryToken(recoveryToken); + } + + @Override + public ClientSessionOptions getOptions() { + return wrapped.getOptions(); + } + + @Override + public boolean isCausallyConsistent() { + return wrapped.isCausallyConsistent(); + } + + @Override + public Object getOriginator() { + return originator; + } + + @Override + public ServerSession getServerSession() { + return wrapped.getServerSession(); + } + + @Override + public BsonTimestamp getOperationTime() { + return wrapped.getOperationTime(); + } + + @Override + public void advanceOperationTime(final BsonTimestamp operationTime) { + wrapped.advanceOperationTime(operationTime); + } + + @Override + public void advanceClusterTime(final BsonDocument clusterTime) { + wrapped.advanceClusterTime(clusterTime); + } + + @Override + public void setSnapshotTimestamp(final BsonTimestamp snapshotTimestamp) { + wrapped.setSnapshotTimestamp(snapshotTimestamp); + } + + @Override + @Nullable + public BsonTimestamp getSnapshotTimestamp() { + return wrapped.getSnapshotTimestamp(); + } + + @Override + public BsonDocument getClusterTime() { + return wrapped.getClusterTime(); + } + + @Override + public void close() { + wrapped.close(); + sleep(getSleepAfterSessionClose()); + } + + @Override + public boolean hasActiveTransaction() { + return wrapped.hasActiveTransaction(); + } + + @Override + public boolean notifyMessageSent() { + return wrapped.notifyMessageSent(); + } + + @Override + public void notifyOperationInitiated(final Object operation) { + wrapped.notifyOperationInitiated(operation); + } + + @Override + public 
TransactionOptions getTransactionOptions() { + return wrapped.getTransactionOptions(); + } + + @Override + public void startTransaction() { + wrapped.startTransaction(); + } + + @Override + public void startTransaction(final TransactionOptions transactionOptions) { + wrapped.startTransaction(transactionOptions); + } + + @Override + public void commitTransaction() { + Mono.from(wrapped.commitTransaction()).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public void abortTransaction() { + Mono.from(wrapped.abortTransaction()).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public T withTransaction(final TransactionBody transactionBody) { + throw new UnsupportedOperationException(); + } + + @Override + public T withTransaction(final TransactionBody transactionBody, final TransactionOptions options) { + throw new UnsupportedOperationException(); + } + + @Override + public TimeoutContext getTimeoutContext() { + return wrapped.getTimeoutContext(); + } + + private static void sleep(final long millis) { + try { + Thread.sleep(millis); + } catch (InterruptedException e) { + throw interruptAndCreateMongoInterruptedException(null, e); + } + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncDistinctIterable.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncDistinctIterable.java new file mode 100644 index 00000000000..5b3e3c71fe1 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncDistinctIterable.java @@ -0,0 +1,91 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.syncadapter; + +import com.mongodb.client.DistinctIterable; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.Collation; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.DistinctPublisher; +import org.bson.BsonValue; +import org.bson.conversions.Bson; + +import java.util.concurrent.TimeUnit; + +class SyncDistinctIterable extends SyncMongoIterable implements DistinctIterable { + private final com.mongodb.reactivestreams.client.DistinctPublisher wrapped; + + SyncDistinctIterable(final DistinctPublisher wrapped) { + super(wrapped); + this.wrapped = wrapped; + } + + @Override + public DistinctIterable filter(@Nullable final Bson filter) { + wrapped.filter(filter); + return this; + } + + @Override + public DistinctIterable maxTime(final long maxTime, final TimeUnit timeUnit) { + wrapped.maxTime(maxTime, timeUnit); + return this; + } + + @Override + public DistinctIterable batchSize(final int batchSize) { + wrapped.batchSize(batchSize); + super.batchSize(batchSize); + return this; + } + + @Override + public DistinctIterable collation(@Nullable final Collation collation) { + wrapped.collation(collation); + return this; + } + + @Override + public DistinctIterable comment(@Nullable final String comment) { + wrapped.comment(comment); + return this; + } + + @Override + public DistinctIterable comment(@Nullable final BsonValue comment) { + wrapped.comment(comment); + return this; + } + + @Override + public DistinctIterable hint(@Nullable final Bson hint) { + wrapped.hint(hint); + return this; + } + + @Override + public DistinctIterable hintString(@Nullable final String hint) { + wrapped.hintString(hint); + return this; + } + + @Override + public DistinctIterable timeoutMode(final TimeoutMode timeoutMode) { + wrapped.timeoutMode(timeoutMode); + return this; + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncFindIterable.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncFindIterable.java new file mode 100644 index 00000000000..3cf93b9ffb0 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncFindIterable.java @@ -0,0 +1,203 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.syncadapter; + +import com.mongodb.CursorType; +import com.mongodb.ExplainVerbosity; +import com.mongodb.client.FindIterable; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.Collation; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.FindPublisher; +import org.bson.BsonValue; +import org.bson.Document; +import org.bson.conversions.Bson; +import reactor.core.publisher.Mono; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; +import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT; +import static java.util.Objects.requireNonNull; + +class SyncFindIterable extends SyncMongoIterable implements FindIterable { + private final com.mongodb.reactivestreams.client.FindPublisher wrapped; + + SyncFindIterable(final FindPublisher wrapped) { + super(wrapped); + this.wrapped = wrapped; + } + + @Override + public FindIterable filter(@Nullable final Bson filter) { + wrapped.filter(filter); + return this; + } + + @Override + public FindIterable limit(final int limit) { + wrapped.limit(limit); + return this; + } + + @Override + public FindIterable skip(final int skip) { + wrapped.skip(skip); + return this; + } + + @Override + public FindIterable maxTime(final long maxTime, final TimeUnit timeUnit) { + wrapped.maxTime(maxTime, timeUnit); + return this; + } + + @Override + public FindIterable maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) { + wrapped.maxAwaitTime(maxAwaitTime, timeUnit); + return this; + } + + @Override + public FindIterable projection(@Nullable final Bson projection) { + wrapped.projection(projection); + return this; + } + + @Override + public FindIterable sort(@Nullable final Bson sort) { + wrapped.sort(sort); + return this; + } + + @Override + public FindIterable noCursorTimeout(final boolean noCursorTimeout) { + wrapped.noCursorTimeout(noCursorTimeout); + return this; + } + + @Override + public FindIterable partial(final boolean partial) { + wrapped.partial(partial); + return this; + } + + @Override + public FindIterable cursorType(final CursorType cursorType) { + wrapped.cursorType(cursorType); + return this; + } + + @Override + public FindIterable batchSize(final int batchSize) { + wrapped.batchSize(batchSize); + super.batchSize(batchSize); + return this; + } + + @Override + public FindIterable collation(@Nullable final Collation collation) { + wrapped.collation(collation); + return this; + } + + @Override + public FindIterable comment(@Nullable final String comment) { + wrapped.comment(comment); + return this; + } + + @Override + public FindIterable comment(@Nullable final BsonValue comment) { + wrapped.comment(comment); + return this; + } + + @Override + public FindIterable hint(@Nullable final Bson hint) { + wrapped.hint(hint); + return this; + } + + @Override + public FindIterable hintString(@Nullable final String hint) { + wrapped.hintString(hint); + return this; + } + + @Override + public FindIterable let(@Nullable final Bson variables) { + wrapped.let(variables); + return this; + } + + @Override + public FindIterable max(@Nullable final Bson max) { + wrapped.max(max); + return this; + } + + @Override + public FindIterable min(@Nullable final Bson min) { + wrapped.min(min); + return this; + } + + @Override + public FindIterable returnKey(final boolean returnKey) { + wrapped.returnKey(returnKey); + return this; + } + + @Override + public FindIterable showRecordId(final boolean 
showRecordId) { + wrapped.showRecordId(showRecordId); + return this; + } + + @Override + public FindIterable allowDiskUse(@Nullable final java.lang.Boolean allowDiskUse) { + wrapped.allowDiskUse(allowDiskUse); + return this; + } + + @Override + public FindIterable timeoutMode(final TimeoutMode timeoutMode) { + wrapped.timeoutMode(timeoutMode); + return this; + } + + @Override + public Document explain() { + return requireNonNull(Mono.from(wrapped.explain()).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public Document explain(final ExplainVerbosity verbosity) { + return requireNonNull(Mono.from(wrapped.explain(verbosity)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public E explain(final Class explainResultClass) { + return requireNonNull(Mono.from(wrapped.explain(explainResultClass)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public E explain(final Class explainResultClass, final ExplainVerbosity verbosity) { + return requireNonNull(Mono.from(wrapped.explain(explainResultClass, verbosity)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncGridFSBucket.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncGridFSBucket.java new file mode 100644 index 00000000000..48b28e5540a --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncGridFSBucket.java @@ -0,0 +1,392 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.syncadapter; + +import com.mongodb.MongoGridFSException; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.client.ClientSession; +import com.mongodb.client.gridfs.GridFSBucket; +import com.mongodb.client.gridfs.GridFSDownloadStream; +import com.mongodb.client.gridfs.GridFSFindIterable; +import com.mongodb.client.gridfs.GridFSUploadStream; +import com.mongodb.client.gridfs.model.GridFSDownloadOptions; +import com.mongodb.client.gridfs.model.GridFSUploadOptions; +import com.mongodb.reactivestreams.client.gridfs.GridFSDownloadPublisher; +import com.mongodb.reactivestreams.client.gridfs.GridFSUploadPublisher; +import org.bson.BsonObjectId; +import org.bson.BsonValue; +import org.bson.conversions.Bson; +import org.bson.types.ObjectId; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; +import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT; +import static java.util.Objects.requireNonNull; + +public class SyncGridFSBucket implements GridFSBucket { + private final com.mongodb.reactivestreams.client.gridfs.GridFSBucket wrapped; + + public SyncGridFSBucket(final com.mongodb.reactivestreams.client.gridfs.GridFSBucket wrapped) { + this.wrapped = wrapped; + } + + @Override + public String getBucketName() { + return wrapped.getBucketName(); + } + + @Override + public int getChunkSizeBytes() { + return wrapped.getChunkSizeBytes(); + } + + @Override + public WriteConcern getWriteConcern() { + return wrapped.getWriteConcern(); + } + + @Override + public ReadPreference getReadPreference() { + return wrapped.getReadPreference(); + } + + @Override + public ReadConcern getReadConcern() { + return wrapped.getReadConcern(); + } + + @Override + public Long getTimeout(final TimeUnit timeUnit) { + return wrapped.getTimeout(timeUnit); + } + + @Override + public GridFSBucket withChunkSizeBytes(final int chunkSizeBytes) { + return new SyncGridFSBucket(wrapped.withChunkSizeBytes(chunkSizeBytes)); + } + + @Override + public GridFSBucket withReadPreference(final ReadPreference readPreference) { + return new SyncGridFSBucket(wrapped.withReadPreference(readPreference)); + } + + @Override + public GridFSBucket withWriteConcern(final WriteConcern writeConcern) { + return new SyncGridFSBucket(wrapped.withWriteConcern(writeConcern)); + } + + @Override + public GridFSBucket withReadConcern(final ReadConcern readConcern) { + return new SyncGridFSBucket(wrapped.withReadConcern(readConcern)); + } + + @Override + public GridFSBucket withTimeout(final long timeout, final TimeUnit timeUnit) { + return new SyncGridFSBucket(wrapped.withTimeout(timeout, timeUnit)); + } + + @Override + public GridFSUploadStream openUploadStream(final String filename) { + return openUploadStream(filename, new GridFSUploadOptions()); + } + + @Override + public GridFSUploadStream openUploadStream(final String filename, final GridFSUploadOptions options) { + throw new UnsupportedOperationException(); + } + + @Override + public GridFSUploadStream openUploadStream(final BsonValue id, final String filename) { + return openUploadStream(id, filename, new GridFSUploadOptions()); + } + + @Override + public GridFSUploadStream 
openUploadStream(final BsonValue id, final String filename, final GridFSUploadOptions options) { + throw new UnsupportedOperationException(); + } + + @Override + public GridFSUploadStream openUploadStream(final ClientSession clientSession, final String filename) { + return openUploadStream(clientSession, filename, new GridFSUploadOptions()); + } + + @Override + public GridFSUploadStream openUploadStream(final ClientSession clientSession, final String filename, + final GridFSUploadOptions options) { + throw new UnsupportedOperationException(); + } + + @Override + public GridFSUploadStream openUploadStream(final ClientSession clientSession, final BsonValue id, final String filename) { + return openUploadStream(clientSession, id, filename, new GridFSUploadOptions()); + } + + @Override + public GridFSUploadStream openUploadStream(final ClientSession clientSession, final ObjectId id, final String filename) { + return openUploadStream(clientSession, new BsonObjectId(id), filename, new GridFSUploadOptions()); + } + + @Override + public GridFSUploadStream openUploadStream(final ClientSession clientSession, final BsonValue id, final String filename, + final GridFSUploadOptions options) { + throw new UnsupportedOperationException(); + } + + @Override + public ObjectId uploadFromStream(final String filename, final InputStream source) { + return uploadFromStream(filename, source, new GridFSUploadOptions()); + } + + @Override + public ObjectId uploadFromStream(final String filename, final InputStream source, final GridFSUploadOptions options) { + Flux sourceToPublisher = inputStreamToFlux(source, options); + GridFSUploadPublisher uploadPublisher = wrapped.uploadFromPublisher(filename, sourceToPublisher, options); + return requireNonNull(Mono.from(uploadPublisher).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public void uploadFromStream(final BsonValue id, final String filename, final InputStream source) { + uploadFromStream(id, filename, source, new GridFSUploadOptions()); + } + + @Override + public void uploadFromStream(final BsonValue id, final String filename, final InputStream source, final GridFSUploadOptions options) { + throw new UnsupportedOperationException(); + } + + @Override + public ObjectId uploadFromStream(final ClientSession clientSession, final String filename, final InputStream source) { + return uploadFromStream(clientSession, filename, source, new GridFSUploadOptions()); + } + + @Override + public ObjectId uploadFromStream(final ClientSession clientSession, final String filename, final InputStream source, + final GridFSUploadOptions options) { + throw new UnsupportedOperationException(); + } + + @Override + public void uploadFromStream(final ClientSession clientSession, final BsonValue id, final String filename, final InputStream source) { + uploadFromStream(clientSession, id, filename, source, new GridFSUploadOptions()); + } + + @Override + public void uploadFromStream(final ClientSession clientSession, final BsonValue id, final String filename, final InputStream source, + final GridFSUploadOptions options) { + throw new UnsupportedOperationException(); + } + + @Override + public GridFSDownloadStream openDownloadStream(final ObjectId id) { + return openDownloadStream(new BsonObjectId(id)); + } + + @Override + public GridFSDownloadStream openDownloadStream(final BsonValue id) { + return new SyncGridFSDownloadStream(wrapped.downloadToPublisher(id)); + } + + @Override + public GridFSDownloadStream openDownloadStream(final String filename) { + return 
openDownloadStream(filename, new GridFSDownloadOptions()); + } + + @Override + public GridFSDownloadStream openDownloadStream(final String filename, final GridFSDownloadOptions options) { + throw new UnsupportedOperationException(); + } + + @Override + public GridFSDownloadStream openDownloadStream(final ClientSession clientSession, final ObjectId id) { + return openDownloadStream(clientSession, new BsonObjectId(id)); + } + + @Override + public GridFSDownloadStream openDownloadStream(final ClientSession clientSession, final BsonValue id) { + throw new UnsupportedOperationException(); + } + + @Override + public GridFSDownloadStream openDownloadStream(final ClientSession clientSession, final String filename) { + return openDownloadStream(clientSession, filename, new GridFSDownloadOptions()); + } + + @Override + public GridFSDownloadStream openDownloadStream(final ClientSession clientSession, final String filename, + final GridFSDownloadOptions options) { + throw new UnsupportedOperationException(); + } + + @Override + public void downloadToStream(final ObjectId id, final OutputStream destination) { + downloadToStream(new BsonObjectId(id), destination); + } + + @Override + public void downloadToStream(final BsonValue id, final OutputStream destination) { + toOutputStream(wrapped.downloadToPublisher(id), destination); + } + + @Override + public void downloadToStream(final String filename, final OutputStream destination) { + downloadToStream(filename, destination, new GridFSDownloadOptions()); + } + + @Override + public void downloadToStream(final String filename, final OutputStream destination, final GridFSDownloadOptions options) { + toOutputStream(wrapped.downloadToPublisher(filename, options), destination); + } + + @Override + public void downloadToStream(final ClientSession clientSession, final ObjectId id, final OutputStream destination) { + downloadToStream(clientSession, new BsonObjectId(id), destination); + } + + @Override + public void downloadToStream(final ClientSession clientSession, final BsonValue id, final OutputStream destination) { + toOutputStream(wrapped.downloadToPublisher(unwrap(clientSession), id), destination); + } + + @Override + public void downloadToStream(final ClientSession clientSession, final String filename, final OutputStream destination) { + downloadToStream(clientSession, filename, destination, new GridFSDownloadOptions()); + } + + @Override + public void downloadToStream(final ClientSession clientSession, final String filename, final OutputStream destination, + final GridFSDownloadOptions options) { + toOutputStream(wrapped.downloadToPublisher(unwrap(clientSession), filename, options), destination); + } + + @Override + public GridFSFindIterable find() { + throw new UnsupportedOperationException(); + } + + @Override + public GridFSFindIterable find(final Bson filter) { + return new SyncGridFSFindIterable(wrapped.find(filter)); + } + + @Override + public GridFSFindIterable find(final ClientSession clientSession) { + return new SyncGridFSFindIterable(wrapped.find(unwrap(clientSession))); + } + + @Override + public GridFSFindIterable find(final ClientSession clientSession, final Bson filter) { + return new SyncGridFSFindIterable(wrapped.find(unwrap(clientSession), filter)); + } + + @Override + public void delete(final ObjectId id) { + delete(new BsonObjectId(id)); + } + + @Override + public void delete(final BsonValue id) { + Mono.from(wrapped.delete(id)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public void delete(final ClientSession 
clientSession, final ObjectId id) {
+        delete(clientSession, new BsonObjectId(id));
+    }
+
+    @Override
+    public void delete(final ClientSession clientSession, final BsonValue id) {
+        Mono.from(wrapped.delete(unwrap(clientSession), id)).contextWrite(CONTEXT).block(TIMEOUT_DURATION);
+    }
+
+    @Override
+    public void rename(final ObjectId id, final String newFilename) {
+        rename(new BsonObjectId(id), newFilename);
+    }
+
+    @Override
+    public void rename(final BsonValue id, final String newFilename) {
+        Mono.from(wrapped.rename(id, newFilename)).contextWrite(CONTEXT).block(TIMEOUT_DURATION);
+    }
+
+    @Override
+    public void rename(final ClientSession clientSession, final ObjectId id, final String newFilename) {
+        rename(clientSession, new BsonObjectId(id), newFilename);
+    }
+
+    @Override
+    public void rename(final ClientSession clientSession, final BsonValue id, final String newFilename) {
+        Mono.from(wrapped.rename(unwrap(clientSession), id, newFilename)).contextWrite(CONTEXT).block(TIMEOUT_DURATION);
+    }
+
+    @Override
+    public void drop() {
+        Mono.from(wrapped.drop())
+                .contextWrite(CONTEXT)
+                .block(TIMEOUT_DURATION);
+    }
+
+    @Override
+    public void drop(final ClientSession clientSession) {
+        Mono.from(wrapped.drop(unwrap(clientSession)))
+                .contextWrite(CONTEXT)
+                .block(TIMEOUT_DURATION);
+    }
+
+    private void toOutputStream(final GridFSDownloadPublisher downloadPublisher, final OutputStream destination) {
+        Flux.from(downloadPublisher).toStream().forEach(byteBuffer -> {
+            try {
+                byte[] bytes = new byte[byteBuffer.remaining()];
+                byteBuffer.get(bytes);
+                destination.write(bytes);
+            } catch (IOException e) {
+                throw new MongoGridFSException("IOException when writing to the OutputStream", e);
+            }
+        });
+    }
+
+    @SuppressWarnings("BlockingMethodInNonBlockingContext")
+    private Flux<ByteBuffer> inputStreamToFlux(final InputStream source, final GridFSUploadOptions options) {
+        List<ByteBuffer> byteBuffers = new ArrayList<>();
+        int chunkSize = options.getChunkSizeBytes() == null ? wrapped.getChunkSizeBytes() : options.getChunkSizeBytes();
+        byte[] buffer = new byte[chunkSize];
+        try {
+            int len;
+            while ((len = source.read(buffer)) != -1) {
+                byteBuffers.add(ByteBuffer.wrap(buffer, 0, len));
+                buffer = new byte[chunkSize];
+            }
+            return Flux.fromIterable(byteBuffers);
+        } catch (IOException e) {
+            throw new MongoGridFSException("IOException when reading from the InputStream", e);
+        }
+    }
+
+    private com.mongodb.reactivestreams.client.ClientSession unwrap(final ClientSession clientSession) {
+        return ((SyncClientSession) clientSession).getWrapped();
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncGridFSDownloadStream.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncGridFSDownloadStream.java
new file mode 100644
index 00000000000..b3217b8f47d
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncGridFSDownloadStream.java
@@ -0,0 +1,137 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.syncadapter;
+
+import com.mongodb.MongoGridFSException;
+import com.mongodb.client.gridfs.GridFSDownloadStream;
+import com.mongodb.client.gridfs.model.GridFSFile;
+import com.mongodb.reactivestreams.client.gridfs.GridFSDownloadPublisher;
+import reactor.core.publisher.Flux;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import static com.mongodb.ClusterFixture.TIMEOUT_DURATION;
+import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT;
+import static java.util.Objects.requireNonNull;
+
+public class SyncGridFSDownloadStream extends GridFSDownloadStream {
+    private final AtomicBoolean closed = new AtomicBoolean(false);
+    private final GridFSDownloadPublisher wrapped;
+    private ByteBuffer byteBuffer;
+    private boolean dataRead;
+
+    public SyncGridFSDownloadStream(final GridFSDownloadPublisher publisher) {
+        this.wrapped = publisher;
+        this.byteBuffer = ByteBuffer.allocate(0);
+    }
+
+    @Override
+    public GridFSFile getGridFSFile() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public GridFSDownloadStream batchSize(final int batchSize) {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public int read() {
+        checkClosed();
+        readAll();
+        // Per the InputStream contract: an unsigned byte, or -1 at end of stream.
+        return byteBuffer.hasRemaining() ? byteBuffer.get() & 0xFF : -1;
+    }
+
+    @Override
+    public int read(final byte[] b) {
+        return read(b, 0, b.length);
+    }
+
+    @Override
+    public int read(final byte[] b, final int off, final int len) {
+        checkClosed();
+        readAll();
+        if (!byteBuffer.hasRemaining()) {
+            return -1;
+        }
+        int count = Math.min(byteBuffer.remaining(), len);
+        byteBuffer.get(b, off, count);
+        return count;
+    }
+
+    @Override
+    public long skip(final long n) {
+        checkClosed();
+        readAll();
+        long skipped = Math.min(byteBuffer.remaining(), Math.max(n, 0));
+        byteBuffer.position(byteBuffer.position() + (int) skipped);
+        return skipped;
+    }
+
+    @Override
+    public int available() {
+        checkClosed();
+        readAll();
+        return byteBuffer.remaining();
+    }
+
+    @Override
+    public void mark() {
+        checkClosed();
+        readAll();
+        byteBuffer.mark();
+    }
+
+    @Override
+    public void reset() {
+        checkClosed();
+        readAll();
+        byteBuffer.reset();
+    }
+
+    @Override
+    public void close() {
+        closed.set(true);
+    }
+
+    private void readAll() {
+        if (dataRead) {
+            // Fetch only once: re-subscribing to the publisher would restart the download
+            // and discard the current read position.
+            return;
+        }
+        List<ByteBuffer> byteBuffers = requireNonNull(Flux
+                .from(wrapped).contextWrite(CONTEXT).collectList().block(TIMEOUT_DURATION));

+        // Concatenate the chunks into a single buffer; flip() makes it readable from the start.
+        int totalBytes = byteBuffers.stream().mapToInt(ByteBuffer::remaining).sum();
+        ByteBuffer combined = ByteBuffer.allocate(totalBytes);
+        byteBuffers.forEach(combined::put);
+        combined.flip();
+        byteBuffer = combined;
+        dataRead = true;
+    }
+
+    private void checkClosed() {
+        if (closed.get()) {
+            throw new MongoGridFSException("The DownloadStream has been closed");
+        }
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncGridFSFindIterable.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncGridFSFindIterable.java
new file mode 100644
index 00000000000..1021e6bc102
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncGridFSFindIterable.java
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.syncadapter; + +import com.mongodb.client.gridfs.GridFSFindIterable; +import com.mongodb.client.gridfs.model.GridFSFile; +import com.mongodb.client.model.Collation; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.gridfs.GridFSFindPublisher; +import org.bson.conversions.Bson; + +import java.util.concurrent.TimeUnit; + +class SyncGridFSFindIterable extends SyncMongoIterable implements GridFSFindIterable { + private final GridFSFindPublisher wrapped; + + SyncGridFSFindIterable(final GridFSFindPublisher wrapped) { + super(wrapped); + this.wrapped = wrapped; + } + + @Override + public GridFSFindIterable filter(@Nullable final Bson filter) { + wrapped.filter(filter); + return this; + } + + @Override + public GridFSFindIterable limit(final int limit) { + wrapped.limit(limit); + return this; + } + + @Override + public GridFSFindIterable skip(final int skip) { + wrapped.skip(skip); + return this; + } + + @Override + public GridFSFindIterable maxTime(final long maxTime, final TimeUnit timeUnit) { + wrapped.maxTime(maxTime, timeUnit); + return this; + } + + @Override + public GridFSFindIterable sort(@Nullable final Bson sort) { + wrapped.sort(sort); + return this; + } + + @Override + public GridFSFindIterable noCursorTimeout(final boolean noCursorTimeout) { + wrapped.noCursorTimeout(noCursorTimeout); + return this; + } + + @Override + public GridFSFindIterable batchSize(final int batchSize) { + wrapped.batchSize(batchSize); + super.batchSize(batchSize); + return this; + } + + @Override + public GridFSFindIterable collation(@Nullable final Collation collation) { + wrapped.collation(collation); + return this; + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListCollectionNamesIterable.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListCollectionNamesIterable.java new file mode 100644 index 00000000000..7a4d9481c03 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListCollectionNamesIterable.java @@ -0,0 +1,71 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.syncadapter; + +import com.mongodb.client.ListCollectionNamesIterable; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.ListCollectionNamesPublisher; +import org.bson.BsonValue; +import org.bson.conversions.Bson; + +import java.util.concurrent.TimeUnit; + +final class SyncListCollectionNamesIterable extends SyncMongoIterable implements ListCollectionNamesIterable { + private final ListCollectionNamesPublisher wrapped; + + SyncListCollectionNamesIterable(final ListCollectionNamesPublisher wrapped) { + super(wrapped); + this.wrapped = wrapped; + } + + @Override + public ListCollectionNamesIterable filter(@Nullable final Bson filter) { + wrapped.filter(filter); + return this; + } + + @Override + public ListCollectionNamesIterable maxTime(final long maxTime, final TimeUnit timeUnit) { + wrapped.maxTime(maxTime, timeUnit); + return this; + } + + @Override + public ListCollectionNamesIterable batchSize(final int batchSize) { + wrapped.batchSize(batchSize); + super.batchSize(batchSize); + return this; + } + + @Override + public ListCollectionNamesIterable comment(final String comment) { + wrapped.comment(comment); + return this; + } + + @Override + public ListCollectionNamesIterable comment(final BsonValue comment) { + wrapped.comment(comment); + return this; + } + + @Override + public ListCollectionNamesIterable authorizedCollections(final boolean authorizedCollections) { + wrapped.authorizedCollections(authorizedCollections); + return this; + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListCollectionsIterable.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListCollectionsIterable.java new file mode 100644 index 00000000000..48d88963077 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListCollectionsIterable.java @@ -0,0 +1,72 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.syncadapter; + +import com.mongodb.client.ListCollectionsIterable; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.ListCollectionsPublisher; +import org.bson.BsonValue; +import org.bson.conversions.Bson; + +import java.util.concurrent.TimeUnit; + +class SyncListCollectionsIterable extends SyncMongoIterable implements ListCollectionsIterable { + private final ListCollectionsPublisher wrapped; + + SyncListCollectionsIterable(final ListCollectionsPublisher wrapped) { + super(wrapped); + this.wrapped = wrapped; + } + + @Override + public ListCollectionsIterable filter(@Nullable final Bson filter) { + wrapped.filter(filter); + return this; + } + + @Override + public ListCollectionsIterable maxTime(final long maxTime, final TimeUnit timeUnit) { + wrapped.maxTime(maxTime, timeUnit); + return this; + } + + @Override + public ListCollectionsIterable batchSize(final int batchSize) { + wrapped.batchSize(batchSize); + super.batchSize(batchSize); + return this; + } + + @Override + public ListCollectionsIterable comment(final String comment) { + wrapped.comment(comment); + return this; + } + + @Override + public ListCollectionsIterable comment(final BsonValue comment) { + wrapped.comment(comment); + return this; + } + + @Override + public ListCollectionsIterable timeoutMode(final TimeoutMode timeoutMode) { + wrapped.timeoutMode(timeoutMode); + return this; + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListDatabasesIterable.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListDatabasesIterable.java new file mode 100644 index 00000000000..4248e59c361 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListDatabasesIterable.java @@ -0,0 +1,84 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.syncadapter; + +import com.mongodb.client.ListDatabasesIterable; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.ListDatabasesPublisher; +import org.bson.BsonValue; +import org.bson.conversions.Bson; + +import java.util.concurrent.TimeUnit; + +class SyncListDatabasesIterable extends SyncMongoIterable implements ListDatabasesIterable { + private final ListDatabasesPublisher wrapped; + + SyncListDatabasesIterable(final ListDatabasesPublisher wrapped) { + super(wrapped); + this.wrapped = wrapped; + } + + @Override + public ListDatabasesIterable maxTime(final long maxTime, final TimeUnit timeUnit) { + wrapped.maxTime(maxTime, timeUnit); + return this; + } + + @Override + public ListDatabasesIterable batchSize(final int batchSize) { + wrapped.batchSize(batchSize); + super.batchSize(batchSize); + return this; + } + + @Override + public ListDatabasesIterable filter(@Nullable final Bson filter) { + wrapped.filter(filter); + return this; + } + + @Override + public ListDatabasesIterable nameOnly(@Nullable final Boolean nameOnly) { + wrapped.nameOnly(nameOnly); + return this; + } + + @Override + public ListDatabasesIterable authorizedDatabasesOnly(@Nullable final Boolean authorizedDatabasesOnly) { + wrapped.authorizedDatabasesOnly(authorizedDatabasesOnly); + return this; + } + + @Override + public ListDatabasesIterable comment(final String comment) { + wrapped.comment(comment); + return this; + } + + @Override + public ListDatabasesIterable comment(final BsonValue comment) { + wrapped.comment(comment); + return this; + } + + @Override + public ListDatabasesIterable timeoutMode(final TimeoutMode timeoutMode) { + wrapped.timeoutMode(timeoutMode); + return this; + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListIndexesIterable.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListIndexesIterable.java new file mode 100644 index 00000000000..947cb8f0d0f --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListIndexesIterable.java @@ -0,0 +1,64 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.syncadapter; + +import com.mongodb.client.ListIndexesIterable; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.reactivestreams.client.ListIndexesPublisher; +import org.bson.BsonValue; + +import java.util.concurrent.TimeUnit; + +class SyncListIndexesIterable extends SyncMongoIterable implements ListIndexesIterable { + private final ListIndexesPublisher wrapped; + + SyncListIndexesIterable(final ListIndexesPublisher wrapped) { + super(wrapped); + this.wrapped = wrapped; + } + + @Override + public ListIndexesIterable maxTime(final long maxTime, final TimeUnit timeUnit) { + wrapped.maxTime(maxTime, timeUnit); + return this; + } + + @Override + public ListIndexesIterable batchSize(final int batchSize) { + wrapped.batchSize(batchSize); + super.batchSize(batchSize); + return this; + } + + @Override + public ListIndexesIterable comment(final String comment) { + wrapped.comment(comment); + return this; + } + + @Override + public ListIndexesIterable comment(final BsonValue comment) { + wrapped.comment(comment); + return this; + } + + @Override + public ListIndexesIterable timeoutMode(final TimeoutMode timeoutMode) { + wrapped.timeoutMode(timeoutMode); + return this; + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListSearchIndexesIterable.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListSearchIndexesIterable.java new file mode 100644 index 00000000000..f119c645916 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncListSearchIndexesIterable.java @@ -0,0 +1,109 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.syncadapter; + +import com.mongodb.ExplainVerbosity; +import com.mongodb.client.ListSearchIndexesIterable; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.Collation; +import com.mongodb.reactivestreams.client.ListSearchIndexesPublisher; +import org.bson.BsonValue; +import org.bson.Document; +import reactor.core.publisher.Mono; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; +import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT; +import static java.util.Objects.requireNonNull; + +final class SyncListSearchIndexesIterable extends SyncMongoIterable implements ListSearchIndexesIterable { + private final ListSearchIndexesPublisher wrapped; + + SyncListSearchIndexesIterable(final ListSearchIndexesPublisher wrapped) { + super(wrapped); + this.wrapped = wrapped; + } + + @Override + public ListSearchIndexesIterable maxTime(final long maxTime, final TimeUnit timeUnit) { + wrapped.maxTime(maxTime, timeUnit); + return this; + } + + @Override + public ListSearchIndexesIterable collation(final Collation collation) { + wrapped.collation(collation); + return this; + } + + @Override + public ListSearchIndexesIterable name(final String indexName) { + wrapped.name(indexName); + return this; + } + + @Override + public ListSearchIndexesIterable allowDiskUse(final Boolean allowDiskUse) { + wrapped.allowDiskUse(allowDiskUse); + return this; + } + + @Override + public ListSearchIndexesIterable batchSize(final int batchSize) { + wrapped.batchSize(batchSize); + return this; + } + + @Override + public ListSearchIndexesIterable comment(final String comment) { + wrapped.comment(comment); + return this; + } + + @Override + public ListSearchIndexesIterable comment(final BsonValue comment) { + wrapped.comment(comment); + return this; + } + + @Override + public ListSearchIndexesIterable timeoutMode(final TimeoutMode timeoutMode) { + wrapped.timeoutMode(timeoutMode); + return this; + } + + @Override + public Document explain() { + return requireNonNull(Mono.from(wrapped.explain()).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public Document explain(final ExplainVerbosity verbosity) { + return requireNonNull(Mono.from(wrapped.explain(verbosity)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public E explain(final Class explainResultClass) { + return requireNonNull(Mono.from(wrapped.explain(explainResultClass)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public E explain(final Class explainResultClass, final ExplainVerbosity verbosity) { + return requireNonNull(Mono.from(wrapped.explain(explainResultClass, verbosity)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMapReduceIterable.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMapReduceIterable.java new file mode 100644 index 00000000000..efc70b690fa --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMapReduceIterable.java @@ -0,0 +1,135 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.syncadapter; + +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.Collation; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; +import reactor.core.publisher.Mono; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; +import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT; + +@SuppressWarnings("deprecation") +class SyncMapReduceIterable extends SyncMongoIterable implements com.mongodb.client.MapReduceIterable { + private final com.mongodb.reactivestreams.client.MapReducePublisher wrapped; + + SyncMapReduceIterable(final com.mongodb.reactivestreams.client.MapReducePublisher wrapped) { + super(wrapped); + this.wrapped = wrapped; + } + + @Override + public void toCollection() { + Mono.from(wrapped.toCollection()).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public com.mongodb.client.MapReduceIterable collectionName(final String collectionName) { + wrapped.collectionName(collectionName); + return this; + } + + @Override + public com.mongodb.client.MapReduceIterable finalizeFunction(@Nullable final String finalizeFunction) { + wrapped.finalizeFunction(finalizeFunction); + return this; + } + + @Override + public com.mongodb.client.MapReduceIterable scope(@Nullable final Bson scope) { + wrapped.scope(scope); + return this; + } + + @Override + public com.mongodb.client.MapReduceIterable sort(@Nullable final Bson sort) { + wrapped.sort(sort); + return this; + } + + @Override + public com.mongodb.client.MapReduceIterable filter(@Nullable final Bson filter) { + wrapped.filter(filter); + return this; + } + + @Override + public com.mongodb.client.MapReduceIterable limit(final int limit) { + wrapped.limit(limit); + return this; + } + + @Override + public com.mongodb.client.MapReduceIterable jsMode(final boolean jsMode) { + wrapped.jsMode(jsMode); + return this; + } + + @Override + public com.mongodb.client.MapReduceIterable verbose(final boolean verbose) { + wrapped.verbose(verbose); + return this; + } + + @Override + public com.mongodb.client.MapReduceIterable maxTime(final long maxTime, final TimeUnit timeUnit) { + wrapped.maxTime(maxTime, timeUnit); + return this; + } + + @Override + public com.mongodb.client.MapReduceIterable action(final com.mongodb.client.model.MapReduceAction action) { + wrapped.action(action); + return this; + } + + @Override + public com.mongodb.client.MapReduceIterable databaseName(@Nullable final String databaseName) { + wrapped.databaseName(databaseName); + return this; + } + + + @Override + public com.mongodb.client.MapReduceIterable batchSize(final int batchSize) { + wrapped.batchSize(batchSize); + super.batchSize(batchSize); + return this; + } + + @Override + public com.mongodb.client.MapReduceIterable bypassDocumentValidation(@Nullable final Boolean bypassDocumentValidation) { + wrapped.bypassDocumentValidation(bypassDocumentValidation); + return this; + } + + @Override + public com.mongodb.client.MapReduceIterable collation(@Nullable final Collation 
collation) { + wrapped.collation(collation); + return this; + } + + @Override + public com.mongodb.client.MapReduceIterable timeoutMode(final TimeoutMode timeoutMode) { + wrapped.timeoutMode(timeoutMode); + return this; + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoClient.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoClient.java new file mode 100644 index 00000000000..348bef110ff --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoClient.java @@ -0,0 +1,379 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.syncadapter; + +import com.mongodb.ClientBulkWriteException; +import com.mongodb.ClientSessionOptions; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoDriverInformation; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.assertions.Assertions; +import com.mongodb.client.ChangeStreamIterable; +import com.mongodb.client.ClientSession; +import com.mongodb.client.ListDatabasesIterable; +import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoCluster; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.MongoIterable; +import com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.event.ConnectionCheckedInEvent; +import com.mongodb.event.ConnectionCheckedOutEvent; +import com.mongodb.event.ConnectionPoolListener; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.MongoClients; +import com.mongodb.reactivestreams.client.internal.BatchCursor; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import static com.mongodb.ClusterFixture.sleep; +import static java.lang.String.format; + +public class SyncMongoClient implements MongoClient { + + private static boolean waitForBatchCursorCreation; + + /** + * Unfortunately this is the only way to wait for a query to be initiated, since Reactive Streams is asynchronous + * and we have no way of knowing. Tests which require cursor initiation to complete before execution of the next operation + * can set this to a positive value. A value of 256 ms has been shown to work well. The default value is 0. 
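+     * <p>A hypothetical usage sketch (the 256 ms value is only the suggestion from the text above,
+     * not a requirement):</p>
+     * <pre>{@code
+     * SyncMongoClient.enableSleepAfterCursorOpen(256);
+     * try {
+     *     // run operations that must observe a fully initiated cursor
+     * } finally {
+     *     SyncMongoClient.disableSleep(); // restore the default of 0 ms
+     * }
+     * }</pre>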
+ */ + public static void enableSleepAfterCursorOpen(final long sleepMS) { + SyncMongoCluster.enableSleepAfterCursorOpen(sleepMS); + } + + /** + * Unfortunately this is the only way to wait for error logic to complete, since it's asynchronous. + * This is inherently racy but there are not any other good options. Tests which require cursor error handling to complete before + * execution of the next operation can set this to a positive value. A value of 256 ms has been shown to work well. The default + * value is 0. + */ + public static void enableSleepAfterCursorError(final long sleepMS) { + SyncMongoCluster.enableSleepAfterCursorError(sleepMS); + } + + /** + * Unfortunately this is the only way to wait for close to complete, since it's asynchronous. + * This is inherently racy but there are not any other good options. Tests which require cursor cancellation to complete before + * execution of the next operation can set this to a positive value. A value of 256 ms has been shown to work well. The default + * value is 0. + */ + public static void enableSleepAfterCursorClose(final long sleepMS) { + SyncMongoCluster.enableSleepAfterCursorClose(sleepMS); + } + + /** + * Enables {@linkplain Thread#sleep(long) sleeping} in {@link SyncClientSession#close()} to wait until asynchronous closing actions + * are done. It is an attempt to make asynchronous {@link SyncMongoClient#close()} method synchronous; + * the attempt is racy and incorrect, but good enough for tests given that no other approach is available. + */ + public static void enableSleepAfterSessionClose(final long sleepMS) { + SyncMongoCluster.enableSleepAfterSessionClose(sleepMS); + } + + /** + * Enables behavior for waiting until a reactive {@link BatchCursor} is created. + *
<p>
+ * When enabled, {@link SyncMongoCursor} allows intercepting the result of the cursor creation process. + * If the creation fails, the resulting exception will be propagated; if successful, the + * process will proceed to issue getMore commands. + *
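+     * <p>A hypothetical call pattern for a test (both methods are defined on this class):</p>
+     * <pre>{@code
+     * SyncMongoClient.enableWaitForBatchCursorCreation();
+     * try {
+     *     // iterate a single cursor; a failed cursor creation surfaces here
+     * } finally {
+     *     SyncMongoClient.disableWaitForBatchCursorCreation();
+     * }
+     * }</pre>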
<p>
+ * NOTE: Do not enable when multiple cursors are being iterated concurrently. + */ + public static void enableWaitForBatchCursorCreation() { + waitForBatchCursorCreation = true; + } + + public static boolean isWaitForBatchCursorCreationEnabled() { + return waitForBatchCursorCreation; + } + + public static void disableWaitForBatchCursorCreation() { + waitForBatchCursorCreation = false; + } + + public static void disableSleep() { + SyncMongoCluster.disableSleep(); + } + + public static long getSleepAfterCursorOpen() { + return SyncMongoCluster.getSleepAfterCursorOpen(); + } + + public static long getSleepAfterCursorError() { + return SyncMongoCluster.getSleepAfterCursorError(); + } + + public static long getSleepAfterCursorClose() { + return SyncMongoCluster.getSleepAfterCursorClose(); + } + + public static long getSleepAfterSessionClose() { + return SyncMongoCluster.getSleepAfterSessionClose(); + } + + private final com.mongodb.reactivestreams.client.MongoClient wrapped; + private final SyncMongoCluster delegate; + private final ConnectionPoolCounter connectionPoolCounter; + + public SyncMongoClient(final MongoClientSettings settings) { + this(settings, null); + } + + public SyncMongoClient(final MongoClientSettings settings, @Nullable final MongoDriverInformation mongoDriverInformation) { + this(MongoClientSettings.builder(settings), mongoDriverInformation); + } + + public SyncMongoClient(final MongoClientSettings.Builder builder) { + this(builder, null); + } + + public SyncMongoClient(final MongoClientSettings.Builder builder, @Nullable final MongoDriverInformation mongoDriverInformation) { + this.connectionPoolCounter = new ConnectionPoolCounter(); + builder.applyToConnectionPoolSettings(b -> b.addConnectionPoolListener(connectionPoolCounter)); + this.wrapped = MongoClients.create(builder.build(), mongoDriverInformation); + this.delegate = new SyncMongoCluster(wrapped); + } + + public com.mongodb.reactivestreams.client.MongoClient getWrapped() { + return wrapped; + } + + @Override + public CodecRegistry getCodecRegistry() { + return delegate.getCodecRegistry(); + } + + @Override + public ReadPreference getReadPreference() { + return delegate.getReadPreference(); + } + + @Override + public WriteConcern getWriteConcern() { + return delegate.getWriteConcern(); + } + + @Override + public ReadConcern getReadConcern() { + return delegate.getReadConcern(); + } + + @Override + public Long getTimeout(final TimeUnit timeUnit) { + return delegate.getTimeout(timeUnit); + } + + @Override + public MongoCluster withCodecRegistry(final CodecRegistry codecRegistry) { + return delegate.withCodecRegistry(codecRegistry); + } + + @Override + public MongoCluster withReadPreference(final ReadPreference readPreference) { + return delegate.withReadPreference(readPreference); + } + + @Override + public MongoCluster withWriteConcern(final WriteConcern writeConcern) { + return delegate.withWriteConcern(writeConcern); + } + + @Override + public MongoCluster withReadConcern(final ReadConcern readConcern) { + return delegate.withReadConcern(readConcern); + } + + @Override + public MongoCluster withTimeout(final long timeout, final TimeUnit timeUnit) { + return delegate.withTimeout(timeout, timeUnit); + } + + @Override + public MongoDatabase getDatabase(final String databaseName) { + return delegate.getDatabase(databaseName); + } + + @Override + public ClientSession startSession() { + return delegate.startSession(); + } + + @Override + public ClientSession startSession(final ClientSessionOptions options) { + return 
delegate.startSession(options); + } + + @Override + public MongoIterable listDatabaseNames() { + return delegate.listDatabaseNames(); + } + + @Override + public MongoIterable listDatabaseNames(final ClientSession clientSession) { + return delegate.listDatabaseNames(clientSession); + } + + + @Override + public ListDatabasesIterable listDatabases() { + return delegate.listDatabases(); + } + + @Override + public ListDatabasesIterable listDatabases(final ClientSession clientSession) { + return delegate.listDatabases(clientSession); + } + + @Override + public ListDatabasesIterable listDatabases(final Class resultClass) { + return delegate.listDatabases(resultClass); + } + + @Override + public ListDatabasesIterable listDatabases(final ClientSession clientSession, final Class resultClass) { + return delegate.listDatabases(clientSession, resultClass); + } + + @Override + public ChangeStreamIterable watch() { + return delegate.watch(); + } + + @Override + public ChangeStreamIterable watch(final Class resultClass) { + return delegate.watch(resultClass); + } + + @Override + public ChangeStreamIterable watch(final List pipeline) { + return delegate.watch(pipeline); + } + + @Override + public ChangeStreamIterable watch(final List pipeline, final Class resultClass) { + return delegate.watch(pipeline, resultClass); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession) { + return delegate.watch(clientSession); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession, final Class resultClass) { + return delegate.watch(clientSession, resultClass); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession, final List pipeline) { + return delegate.watch(clientSession, pipeline); + } + + @Override + public ChangeStreamIterable watch( + final ClientSession clientSession, final List pipeline, final Class resultClass) { + return delegate.watch(clientSession, pipeline, resultClass); + } + + @Override + public void close() { + wrapped.close(); + connectionPoolCounter.assertConnectionsClosed(); + } + + + @Override + public ClientBulkWriteResult bulkWrite( + final List clientWriteModels) throws ClientBulkWriteException { + return delegate.bulkWrite(clientWriteModels); + } + + @Override + public ClientBulkWriteResult bulkWrite( + final List clientWriteModels, + final ClientBulkWriteOptions options) throws ClientBulkWriteException { + return delegate.bulkWrite(clientWriteModels, options); + } + + @Override + public ClientBulkWriteResult bulkWrite( + final ClientSession clientSession, + final List clientWriteModels) throws ClientBulkWriteException { + return delegate.bulkWrite(clientSession, clientWriteModels); + } + + @Override + public ClientBulkWriteResult bulkWrite( + final ClientSession clientSession, + final List clientWriteModels, + final ClientBulkWriteOptions options) throws ClientBulkWriteException { + return delegate.bulkWrite(clientSession, clientWriteModels, options); + } + + @Override + public ClusterDescription getClusterDescription() { + return wrapped.getClusterDescription(); + } + + @Override + public void appendMetadata(final MongoDriverInformation mongoDriverInformation) { + wrapped.appendMetadata(mongoDriverInformation); + } + + static class ConnectionPoolCounter implements ConnectionPoolListener { + private final AtomicInteger activeConnections = new AtomicInteger(0); + + @Override + public void connectionCheckedOut(final ConnectionCheckedOutEvent event) { + activeConnections.incrementAndGet(); + } + + 
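+        // The listener pairs check-outs with check-ins: a non-zero balance after
+        // close() means a connection was checked out and never returned (a leak).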
@Override + public void connectionCheckedIn(final ConnectionCheckedInEvent event) { + activeConnections.decrementAndGet(); + } + + protected void assertConnectionsClosed() { + int activeConnectionsCount = activeConnections.get(); + boolean connectionsClosed = activeConnectionsCount == 0; + int counter = 0; + while (counter < 10 && !connectionsClosed) { + activeConnectionsCount = activeConnections.get(); + connectionsClosed = activeConnectionsCount == 0; + if (!connectionsClosed) { + sleep(200); + counter++; + } + } + + Assertions.assertTrue(activeConnectionsCount == 0, + format("Expected all connections to be closed after closing the client. %n" + + "The connection pool listener reports '%d' open connections.", activeConnectionsCount)); + + } + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCluster.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCluster.java new file mode 100644 index 00000000000..fc3cad4b6a7 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCluster.java @@ -0,0 +1,318 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.syncadapter; + +import com.mongodb.ClientBulkWriteException; +import com.mongodb.ClientSessionOptions; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.client.ChangeStreamIterable; +import com.mongodb.client.ClientSession; +import com.mongodb.client.ListDatabasesIterable; +import com.mongodb.client.MongoCluster; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.MongoIterable; +import com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; +import org.bson.BsonDocument; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; +import reactor.core.publisher.Mono; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; +import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT; +import static java.util.Objects.requireNonNull; + +public class SyncMongoCluster implements MongoCluster { + + private static long sleepAfterCursorOpenMS; + private static long sleepAfterCursorErrorMS; + private static long sleepAfterCursorCloseMS; + private static long sleepAfterSessionCloseMS; + + /** + * Unfortunately this is the only way to wait for a query to be initiated, since Reactive Streams is asynchronous + * and we have no way of knowing. Tests which require cursor initiation to complete before execution of the next operation + * can set this to a positive value. A value of 256 ms has been shown to work well. 
The default value is 0. + */ + public static void enableSleepAfterCursorOpen(final long sleepMS) { + if (sleepAfterCursorOpenMS != 0) { + throw new IllegalStateException("Already enabled"); + } + if (sleepMS <= 0) { + throw new IllegalArgumentException("sleepMS must be a positive value"); + } + sleepAfterCursorOpenMS = sleepMS; + } + + /** + * Unfortunately this is the only way to wait for error logic to complete, since it's asynchronous. + * This is inherently racy but there are not any other good options. Tests which require cursor error handling to complete before + * execution of the next operation can set this to a positive value. A value of 256 ms has been shown to work well. The default + * value is 0. + */ + public static void enableSleepAfterCursorError(final long sleepMS) { + if (sleepAfterCursorErrorMS != 0) { + throw new IllegalStateException("Already enabled"); + } + if (sleepMS <= 0) { + throw new IllegalArgumentException("sleepMS must be a positive value"); + } + sleepAfterCursorErrorMS = sleepMS; + } + + /** + * Unfortunately this is the only way to wait for close to complete, since it's asynchronous. + * This is inherently racy but there are not any other good options. Tests which require cursor cancellation to complete before + * execution of the next operation can set this to a positive value. A value of 256 ms has been shown to work well. The default + * value is 0. + */ + public static void enableSleepAfterCursorClose(final long sleepMS) { + if (sleepAfterCursorCloseMS != 0) { + throw new IllegalStateException("Already enabled"); + } + if (sleepMS <= 0) { + throw new IllegalArgumentException("sleepMS must be a positive value"); + } + sleepAfterCursorCloseMS = sleepMS; + } + + + /** + * Enables {@linkplain Thread#sleep(long) sleeping} in {@link SyncClientSession#close()} to wait until asynchronous closing actions + * are done. It is an attempt to make asynchronous {@link SyncMongoClient#close()} method synchronous; + * the attempt is racy and incorrect, but good enough for tests given that no other approach is available. 
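+     * <p>A hypothetical sketch (the duration is an assumption; any positive value is accepted, and a second
+     * call without an intervening {@link #disableSleep()} throws {@link IllegalStateException}):</p>
+     * <pre>{@code
+     * SyncMongoCluster.enableSleepAfterSessionClose(256);
+     * try {
+     *     // close sessions whose asynchronous cleanup must finish before the next assertion
+     * } finally {
+     *     SyncMongoCluster.disableSleep();
+     * }
+     * }</pre>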
+ */ + public static void enableSleepAfterSessionClose(final long sleepMS) { + if (sleepAfterSessionCloseMS != 0) { + throw new IllegalStateException("Already enabled"); + } + if (sleepMS <= 0) { + throw new IllegalArgumentException("sleepMS must be a positive value"); + } + sleepAfterSessionCloseMS = sleepMS; + } + + public static void disableSleep() { + sleepAfterCursorOpenMS = 0; + sleepAfterCursorErrorMS = 0; + sleepAfterCursorCloseMS = 0; + sleepAfterSessionCloseMS = 0; + } + + public static long getSleepAfterCursorOpen() { + return sleepAfterCursorOpenMS; + } + + public static long getSleepAfterCursorError() { + return sleepAfterCursorErrorMS; + } + + public static long getSleepAfterCursorClose() { + return sleepAfterCursorCloseMS; + } + + public static long getSleepAfterSessionClose() { + return sleepAfterSessionCloseMS; + } + + private final com.mongodb.reactivestreams.client.MongoCluster wrapped; + + public SyncMongoCluster(final com.mongodb.reactivestreams.client.MongoCluster wrapped) { + this.wrapped = wrapped; + } + + public com.mongodb.reactivestreams.client.MongoCluster getWrapped() { + return wrapped; + } + + @Override + public CodecRegistry getCodecRegistry() { + return wrapped.getCodecRegistry(); + } + + @Override + public ReadPreference getReadPreference() { + return wrapped.getReadPreference(); + } + + @Override + public WriteConcern getWriteConcern() { + return wrapped.getWriteConcern(); + } + + @Override + public ReadConcern getReadConcern() { + return wrapped.getReadConcern(); + } + + @Override + public Long getTimeout(final TimeUnit timeUnit) { + return wrapped.getTimeout(timeUnit); + } + + @Override + public MongoCluster withCodecRegistry(final CodecRegistry codecRegistry) { + return new SyncMongoCluster(wrapped.withCodecRegistry(codecRegistry)); + } + + @Override + public MongoCluster withReadPreference(final ReadPreference readPreference) { + return new SyncMongoCluster(wrapped.withReadPreference(readPreference)); + } + + @Override + public MongoCluster withWriteConcern(final WriteConcern writeConcern) { + return new SyncMongoCluster(wrapped.withWriteConcern(writeConcern)); + } + + @Override + public MongoCluster withReadConcern(final ReadConcern readConcern) { + return new SyncMongoCluster(wrapped.withReadConcern(readConcern)); + } + + @Override + public MongoCluster withTimeout(final long timeout, final TimeUnit timeUnit) { + return new SyncMongoCluster(wrapped.withTimeout(timeout, timeUnit)); + } + + @Override + public MongoDatabase getDatabase(final String databaseName) { + return new SyncMongoDatabase(wrapped.getDatabase(databaseName)); + } + + @Override + public ClientSession startSession() { + return new SyncClientSession(requireNonNull(Mono.from(wrapped.startSession()).contextWrite(CONTEXT).block(TIMEOUT_DURATION)), this); + } + + @Override + public ClientSession startSession(final ClientSessionOptions options) { + return new SyncClientSession(requireNonNull(Mono.from(wrapped.startSession(options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)), this); + } + + @Override + public MongoIterable listDatabaseNames() { + return listDatabases(BsonDocument.class).nameOnly(true).map(result -> result.getString("name").getValue()); + } + + @Override + public MongoIterable listDatabaseNames(final ClientSession clientSession) { + return listDatabases(clientSession, BsonDocument.class).nameOnly(true).map(result -> result.getString("name").getValue()); + } + + @Override + public ListDatabasesIterable listDatabases() { + return new 
SyncListDatabasesIterable<>(wrapped.listDatabases()); + } + + @Override + public ListDatabasesIterable listDatabases(final ClientSession clientSession) { + return listDatabases(clientSession, Document.class); + } + + @Override + public ListDatabasesIterable listDatabases(final Class resultClass) { + return new SyncListDatabasesIterable<>(wrapped.listDatabases(resultClass)); + } + + @Override + public ListDatabasesIterable listDatabases(final ClientSession clientSession, final Class resultClass) { + return new SyncListDatabasesIterable<>(wrapped.listDatabases(unwrap(clientSession), resultClass)); + } + + @Override + public ChangeStreamIterable watch() { + return new SyncChangeStreamIterable<>(wrapped.watch()); + } + + @Override + public ChangeStreamIterable watch(final Class resultClass) { + return new SyncChangeStreamIterable<>(wrapped.watch(resultClass)); + } + + @Override + public ChangeStreamIterable watch(final List pipeline) { + return new SyncChangeStreamIterable<>(wrapped.watch(pipeline)); + } + + @Override + public ChangeStreamIterable watch(final List pipeline, final Class resultClass) { + return new SyncChangeStreamIterable<>(wrapped.watch(pipeline, resultClass)); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession) { + return new SyncChangeStreamIterable<>(wrapped.watch(unwrap(clientSession))); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession, final Class resultClass) { + return new SyncChangeStreamIterable<>(wrapped.watch(unwrap(clientSession), resultClass)); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession, final List pipeline) { + return new SyncChangeStreamIterable<>(wrapped.watch(unwrap(clientSession), pipeline)); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession, final List pipeline, + final Class resultClass) { + return new SyncChangeStreamIterable<>(wrapped.watch(unwrap(clientSession), pipeline, resultClass)); + } + + @Override + public ClientBulkWriteResult bulkWrite( + final List clientWriteModels) throws ClientBulkWriteException { + return requireNonNull(Mono.from(wrapped.bulkWrite(clientWriteModels)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public ClientBulkWriteResult bulkWrite( + final List clientWriteModels, + final ClientBulkWriteOptions options) throws ClientBulkWriteException { + return requireNonNull(Mono.from(wrapped.bulkWrite(clientWriteModels, options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public ClientBulkWriteResult bulkWrite( + final ClientSession clientSession, + final List clientWriteModels) throws ClientBulkWriteException { + return requireNonNull( + Mono.from(wrapped.bulkWrite(unwrap(clientSession), clientWriteModels)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public ClientBulkWriteResult bulkWrite( + final ClientSession clientSession, + final List clientWriteModels, + final ClientBulkWriteOptions options) throws ClientBulkWriteException { + return requireNonNull(Mono.from(wrapped.bulkWrite(unwrap(clientSession), clientWriteModels, options)).contextWrite(CONTEXT) + .block(TIMEOUT_DURATION)); + } + + private com.mongodb.reactivestreams.client.ClientSession unwrap(final ClientSession clientSession) { + return ((SyncClientSession) clientSession).getWrapped(); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCollection.java 
b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCollection.java new file mode 100644 index 00000000000..922e07cc2d5 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCollection.java @@ -0,0 +1,849 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.syncadapter; + +import com.mongodb.MongoNamespace; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.bulk.BulkWriteResult; +import com.mongodb.client.AggregateIterable; +import com.mongodb.client.ChangeStreamIterable; +import com.mongodb.client.ClientSession; +import com.mongodb.client.DistinctIterable; +import com.mongodb.client.FindIterable; +import com.mongodb.client.ListIndexesIterable; +import com.mongodb.client.ListSearchIndexesIterable; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.model.BulkWriteOptions; +import com.mongodb.client.model.CountOptions; +import com.mongodb.client.model.CreateIndexOptions; +import com.mongodb.client.model.DeleteOptions; +import com.mongodb.client.model.DropCollectionOptions; +import com.mongodb.client.model.DropIndexOptions; +import com.mongodb.client.model.EstimatedDocumentCountOptions; +import com.mongodb.client.model.FindOneAndDeleteOptions; +import com.mongodb.client.model.FindOneAndReplaceOptions; +import com.mongodb.client.model.FindOneAndUpdateOptions; +import com.mongodb.client.model.IndexModel; +import com.mongodb.client.model.IndexOptions; +import com.mongodb.client.model.InsertManyOptions; +import com.mongodb.client.model.InsertOneOptions; +import com.mongodb.client.model.RenameCollectionOptions; +import com.mongodb.client.model.ReplaceOptions; +import com.mongodb.client.model.SearchIndexModel; +import com.mongodb.client.model.UpdateOptions; +import com.mongodb.client.model.WriteModel; +import com.mongodb.client.result.DeleteResult; +import com.mongodb.client.result.InsertManyResult; +import com.mongodb.client.result.InsertOneResult; +import com.mongodb.client.result.UpdateResult; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; +import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT; +import static java.util.Objects.requireNonNull; + +class SyncMongoCollection implements MongoCollection { + + private final com.mongodb.reactivestreams.client.MongoCollection wrapped; + + SyncMongoCollection(final com.mongodb.reactivestreams.client.MongoCollection wrapped) { + this.wrapped = wrapped; + } + + @Override + public MongoNamespace getNamespace() { + return wrapped.getNamespace(); + } + + @Override 
+ public Class getDocumentClass() { + return wrapped.getDocumentClass(); + } + + @Override + public CodecRegistry getCodecRegistry() { + return wrapped.getCodecRegistry(); + } + + @Override + public ReadPreference getReadPreference() { + return wrapped.getReadPreference(); + } + + @Override + public WriteConcern getWriteConcern() { + return wrapped.getWriteConcern(); + } + + @Override + public ReadConcern getReadConcern() { + return wrapped.getReadConcern(); + } + + @Override + public Long getTimeout(final TimeUnit timeUnit) { + return wrapped.getTimeout(timeUnit); + } + + @Override + public MongoCollection withDocumentClass(final Class clazz) { + return new SyncMongoCollection<>(wrapped.withDocumentClass(clazz)); + } + + @Override + public MongoCollection withCodecRegistry(final CodecRegistry codecRegistry) { + return new SyncMongoCollection<>(wrapped.withCodecRegistry(codecRegistry)); + } + + @Override + public MongoCollection withReadPreference(final ReadPreference readPreference) { + return new SyncMongoCollection<>(wrapped.withReadPreference(readPreference)); + } + + @Override + public MongoCollection withWriteConcern(final WriteConcern writeConcern) { + return new SyncMongoCollection<>(wrapped.withWriteConcern(writeConcern)); + } + + @Override + public MongoCollection withReadConcern(final ReadConcern readConcern) { + return new SyncMongoCollection<>(wrapped.withReadConcern(readConcern)); + } + + @Override + public MongoCollection withTimeout(final long timeout, final TimeUnit timeUnit) { + return new SyncMongoCollection<>(wrapped.withTimeout(timeout, timeUnit)); + } + + @Override + public long countDocuments() { + return requireNonNull(Mono.from(wrapped.countDocuments()).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public long countDocuments(final Bson filter) { + return requireNonNull(Mono.from(wrapped.countDocuments(filter)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public long countDocuments(final Bson filter, final CountOptions options) { + return requireNonNull(Mono.from(wrapped.countDocuments(filter, options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public long countDocuments(final ClientSession clientSession) { + return requireNonNull(Mono.from(wrapped.countDocuments(unwrap(clientSession))).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public long countDocuments(final ClientSession clientSession, final Bson filter) { + return requireNonNull(Mono.from(wrapped.countDocuments(unwrap(clientSession), filter)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public long countDocuments(final ClientSession clientSession, final Bson filter, final CountOptions options) { + return requireNonNull(Mono.from(wrapped.countDocuments(unwrap(clientSession), filter, options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public long estimatedDocumentCount() { + return requireNonNull(Mono.from(wrapped.estimatedDocumentCount()).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public long estimatedDocumentCount(final EstimatedDocumentCountOptions options) { + return requireNonNull(Mono.from(wrapped.estimatedDocumentCount(options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public DistinctIterable distinct(final String fieldName, final Class resultClass) { + return new SyncDistinctIterable<>(wrapped.distinct(fieldName, resultClass)); + } + + @Override + public DistinctIterable distinct(final String fieldName, final 
Bson filter, final Class resultClass) { + return new SyncDistinctIterable<>(wrapped.distinct(fieldName, filter, resultClass)); + } + + @Override + public DistinctIterable distinct( + final ClientSession clientSession, final String fieldName, + final Class resultClass) { + return new SyncDistinctIterable<>(wrapped.distinct(unwrap(clientSession), fieldName, resultClass)); + } + + @Override + public DistinctIterable distinct( + final ClientSession clientSession, final String fieldName, final Bson filter, + final Class resultClass) { + return new SyncDistinctIterable<>(wrapped.distinct(unwrap(clientSession), fieldName, filter, resultClass)); + } + + @Override + public FindIterable find() { + return new SyncFindIterable<>(wrapped.find()); + } + + @Override + public FindIterable find(final Class resultClass) { + return new SyncFindIterable<>(wrapped.find(resultClass)); + } + + @Override + public FindIterable find(final Bson filter) { + return new SyncFindIterable<>(wrapped.find(filter)); + } + + @Override + public FindIterable find(final Bson filter, final Class resultClass) { + return new SyncFindIterable<>(wrapped.find(filter, resultClass)); + } + + @Override + public FindIterable find(final ClientSession clientSession) { + return new SyncFindIterable<>(wrapped.find(unwrap(clientSession))); + } + + @Override + public FindIterable find(final ClientSession clientSession, final Class resultClass) { + return new SyncFindIterable<>(wrapped.find(unwrap(clientSession), resultClass)); + } + + @Override + public FindIterable find(final ClientSession clientSession, final Bson filter) { + return new SyncFindIterable<>(wrapped.find(unwrap(clientSession), filter)); + } + + @Override + public FindIterable find(final ClientSession clientSession, final Bson filter, final Class resultClass) { + return new SyncFindIterable<>(wrapped.find(unwrap(clientSession), filter, resultClass)); + } + + @Override + public AggregateIterable aggregate(final List pipeline) { + return new SyncAggregateIterable<>(wrapped.aggregate(pipeline, wrapped.getDocumentClass())); + } + + @Override + public AggregateIterable aggregate(final List pipeline, final Class resultClass) { + return new SyncAggregateIterable<>(wrapped.aggregate(pipeline, resultClass)); + } + + @Override + public AggregateIterable aggregate(final ClientSession clientSession, final List pipeline) { + return new SyncAggregateIterable<>(wrapped.aggregate(unwrap(clientSession), pipeline, wrapped.getDocumentClass())); + } + + @Override + public AggregateIterable aggregate( + final ClientSession clientSession, final List pipeline, + final Class resultClass) { + return new SyncAggregateIterable<>(wrapped.aggregate(unwrap(clientSession), pipeline, resultClass)); + } + + @Override + public ChangeStreamIterable watch() { + return new SyncChangeStreamIterable<>(wrapped.watch(wrapped.getDocumentClass())); + } + + @Override + public ChangeStreamIterable watch(final Class resultClass) { + return new SyncChangeStreamIterable<>(wrapped.watch(resultClass)); + } + + @Override + public ChangeStreamIterable watch(final List pipeline) { + return new SyncChangeStreamIterable<>(wrapped.watch(pipeline, wrapped.getDocumentClass())); + } + + @Override + public ChangeStreamIterable watch(final List pipeline, final Class resultClass) { + return new SyncChangeStreamIterable<>(wrapped.watch(pipeline, resultClass)); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession) { + return new SyncChangeStreamIterable<>(wrapped.watch(unwrap(clientSession), 
wrapped.getDocumentClass())); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession, final Class resultClass) { + return new SyncChangeStreamIterable<>(wrapped.watch(unwrap(clientSession), resultClass)); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession, final List pipeline) { + // API inconsistency: the reactive watch overloads default to Document rather than to the collection's + // document class, so the document class is passed explicitly here + return new SyncChangeStreamIterable<>(wrapped.watch(unwrap(clientSession), pipeline, wrapped.getDocumentClass())); + } + + @Override + public ChangeStreamIterable watch( + final ClientSession clientSession, final List pipeline, + final Class resultClass) { + return new SyncChangeStreamIterable<>(wrapped.watch(unwrap(clientSession), pipeline, resultClass)); + } + + @Override + @SuppressWarnings("deprecation") + public com.mongodb.client.MapReduceIterable mapReduce(final String mapFunction, final String reduceFunction) { + return new SyncMapReduceIterable<>(wrapped.mapReduce(mapFunction, reduceFunction, wrapped.getDocumentClass())); + } + + @Override + @SuppressWarnings("deprecation") + public com.mongodb.client.MapReduceIterable mapReduce( + final String mapFunction, final String reduceFunction, + final Class resultClass) { + return new SyncMapReduceIterable<>(wrapped.mapReduce(mapFunction, reduceFunction, resultClass)); + } + + @Override + @SuppressWarnings("deprecation") + public com.mongodb.client.MapReduceIterable mapReduce(final ClientSession clientSession, final String mapFunction, + final String reduceFunction) { + return new SyncMapReduceIterable<>(wrapped.mapReduce(unwrap(clientSession), mapFunction, reduceFunction, + wrapped.getDocumentClass())); + } + + @Override + @SuppressWarnings("deprecation") + public com.mongodb.client.MapReduceIterable mapReduce( + final ClientSession clientSession, final String mapFunction, + final String reduceFunction, final Class resultClass) { + return new SyncMapReduceIterable<>(wrapped.mapReduce(unwrap(clientSession), mapFunction, reduceFunction, resultClass)); + } + + @Override + public BulkWriteResult bulkWrite(final List> requests) { + return requireNonNull(Mono.from(wrapped.bulkWrite(requests)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public BulkWriteResult bulkWrite(final List> requests, final BulkWriteOptions options) { + return requireNonNull(Mono.from(wrapped.bulkWrite(requests, options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public BulkWriteResult bulkWrite(final ClientSession clientSession, final List> requests) { + return requireNonNull(Mono.from(wrapped.bulkWrite(unwrap(clientSession), requests)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public BulkWriteResult bulkWrite( + final ClientSession clientSession, final List> requests, + final BulkWriteOptions options) { + return requireNonNull(Mono.from(wrapped.bulkWrite(unwrap(clientSession), requests, options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public InsertOneResult insertOne(final T t) { + return requireNonNull(Mono.from(wrapped.insertOne(t)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public InsertOneResult insertOne(final T t, final InsertOneOptions options) { + return requireNonNull(Mono.from(wrapped.insertOne(t, options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public InsertOneResult insertOne(final ClientSession clientSession, final T t) { + return requireNonNull(Mono.from(wrapped.insertOne(unwrap(clientSession),
t)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public InsertOneResult insertOne(final ClientSession clientSession, final T t, final InsertOneOptions options) { + return requireNonNull(Mono.from(wrapped.insertOne(unwrap(clientSession), t, options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public InsertManyResult insertMany(final List documents) { + return requireNonNull(Mono.from(wrapped.insertMany(documents)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public InsertManyResult insertMany(final List documents, final InsertManyOptions options) { + return requireNonNull(Mono.from(wrapped.insertMany(documents, options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public InsertManyResult insertMany(final ClientSession clientSession, final List documents) { + return requireNonNull(Mono.from(wrapped.insertMany(unwrap(clientSession), documents)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public InsertManyResult insertMany( + final ClientSession clientSession, final List documents, + final InsertManyOptions options) { + return requireNonNull(Mono.from(wrapped.insertMany(unwrap(clientSession), documents, options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public DeleteResult deleteOne(final Bson filter) { + return requireNonNull(Mono.from(wrapped.deleteOne(filter)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public DeleteResult deleteOne(final Bson filter, final DeleteOptions options) { + return requireNonNull(Mono.from(wrapped.deleteOne(filter, options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public DeleteResult deleteOne(final ClientSession clientSession, final Bson filter) { + return requireNonNull(Mono.from(wrapped.deleteOne(unwrap(clientSession), filter)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public DeleteResult deleteOne(final ClientSession clientSession, final Bson filter, final DeleteOptions options) { + return requireNonNull(Mono.from(wrapped.deleteOne(unwrap(clientSession), filter, options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public DeleteResult deleteMany(final Bson filter) { + return requireNonNull(Mono.from(wrapped.deleteMany(filter)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public DeleteResult deleteMany(final Bson filter, final DeleteOptions options) { + return requireNonNull(Mono.from(wrapped.deleteMany(filter, options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public DeleteResult deleteMany(final ClientSession clientSession, final Bson filter) { + return requireNonNull(Mono.from(wrapped.deleteMany(unwrap(clientSession), filter)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public DeleteResult deleteMany(final ClientSession clientSession, final Bson filter, final DeleteOptions options) { + return requireNonNull(Mono.from(wrapped.deleteMany(unwrap(clientSession), filter, options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public UpdateResult replaceOne(final Bson filter, final T replacement) { + return requireNonNull(Mono.from(wrapped.replaceOne(filter, replacement)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public UpdateResult replaceOne(final Bson filter, final T replacement, final ReplaceOptions replaceOptions) { + return requireNonNull(Mono.from(wrapped.replaceOne(filter, replacement, 
replaceOptions)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public UpdateResult replaceOne(final ClientSession clientSession, final Bson filter, final T replacement) { + return requireNonNull(Mono.from(wrapped.replaceOne(unwrap(clientSession), filter, replacement)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public UpdateResult replaceOne( + final ClientSession clientSession, final Bson filter, final T replacement, + final ReplaceOptions replaceOptions) { + return requireNonNull(Mono.from(wrapped.replaceOne(unwrap(clientSession), filter, replacement, replaceOptions)) + .contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public UpdateResult updateOne(final Bson filter, final Bson update) { + return requireNonNull(Mono.from(wrapped.updateOne(filter, update)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public UpdateResult updateOne(final Bson filter, final Bson update, final UpdateOptions updateOptions) { + return requireNonNull(Mono.from(wrapped.updateOne(filter, update, updateOptions)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public UpdateResult updateOne(final ClientSession clientSession, final Bson filter, final Bson update) { + return requireNonNull(Mono.from(wrapped.updateOne(unwrap(clientSession), filter, update)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public UpdateResult updateOne( + final ClientSession clientSession, final Bson filter, final Bson update, + final UpdateOptions updateOptions) { + return requireNonNull(Mono.from(wrapped.updateOne(unwrap(clientSession), filter, update, updateOptions)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public UpdateResult updateOne(final Bson filter, final List update) { + return requireNonNull(Mono.from(wrapped.updateOne(filter, update)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public UpdateResult updateOne(final Bson filter, final List update, final UpdateOptions updateOptions) { + return requireNonNull(Mono.from(wrapped.updateOne(filter, update, updateOptions)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public UpdateResult updateOne(final ClientSession clientSession, final Bson filter, final List update) { + return requireNonNull(Mono.from(wrapped.updateOne(unwrap(clientSession), filter, update)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public UpdateResult updateOne( + final ClientSession clientSession, final Bson filter, final List update, + final UpdateOptions updateOptions) { + return requireNonNull(Mono.from(wrapped.updateOne(unwrap(clientSession), filter, update, updateOptions)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public UpdateResult updateMany(final Bson filter, final Bson update) { + return requireNonNull(Mono.from(wrapped.updateMany(filter, update)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public UpdateResult updateMany(final Bson filter, final Bson update, final UpdateOptions updateOptions) { + return requireNonNull(Mono.from(wrapped.updateMany(filter, update, updateOptions)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public UpdateResult updateMany(final ClientSession clientSession, final Bson filter, final Bson update) { + return requireNonNull(Mono.from(wrapped.updateMany(unwrap(clientSession), filter, update)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public UpdateResult 
updateMany( + final ClientSession clientSession, final Bson filter, final Bson update, + final UpdateOptions updateOptions) { + return requireNonNull(Mono.from(wrapped.updateMany(unwrap(clientSession), filter, update, updateOptions)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public UpdateResult updateMany(final Bson filter, final List update) { + return requireNonNull(Mono.from(wrapped.updateMany(filter, update)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public UpdateResult updateMany(final Bson filter, final List update, final UpdateOptions updateOptions) { + return requireNonNull(Mono.from(wrapped.updateMany(filter, update, updateOptions)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public UpdateResult updateMany(final ClientSession clientSession, final Bson filter, final List update) { + return requireNonNull(Mono.from(wrapped.updateMany(unwrap(clientSession), filter, update)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public UpdateResult updateMany( + final ClientSession clientSession, final Bson filter, final List update, + final UpdateOptions updateOptions) { + return requireNonNull(Mono.from(wrapped.updateMany(unwrap(clientSession), filter, update, updateOptions)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public T findOneAndDelete(final Bson filter) { + return Mono.from(wrapped.findOneAndDelete(filter)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public T findOneAndDelete(final Bson filter, final FindOneAndDeleteOptions options) { + return Mono.from(wrapped.findOneAndDelete(filter, options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public T findOneAndDelete(final ClientSession clientSession, final Bson filter) { + return Mono.from(wrapped.findOneAndDelete(unwrap(clientSession), filter)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public T findOneAndDelete(final ClientSession clientSession, final Bson filter, final FindOneAndDeleteOptions options) { + return Mono.from(wrapped.findOneAndDelete(unwrap(clientSession), filter, options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public T findOneAndReplace(final Bson filter, final T replacement) { + return Mono.from(wrapped.findOneAndReplace(filter, replacement)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public T findOneAndReplace(final Bson filter, final T replacement, final FindOneAndReplaceOptions options) { + return Mono.from(wrapped.findOneAndReplace(filter, replacement, options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public T findOneAndReplace(final ClientSession clientSession, final Bson filter, final T replacement) { + return Mono.from(wrapped.findOneAndReplace(unwrap(clientSession), filter, replacement)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public T findOneAndReplace( + final ClientSession clientSession, final Bson filter, final T replacement, + final FindOneAndReplaceOptions options) { + return Mono.from(wrapped.findOneAndReplace(unwrap(clientSession), filter, replacement, options)) + .contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public T findOneAndUpdate(final Bson filter, final Bson update) { + return Mono.from(wrapped.findOneAndUpdate(filter, update)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public T findOneAndUpdate(final Bson filter, final Bson update, final 
FindOneAndUpdateOptions options) { + return Mono.from(wrapped.findOneAndUpdate(filter, update, options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public T findOneAndUpdate(final ClientSession clientSession, final Bson filter, final Bson update) { + return Mono.from(wrapped.findOneAndUpdate(unwrap(clientSession), filter, update)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public T findOneAndUpdate( + final ClientSession clientSession, final Bson filter, final Bson update, + final FindOneAndUpdateOptions options) { + return Mono.from(wrapped.findOneAndUpdate(unwrap(clientSession), filter, update, options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public T findOneAndUpdate(final Bson filter, final List update) { + return Mono.from(wrapped.findOneAndUpdate(filter, update)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public T findOneAndUpdate(final Bson filter, final List update, final FindOneAndUpdateOptions options) { + return Mono.from(wrapped.findOneAndUpdate(filter, update, options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public T findOneAndUpdate(final ClientSession clientSession, final Bson filter, final List update) { + return Mono.from(wrapped.findOneAndUpdate(unwrap(clientSession), filter, update)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public T findOneAndUpdate( + final ClientSession clientSession, final Bson filter, final List update, + final FindOneAndUpdateOptions options) { + return Mono.from(wrapped.findOneAndUpdate(unwrap(clientSession), filter, update, options)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public void drop() { + Mono.from(wrapped.drop()).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public void drop(final ClientSession clientSession) { + Mono.from(wrapped.drop(unwrap(clientSession))).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public void drop(final DropCollectionOptions dropCollectionOptions) { + Mono.from(wrapped.drop(dropCollectionOptions)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public void drop(final ClientSession clientSession, final DropCollectionOptions dropCollectionOptions) { + Mono.from(wrapped.drop(unwrap(clientSession), dropCollectionOptions)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public String createSearchIndex(final String name, final Bson definition) { + return requireNonNull(Mono.from(wrapped.createSearchIndex(name, definition)).contextWrite(CONTEXT) + .block(TIMEOUT_DURATION)); + } + + @Override + public String createSearchIndex(final Bson definition) { + return requireNonNull(Mono.from(wrapped.createSearchIndex(definition)).contextWrite(CONTEXT) + .block(TIMEOUT_DURATION)); + } + + @Override + public List createSearchIndexes(final List searchIndexModels) { + return requireNonNull(Flux.from(wrapped.createSearchIndexes(searchIndexModels)).contextWrite(CONTEXT).collectList() + .block(TIMEOUT_DURATION)); + } + + @Override + public void updateSearchIndex(final String name, final Bson definition) { + Mono.from(wrapped.updateSearchIndex(name, definition)).contextWrite(CONTEXT) + .block(TIMEOUT_DURATION); + } + + @Override + public void dropSearchIndex(final String indexName) { + Mono.from(wrapped.dropSearchIndex(indexName)).contextWrite(CONTEXT) + .block(TIMEOUT_DURATION); + } + + @Override + public ListSearchIndexesIterable listSearchIndexes() { + return
listSearchIndexes(Document.class); + } + + @Override + public ListSearchIndexesIterable listSearchIndexes(final Class tResultClass) { + return new SyncListSearchIndexesIterable<>(wrapped.listSearchIndexes(tResultClass)); + } + + @Override + public String createIndex(final Bson keys) { + return requireNonNull(Mono.from(wrapped.createIndex(keys)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public String createIndex(final Bson keys, final IndexOptions indexOptions) { + return requireNonNull(Mono.from(wrapped.createIndex(keys, indexOptions)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public String createIndex(final ClientSession clientSession, final Bson keys) { + return requireNonNull(Mono.from(wrapped.createIndex(unwrap(clientSession), keys)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public String createIndex(final ClientSession clientSession, final Bson keys, final IndexOptions indexOptions) { + return requireNonNull(Mono.from(wrapped.createIndex(unwrap(clientSession), keys, indexOptions)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public List createIndexes(final List indexes) { + throw new UnsupportedOperationException(); + } + + @Override + public List createIndexes(final List indexes, final CreateIndexOptions createIndexOptions) { + throw new UnsupportedOperationException(); + } + + @Override + public List createIndexes(final ClientSession clientSession, final List indexes) { + throw new UnsupportedOperationException(); + } + + @Override + public List createIndexes( + final ClientSession clientSession, final List indexes, + final CreateIndexOptions createIndexOptions) { + throw new UnsupportedOperationException(); + } + + @Override + public ListIndexesIterable listIndexes() { + return listIndexes(Document.class); + } + + @Override + public ListIndexesIterable listIndexes(final Class resultClass) { + return new SyncListIndexesIterable<>(wrapped.listIndexes(resultClass)); + } + + @Override + public ListIndexesIterable listIndexes(final ClientSession clientSession) { + return listIndexes(clientSession, Document.class); + } + + @Override + public ListIndexesIterable listIndexes(final ClientSession clientSession, final Class resultClass) { + return new SyncListIndexesIterable<>(wrapped.listIndexes(unwrap(clientSession), resultClass)); + } + + @Override + public void dropIndex(final String indexName) { + Mono.from(wrapped.dropIndex(indexName)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public void dropIndex(final String indexName, final DropIndexOptions dropIndexOptions) { + Mono.from(wrapped.dropIndex(indexName, dropIndexOptions)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public void dropIndex(final Bson keys) { + Mono.from(wrapped.dropIndex(keys)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public void dropIndex(final Bson keys, final DropIndexOptions dropIndexOptions) { + Mono.from(wrapped.dropIndex(keys, dropIndexOptions)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public void dropIndex(final ClientSession clientSession, final String indexName) { + Mono.from(wrapped.dropIndex(unwrap(clientSession), indexName)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public void dropIndex(final ClientSession clientSession, final Bson keys) { + Mono.from(wrapped.dropIndex(unwrap(clientSession), keys)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public void 
dropIndex(final ClientSession clientSession, final String indexName, final DropIndexOptions dropIndexOptions) { + Mono.from(wrapped.dropIndex(unwrap(clientSession), indexName, dropIndexOptions)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public void dropIndex(final ClientSession clientSession, final Bson keys, final DropIndexOptions dropIndexOptions) { + Mono.from(wrapped.dropIndex(unwrap(clientSession), keys, dropIndexOptions)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public void dropIndexes() { + Mono.from(wrapped.dropIndexes()).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public void dropIndexes(final ClientSession clientSession) { + Mono.from(wrapped.dropIndexes(unwrap(clientSession))).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public void dropIndexes(final DropIndexOptions dropIndexOptions) { + Mono.from(wrapped.dropIndexes(dropIndexOptions)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public void dropIndexes(final ClientSession clientSession, final DropIndexOptions dropIndexOptions) { + Mono.from(wrapped.dropIndexes(unwrap(clientSession), dropIndexOptions)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public void renameCollection(final MongoNamespace newCollectionNamespace) { + Mono.from(wrapped.renameCollection(newCollectionNamespace)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public void renameCollection(final MongoNamespace newCollectionNamespace, final RenameCollectionOptions renameCollectionOptions) { + Mono.from(wrapped.renameCollection(newCollectionNamespace, renameCollectionOptions)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public void renameCollection(final ClientSession clientSession, final MongoNamespace newCollectionNamespace) { + Mono.from(wrapped.renameCollection(unwrap(clientSession), newCollectionNamespace)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public void renameCollection( + final ClientSession clientSession, final MongoNamespace newCollectionNamespace, + final RenameCollectionOptions renameCollectionOptions) { + Mono.from(wrapped.renameCollection(unwrap(clientSession), newCollectionNamespace, renameCollectionOptions)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + private com.mongodb.reactivestreams.client.ClientSession unwrap(final ClientSession clientSession) { + return ((SyncClientSession) clientSession).getWrapped(); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCursor.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCursor.java new file mode 100644 index 00000000000..4e0159f90d0 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoCursor.java @@ -0,0 +1,261 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.syncadapter; + +import com.mongodb.MongoTimeoutException; +import com.mongodb.ServerAddress; +import com.mongodb.ServerCursor; +import com.mongodb.client.MongoCursor; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.internal.BatchCursor; +import org.reactivestreams.Publisher; +import org.reactivestreams.Subscriber; +import org.reactivestreams.Subscription; +import reactor.core.CoreSubscriber; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Hooks; +import reactor.core.publisher.Operators; +import reactor.util.context.Context; + +import java.util.NoSuchElementException; +import java.util.concurrent.BlockingDeque; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.LinkedBlockingDeque; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import static com.mongodb.ClusterFixture.TIMEOUT; +import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException; +import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT; +import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.getSleepAfterCursorClose; +import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.getSleepAfterCursorError; +import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.getSleepAfterCursorOpen; +import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.isWaitForBatchCursorCreationEnabled; + +class SyncMongoCursor implements MongoCursor { + private static final Object COMPLETED = new Object(); + private final BlockingDeque results = new LinkedBlockingDeque<>(); + private final CompletableFuture batchCursorCompletableFuture = new CompletableFuture<>(); + private final Integer batchSize; + private int countToBatchSize; + private Subscription subscription; + private T current; + private boolean completed; + private RuntimeException error; + + SyncMongoCursor(final Publisher publisher, @Nullable final Integer batchSize) { + this.batchSize = batchSize; + CountDownLatch latch = new CountDownLatch(1); + + if (isWaitForBatchCursorCreationEnabled()) { + // This hook allows us to intercept the `onNext` and `onError` signals for any operation to determine + // whether the {@link BatchCursor} was created successfully or if an error occurred during its creation process. + // The result is propagated to a {@link CompletableFuture}, which we use to block until it is completed. 
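+            // Note that Hooks.onEachOperator registers a process-wide operator hook, so it is removed again
+            // via Hooks.resetOnEachOperator() once the batch-cursor future below has completed.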
+ Hooks.onEachOperator(Operators.lift((sc, sub) -> + new BatchCursorInterceptSubscriber(sub, batchCursorCompletableFuture))); + } + + //noinspection ReactiveStreamsSubscriberImplementation + Flux.from(publisher).contextWrite(CONTEXT).subscribe(new Subscriber() { + @Override + public void onSubscribe(final Subscription s) { + subscription = s; + if (batchSize == null || batchSize == 0) { + subscription.request(Long.MAX_VALUE); + } else { + subscription.request(batchSize); + } + latch.countDown(); + } + + @Override + public void onNext(final T t) { + results.addLast(t); + } + + @Override + public void onError(final Throwable t) { + results.addLast(t); + sleep(getSleepAfterCursorError()); + } + + @Override + public void onComplete() { + results.addLast(COMPLETED); + } + }); + try { + if (!latch.await(TIMEOUT, TimeUnit.SECONDS)) { + throw new MongoTimeoutException("Timeout waiting for subscription"); + } + if (isWaitForBatchCursorCreationEnabled()) { + batchCursorCompletableFuture.get(TIMEOUT, TimeUnit.SECONDS); + Hooks.resetOnEachOperator(); + } + sleep(getSleepAfterCursorOpen()); + } catch (InterruptedException e) { + throw interruptAndCreateMongoInterruptedException("Interrupted waiting for asynchronous cursor establishment", e); + } catch (ExecutionException | TimeoutException e) { + Throwable cause = e.getCause(); + if (cause instanceof RuntimeException) { + throw (RuntimeException) cause; + } + throw new RuntimeException(e); + } + } + + @Override + public void close() { + subscription.cancel(); + sleep(getSleepAfterCursorClose()); + } + + private static void sleep(final long millis) { + try { + Thread.sleep(millis); + } catch (InterruptedException e) { + throw interruptAndCreateMongoInterruptedException("Interrupted during sleep", e); + } + } + + @Override + @SuppressWarnings("unchecked") + public boolean hasNext() { + if (error != null) { + throw error; + } + if (completed) { + return false; + } + if (current != null) { + return true; + } + try { + Object next; + if (batchSize != null && batchSize != 0 && countToBatchSize == batchSize) { + subscription.request(batchSize); + countToBatchSize = 0; + } + next = results.pollFirst(TIMEOUT, TimeUnit.SECONDS); + if (next == null) { + throw new MongoTimeoutException("Timed out waiting for result from cursor"); + } else if (next instanceof Throwable) { + error = translateError((Throwable) next); + sleep(getSleepAfterCursorError()); + throw error; + } else if (next == COMPLETED) { + completed = true; + return false; + } else { + current = (T) next; + countToBatchSize++; + return true; + } + } catch (InterruptedException e) { + throw interruptAndCreateMongoInterruptedException("Interrupted waiting for next result", e); + } + } + + @Override + public T next() { + if (!hasNext()) { + throw new NoSuchElementException(); + } + T retVal = current; + current = null; + return retVal; + } + + @Override + public int available() { + throw new UnsupportedOperationException(); + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + + @Override + public T tryNext() { + throw new UnsupportedOperationException(); // No good way to fulfill this contract with a Publisher + } + + @Override + public ServerCursor getServerCursor() { + throw new UnsupportedOperationException(); + } + + @Override + public ServerAddress getServerAddress() { + throw new UnsupportedOperationException(); + } + + private RuntimeException translateError(final Throwable throwable) { + if (throwable instanceof RuntimeException) { + return (RuntimeException)
throwable; + } + return new RuntimeException(throwable); + } + + + private static final class BatchCursorInterceptSubscriber implements CoreSubscriber { + + private final CoreSubscriber sub; + private final CompletableFuture batchCursorCompletableFuture; + + BatchCursorInterceptSubscriber(final CoreSubscriber sub, + final CompletableFuture batchCursorCompletableFuture) { + this.sub = sub; + this.batchCursorCompletableFuture = batchCursorCompletableFuture; + } + + @Override + public Context currentContext() { + return sub.currentContext(); + } + + @Override + public void onSubscribe(final Subscription s) { + sub.onSubscribe(s); + } + + @Override + public void onNext(final Object o) { + if (o instanceof BatchCursor) { + // Interception of a cursor means that it has been created at this point. + batchCursorCompletableFuture.complete(o); + } + sub.onNext(o); + } + + @Override + public void onError(final Throwable t) { + if (!batchCursorCompletableFuture.isDone()) { // Cursor has not been created yet but an error occurred. + batchCursorCompletableFuture.completeExceptionally(t); + } + sub.onError(t); + } + + @Override + public void onComplete() { + sub.onComplete(); + } + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoDatabase.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoDatabase.java new file mode 100644 index 00000000000..40b15632366 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoDatabase.java @@ -0,0 +1,311 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.syncadapter; + +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.client.AggregateIterable; +import com.mongodb.client.ChangeStreamIterable; +import com.mongodb.client.ClientSession; +import com.mongodb.client.ListCollectionNamesIterable; +import com.mongodb.client.ListCollectionsIterable; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.CreateViewOptions; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; +import reactor.core.publisher.Mono; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; +import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT; +import static java.util.Objects.requireNonNull; + +public class SyncMongoDatabase implements MongoDatabase { + private final com.mongodb.reactivestreams.client.MongoDatabase wrapped; + + SyncMongoDatabase(final com.mongodb.reactivestreams.client.MongoDatabase wrapped) { + this.wrapped = wrapped; + } + + public com.mongodb.reactivestreams.client.MongoDatabase getWrapped() { + return wrapped; + } + + @Override + public String getName() { + return wrapped.getName(); + } + + @Override + public CodecRegistry getCodecRegistry() { + return wrapped.getCodecRegistry(); + } + + @Override + public ReadPreference getReadPreference() { + return wrapped.getReadPreference(); + } + + @Override + public WriteConcern getWriteConcern() { + return wrapped.getWriteConcern(); + } + + @Override + public ReadConcern getReadConcern() { + return wrapped.getReadConcern(); + } + + @Override + public Long getTimeout(final TimeUnit timeUnit) { + return wrapped.getTimeout(timeUnit); + } + + @Override + public MongoDatabase withCodecRegistry(final CodecRegistry codecRegistry) { + return new SyncMongoDatabase(wrapped.withCodecRegistry(codecRegistry)); + } + + @Override + public MongoDatabase withReadPreference(final ReadPreference readPreference) { + return new SyncMongoDatabase(wrapped.withReadPreference(readPreference)); + } + + @Override + public MongoDatabase withWriteConcern(final WriteConcern writeConcern) { + return new SyncMongoDatabase(wrapped.withWriteConcern(writeConcern)); + } + + @Override + public MongoDatabase withReadConcern(final ReadConcern readConcern) { + return new SyncMongoDatabase(wrapped.withReadConcern(readConcern)); + } + + @Override + public MongoDatabase withTimeout(final long timeout, final TimeUnit timeUnit) { + return new SyncMongoDatabase(wrapped.withTimeout(timeout, timeUnit)); + } + + @Override + public MongoCollection getCollection(final String collectionName) { + return new SyncMongoCollection<>(wrapped.getCollection(collectionName)); + } + + @Override + public MongoCollection getCollection(final String collectionName, final Class documentClass) { + return new SyncMongoCollection<>(wrapped.getCollection(collectionName, documentClass)); + } + + @Override + public Document runCommand(final Bson command) { + return requireNonNull(Mono.from(wrapped.runCommand(command)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public Document runCommand(final Bson command, final ReadPreference readPreference) { + return requireNonNull(Mono.from(wrapped.runCommand(command, 
readPreference)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public TResult runCommand(final Bson command, final Class resultClass) { + return requireNonNull(Mono.from(wrapped.runCommand(command, resultClass)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public TResult runCommand(final Bson command, final ReadPreference readPreference, final Class resultClass) { + return requireNonNull(Mono.from(wrapped.runCommand(command, readPreference, resultClass)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public Document runCommand(final ClientSession clientSession, final Bson command) { + return requireNonNull(Mono.from(wrapped.runCommand(unwrap(clientSession), command)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public Document runCommand(final ClientSession clientSession, final Bson command, final ReadPreference readPreference) { + return requireNonNull(Mono.from(wrapped.runCommand(unwrap(clientSession), command, readPreference)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public TResult runCommand(final ClientSession clientSession, final Bson command, final Class resultClass) { + return requireNonNull(Mono.from(wrapped.runCommand(unwrap(clientSession), command, resultClass)).contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public TResult runCommand(final ClientSession clientSession, final Bson command, final ReadPreference readPreference, + final Class resultClass) { + return requireNonNull(Mono.from(wrapped.runCommand(unwrap(clientSession), command, readPreference, resultClass)) + .contextWrite(CONTEXT).block(TIMEOUT_DURATION)); + } + + @Override + public void drop() { + Mono.from(wrapped.drop()).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public void drop(final ClientSession clientSession) { + Mono.from(wrapped.drop(unwrap(clientSession))).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public ListCollectionNamesIterable listCollectionNames() { + return new SyncListCollectionNamesIterable(wrapped.listCollectionNames()); + } + + @Override + public ListCollectionsIterable listCollections() { + return new SyncListCollectionsIterable<>(wrapped.listCollections()); + } + + @Override + public ListCollectionsIterable listCollections(final Class resultClass) { + return new SyncListCollectionsIterable<>(wrapped.listCollections(resultClass)); + } + + @Override + public ListCollectionNamesIterable listCollectionNames(final ClientSession clientSession) { + return new SyncListCollectionNamesIterable(wrapped.listCollectionNames(unwrap(clientSession))); + } + + @Override + public ListCollectionsIterable listCollections(final ClientSession clientSession) { + return listCollections(clientSession, Document.class); + } + + @Override + public ListCollectionsIterable listCollections(final ClientSession clientSession, final Class resultClass) { + return new SyncListCollectionsIterable<>(wrapped.listCollections(unwrap(clientSession), resultClass)); + } + + @Override + public void createCollection(final String collectionName) { + Mono.from(wrapped.createCollection(collectionName)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public void createCollection(final String collectionName, final CreateCollectionOptions createCollectionOptions) { + Mono.from(wrapped.createCollection(collectionName, createCollectionOptions)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public void createCollection(final 
ClientSession clientSession, final String collectionName) { + Mono.from(wrapped.createCollection(unwrap(clientSession), collectionName)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public void createCollection(final ClientSession clientSession, final String collectionName, + final CreateCollectionOptions createCollectionOptions) { + Mono.from(wrapped.createCollection(unwrap(clientSession), collectionName, createCollectionOptions)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public void createView(final String viewName, final String viewOn, final List pipeline) { + Mono.from(wrapped.createView(viewName, viewOn, pipeline)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public void createView(final String viewName, final String viewOn, final List pipeline, + final CreateViewOptions createViewOptions) { + Mono.from(wrapped.createView(viewName, viewOn, pipeline, createViewOptions)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public void createView(final ClientSession clientSession, final String viewName, final String viewOn, + final List pipeline) { + Mono.from(wrapped.createView(unwrap(clientSession), viewName, viewOn, pipeline)).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public void createView(final ClientSession clientSession, final String viewName, final String viewOn, + final List pipeline, final CreateViewOptions createViewOptions) { + Mono.from(wrapped.createView(unwrap(clientSession), viewName, viewOn, pipeline, createViewOptions)) + .contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public ChangeStreamIterable watch() { + return new SyncChangeStreamIterable<>(wrapped.watch()); + } + + @Override + public ChangeStreamIterable watch(final Class resultClass) { + return new SyncChangeStreamIterable<>(wrapped.watch(resultClass)); + } + + @Override + public ChangeStreamIterable watch(final List pipeline) { + return new SyncChangeStreamIterable<>(wrapped.watch(pipeline)); + } + + @Override + public ChangeStreamIterable watch(final List pipeline, final Class resultClass) { + return new SyncChangeStreamIterable<>(wrapped.watch(pipeline, resultClass)); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession) { + return new SyncChangeStreamIterable<>(wrapped.watch(unwrap(clientSession))); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession, final Class resultClass) { + return new SyncChangeStreamIterable<>(wrapped.watch(unwrap(clientSession), resultClass)); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession, final List pipeline) { + return new SyncChangeStreamIterable<>(wrapped.watch(unwrap(clientSession), pipeline)); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession, final List pipeline, + final Class resultClass) { + return new SyncChangeStreamIterable<>(wrapped.watch(unwrap(clientSession), pipeline, resultClass)); + } + + @Override + public AggregateIterable aggregate(final List pipeline) { + return new SyncAggregateIterable<>(wrapped.aggregate(pipeline)); + } + + @Override + public AggregateIterable aggregate(final List pipeline, final Class resultClass) { + return new SyncAggregateIterable<>(wrapped.aggregate(pipeline, resultClass)); + } + + @Override + public AggregateIterable aggregate(final ClientSession clientSession, final List pipeline) { + return new SyncAggregateIterable<>(wrapped.aggregate(unwrap(clientSession), 
pipeline)); + } + + @Override + public AggregateIterable aggregate(final ClientSession clientSession, final List pipeline, + final Class resultClass) { + return new SyncAggregateIterable<>(wrapped.aggregate(unwrap(clientSession), pipeline, resultClass)); + } + + private com.mongodb.reactivestreams.client.ClientSession unwrap(final ClientSession clientSession) { + return ((SyncClientSession) clientSession).getWrapped(); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoIterable.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoIterable.java new file mode 100644 index 00000000000..d0ef79933be --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/SyncMongoIterable.java @@ -0,0 +1,101 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.syncadapter; + +import com.mongodb.Function; +import com.mongodb.client.MongoCursor; +import com.mongodb.client.MongoIterable; +import com.mongodb.client.internal.MappingIterable; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.internal.BatchCursorPublisher; +import com.mongodb.reactivestreams.client.internal.ListCollectionNamesPublisherImpl; +import org.bson.Document; +import org.reactivestreams.Publisher; +import reactor.core.publisher.Mono; + +import java.util.Collection; +import java.util.function.Consumer; + +import static com.mongodb.ClusterFixture.TIMEOUT_DURATION; +import static com.mongodb.reactivestreams.client.syncadapter.ContextHelper.CONTEXT; + +class SyncMongoIterable implements MongoIterable { + private final Publisher wrapped; + @Nullable + private Integer batchSize; + + SyncMongoIterable(final Publisher wrapped) { + this.wrapped = wrapped; + } + + @Override + public MongoCursor iterator() { + return cursor(); + } + + @Override + public MongoCursor cursor() { + return new SyncMongoCursor<>(wrapped, batchSize); + } + + @Override + public T first() { + return Mono.from(furtherUnwrapWrapped().first()).contextWrite(CONTEXT).block(TIMEOUT_DURATION); + } + + @Override + public MongoIterable map(final Function mapper) { + return new MappingIterable<>(this, mapper); + } + + @Override + public void forEach(final Consumer action) { + try (MongoCursor cursor = cursor()) { + while (cursor.hasNext()) { + action.accept(cursor.next()); + } + } + } + + @Override + public > A into(final A target) { + try (MongoCursor cursor = cursor()) { + while (cursor.hasNext()) { + target.add(cursor.next()); + } + } + return target; + } + + @Override + public MongoIterable batchSize(final int batchSize) { + this.batchSize = batchSize; + return this; + } + + private BatchCursorPublisher furtherUnwrapWrapped() { + if (this.wrapped instanceof ListCollectionNamesPublisherImpl) { + BatchCursorPublisher wrappedDocumentPublisher = ((ListCollectionNamesPublisherImpl) 
this.wrapped).getWrapped(); + // This unchecked cast is not safe in general, but it holds for the publishers used in these tests + @SuppressWarnings("unchecked") + BatchCursorPublisher wrappedTPublisher = (BatchCursorPublisher) wrappedDocumentPublisher; + return wrappedTPublisher; + } else { + return (BatchCursorPublisher) this.wrapped; + } + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/package-info.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/package-info.java new file mode 100644 index 00000000000..edf6bd381e7 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/syncadapter/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +@NonNullApi +package com.mongodb.reactivestreams.client.syncadapter; + +import com.mongodb.lang.NonNullApi; diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ChangeStreamsTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ChangeStreamsTest.java new file mode 100644 index 00000000000..8555e312db9 --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ChangeStreamsTest.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.unified; + +import org.junit.jupiter.params.provider.Arguments; + +import java.util.Collection; + +final class ChangeStreamsTest extends UnifiedReactiveStreamsTest { + + private static Collection data() { + return getTestData("change-streams"); + } +} diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ClientMetadataTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ClientMetadataTest.java new file mode 100644 index 00000000000..6b0caf615bc --- /dev/null +++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ClientMetadataTest.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ClientMetadataTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ClientMetadataTest.java
new file mode 100644
index 00000000000..6b0caf615bc
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ClientMetadataTest.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.unified;
+
+import org.junit.jupiter.params.provider.Arguments;
+
+import java.util.Collection;
+
+public class ClientMetadataTest extends UnifiedReactiveStreamsTest {
+
+    private static Collection<Arguments> data() {
+        return getTestData("mongodb-handshake/tests/unified");
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ClientSideEncryptionTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ClientSideEncryptionTest.java
new file mode 100644
index 00000000000..084974481e9
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ClientSideEncryptionTest.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.unified;
+
+import org.junit.jupiter.params.provider.Arguments;
+
+import java.util.Collection;
+
+final class ClientSideEncryptionTest extends UnifiedReactiveStreamsTest {
+
+    private static Collection<Arguments> data() {
+        return getTestData("client-side-encryption/tests/unified");
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ClientSideOperationTimeoutTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ClientSideOperationTimeoutTest.java
new file mode 100644
index 00000000000..b00b8c365cb
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ClientSideOperationTimeoutTest.java
@@ -0,0 +1,158 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.unified;
+
+import com.mongodb.lang.Nullable;
+import org.bson.BsonArray;
+import org.bson.BsonDocument;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+import reactor.core.publisher.Hooks;
+
+import java.util.Collection;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static com.mongodb.client.ClientSideOperationTimeoutTest.skipOperationTimeoutTests;
+import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.disableSleep;
+import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.enableSleepAfterCursorError;
+import static java.lang.String.format;
+import static java.util.Arrays.asList;
+import static org.junit.jupiter.api.Assumptions.assumeFalse;
+
+
+// See https://github.com/mongodb/specifications/tree/master/source/client-side-operation-timeout/tests
+public class ClientSideOperationTimeoutTest extends UnifiedReactiveStreamsTest {
+
+    private final AtomicReference<Throwable> atomicReferenceThrowable = new AtomicReference<>();
+
+    private static Collection<Arguments> data() {
+        return getTestData("client-side-operations-timeout");
+    }
+
+    @Override
+    protected void skips(final String fileDescription, final String testDescription) {
+        skipOperationTimeoutTests(fileDescription, testDescription);
+
+        assumeFalse(testDescription.equals("timeoutMS is refreshed for getMore if maxAwaitTimeMS is not set"),
+                "No iterateOnce support. There is an alternative prose test for it.");
+        assumeFalse(testDescription.equals("timeoutMS is refreshed for getMore if maxAwaitTimeMS is set"),
+                "No iterateOnce support. There is an alternative prose test for it.");
+        /*
+           The Reactive Streams specification prevents us from allowing a subsequent next call (an event, in reactive terms) after a
+           timeout error, which conflicts with the CSOT spec requirement not to invalidate the change stream and to try resuming and
+           establishing a new change stream on the server. We immediately let users know about a timeout error, which then closes the
+           stream/publisher.
+         */
+        assumeFalse(testDescription.equals("change stream can be iterated again if previous iteration times out"),
+                "It is not possible due to a conflict with the Reactive Streams specification.");
+        assumeFalse(testDescription.equals("timeoutMS applies to full resume attempt in a next call"),
+                "Flaky and racy due to asynchronous behaviour. There is an alternative prose test for it.");
+        assumeFalse(testDescription.equals("timeoutMS applied to initial aggregate"),
+                "No way to catch an error on BatchCursor creation. There is an alternative prose test for it.");
+
+        assumeFalse(testDescription.endsWith("createChangeStream on client"));
+        assumeFalse(testDescription.endsWith("createChangeStream on database"));
+        assumeFalse(testDescription.endsWith("createChangeStream on collection"));
+
+        // No withTransaction support
+        assumeFalse(fileDescription.contains("withTransaction") || testDescription.contains("withTransaction"));
+
+        assumeFalse(fileDescription.equals("operations ignore deprecated timeout options if timeoutMS is set")
+                && (testDescription.startsWith("abortTransaction ignores") || testDescription.startsWith("commitTransaction ignores")),
+                "No operation session-based overrides");
+
+        if (testDescription.equals("timeoutMS is refreshed for close")) {
+            enableSleepAfterCursorError(256);
+        }
+
+        /*
+         * The test is occasionally racy. The "killCursors" command may appear as an additional event. This is unexpected in unified tests,
+         * but anticipated in reactive streams because an operation timeout error triggers the closure of the stream/publisher.
+         */
+        ignoreExtraCommandEvents(testDescription.contains("timeoutMS is refreshed for getMore - failure"));
+
+        Hooks.onOperatorDebug();
+        Hooks.onErrorDropped(atomicReferenceThrowable::set);
+    }
+
+    @ParameterizedTest(name = "{0}: {1}")
+    @MethodSource("data")
+    @Override
+    public void shouldPassAllOutcomes(
+            final String testName,
+            @Nullable final String fileDescription,
+            @Nullable final String testDescription,
+            @Nullable final String directoryName,
+            final int attemptNumber,
+            final int totalAttempts,
+            final String schemaVersion,
+            @Nullable final BsonArray runOnRequirements,
+            final BsonArray entitiesArray,
+            final BsonArray initialData,
+            final BsonDocument definition) {
+        try {
+            super.shouldPassAllOutcomes(
+                    testName,
+                    fileDescription,
+                    testDescription,
+                    directoryName,
+                    attemptNumber,
+                    totalAttempts,
+                    schemaVersion,
+                    runOnRequirements,
+                    entitiesArray,
+                    initialData,
+                    definition);
+
+        } catch (AssertionError e) {
+            assertNoDroppedError(format("%s failed due to %s.\n"
+                    + "The test also caused a dropped error; `onError` called with no handler.",
+                    testDescription, e.getMessage()));
+            if (racyTestAssertion(testDescription, e)) {
+                // Ignore the failure - the test is racy, and there is often no time for the getMore to run
+                return;
+            }
+            throw e;
+        }
+        assertNoDroppedError(format("%s passed but there was a dropped error; `onError` called with no handler.", testDescription));
+    }
+
+    @AfterEach
+    public void cleanUp() {
+        super.cleanUp();
+        disableSleep();
+        Hooks.resetOnOperatorDebug();
+        Hooks.resetOnErrorDropped();
+    }
+
+    public static boolean racyTestAssertion(final String testDescription, final AssertionError e) {
+        return RACY_GET_MORE_TESTS.contains(testDescription) && e.getMessage().startsWith("Number of events must be the same");
+    }
+
+    private static final List<String> RACY_GET_MORE_TESTS = asList(
+            "remaining timeoutMS applied to getMore if timeoutMode is cursor_lifetime",
+            "remaining timeoutMS applied to getMore if timeoutMode is unset");
+
+    private void assertNoDroppedError(final String message) {
+        Throwable droppedError = atomicReferenceThrowable.get();
+        if (droppedError != null) {
+            throw new AssertionError(message, droppedError);
+        }
+    }
+}
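A note on the dropped-error machinery above: Reactor reports errors that arrive after cancellation or a terminal signal through a global hook rather than `onError`, which is why the test captures them in an `AtomicReference`. A self-contained sketch of the same capture-and-assert pattern (illustrative only, not part of the patch):

import java.util.concurrent.atomic.AtomicReference;

import reactor.core.publisher.Flux;
import reactor.core.publisher.Hooks;

public final class DroppedErrorCaptureExample {
    public static void main(final String[] args) {
        AtomicReference<Throwable> dropped = new AtomicReference<>();
        Hooks.onErrorDropped(dropped::set);
        try {
            Flux.<Integer>create(sink -> {
                sink.next(1);
                // take(1) has already cancelled by now, so this error has no
                // subscriber; Reactor routes it to the onErrorDropped hook.
                sink.error(new IllegalStateException("late failure"));
            }).take(1).blockLast();
        } finally {
            Hooks.resetOnErrorDropped();
        }
        if (dropped.get() == null) {
            throw new AssertionError("expected a dropped error");
        }
    }
}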
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/CollectionManagementTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/CollectionManagementTest.java
new file mode 100644
index 00000000000..98267aefaa8
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/CollectionManagementTest.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.unified;
+
+import org.junit.jupiter.params.provider.Arguments;
+
+import java.util.Collection;
+
+final class CollectionManagementTest extends UnifiedReactiveStreamsTest {
+
+    private static Collection<Arguments> data() {
+        return getTestData("collection-management");
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/CommandLoggingTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/CommandLoggingTest.java
new file mode 100644
index 00000000000..2e05b73ea26
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/CommandLoggingTest.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.unified;
+
+import org.junit.jupiter.params.provider.Arguments;
+
+import java.util.Collection;
+
+final class CommandLoggingTest extends UnifiedReactiveStreamsTest {
+
+    private static Collection<Arguments> data() {
+        return getTestData("command-logging-and-monitoring/tests/logging");
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/CommandMonitoringTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/CommandMonitoringTest.java
new file mode 100644
index 00000000000..2bddcbe8e45
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/CommandMonitoringTest.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.unified;
+
+import org.junit.jupiter.params.provider.Arguments;
+
+import java.util.Collection;
+
+final class CommandMonitoringTest extends UnifiedReactiveStreamsTest {
+    private static Collection<Arguments> data() {
+        return getTestData("command-logging-and-monitoring/tests/monitoring");
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ConnectionPoolLoggingTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ConnectionPoolLoggingTest.java
new file mode 100644
index 00000000000..92beec1e28b
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ConnectionPoolLoggingTest.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.unified;
+
+import org.junit.jupiter.params.provider.Arguments;
+
+import java.util.Collection;
+
+final class ConnectionPoolLoggingTest extends UnifiedReactiveStreamsTest {
+    private static Collection<Arguments> data() {
+        return getTestData("connection-monitoring-and-pooling/tests/logging");
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/IndexManagementTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/IndexManagementTest.java
new file mode 100644
index 00000000000..df6bf08cea8
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/IndexManagementTest.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.unified;
+
+import org.junit.jupiter.params.provider.Arguments;
+
+import java.util.Collection;
+
+final class IndexManagementTest extends UnifiedReactiveStreamsTest {
+    private static Collection<Arguments> data() {
+        return getTestData("index-management");
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/LoadBalancerTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/LoadBalancerTest.java
new file mode 100644
index 00000000000..cf6bb04c2de
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/LoadBalancerTest.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.unified;
+
+import org.junit.jupiter.params.provider.Arguments;
+
+import java.util.Collection;
+
+final class LoadBalancerTest extends UnifiedReactiveStreamsTest {
+    private static Collection<Arguments> data() {
+        return getTestData("load-balancers");
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ServerSelectionLoggingTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ServerSelectionLoggingTest.java
new file mode 100644
index 00000000000..9491ee604a7
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/ServerSelectionLoggingTest.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.unified;
+
+import org.junit.jupiter.params.provider.Arguments;
+
+import java.util.Collection;
+
+final class ServerSelectionLoggingTest extends UnifiedReactiveStreamsTest {
+    private static Collection<Arguments> data() {
+        return getTestData("server-selection/tests/logging");
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/SessionsTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/SessionsTest.java
new file mode 100644
index 00000000000..61e775d6458
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/SessionsTest.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.unified;
+
+import org.junit.jupiter.params.provider.Arguments;
+
+import java.util.Collection;
+
+final class SessionsTest extends UnifiedReactiveStreamsTest {
+    private static Collection<Arguments> data() {
+        return getTestData("sessions");
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedCrudTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedCrudTest.java
new file mode 100644
index 00000000000..d602af5cd5e
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedCrudTest.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.unified;
+
+import org.junit.jupiter.params.provider.Arguments;
+
+import java.util.Collection;
+
+final class UnifiedCrudTest extends UnifiedReactiveStreamsTest {
+    private static Collection<Arguments> data() {
+        return getTestData("crud");
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedGridFSTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedGridFSTest.java
new file mode 100644
index 00000000000..242bcdef72c
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedGridFSTest.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.unified;
+
+import org.junit.jupiter.params.provider.Arguments;
+
+import java.util.Collection;
+
+final class UnifiedGridFSTest extends UnifiedReactiveStreamsTest {
+    private static Collection<Arguments> data() {
+        return getTestData("gridfs");
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedReactiveStreamsTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedReactiveStreamsTest.java
new file mode 100644
index 00000000000..f8eb0f89814
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedReactiveStreamsTest.java
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.unified;
+
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.client.MongoClient;
+import com.mongodb.client.MongoDatabase;
+import com.mongodb.client.gridfs.GridFSBucket;
+import com.mongodb.client.unified.UnifiedTest;
+import com.mongodb.client.unified.UnifiedTestModifications;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.connection.TransportSettings;
+import com.mongodb.lang.NonNull;
+import com.mongodb.reactivestreams.client.gridfs.GridFSBuckets;
+import com.mongodb.reactivestreams.client.internal.vault.ClientEncryptionImpl;
+import com.mongodb.reactivestreams.client.syncadapter.SyncClientEncryption;
+import com.mongodb.reactivestreams.client.syncadapter.SyncGridFSBucket;
+import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient;
+import com.mongodb.reactivestreams.client.syncadapter.SyncMongoDatabase;
+import org.junit.jupiter.params.provider.Arguments;
+
+import java.util.Collection;
+
+import static com.mongodb.ClusterFixture.getOverriddenTransportSettings;
+import static com.mongodb.client.unified.UnifiedTestModifications.Modifier;
+import static com.mongodb.client.unified.UnifiedTestModifications.TestDef;
+import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.disableSleep;
+import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.disableWaitForBatchCursorCreation;
+import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.enableSleepAfterCursorClose;
+import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.enableSleepAfterCursorOpen;
+import static com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient.enableWaitForBatchCursorCreation;
+
+public abstract class UnifiedReactiveStreamsTest extends UnifiedTest {
+    protected UnifiedReactiveStreamsTest() {
+    }
+
+    @Override
+    protected MongoClient createMongoClient(final MongoClientSettings settings) {
+        TransportSettings overriddenTransportSettings = getOverriddenTransportSettings();
+        MongoClientSettings clientSettings = overriddenTransportSettings == null ? settings
+                : MongoClientSettings.builder(settings).transportSettings(overriddenTransportSettings).build();
+        return new SyncMongoClient(clientSettings);
+    }
+
+    @Override
+    protected GridFSBucket createGridFSBucket(final MongoDatabase database) {
+        return new SyncGridFSBucket(GridFSBuckets.create(((SyncMongoDatabase) database).getWrapped()));
+    }
+
+    @Override
+    protected ClientEncryption createClientEncryption(final MongoClient keyVaultClient, final ClientEncryptionSettings clientEncryptionSettings) {
+        return new SyncClientEncryption(new ClientEncryptionImpl(((SyncMongoClient) keyVaultClient).getWrapped(), clientEncryptionSettings));
+    }
+
+    @Override
+    protected boolean isReactive() {
+        return true;
+    }
+
+    @Override
+    protected void postSetUp(final TestDef testDef) {
+        super.postSetUp(testDef);
+        if (testDef.wasAssignedModifier(UnifiedTestModifications.Modifier.IGNORE_EXTRA_EVENTS)) {
+            ignoreExtraEvents(); // no disable needed
+        }
+        if (testDef.wasAssignedModifier(Modifier.SLEEP_AFTER_CURSOR_OPEN)) {
+            enableSleepAfterCursorOpen(256);
+        }
+        if (testDef.wasAssignedModifier(Modifier.SLEEP_AFTER_CURSOR_CLOSE)) {
+            enableSleepAfterCursorClose(256);
+        }
+        if (testDef.wasAssignedModifier(Modifier.WAIT_FOR_BATCH_CURSOR_CREATION)) {
+            enableWaitForBatchCursorCreation();
+        }
+    }
+
+    @Override
+    protected void postCleanUp(final TestDef testDef) {
+        super.postCleanUp(testDef);
+        if (testDef.wasAssignedModifier(Modifier.WAIT_FOR_BATCH_CURSOR_CREATION)) {
+            disableWaitForBatchCursorCreation();
+        }
+        if (testDef.wasAssignedModifier(Modifier.SLEEP_AFTER_CURSOR_CLOSE)) {
+            disableSleep();
+        }
+        if (testDef.wasAssignedModifier(Modifier.SLEEP_AFTER_CURSOR_OPEN)) {
+            disableSleep();
+        }
+    }
+
+    @NonNull
+    protected static Collection<Arguments> getTestData(final String directory) {
+        return getTestData(directory, true, Language.JAVA);
+    }
+}
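The `createMongoClient` override above swaps in alternative transport settings when the test fixture provides them. A standalone sketch of the same builder pattern (the connection string and the Netty transport choice are assumptions for illustration, not part of this patch):

import com.mongodb.ConnectionString;
import com.mongodb.MongoClientSettings;
import com.mongodb.connection.TransportSettings;

public final class TransportOverrideExample {
    public static void main(final String[] args) {
        MongoClientSettings base = MongoClientSettings.builder()
                .applyConnectionString(new ConnectionString("mongodb://localhost:27017"))
                .build();
        // Rebuild existing settings with an explicit transport, mirroring the
        // conditional override in UnifiedReactiveStreamsTest#createMongoClient.
        MongoClientSettings overridden = MongoClientSettings.builder(base)
                .transportSettings(TransportSettings.nettyBuilder().build())
                .build();
        System.out.println(overridden.getTransportSettings());
    }
}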
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedRetryableReadsTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedRetryableReadsTest.java
new file mode 100644
index 00000000000..854ad88e092
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedRetryableReadsTest.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.unified;
+
+import org.junit.jupiter.params.provider.Arguments;
+
+import java.util.Collection;
+
+final class UnifiedRetryableReadsTest extends UnifiedReactiveStreamsTest {
+    private static Collection<Arguments> data() {
+        return getTestData("retryable-reads");
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedRetryableWritesTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedRetryableWritesTest.java
new file mode 100644
index 00000000000..132dec7a07b
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedRetryableWritesTest.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.unified;
+
+import org.junit.jupiter.params.provider.Arguments;
+
+import java.util.Collection;
+
+final class UnifiedRetryableWritesTest extends UnifiedReactiveStreamsTest {
+    private static Collection<Arguments> data() {
+        return getTestData("retryable-writes");
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedServerDiscoveryAndMonitoringTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedServerDiscoveryAndMonitoringTest.java
new file mode 100644
index 00000000000..aad3df381d2
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedServerDiscoveryAndMonitoringTest.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.unified;
+
+import org.junit.jupiter.params.provider.Arguments;
+import java.util.Collection;
+
+final class UnifiedServerDiscoveryAndMonitoringTest extends UnifiedReactiveStreamsTest {
+    private static Collection<Arguments> data() {
+        return getTestData("server-discovery-and-monitoring");
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedTransactionsTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedTransactionsTest.java
new file mode 100644
index 00000000000..2da185c390e
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedTransactionsTest.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.unified;
+
+import org.junit.jupiter.params.provider.Arguments;
+
+import java.util.Collection;
+
+final class UnifiedTransactionsTest extends UnifiedReactiveStreamsTest {
+    private static Collection<Arguments> data() {
+        return getTestData("transactions");
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedWriteConcernTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedWriteConcernTest.java
new file mode 100644
index 00000000000..fc10247c349
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/UnifiedWriteConcernTest.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.unified;
+
+import org.junit.jupiter.params.provider.Arguments;
+
+import java.util.Collection;
+
+final class UnifiedWriteConcernTest extends UnifiedReactiveStreamsTest {
+    private static Collection<Arguments> data() {
+        return getTestData("read-write-concern");
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/VersionedApiTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/VersionedApiTest.java
new file mode 100644
index 00000000000..8378fb00482
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/unified/VersionedApiTest.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.unified;
+
+import org.junit.jupiter.params.provider.Arguments;
+
+import java.util.Collection;
+
+final class VersionedApiTest extends UnifiedReactiveStreamsTest {
+    private static Collection<Arguments> data() {
+        return getTestData("versioned-api");
+    }
+}
diff --git a/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/vector/BinaryVectorFunctionalTest.java b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/vector/BinaryVectorFunctionalTest.java
new file mode 100644
index 00000000000..9bbea1303bd
--- /dev/null
+++ b/driver-reactive-streams/src/test/functional/com/mongodb/reactivestreams/client/vector/BinaryVectorFunctionalTest.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.vector;
+
+import com.mongodb.MongoClientSettings;
+import com.mongodb.client.MongoClient;
+import com.mongodb.client.vector.AbstractBinaryVectorFunctionalTest;
+import com.mongodb.reactivestreams.client.syncadapter.SyncMongoClient;
+
+public class BinaryVectorFunctionalTest extends AbstractBinaryVectorFunctionalTest {
+    @Override
+    protected MongoClient getMongoClient(final MongoClientSettings settings) {
+        return new SyncMongoClient(settings);
+    }
+}
diff --git a/driver-reactive-streams/src/test/resources/logback-test.xml b/driver-reactive-streams/src/test/resources/logback-test.xml
new file mode 100644
index 00000000000..022806f0e4e
--- /dev/null
+++ b/driver-reactive-streams/src/test/resources/logback-test.xml
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="UTF-8"?>
+
+<configuration>
+    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+        <encoder>
+            <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
+        </encoder>
+    </appender>
+
+    <root level="INFO">
+        <appender-ref ref="STDOUT"/>
+    </root>
+</configuration>
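The remaining files are Reactive Streams TCK verifications, which all share one contract: createPublisher(n) must return a publisher of exactly n elements, and returning null from createFailedPublisher() tells the TCK to skip the failed-publisher test cases. A minimal in-memory example of the same harness shape (not part of the patch; uses Reactor's Flux for brevity, and the timeout value is an arbitrary assumption):

import java.util.stream.LongStream;

import org.reactivestreams.Publisher;
import org.reactivestreams.tck.PublisherVerification;
import org.reactivestreams.tck.TestEnvironment;
import reactor.core.publisher.Flux;

public class RangePublisherVerification extends PublisherVerification<Long> {
    public RangePublisherVerification() {
        super(new TestEnvironment(1000L));
    }

    @Override
    public Publisher<Long> createPublisher(final long elements) {
        // The TCK requires exactly `elements` items followed by completion.
        return Flux.fromStream(LongStream.range(0, elements).boxed());
    }

    @Override
    public Publisher<Long> createFailedPublisher() {
        // null tells the TCK to skip the failed-publisher test cases.
        return null;
    }
}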
diff --git a/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/AggregatePublisherVerification.java b/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/AggregatePublisherVerification.java
new file mode 100644
index 00000000000..04fac1805de
--- /dev/null
+++ b/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/AggregatePublisherVerification.java
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import org.bson.Document;
+import org.reactivestreams.Publisher;
+import org.reactivestreams.tck.PublisherVerification;
+import org.reactivestreams.tck.TestEnvironment;
+
+import java.util.Collections;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.LongStream;
+
+import static com.mongodb.reactivestreams.client.MongoFixture.DEFAULT_TIMEOUT_MILLIS;
+import static com.mongodb.reactivestreams.client.MongoFixture.PUBLISHER_REFERENCE_CLEANUP_TIMEOUT_MILLIS;
+import static com.mongodb.reactivestreams.client.MongoFixture.run;
+
+public class AggregatePublisherVerification extends PublisherVerification<Document> {
+
+    public AggregatePublisherVerification() {
+        super(new TestEnvironment(DEFAULT_TIMEOUT_MILLIS), PUBLISHER_REFERENCE_CLEANUP_TIMEOUT_MILLIS);
+    }
+
+
+    @Override
+    public Publisher<Document> createPublisher(final long elements) {
+        assert (elements <= maxElementsFromPublisher());
+
+        MongoCollection<Document> collection = MongoFixture.getDefaultDatabase().getCollection("AggregationTest");
+        run(collection.drop());
+        if (elements > 0) {
+            List<Document> documentList = LongStream.rangeClosed(1, elements).boxed()
+                    .map(i -> new Document("a", i)).collect(Collectors.toList());
+
+            run(collection.insertMany(documentList));
+        }
+
+        return collection.aggregate(Collections.singletonList(Document.parse("{$match: {}}")));
+    }
+
+    @Override
+    public Publisher<Document> createFailedPublisher() {
+        return null;
+    }
+
+    @Override
+    public long maxElementsFromPublisher() {
+        return 100;
+    }
+}
diff --git a/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/ChangeStreamPublisherVerification.java b/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/ChangeStreamPublisherVerification.java
new file mode 100644
index 00000000000..1d418e06882
--- /dev/null
+++ b/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/ChangeStreamPublisherVerification.java
@@ -0,0 +1,82 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import com.mongodb.client.model.changestream.ChangeStreamDocument;
+import org.bson.Document;
+import org.reactivestreams.Publisher;
+import org.reactivestreams.tck.PublisherVerification;
+import org.reactivestreams.tck.TestEnvironment;
+
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
+import java.util.stream.LongStream;
+
+import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet;
+import static com.mongodb.reactivestreams.client.MongoFixture.DEFAULT_TIMEOUT_MILLIS;
+import static com.mongodb.reactivestreams.client.MongoFixture.PUBLISHER_REFERENCE_CLEANUP_TIMEOUT_MILLIS;
+import static com.mongodb.reactivestreams.client.MongoFixture.run;
+
+public class ChangeStreamPublisherVerification extends PublisherVerification<ChangeStreamDocument<Document>> {
+
+    public static final AtomicInteger COUNTER = new AtomicInteger();
+
+    public ChangeStreamPublisherVerification() {
+        super(new TestEnvironment(DEFAULT_TIMEOUT_MILLIS), PUBLISHER_REFERENCE_CLEANUP_TIMEOUT_MILLIS);
+    }
+
+    @Override
+    public Publisher<ChangeStreamDocument<Document>> createPublisher(final long elements) {
+        assert (elements <= maxElementsFromPublisher());
+        if (!isDiscoverableReplicaSet()) {
+            notVerified();
+        }
+
+        MongoCollection<Document> collection = MongoFixture.getDefaultDatabase()
+                .getCollection("ChangeStreamTest" + COUNTER.getAndIncrement());
+
+        if (elements > 0) {
+            MongoFixture.ObservableSubscriber<ChangeStreamDocument<Document>> observer =
+                    new MongoFixture.ObservableSubscriber<>(() -> run(collection.insertOne(Document.parse("{a: 1}"))));
+            collection.watch().first().subscribe(observer);
+
+            ChangeStreamDocument<Document> changeDocument = observer.get().get(0);
+
+            // Limit the number of elements returned - this is essentially an infinite stream, but too high a limit will cause an OOM.
+            long maxElements = elements > 10000 ? 10000 : elements;
+            List<Document> documentList = LongStream.rangeClosed(1, maxElements).boxed()
+                    .map(i -> new Document("a", i)).collect(Collectors.toList());
+
+            run(collection.insertMany(documentList));
+
+            return collection.watch().startAfter(changeDocument.getResumeToken());
+        }
+
+        return collection.watch();
+    }
+
+    @Override
+    public Publisher<ChangeStreamDocument<Document>> createFailedPublisher() {
+        return null;
+    }
+
+    @Override
+    public long maxElementsFromPublisher() {
+        return publisherUnableToSignalOnComplete();
+    }
+}
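The verification above relies on change-stream resumability: it captures the resume token of a first event, then opens a new stream positioned after that point with startAfter. A condensed standalone sketch of the same resume pattern (requires a replica set, as the verification itself checks; the client setup, names, and crude sleep-based synchronization are illustrative assumptions, not part of the patch):

import com.mongodb.client.model.changestream.ChangeStreamDocument;
import com.mongodb.reactivestreams.client.MongoClient;
import com.mongodb.reactivestreams.client.MongoClients;
import com.mongodb.reactivestreams.client.MongoCollection;
import org.bson.BsonDocument;
import org.bson.Document;
import reactor.core.publisher.Mono;

import java.util.concurrent.CompletableFuture;

public final class ResumeTokenExample {
    public static void main(final String[] args) throws Exception {
        try (MongoClient client = MongoClients.create("mongodb://localhost:27017")) {
            MongoCollection<Document> collection =
                    client.getDatabase("test").getCollection("resumeExample");

            // Open the stream first, then trigger one event. The sleep is a crude
            // stand-in for the callback-based synchronization the verification uses.
            CompletableFuture<ChangeStreamDocument<Document>> firstEvent =
                    Mono.from(collection.watch().first()).toFuture();
            Thread.sleep(1000);
            Mono.from(collection.insertOne(new Document("a", 1))).block();
            BsonDocument resumeToken = firstEvent.get().getResumeToken();

            // Any event written after the token is replayed by the resumed stream.
            Mono.from(collection.insertOne(new Document("a", 2))).block();
            ChangeStreamDocument<Document> replayed =
                    Mono.from(collection.watch().startAfter(resumeToken).first()).block();
            System.out.println(replayed.getOperationType()); // INSERT
        }
    }
}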
diff --git a/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/DistinctPublisherVerification.java b/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/DistinctPublisherVerification.java
new file mode 100644
index 00000000000..ecd9fd443ee
--- /dev/null
+++ b/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/DistinctPublisherVerification.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import org.bson.Document;
+import org.reactivestreams.Publisher;
+import org.reactivestreams.tck.PublisherVerification;
+import org.reactivestreams.tck.TestEnvironment;
+
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.LongStream;
+
+import static com.mongodb.reactivestreams.client.MongoFixture.DEFAULT_TIMEOUT_MILLIS;
+import static com.mongodb.reactivestreams.client.MongoFixture.PUBLISHER_REFERENCE_CLEANUP_TIMEOUT_MILLIS;
+import static com.mongodb.reactivestreams.client.MongoFixture.run;
+
+public class DistinctPublisherVerification extends PublisherVerification<Integer> {
+
+    public DistinctPublisherVerification() {
+        super(new TestEnvironment(DEFAULT_TIMEOUT_MILLIS), PUBLISHER_REFERENCE_CLEANUP_TIMEOUT_MILLIS);
+    }
+
+
+    @Override
+    public Publisher<Integer> createPublisher(final long elements) {
+        assert (elements <= maxElementsFromPublisher());
+
+        MongoCollection<Document> collection = MongoFixture.getDefaultDatabase().getCollection("DistinctTest");
+        run(collection.drop());
+        if (elements > 0) {
+            List<Document> documentList = LongStream.rangeClosed(1, elements).boxed()
+                    .map(i -> new Document("a", i)).collect(Collectors.toList());
+
+            run(collection.insertMany(documentList));
+        }
+
+        return collection.distinct("a", Integer.class);
+    }
+
+    @Override
+    public Publisher<Integer> createFailedPublisher() {
+        return null;
+    }
+
+    @Override
+    public long maxElementsFromPublisher() {
+        return 100;
+    }
+}
diff --git a/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/FindPublisherVerification.java b/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/FindPublisherVerification.java
new file mode 100644
index 00000000000..deaebc9166a
--- /dev/null
+++ b/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/FindPublisherVerification.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import org.bson.Document;
+import org.reactivestreams.Publisher;
+import org.reactivestreams.tck.PublisherVerification;
+import org.reactivestreams.tck.TestEnvironment;
+
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.LongStream;
+
+import static com.mongodb.reactivestreams.client.MongoFixture.DEFAULT_TIMEOUT_MILLIS;
+import static com.mongodb.reactivestreams.client.MongoFixture.PUBLISHER_REFERENCE_CLEANUP_TIMEOUT_MILLIS;
+import static com.mongodb.reactivestreams.client.MongoFixture.run;
+
+public class FindPublisherVerification extends PublisherVerification<Document> {
+
+    public FindPublisherVerification() {
+        super(new TestEnvironment(DEFAULT_TIMEOUT_MILLIS), PUBLISHER_REFERENCE_CLEANUP_TIMEOUT_MILLIS);
+    }
+
+
+    @Override
+    public Publisher<Document> createPublisher(final long elements) {
+        assert (elements <= maxElementsFromPublisher());
+
+        MongoCollection<Document> collection = MongoFixture.getDefaultDatabase().getCollection("FindTest");
+        run(collection.drop());
+        if (elements > 0) {
+            List<Document> documentList = LongStream.rangeClosed(1, elements).boxed()
+                    .map(i -> new Document("a", i)).collect(Collectors.toList());
+
+            run(collection.insertMany(documentList));
+        }
+
+        return collection.find();
+    }
+
+    @Override
+    public Publisher<Document> createFailedPublisher() {
+        return null;
+    }
+
+    @Override
+    public long maxElementsFromPublisher() {
+        return 100;
+    }
+}
diff --git a/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/ListCollectionNamesPublisherVerification.java b/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/ListCollectionNamesPublisherVerification.java
new file mode 100644
index 00000000000..b91b5045acb
--- /dev/null
+++ b/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/ListCollectionNamesPublisherVerification.java
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import org.reactivestreams.Publisher;
+import org.reactivestreams.tck.PublisherVerification;
+import org.reactivestreams.tck.TestEnvironment;
+
+import static com.mongodb.reactivestreams.client.MongoFixture.DEFAULT_TIMEOUT_MILLIS;
+import static com.mongodb.reactivestreams.client.MongoFixture.PUBLISHER_REFERENCE_CLEANUP_TIMEOUT_MILLIS;
+import static com.mongodb.reactivestreams.client.MongoFixture.run;
+
+public class ListCollectionNamesPublisherVerification extends PublisherVerification<String> {
+
+    public ListCollectionNamesPublisherVerification() {
+        super(new TestEnvironment(DEFAULT_TIMEOUT_MILLIS), PUBLISHER_REFERENCE_CLEANUP_TIMEOUT_MILLIS);
+    }
+
+
+    @Override
+    public Publisher<String> createPublisher(final long elements) {
+        assert (elements <= maxElementsFromPublisher());
+
+        MongoDatabase database = MongoFixture.getDefaultDatabase();
+        run(database.drop());
+
+        for (long i = 0; i < elements; i++) {
+            run(database.createCollection("listCollectionNamesTest" + i));
+        }
+
+        return database.listCollectionNames();
+    }
+
+    @Override
+    public Publisher<String> createFailedPublisher() {
+        return null;
+    }
+
+    @Override
+    public long maxElementsFromPublisher() {
+        return 100;
+    }
+}
diff --git a/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/ListCollectionsPublisherVerification.java b/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/ListCollectionsPublisherVerification.java
new file mode 100644
index 00000000000..cb6e670cf79
--- /dev/null
+++ b/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/ListCollectionsPublisherVerification.java
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import org.bson.Document;
+import org.reactivestreams.Publisher;
+import org.reactivestreams.tck.PublisherVerification;
+import org.reactivestreams.tck.TestEnvironment;
+
+import static com.mongodb.reactivestreams.client.MongoFixture.DEFAULT_TIMEOUT_MILLIS;
+import static com.mongodb.reactivestreams.client.MongoFixture.PUBLISHER_REFERENCE_CLEANUP_TIMEOUT_MILLIS;
+import static com.mongodb.reactivestreams.client.MongoFixture.run;
+
+public class ListCollectionsPublisherVerification extends PublisherVerification<Document> {
+
+    public ListCollectionsPublisherVerification() {
+        super(new TestEnvironment(DEFAULT_TIMEOUT_MILLIS), PUBLISHER_REFERENCE_CLEANUP_TIMEOUT_MILLIS);
+    }
+
+
+    @Override
+    public Publisher<Document> createPublisher(final long elements) {
+        assert (elements <= maxElementsFromPublisher());
+
+        MongoDatabase database = MongoFixture.getDefaultDatabase();
+        run(database.drop());
+
+        for (long i = 0; i < elements; i++) {
+            run(database.createCollection("listCollectionTest" + i));
+        }
+
+        return database.listCollections();
+    }
+
+    @Override
+    public Publisher<Document> createFailedPublisher() {
+        return null;
+    }
+
+    @Override
+    public long maxElementsFromPublisher() {
+        return 100;
+    }
+}
diff --git a/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/ListDatabasesPublisherVerification.java b/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/ListDatabasesPublisherVerification.java
new file mode 100644
index 00000000000..fe8c697c69c
--- /dev/null
+++ b/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/ListDatabasesPublisherVerification.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import com.mongodb.client.model.Filters;
+import org.bson.Document;
+import org.reactivestreams.Publisher;
+import org.reactivestreams.tck.PublisherVerification;
+import org.reactivestreams.tck.TestEnvironment;
+
+import static com.mongodb.reactivestreams.client.MongoFixture.DEFAULT_TIMEOUT_MILLIS;
+import static com.mongodb.reactivestreams.client.MongoFixture.PUBLISHER_REFERENCE_CLEANUP_TIMEOUT_MILLIS;
+import static com.mongodb.reactivestreams.client.MongoFixture.cleanDatabases;
+import static com.mongodb.reactivestreams.client.MongoFixture.getDefaultDatabaseName;
+import static com.mongodb.reactivestreams.client.MongoFixture.getMongoClient;
+import static com.mongodb.reactivestreams.client.MongoFixture.run;
+import static java.lang.String.format;
+
+public class ListDatabasesPublisherVerification extends PublisherVerification<Document> {
+
+    public ListDatabasesPublisherVerification() {
+        super(new TestEnvironment(DEFAULT_TIMEOUT_MILLIS), PUBLISHER_REFERENCE_CLEANUP_TIMEOUT_MILLIS);
+    }
+
+
+    @Override
+    public Publisher<Document> createPublisher(final long elements) {
+        assert (elements <= maxElementsFromPublisher());
+
+        cleanDatabases();
+        MongoClient client = getMongoClient();
+        for (long i = 0; i < elements; i++) {
+            run(client.getDatabase(getDefaultDatabaseName() + i).createCollection("test" + i));
+        }
+        return client.listDatabases().filter(Filters.regex("name", format("^%s.*", getDefaultDatabaseName())));
+    }
+
+    @Override
+    public Publisher<Document> createFailedPublisher() {
+        return null;
+    }
+
+    @Override
+    public long maxElementsFromPublisher() {
+        return 5;
+    }
+}
diff --git a/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/ListIndexesPublisherVerification.java b/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/ListIndexesPublisherVerification.java
new file mode 100644
index 00000000000..f9591ce1023
--- /dev/null
+++ b/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/ListIndexesPublisherVerification.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import org.bson.Document;
+import org.reactivestreams.Publisher;
+import org.reactivestreams.tck.PublisherVerification;
+import org.reactivestreams.tck.TestEnvironment;
+
+import static com.mongodb.reactivestreams.client.MongoFixture.DEFAULT_TIMEOUT_MILLIS;
+import static com.mongodb.reactivestreams.client.MongoFixture.PUBLISHER_REFERENCE_CLEANUP_TIMEOUT_MILLIS;
+import static com.mongodb.reactivestreams.client.MongoFixture.run;
+import static java.lang.String.format;
+
+public class ListIndexesPublisherVerification extends PublisherVerification<Document> {
+
+    public ListIndexesPublisherVerification() {
+        super(new TestEnvironment(DEFAULT_TIMEOUT_MILLIS), PUBLISHER_REFERENCE_CLEANUP_TIMEOUT_MILLIS);
+    }
+
+
+    @Override
+    public Publisher<Document> createPublisher(final long elements) {
+        assert (elements <= maxElementsFromPublisher());
+
+        MongoDatabase database = MongoFixture.getDefaultDatabase();
+        MongoCollection<Document> collection = database.getCollection("ListIndexesTest");
+        run(collection.drop());
+        if (elements > 0) {
+            run(database.createCollection("ListIndexesTest"));
+            for (long i = 1; i < elements; i++) {
+                run(collection.createIndex(Document.parse(format("{ a%s: 1}", i))));
+            }
+        }
+
+        return collection.listIndexes();
+    }
+
+    @Override
+    public Publisher<Document> createFailedPublisher() {
+        return null;
+    }
+
+    @Override
+    public long maxElementsFromPublisher() {
+        return 100;
+    }
+}
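
Note that createPublisher above starts its loop at 1: creating the collection already materializes the implicit _id_ index, so exactly `elements` index documents are published. If the raw Document.parse form is unfamiliar, the same index could be declared with the typed builder; a sketch under that assumption:

    import com.mongodb.client.model.Indexes;

    // Equivalent to Document.parse("{ a1: 1}") for the first generated index.
    run(collection.createIndex(Indexes.ascending("a1")));
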
diff --git a/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/MapReducePublisherVerification.java b/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/MapReducePublisherVerification.java
new file mode 100644
index 00000000000..4333b71bb8d
--- /dev/null
+++ b/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/MapReducePublisherVerification.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import org.bson.Document;
+import org.reactivestreams.Publisher;
+import org.reactivestreams.tck.PublisherVerification;
+import org.reactivestreams.tck.TestEnvironment;
+
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.LongStream;
+
+import static com.mongodb.reactivestreams.client.MongoFixture.DEFAULT_TIMEOUT_MILLIS;
+import static com.mongodb.reactivestreams.client.MongoFixture.PUBLISHER_REFERENCE_CLEANUP_TIMEOUT_MILLIS;
+import static com.mongodb.reactivestreams.client.MongoFixture.run;
+
+public class MapReducePublisherVerification extends PublisherVerification<Document> {
+
+    public MapReducePublisherVerification() {
+        super(new TestEnvironment(DEFAULT_TIMEOUT_MILLIS), PUBLISHER_REFERENCE_CLEANUP_TIMEOUT_MILLIS);
+    }
+
+    @SuppressWarnings("deprecation")
+    @Override
+    public Publisher<Document> createPublisher(final long elements) {
+        assert (elements <= maxElementsFromPublisher());
+
+        MongoCollection<Document> collection = MongoFixture.getDefaultDatabase().getCollection("MapReduceTest");
+        run(collection.drop());
+        if (elements > 0) {
+            List<Document> documentList = LongStream.rangeClosed(1, elements).boxed()
+                    .map(i -> new Document("a", i)).collect(Collectors.toList());
+
+            run(collection.insertMany(documentList));
+        }
+
+        return collection.mapReduce("function(){ emit(this.a, 1)};", "function(a, b){ return a; }");
+    }
+
+    @Override
+    public Publisher<Document> createFailedPublisher() {
+        return null;
+    }
+
+    @Override
+    public long maxElementsFromPublisher() {
+        return 100;
+    }
+}
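
The map-reduce above emits one `(a, 1)` pair per document purely to give the TCK a publisher of `elements` results; mapReduce itself is deprecated in favor of the aggregation framework. For comparison only (an illustrative analogue, not a drop-in replacement for the verification), the same per-value grouping as a pipeline:

    import com.mongodb.client.model.Accumulators;
    import com.mongodb.client.model.Aggregates;

    // One output document per distinct value of "a".
    collection.aggregate(singletonList(
            Aggregates.group("$a", Accumulators.sum("count", 1))));
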
diff --git a/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/MongoFixture.java b/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/MongoFixture.java
new file mode 100644
index 00000000000..f4748062e59
--- /dev/null
+++ b/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/MongoFixture.java
@@ -0,0 +1,221 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import com.mongodb.ClusterFixture;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.MongoCommandException;
+import com.mongodb.MongoException;
+import com.mongodb.MongoNamespace;
+import com.mongodb.MongoTimeoutException;
+import org.bson.Document;
+import org.reactivestreams.Publisher;
+import org.reactivestreams.Subscriber;
+import org.reactivestreams.Subscription;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import static java.util.concurrent.TimeUnit.SECONDS;
+
+/**
+ * Helper class for asynchronous tests.
+ */
+public final class MongoFixture {
+    private static MongoClient mongoClient;
+
+    private MongoFixture() {
+    }
+
+    public static final long DEFAULT_TIMEOUT_MILLIS = 5000L;
+    public static final long PUBLISHER_REFERENCE_CLEANUP_TIMEOUT_MILLIS = 1000L;
+
+    public static synchronized MongoClient getMongoClient() {
+        if (mongoClient == null) {
+            mongoClient = MongoClients.create(getMongoClientSettings());
+            Runtime.getRuntime().addShutdownHook(new ShutdownHook());
+        }
+        return mongoClient;
+    }
+
+    public static MongoClientSettings getMongoClientSettings() {
+        return getMongoClientSettingsBuilder().build();
+    }
+
+    public static MongoClientSettings.Builder getMongoClientSettingsBuilder() {
+        return MongoClientSettings.builder().applyConnectionString(ClusterFixture.getConnectionString());
+    }
+
+    public static String getDefaultDatabaseName() {
+        return ClusterFixture.getDefaultDatabaseName();
+    }
+
+    public static MongoDatabase getDefaultDatabase() {
+        return getMongoClient().getDatabase(getDefaultDatabaseName());
+    }
+
+    public static void dropDatabase(final String name) {
+        if (name == null) {
+            return;
+        }
+        try {
+            run(getMongoClient().getDatabase(name).runCommand(new Document("dropDatabase", 1)));
+        } catch (MongoCommandException e) {
+            if (!e.getErrorMessage().contains("ns not found")) {
+                throw e;
+            }
+        }
+    }
+
+    public static void drop(final MongoNamespace namespace) {
+        try {
+            run(getMongoClient().getDatabase(namespace.getDatabaseName())
+                    .runCommand(new Document("drop", namespace.getCollectionName())));
+        } catch (MongoCommandException e) {
+            if (!e.getErrorMessage().contains("ns not found")) {
+                throw e;
+            }
+        }
+    }
+
+    public static <T> List<T> run(final Publisher<T> publisher) {
+        return run(publisher, () -> {});
+    }
+
+    public static <T> List<T> run(final Publisher<T> publisher, final Runnable onRequest) {
+        try {
+            ObservableSubscriber<T> subscriber = new ObservableSubscriber<>(onRequest);
+            publisher.subscribe(subscriber);
+            return subscriber.get();
+        } catch (Throwable t) {
+            if (t instanceof RuntimeException) {
+                throw (RuntimeException) t;
+            }
+            throw new RuntimeException(t);
+        }
+    }
+
+    public static void cleanDatabases() {
+        List<String> dbNames = MongoFixture.run(getMongoClient().listDatabaseNames());
+        for (String dbName : dbNames) {
+            if (dbName.startsWith(getDefaultDatabaseName())) {
+                dropDatabase(dbName);
+            }
+        }
+    }
+
+    static class ShutdownHook extends Thread {
+        @Override
+        public void run() {
+            cleanDatabases();
+            mongoClient.close();
+            mongoClient = null;
+        }
+    }
+
+    public static class ObservableSubscriber<T> implements Subscriber<T> {
+        private final List<T> received;
+        private final List<Throwable> errors;
+        private final CountDownLatch latch;
+        private final Runnable onRequest;
+        private volatile boolean requested;
+        private volatile Subscription subscription;
+        private volatile boolean completed;
+
+        public ObservableSubscriber() {
+            this(() -> {});
+        }
+
+        public ObservableSubscriber(final Runnable onRequest) {
+            this.received = new ArrayList<>();
+            this.errors = new ArrayList<>();
+            this.latch = new CountDownLatch(1);
+            this.onRequest = onRequest;
+        }
+
+        @Override
+        public void onSubscribe(final Subscription s) {
+            subscription = s;
+        }
+
+        @Override
+        public void onNext(final T t) {
+            received.add(t);
+        }
+
+        @Override
+        public void onError(final Throwable t) {
+            errors.add(t);
+            onComplete();
+        }
+
+        @Override
+        public void onComplete() {
+            completed = true;
+            latch.countDown();
+        }
+
+        public Subscription getSubscription() {
+            return subscription;
+        }
+
+        public List<T> getReceived() {
+            return received;
+        }
+
+        public List<Throwable> getErrors() {
+            return errors;
+        }
+
+        public boolean isCompleted() {
+            return completed;
+        }
+
+        public List<T> get() {
+            return await(60, SECONDS).getReceived();
+        }
+
+        public List<T> get(final long timeout, final TimeUnit unit) {
+            return await(timeout, unit).getReceived();
+        }
+
+        public ObservableSubscriber<T> await(final long timeout, final TimeUnit unit) {
+            return await(Integer.MAX_VALUE, timeout, unit);
+        }
+
+        public ObservableSubscriber<T> await(final int request, final long timeout, final TimeUnit unit) {
+            subscription.request(request);
+            if (!requested) {
+                requested = true;
+                onRequest.run();
+            }
+            try {
+                if (!latch.await(timeout, unit)) {
+                    throw new MongoTimeoutException("Publisher onComplete timed out");
+                }
+            } catch (InterruptedException e) {
+                throw new MongoException("Await failed", e);
+            }
+            if (!errors.isEmpty()) {
+                throw new MongoException("Await failed", errors.get(0));
+            }
+            return this;
+        }
+    }
+
+}
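
MongoFixture.run is the synchronous workhorse of these suites: it subscribes an ObservableSubscriber, requests Integer.MAX_VALUE elements, and blocks until onComplete, rethrowing the first onError or timing out after 60 seconds. A usage sketch, with the collection name hypothetical:

    List<Document> documents = MongoFixture.run(
            MongoFixture.getDefaultDatabase().getCollection("example").find());
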
diff --git a/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/PublishersVerification.java b/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/PublishersVerification.java
new file mode 100644
index 00000000000..dba70165cb2
--- /dev/null
+++ b/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/PublishersVerification.java
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client;
+
+import org.reactivestreams.Publisher;
+import org.reactivestreams.tck.PublisherVerification;
+import org.reactivestreams.tck.TestEnvironment;
+import reactor.core.publisher.Flux;
+
+import java.util.stream.IntStream;
+
+import static com.mongodb.reactivestreams.client.MongoFixture.DEFAULT_TIMEOUT_MILLIS;
+import static com.mongodb.reactivestreams.client.MongoFixture.PUBLISHER_REFERENCE_CLEANUP_TIMEOUT_MILLIS;
+
+public class PublishersVerification extends PublisherVerification<Integer> {
+
+    public PublishersVerification() {
+        super(new TestEnvironment(DEFAULT_TIMEOUT_MILLIS), PUBLISHER_REFERENCE_CLEANUP_TIMEOUT_MILLIS);
+    }
+
+    @Override
+    public Publisher<Integer> createPublisher(final long elements) {
+        assert (elements <= maxElementsFromPublisher());
+        return Flux.fromStream(IntStream.rangeClosed(1, (int) elements).boxed());
+    }
+
+    @Override
+    public Publisher<Integer> createFailedPublisher() {
+        return null;
+    }
+
+    @Override
+    public long maxElementsFromPublisher() {
+        return 100;
+    }
+}
diff --git a/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/gridfs/GridFSDownloadPublisherVerification.java b/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/gridfs/GridFSDownloadPublisherVerification.java
new file mode 100644
index 00000000000..ace18d84458
--- /dev/null
+++ b/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/gridfs/GridFSDownloadPublisherVerification.java
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.gridfs;
+
+import com.mongodb.client.gridfs.model.GridFSUploadOptions;
+import com.mongodb.reactivestreams.client.MongoFixture;
+import org.reactivestreams.Publisher;
+import org.reactivestreams.tck.PublisherVerification;
+import org.reactivestreams.tck.TestEnvironment;
+import reactor.core.publisher.Flux;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.LongStream;
+
+import static com.mongodb.reactivestreams.client.MongoFixture.DEFAULT_TIMEOUT_MILLIS;
+import static com.mongodb.reactivestreams.client.MongoFixture.PUBLISHER_REFERENCE_CLEANUP_TIMEOUT_MILLIS;
+import static com.mongodb.reactivestreams.client.MongoFixture.run;
+
+public class GridFSDownloadPublisherVerification extends PublisherVerification<ByteBuffer> {
+
+    public GridFSDownloadPublisherVerification() {
+        super(new TestEnvironment(DEFAULT_TIMEOUT_MILLIS), PUBLISHER_REFERENCE_CLEANUP_TIMEOUT_MILLIS);
+    }
+
+
+    @Override
+    public Publisher<ByteBuffer> createPublisher(final long elements) {
+        assert (elements <= maxElementsFromPublisher());
+
+        run(MongoFixture.getDefaultDatabase().drop());
+        GridFSBucket bucket = GridFSBuckets.create(MongoFixture.getDefaultDatabase());
+
+        if (elements < 1) {
+            return bucket.downloadToPublisher("test");
+        }
+
+        List<ByteBuffer> byteBuffers = LongStream.rangeClosed(1, elements).boxed()
+                .map(i -> ByteBuffer.wrap("test".getBytes())).collect(Collectors.toList());
+
+        Publisher<ByteBuffer> uploader = Flux.fromIterable(byteBuffers);
+        run(bucket.uploadFromPublisher("test", uploader, new GridFSUploadOptions().chunkSizeBytes(4)));
+
+        return bucket.downloadToPublisher("test");
+    }
+
+    @Override
+    public Publisher<ByteBuffer> createFailedPublisher() {
+        return null;
+    }
+
+    @Override
+    public long maxElementsFromPublisher() {
+        return 100;
+    }
+}
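
Because the upload above uses chunkSizeBytes(4) and each buffer holds the four bytes of "test", the download publisher emits one ByteBuffer per stored chunk. Reassembling a download with Reactor could look like this sketch (variable names illustrative):

    import java.io.ByteArrayOutputStream;

    byte[] bytes = Flux.from(bucket.downloadToPublisher("test"))
            .collect(ByteArrayOutputStream::new, (out, buffer) -> {
                byte[] chunk = new byte[buffer.remaining()];
                buffer.get(chunk);
                out.write(chunk, 0, chunk.length);
            })
            .map(ByteArrayOutputStream::toByteArray)
            .block();
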
diff --git a/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/gridfs/GridFSFindPublisherVerification.java b/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/gridfs/GridFSFindPublisherVerification.java
new file mode 100644
index 00000000000..bb86830a739
--- /dev/null
+++ b/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/gridfs/GridFSFindPublisherVerification.java
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.gridfs;
+
+import com.mongodb.client.gridfs.model.GridFSFile;
+import com.mongodb.reactivestreams.client.MongoFixture;
+import org.reactivestreams.Publisher;
+import org.reactivestreams.tck.PublisherVerification;
+import org.reactivestreams.tck.TestEnvironment;
+import reactor.core.publisher.Flux;
+
+import java.nio.ByteBuffer;
+import java.util.Collections;
+
+import static com.mongodb.reactivestreams.client.MongoFixture.DEFAULT_TIMEOUT_MILLIS;
+import static com.mongodb.reactivestreams.client.MongoFixture.PUBLISHER_REFERENCE_CLEANUP_TIMEOUT_MILLIS;
+import static com.mongodb.reactivestreams.client.MongoFixture.run;
+
+public class GridFSFindPublisherVerification extends PublisherVerification<GridFSFile> {
+
+    public GridFSFindPublisherVerification() {
+        super(new TestEnvironment(DEFAULT_TIMEOUT_MILLIS), PUBLISHER_REFERENCE_CLEANUP_TIMEOUT_MILLIS);
+    }
+
+    @Override
+    public Publisher<GridFSFile> createPublisher(final long elements) {
+        assert (elements <= maxElementsFromPublisher());
+
+        run(MongoFixture.getDefaultDatabase().drop());
+        GridFSBucket bucket = GridFSBuckets.create(MongoFixture.getDefaultDatabase());
+
+        for (long i = 0; i < elements; i++) {
+            run(GridFSBuckets.create(MongoFixture.getDefaultDatabase()).uploadFromPublisher("test" + i,
+                    Flux.fromIterable(Collections.singletonList(ByteBuffer.wrap("test".getBytes())))));
+        }
+
+        return bucket.find();
+    }
+
+    @Override
+    public Publisher<GridFSFile> createFailedPublisher() {
+        return null;
+    }
+
+    @Override
+    public long maxElementsFromPublisher() {
+        return 100;
+    }
+}
diff --git a/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/gridfs/GridFSUploadPublisherVerification.java b/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/gridfs/GridFSUploadPublisherVerification.java
new file mode 100644
index 00000000000..babe4df3115
--- /dev/null
+++ b/driver-reactive-streams/src/test/tck/com/mongodb/reactivestreams/client/gridfs/GridFSUploadPublisherVerification.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.gridfs;
+
+import com.mongodb.reactivestreams.client.MongoFixture;
+import org.bson.types.ObjectId;
+import org.reactivestreams.Publisher;
+import org.reactivestreams.tck.PublisherVerification;
+import org.reactivestreams.tck.TestEnvironment;
+import reactor.core.publisher.Flux;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.LongStream;
+
+import static com.mongodb.reactivestreams.client.MongoFixture.DEFAULT_TIMEOUT_MILLIS;
+import static com.mongodb.reactivestreams.client.MongoFixture.PUBLISHER_REFERENCE_CLEANUP_TIMEOUT_MILLIS;
+import static com.mongodb.reactivestreams.client.MongoFixture.run;
+
+public class GridFSUploadPublisherVerification extends PublisherVerification<ObjectId> {
+
+    public GridFSUploadPublisherVerification() {
+        super(new TestEnvironment(DEFAULT_TIMEOUT_MILLIS), PUBLISHER_REFERENCE_CLEANUP_TIMEOUT_MILLIS);
+    }
+
+
+    @Override
+    public Publisher<ObjectId> createPublisher(final long elements) {
+        assert (elements <= maxElementsFromPublisher());
+
+        if (elements < 1) {
+            notVerified();
+        }
+
+        GridFSBucket bucket = GridFSBuckets.create(MongoFixture.getDefaultDatabase());
+
+        run(MongoFixture.getDefaultDatabase().drop());
+        List<ByteBuffer> byteBuffers = LongStream.rangeClosed(1, elements).boxed()
+                .map(i -> ByteBuffer.wrap("test".getBytes())).collect(Collectors.toList());
+
+        Publisher<ByteBuffer> uploader = Flux.fromIterable(byteBuffers);
+
+        return bucket.uploadFromPublisher("test", uploader);
+    }
+
+    @Override
+    public Publisher<ObjectId> createFailedPublisher() {
+        return null;
+    }
+
+    @Override
+    public long maxElementsFromPublisher() {
+        return 1;
+    }
+}
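
An upload publisher emits exactly one element, the ObjectId of the newly stored file, which is why maxElementsFromPublisher() is 1 in this suite. A blocking sketch of that single emission:

    ObjectId id = Flux.from(bucket.uploadFromPublisher("test", uploader)).blockFirst();
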
diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/PublisherApiTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/PublisherApiTest.java
new file mode 100644
index 00000000000..09f77743cde
--- /dev/null
+++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/PublisherApiTest.java
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client;
+
+
+import com.mongodb.client.AggregateIterable;
+import com.mongodb.client.ChangeStreamIterable;
+import com.mongodb.client.DistinctIterable;
+import com.mongodb.client.FindIterable;
+import com.mongodb.client.ListCollectionsIterable;
+import com.mongodb.client.ListDatabasesIterable;
+import com.mongodb.client.ListIndexesIterable;
+import com.mongodb.client.gridfs.GridFSFindIterable;
+import com.mongodb.reactivestreams.client.gridfs.GridFSBuckets;
+import com.mongodb.reactivestreams.client.gridfs.GridFSFindPublisher;
+import org.junit.jupiter.api.DisplayName;
+import org.junit.jupiter.api.DynamicTest;
+import org.junit.jupiter.api.TestFactory;
+
+import java.lang.reflect.Method;
+import java.util.Arrays;
+import java.util.List;
+import java.util.stream.Collectors;
+
+import static java.lang.String.format;
+import static java.util.Arrays.asList;
+import static org.junit.jupiter.api.Assertions.assertIterableEquals;
+import static org.junit.jupiter.api.DynamicTest.dynamicTest;
+
+public class PublisherApiTest {
+
+    @SuppressWarnings("deprecation")
+    @TestFactory
+    @DisplayName("test that publisher apis matches sync")
+    List<DynamicTest> testPublisherApiMatchesSyncApi() {
+        return asList(
+                dynamicTest("Client Session Api", () -> assertApis(com.mongodb.client.ClientSession.class, ClientSession.class)),
+                dynamicTest("MongoClient Api", () -> assertApis(com.mongodb.client.MongoClient.class, MongoClient.class)),
+                dynamicTest("MongoDatabase Api", () -> assertApis(com.mongodb.client.MongoDatabase.class, MongoDatabase.class)),
+                dynamicTest("MongoCollection Api", () -> assertApis(com.mongodb.client.MongoCollection.class, MongoCollection.class)),
+                dynamicTest("Aggregate Api", () -> assertApis(AggregateIterable.class, AggregatePublisher.class)),
+                dynamicTest("Change Stream Api", () -> assertApis(ChangeStreamIterable.class, ChangeStreamPublisher.class)),
+                dynamicTest("Distinct Api", () -> assertApis(DistinctIterable.class, DistinctPublisher.class)),
+                dynamicTest("Find Api", () -> assertApis(FindIterable.class, FindPublisher.class)),
+                dynamicTest("List Collections Api", () -> assertApis(ListCollectionsIterable.class, ListCollectionsPublisher.class)),
+                dynamicTest("List Databases Api", () -> assertApis(ListDatabasesIterable.class, ListDatabasesPublisher.class)),
+                dynamicTest("List Indexes Api", () -> assertApis(ListIndexesIterable.class, ListIndexesPublisher.class)),
+                dynamicTest("Map Reduce Api", () -> assertApis(com.mongodb.client.MapReduceIterable.class, MapReducePublisher.class)),
+                dynamicTest("GridFS Buckets Api", () -> assertApis(com.mongodb.client.gridfs.GridFSBuckets.class, GridFSBuckets.class)),
+                dynamicTest("GridFS Find Api", () -> assertApis(GridFSFindIterable.class, GridFSFindPublisher.class))
+        );
+    }
+
+    void assertApis(final Class<?> syncApi, final Class<?> publisherApi) {
+        List<String> syncMethods = getMethodNames(syncApi);
+        List<String> publisherMethods = getMethodNames(publisherApi);
+        assertIterableEquals(syncMethods, publisherMethods, format("%s != %s%nSync: %s%nPub: %s",
+                syncApi.getSimpleName(), publisherApi.getSimpleName(), syncMethods, publisherMethods));
+    }
+
+    private static final List<String> SYNC_ONLY_APIS = asList("iterator", "cursor", "map", "into", "spliterator", "forEach");
+    private static final List<String> PUBLISHER_ONLY_APIS = asList("batchCursor", "getBatchSize", "subscribe");
+
+    private List<String> getMethodNames(final Class<?> clazz) {
+        return Arrays.stream(clazz.getMethods())
+                .map(Method::getName)
+                .distinct()
+                .filter(n -> !SYNC_ONLY_APIS.contains(n) && !PUBLISHER_ONLY_APIS.contains(n))
+                .sorted()
+                .collect(Collectors.toList());
+    }
+
+}
diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/AggregatePublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/AggregatePublisherImplTest.java
new file mode 100644
index 00000000000..cfbf5a0a5b8
--- /dev/null
+++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/AggregatePublisherImplTest.java
@@ -0,0 +1,458 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.internal;
+
+import com.mongodb.MongoException;
+import com.mongodb.MongoNamespace;
+import com.mongodb.ReadConcern;
+import com.mongodb.ReadPreference;
+import com.mongodb.WriteConcern;
+import com.mongodb.internal.client.model.AggregationLevel;
+import com.mongodb.internal.operation.AggregateOperation;
+import com.mongodb.internal.operation.AggregateToCollectionOperation;
+import com.mongodb.internal.operation.FindOperation;
+import com.mongodb.reactivestreams.client.AggregatePublisher;
+import org.bson.BsonDocument;
+import org.bson.BsonInt32;
+import org.bson.BsonString;
+import org.bson.Document;
+import org.bson.codecs.configuration.CodecConfigurationException;
+import org.junit.jupiter.api.DisplayName;
+import org.junit.jupiter.api.Test;
+import org.reactivestreams.Publisher;
+import reactor.core.publisher.Flux;
+
+import java.util.List;
+
+import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry;
+import static java.lang.String.format;
+import static java.util.Arrays.asList;
+import static java.util.Collections.singletonList;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+@SuppressWarnings({"rawtypes"})
+public class AggregatePublisherImplTest extends TestHelper {
+
+    @DisplayName("Should build the expected AggregateOperation")
+    @Test
+    void shouldBuildTheExpectedOperation() {
+        List<BsonDocument> pipeline = singletonList(BsonDocument.parse("{'$match': 1}"));
+
+        TestOperationExecutor executor = createOperationExecutor(asList(getBatchCursor(), getBatchCursor()));
+        AggregatePublisher<Document> publisher =
+                new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION);
+
+        AggregateOperation<Document> expectedOperation = new AggregateOperation<>(NAMESPACE, pipeline,
+                getDefaultCodecRegistry().get(Document.class))
+                .batchSize(Integer.MAX_VALUE)
+                .retryReads(true);
+
+        // default input should be as expected
+        Flux.from(publisher).blockFirst();
+
+        assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation());
+        assertEquals(ReadPreference.primary(), executor.getReadPreference());
+
+        // Should apply settings
+        publisher
+                .allowDiskUse(true)
+                .batchSize(100)
+                .bypassDocumentValidation(true) // Ignored
+                .collation(COLLATION)
+                .comment("my comment")
+                .hint(BsonDocument.parse("{a: 1}"))
+                .maxAwaitTime(1001, MILLISECONDS)
+                .maxTime(101, MILLISECONDS);
+
+        expectedOperation = new AggregateOperation<>(NAMESPACE, pipeline,
+                getDefaultCodecRegistry().get(Document.class))
+                .retryReads(true)
+                .allowDiskUse(true)
+                .batchSize(100)
+                .collation(COLLATION)
+                .comment(new BsonString("my comment"))
+                .hint(BsonDocument.parse("{a: 1}"));
+
+        Flux.from(publisher).blockFirst();
+        assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation());
+        assertEquals(ReadPreference.primary(), executor.getReadPreference());
+    }
+
+    @DisplayName("Should build the expected AggregateOperation for hint string")
+    @Test
+    void shouldBuildTheExpectedOperationForHintString() {
+        List<BsonDocument> pipeline = singletonList(BsonDocument.parse("{'$match': 1}"));
+
+        TestOperationExecutor executor = createOperationExecutor(asList(getBatchCursor(), getBatchCursor()));
+        AggregatePublisher<Document> publisher =
+                new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION);
+
+        AggregateOperation<Document> expectedOperation = new AggregateOperation<>(NAMESPACE, pipeline,
+                getDefaultCodecRegistry().get(Document.class))
+                .batchSize(Integer.MAX_VALUE)
+                .retryReads(true);
+
+        publisher
+                .hintString("x_1");
+
+        expectedOperation
+                .hint(new BsonString("x_1"));
+
+        Flux.from(publisher).blockFirst();
+        assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation());
+    }
+
+    @DisplayName("Should build the expected AggregateOperation when both hint and hintString are set")
+    @Test
+    void shouldBuildTheExpectedOperationForHintPlusHintString() {
+        List<BsonDocument> pipeline = singletonList(BsonDocument.parse("{'$match': 1}"));
+
+        TestOperationExecutor executor = createOperationExecutor(asList(getBatchCursor(), getBatchCursor()));
+        AggregatePublisher<Document> publisher =
+                new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION);
+
+        AggregateOperation<Document> expectedOperation = new AggregateOperation<>(NAMESPACE, pipeline,
+                getDefaultCodecRegistry().get(Document.class))
+                .batchSize(Integer.MAX_VALUE)
+                .retryReads(true);
+
+        publisher
+                .hint(new Document("x", 1))
+                .hintString("x_1");
+
+        expectedOperation
+                .hint(new BsonDocument("x", new BsonInt32(1)));
+
+        Flux.from(publisher).blockFirst();
+        assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation());
+    }
+
+    @DisplayName("Should build the expected AggregateOperation for $out")
+    @Test
+    void shouldBuildTheExpectedOperationsForDollarOut() {
+        String collectionName = "collectionName";
+        List<BsonDocument> pipeline = asList(BsonDocument.parse("{'$match': 1}"),
+                BsonDocument.parse(format("{'$out': '%s'}", collectionName)));
+        MongoNamespace collectionNamespace = new MongoNamespace(NAMESPACE.getDatabaseName(), collectionName);
+
+        TestOperationExecutor executor = createOperationExecutor(asList(getBatchCursor(), getBatchCursor(), getBatchCursor(), null));
+        AggregatePublisher<Document> publisher =
+                new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION);
+
+        AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline,
+                ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED);
+
+        // default input should be as expected
+        Flux.from(publisher).blockFirst();
+
+        VoidReadOperationThenCursorReadOperation operation = (VoidReadOperationThenCursorReadOperation) executor.getReadOperation();
+        assertEquals(ReadPreference.primary(), executor.getReadPreference());
+        assertOperationIsTheSameAs(expectedOperation, operation.getReadOperation());
+
+        // Should apply settings
+        publisher
+                .allowDiskUse(true)
+                .batchSize(100) // Used in Find
+                .bypassDocumentValidation(true)
+                .collation(COLLATION)
+                .comment("my comment")
+                .hint(BsonDocument.parse("{a: 1}"))
+                .maxAwaitTime(1001, MILLISECONDS) // Ignored on $out
+                .maxTime(100, MILLISECONDS);
+
+        expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline,
+                ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED)
+                .allowDiskUse(true)
+                .bypassDocumentValidation(true)
+                .collation(COLLATION)
+                .comment(new BsonString("my comment"))
+                .hint(BsonDocument.parse("{a: 1}"));
+
+        Flux.from(publisher).blockFirst();
+        assertEquals(ReadPreference.primary(), executor.getReadPreference());
+        operation = (VoidReadOperationThenCursorReadOperation) executor.getReadOperation();
+        assertOperationIsTheSameAs(expectedOperation, operation.getReadOperation());
+
+        FindOperation<Document> expectedFindOperation =
+                new FindOperation<>(collectionNamespace, getDefaultCodecRegistry().get(Document.class))
+                        .batchSize(100)
+                        .collation(COLLATION)
+                        .filter(new BsonDocument())
+                        .comment(new BsonString("my comment"))
+                        .retryReads(true);
+
+        assertOperationIsTheSameAs(expectedFindOperation, operation.getCursorReadOperation());
+
+        // Should handle database level aggregations
+        publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.DATABASE);
+
+        expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT,
+                WriteConcern.ACKNOWLEDGED);
+
+        Flux.from(publisher).blockFirst();
+        operation = (VoidReadOperationThenCursorReadOperation) executor.getReadOperation();
+        assertEquals(ReadPreference.primary(), executor.getReadPreference());
+        assertOperationIsTheSameAs(expectedOperation, operation.getReadOperation());
+
+        // Should handle toCollection
+        publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION);
+
+        expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT,
+                WriteConcern.ACKNOWLEDGED);
+
+        // default input should be as expected
+        Flux.from(publisher.toCollection()).blockFirst();
+        assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation());
+    }
+
+    @DisplayName("Should build the expected AggregateOperation for $out with hint string")
+    @Test
+    void shouldBuildTheExpectedOperationsForDollarOutWithHintString() {
+        String collectionName = "collectionName";
+        List<BsonDocument> pipeline = asList(BsonDocument.parse("{'$match': 1}"),
+                BsonDocument.parse(format("{'$out': '%s'}", collectionName)));
+        MongoNamespace collectionNamespace = new MongoNamespace(NAMESPACE.getDatabaseName(), collectionName);
+
+        TestOperationExecutor executor = createOperationExecutor(asList(getBatchCursor(), getBatchCursor(), getBatchCursor(), null));
+        AggregatePublisher<Document> publisher =
+                new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION);
+
+        AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline,
+                ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED);
+
+        publisher
+                .hintString("x_1");
+
+        expectedOperation
+                .hint(new BsonString("x_1"));
+
+        Flux.from(publisher).blockFirst();
+        assertEquals(ReadPreference.primary(), executor.getReadPreference());
+        VoidReadOperationThenCursorReadOperation operation = (VoidReadOperationThenCursorReadOperation) executor.getReadOperation();
+        assertOperationIsTheSameAs(expectedOperation, operation.getReadOperation());
+    }
+
+    @DisplayName("Should build the expected AggregateOperation for $out when both hint and hint string are set")
+    @Test
+    void shouldBuildTheExpectedOperationsForDollarOutWithHintPlusHintString() {
+        String collectionName = "collectionName";
+        List<BsonDocument> pipeline = asList(BsonDocument.parse("{'$match': 1}"),
+                BsonDocument.parse(format("{'$out': '%s'}", collectionName)));
+        MongoNamespace collectionNamespace = new MongoNamespace(NAMESPACE.getDatabaseName(), collectionName);
+
+        TestOperationExecutor executor = createOperationExecutor(asList(getBatchCursor(), getBatchCursor(), getBatchCursor(), null));
+        AggregatePublisher<Document> publisher =
+                new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION);
+
+        AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline,
+                ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED);
+
+        publisher
+                .hint(new Document("x", 1))
+                .hintString("x_1");
+
+        expectedOperation
+                .hint(new BsonDocument("x", new BsonInt32(1)));
+
+        Flux.from(publisher).blockFirst();
+        assertEquals(ReadPreference.primary(), executor.getReadPreference());
+        VoidReadOperationThenCursorReadOperation operation = (VoidReadOperationThenCursorReadOperation) executor.getReadOperation();
+        assertOperationIsTheSameAs(expectedOperation, operation.getReadOperation());
+    }
+
+    @DisplayName("Should build the expected AggregateOperation for $out as document")
+    @Test
+    void shouldBuildTheExpectedOperationsForDollarOutAsDocument() {
+        List<BsonDocument> pipeline = asList(BsonDocument.parse("{'$match': 1}"), BsonDocument.parse("{'$out': {s3: true}}"));
+
+        TestOperationExecutor executor = createOperationExecutor(asList(null, null, null, null));
+        AggregatePublisher<Document> publisher =
+                new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION);
+
+        // default input should be as expected
+        assertThrows(IllegalStateException.class, () -> Flux.from(publisher).blockFirst());
+
+        // Should handle toCollection
+        Publisher<Void> toCollectionPublisher =
+                new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION)
+                        .toCollection();
+
+        AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline,
+                ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED);
+
+        Flux.from(toCollectionPublisher).blockFirst();
+        assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation());
+
+        // Should handle database level
+        toCollectionPublisher =
+                new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.DATABASE)
+                        .toCollection();
+
+        Flux.from(toCollectionPublisher).blockFirst();
+        assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation());
+
+        // Should handle $out with namespace
+        List<BsonDocument> pipelineWithNamespace = asList(BsonDocument.parse("{'$match': 1}"),
+                BsonDocument.parse("{'$out': {db: 'db1', coll: 'coll1'}}"));
+        toCollectionPublisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipelineWithNamespace,
+                AggregationLevel.COLLECTION)
+                .toCollection();
+
+        expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipelineWithNamespace, ReadConcern.DEFAULT,
+                WriteConcern.ACKNOWLEDGED);
+
+        Flux.from(toCollectionPublisher).blockFirst();
+        assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation());
+    }
+
+    @DisplayName("Should build the expected AggregateOperation for $merge document")
+    @Test
+    void shouldBuildTheExpectedOperationsForDollarMergeDocument() {
+        String collectionName = "collectionName";
+        List<BsonDocument> pipeline = asList(BsonDocument.parse("{'$match': 1}"),
+                BsonDocument.parse(format("{'$merge': {into: '%s'}}", collectionName)));
+        MongoNamespace collectionNamespace = new MongoNamespace(NAMESPACE.getDatabaseName(), collectionName);
+
+        TestOperationExecutor executor = createOperationExecutor(asList(getBatchCursor(), getBatchCursor(), getBatchCursor(), null));
+        AggregatePublisher<Document> publisher =
+                new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION);
+
+        AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline,
+                ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED);
+
+        // default input should be as expected
+        Flux.from(publisher).blockFirst();
+
+        VoidReadOperationThenCursorReadOperation operation = (VoidReadOperationThenCursorReadOperation) executor.getReadOperation();
+        assertEquals(ReadPreference.primary(), executor.getReadPreference());
+        assertOperationIsTheSameAs(expectedOperation, operation.getReadOperation());
+
+        // Should apply settings
+        publisher
+                .allowDiskUse(true)
+                .batchSize(100) // Used in Find
+                .bypassDocumentValidation(true)
+                .collation(COLLATION)
+                .comment(new BsonInt32(1))
+                .hint(BsonDocument.parse("{a: 1}"))
+                .maxAwaitTime(1001, MILLISECONDS) // Ignored on $out
+                .maxTime(100, MILLISECONDS);
+
+        expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline,
+                ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED)
+                .allowDiskUse(true)
+                .bypassDocumentValidation(true)
+                .collation(COLLATION)
+                .comment(new BsonInt32(1))
+                .hint(BsonDocument.parse("{a: 1}"));
+
+        Flux.from(publisher).blockFirst();
+        assertEquals(ReadPreference.primary(), executor.getReadPreference());
+        operation = (VoidReadOperationThenCursorReadOperation) executor.getReadOperation();
+        assertOperationIsTheSameAs(expectedOperation, operation.getReadOperation());
+
+        FindOperation<Document> expectedFindOperation =
+                new FindOperation<>(collectionNamespace, getDefaultCodecRegistry().get(Document.class))
+                        .batchSize(100)
+                        .collation(COLLATION)
+                        .filter(new BsonDocument())
+                        .comment(new BsonInt32(1))
+                        .retryReads(true);
+
+        assertOperationIsTheSameAs(expectedFindOperation, operation.getCursorReadOperation());
+
+        // Should handle database level aggregations
+        publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.DATABASE);
+
+        expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT,
+                WriteConcern.ACKNOWLEDGED);
+
+        Flux.from(publisher).blockFirst();
+        operation = (VoidReadOperationThenCursorReadOperation) executor.getReadOperation();
+        assertEquals(ReadPreference.primary(), executor.getReadPreference());
+        assertOperationIsTheSameAs(expectedOperation, operation.getReadOperation());
+
+        // Should handle toCollection
+        publisher = new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION);
+
+        expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline, ReadConcern.DEFAULT,
+                WriteConcern.ACKNOWLEDGED);
+
+        // default input should be as expected
+        Flux.from(publisher.toCollection()).blockFirst();
+        assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation());
+    }
+
+    @DisplayName("Should build the expected AggregateOperation for $merge string")
+    @Test
+    void shouldBuildTheExpectedOperationsForDollarMergeString() {
+        String collectionName = "collectionName";
+        MongoNamespace collectionNamespace = new MongoNamespace(NAMESPACE.getDatabaseName(), collectionName);
+        List<BsonDocument> pipeline = asList(BsonDocument.parse("{'$match': 1}"),
+                BsonDocument.parse(format("{'$merge': '%s'}", collectionName)));
+
+        TestOperationExecutor executor = createOperationExecutor(asList(getBatchCursor(), getBatchCursor(), getBatchCursor(), null));
+        AggregatePublisher<Document> publisher =
+                new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION);
+
+        AggregateToCollectionOperation expectedOperation = new AggregateToCollectionOperation(NAMESPACE, pipeline,
+                ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED);
+
+        // default input should be as expected
+        Flux.from(publisher).blockFirst();
+
+        VoidReadOperationThenCursorReadOperation operation = (VoidReadOperationThenCursorReadOperation) executor.getReadOperation();
+        assertEquals(ReadPreference.primary(), executor.getReadPreference());
+        assertOperationIsTheSameAs(expectedOperation, operation.getReadOperation());
+
+        FindOperation<Document> expectedFindOperation =
+                new FindOperation<>(collectionNamespace, getDefaultCodecRegistry().get(Document.class))
+                        .filter(new BsonDocument())
+                        .batchSize(Integer.MAX_VALUE)
+                        .retryReads(true);
+
+        assertOperationIsTheSameAs(expectedFindOperation, operation.getCursorReadOperation());
+    }
+
+    @DisplayName("Should handle error scenarios")
+    @Test
+    void shouldHandleErrorScenarios() {
+        List<BsonDocument> pipeline = singletonList(BsonDocument.parse("{'$match': 1}"));
+        TestOperationExecutor executor = createOperationExecutor(asList(new MongoException("Failure"), null, null));
+
+        // Operation fails
+        Publisher<Document> publisher =
+                new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), pipeline, AggregationLevel.COLLECTION);
+        assertThrows(MongoException.class, () -> Flux.from(publisher).blockFirst());
+
+        // Missing Codec
+        Publisher<Document> publisherMissingCodec =
+                new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor)
+                        .withCodecRegistry(BSON_CODEC_REGISTRY), pipeline, AggregationLevel.COLLECTION);
+        assertThrows(CodecConfigurationException.class, () -> Flux.from(publisherMissingCodec).blockFirst());
+
+        // Pipeline contains null
+        Publisher<Document> publisherPipelineNull =
+                new AggregatePublisherImpl<>(null, createMongoOperationPublisher(executor), singletonList(null),
+                        AggregationLevel.COLLECTION);
+        assertThrows(IllegalArgumentException.class, () -> Flux.from(publisherPipelineNull).blockFirst());
+    }
+
+
+}
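
The raw BsonDocument.parse pipelines above deliberately mirror the wire format the operations receive; application code would normally build the same $match-plus-$merge shape with the typed helpers. A sketch, illustrative rather than what the tests execute:

    import com.mongodb.client.model.Aggregates;
    import com.mongodb.client.model.Filters;
    import org.bson.conversions.Bson;

    List<Bson> pipeline = asList(
            Aggregates.match(Filters.eq("x", 1)),
            Aggregates.merge("collectionName"));
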
diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImplTest.java
new file mode 100644
index 00000000000..7c2ab637c27
--- /dev/null
+++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ChangeStreamPublisherImplTest.java
@@ -0,0 +1,142 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.internal;
+
+import com.mongodb.MongoException;
+import com.mongodb.ReadPreference;
+import com.mongodb.client.model.changestream.ChangeStreamDocument;
+import com.mongodb.client.model.changestream.FullDocument;
+import com.mongodb.client.model.changestream.FullDocumentBeforeChange;
+import com.mongodb.internal.client.model.changestream.ChangeStreamLevel;
+import com.mongodb.internal.operation.ChangeStreamOperation;
+import com.mongodb.reactivestreams.client.ChangeStreamPublisher;
+import org.bson.BsonDocument;
+import org.bson.BsonInt32;
+import org.bson.BsonString;
+import org.bson.Document;
+import org.bson.codecs.Codec;
+import org.bson.codecs.configuration.CodecConfigurationException;
+import org.junit.jupiter.api.DisplayName;
+import org.junit.jupiter.api.Test;
+import org.reactivestreams.Publisher;
+import reactor.core.publisher.Flux;
+
+import java.util.List;
+
+import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry;
+import static java.util.Arrays.asList;
+import static java.util.Collections.singletonList;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+public class ChangeStreamPublisherImplTest extends TestHelper {
+
+    @DisplayName("Should build the expected ChangeStreamOperation")
+    @Test
+    void shouldBuildTheExpectedOperation() {
+        List<BsonDocument> pipeline = singletonList(BsonDocument.parse("{'$match': 1}"));
+        Codec<ChangeStreamDocument<Document>> codec = ChangeStreamDocument.createCodec(Document.class, getDefaultCodecRegistry());
+
+        TestOperationExecutor executor = createOperationExecutor(asList(getBatchCursor(), getBatchCursor()));
+        ChangeStreamPublisher<Document> publisher = new ChangeStreamPublisherImpl<>(null, createMongoOperationPublisher(executor),
+                Document.class, pipeline, ChangeStreamLevel.COLLECTION);
+
+        ChangeStreamOperation<ChangeStreamDocument<Document>> expectedOperation =
+                new ChangeStreamOperation<>(NAMESPACE, FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline,
+                        codec)
+                        .batchSize(Integer.MAX_VALUE)
+                        .retryReads(true);
+
+        // default input should be as expected
+        Flux.from(publisher).blockFirst();
+
+        assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation());
+        assertEquals(ReadPreference.primary(), executor.getReadPreference());
+
+        // Should apply settings
+        publisher
+                .batchSize(100)
+                .collation(COLLATION)
+                .comment("comment")
+                .maxAwaitTime(101, MILLISECONDS)
+                .fullDocument(FullDocument.UPDATE_LOOKUP);
+
+        expectedOperation = new ChangeStreamOperation<>(NAMESPACE, FullDocument.UPDATE_LOOKUP,
+                FullDocumentBeforeChange.DEFAULT,
+                pipeline,
+                codec).retryReads(true);
+        expectedOperation
+                .batchSize(100)
+                .collation(COLLATION)
+                .comment(new BsonString("comment"));
+
+        Flux.from(publisher).blockFirst();
+        assertEquals(ReadPreference.primary(), executor.getReadPreference());
+        assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation());
+    }
+
+    @DisplayName("Should build the expected ChangeStreamOperation when setting the document class")
+    @Test
+    void shouldBuildTheExpectedOperationWhenSettingDocumentClass() {
+        List<BsonDocument> pipeline = singletonList(BsonDocument.parse("{'$match': 1}"));
+        TestOperationExecutor executor = createOperationExecutor(singletonList(getBatchCursor()));
+
+        int batchSize = 100;
+        Publisher<BsonDocument> publisher = new ChangeStreamPublisherImpl<>(null, createMongoOperationPublisher(executor),
+                Document.class, pipeline, ChangeStreamLevel.COLLECTION)
+                .batchSize(batchSize)
+                .comment(new BsonInt32(1))
+                .withDocumentClass(BsonDocument.class);
+
+        ChangeStreamOperation<BsonDocument> expectedOperation =
+                new ChangeStreamOperation<>(NAMESPACE, FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, pipeline,
+                        getDefaultCodecRegistry().get(BsonDocument.class))
+                        .batchSize(batchSize)
+                        .comment(new BsonInt32(1))
+                        .retryReads(true);
+
+        // default input should be as expected
+        Flux.from(publisher).blockFirst();
+
+        assertEquals(ReadPreference.primary(), executor.getReadPreference());
+        assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation());
+    }
+
+    @DisplayName("Should handle error scenarios")
+    @Test
+    void shouldHandleErrorScenarios() {
+        List<BsonDocument> pipeline = singletonList(BsonDocument.parse("{'$match': 1}"));
+        TestOperationExecutor executor = createOperationExecutor(asList(new MongoException("Failure"), null, null));
+
+        // Operation fails
+        ChangeStreamPublisher<Document> publisher = new ChangeStreamPublisherImpl<>(null, createMongoOperationPublisher(executor),
+                Document.class, pipeline, ChangeStreamLevel.COLLECTION);
+        assertThrows(MongoException.class, () -> Flux.from(publisher).blockFirst());
+
+        // Missing Codec
+        assertThrows(CodecConfigurationException.class, () ->
+                new ChangeStreamPublisherImpl<>(null, createMongoOperationPublisher(executor)
+                        .withCodecRegistry(BSON_CODEC_REGISTRY), Document.class, pipeline, ChangeStreamLevel.COLLECTION));
+
+        // Pipeline contains null
+        ChangeStreamPublisher<Document> publisherPipelineNull =
+                new ChangeStreamPublisherImpl<>(null, createMongoOperationPublisher(executor), Document.class,
+                        singletonList(null), ChangeStreamLevel.COLLECTION);
+        assertThrows(IllegalArgumentException.class, () -> Flux.from(publisherPipelineNull).blockFirst());
+    }
+}
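
withDocumentClass, exercised in the second test above, swaps the decoded type of the raw change event; end-user code reaches the same publisher through watch(). A sketch, assuming a reactive `collection` variable:

    Publisher<BsonDocument> rawEvents = collection.watch()
            .batchSize(100)
            .withDocumentClass(BsonDocument.class);
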
diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ClientSessionBindingSpecification.groovy b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ClientSessionBindingSpecification.groovy
new file mode 100644
index 00000000000..d6233342291
--- /dev/null
+++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ClientSessionBindingSpecification.groovy
@@ -0,0 +1,192 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.internal
+
+import com.mongodb.ReadConcern
+import com.mongodb.ReadPreference
+import com.mongodb.ServerAddress
+import com.mongodb.async.FutureResultCallback
+import com.mongodb.connection.ServerConnectionState
+import com.mongodb.connection.ServerDescription
+import com.mongodb.connection.ServerType
+import com.mongodb.internal.binding.AsyncClusterAwareReadWriteBinding
+import com.mongodb.internal.binding.AsyncClusterBinding
+import com.mongodb.internal.binding.AsyncConnectionSource
+import com.mongodb.internal.connection.Cluster
+import com.mongodb.internal.connection.Server
+import com.mongodb.internal.connection.ServerTuple
+import com.mongodb.internal.session.ClientSessionContext
+import com.mongodb.reactivestreams.client.ClientSession
+import spock.lang.Specification
+
+import static com.mongodb.ClusterFixture.OPERATION_CONTEXT
+
+class ClientSessionBindingSpecification extends Specification {
+    def 'should return the session context from the binding'() {
+        given:
+        def session = Stub(ClientSession)
+        def wrappedBinding = Stub(AsyncClusterAwareReadWriteBinding) {
+            getOperationContext() >> OPERATION_CONTEXT
+        }
+        def binding = new ClientSessionBinding(session, false, wrappedBinding)
+
+        when:
+        def context = binding.getOperationContext().getSessionContext()
+
+        then:
+        (context as ClientSessionContext).getClientSession() == session
+    }
+
+    def 'should return the session context from the connection source'() {
+        given:
+        def session = Stub(ClientSession)
+        def wrappedBinding = Mock(AsyncClusterAwareReadWriteBinding) {
+            getOperationContext() >> OPERATION_CONTEXT
+        }
+        wrappedBinding.retain() >> wrappedBinding
+        def binding = new ClientSessionBinding(session, false, wrappedBinding)
+
+        when:
+        def futureResultCallback = new FutureResultCallback<AsyncConnectionSource>()
+        binding.getReadConnectionSource(futureResultCallback)
+
+        then:
+        1 * wrappedBinding.getReadConnectionSource(_) >> {
+            it[0].onResult(Stub(AsyncConnectionSource), null)
+        }
+
+        when:
+        def context = futureResultCallback.get().getOperationContext().getSessionContext()
+
+        then:
+        (context as ClientSessionContext).getClientSession() == session
+
+        when:
+        futureResultCallback = new FutureResultCallback<AsyncConnectionSource>()
+        binding.getWriteConnectionSource(futureResultCallback)
+
+        then:
+        1 * wrappedBinding.getWriteConnectionSource(_) >> {
+            it[0].onResult(Stub(AsyncConnectionSource), null)
+        }
+
+        when:
+        context = futureResultCallback.get().getOperationContext().getSessionContext()
+
+        then:
+        (context as ClientSessionContext).getClientSession() == session
+    }
+
+    def 'should close client session when binding reference count drops to zero if it is owned by the binding'() {
+        given:
+        def session = Mock(ClientSession)
+        def wrappedBinding = createStubBinding()
+        def binding = new ClientSessionBinding(session, true, wrappedBinding)
+        binding.retain()
+
+        when:
+        binding.release()
+
+        then:
+        0 * session.close()
+
+        when:
+        binding.release()
+
+        then:
+        1 * session.close()
+    }
+
+    def 'should close client session when binding reference count drops to zero due to connection source if it is owned by the binding'() {
+        given:
+        def session = Mock(ClientSession)
+        def wrappedBinding = createStubBinding()
+        def binding = new ClientSessionBinding(session, true, wrappedBinding)
+        def futureResultCallback = new FutureResultCallback<AsyncConnectionSource>()
+        binding.getReadConnectionSource(futureResultCallback)
+        def readConnectionSource = futureResultCallback.get()
+        futureResultCallback = new FutureResultCallback<AsyncConnectionSource>()
+        binding.getWriteConnectionSource(futureResultCallback)
+        def writeConnectionSource = futureResultCallback.get()
+
+        when:
+        binding.release()
+
+        then:
+        0 * session.close()
+
+        when:
+        writeConnectionSource.release()
+
+        then:
+        0 * session.close()
+
+        when:
+        readConnectionSource.release()
+
+        then:
+        1 * session.close()
+    }
+
+    def 'should not close client session when binding reference count drops to zero if it is not owned by the binding'() {
+        given:
+        def session = Mock(ClientSession)
+        def wrappedBinding = createStubBinding()
+        def binding = new ClientSessionBinding(session, false, wrappedBinding)
+        binding.retain()
+
+        when:
+        binding.release()
+
+        then:
+        0 * session.close()
+
+        when:
+        binding.release()
+
+        then:
+        0 * session.close()
+    }
+
+    def 'owned session is implicit'() {
+        given:
+        def session = Mock(ClientSession)
+        def wrappedBinding = createStubBinding()
+
+        when:
+        def binding = new ClientSessionBinding(session, ownsSession, wrappedBinding)
+
+        then:
+        binding.getOperationContext().getSessionContext().isImplicitSession() == ownsSession
+
+        where:
+        ownsSession << [true, false]
+    }
+
+    private AsyncClusterAwareReadWriteBinding createStubBinding() {
+        def cluster = Mock(Cluster) {
+            selectServerAsync(_, _, _) >> {
+                it.last().onResult(new ServerTuple(Stub(Server), ServerDescription.builder()
+                        .type(ServerType.STANDALONE)
+                        .state(ServerConnectionState.CONNECTED)
+                        .address(new ServerAddress())
+                        .build()), null)
+            }
+        }
+        new AsyncClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT)
+    }
+}
+ */
+
+package com.mongodb.reactivestreams.client.internal;
+
+import com.mongodb.MongoException;
+import com.mongodb.ReadPreference;
+import com.mongodb.internal.operation.DistinctOperation;
+import com.mongodb.reactivestreams.client.DistinctPublisher;
+import org.bson.BsonDocument;
+import org.bson.Document;
+import org.bson.codecs.configuration.CodecConfigurationException;
+import org.junit.jupiter.api.DisplayName;
+import org.junit.jupiter.api.Test;
+import org.reactivestreams.Publisher;
+import reactor.core.publisher.Flux;
+
+import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry;
+import static java.util.Arrays.asList;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+public class DistinctPublisherImplTest extends TestHelper {
+
+    @DisplayName("Should build the expected DistinctOperation")
+    @Test
+    void shouldBuildTheExpectedOperation() {
+        String fieldName = "fieldName";
+        TestOperationExecutor executor = createOperationExecutor(asList(getBatchCursor(), getBatchCursor()));
+        DistinctPublisher<Document> publisher =
+                new DistinctPublisherImpl<>(null, createMongoOperationPublisher(executor), fieldName, new Document());
+
+        DistinctOperation<Document> expectedOperation = new DistinctOperation<>(NAMESPACE, fieldName,
+                getDefaultCodecRegistry().get(Document.class))
+                .retryReads(true).filter(new BsonDocument());
+
+        // default input should be as expected
+        Flux.from(publisher).blockFirst();
+
+        assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation());
+        assertEquals(ReadPreference.primary(), executor.getReadPreference());
+
+        // Should apply settings
+        BsonDocument filter = BsonDocument.parse("{a: 1}");
+        publisher
+                .batchSize(100)
+                .collation(COLLATION)
+                .filter(filter);
+
+        expectedOperation
+                .collation(COLLATION)
+                .filter(filter);
+
+        configureBatchCursor();
+        Flux.from(publisher).blockFirst();
+        assertEquals(ReadPreference.primary(), executor.getReadPreference());
+        assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation());
+    }
+
+
+    @DisplayName("Should handle error scenarios")
+    @Test
+    void shouldHandleErrorScenarios() {
+        TestOperationExecutor executor = createOperationExecutor(asList(new MongoException("Failure"), null));
+
+        // Operation fails
+        Publisher<Document> publisher =
+                new DistinctPublisherImpl<>(null, createMongoOperationPublisher(executor), "fieldName", new Document());
+        assertThrows(MongoException.class, () -> Flux.from(publisher).blockFirst());
+
+        // Missing Codec
+        Publisher<Document> publisherMissingCodec =
+                new DistinctPublisherImpl<>(null, createMongoOperationPublisher(executor).withCodecRegistry(BSON_CODEC_REGISTRY),
+                        "fieldName", new Document());
+        assertThrows(CodecConfigurationException.class, () -> Flux.from(publisherMissingCodec).blockFirst());
+    }
+
+}
diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/FindPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/FindPublisherImplTest.java
new file mode 100644
index 00000000000..eab28373f2a
--- /dev/null
+++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/FindPublisherImplTest.java
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.CursorType; +import com.mongodb.MongoNamespace; +import com.mongodb.ReadPreference; +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.Sorts; +import com.mongodb.internal.operation.FindOperation; +import com.mongodb.reactivestreams.client.FindPublisher; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.Document; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import reactor.core.publisher.Flux; + +import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry; +import static java.util.Arrays.asList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.junit.jupiter.api.Assertions.assertEquals; + +public class FindPublisherImplTest extends TestHelper { + + private static final MongoNamespace NAMESPACE = new MongoNamespace("db", "coll"); + private static final Collation COLLATION = Collation.builder().locale("en").build(); + + @DisplayName("Should build the expected FindOperation") + @Test + void shouldBuildTheExpectedOperation() { + configureBatchCursor(); + + TestOperationExecutor executor = createOperationExecutor(asList(getBatchCursor(), getBatchCursor())); + FindPublisher publisher = new FindPublisherImpl<>(null, createMongoOperationPublisher(executor), new Document()); + + FindOperation expectedOperation = new FindOperation<>(NAMESPACE, + getDefaultCodecRegistry().get(Document.class)) + .batchSize(Integer.MAX_VALUE) + .retryReads(true) + .filter(new BsonDocument()); + + // default input should be as expected + Flux.from(publisher).blockFirst(); + + assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation()); + assertEquals(ReadPreference.primary(), executor.getReadPreference()); + + // Should apply settings + publisher + .filter(new Document("filter", 1)) + .sort(Sorts.ascending("sort")) + .projection(new Document("projection", 1)) + .maxTime(101, MILLISECONDS) + .maxAwaitTime(1001, MILLISECONDS) + .batchSize(100) + .limit(100) + .skip(10) + .cursorType(CursorType.NonTailable) + .noCursorTimeout(false) + .partial(false) + .collation(COLLATION) + .comment("my comment") + .hintString("a_1") + .min(new Document("min", 1)) + .max(new Document("max", 1)) + .returnKey(false) + .showRecordId(false) + .allowDiskUse(false); + + expectedOperation = new FindOperation<>(NAMESPACE, + getDefaultCodecRegistry().get(Document.class)) + .retryReads(true) + .filter(new BsonDocument()) + .allowDiskUse(false) + .batchSize(100) + .collation(COLLATION) + .comment(new BsonString("my comment")) + .cursorType(CursorType.NonTailable) + .filter(new BsonDocument("filter", new BsonInt32(1))) + .hint(new BsonString("a_1")) + .limit(100) + .max(new BsonDocument("max", new BsonInt32(1))) + .min(new BsonDocument("min", new BsonInt32(1))) + .projection(new BsonDocument("projection", new BsonInt32(1))) + .returnKey(false) + .showRecordId(false) + .skip(10) + .sort(new BsonDocument("sort", new BsonInt32(1))); + + configureBatchCursor(); + 
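// Re-running the publisher should now build an operation that reflects the settings applied above.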
+        Flux.from(publisher).blockFirst();
+        assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation());
+        assertEquals(ReadPreference.primary(), executor.getReadPreference());
+    }
+
+}
diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionNamesPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionNamesPublisherImplTest.java
new file mode 100644
index 00000000000..6613723b49d
--- /dev/null
+++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionNamesPublisherImplTest.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.internal;
+
+import com.mongodb.ReadPreference;
+import com.mongodb.internal.operation.ListCollectionsOperation;
+import com.mongodb.reactivestreams.client.ListCollectionNamesPublisher;
+import org.bson.BsonDocument;
+import org.bson.BsonInt32;
+import org.bson.Document;
+import org.junit.jupiter.api.DisplayName;
+import org.junit.jupiter.api.Test;
+import reactor.core.publisher.Flux;
+
+import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry;
+import static java.util.Arrays.asList;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+final class ListCollectionNamesPublisherImplTest extends TestHelper {
+
+    private static final String DATABASE_NAME = NAMESPACE.getDatabaseName();
+
+    @SuppressWarnings("deprecation")
+    @DisplayName("Should build the expected ListCollectionsOperation")
+    @Test
+    void shouldBuildTheExpectedOperation() {
+        TestOperationExecutor executor = createOperationExecutor(asList(getBatchCursor(), getBatchCursor()));
+        ListCollectionNamesPublisher publisher = new ListCollectionNamesPublisherImpl(
+                new ListCollectionsPublisherImpl<>(null, createMongoOperationPublisher(executor)
+                        .withDocumentClass(Document.class), true))
+                .authorizedCollections(true);
+
+        ListCollectionsOperation<Document> expectedOperation = new ListCollectionsOperation<>(DATABASE_NAME,
+                getDefaultCodecRegistry().get(Document.class))
+                .batchSize(Integer.MAX_VALUE)
+                .nameOnly(true)
+                .authorizedCollections(true)
+                .retryReads(true);
+
+        // default input should be as expected
+        Flux.from(publisher).blockFirst();
+
+        assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation());
+        assertEquals(ReadPreference.primary(), executor.getReadPreference());
+
+        // Should apply settings
+        publisher
+                .filter(new Document("filter", 1))
+                .maxTime(10, SECONDS)
+                .batchSize(100);
+
+        expectedOperation = new ListCollectionsOperation<>(DATABASE_NAME,
+                getDefaultCodecRegistry().get(Document.class))
+                .nameOnly(true)
+                .authorizedCollections(true)
+                .retryReads(true)
+                .filter(new BsonDocument("filter", new BsonInt32(1)))
+                .batchSize(100);
+
+        Flux.from(publisher).blockFirst();
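+        // The captured read operation should now reflect the filter and batch size applied above.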
+        assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation());
+        assertEquals(ReadPreference.primary(), executor.getReadPreference());
+    }
+
+}
diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImplTest.java
new file mode 100644
index 00000000000..a632edbae82
--- /dev/null
+++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListCollectionsPublisherImplTest.java
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.internal;
+
+import com.mongodb.ReadPreference;
+import com.mongodb.internal.operation.ListCollectionsOperation;
+import com.mongodb.reactivestreams.client.ListCollectionsPublisher;
+import org.bson.BsonDocument;
+import org.bson.BsonInt32;
+import org.bson.Document;
+import org.junit.jupiter.api.DisplayName;
+import org.junit.jupiter.api.Test;
+import reactor.core.publisher.Flux;
+
+import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry;
+import static java.util.Arrays.asList;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+public class ListCollectionsPublisherImplTest extends TestHelper {
+
+    private static final String DATABASE_NAME = NAMESPACE.getDatabaseName();
+
+    @DisplayName("Should build the expected ListCollectionsOperation")
+    @Test
+    void shouldBuildTheExpectedOperation() {
+        TestOperationExecutor executor = createOperationExecutor(asList(getBatchCursor(), getBatchCursor()));
+        ListCollectionsPublisher<String> publisher = new ListCollectionsPublisherImpl<>(null, createMongoOperationPublisher(executor)
+                .withDocumentClass(String.class), true);
+
+        ListCollectionsOperation<String> expectedOperation = new ListCollectionsOperation<>(DATABASE_NAME,
+                getDefaultCodecRegistry().get(String.class))
+                .batchSize(Integer.MAX_VALUE)
+                .nameOnly(true).retryReads(true);
+
+        // default input should be as expected
+        Flux.from(publisher).blockFirst();
+
+        assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation());
+        assertEquals(ReadPreference.primary(), executor.getReadPreference());
+
+        // Should apply settings
+        publisher
+                .filter(new Document("filter", 1))
+                .maxTime(100, MILLISECONDS)
+                .batchSize(100);
+
+        expectedOperation = new ListCollectionsOperation<>(DATABASE_NAME,
+                getDefaultCodecRegistry().get(String.class))
+                .nameOnly(true)
+                .retryReads(true)
+                .filter(new BsonDocument("filter", new BsonInt32(1)))
+                .batchSize(100);
+
+        Flux.from(publisher).blockFirst();
+        assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation());
+        assertEquals(ReadPreference.primary(), executor.getReadPreference());
+    }
+
+}
diff --git
a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImplTest.java new file mode 100644 index 00000000000..c19a56f14cc --- /dev/null +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListDatabasesPublisherImplTest.java @@ -0,0 +1,73 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.ReadPreference; +import com.mongodb.internal.operation.ListDatabasesOperation; +import com.mongodb.reactivestreams.client.ListDatabasesPublisher; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.Document; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import reactor.core.publisher.Flux; + +import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry; +import static java.util.Arrays.asList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.junit.jupiter.api.Assertions.assertEquals; + +public class ListDatabasesPublisherImplTest extends TestHelper { + + @DisplayName("Should build the expected ListDatabasesOperation") + @Test + void shouldBuildTheExpectedOperation() { + configureBatchCursor(); + + TestOperationExecutor executor = createOperationExecutor(asList(getBatchCursor(), getBatchCursor())); + ListDatabasesPublisher publisher = new ListDatabasesPublisherImpl<>(null, createMongoOperationPublisher(executor)); + + ListDatabasesOperation expectedOperation = new ListDatabasesOperation<>( + getDefaultCodecRegistry().get(Document.class)) + .retryReads(true); + + // default input should be as expected + Flux.from(publisher).blockFirst(); + + assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation()); + assertEquals(ReadPreference.primary(), executor.getReadPreference()); + + // Should apply settings + publisher + .authorizedDatabasesOnly(true) + .filter(new Document("filter", 1)) + .maxTime(100, MILLISECONDS) + .batchSize(100); + + expectedOperation = new ListDatabasesOperation<>( + getDefaultCodecRegistry().get(Document.class)) + .retryReads(true) + .authorizedDatabasesOnly(true) + .filter(new BsonDocument("filter", new BsonInt32(1))); + + configureBatchCursor(); + Flux.from(publisher).blockFirst(); + assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation()); + assertEquals(ReadPreference.primary(), executor.getReadPreference()); + } + +} diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImplTest.java new file mode 100644 index 00000000000..5ae221b8a02 --- /dev/null +++ 
b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/ListIndexesPublisherImplTest.java @@ -0,0 +1,71 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.MongoNamespace; +import com.mongodb.ReadPreference; +import com.mongodb.internal.operation.ListIndexesOperation; +import com.mongodb.reactivestreams.client.ListIndexesPublisher; +import org.bson.Document; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import reactor.core.publisher.Flux; + +import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry; +import static java.util.Arrays.asList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.junit.jupiter.api.Assertions.assertEquals; + +public class ListIndexesPublisherImplTest extends TestHelper { + + private static final MongoNamespace NAMESPACE = new MongoNamespace("db", "coll"); + + @DisplayName("Should build the expected ListIndexesOperation") + @Test + void shouldBuildTheExpectedOperation() { + configureBatchCursor(); + + TestOperationExecutor executor = createOperationExecutor(asList(getBatchCursor(), getBatchCursor())); + ListIndexesPublisher publisher = new ListIndexesPublisherImpl<>(null, createMongoOperationPublisher(executor)); + + ListIndexesOperation expectedOperation = + new ListIndexesOperation<>(NAMESPACE, getDefaultCodecRegistry().get(Document.class)) + .batchSize(Integer.MAX_VALUE) + .retryReads(true); + + // default input should be as expected + Flux.from(publisher).blockFirst(); + + assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation()); + assertEquals(ReadPreference.primary(), executor.getReadPreference()); + + // Should apply settings + publisher.batchSize(100) + .maxTime(100, MILLISECONDS); + + expectedOperation = + new ListIndexesOperation<>(NAMESPACE, getDefaultCodecRegistry().get(Document.class)) + .batchSize(100) + .retryReads(true); + + configureBatchCursor(); + Flux.from(publisher).blockFirst(); + assertOperationIsTheSameAs(expectedOperation, executor.getReadOperation()); + assertEquals(ReadPreference.primary(), executor.getReadPreference()); + } + +} diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MapReducePublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MapReducePublisherImplTest.java new file mode 100644 index 00000000000..c112395a818 --- /dev/null +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MapReducePublisherImplTest.java @@ -0,0 +1,172 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.MongoException; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.client.model.Sorts; +import com.mongodb.internal.operation.MapReduceStatistics; +import com.mongodb.internal.operation.MapReduceToCollectionOperation; +import com.mongodb.internal.operation.MapReduceWithInlineResultsOperation; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonJavaScript; +import org.bson.Document; +import org.bson.codecs.configuration.CodecConfigurationException; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.mockito.Mockito; +import org.reactivestreams.Publisher; +import reactor.core.publisher.Flux; + +import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry; +import static java.util.Arrays.asList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; + +@SuppressWarnings({"rawtypes", "deprecation"}) +public class MapReducePublisherImplTest extends TestHelper { + + private static final String MAP_FUNCTION = "mapFunction(){}"; + private static final String REDUCE_FUNCTION = "reduceFunction(){}"; + private static final String FINALIZE_FUNCTION = "finalizeFunction(){}"; + + @DisplayName("Should build the expected MapReduceWithInlineResultsOperation") + @Test + void shouldBuildTheExpectedMapReduceWithInlineResultsOperation() { + configureBatchCursor(); + + TestOperationExecutor executor = createOperationExecutor(asList(getBatchCursor(), getBatchCursor())); + com.mongodb.reactivestreams.client.MapReducePublisher publisher = + new MapReducePublisherImpl<>(null, createMongoOperationPublisher(executor), MAP_FUNCTION, REDUCE_FUNCTION); + + MapReduceWithInlineResultsOperation expectedOperation = new MapReduceWithInlineResultsOperation<>( + NAMESPACE, new BsonJavaScript(MAP_FUNCTION), new BsonJavaScript(REDUCE_FUNCTION), + getDefaultCodecRegistry().get(Document.class)).verbose(true); + + // default input should be as expected + Flux.from(publisher).blockFirst(); + + MapReducePublisherImpl.WrappedMapReduceReadOperation operation = + (MapReducePublisherImpl.WrappedMapReduceReadOperation) executor.getReadOperation(); + assertNotNull(operation); + assertOperationIsTheSameAs(expectedOperation, operation.getOperation()); + assertEquals(ReadPreference.primary(), executor.getReadPreference()); + + // Should apply settings + publisher + .batchSize(100) + .bypassDocumentValidation(true) + .collation(COLLATION) + .filter(new Document("filter", 1)) + .finalizeFunction(FINALIZE_FUNCTION) + .limit(999) + .maxTime(100, MILLISECONDS) + .scope(new Document("scope", 1)) + .sort(Sorts.ascending("sort")) + .verbose(false); + + expectedOperation = new MapReduceWithInlineResultsOperation<>( + NAMESPACE, new BsonJavaScript(MAP_FUNCTION), new BsonJavaScript(REDUCE_FUNCTION), + 
getDefaultCodecRegistry().get(Document.class)) + .verbose(true) + .collation(COLLATION) + .filter(BsonDocument.parse("{filter: 1}")) + .finalizeFunction(new BsonJavaScript(FINALIZE_FUNCTION)) + .limit(999) + .scope(new BsonDocument("scope", new BsonInt32(1))) + .sort(new BsonDocument("sort", new BsonInt32(1))) + .verbose(false); + + configureBatchCursor(); + Flux.from(publisher).blockFirst(); + operation = (MapReducePublisherImpl.WrappedMapReduceReadOperation) executor.getReadOperation(); + assertNotNull(operation); + assertOperationIsTheSameAs(expectedOperation, operation.getOperation()); + assertEquals(ReadPreference.primary(), executor.getReadPreference()); + } + + @DisplayName("Should build the expected MapReduceToCollectionOperation") + @Test + void shouldBuildTheExpectedMapReduceToCollectionOperation() { + MapReduceStatistics stats = Mockito.mock(MapReduceStatistics.class); + + TestOperationExecutor executor = createOperationExecutor(asList(stats, stats)); + com.mongodb.reactivestreams.client.MapReducePublisher publisher = + new MapReducePublisherImpl<>(null, createMongoOperationPublisher(executor), MAP_FUNCTION, REDUCE_FUNCTION) + .collectionName(NAMESPACE.getCollectionName()); + + MapReduceToCollectionOperation expectedOperation = new MapReduceToCollectionOperation(NAMESPACE, + new BsonJavaScript(MAP_FUNCTION), new BsonJavaScript(REDUCE_FUNCTION), NAMESPACE.getCollectionName(), + WriteConcern.ACKNOWLEDGED).verbose(true); + + // default input should be as expected + Flux.from(publisher.toCollection()).blockFirst(); + assertOperationIsTheSameAs(expectedOperation, executor.getWriteOperation()); + + // Should apply settings + publisher + .batchSize(100) + .bypassDocumentValidation(true) + .collation(COLLATION) + .filter(new Document("filter", 1)) + .finalizeFunction(FINALIZE_FUNCTION) + .limit(999) + .maxTime(100, MILLISECONDS) + .scope(new Document("scope", 1)) + .sort(Sorts.ascending("sort")) + .verbose(false); + + expectedOperation = new MapReduceToCollectionOperation(NAMESPACE, new BsonJavaScript(MAP_FUNCTION), + new BsonJavaScript(REDUCE_FUNCTION), NAMESPACE.getCollectionName(), WriteConcern.ACKNOWLEDGED) + .verbose(true) + .collation(COLLATION) + .bypassDocumentValidation(true) + .filter(BsonDocument.parse("{filter: 1}")) + .finalizeFunction(new BsonJavaScript(FINALIZE_FUNCTION)) + .limit(999) + .scope(new BsonDocument("scope", new BsonInt32(1))) + .sort(new BsonDocument("sort", new BsonInt32(1))) + .verbose(false); + + Flux.from(publisher.toCollection()).blockFirst(); + assertOperationIsTheSameAs(expectedOperation, executor.getWriteOperation()); + } + + @DisplayName("Should handle error scenarios") + @Test + void shouldHandleErrorScenarios() { + TestOperationExecutor executor = createOperationExecutor(asList(new MongoException("Failure"), null, null)); + + // Operation fails + com.mongodb.reactivestreams.client.MapReducePublisher publisher = + new MapReducePublisherImpl<>(null, createMongoOperationPublisher(executor), MAP_FUNCTION, REDUCE_FUNCTION); + assertThrows(MongoException.class, () -> Flux.from(publisher).blockFirst()); + + // toCollection inline + assertThrows(IllegalStateException.class, publisher::toCollection); + + // Missing Codec + Publisher publisherMissingCodec = + new MapReducePublisherImpl<>(null, createMongoOperationPublisher(executor) + .withCodecRegistry(BSON_CODEC_REGISTRY), MAP_FUNCTION, REDUCE_FUNCTION); + assertThrows(CodecConfigurationException.class, () -> Flux.from(publisherMissingCodec).blockFirst()); + } +} diff --git 
a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoClientImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoClientImplTest.java new file mode 100644 index 00000000000..c192ae17896 --- /dev/null +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoClientImplTest.java @@ -0,0 +1,213 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.ClientSessionOptions; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoDriverInformation; +import com.mongodb.ReadConcern; +import com.mongodb.TransactionOptions; +import com.mongodb.internal.client.model.changestream.ChangeStreamLevel; +import com.mongodb.internal.connection.ClientMetadata; +import com.mongodb.internal.connection.Cluster; +import com.mongodb.internal.mockito.MongoMockito; +import com.mongodb.internal.session.ServerSessionPool; +import com.mongodb.reactivestreams.client.ChangeStreamPublisher; +import com.mongodb.reactivestreams.client.ClientSession; +import com.mongodb.reactivestreams.client.ListDatabasesPublisher; +import org.bson.BsonDocument; +import org.bson.Document; +import org.bson.conversions.Bson; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import reactor.core.publisher.Mono; + +import java.util.List; + +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + + +public class MongoClientImplTest extends TestHelper { + + @Mock + private ClientSession clientSession; + + private final MongoClientImpl mongoClient = createMongoClient(); + private final MongoOperationPublisher mongoOperationPublisher = mongoClient.getMongoOperationPublisher(); + + @Test + void testListDatabases() { + assertAll("listDatabases", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> mongoClient.listDatabases((Class) null)), + () -> assertThrows(IllegalArgumentException.class, () -> mongoClient.listDatabases((ClientSession) null)), + () -> assertThrows(IllegalArgumentException.class, + () -> mongoClient.listDatabases(clientSession, null))), + () -> { + ListDatabasesPublisher expected = + new ListDatabasesPublisherImpl<>(null, mongoOperationPublisher); + assertPublisherIsTheSameAs(expected, mongoClient.listDatabases(), "Default"); + }, + () -> { + ListDatabasesPublisher expected = + new ListDatabasesPublisherImpl<>(clientSession, mongoOperationPublisher); + assertPublisherIsTheSameAs(expected, mongoClient.listDatabases(clientSession), "With session"); + }, + () -> { + ListDatabasesPublisher expected = + new ListDatabasesPublisherImpl<>(null, mongoOperationPublisher + 
.withDocumentClass(BsonDocument.class)); + assertPublisherIsTheSameAs(expected, mongoClient.listDatabases(BsonDocument.class), "Alternative class"); + }, + () -> { + ListDatabasesPublisher expected = + new ListDatabasesPublisherImpl<>(clientSession, mongoOperationPublisher + .withDocumentClass(BsonDocument.class)); + assertPublisherIsTheSameAs(expected, mongoClient.listDatabases(clientSession, BsonDocument.class), + "Alternative class with session"); + } + ); + } + + @Test + void testListDatabaseNames() { + assertAll("listDatabaseNames", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> mongoClient.listDatabaseNames(null))), + () -> { + ListDatabasesPublisher expected = + new ListDatabasesPublisherImpl<>(null, mongoOperationPublisher).nameOnly(true); + + assertPublisherIsTheSameAs(expected, mongoClient.listDatabaseNames(), "Default"); + }, + () -> { + ListDatabasesPublisher expected = + new ListDatabasesPublisherImpl<>(clientSession, mongoOperationPublisher).nameOnly(true); + + assertPublisherIsTheSameAs(expected, mongoClient.listDatabaseNames(clientSession), "With session"); + } + ); + } + + @Test + void testWatch() { + List pipeline = singletonList(BsonDocument.parse("{$match: {open: true}}")); + assertAll("watch", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> mongoClient.watch((Class) null)), + () -> assertThrows(IllegalArgumentException.class, () -> mongoClient.watch((List) null)), + () -> assertThrows(IllegalArgumentException.class, () -> mongoClient.watch(pipeline, null)), + () -> assertThrows(IllegalArgumentException.class, () -> mongoClient.watch((ClientSession) null)), + () -> assertThrows(IllegalArgumentException.class, () -> mongoClient.watch(null, pipeline)), + () -> assertThrows(IllegalArgumentException.class, + () -> mongoClient.watch(null, pipeline, Document.class)) + ), + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher.withDatabase("admin"), + Document.class, emptyList(), ChangeStreamLevel.CLIENT); + assertPublisherIsTheSameAs(expected, mongoClient.watch(), "Default"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher.withDatabase("admin"), + Document.class, pipeline, ChangeStreamLevel.CLIENT); + assertPublisherIsTheSameAs(expected, mongoClient.watch(pipeline), "With pipeline"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher.withDatabase("admin"), + BsonDocument.class, emptyList(), ChangeStreamLevel.CLIENT); + assertPublisherIsTheSameAs(expected, mongoClient.watch(BsonDocument.class), + "With result class"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher.withDatabase("admin"), + BsonDocument.class, pipeline, ChangeStreamLevel.CLIENT); + assertPublisherIsTheSameAs(expected, mongoClient.watch(pipeline, BsonDocument.class), + "With pipeline & result class"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(clientSession, mongoOperationPublisher.withDatabase("admin"), + Document.class, emptyList(), ChangeStreamLevel.CLIENT); + assertPublisherIsTheSameAs(expected, mongoClient.watch(clientSession), "with session"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(clientSession, mongoOperationPublisher.withDatabase("admin"), + Document.class, pipeline, 
ChangeStreamLevel.CLIENT); + assertPublisherIsTheSameAs(expected, mongoClient.watch(clientSession, pipeline), "With session & pipeline"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(clientSession, mongoOperationPublisher.withDatabase("admin"), + BsonDocument.class, emptyList(), ChangeStreamLevel.CLIENT); + assertPublisherIsTheSameAs(expected, mongoClient.watch(clientSession, BsonDocument.class), + "With session & resultClass"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(clientSession, mongoOperationPublisher.withDatabase("admin"), + BsonDocument.class, pipeline, ChangeStreamLevel.CLIENT); + assertPublisherIsTheSameAs(expected, mongoClient.watch(clientSession, pipeline, BsonDocument.class), + "With clientSession, pipeline & result class"); + } + ); + } + + @Test + void testStartSession() { + ServerSessionPool serverSessionPool = mock(ServerSessionPool.class); + ClientSessionHelper clientSessionHelper = new ClientSessionHelper(mongoClient, serverSessionPool); + + assertAll("Start Session Tests", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> mongoClient.startSession(null)) + ), + () -> { + Mono expected = clientSessionHelper + .createClientSessionMono(ClientSessionOptions.builder().build(), OPERATION_EXECUTOR); + assertPublisherIsTheSameAs(expected, mongoClient.startSession(), "Default"); + }, + () -> { + ClientSessionOptions options = ClientSessionOptions.builder() + .causallyConsistent(true) + .defaultTransactionOptions(TransactionOptions.builder().readConcern(ReadConcern.LINEARIZABLE).build()) + .build(); + Mono expected = + clientSessionHelper.createClientSessionMono(options, OPERATION_EXECUTOR); + assertPublisherIsTheSameAs(expected, mongoClient.startSession(options), "with options"); + }); + } + + private MongoClientImpl createMongoClient() { + MongoDriverInformation mongoDriverInformation = MongoDriverInformation.builder().driverName("reactive-streams").build(); + Cluster mock = MongoMockito.mock(Cluster.class, cluster -> { + when(cluster.getClientMetadata()) + .thenReturn(new ClientMetadata("test", mongoDriverInformation)); + }); + return new MongoClientImpl(MongoClientSettings.builder().build(), + mongoDriverInformation, mock, OPERATION_EXECUTOR); + } +} diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoClusterImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoClusterImplTest.java new file mode 100644 index 00000000000..b79d3a645d9 --- /dev/null +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoClusterImplTest.java @@ -0,0 +1,237 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.ClientSessionOptions; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.TransactionOptions; +import com.mongodb.WriteConcern; +import com.mongodb.internal.client.model.changestream.ChangeStreamLevel; +import com.mongodb.internal.connection.Cluster; +import com.mongodb.internal.session.ServerSessionPool; +import com.mongodb.reactivestreams.client.ChangeStreamPublisher; +import com.mongodb.reactivestreams.client.ClientSession; +import com.mongodb.reactivestreams.client.ListDatabasesPublisher; +import com.mongodb.reactivestreams.client.MongoCluster; +import org.bson.BsonDocument; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistries; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import reactor.core.publisher.Mono; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mock; + + +public class MongoClusterImplTest extends TestHelper { + + @Mock + private ClientSession clientSession; + + private final MongoClusterImpl mongoCluster = createMongoCluster(); + private final MongoOperationPublisher mongoOperationPublisher = mongoCluster.getMongoOperationPublisher(); + + @Test + public void withCodecRegistry() { + // Cannot do equality test as registries are wrapped + CodecRegistry codecRegistry = CodecRegistries.fromCodecs(new MyLongCodec()); + MongoCluster newMongoCluster = mongoCluster.withCodecRegistry(codecRegistry); + assertTrue(newMongoCluster.getCodecRegistry().get(Long.class) instanceof TestHelper.MyLongCodec); + } + + @Test + public void withReadConcern() { + assertEquals(ReadConcern.AVAILABLE, mongoCluster.withReadConcern(ReadConcern.AVAILABLE).getReadConcern()); + } + + @Test + public void withReadPreference() { + assertEquals(ReadPreference.secondaryPreferred(), mongoCluster.withReadPreference(ReadPreference.secondaryPreferred()) + .getReadPreference()); + } + + @Test + public void withTimeout() { + assertEquals(1000, mongoCluster.withTimeout(1000, TimeUnit.MILLISECONDS).getTimeout(TimeUnit.MILLISECONDS)); + } + + @Test + public void withWriteConcern() { + assertEquals(WriteConcern.MAJORITY, mongoCluster.withWriteConcern(WriteConcern.MAJORITY).getWriteConcern()); + } + + @Test + void testListDatabases() { + assertAll("listDatabases", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> mongoCluster.listDatabases((Class) null)), + () -> assertThrows(IllegalArgumentException.class, () -> mongoCluster.listDatabases((ClientSession) null)), + () -> assertThrows(IllegalArgumentException.class, + () -> mongoCluster.listDatabases(clientSession, null))), + () -> { + ListDatabasesPublisher expected = + new ListDatabasesPublisherImpl<>(null, mongoOperationPublisher); + assertPublisherIsTheSameAs(expected, mongoCluster.listDatabases(), "Default"); + }, + () -> { + ListDatabasesPublisher expected = + new ListDatabasesPublisherImpl<>(clientSession, mongoOperationPublisher); + assertPublisherIsTheSameAs(expected, 
mongoCluster.listDatabases(clientSession), "With session"); + }, + () -> { + ListDatabasesPublisher expected = + new ListDatabasesPublisherImpl<>(null, mongoOperationPublisher + .withDocumentClass(BsonDocument.class)); + assertPublisherIsTheSameAs(expected, mongoCluster.listDatabases(BsonDocument.class), "Alternative class"); + }, + () -> { + ListDatabasesPublisher expected = + new ListDatabasesPublisherImpl<>(clientSession, mongoOperationPublisher + .withDocumentClass(BsonDocument.class)); + assertPublisherIsTheSameAs(expected, mongoCluster.listDatabases(clientSession, BsonDocument.class), + "Alternative class with session"); + } + ); + } + + @Test + void testListDatabaseNames() { + assertAll("listDatabaseNames", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> mongoCluster.listDatabaseNames(null))), + () -> { + ListDatabasesPublisher expected = + new ListDatabasesPublisherImpl<>(null, mongoOperationPublisher).nameOnly(true); + + assertPublisherIsTheSameAs(expected, mongoCluster.listDatabaseNames(), "Default"); + }, + () -> { + ListDatabasesPublisher expected = + new ListDatabasesPublisherImpl<>(clientSession, mongoOperationPublisher).nameOnly(true); + + assertPublisherIsTheSameAs(expected, mongoCluster.listDatabaseNames(clientSession), "With session"); + } + ); + } + + @Test + void testWatch() { + List pipeline = singletonList(BsonDocument.parse("{$match: {open: true}}")); + assertAll("watch", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> mongoCluster.watch((Class) null)), + () -> assertThrows(IllegalArgumentException.class, () -> mongoCluster.watch((List) null)), + () -> assertThrows(IllegalArgumentException.class, () -> mongoCluster.watch(pipeline, null)), + () -> assertThrows(IllegalArgumentException.class, () -> mongoCluster.watch((ClientSession) null)), + () -> assertThrows(IllegalArgumentException.class, () -> mongoCluster.watch(null, pipeline)), + () -> assertThrows(IllegalArgumentException.class, + () -> mongoCluster.watch(null, pipeline, Document.class)) + ), + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher.withDatabase("admin"), + Document.class, emptyList(), ChangeStreamLevel.CLIENT); + assertPublisherIsTheSameAs(expected, mongoCluster.watch(), "Default"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher.withDatabase("admin"), + Document.class, pipeline, ChangeStreamLevel.CLIENT); + assertPublisherIsTheSameAs(expected, mongoCluster.watch(pipeline), "With pipeline"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher.withDatabase("admin"), + BsonDocument.class, emptyList(), ChangeStreamLevel.CLIENT); + assertPublisherIsTheSameAs(expected, mongoCluster.watch(BsonDocument.class), + "With result class"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher.withDatabase("admin"), + BsonDocument.class, pipeline, ChangeStreamLevel.CLIENT); + assertPublisherIsTheSameAs(expected, mongoCluster.watch(pipeline, BsonDocument.class), + "With pipeline & result class"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(clientSession, mongoOperationPublisher.withDatabase("admin"), + Document.class, emptyList(), ChangeStreamLevel.CLIENT); + assertPublisherIsTheSameAs(expected, mongoCluster.watch(clientSession), "with 
session"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(clientSession, mongoOperationPublisher.withDatabase("admin"), + Document.class, pipeline, ChangeStreamLevel.CLIENT); + assertPublisherIsTheSameAs(expected, mongoCluster.watch(clientSession, pipeline), "With session & pipeline"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(clientSession, mongoOperationPublisher.withDatabase("admin"), + BsonDocument.class, emptyList(), ChangeStreamLevel.CLIENT); + assertPublisherIsTheSameAs(expected, mongoCluster.watch(clientSession, BsonDocument.class), + "With session & resultClass"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(clientSession, mongoOperationPublisher.withDatabase("admin"), + BsonDocument.class, pipeline, ChangeStreamLevel.CLIENT); + assertPublisherIsTheSameAs(expected, mongoCluster.watch(clientSession, pipeline, BsonDocument.class), + "With clientSession, pipeline & result class"); + } + ); + } + + @Test + void testStartSession() { + MongoClusterImpl mongoCluster = createMongoCluster(); + + // Validation + assertThrows(IllegalArgumentException.class, () -> mongoCluster.startSession(null)); + + // Default + Mono expected = mongoCluster.getClientSessionHelper() + .createClientSessionMono(ClientSessionOptions.builder().build(), OPERATION_EXECUTOR); + assertPublisherIsTheSameAs(expected, mongoCluster.startSession(), "Default"); + + // with options + ClientSessionOptions options = ClientSessionOptions.builder() + .causallyConsistent(true) + .defaultTransactionOptions(TransactionOptions.builder().readConcern(ReadConcern.LINEARIZABLE).build()) + .build(); + expected = mongoCluster.getClientSessionHelper().createClientSessionMono(options, OPERATION_EXECUTOR); + assertPublisherIsTheSameAs(expected, mongoCluster.startSession(options), "with options"); + + } + + private MongoClusterImpl createMongoCluster() { + return new MongoClusterImpl(mock(Cluster.class), null, OPERATION_EXECUTOR, mock(ServerSessionPool.class), + mock(ClientSessionHelper.class), OPERATION_PUBLISHER); + } +} diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoCollectionImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoCollectionImplTest.java new file mode 100644 index 00000000000..97b7bbf0d78 --- /dev/null +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoCollectionImplTest.java @@ -0,0 +1,1143 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.mongodb.reactivestreams.client.internal;
+
+import com.mongodb.CreateIndexCommitQuorum;
+import com.mongodb.MongoNamespace;
+import com.mongodb.ReadConcern;
+import com.mongodb.ReadPreference;
+import com.mongodb.WriteConcern;
+import com.mongodb.bulk.BulkWriteResult;
+import com.mongodb.client.model.BulkWriteOptions;
+import com.mongodb.client.model.Collation;
+import com.mongodb.client.model.CountOptions;
+import com.mongodb.client.model.CreateIndexOptions;
+import com.mongodb.client.model.DeleteOptions;
+import com.mongodb.client.model.DropCollectionOptions;
+import com.mongodb.client.model.DropIndexOptions;
+import com.mongodb.client.model.EstimatedDocumentCountOptions;
+import com.mongodb.client.model.FindOneAndDeleteOptions;
+import com.mongodb.client.model.FindOneAndReplaceOptions;
+import com.mongodb.client.model.FindOneAndUpdateOptions;
+import com.mongodb.client.model.IndexModel;
+import com.mongodb.client.model.IndexOptions;
+import com.mongodb.client.model.Indexes;
+import com.mongodb.client.model.InsertManyOptions;
+import com.mongodb.client.model.InsertOneModel;
+import com.mongodb.client.model.InsertOneOptions;
+import com.mongodb.client.model.RenameCollectionOptions;
+import com.mongodb.client.model.ReplaceOptions;
+import com.mongodb.client.model.UpdateOptions;
+import com.mongodb.client.model.WriteModel;
+import com.mongodb.client.result.DeleteResult;
+import com.mongodb.client.result.InsertManyResult;
+import com.mongodb.client.result.InsertOneResult;
+import com.mongodb.client.result.UpdateResult;
+import com.mongodb.internal.client.model.AggregationLevel;
+import com.mongodb.internal.client.model.changestream.ChangeStreamLevel;
+import com.mongodb.reactivestreams.client.AggregatePublisher;
+import com.mongodb.reactivestreams.client.ChangeStreamPublisher;
+import com.mongodb.reactivestreams.client.ClientSession;
+import com.mongodb.reactivestreams.client.DistinctPublisher;
+import com.mongodb.reactivestreams.client.FindPublisher;
+import com.mongodb.reactivestreams.client.ListIndexesPublisher;
+import com.mongodb.reactivestreams.client.MongoCollection;
+import org.bson.BsonDocument;
+import org.bson.Document;
+import org.bson.codecs.configuration.CodecRegistries;
+import org.bson.codecs.configuration.CodecRegistry;
+import org.bson.conversions.Bson;
+import org.junit.jupiter.api.Test;
+import org.mockito.Mock;
+import org.reactivestreams.Publisher;
+
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import static java.util.Collections.emptyList;
+import static java.util.Collections.singletonList;
+import static org.junit.jupiter.api.Assertions.assertAll;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+
+public class MongoCollectionImplTest extends TestHelper {
+    @Mock
+    private ClientSession clientSession;
+
+    private final MongoCollectionImpl<Document> collection =
+            new MongoCollectionImpl<>(OPERATION_PUBLISHER.withNamespace(new MongoNamespace("db.coll")));
+    private final MongoOperationPublisher<Document> mongoOperationPublisher = collection.getPublisherHelper();
+
+    private final Bson filter = BsonDocument.parse("{$match: {open: true}}");
+    private final List<Bson> pipeline = singletonList(filter);
+    private final Collation collation = Collation.builder().locale("de").build();
+
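+    // The collection under test wraps TestHelper's shared OPERATION_PUBLISHER, so each test
+    // builds its expected publisher from the same MongoOperationPublisher and compares it
+    // with the publisher returned by the MongoCollection method under test.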
+    @Test
+    public void withDocumentClass() {
+        assertEquals(BsonDocument.class, collection.withDocumentClass(BsonDocument.class).getDocumentClass());
+    }
+
+    @Test
+    public void withCodecRegistry() {
+        // Cannot do equality test as registries are wrapped
+        CodecRegistry codecRegistry = CodecRegistries.fromCodecs(new MyLongCodec());
+        MongoCollection<Document> newCollection = collection.withCodecRegistry(codecRegistry);
+        assertTrue(newCollection.getCodecRegistry().get(Long.class) instanceof TestHelper.MyLongCodec);
+    }
+
+    @Test
+    public void withReadConcern() {
+        assertEquals(ReadConcern.AVAILABLE, collection.withReadConcern(ReadConcern.AVAILABLE).getReadConcern());
+    }
+
+    @Test
+    public void withReadPreference() {
+        assertEquals(ReadPreference.secondaryPreferred(), collection.withReadPreference(ReadPreference.secondaryPreferred())
+                .getReadPreference());
+    }
+
+    @Test
+    public void withTimeout() {
+        assertEquals(1000, collection.withTimeout(1000, TimeUnit.MILLISECONDS).getTimeout(TimeUnit.MILLISECONDS));
+    }
+
+    @Test
+    public void withWriteConcern() {
+        assertEquals(WriteConcern.MAJORITY, collection.withWriteConcern(WriteConcern.MAJORITY).getWriteConcern());
+    }
+
+    @Test
+    void testAggregate() {
+        assertAll("Aggregate tests",
+                () -> assertAll("check validation",
+                        () -> assertThrows(IllegalArgumentException.class, () -> collection.aggregate(null)),
+                        () -> assertThrows(IllegalArgumentException.class, () -> collection.aggregate(clientSession, null))
+                ),
+                () -> {
+                    AggregatePublisher<Document> expected =
+                            new AggregatePublisherImpl<>(null, mongoOperationPublisher, pipeline, AggregationLevel.COLLECTION);
+                    assertPublisherIsTheSameAs(expected, collection.aggregate(pipeline), "Default");
+                },
+                () -> {
+                    AggregatePublisher<BsonDocument> expected =
+                            new AggregatePublisherImpl<>(null, mongoOperationPublisher.withDocumentClass(BsonDocument.class),
+                                    pipeline, AggregationLevel.COLLECTION);
+                    assertPublisherIsTheSameAs(expected, collection.aggregate(pipeline, BsonDocument.class),
+                            "With result class");
+                },
+                () -> {
+                    AggregatePublisher<Document> expected =
+                            new AggregatePublisherImpl<>(clientSession, mongoOperationPublisher, pipeline, AggregationLevel.COLLECTION);
+                    assertPublisherIsTheSameAs(expected, collection.aggregate(clientSession, pipeline), "With session");
+                },
+                () -> {
+                    AggregatePublisher<BsonDocument> expected =
+                            new AggregatePublisherImpl<>(clientSession, mongoOperationPublisher.withDocumentClass(BsonDocument.class),
+                                    pipeline, AggregationLevel.COLLECTION);
+                    assertPublisherIsTheSameAs(expected, collection.aggregate(clientSession, pipeline, BsonDocument.class),
+                            "With session & result class");
+                }
+        );
+    }
+
+    @Test
+    public void testBulkWrite() {
+        List<WriteModel<Document>> requests = singletonList(new InsertOneModel<>(new Document()));
+        BulkWriteOptions options = new BulkWriteOptions().ordered(false);
+
+        assertAll("bulkWrite",
+                () -> assertAll("check validation",
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.bulkWrite(null)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.bulkWrite(requests, null)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.bulkWrite(clientSession, null)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.bulkWrite(clientSession, requests, null)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.bulkWrite(null, requests)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.bulkWrite(null, requests, options))
+                ),
+                () -> {
+                    Publisher<BulkWriteResult> expected = mongoOperationPublisher.bulkWrite(null, requests, new BulkWriteOptions());
+                    assertPublisherIsTheSameAs(expected,
collection.bulkWrite(requests), "Default"); + }, + () -> { + Publisher expected = mongoOperationPublisher.bulkWrite(null, requests, options); + assertPublisherIsTheSameAs(expected, collection.bulkWrite(requests, options), "With options"); + }, + () -> { + Publisher expected = + mongoOperationPublisher.bulkWrite(clientSession, requests, new BulkWriteOptions()); + assertPublisherIsTheSameAs(expected, collection.bulkWrite(clientSession, requests), "With client session"); + }, + () -> { + Publisher expected = mongoOperationPublisher.bulkWrite(clientSession, requests, options); + assertPublisherIsTheSameAs(expected, collection.bulkWrite(clientSession, requests, options), + "With client session & options"); + } + ); + } + + @Test + public void testCountDocuments() { + CountOptions options = new CountOptions().collation(Collation.builder().locale("de").build()); + + assertAll("countDocuments", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, + () -> collection.countDocuments((Bson) null)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.countDocuments(filter, null)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.countDocuments((ClientSession) null)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.countDocuments(clientSession, null)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.countDocuments(clientSession, filter, null)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.countDocuments(null, filter)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.countDocuments(null, filter, options)) + ), + () -> { + Publisher expected = mongoOperationPublisher.countDocuments(null, new BsonDocument(), new CountOptions()); + assertPublisherIsTheSameAs(expected, collection.countDocuments(), "Default"); + }, + () -> { + Publisher expected = mongoOperationPublisher.countDocuments(null, filter, new CountOptions()); + assertPublisherIsTheSameAs(expected, collection.countDocuments(filter), "With filter"); + }, + () -> { + Publisher expected = mongoOperationPublisher.countDocuments(null, filter, options); + assertPublisherIsTheSameAs(expected, collection.countDocuments(filter, options), "With filter & options"); + }, + () -> { + Publisher expected = + mongoOperationPublisher.countDocuments(clientSession, new BsonDocument(), new CountOptions()); + assertPublisherIsTheSameAs(expected, collection.countDocuments(clientSession), "With client session"); + }, + () -> { + Publisher expected = mongoOperationPublisher.countDocuments(clientSession, filter, new CountOptions()); + assertPublisherIsTheSameAs(expected, collection.countDocuments(clientSession, filter), + "With client session & filter"); + }, + () -> { + Publisher expected = mongoOperationPublisher.countDocuments(clientSession, filter, options); + assertPublisherIsTheSameAs(expected, collection.countDocuments(clientSession, filter, options), + "With client session, filter & options"); + } + ); + } + + @Test + public void testCreateIndex() { + Bson key = BsonDocument.parse("{key: 1}"); + IndexOptions indexOptions = new IndexOptions(); + IndexOptions customOptions = new IndexOptions().background(true).bits(9); + + + assertAll("createIndex", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, + () -> collection.createIndex(null)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.createIndex(key, null)), + () -> 
assertThrows(IllegalArgumentException.class,
+                                () -> collection.createIndex(clientSession, null)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.createIndex(clientSession, key, null)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.createIndex(null, key)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.createIndex(null, key, indexOptions))
+                ),
+                () -> {
+                    Publisher<String> expected = mongoOperationPublisher.createIndex(null, key, new IndexOptions());
+                    assertPublisherIsTheSameAs(expected, collection.createIndex(key), "Default");
+                },
+                () -> {
+                    Publisher<String> expected = mongoOperationPublisher.createIndex(null, key, customOptions);
+                    assertPublisherIsTheSameAs(expected, collection.createIndex(key, customOptions), "With custom options");
+                },
+                () -> {
+                    Publisher<String> expected = mongoOperationPublisher.createIndex(clientSession, key, new IndexOptions());
+                    assertPublisherIsTheSameAs(expected, collection.createIndex(clientSession, key), "With client session");
+                },
+                () -> {
+                    Publisher<String> expected = mongoOperationPublisher.createIndex(clientSession, key, customOptions);
+                    assertPublisherIsTheSameAs(expected, collection.createIndex(clientSession, key, customOptions),
+                            "With client session & custom options");
+                }
+        );
+    }
+
+    @Test
+    public void testCreateIndexes() {
+        Bson key = BsonDocument.parse("{key: 1}");
+        CreateIndexOptions createIndexOptions = new CreateIndexOptions();
+        CreateIndexOptions customCreateIndexOptions = new CreateIndexOptions().commitQuorum(CreateIndexCommitQuorum.VOTING_MEMBERS);
+        List<IndexModel> indexes = singletonList(new IndexModel(key, new IndexOptions().background(true).bits(9)));
+        assertAll("createIndexes",
+                () -> assertAll("check validation",
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.createIndexes(null)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.createIndexes(indexes, null)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.createIndexes(clientSession, null)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.createIndexes(clientSession, indexes, null)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.createIndexes(null, indexes)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.createIndexes(null, indexes, createIndexOptions))
+                ),
+                () -> {
+                    Publisher<String> expected = mongoOperationPublisher.createIndexes(null, indexes, createIndexOptions);
+                    assertPublisherIsTheSameAs(expected, collection.createIndexes(indexes), "Default");
+                },
+                () -> {
+                    Publisher<String> expected = mongoOperationPublisher.createIndexes(null, indexes, customCreateIndexOptions);
+                    assertPublisherIsTheSameAs(expected, collection.createIndexes(indexes, customCreateIndexOptions),
+                            "With custom options");
+                },
+                () -> {
+                    Publisher<String> expected =
+                            mongoOperationPublisher.createIndexes(clientSession, indexes, createIndexOptions);
+                    assertPublisherIsTheSameAs(expected, collection.createIndexes(clientSession, indexes), "With client session");
+                },
+                () -> {
+                    Publisher<String> expected =
+                            mongoOperationPublisher.createIndexes(clientSession, indexes, customCreateIndexOptions);
+                    assertPublisherIsTheSameAs(expected, collection.createIndexes(clientSession, indexes, customCreateIndexOptions),
+                            "With client session & custom options");
+                }
+        );
+    }
+
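+    // Hypothetical usage sketch, not exercised by these tests: createIndex and createIndexes
+    // publish the generated index name(s) once a subscriber requests them. Assuming Project
+    // Reactor on the classpath, a caller might collect the names like so:
+    //   List<String> names = Flux.from(collection.createIndexes(
+    //           singletonList(new IndexModel(Indexes.ascending("a")))))
+    //           .collectList().block(); // e.g. ["a_1"]
+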
validation", + () -> assertThrows(IllegalArgumentException.class, () -> collection.deleteOne(null)), + () -> assertThrows(IllegalArgumentException.class, () -> collection.deleteOne(filter, null)), + () -> assertThrows(IllegalArgumentException.class, () -> collection.deleteOne(clientSession, null)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.deleteOne(clientSession, filter, null)), + () -> assertThrows(IllegalArgumentException.class, () -> collection.deleteOne(null, filter)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.deleteOne(clientSession, filter, null)) + ), + () -> { + Publisher expected = mongoOperationPublisher.deleteOne(null, filter, new DeleteOptions()); + assertPublisherIsTheSameAs(expected, collection.deleteOne(filter), "Default"); + }, + () -> { + Publisher expected = mongoOperationPublisher.deleteOne(null, filter, customOptions); + assertPublisherIsTheSameAs(expected, collection.deleteOne(filter, customOptions), "With options"); + }, + () -> { + Publisher expected = mongoOperationPublisher.deleteOne(clientSession, filter, new DeleteOptions()); + assertPublisherIsTheSameAs(expected, collection.deleteOne(clientSession, filter), "With client session"); + }, + () -> { + Publisher expected = mongoOperationPublisher.deleteOne(clientSession, filter, customOptions); + assertPublisherIsTheSameAs(expected, collection.deleteOne(clientSession, filter, customOptions), + "With client session & options"); + } + ); + } + + @Test + public void testDeleteMany() { + DeleteOptions customOptions = new DeleteOptions().collation(collation); + assertAll("deleteMany", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> collection.deleteMany(null)), + () -> assertThrows(IllegalArgumentException.class, () -> collection.deleteMany(filter, null)), + () -> assertThrows(IllegalArgumentException.class, () -> collection.deleteMany(clientSession, null)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.deleteMany(clientSession, filter, null)), + () -> assertThrows(IllegalArgumentException.class, () -> collection.deleteMany(null, filter)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.deleteMany(clientSession, filter, null)) + ), + + () -> { + Publisher expected = mongoOperationPublisher.deleteMany(null, filter, new DeleteOptions()); + assertPublisherIsTheSameAs(expected, collection.deleteMany(filter), "Default"); + }, + () -> { + Publisher expected = mongoOperationPublisher.deleteMany(null, filter, customOptions); + assertPublisherIsTheSameAs(expected, collection.deleteMany(filter, customOptions), "With options"); + }, + () -> { + Publisher expected = mongoOperationPublisher.deleteMany(clientSession, filter, new DeleteOptions()); + assertPublisherIsTheSameAs(expected, collection.deleteMany(clientSession, filter), "With client session"); + }, + () -> { + Publisher expected = mongoOperationPublisher.deleteMany(clientSession, filter, customOptions); + assertPublisherIsTheSameAs(expected, collection.deleteMany(clientSession, filter, customOptions), + "With client session & options"); + } + ); + } + + @Test + public void testDistinct() { + String fieldName = "fieldName"; + assertAll("distinct", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, + () -> collection.distinct(null, Document.class)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.distinct(fieldName, null)), + () -> 
+    @Test
+    public void testDistinct() {
+        String fieldName = "fieldName";
+        assertAll("distinct",
+                () -> assertAll("check validation",
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.distinct(null, Document.class)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.distinct(fieldName, null)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.distinct(fieldName, null, Document.class)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.distinct(clientSession, null, Document.class)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.distinct(clientSession, fieldName, null)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.distinct(clientSession, fieldName, null, Document.class)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.distinct(null, fieldName, Document.class)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.distinct(null, fieldName, filter, Document.class))
+                ),
+                () -> {
+                    DistinctPublisher<Document> expected =
+                            new DistinctPublisherImpl<>(null, mongoOperationPublisher, fieldName, new BsonDocument());
+                    assertPublisherIsTheSameAs(expected, collection.distinct(fieldName, Document.class), "Default");
+                },
+                () -> {
+                    DistinctPublisher<BsonDocument> expected =
+                            new DistinctPublisherImpl<>(null, mongoOperationPublisher.withDocumentClass(BsonDocument.class),
+                                    fieldName, filter);
+                    assertPublisherIsTheSameAs(expected, collection.distinct(fieldName, filter, BsonDocument.class),
+                            "With filter & result class");
+                },
+                () -> {
+                    DistinctPublisher<Document> expected =
+                            new DistinctPublisherImpl<>(clientSession, mongoOperationPublisher, fieldName, new BsonDocument());
+                    assertPublisherIsTheSameAs(expected, collection.distinct(clientSession, fieldName, Document.class),
+                            "With client session");
+                },
+                () -> {
+                    DistinctPublisher<BsonDocument> expected =
+                            new DistinctPublisherImpl<>(clientSession, mongoOperationPublisher.withDocumentClass(BsonDocument.class),
+                                    fieldName, filter);
+                    assertPublisherIsTheSameAs(expected, collection.distinct(clientSession, fieldName, filter, BsonDocument.class),
+                            "With client session, filter & result class");
+                }
+        );
+    }
+
+    @Test
+    public void testDrop() {
+        DropCollectionOptions dropCollectionOptions = new DropCollectionOptions();
+        assertAll("drop",
+                () -> assertAll("check validation",
+                        () -> assertThrows(IllegalArgumentException.class, () -> collection.drop(null, null))
+                ),
+                () -> {
+                    Publisher<Void> expected = mongoOperationPublisher.dropCollection(null, dropCollectionOptions);
+                    assertPublisherIsTheSameAs(expected, collection.drop(), "Default");
+                },
+                () -> {
+                    Publisher<Void> expected = mongoOperationPublisher.dropCollection(clientSession, dropCollectionOptions);
+                    assertPublisherIsTheSameAs(expected, collection.drop(clientSession), "With client session");
+                },
+                () -> {
+                    Publisher<Void> expected = mongoOperationPublisher.dropCollection(null, dropCollectionOptions);
+                    assertPublisherIsTheSameAs(expected, collection.drop(dropCollectionOptions), "With options");
+                },
+                () -> {
+                    Publisher<Void> expected = mongoOperationPublisher.dropCollection(clientSession, dropCollectionOptions);
+                    assertPublisherIsTheSameAs(expected, collection.drop(clientSession, dropCollectionOptions),
+                            "With client session & options");
+                }
+        );
+    }
+
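+    // Hypothetical usage sketch: drop() delegates to mongoOperationPublisher.dropCollection(...),
+    // whose Publisher<Void> emits no items, so a caller subscribes purely for the completion or
+    // error signal, e.g. Mono.from(collection.drop()).block() with Project Reactor.
+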
+    @Test
+    public void testDropIndex() {
+        String indexName = "index_name";
+        Bson index = Indexes.ascending("ascending_index");
+        DropIndexOptions options = new DropIndexOptions().maxTime(1, TimeUnit.MILLISECONDS);
+        assertAll("dropIndex",
+                () -> assertAll("check validation",
+                        () -> assertThrows(IllegalArgumentException.class, () -> collection.dropIndex((String) null)),
+                        () -> assertThrows(IllegalArgumentException.class, () -> collection.dropIndex((Bson) null)),
+                        () -> assertThrows(IllegalArgumentException.class, () -> collection.dropIndex(indexName, null)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.dropIndex(clientSession, (String) null)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.dropIndex(clientSession, (Bson) null)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.dropIndex(null, indexName)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.dropIndex(clientSession, indexName, null))
+                ),
+                () -> {
+                    Publisher<Void> expected = mongoOperationPublisher.dropIndex(null, indexName, new DropIndexOptions());
+                    assertPublisherIsTheSameAs(expected, collection.dropIndex(indexName), "Default string");
+                },
+                () -> {
+                    Publisher<Void> expected = mongoOperationPublisher.dropIndex(null, index, new DropIndexOptions());
+                    assertPublisherIsTheSameAs(expected, collection.dropIndex(index), "Default bson");
+                },
+                () -> {
+                    Publisher<Void> expected = mongoOperationPublisher.dropIndex(null, indexName, options);
+                    assertPublisherIsTheSameAs(expected, collection.dropIndex(indexName, options), "With string & options");
+                },
+                () -> {
+                    Publisher<Void> expected = mongoOperationPublisher.dropIndex(null, index, options);
+                    assertPublisherIsTheSameAs(expected, collection.dropIndex(index, options), "With bson & options");
+                },
+                () -> {
+                    Publisher<Void> expected = mongoOperationPublisher.dropIndex(clientSession, indexName, new DropIndexOptions());
+                    assertPublisherIsTheSameAs(expected, collection.dropIndex(clientSession, indexName),
+                            "With client session & string");
+                },
+                () -> {
+                    Publisher<Void> expected = mongoOperationPublisher.dropIndex(clientSession, index, new DropIndexOptions());
+                    assertPublisherIsTheSameAs(expected, collection.dropIndex(clientSession, index),
+                            "With client session & bson");
+                },
+                () -> {
+                    Publisher<Void> expected = mongoOperationPublisher.dropIndex(clientSession, indexName, options);
+                    assertPublisherIsTheSameAs(expected, collection.dropIndex(clientSession, indexName, options),
+                            "With client session, string & options");
+                },
+                () -> {
+                    Publisher<Void> expected = mongoOperationPublisher.dropIndex(clientSession, index, options);
+                    assertPublisherIsTheSameAs(expected, collection.dropIndex(clientSession, index, options),
+                            "With client session, bson & options");
+                }
+        );
+    }
+
+    @Test
+    public void testDropIndexes() {
+        DropIndexOptions options = new DropIndexOptions().maxTime(1, TimeUnit.MILLISECONDS);
+        assertAll("dropIndexes",
+                () -> assertAll("check validation",
+                        () -> assertThrows(IllegalArgumentException.class, () -> collection.dropIndexes((DropIndexOptions) null)),
+                        () -> assertThrows(IllegalArgumentException.class, () -> collection.dropIndexes(clientSession, null)),
+                        () -> assertThrows(IllegalArgumentException.class, () -> collection.dropIndexes(null, options))
+                ),
+                () -> {
+                    Publisher<Void> expected = mongoOperationPublisher.dropIndexes(null, new DropIndexOptions());
+                    assertPublisherIsTheSameAs(expected, collection.dropIndexes(), "Default");
+                },
+                () -> {
+                    Publisher<Void> expected = mongoOperationPublisher.dropIndexes(null, options);
+                    assertPublisherIsTheSameAs(expected, collection.dropIndexes(options), "With options");
+                },
+                () -> {
+                    Publisher<Void> expected = mongoOperationPublisher.dropIndexes(clientSession, new DropIndexOptions());
+                    assertPublisherIsTheSameAs(expected, collection.dropIndexes(clientSession), "With client session");
+                },
+                () -> {
+                    Publisher<Void> expected = mongoOperationPublisher.dropIndexes(clientSession, options);
+                    assertPublisherIsTheSameAs(expected, collection.dropIndexes(clientSession, options),
+                            "With client session & options");
+                }
+        );
+    }
+
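+    // estimatedDocumentCount() reads the count from collection metadata rather than scanning
+    // documents, which is why, unlike countDocuments above, it accepts no filter and no client
+    // session. Hypothetical subscriber sketch (assuming Project Reactor on the classpath):
+    //   Long estimate = Mono.from(collection.estimatedDocumentCount()).block();
+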
+ + @Test + public void testEstimatedDocumentCount() { + EstimatedDocumentCountOptions options = new EstimatedDocumentCountOptions().maxTime(1, TimeUnit.MILLISECONDS); + assertAll("estimatedDocumentCount", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> collection.estimatedDocumentCount(null)) + ), + () -> { + Publisher expected = mongoOperationPublisher.estimatedDocumentCount(new EstimatedDocumentCountOptions()); + assertPublisherIsTheSameAs(expected, collection.estimatedDocumentCount(), "Default"); + }, + () -> { + Publisher expected = mongoOperationPublisher.estimatedDocumentCount(options); + assertPublisherIsTheSameAs(expected, collection.estimatedDocumentCount(options), "With options"); + } + ); + } + + @Test + public void testFind() { + assertAll("find", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> collection.find((Bson) null)), + () -> assertThrows(IllegalArgumentException.class, () -> collection.find((Class) null)), + () -> assertThrows(IllegalArgumentException.class, () -> collection.find(filter, null)), + () -> assertThrows(IllegalArgumentException.class, () -> collection.find(clientSession, (Bson) null)), + () -> assertThrows(IllegalArgumentException.class, () -> collection.find(clientSession, (Class) null)), + () -> assertThrows(IllegalArgumentException.class, () -> collection.find(clientSession, filter, null)), + () -> assertThrows(IllegalArgumentException.class, () -> collection.find(null, filter)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.find((ClientSession) null, Document.class)), + () -> assertThrows(IllegalArgumentException.class, () -> collection.find(null, filter, Document.class)) + ), + () -> { + FindPublisher expected = + new FindPublisherImpl<>(null, mongoOperationPublisher, new BsonDocument()); + assertPublisherIsTheSameAs(expected, collection.find(), "Default"); + }, + () -> { + FindPublisher expected = + new FindPublisherImpl<>(null, mongoOperationPublisher, filter); + assertPublisherIsTheSameAs(expected, collection.find(filter), "With filter"); + }, + () -> { + FindPublisher expected = + new FindPublisherImpl<>(null, mongoOperationPublisher.withDocumentClass(BsonDocument.class), filter); + assertPublisherIsTheSameAs(expected, collection.find(filter, BsonDocument.class), "With filter & result class"); + }, + () -> { + FindPublisher expected = + new FindPublisherImpl<>(clientSession, mongoOperationPublisher, new BsonDocument()); + assertPublisherIsTheSameAs(expected, collection.find(clientSession), "With client session"); + }, + () -> { + FindPublisher expected = + new FindPublisherImpl<>(clientSession, mongoOperationPublisher, filter); + assertPublisherIsTheSameAs(expected, collection.find(clientSession, filter), "With client session & filter"); + }, + () -> { + FindPublisher expected = + new FindPublisherImpl<>(clientSession, mongoOperationPublisher.withDocumentClass(BsonDocument.class), filter); + assertPublisherIsTheSameAs(expected, collection.find(clientSession, filter, BsonDocument.class), + "With client session, filter & result class"); + } + ); + } + + @Test + public void testFindOneAndDelete() { + FindOneAndDeleteOptions options = new FindOneAndDeleteOptions().collation(collation); + assertAll("findOneAndDelete", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> collection.findOneAndDelete(null)), + () -> assertThrows(IllegalArgumentException.class, () -> 
collection.findOneAndDelete(filter, null)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.findOneAndDelete(clientSession, null)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.findOneAndDelete(clientSession, filter, null)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.findOneAndDelete(null, filter)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.findOneAndDelete(null, filter, options)) + ), + () -> { + Publisher expected = mongoOperationPublisher.findOneAndDelete(null, filter, new FindOneAndDeleteOptions()); + assertPublisherIsTheSameAs(expected, collection.findOneAndDelete(filter), "Default"); + }, + () -> { + Publisher expected = mongoOperationPublisher.findOneAndDelete(null, filter, options); + assertPublisherIsTheSameAs(expected, collection.findOneAndDelete(filter, options), "With filter & options"); + }, + () -> { + Publisher expected = + mongoOperationPublisher.findOneAndDelete(clientSession, filter, new FindOneAndDeleteOptions()); + assertPublisherIsTheSameAs(expected, collection.findOneAndDelete(clientSession, filter), "With client session"); + }, + () -> { + Publisher expected = mongoOperationPublisher.findOneAndDelete(clientSession, filter, options); + assertPublisherIsTheSameAs(expected, collection.findOneAndDelete(clientSession, filter, options), + "With client session, filter & options"); + } + ); + } + + @Test + public void testFindOneAndReplace() { + FindOneAndReplaceOptions options = new FindOneAndReplaceOptions().collation(collation); + Document replacement = new Document(); + assertAll("findOneAndReplace", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> collection.findOneAndReplace(null, replacement)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.findOneAndReplace(filter, null)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.findOneAndReplace(clientSession, null, replacement)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.findOneAndReplace(clientSession, filter, replacement, null)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.findOneAndReplace(null, filter, replacement)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.findOneAndReplace(null, filter, replacement, options)) + ), + () -> { + Publisher expected = + mongoOperationPublisher.findOneAndReplace(null, filter, replacement, new FindOneAndReplaceOptions()); + assertPublisherIsTheSameAs(expected, collection.findOneAndReplace(filter, replacement), "Default"); + }, + () -> { + Publisher expected = mongoOperationPublisher.findOneAndReplace(null, filter, replacement, options); + assertPublisherIsTheSameAs(expected, collection.findOneAndReplace(filter, replacement, options), + "With filter & options"); + }, + () -> { + Publisher expected = + mongoOperationPublisher.findOneAndReplace(clientSession, filter, replacement, new FindOneAndReplaceOptions()); + assertPublisherIsTheSameAs(expected, collection.findOneAndReplace(clientSession, filter, replacement), + "With client session"); + }, + () -> { + Publisher expected = mongoOperationPublisher.findOneAndReplace(clientSession, filter, replacement, options); + assertPublisherIsTheSameAs(expected, collection.findOneAndReplace(clientSession, filter, replacement, options), + "With client session, filter & options"); + } + ); + } + + @Test + public void testFindOneAndUpdate() { + 
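        // findOneAndUpdate emits at most one document and, by default, the document as it was
+        // before the update was applied; a hypothetical caller wanting the post-update state
+        // would pass new FindOneAndUpdateOptions().returnDocument(ReturnDocument.AFTER).
+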
FindOneAndUpdateOptions options = new FindOneAndUpdateOptions().collation(collation); + Document update = new Document(); + assertAll("findOneAndUpdate", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> collection.findOneAndUpdate(null, update)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.findOneAndUpdate(filter, (Bson) null)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.findOneAndUpdate(clientSession, null, update)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.findOneAndUpdate(clientSession, filter, update, null)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.findOneAndUpdate(null, filter, update)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.findOneAndUpdate(null, filter, update, options)) + ), + () -> { + Publisher expected = + mongoOperationPublisher.findOneAndUpdate(null, filter, update, new FindOneAndUpdateOptions()); + assertPublisherIsTheSameAs(expected, collection.findOneAndUpdate(filter, update), "Default"); + }, + () -> { + Publisher expected = mongoOperationPublisher.findOneAndUpdate(null, filter, update, options); + assertPublisherIsTheSameAs(expected, collection.findOneAndUpdate(filter, update, options), + "With filter & options"); + }, + () -> { + Publisher expected = + mongoOperationPublisher.findOneAndUpdate(clientSession, filter, update, new FindOneAndUpdateOptions()); + assertPublisherIsTheSameAs(expected, collection.findOneAndUpdate(clientSession, filter, update), + "With client session"); + }, + () -> { + Publisher expected = mongoOperationPublisher.findOneAndUpdate(clientSession, filter, update, options); + assertPublisherIsTheSameAs(expected, collection.findOneAndUpdate(clientSession, filter, update, options), + "With client session, filter & options"); + } + ); + } + + @Test + public void testInsertOne() { + InsertOneOptions options = new InsertOneOptions().bypassDocumentValidation(true); + Document insert = new Document("_id", 1); + assertAll("insertOne", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> collection.insertOne(null)), + () -> assertThrows(IllegalArgumentException.class, () -> collection.insertOne(insert, null)), + () -> assertThrows(IllegalArgumentException.class, () -> collection.insertOne(clientSession, null)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.insertOne(clientSession, insert, null)), + () -> assertThrows(IllegalArgumentException.class, () -> collection.insertOne(null, insert)), + () -> assertThrows(IllegalArgumentException.class, () -> collection.insertOne(null, insert, options)) + ), + () -> { + Publisher expected = mongoOperationPublisher.insertOne(null, insert, new InsertOneOptions()); + assertPublisherIsTheSameAs(expected, collection.insertOne(insert), "Default"); + }, + () -> { + Publisher expected = mongoOperationPublisher.insertOne(null, insert, options); + assertPublisherIsTheSameAs(expected, collection.insertOne(insert, options), "With options"); + }, + () -> { + Publisher expected = + mongoOperationPublisher.insertOne(clientSession, insert, new InsertOneOptions()); + assertPublisherIsTheSameAs(expected, collection.insertOne(clientSession, insert), "With client session"); + }, + () -> { + Publisher expected = mongoOperationPublisher.insertOne(clientSession, insert, options); + assertPublisherIsTheSameAs(expected, collection.insertOne(clientSession, insert, options), + 
"With client session & options"); + } + ); + } + + @Test + public void testInsertMany() { + InsertManyOptions options = new InsertManyOptions().bypassDocumentValidation(true); + List inserts = singletonList(new Document("_id", 1)); + assertAll("insertMany", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> collection.insertMany(null)), + () -> assertThrows(IllegalArgumentException.class, () -> collection.insertMany(inserts, null)), + () -> assertThrows(IllegalArgumentException.class, () -> collection.insertMany(clientSession, null)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.insertMany(clientSession, inserts, null)), + () -> assertThrows(IllegalArgumentException.class, () -> collection.insertMany(null, inserts)), + () -> assertThrows(IllegalArgumentException.class, () -> collection.insertMany(null, inserts, options)) + ), + () -> { + Publisher expected = mongoOperationPublisher.insertMany(null, inserts, new InsertManyOptions()); + assertPublisherIsTheSameAs(expected, collection.insertMany(inserts), "Default"); + }, + () -> { + Publisher expected = mongoOperationPublisher.insertMany(null, inserts, options); + assertPublisherIsTheSameAs(expected, collection.insertMany(inserts, options), "With options"); + }, + () -> { + Publisher expected = + mongoOperationPublisher.insertMany(clientSession, inserts, new InsertManyOptions()); + assertPublisherIsTheSameAs(expected, collection.insertMany(clientSession, inserts), "With client session"); + }, + () -> { + Publisher expected = mongoOperationPublisher.insertMany(clientSession, inserts, options); + assertPublisherIsTheSameAs(expected, collection.insertMany(clientSession, inserts, options), + "With client session & options"); + } + ); + } + + @Test + public void testListIndexes() { + assertAll("listIndexes", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> collection.listIndexes((Class) null)), + () -> assertThrows(IllegalArgumentException.class, () -> collection.listIndexes(null, Document.class)), + () -> assertThrows(IllegalArgumentException.class, () -> collection.listIndexes(clientSession, null)) + ), + () -> { + ListIndexesPublisher expected = + new ListIndexesPublisherImpl<>(null, mongoOperationPublisher); + assertPublisherIsTheSameAs(expected, collection.listIndexes(), "Default"); + }, + () -> { + ListIndexesPublisher expected = + new ListIndexesPublisherImpl<>(null, mongoOperationPublisher.withDocumentClass(BsonDocument.class)); + assertPublisherIsTheSameAs(expected, collection.listIndexes(BsonDocument.class), "With result class"); + }, + () -> { + ListIndexesPublisher expected = + new ListIndexesPublisherImpl<>(clientSession, mongoOperationPublisher); + assertPublisherIsTheSameAs(expected, collection.listIndexes(clientSession), "With client session"); + }, + () -> { + ListIndexesPublisher expected = + new ListIndexesPublisherImpl<>(clientSession, mongoOperationPublisher.withDocumentClass(BsonDocument.class)); + assertPublisherIsTheSameAs(expected, collection.listIndexes(clientSession, BsonDocument.class), + "With client session & result class"); + } + ); + } + + @SuppressWarnings("deprecation") + @Test + public void testMapReduce() { + String map = "map"; + String reduce = "reduce"; + + assertAll("mapReduce", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> collection.mapReduce(null, reduce)), + () -> assertThrows(IllegalArgumentException.class, () -> 
collection.mapReduce(map, null)),
+                        () -> assertThrows(IllegalArgumentException.class, () -> collection.mapReduce(map, reduce, null)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.mapReduce(clientSession, null, reduce)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.mapReduce(clientSession, map, null)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.mapReduce(clientSession, map, reduce, null)),
+                        () -> assertThrows(IllegalArgumentException.class, () -> collection.mapReduce(null, map, reduce)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.mapReduce(null, map, reduce, Document.class))
+                ),
+                () -> {
+                    com.mongodb.reactivestreams.client.MapReducePublisher<Document> expected =
+                            new MapReducePublisherImpl<>(null, mongoOperationPublisher, map, reduce);
+                    assertPublisherIsTheSameAs(expected, collection.mapReduce(map, reduce), "Default");
+                },
+                () -> {
+                    com.mongodb.reactivestreams.client.MapReducePublisher<BsonDocument> expected =
+                            new MapReducePublisherImpl<>(null, mongoOperationPublisher.withDocumentClass(BsonDocument.class),
+                                    map, reduce);
+                    assertPublisherIsTheSameAs(expected, collection.mapReduce(map, reduce, BsonDocument.class),
+                            "With result class");
+                },
+                () -> {
+                    com.mongodb.reactivestreams.client.MapReducePublisher<Document> expected =
+                            new MapReducePublisherImpl<>(clientSession, mongoOperationPublisher, map, reduce);
+                    assertPublisherIsTheSameAs(expected, collection.mapReduce(clientSession, map, reduce), "With client session");
+                },
+                () -> {
+                    com.mongodb.reactivestreams.client.MapReducePublisher<BsonDocument> expected =
+                            new MapReducePublisherImpl<>(clientSession, mongoOperationPublisher.withDocumentClass(BsonDocument.class),
+                                    map, reduce);
+                    assertPublisherIsTheSameAs(expected, collection.mapReduce(clientSession, map, reduce, BsonDocument.class),
+                            "With client session & result class");
+                }
+        );
+    }
+
+    @Test
+    public void testRenameCollection() {
+        MongoNamespace mongoNamespace = new MongoNamespace("db2.coll2");
+        RenameCollectionOptions options = new RenameCollectionOptions().dropTarget(true);
+        assertAll("renameCollection",
+                () -> assertAll("check validation",
+                        () -> assertThrows(IllegalArgumentException.class, () -> collection.renameCollection(null)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.renameCollection(mongoNamespace, null)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.renameCollection(clientSession, null)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.renameCollection(clientSession, mongoNamespace, null)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.renameCollection(null, mongoNamespace)),
+                        () -> assertThrows(IllegalArgumentException.class,
+                                () -> collection.renameCollection(null, mongoNamespace, options))
+                ),
+                () -> {
+                    Publisher<Void> expected =
+                            mongoOperationPublisher.renameCollection(null, mongoNamespace, new RenameCollectionOptions());
+                    assertPublisherIsTheSameAs(expected, collection.renameCollection(mongoNamespace), "Default");
+                },
+                () -> {
+                    Publisher<Void> expected = mongoOperationPublisher.renameCollection(null, mongoNamespace, options);
+                    assertPublisherIsTheSameAs(expected, collection.renameCollection(mongoNamespace, options), "With options");
+                },
+                () -> {
+                    Publisher<Void> expected =
+                            mongoOperationPublisher.renameCollection(clientSession, mongoNamespace, new RenameCollectionOptions());
+                    assertPublisherIsTheSameAs(expected, collection.renameCollection(clientSession,
mongoNamespace), + "With client session"); + }, + () -> { + Publisher expected = mongoOperationPublisher.renameCollection(clientSession, mongoNamespace, options); + assertPublisherIsTheSameAs(expected, collection.renameCollection(clientSession, mongoNamespace, options), + "With client session & options"); + } + ); + } + + @Test + public void testReplaceOne() { + ReplaceOptions options = new ReplaceOptions().collation(collation); + Document replacement = new Document(); + assertAll("replaceOne", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> collection.replaceOne(null, replacement)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.replaceOne(filter, null)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.replaceOne(clientSession, null, replacement)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.replaceOne(clientSession, filter, replacement, null)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.replaceOne(null, filter, replacement)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.replaceOne(null, filter, replacement, options)) + ), + () -> { + Publisher expected = + mongoOperationPublisher.replaceOne(null, filter, replacement, new ReplaceOptions()); + assertPublisherIsTheSameAs(expected, collection.replaceOne(filter, replacement), "Default"); + }, + () -> { + Publisher expected = mongoOperationPublisher.replaceOne(null, filter, replacement, options); + assertPublisherIsTheSameAs(expected, collection.replaceOne(filter, replacement, options), + "With filter & options"); + }, + () -> { + Publisher expected = + mongoOperationPublisher.replaceOne(clientSession, filter, replacement, new ReplaceOptions()); + assertPublisherIsTheSameAs(expected, collection.replaceOne(clientSession, filter, replacement), + "With client session"); + }, + () -> { + Publisher expected = mongoOperationPublisher.replaceOne(clientSession, filter, replacement, options); + assertPublisherIsTheSameAs(expected, collection.replaceOne(clientSession, filter, replacement, options), + "With client session, filter & options"); + } + ); + } + + @Test + public void testUpdateOne() { + UpdateOptions options = new UpdateOptions().collation(collation); + Document update = new Document(); + assertAll("updateOne", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> collection.updateOne(null, update)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.updateOne(filter, (Bson) null)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.updateOne(clientSession, null, update)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.updateOne(clientSession, filter, update, null)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.updateOne(null, filter, update)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.updateOne(null, filter, update, options)) + ), + () -> { + Publisher expected = mongoOperationPublisher.updateOne(null, filter, update, new UpdateOptions()); + assertPublisherIsTheSameAs(expected, collection.updateOne(filter, update), "Default"); + }, + () -> { + Publisher expected = mongoOperationPublisher.updateOne(null, filter, update, options); + assertPublisherIsTheSameAs(expected, collection.updateOne(filter, update, options), + "With filter & options"); + }, + () -> { + Publisher expected = + 
mongoOperationPublisher.updateOne(clientSession, filter, update, new UpdateOptions()); + assertPublisherIsTheSameAs(expected, collection.updateOne(clientSession, filter, update), + "With client session"); + }, + () -> { + Publisher expected = mongoOperationPublisher.updateOne(clientSession, filter, update, options); + assertPublisherIsTheSameAs(expected, collection.updateOne(clientSession, filter, update, options), + "With client session, filter & options"); + } + ); + } + + @Test + public void testUpdateMany() { + UpdateOptions options = new UpdateOptions().collation(collation); + List updates = singletonList(new Document()); + assertAll("updateMany", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> collection.updateMany(null, updates)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.updateMany(filter, (Bson) null)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.updateMany(clientSession, null, updates)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.updateMany(clientSession, filter, updates, null)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.updateMany(null, filter, updates)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.updateMany(null, filter, updates, options)) + ), + () -> { + Publisher expected = mongoOperationPublisher.updateMany(null, filter, updates, new UpdateOptions()); + assertPublisherIsTheSameAs(expected, collection.updateMany(filter, updates), "Default"); + }, + () -> { + Publisher expected = mongoOperationPublisher.updateMany(null, filter, updates, options); + assertPublisherIsTheSameAs(expected, collection.updateMany(filter, updates, options), + "With filter & options"); + }, + () -> { + Publisher expected = + mongoOperationPublisher.updateMany(clientSession, filter, updates, new UpdateOptions()); + assertPublisherIsTheSameAs(expected, collection.updateMany(clientSession, filter, updates), + "With client session"); + }, + () -> { + Publisher expected = mongoOperationPublisher.updateMany(clientSession, filter, updates, options); + assertPublisherIsTheSameAs(expected, collection.updateMany(clientSession, filter, updates, options), + "With client session, filter & options"); + } + ); + } + + @Test + void testWatch() { + List pipeline = singletonList(BsonDocument.parse("{$match: {open: true}}")); + assertAll("watch", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> collection.watch((Class) null)), + () -> assertThrows(IllegalArgumentException.class, () -> collection.watch((List) null)), + () -> assertThrows(IllegalArgumentException.class, () -> collection.watch(pipeline, null)), + () -> assertThrows(IllegalArgumentException.class, () -> collection.watch((ClientSession) null)), + () -> assertThrows(IllegalArgumentException.class, () -> collection.watch(null, pipeline)), + () -> assertThrows(IllegalArgumentException.class, + () -> collection.watch(null, pipeline, Document.class)) + ), + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher, Document.class, emptyList(), + ChangeStreamLevel.COLLECTION); + assertPublisherIsTheSameAs(expected, collection.watch(), "Default"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher, Document.class, pipeline, + ChangeStreamLevel.COLLECTION); + assertPublisherIsTheSameAs(expected, 
collection.watch(pipeline), "With pipeline"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher, BsonDocument.class, emptyList(), + ChangeStreamLevel.COLLECTION); + assertPublisherIsTheSameAs(expected, collection.watch(BsonDocument.class), + "With result class"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher, BsonDocument.class, pipeline, + ChangeStreamLevel.COLLECTION); + assertPublisherIsTheSameAs(expected, collection.watch(pipeline, BsonDocument.class), + "With pipeline & result class"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(clientSession, mongoOperationPublisher, Document.class, emptyList(), + ChangeStreamLevel.COLLECTION); + assertPublisherIsTheSameAs(expected, collection.watch(clientSession), "with session"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(clientSession, mongoOperationPublisher, Document.class, pipeline, + ChangeStreamLevel.COLLECTION); + assertPublisherIsTheSameAs(expected, collection.watch(clientSession, pipeline), "With session & pipeline"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(clientSession, mongoOperationPublisher, BsonDocument.class, + emptyList(), ChangeStreamLevel.COLLECTION); + + assertPublisherIsTheSameAs(expected, collection.watch(clientSession, BsonDocument.class), + "With session & resultClass"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(clientSession, mongoOperationPublisher, BsonDocument.class, pipeline, + ChangeStreamLevel.COLLECTION); + + assertPublisherIsTheSameAs(expected, collection.watch(clientSession, pipeline, BsonDocument.class), + "With clientSession, pipeline & result class"); + } + ); + } +} diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoDatabaseImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoDatabaseImplTest.java new file mode 100644 index 00000000000..f50e44a7db6 --- /dev/null +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoDatabaseImplTest.java @@ -0,0 +1,436 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.CreateViewOptions; +import com.mongodb.internal.client.model.AggregationLevel; +import com.mongodb.internal.client.model.changestream.ChangeStreamLevel; +import com.mongodb.reactivestreams.client.AggregatePublisher; +import com.mongodb.reactivestreams.client.ChangeStreamPublisher; +import com.mongodb.reactivestreams.client.ClientSession; +import com.mongodb.reactivestreams.client.ListCollectionNamesPublisher; +import com.mongodb.reactivestreams.client.ListCollectionsPublisher; +import com.mongodb.reactivestreams.client.MongoDatabase; +import org.bson.BsonDocument; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistries; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import org.reactivestreams.Publisher; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + + +public class MongoDatabaseImplTest extends TestHelper { + @Mock + private ClientSession clientSession; + + private final MongoDatabaseImpl database = new MongoDatabaseImpl(OPERATION_PUBLISHER.withDatabase("db")); + private final MongoOperationPublisher mongoOperationPublisher = database.getMongoOperationPublisher(); + + @Test + public void withCodecRegistry() { + // Cannot do equality test as registries are wrapped + CodecRegistry codecRegistry = CodecRegistries.fromCodecs(new MyLongCodec()); + MongoDatabase newDatabase = database.withCodecRegistry(codecRegistry); + assertTrue(newDatabase.getCodecRegistry().get(Long.class) instanceof TestHelper.MyLongCodec); + } + + @Test + public void withReadConcern() { + assertEquals(ReadConcern.AVAILABLE, database.withReadConcern(ReadConcern.AVAILABLE).getReadConcern()); + } + + @Test + public void withReadPreference() { + assertEquals(ReadPreference.secondaryPreferred(), database.withReadPreference(ReadPreference.secondaryPreferred()) + .getReadPreference()); + } + + @Test + public void withTimeout() { + assertEquals(1000, database.withTimeout(1000, TimeUnit.MILLISECONDS).getTimeout(TimeUnit.MILLISECONDS)); + } + + @Test + public void withWriteConcern() { + assertEquals(WriteConcern.MAJORITY, database.withWriteConcern(WriteConcern.MAJORITY).getWriteConcern()); + } + + @Test + void testAggregate() { + List pipeline = singletonList(BsonDocument.parse("{$match: {open: true}}")); + assertAll("Aggregate tests", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> database.aggregate(null)), + () -> assertThrows(IllegalArgumentException.class, () -> database.aggregate(clientSession, null)) + ), + () -> { + AggregatePublisher expected = + new AggregatePublisherImpl<>(null, mongoOperationPublisher, pipeline, AggregationLevel.DATABASE); + assertPublisherIsTheSameAs(expected, database.aggregate(pipeline), "Default"); + }, + () -> { + AggregatePublisher expected = + new 
AggregatePublisherImpl<>(null, mongoOperationPublisher.withDocumentClass(BsonDocument.class), + pipeline, AggregationLevel.DATABASE); + assertPublisherIsTheSameAs(expected, database.aggregate(pipeline, BsonDocument.class), + "With result class"); + }, + () -> { + AggregatePublisher expected = + new AggregatePublisherImpl<>(clientSession, mongoOperationPublisher, + pipeline, AggregationLevel.DATABASE); + assertPublisherIsTheSameAs(expected, database.aggregate(clientSession, pipeline), "With session"); + }, + () -> { + AggregatePublisher expected = + new AggregatePublisherImpl<>(clientSession, mongoOperationPublisher.withDocumentClass(BsonDocument.class), + pipeline, AggregationLevel.DATABASE); + assertPublisherIsTheSameAs(expected, database.aggregate(clientSession, pipeline, BsonDocument.class), + "With session & result class"); + } + ); + } + + @Test + void shouldListCollections() { + database.listCollections(); + + assertAll("listCollections tests", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> database.listCollections((Class) null)), + () -> assertThrows(IllegalArgumentException.class, + () -> database.listCollections((ClientSession) null)), + () -> assertThrows(IllegalArgumentException.class, + () -> database.listCollections(clientSession, null)) + ), + () -> { + ListCollectionsPublisher expected = + new ListCollectionsPublisherImpl<>(null, mongoOperationPublisher, false); + assertPublisherIsTheSameAs(expected, database.listCollections(), "Default"); + }, + () -> { + ListCollectionsPublisher expected = + new ListCollectionsPublisherImpl<>(null, mongoOperationPublisher.withDocumentClass(BsonDocument.class), + false); + assertPublisherIsTheSameAs(expected, database.listCollections(BsonDocument.class), "With result class"); + }, + () -> { + ListCollectionsPublisher expected = + new ListCollectionsPublisherImpl<>(clientSession, mongoOperationPublisher, false); + assertPublisherIsTheSameAs(expected, database.listCollections(clientSession), "With client session"); + }, + () -> { + ListCollectionsPublisher expected = + new ListCollectionsPublisherImpl<>(clientSession, + mongoOperationPublisher.withDocumentClass(BsonDocument.class), false); + assertPublisherIsTheSameAs(expected, database.listCollections(clientSession, BsonDocument.class), + "With client session & result class"); + } + ); + } + + @Test + void testListCollectionNames() { + assertAll("listCollectionNames", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> database.listCollectionNames(null)) + ), + () -> { + ListCollectionNamesPublisher expected = + new ListCollectionNamesPublisherImpl( + new ListCollectionsPublisherImpl<>(null, mongoOperationPublisher, true)); + assertPublisherIsTheSameAs(expected, database.listCollectionNames(), "Default"); + }, + () -> { + ListCollectionNamesPublisher expected = + new ListCollectionNamesPublisherImpl( + new ListCollectionsPublisherImpl<>(null, mongoOperationPublisher, true)) + .authorizedCollections(true); + assertPublisherIsTheSameAs(expected, database.listCollectionNames().authorizedCollections(true), + "nameOnly & authorizedCollections"); + }, + () -> { + ListCollectionNamesPublisher expected = + new ListCollectionNamesPublisherImpl( + new ListCollectionsPublisherImpl<>(clientSession, mongoOperationPublisher, true)); + assertPublisherIsTheSameAs(expected, database.listCollectionNames(clientSession), "With client session"); + } + ); + } + + @Test + void testCreateCollection() { + String 
collectionName = "coll"; + assertAll("createCollection", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> database.createCollection(null)), + () -> assertThrows(IllegalArgumentException.class, + () -> database.createCollection(collectionName, null)), + () -> assertThrows(IllegalArgumentException.class, + () -> database.createCollection(null, collectionName)), + () -> assertThrows(IllegalArgumentException.class, + () -> database.createCollection(clientSession, collectionName, null)) + ), + () -> { + Publisher expected = mongoOperationPublisher + .createCollection(null, collectionName, new CreateCollectionOptions()); + assertPublisherIsTheSameAs(expected, database.createCollection(collectionName), "Default"); + }, + () -> { + CreateCollectionOptions options = new CreateCollectionOptions().sizeInBytes(500).capped(true); + Publisher expected = mongoOperationPublisher.createCollection(null, collectionName, options); + assertPublisherIsTheSameAs(expected, database.createCollection(collectionName, options), "With options"); + }, + () -> { + Publisher expected = mongoOperationPublisher.createCollection(clientSession, collectionName, + new CreateCollectionOptions()); + assertPublisherIsTheSameAs(expected, database.createCollection(clientSession, collectionName), + "With client session"); + }, + () -> { + CreateCollectionOptions options = new CreateCollectionOptions().sizeInBytes(500).capped(true); + Publisher expected = mongoOperationPublisher.createCollection(clientSession, collectionName, options); + assertPublisherIsTheSameAs(expected, database.createCollection(clientSession, collectionName, options), + "With client session & options"); + } + ); + } + + @Test + void testCreateView() { + String viewName = "viewName"; + String viewOn = "viewOn"; + List pipeline = singletonList(BsonDocument.parse("{$match: {open: true}}")); + CreateViewOptions options = new CreateViewOptions().collation(Collation.builder().locale("de").build()); + + assertAll("createView", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, + () -> database.createView(null, viewOn, pipeline)), + () -> assertThrows(IllegalArgumentException.class, + () -> database.createView(viewName, null, pipeline)), + () -> assertThrows(IllegalArgumentException.class, + () -> database.createView(viewName, viewOn, null)), + () -> assertThrows(IllegalArgumentException.class, + () -> database.createView(viewName, viewOn, pipeline, null)), + () -> assertThrows(IllegalArgumentException.class, + () -> database.createView(null, viewName, viewOn, pipeline)), + () -> assertThrows(IllegalArgumentException.class, + () -> database.createView(null, viewName, viewOn, pipeline, options)) + + ), + () -> { + Publisher expected = + mongoOperationPublisher.createView(null, viewName, viewOn, pipeline, new CreateViewOptions()); + assertPublisherIsTheSameAs(expected, database.createView(viewName, viewOn, pipeline), "Default"); + }, + () -> { + Publisher expected = mongoOperationPublisher.createView(null, viewName, viewOn, pipeline, options); + assertPublisherIsTheSameAs(expected, database.createView(viewName, viewOn, pipeline, options), + "With options"); + }, + () -> { + Publisher expected = + mongoOperationPublisher.createView(clientSession, viewName, viewOn, pipeline, new CreateViewOptions()); + assertPublisherIsTheSameAs(expected, database.createView(clientSession, viewName, viewOn, pipeline), + "With client session"); + }, + () -> { + Publisher expected = 
mongoOperationPublisher.createView(clientSession, viewName, viewOn, pipeline, options); + assertPublisherIsTheSameAs(expected, database.createView(clientSession, viewName, viewOn, pipeline, options), + "With client session & options"); + } + ); + } + + @Test + void testDrop() { + assertAll("drop", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> database.drop(null)) + ), + () -> { + Publisher expected = mongoOperationPublisher.dropDatabase(null); + assertPublisherIsTheSameAs(expected, database.drop(), "Default"); + }, + () -> { + Publisher expected = mongoOperationPublisher.dropDatabase(clientSession); + assertPublisherIsTheSameAs(expected, database.drop(clientSession), "With client session"); + } + ); + } + + @Test + void testRunCommand() { + Bson command = BsonDocument.parse("{ping : 1}"); + + assertAll("runCommand", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, + () -> database.runCommand(null)), + () -> assertThrows(IllegalArgumentException.class, + () -> database.runCommand(command, (ReadPreference) null)), + () -> assertThrows(IllegalArgumentException.class, + () -> database.runCommand(command, (Class) null)), + () -> assertThrows(IllegalArgumentException.class, + () -> database.runCommand(command, ReadPreference.nearest(), null)), + () -> assertThrows(IllegalArgumentException.class, + () -> database.runCommand(null, command)), + () -> assertThrows(IllegalArgumentException.class, + () -> database.runCommand(null, command, ReadPreference.nearest())), + () -> assertThrows(IllegalArgumentException.class, + () -> database.runCommand(null, command, Document.class)), + () -> assertThrows(IllegalArgumentException.class, + () -> database.runCommand(null, command, ReadPreference.nearest(), Document.class)) + ), + () -> { + Publisher expected = + mongoOperationPublisher.runCommand(null, command, ReadPreference.primary(), Document.class); + assertPublisherIsTheSameAs(expected, database.runCommand(command), "Default"); + }, + () -> { + Publisher expected = + mongoOperationPublisher.runCommand(null, command, ReadPreference.primary(), BsonDocument.class); + assertPublisherIsTheSameAs(expected, database.runCommand(command, BsonDocument.class), + "With result class"); + }, + () -> { + Publisher expected = + mongoOperationPublisher.runCommand(null, command, ReadPreference.nearest(), Document.class); + assertPublisherIsTheSameAs(expected, database.runCommand(command, ReadPreference.nearest()), + "With read preference"); + }, + () -> { + Publisher expected = + mongoOperationPublisher.runCommand(null, command, ReadPreference.nearest(), BsonDocument.class); + assertPublisherIsTheSameAs(expected, database.runCommand(command, ReadPreference.nearest(), BsonDocument.class), + "With read preference & result class"); + }, + () -> { + Publisher expected = + mongoOperationPublisher.runCommand(clientSession, command, ReadPreference.primary(), Document.class); + assertPublisherIsTheSameAs(expected, database.runCommand(clientSession, command), + "With client session"); + }, + () -> { + Publisher expected = mongoOperationPublisher + .runCommand(clientSession, command, ReadPreference.primary(), BsonDocument.class); + assertPublisherIsTheSameAs(expected, database.runCommand(clientSession, command, BsonDocument.class), + "With client session & result class"); + }, + () -> { + Publisher expected = + mongoOperationPublisher.runCommand(clientSession, command, ReadPreference.nearest(), Document.class); + 
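                    // As with the session-less overloads above, ReadPreference.primary() is only the
+                    // driver default; an explicit preference such as nearest() is passed through unchanged.
+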
assertPublisherIsTheSameAs(expected, database.runCommand(clientSession, command, ReadPreference.nearest()), + "With client session & read preference"); + }, + () -> { + Publisher expected = mongoOperationPublisher + .runCommand(clientSession, command, ReadPreference.nearest(), BsonDocument.class); + assertPublisherIsTheSameAs(expected, database.runCommand(clientSession, command, ReadPreference.nearest(), + BsonDocument.class), + "With client session, read preference & result class"); + } + ); + } + + @Test + void testWatch() { + List pipeline = singletonList(BsonDocument.parse("{$match: {open: true}}")); + assertAll("watch", + () -> assertAll("check validation", + () -> assertThrows(IllegalArgumentException.class, () -> database.watch((Class) null)), + () -> assertThrows(IllegalArgumentException.class, () -> database.watch((List) null)), + () -> assertThrows(IllegalArgumentException.class, () -> database.watch(pipeline, null)), + () -> assertThrows(IllegalArgumentException.class, () -> database.watch((ClientSession) null)), + () -> assertThrows(IllegalArgumentException.class, () -> database.watch(null, pipeline)), + () -> assertThrows(IllegalArgumentException.class, + () -> database.watch(null, pipeline, Document.class)) + ), + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher, Document.class, emptyList(), + ChangeStreamLevel.DATABASE); + assertPublisherIsTheSameAs(expected, database.watch(), "Default"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher, Document.class, pipeline, + ChangeStreamLevel.DATABASE); + assertPublisherIsTheSameAs(expected, database.watch(pipeline), "With pipeline"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher, BsonDocument.class, emptyList(), + ChangeStreamLevel.DATABASE); + assertPublisherIsTheSameAs(expected, database.watch(BsonDocument.class), + "With result class"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(null, mongoOperationPublisher, BsonDocument.class, pipeline, + ChangeStreamLevel.DATABASE); + assertPublisherIsTheSameAs(expected, database.watch(pipeline, BsonDocument.class), + "With pipeline & result class"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(clientSession, mongoOperationPublisher, Document.class, emptyList(), + ChangeStreamLevel.DATABASE); + assertPublisherIsTheSameAs(expected, database.watch(clientSession), "with session"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(clientSession, mongoOperationPublisher, Document.class, pipeline, + ChangeStreamLevel.DATABASE); + assertPublisherIsTheSameAs(expected, database.watch(clientSession, pipeline), "With session & pipeline"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(clientSession, mongoOperationPublisher, BsonDocument.class, emptyList(), + ChangeStreamLevel.DATABASE); + assertPublisherIsTheSameAs(expected, database.watch(clientSession, BsonDocument.class), + "With session & resultClass"); + }, + () -> { + ChangeStreamPublisher expected = + new ChangeStreamPublisherImpl<>(clientSession, mongoOperationPublisher, BsonDocument.class, pipeline, + ChangeStreamLevel.DATABASE); + assertPublisherIsTheSameAs(expected, database.watch(clientSession, pipeline, BsonDocument.class), + "With clientSession, pipeline & result class"); + } + ); + } + +} diff --git 
a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoOperationPublisherTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoOperationPublisherTest.java new file mode 100644 index 00000000000..664cf1428d8 --- /dev/null +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/MongoOperationPublisherTest.java @@ -0,0 +1,128 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.reactivestreams.client.internal; + + +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoNamespace; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import org.bson.BsonDocument; +import org.bson.Document; +import org.bson.UuidRepresentation; +import org.bson.codecs.configuration.CodecRegistries; +import org.bson.codecs.configuration.CodecRegistry; +import org.junit.jupiter.api.Test; +import org.mockito.Mockito; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.ClusterFixture.TIMEOUT; +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS_WITH_TIMEOUT; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; + + +public class MongoOperationPublisherTest { + + private static final OperationExecutor OPERATION_EXECUTOR; + + static { + OPERATION_EXECUTOR = mock(OperationExecutor.class); + Mockito.lenient().doAnswer(invocation -> OPERATION_EXECUTOR) + .when(OPERATION_EXECUTOR) + .withTimeoutSettings(any()); + } + private static final MongoNamespace MONGO_NAMESPACE = new MongoNamespace("a.b"); + + private static final MongoOperationPublisher DEFAULT_MOP = new MongoOperationPublisher<>( + MONGO_NAMESPACE, Document.class, MongoClientSettings.getDefaultCodecRegistry(), ReadPreference.primary(), + ReadConcern.DEFAULT, WriteConcern.ACKNOWLEDGED, true, true, UuidRepresentation.STANDARD, + null, TIMEOUT_SETTINGS_WITH_TIMEOUT, OPERATION_EXECUTOR); + + @Test + public void withCodecRegistry() { + // Cannot do equality test as registries are wrapped + CodecRegistry codecRegistry = DEFAULT_MOP.withCodecRegistry(CodecRegistries.fromCodecs(new TestHelper.MyLongCodec())).getCodecRegistry(); + assertTrue(codecRegistry.get(Long.class) instanceof TestHelper.MyLongCodec); + } + + @Test + public void withDatabase() { + assertEquals(new MongoNamespace("c._ignored"), DEFAULT_MOP.withDatabase("c").getNamespace()); + } + + @Test + public void withDocumentClass() { + assertEquals(DEFAULT_MOP, DEFAULT_MOP.withDocumentClass(Document.class)); + assertEquals(BsonDocument.class, DEFAULT_MOP.withDocumentClass(BsonDocument.class).getDocumentClass()); + } + + @Test + public void withDatabaseAndDocumentClass() { + MongoOperationPublisher alternative = 
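+        // withDatabase and withDatabaseAndDocumentClass swap only the database portion of the
+        // namespace; the collection name becomes the "_ignored" placeholder until a real
+        // collection is selected, hence the expected namespace "c._ignored" in these tests.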
DEFAULT_MOP.withDatabaseAndDocumentClass("c", BsonDocument.class); + assertEquals(BsonDocument.class, alternative.getDocumentClass()); + assertEquals(new MongoNamespace("c._ignored"), alternative.getNamespace()); + } + + @Test + public void withNamespaceAndDocumentClass() { + assertEquals(DEFAULT_MOP, DEFAULT_MOP.withNamespaceAndDocumentClass(new MongoNamespace("a.b"), Document.class)); + + MongoOperationPublisher alternative = DEFAULT_MOP.withNamespaceAndDocumentClass(new MongoNamespace("c.d"), + BsonDocument.class); + assertEquals(BsonDocument.class, alternative.getDocumentClass()); + assertEquals(new MongoNamespace("c.d"), alternative.getNamespace()); + } + + + @Test + public void withNamespace() { + assertEquals(DEFAULT_MOP, DEFAULT_MOP.withNamespaceAndDocumentClass(new MongoNamespace("a.b"), Document.class)); + assertEquals(new MongoNamespace("c.d"), DEFAULT_MOP.withNamespace(new MongoNamespace("c.d")).getNamespace()); + } + + @Test + public void withReadConcern() { + assertEquals(DEFAULT_MOP, DEFAULT_MOP.withReadConcern(ReadConcern.DEFAULT)); + assertEquals(ReadConcern.AVAILABLE, DEFAULT_MOP.withReadConcern(ReadConcern.AVAILABLE).getReadConcern()); + } + + @Test + public void withReadPreference() { + assertEquals(DEFAULT_MOP, DEFAULT_MOP.withReadPreference(ReadPreference.primary())); + assertEquals(ReadPreference.secondaryPreferred(), DEFAULT_MOP.withReadPreference(ReadPreference.secondaryPreferred()) + .getReadPreference()); + } + + @Test + public void withTimeout() { + assertEquals(DEFAULT_MOP, DEFAULT_MOP.withTimeout(TIMEOUT, TimeUnit.SECONDS)); + assertEquals(1000, DEFAULT_MOP.withTimeout(1000, TimeUnit.MILLISECONDS).getTimeoutMS()); + assertThrows(IllegalArgumentException.class, () -> DEFAULT_MOP.withTimeout(500, TimeUnit.NANOSECONDS)); + } + + @Test + public void withWriteConcern() { + assertEquals(DEFAULT_MOP, DEFAULT_MOP.withWriteConcern(WriteConcern.ACKNOWLEDGED)); + assertEquals(WriteConcern.MAJORITY, DEFAULT_MOP.withWriteConcern(WriteConcern.MAJORITY).getWriteConcern()); + } + +} diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestHelper.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestHelper.java new file mode 100644 index 00000000000..450536df2b8 --- /dev/null +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestHelper.java @@ -0,0 +1,297 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.MongoNamespace; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.client.model.Collation; +import com.mongodb.internal.async.AsyncBatchCursor; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.bulk.IndexRequest; +import com.mongodb.internal.bulk.WriteRequest; +import com.mongodb.internal.client.model.FindOptions; +import com.mongodb.internal.operation.ReadOperation; +import com.mongodb.internal.operation.WriteOperation; +import com.mongodb.lang.NonNull; +import com.mongodb.lang.Nullable; +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.Document; +import org.bson.UuidRepresentation; +import org.bson.codecs.BsonValueCodecProvider; +import org.bson.codecs.Codec; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.configuration.CodecRegistry; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.jupiter.MockitoExtension; +import org.reactivestreams.Publisher; +import reactor.core.Scannable; +import reactor.core.publisher.Mono; + +import java.lang.reflect.Field; +import java.lang.reflect.Method; +import java.lang.reflect.Modifier; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Function; + +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS; +import static com.mongodb.reactivestreams.client.MongoClients.getDefaultCodecRegistry; +import static java.util.Collections.emptyList; +import static java.util.Collections.emptyMap; +import static java.util.stream.Collectors.toList; +import static java.util.stream.Collectors.toMap; +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; + +@SuppressWarnings("unchecked") +@ExtendWith(MockitoExtension.class) +public class TestHelper { + + @Mock + private AsyncBatchCursor batchCursor; + + TestHelper() { + } + + + static final MongoNamespace NAMESPACE = new MongoNamespace("db", "coll"); + static final Collation COLLATION = Collation.builder().locale("de").build(); + + static final MongoOperationPublisher OPERATION_PUBLISHER; + static final OperationExecutor OPERATION_EXECUTOR; + + static { + OperationExecutor executor = mock(OperationExecutor.class); + Mockito.lenient().doAnswer(invocation -> executor) + .when(executor).withTimeoutSettings(any()); + + Mockito.lenient().doAnswer(invocation -> Mono.empty()) + .when(executor) + .execute(any(), any(), any()); + Mockito.lenient().doAnswer(invocation -> Mono.empty()) + .when(executor) + .execute(any(), any(), any(), any()); + OPERATION_EXECUTOR = executor; + OPERATION_PUBLISHER = createMongoOperationPublisher(OPERATION_EXECUTOR); + } + + static final CodecRegistry BSON_CODEC_REGISTRY = fromProviders(new BsonValueCodecProvider()); + + static MongoOperationPublisher createMongoOperationPublisher(final OperationExecutor executor) { + return new MongoOperationPublisher<>(NAMESPACE, Document.class, + getDefaultCodecRegistry(), ReadPreference.primary(), ReadConcern.DEFAULT, + WriteConcern.ACKNOWLEDGED, true, true, + 
UuidRepresentation.STANDARD, null, TIMEOUT_SETTINGS, executor); + } + + + public static void assertOperationIsTheSameAs(@Nullable final Object expectedOperation, @Nullable final Object actualOperation) { + + if (expectedOperation instanceof ReadOperation) { + assertTrue(actualOperation instanceof ReadOperation, "Both async read operations"); + } else { + assertTrue(actualOperation instanceof WriteOperation, "Both async write operations"); + } + + Map expectedMap = getClassGetterValues(unwrapOperation(expectedOperation)); + Map actualMap = getClassGetterValues(unwrapOperation(actualOperation)); + assertEquals(expectedMap, actualMap); + } + + public static void assertPublisherIsTheSameAs(final Publisher expectedPublisher, final Publisher actualPublisher, + @Nullable final String message) { + Map> expectedMap = getClassPrivateFieldValues(getRootSource(expectedPublisher)); + Map> actualMap = getClassPrivateFieldValues(getRootSource(actualPublisher)); + assertEquals(expectedMap, actualMap, message); + } + + private static Object unwrapOperation(@Nullable final Object operation) { + assertTrue(operation instanceof ReadOperation || operation instanceof WriteOperation, + "Must be a read or write operation"); + if (operation instanceof MapReducePublisherImpl.WrappedMapReduceReadOperation) { + return ((MapReducePublisherImpl.WrappedMapReduceReadOperation) operation).getOperation(); + } else if (operation instanceof MapReducePublisherImpl.WrappedMapReduceWriteOperation) { + return ((MapReducePublisherImpl.WrappedMapReduceWriteOperation) operation).getOperation(); + } + return operation; + } + + @NonNull + private static Map getClassGetterValues(final Object instance) { + return Arrays.stream(instance.getClass().getMethods()) + .filter(n -> n.getParameterCount() == 0 && (n.getName().startsWith("get") || n.getName().startsWith("is"))) + .collect(toMap(Method::getName, n -> { + Object value = null; + try { + value = checkValueTypes(n.invoke(instance)); + } catch (Exception e) { + // Ignore value + } + return value != null ? value : "null"; + })); + } + + + private static Map> getClassPrivateFieldValues(@Nullable final Object instance) { + if (instance == null) { + return emptyMap(); + } + return Arrays.stream(instance.getClass().getDeclaredFields()) + .filter(field -> Modifier.isPrivate(field.getModifiers())) + .collect(toMap(Field::getName, field -> { + Optional value = Optional.empty(); + field.setAccessible(true); + try { + value = Optional.ofNullable(field.get(instance)); + } catch (IllegalAccessException e) { + // ignore + } + return value.map(TestHelper::checkValueTypes); + })); + } + + private static Object checkValueTypes(final Object instance) { + Object actual = instance instanceof Optional ? 
((Optional) instance).orElse(instance) : instance; + if (actual instanceof ReadOperation || actual instanceof WriteOperation) { + return getClassPrivateFieldValues(actual); + } else if (actual.getClass().getSimpleName().equals("ChangeStreamDocumentCodec")) { + return getClassGetterValues(actual); + } else if (actual instanceof FindOptions) { + return getClassGetterValues(actual); + } else if (actual instanceof WriteRequest) { + return getClassGetterValues(actual); + } else if (actual instanceof IndexRequest) { + return getClassGetterValues(actual); + } else if (actual instanceof List && !((List) actual).isEmpty()) { + return ((List) actual).stream() + .map(TestHelper::checkValueTypes) + .collect(toList()); + } + return actual; + } + + private static Publisher getRootSource(final Publisher publisher) { + Publisher sourcePublisher = publisher; + // Uses reflection to find the root / source publisher + if (publisher instanceof Scannable) { + Scannable scannable = (Scannable) publisher; + List parents = scannable.parents().collect(toList()); + if (parents.isEmpty()) { + sourcePublisher = getSource(scannable).orElse(publisher); + } else { + sourcePublisher = parents.stream().map(TestHelper::getSource) + .filter(Optional::isPresent) + .reduce((first, second) -> second) + .flatMap(Function.identity()) + .orElse(publisher); + } + } + return unwrap(sourcePublisher); + } + + private static Optional> getSource(final Scannable scannable) { + Optional> optionalSource = getScannableSource(scannable); + if (optionalSource.isPresent()) { + return optionalSource; + } else { + return getScannableArray(scannable); + } + } + + private static Publisher unwrap(final Publisher maybeWrappingPublisher) { + if (maybeWrappingPublisher instanceof ListCollectionNamesPublisherImpl) { + return ((ListCollectionNamesPublisherImpl) maybeWrappingPublisher).getWrapped(); + } else { + return maybeWrappingPublisher; + } + } + + private static Optional> getScannableSource(final Scannable scannable) { + return (Optional>) getScannableFieldValue(scannable, "source"); + } + + private static Optional> getScannableArray(final Scannable scannable) { + return getScannableFieldValue(scannable, "array") + .flatMap((Function>>) o -> + Arrays.stream((Publisher[]) o).map(TestHelper::getRootSource) + .reduce((first, second) -> first)); + } + + private static Optional getScannableFieldValue(final Scannable scannable, final String fieldName) { + try { + Optional sourceField = Arrays.stream(scannable.getClass().getDeclaredFields()) + .filter(field -> field.getName().equals(fieldName)) + .findFirst(); + if (sourceField.isPresent()) { + sourceField.get().setAccessible(true); + return Optional.of(sourceField.get().get(scannable)); + } + return Optional.empty(); + } catch (Exception e) { + return Optional.empty(); + } + } + + TestOperationExecutor createOperationExecutor(final List responses) { + configureBatchCursor(); + return new TestOperationExecutor(responses); + } + + + void configureBatchCursor() { + AtomicBoolean isClosed = new AtomicBoolean(false); + Mockito.lenient().doAnswer(i -> isClosed.get()).when(getBatchCursor()).isClosed(); + Mockito.lenient().doAnswer(invocation -> { + isClosed.set(true); + invocation.getArgument(0, SingleResultCallback.class).onResult(emptyList(), null); + return null; + }).when(getBatchCursor()).next(any(SingleResultCallback.class)); + } + + public AsyncBatchCursor getBatchCursor() { + return batchCursor; + } + + public static class MyLongCodec implements Codec { + + @Override + public Long decode(final 
BsonReader reader, final DecoderContext decoderContext) { + return 42L; + } + + @Override + public void encode(final BsonWriter writer, final Long value, final EncoderContext encoderContext) { + } + + @Override + public Class getEncoderClass() { + return Long.class; + } + } +} diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestOperationExecutor.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestOperationExecutor.java new file mode 100644 index 00000000000..831d22b3080 --- /dev/null +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TestOperationExecutor.java @@ -0,0 +1,109 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.operation.ReadOperation; +import com.mongodb.internal.operation.WriteOperation; +import com.mongodb.lang.Nullable; +import com.mongodb.reactivestreams.client.ClientSession; +import reactor.core.publisher.Mono; + +import java.util.ArrayList; +import java.util.List; + +@SuppressWarnings({ "rawtypes", "unchecked" }) +public class TestOperationExecutor implements OperationExecutor { + + private final List responses; + private final List clientSessions = new ArrayList<>(); + private final List readPreferences = new ArrayList<>(); + + private final List readOperations = new ArrayList<>(); + private final List writeOperations = new ArrayList<>(); + + public TestOperationExecutor(final List responses) { + this.responses = new ArrayList<>(responses); + } + + @Override + public Mono execute(final ReadOperation operation, final ReadPreference readPreference, final ReadConcern readConcern, + @Nullable final ClientSession session) { + readPreferences.add(readPreference); + clientSessions.add(session); + readOperations.add(operation); + return createMono(); + } + + + @Override + public Mono execute(final WriteOperation operation, final ReadConcern readConcern, + @Nullable final ClientSession session) { + clientSessions.add(session); + writeOperations.add(operation); + return createMono(); + } + + @Override + public OperationExecutor withTimeoutSettings(final TimeoutSettings timeoutSettings) { + return this; + } + + @Override + public TimeoutSettings getTimeoutSettings() { + throw new UnsupportedOperationException("Not supported"); + } + + Mono createMono() { + return Mono.create(sink -> { + Object response = responses.remove(0); + if (response instanceof Throwable) { + sink.error((Throwable) response); + } else { + if (response == null) { + sink.success(); + } else { + sink.success((T) response); + } + } + } + ); + } + + @Nullable + ClientSession getClientSession() { + return clientSessions.isEmpty() ? 
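+    // These getters drain their lists FIFO: each call returns (and removes) the oldest value
+    // recorded by execute(...), or null once everything recorded has been consumed.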
null : clientSessions.remove(0); + } + + @Nullable + ReadOperation getReadOperation() { + return readOperations.isEmpty() ? null : readOperations.remove(0); + } + + @Nullable + ReadPreference getReadPreference() { + return readPreferences.isEmpty() ? null : readPreferences.remove(0); + } + + @Nullable + WriteOperation getWriteOperation() { + return writeOperations.isEmpty() ? null : writeOperations.remove(0); + } + +} diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TimeoutHelperTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TimeoutHelperTest.java new file mode 100644 index 00000000000..01924c61f0e --- /dev/null +++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/TimeoutHelperTest.java @@ -0,0 +1,233 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal; + +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.internal.time.Timeout; +import com.mongodb.reactivestreams.client.MongoCollection; +import com.mongodb.reactivestreams.client.MongoDatabase; +import org.bson.Document; +import org.junit.jupiter.api.Test; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.internal.mockito.MongoMockito.mock; +import static com.mongodb.internal.time.Timeout.ZeroSemantics.ZERO_DURATION_MEANS_EXPIRED; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeout; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeoutDeferred; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.collectionWithTimeoutMono; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.databaseWithTimeout; +import static com.mongodb.reactivestreams.client.internal.TimeoutHelper.databaseWithTimeoutDeferred; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.longThat; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.when; + +@SuppressWarnings("unchecked") +class TimeoutHelperTest { + + private static final String TIMEOUT_ERROR_MESSAGE = "message"; + private static final String DEFAULT_TIMEOUT_ERROR_MESSAGE = "Operation exceeded the timeout limit."; + + @Test + void shouldNotSetRemainingTimeoutOnCollectionWhenTimeoutIsNull() { + //given + MongoCollection collection = mock(MongoCollection.class); + + //when + MongoCollection result = collectionWithTimeout(collection, null); + MongoCollection monoResult = collectionWithTimeoutMono(collection, null).block(); + MongoCollection monoResultDeferred = 
collectionWithTimeoutDeferred(collection, null).block(); + + //then + assertEquals(collection, result); + assertEquals(collection, monoResult); + assertEquals(collection, monoResultDeferred); + } + + @Test + void shouldNotSetRemainingTimeoutDatabaseWhenTimeoutIsNull() { + //given + MongoDatabase database = mock(MongoDatabase.class); + + //when + MongoDatabase result = databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, null); + MongoDatabase monoResultDeferred = databaseWithTimeoutDeferred(database, TIMEOUT_ERROR_MESSAGE, null).block(); + + //then + assertEquals(database, result); + assertEquals(database, monoResultDeferred); + } + + @Test + void shouldNotSetRemainingTimeoutOnCollectionWhenTimeoutIsInfinite() { + //given + MongoCollection collectionWithTimeout = mock(MongoCollection.class); + MongoCollection collection = mock(MongoCollection.class, mongoCollection -> { + when(mongoCollection.withTimeout(anyLong(), eq(TimeUnit.MILLISECONDS))).thenReturn(collectionWithTimeout); + }); + + //when + MongoCollection result = collectionWithTimeout(collection, Timeout.infinite()); + MongoCollection monoResult = collectionWithTimeoutMono(collection, Timeout.infinite()).block(); + MongoCollection monoResultDeferred = collectionWithTimeoutDeferred(collection, Timeout.infinite()).block(); + + //then + assertEquals(collectionWithTimeout, result); + assertEquals(collectionWithTimeout, monoResult); + assertEquals(collectionWithTimeout, monoResultDeferred); + verify(collection, times(3)) + .withTimeout(0L, TimeUnit.MILLISECONDS); + } + + @Test + void shouldNotSetRemainingTimeoutOnDatabaseWhenTimeoutIsInfinite() { + //given + MongoDatabase databaseWithTimeout = mock(MongoDatabase.class); + MongoDatabase database = mock(MongoDatabase.class, mongoDatabase -> { + when(mongoDatabase.withTimeout(anyLong(), eq(TimeUnit.MILLISECONDS))).thenReturn(databaseWithTimeout); + }); + + //when + MongoDatabase result = databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, Timeout.infinite()); + MongoDatabase monoResultDeferred = databaseWithTimeoutDeferred(database, TIMEOUT_ERROR_MESSAGE, Timeout.infinite()).block(); + + //then + assertEquals(databaseWithTimeout, result); + assertEquals(databaseWithTimeout, monoResultDeferred); + verify(database, times(2)) + .withTimeout(0L, TimeUnit.MILLISECONDS); + } + + @Test + void shouldSetRemainingTimeoutOnCollectionWhenTimeout() { + //given + MongoCollection collectionWithTimeout = mock(MongoCollection.class); + MongoCollection collection = mock(MongoCollection.class, mongoCollection -> { + when(mongoCollection.withTimeout(anyLong(), eq(TimeUnit.MILLISECONDS))).thenReturn(collectionWithTimeout); + }); + Timeout timeout = Timeout.expiresIn(1, TimeUnit.DAYS, ZERO_DURATION_MEANS_EXPIRED); + + //when + MongoCollection result = collectionWithTimeout(collection, timeout); + MongoCollection monoResult = collectionWithTimeoutMono(collection, timeout).block(); + MongoCollection monoResultDeferred = collectionWithTimeoutDeferred(collection, timeout).block(); + + //then + verify(collection, times(3)) + .withTimeout(longThat(remaining -> remaining > 0), eq(TimeUnit.MILLISECONDS)); + assertEquals(collectionWithTimeout, result); + assertEquals(collectionWithTimeout, monoResult); + assertEquals(collectionWithTimeout, monoResultDeferred); + } + + @Test + void shouldSetRemainingTimeoutOnDatabaseWhenTimeout() { + //given + MongoDatabase databaseWithTimeout = mock(MongoDatabase.class); + MongoDatabase database = mock(MongoDatabase.class, mongoDatabase -> { + when(mongoDatabase.withTimeout(anyLong(), 
eq(TimeUnit.MILLISECONDS))).thenReturn(databaseWithTimeout); + }); + Timeout timeout = Timeout.expiresIn(1, TimeUnit.DAYS, ZERO_DURATION_MEANS_EXPIRED); + + //when + MongoDatabase result = databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, timeout); + MongoDatabase monoResultDeferred = databaseWithTimeoutDeferred(database, TIMEOUT_ERROR_MESSAGE, timeout).block(); + + //then + verify(database, times(2)) + .withTimeout(longThat(remaining -> remaining > 0), eq(TimeUnit.MILLISECONDS)); + assertEquals(databaseWithTimeout, result); + assertEquals(databaseWithTimeout, monoResultDeferred); + } + + @Test + void shouldThrowErrorWhenTimeoutHasExpiredOnCollection() { + //given + MongoCollection collection = mock(MongoCollection.class); + Timeout timeout = Timeout.expiresIn(1, TimeUnit.MICROSECONDS, ZERO_DURATION_MEANS_EXPIRED); + + //when + MongoOperationTimeoutException mongoExecutionTimeoutException = + assertThrows(MongoOperationTimeoutException.class, () -> collectionWithTimeout(collection, timeout)); + MongoOperationTimeoutException mongoExecutionTimeoutExceptionMono = + assertThrows(MongoOperationTimeoutException.class, () -> collectionWithTimeoutMono(collection, timeout).block()); + MongoOperationTimeoutException mongoExecutionTimeoutExceptionDeferred = + assertThrows(MongoOperationTimeoutException.class, () -> collectionWithTimeoutDeferred(collection, timeout).block()); + + //then + assertEquals(DEFAULT_TIMEOUT_ERROR_MESSAGE, mongoExecutionTimeoutExceptionMono.getMessage()); + assertEquals(DEFAULT_TIMEOUT_ERROR_MESSAGE, mongoExecutionTimeoutException.getMessage()); + assertEquals(DEFAULT_TIMEOUT_ERROR_MESSAGE, mongoExecutionTimeoutExceptionDeferred.getMessage()); + verifyNoInteractions(collection); + } + + @Test + void shouldThrowErrorWhenTimeoutHasExpiredOnDatabase() { + //given + MongoDatabase database = mock(MongoDatabase.class); + Timeout timeout = Timeout.expiresIn(1, TimeUnit.MICROSECONDS, ZERO_DURATION_MEANS_EXPIRED); + + //when + MongoOperationTimeoutException mongoExecutionTimeoutException = + assertThrows(MongoOperationTimeoutException.class, () -> databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, timeout)); + MongoOperationTimeoutException mongoExecutionTimeoutExceptionDeferred = + assertThrows(MongoOperationTimeoutException.class, + () -> databaseWithTimeoutDeferred(database, TIMEOUT_ERROR_MESSAGE, timeout) + .block()); + + //then + assertEquals(TIMEOUT_ERROR_MESSAGE, mongoExecutionTimeoutException.getMessage()); + assertEquals(TIMEOUT_ERROR_MESSAGE, mongoExecutionTimeoutExceptionDeferred.getMessage()); + verifyNoInteractions(database); + } + + @Test + void shouldThrowErrorWhenTimeoutHasExpiredWithZeroRemainingOnCollection() { + //given + MongoCollection collection = mock(MongoCollection.class); + Timeout timeout = Timeout.expiresIn(0, TimeUnit.NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED); + + //when + assertThrows(MongoOperationTimeoutException.class, () -> collectionWithTimeout(collection, timeout)); + assertThrows(MongoOperationTimeoutException.class, () -> collectionWithTimeoutMono(collection, timeout).block()); + assertThrows(MongoOperationTimeoutException.class, () -> collectionWithTimeoutDeferred(collection, timeout).block()); + + //then + + } + + @Test + void shouldThrowErrorWhenTimeoutHasExpiredWithZeroRemainingOnDatabase() { + //given + MongoDatabase database = mock(MongoDatabase.class); + Timeout timeout = Timeout.expiresIn(0, TimeUnit.NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED); + + //when + assertThrows(MongoOperationTimeoutException.class, () -> 
databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, timeout));
+        assertThrows(MongoOperationTimeoutException.class,
+                () -> databaseWithTimeoutDeferred(database, TIMEOUT_ERROR_MESSAGE, timeout).block());
+
+        //then
+        verifyNoInteractions(database);
+    }
+}
diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/gridfs/GridFSUploadPublisherImplTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/gridfs/GridFSUploadPublisherImplTest.java
new file mode 100644
index 00000000000..38d19647fd7
--- /dev/null
+++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/gridfs/GridFSUploadPublisherImplTest.java
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.reactivestreams.client.internal.gridfs;
+
+import com.mongodb.ClusterFixture;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.MongoOperationTimeoutException;
+import com.mongodb.ReadConcern;
+import com.mongodb.ReadPreference;
+import com.mongodb.WriteConcern;
+import com.mongodb.client.Fixture;
+import com.mongodb.client.test.CollectionHelper;
+import com.mongodb.event.CommandEvent;
+import com.mongodb.internal.connection.TestCommandListener;
+import com.mongodb.reactivestreams.client.MongoClient;
+import com.mongodb.reactivestreams.client.MongoClients;
+import com.mongodb.reactivestreams.client.MongoDatabase;
+import com.mongodb.reactivestreams.client.TestEventPublisher;
+import com.mongodb.reactivestreams.client.TestSubscriber;
+import com.mongodb.reactivestreams.client.gridfs.GridFSBucket;
+import com.mongodb.reactivestreams.client.gridfs.GridFSBuckets;
+import org.bson.types.ObjectId;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.reactivestreams.Subscription;
+import reactor.core.publisher.Flux;
+
+import java.nio.ByteBuffer;
+import java.util.List;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.TimeUnit;
+
+import static com.mongodb.ClusterFixture.TIMEOUT_DURATION;
+import static com.mongodb.ClusterFixture.serverVersionAtLeast;
+import static com.mongodb.client.Fixture.getDefaultDatabaseName;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
+
+class GridFSUploadPublisherImplTest {
+    private static final String GRID_FS_BUCKET_NAME = "db.fs";
+    private TestCommandListener commandListener;
+
+    protected MongoClientSettings.Builder getMongoClientSettingsBuilder() {
+        commandListener.reset();
+        return Fixture.getMongoClientSettingsBuilder()
+                .readConcern(ReadConcern.MAJORITY)
+                .writeConcern(WriteConcern.MAJORITY)
+                .readPreference(ReadPreference.primary())
+                .addCommandListener(commandListener);
+    }
+
+    @Test
+    void shouldTimeoutWhenSourcePublisherCompletionExceedsOverallOperationTimeout() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        long rtt = ClusterFixture.getPrimaryRTT();
+
+        //given
+        try (MongoClient client = MongoClients.create(getMongoClientSettingsBuilder()
+                .timeout(rtt + 800, TimeUnit.MILLISECONDS).build())) {
+            MongoDatabase database = client.getDatabase(getDefaultDatabaseName());
+            GridFSBucket gridFsBucket = GridFSBuckets.create(database, GRID_FS_BUCKET_NAME);
+
+            TestEventPublisher<ByteBuffer> eventPublisher = new TestEventPublisher<>();
+            TestSubscriber<ObjectId> testSubscriber = new TestSubscriber<>();
+
+            //when
+            gridFsBucket.uploadFromPublisher("filename", eventPublisher.getEventStream())
+                    .subscribe(testSubscriber);
+            testSubscriber.requestMore(1);
+
+            //then
+            testSubscriber.assertTerminalEvent();
+
+            List<Throwable> onErrorEvents = testSubscriber.getOnErrorEvents();
+            assertEquals(1, onErrorEvents.size());
+
+            Throwable throwable = onErrorEvents.get(0);
+            assertEquals(MongoOperationTimeoutException.class, throwable.getClass());
+            assertEquals("GridFS waiting for data from the source Publisher exceeded the timeout limit.", throwable.getMessage());
+
+            //assert no chunk has been inserted, as we have not sent any data from the source publisher.
+            for (CommandEvent event : commandListener.getEvents()) {
+                assertNotEquals("insert", event.getCommandName());
+            }
+        }
+    }
+
+    @Test
+    void shouldCancelSubscriptionToSourceWhenOperationTimeoutOccurs() throws Exception {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        long rtt = ClusterFixture.getPrimaryRTT();
+
+        //given
+        try (MongoClient client = MongoClients.create(getMongoClientSettingsBuilder()
+                .timeout(rtt + 1000, TimeUnit.MILLISECONDS).build())) {
+            MongoDatabase database = client.getDatabase(getDefaultDatabaseName());
+            GridFSBucket gridFsBucket = GridFSBuckets.create(database, GRID_FS_BUCKET_NAME);
+
+            TestEventPublisher<ByteBuffer> testEventPublisher = new TestEventPublisher<>();
+            CompletableFuture<Subscription> subscriptionSignal = new CompletableFuture<>();
+            Flux<ByteBuffer> eventStream = testEventPublisher.getEventStream().doOnSubscribe(subscriptionSignal::complete);
+            TestSubscriber<ObjectId> testSubscriber = new TestSubscriber<>();
+
+            //when
+            gridFsBucket.uploadFromPublisher("filename", eventStream)
+                    .subscribe(testSubscriber);
+            testSubscriber.requestMore(1);
+
+            //then
+            subscriptionSignal.get(TIMEOUT_DURATION.toMillis(), TimeUnit.MILLISECONDS);
+            assertEquals(1, testEventPublisher.currentSubscriberCount());
+            //We wait for the timeout to occur here
+            testSubscriber.assertTerminalEvent();
+            assertEquals(0, testEventPublisher.currentSubscriberCount());
+        }
+    }
+
+    @BeforeEach
+    public void setUp() {
+        commandListener = new TestCommandListener();
+    }
+
+    @AfterEach
+    public void tearDown() {
+        CollectionHelper.dropDatabase(getDefaultDatabaseName());
+    }
+}
diff --git a/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/gridfs/ResizingByteBufferFluxTest.java b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/gridfs/ResizingByteBufferFluxTest.java
new file mode 100644
index 00000000000..69f26960653
--- /dev/null
+++ b/driver-reactive-streams/src/test/unit/com/mongodb/reactivestreams/client/internal/gridfs/ResizingByteBufferFluxTest.java
@@ -0,0 +1,149 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.reactivestreams.client.internal.gridfs; + +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.DynamicTest; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestFactory; +import reactor.core.publisher.Flux; +import reactor.test.StepVerifier; + +import java.nio.Buffer; +import java.nio.ByteBuffer; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static java.util.Arrays.asList; +import static org.junit.jupiter.api.Assertions.assertIterableEquals; +import static org.junit.jupiter.api.DynamicTest.dynamicTest; + +public class ResizingByteBufferFluxTest { + + private static final String TEST_STRING = String.join("", + asList("foobar", "foo", "bar", "baz", "qux", "quux", "quuz", "corge", "grault", "garply", + "waldo", "fred", "plugh", "xyzzy", "thud")); + + @TestFactory + @DisplayName("test that the resizing publisher produces the expected results") + List testResizingByteBufferPublisher() { + List dynamicTests = new ArrayList<>(); + int maxByteBufferSize = 10; + IntStream.rangeClosed(1, maxByteBufferSize).boxed().forEach(sourceSizes -> { + int outputSizes = 1 + maxByteBufferSize - sourceSizes; + dynamicTests.add( + dynamicTest("Resizing from byteBuffers of: " + sourceSizes + " to chunks of: " + outputSizes, () -> { + Flux source = Flux.fromIterable(splitStringIntoChunks(TEST_STRING, sourceSizes)) + .map(STRING_BYTE_BUFFER_FUNCTION); + Flux output = new ResizingByteBufferFlux(source, outputSizes).map(BYTE_BUFFER_STRING_FUNCTION); + assertIterableEquals(splitStringIntoChunks(TEST_STRING, outputSizes), output.toIterable()); + })); + }); + return dynamicTests; + } + + @Test + public void testAndVerifyResizingByteBufferPublisher() { + List internalRequests = new ArrayList<>(); + Flux internal = Flux.fromIterable(asList("fo", "ob", "ar", "foo", "bar", "ba", "z")) + .map(STRING_BYTE_BUFFER_FUNCTION) + .doOnRequest(internalRequests::add); + Flux publisher = new ResizingByteBufferFlux(internal, 3); + + Duration waitDuration = Duration.ofMillis(200); + StepVerifier.create(publisher, 0) + .expectSubscription() + .expectNoEvent(waitDuration) + .thenRequest(1) + .expectNext(STRING_BYTE_BUFFER_FUNCTION.apply("foo")) + .expectNoEvent(waitDuration) + .thenRequest(1) + .expectNext(STRING_BYTE_BUFFER_FUNCTION.apply("bar")) + .expectNoEvent(waitDuration) + .thenRequest(1) + .expectNext(STRING_BYTE_BUFFER_FUNCTION.apply("foo")) + .expectNoEvent(waitDuration) + .thenRequest(1) + .expectNext(STRING_BYTE_BUFFER_FUNCTION.apply("bar")) + .expectNoEvent(waitDuration) + .thenRequest(1) + .expectNext(STRING_BYTE_BUFFER_FUNCTION.apply("baz")) + .expectComplete() + .verify(); + + assertIterableEquals(asList(1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L), internalRequests); + } + + @Test + public void testDirectHeapByteBuffer() { + Flux input = Flux.fromIterable(splitStringIntoChunks(TEST_STRING, 7)) + 
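+                // asReadOnlyBuffer() below produces buffers with hasArray() == false, so this
+                // test exercises resizing without any backing-array access, the same constraint
+                // a direct ByteBuffer would impose.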
.map(STRING_BYTE_BUFFER_FUNCTION) + .map(ByteBuffer::asReadOnlyBuffer); + Flux resized = new ResizingByteBufferFlux(input, 10).map(BYTE_BUFFER_STRING_FUNCTION); + assertIterableEquals(splitStringIntoChunks(TEST_STRING, 10), resized.toIterable()); + } + + @Test + public void testErrorsAreSignalled() { + List internalRequests = new ArrayList<>(); + Flux internal = Flux.fromIterable(asList("fo", "ob", "ar")) + .map(STRING_BYTE_BUFFER_FUNCTION) + .map(i -> { + if (internalRequests.size() > 2) { + throw new RuntimeException("Upstream error"); + } + return i; + }) + .doOnRequest(internalRequests::add); + Flux publisher = new ResizingByteBufferFlux(internal, 3); + + StepVerifier.create(publisher, 0) + .expectSubscription() + .thenRequest(1) + .expectNext(STRING_BYTE_BUFFER_FUNCTION.apply("foo")) + .thenRequest(1) + .expectErrorMessage("Upstream error") + .verify(); + } + + private Collection splitStringIntoChunks(final String original, final int chunkSize) { + AtomicInteger splitCounter = new AtomicInteger(0); + return original + .chars() + .mapToObj(_char -> String.valueOf((char) _char)) + .collect(Collectors.groupingBy(stringChar -> splitCounter.getAndIncrement() / chunkSize, + Collectors.joining())) + .values(); + } + + static final Function BYTE_BUFFER_STRING_FUNCTION = bb -> { + ((Buffer) bb).mark(); + byte[] arr = new byte[bb.remaining()]; + bb.get(arr); + ((Buffer) bb).reset(); + return new String(arr); + }; + + static final Function STRING_BYTE_BUFFER_FUNCTION = s -> ByteBuffer.wrap(s.getBytes()); + +} diff --git a/driver-scala/build.gradle.kts b/driver-scala/build.gradle.kts new file mode 100644 index 00000000000..68187889629 --- /dev/null +++ b/driver-scala/build.gradle.kts @@ -0,0 +1,61 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+import ProjectExtensions.configureJarManifest
+import ProjectExtensions.configureMavenPublication
+import ProjectExtensions.scalaVersion
+
+plugins { id("project.scala") }
+
+base.archivesName.set("mongo-scala-driver")
+
+val scalaVersion: String = project.scalaVersion()
+
+dependencies {
+    api(project(path = ":bson-scala", configuration = "default"))
+    api(project(path = ":driver-reactive-streams", configuration = "default"))
+    compileOnly(libs.findbugs.jsr)
+
+    testImplementation(project(path = ":driver-sync", configuration = "default"))
+    testImplementation(project(path = ":bson", configuration = "testArtifacts"))
+    testImplementation(project(path = ":driver-core", configuration = "testArtifacts"))
+    testImplementation(project(path = ":driver-sync", configuration = "testArtifacts"))
+    testImplementation(project(path = ":driver-reactive-streams", configuration = "testArtifacts"))
+
+    // Encryption testing
+    integrationTestImplementation(project(path = ":mongodb-crypt", configuration = "default"))
+}
+
+configureMavenPublication {
+    pom {
+        name.set("Mongo Scala Driver")
+        description.set("A Scala wrapper of the MongoDB Reactive Streams Java driver")
+    }
+}
+
+configureJarManifest {
+    attributes["Automatic-Module-Name"] = "org.mongodb.driver.scala"
+    attributes["Bundle-SymbolicName"] = "org.mongodb.scala.mongo-scala-driver"
+    attributes["Import-Package"] = "!scala.*,*"
+}
+
+// ===================
+// Scala docs
+// ===================
+tasks.withType<ScalaDoc>().forEach {
+    // Include bson-scala source for main scaladoc
+    project(":bson-scala").tasks.withType<ScalaDoc>().forEach { bsonScala -> it.source += bsonScala.source }
+    it.scalaDocOptions.additionalParameters = listOf("-doc-root-content", "${project.rootDir}/driver-scala/rootdoc.txt")
+}
diff --git a/driver-scala/rootdoc.txt b/driver-scala/rootdoc.txt
new file mode 100644
index 00000000000..a0fde8c9708
--- /dev/null
+++ b/driver-scala/rootdoc.txt
@@ -0,0 +1,14 @@
+This is the documentation for the MongoDB Scala driver.
+
+== Driver structure ==
+
+The [[http://mongodb.org MongoDB]] Scala driver is a Scala wrapper of the MongoDB Reactive Streams Java driver.
+
+To get started you need a [[org.mongodb.scala.MongoClient MongoClient]] instance, either from a
+[[https://www.mongodb.com/docs/manual/reference/connection-string/ connection string]] or via a [[org.mongodb.scala.MongoClientSettings]].
+
+Notable packages include:
+
+ - [[org.mongodb.scala.MongoClient MongoClient]] The MongoClient used to connect to and access MongoDB.
+ - [[org.mongodb.scala.MongoDatabase MongoDatabase]] Providing access to a specific database.
+ - [[org.mongodb.scala.MongoCollection MongoCollection]] Providing access to a specific collection in a database.
diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/BaseSpec.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/BaseSpec.scala
new file mode 100644
index 00000000000..9d59b8f55e6
--- /dev/null
+++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/BaseSpec.scala
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.mongodb.scala + +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers + +abstract class BaseSpec extends AnyFlatSpec with Matchers {} diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/ClientSideEncryptionBypassAutoEncryptionSpec.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/ClientSideEncryptionBypassAutoEncryptionSpec.scala new file mode 100644 index 00000000000..0093c2041c7 --- /dev/null +++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/ClientSideEncryptionBypassAutoEncryptionSpec.scala @@ -0,0 +1,89 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala + +import java.security.SecureRandom + +import org.mongodb.scala.MongoClient.DEFAULT_CODEC_REGISTRY +import org.mongodb.scala.bson.BsonString +import org.mongodb.scala.model.vault.{ DataKeyOptions, EncryptOptions } +import org.mongodb.scala.vault.ClientEncryptions + +import scala.collection.JavaConverters._ + +class ClientSideEncryptionBypassAutoEncryptionSpec extends RequiresMongoDBISpec with FuturesSpec { + + "ClientSideEncryption" should "be able to bypass auto encryption" in withDatabase { db => + assume(serverVersionAtLeast(List(4, 1, 0))) + + val localMasterKey = new Array[Byte](96) + new SecureRandom().nextBytes(localMasterKey) + + val kmsProviders = Map("local" -> Map[String, AnyRef]("key" -> localMasterKey).asJava).asJava + + val keyVaultNamespace: MongoNamespace = new MongoNamespace(databaseName, "testKeyVault") + + db.drop().futureValue + + val clientEncryptionSettings: ClientEncryptionSettings = ClientEncryptionSettings + .builder() + .keyVaultMongoClientSettings(mongoClientSettings) + .keyVaultNamespace(keyVaultNamespace.getFullName) + .kmsProviders(kmsProviders) + .build() + + val clientEncryption = ClientEncryptions.create(clientEncryptionSettings) + + val autoEncryptionSettings: AutoEncryptionSettings = AutoEncryptionSettings + .builder() + .keyVaultNamespace(keyVaultNamespace.getFullName) + .kmsProviders(kmsProviders) + .bypassAutoEncryption(true) + .build() + + val clientSettings: MongoClientSettings = mongoClientSettingsBuilder + .autoEncryptionSettings(autoEncryptionSettings) + .codecRegistry(DEFAULT_CODEC_REGISTRY) + .build + + withTempClient( + clientSettings, + clientEncrypted => { + + val fieldValue = BsonString("123456789") + + val dataKeyId = clientEncryption.createDataKey("local", DataKeyOptions()).head().futureValue + + val encryptedFieldValue = clientEncryption + .encrypt(fieldValue, EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic").keyId(dataKeyId)) + .head() + .futureValue + + val collection: MongoCollection[Document] = + clientEncrypted.getDatabase(databaseName).getCollection[Document]("test") + + collection.insertOne(Document("encryptedField" -> encryptedFieldValue)).futureValue + + val result = 
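+        // bypassAutoEncryption(true) disables automatic encryption of writes, but automatic
+        // decryption of reads stays active, so the explicitly encrypted field is expected to
+        // come back as the original plaintext BsonString.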
collection.find().first().head().futureValue + + result.get[BsonString]("encryptedField") should equal(Some(fieldValue)) + + } + ) + } + +} diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/FuturesSpec.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/FuturesSpec.scala new file mode 100644 index 00000000000..9c7621103de --- /dev/null +++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/FuturesSpec.scala @@ -0,0 +1,41 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala + +import org.scalatest.concurrent.ScalaFutures +import org.scalatest.time.{ Millis, Seconds, Span } + +import scala.concurrent.Future +import scala.language.implicitConversions + +trait FuturesSpec extends ScalaFutures { + + implicit val defaultPatience: PatienceConfig = PatienceConfig(timeout = Span(60, Seconds), interval = Span(5, Millis)) + + implicit def observableToFuture[T](observable: Observable[T]): Future[Seq[T]] = + observable.collect().toFuture() + + implicit def singleObservableToFuture[T](observable: SingleObservable[T]): Future[T] = + observable.toFuture() + + implicit def observableToFutureConcept[T](observable: Observable[T]): FutureConcept[Seq[T]] = + convertScalaFuture(observable.collect().toFuture()) + + implicit def singleObservableToFutureConcept[T](observable: SingleObservable[T]): FutureConcept[T] = + convertScalaFuture(observable.toFuture()) + +} diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/MongoCollectionCaseClassSpec.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/MongoCollectionCaseClassSpec.scala new file mode 100644 index 00000000000..bc8e98b726e --- /dev/null +++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/MongoCollectionCaseClassSpec.scala @@ -0,0 +1,88 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala + +import org.mongodb.scala.bson.codecs.Macros.createCodecProvider +import org.bson.codecs.configuration.CodecRegistries.{ fromProviders, fromRegistries } +import org.bson.codecs.configuration.CodecRegistry + +class MongoCollectionCaseClassSpec extends RequiresMongoDBISpec with FuturesSpec { + + case class Contact(phone: String) + case class User(_id: Int, username: String, age: Int, hobbies: List[String], contacts: List[Contact]) + case class Optional(_id: Int, optional: Option[Int]) + + val codecRegistry: CodecRegistry = fromRegistries( + fromProviders(classOf[User], classOf[Contact], classOf[Optional]), + MongoClient.DEFAULT_CODEC_REGISTRY + ) + + "The Scala driver" should "handle case classes" in withDatabase(databaseName) { database => + val collection = database.getCollection[User](collectionName).withCodecRegistry(codecRegistry) + + val user = User( + _id = 1, + age = 30, + username = "Bob", + hobbies = List[String]("hiking", "music"), + contacts = List(Contact("123 12314"), Contact("234 234234")) + ) + collection.insertOne(user).futureValue + + info("The collection should have the expected document") + val expectedDocument = Document("""{_id: 1, age: 30, username: "Bob", hobbies: ["hiking", "music"], + | contacts: [{phone: "123 12314"}, {phone: "234 234234"}]}""".stripMargin) + collection.find[Document]().first().futureValue should equal(expectedDocument) + + info("The collection should find and return the user") + collection.find().first().futureValue should equal(user) + } + + it should "handle optional values" in withDatabase(databaseName) { database => + val collection = database.getCollection[Optional](collectionName).withCodecRegistry(codecRegistry) + + val none = Optional(_id = 1, None) + collection.insertOne(none).futureValue + + info("The collection should have the expected document") + val expectedDocument = Document("{_id: 1, optional: null}") + collection.find[Document]().first().futureValue should equal(expectedDocument) + + info("The collection should find and return the optional") + collection.find().first().futureValue should equal(none) + + collection.drop().futureValue + + val some = Optional(_id = 1, Some(1)) + collection.insertOne(some).futureValue + + info("The collection should find and return the optional") + collection.find().first().futureValue should equal(some) + } + + it should "handle converting to case classes where there is extra data" in withDatabase(databaseName) { database => + val collection = database.getCollection[Contact](collectionName).withCodecRegistry(codecRegistry) + + database + .getCollection(collectionName) + .insertOne(Document("""{_id: 5, phone: "555 232323", active: true}""")) + .futureValue + val contact = Contact("555 232323") + collection.find().first().futureValue should equal(contact) + } + +} diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/RequiresMongoDBISpec.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/RequiresMongoDBISpec.scala new file mode 100644 index 00000000000..6a886ac157b --- /dev/null +++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/RequiresMongoDBISpec.scala @@ -0,0 +1,159 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala + +import com.mongodb.connection.ServerVersion +import org.mongodb.scala.bson.BsonString +import org.scalatest._ + +import scala.collection.JavaConverters._ +import scala.concurrent.duration.{ Duration, _ } +import scala.concurrent.{ Await, ExecutionContext } + +trait RequiresMongoDBISpec extends BaseSpec with BeforeAndAfterAll { + + implicit val ec: ExecutionContext = ExecutionContext.Implicits.global + + val WAIT_DURATION: Duration = 60.seconds + private val DB_PREFIX = "mongo-scala-" + private var _currentTestName: Option[String] = None + + protected override def runTest(testName: String, args: Args): Status = { + _currentTestName = Some(testName.split("should")(1)) + super.runTest(testName, args) + } + + /** + * The database name to use for this test + */ + def databaseName: String = DB_PREFIX + suiteName + + /** + * The collection name to use for this test + */ + def collectionName: String = _currentTestName.getOrElse(suiteName).filter(_.isLetterOrDigit) + + def mongoClientSettingsBuilder: MongoClientSettings.Builder = TestMongoClientHelper.mongoClientSettingsBuilder + + val mongoClientSettings: MongoClientSettings = TestMongoClientHelper.mongoClientSettings + + def mongoClient(): MongoClient = TestMongoClientHelper.mongoClient + + def checkMongoDB(): Unit = { + if (!TestMongoClientHelper.isMongoDBOnline) { + cancel("No Available Database") + } + } + + def withTempClient(mongoClientSettings: MongoClientSettings, testCode: MongoClient => Any): Unit = { + val client = MongoClient(mongoClientSettings) + try { + testCode(client) + } finally { + client.close() + } + } + + def withClient(testCode: MongoClient => Any): Unit = { + checkMongoDB() + testCode(TestMongoClientHelper.mongoClient) // loan the client + } + + def withDatabase(dbName: String)(testCode: MongoDatabase => Any): Unit = { + withClient { client => + val databaseName = + if (dbName.startsWith(DB_PREFIX)) dbName.take(63) else s"$DB_PREFIX$dbName".take(63) // scalastyle:ignore + val mongoDatabase = client.getDatabase(databaseName) + try testCode(mongoDatabase) // "loan" the fixture to the test + finally { + // clean up the fixture + Await.result(mongoDatabase.drop().toFuture(), WAIT_DURATION) + } + } + } + + def withDatabase(testCode: MongoDatabase => Any): Unit = withDatabase(databaseName)(testCode: MongoDatabase => Any) + + def withCollection(testCode: MongoCollection[Document] => Any): Unit = { + withDatabase(databaseName) { mongoDatabase => + val mongoCollection = mongoDatabase.getCollection(collectionName) + try testCode(mongoCollection) // "loan" the fixture to the test + finally { + // clean up the fixture + Await.result(mongoCollection.drop().toFuture(), WAIT_DURATION) + } + } + } + + lazy val isSharded: Boolean = + if (!TestMongoClientHelper.isMongoDBOnline) { + false + } else { + Await + .result( + mongoClient().getDatabase("admin").runCommand(Document("isMaster" -> 1)).toFuture(), + WAIT_DURATION + ) + .getOrElse("msg", BsonString("")) + .asString() + .getValue == "isdbgrid" + } + + lazy val buildInfo: Document = { + if (TestMongoClientHelper.isMongoDBOnline) { + 
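+      // buildInfo is fetched once (lazy val) and reused by serverVersionAtLeast and
+      // serverVersionLessThan below; the reported version string is split on non-digits
+      // and padded to three numeric components, so e.g. "7.0" compares as 7.0.0.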
Await.result( + mongoClient().getDatabase("admin").runCommand(Document("buildInfo" -> 1)).toFuture(), + WAIT_DURATION + ) + } else { + Document() + } + } + + def serverVersionAtLeast(minServerVersion: List[Int]): Boolean = { + buildInfo.get[BsonString]("version") match { + case Some(version) => + val serverVersion = version.getValue.split("\\D+").map(_.toInt).padTo(3, 0).take(3).toList.asJava + new ServerVersion(serverVersion.asInstanceOf[java.util.List[Integer]]) + .compareTo(new ServerVersion(minServerVersion.asJava.asInstanceOf[java.util.List[Integer]])) >= 0 + case None => false + } + } + + def serverVersionLessThan(maxServerVersion: List[Int]): Boolean = { + buildInfo.get[BsonString]("version") match { + case Some(version) => + val serverVersion = version.getValue.split("\\D+").map(_.toInt).padTo(3, 0).take(3).toList.asJava + new ServerVersion(serverVersion.asInstanceOf[java.util.List[Integer]]) + .compareTo(new ServerVersion(maxServerVersion.asJava.asInstanceOf[java.util.List[Integer]])) < 0 + case None => false + } + } + + override def beforeAll() { + if (TestMongoClientHelper.isMongoDBOnline) { + Await.result(TestMongoClientHelper.mongoClient.getDatabase(databaseName).drop().toFuture(), WAIT_DURATION) + } + } + + override def afterAll() { + if (TestMongoClientHelper.isMongoDBOnline) { + Await.result(TestMongoClientHelper.mongoClient.getDatabase(databaseName).drop().toFuture(), WAIT_DURATION) + } + } + +} diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/TestMongoClientHelper.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/TestMongoClientHelper.scala new file mode 100644 index 00000000000..fb7a065550c --- /dev/null +++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/TestMongoClientHelper.scala @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2008 - 2013 10gen, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.mongodb.scala + +import com.mongodb.ClusterFixture.getServerApi +import org.mongodb.scala.syncadapter.WAIT_DURATION + +import scala.concurrent.Await +import scala.util.{ Properties, Try } + +object TestMongoClientHelper { + private val DEFAULT_URI: String = "mongodb://localhost:27017/" + private val MONGODB_URI_SYSTEM_PROPERTY_NAME: String = "org.mongodb.test.uri" + + val mongoClientURI: String = { + val uri = Properties.propOrElse(MONGODB_URI_SYSTEM_PROPERTY_NAME, DEFAULT_URI) + if (!uri.codePoints().allMatch((cp: Int) => Character.isWhitespace(cp))) uri else DEFAULT_URI + } + val connectionString: ConnectionString = ConnectionString(mongoClientURI) + + def mongoClientSettingsBuilder: MongoClientSettings.Builder = { + val builder = MongoClientSettings.builder().applyConnectionString(connectionString) + if (getServerApi != null) { + builder.serverApi(getServerApi) + } + builder + } + + val mongoClientSettings: MongoClientSettings = mongoClientSettingsBuilder.build() + val mongoClient: MongoClient = MongoClient(mongoClientSettings) + + def isMongoDBOnline: Boolean = { + Try(Await.result(TestMongoClientHelper.mongoClient.listDatabaseNames().toFuture(), WAIT_DURATION)).isSuccess + } + + def hasSingleHost: Boolean = { + TestMongoClientHelper.connectionString.getHosts.size() == 1 + } + + Runtime.getRuntime.addShutdownHook(new ShutdownHook()) + + private[mongodb] class ShutdownHook extends Thread { + override def run() { + mongoClient.close() + } + } +} diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/documentation/DocumentationChangeStreamExampleSpec.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/documentation/DocumentationChangeStreamExampleSpec.scala new file mode 100644 index 00000000000..32dfb221c5a --- /dev/null +++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/documentation/DocumentationChangeStreamExampleSpec.scala @@ -0,0 +1,207 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.mongodb.scala.documentation
+
+import java.util.concurrent.CountDownLatch
+import java.util.concurrent.TimeUnit.MINUTES
+
+import com.mongodb.client.model.changestream.FullDocument
+import org.mongodb.scala.model.changestream.ChangeStreamDocument
+import org.mongodb.scala.model.{ Aggregates, Filters, Updates }
+import org.mongodb.scala.{
+  ChangeStreamObservable,
+  Document,
+  FuturesSpec,
+  MongoDatabase,
+  MongoTimeoutException,
+  Observable,
+  Observer,
+  RequiresMongoDBISpec,
+  SingleObservable,
+  Subscription
+}
+
+import scala.collection.mutable
+import scala.concurrent.{ Await, ExecutionContext }
+
+//scalastyle:off magic.number regex
+class DocumentationChangeStreamExampleSpec extends RequiresMongoDBISpec with FuturesSpec {
+
+  "The Scala driver" should "be able to use $changeStreams" in withDatabase { database: MongoDatabase =>
+    assume(false) // Don't run in tests
+
+    database.drop().execute()
+    database.createCollection(collectionName).execute()
+    val collection = database.getCollection(collectionName)
+
+    /*
+     * Example 1
+     * Create a simple change stream against an existing collection.
+     */
+    println("1. Initial document from the Change Stream:")
+
+    // Create the change stream observable.
+    var observable: ChangeStreamObservable[Document] = collection.watch()
+
+    // Create an observer
+    var observer = new LatchedObserver[ChangeStreamDocument[Document]]()
+    observable.subscribe(observer)
+
+    // Insert a test document into the collection and request a result
+    collection.insertOne(Document("{username: 'alice123', name: 'Alice'}")).execute()
+    observer.waitForThenCancel()
+
+    /*
+     * Example 2
+     * Create a change stream with 'lookup' option enabled.
+     * The test document will be returned with a full version of the updated document.
+     */
+    println("2. Document from the Change Stream, with lookup enabled:")
+
+    observable = collection.watch.fullDocument(FullDocument.UPDATE_LOOKUP)
+    observer = new LatchedObserver[ChangeStreamDocument[Document]]()
+    observable.subscribe(observer)
+
+    // Update the test document.
+    collection
+      .updateOne(Document("{username: 'alice123'}"), Document("{$set : { email: 'alice@example.com'}}"))
+      .subscribeAndAwait()
+    observer.waitForThenCancel()
+
+    /*
+     * Example 3
+     * Create a change stream with 'lookup' option using a $match and ($redact or $project) stage.
+     */
+    println("3. Document from the Change Stream, with lookup enabled, matching `update` operations only: ")
+
+    // Insert some dummy data.
+    collection.insertMany(List(Document("{updateMe: 1}"), Document("{replaceMe: 1}"))).subscribeAndAwait()
+
+    // Create $match pipeline stage.
+    val pipeline = List(
+      Aggregates.filter(
+        Filters.or(
+          Document("{'fullDocument.username': 'alice123'}"),
+          Filters.in("operationType", "update", "replace", "delete")
+        )
+      )
+    )
+
+    // Create the change stream cursor with $match.
+    observable = collection.watch(pipeline).fullDocument(FullDocument.UPDATE_LOOKUP)
+    observer = new LatchedObserver[ChangeStreamDocument[Document]](false, 3)
+    observable.subscribe(observer)
+
+    // Update the test document.
+    collection.updateOne(Filters.eq("updateMe", 1), Updates.set("updated", true)).subscribeAndAwait()
+    // Replace the test document.
+    collection.replaceOne(Filters.eq("replaceMe", 1), Document("{replaced: true}")).subscribeAndAwait()
+    // Delete the test document.
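+    // (the delete is the third event matched by the pipeline, so it releases the latched observer above)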
+ collection.deleteOne(Filters.eq("username", "alice123")).subscribeAndAwait() + + observer.waitForThenCancel() + + val results = observer.results() + println(s""" + |Update operationType: ${results.head.getUpdateDescription} + | ${results.head} + """.stripMargin.trim) + println(s"Replace operationType: ${results(1)}") + println(s"Delete operationType: ${results(2)}") + + /* + * Example 4 + * Resume a change stream using a resume token. + */ + println("4. Document from the Change Stream including a resume token:") + + // Get the resume token from the last document we saw in the previous change stream cursor. + val resumeToken = results(2).getResumeToken + println(resumeToken) + + // Pass the resume token to the resume after function to continue the change stream cursor. + observable = collection.watch.resumeAfter(resumeToken) + observer = new LatchedObserver[ChangeStreamDocument[Document]] + observable.subscribe(observer) + + // Insert a test document. + collection.insertOne(Document("{test: 'd'}")).subscribeAndAwait() + + // Block until the next result is printed + observer.waitForThenCancel() + + } + + // Implicit functions that execute the Observable and return the results + implicit class ObservableExecutor[T](observable: Observable[T]) { + def execute(): Seq[T] = Await.result(observable, WAIT_DURATION) + + def subscribeAndAwait(): Unit = { + val observer: LatchedObserver[T] = new LatchedObserver[T](false) + observable.subscribe(observer) + observer.await() + } + } + + implicit class SingleObservableExecutor[T](observable: SingleObservable[T]) { + def execute(): T = Await.result(observable, WAIT_DURATION) + } + + // end implicit functions + + private class LatchedObserver[T](val printResults: Boolean = true, val minimumNumberOfResults: Int = 1) + extends Observer[T] { + private val latch: CountDownLatch = new CountDownLatch(1) + private val resultsBuffer: mutable.ListBuffer[T] = new mutable.ListBuffer[T] + private var subscription: Option[Subscription] = None + private var error: Option[Throwable] = None + + override def onSubscribe(s: Subscription): Unit = { + subscription = Some(s) + s.request(Integer.MAX_VALUE) + } + + override def onNext(t: T): Unit = { + resultsBuffer.append(t) + if (printResults) println(t) + if (resultsBuffer.size >= minimumNumberOfResults) latch.countDown() + } + + override def onError(t: Throwable): Unit = { + error = Some(t) + println(t.getMessage) + latch.countDown() + } + + override def onComplete(): Unit = { + latch.countDown() + } + + def results(): List[T] = resultsBuffer.toList + + def await(): Unit = { + if (!latch.await(2, MINUTES)) println("observable timed out") + if (error.isDefined) throw error.get + } + + def waitForThenCancel(): Unit = { + if (minimumNumberOfResults > resultsBuffer.size) await() + subscription.foreach(_.unsubscribe()) + } + } + +} diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/documentation/DocumentationExampleSpec.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/documentation/DocumentationExampleSpec.scala new file mode 100644 index 00000000000..0bbbdfa4e50 --- /dev/null +++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/documentation/DocumentationExampleSpec.scala @@ -0,0 +1,655 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.documentation + +import java.util.concurrent.atomic.AtomicBoolean +import com.mongodb.client.model.changestream.{ ChangeStreamDocument, FullDocument } +import org.mongodb.scala.TestMongoClientHelper.hasSingleHost +import org.mongodb.scala._ +import org.mongodb.scala.bson.conversions.Bson +import org.mongodb.scala.bson.{ BsonArray, BsonDocument, BsonNull, BsonString, BsonValue } + +import scala.collection.JavaConverters._ +import scala.concurrent.Await +import scala.language.reflectiveCalls + +// imports required for filters, projections and updates +import org.bson.BsonType +import org.mongodb.scala.model.Aggregates.filter +import org.mongodb.scala.model.Filters.{ and, bsonType, elemMatch, exists, gt, in, lt, lte, or } +import org.mongodb.scala.model.Projections.{ exclude, excludeId, fields, slice } +import org.mongodb.scala.model.Updates.{ combine, currentDate, set } +// end required filters, projections and updates imports + +//scalastyle:off magic.number +class DocumentationExampleSpec extends RequiresMongoDBISpec with FuturesSpec { + + // Implicit functions that execute the Observable and return the results + implicit class ObservableExecutor[T](observable: Observable[T]) { + def execute(): Seq[T] = Await.result(observable, WAIT_DURATION) + } + + implicit class SingleObservableExecutor[T](observable: SingleObservable[T]) { + def execute(): T = Await.result(observable, WAIT_DURATION) + } + // end implicit functions + + "The Scala driver" should "be able to insert" in withCollection { collection => + // Start Example 1 + collection + .insertOne( + Document( + "item" -> "canvas", + "qty" -> 100, + "tags" -> Seq("cotton"), + "size" -> Document("h" -> 28, "w" -> 35.5, "uom" -> "cm") + ) + ) + .execute() + // End Example 1 + + // Start Example 2 + val observable = collection.find(equal("item", "canvas")) + // End Example 2 + + observable.execute().size shouldEqual 1 + + // Start Example 3 + collection + .insertMany( + Seq( + Document( + "item" -> "journal", + "qty" -> 25, + "tags" -> Seq("blank", "red"), + "size" -> Document("h" -> 14, "w" -> 21, "uom" -> "cm") + ), + Document( + "item" -> "mat", + "qty" -> 85, + "tags" -> Seq("gray"), + "size" -> Document("h" -> 27.9, "w" -> 35.5, "uom" -> "cm") + ), + Document( + "item" -> "mousepad", + "qty" -> 25, + "tags" -> Seq("gel", "blue"), + "size" -> Document("h" -> 19, "w" -> 22.85, "uom" -> "cm") + ) + ) + ) + .execute() + // End Example 3 + + collection.countDocuments().execute() shouldEqual 4 + } + + it should "be able to query top level" in withCollection { collection => + // Start Example 6 + collection + .insertMany( + Seq( + Document("""{ item: "journal", qty: 25, size: { h: 14, w: 21, uom: "cm" }, status: "A" }"""), + Document("""{ item: "notebook", qty: 50, size: { h: 8.5, w: 11, uom: "in" }, status: "A" }"""), + Document("""{ item: "paper", qty: 100, size: { h: 8.5, w: 11, uom: "in" }, status: "D" }"""), + Document("""{ item: "planner", qty: 75, size: { h: 22.85, w: 30, uom: "cm" }, status: "D" }"""), + Document("""{ item: "postcard", qty: 45, size: { h: 10, w: 15.25, uom: "cm" }, 
status: "A" }""") + ) + ) + .execute() + // End Example 6 + + collection.countDocuments().execute() shouldEqual 5 + + // Start Example 7 + var findObservable = collection.find(Document()) + // End Example 7 + + findObservable.execute().size shouldEqual 5 + + // Start Example 8 + findObservable = collection.find() + // End Example 8 + + findObservable.execute().size shouldEqual 5 + + // Start Example 9 + findObservable = collection.find(equal("status", "D")) + // End Example 9 + + findObservable.execute().size shouldEqual 2 + + // Start Example 10 + findObservable = collection.find(in("status", "A", "D")) + // End Example 10 + + findObservable.execute().size shouldEqual 5 + + // Start Example 11 + findObservable = collection.find(and(equal("status", "A"), lt("qty", 30))) + // End Example 11 + + findObservable.execute().size shouldEqual 1 + + // Start Example 12 + findObservable = collection.find(or(equal("status", "A"), lt("qty", 30))) + // End Example 12 + + findObservable.execute().size shouldEqual 3 + + // Start Example 13 + findObservable = collection.find(and(equal("status", "A"), or(lt("qty", 30), regex("item", "^p")))) + // End Example 13 + + findObservable.execute().size shouldEqual 2 + } + + it should "be able to query embedded documents" in withCollection { collection => + // Start Example 14 + collection + .insertMany( + Seq( + Document("""{ item: "journal", qty: 25, size: { h: 14, w: 21, uom: "cm" }, status: "A" }"""), + Document("""{ item: "notebook", qty: 50, size: { h: 8.5, w: 11, uom: "in" }, status: "A" }"""), + Document("""{ item: "paper", qty: 100, size: { h: 8.5, w: 11, uom: "in" }, status: "D" }"""), + Document("""{ item: "planner", qty: 75, size: { h: 22.85, w: 30, uom: "cm" }, status: "D" }"""), + Document("""{ item: "postcard", qty: 45, size: { h: 10, w: 15.25, uom: "cm" }, status: "A" }""") + ) + ) + .execute() + // End Example 14 + + collection.countDocuments().execute() shouldEqual 5 + + // Start Example 15 + var findObservable = collection.find(equal("size", Document("h" -> 14, "w" -> 21, "uom" -> "cm"))) + // End Example 15 + + findObservable.execute().size shouldEqual 1 + + // Start Example 16 + findObservable = collection.find(equal("size", Document("w" -> 21, "h" -> 14, "uom" -> "cm"))) + // End Example 16 + + findObservable.execute().size shouldEqual 0 + + // Start Example 17 + findObservable = collection.find(equal("size.uom", "in")) + // End Example 17 + + findObservable.execute().size shouldEqual 2 + + // Start Example 18 + findObservable = collection.find(lt("size.h", 15)) + // End Example 18 + + findObservable.execute().size shouldEqual 4 + + // Start Example 19 + findObservable = collection.find( + and( + lt("size.h", 15), + equal("size.uom", "in"), + equal("status", "D") + ) + ) + // End Example 19 + + findObservable.execute().size shouldEqual 1 + } + + it should "be able to query array" in withCollection { collection => + // Start Example 20 + collection + .insertMany( + Seq( + Document("""{ item: "journal", qty: 25, tags: ["blank", "red"], dim_cm: [ 14, 21 ] }"""), + Document("""{ item: "notebook", qty: 50, tags: ["red", "blank"], dim_cm: [ 14, 21 ] }"""), + Document("""{ item: "paper", qty: 100, tags: ["red", "blank", "plain"], dim_cm: [ 14, 21 ] }"""), + Document("""{ item: "planner", qty: 75, tags: ["blank", "red"], dim_cm: [ 22.85, 30 ] }"""), + Document("""{ item: "postcard", qty: 45, tags: ["blue"], dim_cm: [ 10, 15.25 ] }""") + ) + ) + .execute() + // End Example 20 + + collection.countDocuments().execute() shouldEqual 5 + + // Start Example 
21 + var findObservable = collection.find(equal("tags", Seq("red", "blank"))) + // End Example 21 + + findObservable.execute().size shouldEqual 1 + + // Start Example 22 + findObservable = collection.find(all("tags", "red", "blank")) + // End Example 22 + + findObservable.execute().size shouldEqual 4 + + // Start Example 23 + findObservable = collection.find(equal("tags", "red")) + // End Example 23 + + findObservable.execute().size shouldEqual 4 + + // Start Example 24 + findObservable = collection.find(gt("dim_cm", 25)) + // End Example 24 + + findObservable.execute().size shouldEqual 1 + + // Start Example 25 + findObservable = collection.find(and(gt("dim_cm", 15), lt("dim_cm", 20))) + // End Example 25 + + findObservable.execute().size shouldEqual 4 + + // Start Example 26 + findObservable = collection.find(elemMatch("dim_cm", Document("$gt" -> 22, "$lt" -> 30))) + + // End Example 26 + + findObservable.execute().size shouldEqual 1 + + // Start Example 27 + findObservable = collection.find(gt("dim_cm.1", 25)) + // End Example 27 + + findObservable.execute().size shouldEqual 1 + + // Start Example 28 + findObservable = collection.find(size("tags", 3)) + // End Example 28 + + findObservable.execute().size shouldEqual 1 + } + + it should "query array of documents" in withCollection { collection => + // Start Example 29 + collection + .insertMany( + Seq( + Document("""{ item: "journal", instock: [ { warehouse: "A", qty: 5 }, { warehouse: "C", qty: 15 } ] }"""), + Document("""{ item: "notebook", instock: [ { warehouse: "C", qty: 5 } ] }"""), + Document("""{ item: "paper", instock: [ { warehouse: "A", qty: 60 }, { warehouse: "B", qty: 15 } ] }"""), + Document("""{ item: "planner", instock: [ { warehouse: "A", qty: 40 }, { warehouse: "B", qty: 5 } ] }"""), + Document("""{ item: "postcard", instock: [ { warehouse: "B", qty: 15 }, { warehouse: "C", qty: 35 } ] }""") + ) + ) + .execute() + // End Example 29 + + collection.countDocuments().execute() shouldEqual 5 + + // Start Example 30 + var findObservable = collection.find(equal("instock", Document("warehouse" -> "A", "qty" -> 5))) + // End Example 30 + + findObservable.execute().size shouldEqual 1 + + // Start Example 31 + findObservable = collection.find(equal("instock", Document("qty" -> 5, "warehouse" -> "A"))) + // End Example 31 + + findObservable.execute().size shouldEqual 0 + + // Start Example 32 + findObservable = collection.find(lte("instock.0.qty", 20)) + // End Example 32 + + findObservable.execute().size shouldEqual 3 + + // Start Example 33 + findObservable = collection.find(lte("instock.qty", 20)) + // End Example 33 + + findObservable.execute().size shouldEqual 5 + + // Start Example 34 + findObservable = collection.find(elemMatch("instock", Document("qty" -> 5, "warehouse" -> "A"))) + // End Example 34 + + findObservable.execute().size shouldEqual 1 + + // Start Example 35 + findObservable = collection.find(elemMatch("instock", Document("""{ qty: { $gt: 10, $lte: 20 } }"""))) + // End Example 35 + + findObservable.execute().size shouldEqual 3 + + // Start Example 36 + findObservable = collection.find(and(gt("instock.qty", 10), lte("instock.qty", 20))) + // End Example 36 + + findObservable.execute().size shouldEqual 4 + + // Start Example 37 + findObservable = collection.find(and(equal("instock.qty", 5), equal("instock.warehouse", "A"))) + // End Example 37 + + findObservable.execute().size shouldEqual 2 + } + + it should "query null and missing fields" in withCollection { collection => + // Start Example 38 + collection + 
.insertMany( + Seq( + Document("""{"_id": 1, "item": null}"""), + Document("""{"_id": 2}""") + ) + ) + .execute() + // End Example 38 + + collection.countDocuments().execute() shouldEqual 2 + + // Start Example 39 + var findObservable = collection.find(equal("item", BsonNull())) + // End Example 39 + + findObservable.execute().size shouldEqual 2 + + // Start Example 40 + findObservable = collection.find(bsonType("item", BsonType.NULL)) + // End Example 40 + + findObservable.execute().size shouldEqual 1 + + // Start Example 41 + findObservable = collection.find(exists("item", exists = false)) + // End Example 41 + + findObservable.execute().size shouldEqual 1 + } + + it should "be able to project fields" in withCollection { collection => + // Start Example 42 + collection + .insertMany( + Seq( + Document( + """{ item: "journal", status: "A", size: { h: 14, w: 21, uom: "cm" }, instock: [ { warehouse: "A", qty: 5 } ] }""" + ), + Document( + """{ item: "notebook", status: "A", size: { h: 8.5, w: 11, uom: "in" }, instock: [ { warehouse: "C", qty: 5 } ] }""" + ), + Document( + """{ item: "paper", status: "D", size: { h: 8.5, w: 11, uom: "in" }, instock: [ { warehouse: "A", qty: 60 } ] }""" + ), + Document( + """{ item: "planner", status: "D", size: { h: 22.85, w: 30, uom: "cm" }, instock: [ { warehouse: "A", qty: 40 } ] }""" + ), + Document("""{ item: "postcard", status: "A", size: { h: 10, w: 15.25, uom: "cm" }, + instock: [ { warehouse: "B", qty: 15 }, { warehouse: "C", qty: 35 } ] }""") + ) + ) + .execute() + // End Example 42 + + collection.countDocuments().execute() shouldEqual 5 + + // Start Example 43 + var findObservable = collection.find(equal("status", "A")) + // End Example 43 + + findObservable.execute().size shouldEqual 3 + + // Start Example 44 + findObservable = collection.find(equal("status", "A")).projection(include("item", "status")) + // End Example 44 + + findObservable.execute().foreach((doc: Document) => doc.keys should contain only ("_id", "item", "status")) + + // Start Example 45 + findObservable = collection + .find(equal("status", "A")) + .projection(fields(include("item", "status"), excludeId())) + // End Example 45 + + findObservable.execute().foreach((doc: Document) => doc.keys should contain only ("item", "status")) + + // Start Example 46 + findObservable = collection.find(equal("status", "A")).projection(exclude("item", "status")) + // End Example 46 + + findObservable.execute().foreach((doc: Document) => doc.keys should contain only ("_id", "size", "instock")) + + // Start Example 47 + findObservable = collection.find(equal("status", "A")).projection(include("item", "status", "size.uom")) + // End Example 47 + + findObservable + .execute() + .foreach((doc: Document) => { + doc.keys should contain only ("_id", "item", "status", "size") + doc.get[BsonDocument]("size").get.keys should contain only "uom" + }) + + // Start Example 48 + findObservable = collection.find(equal("status", "A")).projection(exclude("size.uom")) + // End Example 48 + + findObservable + .execute() + .foreach((doc: Document) => { + doc.keys should contain only ("_id", "item", "instock", "status", "size") + doc.get[BsonDocument]("size").get.keys should contain only ("h", "w") + }) + + // Start Example 49 + findObservable = collection.find(equal("status", "A")).projection(include("item", "status", "instock.qty")) + // End Example 49 + + findObservable + .execute() + .foreach((doc: Document) => { + doc.keys should contain only ("_id", "item", "instock", "status") + doc + 
.get[BsonArray]("instock") + .get + .asScala + .foreach((doc: BsonValue) => doc.asInstanceOf[BsonDocument].keys should contain only "qty") + }) + + // Start Example 50 + findObservable = collection + .find(equal("status", "A")) + .projection(fields(include("item", "status"), slice("instock", -1))) + // End Example 50 + + findObservable + .execute() + .foreach((doc: Document) => { + doc.keys should contain only ("_id", "item", "instock", "status") + doc.get[BsonArray]("instock").get.size() shouldEqual 1 + }) + } + + it should "be able to update" in withCollection { collection => + assume(serverVersionAtLeast(List(2, 6, 0))) + + // Start Example 51 + collection + .insertMany( + Seq( + Document("""{ item: "canvas", qty: 100, size: { h: 28, w: 35.5, uom: "cm" }, status: "A" }"""), + Document("""{ item: "journal", qty: 25, size: { h: 14, w: 21, uom: "cm" }, status: "A" }"""), + Document("""{ item: "mat", qty: 85, size: { h: 27.9, w: 35.5, uom: "cm" }, status: "A" }"""), + Document("""{ item: "mousepad", qty: 25, size: { h: 19, w: 22.85, uom: "cm" }, status: "P" }"""), + Document("""{ item: "notebook", qty: 50, size: { h: 8.5, w: 11, uom: "in" }, status: "P" }"""), + Document("""{ item: "paper", qty: 100, size: { h: 8.5, w: 11, uom: "in" }, status: "D" }"""), + Document("""{ item: "planner", qty: 75, size: { h: 22.85, w: 30, uom: "cm" }, status: "D" }"""), + Document("""{ item: "postcard", qty: 45, size: { h: 10, w: 15.25, uom: "cm" }, status: "A" }"""), + Document("""{ item: "sketchbook", qty: 80, size: { h: 14, w: 21, uom: "cm" }, status: "A" }"""), + Document("""{ item: "sketch pad", qty: 95, size: { h: 22.85, w: 30.5, uom: "cm" }, status: "A" }""") + ) + ) + .execute() + // End Example 51 + + collection.countDocuments().execute() shouldEqual 10 + + // Start Example 52 + collection + .updateOne( + equal("item", "paper"), + combine(set("size.uom", "cm"), set("status", "P"), currentDate("lastModified")) + ) + .execute() + // End Example 52 + + collection + .find(equal("item", "paper")) + .execute() + .foreach((doc: Document) => { + doc.get[BsonDocument]("size").get.get("uom") shouldEqual BsonString("cm") + doc.get[BsonString]("status").get shouldEqual BsonString("P") + doc.containsKey("lastModified") shouldBe true + }) + + // Start Example 53 + collection + .updateMany(lt("qty", 50), combine(set("size.uom", "in"), set("status", "P"), currentDate("lastModified"))) + .execute() + // End Example 53 + + collection + .find(lt("qty", 50)) + .execute() + .foreach((doc: Document) => { + doc.get[BsonDocument]("size").get.get("uom") shouldEqual BsonString("in") + doc.get[BsonString]("status").get shouldEqual BsonString("P") + doc.containsKey("lastModified") shouldBe true + }) + + // Start Example 54 + collection + .replaceOne( + equal("item", "paper"), + Document("""{ item: "paper", instock: [ { warehouse: "A", qty: 60 }, { warehouse: "B", qty: 40 } ] }""") + ) + .execute() + // End Example 54 + + collection + .find(equal("item", "paper")) + .projection(excludeId()) + .execute() + .foreach((doc: Document) => + doc shouldEqual Document( + """{ item: "paper", instock: [ { warehouse: "A", qty: 60 }, { warehouse: "B", qty: 40 } ] }""" + ) + ) + } + + it should "be able to delete" in withCollection { collection => + // Start Example 55 + collection + .insertMany( + Seq( + Document("""{ item: "journal", qty: 25, size: { h: 14, w: 21, uom: "cm" }, status: "A" }"""), + Document("""{ item: "notebook", qty: 50, size: { h: 8.5, w: 11, uom: "in" }, status: "A" }"""), + Document("""{ item: "paper", qty: 100, size: { 
h: 8.5, w: 11, uom: "in" }, status: "D" }"""),
+          Document("""{ item: "planner", qty: 75, size: { h: 22.85, w: 30, uom: "cm" }, status: "D" }"""),
+          Document("""{ item: "postcard", qty: 45, size: { h: 10, w: 15.25, uom: "cm" }, status: "A" }""")
+        )
+      )
+      .execute()
+    // End Example 55
+
+    collection.countDocuments().execute() shouldEqual 5
+
+    // Start Example 57
+    collection.deleteMany(equal("status", "A")).execute()
+    // End Example 57
+
+    collection.countDocuments().execute() shouldEqual 2
+
+    // Start Example 58
+    collection.deleteOne(equal("status", "D")).execute()
+    // End Example 58
+
+    collection.countDocuments().execute() shouldEqual 1
+
+    // Start Example 56
+    collection.deleteMany(Document()).execute()
+    // End Example 56
+
+    collection.countDocuments().execute() shouldEqual 0
+  }
+
+  it should "be able to watch" in withCollection { collection =>
+    assume(serverVersionAtLeast(List(3, 6, 0)) && !hasSingleHost)
+    val inventory: MongoCollection[Document] = collection
+    val stop: AtomicBoolean = new AtomicBoolean(false)
+    // Background writer: keep inserting documents so the change streams have events to observe.
+    new Thread(new Runnable {
+      override def run(): Unit = {
+        while (!stop.get) {
+          collection.insertOne(Document()).execute() // subscribe via execute(); an unsubscribed observable performs no insert
+          try {
+            Thread.sleep(10)
+          } catch {
+            case e: InterruptedException =>
+            // ignore
+          }
+        }
+      }
+    }).start()
+
+    val observer = new Observer[ChangeStreamDocument[Document]] {
+      def getResumeToken: BsonDocument = Document().underlying
+      override def onNext(result: ChangeStreamDocument[Document]): Unit = {}
+      override def onError(e: Throwable): Unit = {}
+      override def onComplete(): Unit = {}
+    }
+
+    // Start Changestream Example 1
+    var observable: ChangeStreamObservable[Document] = inventory.watch()
+    observable.subscribe(observer)
+    // End Changestream Example 1
+
+    // Start Changestream Example 2
+    observable = inventory.watch.fullDocument(FullDocument.UPDATE_LOOKUP)
+    observable.subscribe(observer)
+    // End Changestream Example 2
+
+    // Start Changestream Example 3
+    val resumeToken: BsonDocument = observer.getResumeToken
+    observable = inventory.watch.resumeAfter(resumeToken)
+    observable.subscribe(observer)
+    // End Changestream Example 3
+
+    // Start Changestream Example 4
+    val pipeline: List[Bson] =
+      List(filter(or(Document("{'fullDocument.username': 'alice'}"), in("operationType", List("delete")))))
+    observable = inventory.watch(pipeline)
+    observable.subscribe(observer)
+    // End Changestream Example 4
+
+    stop.set(true)
+  }
+
+  // Matcher Trait overrides...
+  def equal[TItem](fieldName: String, value: TItem): Bson = org.mongodb.scala.model.Filters.equal(fieldName, value)
+  def regex(fieldName: String, pattern: String): Bson = org.mongodb.scala.model.Filters.regex(fieldName, pattern)
+  def all[TItem](fieldName: String, values: TItem*): Bson = org.mongodb.scala.model.Filters.all(fieldName, values: _*)
+  def size(fieldName: String, size: Int): Bson = org.mongodb.scala.model.Filters.size(fieldName, size)
+  def include(fieldNames: String*): Bson = org.mongodb.scala.model.Projections.include(fieldNames: _*)
+
+}
diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/documentation/DocumentationTransactionsExampleSpec.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/documentation/DocumentationTransactionsExampleSpec.scala
new file mode 100644
index 00000000000..9ea71553f54
--- /dev/null
+++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/documentation/DocumentationTransactionsExampleSpec.scala
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.documentation + +import org.mongodb.scala.TestMongoClientHelper.hasSingleHost +import org.mongodb.scala._ +import org.mongodb.scala.model.{ Filters, Updates } +import org.mongodb.scala.result.{ InsertOneResult, UpdateResult } + +import scala.concurrent.Await + +//scalastyle:off magic.number regex +class DocumentationTransactionsExampleSpec extends RequiresMongoDBISpec { + + // Implicit functions that execute the Observable and return the results + implicit class ObservableExecutor[T](observable: Observable[T]) { + def execute(): Seq[T] = Await.result(observable.toFuture(), WAIT_DURATION) + } + + implicit class SingleObservableExecutor[T](observable: SingleObservable[T]) { + def execute(): T = Await.result(observable.toFuture(), WAIT_DURATION) + } + // end implicit functions + + "The Scala driver" should "be able to commit a transaction" in withClient { client => + assume(serverVersionAtLeast(List(4, 0, 0)) && !hasSingleHost) + client.getDatabase("hr").drop().execute() + client.getDatabase("hr").createCollection("employees").execute() + client.getDatabase("hr").createCollection("events").execute() + + updateEmployeeInfoWithRetry(client).execute() + client.getDatabase("hr").drop().execute() + } + + def updateEmployeeInfo( + database: MongoDatabase, + observable: SingleObservable[ClientSession] + ): SingleObservable[ClientSession] = { + observable.map(clientSession => { + val employeesCollection = database.getCollection("employees") + val eventsCollection = database.getCollection("events") + + val transactionOptions = TransactionOptions + .builder() + .readPreference(ReadPreference.primary()) + .readConcern(ReadConcern.SNAPSHOT) + .writeConcern(WriteConcern.MAJORITY) + .build() + clientSession.startTransaction(transactionOptions) + employeesCollection + .updateOne(clientSession, Filters.eq("employee", 3), Updates.set("status", "Inactive")) + .subscribe((res: UpdateResult) => println(res)) + eventsCollection + .insertOne( + clientSession, + Document("employee" -> 3, "status" -> Document("new" -> "Inactive", "old" -> "Active")) + ) + .subscribe((res: InsertOneResult) => ()) + + clientSession + }) + } + + def commitAndRetry(observable: SingleObservable[Unit]): SingleObservable[Unit] = { + observable.recoverWith({ + case e: MongoException if e.hasErrorLabel(MongoException.UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL) => { + println("UnknownTransactionCommitResult, retrying commit operation ...") + commitAndRetry(observable) + } + case e: Exception => { + println(s"Exception during commit ...: $e") + throw e + } + }) + } + + def runTransactionAndRetry(observable: SingleObservable[Unit]): SingleObservable[Unit] = { + observable.recoverWith({ + case e: MongoException if e.hasErrorLabel(MongoException.TRANSIENT_TRANSACTION_ERROR_LABEL) => { + println("TransientTransactionError, aborting transaction and retrying ...") + runTransactionAndRetry(observable) + } + }) + } + + def updateEmployeeInfoWithRetry(client: 
MongoClient): SingleObservable[Unit] = { + + val database = client.getDatabase("hr") + val updateEmployeeInfoObservable: SingleObservable[ClientSession] = + updateEmployeeInfo(database, client.startSession()) + val commitTransactionObservable: SingleObservable[Unit] = + updateEmployeeInfoObservable.flatMap(clientSession => clientSession.commitTransaction()) + val commitAndRetryObservable: SingleObservable[Unit] = commitAndRetry(commitTransactionObservable) + + runTransactionAndRetry(commitAndRetryObservable) + } +} diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/gridfs/GridFSObservableSpec.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/gridfs/GridFSObservableSpec.scala new file mode 100644 index 00000000000..e6ca96183e6 --- /dev/null +++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/gridfs/GridFSObservableSpec.scala @@ -0,0 +1,388 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.gridfs + +import java.io.ByteArrayOutputStream +import java.nio.ByteBuffer +import java.nio.channels.Channels +import java.util.UUID +import org.bson.UuidRepresentation +import org.bson.codecs.UuidCodec +import org.bson.codecs.configuration.CodecRegistries +import org.mongodb.scala._ +import org.mongodb.scala.bson.{ BsonBinary, BsonDocument, BsonString, ObjectId } +import org.mongodb.scala.model.{ Filters, Updates } +import org.scalatest.BeforeAndAfterEach +import org.scalatest.exceptions.TestFailedException + +import scala.annotation.tailrec +import scala.concurrent.Await + +class GridFSObservableSpec extends RequiresMongoDBISpec with FuturesSpec with BeforeAndAfterEach { + private val filesCollectionName = "fs.files" + private val chunksCollectionName = "fs.chunks" + private var _gridFSBucket: Option[GridFSBucket] = None + private var _filesCollection: Option[MongoCollection[GridFSFile]] = None + private var _chunksCollection: Option[MongoCollection[Document]] = None + private val singleChunkString = "GridFS" + private val multiChunkString = f"${singleChunkString}%1305600s" + + override def beforeEach(): Unit = { + val mongoDatabase = mongoClient().getDatabase(databaseName) + _filesCollection = Some(mongoDatabase.getCollection[GridFSFile](filesCollectionName)) + _chunksCollection = Some(mongoDatabase.getCollection(chunksCollectionName)) + _filesCollection.foreach(coll => Await.result(coll.drop().toFuture(), WAIT_DURATION)) + _chunksCollection.foreach(coll => Await.result(coll.drop().toFuture(), WAIT_DURATION)) + _gridFSBucket = Some(GridFSBucket(mongoDatabase)) + } + + override def afterEach(): Unit = { + withDatabase(db => Await.result(db.drop().toFuture(), WAIT_DURATION)) + } + + private def gridFSBucket = _gridFSBucket.get + + private def filesCollection = _filesCollection.get + + private def chunksCollection = _chunksCollection.get + + "The Scala driver" should "round trip a small file" in { + val contentBytes = singleChunkString.getBytes() + val expectedLength = 
contentBytes.length + + val fileId = + gridFSBucket.uploadFromObservable("myFile", Observable(Seq(ByteBuffer.wrap(contentBytes)))).head().futureValue + filesCollection.countDocuments().head().futureValue should equal(1) + chunksCollection.countDocuments().head().futureValue should equal(1) + + val fileInfo = gridFSBucket.find().filter(Filters.eq("_id", fileId)).head().futureValue + fileInfo.getObjectId should equal(fileId) + fileInfo.getChunkSize should equal(gridFSBucket.chunkSizeBytes) + fileInfo.getLength should equal(expectedLength) + Option(fileInfo.getMetadata) should equal(None) + } + + it should "round trip a large file" in { + val contentBytes = multiChunkString.getBytes() + val expectedLength = contentBytes.length + + val fileId = + gridFSBucket.uploadFromObservable("myFile", Observable(Seq(ByteBuffer.wrap(contentBytes)))).head().futureValue + filesCollection.countDocuments().head().futureValue should equal(1) + chunksCollection.countDocuments().head().futureValue should equal(5) + + val fileInfo = gridFSBucket.find().filter(Filters.eq("_id", fileId)).head().futureValue + fileInfo.getObjectId should equal(fileId) + fileInfo.getChunkSize should equal(gridFSBucket.chunkSizeBytes) + fileInfo.getLength should equal(expectedLength) + Option(fileInfo.getMetadata) should equal(None) + } + + it should "round trip with small chunks" in { + val contentSize = 1024 * 500 + val chunkSize = 10 + val contentBytes = new Array[Byte](contentSize / 2) + scala.util.Random.nextBytes(contentBytes) + val options = new GridFSUploadOptions().chunkSizeBytes(chunkSize) + + val fileId = gridFSBucket + .uploadFromObservable( + "myFile", + Observable(Seq(ByteBuffer.wrap(contentBytes), ByteBuffer.wrap(contentBytes))), + options + ) + .head() + .futureValue + filesCollection.countDocuments().head().futureValue should equal(1) + chunksCollection.countDocuments().head().futureValue should equal(contentSize / chunkSize) + + val data = gridFSBucket.downloadToObservable(fileId).futureValue + concatByteBuffers(data) should equal( + concatByteBuffers(Seq(ByteBuffer.wrap(contentBytes), ByteBuffer.wrap(contentBytes))) + ) + } + + it should "round trip with data larger than the internal bufferSize" in { + val contentSize = 1024 * 1024 * 5 + val chunkSize = 1024 * 1024 + val contentBytes = new Array[Byte](contentSize) + scala.util.Random.nextBytes(contentBytes) + val options = new GridFSUploadOptions().chunkSizeBytes(chunkSize) + + val fileId = gridFSBucket + .uploadFromObservable("myFile", Observable(Seq(ByteBuffer.wrap(contentBytes))), options) + .head() + .futureValue + filesCollection.countDocuments().head().futureValue should equal(1) + chunksCollection.countDocuments().head().futureValue should equal(contentSize / chunkSize) + + val data = gridFSBucket.downloadToObservable(fileId).futureValue + concatByteBuffers(data) should equal(contentBytes) + } + + it should "handle custom ids" in { + val contentBytes = multiChunkString.getBytes() + val fileId = BsonString("myFile") + gridFSBucket + .uploadFromObservable(fileId, "myFile", Observable(Seq(ByteBuffer.wrap(contentBytes)))) + .head() + .futureValue + var data = gridFSBucket.downloadToObservable(fileId).futureValue + + concatByteBuffers(data) should equal(contentBytes) + + gridFSBucket.rename(fileId, "newName").futureValue + data = gridFSBucket.downloadToObservable("newName").futureValue + + concatByteBuffers(data) should equal(contentBytes) + + gridFSBucket.delete(fileId).futureValue + filesCollection.countDocuments().head().futureValue should equal(0) + 
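+    // deleting by id removes the chunk documents along with the files entry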
chunksCollection.countDocuments().head().futureValue should equal(0) + } + + it should "throw a chunk not found error when there are no chunks" in { + val contentSize = 1024 * 1024 + val contentBytes = new Array[Byte](contentSize) + scala.util.Random.nextBytes(contentBytes) + + val fileId = gridFSBucket.uploadFromObservable("myFile", Observable(Seq(ByteBuffer.wrap(contentBytes)))).futureValue + chunksCollection.deleteMany(Filters.eq("files_id", fileId)).futureValue + + val caught = intercept[TestFailedException] { + gridFSBucket.downloadToObservable(fileId).futureValue + } + + caught.cause.exists(t => t.isInstanceOf[MongoGridFSException]) should equal(true) + } + + it should "round trip with a bufferSizeBytes of 4096" in { + val contentSize = 1024 * 1024 + val chunkSize = 1024 + val bufferSizeBytes = 4096 + val contentBytes = new Array[Byte](contentSize) + scala.util.Random.nextBytes(contentBytes) + val options = new GridFSUploadOptions().chunkSizeBytes(chunkSize) + + val fileId = gridFSBucket + .uploadFromObservable("myFile", Observable(Seq(ByteBuffer.wrap(contentBytes))), options) + .head() + .futureValue + filesCollection.countDocuments().head().futureValue should equal(1) + chunksCollection.countDocuments().head().futureValue should equal(contentSize / chunkSize) + + val fileInfo = gridFSBucket.find().filter(Filters.eq("_id", fileId)).head().futureValue + fileInfo.getObjectId should equal(fileId) + fileInfo.getChunkSize should equal(chunkSize) + fileInfo.getLength should equal(contentSize) + Option(fileInfo.getMetadata) should equal(None) + + val data = gridFSBucket.downloadToObservable(fileId).bufferSizeBytes(bufferSizeBytes).futureValue + concatByteBuffers(data) should equal(concatByteBuffers(Seq(ByteBuffer.wrap(contentBytes)))) + } + + it should "handle uploading publisher erroring" in { + val errorMessage = "Failure Propagated" + val source = new Observable[ByteBuffer] { + override def subscribe(observer: Observer[_ >: ByteBuffer]): Unit = + observer.onError(new IllegalArgumentException(errorMessage)) + } + + val caught = intercept[TestFailedException] { + gridFSBucket.uploadFromObservable("myFile", source).futureValue + } + + caught.cause.exists(t => t.isInstanceOf[IllegalArgumentException]) should equal(true) + caught.cause.get.getMessage should equal(errorMessage) + } + + it should "use custom uploadOptions when uploading" in { + val chunkSize = 20 + val metaData = Document("archived" -> false) + val options = new GridFSUploadOptions().chunkSizeBytes(chunkSize).metadata(metaData) + val contentBytes = multiChunkString.getBytes() + val expectedLength = contentBytes.length + val expectedNoChunks = Math.ceil((expectedLength.toDouble) / chunkSize).toInt + + val fileId = gridFSBucket + .uploadFromObservable("myFile", Observable(Seq(ByteBuffer.wrap(contentBytes))), options) + .head() + .futureValue + filesCollection.countDocuments().head().futureValue should equal(1) + chunksCollection.countDocuments().head().futureValue should equal(expectedNoChunks) + + val fileInfo = gridFSBucket.find().filter(Filters.eq("_id", fileId)).head().futureValue + fileInfo.getObjectId should equal(fileId) + fileInfo.getChunkSize should equal(chunkSize) + fileInfo.getLength should equal(expectedLength) + Option(fileInfo.getMetadata).isEmpty should equal(false) + fileInfo.getMetadata.get("archived") should equal(false) + + val data = gridFSBucket.downloadToObservable(fileId).futureValue + concatByteBuffers(data) should equal(concatByteBuffers(Seq(ByteBuffer.wrap(contentBytes)))) + } + + it should "be able to 
open by name" in { + val filename = "myFile" + val contentBytes = singleChunkString.getBytes() + + gridFSBucket.uploadFromObservable(filename, Observable(Seq(ByteBuffer.wrap(contentBytes)))).head().futureValue + + val data = gridFSBucket.downloadToObservable(filename).futureValue + concatByteBuffers(data) should equal(concatByteBuffers(Seq(ByteBuffer.wrap(contentBytes)))) + } + + it should "be able to handle missing file" in { + val caught = intercept[TestFailedException] { + gridFSBucket.downloadToObservable("myFile").futureValue + } + + caught.cause.exists(t => t.isInstanceOf[MongoGridFSException]) should equal(true) + } + + it should "create the indexes as expected" in { + val filesIndexKey: BsonDocument = Document("filename" -> 1, "uploadDate" -> 1).toBsonDocument + val chunksIndexKey: BsonDocument = Document("files_id" -> 1, "n" -> 1).toBsonDocument + + filesCollection.listIndexes().futureValue.map(_.getOrElse("key", Document())) should not contain (filesIndexKey) + chunksCollection.listIndexes().futureValue.map(_.getOrElse("key", Document())) should not contain (chunksIndexKey) + + gridFSBucket + .uploadFromObservable("myFile", Observable(Seq(ByteBuffer.wrap(multiChunkString.getBytes())))) + .futureValue + + filesCollection.listIndexes().futureValue.map(_.getOrElse("key", Document())) should contain(filesIndexKey) + chunksCollection.listIndexes().futureValue.map(_.getOrElse("key", Document())) should contain(chunksIndexKey) + } + + it should "not create indexes if the files collection is not empty" in { + filesCollection.withDocumentClass[Document].insertOne(Document("filename" -> "bad file")).futureValue + + filesCollection.listIndexes().futureValue.size should equal(1) + chunksCollection.listIndexes().futureValue.size should equal(0) + + gridFSBucket + .uploadFromObservable("myFile", Observable(Seq(ByteBuffer.wrap(multiChunkString.getBytes())))) + .futureValue + + filesCollection.listIndexes().futureValue.size should equal(1) + chunksCollection.listIndexes().futureValue.size should equal(1) + } + + it should "use the user provided codec registries for encoding / decoding data" in { + withTempClient( + mongoClientSettingsBuilder + .uuidRepresentation(UuidRepresentation.STANDARD) + .build(), + client => { + val database = client.getDatabase(databaseName) + val uuid = UUID.randomUUID() + val fileMeta = new org.bson.Document("uuid", uuid) + val bucket = GridFSBucket(database) + + val fileId = bucket + .uploadFromObservable( + "myFile", + Observable(Seq(ByteBuffer.wrap(multiChunkString.getBytes()))), + new GridFSUploadOptions().metadata(fileMeta) + ) + .head() + .futureValue + + val fileAsDocument = filesCollection.find[BsonDocument]().head().futureValue + fileAsDocument.getDocument("metadata").getBinary("uuid").getType should equal(4.toByte) + fileAsDocument.getDocument("metadata").getBinary("uuid").asUuid() should equal(uuid) + } + ) + } + + it should "handle missing file name data when downloading" in { + val fileId = gridFSBucket + .uploadFromObservable("myFile", Observable(Seq(ByteBuffer.wrap(multiChunkString.getBytes())))) + .head() + .futureValue + + filesCollection.updateOne(Filters.eq("_id", fileId), Updates.unset("filename")).futureValue + val data = gridFSBucket.downloadToObservable(fileId).futureValue + + concatByteBuffers(data) should equal(multiChunkString.getBytes()) + } + + it should "cleanup when unsubscribing" in { + val contentSize = 1024 * 1024 + val contentBytes = new Array[Byte](contentSize) + scala.util.Random.nextBytes(contentBytes) + + trait 
SubscriptionObserver[T] extends Observer[T] { + def subscription(): Subscription + } + + val observer = new SubscriptionObserver[ObjectId] { + var s: Option[Subscription] = None + var completed: Boolean = false + def subscription(): Subscription = s.get + override def onSubscribe(subscription: Subscription): Unit = + s = Some(subscription) + + override def onNext(result: ObjectId): Unit = {} + + override def onError(e: Throwable): Unit = {} + + override def onComplete(): Unit = completed = true + } + gridFSBucket + .uploadFromObservable("myFile", Observable(List.fill(1024)(ByteBuffer.wrap(contentBytes)))) + .subscribe(observer) + + observer.subscription().request(1) + + retry(10)(() => chunksCollection.countDocuments().futureValue should be > 0L) + filesCollection.countDocuments().futureValue should equal(0) + + observer.subscription().unsubscribe() + + if (!observer.completed) { + retry(50)(() => chunksCollection.countDocuments().futureValue should equal(0)) + filesCollection.countDocuments().futureValue should equal(0) + } + } + + @tailrec + private def retry[T](n: Int)(fn: () => T): T = { + try { + fn() + } catch { + case e: Exception => + if (n > 1) { + Thread.sleep(250) + retry(n - 1)(fn) + } else { + throw e + } + } + } + + private def concatByteBuffers(buffers: Seq[ByteBuffer]): Array[Byte] = { + val outputStream = new ByteArrayOutputStream() + val channel = Channels.newChannel(outputStream) + buffers.map(channel.write) + outputStream.close() + channel.close() + outputStream.toByteArray + } + +} diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncAggregateIterable.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncAggregateIterable.scala new file mode 100644 index 00000000000..d9cec1ede39 --- /dev/null +++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncAggregateIterable.scala @@ -0,0 +1,111 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+package org.mongodb.scala.syncadapter
+
+import com.mongodb.ExplainVerbosity
+import com.mongodb.client.AggregateIterable
+import com.mongodb.client.model.Collation
+import org.bson.conversions.Bson
+import org.bson.{ BsonValue, Document }
+import org.mongodb.scala.AggregateObservable
+import org.mongodb.scala.TimeoutMode
+import org.mongodb.scala.bson.DefaultHelper.DefaultsTo
+
+import java.util.concurrent.TimeUnit
+import scala.concurrent.duration.Duration
+import scala.reflect.ClassTag
+
+case class SyncAggregateIterable[T](wrapped: AggregateObservable[T])
+    extends SyncMongoIterable[T]
+    with AggregateIterable[T] {
+  override def toCollection(): Unit = wrapped.toCollection().toFuture().get()
+
+  override def allowDiskUse(allowDiskUse: java.lang.Boolean): AggregateIterable[T] = {
+    wrapped.allowDiskUse(allowDiskUse)
+    this
+  }
+
+  override def batchSize(batchSize: Int): AggregateIterable[T] = {
+    wrapped.batchSize(batchSize)
+    this
+  }
+
+  override def timeoutMode(timeoutMode: TimeoutMode): AggregateIterable[T] = {
+    wrapped.timeoutMode(timeoutMode)
+    this
+  }
+
+  override def maxTime(maxTime: Long, timeUnit: TimeUnit): AggregateIterable[T] = {
+    wrapped.maxTime(Duration(maxTime, timeUnit)) // the Scala API takes a Duration, matching maxAwaitTime below
+    this
+  }
+
+  override def maxAwaitTime(maxAwaitTime: Long, timeUnit: TimeUnit): AggregateIterable[T] = {
+    wrapped.maxAwaitTime(Duration(maxAwaitTime, timeUnit))
+    this
+  }
+
+  override def bypassDocumentValidation(bypassDocumentValidation: java.lang.Boolean): AggregateIterable[T] = {
+    wrapped.bypassDocumentValidation(bypassDocumentValidation)
+    this
+  }
+
+  override def collation(collation: Collation): AggregateIterable[T] = {
+    wrapped.collation(collation)
+    this
+  }
+
+  override def comment(comment: String): AggregateIterable[T] = {
+    wrapped.comment(comment)
+    this
+  }
+
+  override def comment(comment: BsonValue): AggregateIterable[T] = {
+    wrapped.comment(comment)
+    this
+  }
+
+  override def let(variables: Bson): AggregateIterable[T] = {
+    wrapped.let(variables)
+    this
+  }
+
+  override def hint(hint: Bson): AggregateIterable[T] = {
+    wrapped.hint(hint)
+    this
+  }
+
+  override def hintString(hint: String): AggregateIterable[T] = {
+    wrapped.hintString(hint)
+    this
+  }
+
+  override def explain(): Document = wrapped.explain().toFuture().get()
+
+  override def explain(verbosity: ExplainVerbosity): Document = wrapped.explain(verbosity).toFuture().get()
+
+  override def explain[E](explainResultClass: Class[E]): E =
+    wrapped
+      .explain[E]()(DefaultsTo.overrideDefault[E, org.mongodb.scala.Document], ClassTag(explainResultClass))
+      .toFuture()
+      .get()
+
+  override def explain[E](explainResultClass: Class[E], verbosity: ExplainVerbosity): E =
+    wrapped
+      .explain[E](verbosity)(DefaultsTo.overrideDefault[E, org.mongodb.scala.Document], ClassTag(explainResultClass))
+      .toFuture()
+      .get()
+}
diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncChangeStreamIterable.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncChangeStreamIterable.scala
new file mode 100644
index 00000000000..a517d027cd2
--- /dev/null
+++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncChangeStreamIterable.scala
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.syncadapter
+
+import com.mongodb.client.model.Collation
+import com.mongodb.client.model.changestream.{ ChangeStreamDocument, FullDocument, FullDocumentBeforeChange }
+import com.mongodb.client.{ ChangeStreamIterable, MongoChangeStreamCursor }
+import com.mongodb.{ ServerAddress, ServerCursor }
+import org.bson.{ BsonDocument, BsonTimestamp, BsonValue }
+import org.mongodb.scala.ChangeStreamObservable
+
+import java.util.concurrent.TimeUnit
+import scala.concurrent.duration.Duration
+
+case class SyncChangeStreamIterable[T](wrapped: ChangeStreamObservable[T])
+    extends SyncMongoIterable[ChangeStreamDocument[T]]
+    with ChangeStreamIterable[T] {
+
+  override def cursor: MongoChangeStreamCursor[ChangeStreamDocument[T]] = {
+    val wrapped = super.cursor
+    new MongoChangeStreamCursor[ChangeStreamDocument[T]]() {
+      def getResumeToken = throw new UnsupportedOperationException
+      def close(): Unit = wrapped.close()
+      def hasNext: Boolean = wrapped.hasNext
+      def next: ChangeStreamDocument[T] = wrapped.next
+      def available(): Int = wrapped.available
+      def tryNext: ChangeStreamDocument[T] = wrapped.tryNext
+      def getServerCursor: ServerCursor = wrapped.getServerCursor
+      def getServerAddress: ServerAddress = wrapped.getServerAddress
+    }
+  }
+
+  override def fullDocument(fullDocument: FullDocument): ChangeStreamIterable[T] = {
+    wrapped.fullDocument(fullDocument)
+    this
+  }
+
+  override def fullDocumentBeforeChange(fullDocumentBeforeChange: FullDocumentBeforeChange): ChangeStreamIterable[T] = {
+    wrapped.fullDocumentBeforeChange(fullDocumentBeforeChange)
+    this
+  }
+
+  override def resumeAfter(resumeToken: BsonDocument): ChangeStreamIterable[T] = {
+    wrapped.resumeAfter(resumeToken)
+    this
+  }
+
+  override def batchSize(batchSize: Int): ChangeStreamIterable[T] = {
+    wrapped.batchSize(batchSize)
+    this
+  }
+
+  override def maxAwaitTime(maxAwaitTime: Long, timeUnit: TimeUnit): ChangeStreamIterable[T] = {
+    wrapped.maxAwaitTime(Duration(maxAwaitTime, timeUnit)) // the Scala API takes a Duration
+    this
+  }
+
+  override def collation(collation: Collation): ChangeStreamIterable[T] = {
+    wrapped.collation(collation)
+    this
+  }
+
+  override def withDocumentClass[TDocument](clazz: Class[TDocument]) = throw new UnsupportedOperationException
+
+  override def startAtOperationTime(startAtOperationTime: BsonTimestamp): ChangeStreamIterable[T] = {
+    wrapped.startAtOperationTime(startAtOperationTime)
+    this
+  }
+
+  override def startAfter(startAfter: BsonDocument): ChangeStreamIterable[T] = {
+    wrapped.startAfter(startAfter)
+    this
+  }
+
+  override def comment(comment: String): ChangeStreamIterable[T] = {
+    wrapped.comment(comment)
+    this
+  }
+
+  override def comment(comment: BsonValue): ChangeStreamIterable[T] = {
+    wrapped.comment(comment)
+    this
+  }
+
+  override def showExpandedEvents(showExpandedEvents: Boolean): ChangeStreamIterable[T] = {
+    wrapped.showExpandedEvents(showExpandedEvents)
+    this
+  }
+}
diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncClientEncryption.scala
b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncClientEncryption.scala new file mode 100644 index 00000000000..bb2987964db --- /dev/null +++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncClientEncryption.scala @@ -0,0 +1,98 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.syncadapter + +import com.mongodb.ClusterFixture.TIMEOUT_DURATION +import com.mongodb.client.model.{ CreateCollectionOptions, CreateEncryptedCollectionParams } +import com.mongodb.client.model.vault.{ + DataKeyOptions, + EncryptOptions, + RewrapManyDataKeyOptions, + RewrapManyDataKeyResult +} +import com.mongodb.client.result.DeleteResult +import com.mongodb.client.vault.{ ClientEncryption => JClientEncryption } +import com.mongodb.client.{ MongoDatabase => JMongoDatabase } +import org.bson.{ BsonBinary, BsonDocument, BsonValue } +import org.bson.conversions.Bson +import org.mongodb.scala.vault.ClientEncryption +import reactor.core.publisher.Mono + +import java.util.Objects.requireNonNull + +case class SyncClientEncryption(wrapped: ClientEncryption) extends JClientEncryption { + + override def createDataKey(kmsProvider: String): BsonBinary = + requireNonNull(Mono.from(wrapped.createDataKey(kmsProvider, new DataKeyOptions)).block(TIMEOUT_DURATION)) + + override def createDataKey(kmsProvider: String, dataKeyOptions: DataKeyOptions): BsonBinary = + requireNonNull(Mono.from(wrapped.createDataKey(kmsProvider, dataKeyOptions)).block(TIMEOUT_DURATION)) + + override def encrypt(value: BsonValue, options: EncryptOptions): BsonBinary = + requireNonNull(Mono.from(wrapped.encrypt(value, options)).block(TIMEOUT_DURATION)) + + override def encryptExpression(expression: Bson, options: EncryptOptions): BsonDocument = + requireNonNull(Mono.from(wrapped + .encryptExpression(expression.toBsonDocument, options)).block(TIMEOUT_DURATION).toBsonDocument) + + override def decrypt(value: BsonBinary): BsonValue = + requireNonNull(Mono.from(wrapped.decrypt(value)).block(TIMEOUT_DURATION)) + + override def deleteKey(id: BsonBinary): DeleteResult = + requireNonNull(Mono.from(wrapped.deleteKey(id)).block(TIMEOUT_DURATION)) + + override def getKey(id: BsonBinary): BsonDocument = Mono.from(wrapped.getKey(id)).block(TIMEOUT_DURATION) + + override def getKeys = new SyncFindIterable[BsonDocument](wrapped.keys) + + override def addKeyAltName(id: BsonBinary, keyAltName: String): BsonDocument = + Mono.from(wrapped.addKeyAltName(id, keyAltName)).block(TIMEOUT_DURATION) + + override def removeKeyAltName(id: BsonBinary, keyAltName: String): BsonDocument = + Mono.from(wrapped.removeKeyAltName(id, keyAltName)).block(TIMEOUT_DURATION) + + override def getKeyByAltName(keyAltName: String): BsonDocument = + Mono.from(wrapped.getKeyByAltName(keyAltName)).block(TIMEOUT_DURATION) + + override def rewrapManyDataKey(filter: Bson): RewrapManyDataKeyResult = + 
requireNonNull(Mono.from(wrapped.rewrapManyDataKey(filter)).block(TIMEOUT_DURATION)) + + override def rewrapManyDataKey(filter: Bson, options: RewrapManyDataKeyOptions): RewrapManyDataKeyResult = + requireNonNull(Mono.from(wrapped.rewrapManyDataKey(filter, options)).block(TIMEOUT_DURATION)) + + override def createEncryptedCollection( + database: JMongoDatabase, + collectionName: String, + createCollectionOptions: CreateCollectionOptions, + createEncryptedCollectionParams: CreateEncryptedCollectionParams + ): BsonDocument = { + database match { + case syncMongoDatabase: SyncMongoDatabase => + requireNonNull(Mono.from(wrapped.createEncryptedCollection( + syncMongoDatabase.wrapped, + collectionName, + createCollectionOptions, + createEncryptedCollectionParams + )).block(TIMEOUT_DURATION)) + case _ => throw new AssertionError(s"Unexpected database type: ${database.getClass}") + } + } + + override def close(): Unit = { + wrapped.close() + } +} diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncClientSession.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncClientSession.scala new file mode 100644 index 00000000000..2866ce7427d --- /dev/null +++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncClientSession.scala @@ -0,0 +1,99 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.mongodb.scala.syncadapter
+
+import com.mongodb.{ ClientSessionOptions, MongoInterruptedException, ServerAddress, TransactionOptions }
+import com.mongodb.client.{ ClientSession => JClientSession, TransactionBody }
+import com.mongodb.internal.TimeoutContext
+import com.mongodb.session.ServerSession
+import org.bson.{ BsonDocument, BsonTimestamp }
+import org.mongodb.scala._
+import com.mongodb.reactivestreams.client.syncadapter.{ SyncMongoClient => JSyncMongoClient }
+
+case class SyncClientSession(wrapped: ClientSession, originator: Object) extends JClientSession {
+
+  override def getPinnedServerAddress: ServerAddress = wrapped.getPinnedServerAddress
+
+  override def getRecoveryToken: BsonDocument = wrapped.getRecoveryToken
+
+  override def setRecoveryToken(recoveryToken: BsonDocument): Unit = wrapped.setRecoveryToken(recoveryToken)
+
+  override def getOptions: ClientSessionOptions = wrapped.getOptions
+
+  override def isCausallyConsistent: Boolean = wrapped.isCausallyConsistent
+
+  override def getOriginator: Object = originator
+
+  override def getServerSession: ServerSession = wrapped.getServerSession
+
+  override def getOperationTime: BsonTimestamp = wrapped.getOperationTime
+
+  override def advanceOperationTime(operationTime: BsonTimestamp): Unit = wrapped.advanceOperationTime(operationTime)
+
+  override def advanceClusterTime(clusterTime: BsonDocument): Unit = wrapped.advanceClusterTime(clusterTime)
+
+  override def getClusterTime: BsonDocument = wrapped.getClusterTime
+
+  override def setSnapshotTimestamp(snapshotTimestamp: BsonTimestamp): Unit =
+    wrapped.setSnapshotTimestamp(snapshotTimestamp)
+
+  override def getSnapshotTimestamp: BsonTimestamp = wrapped.getSnapshotTimestamp
+
+  override def close(): Unit = {
+    wrapped.close()
+    sleep(JSyncMongoClient.getSleepAfterSessionClose)
+  }
+
+  override def hasActiveTransaction: Boolean = wrapped.hasActiveTransaction
+
+  override def notifyMessageSent: Boolean = wrapped.notifyMessageSent
+
+  override def notifyOperationInitiated(operation: Object): Unit = wrapped.notifyOperationInitiated(operation)
+
+  override def getTransactionOptions: TransactionOptions = wrapped.getTransactionOptions
+
+  override def startTransaction(): Unit = wrapped.startTransaction()
+
+  override def startTransaction(transactionOptions: TransactionOptions): Unit =
+    wrapped.startTransaction(transactionOptions)
+
+  override def commitTransaction(): Unit = wrapped.commitTransaction().toSingle().toFuture().get()
+
+  override def abortTransaction(): Unit = wrapped.abortTransaction().toSingle().toFuture().get()
+
+  override def withTransaction[T](transactionBody: TransactionBody[T]) = throw new UnsupportedOperationException
+
+  override def withTransaction[T](transactionBody: TransactionBody[T], options: TransactionOptions) =
+    throw new UnsupportedOperationException
+
+  override def getTransactionContext: AnyRef = wrapped.getTransactionContext
+
+  override def setTransactionContext(address: ServerAddress, transactionContext: Object): Unit =
+    wrapped.setTransactionContext(address, transactionContext)
+
+  override def clearTransactionContext(): Unit = wrapped.clearTransactionContext()
+
+  private def sleep(millis: Long): Unit = {
+    try Thread.sleep(millis)
+    catch {
+      case e: InterruptedException =>
+        throw new MongoInterruptedException(null, e)
+    }
+  }
+
+  override def getTimeoutContext: TimeoutContext = wrapped.getTimeoutContext
+}
diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncDistinctIterable.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncDistinctIterable.scala
new file mode 100644
index 00000000000..acb8de040cc
--- /dev/null
+++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncDistinctIterable.scala
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.syncadapter
+
+import com.mongodb.client.DistinctIterable
+import com.mongodb.client.model.Collation
+import org.bson.BsonValue
+import org.bson.conversions.Bson
+import org.mongodb.scala.{ DistinctObservable, TimeoutMode }
+
+import java.util.concurrent.TimeUnit
+import scala.concurrent.duration.Duration
+
+case class SyncDistinctIterable[T](wrapped: DistinctObservable[T])
+    extends SyncMongoIterable[T]
+    with DistinctIterable[T] {
+  override def filter(filter: Bson): DistinctIterable[T] = {
+    wrapped.filter(filter)
+    this
+  }
+
+  override def maxTime(maxTime: Long, timeUnit: TimeUnit): DistinctIterable[T] = {
+    wrapped.maxTime(Duration(maxTime, timeUnit))
+    this
+  }
+
+  override def batchSize(batchSize: Int): DistinctIterable[T] = {
+    wrapped.batchSize(batchSize)
+    this
+  }
+
+  override def timeoutMode(timeoutMode: TimeoutMode): DistinctIterable[T] = {
+    wrapped.timeoutMode(timeoutMode)
+    this
+  }
+
+  override def collation(collation: Collation): DistinctIterable[T] = {
+    wrapped.collation(collation)
+    this
+  }
+
+  override def comment(comment: String): DistinctIterable[T] = {
+    wrapped.comment(comment)
+    this
+  }
+
+  override def comment(comment: BsonValue): DistinctIterable[T] = {
+    wrapped.comment(comment)
+    this
+  }
+
+  override def hint(hint: Bson): DistinctIterable[T] = {
+    wrapped.hint(hint)
+    this
+  }
+
+  override def hintString(hint: String): DistinctIterable[T] = {
+    wrapped.hintString(hint)
+    this
+  }
+
+}
diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncFindIterable.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncFindIterable.scala
new file mode 100644
index 00000000000..505241ab39a
--- /dev/null
+++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncFindIterable.scala
@@ -0,0 +1,163 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.syncadapter
+
+import com.mongodb.client.FindIterable
+import com.mongodb.client.model.Collation
+import com.mongodb.{ CursorType, ExplainVerbosity }
+import org.bson.Document
+import org.bson.conversions.Bson
+import org.mongodb.scala.bson.BsonValue
+import org.mongodb.scala.bson.DefaultHelper.DefaultsTo
+import org.mongodb.scala.{ FindObservable, TimeoutMode }
+
+import java.util.concurrent.TimeUnit
+import scala.concurrent.duration.Duration
+import scala.reflect.ClassTag
+
+case class SyncFindIterable[T](wrapped: FindObservable[T]) extends SyncMongoIterable[T] with FindIterable[T] {
+  override def filter(filter: Bson): FindIterable[T] = {
+    wrapped.filter(filter)
+    this
+  }
+
+  override def limit(limit: Int): FindIterable[T] = {
+    wrapped.limit(limit)
+    this
+  }
+
+  override def skip(skip: Int): FindIterable[T] = {
+    wrapped.skip(skip)
+    this
+  }
+
+  override def maxTime(maxTime: Long, timeUnit: TimeUnit): FindIterable[T] = {
+    wrapped.maxTime(Duration(maxTime, timeUnit))
+    this
+  }
+
+  override def maxAwaitTime(maxAwaitTime: Long, timeUnit: TimeUnit): FindIterable[T] = {
+    wrapped.maxAwaitTime(Duration(maxAwaitTime, timeUnit))
+    this
+  }
+
+  override def projection(projection: Bson): FindIterable[T] = {
+    wrapped.projection(projection)
+    this
+  }
+
+  override def sort(sort: Bson): FindIterable[T] = {
+    wrapped.sort(sort)
+    this
+  }
+
+  override def noCursorTimeout(noCursorTimeout: Boolean): FindIterable[T] = {
+    wrapped.noCursorTimeout(noCursorTimeout)
+    this
+  }
+
+  override def partial(partial: Boolean): FindIterable[T] = {
+    wrapped.partial(partial)
+    this
+  }
+
+  override def cursorType(cursorType: CursorType): FindIterable[T] = {
+    wrapped.cursorType(cursorType)
+    this
+  }
+
+  override def batchSize(batchSize: Int): FindIterable[T] = {
+    wrapped.batchSize(batchSize)
+    this
+  }
+
+  override def timeoutMode(timeoutMode: TimeoutMode): FindIterable[T] = {
+    wrapped.timeoutMode(timeoutMode)
+    this
+  }
+
+  override def collation(collation: Collation): FindIterable[T] = {
+    wrapped.collation(collation)
+    this
+  }
+
+  override def comment(comment: String): FindIterable[T] = {
+    wrapped.comment(comment)
+    this
+  }
+
+  override def comment(comment: BsonValue): FindIterable[T] = {
+    wrapped.comment(comment)
+    this
+  }
+
+  override def hint(hint: Bson): FindIterable[T] = {
+    wrapped.hint(hint)
+    this
+  }
+
+  override def hintString(hint: String): FindIterable[T] = {
+    wrapped.hintString(hint)
+    this
+  }
+
+  override def let(let: Bson): FindIterable[T] = {
+    wrapped.let(let)
+    this
+  }
+
+  override def max(max: Bson): FindIterable[T] = {
+    wrapped.max(max)
+    this
+  }
+
+  override def min(min: Bson): FindIterable[T] = {
+    wrapped.min(min)
+    this
+  }
+
+  override def returnKey(returnKey: Boolean): FindIterable[T] = {
+    wrapped.returnKey(returnKey)
+    this
+  }
+
+  override def showRecordId(showRecordId: Boolean): FindIterable[T] = {
+    wrapped.showRecordId(showRecordId)
+    this
+  }
+
+  override def allowDiskUse(allowDiskUse: java.lang.Boolean): FindIterable[T] = {
+    wrapped.allowDiskUse(allowDiskUse)
+    this
+  }
+
+  override def explain(): Document = wrapped.explain().toFuture().get()
+
+  override def explain(verbosity: ExplainVerbosity): Document = wrapped.explain(verbosity).toFuture().get()
+
+  override def explain[E](explainResultClass: Class[E]): E =
+    wrapped
+      .explain[E]()(DefaultsTo.overrideDefault[E, org.mongodb.scala.Document], ClassTag(explainResultClass))
+      .toFuture()
+      .get()
+
+  override def explain[E](explainResultClass: Class[E], verbosity: ExplainVerbosity): E =
+    wrapped
+      .explain[E](verbosity)(DefaultsTo.overrideDefault[E, org.mongodb.scala.Document], ClassTag(explainResultClass))
+      .toFuture()
+      .get()
+}
diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncListCollectionsIterable.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncListCollectionsIterable.scala
new file mode 100644
index 00000000000..aa121ae99cf
--- /dev/null
+++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncListCollectionsIterable.scala
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mongodb.scala.syncadapter
+
+import com.mongodb.client.ListCollectionsIterable
+import org.bson.conversions.Bson
+import org.mongodb.scala.bson.BsonValue
+import org.mongodb.scala.{ ListCollectionsObservable, TimeoutMode }
+
+import java.util.concurrent.TimeUnit
+import scala.concurrent.duration.Duration
+
+case class SyncListCollectionsIterable[T](wrapped: ListCollectionsObservable[T])
+    extends SyncMongoIterable[T]
+    with ListCollectionsIterable[T] {
+  override def filter(filter: Bson): ListCollectionsIterable[T] = {
+    wrapped.filter(filter)
+    this
+  }
+
+  override def maxTime(maxTime: Long, timeUnit: TimeUnit): ListCollectionsIterable[T] = {
+    wrapped.maxTime(Duration(maxTime, timeUnit))
+    this
+  }
+
+  override def batchSize(batchSize: Int): ListCollectionsIterable[T] = {
+    wrapped.batchSize(batchSize)
+    this
+  }
+
+  override def timeoutMode(timeoutMode: TimeoutMode): ListCollectionsIterable[T] = {
+    wrapped.timeoutMode(timeoutMode)
+    this
+  }
+
+  override def comment(comment: String): ListCollectionsIterable[T] = {
+    wrapped.comment(comment)
+    this
+  }
+
+  override def comment(comment: BsonValue): ListCollectionsIterable[T] = {
+    wrapped.comment(comment)
+    this
+  }
+}
diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncListDatabasesIterable.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncListDatabasesIterable.scala
new file mode 100644
index 00000000000..aa841c1be0a
--- /dev/null
+++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncListDatabasesIterable.scala
@@ -0,0 +1,53 @@
+package org.mongodb.scala.syncadapter
+
+import com.mongodb.client.ListDatabasesIterable
+import org.bson.conversions.Bson
+import org.mongodb.scala.bson.BsonValue
+import org.mongodb.scala.{ ListDatabasesObservable, TimeoutMode }
+
+import java.util.concurrent.TimeUnit
+import scala.concurrent.duration.Duration
+
+case class SyncListDatabasesIterable[T](wrapped: ListDatabasesObservable[T])
+    extends SyncMongoIterable[T]
+    with ListDatabasesIterable[T] {
+  override def maxTime(maxTime: Long, timeUnit: TimeUnit): ListDatabasesIterable[T] = {
+    wrapped.maxTime(Duration(maxTime, timeUnit))
+    this
+  }
+
+  override def batchSize(batchSize: Int): ListDatabasesIterable[T] = {
+    wrapped.batchSize(batchSize)
+    this
+  }
+
+  override def timeoutMode(timeoutMode: TimeoutMode): ListDatabasesIterable[T] = {
+    wrapped.timeoutMode(timeoutMode)
+    this
+  }
+
+  override def filter(filter: Bson): ListDatabasesIterable[T] = {
+    wrapped.filter(filter)
+    this
+  }
+
+  override def nameOnly(nameOnly: java.lang.Boolean): ListDatabasesIterable[T] = {
+    wrapped.nameOnly(nameOnly)
+    this
+  }
+
+  override def authorizedDatabasesOnly(authorizedDatabasesOnly: java.lang.Boolean): ListDatabasesIterable[T] = {
+    wrapped.authorizedDatabasesOnly(authorizedDatabasesOnly)
+    this
+  }
+
+  override def comment(comment: String): ListDatabasesIterable[T] = {
+    wrapped.comment(comment)
+    this
+  }
+
+  override def comment(comment: BsonValue): ListDatabasesIterable[T] = {
+    wrapped.comment(comment)
+    this
+  }
+}
diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncListIndexesIterable.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncListIndexesIterable.scala
new file mode 100644
index 00000000000..86db80bc6e4
--- /dev/null
+++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncListIndexesIterable.scala
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.syncadapter
+
+import com.mongodb.client.ListIndexesIterable
+import org.mongodb.scala.bson.BsonValue
+import org.mongodb.scala.{ ListIndexesObservable, TimeoutMode }
+
+import java.util.concurrent.TimeUnit
+import scala.concurrent.duration.Duration
+
+case class SyncListIndexesIterable[T](wrapped: ListIndexesObservable[T])
+    extends SyncMongoIterable[T]
+    with ListIndexesIterable[T] {
+  override def maxTime(maxTime: Long, timeUnit: TimeUnit): ListIndexesIterable[T] = {
+    wrapped.maxTime(Duration(maxTime, timeUnit))
+    this
+  }
+
+  override def batchSize(batchSize: Int): ListIndexesIterable[T] = {
+    wrapped.batchSize(batchSize)
+    this
+  }
+
+  override def timeoutMode(timeoutMode: TimeoutMode): ListIndexesIterable[T] = {
+    wrapped.timeoutMode(timeoutMode)
+    this
+  }
+
+  override def comment(comment: String): ListIndexesIterable[T] = {
+    wrapped.comment(comment)
+    this
+  }
+
+  override def comment(comment: BsonValue): ListIndexesIterable[T] = {
+    wrapped.comment(comment)
+    this
+  }
+
+}
diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncListSearchIndexesIterable.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncListSearchIndexesIterable.scala
new file mode 100644
index 00000000000..672b97aff9e
--- /dev/null
+++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncListSearchIndexesIterable.scala
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.syncadapter
+
+import com.mongodb.ExplainVerbosity
+import com.mongodb.client.ListSearchIndexesIterable
+import com.mongodb.client.model.Collation
+import org.bson.{ BsonValue, Document }
+import org.mongodb.scala.bson.DefaultHelper.DefaultsTo
+import org.mongodb.scala.{ ListSearchIndexesObservable, TimeoutMode }
+
+import java.util.concurrent.TimeUnit
+import scala.concurrent.duration.Duration
+import scala.reflect.ClassTag
+
+case class SyncListSearchIndexesIterable[T](wrapped: ListSearchIndexesObservable[T])
+    extends SyncMongoIterable[T]
+    with ListSearchIndexesIterable[T] {
+
+  override def name(indexName: String): ListSearchIndexesIterable[T] = {
+    wrapped.name(indexName)
+    this
+  }
+
+  override def allowDiskUse(allowDiskUse: java.lang.Boolean): ListSearchIndexesIterable[T] = {
+    wrapped.allowDiskUse(allowDiskUse)
+    this
+  }
+
+  override def batchSize(batchSize: Int): ListSearchIndexesIterable[T] = {
+    wrapped.batchSize(batchSize)
+    this
+  }
+
+  override def timeoutMode(timeoutMode: TimeoutMode): ListSearchIndexesIterable[T] = {
+    wrapped.timeoutMode(timeoutMode)
+    this
+  }
+
+  override def maxTime(maxTime: Long, timeUnit: TimeUnit): ListSearchIndexesIterable[T] = {
+    wrapped.maxTime(Duration(maxTime, timeUnit))
+    this
+  }
+
+  override def collation(collation: Collation): ListSearchIndexesIterable[T] = {
+    wrapped.collation(collation)
+    this
+  }
+
+  override def comment(comment: String): ListSearchIndexesIterable[T] = {
+    wrapped.comment(comment)
+    this
+  }
+
+  override def comment(comment: BsonValue): ListSearchIndexesIterable[T] = {
+    wrapped.comment(comment)
+    this
+  }
+
+  override def explain(): Document = wrapped.explain().toFuture().get()
+
+  override def explain(verbosity: ExplainVerbosity): Document = wrapped.explain(verbosity).toFuture().get()
+
+  override def explain[E](explainResultClass: Class[E]): E =
+    wrapped
+      .explain[E]()(DefaultsTo.overrideDefault[E, org.mongodb.scala.Document], ClassTag(explainResultClass))
+      .toFuture()
+      .get()
+
+  override def explain[E](explainResultClass: Class[E], verbosity: ExplainVerbosity): E =
+    wrapped
+      .explain[E](verbosity)(DefaultsTo.overrideDefault[E, org.mongodb.scala.Document], ClassTag(explainResultClass))
+      .toFuture()
+      .get()
+
+}
diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncMapReduceIterable.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncMapReduceIterable.scala
new file mode 100644
index 00000000000..73af2f6f62a
--- /dev/null
+++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncMapReduceIterable.scala
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.syncadapter
+
+import com.mongodb.client.MapReduceIterable
+import com.mongodb.client.model.{ Collation, MapReduceAction }
+import org.bson.conversions.Bson
+import org.mongodb.scala.{ MapReduceObservable, TimeoutMode }
+
+import java.util.concurrent.TimeUnit
+import scala.concurrent.duration.Duration
+
+case class SyncMapReduceIterable[T](wrapped: MapReduceObservable[T])
+    extends SyncMongoIterable[T]
+    with MapReduceIterable[T] {
+  override def toCollection(): Unit = wrapped.toCollection().toFuture().get()
+
+  override def collectionName(collectionName: String): MapReduceIterable[T] = {
+    wrapped.collectionName(collectionName)
+    this
+  }
+
+  override def finalizeFunction(finalizeFunction: String): MapReduceIterable[T] = {
+    wrapped.finalizeFunction(finalizeFunction)
+    this
+  }
+
+  override def scope(scope: Bson): MapReduceIterable[T] = {
+    wrapped.scope(scope)
+    this
+  }
+
+  override def sort(sort: Bson): MapReduceIterable[T] = {
+    wrapped.sort(sort)
+    this
+  }
+
+  override def filter(filter: Bson): MapReduceIterable[T] = {
+    wrapped.filter(filter)
+    this
+  }
+
+  override def limit(limit: Int): MapReduceIterable[T] = {
+    wrapped.limit(limit)
+    this
+  }
+
+  override def jsMode(jsMode: Boolean): MapReduceIterable[T] = {
+    wrapped.jsMode(jsMode)
+    this
+  }
+
+  override def verbose(verbose: Boolean): MapReduceIterable[T] = {
+    wrapped.verbose(verbose)
+    this
+  }
+
+  override def maxTime(maxTime: Long, timeUnit: TimeUnit): MapReduceIterable[T] = {
+    wrapped.maxTime(Duration(maxTime, timeUnit))
+    this
+  }
+
+  override def action(action: MapReduceAction): MapReduceIterable[T] = {
+    wrapped.action(action)
+    this
+  }
+
+  override def databaseName(databaseName: String): MapReduceIterable[T] = {
+    wrapped.databaseName(databaseName)
+    this
+  }
+
+  override def batchSize(batchSize: Int): MapReduceIterable[T] = {
+    wrapped.batchSize(batchSize)
+    this
+  }
+
+  override def timeoutMode(timeoutMode: TimeoutMode): MapReduceIterable[T] = {
+    wrapped.timeoutMode(timeoutMode)
+    this
+  }
+
+  override def bypassDocumentValidation(bypassDocumentValidation: java.lang.Boolean): MapReduceIterable[T] = {
+    wrapped.bypassDocumentValidation(bypassDocumentValidation)
+    this
+  }
+
+  override def collation(collation: Collation): MapReduceIterable[T] = {
+    wrapped.collation(collation)
+    this
+  }
+
+}
diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncMongoClient.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncMongoClient.scala
new file mode 100644
index 00000000000..b0617e95fd7
--- /dev/null
+++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncMongoClient.scala
@@ -0,0 +1,15 @@
+package org.mongodb.scala.syncadapter
+
+import com.mongodb.MongoDriverInformation
+import com.mongodb.client.{ MongoClient => JMongoClient }
+import org.mongodb.scala.MongoClient
+
+case class SyncMongoClient(wrapped: MongoClient) extends SyncMongoCluster(wrapped) with JMongoClient {
+
+  override def close(): Unit = wrapped.close()
+
+  override def getClusterDescription = throw new UnsupportedOperationException
+
+  override def appendMetadata(mongoDriverInformation: MongoDriverInformation): Unit =
+    wrapped.appendMetadata(mongoDriverInformation)
+}
diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncMongoCluster.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncMongoCluster.scala
new file mode 100644
index 00000000000..439188e3792
--- /dev/null
+++
b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncMongoCluster.scala @@ -0,0 +1,148 @@ +package org.mongodb.scala.syncadapter + +import com.mongodb.client.model.bulk.{ ClientBulkWriteOptions, ClientBulkWriteResult, ClientNamespacedWriteModel } +import com.mongodb.{ ClientSessionOptions, ReadConcern, ReadPreference, WriteConcern } +import com.mongodb.client.{ ClientSession, MongoCluster => JMongoCluster, MongoDatabase => JMongoDatabase } +import org.bson.Document +import org.bson.codecs.configuration.CodecRegistry +import org.bson.conversions.Bson +import org.mongodb.scala.MongoCluster +import org.mongodb.scala.bson.DefaultHelper.DefaultsTo + +import java.util +import java.util.concurrent.TimeUnit +import scala.collection.JavaConverters._ +import scala.concurrent.Await +import scala.concurrent.duration.Duration +import scala.reflect.ClassTag + +object SyncMongoCluster { + + def apply(wrapped: MongoCluster): SyncMongoCluster = new SyncMongoCluster(wrapped) +} + +class SyncMongoCluster(wrapped: MongoCluster) extends JMongoCluster { + + override def getCodecRegistry: CodecRegistry = wrapped.codecRegistry + + override def getReadPreference: ReadPreference = wrapped.readPreference + + override def getWriteConcern: WriteConcern = wrapped.writeConcern + + override def getReadConcern: ReadConcern = wrapped.readConcern + + override def getTimeout(timeUnit: TimeUnit): java.lang.Long = { + val timeout = wrapped.timeout.map(d => timeUnit.convert(d.toMillis, TimeUnit.MILLISECONDS)) + if (timeout.isDefined) timeout.get else null + } + + override def withCodecRegistry(codecRegistry: CodecRegistry): JMongoCluster = + SyncMongoCluster(wrapped.withCodecRegistry(codecRegistry)) + + override def withReadPreference(readPreference: ReadPreference): JMongoCluster = + SyncMongoCluster(wrapped.withReadPreference(readPreference)) + + override def withWriteConcern(writeConcern: WriteConcern): JMongoCluster = + SyncMongoCluster(wrapped.withWriteConcern(writeConcern)) + + override def withReadConcern(readConcern: ReadConcern): JMongoCluster = + SyncMongoCluster(wrapped.withReadConcern(readConcern)) + + override def withTimeout(timeout: Long, timeUnit: TimeUnit): JMongoCluster = + SyncMongoCluster(wrapped.withTimeout(Duration(timeout, timeUnit))) + + override def getDatabase(databaseName: String): JMongoDatabase = + SyncMongoDatabase(wrapped.getDatabase(databaseName)) + + override def startSession: ClientSession = + SyncClientSession(Await.result(wrapped.startSession().head(), WAIT_DURATION), this) + + override def startSession(options: ClientSessionOptions): ClientSession = + SyncClientSession(Await.result(wrapped.startSession(options).head(), WAIT_DURATION), this) + + override def listDatabaseNames = throw new UnsupportedOperationException + + override def listDatabaseNames(clientSession: ClientSession) = throw new UnsupportedOperationException + + override def listDatabases = new SyncListDatabasesIterable[Document](wrapped.listDatabases[Document]()) + + override def listDatabases(clientSession: ClientSession) = throw new UnsupportedOperationException + + override def listDatabases[TResult](resultClass: Class[TResult]) = + new SyncListDatabasesIterable[TResult]( + wrapped.listDatabases[TResult]()( + DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document], + ClassTag(resultClass) + ) + ) + + override def listDatabases[TResult](clientSession: ClientSession, resultClass: Class[TResult]) = + throw new UnsupportedOperationException + + override def watch = new 
SyncChangeStreamIterable[Document](wrapped.watch[Document]()) + + override def watch[TResult](resultClass: Class[TResult]) = + new SyncChangeStreamIterable[TResult]( + wrapped.watch[TResult]()(DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document], ClassTag(resultClass)) + ) + + override def watch(pipeline: java.util.List[_ <: Bson]) = + new SyncChangeStreamIterable[Document](wrapped.watch[Document](pipeline.asScala.toSeq)) + + override def watch[TResult](pipeline: java.util.List[_ <: Bson], resultClass: Class[TResult]) = + new SyncChangeStreamIterable[TResult]( + wrapped.watch[TResult](pipeline.asScala.toSeq)( + DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document], + ClassTag(resultClass) + ) + ) + + override def watch(clientSession: ClientSession) = + new SyncChangeStreamIterable[Document](wrapped.watch[Document](unwrap(clientSession))) + + override def watch[TResult](clientSession: ClientSession, resultClass: Class[TResult]) = + new SyncChangeStreamIterable[TResult]( + wrapped.watch(unwrap(clientSession))( + DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document], + ClassTag(resultClass) + ) + ) + + override def watch(clientSession: ClientSession, pipeline: java.util.List[_ <: Bson]) = + new SyncChangeStreamIterable[Document](wrapped.watch[Document](unwrap(clientSession), pipeline.asScala.toSeq)) + + override def watch[TResult]( + clientSession: ClientSession, + pipeline: java.util.List[_ <: Bson], + resultClass: Class[TResult] + ) = + new SyncChangeStreamIterable[TResult]( + wrapped.watch[TResult](unwrap(clientSession), pipeline.asScala.toSeq)( + DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document], + ClassTag(resultClass) + ) + ) + + private def unwrap(clientSession: ClientSession): org.mongodb.scala.ClientSession = + clientSession.asInstanceOf[SyncClientSession].wrapped + + override def bulkWrite( + models: util.List[_ <: ClientNamespacedWriteModel] + ): ClientBulkWriteResult = wrapped.bulkWrite(models.asScala.toList).toFuture().get() + + override def bulkWrite( + models: util.List[_ <: ClientNamespacedWriteModel], + options: ClientBulkWriteOptions + ): ClientBulkWriteResult = wrapped.bulkWrite(models.asScala.toList, options).toFuture().get() + + override def bulkWrite( + clientSession: ClientSession, + models: util.List[_ <: ClientNamespacedWriteModel] + ): ClientBulkWriteResult = wrapped.bulkWrite(unwrap(clientSession), models.asScala.toList).toFuture().get() + + override def bulkWrite( + clientSession: ClientSession, + models: util.List[_ <: ClientNamespacedWriteModel], + options: ClientBulkWriteOptions + ): ClientBulkWriteResult = wrapped.bulkWrite(unwrap(clientSession), models.asScala.toList, options).toFuture().get() +} diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncMongoCollection.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncMongoCollection.scala new file mode 100644 index 00000000000..cc06b5f1a09 --- /dev/null +++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncMongoCollection.scala @@ -0,0 +1,611 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.syncadapter + +import com.mongodb.bulk.BulkWriteResult +import com.mongodb.client.model._ +import com.mongodb.client.result.{ DeleteResult, UpdateResult } +import com.mongodb.client.{ + ChangeStreamIterable, + ClientSession, + ListSearchIndexesIterable, + MongoCollection => JMongoCollection +} +import com.mongodb.{ MongoNamespace, ReadConcern, ReadPreference, WriteConcern } +import org.bson.Document +import org.bson.codecs.configuration.CodecRegistry +import org.bson.conversions.Bson +import org.mongodb.scala.MongoCollection +import org.mongodb.scala.bson.DefaultHelper.DefaultsTo +import org.mongodb.scala.result.{ InsertManyResult, InsertOneResult } + +import java.util.concurrent.TimeUnit +import scala.collection.JavaConverters._ +import scala.concurrent.duration.{ Duration, MILLISECONDS } +import scala.reflect.ClassTag + +case class SyncMongoCollection[T](wrapped: MongoCollection[T]) extends JMongoCollection[T] { + + private def unwrap(clientSession: ClientSession) = clientSession.asInstanceOf[SyncClientSession].wrapped + + override def getNamespace: MongoNamespace = wrapped.namespace + + override def getDocumentClass: Class[T] = wrapped.documentClass + + override def getCodecRegistry: CodecRegistry = wrapped.codecRegistry + + override def getReadPreference: ReadPreference = wrapped.readPreference + + override def getWriteConcern: WriteConcern = wrapped.writeConcern + + override def getReadConcern: ReadConcern = wrapped.readConcern + + override def getTimeout(timeUnit: TimeUnit): java.lang.Long = { + wrapped.timeout match { + case Some(value) => timeUnit.convert(value.toMillis, MILLISECONDS) + case None => null + } + } + + override def withDocumentClass[NewTDocument](clazz: Class[NewTDocument]): JMongoCollection[NewTDocument] = + SyncMongoCollection[NewTDocument]( + wrapped.withDocumentClass[NewTDocument]()( + DefaultsTo.overrideDefault[NewTDocument, org.mongodb.scala.Document], + ClassTag(clazz) + ) + ) + + override def withCodecRegistry(codecRegistry: CodecRegistry): JMongoCollection[T] = + SyncMongoCollection[T](wrapped.withCodecRegistry(codecRegistry)) + + override def withReadPreference(readPreference: ReadPreference): JMongoCollection[T] = + SyncMongoCollection[T](wrapped.withReadPreference(readPreference)) + + override def withWriteConcern(writeConcern: WriteConcern): JMongoCollection[T] = + SyncMongoCollection[T](wrapped.withWriteConcern(writeConcern)) + + override def withReadConcern(readConcern: ReadConcern): JMongoCollection[T] = + SyncMongoCollection[T](wrapped.withReadConcern(readConcern)) + + override def withTimeout(timeout: Long, timeUnit: TimeUnit): JMongoCollection[T] = + SyncMongoCollection[T](wrapped.withTimeout(Duration(timeout, timeUnit))) + + override def countDocuments: Long = wrapped.countDocuments().toFuture().get() + + override def countDocuments(filter: Bson): Long = wrapped.countDocuments(filter).toFuture().get() + + override def countDocuments(filter: Bson, options: CountOptions): Long = + wrapped.countDocuments(filter, options).toFuture().get() + + override def countDocuments(clientSession: ClientSession): 
Long = + wrapped.countDocuments(unwrap(clientSession)).toFuture().get() + + override def countDocuments(clientSession: ClientSession, filter: Bson): Long = + wrapped.countDocuments(unwrap(clientSession), filter).toFuture().get() + + override def countDocuments(clientSession: ClientSession, filter: Bson, options: CountOptions): Long = + wrapped.countDocuments(unwrap(clientSession), filter, options).toFuture().get() + + override def estimatedDocumentCount: Long = wrapped.estimatedDocumentCount().toFuture().get() + + override def estimatedDocumentCount(options: EstimatedDocumentCountOptions): Long = + wrapped.estimatedDocumentCount(options).toFuture().get() + + override def distinct[TResult](fieldName: String, resultClass: Class[TResult]) = + SyncDistinctIterable[TResult](wrapped.distinct[TResult](fieldName)(ClassTag(resultClass))) + + override def distinct[TResult](fieldName: String, filter: Bson, resultClass: Class[TResult]) = + SyncDistinctIterable[TResult](wrapped.distinct[TResult](fieldName, filter)(ClassTag(resultClass))) + + override def distinct[TResult](clientSession: ClientSession, fieldName: String, resultClass: Class[TResult]) = + SyncDistinctIterable[TResult](wrapped.distinct[TResult](unwrap(clientSession), fieldName)(ClassTag(resultClass))) + + override def distinct[TResult]( + clientSession: ClientSession, + fieldName: String, + filter: Bson, + resultClass: Class[TResult] + ) = + SyncDistinctIterable[TResult]( + wrapped.distinct[TResult](unwrap(clientSession), fieldName, filter)(ClassTag(resultClass)) + ) + + override def find = SyncFindIterable[T](wrapped.find[T]()(DefaultsTo.default[T], ClassTag(getDocumentClass))) + + override def find[TResult](resultClass: Class[TResult]) = + SyncFindIterable[TResult](wrapped.find[TResult]()(DefaultsTo.overrideDefault[TResult, T], ClassTag(resultClass))) + + override def find(filter: Bson) = + SyncFindIterable[T](wrapped.find(filter)(DefaultsTo.default[T], ClassTag(getDocumentClass))) + + override def find[TResult](filter: Bson, resultClass: Class[TResult]) = + SyncFindIterable[TResult]( + wrapped.find[TResult](filter)(DefaultsTo.overrideDefault[TResult, T], ClassTag(resultClass)) + ) + + override def find(clientSession: ClientSession) = + SyncFindIterable[T](wrapped.find[T](unwrap(clientSession))(DefaultsTo.default[T], ClassTag(getDocumentClass))) + + override def find[TResult](clientSession: ClientSession, resultClass: Class[TResult]) = + SyncFindIterable[TResult]( + wrapped.find[TResult](unwrap(clientSession))(DefaultsTo.overrideDefault[TResult, T], ClassTag(resultClass)) + ) + + override def find(clientSession: ClientSession, filter: Bson) = + SyncFindIterable[T]( + wrapped.find[T](unwrap(clientSession), filter)(DefaultsTo.default[T], ClassTag(getDocumentClass)) + ) + + override def find[TResult](clientSession: ClientSession, filter: Bson, resultClass: Class[TResult]) = + SyncFindIterable[TResult]( + wrapped + .find[TResult](unwrap(clientSession), filter)(DefaultsTo.overrideDefault[TResult, T], ClassTag(resultClass)) + ) + + override def aggregate(pipeline: java.util.List[_ <: Bson]) = + SyncAggregateIterable[T]( + wrapped.aggregate(pipeline.asScala.toSeq)(DefaultsTo.default[T], ClassTag(getDocumentClass)) + ) + + override def aggregate[TResult](pipeline: java.util.List[_ <: Bson], resultClass: Class[TResult]) = + SyncAggregateIterable[TResult]( + wrapped.aggregate[TResult](pipeline.asScala.toSeq)(DefaultsTo.overrideDefault[TResult, T], ClassTag(resultClass)) + ) + + override def aggregate(clientSession: ClientSession, pipeline: 
java.util.List[_ <: Bson]) = + SyncAggregateIterable[T]( + wrapped + .aggregate[T](unwrap(clientSession), pipeline.asScala.toSeq)(DefaultsTo.default[T], ClassTag(getDocumentClass)) + ) + + override def aggregate[TResult]( + clientSession: ClientSession, + pipeline: java.util.List[_ <: Bson], + resultClass: Class[TResult] + ) = + SyncAggregateIterable[TResult]( + wrapped.aggregate[TResult](unwrap(clientSession), pipeline.asScala.toSeq)( + DefaultsTo.overrideDefault[TResult, T], + ClassTag(resultClass) + ) + ) + + override def watch = + SyncChangeStreamIterable[T](wrapped.watch[T]()(DefaultsTo.default[T], ClassTag(getDocumentClass))) + + override def watch[TResult](resultClass: Class[TResult]) = + SyncChangeStreamIterable[TResult]( + wrapped.watch[TResult]()(DefaultsTo.overrideDefault[TResult, T], ClassTag(resultClass)) + ) + + override def watch(pipeline: java.util.List[_ <: Bson]) = + SyncChangeStreamIterable[T]( + wrapped.watch[T](pipeline.asScala.toSeq)(DefaultsTo.default[T], ClassTag(getDocumentClass)) + ) + + override def watch[TResult](pipeline: java.util.List[_ <: Bson], resultClass: Class[TResult]) = + SyncChangeStreamIterable[TResult]( + wrapped.watch[TResult](pipeline.asScala.toSeq)(DefaultsTo.overrideDefault[TResult, T], ClassTag(resultClass)) + ) + + override def watch(clientSession: ClientSession) = + SyncChangeStreamIterable[T]( + wrapped.watch[T](unwrap(clientSession))(DefaultsTo.default[T], ClassTag(getDocumentClass)) + ) + + override def watch[TResult](clientSession: ClientSession, resultClass: Class[TResult]) = + SyncChangeStreamIterable[TResult]( + wrapped.watch[TResult](unwrap(clientSession))(DefaultsTo.overrideDefault[TResult, T], ClassTag(resultClass)) + ) + + override def watch(clientSession: ClientSession, pipeline: java.util.List[_ <: Bson]): ChangeStreamIterable[T] = + SyncChangeStreamIterable[T]( + wrapped.watch[T](unwrap(clientSession), pipeline.asScala.toSeq)(DefaultsTo.default[T], ClassTag(getDocumentClass)) + ) + + override def watch[TResult]( + clientSession: ClientSession, + pipeline: java.util.List[_ <: Bson], + resultClass: Class[TResult] + ) = + SyncChangeStreamIterable[TResult]( + wrapped.watch(unwrap(clientSession), pipeline.asScala.toSeq)( + DefaultsTo.overrideDefault[TResult, T], + ClassTag(resultClass) + ) + ) + + override def mapReduce(mapFunction: String, reduceFunction: String) = + SyncMapReduceIterable[T]( + wrapped.mapReduce[T](mapFunction, reduceFunction)(DefaultsTo.default[T], ClassTag(getDocumentClass)) + ) + + override def mapReduce[TResult](mapFunction: String, reduceFunction: String, resultClass: Class[TResult]) = + SyncMapReduceIterable[TResult]( + wrapped + .mapReduce[TResult](mapFunction, reduceFunction)(DefaultsTo.overrideDefault[TResult, T], ClassTag(resultClass)) + ) + + override def mapReduce(clientSession: ClientSession, mapFunction: String, reduceFunction: String) = + SyncMapReduceIterable[T]( + wrapped.mapReduce[T](unwrap(clientSession), mapFunction, reduceFunction)( + DefaultsTo.default[T], + ClassTag(getDocumentClass) + ) + ) + + override def mapReduce[TResult]( + clientSession: ClientSession, + mapFunction: String, + reduceFunction: String, + resultClass: Class[TResult] + ) = + SyncMapReduceIterable[TResult]( + wrapped.mapReduce[TResult](unwrap(clientSession), mapFunction, reduceFunction)( + DefaultsTo.overrideDefault[TResult, T], + ClassTag(resultClass) + ) + ) + + override def bulkWrite(requests: java.util.List[_ <: WriteModel[_ <: T]]): BulkWriteResult = + wrapped.bulkWrite(requests.asScala.toSeq).toFuture().get() + + 
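// Descriptive comment (added for clarity, not part of the upstream source): the remaining bulkWrite overloads follow the same blocking adapter pattern used throughout these sync wrappers, delegating to the wrapped reactive collection and then waiting on the returned observable with toFuture().get(). + +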
override def bulkWrite( + requests: java.util.List[_ <: WriteModel[_ <: T]], + options: BulkWriteOptions + ): BulkWriteResult = wrapped.bulkWrite(requests.asScala.toSeq, options).toFuture().get() + + override def bulkWrite( + clientSession: ClientSession, + requests: java.util.List[_ <: WriteModel[_ <: T]] + ): BulkWriteResult = + wrapped.bulkWrite(unwrap(clientSession), requests.asScala.toSeq).toFuture().get() + + override def bulkWrite( + clientSession: ClientSession, + requests: java.util.List[_ <: WriteModel[_ <: T]], + options: BulkWriteOptions + ): BulkWriteResult = + wrapped.bulkWrite(unwrap(clientSession), requests.asScala.toSeq, options).toFuture().get() + + override def insertOne(t: T): InsertOneResult = wrapped.insertOne(t).toFuture().get() + + override def insertOne(t: T, options: InsertOneOptions): InsertOneResult = + wrapped.insertOne(t, options).toFuture().get() + + override def insertOne(clientSession: ClientSession, t: T): InsertOneResult = + wrapped.insertOne(unwrap(clientSession), t).toFuture().get() + + override def insertOne(clientSession: ClientSession, t: T, options: InsertOneOptions): InsertOneResult = + wrapped.insertOne(unwrap(clientSession), t, options).toFuture().get() + + override def insertMany(documents: java.util.List[_ <: T]): InsertManyResult = + wrapped.insertMany(documents.asScala.toSeq).toFuture().get() + + override def insertMany(documents: java.util.List[_ <: T], options: InsertManyOptions): InsertManyResult = + wrapped.insertMany(documents.asScala.toSeq, options).toFuture().get() + + override def insertMany(clientSession: ClientSession, documents: java.util.List[_ <: T]): InsertManyResult = + wrapped.insertMany(unwrap(clientSession), documents.asScala.toSeq).toFuture().get() + + override def insertMany( + clientSession: ClientSession, + documents: java.util.List[_ <: T], + options: InsertManyOptions + ): InsertManyResult = + wrapped.insertMany(unwrap(clientSession), documents.asScala.toSeq, options).toFuture().get() + + override def deleteOne(filter: Bson): DeleteResult = + wrapped.deleteOne(filter).toFuture().get() + + override def deleteOne(filter: Bson, options: DeleteOptions): DeleteResult = + wrapped.deleteOne(filter, options).toFuture().get() + + override def deleteOne(clientSession: ClientSession, filter: Bson): DeleteResult = + wrapped.deleteOne(unwrap(clientSession), filter).toFuture().get() + + override def deleteOne(clientSession: ClientSession, filter: Bson, options: DeleteOptions): DeleteResult = + wrapped.deleteOne(unwrap(clientSession), filter, options).toFuture().get() + + override def deleteMany(filter: Bson): DeleteResult = + wrapped.deleteMany(filter).toFuture().get() + + override def deleteMany(filter: Bson, options: DeleteOptions): DeleteResult = + wrapped.deleteMany(filter, options).toFuture().get() + + override def deleteMany(clientSession: ClientSession, filter: Bson): DeleteResult = + wrapped.deleteMany(unwrap(clientSession), filter).toFuture().get() + + override def deleteMany(clientSession: ClientSession, filter: Bson, options: DeleteOptions): DeleteResult = + wrapped.deleteMany(unwrap(clientSession), filter, options).toFuture().get() + + override def replaceOne(filter: Bson, replacement: T): UpdateResult = + wrapped.replaceOne(filter, replacement).toFuture().get() + + override def replaceOne(filter: Bson, replacement: T, replaceOptions: ReplaceOptions): UpdateResult = + wrapped.replaceOne(filter, replacement, replaceOptions).toFuture().get() + + override def replaceOne(clientSession: ClientSession, filter: Bson, 
replacement: T): UpdateResult = + wrapped.replaceOne(unwrap(clientSession), filter, replacement).toFuture().get() + + override def replaceOne( + clientSession: ClientSession, + filter: Bson, + replacement: T, + replaceOptions: ReplaceOptions + ): UpdateResult = + wrapped.replaceOne(unwrap(clientSession), filter, replacement, replaceOptions).toFuture().get() + + override def updateOne(filter: Bson, update: Bson): UpdateResult = + wrapped.updateOne(filter, update).toFuture().get() + + override def updateOne(filter: Bson, update: Bson, updateOptions: UpdateOptions): UpdateResult = + wrapped.updateOne(filter, update, updateOptions).toFuture().get() + + override def updateOne(clientSession: ClientSession, filter: Bson, update: Bson): UpdateResult = + wrapped.updateOne(unwrap(clientSession), filter, update).toFuture().get() + + override def updateOne( + clientSession: ClientSession, + filter: Bson, + update: Bson, + updateOptions: UpdateOptions + ): UpdateResult = + wrapped.updateOne(unwrap(clientSession), filter, update, updateOptions).toFuture().get() + + override def updateOne(filter: Bson, update: java.util.List[_ <: Bson]): UpdateResult = + wrapped.updateOne(filter, update.asScala.toSeq).toFuture().get() + + override def updateOne(filter: Bson, update: java.util.List[_ <: Bson], updateOptions: UpdateOptions): UpdateResult = + wrapped.updateOne(filter, update.asScala.toSeq, updateOptions).toFuture().get() + + override def updateOne(clientSession: ClientSession, filter: Bson, update: java.util.List[_ <: Bson]): UpdateResult = + wrapped.updateOne(unwrap(clientSession), filter, update.asScala.toSeq).toFuture().get() + + override def updateOne( + clientSession: ClientSession, + filter: Bson, + update: java.util.List[_ <: Bson], + updateOptions: UpdateOptions + ): UpdateResult = + wrapped.updateOne(unwrap(clientSession), filter, update.asScala.toSeq, updateOptions).toFuture().get() + + override def updateMany(filter: Bson, update: Bson): UpdateResult = + wrapped.updateMany(filter, update).toFuture().get() + + override def updateMany(filter: Bson, update: Bson, updateOptions: UpdateOptions): UpdateResult = + wrapped.updateMany(filter, update, updateOptions).toFuture().get() + + override def updateMany(clientSession: ClientSession, filter: Bson, update: Bson): UpdateResult = + wrapped.updateMany(unwrap(clientSession), filter, update).toFuture().get() + + override def updateMany( + clientSession: ClientSession, + filter: Bson, + update: Bson, + updateOptions: UpdateOptions + ): UpdateResult = + wrapped.updateMany(unwrap(clientSession), filter, update, updateOptions).toFuture().get() + + override def updateMany(filter: Bson, update: java.util.List[_ <: Bson]): UpdateResult = + wrapped.updateMany(filter, update.asScala.toSeq).toFuture().get() + + override def updateMany(filter: Bson, update: java.util.List[_ <: Bson], updateOptions: UpdateOptions): UpdateResult = + wrapped.updateMany(filter, update.asScala.toSeq, updateOptions).toFuture().get() + + override def updateMany(clientSession: ClientSession, filter: Bson, update: java.util.List[_ <: Bson]): UpdateResult = + wrapped.updateMany(unwrap(clientSession), filter, update.asScala.toSeq).toFuture().get() + + override def updateMany( + clientSession: ClientSession, + filter: Bson, + update: java.util.List[_ <: Bson], + updateOptions: UpdateOptions + ): UpdateResult = + wrapped.updateMany(unwrap(clientSession), filter, update.asScala.toSeq, updateOptions).toFuture().get() + + override def findOneAndDelete(filter: Bson): T = + 
wrapped.findOneAndDelete(filter).toFuture().get() + + override def findOneAndDelete(filter: Bson, options: FindOneAndDeleteOptions): T = + wrapped.findOneAndDelete(filter, options).toFuture().get() + + override def findOneAndDelete(clientSession: ClientSession, filter: Bson): T = + wrapped.findOneAndDelete(unwrap(clientSession), filter).toFuture().get() + + override def findOneAndDelete(clientSession: ClientSession, filter: Bson, options: FindOneAndDeleteOptions): T = + wrapped.findOneAndDelete(unwrap(clientSession), filter, options).toFuture().get() + + override def findOneAndReplace(filter: Bson, replacement: T): T = + wrapped.findOneAndReplace(filter, replacement).toFuture().get() + + override def findOneAndReplace(filter: Bson, replacement: T, options: FindOneAndReplaceOptions): T = + wrapped.findOneAndReplace(filter, replacement, options).toFuture().get() + + override def findOneAndReplace(clientSession: ClientSession, filter: Bson, replacement: T): T = + wrapped.findOneAndReplace(unwrap(clientSession), filter, replacement).toFuture().get() + + override def findOneAndReplace( + clientSession: ClientSession, + filter: Bson, + replacement: T, + options: FindOneAndReplaceOptions + ): T = + wrapped.findOneAndReplace(unwrap(clientSession), filter, replacement, options).toFuture().get() + + override def findOneAndUpdate(filter: Bson, update: Bson): T = + wrapped.findOneAndUpdate(filter, update).toFuture().get() + + override def findOneAndUpdate(filter: Bson, update: Bson, options: FindOneAndUpdateOptions): T = + wrapped.findOneAndUpdate(filter, update, options).toFuture().get() + + override def findOneAndUpdate(clientSession: ClientSession, filter: Bson, update: Bson): T = + wrapped.findOneAndUpdate(unwrap(clientSession), filter, update).toFuture().get() + + override def findOneAndUpdate( + clientSession: ClientSession, + filter: Bson, + update: Bson, + options: FindOneAndUpdateOptions + ): T = + wrapped.findOneAndUpdate(unwrap(clientSession), filter, update, options).toFuture().get() + + override def findOneAndUpdate(filter: Bson, update: java.util.List[_ <: Bson]): T = + wrapped.findOneAndUpdate(filter, update.asScala.toSeq).toFuture().get() + + override def findOneAndUpdate(filter: Bson, update: java.util.List[_ <: Bson], options: FindOneAndUpdateOptions): T = + wrapped.findOneAndUpdate(filter, update.asScala.toSeq, options).toFuture().get() + + override def findOneAndUpdate(clientSession: ClientSession, filter: Bson, update: java.util.List[_ <: Bson]): T = + wrapped.findOneAndUpdate(unwrap(clientSession), filter, update.asScala.toSeq).toFuture().get() + + override def findOneAndUpdate( + clientSession: ClientSession, + filter: Bson, + update: java.util.List[_ <: Bson], + options: FindOneAndUpdateOptions + ): T = + wrapped.findOneAndUpdate(unwrap(clientSession), filter, update.asScala.toSeq, options).toFuture().get() + + override def drop(): Unit = wrapped.drop().toFuture().get() + + override def drop(clientSession: ClientSession): Unit = wrapped.drop(unwrap(clientSession)).toFuture().get() + + override def drop(dropCollectionOptions: DropCollectionOptions): Unit = + wrapped.drop(dropCollectionOptions).toFuture().get() + + override def drop(clientSession: ClientSession, dropCollectionOptions: DropCollectionOptions): Unit = + wrapped.drop(unwrap(clientSession), dropCollectionOptions).toFuture().get() + + override def createIndex(keys: Bson): String = wrapped.createIndex(keys).toFuture().get() + + override def createIndex(keys: Bson, indexOptions: IndexOptions) = + 
wrapped.createIndex(keys, indexOptions).toFuture().get() + + override def createIndex(clientSession: ClientSession, keys: Bson) = + wrapped.createIndex(unwrap(clientSession), keys).toFuture().get() + + override def createIndex(clientSession: ClientSession, keys: Bson, indexOptions: IndexOptions) = + wrapped.createIndex(unwrap(clientSession), keys, indexOptions).toFuture().get() + + override def createIndexes(indexes: java.util.List[IndexModel]) = throw new UnsupportedOperationException + + override def createIndexes(indexes: java.util.List[IndexModel], createIndexOptions: CreateIndexOptions) = + throw new UnsupportedOperationException + + override def createIndexes(clientSession: ClientSession, indexes: java.util.List[IndexModel]) = + throw new UnsupportedOperationException + + override def createIndexes( + clientSession: ClientSession, + indexes: java.util.List[IndexModel], + createIndexOptions: CreateIndexOptions + ) = throw new UnsupportedOperationException + + override def listIndexes = throw new UnsupportedOperationException + + override def listIndexes[TResult](resultClass: Class[TResult]) = + SyncListIndexesIterable[TResult]( + wrapped + .listIndexes[TResult]()(DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document], ClassTag(resultClass)) + ) + + override def listIndexes(clientSession: ClientSession) = throw new UnsupportedOperationException + + override def listIndexes[TResult](clientSession: ClientSession, resultClass: Class[TResult]) = + throw new UnsupportedOperationException + + override def dropIndex(indexName: String): Unit = wrapped.dropIndex(indexName).toFuture().get() + + override def dropIndex(indexName: String, dropIndexOptions: DropIndexOptions): Unit = + wrapped.dropIndex(indexName, dropIndexOptions).toFuture().get() + + override def dropIndex(keys: Bson): Unit = + wrapped.dropIndex(keys).toFuture().get() + + override def dropIndex(keys: Bson, dropIndexOptions: DropIndexOptions): Unit = + wrapped.dropIndex(keys, dropIndexOptions).toFuture().get() + + override def dropIndex(clientSession: ClientSession, indexName: String): Unit = + wrapped.dropIndex(unwrap(clientSession), indexName).toFuture().get() + + override def dropIndex(clientSession: ClientSession, keys: Bson): Unit = + wrapped.dropIndex(unwrap(clientSession), keys).toFuture().get() + + override def dropIndex(clientSession: ClientSession, indexName: String, dropIndexOptions: DropIndexOptions): Unit = + wrapped.dropIndex(unwrap(clientSession), indexName, dropIndexOptions).toFuture().get() + + override def dropIndex(clientSession: ClientSession, keys: Bson, dropIndexOptions: DropIndexOptions): Unit = + wrapped.dropIndex(unwrap(clientSession), keys, dropIndexOptions).toFuture().get() + + override def dropIndexes(): Unit = wrapped.dropIndexes().toFuture().get() + override def dropIndexes(clientSession: ClientSession): Unit = + wrapped.dropIndexes(unwrap(clientSession)).toFuture().get() + + override def dropIndexes(dropIndexOptions: DropIndexOptions): Unit = + wrapped.dropIndexes(dropIndexOptions).toFuture().get() + + override def dropIndexes(clientSession: ClientSession, dropIndexOptions: DropIndexOptions): Unit = + wrapped.dropIndexes(unwrap(clientSession), dropIndexOptions).toFuture().get() + + override def createSearchIndex(indexName: String, definition: Bson): String = + wrapped.createSearchIndex(indexName, definition).toFuture().get() + + override def createSearchIndex(definition: Bson): String = wrapped.createSearchIndex(definition).toFuture().get() + + override def 
createSearchIndexes(searchIndexModels: java.util.List[SearchIndexModel]): java.util.List[String] =
+    wrapped.createSearchIndexes(searchIndexModels.asScala.toList).toFuture().get().asJava
+
+  // The wrapped publishers are cold, so each operation must be subscribed (here via toFuture().get()) to actually run
+  override def updateSearchIndex(indexName: String, definition: Bson): Unit =
+    wrapped.updateSearchIndex(indexName, definition).toFuture().get()
+
+  override def dropSearchIndex(indexName: String): Unit = wrapped.dropSearchIndex(indexName).toFuture().get()
+
+  override def listSearchIndexes(): ListSearchIndexesIterable[Document] =
+    SyncListSearchIndexesIterable(wrapped.listSearchIndexes())
+
+  override def listSearchIndexes[TResult](resultClass: Class[TResult]): ListSearchIndexesIterable[TResult] =
+    SyncListSearchIndexesIterable(
+      wrapped.listSearchIndexes[TResult]()(
+        DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document],
+        ClassTag(resultClass)
+      )
+    )
+
+  override def renameCollection(newCollectionNamespace: MongoNamespace): Unit = {
+    wrapped.renameCollection(newCollectionNamespace).toFuture().get()
+  }
+
+  override def renameCollection(
+      newCollectionNamespace: MongoNamespace,
+      renameCollectionOptions: RenameCollectionOptions
+  ): Unit = {
+    wrapped.renameCollection(newCollectionNamespace, renameCollectionOptions).toFuture().get()
+  }
+
+  override def renameCollection(clientSession: ClientSession, newCollectionNamespace: MongoNamespace): Unit = {
+    wrapped.renameCollection(unwrap(clientSession), newCollectionNamespace).toFuture().get()
+  }
+
+  override def renameCollection(
+      clientSession: ClientSession,
+      newCollectionNamespace: MongoNamespace,
+      renameCollectionOptions: RenameCollectionOptions
+  ): Unit = {
+    wrapped.renameCollection(unwrap(clientSession), newCollectionNamespace, renameCollectionOptions).toFuture().get()
+  }
+}
diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncMongoCursor.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncMongoCursor.scala
new file mode 100644
index 00000000000..9dcce5c4cb5
--- /dev/null
+++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncMongoCursor.scala
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mongodb.scala.syncadapter
+
+import java.util.NoSuchElementException
+import java.util.concurrent.{ CountDownLatch, LinkedBlockingDeque, TimeUnit }
+
+import com.mongodb.{ MongoInterruptedException, MongoTimeoutException }
+import com.mongodb.client.MongoCursor
+import org.mongodb.scala.Observable
+import org.reactivestreams.{ Subscriber, Subscription }
+
+case class SyncMongoCursor[T](observable: Observable[T]) extends MongoCursor[T] {
+  val COMPLETED = new Object()
+
+  private var subscription: Option[Subscription] = None
+  private var nextResult: Option[T] = None
+  // Receives results as they arrive, a Throwable on error, or the COMPLETED sentinel when the stream ends
+  private val results = new LinkedBlockingDeque[Any]
+
+  val latch = new CountDownLatch(1)
+  observable.subscribe(new Subscriber[T]() {
+    def onSubscribe(s: Subscription): Unit = {
+      subscription = Some(s)
+      s.request(Long.MaxValue)
+      latch.countDown()
+    }
+
+    def onNext(t: T): Unit = {
+      results.addLast(t)
+    }
+
+    def onError(t: Throwable): Unit = {
+      results.addLast(t)
+    }
+
+    def onComplete(): Unit = {
+      results.addLast(COMPLETED)
+    }
+  })
+  try {
+    if (!latch.await(WAIT_DURATION.toSeconds, TimeUnit.SECONDS)) {
+      throw new MongoTimeoutException("Timeout waiting for subscription")
+    }
+  } catch {
+    case e: InterruptedException =>
+      throw new MongoInterruptedException("Interrupted awaiting latch", e)
+  }
+
+  override def close(): Unit = {
+    subscription.foreach(_.cancel())
+    subscription = None
+  }
+
+  override def hasNext: Boolean = {
+    if (nextResult.isDefined) {
+      return true
+    }
+    results.pollFirst(WAIT_DURATION.toSeconds, TimeUnit.SECONDS) match {
+      case null         => throw new MongoTimeoutException("Timed out waiting for the next result")
+      case t: Throwable => throw translateError(t)
+      case COMPLETED    => false
+      case n =>
+        nextResult = Some(n.asInstanceOf[T])
+        true
+    }
+  }
+
+  override def next: T = {
+    if (!hasNext) {
+      throw new NoSuchElementException
+    }
+    val retVal = nextResult.get
+    nextResult = None
+    retVal
+  }
+
+  override def available(): Int = throw new UnsupportedOperationException
+
+  override def remove(): Unit = throw new UnsupportedOperationException
+
+  def tryNext = throw new UnsupportedOperationException // No good way to fulfill this contract with a Publisher
+
+  def getServerCursor = throw new UnsupportedOperationException
+
+  def getServerAddress = throw new UnsupportedOperationException
+
+  private def translateError(throwable: Throwable): RuntimeException = {
+    throwable match {
+      case exception: RuntimeException => exception
+      case e                           => new RuntimeException(e)
+    }
+  }
+}
diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncMongoDatabase.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncMongoDatabase.scala
new file mode 100644
index 00000000000..846aa6580dc
--- /dev/null
+++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncMongoDatabase.scala
@@ -0,0 +1,284 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.mongodb.scala.syncadapter + +import com.mongodb.{ ReadConcern, ReadPreference, WriteConcern } +import com.mongodb.client.model.{ CreateCollectionOptions, CreateViewOptions } +import com.mongodb.client.{ ClientSession, MongoDatabase => JMongoDatabase } +import org.bson.Document +import org.bson.codecs.configuration.CodecRegistry +import org.bson.conversions.Bson +import org.mongodb.scala.MongoDatabase +import org.mongodb.scala.bson.DefaultHelper.DefaultsTo + +import java.util.concurrent.TimeUnit +import scala.collection.JavaConverters._ +import scala.concurrent.duration.MILLISECONDS +import scala.reflect.ClassTag + +case class SyncMongoDatabase(wrapped: MongoDatabase) extends JMongoDatabase { + + override def getName: String = wrapped.name + + override def getCodecRegistry: CodecRegistry = wrapped.codecRegistry + + override def getReadPreference: ReadPreference = wrapped.readPreference + + override def getWriteConcern: WriteConcern = wrapped.writeConcern + + override def getReadConcern: ReadConcern = wrapped.readConcern + + override def getTimeout(timeUnit: TimeUnit): java.lang.Long = { + wrapped.timeout match { + case Some(value) => timeUnit.convert(value.toMillis, MILLISECONDS) + case None => null + } + } + + override def withCodecRegistry(codecRegistry: CodecRegistry) = + SyncMongoDatabase(wrapped.withCodecRegistry(codecRegistry)) + + override def withReadPreference(readPreference: ReadPreference) = + SyncMongoDatabase(wrapped.withReadPreference(readPreference)) + + override def withWriteConcern(writeConcern: WriteConcern) = SyncMongoDatabase(wrapped.withWriteConcern(writeConcern)) + + override def withReadConcern(readConcern: ReadConcern) = SyncMongoDatabase(wrapped.withReadConcern(readConcern)) + + override def withTimeout(timeout: Long, timeUnit: TimeUnit) = + SyncMongoDatabase(wrapped.withTimeout(timeout, timeUnit)) + + override def getCollection(collectionName: String) = + SyncMongoCollection[Document](wrapped.getCollection(collectionName)) + + override def getCollection[TDocument](collectionName: String, documentClass: Class[TDocument]) = + SyncMongoCollection[TDocument]( + wrapped.getCollection[TDocument](collectionName)( + DefaultsTo.overrideDefault[TDocument, org.mongodb.scala.Document], + ClassTag(documentClass) + ) + ) + + override def runCommand(command: Bson): Document = wrapped.runCommand(command).toFuture().get() + + override def runCommand(command: Bson, readPreference: ReadPreference): Document = + wrapped.runCommand(command, readPreference).toFuture().get() + + override def runCommand[TResult](command: Bson, resultClass: Class[TResult]): TResult = + wrapped + .runCommand[TResult](command)( + DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document], + ClassTag(resultClass) + ) + .toFuture() + .get() + + override def runCommand[TResult]( + command: Bson, + readPreference: ReadPreference, + resultClass: Class[TResult] + ): TResult = + wrapped + .runCommand[TResult](command, readPreference)( + DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document], + ClassTag(resultClass) + ) + .toFuture() + .get() + + override def runCommand(clientSession: ClientSession, command: Bson): Document = + wrapped.runCommand[Document](unwrap(clientSession), command).toFuture().get() + + override def runCommand(clientSession: ClientSession, command: Bson, readPreference: ReadPreference): Document = + wrapped.runCommand[Document](unwrap(clientSession), 
command, readPreference).toFuture().get() + + override def runCommand[TResult](clientSession: ClientSession, command: Bson, resultClass: Class[TResult]): TResult = + wrapped + .runCommand[TResult](unwrap(clientSession), command)( + DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document], + ClassTag(resultClass) + ) + .toFuture() + .get() + + override def runCommand[TResult]( + clientSession: ClientSession, + command: Bson, + readPreference: ReadPreference, + resultClass: Class[TResult] + ): TResult = + wrapped + .runCommand[TResult](unwrap(clientSession), command, readPreference)( + DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document], + ClassTag(resultClass) + ) + .toFuture() + .get() + + override def drop(): Unit = wrapped.drop().toFuture().get() + + override def drop(clientSession: ClientSession): Unit = wrapped.drop(unwrap(clientSession)).toFuture().get() + + override def listCollectionNames = throw new UnsupportedOperationException + + override def listCollections = new SyncListCollectionsIterable[Document](wrapped.listCollections[Document]()) + + override def listCollections[TResult](resultClass: Class[TResult]) = + new SyncListCollectionsIterable[TResult]( + wrapped.listCollections[TResult]()( + DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document], + ClassTag(resultClass) + ) + ) + + override def listCollectionNames(clientSession: ClientSession) = throw new UnsupportedOperationException + + override def listCollections(clientSession: ClientSession) = throw new UnsupportedOperationException + + override def listCollections[TResult](clientSession: ClientSession, resultClass: Class[TResult]) = + throw new UnsupportedOperationException + + override def createCollection(collectionName: String): Unit = { + wrapped.createCollection(collectionName).toFuture().get() + } + + override def createCollection(collectionName: String, createCollectionOptions: CreateCollectionOptions): Unit = { + wrapped.createCollection(collectionName, createCollectionOptions).toFuture().get() + } + + override def createCollection(clientSession: ClientSession, collectionName: String): Unit = { + wrapped.createCollection(unwrap(clientSession), collectionName).toFuture().get() + } + + override def createCollection( + clientSession: ClientSession, + collectionName: String, + createCollectionOptions: CreateCollectionOptions + ): Unit = { + wrapped.createCollection(unwrap(clientSession), collectionName, createCollectionOptions).toFuture().get() + } + + override def createView(viewName: String, viewOn: String, pipeline: java.util.List[_ <: Bson]): Unit = { + wrapped.createView(viewName, viewOn, pipeline.asScala.toList).toFuture().get() + } + + override def createView( + viewName: String, + viewOn: String, + pipeline: java.util.List[_ <: Bson], + createViewOptions: CreateViewOptions + ): Unit = { + wrapped.createView(viewName, viewOn, pipeline.asScala.toList, createViewOptions).toFuture().get() + } + + override def createView( + clientSession: ClientSession, + viewName: String, + viewOn: String, + pipeline: java.util.List[_ <: Bson] + ): Unit = { + wrapped.createView(unwrap(clientSession), viewName, viewOn, pipeline.asScala.toList).toFuture().get() + } + + override def createView( + clientSession: ClientSession, + viewName: String, + viewOn: String, + pipeline: java.util.List[_ <: Bson], + createViewOptions: CreateViewOptions + ): Unit = { + wrapped.createView( + unwrap(clientSession), + viewName, + viewOn, + pipeline.asScala.toList, + createViewOptions + ).toFuture().get() + } + + override 
def watch = new SyncChangeStreamIterable[Document](wrapped.watch[Document]()) + + override def watch[TResult](resultClass: Class[TResult]) = + new SyncChangeStreamIterable[TResult]( + wrapped.watch[TResult]()(DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document], ClassTag(resultClass)) + ) + + override def watch(pipeline: java.util.List[_ <: Bson]) = + new SyncChangeStreamIterable[Document](wrapped.watch[Document](pipeline.asScala.toSeq)) + + override def watch[TResult](pipeline: java.util.List[_ <: Bson], resultClass: Class[TResult]) = + new SyncChangeStreamIterable[TResult]( + wrapped.watch(pipeline.asScala.toSeq)( + DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document], + ClassTag(resultClass) + ) + ) + + override def watch(clientSession: ClientSession) = + new SyncChangeStreamIterable[Document](wrapped.watch(unwrap(clientSession))) + + override def watch[TResult](clientSession: ClientSession, resultClass: Class[TResult]) = + new SyncChangeStreamIterable[TResult]( + wrapped.watch(unwrap(clientSession))( + DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document], + ClassTag(resultClass) + ) + ) + + override def watch(clientSession: ClientSession, pipeline: java.util.List[_ <: Bson]) = + new SyncChangeStreamIterable[Document](wrapped.watch(unwrap(clientSession), pipeline.asScala.toSeq)) + + override def watch[TResult]( + clientSession: ClientSession, + pipeline: java.util.List[_ <: Bson], + resultClass: Class[TResult] + ) = + new SyncChangeStreamIterable[TResult]( + wrapped.watch[TResult](unwrap(clientSession), pipeline.asScala.toSeq)( + DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document], + ClassTag(resultClass) + ) + ) + + override def aggregate(pipeline: java.util.List[_ <: Bson]) = + new SyncAggregateIterable[Document](wrapped.aggregate(pipeline.asScala.toSeq)) + + override def aggregate[TResult](pipeline: java.util.List[_ <: Bson], resultClass: Class[TResult]) = + new SyncAggregateIterable[TResult]( + wrapped.aggregate[TResult](pipeline.asScala.toSeq)( + DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document], + ClassTag(resultClass) + ) + ) + + override def aggregate(clientSession: ClientSession, pipeline: java.util.List[_ <: Bson]) = + new SyncAggregateIterable[Document](wrapped.aggregate(unwrap(clientSession), pipeline.asScala.toSeq)) + + override def aggregate[TResult]( + clientSession: ClientSession, + pipeline: java.util.List[_ <: Bson], + resultClass: Class[TResult] + ) = + new SyncAggregateIterable[TResult]( + wrapped.aggregate[TResult](unwrap(clientSession), pipeline.asScala.toSeq)( + DefaultsTo.overrideDefault[TResult, org.mongodb.scala.Document], + ClassTag(resultClass) + ) + ) + + private def unwrap(clientSession: ClientSession) = clientSession.asInstanceOf[SyncClientSession].wrapped + +} diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncMongoIterable.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncMongoIterable.scala new file mode 100644 index 00000000000..83dbb72330c --- /dev/null +++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/SyncMongoIterable.scala @@ -0,0 +1,56 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.syncadapter
+
+import java.util.function.Consumer
+
+import com.mongodb.Function
+import com.mongodb.client.{ MongoCursor, MongoIterable }
+import org.mongodb.scala.Observable
+
+import scala.concurrent.Await
+import scala.language.reflectiveCalls
+
+trait SyncMongoIterable[T] extends MongoIterable[T] {
+
+  val wrapped: Observable[T]
+
+  override def iterator(): MongoCursor[T] = cursor()
+
+  override def cursor(): MongoCursor[T] = SyncMongoCursor[T](wrapped)
+
+  override def first(): T = Await.result(wrapped.head(), WAIT_DURATION)
+
+  override def map[U](mapper: Function[T, U]) = throw new UnsupportedOperationException
+
+  override def forEach(action: Consumer[_ >: T]): Unit = {
+    use(cursor())(localCursor => while (localCursor.hasNext) action.accept(localCursor.next()))
+  }
+
+  override def into[A <: java.util.Collection[_ >: T]](target: A): A = {
+    use(cursor())(localCursor => while (localCursor.hasNext) target.add(localCursor.next()))
+    target
+  }
+
+  def use[A <: { def close(): Unit }, B](resource: A)(code: A => B): B = {
+    try {
+      code(resource)
+    } finally {
+      resource.close()
+    }
+  }
+}
diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/package.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/package.scala
new file mode 100644
index 00000000000..7fc860f3b38
--- /dev/null
+++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/syncadapter/package.scala
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala
+
+import scala.concurrent.duration.Duration
+import scala.concurrent.{ Await, Future }
+
+package object syncadapter {
+
+  val WAIT_DURATION: Duration = Duration(60, "second")
+
+  implicit class FutureToResult[T](future: Future[T]) {
+    def get(): T = Await.result(future, WAIT_DURATION)
+  }
+
+}
diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/unified/ClientEncryptionTest.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/unified/ClientEncryptionTest.scala
new file mode 100644
index 00000000000..2b18a20e953
--- /dev/null
+++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/unified/ClientEncryptionTest.scala
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.unified
+
+// A class (not an object) so that JUnit can instantiate the test
+class ClientEncryptionTest extends UnifiedTest {
+  val directory = "client-side-encryption/tests/unified"
+}
diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/unified/UnifiedCrudTest.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/unified/UnifiedCrudTest.scala
new file mode 100644
index 00000000000..6f58161ce12
--- /dev/null
+++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/unified/UnifiedCrudTest.scala
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.unified
+
+// A class (not an object) so that JUnit can instantiate the test
+class UnifiedCrudTest extends UnifiedTest {
+  val directory = "crud"
+}
diff --git a/driver-scala/src/integrationTest/scala/org/mongodb/scala/unified/UnifiedTest.scala b/driver-scala/src/integrationTest/scala/org/mongodb/scala/unified/UnifiedTest.scala
new file mode 100644
index 00000000000..3e0431437c4
--- /dev/null
+++ b/driver-scala/src/integrationTest/scala/org/mongodb/scala/unified/UnifiedTest.scala
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.mongodb.scala.unified + +import com.mongodb.client.gridfs.{ GridFSBucket => JGridFSBucket } +import com.mongodb.client.unified.UnifiedTest.Language +import com.mongodb.client.unified.{ UnifiedTest => JUnifiedTest } +import com.mongodb.client.vault.{ ClientEncryption => JClientEncryption } +import com.mongodb.client.{ MongoClient => JMongoClient, MongoDatabase => JMongoDatabase } +import com.mongodb.reactivestreams.client.internal.vault.ClientEncryptionImpl +import com.mongodb.{ ClientEncryptionSettings => JClientEncryptionSettings, MongoClientSettings } +import org.junit.jupiter.api.TestInstance +import org.junit.jupiter.api.TestInstance.Lifecycle +import org.junit.jupiter.params.provider.Arguments +import org.mongodb.scala.MongoClient +import org.mongodb.scala.MongoClient.DEFAULT_CODEC_REGISTRY +import org.mongodb.scala.syncadapter.{ SyncClientEncryption, SyncMongoClient } +import org.mongodb.scala.vault.ClientEncryption + +import java.util + +@TestInstance(Lifecycle.PER_CLASS) +abstract class UnifiedTest extends JUnifiedTest { + + val directory: String + + def data(): util.Collection[Arguments] = JUnifiedTest.getTestData(directory, true, Language.SCALA) + + override def createMongoClient(settings: MongoClientSettings): JMongoClient = + SyncMongoClient(MongoClient(MongoClientSettings.builder(settings).codecRegistry(DEFAULT_CODEC_REGISTRY).build())) + + override def createGridFSBucket(database: JMongoDatabase): JGridFSBucket = + throw new NotImplementedError("Not implemented") + + override def createClientEncryption( + keyVaultClient: JMongoClient, + clientEncryptionSettings: JClientEncryptionSettings + ): JClientEncryption = { + keyVaultClient match { + case client: SyncMongoClient => + SyncClientEncryption(ClientEncryption(new ClientEncryptionImpl( + client.wrapped.wrapped, + clientEncryptionSettings + ))) + case _ => throw new IllegalArgumentException(s"Invalid keyVaultClient type: ${keyVaultClient.getClass}") + } + } + + override protected def isReactive: Boolean = true + + override protected def getLanguage: Language = Language.SCALA +} diff --git a/driver-scala/src/integrationTest/scala/tour/ClientSideEncryptionAutoEncryptionSettingsTour.scala b/driver-scala/src/integrationTest/scala/tour/ClientSideEncryptionAutoEncryptionSettingsTour.scala new file mode 100644 index 00000000000..89638e41da8 --- /dev/null +++ b/driver-scala/src/integrationTest/scala/tour/ClientSideEncryptionAutoEncryptionSettingsTour.scala @@ -0,0 +1,103 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package tour + +import java.security.SecureRandom +import java.util.Base64 + +import scala.collection.JavaConverters._ +import org.mongodb.scala._ +import org.mongodb.scala.bson.BsonDocument +import org.mongodb.scala.model.vault.DataKeyOptions +import org.mongodb.scala.vault.ClientEncryptions +import tour.Helpers._ + +/** + * ClientSideEncryption AutoEncryptionSettings tour + */ +object ClientSideEncryptionAutoEncryptionSettingsTour { + + /** + * Run this main method to see the output of this quick example. + * + * Requires the mongodb-crypt library in the class path and mongocryptd on the system path. + * + * @param args ignored args + */ + def main(args: Array[String]): Unit = { + + // This would have to be the same master key as was used to create the encryption key + val localMasterKey = new Array[Byte](96) + new SecureRandom().nextBytes(localMasterKey) + + val kmsProviders = Map("local" -> Map[String, AnyRef]("key" -> localMasterKey).asJava).asJava + + val keyVaultNamespace = "admin.datakeys" + + val clientEncryptionSettings = ClientEncryptionSettings + .builder() + .keyVaultMongoClientSettings( + MongoClientSettings.builder().applyConnectionString(ConnectionString("mongodb://localhost")).build() + ) + .keyVaultNamespace(keyVaultNamespace) + .kmsProviders(kmsProviders) + .build() + + val clientEncryption = ClientEncryptions.create(clientEncryptionSettings) + + val dataKey = clientEncryption.createDataKey("local", DataKeyOptions()).headResult() + + val base64DataKeyId = Base64.getEncoder.encodeToString(dataKey.getData) + val dbName = "test" + val collName = "coll" + val autoEncryptionSettings = AutoEncryptionSettings + .builder() + .keyVaultNamespace(keyVaultNamespace) + .kmsProviders(kmsProviders) + .schemaMap(Map(s"$dbName.$collName" -> BsonDocument(s"""{ + properties: { + encryptedField: { + encrypt: { + keyId: [{ + "$$binary": { + "base64": "$base64DataKeyId", + "subType": "04" + } + }], + bsonType: "string", + algorithm: "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + } + } + }, + bsonType: "object" + }""")).asJava) + .build() + + val clientSettings = MongoClientSettings.builder().autoEncryptionSettings(autoEncryptionSettings).build() + val mongoClient = MongoClient(clientSettings) + val collection = mongoClient.getDatabase("test").getCollection("coll") + + collection.drop().headResult() + + collection.insertOne(Document("encryptedField" -> "123456789")).headResult() + + collection.find().first().printHeadResult() + + // release resources + mongoClient.close() + } +} diff --git a/driver-scala/src/integrationTest/scala/tour/ClientSideEncryptionExplicitEncryptionAndDecryptionTour.scala b/driver-scala/src/integrationTest/scala/tour/ClientSideEncryptionExplicitEncryptionAndDecryptionTour.scala new file mode 100644 index 00000000000..7c6180356c3 --- /dev/null +++ b/driver-scala/src/integrationTest/scala/tour/ClientSideEncryptionExplicitEncryptionAndDecryptionTour.scala @@ -0,0 +1,101 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package tour
+
+import java.security.SecureRandom
+
+import com.mongodb.MongoNamespace
+import org.mongodb.scala._
+import org.mongodb.scala.bson.{ BsonBinary, BsonString }
+import org.mongodb.scala.model.vault.{ DataKeyOptions, EncryptOptions }
+import org.mongodb.scala.model.{ Filters, IndexOptions, Indexes }
+import org.mongodb.scala.vault.ClientEncryptions
+import tour.Helpers._
+
+import scala.collection.JavaConverters._
+
+/**
+ * ClientSideEncryption explicit encryption and decryption tour
+ */
+object ClientSideEncryptionExplicitEncryptionAndDecryptionTour {
+
+  /**
+   * Run this main method to see the output of this quick example.
+   *
+   * @param args ignored args
+   */
+  def main(args: Array[String]): Unit = {
+
+    // This would have to be the same master key as was used to create the encryption key
+    val localMasterKey = new Array[Byte](96)
+    new SecureRandom().nextBytes(localMasterKey)
+
+    val kmsProviders = Map("local" -> Map[String, AnyRef]("key" -> localMasterKey).asJava).asJava
+
+    val keyVaultNamespace = new MongoNamespace("encryption.testKeyVault")
+
+    val clientSettings = MongoClientSettings.builder().build()
+    val mongoClient = MongoClient(clientSettings)
+
+    // Set up the key vault for this example
+    val keyVaultCollection =
+      mongoClient.getDatabase(keyVaultNamespace.getDatabaseName).getCollection(keyVaultNamespace.getCollectionName)
+    keyVaultCollection.drop().headResult()
+
+    // Ensure that two data keys cannot share the same keyAltName.
+    // (Block on the result so the index is actually created before continuing.)
+    keyVaultCollection.createIndex(
+      Indexes.ascending("keyAltNames"),
+      new IndexOptions()
+        .unique(true)
+        .partialFilterExpression(Filters.exists("keyAltNames"))
+    ).headResult()
+
+    val collection = mongoClient.getDatabase("test").getCollection("coll")
+    collection.drop().headResult()
+
+    // Create the ClientEncryption instance
+    val clientEncryptionSettings = ClientEncryptionSettings
+      .builder()
+      .keyVaultMongoClientSettings(
+        MongoClientSettings.builder().applyConnectionString(ConnectionString("mongodb://localhost")).build()
+      )
+      .keyVaultNamespace(keyVaultNamespace.getFullName)
+      .kmsProviders(kmsProviders)
+      .build()
+
+    val clientEncryption = ClientEncryptions.create(clientEncryptionSettings)
+
+    val dataKeyId = clientEncryption.createDataKey("local", DataKeyOptions()).headResult()
+
+    // Explicitly encrypt a field
+    val encryptedFieldValue = clientEncryption
+      .encrypt(BsonString("123456789"), EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic").keyId(dataKeyId))
+      .headResult()
+
+    collection.insertOne(Document("encryptedField" -> encryptedFieldValue)).headResult()
+
+    val doc = collection.find.first().headResult()
+    println(doc.toJson())
+
+    // Explicitly decrypt the field
+    println(clientEncryption.decrypt(doc.get[BsonBinary]("encryptedField").get).headResult())
+
+    // release resources
+    clientEncryption.close()
+    mongoClient.close()
+  }
+}
diff --git a/driver-scala/src/integrationTest/scala/tour/ClientSideEncryptionExplicitEncryptionOnlyTour.scala b/driver-scala/src/integrationTest/scala/tour/ClientSideEncryptionExplicitEncryptionOnlyTour.scala
new file mode 100644
index 00000000000..af58c5d75d0
--- /dev/null
+++ b/driver-scala/src/integrationTest/scala/tour/ClientSideEncryptionExplicitEncryptionOnlyTour.scala
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package tour
+
+import java.security.SecureRandom
+
+import com.mongodb.MongoNamespace
+import org.mongodb.scala._
+import org.mongodb.scala.bson.{ BsonBinary, BsonString }
+import org.mongodb.scala.model.vault.{ DataKeyOptions, EncryptOptions }
+import org.mongodb.scala.model.{ Filters, IndexOptions, Indexes }
+import org.mongodb.scala.vault.ClientEncryptions
+import tour.Helpers._
+
+import scala.collection.JavaConverters._
+
+/**
+ * ClientSideEncryption explicit encryption only tour
+ */
+object ClientSideEncryptionExplicitEncryptionOnlyTour {
+
+  /**
+   * Run this main method to see the output of this quick example.
+   *
+   * @param args ignored args
+   */
+  def main(args: Array[String]): Unit = {
+
+    // This would have to be the same master key as was used to create the encryption key
+    val localMasterKey = new Array[Byte](96)
+    new SecureRandom().nextBytes(localMasterKey)
+
+    val kmsProviders = Map("local" -> Map[String, AnyRef]("key" -> localMasterKey).asJava).asJava
+
+    val keyVaultNamespace = new MongoNamespace("encryption.testKeyVault")
+
+    val clientSettings = MongoClientSettings
+      .builder()
+      .autoEncryptionSettings(
+        AutoEncryptionSettings
+          .builder()
+          .keyVaultNamespace(keyVaultNamespace.getFullName)
+          .kmsProviders(kmsProviders)
+          .bypassAutoEncryption(true)
+          .build()
+      )
+      .build()
+    val mongoClient = MongoClient(clientSettings)
+
+    // Set up the key vault for this example
+    val keyVaultCollection =
+      mongoClient.getDatabase(keyVaultNamespace.getDatabaseName).getCollection(keyVaultNamespace.getCollectionName)
+    keyVaultCollection.drop().headResult()
+
+    // Ensure that two data keys cannot share the same keyAltName.
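+    // (A unique index with a partial filter expression enforces uniqueness only for the
+    // documents that actually contain the keyAltNames field, so data keys without
+    // alternate names are unaffected.)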
+    keyVaultCollection.createIndex(
+      Indexes.ascending("keyAltNames"),
+      new IndexOptions()
+        .unique(true)
+        .partialFilterExpression(Filters.exists("keyAltNames"))
+    ).headResult()
+
+    val collection = mongoClient.getDatabase("test").getCollection("coll")
+    collection.drop().headResult()
+
+    // Create the ClientEncryption instance
+    val clientEncryptionSettings = ClientEncryptionSettings
+      .builder()
+      .keyVaultMongoClientSettings(
+        MongoClientSettings.builder().applyConnectionString(ConnectionString("mongodb://localhost")).build()
+      )
+      .keyVaultNamespace(keyVaultNamespace.getFullName)
+      .kmsProviders(kmsProviders)
+      .build()
+
+    val clientEncryption = ClientEncryptions.create(clientEncryptionSettings)
+
+    val dataKeyId = clientEncryption.createDataKey("local", DataKeyOptions()).headResult()
+
+    // Explicitly encrypt a field
+    val encryptedFieldValue = clientEncryption
+      .encrypt(BsonString("123456789"), EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic").keyId(dataKeyId))
+      .headResult()
+
+    collection.insertOne(Document("encryptedField" -> encryptedFieldValue)).headResult()
+
+    println(collection.find.first().headResult().toJson())
+
+    // release resources
+    clientEncryption.close()
+    mongoClient.close()
+  }
+}
diff --git a/driver-scala/src/integrationTest/scala/tour/ClientSideEncryptionSimpleTour.scala b/driver-scala/src/integrationTest/scala/tour/ClientSideEncryptionSimpleTour.scala
new file mode 100644
index 00000000000..967bc9eec49
--- /dev/null
+++ b/driver-scala/src/integrationTest/scala/tour/ClientSideEncryptionSimpleTour.scala
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package tour
+
+import java.security.SecureRandom
+
+import org.mongodb.scala.{ AutoEncryptionSettings, Document, MongoClient, MongoClientSettings }
+import tour.Helpers._
+
+import scala.collection.JavaConverters._
+
+/**
+ * ClientSideEncryption Simple tour
+ */
+object ClientSideEncryptionSimpleTour {
+
+  /**
+   * Run this main method to see the output of this quick example.
+   *
+   * Requires the mongodb-crypt library in the class path and mongocryptd on the system path.
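+   * Note that the throwaway "local" KMS master key below is regenerated on every run, so
+   * data written by one run cannot be decrypted by a later one; a real deployment would
+   * normally load a persistent master key or use a remote KMS provider instead.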
+ * + * @param args ignored args + */ + def main(args: Array[String]): Unit = { + val localMasterKey = new Array[Byte](96) + new SecureRandom().nextBytes(localMasterKey) + + val kmsProviders = Map("local" -> Map[String, AnyRef]("key" -> localMasterKey).asJava).asJava + + val keyVaultNamespace = "admin.datakeys" + + val autoEncryptionSettings = AutoEncryptionSettings + .builder() + .keyVaultNamespace(keyVaultNamespace) + .kmsProviders(kmsProviders) + .build() + + val clientSettings = MongoClientSettings + .builder() + .autoEncryptionSettings(autoEncryptionSettings) + .build() + + val mongoClient = MongoClient(clientSettings) + val collection = mongoClient.getDatabase("test").getCollection("coll") + + collection.drop().headResult() + + collection.insertOne(Document("encryptedField" -> "123456789")).headResult() + + collection.find().first().printHeadResult() + + // release resources + mongoClient.close() + } +} diff --git a/driver-scala/src/integrationTest/scala/tour/GridFSTour.scala b/driver-scala/src/integrationTest/scala/tour/GridFSTour.scala new file mode 100644 index 00000000000..8bc6a071f1a --- /dev/null +++ b/driver-scala/src/integrationTest/scala/tour/GridFSTour.scala @@ -0,0 +1,114 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tour + +import java.nio.ByteBuffer +import java.nio.charset.StandardCharsets + +import org.mongodb.scala._ +import org.mongodb.scala.bson.ObjectId +import org.mongodb.scala.gridfs._ +import org.mongodb.scala.model.Filters +import tour.Helpers._ + +import scala.util.Success + +/** + * The GridFSTour code example + */ +object GridFSTour { + + // scalastyle:off + /** + * Run this main method to see the output of this quick example. 
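+   *
+   * For example, to run it against a deployment that is not on the default host and port
+   * (the connection string below is illustrative):
+   * {{{
+   *   GridFSTour.main(Array("mongodb://localhost:27017"))
+   * }}}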
+   *
+   * @param args takes an optional single argument for the connection string
+   * @throws Throwable if an operation fails
+   */
+  def main(args: Array[String]): Unit = {
+    val mongoClient: MongoClient = if (args.isEmpty) MongoClient() else MongoClient(args.head)
+
+    // get handle to "mydb" database
+    val database: MongoDatabase = mongoClient.getDatabase("mydb")
+
+    database.drop().results()
+
+    val gridFSBucket = GridFSBucket(database)
+
+    /*
+     * UploadFromStream Example
+     */
+    val observableToUploadFrom: Observable[ByteBuffer] = Observable(
+      Seq(ByteBuffer.wrap("MongoDB Tutorial..".getBytes(StandardCharsets.UTF_8)))
+    )
+
+    // Create some custom options: 1 MB chunks plus custom metadata
+    val options: GridFSUploadOptions =
+      new GridFSUploadOptions().chunkSizeBytes(1024 * 1024).metadata(Document("type" -> "presentation"))
+
+    val fileId: ObjectId =
+      gridFSBucket.uploadFromObservable("mongodb-tutorial", observableToUploadFrom, options).headResult()
+
+    /*
+     * Find documents
+     */
+    println("File names:")
+    gridFSBucket.find().results().foreach(file => println(s" - ${file.getFilename}"))
+
+    /*
+     * Find documents with a filter
+     */
+    gridFSBucket
+      .find(Filters.equal("metadata.contentType", "image/png"))
+      .results()
+      .foreach(file => println(s" > ${file.getFilename}"))
+
+    /*
+     * Download to Observable
+     */
+    val downloadById = gridFSBucket.downloadToObservable(fileId).results()
+    val downloadByIdSize = downloadById.map(_.limit()).sum
+    println(s"downloaded file size: $downloadByIdSize")
+
+    /*
+     * Download to Observable by name
+     */
+    val downloadOptions: GridFSDownloadOptions = new GridFSDownloadOptions().revision(0)
+    val downloadByName = gridFSBucket.downloadToObservable("mongodb-tutorial", downloadOptions).results()
+    val downloadByNameSize = downloadByName.map(_.limit()).sum
+    println(s"downloaded file size: $downloadByNameSize")
+
+    /*
+     * Rename
+     */
+    gridFSBucket.rename(fileId, "mongodbTutorial").andThen({ case Success(r) => println("renamed") }).results()
+
+    /*
+     * Delete
+     */
+    gridFSBucket.delete(fileId).results()
+    println("deleted")
+
+    // Final cleanup
+    database.drop().results()
+    println("Finished")
+  }
+
+  // scalastyle:on
+
+}
diff --git a/driver-scala/src/integrationTest/scala/tour/Helpers.scala b/driver-scala/src/integrationTest/scala/tour/Helpers.scala
new file mode 100644
index 00000000000..55c2be7ff9c
--- /dev/null
+++ b/driver-scala/src/integrationTest/scala/tour/Helpers.scala
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package tour + +import java.util.concurrent.TimeUnit + +import scala.concurrent.Await +import scala.concurrent.duration.Duration + +import org.mongodb.scala._ + +object Helpers { + + implicit class DocumentObservable[C](val observable: Observable[Document]) extends ImplicitObservable[Document] { + override val converter: (Document) => String = (doc) => doc.toJson + } + + implicit class GenericObservable[C](val observable: Observable[C]) extends ImplicitObservable[C] { + override val converter: (C) => String = (doc) => Option(doc).map(_.toString).getOrElse("") + } + + trait ImplicitObservable[C] { + val observable: Observable[C] + val converter: (C) => String + + def results(): Seq[C] = Await.result(observable.toFuture(), Duration(10, TimeUnit.SECONDS)) + def headResult() = Await.result(observable.head(), Duration(10, TimeUnit.SECONDS)) + def printResults(initial: String = ""): Unit = { + if (initial.length > 0) print(initial) + results().foreach(res => println(converter(res))) + } + def printHeadResult(initial: String = ""): Unit = println(s"${initial}${converter(headResult())}") + } + +} diff --git a/driver-scala/src/integrationTest/scala/tour/QuickTour.scala b/driver-scala/src/integrationTest/scala/tour/QuickTour.scala new file mode 100644 index 00000000000..625a7547587 --- /dev/null +++ b/driver-scala/src/integrationTest/scala/tour/QuickTour.scala @@ -0,0 +1,160 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tour + +import java.util.concurrent.CountDownLatch + +import com.mongodb.Block +import com.mongodb.connection.{ ClusterSettings, SslSettings } +import org.bson.UuidRepresentation +import org.bson.codecs.UuidCodec +import org.bson.codecs.configuration.CodecRegistries +import org.mongodb.scala._ +import org.mongodb.scala.bson.BsonDocument +import org.mongodb.scala.model.{ + Accumulators, + Aggregates, + BulkWriteOptions, + CreateCollectionOptions, + DeleteOneModel, + Filters, + InsertOneModel, + Projections, + ReplaceOneModel, + UpdateOneModel +} +import org.mongodb.scala.model.Aggregates._ +import org.mongodb.scala.model.Filters._ +import org.mongodb.scala.model.Projections._ +import org.mongodb.scala.model.Sorts._ +import org.mongodb.scala.model.Updates.{ inc, set } +import org.mongodb.scala.model.changestream.ChangeStreamDocument +import tour.Helpers._ + +import scala.collection.immutable.IndexedSeq + +/** + * The QuickTour code example + */ +object QuickTour { + // scalastyle:off method.length + + /** + * Run this main method to see the output of this quick example. 
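+   *
+   * The example blocks on each Observable via the implicit helpers in [[tour.Helpers]],
+   * for instance (the connection string is illustrative):
+   * {{{
+   *   val client = MongoClient("mongodb://localhost:27017")
+   *   client.listDatabaseNames().printResults()
+   *   client.close()
+   * }}}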
+   *
+   * @param args takes an optional single argument for the connection string
+   * @throws Throwable if an operation fails
+   */
+  def main(args: Array[String]): Unit = {
+
+    val mongoClient: MongoClient = if (args.isEmpty) MongoClient() else MongoClient(args.head)
+
+    // get handle to "mydb" database
+    val database: MongoDatabase = mongoClient.getDatabase("mydb")
+
+    // get a handle to the "test" collection
+    val collection: MongoCollection[Document] = database.getCollection("test")
+
+    collection.drop().results()
+
+    // make a document and insert it
+    val doc: Document = Document(
+      "_id" -> 0,
+      "name" -> "MongoDB",
+      "type" -> "database",
+      "count" -> 1,
+      "info" -> Document("x" -> 203, "y" -> 102)
+    )
+
+    collection.insertOne(doc).results()
+
+    // get it (it's the only document in the collection, since we dropped the collection earlier)
+    collection.find.first().printResults()
+
+    // now, let's add lots of little documents to the collection so we can explore queries and cursors
+    val documents: IndexedSeq[Document] = (1 to 100) map { i: Int =>
+      Document("i" -> i)
+    }
+    val insertObservable = collection.insertMany(documents)
+
+    val insertAndCount = for {
+      insertResult <- insertObservable
+      countResult <- collection.countDocuments()
+    } yield countResult
+
+    println(s"total # of documents after inserting 100 small ones (should be 101): ${insertAndCount.headResult()}")
+
+    collection.find().first().printHeadResult()
+
+    // Query Filters
+    // now use a query to get 1 document out
+    collection.find(equal("i", 71)).first().printHeadResult()
+
+    // now use a range query to get a larger subset
+    collection.find(gt("i", 50)).printResults()
+
+    // range query with multiple constraints
+    collection.find(and(gt("i", 50), lte("i", 100))).printResults()
+
+    // Sorting
+    collection.find(exists("i")).sort(descending("i")).first().printHeadResult()
+
+    // Projection
+    collection.find().projection(excludeId()).first().printHeadResult()
+
+    // Aggregation
+    collection
+      .aggregate(
+        Seq(
+          filter(gt("i", 0)),
+          project(Document("""{ITimes10: {$multiply: ["$i", 10]}}"""))
+        )
+      )
+      .printResults()
+
+    // Update One
+    collection.updateOne(equal("i", 10), set("i", 110)).printHeadResult("Update Result: ")
+
+    // Update Many
+    collection.updateMany(lt("i", 100), inc("i", 100)).printHeadResult("Update Result: ")
+
+    // Delete One
+    collection.deleteOne(equal("i", 110)).printHeadResult("Delete Result: ")
+
+    // Delete Many
+    collection.deleteMany(gte("i", 100)).printHeadResult("Delete Result: ")
+
+    // Create Index
+    collection.createIndex(Document("i" -> 1)).printHeadResult("Create Index Result: ")
+
+    // Clean up
+    collection.drop().results()
+
+    // release resources
+    mongoClient.close()
+
+    import scala.collection.JavaConverters._
+    import org.mongodb.scala.bson._
+
+    // Illustrative only: a codec registry that encodes UUIDs with the standard representation
+    val codecRegistry =
+      CodecRegistries.fromRegistries(
+        CodecRegistries.fromCodecs(new UuidCodec(UuidRepresentation.STANDARD)),
+        MongoClient.DEFAULT_CODEC_REGISTRY
+      )
+  }
+
+}
diff --git a/driver-scala/src/integrationTest/scala/tour/QuickTourCaseClass.scala b/driver-scala/src/integrationTest/scala/tour/QuickTourCaseClass.scala
new file mode 100644
index 00000000000..dcf5dd0cec6
--- /dev/null
+++ b/driver-scala/src/integrationTest/scala/tour/QuickTourCaseClass.scala
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package tour
+
+import org.mongodb.scala._
+import org.mongodb.scala.bson.ObjectId
+import org.mongodb.scala.model.Filters._
+import org.mongodb.scala.model.Sorts._
+import org.mongodb.scala.model.Updates._
+import tour.Helpers._
+
+/**
+ * The QuickTourCaseClass code example
+ */
+object QuickTourCaseClass {
+  // scalastyle:off method.length
+
+  /**
+   * Run this main method to see the output of this quick example.
+   *
+   * @param args takes an optional single argument for the connection string
+   * @throws Throwable if an operation fails
+   */
+  def main(args: Array[String]): Unit = {
+
+    // Create the case class
+    object Person {
+      def apply(firstName: String, lastName: String): Person = Person(new ObjectId(), firstName, lastName)
+    }
+    case class Person(_id: ObjectId, firstName: String, lastName: String)
+
+    // Create a codec for the Person case class
+    import org.mongodb.scala.bson.codecs.Macros._
+    import org.mongodb.scala.MongoClient.DEFAULT_CODEC_REGISTRY
+    import org.bson.codecs.configuration.CodecRegistries.{ fromProviders, fromRegistries }
+    val codecRegistry = fromRegistries(fromProviders(classOf[Person]), DEFAULT_CODEC_REGISTRY)
+
+    // Create the client
+    val mongoClient: MongoClient = if (args.isEmpty) MongoClient() else MongoClient(args.head)
+
+    // get handle to "mydb" database
+    val database: MongoDatabase = mongoClient.getDatabase("mydb").withCodecRegistry(codecRegistry)
+
+    // get a handle to the "test" collection
+    val collection: MongoCollection[Person] = database.getCollection("test")
+
+    collection.drop().results()
+
+    // make a document and insert it
+    val person: Person = Person("Ada", "Lovelace")
+
+    collection.insertOne(person).results()
+
+    // get it (it's the only document in the collection, since we dropped the collection earlier)
+    collection.find.first().printResults()
+
+    // now, let's add lots of little documents to the collection so we can explore queries and cursors
+    val people: Seq[Person] = Seq(
+      Person("Charles", "Babbage"),
+      Person("George", "Boole"),
+      Person("Gertrude", "Blanch"),
+      Person("Grace", "Hopper"),
+      Person("Ida", "Rhodes"),
+      Person("Jean", "Bartik"),
+      Person("John", "Backus"),
+      Person("Lucy", "Sanders"),
+      Person("Tim", "Berners Lee"),
+      Person("Zaphod", "Beeblebrox")
+    )
+    collection.insertMany(people).printResults()
+
+    // Querying
+    collection.find().first().printHeadResult()
+
+    // Query Filters
+    collection.find(equal("firstName", "Ida")).first().printHeadResult()
+
+    // now use a range query to get a larger subset
+    collection.find(regex("firstName", "^G")).sort(ascending("lastName")).printResults()
+
+    // Update One
+    collection
+      .updateOne(equal("lastName", "Berners Lee"), set("lastName", "Berners-Lee"))
+      .printHeadResult("Update Result: ")
+
+    // Delete One
+    collection.deleteOne(equal("firstName", "Zaphod")).printHeadResult("Delete Result: ")
+
+    // Clean up
+    collection.drop().results()
+
+    // release resources
+    mongoClient.close()
+  }
+
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/AggregateObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/AggregateObservable.scala
new file mode 100644
index 00000000000..a363ee21662
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/AggregateObservable.scala
@@ -0,0 +1,285 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala
+
+import com.mongodb.ExplainVerbosity
+import com.mongodb.annotations.{ Alpha, Reason }
+
+import java.util.concurrent.TimeUnit
+import com.mongodb.reactivestreams.client.AggregatePublisher
+import org.mongodb.scala.bson.BsonValue
+import org.mongodb.scala.bson.DefaultHelper.DefaultsTo
+import org.mongodb.scala.bson.conversions.Bson
+import org.mongodb.scala.model.Collation
+import org.reactivestreams.Subscriber
+
+import scala.concurrent.duration.Duration
+import scala.reflect.ClassTag
+
+/**
+ * Observable for aggregate
+ *
+ * @param wrapped the underlying Java AggregatePublisher
+ * @tparam TResult The type of the result.
+ * @since 1.0
+ */
+case class AggregateObservable[TResult](private val wrapped: AggregatePublisher[TResult]) extends Observable[TResult] {
+
+  /**
+   * Enables writing to temporary files. A null value indicates that it's unspecified.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/aggregate/ Aggregation]]
+   *
+   * @param allowDiskUse true if writing to temporary files is enabled
+   * @return this
+   */
+  def allowDiskUse(allowDiskUse: Boolean): AggregateObservable[TResult] = {
+    wrapped.allowDiskUse(allowDiskUse)
+    this
+  }
+
+  /**
+   * Sets the maximum execution time on the server for this operation.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/operator/meta/maxTimeMS/ Max Time]]
+   * @param duration the duration
+   * @return this
+   */
+  def maxTime(duration: Duration): AggregateObservable[TResult] = {
+    wrapped.maxTime(duration.toMillis, TimeUnit.MILLISECONDS)
+    this
+  }
+
+  /**
+   * Sets the maximum await execution time on the server for this operation.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/operator/meta/maxTimeMS/ Max Time]]
+   * @param duration the duration
+   * @return this
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def maxAwaitTime(duration: Duration): AggregateObservable[TResult] = {
+    wrapped.maxAwaitTime(duration.toMillis, TimeUnit.MILLISECONDS)
+    this
+  }
+
+  /**
+   * Sets the bypass document level validation flag.
+   *
+   * '''Note:''' This only applies when an `\$out` stage is specified.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/aggregate/ Aggregation]]
+   * @note Requires MongoDB 3.2 or greater
+   * @param bypassDocumentValidation If true, allows the write to opt-out of document level validation.
+   * @return this
+   * @since 1.1
+   */
+  def bypassDocumentValidation(bypassDocumentValidation: Boolean): AggregateObservable[TResult] = {
+    wrapped.bypassDocumentValidation(bypassDocumentValidation)
+    this
+  }
+
+  /**
+   * Sets the collation options
+   *
+   * @param collation the collation options to use
+   * @return this
+   * @since 1.2
+   * @note A null value represents the server default.
+ * @note Requires MongoDB 3.4 or greater + */ + def collation(collation: Collation): AggregateObservable[TResult] = { + wrapped.collation(collation) + this + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 2.2 + * @note Requires MongoDB 3.6 or greater + */ + def comment(comment: String): AggregateObservable[TResult] = { + wrapped.comment(comment) + this + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + * @note The comment can be any valid BSON type for server versions 4.4 and above. + * Server versions between 3.6 and 4.2 only support + * string as comment, and providing a non-string type will result in a server-side error. + */ + def comment(comment: BsonValue): AggregateObservable[TResult] = { + wrapped.comment(comment) + this + } + + /** + * Add top-level variables to the aggregation. + * + * For MongoDB 5.0+, the aggregate command accepts a "let" option. This option is a document consisting of zero or more + * fields representing variables that are accessible to the aggregation pipeline. The key is the name of the variable and the value is + * a constant in the aggregate expression language. Each parameter name is then usable to access the value of the corresponding + * expression with the "$$" syntax within aggregate expression contexts which may require the use of '\$expr' or a pipeline. + * + * @param variables the variables + * @return this + * @since 4.3 + * @note Requires MongoDB 5.0 or greater + */ + def let(variables: Bson): AggregateObservable[TResult] = { + wrapped.let(variables) + this + } + + /** + * Sets the hint for which index to use. A null value means no hint is set. + * + * @param hint the hint + * @return this + * @since 2.2 + * @note Requires MongoDB 3.6 or greater + */ + def hint(hint: Bson): AggregateObservable[TResult] = { + wrapped.hint(hint) + this + } + + /** + * Sets the hint for which index to use. A null value means no hint is set. + * + * @param hint the hint + * @return this + * @since 4.4 + * @note Requires MongoDB 3.6 or greater + */ + def hintString(hint: String): AggregateObservable[TResult] = { + wrapped.hintString(hint) + this + } + + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size + * @return this + * @since 2.7 + */ + def batchSize(batchSize: Int): AggregateObservable[TResult] = { + wrapped.batchSize(batchSize) + this + } + + /** + * Aggregates documents according to the specified aggregation pipeline, which must end with an `\$out` or `\$merge` stage. + * Calling this method and then `subscribing` to the returned [[SingleObservable]] + * is the preferred alternative to subscribing to this [[AggregateObservable]], + * because this method does what is explicitly requested without executing implicit operations. + * + * [[https://www.mongodb.com/docs/manual/aggregation/ Aggregation]] + * + * @throws java.lang.IllegalStateException if the pipeline does not end with an `\$out` or `\$merge` stage + * @return an Observable that indicates when the operation has completed. + */ + def toCollection(): SingleObservable[Unit] = wrapped.toCollection() + + /** + * Sets the timeoutMode for the cursor. 
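+   *
+   * A hedged sketch (assumes `collection: MongoCollection[Document]`, a `pipeline: Seq[Bson]`, and a client-side `timeout` already configured):
+   * {{{
+   * collection.aggregate(pipeline).timeoutMode(TimeoutMode.ITERATION)
+   * }}}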
+   *
+   * Requires the `timeout` to be set, either in the [[MongoClientSettings]],
+   * via [[MongoDatabase]] or via [[MongoCollection]]
+   *
+   * If the `timeout` is set then:
+   *
+   *  - For non-tailable cursors, the default value of timeoutMode is `TimeoutMode.CURSOR_LIFETIME`
+   *  - For tailable cursors, the default value of timeoutMode is `TimeoutMode.ITERATION` and it's an error
+   *    to configure it as `TimeoutMode.CURSOR_LIFETIME`
+   *
+   * @param timeoutMode the timeout mode
+   * @return this
+   * @since 5.2
+   */
+  @Alpha(Array(Reason.CLIENT))
+  def timeoutMode(timeoutMode: TimeoutMode): AggregateObservable[TResult] = {
+    wrapped.timeoutMode(timeoutMode)
+    this
+  }
+
+  /**
+   * Helper to return a single observable limited to the first result.
+   *
+   * @return a single observable which will return the first result.
+   * @since 4.0
+   */
+  def first(): SingleObservable[TResult] = wrapped.first()
+
+  /**
+   * Explain the execution plan for this operation with the server's default verbosity level
+   *
+   * @tparam ExplainResult The type of the result
+   * @return the execution plan
+   * @since 4.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def explain[ExplainResult]()(
+      implicit e: ExplainResult DefaultsTo Document,
+      ct: ClassTag[ExplainResult]
+  ): SingleObservable[ExplainResult] =
+    wrapped.explain[ExplainResult](ct)
+
+  /**
+   * Explain the execution plan for this operation with the given verbosity level
+   *
+   * @tparam ExplainResult The type of the result
+   * @param verbosity the verbosity of the explanation
+   * @return the execution plan
+   * @since 4.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def explain[ExplainResult](
+      verbosity: ExplainVerbosity
+  )(implicit e: ExplainResult DefaultsTo Document, ct: ClassTag[ExplainResult]): SingleObservable[ExplainResult] =
+    wrapped.explain[ExplainResult](ct, verbosity)
+
+  /**
+   * Requests [[AggregateObservable]] to start streaming data according to the specified aggregation pipeline.
+   *
+   *  - If the aggregation pipeline ends with an `\$out` or `\$merge` stage,
+   *    then finds all documents in the affected namespace and produces them.
+   *    You may want to use [[toCollection]] instead.
+   *  - Otherwise, produces no elements.
+   */
+  override def subscribe(observer: Observer[_ >: TResult]): Unit = wrapped.subscribe(observer)
+
+  /**
+   * Requests [[AggregateObservable]] to start streaming data according to the specified aggregation pipeline.
+   *
+   *  - If the aggregation pipeline ends with an `\$out` or `\$merge` stage,
+   *    then finds all documents in the affected namespace and produces them.
+   *    You may want to use [[toCollection]] instead.
+   *  - Otherwise, produces no elements.
+   */
+  override def subscribe(observer: Subscriber[_ >: TResult]): Unit = wrapped.subscribe(observer)
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/AutoEncryptionSettings.scala b/driver-scala/src/main/scala/org/mongodb/scala/AutoEncryptionSettings.scala
new file mode 100644
index 00000000000..0810bc391cd
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/AutoEncryptionSettings.scala
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mongodb.scala
+
+import com.mongodb.{ AutoEncryptionSettings => JAutoEncryptionSettings }
+
+/**
+ * The client-side automatic encryption settings. In-use encryption enables an application to specify what fields in a collection
+ * must be encrypted, and the driver automatically encrypts commands sent to MongoDB and decrypts responses.
+ *
+ * Automatic encryption is an enterprise-only feature that only applies to operations on a collection. Automatic encryption is not
+ * supported for operations on a database or view and will result in an error. To bypass automatic encryption,
+ * set bypassAutoEncryption=true in `AutoEncryptionSettings`.
+ *
+ * Explicit encryption/decryption and automatic decryption is a community feature, enabled with the new
+ * `com.mongodb.client.vault.ClientEncryption` type.
+ *
+ * A MongoClient configured with bypassAutoEncryption=true will still automatically decrypt.
+ *
+ * If automatic encryption fails on an operation, use a MongoClient configured with bypassAutoEncryption=true and use
+ * ClientEncryption#encrypt to manually encrypt values.
+ *
+ * Enabling client side encryption reduces the maximum document and message size (using a maxBsonObjectSize of 2MiB and
+ * maxMessageSizeBytes of 6MB) and may have a negative performance impact.
+ *
+ * Automatic encryption requires the authenticated user to have the listCollections privilege action.
+ *
+ * Supplying an `encryptedFieldsMap` provides more security than relying on an encryptedFields obtained from the server.
+ * It protects against a malicious server advertising false encryptedFields.
+ *
+ * @since 2.7
+ */
+object AutoEncryptionSettings {
+
+  /**
+   * Gets a Builder for creating a new AutoEncryptionSettings instance.
+   *
+   * @return a new Builder for creating AutoEncryptionSettings.
+   */
+  def builder(): Builder = JAutoEncryptionSettings.builder()
+
+  /**
+   * AutoEncryptionSettings builder type
+   */
+  type Builder = JAutoEncryptionSettings.Builder
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/ChangeStreamObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/ChangeStreamObservable.scala
new file mode 100644
index 00000000000..61136ca4fad
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/ChangeStreamObservable.scala
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.mongodb.scala + +import java.util.concurrent.TimeUnit +import com.mongodb.reactivestreams.client.ChangeStreamPublisher +import org.mongodb.scala.bson.{ BsonTimestamp, BsonValue } +import org.mongodb.scala.model.Collation +import org.mongodb.scala.model.changestream.{ ChangeStreamDocument, FullDocument, FullDocumentBeforeChange } + +import scala.concurrent.duration.Duration + +/** + * Observable for change streams. + * + * '''Note:''' The `ChangeStreamDocument` class will not be applicable for all change stream outputs. + * If using custom pipelines that radically change the result, the [[ChangeStreamObservable#withDocumentClass]] method should be used + * to provide an alternative document format. + * + * @param wrapped the underlying java ChangeStreamIterable + * @tparam TResult The type of the result. + * @since 2.2 + * @note Requires MongoDB 3.6 or greater + */ +case class ChangeStreamObservable[TResult](private val wrapped: ChangeStreamPublisher[TResult]) + extends Observable[ChangeStreamDocument[TResult]] { + + /** + * Sets the fullDocument value. + * + * @param fullDocument the fullDocument + * @return this + */ + def fullDocument(fullDocument: FullDocument): ChangeStreamObservable[TResult] = { + wrapped.fullDocument(fullDocument) + this + } + + /** + * Sets the fullDocumentBeforeChange value. + * + * @param fullDocumentBeforeChange the fullDocumentBeforeChange + * @return this + * @since 4.7 + * @note Requires MongoDB 6.0 or greater + */ + def fullDocumentBeforeChange(fullDocumentBeforeChange: FullDocumentBeforeChange): ChangeStreamObservable[TResult] = { + wrapped.fullDocumentBeforeChange(fullDocumentBeforeChange) + this + } + + /** + * Sets the logical starting point for the new change stream. + * + * @param resumeToken the resume token + * @return this + */ + def resumeAfter(resumeToken: Document): ChangeStreamObservable[TResult] = { + wrapped.resumeAfter(resumeToken.underlying) + this + } + + /** + * The change stream will only provide changes that occurred at or after the specified timestamp. + * + * Any command run against the server will return an operation time that can be used here. + * The default value is an operation time obtained from the server before the change stream was created. + * + * @param startAtOperationTime the start at operation time + * @return this + * @since 2.4 + * @note Requires MongoDB 4.0 or greater + */ + def startAtOperationTime(startAtOperationTime: BsonTimestamp): ChangeStreamObservable[TResult] = { + wrapped.startAtOperationTime(startAtOperationTime) + this + } + + /** + * Sets the logical starting point for the new change stream. + * + * + * This will allow users to watch collections that have been dropped and recreated or newly renamed collections without missing + * any notifications. + * + * @param startAfter the resume token + * @return this + * @since 2.7 + * @note Requires MongoDB 4.2 or greater + * @note The server will report an error if both `startAfter` and `resumeAfter` are specified. + * @see [[https://www.mongodb.com/docs/manual/changeStreams/#change-stream-start-after Change stream start after]] + */ + def startAfter(startAfter: Document): ChangeStreamObservable[TResult] = { + wrapped.startAfter(startAfter.underlying) + this + } + + /** + * Sets the number of documents to return per batch. 
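+ *
+ * A hedged sketch (assumes `collection: MongoCollection[Document]` and an `observer: Observer[ChangeStreamDocument[Document]]`):
+ * {{{
+ * collection.watch().batchSize(16).subscribe(observer)
+ * }}}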
+ * + * @param batchSize the batch size + * @return this + */ + def batchSize(batchSize: Int): ChangeStreamObservable[TResult] = { + wrapped.batchSize(batchSize) + this + } + + /** + * Sets the maximum await execution time on the server for this operation. + * + * [[https://www.mongodb.com/docs/manual/reference/operator/meta/maxTimeMS/ Max Time]] + * @param duration the duration + * @return this + */ + def maxAwaitTime(duration: Duration): ChangeStreamObservable[TResult] = { + wrapped.maxAwaitTime(duration.toMillis, TimeUnit.MILLISECONDS) + this + } + + /** + * Sets the collation options + * + * A null value represents the server default. + * + * @param collation the collation options to use + * @return this + */ + def collation(collation: Collation): ChangeStreamObservable[TResult] = { + wrapped.collation(collation) + this + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + * @note Requires MongoDB 3.6 or greater + */ + def comment(comment: String): ChangeStreamObservable[TResult] = { + wrapped.comment(comment) + this + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + * @note The comment can be any valid BSON type for server versions 4.4 and above. + * Server versions between 3.6 and 4.2 only support + * string as comment, and providing a non-string type will result in a server-side error. + */ + def comment(comment: BsonValue): ChangeStreamObservable[TResult] = { + wrapped.comment(comment) + this + } + + /** + * Sets whether to include expanded change stream events, which are: + * createIndexes, dropIndexes, modify, create, shardCollection, + * reshardCollection, refineCollectionShardKey. False by default. + * + * @param showExpandedEvents true to include expanded events + * @return this + * @since 4.7 + * @note Requires MongoDB 6.0 or greater + */ + def showExpandedEvents(showExpandedEvents: Boolean): ChangeStreamObservable[TResult] = { + wrapped.showExpandedEvents(showExpandedEvents) + this + } + + /** + * Returns an `Observable` containing the results of the change stream based on the document class provided. + * + * @param clazz the class to use for the raw result. + * @tparam T the result type + * @return an Observable + */ + def withDocumentClass[T](clazz: Class[T]): Observable[T] = wrapped.withDocumentClass(clazz).toObservable() + + /** + * Helper to return a single observable limited to the first result. + * + * @return a single observable which will the first result. + * @since 4.0 + */ + def first(): SingleObservable[ChangeStreamDocument[TResult]] = wrapped.first() + + override def subscribe(observer: Observer[_ >: ChangeStreamDocument[TResult]]): Unit = wrapped.subscribe(observer) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/ClientEncryptionSettings.scala b/driver-scala/src/main/scala/org/mongodb/scala/ClientEncryptionSettings.scala new file mode 100644 index 00000000000..06036597445 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/ClientEncryptionSettings.scala @@ -0,0 +1,41 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mongodb.scala
+
+import com.mongodb.{ ClientEncryptionSettings => JClientEncryptionSettings }
+
+/**
+ * The client-side settings for data key creation and explicit encryption.
+ *
+ * Explicit encryption/decryption is a community feature, enabled with the new `com.mongodb.client.vault.ClientEncryption` type,
+ * for which this class provides the settings.
+ *
+ * @since 2.7
+ */
+object ClientEncryptionSettings {
+
+  /**
+   * Gets a Builder for creating a new ClientEncryptionSettings instance.
+   *
+   * @return a new Builder for creating ClientEncryptionSettings.
+   */
+  def builder(): Builder = JClientEncryptionSettings.builder()
+
+  /**
+   * ClientEncryptionSettings builder type
+   */
+  type Builder = JClientEncryptionSettings.Builder
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/ClientSessionImplicits.scala b/driver-scala/src/main/scala/org/mongodb/scala/ClientSessionImplicits.scala
new file mode 100644
index 00000000000..9718b01c1a8
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/ClientSessionImplicits.scala
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala
+
+/**
+ * Extends the Java [[ClientSession]] and adds helpers for committing and aborting transactions.
+ *
+ * Automatically imported into the `org.mongodb.scala` namespace
+ */
+trait ClientSessionImplicits {
+
+  /**
+   * The implicit ClientSession with Scala helpers
+   *
+   * @param clientSession the clientSession
+   */
+  implicit class ScalaClientSession(clientSession: ClientSession) {
+
+    /**
+     * Commit a transaction in the context of this session.
+     *
+     * A transaction can only be committed if one has first been started.
+     */
+    def commitTransaction(): SingleObservable[Unit] = clientSession.commitTransaction()
+
+    /**
+     * Abort a transaction in the context of this session.
+     *
+     * A transaction can only be aborted if one has first been started.
+     */
+    def abortTransaction(): SingleObservable[Unit] = clientSession.abortTransaction()
+  }
+
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/ClientSessionOptions.scala b/driver-scala/src/main/scala/org/mongodb/scala/ClientSessionOptions.scala
new file mode 100644
index 00000000000..d41e9a44bcc
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/ClientSessionOptions.scala
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala + +import com.mongodb.{ ClientSessionOptions => JClientSessionOptions } + +/** + * The options to apply to a `ClientSession`. + * + * @see ClientSession + * @since 2.2 + */ +object ClientSessionOptions { + + /** + * Gets an instance of a builder + * + * @return a builder instance + */ + def builder(): Builder = JClientSessionOptions.builder + + /** + * Gets an instance of a builder initialized with the given options + * + * @param clientSessionOptions the options with which to initialize the builder + * @return a builder instance + * @since 2.4 + */ + def builder(clientSessionOptions: ClientSessionOptions): Builder = JClientSessionOptions.builder(clientSessionOptions) + + /** + * ClientSession builder + */ + type Builder = JClientSessionOptions.Builder + +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/CreateIndexCommitQuorum.scala b/driver-scala/src/main/scala/org/mongodb/scala/CreateIndexCommitQuorum.scala new file mode 100644 index 00000000000..0789ff3a9a6 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/CreateIndexCommitQuorum.scala @@ -0,0 +1,54 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala + +import scala.collection.JavaConverters._ +import scala.concurrent.duration.Duration +import com.mongodb.{ CreateIndexCommitQuorum => JCreateIndexCommitQuorum } + +/** + * The commit quorum specifies how many data-bearing members of a replica set, including the primary, must + * complete the index builds successfully before the primary marks the indexes as ready. + * + * @since 4.1 + */ +object CreateIndexCommitQuorum { + + /** + * A create index commit quorum of majority. + */ + val MAJORITY: JCreateIndexCommitQuorum = JCreateIndexCommitQuorum.MAJORITY + + /** + * A create index commit quorum of voting members. + */ + val VOTING_MEMBERS: JCreateIndexCommitQuorum = JCreateIndexCommitQuorum.VOTING_MEMBERS + + /** + * Create a create index commit quorum with a mode value. + * + * @param mode the mode value + */ + def create(mode: String): JCreateIndexCommitQuorum = JCreateIndexCommitQuorum.create(mode) + + /** + * Create a create index commit quorum with a w value. 
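+ *
+ * A hedged sketch (assumes `collection: MongoCollection[Document]` and the `IndexModel`, `Indexes` and `CreateIndexOptions` helpers from `org.mongodb.scala.model`):
+ * {{{
+ * collection.createIndexes(
+ *   Seq(IndexModel(Indexes.ascending("name"))),
+ *   new CreateIndexOptions().commitQuorum(CreateIndexCommitQuorum.create(2))
+ * )
+ * }}}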
+ * + * @param w the w value + */ + def create(w: Int): JCreateIndexCommitQuorum = JCreateIndexCommitQuorum.create(w) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/DistinctObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/DistinctObservable.scala new file mode 100644 index 00000000000..84e0905cdf9 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/DistinctObservable.scala @@ -0,0 +1,164 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala + +import com.mongodb.annotations.{ Alpha, Reason } + +import java.util.concurrent.TimeUnit +import com.mongodb.reactivestreams.client.DistinctPublisher +import org.mongodb.scala.bson.BsonValue +import org.mongodb.scala.bson.conversions.Bson +import org.mongodb.scala.model.Collation + +import scala.concurrent.duration.Duration + +/** + * Observable for distinct + * + * @param wrapped the underlying java DistinctObservable + * @tparam TResult The type of the result. + * @since 1.0 + */ +case class DistinctObservable[TResult](private val wrapped: DistinctPublisher[TResult]) extends Observable[TResult] { + + /** + * Sets the query filter to apply to the query. + * + * [[https://www.mongodb.com/docs/manual/reference/method/db.collection.find/ Filter]] + * @param filter the filter, which may be null. + * @return this + */ + def filter(filter: Bson): DistinctObservable[TResult] = { + wrapped.filter(filter) + this + } + + /** + * Sets the maximum execution time on the server for this operation. + * + * [[https://www.mongodb.com/docs/manual/reference/operator/meta/maxTimeMS/ Max Time]] + * @param duration the duration + * @return this + */ + def maxTime(duration: Duration): DistinctObservable[TResult] = { + wrapped.maxTime(duration.toMillis, TimeUnit.MILLISECONDS) + this + } + + /** + * Sets the collation options + * + * @param collation the collation options to use + * @return this + * @since 1.2 + * @note A null value represents the server default. + * @note Requires MongoDB 3.4 or greater + */ + def collation(collation: Collation): DistinctObservable[TResult] = { + wrapped.collation(collation) + this + } + + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size + * @return this + * @since 2.7 + */ + def batchSize(batchSize: Int): DistinctObservable[TResult] = { + wrapped.batchSize(batchSize) + this + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + * @note Requires MongoDB 4.4 or greater + */ + def comment(comment: String): DistinctObservable[TResult] = { + wrapped.comment(comment) + this + } + + /** + * Sets the comment for this operation. A null value means no comment is set. 
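+   *
+   * A hedged sketch (assumes `collection: MongoCollection[Document]`; `BsonString` comes from `org.mongodb.scala.bson`):
+   * {{{
+   * collection.distinct[String]("city").comment(BsonString("monthly-report"))
+   * }}}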
+   *
+   * @param comment the comment
+   * @return this
+   * @since 4.6
+   * @note Requires MongoDB 4.4 or greater
+   */
+  def comment(comment: BsonValue): DistinctObservable[TResult] = {
+    wrapped.comment(comment)
+    this
+  }
+
+  /**
+   * Sets the hint for this operation. A null value means no hint is set.
+   *
+   * @param hint the hint
+   * @return this
+   * @note If [[hint]] is set, it will be used instead of any hint string.
+   * @since 5.3
+   */
+  def hint(hint: Bson): DistinctObservable[TResult] = {
+    wrapped.hint(hint)
+    this
+  }
+
+  /**
+   * Sets the hint for this operation. A null value means no hint is set.
+   *
+   * @param hint the name of the index which should be used for the operation
+   * @return this
+   * @since 5.3
+   */
+  def hintString(hint: String): DistinctObservable[TResult] = {
+    wrapped.hintString(hint)
+    this
+  }
+
+  /**
+   * Sets the timeoutMode for the cursor.
+   *
+   * Requires the `timeout` to be set, either in the [[MongoClientSettings]],
+   * via [[MongoDatabase]] or via [[MongoCollection]]
+   *
+   * @param timeoutMode the timeout mode
+   * @return this
+   * @since 5.2
+   */
+  @Alpha(Array(Reason.CLIENT))
+  def timeoutMode(timeoutMode: TimeoutMode): DistinctObservable[TResult] = {
+    wrapped.timeoutMode(timeoutMode)
+    this
+  }
+
+  /**
+   * Helper to return a single observable limited to the first result.
+   *
+   * @return a single observable which will return the first result.
+   * @since 4.0
+   */
+  def first(): SingleObservable[TResult] = wrapped.first()
+
+  override def subscribe(observer: Observer[_ >: TResult]): Unit = wrapped.subscribe(observer)
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/FindObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/FindObservable.scala
new file mode 100644
index 00000000000..57a964b8315
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/FindObservable.scala
@@ -0,0 +1,387 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala
+
+import com.mongodb.annotations.{ Alpha, Reason }
+import com.mongodb.reactivestreams.client.FindPublisher
+import com.mongodb.{ CursorType, ExplainVerbosity }
+import org.mongodb.scala.bson.BsonValue
+import org.mongodb.scala.bson.DefaultHelper.DefaultsTo
+import org.mongodb.scala.bson.conversions.Bson
+import org.mongodb.scala.model.Collation
+
+import java.util.concurrent.TimeUnit
+import scala.concurrent.duration.Duration
+import scala.reflect.ClassTag
+
+/**
+ * Observable interface for Find.
+ *
+ * @param wrapped the underlying java FindObservable
+ * @tparam TResult The type of the result.
+ * @since 1.0
+ */
+case class FindObservable[TResult](private val wrapped: FindPublisher[TResult]) extends Observable[TResult] {
+
+  /**
+   * Helper to return an Observable limited to just the first result of the query.
+   *
+   * '''Note:''' Sets limit in the background so only returns 1.
+   *
+   * @return an Observable which will return the first item
+   */
+  def first(): SingleObservable[TResult] = wrapped.first()
+
+  /**
+   * Sets the query filter to apply to the query.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/method/db.collection.find/ Filter]]
+   * @param filter the filter, which may be null.
+   * @return this
+   */
+  def filter(filter: Bson): FindObservable[TResult] = {
+    wrapped.filter(filter)
+    this
+  }
+
+  /**
+   * Sets the limit to apply.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/method/cursor.limit/#cursor.limit Limit]]
+   * @param limit the limit
+   * @return this
+   */
+  def limit(limit: Int): FindObservable[TResult] = {
+    wrapped.limit(limit)
+    this
+  }
+
+  /**
+   * Sets the number of documents to skip.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/method/cursor.skip/#cursor.skip Skip]]
+   * @param skip the number of documents to skip
+   * @return this
+   */
+  def skip(skip: Int): FindObservable[TResult] = {
+    wrapped.skip(skip)
+    this
+  }
+
+  /**
+   * Sets the maximum execution time on the server for this operation.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/operator/meta/maxTimeMS/ Max Time]]
+   * @param duration the duration
+   * @return this
+   */
+  def maxTime(duration: Duration): FindObservable[TResult] = {
+    wrapped.maxTime(duration.toMillis, TimeUnit.MILLISECONDS)
+    this
+  }
+
+  /**
+   * The maximum amount of time for the server to wait on new documents to satisfy a tailable cursor
+   * query. This only applies to a TAILABLE_AWAIT cursor. When the cursor is not a TAILABLE_AWAIT cursor,
+   * this option is ignored.
+   *
+   * On servers >= 3.2, this option will be specified on the getMore command as "maxTimeMS". The default
+   * is no value: no "maxTimeMS" is sent to the server with the getMore command.
+   *
+   * On servers < 3.2, this option is ignored, and indicates that the driver should respect the server's default value
+   *
+   * A zero value will be ignored.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/operator/meta/maxTimeMS/ Max Time]]
+   * @param duration the duration
+   * @return this
+   * @since 1.1
+   */
+  def maxAwaitTime(duration: Duration): FindObservable[TResult] = {
+    wrapped.maxAwaitTime(duration.toMillis, TimeUnit.MILLISECONDS)
+    this
+  }
+
+  /**
+   * Sets a document describing the fields to return for all matching documents.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/method/db.collection.find/ Projection]]
+   * @param projection the project document, which may be null.
+   * @return this
+   * @see [[org.mongodb.scala.model.Projections]]
+   */
+  def projection(projection: Bson): FindObservable[TResult] = {
+    wrapped.projection(projection)
+    this
+  }
+
+  /**
+   * Sets the sort criteria to apply to the query.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/method/cursor.sort/ Sort]]
+   * @param sort the sort criteria, which may be null.
+   * @return this
+   */
+  def sort(sort: Bson): FindObservable[TResult] = {
+    wrapped.sort(sort)
+    this
+  }
+
+  /**
+   * The server normally times out idle cursors after an inactivity period (10 minutes)
+   * to prevent excess memory use. Set this option to prevent that.
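+   *
+   * A hedged sketch (assumes `collection: MongoCollection[Document]`):
+   * {{{
+   * collection.find().noCursorTimeout(true)
+   * }}}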
+ * + * @param noCursorTimeout true if cursor timeout is disabled + * @return this + */ + def noCursorTimeout(noCursorTimeout: Boolean): FindObservable[TResult] = { + wrapped.noCursorTimeout(noCursorTimeout) + this + } + + /** + * Get partial results from a sharded cluster if one or more shards are unreachable (instead of throwing an error). + * + * @param partial if partial results for sharded clusters is enabled + * @return this + */ + def partial(partial: Boolean): FindObservable[TResult] = { + wrapped.partial(partial) + this + } + + /** + * Sets the cursor type. + * + * @param cursorType the cursor type + * @return this + */ + def cursorType(cursorType: CursorType): FindObservable[TResult] = { + wrapped.cursorType(cursorType) + this + } + + /** + * Sets the collation options + * + * @param collation the collation options to use + * @return this + * @since 1.2 + * @note A null value represents the server default. + * @note Requires MongoDB 3.4 or greater + */ + def collation(collation: Collation): FindObservable[TResult] = { + wrapped.collation(collation) + this + } + + /** + * Sets the comment to the query. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 2.2 + */ + def comment(comment: String): FindObservable[TResult] = { + wrapped.comment(comment) + this + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + * @note The comment can be any valid BSON type for server versions 4.4 and above. + * Server versions between 3.6 and 4.2 only support + * string as comment, and providing a non-string type will result in a server-side error. + */ + def comment(comment: BsonValue): FindObservable[TResult] = { + wrapped.comment(comment) + this + } + + /** + * Sets the hint for which index to use. A null value means no hint is set. + * + * @param hint the hint + * @return this + * @since 2.2 + */ + def hint(hint: Bson): FindObservable[TResult] = { + wrapped.hint(hint) + this + } + + /** + * Sets the hint for which index to use. A null value means no hint is set. + * + * @param hint the name of the index which should be used for the operation + * @return this + * @note if [[hint]] is set that will be used instead of any hint string. + * @since 2.8 + */ + def hintString(hint: String): FindObservable[TResult] = { + wrapped.hintString(hint) + this + } + + /** + * Add top-level variables to the operation. A null value means no variables are set. + * + * Allows for improved command readability by separating the variables from the query text. + * + * @param let the top-level variables for the find operation or null + * @return this + * @since 4.6 + * @note Requires MongoDB 5.0 or greater + */ + def let(let: Bson): FindObservable[TResult] = { + wrapped.let(let) + this + } + + /** + * Sets the exclusive upper bound for a specific index. A null value means no max is set. + * + * @param max the max + * @return this + * @since 2.2 + */ + def max(max: Bson): FindObservable[TResult] = { + wrapped.max(max) + this + } + + /** + * Sets the minimum inclusive lower bound for a specific index. A null value means no max is set. + * + * @param min the min + * @return this + * @since 2.2 + */ + def min(min: Bson): FindObservable[TResult] = { + wrapped.min(min) + this + } + + /** + * Sets the returnKey. If true the find operation will return only the index keys in the resulting documents. 
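+ *
+ * A hedged sketch (assumes `collection: MongoCollection[Document]` with an ascending index on `name`; `Indexes` is from `org.mongodb.scala.model`):
+ * {{{
+ * collection.find().hint(Indexes.ascending("name")).returnKey(true)
+ * }}}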
+ * + * @param returnKey the returnKey + * @return this + * @since 2.2 + */ + def returnKey(returnKey: Boolean): FindObservable[TResult] = { + wrapped.returnKey(returnKey) + this + } + + /** + * Sets the showRecordId. Set to true to add a field `\$recordId` to the returned documents. + * + * @param showRecordId the showRecordId + * @return this + * @since 2.2 + */ + def showRecordId(showRecordId: Boolean): FindObservable[TResult] = { + wrapped.showRecordId(showRecordId) + this + } + + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size + * @return this + * @since 2.7 + */ + def batchSize(batchSize: Int): FindObservable[TResult] = { + wrapped.batchSize(batchSize) + this + } + + /** + * Enables writing to temporary files on the server. When set to true, the server + * can write temporary data to disk while executing the find operation. + * + *
This option is sent only if the caller explicitly provides a value. The default
+ * is to not send a value. For servers < 3.2, this option is ignored and not sent
+ * as allowDiskUse does not exist in the OP_QUERY wire protocol.
+ * + * @param allowDiskUse the allowDiskUse + * @since 4.1 + * @note Requires MongoDB 4.4 or greater + */ + def allowDiskUse(allowDiskUse: Boolean): FindObservable[TResult] = { + wrapped.allowDiskUse(allowDiskUse) + this + } + + /** + * Sets the timeoutMode for the cursor. + * + * Requires the `timeout` to be set, either in the [[MongoClientSettings]], + * via [[MongoDatabase]] or via [[MongoCollection]] + * + * If the `timeout` is set then: + * + * - For non-tailable cursors, the default value of timeoutMode is `TimeoutMode.CURSOR_LIFETIME` + * - For tailable cursors, the default value of timeoutMode is `TimeoutMode.ITERATION` and its an error + * to configure it as: `TimeoutMode.CURSOR_LIFETIME` + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Array(Reason.CLIENT)) + def timeoutMode(timeoutMode: TimeoutMode): FindObservable[TResult] = { + wrapped.timeoutMode(timeoutMode) + this + } + + /** + * Explain the execution plan for this operation with the server's default verbosity level + * + * @tparam ExplainResult The type of the result + * @return the execution plan + * @since 4.2 + * @note Requires MongoDB 3.2 or greater + */ + def explain[ExplainResult]()( + implicit e: ExplainResult DefaultsTo Document, + ct: ClassTag[ExplainResult] + ): SingleObservable[ExplainResult] = + wrapped.explain[ExplainResult](ct) + + /** + * Explain the execution plan for this operation with the given verbosity level + * + * @tparam ExplainResult The type of the result + * @param verbosity the verbosity of the explanation + * @return the execution plan + * @since 4.2 + * @note Requires MongoDB 3.2 or greater + */ + def explain[ExplainResult]( + verbosity: ExplainVerbosity + )(implicit e: ExplainResult DefaultsTo Document, ct: ClassTag[ExplainResult]): SingleObservable[ExplainResult] = + wrapped.explain[ExplainResult](ct, verbosity) + + override def subscribe(observer: Observer[_ >: TResult]): Unit = wrapped.subscribe(observer) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/Helpers.scala b/driver-scala/src/main/scala/org/mongodb/scala/Helpers.scala new file mode 100644 index 00000000000..46389865350 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/Helpers.scala @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala + +import java.util.concurrent.Executor + +import scala.concurrent.ExecutionContext +import scala.language.implicitConversions +import scala.reflect.ClassTag + +/** + * Custom helpers for the client + */ +private[scala] object Helpers { + + /** + * Helper to get the class from a classTag + * + * @param ct the classTag we want to implicitly get the class of + * @tparam C the class type + * @return the classOf[C] + */ + implicit def classTagToClassOf[C](ct: ClassTag[C]): Class[C] = ct.runtimeClass.asInstanceOf[Class[C]] + + /** + * Direct Execution Context uses the current context to run the command in. 
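+   *
+   * A hedged sketch of the semantics (the runnable executes synchronously on the calling thread):
+   * {{{
+   * DirectExecutionContext.execute(() => println("runs on the calling thread"))
+   * }}}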
+   */
+  final val DirectExecutionContext: ExecutionContext = ExecutionContext.fromExecutor(new Executor {
+    override def execute(command: Runnable): Unit = command.run()
+  })
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/ListCollectionNamesObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/ListCollectionNamesObservable.scala
new file mode 100644
index 00000000000..50b970eec62
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/ListCollectionNamesObservable.scala
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala
+
+import com.mongodb.reactivestreams.client.ListCollectionNamesPublisher
+import org.mongodb.scala.bson.BsonValue
+import org.mongodb.scala.bson.conversions.Bson
+
+import java.util.concurrent.TimeUnit
+import scala.concurrent.duration.Duration
+
+/**
+ * Observable for listing collection names.
+ *
+ * @param wrapped the underlying java ListCollectionNamesPublisher
+ * @since 5.0
+ */
+case class ListCollectionNamesObservable(wrapped: ListCollectionNamesPublisher) extends Observable[String] {
+
+  /**
+   * Sets the query filter to apply to the query.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/method/db.collection.find/ Filter]]
+   * @param filter the filter, which may be null.
+   * @return this
+   */
+  def filter(filter: Bson): ListCollectionNamesObservable = {
+    wrapped.filter(filter)
+    this
+  }
+
+  /**
+   * Sets the maximum execution time on the server for this operation.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/operator/meta/maxTimeMS/ Max Time]]
+   * @param duration the duration
+   * @return this
+   */
+  def maxTime(duration: Duration): ListCollectionNamesObservable = {
+    wrapped.maxTime(duration.toMillis, TimeUnit.MILLISECONDS)
+    this
+  }
+
+  /**
+   * Sets the number of documents to return per batch.
+   *
+   * @param batchSize the batch size
+   * @return this
+   */
+  def batchSize(batchSize: Int): ListCollectionNamesObservable = {
+    wrapped.batchSize(batchSize)
+    this
+  }
+
+  /**
+   * Sets the comment for this operation. A null value means no comment is set.
+   *
+   * @param comment the comment
+   * @return this
+   * @note Requires MongoDB 4.4 or greater
+   */
+  def comment(comment: String): ListCollectionNamesObservable = {
+    wrapped.comment(comment)
+    this
+  }
+
+  /**
+   * Sets the comment for this operation. A null value means no comment is set.
+   *
+   * @param comment the comment
+   * @return this
+   * @note Requires MongoDB 4.4 or greater
+   */
+  def comment(comment: BsonValue): ListCollectionNamesObservable = {
+    wrapped.comment(comment)
+    this
+  }
+
+  /**
+   * Sets the `authorizedCollections` field of the `listCollections` command.
+   *
+   * @param authorizedCollections If `true`, allows executing the `listCollections` command,
+   *                              which has the `nameOnly` field set to `true`, without having the
+   *                              `listCollections` privilege on the database resource.
+   * @return `this`.
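+   * @note A hedged sketch (assumes a `database: MongoDatabase` in scope):
+   * {{{
+   * database.listCollectionNames().authorizedCollections(true)
+   * }}}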
+ * @note Requires MongoDB 4.0 or greater + */ + def authorizedCollections(authorizedCollections: Boolean): ListCollectionNamesObservable = { + wrapped.authorizedCollections(authorizedCollections) + this + } + + /** + * Helper to return a single observable limited to the first result. + * + * @return a single observable which will the first result. + */ + def first(): SingleObservable[String] = wrapped.first() + + override def subscribe(observer: Observer[_ >: String]): Unit = wrapped.subscribe(observer) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/ListCollectionsObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/ListCollectionsObservable.scala new file mode 100644 index 00000000000..3e34de87dfe --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/ListCollectionsObservable.scala @@ -0,0 +1,124 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala + +import com.mongodb.annotations.{ Alpha, Reason } + +import java.util.concurrent.TimeUnit +import com.mongodb.reactivestreams.client.ListCollectionsPublisher +import org.mongodb.scala.bson.BsonValue +import org.mongodb.scala.bson.conversions.Bson + +import scala.concurrent.duration.Duration + +/** + * Observable interface for ListCollections + * + * @param wrapped the underlying java ListCollectionsPublisher + * @tparam TResult The type of the result. + * @since 1.0 + */ +case class ListCollectionsObservable[TResult](wrapped: ListCollectionsPublisher[TResult]) extends Observable[TResult] { + + /** + * Sets the query filter to apply to the query. + * + * [[https://www.mongodb.com/docs/manual/reference/method/db.collection.find/ Filter]] + * @param filter the filter, which may be null. + * @return this + */ + def filter(filter: Bson): ListCollectionsObservable[TResult] = { + wrapped.filter(filter) + this + } + + /** + * Sets the maximum execution time on the server for this operation. + * + * [[https://www.mongodb.com/docs/manual/reference/operator/meta/maxTimeMS/ Max Time]] + * @param duration the duration + * @return this + */ + def maxTime(duration: Duration): ListCollectionsObservable[TResult] = { + wrapped.maxTime(duration.toMillis, TimeUnit.MILLISECONDS) + this + } + + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size + * @return this + * @since 2.7 + */ + def batchSize(batchSize: Int): ListCollectionsObservable[TResult] = { + wrapped.batchSize(batchSize) + this + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + * @note Requires MongoDB 4.4 or greater + */ + def comment(comment: String): ListCollectionsObservable[TResult] = { + wrapped.comment(comment) + this + } + + /** + * Sets the comment for this operation. A null value means no comment is set. 
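+ *
+ * A hedged sketch (assumes `database: MongoDatabase`; `BsonString` comes from `org.mongodb.scala.bson`):
+ * {{{
+ * database.listCollections().comment(BsonString("audit"))
+ * }}}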
+ * + * @param comment the comment + * @return this + * @since 4.6 + * @note Requires MongoDB 4.4 or greater + */ + def comment(comment: BsonValue): ListCollectionsObservable[TResult] = { + wrapped.comment(comment) + this + } + + /** + * Sets the timeoutMode for the cursor. + * + * Requires the `timeout` to be set, either in the [[MongoClientSettings]], + * via [[MongoDatabase]] or via [[MongoCollection]] + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Array(Reason.CLIENT)) + def timeoutMode(timeoutMode: TimeoutMode): ListCollectionsObservable[TResult] = { + wrapped.timeoutMode(timeoutMode) + this + } + + /** + * Helper to return a single observable limited to the first result. + * + * @return a single observable which will the first result. + * @since 4.0 + */ + def first(): SingleObservable[TResult] = wrapped.first() + + override def subscribe(observer: Observer[_ >: TResult]): Unit = wrapped.subscribe(observer) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/ListDatabasesObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/ListDatabasesObservable.scala new file mode 100644 index 00000000000..8fd7f41843c --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/ListDatabasesObservable.scala @@ -0,0 +1,153 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala + +import com.mongodb.annotations.{ Alpha, Reason } + +import java.util.concurrent.TimeUnit +import com.mongodb.reactivestreams.client.ListDatabasesPublisher +import org.mongodb.scala.bson.BsonValue +import org.mongodb.scala.bson.conversions.Bson + +import scala.concurrent.duration.Duration + +/** + * Observable interface for ListDatabases. + * + * @param wrapped the underlying java ListDatabasesObservable + * @tparam TResult The type of the result. + * @since 1.0 + */ +case class ListDatabasesObservable[TResult](wrapped: ListDatabasesPublisher[TResult]) extends Observable[TResult] { + + /** + * Sets the maximum execution time on the server for this operation. + * + * [[https://www.mongodb.com/docs/manual/reference/operator/meta/maxTimeMS/ Max Time]] + * @param duration the duration + * @return this + */ + def maxTime(duration: Duration): ListDatabasesObservable[TResult] = { + wrapped.maxTime(duration.toMillis, TimeUnit.MILLISECONDS) + this + } + + /** + * Sets the query filter to apply to the returned database names. + * + * @param filter the filter, which may be null. + * @return this + * @since 2.2 + * @note Requires MongoDB 3.4.2 or greater + */ + def filter(filter: Bson): ListDatabasesObservable[TResult] = { + wrapped.filter(filter) + this + } + + /** + * Sets the nameOnly flag that indicates whether the command should return just the database names or return the database names and + * size information. 
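+ *
+ * A hedged sketch (assumes a `client: MongoClient` in scope):
+ * {{{
+ * client.listDatabases().nameOnly(true)
+ * }}}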
+ * + * @param nameOnly the nameOnly flag, which may be null + * @return this + * @since 2.2 + * @note Requires MongoDB 3.4.3 or greater + */ + def nameOnly(nameOnly: Boolean): ListDatabasesObservable[TResult] = { + wrapped.nameOnly(nameOnly) + this + } + + /** + * Sets the authorizedDatabasesOnly flag that indicates whether the command should return just the databases which the user + * is authorized to see. + * + * @param authorizedDatabasesOnly the authorizedDatabasesOnly flag, which may be null + * @return this + * @since 4.1 + * @note Requires MongoDB 4.0.5 or greater + */ + def authorizedDatabasesOnly(authorizedDatabasesOnly: Boolean): ListDatabasesObservable[TResult] = { + wrapped.authorizedDatabasesOnly(authorizedDatabasesOnly) + this + } + + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size + * @return this + * @since 2.7 + */ + def batchSize(batchSize: Int): ListDatabasesObservable[TResult] = { + wrapped.batchSize(batchSize) + this + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + * @note Requires MongoDB 4.4 or greater + */ + def comment(comment: String): ListDatabasesObservable[TResult] = { + wrapped.comment(comment) + this + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + * @note Requires MongoDB 4.4 or greater + */ + def comment(comment: BsonValue): ListDatabasesObservable[TResult] = { + wrapped.comment(comment) + this + } + + /** + * Sets the timeoutMode for the cursor. + * + * Requires the `timeout` to be set, either in the [[MongoClientSettings]], + * via [[MongoDatabase]] or via [[MongoCollection]] + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Array(Reason.CLIENT)) + def timeoutMode(timeoutMode: TimeoutMode): ListDatabasesObservable[TResult] = { + wrapped.timeoutMode(timeoutMode) + this + } + + /** + * Helper to return a single observable limited to the first result. + * + * @return a single observable which will the first result. + * @since 4.0 + */ + def first(): SingleObservable[TResult] = wrapped.first() + + override def subscribe(observer: Observer[_ >: TResult]): Unit = wrapped.subscribe(observer) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/ListIndexesObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/ListIndexesObservable.scala new file mode 100644 index 00000000000..f6ab4c53c10 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/ListIndexesObservable.scala @@ -0,0 +1,111 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala + +import com.mongodb.annotations.{ Alpha, Reason } + +import java.util.concurrent.TimeUnit +import com.mongodb.reactivestreams.client.ListIndexesPublisher +import org.mongodb.scala.bson.BsonValue + +import scala.concurrent.duration.Duration + +/** + * Observable interface for ListIndexes. + * + * @param wrapped the underlying java ListIndexesObservable + * @tparam TResult The type of the result. + * @since 1.0 + */ +case class ListIndexesObservable[TResult](wrapped: ListIndexesPublisher[TResult]) extends Observable[TResult] { + + /** + * Sets the maximum execution time on the server for this operation. + * + * [[https://www.mongodb.com/docs/manual/reference/operator/meta/maxTimeMS/ Max Time]] + * @param duration the duration + * @return this + */ + def maxTime(duration: Duration): ListIndexesObservable[TResult] = { + wrapped.maxTime(duration.toMillis, TimeUnit.MILLISECONDS) + this + } + + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size + * @return this + * @since 2.7 + */ + def batchSize(batchSize: Int): ListIndexesObservable[TResult] = { + wrapped.batchSize(batchSize) + this + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + * @note Requires MongoDB 4.4 or greater + */ + def comment(comment: String): ListIndexesObservable[TResult] = { + wrapped.comment(comment) + this + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + * @note Requires MongoDB 4.4 or greater + */ + def comment(comment: BsonValue): ListIndexesObservable[TResult] = { + wrapped.comment(comment) + this + } + + /** + * Sets the timeoutMode for the cursor. + * + * Requires the `timeout` to be set, either in the [[MongoClientSettings]], + * via [[MongoDatabase]] or via [[MongoCollection]] + * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Array(Reason.CLIENT)) + def timeoutMode(timeoutMode: TimeoutMode): ListIndexesObservable[TResult] = { + wrapped.timeoutMode(timeoutMode) + this + } + + /** + * Helper to return a single observable limited to the first result. + * + * @return a single observable which will the first result. + * @since 4.0 + */ + def first(): SingleObservable[TResult] = wrapped.first() + + override def subscribe(observer: Observer[_ >: TResult]): Unit = wrapped.subscribe(observer) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/ListSearchIndexesObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/ListSearchIndexesObservable.scala new file mode 100644 index 00000000000..db7b687c498 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/ListSearchIndexesObservable.scala @@ -0,0 +1,185 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala + +import com.mongodb.ExplainVerbosity +import com.mongodb.annotations.{ Alpha, Reason } +import com.mongodb.reactivestreams.client.ListSearchIndexesPublisher +import org.mongodb.scala.bson.BsonValue +import org.mongodb.scala.bson.DefaultHelper.DefaultsTo +import org.mongodb.scala.model.Collation + +import java.util.concurrent.TimeUnit +import scala.concurrent.duration.Duration +import scala.reflect.ClassTag + +/** + * Observable interface for ListSearchIndexes. + * + * @param wrapped the underlying java ListSearchIndexesPublisher + * @tparam TResult The type of the result. + * @since 4.11 + */ +case class ListSearchIndexesObservable[TResult](wrapped: ListSearchIndexesPublisher[TResult]) + extends Observable[TResult] { + + /** + * Sets an Atlas Search index name for this operation. + * + * @param indexName Atlas Search index name. + * @note Requires MongoDB 6.0 or greater + */ + def name(indexName: String): ListSearchIndexesObservable[TResult] = { + wrapped.name(indexName) + this + } + + /** + * Enables writing to temporary files. A null value indicates that it's unspecified. + * + * @param allowDiskUse true if writing to temporary files is enabled. + * @return this. + * @see [[https://www.mongodb.com/docs/manual/reference/command/aggregate/ Aggregation]] + */ + def allowDiskUse(allowDiskUse: Boolean): ListSearchIndexesObservable[TResult] = { + wrapped.allowDiskUse(allowDiskUse) + this + } + + /** + * Sets the maximum execution time on the server for this operation. + * + * @param duration the duration. + * @return this. + * @see [[https://www.mongodb.com/docs/manual/reference/operator/meta/maxTimeMS/ Max Time]] + */ + def maxTime(duration: Duration): ListSearchIndexesObservable[TResult] = { + wrapped.maxTime(duration.toMillis, TimeUnit.MILLISECONDS) + this + } + + /** + * Sets the collation options + * + * @param collation the collation options to use. + * @return this. + * @since 1.2 + * @note A null value represents the server default. + * @note Requires MongoDB 3.4 or greater. + */ + def collation(collation: Collation): ListSearchIndexesObservable[TResult] = { + wrapped.collation(collation) + this + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment. + * @return this. + * @since 2.2 + * @note Requires MongoDB 3.6 or greater. + */ + def comment(comment: String): ListSearchIndexesObservable[TResult] = { + wrapped.comment(comment) + this + } + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + */ + def comment(comment: BsonValue): ListSearchIndexesObservable[TResult] = { + wrapped.comment(comment) + this + } + + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size. + * @return this. + * @since 2.7 + */ + def batchSize(batchSize: Int): ListSearchIndexesObservable[TResult] = { + wrapped.batchSize(batchSize) + this + } + + /** + * Sets the timeoutMode for the cursor. 
+ *
+ * Requires the `timeout` to be set, either in the [[MongoClientSettings]],
+ * via [[MongoDatabase]] or via [[MongoCollection]]
+ *
+ * If the `timeout` is set then:
+ *
+ * - For non-tailable cursors, the default value of timeoutMode is `TimeoutMode.CURSOR_LIFETIME`
+ * - For tailable cursors, the default value of timeoutMode is `TimeoutMode.ITERATION` and it's an error
+ * to configure it as: `TimeoutMode.CURSOR_LIFETIME`
+ *
+ * @param timeoutMode the timeout mode
+ * @return this
+ * @since 5.2
+ */
+ @Alpha(Array(Reason.CLIENT))
+ def timeoutMode(timeoutMode: TimeoutMode): ListSearchIndexesObservable[TResult] = {
+ wrapped.timeoutMode(timeoutMode)
+ this
+ }
+
+ /**
+ * Helper to return a single observable limited to the first result.
+ *
+ * @return a single observable which will contain the first result.
+ * @since 4.0
+ */
+ def first(): SingleObservable[TResult] = wrapped.first()
+
+ /**
+ * Explain the execution plan for this operation with the server's default verbosity level.
+ *
+ * @tparam ExplainResult The type of the result.
+ * @return the execution plan.
+ * @since 4.2
+ * @note Requires MongoDB 3.6 or greater.
+ */
+ def explain[ExplainResult]()(
+ implicit e: ExplainResult DefaultsTo Document,
+ ct: ClassTag[ExplainResult]
+ ): SingleObservable[ExplainResult] =
+ wrapped.explain[ExplainResult](ct)
+
+ /**
+ * Explain the execution plan for this operation with the given verbosity level.
+ *
+ * @tparam ExplainResult The type of the result.
+ * @param verbosity the verbosity of the explanation.
+ * @return the execution plan.
+ * @since 4.2
+ * @note Requires MongoDB 3.6 or greater.
+ */
+ def explain[ExplainResult](
+ verbosity: ExplainVerbosity
+ )(implicit e: ExplainResult DefaultsTo Document, ct: ClassTag[ExplainResult]): SingleObservable[ExplainResult] =
+ wrapped.explain[ExplainResult](ct, verbosity)
+
+ override def subscribe(observer: Observer[_ >: TResult]): Unit = wrapped.subscribe(observer)
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/LoggerSettings.scala b/driver-scala/src/main/scala/org/mongodb/scala/LoggerSettings.scala
new file mode 100644
index 00000000000..93a04bf9cd3
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/LoggerSettings.scala
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala
+
+import com.mongodb.{ LoggerSettings => JLoggerSettings }
+
+/**
+ * Settings for the logger.
+ *
+ * @since 4.9
+ */
+object LoggerSettings {
+
+ /**
+ * Get a builder for this class.
+ *
+ * @return a new Builder for creating LoggerSettings.
+ */
+ def builder(): Builder = JLoggerSettings.builder()
+
+ /**
+ * LoggerSettings builder type
+ */
+ type Builder = JLoggerSettings.Builder
+
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/MapReduceObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/MapReduceObservable.scala
new file mode 100644
index 00000000000..a7f08258cd7
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/MapReduceObservable.scala
@@ -0,0 +1,275 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala
+
+import com.mongodb.annotations.{ Alpha, Reason }
+
+import java.util.concurrent.TimeUnit
+import com.mongodb.client.model.MapReduceAction
+import com.mongodb.reactivestreams.client.MapReducePublisher
+import org.mongodb.scala.bson.conversions.Bson
+import org.mongodb.scala.model.Collation
+import org.reactivestreams.Subscriber
+
+import scala.concurrent.duration.Duration
+
+/**
+ * Observable for map reduce.
+ *
+ * By default, the [[MapReduceObservable]] produces the results inline. You can write map-reduce output to a collection by using the
+ * [[collectionName]] and [[toCollection]] methods.
+ *
+ * @define docsRef https://www.mongodb.com/docs/manual/reference
+ *
+ * @tparam TResult The type of the result.
+ * @since 1.0
+ */
+@deprecated("Superseded by aggregate", "4.4.0")
+case class MapReduceObservable[TResult](wrapped: MapReducePublisher[TResult]) extends Observable[TResult] {
+
+ /**
+ * Sets the collectionName for the output of the MapReduce
+ *
+ * The default action is to replace the collection if it exists; to change this, use [[action]].
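+ *
+ * For example, a hedged sketch of sending map-reduce output to a collection (the map/reduce functions and the name `mrOut` are illustrative):
+ * {{{
+ * collection.mapReduce(mapFunction, reduceFunction).collectionName("mrOut").toCollection()
+ * }}}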
+ *
+ * @param collectionName the name of the collection that you want the map-reduce operation to write its output to.
+ * @return this
+ * @see [[toCollection]]
+ */
+ def collectionName(collectionName: String): MapReduceObservable[TResult] = {
+ wrapped.collectionName(collectionName)
+ this
+ }
+
+ /**
+ * Sets the JavaScript function that follows the reduce method and modifies the output.
+ *
+ * [[https://www.mongodb.com/docs/manual/reference/command/mapReduce#mapreduce-finalize-cmd Requirements for the finalize Function]]
+ * @param finalizeFunction the JavaScript function that follows the reduce method and modifies the output.
+ * @return this
+ */
+ def finalizeFunction(finalizeFunction: String): MapReduceObservable[TResult] = {
+ wrapped.finalizeFunction(finalizeFunction)
+ this
+ }
+
+ /**
+ * Sets the global variables that are accessible in the map, reduce and finalize functions.
+ *
+ * [[https://www.mongodb.com/docs/manual/reference/command/mapReduce mapReduce]]
+ * @param scope the global variables that are accessible in the map, reduce and finalize functions.
+ * @return this
+ */
+ def scope(scope: Bson): MapReduceObservable[TResult] = {
+ wrapped.scope(scope)
+ this
+ }
+
+ /**
+ * Sets the sort criteria to apply to the query.
+ *
+ * [[https://www.mongodb.com/docs/manual/reference/method/cursor.sort/ Sort]]
+ * @param sort the sort criteria, which may be null.
+ * @return this
+ */
+ def sort(sort: Bson): MapReduceObservable[TResult] = {
+ wrapped.sort(sort)
+ this
+ }
+
+ /**
+ * Sets the query filter to apply to the query.
+ *
+ * [[https://www.mongodb.com/docs/manual/reference/method/db.collection.find/ Filter]]
+ * @param filter the filter to apply to the query.
+ * @return this
+ */
+ def filter(filter: Bson): MapReduceObservable[TResult] = {
+ wrapped.filter(filter)
+ this
+ }
+
+ /**
+ * Sets the limit to apply.
+ *
+ * [[https://www.mongodb.com/docs/manual/reference/method/cursor.limit/#cursor.limit Limit]]
+ * @param limit the limit
+ * @return this
+ */
+ def limit(limit: Int): MapReduceObservable[TResult] = {
+ wrapped.limit(limit)
+ this
+ }
+
+ /**
+ * Sets the flag that specifies whether to convert intermediate data into BSON format between the execution of the map and reduce
+ * functions. Defaults to false.
+ *
+ * [[https://www.mongodb.com/docs/manual/reference/command/mapReduce mapReduce]]
+ * @param jsMode the flag that specifies whether to convert intermediate data into BSON format between the execution of the map and
+ * reduce functions
+ * @return this
+ */
+ def jsMode(jsMode: Boolean): MapReduceObservable[TResult] = {
+ wrapped.jsMode(jsMode)
+ this
+ }
+
+ /**
+ * Sets whether to include the timing information in the result information.
+ *
+ * @param verbose whether to include the timing information in the result information.
+ * @return this
+ */
+ def verbose(verbose: Boolean): MapReduceObservable[TResult] = {
+ wrapped.verbose(verbose)
+ this
+ }
+
+ /**
+ * Sets the maximum execution time on the server for this operation.
+ *
+ * [[https://www.mongodb.com/docs/manual/reference/operator/meta/maxTimeMS/ Max Time]]
+ * @param duration the duration
+ * @return this
+ */
+ def maxTime(duration: Duration): MapReduceObservable[TResult] = {
+ wrapped.maxTime(duration.toMillis, TimeUnit.MILLISECONDS)
+ this
+ }
+
+ /**
+ * Specify the `MapReduceAction` to be used when writing to a collection.
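+ *
+ * For example, a hedged sketch that merges output into an existing collection (the functions and names are illustrative):
+ * {{{
+ * collection.mapReduce(mapFunction, reduceFunction).collectionName("mrOut").action(MapReduceAction.MERGE)
+ * }}}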
+ *
+ * @param action an [[model.MapReduceAction]] to perform on the collection
+ * @return this
+ */
+ def action(action: MapReduceAction): MapReduceObservable[TResult] = {
+ wrapped.action(action)
+ this
+ }
+
+ /**
+ * Sets the name of the database to output into.
+ *
+ * [[https://www.mongodb.com/docs/manual/reference/command/mapReduce#output-to-a-collection-with-an-action output with an action]]
+ * @param databaseName the name of the database to output into.
+ * @return this
+ */
+ def databaseName(databaseName: String): MapReduceObservable[TResult] = {
+ wrapped.databaseName(databaseName)
+ this
+ }
+
+ /**
+ * Sets the bypass document level validation flag.
+ *
+ * '''Note:''' This only applies when an `\$out` stage is specified.
+ *
+ * [[https://www.mongodb.com/docs/manual/reference/command/mapReduce#output-to-a-collection-with-an-action output with an action]]
+ *
+ * @note Requires MongoDB 3.2 or greater
+ * @param bypassDocumentValidation If true, allows the write to opt-out of document level validation.
+ * @return this
+ * @since 1.1
+ */
+ def bypassDocumentValidation(bypassDocumentValidation: Boolean): MapReduceObservable[TResult] = {
+ wrapped.bypassDocumentValidation(bypassDocumentValidation)
+ this
+ }
+
+ /**
+ * Sets the collation options
+ *
+ * @param collation the collation options to use
+ * @return this
+ * @since 1.2
+ * @note A null value represents the server default.
+ * @note Requires MongoDB 3.4 or greater
+ */
+ def collation(collation: Collation): MapReduceObservable[TResult] = {
+ wrapped.collation(collation)
+ this
+ }
+
+ /**
+ * Sets the number of documents to return per batch.
+ *
+ * @param batchSize the batch size
+ * @return this
+ * @since 2.7
+ */
+ def batchSize(batchSize: Int): MapReduceObservable[TResult] = {
+ wrapped.batchSize(batchSize)
+ this
+ }
+
+ /**
+ * Aggregates documents to a collection according to the specified map-reduce function with the given options, which must not produce
+ * results inline. Calling this method and then subscribing to the returned [[SingleObservable]] is the preferred alternative to
+ * subscribing to this [[MapReduceObservable]],
+ * because this method does what is explicitly requested without executing implicit operations.
+ *
+ * @return an Observable that indicates when the operation has completed
+ * [[https://www.mongodb.com/docs/manual/aggregation/ Aggregation]]
+ * @throws java.lang.IllegalStateException if a collection name to write the results to has not been specified
+ * @see [[collectionName]]
+ */
+ def toCollection(): SingleObservable[Unit] = wrapped.toCollection()
+
+ /**
+ * Sets the timeoutMode for the cursor.
+ *
+ * Requires the `timeout` to be set, either in the [[MongoClientSettings]],
+ * via [[MongoDatabase]] or via [[MongoCollection]]
+ *
+ * @param timeoutMode the timeout mode
+ * @return this
+ * @since 5.2
+ */
+ @Alpha(Array(Reason.CLIENT))
+ def timeoutMode(timeoutMode: TimeoutMode): MapReduceObservable[TResult] = {
+ wrapped.timeoutMode(timeoutMode)
+ this
+ }
+
+ /**
+ * Helper to return a single observable limited to the first result.
+ *
+ * @return a single observable which will contain the first result.
+ * @since 4.0
+ */
+ def first(): SingleObservable[TResult] = wrapped.first()
+
+ /**
+ * Requests [[MapReduceObservable]] to start streaming data according to the specified map-reduce function with the given options.
+ *
+ * - If the aggregation produces results inline, then finds all documents in the
+ * affected namespace and produces them.
You may want to use [[toCollection]] instead. + * - Otherwise, produces no elements. + */ + override def subscribe(observer: Observer[_ >: TResult]): Unit = wrapped.subscribe(observer) + + /** + * Requests [[MapReduceObservable]] to start streaming data according to the specified map-reduce function with the given options. + * + * - If the aggregation produces results inline, then finds all documents in the + * affected namespace and produces them. You may want to use [[toCollection]] instead. + * - Otherwise, produces no elements. + */ + override def subscribe(observer: Subscriber[_ >: TResult]): Unit = wrapped.subscribe(observer) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/MongoClient.scala b/driver-scala/src/main/scala/org/mongodb/scala/MongoClient.scala new file mode 100644 index 00000000000..f2c8e4a74cb --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/MongoClient.scala @@ -0,0 +1,151 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala + +import com.mongodb.connection.ClusterDescription +import com.mongodb.reactivestreams.client.{ MongoClient => JMongoClient, MongoClients } +import org.bson.codecs.configuration.CodecRegistries.{ fromProviders, fromRegistries } +import org.bson.codecs.configuration.CodecRegistry +import org.mongodb.scala.bson.codecs.{ DocumentCodecProvider, IterableCodecProvider } + +import java.io.Closeable + +/** + * Companion object for creating new [[MongoClient]] instances + * + * @since 1.0 + */ +object MongoClient { + + /** + * Create a default MongoClient at localhost:27017 + * + * @return MongoClient + */ + def apply(): MongoClient = apply("mongodb://localhost:27017") + + /** + * Create a MongoClient instance from a connection string uri + * + * @param uri the connection string + * @return MongoClient + */ + def apply(uri: String): MongoClient = MongoClient(uri, None) + + /** + * Create a MongoClient instance from a connection string uri + * + * @param uri the connection string + * @param mongoDriverInformation any driver information to associate with the MongoClient + * @return MongoClient + * @note the `mongoDriverInformation` is intended for driver and library authors to associate extra driver metadata with the connections. 
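+ *
+ * A minimal sketch (the connection string is illustrative):
+ * {{{
+ * val client = MongoClient("mongodb://localhost:27017", None)
+ * }}}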
+ */
+ def apply(uri: String, mongoDriverInformation: Option[MongoDriverInformation]): MongoClient = {
+ apply(
+ MongoClientSettings
+ .builder()
+ .applyConnectionString(new ConnectionString(uri))
+ .codecRegistry(DEFAULT_CODEC_REGISTRY)
+ .build(),
+ mongoDriverInformation
+ )
+ }
+
+ /**
+ * Create a MongoClient instance from the MongoClientSettings
+ *
+ * @param clientSettings MongoClientSettings to use for the MongoClient
+ * @return MongoClient
+ * @since 2.3
+ */
+ def apply(clientSettings: MongoClientSettings): MongoClient = MongoClient(clientSettings, None)
+
+ /**
+ * Create a MongoClient instance from the MongoClientSettings
+ *
+ * @param clientSettings MongoClientSettings to use for the MongoClient
+ * @param mongoDriverInformation any driver information to associate with the MongoClient
+ * @return MongoClient
+ * @note the `mongoDriverInformation` is intended for driver and library authors to associate extra driver metadata with the connections.
+ * @since 2.3
+ */
+ def apply(
+ clientSettings: MongoClientSettings,
+ mongoDriverInformation: Option[MongoDriverInformation]
+ ): MongoClient = {
+ val builder = mongoDriverInformation match {
+ case Some(info) => MongoDriverInformation.builder(info)
+ case None => MongoDriverInformation.builder()
+ }
+ builder.driverName("scala").driverPlatform(s"Scala/${scala.util.Properties.versionString}")
+ MongoClient(MongoClients.create(clientSettings, builder.build()))
+ }
+
+ val DEFAULT_CODEC_REGISTRY: CodecRegistry = fromRegistries(
+ fromProviders(DocumentCodecProvider(), IterableCodecProvider()),
+ com.mongodb.MongoClientSettings.getDefaultCodecRegistry
+ )
+}
+
+/**
+ * A client-side representation of a MongoDB cluster. Instances can represent either a standalone MongoDB instance, a replica set,
+ * or a sharded cluster. Instances of this class are responsible for maintaining an up-to-date state of the cluster,
+ * and possibly cache resources related to this, including background threads for monitoring, and connection pools.
+ *
+ * Instances of this class serve as factories for [[MongoDatabase]] instances.
+ *
+ * @param wrapped the underlying java MongoClient
+ * @since 1.0
+ */
+case class MongoClient(protected[scala] val wrapped: JMongoClient) extends MongoCluster(wrapped) with Closeable {
+
+ /**
+ * Close the client, which will close all underlying cached resources, including, for example,
+ * sockets and background monitoring threads.
+ */
+ def close(): Unit = wrapped.close()
+
+ /**
+ * Gets the current cluster description.
+ *
+ * This method will not block, meaning that it may return a `ClusterDescription` whose `clusterType` is unknown
+ * and whose { @link com.mongodb.connection.ServerDescription}s are all in the connecting state. If the application requires
+ * notifications after the driver has connected to a member of the cluster, it should register a `ClusterListener` via
+ * the `ClusterSettings` in `MongoClientSettings`.
+ *
+ * @return the current cluster description
+ * @since 4.1
+ */
+ def getClusterDescription: ClusterDescription =
+ wrapped.getClusterDescription
+
+ /**
+ * Appends the provided [[MongoDriverInformation]] to the existing metadata.
+ *
+ * This enables frameworks and libraries to include identifying metadata (e.g., name, version, platform) which might be visible in
+ * the MongoD/MongoS logs. This can assist with diagnostics by making client identity visible to the server.
+ *
+ * **Note:** Metadata is limited to 512 bytes; any excess will be truncated.
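+ *
+ * A minimal sketch (the library name and version are illustrative):
+ * {{{
+ * client.appendMetadata(
+ *   MongoDriverInformation.builder().driverName("myLibrary").driverVersion("1.0").build()
+ * )
+ * }}}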
+ *
+ * @param mongoDriverInformation the driver information to append to the existing metadata
+ * @since 5.6
+ */
+ def appendMetadata(mongoDriverInformation: MongoDriverInformation): Unit =
+ wrapped.appendMetadata(mongoDriverInformation)
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/MongoClientSettings.scala b/driver-scala/src/main/scala/org/mongodb/scala/MongoClientSettings.scala
new file mode 100644
index 00000000000..2c71a3d7382
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/MongoClientSettings.scala
@@ -0,0 +1,54 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala
+
+import com.mongodb.reactivestreams.client.{ MongoClients => JMongoClients }
+import com.mongodb.{ MongoClientSettings => JMongoClientSettings }
+
+/**
+ * A MongoClientSettings companion object
+ *
+ * @since 1.0
+ */
+object MongoClientSettings {
+
+ /**
+ * Creates the default builder
+ * @return a MongoClientSettings builder
+ */
+ def builder(): Builder = JMongoClientSettings.builder().codecRegistry(MongoClient.DEFAULT_CODEC_REGISTRY)
+
+ /**
+ * Creates a builder from an existing `MongoClientSettings`.
+ *
+ * @param settings the settings to create the builder from
+ * @return a MongoClientSettings builder
+ */
+ def builder(settings: MongoClientSettings): Builder = {
+ val builder = JMongoClientSettings.builder(settings)
+ if (settings.getCodecRegistry == JMongoClients.getDefaultCodecRegistry) {
+ builder.codecRegistry(MongoClient.DEFAULT_CODEC_REGISTRY)
+ }
+ builder
+ }
+
+ /**
+ * MongoClientSettings builder type
+ */
+ type Builder = JMongoClientSettings.Builder
+
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/MongoCluster.scala b/driver-scala/src/main/scala/org/mongodb/scala/MongoCluster.scala
new file mode 100644
index 00000000000..9e5a75e2d7b
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/MongoCluster.scala
@@ -0,0 +1,408 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala
+
+import com.mongodb.annotations.{ Alpha, Reason }
+import com.mongodb.{ ReadConcern, ReadPreference, WriteConcern }
+import com.mongodb.reactivestreams.client.{ MongoCluster => JMongoCluster }
+import org.bson.codecs.configuration.CodecRegistry
+import org.mongodb.scala.bson.DefaultHelper.DefaultsTo
+import org.mongodb.scala.bson.conversions.Bson
+import org.mongodb.scala.model.bulk.{ ClientBulkWriteOptions, ClientBulkWriteResult, ClientNamespacedWriteModel }
+
+import scala.collection.JavaConverters._
+import scala.concurrent.duration.{ Duration, MILLISECONDS }
+import scala.reflect.ClassTag
+
+/**
+ * Companion object for creating new [[MongoCluster]] instances
+ *
+ * @since 1.0
+ */
+object MongoCluster {
+
+ /**
+ * Create a new `MongoCluster` wrapper
+ *
+ * @param wrapped the java `MongoCluster` instance
+ * @return MongoCluster
+ */
+ def apply(wrapped: JMongoCluster): MongoCluster = new MongoCluster(wrapped)
+}
+
+/**
+ * The client-side representation of MongoDB cluster operations.
+ *
+ * The originating [[MongoClient]] is responsible for the closing of resources.
+ * If the originating [[MongoClient]] is closed, then any operations will fail.
+ *
+ * @see MongoClient
+ * @since 5.2
+ */
+class MongoCluster(private val wrapped: JMongoCluster) {
+
+ /**
+ * Get the codec registry for the MongoDatabase.
+ *
+ * @return the { @link org.bson.codecs.configuration.CodecRegistry}
+ */
+ lazy val codecRegistry: CodecRegistry = wrapped.getCodecRegistry
+
+ /**
+ * Get the read preference for the MongoDatabase.
+ *
+ * @return the { @link com.mongodb.ReadPreference}
+ */
+ lazy val readPreference: ReadPreference = wrapped.getReadPreference
+
+ /**
+ * Get the write concern for the MongoDatabase.
+ *
+ * @return the { @link com.mongodb.WriteConcern}
+ */
+ lazy val writeConcern: WriteConcern = wrapped.getWriteConcern
+
+ /**
+ * Get the read concern for the MongoDatabase.
+ *
+ * @return the [[ReadConcern]]
+ */
+ lazy val readConcern: ReadConcern = wrapped.getReadConcern
+
+ /**
+ * The time limit for the full execution of an operation.
+ *
+ * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`,
+ * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`.
+ *
+ * - `null` means that the timeout mechanism for operations will defer to using:
+ * - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to become available
+ * - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out.
+ * - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out.
+ * - `maxTimeMS`: The time limit for processing operations on a cursor.
+ * See: [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS).
+ * - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute.
+ * - `0` means infinite timeout.
+ * - `> 0` The time limit to use for the full execution of an operation.
+ *
+ * @return the optional timeout duration
+ */
+ @Alpha(Array(Reason.CLIENT))
+ lazy val timeout: Option[Duration] =
+ Option.apply(wrapped.getTimeout(MILLISECONDS)).map(t => Duration(t, MILLISECONDS))
+
+ /**
+ * Create a new MongoCluster instance with a different codec registry.
+ *
+ * The { @link CodecRegistry} configured by this method is effectively treated by the driver as an
+ * instance of { @link CodecProvider}, which { @link CodecRegistry} extends.
+ * So there is no benefit to defining a class that implements { @link CodecRegistry}. Rather, an + * application should always create { @link CodecRegistry} instances using the factory methods in + * { @link CodecRegistries}. + * + * @param codecRegistry the new { @link org.bson.codecs.configuration.CodecRegistry} for the collection + * @return a new MongoCluster instance with the different codec registry + * @see CodecRegistries + */ + def withCodecRegistry(codecRegistry: CodecRegistry): MongoCluster = + MongoCluster(wrapped.withCodecRegistry(codecRegistry)) + + /** + * Create a new MongoCluster instance with a different read preference. + * + * @param readPreference the new { @link com.mongodb.ReadPreference} for the collection + * @return a new MongoCluster instance with the different readPreference + */ + def withReadPreference(readPreference: ReadPreference): MongoCluster = + MongoCluster(wrapped.withReadPreference(readPreference)) + + /** + * Create a new MongoCluster instance with a different write concern. + * + * @param writeConcern the new { @link com.mongodb.WriteConcern} for the collection + * @return a new MongoCluster instance with the different writeConcern + */ + def withWriteConcern(writeConcern: WriteConcern): MongoCluster = + MongoCluster(wrapped.withWriteConcern(writeConcern)) + + /** + * Create a new MongoCluster instance with a different read concern. + * + * @param readConcern the new [[ReadConcern]] for the collection + * @return a new MongoCluster instance with the different ReadConcern + * @since 1.1 + */ + def withReadConcern(readConcern: ReadConcern): MongoCluster = + MongoCluster(wrapped.withReadConcern(readConcern)) + + /** + * Create a new MongoCluster instance with the set time limit for the full execution of an operation. + * + * - `0` means infinite timeout. + * - `> 0` The time limit to use for the full execution of an operation. + * + * @param timeout the timeout, which must be greater than or equal to 0 + * @return a new MongoCluster instance with the set time limit for operations + * @since 5.2 + */ + @Alpha(Array(Reason.CLIENT)) + def withTimeout(timeout: Duration): MongoCluster = + MongoCluster(wrapped.withTimeout(timeout.toMillis, MILLISECONDS)) + + /** + * Creates a client session. + * + * '''Note:''' A ClientSession instance can not be used concurrently in multiple asynchronous operations. + * + * @since 2.4 + * @note Requires MongoDB 3.6 or greater + */ + def startSession(): SingleObservable[ClientSession] = wrapped.startSession() + + /** + * Creates a client session. + * + * '''Note:''' A ClientSession instance can not be used concurrently in multiple asynchronous operations. + * + * @param options the options for the client session + * @since 2.2 + * @note Requires MongoDB 3.6 or greater + */ + def startSession(options: ClientSessionOptions): SingleObservable[ClientSession] = wrapped.startSession(options) + + /** + * Gets the database with the given name. 
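+ *
+ * For example (the database name is illustrative):
+ * {{{
+ * val database = cluster.getDatabase("test")
+ * }}}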
+ *
+ * @param name the name of the database
+ * @return the database
+ */
+ def getDatabase(name: String): MongoDatabase = MongoDatabase(wrapped.getDatabase(name))
+
+ /**
+ * Get a list of the database names
+ *
+ * [[https://www.mongodb.com/docs/manual/reference/commands/listDatabases List Databases]]
+ * @return an iterable containing all the names of all the databases
+ */
+ def listDatabaseNames(): Observable[String] = wrapped.listDatabaseNames()
+
+ /**
+ * Get a list of the database names
+ *
+ * [[https://www.mongodb.com/docs/manual/reference/commands/listDatabases List Databases]]
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @return an iterable containing all the names of all the databases
+ * @since 2.2
+ * @note Requires MongoDB 3.6 or greater
+ */
+ def listDatabaseNames(clientSession: ClientSession): Observable[String] = wrapped.listDatabaseNames(clientSession)
+
+ /**
+ * Gets the list of databases
+ *
+ * @tparam TResult the type of the class to use instead of `Document`.
+ * @return the fluent list databases interface
+ */
+ def listDatabases[TResult]()(
+ implicit e: TResult DefaultsTo Document,
+ ct: ClassTag[TResult]
+ ): ListDatabasesObservable[TResult] =
+ ListDatabasesObservable(wrapped.listDatabases(ct))
+
+ /**
+ * Gets the list of databases
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @tparam TResult the type of the class to use instead of `Document`.
+ * @return the fluent list databases interface
+ * @since 2.2
+ * @note Requires MongoDB 3.6 or greater
+ */
+ def listDatabases[TResult](
+ clientSession: ClientSession
+ )(implicit e: TResult DefaultsTo Document, ct: ClassTag[TResult]): ListDatabasesObservable[TResult] =
+ ListDatabasesObservable(wrapped.listDatabases(clientSession, ct))
+
+ /**
+ * Creates a change stream for this cluster.
+ *
+ * @tparam C the target document type of the observable.
+ * @return the change stream observable
+ * @since 2.4
+ * @note Requires MongoDB 4.0 or greater
+ */
+ def watch[C]()(implicit e: C DefaultsTo Document, ct: ClassTag[C]): ChangeStreamObservable[C] =
+ ChangeStreamObservable(wrapped.watch(ct))
+
+ /**
+ * Creates a change stream for this cluster.
+ *
+ * @param pipeline the aggregation pipeline to apply to the change stream
+ * @tparam C the target document type of the observable.
+ * @return the change stream observable
+ * @since 2.4
+ * @note Requires MongoDB 4.0 or greater
+ */
+ def watch[C](pipeline: Seq[Bson])(implicit e: C DefaultsTo Document, ct: ClassTag[C]): ChangeStreamObservable[C] =
+ ChangeStreamObservable(wrapped.watch(pipeline.asJava, ct))
+
+ /**
+ * Creates a change stream for this cluster.
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @tparam C the target document type of the observable.
+ * @return the change stream observable
+ * @since 2.4
+ * @note Requires MongoDB 4.0 or greater
+ */
+ def watch[C](
+ clientSession: ClientSession
+ )(implicit e: C DefaultsTo Document, ct: ClassTag[C]): ChangeStreamObservable[C] =
+ ChangeStreamObservable(wrapped.watch(clientSession, ct))
+
+ /**
+ * Creates a change stream for this cluster.
+ *
+ * @param clientSession the client session with which to associate this operation
+ * @param pipeline the aggregation pipeline to apply to the change stream
+ * @tparam C the target document type of the observable.
+ * @return the change stream observable + * @since 2.4 + * @note Requires MongoDB 4.0 or greater + */ + def watch[C]( + clientSession: ClientSession, + pipeline: Seq[Bson] + )(implicit e: C DefaultsTo Document, ct: ClassTag[C]): ChangeStreamObservable[C] = + ChangeStreamObservable(wrapped.watch(clientSession, pipeline.asJava, ct)) + + /** + * Executes a client-level bulk write operation. + * This method is functionally equivalent to `bulkWrite(List, ClientBulkWriteOptions)` + * with the [[org.mongodb.scala.model.bulk.ClientBulkWriteOptions.clientBulkWriteOptions default options]]. + * + * This operation supports retryable writes. + * Depending on the number of `models`, encoded size of `models`, and the size limits in effect, + * executing this operation may require multiple `bulkWrite` commands. + * The eligibility for retries is determined per each `bulkWrite` command: + * [[org.mongodb.scala.model.bulk.ClientNamespacedUpdateManyModel]], [[org.mongodb.scala.model.bulk.ClientNamespacedDeleteManyModel]] in a command render it non-retryable. + * + * [[https://www.mongodb.com/docs/manual/reference/command/bulkWrite/ bulkWrite]] + * @param models The [[org.mongodb.scala.model.bulk.ClientNamespacedWriteModel]] individual write operations. + * @return The [[SingleObservable]] signalling at most one element [[org.mongodb.scala.model.bulk.ClientBulkWriteResult]] if the operation is successful, + * or the following errors: + * - [[ClientBulkWriteException]]: If and only if the operation is unsuccessful or partially unsuccessful, + * and there is at least one of the following pieces of information to report: + * [[ClientBulkWriteException ClientBulkWriteException#getWriteConcernErrors]], + * [[ClientBulkWriteException ClientBulkWriteException#getWriteErrors]], + * [[ClientBulkWriteException ClientBulkWriteException#getPartialResult]]. + * - [[MongoException]]: Only if the operation is unsuccessful. + * @since 5.4 + * @note Requires MongoDB 8.0 or greater. + */ + def bulkWrite(models: List[_ <: ClientNamespacedWriteModel]): SingleObservable[ClientBulkWriteResult] = + wrapped.bulkWrite(models.asJava) + + /** + * Executes a client-level bulk write operation. + * + * This operation supports retryable writes. + * Depending on the number of `models`, encoded size of `models`, and the size limits in effect, + * executing this operation may require multiple `bulkWrite` commands. + * The eligibility for retries is determined per each `bulkWrite` command: + * [[org.mongodb.scala.model.bulk.ClientNamespacedUpdateManyModel]], [[org.mongodb.scala.model.bulk.ClientNamespacedDeleteManyModel]] in a command render it non-retryable. + * + * [[https://www.mongodb.com/docs/manual/reference/command/bulkWrite/ bulkWrite]] + * @param models The [[org.mongodb.scala.model.bulk.ClientNamespacedWriteModel]] individual write operations. + * @param options The options. + * @return The [[SingleObservable]] signalling at most one element [[org.mongodb.scala.model.bulk.ClientBulkWriteResult]] if the operation is successful, + * or the following errors: + * - [[ClientBulkWriteException]]: If and only if the operation is unsuccessful or partially unsuccessful, + * and there is at least one of the following pieces of information to report: + * [[ClientBulkWriteException ClientBulkWriteException#getWriteConcernErrors]], + * [[ClientBulkWriteException ClientBulkWriteException#getWriteErrors]], + * [[ClientBulkWriteException ClientBulkWriteException#getPartialResult]]. + * - [[MongoException]]: Only if the operation is unsuccessful. 
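+ *
+ * A hedged sketch, assuming the Scala `ClientNamespacedWriteModel` factories mirror the Java API (the namespace, document, and options are illustrative):
+ * {{{
+ * cluster.bulkWrite(
+ *   List(ClientNamespacedWriteModel.insertOne(MongoNamespace("db.coll"), Document("x" -> 1))),
+ *   ClientBulkWriteOptions.clientBulkWriteOptions().ordered(false)
+ * )
+ * }}}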
+ * @since 5.4 + * @note Requires MongoDB 8.0 or greater. + */ + def bulkWrite( + models: List[_ <: ClientNamespacedWriteModel], + options: ClientBulkWriteOptions + ): SingleObservable[ClientBulkWriteResult] = wrapped.bulkWrite(models.asJava, options) + + /** + * Executes a client-level bulk write operation. + * This method is functionally equivalent to `bulkWrite(ClientSession, List, ClientBulkWriteOptions)` + * with the [[org.mongodb.scala.model.bulk.ClientBulkWriteOptions.clientBulkWriteOptions default options]]. + * + * This operation supports retryable writes. + * Depending on the number of `models`, encoded size of `models`, and the size limits in effect, + * executing this operation may require multiple `bulkWrite` commands. + * The eligibility for retries is determined per each `bulkWrite` command: + * [[org.mongodb.scala.model.bulk.ClientNamespacedUpdateManyModel]], [[org.mongodb.scala.model.bulk.ClientNamespacedDeleteManyModel]] in a command render it non-retryable. + * + * [[https://www.mongodb.com/docs/manual/reference/command/bulkWrite/ bulkWrite]] + * @param clientSession [[ClientSession client session]] with which to associate this operation. + * @param models The [[org.mongodb.scala.model.bulk.ClientNamespacedWriteModel]] individual write operations. + * @return The [[SingleObservable]] signalling at most one element [[org.mongodb.scala.model.bulk.ClientBulkWriteResult]] if the operation is successful, + * or the following errors: + * - [[ClientBulkWriteException]]: If and only if the operation is unsuccessful or partially unsuccessful, + * and there is at least one of the following pieces of information to report: + * [[ClientBulkWriteException ClientBulkWriteException#getWriteConcernErrors]], + * [[ClientBulkWriteException ClientBulkWriteException#getWriteErrors]], + * [[ClientBulkWriteException ClientBulkWriteException#getPartialResult]]. + * - [[MongoException]]: Only if the operation is unsuccessful. + * @since 5.4 + * @note Requires MongoDB 8.0 or greater. + */ + def bulkWrite( + clientSession: ClientSession, + models: List[_ <: ClientNamespacedWriteModel] + ): SingleObservable[ClientBulkWriteResult] = wrapped.bulkWrite(clientSession, models.asJava) + + /** + * Executes a client-level bulk write operation. + * + * This operation supports retryable writes. + * Depending on the number of `models`, encoded size of `models`, and the size limits in effect, + * executing this operation may require multiple `bulkWrite` commands. + * The eligibility for retries is determined per each `bulkWrite` command: + * [[org.mongodb.scala.model.bulk.ClientNamespacedUpdateManyModel]], [[org.mongodb.scala.model.bulk.ClientNamespacedDeleteManyModel]] in a command render it non-retryable. + * + * [[https://www.mongodb.com/docs/manual/reference/command/bulkWrite/ bulkWrite]] + * @param clientSession The [[ClientSession client session]] with which to associate this operation. + * @param models The [[org.mongodb.scala.model.bulk.ClientNamespacedWriteModel]] individual write operations. + * @param options The options. 
+ * @return The [[SingleObservable]] signalling at most one element [[org.mongodb.scala.model.bulk.ClientBulkWriteResult]] if the operation is successful, + * or the following errors: + * - [[ClientBulkWriteException]]: If and only if the operation is unsuccessful or partially unsuccessful, + * and there is at least one of the following pieces of information to report: + * [[ClientBulkWriteException ClientBulkWriteException#getWriteConcernErrors]], + * [[ClientBulkWriteException ClientBulkWriteException#getWriteErrors]], + * [[ClientBulkWriteException ClientBulkWriteException#getPartialResult]]. + * - [[MongoException]]: Only if the operation is unsuccessful. + * @since 5.4 + * @note Requires MongoDB 8.0 or greater. + */ + def bulkWrite( + clientSession: ClientSession, + models: List[_ <: ClientNamespacedWriteModel], + options: ClientBulkWriteOptions + ): SingleObservable[ClientBulkWriteResult] = wrapped.bulkWrite(clientSession, models.asJava, options) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/MongoCollection.scala b/driver-scala/src/main/scala/org/mongodb/scala/MongoCollection.scala new file mode 100644 index 00000000000..48e09aa7921 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/MongoCollection.scala @@ -0,0 +1,1863 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala + +import com.mongodb.annotations.{ Alpha, Reason } +import com.mongodb.client.model.DropCollectionOptions + +import java.util +import com.mongodb.reactivestreams.client.{ MongoCollection => JMongoCollection } +import org.bson.codecs.configuration.CodecRegistry +import org.mongodb.scala.bson.DefaultHelper.DefaultsTo +import org.mongodb.scala.bson.conversions.Bson +import org.mongodb.scala.model._ +import org.mongodb.scala.result._ + +import scala.collection.JavaConverters._ +import scala.concurrent.duration.{ Duration, MILLISECONDS, TimeUnit } +import scala.reflect.ClassTag + +// scalastyle:off number.of.methods file.size.limit + +/** + * The MongoCollection representation. + * + * @param wrapped the underlying java MongoCollection + * @tparam TResult The type that this collection will encode documents from and decode documents to. + * @since 1.0 + */ +case class MongoCollection[TResult](private val wrapped: JMongoCollection[TResult]) { + + /** + * Gets the namespace of this collection. + * + * @return the namespace + */ + lazy val namespace: MongoNamespace = wrapped.getNamespace + + /** + * Get the default class to cast any documents returned from the database into. + * + * @return the default class to cast any documents into + */ + lazy val documentClass: Class[TResult] = wrapped.getDocumentClass + + /** + * Get the codec registry for the MongoDatabase. + * + * @return the { @link org.bson.codecs.configuration.CodecRegistry} + */ + lazy val codecRegistry: CodecRegistry = wrapped.getCodecRegistry + + /** + * Get the read preference for the MongoDatabase. 
+ *
+ * @return the { @link com.mongodb.ReadPreference}
+ */
+ lazy val readPreference: ReadPreference = wrapped.getReadPreference
+
+ /**
+ * Get the write concern for the MongoDatabase.
+ *
+ * @return the { @link com.mongodb.WriteConcern}
+ */
+ lazy val writeConcern: WriteConcern = wrapped.getWriteConcern
+
+ /**
+ * Get the read concern for the MongoDatabase.
+ *
+ * @return the [[ReadConcern]]
+ * @since 1.1
+ */
+ lazy val readConcern: ReadConcern = wrapped.getReadConcern
+
+ /**
+ * The time limit for the full execution of an operation.
+ *
+ * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`,
+ * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`.
+ *
+ * - `null` means that the timeout mechanism for operations will defer to using:
+ * - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to become available
+ * - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out.
+ * - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out.
+ * - `maxTimeMS`: The time limit for processing operations on a cursor.
+ * See: [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS).
+ * - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute.
+ * - `0` means infinite timeout.
+ * - `> 0` The time limit to use for the full execution of an operation.
+ *
+ * @return the optional timeout duration
+ * @since 5.2
+ */
+ @Alpha(Array(Reason.CLIENT))
+ lazy val timeout: Option[Duration] =
+ Option.apply(wrapped.getTimeout(MILLISECONDS)).map(t => Duration(t, MILLISECONDS))
+
+ /**
+ * Create a new MongoCollection instance with a different default class to cast any documents returned from the database into.
+ *
+ * @tparam C The type that the new collection will encode documents from and decode documents to
+ * @return a new MongoCollection instance with the different default class
+ */
+ def withDocumentClass[C]()(implicit e: C DefaultsTo Document, ct: ClassTag[C]): MongoCollection[C] =
+ MongoCollection(wrapped.withDocumentClass(ct))
+
+ /**
+ * Create a new MongoCollection instance with a different codec registry.
+ *
+ * The { @link CodecRegistry} configured by this method is effectively treated by the driver as an
+ * instance of { @link CodecProvider}, which { @link CodecRegistry} extends.
+ * So there is no benefit to defining a class that implements { @link CodecRegistry}. Rather, an
+ * application should always create { @link CodecRegistry} instances using the factory methods in
+ * { @link CodecRegistries}.
+ *
+ * @param codecRegistry the new { @link org.bson.codecs.configuration.CodecRegistry} for the collection
+ * @return a new MongoCollection instance with the different codec registry
+ * @see CodecRegistries
+ */
+ def withCodecRegistry(codecRegistry: CodecRegistry): MongoCollection[TResult] =
+ MongoCollection(wrapped.withCodecRegistry(codecRegistry))
+
+ /**
+ * Create a new MongoCollection instance with a different read preference.
+ *
+ * @param readPreference the new { @link com.mongodb.ReadPreference} for the collection
+ * @return a new MongoCollection instance with the different readPreference
+ */
+ def withReadPreference(readPreference: ReadPreference): MongoCollection[TResult] =
+ MongoCollection(wrapped.withReadPreference(readPreference))
+
+ /**
+ * Create a new MongoCollection instance with a different write concern.
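+ *
+ * For example (the write concern value is illustrative):
+ * {{{
+ * val majorityWrites = collection.withWriteConcern(WriteConcern.MAJORITY)
+ * }}}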
+ * + * @param writeConcern the new { @link com.mongodb.WriteConcern} for the collection + * @return a new MongoCollection instance with the different writeConcern + */ + def withWriteConcern(writeConcern: WriteConcern): MongoCollection[TResult] = + MongoCollection(wrapped.withWriteConcern(writeConcern)) + + /** + * Create a new MongoCollection instance with a different read concern. + * + * @param readConcern the new [[ReadConcern]] for the collection + * @return a new MongoCollection instance with the different ReadConcern + * @since 1.1 + */ + def withReadConcern(readConcern: ReadConcern): MongoCollection[TResult] = + MongoCollection(wrapped.withReadConcern(readConcern)) + + /** + * Sets the time limit for the full execution of an operation. + * + * - `0` means infinite timeout. + * - `> 0` The time limit to use for the full execution of an operation. + * + * @param timeout the timeout, which must be greater than or equal to 0 + * @return a new MongoCollection instance with the set time limit for operations + * @since 5.2 + */ + @Alpha(Array(Reason.CLIENT)) + def withTimeout(timeout: Duration): MongoCollection[TResult] = + MongoCollection(wrapped.withTimeout(timeout.toMillis, MILLISECONDS)) + + /** + * Gets an estimate of the count of documents in a collection using collection metadata. + * + * @return a publisher with a single element indicating the estimated number of documents + * @since 2.4 + * @note this method is implemented using the MongoDB server's count command + */ + def estimatedDocumentCount(): SingleObservable[Long] = wrapped.estimatedDocumentCount() + + /** + * Gets an estimate of the count of documents in a collection using collection metadata. + * + * @param options the options describing the count + * @return a publisher with a single element indicating the estimated number of documents + * @since 2.4 + * @note this method is implemented using the MongoDB server's count command + */ + def estimatedDocumentCount(options: EstimatedDocumentCountOptions): SingleObservable[Long] = + wrapped.estimatedDocumentCount(options) + + /** + * Counts the number of documents in the collection. + * + * '''Note:''' + * For a fast count of the total documents in a collection see [[estimatedDocumentCount()*]] + * When migrating from `count()` to `countDocuments()` the following query operators must be replaced: + * + * {{{ + * +-------------+----------------------------------------+ + * | Operator | Replacement | + * +=============+========================================+ + * | `\$where` | `\$expr` | + * +-------------+----------------------------------------+ + * | `\$near` | `\$geoWithin` with `\$center` | + * +-------------+----------------------------------------+ + * | `\$nearSphere`| `\$geoWithin` with `\$centerSphere`| + * +-------------+----------------------------------------+ + * }}} + * + * @return a publisher with a single element indicating the number of documents + * @since 2.4 + */ + def countDocuments(): SingleObservable[Long] = + wrapped.countDocuments() + + /** + * Counts the number of documents in the collection according to the given options. 
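+ *
+ * For example (the filter is illustrative):
+ * {{{
+ * collection.countDocuments(Filters.equal("status", "active"))
+ * }}}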
+ * + * '''Note:''' + * For a fast count of the total documents in a collection see [[estimatedDocumentCount()*]] + * When migrating from `count()` to `countDocuments()` the following query operators must be replaced: + * + * {{{ + * +-------------+----------------------------------------+ + * | Operator | Replacement | + * +=============+========================================+ + * | `\$where` | `\$expr` | + * +-------------+----------------------------------------+ + * | `\$near` | `\$geoWithin` with `\$center` | + * +-------------+----------------------------------------+ + * | `\$nearSphere`| `\$geoWithin` with `\$centerSphere`| + * +-------------+----------------------------------------+ + * }}} + * + * @param filter the query filter + * @return a publisher with a single element indicating the number of documents + * @since 2.4 + */ + def countDocuments(filter: Bson): SingleObservable[Long] = + wrapped.countDocuments(filter) + + /** + * Counts the number of documents in the collection according to the given options. + * + * '''Note:''' + * For a fast count of the total documents in a collection see [[estimatedDocumentCount()*]] + * When migrating from `count()` to `countDocuments()` the following query operators must be replaced: + * + * {{{ + * +-------------+----------------------------------------+ + * | Operator | Replacement | + * +=============+========================================+ + * | `\$where` | `\$expr` | + * +-------------+----------------------------------------+ + * | `\$near` | `\$geoWithin` with `\$center` | + * +-------------+----------------------------------------+ + * | `\$nearSphere`| `\$geoWithin` with `\$centerSphere`| + * +-------------+----------------------------------------+ + * }}} + * + * @param filter the query filter + * @param options the options describing the count + * @return a publisher with a single element indicating the number of documents + * @since 2.4 + */ + def countDocuments(filter: Bson, options: CountOptions): SingleObservable[Long] = + wrapped.countDocuments(filter, options) + + /** + * Counts the number of documents in the collection. + * + * '''Note:''' + * For a fast count of the total documents in a collection see [[estimatedDocumentCount()*]] + * When migrating from `count()` to `countDocuments()` the following query operators must be replaced: + * + * {{{ + * +-------------+----------------------------------------+ + * | Operator | Replacement | + * +=============+========================================+ + * | `\$where` | `\$expr` | + * +-------------+----------------------------------------+ + * | `\$near` | `\$geoWithin` with `\$center` | + * +-------------+----------------------------------------+ + * | `\$nearSphere`| `\$geoWithin` with `\$centerSphere`| + * +-------------+----------------------------------------+ + * }}} + * + * @param clientSession the client session with which to associate this operation + * @return a publisher with a single element indicating the number of documents + * @since 2.4 + * @note Requires MongoDB 3.6 or greater + */ + def countDocuments(clientSession: ClientSession): SingleObservable[Long] = + wrapped.countDocuments(clientSession) + + /** + * Counts the number of documents in the collection according to the given options. 
+ * + * '''Note:''' + * For a fast count of the total documents in a collection see [[estimatedDocumentCount()*]] + * When migrating from `count()` to `countDocuments()` the following query operators must be replaced: + * + * {{{ + * +-------------+----------------------------------------+ + * | Operator | Replacement | + * +=============+========================================+ + * | `\$where` | `\$expr` | + * +-------------+----------------------------------------+ + * | `\$near` | `\$geoWithin` with `\$center` | + * +-------------+----------------------------------------+ + * | `\$nearSphere`| `\$geoWithin` with `\$centerSphere`| + * +-------------+----------------------------------------+ + * }}} + * + * @param clientSession the client session with which to associate this operation + * @param filter the query filter + * @return a publisher with a single element indicating the number of documents + * @since 2.4 + * @note Requires MongoDB 3.6 or greater + */ + def countDocuments(clientSession: ClientSession, filter: Bson): SingleObservable[Long] = + wrapped.countDocuments(clientSession, filter) + + /** + * Counts the number of documents in the collection according to the given options. + * + * '''Note:''' + * For a fast count of the total documents in a collection see [[estimatedDocumentCount()*]] + * When migrating from `count()` to `countDocuments()` the following query operators must be replaced: + * + * {{{ + * +-------------+----------------------------------------+ + * | Operator | Replacement | + * +=============+========================================+ + * | `\$where` | `\$expr` | + * +-------------+----------------------------------------+ + * | `\$near` | `\$geoWithin` with `\$center` | + * +-------------+----------------------------------------+ + * | `\$nearSphere`| `\$geoWithin` with `\$centerSphere`| + * +-------------+----------------------------------------+ + * }}} + * + * @param clientSession the client session with which to associate this operation + * @param filter the query filter + * @param options the options describing the count + * @return a publisher with a single element indicating the number of documents + * @since 2.4 + * @note Requires MongoDB 3.6 or greater + */ + def countDocuments(clientSession: ClientSession, filter: Bson, options: CountOptions): SingleObservable[Long] = + wrapped.countDocuments(clientSession, filter, options) + + /** + * Gets the distinct values of the specified field name. + * + * [[https://www.mongodb.com/docs/manual/reference/command/distinct/ Distinct]] + * @param fieldName the field name + * @tparam C the target type of the observable. + * @return a Observable emitting the sequence of distinct values + */ + def distinct[C](fieldName: String)(implicit ct: ClassTag[C]): DistinctObservable[C] = + DistinctObservable(wrapped.distinct(fieldName, ct)) + + /** + * Gets the distinct values of the specified field name. + * + * [[https://www.mongodb.com/docs/manual/reference/command/distinct/ Distinct]] + * @param fieldName the field name + * @param filter the query filter + * @tparam C the target type of the observable. + * @return a Observable emitting the sequence of distinct values + */ + def distinct[C](fieldName: String, filter: Bson)(implicit ct: ClassTag[C]): DistinctObservable[C] = + DistinctObservable(wrapped.distinct(fieldName, filter, ct)) + + /** + * Gets the distinct values of the specified field name. 
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/distinct/ Distinct]]
+   * @param clientSession the client session with which to associate this operation
+   * @param fieldName the field name
+   * @tparam C the target type of the observable.
+   * @return an Observable emitting the sequence of distinct values
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def distinct[C](clientSession: ClientSession, fieldName: String)(implicit ct: ClassTag[C]): DistinctObservable[C] =
+    DistinctObservable(wrapped.distinct(clientSession, fieldName, ct))
+
+  /**
+   * Gets the distinct values of the specified field name.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/distinct/ Distinct]]
+   * @param clientSession the client session with which to associate this operation
+   * @param fieldName the field name
+   * @param filter the query filter
+   * @tparam C the target type of the observable.
+   * @return an Observable emitting the sequence of distinct values
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def distinct[C](clientSession: ClientSession, fieldName: String, filter: Bson)(
+      implicit ct: ClassTag[C]
+  ): DistinctObservable[C] =
+    DistinctObservable(wrapped.distinct(clientSession, fieldName, filter, ct))
+
+  /**
+   * Finds all documents in the collection.
+   *
+   * [[https://www.mongodb.com/docs/manual/tutorial/query-documents/ Find]]
+   *
+   * @tparam C the target document type of the observable.
+   * @return the find Observable
+   */
+  def find[C]()(implicit e: C DefaultsTo TResult, ct: ClassTag[C]): FindObservable[C] =
+    FindObservable(wrapped.find[C](ct))
+
+  /**
+   * Finds all documents in the collection.
+   *
+   * [[https://www.mongodb.com/docs/manual/tutorial/query-documents/ Find]]
+   * @param filter the query filter
+   * @tparam C the target document type of the observable.
+   * @return the find Observable
+   */
+  def find[C](filter: Bson)(implicit e: C DefaultsTo TResult, ct: ClassTag[C]): FindObservable[C] =
+    FindObservable(wrapped.find(filter, ct))
+
+  /**
+   * Finds all documents in the collection.
+   *
+   * [[https://www.mongodb.com/docs/manual/tutorial/query-documents/ Find]]
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @tparam C the target document type of the observable.
+   * @return the find Observable
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def find[C](clientSession: ClientSession)(implicit e: C DefaultsTo TResult, ct: ClassTag[C]): FindObservable[C] =
+    FindObservable(wrapped.find[C](clientSession, ct))
+
+  /**
+   * Finds all documents in the collection.
+   *
+   * [[https://www.mongodb.com/docs/manual/tutorial/query-documents/ Find]]
+   * @param clientSession the client session with which to associate this operation
+   * @param filter the query filter
+   * @tparam C the target document type of the observable.
+   * @return the find Observable
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def find[C](
+      clientSession: ClientSession,
+      filter: Bson
+  )(implicit e: C DefaultsTo TResult, ct: ClassTag[C]): FindObservable[C] =
+    FindObservable(wrapped.find(clientSession, filter, ct))
+
+  /**
+   * Aggregates documents according to the specified aggregation pipeline.
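+   *
+   * A sketch of a two-stage pipeline (field names are illustrative; assumes the `org.mongodb.scala.model` helpers are imported):
+   * {{{
+   *   collection.aggregate(Seq(
+   *     Aggregates.filter(Filters.equal("status", "active")),
+   *     Aggregates.group("\$city", Accumulators.sum("count", 1))
+   *   )).subscribe((doc: Document) => println(doc.toJson()))
+   * }}}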
+   *
+   * @param pipeline the aggregate pipeline
+   * @return an Observable containing the result of the aggregation operation
+   * [[https://www.mongodb.com/docs/manual/aggregation/ Aggregation]]
+   */
+  def aggregate[C](pipeline: Seq[Bson])(implicit e: C DefaultsTo TResult, ct: ClassTag[C]): AggregateObservable[C] =
+    AggregateObservable(wrapped.aggregate[C](pipeline.asJava, ct))
+
+  /**
+   * Aggregates documents according to the specified aggregation pipeline.
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @param pipeline the aggregate pipeline
+   * @return an Observable containing the result of the aggregation operation
+   * [[https://www.mongodb.com/docs/manual/aggregation/ Aggregation]]
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def aggregate[C](
+      clientSession: ClientSession,
+      pipeline: Seq[Bson]
+  )(implicit e: C DefaultsTo TResult, ct: ClassTag[C]): AggregateObservable[C] =
+    AggregateObservable(wrapped.aggregate[C](clientSession, pipeline.asJava, ct))
+
+  /**
+   * Aggregates documents according to the specified map-reduce function.
+   *
+   * @param mapFunction A JavaScript function that associates or "maps" a value with a key and emits the key and value pair.
+   * @param reduceFunction A JavaScript function that "reduces" to a single object all the values associated with a particular key.
+   * @tparam C the target document type of the observable.
+   * @return an Observable containing the result of the map-reduce operation
+   * [[https://www.mongodb.com/docs/manual/reference/command/mapReduce/ map-reduce]]
+   */
+  @deprecated("Superseded by aggregate", "4.4.0")
+  def mapReduce[C](
+      mapFunction: String,
+      reduceFunction: String
+  )(implicit e: C DefaultsTo TResult, ct: ClassTag[C]): MapReduceObservable[C] =
+    MapReduceObservable(wrapped.mapReduce(mapFunction, reduceFunction, ct))
+
+  /**
+   * Aggregates documents according to the specified map-reduce function.
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @param mapFunction A JavaScript function that associates or "maps" a value with a key and emits the key and value pair.
+   * @param reduceFunction A JavaScript function that "reduces" to a single object all the values associated with a particular key.
+   * @tparam C the target document type of the observable.
+   * @return an Observable containing the result of the map-reduce operation
+   * [[https://www.mongodb.com/docs/manual/reference/command/mapReduce/ map-reduce]]
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  @deprecated("Superseded by aggregate", "4.4.0")
+  def mapReduce[C](clientSession: ClientSession, mapFunction: String, reduceFunction: String)(
+      implicit e: C DefaultsTo TResult,
+      ct: ClassTag[C]
+  ): MapReduceObservable[C] =
+    MapReduceObservable(wrapped.mapReduce(clientSession, mapFunction, reduceFunction, ct))
+
+  /**
+   * Executes a mix of inserts, updates, replaces, and deletes.
+   *
+   * @param requests the writes to execute
+   * @return an Observable with a single element the BulkWriteResult
+   */
+  def bulkWrite(requests: Seq[_ <: WriteModel[_ <: TResult]]): SingleObservable[BulkWriteResult] =
+    wrapped.bulkWrite(requests.asJava.asInstanceOf[util.List[_ <: WriteModel[_ <: TResult]]])
+
+  /**
+   * Executes a mix of inserts, updates, replaces, and deletes.
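+   *
+   * A sketch of an unordered bulk write (document contents are illustrative):
+   * {{{
+   *   collection.bulkWrite(
+   *     Seq(
+   *       InsertOneModel(Document("_id" -> 4)),
+   *       UpdateOneModel(Filters.equal("_id", 5), Updates.set("status", "active")),
+   *       DeleteOneModel(Filters.equal("_id", 6))
+   *     ),
+   *     BulkWriteOptions().ordered(false)
+   *   ).subscribe((result: BulkWriteResult) => println(result.getModifiedCount))
+   * }}}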
+   *
+   * @param requests the writes to execute
+   * @param options the options to apply to the bulk write operation
+   * @return an Observable with a single element the BulkWriteResult
+   */
+  def bulkWrite(
+      requests: Seq[_ <: WriteModel[_ <: TResult]],
+      options: BulkWriteOptions
+  ): SingleObservable[BulkWriteResult] =
+    wrapped.bulkWrite(requests.asJava.asInstanceOf[util.List[_ <: WriteModel[_ <: TResult]]], options)
+
+  /**
+   * Executes a mix of inserts, updates, replaces, and deletes.
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @param requests the writes to execute
+   * @return an Observable with a single element the BulkWriteResult
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def bulkWrite(
+      clientSession: ClientSession,
+      requests: Seq[_ <: WriteModel[_ <: TResult]]
+  ): SingleObservable[BulkWriteResult] =
+    wrapped.bulkWrite(clientSession, requests.asJava.asInstanceOf[util.List[_ <: WriteModel[_ <: TResult]]])
+
+  /**
+   * Executes a mix of inserts, updates, replaces, and deletes.
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @param requests the writes to execute
+   * @param options the options to apply to the bulk write operation
+   * @return an Observable with a single element the BulkWriteResult
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def bulkWrite(
+      clientSession: ClientSession,
+      requests: Seq[_ <: WriteModel[_ <: TResult]],
+      options: BulkWriteOptions
+  ): SingleObservable[BulkWriteResult] =
+    wrapped.bulkWrite(clientSession, requests.asJava.asInstanceOf[util.List[_ <: WriteModel[_ <: TResult]]], options)
+
+  /**
+   * Inserts the provided document. If the document is missing an identifier, the driver should generate one.
+   *
+   * @param document the document to insert
+   * @return an Observable with a single element the InsertOneResult or with either a
+   * com.mongodb.DuplicateKeyException or com.mongodb.MongoException
+   */
+  def insertOne(document: TResult): SingleObservable[InsertOneResult] = wrapped.insertOne(document)
+
+  /**
+   * Inserts the provided document. If the document is missing an identifier, the driver should generate one.
+   *
+   * @param document the document to insert
+   * @param options the options to apply to the operation
+   * @return an Observable with a single element the InsertOneResult or with either a
+   * com.mongodb.DuplicateKeyException or com.mongodb.MongoException
+   * @since 1.1
+   */
+  def insertOne(document: TResult, options: InsertOneOptions): SingleObservable[InsertOneResult] =
+    wrapped.insertOne(document, options)
+
+  /**
+   * Inserts the provided document. If the document is missing an identifier, the driver should generate one.
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @param document the document to insert
+   * @return an Observable with a single element the InsertOneResult or with either a
+   * com.mongodb.DuplicateKeyException or com.mongodb.MongoException
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def insertOne(clientSession: ClientSession, document: TResult): SingleObservable[InsertOneResult] =
+    wrapped.insertOne(clientSession, document)
+
+  /**
+   * Inserts the provided document. If the document is missing an identifier, the driver should generate one.
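+   *
+   * Sketch (assumes a started `session: ClientSession` is in scope; the document is illustrative):
+   * {{{
+   *   collection.insertOne(session, Document("name" -> "Ada"))
+   *     .subscribe((result: InsertOneResult) => println(result.getInsertedId))
+   * }}}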
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @param document the document to insert
+   * @param options the options to apply to the operation
+   * @return an Observable with a single element the InsertOneResult or with either a
+   * com.mongodb.DuplicateKeyException or com.mongodb.MongoException
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def insertOne(
+      clientSession: ClientSession,
+      document: TResult,
+      options: InsertOneOptions
+  ): SingleObservable[InsertOneResult] =
+    wrapped.insertOne(clientSession, document, options)
+
+  /**
+   * Inserts a batch of documents. The preferred way to perform bulk inserts is to use the BulkWrite API. However, when talking with a
+   * server < 2.6, using this method will be faster due to constraints in the bulk API related to error handling.
+   *
+   * @param documents the documents to insert
+   * @return an Observable with a single element the InsertManyResult or with either a
+   * com.mongodb.DuplicateKeyException or com.mongodb.MongoException
+   */
+  def insertMany(documents: Seq[_ <: TResult]): SingleObservable[InsertManyResult] =
+    wrapped.insertMany(documents.asJava)
+
+  /**
+   * Inserts a batch of documents. The preferred way to perform bulk inserts is to use the BulkWrite API. However, when talking with a
+   * server < 2.6, using this method will be faster due to constraints in the bulk API related to error handling.
+   *
+   * @param documents the documents to insert
+   * @param options the options to apply to the operation
+   * @return an Observable with a single element the InsertManyResult or with either a
+   * com.mongodb.DuplicateKeyException or com.mongodb.MongoException
+   */
+  def insertMany(documents: Seq[_ <: TResult], options: InsertManyOptions): SingleObservable[InsertManyResult] =
+    wrapped.insertMany(documents.asJava, options)
+
+  /**
+   * Inserts a batch of documents. The preferred way to perform bulk inserts is to use the BulkWrite API.
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @param documents the documents to insert
+   * @return an Observable with a single element the InsertManyResult or with either a
+   * com.mongodb.DuplicateKeyException or com.mongodb.MongoException
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def insertMany(clientSession: ClientSession, documents: Seq[_ <: TResult]): SingleObservable[InsertManyResult] =
+    wrapped.insertMany(clientSession, documents.asJava)
+
+  /**
+   * Inserts a batch of documents. The preferred way to perform bulk inserts is to use the BulkWrite API.
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @param documents the documents to insert
+   * @param options the options to apply to the operation
+   * @return an Observable with a single element the InsertManyResult or with either a
+   * com.mongodb.DuplicateKeyException or com.mongodb.MongoException
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def insertMany(
+      clientSession: ClientSession,
+      documents: Seq[_ <: TResult],
+      options: InsertManyOptions
+  ): SingleObservable[InsertManyResult] =
+    wrapped.insertMany(clientSession, documents.asJava, options)
+
+  /**
+   * Removes at most one document from the collection that matches the given filter. If no documents match, the collection is not
+   * modified.
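+   *
+   * Sketch (the filter is illustrative):
+   * {{{
+   *   collection.deleteOne(Filters.equal("_id", 1))
+   *     .subscribe((result: DeleteResult) => println(result.getDeletedCount))
+   * }}}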
+   *
+   * @param filter the query filter to apply the delete operation
+   * @return an Observable with a single element the DeleteResult or with a com.mongodb.MongoException
+   */
+  def deleteOne(filter: Bson): SingleObservable[DeleteResult] = wrapped.deleteOne(filter)
+
+  /**
+   * Removes at most one document from the collection that matches the given filter. If no documents match, the collection is not
+   * modified.
+   *
+   * @param filter the query filter to apply the delete operation
+   * @param options the options to apply to the delete operation
+   * @return an Observable with a single element the DeleteResult or with a com.mongodb.MongoException
+   * @since 1.2
+   */
+  def deleteOne(filter: Bson, options: DeleteOptions): SingleObservable[DeleteResult] =
+    wrapped.deleteOne(filter, options)
+
+  /**
+   * Removes at most one document from the collection that matches the given filter. If no documents match, the collection is not
+   * modified.
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @param filter the query filter to apply the delete operation
+   * @return an Observable with a single element the DeleteResult or with a com.mongodb.MongoException
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def deleteOne(clientSession: ClientSession, filter: Bson): SingleObservable[DeleteResult] =
+    wrapped.deleteOne(clientSession, filter)
+
+  /**
+   * Removes at most one document from the collection that matches the given filter. If no documents match, the collection is not
+   * modified.
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @param filter the query filter to apply the delete operation
+   * @param options the options to apply to the delete operation
+   * @return an Observable with a single element the DeleteResult or with a com.mongodb.MongoException
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def deleteOne(clientSession: ClientSession, filter: Bson, options: DeleteOptions): SingleObservable[DeleteResult] =
+    wrapped.deleteOne(clientSession, filter, options)
+
+  /**
+   * Removes all documents from the collection that match the given query filter. If no documents match, the collection is not modified.
+   *
+   * @param filter the query filter to apply the delete operation
+   * @return an Observable with a single element the DeleteResult or with a com.mongodb.MongoException
+   */
+  def deleteMany(filter: Bson): SingleObservable[DeleteResult] = wrapped.deleteMany(filter)
+
+  /**
+   * Removes all documents from the collection that match the given query filter. If no documents match, the collection is not modified.
+   *
+   * @param filter the query filter to apply the delete operation
+   * @param options the options to apply to the delete operation
+   * @return an Observable with a single element the DeleteResult or with a com.mongodb.MongoException
+   * @since 1.2
+   */
+  def deleteMany(filter: Bson, options: DeleteOptions): SingleObservable[DeleteResult] =
+    wrapped.deleteMany(filter, options)
+
+  /**
+   * Removes all documents from the collection that match the given query filter. If no documents match, the collection is not modified.
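+   *
+   * Sketch (assumes a started `session: ClientSession` is in scope; the filter is illustrative):
+   * {{{
+   *   collection.deleteMany(session, Filters.lt("score", 10))
+   *     .subscribe((result: DeleteResult) => println(result.getDeletedCount))
+   * }}}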
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @param filter the query filter to apply the delete operation
+   * @return an Observable with a single element the DeleteResult or with a com.mongodb.MongoException
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def deleteMany(clientSession: ClientSession, filter: Bson): SingleObservable[DeleteResult] =
+    wrapped.deleteMany(clientSession, filter)
+
+  /**
+   * Removes all documents from the collection that match the given query filter. If no documents match, the collection is not modified.
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @param filter the query filter to apply the delete operation
+   * @param options the options to apply to the delete operation
+   * @return an Observable with a single element the DeleteResult or with a com.mongodb.MongoException
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def deleteMany(clientSession: ClientSession, filter: Bson, options: DeleteOptions): SingleObservable[DeleteResult] =
+    wrapped.deleteMany(clientSession, filter, options)
+
+  /**
+   * Replace a document in the collection according to the specified arguments.
+   *
+   * [[https://www.mongodb.com/docs/manual/tutorial/modify-documents/#replace-the-document Replace]]
+   * @param filter the query filter to apply the replace operation
+   * @param replacement the replacement document
+   * @return an Observable with a single element the UpdateResult
+   */
+  def replaceOne(filter: Bson, replacement: TResult): SingleObservable[UpdateResult] =
+    wrapped.replaceOne(filter, replacement)
+
+  /**
+   * Replace a document in the collection according to the specified arguments.
+   *
+   * [[https://www.mongodb.com/docs/manual/tutorial/modify-documents/#replace-the-document Replace]]
+   * @param clientSession the client session with which to associate this operation
+   * @param filter the query filter to apply the replace operation
+   * @param replacement the replacement document
+   * @return an Observable with a single element the UpdateResult
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def replaceOne(clientSession: ClientSession, filter: Bson, replacement: TResult): SingleObservable[UpdateResult] =
+    wrapped.replaceOne(clientSession, filter, replacement)
+
+  /**
+   * Replace a document in the collection according to the specified arguments.
+   *
+   * [[https://www.mongodb.com/docs/manual/tutorial/modify-documents/#replace-the-document Replace]]
+   * @param filter the query filter to apply the replace operation
+   * @param replacement the replacement document
+   * @param options the options to apply to the replace operation
+   * @return an Observable with a single element the UpdateResult
+   */
+  def replaceOne(filter: Bson, replacement: TResult, options: ReplaceOptions): SingleObservable[UpdateResult] =
+    wrapped.replaceOne(filter, replacement, options)
+
+  /**
+   * Replace a document in the collection according to the specified arguments.
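+   *
+   * A sketch of an upsert-style replace (assumes a started `session: ClientSession`; `ReplaceOptions` comes from
+   * `org.mongodb.scala.model`):
+   * {{{
+   *   collection.replaceOne(session, Filters.equal("_id", 1),
+   *     Document("_id" -> 1, "name" -> "Ada"), ReplaceOptions().upsert(true))
+   *     .subscribe((result: UpdateResult) => println(result.getModifiedCount))
+   * }}}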
+   *
+   * [[https://www.mongodb.com/docs/manual/tutorial/modify-documents/#replace-the-document Replace]]
+   * @param clientSession the client session with which to associate this operation
+   * @param filter the query filter to apply the replace operation
+   * @param replacement the replacement document
+   * @param options the options to apply to the replace operation
+   * @return an Observable with a single element the UpdateResult
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def replaceOne(
+      clientSession: ClientSession,
+      filter: Bson,
+      replacement: TResult,
+      options: ReplaceOptions
+  ): SingleObservable[UpdateResult] =
+    wrapped.replaceOne(clientSession, filter, replacement, options)
+
+  /**
+   * Update a single document in the collection according to the specified arguments.
+   *
+   * [[https://www.mongodb.com/docs/manual/tutorial/modify-documents/ Updates]]
+   * [[https://www.mongodb.com/docs/manual/reference/operator/update/ Update Operators]]
+   * @param filter a document describing the query filter, which may not be null. This can be of any type for which a `Codec` is
+   * registered
+   * @param update a document describing the update, which may not be null. The update to apply must include only update operators. This
+   * can be of any type for which a `Codec` is registered
+   * @return an Observable with a single element the UpdateResult
+   */
+  def updateOne(filter: Bson, update: Bson): SingleObservable[UpdateResult] =
+    wrapped.updateOne(filter, update)
+
+  /**
+   * Update a single document in the collection according to the specified arguments.
+   *
+   * [[https://www.mongodb.com/docs/manual/tutorial/modify-documents/ Updates]]
+   * [[https://www.mongodb.com/docs/manual/reference/operator/update/ Update Operators]]
+   * @param filter a document describing the query filter, which may not be null. This can be of any type for which a `Codec` is
+   * registered
+   * @param update a document describing the update, which may not be null. The update to apply must include only update operators. This
+   * can be of any type for which a `Codec` is registered
+   * @param options the options to apply to the update operation
+   * @return an Observable with a single element the UpdateResult
+   */
+  def updateOne(filter: Bson, update: Bson, options: UpdateOptions): SingleObservable[UpdateResult] =
+    wrapped.updateOne(filter, update, options)
+
+  /**
+   * Update a single document in the collection according to the specified arguments.
+   *
+   * [[https://www.mongodb.com/docs/manual/tutorial/modify-documents/ Updates]]
+   * [[https://www.mongodb.com/docs/manual/reference/operator/update/ Update Operators]]
+   * @param clientSession the client session with which to associate this operation
+   * @param filter a document describing the query filter, which may not be null. This can be of any type for which a `Codec` is
+   * registered
+   * @param update a document describing the update, which may not be null. The update to apply must include only update operators. This
+   * can be of any type for which a `Codec` is registered
+   * @return an Observable with a single element the UpdateResult
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def updateOne(clientSession: ClientSession, filter: Bson, update: Bson): SingleObservable[UpdateResult] =
+    wrapped.updateOne(clientSession, filter, update)
+
+  /**
+   * Update a single document in the collection according to the specified arguments.
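+   *
+   * Sketch (assumes a started `session: ClientSession`; the update is illustrative):
+   * {{{
+   *   collection.updateOne(session, Filters.equal("_id", 1),
+   *     Updates.set("status", "active"), UpdateOptions().upsert(true))
+   *     .subscribe((result: UpdateResult) => println(result.getMatchedCount))
+   * }}}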
+   *
+   * [[https://www.mongodb.com/docs/manual/tutorial/modify-documents/ Updates]]
+   * [[https://www.mongodb.com/docs/manual/reference/operator/update/ Update Operators]]
+   * @param clientSession the client session with which to associate this operation
+   * @param filter a document describing the query filter, which may not be null. This can be of any type for which a `Codec` is
+   * registered
+   * @param update a document describing the update, which may not be null. The update to apply must include only update operators. This
+   * can be of any type for which a `Codec` is registered
+   * @param options the options to apply to the update operation
+   * @return an Observable with a single element the UpdateResult
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def updateOne(
+      clientSession: ClientSession,
+      filter: Bson,
+      update: Bson,
+      options: UpdateOptions
+  ): SingleObservable[UpdateResult] =
+    wrapped.updateOne(clientSession, filter, update, options)
+
+  /**
+   * Update a single document in the collection according to the specified arguments.
+   *
+   * [[https://www.mongodb.com/docs/manual/tutorial/modify-documents/ Updates]]
+   * [[https://www.mongodb.com/docs/manual/reference/operator/update/ Update Operators]]
+   * @param filter a document describing the query filter, which may not be null. This can be of any type for which a `Codec` is
+   * registered
+   * @param update a pipeline describing the update.
+   * @return an Observable with a single element the UpdateResult
+   * @since 2.7
+   * @note Requires MongoDB 4.2 or greater
+   */
+  def updateOne(filter: Bson, update: Seq[Bson]): SingleObservable[UpdateResult] =
+    wrapped.updateOne(filter, update.asJava)
+
+  /**
+   * Update a single document in the collection according to the specified arguments.
+   *
+   * [[https://www.mongodb.com/docs/manual/tutorial/modify-documents/ Updates]]
+   * [[https://www.mongodb.com/docs/manual/reference/operator/update/ Update Operators]]
+   * @param filter a document describing the query filter, which may not be null. This can be of any type for which a `Codec` is
+   * registered
+   * @param update a pipeline describing the update.
+   * @param options the options to apply to the update operation
+   * @return an Observable with a single element the UpdateResult
+   * @since 2.7
+   * @note Requires MongoDB 4.2 or greater
+   */
+  def updateOne(filter: Bson, update: Seq[Bson], options: UpdateOptions): SingleObservable[UpdateResult] =
+    wrapped.updateOne(filter, update.asJava, options)
+
+  /**
+   * Update a single document in the collection according to the specified arguments.
+   *
+   * [[https://www.mongodb.com/docs/manual/tutorial/modify-documents/ Updates]]
+   * [[https://www.mongodb.com/docs/manual/reference/operator/update/ Update Operators]]
+   * @param clientSession the client session with which to associate this operation
+   * @param filter a document describing the query filter, which may not be null. This can be of any type for which a `Codec` is
+   * registered
+   * @param update a pipeline describing the update.
+   * @return an Observable with a single element the UpdateResult
+   * @since 2.7
+   * @note Requires MongoDB 4.2 or greater
+   */
+  def updateOne(clientSession: ClientSession, filter: Bson, update: Seq[Bson]): SingleObservable[UpdateResult] =
+    wrapped.updateOne(clientSession, filter, update.asJava)
+
+  /**
+   * Update a single document in the collection according to the specified arguments.
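+   *
+   * A sketch of an aggregation-pipeline update (illustrative; assumes a started `session: ClientSession`):
+   * {{{
+   *   collection.updateOne(session, Filters.equal("_id", 1),
+   *     Seq(Document("\$set" -> Document("status" -> "active"))))
+   *     .subscribe((result: UpdateResult) => println(result.getModifiedCount))
+   * }}}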
+   *
+   * [[https://www.mongodb.com/docs/manual/tutorial/modify-documents/ Updates]]
+   * [[https://www.mongodb.com/docs/manual/reference/operator/update/ Update Operators]]
+   * @param clientSession the client session with which to associate this operation
+   * @param filter a document describing the query filter, which may not be null. This can be of any type for which a `Codec` is
+   * registered
+   * @param update a pipeline describing the update.
+   * @param options the options to apply to the update operation
+   * @return an Observable with a single element the UpdateResult
+   * @since 2.7
+   * @note Requires MongoDB 4.2 or greater
+   */
+  def updateOne(
+      clientSession: ClientSession,
+      filter: Bson,
+      update: Seq[Bson],
+      options: UpdateOptions
+  ): SingleObservable[UpdateResult] =
+    wrapped.updateOne(clientSession, filter, update.asJava, options)
+
+  /**
+   * Update all documents in the collection according to the specified arguments.
+   *
+   * [[https://www.mongodb.com/docs/manual/tutorial/modify-documents/ Updates]]
+   * [[https://www.mongodb.com/docs/manual/reference/operator/update/ Update Operators]]
+   * @param filter a document describing the query filter, which may not be null. This can be of any type for which a `Codec` is
+   * registered
+   * @param update a document describing the update, which may not be null. The update to apply must include only update operators. This
+   * can be of any type for which a `Codec` is registered
+   * @return an Observable with a single element the UpdateResult
+   */
+  def updateMany(filter: Bson, update: Bson): SingleObservable[UpdateResult] =
+    wrapped.updateMany(filter, update)
+
+  /**
+   * Update all documents in the collection according to the specified arguments.
+   *
+   * [[https://www.mongodb.com/docs/manual/tutorial/modify-documents/ Updates]]
+   * [[https://www.mongodb.com/docs/manual/reference/operator/update/ Update Operators]]
+   * @param filter a document describing the query filter, which may not be null. This can be of any type for which a `Codec` is
+   * registered
+   * @param update a document describing the update, which may not be null. The update to apply must include only update operators. This
+   * can be of any type for which a `Codec` is registered
+   * @param options the options to apply to the update operation
+   * @return an Observable with a single element the UpdateResult
+   */
+  def updateMany(filter: Bson, update: Bson, options: UpdateOptions): SingleObservable[UpdateResult] =
+    wrapped.updateMany(filter, update, options)
+
+  /**
+   * Update all documents in the collection according to the specified arguments.
+   *
+   * [[https://www.mongodb.com/docs/manual/tutorial/modify-documents/ Updates]]
+   * [[https://www.mongodb.com/docs/manual/reference/operator/update/ Update Operators]]
+   * @param clientSession the client session with which to associate this operation
+   * @param filter a document describing the query filter, which may not be null. This can be of any type for which a `Codec` is
+   * registered
+   * @param update a document describing the update, which may not be null. The update to apply must include only update operators. This
+   * can be of any type for which a `Codec` is registered
+   * @return an Observable with a single element the UpdateResult
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def updateMany(clientSession: ClientSession, filter: Bson, update: Bson): SingleObservable[UpdateResult] =
+    wrapped.updateMany(clientSession, filter, update)
+
+  /**
+   * Update all documents in the collection according to the specified arguments.
+   *
+   * [[https://www.mongodb.com/docs/manual/tutorial/modify-documents/ Updates]]
+   * [[https://www.mongodb.com/docs/manual/reference/operator/update/ Update Operators]]
+   * @param clientSession the client session with which to associate this operation
+   * @param filter a document describing the query filter, which may not be null. This can be of any type for which a `Codec` is
+   * registered
+   * @param update a document describing the update, which may not be null. The update to apply must include only update operators. This
+   * can be of any type for which a `Codec` is registered
+   * @param options the options to apply to the update operation
+   * @return an Observable with a single element the UpdateResult
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def updateMany(
+      clientSession: ClientSession,
+      filter: Bson,
+      update: Bson,
+      options: UpdateOptions
+  ): SingleObservable[UpdateResult] =
+    wrapped.updateMany(clientSession, filter, update, options)
+
+  /**
+   * Update all documents in the collection according to the specified arguments.
+   *
+   * [[https://www.mongodb.com/docs/manual/tutorial/modify-documents/ Updates]]
+   * [[https://www.mongodb.com/docs/manual/reference/operator/update/ Update Operators]]
+   * @param filter a document describing the query filter, which may not be null. This can be of any type for which a `Codec` is
+   * registered
+   * @param update a pipeline describing the update.
+   * @return an Observable with a single element the UpdateResult
+   * @since 2.7
+   * @note Requires MongoDB 4.2 or greater
+   */
+  def updateMany(filter: Bson, update: Seq[Bson]): SingleObservable[UpdateResult] =
+    wrapped.updateMany(filter, update.asJava)
+
+  /**
+   * Update all documents in the collection according to the specified arguments.
+   *
+   * [[https://www.mongodb.com/docs/manual/tutorial/modify-documents/ Updates]]
+   * [[https://www.mongodb.com/docs/manual/reference/operator/update/ Update Operators]]
+   * @param filter a document describing the query filter, which may not be null. This can be of any type for which a `Codec` is
+   * registered
+   * @param update a pipeline describing the update.
+   * @param options the options to apply to the update operation
+   * @return an Observable with a single element the UpdateResult
+   * @since 2.7
+   * @note Requires MongoDB 4.2 or greater
+   */
+  def updateMany(filter: Bson, update: Seq[Bson], options: UpdateOptions): SingleObservable[UpdateResult] =
+    wrapped.updateMany(filter, update.asJava, options)
+
+  /**
+   * Update all documents in the collection according to the specified arguments.
+   *
+   * [[https://www.mongodb.com/docs/manual/tutorial/modify-documents/ Updates]]
+   * [[https://www.mongodb.com/docs/manual/reference/operator/update/ Update Operators]]
+   * @param clientSession the client session with which to associate this operation
+   * @param filter a document describing the query filter, which may not be null. This can be of any type for which a `Codec` is
+   * registered
+   * @param update a pipeline describing the update.
+   * @return an Observable with a single element the UpdateResult
+   * @since 2.7
+   * @note Requires MongoDB 4.2 or greater
+   */
+  def updateMany(clientSession: ClientSession, filter: Bson, update: Seq[Bson]): SingleObservable[UpdateResult] =
+    wrapped.updateMany(clientSession, filter, update.asJava)
+
+  /**
+   * Update all documents in the collection according to the specified arguments.
+   *
+   * [[https://www.mongodb.com/docs/manual/tutorial/modify-documents/ Updates]]
+   * [[https://www.mongodb.com/docs/manual/reference/operator/update/ Update Operators]]
+   * @param clientSession the client session with which to associate this operation
+   * @param filter a document describing the query filter, which may not be null. This can be of any type for which a `Codec` is
+   * registered
+   * @param update a pipeline describing the update.
+   * @param options the options to apply to the update operation
+   * @return an Observable with a single element the UpdateResult
+   * @since 2.7
+   * @note Requires MongoDB 4.2 or greater
+   */
+  def updateMany(
+      clientSession: ClientSession,
+      filter: Bson,
+      update: Seq[Bson],
+      options: UpdateOptions
+  ): SingleObservable[UpdateResult] =
+    wrapped.updateMany(clientSession, filter, update.asJava, options)
+
+  /**
+   * Atomically find a document and remove it.
+   *
+   * @param filter the query filter to find the document with
+   * @return an Observable with a single element the document that was removed. If no documents matched the query filter, then null will be
+   * returned
+   */
+  def findOneAndDelete(filter: Bson): SingleObservable[TResult] = wrapped.findOneAndDelete(filter)
+
+  /**
+   * Atomically find a document and remove it.
+   *
+   * @param filter the query filter to find the document with
+   * @param options the options to apply to the operation
+   * @return an Observable with a single element the document that was removed. If no documents matched the query filter, then null will be
+   * returned
+   */
+  def findOneAndDelete(filter: Bson, options: FindOneAndDeleteOptions): SingleObservable[TResult] =
+    wrapped.findOneAndDelete(filter, options)
+
+  /**
+   * Atomically find a document and remove it.
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @param filter the query filter to find the document with
+   * @return an Observable with a single element the document that was removed. If no documents matched the query filter, then null will be
+   * returned
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def findOneAndDelete(clientSession: ClientSession, filter: Bson): SingleObservable[TResult] =
+    wrapped.findOneAndDelete(clientSession, filter)
+
+  /**
+   * Atomically find a document and remove it.
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @param filter the query filter to find the document with
+   * @param options the options to apply to the operation
+   * @return an Observable with a single element the document that was removed. If no documents matched the query filter, then null will be
+   * returned
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def findOneAndDelete(
+      clientSession: ClientSession,
+      filter: Bson,
+      options: FindOneAndDeleteOptions
+  ): SingleObservable[TResult] =
+    wrapped.findOneAndDelete(clientSession, filter, options)
+
+  /**
+   * Atomically find a document and replace it.
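+   *
+   * Sketch (the replacement document is illustrative; by default the emitted element is the document as it was before the
+   * replacement):
+   * {{{
+   *   collection.findOneAndReplace(Filters.equal("_id", 1), Document("_id" -> 1, "name" -> "Ada"))
+   *     .subscribe((previous: Document) => println(previous.toJson()))
+   * }}}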
+   *
+   * @param filter the query filter to apply the replace operation
+   * @param replacement the replacement document
+   * @return an Observable with a single element the document that was replaced. Depending on the value of the `returnOriginal`
+   * property, this will either be the document as it was before the update or as it is after the update. If no documents matched the
+   * query filter, then null will be returned
+   */
+  def findOneAndReplace(filter: Bson, replacement: TResult): SingleObservable[TResult] =
+    wrapped.findOneAndReplace(filter, replacement)
+
+  /**
+   * Atomically find a document and replace it.
+   *
+   * @param filter the query filter to apply the replace operation
+   * @param replacement the replacement document
+   * @param options the options to apply to the operation
+   * @return an Observable with a single element the document that was replaced. Depending on the value of the `returnOriginal`
+   * property, this will either be the document as it was before the update or as it is after the update. If no documents matched the
+   * query filter, then null will be returned
+   */
+  def findOneAndReplace(
+      filter: Bson,
+      replacement: TResult,
+      options: FindOneAndReplaceOptions
+  ): SingleObservable[TResult] =
+    wrapped.findOneAndReplace(filter, replacement, options)
+
+  /**
+   * Atomically find a document and replace it.
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @param filter the query filter to apply the replace operation
+   * @param replacement the replacement document
+   * @return an Observable with a single element the document that was replaced. Depending on the value of the `returnOriginal`
+   * property, this will either be the document as it was before the update or as it is after the update. If no documents matched the
+   * query filter, then null will be returned
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def findOneAndReplace(clientSession: ClientSession, filter: Bson, replacement: TResult): SingleObservable[TResult] =
+    wrapped.findOneAndReplace(clientSession, filter, replacement)
+
+  /**
+   * Atomically find a document and replace it.
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @param filter the query filter to apply the replace operation
+   * @param replacement the replacement document
+   * @param options the options to apply to the operation
+   * @return an Observable with a single element the document that was replaced. Depending on the value of the `returnOriginal`
+   * property, this will either be the document as it was before the update or as it is after the update. If no documents matched the
+   * query filter, then null will be returned
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def findOneAndReplace(
+      clientSession: ClientSession,
+      filter: Bson,
+      replacement: TResult,
+      options: FindOneAndReplaceOptions
+  ): SingleObservable[TResult] =
+    wrapped.findOneAndReplace(clientSession, filter, replacement, options)
+
+  /**
+   * Atomically find a document and update it.
+   *
+   * @param filter a document describing the query filter, which may not be null. This can be of any type for which a `Codec` is
+   * registered
+   * @param update a document describing the update, which may not be null. The update to apply must include only update operators. This
+   * can be of any type for which a `Codec` is registered
+   * @return an Observable with a single element the document that was updated. Depending on the value of the `returnOriginal`
+   * property, this will either be the document as it was before the update or as it is after the update. If no documents matched the
+   * query filter, then null will be returned
+   */
+  def findOneAndUpdate(filter: Bson, update: Bson): SingleObservable[TResult] =
+    wrapped.findOneAndUpdate(filter, update)
+
+  /**
+   * Atomically find a document and update it.
+   *
+   * @param filter a document describing the query filter, which may not be null. This can be of any type for which a `Codec` is
+   * registered
+   * @param update a document describing the update, which may not be null. The update to apply must include only update operators. This
+   * can be of any type for which a `Codec` is registered
+   * @param options the options to apply to the operation
+   * @return an Observable with a single element the document that was updated. Depending on the value of the `returnOriginal`
+   * property, this will either be the document as it was before the update or as it is after the update. If no documents matched the
+   * query filter, then null will be returned
+   */
+  def findOneAndUpdate(filter: Bson, update: Bson, options: FindOneAndUpdateOptions): SingleObservable[TResult] =
+    wrapped.findOneAndUpdate(filter, update, options)
+
+  /**
+   * Atomically find a document and update it.
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @param filter a document describing the query filter, which may not be null. This can be of any type for which a `Codec` is
+   * registered
+   * @param update a document describing the update, which may not be null. The update to apply must include only update operators. This
+   * can be of any type for which a `Codec` is registered
+   * @return an Observable with a single element the document that was updated. Depending on the value of the `returnOriginal`
+   * property, this will either be the document as it was before the update or as it is after the update. If no documents matched the
+   * query filter, then null will be returned
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def findOneAndUpdate(clientSession: ClientSession, filter: Bson, update: Bson): SingleObservable[TResult] =
+    wrapped.findOneAndUpdate(clientSession, filter, update)
+
+  /**
+   * Atomically find a document and update it.
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @param filter a document describing the query filter, which may not be null. This can be of any type for which a `Codec` is
+   * registered
+   * @param update a document describing the update, which may not be null. The update to apply must include only update operators. This
+   * can be of any type for which a `Codec` is registered
+   * @param options the options to apply to the operation
+   * @return an Observable with a single element the document that was updated. Depending on the value of the `returnOriginal`
+   * property, this will either be the document as it was before the update or as it is after the update. If no documents matched the
+   * query filter, then null will be returned
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def findOneAndUpdate(
+      clientSession: ClientSession,
+      filter: Bson,
+      update: Bson,
+      options: FindOneAndUpdateOptions
+  ): SingleObservable[TResult] =
+    wrapped.findOneAndUpdate(clientSession, filter, update, options)
+
+  /**
+   * Atomically find a document and update it.
+   *
+   * @param filter a document describing the query filter, which may not be null. This can be of any type for which a `Codec` is
+   * registered
+   * @param update a pipeline describing the update.
+   * @return an Observable with a single element the document that was updated. Depending on the value of the `returnOriginal`
+   * property, this will either be the document as it was before the update or as it is after the update. If no documents matched the
+   * query filter, then null will be returned
+   * @since 2.7
+   * @note Requires MongoDB 4.2 or greater
+   */
+  def findOneAndUpdate(filter: Bson, update: Seq[Bson]): SingleObservable[TResult] =
+    wrapped.findOneAndUpdate(filter, update.asJava)
+
+  /**
+   * Atomically find a document and update it.
+   *
+   * @param filter a document describing the query filter, which may not be null. This can be of any type for which a `Codec` is
+   * registered
+   * @param update a pipeline describing the update.
+   * @param options the options to apply to the operation
+   * @return an Observable with a single element the document that was updated. Depending on the value of the `returnOriginal`
+   * property, this will either be the document as it was before the update or as it is after the update. If no documents matched the
+   * query filter, then null will be returned
+   * @since 2.7
+   * @note Requires MongoDB 4.2 or greater
+   */
+  def findOneAndUpdate(filter: Bson, update: Seq[Bson], options: FindOneAndUpdateOptions): SingleObservable[TResult] =
+    wrapped.findOneAndUpdate(filter, update.asJava, options)
+
+  /**
+   * Atomically find a document and update it.
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @param filter a document describing the query filter, which may not be null. This can be of any type for which a `Codec` is
+   * registered
+   * @param update a pipeline describing the update.
+   * @return an Observable with a single element the document that was updated. Depending on the value of the `returnOriginal`
+   * property, this will either be the document as it was before the update or as it is after the update. If no documents matched the
+   * query filter, then null will be returned
+   * @since 2.7
+   * @note Requires MongoDB 4.2 or greater
+   */
+  def findOneAndUpdate(clientSession: ClientSession, filter: Bson, update: Seq[Bson]): SingleObservable[TResult] =
+    wrapped.findOneAndUpdate(clientSession, filter, update.asJava)
+
+  /**
+   * Atomically find a document and update it.
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @param filter a document describing the query filter, which may not be null. This can be of any type for which a `Codec` is
+   * registered
+   * @param update a pipeline describing the update.
+   * @param options the options to apply to the operation
+   * @return an Observable with a single element the document that was updated. Depending on the value of the `returnOriginal`
+   * property, this will either be the document as it was before the update or as it is after the update. If no documents matched the
+   * query filter, then null will be returned
+   * @since 2.7
+   * @note Requires MongoDB 4.2 or greater
+   */
+  def findOneAndUpdate(
+      clientSession: ClientSession,
+      filter: Bson,
+      update: Seq[Bson],
+      options: FindOneAndUpdateOptions
+  ): SingleObservable[TResult] =
+    wrapped.findOneAndUpdate(clientSession, filter, update.asJava, options)
+
+  /**
+   * Drops this collection from the Database.
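+   *
+   * Sketch:
+   * {{{
+   *   collection.drop().subscribe((_: Unit) => println("collection dropped"))
+   * }}}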
+   *
+   * @return an Observable that indicates when the operation has completed
+   * [[https://www.mongodb.com/docs/manual/reference/command/drop/ Drop Collection]]
+   */
+  def drop(): SingleObservable[Unit] = wrapped.drop()
+
+  /**
+   * Drops this collection from the Database.
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @return an Observable that indicates when the operation has completed
+   * [[https://www.mongodb.com/docs/manual/reference/command/drop/ Drop Collection]]
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def drop(clientSession: ClientSession): SingleObservable[Unit] = wrapped.drop(clientSession)
+
+  /**
+   * Drops this collection from the Database.
+   *
+   * @param dropCollectionOptions various options for dropping the collection
+   * @return an Observable that indicates when the operation has completed
+   * [[https://www.mongodb.com/docs/manual/reference/command/drop/ Drop Collection]]
+   * @since 4.7
+   * @note Requires MongoDB 6.0 or greater
+   */
+  def drop(dropCollectionOptions: DropCollectionOptions): SingleObservable[Unit] = wrapped.drop(dropCollectionOptions)
+
+  /**
+   * Drops this collection from the Database.
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @param dropCollectionOptions various options for dropping the collection
+   * @return an Observable that indicates when the operation has completed
+   * [[https://www.mongodb.com/docs/manual/reference/command/drop/ Drop Collection]]
+   * @since 4.7
+   * @note Requires MongoDB 6.0 or greater
+   */
+  def drop(clientSession: ClientSession, dropCollectionOptions: DropCollectionOptions): SingleObservable[Unit] =
+    wrapped.drop(clientSession, dropCollectionOptions)
+
+  /**
+   * Create an Atlas Search index for the collection.
+   *
+   * @param indexName the name of the search index to create.
+   * @param definition the search index mapping definition.
+   * @return an Observable with the search index name.
+   * @since 4.11
+   * @note Requires MongoDB 6.0 or greater
+   * @see [[https://www.mongodb.com/docs/manual/reference/command/createSearchIndexes/ Create Search Indexes]]
+   */
+  def createSearchIndex(indexName: String, definition: Bson): SingleObservable[String] =
+    wrapped.createSearchIndex(indexName, definition)
+
+  /**
+   * Create an Atlas Search index with the `default` name for the collection.
+   *
+   * @param definition the search index mapping definition.
+   * @return an Observable with the search index name.
+   * @since 4.11
+   * @note Requires MongoDB 6.0 or greater
+   * @see [[https://www.mongodb.com/docs/manual/reference/command/createSearchIndexes/ Create Search Indexes]]
+   */
+  def createSearchIndex(definition: Bson): SingleObservable[String] = wrapped.createSearchIndex(definition)
+
+  /**
+   * Create one or more Atlas Search indexes for the collection.
+   *
+   * The name can be omitted for a single index, in which case the name will be `default`.
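+   *
+   * A hypothetical sketch (the mapping definition is illustrative; `SearchIndexModel` comes from `org.mongodb.scala.model`):
+   * {{{
+   *   collection.createSearchIndexes(List(
+   *     new SearchIndexModel("default", Document("mappings" -> Document("dynamic" -> true)))
+   *   )).subscribe((name: String) => println(name))
+   * }}}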
+   *
+   * @param searchIndexModels the search index models.
+   * @return an Observable with the names of the search indexes
+   * in the order specified by the given list of [[org.mongodb.scala.model.SearchIndexModel]]s.
+   * @since 4.11
+   * @note Requires MongoDB 6.0 or greater
+   * @see [[https://www.mongodb.com/docs/manual/reference/command/createSearchIndexes/ Create Search Indexes]]
+   */
+  def createSearchIndexes(searchIndexModels: List[SearchIndexModel]): Observable[String] =
+    wrapped.createSearchIndexes(searchIndexModels.asJava)
+
+  /**
+   * Update an Atlas Search index in the collection.
+   *
+   * @param indexName the name of the search index to update.
+   * @param definition the search index mapping definition.
+   * @return an Observable that indicates when the operation has completed.
+   * @since 4.11
+   * @note Requires MongoDB 6.0 or greater
+   * @see [[https://www.mongodb.com/docs/manual/reference/command/updateSearchIndex/ Update Search Index]]
+   */
+  def updateSearchIndex(indexName: String, definition: Bson): SingleObservable[Unit] =
+    wrapped.updateSearchIndex(indexName, definition)
+
+  /**
+   * Drop an Atlas Search index given its name.
+   *
+   * @param indexName the name of the search index to drop.
+   * @return an Observable that indicates when the operation has completed.
+   * @since 4.11
+   * @note Requires MongoDB 6.0 or greater
+   * @see [[https://www.mongodb.com/docs/manual/reference/command/dropSearchIndex/ Drop Search Index]]
+   */
+  def dropSearchIndex(indexName: String): SingleObservable[Unit] = wrapped.dropSearchIndex(indexName)
+
+  /**
+   * Get all Atlas Search indexes in this collection.
+   *
+   * @tparam C the target document type of the observable.
+   * @return the fluent list search indexes interface
+   * @since 4.11
+   * @note Requires MongoDB 6.0 or greater
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/listSearchIndexes List Search Indexes]]
+   */
+  def listSearchIndexes[C]()(implicit e: C DefaultsTo Document, ct: ClassTag[C]): ListSearchIndexesObservable[C] =
+    ListSearchIndexesObservable(wrapped.listSearchIndexes(ct))
+
+  /**
+   * [[https://www.mongodb.com/docs/manual/reference/command/createIndexes Create Index]]
+   * @param key an object describing the index key(s), which may not be null. This can be of any type for which a `Codec` is
+   * registered
+   * @return an Observable with a single element: the name of the created index
+   */
+  def createIndex(key: Bson): SingleObservable[String] = wrapped.createIndex(key)
+
+  /**
+   * [[https://www.mongodb.com/docs/manual/reference/command/createIndexes Create Index]]
+   * @param key an object describing the index key(s), which may not be null. This can be of any type for which a `Codec` is
+   * registered
+   * @param options the options for the index
+   * @return an Observable with a single element: the name of the created index
+   */
+  def createIndex(key: Bson, options: IndexOptions): SingleObservable[String] =
+    wrapped.createIndex(key, options)
+
+  /**
+   * [[https://www.mongodb.com/docs/manual/reference/command/createIndexes Create Index]]
+   * @param clientSession the client session with which to associate this operation
+   * @param key an object describing the index key(s), which may not be null. This can be of any type for which a `Codec` is
+   * registered
+   * @return an Observable with a single element: the name of the created index
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def createIndex(clientSession: ClientSession, key: Bson): SingleObservable[String] =
+    wrapped.createIndex(clientSession, key)
+
+  /**
+   * [[https://www.mongodb.com/docs/manual/reference/command/createIndexes Create Index]]
+   * @param clientSession the client session with which to associate this operation
+   * @param key an object describing the index key(s), which may not be null. This can be of any type for which a `Codec` is
+   * registered
+   * @param options the options for the index
+   * @return an Observable with a single element: the name of the created index
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def createIndex(clientSession: ClientSession, key: Bson, options: IndexOptions): SingleObservable[String] =
+    wrapped.createIndex(clientSession, key, options)
+
+  /**
+   * Create multiple indexes.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/createIndexes Create Index]]
+   * @param models the list of indexes to create
+   * @return an Observable with the names of the indexes
+   */
+  def createIndexes(models: Seq[IndexModel]): Observable[String] = wrapped.createIndexes(models.asJava)
+
+  /**
+   * Create multiple indexes.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/createIndexes Create Index]]
+   * @param models the list of indexes to create
+   * @param createIndexOptions options to use when creating indexes
+   * @return an Observable with the names of the indexes
+   * @since 2.2
+   */
+  def createIndexes(models: Seq[IndexModel], createIndexOptions: CreateIndexOptions): Observable[String] =
+    wrapped.createIndexes(models.asJava, createIndexOptions)
+
+  /**
+   * Create multiple indexes.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/createIndexes Create Index]]
+   * @param clientSession the client session with which to associate this operation
+   * @param models the list of indexes to create
+   * @return an Observable with the names of the indexes
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def createIndexes(clientSession: ClientSession, models: Seq[IndexModel]): Observable[String] =
+    wrapped.createIndexes(clientSession, models.asJava)
+
+  /**
+   * Create multiple indexes.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/createIndexes Create Index]]
+   * @param clientSession the client session with which to associate this operation
+   * @param models the list of indexes to create
+   * @param createIndexOptions options to use when creating indexes
+   * @return an Observable with the names of the indexes
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def createIndexes(
+      clientSession: ClientSession,
+      models: Seq[IndexModel],
+      createIndexOptions: CreateIndexOptions
+  ): Observable[String] =
+    wrapped.createIndexes(clientSession, models.asJava, createIndexOptions)
+
+  /**
+   * Get all the indexes in this collection.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/listIndexes/ listIndexes]]
+   * @tparam C the target document type of the observable.
+   * @return the fluent list indexes interface
+   */
+  def listIndexes[C]()(implicit e: C DefaultsTo Document, ct: ClassTag[C]): ListIndexesObservable[C] =
+    ListIndexesObservable(wrapped.listIndexes(ct))
+
+  /**
+   * Get all the indexes in this collection.
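+   *
+   * Sketch (prints each index as a JSON document):
+   * {{{
+   *   collection.listIndexes().subscribe((index: Document) => println(index.toJson()))
+   * }}}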
+ * + * [[https://www.mongodb.com/docs/manual/reference/command/listIndexes/ listIndexes]] + * @param clientSession the client session with which to associate this operation + * @tparam C the target document type of the observable. + * @return the fluent list indexes interface + * @since 2.2 + * @note Requires MongoDB 3.6 or greater + */ + def listIndexes[C]( + clientSession: ClientSession + )(implicit e: C DefaultsTo Document, ct: ClassTag[C]): ListIndexesObservable[C] = + ListIndexesObservable(wrapped.listIndexes(clientSession, ct)) + + /** + * Drops the given index. + * + * [[https://www.mongodb.com/docs/manual/reference/command/dropIndexes/ Drop Indexes]] + * @param indexName the name of the index to remove + * @return an Observable that indicates when the operation has completed + */ + def dropIndex(indexName: String): SingleObservable[Unit] = wrapped.dropIndex(indexName) + + /** + * Drops the given index. + * + * [[https://www.mongodb.com/docs/manual/reference/command/dropIndexes/ Drop Indexes]] + * @param indexName the name of the index to remove + * @param dropIndexOptions options to use when dropping indexes + * @return an Observable that indicates when the operation has completed + * @since 2.2 + */ + def dropIndex(indexName: String, dropIndexOptions: DropIndexOptions): SingleObservable[Unit] = + wrapped.dropIndex(indexName, dropIndexOptions) + + /** + * Drops the index given the keys used to create it. + * + * @param keys the keys of the index to remove + * @return an Observable that indicates when the operation has completed + */ + def dropIndex(keys: Bson): SingleObservable[Unit] = wrapped.dropIndex(keys) + + /** + * Drops the index given the keys used to create it. + * + * @param keys the keys of the index to remove + * @param dropIndexOptions options to use when dropping indexes + * @return an Observable that indicates when the operation has completed + * @since 2.2 + */ + def dropIndex(keys: Bson, dropIndexOptions: DropIndexOptions): SingleObservable[Unit] = + wrapped.dropIndex(keys, dropIndexOptions) + + /** + * Drops the given index. + * + * [[https://www.mongodb.com/docs/manual/reference/command/dropIndexes/ Drop Indexes]] + * @param clientSession the client session with which to associate this operation + * @param indexName the name of the index to remove + * @return an Observable that indicates when the operation has completed + * @since 2.2 + * @note Requires MongoDB 3.6 or greater + */ + def dropIndex(clientSession: ClientSession, indexName: String): SingleObservable[Unit] = + wrapped.dropIndex(clientSession, indexName) + + /** + * Drops the given index. + * + * [[https://www.mongodb.com/docs/manual/reference/command/dropIndexes/ Drop Indexes]] + * @param clientSession the client session with which to associate this operation + * @param indexName the name of the index to remove + * @param dropIndexOptions options to use when dropping indexes + * @return an Observable that indicates when the operation has completed + * @since 2.2 + * @note Requires MongoDB 3.6 or greater + */ + def dropIndex( + clientSession: ClientSession, + indexName: String, + dropIndexOptions: DropIndexOptions + ): SingleObservable[Unit] = + wrapped.dropIndex(clientSession, indexName, dropIndexOptions) + + /** + * Drops the index given the keys used to create it. 
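+   *
+   * Sketch (assumes a started `session: ClientSession`; the key is illustrative):
+   * {{{
+   *   collection.dropIndex(session, Indexes.ascending("email"))
+   *     .subscribe((_: Unit) => println("index dropped"))
+   * }}}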
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @param keys the keys of the index to remove
+   * @return an Observable that indicates when the operation has completed
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def dropIndex(clientSession: ClientSession, keys: Bson): SingleObservable[Unit] =
+    wrapped.dropIndex(clientSession, keys)
+
+  /**
+   * Drops the index given the keys used to create it.
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @param keys the keys of the index to remove
+   * @param dropIndexOptions options to use when dropping indexes
+   * @return an Observable that indicates when the operation has completed
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def dropIndex(
+      clientSession: ClientSession,
+      keys: Bson,
+      dropIndexOptions: DropIndexOptions
+  ): SingleObservable[Unit] =
+    wrapped.dropIndex(clientSession, keys, dropIndexOptions)
+
+  /**
+   * Drop all the indexes on this collection, except for the default on _id.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/dropIndexes/ Drop Indexes]]
+   * @return an Observable that indicates when the operation has completed
+   */
+  def dropIndexes(): SingleObservable[Unit] = wrapped.dropIndexes()
+
+  /**
+   * Drop all the indexes on this collection, except for the default on _id.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/dropIndexes/ Drop Indexes]]
+   * @param dropIndexOptions options to use when dropping indexes
+   * @return an Observable that indicates when the operation has completed
+   * @since 2.2
+   */
+  def dropIndexes(dropIndexOptions: DropIndexOptions): SingleObservable[Unit] =
+    wrapped.dropIndexes(dropIndexOptions)
+
+  /**
+   * Drop all the indexes on this collection, except for the default on _id.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/dropIndexes/ Drop Indexes]]
+   * @param clientSession the client session with which to associate this operation
+   * @return an Observable that indicates when the operation has completed
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def dropIndexes(clientSession: ClientSession): SingleObservable[Unit] =
+    wrapped.dropIndexes(clientSession)
+
+  /**
+   * Drop all the indexes on this collection, except for the default on _id.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/dropIndexes/ Drop Indexes]]
+   * @param clientSession the client session with which to associate this operation
+   * @param dropIndexOptions options to use when dropping indexes
+   * @return an Observable that indicates when the operation has completed
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def dropIndexes(clientSession: ClientSession, dropIndexOptions: DropIndexOptions): SingleObservable[Unit] =
+    wrapped.dropIndexes(clientSession, dropIndexOptions)
+
+  /**
+   * Rename the collection with oldCollectionName to the newCollectionName.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/renameCollection Rename collection]]
+   * @param newCollectionNamespace the name the collection will be renamed to
+   * @return an Observable that indicates when the operation has completed
+   */
+  def renameCollection(newCollectionNamespace: MongoNamespace): SingleObservable[Unit] =
+    wrapped.renameCollection(newCollectionNamespace)
+
+  /**
+   * Rename the collection with oldCollectionName to the newCollectionName.
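+   *
+   * For example, a hedged sketch (the target namespace is made up):
+   *
+   * {{{
+   * collection.renameCollection(MongoNamespace("mydb", "archivedEvents"))
+   *   .subscribe((_: Unit) => println("collection renamed"))
+   * }}}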
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/renameCollection Rename collection]]
+   * @param newCollectionNamespace the name the collection will be renamed to
+   * @param options the options for renaming a collection
+   * @return an Observable that indicates when the operation has completed
+   */
+  def renameCollection(
+      newCollectionNamespace: MongoNamespace,
+      options: RenameCollectionOptions
+  ): SingleObservable[Unit] =
+    wrapped.renameCollection(newCollectionNamespace, options)
+
+  /**
+   * Rename the collection with oldCollectionName to the newCollectionName.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/renameCollection Rename collection]]
+   * @param clientSession the client session with which to associate this operation
+   * @param newCollectionNamespace the name the collection will be renamed to
+   * @return an Observable that indicates when the operation has completed
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def renameCollection(
+      clientSession: ClientSession,
+      newCollectionNamespace: MongoNamespace
+  ): SingleObservable[Unit] =
+    wrapped.renameCollection(clientSession, newCollectionNamespace)
+
+  /**
+   * Rename the collection with oldCollectionName to the newCollectionName.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/renameCollection Rename collection]]
+   * @param clientSession the client session with which to associate this operation
+   * @param newCollectionNamespace the name the collection will be renamed to
+   * @param options the options for renaming a collection
+   * @return an Observable that indicates when the operation has completed
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def renameCollection(
+      clientSession: ClientSession,
+      newCollectionNamespace: MongoNamespace,
+      options: RenameCollectionOptions
+  ): SingleObservable[Unit] =
+    wrapped.renameCollection(clientSession, newCollectionNamespace, options)
+
+  /**
+   * Creates a change stream for this collection.
+   *
+   * @tparam C the target document type of the observable.
+   * @return the change stream observable
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def watch[C]()(implicit e: C DefaultsTo TResult, ct: ClassTag[C]): ChangeStreamObservable[C] =
+    ChangeStreamObservable(wrapped.watch(ct))
+
+  /**
+   * Creates a change stream for this collection.
+   *
+   * @param pipeline the aggregation pipeline to apply to the change stream
+   * @tparam C the target document type of the observable.
+   * @return the change stream observable
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def watch[C](pipeline: Seq[Bson])(implicit e: C DefaultsTo TResult, ct: ClassTag[C]): ChangeStreamObservable[C] =
+    ChangeStreamObservable(wrapped.watch(pipeline.asJava, ct))
+
+  /**
+   * Creates a change stream for this collection.
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @tparam C the target document type of the observable.
+   * @return the change stream observable
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def watch[C](
+      clientSession: ClientSession
+  )(implicit e: C DefaultsTo TResult, ct: ClassTag[C]): ChangeStreamObservable[C] =
+    ChangeStreamObservable(wrapped.watch(clientSession, ct))
+
+  /**
+   * Creates a change stream for this collection.
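+   *
+   * A sketch of a filtered stream (assumes a `collection` and `session`; `Aggregates.filter` is the Scala alias for
+   * the aggregation `\$match` stage):
+   *
+   * {{{
+   * import org.mongodb.scala.model.{ Aggregates, Filters }
+   *
+   * collection.watch(session, Seq(Aggregates.filter(Filters.equal("operationType", "insert"))))
+   *   .subscribe(change => println(change))
+   * }}}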
+ * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream + * @tparam C the target document type of the observable. + * @return the change stream observable + * @since 2.2 + * @note Requires MongoDB 3.6 or greater + */ + def watch[C]( + clientSession: ClientSession, + pipeline: Seq[Bson] + )(implicit e: C DefaultsTo TResult, ct: ClassTag[C]): ChangeStreamObservable[C] = + ChangeStreamObservable(wrapped.watch(clientSession, pipeline.asJava, ct)) + +} + +// scalastyle:on number.of.methods file.size.limit diff --git a/driver-scala/src/main/scala/org/mongodb/scala/MongoCompressor.scala b/driver-scala/src/main/scala/org/mongodb/scala/MongoCompressor.scala new file mode 100644 index 00000000000..6ef3348f7f0 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/MongoCompressor.scala @@ -0,0 +1,52 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala + +import com.mongodb.{ MongoCompressor => JMongoCompressor } + +/** + * Metadata describing a compressor to use for sending and receiving messages to a MongoDB server. + * + * @since 2.2 + * @note Requires MongoDB 3.4 or greater + */ +object MongoCompressor { + + /** + * Create an instance for snappy compression. + * + * @return A compressor based on the snappy compression algorithm + */ + def createSnappyCompressor: MongoCompressor = JMongoCompressor.createSnappyCompressor() + + /** + * Create an instance for zlib compression. + * + * @return A compressor based on the zlib compression algorithm + * @note Requires MongoDB 3.6 or greater + */ + def createZlibCompressor: MongoCompressor = JMongoCompressor.createZlibCompressor() + + /** + * Create an instance for zstd compression. + * + * @return A compressor based on the zstd compression algorithm + * @note Requires MongoDB 4.2 or greater + * @since 4.0 + */ + def createZstdCompressor: MongoCompressor = JMongoCompressor.createZstdCompressor() +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/MongoCredential.scala b/driver-scala/src/main/scala/org/mongodb/scala/MongoCredential.scala new file mode 100644 index 00000000000..184fa1e3f6d --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/MongoCredential.scala @@ -0,0 +1,211 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.mongodb.scala
+
+import com.mongodb.{ MongoCredential => JMongoCredential }
+
+/**
+ * Represents credentials to authenticate to a MongoDB server, as well as the source of the credentials and the authentication mechanism
+ * to use.
+ *
+ * @since 1.0
+ */
+object MongoCredential {
+
+  /**
+   * The GSSAPI mechanism. See the RFC.
+   *
+   * @see [[https://www.mongodb.com/docs/manual/core/authentication/#kerberos-authentication GSSAPI]]
+   * @since 4.0
+   */
+  val GSSAPI_MECHANISM: String = JMongoCredential.GSSAPI_MECHANISM
+
+  /**
+   * The PLAIN mechanism. See the RFC.
+   *
+   * @see [[https://www.mongodb.com/docs/manual/core/authentication/#ldap-proxy-authority-authentication PLAIN]]
+   * @since 4.0
+   */
+  val PLAIN_MECHANISM: String = JMongoCredential.PLAIN_MECHANISM
+
+  /**
+   * The MongoDB X.509 mechanism.
+   *
+   * @see [[https://www.mongodb.com/docs/manual/core/authentication/#x-509-certificate-authentication X-509]]
+   * @since 4.0
+   */
+  val MONGODB_X509_MECHANISM: String = JMongoCredential.MONGODB_X509_MECHANISM
+
+  /**
+   * The SCRAM-SHA-1 Mechanism.
+   *
+   * @note Requires MongoDB 3.0 or greater
+   * @see [[https://www.mongodb.com/docs/manual/core/authentication/#authentication-scram-sha-1 SCRAM-SHA-1]]
+   * @since 4.0
+   */
+  val SCRAM_SHA_1_MECHANISM: String = JMongoCredential.SCRAM_SHA_1_MECHANISM
+
+  /**
+   * The SCRAM-SHA-256 Mechanism.
+   *
+   * @since 3.8
+   * @note Requires MongoDB 4.0 or greater
+   * @see [[https://www.mongodb.com/docs/manual/core/authentication/#authentication-scram-sha-256 SCRAM-SHA-256]]
+   */
+  val SCRAM_SHA_256_MECHANISM: String = JMongoCredential.SCRAM_SHA_256_MECHANISM
+
+  /**
+   * Mechanism property key for overriding the service name for GSSAPI authentication.
+   *
+   * @see #createGSSAPICredential(String)
+   * @see #withMechanismProperty(String, Object)
+   * @since 4.0
+   */
+  val SERVICE_NAME_KEY: String = JMongoCredential.SERVICE_NAME_KEY
+
+  /**
+   * Mechanism property key for specifying whether to canonicalize the host name for GSSAPI authentication.
+   *
+   * @see #createGSSAPICredential(String)
+   * @see #withMechanismProperty(String, Object)
+   * @since 4.0
+   */
+  val CANONICALIZE_HOST_NAME_KEY: String = JMongoCredential.CANONICALIZE_HOST_NAME_KEY
+
+  /**
+   * Mechanism property key for overriding the SaslClient properties for GSSAPI authentication.
+   *
+   * The value of this property must be a `Map[String, Object]`. In most cases there is no need to set this mechanism property.
+   * But if an application does:
+   *
+   * - Generally it must set the `javax.security.sasl.Sasl#CREDENTIALS` property to an instance of `org.ietf.jgss.GSSCredential`
+   * - It's recommended that it set the `javax.security.sasl.Sasl#MAX_BUFFER` property to "0" to ensure compatibility with all
+   *   versions of MongoDB.
+   *
+   * @see #createGSSAPICredential(String)
+   * @see #withMechanismProperty(String, Object)
+   * @see javax.security.sasl.Sasl
+   * @see javax.security.sasl.Sasl#CREDENTIALS
+   * @see javax.security.sasl.Sasl#MAX_BUFFER
+   * @since 4.0
+   */
+  val JAVA_SASL_CLIENT_PROPERTIES_KEY: String = JMongoCredential.JAVA_SASL_CLIENT_PROPERTIES_KEY
+
+  /**
+   * Mechanism property key for overriding the `javax.security.auth.Subject` under which GSSAPI authentication executes.
+   *
+   * @see #createGSSAPICredential(String)
+   * @see #withMechanismProperty(String, Object)
+   * @since 4.0
+   */
+  val JAVA_SUBJECT_KEY: String = JMongoCredential.JAVA_SUBJECT_KEY
+
+  /**
+   * Creates a MongoCredential instance with an unspecified mechanism.
+   * The client will negotiate the best mechanism based on the
+   * version of the server that the client is authenticating to. If the server version is 3.0 or higher,
+   * the driver will authenticate using the SCRAM-SHA-1 mechanism. Otherwise, the driver will authenticate using the MONGODB_CR
+   * mechanism.
+   *
+   * @param userName the user name
+   * @param database the database where the user is defined
+   * @param password the user's password
+   * @return the credential
+   *
+   * @see [[https://www.mongodb.com/docs/manual/core/authentication/#mongodb-cr-authentication MONGODB-CR]]
+   * @see [[https://www.mongodb.com/docs/manual/core/authentication/#authentication-scram-sha-1 SCRAM-SHA-1]]
+   */
+  def createCredential(userName: String, database: String, password: Array[Char]): JMongoCredential =
+    JMongoCredential.createCredential(userName, database, password)
+
+  /**
+   * Creates a MongoCredential instance for the SCRAM-SHA-1 SASL mechanism. Use this method only if you want to ensure that
+   * the driver uses the SCRAM-SHA-1 mechanism regardless of whether the server you are connecting to supports a more secure
+   * authentication mechanism. Otherwise use the [[createCredential]] method to allow the driver to
+   * negotiate the best mechanism based on the server version.
+   *
+   * @param userName the non-null user name
+   * @param source the source where the user is defined.
+   * @param password the non-null user password
+   * @return the credential
+   * @see [[createCredential]]
+   * @see [[https://www.mongodb.com/docs/manual/core/authentication/#authentication-scram-sha-1 SCRAM-SHA-1]]
+   */
+  def createScramSha1Credential(userName: String, source: String, password: Array[Char]): JMongoCredential =
+    JMongoCredential.createScramSha1Credential(userName, source, password)
+
+  /**
+   * Creates a MongoCredential instance for the SCRAM-SHA-256 SASL mechanism.
+   *
+   * @param userName the non-null user name
+   * @param source the source where the user is defined.
+   * @param password the non-null user password
+   * @return the credential
+   * @note Requires MongoDB 4.0 or greater
+   * @see [[https://www.mongodb.com/docs/manual/core/authentication/#authentication-scram-sha-256 SCRAM-SHA-256]]
+   */
+  def createScramSha256Credential(userName: String, source: String, password: Array[Char]): JMongoCredential =
+    JMongoCredential.createScramSha256Credential(userName, source, password)
+
+  /**
+   * Creates a MongoCredential instance for the MongoDB X.509 protocol.
+   *
+   * @param userName the user name
+   * @return the credential
+   * @see [[https://www.mongodb.com/docs/manual/core/authentication/#x-509-certificate-authentication X-509]]
+   */
+  def createMongoX509Credential(userName: String): JMongoCredential =
+    JMongoCredential.createMongoX509Credential(userName)
+
+  /**
+   * Creates a MongoCredential instance for the MongoDB X.509 protocol where the distinguished subject name of the client certificate
+   * acts as the userName.
+   *
+   * @return the credential
+   * @see [[https://www.mongodb.com/docs/manual/core/authentication/#x-509-certificate-authentication X-509]]
+   * @since 1.2
+   * @note Requires MongoDB 3.4 or greater
+   */
+  def createMongoX509Credential(): JMongoCredential = JMongoCredential.createMongoX509Credential()
+
+  /**
+   * Creates a MongoCredential instance for the PLAIN SASL mechanism.
+   *
+   * @param userName the non-null user name
+   * @param source the source where the user is defined. This can be either `\$external` or the name of a database.
+ * @param password the non-null user password + * @return the credential + * @see [[https://www.mongodb.com/docs/manual/core/authentication/#ldap-proxy-authority-authentication PLAIN]] + */ + def createPlainCredential(userName: String, source: String, password: Array[Char]): JMongoCredential = + JMongoCredential.createPlainCredential(userName, source, password) + + /** + * Creates a MongoCredential instance for the GSSAPI SASL mechanism. To override the default service name of `mongodb`, add a + * mechanism property with the name `SERVICE_NAME`. To force canonicalization of the host name prior to authentication, add a + * mechanism property with the name `CANONICALIZE_HOST_NAME` with the value `true`. + * + * @param userName the non-null user name + * @return the credential + * @see [[https://www.mongodb.com/docs/manual/core/authentication/#kerberos-authentication GSSAPI]] + */ + def createGSSAPICredential(userName: String): JMongoCredential = JMongoCredential.createGSSAPICredential(userName) + +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/MongoDatabase.scala b/driver-scala/src/main/scala/org/mongodb/scala/MongoDatabase.scala new file mode 100644 index 00000000000..54c48574c72 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/MongoDatabase.scala @@ -0,0 +1,510 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala + +import com.mongodb.annotations.{ Alpha, Reason } +import com.mongodb.client.model.{ CreateCollectionOptions, CreateViewOptions } +import com.mongodb.reactivestreams.client.{ MongoDatabase => JMongoDatabase } +import org.bson.codecs.configuration.CodecRegistry +import org.mongodb.scala.bson.DefaultHelper.DefaultsTo +import org.mongodb.scala.bson.conversions.Bson + +import scala.collection.JavaConverters._ +import scala.concurrent.duration.{ Duration, MILLISECONDS } +import scala.reflect.ClassTag + +/** + * The MongoDatabase representation. + * + * @param wrapped the underlying java MongoDatabase + * @since 1.0 + */ +case class MongoDatabase(private[scala] val wrapped: JMongoDatabase) { + + /** + * Gets the name of the database. + * + * @return the database name + */ + lazy val name: String = wrapped.getName + + /** + * Get the codec registry for the MongoDatabase. + * + * @return the { @link org.bson.codecs.configuration.CodecRegistry} + */ + lazy val codecRegistry: CodecRegistry = wrapped.getCodecRegistry + + /** + * Get the read preference for the MongoDatabase. + * + * @return the { @link com.mongodb.ReadPreference} + */ + lazy val readPreference: ReadPreference = wrapped.getReadPreference + + /** + * Get the write concern for the MongoDatabase. + * + * @return the { @link com.mongodb.WriteConcern} + */ + lazy val writeConcern: WriteConcern = wrapped.getWriteConcern + + /** + * Get the read concern for the MongoDatabase. 
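+   *
+   * For example, a sketch that sets a read concern and reads it back (assumes a `database: MongoDatabase` is in scope
+   * and that the `ReadConcern.MAJORITY` helper is available):
+   *
+   * {{{
+   * val majorityReads: MongoDatabase = database.withReadConcern(ReadConcern.MAJORITY)
+   * println(majorityReads.readConcern)
+   * }}}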
+   *
+   * @return the [[ReadConcern]]
+   * @since 1.1
+   */
+  lazy val readConcern: ReadConcern = wrapped.getReadConcern
+
+  /**
+   * The time limit for the full execution of an operation.
+   *
+   * If not null, the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`,
+   * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`.
+   *
+   * - `null` means that the timeout mechanism for operations will defer to using:
+   *   - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to become available
+   *   - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out.
+   *   - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out.
+   *   - `maxTimeMS`: The time limit for processing operations on a cursor.
+   *     See: [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS).
+   *   - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute.
+   * - `0` means infinite timeout.
+   * - `> 0` The time limit to use for the full execution of an operation.
+   *
+   * @return the optional timeout duration
+   * @since 5.2
+   */
+  @Alpha(Array(Reason.CLIENT))
+  lazy val timeout: Option[Duration] =
+    Option.apply(wrapped.getTimeout(MILLISECONDS)).map(t => Duration(t, MILLISECONDS))
+
+  /**
+   * Create a new MongoDatabase instance with a different codec registry.
+   *
+   * The { @link CodecRegistry} configured by this method is effectively treated by the driver as an
+   * instance of { @link CodecProvider}, which { @link CodecRegistry} extends.
+   * So there is no benefit to defining a class that implements { @link CodecRegistry}. Rather, an
+   * application should always create { @link CodecRegistry} instances using the factory methods in
+   * { @link CodecRegistries}.
+   *
+   * @param codecRegistry the new { @link org.bson.codecs.configuration.CodecRegistry} for the collection
+   * @return a new MongoDatabase instance with the different codec registry
+   * @see CodecRegistries
+   */
+  def withCodecRegistry(codecRegistry: CodecRegistry): MongoDatabase =
+    MongoDatabase(wrapped.withCodecRegistry(codecRegistry))
+
+  /**
+   * Create a new MongoDatabase instance with a different read preference.
+   *
+   * @param readPreference the new { @link com.mongodb.ReadPreference} for the collection
+   * @return a new MongoDatabase instance with the different readPreference
+   */
+  def withReadPreference(readPreference: ReadPreference): MongoDatabase =
+    MongoDatabase(wrapped.withReadPreference(readPreference))
+
+  /**
+   * Create a new MongoDatabase instance with a different write concern.
+   *
+   * @param writeConcern the new { @link com.mongodb.WriteConcern} for the collection
+   * @return a new MongoDatabase instance with the different writeConcern
+   */
+  def withWriteConcern(writeConcern: WriteConcern): MongoDatabase =
+    MongoDatabase(wrapped.withWriteConcern(writeConcern))
+
+  /**
+   * Create a new MongoDatabase instance with a different read concern.
+   *
+   * @param readConcern the new [[ReadConcern]] for the collection
+   * @return a new MongoDatabase instance with the different ReadConcern
+   * @since 1.1
+   */
+  def withReadConcern(readConcern: ReadConcern): MongoDatabase =
+    MongoDatabase(wrapped.withReadConcern(readConcern))
+
+  /**
+   * Sets the time limit for the full execution of an operation.
+   *
+   * - `0` means infinite timeout.
+   * - `> 0` The time limit to use for the full execution of an operation.
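+   *
+   * For example, a sketch that gives every operation on this database a 30 second budget (assumes a
+   * `database: MongoDatabase` is in scope):
+   *
+   * {{{
+   * import scala.concurrent.duration.DurationInt
+   *
+   * val timeboxed: MongoDatabase = database.withTimeout(30.seconds)
+   * }}}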
+   *
+   * @param timeout the timeout, which must be greater than or equal to 0
+   * @return a new MongoDatabase instance with the set time limit for operations
+   * @since 5.2
+   */
+  @Alpha(Array(Reason.CLIENT))
+  def withTimeout(timeout: Duration): MongoDatabase =
+    MongoDatabase(wrapped.withTimeout(timeout.toMillis, MILLISECONDS))
+
+  /**
+   * Gets a collection with a specific default document class.
+   *
+   * @param collectionName the name of the collection to return
+   * @tparam TResult the type of the class to use instead of [[Document]].
+   * @return the collection
+   */
+  def getCollection[TResult](
+      collectionName: String
+  )(implicit e: TResult DefaultsTo Document, ct: ClassTag[TResult]): MongoCollection[TResult] =
+    MongoCollection(wrapped.getCollection(collectionName, ct))
+
+  /**
+   * Executes command in the context of the current database using the primary server.
+   *
+   * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and the
+   * `timeoutMS` setting has been set.
+   *
+   * @param command the command to be run
+   * @tparam TResult the type of the class to use instead of [[Document]].
+   * @return an Observable containing the command result
+   */
+  def runCommand[TResult](
+      command: Bson
+  )(implicit e: TResult DefaultsTo Document, ct: ClassTag[TResult]): SingleObservable[TResult] =
+    wrapped.runCommand[TResult](command, ct)
+
+  /**
+   * Executes command in the context of the current database.
+   *
+   * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and the
+   * `timeoutMS` setting has been set.
+   *
+   * @param command the command to be run
+   * @param readPreference the [[ReadPreference]] to be used when executing the command
+   * @tparam TResult the type of the class to use instead of [[Document]].
+   * @return an Observable containing the command result
+   */
+  def runCommand[TResult](command: Bson, readPreference: ReadPreference)(
+      implicit e: TResult DefaultsTo Document,
+      ct: ClassTag[TResult]
+  ): SingleObservable[TResult] =
+    wrapped.runCommand(command, readPreference, ct)
+
+  /**
+   * Executes command in the context of the current database using the primary server.
+   *
+   * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and the
+   * `timeoutMS` setting has been set.
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @param command the command to be run
+   * @tparam TResult the type of the class to use instead of [[Document]].
+   * @return an Observable containing the command result
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def runCommand[TResult](clientSession: ClientSession, command: Bson)(
+      implicit e: TResult DefaultsTo Document,
+      ct: ClassTag[TResult]
+  ): SingleObservable[TResult] =
+    wrapped.runCommand[TResult](clientSession, command, ct)
+
+  /**
+   * Executes command in the context of the current database.
+   *
+   * Note: The behavior of `runCommand` is undefined if the provided command document includes a `maxTimeMS` field and the
+   * `timeoutMS` setting has been set.
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @param command the command to be run
+   * @param readPreference the [[ReadPreference]] to be used when executing the command
+   * @tparam TResult the type of the class to use instead of [[Document]].
+   * @return an Observable containing the command result
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def runCommand[TResult](clientSession: ClientSession, command: Bson, readPreference: ReadPreference)(
+      implicit e: TResult DefaultsTo Document,
+      ct: ClassTag[TResult]
+  ): SingleObservable[TResult] =
+    wrapped.runCommand(clientSession, command, readPreference, ct)
+
+  /**
+   * Drops this database.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/dropDatabase/#dbcmd.dropDatabase Drop database]]
+   * @return an Observable identifying when the database has been dropped
+   */
+  def drop(): SingleObservable[Unit] = wrapped.drop()
+
+  /**
+   * Drops this database.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/dropDatabase/#dbcmd.dropDatabase Drop database]]
+   * @param clientSession the client session with which to associate this operation
+   * @return an Observable identifying when the database has been dropped
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def drop(clientSession: ClientSession): SingleObservable[Unit] = wrapped.drop(clientSession)
+
+  /**
+   * Gets the names of all the collections in this database.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/listCollections listCollections]]
+   * @return an Observable with the names of all the collections in this database
+   */
+  def listCollectionNames(): ListCollectionNamesObservable =
+    ListCollectionNamesObservable(wrapped.listCollectionNames())
+
+  /**
+   * Finds all the collections in this database.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/listCollections listCollections]]
+   * @tparam TResult the target document type of the iterable.
+   * @return the fluent list collections interface
+   */
+  def listCollections[TResult]()(
+      implicit e: TResult DefaultsTo Document,
+      ct: ClassTag[TResult]
+  ): ListCollectionsObservable[TResult] =
+    ListCollectionsObservable(wrapped.listCollections(ct))
+
+  /**
+   * Gets the names of all the collections in this database.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/listCollections listCollections]]
+   * @param clientSession the client session with which to associate this operation
+   * @return an Observable with the names of all the collections in this database
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def listCollectionNames(clientSession: ClientSession): ListCollectionNamesObservable =
+    ListCollectionNamesObservable(wrapped.listCollectionNames(clientSession))
+
+  /**
+   * Finds all the collections in this database.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/listCollections listCollections]]
+   * @param clientSession the client session with which to associate this operation
+   * @tparam TResult the target document type of the iterable.
+   * @return the fluent list collections interface
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def listCollections[TResult](clientSession: ClientSession)(
+      implicit e: TResult DefaultsTo Document,
+      ct: ClassTag[TResult]
+  ): ListCollectionsObservable[TResult] =
+    ListCollectionsObservable(wrapped.listCollections(clientSession, ct))
+
+  /**
+   * Create a new collection with the given name.
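+   *
+   * For illustration (the collection name is made up, and a `database: MongoDatabase` is assumed to be in scope):
+   *
+   * {{{
+   * database.createCollection("events")
+   *   .subscribe((_: Unit) => println("collection created"))
+   * }}}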
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/create Create Command]]
+   * @param collectionName the name for the new collection to create
+   * @return an Observable identifying when the collection has been created
+   */
+  def createCollection(collectionName: String): SingleObservable[Unit] =
+    wrapped.createCollection(collectionName)
+
+  /**
+   * Create a new collection with the selected options.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/create Create Command]]
+   * @param collectionName the name for the new collection to create
+   * @param options various options for creating the collection
+   * @return an Observable identifying when the collection has been created
+   */
+  def createCollection(collectionName: String, options: CreateCollectionOptions): SingleObservable[Unit] =
+    wrapped.createCollection(collectionName, options)
+
+  /**
+   * Create a new collection with the given name.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/create Create Command]]
+   * @param clientSession the client session with which to associate this operation
+   * @param collectionName the name for the new collection to create
+   * @return an Observable identifying when the collection has been created
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def createCollection(clientSession: ClientSession, collectionName: String): SingleObservable[Unit] =
+    wrapped.createCollection(clientSession, collectionName)
+
+  /**
+   * Create a new collection with the selected options.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/create Create Command]]
+   * @param clientSession the client session with which to associate this operation
+   * @param collectionName the name for the new collection to create
+   * @param options various options for creating the collection
+   * @return an Observable identifying when the collection has been created
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def createCollection(
+      clientSession: ClientSession,
+      collectionName: String,
+      options: CreateCollectionOptions
+  ): SingleObservable[Unit] =
+    wrapped.createCollection(clientSession, collectionName, options)
+
+  /**
+   * Creates a view with the given name, backing collection/view name, and aggregation pipeline that defines the view.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/create Create Command]]
+   * @param viewName the name of the view to create
+   * @param viewOn the backing collection/view for the view
+   * @param pipeline the pipeline that defines the view
+   * @since 1.2
+   * @note Requires MongoDB 3.4 or greater
+   */
+  def createView(viewName: String, viewOn: String, pipeline: Seq[Bson]): SingleObservable[Unit] =
+    wrapped.createView(viewName, viewOn, pipeline.asJava)
+
+  /**
+   * Creates a view with the given name, backing collection/view name, aggregation pipeline, and options that define the view.
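+   *
+   * A hedged sketch using the basic overload (the view name, backing collection and pipeline are all illustrative):
+   *
+   * {{{
+   * import org.mongodb.scala.model.{ Aggregates, Filters }
+   *
+   * database.createView("activeUsers", "users", Seq(Aggregates.filter(Filters.equal("active", true))))
+   *   .subscribe((_: Unit) => println("view created"))
+   * }}}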
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/create Create Command]]
+   * @param viewName the name of the view to create
+   * @param viewOn the backing collection/view for the view
+   * @param pipeline the pipeline that defines the view
+   * @param createViewOptions various options for creating the view
+   * @since 1.2
+   * @note Requires MongoDB 3.4 or greater
+   */
+  def createView(
+      viewName: String,
+      viewOn: String,
+      pipeline: Seq[Bson],
+      createViewOptions: CreateViewOptions
+  ): SingleObservable[Unit] =
+    wrapped.createView(viewName, viewOn, pipeline.asJava, createViewOptions)
+
+  /**
+   * Creates a view with the given name, backing collection/view name, and aggregation pipeline that defines the view.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/create Create Command]]
+   * @param clientSession the client session with which to associate this operation
+   * @param viewName the name of the view to create
+   * @param viewOn the backing collection/view for the view
+   * @param pipeline the pipeline that defines the view
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def createView(
+      clientSession: ClientSession,
+      viewName: String,
+      viewOn: String,
+      pipeline: Seq[Bson]
+  ): SingleObservable[Unit] =
+    wrapped.createView(clientSession, viewName, viewOn, pipeline.asJava)
+
+  /**
+   * Creates a view with the given name, backing collection/view name, aggregation pipeline, and options that define the view.
+   *
+   * [[https://www.mongodb.com/docs/manual/reference/command/create Create Command]]
+   * @param clientSession the client session with which to associate this operation
+   * @param viewName the name of the view to create
+   * @param viewOn the backing collection/view for the view
+   * @param pipeline the pipeline that defines the view
+   * @param createViewOptions various options for creating the view
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def createView(
+      clientSession: ClientSession,
+      viewName: String,
+      viewOn: String,
+      pipeline: Seq[Bson],
+      createViewOptions: CreateViewOptions
+  ): SingleObservable[Unit] =
+    wrapped.createView(clientSession, viewName, viewOn, pipeline.asJava, createViewOptions)
+
+  /**
+   * Creates a change stream for this database.
+   *
+   * @tparam C the target document type of the observable.
+   * @return the change stream observable
+   * @since 2.4
+   * @note Requires MongoDB 4.0 or greater
+   */
+  def watch[C]()(implicit e: C DefaultsTo Document, ct: ClassTag[C]): ChangeStreamObservable[C] =
+    ChangeStreamObservable(wrapped.watch(ct))
+
+  /**
+   * Creates a change stream for this database.
+   *
+   * @param pipeline the aggregation pipeline to apply to the change stream
+   * @tparam C the target document type of the observable.
+   * @return the change stream observable
+   * @since 2.4
+   * @note Requires MongoDB 4.0 or greater
+   */
+  def watch[C](pipeline: Seq[Bson])(implicit e: C DefaultsTo Document, ct: ClassTag[C]): ChangeStreamObservable[C] =
+    ChangeStreamObservable(wrapped.watch(pipeline.asJava, ct))
+
+  /**
+   * Creates a change stream for this database.
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @tparam C the target document type of the observable.
+   * @return the change stream observable
+   * @since 2.4
+   * @note Requires MongoDB 4.0 or greater
+   */
+  def watch[C](
+      clientSession: ClientSession
+  )(implicit e: C DefaultsTo Document, ct: ClassTag[C]): ChangeStreamObservable[C] =
+    ChangeStreamObservable(wrapped.watch(clientSession, ct))
+
+  /**
+   * Creates a change stream for this database.
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @param pipeline the aggregation pipeline to apply to the change stream
+   * @tparam C the target document type of the observable.
+   * @return the change stream observable
+   * @since 2.4
+   * @note Requires MongoDB 4.0 or greater
+   */
+  def watch[C](
+      clientSession: ClientSession,
+      pipeline: Seq[Bson]
+  )(implicit e: C DefaultsTo Document, ct: ClassTag[C]): ChangeStreamObservable[C] =
+    ChangeStreamObservable(wrapped.watch(clientSession, pipeline.asJava, ct))
+
+  /**
+   * Aggregates documents according to the specified aggregation pipeline.
+   *
+   * @param pipeline the aggregate pipeline
+   * @return an Observable containing the result of the aggregation operation
+   *         [[https://www.mongodb.com/docs/manual/aggregation/ Aggregation]]
+   * @since 2.6
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def aggregate[C](pipeline: Seq[Bson])(implicit e: C DefaultsTo Document, ct: ClassTag[C]): AggregateObservable[C] =
+    AggregateObservable(wrapped.aggregate[C](pipeline.asJava, ct))
+
+  /**
+   * Aggregates documents according to the specified aggregation pipeline.
+   *
+   * @param clientSession the client session with which to associate this operation
+   * @param pipeline the aggregate pipeline
+   * @return an Observable containing the result of the aggregation operation
+   *         [[https://www.mongodb.com/docs/manual/aggregation/ Aggregation]]
+   * @since 2.6
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def aggregate[C](
+      clientSession: ClientSession,
+      pipeline: Seq[Bson]
+  )(implicit e: C DefaultsTo Document, ct: ClassTag[C]): AggregateObservable[C] =
+    AggregateObservable(wrapped.aggregate[C](clientSession, pipeline.asJava, ct))
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/MongoDriverInformation.scala b/driver-scala/src/main/scala/org/mongodb/scala/MongoDriverInformation.scala
new file mode 100644
index 00000000000..249b4885522
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/MongoDriverInformation.scala
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala
+
+import com.mongodb.{ MongoDriverInformation => JMongoDriverInformation }
+
+/**
+ * A companion object for MongoDriverInformation, which allows wrapping libraries to append their own name, version and
+ * platform to the driver metadata sent in the initial server handshake.
+ *
+ * @since 1.2
+ */
+object MongoDriverInformation {
+
+  /**
+   * Creates a builder for creating the MongoDriverInformation.
+   *
+   * @return a new Builder for creating the MongoDriverInformation.
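+   *
+   * For example, a sketch of registering wrapper-library metadata (the name and version shown are made up):
+   *
+   * {{{
+   * val info = MongoDriverInformation.builder()
+   *   .driverName("my-wrapper")
+   *   .driverVersion("0.1.0")
+   *   .build()
+   * }}}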
+   */
+  def builder(): Builder = JMongoDriverInformation.builder()
+
+  /**
+   * Creates a builder for creating the MongoDriverInformation.
+   *
+   * @param mongoDriverInformation existing MongoDriverInformation to be extended.
+   * @return a new Builder for creating the MongoDriverInformation.
+   */
+  def builder(mongoDriverInformation: MongoDriverInformation): Builder =
+    JMongoDriverInformation.builder(mongoDriverInformation)
+
+  /**
+   * MongoDriverInformation builder type
+   */
+  type Builder = JMongoDriverInformation.Builder
+
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/MongoNamespace.scala b/driver-scala/src/main/scala/org/mongodb/scala/MongoNamespace.scala
new file mode 100644
index 00000000000..a21e6d9b92c
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/MongoNamespace.scala
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala
+
+import com.mongodb.{ MongoNamespace => JMongoNamespace }
+
+/**
+ * A companion object for MongoNamespace
+ *
+ * @since 1.0
+ */
+object MongoNamespace {
+  def apply(namespace: String): JMongoNamespace = new JMongoNamespace(namespace)
+  def apply(databaseName: String, collectionName: String): JMongoNamespace =
+    new JMongoNamespace(databaseName, collectionName)
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/Observable.scala b/driver-scala/src/main/scala/org/mongodb/scala/Observable.scala
new file mode 100644
index 00000000000..22fada878eb
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/Observable.scala
@@ -0,0 +1,478 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala
+
+import org.mongodb.scala.internal._
+import org.reactivestreams.{ Publisher, Subscriber }
+
+import scala.collection.mutable.ListBuffer
+import scala.concurrent.{ ExecutionContext, Future, Promise }
+import scala.util.Try
+
+/**
+ * A companion object for [[Observable]]
+ */
+object Observable {
+
+  /**
+   * Creates an Observable from an Iterable.
+   *
+   * Convenient for testing and/or debugging.
+   *
+   * @param from the iterable to create the observable from
+   * @tparam A the type of Iterable
+   * @return an Observable that emits each item from the Iterable
+   */
+  def apply[A](from: Iterable[A]): Observable[A] = IterableObservable[A](from)
+
+}
+
+/**
+ * An `Observable` represents a MongoDB operation and implements the `Publisher` interface.
+ * + * As such it is a provider of a potentially unbounded number of sequenced elements, publishing them according to the demand received + * from its [[Observer]](s). + * + * Extends the `Publisher` interface and adds helpers to make Observables composable and simple to Subscribe to. + * + * Special parameterizations: + * + * - `Observable[Unit]` must emit exactly one item by signalling [[Observer.onNext]] + * if it terminates successfully by signalling [[Observer.onComplete]]. + * - `Observable[Void]` cannot emit an item. It is not exposed by the driver API because it is not convenient to work with in Scala. + * + * @define forComprehensionExamples + * Example: + * + * {{{ + * def f = Observable(1 to 10) + * def g = Observable(100 to 100) + * val h = for { + * x: Int <- f // returns Observable(1 to 10) + * y: Int <- g // returns Observable(100 to 100) + * } yield x + y + * }}} + * + * is translated to: + * + * {{{ + * f flatMap { (x: Int) => g map { (y: Int) => x + y } } + * }}} + * + * @tparam T the type of element signaled. + */ +trait Observable[T] extends Publisher[T] { + + /** + * Request `Observable` to start streaming data. + * + * This is a "factory method" and can be called multiple times, each time starting a new [[Subscription]]. + * Each `Subscription` will work for only a single [[Observer]]. + * + * If the `Observable` rejects the subscription attempt or otherwise fails it will signal the error via [[Observer.onError]]. + * + * @param observer the `Observer` that will consume signals from this `Observable` + */ + def subscribe(observer: Observer[_ >: T]): Unit + + /** + * Handles the automatic boxing of a Java `Observable` so it conforms to the interface. + * + * @note Users should not have to implement this method but rather use the Scala `Observable`. + * @param observer the `Observer` that will consume signals from this `Observable` + */ + override def subscribe(observer: Subscriber[_ >: T]): Unit = subscribe(BoxedSubscriber[T](observer)) + + /** + * Subscribes to the [[Observable]] and requests `Long.MaxValue`. + * + * @param doOnNext anonymous function to apply to each emitted element. + */ + def subscribe(doOnNext: T => Any): Unit = subscribe(doOnNext, t => t) + + /** + * Subscribes to the [[Observable]] and requests `Long.MaxValue`. + * + * @param doOnNext anonymous function to apply to each emitted element. + * @param doOnError anonymous function to apply if there is an error. + */ + def subscribe(doOnNext: T => Any, doOnError: Throwable => Any): Unit = subscribe(doOnNext, doOnError, () => ()) + + /** + * Subscribes to the [[Observable]] and requests `Long.MaxValue`. + * + * @param doOnError anonymous function to apply if there is an error. + * @param doOnComplete anonymous function to apply on completion. + */ + def subscribe(doOnError: Throwable => Any, doOnComplete: () => Any): Unit = subscribe(r => r, doOnError, doOnComplete) + + /** + * Subscribes to the [[Observable]] and requests `Long.MaxValue`. + * + * Uses the default or overridden `onNext`, `onError`, `onComplete` partial functions. + * + * @param doOnNext anonymous function to apply to each emitted element. + * @param doOnError anonymous function to apply if there is an error. + * @param doOnComplete anonymous function to apply on completion. 
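+   *
+   * For example, a sketch wiring up all three callbacks:
+   *
+   * {{{
+   * Observable(1 to 3).subscribe(
+   *   (i: Int) => println(i),
+   *   (t: Throwable) => t.printStackTrace(),
+   *   () => println("done")
+   * )
+   * }}}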
+   */
+  def subscribe(doOnNext: T => Any, doOnError: Throwable => Any, doOnComplete: () => Any): Unit = {
+    subscribe(new Observer[T] {
+      override def onSubscribe(subscription: Subscription): Unit = subscription.request(Long.MaxValue)
+
+      override def onNext(tResult: T): Unit = doOnNext(tResult)
+
+      override def onError(throwable: Throwable): Unit = doOnError(throwable)
+
+      override def onComplete(): Unit = doOnComplete()
+
+    })
+  }
+
+  /* Monadic operations */
+
+  /**
+   * Applies a function to each emitted result.
+   *
+   * Automatically requests all results
+   *
+   * @param doOnEach the anonymous function applied to each emitted item
+   * @tparam U the resulting type after the transformation
+   */
+  def foreach[U](doOnEach: T => U): Unit = subscribe(doOnEach)
+
+  /**
+   * Creates a new Observable by applying the `mapFunction` function to each emitted result.
+   * If there is an error and `onError` is called, the `errorMapFunction` function is applied to the failed result.
+   *
+   * @param mapFunction function that transforms each result of the receiver and passes the result to the returned Observable
+   * @param errorMapFunction function that transforms a failure of the receiver into a failure of the returned observer
+   * @tparam S the resulting type of each item in the Observable
+   * @return an Observable with transformed results and / or error.
+   */
+  def transform[S](mapFunction: T => S, errorMapFunction: Throwable => Throwable): Observable[S] =
+    MapObservable(this, mapFunction, errorMapFunction)
+
+  /**
+   * Creates a new Observable by applying a function to each emitted result of the [[Observable]].
+   * If the Observable errors, then the new Observable will also contain this exception.
+   *
+   * $forComprehensionExamples
+   *
+   * @param mapFunction function that transforms each result of the receiver and passes the result to the returned Observable
+   * @tparam S the resulting type of each item in the Observable
+   * @return an Observable with transformed results and / or error.
+   */
+  def map[S](mapFunction: T => S): Observable[S] = MapObservable(this, mapFunction)
+
+  /**
+   * Creates a new Observable by applying a function to each emitted result of the [[Observable]].
+   * If the Observable errors, then the new Observable will also contain this exception.
+   *
+   * As each emitted item passed to `onNext` returns an Observable, we tightly control the requests to the parent Observable.
+   * The requested amount is then passed to the child Observable and only when that is completed does the parent become available for
+   * requesting more data.
+   *
+   * $forComprehensionExamples
+   *
+   * @param mapFunction function that transforms each result of the receiver into an Observable and passes each result of that
+   *                    Observable to the returned Observable.
+   * @tparam S the resulting type of each item in the Observable
+   * @return an Observable with transformed results and / or error.
+   */
+  def flatMap[S](mapFunction: T => Observable[S]): Observable[S] = FlatMapObservable(this, mapFunction)
+
+  /**
+   * Creates a new [[Observable]] by filtering the value of the current Observable with a predicate.
+   *
+   * If the current Observable fails, then the resulting Observable also fails.
+   *
+   * Example:
+   * {{{
+   * val oddValues = Observable(1 to 100) filter { _ % 2 == 1 }
+   * }}}
+   *
+   * @param predicate the function applied to each emitted result; only results for which the predicate returns `true` are
+   *                  passed to the returned Observable
+   * @return an Observable containing only the items that match the predicate
+   */
+  def filter(predicate: T => Boolean): Observable[T] = FilterObservable(this, predicate)
+
+  /**
+   * Used by for-comprehensions.
+   */
+  final def withFilter(p: T => Boolean): Observable[T] = FilterObservable(this, p)
+
+  /**
+   * Collects all the values of the [[Observable]] into a list and returns a new Observable with that list.
+   *
+   * Example:
+   * {{{
+   * val listOfNumbers = Observable(1 to 100).collect()
+   * }}}
+   *
+   * @note If the Observable is large then this will consume lots of memory!
+   *       If the underlying Observable is infinite this Observable will never complete.
+   * @see Uses [[foldLeft]] underneath
+   * @return an Observable that emits a single item, the result of accumulator.
+   */
+  def collect[S](): SingleObservable[Seq[T]] =
+    FoldLeftObservable(this, ListBuffer[T](), (l: ListBuffer[T], v: T) => l += v).map(_.toSeq)
+
+  /**
+   * Builds a new [[Observable]] by applying a partial function to all elements.
+   *
+   * Example:
+   * {{{
+   * val justStrings = Observable(Iterable("this", 1, 2, "that")).collect{ case s: String => s }
+   * }}}
+   *
+   * @param pf the partial function applied to each emitted result; only results for which the function is defined are
+   *           passed to the returned Observable.
+   * @tparam S the resulting type of each item in the Observable
+   * @return an Observable with transformed results and / or error.
+   */
+  def collect[S](pf: PartialFunction[T, S]): Observable[S] =
+    CollectObservable(this, pf)
+
+  /**
+   * Creates a new [[Observable]] that contains the single result of the applied accumulator function.
+   *
+   * The first item emitted by the Observable is passed to the supplied accumulator function alongside the initial value, then all other
+   * emitted items are passed along with the previous result of the accumulator function.
+   *
+   * Example:
+   * {{{
+   * val countingObservable = Observable(1 to 100).foldLeft(0)((v, i) => v + 1)
+   * }}}
+   *
+   * @note If this function is used to collect results into a collection then it could use lots of memory!
+   *       If the underlying Observable is infinite this Observable will never complete.
+   * @param initialValue the initial (seed) accumulator value
+   * @param accumulator an accumulator function to be invoked on each item emitted by the source Observable, the result of which will be
+   *                    used in the next accumulator call.
+   * @return an Observable that emits a single item, the result of accumulator.
+   */
+  def foldLeft[S](initialValue: S)(accumulator: (S, T) => S): SingleObservable[S] =
+    FoldLeftObservable(this, initialValue, accumulator)
+
+  /**
+   * Creates a new [[Observable]] that will handle any matching throwable that this Observable might contain.
+   * If there is no match, or if this Observable contains a valid result then the new Observable will contain the same.
+   *
+   * Example:
+   *
+   * {{{
+   * mongoExceptionObservable recover { case e: MongoException => 0 } // final result: 0
+   * mongoExceptionObservable recover { case e: NotFoundException => 0 } // result: exception
+   * }}}
+   *
+   * @param pf the partial function used to pattern match against the `onError` throwable
+   * @tparam U the type of the returned Observable
+   * @return an Observable that will handle any matching throwable and not error.
+   */
+  def recover[U >: T](pf: PartialFunction[Throwable, U]): Observable[U] = RecoverObservable(this, pf)
+
+  /**
+   * Creates a new [[Observable]] that will handle any matching throwable that this Observable might contain by assigning it a value
+   * of another Observable.
+   *
+   * If there is no match, or if this Observable contains a valid result then the new Observable will contain the same result.
+   *
+   * Example:
+   *
+   * {{{
+   * successfulObservable recoverWith { case e: ArithmeticException => observableB } // result: successfulObservable
+   * mongoExceptionObservable recoverWith { case t: Throwable => observableB } // result: observableB
+   * }}}
+   *
+   * == Ensuring results from a Single Observable ==
+   *
+   * `recoverWith` can potentially emit results from either Observable. This often isn't desirable, so to ensure only a single Observable
+   * issues results combine with the [[[collect[S]()*]]] method eg:
+   *
+   * {{{
+   * val results = Observable(1 to 100)
+   *   .collect()
+   *   .recoverWith({ case t: Throwable => Observable(200 to 300).collect() })
+   *   .subscribe((i: Seq[Int]) => print(i))
+   * }}}
+   *
+   * @param pf the partial function used to pattern match against the `onError` throwable
+   * @tparam U the type of the returned Observable
+   * @return an Observable that will handle any matching throwable and not error but recover with a new observable
+   */
+  def recoverWith[U >: T](pf: PartialFunction[Throwable, Observable[U]]): Observable[U] =
+    RecoverWithObservable(this, pf)
+
+  /**
+   * Zips the values of `this` and `that` [[Observable]], and creates a new Observable holding the tuple of their results.
+   *
+   * If `this` Observable fails, the resulting Observable is failed with the throwable stored in `this`. Otherwise, if `that`
+   * Observable fails, the resulting Observable is failed with the throwable stored in `that`.
+   *
+   * It will only emit as many items as the number of items emitted by the source Observable that emits the fewest items.
+   *
+   * @param that the Observable to zip with
+   * @tparam U the type of the `that` Observable
+   * @return a new zipped Observable
+   */
+  def zip[U](that: Observable[U]): Observable[(T, U)] = ZipObservable(this, that)
+
+  /**
+   * Creates a new [[Observable]] which returns the results of this Observable; if there is an error, it will then fall back to returning
+   * the results of the alternative "`that`" Observable.
+   *
+   * If both Observables fail, the resulting Observable holds the throwable object of the first Observable.
+   *
+   * Example:
+   * {{{
+   * val fallBackObservable = Observable(1 to 100) fallbackTo Observable(200 to 300)
+   * }}}
+   *
+   * == Ensuring results from a Single Observable ==
+   *
+   * `fallbackTo` can potentially emit results from either Observable. This often isn't desirable, so to ensure only a single Observable
+   * issues results combine with the [[[collect[S]()*]]] method eg:
+   *
+   * {{{
+   * val results = Observable(1 to 100).collect() fallbackTo Observable(200 to 300).collect()
+   * }}}
+   *
+   * @param that the Observable to fall back to if `this` Observable fails
+   * @tparam U the type of the returned Observable
+   * @return an Observable that will fall back to the `that` Observable should `this` Observable complete with an `onError`.
+   */
+  def fallbackTo[U >: T](that: Observable[U]): Observable[U] =
+    RecoverWithObservable(this, { case t: Throwable => that }, true)
+
+  /**
+   * Applies the side-effecting function to the final result of this [[Observable]] and returns a new Observable with the result of
+   * this Observable.
+   *
+   * This method allows one to enforce that the callbacks are executed in a specified order.
+   *
+   * Note that if one of the chained `andThen` callbacks throws an exception, that exception is not propagated to the subsequent
+   * `andThen` callbacks. Instead, the subsequent `andThen` callbacks are given the original value of this Observable.
+   *
+   * The following example prints out `10`:
+   *
+   * {{{
+   *   Observable(1 to 10) andThen {
+   *     case r => sys.error("runtime exception")
+   *   } andThen {
+   *     case Success(x) => print(x)
+   *     case Failure(t) => print("Failure")
+   *   }
+   * }}}
+   *
+   * @param pf the partial function to pattern match against
+   * @tparam U the result type of the partial function
+   * @return an Observable that emits the same results as this Observable, with `pf` applied to the final result as a side effect
+   */
+  def andThen[U](pf: PartialFunction[Try[T], U]): Observable[T] = AndThenObservable(this, pf)
+
+  /**
+   * Returns the head of the [[Observable]] in a `scala.concurrent.Future`.
+   *
+   * @return the head result of the [[Observable]].
+   */
+  def head(): Future[T] = {
+    headOption().map {
+      case Some(result) => result
+      case None => null.asInstanceOf[T] // scalastyle:ignore null
+    }(Helpers.DirectExecutionContext)
+  }
+
+  /**
+   * Returns the head option of the [[Observable]] in a `scala.concurrent.Future`.
+   *
+   * @return the head option result of the [[Observable]].
+   * @since 2.2
+   */
+  def headOption(): Future[Option[T]] = {
+    val promise = Promise[Option[T]]()
+    subscribe(new Observer[T]() {
+      @volatile
+      var subscription: Option[Subscription] = None
+      @volatile
+      var terminated: Boolean = false
+
+      override def onSubscribe(sub: Subscription): Unit = {
+        subscription = Some(sub)
+        sub.request(1)
+      }
+
+      override def onError(throwable: Throwable): Unit =
+        completeWith(
+          "onError",
+          { () =>
+            promise.failure(throwable)
+          }
+        )
+
+      override def onComplete(): Unit = {
+        if (!terminated) completeWith(
+          "onComplete",
+          { () =>
+            promise.success(None)
+          }
+        ) // Completed with no values
+      }
+
+      override def onNext(tResult: T): Unit = {
+        completeWith(
+          "onNext",
+          { () =>
+            promise.success(Some(tResult))
+          }
+        )
+      }
+
+      private def completeWith(method: String, action: () => Any): Unit = {
+        if (terminated)
+          throw new IllegalStateException(s"$method called after the Observer has already completed or errored.")
+        terminated = true
+        subscription.foreach((sub: Subscription) => sub.unsubscribe())
+        action()
+      }
+    })
+    promise.future
+  }
+
+  /**
+   * Use a specific execution context for future operations.
+   *
+   * @param context the execution context
+   * @return an Observable that uses the specified execution context
+   */
+  def observeOn(context: ExecutionContext): Observable[T] = ExecutionContextObservable(this, context)
+
+  /**
+   * Convert this observable so that it emits a single Unit to [[Observer.onNext]] before calling [[Observer.onComplete]].
+   *
+   * If the underlying observable errors then that is propagated to the `Observer`. This method is especially useful for chaining
+   * `Observable[Void]` in for comprehensions.
+   *
+   * @return a single observable which emits Unit before completion.
+   * @since 4.4
+   */
+  @deprecated(
+    "Is no longer needed because of the `ToSingleObservableUnit` implicit class.
Scheduled for removal in a major release", + "5.0" + ) + def completeWithUnit(): SingleObservable[Unit] = UnitObservable(this) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/ObservableImplicits.scala b/driver-scala/src/main/scala/org/mongodb/scala/ObservableImplicits.scala new file mode 100644 index 00000000000..86e51b41d41 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/ObservableImplicits.scala @@ -0,0 +1,175 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala + +import org.mongodb.scala.bson.ObjectId +import org.mongodb.scala.gridfs.GridFSFile +import org.mongodb.scala.internal.{ MapObservable, UnitObservable } +import org.reactivestreams.{ Publisher, Subscriber, Subscription => JSubscription } +import reactor.core.publisher.{ Flux, Mono } + +import java.util.concurrent.atomic.AtomicBoolean +import scala.concurrent.Future + +/** + * Implicit conversion support for Publishers, Observables and Subscriptions + * + * Automatically imported into the `org.mongodb.scala` namespace + */ +trait ObservableImplicits { + + implicit class BoxedPublisher[T](pub: => Publisher[T]) extends Observable[T] { + val publisher = pub + + /** + * @return an [[Observable]] (extended) publisher + */ + def toObservable(): Observable[T] = this + + override def subscribe(observer: Observer[_ >: T]): Unit = Flux.from(publisher).subscribe(observer) + override def subscribe(s: Subscriber[_ >: T]): Unit = Flux.from(publisher).subscribe(s) + } + + implicit class BoxedSubscriber[T](sub: => Subscriber[_ >: T]) extends Observer[T] { + val subscriber = sub + + override def onSubscribe(subscription: Subscription): Unit = subscriber.onSubscribe(subscription) + + override def onError(e: Throwable): Unit = subscriber.onError(e) + + override def onComplete(): Unit = subscriber.onComplete() + + override def onNext(result: T): Unit = subscriber.onNext(result) + } + + implicit class BoxedSubscription(subscription: => JSubscription) extends Subscription { + val cancelled = new AtomicBoolean(false) + override def request(n: Long): Unit = subscription.request(n) + + override def unsubscribe(): Unit = { + cancelled.set(true) + subscription.cancel() + } + + override def isUnsubscribed: Boolean = cancelled.get() + + } + + implicit class ToObservableString(pub: => Publisher[java.lang.String]) extends Observable[String] { + val publisher = pub + override def subscribe(observer: Observer[_ >: String]): Unit = Flux.from(publisher).subscribe(observer) + } + + implicit class ToSingleObservablePublisher[T](pub: => Publisher[T]) extends SingleObservable[T] { + val publisher = pub + + /** + * Converts the [[Observable]] to a single result [[Observable]]. 
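+     *
+     * Example (a sketch; `publisher` is an assumed `org.reactivestreams.Publisher[Document]` already in scope):
+     * {{{
+     *   val single: SingleObservable[Document] = publisher.toSingle()
+     * }}}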
+ * + * @return a single result Observable + */ + def toSingle(): SingleObservable[T] = this + + override def subscribe(observer: Observer[_ >: T]): Unit = Mono.from(publisher).subscribe(observer) + } + + implicit class ToSingleObservableInt(pub: => Publisher[java.lang.Integer]) extends SingleObservable[Int] { + val publisher = pub + override def subscribe(observer: Observer[_ >: Int]): Unit = { + Mono + .from(MapObservable(publisher.toObservable(), (i: Integer) => i.toInt)) + .subscribe(observer) + } + } + + implicit class ToSingleObservableLong(pub: => Publisher[java.lang.Long]) extends SingleObservable[Long] { + val publisher = pub + override def subscribe(observer: Observer[_ >: Long]): Unit = + Mono + .from(MapObservable(publisher.toObservable(), (i: java.lang.Long) => i.toLong)) + .subscribe(observer) + } + + implicit class ToSingleObservableObjectId(pub: => Publisher[org.bson.types.ObjectId]) + extends SingleObservable[ObjectId] { + val publisher = pub + override def subscribe(observer: Observer[_ >: ObjectId]): Unit = Mono.from(publisher).subscribe(observer) + } + + implicit class ToSingleObservableGridFS(pub: => Publisher[com.mongodb.client.gridfs.model.GridFSFile]) + extends SingleObservable[GridFSFile] { + val publisher = pub + override def subscribe(observer: Observer[_ >: GridFSFile]): Unit = Mono.from(publisher).subscribe(observer) + } + + /** + * An [[Observable]] that emits + * + * - exactly one item, if the wrapped `Publisher` does not signal an error, even if the represented stream is empty; + * - no items if the wrapped `Publisher` signals an error. + * + * @param pub A `Publisher` representing a finite stream. + */ + implicit class ToSingleObservableUnit(pub: => Publisher[Void]) extends SingleObservable[Unit] { + val publisher = pub + + override def subscribe(observer: Observer[_ >: Unit]): Unit = { + // We must call `toObservable` in order to avoid infinite recursion + // caused by the implicit conversion of `Publisher[Void]` to `SingleObservable[Unit]`. + UnitObservable(publisher.toObservable()).subscribe(observer) + } + } + + implicit class ObservableFuture[T](obs: => Observable[T]) { + val observable = obs + + /** + * Collects the [[Observable]] results and converts to a `scala.concurrent.Future`. + * + * Automatically subscribes to the `Observable` and uses the [[[Observable.collect[S]()*]]] method to aggregate the results. + * + * @note If the Observable is large then this will consume lots of memory! + * If the underlying Observable is infinite this Observable will never complete. + * @return a future representation of the whole Observable + */ + def toFuture(): Future[Seq[T]] = observable.collect().head() + + } + + implicit class SingleObservableFuture[T](obs: => SingleObservable[T]) { + val observable = obs + + /** + * Collects the [[Observable]] results and converts to a `scala.concurrent.Future`. + * + * Automatically subscribes to the `Observable` and uses the [[Observable.head]] method to aggregate the results. + * + * @note If the Observable is large then this will consume lots of memory! + * If the underlying Observable is infinite this Observable will never complete. + * @return a future representation of the whole Observable + */ + def toFuture(): Future[T] = observable.head() + + /** + * Collects the [[Observable]] result and converts to a `scala.concurrent.Future`. 
+ * @return a future representation of the Observable + * + */ + def toFutureOption(): Future[Option[T]] = observable.headOption() + } + +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/Observer.scala b/driver-scala/src/main/scala/org/mongodb/scala/Observer.scala new file mode 100644 index 00000000000..7b9ad2740ea --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/Observer.scala @@ -0,0 +1,86 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala + +import org.reactivestreams.{ Subscriber, Subscription => JSubscription } + +/** + * A Scala based wrapper of the `Subscriber` interface which provides a mechanism for receiving push-based notifications. + * + * Will receive a call to `Observer.onSubscribe(subscription: Subscription)` on subscription to the [[Observable]]. + * + * Default implementations of this trait are greedy and will call [[Subscription.request]] with `Long.MaxValue` so that all results are + * requested. Custom implementations of the `onSubscribe` method can be used to control "back-pressure" and ensure that only demand that + * the `Observer` is capable of handling is requested. + * + * After signaling demand: + * + * - Zero or more invocations of [[Observer.onNext]] up to the maximum number defined by [[Subscription.request]] + * - Single invocation of [[Observer.onError]] or [[Observer.onComplete]] which signals a terminal state after which no + * further events will be sent. + * + * @tparam T The type of element signaled. + */ +trait Observer[T] extends Subscriber[T] { + + /** + * Invoked on subscription to an [[Observable]]. + * + * No operation will happen until [[Subscription.request]] is invoked. + * + * It is the responsibility of this Subscriber instance to call [[Subscription.request]] whenever more data is wanted. + * + * @param subscription [[Subscription]] that allows requesting data via [[Subscription.request]] + */ + def onSubscribe(subscription: Subscription): Unit = subscription.request(Long.MaxValue) + + /** + * Provides the Observer with a new item to observe. + * + * The Observer may call this method 0 or more times. + * + * The [[Observable]] will not call this method again after it calls either [[onComplete]] or + * [[onError]]. + * + * @param result the item emitted by the [[Observable]] + */ + def onNext(result: T): Unit + + /** + * Notifies the Observer that the [[Observable]] has experienced an error condition. + * + * If the [[Observable]] calls this method, it will not thereafter call [[onNext]] or [[onComplete]]. + * + * @param e the exception encountered by the [[Observable]] + */ + def onError(e: Throwable): Unit + + /** + * Notifies the Subscriber that the [[Observable]] has finished sending push-based notifications. + * + * The [[Observable]] will not call this method if it calls [[onError]]. + */ + def onComplete(): Unit + + /** + * Handles the automatic boxing of a Java subscription so it conforms to the interface. 
+   *
+   * @note Users should not have to implement this method but rather use the Scala `Subscription`.
+   * @param subscription the Java subscription
+   */
+  override def onSubscribe(subscription: JSubscription): Unit = onSubscribe(BoxedSubscription(subscription))
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/ReadConcern.scala b/driver-scala/src/main/scala/org/mongodb/scala/ReadConcern.scala
new file mode 100644
index 00000000000..d56a2e5c73c
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/ReadConcern.scala
@@ -0,0 +1,76 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala
+
+import com.mongodb.{ ReadConcern => JReadConcern }
+
+/**
+ * The readConcern option allows clients to choose a level of isolation for their reads.
+ *
+ * @note Requires MongoDB 3.2 or greater
+ * @since 1.1
+ */
+object ReadConcern {
+
+  /**
+   * Construct a new read concern
+   *
+   * @param readConcernLevel the read concern level
+   */
+  def apply(readConcernLevel: ReadConcernLevel): ReadConcern = new JReadConcern(readConcernLevel)
+
+  /**
+   * Use the server's default read concern.
+   */
+  val DEFAULT: ReadConcern = JReadConcern.DEFAULT
+
+  /**
+   * Return the node's most recent copy of data. Provides no guarantee that the data has been written to a majority of the nodes.
+   */
+  val LOCAL: ReadConcern = JReadConcern.LOCAL
+
+  /**
+   * Return the node's most recent copy of the data confirmed as having been written to a majority of the nodes.
+   */
+  val MAJORITY: ReadConcern = JReadConcern.MAJORITY
+
+  /**
+   * The linearizable read concern.
+   *
+   * This read concern is only compatible with [[org.mongodb.scala.ReadPreference$.primary]]
+   *
+   * @note Requires MongoDB 3.4 or greater
+   * @since 2.2
+   */
+  val LINEARIZABLE: ReadConcern = JReadConcern.LINEARIZABLE
+
+  /**
+   * The snapshot read concern level.
+   *
+   * @note Requires MongoDB 4.0 or greater
+   * @since 2.4
+   */
+  val SNAPSHOT: ReadConcern = JReadConcern.SNAPSHOT
+
+  /**
+   * The available read concern level.
+   *
+   * @note Requires MongoDB 4.0 or greater
+   * @since 2.5
+   */
+  val AVAILABLE: ReadConcern = JReadConcern.AVAILABLE
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/ReadConcernLevel.scala b/driver-scala/src/main/scala/org/mongodb/scala/ReadConcernLevel.scala
new file mode 100644
index 00000000000..cf2ae2d4733
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/ReadConcernLevel.scala
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala
+
+import scala.util.Try
+
+import com.mongodb.{ ReadConcernLevel => JReadConcernLevel }
+
+/**
+ * The readConcern level of isolation for reads.
+ *
+ * @note Requires MongoDB 3.2 or greater
+ * @since 1.1
+ */
+object ReadConcernLevel {
+
+  /**
+   * Return the node's most recent copy of data. Provides no guarantee that the data has been written to a majority of the nodes.
+   */
+  val LOCAL: ReadConcernLevel = JReadConcernLevel.LOCAL
+
+  /**
+   * Return the node's most recent copy of the data confirmed as having been written to a majority of the nodes.
+   */
+  val MAJORITY: ReadConcernLevel = JReadConcernLevel.MAJORITY
+
+  /**
+   * The linearizable read concern.
+   *
+   * This read concern is only compatible with [[org.mongodb.scala.ReadPreference$.primary]]
+   *
+   * @note Requires MongoDB 3.4 or greater
+   * @since 2.2
+   */
+  val LINEARIZABLE: ReadConcernLevel = JReadConcernLevel.LINEARIZABLE
+
+  /**
+   * The snapshot read concern level.
+   *
+   * @note Requires MongoDB 4.0 or greater
+   * @since 2.4
+   */
+  val SNAPSHOT: ReadConcernLevel = JReadConcernLevel.SNAPSHOT
+
+  /**
+   * The available read concern level.
+   *
+   * @note Requires MongoDB 4.0 or greater
+   * @since 2.5
+   */
+  val AVAILABLE: ReadConcernLevel = JReadConcernLevel.AVAILABLE
+
+  /**
+   * Returns the read concern level from the given string representation.
+   *
+   * @param readConcernLevel the read concern level string.
+   * @return the read concern level wrapped in a `Try`
+   */
+  def fromString(readConcernLevel: String): Try[ReadConcernLevel] = Try(JReadConcernLevel.fromString(readConcernLevel))
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/ReadPreference.scala b/driver-scala/src/main/scala/org/mongodb/scala/ReadPreference.scala
new file mode 100644
index 00000000000..c44f37c971d
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/ReadPreference.scala
@@ -0,0 +1,332 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala
+
+import java.util.concurrent.TimeUnit.MILLISECONDS
+
+import scala.collection.JavaConverters._
+import scala.concurrent.duration.Duration
+
+import com.mongodb.{ ReadPreference => JReadPreference }
+
+/**
+ * The preferred replica set members to which a query or command can be sent.
+ *
+ * @since 1.0
+ */
+object ReadPreference {
+
+  /**
+   * Gets a read preference that forces reads to the primary.
+   *
+   * @return ReadPreference which reads from primary only
+   */
+  def primary(): ReadPreference = JReadPreference.primary()
+
+  /**
+   * Gets a read preference that forces reads to the primary if available, otherwise to a secondary.
+   *
+   * @return ReadPreference which reads primary if available.
+   */
+  def primaryPreferred(): ReadPreference = JReadPreference.primaryPreferred()
+
+  /**
+   * Gets a read preference that forces reads to a secondary.
+   *
+   * @return ReadPreference which reads secondary.
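+   * @note A usage sketch (assumes `collection` is a `MongoCollection[Document]` in scope):
+   * {{{
+   *   collection.withReadPreference(ReadPreference.secondary()).find()
+   * }}}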
+ */ + def secondary(): ReadPreference = JReadPreference.secondary() + + /** + * Gets a read preference that forces reads to a secondary if one is available, otherwise to the primary. + * + * @return ReadPreference which reads secondary if available, otherwise from primary. + */ + def secondaryPreferred(): ReadPreference = JReadPreference.secondaryPreferred() + + /** + * Gets a read preference that forces reads to a primary or a secondary. + * + * @return ReadPreference which reads nearest + */ + def nearest(): ReadPreference = JReadPreference.nearest() + + /** + * Gets a read preference that forces reads to the primary if available, otherwise to a secondary. + * + * @param maxStaleness the max allowable staleness of secondaries. The minimum value is either 90 seconds, or the heartbeat frequency + * plus 10 seconds, whichever is greatest. + * @return ReadPreference which reads primary if available. + * @since 1.2 + * @note Requires MongoDB 3.4 or greater + */ + def primaryPreferred(maxStaleness: Duration): ReadPreference = + JReadPreference.primaryPreferred(maxStaleness.toMillis, MILLISECONDS) + + /** + * Gets a read preference that forces reads to a secondary. + * + * @param maxStaleness the max allowable staleness of secondaries. The minimum value is either 90 seconds, or the heartbeat frequency + * plus 10 seconds, whichever is greatest. + * @return ReadPreference which reads secondary. + * @since 1.2 + * @note Requires MongoDB 3.4 or greater + */ + def secondary(maxStaleness: Duration): ReadPreference = JReadPreference.secondary(maxStaleness.toMillis, MILLISECONDS) + + /** + * Gets a read preference that forces reads to a secondary if one is available, otherwise to the primary. + * + * @param maxStaleness the max allowable staleness of secondaries. The minimum value is either 90 seconds, or the heartbeat frequency + * plus 10 seconds, whichever is greatest. + * @return ReadPreference which reads secondary if available, otherwise from primary. + * @since 1.2 + * @note Requires MongoDB 3.4 or greater + */ + def secondaryPreferred(maxStaleness: Duration): ReadPreference = + JReadPreference.secondaryPreferred(maxStaleness.toMillis, MILLISECONDS) + + /** + * Gets a read preference that forces reads to a primary or a secondary. + * + * @param maxStaleness the max allowable staleness of secondaries. The minimum value is either 90 seconds, or the heartbeat frequency + * plus 10 seconds, whichever is greatest. + * @return ReadPreference which reads nearest + * @since 1.2 + * @note Requires MongoDB 3.4 or greater + */ + def nearest(maxStaleness: Duration): ReadPreference = JReadPreference.nearest(maxStaleness.toMillis, MILLISECONDS) + + /** + * Gets a read preference that forces reads to the primary if available, otherwise to a secondary with the given set of tags. + * + * @param tagSet the set of tags to limit the list of secondaries to. + * @return ReadPreference which reads primary if available, otherwise a secondary respective of tags. + */ + def primaryPreferred(tagSet: TagSet): TaggableReadPreference = JReadPreference.primaryPreferred(tagSet) + + /** + * Gets a read preference that forces reads to a secondary with the given set of tags. + * + * @param tagSet the set of tags to limit the list of secondaries to + * @return ReadPreference which reads secondary respective of tags. 
+   */
+  def secondary(tagSet: TagSet): TaggableReadPreference = JReadPreference.secondary(tagSet)
+
+  /**
+   * Gets a read preference that forces reads to a secondary with the given set of tags, or the primary if none are available.
+   *
+   * @param tagSet the set of tags to limit the list of secondaries to
+   * @return ReadPreference which reads secondary if available respective of tags, otherwise from primary irrespective of tags.
+   */
+  def secondaryPreferred(tagSet: TagSet): TaggableReadPreference = JReadPreference.secondaryPreferred(tagSet)
+
+  /**
+   * Gets a read preference that forces reads to the primary or a secondary with the given set of tags.
+   *
+   * @param tagSet the set of tags to limit the list of secondaries to
+   * @return ReadPreference which reads nearest node respective of tags.
+   */
+  def nearest(tagSet: TagSet): TaggableReadPreference = JReadPreference.nearest(tagSet)
+
+  /**
+   * Gets a read preference that forces reads to the primary if available, otherwise to a secondary with the given set of tags.
+   *
+   * @param tagSet the set of tags to limit the list of secondaries to.
+   * @param maxStaleness the max allowable staleness of secondaries. The minimum value is either 90 seconds, or the heartbeat frequency
+   *                     plus 10 seconds, whichever is greatest.
+   * @return ReadPreference which reads primary if available, otherwise a secondary respective of tags.
+   * @since 1.2
+   * @note Requires MongoDB 3.4 or greater
+   */
+  def primaryPreferred(tagSet: TagSet, maxStaleness: Duration): TaggableReadPreference =
+    JReadPreference.primaryPreferred(tagSet, maxStaleness.toMillis, MILLISECONDS)
+
+  /**
+   * Gets a read preference that forces reads to a secondary with the given set of tags.
+   *
+   * @param tagSet the set of tags to limit the list of secondaries to
+   * @param maxStaleness the max allowable staleness of secondaries. The minimum value is either 90 seconds, or the heartbeat frequency
+   *                     plus 10 seconds, whichever is greatest.
+   * @return ReadPreference which reads secondary respective of tags.
+   * @since 1.2
+   * @note Requires MongoDB 3.4 or greater
+   */
+  def secondary(tagSet: TagSet, maxStaleness: Duration): TaggableReadPreference =
+    JReadPreference.secondary(tagSet, maxStaleness.toMillis, MILLISECONDS)
+
+  /**
+   * Gets a read preference that forces reads to a secondary with the given set of tags, or the primary if none are available.
+   *
+   * @param tagSet the set of tags to limit the list of secondaries to
+   * @param maxStaleness the max allowable staleness of secondaries. The minimum value is either 90 seconds, or the heartbeat frequency
+   *                     plus 10 seconds, whichever is greatest.
+   * @return ReadPreference which reads secondary if available respective of tags, otherwise from primary irrespective of tags.
+   * @since 1.2
+   * @note Requires MongoDB 3.4 or greater
+   */
+  def secondaryPreferred(tagSet: TagSet, maxStaleness: Duration): TaggableReadPreference =
+    JReadPreference.secondaryPreferred(tagSet, maxStaleness.toMillis, MILLISECONDS)
+
+  /**
+   * Gets a read preference that forces reads to the primary or a secondary with the given set of tags.
+   *
+   * @param tagSet the set of tags to limit the list of secondaries to
+   * @param maxStaleness the max allowable staleness of secondaries. The minimum value is either 90 seconds, or the heartbeat frequency
+   *                     plus 10 seconds, whichever is greatest.
+   * @return ReadPreference which reads nearest node respective of tags.
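+   * @note A tag-based sketch (the tag names and values are placeholders):
+   * {{{
+   *   import java.util.concurrent.TimeUnit
+   *   import scala.concurrent.duration.Duration
+   *
+   *   ReadPreference.nearest(TagSet(Seq(Tag("dc", "east"), Tag("use", "reporting"))), Duration(120, TimeUnit.SECONDS))
+   * }}}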
+ * @since 1.2 + * @note Requires MongoDB 3.4 or greater + */ + def nearest(tagSet: TagSet, maxStaleness: Duration): TaggableReadPreference = + JReadPreference.nearest(tagSet, maxStaleness.toMillis, MILLISECONDS) + + /** + * Gets a read preference that forces reads to the primary if available, otherwise to a secondary with one of the given sets of tags. + * The driver will look for a secondary with each tag set in the given list, stopping after one is found, + * or failing if no secondary can be found that matches any of the tag sets in the list. + * + * @param tagSetList the list of tag sets to limit the list of secondaries to + * @return ReadPreference which reads primary if available, otherwise a secondary respective of tags. + */ + def primaryPreferred(tagSetList: Seq[TagSet]): TaggableReadPreference = + JReadPreference.primaryPreferred(tagSetList.asJava) + + /** + * Gets a read preference that forces reads to a secondary with one of the given sets of tags. + * The driver will look for a secondary with each tag set in the given list, stopping after one is found, + * or failing if no secondary can be found that matches any of the tag sets in the list. + * + * @param tagSetList the list of tag sets to limit the list of secondaries to + * @return ReadPreference which reads secondary respective of tags. + */ + def secondary(tagSetList: Seq[TagSet]): TaggableReadPreference = JReadPreference.secondary(tagSetList.asJava) + + /** + * Gets a read preference that forces reads to a secondary with one of the given sets of tags. + * The driver will look for a secondary with each tag set in the given list, stopping after one is found, + * or the primary if none are available. + * + * @param tagSetList the list of tag sets to limit the list of secondaries to + * @return ReadPreference which reads secondary if available respective of tags, otherwise from primary irrespective of tags. + */ + def secondaryPreferred(tagSetList: Seq[TagSet]): TaggableReadPreference = + JReadPreference.secondaryPreferred(tagSetList.asJava) + + /** + * Gets a read preference that forces reads to the primary or a secondary with one of the given sets of tags. + * The driver will look for a secondary with each tag set in the given list, stopping after one is found, + * or the primary if none are available. + * + * @param tagSetList the list of tag sets to limit the list of secondaries to + * @return ReadPreference which reads nearest node respective of tags. + */ + def nearest(tagSetList: Seq[TagSet]): TaggableReadPreference = JReadPreference.nearest(tagSetList.asJava) + + /** + * Gets a read preference that forces reads to the primary if available, otherwise to a secondary with one of the given sets of tags. + * The driver will look for a secondary with each tag set in the given list, stopping after one is found, + * or failing if no secondary can be found that matches any of the tag sets in the list. + * + * @param tagSetList the list of tag sets to limit the list of secondaries to + * @param maxStaleness the max allowable staleness of secondaries. The minimum value is either 90 seconds, or the heartbeat frequency + * plus 10 seconds, whichever is greatest. + * @return ReadPreference which reads primary if available, otherwise a secondary respective of tags. 
+ * @since 1.2 + * @note Requires MongoDB 3.4 or greater + */ + def primaryPreferred(tagSetList: Seq[TagSet], maxStaleness: Duration): TaggableReadPreference = + JReadPreference.primaryPreferred(tagSetList.asJava, maxStaleness.toMillis, MILLISECONDS) + + /** + * Gets a read preference that forces reads to a secondary with one of the given sets of tags. + * The driver will look for a secondary with each tag set in the given list, stopping after one is found, + * or failing if no secondary can be found that matches any of the tag sets in the list. + * + * @param tagSetList the list of tag sets to limit the list of secondaries to + * @param maxStaleness the max allowable staleness of secondaries. The minimum value is either 90 seconds, or the heartbeat frequency + * plus 10 seconds, whichever is greatest. + * @return ReadPreference which reads secondary respective of tags. + * @since 1.2 + * @note Requires MongoDB 3.4 or greater + */ + def secondary(tagSetList: Seq[TagSet], maxStaleness: Duration): TaggableReadPreference = + JReadPreference.secondary(tagSetList.asJava, maxStaleness.toMillis, MILLISECONDS) + + /** + * Gets a read preference that forces reads to a secondary with one of the given sets of tags. + * The driver will look for a secondary with each tag set in the given list, stopping after one is found, + * or the primary if none are available. + * + * @param tagSetList the list of tag sets to limit the list of secondaries to + * @param maxStaleness the max allowable staleness of secondaries. The minimum value is either 90 seconds, or the heartbeat frequency + * plus 10 seconds, whichever is greatest. + * @return ReadPreference which reads secondary if available respective of tags, otherwise from primary irrespective of tags. + * @since 1.2 + * @note Requires MongoDB 3.4 or greater + */ + def secondaryPreferred(tagSetList: Seq[TagSet], maxStaleness: Duration): TaggableReadPreference = + JReadPreference.secondaryPreferred(tagSetList.asJava, maxStaleness.toMillis, MILLISECONDS) + + /** + * Gets a read preference that forces reads to the primary or a secondary with one of the given sets of tags. + * The driver will look for a secondary with each tag set in the given list, stopping after one is found, + * or the primary if none are available. + * + * @param tagSetList the list of tag sets to limit the list of secondaries to + * @param maxStaleness the max allowable staleness of secondaries. The minimum value is either 90 seconds, or the heartbeat frequency + * plus 10 seconds, whichever is greatest. + * @return ReadPreference which reads nearest node respective of tags. + * @since 1.2 + * @note Requires MongoDB 3.4 or greater + */ + def nearest(tagSetList: Seq[TagSet], maxStaleness: Duration): TaggableReadPreference = + JReadPreference.nearest(tagSetList.asJava, maxStaleness.toMillis, MILLISECONDS) + + /** + * Creates a read preference from the given read preference name. + * + * @param name the name of the read preference + * @return the read preference + */ + def valueOf(name: String): ReadPreference = JReadPreference.valueOf(name) + + /** + * Creates a taggable read preference from the given read preference name and list of tag sets. 
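+   *
+   * Example (a sketch; the tag name and value are placeholders):
+   * {{{
+   *   ReadPreference.valueOf("secondaryPreferred", Seq(TagSet(Tag("dc", "east"))))
+   * }}}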
+ * + * @param name the name of the read preference + * @param tagSetList the list of tag sets + * @return the taggable read preference + */ + def valueOf(name: String, tagSetList: Seq[TagSet]): TaggableReadPreference = + JReadPreference.valueOf(name, tagSetList.asJava) + + /** + * Creates a taggable read preference from the given read preference name and list of tag sets. + * + * @param name the name of the read preference + * @param tagSetList the list of tag sets + * @param maxStaleness the max allowable staleness of secondaries. The minimum value is either 90 seconds, or the heartbeat frequency + * plus 10 seconds, whichever is greatest. + * @return the taggable read preference + * @since 1.2 + * @note Requires MongoDB 3.4 or greater + */ + def valueOf(name: String, tagSetList: Seq[TagSet], maxStaleness: Duration): TaggableReadPreference = + JReadPreference.valueOf(name, tagSetList.asJava, maxStaleness.toMillis, MILLISECONDS) + +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/ServerAddress.scala b/driver-scala/src/main/scala/org/mongodb/scala/ServerAddress.scala new file mode 100644 index 00000000000..b16ec41b8a1 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/ServerAddress.scala @@ -0,0 +1,72 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala + +import java.net.{ InetAddress, InetSocketAddress } + +import com.mongodb.{ ServerAddress => JServerAddress } + +/** + * Represents the location of a MongoDB server - i.e. 
server name and port number
+ *
+ * @since 1.0
+ */
+object ServerAddress {
+
+  /**
+   * Creates a ServerAddress with default host and port
+   */
+  def apply(): JServerAddress = new JServerAddress()
+
+  /**
+   * Creates a ServerAddress with default port
+   *
+   * @param host hostname
+   */
+  def apply(host: String): JServerAddress = new JServerAddress(host)
+
+  /**
+   * Creates a ServerAddress with default port
+   *
+   * @param inetAddress host address
+   */
+  def apply(inetAddress: InetAddress): JServerAddress = new JServerAddress(inetAddress)
+
+  /**
+   * Creates a ServerAddress
+   *
+   * @param inetAddress host address
+   * @param port mongod port
+   */
+  def apply(inetAddress: InetAddress, port: Int): JServerAddress = new JServerAddress(inetAddress, port)
+
+  /**
+   * Creates a ServerAddress
+   *
+   * @param inetSocketAddress inet socket address containing hostname and port
+   */
+  def apply(inetSocketAddress: InetSocketAddress): JServerAddress = new JServerAddress(inetSocketAddress)
+
+  /**
+   * Creates a ServerAddress
+   *
+   * @param host hostname
+   * @param port mongod port
+   */
+  def apply(host: String, port: Int): JServerAddress = new JServerAddress(host, port)
+
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/SingleObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/SingleObservable.scala
new file mode 100644
index 00000000000..fcd8c90f84a
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/SingleObservable.scala
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala
+
+import org.mongodb.scala.internal.SingleItemObservable
+import org.reactivestreams.Subscriber
+
+/**
+ * A companion object for [[SingleObservable]]
+ *
+ * @since 2.0
+ */
+object SingleObservable {
+
+  /**
+   * Creates a SingleObservable from an item.
+   *
+   * Convenient for testing or debugging.
+   *
+   * @param item the item to create an observable from
+   * @tparam A the type of the SingleObservable
+   * @return an Observable that emits the item
+   */
+  def apply[A](item: A): SingleObservable[A] = SingleItemObservable(item)
+
+}
+
+/**
+ * A `SingleObservable` represents an [[Observable]] that emits one or no items.
+ *
+ * @tparam T the type of element signaled.
+ * @since 2.0
+ */
+trait SingleObservable[T] extends Observable[T] {
+
+  /**
+   * Request `SingleObservable` to start streaming data.
+   *
+   * This is a "factory method" and can be called multiple times, each time starting a new [[Subscription]].
+   * Each `Subscription` will work for only a single [[Observer]].
+   *
+   * If the `Observable` rejects the subscription attempt or otherwise fails it will signal the error via [[Observer.onError]].
+   *
+   * @param observer the `Observer` that will consume signals from this `Observable`
+   */
+  def subscribe(observer: Observer[_ >: T]): Unit
+
+  /**
+   * Handles the automatic boxing of a Java `Subscriber` so it conforms to the interface.
+   *
+   * @note Users should not have to implement this method but rather use the Scala `Observer`.
+   * @param observer the `Observer` that will consume signals from this `Observable`
+   */
+  override def subscribe(observer: Subscriber[_ >: T]): Unit = this.subscribe(BoxedSubscriber(observer))
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/Subscription.scala b/driver-scala/src/main/scala/org/mongodb/scala/Subscription.scala
new file mode 100644
index 00000000000..bf0ec33d81f
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/Subscription.scala
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala
+
+import org.reactivestreams.{ Subscription => JSubscription }
+
+/**
+ * A `Subscription` represents a one-to-one lifecycle of an [[Observer]] subscribing to an [[Observable]].
+ *
+ * Instances can only be used once by a single [[Observer]].
+ *
+ * It is used both to signal desire for data and to allow for unsubscribing.
+ */
+trait Subscription extends JSubscription {
+
+  /**
+   * No operation will be sent to MongoDB from the [[Observable]] until demand is signaled via this method.
+   *
+   * It can be called however often and whenever needed, but the outstanding cumulative demand must never exceed `Long.MaxValue`.
+   * An outstanding cumulative demand of `Long.MaxValue` may be treated by the [[Observable]] as "effectively unbounded".
+   *
+   * Whatever has been requested might be sent, so only signal demand for what can be safely handled.
+   *
+   * An [[Observable]] can send less than is requested if the stream ends, but then must emit either
+   * [[Observer.onError]] or [[Observer.onComplete]].
+   *
+   * @param n the strictly positive number of elements to request from the upstream [[Observable]]
+   */
+  def request(n: Long): Unit
+
+  /**
+   * Request the [[Observable]] to stop sending data and clean up resources.
+   *
+   * As this request is asynchronous, data may still be sent to meet previously signalled demand after calling cancel.
+   */
+  def unsubscribe(): Unit
+
+  /**
+   * Indicates whether this `Subscription` is currently unsubscribed.
+   *
+   * @return `true` if this `Subscription` is currently unsubscribed, `false` otherwise
+   */
+  def isUnsubscribed: Boolean
+
+  /**
+   * Request the [[Observable]] to stop sending data and clean up resources.
+   *
+   * As this request is asynchronous, data may still be sent to meet previously signalled demand after calling cancel.
+   */
+  override def cancel(): Unit = unsubscribe()
}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/Tag.scala b/driver-scala/src/main/scala/org/mongodb/scala/Tag.scala
new file mode 100644
index 00000000000..8402f97e003
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/Tag.scala
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala
+
+import com.mongodb.{ Tag => JTag }
+
+/**
+ * A replica set tag.
+ */
+object Tag {
+  def apply(name: String, value: String): Tag = new JTag(name, value)
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/TagSet.scala b/driver-scala/src/main/scala/org/mongodb/scala/TagSet.scala
new file mode 100644
index 00000000000..8f56820ce10
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/TagSet.scala
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala
+
+import scala.collection.JavaConverters._
+
+import com.mongodb.{ TagSet => JTagSet }
+
+/**
+ * An immutable set of tags, used to select members of a replica set to use for read operations.
+ */
+object TagSet {
+
+  /**
+   * An empty set of tags.
+   */
+  def apply(): TagSet = new JTagSet()
+
+  /**
+   * A set of tags containing the single given tag
+   *
+   * @param tag the tag
+   */
+  def apply(tag: Tag): TagSet = new JTagSet(tag)
+
+  /**
+   * A set of tags containing the given list of tags.
+   *
+   * @param tagList the list of tags
+   */
+  def apply(tagList: Seq[Tag]): TagSet = new JTagSet(tagList.asJava)
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/TransactionOptions.scala b/driver-scala/src/main/scala/org/mongodb/scala/TransactionOptions.scala
new file mode 100644
index 00000000000..41b68a3ef0f
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/TransactionOptions.scala
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala
+
+import com.mongodb.{ TransactionOptions => JTransactionOptions }
+
+/**
+ * The options to apply to transactions.
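+ *
+ * A builder sketch (the fields shown are illustrative, not exhaustive):
+ * {{{
+ *   val txnOptions: TransactionOptions = TransactionOptions.builder()
+ *     .readConcern(ReadConcern.MAJORITY)
+ *     .writeConcern(WriteConcern.MAJORITY)
+ *     .build()
+ * }}}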
+ *
+ * @see TransactionOptions
+ * @since 2.4
+ */
+object TransactionOptions {
+
+  /**
+   * Gets an instance of a builder
+   *
+   * @return a builder instance
+   */
+  def builder(): Builder = JTransactionOptions.builder()
+
+  /**
+   * TransactionOptions builder type
+   */
+  type Builder = JTransactionOptions.Builder
+
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/WriteConcern.scala b/driver-scala/src/main/scala/org/mongodb/scala/WriteConcern.scala
new file mode 100644
index 00000000000..63eb02eea79
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/WriteConcern.scala
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala
+
+import com.mongodb.{ WriteConcern => JWriteConcern }
+
+/**
+ * Controls the acknowledgment of write operations with various options.
+ *
+ * ==`w`==
+ *  - 0: Don't wait for acknowledgement from the server
+ *  - 1: Wait for acknowledgement, but don't wait for secondaries to replicate
+ *  - >=2: Wait for one or more secondaries to also acknowledge
+ *  - "majority": Wait for a majority of secondaries to also acknowledge
+ *  - "<tag set name>": Wait for one or more secondaries to also acknowledge based on a tag set name
+ *
+ * ==`wTimeout` - how long to wait for secondaries before failing ==
+ *  - 0: indefinite
+ *  - >0: time to wait in milliseconds
+ *
+ * ==Other options:==
+ *
+ *  - `journal`: If true block until write operations have been committed to the journal. Cannot be used in combination with `fsync`.
+ *    Prior to MongoDB 2.6 this option was ignored if the server was running without journaling. Starting with MongoDB 2.6
+ *    write operations will fail with an exception if this option is used when the server is running without journaling.
+ *
+ * == Implicit helper ==
+ *
+ * The [[ScalaWriteConcern]] implicit allows for chainable building of the WriteConcern, e.g.:
+ *
+ * {{{
+ *   val myWriteConcern = WriteConcern.ACKNOWLEDGED.withJournal(true).withWTimeout(Duration(10, TimeUnit.MILLISECONDS))
+ * }}}
+ *
+ * @since 1.0
+ */
+object WriteConcern {
+
+  /**
+   * Write operations that use this write concern will wait for acknowledgement from the primary server before returning. Exceptions are
+   * raised for network issues, and server errors.
+   */
+  val ACKNOWLEDGED: JWriteConcern = JWriteConcern.ACKNOWLEDGED
+
+  /**
+   * Write operations that use this write concern will wait for acknowledgement from a single member.
+   */
+  val W1: JWriteConcern = apply(1)
+
+  /**
+   * Write operations that use this write concern will wait for acknowledgement from two members.
+   */
+  val W2: JWriteConcern = apply(2)
+
+  /**
+   * Write operations that use this write concern will wait for acknowledgement from three members.
+   */
+  val W3: JWriteConcern = apply(3)
+
+  /**
+   * Write operations that use this write concern will return as soon as the message is written to the socket. Exceptions are raised for
+   * network issues, but not server errors.
+ */ + val UNACKNOWLEDGED: JWriteConcern = JWriteConcern.UNACKNOWLEDGED + + /** + * Exceptions are raised for network issues, and server errors; the write operation waits for the server to group commit to the journal + * file on disk. + */ + val JOURNALED: JWriteConcern = JWriteConcern.JOURNALED + + /** + * Exceptions are raised for network issues, and server errors; waits on a majority of servers for the write operation. + */ + val MAJORITY: JWriteConcern = JWriteConcern.MAJORITY + + /** + * Create a WriteConcern with the set number of acknowledged writes before returning + * + * @param w number of writes + */ + def apply(w: Int): JWriteConcern = new JWriteConcern(w) + + /** + * Tag set named write concern or a "majority" write concern. + * + * @param w Write Concern tag set name or "majority", representing the servers to ensure write propagation to before acknowledgment. + * Do not use string representation of integer values for w. + */ + def apply(w: String): JWriteConcern = new JWriteConcern(w) + +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/connection/AsyncTransportSettings.scala b/driver-scala/src/main/scala/org/mongodb/scala/connection/AsyncTransportSettings.scala new file mode 100644 index 00000000000..5157c58501d --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/connection/AsyncTransportSettings.scala @@ -0,0 +1,32 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.connection + +import com.mongodb.connection.{ AsyncTransportSettings => JAsyncTransportSettings } + +/** + * Async transport settings for the driver. + * + * @since 5.2 + */ +object AsyncTransportSettings { + + /** + * AsyncTransportSettings builder type + */ + type Builder = JAsyncTransportSettings.Builder +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/connection/ClusterSettings.scala b/driver-scala/src/main/scala/org/mongodb/scala/connection/ClusterSettings.scala new file mode 100644 index 00000000000..36778fb68f1 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/connection/ClusterSettings.scala @@ -0,0 +1,40 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.connection + +import com.mongodb.connection.{ ClusterSettings => JClusterSettings } + +/** + * Settings for the cluster. + * + * @since 1.0 + */ +object ClusterSettings { + + /** + * Get a builder for this class. 
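+   *
+   * Example (a sketch; the host and port are placeholders):
+   * {{{
+   *   import scala.collection.JavaConverters._
+   *
+   *   val clusterSettings: ClusterSettings = ClusterSettings.builder()
+   *     .hosts(List(ServerAddress("localhost", 27017)).asJava)
+   *     .build()
+   * }}}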
+   *
+   * @return a new Builder for creating ClusterSettings.
+   */
+  def builder(): Builder = JClusterSettings.builder()
+
+  /**
+   * ClusterSettings builder type
+   */
+  type Builder = JClusterSettings.Builder
+
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/connection/ConnectionPoolSettings.scala b/driver-scala/src/main/scala/org/mongodb/scala/connection/ConnectionPoolSettings.scala
new file mode 100644
index 00000000000..491ff7c2ae3
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/connection/ConnectionPoolSettings.scala
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.connection
+
+import com.mongodb.connection.{ ConnectionPoolSettings => JConnectionPoolSettings }
+
+/**
+ * All settings that relate to the pool of connections to a MongoDB server.
+ *
+ * @since 1.0
+ */
+object ConnectionPoolSettings {
+
+  /**
+   * Gets a Builder for creating a new ConnectionPoolSettings instance.
+   *
+   * @return a new Builder for creating ConnectionPoolSettings.
+   */
+  def builder(): Builder = JConnectionPoolSettings.builder()
+
+  /**
+   * ConnectionPoolSettings builder type
+   */
+  type Builder = JConnectionPoolSettings.Builder
+
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/connection/NettyTransportSettings.scala b/driver-scala/src/main/scala/org/mongodb/scala/connection/NettyTransportSettings.scala
new file mode 100644
index 00000000000..de1dd57a93f
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/connection/NettyTransportSettings.scala
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.connection
+
+import com.mongodb.connection.{ NettyTransportSettings => JNettyTransportSettings }
+
+/**
+ * An immutable class representing Netty transport settings used for connections to a MongoDB server.
+ *
+ * @since 4.11
+ */
+object NettyTransportSettings {
+
+  /**
+   * NettyTransportSettings builder type
+   */
+  type Builder = JNettyTransportSettings.Builder
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/connection/ProxySettings.scala b/driver-scala/src/main/scala/org/mongodb/scala/connection/ProxySettings.scala
new file mode 100644
index 00000000000..3337d742dde
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/connection/ProxySettings.scala
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.connection + +import com.mongodb.connection.{ ProxySettings => JProxySettings } + +/** + * This setting is only applicable when communicating with a MongoDB server using the synchronous variant of `MongoClient`. + * + * This setting is furthermore ignored if: + *
+ *   - the communication is via `com.mongodb.UnixServerAddress` (Unix domain socket).
+ *   - a `StreamFactoryFactory` is configured via `MongoClientSettings.Builder.streamFactoryFactory`.
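+ *
+ * A minimal sketch of configuring a SOCKS5 proxy and applying it through `SocketSettings`
+ * (the host and port shown are illustrative, and `applyToProxySettings` on
+ * `SocketSettings.Builder` is assumed to be available):
+ * {{{
+ * val socketSettings = SocketSettings.builder()
+ *   .applyToProxySettings(builder => builder.host("proxy.example.com").port(1080))
+ *   .build()
+ * }}}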
+ * + * @see [[org.mongodb.scala.connection.SocketSettings]] + * @see [[org.mongodb.scala.AutoEncryptionSettings]] + * @see [[org.mongodb.scala.ClientEncryptionSettings]] + * @since 4.11 + */ +object ProxySettings { + + /** + * Creates a builder for ProxySettings. + * + * @return a new Builder for creating ProxySettings. + */ + def builder(): Builder = JProxySettings.builder() + + /** + * ProxySettings builder type + */ + type Builder = JProxySettings.Builder + +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/connection/ServerSettings.scala b/driver-scala/src/main/scala/org/mongodb/scala/connection/ServerSettings.scala new file mode 100644 index 00000000000..bcd5e91ee98 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/connection/ServerSettings.scala @@ -0,0 +1,40 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.connection + +import com.mongodb.connection.{ ServerSettings => JServerSettings } + +/** + * Settings relating to monitoring of each server. + * + * @since 1.0 + */ +object ServerSettings { + + /** + * Creates a builder for ServerSettings. + * + * @return a new Builder for creating ServerSettings. + */ + def builder(): Builder = JServerSettings.builder() + + /** + * ServerSettings builder type + */ + type Builder = JServerSettings.Builder + +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/connection/SocketSettings.scala b/driver-scala/src/main/scala/org/mongodb/scala/connection/SocketSettings.scala new file mode 100644 index 00000000000..de42187b368 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/connection/SocketSettings.scala @@ -0,0 +1,40 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.connection + +import com.mongodb.connection.{ SocketSettings => JSocketSettings } + +/** + * An immutable class representing socket settings used for connections to a MongoDB server. + * + * @since 1.0 + */ +object SocketSettings { + + /** + * Creates a builder for SocketSettings. + * + * @return a new Builder for creating SocketSettings. 
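+ *
+ * A short sketch of tuning socket timeouts with the builder (the values shown are illustrative):
+ * {{{
+ * import java.util.concurrent.TimeUnit
+ *
+ * val socketSettings = SocketSettings.builder()
+ *   .connectTimeout(10, TimeUnit.SECONDS)
+ *   .readTimeout(15, TimeUnit.SECONDS)
+ *   .build()
+ * }}}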
+ */ + def builder(): Builder = JSocketSettings.builder() + + /** + * SocketSettings builder type + */ + type Builder = JSocketSettings.Builder + +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/connection/SslSettings.scala b/driver-scala/src/main/scala/org/mongodb/scala/connection/SslSettings.scala new file mode 100644 index 00000000000..8f39a719066 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/connection/SslSettings.scala @@ -0,0 +1,40 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.connection + +import com.mongodb.connection.{ SslSettings => JSslSettings } + +/** + * Settings for connecting to MongoDB via SSL. + * + * @since 1.0 + */ +object SslSettings { + + /** + * Gets a Builder for creating a new SSLSettings instance. + * + * @return a new Builder for creating SslSettings. + */ + def builder(): Builder = JSslSettings.builder() + + /** + * SslSettings builder type + */ + type Builder = JSslSettings.Builder + +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/connection/TransportSettings.scala b/driver-scala/src/main/scala/org/mongodb/scala/connection/TransportSettings.scala new file mode 100644 index 00000000000..c41bc958d84 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/connection/TransportSettings.scala @@ -0,0 +1,42 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.connection + +import com.mongodb.connection.{ TransportSettings => JTransportSettings } + +/** + * An immutable class representing transport settings used for connections to a MongoDB server. + * + * @since 4.11 + */ +object TransportSettings { + + /** + * Creates a builder for NettyTransportSettings. + * + * @return a new Builder for creating NettyTransportSettings. + */ + def nettyBuilder(): NettyTransportSettings.Builder = JTransportSettings.nettyBuilder() + + /** + * Creates a builder for AsyncTransportSettings. + * + * @return a new Builder for creating AsyncTransportSettings. 
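+ *
+ * For example, a sketch of selecting the async transport when building client settings
+ * (assuming the result is passed to `MongoClientSettings.Builder.transportSettings`):
+ * {{{
+ * val clientSettings = MongoClientSettings.builder()
+ *   .transportSettings(TransportSettings.asyncBuilder().build())
+ *   .build()
+ * }}}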
+ * @since 5.2 + */ + def asyncBuilder(): AsyncTransportSettings.Builder = JTransportSettings.asyncBuilder() +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/connection/package.scala b/driver-scala/src/main/scala/org/mongodb/scala/connection/package.scala new file mode 100644 index 00000000000..e283f4e07be --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/connection/package.scala @@ -0,0 +1,85 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala + +/** + * The connection package contains classes that manage connecting to MongoDB servers. + */ +package object connection { + + /** + * Settings for the cluster. + */ + type ClusterSettings = com.mongodb.connection.ClusterSettings + + /** + * All settings that relate to the pool of connections to a MongoDB server. + */ + type ConnectionPoolSettings = com.mongodb.connection.ConnectionPoolSettings + + /** + * Settings relating to monitoring of each server. + */ + type ServerSettings = com.mongodb.connection.ServerSettings + + /** + * An immutable class representing socket settings used for connections to a MongoDB server. + */ + type SocketSettings = com.mongodb.connection.SocketSettings + + /** + * This setting is only applicable when communicating with a MongoDB server using the synchronous variant of `MongoClient`. + * + * This setting is furthermore ignored if: + *
+ *   - the communication is via `com.mongodb.UnixServerAddress` (Unix domain socket).
+ *   - a `StreamFactoryFactory` is configured via `MongoClientSettings.Builder.streamFactoryFactory`.
+ * + * @see [[org.mongodb.scala.connection.SocketSettings]] + * @see [[org.mongodb.scala.AutoEncryptionSettings]] + * @see [[org.mongodb.scala.ClientEncryptionSettings]] + * @since 4.11 + */ + type ProxySettings = com.mongodb.connection.ProxySettings + + /** + * Settings for connecting to MongoDB via SSL. + */ + type SslSettings = com.mongodb.connection.SslSettings + + /** + * Transport settings for the driver. + * + * @since 4.11 + */ + type TransportSettings = com.mongodb.connection.TransportSettings + + /** + * TransportSettings for a Netty-based transport implementation. + * + * @since 4.11 + */ + type NettyTransportSettings = com.mongodb.connection.NettyTransportSettings + + /** + * TransportSettings for an async transport implementation. + * + * @since 5.2 + */ + type AsyncTransportSettings = com.mongodb.connection.AsyncTransportSettings +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSBucket.scala b/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSBucket.scala new file mode 100644 index 00000000000..15849798fe3 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSBucket.scala @@ -0,0 +1,624 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.gridfs + +import com.mongodb.annotations.{ Alpha, Reason } +import java.nio.ByteBuffer +import com.mongodb.reactivestreams.client.gridfs.{ GridFSBucket => JGridFSBucket, GridFSBuckets } +import org.mongodb.scala.bson.conversions.Bson +import org.mongodb.scala.bson.{ BsonObjectId, BsonValue, ObjectId } +import org.mongodb.scala.{ + ClientSession, + MongoDatabase, + Observable, + ReadConcern, + ReadPreference, + SingleObservable, + WriteConcern +} + +import scala.concurrent.duration.{ Duration, MILLISECONDS } + +/** + * A factory for GridFSBucket instances. + * + * @since 1.2 + */ +object GridFSBucket { + + /** + * Create a new GridFS bucket with the default `'fs'` bucket name + * + * @param database the database instance to use with GridFS + * @return the GridFSBucket + */ + def apply(database: MongoDatabase): GridFSBucket = GridFSBucket(GridFSBuckets.create(database.wrapped)) + + /** + * Create a new GridFS bucket with a custom bucket name + * + * @param database the database instance to use with GridFS + * @param bucketName the custom bucket name to use + * @return the GridFSBucket + */ + def apply(database: MongoDatabase, bucketName: String): GridFSBucket = + GridFSBucket(GridFSBuckets.create(database.wrapped, bucketName)) +} + +// scalastyle:off number.of.methods +/** + * Represents a GridFS Bucket + * + * @since 1.2 + */ +case class GridFSBucket(private val wrapped: JGridFSBucket) { + + /** + * The bucket name. + * + * @return the bucket name + */ + lazy val bucketName: String = wrapped.getBucketName + + /** + * Sets the chunk size in bytes. Defaults to 255. + * + * @return the chunk size in bytes. 
+ */ + lazy val chunkSizeBytes: Int = wrapped.getChunkSizeBytes + + /** + * Get the write concern for the GridFSBucket. + * + * @return the WriteConcern + */ + lazy val writeConcern: WriteConcern = wrapped.getWriteConcern + + /** + * Get the read preference for the GridFSBucket. + * + * @return the ReadPreference + */ + lazy val readPreference: ReadPreference = wrapped.getReadPreference + + /** + * Get the read concern for the GridFSBucket. + * + * @return the ReadConcern + * @note Requires MongoDB 3.2 or greater + * @see [[https://www.mongodb.com/docs/manual/reference/readConcern Read Concern]] + */ + lazy val readConcern: ReadConcern = wrapped.getReadConcern + + /** + * The time limit for the full execution of an operation. + * + * If not null the following deprecated options will be ignored: `waitQueueTimeoutMS`, `socketTimeoutMS`, + * `wTimeoutMS`, `maxTimeMS` and `maxCommitTimeMS`. + * + * - `null` means that the timeout mechanism for operations will defer to using: + * - `waitQueueTimeoutMS`: The maximum wait time in milliseconds that a thread may wait for a connection to become available + * - `socketTimeoutMS`: How long a send or receive on a socket can take before timing out. + * - `wTimeoutMS`: How long the server will wait for the write concern to be fulfilled before timing out. + * - `maxTimeMS`: The time limit for processing operations on a cursor. + * See: [cursor.maxTimeMS](https://docs.mongodb.com/manual/reference/method/cursor.maxTimeMS"). + * - `maxCommitTimeMS`: The maximum amount of time to allow a single `commitTransaction` command to execute. + * - `0` means infinite timeout. + * - `> 0` The time limit to use for the full execution of an operation. + * + * @return the optional timeout duration + * @since 5.2 + */ + @Alpha(Array(Reason.CLIENT)) + lazy val timeout: Option[Duration] = + Option.apply(wrapped.getTimeout(MILLISECONDS)).map(t => Duration(t, MILLISECONDS)) + + /** + * Create a new GridFSBucket instance with a new chunk size in bytes. + * + * @param chunkSizeBytes the new chunk size in bytes. + * @return a new GridFSBucket instance with the different chunk size in bytes + */ + def withChunkSizeBytes(chunkSizeBytes: Int): GridFSBucket = GridFSBucket(wrapped.withChunkSizeBytes(chunkSizeBytes)) + + /** + * Create a new GridFSBucket instance with a different read preference. + * + * @param readPreference the new ReadPreference for the database + * @return a new GridFSBucket instance with the different readPreference + */ + def withReadPreference(readPreference: ReadPreference): GridFSBucket = + GridFSBucket(wrapped.withReadPreference(readPreference)) + + /** + * Create a new GridFSBucket instance with a different write concern. + * + * @param writeConcern the new WriteConcern for the database + * @return a new GridFSBucket instance with the different writeConcern + */ + def withWriteConcern(writeConcern: WriteConcern): GridFSBucket = GridFSBucket(wrapped.withWriteConcern(writeConcern)) + + /** + * Create a new MongoDatabase instance with a different read concern. + * + * @param readConcern the new ReadConcern for the database + * @return a new GridFSBucket instance with the different ReadConcern + * @note Requires MongoDB 3.2 or greater + * @see [[https://www.mongodb.com/docs/manual/reference/readConcern Read Concern]] + */ + def withReadConcern(readConcern: ReadConcern): GridFSBucket = GridFSBucket(wrapped.withReadConcern(readConcern)) + + /** + * Sets the time limit for the full execution of an operation. + * + * - `0` means infinite timeout. 
+ * - `> 0` The time limit to use for the full execution of an operation. + * + * @param timeout the timeout, which must be greater than or equal to 0 + * @return a new GridFSBucket instance with the set time limit for operations + * @since 5.2 + */ + @Alpha(Array(Reason.CLIENT)) + def withTimeout(timeout: Duration): GridFSBucket = + GridFSBucket(wrapped.withTimeout(timeout.toMillis, MILLISECONDS)) + + /** + * Uploads the contents of the given `Observable` to a GridFS bucket. + * + * Reads the contents of the user file from the `source` and uploads it as chunks in the chunks collection. After all the + * chunks have been uploaded, it creates a files collection document for `filename` in the files collection. + * + * Note: When this [[GridFSBucket]] is set with a operation timeout (via timeout inherited from [[MongoDatabase]] + * settings or [[withTimeout]]), timeout breaches may occur due to the [[Observable]] + * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit. + * + * @param filename the filename for the stream + * @param source the Publisher providing the file data + * @return an Observable with a single element, the ObjectId of the uploaded file. + * @since 2.8 + */ + def uploadFromObservable(filename: String, source: Observable[ByteBuffer]): GridFSUploadObservable[ObjectId] = + GridFSUploadObservable(wrapped.uploadFromPublisher(filename, source)) + + /** + * Uploads the contents of the given `Observable` to a GridFS bucket. + * + * Reads the contents of the user file from the `source` and uploads it as chunks in the chunks collection. After all the + * chunks have been uploaded, it creates a files collection document for `filename` in the files collection. + * + * Note: When this [[GridFSBucket]] is set with a operation timeout (via timeout inherited from [[MongoDatabase]] + * settings or [[withTimeout]]), timeout breaches may occur due to the [[Observable]] + * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit. + * + * @param filename the filename for the stream + * @param source the Publisher providing the file data + * @param options the GridFSUploadOptions + * @return an Observable with a single element, the ObjectId of the uploaded file. + * @since 2.8 + */ + def uploadFromObservable( + filename: String, + source: Observable[ByteBuffer], + options: GridFSUploadOptions + ): GridFSUploadObservable[ObjectId] = + GridFSUploadObservable(wrapped.uploadFromPublisher(filename, source, options)) + + /** + * Uploads the contents of the given `Observable` to a GridFS bucket. + * + * Reads the contents of the user file from the `source` and uploads it as chunks in the chunks collection. After all the + * chunks have been uploaded, it creates a files collection document for `filename` in the files collection. + * + * Note: When this [[GridFSBucket]] is set with a operation timeout (via timeout inherited from [[MongoDatabase]] + * settings or [[withTimeout]]), timeout breaches may occur due to the [[Observable]] + * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit. + * + * @param id the custom id value of the file + * @param filename the filename for the stream + * @param source the Publisher providing the file data + * @return an Observable representing when the successful upload of the source. 
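+ *
+ * A minimal sketch of uploading with a custom id (the `bucket` and `source` values are assumed to exist):
+ * {{{
+ * val source: Observable[ByteBuffer] = ...
+ * bucket
+ *   .uploadFromObservable(BsonString("my-file-id"), "mongodb.png", source)
+ *   .subscribe((_: Unit) => println("upload complete"))
+ * }}}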
+ * @since 2.8 + */ + def uploadFromObservable( + id: BsonValue, + filename: String, + source: Observable[ByteBuffer] + ): GridFSUploadObservable[Unit] = + GridFSUploadObservable(wrapped.uploadFromPublisher(id, filename, source)) + + /** + * Uploads the contents of the given `Observable` to a GridFS bucket. + * + * Reads the contents of the user file from the `source` and uploads it as chunks in the chunks collection. After all the + * chunks have been uploaded, it creates a files collection document for `filename` in the files collection. + * + * Note: When this [[GridFSBucket]] is set with a operation timeout (via timeout inherited from [[MongoDatabase]] + * settings or [[withTimeout]]), timeout breaches may occur due to the [[Observable]] + * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit. + * + * @param id the custom id value of the file + * @param filename the filename for the stream + * @param source the Publisher providing the file data + * @param options the GridFSUploadOptions + * @return an Observable representing when the successful upload of the source. + * @since 2.8 + */ + def uploadFromObservable( + id: BsonValue, + filename: String, + source: Observable[ByteBuffer], + options: GridFSUploadOptions + ): GridFSUploadObservable[Unit] = + GridFSUploadObservable(wrapped.uploadFromPublisher(id, filename, source, options)) + + /** + * Uploads the contents of the given `Observable` to a GridFS bucket. + * + * Reads the contents of the user file from the `source` and uploads it as chunks in the chunks collection. After all the + * chunks have been uploaded, it creates a files collection document for `filename` in the files collection. + * + * Note: When this [[GridFSBucket]] is set with a operation timeout (via timeout inherited from [[MongoDatabase]] + * settings or [[withTimeout]]), timeout breaches may occur due to the [[Observable]] + * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit. + * + * @param clientSession the client session with which to associate this operation + * @param filename the filename for the stream + * @param source the Publisher providing the file data + * @return an Observable with a single element, the ObjectId of the uploaded file. + * @note Requires MongoDB 3.6 or greater + * @since 2.8 + */ + def uploadFromObservable( + clientSession: ClientSession, + filename: String, + source: Observable[ByteBuffer] + ): GridFSUploadObservable[ObjectId] = + GridFSUploadObservable(wrapped.uploadFromPublisher(clientSession, filename, source)) + + /** + * Uploads the contents of the given `Observable` to a GridFS bucket. + * + * Reads the contents of the user file from the `source` and uploads it as chunks in the chunks collection. After all the + * chunks have been uploaded, it creates a files collection document for `filename` in the files collection. + * + * Note: When this [[GridFSBucket]] is set with a operation timeout (via timeout inherited from [[MongoDatabase]] + * settings or [[withTimeout]]), timeout breaches may occur due to the [[Observable]] + * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit. 
+ * + * @param clientSession the client session with which to associate this operation + * @param filename the filename for the stream + * @param source the Publisher providing the file data + * @param options the GridFSUploadOptions + * @return an Observable with a single element, the ObjectId of the uploaded file. + * @note Requires MongoDB 3.6 or greater + * @since 2.8 + */ + def uploadFromObservable( + clientSession: ClientSession, + filename: String, + source: Observable[ByteBuffer], + options: GridFSUploadOptions + ): GridFSUploadObservable[ObjectId] = + GridFSUploadObservable(wrapped.uploadFromPublisher(clientSession, filename, source, options)) + + /** + * Uploads the contents of the given `Observable` to a GridFS bucket. + * + * Reads the contents of the user file from the `source` and uploads it as chunks in the chunks collection. After all the + * chunks have been uploaded, it creates a files collection document for `filename` in the files collection. + * + * Note: When this [[GridFSBucket]] is set with a operation timeout (via timeout inherited from [[MongoDatabase]] + * settings or [[withTimeout]]), timeout breaches may occur due to the [[Observable]] + * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit. + * + * @param clientSession the client session with which to associate this operation + * @param id the custom id value of the file + * @param filename the filename for the stream + * @param source the Publisher providing the file data + * @return an Observable representing when the successful upload of the source. + * @note Requires MongoDB 3.6 or greater + * @since 2.8 + */ + def uploadFromObservable( + clientSession: ClientSession, + id: BsonValue, + filename: String, + source: Observable[ByteBuffer] + ): GridFSUploadObservable[Unit] = + GridFSUploadObservable(wrapped.uploadFromPublisher(clientSession, id, filename, source)) + + /** + * Uploads the contents of the given `Observable` to a GridFS bucket. + * + * Reads the contents of the user file from the `source` and uploads it as chunks in the chunks collection. After all the + * chunks have been uploaded, it creates a files collection document for `filename` in the files collection. + * + * Note: When this [[GridFSBucket]] is set with a operation timeout (via timeout inherited from [[MongoDatabase]] + * settings or [[withTimeout]]), timeout breaches may occur due to the [[Observable]] + * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit. + * + * @param clientSession the client session with which to associate this operation + * @param id the custom id value of the file + * @param filename the filename for the stream + * @param source the Publisher providing the file data + * @param options the GridFSUploadOptions + * @return an Observable representing when the successful upload of the source. + * @note Requires MongoDB 3.6 or greater + * @since 2.8 + */ + def uploadFromObservable( + clientSession: ClientSession, + id: BsonValue, + filename: String, + source: Observable[ByteBuffer], + options: GridFSUploadOptions + ): GridFSUploadObservable[Unit] = + GridFSUploadObservable(wrapped.uploadFromPublisher(clientSession, id, filename, source, options)) + + /** + * Downloads the contents of the stored file specified by `id` into the `Publisher`. 
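+ *
+ * A minimal sketch of streaming a stored file's bytes (the `bucket` and `fileId` values are assumed to exist):
+ * {{{
+ * bucket
+ *   .downloadToObservable(fileId)
+ *   .subscribe((buffer: ByteBuffer) => println(s"received ${buffer.remaining()} bytes"))
+ * }}}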
+ * + * @param id the ObjectId of the file to be written to the destination stream + * @return an Observable with a single element, representing the amount of data written + * @since 2.8 + */ + def downloadToObservable(id: ObjectId): GridFSDownloadObservable = + GridFSDownloadObservable(wrapped.downloadToPublisher(id)) + + /** + * Downloads the contents of the stored file specified by `id` into the `Publisher`. + * + * @param id the custom id of the file, to be written to the destination stream + * @return an Observable with a single element, representing the amount of data written + * @since 2.8 + */ + def downloadToObservable(id: BsonValue): GridFSDownloadObservable = + GridFSDownloadObservable(wrapped.downloadToPublisher(id)) + + /** + * Downloads the contents of the stored file specified by `filename` into the `Publisher`. + * + * @param filename the name of the file to be downloaded + * @return an Observable with a single element, representing the amount of data written + * @since 2.8 + */ + def downloadToObservable(filename: String): GridFSDownloadObservable = + GridFSDownloadObservable(wrapped.downloadToPublisher(filename)) + + /** + * Downloads the contents of the stored file specified by `filename` and by the revision in `options` into the + * `Publisher`. + * + * @param filename the name of the file to be downloaded + * @param options the download options + * @return an Observable with a single element, representing the amount of data written + * @since 2.8 + */ + def downloadToObservable(filename: String, options: GridFSDownloadOptions): GridFSDownloadObservable = + GridFSDownloadObservable(wrapped.downloadToPublisher(filename, options)) + + /** + * Downloads the contents of the stored file specified by `id` into the `Publisher`. + * + * @param clientSession the client session with which to associate this operation + * @param id the ObjectId of the file to be written to the destination stream + * @return an Observable with a single element, representing the amount of data written + * @note Requires MongoDB 3.6 or greater + * @since 2.8 + */ + def downloadToObservable(clientSession: ClientSession, id: ObjectId): GridFSDownloadObservable = + GridFSDownloadObservable(wrapped.downloadToPublisher(clientSession, id)) + + /** + * Downloads the contents of the stored file specified by `id` into the `Publisher`. + * + * @param clientSession the client session with which to associate this operation + * @param id the custom id of the file, to be written to the destination stream + * @return an Observable with a single element, representing the amount of data written + * @note Requires MongoDB 3.6 or greater + * @since 2.8 + */ + def downloadToObservable(clientSession: ClientSession, id: BsonValue): GridFSDownloadObservable = + GridFSDownloadObservable(wrapped.downloadToPublisher(clientSession, id)) + + /** + * Downloads the contents of the latest version of the stored file specified by `filename` into the `Publisher`. 
+ * + * @param clientSession the client session with which to associate this operation + * @param filename the name of the file to be downloaded + * @return an Observable with a single element, representing the amount of data written + * @note Requires MongoDB 3.6 or greater + * @since 2.8 + */ + def downloadToObservable(clientSession: ClientSession, filename: String): GridFSDownloadObservable = + GridFSDownloadObservable(wrapped.downloadToPublisher(clientSession, filename)) + + /** + * Downloads the contents of the stored file specified by `filename` and by the revision in `options` into the + * `Publisher`. + * + * @param clientSession the client session with which to associate this operation + * @param filename the name of the file to be downloaded + * @param options the download options + * @return an Observable with a single element, representing the amount of data written + * @note Requires MongoDB 3.6 or greater + * @since 2.8 + */ + def downloadToObservable( + clientSession: ClientSession, + filename: String, + options: GridFSDownloadOptions + ): GridFSDownloadObservable = + GridFSDownloadObservable(wrapped.downloadToPublisher(clientSession, filename, options)) + + /** + * Finds all documents in the files collection. + * + * @return the GridFS find iterable interface + * @see [[https://www.mongodb.com/docs/manual/tutorial/query-documents/ Find]] + */ + def find(): GridFSFindObservable = GridFSFindObservable(wrapped.find()) + + /** + * Finds all documents in the collection that match the filter. + * + * Below is an example of filtering against the filename and some nested metadata that can also be stored along with the file data: + * + * ` + * Filters.and(Filters.eq("filename", "mongodb.png"), Filters.eq("metadata.contentType", "image/png")); + * ` + * + * @param filter the query filter + * @return the GridFS find iterable interface + * @see com.mongodb.client.model.Filters + */ + def find(filter: Bson): GridFSFindObservable = GridFSFindObservable(wrapped.find(filter)) + + /** + * Finds all documents in the files collection. + * + * @param clientSession the client session with which to associate this operation + * @return the GridFS find iterable interface + * @see [[https://www.mongodb.com/docs/manual/tutorial/query-documents/ Find]] + * @since 2.2 + * @note Requires MongoDB 3.6 or greater + */ + def find(clientSession: ClientSession): GridFSFindObservable = GridFSFindObservable(wrapped.find(clientSession)) + + /** + * Finds all documents in the collection that match the filter. + * + * Below is an example of filtering against the filename and some nested metadata that can also be stored along with the file data: + * + * ` + * Filters.and(Filters.eq("filename", "mongodb.png"), Filters.eq("metadata.contentType", "image/png")); + * ` + * + * @param clientSession the client session with which to associate this operation + * @param filter the query filter + * @return the GridFS find iterable interface + * @see com.mongodb.client.model.Filters + * @since 2.2 + * @note Requires MongoDB 3.6 or greater + */ + def find(clientSession: ClientSession, filter: Bson): GridFSFindObservable = + GridFSFindObservable(wrapped.find(clientSession, filter)) + + /** + * Given a `id`, delete this stored file's files collection document and associated chunks from a GridFS bucket. 
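+ *
+ * For example, a sketch of removing a stored file by its id (the `bucket` and `fileId` values are assumed to exist):
+ * {{{
+ * bucket.delete(fileId).subscribe((_: Unit) => println("file deleted"))
+ * }}}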
+ * + * @param id the ObjectId of the file to be deleted + * @return an Observable that indicates when the operation has completed + */ + def delete(id: ObjectId): SingleObservable[Unit] = wrapped.delete(id) + + /** + * Given a `id`, delete this stored file's files collection document and associated chunks from a GridFS bucket. + * + * @param id the ObjectId of the file to be deleted + * @return an Observable that indicates when the operation has completed + */ + def delete(id: BsonValue): SingleObservable[Unit] = wrapped.delete(id) + + /** + * Given a `id`, delete this stored file's files collection document and associated chunks from a GridFS bucket. + * + * @param clientSession the client session with which to associate this operation + * @param id the ObjectId of the file to be deleted + * @return an Observable that indicates when the operation has completed + * @since 2.2 + * @note Requires MongoDB 3.6 or greater + */ + def delete(clientSession: ClientSession, id: ObjectId): SingleObservable[Unit] = + wrapped.delete(clientSession, id) + + /** + * Given a `id`, delete this stored file's files collection document and associated chunks from a GridFS bucket. + * + * @param clientSession the client session with which to associate this operation + * @param id the ObjectId of the file to be deleted + * @return an Observable that indicates when the operation has completed + * @since 2.2 + * @note Requires MongoDB 3.6 or greater + */ + def delete(clientSession: ClientSession, id: BsonValue): SingleObservable[Unit] = + wrapped.delete(clientSession, id) + + /** + * Renames the stored file with the specified `id`. + * + * @param id the id of the file in the files collection to rename + * @param newFilename the new filename for the file + * @return an Observable that indicates when the operation has completed + */ + def rename(id: ObjectId, newFilename: String): SingleObservable[Unit] = + wrapped.rename(id, newFilename) + + /** + * Renames the stored file with the specified `id`. + * + * @param id the id of the file in the files collection to rename + * @param newFilename the new filename for the file + * @return an Observable that indicates when the operation has completed + */ + def rename(id: BsonValue, newFilename: String): SingleObservable[Unit] = + wrapped.rename(id, newFilename) + + /** + * Renames the stored file with the specified `id`. + * + * @param clientSession the client session with which to associate this operation + * @param id the id of the file in the files collection to rename + * @param newFilename the new filename for the file + * @return an Observable that indicates when the operation has completed + * @since 2.2 + * @note Requires MongoDB 3.6 or greater + */ + def rename(clientSession: ClientSession, id: ObjectId, newFilename: String): SingleObservable[Unit] = + wrapped.rename(clientSession, id, newFilename) + + /** + * Renames the stored file with the specified `id`. + * + * @param clientSession the client session with which to associate this operation + * @param id the id of the file in the files collection to rename + * @param newFilename the new filename for the file + * @return an Observable that indicates when the operation has completed + * @since 2.2 + * @note Requires MongoDB 3.6 or greater + */ + def rename(clientSession: ClientSession, id: BsonValue, newFilename: String): SingleObservable[Unit] = + wrapped.rename(clientSession, id, newFilename) + + /** + * Drops the data associated with this bucket from the database. 
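+ *
+ * For example (the `bucket` value is assumed to exist):
+ * {{{
+ * bucket.drop().subscribe((_: Unit) => println("bucket dropped"))
+ * }}}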
+ * + * @return an Observable that indicates when the operation has completed + */ + def drop(): SingleObservable[Unit] = wrapped.drop() + + /** + * Drops the data associated with this bucket from the database. + * + * @param clientSession the client session with which to associate this operation + * @return an Observable that indicates when the operation has completed + * @since 2.2 + * @note Requires MongoDB 3.6 or greater + */ + def drop(clientSession: ClientSession): SingleObservable[Unit] = wrapped.drop(clientSession) +} +// scalastyle:on number.of.methods diff --git a/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSDownloadObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSDownloadObservable.scala new file mode 100644 index 00000000000..b9db01730b4 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSDownloadObservable.scala @@ -0,0 +1,68 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.gridfs + +import java.nio.ByteBuffer + +import com.mongodb.reactivestreams.client.gridfs.GridFSDownloadPublisher +import org.mongodb.scala.{ Observable, Observer } + +/** + * A GridFS Observable for downloading data from GridFS + * + * Provides the `GridFSFile` for the file to being downloaded as well as a way to control the batchsize. + * + * @since 2.8 + */ +case class GridFSDownloadObservable(private val wrapped: GridFSDownloadPublisher) extends Observable[ByteBuffer] { + + /** + * Gets the corresponding [[GridFSFile]] for the file being downloaded + * + * @return a Publisher with a single element, the corresponding GridFSFile for the file being downloaded + */ + def gridFSFile(): Observable[GridFSFile] = wrapped.getGridFSFile() + + /** + * The preferred number of bytes per `ByteBuffer` returned by the `Observable`. + * + * Allows for larger than chunk size ByteBuffers. The actual chunk size of the data stored in MongoDB is the smallest allowable + * `ByteBuffer` size. + * + * Can be used to control the memory consumption of this `Observable`. The smaller the bufferSizeBytes the lower the memory + * consumption and higher latency. + * + * '''Note:''' Must be set before the Observable is subscribed to + * + * @param bufferSizeBytes the preferred buffer size in bytes to use per `ByteBuffer` in the `Observable`, defaults to chunk size. + * @return this + */ + def bufferSizeBytes(bufferSizeBytes: Int): GridFSDownloadObservable = + GridFSDownloadObservable(wrapped.bufferSizeBytes(bufferSizeBytes)) + + /** + * Request `Observable` to start streaming data. + * + * This is a "factory method" and can be called multiple times, each time starting a new `Subscription`. + * Each `Subscription` will work for only a single [[Observer]]. + * + * If the `Observable` rejects the subscription attempt or otherwise fails it will signal the error via [[Observer.onError]]. 
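+ *
+ * A minimal sketch of consuming the stream with a custom [[Observer]] (the `observable` value is
+ * assumed to be a [[GridFSDownloadObservable]]; error handling is kept trivial):
+ * {{{
+ * observable.subscribe(new Observer[ByteBuffer] {
+ *   override def onSubscribe(subscription: Subscription): Unit = subscription.request(Long.MaxValue)
+ *   override def onNext(buffer: ByteBuffer): Unit = println(s"received ${buffer.remaining()} bytes")
+ *   override def onError(e: Throwable): Unit = e.printStackTrace()
+ *   override def onComplete(): Unit = println("download complete")
+ * })
+ * }}}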
+ * + * @param observer the `Observer` that will consume signals from this `Observable` + */ + override def subscribe(observer: Observer[_ >: ByteBuffer]): Unit = wrapped.subscribe(observer) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSFindObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSFindObservable.scala new file mode 100644 index 00000000000..fdbea9add70 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSFindObservable.scala @@ -0,0 +1,143 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.gridfs + +import java.util.concurrent.TimeUnit +import com.mongodb.reactivestreams.client.gridfs.GridFSFindPublisher +import org.mongodb.scala.bson.conversions.Bson +import org.mongodb.scala.{ Observable, Observer, SingleObservable, TimeoutMode } + +import scala.concurrent.duration.Duration + +/** + * Observable representing the GridFS Files Collection. + * + * @since 1.2 + */ +case class GridFSFindObservable(private val wrapped: GridFSFindPublisher) extends Observable[GridFSFile] { + + /** + * Sets the query filter to apply to the query. + * + * Below is an example of filtering against the filename and some nested metadata that can also be stored along with the file data: + * + * {{{ + * Filters.and(Filters.eq("filename", "mongodb.png"), Filters.eq("metadata.contentType", "image/png")); + * }}} + * + * @param filter the filter, which may be null. + * @return this + * @see [[https://www.mongodb.com/docs/manual/reference/method/db.collection.find/ Filter]] + * @see [[org.mongodb.scala.model.Filters]] + */ + def filter(filter: Bson): GridFSFindObservable = { + wrapped.filter(filter) + this + } + + /** + * Sets the limit to apply. + * + * @param limit the limit, which may be null + * @return this + * @see [[https://www.mongodb.com/docs/manual/reference/method/cursor.limit/#cursor.limit Limit]] + */ + def limit(limit: Int): GridFSFindObservable = { + wrapped.limit(limit) + this + } + + /** + * Sets the number of documents to skip. + * + * @param skip the number of documents to skip + * @return this + * @see [[https://www.mongodb.com/docs/manual/reference/method/cursor.skip/#cursor.skip Skip]] + */ + def skip(skip: Int): GridFSFindObservable = { + wrapped.skip(skip) + this + } + + /** + * Sets the sort criteria to apply to the query. + * + * @param sort the sort criteria, which may be null. + * @return this + * @see [[https://www.mongodb.com/docs/manual/reference/method/cursor.sort/ Sort]] + */ + def sort(sort: Bson): GridFSFindObservable = { + wrapped.sort(sort) + this + } + + /** + * The server normally times out idle cursors after an inactivity period (10 minutes) + * to prevent excess memory use. Set this option to prevent that. 
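+ *
+ * For example, a sketch of a files-collection query that keeps its cursor alive during slow processing
+ * (the `bucket` value is assumed to exist, and `Filters` refers to `org.mongodb.scala.model.Filters`):
+ * {{{
+ * bucket
+ *   .find(Filters.eq("metadata.contentType", "image/png"))
+ *   .noCursorTimeout(true)
+ *   .batchSize(16)
+ * }}}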
+ * + * @param noCursorTimeout true if cursor timeout is disabled + * @return this + */ + def noCursorTimeout(noCursorTimeout: Boolean): GridFSFindObservable = { + wrapped.noCursorTimeout(noCursorTimeout) + this + } + + /** + * Sets the maximum execution time on the server for this operation. + * + * @see [[https://www.mongodb.com/docs/manual/reference/operator/meta/maxTimeMS/ Max Time]] + * @param duration the duration + * @return this + */ + def maxTime(duration: Duration): GridFSFindObservable = { + wrapped.maxTime(duration.toMillis, TimeUnit.MILLISECONDS) + this + } + + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size + * @return this + * @see [[https://www.mongodb.com/docs/manual/reference/method/cursor.batchSize/#cursor.batchSize Batch Size]] + */ + def batchSize(batchSize: Int): GridFSFindObservable = { + wrapped.batchSize(batchSize) + this + } + + /** + * Helper to return a single observable limited to the first result. + * + * @return a single observable which will the first result. + * @since 4.0 + */ + def first(): SingleObservable[GridFSFile] = wrapped.first() + + /** + * Request `Observable` to start streaming data. + * + * This is a "factory method" and can be called multiple times, each time starting a new [[org.mongodb.scala.Subscription]]. + * Each `Subscription` will work for only a single [[Observer]]. + * + * If the `Observable` rejects the subscription attempt or otherwise fails it will signal the error via [[Observer.onError]]. + * + * @param observer the `Observer` that will consume signals from this `Observable` + */ + override def subscribe(observer: Observer[_ >: GridFSFile]): Unit = wrapped.subscribe(observer) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSUploadObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSUploadObservable.scala new file mode 100644 index 00000000000..3364e423936 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/gridfs/GridFSUploadObservable.scala @@ -0,0 +1,63 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.gridfs + +import com.mongodb.reactivestreams.client.gridfs.GridFSUploadPublisher +import org.bson.BsonValue +import org.bson.types.ObjectId +import org.mongodb.scala.{ Observer, SingleObservable } + +/** + * A GridFS `Observable` for uploading data into GridFS + * + * Provides the `id` for the file to be uploaded. Cancelling the subscription to this publisher will cause any uploaded data + * to be cleaned up and removed. + * + * @tparam T the result type of the publisher + * @since 2.8 + */ +case class GridFSUploadObservable[T](private val wrapped: GridFSUploadPublisher[T]) extends SingleObservable[T] { + + /** + * Gets the ObjectId for the file to be uploaded + * + * @throws MongoGridFSException if the file id is not an ObjectId. 
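+ *
+ * For example, a sketch of reading the server-generated id of a pending upload
+ * (the `bucket` and `source` values are assumed to exist):
+ * {{{
+ * val upload = bucket.uploadFromObservable("mongodb.png", source)
+ * val id: ObjectId = upload.objectId // throws MongoGridFSException if the file id is not an ObjectId
+ * }}}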
+ * + * @return the ObjectId for the file to be uploaded + */ + lazy val objectId: ObjectId = wrapped.getObjectId + + /** + * The BsonValue id for this file. + * + * @return the id for this file + */ + lazy val id: BsonValue = wrapped.getId + + /** + * Request `Observable` to start streaming data. + * + * This is a "factory method" and can be called multiple times, each time starting a new `Subscription`. + * Each `Subscription` will work for only a single [[Observer]]. + * + * If the `Observable` rejects the subscription attempt or otherwise fails it will signal the error via [[Observer.onError]]. + * + * @param observer the `Observer` that will consume signals from this `Observable` + */ + override def subscribe(observer: Observer[_ >: T]): Unit = wrapped.subscribe(observer) + +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/gridfs/package.scala b/driver-scala/src/main/scala/org/mongodb/scala/gridfs/package.scala new file mode 100644 index 00000000000..6e3e4b24153 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/gridfs/package.scala @@ -0,0 +1,69 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala + +import com.mongodb.reactivestreams.client.gridfs.GridFSUploadPublisher +import org.bson.BsonValue +import org.mongodb.scala.bson.ObjectId +import org.reactivestreams.Subscriber +import reactor.core.publisher.Flux + +package object gridfs extends ObservableImplicits { + + /** + * An exception indicating that a failure occurred in GridFS. + */ + type MongoGridFSException = com.mongodb.MongoGridFSException + + /** + * GridFS upload options + * + * Customizable options used when uploading files into GridFS + */ + type GridFSUploadOptions = com.mongodb.client.gridfs.model.GridFSUploadOptions + + /** + * The GridFSFile + */ + type GridFSFile = com.mongodb.client.gridfs.model.GridFSFile + + /** + * The GridFS download by name options + * + * Controls the selection of the revision to download + */ + type GridFSDownloadOptions = com.mongodb.client.gridfs.model.GridFSDownloadOptions + + /** + * A `GridFSUploadPublisher`` that emits + * + * - exactly one item, if the wrapped `Publisher` does not signal an error, even if the represented stream is empty; + * - no items if the wrapped `Publisher` signals an error. + * + * @param pub A `Publisher` representing a finite stream. 
+ */ + implicit class ToGridFSUploadPublisherUnit(pub: => GridFSUploadPublisher[Void]) extends GridFSUploadPublisher[Unit] { + val publisher = pub + + override def subscribe(observer: Subscriber[_ >: Unit]): Unit = + Flux.from(publisher).reduce((), (_: Unit, _: Void) => ()).subscribe(observer) + + override def getObjectId: ObjectId = publisher.getObjectId + + override def getId: BsonValue = publisher.getId + } +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/internal/AndThenObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/internal/AndThenObservable.scala new file mode 100644 index 00000000000..18b48b9d123 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/internal/AndThenObservable.scala @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.internal + +import org.mongodb.scala.{ Observable, Observer } +import reactor.core.publisher.Flux + +import scala.util.{ Failure, Try } + +private[scala] case class AndThenObservable[T, U](observable: Observable[T], pf: PartialFunction[Try[T], U]) + extends Observable[T] { + override def subscribe(observer: Observer[_ >: T]): Unit = { + var finalResult: Option[T] = None + Flux + .from(observable) + .doOnNext(t => finalResult = Some(t)) + .doOnError(e => Try(pf(Failure(e)))) + .doOnComplete(() => Try(pf(Try(finalResult.get)))) + .subscribe(observer) + } +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/internal/CollectObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/internal/CollectObservable.scala new file mode 100644 index 00000000000..45f434c1b1f --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/internal/CollectObservable.scala @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala.internal + +import org.mongodb.scala.{ Observable, Observer, SingleObservable } +import reactor.core.publisher.Flux + +private[scala] case class CollectObservable[T, S](observable: Observable[T], pf: PartialFunction[T, S]) + extends SingleObservable[S] { + + override def subscribe(observer: Observer[_ >: S]): Unit = + Flux.from(observable.filter((t: T) => pf.isDefinedAt(t)).map((t: T) => pf.apply(t))).subscribe(observer) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/internal/ExecutionContextObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/internal/ExecutionContextObservable.scala new file mode 100644 index 00000000000..4eaeb964f78 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/internal/ExecutionContextObservable.scala @@ -0,0 +1,34 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.internal + +import org.mongodb.scala.{ Observable, Observer } +import reactor.core.publisher.Flux +import reactor.core.scheduler.Schedulers + +import scala.concurrent.ExecutionContext + +private[scala] case class ExecutionContextObservable[T](observable: Observable[T], context: ExecutionContext) + extends Observable[T] { + + override def subscribe(observer: Observer[_ >: T]): Unit = + Flux + .from(observable) + .publishOn(Schedulers.fromExecutor((command: Runnable) => context.execute(command))) + .subscribe(observer) + +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/internal/FilterObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/internal/FilterObservable.scala new file mode 100644 index 00000000000..3362afe768f --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/internal/FilterObservable.scala @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala.internal + +import org.mongodb.scala.{ Observable, Observer } +import reactor.core.publisher.Flux + +private[scala] case class FilterObservable[T](observable: Observable[T], p: T => Boolean) extends Observable[T] { + override def subscribe(observer: Observer[_ >: T]): Unit = + Flux.from(observable).filter((t: T) => p(t)).subscribe(observer) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/internal/FlatMapObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/internal/FlatMapObservable.scala new file mode 100644 index 00000000000..3a47dfe2d35 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/internal/FlatMapObservable.scala @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.internal + +import org.mongodb.scala._ +import reactor.core.publisher.Flux + +private[scala] case class FlatMapObservable[T, S](observable: Observable[T], f: T => Observable[S]) + extends Observable[S] { + override def subscribe(observer: Observer[_ >: S]): Unit = + Flux.from(observable).flatMap((t: T) => f(t)).subscribe(observer) + +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/internal/FoldLeftObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/internal/FoldLeftObservable.scala new file mode 100644 index 00000000000..50b374d2fa5 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/internal/FoldLeftObservable.scala @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.internal + +import org.mongodb.scala.{ Observable, Observer, SingleObservable } +import reactor.core.publisher.Flux + +private[scala] case class FoldLeftObservable[T, S](observable: Observable[T], initialValue: S, accumulator: (S, T) => S) + extends SingleObservable[S] { + + override def subscribe(observer: Observer[_ >: S]): Unit = + Flux.from(observable).reduce(initialValue, (s: S, t: T) => accumulator(s, t)).subscribe(observer) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/internal/IterableObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/internal/IterableObservable.scala new file mode 100644 index 00000000000..b6de44f0d9d --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/internal/IterableObservable.scala @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.internal + +import scala.collection.JavaConverters._ +import org.mongodb.scala.{ Observable, Observer } +import reactor.core.publisher.Flux + +private[scala] case class IterableObservable[A](from: Iterable[A]) extends Observable[A] { + + override def subscribe(observer: Observer[_ >: A]): Unit = Flux.fromIterable(from.asJava).subscribe(observer) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/internal/MapObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/internal/MapObservable.scala new file mode 100644 index 00000000000..1e1cd2bcb14 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/internal/MapObservable.scala @@ -0,0 +1,36 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.internal + +import org.mongodb.scala.{ Observable, Observer } +import reactor.core.publisher.Flux + +import org.reactivestreams.{ Subscription => JSubscription } + +private[scala] case class MapObservable[T, S](observable: Observable[T], s: T => S, f: Throwable => Throwable = t => t) + extends Observable[S] { + override def subscribe(observer: Observer[_ >: S]): Unit = { + Flux + .from(observable) + .subscribe( + (t: T) => observer.onNext(s(t)), + (e: Throwable) => observer.onError(f(e)), + () => observer.onComplete(), + (s: JSubscription) => observer.onSubscribe(s) + ) + } +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/internal/RecoverObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/internal/RecoverObservable.scala new file mode 100644 index 00000000000..783832982c4 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/internal/RecoverObservable.scala @@ -0,0 +1,33 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala.internal + +import org.mongodb.scala._ +import reactor.core.publisher.{ Flux, Mono } + +private[scala] case class RecoverObservable[T, U >: T]( + observable: Observable[T], + pf: PartialFunction[Throwable, U] +) extends Observable[U] { + + override def subscribe(observer: Observer[_ >: U]): Unit = + Flux + .from(observable) + .onErrorResume((t: Throwable) => Mono.fromCallable(() => pf(t).asInstanceOf[T])) + .subscribe(observer) + +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/internal/RecoverWithObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/internal/RecoverWithObservable.scala new file mode 100644 index 00000000000..9b8ab80da07 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/internal/RecoverWithObservable.scala @@ -0,0 +1,52 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.internal + +import org.mongodb.scala._ +import org.reactivestreams.Publisher +import reactor.core.publisher.Flux + +import scala.util.{ Failure, Success, Try } + +private[scala] case class RecoverWithObservable[T, U >: T]( + observable: Observable[T], + pf: PartialFunction[Throwable, Observable[U]], + throwOriginalException: Boolean = false +) extends Observable[U] { + + override def subscribe(observer: Observer[_ >: U]): Unit = + Flux + .from(observable) + .onErrorResume((t: Throwable) => { + Try(pf(t)) match { + case Success(res) => + Flux + .from(res) + .onErrorResume((ex: Throwable) => { + if (throwOriginalException) { + throw t + } else { + throw ex + } + }) + .asInstanceOf[Publisher[T]] + case Failure(_) => throw t + } + }) + .subscribe(observer) + +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/internal/SingleItemObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/internal/SingleItemObservable.scala new file mode 100644 index 00000000000..588269b7a25 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/internal/SingleItemObservable.scala @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
 */ + +package org.mongodb.scala.internal + +import org.mongodb.scala.{ Observer, SingleObservable } +import reactor.core.publisher.Mono + +private[scala] case class SingleItemObservable[A](item: A) extends SingleObservable[A] { + + override def subscribe(observer: Observer[_ >: A]): Unit = + Mono.just(item).subscribe(observer) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/internal/UnitObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/internal/UnitObservable.scala new file mode 100644 index 00000000000..7978cf6be63 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/internal/UnitObservable.scala @@ -0,0 +1,33 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.internal + +import org.mongodb.scala.{ Observable, Observer, SingleObservable } + +/** + * An [[Observable]] that emits + * + * - exactly one item, if the wrapped [[Observable]] does not signal an error, even if the represented stream is empty; + * - no items if the wrapped [[Observable]] signals an error. + * + * @param observable An [[Observable]] representing a finite stream. + */ +private[scala] case class UnitObservable[T](observable: Observable[T]) extends SingleObservable[Unit] { + override def subscribe(observer: Observer[_ >: Unit]): Unit = + observable.foldLeft(0)((_, _) => 0).map(_ => ()).subscribe(observer) + +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/internal/WriteConcernImplicits.scala b/driver-scala/src/main/scala/org/mongodb/scala/internal/WriteConcernImplicits.scala new file mode 100644 index 00000000000..19d3769240d --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/internal/WriteConcernImplicits.scala @@ -0,0 +1,41 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.internal + +import java.util.concurrent.TimeUnit + +import scala.concurrent.duration.Duration + +import com.mongodb.{ WriteConcern => JWriteConcern } + +import org.mongodb.scala.WriteConcern + +private[scala] trait WriteConcernImplicits { + + implicit class ScalaWriteConcern[T](jWriteConcern: JWriteConcern) { + + /** + * Constructs a new WriteConcern from the current one and the specified wTimeout in the given time unit.
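+ *
+ * For example, a minimal sketch (any write concern works as the starting value; `WriteConcern.MAJORITY` is used here only for illustration):
+ * {{{
+ * import scala.concurrent.duration._
+ *
+ * // a majority write concern that times out after five seconds
+ * val majorityWithTimeout: WriteConcern = WriteConcern.MAJORITY.withWTimeout(5.seconds)
+ * }}}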
+ * + * @param wTimeout the wTimeout, which must be >= 0 and <= Integer.MAX_VALUE after conversion to milliseconds + * @return the WriteConcern with the given wTimeout + */ + def withWTimeout(wTimeout: Duration): WriteConcern = + jWriteConcern.withWTimeout(wTimeout.toMillis, TimeUnit.MILLISECONDS) + } + +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/internal/ZipObservable.scala b/driver-scala/src/main/scala/org/mongodb/scala/internal/ZipObservable.scala new file mode 100644 index 00000000000..d23c2d88c34 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/internal/ZipObservable.scala @@ -0,0 +1,38 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.internal + +import org.mongodb.scala.{ Observable, Observer } +import reactor.core.publisher.Flux +import org.reactivestreams.{ Subscription => JSubscription } +import reactor.util.function.{ Tuple2 => RTuple2 } + +private[scala] case class ZipObservable[L, R]( + leftObservable: Observable[L], + rightObservable: Observable[R] +) extends Observable[(L, R)] { + + def subscribe(observer: Observer[_ >: (L, R)]): Unit = + Flux + .zip(leftObservable, rightObservable) + .subscribe( + (t: RTuple2[L, R]) => observer.onNext((t.getT1, t.getT2)), + (e: Throwable) => observer.onError(e), + () => observer.onComplete(), + (s: JSubscription) => observer.onSubscribe(s) + ) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/internal/package.scala b/driver-scala/src/main/scala/org/mongodb/scala/internal/package.scala new file mode 100644 index 00000000000..ccaa433ab5c --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/internal/package.scala @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala + +/** + * Internal companion objects for easy interaction with the Java based async library + * + * @note Not directly accessible but accessible via package objects and aliases. 
Mirrors the async package naming convention and locations but under the `org.mongodb.scala` namespace + * @since 1.0 + */ +package object internal {} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/Accumulators.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/Accumulators.scala new file mode 100644 index 00000000000..f8ffc712360 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/Accumulators.scala @@ -0,0 +1,526 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.model + +import scala.collection.JavaConverters._ +import com.mongodb.client.model.{ Accumulators => JAccumulators, QuantileMethod } +import org.mongodb.scala.bson.conversions.Bson + +/** + * Builders for accumulators used in the group pipeline stage of an aggregation pipeline. + * + * @see [[https://www.mongodb.com/docs/manual/core/aggregation-pipeline/ Aggregation pipeline]] + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/group/#accumulator-operator Accumulators]] + * @see [[https://www.mongodb.com/docs/manual/meta/aggregation-quick-reference/#aggregation-expressions Expressions]] + * + * @since 1.0 + */ +object Accumulators { + + /** + * Gets a field name for a `\$group` operation representing the sum of the values of the given expression when applied to all members of + * the group. + * + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/sum/ \$sum]] + * @param fieldName the field name + * @param expression the expression + * @tparam TExpression the expression type + * @return the field + */ + def sum[TExpression](fieldName: String, expression: TExpression): BsonField = JAccumulators.sum(fieldName, expression) + + /** + * Gets a field name for a `\$group` operation representing the average of the values of the given expression when applied to all + * members of the group. + * + * @param fieldName the field name + * @param expression the expression + * @tparam TExpression the expression type + * @return the field + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/avg/ \$avg]] + */ + def avg[TExpression](fieldName: String, expression: TExpression): BsonField = JAccumulators.avg(fieldName, expression) + + /** + * Returns a combination of a computed field and an accumulator that generates a BSON `Array` + * containing computed values from the given `inExpression` based on the provided `pExpression`, which represents an array + * of percentiles of interest within a group, where each element is a numeric value between 0.0 and 1.0 (inclusive). + * + * @param fieldName The field computed by the accumulator. + * @param inExpression The input expression. + * @param pExpression The expression representing the percentiles of interest. + * @param method The method to be used for computing the percentiles. + * @tparam InExpression The type of the input expression.
+ * @tparam PExpression The type of the percentile expression. + * @return The requested [[BsonField]]. + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/percentile/ \$percentile]] + * @since 4.10 + * @note Requires MongoDB 7.0 or greater + */ + def percentile[InExpression, PExpression]( + fieldName: String, + inExpression: InExpression, + pExpression: PExpression, + method: QuantileMethod + ): BsonField = + JAccumulators.percentile(fieldName, inExpression, pExpression, method) + + /** + * Returns a combination of a computed field and an accumulator that generates a BSON `Double` + * representing the median value computed from the given `inExpression` within a group. + * + * @param fieldName The field computed by the accumulator. + * @param inExpression The input expression. + * @param method The method to be used for computing the median. + * @tparam InExpression The type of the input expression. + * @return The requested [[BsonField]]. + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/median/ \$median]] + * @since 4.10 + * @note Requires MongoDB 7.0 or greater + */ + def median[InExpression]( + fieldName: String, + inExpression: InExpression, + method: QuantileMethod + ): BsonField = + JAccumulators.median(fieldName, inExpression, method) + + /** + * Gets a field name for a `\$group` operation representing the value of the given expression when applied to the first member of + * the group. + * + * @param fieldName the field name + * @param expression the expression + * @tparam TExpression the expression type + * @return the field + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/first/ \$first]] + */ + def first[TExpression](fieldName: String, expression: TExpression): BsonField = + JAccumulators.first(fieldName, expression) + + /** + * Returns a combination of a computed field and an accumulator that produces a BSON `Array` + * of values of the given `inExpression` computed for the first `N` elements within a presorted group, + * where `N` is the positive integral value of the `nExpression`. + * + * @param fieldName The field computed by the accumulator. + * @param inExpression The input expression. + * @param nExpression The expression limiting the number of produced values. + * @tparam InExpression The type of the input expression. + * @tparam NExpression The type of the limiting expression. + * @return The requested [[BsonField]]. + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/firstN/ \$firstN]] + * @since 4.7 + * @note Requires MongoDB 5.2 or greater + */ + def firstN[InExpression, NExpression]( + fieldName: String, + inExpression: InExpression, + nExpression: NExpression + ): BsonField = + JAccumulators.firstN(fieldName, inExpression, nExpression) + + /** + * Returns a combination of a computed field and an accumulator that produces + * a value of the given `outExpression` computed for the top element within a group + * sorted according to the provided `sort` specification. + * + * @param fieldName The field computed by the accumulator. + * @param sortBy The sort specification. The syntax is identical to the one expected by [[Aggregates.sort]]. + * @param outExpression The output expression. + * @tparam OutExpression The type of the output expression. + * @return The requested [[BsonField]].
+ * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/top/ \$top]] + * @since 4.7 + * @note Requires MongoDB 5.2 or greater + */ + def top[OutExpression]( + fieldName: String, + sortBy: Bson, + outExpression: OutExpression + ): BsonField = + JAggregates.top(fieldName, sortBy, outExpression) + + /** + * Returns a combination of a computed field and an accumulator that produces a BSON `Array` + * of values of the given `outExpression` computed for the top `N` elements within a group + * sorted according to the provided `sort` specification, + * where `N` is the positive integral value of the `nExpression`. + * + * @param fieldName The field computed by the accumulator. + * @param sortBy The sort specification. The syntax is identical to the one expected by [[Aggregates.sort]]. + * @param outExpression The output expression. + * @param nExpression The expression limiting the number of produced values. + * @tparam OutExpression The type of the output expression. + * @tparam NExpression The type of the limiting expression. + * @return The requested [[BsonField]]. + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/topN/ \$topN]] + * @since 4.7 + * @note Requires MongoDB 5.2 or greater + */ + def topN[OutExpression, NExpression]( + fieldName: String, + sortBy: Bson, + outExpression: OutExpression, + nExpression: NExpression + ): BsonField = + JAccumulators.topN(fieldName, sortBy, outExpression, nExpression) + + /** + * Gets a field name for a `\$group` operation representing the value of the given expression when applied to the last member of + * the group. + * + * @param fieldName the field name + * @param expression the expression + * @tparam TExpression the expression type + * @return the field + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/last/ \$last]] + */ + def last[TExpression](fieldName: String, expression: TExpression): BsonField = + JAccumulators.last(fieldName, expression) + + /** + * Returns a combination of a computed field and an accumulator that produces a BSON `Array` + * of values of the given `inExpression` computed for the last `N` elements within a presorted group, + * where `N` is the positive integral value of the `nExpression`. + * + * @param fieldName The field computed by the accumulator. + * @param inExpression The input expression. + * @param nExpression The expression limiting the number of produced values. + * @tparam InExpression The type of the input expression. + * @tparam NExpression The type of the limiting expression. + * @return The requested [[BsonField]]. + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/lastN/ \$lastN]] + * @since 4.7 + * @note Requires MongoDB 5.2 or greater + */ + def lastN[InExpression, NExpression]( + fieldName: String, + inExpression: InExpression, + nExpression: NExpression + ): BsonField = + JAccumulators.lastN(fieldName, inExpression, nExpression) + + /** + * Returns a combination of a computed field and an accumulator that produces + * a value of the given `outExpression` computed for the bottom element within a group + * sorted according to the provided `sort` specification. + * + * @param fieldName The field computed by the accumulator. + * @param sortBy The sort specification. The syntax is identical to the one expected by [[Aggregates.sort]]. + * @param outExpression The output expression. + * @tparam OutExpression The type of the output expression. + * @return The requested [[BsonField]].
+ * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/bottom/ \$bottom]] + * @since 4.7 + * @note Requires MongoDB 5.2 or greater + */ + def bottom[OutExpression]( + fieldName: String, + sortBy: Bson, + outExpression: OutExpression + ): BsonField = + JAccumulators.bottom(fieldName, sortBy, outExpression) + + /** + * Returns a combination of a computed field and an accumulator that produces a BSON `Array` + * of values of the given `outExpression` computed for the bottom `N` elements within a group + * sorted according to the provided `sort` specification, + * where `N` is the positive integral value of the `nExpression`. + * + * @param fieldName The field computed by the accumulator. + * @param sortBy The sort specification. The syntax is identical to the one expected by [[Aggregates.sort]]. + * @param outExpression The output expression. + * @param nExpression The expression limiting the number of produced values. + * @tparam OutExpression The type of the output expression. + * @tparam NExpression The type of the limiting expression. + * @return The requested [[BsonField]]. + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/bottomN/ \$bottomN]] + * @since 4.7 + * @note Requires MongoDB 5.2 or greater + */ + def bottomN[OutExpression, NExpression]( + fieldName: String, + sortBy: Bson, + outExpression: OutExpression, + nExpression: NExpression + ): BsonField = + JAccumulators.bottomN(fieldName, sortBy, outExpression, nExpression) + + /** + * Gets a field name for a `\$group` operation representing the maximum of the values of the given expression when applied to all + * members of the group. + * + * @param fieldName the field name + * @param expression the expression + * @tparam TExpression the expression type + * @return the field + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/max/ \$max]] + */ + def max[TExpression](fieldName: String, expression: TExpression): BsonField = JAccumulators.max(fieldName, expression) + + /** + * Returns a combination of a computed field and an accumulator that produces a BSON `Array` + * of `N` largest values of the given `inExpression`, + * where `N` is the positive integral value of the `nExpression`. + * + * @param fieldName The field computed by the accumulator. + * @param inExpression The input expression. + * @param nExpression The expression limiting the number of produced values. + * @tparam InExpression The type of the input expression. + * @tparam NExpression The type of the limiting expression. + * @return The requested [[BsonField]]. + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/maxN/ \$maxN]] + * @since 4.7 + * @note Requires MongoDB 5.2 or greater + */ + def maxN[InExpression, NExpression]( + fieldName: String, + inExpression: InExpression, + nExpression: NExpression + ): BsonField = + JAccumulators.maxN(fieldName, inExpression, nExpression) + + /** + * Gets a field name for a `\$group` operation representing the minimum of the values of the given expression when applied to all + * members of the group. 
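+ *
+ * For example, a minimal sketch of pairing this accumulator with [[Aggregates.group]]
+ * (the `category` and `price` field names are hypothetical):
+ * {{{
+ * // group by category, keeping the smallest price seen in each group
+ * Aggregates.group("\$category", Accumulators.min("minPrice", "\$price"))
+ * }}}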
+ * + * @param fieldName the field name + * @param expression the expression + * @tparam TExpression the expression type + * @return the field + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/min/ \$min]] + */ + def min[TExpression](fieldName: String, expression: TExpression): BsonField = JAccumulators.min(fieldName, expression) + + /** + * Returns a combination of a computed field and an accumulator that produces a BSON `Array` + * of `N` smallest values of the given `inExpression`, + * where `N` is the positive integral value of the `nExpression`. + * + * @param fieldName The field computed by the accumulator. + * @param inExpression The input expression. + * @param nExpression The expression limiting the number of produced values. + * @tparam InExpression The type of the input expression. + * @tparam NExpression The type of the limiting expression. + * @return The requested [[BsonField]]. + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/minN/ \$minN]] + * @since 4.7 + * @note Requires MongoDB 5.2 or greater + */ + def minN[InExpression, NExpression]( + fieldName: String, + inExpression: InExpression, + nExpression: NExpression + ): BsonField = + JAccumulators.minN(fieldName, inExpression, nExpression) + + /** + * Gets a field name for a `\$group` operation representing an array of all values that results from applying an expression to each + * document in a group of documents that share the same group by key. + * + * @param fieldName the field name + * @param expression the expression + * @tparam TExpression the expression type + * @return the field + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/push/ \$push]] + */ + def push[TExpression](fieldName: String, expression: TExpression): BsonField = + JAccumulators.push(fieldName, expression) + + /** + * Gets a field name for a `\$group` operation representing all unique values that result from applying the given expression to each + * document in a group of documents that share the same group by key. + * + * @param fieldName the field name + * @param expression the expression + * @tparam TExpression the expression type + * @return the field + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/addToSet/ \$addToSet]] + */ + def addToSet[TExpression](fieldName: String, expression: TExpression): BsonField = + JAccumulators.addToSet(fieldName, expression) + + /** + * Gets a field name for a `\$group` operation representing the result of merging the fields of the documents. + * If documents to merge include the same field name, the field, in the resulting document, has the value from the last document + * merged for the field. + * + * @param fieldName the field name + * @param expression the expression + * @tparam TExpression the expression type + * @return the field + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/mergeObjects/ \$mergeObjects]] + * @since 4.4 + */ + def mergeObjects[TExpression](fieldName: String, expression: TExpression): BsonField = + JAccumulators.mergeObjects(fieldName, expression) + + /** + * Gets a field name for a `\$group` operation representing the population standard deviation of the values of the given expression + * when applied to all members of the group. + * + * Use if the values encompass the entire population of data you want to represent and do not wish to generalize about + * a larger population.
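+ *
+ * For example, a minimal sketch (the `category` and `score` field names are hypothetical):
+ * {{{
+ * // population standard deviation of scores within each category
+ * Aggregates.group("\$category", Accumulators.stdDevPop("scoreStdDev", "\$score"))
+ * }}}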
+ * + * @note Requires MongoDB 3.2 or greater + * @param fieldName the field name + * @param expression the expression + * @tparam TExpression the expression type + * @return the field + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/stdDevPop/ \$stdDevPop]] + * @since 1.1 + */ + def stdDevPop[TExpression](fieldName: String, expression: TExpression): BsonField = + JAccumulators.stdDevPop(fieldName, expression) + + /** + * Gets a field name for a `\$group` operation representing the sample standard deviation of the values of the given expression + * when applied to all members of the group. + * + * Use if the values encompass a sample of a population of data from which to generalize about the population. + * + * @note Requires MongoDB 3.2 or greater + * @param fieldName the field name + * @param expression the expression + * @tparam TExpression the expression type + * @return the field + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/stdDevSamp/ \$stdDevSamp]] + * @since 1.1 + */ + def stdDevSamp[TExpression](fieldName: String, expression: TExpression): BsonField = + JAccumulators.stdDevSamp(fieldName, expression) + + /** + * Creates an `\$accumulator` pipeline stage + * + * @param fieldName the field name + * @param initFunction a function used to initialize the state + * @param accumulateFunction a function used to accumulate documents + * @param mergeFunction a function used to merge two internal states, e.g. accumulated on different shards or + * threads. It returns the resulting state of the accumulator. + * @return the `\$accumulator` pipeline stage + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/accumulator/ \$accumulator]] + * @since 1.2 + * @note Requires MongoDB 4.4 or greater + */ + def accumulator( + fieldName: String, + initFunction: String, + accumulateFunction: String, + mergeFunction: String + ): BsonField = + JAccumulators.accumulator(fieldName, initFunction, accumulateFunction, mergeFunction) + + /** + * Creates an `\$accumulator` pipeline stage + * + * @param fieldName the field name + * @param initFunction a function used to initialize the state + * @param accumulateFunction a function used to accumulate documents + * @param mergeFunction a function used to merge two internal states, e.g. accumulated on different shards or + * threads. It returns the resulting state of the accumulator. + * @param finalizeFunction a function used to finalize the state and return the result (may be null) + * @return the `\$accumulator` pipeline stage + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/accumulator/ \$accumulator]] + * @since 1.2 + * @note Requires MongoDB 4.4 or greater + */ + def accumulator( + fieldName: String, + initFunction: String, + accumulateFunction: String, + mergeFunction: String, + finalizeFunction: String + ): BsonField = + JAccumulators.accumulator(fieldName, initFunction, accumulateFunction, mergeFunction, finalizeFunction) + + /** + * Creates an `\$accumulator` pipeline stage + * + * @param fieldName the field name + * @param initFunction a function used to initialize the state + * @param initArgs init function’s arguments (may be null) + * @param accumulateFunction a function used to accumulate documents + * @param accumulateArgs additional accumulate function’s arguments (may be null). The first argument to the + * function is ‘state’. + * @param mergeFunction a function used to merge two internal states, e.g. 
accumulated on different shards or + * threads. It returns the resulting state of the accumulator. + * @param finalizeFunction a function used to finalize the state and return the result (may be null) + * @return the `\$accumulator` pipeline stage + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/accumulator/ \$accumulator]] + * @since 1.2 + * @note Requires MongoDB 4.4 or greater + */ + def accumulator( + fieldName: String, + initFunction: String, + initArgs: Seq[String], + accumulateFunction: String, + accumulateArgs: Seq[String], + mergeFunction: String, + finalizeFunction: String + ): BsonField = + JAccumulators.accumulator( + fieldName, + initFunction, + initArgs.asJava, + accumulateFunction, + accumulateArgs.asJava, + mergeFunction, + finalizeFunction + ) + + /** + * Creates an `\$accumulator` pipeline stage + * + * @param fieldName the field name + * @param initFunction a function used to initialize the state + * @param initArgs init function’s arguments (may be null) + * @param accumulateFunction a function used to accumulate documents + * @param accumulateArgs additional accumulate function’s arguments (may be null). The first argument to the + * function is ‘state’. + * @param mergeFunction a function used to merge two internal states, e.g. accumulated on different shards or + * threads. It returns the resulting state of the accumulator. + * @param finalizeFunction a function used to finalize the state and return the result (may be null) + * @param lang a language specifier + * @return the `\$accumulator` pipeline stage + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/accumulator/ \$accumulator]] + * @since 1.2 + * @note Requires MongoDB 4.4 or greater + */ + def accumulator( + fieldName: String, + initFunction: String, + initArgs: Seq[String], + accumulateFunction: String, + accumulateArgs: Seq[String], + mergeFunction: String, + finalizeFunction: String, + lang: String + ): BsonField = + JAccumulators.accumulator( + fieldName, + initFunction, + initArgs.asJava, + accumulateFunction, + accumulateArgs.asJava, + mergeFunction, + finalizeFunction, + lang + ) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/Aggregates.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/Aggregates.scala new file mode 100644 index 00000000000..c7b8d120cf7 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/Aggregates.scala @@ -0,0 +1,795 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala.model + +import com.mongodb.annotations.{ Beta, Reason } +import com.mongodb.client.model.fill.FillOutputField +import com.mongodb.client.model.search.FieldSearchPath + +import scala.collection.JavaConverters._ +import com.mongodb.client.model.{ Aggregates => JAggregates } +import org.mongodb.scala.MongoNamespace +import org.mongodb.scala.bson.conversions.Bson +import org.mongodb.scala.model.densify.{ DensifyOptions, DensifyRange } +import org.mongodb.scala.model.fill.FillOptions +import org.mongodb.scala.model.geojson.Point +import org.mongodb.scala.model.search.{ SearchCollector, SearchOperator, SearchOptions, VectorSearchOptions } + +/** + * Builders for aggregation pipeline stages. + * + * @see [[https://www.mongodb.com/docs/manual/core/aggregation-pipeline/ Aggregation pipeline]] + * + * @since 1.0 + */ +object Aggregates { + + /** + * Creates an `\$addFields` pipeline stage + * + * @param fields the fields to add + * @return the `\$addFields` pipeline stage + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/addFields/ \$addFields]] + * @since 1.2 + * @note Requires MongoDB 3.4 or greater + */ + def addFields(fields: Field[_]*): Bson = JAggregates.addFields(fields.asJava) + + /** + * Creates an \$set pipeline stage + * + * @param fields the fields to add + * @return the \$set pipeline stage + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/set/ \$set]] + * @since 4.3 + * @note Requires MongoDB 4.2 or greater + */ + def set(fields: Field[_]*): Bson = JAggregates.set(fields.asJava) + + /** + * Creates a \$bucket pipeline stage + * + * @param groupBy the criteria to group By + * @param boundaries the boundaries of the buckets + * @tparam TExpression the groupBy expression type + * @tparam TBoundary the boundary type + * @return the `\$bucket` pipeline stage + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/bucket/ \$bucket]] + * @since 1.2 + * @note Requires MongoDB 3.4 or greater + */ + def bucket[TExpression, TBoundary](groupBy: TExpression, boundaries: TBoundary*): Bson = + JAggregates.bucket(groupBy, boundaries.asJava) + + /** + * Creates a `\$bucket` pipeline stage + * + * @param groupBy the criteria to group By + * @param boundaries the boundaries of the buckets + * @param options the optional values for the `\$bucket` stage + * @tparam TExpression the groupBy expression type + * @tparam TBoundary the boundary type + * @return the `\$bucket` pipeline stage + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/bucket/ \$bucket]] + * @since 1.2 + * @note Requires MongoDB 3.4 or greater + */ + def bucket[TExpression, TBoundary](groupBy: TExpression, options: BucketOptions, boundaries: TBoundary*): Bson = + JAggregates.bucket(groupBy, boundaries.asJava, options) + + /** + * Creates a `\$bucketAuto` pipeline stage + * + * @param groupBy the criteria to group By + * @param buckets the number of the buckets + * @tparam TExpression the groupBy expression type + * @return the `\$bucketAuto` pipeline stage + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/bucketAuto/ \$bucketAuto]] + * @since 1.2 + * @note Requires MongoDB 3.4 or greater + */ + def bucketAuto[TExpression, TBoundary](groupBy: TExpression, buckets: Int): Bson = + JAggregates.bucketAuto(groupBy, buckets) + + /** + * Creates a `\$bucketAuto` pipeline stage + * + * @param groupBy the criteria to group By + * @param buckets the number of the buckets + * @param 
options the optional values for the `\$bucketAuto` stage + * @tparam TExpression the groupBy expression type + * @return the `\$bucketAuto` pipeline stage + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/bucketAuto/ \$bucketAuto]] + * @since 1.2 + * @note Requires MongoDB 3.4 or greater + */ + def bucketAuto[TExpression, TBoundary](groupBy: TExpression, buckets: Int, options: BucketAutoOptions): Bson = + JAggregates.bucketAuto(groupBy, buckets, options) + + /** + * Creates a `\$count` pipeline stage using the field name "count" to store the result + * + * @return the `\$count` pipeline stage + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/count/ \$count]] + * @since 1.2 + * @note Requires MongoDB 3.4 or greater + */ + def count(): Bson = JAggregates.count() + + /** + * Creates a `\$count` pipeline stage using the named field to store the result + * + * @param field the field in which to store the count + * @return the `\$count` pipeline stage + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/count/ \$count]] + * @since 1.2 + * @note Requires MongoDB 3.4 or greater + */ + def count(field: String): Bson = JAggregates.count(field) + + /** + * Creates a `\$match` pipeline stage for the specified filter + * + * @param filter the filter to match + * @return the `\$match` pipeline stage + * @see Filters + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/match/ \$match]] + */ + def `match`(filter: Bson): Bson = JAggregates.`match`(filter) // scalastyle:ignore + + /** + * Creates a `\$match` pipeline stage for the specified filter + * + * A friendly alias for the `match` method. + * + * @param filter the filter to match against + * @return the `\$match` pipeline stage + * @see Filters + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/match/ \$match]] + */ + def filter(filter: Bson): Bson = `match`(filter) // scalastyle:ignore + + /** + * Creates a `\$facet` pipeline stage + * + * @param facets the facets to use + * @return the new pipeline stage + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/facet/ \$facet]] + * @since 1.2 + * @note Requires MongoDB 3.4 or greater + */ + def facet(facets: Facet*): Bson = JAggregates.facet(facets.asJava) + + /** + * Creates a `\$graphLookup` pipeline stage for the specified filter + * + * @param from the collection to query + * @param startWith the expression to start the graph lookup with + * @param connectFromField the from field + * @param connectToField the to field + * @param as name of field in output document + * @tparam TExpression the expression type + * @return the `\$graphLookup` pipeline stage + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/graphLookup/ \$graphLookup]] + * @since 1.2 + * @note Requires MongoDB 3.4 or greater + */ + def graphLookup[TExpression]( + from: String, + startWith: TExpression, + connectFromField: String, + connectToField: String, + as: String + ): Bson = + JAggregates.graphLookup(from, startWith, connectFromField, connectToField, as) + + /** + * Creates a graphLookup pipeline stage for the specified filter + * + * @param from the collection to query + * @param startWith the expression to start the graph lookup with + * @param connectFromField the from field + * @param connectToField the to field + * @param as name of field in output document + * @param options optional values for the graphLookup + * @tparam TExpression the 
expression type + * @return the `\$graphLookup` pipeline stage + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/graphLookup/ \$graphLookup]] + * @since 1.2 + * @note Requires MongoDB 3.4 or greater + */ + def graphLookup[TExpression]( + from: String, + startWith: TExpression, + connectFromField: String, + connectToField: String, + as: String, + options: GraphLookupOptions + ): Bson = + JAggregates.graphLookup(from, startWith, connectFromField, connectToField, as, options) + + /** + * Creates a `\$project` pipeline stage for the specified projection + * + * @param projection the projection + * @return the `\$project` pipeline stage + * @see Projections + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/project/ \$project]] + */ + def project(projection: Bson): Bson = JAggregates.project(projection) + + /** + * Creates a `\$replaceRoot` pipeline stage + * + * @param value the new root value + * @tparam TExpression the new root type + * @return the `\$replaceRoot` pipeline stage + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/replaceRoot/ \$replaceRoot]] + * @since 1.2 + * @note Requires MongoDB 3.4 or greater + */ + def replaceRoot[TExpression](value: TExpression): Bson = JAggregates.replaceRoot(value) + + /** + * Creates a `\$replaceRoot` pipeline stage + * + * With `\$replaceWith`, you can promote an embedded document to the top-level. + * You can also specify a new document as the replacement. + * + * The `\$replaceWith` is an alias for [[replaceRoot]].
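+ *
+ * For example, a minimal sketch that promotes a hypothetical embedded `info` document to the top level
+ * (assuming `collection` is a `MongoCollection[Document]`):
+ * {{{
+ * // every output document is the former value of the "info" field
+ * collection.aggregate(Seq(Aggregates.replaceWith("\$info")))
+ * }}}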
+ * + * @param value the new root value + * @tparam TExpression the new root type + * @return the `\$replaceRoot` pipeline stage + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/replaceWith/ \$replaceWith]] + * @since 2.7 + */ + def replaceWith[TExpression](value: TExpression): Bson = JAggregates.replaceWith(value) + + /** + * Creates a `\$sort` pipeline stage for the specified sort specification + * + * @param sort the sort specification + * @see Sorts + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/sort/#sort-aggregation \$sort]] + */ + def sort(sort: Bson): Bson = JAggregates.sort(sort) + + /** + * Creates a `\$sortByCount` pipeline stage for the specified filter + * + * @param filter the filter specification + * @tparam TExpression the expression type + * @return the `\$sortByCount` pipeline stage + * @see Sorts + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/sortByCount \$sortByCount]] + * @since 1.2 + * @note Requires MongoDB 3.4 or greater + */ + def sortByCount[TExpression](filter: TExpression): Bson = JAggregates.sortByCount(filter) + + /** + * Creates a `\$skip` pipeline stage + * + * @param skip the number of documents to skip + * @return the `\$skip` pipeline stage + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/skip/ \$skip]] + */ + def skip(skip: Int): Bson = JAggregates.skip(skip) + + /** + * Creates a `\$sample` pipeline stage with the specified sample size + * + * @param size the sample size + * @return the `\$sample` pipeline stage + * @since 1.1 + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/sample/ \$sample]] + */ + def sample(size: Int): Bson = JAggregates.sample(size) + + /** + * Creates a `\$limit` pipeline stage that limits the number of documents passed to the next stage + * + * @param limit the limit + * @return the `\$limit` pipeline stage + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/limit/ \$limit]] + */ + def limit(limit: Int): Bson = JAggregates.limit(limit) + + /** + * Creates a `\$lookup` pipeline stage that performs a left outer join with the specified collection + * + * @param from the name of the collection in the same database to perform the join with. + * @param localField specifies the field from the local collection to match values against. + * @param foreignField specifies the field in the from collection to match values against. + * @param as the name of the new array field to add to the input documents. + * @return the `\$lookup` pipeline stage + * @since 1.1 + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/lookup/ \$lookup]] + * @note Requires MongoDB 3.2 or greater + */ + def lookup(from: String, localField: String, foreignField: String, as: String): Bson = + JAggregates.lookup(from, localField, foreignField, as) + + /** + * Creates a `\$lookup` pipeline stage, joining the current collection with + * the one specified in from using the given pipeline. If the first stage in + * the pipeline is a `\$documents` stage, then the "from" collection is + * ignored. + * + * @param from the name of the collection in the same database to + * perform the join with. May be null if the + * first pipeline stage is `\$documents`. + * @param pipeline the pipeline to run on the joined collection. + * @param as the name of the new array field to add to the input documents.
+ * @return the `\$lookup` pipeline stage + * @since 2.3 + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/lookup/ \$lookup]] + * @note Requires MongoDB 3.6 or greater + */ + def lookup(from: String, pipeline: Seq[_ <: Bson], as: String): Bson = + JAggregates.lookup(from, pipeline.asJava, as) + + /** + * Creates a `\$lookup` pipeline stage, joining the current collection with + * the one specified in from using the given pipeline. If the first stage in + * the pipeline is a `\$documents` stage, then the "from" collection is + * ignored. + * + * @param from the name of the collection in the same database to + * perform the join with. May be null if the + * first pipeline stage is `\$documents`. + * @param let the variables to use in the pipeline field stages. + * @param pipeline the pipeline to run on the joined collection. + * @param as the name of the new array field to add to the input documents. + * @return the `\$lookup` pipeline stage + * @since 2.3 + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/lookup/ \$lookup]] + * @note Requires MongoDB 3.6 or greater + */ + def lookup[T](from: String, let: Seq[Variable[T]], pipeline: Seq[_ <: Bson], as: String): Bson = + JAggregates.lookup[T](from, let.asJava, pipeline.asJava, as) + + /** + * Creates a `\$group` pipeline stage for the specified id expression and field accumulators + * + * @param id the id expression for the group + * @param fieldAccumulators zero or more field accumulator pairs + * @tparam TExpression the expression type + * @return the `\$group` pipeline stage + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/group/ \$group]] + * @see [[https://www.mongodb.com/docs/manual/meta/aggregation-quick-reference/#aggregation-expressions Expressions]] + */ + def group[TExpression](id: TExpression, fieldAccumulators: BsonField*): Bson = + JAggregates.group(id, fieldAccumulators.asJava) + + /** + * Creates a `\$unwind` pipeline stage for the specified field name, which must be prefixed by a `\$` sign. + * + * @param fieldName the field name, prefixed by a `\$` sign + * @return the `\$unwind` pipeline stage + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/unwind/ \$unwind]] + */ + def unwind(fieldName: String): Bson = JAggregates.unwind(fieldName) + + /** + * Creates a `\$unwind` pipeline stage for the specified field name, which must be prefixed by a `\$` sign. + * + * @param fieldName the field name, prefixed by a `\$` sign + * @param unwindOptions options for the unwind pipeline stage + * @return the `\$unwind` pipeline stage + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/unwind/ \$unwind]] + * @since 1.1 + */ + def unwind(fieldName: String, unwindOptions: UnwindOptions): Bson = JAggregates.unwind(fieldName, unwindOptions) + + /** + * Creates a `\$out` pipeline stage that writes to the collection with the specified name + * + * @param collectionName the collection name + * @return the `\$out` pipeline stage + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/out/ \$out]] + */ + def out(collectionName: String): Bson = JAggregates.out(collectionName) + + /** + * Creates a `\$out` pipeline stage that supports outputting to a different database.
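+ *
+ * For example, a minimal sketch (the `reporting` database and `dailyTotals` collection names are hypothetical):
+ * {{{
+ * // write the aggregation results to reporting.dailyTotals
+ * Aggregates.out("reporting", "dailyTotals")
+ * }}}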
+ * + * @param databaseName the database name + * @param collectionName the collection name + * @return the `\$out` pipeline stage + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/out/ \$out]] + */ + def out(databaseName: String, collectionName: String): Bson = JAggregates.out(databaseName, collectionName) + + /** + * Creates a `\$merge` pipeline stage that merges into the specified collection. + * + * @param collectionName the name of the collection to merge into + * @return the `\$merge` pipeline stage + * @since 2.7 + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/merge/]] + */ + def merge(collectionName: String): Bson = JAggregates.merge(collectionName) + + /** + * Creates a `\$merge` pipeline stage that merges into the specified collection using the specified options. + * + * @param collectionName the name of the collection to merge into + * @param mergeOptions the mergeOptions + * @return the `\$merge` pipeline stage + * @since 2.7 + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/merge/]] + */ + def merge(collectionName: String, mergeOptions: MergeOptions): Bson = + JAggregates.merge(collectionName, mergeOptions.wrapped) + + /** + * Creates a `\$merge` pipeline stage that merges into the specified namespace. + * + * @param namespace the namespace to merge into + * @return the `\$merge` pipeline stage + * @since 2.7 + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/merge/]] + */ + def merge(namespace: MongoNamespace): Bson = JAggregates.merge(namespace) + + /** + * Creates a `\$merge` pipeline stage that merges into the specified namespace using the specified options. + * + * @param namespace the namespace to merge into + * @param mergeOptions the mergeOptions + * @return the `\$merge` pipeline stage + * @since 2.7 + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/merge/]] + */ + def merge(namespace: MongoNamespace, mergeOptions: MergeOptions): Bson = + JAggregates.merge(namespace, mergeOptions.wrapped) + + /** + * Creates a `\$unionWith` pipeline stage. + * + * @param collection the name of the collection in the same database to perform the union with. + * @param pipeline the pipeline to run on the union. + * @return the `\$unionWith` pipeline stage + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/unionWith/]] + */ + def unionWith(collection: String, pipeline: Bson*): Bson = + JAggregates.unionWith(collection, pipeline.asJava) + + /** + * Creates a `\$setWindowFields` pipeline stage, which allows using window operators. + * This stage partitions the input documents similarly to the [[Aggregates.group \$group]] pipeline stage, + * optionally sorts them, computes fields in the documents by computing window functions over [[Window windows]] specified per + * function, and outputs the documents. The important difference from the `\$group` pipeline stage is that + * documents belonging to the same partition or window are not folded into a single document. + * + * @param partitionBy Optional partitioning of data specified like `id` in [[Aggregates.group]]. + * If `None`, then all documents belong to the same partition. + * @param sortBy Fields to sort by. The syntax is identical to `sort` in [[Aggregates.sort]] (see [[Sorts]]). + * Sorting is required by certain functions and may be required by some windows (see [[Windows]] for more details).
+ * Sorting is used only for the purpose of computing window functions and does not guarantee ordering + * of the output documents. + * @param output A [[WindowOutputField window output field]]. + * @param moreOutput More [[WindowOutputField window output fields]]. + * @tparam TExpression The `partitionBy` expression type. + * @return The `\$setWindowFields` pipeline stage. + * @see [[https://dochub.mongodb.org/core/window-functions-set-window-fields \$setWindowFields]] + * @since 4.3 + * @note Requires MongoDB 5.0 or greater. + */ + def setWindowFields[TExpression >: Null]( + partitionBy: Option[TExpression], + sortBy: Option[Bson], + output: WindowOutputField, + moreOutput: WindowOutputField* + ): Bson = + JAggregates.setWindowFields(partitionBy.orNull, sortBy.orNull, output, moreOutput: _*) + + /** + * Creates a `\$setWindowFields` pipeline stage, which allows using window operators. + * This stage partitions the input documents similarly to the [[Aggregates.group \$group]] pipeline stage, + * optionally sorts them, computes fields in the documents by computing window functions over [[Window windows]] specified per + * function, and outputs the documents. The important difference from the `\$group` pipeline stage is that + * documents belonging to the same partition or window are not folded into a single document. + * + * @param partitionBy Optional partitioning of data specified like `id` in [[Aggregates.group]]. + * If `None`, then all documents belong to the same partition. + * @param sortBy Fields to sort by. The syntax is identical to `sort` in [[Aggregates.sort]] (see [[Sorts]]). + * Sorting is required by certain functions and may be required by some windows (see [[Windows]] for more details). + * Sorting is used only for the purpose of computing window functions and does not guarantee ordering + * of the output documents. + * @param output A nonempty list of [[WindowOutputField window output fields]]. + * Specifying an empty list is not an error, but the resulting stage does not do anything useful. + * @tparam TExpression The `partitionBy` expression type. + * @return The `\$setWindowFields` pipeline stage. + * @see [[https://dochub.mongodb.org/core/window-functions-set-window-fields \$setWindowFields]] + * @since 4.3 + * @note Requires MongoDB 5.0 or greater. + */ + def setWindowFields[TExpression >: Null]( + partitionBy: Option[TExpression], + sortBy: Option[Bson], + output: Iterable[_ <: WindowOutputField] + ): Bson = + JAggregates.setWindowFields(partitionBy.orNull, sortBy.orNull, output.asJava) + + /** + * Creates a `\$densify` pipeline stage, which adds documents to a sequence of documents + * where certain values in the `field` are missing. + * + * @param field The field to densify. + * @param range The range. + * @return The requested pipeline stage. + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/densify/ \$densify]] + * @see [[https://www.mongodb.com/docs/manual/core/document/#dot-notation Dot notation]] + * @note Requires MongoDB 5.1 or greater. + * @since 4.7 + */ + def densify(field: String, range: DensifyRange): Bson = + JAggregates.densify(field, range) + + /** + * Creates a `\$densify` pipeline stage, which adds documents to a sequence of documents + * where certain values in the `field` are missing. + * + * @param field The field to densify. + * @param range The range. + * @param options The densify options. + * Specifying `DensifyOptions.densifyOptions` is equivalent to calling `Aggregates.densify(String, DensifyRange)`. 
+   * @return The requested pipeline stage.
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/densify/ \$densify]]
+   * @see [[https://www.mongodb.com/docs/manual/core/document/#dot-notation Dot notation]]
+   * @note Requires MongoDB 5.1 or greater.
+   * @since 4.7
+   */
+  def densify(field: String, range: DensifyRange, options: DensifyOptions): Bson =
+    JAggregates.densify(field, range, options)
+
+  /**
+   * Creates a `\$fill` pipeline stage, which assigns values to fields when they are BSON `Null` or missing.
+   *
+   * @param options The fill options.
+   * @param output The `FillOutputField`.
+   * @param moreOutput More `FillOutputField`s.
+   * @return The requested pipeline stage.
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/fill/ \$fill]]
+   * @note Requires MongoDB 5.3 or greater.
+   * @since 4.7
+   */
+  def fill(options: FillOptions, output: FillOutputField, moreOutput: FillOutputField*): Bson =
+    JAggregates.fill(options, output, moreOutput: _*)
+
+  /**
+   * Creates a `\$fill` pipeline stage, which assigns values to fields when they are BSON `Null` or missing.
+   *
+   * @param options The fill options.
+   * @param output The non-empty `FillOutputField`s.
+   * @return The requested pipeline stage.
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/fill/ \$fill]]
+   * @note Requires MongoDB 5.3 or greater.
+   * @since 4.7
+   */
+  def fill(options: FillOptions, output: Iterable[_ <: FillOutputField]): Bson =
+    JAggregates.fill(options, output.asJava)
+
+  /**
+   * Creates a `\$search` pipeline stage supported by MongoDB Atlas.
+   * You may use the `\$meta: "searchScore"` expression, e.g., via [[Projections.metaSearchScore]],
+   * to extract the relevance score assigned to each found document.
+   *
+   * `Filters.text(String, TextSearchOptions)` is a legacy text search alternative.
+   *
+   * @param operator A search operator.
+   * @return The `\$search` pipeline stage.
+   * @see [[https://www.mongodb.com/docs/atlas/atlas-search/query-syntax/#-search \$search]]
+   * @see [[https://www.mongodb.com/docs/atlas/atlas-search/operators-and-collectors/#operators Search operators]]
+   * @see [[https://www.mongodb.com/docs/atlas/atlas-search/scoring/ Scoring]]
+   * @since 4.7
+   */
+  def search(operator: SearchOperator): Bson =
+    JAggregates.search(operator)
+
+  /**
+   * Creates a `\$search` pipeline stage supported by MongoDB Atlas.
+   * You may use the `\$meta: "searchScore"` expression, e.g., via [[Projections.metaSearchScore]],
+   * to extract the relevance score assigned to each found document.
+   *
+   * `Filters.text(String, TextSearchOptions)` is a legacy text search alternative.
+   *
+   * @param operator A search operator.
+   * @param options Optional `\$search` pipeline stage fields.
+   *                Specifying `SearchOptions.searchOptions` is equivalent to calling `Aggregates.search(SearchOperator)`.
+   * @return The `\$search` pipeline stage.
+   * @see [[https://www.mongodb.com/docs/atlas/atlas-search/query-syntax/#-search \$search]]
+   * @see [[https://www.mongodb.com/docs/atlas/atlas-search/operators-and-collectors/#operators Search operators]]
+   * @see [[https://www.mongodb.com/docs/atlas/atlas-search/scoring/ Scoring]]
+   * @since 4.7
+   */
+  def search(operator: SearchOperator, options: SearchOptions): Bson =
+    JAggregates.search(operator, options)
+
+  /**
+   * Creates a `\$search` pipeline stage supported by MongoDB Atlas.
+   * You may use the `\$meta: "searchScore"` expression, e.g., via [[Projections.metaSearchScore]],
+   * to extract the relevance score assigned to each found document.
+   *
+   * `Filters.text(String, TextSearchOptions)` is a legacy text search alternative.
+   *
+   * @param collector A search collector.
+   * @return The `\$search` pipeline stage.
+   * @see [[https://www.mongodb.com/docs/atlas/atlas-search/query-syntax/#-search \$search]]
+   * @see [[https://www.mongodb.com/docs/atlas/atlas-search/operators-and-collectors/#collectors Search collectors]]
+   * @see [[https://www.mongodb.com/docs/atlas/atlas-search/scoring/ Scoring]]
+   * @since 4.7
+   */
+  def search(collector: SearchCollector): Bson =
+    JAggregates.search(collector)
+
+  /**
+   * Creates a `\$search` pipeline stage supported by MongoDB Atlas.
+   * You may use the `\$meta: "searchScore"` expression, e.g., via [[Projections.metaSearchScore]],
+   * to extract the relevance score assigned to each found document.
+   *
+   * `Filters.text(String, TextSearchOptions)` is a legacy text search alternative.
+   *
+   * @param collector A search collector.
+   * @param options Optional `\$search` pipeline stage fields.
+   *                Specifying `SearchOptions.searchOptions` is equivalent to calling `Aggregates.search(SearchCollector)`.
+   * @return The `\$search` pipeline stage.
+   * @see [[https://www.mongodb.com/docs/atlas/atlas-search/query-syntax/#-search \$search]]
+   * @see [[https://www.mongodb.com/docs/atlas/atlas-search/operators-and-collectors/#collectors Search collectors]]
+   * @see [[https://www.mongodb.com/docs/atlas/atlas-search/scoring/ Scoring]]
+   * @since 4.7
+   */
+  def search(collector: SearchCollector, options: SearchOptions): Bson =
+    JAggregates.search(collector, options)
+
+  /**
+   * Creates a `\$searchMeta` pipeline stage supported by MongoDB Atlas.
+   * Unlike `\$search`, it does not return found documents; instead it returns metadata, which in case of using the `\$search` stage
+   * may be extracted by using the `$$SEARCH_META` variable, e.g., via [[Projections.computedSearchMeta]].
+   *
+   * @param operator A search operator.
+   * @return The `\$searchMeta` pipeline stage.
+   * @see [[https://www.mongodb.com/docs/atlas/atlas-search/query-syntax/#-searchmeta \$searchMeta]]
+   * @see [[https://www.mongodb.com/docs/atlas/atlas-search/operators-and-collectors/#operators Search operators]]
+   * @since 4.7
+   */
+  def searchMeta(operator: SearchOperator): Bson =
+    JAggregates.searchMeta(operator)
+
+  /**
+   * Creates a `\$searchMeta` pipeline stage supported by MongoDB Atlas.
+   * Unlike `\$search`, it does not return found documents; instead it returns metadata, which in case of using the `\$search` stage
+   * may be extracted by using the `$$SEARCH_META` variable, e.g., via [[Projections.computedSearchMeta]].
+   *
+   * @param operator A search operator.
+   * @param options Optional `\$searchMeta` pipeline stage fields.
+   *                Specifying `SearchOptions.searchOptions` is equivalent to calling `Aggregates.searchMeta(SearchOperator)`.
+   * @return The `\$searchMeta` pipeline stage.
+   * @see [[https://www.mongodb.com/docs/atlas/atlas-search/query-syntax/#-searchmeta \$searchMeta]]
+   * @see [[https://www.mongodb.com/docs/atlas/atlas-search/operators-and-collectors/#operators Search operators]]
+   * @since 4.7
+   */
+  def searchMeta(operator: SearchOperator, options: SearchOptions): Bson =
+    JAggregates.searchMeta(operator, options)
+
+  /**
+   * Creates a `\$searchMeta` pipeline stage supported by MongoDB Atlas.
+   * Unlike `\$search`, it does not return found documents; instead it returns metadata, which in case of using the `\$search` stage
+   * may be extracted by using the `$$SEARCH_META` variable, e.g., via [[Projections.computedSearchMeta]].
+   *
+   * @param collector A search collector.
+   * @return The `\$searchMeta` pipeline stage.
+   * @see [[https://www.mongodb.com/docs/atlas/atlas-search/query-syntax/#-searchmeta \$searchMeta]]
+   * @see [[https://www.mongodb.com/docs/atlas/atlas-search/operators-and-collectors/#collectors Search collectors]]
+   * @since 4.7
+   */
+  def searchMeta(collector: SearchCollector): Bson =
+    JAggregates.searchMeta(collector)
+
+  /**
+   * Creates a `\$searchMeta` pipeline stage supported by MongoDB Atlas.
+   * Unlike `\$search`, it does not return found documents; instead it returns metadata, which in case of using the `\$search` stage
+   * may be extracted by using the `$$SEARCH_META` variable, e.g., via [[Projections.computedSearchMeta]].
+   *
+   * @param collector A search collector.
+   * @param options Optional `\$searchMeta` pipeline stage fields.
+   *                Specifying `SearchOptions.searchOptions` is equivalent to calling `Aggregates.searchMeta(SearchCollector)`.
+   * @return The `\$searchMeta` pipeline stage.
+   * @see [[https://www.mongodb.com/docs/atlas/atlas-search/query-syntax/#-searchmeta \$searchMeta]]
+   * @see [[https://www.mongodb.com/docs/atlas/atlas-search/operators-and-collectors/#collectors Search collectors]]
+   * @since 4.7
+   */
+  def searchMeta(collector: SearchCollector, options: SearchOptions): Bson =
+    JAggregates.searchMeta(collector, options)
+
+  /**
+   * Creates a `\$vectorSearch` pipeline stage supported by MongoDB Atlas.
+   * You may use the `\$meta: "vectorSearchScore"` expression, e.g., via [[Projections.metaVectorSearchScore]],
+   * to extract the relevance score assigned to each found document.
+   *
+   * @param path The field to be searched.
+   * @param queryVector The query vector. The number of dimensions must match that of the `index`.
+   * @param index The name of the index to use.
+   * @param limit The limit on the number of documents produced by the pipeline stage.
+   * @param options Optional `\$vectorSearch` pipeline stage fields.
+   * @return The `\$vectorSearch` pipeline stage.
+   * @see [[https://www.mongodb.com/docs/atlas/atlas-vector-search/vector-search-stage/ \$vectorSearch]]
+   * @note Requires MongoDB 6.0.10 or greater.
+   * @since 4.11
+   */
+  def vectorSearch(
+      path: FieldSearchPath,
+      queryVector: Iterable[java.lang.Double],
+      index: String,
+      limit: Long,
+      options: VectorSearchOptions
+  ): Bson =
+    JAggregates.vectorSearch(path, queryVector.asJava, index, limit, options)
+
+  /**
+   * Creates an `\$unset` pipeline stage that removes/excludes fields from documents.
+   *
+   * @param fields the fields to exclude. May use dot notation.
+   * @return the `\$unset` pipeline stage
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/unset/ \$unset]]
+   * @since 4.8
+   */
+  def unset(fields: String*): Bson = JAggregates.unset(fields.asJava)
+
+  /**
+   * Creates a `\$geoNear` pipeline stage that outputs documents in order of nearest to farthest from a specified point.
+   *
+   * @param near The point for which to find the closest documents.
+   * @param distanceField The output field that contains the calculated distance.
+   *                      To specify a field within an embedded document, use dot notation.
+   * @param options [[GeoNearOptions]]
+   * @return the `\$geoNear` pipeline stage
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/geoNear/ \$geoNear]]
+   * @since 4.8
+   */
+  def geoNear(near: Point, distanceField: String, options: GeoNearOptions): Bson =
+    JAggregates.geoNear(near, distanceField, options)
+
+  /**
+   * Creates a `\$geoNear` pipeline stage that outputs documents in order of nearest to farthest from a specified point.
+   *
+   * @param near The point for which to find the closest documents.
+   * @param distanceField The output field that contains the calculated distance.
+   *                      To specify a field within an embedded document, use dot notation.
+   * @return the `\$geoNear` pipeline stage
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/geoNear/ \$geoNear]]
+   * @since 4.8
+   */
+  def geoNear(near: Point, distanceField: String): Bson =
+    JAggregates.geoNear(near, distanceField)
+
+  /**
+   * Creates a `\$documents` pipeline stage.
+   *
+   * @param documents the documents.
+   * @return the `\$documents` pipeline stage
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/documents/ \$documents]]
+   * @since 4.9
+   */
+  def documents(documents: Bson*): Bson = JAggregates.documents(documents.asJava)
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/BucketGranularity.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/BucketGranularity.scala
new file mode 100644
index 00000000000..9c3f9f83d34
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/BucketGranularity.scala
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.model
+
+import scala.util.Try
+
+import com.mongodb.client.model.{ BucketGranularity => JBucketGranularity }
+
+/**
+ * Granularity values for automatic bucketing.
+ *
+ * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/bucketAuto/ \$bucketAuto]]
+ * @see [[https://en.wikipedia.org/wiki/Preferred_number Preferred numbers]]
+ * @since 1.2
+ */
+object BucketGranularity {
+  val R5: BucketGranularity = JBucketGranularity.R5
+
+  val R10: BucketGranularity = JBucketGranularity.R10
+
+  val R20: BucketGranularity = JBucketGranularity.R20
+
+  val R40: BucketGranularity = JBucketGranularity.R40
+
+  val R80: BucketGranularity = JBucketGranularity.R80
+
+  val SERIES_125: BucketGranularity = JBucketGranularity.SERIES_125
+
+  val E6: BucketGranularity = JBucketGranularity.E6
+
+  val E12: BucketGranularity = JBucketGranularity.E12
+
+  val E24: BucketGranularity = JBucketGranularity.E24
+
+  val E48: BucketGranularity = JBucketGranularity.E48
+
+  val E96: BucketGranularity = JBucketGranularity.E96
+
+  val E192: BucketGranularity = JBucketGranularity.E192
+
+  val POWERSOF2: BucketGranularity = JBucketGranularity.POWERSOF2
+
+  /**
+   * Returns the BucketGranularity from the string value.
+   *
+   * @param value the string value.
+   * @return the BucketGranularity
+   */
+  def fromString(value: String): Try[BucketGranularity] = Try(JBucketGranularity.fromString(value))
+
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/Collation.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/Collation.scala
new file mode 100644
index 00000000000..fd302b489f3
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/Collation.scala
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.model
+
+import com.mongodb.client.model.{ Collation => JCollation }
+
+/**
+ * The options regarding collation support in MongoDB 3.4+
+ *
+ * @note Requires MongoDB 3.4 or greater
+ * @since 1.2
+ */
+object Collation {
+
+  /**
+   * Creates a builder for creating the Collation.
+   *
+   * @return a new Builder for creating the Collation.
+   */
+  def builder(): Builder = JCollation.builder()
+
+  /**
+   * Collation builder type
+   */
+  type Builder = JCollation.Builder
+
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/CollationAlternate.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/CollationAlternate.scala
new file mode 100644
index 00000000000..995bc48563d
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/CollationAlternate.scala
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.model
+
+import scala.util.Try
+
+import com.mongodb.client.model.{ CollationAlternate => JCollationAlternate }
+
+/**
+ * Collation support allows the specific configuration of whether or not spaces and punctuation are considered base characters.
+ *
+ * @note Requires MongoDB 3.4 or greater
+ * @since 1.2
+ */
+object CollationAlternate {
+
+  /**
+   * Non-ignorable
+   *
+   * Spaces and punctuation are considered base characters
+   */
+  val NON_IGNORABLE: CollationAlternate = JCollationAlternate.NON_IGNORABLE
+
+  /**
+   * Shifted
+   *
+   * Spaces and punctuation are not considered base characters, and are only distinguished when the collation strength is > 3
+   *
+   * @see CollationMaxVariable
+   */
+  val SHIFTED: CollationAlternate = JCollationAlternate.SHIFTED
+
+  /**
+   * Returns the CollationAlternate from the string value.
+   *
+   * @param collationAlternate the string value.
+   * @return the CollationAlternate
+   */
+  def fromString(collationAlternate: String): Try[CollationAlternate] =
+    Try(JCollationAlternate.fromString(collationAlternate))
+
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/CollationCaseFirst.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/CollationCaseFirst.scala
new file mode 100644
index 00000000000..214bf590a84
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/CollationCaseFirst.scala
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.model
+
+import scala.util.Try
+
+import com.mongodb.client.model.{ CollationCaseFirst => JCollationCaseFirst }
+
+/**
+ * Collation support allows the specific configuration of how character cases are handled.
+ *
+ * @note Requires MongoDB 3.4 or greater
+ * @since 1.2
+ */
+object CollationCaseFirst {
+
+  /**
+   * Uppercase first
+   */
+  val UPPER: CollationCaseFirst = JCollationCaseFirst.UPPER
+
+  /**
+   * Lowercase first
+   */
+  val LOWER: CollationCaseFirst = JCollationCaseFirst.LOWER
+
+  /**
+   * Off
+   */
+  val OFF: CollationCaseFirst = JCollationCaseFirst.OFF
+
+  /**
+   * Returns the CollationCaseFirst from the string value.
+   *
+   * @param collationCaseFirst the string value.
+   * @return the CollationCaseFirst
+   */
+  def fromString(collationCaseFirst: String): Try[CollationCaseFirst] =
+    Try(JCollationCaseFirst.fromString(collationCaseFirst))
+
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/CollationMaxVariable.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/CollationMaxVariable.scala
new file mode 100644
index 00000000000..6773fb3d20c
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/CollationMaxVariable.scala
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.model
+
+import scala.util.Try
+
+import com.mongodb.client.model.{ CollationMaxVariable => JCollationMaxVariable }
+
+/**
+ * Collation support allows the specific configuration of whether or not spaces and punctuation are considered base characters.
+ *
+ * `CollationMaxVariable` controls which characters are affected by [[CollationAlternate$.SHIFTED]].
+ *
+ * @note Requires MongoDB 3.4 or greater
+ * @since 1.2
+ */
+object CollationMaxVariable {
+
+  /**
+   * Punct
+   *
+   * Both punctuation and spaces are affected.
+   */
+  val PUNCT: CollationMaxVariable = JCollationMaxVariable.PUNCT
+
+  /**
+   * Space
+   *
+   * Only spaces are affected.
+   */
+  val SPACE: CollationMaxVariable = JCollationMaxVariable.SPACE
+
+  /**
+   * Returns the CollationMaxVariable from the string value.
+   *
+   * @param collationMaxVariable the string value.
+   * @return the CollationMaxVariable
+   */
+  def fromString(collationMaxVariable: String): Try[CollationMaxVariable] =
+    Try(JCollationMaxVariable.fromString(collationMaxVariable))
+
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/CollationStrength.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/CollationStrength.scala
new file mode 100644
index 00000000000..d54a8b4bdaa
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/CollationStrength.scala
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.model
+
+import scala.util.Try
+
+import com.mongodb.client.model.{ CollationStrength => JCollationStrength }
+
+/**
+ * Collation support allows the specific configuration of how character cases are handled.
+ *
+ * @note Requires MongoDB 3.4 or greater
+ * @since 1.2
+ */
+object CollationStrength {
+
+  /**
+   * Strongest level; denotes differences between base characters.
+   */
+  val PRIMARY: CollationStrength = JCollationStrength.PRIMARY
+
+  /**
+   * Accents in characters are considered secondary differences
+   */
+  val SECONDARY: CollationStrength = JCollationStrength.SECONDARY
+
+  /**
+   * Upper and lower case differences in characters are distinguished at the tertiary level. The server default.
+   */
+  val TERTIARY: CollationStrength = JCollationStrength.TERTIARY
+
+  /**
+   * When punctuation is ignored at levels 1-3, an additional level can be used to distinguish words with and without punctuation.
+   */
+  val QUATERNARY: CollationStrength = JCollationStrength.QUATERNARY
+
+  /**
+   * When all other levels are equal, the identical level is used as a tiebreaker.
+   * The Unicode code point values of the NFD form of each string are compared at this level, just in case there is no difference at
+   * levels 1-4
+   */
+  val IDENTICAL: CollationStrength = JCollationStrength.IDENTICAL
+
+  /**
+   * Returns the CollationStrength from the int value.
+   *
+   * @param collationStrength the int value.
+   * @return the CollationStrength
+   */
+  def fromInt(collationStrength: Int): Try[CollationStrength] = Try(JCollationStrength.fromInt(collationStrength))
+
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/Filters.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/Filters.scala
new file mode 100644
index 00000000000..cff938d6842
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/Filters.scala
@@ -0,0 +1,714 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.model
+
+import java.lang
+
+import scala.collection.JavaConverters._
+import scala.util.matching.Regex
+
+import org.bson._
+import com.mongodb.client.model.geojson.{ Geometry, Point }
+import com.mongodb.client.model.{ Filters => JFilters }
+
+import org.mongodb.scala.bson.conversions.Bson
+
+//scalastyle:off null number.of.methods
+/**
+ * A factory for query filters. A convenient way to use this class is to statically import all of its methods, which allows usage like:
+ *
+ * `collection.find(and(eq("x", 1), lt("y", 3)))`
+ *
+ * @since 1.0
+ */
+object Filters {
+
+  /**
+   * Creates a filter that matches all documents where the value of the field name equals the specified value. Note that this doesn't
+   * actually generate a `\$eq` operator, as the query language doesn't require it.
+   *
+   * @param fieldName the field name
+   * @param value the value
+   * @tparam TItem the value type
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/eq \$eq]]
+   */
+  def eq[TItem](fieldName: String, value: TItem): Bson = JFilters.eq(fieldName, value)
+
+  /**
+   * Allows the use of aggregation expressions within the query language.
+   *
+   * @param expression the aggregation expression
+   * @tparam TExpression the expression type
+   * @return the filter
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def expr[TExpression](expression: TExpression): Bson = JFilters.expr(expression)
+
+  /**
+   * Creates a filter that matches all documents where the value of the field name equals the specified value. Note that this doesn't
+   * actually generate a `\$eq` operator, as the query language doesn't require it.
+   *
+   * A friendly alias for the `eq` method.
+   *
+   * @param fieldName the field name
+   * @param value the value
+   * @tparam TItem the value type
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/eq \$eq]]
+   */
+  def equal[TItem](fieldName: String, value: TItem): Bson = eq(fieldName, value)
+
+  /**
+   * Creates a filter that matches all documents that validate against the given JSON schema document.
+   *
+   * @param schema the JSON schema to validate against
+   * @return the filter
+   * @since 2.2
+   * @note Requires MongoDB 3.6 or greater
+   */
+  def jsonSchema(schema: Bson): Bson = JFilters.jsonSchema(schema)
+
+  /**
+   * Creates a filter that matches all documents where the value of the field name does not equal the specified value.
+   *
+   * @param fieldName the field name
+   * @param value the value
+   * @tparam TItem the value type
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/ne \$ne]]
+   */
+  def ne[TItem](fieldName: String, value: TItem): Bson = JFilters.ne(fieldName, value)
+
+  /**
+   * Creates a filter that matches all documents where the value of the field name does not equal the specified value.
+   *
+   * A friendly alias for the `ne` method.
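+   *
+   * For example (an illustrative sketch; the field name and value are assumptions):
+   * {{{
+   *   Filters.notEqual("qty", 20) // renders as {qty: {\$ne: 20}}
+   * }}}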
+   *
+   * @param fieldName the field name
+   * @param value the value
+   * @tparam TItem the value type
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/ne \$ne]]
+   */
+  def notEqual[TItem](fieldName: String, value: TItem): Bson = JFilters.ne(fieldName, value)
+
+  /**
+   * Creates a filter that matches all documents where the value of the given field is greater than the specified value.
+   *
+   * @param fieldName the field name
+   * @param value the value
+   * @tparam TItem the value type
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/gt \$gt]]
+   */
+  def gt[TItem](fieldName: String, value: TItem): Bson = JFilters.gt(fieldName, value)
+
+  /**
+   * Creates a filter that matches all documents where the value of the given field is less than the specified value.
+   *
+   * @param fieldName the field name
+   * @param value the value
+   * @tparam TItem the value type
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/lt \$lt]]
+   */
+  def lt[TItem](fieldName: String, value: TItem): Bson = JFilters.lt(fieldName, value)
+
+  /**
+   * Creates a filter that matches all documents where the value of the given field is greater than or equal to the specified value.
+   *
+   * @param fieldName the field name
+   * @param value the value
+   * @tparam TItem the value type
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/gte \$gte]]
+   */
+  def gte[TItem](fieldName: String, value: TItem): Bson = JFilters.gte(fieldName, value)
+
+  /**
+   * Creates a filter that matches all documents where the value of the given field is less than or equal to the specified value.
+   *
+   * @param fieldName the field name
+   * @param value the value
+   * @tparam TItem the value type
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/lte \$lte]]
+   */
+  def lte[TItem](fieldName: String, value: TItem): Bson = JFilters.lte(fieldName, value)
+
+  /**
+   * Creates a filter that matches all documents where the value of a field equals any value in the list of specified values.
+   *
+   * @param fieldName the field name
+   * @param values the list of values
+   * @tparam TItem the value type
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/in \$in]]
+   */
+  def in[TItem](fieldName: String, values: TItem*): Bson = JFilters.in(fieldName, values.asJava)
+
+  /**
+   * Creates a filter that matches all documents where the value of a field does not equal any of the specified values or does not exist.
+   *
+   * @param fieldName the field name
+   * @param values the list of values
+   * @tparam TItem the value type
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/nin \$nin]]
+   */
+  def nin[TItem](fieldName: String, values: TItem*): Bson = JFilters.nin(fieldName, values.asJava)
+
+  /**
+   * Creates a filter that performs a logical AND of the provided list of filters. Note that this will only generate a "\$and"
+   * operator if absolutely necessary, as the query language implicitly ands together all the keys. In other words, a query expression
+   * like:
+   *
+   * {{{
+   *   and(eq("x", 1), lt("y", 3))
+   * }}}
+   *
+   * will generate a MongoDB query like:
+   *
+   * {{{
+   *   {x : 1, y : {\$lt : 3}}
+   * }}}
+   *
+   * @param filters the list of filters to and together
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/and \$and]]
+   */
+  def and(filters: Bson*): Bson = JFilters.and(filters.asJava)
+
+  /**
+   * Creates a filter that performs a logical OR of the provided list of filters.
+   *
+   * @param filters the list of filters to or together
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/or \$or]]
+   */
+  def or(filters: Bson*): Bson = JFilters.or(filters.asJava)
+
+  /**
+   * Creates a filter that matches all documents that do not match the passed in filter.
+   * Requires the field name to be passed as part of the value passed in and lifts it to create a valid "\$not" query:
+   *
+   * `not(eq("x", 1))`
+   *
+   * will generate a MongoDB query like:
+   * `{x : {\$not: {\$eq : 1}}}`
+   *
+   * @param filter the value
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/not \$not]]
+   */
+  def not(filter: Bson): Bson = JFilters.not(filter)
+
+  /**
+   * Creates a filter that performs a logical NOR operation on all the specified filters.
+   *
+   * @param filters the list of filters
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/nor \$nor]]
+   */
+  def nor(filters: Bson*): Bson = JFilters.nor(filters.asJava)
+
+  /**
+   * Creates a filter that matches all documents that contain the given field.
+   *
+   * @param fieldName the field name
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/exists \$exists]]
+   */
+  def exists(fieldName: String): Bson = JFilters.exists(fieldName)
+
+  /**
+   * Creates a filter that matches all documents that either contain or do not contain the given field, depending on the value of the
+   * exists parameter.
+   *
+   * @param fieldName the field name
+   * @param exists true to check for existence, false to check for absence
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/exists \$exists]]
+   */
+  def exists(fieldName: String, exists: Boolean): Bson = JFilters.exists(fieldName, exists)
+
+  /**
+   * Creates a filter that matches all documents where the value of the field is of the specified BSON type.
+   *
+   * @param fieldName the field name
+   * @param bsonType the BSON type
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/type \$type]]
+   */
+  def `type`(fieldName: String, bsonType: BsonType): Bson = JFilters.`type`(fieldName, bsonType) // scalastyle:ignore
+
+  /**
+   * Creates a filter that matches all documents where the value of the field is of the specified BSON type.
+   *
+   * A friendly alias for the `type` method.
+   *
+   * @param fieldName the field name
+   * @param bsonType the BSON type
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/type \$type]]
+   */
+  def bsonType(fieldName: String, bsonType: BsonType): Bson = JFilters.`type`(fieldName, bsonType)
+
+  /**
+   * Creates a filter that matches all documents where the value of a field divided by a divisor has the specified remainder (i.e. perform
+   * a modulo operation to select documents).
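+   *
+   * For example (an illustrative sketch; the field name and operands are assumptions):
+   * {{{
+   *   Filters.mod("qty", 4, 0) // matches documents where qty % 4 == 0
+   * }}}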
+   *
+   * @param fieldName the field name
+   * @param divisor the modulus
+   * @param remainder the remainder
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/mod \$mod]]
+   */
+  def mod(fieldName: String, divisor: Long, remainder: Long): Bson = JFilters.mod(fieldName, divisor, remainder)
+
+  /**
+   * Creates a filter that matches all documents where the value of the field matches the given regular expression pattern.
+   *
+   * @param fieldName the field name
+   * @param pattern the pattern
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/regex \$regex]]
+   */
+  def regex(fieldName: String, pattern: String): Bson = JFilters.regex(fieldName, pattern)
+
+  /**
+   * Creates a filter that matches all documents where the value of the field matches the given regular expression pattern with the given
+   * options applied.
+   *
+   * @param fieldName the field name
+   * @param pattern the pattern
+   * @param options the options
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/regex \$regex]]
+   */
+  def regex(fieldName: String, pattern: String, options: String): Bson =
+    JFilters.regex(fieldName, pattern, options)
+
+  /**
+   * Creates a filter that matches all documents where the value of the field matches the given regular expression pattern.
+   *
+   * @param fieldName the field name
+   * @param regex the regex
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/regex \$regex]]
+   * @since 1.0
+   */
+  def regex(fieldName: String, regex: Regex): Bson = JFilters.regex(fieldName, regex.pattern)
+
+  /**
+   * Creates a filter that matches all documents matching the given search term.
+   * You may use [[Projections.metaTextScore]] to extract the relevance score assigned to each matched document.
+   *
+   * `Aggregates.search(SearchOperator, SearchOptions)` / `Aggregates.search(SearchCollector, SearchOptions)`
+   * is a more powerful full-text search alternative.
+   *
+   * @param search the search term
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/text \$text]]
+   */
+  def text(search: String): Bson = JFilters.text(search)
+
+  /**
+   * Creates a filter that matches all documents matching the given search term using the given language.
+   * You may use [[Projections.metaTextScore]] to extract the relevance score assigned to each matched document.
+   *
+   * `Aggregates.search(SearchOperator, SearchOptions)` / `Aggregates.search(SearchCollector, SearchOptions)`
+   * is a more powerful full-text search alternative.
+   *
+   * @param search the search term
+   * @param textSearchOptions the text search options to use
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/text \$text]]
+   * @since 1.1
+   */
+  def text(search: String, textSearchOptions: TextSearchOptions): Bson = JFilters.text(search, textSearchOptions)
+
+  /**
+   * Creates a filter that matches all documents for which the given expression is true.
+   *
+   * @param javaScriptExpression the JavaScript expression
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/where \$where]]
+   */
+  def where(javaScriptExpression: String): Bson = JFilters.where(javaScriptExpression)
+
+  /**
+   * Creates a filter that matches all documents where the value of a field is an array that contains all the specified values.
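+   *
+   * For example (an illustrative sketch; the field name and values are assumptions):
+   * {{{
+   *   Filters.all("tags", "ssl", "security") // matches documents whose tags array contains both values
+   * }}}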
+   *
+   * @param fieldName the field name
+   * @param values the list of values
+   * @tparam TItem the value type
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/all \$all]]
+   */
+  def all[TItem](fieldName: String, values: TItem*): Bson = JFilters.all(fieldName, values.toList.asJava)
+
+  /**
+   * Creates a filter that matches all documents containing a field that is an array where at least one member of the array matches the
+   * given filter.
+   *
+   * @param fieldName the field name
+   * @param filter the filter to apply to each element
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/elemMatch \$elemMatch]]
+   */
+  def elemMatch(fieldName: String, filter: Bson): Bson = JFilters.elemMatch(fieldName, filter)
+
+  /**
+   * Creates a filter that matches all documents where the value of a field is an array of the specified size.
+   *
+   * @param fieldName the field name
+   * @param size the size of the array
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/size \$size]]
+   */
+  def size(fieldName: String, size: Int): Bson = JFilters.size(fieldName, size)
+
+  /**
+   * Creates a filter that matches all documents where all of the bit positions are clear in the field.
+   *
+   * @note Requires MongoDB 3.2 or greater
+   * @param fieldName the field name
+   * @param bitmask the bitmask
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/bitsAllClear \$bitsAllClear]]
+   * @since 1.1
+   */
+  def bitsAllClear(fieldName: String, bitmask: Long): Bson = JFilters.bitsAllClear(fieldName, bitmask)
+
+  /**
+   * Creates a filter that matches all documents where all of the bit positions are set in the field.
+   *
+   * @note Requires MongoDB 3.2 or greater
+   * @param fieldName the field name
+   * @param bitmask the bitmask
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/bitsAllSet \$bitsAllSet]]
+   * @since 1.1
+   */
+  def bitsAllSet(fieldName: String, bitmask: Long): Bson = JFilters.bitsAllSet(fieldName, bitmask)
+
+  /**
+   * Creates a filter that matches all documents where any of the bit positions are clear in the field.
+   *
+   * @note Requires MongoDB 3.2 or greater
+   * @param fieldName the field name
+   * @param bitmask the bitmask
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/bitsAnyClear \$bitsAnyClear]]
+   * @since 1.1
+   */
+  def bitsAnyClear(fieldName: String, bitmask: Long): Bson = JFilters.bitsAnyClear(fieldName, bitmask)
+
+  /**
+   * Creates a filter that matches all documents where any of the bit positions are set in the field.
+   *
+   * @note Requires MongoDB 3.2 or greater
+   * @param fieldName the field name
+   * @param bitmask the bitmask
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/bitsAnySet \$bitsAnySet]]
+   * @since 1.1
+   */
+  def bitsAnySet(fieldName: String, bitmask: Long): Bson = JFilters.bitsAnySet(fieldName, bitmask)
+
+  /**
+   * Creates a filter that matches all documents containing a field with geospatial data that exists entirely within the specified shape.
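+   *
+   * For example (an illustrative sketch; the field name and the triangular region are assumptions, built with
+   * `Polygon` and `Position` from `com.mongodb.client.model.geojson`):
+   * {{{
+   *   val triangle = new Polygon(java.util.Arrays.asList(
+   *     new Position(0.0, 0.0), new Position(6.0, 0.0), new Position(3.0, 6.0), new Position(0.0, 0.0)))
+   *   Filters.geoWithin("loc", triangle)
+   * }}}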
+   *
+   * @param fieldName the field name
+   * @param geometry the bounding GeoJSON geometry object
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/geoWithin/ \$geoWithin]]
+   */
+  def geoWithin(fieldName: String, geometry: Geometry): Bson = JFilters.geoWithin(fieldName, geometry)
+
+  /**
+   * Creates a filter that matches all documents containing a field with geospatial data that exists entirely within the specified shape.
+   *
+   * @param fieldName the field name
+   * @param geometry the bounding GeoJSON geometry object
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/geoWithin/ \$geoWithin]]
+   */
+  def geoWithin(fieldName: String, geometry: Bson): Bson = JFilters.geoWithin(fieldName, geometry)
+
+  /**
+   * Creates a filter that matches all documents containing a field with grid coordinates data that exist entirely within the specified
+   * box.
+   *
+   * @param fieldName the field name
+   * @param lowerLeftX the lower left x coordinate of the box
+   * @param lowerLeftY the lower left y coordinate of the box
+   * @param upperRightX the upper right x coordinate of the box
+   * @param upperRightY the upper right y coordinate of the box
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/geoWithin/ \$geoWithin]]
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/box/#op._S_box \$box]]
+   */
+  def geoWithinBox(
+      fieldName: String,
+      lowerLeftX: Double,
+      lowerLeftY: Double,
+      upperRightX: Double,
+      upperRightY: Double
+  ): Bson =
+    JFilters.geoWithinBox(fieldName, lowerLeftX, lowerLeftY, upperRightX, upperRightY)
+
+  /**
+   * Creates a filter that matches all documents containing a field with grid coordinates data that exist entirely within the specified
+   * polygon.
+   *
+   * @param fieldName the field name
+   * @param points a Seq of pairs of x, y coordinates. Any extra dimensions are ignored
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/geoWithin/ \$geoWithin]]
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/polygon/#op._S_polygon \$polygon]]
+   */
+  def geoWithinPolygon(fieldName: String, points: Seq[Seq[Double]]): Bson =
+    JFilters.geoWithinPolygon(fieldName, points.map(_.asInstanceOf[Seq[lang.Double]].asJava).asJava)
+
+  /**
+   * Creates a filter that matches all documents containing a field with grid coordinates data that exist entirely within the specified
+   * circle.
+   *
+   * @param fieldName the field name
+   * @param x the x coordinate of the circle
+   * @param y the y coordinate of the circle
+   * @param radius the radius of the circle, as measured in the units used by the coordinate system
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/geoWithin/ \$geoWithin]]
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/center/#op._S_center \$center]]
+   */
+  def geoWithinCenter(fieldName: String, x: Double, y: Double, radius: Double): Bson =
+    JFilters.geoWithinCenter(fieldName, x, y, radius)
+
+  /**
+   * Creates a filter that matches all documents containing a field with geospatial data (GeoJSON or legacy coordinate pairs) that exist
+   * entirely within the specified circle, using spherical geometry. If using longitude and latitude, specify longitude first.
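+   *
+   * For example (an illustrative sketch; the field name, point, and the 6378.1 km Earth radius are assumptions):
+   * {{{
+   *   // documents within roughly 10 km of (-73.93, 40.82); the radius is distance / Earth radius, in radians
+   *   Filters.geoWithinCenterSphere("loc", -73.93, 40.82, 10.0 / 6378.1)
+   * }}}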
+   *
+   * @param fieldName the field name
+   * @param x the x coordinate of the circle
+   * @param y the y coordinate of the circle
+   * @param radius the radius of the circle, in radians
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/geoWithin/ \$geoWithin]]
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/centerSphere/#op._S_centerSphere \$centerSphere]]
+   */
+  def geoWithinCenterSphere(fieldName: String, x: Double, y: Double, radius: Double): Bson =
+    JFilters.geoWithinCenterSphere(fieldName, x, y, radius)
+
+  /**
+   * Creates a filter that matches all documents containing a field with geospatial data that intersects with the specified shape.
+   *
+   * @param fieldName the field name
+   * @param geometry the bounding GeoJSON geometry object
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/geoIntersects/ \$geoIntersects]]
+   */
+  def geoIntersects(fieldName: String, geometry: Bson): Bson = JFilters.geoIntersects(fieldName, geometry)
+
+  /**
+   * Creates a filter that matches all documents containing a field with geospatial data that intersects with the specified shape.
+   *
+   * @param fieldName the field name
+   * @param geometry the bounding GeoJSON geometry object
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/geoIntersects/ \$geoIntersects]]
+   */
+  def geoIntersects(fieldName: String, geometry: Geometry): Bson = JFilters.geoIntersects(fieldName, geometry)
+
+  /**
+   * Creates a filter that matches all documents containing a field with geospatial data that is near the specified GeoJSON point.
+   *
+   * @param fieldName the field name
+   * @param geometry the bounding GeoJSON geometry object
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/near/ \$near]]
+   */
+  def near(fieldName: String, geometry: Point): Bson = JFilters.near(fieldName, geometry, null, null)
+
+  /**
+   * Creates a filter that matches all documents containing a field with geospatial data that is near the specified GeoJSON point.
+   *
+   * @param fieldName the field name
+   * @param geometry the bounding GeoJSON geometry object
+   * @param maxDistance the optional maximum distance from the point, in meters
+   * @param minDistance the optional minimum distance from the point, in meters
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/near/ \$near]]
+   */
+  def near(fieldName: String, geometry: Point, maxDistance: Option[Double], minDistance: Option[Double]): Bson = {
+    JFilters.near(fieldName, geometry, maxDistance.asJava, minDistance.asJava)
+  }
+
+  /**
+   * Creates a filter that matches all documents containing a field with geospatial data that is near the specified GeoJSON point.
+   *
+   * @param fieldName the field name
+   * @param geometry the bounding GeoJSON geometry object
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/near/ \$near]]
+   */
+  def near(fieldName: String, geometry: Bson): Bson = JFilters.near(fieldName, geometry, null, null)
+
+  /**
+   * Creates a filter that matches all documents containing a field with geospatial data that is near the specified GeoJSON point.
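+   *
+   * For example (an illustrative sketch; the field name, coordinates, and distances are assumptions; the GeoJSON
+   * point is parsed from raw JSON):
+   * {{{
+   *   val point = org.bson.BsonDocument.parse("""{ type: "Point", coordinates: [ -73.93, 40.82 ] }""")
+   *   Filters.near("loc", point, maxDistance = Some(5000.0), minDistance = None) // within 5 km
+   * }}}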
+   *
+   * @param fieldName the field name
+   * @param geometry the bounding GeoJSON geometry object
+   * @param maxDistance the optional maximum distance from the point, in meters
+   * @param minDistance the optional minimum distance from the point, in meters
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/near/ \$near]]
+   */
+  def near(fieldName: String, geometry: Bson, maxDistance: Option[Double], minDistance: Option[Double]): Bson = {
+    JFilters.near(fieldName, geometry, maxDistance.asJava, minDistance.asJava)
+  }
+
+  /**
+   * Creates a filter that matches all documents containing a field with geospatial data that is near the specified point.
+   *
+   * @param fieldName the field name
+   * @param x the x coordinate
+   * @param y the y coordinate
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/near/ \$near]]
+   */
+  def near(fieldName: String, x: Double, y: Double): Bson = JFilters.near(fieldName, x, y, null, null)
+
+  /**
+   * Creates a filter that matches all documents containing a field with geospatial data that is near the specified point.
+   *
+   * @param fieldName the field name
+   * @param x the x coordinate
+   * @param y the y coordinate
+   * @param maxDistance the optional maximum distance from the point, in radians
+   * @param minDistance the optional minimum distance from the point, in radians
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/near/ \$near]]
+   */
+  def near(fieldName: String, x: Double, y: Double, maxDistance: Option[Double], minDistance: Option[Double]): Bson = {
+    JFilters.near(fieldName, x, y, maxDistance.asJava, minDistance.asJava)
+  }
+
+  /**
+   * Creates a filter that matches all documents containing a field with geospatial data that is near the specified GeoJSON point using
+   * spherical geometry.
+   *
+   * @param fieldName the field name
+   * @param geometry the bounding GeoJSON geometry object
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/near/ \$near]]
+   */
+  def nearSphere(fieldName: String, geometry: Point): Bson = JFilters.nearSphere(fieldName, geometry, null, null)
+
+  /**
+   * Creates a filter that matches all documents containing a field with geospatial data that is near the specified GeoJSON point using
+   * spherical geometry.
+   *
+   * @param fieldName the field name
+   * @param geometry the bounding GeoJSON geometry object
+   * @param maxDistance the optional maximum distance from the point, in meters
+   * @param minDistance the optional minimum distance from the point, in meters
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/near/ \$near]]
+   */
+  def nearSphere(fieldName: String, geometry: Point, maxDistance: Option[Double], minDistance: Option[Double]): Bson = {
+    JFilters.nearSphere(fieldName, geometry, maxDistance.asJava, minDistance.asJava)
+  }
+
+  /**
+   * Creates a filter that matches all documents containing a field with geospatial data that is near the specified GeoJSON point using
+   * spherical geometry.
+   *
+   * @param fieldName the field name
+   * @param geometry the bounding GeoJSON geometry object
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/near/ \$near]]
+   */
+  def nearSphere(fieldName: String, geometry: Bson): Bson = JFilters.nearSphere(fieldName, geometry, null, null)
+
+  /**
+   * Creates a filter that matches all documents containing a field with geospatial data that is near the specified GeoJSON point using
+   * spherical geometry.
+   *
+   * @param fieldName the field name
+   * @param geometry the bounding GeoJSON geometry object
+   * @param maxDistance the optional maximum distance from the point, in meters
+   * @param minDistance the optional minimum distance from the point, in meters
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/near/ \$near]]
+   */
+  def nearSphere(fieldName: String, geometry: Bson, maxDistance: Option[Double], minDistance: Option[Double]): Bson = {
+    JFilters.nearSphere(fieldName, geometry, maxDistance.asJava, minDistance.asJava)
+  }
+
+  /**
+   * Creates a filter that matches all documents containing a field with geospatial data that is near the specified point using
+   * spherical geometry.
+   *
+   * @param fieldName the field name
+   * @param x the x coordinate
+   * @param y the y coordinate
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/near/ \$near]]
+   */
+  def nearSphere(fieldName: String, x: Double, y: Double): Bson = JFilters.nearSphere(fieldName, x, y, null, null)
+
+  /**
+   * Creates a filter that matches all documents containing a field with geospatial data that is near the specified point using
+   * spherical geometry.
+   *
+   * @param fieldName the field name
+   * @param x the x coordinate
+   * @param y the y coordinate
+   * @param maxDistance the optional maximum distance from the point, in radians
+   * @param minDistance the optional minimum distance from the point, in radians
+   * @return the filter
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/near/ \$near]]
+   */
+  def nearSphere(
+      fieldName: String,
+      x: Double,
+      y: Double,
+      maxDistance: Option[Double],
+      minDistance: Option[Double]
+  ): Bson = {
+    JFilters.nearSphere(fieldName, x, y, maxDistance.asJava, minDistance.asJava)
+  }
+
+  /**
+   * Creates an empty filter that will match all documents.
+   *
+   * @return the filter
+   * @since 4.2
+   */
+  def empty(): Bson = JFilters.empty()
+
+  private implicit class ScalaOptionDoubleToJavaDoubleOrNull(maybeDouble: Option[Double]) {
+    def asJava: java.lang.Double = maybeDouble.map(double2Double).orNull
+  }
+
+}
+//scalastyle:on null number.of.methods
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/Indexes.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/Indexes.scala
new file mode 100644
index 00000000000..00680c6ec50
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/Indexes.scala
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.model
+
+import scala.collection.JavaConverters._
+
+import com.mongodb.client.model.{ Indexes => JIndexes }
+
+import org.mongodb.scala.bson.conversions.Bson
+
+/**
+ * A factory for defining index keys. A convenient way to use this class is to statically import all of its methods, which allows usage
+ * like:
+ *
+ * {{{
+ *   collection.createIndex(compoundIndex(ascending("x"), descending("y")))
+ * }}}
+ * @since 1.0
+ */
+object Indexes {
+
+  /**
+   * Create an index key for an ascending index on the given fields.
+   *
+   * @param fieldNames the field names, which must contain at least one
+   * @return the index specification
+   * @see [[https://www.mongodb.com/docs/manual/core/indexes indexes]]
+   */
+  def ascending(fieldNames: String*): Bson = JIndexes.ascending(fieldNames.asJava)
+
+  /**
+   * Create an index key for a descending index on the given fields.
+   *
+   * @param fieldNames the field names, which must contain at least one
+   * @return the index specification
+   * @see [[https://www.mongodb.com/docs/manual/core/indexes indexes]]
+   */
+  def descending(fieldNames: String*): Bson = JIndexes.descending(fieldNames.asJava)
+
+  /**
+   * Create an index key for a 2dsphere index on the given fields.
+   *
+   * @param fieldNames the field names, which must contain at least one
+   * @return the index specification
+   * @see [[https://www.mongodb.com/docs/manual/core/2dsphere 2dsphere Index]]
+   */
+  def geo2dsphere(fieldNames: String*): Bson = JIndexes.geo2dsphere(fieldNames.asJava)
+
+  /**
+   * Create an index key for a 2d index on the given field.
+   *

+ * Note: A 2d index is for data stored as points on a two-dimensional plane. + * The 2d index is intended for legacy coordinate pairs used in MongoDB 2.2 and earlier. + *

+   *
+   * @param fieldName the field to create a 2d index on
+   * @return the index specification
+   * @see [[https://www.mongodb.com/docs/manual/core/2d 2d index]]
+   */
+  def geo2d(fieldName: String): Bson = JIndexes.geo2d(fieldName)
+
+  /**
+   * Create an index key for a text index on the given field.
+   *
+   * @param fieldName the field to create a text index on
+   * @return the index specification
+   * @see [[https://www.mongodb.com/docs/manual/core/text text index]]
+   */
+  def text(fieldName: String): Bson = JIndexes.text(fieldName)
+
+  /**
+   * Create an index key for a hashed index on the given field.
+   *
+   * @param fieldName the field to create a hashed index on
+   * @return the index specification
+   * @see [[https://www.mongodb.com/docs/manual/core/hashed hashed index]]
+   */
+  def hashed(fieldName: String): Bson = JIndexes.hashed(fieldName)
+
+  /**
+   * Create a compound index specification. If any field names are repeated, the last one takes precedence.
+   *
+   * @param indexes the index specifications
+   * @return the compound index specification
+   * @see [[https://www.mongodb.com/docs/manual/core/index-compound compoundIndex]]
+   */
+  def compoundIndex(indexes: Bson*): Bson = JIndexes.compoundIndex(indexes.asJava)
+
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/MapReduceAction.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/MapReduceAction.scala
new file mode 100644
index 00000000000..e80fbed5022
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/MapReduceAction.scala
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.model
+
+import com.mongodb.client.model.{ MapReduceAction => JMapReduceAction }
+
+/**
+ * The map reduce to collection actions.
+ *
+ * These actions are only available when passing out a collection that already exists. This option is not available on secondary members
+ * of replica sets. The enum values dictate what to do with the output collection if it already exists when the map reduce is run.
+ *
+ * @since 1.0
+ */
+@deprecated("Superseded by aggregate", "4.4.0")
+object MapReduceAction {
+
+  /**
+   * Replace the contents of the `collectionName` if the collection with the `collectionName` exists.
+   */
+  val REPLACE: JMapReduceAction = JMapReduceAction.REPLACE
+
+  /**
+   * Merge the new result with the existing result if the output collection already exists. If an existing document has the same key
+   * as the new result, overwrite that existing document.
+   */
+  val MERGE: JMapReduceAction = JMapReduceAction.MERGE
+
+  /**
+   * Merge the new result with the existing result if the output collection already exists. If an existing document has the same key
+   * as the new result, apply the reduce function to both the new and the existing documents and overwrite the existing document with
+   * the result.
+ */
+ val REDUCE: JMapReduceAction = JMapReduceAction.REDUCE
+
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/MergeOptions.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/MergeOptions.scala
new file mode 100644
index 00000000000..1fe4986f4e3
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/MergeOptions.scala
@@ -0,0 +1,153 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.model
+
+import scala.collection.JavaConverters._
+import org.mongodb.scala.bson.conversions.Bson
+
+import com.mongodb.client.model.{ MergeOptions => JMergeOptions }
+
+/**
+ * Options to control the behavior of the `\$merge` aggregation stage
+ *
+ * @since 2.7
+ */
+object MergeOptions {
+
+ /**
+ * The behavior of `\$merge` if a result document and an existing document in the collection have the same value for the specified on
+ * field(s).
+ */
+ object WhenMatched {
+
+ /**
+ * Replace the existing document in the output collection with the matching results document.
+ */
+ val REPLACE = JMergeOptions.WhenMatched.REPLACE
+
+ /**
+ * Keep the existing document in the output collection.
+ */
+ val KEEP_EXISTING = JMergeOptions.WhenMatched.KEEP_EXISTING
+
+ /**
+ * Merge the matching documents.
+ */
+ val MERGE = JMergeOptions.WhenMatched.MERGE
+
+ /**
+ * An aggregation pipeline to update the document in the collection.
+ *
+ * @see #whenMatchedPipeline(List)
+ */
+ val PIPELINE = JMergeOptions.WhenMatched.PIPELINE
+
+ /**
+ * Stop and fail the aggregation operation. Any changes to the output collection from previous documents are not reverted.
+ */
+ val FAIL = JMergeOptions.WhenMatched.FAIL
+
+ }
+
+ /**
+ * The behavior of `\$merge` if a result document does not match an existing document in the out collection.
+ */
+ object WhenNotMatched {
+
+ /**
+ * Insert the document into the output collection.
+ */
+ val INSERT = JMergeOptions.WhenNotMatched.INSERT
+
+ /**
+ * Discard the document; i.e. `\$merge` does not insert the document into the output collection.
+ */
+ val DISCARD = JMergeOptions.WhenNotMatched.DISCARD
+
+ /**
+ * Stop and fail the aggregation operation. Any changes to the output collection from previous documents are not reverted.
+ */
+ val FAIL = JMergeOptions.WhenNotMatched.FAIL
+ }
+}
+
+/**
+ * Options to control the behavior of the `\$merge` aggregation stage
+ *
+ * @since 2.7
+ */
+case class MergeOptions(wrapped: JMergeOptions = new JMergeOptions()) {
+
+ /**
+ * Sets the field(s) that act as a unique identifier for a document. The identifier determines whether a result document matches an
+ * existing document in the output collection.
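+ * For example, a sketch that merges on a hypothetical compound key:
+ * {{{
+ * // "year" and "month" are hypothetical identifier fields
+ * MergeOptions().uniqueIdentifier("year", "month")
+ * }}}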
+ * + * @param uniqueIdentifiers the unique identifier(s) + * @return this + */ + def uniqueIdentifier(uniqueIdentifiers: String*): MergeOptions = { + wrapped.uniqueIdentifier(uniqueIdentifiers.asJava) + this + } + + /** + * Sets the behavior of `\$merge` if a result document and an existing document in the collection have the same value for the specified + * on field(s). + * + * @param whenMatched when matched + * @return this + */ + def whenMatched(whenMatched: JMergeOptions.WhenMatched): MergeOptions = { + wrapped.whenMatched(whenMatched) + this + } + + /** + * Sets the variables accessible for use in the whenMatched pipeline. + * + * @param variables the variables + * @return this + */ + def variables(variables: Variable[_]*): MergeOptions = { + wrapped.variables(variables.asJava) + this + } + + /** + * Sets aggregation pipeline to update the document in the collection. + * + * @param whenMatchedPipeline when matched pipeline + * @return this + * @see WhenMatched#PIPELINE + */ + def whenMatchedPipeline(whenMatchedPipeline: Bson*): MergeOptions = { + wrapped.whenMatchedPipeline(whenMatchedPipeline.asJava) + this + } + + /** + * Sets the behavior of `\$merge` if a result document does not match an existing document in the out collection. + * + * @param whenNotMatched when not matched + * @return this + */ + def whenNotMatched(whenNotMatched: JMergeOptions.WhenNotMatched): MergeOptions = { + wrapped.whenNotMatched(whenNotMatched) + this + } + +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/Projections.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/Projections.scala new file mode 100644 index 00000000000..14b21948f3c --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/Projections.scala @@ -0,0 +1,209 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.model + +import scala.collection.JavaConverters._ + +import com.mongodb.client.model.{ Projections => JProjections } + +import org.mongodb.scala.bson.conversions.Bson + +/** + * A factory for projections. A convenient way to use this class is to statically import all of its methods, which allows usage like: + * + * `collection.find().projection(fields(include("x", "y"), excludeId()))` + * + * @since 1.0 + */ +object Projections { + + /** + * Creates a projection of a field whose value is computed from the given expression. Projection with an expression can be used in the + * following contexts: + *
+ * - `\$project` aggregation pipeline stage.
+ * - Starting from MongoDB 4.4, it's also accepted in various find-related methods within the
+ * `MongoCollection`-based API where projection is supported, for example:
+ *   - `find()`
+ *   - `findOneAndReplace()`
+ *   - `findOneAndUpdate()`
+ *   - `findOneAndDelete()`
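+ * For example, a sketch that projects a computed field (assuming a hypothetical `collection` and source field `\$rating`):
+ * {{{
+ * // "score" is the projected name; "\$rating" is a hypothetical source field
+ * collection.find().projection(Projections.computed("score", "\$rating"))
+ * }}}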
+ * + * @param fieldName the field name + * @param expression the expression + * @tparam TExpression the expression type + * @return the projection + * @see [[Projections.computedSearchMeta]] + * @see Aggregates#project(Bson) + */ + def computed[TExpression](fieldName: String, expression: TExpression): Bson = + JProjections.computed(fieldName, expression) + + /** + * Creates a projection of a field whose value is equal to the `$$SEARCH_META` variable, + * for use with `Aggregates.search(SearchOperator, SearchOptions)` / `Aggregates.search(SearchCollector, SearchOptions)`. + * Calling this method is equivalent to calling [[Projections.computed]] with `"$$SEARCH_META"` as the second argument. + * + * @param fieldName the field name + * @return the projection + * @see [[org.mongodb.scala.model.search.SearchCount]] + * @see [[org.mongodb.scala.model.search.SearchCollector]] + */ + def computedSearchMeta(fieldName: String): Bson = + JProjections.computedSearchMeta(fieldName) + + /** + * Creates a projection that includes all of the given fields. + * + * @param fieldNames the field names + * @return the projection + */ + def include(fieldNames: String*): Bson = JProjections.include(fieldNames.asJava) + + /** + * Creates a projection that excludes all of the given fields. + * + * @param fieldNames the field names + * @return the projection + */ + def exclude(fieldNames: String*): Bson = JProjections.exclude(fieldNames.asJava) + + /** + * Creates a projection that excludes the _id field. This suppresses the automatic inclusion of _id that is the default, even when + * other fields are explicitly included. + * + * @return the projection + */ + def excludeId(): Bson = JProjections.excludeId + + /** + * Creates a projection that includes for the given field only the first element of an array that matches the query filter. This is + * referred to as the positional `\$` operator. + * + * @param fieldName the field name whose value is the array + * @return the projection + * @see [[https://www.mongodb.com/docs/manual/reference/operator/projection/positional/#projection Project the first matching element (\$ operator)]] + */ + def elemMatch(fieldName: String): Bson = JProjections.elemMatch(fieldName) + + /** + * Creates a projection that includes for the given field only the first element of the array value of that field that matches the given + * query filter. + * + * @param fieldName the field name + * @param filter the filter to apply + * @return the projection + * @see [[https://www.mongodb.com/docs/manual/reference/operator/projection/elemMatch elemMatch]] + */ + def elemMatch(fieldName: String, filter: Bson): Bson = JProjections.elemMatch(fieldName, filter) + + /** + * Creates a `\$meta` projection to the given field name for the given meta field name. + * + * @param fieldName the field name + * @param metaFieldName the meta field name + * @return the projection + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/meta/ meta]] + * @see [[Projections.metaTextScore]] + * @see [[Projections.metaSearchScore]] + * @see [[Projections.metaVectorSearchScore]] + * @see [[Projections.metaSearchHighlights]] + * @since 4.1 + */ + def meta(fieldName: String, metaFieldName: String): Bson = JProjections.meta(fieldName, metaFieldName) + + /** + * Creates a projection to the given field name of the textScore, for use with text queries. + * Calling this method is equivalent to calling [[Projections.meta]] with `"textScore"` as the second argument. 
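+ * For example, a sketch (assuming a text index on the collection and `Filters` in scope):
+ * {{{
+ * // surfaces the relevance score of a text query under the hypothetical name "score"
+ * collection.find(Filters.text("mongodb")).projection(Projections.metaTextScore("score"))
+ * }}}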
+ * + * @param fieldName the field name + * @return the projection + * @see `Filters.text(String, TextSearchOptions)` + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/meta/#text-score-metadata--meta---textscore- textScore]] + */ + def metaTextScore(fieldName: String): Bson = JProjections.metaTextScore(fieldName) + + /** + * Creates a projection to the given field name of the searchScore, + * for use with `Aggregates.search(SearchOperator, SearchOptions)` / `Aggregates.search(SearchCollector, SearchOptions)`. + * Calling this method is equivalent to calling [[Projections.meta]] with `"searchScore"` as the second argument. + * + * @param fieldName the field name + * @return the projection + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/scoring/ Scoring]] + */ + def metaSearchScore(fieldName: String): Bson = JProjections.metaSearchScore(fieldName) + + /** + * Creates a projection to the given field name of the vectorSearchScore, + * for use with `Aggregates.vectorSearch(FieldSearchPath, Iterable, String, Long, Long, VectorSearchOptions)`. + * Calling this method is equivalent to calling [[Projections.meta]] with `"vectorSearchScore"` as the second argument. + * + * @param fieldName the field name + * @return the projection + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/scoring/ Scoring]] + * @note Requires MongoDB 6.0.10 or greater + * @since 4.11 + */ + def metaVectorSearchScore(fieldName: String): Bson = JProjections.metaVectorSearchScore(fieldName) + + /** + * Creates a projection to the given field name of the searchHighlights, + * for use with `Aggregates.search(SearchOperator, SearchOptions)` / `Aggregates.search(SearchCollector, SearchOptions)`. + * Calling this method is equivalent to calling [[Projections.meta]] with `"searchHighlights"` as the second argument. + * + * @param fieldName the field name + * @return the projection + * @see [[org.mongodb.scala.model.search.SearchHighlight]] + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/highlighting/ Highlighting]] + */ + def metaSearchHighlights(fieldName: String): Bson = JProjections.metaSearchHighlights(fieldName) + + /** + * Creates a projection to the given field name of a slice of the array value of that field. + * + * @param fieldName the field name + * @param limit the number of elements to project. + * @return the projection + * @see [[https://www.mongodb.com/docs/manual/reference/operator/projection/slice Slice]] + */ + def slice(fieldName: String, limit: Int): Bson = JProjections.slice(fieldName, limit) + + /** + * Creates a projection to the given field name of a slice of the array value of that field. + * + * @param fieldName the field name + * @param skip the number of elements to skip before applying the limit + * @param limit the number of elements to project + * @return the projection + * @see [[https://www.mongodb.com/docs/manual/reference/operator/projection/slice Slice]] + */ + def slice(fieldName: String, skip: Int, limit: Int): Bson = JProjections.slice(fieldName, skip, limit) + + /** + * Creates a projection that combines the list of projections into a single one. If there are duplicate keys, the last one takes + * precedence. 
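+ * For example, a sketch combining inclusion with `_id` suppression (field names are hypothetical):
+ * {{{
+ * collection.find().projection(Projections.fields(Projections.include("x", "y"), Projections.excludeId()))
+ * }}}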
+ * + * @param projections the list of projections to combine + * @return the combined projection + */ + def fields(projections: Bson*): Bson = JProjections.fields(projections.asJava) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/QuantileMethod.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/QuantileMethod.scala new file mode 100644 index 00000000000..99df9da410b --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/QuantileMethod.scala @@ -0,0 +1,43 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.mongodb.scala.model + +import com.mongodb.annotations.Sealed +import com.mongodb.client.model.{ QuantileMethod => JQuantileMethod } + +/** + * This interface represents a quantile method used in quantile accumulators of the `\$group` and + * `\$setWindowFields` stages. + *

+ * It provides methods for creating and converting quantile methods to `BsonValue`. + *
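+ * For example, a sketch passing the approximate method to [[Accumulators.percentile]] (the `\$score` field and
+ * the percentile values are hypothetical):
+ * {{{
+ * Accumulators.percentile("scorePercentiles", "\$score", Seq(0.5, 0.9), QuantileMethod.approximate)
+ * }}}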

+ * + * @see [[org.mongodb.scala.model.Accumulators.percentile]] + * @see [[org.mongodb.scala.model.Accumulators.median]] + * @see [[org.mongodb.scala.model.WindowOutputFields.percentile]] + * @see [[org.mongodb.scala.model.WindowOutputFields.median]] + * @since 4.10 + * @note Requires MongoDB 7.0 or greater + */ +@Sealed object QuantileMethod { + + /** + * Returns a `QuantileMethod` instance representing the "approximate" quantile method. + * + * @return The requested `QuantileMethod`. + */ + def approximate: ApproximateQuantileMethod = JQuantileMethod.approximate() +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/ReturnDocument.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/ReturnDocument.scala new file mode 100644 index 00000000000..0d6800492a2 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/ReturnDocument.scala @@ -0,0 +1,37 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.model + +import com.mongodb.client.model.{ ReturnDocument => JReturnDocument } + +/** + * Indicates which document to return, the original document before change or the document after the change + * + * @since 1.0 + */ +object ReturnDocument { + + /** + * Indicates to return the document before the update, replacement, or insert occurred. + */ + val BEFORE: JReturnDocument = JReturnDocument.BEFORE + + /** + * Indicates to return the document after the update, replacement, or insert occurred. + */ + val AFTER: JReturnDocument = JReturnDocument.AFTER +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/Sorts.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/Sorts.scala new file mode 100644 index 00000000000..7e479cef8cf --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/Sorts.scala @@ -0,0 +1,70 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.model + +import scala.collection.JavaConverters._ + +import com.mongodb.client.model.{ Sorts => JSorts } + +import org.mongodb.scala.bson.conversions.Bson + +/** + * A factory for sort specifications. 
A convenient way to use this class is to statically import all of its methods, which allows
+ * usage like:
+ *
+ * `collection.find().sort(orderBy(ascending("x", "y"), descending("z")))`
+ *
+ * @since 1.0
+ */
+object Sorts {
+
+ /**
+ * Create a sort specification for an ascending sort on the given fields.
+ *
+ * @param fieldNames the field names, which must contain at least one
+ * @return the sort specification
+ * @see [[https://www.mongodb.com/docs/manual/reference/operator/meta/orderby Sort]]
+ */
+ def ascending(fieldNames: String*): Bson = JSorts.ascending(fieldNames.asJava)
+
+ /**
+ * Create a sort specification for a descending sort on the given fields.
+ *
+ * @param fieldNames the field names, which must contain at least one
+ * @return the sort specification
+ * @see [[https://www.mongodb.com/docs/manual/reference/operator/meta/orderby Sort]]
+ */
+ def descending(fieldNames: String*): Bson = JSorts.descending(fieldNames.asJava)
+
+ /**
+ * Create a sort specification for the text score meta projection on the given field.
+ *
+ * @param fieldName the field name
+ * @return the sort specification
+ * @see Filters.text(String, TextSearchOptions)
+ * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/meta/#text-score-metadata--meta---textscore- textScore]]
+ */
+ def metaTextScore(fieldName: String): Bson = JSorts.metaTextScore(fieldName)
+
+ /**
+ * Combine multiple sort specifications. If any field names are repeated, the last one takes precedence.
+ *
+ * @param sorts the sort specifications
+ * @return the combined sort specification
+ */
+ def orderBy(sorts: Bson*): Bson = JSorts.orderBy(sorts.asJava)
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/Updates.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/Updates.scala
new file mode 100644
index 00000000000..40367bb4da0
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/Updates.scala
@@ -0,0 +1,317 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.model
+
+import scala.collection.JavaConverters._
+
+import com.mongodb.client.model.{ PushOptions => JPushOptions, Updates => JUpdates }
+
+import org.mongodb.scala.bson.conversions.Bson
+
+/**
+ * A factory for document updates. A convenient way to use this class is to statically import all of its methods, which allows usage like:
+ *
+ * `collection.updateOne(eq("x", 1), set("x", 2))`
+ *
+ * @since 1.0
+ */
+object Updates {
+
+ /**
+ * Combine a list of updates into a single update.
+ *
+ * @param updates the list of updates
+ * @return a combined update
+ */
+ def combine(updates: Bson*): Bson = JUpdates.combine(updates.asJava)
+
+ /**
+ * Creates an update that sets the value of the field with the given name to the given value.
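+ * For example, a sketch (assuming a hypothetical `collection` and `Filters` in scope):
+ * {{{
+ * // sets "x" to 2 on the first document where "x" is 1
+ * collection.updateOne(Filters.equal("x", 1), Updates.set("x", 2))
+ * }}}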
+ *
+ * @param fieldName the non-null field name
+ * @param value the value
+ * @tparam TItem the value type
+ * @return the update
+ * @see [[https://www.mongodb.com/docs/manual/reference/operator/update/set/ \$set]]
+ */
+ def set[TItem](fieldName: String, value: TItem): Bson = JUpdates.set(fieldName, value)
+
+ /**
+ * Creates an update that deletes the field with the given name.
+ *
+ * @param fieldName the non-null field name
+ * @return the update
+ * @see [[https://www.mongodb.com/docs/manual/reference/operator/update/unset/ \$unset]]
+ */
+ def unset(fieldName: String): Bson = JUpdates.unset(fieldName)
+
+ /**
+ * Creates an update that sets the value of the field with the given name to the given value, but only if the update is an upsert that
+ * results in an insert of a document.
+ *
+ * @param fieldName the non-null field name
+ * @param value the value
+ * @tparam TItem the value type
+ * @return the update
+ * @see [[https://www.mongodb.com/docs/manual/reference/operator/update/setOnInsert/ \$setOnInsert]]
+ * @see UpdateOptions#upsert(boolean)
+ */
+ def setOnInsert[TItem](fieldName: String, value: TItem): Bson = JUpdates.setOnInsert(fieldName, value)
+
+ /**
+ * Creates an update that renames a field.
+ *
+ * @param fieldName the non-null field name
+ * @param newFieldName the non-null new field name
+ * @return the update
+ * @see [[https://www.mongodb.com/docs/manual/reference/operator/update/rename/ \$rename]]
+ */
+ def rename(fieldName: String, newFieldName: String): Bson = JUpdates.rename(fieldName, newFieldName)
+
+ /**
+ * Creates an update that increments the value of the field with the given name by the given value.
+ *
+ * @param fieldName the non-null field name
+ * @param number the value
+ * @return the update
+ * @see [[https://www.mongodb.com/docs/manual/reference/operator/update/inc/ \$inc]]
+ */
+ def inc(fieldName: String, number: Number): Bson = JUpdates.inc(fieldName, number)
+
+ /**
+ * Creates an update that multiplies the value of the field with the given name by the given number.
+ *
+ * @param fieldName the non-null field name
+ * @param number the non-null number
+ * @return the update
+ * @see [[https://www.mongodb.com/docs/manual/reference/operator/update/mul/ \$mul]]
+ */
+ def mul(fieldName: String, number: Number): Bson = JUpdates.mul(fieldName, number)
+
+ /**
+ * Creates an update that sets the value of the field to the given value if the given value is less than the current value of the
+ * field.
+ *
+ * @param fieldName the non-null field name
+ * @param value the value
+ * @tparam TItem the value type
+ * @return the update
+ * @see [[https://www.mongodb.com/docs/manual/reference/operator/update/min/ \$min]]
+ */
+ def min[TItem](fieldName: String, value: TItem): Bson = JUpdates.min(fieldName, value)
+
+ /**
+ * Creates an update that sets the value of the field to the given value if the given value is greater than the current value of the
+ * field.
+ *
+ * @param fieldName the non-null field name
+ * @param value the value
+ * @tparam TItem the value type
+ * @return the update
+ * @see [[https://www.mongodb.com/docs/manual/reference/operator/update/max/ \$max]]
+ */
+ def max[TItem](fieldName: String, value: TItem): Bson = JUpdates.max(fieldName, value)
+
+ /**
+ * Creates an update that sets the value of the field to the current date as a BSON date.
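+ * For example, a sketch stamping a hypothetical `lastModified` field on every document:
+ * {{{
+ * collection.updateMany(Filters.empty(), Updates.currentDate("lastModified"))
+ * }}}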
+ * + * @param fieldName the non-null field name + * @return the update + * @see [[https://www.mongodb.com/docs/manual/reference/operator/update/currentDate/ \$currentDate]] + * @see [[https://www.mongodb.com/docs/manual/reference/bson-types/#date Date]] + */ + def currentDate(fieldName: String): Bson = JUpdates.currentDate(fieldName) + + /** + * Creates an update that sets the value of the field to the current date as a BSON timestamp. + * + * @param fieldName the non-null field name + * @return the update + * @see [[https://www.mongodb.com/docs/manual/reference/operator/update/currentDate/ \$currentDate]] + * @see [[https://www.mongodb.com/docs/manual/reference/bson-types/#document-bson-type-timestamp Timestamp]] + */ + def currentTimestamp(fieldName: String): Bson = JUpdates.currentTimestamp(fieldName) + + /** + * Creates an update that adds the given value to the array value of the field with the given name, unless the value is + * already present, in which case it does nothing + * + * @param fieldName the non-null field name + * @param value the value + * @tparam TItem the value type + * @return the update + * @see [[https://www.mongodb.com/docs/manual/reference/operator/update/addToSet/ \$addToSet]] + */ + def addToSet[TItem](fieldName: String, value: TItem): Bson = JUpdates.addToSet(fieldName, value) + + /** + * Creates an update that adds each of the given values to the array value of the field with the given name, unless the value is + * already present, in which case it does nothing + * + * @param fieldName the non-null field name + * @param values the values + * @tparam TItem the value type + * @return the update + * @see [[https://www.mongodb.com/docs/manual/reference/operator/update/addToSet/ \$addToSet]] + */ + def addEachToSet[TItem](fieldName: String, values: TItem*): Bson = JUpdates.addEachToSet(fieldName, values.asJava) + + /** + * Creates an update that adds the given value to the array value of the field with the given name. + * + * @param fieldName the non-null field name + * @param value the value + * @tparam TItem the value type + * @return the update + * @see [[https://www.mongodb.com/docs/manual/reference/operator/update/push/ \$push]] + */ + def push[TItem](fieldName: String, value: TItem): Bson = JUpdates.push(fieldName, value) + + /** + * Creates an update that adds each of the given values to the array value of the field with the given name. + * + * @param fieldName the non-null field name + * @param values the values + * @tparam TItem the value type + * @return the update + * @see [[https://www.mongodb.com/docs/manual/reference/operator/update/push/ \$push]] + */ + def pushEach[TItem](fieldName: String, values: TItem*): Bson = JUpdates.pushEach(fieldName, values.asJava) + + /** + * Creates an update that adds each of the given values to the array value of the field with the given name, applying the given + * options for positioning the pushed values, and then slicing and/or sorting the array. + * + * @param fieldName the non-null field name + * @param values the values + * @param options the non-null push options + * @tparam TItem the value type + * @return the update + * @see [[https://www.mongodb.com/docs/manual/reference/operator/update/push/ \$push]] + */ + def pushEach[TItem](fieldName: String, options: JPushOptions, values: TItem*): Bson = + JUpdates.pushEach(fieldName, values.asJava, options) + + /** + * Creates an update that removes all instances of the given value from the array value of the field with the given name. 
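+ * For example, a sketch removing a value from a hypothetical `tags` array field:
+ * {{{
+ * collection.updateMany(Filters.exists("tags"), Updates.pull("tags", "obsolete"))
+ * }}}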
+ * + * @param fieldName the non-null field name + * @param value the value + * @tparam TItem the value type + * @return the update + * @see [[https://www.mongodb.com/docs/manual/reference/operator/update/pull/ \$pull]] + */ + def pull[TItem](fieldName: String, value: TItem): Bson = JUpdates.pull(fieldName, value) + + /** + * Creates an update that removes from an array all elements that match the given filter. + * + * @param filter the query filter + * @return the update + * @see [[https://www.mongodb.com/docs/manual/reference/operator/update/pull/ \$pull]] + */ + def pullByFilter(filter: Bson): Bson = JUpdates.pullByFilter(filter) + + /** + * Creates an update that removes all instances of the given values from the array value of the field with the given name. + * + * @param fieldName the non-null field name + * @param values the values + * @tparam TItem the value type + * @return the update + * @see [[https://www.mongodb.com/docs/manual/reference/operator/update/pull/ \$pull]] + */ + def pullAll[TItem](fieldName: String, values: TItem*): Bson = JUpdates.pullAll(fieldName, values.asJava) + + /** + * Creates an update that pops the first element of an array that is the value of the field with the given name. + * + * @param fieldName the non-null field name + * @return the update + * @see [[https://www.mongodb.com/docs/manual/reference/operator/update/pop/ \$pop]] + */ + def popFirst(fieldName: String): Bson = JUpdates.popFirst(fieldName) + + /** + * Creates an update that pops the last element of an array that is the value of the field with the given name. + * + * @param fieldName the non-null field name + * @return the update + * @see [[https://www.mongodb.com/docs/manual/reference/operator/update/pop/ \$pop]] + */ + def popLast(fieldName: String): Bson = JUpdates.popLast(fieldName) + + /** + * Creates an update that performs a bitwise and between the given integer value and the integral value of the field with the given + * name. + * + * @param fieldName the field name + * @param value the value + * @return the update + */ + def bitwiseAnd(fieldName: String, value: Int): Bson = JUpdates.bitwiseAnd(fieldName, value) + + /** + * Creates an update that performs a bitwise and between the given long value and the integral value of the field with the given name. + * + * @param fieldName the field name + * @param value the value + * @return the update + * @see [[https://www.mongodb.com/docs/manual/reference/operator/update/bit/ \$bit]] + */ + def bitwiseAnd(fieldName: String, value: Long): Bson = JUpdates.bitwiseAnd(fieldName, value) + + /** + * Creates an update that performs a bitwise or between the given integer value and the integral value of the field with the given + * name. + * + * @param fieldName the field name + * @param value the value + * @return the update + * @see [[https://www.mongodb.com/docs/manual/reference/operator/update/bit/ \$bit]] + */ + def bitwiseOr(fieldName: String, value: Int): Bson = JUpdates.bitwiseOr(fieldName, value) + + /** + * Creates an update that performs a bitwise or between the given long value and the integral value of the field with the given name. 
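+ * For example, a sketch setting a flag bit on a hypothetical `permissions` field:
+ * {{{
+ * collection.updateOne(Filters.equal("_id", 1), Updates.bitwiseOr("permissions", 0x4L))
+ * }}}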
+ *
+ * @param fieldName the field name
+ * @param value the value
+ * @return the update
+ * @see [[https://www.mongodb.com/docs/manual/reference/operator/update/bit/ \$bit]]
+ */
+ def bitwiseOr(fieldName: String, value: Long): Bson = JUpdates.bitwiseOr(fieldName, value)
+
+ /**
+ * Creates an update that performs a bitwise xor between the given integer value and the integral value of the field with the given
+ * name.
+ *
+ * @param fieldName the field name
+ * @param value the value
+ * @return the update
+ */
+ def bitwiseXor(fieldName: String, value: Int): Bson = JUpdates.bitwiseXor(fieldName, value)
+
+ /**
+ * Creates an update that performs a bitwise xor between the given long value and the integral value of the field with the given name.
+ *
+ * @param fieldName the field name
+ * @param value the value
+ * @return the update
+ */
+ def bitwiseXor(fieldName: String, value: Long): Bson = JUpdates.bitwiseXor(fieldName, value)
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/ValidationAction.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/ValidationAction.scala
new file mode 100644
index 00000000000..de58d7ed7ab
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/ValidationAction.scala
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.model
+
+import scala.util.Try
+
+import com.mongodb.client.model.{ ValidationAction => JValidationAction }
+
+/**
+ * Determines whether to error on invalid documents or just warn about the violations, but allow invalid documents to be inserted.
+ *
+ * @note Requires MongoDB 3.2 or greater
+ * @since 1.1
+ */
+object ValidationAction {
+
+ /**
+ * Documents must pass validation before the write occurs. Otherwise, the write operation fails.
+ */
+ val ERROR: ValidationAction = JValidationAction.ERROR
+
+ /**
+ * Documents do not have to pass validation. If the document fails validation, the write operation logs the validation failure to
+ * the mongod logs.
+ */
+ val WARN: ValidationAction = JValidationAction.WARN
+
+ /**
+ * Returns the validationAction from the string representation of a validation action.
+ *
+ * @param validationAction the string representation of the validation action.
+ * @return the validation action
+ */
+ def fromString(validationAction: String): Try[ValidationAction] = Try(JValidationAction.fromString(validationAction))
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/ValidationLevel.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/ValidationLevel.scala
new file mode 100644
index 00000000000..27de60e4e85
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/ValidationLevel.scala
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.model + +import scala.util.Try + +import com.mongodb.client.model.{ ValidationLevel => JValidationLevel } + +/** + * Determines how strictly MongoDB applies the validation rules to existing documents during an insert or update. + * + * @note Requires MongoDB 3.2 or greater + * @since 1.1 + */ +object ValidationLevel { + + /** + * No validation for inserts or updates. + */ + val OFF: ValidationLevel = JValidationLevel.OFF + + /** + * Apply validation rules to all inserts and all updates. + */ + val STRICT: ValidationLevel = JValidationLevel.STRICT + + /** + * Applies validation rules to inserts and to updates on existing valid documents. + * + * Does not apply rules to updates on existing invalid documents. + */ + val MODERATE: ValidationLevel = JValidationLevel.MODERATE + + /** + * Returns the ValidationLevel from the string representation of the validation level. + * + * @param validationLevel the string representation of the validation level. + * @return the validation level + */ + def fromString(validationLevel: String): Try[ValidationLevel] = Try(JValidationLevel.fromString(validationLevel)) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/WindowOutputFields.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/WindowOutputFields.scala new file mode 100644 index 00000000000..29209bff280 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/WindowOutputFields.scala @@ -0,0 +1,710 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.mongodb.scala.model + +import com.mongodb.client.model.{ + MongoTimeUnit => JMongoTimeUnit, + QuantileMethod, + WindowOutputFields => JWindowOutputFields +} +import org.mongodb.scala.bson.conversions.Bson + +/** + * Builders for [[WindowOutputField window output fields]] used in the + * `Aggregates.setWindowFields` pipeline stage + * of an aggregation pipeline. Each windowed output field is a triple: + * - A window function. Some functions require documents in a window to be sorted + * (see `sortBy` in `Aggregates.setWindowFields`). + * - An optional [[Window window]], a.k.a. frame. + * Specifying `None` window is equivalent to specifying an unbounded window, + * i.e., a window with both ends specified as [[Windows.Bound UNBOUNDED]]. + * Some window functions, e.g., [[WindowOutputFields.derivative]], + * require an explicit unbounded window instead of `None`. + * - A path to an output field to be computed by the window function over the window. 
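+ * For example, a sketch of a cumulative sum per partition (assuming the `Option`-based
+ * `Aggregates.setWindowFields` overload and hypothetical `\$state`, `date` and `\$qty` fields):
+ * {{{
+ * Aggregates.setWindowFields(Some("\$state"), Some(Sorts.ascending("date")),
+ *   WindowOutputFields.sum("cumulativeQty", "\$qty", None))
+ * }}}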
+ * + * A windowed computation is similar to an [[Accumulators accumulator]] but does not result in folding documents constituting + * the window into a single document. + * + * @see [[https://www.mongodb.com/docs/manual/meta/aggregation-quick-reference/#field-paths Field paths]] + * @since 4.3 + * @note Requires MongoDB 5.0 or greater. + */ +object WindowOutputFields { + + /** + * Creates a windowed computation from a document field in situations when there is no builder method that better satisfies your needs. + * This method cannot be used to validate the document field syntax. + * + * {{{ + * val pastWeek: Window = Windows.timeRange(-1, MongoTimeUnit.WEEK, Windows.Bound.CURRENT) + * val pastWeekExpenses1: WindowOutputField = WindowOutputFields.sum("pastWeekExpenses", "\$expenses", pastWeek) + * val pastWeekExpenses2: WindowOutputField = WindowOutputFields.of( + * BsonField("pastWeekExpenses", Document("\$sum" -> "\$expenses", + * "window" -> pastWeek.toBsonDocument))) + * }}} + * + * @param WindowOutputField A document field representing the required windowed computation. + * @return The constructed windowed computation. + */ + def of(WindowOutputField: BsonField): WindowOutputField = + JWindowOutputFields.of(WindowOutputField) + + /** + * Builds a computation of the sum of the evaluation results of the `expression` over the `window`. + * + * @param path The output field path. + * @param expression The expression. + * @param window The window. + * @tparam TExpression The expression type. + * @return The constructed windowed computation. + * @see [[https://dochub.mongodb.org/core/window-functions-sum \$sum]] + */ + def sum[TExpression](path: String, expression: TExpression, window: Option[_ <: Window]): WindowOutputField = + JWindowOutputFields.sum(path, expression, window.orNull) + + /** + * Builds a computation of the average of the evaluation results of the `expression` over the `window`. + * + * @param path The output field path. + * @param expression The expression. + * @param window The window. + * @tparam TExpression The expression type. + * @return The constructed windowed computation. + * @see [[https://dochub.mongodb.org/core/window-functions-avg \$avg]] + */ + def avg[TExpression](path: String, expression: TExpression, window: Option[_ <: Window]): WindowOutputField = + JWindowOutputFields.avg(path, expression, window.orNull) + + /** + * Builds a window output field of percentiles of the evaluation results of the `inExpression` + * over documents in the specified `window`. The `pExpression` parameter represents an array of + * percentiles of interest, with each element being a numeric value between 0.0 and 1.0 (inclusive). + * + * @param path The output field path. + * @param inExpression The input expression. + * @param pExpression The expression representing the percentiles of interest. + * @param method The method to be used for computing the percentiles. + * @param window The window. + * @tparam InExpression The input expression type. + * @tparam PExpression The percentile expression type. + * @return The constructed windowed computation. 
+ * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/percentile/ \$percentile]]
+ * @since 4.10
+ * @note Requires MongoDB 7.0 or greater
+ */
+ def percentile[InExpression, PExpression](
+ path: String,
+ inExpression: InExpression,
+ pExpression: PExpression,
+ method: QuantileMethod,
+ window: Option[_ <: Window]
+ ): WindowOutputField =
+ JWindowOutputFields.percentile(path, inExpression, pExpression, method, window.orNull)
+
+ /**
+ * Builds a window output field representing the median value of the evaluation results of the `inExpression`
+ * over documents in the specified `window`.
+ *
+ * @param path The output field path.
+ * @param inExpression The input expression.
+ * @param method The method to be used for computing the median.
+ * @param window The window.
+ * @tparam InExpression The input expression type.
+ * @return The constructed windowed computation.
+ * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/median/ \$median]]
+ * @since 4.10
+ * @note Requires MongoDB 7.0 or greater
+ */
+ def median[InExpression](
+ path: String,
+ inExpression: InExpression,
+ method: QuantileMethod,
+ window: Option[_ <: Window]
+ ): WindowOutputField =
+ JWindowOutputFields.median(path, inExpression, method, window.orNull)
+
+ /**
+ * Builds a computation of the sample standard deviation of the evaluation results of the `expression` over the `window`.
+ *
+ * @param path The output field path.
+ * @param expression The expression.
+ * @param window The window.
+ * @tparam TExpression The expression type.
+ * @return The constructed windowed computation.
+ * @see [[https://dochub.mongodb.org/core/window-functions-std-dev-samp \$stdDevSamp]]
+ */
+ def stdDevSamp[TExpression](path: String, expression: TExpression, window: Option[_ <: Window]): WindowOutputField =
+ JWindowOutputFields.stdDevSamp(path, expression, window.orNull)
+
+ /**
+ * Builds a computation of the population standard deviation of the evaluation results of the `expression` over the `window`.
+ *
+ * @param path The output field path.
+ * @param expression The expression.
+ * @param window The window.
+ * @tparam TExpression The expression type.
+ * @return The constructed windowed computation.
+ * @see [[https://dochub.mongodb.org/core/window-functions-std-dev-pop \$stdDevPop]]
+ */
+ def stdDevPop[TExpression](path: String, expression: TExpression, window: Option[_ <: Window]): WindowOutputField =
+ JWindowOutputFields.stdDevPop(path, expression, window.orNull)
+
+ /**
+ * Builds a computation of the lowest of the evaluation results of the `expression` over the `window`.
+ *
+ * @param path The output field path.
+ * @param expression The expression.
+ * @param window The window.
+ * @tparam TExpression The expression type.
+ * @return The constructed windowed computation.
+ * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/min/ \$min]]
+ */
+ def min[TExpression](path: String, expression: TExpression, window: Option[_ <: Window]): WindowOutputField =
+ JWindowOutputFields.min(path, expression, window.orNull)
+
+ /**
+ * Builds a computation of a BSON `Array`
+ * of `N` smallest evaluation results of the `inExpression` over the `window`,
+ * where `N` is the positive integral value of the `nExpression`.
+ *
+ * @param path The output field path.
+ * @param inExpression The input expression.
+ * @param nExpression The expression limiting the number of produced values.
+ * @param window The window.
+ * @tparam InExpression The type of the input expression.
+ * @tparam NExpression The type of the limiting expression. + * @return The constructed windowed computation. + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/minN/ \$minN]] + * @since 4.7 + * @note Requires MongoDB 5.2 or greater + */ + def minN[InExpression, NExpression]( + path: String, + inExpression: InExpression, + nExpression: NExpression, + window: Option[_ <: Window] + ): WindowOutputField = JWindowOutputFields.minN(path, inExpression, nExpression, window.orNull) + + /** + * Builds a computation of the highest of the evaluation results of the `expression` over the `window`. + * + * @param path The output field path. + * @param expression The expression. + * @param window The window. + * @tparam TExpression The expression type. + * @return The constructed windowed computation. + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/max/ \$max]] + */ + def max[TExpression](path: String, expression: TExpression, window: Option[_ <: Window]): WindowOutputField = + JWindowOutputFields.max(path, expression, window.orNull) + + /** + * Builds a computation of a BSON `Array` + * of `N` largest evaluation results of the `inExpression` over the `window`, + * where `N` is the positive integral value of the `nExpression`. + * + * @param path The output field path. + * @param inExpression The input expression. + * @param nExpression The expression limiting the number of produced values. + * @tparam InExpression The type of the input expression. + * @tparam NExpression The type of the limiting expression. + * @return The constructed windowed computation. + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/maxN/ \$maxN]] + * @since 4.7 + * @note Requires MongoDB 5.2 or greater + */ + def maxN[InExpression, NExpression]( + path: String, + inExpression: InExpression, + nExpression: NExpression, + window: Option[_ <: Window] + ): WindowOutputField = JWindowOutputFields.maxN(path, inExpression, nExpression, window.orNull) + + /** + * Builds a computation of the number of documents in the `window`. + * + * @param path The output field path. + * @param window The window. + * @return The constructed windowed computation. + * @see [[https://dochub.mongodb.org/core/window-functions-count \$count]] + */ + def count(path: String, window: Option[_ <: Window]): WindowOutputField = + JWindowOutputFields.count(path, window.orNull) + + /** + * Builds a computation of the time derivative by subtracting the evaluation result of the `expression` against the last document + * and the first document in the `window` and dividing it by the difference in the values of the + * `sortBy` field of the respective documents. + * Other documents in the `window` have no effect on the computation. + * + * Sorting is required. + * + * @param path The output field path. + * @param expression The expression. + * @param window The window. + * @tparam TExpression The expression type. + * @return The constructed windowed computation. 
+ * @see [[https://dochub.mongodb.org/core/window-functions-derivative \$derivative]]
+ */
+ def derivative[TExpression](path: String, expression: TExpression, window: Window): WindowOutputField =
+ JWindowOutputFields.derivative(path, expression, window)
+
+ /**
+ * Builds a computation of the time derivative by subtracting the evaluation result of the `expression` against the last document
+ * and the first document in the `window` and dividing it by the difference in the BSON `Date`
+ * values of the `sortBy` field of the respective documents.
+ * Other documents in the `window` have no effect on the computation.
+ *
+ * Sorting is required.
+ *
+ * @param path The output field path.
+ * @param expression The expression.
+ * @param window The window.
+ * @param unit The desired time unit for the divisor. Allowed values are:
+ * [[MongoTimeUnit WEEK]], [[MongoTimeUnit DAY]], [[MongoTimeUnit HOUR]], [[MongoTimeUnit MINUTE]],
+ * [[MongoTimeUnit SECOND]], [[MongoTimeUnit MILLISECOND]].
+ * @tparam TExpression The expression type.
+ * @return The constructed windowed computation.
+ * @see [[https://dochub.mongodb.org/core/window-functions-derivative \$derivative]]
+ */
+ def timeDerivative[TExpression](
+ path: String,
+ expression: TExpression,
+ window: Window,
+ unit: JMongoTimeUnit
+ ): WindowOutputField =
+ JWindowOutputFields.timeDerivative(path, expression, window, unit)
+
+ /**
+ * Builds a computation of the approximate integral of a function that maps values of
+ * the `sortBy` field to evaluation results of the `expression`
+ * against the same document. The limits of integration match the `window` bounds.
+ * The approximation is done by using the trapezoidal rule.
+ *
+ * Sorting is required.
+ *
+ * @param path The output field path.
+ * @param expression The expression.
+ * @param window The window.
+ * @tparam TExpression The expression type.
+ * @return The constructed windowed computation.
+ * @see [[https://dochub.mongodb.org/core/window-functions-integral \$integral]]
+ */
+ def integral[TExpression](path: String, expression: TExpression, window: Window): WindowOutputField =
+ JWindowOutputFields.integral(path, expression, window)
+
+ /**
+ * Builds a computation of the approximate integral of a function that maps BSON `Date` values of
+ * the `sortBy` field to evaluation results of the `expression`
+ * against the same document. The limits of integration match the `window` bounds.
+ * The approximation is done by using the trapezoidal rule.
+ *
+ * Sorting is required.
+ *
+ * @param path The output field path.
+ * @param expression The expression.
+ * @param window The window.
+ * @param unit The desired time unit for the divisor. Allowed values are:
+ * [[MongoTimeUnit WEEK]], [[MongoTimeUnit DAY]], [[MongoTimeUnit HOUR]], [[MongoTimeUnit MINUTE]],
+ * [[MongoTimeUnit SECOND]], [[MongoTimeUnit MILLISECOND]].
+ * @tparam TExpression The expression type.
+ * @return The constructed windowed computation.
+ * @see [[https://dochub.mongodb.org/core/window-functions-integral \$integral]]
+ */
+ def timeIntegral[TExpression](
+ path: String,
+ expression: TExpression,
+ window: Window,
+ unit: JMongoTimeUnit
+ ): WindowOutputField =
+ JWindowOutputFields.timeIntegral(path, expression, window, unit)
+
+ /**
+ * Builds a computation of the sample covariance between the evaluation results of the two expressions over the `window`.
+ *
+ * @param path The output field path.
+ * @param expression1 The first expression.
+ * @param expression2 The second expression.
+ * @param window The window. + * @tparam TExpression The expression type. + * @return The constructed windowed computation. + * @see [[https://dochub.mongodb.org/core/window-functions-covariance-samp \$covarianceSamp]] + */ + def covarianceSamp[TExpression]( + path: String, + expression1: TExpression, + expression2: TExpression, + window: Option[_ <: Window] + ): WindowOutputField = + JWindowOutputFields.covarianceSamp(path, expression1, expression2, window.orNull) + + /** + * Builds a computation of the population covariance between the evaluation results of the two expressions over the `window`. + * + * @param path The output field path. + * @param expression1 The first expression. + * @param expression2 The second expression. + * @param window The window. + * @tparam TExpression The expression type. + * @return The constructed windowed computation. + * @see [[https://dochub.mongodb.org/core/window-functions-covariance-pop \$covariancePop]] + */ + def covariancePop[TExpression]( + path: String, + expression1: TExpression, + expression2: TExpression, + window: Option[_ <: Window] + ): WindowOutputField = + JWindowOutputFields.covariancePop(path, expression1, expression2, window.orNull) + + /** + * Builds a computation of the exponential moving average of the evaluation results of the `expression` over a window + * that includes `n` - 1 documents preceding the current document and the current document, with more weight on documents + * closer to the current one. + * + * Sorting is required. + * + * @param path The output field path. + * @param expression The expression. + * @param n Must be positive. + * @tparam TExpression The expression type. + * @return The constructed windowed computation. + * @see [[https://dochub.mongodb.org/core/window-functions-exp-moving-avg \$expMovingAvg]] + */ + def expMovingAvg[TExpression](path: String, expression: TExpression, n: Int): WindowOutputField = + JWindowOutputFields.expMovingAvg(path, expression, n) + + /** + * Builds a computation of the exponential moving average of the evaluation results of the `expression` over the half-bounded + * window `[`[[Windows.Bound UNBOUNDED]], [[Windows.Bound CURRENT]]`]`, + * with `alpha` representing the degree of weighting decrease. + * + * Sorting is required. + * + * @param path The output field path. + * @param expression The expression. + * @param alpha A parameter specifying how fast weighting decrease happens. A higher `alpha` discounts older observations faster. + * Must belong to the interval (0, 1). + * @tparam TExpression The expression type. + * @return The constructed windowed computation. + * @see [[https://dochub.mongodb.org/core/window-functions-exp-moving-avg \$expMovingAvg]] + */ + def expMovingAvg[TExpression](path: String, expression: TExpression, alpha: Double): WindowOutputField = + JWindowOutputFields.expMovingAvg(path, expression, alpha) + + /** + * Builds a computation that adds the evaluation results of the `expression` over the `window` + * to a BSON `Array`. + * Order within the array is guaranteed if `sortBy` is specified. + * + * @param path The output field path. + * @param expression The expression. + * @param window The window. + * @tparam TExpression The expression type. + * @return The constructed windowed computation. 
+ * @see [[https://dochub.mongodb.org/core/window-functions-push \$push]] + */ + def push[TExpression](path: String, expression: TExpression, window: Option[_ <: Window]): WindowOutputField = + JWindowOutputFields.push(path, expression, window.orNull) + + /** + * Builds a computation that adds the evaluation results of the `expression` over the `window` + * to a BSON `Array` and excludes duplicates. + * Order within the array is not specified. + * + * @param path The output field path. + * @param expression The expression. + * @param window The window. + * @tparam TExpression The expression type. + * @return The constructed windowed computation. + * @see [[https://dochub.mongodb.org/core/window-functions-add-to-set \$addToSet]] + */ + def addToSet[TExpression](path: String, expression: TExpression, window: Option[_ <: Window]): WindowOutputField = + JWindowOutputFields.addToSet(path, expression, window.orNull) + + /** + * Builds a computation of the evaluation result of the `expression` against the first document in the `window`. + * + * Sorting is required. + * + * @param path The output field path. + * @param expression The expression. + * @param window The window. + * @tparam TExpression The expression type. + * @return The constructed windowed computation. + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/first/ \$first]] + */ + def first[TExpression](path: String, expression: TExpression, window: Option[_ <: Window]): WindowOutputField = + JWindowOutputFields.first(path, expression, window.orNull) + + /** + * Builds a computation of a BSON `Array` + * of evaluation results of the `inExpression` against the first `N` documents in the `window`, + * where `N` is the positive integral value of the `nExpression`. + * + * Sorting is required. + * + * @param path The output field path. + * @param inExpression The input expression. + * @param nExpression The expression limiting the number of produced values. + * @tparam InExpression The type of the input expression. + * @tparam NExpression The type of the limiting expression. + * @return The constructed windowed computation. + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/firstN/ \$firstN]] + * @since 4.7 + * @note Requires MongoDB 5.2 or greater + */ + def firstN[InExpression, NExpression]( + path: String, + inExpression: InExpression, + nExpression: NExpression, + window: Option[_ <: Window] + ): WindowOutputField = JWindowOutputFields.firstN(path, inExpression, nExpression, window.orNull) + + /** + * Builds a computation of the evaluation result of the `outExpression` against the top document in the `window` + * sorted according to the provided `sortBy` specification. + * + * @param path The output field path. + * @param sortBy The sort specification. The syntax is identical to the one expected by [[Aggregates.sort]]. + * @param outExpression The output expression. + * @tparam OutExpression The type of the input expression. + * @return The constructed windowed computation. 
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/top/ \$top]]
+   * @since 4.7
+   * @note Requires MongoDB 5.2 or greater
+   */
+  def top[OutExpression](
+      path: String,
+      sortBy: Bson,
+      outExpression: OutExpression,
+      window: Option[_ <: Window]
+  ): WindowOutputField = JWindowOutputFields.top(path, sortBy, outExpression, window.orNull)
+
+  /**
+   * Builds a computation of a BSON `Array`
+   * of evaluation results of the `outExpression` against the top `N` documents in the `window`
+   * sorted according to the provided `sortBy` specification,
+   * where `N` is the positive integral value of the `nExpression`.
+   *
+   * @param path The output field path.
+   * @param sortBy The sort specification. The syntax is identical to the one expected by [[Aggregates.sort]].
+   * @param outExpression The output expression.
+   * @param nExpression The expression limiting the number of produced values.
+   * @param window The window.
+   * @tparam OutExpression The type of the output expression.
+   * @tparam NExpression The type of the limiting expression.
+   * @return The constructed windowed computation.
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/topN/ \$topN]]
+   * @since 4.7
+   * @note Requires MongoDB 5.2 or greater
+   */
+  def topN[OutExpression, NExpression](
+      path: String,
+      sortBy: Bson,
+      outExpression: OutExpression,
+      nExpression: NExpression,
+      window: Option[_ <: Window]
+  ): WindowOutputField = JWindowOutputFields.topN(path, sortBy, outExpression, nExpression, window.orNull)
+
+  /**
+   * Builds a computation of the evaluation result of the `expression` against the last document in the `window`.
+   *
+   * Sorting is required.
+   *
+   * @param path The output field path.
+   * @param expression The expression.
+   * @param window The window.
+   * @tparam TExpression The expression type.
+   * @return The constructed windowed computation.
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/last/ \$last]]
+   */
+  def last[TExpression](path: String, expression: TExpression, window: Option[_ <: Window]): WindowOutputField =
+    JWindowOutputFields.last(path, expression, window.orNull)
+
+  /**
+   * Builds a computation of a BSON `Array`
+   * of evaluation results of the `inExpression` against the last `N` documents in the `window`,
+   * where `N` is the positive integral value of the `nExpression`.
+   *
+   * Sorting is required.
+   *
+   * @param path The output field path.
+   * @param inExpression The input expression.
+   * @param nExpression The expression limiting the number of produced values.
+   * @param window The window.
+   * @tparam InExpression The type of the input expression.
+   * @tparam NExpression The type of the limiting expression.
+   * @return The constructed windowed computation.
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/lastN/ \$lastN]]
+   * @since 4.7
+   * @note Requires MongoDB 5.2 or greater
+   */
+  def lastN[InExpression, NExpression](
+      path: String,
+      inExpression: InExpression,
+      nExpression: NExpression,
+      window: Option[_ <: Window]
+  ): WindowOutputField = JWindowOutputFields.lastN(path, inExpression, nExpression, window.orNull)
+
+  /**
+   * Builds a computation of the evaluation result of the `outExpression` against the bottom document in the `window`
+   * sorted according to the provided `sortBy` specification.
+   *
+   * @param path The output field path.
+   * @param sortBy The sort specification. The syntax is identical to the one expected by [[Aggregates.sort]].
+   * @param outExpression The output expression.
+   * @param window The window.
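+   * @example A minimal sketch (the `"lowScore"` path and `"score"` field are hypothetical; `None` means
+   * the computation runs over the whole partition):
+   * {{{
+   *  // Records the score of the lowest-ranked document in the partition.
+   *  val worst: WindowOutputField = WindowOutputFields.bottom(
+   *    "lowScore",
+   *    Sorts.ascending("score"),
+   *    "\$score",
+   *    None)
+   * }}}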
+   * @tparam OutExpression The type of the output expression.
+   * @return The constructed windowed computation.
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/bottom/ \$bottom]]
+   * @since 4.7
+   * @note Requires MongoDB 5.2 or greater
+   */
+  def bottom[OutExpression](
+      path: String,
+      sortBy: Bson,
+      outExpression: OutExpression,
+      window: Option[_ <: Window]
+  ): WindowOutputField = JWindowOutputFields.bottom(path, sortBy, outExpression, window.orNull)
+
+  /**
+   * Builds a computation of a BSON `Array`
+   * of evaluation results of the `outExpression` against the bottom `N` documents in the `window`
+   * sorted according to the provided `sortBy` specification,
+   * where `N` is the positive integral value of the `nExpression`.
+   *
+   * @param path The output field path.
+   * @param sortBy The sort specification. The syntax is identical to the one expected by [[Aggregates.sort]].
+   * @param outExpression The output expression.
+   * @param nExpression The expression limiting the number of produced values.
+   * @param window The window.
+   * @tparam OutExpression The type of the output expression.
+   * @tparam NExpression The type of the limiting expression.
+   * @return The constructed windowed computation.
+   * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/bottomN/ \$bottomN]]
+   * @since 4.7
+   * @note Requires MongoDB 5.2 or greater
+   */
+  def bottomN[OutExpression, NExpression](
+      path: String,
+      sortBy: Bson,
+      outExpression: OutExpression,
+      nExpression: NExpression,
+      window: Option[_ <: Window]
+  ): WindowOutputField = JWindowOutputFields.bottomN(path, sortBy, outExpression, nExpression, window.orNull)
+
+  /**
+   * Builds a computation of the evaluation result of the `expression` for the document whose position is shifted by the given
+   * amount relative to the current document. If the shifted document is outside of the
+   * partition containing the current document,
+   * then the `defaultExpression` is used instead of the `expression`.
+   *
+   * Sorting is required.
+   *
+   * @param path The output field path.
+   * @param expression The expression.
+   * @param defaultExpression The default expression.
+   *                          If `None`, then the default expression is evaluated to BSON `Null`.
+   *                          Must evaluate to a constant value.
+   * @param by The shift specified similarly to [[Windows rules for window bounds]]:
+   *           - 0 means the current document;
+   *           - a negative value refers to the document preceding the current one;
+   *           - a positive value refers to the document following the current one.
+   * @tparam TExpression The expression type.
+   * @return The constructed windowed computation.
+   * @see [[https://dochub.mongodb.org/core/window-functions-shift \$shift]]
+   */
+  def shift[TExpression >: Null](
+      path: String,
+      expression: TExpression,
+      defaultExpression: Option[TExpression],
+      by: Int
+  ): WindowOutputField =
+    JWindowOutputFields.shift(path, expression, defaultExpression.orNull, by)
+
+  /**
+   * Builds a computation of the order number of each document in its
+   * partition.
+   *
+   * Sorting is required.
+   *
+   * @param path The output field path.
+   * @return The constructed windowed computation.
+   * @see [[https://dochub.mongodb.org/core/window-functions-document-number \$documentNumber]]
+   */
+  def documentNumber(path: String): WindowOutputField =
+    JWindowOutputFields.documentNumber(path)
+
+  /**
+   * Builds a computation of the rank of each document in its
+   * partition.
+ * Documents with the same value(s) of the `sortBy` fields result in + * the same ranking and result in gaps in the returned ranks. + * For example, a partition with the sequence [1, 3, 3, 5] representing the values of the single `sortBy` field + * produces the following sequence of rank values: [1, 2, 2, 4]. + * + * Sorting is required. + * + * @param path The output field path. + * @return The constructed windowed computation. + * @see [[https://dochub.mongodb.org/core/window-functions-rank \$rank]] + */ + def rank(path: String): WindowOutputField = + JWindowOutputFields.rank(path) + + /** + * Builds a computation of the dense rank of each document in its + * partition. + * Documents with the same value(s) of the `sortBy` fields result in + * the same ranking but do not result in gaps in the returned ranks. + * For example, a partition with the sequence [1, 3, 3, 5] representing the values of the single `sortBy` field + * produces the following sequence of rank values: [1, 2, 2, 3]. + * + * Sorting is required. + * + * @param path The output field path. + * @return The constructed windowed computation. + * @see [[https://dochub.mongodb.org/core/window-functions-dense-rank \$denseRank]] + */ + def denseRank(path: String): WindowOutputField = + JWindowOutputFields.denseRank(path) + + /** + * Builds a computation of the last observed non-`Null` evaluation result of the `expression`. + * + * Sorting is required. + * + * @param path The output field path. + * @param expression The expression. + * @tparam TExpression The expression type. + * @return The constructed windowed computation. + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/locf \$locf]] + */ + def locf[TExpression](path: String, expression: TExpression): WindowOutputField = + JWindowOutputFields.locf(path, expression) + + /** + * Builds a computation of a value that is equal to the evaluation result of the `expression` when it is non-`Null`, + * or to the linear interpolation of surrounding evaluation results of the `expression` when the result is BSON `Null`. + * + * Sorting is required. + * + * @param path The output field path. + * @param expression The expression. + * @tparam TExpression The expression type. + * @return The constructed windowed computation. + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/linearFill \$linearFill]] + */ + def linearFill[TExpression](path: String, expression: TExpression): WindowOutputField = + JWindowOutputFields.linearFill(path, expression) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/Windows.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/Windows.scala new file mode 100644 index 00000000000..5ccbd299edf --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/Windows.scala @@ -0,0 +1,264 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.mongodb.scala.model + +import com.mongodb.annotations.{ Beta, Reason } +import com.mongodb.client.model.{ MongoTimeUnit => JMongoTimeUnit, Windows => JWindows } +import org.bson.types.Decimal128 +import org.mongodb.scala.bson.conversions.Bson + +/** + * Builders for [[Window windows]] used when expressing [[WindowOutputField window output fields]]. + * There are two types of windows: documents and range. + * + * Bounded and half-bounded windows require sorting. + * Window bounds are inclusive and the lower bound must always be less than or equal to the upper bound. + * The following type-specific rules are applied to windows: + * - documents + * - bounds + * - 0 refers to the current document and is functionally equivalent to [[Windows.Bound CURRENT]]; + * - a negative value refers to documents preceding the current one; + * - a positive value refers to documents following the current one; + * - range + * - `sortBy` (see `Aggregates.setWindowFields`) + * - must contain exactly one field; + * - must specify the ascending sort order; + * - the `sortBy` field must be of either a numeric BSON type + * (see the `\$isNumber` aggregation pipeline stage) + * or the BSON `Date` type if time + * bounds are used; + * - bounds + * - if numeric, i.e., not `com.mongodb.client.model.Windows.Bound`, then the bound is calculated by adding + * the value to the value of the `sortBy` field in the current document; + * - if [[Windows.Bound CURRENT]], then the bound is determined by the current document + * and not the current value of the `sortBy` field; + * - time bounds require specifying a [[MongoTimeUnit time unit]] and are added as per the + * `\$dateAdd`/`\$dateSubtract` aggregation pipeline stage specification. + * + * @see [[WindowOutputField]] + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/isNumber/ \$isNumber aggregation pipeline stage]] + * @see [[https://www.mongodb.com/docs/manual/reference/bson-types/#date BSON Date type]] + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/dateAdd/ \$dateAdd aggregation pipeline stage]] + * @see [[https://www.mongodb.com/docs/manual/reference/operator/aggregation/dateSubtract/ \$dateSubtract aggregation pipeline stage]] + * @since 4.3 + * @note Requires MongoDB 5.0 or greater. + */ +@Beta(Array(Reason.SERVER)) +object Windows { + + /** + * Creates a window from `Bson` in situations when there is no builder method that better satisfies your needs. + * This method cannot be used to validate the syntax. + * + * Example
+   * The following code creates two functionally identical windows, though they may not be equal.
+   * {{{
+   *  val pastWeek1: Window = Windows.timeRange(-1, MongoTimeUnit.WEEK, Windows.Bound.CURRENT)
+   *  val pastWeek2: Window = Windows.of(
+   *    Document("range" -> BsonArray(-1, "current"),
+   *      "unit" -> BsonString("week")))
+   * }}}
+   *
+   * @param window A `Bson` representing the required window.
+   * @return The constructed window.
+   */
+  def of(window: Bson): Window = JWindows.of(window)
+
+  /**
+   * Creates a documents window whose bounds are determined by a number of documents before and after the current document.
+   *
+   * @param lower A value based on which the lower bound of the window is calculated.
+   * @param upper A value based on which the upper bound of the window is calculated.
+   * @return The constructed documents window.
+   */
+  def documents(lower: Int, upper: Int): Window = JWindows.documents(lower, upper)
+
+  /**
+   * Creates a documents window whose bounds are determined by a number of documents before and after the current document.
+   *
+   * @param lower A value based on which the lower bound of the window is calculated.
+   * @param upper A value based on which the upper bound of the window is calculated.
+   * @return The constructed documents window.
+   */
+  def documents(lower: JWindows.Bound, upper: Int): Window = JWindows.documents(lower, upper)
+
+  /**
+   * Creates a documents window whose bounds are determined by a number of documents before and after the current document.
+   *
+   * @param lower A value based on which the lower bound of the window is calculated.
+   * @param upper A value based on which the upper bound of the window is calculated.
+   * @return The constructed documents window.
+   */
+  def documents(lower: Int, upper: JWindows.Bound): Window = JWindows.documents(lower, upper)
+
+  /**
+   * Creates a documents window whose bounds are determined by a number of documents before and after the current document.
+   *
+   * @param lower A value based on which the lower bound of the window is calculated.
+   * @param upper A value based on which the upper bound of the window is calculated.
+   * @return The constructed documents window.
+   */
+  def documents(lower: JWindows.Bound, upper: JWindows.Bound): Window = JWindows.documents(lower, upper)
+
+  /**
+   * Creates a dynamically-sized range window whose bounds are determined by a range of possible values around
+   * the value of the `sortBy` field in the current document.
+   *
+   * @param lower A value based on which the lower bound of the window is calculated.
+   * @param upper A value based on which the upper bound of the window is calculated.
+   * @return The constructed range window.
+   */
+  def range(lower: Long, upper: Long): Window = JWindows.range(lower, upper)
+
+  /**
+   * Creates a dynamically-sized range window whose bounds are determined by a range of possible values around
+   * the value of the `sortBy` field in the current document.
+   *
+   * @param lower A value based on which the lower bound of the window is calculated.
+   * @param upper A value based on which the upper bound of the window is calculated.
+   * @return The constructed range window.
+   */
+  def range(lower: Double, upper: Double): Window = JWindows.range(lower, upper)
+
+  /**
+   * Creates a dynamically-sized range window whose bounds are determined by a range of possible values around
+   * the value of the `sortBy` field in the current document.
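+   *
+   * @example A sketch of a `Decimal128`-valued range window (the bound values are arbitrary):
+   * {{{
+   *  // Covers documents whose sortBy value lies within 1.5 units of the current document's value.
+   *  val window: Window = Windows.range(Decimal128.parse("-1.5"), Decimal128.parse("1.5"))
+   * }}}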
+ * + * @param lower A value based on which the lower bound of the window is calculated. + * @param upper A value based on which the upper bound of the window is calculated. + * @return The constructed range window. + */ + def range(lower: Decimal128, upper: Decimal128): Window = JWindows.range(lower, upper) + + /** + * Creates a dynamically-sized range window whose bounds are determined by a range of possible values around + * the value of the `sortBy` field in the current document. + * + * @param lower A value based on which the lower bound of the window is calculated. + * @param upper A value based on which the upper bound of the window is calculated. + * @return The constructed range window. + */ + def range(lower: JWindows.Bound, upper: Long): Window = JWindows.range(lower, upper) + + /** + * Creates a dynamically-sized range window whose bounds are determined by a range of possible values around + * the value of the `sortBy` field in the current document. + * + * @param lower A value based on which the lower bound of the window is calculated. + * @param upper A value based on which the upper bound of the window is calculated. + * @return The constructed range window. + */ + def range(lower: JWindows.Bound, upper: Double): Window = JWindows.range(lower, upper) + + /** + * Creates a dynamically-sized range window whose bounds are determined by a range of possible values around + * the value of the `sortBy` field in the current document. + * + * @param lower A value based on which the lower bound of the window is calculated. + * @param upper A value based on which the upper bound of the window is calculated. + * @return The constructed range window. + */ + def range(lower: JWindows.Bound, upper: Decimal128): Window = JWindows.range(lower, upper) + + /** + * Creates a dynamically-sized range window whose bounds are determined by a range of possible values around + * the value of the `sortBy` field in the current document. + * + * @param lower A value based on which the lower bound of the window is calculated. + * @param upper A value based on which the upper bound of the window is calculated. + * @return The constructed range window. + */ + def range(lower: Long, upper: JWindows.Bound): Window = JWindows.range(lower, upper) + + /** + * Creates a dynamically-sized range window whose bounds are determined by a range of possible values around + * the value of the `sortBy` field in the current document. + * + * @param lower A value based on which the lower bound of the window is calculated. + * @param upper A value based on which the upper bound of the window is calculated. + * @return The constructed range window. + */ + def range(lower: Double, upper: JWindows.Bound): Window = JWindows.range(lower, upper) + + /** + * Creates a dynamically-sized range window whose bounds are determined by a range of possible values around + * the value of the `sortBy` field in the current document. + * + * @param lower A value based on which the lower bound of the window is calculated. + * @param upper A value based on which the upper bound of the window is calculated. + * @return The constructed range window. + */ + def range(lower: Decimal128, upper: JWindows.Bound): Window = JWindows.range(lower, upper) + + /** + * Creates a dynamically-sized range window whose bounds are determined by a range of possible values around + * the BSON `Date` value of the `sortBy` + * field in the current document. + * + * @param lower A value based on which the lower bound of the window is calculated. 
+   * @param upper A value based on which the upper bound of the window is calculated.
+   * @param unit A time unit in which `lower` and `upper` are specified.
+   * @return The constructed range window.
+   */
+  def timeRange(lower: Long, upper: Long, unit: JMongoTimeUnit): Window = JWindows.timeRange(lower, upper, unit)
+
+  /**
+   * Creates a dynamically-sized range window whose bounds are determined by a range of possible values around
+   * the BSON `Date` value of the `sortBy`
+   * field in the current document.
+   *
+   * @param lower A value based on which the lower bound of the window is calculated.
+   * @param upper A value based on which the upper bound of the window is calculated.
+   * @param unit A time unit in which `upper` is specified.
+   * @return The constructed range window.
+   */
+  def timeRange(lower: JWindows.Bound, upper: Long, unit: JMongoTimeUnit): Window =
+    JWindows.timeRange(lower, upper, unit)
+
+  /**
+   * Creates a dynamically-sized range window whose bounds are determined by a range of possible values around
+   * the BSON `Date` value of the `sortBy`
+   * field in the current document.
+   *
+   * @param lower A value based on which the lower bound of the window is calculated.
+   * @param unit A time unit in which `lower` is specified.
+   * @param upper A value based on which the upper bound of the window is calculated.
+   * @return The constructed range window.
+   */
+  def timeRange(lower: Long, unit: JMongoTimeUnit, upper: JWindows.Bound): Window =
+    JWindows.timeRange(lower, unit, upper)
+
+  /**
+   * Special values that may be used when specifying the bounds of a [[Window window]].
+   *
+   * @since 4.3
+   * @note Requires MongoDB 5.0 or greater.
+   */
+  @Beta(Array(Reason.SERVER))
+  object Bound {
+
+    /**
+     * The [[Window window]] bound is the same as the corresponding bound of the partition encompassing it.
+     */
+    val UNBOUNDED = JWindows.Bound.UNBOUNDED
+
+    /**
+     * The [[Window window]] bound is determined by the current document and is inclusive.
+     */
+    val CURRENT = JWindows.Bound.CURRENT
+  }
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/bulk/package.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/bulk/package.scala
new file mode 100644
index 00000000000..44dcd7e3c84
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/bulk/package.scala
@@ -0,0 +1,235 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.model
+
+import org.mongodb.scala.MongoNamespace
+import org.mongodb.scala.bson.conversions.Bson
+
+import scala.collection.JavaConverters._
+
+/**
+ * Models, options, and results for the client-level bulk write operation.
+ *
+ * @since 5.4
+ */
+package object bulk {
+
+  /**
+   * A model for inserting a document.
+   */
+  type ClientNamespacedInsertOneModel = com.mongodb.client.model.bulk.ClientNamespacedInsertOneModel
+
+  /**
+   * A model for updating at most one document matching a filter.
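+   *
+   * @example A minimal sketch (the namespace, filter, and update are hypothetical; `Filters` and
+   * `Updates` come from `org.mongodb.scala.model`):
+   * {{{
+   *  val model: ClientNamespacedUpdateOneModel = ClientNamespacedWriteModel.updateOne(
+   *    new MongoNamespace("db.coll"),
+   *    Filters.equal("_id", 1),
+   *    Updates.set("status", "done"))
+   * }}}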
+ */ + type ClientNamespacedUpdateOneModel = com.mongodb.client.model.bulk.ClientNamespacedUpdateOneModel + + /** + * A model for updating all documents matching a filter. + */ + type ClientNamespacedUpdateManyModel = com.mongodb.client.model.bulk.ClientNamespacedUpdateManyModel + + /** + * A model for replacing at most one document matching a filter. + */ + type ClientNamespacedReplaceOneModel = com.mongodb.client.model.bulk.ClientNamespacedReplaceOneModel + + /** + * A model for deleting at most one document matching a filter. + */ + type ClientNamespacedDeleteOneModel = com.mongodb.client.model.bulk.ClientNamespacedDeleteOneModel + + /** + * A model for deleting all documents matching a filter. + */ + type ClientNamespacedDeleteManyModel = com.mongodb.client.model.bulk.ClientNamespacedDeleteManyModel + + /** + * A combination of an individual write operation and a [[MongoNamespace]] + * the operation is targeted at. + */ + type ClientNamespacedWriteModel = com.mongodb.client.model.bulk.ClientNamespacedWriteModel + + object ClientNamespacedWriteModel { + + def insertOne[TDocument](namespace: MongoNamespace, document: TDocument): ClientNamespacedInsertOneModel = + com.mongodb.client.model.bulk.ClientNamespacedWriteModel.insertOne(namespace, document) + + def updateOne(namespace: MongoNamespace, filter: Bson, update: Bson): ClientNamespacedUpdateOneModel = + com.mongodb.client.model.bulk.ClientNamespacedWriteModel.updateOne(namespace, filter, update) + + def updateOne( + namespace: MongoNamespace, + filter: Bson, + update: Bson, + options: ClientUpdateOneOptions + ): ClientNamespacedUpdateOneModel = + com.mongodb.client.model.bulk.ClientNamespacedWriteModel.updateOne(namespace, filter, update, options) + + def updateOne( + namespace: MongoNamespace, + filter: Bson, + updatePipeline: Iterable[_ <: Bson] + ): ClientNamespacedUpdateOneModel = + com.mongodb.client.model.bulk.ClientNamespacedWriteModel.updateOne(namespace, filter, updatePipeline.asJava) + + def updateOne( + namespace: MongoNamespace, + filter: Bson, + updatePipeline: Iterable[_ <: Bson], + options: ClientUpdateOneOptions + ): ClientNamespacedUpdateOneModel = + com.mongodb.client.model.bulk.ClientNamespacedWriteModel.updateOne( + namespace, + filter, + updatePipeline.asJava, + options + ) + + def updateMany(namespace: MongoNamespace, filter: Bson, update: Bson): ClientNamespacedUpdateManyModel = + com.mongodb.client.model.bulk.ClientNamespacedWriteModel.updateMany(namespace, filter, update) + + def updateMany( + namespace: MongoNamespace, + filter: Bson, + update: Bson, + options: ClientUpdateManyOptions + ): ClientNamespacedUpdateManyModel = + com.mongodb.client.model.bulk.ClientNamespacedWriteModel.updateMany(namespace, filter, update, options) + + def updateMany( + namespace: MongoNamespace, + filter: Bson, + updatePipeline: Iterable[_ <: Bson] + ): ClientNamespacedUpdateManyModel = + com.mongodb.client.model.bulk.ClientNamespacedWriteModel.updateMany(namespace, filter, updatePipeline.asJava) + + def updateMany( + namespace: MongoNamespace, + filter: Bson, + updatePipeline: Iterable[_ <: Bson], + options: ClientUpdateManyOptions + ): ClientNamespacedUpdateManyModel = + com.mongodb.client.model.bulk.ClientNamespacedWriteModel.updateMany( + namespace, + filter, + updatePipeline.asJava, + options + ) + + def replaceOne[TDocument]( + namespace: MongoNamespace, + filter: Bson, + replacement: TDocument + ): ClientNamespacedReplaceOneModel = + com.mongodb.client.model.bulk.ClientNamespacedWriteModel.replaceOne(namespace, filter, 
replacement) + + def replaceOne[TDocument]( + namespace: MongoNamespace, + filter: Bson, + replacement: TDocument, + options: ClientReplaceOneOptions + ): ClientNamespacedReplaceOneModel = + com.mongodb.client.model.bulk.ClientNamespacedWriteModel.replaceOne(namespace, filter, replacement, options) + + def deleteOne(namespace: MongoNamespace, filter: Bson): ClientNamespacedDeleteOneModel = + com.mongodb.client.model.bulk.ClientNamespacedWriteModel.deleteOne(namespace, filter) + + def deleteOne( + namespace: MongoNamespace, + filter: Bson, + options: ClientDeleteOneOptions + ): ClientNamespacedDeleteOneModel = + com.mongodb.client.model.bulk.ClientNamespacedWriteModel.deleteOne(namespace, filter, options) + + def deleteMany(namespace: MongoNamespace, filter: Bson): ClientNamespacedDeleteManyModel = + com.mongodb.client.model.bulk.ClientNamespacedWriteModel.deleteMany(namespace, filter) + + def deleteMany( + namespace: MongoNamespace, + filter: Bson, + options: ClientDeleteManyOptions + ): ClientNamespacedDeleteManyModel = + com.mongodb.client.model.bulk.ClientNamespacedWriteModel.deleteMany(namespace, filter, options) + } + + /** + * The options to apply when executing a client-level bulk write operation. + */ + type ClientBulkWriteOptions = com.mongodb.client.model.bulk.ClientBulkWriteOptions + + object ClientBulkWriteOptions { + def clientBulkWriteOptions(): ClientBulkWriteOptions = + com.mongodb.client.model.bulk.ClientBulkWriteOptions.clientBulkWriteOptions() + } + + /** + * The options to apply when updating a document. + */ + type ClientUpdateOneOptions = com.mongodb.client.model.bulk.ClientUpdateOneOptions + + object ClientUpdateOneOptions { + def clientUpdateOneOptions(): ClientUpdateOneOptions = + com.mongodb.client.model.bulk.ClientUpdateOneOptions.clientUpdateOneOptions() + } + + /** + * The options to apply when updating documents. + */ + type ClientUpdateManyOptions = com.mongodb.client.model.bulk.ClientUpdateManyOptions + + object ClientUpdateManyOptions { + def clientUpdateManyOptions(): ClientUpdateManyOptions = + com.mongodb.client.model.bulk.ClientUpdateManyOptions.clientUpdateManyOptions() + } + + /** + * The options to apply when replacing a document. + */ + type ClientReplaceOneOptions = com.mongodb.client.model.bulk.ClientReplaceOneOptions + + object ClientReplaceOneOptions { + def clientReplaceOneOptions(): ClientReplaceOneOptions = + com.mongodb.client.model.bulk.ClientReplaceOneOptions.clientReplaceOneOptions() + } + + /** + * The options to apply when deleting a document. + */ + type ClientDeleteOneOptions = com.mongodb.client.model.bulk.ClientDeleteOneOptions + + object ClientDeleteOneOptions { + def clientDeleteOneOptions(): ClientDeleteOneOptions = + com.mongodb.client.model.bulk.ClientDeleteOneOptions.clientDeleteOneOptions() + } + + /** + * The options to apply when deleting documents. + */ + type ClientDeleteManyOptions = com.mongodb.client.model.bulk.ClientDeleteManyOptions + + object ClientDeleteManyOptions { + def clientDeleteManyOptions(): ClientDeleteManyOptions = + com.mongodb.client.model.bulk.ClientDeleteManyOptions.clientDeleteManyOptions() + } + + /** + * The result of a successful or partially successful client-level bulk write operation. 
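+   *
+   * @example A minimal sketch of reading summary counts off a result (assumes `result` was returned
+   * by a client-level bulk write):
+   * {{{
+   *  def summarize(result: ClientBulkWriteResult): String =
+   *    "inserted=" + result.getInsertedCount + ", modified=" + result.getModifiedCount
+   * }}}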
+   *
+   */
+  type ClientBulkWriteResult = com.mongodb.client.model.bulk.ClientBulkWriteResult
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/changestream/FullDocument.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/changestream/FullDocument.scala
new file mode 100644
index 00000000000..adf5d62f744
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/changestream/FullDocument.scala
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.model.changestream
+
+import com.mongodb.client.model.changestream.{ FullDocument => JFullDocument }
+
+import scala.util.Try
+
+/**
+ * Change Stream fullDocument configuration.
+ *
+ * Determines what to return for update operations when using a Change Stream. Defaults to [[FullDocument.DEFAULT]].
+ * When set to [[FullDocument.UPDATE_LOOKUP]], the change stream for partial updates will include both a delta describing the
+ * changes to the document as well as a copy of the entire document that was changed from *some time* after the change occurred.
+ * @note Requires MongoDB 3.6 or greater
+ * @since 2.4
+ */
+object FullDocument {
+
+  /**
+   * Default
+   *
+   * Returns the server's default value in the `fullDocument` field.
+   */
+  val DEFAULT = JFullDocument.DEFAULT
+
+  /**
+   * Lookup
+   *
+   * The change stream for partial updates will include both a delta describing the changes to the document as well as a copy of the
+   * entire document that was changed from *some time* after the change occurred.
+   */
+  val UPDATE_LOOKUP = JFullDocument.UPDATE_LOOKUP
+
+  /**
+   * Configures the change stream to return the post-image of the modified document for replace and update change events, if it
+   * is available.
+   *
+   * @since 4.7
+   * @note Requires MongoDB 6.0 or greater
+   */
+  val WHEN_AVAILABLE = JFullDocument.WHEN_AVAILABLE
+
+  /**
+   * The same behavior as [[WHEN_AVAILABLE]] except that an error is raised if the post-image is not available.
+   *
+   * @since 4.7
+   * @note Requires MongoDB 6.0 or greater
+   */
+  val REQUIRED = JFullDocument.REQUIRED
+
+  /**
+   * Returns the FullDocument from the string value.
+   *
+   * @param fullDocument the string value.
+   * @return the FullDocument value
+   */
+  def fromString(fullDocument: String): Try[FullDocument] = Try(JFullDocument.fromString(fullDocument))
+
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/changestream/FullDocumentBeforeChange.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/changestream/FullDocumentBeforeChange.scala
new file mode 100644
index 00000000000..16e7dc0afb1
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/changestream/FullDocumentBeforeChange.scala
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.model.changestream + +import com.mongodb.client.model.changestream.{ FullDocumentBeforeChange => JFullDocumentBeforeChange } + +import scala.util.Try + +/** + * Change Stream fullDocumentBeforeChange configuration. + * + * Determines what to return for update operations when using a Change Stream. Defaults to [[FullDocumentBeforeChange#DEFAULT]]. + * + * @since 4.7 + * @note Requires MongoDB 6.0 or greater + */ +object FullDocumentBeforeChange { + + /** + * The default value + */ + val DEFAULT = JFullDocumentBeforeChange.DEFAULT + + /** + * Configures the change stream to not include the pre-image of the modified document. + */ + val OFF = JFullDocumentBeforeChange.OFF + + /** + * Configures the change stream to return the pre-image of the modified document for replace, update, and delete change events if it + * is available. + */ + val WHEN_AVAILABLE = JFullDocumentBeforeChange.WHEN_AVAILABLE + + /** + * The same behavior as [[WHEN_AVAILABLE]] except that an error is raised if the pre-image is not available. + */ + val REQUIRED = JFullDocumentBeforeChange.REQUIRED + + /** + * Returns the FullDocumentBeforeChange from the string value. + * + * @param fullDocumentBeforeChange the string value. + * @return the FullDocumentBeforeChange value + */ + def fromString(fullDocumentBeforeChange: String): Try[JFullDocumentBeforeChange] = + Try(JFullDocumentBeforeChange.fromString(fullDocumentBeforeChange)) + +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/changestream/package.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/changestream/package.scala new file mode 100644 index 00000000000..00fe124beb7 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/changestream/package.scala @@ -0,0 +1,42 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.model + +package object changestream { + + /** + * Represents the `\$changeStream` aggregation output document. + * + * '''Note:''' this class will not be applicable for all change stream outputs. If using custom pipelines that radically change the + * change stream result, then an alternative document format should be used. + * + * @tparam T The type that this collection will encode the `fullDocument` field into. + */ + type ChangeStreamDocument[T] = com.mongodb.client.model.changestream.ChangeStreamDocument[T] + + /** + * Change Stream fullDocument configuration. 
+ */ + type FullDocument = com.mongodb.client.model.changestream.FullDocument + + /** + * Change Stream fullDocumentBeforeChange configuration. + */ + type FullDocumentBeforeChange = com.mongodb.client.model.changestream.FullDocumentBeforeChange + + object F +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/densify/DensifyOptions.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/densify/DensifyOptions.scala new file mode 100644 index 00000000000..b65361aa4ad --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/densify/DensifyOptions.scala @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.mongodb.scala.model.densify + +import com.mongodb.client.model.densify.{ DensifyOptions => JDensifyOptions } + +/** + * Represents optional fields of the `\$densify` pipeline stage of an aggregation pipeline. + * + * @see `Aggregates.densify` + * @note Requires MongoDB 5.1 or greater. + * @since 4.7 + */ +object DensifyOptions { + + /** + * Returns `DensifyOptions` that represents server defaults. + * + * @return `DensifyOptions` that represents server defaults. + */ + def densifyOptions(): DensifyOptions = JDensifyOptions.densifyOptions() +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/densify/DensifyRange.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/densify/DensifyRange.scala new file mode 100644 index 00000000000..4588895c17a --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/densify/DensifyRange.scala @@ -0,0 +1,123 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.mongodb.scala.model.densify + +import com.mongodb.client.model.{ MongoTimeUnit => JMongoTimeUnit } +import com.mongodb.client.model.densify.{ DensifyRange => JDensifyRange } +import org.bson.conversions.Bson + +import java.time.Instant + +/** + * A specification of how to compute the missing field values + * for which new documents must be added. It specifies a half-closed interval of values with the lower bound being inclusive, and a step. + * The first potentially missing value within each interval is its lower bound, other values are computed by adding the step + * multiple times, until the result is out of the interval. 
Each time the step is added, the result is a potentially missing value for + * which a new document must be added if the sequence of documents that is being densified does not have a document + * with equal value of the field. + * + * @see `Aggregates.densify` + * @note Requires MongoDB 5.1 or greater. + * @since 4.7 + */ +object DensifyRange { + + /** + * Returns a `DensifyRange` that represents an interval with the smallest + * BSON `32-bit integer` / `64-bit integer` / `Double` / `Decimal128` value of the field + * in the sequence of documents being its lower bound, and the largest value being the upper bound. + * + * @param step The positive step. + * @return The requested `DensifyRange`. + */ + def fullRangeWithStep(step: Number): NumberDensifyRange = JDensifyRange.fullRangeWithStep(step) + + /** + * Returns a `DensifyRange` that represents an interval with the smallest + * BSON `32-bit integer` / `64-bit integer` / `Double` / `Decimal128` value of the field + * in the partition of documents being its lower bound, and the largest value being the upper bound. + * + * @param step The positive step. + * @return The requested `DensifyRange`. + */ + def partitionRangeWithStep(step: Number): NumberDensifyRange = + JDensifyRange.partitionRangeWithStep(step) + + /** + * Returns a `DensifyRange` that represents a single interval [l, u). + * + * @param l The lower bound. + * @param u The upper bound. + * @param step The positive step. + * @return The requested `DensifyRange`. + */ + def rangeWithStep(l: Number, u: Number, step: Number): NumberDensifyRange = + JDensifyRange.rangeWithStep(l, u, step) + + /** + * Returns a `DensifyRange` that represents an interval with the smallest BSON `Date` value of the field + * in the sequence of documents being its lower bound, and the largest value being the upper bound. + * + * @param step The positive step. + * @param unit The unit in which the `step` is specified. + * @return The requested `DensifyRange`. + */ + def fullRangeWithStep(step: Long, unit: JMongoTimeUnit): DateDensifyRange = + JDensifyRange.fullRangeWithStep(step, unit) + + /** + * Returns a `DensifyRange` that represents an interval with the smallest BSON `Date` value of the field + * in the partition of documents being its lower bound, and the largest value being the upper bound. + * + * @param step The positive step. + * @param unit The unit in which the `step` is specified. + * @return The requested `DensifyRange`. + */ + def partitionRangeWithStep(step: Long, unit: JMongoTimeUnit): DateDensifyRange = + JDensifyRange.partitionRangeWithStep(step, unit) + + /** + * Returns a `DensifyRange` that represents a single interval [l, u). + * + * @param l The lower bound. + * @param u The upper bound. + * @param step The positive step. + * @param unit The unit in which the `step` is specified. + * @return The requested `DensifyRange`. + */ + def rangeWithStep(l: Instant, u: Instant, step: Long, unit: JMongoTimeUnit): DateDensifyRange = + JDensifyRange.rangeWithStep(l, u, step, unit) + + /** + * Creates a `DensifyRange` from a `Bson` in situations when there is no builder method that better satisfies your needs. + * This method cannot be used to validate the syntax. + * + * Example
+ * The following code creates two functionally equivalent `DensifyRange`s, + * though they may not be equal. + * {{{ + * val range1 = DensifyRange.partitionRangeWithStep( + * 1, MongoTimeUnit.MINUTE) + * val range2 = DensifyRange.of(Document("bounds" -> "partition", + * "step" -> 1, "unit" -> MongoTimeUnit.MINUTE.value())) + * }}} + * + * @param range A `Bson` representing the required `DensifyRange`. + * + * @return The requested `DensifyRange`. + */ + def of(range: Bson): DensifyRange = JDensifyRange.of(range) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/densify/package.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/densify/package.scala new file mode 100644 index 00000000000..ea276eb659a --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/densify/package.scala @@ -0,0 +1,63 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.mongodb.scala.model + +import com.mongodb.annotations.Evolving + +/** + * @see `Aggregates.densify` + * @note Requires MongoDB 5.1 or greater. + * @since 4.7 + */ +package object densify { + + /** + * A specification of how to compute the missing field values + * for which new documents must be added. It specifies a half-closed interval of values with the lower bound being inclusive, and a step. + * The first potentially missing value within each interval is its lower bound, other values are computed by adding the step + * multiple times, until the result is out of the interval. Each time the step is added, the result is a potentially missing value for + * which a new document must be added if the sequence of documents that is being densified does not have a document + * with equal value of the field. + * + * @see `Aggregates.densify` + */ + @Evolving + type DensifyRange = com.mongodb.client.model.densify.DensifyRange + + /** + * @see `DensifyRange.fullRangeWithStep` + * @see `DensifyRange.partitionRangeWithStep` + * @see `DensifyRange.rangeWithStep` + */ + @Evolving + type NumberDensifyRange = com.mongodb.client.model.densify.NumberDensifyRange + + /** + * @see `DensifyRange.fullRangeWithStep` + * @see `DensifyRange.partitionRangeWithStep` + * @see `DensifyRange.rangeWithStep` + */ + @Evolving + type DateDensifyRange = com.mongodb.client.model.densify.DateDensifyRange + + /** + * Represents optional fields of the `\$densify` pipeline stage of an aggregation pipeline. + * + * @see `Aggregates.densify` + */ + @Evolving + type DensifyOptions = com.mongodb.client.model.densify.DensifyOptions +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/fill/FillOptions.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/fill/FillOptions.scala new file mode 100644 index 00000000000..6601c55750b --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/fill/FillOptions.scala @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.mongodb.scala.model.fill + +import com.mongodb.client.model.fill.{ FillOptions => JFillOptions } + +/** + * Represents optional fields of the `\$fill` pipeline stage of an aggregation pipeline. + * + * @see `Aggregates.fill` + * @note Requires MongoDB 5.3 or greater. + * @since 4.7 + */ +object FillOptions { + + /** + * Returns `FillOptions` that represents server defaults. + * + * @return `FillOptions` that represents server defaults. + */ + def fillOptions(): FillOptions = JFillOptions.fillOptions() +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/fill/FillOutputField.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/fill/FillOutputField.scala new file mode 100644 index 00000000000..c043332f9c8 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/fill/FillOutputField.scala @@ -0,0 +1,81 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.mongodb.scala.model.fill + +import com.mongodb.client.model.fill.{ FillOutputField => JFillOutputField } +import org.mongodb.scala.bson.conversions.Bson +import org.mongodb.scala.model.WindowOutputFields + +/** + * The core part of the `\$fill` pipeline stage of an aggregation pipeline. + * A pair of an expression/method and a path to a field to be filled with evaluation results of the expression/method. + * + * @see `Aggregates.fill` + * @note Requires MongoDB 5.3 or greater. + * @since 4.7 + */ +object FillOutputField { + + /** + * Returns a `FillOutputField` that uses the specified `expression`. + * + * @param field The field to fill. + * @param expression The expression. + * @tparam TExpression The `expression` type. + * @return The requested `FillOutputField`. + * @see [[https://www.mongodb.com/docs/manual/core/document/#dot-notation Dot notation]] + */ + def value[TExpression](field: String, expression: TExpression): ValueFillOutputField = + JFillOutputField.value(field, expression) + + /** + * Returns a `FillOutputField` that uses the [[WindowOutputFields.locf]] method. + * + * @param field The field to fill. + * @return The requested `FillOutputField`. + * @see [[https://www.mongodb.com/docs/manual/core/document/#dot-notation Dot notation]] + */ + def locf(field: String): LocfFillOutputField = JFillOutputField.locf(field) + + /** + * Returns a `FillOutputField` that uses the [[WindowOutputFields.linearFill]] method. 
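+   *
+   * For example (the `"time"` and `"price"` field names are hypothetical):
+   * {{{
+   *  Aggregates.fill(
+   *    FillOptions.fillOptions().sortBy(Sorts.ascending("time")),
+   *    FillOutputField.linear("price"))
+   * }}}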
+ * + * Sorting (`FillOptions.sortBy`) is required. + * + * @param field The field to fill. + * @return The requested `FillOutputField`. + * @see [[https://www.mongodb.com/docs/manual/core/document/#dot-notation Dot notation]] + */ + def linear(field: String): LinearFillOutputField = JFillOutputField.linear(field) + + /** + * Creates a `FillOutputField` from a `Bson` in situations when there is no builder method that better satisfies your needs. + * This method cannot be used to validate the syntax. + * + * Example
+ * The following code creates two functionally equivalent `FillOutputField`s, + * though they may not be equal. + * {{{ + * val field1 = FillOutputField.locf("fieldName") + * val field2 = FillOutputField.of(Document("fieldName" -> Document("method" -> "locf"))) + * }}} + * + * @param fill A `Bson` representing the required `FillOutputField`. + * + * @return The requested `FillOutputField`. + */ + def of(fill: Bson): FillOutputField = JFillOutputField.of(fill) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/fill/fill.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/fill/fill.scala new file mode 100644 index 00000000000..1bd9835c47d --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/fill/fill.scala @@ -0,0 +1,61 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.mongodb.scala.model + +import com.mongodb.annotations.Evolving + +/** + * @see `Aggregates.fill` + * @note Requires MongoDB 5.3 or greater. + * @since 4.7 + */ +package object fill { + + /** + * Represents optional fields of the `\$fill` pipeline stage of an aggregation pipeline. + * + * @see `Aggregates.fill` + */ + @Evolving + type FillOptions = com.mongodb.client.model.fill.FillOptions + + /** + * The core part of the `\$fill` pipeline stage of an aggregation pipeline. + * A pair of an expression/method and a path to a field to be filled with evaluation results of the expression/method. + * + * @see `Aggregates.fill` + */ + @Evolving + type FillOutputField = com.mongodb.client.model.fill.FillOutputField + + /** + * @see `FillOutputField.value` + */ + @Evolving + type ValueFillOutputField = com.mongodb.client.model.fill.ValueFillOutputField + + /** + * @see `FillOutputField.locf` + */ + @Evolving + type LocfFillOutputField = com.mongodb.client.model.fill.LocfFillOutputField + + /** + * @see `FillOutputField.linear` + */ + @Evolving + type LinearFillOutputField = com.mongodb.client.model.fill.LinearFillOutputField +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/geojson/package.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/geojson/package.scala new file mode 100644 index 00000000000..5329e61bfd9 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/geojson/package.scala @@ -0,0 +1,395 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala.model + +import scala.collection.JavaConverters._ +import scala.collection.mutable.ArrayBuffer + +import com.mongodb.client.model.{ geojson => Jgeojson } + +package object geojson { + + /** + * A GeoJSON Coordinate Reference System (CRS). + */ + type CoordinateReferenceSystem = Jgeojson.CoordinateReferenceSystem + + /** + * GeoJSON coordinate reference system types. + */ + type CoordinateReferenceSystemType = Jgeojson.CoordinateReferenceSystemType + + /** + * GeoJSON coordinate reference system types. + */ + object CoordinateReferenceSystemType { + + /** + * A coordinate reference system that is specified by name + */ + val NAME: CoordinateReferenceSystemType = Jgeojson.CoordinateReferenceSystemType.NAME + + /** + * A coordinate reference system that is specified by a dereferenceable URI + */ + val LINK: CoordinateReferenceSystemType = Jgeojson.CoordinateReferenceSystemType.LINK + } + + /** + * The GeoJSON object types. + */ + type GeoJsonObjectType = Jgeojson.GeoJsonObjectType + + /** + * The GeoJSON object types. + */ + object GeoJsonObjectType { + + /** + * A GeometryCollection + */ + val GEOMETRY_COLLECTION: GeoJsonObjectType = Jgeojson.GeoJsonObjectType.GEOMETRY_COLLECTION + + /** + * A LineString + */ + val LINE_STRING: GeoJsonObjectType = Jgeojson.GeoJsonObjectType.LINE_STRING + + /** + * A MultiLineString + */ + val MULTI_LINE_STRING: GeoJsonObjectType = Jgeojson.GeoJsonObjectType.MULTI_LINE_STRING + + /** + * A MultiPoint + */ + val MULTI_POINT: GeoJsonObjectType = Jgeojson.GeoJsonObjectType.MULTI_POINT + + /** + * A MultiPolygon + */ + val MULTI_POLYGON: GeoJsonObjectType = Jgeojson.GeoJsonObjectType.MULTI_POLYGON + + /** + * A Point + */ + val POINT: GeoJsonObjectType = Jgeojson.GeoJsonObjectType.POINT + + /** + * A Polygon + */ + val POLYGON: GeoJsonObjectType = Jgeojson.GeoJsonObjectType.POLYGON + } + + /** + * An abstract class for representations of GeoJSON geometry objects. + */ + type Geometry = Jgeojson.Geometry + + /** + * A representation of a GeoJSON GeometryCollection. + */ + type GeometryCollection = Jgeojson.GeometryCollection + + /** + * A representation of a GeoJSON GeometryCollection. + */ + object GeometryCollection { + + /** + * Construct an instance with the given list of Geometry objects + * + * @param geometries the list of Geometry objects + */ + def apply(geometries: Seq[geojson.Geometry]): GeometryCollection = + new Jgeojson.GeometryCollection(geometries.asJava) + + /** + * Construct an instance with the given list of Geometry objects + * + * @param coordinateReferenceSystem the coordinate reference system + * @param geometries the list of Geometry objects + */ + def apply(coordinateReferenceSystem: CoordinateReferenceSystem, geometries: Seq[Geometry]): GeometryCollection = + new Jgeojson.GeometryCollection(coordinateReferenceSystem, geometries.asJava) + } + + /** + * A representation of a GeoJSON LineString. + */ + type LineString = Jgeojson.LineString + + /** + * A representation of a GeoJSON LineString. + */ + object LineString { + + /** + * Construct an instance with the given coordinates. + * + * @param coordinates the list of Geometry objects + * @return the new LineString + */ + def apply(coordinates: Seq[Position]): LineString = new Jgeojson.LineString(coordinates.asJava) + + /** + * Construct an instance with the given coordinates. 
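+     *
+     * @example A sketch with an explicit CRS (the coordinate values are arbitrary):
+     * {{{
+     *  val line: LineString = LineString(
+     *    NamedCoordinateReferenceSystem.EPSG_4326,
+     *    Seq(new Position(40.0, 5.0), new Position(41.0, 6.0)))
+     * }}}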
+ * + * @param coordinateReferenceSystem the coordinate reference system + * @param coordinates the list of Geometry objects + * @return the new LineString + */ + def apply(coordinateReferenceSystem: CoordinateReferenceSystem, coordinates: Seq[Position]): LineString = + new Jgeojson.LineString(coordinateReferenceSystem, coordinates.asJava) + } + + /** + * A representation of a GeoJSON MultiLineString. + */ + type MultiLineString = Jgeojson.MultiLineString + + /** + * A representation of a GeoJSON MultiLineString. + */ + object MultiLineString { + + /** + * Construct an instance with the given coordinates. + * + * @param coordinates the coordinates of each line + * @return the new MultiLineString + */ + def apply(coordinates: Seq[Position]*): MultiLineString = new MultiLineString(coordinates.map(_.asJava).asJava) + + /** + * Construct an instance with the given coordinates and coordinate reference system. + * + * @param coordinateReferenceSystem the coordinate reference system + * @param coordinates the coordinates of each line + * @return the new MultiLineString + */ + def apply(coordinateReferenceSystem: CoordinateReferenceSystem, coordinates: Seq[Position]*): MultiLineString = + new Jgeojson.MultiLineString(coordinateReferenceSystem, coordinates.map(_.asJava).asJava) + } + + /** + * A representation of a GeoJSON MultiPoint. + */ + type MultiPoint = Jgeojson.MultiPoint + + /** + * A representation of a GeoJSON MultiPoint. + */ + object MultiPoint { + + /** + * Construct an instance with the given coordinates. + * + * @param coordinates the coordinates + * @return the new MultiPoint + */ + def apply(coordinates: Position*): MultiPoint = new Jgeojson.MultiPoint(coordinates.asJava) + + /** + * Construct an instance with the given coordinates and coordinate reference system. + * + * @param coordinateReferenceSystem the coordinate reference system + * @param coordinates the coordinates + * @return the new MultiPoint + */ + def apply(coordinateReferenceSystem: CoordinateReferenceSystem, coordinates: Position*): MultiPoint = + new Jgeojson.MultiPoint(coordinateReferenceSystem, coordinates.asJava) + } + + /** + * A representation of a GeoJSON MultiPolygon. + */ + type MultiPolygon = Jgeojson.MultiPolygon + + /** + * A representation of a GeoJSON MultiPolygon. + */ + object MultiPolygon { + + /** + * Construct an instance. + * + * @param coordinates the coordinates + * @return the new MultiPolygon + */ + def apply(coordinates: PolygonCoordinates*): MultiPolygon = new Jgeojson.MultiPolygon(coordinates.asJava) + + /** + * Construct an instance. + * + * @param coordinateReferenceSystem the coordinate reference system + * @param coordinates the coordinates + * @return the new MultiPolygon + */ + def apply(coordinateReferenceSystem: CoordinateReferenceSystem, coordinates: PolygonCoordinates*): MultiPolygon = + new Jgeojson.MultiPolygon(coordinateReferenceSystem, coordinates.asJava) + } + + /** + * A GeoJSON named Coordinate Reference System. + */ + type NamedCoordinateReferenceSystem = Jgeojson.NamedCoordinateReferenceSystem + + /** + * A GeoJSON named Coordinate Reference System. + */ + object NamedCoordinateReferenceSystem { + + /** + * The EPSG:4326 Coordinate Reference System. 
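+ *
+ * For example (a sketch; the position values are illustrative):
+ * {{{
+ * val point: Point = Point(NamedCoordinateReferenceSystem.EPSG_4326, Position(5.0, 40.0))
+ * }}}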
+ */ + val EPSG_4326: NamedCoordinateReferenceSystem = Jgeojson.NamedCoordinateReferenceSystem.EPSG_4326 + + /** + * The urn:ogc:def:crs:OGC:1.3:CRS84 Coordinate Reference System + */ + val CRS_84: NamedCoordinateReferenceSystem = Jgeojson.NamedCoordinateReferenceSystem.CRS_84 + + /** + * A custom MongoDB EPSG:4326 Coordinate Reference System that uses a strict counter-clockwise winding order. + * + * [[https://www.mongodb.com/docs/manual/reference/operator/query/geometry/ Strict Winding]] + */ + val EPSG_4326_STRICT_WINDING: NamedCoordinateReferenceSystem = + Jgeojson.NamedCoordinateReferenceSystem.EPSG_4326_STRICT_WINDING + + /** + * Construct an instance + * + * @param name the name + * @return the new NamedCoordinateReferenceSystem + */ + def apply(name: String): NamedCoordinateReferenceSystem = new Jgeojson.NamedCoordinateReferenceSystem(name) + } + + /** + * A representation of a GeoJSON Point. + */ + type Point = Jgeojson.Point + + /** + * A representation of a GeoJSON Point. + */ + object Point { + + /** + * Construct an instance with the given coordinate. + * + * @param coordinate the non-null coordinate of the point + * @return the new Point + */ + def apply(coordinate: Position): Point = new Jgeojson.Point(coordinate) + + /** + * Construct an instance with the given coordinate and coordinate reference system. + * + * @param coordinateReferenceSystem the coordinate reference system + * @param coordinate the non-null coordinate of the point + * @return the new Point + */ + def apply(coordinateReferenceSystem: CoordinateReferenceSystem, coordinate: Position): Point = + new Jgeojson.Point(coordinateReferenceSystem, coordinate) + } + + /** + * A representation of a GeoJSON Polygon. + */ + type Polygon = Jgeojson.Polygon + + /** + * A representation of a GeoJSON Polygon. + */ + object Polygon { + + /** + * Construct an instance with the given coordinates. + * + * @param exterior the exterior ring of the polygon + * @param holes optional interior rings of the polygon + * @return the new Polygon + */ + def apply(exterior: Seq[Position], holes: Seq[Position]*): Polygon = + new Jgeojson.Polygon(exterior.asJava, holes.map(_.asJava): _*) + + /** + * Construct an instance with the given coordinates. + * + * @param coordinates the coordinates + * @return the new Polygon + */ + def apply(coordinates: PolygonCoordinates): Polygon = new Jgeojson.Polygon(coordinates) + + /** + * Construct an instance with the given coordinates and coordinate reference system. + * + * @param coordinateReferenceSystem the coordinate reference system + * @param coordinates the coordinates + * @return the new Polygon + */ + def apply(coordinateReferenceSystem: CoordinateReferenceSystem, coordinates: PolygonCoordinates): Polygon = + new Jgeojson.Polygon(coordinateReferenceSystem, coordinates) + } + + /** + * Coordinates for a GeoJSON Polygon. + */ + type PolygonCoordinates = Jgeojson.PolygonCoordinates + + /** + * Coordinates for a GeoJSON Polygon. + */ + object PolygonCoordinates { + + /** + * Construct an instance. + * + * @param exterior the exterior ring of the polygon + * @param holes optional interior rings of the polygon + * @return the new PolygonCoordinates + */ + def apply(exterior: Seq[Position], holes: Seq[Position]*): PolygonCoordinates = + new Jgeojson.PolygonCoordinates(exterior.asJava, holes.map(_.asJava): _*) + } + + /** + * A representation of a GeoJSON Position. + */ + type Position = Jgeojson.Position + + /** + * A representation of a GeoJSON Position. 
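+ *
+ * For example, a longitude/latitude pair (values are illustrative):
+ * {{{
+ * val pos: Position = Position(40.5, 18.2)
+ * }}}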
+ */
+ object Position {
+
+ /**
+ * Construct an instance.
+ *
+ * @param values the position values
+ * @return the new Position
+ */
+ def apply(values: Double*): Position = {
+ val buffer = new ArrayBuffer[java.lang.Double]
+ values.foreach(buffer.append(_))
+ new Jgeojson.Position(buffer.asJava)
+ }
+ }
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/package.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/package.scala
new file mode 100644
index 00000000000..0d23a38c2e8
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/package.scala
@@ -0,0 +1,997 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala
+
+import com.mongodb.annotations.{ Beta, Reason, Sealed }
+
+import scala.collection.JavaConverters._
+import com.mongodb.client.model.{ MongoTimeUnit => JMongoTimeUnit }
+import org.mongodb.scala.bson.conversions.Bson
+
+// scalastyle:off number.of.methods number.of.types
+/**
+ * The model package containing models and options that help describe `MongoCollection` operations
+ */
+package object model {
+
+ /**
+ * A representation of a BSON document field whose value is another BSON document.
+ */
+ type BsonField = com.mongodb.client.model.BsonField
+
+ /**
+ * A representation of a BSON document field whose value is another BSON document.
+ */
+ object BsonField {
+
+ /**
+ * Construct a new instance.
+ *
+ * @param name the name of the field
+ * @param value the value for the field
+ * @return a new BsonField instance
+ */
+ def apply(name: String, value: Bson): BsonField = {
+ new com.mongodb.client.model.BsonField(name, value)
+ }
+ }
+
+ /**
+ * The options for a `\$bucketAuto` aggregation pipeline stage
+ *
+ * @since 1.2
+ */
+ type BucketAutoOptions = com.mongodb.client.model.BucketAutoOptions
+
+ /**
+ * The options for a `\$bucketAuto` aggregation pipeline stage
+ *
+ * @since 1.2
+ */
+ object BucketAutoOptions {
+ def apply(): BucketAutoOptions = new com.mongodb.client.model.BucketAutoOptions()
+ }
+
+ /**
+ * The options for a `\$bucket` aggregation pipeline stage
+ *
+ * @since 1.2
+ */
+ type BucketOptions = com.mongodb.client.model.BucketOptions
+
+ /**
+ * The options for a `\$bucket` aggregation pipeline stage
+ *
+ * @since 1.2
+ */
+ object BucketOptions {
+ def apply(): BucketOptions = new com.mongodb.client.model.BucketOptions()
+ }
+
+ /**
+ * Granularity values for automatic bucketing.
+ *
+ * @see [[https://en.wikipedia.org/wiki/Preferred_number Preferred numbers]]
+ * @since 1.2
+ */
+ type BucketGranularity = com.mongodb.client.model.BucketGranularity
+
+ /**
+ * The options to apply to a bulk write.
+ */
+ type BulkWriteOptions = com.mongodb.client.model.BulkWriteOptions
+
+ /**
+ * The options to apply to a bulk write.
+ */
+ object BulkWriteOptions {
+ def apply(): BulkWriteOptions = new com.mongodb.client.model.BulkWriteOptions()
+ }
+
+ /**
+ * The collation options.
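+ *
+ * A minimal sketch built via the underlying Java builder (locale and strength values are illustrative):
+ * {{{
+ * val collation: Collation = com.mongodb.client.model.Collation.builder()
+ *   .locale("en")
+ *   .collationStrength(com.mongodb.client.model.CollationStrength.SECONDARY)
+ *   .build()
+ * }}}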
+ */ + type Collation = com.mongodb.client.model.Collation + + /** + * The collation alternative options. + */ + type CollationAlternate = com.mongodb.client.model.CollationAlternate + + /** + * The collation max variable options + */ + type CollationMaxVariable = com.mongodb.client.model.CollationMaxVariable + + /** + * The collation configuration of how character cases are handled + */ + type CollationCaseFirst = com.mongodb.client.model.CollationCaseFirst + + /** + * The collation configuration of how differences between characters are handled. + */ + type CollationStrength = com.mongodb.client.model.CollationStrength + + /** + * The options to apply to a count operation. + */ + type CountOptions = com.mongodb.client.model.CountOptions + + /** + * The options to apply to a count operation. + */ + object CountOptions { + def apply(): CountOptions = new com.mongodb.client.model.CountOptions() + } + + /** + * The options to apply to an estimated count operation. + * @since 2.4 + */ + type EstimatedDocumentCountOptions = com.mongodb.client.model.EstimatedDocumentCountOptions + + /** + * The options to apply to an estimated count operation. + * @since 2.4 + */ + object EstimatedDocumentCountOptions { + def apply(): EstimatedDocumentCountOptions = new com.mongodb.client.model.EstimatedDocumentCountOptions() + } + + /** + * Options for creating a collection + */ + type CreateCollectionOptions = com.mongodb.client.model.CreateCollectionOptions + + /** + * Options for creating a collection + */ + object CreateCollectionOptions { + def apply(): CreateCollectionOptions = new com.mongodb.client.model.CreateCollectionOptions() + + def apply(options: CreateCollectionOptions): CreateCollectionOptions = + new com.mongodb.client.model.CreateCollectionOptions(options) + } + + /** + * Auxiliary parameters for creating an encrypted collection. + * + * @since 4.9 + */ + @Beta(Array(Reason.SERVER)) + type CreateEncryptedCollectionParams = com.mongodb.client.model.CreateEncryptedCollectionParams + + /** + * Auxiliary parameters for creating an encrypted collection. 
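+ *
+ * For example (a sketch; "local" is an illustrative KMS provider name):
+ * {{{
+ * val params: CreateEncryptedCollectionParams = CreateEncryptedCollectionParams("local")
+ * }}}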
+ *
+ * @since 4.9
+ */
+ @Beta(Array(Reason.SERVER))
+ object CreateEncryptedCollectionParams {
+ def apply(kmsProvider: String): CreateEncryptedCollectionParams =
+ new com.mongodb.client.model.CreateEncryptedCollectionParams(kmsProvider)
+ }
+
+ /**
+ * Options for dropping a collection
+ *
+ * @since 4.6
+ */
+ type DropCollectionOptions = com.mongodb.client.model.DropCollectionOptions
+
+ /**
+ * Options for dropping a collection
+ *
+ * @since 4.6
+ */
+ object DropCollectionOptions {
+ def apply(): DropCollectionOptions = new com.mongodb.client.model.DropCollectionOptions()
+ }
+
+ /**
+ * Options for creating a time-series collection
+ */
+ type TimeSeriesOptions = com.mongodb.client.model.TimeSeriesOptions
+
+ /**
+ * Options for creating a time-series collection
+ */
+ object TimeSeriesOptions {
+ def apply(timeFieldName: String): TimeSeriesOptions = new com.mongodb.client.model.TimeSeriesOptions(timeFieldName)
+ }
+
+ /**
+ * Options for creating a clustered index on a collection
+ */
+ type ClusteredIndexOptions = com.mongodb.client.model.ClusteredIndexOptions
+
+ /**
+ * Options for creating a clustered index on a collection
+ */
+ object ClusteredIndexOptions {
+ def apply(key: Bson, unique: Boolean): ClusteredIndexOptions =
+ new com.mongodb.client.model.ClusteredIndexOptions(key, unique)
+ }
+
+ /**
+ * Enumeration of values for time-series data granularity
+ */
+ type TimeSeriesGranularity = com.mongodb.client.model.TimeSeriesGranularity
+
+ /**
+ * Options for change stream pre- and post- images
+ */
+ type ChangeStreamPreAndPostImagesOptions = com.mongodb.client.model.ChangeStreamPreAndPostImagesOptions
+
+ /**
+ * Options for change stream pre- and post- images
+ */
+ object ChangeStreamPreAndPostImagesOptions {
+ def apply(enabled: Boolean): ChangeStreamPreAndPostImagesOptions =
+ new com.mongodb.client.model.ChangeStreamPreAndPostImagesOptions(enabled)
+ }
+
+ /**
+ * Options for creating a view
+ *
+ * @since 1.2
+ * @note Requires MongoDB 3.4 or greater
+ */
+ type CreateViewOptions = com.mongodb.client.model.CreateViewOptions
+
+ /**
+ * Options for creating a view
+ *
+ * @since 1.2
+ * @note Requires MongoDB 3.4 or greater
+ */
+ object CreateViewOptions {
+ def apply(): CreateViewOptions = new com.mongodb.client.model.CreateViewOptions()
+ }
+
+ /**
+ * The default options for a collection to apply on the creation of indexes.
+ *
+ * @note Requires MongoDB 3.2 or greater
+ * @see [[https://www.mongodb.com/docs/manual/reference/command/createIndexes Index options]]
+ * @since 1.1
+ */
+ type IndexOptionDefaults = com.mongodb.client.model.IndexOptionDefaults
+
+ /**
+ * The default options for a collection to apply on the creation of indexes.
+ */
+ object IndexOptionDefaults {
+
+ /**
+ * Construct a new instance.
+ */
+ def apply(): IndexOptionDefaults = new com.mongodb.client.model.IndexOptionDefaults()
+ }
+
+ /**
+ * The options to apply for delete operations.
+ */
+ type DeleteOptions = com.mongodb.client.model.DeleteOptions
+
+ /**
+ * The options to apply for delete operations.
+ */
+ object DeleteOptions {
+ def apply(): DeleteOptions = new com.mongodb.client.model.DeleteOptions()
+ }
+
+ /**
+ * A model describing the removal of all documents matching the query filter.
+ *
+ * @tparam TResult the type of document to update. In practice this doesn't actually apply to updates but is here for consistency with the
+ * other write models
+ */
+ type DeleteManyModel[TResult] = com.mongodb.client.model.DeleteManyModel[TResult]
+
+ /**
+ * A model describing the removal of all documents matching the query filter.
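+ *
+ * For example (a sketch; the filter field and value are illustrative):
+ * {{{
+ * val model = DeleteManyModel[Document](Filters.equal("status", "archived"))
+ * }}}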
+ */ + object DeleteManyModel { + def apply[T](filter: Bson): DeleteManyModel[T] = new com.mongodb.client.model.DeleteManyModel[T](filter) + def apply[T](filter: Bson, options: DeleteOptions): DeleteManyModel[T] = + new com.mongodb.client.model.DeleteManyModel[T](filter, options) + } + + /** + * A model describing the removal of at most one document matching the query filter. + * + * @tparam TResult the type of document to update. In practice this doesn't actually apply to updates but is here for consistency with + * the other write models + */ + type DeleteOneModel[TResult] = com.mongodb.client.model.DeleteOneModel[TResult] + + /** + * A model describing the removal of at most one document matching the query filter. + */ + object DeleteOneModel { + + /** + * Construct a new instance. + * + * @param filter the query filter + * @return the new DeleteOneModel + */ + def apply(filter: Bson): DeleteOneModel[Nothing] = new com.mongodb.client.model.DeleteOneModel(filter) + } + + /** + * Defines a Facet for use in `\$facet` pipeline stages. + * + * @since 1.2 + */ + type Facet = com.mongodb.client.model.Facet + + /** + * Defines a Facet for use in `\$facet` pipeline stages. + * + * @since 1.2 + */ + object Facet { + + /** + * Construct a new instance + * + * @param name the name of this facet + * @param pipeline the facet definition pipeline + * @return the `\$facet` pipeline stage + */ + def apply(name: String, pipeline: Bson*): Facet = { + new com.mongodb.client.model.Facet(name, pipeline.asJava) + } + } + + /** + * A helper to define new fields for the `\$addFields` pipeline stage + * + * @tparam TExpression the expression type + * @since 1.2 + */ + type Field[TExpression] = com.mongodb.client.model.Field[TExpression] + + /** + * A helper to define new fields for the `\$addFields` pipeline stage + * + * @since 1.2 + */ + object Field { + + /** + * Construct a new instance. + * + * @param name the name of the new field + * @param value the value of the new field + * @tparam TExpression the expression type + */ + def apply[TExpression](name: String, value: TExpression): Field[TExpression] = + new com.mongodb.client.model.Field(name, value) + + } + + /** + * The options to apply to an operation that atomically finds a document and deletes it. + */ + type FindOneAndDeleteOptions = com.mongodb.client.model.FindOneAndDeleteOptions + + /** + * The options to apply to an operation that atomically finds a document and deletes it. + */ + object FindOneAndDeleteOptions { + def apply(): FindOneAndDeleteOptions = new com.mongodb.client.model.FindOneAndDeleteOptions() + } + + /** + * The options to apply to an operation that atomically finds a document and replaces it. + */ + type FindOneAndReplaceOptions = com.mongodb.client.model.FindOneAndReplaceOptions + + /** + * The options to apply to an operation that atomically finds a document and replaces it. + */ + object FindOneAndReplaceOptions { + def apply(): FindOneAndReplaceOptions = new com.mongodb.client.model.FindOneAndReplaceOptions() + } + + /** + * The options to apply to an operation that atomically finds a document and updates it. + */ + type FindOneAndUpdateOptions = com.mongodb.client.model.FindOneAndUpdateOptions + + /** + * The options to apply to an operation that atomically finds a document and updates it. 
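+ *
+ * A minimal sketch (the option values are illustrative):
+ * {{{
+ * val options: FindOneAndUpdateOptions = FindOneAndUpdateOptions()
+ *   .upsert(true)
+ *   .returnDocument(com.mongodb.client.model.ReturnDocument.AFTER)
+ * }}}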
+ */ + object FindOneAndUpdateOptions { + def apply(): FindOneAndUpdateOptions = new com.mongodb.client.model.FindOneAndUpdateOptions() + } + + /** + * The options for a graphLookup aggregation pipeline stage + * @since 1.2 + */ + type GraphLookupOptions = com.mongodb.client.model.GraphLookupOptions + + /** + * The options for a graphLookup aggregation pipeline stage + * @since 1.2 + */ + object GraphLookupOptions { + def apply(): GraphLookupOptions = new com.mongodb.client.model.GraphLookupOptions() + } + + /** + * The options to apply to an operation that inserts multiple documents into a collection. + */ + type InsertManyOptions = com.mongodb.client.model.InsertManyOptions + + /** + * The options to apply to an operation that inserts multiple documents into a collection. + */ + object InsertManyOptions { + def apply(): InsertManyOptions = new com.mongodb.client.model.InsertManyOptions() + } + + /** + * The options to apply to the creation of an index. + */ + type IndexOptions = com.mongodb.client.model.IndexOptions + + /** + * The options to apply to the command when creating indexes. + * + * @since 2.2 + */ + type CreateIndexOptions = com.mongodb.client.model.CreateIndexOptions + + /** + * The options to apply to the command when creating indexes. + * + * @since 2.2 + */ + type DropIndexOptions = com.mongodb.client.model.DropIndexOptions + + /** + * The options to apply to the creation of an index. + */ + object IndexOptions { + def apply(): IndexOptions = new com.mongodb.client.model.IndexOptions() + } + + /** + * A model describing the creation of a single Atlas Search index. + */ + type SearchIndexModel = com.mongodb.client.model.SearchIndexModel + + /** + * Represents an Atlas Search Index type, which is utilized for creating specific types of indexes. + */ + type SearchIndexType = com.mongodb.client.model.SearchIndexType + + /** + * A model describing the creation of a single Atlas Search index. + */ + object SearchIndexModel { + + /** + * Construct an instance with the given Atlas Search index mapping definition. + * + * After calling this constructor, the `name` field will be `null`. In that case, when passing this + * `SearchIndexModel` to the `createSearchIndexes` method, the default search index name `default` + * will be used to create the search index. + * + * @param definition the search index mapping definition. + * @return the SearchIndexModel + */ + def apply(definition: Bson): SearchIndexModel = new com.mongodb.client.model.SearchIndexModel(definition) + + /** + * Construct an instance with the given search index name and definition. + * + * @param indexName the name of the search index to create. + * @param definition the search index mapping definition. + * @return the SearchIndexModel + */ + def apply(indexName: String, definition: Bson): SearchIndexModel = + new com.mongodb.client.model.SearchIndexModel(indexName, definition) + + /** + * Construct an instance with the given search index name and definition. + * + * @param indexName the name of the search index to create. + * @param definition the search index mapping definition. + * @param indexType the search index type. + * @return the SearchIndexModel + */ + def apply(indexName: Option[String], definition: Bson, indexType: Option[SearchIndexType]): SearchIndexModel = + new com.mongodb.client.model.SearchIndexModel(indexName.orNull, definition, indexType.orNull) + } + + /** + * A model describing the creation of a single index. 
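+ *
+ * For example, a unique ascending index (the field name is illustrative):
+ * {{{
+ * val model: IndexModel = IndexModel(
+ *   Indexes.ascending("email"),
+ *   IndexOptions().unique(true))
+ * }}}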
+ */ + type IndexModel = com.mongodb.client.model.IndexModel + + /** + * A model describing the creation of a single index. + */ + object IndexModel { + + /** + * Construct an instance with the given keys. + * + * @param keys the index keys + * @return the IndexModel + */ + def apply(keys: Bson): IndexModel = new com.mongodb.client.model.IndexModel(keys) + + /** + * Construct an instance with the given keys and options. + * + * @param keys the index keys + * @param indexOptions the index options + * @return the IndexModel + */ + def apply(keys: Bson, indexOptions: IndexOptions): IndexModel = + new com.mongodb.client.model.IndexModel(keys, indexOptions) + } + + /** + * The options to apply to an operation that inserts a single document into a collection. + * + * @note Requires MongoDB 3.2 or greater + * @since 1.1 + */ + type InsertOneOptions = com.mongodb.client.model.InsertOneOptions + + /** + * The options to apply to an operation that inserts a single document into a collection. + * + * @note Requires MongoDB 3.2 or greater + * @since 1.1 + */ + object InsertOneOptions { + + /** + * Construct a new instance. + */ + def apply(): InsertOneOptions = new com.mongodb.client.model.InsertOneOptions() + } + + /** + * A model describing an insert of a single document. + * + * @tparam TResult the type of document to insert. This can be of any type for which a `Codec` is registered + */ + type InsertOneModel[TResult] = com.mongodb.client.model.InsertOneModel[TResult] + + /** + * A model describing an insert of a single document. + */ + object InsertOneModel { + + /** + * Construct a new instance. + * + * @param document the document to insert + * @tparam TResult the type of document to insert. This can be of any type for which a `Codec` is registered + * @return the new InsertOneModel + */ + def apply[TResult](document: TResult): InsertOneModel[TResult] = + new com.mongodb.client.model.InsertOneModel[TResult](document) + } + + /** + * The map reduce to collection actions. + * + * These actions are only available when passing out a collection that already exists. This option is not available on secondary members + * of replica sets. The Enum values dictate what to do with the output collection if it already exists when the map reduce is run. + */ + type MapReduceAction = com.mongodb.client.model.MapReduceAction + + /** + * The options to apply when renaming a collection + */ + type RenameCollectionOptions = com.mongodb.client.model.RenameCollectionOptions + + /** + * The options to apply when renaming a collection + */ + object RenameCollectionOptions { + def apply(): RenameCollectionOptions = new com.mongodb.client.model.RenameCollectionOptions() + } + + /** + * The options to apply to a `\$push` update operator. + */ + type PushOptions = com.mongodb.client.model.PushOptions + + /** + * The options to apply to a `\$push` update operator. + */ + object PushOptions { + def apply(): PushOptions = new com.mongodb.client.model.PushOptions() + } + + /** + * Indicates which document to return, the original document before change or the document after the change + */ + type ReturnDocument = com.mongodb.client.model.ReturnDocument + + /** + * A model describing the replacement of at most one document that matches the query filter. + * + * @tparam TResult the type of document to replace. This can be of any type for which a `Codec` is registered + */ + type ReplaceOneModel[TResult] = com.mongodb.client.model.ReplaceOneModel[TResult] + + /** + * The options to apply when replacing documents. 
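+ *
+ * For example (a sketch):
+ * {{{
+ * val options: ReplaceOptions = ReplaceOptions().upsert(true)
+ * }}}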
+ * @since 2.3 + */ + type ReplaceOptions = com.mongodb.client.model.ReplaceOptions + + /** + * The options to apply when replacing documents. + * @since 2.3 + */ + object ReplaceOptions { + def apply(): ReplaceOptions = new com.mongodb.client.model.ReplaceOptions() + } + + /** + * A model describing the replacement of at most one document that matches the query filter. + */ + object ReplaceOneModel { + + /** + * Construct a new instance. + * + * @param filter a document describing the query filter. + * @param replacement the replacement document + * @tparam TResult the type of document to insert. This can be of any type for which a `Codec` is registered + * @return the new ReplaceOneModel + */ + def apply[TResult](filter: Bson, replacement: TResult): ReplaceOneModel[TResult] = + new com.mongodb.client.model.ReplaceOneModel[TResult](filter, replacement) + + /** + * Construct a new instance. + * + * @param filter a document describing the query filter. + * @param replacement the replacement document + * @param replaceOptions the options to apply + * @tparam TResult the type of document to insert. This can be of any type for which a `Codec` is registered + * @return the new ReplaceOneModel + * @since 2.3 + */ + def apply[TResult](filter: Bson, replacement: TResult, replaceOptions: ReplaceOptions): ReplaceOneModel[TResult] = + new com.mongodb.client.model.ReplaceOneModel[TResult](filter, replacement, replaceOptions) + } + + /** + * Text search options for the [[Filters]] text helper + * + * @see [[https://www.mongodb.com/docs/manual/reference/operator/query/text \$text]] + * @since 1.1 + */ + type TextSearchOptions = com.mongodb.client.model.TextSearchOptions + + /** + * Text search options for the [[Filters]] text helper + * @since 1.1 + */ + object TextSearchOptions { + + /** + * Construct a new instance. + */ + def apply(): TextSearchOptions = new com.mongodb.client.model.TextSearchOptions() + } + + /** + * Validation options for documents being inserted or updated in a collection + * + * @note Requires MongoDB 3.2 or greater + * @since 1.1 + */ + type ValidationOptions = com.mongodb.client.model.ValidationOptions + + /** + * Validation options for documents being inserted or updated in a collection + * + * @note Requires MongoDB 3.2 or greater + * @since 1.1 + */ + object ValidationOptions { + + /** + * Construct a new instance. + */ + def apply(): ValidationOptions = new com.mongodb.client.model.ValidationOptions() + } + + /** + * A model describing an update to all documents that matches the query filter. The update to apply must include only update + * operators. + * + * @tparam TResult the type of document to update. In practice this doesn't actually apply to updates but is here for consistency with the + * other write models + */ + type UpdateManyModel[TResult] = com.mongodb.client.model.UpdateManyModel[TResult] + + /** + * A model describing an update to all documents that matches the query filter. The update to apply must include only update + * operators. + */ + object UpdateManyModel { + + /** + * Construct a new instance. + * + * @param filter a document describing the query filter. + * @param update a document describing the update. The update to apply must include only update operators. + * @return the new UpdateManyModel + */ + def apply(filter: Bson, update: Bson): UpdateManyModel[Nothing] = + new com.mongodb.client.model.UpdateManyModel(filter, update) + + /** + * Construct a new instance. + * + * @param filter a document describing the query filter. 
+ * @param update a document describing the update. The update to apply must include only update operators. + * @param updateOptions the options to apply + * @return the new UpdateManyModel + */ + def apply(filter: Bson, update: Bson, updateOptions: UpdateOptions): UpdateManyModel[Nothing] = + new com.mongodb.client.model.UpdateManyModel(filter, update, updateOptions) + + /** + * Construct a new instance. + * + * @param filter a document describing the query filter. + * @param update a pipeline describing the update, which may not be null. + * @return the new UpdateManyModel + * @note Requires MongoDB 4.2 or greater + * @since 4.7 + */ + def apply(filter: Bson, update: Seq[Bson]): UpdateManyModel[Nothing] = + new com.mongodb.client.model.UpdateManyModel(filter, update.asJava) + + /** + * Construct a new instance. + * + * @param filter a document describing the query filter. + * @param update a pipeline describing the update, which may not be null. + * @param updateOptions the options to apply + * @return the new UpdateManyModel + * @note Requires MongoDB 4.2 or greater + * @since 4.7 + * + */ + def apply(filter: Bson, update: Seq[Bson], updateOptions: UpdateOptions): UpdateManyModel[Nothing] = + new com.mongodb.client.model.UpdateManyModel(filter, update.asJava, updateOptions) + } + + /** + * The options to apply when updating documents. + */ + type UpdateOptions = com.mongodb.client.model.UpdateOptions + + /** + * The options to apply when updating documents. + */ + object UpdateOptions { + def apply(): UpdateOptions = new com.mongodb.client.model.UpdateOptions() + } + + /** + * A model describing an update to at most one document that matches the query filter. The update to apply must include only update + * operators. + * + * @tparam TResult the type of document to update. In practice this doesn't actually apply to updates but is here for consistency with the + * other write models + */ + type UpdateOneModel[TResult] = com.mongodb.client.model.UpdateOneModel[TResult] + + /** + * A model describing an update to at most one document that matches the query filter. The update to apply must include only update + * operators. + */ + object UpdateOneModel { + + /** + * Construct a new instance. + * + * @param filter a document describing the query filter. + * @param update a document describing the update. The update to apply must include only update operators. + * @return the new UpdateOneModel + */ + def apply(filter: Bson, update: Bson): UpdateOneModel[Nothing] = + new com.mongodb.client.model.UpdateOneModel(filter, update) + + /** + * Construct a new instance. + * + * @param filter a document describing the query filter. + * @param update a document describing the update. The update to apply must include only update operators. + * @param updateOptions the options to apply + * @return the new UpdateOneModel + */ + def apply(filter: Bson, update: Bson, updateOptions: UpdateOptions): UpdateOneModel[Nothing] = + new com.mongodb.client.model.UpdateOneModel(filter, update, updateOptions) + + /** + * Construct a new instance. + * + * @param filter a document describing the query filter. + * @param update a pipeline describing the update, which may not be null. + * @return the new UpdateOneModel + * @note Requires MongoDB 4.2 or greater + * @since 4.7 + */ + def apply(filter: Bson, update: Seq[Bson]): UpdateOneModel[Nothing] = + new com.mongodb.client.model.UpdateOneModel(filter, update.asJava) + + /** + * Construct a new instance. + * + * @param filter a document describing the query filter. 
+ * @param update a pipeline describing the update, which may not be null. + * @param updateOptions the options to apply + * @return the new UpdateOneModel + * @note Requires MongoDB 4.2 or greater + * @since 4.7 + * + */ + def apply(filter: Bson, update: Seq[Bson], updateOptions: UpdateOptions): UpdateOneModel[Nothing] = + new com.mongodb.client.model.UpdateOneModel(filter, update.asJava, updateOptions) + } + + /** + * The options for an unwind aggregation pipeline stage + * + * @note Requires MongoDB 3.2 or greater + * @since 1.1 + */ + type UnwindOptions = com.mongodb.client.model.UnwindOptions + + /** + * The options for an unwind aggregation pipeline stage + * + * @note Requires MongoDB 3.2 or greater + * @since 1.1 + */ + object UnwindOptions { + + /** + * Construct a new instance. + */ + def apply(): UnwindOptions = new com.mongodb.client.model.UnwindOptions() + } + + /** + * Determines whether to error on invalid documents or just warn about the violations but allow invalid documents. + * + * @note Requires MongoDB 3.2 or greater + * @since 1.1 + */ + type ValidationAction = com.mongodb.client.model.ValidationAction + + /** + * Determines how strictly MongoDB applies the validation rules to existing documents during an insert or update. + * + * @note Requires MongoDB 3.2 or greater + * @since 1.1 + */ + type ValidationLevel = com.mongodb.client.model.ValidationLevel + + /** + * A base class for models that can be used in a bulk write operations. + * + * @tparam TResult the document type for storage + */ + type WriteModel[TResult] = com.mongodb.client.model.WriteModel[TResult] + + /** + * Helps define new variable for the `\$lookup` pipeline stage + * + * @tparam TExpression the type of the value for the new variable + * @since 2.3 + */ + type Variable[TExpression] = com.mongodb.client.model.Variable[TExpression] + + /** + * Helps define new variable for the `\$lookup` pipeline stage + * @since 2.3 + */ + object Variable { + + /** + * Creates a new variable definition for use in `\$lookup` pipeline stages + * + * @param name the name of the new variable + * @param value the value of the new variable + */ + def apply[TExpression](name: String, value: TExpression): Variable[TExpression] = + new com.mongodb.client.model.Variable[TExpression](name, value) + + } + + /** + * Units for specifying time-based values. + * + * @see [[Windows]] + * @see [[WindowOutputFields]] + * @see `org.mongodb.scala.model.densify.DensifyRange` + * @since 4.3 + * @note Requires MongoDB 5.0 or greater. + */ + object MongoTimeUnit { + + val YEAR = JMongoTimeUnit.YEAR + + val QUARTER = JMongoTimeUnit.QUARTER + + val MONTH = JMongoTimeUnit.MONTH + + val WEEK = JMongoTimeUnit.WEEK + + val DAY = JMongoTimeUnit.DAY + + val HOUR = JMongoTimeUnit.HOUR + + val MINUTE = JMongoTimeUnit.MINUTE + + val SECOND = JMongoTimeUnit.SECOND + + val MILLISECOND = JMongoTimeUnit.MILLISECOND + } + + /** + * A subset of documents within a partition in the `Aggregates.setWindowFields` pipeline stage + * of an aggregation pipeline (see `partitionBy` in `Aggregates.setWindowFields`). + * + * @see [[Windows]] + * @since 4.3 + */ + type Window = com.mongodb.client.model.Window + + /** + * The core part of the `Aggregates.setWindowFields` pipeline stage of an aggregation pipeline. + * A triple of a window function, a [[Window window]] and a path to a field to be computed by the window function over the window. 
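+ *
+ * A minimal sketch (assuming the `WindowOutputFields.sum` and `Windows.documents` helpers; names are illustrative):
+ * {{{
+ * val output: WindowOutputField = WindowOutputFields.sum(
+ *   "runningTotal", // the field to be computed
+ *   "\$amount", // the expression evaluated over the window
+ *   Some(Windows.documents(-2, 0))) // the previous two documents through the current one
+ * }}}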
+ * + * @see [[WindowOutputFields]] + * @since 4.3 + */ + type WindowOutputField = com.mongodb.client.model.WindowOutputField + + type GeoNearOptions = com.mongodb.client.model.GeoNearOptions + + /** + * @see `QuantileMethod.approximate()` + */ + @Sealed + type ApproximateQuantileMethod = com.mongodb.client.model.ApproximateQuantileMethod +} + +// scalastyle:on number.of.methods number.of.types diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/FuzzySearchOptions.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/FuzzySearchOptions.scala new file mode 100644 index 00000000000..d106d6bbd9d --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/FuzzySearchOptions.scala @@ -0,0 +1,37 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.mongodb.scala.model.search + +import com.mongodb.annotations.{ Beta, Reason } +import com.mongodb.client.model.search.{ FuzzySearchOptions => JFuzzySearchOptions } + +/** + * Fuzzy search options that may be used with some `SearchOperator`s. + * + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/autocomplete/ autocomplete operator]] + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/text/ text operator]] + * @since 4.7 + */ +@Beta(Array(Reason.CLIENT)) +object FuzzySearchOptions { + + /** + * Returns `FuzzySearchOptions` that represents server defaults. + * + * @return `FuzzySearchOptions` that represents server defaults. + */ + def fuzzySearchOptions(): FuzzySearchOptions = JFuzzySearchOptions.fuzzySearchOptions() +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchCollector.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchCollector.scala new file mode 100644 index 00000000000..a651e502b10 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchCollector.scala @@ -0,0 +1,86 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.mongodb.scala.model.search + +import com.mongodb.annotations.{ Beta, Reason } +import com.mongodb.client.model.search.{ SearchCollector => JSearchCollector } +import org.mongodb.scala.bson.conversions.Bson +import org.mongodb.scala.model.Projections + +import scala.collection.JavaConverters._ + +/** + * The core part of the `\$search` pipeline stage of an aggregation pipeline. 
+ * `SearchCollector`s allow returning metadata together with the matching search results. + * You may use the `$$SEARCH_META` variable, e.g., via [[Projections.computedSearchMeta]], to extract this metadata. + * + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/operators-and-collectors/#collectors Search collectors]] + * @since 4.7 + */ +@Beta(Array(Reason.CLIENT)) +object SearchCollector { + + /** + * Returns a `SearchCollector` that groups results by values or ranges in the specified faceted fields and returns the count + * for each of those groups. + * + * @param operator The search operator to use. + * @param facets The non-empty facet definitions. + * @return The requested `SearchCollector`. + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/facet/ facet collector]] + */ + @Beta(Array(Reason.CLIENT, Reason.SERVER)) + def facet(operator: SearchOperator, facets: Iterable[_ <: SearchFacet]): FacetSearchCollector = + JSearchCollector.facet(operator, facets.asJava) + + /** + * Creates a `SearchCollector` from a `Bson` in situations when there is no builder method that better satisfies your needs. + * This method cannot be used to validate the syntax. + * + * Example
+ * The following code creates two functionally equivalent `SearchCollector`s, + * though they may not be equal. + * {{{ + * val collector1: SearchCollector = SearchCollector.facet( + * SearchOperator.exists( + * SearchPath.fieldPath("fieldName")), + * Seq( + * SearchFacet.stringFacet( + * "stringFacetName", + * SearchPath.fieldPath("stringFieldName")), + * SearchFacet.numberFacet( + * "numberFacetName", + * SearchPath.fieldPath("numberFieldName"), + * Seq(10, 20, 30)))) + * val collector2: SearchCollector = SearchCollector.of(Document("facet" -> + * Document("operator" -> SearchOperator.exists( + * SearchPath.fieldPath("fieldName")).toBsonDocument, + * "facets" -> SearchFacet.combineToBson(Seq( + * SearchFacet.stringFacet( + * "stringFacetName", + * SearchPath.fieldPath("stringFieldName")), + * SearchFacet.numberFacet( + * "numberFacetName", + * SearchPath.fieldPath("numberFieldName"), + * Seq(10, 20, 30)))).toBsonDocument))) + * }}} + * + * @param collector A `Bson` representing the required `SearchCollector`. + * + * @return The requested `SearchCollector`. + */ + def of(collector: Bson): SearchCollector = JSearchCollector.of(collector) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchCount.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchCount.scala new file mode 100644 index 00000000000..ecba0ecce0d --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchCount.scala @@ -0,0 +1,66 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.mongodb.scala.model.search + +import com.mongodb.annotations.{ Beta, Reason } +import com.mongodb.client.model.search.{ SearchCount => JSearchCount } +import org.mongodb.scala.bson.conversions.Bson +import org.mongodb.scala.model.Projections + +/** + * Counting options. + * You may use the `$$SEARCH_META` variable, e.g., via [[Projections.computedSearchMeta]], + * to extract the results of counting. + * + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/counting/ Counting]] + * @since 4.7 + */ +@Beta(Array(Reason.CLIENT, Reason.SERVER)) +object SearchCount { + + /** + * Returns a `SearchCount` that instructs to count documents exactly. + * + * @return The requested `SearchCount`. + */ + def total(): TotalSearchCount = JSearchCount.total() + + /** + * Returns a `SearchCount` that instructs to count documents exactly only up to + * `LowerBoundSearchCount.threshold`. + * + * @return The requested `SearchCount`. + */ + def lowerBound(): LowerBoundSearchCount = JSearchCount.lowerBound() + + /** + * Creates a `SearchCount` from a `Bson` in situations when there is no builder method that better satisfies your needs. + * This method cannot be used to validate the syntax. + * + * Example
+ * The following code creates two functionally equivalent `SearchCount`s, + * though they may not be equal. + * {{{ + * val count1: SearchCount = SearchCount.lowerBound() + * val count2: SearchCount = SearchCount.of(Document("type" -> "lowerBound")) + * }}} + * + * @param count A `Bson` representing the required `SearchCount`. + * + * @return The requested `SearchCount`. + */ + def of(count: Bson): SearchCount = JSearchCount.of(count) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchFacet.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchFacet.scala new file mode 100644 index 00000000000..3bc27520ea3 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchFacet.scala @@ -0,0 +1,103 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.mongodb.scala.model.search + +import com.mongodb.annotations.{ Beta, Reason } +import com.mongodb.client.model.search.{ SearchFacet => JSearchFacet } +import org.mongodb.scala.bson.conversions.Bson + +import java.time.Instant +import collection.JavaConverters._ + +/** + * A facet definition for [[FacetSearchCollector]]. + * + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/facet/#facet-definition Facet definition]] + * @since 4.7 + */ +@Beta(Array(Reason.CLIENT, Reason.SERVER)) +object SearchFacet { + + /** + * Returns a `SearchFacet` that allows narrowing down search results based on the most frequent + * BSON `String` values of the specified field. + * + * @param name The facet name. + * @param path The field to facet on. + * @return The requested `SearchFacet`. + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/facet/#string-facets String facet definition]] + */ + def stringFacet(name: String, path: FieldSearchPath): StringSearchFacet = JSearchFacet.stringFacet(name, path) + + /** + * Returns a `SearchFacet` that allows determining the frequency of + * BSON `32-bit integer` / `64-bit integer` / `Double` values + * in the search results by breaking the results into separate ranges. + * + * @param name The facet name. + * @param path The path to facet on. + * @param boundaries Bucket boundaries in ascending order. Must contain at least two boundaries. + * @return The requested `SearchFacet`. + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/facet/#numeric-facets Numeric facet definition]] + */ + def numberFacet(name: String, path: FieldSearchPath, boundaries: Iterable[Number]): NumberSearchFacet = + JSearchFacet.numberFacet(name, path, boundaries.asJava) + + /** + * Returns a `SearchFacet` that allows determining the frequency of BSON `Date` values + * in the search results by breaking the results into separate ranges. + * + * @param name The facet name. + * @param path The path to facet on. + * @param boundaries Bucket boundaries in ascending order. Must contain at least two boundaries. + * @return The requested `SearchFacet`. 
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/facet/#date-facets Date facet definition]] + * @see `org.bson.codecs.jsr310.InstantCodec` + */ + def dateFacet(name: String, path: FieldSearchPath, boundaries: Iterable[Instant]): DateSearchFacet = + JSearchFacet.dateFacet(name, path, boundaries.asJava) + + /** + * Creates a `SearchFacet` from a `Bson` in situations when there is no builder method that better satisfies your needs. + * This method cannot be used to validate the syntax. + * + * Example
+ * The following code creates two functionally equivalent `SearchFacet`s, + * though they may not be equal. + * {{{ + * val facet1: SearchFacet = SearchFacet.stringFacet("facetName", + * SearchPath.fieldPath("fieldName")) + * val facet2: SearchFacet = SearchFacet.of(Document("facetName" -> Document("type" -> "string", + * "path" -> SearchPath.fieldPath("fieldName").toValue))) + * }}} + * + * @param facet A `Bson` representing the required `SearchFacet`. + * + * @return The requested `SearchFacet`. + */ + def of(facet: Bson): SearchFacet = JSearchFacet.of(facet) + + /** + * Combines `SearchFacet`s into a `Bson`. + * + * This method may be useful when using [[SearchCollector.of]]. + * + * @param facets The non-empty facet definitions to combine. + * @return A `Bson` representing combined `facets`. + */ + def combineToBson(facets: Iterable[_ <: SearchFacet]): Bson = + JSearchFacet.combineToBson(facets.asJava) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchHighlight.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchHighlight.scala new file mode 100644 index 00000000000..7ac1deebac1 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchHighlight.scala @@ -0,0 +1,74 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.mongodb.scala.model.search + +import com.mongodb.annotations.{ Beta, Reason } +import com.mongodb.client.model.search.{ SearchHighlight => JSearchHighlight } +import org.mongodb.scala.bson.conversions.Bson +import org.mongodb.scala.model.Projections + +import collection.JavaConverters._ + +/** + * Highlighting options. + * You may use the `\$meta: "searchHighlights"` expression, e.g., via [[Projections.metaSearchHighlights]], + * to extract the results of highlighting. + * + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/highlighting/ Highlighting]] + * @since 4.7 + */ +@Beta(Array(Reason.CLIENT)) +object SearchHighlight { + + /** + * Returns a `SearchHighlight` for the given `paths`. + * + * @param path The field to be searched. + * @param paths More fields to be searched. + * @return The requested `SearchHighlight`. + */ + def paths(path: SearchPath, paths: SearchPath*): SearchHighlight = JSearchHighlight.paths(path, paths: _*) + + /** + * Returns a `SearchHighlight` for the given `paths`. + * + * @param paths The non-empty fields to be searched. + * @return The requested `SearchHighlight`. + */ + def paths(paths: Iterable[_ <: SearchPath]): SearchHighlight = JSearchHighlight.paths(paths.asJava) + + /** + * Creates a `SearchHighlight` from a `Bson` in situations when there is no builder method that better satisfies your needs. + * This method cannot be used to validate the syntax. + * + * Example
+ * The following code creates two functionally equivalent `SearchHighlight`s, + * though they may not be equal. + * {{{ + * val highlight1: SearchHighlight = SearchHighlight.paths( + * SearchPath.fieldPath("fieldName"), + * SearchPath.wildcardPath("wildc*rd")) + * val highlight2: SearchHighlight = SearchHighlight.of(Document("path" -> Seq( + * SearchPath.fieldPath("fieldName").toBsonValue, + * SearchPath.wildcardPath("wildc*rd").toBsonValue))) + * }}} + * + * @param highlight A `Bson` representing the required `SearchHighlight`. + * + * @return The requested `SearchHighlight`. + */ + def of(highlight: Bson): SearchHighlight = JSearchHighlight.of(highlight) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchOperator.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchOperator.scala new file mode 100644 index 00000000000..1fa47a54e1b --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchOperator.scala @@ -0,0 +1,517 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.mongodb.scala.model.search + +import com.mongodb.annotations.{ Beta, Reason } +import com.mongodb.client.model.search.{ SearchOperator => JSearchOperator } + +import java.util.UUID + +import org.mongodb.scala.bson.BsonDocument +import org.mongodb.scala.bson.conversions.Bson +import org.mongodb.scala.model.geojson.Point + +import org.bson.types.ObjectId; + +import java.time.{ Duration, Instant } + +import collection.JavaConverters._ + +/** + * The core part of the `\$search` pipeline stage of an aggregation pipeline. + * + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/operators-and-collectors/#operators Search operators]] + * @since 4.7 + */ +@Beta(Array(Reason.CLIENT)) +object SearchOperator { + + /** + * Returns a base for a `SearchOperator` that may combine multiple `SearchOperator`s. + * Combining `SearchOperator`s affects calculation of the relevance score. + * + * @return A base for a `CompoundSearchOperator`. + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/compound/ compound operator]] + */ + def compound(): CompoundSearchOperatorBase = JSearchOperator.compound() + + /** + * Returns a `SearchOperator` that tests if the `path` exists in a document. + * + * @param path The path to test. + * @return The requested `SearchOperator`. + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/exists/ exists operator]] + */ + def exists(path: FieldSearchPath): ExistsSearchOperator = JSearchOperator.exists(path) + + /** + * Returns a `SearchOperator` that performs a full-text search. + * + * @param path The field to be searched. + * @param query The string to search for. + * @return The requested `SearchOperator`. 
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/text/ text operator]] + */ + def text(path: SearchPath, query: String): TextSearchOperator = JSearchOperator.text(path, query) + + /** + * Returns a `SearchOperator` that performs a full-text search. + * + * @param paths The non-empty fields to be searched. + * @param queries The non-empty strings to search for. + * @return The requested `SearchOperator`. + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/text/ text operator]] + */ + def text(paths: Iterable[_ <: SearchPath], queries: Iterable[String]): TextSearchOperator = + JSearchOperator.text(paths.asJava, queries.asJava) + + /** + * Returns a `SearchOperator` that may be used to implement search-as-you-type functionality. + * + * @param path The field to be searched. + * @param query The string to search for. + * @param queries More strings to search for. + * @return The requested `SearchOperator`. + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/autocomplete/ autocomplete operator]] + */ + def autocomplete(path: FieldSearchPath, query: String, queries: String*): AutocompleteSearchOperator = + JSearchOperator.autocomplete(path, query, queries: _*) + + /** + * Returns a `SearchOperator` that may be used to implement search-as-you-type functionality. + * + * @param path The field to be searched. + * @param queries The non-empty strings to search for. + * @return The requested `SearchOperator`. + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/autocomplete/ autocomplete operator]] + */ + def autocomplete(path: FieldSearchPath, queries: Iterable[String]): AutocompleteSearchOperator = + JSearchOperator.autocomplete(path, queries.asJava) + + /** + * Returns a base for a `SearchOperator` that tests if the + * BSON `32-bit integer` / `64-bit integer` / `Double` values + * of the specified fields are within an interval. + * + * @param path The field to be searched. + * @param paths More fields to be searched. + * @return A base for a `NumberRangeSearchOperator`. + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/range/ range operator]] + */ + def numberRange(path: FieldSearchPath, paths: FieldSearchPath*): NumberRangeSearchOperatorBase = + JSearchOperator.numberRange(path, paths: _*) + + /** + * Returns a base for a `SearchOperator` that tests if the + * BSON `32-bit integer` / `64-bit integer` / `Double` values + * of the specified fields are within an interval. + * + * @param paths The non-empty fields to be searched. + * @return A base for a `NumberRangeSearchOperator`. + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/range/ range operator]] + */ + def numberRange(paths: Iterable[_ <: FieldSearchPath]): NumberRangeSearchOperatorBase = + JSearchOperator.numberRange(paths.asJava) + + /** + * Returns a base for a `SearchOperator` that tests if the + * BSON `Date` values of the specified fields are within an interval. + * + * @param path The field to be searched. + * @param paths More fields to be searched. + * @return A base for a `DateRangeSearchOperator`. + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/range/ range operator]] + */ + def dateRange(path: FieldSearchPath, paths: FieldSearchPath*): DateRangeSearchOperatorBase = + JSearchOperator.dateRange(path, paths: _*) + + /** + * Returns a base for a `SearchOperator` that tests if the + * BSON `Date` values of the specified fields are within an interval. + * + * @param paths The non-empty fields to be searched. + * @return A base for a `DateRangeSearchOperator`. 
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/range/ range operator]]
+ */
+ def dateRange(paths: Iterable[_ <: FieldSearchPath]): DateRangeSearchOperatorBase =
+ JSearchOperator.dateRange(paths.asJava)
+
+ /**
+ * Returns a `SearchOperator` that allows finding results that are near the specified `origin`.
+ *
+ * @param origin The origin from which the proximity of the results is measured.
+ * The relevance score is 1 if the values of the fields are `origin`.
+ * @param pivot The positive distance from the `origin` at which the relevance score drops in half.
+ * @param path The field to be searched.
+ * @param paths More fields to be searched.
+ * @return The requested `SearchOperator`.
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/near/ near operator]]
+ */
+ def near(origin: Number, pivot: Number, path: FieldSearchPath, paths: FieldSearchPath*): NumberNearSearchOperator =
+ JSearchOperator.near(origin, pivot, path, paths: _*)
+
+ /**
+ * Returns a `SearchOperator` that allows finding results that are near the specified `origin`.
+ *
+ * @param origin The origin from which the proximity of the results is measured.
+ * The relevance score is 1 if the values of the fields are `origin`.
+ * @param pivot The positive distance from the `origin` at which the relevance score drops in half.
+ * @param paths The non-empty fields to be searched.
+ * @return The requested `SearchOperator`.
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/near/ near operator]]
+ */
+ def near(origin: Number, pivot: Number, paths: Iterable[_ <: FieldSearchPath]): NumberNearSearchOperator =
+ JSearchOperator.near(origin, pivot, paths.asJava)
+
+ /**
+ * Returns a `SearchOperator` that allows finding results that are near the specified `origin`.
+ *
+ * @param origin The origin from which the proximity of the results is measured.
+ * The relevance score is 1 if the values of the fields are `origin`.
+ * @param pivot The positive distance from the `origin` at which the relevance score drops in half.
+ * Data is extracted via `Duration.toMillis`.
+ * @param path The field to be searched.
+ * @param paths More fields to be searched.
+ * @return The requested `SearchOperator`.
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/near/ near operator]]
+ * @see `org.bson.codecs.jsr310.InstantCodec`
+ */
+ def near(origin: Instant, pivot: Duration, path: FieldSearchPath, paths: FieldSearchPath*): DateNearSearchOperator =
+ JSearchOperator.near(origin, pivot, path, paths: _*)
+
+ /**
+ * Returns a `SearchOperator` that allows finding results that are near the specified `origin`.
+ *
+ * @param origin The origin from which the proximity of the results is measured.
+ * The relevance score is 1 if the values of the fields are `origin`.
+ * @param pivot The positive distance from the `origin` at which the relevance score drops in half.
+ * Data is extracted via `Duration.toMillis`.
+ * @param paths The non-empty fields to be searched.
+ * @return The requested `SearchOperator`.
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/near/ near operator]]
+ * @see `org.bson.codecs.jsr310.InstantCodec`
+ */
+ def near(origin: Instant, pivot: Duration, paths: Iterable[_ <: FieldSearchPath]): DateNearSearchOperator =
+ JSearchOperator.near(origin, pivot, paths.asJava)
+
+ /**
+ * Returns a `SearchOperator` that allows finding results that are near the specified `origin`.
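+ * For example (editor's hedged sketch; the GeoJSON field name is an assumption):
+ * {{{
+ * import org.mongodb.scala.model.geojson.{ Point, Position }
+ * val op: GeoNearSearchOperator = SearchOperator.near(
+ *   Point(Position(-73.98, 40.76)), 1000, SearchPath.fieldPath("location"))
+ * }}}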
+ *
+ * @param origin The origin from which the proximity of the results is measured.
+ * The relevance score is 1 if the values of the fields are `origin`.
+ * @param pivot The positive distance in meters from the `origin` at which the relevance score drops in half.
+ * @param path The field to be searched.
+ * @param paths More fields to be searched.
+ * @return The requested `SearchOperator`.
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/near/ near operator]]
+ */
+ def near(origin: Point, pivot: Number, path: FieldSearchPath, paths: FieldSearchPath*): GeoNearSearchOperator =
+ JSearchOperator.near(origin, pivot, path, paths: _*)
+
+ /**
+ * Returns a `SearchOperator` that allows finding results that are near the specified `origin`.
+ *
+ * @param origin The origin from which the proximity of the results is measured.
+ * The relevance score is 1 if the values of the fields are `origin`.
+ * @param pivot The positive distance in meters from the `origin` at which the relevance score drops in half.
+ * @param paths The non-empty fields to be searched.
+ * @return The requested `SearchOperator`.
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/near/ near operator]]
+ */
+ def near(origin: Point, pivot: Number, paths: Iterable[_ <: FieldSearchPath]): GeoNearSearchOperator =
+ JSearchOperator.near(origin, pivot, paths.asJava)
+
+ /**
+ * Returns a `SearchOperator` that searches for documents where the value
+ * or array of values at a given path contains any of the specified values.
+ *
+ * @param path The indexed field to be searched.
+ * @param value The boolean value to search for.
+ * @param values More values to search for.
+ * @return The requested `SearchOperator`.
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/in/ in operator]]
+ */
+ def in(path: FieldSearchPath, value: Boolean, values: Boolean*): InSearchOperator =
+ JSearchOperator.in(path, value, values: _*)
+
+ /**
+ * Returns a `SearchOperator` that searches for documents where the value
+ * or array of values at a given path contains any of the specified values.
+ *
+ * @param path The indexed field to be searched.
+ * @param value The ObjectId value to search for.
+ * @param values More values to search for.
+ * @return The requested `SearchOperator`.
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/in/ in operator]]
+ */
+ def in(path: FieldSearchPath, value: ObjectId, values: ObjectId*): InSearchOperator =
+ JSearchOperator.in(path, value, values: _*)
+
+ /**
+ * Returns a `SearchOperator` that searches for documents where the value
+ * or array of values at a given path contains any of the specified values.
+ *
+ * @param path The indexed field to be searched.
+ * @param value The number value to search for.
+ * @param values More values to search for.
+ * @return The requested `SearchOperator`.
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/in/ in operator]]
+ */
+ def in(path: FieldSearchPath, value: Number, values: Number*): InSearchOperator =
+ JSearchOperator.in(path, value, values: _*)
+
+ /**
+ * Returns a `SearchOperator` that searches for documents where the value
+ * or array of values at a given path contains any of the specified values.
+ *
+ * @param path The indexed field to be searched.
+ * @param value The instant date value to search for.
+ * @param values More values to search for.
+ * @return The requested `SearchOperator`.
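+ * @example A hedged editorial sketch (field name and instants are assumptions):
+ *          {{{
+ *          val op: InSearchOperator = SearchOperator.in(SearchPath.fieldPath("createdAt"),
+ *            Instant.parse("2021-01-01T00:00:00Z"), Instant.parse("2022-01-01T00:00:00Z"))
+ *          }}}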
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/in/ in operator]]
+ */
+ def in(path: FieldSearchPath, value: Instant, values: Instant*): InSearchOperator =
+ JSearchOperator.in(path, value, values: _*)
+
+ /**
+ * Returns a `SearchOperator` that searches for documents where the value
+ * or array of values at a given path contains any of the specified values.
+ *
+ * @param path The indexed field to be searched.
+ * @param value The UUID value to search for.
+ * @param values More values to search for.
+ * @return The requested `SearchOperator`.
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/in/ in operator]]
+ */
+ def in(path: FieldSearchPath, value: UUID, values: UUID*): InSearchOperator =
+ JSearchOperator.in(path, value, values: _*)
+
+ /**
+ * Returns a `SearchOperator` that searches for documents where the value
+ * or array of values at a given path contains any of the specified values.
+ *
+ * @param path The indexed field to be searched.
+ * @param value The string value to search for.
+ * @param values More values to search for.
+ * @return The requested `SearchOperator`.
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/in/ in operator]]
+ */
+ def in(path: FieldSearchPath, value: String, values: String*): InSearchOperator =
+ JSearchOperator.in(path, value, values: _*)
+
+ /**
+ * Returns a `SearchOperator` that searches for documents where the value
+ * or array of values at a given path contains any of the specified values.
+ *
+ * @param path The indexed field to be searched.
+ * @param values The non-empty values to search for. Value can be either a single value or an array of values of only one of the supported BSON types and can't be a mix of different types.
+ * @return The requested `SearchOperator`.
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/in/ in operator]]
+ */
+ def in[T](path: FieldSearchPath, values: Iterable[_ <: T]): InSearchOperator =
+ JSearchOperator.in(path, values.asJava)
+
+ /**
+ * Returns a `SearchOperator` that searches for documents where a field matches the specified value.
+ *
+ * @param path The indexed field to be searched.
+ * @param value The boolean value to query for.
+ * @return The requested `SearchOperator`.
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/equals/ equals operator]]
+ */
+ def equals(path: FieldSearchPath, value: Boolean): EqualsSearchOperator =
+ JSearchOperator.equals(path, value)
+
+ /**
+ * Returns a `SearchOperator` that searches for documents where a field matches the specified value.
+ *
+ * @param path The indexed field to be searched.
+ * @param value The ObjectId value to query for.
+ * @return The requested `SearchOperator`.
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/equals/ equals operator]]
+ */
+ def equals(path: FieldSearchPath, value: ObjectId): EqualsSearchOperator =
+ JSearchOperator.equals(path, value)
+
+ /**
+ * Returns a `SearchOperator` that searches for documents where a field matches the specified value.
+ *
+ * @param path The indexed field to be searched.
+ * @param value The number value to query for.
+ * @return The requested `SearchOperator`.
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/equals/ equals operator]]
+ */
+ def equals(path: FieldSearchPath, value: Number): EqualsSearchOperator =
+ JSearchOperator.equals(path, value)
+
+ /**
+ * Returns a `SearchOperator` that searches for documents where a field matches the specified value.
+ *
+ * @param path The indexed field to be searched.
+ * @param value The instant date value to query for.
+ * @return The requested `SearchOperator`.
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/equals/ equals operator]]
+ */
+ def equals(path: FieldSearchPath, value: Instant): EqualsSearchOperator =
+ JSearchOperator.equals(path, value)
+
+ /**
+ * Returns a `SearchOperator` that searches for documents where a field matches the specified value.
+ *
+ * @param path The indexed field to be searched.
+ * @param value The string value to query for.
+ * @return The requested `SearchOperator`.
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/equals/ equals operator]]
+ */
+ def equals(path: FieldSearchPath, value: String): EqualsSearchOperator =
+ JSearchOperator.equals(path, value)
+
+ /**
+ * Returns a `SearchOperator` that searches for documents where a field matches the specified value.
+ *
+ * @param path The indexed field to be searched.
+ * @param value The UUID value to query for.
+ * @return The requested `SearchOperator`.
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/equals/ equals operator]]
+ */
+ def equals(path: FieldSearchPath, value: UUID): EqualsSearchOperator =
+ JSearchOperator.equals(path, value)
+
+ /**
+ * Returns a `SearchOperator` that searches for documents where a field matches null.
+ *
+ * @param path The indexed field to be searched.
+ * @return The requested `SearchOperator`.
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/equals/ equals operator]]
+ */
+ def equalsNull(path: FieldSearchPath): EqualsSearchOperator =
+ JSearchOperator.equalsNull(path)
+
+ /**
+ * Returns a `SearchOperator` that returns documents similar to the input document.
+ *
+ * @param like The BSON document that is used to extract representative terms to query for.
+ * @return The requested `SearchOperator`.
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/morelikethis/ moreLikeThis operator]]
+ */
+ def moreLikeThis(like: BsonDocument): MoreLikeThisSearchOperator = JSearchOperator.moreLikeThis(like)
+
+ /**
+ * Returns a `SearchOperator` that returns documents similar to the input documents.
+ *
+ * @param likes The BSON documents that are used to extract representative terms to query for.
+ * @return The requested `SearchOperator`.
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/morelikethis/ moreLikeThis operator]]
+ */
+ def moreLikeThis(likes: Iterable[BsonDocument]): MoreLikeThisSearchOperator =
+ JSearchOperator.moreLikeThis(likes.asJava)
+
+ /**
+ * Returns a `SearchOperator` that enables queries which use special characters in the search string that can match any character.
+ *
+ * @param query The string to search for.
+ * @param path The indexed field to be searched.
+ * @return The requested `SearchOperator`.
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/wildcard/ wildcard operator]]
+ */
+ def wildcard(query: String, path: SearchPath): WildcardSearchOperator = JSearchOperator.wildcard(query, path)
+
+ /**
+ * Returns a `SearchOperator` that enables queries which use special characters in the search string that can match any character.
+ *
+ * @param queries The non-empty strings to search for.
+ * @param paths The non-empty indexed fields to be searched.
+ * @return The requested `SearchOperator`.
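+ * @example A hedged editorial sketch (queries and paths are assumptions):
+ *          {{{
+ *          val op: WildcardSearchOperator = SearchOperator.wildcard(
+ *            List("nel*son"), List(SearchPath.wildcardPath("name.*")))
+ *          }}}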
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/wildcard/ wildcard operator]] + */ + def wildcard(queries: Iterable[String], paths: Iterable[_ <: SearchPath]): WildcardSearchOperator = + JSearchOperator.wildcard(queries.asJava, paths.asJava) + + /** + * Returns a `SearchOperator` that supports querying a combination of indexed fields and values. + * + * @param defaultPath The field to be searched by default. + * @param query One or more indexed fields and values to search. + * @return The requested `SearchOperator`. + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/queryString/ queryString operator]] + */ + def queryString(defaultPath: FieldSearchPath, query: String): QueryStringSearchOperator = + JSearchOperator.queryString(defaultPath, query) + + /** + * Returns a `SearchOperator` that performs a search for documents containing an ordered sequence of terms. + * + * @param path The field to be searched. + * @param query The string to search for. + * @return The requested `SearchOperator`. + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/phrase/ phrase operator]] + */ + def phrase(path: SearchPath, query: String): PhraseSearchOperator = JSearchOperator.phrase(path, query) + + /** + * Returns a `SearchOperator` that performs a search for documents containing an ordered sequence of terms. + * + * @param paths The non-empty fields to be searched. + * @param queries The non-empty strings to search for. + * @return The requested `SearchOperator`. + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/phrase/ phrase operator]] + */ + def phrase(paths: Iterable[_ <: SearchPath], queries: Iterable[String]): PhraseSearchOperator = + JSearchOperator.phrase(paths.asJava, queries.asJava) + + /** + * Returns a `SearchOperator` that performs a search using a regular expression. + * + * @param path The field to be searched. + * @param query The string to search for. + * @return The requested `SearchOperator`. + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/regex/ regex operator]] + */ + def regex(path: SearchPath, query: String): RegexSearchOperator = JSearchOperator.regex(path, query) + + /** + * Returns a `SearchOperator` that performs a search using a regular expression. + * + * @param paths The non-empty fields to be searched. + * @param queries The non-empty strings to search for. + * @return The requested `SearchOperator`. + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/regex/ regex operator]] + */ + def regex(paths: Iterable[_ <: SearchPath], queries: Iterable[String]): RegexSearchOperator = + JSearchOperator.regex(paths.asJava, queries.asJava) + + /** + * Creates a `SearchOperator` from a `Bson` in situations when there is no builder method that better satisfies your needs. + * This method cannot be used to validate the syntax. + * + * Example
+ * The following code creates two functionally equivalent `SearchOperator`s, + * though they may not be equal. + * {{{ + * val operator1: SearchOperator = SearchOperator.exists( + * SearchPath.fieldPath("fieldName")) + * val operator2: SearchOperator = SearchOperator.of(Document("exists" -> + * Document("path" -> SearchPath.fieldPath("fieldName").toValue))) + * }}} + * + * @param operator A `Bson` representing the required `SearchOperator`. + * + * @return The requested `SearchOperator`. + */ + def of(operator: Bson): SearchOperator = JSearchOperator.of(operator) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchOptions.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchOptions.scala new file mode 100644 index 00000000000..5eb61591043 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchOptions.scala @@ -0,0 +1,36 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.mongodb.scala.model.search + +import com.mongodb.annotations.{ Beta, Reason } +import com.mongodb.client.model.search.{ SearchOptions => JSearchOptions } + +/** + * Represents optional fields of the `\$search` pipeline stage of an aggregation pipeline. + * + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/query-syntax/#-search \$search syntax]] + * @since 4.7 + */ +@Beta(Array(Reason.CLIENT)) +object SearchOptions { + + /** + * Returns `SearchOptions` that represents server defaults. + * + * @return `SearchOptions` that represents server defaults. + */ + def searchOptions(): SearchOptions = JSearchOptions.searchOptions() +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchPath.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchPath.scala new file mode 100644 index 00000000000..74999deef35 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchPath.scala @@ -0,0 +1,51 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.mongodb.scala.model.search + +import com.mongodb.annotations.{ Beta, Reason } +import com.mongodb.client.model.search.{ SearchPath => JSearchPath } + +/** + * A specification of fields to be searched. 
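+ * For example (editor's hedged sketch; field names are hypothetical):
+ * {{{
+ * val byField: SearchPath = SearchPath.fieldPath("address.city")
+ * val byWildcard: SearchPath = SearchPath.wildcardPath("address.*")
+ * }}}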
+ * + * Depending on the context, one of the following methods may be used to get a representation of a `SearchPath` + * with the correct syntax: `SearchPath.toBsonDocument`, `SearchPath.toBsonValue`, `FieldSearchPath.toValue`. + * + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/path-construction/ Path]] + * @since 4.7 + */ +@Beta(Array(Reason.CLIENT)) +object SearchPath { + + /** + * Returns a `SearchPath` for the given `path`. + * + * @param path The name of the field. Must not contain [[SearchPath.wildcardPath wildcard]] characters. + * @return The requested `SearchPath`. + * @see [[https://www.mongodb.com/docs/manual/core/document/#dot-notation Dot notation]] + */ + def fieldPath(path: String): FieldSearchPath = JSearchPath.fieldPath(path) + + /** + * Returns a `SearchPath` for the given `wildcardPath`. + * + * @param wildcardPath The specification of the fields that contains wildcard (`'*'`) characters. + * Must not contain `'**'`. + * @return The requested `SearchPath`. + * @see [[https://www.mongodb.com/docs/manual/core/document/#dot-notation Dot notation]] + */ + def wildcardPath(wildcardPath: String): WildcardSearchPath = JSearchPath.wildcardPath(wildcardPath) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchScore.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchScore.scala new file mode 100644 index 00000000000..35005c05970 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchScore.scala @@ -0,0 +1,91 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.mongodb.scala.model.search + +import com.mongodb.annotations.{ Beta, Reason } +import com.mongodb.client.model.search.{ SearchScore => JSearchScore } +import org.mongodb.scala.bson.conversions.Bson +import org.mongodb.scala.model.Projections + +/** + * A modifier of the relevance score. + * You may use the `\$meta: "searchScore"` expression, e.g., via [[Projections.metaSearchScore]], + * to extract the relevance score assigned to each found document. + * + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/scoring/ Scoring]] + * @since 4.7 + */ +@Beta(Array(Reason.CLIENT)) +object SearchScore { + + /** + * Returns a `SearchScore` that instructs to multiply the score by the specified `value`. + * + * @param value The positive value to multiply the score by. + * @return The requested `SearchScore`. + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/scoring/#boost boost score modifier]] + */ + def boost(value: Float): ValueBoostSearchScore = JSearchScore.boost(value) + + /** + * Returns a `SearchScore` that instructs to multiply the score by the value of the specified field. + * + * @param path The numeric field whose value to multiply the score by. + * @return The requested `SearchScore`. 
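+ * @example A hedged editorial sketch (assumes a numeric field named `popularity`):
+ *          {{{
+ *          val score: PathBoostSearchScore = SearchScore.boost(SearchPath.fieldPath("popularity"))
+ *          }}}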
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/scoring/#boost boost score modifier]] + * @see [[SearchScoreExpression.pathExpression]] + */ + def boost(path: FieldSearchPath): PathBoostSearchScore = JSearchScore.boost(path) + + /** + * Returns a `SearchScore` that instructs to replace the score with the specified `value`. + * + * @param value The positive value to replace the score with. + * @return The requested `SearchScore`. + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/scoring/#constant constant score modifier]] + * @see [[SearchScoreExpression.constantExpression]] + */ + def constant(value: Float): ConstantSearchScore = JSearchScore.constant(value) + + /** + * Returns a `SearchScore` that instructs to compute the score using the specified `expression`. + * + * @param expression The expression to use when calculating the score. + * @return The requested `SearchScore`. + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/scoring/#function function score modifier]] + */ + def function(expression: SearchScoreExpression): FunctionSearchScore = JSearchScore.function(expression) + + /** + * Creates a `SearchScore` from a `Bson` in situations when there is no builder method that better satisfies your needs. + * This method cannot be used to validate the syntax. + * + * Example
+ * The following code creates two functionally equivalent `SearchScore`s, + * though they may not be equal. + * {{{ + * val score1: SearchScore = SearchScore.boost( + * SearchPath.fieldPath("fieldName")) + * val score2: SearchScore = SearchScore.of(Document("boost" -> + * Document("path" -> SearchPath.fieldPath("fieldName").toValue))) + * }}} + * + * @param score A `Bson` representing the required `SearchScore`. + * + * @return The requested `SearchScore`. + */ + def of(score: Bson): SearchScore = JSearchScore.of(score) +} diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchScoreExpression.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchScoreExpression.scala new file mode 100644 index 00000000000..244c07e5847 --- /dev/null +++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/SearchScoreExpression.scala @@ -0,0 +1,138 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.mongodb.scala.model.search + +import com.mongodb.annotations.{ Beta, Reason } +import com.mongodb.client.model.search.{ SearchScoreExpression => JSearchScoreExpression } +import org.mongodb.scala.bson.conversions.Bson + +import collection.JavaConverters._ + +/** + * @see [[SearchScore.function]] + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/scoring/#expressions Expressions for the function score modifier]] + * @since 4.7 + */ +@Beta(Array(Reason.CLIENT)) +object SearchScoreExpression { + + /** + * Returns a `SearchScoreExpression` that evaluates into the relevance score of a document. + * + * @return The requested `SearchScoreExpression`. + */ + def relevanceExpression(): RelevanceSearchScoreExpression = JSearchScoreExpression.relevanceExpression() + + /** + * Returns a `SearchScoreExpression` that evaluates into the value of the specified field. + * + * @param path The numeric field whose value to use as the result of the expression. + * @return The requested `SearchScoreExpression`. + * @see `SearchScore.boost(FieldSearchPath)` + */ + def pathExpression(path: FieldSearchPath): PathSearchScoreExpression = JSearchScoreExpression.pathExpression(path) + + /** + * Returns a `SearchScoreExpression` that evaluates into the specified `value`. + * + * @param value The value to use as the result of the expression. Unlike [[SearchScore.constant]], does not have constraints. + * @return The requested `SearchScoreExpression`. + * @see [[SearchScore.constant]] + */ + def constantExpression(value: Float): ConstantSearchScoreExpression = JSearchScoreExpression.constantExpression(value) + + /** + * Returns a `SearchScoreExpression` that represents a Gaussian function whose output is within the interval [0, 1]. + * Roughly speaking, the further the value of the `path` expression is from the `origin`, + * the smaller the output of the function. + * + * The `scale` and `decay` are parameters of the Gaussian function, + * they define the rate at which the function decays. 
+ * The input of the Gaussian function is the output of another function: + * max(0, abs(`pathValue` - `origin`) - `offset`), + * where `pathValue` is the value of the `path` expression. + * + * @param origin The point of origin, see `GaussSearchScoreExpression.offset`. + * The value of the Gaussian function is 1 if the value of the `path` expression is `origin`. + * @param path The expression whose value is used to calculate the input of the Gaussian function. + * @param scale The non-zero distance from the points `origin` ± `offset` + * at which the output of the Gaussian function must decay by the factor of `decay`. + * @return The requested `SearchScoreExpression`. + */ + def gaussExpression(origin: Double, path: PathSearchScoreExpression, scale: Double): GaussSearchScoreExpression = + JSearchScoreExpression.gaussExpression(origin, path, scale) + + /** + * Returns a `SearchScoreExpression` that evaluates into log10(`expressionValue`), + * where `expressionValue` is the value of the `expression`. + * + * @param expression The expression whose value is the input of the log10 function. + * @return The requested `SearchScoreExpression`. + */ + def logExpression(expression: SearchScoreExpression): LogSearchScoreExpression = + JSearchScoreExpression.logExpression(expression) + + /** + * Returns a `SearchScoreExpression` that evaluates into log10(`expressionValue` + 1), + * where `expressionValue` is the value of the `expression`. + * + * @param expression The expression whose value is used to calculate the input of the log10 function. + * @return The requested `SearchScoreExpression`. + */ + def log1pExpression(expression: SearchScoreExpression): Log1pSearchScoreExpression = + JSearchScoreExpression.log1pExpression(expression) + + /** + * Returns a `SearchScoreExpression` that evaluates into the sum of the values of the specified `expressions`. + * + * @param expressions The expressions whose values to add. Must contain at least two expressions. + * @return The requested `SearchScoreExpression`. + */ + def addExpression(expressions: Iterable[_ <: SearchScoreExpression]): AddSearchScoreExpression = + JSearchScoreExpression.addExpression(expressions.asJava) + + /** + * Returns a `SearchScoreExpression` that evaluates into the product of the values of the specified `expressions`. + * + * @param expressions The expressions whose values to multiply. Must contain at least two expressions. + * @return The requested `SearchScoreExpression`. + */ + def multiplyExpression(expressions: Iterable[_ <: SearchScoreExpression]): MultiplySearchScoreExpression = + JSearchScoreExpression.multiplyExpression(expressions.asJava) + + /** + * Creates a `SearchScoreExpression` from a `Bson` in situations when there is no builder method + * that better satisfies your needs. + * This method cannot be used to validate the syntax. + * + * Example
+ * The following code creates two functionally equivalent `SearchScoreExpression`s,
+ * though they may not be equal.
+ * {{{
+ * val expression1: SearchScoreExpression = SearchScoreExpression.pathExpression(
+ * SearchPath.fieldPath("fieldName"))
+ * .undefined(-1.5f)
+ * val expression2: SearchScoreExpression = SearchScoreExpression.of(Document("path" ->
+ * Document("value" -> SearchPath.fieldPath("fieldName").toValue,
+ * "undefined" -> -1.5)))
+ * }}}
+ *
+ * @param expression A `Bson` representing the required `SearchScoreExpression`.
+ *
+ * @return The requested `SearchScoreExpression`.
+ */
+ def of(expression: Bson): SearchScoreExpression = JSearchScoreExpression.of(expression)
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/VectorSearchOptions.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/VectorSearchOptions.scala
new file mode 100644
index 00000000000..6911ec0f653
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/VectorSearchOptions.scala
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mongodb.scala.model.search
+
+import com.mongodb.annotations.{ Beta, Reason }
+import com.mongodb.client.model.search.{ VectorSearchOptions => JVectorSearchOptions }
+
+/**
+ * Represents optional fields of the `\$vectorSearch` pipeline stage of an aggregation pipeline.
+ *
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-vector-search/vector-search-stage/ \$vectorSearch]]
+ * @note Requires MongoDB 6.0.11 or greater
+ * @since 4.11
+ */
+object VectorSearchOptions {
+
+ /**
+ * Returns `ApproximateVectorSearchOptions` that represents server defaults.
+ *
+ * @param numCandidates The number of candidates to consider during the search.
+ * @return `ApproximateVectorSearchOptions` that represents server defaults.
+ */
+ def approximateVectorSearchOptions(numCandidates: Long): ApproximateVectorSearchOptions =
+ JVectorSearchOptions.approximateVectorSearchOptions(numCandidates)
+
+ /**
+ * Returns `ExactVectorSearchOptions` that represents server defaults with the `exact` option set to true.
+ *
+ * @return `ExactVectorSearchOptions` that represents server defaults.
+ * @since 5.2
+ */
+ def exactVectorSearchOptions(): ExactVectorSearchOptions = JVectorSearchOptions.exactVectorSearchOptions()
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/search/package.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/search/package.scala
new file mode 100644
index 00000000000..771e800801d
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/search/package.scala
@@ -0,0 +1,507 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mongodb.scala.model
+
+import com.mongodb.annotations.{ Beta, Reason, Sealed }
+
+/**
+ * Query building API for MongoDB Atlas full-text search.
+ *
+ * While all the building blocks of this API, such as
+ * `SearchOptions`, `SearchHighlight`, etc.,
+ * are not necessarily immutable, they are unmodifiable due to methods like
+ * `SearchHighlight.maxCharsToExamine` returning new instances instead of modifying the instance
+ * on which they are called. This allows storing and using such instances as templates.
+ *
+ * @see `Aggregates.search`
+ * @see `Aggregates.vectorSearch`
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/ Atlas Search]]
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/query-syntax/ Atlas Search aggregation pipeline stages]]
+ * @since 4.7
+ */
+package object search {
+
+ /**
+ * The core part of the `\$search` pipeline stage of an aggregation pipeline.
+ *
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/operators-and-collectors/#operators Search operators]]
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type SearchOperator = com.mongodb.client.model.search.SearchOperator
+
+ /**
+ * A base for a [[CompoundSearchOperator]] which allows creating instances of this operator.
+ * This interface is a technicality and does not represent a meaningful element of the full-text search query syntax.
+ *
+ * @see `SearchOperator.compound()`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type CompoundSearchOperatorBase = com.mongodb.client.model.search.CompoundSearchOperatorBase
+
+ /**
+ * @see `SearchOperator.compound()`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type CompoundSearchOperator = com.mongodb.client.model.search.CompoundSearchOperator
+
+ /**
+ * A representation of a [[CompoundSearchOperator]] that allows changing
+ * `must`-specific options, if any.
+ * This interface is a technicality and does not represent a meaningful element of the full-text search query syntax.
+ *
+ * @see `CompoundSearchOperatorBase.must(Iterable)`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type MustCompoundSearchOperator = com.mongodb.client.model.search.MustCompoundSearchOperator
+
+ /**
+ * A representation of a [[CompoundSearchOperator]] that allows changing
+ * `mustNot`-specific options, if any.
+ * This interface is a technicality and does not represent a meaningful element of the full-text search query syntax.
+ *
+ * @see `CompoundSearchOperatorBase.mustNot(Iterable)`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type MustNotCompoundSearchOperator = com.mongodb.client.model.search.MustNotCompoundSearchOperator
+
+ /**
+ * A representation of a [[CompoundSearchOperator]] that allows changing
+ * `should`-specific options, if any.
+ * This interface is a technicality and does not represent a meaningful element of the full-text search query syntax.
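+ * For example (editor's hedged sketch; the field names and the use of
+ * `minimumShouldMatch` are illustrative assumptions):
+ * {{{
+ * val op: ShouldCompoundSearchOperator = SearchOperator.compound()
+ *   .should(List(
+ *     SearchOperator.exists(SearchPath.fieldPath("title")),
+ *     SearchOperator.exists(SearchPath.fieldPath("subtitle"))))
+ *   .minimumShouldMatch(1)
+ * }}}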
+ *
+ * @see `CompoundSearchOperatorBase.should(Iterable)`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type ShouldCompoundSearchOperator = com.mongodb.client.model.search.ShouldCompoundSearchOperator
+
+ /**
+ * A representation of a [[CompoundSearchOperator]] that allows changing
+ * `filter`-specific options, if any.
+ * This interface is a technicality and does not represent a meaningful element of the full-text search query syntax.
+ *
+ * @see `CompoundSearchOperatorBase.filter(Iterable)`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type FilterCompoundSearchOperator = com.mongodb.client.model.search.FilterCompoundSearchOperator
+
+ /**
+ * @see `SearchOperator.exists(FieldSearchPath)`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type ExistsSearchOperator = com.mongodb.client.model.search.ExistsSearchOperator
+
+ /**
+ * @see `SearchOperator.text(SearchPath, String)`
+ * @see `SearchOperator.text(Iterable, Iterable)`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type TextSearchOperator = com.mongodb.client.model.search.TextSearchOperator
+
+ /**
+ * @see `SearchOperator.phrase(SearchPath, String)`
+ * @see `SearchOperator.phrase(Iterable, Iterable)`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type PhraseSearchOperator = com.mongodb.client.model.search.PhraseSearchOperator
+
+ /**
+ * @see `SearchOperator.autocomplete(FieldSearchPath, String, String*)`
+ * @see `SearchOperator.autocomplete(FieldSearchPath, Iterable)`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type AutocompleteSearchOperator = com.mongodb.client.model.search.AutocompleteSearchOperator
+
+ /**
+ * @see `SearchOperator.regex(SearchPath, String)`
+ * @see `SearchOperator.regex(Iterable, Iterable)`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type RegexSearchOperator = com.mongodb.client.model.search.RegexSearchOperator
+
+ /**
+ * A base for a [[NumberRangeSearchOperator]] which allows creating instances of this operator.
+ * This interface is a technicality and does not represent a meaningful element of the full-text search query syntax.
+ *
+ * @see `SearchOperator.numberRange`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type NumberRangeSearchOperatorBase = com.mongodb.client.model.search.NumberRangeSearchOperatorBase
+
+ /**
+ * A base for a [[DateRangeSearchOperator]] which allows creating instances of this operator.
+ * This interface is a technicality and does not represent a meaningful element of the full-text search query syntax.
+ * + * @see `SearchOperator.dateRange` + */ + @Sealed + @Beta(Array(Reason.CLIENT)) + type DateRangeSearchOperatorBase = com.mongodb.client.model.search.DateRangeSearchOperatorBase + + /** + * @see `SearchOperator.numberRange` + */ + @Sealed + @Beta(Array(Reason.CLIENT)) + type NumberRangeSearchOperator = com.mongodb.client.model.search.NumberRangeSearchOperator + + /** + * @see `SearchOperator.dateRange` + */ + @Sealed + @Beta(Array(Reason.CLIENT)) + type DateRangeSearchOperator = com.mongodb.client.model.search.DateRangeSearchOperator + + /** + * @see `SearchOperator.near` + */ + @Sealed + @Beta(Array(Reason.CLIENT)) + type NumberNearSearchOperator = com.mongodb.client.model.search.NumberNearSearchOperator + + /** + * @see `SearchOperator.near` + */ + @Sealed + @Beta(Array(Reason.CLIENT)) + type DateNearSearchOperator = com.mongodb.client.model.search.DateNearSearchOperator + + /** + * @see `SearchOperator.near` + */ + @Sealed + @Beta(Array(Reason.CLIENT)) + type GeoNearSearchOperator = com.mongodb.client.model.search.GeoNearSearchOperator + + /** + * @see `SearchOperator.in` + */ + @Sealed + @Beta(Array(Reason.CLIENT)) + type InSearchOperator = com.mongodb.client.model.search.InSearchOperator + + /** + * @see `SearchOperator.equals` + */ + @Sealed + @Beta(Array(Reason.CLIENT)) + type EqualsSearchOperator = com.mongodb.client.model.search.EqualsSearchOperator + + /** + * @see `SearchOperator.moreLikeThis` + */ + @Sealed + @Beta(Array(Reason.CLIENT)) + type MoreLikeThisSearchOperator = com.mongodb.client.model.search.MoreLikeThisSearchOperator + + /** + * @see `SearchOperator.wildcard(String, SearchPath)` + * @see `SearchOperator.wildcard(Iterable, Iterable)` + */ + @Sealed + @Beta(Array(Reason.CLIENT)) + type WildcardSearchOperator = com.mongodb.client.model.search.WildcardSearchOperator + + /** + * @see `SearchOperator.queryString` + */ + @Sealed + @Beta(Array(Reason.CLIENT)) + type QueryStringSearchOperator = com.mongodb.client.model.search.QueryStringSearchOperator + + /** + * Fuzzy search options that may be used with some [[SearchOperator]]s. + * + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/autocomplete/ autocomplete operator]] + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/text/ text operator]] + */ + @Sealed + @Beta(Array(Reason.CLIENT)) + type FuzzySearchOptions = com.mongodb.client.model.search.FuzzySearchOptions + + /** + * The core part of the `\$search` pipeline stage of an aggregation pipeline. + * [[SearchCollector]]s allow returning metadata together with the matching search results. + * You may use the `$$SEARCH_META` variable, e.g., via [[Projections.computedSearchMeta]], to extract this metadata. + * + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/operators-and-collectors/#collectors Search collectors]] + */ + @Sealed + @Beta(Array(Reason.CLIENT)) + type SearchCollector = com.mongodb.client.model.search.SearchCollector + + /** + * @see `SearchCollector.facet(SearchOperator, Iterable)` + */ + @Sealed + @Beta(Array(Reason.CLIENT, Reason.SERVER)) + type FacetSearchCollector = com.mongodb.client.model.search.FacetSearchCollector + + /** + * Represents optional fields of the `\$search` pipeline stage of an aggregation pipeline. 
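+ * For example (editor's hedged sketch; the index name `default` is an assumption):
+ * {{{
+ * val searchStage: Bson = Aggregates.search(
+ *   SearchOperator.exists(SearchPath.fieldPath("title")),
+ *   SearchOptions.searchOptions().index("default"))
+ * }}}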
+ * + * @see [[https://www.mongodb.com/docs/atlas/atlas-search/query-syntax/#-search \$search syntax]] + */ + @Sealed + @Beta(Array(Reason.CLIENT)) + type SearchOptions = com.mongodb.client.model.search.SearchOptions + + /** + * Represents optional fields of the `\$vectorSearch` pipeline stage of an aggregation pipeline. + * + * @see [[https://www.mongodb.com/docs/atlas/atlas-vector-search/vector-search-stage/ \$vectorSearch]] + * @note Requires MongoDB 6.0.11 or greater + * @since 4.11 + */ + @Sealed + @Beta(Array(Reason.SERVER)) + type VectorSearchOptions = com.mongodb.client.model.search.VectorSearchOptions + + /** + * Represents optional fields of the `\$vectorSearch` pipeline stage of an aggregation pipeline. + *
+ * Configures approximate vector search for Atlas Vector Search to enable searches that may not return the exact closest vectors. + * + * @see [[https://www.mongodb.com/docs/atlas/atlas-vector-search/vector-search-stage/ \$vectorSearch]] + * @note Requires MongoDB 6.0.11, 7.0.2 or greater + * @since 5.2 + */ + @Sealed + @Beta(Array(Reason.SERVER)) + type ApproximateVectorSearchOptions = com.mongodb.client.model.search.ApproximateVectorSearchOptions + + /** + * Represents optional fields of the `\$vectorSearch` pipeline stage of an aggregation pipeline. + *
+ * Configures exact vector search for Atlas Vector Search to enable precise matching, ensuring that
+ * results are the closest vectors to a given query vector.
+ *
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-vector-search/vector-search-stage/ \$vectorSearch]]
+ * @note Requires MongoDB 6.0.16, 7.0.10, 7.3.2 or greater
+ * @since 5.2
+ */
+ @Sealed
+ @Beta(Array(Reason.SERVER))
+ type ExactVectorSearchOptions = com.mongodb.client.model.search.ExactVectorSearchOptions
+
+ /**
+ * Highlighting options.
+ * You may use the `\$meta: "searchHighlights"` expression, e.g., via [[Projections.metaSearchHighlights]],
+ * to extract the results of highlighting.
+ *
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/highlighting/ Highlighting]]
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type SearchHighlight = com.mongodb.client.model.search.SearchHighlight
+
+ /**
+ * Counting options.
+ * You may use the `$$SEARCH_META` variable, e.g., via [[Projections.computedSearchMeta]],
+ * to extract the results of counting.
+ *
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/counting/ Counting]]
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT, Reason.SERVER))
+ type SearchCount = com.mongodb.client.model.search.SearchCount
+
+ /**
+ * @see `SearchCount.total()`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT, Reason.SERVER))
+ type TotalSearchCount = com.mongodb.client.model.search.TotalSearchCount
+
+ /**
+ * @see `SearchCount.lowerBound()`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT, Reason.SERVER))
+ type LowerBoundSearchCount = com.mongodb.client.model.search.LowerBoundSearchCount
+
+ /**
+ * A facet definition for [[FacetSearchCollector]].
+ *
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/facet/#facet-definition Facet definition]]
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT, Reason.SERVER))
+ type SearchFacet = com.mongodb.client.model.search.SearchFacet
+
+ /**
+ * @see `SearchFacet.stringFacet(String, FieldSearchPath)`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT, Reason.SERVER))
+ type StringSearchFacet = com.mongodb.client.model.search.StringSearchFacet
+
+ /**
+ * @see `SearchFacet.numberFacet(String, FieldSearchPath, Iterable)`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT, Reason.SERVER))
+ type NumberSearchFacet = com.mongodb.client.model.search.NumberSearchFacet
+
+ /**
+ * @see `SearchFacet.dateFacet(String, FieldSearchPath, Iterable)`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT, Reason.SERVER))
+ type DateSearchFacet = com.mongodb.client.model.search.DateSearchFacet
+
+ /**
+ * A specification of fields to be searched.
+ *
+ * Despite `SearchPath` being `Bson`,
+ * its value conforming to the correct syntax must be obtained via either `SearchPath.toBsonValue` or `FieldSearchPath.toValue`.
+ *
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/path-construction/ Path]]
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type SearchPath = com.mongodb.client.model.search.SearchPath
+
+ /**
+ * @see `SearchPath.fieldPath(String)`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type FieldSearchPath = com.mongodb.client.model.search.FieldSearchPath
+
+ /**
+ * @see `SearchPath.wildcardPath(String)`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type WildcardSearchPath = com.mongodb.client.model.search.WildcardSearchPath
+
+ /**
+ * A modifier of the relevance score.
+ * You may use the `\$meta: "searchScore"` expression, e.g., via [[Projections.metaSearchScore]],
+ * to extract the relevance score assigned to each found document.
+ *
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/scoring/ Scoring]]
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type SearchScore = com.mongodb.client.model.search.SearchScore
+
+ /**
+ * @see `SearchScore.boost(float)`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type ValueBoostSearchScore = com.mongodb.client.model.search.ValueBoostSearchScore
+
+ /**
+ * @see `SearchScore.boost(FieldSearchPath)`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type PathBoostSearchScore = com.mongodb.client.model.search.PathBoostSearchScore
+
+ /**
+ * @see `SearchScore.constant`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type ConstantSearchScore = com.mongodb.client.model.search.ConstantSearchScore
+
+ /**
+ * @see `SearchScore.function`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type FunctionSearchScore = com.mongodb.client.model.search.FunctionSearchScore
+
+ /**
+ * @see `SearchScore.function`
+ * @see [[https://www.mongodb.com/docs/atlas/atlas-search/scoring/#expressions Expressions for the function score modifier]]
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type SearchScoreExpression = com.mongodb.client.model.search.SearchScoreExpression
+
+ /**
+ * @see `SearchScoreExpression.relevanceExpression`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type RelevanceSearchScoreExpression = com.mongodb.client.model.search.RelevanceSearchScoreExpression
+
+ /**
+ * @see `SearchScoreExpression.pathExpression`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type PathSearchScoreExpression = com.mongodb.client.model.search.PathSearchScoreExpression
+
+ /**
+ * @see `SearchScoreExpression.constantExpression`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type ConstantSearchScoreExpression = com.mongodb.client.model.search.ConstantSearchScoreExpression
+
+ /**
+ * @see `SearchScoreExpression.gaussExpression`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type GaussSearchScoreExpression = com.mongodb.client.model.search.GaussSearchScoreExpression
+
+ /**
+ * @see `SearchScoreExpression.logExpression`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type LogSearchScoreExpression = com.mongodb.client.model.search.LogSearchScoreExpression
+
+ /**
+ * @see `SearchScoreExpression.log1pExpression`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type Log1pSearchScoreExpression = com.mongodb.client.model.search.Log1pSearchScoreExpression
+
+ /**
+ * @see `SearchScoreExpression.addExpression`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type AddSearchScoreExpression = com.mongodb.client.model.search.AddSearchScoreExpression
+
+ /**
+ * @see `SearchScoreExpression.multiplyExpression`
+ */
+ @Sealed
+ @Beta(Array(Reason.CLIENT))
+ type MultiplySearchScoreExpression = com.mongodb.client.model.search.MultiplySearchScoreExpression
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/model/vault/package.scala b/driver-scala/src/main/scala/org/mongodb/scala/model/vault/package.scala
new file mode 100644
index 00000000000..34cdf93ce69
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/model/vault/package.scala
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.model
+
+import com.mongodb.client.model.vault.{ DataKeyOptions => JDataKeyOptions }
+import com.mongodb.client.model.vault.{ EncryptOptions => JEncryptOptions }
+import com.mongodb.client.model.vault.{ RangeOptions => JRangeOptions }
+import com.mongodb.client.model.vault.{ RewrapManyDataKeyResult => JRewrapManyDataKeyResult }
+import com.mongodb.client.model.vault.{ RewrapManyDataKeyOptions => JRewrapManyDataKeyOptions }
+
+/**
+ * This package contains options classes for the key vault API.
+ *
+ * @since 2.7
+ */
+package object vault {
+
+ /**
+ * The options for creating a data key.
+ */
+ type DataKeyOptions = JDataKeyOptions
+
+ object DataKeyOptions {
+ def apply(): DataKeyOptions = new JDataKeyOptions()
+ }
+
+ /**
+ * The options for explicit encryption.
+ */
+ type EncryptOptions = JEncryptOptions
+
+ /**
+ * The options for explicit encryption.
+ */
+ object EncryptOptions {
+
+ /**
+ * Construct an instance with the given algorithm.
+ *
+ * @param algorithm the encryption algorithm
+ */
+ def apply(algorithm: String): EncryptOptions = new JEncryptOptions(algorithm)
+ }
+
+ /**
+ * Range options specify index options for a Queryable Encryption field supporting "range" queries.
+ * @since 4.9
+ */
+ type RangeOptions = JRangeOptions
+
+ object RangeOptions {
+ def apply(): RangeOptions = new JRangeOptions()
+ }
+
+ /**
+ * The result of the rewrapping of data keys.
+ *
+ * @since 5.6
+ */
+ type RewrapManyDataKeyResult = JRewrapManyDataKeyResult
+
+ /**
+ * The rewrap many data key options.
+ *
+ * @since 5.6
+ */
+ type RewrapManyDataKeyOptions = JRewrapManyDataKeyOptions
+
+ /**
+ * The rewrap many data key options.
+ *
+ * The `getMasterKey` document MUST have the fields corresponding to the given provider as specified in masterKey.
+ *
+ * @since 5.6
+ */
+ object RewrapManyDataKeyOptions {
+ def apply(): RewrapManyDataKeyOptions = new JRewrapManyDataKeyOptions()
+ }
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/package.scala b/driver-scala/src/main/scala/org/mongodb/scala/package.scala
new file mode 100644
index 00000000000..1cdc2d0a564
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/package.scala
@@ -0,0 +1,474 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb
+
+import com.mongodb.annotations.{ Beta, Reason }
+import org.bson.BsonDocumentReader
+import org.bson.codecs.{ DecoderContext, DocumentCodec }
+import org.mongodb.scala.bson.BsonDocument
+import org.mongodb.scala.internal.WriteConcernImplicits
+
+import _root_.scala.language.implicitConversions
+import _root_.scala.reflect.ClassTag
+
+/**
+ * The MongoDB Scala Driver package
+ *
+ * Contains type aliases and companion objects to help when using the Scala API
+ *
+ * @since 1.0
+ */
+package object scala extends ClientSessionImplicits with ObservableImplicits with WriteConcernImplicits {
+
+ /**
+ * An immutable Document implementation.
+ *
+ * A strictly typed `Map[String, BsonValue]`-like structure that traverses the elements in insertion order. Unlike native Scala maps there
+ * is no variance in the value type and it always has to be a `BsonValue`.
+ */
+ type Document = bson.Document
+
+ /**
+ * An immutable Document implementation.
+ *
+ * A strictly typed `Map[String, BsonValue]`-like structure that traverses the elements in insertion order. Unlike native Scala maps there
+ * is no variance in the value type and it always has to be a `BsonValue`.
+ */
+ val Document = bson.Document
+
+ /**
+ * The Connection String
+ */
+ type ConnectionString = com.mongodb.ConnectionString
+
+ /**
+ * Connection String companion object
+ */
+ object ConnectionString {
+ def apply(connectionString: String): ConnectionString = new com.mongodb.ConnectionString(connectionString)
+ }
+
+ /**
+ * The result of a successful bulk write operation.
+ */
+ type BulkWriteResult = com.mongodb.bulk.BulkWriteResult
+
+ /**
+ * Represents the commit quorum, which specifies how many data-bearing members of a replica set, including the primary, must
+ * complete the index builds successfully before the primary marks the indexes as ready.
+ *
+ * @since 4.1
+ */
+ type CreateIndexCommitQuorum = com.mongodb.CreateIndexCommitQuorum
+
+ /**
+ * A MongoDB namespace, which includes a database name and collection name.
+ */
+ type MongoNamespace = com.mongodb.MongoNamespace
+
+ /**
+ * The readConcern option allows clients to choose a level of isolation for their reads.
+ *
+ * @see [[ReadConcern]]
+ * @since 1.1
+ */
+ type ReadConcernLevel = com.mongodb.ReadConcernLevel
+
+ /**
+ * Represents preferred replica set members to which a query or command can be sent.
+ */
+ type ReadPreference = com.mongodb.ReadPreference
+
+ /**
+ * Represents ReadPreferences that can be combined with tags
+ */
+ type TaggableReadPreference = com.mongodb.TaggableReadPreference
+
+ /**
+ * A replica set tag
+ */
+ type Tag = com.mongodb.Tag
+
+ /**
+ * An immutable set of tags, used to select members of a replica set to use for read operations.
+ */
+ type TagSet = com.mongodb.TagSet
+
+ /**
+ * The timeout mode for a cursor
+ *
+ * For operations that create cursors, `timeoutMS` can either cap the lifetime of the cursor or be applied separately to the
+ * original operation and all next calls.
+ *
+ * @since 5.2
+ */
+ type TimeoutMode = com.mongodb.client.cursor.TimeoutMode
+
+ /**
+ * Controls the acknowledgment of write operations with various options.
+ */
+ type WriteConcern = com.mongodb.WriteConcern
+
+ /**
+ * Controls the level of isolation for reads.
+ *
+ * @since 1.1
+ */
+ type ReadConcern = com.mongodb.ReadConcern
+
+ /**
+ * The result of a successful write operation. If the write was unacknowledged, then `wasAcknowledged` will return false and all
If the write was unacknowledged, then `wasAcknowledged` will return false and all + * other methods with throw `MongoUnacknowledgedWriteException`. + * + * @see [[WriteConcern]] + */ + type WriteConcernResult = com.mongodb.WriteConcernResult + + /** + * Represents the details of a write error , e.g. a duplicate key error + */ + type WriteError = com.mongodb.WriteError + + /** + * Represents credentials to authenticate to a MongoDB server,as well as the source of the credentials and the authentication mechanism to + * use. + */ + type MongoCredential = com.mongodb.MongoCredential + + /** + * Represents the location of a MongoDB server + */ + type ServerAddress = com.mongodb.ServerAddress + + /** + * The MongoDriverInformation class allows driver and library authors to add extra information about their library. This information is + * then available in the MongoD/MongoS logs. + * + * The following metadata can be included when creating a `MongoClient`. + * + * - The driver name. Eg: `mongo-scala-driver` + * - The driver version. Eg: `1.2.0` + * - Extra platform information. Eg: `Scala 2.11` + * + * '''Note:''' Library authors are responsible for accepting `MongoDriverInformation` from external libraries using their library. + * Also all the meta data is limited to 512 bytes and any excess data will be truncated. + * + * @since 1.2 + * @note Requires MongoDB 3.4 or greater + */ + type MongoDriverInformation = com.mongodb.MongoDriverInformation + + /** + * Various settings to control the behavior of a `MongoClient`. + */ + type MongoClientSettings = com.mongodb.MongoClientSettings + + /** + * A Client Session + * + * @since 2.4 + */ + type ClientSession = com.mongodb.reactivestreams.client.ClientSession + + /** + * Options for creating ClientSessions + * @since 2.2 + */ + type ClientSessionOptions = com.mongodb.ClientSessionOptions + + /** + * Options for transactions + * @since 2.4 + */ + type TransactionOptions = com.mongodb.TransactionOptions + + /** + * Options for creating MongoCompressor + * @since 2.2 + */ + type MongoCompressor = com.mongodb.MongoCompressor + + // MongoException Aliases + /** + * Top level Exception for all Exceptions, server-side or client-side, that come from the driver. + */ + type MongoException = com.mongodb.MongoException + + /** + * Top level Exception for all Exceptions, server-side or client-side, that come from the driver. + */ + object MongoException { + + /** + * An error label indicating that the exception can be treated as a transient transaction error. + * + * @since 2.4 + */ + val TRANSIENT_TRANSACTION_ERROR_LABEL: String = com.mongodb.MongoException.TRANSIENT_TRANSACTION_ERROR_LABEL + + /** + * An error label indicating that the exception can be treated as an unknown transaction commit result. + * + * @since 2.4 + */ + val UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL: String = + com.mongodb.MongoException.UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL + } + + /** + * An exception that represents all errors associated with a bulk write operation. + */ + type MongoBulkWriteException = com.mongodb.MongoBulkWriteException + + /** + * The result of an unsuccessful or partially unsuccessful client-level bulk write operation. + * + * @since 5.4 + */ + type ClientBulkWriteException = com.mongodb.ClientBulkWriteException + + /** + * An exception indicating that a failure occurred when running a `\$changeStream`. 
+ * @since 2.2 + */ + type MongoChangeStreamException = com.mongodb.MongoChangeStreamException + + /** + * A base class for exceptions indicating a failure condition with the MongoClient. + */ + type MongoClientException = com.mongodb.MongoClientException + + /** + * An exception indicating that a command sent to a MongoDB server returned a failure. + */ + type MongoCommandException = com.mongodb.MongoCommandException + + /** + * Subclass of [[MongoException]] representing a cursor-not-found exception. + */ + type MongoCursorNotFoundException = com.mongodb.MongoCursorNotFoundException + + /** + * Subclass of [[MongoClientException]] representing a server-unavailable exception. + */ + type MongoServerUnavailableException = com.mongodb.MongoServerUnavailableException + + /** + * Exception indicating that the execution of the current operation timed out as a result of the maximum operation time being exceeded. + */ + type MongoExecutionTimeoutException = com.mongodb.MongoExecutionTimeoutException + + /** + * An exception indicating that this version of the driver is not compatible with at least one of the servers that it is currently + * connected to. + */ + type MongoIncompatibleDriverException = com.mongodb.MongoIncompatibleDriverException + + /** + * A Mongo exception internal to the driver, not carrying any error code. + */ + type MongoInternalException = com.mongodb.MongoInternalException + + /** + * A non-checked exception indicating that the driver has been interrupted by a call to `Thread.interrupt`. + */ + type MongoInterruptedException = com.mongodb.MongoInterruptedException + + /** + * An exception indicating that the server is a member of a replica set but is in recovery mode, and therefore refused to execute + * the operation. This can happen when a server is starting up and trying to join the replica set. + */ + type MongoNodeIsRecoveringException = com.mongodb.MongoNodeIsRecoveringException + + /** + * Exception thrown when a replica set primary is identified as a stale primary during Server Discovery and Monitoring (SDAM). + * This occurs when a new primary is discovered, causing the previously known primary to be marked stale, typically during network + * partitions or elections. + * + * @since 5.6 + */ + type MongoStalePrimaryException = com.mongodb.MongoStalePrimaryException + + /** + * An exception indicating that the server is a member of a replica set but is not the primary, and therefore refused to execute either a + * write operation or a read operation that required a primary. This can happen during a replica set election. + */ + type MongoNotPrimaryException = com.mongodb.MongoNotPrimaryException + + /** + * An exception indicating that a query operation failed on the server. + */ + type MongoQueryException = com.mongodb.MongoQueryException + + /** + * This exception is thrown when there is an error reported by the underlying client authentication mechanism. + */ + type MongoSecurityException = com.mongodb.MongoSecurityException + + /** + * An exception indicating that some error has been raised by a MongoDB server in response to an operation. + */ + type MongoServerException = com.mongodb.MongoServerException + + /** + * This exception is thrown when trying to read or write from a closed socket. 
+ */
+ type MongoSocketClosedException = com.mongodb.MongoSocketClosedException
+
+ /**
+ * Subclass of [[MongoException]] representing a network-related exception.
+ */
+ type MongoSocketException = com.mongodb.MongoSocketException
+
+ /**
+ * This exception is thrown when there is an exception opening a Socket.
+ */
+ type MongoSocketOpenException = com.mongodb.MongoSocketOpenException
+
+ /**
+ * This exception is thrown when there is an exception reading a response from a Socket.
+ */
+ type MongoSocketReadException = com.mongodb.MongoSocketReadException
+
+ /**
+ * This exception is thrown when there is a timeout reading a response from the socket.
+ */
+ type MongoSocketReadTimeoutException = com.mongodb.MongoSocketReadTimeoutException
+
+ /**
+ * This exception is thrown when there is a timeout writing to a socket.
+ */
+ type MongoSocketWriteTimeoutException = com.mongodb.MongoSocketWriteTimeoutException
+
+ /**
+ * This exception is thrown when there is an exception writing to a Socket.
+ */
+ type MongoSocketWriteException = com.mongodb.MongoSocketWriteException
+
+ /**
+ * An exception indicating that the driver has timed out waiting for either a server or a connection to become available.
+ */
+ type MongoTimeoutException = com.mongodb.MongoTimeoutException
+
+ /**
+ * Exception thrown to indicate that a MongoDB operation has exceeded the specified timeout for
+ * the full execution of the operation.
+ *
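+ * One way to opt into this operation timeout is client-wide via `MongoClientSettings` (a sketch only; the
+ * 200ms value is illustrative):
+ * {{{ MongoClientSettings.builder().timeout(200, java.util.concurrent.TimeUnit.MILLISECONDS).build() }}}
+ *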
+ * The [[MongoOperationTimeoutException]] might provide information about the underlying
+ * cause of the timeout, if available. For example, if retries are attempted due to transient failures,
+ * and a timeout occurs in any of the attempts, the exception from one of the retries may be appended
+ * as the cause to this [[MongoOperationTimeoutException]].
+ *
+ * @since 5.0
+ */
+ type MongoOperationTimeoutException = com.mongodb.MongoOperationTimeoutException
+
+ /**
+ * An exception indicating a failure to apply the write concern to the requested write operation.
+ *
+ * @see [[WriteConcern]]
+ */
+ type MongoWriteConcernException = com.mongodb.MongoWriteConcernException
+
+ /**
+ * An exception indicating the failure of a write operation.
+ */
+ type MongoWriteException = com.mongodb.MongoWriteException
+
+ /**
+ * An exception representing an error reported due to a write failure.
+ */
+ type WriteConcernException = com.mongodb.WriteConcernException
+
+ /**
+ * Subclass of [[WriteConcernException]] representing a duplicate key exception.
+ */
+ type DuplicateKeyException = com.mongodb.DuplicateKeyException
+
+ /**
+ * An exception that may happen usually as a result of another thread clearing a connection pool.
+ * Such clearing itself usually happens as a result of an exception.
+ */
+ type MongoConnectionPoolClearedException = com.mongodb.MongoConnectionPoolClearedException
+
+ /**
+ * An exception thrown by methods that may automatically create data encryption keys
+ * where needed based on the `encryptedFields` configuration.
+ *
+ * @since 4.9
+ */
+ @Beta(Array(Reason.SERVER))
+ type MongoUpdatedEncryptedFieldsException = com.mongodb.MongoUpdatedEncryptedFieldsException
+
+ /**
+ * The client-side automatic encryption settings. In-use encryption enables an application to specify what fields in a collection
+ * must be encrypted, and the driver automatically encrypts commands sent to MongoDB and decrypts responses.
+ *
+ * Automatic encryption is an enterprise-only feature that only applies to operations on a collection. Automatic encryption is not
+ * supported for operations on a database or view and will result in an error. To bypass automatic encryption,
+ * set bypassAutoEncryption=true in `AutoEncryptionSettings`.
+ *
+ * Explicit encryption/decryption and automatic decryption is a community feature, enabled with the new
+ * `com.mongodb.client.vault.ClientEncryption` type.
+ *
+ * A MongoClient configured with bypassAutoEncryption=true will still automatically decrypt.
+ *
+ * If automatic encryption fails on an operation, use a MongoClient configured with bypassAutoEncryption=true and use
+ * ClientEncryption#encrypt to manually encrypt values.
+ *
+ * Enabling client-side encryption reduces the maximum document and message size (using a maxBsonObjectSize of 2MiB and
+ * maxMessageSizeBytes of 6MB) and may have a negative performance impact.
+ *
+ * Automatic encryption requires the authenticated user to have the listCollections privilege action.
+ *
+ * Supplying an `encryptedFieldsMap` provides more security than relying on an encryptedFields obtained from the server.
+ * It protects against a malicious server advertising false encryptedFields.
+ *
+ * @since 2.7
+ */
+ type AutoEncryptionSettings = com.mongodb.AutoEncryptionSettings
+
+ /**
+ * The client-side settings for data key creation and explicit encryption.
+ *
+ * Explicit encryption/decryption is a community feature, enabled with the new `com.mongodb.client.vault.ClientEncryption` type,
+ * for which these are the settings.
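+ *
+ * A minimal configuration sketch (the `clientSettings` value, the key vault namespace, and the `localMasterKey`
+ * byte array are illustrative assumptions, not part of this API):
+ * {{{
+ * val kmsProviders = java.util.Collections.singletonMap("local",
+ *   java.util.Collections.singletonMap[String, AnyRef]("key", localMasterKey))
+ * val settings = ClientEncryptionSettings.builder()
+ *   .keyVaultMongoClientSettings(clientSettings)
+ *   .keyVaultNamespace("encryption.__keyVault")
+ *   .kmsProviders(kmsProviders)
+ *   .build()
+ * }}}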
+ *
+ * @since 2.7
+ */
+ type ClientEncryptionSettings = com.mongodb.ClientEncryptionSettings
+
+ /**
+ * Helper to get the class from a classTag
+ *
+ * @param ct the classTag we want to implicitly get the class of
+ * @tparam C the class type
+ * @return the classOf[C]
+ */
+ implicit def classTagToClassOf[C](ct: ClassTag[C]): Class[C] = ct.runtimeClass.asInstanceOf[Class[C]]
+
+ implicit def bsonDocumentToDocument(doc: BsonDocument): Document = new Document(doc)
+
+ implicit def documentToUntypedDocument(doc: Document): org.bson.Document =
+ bsonDocumentToUntypedDocument(doc.underlying)
+
+ private lazy val DOCUMENT_CODEC = new DocumentCodec()
+ implicit def bsonDocumentToUntypedDocument(doc: BsonDocument): org.bson.Document = {
+ DOCUMENT_CODEC.decode(new BsonDocumentReader(doc), DecoderContext.builder().build())
+ }
+
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/result/package.scala b/driver-scala/src/main/scala/org/mongodb/scala/result/package.scala
new file mode 100644
index 00000000000..adb1932226a
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/result/package.scala
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala
+
+/**
+ * Result based types
+ *
+ * @since 1.0
+ */
+package object result {
+
+ /**
+ * The result of a delete operation. If the delete was unacknowledged, then `wasAcknowledged` will return false and all other
+ * methods will throw an `UnsupportedOperationException`.
+ */
+ type DeleteResult = com.mongodb.client.result.DeleteResult
+
+ /**
+ * The result of an update operation. If the update was unacknowledged, then `wasAcknowledged` will return false and all other
+ * methods will throw an `UnsupportedOperationException`.
+ */
+ type UpdateResult = com.mongodb.client.result.UpdateResult
+
+ /**
+ * The result of an insert one operation. If the insert was unacknowledged, then `wasAcknowledged` will return false and all other
+ * methods will throw an `UnsupportedOperationException`.
+ */
+ type InsertOneResult = com.mongodb.client.result.InsertOneResult
+
+ /**
+ * The result of an insert many operation. If the insert was unacknowledged, then `wasAcknowledged` will return false and all other
+ * methods will throw an `UnsupportedOperationException`.
+ */
+ type InsertManyResult = com.mongodb.client.result.InsertManyResult
+
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/vault/ClientEncryption.scala b/driver-scala/src/main/scala/org/mongodb/scala/vault/ClientEncryption.scala
new file mode 100644
index 00000000000..a73d6704714
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/vault/ClientEncryption.scala
@@ -0,0 +1,227 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.vault
+
+import com.mongodb.annotations.{ Beta, Reason }
+import com.mongodb.client.model.{ CreateCollectionOptions, CreateEncryptedCollectionParams }
+
+import java.io.Closeable
+import com.mongodb.reactivestreams.client.vault.{ ClientEncryption => JClientEncryption }
+import org.bson.{ BsonBinary, BsonDocument, BsonValue }
+import org.mongodb.scala.bson.conversions.Bson
+import org.mongodb.scala.{ Document, FindObservable, MongoDatabase, SingleObservable, ToSingleObservablePublisher }
+import org.mongodb.scala.model.vault.{
+ DataKeyOptions,
+ EncryptOptions,
+ RewrapManyDataKeyOptions,
+ RewrapManyDataKeyResult
+}
+import org.mongodb.scala.result.DeleteResult
+
+/**
+ * The Key vault.
+ *
+ * Used to create data encryption keys, and to explicitly encrypt and decrypt values when auto-encryption is not an option.
+ *
+ * @since 2.7
+ */
+case class ClientEncryption(private val wrapped: JClientEncryption) extends Closeable {
+
+ /**
+ * Create a data key with the given KMS provider.
+ *
+ * Creates a new key document and inserts it into the key vault collection.
+ *
+ * @param kmsProvider the KMS provider
+ * @return an Observable containing the identifier for the created data key
+ */
+ def createDataKey(kmsProvider: String): SingleObservable[BsonBinary] = createDataKey(kmsProvider, DataKeyOptions())
+
+ /**
+ * Create a data key with the given KMS provider and options.
+ *
+ * Creates a new key document and inserts it into the key vault collection.
+ *
+ * @param kmsProvider the KMS provider
+ * @param dataKeyOptions the options for data key creation
+ * @return an Observable containing the identifier for the created data key
+ */
+ def createDataKey(kmsProvider: String, dataKeyOptions: DataKeyOptions): SingleObservable[BsonBinary] =
+ wrapped.createDataKey(kmsProvider, dataKeyOptions)
+
+ /**
+ * Encrypt the given value with the given options.
+ * The driver may throw an exception for prohibited BSON value types.
+ *
+ * @param value the value to encrypt
+ * @param options the options for data encryption
+ * @return an Observable containing the encrypted value, a BSON binary of subtype 6
+ */
+ def encrypt(value: BsonValue, options: EncryptOptions): SingleObservable[BsonBinary] =
+ wrapped.encrypt(value, options)
+
+ /**
+ * Encrypts a Match Expression or Aggregate Expression to query a range index.
+ *
+ * The expression is expected to be in one of the following forms:
+ *
+ * - A Match Expression of this form:
+ * {{{ {\$and: [{<field>: {\$gt: <value1>}}, {<field>: {\$lt: <value2>}}]} }}}
+ * - An Aggregate Expression of this form:
+ * {{{ {\$and: [{\$gt: [<fieldpath>, <value1>]}, {\$lt: [<fieldpath>, <value2>]}]} }}}
+ *
+ * `\$gt` may also be `\$gte`. `\$lt` may also be `\$lte`.
+ *
+ * Only supported when queryType is "range" and algorithm is "Range".
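+ *
+ * For example, a sketch of such a Match Expression over a hypothetical encrypted `age` field (the field name and
+ * bounds are illustrative only):
+ * {{{ Document("\$and" -> Seq(Document("age" -> Document("\$gt" -> 18)), Document("age" -> Document("\$lt" -> 65)))) }}}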
+ * + * [[https://www.mongodb.com/docs/manual/core/queryable-encryption/ queryable encryption]] + * + * @note Requires MongoDB 8.0 or greater + * @param expression the Match Expression or Aggregate Expression + * @param options the options + * @return an Observable containing the queryable encrypted range expression + * @since 4.9 + */ + def encryptExpression( + expression: Document, + options: EncryptOptions + ): SingleObservable[Document] = + wrapped.encryptExpression(expression.toBsonDocument, options).map(d => Document(d)) + + /** + * Decrypt the given value. + * + * @param value the value to decrypt, which must be of subtype 6 + * @return an Observable containing the decrypted value + */ + def decrypt(value: BsonBinary): SingleObservable[BsonValue] = wrapped.decrypt(value) + + /** + * Finds a single key document with the given UUID (BSON binary subtype 0x04). + * + * @param id the data key UUID (BSON binary subtype 0x04) + * @return an Observable containing the single key document or an empty Observable if there is no match + * @since 5.6 + */ + def getKey(id: BsonBinary): SingleObservable[BsonDocument] = wrapped.getKey(id) + + /** + * Returns a key document in the key vault collection with the given keyAltName. + * + * @param keyAltName the alternative key name + * @return an Observable containing the matching key document or an empty Observable if there is no match + * @since 5.6 + */ + def getKeyByAltName(keyAltName: String): SingleObservable[BsonDocument] = wrapped.getKeyByAltName(keyAltName) + + /** + * Finds all documents in the key vault collection. + * + * @return a find Observable for the documents in the key vault collection + * @since 5.6 + */ + def keys: FindObservable[BsonDocument] = FindObservable(wrapped.getKeys) + + /** + * Adds a keyAltName to the keyAltNames array of the key document in the key vault collection with the given UUID. + * + * @param id the data key UUID (BSON binary subtype 0x04) + * @param keyAltName the alternative key name to add to the keyAltNames array + * @return an Observable containing the previous version of the key document or an empty Observable if no match + * @since 5.6 + */ + def addKeyAltName(id: BsonBinary, keyAltName: String): SingleObservable[BsonDocument] = + wrapped.addKeyAltName(id, keyAltName) + + /** + * Removes the key document with the given data key from the key vault collection. + * + * @param id the data key UUID (BSON binary subtype 0x04) + * @return an Observable containing the delete result + * @since 5.6 + */ + def deleteKey(id: BsonBinary): SingleObservable[DeleteResult] = wrapped.deleteKey(id) + + /** + * Removes a keyAltName from the keyAltNames array of the key document in the key vault collection with the given id. + * + * @param id the data key UUID (BSON binary subtype 0x04) + * @param keyAltName the alternative key name + * @return an Observable containing the previous version of the key document or an empty Observable if there is no match + * @since 5.6 + */ + def removeKeyAltName(id: BsonBinary, keyAltName: String): SingleObservable[BsonDocument] = + wrapped.removeKeyAltName(id, keyAltName) + + /** + * Decrypts multiple data keys and (re-)encrypts them with the current masterKey. 
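+ *
+ * For example, a sketch that rewraps every data key in the key vault (`clientEncryption` denotes an instance of
+ * this class, assumed for illustration; `Filters.empty()` matches all key documents):
+ * {{{ clientEncryption.rewrapManyDataKey(Filters.empty()) }}}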
+ *
+ * @param filter the filter
+ * @return an Observable containing the result
+ * @since 5.6
+ */
+ def rewrapManyDataKey(filter: Bson): SingleObservable[RewrapManyDataKeyResult] = wrapped.rewrapManyDataKey(filter)
+
+ /**
+ * Decrypts multiple data keys and (re-)encrypts them with a new masterKey, or with their current masterKey if a new one is not given.
+ *
+ * @param filter the filter
+ * @param options the options
+ * @since 5.6
+ * @return an Observable containing the result
+ */
+ def rewrapManyDataKey(filter: Bson, options: RewrapManyDataKeyOptions): SingleObservable[RewrapManyDataKeyResult] =
+ wrapped.rewrapManyDataKey(filter, options)
+
+ /**
+ * Create a new collection with encrypted fields, automatically creating new data encryption keys when needed based on the
+ * configured `encryptedFields`, which must be specified.
+ * This method does not modify the configured `encryptedFields` when creating new data keys; instead, it creates a new
+ * configuration if needed.
+ *
+ * @param database The database to use for creating the collection.
+ * @param collectionName The name for the collection to create.
+ * @param createCollectionOptions Options for creating the collection.
+ * @param createEncryptedCollectionParams Auxiliary parameters for creating an encrypted collection.
+ * @return An Observable of the (potentially updated) `encryptedFields` configuration that was used to create the collection.
+ * A user may use this document to configure `com.mongodb.AutoEncryptionSettings.getEncryptedFieldsMap`.
+ *
+ * Produces a `MongoUpdatedEncryptedFieldsException` if an exception happens after creating at least one data key.
+ * This exception makes the updated `encryptedFields` available to the caller.
+ * @since 4.9
+ * @note Requires MongoDB 7.0 or greater.
+ * @see [[https://www.mongodb.com/docs/manual/reference/command/create/ Create Command]]
+ */
+ @Beta(Array(Reason.SERVER))
+ def createEncryptedCollection(
+ database: MongoDatabase,
+ collectionName: String,
+ createCollectionOptions: CreateCollectionOptions,
+ createEncryptedCollectionParams: CreateEncryptedCollectionParams
+ ): SingleObservable[BsonDocument] =
+ wrapped.createEncryptedCollection(
+ database.wrapped,
+ collectionName,
+ createCollectionOptions,
+ createEncryptedCollectionParams
+ )
+
+ override def close(): Unit = wrapped.close()
+
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/vault/ClientEncryptions.scala b/driver-scala/src/main/scala/org/mongodb/scala/vault/ClientEncryptions.scala
new file mode 100644
index 00000000000..11ff41a5de1
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/vault/ClientEncryptions.scala
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mongodb.scala.vault
+
+import com.mongodb.ClientEncryptionSettings
+import com.mongodb.reactivestreams.client.vault.{ ClientEncryptions => JClientEncryptions }
+
+/**
+ * Factory for ClientEncryption implementations.
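+ *
+ * For example (assuming `settings` is a previously built `ClientEncryptionSettings` instance):
+ * {{{ val clientEncryption = ClientEncryptions.create(settings) }}}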
+ *
+ * @since 2.7
+ */
+object ClientEncryptions {
+
+ /**
+ * Create a key vault with the given options.
+ *
+ * @param options the key vault options
+ * @return the key vault
+ */
+ def create(options: ClientEncryptionSettings): ClientEncryption = ClientEncryption(JClientEncryptions.create(options))
+
+}
diff --git a/driver-scala/src/main/scala/org/mongodb/scala/vault/package.scala b/driver-scala/src/main/scala/org/mongodb/scala/vault/package.scala
new file mode 100644
index 00000000000..92b9b296245
--- /dev/null
+++ b/driver-scala/src/main/scala/org/mongodb/scala/vault/package.scala
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.mongodb.scala
+
+/**
+ * This package contains the Key Vault API
+ *
+ * @since 2.7
+ */
+package object vault {}
diff --git a/driver-scala/src/test/resources/logback-test.xml b/driver-scala/src/test/resources/logback-test.xml
new file mode 100644
index 00000000000..252df0531e4
--- /dev/null
+++ b/driver-scala/src/test/resources/logback-test.xml
@@ -0,0 +1,11 @@
+<configuration>
+    <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+        <encoder>
+            <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
+        </encoder>
+    </appender>
+
+    <root level="INFO">
+        <appender-ref ref="STDOUT"/>
+    </root>
+</configuration>
diff --git a/driver-scala/src/test/scala/org/mongodb/scala/AggregateObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/AggregateObservableSpec.scala
new file mode 100644
index 00000000000..bd30396ac28
--- /dev/null
+++ b/driver-scala/src/test/scala/org/mongodb/scala/AggregateObservableSpec.scala
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.mongodb.scala + +import com.mongodb.ExplainVerbosity +import com.mongodb.client.cursor.TimeoutMode +import com.mongodb.reactivestreams.client.AggregatePublisher +import org.mockito.Mockito.{ verify, verifyNoMoreInteractions } +import org.mongodb.scala.model.Collation +import org.scalatestplus.mockito.MockitoSugar + +import java.util.concurrent.TimeUnit +import scala.concurrent.duration.Duration + +class AggregateObservableSpec extends BaseSpec with MockitoSugar { + + "AggregateObservable" should "have the same methods as the wrapped AggregateObservable" in { + val wrapped: Set[String] = classOf[AggregatePublisher[Document]].getMethods.map(_.getName).toSet + val local: Set[String] = classOf[AggregateObservable[Document]].getMethods.map(_.getName).toSet + + wrapped.foreach((name: String) => { + val cleanedName = name.stripPrefix("get") + assert(local.contains(name) || local.contains(cleanedName.head.toLower + cleanedName.tail), s"Missing: $name") + }) + } + + it should "call the underlying methods" in { + val wrapper = mock[AggregatePublisher[Document]] + val observable = AggregateObservable(wrapper) + + val duration = Duration(1, TimeUnit.SECONDS) + val collation = Collation.builder().locale("en").build() + val hint = Document("{hint: 1}") + val batchSize = 10 + val ct = classOf[Document] + val verbosity = ExplainVerbosity.QUERY_PLANNER + + observable.allowDiskUse(true) + observable.maxTime(duration) + observable.maxAwaitTime(duration) + observable.bypassDocumentValidation(true) + observable.collation(collation) + observable.comment("comment") + observable.hint(hint) + observable.batchSize(batchSize) + observable.explain[Document]() + observable.explain[Document](verbosity) + observable.timeoutMode(TimeoutMode.ITERATION) + + verify(wrapper).allowDiskUse(true) + verify(wrapper).maxTime(duration.toMillis, TimeUnit.MILLISECONDS) + verify(wrapper).maxAwaitTime(duration.toMillis, TimeUnit.MILLISECONDS) + verify(wrapper).bypassDocumentValidation(true) + verify(wrapper).collation(collation) + verify(wrapper).comment("comment") + verify(wrapper).hint(hint) + verify(wrapper).batchSize(batchSize) + verify(wrapper).explain(ct) + verify(wrapper).explain(ct, verbosity) + verify(wrapper).timeoutMode(TimeoutMode.ITERATION) + + observable.toCollection() + verify(wrapper).toCollection + + verifyNoMoreInteractions(wrapper) + } +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/ApiAliasAndCompanionSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/ApiAliasAndCompanionSpec.scala new file mode 100644 index 00000000000..a5b76965651 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/ApiAliasAndCompanionSpec.scala @@ -0,0 +1,444 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala + +import org.reflections.Reflections +import org.reflections.scanners.SubTypesScanner +import org.reflections.util.{ ClasspathHelper, ConfigurationBuilder, FilterBuilder } +import org.scalatest.Inspectors.forEvery + +import java.lang.reflect.Modifier._ +import scala.collection.JavaConverters._ +import scala.reflect.runtime.currentMirror + +class ApiAliasAndCompanionSpec extends BaseSpec { + + val classFilter = (f: Class[_ <: Object]) => + isPublic(f.getModifiers) && !f.getName.contains("$") && !f.getSimpleName.contains("Test") + + "The scala package" should "mirror the com.mongodb package and com.mongodb.reactivestreams.client" in { + val packageName = "com.mongodb" + val javaExclusions = Set( + "Address", + "AwsCredential", + "BasicDBList", + "BasicDBObject", + "BasicDBObjectBuilder", + "Block", + "BSONTimestampCodec", + "CausalConsistencyExamples", + "ChangeStreamSamples", + "ContextHelper", + "ContextProvider", + "DBObject", + "DBObjectCodec", + "DBObjectCodecProvider", + "DBRef", + "DBRefCodec", + "DBRefCodecProvider", + "DnsClient", + "DnsClientProvider", + "DocumentToDBRefTransformer", + "Function", + "FutureResultCallback", + "InetAddressResolver", + "InetAddressResolverProvider", + "Jep395RecordCodecProvider", + "KerberosSubjectProvider", + "KotlinCodecProvider", + "MongoClients", + "NonNull", + "NonNullApi", + "Nullable", + "Person", + "ReadPreferenceHedgeOptions", + "ReactiveContextProvider", + "RequestContext", + "ServerApi", + "ServerCursor", + "ServerSession", + "SessionContext", + "SingleResultCallback", + "Slow", + "SubjectProvider", + "TransactionExample", + "UnixServerAddress", + "SubscriberHelpers", + "PublisherHelpers", + "TargetDocument", + "UpdatePrimer", + "InsertPrimer", + "IndexesPrimer", + "QueryPrimer", + "DocumentationSamples", + "AggregatePrimer", + "RemovePrimer", + "SyncMongoClient", + "SyncMongoCluster", + "SyncGridFSBucket", + "SyncMongoDatabase", + "SyncClientEncryption", + "BaseClientUpdateOptions", + "BaseClientDeleteOptions", + "MongoBaseInterfaceAssertions" + ) + val scalaExclusions = Set( + "BuildInfo", + "BulkWriteResult", + "ClientSessionImplicits", + "Document", + "Helpers", + "internal", + "Observable", + "ObservableImplicits", + "Observer", + "package", + "ReadConcernLevel", + "SingleObservable", + "Subscription", + "TimeoutMode" + ) + + val classFilter = (f: Class[_ <: Object]) => { + isPublic(f.getModifiers) && + !f.getName.contains("$") && + !f.getSimpleName.contains("Spec") && + !f.getSimpleName.contains("Test") && + !f.getSimpleName.contains("Tour") && + !f.getSimpleName.contains("Fixture") && + !javaExclusions.contains(f.getSimpleName) + } + val filters = FilterBuilder.parse( + """ + |-com.mongodb.annotations.*, + |-com.mongodb.assertions.*, + |-com.mongodb.binding.*, + |-com.mongodb.bulk.*, + |-com.mongodb.client.*, + |-com.mongodb.connection.*, + |-com.mongodb.crypt.*, + |-com.mongodb.diagnostics.*, + |-com.mongodb.event.*, + |-com.mongodb.internal.*, + |-com.mongodb.management.*, + |-com.mongodb.operation.*, + |-com.mongodb.selector.*, + |-com.mongodb.kotlin.*, + |-com.mongodb.test.*, + |-com.mongodb.client.gridfs.*, + |-com.mongodb.async.client.*, + |-com.mongodb.async.client.gridfs.*, + |-com.mongodb.async.client.internal.*, + |-com.mongodb.async.client.vault.*, + |-com.mongodb.reactivestreams.client.gridfs.*, + |-com.mongodb.reactivestreams.client.internal.*, + |-com.mongodb.reactivestreams.client.vault.*""".stripMargin + ) + + val exceptions = new Reflections(packageName) + 
.getSubTypesOf(classOf[MongoException])
+ .asScala
+ .map(_.getSimpleName)
+ .toSet +
+ "MongoException" - "MongoGridFSException" - "MongoConfigurationException" - "MongoWriteConcernWithResponseException"
+
+ val objects = new Reflections(
+ new ConfigurationBuilder()
+ .setUrls(ClasspathHelper.forPackage(packageName))
+ .setScanners(new SubTypesScanner(false))
+ .filterInputsBy(filters)
+ ).getSubTypesOf(classOf[Object])
+ .asScala
+ .filter(classFilter)
+ .map(_.getSimpleName.replace("Publisher", "Observable"))
+ .toSet
+
+ val wrapped = objects ++ exceptions
+
+ val scalaPackageName = "org.mongodb.scala"
+ val scalaObjects = new Reflections(scalaPackageName, new SubTypesScanner(false))
+ .getSubTypesOf(classOf[Object])
+ .asScala
+ .filter(classFilter)
+ .filter(f => f.getPackage.getName == scalaPackageName)
+ .map(_.getSimpleName)
+ .toSet
+ val packageObjects =
+ currentMirror.staticPackage(scalaPackageName).info.decls.filter(!_.isImplicit).map(_.name.toString).toSet
+ val local = (scalaObjects ++ packageObjects) -- scalaExclusions
+
+ diff(local, wrapped) shouldBe empty
+ }
+
+ it should "mirror parts of com.mongodb.connection in org.mongodb.scala.connection" in {
+ val packageName = "com.mongodb.connection"
+ val javaExclusions = Set(
+ "AsyncCompletionHandler",
+ "ClusterDescription",
+ "ClusterId",
+ "ConnectionDescription",
+ "ConnectionId",
+ "ServerDescription",
+ "ServerId",
+ "ServerVersion",
+ "TopologyVersion"
+ )
+
+ val filters = FilterBuilder.parse("-com.mongodb.connection.netty.*")
+ val wrapped = new Reflections(
+ new ConfigurationBuilder()
+ .setUrls(ClasspathHelper.forPackage(packageName))
+ .setScanners(new SubTypesScanner(false))
+ .filterInputsBy(filters)
+ ).getSubTypesOf(classOf[Object])
+ .asScala
+ .filter(_.getPackage.getName == packageName)
+ .filter(classFilter)
+ .map(_.getSimpleName)
+ .toSet -- javaExclusions
+
+ val scalaPackageName = "org.mongodb.scala.connection"
+ val scalaExclusions = Set(
+ "package",
+ "NettyStreamFactoryFactory",
+ "NettyStreamFactoryFactoryBuilder",
+ "AsynchronousSocketChannelStreamFactoryFactoryBuilder"
+ )
+ val local = currentMirror.staticPackage(scalaPackageName).info.decls.map(_.name.toString).toSet -- scalaExclusions
+
+ diff(local, wrapped) shouldBe empty
+ }
+
+ it should "mirror all com.mongodb.client in org.mongodb.scala" in {
+ val packageName = "com.mongodb.client"
+
+ val javaExclusions = Set(
+ "ClientSession",
+ "ConcreteCodecProvider",
+ "Fixture",
+ "ImmutableDocument",
+ "ImmutableDocumentCodec",
+ "ImmutableDocumentCodecProvider",
+ "ListCollectionsObservable",
+ "MongoChangeStreamCursor",
+ "MongoClientFactory",
+ "MongoClients",
+ "MongoCursor",
+ "MongoObservable",
+ "Name",
+ "NameCodecProvider",
+ "SynchronousContextProvider",
+ "TransactionBody",
+ "FailPoint",
+ "WithWrapper"
+ )
+
+ val wrapped = new Reflections(packageName, new SubTypesScanner(false))
+ .getSubTypesOf(classOf[Object])
+ .asScala
+ .filter(_.getPackage.getName == packageName)
+ .filter(classFilter)
+ .map(_.getSimpleName.replace("Iterable", "Observable"))
+ .toSet -- javaExclusions
+
+ val scalaPackageName = "org.mongodb.scala"
+ val local = new Reflections(scalaPackageName, new SubTypesScanner(false))
+ .getSubTypesOf(classOf[Object])
+ .asScala
+ .filter(_.getPackage.getName == scalaPackageName)
+ .filter((f: Class[_ <: Object]) => isPublic(f.getModifiers))
+ .map(_.getSimpleName.replace("$", ""))
+ .toSet
+
+ forEvery(wrapped) { (className: String) =>
+ local should contain(className)
+ }
+ }
+
+ it should "mirror all com.mongodb.client.model in org.mongodb.scala.model" in {
+ val javaExclusions = Set("ParallelCollectionScanOptions", "AggregationLevel")
+ val packageName = "com.mongodb.client.model"
+
+ val objectsAndEnums = new Reflections(packageName, new SubTypesScanner(false))
+ .getSubTypesOf(classOf[Object])
+ .asScala ++
+ new Reflections(packageName, new SubTypesScanner(false)).getSubTypesOf(classOf[Enum[_]]).asScala
+
+ val wrapped = objectsAndEnums
+ .filter(_.getPackage.getName == packageName)
+ .filter(classFilter)
+ .map(_.getSimpleName)
+ .toSet -- javaExclusions
+
+ val scalaPackageName = "org.mongodb.scala.model"
+ val localPackage = currentMirror.staticPackage(scalaPackageName).info.decls.map(_.name.toString).toSet
+ val localObjects = new Reflections(scalaPackageName, new SubTypesScanner(false))
+ .getSubTypesOf(classOf[Object])
+ .asScala
+ .filter(_.getPackage.getName == scalaPackageName)
+ .filter(classFilter)
+ .map(_.getSimpleName)
+ .toSet
+ val scalaExclusions = Set("package")
+ val local = (localPackage ++ localObjects) -- scalaExclusions
+
+ diff(local, wrapped) shouldBe empty
+ }
+
+ it should "mirror all com.mongodb.client.model.search in org.mongodb.scala.model.search" in {
+ val packageName = "com.mongodb.client.model.search"
+ val wrapped = new Reflections(packageName, new SubTypesScanner(false))
+ .getSubTypesOf(classOf[Object])
+ .asScala
+ .filter(_.getPackage.getName == packageName)
+ .filter(classFilter)
+ .map(_.getSimpleName)
+ .toSet
+ val scalaPackageName = "org.mongodb.scala.model.search"
+ val localPackage = currentMirror.staticPackage(scalaPackageName).info.decls.map(_.name.toString).toSet
+ val localObjects = new Reflections(scalaPackageName, new SubTypesScanner(false))
+ .getSubTypesOf(classOf[Object])
+ .asScala
+ .filter(_.getPackage.getName == scalaPackageName)
+ .filter(classFilter)
+ .map(_.getSimpleName)
+ .toSet
+ val local = localPackage ++ localObjects - "package"
+ diff(local, wrapped) shouldBe empty
+ }
+
+ it should "mirror all com.mongodb.client.model.geojson in org.mongodb.scala.model.geojson" in {
+ val packageName = "com.mongodb.client.model.geojson"
+ val wrapped = new Reflections(packageName, new SubTypesScanner(false))
+ .getSubTypesOf(classOf[Object])
+ .asScala
+ .filter(_.getPackage.getName == packageName)
+ .filter(classFilter)
+ .map(_.getSimpleName)
+ .toSet ++ Set("GeoJsonObjectType", "CoordinateReferenceSystemType")
+
+ val scalaPackageName = "org.mongodb.scala.model.geojson"
+ val local = currentMirror.staticPackage(scalaPackageName).info.decls.map(_.name.toString).toSet - "package"
+
+ diff(local, wrapped) shouldBe empty
+ }
+
+ it should "mirror all com.mongodb.client.result in org.mongodb.scala.result" in {
+ val packageName = "com.mongodb.client.result"
+ val wrapped = new Reflections(packageName, new SubTypesScanner(false))
+ .getSubTypesOf(classOf[Object])
+ .asScala
+ .filter(_.getPackage.getName == packageName)
+ .filter(classFilter)
+ .map(_.getSimpleName)
+ .toSet
+
+ val scalaPackageName = "org.mongodb.scala.result"
+ val local = currentMirror.staticPackage(scalaPackageName).info.decls.map(_.name.toString).toSet - "package"
+
+ diff(local, wrapped) shouldBe empty
+ }
+
+ it should "mirror all com.mongodb.reactivestreams.client.vault in org.mongodb.scala.vault" in {
+ val packageName = "com.mongodb.reactivestreams.client.vault"
+ val wrapped = new Reflections(packageName, new SubTypesScanner(false))
+ .getSubTypesOf(classOf[Object])
+ .asScala
+ .filter(_.getPackage.getName == packageName)
+ .filter(classFilter)
+ .map(_.getSimpleName)
+ .toSet
+
+ val scalaPackageName = "org.mongodb.scala.vault"
+ val localPackage = currentMirror.staticPackage(scalaPackageName).info.decls.map(_.name.toString).toSet
+ val localObjects = new Reflections(scalaPackageName, new SubTypesScanner(false))
+ .getSubTypesOf(classOf[Object])
+ .asScala
+ .filter(classFilter)
+ .map(_.getSimpleName)
+ .toSet
+ val scalaExclusions = Set("package")
+ val local = (localPackage ++ localObjects) -- scalaExclusions
+
+ diff(local, wrapped) shouldBe empty
+ }
+
+ it should "mirror all com.mongodb.WriteConcern in org.mongodb.scala.WriteConcern" in {
+ val notMirrored = Set(
+ "SAFE",
+ "serialVersionUID",
+ "FSYNCED",
+ "FSYNC_SAFE",
+ "JOURNAL_SAFE",
+ "REPLICAS_SAFE",
+ "REPLICA_ACKNOWLEDGED",
+ "NAMED_CONCERNS",
+ "NORMAL",
+ "majorityWriteConcern",
+ "valueOf"
+ )
+ val wrapped =
+ (classOf[com.mongodb.WriteConcern].getDeclaredMethods ++ classOf[com.mongodb.WriteConcern].getDeclaredFields)
+ .filter(f => isStatic(f.getModifiers) && !notMirrored.contains(f.getName))
+ .map(_.getName)
+ .toSet
+
+ val local = WriteConcern.getClass.getDeclaredMethods
+ .filter(f => f.getName != "apply" && isPublic(f.getModifiers))
+ .map(_.getName)
+ .toSet
+
+ diff(local, wrapped) shouldBe empty
+ }
+
+ it should "mirror com.mongodb.reactivestreams.client.gridfs in org.mongodb.scala.gridfs" in {
+ val javaExclusions = Set("GridFSBuckets", "GridFSDownloadByNameOptions")
+ val wrapped: Set[String] = Set("com.mongodb.reactivestreams.client.gridfs", "com.mongodb.client.gridfs.model")
+ .flatMap(packageName =>
+ new Reflections(packageName, new SubTypesScanner(false))
+ .getSubTypesOf(classOf[Object])
+ .asScala
+ .filter(_.getPackage.getName == packageName)
+ .filter(classFilter)
+ .map(_.getSimpleName.replace("Publisher", "Observable"))
+ .toSet
+ ) -- javaExclusions + "MongoGridFSException"
+
+ val scalaPackageName = "org.mongodb.scala.gridfs"
+ val scalaExclusions = Set(
+ "package",
+ "AsyncOutputStream",
+ "AsyncInputStream",
+ "GridFSUploadStream",
+ "GridFSDownloadStream"
+ )
+
+ val packageObjects =
+ currentMirror.staticPackage(scalaPackageName).info.decls.filter(!_.isImplicit).map(_.name.toString).toSet
+ val local = new Reflections(scalaPackageName, new SubTypesScanner(false))
+ .getSubTypesOf(classOf[Object])
+ .asScala
+ .filter(classFilter)
+ .filter(f => f.getPackage.getName ==
scalaPackageName) + .map(_.getSimpleName) + .toSet ++ packageObjects -- scalaExclusions + + diff(local, wrapped) shouldBe empty + } + + def diff(a: Set[String], b: Set[String]): Set[String] = a.diff(b) ++ b.diff(a) +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/BaseSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/BaseSpec.scala new file mode 100644 index 00000000000..9d59b8f55e6 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/BaseSpec.scala @@ -0,0 +1,21 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.mongodb.scala + +import org.scalatest.flatspec.AnyFlatSpec +import org.scalatest.matchers.should.Matchers + +abstract class BaseSpec extends AnyFlatSpec with Matchers {} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/ChangeStreamObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/ChangeStreamObservableSpec.scala new file mode 100644 index 00000000000..bcc4bfea815 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/ChangeStreamObservableSpec.scala @@ -0,0 +1,83 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala + +import com.mongodb.client.cursor.TimeoutMode +import com.mongodb.reactivestreams.client.ChangeStreamPublisher +import org.mockito.Mockito.{ verify, verifyNoMoreInteractions } +import org.mongodb.scala.bson.BsonTimestamp +import org.mongodb.scala.model.Collation +import org.mongodb.scala.model.changestream.FullDocument +import org.reactivestreams.Publisher +import org.scalatestplus.mockito.MockitoSugar + +import java.util.concurrent.TimeUnit +import scala.concurrent.duration.Duration +import scala.util.Success + +class ChangeStreamObservableSpec extends BaseSpec with MockitoSugar { + + "ChangeStreamObservable" should "have the same methods as the wrapped ChangeStreamPublisher" in { + val mongoPublisher: Set[String] = classOf[Publisher[Document]].getMethods.map(_.getName).toSet + val wrapped: Set[String] = classOf[ChangeStreamPublisher[Document]].getMethods + .map(_.getName) + .toSet -- mongoPublisher + val local = classOf[ChangeStreamObservable[Document]].getMethods.map(_.getName).toSet + + wrapped.foreach((name: String) => { + val cleanedName = name.stripPrefix("get") + assert(local.contains(name) || local.contains(cleanedName.head.toLower + cleanedName.tail), s"Missing: $name") + }) + } + + it should "call the underlying methods" in { + val wrapper = mock[ChangeStreamPublisher[Document]] + val observable = ChangeStreamObservable[Document](wrapper) + + val duration = Duration(1, TimeUnit.SECONDS) + val resumeToken = Document() + val fullDocument = FullDocument.DEFAULT + val startAtTime = BsonTimestamp() + val collation = Collation.builder().locale("en").build() + val batchSize = 10 + + observable.batchSize(batchSize) + observable.fullDocument(fullDocument) + observable.resumeAfter(resumeToken) + observable.startAfter(resumeToken) + observable.startAtOperationTime(startAtTime) + observable.maxAwaitTime(duration) + observable.collation(collation) + observable.withDocumentClass(classOf[Int]) + + verify(wrapper).batchSize(batchSize) + verify(wrapper).fullDocument(fullDocument) + verify(wrapper).resumeAfter(resumeToken.underlying) + verify(wrapper).startAfter(resumeToken.underlying) + verify(wrapper).startAtOperationTime(startAtTime) + verify(wrapper).maxAwaitTime(duration.toMillis, TimeUnit.MILLISECONDS) + verify(wrapper).collation(collation) + verify(wrapper).withDocumentClass(classOf[Int]) + + verifyNoMoreInteractions(wrapper) + } + + it should "mirror FullDocument" in { + FullDocument.fromString("default") shouldBe Success(FullDocument.DEFAULT) + FullDocument.fromString("madeUp").isFailure shouldBe true + } +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/CreateIndexCommitQuorumSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/CreateIndexCommitQuorumSpec.scala new file mode 100644 index 00000000000..1ecbb1443ba --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/CreateIndexCommitQuorumSpec.scala @@ -0,0 +1,57 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala + +import java.lang.reflect.Modifier.isStatic + +class CreateIndexCommitQuorumSpec extends BaseSpec { + + "CreateIndexCommitQuorum" should "have the same methods as the wrapped CreateIndexCommitQuorum" in { + val wrapped = + classOf[com.mongodb.CreateIndexCommitQuorum].getDeclaredMethods + .filter(f => isStatic(f.getModifiers)) + .map(_.getName) + .toSet ++ + classOf[com.mongodb.CreateIndexCommitQuorum].getDeclaredFields + .filter(f => isStatic(f.getModifiers)) + .map(_.getName) + .toSet + val local = CreateIndexCommitQuorum.getClass.getDeclaredMethods.map(_.getName).toSet + + local should equal(wrapped) + } + + it should "return the correct create index commit quorum for majority" in { + val commitQuorumMajority = CreateIndexCommitQuorum.MAJORITY + commitQuorumMajority shouldBe com.mongodb.CreateIndexCommitQuorum.MAJORITY + } + + it should "return the correct create index commit quorum for voting members" in { + val commitQuorumVotingMembers = CreateIndexCommitQuorum.VOTING_MEMBERS + commitQuorumVotingMembers shouldBe com.mongodb.CreateIndexCommitQuorum.VOTING_MEMBERS + } + + it should "return the correct create index commit quorum with a mode" in { + val commitQuorumMode = CreateIndexCommitQuorum.create("majority") + commitQuorumMode shouldBe com.mongodb.CreateIndexCommitQuorum.create("majority") + } + + it should "return the correct create index commit quorum with a w value" in { + val commitQuorumW = CreateIndexCommitQuorum.create(2) + commitQuorumW shouldBe com.mongodb.CreateIndexCommitQuorum.create(2) + } +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/DistinctObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/DistinctObservableSpec.scala new file mode 100644 index 00000000000..60f4f271f09 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/DistinctObservableSpec.scala @@ -0,0 +1,63 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala +import com.mongodb.client.cursor.TimeoutMode +import com.mongodb.reactivestreams.client.DistinctPublisher +import org.mockito.Mockito.{ verify, verifyNoMoreInteractions } +import org.mongodb.scala.model.Collation +import org.reactivestreams.Publisher +import org.scalatestplus.mockito.MockitoSugar + +import java.util.concurrent.TimeUnit +import scala.concurrent.duration.Duration +class DistinctObservableSpec extends BaseSpec with MockitoSugar { + + "DistinctObservable" should "have the same methods as the wrapped DistinctObservable" in { + val mongoPublisher: Set[String] = classOf[Publisher[Document]].getMethods.map(_.getName).toSet + val wrapped = classOf[DistinctPublisher[Document]].getMethods.map(_.getName).toSet -- mongoPublisher + val local = classOf[DistinctObservable[Document]].getMethods.map(_.getName).toSet + + wrapped.foreach((name: String) => { + val cleanedName = name.stripPrefix("get") + assert(local.contains(name) || local.contains(cleanedName.head.toLower + cleanedName.tail), s"Missing: $name") + }) + } + + it should "call the underlying methods" in { + val wrapper = mock[DistinctPublisher[Document]] + val observable = DistinctObservable(wrapper) + + val filter = Document("a" -> 1) + val duration = Duration(1, TimeUnit.SECONDS) + val collation = Collation.builder().locale("en").build() + val batchSize = 10 + + observable.filter(filter) + observable.maxTime(duration) + observable.collation(collation) + observable.batchSize(batchSize) + observable.timeoutMode(TimeoutMode.ITERATION) + + verify(wrapper).filter(filter) + verify(wrapper).maxTime(duration.toMillis, TimeUnit.MILLISECONDS) + verify(wrapper).collation(collation) + verify(wrapper).batchSize(batchSize) + verify(wrapper).timeoutMode(TimeoutMode.ITERATION) + + verifyNoMoreInteractions(wrapper) + } +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/FindObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/FindObservableSpec.scala new file mode 100644 index 00000000000..570978012ae --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/FindObservableSpec.scala @@ -0,0 +1,102 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala + +import com.mongodb.client.cursor.TimeoutMode +import com.mongodb.reactivestreams.client.FindPublisher +import com.mongodb.{ CursorType, ExplainVerbosity } +import org.mockito.Mockito.{ verify, verifyNoMoreInteractions } +import org.mongodb.scala.model.Collation +import org.reactivestreams.Publisher +import org.scalatestplus.mockito.MockitoSugar + +import java.util.concurrent.TimeUnit +import scala.concurrent.duration.Duration + +class FindObservableSpec extends BaseSpec with MockitoSugar { + + "FindObservable" should "have the same methods as the wrapped FindPublisher" in { + val mongoPublisher: Set[String] = classOf[Publisher[Document]].getMethods.map(_.getName).toSet + val wrapped = classOf[FindPublisher[Document]].getMethods.map(_.getName).toSet -- mongoPublisher + val local = classOf[FindObservable[Document]].getMethods.map(_.getName).toSet + + wrapped.foreach((name: String) => { + val cleanedName = name.stripPrefix("get") + assert(local.contains(name) || local.contains(cleanedName.head.toLower + cleanedName.tail), s"Missing: $name") + }) + } + + it should "call the underlying methods" in { + val wrapper = mock[FindPublisher[Document]] + val observable = FindObservable(wrapper) + + val filter = Document("a" -> 1) + val hint = Document("a" -> 1) + val hintString = "a_1" + val duration = Duration(1, TimeUnit.SECONDS) + val maxDuration = Duration(10, TimeUnit.SECONDS) + val projection = Document("proj" -> 1) + val sort = Document("sort" -> 1) + val collation = Collation.builder().locale("en").build() + val batchSize = 10 + val ct = classOf[Document] + val verbosity = ExplainVerbosity.QUERY_PLANNER + + observable.first() + verify(wrapper).first() + + observable.collation(collation) + observable.cursorType(CursorType.NonTailable) + observable.filter(filter) + observable.hint(hint) + observable.hintString(hintString) + observable.limit(1) + observable.maxAwaitTime(maxDuration) + observable.maxTime(duration) + observable.noCursorTimeout(true) + observable.partial(true) + observable.projection(projection) + observable.skip(1) + observable.sort(sort) + observable.batchSize(batchSize) + observable.allowDiskUse(true) + observable.explain[Document]() + observable.explain[Document](verbosity) + observable.timeoutMode(TimeoutMode.ITERATION) + + verify(wrapper).collation(collation) + verify(wrapper).cursorType(CursorType.NonTailable) + verify(wrapper).filter(filter) + verify(wrapper).limit(1) + verify(wrapper).hint(hint) + verify(wrapper).hintString(hintString) + verify(wrapper).maxAwaitTime(maxDuration.toMillis, TimeUnit.MILLISECONDS) + verify(wrapper).maxTime(duration.toMillis, TimeUnit.MILLISECONDS) + verify(wrapper).noCursorTimeout(true) + verify(wrapper).partial(true) + verify(wrapper).projection(projection) + verify(wrapper).skip(1) + verify(wrapper).sort(sort) + verify(wrapper).batchSize(batchSize) + verify(wrapper).allowDiskUse(true) + verify(wrapper).explain(ct) + verify(wrapper).explain(ct, verbosity) + verify(wrapper).timeoutMode(TimeoutMode.ITERATION) + + verifyNoMoreInteractions(wrapper) + } +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/ListCollectionNamesObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/ListCollectionNamesObservableSpec.scala new file mode 100644 index 00000000000..5a89cbf5760 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/ListCollectionNamesObservableSpec.scala @@ -0,0 +1,60 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala + +import com.mongodb.reactivestreams.client.ListCollectionNamesPublisher +import org.mockito.Mockito.{ verify, verifyNoMoreInteractions } +import org.reactivestreams.Publisher +import org.scalatestplus.mockito.MockitoSugar + +import java.util.concurrent.TimeUnit +import scala.concurrent.duration.Duration + +class ListCollectionNamesObservableSpec extends BaseSpec with MockitoSugar { + + "ListCollectionNamesObservable" should "have the same methods as the wrapped ListCollectionNamesPublisher" in { + val mongoPublisher: Set[String] = classOf[Publisher[Document]].getMethods.map(_.getName).toSet + val wrapped = classOf[ListCollectionNamesPublisher].getMethods.map(_.getName).toSet -- mongoPublisher + val local = classOf[ListCollectionNamesObservable].getMethods.map(_.getName).toSet + + wrapped.foreach((name: String) => { + val cleanedName = name.stripPrefix("get") + assert(local.contains(name) || local.contains(cleanedName.head.toLower + cleanedName.tail), s"Missing: $name") + }) + } + + it should "call the underlying methods" in { + val wrapper = mock[ListCollectionNamesPublisher] + val observable = ListCollectionNamesObservable(wrapper) + + val filter = Document("a" -> 1) + val duration = Duration(1, TimeUnit.SECONDS) + val batchSize = 10 + val authorizedCollections = true + + observable.filter(filter) + observable.maxTime(duration) + observable.batchSize(batchSize) + observable.authorizedCollections(authorizedCollections) + + verify(wrapper).filter(filter) + verify(wrapper).maxTime(duration.toMillis, TimeUnit.MILLISECONDS) + verify(wrapper).batchSize(batchSize) + verify(wrapper).authorizedCollections(authorizedCollections) + verifyNoMoreInteractions(wrapper) + } +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/ListCollectionsObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/ListCollectionsObservableSpec.scala new file mode 100644 index 00000000000..141dd62013a --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/ListCollectionsObservableSpec.scala @@ -0,0 +1,61 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala + +import com.mongodb.client.cursor.TimeoutMode +import com.mongodb.reactivestreams.client.ListCollectionsPublisher +import org.mockito.Mockito.{ verify, verifyNoMoreInteractions } +import org.reactivestreams.Publisher +import org.scalatestplus.mockito.MockitoSugar + +import java.util.concurrent.TimeUnit +import scala.concurrent.duration.Duration + +class ListCollectionsObservableSpec extends BaseSpec with MockitoSugar { + + "ListCollectionsObservable" should "have the same methods as the wrapped ListCollectionsPublisher" in { + val mongoPublisher: Set[String] = classOf[Publisher[Document]].getMethods.map(_.getName).toSet + val wrapped = classOf[ListCollectionsPublisher[Document]].getMethods.map(_.getName).toSet -- mongoPublisher + val local = classOf[ListCollectionsObservable[Document]].getMethods.map(_.getName).toSet + + wrapped.foreach((name: String) => { + val cleanedName = name.stripPrefix("get") + assert(local.contains(name) || local.contains(cleanedName.head.toLower + cleanedName.tail), s"Missing: $name") + }) + } + + it should "call the underlying methods" in { + val wrapper = mock[ListCollectionsPublisher[Document]] + val observable = ListCollectionsObservable(wrapper) + + val filter = Document("a" -> 1) + val duration = Duration(1, TimeUnit.SECONDS) + val batchSize = 10 + + observable.filter(filter) + observable.maxTime(duration) + observable.batchSize(batchSize) + observable.timeoutMode(TimeoutMode.ITERATION) + + verify(wrapper).filter(filter) + verify(wrapper).maxTime(duration.toMillis, TimeUnit.MILLISECONDS) + verify(wrapper).batchSize(batchSize) + verify(wrapper).timeoutMode(TimeoutMode.ITERATION) + + verifyNoMoreInteractions(wrapper) + } +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/ListDatabasesObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/ListDatabasesObservableSpec.scala new file mode 100644 index 00000000000..385bbf4e306 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/ListDatabasesObservableSpec.scala @@ -0,0 +1,61 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala +import com.mongodb.client.cursor.TimeoutMode +import com.mongodb.reactivestreams.client.ListDatabasesPublisher +import org.mockito.Mockito.{ verify, verifyNoMoreInteractions } +import org.reactivestreams.Publisher +import org.scalatestplus.mockito.MockitoSugar + +import java.util.concurrent.TimeUnit +import scala.concurrent.duration.Duration + +class ListDatabasesObservableSpec extends BaseSpec with MockitoSugar { + + "ListDatabasesObservable" should "have the same methods as the wrapped ListDatabasesPublisher" in { + val mongoPublisher: Set[String] = classOf[Publisher[Document]].getMethods.map(_.getName).toSet + val wrapped = classOf[ListDatabasesPublisher[Document]].getMethods.map(_.getName).toSet -- mongoPublisher + val local = classOf[ListDatabasesObservable[Document]].getMethods.map(_.getName).toSet + + wrapped.foreach((name: String) => { + val cleanedName = name.stripPrefix("get") + assert(local.contains(name) || local.contains(cleanedName.head.toLower + cleanedName.tail), s"Missing: $name") + }) + } + + it should "call the underlying methods" in { + val wrapper = mock[ListDatabasesPublisher[Document]] + val observable = ListDatabasesObservable(wrapper) + val filter = Document("{a: 1}") + val duration = Duration(1, TimeUnit.SECONDS) + val batchSize = 10 + + observable.maxTime(duration) + observable.filter(filter) + observable.nameOnly(true) + observable.batchSize(batchSize) + observable.timeoutMode(TimeoutMode.ITERATION) + + verify(wrapper).maxTime(duration.toMillis, TimeUnit.MILLISECONDS) + verify(wrapper).filter(filter) + verify(wrapper).nameOnly(true) + verify(wrapper).batchSize(batchSize) + verify(wrapper).timeoutMode(TimeoutMode.ITERATION) + + verifyNoMoreInteractions(wrapper) + } +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/ListIndexesObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/ListIndexesObservableSpec.scala new file mode 100644 index 00000000000..6d8a60a1e92 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/ListIndexesObservableSpec.scala @@ -0,0 +1,56 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.mongodb.scala +import com.mongodb.client.cursor.TimeoutMode +import com.mongodb.reactivestreams.client.ListIndexesPublisher +import org.mockito.Mockito.{ verify, verifyNoMoreInteractions } +import org.reactivestreams.Publisher +import org.scalatestplus.mockito.MockitoSugar + +import java.util.concurrent.TimeUnit +import scala.concurrent.duration.Duration + +class ListIndexesObservableSpec extends BaseSpec with MockitoSugar { + + "ListIndexesObservable" should "have the same methods as the wrapped ListIndexesPublisher" in { + val mongoPublisher: Set[String] = classOf[Publisher[Document]].getMethods.map(_.getName).toSet + val wrapped = classOf[ListIndexesPublisher[Document]].getMethods.map(_.getName).toSet -- mongoPublisher + val local = classOf[ListIndexesObservable[Document]].getMethods.map(_.getName).toSet + + wrapped.foreach((name: String) => { + val cleanedName = name.stripPrefix("get") + assert(local.contains(name) || local.contains(cleanedName.head.toLower + cleanedName.tail), s"Missing: $name") + }) + } + + it should "call the underlying methods" in { + val wrapper = mock[ListIndexesPublisher[Document]] + val observable = ListIndexesObservable(wrapper) + val duration = Duration(1, TimeUnit.SECONDS) + val batchSize = 10 + + observable.maxTime(duration) + observable.batchSize(batchSize) + observable.timeoutMode(TimeoutMode.ITERATION) + + verify(wrapper).maxTime(duration.toMillis, TimeUnit.MILLISECONDS) + verify(wrapper).batchSize(batchSize) + verify(wrapper).timeoutMode(TimeoutMode.ITERATION) + + verifyNoMoreInteractions(wrapper) + } +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/MapReduceObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/MapReduceObservableSpec.scala new file mode 100644 index 00000000000..7c99804d329 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/MapReduceObservableSpec.scala @@ -0,0 +1,89 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.mongodb.scala + +import com.mongodb.client.cursor.TimeoutMode +import com.mongodb.client.model.MapReduceAction +import com.mongodb.reactivestreams.client.MapReducePublisher +import org.mockito.Mockito.{ verify, verifyNoMoreInteractions } +import org.mongodb.scala.model.Collation +import org.scalatestplus.mockito.MockitoSugar + +import java.util.concurrent.TimeUnit +import scala.concurrent.duration.Duration + +class MapReduceObservableSpec extends BaseSpec with MockitoSugar { + + "MapReduceObservable" should "have the same methods as the wrapped MapReducePublisher" in { + val wrapped = classOf[MapReducePublisher[Document]].getMethods.map(_.getName).toSet + val local = classOf[MapReduceObservable[Document]].getMethods.map(_.getName).toSet + + wrapped.foreach((name: String) => { + val cleanedName = name.stripPrefix("get") + assert(local.contains(name) || local.contains(cleanedName.head.toLower + cleanedName.tail), s"Missing: $name") + }) + } + + it should "call the underlying methods" in { + val wrapper = mock[MapReducePublisher[Document]] + val observable = MapReduceObservable(wrapper) + + val filter = Document("a" -> 1) + val duration = Duration(1, TimeUnit.SECONDS) + val sort = Document("sort" -> 1) + val scope = Document("mod" -> 1) + val collation = Collation.builder().locale("en").build() + val batchSize = 10 + + observable.filter(filter) + observable.scope(scope) + observable.sort(sort) + observable.limit(1) + observable.maxTime(duration) + observable.collectionName("collectionName") + observable.databaseName("databaseName") + observable.finalizeFunction("final") + observable.action(MapReduceAction.REPLACE) + observable.jsMode(true) + observable.verbose(true) + observable.bypassDocumentValidation(true) + observable.collation(collation) + observable.batchSize(batchSize) + observable.timeoutMode(TimeoutMode.ITERATION) + + verify(wrapper).filter(filter) + verify(wrapper).scope(scope) + verify(wrapper).sort(sort) + verify(wrapper).limit(1) + verify(wrapper).maxTime(duration.toMillis, TimeUnit.MILLISECONDS) + verify(wrapper).collectionName("collectionName") + verify(wrapper).databaseName("databaseName") + verify(wrapper).finalizeFunction("final") + verify(wrapper).action(MapReduceAction.REPLACE) + verify(wrapper).jsMode(true) + verify(wrapper).verbose(true) + verify(wrapper).bypassDocumentValidation(true) + verify(wrapper).collation(collation) + verify(wrapper).batchSize(batchSize) + verify(wrapper).timeoutMode(TimeoutMode.ITERATION) + verifyNoMoreInteractions(wrapper) + + observable.toCollection() + verify(wrapper).toCollection + verifyNoMoreInteractions(wrapper) + } +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/MongoClientSettingsSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/MongoClientSettingsSpec.scala new file mode 100644 index 00000000000..3a25d3d5518 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/MongoClientSettingsSpec.scala @@ -0,0 +1,68 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala + +import com.mongodb.Block +import org.bson.codecs.configuration.CodecRegistries._ +import org.mongodb.scala.MongoClient.DEFAULT_CODEC_REGISTRY +import org.mongodb.scala.bson.codecs.DocumentCodecProvider +import org.mongodb.scala.connection.ConnectionPoolSettings.Builder +import org.mongodb.scala.connection._ + +class MongoClientSettingsSpec extends BaseSpec { + + "MongoClientSettings" should "default to the Scala Codec Registry" in { + MongoClientSettings.builder().build().getCodecRegistry should equal(DEFAULT_CODEC_REGISTRY) + } + + it should "keep the default Scala Codec Registry if no codec registry is set" in { + val settings = MongoClientSettings.builder().readPreference(ReadPreference.nearest()).build() + MongoClientSettings.builder(settings).build().getCodecRegistry should equal(DEFAULT_CODEC_REGISTRY) + } + + it should "use a non-default Codec Registry if set" in { + val codecRegistry = fromProviders(DocumentCodecProvider()) + val settings = MongoClientSettings.builder().codecRegistry(codecRegistry).build() + MongoClientSettings.builder(settings).build().getCodecRegistry should equal(codecRegistry) + } + + it should "allow local Builder types" in { + MongoClientSettings + .builder() + .applyToClusterSettings(new Block[ClusterSettings.Builder] { + override def apply(t: ClusterSettings.Builder): Unit = {} + }) + .applyToConnectionPoolSettings(new Block[ConnectionPoolSettings.Builder] { + override def apply(t: Builder): Unit = {} + }) + .applyToServerSettings(new Block[ServerSettings.Builder] { + override def apply(t: ServerSettings.Builder): Unit = {} + }) + .applyToSocketSettings(new Block[SocketSettings.Builder] { + override def apply(t: SocketSettings.Builder): Unit = { + t.applyToProxySettings(new Block[ProxySettings.Builder] { + override def apply(t: ProxySettings.Builder): Unit = {} + }) + } + }) + .applyToSslSettings(new Block[SslSettings.Builder] { + override def apply(t: SslSettings.Builder): Unit = {} + }) + .build() + } + +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/MongoClientSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/MongoClientSpec.scala new file mode 100644 index 00000000000..762ec19d1c6 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/MongoClientSpec.scala @@ -0,0 +1,145 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.mongodb.scala + +import com.mongodb.reactivestreams.client.{ MongoClient => JMongoClient } +import org.bson.BsonDocument +import org.mockito.Mockito.verify +import org.mongodb.scala.model.bulk.{ ClientBulkWriteOptions, ClientNamespacedWriteModel } +import org.scalatestplus.mockito.MockitoSugar + +import scala.collection.JavaConverters._ + +class MongoClientSpec extends BaseSpec with MockitoSugar { + + val wrapped = mock[JMongoClient] + val clientSession = mock[ClientSession] + val mongoClient = new MongoClient(wrapped) + val namespace = new MongoNamespace("db.coll") + + "MongoClient" should "have the same methods as the wrapped MongoClient" in { + val wrapped = classOf[JMongoClient].getMethods.map(_.getName).toSet -- Seq("getSettings") + val local = classOf[MongoClient].getMethods.map(_.getName) + + wrapped.foreach((name: String) => { + val cleanedName = name.stripPrefix("get") + assert(local.contains(name) || local.contains(cleanedName.head.toLower + cleanedName.tail), s"Missing: $name") + }) + } + + it should "accept MongoDriverInformation" in { + val driverInformation = MongoDriverInformation.builder().driverName("test").driverVersion("1.2.0").build() + MongoClient("mongodb://localhost", Some(driverInformation)) + } + + it should "call the underlying getDatabase" in { + mongoClient.getDatabase("dbName") + + verify(wrapped).getDatabase("dbName") + } + + it should "call the underlying close" in { + mongoClient.close() + + verify(wrapped).close() + } + + it should "call the underlying startSession" in { + val clientSessionOptions = ClientSessionOptions.builder().build() + mongoClient.startSession(clientSessionOptions) + + verify(wrapped).startSession(clientSessionOptions) + } + + it should "call the underlying listDatabases[T]" in { + mongoClient.listDatabases() + mongoClient.listDatabases(clientSession) + mongoClient.listDatabases[BsonDocument]() + mongoClient.listDatabases[BsonDocument](clientSession) + + verify(wrapped).listDatabases(classOf[Document]) + verify(wrapped).listDatabases(clientSession, classOf[Document]) + verify(wrapped).listDatabases(classOf[BsonDocument]) + verify(wrapped).listDatabases(clientSession, classOf[BsonDocument]) + } + + it should "call the underlying listDatabaseNames" in { + mongoClient.listDatabaseNames() + mongoClient.listDatabaseNames(clientSession) + + verify(wrapped).listDatabaseNames() + verify(wrapped).listDatabaseNames(clientSession) + } + + it should "call the underlying watch" in { + val pipeline = List(Document("$match" -> 1)) + + mongoClient.watch() shouldBe a[ChangeStreamObservable[_]] + mongoClient.watch(pipeline) shouldBe a[ChangeStreamObservable[_]] + mongoClient.watch[BsonDocument](pipeline) shouldBe a[ChangeStreamObservable[_]] + mongoClient.watch(clientSession, pipeline) shouldBe a[ChangeStreamObservable[_]] + mongoClient.watch[BsonDocument](clientSession, pipeline) shouldBe a[ChangeStreamObservable[_]] + + verify(wrapped).watch(classOf[Document]) + verify(wrapped).watch(pipeline.asJava, classOf[Document]) + verify(wrapped).watch(pipeline.asJava, classOf[BsonDocument]) + verify(wrapped).watch(clientSession, pipeline.asJava, classOf[Document]) + verify(wrapped).watch(clientSession, pipeline.asJava, classOf[BsonDocument]) + } + + it should "call the underlying bulkWrite with models only" in { + val models = List(ClientNamespacedWriteModel.insertOne(namespace, Document("key" -> "value"))) + mongoClient.bulkWrite(models) shouldBe a[SingleObservable[_]] + verify(wrapped).bulkWrite(models.asJava) + } + + it should "call the 
underlying bulkWrite with models and options" in { + val models = List(ClientNamespacedWriteModel.insertOne(namespace, Document("key" -> "value"))) + val options = ClientBulkWriteOptions.clientBulkWriteOptions() + + mongoClient.bulkWrite(models, options) + + verify(wrapped).bulkWrite(models.asJava, options) + } + + it should "call the underlying bulkWrite with clientSession and models" in { + val models = List(ClientNamespacedWriteModel.insertOne(namespace, Document("key" -> "value"))) + + mongoClient.bulkWrite(clientSession, models) + + verify(wrapped).bulkWrite(clientSession, models.asJava) + } + + it should "call the underlying bulkWrite with clientSession, models, and options" in { + val models = List(ClientNamespacedWriteModel.insertOne(namespace, Document("key" -> "value"))) + val options = ClientBulkWriteOptions.clientBulkWriteOptions() + + mongoClient.bulkWrite(clientSession, models, options) + verify(wrapped).bulkWrite(clientSession, models.asJava, options) + } + + it should "call the underlying getClusterDescription" in { + mongoClient.getClusterDescription + verify(wrapped).getClusterDescription + } + + it should "call the underlying appendMetadata" in { + val driverInformation = MongoDriverInformation.builder().build() + mongoClient.appendMetadata(driverInformation) + verify(wrapped).appendMetadata(driverInformation) + } +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/MongoCollectionSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/MongoCollectionSpec.scala new file mode 100644 index 00000000000..5d91d8e9202 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/MongoCollectionSpec.scala @@ -0,0 +1,533 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala + +import java.util.concurrent.TimeUnit + +import com.mongodb.reactivestreams.client.{ MongoCollection => JMongoCollection } +import org.bson.BsonDocument +import org.bson.codecs.BsonValueCodecProvider +import org.bson.codecs.configuration.CodecRegistries.fromProviders +import org.mockito.Mockito.{ times, verify } +import org.mongodb.scala.model._ +import org.scalatestplus.mockito.MockitoSugar + +import scala.collection.JavaConverters._ + +class MongoCollectionSpec extends BaseSpec with MockitoSugar { + + val wrapped = mock[JMongoCollection[Document]] + val clientSession = mock[ClientSession] + val mongoCollection = MongoCollection[Document](wrapped) + val readPreference = ReadPreference.secondary() + val collation = Collation.builder().locale("en").build() + + val filter: Document = Document("filter" -> 1) + + "MongoCollection" should "have the same methods as the wrapped MongoCollection" in { + val wrapped = classOf[JMongoCollection[Document]].getMethods.map(_.getName).toSet + val local = classOf[MongoCollection[Document]].getMethods.map(_.getName).toSet + + wrapped.foreach((name: String) => { + val cleanedName = name.stripPrefix("get") + assert(local.contains(name) || local.contains(cleanedName.head.toLower + cleanedName.tail), s"Missing: $name") + }) + } + + it should "return the underlying getNamespace" in { + mongoCollection.namespace + + verify(wrapped).getNamespace + } + + it should "return the underlying getCodecRegistry" in { + mongoCollection.codecRegistry + + verify(wrapped).getCodecRegistry + } + + it should "return the underlying getReadPreference" in { + mongoCollection.readPreference + + verify(wrapped).getReadPreference + } + + it should "return the underlying getWriteConcern" in { + mongoCollection.writeConcern + + verify(wrapped).getWriteConcern + } + + it should "return the underlying getReadConcern" in { + mongoCollection.readConcern + + verify(wrapped).getReadConcern + } + + it should "return the underlying getDocumentClass" in { + mongoCollection.documentClass + + verify(wrapped).getDocumentClass + } + + it should "return the underlying withCodecRegistry" in { + val codecRegistry = fromProviders(new BsonValueCodecProvider()) + + mongoCollection.withCodecRegistry(codecRegistry) + + verify(wrapped).withCodecRegistry(codecRegistry) + } + + it should "return the underlying withReadPreference" in { + mongoCollection.withReadPreference(readPreference) + + verify(wrapped).withReadPreference(readPreference) + } + + it should "return the underlying withWriteConcern" in { + val writeConcern = WriteConcern.MAJORITY + mongoCollection.withWriteConcern(writeConcern) + + verify(wrapped).withWriteConcern(writeConcern) + } + + it should "return the underlying withReadConcern" in { + val readConcern = ReadConcern.MAJORITY + mongoCollection.withReadConcern(readConcern) + + verify(wrapped).withReadConcern(readConcern) + } + + it should "return the underlying withDocumentClass" in { + mongoCollection.withDocumentClass() + mongoCollection.withDocumentClass[Document]() + mongoCollection.withDocumentClass[BsonDocument]() + + verify(wrapped, times(2)).withDocumentClass(classOf[Document]) + verify(wrapped).withDocumentClass(classOf[BsonDocument]) + + } + + it should "return the underlying countDocuments" in { + val countOptions = CountOptions() + + mongoCollection.countDocuments() + mongoCollection.countDocuments(filter) + mongoCollection.countDocuments(filter, countOptions) + mongoCollection.countDocuments(clientSession) + 
mongoCollection.countDocuments(clientSession, filter) + mongoCollection.countDocuments(clientSession, filter, countOptions) + + verify(wrapped).countDocuments() + verify(wrapped).countDocuments(filter) + verify(wrapped).countDocuments(filter, countOptions) + verify(wrapped).countDocuments(clientSession) + verify(wrapped).countDocuments(clientSession, filter) + verify(wrapped).countDocuments(clientSession, filter, countOptions) + } + + it should "return the underlying estimatedDocumentCount" in { + val options = EstimatedDocumentCountOptions().maxTime(1, TimeUnit.SECONDS) + + mongoCollection.estimatedDocumentCount() + mongoCollection.estimatedDocumentCount(options) + + verify(wrapped).estimatedDocumentCount() + verify(wrapped).estimatedDocumentCount(options) + } + + it should "wrap the underlying DistinctObservable correctly" in { + mongoCollection.distinct[String]("fieldName") + mongoCollection.distinct[String]("fieldName", filter) + mongoCollection.distinct[String](clientSession, "fieldName") + mongoCollection.distinct[String](clientSession, "fieldName", filter) + + verify(wrapped).distinct("fieldName", classOf[String]) + verify(wrapped).distinct("fieldName", filter, classOf[String]) + verify(wrapped).distinct(clientSession, "fieldName", classOf[String]) + verify(wrapped).distinct(clientSession, "fieldName", filter, classOf[String]) + } + + it should "wrap the underlying FindObservable correctly" in { + mongoCollection.find() shouldBe a[FindObservable[_]] + mongoCollection.find[BsonDocument]() shouldBe a[FindObservable[_]] + mongoCollection.find(filter) shouldBe a[FindObservable[_]] + mongoCollection.find[BsonDocument](filter) shouldBe a[FindObservable[_]] + mongoCollection.find(clientSession) shouldBe a[FindObservable[_]] + mongoCollection.find[BsonDocument](clientSession) shouldBe a[FindObservable[_]] + mongoCollection.find(clientSession, filter) shouldBe a[FindObservable[_]] + mongoCollection.find[BsonDocument](clientSession, filter) shouldBe a[FindObservable[_]] + + verify(wrapped).find(classOf[Document]) + verify(wrapped).find(classOf[BsonDocument]) + verify(wrapped).find(filter, classOf[Document]) + verify(wrapped).find(filter, classOf[BsonDocument]) + verify(wrapped).find(clientSession, classOf[Document]) + verify(wrapped).find(clientSession, classOf[BsonDocument]) + verify(wrapped).find(clientSession, filter, classOf[Document]) + verify(wrapped).find(clientSession, filter, classOf[BsonDocument]) + } + + it should "wrap the underlying AggregateObservable correctly" in { + val pipeline = List(Document("$match" -> 1)) + + mongoCollection.aggregate(pipeline) shouldBe a[AggregateObservable[_]] + mongoCollection.aggregate[BsonDocument](pipeline) shouldBe a[AggregateObservable[_]] + mongoCollection.aggregate(clientSession, pipeline) shouldBe a[AggregateObservable[_]] + mongoCollection.aggregate[BsonDocument](clientSession, pipeline) shouldBe a[AggregateObservable[_]] + + verify(wrapped).aggregate(pipeline.asJava, classOf[Document]) + verify(wrapped).aggregate(pipeline.asJava, classOf[BsonDocument]) + verify(wrapped).aggregate(clientSession, pipeline.asJava, classOf[Document]) + verify(wrapped).aggregate(clientSession, pipeline.asJava, classOf[BsonDocument]) + } + + it should "wrap the underlying MapReduceObservable correctly" in { + mongoCollection.mapReduce("map", "reduce") shouldBe a[MapReduceObservable[_]] + mongoCollection.mapReduce[BsonDocument]("map", "reduce") shouldBe a[MapReduceObservable[_]] + mongoCollection.mapReduce(clientSession, "map", "reduce") shouldBe 
a[MapReduceObservable[_]] + mongoCollection.mapReduce[BsonDocument](clientSession, "map", "reduce") shouldBe a[MapReduceObservable[_]] + + verify(wrapped).mapReduce("map", "reduce", classOf[Document]) + verify(wrapped).mapReduce("map", "reduce", classOf[BsonDocument]) + verify(wrapped).mapReduce(clientSession, "map", "reduce", classOf[Document]) + verify(wrapped).mapReduce(clientSession, "map", "reduce", classOf[BsonDocument]) + } + + it should "wrap the underlying bulkWrite correctly" in { + val bulkRequests = List( + InsertOneModel(Document("a" -> 1)), + DeleteOneModel(filter), + UpdateOneModel(filter, Document("$set" -> Document("b" -> 1))) + ) + val bulkWriteOptions = new BulkWriteOptions().ordered(true) + + mongoCollection.bulkWrite(bulkRequests) + mongoCollection.bulkWrite(bulkRequests, bulkWriteOptions) + mongoCollection.bulkWrite(clientSession, bulkRequests) + mongoCollection.bulkWrite(clientSession, bulkRequests, bulkWriteOptions) + + verify(wrapped).bulkWrite(bulkRequests.asJava) + verify(wrapped).bulkWrite(bulkRequests.asJava, bulkWriteOptions) + verify(wrapped).bulkWrite(clientSession, bulkRequests.asJava) + verify(wrapped).bulkWrite(clientSession, bulkRequests.asJava, bulkWriteOptions) + } + + it should "wrap the underlying insertOne correctly" in { + val insertDoc = Document("a" -> 1) + val insertOptions = InsertOneOptions().bypassDocumentValidation(true) + + mongoCollection.insertOne(insertDoc) + mongoCollection.insertOne(insertDoc, insertOptions) + mongoCollection.insertOne(clientSession, insertDoc) + mongoCollection.insertOne(clientSession, insertDoc, insertOptions) + + verify(wrapped).insertOne(insertDoc) + verify(wrapped).insertOne(insertDoc, insertOptions) + verify(wrapped).insertOne(clientSession, insertDoc) + verify(wrapped).insertOne(clientSession, insertDoc, insertOptions) + } + + it should "wrap the underlying insertMany correctly" in { + val insertDocs = List(Document("a" -> 1)) + val insertOptions = new InsertManyOptions().ordered(false) + + mongoCollection.insertMany(insertDocs) + mongoCollection.insertMany(insertDocs, insertOptions) + mongoCollection.insertMany(clientSession, insertDocs) + mongoCollection.insertMany(clientSession, insertDocs, insertOptions) + + verify(wrapped).insertMany(insertDocs.asJava) + verify(wrapped).insertMany(insertDocs.asJava, insertOptions) + verify(wrapped).insertMany(clientSession, insertDocs.asJava) + verify(wrapped).insertMany(clientSession, insertDocs.asJava, insertOptions) + } + + it should "wrap the underlying deleteOne correctly" in { + val options = new DeleteOptions().collation(collation) + + mongoCollection.deleteOne(filter) + mongoCollection.deleteOne(filter, options) + mongoCollection.deleteOne(clientSession, filter) + mongoCollection.deleteOne(clientSession, filter, options) + + verify(wrapped).deleteOne(filter) + verify(wrapped).deleteOne(filter, options) + verify(wrapped).deleteOne(clientSession, filter) + verify(wrapped).deleteOne(clientSession, filter, options) + } + + it should "wrap the underlying deleteMany correctly" in { + val options = new DeleteOptions().collation(collation) + mongoCollection.deleteMany(filter) + mongoCollection.deleteMany(filter, options) + mongoCollection.deleteMany(clientSession, filter) + mongoCollection.deleteMany(clientSession, filter, options) + + verify(wrapped).deleteMany(filter) + verify(wrapped).deleteMany(filter, options) + verify(wrapped).deleteMany(clientSession, filter) + verify(wrapped).deleteMany(clientSession, filter, options) + } + + it should "wrap the underlying 
replaceOne correctly" in { + val replacement = Document("a" -> 1) + val replaceOptions = new ReplaceOptions().upsert(true) + + mongoCollection.replaceOne(filter, replacement) + mongoCollection.replaceOne(filter, replacement, replaceOptions) + mongoCollection.replaceOne(clientSession, filter, replacement) + mongoCollection.replaceOne(clientSession, filter, replacement, replaceOptions) + + verify(wrapped).replaceOne(filter, replacement) + verify(wrapped).replaceOne(filter, replacement, replaceOptions) + verify(wrapped).replaceOne(clientSession, filter, replacement) + verify(wrapped).replaceOne(clientSession, filter, replacement, replaceOptions) + } + + it should "wrap the underlying updateOne correctly" in { + val update = Document("$set" -> Document("a" -> 2)) + val pipeline = Seq(update) + val updateOptions = new UpdateOptions().upsert(true) + + mongoCollection.updateOne(filter, update) + mongoCollection.updateOne(filter, update, updateOptions) + mongoCollection.updateOne(clientSession, filter, update) + mongoCollection.updateOne(clientSession, filter, update, updateOptions) + + mongoCollection.updateOne(filter, pipeline) + mongoCollection.updateOne(filter, pipeline, updateOptions) + mongoCollection.updateOne(clientSession, filter, pipeline) + mongoCollection.updateOne(clientSession, filter, pipeline, updateOptions) + + verify(wrapped).updateOne(filter, update) + verify(wrapped).updateOne(filter, update, updateOptions) + verify(wrapped).updateOne(clientSession, filter, update) + verify(wrapped).updateOne(clientSession, filter, update, updateOptions) + + verify(wrapped).updateOne(filter, pipeline.asJava) + verify(wrapped).updateOne(filter, pipeline.asJava, updateOptions) + verify(wrapped).updateOne(clientSession, filter, pipeline.asJava) + verify(wrapped).updateOne(clientSession, filter, pipeline.asJava, updateOptions) + } + + it should "wrap the underlying updateMany correctly" in { + val update = Document("$set" -> Document("a" -> 2)) + val pipeline = Seq(update) + val updateOptions = new UpdateOptions().upsert(true) + + mongoCollection.updateMany(filter, update) + mongoCollection.updateMany(filter, update, updateOptions) + mongoCollection.updateMany(clientSession, filter, update) + mongoCollection.updateMany(clientSession, filter, update, updateOptions) + + mongoCollection.updateMany(filter, pipeline) + mongoCollection.updateMany(filter, pipeline, updateOptions) + mongoCollection.updateMany(clientSession, filter, pipeline) + mongoCollection.updateMany(clientSession, filter, pipeline, updateOptions) + + verify(wrapped).updateMany(filter, update) + verify(wrapped).updateMany(filter, update, updateOptions) + verify(wrapped).updateMany(clientSession, filter, update) + verify(wrapped).updateMany(clientSession, filter, update, updateOptions) + + verify(wrapped).updateMany(filter, pipeline.asJava) + verify(wrapped).updateMany(filter, pipeline.asJava, updateOptions) + verify(wrapped).updateMany(clientSession, filter, pipeline.asJava) + verify(wrapped).updateMany(clientSession, filter, pipeline.asJava, updateOptions) + } + + it should "wrap the underlying findOneAndDelete correctly" in { + val options = new FindOneAndDeleteOptions().sort(Document("sort" -> 1)) + + mongoCollection.findOneAndDelete(filter) + mongoCollection.findOneAndDelete(filter, options) + mongoCollection.findOneAndDelete(clientSession, filter) + mongoCollection.findOneAndDelete(clientSession, filter, options) + + verify(wrapped).findOneAndDelete(filter) + verify(wrapped).findOneAndDelete(filter, options) + 
verify(wrapped).findOneAndDelete(clientSession, filter) + verify(wrapped).findOneAndDelete(clientSession, filter, options) + } + + it should "wrap the underlying findOneAndReplace correctly" in { + val replacement = Document("a" -> 2) + val options = new FindOneAndReplaceOptions().sort(Document("sort" -> 1)) + + mongoCollection.findOneAndReplace(filter, replacement) + mongoCollection.findOneAndReplace(filter, replacement, options) + mongoCollection.findOneAndReplace(clientSession, filter, replacement) + mongoCollection.findOneAndReplace(clientSession, filter, replacement, options) + + verify(wrapped).findOneAndReplace(filter, replacement) + verify(wrapped).findOneAndReplace(filter, replacement, options) + verify(wrapped).findOneAndReplace(clientSession, filter, replacement) + verify(wrapped).findOneAndReplace(clientSession, filter, replacement, options) + } + + it should "wrap the underlying findOneAndUpdate correctly" in { + val update = Document("a" -> 2) + val pipeline = Seq(update) + val options = new FindOneAndUpdateOptions().sort(Document("sort" -> 1)) + + mongoCollection.findOneAndUpdate(filter, update) + mongoCollection.findOneAndUpdate(filter, update, options) + mongoCollection.findOneAndUpdate(clientSession, filter, update) + mongoCollection.findOneAndUpdate(clientSession, filter, update, options) + + mongoCollection.findOneAndUpdate(filter, pipeline) + mongoCollection.findOneAndUpdate(filter, pipeline, options) + mongoCollection.findOneAndUpdate(clientSession, filter, pipeline) + mongoCollection.findOneAndUpdate(clientSession, filter, pipeline, options) + + verify(wrapped).findOneAndUpdate(filter, update) + verify(wrapped).findOneAndUpdate(filter, update, options) + verify(wrapped).findOneAndUpdate(clientSession, filter, update) + verify(wrapped).findOneAndUpdate(clientSession, filter, update, options) + + verify(wrapped).findOneAndUpdate(filter, pipeline.asJava) + verify(wrapped).findOneAndUpdate(filter, pipeline.asJava, options) + verify(wrapped).findOneAndUpdate(clientSession, filter, pipeline.asJava) + verify(wrapped).findOneAndUpdate(clientSession, filter, pipeline.asJava, options) + } + + it should "wrap the underlying drop correctly" in { + mongoCollection.drop() + mongoCollection.drop(clientSession) + + verify(wrapped).drop() + verify(wrapped).drop(clientSession) + } + + it should "wrap the underlying createIndex correctly" in { + val index = Document("a" -> 1) + val options = new IndexOptions().background(true) + + mongoCollection.createIndex(index) + mongoCollection.createIndex(index, options) + mongoCollection.createIndex(clientSession, index) + mongoCollection.createIndex(clientSession, index, options) + + verify(wrapped).createIndex(index) + verify(wrapped).createIndex(index, options) + verify(wrapped).createIndex(clientSession, index) + verify(wrapped).createIndex(clientSession, index, options) + } + + it should "wrap the underlying createIndexes correctly" in { + val indexes = new IndexModel(Document("a" -> 1)) + val options = new CreateIndexOptions() + + mongoCollection.createIndexes(List(indexes)) + mongoCollection.createIndexes(List(indexes), options) + mongoCollection.createIndexes(clientSession, List(indexes)) + mongoCollection.createIndexes(clientSession, List(indexes), options) + + verify(wrapped).createIndexes(List(indexes).asJava) + verify(wrapped).createIndexes(List(indexes).asJava, options) + verify(wrapped).createIndexes(clientSession, List(indexes).asJava) + verify(wrapped).createIndexes(clientSession, List(indexes).asJava, options) + } + + it should 
"wrap the underlying listIndexes correctly" in { + mongoCollection.listIndexes() + mongoCollection.listIndexes[BsonDocument]() + mongoCollection.listIndexes(clientSession) + mongoCollection.listIndexes[BsonDocument](clientSession) + + verify(wrapped).listIndexes(classOf[Document]) + verify(wrapped).listIndexes(classOf[BsonDocument]) + verify(wrapped).listIndexes(clientSession, classOf[Document]) + verify(wrapped).listIndexes(clientSession, classOf[BsonDocument]) + } + + it should "wrap the underlying dropIndex correctly" in { + val indexDocument = Document("""{a: 1}""") + val options = new DropIndexOptions() + + mongoCollection.dropIndex("indexName") + mongoCollection.dropIndex(indexDocument) + mongoCollection.dropIndex("indexName", options) + mongoCollection.dropIndex(indexDocument, options) + mongoCollection.dropIndex(clientSession, "indexName") + mongoCollection.dropIndex(clientSession, indexDocument) + mongoCollection.dropIndex(clientSession, "indexName", options) + mongoCollection.dropIndex(clientSession, indexDocument, options) + + verify(wrapped).dropIndex("indexName") + verify(wrapped).dropIndex(indexDocument) + verify(wrapped).dropIndex("indexName", options) + verify(wrapped).dropIndex(indexDocument, options) + verify(wrapped).dropIndex(clientSession, "indexName") + verify(wrapped).dropIndex(clientSession, indexDocument) + verify(wrapped).dropIndex(clientSession, "indexName", options) + verify(wrapped).dropIndex(clientSession, indexDocument, options) + } + + it should "wrap the underlying dropIndexes correctly" in { + val options = new DropIndexOptions() + + mongoCollection.dropIndexes() + mongoCollection.dropIndexes(options) + mongoCollection.dropIndexes(clientSession) + mongoCollection.dropIndexes(clientSession, options) + + verify(wrapped).dropIndexes() + verify(wrapped).dropIndexes(options) + verify(wrapped).dropIndexes(clientSession) + verify(wrapped).dropIndexes(clientSession, options) + } + + it should "wrap the underlying renameCollection correctly" in { + val newNamespace = new MongoNamespace("db", "coll") + val options = new RenameCollectionOptions() + + mongoCollection.renameCollection(newNamespace) + mongoCollection.renameCollection(newNamespace, options) + mongoCollection.renameCollection(clientSession, newNamespace) + mongoCollection.renameCollection(clientSession, newNamespace, options) + + verify(wrapped).renameCollection(newNamespace) + verify(wrapped).renameCollection(newNamespace, options) + verify(wrapped).renameCollection(clientSession, newNamespace) + verify(wrapped).renameCollection(clientSession, newNamespace, options) + } + + it should "wrap the underlying ChangeStreamPublisher correctly" in { + val pipeline = List(Document("$match" -> 1)) + + mongoCollection.watch() shouldBe a[ChangeStreamObservable[_]] + mongoCollection.watch[BsonDocument]() shouldBe a[ChangeStreamObservable[_]] + mongoCollection.watch(pipeline) shouldBe a[ChangeStreamObservable[_]] + mongoCollection.watch[BsonDocument](pipeline) shouldBe a[ChangeStreamObservable[_]] + mongoCollection.watch(clientSession) shouldBe a[ChangeStreamObservable[_]] + mongoCollection.watch[BsonDocument](clientSession) shouldBe a[ChangeStreamObservable[_]] + mongoCollection.watch(clientSession, pipeline) shouldBe a[ChangeStreamObservable[_]] + mongoCollection.watch[BsonDocument](clientSession, pipeline) shouldBe a[ChangeStreamObservable[_]] + + verify(wrapped).watch(classOf[Document]) + verify(wrapped).watch(classOf[BsonDocument]) + verify(wrapped).watch(pipeline.asJava, classOf[Document]) + 
verify(wrapped).watch(pipeline.asJava, classOf[BsonDocument]) + verify(wrapped).watch(clientSession, classOf[Document]) + verify(wrapped).watch(clientSession, classOf[BsonDocument]) + verify(wrapped).watch(clientSession, pipeline.asJava, classOf[Document]) + verify(wrapped).watch(clientSession, pipeline.asJava, classOf[BsonDocument]) + } + +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/MongoCredentialSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/MongoCredentialSpec.scala new file mode 100644 index 00000000000..5c4d228f2df --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/MongoCredentialSpec.scala @@ -0,0 +1,61 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala + +import com.mongodb.{ MongoCredential => JMongoCredential } + +class MongoCredentialSpec extends BaseSpec { + + "MongoCredential" should "have the same methods as the wrapped MongoCredential" in { + val wrapped = classOf[JMongoCredential].getMethods.map(_.getName) + val local = classOf[MongoCredential].getMethods.map(_.getName) + + wrapped.foreach((name: String) => { + val cleanedName = name.stripPrefix("get") + assert(local.contains(name) || local.contains(cleanedName.head.toLower + cleanedName.tail), s"Missing: $name") + }) + } + + it should "create the expected credential" in { + MongoCredential.createCredential("user", "source", "pass".toCharArray) should + equal(JMongoCredential.createCredential("user", "source", "pass".toCharArray)) + } + + it should "create the expected createScramSha1Credential" in { + MongoCredential.createScramSha1Credential("user", "source", "pass".toCharArray) should + equal(JMongoCredential.createScramSha1Credential("user", "source", "pass".toCharArray)) + } + + it should "create the expected createScramSha256Credential" in { + MongoCredential.createScramSha256Credential("user", "source", "pass".toCharArray) should + equal(JMongoCredential.createScramSha256Credential("user", "source", "pass".toCharArray)) + } + + it should "create the expected createMongoX509Credential" in { + MongoCredential.createMongoX509Credential() should equal(JMongoCredential.createMongoX509Credential()) + MongoCredential.createMongoX509Credential("user") should equal(JMongoCredential.createMongoX509Credential("user")) + } + + it should "create the expected createPlainCredential" in { + MongoCredential.createPlainCredential("user", "source", "pass".toCharArray) should + equal(JMongoCredential.createPlainCredential("user", "source", "pass".toCharArray)) + } + + it should "create the expected createGSSAPICredential" in { + MongoCredential.createGSSAPICredential("user") should equal(JMongoCredential.createGSSAPICredential("user")) + } +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/MongoDatabaseSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/MongoDatabaseSpec.scala new file mode 100644 index 00000000000..1e48aed6204 --- /dev/null +++ 
b/driver-scala/src/test/scala/org/mongodb/scala/MongoDatabaseSpec.scala @@ -0,0 +1,235 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala + +import scala.collection.JavaConverters._ + +import org.bson.BsonDocument +import org.bson.codecs.BsonValueCodecProvider +import org.bson.codecs.configuration.CodecRegistries.fromProviders +import com.mongodb.reactivestreams.client.{ ListCollectionsPublisher, MongoDatabase => JMongoDatabase } +import org.mockito.Mockito.{ verify, when } +import org.mongodb.scala.bson.conversions.Bson +import org.mongodb.scala.model._ +import org.scalatestplus.mockito.MockitoSugar + +class MongoDatabaseSpec extends BaseSpec with MockitoSugar { + + val wrapped = mock[JMongoDatabase] + val clientSession = mock[ClientSession] + val mongoDatabase = MongoDatabase(wrapped) + val command = Document() + val readPreference: ReadPreference = ReadPreference.secondary() + + "MongoDatabase" should "have the same methods as the wrapped MongoDatabase" in { + val wrapped = classOf[JMongoDatabase].getMethods.map(_.getName) + + val local = classOf[MongoDatabase].getMethods.map(_.getName) + + wrapped.foreach((name: String) => { + val cleanedName = name.stripPrefix("get") + assert(local.contains(name) || local.contains(cleanedName.head.toLower + cleanedName.tail), s"Missing: $name") + }) + } + + it should "return the underlying getCollection[T]" in { + mongoDatabase.getCollection("collectionName") + mongoDatabase.getCollection[BsonDocument]("collectionName") + + verify(wrapped).getCollection("collectionName", classOf[Document]) + verify(wrapped).getCollection("collectionName", classOf[BsonDocument]) + } + + it should "return the underlying getName" in { + mongoDatabase.name + + verify(wrapped).getName + } + + it should "return the underlying getCodecRegistry" in { + mongoDatabase.codecRegistry + + verify(wrapped).getCodecRegistry + } + + it should "return the underlying getReadPreference" in { + mongoDatabase.readPreference + + verify(wrapped).getReadPreference + } + + it should "return the underlying getWriteConcern" in { + mongoDatabase.writeConcern + + verify(wrapped).getWriteConcern + } + + it should "return the underlying getReadConcern" in { + mongoDatabase.readConcern + + verify(wrapped).getReadConcern + } + + it should "return the underlying withCodecRegistry" in { + val codecRegistry = fromProviders(new BsonValueCodecProvider()) + + mongoDatabase.withCodecRegistry(codecRegistry) + + verify(wrapped).withCodecRegistry(codecRegistry) + } + + it should "return the underlying withReadPreference" in { + mongoDatabase.withReadPreference(readPreference) + + verify(wrapped).withReadPreference(readPreference) + } + + it should "return the underlying withWriteConcern" in { + val writeConcern = WriteConcern.MAJORITY + mongoDatabase.withWriteConcern(writeConcern) + + verify(wrapped).withWriteConcern(writeConcern) + } + + it should "return the underlying withReadConcern" in { + val readConcern = 
ReadConcern.MAJORITY + mongoDatabase.withReadConcern(readConcern) + + verify(wrapped).withReadConcern(readConcern) + } + + it should "call the underlying runCommand[T] when writing" in { + mongoDatabase.runCommand(command) + mongoDatabase.runCommand[BsonDocument](command) + mongoDatabase.runCommand(clientSession, command) + mongoDatabase.runCommand[BsonDocument](clientSession, command) + + verify(wrapped).runCommand(command, classOf[Document]) + verify(wrapped).runCommand(command, classOf[BsonDocument]) + verify(wrapped).runCommand(clientSession, command, classOf[Document]) + verify(wrapped).runCommand(clientSession, command, classOf[BsonDocument]) + } + + it should "call the underlying runCommand[T] when reading" in { + mongoDatabase.runCommand(command, readPreference) + mongoDatabase.runCommand[BsonDocument](command, readPreference) + mongoDatabase.runCommand(clientSession, command, readPreference) + mongoDatabase.runCommand[BsonDocument](clientSession, command, readPreference) + + verify(wrapped).runCommand(command, readPreference, classOf[Document]) + verify(wrapped).runCommand(command, readPreference, classOf[BsonDocument]) + verify(wrapped).runCommand(clientSession, command, readPreference, classOf[Document]) + verify(wrapped).runCommand(clientSession, command, readPreference, classOf[BsonDocument]) + } + + it should "call the underlying drop()" in { + mongoDatabase.drop() + mongoDatabase.drop(clientSession) + + verify(wrapped).drop() + verify(wrapped).drop(clientSession) + } + + it should "call the underlying listCollectionNames()" in { + mongoDatabase.listCollectionNames() + mongoDatabase.listCollectionNames(clientSession) + + verify(wrapped).listCollectionNames() + verify(wrapped).listCollectionNames(clientSession) + } + + it should "call the underlying listCollections()" in { + when(wrapped.listCollections()).thenReturn(mock[ListCollectionsPublisher[org.bson.Document]]) + when(wrapped.listCollections(classOf[BsonDocument])).thenReturn(mock[ListCollectionsPublisher[BsonDocument]]) + when(wrapped.listCollections(clientSession)).thenReturn(mock[ListCollectionsPublisher[org.bson.Document]]) + when(wrapped.listCollections(clientSession, classOf[BsonDocument])) + .thenReturn(mock[ListCollectionsPublisher[BsonDocument]]) + + mongoDatabase.listCollections() + mongoDatabase.listCollections[BsonDocument]() + mongoDatabase.listCollections(clientSession) + mongoDatabase.listCollections[BsonDocument](clientSession) + } + + it should "call the underlying createCollection()" in { + val options = CreateCollectionOptions() + .capped(true) + .validationOptions( + ValidationOptions() + .validator(Document("""{level: {$gte: 10}}""")) + .validationLevel(ValidationLevel.MODERATE) + .validationAction(ValidationAction.WARN) + ) + .indexOptionDefaults(IndexOptionDefaults().storageEngine(Document("""{storageEngine: { mmapv1: {}}}"""))) + .storageEngineOptions(Document("""{ wiredTiger: {}}""")) + + mongoDatabase.createCollection("collectionName") + mongoDatabase.createCollection("collectionName", options) + mongoDatabase.createCollection(clientSession, "collectionName") + mongoDatabase.createCollection(clientSession, "collectionName", options) + + verify(wrapped).createCollection("collectionName") + verify(wrapped).createCollection("collectionName", options) + verify(wrapped).createCollection(clientSession, "collectionName") + verify(wrapped).createCollection(clientSession, "collectionName", options) + } + + it should "call the underlying createView()" in { + val options = 
CreateViewOptions().collation(Collation.builder().locale("en").build()) + val pipeline = List.empty[Bson] + + mongoDatabase.createView("viewName", "collectionName", pipeline) + mongoDatabase.createView("viewName", "collectionName", pipeline, options) + mongoDatabase.createView(clientSession, "viewName", "collectionName", pipeline) + mongoDatabase.createView(clientSession, "viewName", "collectionName", pipeline, options) + + verify(wrapped).createView("viewName", "collectionName", pipeline.asJava) + verify(wrapped).createView("viewName", "collectionName", pipeline.asJava, options) + verify(wrapped).createView(clientSession, "viewName", "collectionName", pipeline.asJava) + verify(wrapped).createView(clientSession, "viewName", "collectionName", pipeline.asJava, options) + } + + it should "call the underlying watch" in { + val pipeline = List(Document("$match" -> 1)) + + mongoDatabase.watch() shouldBe a[ChangeStreamObservable[_]] + mongoDatabase.watch(pipeline) shouldBe a[ChangeStreamObservable[_]] + mongoDatabase.watch[BsonDocument](pipeline) shouldBe a[ChangeStreamObservable[_]] + mongoDatabase.watch(clientSession, pipeline) shouldBe a[ChangeStreamObservable[_]] + mongoDatabase.watch[BsonDocument](clientSession, pipeline) shouldBe a[ChangeStreamObservable[_]] + + verify(wrapped).watch(classOf[Document]) + verify(wrapped).watch(pipeline.asJava, classOf[Document]) + verify(wrapped).watch(pipeline.asJava, classOf[BsonDocument]) + verify(wrapped).watch(clientSession, pipeline.asJava, classOf[Document]) + verify(wrapped).watch(clientSession, pipeline.asJava, classOf[BsonDocument]) + } + + it should "call the underlying aggregate" in { + val pipeline = List(Document("$match" -> 1)) + + mongoDatabase.aggregate(pipeline) shouldBe a[AggregateObservable[_]] + mongoDatabase.aggregate[BsonDocument](pipeline) shouldBe a[AggregateObservable[_]] + mongoDatabase.aggregate(clientSession, pipeline) shouldBe a[AggregateObservable[_]] + mongoDatabase.aggregate[BsonDocument](clientSession, pipeline) shouldBe a[AggregateObservable[_]] + + verify(wrapped).aggregate(pipeline.asJava, classOf[Document]) + verify(wrapped).aggregate(pipeline.asJava, classOf[BsonDocument]) + verify(wrapped).aggregate(clientSession, pipeline.asJava, classOf[Document]) + verify(wrapped).aggregate(clientSession, pipeline.asJava, classOf[BsonDocument]) + } +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/MongoDriverInformationSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/MongoDriverInformationSpec.scala new file mode 100644 index 00000000000..a1ca4419ceb --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/MongoDriverInformationSpec.scala @@ -0,0 +1,48 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala + +import java.lang.reflect.Modifier.isStatic + +class MongoDriverInformationSpec extends BaseSpec { + + "MongoDriverInformation" should "have the same static fields as the wrapped MongoDriverInformation" in { + val MongoDriverInformationClass: Class[MongoDriverInformation] = classOf[com.mongodb.MongoDriverInformation] + val wrappedFields = + MongoDriverInformationClass.getDeclaredFields.filter(f => isStatic(f.getModifiers)).map(_.getName).toSet + val wrappedMethods = + MongoDriverInformationClass.getDeclaredMethods.filter(f => isStatic(f.getModifiers)).map(_.getName).toSet + val exclusions = Set("$VALUES", "$values", "valueOf", "values", "access$200") + + val wrapped = (wrappedFields ++ wrappedMethods) -- exclusions + val local = MongoDriverInformation.getClass.getDeclaredMethods.map(_.getName).toSet -- Set( + "apply", + "$deserializeLambda$", + "$anonfun$fromString$1" + ) + + local should equal(wrapped) + } + + it should "return the underlying builder" in { + MongoDriverInformation.builder().getClass should equal(classOf[com.mongodb.MongoDriverInformation.Builder]) + MongoDriverInformation.builder(MongoDriverInformation.builder().build()).getClass should equal( + classOf[com.mongodb.MongoDriverInformation.Builder] + ) + } + +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/ObservableImplicitsToGridFSUploadPublisherUnitSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/ObservableImplicitsToGridFSUploadPublisherUnitSpec.scala new file mode 100644 index 00000000000..21a5d049e04 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/ObservableImplicitsToGridFSUploadPublisherUnitSpec.scala @@ -0,0 +1,98 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.mongodb.scala + +import com.mongodb.reactivestreams.client.gridfs.GridFSUploadPublisher +import org.mongodb.scala.bson.{ BsonInt32, BsonValue, ObjectId } +import org.reactivestreams.Subscriber +import reactor.core.publisher.Mono + +class ObservableImplicitsToGridFSUploadPublisherUnitSpec extends BaseSpec { + it should "emit exactly one element" in { + var onNextCounter = 0 + VoidGridFSUploadPublisher().toObservable().subscribe((_: Void) => onNextCounter += 1) + onNextCounter shouldBe 0 + + onNextCounter = 0 + var errorActual: Option[Throwable] = None + var completed = false + toGridFSUploadPublisherUnit().subscribe( + (_: Unit) => onNextCounter += 1, + (error: Throwable) => errorActual = Some(error), + () => completed = true + ) + onNextCounter shouldBe 1 + errorActual shouldBe None + completed shouldBe true + } + + it should "signal the underlying error" in { + var onNextCounter = 0 + val errorExpected = Some(new Exception()) + var errorActual: Option[Throwable] = None + var completed = false + toGridFSUploadPublisherUnit(errorExpected).subscribe( + (_: Unit) => onNextCounter += 1, + (error: Throwable) => errorActual = Some(error), + () => completed = true + ) + onNextCounter shouldBe 0 + errorActual shouldBe errorExpected + completed shouldBe false + } + + it should "work with explicit request" in { + var onNextCounter = 0 + var errorActual: Option[Throwable] = None + var completed = false + toGridFSUploadPublisherUnit().subscribe(new Observer[Unit] { + override def onSubscribe(subscription: Subscription): Unit = subscription.request(1) + + override def onNext(result: Unit): Unit = onNextCounter += 1 + + override def onError(error: Throwable): Unit = errorActual = Some(error) + + override def onComplete(): Unit = completed = true + }) + onNextCounter shouldBe 1 + errorActual shouldBe None + completed shouldBe true + } + + def toGridFSUploadPublisherUnit(error: Option[Exception] = Option.empty): Observable[Unit] = { + gridfs.ToGridFSUploadPublisherUnit(VoidGridFSUploadPublisher(error)).toObservable() + } + + /** + * A [[GridFSUploadPublisher]] that emits no items. + */ + case class VoidGridFSUploadPublisher(error: Option[Exception] = Option.empty) extends GridFSUploadPublisher[Void] { + private val objectId = new ObjectId() + private val id = BsonInt32(0) + + override def getObjectId: ObjectId = objectId + + override def getId: BsonValue = id + + override def subscribe(subscriber: Subscriber[_ >: Void]): Unit = { + val mono = error match { + case Some(error) => Mono.error(error) + case None => Mono.empty() + } + mono.subscribe(subscriber) + } + } +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/ReadConcernLevelSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/ReadConcernLevelSpec.scala new file mode 100644 index 00000000000..9c113dea99a --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/ReadConcernLevelSpec.scala @@ -0,0 +1,70 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala + +import java.lang.reflect.Modifier._ + +import scala.util.{ Success, Try } + +import org.scalatest.prop.TableDrivenPropertyChecks._ + +class ReadConcernLevelSpec extends BaseSpec { + + "ReadConcernLevel" should "have the same static fields as the wrapped ReadConcern" in { + val wrappedFields = + classOf[com.mongodb.ReadConcernLevel].getDeclaredFields.filter(f => isStatic(f.getModifiers)).map(_.getName).toSet + val wrappedMethods = classOf[com.mongodb.ReadConcernLevel].getDeclaredMethods + .filter(f => isStatic(f.getModifiers)) + .map(_.getName) + .toSet + val exclusions = Set("$VALUES", "$values", "valueOf", "values") + + val wrapped = (wrappedFields ++ wrappedMethods) -- exclusions + val local = ReadConcernLevel.getClass.getDeclaredMethods.map(_.getName).toSet -- Set( + "apply", + "$deserializeLambda$", + "$anonfun$fromString$1" + ) + + local should equal(wrapped) + } + + it should "return the expected ReadConcerns" in { + forAll(readConcernLevels) { (stringValue: String, expectedValue: Try[ReadConcernLevel]) => + ReadConcernLevel.fromString(stringValue) should equal(expectedValue) + ReadConcernLevel.fromString(stringValue.toUpperCase()) should equal(expectedValue) + } + } + + it should "handle invalid strings" in { + forAll(invalidReadConcernLevels) { (stringValue: String) => + ReadConcernLevel.fromString(stringValue) should be a Symbol("failure") + } + } + + val readConcernLevels = + Table( + ("stringValue", "JavaValue"), + ("local", Success(ReadConcernLevel.LOCAL)), + ("linearizable", Success(ReadConcernLevel.LINEARIZABLE)), + ("majority", Success(ReadConcernLevel.MAJORITY)), + ("snapshot", Success(ReadConcernLevel.SNAPSHOT)), + ("available", Success(ReadConcernLevel.AVAILABLE)) + ) + + val invalidReadConcernLevels = Table("invalid strings", "all", "none") +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/ReadConcernSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/ReadConcernSpec.scala new file mode 100644 index 00000000000..3dea5a8a236 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/ReadConcernSpec.scala @@ -0,0 +1,54 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala + +import java.lang.reflect.Modifier._ + +import com.mongodb.{ ReadConcern => JReadConcern } + +import org.scalatest.prop.TableDrivenPropertyChecks._ + +class ReadConcernSpec extends BaseSpec { + + "ReadConcern" should "have the same static fields as the wrapped ReadConcern" in { + val wrapped = + classOf[com.mongodb.ReadConcern].getDeclaredFields.filter(f => isStatic(f.getModifiers)).map(_.getName).toSet + val local = ReadConcern.getClass.getDeclaredMethods.map(_.getName).toSet -- Set( + "apply", + "$deserializeLambda$", + "$anonfun$fromString$1" + ) + + local should equal(wrapped) + } + + it should "return the expected ReadConcerns" in { + forAll(readConcerns) { (scalaValue: ReadConcern, javaValue: JReadConcern) => + scalaValue should equal(javaValue) + } + } + + val readConcerns = + Table( + ("ScalaValue", "JavaValue"), + (ReadConcern.DEFAULT, JReadConcern.DEFAULT), + (ReadConcern.LOCAL, JReadConcern.LOCAL), + (ReadConcern.LINEARIZABLE, JReadConcern.LINEARIZABLE), + (ReadConcern.MAJORITY, JReadConcern.MAJORITY), + (ReadConcern(ReadConcernLevel.LOCAL), JReadConcern.LOCAL) + ) +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/ReadPreferenceSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/ReadPreferenceSpec.scala new file mode 100644 index 00000000000..e6e9a762391 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/ReadPreferenceSpec.scala @@ -0,0 +1,158 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala + +import java.lang.reflect.Modifier.isStatic +import java.util.concurrent.TimeUnit.SECONDS + +import scala.collection.JavaConverters._ +import scala.concurrent.duration.Duration + +class ReadPreferenceSpec extends BaseSpec { + + val duration = Duration("95 sec") + + "ReadPreference" should "have the same methods as the wrapped ReadPreference" in { + val wrapped = + classOf[com.mongodb.ReadPreference].getDeclaredMethods.filter(f => isStatic(f.getModifiers)).map(_.getName).toSet + val local = ReadPreference.getClass.getDeclaredMethods.map(_.getName).toSet + + local should equal(wrapped) + } + + it should "return the correct primary ReadPreferences" in { + val readPreference = ReadPreference.primary() + readPreference shouldBe com.mongodb.ReadPreference.primary() + } + + it should "return the correct primaryPreferred ReadPreferences" in { + val readPreference = ReadPreference.primaryPreferred() + readPreference shouldBe com.mongodb.ReadPreference.primaryPreferred() + + val readPreference1 = ReadPreference.primaryPreferred(duration) + readPreference1 shouldBe com.mongodb.ReadPreference.primaryPreferred(95, SECONDS) + + val readPreference2 = ReadPreference.primaryPreferred(TagSet()) + readPreference2 shouldBe com.mongodb.ReadPreference.primaryPreferred(TagSet()) + + val readPreference3 = ReadPreference.primaryPreferred(TagSet(), duration) + readPreference3 shouldBe com.mongodb.ReadPreference.primaryPreferred(TagSet(), 95, SECONDS) + + val readPreference4 = ReadPreference.primaryPreferred(TagSet(Tag("name", "value"))) + readPreference4 shouldBe com.mongodb.ReadPreference.primaryPreferred(TagSet(Tag("name", "value"))) + + val readPreference5 = ReadPreference.primaryPreferred(TagSet(Tag("name", "value")), duration) + readPreference5 shouldBe com.mongodb.ReadPreference.primaryPreferred(TagSet(Tag("name", "value")), 95, SECONDS) + + val readPreference6 = ReadPreference.primaryPreferred(List(TagSet(List(Tag("name", "value"))))) + readPreference6 shouldBe com.mongodb.ReadPreference + .primaryPreferred(List(TagSet(List(Tag("name", "value")))).asJava) + + val readPreference7 = ReadPreference.primaryPreferred(List(TagSet(List(Tag("name", "value")))), duration) + readPreference7 shouldBe com.mongodb.ReadPreference + .primaryPreferred(List(TagSet(List(Tag("name", "value")))).asJava, 95, SECONDS) + } + + it should "return the correct secondary based ReadPreferences" in { + val readPreference = ReadPreference.secondary() + readPreference shouldBe com.mongodb.ReadPreference.secondary() + + val readPreference1 = ReadPreference.secondary(duration) + readPreference1 shouldBe com.mongodb.ReadPreference.secondary(95, SECONDS) + + val readPreference2 = ReadPreference.secondary(TagSet()) + readPreference2 shouldBe com.mongodb.ReadPreference.secondary(TagSet()) + + val readPreference3 = ReadPreference.secondary(TagSet(), duration) + readPreference3 shouldBe com.mongodb.ReadPreference.secondary(TagSet(), 95, SECONDS) + + val readPreference4 = ReadPreference.secondary(TagSet(Tag("name", "value"))) + readPreference4 shouldBe com.mongodb.ReadPreference.secondary(TagSet(Tag("name", "value"))) + + val readPreference5 = ReadPreference.secondary(TagSet(Tag("name", "value")), duration) + readPreference5 shouldBe com.mongodb.ReadPreference.secondary(TagSet(Tag("name", "value")), 95, SECONDS) + + val readPreference6 = ReadPreference.secondary(List(TagSet(List(Tag("name", "value"))))) + readPreference6 shouldBe com.mongodb.ReadPreference.secondary(List(TagSet(List(Tag("name", 
"value")))).asJava) + + val readPreference7 = ReadPreference.secondary(List(TagSet(List(Tag("name", "value")))), duration) + readPreference7 shouldBe com.mongodb.ReadPreference + .secondary(List(TagSet(List(Tag("name", "value")))).asJava, 95, SECONDS) + } + + it should "return the correct secondaryPreferred based ReadPreferences" in { + val readPreference = ReadPreference.secondaryPreferred() + readPreference shouldBe com.mongodb.ReadPreference.secondaryPreferred() + + val readPreference1 = ReadPreference.secondaryPreferred(duration) + readPreference1 shouldBe com.mongodb.ReadPreference.secondaryPreferred(95, SECONDS) + + val readPreference2 = ReadPreference.secondaryPreferred(TagSet()) + readPreference2 shouldBe com.mongodb.ReadPreference.secondaryPreferred(TagSet()) + + val readPreference3 = ReadPreference.secondaryPreferred(TagSet(), duration) + readPreference3 shouldBe com.mongodb.ReadPreference.secondaryPreferred(TagSet(), 95, SECONDS) + + val readPreference4 = ReadPreference.secondaryPreferred(TagSet(Tag("name", "value"))) + readPreference4 shouldBe com.mongodb.ReadPreference.secondaryPreferred(TagSet(Tag("name", "value"))) + + val readPreference5 = ReadPreference.secondaryPreferred(TagSet(Tag("name", "value")), duration) + readPreference5 shouldBe com.mongodb.ReadPreference.secondaryPreferred(TagSet(Tag("name", "value")), 95, SECONDS) + + val readPreference6 = ReadPreference.secondaryPreferred(List(TagSet(List(Tag("name", "value"))))) + readPreference6 shouldBe com.mongodb.ReadPreference + .secondaryPreferred(List(TagSet(List(Tag("name", "value")))).asJava) + + val readPreference7 = ReadPreference.secondaryPreferred(List(TagSet(List(Tag("name", "value")))), duration) + readPreference7 shouldBe com.mongodb.ReadPreference + .secondaryPreferred(List(TagSet(List(Tag("name", "value")))).asJava, 95, SECONDS) + } + + it should "return the correct nearest based ReadPreferences" in { + val readPreference = ReadPreference.nearest() + readPreference shouldBe com.mongodb.ReadPreference.nearest() + + val readPreference1 = ReadPreference.nearest(duration) + readPreference1 shouldBe com.mongodb.ReadPreference.nearest(95, SECONDS) + + val readPreference2 = ReadPreference.nearest(TagSet(Tag("name", "value"))) + readPreference2 shouldBe com.mongodb.ReadPreference.nearest(TagSet(Tag("name", "value"))) + + val readPreference3 = ReadPreference.nearest(TagSet(Tag("name", "value")), duration) + readPreference3 shouldBe com.mongodb.ReadPreference.nearest(TagSet(Tag("name", "value")), 95, SECONDS) + + val readPreference4 = ReadPreference.nearest(List(TagSet(List(Tag("name", "value"))))) + readPreference4 shouldBe com.mongodb.ReadPreference.nearest(List(TagSet(List(Tag("name", "value")))).asJava) + + val readPreference5 = ReadPreference.nearest(List(TagSet(List(Tag("name", "value")))), duration) + readPreference5 shouldBe com.mongodb.ReadPreference + .nearest(List(TagSet(List(Tag("name", "value")))).asJava, 95, SECONDS) + } + + it should "return the correct ReadPreference for valueOf" in { + val readPreference = ReadPreference.valueOf("Primary") + readPreference shouldBe com.mongodb.ReadPreference.primary() + + val readPreference2 = ReadPreference.valueOf("PrimaryPreferred", List(TagSet(Tag("name", "value")))) + readPreference2 shouldBe com.mongodb.ReadPreference.primaryPreferred(List(TagSet(Tag("name", "value"))).asJava) + + val readPreference3 = ReadPreference.valueOf("PrimaryPreferred", List(TagSet(Tag("name", "value"))), duration) + readPreference3 shouldBe com.mongodb.ReadPreference + 
.primaryPreferred(List(TagSet(Tag("name", "value"))).asJava, 95, SECONDS) + } + +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/ScalaPackageSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/ScalaPackageSpec.scala new file mode 100644 index 00000000000..19b1140e8f7 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/ScalaPackageSpec.scala @@ -0,0 +1,171 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala + +import java.util.concurrent.TimeUnit +import _root_.scala.concurrent.duration.Duration +import com.mongodb.{ MongoCredential => JMongoCredential } +import org.bson.BsonDocumentWrapper +import org.mongodb.scala +import org.mongodb.scala.MongoClient.DEFAULT_CODEC_REGISTRY +import org.mongodb.scala.bson._ +import org.mongodb.scala.model._ + +class ScalaPackageSpec extends BaseSpec { + + it should "be able to create Observable, Observers and Subscriptions" in { + var success = false + val observable = new Observable[Int] { + override def subscribe(observer: Observer[_ >: Int]): Unit = { + val subscription = new Subscription { + override def isUnsubscribed: Boolean = false + + override def request(l: Long): Unit = observer.onComplete() + + override def unsubscribe(): Unit = {} + } + + observer.onSubscribe(subscription) + } + } + val observer = new Observer[Int] { + override def onError(throwable: Throwable): Unit = {} + + override def onSubscribe(subscription: Subscription): Unit = subscription.request(1) + + override def onComplete(): Unit = success = true + + override def onNext(tResult: Int): Unit = {} + } + + observable.subscribe(observer) + + success shouldBe true + } + + it should "be able to create MongoClientSettings" in { + val settings = scala.MongoClientSettings.builder().readPreference(ReadPreference.primary()).build() + settings shouldBe a[com.mongodb.MongoClientSettings] + } + + it should "be able to create Documents" in { + val doc = Document("a" -> BsonString("1")) + val doc2 = collection.Document("a" -> BsonString("1")) + + doc shouldBe a[collection.immutable.Document] + doc should equal(doc2) + } + + it should "be able to create BulkWriteOptions" in { + val options = BulkWriteOptions() + options shouldBe a[com.mongodb.client.model.BulkWriteOptions] + } + + it should "be able to create MongoNamespace" in { + val namespace = MongoNamespace("db.coll") + namespace shouldBe a[com.mongodb.MongoNamespace] + + val namespace2 = MongoNamespace("db", "coll") + namespace2 shouldBe a[com.mongodb.MongoNamespace] + } + + it should "be able to create WriteConcern" in { + WriteConcern.ACKNOWLEDGED should equal(com.mongodb.WriteConcern.ACKNOWLEDGED) + + WriteConcern.W1 should equal(new com.mongodb.WriteConcern(1)) + + WriteConcern.W2 should equal(new com.mongodb.WriteConcern(2)) + + WriteConcern.W3 should equal(new com.mongodb.WriteConcern(3)) + + WriteConcern.UNACKNOWLEDGED should equal(com.mongodb.WriteConcern.UNACKNOWLEDGED) + + WriteConcern.JOURNALED should
equal(com.mongodb.WriteConcern.JOURNALED) + + WriteConcern.MAJORITY should equal(com.mongodb.WriteConcern.MAJORITY) + + WriteConcern(1) should equal(new com.mongodb.WriteConcern(1)) + + WriteConcern("Majority") should equal(new com.mongodb.WriteConcern("Majority")) + + WriteConcern(1).withJournal(true) should equal(new com.mongodb.WriteConcern(1).withJournal(true)) + + WriteConcern("Majority").withWTimeout(Duration(10, TimeUnit.MILLISECONDS)) should equal( + new com.mongodb.WriteConcern("Majority").withWTimeout(10, TimeUnit.MILLISECONDS) + ) + + WriteConcern(1).withWTimeout(Duration(10, TimeUnit.MILLISECONDS)) should equal( + new com.mongodb.WriteConcern(1).withWTimeout(10, TimeUnit.MILLISECONDS) + ) + } + + it should "create MongoCredential" in { + + val scalaCredential = MongoCredential.createCredential("userName", "database", "password".toCharArray) + val javaCredential = JMongoCredential.createCredential("userName", "database", "password".toCharArray) + scalaCredential should equal(javaCredential) + + val scalaCredential1 = MongoCredential.createScramSha1Credential("userName", "database", "password".toCharArray) + val javaCredential1 = JMongoCredential.createScramSha1Credential("userName", "database", "password".toCharArray) + scalaCredential1 should equal(javaCredential1) + + val scalaCredential2 = MongoCredential.createMongoX509Credential("userName") + val javaCredential2 = JMongoCredential.createMongoX509Credential("userName") + scalaCredential2 should equal(javaCredential2) + + val scalaCredential3 = MongoCredential.createMongoX509Credential() + val javaCredential3 = JMongoCredential.createMongoX509Credential() + scalaCredential3 should equal(javaCredential3) + + val scalaCredential4 = MongoCredential.createPlainCredential("userName", "database", "password".toCharArray) + val javaCredential4 = JMongoCredential.createPlainCredential("userName", "database", "password".toCharArray) + scalaCredential4 should equal(javaCredential4) + + val scalaCredential5 = MongoCredential.createGSSAPICredential("userName") + val javaCredential5 = JMongoCredential.createGSSAPICredential("userName") + scalaCredential5 should equal(javaCredential5) + } + + it should "implicitly convert to org.bson.document with type fidelity" in { + + val bsonDocument = Document( + "null" -> BsonNull(), + "int32" -> BsonInt32(32), + "int64" -> BsonInt64(Long.MaxValue), + "decimal128" -> BsonDecimal128(128.1), + "boolean" -> BsonBoolean(true), + "date" -> BsonDateTime(123456789), + "double" -> BsonDouble(1.1), + "string" -> BsonString("String"), + "minKey" -> BsonMinKey(), + "maxKey" -> BsonMaxKey(), + "javaScript" -> BsonJavaScript("function () {}"), + "objectId" -> BsonObjectId(), + "codeWithScope" -> BsonJavaScriptWithScope("function () {}", Document()), + "regex" -> BsonRegularExpression("/(.*)/"), + "symbol" -> BsonSymbol(Symbol("sym")), + "timestamp" -> BsonTimestamp(), + "undefined" -> BsonUndefined(), + "binary" -> BsonBinary(Array[Byte](128.toByte)), + "array" -> BsonArray(List("a", "b", "c")), + "document" -> Document("a" -> 1, "b" -> List(1, 2, 3)) + ) + + val document: org.bson.Document = bsonDocument + BsonDocumentWrapper.asBsonDocument(document, DEFAULT_CODEC_REGISTRY) should equal(bsonDocument.underlying) + } +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/connection/ConnectionSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/connection/ConnectionSpec.scala new file mode 100644 index 00000000000..9db7968fae7 --- /dev/null +++ 
b/driver-scala/src/test/scala/org/mongodb/scala/connection/ConnectionSpec.scala @@ -0,0 +1,99 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.connection + +import com.mongodb.{ ServerAddress => JServerAddress } +import org.mongodb.scala.{ BaseSpec, ServerAddress } +import org.scalatestplus.mockito.MockitoSugar + +import java.net.{ InetAddress, InetSocketAddress } +import scala.collection.JavaConverters._ + +class ConnectionSpec extends BaseSpec with MockitoSugar { + + it should "have a ClusterSettings companion" in { + val scalaSetting = ClusterSettings.builder().hosts(List(ServerAddress()).asJava).build() + val javaSetting = com.mongodb.connection.ClusterSettings.builder().hosts(List(ServerAddress()).asJava).build() + + scalaSetting shouldBe a[com.mongodb.connection.ClusterSettings] + scalaSetting should equal(javaSetting) + } + + it should "have a ConnectionPoolSettings companion" in { + val scalaSetting = ConnectionPoolSettings.builder.build() + val javaSetting = com.mongodb.connection.ConnectionPoolSettings.builder().build() + + scalaSetting shouldBe a[com.mongodb.connection.ConnectionPoolSettings] + scalaSetting should equal(javaSetting) + } + + it should "have a ServerSettings companion" in { + val scalaSetting = ServerSettings.builder.build() + val javaSetting = com.mongodb.connection.ServerSettings.builder().build() + + scalaSetting shouldBe a[com.mongodb.connection.ServerSettings] + scalaSetting should equal(javaSetting) + } + + it should "have a SocketSettings companion" in { + val scalaSetting = SocketSettings.builder.build() + val javaSetting = com.mongodb.connection.SocketSettings.builder().build() + + scalaSetting shouldBe a[com.mongodb.connection.SocketSettings] + scalaSetting should equal(javaSetting) + } + + it should "have a SslSettings companion" in { + val scalaSetting = SslSettings.builder.build() + val javaSetting = com.mongodb.connection.SslSettings.builder().build() + + scalaSetting shouldBe a[com.mongodb.connection.SslSettings] + scalaSetting should equal(javaSetting) + } + + it should "have a ServerAddress companion" in { + val scalaAddress = ServerAddress() + val javaAddress = new JServerAddress() + scalaAddress should equal(javaAddress) + + val scalaAddress1 = ServerAddress("localhost") + val javaAddress1 = new JServerAddress("localhost") + scalaAddress1 should equal(javaAddress1) + + val scalaAddress2 = ServerAddress("localhost") + val javaAddress2 = new JServerAddress("localhost") + scalaAddress2 should equal(javaAddress2) + + val inetAddress = InetAddress.getByName("localhost") + val scalaAddress3 = ServerAddress(inetAddress) + val javaAddress3 = new JServerAddress(inetAddress) + scalaAddress3 should equal(javaAddress3) + + val scalaAddress4 = ServerAddress(inetAddress, 27017) + val javaAddress4 = new JServerAddress(inetAddress, 27017) + scalaAddress4 should equal(javaAddress4) + + val inetSocketAddress = new InetSocketAddress(inetAddress, 27017) + val scalaAddress5 = 
ServerAddress(inetSocketAddress) + val javaAddress5 = new JServerAddress(inetSocketAddress) + scalaAddress5 should equal(javaAddress5) + + val scalaAddress6 = ServerAddress("localhost", 27017) + val javaAddress6 = new JServerAddress("localhost", 27017) + scalaAddress6 should equal(javaAddress6) + } +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/gridfs/GridFSBucketSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/gridfs/GridFSBucketSpec.scala new file mode 100644 index 00000000000..4a00157cad1 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/gridfs/GridFSBucketSpec.scala @@ -0,0 +1,184 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.gridfs + +import java.nio.ByteBuffer + +import com.mongodb.reactivestreams.client.gridfs.{ GridFSBucket => JGridFSBucket } +import org.mockito.Mockito.{ verify, when } +import org.mongodb.scala.bson.BsonObjectId +import org.mongodb.scala.bson.collection.immutable.Document +import org.mongodb.scala.{ BaseSpec, ClientSession, Observable, ReadConcern, ReadPreference, WriteConcern } +import org.scalatestplus.mockito.MockitoSugar + +class GridFSBucketSpec extends BaseSpec with MockitoSugar { + val wrapper = mock[JGridFSBucket] + val clientSession = mock[ClientSession] + val gridFSBucket = new GridFSBucket(wrapper) + + "GridFSBucket" should "have the same methods as the wrapped GridFSBucket" in { + val wrapped = classOf[JGridFSBucket].getMethods.map(_.getName).toSet + val local = classOf[GridFSBucket].getMethods.map(_.getName).toSet + + wrapped.foreach((name: String) => { + val cleanedName = name.stripPrefix("get").replace("Publisher", "Observable") + assert(local.contains(name) || local.contains(cleanedName.head.toLower + cleanedName.tail), s"Missing: $name") + }) + } + + it should "call the underlying methods to get bucket values" in { + gridFSBucket.bucketName + gridFSBucket.chunkSizeBytes + gridFSBucket.readConcern + gridFSBucket.readPreference + gridFSBucket.writeConcern + + verify(wrapper).getBucketName + when(wrapper.getChunkSizeBytes).thenReturn(1) + verify(wrapper).getReadConcern + verify(wrapper).getReadPreference + verify(wrapper).getWriteConcern + } + + it should "call the underlying methods to set bucket values" in { + val chunkSizeInBytes = 1024 * 1024 + val readConcern = ReadConcern.MAJORITY + val readPreference = ReadPreference.secondaryPreferred() + val writeConcern = WriteConcern.W2 + + gridFSBucket.withChunkSizeBytes(chunkSizeInBytes) + gridFSBucket.withReadConcern(readConcern) + gridFSBucket.withReadPreference(readPreference) + gridFSBucket.withWriteConcern(writeConcern) + + verify(wrapper).withChunkSizeBytes(chunkSizeInBytes) + verify(wrapper).withReadConcern(readConcern) + verify(wrapper).withReadPreference(readPreference) + verify(wrapper).withWriteConcern(writeConcern) + } + + it should "call the underlying delete method" in { + val bsonValue = BsonObjectId() + val objectId = bsonValue.getValue + + 
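+ // Every delete overload (ObjectId or BsonValue id, with or without a ClientSession) should delegate straight to the wrapped reactive-streams bucket, as the verifications below confirm.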
gridFSBucket.delete(objectId) + gridFSBucket.delete(bsonValue) + gridFSBucket.delete(clientSession, objectId) + gridFSBucket.delete(clientSession, bsonValue) + + verify(wrapper).delete(objectId) + verify(wrapper).delete(bsonValue) + verify(wrapper).delete(clientSession, objectId) + verify(wrapper).delete(clientSession, bsonValue) + } + + it should "call the underlying drop method" in { + gridFSBucket.drop() + gridFSBucket.drop(clientSession) + + verify(wrapper).drop() + verify(wrapper).drop(clientSession) + } + + it should "call the underlying rename method" in { + val bsonValue = BsonObjectId() + val objectId = bsonValue.getValue + val newName = "newName" + + gridFSBucket.rename(objectId, newName) + gridFSBucket.rename(bsonValue, newName) + gridFSBucket.rename(clientSession, objectId, newName) + gridFSBucket.rename(clientSession, bsonValue, newName) + + verify(wrapper).rename(objectId, newName) + verify(wrapper).rename(bsonValue, newName) + verify(wrapper).rename(clientSession, objectId, newName) + verify(wrapper).rename(clientSession, bsonValue, newName) + } + + it should "return the expected findObservable" in { + val filter = Document("{a: 1}") + + gridFSBucket.find() + gridFSBucket.find(filter) + gridFSBucket.find(clientSession) + gridFSBucket.find(clientSession, filter) + + verify(wrapper).find() + verify(wrapper).find(filter) + verify(wrapper).find(clientSession) + verify(wrapper).find(clientSession, filter) + } + + it should "return the expected GridFSDownloadObservable" in { + val fileName = "myFile" + val bsonValue = BsonObjectId() + val objectId = bsonValue.getValue + val options = new GridFSDownloadOptions() + val clientSession = mock[ClientSession] + + gridFSBucket.downloadToObservable(objectId) + gridFSBucket.downloadToObservable(bsonValue) + gridFSBucket.downloadToObservable(fileName) + gridFSBucket.downloadToObservable(fileName, options) + + verify(wrapper).downloadToPublisher(objectId) + verify(wrapper).downloadToPublisher(bsonValue) + verify(wrapper).downloadToPublisher(fileName) + verify(wrapper).downloadToPublisher(fileName, options) + + gridFSBucket.downloadToObservable(clientSession, objectId) + gridFSBucket.downloadToObservable(clientSession, bsonValue) + gridFSBucket.downloadToObservable(clientSession, fileName) + gridFSBucket.downloadToObservable(clientSession, fileName, options) + + verify(wrapper).downloadToPublisher(clientSession, objectId) + verify(wrapper).downloadToPublisher(clientSession, bsonValue) + verify(wrapper).downloadToPublisher(clientSession, fileName) + verify(wrapper).downloadToPublisher(clientSession, fileName, options) + + } + + it should "return the expected GridFSUploadObservable" in { + val publisher = Observable(Seq(ByteBuffer.wrap("123".getBytes))) + val fileName = "myFile" + val bsonValue = BsonObjectId() + val options = new GridFSUploadOptions() + val clientSession = mock[ClientSession] + + gridFSBucket.uploadFromObservable(fileName, publisher) + gridFSBucket.uploadFromObservable(fileName, publisher, options) + gridFSBucket.uploadFromObservable(bsonValue, fileName, publisher) + gridFSBucket.uploadFromObservable(bsonValue, fileName, publisher, options) + + verify(wrapper).uploadFromPublisher(fileName, publisher) + verify(wrapper).uploadFromPublisher(fileName, publisher, options) + verify(wrapper).uploadFromPublisher(bsonValue, fileName, publisher) + verify(wrapper).uploadFromPublisher(bsonValue, fileName, publisher, options) + + gridFSBucket.uploadFromObservable(clientSession, fileName, publisher) + 
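+ // The ClientSession must be passed through to uploadFromPublisher unchanged; the id- and options-accepting session overloads are exercised next and verified below.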
gridFSBucket.uploadFromObservable(clientSession, fileName, publisher, options) + gridFSBucket.uploadFromObservable(clientSession, bsonValue, fileName, publisher) + gridFSBucket.uploadFromObservable(clientSession, bsonValue, fileName, publisher, options) + + verify(wrapper).uploadFromPublisher(clientSession, fileName, publisher) + verify(wrapper).uploadFromPublisher(clientSession, fileName, publisher, options) + verify(wrapper).uploadFromPublisher(clientSession, bsonValue, fileName, publisher) + verify(wrapper).uploadFromPublisher(clientSession, bsonValue, fileName, publisher, options) + } + +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/gridfs/GridFSDownloadObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/gridfs/GridFSDownloadObservableSpec.scala new file mode 100644 index 00000000000..4d02b6290f6 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/gridfs/GridFSDownloadObservableSpec.scala @@ -0,0 +1,48 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.gridfs + +import com.mongodb.reactivestreams.client.gridfs.GridFSDownloadPublisher +import org.mockito.Mockito.verify +import org.mongodb.scala.BaseSpec +import org.scalatestplus.mockito.MockitoSugar + +class GridFSDownloadObservableSpec extends BaseSpec with MockitoSugar { + val wrapper = mock[GridFSDownloadPublisher] + val gridFSDownloadStream = GridFSDownloadObservable(wrapper) + + "GridFSDownloadStream" should "have the same methods as the wrapped GridFSDownloadStream" in { + val wrapped = classOf[GridFSDownloadPublisher].getMethods.map(_.getName).toSet + val local = classOf[GridFSDownloadObservable].getMethods.map(_.getName).toSet + + wrapped.foreach((name: String) => { + val cleanedName = name.stripPrefix("get") + assert(local.contains(name) || local.contains(cleanedName.head.toLower + cleanedName.tail), s"Missing: $name") + }) + } + + it should "call the underlying methods" in { + val bufferSizeBytes = 1024 + + gridFSDownloadStream.bufferSizeBytes(bufferSizeBytes) + gridFSDownloadStream.gridFSFile() + + verify(wrapper).bufferSizeBytes(bufferSizeBytes) + verify(wrapper).getGridFSFile + } + +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/gridfs/GridFSFindObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/gridfs/GridFSFindObservableSpec.scala new file mode 100644 index 00000000000..521f7ac7dca --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/gridfs/GridFSFindObservableSpec.scala @@ -0,0 +1,69 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.gridfs + +import java.util.concurrent.TimeUnit + +import com.mongodb.reactivestreams.client.gridfs.GridFSFindPublisher +import org.mockito.Mockito.verify +import org.mongodb.scala.{ BaseSpec, Document } +import org.reactivestreams.Publisher +import org.scalatestplus.mockito.MockitoSugar + +import scala.concurrent.duration.Duration + +class GridFSFindObservableSpec extends BaseSpec with MockitoSugar { + val wrapper = mock[GridFSFindPublisher] + val gridFSFindObservable = GridFSFindObservable(wrapper) + + "GridFSFindObservable" should "have the same methods as the wrapped GridFSFindPublisher" in { + val mongoPublisher: Set[String] = classOf[Publisher[Document]].getMethods.map(_.getName).toSet + val wrapped = classOf[GridFSFindPublisher].getMethods.map(_.getName).toSet -- mongoPublisher - "collation" + val local = classOf[GridFSFindObservable].getMethods.map(_.getName).toSet + + wrapped.foreach((name: String) => { + val cleanedName = name.stripPrefix("get") + assert(local.contains(name) || local.contains(cleanedName.head.toLower + cleanedName.tail), s"Missing: $name") + }) + } + + it should "call the underlying methods" in { + val batchSize = 20 + val filter = Document("{a: 1}") + val limit = 10 + val maxTime = Duration(10, "second") // scalastyle:ignore + val skip = 5 + val sort = Document("{_id: 1}") + + gridFSFindObservable.batchSize(batchSize) + gridFSFindObservable.filter(filter) + gridFSFindObservable.limit(limit) + gridFSFindObservable.maxTime(maxTime) + gridFSFindObservable.noCursorTimeout(true) + gridFSFindObservable.skip(skip) + gridFSFindObservable.sort(sort) + + verify(wrapper).batchSize(batchSize) + verify(wrapper).filter(filter) + verify(wrapper).limit(limit) + verify(wrapper).maxTime(maxTime.toMillis, TimeUnit.MILLISECONDS) + verify(wrapper).noCursorTimeout(true) + verify(wrapper).skip(skip) + verify(wrapper).sort(sort) + } + +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/gridfs/GridFSUploadPublisherSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/gridfs/GridFSUploadPublisherSpec.scala new file mode 100644 index 00000000000..b60b77709d8 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/gridfs/GridFSUploadPublisherSpec.scala @@ -0,0 +1,47 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.mongodb.scala.gridfs + +import com.mongodb.reactivestreams.client.gridfs.GridFSUploadPublisher +import org.mockito.Mockito.verify +import org.mongodb.scala.BaseSpec +import org.scalatestplus.mockito.MockitoSugar + +class GridFSUploadPublisherSpec extends BaseSpec with MockitoSugar { + val wrapper = mock[GridFSUploadPublisher[Unit]] + val gridFSUploadObservable = GridFSUploadObservable(wrapper) + + "GridFSUploadObservable" should "have the same methods as the wrapped GridFSUploadPublisher" in { + val wrapped = classOf[GridFSUploadPublisher[Unit]].getMethods.map(_.getName).toSet + val local = classOf[GridFSUploadObservable[Unit]].getMethods.map(_.getName).toSet + + wrapped.foreach((name: String) => { + val cleanedName = name.stripPrefix("get") + assert(local.contains(name) || local.contains(cleanedName.head.toLower + cleanedName.tail), s"Missing: $name") + }) + } + + it should "call the underlying methods" in { + + gridFSUploadObservable.objectId + gridFSUploadObservable.id + + verify(wrapper).getObjectId + verify(wrapper).getId + } + +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/internal/CollectObservableTest.scala b/driver-scala/src/test/scala/org/mongodb/scala/internal/CollectObservableTest.scala new file mode 100644 index 00000000000..57c163061ff --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/internal/CollectObservableTest.scala @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.internal + +import org.mongodb.scala.{ BaseSpec, Observable } +import org.scalatest.concurrent.ScalaFutures + +class CollectObservableTest extends BaseSpec with ScalaFutures { + "CollectObservable" should "apply a partial function to the elements in the Observable" in { + val justStrings = Observable(Iterable("this", 1, 2, "that")) + .collect { case s: String => s } + .toFuture() + .futureValue + + assert(justStrings === Seq("this", "that")) + } +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/internal/FlatMapObservableTest.scala b/driver-scala/src/test/scala/org/mongodb/scala/internal/FlatMapObservableTest.scala new file mode 100644 index 00000000000..7473bfff2c3 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/internal/FlatMapObservableTest.scala @@ -0,0 +1,76 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.mongodb.scala.internal + +import org.mongodb.scala.{ BaseSpec, Observable, Observer } +import org.scalatest.concurrent.{ Eventually, Futures } + +import java.util.concurrent.atomic.AtomicInteger +import scala.concurrent.ExecutionContext.Implicits.global +import scala.concurrent.{ Future, Promise } + +class FlatMapObservableTest extends BaseSpec with Futures with Eventually { + "FlatMapObservable" should "only complete once" in { + + val p = Promise[Unit]() + val completedCounter = new AtomicInteger(0) + Observable(1 to 100) + .flatMap(x => createObservable(x)) + .subscribe( + _ => (), + e => p.failure(e), + () => { + p.trySuccess(()) + completedCounter.incrementAndGet() + } + ) + eventually(assert(completedCounter.get() == 1, s"${completedCounter.get()}")) + } + + it should "call onError if the mapper fails" in { + val p = Promise[Unit]() + val errorCounter = new AtomicInteger(0) + Observable(1 to 100) + .flatMap(x => + if (x > 10) { + throw new IllegalStateException("Fail") + } else { + createObservable(x) + } + ) + .subscribe( + _ => (), + _ => { + p.trySuccess(()) + errorCounter.incrementAndGet() + }, + () => { + p.failure(new IllegalStateException("Should not complete")) + } + ) + eventually(assert(errorCounter.get() == 1, s"${errorCounter.get()}")) + } + + private def createObservable(x: Int): Observable[Int] = new Observable[Int] { + override def subscribe(observer: Observer[_ >: Int]): Unit = { + Future(()).onComplete(_ => { + observer.onNext(x) + observer.onComplete() + }) + } + } +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/internal/ObservableImplementationSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/internal/ObservableImplementationSpec.scala new file mode 100644 index 00000000000..4c092bcb7a6 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/internal/ObservableImplementationSpec.scala @@ -0,0 +1,408 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package org.mongodb.scala.internal + +import org.mongodb.scala._ +import org.scalatest.prop.TableDrivenPropertyChecks + +import scala.util.{ Failure, Success } + +class ObservableImplementationSpec extends BaseSpec with TableDrivenPropertyChecks { + + "Observables" should "call onCompleted once all results are consumed" in { + forAll(happyObservables) { (observable: Observable[Int], observer: TestObserver[Int]) => + { + observable.subscribe(observer) + + val subscription = observer.subscription.get + subscription.request(1) + subscription.request(1000) + + subscription.isUnsubscribed should equal(false) + observer.error should equal(None) + observer.completed should equal(true) + } + } + } + + it should "handle over-requesting observables as expected" in { + forAll(overRequestingObservables) { (observable: Observable[Int], observer: TestObserver[Int], expected: Int) => + { + observable.subscribe(observer) + + val subscription = observer.subscription.get + subscription.request(1000) + + subscription.isUnsubscribed should equal(false) + observer.error should equal(None) + observer.completed should equal(true) + observer.results.size should equal(expected) + } + } + } + + it should "be well behaved and call onError when the Observable errors" in { + forAll(failingObservables) { (observable: Observable[Int]) => + { + var thrown = false + observable.subscribe(_ => (), _ => thrown = true) + thrown should equal(true) + } + } + } + + it should "be well behaved and call onError when errors are caused by a passed-in function" in { + forAll(failingFunctionsObservables) { (observable: Observable[Int]) => + { + var thrown = false + observable.subscribe(_ => (), _ => thrown = true) + thrown should equal(true) + } + } + } + + it should "honor subscriptions and isUnsubscribed" in { + forAll(happyObservables) { (observable: Observable[Int], observer: TestObserver[Int]) => + { + observable.subscribe(observer) + + val expectedCompleted = observable.isInstanceOf[FoldLeftObservable[_, _]] + val subscription = observer.subscription.get + subscription.request(1) + subscription.request(2) + subscription.request(3) + subscription.request(4) + subscription.isUnsubscribed should equal(false) + + subscription.unsubscribe() + subscription.isUnsubscribed should equal(true) + + observer.error should equal(None) + observer.results.length should be <= 10 + observer.completed should equal(expectedCompleted) + } + } + } + + it should "honor subscriptions and isUnsubscribed without requesting data" in { + forAll(happyObservables) { (observable: Observable[Int], observer: TestObserver[Int]) => + { + observable.subscribe(observer) + + val subscription = observer.subscription.get + subscription.isUnsubscribed should equal(false) + + subscription.unsubscribe() + subscription.isUnsubscribed should equal(true) + + observer.error should equal(None) + observer.results shouldBe empty + observer.completed should equal(false) + + subscription.request(1000) + observer.results shouldBe empty + observer.completed should equal(false) + } + } + } + + it should "propagate errors from the observer" in { + forAll(happyObservables) { (observable: Observable[Int], observer: TestObserver[Int]) => + { + testObserver[Int](observable, observer) + } + } + } + + def testObserver[I](observable: Observable[I], observer: TestObserver[I]): Unit = { + val failObserver = TestObserver[I](new Observer[I] { + override def onError(throwable: Throwable): Unit = {} + + override def onSubscribe(subscription: Subscription):
Unit = {} + + override def onComplete(): Unit = {} + + override def onNext(tResult: I): Unit = throw new Throwable("Failed action") + }) + + observable.subscribe(failObserver) + intercept[Throwable] { + observer.subscription.get.request(10) + } + } + + it should "allow multiple subscriptions" in { + forAll(happyObservables) { (observable: Observable[Int], observer: TestObserver[Int]) => + { + val observer1 = TestObserver[Int]() + + observable.subscribe(observer) + observable.subscribe(observer1) + observer.subscription.get.request(Long.MaxValue) + observer1.subscription.get.request(Long.MaxValue) + + observer.error.isEmpty should equal(true) + observer1.error.isEmpty should equal(true) + observer.completed should equal(true) + observer1.completed should equal(true) + + observer.results.length should equal(observer1.results.length) + } + } + } + + it should "return the length of the smallest Observable from ZipObservable" in { + forAll(zippedObservables) { (observable: Observable[(Int, Int)]) => + { + val observer = TestObserver[(Int, Int)]() + observable.subscribe(observer) + + observer.subscription.foreach(_.request(100)) + + observer.results should equal((1 to 50).map(i => (i, i))) + observer.completed should equal(true) + } + } + + forAll(zippedObservablesWithEmptyObservable) { (observable: Observable[(Int, Int)]) => + { + val observer = TestObserver[(Int, Int)]() + observable.subscribe(observer) + + observer.subscription.foreach(_.request(100)) + + observer.results should equal(List()) + observer.completed should equal(true) + } + } + } + + it should "error if requested amount is less than 1" in { + forAll(happyObservables) { (observable: Observable[Int], observer: TestObserver[Int]) => + { + observable.subscribe(observer) + intercept[IllegalArgumentException] { + observer.subscription.get.request(0) + } + } + } + } + + it should "handle multiple requests where request rolls over Long.MaxValue" in { + forAll(happyObservables) { (observable: Observable[Int], observer: TestObserver[Int]) => + { + observable.subscribe(observer) + observer.subscription.get.request(Long.MaxValue - 1) + observer.subscription.get.request(Long.MaxValue) + + observer.error should equal(None) + observer.results should not be empty + observer.completed should equal(true) + } + } + } + + val failOn = 30 + + private def failingObservables = + Table( + "observable", + TestObservable(failOn = failOn), + AndThenObservable[Int, Int]( + TestObservable(failOn = failOn), + { + case Success(r) => 1000 + case Failure(ex) => 0 + } + ), + FilterObservable[Int](TestObservable(failOn = failOn), (i: Int) => i % 2 != 0), + FlatMapObservable[Int, Int](TestObservable(), (i: Int) => TestObservable(failOn = failOn)), + FoldLeftObservable(TestObservable(1 to 100, failOn = failOn), 0, (v: Int, i: Int) => v + i), + MapObservable[Int, Int](TestObservable(failOn = failOn), (i: Int) => i * 100), + RecoverObservable[Int, Int](TestObservable(failOn = failOn), { case e: ArithmeticException => 999 }), + RecoverWithObservable[Int, Int]( + TestObservable(failOn = failOn), + { + case e: ArithmeticException => TestObservable() + } + ), + RecoverWithObservable[Int, Int]( + TestObservable(failOn = failOn), + { + case e => TestObservable(failOn = failOn) + } + ), + ZipObservable[Int, Int](TestObservable(), TestObservable(failOn = failOn)).map[Int](a => a._1), + ZipObservable[Int, Int](TestObservable(failOn = failOn), TestObservable()).map[Int](a => a._1) + ) + + private def failingFunctionsObservables = + Table( + "observable", +
FilterObservable[Int]( + TestObservable(), + (i: Int) => { + if (i > 10) { + throw new RuntimeException("Error") + } + i % 2 == 0 + } + ), + FlatMapObservable[Int, Int]( + TestObservable(), + (i: Int) => { + if (i > 10) { + throw new RuntimeException("Error") + } + TestObservable(1 to 2) + } + ), + FoldLeftObservable( + TestObservable(1 to 100), + 0, + (v: Int, i: Int) => { + if (i > 10) { + throw new RuntimeException("Error") + } + v + i + } + ), + MapObservable[Int, Int]( + TestObservable(), + (i: Int) => { + if (i > 10) { + throw new RuntimeException("Error") + } + i * 100 + } + ) + ) + + private def happyObservables = + Table( + ("observable", "observer"), + (TestObservable(), TestObserver[Int]()), + ( + AndThenObservable[Int, Int]( + TestObservable(), + { + case Success(r) => 1000 + case Failure(ex) => 0 + } + ), + TestObserver[Int]() + ), + (FilterObservable[Int](TestObservable(), (i: Int) => i % 2 != 0), TestObserver[Int]()), + ( + FlatMapObservable[Int, Int](TestObservable(), (i: Int) => TestObservable(1 to 1)), + TestObserver[Int]() + ), + ( + FlatMapObservable[Int, Int](TestObservable(1 to 1), (i: Int) => TestObservable()), + TestObserver[Int]() + ), + (FoldLeftObservable(TestObservable(1 to 100), 0, (v: Int, i: Int) => v + i), TestObserver[Int]()), + (MapObservable[Int, Int](TestObservable(), (i: Int) => i * 100), TestObserver[Int]()), + (RecoverObservable[Int, Int](TestObservable(), { case e: ArithmeticException => 999 }), TestObserver[Int]()), + ( + RecoverWithObservable[Int, Int](TestObservable(), { case t => TestObservable() }), + TestObserver[Int]() + ), + ( + RecoverWithObservable[Int, Int](TestObservable(1 to 10, failOn = 1), { case t => TestObservable() }), + TestObserver[Int]() + ), + (IterableObservable((1 to 100).toStream), TestObserver[Int]()), + (ZipObservable[Int, Int](TestObservable(), TestObservable()).map[Int](a => a._1), TestObserver[Int]()) + ) + + private def zippedObservables = + Table[Observable[(Int, Int)]]( + "observable", + ZipObservable[Int, Int](TestObservable(1 to 50), TestObservable()), + ZipObservable[Int, Int](TestObservable(), TestObservable(1 to 50)) + ) + + private def zippedObservablesWithEmptyObservable = + Table[Observable[(Int, Int)]]( + "observable", + ZipObservable[Int, Int](TestObservable(1 to 50), TestObservable(List())), + ZipObservable[Int, Int](TestObservable(List()), TestObservable(1 to 50)) + ) + + private def overRequestingObservables = + Table( + ("observable", "observer", "expected"), + ( + FlatMapObservable[Int, Int]( + OverRequestedObservable(TestObservable(1 to 10)), + (i: Int) => TestObservable(1 to 10) + ), + TestObserver[Int](), + 100 + ), + ( + RecoverWithObservable[Int, Int]( + TestObservable(1 to 10, failOn = 1), + { case t => OverRequestedObservable(TestObservable(1 to 10)) } + ), + TestObserver[Int](), + 10 + ) + ) + + case class OverRequestedObservable(observable: TestObservable = TestObservable()) extends Observable[Int] { + + var totalRequested = 0L + override def subscribe(observer: Observer[_ >: Int]): Unit = { + observable.subscribe( + new Observer[Int] { + + var completed = false + override def onError(throwable: Throwable): Unit = observer.onError(throwable) + + override def onSubscribe(subscription: Subscription): Unit = { + val masterSub = new Subscription() { + override def isUnsubscribed: Boolean = subscription.isUnsubscribed + + override def request(n: Long): Unit = { + if (!completed) { + var demand = n + 1 + if (demand < 0) demand = Long.MaxValue + totalRequested += demand + subscription.request(demand) 
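+ // Intentionally requests one more element than the downstream asked for (clamping to Long.MaxValue on overflow), so the tests above can detect operators that naively forward inflated demand.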
+ } + } + override def unsubscribe(): Unit = subscription.unsubscribe() + } + observer.onSubscribe(masterSub) + } + + override def onComplete(): Unit = { + completed = true + observer.onComplete() + } + + override def onNext(tResult: Int): Unit = { + observer.onNext(tResult) + } + } + ) + } + } + +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/internal/OverridableObservableImplicitsSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/internal/OverridableObservableImplicitsSpec.scala new file mode 100644 index 00000000000..d835162e5f5 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/internal/OverridableObservableImplicitsSpec.scala @@ -0,0 +1,50 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.internal + +import scala.concurrent.duration.DurationInt +import org.mongodb.scala.{ BaseSpec, Observable } + +import scala.concurrent.ExecutionContext.Implicits.global +import scala.concurrent.{ Await, Future } + +object ObservableImplicitOverride { + implicit class ObservableFuture[T](obs: => Observable[T]) { + def toFuture(): Future[String] = Future("Overridden observable") + } + +} + +class OverridableObservableImplicitsSpec extends BaseSpec { + + "Observable implicits" should "be overrideable" in { + import ObservableImplicitOverride._ + + val observable: Observable[Int] = Observable(1 to 10) + + Await.result(observable.toFuture(), 1.second) should equal("Overridden observable") + } + + it should "also allow the default implementation to work" in { + import org.mongodb.scala._ + val observable: Observable[Int] = Observable(1 to 10) + + Await.result(observable.toFuture(), 1.second) should equal((1 to 10).toList) + + } + +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/internal/ScalaObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/internal/ScalaObservableSpec.scala new file mode 100644 index 00000000000..4ad8d9c0190 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/internal/ScalaObservableSpec.scala @@ -0,0 +1,620 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala.internal + +import java.util.concurrent._ + +import com.mongodb.MongoException +import org.mongodb.scala._ +import org.reactivestreams.{ Subscriber, Subscription } +import org.scalatest.prop.TableDrivenPropertyChecks._ + +import scala.collection.mutable.ArrayBuffer +import scala.concurrent.ExecutionContext.Implicits.global +import scala.concurrent.duration.Duration +import scala.concurrent.{ Await, ExecutionContext } +import scala.util.{ Failure, Success } + +class ScalaObservableSpec extends BaseSpec { + + "ScalaObservable" should "allow for inline subscription" in { + val results = ArrayBuffer[Int]() + observable().subscribe((res: Int) => results += res) + results should equal(1 to 100) + + var thrown = false + observable(fail = true).subscribe((res: Int) => (), (t: Throwable) => thrown = true) + thrown should equal(true) + + var completed = false + observable().subscribe((res: Int) => (), (t: Throwable) => (), () => completed = true) + completed should equal(true) + } + + it should "have a foreach method" in { + val results = ArrayBuffer[Int]() + observable().foreach((res: Int) => results += res) + results should equal(1 to 100) + } + + it should "have a transform method" in { + var completed = false + val results = ArrayBuffer[String]() + observable() + .transform((res: Int) => res.toString, (ex: Throwable) => ex) + .subscribe((s: String) => results += s, (t: Throwable) => (), () => completed = true) + results should equal((1 to 100).map(_.toString)) + completed should equal(true) + + completed = false + val exception = new MongoException("New Exception") + var throwable: Option[Throwable] = None + observable(fail = true) + .transform((res: Int) => res, (ex: Throwable) => exception) + .subscribe((s: Int) => (), (t: Throwable) => throwable = Some(t), () => completed = true) + + completed should equal(false) + throwable.get should equal(exception) + } + + it should "have a map method" in { + val results = ArrayBuffer[String]() + var completed = false + observable() + .map((res: Int) => res.toString) + .subscribe((s: String) => results += s, (t: Throwable) => (), () => completed = true) + results should equal((1 to 100).map(_.toString)) + completed should equal(true) + } + + it should "have a flatMap method" in { + def myObservable(fail: Boolean = false): Observable[String] = + observable(fail = fail).flatMap((res: Int) => Observable(List(res.toString))) + + val results = ArrayBuffer[String]() + myObservable().subscribe((s: String) => results += s) + results should equal((1 to 100).map(_.toString)) + + var errorSeen: Option[Throwable] = None + myObservable(true).subscribe((s: String) => (), (fail: Throwable) => errorSeen = Some(fail)) + errorSeen.getOrElse(None) shouldBe a[Throwable] + + var completed = 0 + var seen = 0 + myObservable().subscribe((s: String) => seen += 1, (t: Throwable) => t, () => completed += 1) + seen should equal(100) + completed should equal(1) + } + + it should "have a filter method" in { + def myObservable(fail: Boolean = false): Observable[Int] = + observable(fail = fail).filter((i: Int) => i % 2 != 0) + + val results = ArrayBuffer[Int]() + myObservable().subscribe((i: Int) => results += i) + results should equal((1 to 100).filter(i => i % 2 != 0)) + + var errorSeen: Option[Throwable] = None + myObservable(true).subscribe((s: Int) => (), (fail: Throwable) => errorSeen = Some(fail)) + errorSeen.getOrElse(None) shouldBe a[Throwable] + + var completed = false + myObservable().subscribe((s: Int) => (), (t: Throwable) => t, () => 
completed = true) + completed should equal(true) + } + + it should "have a withFilter method" in { + def myObservable(fail: Boolean = false): Observable[Int] = + observable(fail = fail).withFilter((i: Int) => i % 2 != 0) + + val results = ArrayBuffer[Int]() + myObservable().subscribe((i: Int) => results += i) + results should equal((1 to 100).filter(i => i % 2 != 0)) + + var errorSeen: Option[Throwable] = None + myObservable(true).subscribe((s: Int) => (), (fail: Throwable) => errorSeen = Some(fail)) + errorSeen.getOrElse(None) shouldBe a[Throwable] + + var completed = false + myObservable().subscribe((s: Int) => (), (t: Throwable) => t, () => completed = true) + completed should equal(true) + } + + it should "have a collect method" in { + def myObservable(fail: Boolean = false): Observable[Seq[Int]] = { + observable(fail = fail).collect() + } + + val results = ArrayBuffer[Int]() + myObservable().subscribe((i: Seq[Int]) => results ++= i) + results should equal(1 to 100) + + var errorSeen: Option[Throwable] = None + myObservable(true).subscribe((s: Seq[Int]) => (), (fail: Throwable) => errorSeen = Some(fail)) + errorSeen.getOrElse(None) shouldBe a[Throwable] + + var completed = false + myObservable().subscribe((s: Seq[Int]) => (), (t: Throwable) => t, () => completed = true) + completed should equal(true) + } + + it should "allow a collect() followed by flatMap()" in { + var results = ArrayBuffer[Int]() + var completed = false + var errorSeen: Option[Throwable] = None + + observable() + .collect() + .flatMap(_ => Observable(1 to 3)) + .subscribe((i: Int) => results += i, (t: Throwable) => errorSeen = Some(t), () => completed = true) + + completed should equal(true) + errorSeen should equal(None) + results should equal(1 to 3) + + results = ArrayBuffer[Int]() + completed = false + errorSeen = None + + observable(fail = true) + .collect() + .flatMap(_ => Observable(1 to 3)) + .subscribe((i: Int) => results += i, (t: Throwable) => errorSeen = Some(t), () => completed = true) + + errorSeen.getOrElse(None) shouldBe a[Throwable] + completed should equal(false) + results.size should equal(0) + } + + it should "have a foldLeft method" in { + def myObservable(fail: Boolean = false): Observable[Int] = { + observable(fail = fail).foldLeft(0)((l: Int, i) => l + 1) + } + + var results = 0 + myObservable().subscribe((i: Int) => results = i) + results should equal(100) + + var errorSeen: Option[Throwable] = None + myObservable(true).subscribe((s: Int) => (), (fail: Throwable) => errorSeen = Some(fail)) + errorSeen.getOrElse(None) shouldBe a[Throwable] + + var completed = false + myObservable().subscribe((s: Int) => (), (t: Throwable) => t, () => completed = true) + completed should equal(true) + } + + it should "have a recover method" in { + var results = ArrayBuffer[Int]() + observable().recover({ case e: ArithmeticException => 999 }).subscribe((i: Int) => results += i) + results should equal(1 to 100) + + var errorSeen: Option[Throwable] = None + observable(fail = true) + .recover({ case e: ArithmeticException => 999 }) + .subscribe((s: Int) => (), (fail: Throwable) => errorSeen = Some(fail)) + errorSeen.getOrElse(None) shouldBe a[Throwable] + + results = ArrayBuffer[Int]() + observable(fail = true) + .transform(i => i, (t: Throwable) => new ArithmeticException()) + .recover({ case e: ArithmeticException => 999 }) + .subscribe((i: Int) => results += i) + results should equal((1 to 50) :+ 999) + } + + it should "have a recoverWith method" in { + var results = ArrayBuffer[Int]() + var completed = false + 
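// recoverWith switches to the fallback observable only when the partial function matches the failure; a non-matching error (the MongoException raised by observable(fail = true)) is propagated to onError unchanged. +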
observable() + .recoverWith({ case e: ArithmeticException => observable(1000 to 1001) }) + .subscribe((i: Int) => results += i, (t: Throwable) => (), () => completed = true) + results should equal(1 to 100) + completed should equal(true) + + results = ArrayBuffer[Int]() + var errorSeen: Option[Throwable] = None + completed = false + observable(fail = true) + .recoverWith({ case e: ArithmeticException => observable(1000 to 1001) }) + .subscribe((i: Int) => results += i, (fail: Throwable) => errorSeen = Some(fail), () => completed = true) + errorSeen.getOrElse(None) shouldBe a[Throwable] + results should equal(1 to 50) + completed should equal(false) + + results = ArrayBuffer[Int]() + observable(fail = true) + .transform(i => i, (t: Throwable) => new ArithmeticException()) + .recoverWith({ case e: ArithmeticException => observable(1000 to 1001) }) + .subscribe((i: Int) => results += i) + results should equal((1 to 50) ++ (1000 to 1001)) + + results = ArrayBuffer[Int]() + observable(fail = true) + .transform(i => i, (t: Throwable) => new ArithmeticException()) + .collect() + .recoverWith({ case e: ArithmeticException => observable(1000 to 1001).collect() }) + .subscribe((i: Seq[Int]) => results ++= i) + results should equal((1000 to 1001)) + } + + it should "have a zip method" in { + val results = ArrayBuffer[(Int, String)]() + observable().zip(observable().map(i => i.toString)).subscribe((res: (Int, String)) => results += res) + results should equal((1 to 100).zip((1 to 100).map(_.toString))) + } + + it should "have a fallbackTo method" in { + var results = ArrayBuffer[Int]() + observable().fallbackTo(observable(1000 to 1001)).subscribe((i: Int) => results += i) + results should equal(1 to 100) + + results = ArrayBuffer[Int]() + observable(fail = true) + .fallbackTo(observable(1000 to 1001)) + .subscribe((i: Int) => results += i) + results should equal((1 to 50) ++ (1000 to 1001)) + + var errorMessage = "" + TestObservable(1 to 100, 10, "Original Error") + .fallbackTo(TestObservable(1000 to 1001, 1000, "Fallback Error")) + .subscribe((i: Int) => i, (t: Throwable) => errorMessage = t.getMessage) + errorMessage should equal("Original Error") + } + + it should "have an andThen method" in { + var results = ArrayBuffer[Int]() + def myObservable(fail: Boolean = false): Observable[Int] = { + observable(1 to 100, fail = fail) andThen { + case Success(r) => results += 999 + case Failure(ex) => results += -999 + } + } + + myObservable().subscribe((i: Int) => results += i) + results should equal((1 to 100) :+ 999) + + results = ArrayBuffer[Int]() + var errorSeen: Option[Throwable] = None + myObservable(true).subscribe((i: Int) => results += i, (fail: Throwable) => errorSeen = Some(fail)) + errorSeen.getOrElse(None) shouldBe a[Throwable] + results should equal((1 to 50) :+ -999) + + results = ArrayBuffer[Int]() + var completed = false + myObservable().subscribe((i: Int) => results += i, (t: Throwable) => t, () => completed = true) + results should equal((1 to 100) :+ 999) + completed should equal(true) + } + + it should "work with for comprehensions" in { + def f = observable(1 to 5) + def g = observable(100 to 101) + val h = for { + x: Int <- f // returns Observable(1 to 5) + y: Int <- g // returns Observable(100 to 101) + } yield x + y + val expectedResults = (1 to 5).flatMap(i => (100 to 101).map(x => x + i)) + + var results = ArrayBuffer[Int]() + var completed = false + h.subscribe((s: Int) => results += s, (t: Throwable) => t, () => completed = true) + results should equal(expectedResults) +
completed should equal(true) + + results = ArrayBuffer[Int]() + completed = false + val fh: Observable[Int] = f flatMap { (x: Int) => + g map { (y: Int) => + x + y + } + } + fh.subscribe((s: Int) => results += s, (t: Throwable) => t, () => completed = true) + results should equal(expectedResults) + completed should equal(true) + } + + it should "work with andThen as expected" in { + var results = ArrayBuffer[Int]() + var completed = false + observable() andThen { + case r => throw new MongoException("Exception") + } andThen { + case Success(_) => results += 999 + case Failure(t) => results += -999 + } subscribe ((s: Int) => results += s, (t: Throwable) => t, () => completed = true) + + results should equal((1 to 100) :+ 999) + completed should equal(true) + + results = ArrayBuffer[Int]() + completed = false + observable(fail = true) andThen { + case r => throw new MongoException("Exception") + } andThen { + case Success(_) => results += 999 + case Failure(t) => results += -999 + } subscribe ((s: Int) => results += s, (t: Throwable) => t, () => completed = true) + + results should equal((1 to 50) :+ -999) + completed should equal(false) + } + + it should "convert to a Future" in { + var results = ArrayBuffer[Int]() + var errorSeen: Option[Throwable] = None + val happyFuture = observable().toFuture() + var latch = new CountDownLatch(1) + + happyFuture.onComplete({ + case Success(res) => + results ++= res + latch.countDown() + case Failure(throwable) => errorSeen = Some(throwable) + }) + latch.await(10, TimeUnit.SECONDS) + results should equal(1 to 100) + errorSeen.isEmpty should equal(true) + + results = ArrayBuffer[Int]() + latch = new CountDownLatch(1) + val unhappyFuture = observable(fail = true).toFuture() + unhappyFuture.onComplete({ + case Success(res) => results ++= res + case Failure(throwable) => + errorSeen = Some(throwable) + latch.countDown() + }) + intercept[MongoException] { + Await.result(unhappyFuture, Duration(10, TimeUnit.SECONDS)) + } + latch.await(10, TimeUnit.SECONDS) + results should equal(List()) + errorSeen.nonEmpty should equal(true) + errorSeen.getOrElse(None) shouldBe a[Throwable] + } + + it should "provide a headOption method" in { + Await.result(observable().headOption(), Duration(10, TimeUnit.SECONDS)) should equal(Some(1)) + Await.result(observable(fail = true).headOption(), Duration(10, TimeUnit.SECONDS)) should equal(Some(1)) + + intercept[MongoException] { + Await.result( + TestObservable(observable(1 to 10), failOn = 1).headOption(), + Duration(10, TimeUnit.SECONDS) + ) + } + + Await.result(TestObservable(Observable(List[Int]())).headOption(), Duration(10, TimeUnit.SECONDS)) should equal( + None + ) + } + + it should "provide a head method" in { + Await.result(observable().head(), Duration(10, TimeUnit.SECONDS)) should equal(1) + Await.result(observable(fail = true).head(), Duration(10, TimeUnit.SECONDS)) should equal(1) + + intercept[MongoException] { + Await.result(TestObservable(observable(1 to 10), failOn = 1).head(), Duration(10, TimeUnit.SECONDS)) + } + + Option(Await.result(TestObservable(Observable(List[Int]())).head(), Duration(10, TimeUnit.SECONDS))) should equal( + None + ) + } + + it should "have a completeWithUnit method" in { + var nextCalled = false + observable().completeWithUnit().subscribe(_ => nextCalled = true) + nextCalled shouldBe true + + nextCalled = false + observable(0 to 0).completeWithUnit().subscribe(_ => nextCalled = true) + nextCalled shouldBe true + + nextCalled = false + var errorSeen: Option[Throwable] = None + observable(1 
to 100, fail = true) + .completeWithUnit() + .subscribe((_: Unit) => nextCalled = true, (fail: Throwable) => errorSeen = Some(fail)) + nextCalled shouldBe false + errorSeen.getOrElse(None) shouldBe a[Throwable] + + nextCalled = false + var completed = false + observable().completeWithUnit().subscribe(_ => nextCalled = true, (t: Throwable) => t, () => completed = true) + nextCalled shouldBe true + completed shouldBe true + + nextCalled = false + completed = false + observable() + .completeWithUnit() + .subscribe(new Observer[Unit] { + override def onSubscribe(subscription: Subscription): Unit = subscription.request(1) + override def onNext(result: Unit): Unit = nextCalled = true + override def onError(e: Throwable): Unit = completed = false + override def onComplete(): Unit = completed = true + }) + nextCalled shouldBe true + completed shouldBe true + } + + it should "not stack overflow when using flatMap with execution contexts" in { + val altContextObservable = + Observable(1 to 10000).observeOn(ExecutionContext.global).flatMap((res: Int) => Observable(Seq(res))) + Await.result(altContextObservable.toFuture(), Duration(10, TimeUnit.SECONDS)) should equal(1 to 10000) + } + + def badObservable[T](t: T*): Observable[T] = { + new Observable[T] { + override def subscribe(observer: Observer[_ >: T]): Unit = { + for (tee <- t) { + observer.onNext(tee) + } + observer.onComplete() + } + } + } + + val observableErrorScenarios = + Table( + "Bad Observables", + () => badObservable(1, 2, 3).flatMap((i: Int) => badObservable(i, i)), + () => badObservable(1, 2, 3).map((i: Int) => badObservable(i, i)), + () => + badObservable(1, 2, 3).andThen { + case Success(r) => 1 + case Failure(ex) => 0 + }, + () => badObservable(1, 2, 3).collect(), + () => badObservable(1, 2, 3).foldLeft(0)((v: Int, i: Int) => v + i), + () => badObservable(1, 2, 3).recoverWith { case t: Throwable => badObservable(1, 2, 3) }, + () => badObservable(1, 2, 3).zip(badObservable(1, 2, 3)), + () => badObservable(1, 2, 3).filter((i: Int) => i > 1) + ) + + it should "work with Java Subscriber" in { + var results = ArrayBuffer[Int]() + var errorSeen: Option[Throwable] = None + var latch = new CountDownLatch(1) + + var subscription: Option[Subscription] = None + val observer = new Subscriber[Int]() { + override def onError(e: Throwable): Unit = { + errorSeen = Some(e) + latch.countDown() + } + + override def onSubscribe(sub: Subscription): Unit = { + subscription = Some(sub) + sub.request(Long.MaxValue) + } + + override def onComplete(): Unit = latch.countDown() + + override def onNext(result: Int): Unit = results += result + } + + observable().subscribe(observer) + latch.await(10, TimeUnit.SECONDS) + results should equal(1 to 100) + + subscription = None + results = ArrayBuffer[Int]() + errorSeen = None + latch = new CountDownLatch(1) + + observable(fail = true).subscribe(observer) + latch.await(10, TimeUnit.SECONDS) + results should equal(1 to 50) + errorSeen.nonEmpty should equal(true) + errorSeen.getOrElse(None) shouldBe a[Throwable] + } + + it should "be able to use custom execution contexts" in { + var originalThreadId: Long = 0 + var observeOnThreadId1: Long = 0 + var observeOnThreadId2: Long = 0 + val ctx1 = ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(5)) + val ctx2 = ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(5)) + + Await.result( + observable() + .map((i: Int) => { + originalThreadId = Thread.currentThread().getId + i + }) + .observeOn(ctx1) + .map((i: Int) => { + observeOnThreadId1
= Thread.currentThread().getId + i + }) + .observeOn(ctx2) + .map((i: Int) => { + observeOnThreadId2 = Thread.currentThread().getId + i + }) + .toFuture(), + Duration(10, TimeUnit.SECONDS) + ) + ctx1.shutdown() + ctx2.shutdown() + + originalThreadId should not be observeOnThreadId1 + observeOnThreadId1 should not be observeOnThreadId2 + } + + def observable(from: Iterable[Int] = (1 to 100), fail: Boolean = false): Observable[Int] = { + if (fail) { + TestObservable(from, failOn = 51) + } else { + TestObservable(from) + } + } + + "Observers" should "support Reactive stream Subscriptions" in { + val observer = new Observer[Int]() { + override def onError(e: Throwable): Unit = {} + + override def onSubscribe(subscription: Subscription): Unit = { + subscription.request(1) + } + + override def onComplete(): Unit = {} + + override def onNext(result: Int): Unit = {} + } + + var requested = 0 + val subscription = new Subscription { + var cancelled = false + def isCancelled: Boolean = cancelled + + override def request(n: Long): Unit = requested += 1 + + override def cancel(): Unit = cancelled = true + } + + observer.onSubscribe(subscription) + subscription.isCancelled should equal(false) + requested should equal(1) + subscription.cancel() + subscription.isCancelled should equal(true) + } + + "Observers" should "automatically subscribe and request Long.MaxValue" in { + val observer = new Observer[Int]() { + override def onError(e: Throwable): Unit = {} + + override def onComplete(): Unit = {} + + override def onNext(result: Int): Unit = {} + } + + var requested: Long = 0 + val subscription: Subscription = new Subscription { + override def request(n: Long): Unit = requested = n + + override def cancel(): Unit = {} + } + + observer.onSubscribe(subscription) + requested should equal(Long.MaxValue) + } + +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/internal/SingleObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/internal/SingleObservableSpec.scala new file mode 100644 index 00000000000..0483a5fa9a3 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/internal/SingleObservableSpec.scala @@ -0,0 +1,37 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala.internal + +import org.mongodb.scala.{ BaseSpec, SingleObservable } + +class SingleObservableSpec extends BaseSpec { + + "ScalaObservable" should "allow for inline subscription" in { + var result = 0 + observable().subscribe((res: Int) => result = res) + result should equal(42) + + var thrown = false + observable() + .map(_ => throw new RuntimeException("Failed")) + .subscribe((_: Int) => (), (_: Throwable) => thrown = true) + thrown should equal(true) + } + + def observable(): SingleObservable[Int] = SingleObservable(42) + +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/internal/TestObservable.scala b/driver-scala/src/test/scala/org/mongodb/scala/internal/TestObservable.scala new file mode 100644 index 00000000000..3f13a2ba0ca --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/internal/TestObservable.scala @@ -0,0 +1,70 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.internal + +import com.mongodb.MongoException +import org.mongodb.scala.{ Observable, Observer, Subscription } + +object TestObservable { + + def apply(from: Iterable[Int]): TestObservable = { + new TestObservable(Observable(from)) + } + + def apply(from: Iterable[Int], failOn: Int): TestObservable = { + new TestObservable(Observable(from), failOn) + } + + def apply(from: Iterable[Int], failOn: Int, errorMessage: String): TestObservable = { + new TestObservable(Observable(from), failOn, errorMessage) + } +} + +case class TestObservable( + delegate: Observable[Int] = Observable((1 to 100)), + failOn: Int = Int.MaxValue, + errorMessage: String = "Failed" +) extends Observable[Int] { + + override def subscribe(observer: Observer[_ >: Int]): Unit = { + delegate.subscribe( + new Observer[Int] { + var failed = false + var subscription: Option[Subscription] = None + override def onError(throwable: Throwable): Unit = observer.onError(throwable) + + override def onSubscribe(sub: Subscription): Unit = { + subscription = Some(sub) + observer.onSubscribe(sub) + } + + override def onComplete(): Unit = if (!failed) observer.onComplete() + + override def onNext(tResult: Int): Unit = { + if (!failed) { + if (tResult == failOn) { + failed = true + onError(new MongoException(errorMessage)) + } else { + observer.onNext(tResult) + } + } + } + } + ) + } +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/internal/TestObserver.scala b/driver-scala/src/test/scala/org/mongodb/scala/internal/TestObserver.scala new file mode 100644 index 00000000000..d564792ecf7 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/internal/TestObserver.scala @@ -0,0 +1,110 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.internal + +import scala.collection.mutable + +import org.mongodb.scala.{ Observer, Subscription } + +object TestObserver { + + def apply[A](): TestObserver[A] = { + new TestObserver[A](new Observer[A] { + override def onError(throwable: Throwable): Unit = {} + + override def onSubscribe(subscription: Subscription): Unit = {} + + override def onComplete(): Unit = {} + + override def onNext(tResult: A): Unit = {} + }) + } + +} + +case class TestObserver[A](delegate: Observer[A]) extends Observer[A] { + @volatile var subscription: Option[OneAtATimeSubscription] = None + @volatile var error: Option[Throwable] = None + @volatile var completed: Boolean = false + @volatile var terminated: Boolean = false + val results: mutable.ListBuffer[A] = mutable.ListBuffer[A]() + + override def onError(throwable: Throwable): Unit = { + require(!terminated, "onError called after the observer has already been terminated") + terminated = true + error = Some(throwable) + delegate.onError(throwable) + } + + override def onSubscribe(sub: Subscription): Unit = { + require(subscription.isEmpty, "observer already subscribed to") + val oneAtATimeSubscription = OneAtATimeSubscription(sub) + subscription = Some(oneAtATimeSubscription) + delegate.onSubscribe(oneAtATimeSubscription) + } + + override def onComplete(): Unit = { + require(!terminated, "onComplete called after the observer has already been terminated") + terminated = true + delegate.onComplete() + completed = true + } + + override def onNext(result: A): Unit = { + require(!terminated, "onNext called after the observer has already been terminated") + this.synchronized { + results.append(result) + } + delegate.onNext(result) + subscription.foreach(_.innerRequestNext()) + } + + case class OneAtATimeSubscription(inner: Subscription) extends Subscription { + + @volatile var demand: Long = 0 + + override def request(n: Long): Unit = { + require(n > 0L, s"Number requested must be greater than zero: $n") + addDemand(n) + innerRequestNext() + } + + override def unsubscribe(): Unit = inner.unsubscribe() + + override def isUnsubscribed: Boolean = inner.isUnsubscribed + + def innerRequestNext(): Unit = { + if (!terminated && !isUnsubscribed && addDemand(-1) > 0) { + inner.request(1) + } + } + + private def addDemand(extraDemand: Long): Long = { + this.synchronized { + demand += extraDemand + if (demand > 0) { + demand + } else if (demand < 0 && extraDemand > 0) { + demand = Long.MaxValue + demand + } else { + 0 + } + } + } + } +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/internal/UnitObservableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/internal/UnitObservableSpec.scala new file mode 100644 index 00000000000..7b0655de07a --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/internal/UnitObservableSpec.scala @@ -0,0 +1,85 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.mongodb.scala.internal + +import org.mongodb.scala.{ BaseSpec, Observer, Subscription } + +import scala.collection.mutable.ArrayBuffer + +class UnitObservableSpec extends BaseSpec { + it should "emit exactly one element" in { + var onNextCounter = 0 + var errorActual: Option[Throwable] = None + var completed = false + UnitObservable(TestObservable(1 to 9)).subscribe( + (_: Unit) => onNextCounter += 1, + (error: Throwable) => errorActual = Some(error), + () => completed = true + ) + onNextCounter shouldBe 1 + errorActual shouldBe None + completed shouldBe true + } + + it should "signal the underlying error" in { + var onNextCounter = 0 + val errorMessageExpected = "error message" + var errorActual: Option[Throwable] = None + var completed = false + UnitObservable(TestObservable(1 to 9, failOn = 5, errorMessage = errorMessageExpected)).subscribe( + (_: Unit) => onNextCounter += 1, + (error: Throwable) => errorActual = Some(error), + () => completed = true + ) + onNextCounter shouldBe 0 + errorActual.map(e => e.getMessage) shouldBe Some(errorMessageExpected) + completed shouldBe false + } + + it should "work with explicit request" in { + var onNextCounter = 0 + var errorActual: Option[Throwable] = None + var completed = false + UnitObservable(TestObservable(1 to 9)).subscribe(new Observer[Unit] { + override def onSubscribe(subscription: Subscription): Unit = subscription.request(1) + + override def onNext(result: Unit): Unit = onNextCounter += 1 + + override def onError(error: Throwable): Unit = errorActual = Some(error) + + override def onComplete(): Unit = completed = true + }) + onNextCounter shouldBe 1 + errorActual shouldBe None + completed shouldBe true + } + + it should "work with for comprehensions" in { + val observable = for { + _ <- UnitObservable(TestObservable(1 to 2)) + _ <- UnitObservable(TestObservable(20 to 30)) + } yield List(1, 2, 3) + val items = ArrayBuffer[Int]() + var completed = false + observable.subscribe( + (item: List[Int]) => item.foreach(i => items += i), + (error: Throwable) => error, + () => completed = true + ) + items should equal(List(1, 2, 3)) + completed should equal(true) + } +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/model/AggregatesSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/model/AggregatesSpec.scala new file mode 100644 index 00000000000..70ac84065e0 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/model/AggregatesSpec.scala @@ -0,0 +1,886 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala.model + +import com.mongodb.client.model.GeoNearOptions.geoNearOptions +import com.mongodb.client.model.fill.FillOutputField + +import java.lang.reflect.Modifier._ +import org.bson.BsonDocument +import org.mongodb.scala.bson.BsonArray +import org.mongodb.scala.bson.collection.immutable.Document +import org.mongodb.scala.bson.conversions.Bson +import org.mongodb.scala.model.Accumulators._ +import org.mongodb.scala.model.Aggregates._ +import org.mongodb.scala.model.MongoTimeUnit.DAY +import org.mongodb.scala.model.Projections._ +import org.mongodb.scala.model.Sorts._ +import org.mongodb.scala.model.Windows.Bound.{ CURRENT, UNBOUNDED } +import org.mongodb.scala.model.Windows.{ documents, range } +import org.mongodb.scala.model.densify.DensifyRange.fullRangeWithStep +import org.mongodb.scala.model.fill.FillOptions.fillOptions +import org.mongodb.scala.model.geojson.{ Point, Position } +import org.mongodb.scala.model.search.SearchCount.total +import org.mongodb.scala.model.search.SearchFacet.stringFacet +import org.mongodb.scala.model.search.SearchHighlight.paths +import org.mongodb.scala.model.search.SearchCollector +import org.mongodb.scala.model.search.SearchOperator.exists +import org.mongodb.scala.model.search.SearchOptions.searchOptions +import org.mongodb.scala.model.search.SearchPath.{ fieldPath, wildcardPath } +import org.mongodb.scala.model.search.VectorSearchOptions.{ approximateVectorSearchOptions, exactVectorSearchOptions } +import org.mongodb.scala.{ BaseSpec, MongoClient, MongoNamespace } + +class AggregatesSpec extends BaseSpec { + val registry = MongoClient.DEFAULT_CODEC_REGISTRY + + def toBson(bson: Bson): Document = + Document(bson.toBsonDocument(classOf[BsonDocument], MongoClient.DEFAULT_CODEC_REGISTRY)) + + "Aggregates" should "have the same methods as the wrapped Aggregates" in { + val wrapped = classOf[com.mongodb.client.model.Aggregates].getDeclaredMethods + .filter(f => isPublic(f.getModifiers)) + .map(_.getName) + .toSet + val aliases = Set("filter") + val local = Aggregates.getClass.getDeclaredMethods + .filter(f => isPublic(f.getModifiers)) + .map(_.getName) + .toSet -- aliases + + local should equal(wrapped) + } + + it should "have the same methods as the wrapped Accumulators" in { + val wrapped = classOf[com.mongodb.client.model.Accumulators].getDeclaredMethods + .filter(f => isPublic(f.getModifiers)) + .map(_.getName) + .toSet + val local = Accumulators.getClass.getDeclaredMethods.filter(f => isPublic(f.getModifiers)).map(_.getName).toSet + local should equal(wrapped) + } + + it should "render $addFields" in { + toBson(addFields(Field("newField", "hello"))) should equal(Document("""{$addFields: { "newField": "hello"}}""")) + } + + it should "render $set" in { + toBson(set(Field("newField", "hello"))) should equal(Document("""{$set: { "newField": "hello"}}""")) + } + + // scalastyle:off magic.number + it should "render $bucket" in { + toBson(bucket("$screenSize", 0, 24, 32, 50, 100000)) should equal( + Document("""{$bucket: { groupBy: "$screenSize", boundaries: [0, 24, 32, 50, 100000] } } """) + ) + + toBson(bucket("$screenSize", new BucketOptions().defaultBucket("other"), 0, 24, 32, 50, 100000)) should equal( + Document("""{$bucket: { groupBy: "$screenSize", boundaries: [0, 24, 32, 50, 100000], default: "other"} } """) + ) + } + + it should "render $bucketAuto" in { + toBson(bucketAuto("$price", 4)) should equal(Document("""{ $bucketAuto: { groupBy: "$price", buckets: 4 } }""")) + toBson( + bucketAuto( + "$price", + 4, + 
BucketAutoOptions() + .granularity(BucketGranularity.R5) + .output(sum("count", 1), avg("avgPrice", "$price")) + ) + ) should equal(Document("""{$bucketAuto: { + groupBy: "$price", + buckets: 4, + output: { + count: {$sum: 1}, + avgPrice: {$avg: "$price"}, + }, + granularity: "R5" + } + }""")) + } + + it should "render $count" in { + toBson(count()) should equal(Document("""{$count: "count"}""")) + toBson(count("total")) should equal(Document("""{$count: "total"}""")) + } + + it should "render $match" in { + toBson(`match`(Filters.eq("author", "dave"))) should equal(Document("""{ $match : { author : "dave" } }""")) + toBson(filter(Filters.eq("author", "dave"))) should equal(Document("""{ $match : { author : "dave" } }""")) + } + + it should "render $facet" in { + toBson( + facet( + Facet( + "Screen Sizes", + unwind("$attributes"), + filter(Filters.equal("attributes.name", "screen size")), + group(null, sum("count", 1)) + ), + Facet( + "Manufacturer", + filter(Filters.equal("attributes.name", "manufacturer")), + group("$attributes.value", sum("count", 1)), + sort(descending("count")), + limit(5) + ) + ) + ) should equal( + Document( + """{$facet: { "Screen Sizes": [{$unwind: "$attributes"}, {$match: {"attributes.name": "screen size"}}, + {$group: { _id: null, count: {$sum: 1} }}], + "Manufacturer": [ {$match: {"attributes.name": "manufacturer"}}, {$group: {_id: "$attributes.value", count: {$sum: 1}}}, + {$sort: {count: -1}}, {$limit: 5}]}}""" + ) + ) + } + + it should "render $graphLookup" in { + toBson(graphLookup("contacts", "$friends", "friends", "name", "socialNetwork")) should equal( + Document( + """{ $graphLookup:{ from: "contacts", startWith: "$friends", connectFromField: "friends", connectToField: "name", + | as: "socialNetwork" } }""".stripMargin + ) + ) + + toBson( + graphLookup("contacts", "$friends", "friends", "name", "socialNetwork", GraphLookupOptions().maxDepth(1)) + ) should equal( + Document( + """{ $graphLookup: { from: "contacts", startWith: "$friends", connectFromField: "friends", connectToField: "name", + | as: "socialNetwork", maxDepth: 1 } }""".stripMargin + ) + ) + + toBson( + graphLookup( + "contacts", + "$friends", + "friends", + "name", + "socialNetwork", + GraphLookupOptions().maxDepth(1).depthField("master") + ) + ) should equal( + Document( + """{ $graphLookup: { from: "contacts", startWith: "$friends", connectFromField: "friends", connectToField: "name", + | as: "socialNetwork", maxDepth: 1, depthField: "master" } }""".stripMargin + ) + ) + + toBson( + graphLookup( + "contacts", + "$friends", + "friends", + "name", + "socialNetwork", + GraphLookupOptions() + .depthField("master") + ) + ) should equal( + Document( + """{ $graphLookup: { from: "contacts", startWith: "$friends", connectFromField: "friends", + | connectToField: "name", as: "socialNetwork", depthField: "master" } }""".stripMargin + ) + ) + } + + it should "render $project" in { + toBson(project(fields(Projections.include("title", "author"), computed("lastName", "$author.last")))) should equal( + Document("""{ $project : { title : 1 , author : 1, lastName : "$author.last" } }""") + ) + } + + it should "render $replaceRoot" in { + toBson(replaceRoot("$a1")) should equal(Document("""{$replaceRoot: {newRoot: "$a1"}}""")) + } + + it should "render $sort" in { + toBson(sort(ascending("title", "author"))) should equal(Document("""{ $sort : { title : 1 , author : 1 } }""")) + } + + it should "render $sortByCount" in { + toBson(sortByCount("someField")) should equal(Document("""{ $sortByCount : 
"someField" }""")) + } + + it should "render $limit" in { + toBson(limit(5)) should equal(Document("""{ $limit : 5 }""")) + } + + it should "render $lookup" in { + toBson(lookup("from", "localField", "foreignField", "as")) should equal( + Document("""{ $lookup : { from: "from", localField: "localField", foreignField: "foreignField", as: "as" } }""") + ) + + val pipeline = Seq(filter(Filters.expr(Filters.eq("x", 1)))) + toBson(lookup("from", pipeline, "as")) == + Document("""{ $lookup : { from: "from", + pipeline : [{ $match : { $expr: { $eq : [ "x" , "1" ]}}}], + as: "as" }}""") + + toBson(lookup("from", Seq(Variable("var1", "expression1")), pipeline, "as")) == + Document("""{ $lookup : { from: "from", + let: { var1: "expression1" }, + pipeline : [{ $match : { $expr: { $eq : [ "x" , "1" ]}}}], + as: "as" }}""") + + } + + it should "render $skip" in { + toBson(skip(5)) should equal(Document("""{ $skip : 5 }""")) + } + + it should "render $sample" in { + toBson(sample(5)) should equal(Document("""{ $sample : { size: 5} }""")) + } + + it should "render $unwind" in { + toBson(unwind("$sizes")) should equal(Document("""{ $unwind : "$sizes" }""")) + toBson(unwind("$sizes", UnwindOptions().preserveNullAndEmptyArrays(null))) should equal( + Document("""{ $unwind : { path : "$sizes" } }""") + ) + toBson(unwind("$sizes", UnwindOptions().preserveNullAndEmptyArrays(false))) should equal( + Document(""" + { $unwind : { path : "$sizes", preserveNullAndEmptyArrays : false } }""") + ) + toBson(unwind("$sizes", UnwindOptions().preserveNullAndEmptyArrays(true))) should equal( + Document(""" + { $unwind : { path : "$sizes", preserveNullAndEmptyArrays : true } }""") + ) + toBson(unwind("$sizes", UnwindOptions().includeArrayIndex(null))) should equal( + Document("""{ $unwind : { path : "$sizes" } }""") + ) + toBson(unwind("$sizes", UnwindOptions().includeArrayIndex("$a"))) should equal( + Document(""" + { $unwind : { path : "$sizes", includeArrayIndex : "$a" } }""") + ) + toBson(unwind("$sizes", UnwindOptions().preserveNullAndEmptyArrays(true).includeArrayIndex("$a"))) should equal( + Document(""" + { $unwind : { path : "$sizes", preserveNullAndEmptyArrays : true, includeArrayIndex : "$a" } }""") + ) + } + + it should "render $out" in { + toBson(out("authors")) should equal(Document("""{ $out : "authors" }""")) + } + + it should "render $merge" in { + toBson(merge("authors")) should equal(Document("""{ $merge : {into: "authors" }}""")) + toBson(merge(MongoNamespace("db1", "authors"))) should equal( + Document("""{ $merge : {into: {db: "db1", coll: "authors" }}}""") + ) + + toBson(merge("authors", MergeOptions().uniqueIdentifier("ssn"))) should equal( + Document("""{ $merge : {into: "authors", on: "ssn" }}""") + ) + + toBson(merge("authors", MergeOptions().uniqueIdentifier("ssn", "otherId"))) should equal( + Document("""{ $merge : {into: "authors", on: ["ssn", "otherId"] }}""") + ) + + toBson( + merge( + "authors", + MergeOptions().whenMatched(MergeOptions.WhenMatched.REPLACE) + ) + ) should equal( + Document("""{ $merge : {into: "authors", whenMatched: "replace" }}""") + ) + toBson( + merge( + "authors", + MergeOptions().whenMatched(MergeOptions.WhenMatched.KEEP_EXISTING) + ) + ) should equal( + Document("""{ $merge : {into: "authors", whenMatched: "keepExisting" }}""") + ) + toBson( + merge( + "authors", + MergeOptions().whenMatched(MergeOptions.WhenMatched.MERGE) + ) + ) should equal( + Document("""{ $merge : {into: "authors", whenMatched: "merge" }}""") + ) + toBson( + merge( + "authors", + 
MergeOptions().whenMatched(MergeOptions.WhenMatched.FAIL) + ) + ) should equal( + Document("""{ $merge : {into: "authors", whenMatched: "fail" }}""") + ) + + toBson( + merge( + "authors", + MergeOptions().whenNotMatched(MergeOptions.WhenNotMatched.INSERT) + ) + ) should equal( + Document("""{ $merge : {into: "authors", whenNotMatched: "insert" }}""") + ) + toBson( + merge( + "authors", + MergeOptions().whenNotMatched(MergeOptions.WhenNotMatched.DISCARD) + ) + ) should equal( + Document("""{ $merge : {into: "authors", whenNotMatched: "discard" }}""") + ) + toBson( + merge( + "authors", + MergeOptions().whenNotMatched(MergeOptions.WhenNotMatched.FAIL) + ) + ) should equal( + Document("""{ $merge : {into: "authors", whenNotMatched: "fail" }}""") + ) + + toBson( + merge( + "authors", + MergeOptions() + .whenMatched(MergeOptions.WhenMatched.PIPELINE) + .variables(Variable("y", 2), Variable("z", 3)) + .whenMatchedPipeline(addFields(Field("x", 1))) + ) + ) should equal( + Document("""{ $merge : {into: "authors", let: {y: 2, z: 3}, whenMatched: [{$addFields: {x: 1}}]}}""") + ) + } + + it should "render $group" in { + toBson(group("$customerId")) should equal(Document("""{ $group : { _id : "$customerId" } }""")) + toBson(group(null)) should equal(Document("""{ $group : { _id : null } }""")) + + toBson( + group(Document("""{ month: { $month: "$date" }, day: { $dayOfMonth: "$date" }, year: { $year: "$date" } }""")) + ) should equal( + Document( + """{ $group : { _id : { month: { $month: "$date" }, day: { $dayOfMonth: "$date" }, year: { $year: "$date" } } } }""" + ) + ) + + val groupDocument = Document("""{ + $group : { + _id : null, + sum: { $sum: { $multiply: [ "$price", "$quantity" ] } }, + avg: { $avg: "$quantity" }, + percentile: { $percentile: { input: "$quantity", method: "approximate", p: [0.95, 0.3] } }, + median: { $median: { input: "$quantity", method: "approximate" } }, + min: { $min: "$quantity" }, + minN: { $minN: { input: "$quantity", n: 2 } }, + max: { $max: "$quantity" }, + maxN: { $maxN: { input: "$quantity", n: 2 } }, + first: { $first: "$quantity" }, + firstN: { $firstN: { input: "$quantity", n: 2 } }, + top: { $top: { sortBy: { quantity: 1 }, output: "$quantity" } }, + topN: { $topN: { sortBy: { quantity: 1 }, output: "$quantity", n: 2 } }, + last: { $last: "$quantity" }, + lastN: { $lastN: { input: "$quantity", n: 2 } }, + bottom: { $bottom: { sortBy: { quantity: 1 }, output: ["$quantity", "$quality"] } }, + bottomN: { $bottomN: { sortBy: { quantity: 1 }, output: ["$quantity", "$quality"], n: 2 } }, + all: { $push: "$quantity" }, + unique: { $addToSet: "$quantity" }, + stdDevPop: { $stdDevPop: "$quantity" }, + stdDevSamp: { $stdDevSamp: "$quantity" } + } + }""") + + toBson( + group( + null, + sum("sum", Document("""{ $multiply: [ "$price", "$quantity" ] }""")), + avg("avg", "$quantity"), + percentile("percentile", "$quantity", List(0.95, 0.3), QuantileMethod.approximate), + median("median", "$quantity", QuantileMethod.approximate), + min("min", "$quantity"), + minN("minN", "$quantity", 2), + max("max", "$quantity"), + maxN("maxN", "$quantity", 2), + first("first", "$quantity"), + firstN("firstN", "$quantity", 2), + top("top", ascending("quantity"), "$quantity"), + topN("topN", ascending("quantity"), "$quantity", 2), + last("last", "$quantity"), + lastN("lastN", "$quantity", 2), + bottom("bottom", ascending("quantity"), List("$quantity", "$quality")), + bottomN("bottomN", ascending("quantity"), List("$quantity", "$quality"), 2), + push("all", "$quantity"), + addToSet("unique", 
"$quantity"), + stdDevPop("stdDevPop", "$quantity"), + stdDevSamp("stdDevSamp", "$quantity") + ) + ) should equal(groupDocument) + } + + it should "render $setWindowFields" in { + val window: Window = documents(1, 2) + toBson( + setWindowFields( + Some("$partitionByField"), + Some(ascending("sortByField")), + WindowOutputFields.of( + BsonField.apply( + "newField00", + Document( + "$sum" -> "$field00", + "window" -> Windows.of(Document("range" -> BsonArray(1, "current"))).toBsonDocument + ) + ) + ), + WindowOutputFields.sum("newField01", "$field01", Some(range(1, CURRENT))), + WindowOutputFields.avg("newField02", "$field02", Some(range(UNBOUNDED, 1))), + WindowOutputFields.percentile( + "newField02P", + "$field02P", + List(0.3, 0.9), + QuantileMethod.approximate, + Some(range(UNBOUNDED, 1)) + ), + WindowOutputFields.median("newField02M", "$field02M", QuantileMethod.approximate, Some(range(UNBOUNDED, 1))), + WindowOutputFields.stdDevSamp("newField03", "$field03", Some(window)), + WindowOutputFields.stdDevPop("newField04", "$field04", Some(window)), + WindowOutputFields.min("newField05", "$field05", Some(window)), + WindowOutputFields.minN("newField05N", "$field05N", 2, Some(window)), + WindowOutputFields.max("newField06", "$field06", Some(window)), + WindowOutputFields.maxN("newField06N", "$field06N", 2, Some(window)), + WindowOutputFields.count("newField07", Some(window)), + WindowOutputFields.derivative("newField08", "$field08", window), + WindowOutputFields.timeDerivative("newField09", "$field09", window, DAY), + WindowOutputFields.integral("newField10", "$field10", window), + WindowOutputFields.timeIntegral("newField11", "$field11", window, DAY), + WindowOutputFields.covarianceSamp("newField12", "$field12_1", "$field12_2", Some(window)), + WindowOutputFields.covariancePop("newField13", "$field13_1", "$field13_2", Some(window)), + WindowOutputFields.expMovingAvg("newField14", "$field14", 3), + WindowOutputFields.expMovingAvg("newField15", "$field15", 0.5), + WindowOutputFields.push("newField16", "$field16", Some(window)), + WindowOutputFields.addToSet("newField17", "$field17", Some(window)), + WindowOutputFields.first("newField18", "$field18", Some(window)), + WindowOutputFields.firstN("newField18N", "$field18N", 2, Some(window)), + WindowOutputFields.last("newField19", "$field19", Some(window)), + WindowOutputFields.lastN("newField19N", "$field19N", 2, Some(window)), + WindowOutputFields.shift("newField20", "$field20", Some("defaultConstantValue"), -3), + WindowOutputFields.documentNumber("newField21"), + WindowOutputFields.rank("newField22"), + WindowOutputFields.denseRank("newField23"), + WindowOutputFields.bottom("newField24", ascending("sortByField"), "$field24", Some(window)), + WindowOutputFields.bottomN("newField24N", ascending("sortByField"), "$field24N", 2, Some(window)), + WindowOutputFields.top("newField25", ascending("sortByField"), "$field25", Some(window)), + WindowOutputFields.topN("newField25N", ascending("sortByField"), "$field25N", 2, Some(window)), + WindowOutputFields.locf("newField26", "$field26"), + WindowOutputFields.linearFill("newField27", "$field27") + ) + ) should equal( + Document( + """{ + "$setWindowFields": { + "partitionBy": "$partitionByField", + "sortBy": { "sortByField" : 1 }, + "output": { + "newField00": { "$sum": "$field00", "window": { "range": [{"$numberInt": "1"}, "current"] } }, + "newField01": { "$sum": "$field01", "window": { "range": [{"$numberLong": "1"}, "current"] } }, + "newField02": { "$avg": "$field02", "window": { "range": ["unbounded", 
{"$numberLong": "1"}] } }, + "newField02P": { "$percentile": { input: "$field02P", p: [0.3, 0.9], method: "approximate"} "window": { "range": ["unbounded", {"$numberLong": "1"}] } }, + "newField02M": { "$median": { input: "$field02M", method: "approximate"} "window": { "range": ["unbounded", {"$numberLong": "1"}] } }, + "newField03": { "$stdDevSamp": "$field03", "window": { "documents": [1, 2] } }, + "newField04": { "$stdDevPop": "$field04", "window": { "documents": [1, 2] } }, + "newField05": { "$min": "$field05", "window": { "documents": [1, 2] } }, + "newField05N": { "$minN": { "input": "$field05N", "n": 2 }, "window": { "documents": [1, 2] } }, + "newField06": { "$max": "$field06", "window": { "documents": [1, 2] } }, + "newField06N": { "$maxN": { "input": "$field06N", "n": 2 }, "window": { "documents": [1, 2] } }, + "newField07": { "$count": {}, "window": { "documents": [1, 2] } }, + "newField08": { "$derivative": { "input": "$field08" }, "window": { "documents": [1, 2] } }, + "newField09": { "$derivative": { "input": "$field09", "unit": "day" }, "window": { "documents": [1, 2] } }, + "newField10": { "$integral": { "input": "$field10"}, "window": { "documents": [1, 2] } }, + "newField11": { "$integral": { "input": "$field11", "unit": "day" }, "window": { "documents": [1, 2] } }, + "newField12": { "$covarianceSamp": ["$field12_1", "$field12_2"], "window": { "documents": [1, 2] } }, + "newField13": { "$covariancePop": ["$field13_1", "$field13_2"], "window": { "documents": [1, 2] } }, + "newField14": { "$expMovingAvg": { "input": "$field14", "N": 3 } }, + "newField15": { "$expMovingAvg": { "input": "$field15", "alpha": 0.5 } }, + "newField16": { "$push": "$field16", "window": { "documents": [1, 2] } }, + "newField17": { "$addToSet": "$field17", "window": { "documents": [1, 2] } }, + "newField18": { "$first": "$field18", "window": { "documents": [1, 2] } }, + "newField18N": { "$firstN": { "input": "$field18N", "n": 2 }, "window": { "documents": [1, 2] } }, + "newField19": { "$last": "$field19", "window": { "documents": [1, 2] } }, + "newField19N": { "$lastN": { "input": "$field19N", "n": 2 }, "window": { "documents": [1, 2] } }, + "newField20": { "$shift": { "output": "$field20", "by": -3, "default": "defaultConstantValue" } }, + "newField21": { "$documentNumber": {} }, + "newField22": { "$rank": {} }, + "newField23": { "$denseRank": {} }, + "newField24": { "$bottom": { "sortBy": { "sortByField": 1}, "output": "$field24"}, "window": { "documents": [1, 2] } }, + "newField24N": { "$bottomN": { "sortBy": { "sortByField": 1}, "output": "$field24N", "n": 2 }, "window": { "documents": [1, 2] } }, + "newField25": { "$top": { "sortBy": { "sortByField": 1}, "output": "$field25"}, "window": { "documents": [1, 2] } }, + "newField25N": { "$topN": { "sortBy": { "sortByField": 1}, "output": "$field25N", "n": 2 }, "window": { "documents": [1, 2] } }, + "newField26": { "$locf": "$field26" }, + "newField27": { "$linearFill": "$field27" } + } + } + }""" + ) + ) + } + + it should "render $setWindowFields with no partitionBy/sortBy" in { + toBson( + setWindowFields(None, None, WindowOutputFields.sum("newField01", "$field01", Some(documents(1, 2)))) + ) should equal( + Document("""{ + "$setWindowFields": { + "output": { + "newField01": { "$sum": "$field01", "window": { "documents": [1, 2] } } + } + } + }""") + ) + } + + it should "render $densify" in { + toBson( + Aggregates.densify( + "fieldName", + fullRangeWithStep(1) + ) + ) should equal( + Document("""{ + "$densify": { + "field": "fieldName", + "range": 
{ "bounds": "full", "step": 1 } + } + }""") + ) + } + + it should "render $fill" in { + toBson( + Aggregates.fill( + fillOptions().partitionByFields("fieldName3").sortBy(ascending("fieldName4")), + FillOutputField.linear("fieldName1"), + FillOutputField.locf("fieldName2") + ) + ) should equal( + Document("""{ + "$fill": { + "partitionByFields": ["fieldName3"], + "sortBy": { "fieldName4": 1 }, + "output": { + "fieldName1": { "method": "linear" }, + "fieldName2": { "method": "locf" } + } + } + }""") + ) + } + + it should "render $search" in { + toBson( + Aggregates.search( + exists(fieldPath("fieldName")), + searchOptions() + ) + ) should equal( + Document("""{ + "$search": { + "exists": { "path": "fieldName" } + } + }""") + ) + toBson( + Aggregates.search( + SearchCollector + .facet(exists(fieldPath("fieldName")), List(stringFacet("stringFacetName", fieldPath("fieldName1")))), + searchOptions() + .index("indexName") + .count(total()) + .highlight( + paths( + fieldPath("fieldName1"), + fieldPath("fieldName2").multi("analyzerName"), + wildcardPath("field.name*") + ) + ) + ) + ) should equal( + Document("""{ + "$search": { + "facet": { + "operator": { "exists": { "path": "fieldName" } }, + "facets": { + "stringFacetName": { "type" : "string", "path": "fieldName1" } + } + }, + "index": "indexName", + "count": { "type": "total" }, + "highlight": { + "path": [ + "fieldName1", + { "value": "fieldName2", "multi": "analyzerName" }, + { "wildcard": "field.name*" } + ] + } + } + }""") + ) + } + + it should "render $search with no options" in { + toBson( + Aggregates.search( + exists(fieldPath("fieldName")) + ) + ) should equal( + Document("""{ + "$search": { + "exists": { "path": "fieldName" } + } + }""") + ) + toBson( + Aggregates.search( + SearchCollector.facet( + exists(fieldPath("fieldName")), + List( + stringFacet("facetName", fieldPath("fieldName")) + .numBuckets(3) + ) + ) + ) + ) should equal( + Document("""{ + "$search": { + "facet": { + "operator": { "exists": { "path": "fieldName" } }, + "facets": { + "facetName": { "type": "string", "path": "fieldName", "numBuckets": 3 } + } + } + } + }""") + ) + } + + it should "render $searchMeta" in { + toBson( + Aggregates.searchMeta( + exists(fieldPath("fieldName")), + searchOptions() + ) + ) should equal( + Document("""{ + "$searchMeta": { + "exists": { "path": "fieldName" } + } + }""") + ) + toBson( + Aggregates.searchMeta( + SearchCollector + .facet(exists(fieldPath("fieldName")), List(stringFacet("stringFacetName", fieldPath("fieldName1")))), + searchOptions() + .index("indexName") + .count(total()) + .highlight( + paths( + fieldPath("fieldName1"), + fieldPath("fieldName2").multi("analyzerName"), + wildcardPath("field.name*") + ) + ) + ) + ) should equal( + Document("""{ + "$searchMeta": { + "facet": { + "operator": { "exists": { "path": "fieldName" } }, + "facets": { + "stringFacetName": { "type" : "string", "path": "fieldName1" } + } + }, + "index": "indexName", + "count": { "type": "total" }, + "highlight": { + "path": [ + "fieldName1", + { "value": "fieldName2", "multi": "analyzerName" }, + { "wildcard": "field.name*" } + ] + } + } + }""") + ) + } + + it should "render $searchMeta with no options" in { + toBson( + Aggregates.searchMeta( + exists(fieldPath("fieldName")) + ) + ) should equal( + Document("""{ + "$searchMeta": { + "exists": { "path": "fieldName" } + } + }""") + ) + toBson( + Aggregates.searchMeta( + SearchCollector.facet( + exists(fieldPath("fieldName")), + List( + stringFacet("facetName", fieldPath("fieldName")) + .numBuckets(3) 
+ ) + ) + ) + ) should equal( + Document("""{ + "$searchMeta": { + "facet": { + "operator": { "exists": { "path": "fieldName" } }, + "facets": { + "facetName": { "type": "string", "path": "fieldName", "numBuckets": 3 } + } + } + } + }""") + ) + } + + it should "render approximate $vectorSearch" in { + toBson( + Aggregates.vectorSearch( + fieldPath("fieldName").multi("ignored"), + List(1.0d, 2.0d), + "indexName", + 1, + approximateVectorSearchOptions(2) + .filter(Filters.ne("fieldName", "fieldValue")) + ) + ) should equal( + Document( + """{ + "$vectorSearch": { + "path": "fieldName", + "queryVector": [1.0, 2.0], + "index": "indexName", + "numCandidates": {"$numberLong": "2"}, + "limit": {"$numberLong": "1"}, + "filter": {"fieldName": {"$ne": "fieldValue"}} + } + }""" + ) + ) + } + + it should "render exact $vectorSearch" in { + toBson( + Aggregates.vectorSearch( + fieldPath("fieldName").multi("ignored"), + List(1.0d, 2.0d), + "indexName", + 1, + exactVectorSearchOptions() + .filter(Filters.ne("fieldName", "fieldValue")) + ) + ) should equal( + Document( + """{ + "$vectorSearch": { + "path": "fieldName", + "queryVector": [1.0, 2.0], + "index": "indexName", + "exact": true, + "limit": {"$numberLong": "1"}, + "filter": {"fieldName": {"$ne": "fieldValue"}} + } + }""" + ) + ) + } + + it should "render $unset" in { + toBson( + Aggregates.unset("title", "author.first") + ) should equal( + Document("""{ $unset: ['title', 'author.first'] }""") + ) + toBson( + Aggregates.unset("author.first") + ) should equal( + Document("""{ "$unset": "author.first" }""") + ) + } + + it should "render $geoNear" in { + + toBson( + Aggregates.geoNear( + Point(Position(-73.99279, 40.719296)), + "dist.calculated" + ) + ) should equal( + Document("""{ + | $geoNear: { + | near: { type: 'Point', coordinates: [ -73.99279 , 40.719296 ] }, + | distanceField: 'dist.calculated' + | } + |}""".stripMargin) + ) + toBson( + Aggregates.geoNear( + Point(Position(-73.99279, 40.719296)), + "dist.calculated", + geoNearOptions() + .minDistance(0) + .maxDistance(2) + .query(Document("""{ "category": "Parks" }""")) + .includeLocs("dist.location") + .spherical() + .key("location") + .distanceMultiplier(10.0) + ) + ) should equal( + Document("""{ + | $geoNear: { + | near: { type: 'Point', coordinates: [ -73.99279 , 40.719296 ] }, + | distanceField: 'dist.calculated', + | minDistance: 0, + | maxDistance: 2, + | query: { category: 'Parks' }, + | includeLocs: 'dist.location', + | spherical: true, + | key: 'location', + | distanceMultiplier: 10.0 + | } + |}""".stripMargin) + ) + } + + it should "render $documents" in { + toBson( + Aggregates.documents( + org.mongodb.scala.bson.BsonDocument("""{a: 1, b: {$add: [1, 1]} }"""), + Document("""{a: 3, b: 4}""") + ) + ) should equal( + Document("""{$documents: [{a: 1, b: {$add: [1, 1]}}, {a: 3, b: 4}]}""") + ) + } +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/model/BucketGranularitySpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/model/BucketGranularitySpec.scala new file mode 100644 index 00000000000..7f46c29ece6 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/model/BucketGranularitySpec.scala @@ -0,0 +1,77 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.model + +import java.lang.reflect.Modifier._ + +import org.mongodb.scala.BaseSpec +import org.scalatest.prop.TableDrivenPropertyChecks._ + +import scala.util.{ Success, Try } + +class BucketGranularitySpec extends BaseSpec { + + "BucketGranularity" should "have the same static fields as the wrapped BucketGranularity" in { + val BucketGranularityClass: Class[BucketGranularity] = classOf[com.mongodb.client.model.BucketGranularity] + val wrappedFields = + BucketGranularityClass.getDeclaredFields.filter(f => isStatic(f.getModifiers)).map(_.getName).toSet + val wrappedMethods = + BucketGranularityClass.getDeclaredMethods.filter(f => isStatic(f.getModifiers)).map(_.getName).toSet + val exclusions = Set("$VALUES", "$values", "valueOf", "values") + + val wrapped = (wrappedFields ++ wrappedMethods) -- exclusions + val local = BucketGranularity.getClass.getDeclaredMethods.map(_.getName).toSet -- Set( + "apply", + "$deserializeLambda$", + "$anonfun$fromString$1" + ) + + local should equal(wrapped) + } + + it should "return the expected BucketGranularity" in { + forAll(BucketGranularitys) { (value: String, expectedValue: Try[BucketGranularity]) => + BucketGranularity.fromString(value) should equal(expectedValue) + } + } + + it should "handle invalid values" in { + forAll(invalidBucketGranularitys) { (value: String) => + BucketGranularity.fromString(value) should be a Symbol("failure") + } + } + + val BucketGranularitys = + Table( + ("stringValue", "JavaValue"), + ("R5", Success(BucketGranularity.R5)), + ("R10", Success(BucketGranularity.R10)), + ("R20", Success(BucketGranularity.R20)), + ("R40", Success(BucketGranularity.R40)), + ("R80", Success(BucketGranularity.R80)), + ("1-2-5", Success(BucketGranularity.SERIES_125)), + ("E6", Success(BucketGranularity.E6)), + ("E12", Success(BucketGranularity.E12)), + ("E24", Success(BucketGranularity.E24)), + ("E48", Success(BucketGranularity.E48)), + ("E96", Success(BucketGranularity.E96)), + ("E192", Success(BucketGranularity.E192)), + ("POWERSOF2", Success(BucketGranularity.POWERSOF2)) + ) + + val invalidBucketGranularitys = Table("invalid values", "r5", "powers of 2") +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/model/CollationAlternateSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/model/CollationAlternateSpec.scala new file mode 100644 index 00000000000..b63134423a4 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/model/CollationAlternateSpec.scala @@ -0,0 +1,66 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.model + +import java.lang.reflect.Modifier._ + +import org.mongodb.scala.BaseSpec +import org.scalatest.prop.TableDrivenPropertyChecks._ + +import scala.util.{ Success, Try } + +class CollationAlternateSpec extends BaseSpec { + + "CollationAlternate" should "have the same static fields as the wrapped CollationAlternate" in { + val collationAlternateClass: Class[CollationAlternate] = classOf[com.mongodb.client.model.CollationAlternate] + val wrappedFields = + collationAlternateClass.getDeclaredFields.filter(f => isStatic(f.getModifiers)).map(_.getName).toSet + val wrappedMethods = + collationAlternateClass.getDeclaredMethods.filter(f => isStatic(f.getModifiers)).map(_.getName).toSet + val exclusions = Set("$VALUES", "$values", "valueOf", "values") + + val wrapped = (wrappedFields ++ wrappedMethods) -- exclusions + val local = CollationAlternate.getClass.getDeclaredMethods.map(_.getName).toSet -- Set( + "apply", + "$deserializeLambda$", + "$anonfun$fromString$1" + ) + + local should equal(wrapped) + } + + it should "return the expected CollationAlternate" in { + forAll(collationAlternates) { (value: String, expectedValue: Try[CollationAlternate]) => + CollationAlternate.fromString(value) should equal(expectedValue) + } + } + + it should "handle invalid values" in { + forAll(invalidCollationAlternates) { (value: String) => + CollationAlternate.fromString(value) should be a Symbol("failure") + } + } + + val collationAlternates = + Table( + ("stringValue", "JavaValue"), + ("non-ignorable", Success(CollationAlternate.NON_IGNORABLE)), + ("shifted", Success(CollationAlternate.SHIFTED)) + ) + + val invalidCollationAlternates = Table("invalid values", "NON_IGNORABLE", "SHIFTED") +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/model/CollationCaseFirstSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/model/CollationCaseFirstSpec.scala new file mode 100644 index 00000000000..7aabb047318 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/model/CollationCaseFirstSpec.scala @@ -0,0 +1,67 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala.model + +import java.lang.reflect.Modifier._ + +import org.mongodb.scala.BaseSpec +import org.scalatest.prop.TableDrivenPropertyChecks._ + +import scala.util.{ Success, Try } + +class CollationCaseFirstSpec extends BaseSpec { + + "CollationCaseFirst" should "have the same static fields as the wrapped CollationCaseFirst" in { + val collationCaseFirstClass: Class[CollationCaseFirst] = classOf[com.mongodb.client.model.CollationCaseFirst] + val wrappedFields = + collationCaseFirstClass.getDeclaredFields.filter(f => isStatic(f.getModifiers)).map(_.getName).toSet + val wrappedMethods = + collationCaseFirstClass.getDeclaredMethods.filter(f => isStatic(f.getModifiers)).map(_.getName).toSet + val exclusions = Set("$VALUES", "$values", "valueOf", "values") + + val wrapped = (wrappedFields ++ wrappedMethods) -- exclusions + val local = CollationCaseFirst.getClass.getDeclaredMethods.map(_.getName).toSet -- Set( + "apply", + "$deserializeLambda$", + "$anonfun$fromString$1" + ) + + local should equal(wrapped) + } + + it should "return the expected CollationCaseFirst" in { + forAll(collationCaseFirsts) { (value: String, expectedValue: Try[CollationCaseFirst]) => + CollationCaseFirst.fromString(value) should equal(expectedValue) + } + } + + it should "handle invalid values" in { + forAll(invalidCollationCaseFirsts) { (value: String) => + CollationCaseFirst.fromString(value) should be a Symbol("failure") + } + } + + val collationCaseFirsts = + Table( + ("stringValue", "JavaValue"), + ("upper", Success(CollationCaseFirst.UPPER)), + ("lower", Success(CollationCaseFirst.LOWER)), + ("off", Success(CollationCaseFirst.OFF)) + ) + + val invalidCollationCaseFirsts = Table("invalid values", "OFF", "LOWER") +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/model/CollationMaxVariableSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/model/CollationMaxVariableSpec.scala new file mode 100644 index 00000000000..479c9125348 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/model/CollationMaxVariableSpec.scala @@ -0,0 +1,66 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala.model + +import java.lang.reflect.Modifier._ + +import org.mongodb.scala.BaseSpec +import org.scalatest.prop.TableDrivenPropertyChecks._ + +import scala.util.{ Success, Try } + +class CollationMaxVariableSpec extends BaseSpec { + + "CollationMaxVariable" should "have the same static fields as the wrapped CollationMaxVariable" in { + val collationMaxVariableClass: Class[CollationMaxVariable] = classOf[com.mongodb.client.model.CollationMaxVariable] + val wrappedFields = + collationMaxVariableClass.getDeclaredFields.filter(f => isStatic(f.getModifiers)).map(_.getName).toSet + val wrappedMethods = + collationMaxVariableClass.getDeclaredMethods.filter(f => isStatic(f.getModifiers)).map(_.getName).toSet + val exclusions = Set("$VALUES", "$values", "valueOf", "values") + + val wrapped = (wrappedFields ++ wrappedMethods) -- exclusions + val local = CollationMaxVariable.getClass.getDeclaredMethods.map(_.getName).toSet -- Set( + "apply", + "$deserializeLambda$", + "$anonfun$fromString$1" + ) + + local should equal(wrapped) + } + + it should "return the expected CollationMaxVariable" in { + forAll(collationMaxVariables) { (value: String, expectedValue: Try[CollationMaxVariable]) => + CollationMaxVariable.fromString(value) should equal(expectedValue) + } + } + + it should "handle invalid values" in { + forAll(invalidCollationMaxVariables) { (value: String) => + CollationMaxVariable.fromString(value) should be a Symbol("failure") + } + } + + val collationMaxVariables = + Table( + ("stringValue", "JavaValue"), + ("punct", Success(CollationMaxVariable.PUNCT)), + ("space", Success(CollationMaxVariable.SPACE)) + ) + + val invalidCollationMaxVariables = Table("invalid values", "SPACE", "PUNCT") +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/model/CollationSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/model/CollationSpec.scala new file mode 100644 index 00000000000..02198b5b8fd --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/model/CollationSpec.scala @@ -0,0 +1,82 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala.model + +import java.lang.reflect.Modifier.isStatic + +import com.mongodb.client.model.{ + Collation => JCollation, + CollationAlternate => JCollationAlternate, + CollationCaseFirst => JCollationCaseFirst, + CollationMaxVariable => JCollationMaxVariable, + CollationStrength => JCollationStrength +} +import org.mongodb.scala.BaseSpec + +class CollationSpec extends BaseSpec { + + "Collation" should "have the same static fields as the wrapped Collation" in { + val collationClass: Class[Collation] = classOf[com.mongodb.client.model.Collation] + val wrappedFields = collationClass.getDeclaredFields.filter(f => isStatic(f.getModifiers)).map(_.getName).toSet + val wrappedMethods = collationClass.getDeclaredMethods.filter(f => isStatic(f.getModifiers)).map(_.getName).toSet + val exclusions = Set("$VALUES", "$values", "valueOf", "values") + + val wrapped = (wrappedFields ++ wrappedMethods) -- exclusions + val local = Collation.getClass.getDeclaredMethods.map(_.getName).toSet -- Set( + "apply", + "$deserializeLambda$", + "$anonfun$fromString$1" + ) + + local should equal(wrapped) + } + + it should "return the underlying builder" in { + Collation.builder().getClass should equal(classOf[com.mongodb.client.model.Collation.Builder]) + } + + it should "produce the same collation value when using the Scala helpers" in { + val viaScalaHelper = Collation + .builder() + .backwards(true) + .caseLevel(true) + .collationAlternate(CollationAlternate.NON_IGNORABLE) + .collationCaseFirst(CollationCaseFirst.UPPER) + .collationMaxVariable(CollationMaxVariable.SPACE) + .collationStrength(CollationStrength.TERTIARY) + .locale("fr") + .normalization(true) + .numericOrdering(true) + .build() + + val javaNative = JCollation + .builder() + .backwards(true) + .caseLevel(true) + .collationAlternate(JCollationAlternate.NON_IGNORABLE) + .collationCaseFirst(JCollationCaseFirst.UPPER) + .collationMaxVariable(JCollationMaxVariable.SPACE) + .collationStrength(JCollationStrength.TERTIARY) + .locale("fr") + .normalization(true) + .numericOrdering(true) + .build() + + viaScalaHelper should equal(javaNative) + } + +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/model/CollationStrengthSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/model/CollationStrengthSpec.scala new file mode 100644 index 00000000000..0e5d2f406dd --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/model/CollationStrengthSpec.scala @@ -0,0 +1,69 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala.model + +import java.lang.reflect.Modifier._ + +import org.mongodb.scala.BaseSpec +import org.scalatest.prop.TableDrivenPropertyChecks._ + +import scala.util.{ Success, Try } + +class CollationStrengthSpec extends BaseSpec { + + "CollationStrength" should "have the same static fields as the wrapped CollationStrength" in { + val collationStrengthClass: Class[CollationStrength] = classOf[com.mongodb.client.model.CollationStrength] + val wrappedFields = + collationStrengthClass.getDeclaredFields.filter(f => isStatic(f.getModifiers)).map(_.getName).toSet + val wrappedMethods = + collationStrengthClass.getDeclaredMethods.filter(f => isStatic(f.getModifiers)).map(_.getName).toSet + val exclusions = Set("$VALUES", "$values", "valueOf", "values") + + val wrapped = (wrappedFields ++ wrappedMethods) -- exclusions + val local = CollationStrength.getClass.getDeclaredMethods.map(_.getName).toSet -- Set( + "apply", + "$deserializeLambda$", + "$anonfun$fromInt$1" + ) + + local should equal(wrapped) + } + + it should "return the expected CollationStrength" in { + forAll(collationStrengths) { (value: Int, expectedValue: Try[CollationStrength]) => + CollationStrength.fromInt(value) should equal(expectedValue) + } + } + + it should "handle invalid values" in { + forAll(invalidCollationStrengths) { (value: Int) => + CollationStrength.fromInt(value) should be a Symbol("failure") + } + } + + val collationStrengths = + Table( + ("intValue", "JavaValue"), + (1, Success(CollationStrength.PRIMARY)), + (2, Success(CollationStrength.SECONDARY)), + (3, Success(CollationStrength.TERTIARY)), + (4, Success(CollationStrength.QUATERNARY)), + (5, Success(CollationStrength.IDENTICAL)) + ) + + val invalidCollationStrengths = Table("invalid values", 0, 6) +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/model/FiltersSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/model/FiltersSpec.scala new file mode 100644 index 00000000000..e05aa2447e1 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/model/FiltersSpec.scala @@ -0,0 +1,778 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.mongodb.scala.model + +import java.lang.reflect.Modifier._ + +import org.bson.{ BsonDocument, BsonType } +import org.mongodb.scala.bson.collection.immutable.Document +import org.mongodb.scala.bson.conversions.Bson +import org.mongodb.scala.model.geojson.{ Point, Polygon, Position } +import org.mongodb.scala.{ model, BaseSpec, MongoClient } + +class FiltersSpec extends BaseSpec { + val registry = MongoClient.DEFAULT_CODEC_REGISTRY + + def toBson(bson: Bson): Document = + Document(bson.toBsonDocument(classOf[BsonDocument], MongoClient.DEFAULT_CODEC_REGISTRY)) + + "Filters" should "have the same methods as the wrapped Filters" in { + val wrapped = classOf[com.mongodb.client.model.Filters].getDeclaredMethods + .filter(f => isPublic(f.getModifiers)) + .map(_.getName) + .toSet + val aliases = Set("equal", "notEqual", "bsonType") + val ignore = Set("$anonfun$geoWithinPolygon$1") + val local = model.Filters.getClass.getDeclaredMethods + .filter(f => isPublic(f.getModifiers)) + .map(_.getName) + .toSet -- aliases -- ignore + + local should equal(wrapped) + } + + it should "render without $eq" in { + toBson(model.Filters.eq("x", 1)) should equal(Document("""{x : 1}""")) + toBson(model.Filters.eq("x", null)) should equal(Document("""{x : null}""")) + + toBson(model.Filters.equal("x", 1)) should equal(Document("""{x : 1}""")) + toBson(model.Filters.equal("x", null)) should equal(Document("""{x : null}""")) + } + + it should "render $ne" in { + toBson(model.Filters.ne("x", 1)) should equal(Document("""{x : {$ne : 1} }""")) + toBson(model.Filters.ne("x", null)) should equal(Document("""{x : {$ne : null} }""")) + } + + it should "render $not" in { + toBson(model.Filters.not(model.Filters.eq("x", 1))) should equal(Document("""{x : {$not: {$eq: 1}}}""")) + toBson(model.Filters.not(model.Filters.gt("x", 1))) should equal(Document("""{x : {$not: {$gt: 1}}}""")) + toBson(model.Filters.not(model.Filters.regex("x", "^p.*"))) should equal(Document("""{x : {$not: /^p.*/}}""")) + toBson(model.Filters.not(model.Filters.and(model.Filters.gt("x", 1), model.Filters.eq("y", 20)))) should equal( + Document("""{$not: {$and: [{x: {$gt: 1}}, {y: 20}]}}""") + ) + toBson(model.Filters.not(model.Filters.and(model.Filters.eq("x", 1), model.Filters.eq("x", 2)))) should equal( + Document("""{$not: {$and: [{x: 1}, {x: 2}]}}""") + ) + toBson(model.Filters.not(model.Filters.and(model.Filters.in("x", 1, 2), model.Filters.eq("x", 3)))) should equal( + Document("""{$not: {$and: [{x: {$in: [1, 2]}}, {x: 3}]}}""") + ) + toBson(model.Filters.not(model.Filters.or(model.Filters.gt("x", 1), model.Filters.eq("y", 20)))) should equal( + Document("""{$not: {$or: [{x: {$gt: 1}}, {y: 20}]}}""") + ) + toBson(model.Filters.not(model.Filters.or(model.Filters.eq("x", 1), model.Filters.eq("x", 2)))) should equal( + Document("""{$not: {$or: [{x: 1}, {x: 2}]}}""") + ) + toBson(model.Filters.not(model.Filters.or(model.Filters.in("x", 1, 2), model.Filters.eq("x", 3)))) should equal( + Document("""{$not: {$or: [{x: {$in: [1, 2]}}, {x: 3}]}}""") + ) + toBson(model.Filters.not(Document("$in" -> List(1)))) should equal(Document("""{$not: {$in: [1]}}""")) + } + + it should "render $nor" in { + toBson(model.Filters.nor(model.Filters.eq("price", 1))) should equal(Document("""{$nor : [{price: 1}]}""")) + toBson(model.Filters.nor(model.Filters.eq("price", 1), model.Filters.eq("sale", true))) should equal( + Document("""{$nor : [{price: 1}, {sale: true}]}""") + ) + } + + it should "render $gt" in { + toBson(model.Filters.gt("x", 1)) should 
equal(Document("""{x : {$gt : 1} }""")) + } + + it should "render $lt" in { + toBson(model.Filters.lt("x", 1)) should equal(Document("""{x : {$lt : 1} }""")) + } + + it should "render $gte" in { + toBson(model.Filters.gte("x", 1)) should equal(Document("""{x : {$gte : 1} }""")) + } + + it should "render $lte" in { + toBson(model.Filters.lte("x", 1)) should equal(Document("""{x : {$lte : 1} }""")) + } + + it should "render $exists" in { + toBson(model.Filters.exists("x")) should equal(Document("""{x : {$exists : true} }""")) + toBson(model.Filters.exists("x", false)) should equal(Document("""{x : {$exists : false} }""")) + } + + it should "or should render empty or using $or" in { + toBson(model.Filters.or()) should equal(Document("""{$or : []}""")) + } + + it should "render $or" in { + toBson(model.Filters.or(model.Filters.eq("x", 1), model.Filters.eq("y", 2))) should equal( + Document("""{$or : [{x : 1}, {y : 2}]}""") + ) + } + + it should "and should render empty and using $and" in { + toBson(model.Filters.and()) should equal(Document("""{$and : []}""")) + } + + it should "and should render using $and" in { + toBson(model.Filters.and(model.Filters.eq("x", 1), model.Filters.eq("y", 2))) should equal( + Document("""{$and: [{x : 1}, {y : 2}]}""") + ) + } + + it should "and should flatten multiple operators for the same key" in { + toBson(model.Filters.and(model.Filters.gt("a", 1), model.Filters.lt("a", 9))) should equal( + Document("""{$and: [{a : {$gt : 1}}, {a: {$lt : 9}}]}""") + ) + } + + it should "and should flatten nested" in { + toBson( + model.Filters.and(model.Filters.and(model.Filters.eq("a", 1), model.Filters.eq("b", 2)), model.Filters.eq("c", 3)) + ) should equal(Document("""{$and: [{$and: [{a : 1}, {b : 2}]}, {c : 3}]}""")) + toBson( + model.Filters.and(model.Filters.and(model.Filters.eq("a", 1), model.Filters.eq("a", 2)), model.Filters.eq("c", 3)) + ) should equal(Document("""{$and: [{$and:[{a : 1}, {a : 2}]}, {c : 3}] }""")) + toBson(model.Filters.and(model.Filters.lt("a", 1), model.Filters.lt("b", 2))) should equal( + Document("""{$and: [{a : {$lt : 1}}, {b : {$lt : 2} }]}""") + ) + toBson(model.Filters.and(model.Filters.lt("a", 1), model.Filters.lt("a", 2))) should equal( + Document("""{$and : [{a : {$lt : 1}}, {a : {$lt : 2}}]}""") + ) + } + + it should "render $all" in { + toBson(model.Filters.all("a", 1, 2, 3)) should equal(Document("""{a : {$all : [1, 2, 3]} }""")) + } + + it should "render $elemMatch" in { + toBson(model.Filters.elemMatch("results", Document("$gte" -> 80, "$lt" -> 85))) should equal( + Document("""{results : {$elemMatch : {$gte: 80, $lt: 85}}}""") + ) + toBson( + model.Filters + .elemMatch("results", model.Filters.and(model.Filters.eq("product", "xyz"), model.Filters.gt("score", 8))) + ) should equal(Document("""{ results : {$elemMatch : {$and: [{product : "xyz"}, {score : {$gt : 8}}]}}}""")) + } + + it should "render $in" in { + toBson(model.Filters.in("a", 1, 2, 3)) should equal(Document("""{a : {$in : [1, 2, 3]} }""")) + } + + it should "render $nin" in { + toBson(model.Filters.nin("a", 1, 2, 3)) should equal(Document("""{a : {$nin : [1, 2, 3]} }""")) + } + + it should "render $mod" in { + toBson(model.Filters.mod("a", 100, 7)) should equal(Document("a" -> Document("$mod" -> List(100L, 7L)))) + } + + it should "render $size" in { + toBson(model.Filters.size("a", 13)) should equal(Document("""{a : {$size : 13} }""")) + } + + it should "render $type" in { + toBson(model.Filters.`type`("a", BsonType.ARRAY)) should equal(Document("""{a : {$type : 4} 
}""")) + } + + it should "render $bitsAllClear" in { + toBson(model.Filters.bitsAllClear("a", 13)) should equal( + Document("""{a : {$bitsAllClear : { "$numberLong" : "13" }} }""") + ) + } + + it should "render $bitsAllSet" in { + toBson(model.Filters.bitsAllSet("a", 13)) should equal( + Document("""{a : {$bitsAllSet : { "$numberLong" : "13" }} }""") + ) + } + + it should "render $bitsAnyClear" in { + toBson(model.Filters.bitsAnyClear("a", 13)) should equal( + Document("""{a : {$bitsAnyClear : { "$numberLong" : "13" }} }""") + ) + } + + it should "render $bitsAnySet" in { + toBson(model.Filters.bitsAnySet("a", 13)) should equal( + Document("""{a : {$bitsAnySet : { "$numberLong" : "13" }} }""") + ) + } + + it should "render $text" in { + toBson(model.Filters.text("mongoDB for GIANT ideas")) should equal( + Document("""{$text: {$search: "mongoDB for GIANT ideas"} }""") + ) + toBson(model.Filters.text("mongoDB for GIANT ideas", new TextSearchOptions().language("english"))) should equal( + Document("""{$text : {$search : "mongoDB for GIANT ideas", $language : "english"} }""") + ) + toBson(model.Filters.text("mongoDB for GIANT ideas", new TextSearchOptions().caseSensitive(true))) should equal( + Document("""{$text : {$search : "mongoDB for GIANT ideas", $caseSensitive : true} }""") + ) + toBson( + model.Filters.text("mongoDB for GIANT ideas", new TextSearchOptions().diacriticSensitive(false)) + ) should equal( + Document("""{$text : {$search : "mongoDB for GIANT ideas", $diacriticSensitive : false} }""") + ) + toBson( + model.Filters.text( + "mongoDB for GIANT ideas", + new TextSearchOptions() + .language("english") + .caseSensitive(false) + .diacriticSensitive(true) + ) + ) should equal( + Document( + """{$text : {$search : "mongoDB for GIANT ideas", $language : "english", $caseSensitive : false, + $diacriticSensitive : true} }""" + ) + ) + } + + it should "render $regex" in { + toBson(model.Filters.regex("name", "acme.*corp")) should equal( + Document("""{name : {$regex : "acme.*corp", $options : ""}}""") + ) + toBson(model.Filters.regex("name", "acme.*corp", "si")) should equal( + Document("""{name : {$regex : "acme.*corp", $options : "si"}}""") + ) + toBson(model.Filters.regex("name", "acme.*corp".r)) should equal( + Document("""{name : {$regex : "acme.*corp", $options : ""}}""") + ) + } + + it should "render $where" in { + toBson(model.Filters.where("this.credits == this.debits")) should equal( + Document("""{$where: "this.credits == this.debits"}""") + ) + } + + it should "render $geoWithin" in { + val polygon = Polygon( + Seq( + Position(40.0, 18.0), + Position(40.0, 19.0), + Position(41.0, 19.0), + Position(40.0, 18.0) + ) + ) + + toBson(model.Filters.geoWithin("loc", polygon)) should equal( + Document("""{ + loc: { + $geoWithin: { + $geometry: { + type: "Polygon", + coordinates: [ + [ + [40.0, 18.0], [40.0, 19.0], [41.0, 19.0], [40.0, 18.0] + ] + ] + } + } + } + }""") + ) + + toBson(model.Filters.geoWithin("loc", Document(polygon.toJson()))) should equal( + Document("""{ + loc: { + $geoWithin: { + $geometry: { + type: "Polygon", + coordinates: [ + [ + [40.0, 18.0], [40.0, 19.0], [41.0, 19.0], + [40.0, 18.0] + ] + ] + } + } + } + }""") + ) + } + + it should "render $geoWithin with $box" in { + toBson(model.Filters.geoWithinBox("loc", 1d, 2d, 3d, 4d)) should equal(Document("""{ + loc: { + $geoWithin: { + $box: [ + [ 1.0, 2.0 ], [ 3.0, 4.0 ] + ] + } + } + }""")) + } + + it should "render $geoWithin with $polygon" in { + toBson(model.Filters.geoWithinPolygon("loc", List(List(0d, 0d), 
List(3d, 6d), List(6d, 0d)))) should equal( + Document("""{ + loc: { + $geoWithin: { + $polygon: [ + [ 0.0, 0.0 ], [ 3.0, 6.0 ], + [ 6.0, 0.0 ] + ] + } + } + }""") + ) + } + + it should "render $geoWithin with $center" in { + toBson(model.Filters.geoWithinCenter("loc", -74d, 40.74d, 10d)) should equal( + Document("""{ loc: { $geoWithin: { $center: [ [-74.0, 40.74], 10.0 ] } } }""") + ) + } + + it should "render $geoWithin with $centerSphere" in { + toBson(model.Filters.geoWithinCenterSphere("loc", -74d, 40.74d, 10d)) should equal(Document("""{ + loc: { + $geoWithin: { + $centerSphere: [ + [-74.0, 40.74], 10.0 + ] + } + } + }""")) + } + + it should "render $geoIntersects" in { + val polygon = Polygon( + Seq( + Position(40.0d, 18.0d), + Position(40.0d, 19.0d), + Position(41.0d, 19.0d), + Position(40.0d, 18.0d) + ) + ) + + toBson(model.Filters.geoIntersects("loc", polygon)) should equal(Document("""{ + loc: { + $geoIntersects: { + $geometry: { + type: "Polygon", + coordinates: [ + [ + [40.0, 18.0], [40.0, 19.0], [41.0, 19.0], + [40.0, 18.0] + ] + ] + } + } + } + }""")) + + toBson(model.Filters.geoIntersects("loc", Document(polygon.toJson))) should equal( + Document("""{ + loc: { + $geoIntersects: { + $geometry: { + type: "Polygon", + coordinates: [ + [ + [40.0, 18.0], [40.0, 19.0], [41.0, 19.0], + [40.0, 18.0] + ] + ] + } + } + } + }""") + ) + } + + it should "render $near" in { + val point = Point(Position(-73.9667, 40.78)) + val pointDocument = Document(point.toJson) + + toBson(model.Filters.near("loc", point)) should equal(Document("""{ + loc : { + $near: { + $geometry: { + type : "Point", + coordinates : [ -73.9667, 40.78 ] + }, + } + } + }""")) + + toBson(model.Filters.near("loc", point, Some(5000d), Some(1000d))) should equal( + Document("""{ + loc : { + $near: { + $geometry: { + type : "Point", + coordinates : [ -73.9667, 40.78 ] + }, + $maxDistance: 5000.0, + $minDistance: 1000.0, + } + } + }""") + ) + + toBson(model.Filters.near("loc", point, Some(5000d), None)) should equal(Document("""{ + loc : { + $near: { + $geometry: { + type : "Point", + coordinates : [ -73.9667, 40.78 ] + }, + $maxDistance: 5000.0, + } + } + }""")) + + toBson(model.Filters.near("loc", point, None, Some(1000d))) should equal(Document("""{ + loc : { + $near: { + $geometry: { + type : "Point", + coordinates : [ -73.9667, 40.78 ] + }, + $minDistance: 1000.0, + } + } + }""")) + + toBson(model.Filters.near("loc", point, None, None)) should equal(Document("""{ + loc : { + $near: { + $geometry: { + type : "Point", + coordinates : [ -73.9667, 40.78 ] + }, + } + } + }""")) + + toBson(model.Filters.near("loc", pointDocument)) should equal(Document("""{ + loc : { + $near: { + $geometry: { + type : "Point", + coordinates : [ -73.9667, 40.78 ] + }, + } + } + }""")) + + toBson(model.Filters.near("loc", pointDocument, Some(5000d), Some(1000d))) should equal( + Document("""{ + loc : { + $near: { + $geometry: { + type : "Point", + coordinates : [ -73.9667, 40.78 ] + }, + $maxDistance: 5000.0, + $minDistance: 1000.0, + } + } + }""") + ) + + toBson(model.Filters.near("loc", pointDocument, Some(5000d), None)) should equal( + Document("""{ + loc : { + $near: { + $geometry: { + type : "Point", + coordinates : [ -73.9667, 40.78 ] + }, + $maxDistance: 5000.0, + } + } + }""") + ) + + toBson(model.Filters.near("loc", pointDocument, None, Some(1000d))) should equal( + Document("""{ + loc : { + $near: { + $geometry: { + type : "Point", + coordinates : [ -73.9667, 40.78 ] + }, + $minDistance: 1000.0, + } + } + }""") + ) + + 
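+    // NB: a Document-based point (pointDocument below) is expected to render exactly like the typed
+    // GeoJSON Point above, while the plain (x, y) Double overloads further down fall back to the
+    // legacy "$near: [x, y]" array form instead of a "$geometry" document.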
toBson(model.Filters.near("loc", pointDocument, None, None)) should equal(Document("""{ + loc : { + $near: { + $geometry: { + type : "Point", + coordinates : [ -73.9667, 40.78 ] + }, + } + } + }""")) + + toBson(model.Filters.near("loc", -73.9667, 40.78)) should equal(Document("""{ + loc : { + $near: [-73.9667, 40.78], + } + } + }""")) + + toBson(model.Filters.near("loc", -73.9667, 40.78, Some(5000d), Some(1000d))) should equal( + Document("""{ + loc : { + $near: [-73.9667, 40.78], + $maxDistance: 5000.0, + $minDistance: 1000.0, + } + } + }""") + ) + + toBson(model.Filters.near("loc", -73.9667, 40.78, Some(5000d), None)) should equal( + Document("""{ + loc : { + $near: [-73.9667, 40.78], + $maxDistance: 5000.0, + } + } + }""") + ) + + toBson(model.Filters.near("loc", -73.9667, 40.78, None, Some(1000d))) should equal( + Document("""{ + loc : { + $near: [-73.9667, 40.78], + $minDistance: 1000.0, + } + } + }""") + ) + + toBson(model.Filters.near("loc", -73.9667, 40.78, None, None)) should equal(Document("""{ + loc : { + $near: [-73.9667, 40.78], + } + } + }""")) + } + + it should "render $nearSphere" in { + val point = Point(Position(-73.9667, 40.78)) + val pointDocument = Document(point.toJson) + + toBson(model.Filters.nearSphere("loc", point)) should equal(Document("""{ + loc : { + $nearSphere: { + $geometry: { + type : "Point", + coordinates : [ -73.9667, 40.78 ] + }, + } + } + }""")) + + toBson(model.Filters.nearSphere("loc", point, Some(5000d), Some(1000d))) should equal( + Document("""{ + loc : { + $nearSphere: { + $geometry: { + type : "Point", + coordinates : [ -73.9667, 40.78 ] + }, + $maxDistance: 5000.0, + $minDistance: 1000.0, + } + } + }""") + ) + + toBson(model.Filters.nearSphere("loc", point, Some(5000d), None)) should equal(Document("""{ + loc: + { + $nearSphere: + { + $geometry: + { + type: "Point", + coordinates: + [-73.9667, 40.78] + }, + $maxDistance: 5000.0, + } + } + }""")) + + toBson(model.Filters.nearSphere("loc", point, None, Some(1000d))) should equal( + Document("""{ + loc : { + $nearSphere: { + $geometry: { + type : "Point", + coordinates : [ -73.9667, 40.78 ] + }, + $minDistance: 1000.0, + } + } + }""") + ) + + toBson(model.Filters.nearSphere("loc", point, None, None)) should equal(Document("""{ + loc : { + $nearSphere: { + $geometry: { + type : "Point", + coordinates : [ -73.9667, 40.78 ] + }, + } + } + }""")) + + toBson(model.Filters.nearSphere("loc", pointDocument)) should equal(Document("""{ + loc : { + $nearSphere: { + $geometry: { + type : "Point", + coordinates : [ -73.9667, 40.78 ] + }, + } + } + }""")) + + toBson(model.Filters.nearSphere("loc", pointDocument, Some(5000d), Some(1000d))) should equal( + Document("""{ + loc : { + $nearSphere: { + $geometry: { + type : "Point", + coordinates : [ -73.9667, 40.78 ] + }, + $maxDistance: 5000.0, + $minDistance: 1000.0, + } + } + }""") + ) + + toBson(model.Filters.nearSphere("loc", pointDocument, Some(5000d), None)) should equal( + Document("""{ + loc : { + $nearSphere: { + $geometry: { + type : "Point", + coordinates : [ -73.9667, 40.78 ] + }, + $maxDistance: 5000.0, + } + } + }""") + ) + + toBson(model.Filters.nearSphere("loc", pointDocument, None, Some(1000d))) should equal( + Document("""{ + loc : { + $nearSphere: { + $geometry: { + type : "Point", + coordinates : [ -73.9667, 40.78 ] + }, + $minDistance: 1000.0, + } + } + }""") + ) + + toBson(model.Filters.nearSphere("loc", pointDocument, None, None)) should equal( + Document("""{ + loc : { + $nearSphere: { + $geometry: { + type : "Point", + coordinates : [ 
+
+    toBson(model.Filters.nearSphere("loc", pointDocument, None, None)) should equal(
+      Document("""{
+        loc : {
+          $nearSphere: {
+            $geometry: {
+              type : "Point",
+              coordinates : [ -73.9667, 40.78 ]
+            },
+          }
+        }
+      }""")
+    )
+
+    toBson(model.Filters.nearSphere("loc", -73.9667, 40.78)) should equal(Document("""{
+      loc : {
+        $nearSphere: [-73.9667, 40.78],
+      }
+    }"""))
+
+    toBson(model.Filters.nearSphere("loc", -73.9667, 40.78, Some(5000d), Some(1000d))) should equal(
+      Document("""{
+        loc : {
+          $nearSphere: [-73.9667, 40.78],
+          $maxDistance: 5000.0,
+          $minDistance: 1000.0,
+        }
+      }""")
+    )
+
+    toBson(model.Filters.nearSphere("loc", -73.9667, 40.78, Some(5000d), None)) should equal(
+      Document("""{
+        loc : {
+          $nearSphere: [-73.9667, 40.78],
+          $maxDistance: 5000.0,
+        }
+      }""")
+    )
+
+    toBson(model.Filters.nearSphere("loc", -73.9667, 40.78, None, Some(1000d))) should equal(
+      Document("""{
+        loc : {
+          $nearSphere: [-73.9667, 40.78],
+          $minDistance: 1000.0,
+        }
+      }""")
+    )
+
+    toBson(model.Filters.nearSphere("loc", -73.9667, 40.78, None, None)) should equal(
+      Document("""{
+        loc : {
+          $nearSphere: [-73.9667, 40.78],
+        }
+      }""")
+    )
+  }
+
+  it should "render $expr" in {
+    toBson(model.Filters.expr(Document("{$gt: ['$spent', '$budget']}"))) should equal(
+      Document("""{$expr: {$gt: ["$spent", "$budget"]}}""")
+    )
+  }
+
+  it should "render $jsonSchema" in {
+    toBson(model.Filters.jsonSchema(Document("{bsonType: 'object'}"))) should equal(
+      Document("""{$jsonSchema: {bsonType: "object"}}""")
+    )
+  }
+
+  it should "render an empty document" in {
+    toBson(model.Filters.empty()) should equal(
+      Document("""{}""")
+    )
+  }
+
+}
diff --git a/driver-scala/src/test/scala/org/mongodb/scala/model/GeoJsonSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/model/GeoJsonSpec.scala
new file mode 100644
index 00000000000..fec2e0a0413
--- /dev/null
+++ b/driver-scala/src/test/scala/org/mongodb/scala/model/GeoJsonSpec.scala
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.mongodb.scala.model + +import java.lang.reflect.Modifier._ + +import org.mongodb.scala.BaseSpec +import org.mongodb.scala.model.geojson.NamedCoordinateReferenceSystem._ +import org.mongodb.scala.model.geojson._ + +import scala.collection.JavaConverters._ + +class GeoJsonSpec extends BaseSpec { + + it should "have the same methods as the wrapped CoordinateReferenceSystemType" in { + val wrapped = classOf[geojson.CoordinateReferenceSystemType].getDeclaredFields + .filter(f => isPublic(f.getModifiers)) + .map(_.getName) + .toSet + val local = CoordinateReferenceSystemType.getClass.getDeclaredMethods + .filter(f => isPublic(f.getModifiers)) + .map(_.getName) + .toSet + + local should equal(wrapped) + } + + it should "have the same methods as the wrapped GeoJsonObjectType" in { + val wrapped = classOf[geojson.GeoJsonObjectType].getDeclaredFields + .filter(f => isPublic(f.getModifiers)) + .map(_.getName) + .toSet + val local = GeoJsonObjectType.getClass.getDeclaredMethods.filter(f => isPublic(f.getModifiers)).map(_.getName).toSet + + local should equal(wrapped) + } + + it should "have the same CoordinateReferenceSystemType" in { + CoordinateReferenceSystemType.LINK should equal(geojson.CoordinateReferenceSystemType.LINK) + CoordinateReferenceSystemType.NAME should equal(geojson.CoordinateReferenceSystemType.NAME) + } + + it should "have the same GeoJsonObjectType" in { + GeoJsonObjectType.GEOMETRY_COLLECTION should equal(geojson.GeoJsonObjectType.GEOMETRY_COLLECTION) + GeoJsonObjectType.LINE_STRING should equal(geojson.GeoJsonObjectType.LINE_STRING) + GeoJsonObjectType.MULTI_LINE_STRING should equal(geojson.GeoJsonObjectType.MULTI_LINE_STRING) + GeoJsonObjectType.MULTI_POINT should equal(geojson.GeoJsonObjectType.MULTI_POINT) + GeoJsonObjectType.POINT should equal(geojson.GeoJsonObjectType.POINT) + GeoJsonObjectType.POLYGON should equal(geojson.GeoJsonObjectType.POLYGON) + } + + it should "create the same GeometryCollection" in { + GeometryCollection(Seq(Point(Position(1, 2)))) should equal( + new geojson.GeometryCollection(Seq(Point(Position(1, 2))).asInstanceOf[List[Geometry]].asJava) + ) + + GeometryCollection(EPSG_4326, Seq(Point(Position(1, 2)))) should equal( + new geojson.GeometryCollection(EPSG_4326, Seq(Point(Position(1, 2))).asInstanceOf[List[Geometry]].asJava) + ) + } + + it should "create the same LineString" in { + LineString(Seq(Position(1, 2), Position(2, 4))) should equal( + new geojson.LineString(Seq(Position(1, 2), Position(2, 4)).asJava) + ) + LineString(EPSG_4326_STRICT_WINDING, Seq(Position(1, 2), Position(2, 4))) should equal( + new geojson.LineString(EPSG_4326_STRICT_WINDING, Seq(Position(1, 2), Position(2, 4)).asJava) + ) + } + + it should "create the same MultiLineString" in { + MultiLineString(Seq(Position(1, 2))) should equal( + new geojson.MultiLineString(Seq(Seq(Position(1, 2)).asJava).asJava) + ) + MultiLineString(EPSG_4326, Seq(Position(1, 2))) should equal( + new geojson.MultiLineString(EPSG_4326, Seq(Seq(Position(1, 2)).asJava).asJava) + ) + } + + it should "create the same MultiPoint" in { + MultiPoint(Position(1, 2)) should equal(new geojson.MultiPoint(Seq(Position(1, 2)).asJava)) + MultiPoint(CRS_84, Position(1, 2)) should equal(new geojson.MultiPoint(CRS_84, Seq(Position(1, 2)).asJava)) + } + + it should "create the same Point" in { + Point(Position(1, 2)) should equal(new geojson.Point(Position(1, 2))) + Point(CRS_84, Position(1, 2)) should equal(new geojson.Point(CRS_84, Position(1, 2))) + } + + it should "create the same MultiPolygon" 
in {
+    val exterior = Seq(Position(10, 20), Position(10, 40), Position(20, 40), Position(10, 20))
+    val interior = Seq(Position(15, 16), Position(15, 18), Position(16, 18), Position(15, 16))
+
+    MultiPolygon(PolygonCoordinates(exterior)) should equal(
+      new geojson.MultiPolygon(Seq(PolygonCoordinates(exterior)).asJava)
+    )
+
+    MultiPolygon(PolygonCoordinates(exterior, interior)) should equal(
+      new geojson.MultiPolygon(Seq(PolygonCoordinates(exterior, interior)).asJava)
+    )
+
+    MultiPolygon(CRS_84, PolygonCoordinates(exterior)) should equal(
+      new geojson.MultiPolygon(CRS_84, Seq(PolygonCoordinates(exterior)).asJava)
+    )
+  }
+
+  it should "create the same Polygon" in {
+    val exterior = Seq(Position(10, 20), Position(10, 40), Position(20, 40), Position(10, 20))
+
+    Polygon(PolygonCoordinates(exterior)) should equal(new geojson.Polygon(PolygonCoordinates(exterior)))
+    Polygon(CRS_84, PolygonCoordinates(exterior)) should equal(
+      new geojson.Polygon(CRS_84, PolygonCoordinates(exterior))
+    )
+  }
+
+  it should "create a NamedCoordinateReferenceSystem from a string" in {
+    val coordinateRefSystem = NamedCoordinateReferenceSystem("EPSG:4326")
+
+    coordinateRefSystem should equal(EPSG_4326)
+  }
+}
diff --git a/driver-scala/src/test/scala/org/mongodb/scala/model/IndexesSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/model/IndexesSpec.scala
new file mode 100644
index 00000000000..2ca3e588e19
--- /dev/null
+++ b/driver-scala/src/test/scala/org/mongodb/scala/model/IndexesSpec.scala
@@ -0,0 +1,80 @@
+/*
+ * Copyright 2015 MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.mongodb.scala.model
+
+import java.lang.reflect.Modifier._
+
+import org.bson.BsonDocument
+import org.mongodb.scala.bson.collection.immutable.Document
+import org.mongodb.scala.bson.conversions.Bson
+import org.mongodb.scala.model.Indexes._
+import org.mongodb.scala.{ model, BaseSpec, MongoClient }
+
+class IndexesSpec extends BaseSpec {
+  val registry = MongoClient.DEFAULT_CODEC_REGISTRY
+
+  def toBson(bson: Bson): Document =
+    Document(bson.toBsonDocument(classOf[BsonDocument], MongoClient.DEFAULT_CODEC_REGISTRY))
+
+  "Indexes" should "have the same methods as the wrapped Indexes" in {
+    val wrapped = classOf[com.mongodb.client.model.Indexes].getDeclaredMethods
+      .filter(f => isPublic(f.getModifiers))
+      .map(_.getName)
+      .toSet
+    val local = model.Indexes.getClass.getDeclaredMethods.filter(f => isPublic(f.getModifiers)).map(_.getName).toSet
+
+    local should equal(wrapped)
+  }
+
+  it should "ascending" in {
+    toBson(ascending("x")) should equal(Document("""{x : 1}"""))
+    toBson(ascending("x", "y")) should equal(Document("""{x : 1, y : 1}"""))
+  }
+
+  it should "descending" in {
+    toBson(descending("x")) should equal(Document("""{x : -1}"""))
+    toBson(descending("x", "y")) should equal(Document("""{x : -1, y : -1}"""))
+  }
+
+  it should "geo2dsphere" in {
+    toBson(geo2dsphere("x")) should equal(Document("""{x : "2dsphere"}"""))
+    toBson(geo2dsphere("x", "y")) should equal(Document("""{x : "2dsphere", y : "2dsphere"}"""))
+  }
+
+  it should "geo2d" in {
+    toBson(geo2d("x")) should equal(Document("""{x : "2d"}"""))
+  }
+
+  it should "text" in {
+    toBson(text("x")) should equal(Document("""{x : "text"}"""))
+  }
+
+  it should "hashed" in {
+    toBson(hashed("x")) should equal(Document("""{x : "hashed"}"""))
+  }
+
+  it should "compoundIndex" in {
+    toBson(compoundIndex(ascending("x"), descending("y"))) should equal(Document("""{x : 1, y : -1}"""))
+    toBson(compoundIndex(ascending("x"), descending("y"), descending("x"))) should equal(
+      Document("""{y : -1, x : -1}""")
+    )
+    toBson(compoundIndex(ascending("x", "y"), descending("a", "b"))) should equal(
+      Document("""{x : 1, y : 1, a : -1, b : -1}""")
+    )
+  }
+
+}
diff --git a/driver-scala/src/test/scala/org/mongodb/scala/model/MapReduceActionSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/model/MapReduceActionSpec.scala
new file mode 100644
index 00000000000..75e27d10d44
--- /dev/null
+++ b/driver-scala/src/test/scala/org/mongodb/scala/model/MapReduceActionSpec.scala
@@ -0,0 +1,41 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.mongodb.scala.model + +import java.lang.reflect.Modifier._ + +import com.mongodb.client.model.{ MapReduceAction => JMapReduceAction } +import org.mongodb.scala.BaseSpec + +class MapReduceActionSpec extends BaseSpec { + + "MapReduceAction" should "mirror com.mongodb.client.model.MapReduceAction" in { + val wrapped = classOf[JMapReduceAction].getEnumConstants.map(_.getValue.toUpperCase).toSet + val local = MapReduceAction.getClass.getDeclaredMethods.filter(f => isPublic(f.getModifiers)).map(_.getName).toSet + + local should equal(wrapped) + } + + it should "have the same values" in { + MapReduceAction.MERGE should equal(JMapReduceAction.MERGE) + + MapReduceAction.REDUCE should equal(JMapReduceAction.REDUCE) + + MapReduceAction.REPLACE should equal(JMapReduceAction.REPLACE) + } + +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/model/MergeOptionsSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/model/MergeOptionsSpec.scala new file mode 100644 index 00000000000..6c439495f61 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/model/MergeOptionsSpec.scala @@ -0,0 +1,78 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.model + +import java.lang.reflect.Modifier._ + +import com.mongodb.client.model.{ MergeOptions => JMergeOptions } +import org.mongodb.scala.BaseSpec + +class MergeOptionsSpec extends BaseSpec { + + case class Default(wrapped: String = "") + + "MergeOptions" should "mirror com.mongodb.client.model.MergeOptions" in { + val setters = classOf[JMergeOptions].getDeclaredMethods + .filter(f => isPublic(f.getModifiers) && !f.getName.startsWith("get")) + .map(_.getName) + .toSet + val enums = classOf[JMergeOptions].getDeclaredFields.map(_.getName).toSet + val wrapped = (setters ++ enums) -- Set("hashCode", "toString", "equals") + + val exclusions = Default().getClass.getDeclaredMethods + .filter(f => isPublic(f.getModifiers)) + .map(_.getName) + .toSet ++ Set("apply", "unapply") + val local = MergeOptions().getClass.getDeclaredMethods + .filter(f => isPublic(f.getModifiers) && !f.getName.contains("$")) + .map(_.getName) + .toSet -- exclusions + + local should equal(wrapped) + } + + it should "have the same values for WhenMatched" in { + val wrapped = classOf[JMergeOptions.WhenMatched].getEnumConstants.map(_.toString).toSet + val local = + MergeOptions.WhenMatched.getClass.getDeclaredMethods.filter(f => isPublic(f.getModifiers)).map(_.getName).toSet + + local should equal(wrapped) + + MergeOptions.WhenMatched.FAIL should equal(JMergeOptions.WhenMatched.FAIL) + MergeOptions.WhenMatched.KEEP_EXISTING should equal(JMergeOptions.WhenMatched.KEEP_EXISTING) + MergeOptions.WhenMatched.MERGE should equal(JMergeOptions.WhenMatched.MERGE) + MergeOptions.WhenMatched.PIPELINE should equal(JMergeOptions.WhenMatched.PIPELINE) + MergeOptions.WhenMatched.REPLACE should equal(JMergeOptions.WhenMatched.REPLACE) + + wrapped.size should equal(5) + } + + it should "have 
the same values for WhenNotMatched" in { + val wrapped = classOf[JMergeOptions.WhenNotMatched].getEnumConstants.map(_.toString).toSet + val local = + MergeOptions.WhenNotMatched.getClass.getDeclaredMethods.filter(f => isPublic(f.getModifiers)).map(_.getName).toSet + + local should equal(wrapped) + + MergeOptions.WhenNotMatched.DISCARD should equal(JMergeOptions.WhenNotMatched.DISCARD) + MergeOptions.WhenNotMatched.FAIL should equal(JMergeOptions.WhenNotMatched.FAIL) + MergeOptions.WhenNotMatched.INSERT should equal(JMergeOptions.WhenNotMatched.INSERT) + + wrapped.size should equal(3) + } + +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/model/ModelSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/model/ModelSpec.scala new file mode 100644 index 00000000000..bc779f79aa8 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/model/ModelSpec.scala @@ -0,0 +1,129 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.model + +import org.mongodb.scala._ + +class ModelSpec extends BaseSpec { + + it should "be able to create CountOptions" in { + val options = CountOptions() + options shouldBe a[com.mongodb.client.model.CountOptions] + } + + it should "be able to create CreateCollectionOptions" in { + val options = CreateCollectionOptions() + options shouldBe a[com.mongodb.client.model.CreateCollectionOptions] + } + + it should "be able to create FindOneAndDeleteOptions" in { + val options = FindOneAndDeleteOptions() + options shouldBe a[com.mongodb.client.model.FindOneAndDeleteOptions] + } + + it should "be able to create FindOneAndReplaceOptions" in { + val options = FindOneAndReplaceOptions() + options shouldBe a[com.mongodb.client.model.FindOneAndReplaceOptions] + } + + it should "be able to create FindOneAndUpdateOptions" in { + val options = FindOneAndUpdateOptions() + options shouldBe a[com.mongodb.client.model.FindOneAndUpdateOptions] + } + + it should "be able to create IndexOptions" in { + val options = IndexOptions() + options shouldBe a[com.mongodb.client.model.IndexOptions] + } + + it should "be able to create InsertManyOptions" in { + val options = InsertManyOptions() + options shouldBe a[com.mongodb.client.model.InsertManyOptions] + } + + it should "be able to create RenameCollectionOptions" in { + val options = RenameCollectionOptions() + options shouldBe a[com.mongodb.client.model.RenameCollectionOptions] + } + + it should "be able to create UpdateOptions" in { + val options = UpdateOptions() + options shouldBe a[com.mongodb.client.model.UpdateOptions] + } + + it should "be able to create IndexModel" in { + val model = IndexModel(Document("a" -> 1)) + model shouldBe a[com.mongodb.client.model.IndexModel] + + val model2 = IndexModel(Document("a" -> 1), IndexOptions()) + model2 shouldBe a[com.mongodb.client.model.IndexModel] + } + + it should "be able to create DeleteManyModel" in { + val model = DeleteManyModel(Document("a" -> 1)) + model shouldBe 
a[com.mongodb.client.model.DeleteManyModel[_]]
+  }
+
+  it should "be able to create DeleteOneModel" in {
+    val model = DeleteOneModel(Document("a" -> 1))
+    model shouldBe a[com.mongodb.client.model.DeleteOneModel[_]]
+  }
+
+  it should "be able to create InsertOneModel" in {
+    val model = InsertOneModel(Document("a" -> 1))
+    model shouldBe a[com.mongodb.client.model.InsertOneModel[_]]
+  }
+
+  it should "be able to create ReplaceOneModel" in {
+    val model = ReplaceOneModel(Document("a" -> 1), Document("a" -> 2))
+    model shouldBe a[com.mongodb.client.model.ReplaceOneModel[_]]
+  }
+
+  it should "be able to create UpdateManyModel" in {
+    val model = UpdateManyModel(Document("a" -> 1), Document("$set" -> Document("a" -> 2)))
+    model shouldBe a[com.mongodb.client.model.UpdateManyModel[_]]
+
+    val model2 = UpdateManyModel(Document("a" -> 1), Document("$set" -> Document("a" -> 2)), UpdateOptions())
+    model2 shouldBe a[com.mongodb.client.model.UpdateManyModel[_]]
+
+    val model3 = UpdateManyModel(Document("a" -> 1), Seq(Document("$set" -> Document("a" -> 2))))
+    model3 shouldBe a[com.mongodb.client.model.UpdateManyModel[_]]
+
+    val model4 = UpdateManyModel(Document("a" -> 1), Seq(Document("$set" -> Document("a" -> 2))), UpdateOptions())
+    model4 shouldBe a[com.mongodb.client.model.UpdateManyModel[_]]
+  }
+
+  it should "be able to create UpdateOneModel" in {
+    val model = UpdateOneModel(Document("a" -> 1), Document("$set" -> Document("a" -> 2)))
+    model shouldBe a[com.mongodb.client.model.UpdateOneModel[_]]
+
+    val model2 = UpdateOneModel(Document("a" -> 1), Document("$set" -> Document("a" -> 2)), UpdateOptions())
+    model2 shouldBe a[com.mongodb.client.model.UpdateOneModel[_]]
+
+    val model3 = UpdateOneModel(Document("a" -> 1), Seq(Document("$set" -> Document("a" -> 2))))
+    model3 shouldBe a[com.mongodb.client.model.UpdateOneModel[_]]
+
+    val model4 = UpdateOneModel(Document("a" -> 1), Seq(Document("$set" -> Document("a" -> 2))), UpdateOptions())
+    model4 shouldBe a[com.mongodb.client.model.UpdateOneModel[_]]
+  }
+
+  it should "be able to create BsonField" in {
+    val bsonField = BsonField("key", Document("a" -> 1))
+    bsonField shouldBe a[com.mongodb.client.model.BsonField]
+  }
+
+}
diff --git a/driver-scala/src/test/scala/org/mongodb/scala/model/ProjectionsSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/model/ProjectionsSpec.scala
new file mode 100644
index 00000000000..aacfc572420
--- /dev/null
+++ b/driver-scala/src/test/scala/org/mongodb/scala/model/ProjectionsSpec.scala
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2015 MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package org.mongodb.scala.model + +import java.lang.reflect.Modifier._ + +import org.bson.BsonDocument +import org.mongodb.scala.bson.collection.immutable.Document +import org.mongodb.scala.bson.conversions.Bson +import org.mongodb.scala.{ model, BaseSpec, MongoClient } + +class ProjectionsSpec extends BaseSpec { + val registry = MongoClient.DEFAULT_CODEC_REGISTRY + + def toBson(bson: Bson): Document = + Document(bson.toBsonDocument(classOf[BsonDocument], MongoClient.DEFAULT_CODEC_REGISTRY)) + + "Projections" should "have the same methods as the wrapped Projections" in { + val wrapped = classOf[com.mongodb.client.model.Projections].getDeclaredMethods + .filter(f => isPublic(f.getModifiers)) + .map(_.getName) + .toSet + val local = model.Projections.getClass.getDeclaredMethods.filter(f => isPublic(f.getModifiers)).map(_.getName).toSet + + local should equal(wrapped) + } + + it should "include" in { + toBson(model.Projections.include("x")) should equal(Document("""{x : 1}""")) + toBson(model.Projections.include("x", "y")) should equal(Document("""{x : 1, y : 1}""")) + } + + it should "exclude" in { + toBson(model.Projections.exclude("x")) should equal(Document("""{x : 0}""")) + toBson(model.Projections.exclude("x", "y")) should equal(Document("""{x : 0, y : 0}""")) + } + + it should "excludeId" in { + toBson(model.Projections.excludeId) should equal(Document("""{_id : 0}""")) + } + + it should "firstElem" in { + toBson(model.Projections.elemMatch("x")) should equal(Document("""{"x.$" : 1}""")) + } + + it should "elemMatch" in { + toBson( + model.Projections.elemMatch("x", Filters.and(model.Filters.eq("y", 1), model.Filters.eq("z", 2))) + ) should equal( + Document("""{x : {$elemMatch : {$and: [{y : 1}, {z : 2}]}}}""") + ) + } + + it should "slice" in { + toBson(model.Projections.slice("x", 5)) should equal(Document("""{x : {$slice : 5}}""")) + toBson(model.Projections.slice("x", 5, 10)) should equal(Document("""{x : {$slice : [5, 10]}}""")) + } + + it should "metaTextScore" in { + toBson(model.Projections.metaTextScore("x")) should equal(Document("""{x : {$meta : "textScore"}}""")) + } + + it should "computed" in { + toBson(model.Projections.computed("c", "$y")) should equal(Document("""{c : "$y"}""")) + } + + it should "combine fields" in { + toBson( + model.Projections.fields(model.Projections.include("x", "y"), model.Projections.exclude("_id")) + ) should equal( + Document("""{x : 1, y : 1, _id : 0}""") + ) + toBson(model.Projections.fields(model.Projections.include("x", "y"), model.Projections.exclude("x"))) should equal( + Document("""{y : 1, x : 0}""") + ) + } + +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/model/ReturnDocumentSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/model/ReturnDocumentSpec.scala new file mode 100644 index 00000000000..87291ade48d --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/model/ReturnDocumentSpec.scala @@ -0,0 +1,39 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.model + +import java.lang.reflect.Modifier._ + +import com.mongodb.client.model.{ ReturnDocument => JReturnDocument } +import org.mongodb.scala.BaseSpec + +class ReturnDocumentSpec extends BaseSpec { + + "ReturnDocument" should "mirror com.mongodb.client.model.ReturnDocument" in { + val wrapped = classOf[JReturnDocument].getEnumConstants.map(_.toString).toSet + val local = ReturnDocument.getClass.getDeclaredMethods.filter(f => isPublic(f.getModifiers)).map(_.getName).toSet + + local should equal(wrapped) + } + + it should "have the same values" in { + ReturnDocument.BEFORE should equal(JReturnDocument.BEFORE) + + ReturnDocument.AFTER should equal(JReturnDocument.AFTER) + } + +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/model/SortsSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/model/SortsSpec.scala new file mode 100644 index 00000000000..de81bc6c53d --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/model/SortsSpec.scala @@ -0,0 +1,68 @@ +/* + * Copyright 2015 MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.model + +import java.lang.reflect.Modifier._ + +import org.bson.BsonDocument +import org.mongodb.scala.bson.collection.immutable.Document +import org.mongodb.scala.bson.conversions.Bson +import org.mongodb.scala.model.Sorts._ +import org.mongodb.scala.{ model, BaseSpec, MongoClient } + +class SortsSpec extends BaseSpec { + val registry = MongoClient.DEFAULT_CODEC_REGISTRY + + def toBson(bson: Bson): Document = + Document(bson.toBsonDocument(classOf[BsonDocument], MongoClient.DEFAULT_CODEC_REGISTRY)) + + "Sorts" should "have the same methods as the wrapped Sorts" in { + val wrapped = classOf[com.mongodb.client.model.Sorts].getDeclaredMethods + .filter(f => isPublic(f.getModifiers)) + .map(_.getName) + .toSet + val local = model.Sorts.getClass.getDeclaredMethods.filter(f => isPublic(f.getModifiers)).map(_.getName).toSet + + local should equal(wrapped) + } + + it should "ascending" in { + toBson(ascending("x")) should equal(Document("""{x : 1}""")) + toBson(ascending("x", "y")) should equal(Document("""{x : 1, y : 1}""")) + toBson(ascending(Seq("x", "y"): _*)) should equal(Document("""{x : 1, y : 1}""")) + } + + it should "descending" in { + toBson(descending("x")) should equal(Document("""{x : -1}""")) + toBson(descending("x", "y")) should equal(Document("""{x : -1, y : -1}""")) + toBson(descending(Seq("x", "y"): _*)) should equal(Document("""{x : -1, y : -1}""")) + } + + it should "metaTextScore" in { + toBson(metaTextScore("x")) should equal(Document("""{x : {$meta : "textScore"}}""")) + } + + it should "orderBy" in { + toBson(orderBy(Seq(ascending("x"), descending("y")): _*)) should equal(Document("""{x : 1, y : -1}""")) + toBson(orderBy(ascending("x"), descending("y"))) should equal(Document("""{x : 1, y : -1}""")) +
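// orderBy folds the given sorts into a single document from left to right; + // when a key repeats, only the direction given last is kept, as the next assertion shows. +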
toBson(orderBy(ascending("x"), descending("y"), descending("x"))) should equal(Document("""{y : -1, x : -1}""")) + toBson(orderBy(ascending("x", "y"), descending("a", "b"))) should equal( + Document("""{x : 1, y : 1, a : -1, b : -1}""") + ) + } + +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/model/UpdatesSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/model/UpdatesSpec.scala new file mode 100644 index 00000000000..ee27de2b1a7 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/model/UpdatesSpec.scala @@ -0,0 +1,161 @@ +/* + * Copyright 2015 MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.model + +import java.lang.reflect.Modifier._ + +import org.bson.BsonDocument +import org.mongodb.scala.bson.collection.immutable.Document +import org.mongodb.scala.bson.conversions.Bson +import org.mongodb.scala.model.Updates._ +import org.mongodb.scala.{ model, BaseSpec, MongoClient } + +class UpdatesSpec extends BaseSpec { + val registry = MongoClient.DEFAULT_CODEC_REGISTRY + + def toBson(bson: Bson): Document = + Document(bson.toBsonDocument(classOf[BsonDocument], MongoClient.DEFAULT_CODEC_REGISTRY)) + + "Updates" should "have the same methods as the wrapped Updates" in { + val wrapped = classOf[com.mongodb.client.model.Updates].getDeclaredMethods + .filter(f => isPublic(f.getModifiers)) + .map(_.getName) + .toSet + val local = model.Updates.getClass.getDeclaredMethods.filter(f => isPublic(f.getModifiers)).map(_.getName).toSet + + local should equal(wrapped) + } + + it should "render $set" in { + toBson(set("x", 1)) should equal(Document("""{$set : { x : 1} }""")) + toBson(set("x", null)) should equal(Document("""{$set : { x : null } }""")) + } + + it should "render $setOnInsert" in { + toBson(setOnInsert("x", 1)) should equal(Document("""{$setOnInsert : { x : 1} }""")) + toBson(setOnInsert("x", List(1, 2, 3))) should equal(Document("""{$setOnInsert : { x : [1, 2, 3]} }""")) + toBson(setOnInsert("x", Map("a" -> 1, "b" -> 2, "c" -> 3))) should equal( + Document("""{$setOnInsert : { x : {a: 1, b: 2, c: 3}} }""") + ) + toBson(setOnInsert("x", null)) should equal(Document("""{$setOnInsert : { x : null } }""")) + } + + it should "render $unset" in { + toBson(unset("x")) should equal(Document("""{$unset : { x : ""} }""")) + } + + it should "render $rename" in { + toBson(rename("x", "y")) should equal(Document("""{$rename : { x : "y"} }""")) + } + + it should "render $inc" in { + toBson(inc("x", 1)) should equal(Document("""{$inc : { x : 1} }""")) + toBson(inc("x", 5L)) should equal(Document("""{$inc : { x : {$numberLong : "5"}} }""")) + toBson(inc("x", 3.4d)) should equal(Document("""{$inc : { x : 3.4} }""")) + } + + it should "render $mul" in { + toBson(mul("x", 1)) should equal(Document("""{$mul : { x : 1} }""")) + toBson(mul("x", 5L)) should equal(Document("""{$mul : { x : {$numberLong : "5"}} }""")) +
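// Scala Int, Long and Double operands render as BSON int32, $numberLong and double, + // so the numeric type of the update value is preserved, as these assertions show. +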
toBson(mul("x", 3.4d)) should equal(Document("""{$mul : { x : 3.4} }""")) + } + + it should "render $min" in { + toBson(min("x", 42)) should equal(Document("""{$min : { x : 42} }""")) + } + + it should "render $max" in { + toBson(max("x", 42)) should equal(Document("""{$max : { x : 42} }""")) + } + + it should "render $currentDate" in { + toBson(currentDate("x")) should equal(Document("""{$currentDate : { x : true} }""")) + toBson(currentTimestamp("x")) should equal(Document("""{$currentDate : { x : {$type : "timestamp"} } }""")) + } + + it should "render $addToSet" in { + toBson(addToSet("x", 1)) should equal(Document("""{$addToSet : { x : 1} }""")) + toBson(addEachToSet("x", 1, 2, 3)) should equal(Document("""{$addToSet : { x : { $each : [1, 2, 3] } } }""")) + } + + it should "render $push" in { + toBson(push("x", 1)) should equal(Document("""{$push : { x : 1} }""")) + toBson(pushEach("x", 1, 2, 3)) should equal(Document("""{$push : { x : { $each : [1, 2, 3] } } }""")) + toBson(pushEach("x", PushOptions(), 1, 2, 3)) should equal(Document("""{$push : { x : { $each : [1, 2, 3] } } }""")) + toBson( + pushEach( + "x", + PushOptions().position(0).slice(3).sortDocument(Document("{score : -1}")), + Document("""{score : 89}"""), + Document("""{score : 65}""") + ) + ) should equal( + Document( + """{$push : { x : { $each : [{score : 89}, {score : 65}], $position : 0, $slice : 3, $sort : { score : -1 } } } }""" + ) + ) + + toBson(pushEach("x", PushOptions().position(0).slice(3).sort(-1), 89, 65)) should equal( + Document("""{$push : { x : { $each : [89, 65], $position : 0, $slice : 3, $sort : -1 } } }""") + ) + } + + it should "render `$pull`" in { + toBson(pull("x", 1)) should equal(Document("""{$pull : { x : 1} }""")) + toBson(pullByFilter(Filters.gte("x", 5))) should equal(Document("""{$pull : { x : { $gte : 5 }} }""")) + } + + it should "render `$pullAll`" in { + toBson(pullAll("x")) should equal(Document("""{$pullAll : { x : []} }""")) + toBson(pullAll("x", 1, 2, 3)) should equal(Document("""{$pullAll : { x : [1, 2, 3]} }""")) + } + + it should "render $pop" in { + toBson(popFirst("x")) should equal(Document("""{$pop : { x : -1} }""")) + toBson(popLast("x")) should equal(Document("""{$pop : { x : 1} }""")) + } + + it should "render $bit" in { + toBson(bitwiseAnd("x", 5)) should equal(Document("""{$bit : { x : {and : 5} } }""")) + toBson(bitwiseAnd("x", 5L)) should equal(Document("""{$bit : { x : {and : {$numberLong : "5"} } } }""")) + toBson(bitwiseOr("x", 5)) should equal(Document("""{$bit : { x : {or : 5} } }""")) + toBson(bitwiseOr("x", 5L)) should equal(Document("""{$bit : { x : {or : {$numberLong : "5"} } } }""")) + toBson(bitwiseXor("x", 5)) should equal(Document("""{$bit : { x : {xor : 5} } }""")) + toBson(bitwiseXor("x", 5L)) should equal(Document("""{$bit : { x : {xor : {$numberLong : "5"} } } }""")) + } + + it should "combine updates" in { + toBson(combine(set("x", 1))) should equal(Document("""{$set : { x : 1} }""")) + toBson(combine(set("x", 1), set("x", 2))) should equal(Document("""{$set : { x : 2} }""")) + toBson(combine(set("x", 1), inc("z", 3), set("y", 2), inc("a", 4))) should equal(Document("""{ + $set : { x : 1, y : 2}, + $inc : { z : 3, a : 4} + }""")) + + toBson(combine(combine(set("x", 1)))) should equal(Document("""{$set : { x : 1} }""")) + toBson(combine(combine(set("x", 1), set("y", 2)))) should equal(Document("""{$set : { x : 1, y : 2} }""")) + toBson(combine(combine(set("x", 1), set("x", 2)))) should equal(Document("""{$set : { x : 2} }""")) + +
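// combine flattens nested combines and groups the updates under their operators; + // when the same field appears twice for an operator, the value given last wins, as shown above and below. +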
toBson(combine(combine(set("x", 1), inc("z", 3), set("y", 2), inc("a", 4)))) should equal( + Document("""{ + $set : { x : 1, y : 2}, + $inc : { z : 3, a : 4} + }""") + ) + } + +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/model/ValidationActionSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/model/ValidationActionSpec.scala new file mode 100644 index 00000000000..4725bf16d96 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/model/ValidationActionSpec.scala @@ -0,0 +1,68 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.model + +import java.lang.reflect.Modifier._ + +import org.mongodb.scala.BaseSpec +import org.scalatest.prop.TableDrivenPropertyChecks._ + +import scala.util.{ Success, Try } + +class ValidationActionSpec extends BaseSpec { + + "ValidationAction" should "have the same static fields as the wrapped ValidationAction" in { + val ValidationActionClass: Class[ValidationAction] = classOf[com.mongodb.client.model.ValidationAction] + val wrappedFields = + ValidationActionClass.getDeclaredFields.filter(f => isStatic(f.getModifiers)).map(_.getName).toSet + val wrappedMethods = + ValidationActionClass.getDeclaredMethods.filter(f => isStatic(f.getModifiers)).map(_.getName).toSet + val exclusions = Set("$VALUES", "$values", "valueOf", "values") + + val wrapped = (wrappedFields ++ wrappedMethods) -- exclusions + val local = ValidationAction.getClass.getDeclaredMethods.map(_.getName).toSet -- Set( + "apply", + "$deserializeLambda$", + "$anonfun$fromString$1" + ) + + local should equal(wrapped) + } + + it should "return the expected ValidationActions" in { + forAll(validationActions) { (stringValue: String, expectedValue: Try[ValidationAction]) => + ValidationAction.fromString(stringValue) should equal(expectedValue) + } + } + + it should "handle invalid strings" in { + forAll(invalidValidationActions) { (stringValue: String) => + ValidationAction.fromString(stringValue) should be a Symbol("failure") + } + } + + val validationActions = + Table( + ("stringValue", "JavaValue"), + ("error", Success(ValidationAction.ERROR)), + ("ERROR", Success(ValidationAction.ERROR)), + ("warn", Success(ValidationAction.WARN)), + ("WARN", Success(ValidationAction.WARN)) + ) + + val invalidValidationActions = Table("invalid strings", "all", "none") +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/model/ValidationLevelSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/model/ValidationLevelSpec.scala new file mode 100644 index 00000000000..6f346ca98e3 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/model/ValidationLevelSpec.scala @@ -0,0 +1,71 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.model + +import java.lang.reflect.Modifier._ + +import org.mongodb.scala.BaseSpec +import org.scalatest.prop.TableDrivenPropertyChecks._ + +import scala.util.{ Success, Try } + +class ValidationLevelSpec extends BaseSpec { + + "ValidationLevel" should "have the same static fields as the wrapped ValidationLevel" in { + val validationLevelClass: Class[ValidationLevel] = classOf[com.mongodb.client.model.ValidationLevel] + val wrappedFields = + validationLevelClass.getDeclaredFields.filter(f => isStatic(f.getModifiers)).map(_.getName).toSet + val wrappedMethods = + validationLevelClass.getDeclaredMethods.filter(f => isStatic(f.getModifiers)).map(_.getName).toSet + val exclusions = Set("$VALUES", "$values", "valueOf", "values") + + val wrapped = (wrappedFields ++ wrappedMethods) -- exclusions + val local = ValidationLevel.getClass.getDeclaredMethods.map(_.getName).toSet -- Set( + "apply", + "$deserializeLambda$", + "$anonfun$fromString$1" + ) + + local should equal(wrapped) + } + + it should "return the expected ValidationLevels" in { + forAll(validationLevels) { (stringValue: String, expectedValue: Try[ValidationLevel]) => + ValidationLevel.fromString(stringValue) should equal(expectedValue) + } + } + + it should "handle invalid strings" in { + forAll(invalidValidationLevels) { (stringValue: String) => + ValidationLevel.fromString(stringValue) should be a Symbol("failure") + } + } + + val validationLevels = + Table( + ("stringValue", "JavaValue"), + ("off", Success(ValidationLevel.OFF)), + ("OFF", Success(ValidationLevel.OFF)), + ("strict", Success(ValidationLevel.STRICT)), + ("STRICT", Success(ValidationLevel.STRICT)), + ("moderate", Success(ValidationLevel.MODERATE)), + ("MODERATE", Success(ValidationLevel.MODERATE)) + ) + + val invalidValidationLevels = Table("invalid strings", "all", "none") +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/model/bulk/BulkModelSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/model/bulk/BulkModelSpec.scala new file mode 100644 index 00000000000..f96ca2d96ee --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/model/bulk/BulkModelSpec.scala @@ -0,0 +1,111 @@ +package org.mongodb.scala.model.bulk + +import org.mongodb.scala.bson.Document +import org.mongodb.scala.bson.conversions.Bson +import org.mongodb.scala.{ BaseSpec, MongoNamespace } + +class BulkModelSpec extends BaseSpec { + + val namespace = new MongoNamespace("db.coll") + val filter: Bson = Document("a" -> 1) + val update: Bson = Document("$set" -> Document("b" -> 2)) + val replacement = Document("b" -> 2) + val document = Document("a" -> 1) + val updatePipeline: Seq[Document] = Seq(Document("$set" -> Document("b" -> 2))) + + it should "be able to create ClientNamespacedInsertOneModel" in { + val insertOneModel = ClientNamespacedWriteModel.insertOne(namespace, document) + insertOneModel shouldBe a[ClientNamespacedInsertOneModel] + insertOneModel shouldBe a[com.mongodb.client.model.bulk.ClientNamespacedInsertOneModel] + } + + it should "be able to create
ClientNamespacedUpdateOneModel with filter and update" in { + val updateOneModel = ClientNamespacedWriteModel.updateOne(namespace, filter, update) + updateOneModel shouldBe a[ClientNamespacedUpdateOneModel] + updateOneModel shouldBe a[com.mongodb.client.model.bulk.ClientNamespacedUpdateOneModel] + } + + it should "be able to create ClientNamespacedUpdateOneModel with filter, update, and options" in { + val options = ClientUpdateOneOptions.clientUpdateOneOptions() + val updateOneModel = ClientNamespacedWriteModel.updateOne(namespace, filter, update, options) + updateOneModel shouldBe a[ClientNamespacedUpdateOneModel] + updateOneModel shouldBe a[com.mongodb.client.model.bulk.ClientNamespacedUpdateOneModel] + } + + it should "be able to create ClientNamespacedUpdateOneModel with update pipeline" in { + val updateOneModel = ClientNamespacedWriteModel.updateOne(namespace, filter, updatePipeline) + updateOneModel shouldBe a[ClientNamespacedUpdateOneModel] + updateOneModel shouldBe a[com.mongodb.client.model.bulk.ClientNamespacedUpdateOneModel] + } + + it should "be able to create ClientNamespacedUpdateOneModel with update pipeline and options" in { + val options = ClientUpdateOneOptions.clientUpdateOneOptions() + val updateOneModel = ClientNamespacedWriteModel.updateOne(namespace, filter, updatePipeline, options) + updateOneModel shouldBe a[ClientNamespacedUpdateOneModel] + updateOneModel shouldBe a[com.mongodb.client.model.bulk.ClientNamespacedUpdateOneModel] + } + + it should "be able to create ClientNamespacedUpdateManyModel with filter and update" in { + val updateManyModel = ClientNamespacedWriteModel.updateMany(namespace, filter, update) + updateManyModel shouldBe a[ClientNamespacedUpdateManyModel] + updateManyModel shouldBe a[com.mongodb.client.model.bulk.ClientNamespacedUpdateManyModel] + } + it should "be able to create ClientNamespacedUpdateManyModel with filter, update and options" in { + val options = ClientUpdateManyOptions.clientUpdateManyOptions() + val updateManyModel = ClientNamespacedWriteModel.updateMany(namespace, filter, update, options) + updateManyModel shouldBe a[ClientNamespacedUpdateManyModel] + updateManyModel shouldBe a[com.mongodb.client.model.bulk.ClientNamespacedUpdateManyModel] + } + + it should "be able to create ClientNamespacedUpdateManyModel with filter, updatePipeline" in { + val updateManyModel = ClientNamespacedWriteModel.updateMany(namespace, filter, updatePipeline) + updateManyModel shouldBe a[ClientNamespacedUpdateManyModel] + updateManyModel shouldBe a[com.mongodb.client.model.bulk.ClientNamespacedUpdateManyModel] + } + + it should "be able to create ClientNamespacedUpdateManyModel with filter, updatePipeline and options" in { + val options = ClientUpdateManyOptions.clientUpdateManyOptions() + val updateManyModel = ClientNamespacedWriteModel.updateMany(namespace, filter, updatePipeline, options) + updateManyModel shouldBe a[ClientNamespacedUpdateManyModel] + updateManyModel shouldBe a[com.mongodb.client.model.bulk.ClientNamespacedUpdateManyModel] + } + + it should "be able to create ClientNamespacedReplaceOneModel" in { + val replaceOneModel = ClientNamespacedWriteModel.replaceOne(namespace, filter, replacement) + replaceOneModel shouldBe a[ClientNamespacedReplaceOneModel] + replaceOneModel shouldBe a[com.mongodb.client.model.bulk.ClientNamespacedReplaceOneModel] + } + + it should "be able to create ClientNamespacedReplaceOneModel with options" in { + val options = ClientReplaceOneOptions.clientReplaceOneOptions() + val replaceOneModel = 
ClientNamespacedWriteModel.replaceOne(namespace, filter, replacement, options) + replaceOneModel shouldBe a[ClientNamespacedReplaceOneModel] + replaceOneModel shouldBe a[com.mongodb.client.model.bulk.ClientNamespacedReplaceOneModel] + } + + it should "be able to create ClientNamespacedDeleteOneModel" in { + val deleteOneModel = ClientNamespacedWriteModel.deleteOne(namespace, filter) + deleteOneModel shouldBe a[ClientNamespacedDeleteOneModel] + deleteOneModel shouldBe a[com.mongodb.client.model.bulk.ClientNamespacedDeleteOneModel] + } + + it should "be able to create ClientNamespacedDeleteOneModel with options" in { + val options = ClientDeleteOneOptions.clientDeleteOneOptions() + val deleteOneModel = ClientNamespacedWriteModel.deleteOne(namespace, filter, options) + deleteOneModel shouldBe a[ClientNamespacedDeleteOneModel] + deleteOneModel shouldBe a[com.mongodb.client.model.bulk.ClientNamespacedDeleteOneModel] + } + + it should "be able to create ClientNamespacedDeleteManyModel" in { + val deleteManyModel = ClientNamespacedWriteModel.deleteMany(namespace, filter) + deleteManyModel shouldBe a[ClientNamespacedDeleteManyModel] + deleteManyModel shouldBe a[com.mongodb.client.model.bulk.ClientNamespacedDeleteManyModel] + } + + it should "be able to create ClientNamespacedDeleteManyModel with options" in { + val options = ClientDeleteManyOptions.clientDeleteManyOptions() + val deleteManyModel = ClientNamespacedWriteModel.deleteMany(namespace, filter, options) + deleteManyModel shouldBe a[ClientNamespacedDeleteManyModel] + deleteManyModel shouldBe a[com.mongodb.client.model.bulk.ClientNamespacedDeleteManyModel] + } +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/model/search/SearchOperatorSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/model/search/SearchOperatorSpec.scala new file mode 100644 index 00000000000..3d5481d8368 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/model/search/SearchOperatorSpec.scala @@ -0,0 +1,103 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.mongodb.scala.model.search + +import org.bson.BsonDocument +import org.mongodb.scala.{ BaseSpec, MongoClient } +import org.mongodb.scala.bson.collection.immutable.Document +import org.mongodb.scala.bson.conversions.Bson +import org.mongodb.scala.model.geojson.{ Point, Position } +import org.mongodb.scala.model.search.FuzzySearchOptions.fuzzySearchOptions +import org.mongodb.scala.model.search.SearchOperator.{ + autocomplete, + compound, + dateRange, + exists, + near, + numberRange, + text +} +import org.mongodb.scala.model.search.SearchPath.{ fieldPath, wildcardPath } +import org.mongodb.scala.model.search.SearchScore.function +import org.mongodb.scala.model.search.SearchScoreExpression.{ constantExpression, logExpression } + +import java.time.{ Duration, Instant } +import scala.collection.JavaConverters._ + +class SearchOperatorSpec extends BaseSpec { + it should "render all operators" in { + toDocument( + compound() + .should(Seq( + exists(fieldPath("fieldName1")), + text(fieldPath("fieldName2"), "term1") + .score(function(logExpression(constantExpression(3)))), + text(Seq(wildcardPath("wildc*rd"), fieldPath("fieldName3")), Seq("term2", "term3")) + .fuzzy(fuzzySearchOptions() + .maxEdits(1) + .prefixLength(2) + .maxExpansions(3)), + autocomplete( + fieldPath("title") + // multi must be ignored + .multi("keyword"), + "term4" + ), + autocomplete(fieldPath("title"), "Traffic in", "term5") + .fuzzy() + .sequentialTokenOrder(), + numberRange(fieldPath("fieldName4"), fieldPath("fieldName5")) + .gtLt(1, 1.5), + dateRange(fieldPath("fieldName6")) + .lte(Instant.ofEpochMilli(1)), + near(0, 1.5, fieldPath("fieldName7"), fieldPath("fieldName8")), + near(Instant.ofEpochMilli(1), Duration.ofMillis(3), fieldPath("fieldName9")), + near(Point(Position(114.15, 22.28)), 1234.5, fieldPath("address.location")) + ).asJava) + .minimumShouldMatch(1) + .mustNot(Seq( + compound().must(Seq(exists(fieldPath("fieldName"))).asJava) + ).asJava) + ) should equal( + Document("""{ + "compound": { + "should": [ + { "exists": { "path": "fieldName1" } }, + { "text": { "path": "fieldName2", "query": "term1", "score": { "function": { "log": { "constant": 3.0 } } } } }, + { "text": { + "path": [ { "wildcard": "wildc*rd" }, "fieldName3" ], + "query": [ "term2", "term3" ], + "fuzzy": { "maxEdits": 1, "prefixLength": 2, "maxExpansions": 3 } } }, + { "autocomplete": { "path": "title", "query": "term4" } }, + { "autocomplete": { "path": "title", "query": ["Traffic in", "term5"], "fuzzy": {}, "tokenOrder": "sequential" } }, + { "range": { "path": [ "fieldName4", "fieldName5" ], "gt": 1, "lt": 1.5 } }, + { "range": { "path": "fieldName6", "lte": { "$date": "1970-01-01T00:00:00.001Z" } } }, + { "near": { "origin": 0, "pivot": 1.5, "path": [ "fieldName7", "fieldName8" ] } }, + { "near": { "origin": { "$date": "1970-01-01T00:00:00.001Z" }, "pivot": { "$numberLong": "3" }, "path": "fieldName9" } }, + { "near": { "origin": { type: "Point", coordinates: [ 114.15, 22.28 ] }, "pivot": 1234.5, "path": "address.location" } } + ], + "minimumShouldMatch": 1, + "mustNot": [ + { "compound": { "must": [ { "exists": { "path": "fieldName" } } ] } } + ] + } + }""") + ) + } + + def toDocument(bson: Bson): Document = + Document(bson.toBsonDocument(classOf[BsonDocument], MongoClient.DEFAULT_CODEC_REGISTRY)) +} diff --git a/driver-scala/src/test/scala/org/mongodb/scala/model/vault/ClientEncryptionSpec.scala b/driver-scala/src/test/scala/org/mongodb/scala/model/vault/ClientEncryptionSpec.scala new file mode 100644 index 
00000000000..93c3ba03d28 --- /dev/null +++ b/driver-scala/src/test/scala/org/mongodb/scala/model/vault/ClientEncryptionSpec.scala @@ -0,0 +1,159 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.mongodb.scala.model.vault + +import com.mongodb.client.model.CreateEncryptedCollectionParams + +import com.mongodb.reactivestreams.client.vault.{ ClientEncryption => JClientEncryption } +import org.mockito.ArgumentMatchers.{ any, same } +import org.mockito.Mockito.verify +import org.mongodb.scala.{ BaseSpec, MongoDatabase } +import org.mongodb.scala.bson.collection.immutable.Document +import org.mongodb.scala.bson.{ BsonBinary, BsonString } +import org.mongodb.scala.model.CreateCollectionOptions +import org.mongodb.scala.vault.ClientEncryption +import org.scalatestplus.mockito.MockitoSugar + +import java.lang.reflect.Modifier.{ isPublic, isStatic } + +class ClientEncryptionSpec extends BaseSpec with MockitoSugar { + + val wrapped = mock[JClientEncryption] + val clientEncryption = ClientEncryption(wrapped) + + "ClientEncryption" should "have the same methods as the wrapped ClientEncryption" in { + val wrapped = classOf[JClientEncryption].getDeclaredMethods.map(_.getName).toSet + val local = classOf[ClientEncryption].getDeclaredMethods.map(_.getName).toSet + + wrapped.foreach((name: String) => { + val cleanedName = name.stripPrefix("get") + assert(local.contains(name) || local.contains(cleanedName.head.toLower + cleanedName.tail), s"Missing: $name") + }) + } + + it should "call createDataKey" in { + val kmsProvider = "kmsProvider" + val options = DataKeyOptions() + + clientEncryption.createDataKey(kmsProvider) + verify(wrapped).createDataKey(same(kmsProvider), any()) + + clientEncryption.createDataKey(kmsProvider, options) + verify(wrapped).createDataKey(kmsProvider, options) + } + + it should "call getKey" in { + val bsonBinary = BsonBinary(Array[Byte](1, 2, 3)) + + clientEncryption.getKey(bsonBinary) + verify(wrapped).getKey(same(bsonBinary)) + } + + it should "call getKeyByAltName" in { + val altKeyName = "altKeyName" + + clientEncryption.getKeyByAltName(altKeyName) + verify(wrapped).getKeyByAltName(same(altKeyName)) + } + + it should "call getKeys" in { + clientEncryption.keys + verify(wrapped).getKeys + } + + it should "call addKeyAltName" in { + val bsonBinary = BsonBinary(Array[Byte](1, 2, 3)) + val altKeyName = "altKeyName" + + clientEncryption.addKeyAltName(bsonBinary, altKeyName) + verify(wrapped).addKeyAltName(same(bsonBinary), same(altKeyName)) + } + + it should "call deleteKey" in { + val bsonBinary = BsonBinary(Array[Byte](1, 2, 3)) + + clientEncryption.deleteKey(bsonBinary) + verify(wrapped).deleteKey(same(bsonBinary)) + } + + it should "call removeKeyAltName" in { + val bsonBinary = BsonBinary(Array[Byte](1, 2, 3)) + val altKeyName = "altKeyName" + + clientEncryption.removeKeyAltName(bsonBinary, altKeyName) + verify(wrapped).removeKeyAltName(same(bsonBinary), same(altKeyName)) + } + + it should "call rewrapManyDataKey"
in { + val bsonDocument = Document() + val options = RewrapManyDataKeyOptions() + + clientEncryption.rewrapManyDataKey(bsonDocument) + verify(wrapped).rewrapManyDataKey(same(bsonDocument)) + + clientEncryption.rewrapManyDataKey(bsonDocument, options) + verify(wrapped).rewrapManyDataKey(same(bsonDocument), same(options)) + } + + it should "call encrypt" in { + val bsonValue = BsonString("") + val options = EncryptOptions("algorithm") + clientEncryption.encrypt(bsonValue, options) + + verify(wrapped).encrypt(bsonValue, options) + } + + it should "call encrypt Expression" in { + val bsonDocument = Document() + val options = EncryptOptions("algorithm").rangeOptions(RangeOptions()) + clientEncryption.encryptExpression(bsonDocument, options) + + verify(wrapped).encryptExpression(bsonDocument.toBsonDocument, options) + } + + it should "call decrypt" in { + val bsonBinary = BsonBinary(Array[Byte](1, 2, 3)) + clientEncryption.decrypt(bsonBinary) + + verify(wrapped).decrypt(bsonBinary) + } + + it should "call createEncryptedCollection" in { + val database = mock[MongoDatabase] + val collectionName = "collectionName" + val createCollectionOptions = new CreateCollectionOptions() + val createEncryptedCollectionParams = new CreateEncryptedCollectionParams("kmsProvider") + clientEncryption.createEncryptedCollection( + database, + collectionName, + createCollectionOptions, + createEncryptedCollectionParams + ) + verify(wrapped).createEncryptedCollection( + same(database.wrapped), + same(collectionName), + same(createCollectionOptions), + same(createEncryptedCollectionParams) + ) + } + + it should "call close" in { + clientEncryption.close() + + verify(wrapped).close() + } +} diff --git a/driver-sync/build.gradle.kts b/driver-sync/build.gradle.kts new file mode 100644 index 00000000000..95cd0979973 --- /dev/null +++ b/driver-sync/build.gradle.kts @@ -0,0 +1,60 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +import ProjectExtensions.configureJarManifest +import ProjectExtensions.configureMavenPublication + +plugins { + id("project.java") + id("conventions.test-artifacts") + id("conventions.test-artifacts-runtime-dependencies") + id("conventions.test-include-optionals") + id("conventions.testing-mockito") + id("conventions.testing-junit") + id("conventions.testing-spock-exclude-slow") +} + +base.archivesName.set("mongodb-driver-sync") + +dependencies { + api(project(path = ":bson", configuration = "default")) + api(project(path = ":driver-core", configuration = "default")) + compileOnly(project(path = ":mongodb-crypt", configuration = "default")) + + testImplementation(project(path = ":bson", configuration = "testArtifacts")) + testImplementation(project(path = ":driver-core", configuration = "testArtifacts")) + + // lambda testing + testImplementation(libs.aws.lambda.core) +} + +configureMavenPublication { + pom { + name.set("MongoDB Driver") + description.set("The MongoDB Synchronous Driver") + } +} + +configureJarManifest { + attributes["Automatic-Module-Name"] = "org.mongodb.driver.sync.client" + attributes["Bundle-SymbolicName"] = "org.mongodb.driver-sync" + attributes["Import-Package"] = + listOf( + "com.mongodb.crypt.capi.*;resolution:=optional", + "com.mongodb.internal.crypt.capi.*;resolution:=optional", + "*", + ) + .joinToString(",") +} diff --git a/driver-sync/src/examples/documentation/CausalConsistencyExamples.java b/driver-sync/src/examples/documentation/CausalConsistencyExamples.java new file mode 100644 index 00000000000..ab37d9c21ee --- /dev/null +++ b/driver-sync/src/examples/documentation/CausalConsistencyExamples.java @@ -0,0 +1,107 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package documentation; + +import com.mongodb.ClientSessionOptions; +import com.mongodb.ConnectionString; +import com.mongodb.MongoClientSettings; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.client.ClientSession; +import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoClients; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.MongoDatabase; +import org.bson.BsonNull; +import org.bson.Document; + +import java.util.Date; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.client.model.Filters.eq; +import static com.mongodb.client.model.Updates.set; + +public final class CausalConsistencyExamples { + + /** + * Run this main method to see the output of this quick example. + * + * @param args takes an optional single argument for the connection string + */ + public static void main(final String[] args) { + MongoClientSettings clientSettings = ( + args.length == 0 + ? 
MongoClientSettings.builder() + : MongoClientSettings.builder().applyConnectionString(new ConnectionString(args[0]))) + .build(); + setupDatabase(clientSettings); + MongoClient client = MongoClients.create(clientSettings); + + // Start Causal Consistency Example 1 + // Example 1: Use a causally consistent session to ensure that the update occurs before the insert. + ClientSession session1 = client.startSession(ClientSessionOptions.builder().causallyConsistent(true).build()); + Date currentDate = new Date(); + MongoCollection<Document> items = client.getDatabase("test") + .withReadConcern(ReadConcern.MAJORITY) + .withWriteConcern(WriteConcern.MAJORITY.withWTimeout(1000, TimeUnit.MILLISECONDS)) + .getCollection("test"); + + items.updateOne(session1, eq("sku", "111"), set("end", currentDate)); + + Document document = new Document("sku", "nuts-111") + .append("name", "Pecans") + .append("start", currentDate); + items.insertOne(session1, document); + // End Causal Consistency Example 1 + + // Start Causal Consistency Example 2 + // Example 2: Advance the cluster time and the operation time to that of the other session to ensure that + // this client is causally consistent with the other session and read after the two writes. + ClientSession session2 = client.startSession(ClientSessionOptions.builder().causallyConsistent(true).build()); + session2.advanceClusterTime(session1.getClusterTime()); + session2.advanceOperationTime(session1.getOperationTime()); + + items = client.getDatabase("test") + .withReadPreference(ReadPreference.secondary()) + .withReadConcern(ReadConcern.MAJORITY) + .withWriteConcern(WriteConcern.MAJORITY.withWTimeout(1000, TimeUnit.MILLISECONDS)) + .getCollection("items"); + + for (Document item: items.find(session2, eq("end", BsonNull.VALUE))) { + System.out.println(item); + } + // End Causal Consistency Example 2 + } + + private static void setupDatabase(final MongoClientSettings clientSettings) { + MongoClient client = MongoClients.create(clientSettings); + client.getDatabase("test").drop(); + + MongoDatabase database = client.getDatabase("test"); + database.getCollection("items").drop(); + MongoCollection<Document> items = database.getCollection("items"); + + Document document = new Document("sku", "111") + .append("name", "Peanuts") + .append("start", new Date()); + items.insertOne(document); + client.close(); + } + + private CausalConsistencyExamples() {} +} diff --git a/driver-sync/src/examples/documentation/ChangeStreamSamples.java b/driver-sync/src/examples/documentation/ChangeStreamSamples.java new file mode 100644 index 00000000000..2d2aab6155d --- /dev/null +++ b/driver-sync/src/examples/documentation/ChangeStreamSamples.java @@ -0,0 +1,173 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package documentation; + +import com.mongodb.client.MongoChangeStreamCursor; +import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoClients; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.model.Aggregates; +import com.mongodb.client.model.Filters; +import com.mongodb.client.model.Updates; +import com.mongodb.client.model.changestream.ChangeStreamDocument; +import com.mongodb.client.model.changestream.FullDocument; +import org.bson.BsonDocument; +import org.bson.Document; +import org.bson.conversions.Bson; + +import java.util.List; + +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; + + +public final class ChangeStreamSamples { + + /** + * Run this main method to see the output of this quick example. + * + * @param args takes an optional single argument for the connection string + */ + public static void main(final String[] args) { + MongoClient mongoClient; + + if (args.length == 0) { + // connect to the local database server + mongoClient = MongoClients.create("mongodb://localhost:27017,localhost:27018,localhost:27019"); + } else { + mongoClient = MongoClients.create(args[0]); + } + + // Select the MongoDB database. + MongoDatabase database = mongoClient.getDatabase("testChangeStreams"); + database.drop(); + sleep(); + + // Select the collection to query. + MongoCollection<Document> collection = database.getCollection("documents"); + + /* + * Example 1 + * Create a simple change stream against an existing collection. + */ + System.out.println("1. Initial document from the Change Stream:"); + + // Create the change stream cursor. + MongoChangeStreamCursor<ChangeStreamDocument<Document>> cursor = collection.watch().cursor(); + + // Insert a test document into the collection. + collection.insertOne(Document.parse("{username: 'alice123', name: 'Alice'}")); + ChangeStreamDocument<Document> next = cursor.next(); + System.out.println(next); + cursor.close(); + sleep(); + + /* + * Example 2 + * Create a change stream with 'lookup' option enabled. + * The test document will be returned with a full version of the updated document. + */ + System.out.println("2. Document from the Change Stream, with lookup enabled:"); + + // Create the change stream cursor. + cursor = collection.watch().fullDocument(FullDocument.UPDATE_LOOKUP).cursor(); + + // Update the test document. + collection.updateOne(Document.parse("{username: 'alice123'}"), Document.parse("{$set : { email: 'alice@example.com'}}")); + + // Block until the next result is returned + next = cursor.next(); + System.out.println(next); + cursor.close(); + sleep(); + + /* + * Example 3 + * Create a change stream with 'lookup' option using a $match and ($redact or $project) stage. + */ + System.out.println("3. Document from the Change Stream, with lookup enabled, matching `update` operations only: "); + + // Insert some dummy data. + collection.insertMany(asList(Document.parse("{updateMe: 1}"), Document.parse("{replaceMe: 1}"))); + + // Create $match pipeline stage. + List<Bson> pipeline = singletonList( + Aggregates.match( + Filters.or( + Document.parse("{'fullDocument.username': 'alice123'}"), + Filters.in("operationType", asList("update", "replace", "delete")) + ) + ) + ); + + // Create the change stream cursor with $match. + cursor = collection.watch(pipeline).fullDocument(FullDocument.UPDATE_LOOKUP).cursor(); + + // Forward to the end of the change stream + next = cursor.tryNext(); + + // Update the test document.
+ collection.updateOne(Filters.eq("updateMe", 1), Updates.set("updated", true)); + next = cursor.next(); + System.out.printf("Update operationType: %s %n %s%n", next.getUpdateDescription(), next); + + // Replace the test document. + collection.replaceOne(Filters.eq("replaceMe", 1), Document.parse("{replaced: true}")); + next = cursor.next(); + System.out.printf("Replace operationType: %s%n", next); + + // Delete the test document. + collection.deleteOne(Filters.eq("username", "alice123")); + next = cursor.next(); + System.out.printf("Delete operationType: %s%n", next); + cursor.close(); + sleep(); + + /** + * Example 4 + * Resume a change stream using a resume token. + */ + System.out.println("4. Document from the Change Stream including a resume token:"); + + // Get the resume token from the last document we saw in the previous change stream cursor. + BsonDocument resumeToken = cursor.getResumeToken(); + System.out.println(resumeToken); + + // Pass the resume token to the resume after function to continue the change stream cursor. + cursor = collection.watch().resumeAfter(resumeToken).cursor(); + + // Insert a test document. + collection.insertOne(Document.parse("{test: 'd'}")); + + // Block until the next result is returned + next = cursor.next(); + System.out.println(next); + cursor.close(); + } + + private static void sleep() { + try { + Thread.sleep(100); + } catch (InterruptedException e) { + // Ignore. + } + } + + private ChangeStreamSamples() { + } +} diff --git a/driver-sync/src/examples/documentation/DocumentationSamples.java b/driver-sync/src/examples/documentation/DocumentationSamples.java new file mode 100644 index 00000000000..659507807ee --- /dev/null +++ b/driver-sync/src/examples/documentation/DocumentationSamples.java @@ -0,0 +1,739 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package documentation; + +import com.mongodb.client.AggregateIterable; +import com.mongodb.client.DatabaseTestCase; +import com.mongodb.client.FindIterable; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.MongoCursor; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.model.Field; +import com.mongodb.client.model.IndexOptions; +import com.mongodb.client.model.Indexes; +import com.mongodb.client.model.Variable; +import com.mongodb.client.model.changestream.ChangeStreamDocument; +import com.mongodb.client.model.changestream.FullDocument; +import org.bson.BsonDocument; +import org.bson.BsonType; +import org.bson.Document; +import org.bson.conversions.Bson; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.concurrent.atomic.AtomicBoolean; + +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet; +import static com.mongodb.client.Fixture.getDefaultDatabaseName; +import static com.mongodb.client.Fixture.getMongoClient; +import static com.mongodb.client.model.Accumulators.sum; +import static com.mongodb.client.model.Aggregates.addFields; +import static com.mongodb.client.model.Aggregates.group; +import static com.mongodb.client.model.Aggregates.lookup; +import static com.mongodb.client.model.Aggregates.match; +import static com.mongodb.client.model.Aggregates.project; +import static com.mongodb.client.model.Aggregates.sort; +import static com.mongodb.client.model.Aggregates.unwind; +import static com.mongodb.client.model.Filters.all; +import static com.mongodb.client.model.Filters.and; +import static com.mongodb.client.model.Filters.elemMatch; +import static com.mongodb.client.model.Filters.eq; +import static com.mongodb.client.model.Filters.exists; +import static com.mongodb.client.model.Filters.expr; +import static com.mongodb.client.model.Filters.gt; +import static com.mongodb.client.model.Filters.in; +import static com.mongodb.client.model.Filters.lt; +import static com.mongodb.client.model.Filters.lte; +import static com.mongodb.client.model.Filters.or; +import static com.mongodb.client.model.Filters.regex; +import static com.mongodb.client.model.Filters.size; +import static com.mongodb.client.model.Filters.type; +import static com.mongodb.client.model.Projections.computed; +import static com.mongodb.client.model.Projections.exclude; +import static com.mongodb.client.model.Projections.excludeId; +import static com.mongodb.client.model.Projections.fields; +import static com.mongodb.client.model.Projections.include; +import static com.mongodb.client.model.Projections.slice; +import static com.mongodb.client.model.Sorts.ascending; +import static com.mongodb.client.model.Updates.combine; +import static com.mongodb.client.model.Updates.currentDate; +import static com.mongodb.client.model.Updates.set; +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +// imports required for change streams +// end required change streams imports +// imports required for filters, projections and updates +// end required filters, projections and updates imports + + +public final class DocumentationSamples extends DatabaseTestCase { + + private final MongoDatabase database = 
getMongoClient().getDatabase(getDefaultDatabaseName()); + private final MongoCollection<Document> collection = database.getCollection("inventory"); + + @Test + public void testInsert() { + + // Start Example 1 + Document canvas = new Document("item", "canvas") + .append("qty", 100) + .append("tags", singletonList("cotton")); + + Document size = new Document("h", 28) + .append("w", 35.5) + .append("uom", "cm"); + canvas.put("size", size); + + collection.insertOne(canvas); + // End Example 1 + + // Start Example 2 + FindIterable<Document> findIterable = collection.find(eq("item", "canvas")); + // End Example 2 + + assertEquals(1, findIterable.into(new ArrayList<>()).size()); + + // Start Example 3 + Document journal = new Document("item", "journal") + .append("qty", 25) + .append("tags", asList("blank", "red")); + + Document journalSize = new Document("h", 14) + .append("w", 21) + .append("uom", "cm"); + journal.put("size", journalSize); + + Document mat = new Document("item", "mat") + .append("qty", 85) + .append("tags", singletonList("gray")); + + Document matSize = new Document("h", 27.9) + .append("w", 35.5) + .append("uom", "cm"); + mat.put("size", matSize); + + Document mousePad = new Document("item", "mousePad") + .append("qty", 25) + .append("tags", asList("gel", "blue")); + + Document mousePadSize = new Document("h", 19) + .append("w", 22.85) + .append("uom", "cm"); + mousePad.put("size", mousePadSize); + + collection.insertMany(asList(journal, mat, mousePad)); + // End Example 3 + + assertEquals(4, collection.countDocuments()); + } + + @Test + public void testQueryingAtTheTopLevel() { + // Start Example 6 + collection.insertMany(asList( + Document.parse("{ item: 'journal', qty: 25, size: { h: 14, w: 21, uom: 'cm' }, status: 'A' }"), + Document.parse("{ item: 'notebook', qty: 50, size: { h: 8.5, w: 11, uom: 'in' }, status: 'A' }"), + Document.parse("{ item: 'paper', qty: 100, size: { h: 8.5, w: 11, uom: 'in' }, status: 'D' }"), + Document.parse("{ item: 'planner', qty: 75, size: { h: 22.85, w: 30, uom: 'cm' }, status: 'D' }"), + Document.parse("{ item: 'postcard', qty: 45, size: { h: 10, w: 15.25, uom: 'cm' }, status: 'A' }") + )); + // End Example 6 + + assertEquals(5, collection.countDocuments()); + + // Start Example 7 + FindIterable<Document> findIterable = collection.find(new Document()); + // End Example 7 + + assertEquals(5, findIterable.into(new ArrayList<>()).size()); + + // Start Example 8 + findIterable = collection.find(); + // End Example 8 + + assertEquals(5, findIterable.into(new ArrayList<>()).size()); + + // Start Example 9 + findIterable = collection.find(eq("status", "D")); + // End Example 9 + + assertEquals(2, findIterable.into(new ArrayList<>()).size()); + + // Start Example 10 + findIterable = collection.find(in("status", "A", "D")); + // End Example 10 + + assertEquals(5, findIterable.into(new ArrayList<>()).size()); + + // Start Example 11 + findIterable = collection.find(and(eq("status", "A"), lt("qty", 30))); + // End Example 11 + + assertEquals(1, findIterable.into(new ArrayList<>()).size()); + + // Start Example 12 + findIterable = collection.find(or(eq("status", "A"), lt("qty", 30))); + // End Example 12 + + assertEquals(3, findIterable.into(new ArrayList<>()).size()); + + // Start Example 13 + findIterable = collection.find( + and(eq("status", "A"), + or(lt("qty", 30), regex("item", "^p"))) + ); + // End Example 13 + + assertEquals(2, findIterable.into(new ArrayList<>()).size()); + } + + @Test + public void testQueryingEmbeddedDocuments() { +
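// eq on an embedded document matches the whole subdocument exactly, including field order, + // as Examples 15 and 16 below demonstrate. +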
// Start Example 14 + collection.insertMany(asList( + Document.parse("{ item: 'journal', qty: 25, size: { h: 14, w: 21, uom: 'cm' }, status: 'A' }"), + Document.parse("{ item: 'notebook', qty: 50, size: { h: 8.5, w: 11, uom: 'in' }, status: 'A' }"), + Document.parse("{ item: 'paper', qty: 100, size: { h: 8.5, w: 11, uom: 'in' }, status: 'D' }"), + Document.parse("{ item: 'planner', qty: 75, size: { h: 22.85, w: 30, uom: 'cm' }, status: 'D' }"), + Document.parse("{ item: 'postcard', qty: 45, size: { h: 10, w: 15.25, uom: 'cm' }, status: 'A' }") + )); + // End Example 14 + + assertEquals(5, collection.countDocuments()); + + // Start Example 15 + FindIterable<Document> findIterable = collection.find(eq("size", Document.parse("{ h: 14, w: 21, uom: 'cm' }"))); + // End Example 15 + + assertEquals(1, findIterable.into(new ArrayList<>()).size()); + + // Start Example 16 + findIterable = collection.find(eq("size", Document.parse("{ w: 21, h: 14, uom: 'cm' }"))); + // End Example 16 + + assertEquals(0, findIterable.into(new ArrayList<>()).size()); + + // Start Example 17 + findIterable = collection.find(eq("size.uom", "in")); + // End Example 17 + + assertEquals(2, findIterable.into(new ArrayList<>()).size()); + + // Start Example 18 + findIterable = collection.find(lt("size.h", 15)); + // End Example 18 + + assertEquals(4, findIterable.into(new ArrayList<>()).size()); + + // Start Example 19 + findIterable = collection.find(and( + lt("size.h", 15), + eq("size.uom", "in"), + eq("status", "D") + )); + // End Example 19 + + assertEquals(1, findIterable.into(new ArrayList<>()).size()); + } + + @Test + public void testQueryingArrayValues() { + + //Start Example 20 + collection.insertMany(asList( + Document.parse("{ item: 'journal', qty: 25, tags: ['blank', 'red'], dim_cm: [ 14, 21 ] }"), + Document.parse("{ item: 'notebook', qty: 50, tags: ['red', 'blank'], dim_cm: [ 14, 21 ] }"), + Document.parse("{ item: 'paper', qty: 100, tags: ['red', 'blank', 'plain'], dim_cm: [ 14, 21 ] }"), + Document.parse("{ item: 'planner', qty: 75, tags: ['blank', 'red'], dim_cm: [ 22.85, 30 ] }"), + Document.parse("{ item: 'postcard', qty: 45, tags: ['blue'], dim_cm: [ 10, 15.25 ] }") + )); + //End Example 20 + + assertEquals(5, collection.countDocuments()); + + //Start Example 21 + FindIterable<Document> findIterable = collection.find(eq("tags", asList("red", "blank"))); + //End Example 21 + + assertEquals(1, findIterable.into(new ArrayList<>()).size()); + + //Start Example 22 + findIterable = collection.find(all("tags", asList("red", "blank"))); + //End Example 22 + + assertEquals(4, findIterable.into(new ArrayList<>()).size()); + + //Start Example 23 + findIterable = collection.find(eq("tags", "red")); + //End Example 23 + + assertEquals(4, findIterable.into(new ArrayList<>()).size()); + + //Start Example 24 + findIterable = collection.find(gt("dim_cm", 25)); + //End Example 24 + + assertEquals(1, findIterable.into(new ArrayList<>()).size()); + + //Start Example 25 + findIterable = collection.find(and(gt("dim_cm", 15), lt("dim_cm", 20))); + //End Example 25 + + assertEquals(4, findIterable.into(new ArrayList<>()).size()); + + //Start Example 26 + findIterable = collection.find(elemMatch("dim_cm", Document.parse("{ $gt: 22, $lt: 30 }"))); + //End Example 26 + + assertEquals(1, findIterable.into(new ArrayList<>()).size()); + + //Start Example 27 + findIterable = collection.find(gt("dim_cm.1", 25)); + //End Example 27 + + assertEquals(1, findIterable.into(new ArrayList<>()).size()); + + //Start Example 28 + findIterable = collection.find(size("tags", 3)); + //End Example 28
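+ // $size matches an exact element count only; range queries on array length need a different approach.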
+
+    @Test
+    public void testQueryingArraysContainingDocuments() {
+
+        //Start Example 29
+        collection.insertMany(asList(
+                Document.parse("{ item: 'journal', instock: [ { warehouse: 'A', qty: 5 }, { warehouse: 'C', qty: 15 } ] }"),
+                Document.parse("{ item: 'notebook', instock: [ { warehouse: 'C', qty: 5 } ] }"),
+                Document.parse("{ item: 'paper', instock: [ { warehouse: 'A', qty: 60 }, { warehouse: 'B', qty: 15 } ] }"),
+                Document.parse("{ item: 'planner', instock: [ { warehouse: 'A', qty: 40 }, { warehouse: 'B', qty: 5 } ] }"),
+                Document.parse("{ item: 'postcard', instock: [ { warehouse: 'B', qty: 15 }, { warehouse: 'C', qty: 35 } ] }")
+        ));
+        //End Example 29
+
+        assertEquals(5, collection.countDocuments());
+
+        //Start Example 30
+        FindIterable<Document> findIterable = collection.find(eq("instock", Document.parse("{ warehouse: 'A', qty: 5 }")));
+        //End Example 30
+
+        assertEquals(1, findIterable.into(new ArrayList<>()).size());
+
+        //Start Example 31
+        findIterable = collection.find(eq("instock", Document.parse("{ qty: 5, warehouse: 'A' }")));
+        //End Example 31
+
+        assertEquals(0, findIterable.into(new ArrayList<>()).size());
+
+        //Start Example 32
+        findIterable = collection.find(lte("instock.0.qty", 20));
+        //End Example 32
+
+        assertEquals(3, findIterable.into(new ArrayList<>()).size());
+
+        //Start Example 33
+        findIterable = collection.find(lte("instock.qty", 20));
+        //End Example 33
+
+        assertEquals(5, findIterable.into(new ArrayList<>()).size());
+
+        //Start Example 34
+        findIterable = collection.find(elemMatch("instock", Document.parse("{ qty: 5, warehouse: 'A' }")));
+        //End Example 34
+
+        assertEquals(1, findIterable.into(new ArrayList<>()).size());
+
+        //Start Example 35
+        findIterable = collection.find(elemMatch("instock", Document.parse("{ qty: { $gt: 10, $lte: 20 } }")));
+        //End Example 35
+
+        assertEquals(3, findIterable.into(new ArrayList<>()).size());
+
+        //Start Example 36
+        findIterable = collection.find(and(gt("instock.qty", 10), lte("instock.qty", 20)));
+        //End Example 36
+
+        assertEquals(4, findIterable.into(new ArrayList<>()).size());
+
+        //Start Example 37
+        findIterable = collection.find(and(eq("instock.qty", 5), eq("instock.warehouse", "A")));
+        //End Example 37
+
+        assertEquals(2, findIterable.into(new ArrayList<>()).size());
+    }
+
+    @Test
+    public void testQueryingNullandMissingFields() {
+
+        //Start Example 38
+        collection.insertMany(asList(
+                Document.parse("{'_id': 1, 'item': null}"),
+                Document.parse("{'_id': 2}")
+        ));
+        //End Example 38
+
+        assertEquals(2, collection.countDocuments());
+
+        //Start Example 39
+        FindIterable<Document> findIterable = collection.find(eq("item", null));
+        //End Example 39
+
+        assertEquals(2, findIterable.into(new ArrayList<>()).size());
+
+        //Start Example 40
+        findIterable = collection.find(type("item", BsonType.NULL));
+        //End Example 40
+
+        assertEquals(1, findIterable.into(new ArrayList<>()).size());
+
+        //Start Example 41
+        findIterable = collection.find(exists("item", false));
+        //End Example 41
+
+        assertEquals(1, findIterable.into(new ArrayList<>()).size());
+    }
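+
+    // For reference (illustrative, not from the upstream samples), the three null-handling
+    // filters above correspond to these query documents:
+    //   eq("item", null)             // { "item": null }                 -> null value or missing field
+    //   type("item", BsonType.NULL)  // { "item": { "$type": 10 } }      -> only an explicit BSON null
+    //   exists("item", false)        // { "item": { "$exists": false } } -> only a missing field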
Document.parse("{ item: 'paper', status: 'D', size: { h: 8.5, w: 11, uom: 'in' }, instock: [ { warehouse: 'A', qty: 60 }]}"), + Document.parse("{ item: 'planner', status: 'D', size: { h: 22.85, w: 30, uom: 'cm' }, instock: [ { warehouse: 'A', qty: 40}]}"), + Document.parse("{ item: 'postcard', status: 'A', size: { h: 10, w: 15.25, uom: 'cm' }, " + + "instock: [ { warehouse: 'B', qty: 15 }, { warehouse: 'C', qty: 35 } ] }") + )); + //End Example 42 + + assertEquals(5, collection.countDocuments()); + + //Start Example 43 + FindIterable findIterable = collection.find(eq("status", "A")); + //End Example 43 + + assertEquals(3, findIterable.into(new ArrayList<>()).size()); + + //Start Example 44 + findIterable = collection.find(eq("status", "A")).projection(include("item", "status")); + //End Example 44 + + findIterable.forEach(document -> assertEquals(new HashSet<>(asList("_id", "item", "status")), document.keySet())); + + //Start Example 45 + findIterable = collection.find(eq("status", "A")) + .projection(fields(include("item", "status"), excludeId())); + //End Example 45 + + findIterable.forEach(document -> assertEquals(new HashSet<>(asList("item", "status")), document.keySet())); + + //Start Example 46 + findIterable = collection.find(eq("status", "A")).projection(exclude("item", "status")); + //End Example 46 + + findIterable.forEach(document -> assertEquals(new HashSet<>(asList("_id", "size", "instock")), document.keySet())); + + //Start Example 47 + findIterable = collection.find(eq("status", "A")).projection(include("item", "status", "size.uom")); + //End Example 47 + + findIterable.forEach(document -> { + assertEquals(new HashSet<>(asList("_id", "item", "status", "size")), document.keySet()); + assertEquals(new HashSet<>(singletonList("uom")), document.get("size", Document.class).keySet()); + }); + + //Start Example 48 + findIterable = collection.find(eq("status", "A")).projection(exclude("size.uom")); + //End Example 48 + + findIterable.forEach(document -> { + assertEquals(new HashSet<>(asList("_id", "item", "instock", "status", "size")), document.keySet()); + assertEquals(new HashSet<>(asList("h", "w")), document.get("size", Document.class).keySet()); + }); + + //Start Example 49 + findIterable = collection.find(eq("status", "A")).projection(include("item", "status", "instock.qty")); + //End Example 49 + + findIterable.forEach(document -> { + assertEquals(new HashSet<>(asList("_id", "item", "instock", "status")), document.keySet()); + + List instock = (List) document.get("instock"); + for (Document stockDocument : instock) { + assertEquals(new HashSet<>(singletonList("qty")), stockDocument.keySet()); + } + }); + + //Start Example 50 + findIterable = collection.find(eq("status", "A")) + .projection(fields(include("item", "status"), slice("instock", -1))); + //End Example 50 + + findIterable.forEach(document -> { + assertEquals(new HashSet<>(asList("_id", "item", "instock", "status")), document.keySet()); + + List instock = (List) document.get("instock"); + assertEquals(1, instock.size()); + }); + } + + @Test + public void testAggregate() { + + MongoCollection salesCollection = database.getCollection("sales"); + + // Start Aggregation Example 1 + AggregateIterable aggregateIterable = salesCollection.aggregate(asList( + match(eq("items.fruit", "banana")), + sort(ascending("date")) + )); + // End Aggregation Example 1 + + aggregateIterable.into(new ArrayList<>()); + + // Start Aggregation Example 2 + aggregateIterable = salesCollection.aggregate(asList( + unwind("$items"), + 
match(eq("items.fruit", "banana")), + group(new Document("day", new Document("$dayOfWeek", "$date")), + sum("count", "$items.quantity")), + project(fields( + computed("dayOfWeek", "$_id.day"), + computed("numberSold", "$count"), + excludeId())), + sort(Indexes.ascending("numberSold")))); + // End Aggregation Example 2 + + aggregateIterable.into(new ArrayList<>()); + + // Start Aggregation Example 3 + aggregateIterable = salesCollection.aggregate(asList( + unwind("$items"), + group(new Document("day", new Document("$dayOfWeek", "$date")), + sum("items_old", "$items.quantity"), + sum("revenue", new Document("$multiply", asList("$items.quantity", "$items.price")))), + project(fields( + computed("day", "$_id.day"), + include("revenue", "items_sold"), + computed("discount", + new Document("$cond", + new Document("if", new Document("$lte", Arrays.asList("$revenue", 250))) + .append("then", 25) + .append("else", 0))))))); + // End Aggregation Example 3 + + aggregateIterable.into(new ArrayList<>()); + + MongoCollection airAlliancesCollection = database.getCollection("air_alliances"); + + // Start Aggregation Example 4 + aggregateIterable = airAlliancesCollection.aggregate(asList( + lookup("air_airlines", + singletonList(new Variable<>("constituents", "$airlines")), + singletonList(match(expr(new Document("$in", asList("$name", "$$constituents"))))), + "airlines"), + project(fields( + excludeId(), + include("name"), + computed("airlines", + new Document("$filter", + new Document("input", "$airlines") + .append("as", "airline") + .append("cond", new Document("$eq", asList("$$airline.country", "Canada"))))))))); + + // End Aggregation Example 4 + + aggregateIterable.into(new ArrayList<>()); + } + + @Test + public void testUpdates() { + //Start Example 51 + collection.insertMany(asList( + Document.parse("{ item: 'canvas', qty: 100, size: { h: 28, w: 35.5, uom: 'cm' }, status: 'A' }"), + Document.parse("{ item: 'journal', qty: 25, size: { h: 14, w: 21, uom: 'cm' }, status: 'A' }"), + Document.parse("{ item: 'mat', qty: 85, size: { h: 27.9, w: 35.5, uom: 'cm' }, status: 'A' }"), + Document.parse("{ item: 'mousepad', qty: 25, size: { h: 19, w: 22.85, uom: 'cm' }, status: 'P' }"), + Document.parse("{ item: 'notebook', qty: 50, size: { h: 8.5, w: 11, uom: 'in' }, status: 'P' }"), + Document.parse("{ item: 'paper', qty: 100, size: { h: 8.5, w: 11, uom: 'in' }, status: 'D' }"), + Document.parse("{ item: 'planner', qty: 75, size: { h: 22.85, w: 30, uom: 'cm' }, status: 'D' }"), + Document.parse("{ item: 'postcard', qty: 45, size: { h: 10, w: 15.25, uom: 'cm' }, status: 'A' }"), + Document.parse("{ item: 'sketchbook', qty: 80, size: { h: 14, w: 21, uom: 'cm' }, status: 'A' }"), + Document.parse("{ item: 'sketch pad', qty: 95, size: { h: 22.85, w: 30.5, uom: 'cm' }, status: 'A' }") + )); + //End Example 51 + + assertEquals(10, collection.countDocuments()); + + //Start Example 52 + collection.updateOne(eq("item", "paper"), + combine(set("size.uom", "cm"), set("status", "P"), currentDate("lastModified"))); + //End Example 52 + + collection.find(eq("item", "paper")).forEach(document -> { + assertEquals("cm", document.get("size", Document.class).getString("uom")); + assertEquals("P", document.getString("status")); + assertTrue(document.containsKey("lastModified")); + }); + + + //Start Example 53 + collection.updateMany(lt("qty", 50), + combine(set("size.uom", "in"), set("status", "P"), currentDate("lastModified"))); + //End Example 53 + + collection.find(lt("qty", 50)).forEach(document -> { + assertEquals("in", 
document.get("size", Document.class).getString("uom")); + assertEquals("P", document.getString("status")); + assertTrue(document.containsKey("lastModified")); + }); + + //Start Example 54 + collection.replaceOne(eq("item", "paper"), + Document.parse("{ item: 'paper', instock: [ { warehouse: 'A', qty: 60 }, { warehouse: 'B', qty: 40 } ] }")); + //End Example 54 + + collection.find(eq("item", "paper")).projection(excludeId()).forEach(document -> assertEquals(Document.parse("{ item: 'paper', instock: [ { warehouse: 'A', qty: 60 }, { warehouse: 'B', qty: 40 } ] }"), + document)); + + } + + @Test + public void testDeletions() { + + //Start Example 55 + collection.insertMany(asList( + Document.parse("{ item: 'journal', qty: 25, size: { h: 14, w: 21, uom: 'cm' }, status: 'A' }"), + Document.parse("{ item: 'notebook', qty: 50, size: { h: 8.5, w: 11, uom: 'in' }, status: 'A' }"), + Document.parse("{ item: 'paper', qty: 100, size: { h: 8.5, w: 11, uom: 'in' }, status: 'D' }"), + Document.parse("{ item: 'planner', qty: 75, size: { h: 22.85, w: 30, uom: 'cm' }, status: 'D' }"), + Document.parse("{ item: 'postcard', qty: 45, size: { h: 10, w: 15.25, uom: 'cm' }, status: 'A' }") + )); + //End Example 55 + + assertEquals(5, collection.countDocuments()); + + //Start Example 57 + collection.deleteMany(eq("status", "A")); + //End Example 57 + + assertEquals(2, collection.countDocuments()); + + //Start Example 58 + collection.deleteOne(eq("status", "D")); + //End Example 58 + + assertEquals(1, collection.countDocuments()); + + //Start Example 56 + collection.deleteMany(new Document()); + //End Example 56 + + assertEquals(0, collection.countDocuments()); + } + + @Test + public void testWatch() throws InterruptedException { + assumeTrue(isDiscoverableReplicaSet()); + + MongoCollection inventory = collection; + AtomicBoolean stop = new AtomicBoolean(false); + + Thread thread = new Thread(() -> { + while (!stop.get()) { + collection.insertMany(asList(new Document("username", "alice"), new Document())); + try { + Thread.sleep(10); + } catch (InterruptedException e) { + // ignore + } + collection.deleteOne(new Document("username", "alice")); + } + }); + thread.start(); + + // Start Changestream Example 1 + MongoCursor> cursor = inventory.watch().iterator(); + ChangeStreamDocument next = cursor.next(); + // End Changestream Example 1 + + cursor.close(); + + // Start Changestream Example 2 + cursor = inventory.watch().fullDocument(FullDocument.UPDATE_LOOKUP).iterator(); + next = cursor.next(); + // End Changestream Example 2 + + cursor.close(); + + // Start Changestream Example 3 + BsonDocument resumeToken = next.getResumeToken(); + cursor = inventory.watch().resumeAfter(resumeToken).iterator(); + next = cursor.next(); + // End Changestream Example 3 + + cursor.close(); + + // Start Changestream Example 4 + List pipeline = asList(match(Document.parse("{'fullDocument.username': 'alice'}")), + addFields(new Field<>("newField", "this is an added field!"))); + cursor = inventory.watch(pipeline).iterator(); + next = cursor.next(); + // End Changestream Example 4 + + cursor.close(); + + stop.set(true); + thread.join(); + } + + @Test + public void testRunCommand() { + // Start runCommand Example 1 + database.runCommand(new Document("buildInfo", 1)); + // End runCommand Example 1 + } + + @Test + public void testCreateIndexes() { + // Start Index Example 1 + collection.createIndex(Indexes.ascending("score")); + // End Index Example 1 + + // Start Index Example 2 + collection.createIndex(Indexes.ascending("cuisine", "name"), 
+
+    @AfterEach
+    public void tearDown() {
+        collection.drop();
+    }
+}
diff --git a/driver-sync/src/examples/documentation/ExampleAwsLambdaHandler.java b/driver-sync/src/examples/documentation/ExampleAwsLambdaHandler.java
new file mode 100644
index 00000000000..de7890f739b
--- /dev/null
+++ b/driver-sync/src/examples/documentation/ExampleAwsLambdaHandler.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package documentation;
+
+import com.amazonaws.services.lambda.runtime.Context;
+import com.amazonaws.services.lambda.runtime.RequestHandler;
+import com.mongodb.client.MongoClient;
+import com.mongodb.client.MongoClients;
+import org.bson.Document;
+
+// Start AWS Lambda Example 1
+public class ExampleAwsLambdaHandler implements RequestHandler<String, String> {
+    private final MongoClient client;
+
+    public ExampleAwsLambdaHandler() {
+        client = MongoClients.create(System.getenv("MONGODB_URI"));
+    }
+
+    @Override
+    public String handleRequest(final String input, final Context context) {
+        return client.getDatabase("admin").runCommand(new Document("ping", 1)).toJson();
+    }
+}
+// End AWS Lambda Example 1
diff --git a/driver-sync/src/examples/documentation/TransactionExample.java b/driver-sync/src/examples/documentation/TransactionExample.java
new file mode 100644
index 00000000000..4f73122ee35
--- /dev/null
+++ b/driver-sync/src/examples/documentation/TransactionExample.java
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2018 MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package documentation; + +import com.mongodb.ConnectionString; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoCommandException; +import com.mongodb.MongoException; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.TransactionOptions; +import com.mongodb.WriteConcern; +import com.mongodb.client.ClientSession; +import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoClients; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.model.Filters; +import com.mongodb.client.model.Updates; +import org.bson.Document; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet; +import static com.mongodb.ClusterFixture.isSharded; +import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static org.junit.Assume.assumeTrue; + +public class TransactionExample { + private MongoClient client; + + @Before + public void setUp() { + assumeTrue(isSharded() || isDiscoverableReplicaSet()); + MongoClientSettings.Builder builder = getMongoClientSettingsBuilder() + .applyConnectionString(new ConnectionString( + "mongodb://localhost,localhost:27018,localhost:27019/?serverSelectionTimeoutMS=5000")); + client = MongoClients.create(builder.build()); + + createCollection(client, "hr", "employees"); + createCollection(client, "reporting", "events"); + } + + @After + public void cleanUp() { + if (client != null) { + client.close(); + } + } + + @Test + public void updateEmployeeInfoWithRetry() { + runTransactionWithRetry(this::updateEmployeeInfo); + } + + private void runTransactionWithRetry(final Runnable transactional) { + while (true) { + try { + transactional.run(); + break; + } catch (MongoException e) { + System.out.println("Transaction aborted. 
Caught exception during transaction."); + + if (e.hasErrorLabel(MongoException.TRANSIENT_TRANSACTION_ERROR_LABEL)) { + System.out.println("TransientTransactionError, aborting transaction and retrying ..."); + } else { + throw e; + } + } + } + } + + private void commitWithRetry(final ClientSession clientSession) { + while (true) { + try { + clientSession.commitTransaction(); + System.out.println("Transaction committed"); + break; + } catch (MongoException e) { + // can retry commit + if (e.hasErrorLabel(MongoException.UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL)) { + System.out.println("UnknownTransactionCommitResult, retrying commit operation ..."); + } else { + System.out.println("Exception during commit ..."); + throw e; + } + } + } + } + + private void updateEmployeeInfo() { + MongoCollection employeesCollection = client.getDatabase("hr").getCollection("employees"); + MongoCollection eventsCollection = client.getDatabase("reporting").getCollection("events"); + + TransactionOptions txnOptions = TransactionOptions.builder() + .readPreference(ReadPreference.primary()) + .readConcern(ReadConcern.MAJORITY) + .writeConcern(WriteConcern.MAJORITY) + .build(); + + try (ClientSession clientSession = client.startSession()) { + clientSession.startTransaction(txnOptions); + + employeesCollection.updateOne(clientSession, + Filters.eq("employee", 3), + Updates.set("status", "Inactive")); + eventsCollection.insertOne(clientSession, + new Document("employee", 3).append("status", new Document("new", "Inactive").append("old", "Active"))); + + commitWithRetry(clientSession); + } + } + + @Test + public void updateEmployeeInfoUsingWithTransactionHelper() { + MongoCollection employeesCollection = client.getDatabase("hr").getCollection("employees"); + MongoCollection eventsCollection = client.getDatabase("reporting").getCollection("events"); + + TransactionOptions txnOptions = TransactionOptions.builder() + .readPreference(ReadPreference.primary()) + .readConcern(ReadConcern.MAJORITY) + .writeConcern(WriteConcern.MAJORITY) + .build(); + + try (ClientSession clientSession = client.startSession()) { + clientSession.withTransaction(() -> { + employeesCollection.updateOne(clientSession, + Filters.eq("employee", 3), + Updates.set("status", "Inactive")); + eventsCollection.insertOne(clientSession, + new Document("employee", 3).append("status", new Document("new", "Inactive").append("old", "Active"))); + return null; + }, txnOptions); + } catch (MongoException e) { + System.out.println("Transaction aborted. Caught exception during transaction."); + throw e; + } + } + + private void createCollection(final MongoClient client, final String dbName, final String collectionName) { + try { + client.getDatabase(dbName).createCollection(collectionName); + } catch (MongoCommandException e) { + if (!e.getErrorCodeName().equals("NamespaceExists")) { + throw e; + } + } + } + +} diff --git a/driver-sync/src/examples/gridfs/GridFSTour.java b/driver-sync/src/examples/gridfs/GridFSTour.java new file mode 100644 index 00000000000..e1f8a4c918e --- /dev/null +++ b/driver-sync/src/examples/gridfs/GridFSTour.java @@ -0,0 +1,161 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package gridfs; + +import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoClients; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.gridfs.GridFSBucket; +import com.mongodb.client.gridfs.GridFSBuckets; +import com.mongodb.client.gridfs.GridFSDownloadStream; +import com.mongodb.client.gridfs.GridFSUploadStream; +import com.mongodb.client.gridfs.model.GridFSDownloadOptions; +import com.mongodb.client.gridfs.model.GridFSUploadOptions; +import org.bson.Document; +import org.bson.types.ObjectId; + +import java.io.ByteArrayInputStream; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; + +import static com.mongodb.client.model.Filters.eq; + +/** + * The GridFS code example see: https://mongodb.github.io/mongo-java-driver/3.1/driver/reference/gridfs + */ +public final class GridFSTour { + + /** + * Run this main method to see the output of this quick example. + * + * @param args takes an optional single argument for the connection string + * @throws FileNotFoundException if the sample file cannot be found + * @throws IOException if there was an exception closing an input stream + */ + public static void main(final String[] args) throws FileNotFoundException, IOException { + MongoClient mongoClient; + + if (args.length == 0) { + // connect to the local database server + mongoClient = MongoClients.create(); + } else { + mongoClient = MongoClients.create(args[0]); + } + + // get handle to "mydb" database + MongoDatabase database = mongoClient.getDatabase("mydb"); + database.drop(); + + GridFSBucket gridFSBucket = GridFSBuckets.create(database); + + /* + * UploadFromStream Example + */ + // Get the input stream + InputStream streamToUploadFrom = new ByteArrayInputStream("Hello World".getBytes(StandardCharsets.UTF_8)); + + // Create some custom options + GridFSUploadOptions options = new GridFSUploadOptions() + .chunkSizeBytes(1024) + .metadata(new Document("type", "presentation")); + + ObjectId fileId = gridFSBucket.uploadFromStream("mongodb-tutorial", streamToUploadFrom, options); + streamToUploadFrom.close(); + System.out.println("The fileId of the uploaded file is: " + fileId.toHexString()); + + /* + * OpenUploadStream Example + */ + + // Get some data to write + byte[] data = "Data to upload into GridFS".getBytes(StandardCharsets.UTF_8); + + + GridFSUploadStream uploadStream = gridFSBucket.openUploadStream("sampleData"); + uploadStream.write(data); + uploadStream.close(); + System.out.println("The fileId of the uploaded file is: " + uploadStream.getObjectId().toHexString()); + + /* + * Find documents + */ + gridFSBucket.find().forEach(gridFSFile -> System.out.println(gridFSFile.getFilename())); + + /* + * Find documents with a filter + */ + gridFSBucket.find(eq("metadata.contentType", "image/png")).forEach( + gridFSFile -> System.out.println(gridFSFile.getFilename())); + + /* + * DownloadToStream + */ + FileOutputStream streamToDownloadTo = new FileOutputStream("/tmp/mongodb-tutorial.txt"); + 
gridFSBucket.downloadToStream(fileId, streamToDownloadTo); + streamToDownloadTo.close(); + + /* + * DownloadToStreamByName + */ + streamToDownloadTo = new FileOutputStream("/tmp/mongodb-tutorial.txt"); + GridFSDownloadOptions downloadOptions = new GridFSDownloadOptions().revision(0); + gridFSBucket.downloadToStream("mongodb-tutorial", streamToDownloadTo, downloadOptions); + streamToDownloadTo.close(); + + /* + * OpenDownloadStream + */ + GridFSDownloadStream downloadStream = gridFSBucket.openDownloadStream(fileId); + int fileLength = (int) downloadStream.getGridFSFile().getLength(); + byte[] bytesToWriteTo = new byte[fileLength]; + downloadStream.read(bytesToWriteTo); + downloadStream.close(); + + System.out.println(new String(bytesToWriteTo, StandardCharsets.UTF_8)); + + /* + * OpenDownloadStreamByName + */ + + downloadStream = gridFSBucket.openDownloadStream("sampleData"); + fileLength = (int) downloadStream.getGridFSFile().getLength(); + bytesToWriteTo = new byte[fileLength]; + downloadStream.read(bytesToWriteTo); + downloadStream.close(); + + System.out.println(new String(bytesToWriteTo, StandardCharsets.UTF_8)); + + /* + * Rename + */ + gridFSBucket.rename(fileId, "mongodbTutorial"); + + /* + * Delete + */ + gridFSBucket.delete(fileId); + + + database.drop(); + } + + private GridFSTour() { + } +} diff --git a/driver-sync/src/examples/gridfs/package-info.java b/driver-sync/src/examples/gridfs/package-info.java new file mode 100644 index 00000000000..2ee200c7122 --- /dev/null +++ b/driver-sync/src/examples/gridfs/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains the gridfs tour example + */ +package gridfs; diff --git a/driver-sync/src/examples/primer/AggregatePrimer.java b/driver-sync/src/examples/primer/AggregatePrimer.java new file mode 100644 index 00000000000..45f72d6edbf --- /dev/null +++ b/driver-sync/src/examples/primer/AggregatePrimer.java @@ -0,0 +1,85 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package primer;
+
+import com.mongodb.client.AggregateIterable;
+import org.bson.Document;
+import org.junit.jupiter.api.Test;
+
+import static java.util.Arrays.asList;
+
+// @import: start
+// @import: end
+
+public class AggregatePrimer extends PrimerTestCase {
+
+    @Test
+    public void groupDocumentsByAFieldAndCalculateCount() {
+
+        // @begin: group-documents-by-a-field-and-calculate-count
+        // @code: start
+        AggregateIterable<Document> iterable = db.getCollection("restaurants").aggregate(asList(
+                new Document("$group", new Document("_id", "$borough").append("count", new Document("$sum", 1)))));
+        // @code: end
+
+        // @pre: Iterate the results and apply a block to each resulting document
+        // @code: start
+        iterable.forEach(document -> System.out.println(document.toJson()));
+        // @code: end
+
+        /*
+        // @results: start
+        { "_id" : "Missing", "count" : 51 }
+        { "_id" : "Staten Island", "count" : 969 }
+        { "_id" : "Manhattan", "count" : 10259 }
+        { "_id" : "Brooklyn", "count" : 6086 }
+        { "_id" : "Queens", "count" : 5656 }
+        { "_id" : "Bronx", "count" : 2338 }
+        // @results: end
+        */
+
+        // @end: group-documents-by-a-field-and-calculate-count
+    }
+
+    @Test
+    public void filterAndGroupDocuments() {
+
+        // @begin: filter-and-group-documents
+        // @code: start
+        AggregateIterable<Document> iterable = db.getCollection("restaurants").aggregate(asList(
+                new Document("$match", new Document("borough", "Queens").append("cuisine", "Brazilian")),
+                new Document("$group", new Document("_id", "$address.zipcode").append("count", new Document("$sum", 1)))));
+        // @code: end
+
+        // @pre: Iterate the results and apply a block to each resulting document
+        // @code: start
+        iterable.forEach(document -> System.out.println(document.toJson()));
+        // @code: end
+
+        /*
+        // @results: start
+        { "_id" : "11377", "count" : 1 }
+        { "_id" : "11368", "count" : 1 }
+        { "_id" : "11101", "count" : 2 }
+        { "_id" : "11106", "count" : 3 }
+        { "_id" : "11103", "count" : 1 }
+        // @results: end
+        */
+
+        // @end: filter-and-group-documents
+    }
+}
diff --git a/driver-sync/src/examples/primer/IndexesPrimer.java b/driver-sync/src/examples/primer/IndexesPrimer.java
new file mode 100644
index 00000000000..ce9496788f6
--- /dev/null
+++ b/driver-sync/src/examples/primer/IndexesPrimer.java
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package primer;
+
+import org.junit.jupiter.api.Test;
+
+// @imports: start
+import org.bson.Document;
+// @imports: end
+
+public class IndexesPrimer extends PrimerTestCase {
+
+    @Test
+    public void singleFieldIndex() {
+
+        // @begin: single-field-index
+        // @code: start
+        db.getCollection("restaurants").createIndex(new Document("cuisine", 1));
+        // @code: end
+
+        // @post: The method does not return a result.
+ // @end: single-field-index + } + + @Test + public void createCompoundIndex() { + // @begin: create-compound-index + // @code: start + db.getCollection("restaurants").createIndex(new Document("cuisine", 1).append("address.zipcode", -1)); + // @code: end + + // @post: The method does not return a result. + // @end: create-compound-index + } +} diff --git a/driver-sync/src/examples/primer/InsertPrimer.java b/driver-sync/src/examples/primer/InsertPrimer.java new file mode 100644 index 00000000000..39a3565f51d --- /dev/null +++ b/driver-sync/src/examples/primer/InsertPrimer.java @@ -0,0 +1,65 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package primer; + +import org.junit.jupiter.api.Test; + +// @imports: start +import org.bson.Document; + +import java.text.DateFormat; +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.Locale; + +import static java.util.Arrays.asList; +// @imports: end + +public class InsertPrimer extends PrimerTestCase { + + @Test + public void insertADocument() throws ParseException { + + // @begin: insert-a-document + // @code: start + DateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'", Locale.ENGLISH); + db.getCollection("restaurants").insertOne( + new Document("address", + new Document() + .append("street", "2 Avenue") + .append("zipcode", "10075") + .append("building", "1480") + .append("coord", asList(-73.9557413, 40.7720266))) + .append("borough", "Manhattan") + .append("cuisine", "Italian") + .append("grades", asList( + new Document() + .append("date", format.parse("2014-10-01T00:00:00Z")) + .append("grade", "A") + .append("score", 11), + new Document() + .append("date", format.parse("2014-01-16T00:00:00Z")) + .append("grade", "B") + .append("score", 17))) + .append("name", "Vella") + .append("restaurant_id", "41704620")); + // @code: end + + // @post: The method does not return a result. + // @end: insert-a-document + } +} diff --git a/driver-sync/src/examples/primer/PrimerTestCase.java b/driver-sync/src/examples/primer/PrimerTestCase.java new file mode 100644 index 00000000000..2466cc03578 --- /dev/null +++ b/driver-sync/src/examples/primer/PrimerTestCase.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package primer; + +import com.mongodb.client.DatabaseTestCase; +import com.mongodb.client.MongoDatabase; + +import static com.mongodb.client.Fixture.getMongoClient; + +public class PrimerTestCase extends DatabaseTestCase { + MongoDatabase db = getDatabase(); + + MongoDatabase getDatabase() { + // Data should be installed as per the primer instructions + return getMongoClient().getDatabase("test"); + } +} diff --git a/driver-sync/src/examples/primer/QueryPrimer.java b/driver-sync/src/examples/primer/QueryPrimer.java new file mode 100644 index 00000000000..67a4c7431c5 --- /dev/null +++ b/driver-sync/src/examples/primer/QueryPrimer.java @@ -0,0 +1,217 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package primer; + +import com.mongodb.client.FindIterable; +import org.bson.Document; +import org.junit.jupiter.api.Test; + +import static com.mongodb.client.model.Filters.and; +import static com.mongodb.client.model.Filters.eq; +import static com.mongodb.client.model.Filters.gt; +import static com.mongodb.client.model.Filters.lt; +import static com.mongodb.client.model.Filters.or; +import static com.mongodb.client.model.Sorts.ascending; +import static java.util.Arrays.asList; + +// @imports: start +// @imports: end + + +public class QueryPrimer extends PrimerTestCase { + + @Test + public void queryAll() { + // @begin: query-all + // @code: start + FindIterable iterable = db.getCollection("restaurants").find(); + // @code: end + + // @pre: Iterate the results and apply a block to each resulting document. + // @code: start + iterable.forEach(document -> System.out.println(document)); + // @code: end + // @end: query-all + } + + + @Test + public void logicalAnd() { + + // @begin: logical-and + // @code: start + FindIterable iterable = db.getCollection("restaurants").find( + new Document("cuisine", "Italian").append("address.zipcode", "10075")); + // @code: end + + // @pre: Iterate the results and apply a block to each resulting document. + // @code: start + iterable.forEach(document -> System.out.println(document)); + // @code: end + + // @pre: To simplify building queries the Java driver provides static helpers + // @code: start + db.getCollection("restaurants").find(and(eq("cuisine", "Italian"), eq("address.zipcode", "10075"))); + // @code: end + + // @end: logical-and + } + + @Test + public void logicalOr() { + + // @begin: logical-or + // @code: start + FindIterable iterable = db.getCollection("restaurants").find( + new Document("$or", asList(new Document("cuisine", "Italian"), + new Document("address.zipcode", "10075")))); + // @code: end + + // @pre: Iterate the results and apply a block to each resulting document. 
+ // @code: start + iterable.forEach(document -> System.out.println(document)); + // @code: end + + // @pre: To simplify building queries the Java driver provides static helpers + // @code: start + db.getCollection("restaurants").find(or(eq("cuisine", "Italian"), eq("address.zipcode", "10075"))); + // @code: end + + // @end: logical-or + } + + @Test + public void queryTopLevelField() { + // @begin: query-top-level-field + // @code: start + FindIterable iterable = db.getCollection("restaurants").find( + new Document("borough", "Manhattan")); + // @code: end + + // @pre: Iterate the results and apply a block to each resulting document. + // @code: start + iterable.forEach(document -> System.out.println(document)); + // @code: end + + // @pre: To simplify building queries the Java driver provides static helpers + // @code: start + db.getCollection("restaurants").find(eq("borough", "Manhattan")); + // @code: end + // @end: query-top-level-field + } + + @Test + public void queryEmbeddedDocument() { + // @begin: query-embedded-document + // @code: start + FindIterable iterable = db.getCollection("restaurants").find( + new Document("address.zipcode", "10075")); + // @code: end + + // @pre: Iterate the results and apply a block to each resulting document. + // @code: start + iterable.forEach(document -> System.out.println(document)); + // @code: end + + // @pre: To simplify building queries the Java driver provides static helpers + // @code: start + db.getCollection("restaurants").find(eq("address.zipcode", "10075")); + // @code: end + // @end: query-embedded-document + } + + @Test + public void queryFieldInArray() { + // @begin: query-field-in-array + // @code: start + FindIterable iterable = db.getCollection("restaurants").find( + new Document("grades.grade", "B")); + // @code: end + + // @pre: Iterate the results and apply a block to each resulting document. + // @code: start + iterable.forEach(document -> System.out.println(document)); + // @code: end + + // @pre: To simplify building queries the Java driver provides static helpers + // @code: start + db.getCollection("restaurants").find(eq("grades.grade", "B")); + // @code: end + // @end: query-field-in-array + } + + @Test + public void greaterThan() { + // @begin: greater-than + // @code: start + FindIterable iterable = db.getCollection("restaurants").find( + new Document("grades.score", new Document("$gt", 30))); + // @code: end + + // @pre: Iterate the results and apply a block to each resulting document. + // @code: start + iterable.forEach(document -> System.out.println(document)); + // @code: end + + // @pre: To simplify building queries the Java driver provides static helpers + // @code: start + db.getCollection("restaurants").find(gt("grades.score", 30)); + // @code: end + // @end: greater-than + } + + @Test + public void lessThan() { + // @begin: less-than + // @code: start + FindIterable iterable = db.getCollection("restaurants").find( + new Document("grades.score", new Document("$lt", 10))); + // @code: end + + // @pre: Iterate the results and apply a block to each resulting document. 
+ // @code: start + iterable.forEach(document -> System.out.println(document)); + // @code: end + + // @pre: To simplify building queries the Java driver provides static helpers + // @code: start + db.getCollection("restaurants").find(lt("grades.score", 10)); + // @code: end + // @end: less-than + } + + + @Test + public void sort() { + // @begin: sort + // @code: start + FindIterable iterable = db.getCollection("restaurants").find() + .sort(new Document("borough", 1).append("address.zipcode", 1)); + // @code: end + + // @pre: Iterate the results and apply a block to each resulting document + // @code: start + iterable.forEach(document -> System.out.println(document)); + // @code: end + + // @pre: To simplify sorting fields the Java driver provides static helpers + // @code: start + db.getCollection("restaurants").find().sort(ascending("borough", "address.zipcode")); + // @code: end + // @end: sort + } +} diff --git a/driver-sync/src/examples/primer/RemovePrimer.java b/driver-sync/src/examples/primer/RemovePrimer.java new file mode 100644 index 00000000000..5e1894fdb86 --- /dev/null +++ b/driver-sync/src/examples/primer/RemovePrimer.java @@ -0,0 +1,67 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package primer; + +import org.junit.jupiter.api.Test; + +// @import: start +import org.bson.Document; +// @import: end + +public class RemovePrimer extends PrimerTestCase { + + @Test + public void removeMatchingDocuments() { + // @begin: remove-matching-documents + // @code: start + db.getCollection("restaurants").deleteMany(new Document("borough", "Manhattan")); + // @code: end + + /* + // @post: start + The deleteMany operation returns a ``DeleteResult`` which contains information about the operation. + The ``getDeletedCount`` method returns number of documents deleted. + // @post: end + */ + // @end: remove-matching-documents + } + + @Test + public void removeAllDocuments() { + // @begin: remove-all-documents + // @code: start + db.getCollection("restaurants").deleteMany(new Document()); + // @code: end + + /* + // @post: start + The deleteMany operation returns a ``DeleteResult`` which contains information about the operation. + The ``getDeletedCount`` method returns number of documents deleted. + // @post: end + */ + // @end: remove-all-documents + } + + @Test + public void dropCollection() { + // @begin: drop-collection + // @code: start + db.getCollection("restaurants").drop(); + // @code: end + // @end: drop-collection + } +} diff --git a/driver-sync/src/examples/primer/UpdatePrimer.java b/driver-sync/src/examples/primer/UpdatePrimer.java new file mode 100644 index 00000000000..a2eefcb9bf8 --- /dev/null +++ b/driver-sync/src/examples/primer/UpdatePrimer.java @@ -0,0 +1,106 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package primer; + +import org.junit.jupiter.api.Test; + +// @import: start +import org.bson.Document; + +import static java.util.Arrays.asList; +// @import: end + + +public class UpdatePrimer extends PrimerTestCase { + + @Test + public void updateTopLevelFields() { + // @begin: update-top-level-fields + // @code: start + db.getCollection("restaurants").updateOne(new Document("name", "Juni"), + new Document("$set", new Document("cuisine", "American (New)")) + .append("$currentDate", new Document("lastModified", true))); + // @code: end + + /* + // @post: start + The updateOne operation returns a ``UpdateResult`` which contains information about the operation. + The ``getModifiedCount`` method returns the number of documents modified. + // @post: end + */ + // @end: update-top-level-fields + } + + @Test + public void updateEmbeddedField() { + // @begin: update-embedded-field + // @code: start + db.getCollection("restaurants").updateOne(new Document("restaurant_id", "41156888"), + new Document("$set", new Document("address.street", "East 31st Street"))); + + // @code: end + /* + // @post: start + The updateOne operation returns a ``UpdateResult`` which contains information about the operation. + The ``getModifiedCount`` method returns the number of documents modified. + // @post: end + */ + // @end: update-embedded-field + } + + + @Test + public void updateMultipleDocuments() { + // @begin: update-multiple-documents + // @code: start + db.getCollection("restaurants").updateMany(new Document("address.zipcode", "10016").append("cuisine", "Other"), + new Document("$set", new Document("cuisine", "Category To Be Determined")) + .append("$currentDate", new Document("lastModified", true))); + // @code: end + + /* + // @post: start + The updateMany operation returns a ``UpdateResult`` which contains information about the operation. + The ``getModifiedCount`` method returns the number of documents modified. + // @post: end + */ + // @end: update-multiple-documents + } + + @Test + public void replaceDocument() { + // @begin: replace-document + // @code: start + db.getCollection("restaurants").replaceOne(new Document("restaurant_id", "41704620"), + new Document("address", + new Document() + .append("street", "2 Avenue") + .append("zipcode", "10075") + .append("building", "1480") + .append("coord", asList(-73.9557413, 40.7720266))) + .append("name", "Vella 2")); + // @code: end + /* + // @post: start + The replaceOne operation returns a ``UpdateResult`` which contains information about the operation. + The ``getModifiedCount`` method returns the number of documents modified. + // @post: end + */ + + // @end: replace-document + } +} diff --git a/driver-sync/src/examples/tour/Address.java b/driver-sync/src/examples/tour/Address.java new file mode 100644 index 00000000000..fb4612c9c31 --- /dev/null +++ b/driver-sync/src/examples/tour/Address.java @@ -0,0 +1,141 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tour; + +/** + * The Address POJO + */ +public final class Address { + + private String street; + private String city; + private String zip; + + /** + * Construct a new instance + */ + public Address() { + } + + /** + * Construct a new instance + * + * @param street the street + * @param city the city + * @param zip the zip / postal code + */ + public Address(final String street, final String city, final String zip) { + this.street = street; + this.city = city; + this.zip = zip; + } + + /** + * Returns the street + * + * @return the street + */ + public String getStreet() { + return street; + } + + /** + * Sets the street + * + * @param street the street + */ + public void setStreet(final String street) { + this.street = street; + } + + /** + * Returns the city + * + * @return the city + */ + public String getCity() { + return city; + } + + /** + * Sets the city + * + * @param city the city + */ + public void setCity(final String city) { + this.city = city; + } + + /** + * Returns the zip + * + * @return the zip + */ + public String getZip() { + return zip; + } + + /** + * Sets the zip + * + * @param zip the zip + */ + public void setZip(final String zip) { + this.zip = zip; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Address address = (Address) o; + + if (getStreet() != null ? !getStreet().equals(address.getStreet()) : address.getStreet() != null) { + return false; + } + if (getCity() != null ? !getCity().equals(address.getCity()) : address.getCity() != null) { + return false; + } + if (getZip() != null ? !getZip().equals(address.getZip()) : address.getZip() != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = getStreet() != null ? getStreet().hashCode() : 0; + result = 31 * result + (getCity() != null ? getCity().hashCode() : 0); + result = 31 * result + (getZip() != null ? getZip().hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "Address{" + + "street='" + street + "'" + + ", city='" + city + "'" + + ", zip='" + zip + "'" + + "}"; + } +} diff --git a/driver-sync/src/examples/tour/ClientSideEncryptionAutoEncryptionSettingsTour.java b/driver-sync/src/examples/tour/ClientSideEncryptionAutoEncryptionSettingsTour.java new file mode 100644 index 00000000000..9880c1f17fc --- /dev/null +++ b/driver-sync/src/examples/tour/ClientSideEncryptionAutoEncryptionSettingsTour.java @@ -0,0 +1,119 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package tour; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.ConnectionString; +import com.mongodb.MongoClientSettings; +import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoClients; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.model.vault.DataKeyOptions; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.client.vault.ClientEncryptions; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.Document; + +import java.security.SecureRandom; +import java.util.Base64; +import java.util.HashMap; +import java.util.Map; + +/** + * ClientSideEncryption AutoEncryptionSettings tour + */ +public class ClientSideEncryptionAutoEncryptionSettingsTour { + + /** + * Run this main method to see the output of this quick example. + *
+     * Requires the mongodb-crypt library in the class path and mongocryptd on the system path.
+     *
+     * @param args ignored args
+     */
+    public static void main(final String[] args) {
+
+        // This would have to be the same master key as was used to create the encryption key
+        byte[] localMasterKey = new byte[96];
+        new SecureRandom().nextBytes(localMasterKey);
+
+        Map<String, Map<String, Object>> kmsProviders = new HashMap<String, Map<String, Object>>() {{
+            put("local", new HashMap<String, Object>() {{
+                put("key", localMasterKey);
+            }});
+        }};
+        MongoClientSettings commonClientSettings = (
+                args.length == 0
+                        ? MongoClientSettings.builder()
+                        : MongoClientSettings.builder().applyConnectionString(new ConnectionString(args[0])))
+                .build();
+        String keyVaultNamespace = "encryption.__keyVault";
+        ClientEncryptionSettings clientEncryptionSettings = ClientEncryptionSettings.builder()
+                .keyVaultMongoClientSettings(commonClientSettings)
+                .keyVaultNamespace(keyVaultNamespace)
+                .kmsProviders(kmsProviders)
+                .build();
+
+        ClientEncryption clientEncryption = ClientEncryptions.create(clientEncryptionSettings);
+        BsonBinary dataKeyId = clientEncryption.createDataKey("local", new DataKeyOptions());
+        String base64DataKeyId = Base64.getEncoder().encodeToString(dataKeyId.getData());
+
+        final String dbName = "test";
+        final String collName = "coll";
+        AutoEncryptionSettings autoEncryptionSettings = AutoEncryptionSettings.builder()
+                .keyVaultNamespace(keyVaultNamespace)
+                .kmsProviders(kmsProviders)
+                .schemaMap(new HashMap<String, BsonDocument>() {{
+                    put(dbName + "." + collName,
+                            // Need a schema that references the new data key
+                            BsonDocument.parse("{"
+                                    + "  properties: {"
+                                    + "    encryptedField: {"
+                                    + "      encrypt: {"
+                                    + "        keyId: [{"
+                                    + "          \"$binary\": {"
+                                    + "            \"base64\": \"" + base64DataKeyId + "\","
+                                    + "            \"subType\": \"04\""
+                                    + "          }"
+                                    + "        }],"
+                                    + "        bsonType: \"string\","
+                                    + "        algorithm: \"AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic\""
+                                    + "      }"
+                                    + "    }"
+                                    + "  },"
+                                    + "  \"bsonType\": \"object\""
+                                    + "}"));
+                }}).build();
+
+        MongoClientSettings clientSettings = MongoClientSettings.builder(commonClientSettings)
+                .autoEncryptionSettings(autoEncryptionSettings)
+                .build();
+
+        MongoClient mongoClient = MongoClients.create(clientSettings);
+        MongoCollection<Document> collection = mongoClient.getDatabase("test").getCollection("coll");
+        collection.drop(); // Clear old data
+
+        collection.insertOne(new Document("encryptedField", "123456789"));
+
+        System.out.println(collection.find().first().toJson());
+
+        // release resources
+        mongoClient.close();
+    }
+}
diff --git a/driver-sync/src/examples/tour/ClientSideEncryptionExplicitEncryptionAndDecryptionTour.java b/driver-sync/src/examples/tour/ClientSideEncryptionExplicitEncryptionAndDecryptionTour.java
new file mode 100644
index 00000000000..853f364c4bf
--- /dev/null
+++ b/driver-sync/src/examples/tour/ClientSideEncryptionExplicitEncryptionAndDecryptionTour.java
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package tour;
+
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.ConnectionString;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.MongoNamespace;
+import com.mongodb.client.MongoClient;
+import com.mongodb.client.MongoClients;
+import com.mongodb.client.MongoCollection;
+import com.mongodb.client.model.Filters;
+import com.mongodb.client.model.IndexOptions;
+import com.mongodb.client.model.Indexes;
+import com.mongodb.client.model.vault.DataKeyOptions;
+import com.mongodb.client.model.vault.EncryptOptions;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.client.vault.ClientEncryptions;
+import org.bson.BsonBinary;
+import org.bson.BsonString;
+import org.bson.Document;
+import org.bson.types.Binary;
+
+import java.security.SecureRandom;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * ClientSideEncryption explicit encryption and decryption tour
+ */
+public class ClientSideEncryptionExplicitEncryptionAndDecryptionTour {
+
+    /**
+     * Run this main method to see the output of this quick example.
+     *
+     * @param args ignored args
+     */
+    public static void main(final String[] args) {
+
+        // This would have to be the same master key as was used to create the encryption key
+        byte[] localMasterKey = new byte[96];
+        new SecureRandom().nextBytes(localMasterKey);
+
+        Map<String, Map<String, Object>> kmsProviders = new HashMap<String, Map<String, Object>>() {{
+            put("local", new HashMap<String, Object>() {{
+                put("key", localMasterKey);
+            }});
+        }};
+        MongoClientSettings commonClientSettings = (
+                args.length == 0
+                        ? MongoClientSettings.builder()
+                        : MongoClientSettings.builder().applyConnectionString(new ConnectionString(args[0])))
+                .build();
+        MongoClient mongoClient = MongoClients.create(commonClientSettings);
+
+        // Set up the key vault for this example
+        MongoNamespace keyVaultNamespace = new MongoNamespace("encryption.testKeyVault");
+
+        MongoCollection<Document> keyVaultCollection = mongoClient.getDatabase(keyVaultNamespace.getDatabaseName())
+                .getCollection(keyVaultNamespace.getCollectionName());
+        keyVaultCollection.drop();
+
+        // Ensure that two data keys cannot share the same keyAltName.
+        keyVaultCollection.createIndex(Indexes.ascending("keyAltNames"),
+                new IndexOptions().unique(true)
+                        .partialFilterExpression(Filters.exists("keyAltNames")));
+
+        MongoCollection<Document> collection = mongoClient.getDatabase("test").getCollection("coll");
+        collection.drop(); // Clear old data
+
+        // Create the ClientEncryption instance
+        ClientEncryptionSettings clientEncryptionSettings = ClientEncryptionSettings.builder()
+                .keyVaultMongoClientSettings(commonClientSettings)
+                .keyVaultNamespace(keyVaultNamespace.getFullName())
+                .kmsProviders(kmsProviders)
+                .build();
+
+        ClientEncryption clientEncryption = ClientEncryptions.create(clientEncryptionSettings);
+
+        BsonBinary dataKeyId = clientEncryption.createDataKey("local", new DataKeyOptions());
+
+        // Explicitly encrypt a field
+        BsonBinary encryptedFieldValue = clientEncryption.encrypt(new BsonString("123456789"),
+                new EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic").keyId(dataKeyId));
+
+        collection.insertOne(new Document("encryptedField", encryptedFieldValue));
+
+        Document doc = collection.find().first();
+        System.out.println(doc.toJson());
+
+        // Explicitly decrypt the field
+        Binary encryptedField = doc.get("encryptedField", Binary.class);
+        BsonString decryptedField = clientEncryption.decrypt(new BsonBinary(encryptedField.getType(), encryptedField.getData())).asString();
+        System.out.println(decryptedField.getValue());
+
+        // release resources
+        clientEncryption.close();
+        mongoClient.close();
+    }
+}
diff --git a/driver-sync/src/examples/tour/ClientSideEncryptionExplicitEncryptionOnlyTour.java b/driver-sync/src/examples/tour/ClientSideEncryptionExplicitEncryptionOnlyTour.java
new file mode 100644
index 00000000000..e50cc54e29c
--- /dev/null
+++ b/driver-sync/src/examples/tour/ClientSideEncryptionExplicitEncryptionOnlyTour.java
@@ -0,0 +1,116 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package tour;
+
+import com.mongodb.AutoEncryptionSettings;
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.ConnectionString;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.MongoNamespace;
+import com.mongodb.client.MongoClient;
+import com.mongodb.client.MongoClients;
+import com.mongodb.client.MongoCollection;
+import com.mongodb.client.model.Filters;
+import com.mongodb.client.model.IndexOptions;
+import com.mongodb.client.model.Indexes;
+import com.mongodb.client.model.vault.DataKeyOptions;
+import com.mongodb.client.model.vault.EncryptOptions;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.client.vault.ClientEncryptions;
+import org.bson.BsonBinary;
+import org.bson.BsonString;
+import org.bson.Document;
+
+import java.security.SecureRandom;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * ClientSideEncryption explicit encryption only tour
+ */
+public class ClientSideEncryptionExplicitEncryptionOnlyTour {
+
+    /**
+     * Run this main method to see the output of this quick example.
+     *
+     * @param args ignored args
+     */
+    public static void main(final String[] args) {
+
+        // This would have to be the same master key as was used to create the encryption key
+        byte[] localMasterKey = new byte[96];
+        new SecureRandom().nextBytes(localMasterKey);
+
+        Map<String, Map<String, Object>> kmsProviders = new HashMap<String, Map<String, Object>>() {{
+            put("local", new HashMap<String, Object>() {{
+                put("key", localMasterKey);
+            }});
+        }};
+
+        MongoNamespace keyVaultNamespace = new MongoNamespace("encryption.testKeyVault");
+        MongoClientSettings commonClientSettings = (
+                args.length == 0
+                        ? MongoClientSettings.builder()
+                        : MongoClientSettings.builder().applyConnectionString(new ConnectionString(args[0])))
+                .build();
+        MongoClientSettings clientSettings = MongoClientSettings.builder(commonClientSettings)
+                .autoEncryptionSettings(AutoEncryptionSettings.builder()
+                        .keyVaultNamespace(keyVaultNamespace.getFullName())
+                        .kmsProviders(kmsProviders)
+                        .bypassAutoEncryption(true)
+                        .build())
+                .build();
+        MongoClient mongoClient = MongoClients.create(clientSettings);
+
+        // Set up the key vault for this example
+        MongoCollection<Document> keyVaultCollection = mongoClient.getDatabase(keyVaultNamespace.getDatabaseName())
+                .getCollection(keyVaultNamespace.getCollectionName());
+        keyVaultCollection.drop();
+
+        // Ensure that two data keys cannot share the same keyAltName.
+        keyVaultCollection.createIndex(Indexes.ascending("keyAltNames"),
+                new IndexOptions().unique(true)
+                        .partialFilterExpression(Filters.exists("keyAltNames")));
+
+        MongoCollection<Document> collection = mongoClient.getDatabase("test").getCollection("coll");
+        collection.drop(); // Clear old data
+
+        // Create the ClientEncryption instance
+        ClientEncryptionSettings clientEncryptionSettings = ClientEncryptionSettings.builder()
+                .keyVaultMongoClientSettings(commonClientSettings)
+                .keyVaultNamespace(keyVaultNamespace.getFullName())
+                .kmsProviders(kmsProviders)
+                .build();
+
+        ClientEncryption clientEncryption = ClientEncryptions.create(clientEncryptionSettings);
+
+        BsonBinary dataKeyId = clientEncryption.createDataKey("local", new DataKeyOptions());
+
+        // Explicitly encrypt a field
+        BsonBinary encryptedFieldValue = clientEncryption.encrypt(new BsonString("123456789"),
+                new EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic").keyId(dataKeyId));
+
+        collection.insertOne(new Document("encryptedField", encryptedFieldValue));
+
+        // Automatically decrypts the encrypted field.
+        System.out.println(collection.find().first().toJson());
+
+        // release resources
+        clientEncryption.close();
+        mongoClient.close();
+    }
+}
diff --git a/driver-sync/src/examples/tour/ClientSideEncryptionQueryableEncryptionTour.java b/driver-sync/src/examples/tour/ClientSideEncryptionQueryableEncryptionTour.java
new file mode 100644
index 00000000000..8fb99c59ea3
--- /dev/null
+++ b/driver-sync/src/examples/tour/ClientSideEncryptionQueryableEncryptionTour.java
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package tour; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.ConnectionString; +import com.mongodb.MongoClientSettings; +import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoClients; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.model.Filters; +import com.mongodb.client.model.vault.DataKeyOptions; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.client.vault.ClientEncryptions; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ClusterType; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.BsonType; + +import java.security.SecureRandom; +import java.util.Base64; +import java.util.HashMap; +import java.util.Map; + +import static java.lang.String.format; + +/** + * ClientSideEncryption Queryable Encryption tour + */ +public class ClientSideEncryptionQueryableEncryptionTour { + + /** + * Run this main method to test queryable encryption. + *
<p>
+     * Requires the latest mongodb-crypt library in the class path.
+     *
+     * @param args ignored args
+     */
+    public static void main(final String[] args) {
+
+        String uri = args.length == 0 ? "mongodb://localhost:27017,localhost:27018,localhost:27019/" : args[0];
+        ConnectionString connectionString = new ConnectionString(uri);
+
+        // This would have to be the same master key as was used to create the encryption key
+        byte[] localMasterKey = new byte[96];
+        new SecureRandom().nextBytes(localMasterKey);
+
+        Map<String, Map<String, Object>> kmsProviders = new HashMap<String, Map<String, Object>>() {{
+            put("local", new HashMap<String, Object>() {{
+                put("key", localMasterKey);
+            }});
+        }};
+
+        MongoClient mongoClient = MongoClients.create(connectionString);
+        mongoClient.getDatabase("keyvault").getCollection("datakeys").drop();
+        mongoClient.getDatabase("docsExamples").drop();
+
+        ClusterDescription clusterDescription = mongoClient.getClusterDescription();
+        ClusterType clusterType = clusterDescription.getType();
+        if (clusterType.equals(ClusterType.STANDALONE) || clusterType.equals(ClusterType.UNKNOWN)) {
+            System.out.println("Requires a replicaset or sharded cluster");
+            return;
+        }
+        if (clusterDescription.getServerDescriptions().get(0).getMaxWireVersion() < 17) {
+            System.out.println("Requires MongoDB 6.0 or greater");
+            return;
+        }
+
+        String keyVaultNamespace = "keyvault.datakeys";
+        ClientEncryptionSettings clientEncryptionSettings = ClientEncryptionSettings.builder()
+                .keyVaultMongoClientSettings(MongoClientSettings.builder()
+                        .applyConnectionString(connectionString)
+                        .build())
+                .keyVaultNamespace(keyVaultNamespace)
+                .kmsProviders(kmsProviders)
+                .build();
+
+        ClientEncryption clientEncryption = ClientEncryptions.create(clientEncryptionSettings);
+        BsonBinary dataKeyId1 = clientEncryption.createDataKey("local", new DataKeyOptions());
+        BsonBinary dataKeyId2 = clientEncryption.createDataKey("local", new DataKeyOptions());
+        String base64DataKeyId1 = Base64.getEncoder().encodeToString(dataKeyId1.getData());
+        String base64DataKeyId2 = Base64.getEncoder().encodeToString(dataKeyId2.getData());
+
+        // Create an encryptedFieldsMap with an indexed and unindexed field.
+        Map<String, BsonDocument> encryptedFieldsMap = new HashMap<>();
+        encryptedFieldsMap.put("docsExamples.encrypted", BsonDocument.parse("{"
+                + "fields: ["
+                + "{'path': 'encryptedIndexed', 'bsonType': 'string', 'queries': {'queryType': 'equality'}, 'keyId': "
+                + "{'$binary': {'base64' : '" + base64DataKeyId1 + "', 'subType': '" + dataKeyId1.asBinary().getType() + "'}}},"
+                + "{'path': 'encryptedUnindexed', 'bsonType': 'string', 'keyId': "
+                + "{'$binary': {'base64' : '" + base64DataKeyId2 + "', 'subType': '" + dataKeyId2.asBinary().getType() + "'}}}"
+                + "]"
+                + "}"));
+
+        AutoEncryptionSettings autoEncryptionSettings = AutoEncryptionSettings.builder()
+                .keyVaultNamespace(keyVaultNamespace)
+                .kmsProviders(kmsProviders)
+                .encryptedFieldsMap(encryptedFieldsMap)
+                .build();
+
+        MongoClient encryptedClient =
+                MongoClients.create(MongoClientSettings.builder()
+                        .applyConnectionString(connectionString)
+                        .autoEncryptionSettings(autoEncryptionSettings).build());
+
+        // Create an FLE 2 collection.
+        MongoDatabase docsExamplesDatabase = encryptedClient.getDatabase("docsExamples");
+        docsExamplesDatabase.createCollection("encrypted");
+        MongoCollection<BsonDocument> encryptedCollection = docsExamplesDatabase.getCollection("encrypted", BsonDocument.class);
+
+        // Auto encrypt an insert and find with "Indexed" and "Unindexed" encrypted fields.
+        String indexedValue = "indexedValue";
+        String unindexedValue = "unindexedValue";
+        encryptedCollection.insertOne(BsonDocument.parse(format("{'_id': 1, 'encryptedIndexed': '%s', 'encryptedUnindexed': '%s'}",
+                indexedValue, unindexedValue)));
+
+        BsonDocument findResult = encryptedCollection.find(Filters.eq("encryptedIndexed", "indexedValue")).first();
+        assert findResult != null;
+        assert findResult.getString("encryptedIndexed").equals(new BsonString(indexedValue));
+        assert findResult.getString("encryptedUnindexed").equals(new BsonString(unindexedValue));
+
+        // Find documents without decryption.
+        MongoCollection<BsonDocument> unencryptedCollection = mongoClient.getDatabase("docsExamples")
+                .getCollection("encrypted", BsonDocument.class);
+        findResult = unencryptedCollection.find(Filters.eq("_id", 1)).first();
+        assert findResult != null;
+        assert findResult.get("encryptedIndexed").getBsonType().equals(BsonType.BINARY);
+        assert findResult.get("encryptedUnindexed").getBsonType().equals(BsonType.BINARY);
+
+        // release resources
+        clientEncryption.close();
+        encryptedClient.close();
+        mongoClient.close();
+    }
+}
diff --git a/driver-sync/src/examples/tour/ClientSideEncryptionSimpleTour.java b/driver-sync/src/examples/tour/ClientSideEncryptionSimpleTour.java
new file mode 100644
index 00000000000..de116fd3a62
--- /dev/null
+++ b/driver-sync/src/examples/tour/ClientSideEncryptionSimpleTour.java
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package tour;
+
+import com.mongodb.AutoEncryptionSettings;
+import com.mongodb.ConnectionString;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.client.MongoClient;
+import com.mongodb.client.MongoClients;
+import com.mongodb.client.MongoCollection;
+import org.bson.Document;
+
+import java.security.SecureRandom;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * ClientSideEncryption Simple tour
+ */
+public class ClientSideEncryptionSimpleTour {
+
+    /**
+     * Run this main method to see the output of this quick example.
+     *
<p>
+     * Requires the mongodb-crypt library in the class path and mongocryptd on the system path.
+     * Assumes the schema has already been created in MongoDB.
+     *
+     * @param args ignored args
+     */
+    public static void main(final String[] args) {
+
+        // This would have to be the same master key as was used to create the encryption key
+        byte[] localMasterKey = new byte[96];
+        new SecureRandom().nextBytes(localMasterKey);
+
+        Map<String, Map<String, Object>> kmsProviders = new HashMap<String, Map<String, Object>>() {{
+            put("local", new HashMap<String, Object>() {{
+                put("key", localMasterKey);
+            }});
+        }};
+
+        String keyVaultNamespace = "encryption.__keyVault";
+
+        AutoEncryptionSettings autoEncryptionSettings = AutoEncryptionSettings.builder()
+                .keyVaultNamespace(keyVaultNamespace)
+                .kmsProviders(kmsProviders)
+                .build();
+
+        MongoClientSettings clientSettings = (
+                args.length == 0
+                        ? MongoClientSettings.builder()
+                        : MongoClientSettings.builder().applyConnectionString(new ConnectionString(args[0])))
+                .autoEncryptionSettings(autoEncryptionSettings)
+                .build();
+
+        MongoClient mongoClient = MongoClients.create(clientSettings);
+        MongoCollection<Document> collection = mongoClient.getDatabase("test").getCollection("coll");
+        collection.drop(); // Clear old data
+
+        collection.insertOne(new Document("encryptedField", "123456789"));
+
+        System.out.println(collection.find().first().toJson());
+
+        // release resources
+        mongoClient.close();
+    }
+}
diff --git a/driver-sync/src/examples/tour/Decimal128QuickTour.java b/driver-sync/src/examples/tour/Decimal128QuickTour.java
new file mode 100644
index 00000000000..bf66ef2b032
--- /dev/null
+++ b/driver-sync/src/examples/tour/Decimal128QuickTour.java
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package tour;
+
+import com.mongodb.client.MongoClient;
+import com.mongodb.client.MongoClients;
+import com.mongodb.client.MongoCollection;
+import com.mongodb.client.MongoDatabase;
+import com.mongodb.client.model.Filters;
+import org.bson.Document;
+import org.bson.types.Decimal128;
+
+import java.math.BigDecimal;
+
+/**
+ * Decimal128 Quick tour
+ */
+public class Decimal128QuickTour {
+    /**
+     * Run this main method to see the output of this quick example.
+     *
+     * @param args takes an optional single argument for the connection string
+     */
+    public static void main(final String[] args) {
+        MongoClient mongoClient;
+
+        if (args.length == 0) {
+            // connect to the local database server
+            mongoClient = MongoClients.create();
+        } else {
+            mongoClient = MongoClients.create(args[0]);
+        }
+
+        // get handle to "mydb" database
+        MongoDatabase database = mongoClient.getDatabase("mydb");
+
+        // get a handle to the "test" collection
+        MongoCollection<Document> collection = database.getCollection("test");
+
+        // drop all the data in it
+        collection.drop();
+
+        // make a document and insert it
+        Document doc = new Document("name", "MongoDB")
+                .append("amount1", Decimal128.parse(".10"))
+                .append("amount2", new Decimal128(42L))
+                .append("amount3", new Decimal128(new BigDecimal(".200")));
+
+        collection.insertOne(doc);
+
+        Document first = collection.find().filter(Filters.eq("amount1", new Decimal128(new BigDecimal(".10")))).first();
+
+        Decimal128 amount3 = (Decimal128) first.get("amount3");
+        BigDecimal amount3AsBigDecimal = amount3.bigDecimalValue();
+
+        System.out.println(amount3);
+        System.out.println(amount3AsBigDecimal);
+    }
+}
diff --git a/driver-sync/src/examples/tour/Person.java b/driver-sync/src/examples/tour/Person.java
new file mode 100644
index 00000000000..9294ff5e19d
--- /dev/null
+++ b/driver-sync/src/examples/tour/Person.java
@@ -0,0 +1,166 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package tour; + +import org.bson.types.ObjectId; + +/** + * The Person Pojo + */ +public final class Person { + private ObjectId id; + private String name; + private int age; + private Address address; + + /** + * Construct a new instance + */ + public Person() { + } + + /** + * Construct a new instance + * + * @param name the name + * @param age the age + * @param address the address + */ + public Person(final String name, final int age, final Address address) { + this.name = name; + this.age = age; + this.address = address; + } + + /** + * Returns the id + * + * @return the id + */ + public ObjectId getId() { + return id; + } + + /** + * Sets the id + * + * @param id the id + */ + public void setId(final ObjectId id) { + this.id = id; + } + + /** + * Returns the name + * + * @return the name + */ + public String getName() { + return name; + } + + /** + * Sets the name + * + * @param name the name + */ + public void setName(final String name) { + this.name = name; + } + + /** + * Returns the age + * + * @return the age + */ + public int getAge() { + return age; + } + + /** + * Sets the age + * + * @param age the age + */ + public void setAge(final int age) { + this.age = age; + } + + /** + * Returns the address + * + * @return the address + */ + public Address getAddress() { + return address; + } + + /** + * Sets the address + * + * @param address the address + */ + public void setAddress(final Address address) { + this.address = address; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Person person = (Person) o; + + if (getAge() != person.getAge()) { + return false; + } + if (getId() != null ? !getId().equals(person.getId()) : person.getId() != null) { + return false; + } + if (getName() != null ? !getName().equals(person.getName()) : person.getName() != null) { + return false; + } + if (getAddress() != null ? !getAddress().equals(person.getAddress()) : person.getAddress() != null) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = getId() != null ? getId().hashCode() : 0; + result = 31 * result + (getName() != null ? getName().hashCode() : 0); + result = 31 * result + getAge(); + result = 31 * result + (getAddress() != null ? getAddress().hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "Person{" + + "id='" + id + "'" + + ", name='" + name + "'" + + ", age=" + age + + ", address=" + address + + "}"; + } +} diff --git a/driver-sync/src/examples/tour/PojoQuickTour.java b/driver-sync/src/examples/tour/PojoQuickTour.java new file mode 100644 index 00000000000..cdfa95876c8 --- /dev/null +++ b/driver-sync/src/examples/tour/PojoQuickTour.java @@ -0,0 +1,137 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package tour;
+
+import com.mongodb.MongoClientSettings;
+import com.mongodb.client.MongoClient;
+import com.mongodb.client.MongoClients;
+import com.mongodb.client.MongoCollection;
+import com.mongodb.client.MongoDatabase;
+import com.mongodb.client.result.DeleteResult;
+import com.mongodb.client.result.UpdateResult;
+import org.bson.codecs.configuration.CodecRegistry;
+import org.bson.codecs.pojo.PojoCodecProvider;
+
+import java.util.List;
+import java.util.function.Consumer;
+
+import static com.mongodb.client.model.Filters.eq;
+import static com.mongodb.client.model.Filters.gt;
+import static com.mongodb.client.model.Filters.not;
+import static com.mongodb.client.model.Updates.combine;
+import static com.mongodb.client.model.Updates.set;
+import static java.util.Arrays.asList;
+import static org.bson.codecs.configuration.CodecRegistries.fromProviders;
+import static org.bson.codecs.configuration.CodecRegistries.fromRegistries;
+
+/**
+ * The POJO QuickTour code example
+ */
+public class PojoQuickTour {
+    /**
+     * Run this main method to see the output of this quick example.
+     *
+     * @param args takes an optional single argument for the connection string
+     */
+    public static void main(final String[] args) {
+        MongoClient mongoClient;
+
+        if (args.length == 0) {
+            // connect to the local database server
+            mongoClient = MongoClients.create();
+        } else {
+            mongoClient = MongoClients.create(args[0]);
+        }
+
+        // create codec registry for POJOs
+        CodecRegistry pojoCodecRegistry = fromRegistries(MongoClientSettings.getDefaultCodecRegistry(),
+                fromProviders(PojoCodecProvider.builder().automatic(true).build()));
+
+        // get handle to "mydb" database
+        MongoDatabase database = mongoClient.getDatabase("mydb").withCodecRegistry(pojoCodecRegistry);
+
+        // get a handle to the "people" collection
+        MongoCollection<Person> collection = database.getCollection("people", Person.class);
+
+        // drop all the data in it
+        collection.drop();
+
+        // make a document and insert it
+        Person ada = new Person("Ada Byron", 20, new Address("St James Square", "London", "W1"));
+        System.out.println("Original Person Model: " + ada);
+        collection.insertOne(ada);
+
+        // Person will now have an ObjectId
+        System.out.println("Mutated Person Model: " + ada);
+
+        // get it (since it's the only one in there since we dropped the rest earlier on)
+        Person somebody = collection.find().first();
+        System.out.println(somebody);
+
+        // now, lets add some more people so we can explore queries and cursors
+        List<Person> people = asList(
+                new Person("Charles Babbage", 45, new Address("5 Devonshire Street", "London", "W11")),
+                new Person("Alan Turing", 28, new Address("Bletchley Hall", "Bletchley Park", "MK12")),
+                new Person("Timothy Berners-Lee", 61, new Address("Colehill", "Wimborne", null))
+        );
+
+        collection.insertMany(people);
+        System.out.println("total # of people " + collection.countDocuments());
+
+        System.out.println();
+        // lets get all the documents in the collection and print them out
+        Consumer<Person> printBlock = person -> System.out.println(person);
+
+        collection.find().forEach(printBlock);
+
+        System.out.println();
+        // now use a query to get 1 document out
+        somebody = collection.find(eq("address.city", "Wimborne")).first();
+        System.out.println(somebody);
+
+        System.out.println();
+        // now lets find everyone over 30
+        collection.find(gt("age", 30)).forEach(printBlock);
+
+        System.out.println();
+        // Update One
+        collection.updateOne(eq("name", "Ada Byron"), combine(set("age", 23), set("name", "Ada Lovelace")));
+
+        System.out.println();
+        // Update Many
+        UpdateResult updateResult = collection.updateMany(not(eq("zip", null)), set("zip", null));
+        System.out.println(updateResult.getModifiedCount());
+
+        System.out.println();
+        // Replace One
+        updateResult = collection.replaceOne(eq("name", "Ada Lovelace"), ada);
+        System.out.println(updateResult.getModifiedCount());
+
+        // Delete One
+        collection.deleteOne(eq("address.city", "Wimborne"));
+
+        // Delete Many
+        DeleteResult deleteResult = collection.deleteMany(eq("address.city", "London"));
+        System.out.println(deleteResult.getDeletedCount());
+
+        // Clean up
+        database.drop();
+
+        // release resources
+        mongoClient.close();
+    }
+}
diff --git a/driver-sync/src/examples/tour/QuickTour.java b/driver-sync/src/examples/tour/QuickTour.java
new file mode 100644
index 00000000000..a84a3a1ce61
--- /dev/null
+++ b/driver-sync/src/examples/tour/QuickTour.java
@@ -0,0 +1,194 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package tour;
+
+import com.mongodb.client.MongoClient;
+import com.mongodb.client.MongoClients;
+import com.mongodb.client.MongoCollection;
+import com.mongodb.client.MongoCursor;
+import com.mongodb.client.MongoDatabase;
+import com.mongodb.client.result.DeleteResult;
+import com.mongodb.client.result.UpdateResult;
+import org.bson.Document;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Consumer;
+
+import static com.mongodb.client.model.Accumulators.sum;
+import static com.mongodb.client.model.Aggregates.group;
+import static com.mongodb.client.model.Aggregates.match;
+import static com.mongodb.client.model.Aggregates.project;
+import static com.mongodb.client.model.Filters.and;
+import static com.mongodb.client.model.Filters.eq;
+import static com.mongodb.client.model.Filters.exists;
+import static com.mongodb.client.model.Filters.gt;
+import static com.mongodb.client.model.Filters.gte;
+import static com.mongodb.client.model.Filters.lt;
+import static com.mongodb.client.model.Filters.lte;
+import static com.mongodb.client.model.Projections.excludeId;
+import static com.mongodb.client.model.Sorts.descending;
+import static com.mongodb.client.model.Updates.inc;
+import static com.mongodb.client.model.Updates.set;
+import static java.util.Arrays.asList;
+import static java.util.Collections.singletonList;
+
+/**
+ * The QuickTour code example
+ */
+public class QuickTour {
+    /**
+     * Run this main method to see the output of this quick example.
+     *
+     * @param args takes an optional single argument for the connection string
+     */
+    public static void main(final String[] args) {
+        MongoClient mongoClient;
+
+        if (args.length == 0) {
+            // connect to the local database server
+            mongoClient = MongoClients.create();
+        } else {
+            mongoClient = MongoClients.create(args[0]);
+        }
+
+        // get handle to "mydb" database
+        MongoDatabase database = mongoClient.getDatabase("mydb");
+
+        // get a handle to the "test" collection
+        MongoCollection<Document> collection = database.getCollection("test");
+
+        // drop all the data in it
+        collection.drop();
+
+        // make a document and insert it
+        Document doc = new Document("name", "MongoDB")
+                .append("type", "database")
+                .append("count", 1)
+                .append("info", new Document("x", 203).append("y", 102));
+
+        collection.insertOne(doc);
+
+        // get it (since it's the only one in there since we dropped the rest earlier on)
+        Document myDoc = collection.find().first();
+        System.out.println(myDoc.toJson());
+
+        // now, lets add lots of little documents to the collection so we can explore queries and cursors
+        List<Document> documents = new ArrayList<>();
+        for (int i = 0; i < 100; i++) {
+            documents.add(new Document("i", i));
+        }
+        collection.insertMany(documents);
+        System.out.println("total # of documents after inserting 100 small ones (should be 101) " + collection.countDocuments());
+
+        // find first
+        myDoc = collection.find().first();
+        System.out.println(myDoc.toJson());
+
+        // lets get all the documents in the collection and print them out
+        MongoCursor<Document> cursor = collection.find().iterator();
+        try {
+            while (cursor.hasNext()) {
+                System.out.println(cursor.next().toJson());
+            }
+        } finally {
+            cursor.close();
+        }
+
+        for (Document cur : collection.find()) {
+            System.out.println(cur.toJson());
+        }
+
+        // now use a query to get 1 document out
+        myDoc = collection.find(eq("i", 71)).first();
+        System.out.println(myDoc.toJson());
+
+        // now use a range query to get a larger subset
+        cursor = collection.find(gt("i", 50)).iterator();
+
+        try {
+            while (cursor.hasNext()) {
+                System.out.println(cursor.next().toJson());
+            }
+        } finally {
+            cursor.close();
+        }
+
+        // range query with multiple constraints
+        cursor = collection.find(and(gt("i", 50), lte("i", 100))).iterator();
+
+        try {
+            while (cursor.hasNext()) {
+                System.out.println(cursor.next().toJson());
+            }
+        } finally {
+            cursor.close();
+        }
+
+        // Query Filters
+        myDoc = collection.find(eq("i", 71)).first();
+        System.out.println(myDoc.toJson());
+
+        // now use a range query to get a larger subset
+        Consumer<Document> printBlock = document -> System.out.println(document.toJson());
+        collection.find(gt("i", 50)).forEach(printBlock);
+
+        // filter where 50 < i <= 100
+        collection.find(and(gt("i", 50), lte("i", 100))).forEach(printBlock);
+
+        // Sorting
+        myDoc = collection.find(exists("i")).sort(descending("i")).first();
+        System.out.println(myDoc.toJson());
+
+        // Projection
+        myDoc = collection.find().projection(excludeId()).first();
+        System.out.println(myDoc.toJson());
+
+        // Aggregation
+        collection.aggregate(asList(
+                match(gt("i", 0)),
+                project(Document.parse("{ITimes10: {$multiply: ['$i', 10]}}")))
+        ).forEach(printBlock);
+
+        myDoc = collection.aggregate(singletonList(group(null, sum("total", "$i")))).first();
+        System.out.println(myDoc.toJson());
+
+        // Update One
+        collection.updateOne(eq("i", 10), set("i", 110));
+
+        // Update Many
+        UpdateResult updateResult = collection.updateMany(lt("i", 100), inc("i", 100));
+        System.out.println(updateResult.getModifiedCount());
+
+        // Delete One
+        collection.deleteOne(eq("i", 110));
+
+        // Delete Many
+        DeleteResult deleteResult = collection.deleteMany(gte("i", 100));
+        System.out.println(deleteResult.getDeletedCount());
+
+        // Create Index
+        collection.createIndex(new Document("i", 1));
+
+        // Clean up
+        database.drop();
+
+        // release resources
+        mongoClient.close();
+    }
+}
diff --git a/driver-sync/src/examples/tour/package-info.java b/driver-sync/src/examples/tour/package-info.java
new file mode 100644
index 00000000000..ebd69343c4a
--- /dev/null
+++ b/driver-sync/src/examples/tour/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package contains the quick tour examples
+ */
+package tour;
diff --git a/driver-sync/src/main/com/mongodb/client/AggregateIterable.java b/driver-sync/src/main/com/mongodb/client/AggregateIterable.java
new file mode 100644
index 00000000000..032e1860244
--- /dev/null
+++ b/driver-sync/src/main/com/mongodb/client/AggregateIterable.java
@@ -0,0 +1,282 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.ExplainVerbosity;
+import com.mongodb.MongoNamespace;
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
+import com.mongodb.client.cursor.TimeoutMode;
+import com.mongodb.client.model.Aggregates;
+import com.mongodb.client.model.Collation;
+import com.mongodb.client.model.MergeOptions;
+import com.mongodb.lang.Nullable;
+import org.bson.BsonValue;
+import org.bson.Document;
+import org.bson.conversions.Bson;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Iterable for aggregate.
+ *
+ * @param <TResult> The type of the result.
+ * @mongodb.driver.manual reference/command/aggregate/ Aggregation
+ * @since 3.0
+ */
+public interface AggregateIterable<TResult> extends MongoIterable<TResult> {
+
+    /**
+     * Aggregates documents according to the specified aggregation pipeline, which must end with an
+     * {@link Aggregates#out(String, String) $out} or {@link Aggregates#merge(MongoNamespace, MergeOptions) $merge} stage.
+     * This method is the preferred alternative to {@link #iterator()}, {@link #cursor()},
+     * because this method does what is explicitly requested without executing implicit operations.
+ * + * @throws IllegalStateException if the pipeline does not end with an {@code $out} or {@code $merge} stage + * @mongodb.driver.manual reference/operator/aggregation/out/ $out stage + * @mongodb.driver.manual reference/operator/aggregation/merge/ $merge stage + * @since 3.4 + */ + void toCollection(); + + /** + * Aggregates documents according to the specified aggregation pipeline. + *
<ul>
+     *     <li>
+     *     If the aggregation pipeline ends with an {@link Aggregates#out(String, String) $out} or
+     *     {@link Aggregates#merge(MongoNamespace, MergeOptions) $merge} stage,
+     *     then {@linkplain MongoCollection#find() finds all} documents in the affected namespace and returns a {@link MongoCursor}
+     *     over them. You may want to use {@link #toCollection()} instead.</li>
+     *     <li>
+     *     Otherwise, returns a {@link MongoCursor} producing no elements.</li>
+     * </ul>
+     */
+    @Override
+    MongoCursor<TResult> iterator();
+
+    /**
+     * Aggregates documents according to the specified aggregation pipeline.
+     * <ul>
+     *     <li>
+     *     If the aggregation pipeline ends with an {@link Aggregates#out(String, String) $out} or
+     *     {@link Aggregates#merge(MongoNamespace, MergeOptions) $merge} stage,
+     *     then {@linkplain MongoCollection#find() finds all} documents in the affected namespace and returns a {@link MongoCursor}
+     *     over them. You may want to use {@link #toCollection()} instead.</li>
+     *     <li>
+     *     Otherwise, returns a {@link MongoCursor} producing no elements.</li>
+     * </ul>
+     */
+    @Override
+    MongoCursor<TResult> cursor();
+
+    /**
+     * Enables writing to temporary files. A null value indicates that it's unspecified.
+     *
+     * @param allowDiskUse true if writing to temporary files is enabled
+     * @return this
+     * @mongodb.driver.manual reference/command/aggregate/ Aggregation
+     */
+    AggregateIterable<TResult> allowDiskUse(@Nullable Boolean allowDiskUse);
+
+    /**
+     * Sets the number of documents to return per batch.
+     *
+     * @param batchSize the batch size
+     * @return this
+     * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size
+     */
+    AggregateIterable<TResult> batchSize(int batchSize);
+
+    /**
+     * Sets the timeoutMode for the cursor.
+     *
+     *
<p>
+ * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings}, + * via {@link MongoDatabase} or via {@link MongoCollection} + *
</p>
+ *
<p>
+ * If the {@code timeout} is set then: + *
<ul>
+     *      <li>For non-tailable cursors, the default value of timeoutMode is {@link TimeoutMode#CURSOR_LIFETIME}</li>
+     *      <li>For tailable cursors, the default value of timeoutMode is {@link TimeoutMode#ITERATION} and it is an error
+     *      to configure it as: {@link TimeoutMode#CURSOR_LIFETIME}</li>
+     *     </ul>
+     * </p>
+     * <p>
+ * Will error if the timeoutMode is set to {@link TimeoutMode#ITERATION} and the pipeline contains either + * an {@code $out} or a {@code $merge} stage. + *
</p>
+     * @param timeoutMode the timeout mode
+     * @return this
+     * @since 5.2
+     */
+    @Alpha(Reason.CLIENT)
+    AggregateIterable<TResult> timeoutMode(TimeoutMode timeoutMode);
+
+    /**
+     * Sets the maximum execution time on the server for this operation.
+     *
+     * @param maxTime the max time
+     * @param timeUnit the time unit, which may not be null
+     * @return this
+     * @mongodb.driver.manual reference/method/cursor.maxTimeMS/#cursor.maxTimeMS Max Time
+     */
+    AggregateIterable<TResult> maxTime(long maxTime, TimeUnit timeUnit);
+
+    /**
+     * The maximum amount of time for the server to wait on new documents to satisfy a {@code $changeStream} aggregation.
+     *
<p>
+     * A zero value will be ignored.
+     *
+     * @param maxAwaitTime the max await time
+     * @param timeUnit the time unit to return the result in
+     * @return this
+     * @mongodb.server.release 3.6
+     * @since 3.6
+     */
+    AggregateIterable<TResult> maxAwaitTime(long maxAwaitTime, TimeUnit timeUnit);
+
+    /**
+     * Sets the bypass document level validation flag.
+     *
+     *
<p>
Note: This only applies when an $out or $merge stage is specified
</p>
.
+     *
+     * @param bypassDocumentValidation If true, allows the write to opt-out of document level validation.
+     * @return this
+     * @since 3.2
+     * @mongodb.driver.manual reference/command/aggregate/ Aggregation
+     * @mongodb.server.release 3.2
+     */
+    AggregateIterable<TResult> bypassDocumentValidation(@Nullable Boolean bypassDocumentValidation);
+
+    /**
+     * Sets the collation options
+     *
+     *
<p>
A null value represents the server default.
</p>
+     * @param collation the collation options to use
+     * @return this
+     * @since 3.4
+     * @mongodb.server.release 3.4
+     */
+    AggregateIterable<TResult> collation(@Nullable Collation collation);
+
+    /**
+     * Sets the comment for this operation. A null value means no comment is set.
+     *
+     * @param comment the comment
+     * @return this
+     * @since 3.6
+     * @mongodb.server.release 3.6
+     */
+    AggregateIterable<TResult> comment(@Nullable String comment);
+
+    /**
+     * Sets the comment for this operation. A null value means no comment is set.
+     *
+     *
<p>
The comment can be any valid BSON type for server versions 4.4 and above.
+     * Server versions between 3.6 and 4.2 only support string as comment,
+     * and providing a non-string type will result in a server-side error.
+     *
+     * @param comment the comment
+     * @return this
+     * @since 4.6
+     * @mongodb.server.release 3.6
+     */
+    AggregateIterable<TResult> comment(@Nullable BsonValue comment);
+
+    /**
+     * Sets the hint for which index to use. A null value means no hint is set.
+     *
+     * @param hint the hint
+     * @return this
+     * @since 3.6
+     * @mongodb.server.release 3.6
+     */
+    AggregateIterable<TResult> hint(@Nullable Bson hint);
+
+    /**
+     * Sets the hint to apply.
+     *
+     *
<p>
Note: If {@link AggregateIterable#hint(Bson)} is set that will be used instead of any hint string.
</p>
+     *
+     * @param hint the name of the index which should be used for the operation
+     * @return this
+     * @since 4.4
+     */
+    AggregateIterable<TResult> hintString(@Nullable String hint);
+
+    /**
+     * Add top-level variables to the aggregation.
+     *
<p>
+ * For MongoDB 5.0+, the aggregate command accepts a {@code let} option. This option is a document consisting of zero or more + * fields representing variables that are accessible to the aggregation pipeline. The key is the name of the variable and the value is + * a constant in the aggregate expression language. Each parameter name is then usable to access the value of the corresponding + * expression with the "$$" syntax within aggregate expression contexts which may require the use of $expr or a pipeline. + *
</p>
+     *
+     * @param variables the variables
+     * @return this
+     * @since 4.3
+     * @mongodb.server.release 5.0
+     */
+    AggregateIterable<TResult> let(@Nullable Bson variables);
+
+    /**
+     * Explain the execution plan for this operation with the server's default verbosity level
+     *
+     * @return the execution plan
+     * @since 4.2
+     * @mongodb.driver.manual reference/command/explain/
+     * @mongodb.server.release 3.6
+     */
+    Document explain();
+
+    /**
+     * Explain the execution plan for this operation with the given verbosity level
+     *
+     * @param verbosity the verbosity of the explanation
+     * @return the execution plan
+     * @since 4.2
+     * @mongodb.driver.manual reference/command/explain/
+     * @mongodb.server.release 3.6
+     */
+    Document explain(ExplainVerbosity verbosity);
+
+    /**
+     * Explain the execution plan for this operation with the server's default verbosity level
+     *
+     * @param <E> the type of the document class
+     * @param explainResultClass the document class to decode into
+     * @return the execution plan
+     * @since 4.2
+     * @mongodb.driver.manual reference/command/explain/
+     * @mongodb.server.release 3.6
+     */
+    <E> E explain(Class<E> explainResultClass);
+
+    /**
+     * Explain the execution plan for this operation with the given verbosity level
+     *
+     * @param <E> the type of the document class
+     * @param explainResultClass the document class to decode into
+     * @param verbosity the verbosity of the explanation
+     * @return the execution plan
+     * @since 4.2
+     * @mongodb.driver.manual reference/command/explain/
+     * @mongodb.server.release 3.6
+     */
+    <E> E explain(Class<E> explainResultClass, ExplainVerbosity verbosity);
+}
diff --git a/driver-sync/src/main/com/mongodb/client/ChangeStreamIterable.java b/driver-sync/src/main/com/mongodb/client/ChangeStreamIterable.java
new file mode 100644
index 00000000000..017ee9c2442
--- /dev/null
+++ b/driver-sync/src/main/com/mongodb/client/ChangeStreamIterable.java
@@ -0,0 +1,181 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.client.model.Collation;
+import com.mongodb.client.model.changestream.ChangeStreamDocument;
+import com.mongodb.client.model.changestream.FullDocument;
+import com.mongodb.client.model.changestream.FullDocumentBeforeChange;
+import com.mongodb.lang.Nullable;
+import org.bson.BsonDocument;
+import org.bson.BsonTimestamp;
+import org.bson.BsonValue;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Iterable for change streams.
+ *
+ *
<p>
Note: the {@link ChangeStreamDocument} class will not be applicable for all change stream outputs. If using custom pipelines that + * radically change the result, then the {@link #withDocumentClass(Class)} method can be used to provide an alternative document format.
</p>
+ *
+ * @param <TResult> The type of the result.
+ * @mongodb.server.release 3.6
+ * @since 3.6
+ */
+public interface ChangeStreamIterable<TResult> extends MongoIterable<ChangeStreamDocument<TResult>> {
+
+    /**
+     * Returns a cursor used for iterating over elements of type {@code ChangeStreamDocument<TResult>}. The cursor has
+     * a covariant return type to additionally provide a method to access the resume token in change stream batches.
+     *
+     * @return the change stream cursor
+     * @since 3.11
+     */
+    MongoChangeStreamCursor<ChangeStreamDocument<TResult>> cursor();
+
+    /**
+     * Sets the fullDocument value.
+     *
+     * @param fullDocument the fullDocument
+     * @return this
+     */
+    ChangeStreamIterable<TResult> fullDocument(FullDocument fullDocument);
+
+    /**
+     * Sets the fullDocumentBeforeChange value.
+     *
+     * @param fullDocumentBeforeChange the fullDocumentBeforeChange
+     * @return this
+     * @since 4.7
+     * @mongodb.server.release 6.0
+     */
+    ChangeStreamIterable<TResult> fullDocumentBeforeChange(FullDocumentBeforeChange fullDocumentBeforeChange);
+
+    /**
+     * Sets the logical starting point for the new change stream.
+     *
+     * @param resumeToken the resume token
+     * @return this
+     */
+    ChangeStreamIterable<TResult> resumeAfter(BsonDocument resumeToken);
+
+    /**
+     * Sets the number of documents to return per batch.
+     *
+     * @param batchSize the batch size
+     * @return this
+     * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size
+     */
+    ChangeStreamIterable<TResult> batchSize(int batchSize);
+
+    /**
+     * Sets the maximum await execution time on the server for this operation.
+     *
+     * @param maxAwaitTime the max await time. A zero value will be ignored, and indicates that the driver should respect the server's
+     *                     default value
+     * @param timeUnit the time unit, which may not be null
+     * @return this
+     */
+    ChangeStreamIterable<TResult> maxAwaitTime(long maxAwaitTime, TimeUnit timeUnit);
+
+    /**
+     * Sets the collation options
+     *
+     *
<p>
A null value represents the server default.
</p>
+     * @param collation the collation options to use
+     * @return this
+     */
+    ChangeStreamIterable<TResult> collation(@Nullable Collation collation);
+
+    /**
+     * Returns a {@code MongoIterable} containing the results of the change stream based on the document class provided.
+     *
+     * @param clazz the class to use for the raw result.
+     * @param <TDocument> the result type
+     * @return the new Mongo Iterable
+     */
+    <TDocument> MongoIterable<TDocument> withDocumentClass(Class<TDocument> clazz);
+
+    /**
+     * The change stream will only provide changes that occurred at or after the specified timestamp.
+     *
+     *
<p>
Any command run against the server will return an operation time that can be used here.
</p>
+ *
<p>
The default value is an operation time obtained from the server before the change stream was created.
</p>
+     *
+     * @param startAtOperationTime the start at operation time
+     * @since 3.8
+     * @return this
+     * @mongodb.server.release 4.0
+     * @mongodb.driver.manual reference/method/db.runCommand/
+     */
+    ChangeStreamIterable<TResult> startAtOperationTime(BsonTimestamp startAtOperationTime);
+
+    /**
+     * Similar to {@code resumeAfter}, this option takes a resume token and starts a
+     * new change stream returning the first notification after the token.
+     *
+     *
<p>
This will allow users to watch collections that have been dropped and recreated + * or newly renamed collections without missing any notifications.
</p>
+ * + *
<p>
Note: The server will report an error if both {@code startAfter} and {@code resumeAfter} are specified.
</p>
+     *
+     * @param startAfter the startAfter resumeToken
+     * @return this
+     * @since 3.11
+     * @mongodb.server.release 4.2
+     * @mongodb.driver.manual changeStreams/#change-stream-start-after
+     */
+    ChangeStreamIterable<TResult> startAfter(BsonDocument startAfter);
+
+    /**
+     * Sets the comment for this operation. A null value means no comment is set.
+     *
+     * @param comment the comment
+     * @return this
+     * @since 4.6
+     * @mongodb.server.release 3.6
+     */
+    ChangeStreamIterable<TResult> comment(@Nullable String comment);
+
+    /**
+     * Sets the comment for this operation. A null value means no comment is set.
+     *
+     *
<p>
The comment can be any valid BSON type for server versions 4.4 and above.
+     * Server versions between 3.6 and 4.2 only support string as comment,
+     * and providing a non-string type will result in a server-side error.
+     *
+     * @param comment the comment
+     * @return this
+     * @since 4.6
+     * @mongodb.server.release 3.6
+     */
+    ChangeStreamIterable<TResult> comment(@Nullable BsonValue comment);
+
+    /**
+     * Sets whether to include expanded change stream events, which are:
+     * createIndexes, dropIndexes, modify, create, shardCollection,
+     * reshardCollection, refineCollectionShardKey. False by default.
+     *
+     * @param showExpandedEvents true to include expanded events
+     * @return this
+     * @since 4.7
+     * @mongodb.server.release 6.0
+     */
+    ChangeStreamIterable<TResult> showExpandedEvents(boolean showExpandedEvents);
+}
diff --git a/driver-sync/src/main/com/mongodb/client/ClientSession.java b/driver-sync/src/main/com/mongodb/client/ClientSession.java
new file mode 100644
index 00000000000..5d994b863e8
--- /dev/null
+++ b/driver-sync/src/main/com/mongodb/client/ClientSession.java
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.ServerAddress;
+import com.mongodb.TransactionOptions;
+import com.mongodb.lang.Nullable;
+
+/**
+ * A client session that supports transactions.
+ *
+ * @since 3.8
+ */
+public interface ClientSession extends com.mongodb.session.ClientSession {
+    /**
+     * Returns the server address of the pinned mongos on this session.
+     *
+     * @return the server address of the pinned mongos.
+     * @mongodb.server.release 4.2
+     * @since 3.11
+     */
+    @Nullable
+    ServerAddress getPinnedServerAddress();
+
+    /**
+     * Returns true if there is an active transaction on this session, and false otherwise
+     *
+     * @return true if there is an active transaction on this session
+     * @mongodb.server.release 4.0
+     */
+    boolean hasActiveTransaction();
+
+    /**
+     * Notify the client session that a message has been sent.
+     *
<p>
+ * For internal use only + *
</p>
+ * + * @return true if this is the first message sent, false otherwise + */ + boolean notifyMessageSent(); + + /** + * Notify the client session that command execution is being initiated. This should be called before server selection occurs. + *
<p>
+ * For internal use only + *
</p>
+     *
+     * @param operation the operation
+     */
+    void notifyOperationInitiated(Object operation);
+
+    /**
+     * Gets the transaction options. Only call this method if the session has an active transaction.
+     *
+     * @return the transaction options
+     */
+    TransactionOptions getTransactionOptions();
+
+    /**
+     * Start a transaction in the context of this session with default transaction options. A transaction can not be started if there is
+     * already an active transaction on this session.
+     *
+     * @mongodb.server.release 4.0
+     */
+    void startTransaction();
+
+    /**
+     * Start a transaction in the context of this session with the given transaction options. A transaction can not be started if there is
+     * already an active transaction on this session.
+     *
+     * @param transactionOptions the options to apply to the transaction
+     *
+     * @mongodb.server.release 4.0
+     */
+    void startTransaction(TransactionOptions transactionOptions);
+
+    /**
+     * Commit a transaction in the context of this session. A transaction can only be committed if one has first been started.
+     *
+     * @mongodb.server.release 4.0
+     */
+    void commitTransaction();
+
+    /**
+     * Abort a transaction in the context of this session. A transaction can only be aborted if one has first been started.
+     *
+     * @mongodb.server.release 4.0
+     */
+    void abortTransaction();
+
+    /**
+     * Execute the given function within a transaction.
+     *
+     * @param <T> the return type of the transaction body
+     * @param transactionBody the body of the transaction
+     * @return the return value of the transaction body
+     * @mongodb.server.release 4.0
+     * @since 3.11
+     */
+    <T> T withTransaction(TransactionBody<T> transactionBody);
+
+    /**
+     * Execute the given function within a transaction.
+     *
+     * @param <T> the return type of the transaction body
+     * @param transactionBody the body of the transaction
+     * @param options the transaction options
+     * @return the return value of the transaction body
+     * @mongodb.server.release 4.0
+     * @since 3.11
+     */
+    <T> T withTransaction(TransactionBody<T> transactionBody, TransactionOptions options);
+}
diff --git a/driver-sync/src/main/com/mongodb/client/DistinctIterable.java b/driver-sync/src/main/com/mongodb/client/DistinctIterable.java
new file mode 100644
index 00000000000..9488c7fb49e
--- /dev/null
+++ b/driver-sync/src/main/com/mongodb/client/DistinctIterable.java
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.annotations.Alpha;
+import com.mongodb.annotations.Reason;
+import com.mongodb.client.cursor.TimeoutMode;
+import com.mongodb.client.model.Collation;
+import com.mongodb.lang.Nullable;
+import org.bson.BsonValue;
+import org.bson.conversions.Bson;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Iterable interface for distinct.
+ *
+ * @param <TResult> The type of the result.
+ * @since 3.0
+ */
+public interface DistinctIterable<TResult> extends MongoIterable<TResult> {
+
+    /**
+     * Sets the query filter to apply to the query.
+     *
+     * @param filter the filter, which may be null.
+     * @return this
+     * @mongodb.driver.manual reference/method/db.collection.find/ Filter
+     */
+    DistinctIterable<TResult> filter(@Nullable Bson filter);
+
+    /**
+     * Sets the maximum execution time on the server for this operation.
+     *
+     * @param maxTime the max time
+     * @param timeUnit the time unit, which may not be null
+     * @return this
+     */
+    DistinctIterable<TResult> maxTime(long maxTime, TimeUnit timeUnit);
+
+    /**
+     * Sets the number of documents to return per batch.
+     *
+     * @param batchSize the batch size
+     * @return this
+     * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size
+     */
+    DistinctIterable<TResult> batchSize(int batchSize);
+
+    /**
+     * Sets the collation options
+     *
+     *
<p>
A null value represents the server default.
</p>
+     * @param collation the collation options to use
+     * @return this
+     * @since 3.4
+     * @mongodb.server.release 3.4
+     */
+    DistinctIterable<TResult> collation(@Nullable Collation collation);
+
+    /**
+     * Sets the comment for this operation. A null value means no comment is set.
+     *
+     * @param comment the comment
+     * @return this
+     * @since 4.6
+     * @mongodb.server.release 4.4
+     */
+    DistinctIterable<TResult> comment(@Nullable String comment);
+
+    /**
+     * Sets the comment for this operation. A null value means no comment is set.
+     *
+     * @param comment the comment
+     * @return this
+     * @since 4.6
+     * @mongodb.server.release 4.4
+     */
+    DistinctIterable<TResult> comment(@Nullable BsonValue comment);
+
+    /**
+     * Sets the hint for which index to use. A null value means no hint is set.
+     *
+     * @param hint the hint
+     * @return this
+     * @since 5.3
+     */
+    DistinctIterable<TResult> hint(@Nullable Bson hint);
+
+    /**
+     * Sets the hint to apply.
+     *
+     *
<p>Note: If {@link DistinctIterable#hint(Bson)} is set that will be used instead of any hint string.</p>
+ * + * @param hint the name of the index which should be used for the operation + * @return this + * @since 5.3 + */ + DistinctIterable<TResult> hintString(@Nullable String hint); + + /** + * Sets the timeoutMode for the cursor. + * + *
<p>
+ * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+ * via {@link MongoDatabase} or via {@link MongoCollection}
+ * </p>
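+ * <p>A minimal sketch (illustrative; assumes a client-level {@code timeout} has been configured):</p>
+ * <pre>{@code
+ * collection.distinct("status", String.class)
+ *         .timeoutMode(TimeoutMode.CURSOR_LIFETIME)
+ *         .into(new ArrayList<>());
+ * }</pre>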
+ * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + DistinctIterable timeoutMode(TimeoutMode timeoutMode); +} diff --git a/driver-sync/src/main/com/mongodb/client/FindIterable.java b/driver-sync/src/main/com/mongodb/client/FindIterable.java new file mode 100644 index 00000000000..d610ed73ffa --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/FindIterable.java @@ -0,0 +1,334 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.CursorType; +import com.mongodb.ExplainVerbosity; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.Projections; +import com.mongodb.lang.Nullable; +import org.bson.BsonValue; +import org.bson.Document; +import org.bson.conversions.Bson; + +import java.util.concurrent.TimeUnit; + +/** + * Iterable for find. + * + * @param The type of the result. + * @since 3.0 + */ +public interface FindIterable extends MongoIterable { + + /** + * Sets the query filter to apply to the query. + * + * @param filter the filter, which may be null. + * @return this + * @mongodb.driver.manual reference/method/db.collection.find/ Filter + */ + FindIterable filter(@Nullable Bson filter); + + /** + * Sets the limit to apply. + * + * @param limit the limit, which may be 0 + * @return this + * @mongodb.driver.manual reference/method/cursor.limit/#cursor.limit Limit + */ + FindIterable limit(int limit); + /** + * Sets the number of documents to skip. + * + * @param skip the number of documents to skip + * @return this + * @mongodb.driver.manual reference/method/cursor.skip/#cursor.skip Skip + */ + FindIterable skip(int skip); + + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time + * @param timeUnit the time unit, which may not be null + * @return this + * @mongodb.driver.manual reference/method/cursor.maxTimeMS/#cursor.maxTimeMS Max Time + */ + FindIterable maxTime(long maxTime, TimeUnit timeUnit); + + /** + * The maximum amount of time for the server to wait on new documents to satisfy a tailable cursor + * query. This only applies to a TAILABLE_AWAIT cursor. When the cursor is not a TAILABLE_AWAIT cursor, + * this option is ignored. + *
<p>
+ * On servers >= 3.2, this option will be specified on the getMore command as "maxTimeMS". The default
+ * is no value: no "maxTimeMS" is sent to the server with the getMore command.
+ * </p>
+ * <p>
+ * On servers < 3.2, this option is ignored, and indicates that the driver should respect the server's default value
+ * </p>
+ * <p>
+ * A zero value will be ignored. + * </p> + * + * @param maxAwaitTime the max await time + * @param timeUnit the time unit, which may not be null + * @return this + * @mongodb.driver.manual reference/method/cursor.maxTimeMS/#cursor.maxTimeMS Max Time + * @since 3.2 + */ + FindIterable maxAwaitTime(long maxAwaitTime, TimeUnit timeUnit); + + /** + * Sets a document describing the fields to return for all matching documents. + * + * @param projection the projection document, which may be null. + * @return this + * @mongodb.driver.manual reference/method/db.collection.find/ Projection + * @see Projections + */ + FindIterable projection(@Nullable Bson projection); + + /** + * Sets the sort criteria to apply to the query. + * + * @param sort the sort criteria, which may be null. + * @return this + * @mongodb.driver.manual reference/method/cursor.sort/ Sort + */ + FindIterable sort(@Nullable Bson sort); + + /** + * The server normally times out idle cursors after an inactivity period (10 minutes) + * to prevent excess memory use. Set this option to prevent that. + * + * @param noCursorTimeout true if cursor timeout is disabled + * @return this + */ + FindIterable noCursorTimeout(boolean noCursorTimeout); + + /** + * Get partial results from a sharded cluster if one or more shards are unreachable (instead of throwing an error). + * + * @param partial if partial results for sharded clusters is enabled + * @return this + */ + FindIterable partial(boolean partial); + + /** + * Sets the cursor type. + * + * @param cursorType the cursor type + * @return this + */ + FindIterable cursorType(CursorType cursorType); + + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size + * @return this + * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size + */ + @Override + FindIterable batchSize(int batchSize); + + /** + * Sets the collation options + * + *
<p>A null value represents the server default.</p>
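+ * <p>For example (an illustrative sketch; the locale and strength choices are assumptions):</p>
+ * <pre>{@code
+ * collection.find()
+ *         .collation(Collation.builder().locale("en").collationStrength(CollationStrength.SECONDARY).build());
+ * }</pre>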
+ * @param collation the collation options to use + * @return this + * @since 3.4 + * @mongodb.server.release 3.4 + */ + FindIterable collation(@Nullable Collation collation); + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 3.5 + */ + FindIterable comment(@Nullable String comment); + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + *
<p>
The comment can be any valid BSON type for server versions 4.4 and above. + * Server versions between 3.6 and 4.2 only support string as comment, + * and providing a non-string type will result in a server-side error. + * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 3.6 + */ + FindIterable comment(@Nullable BsonValue comment); + + /** + * Sets the hint for which index to use. A null value means no hint is set. + * + * @param hint the hint + * @return this + * @since 3.5 + */ + FindIterable hint(@Nullable Bson hint); + + /** + * Sets the hint to apply. + * + *
<p>Note: If {@link FindIterable#hint(Bson)} is set that will be used instead of any hint string.</p>
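+ * <p>A sketch of the precedence rule (illustrative; {@code "qty_1"} is an assumed index name):</p>
+ * <pre>{@code
+ * // hint(Bson) takes precedence, so the hint string below would be ignored
+ * collection.find().hint(Indexes.ascending("qty")).hintString("qty_1");
+ * }</pre>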
+ * + * @param hint the name of the index which should be used for the operation + * @return this + * @since 3.12 + */ + FindIterable hintString(@Nullable String hint); + + /** + * Add top-level variables to the operation. A null value means no variables are set. + * + *
<p>Allows for improved command readability by separating the variables from the query text.</p>
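+ * <p>For example (illustrative; the variable and field names are assumptions):</p>
+ * <pre>{@code
+ * collection.find(Filters.expr(new Document("$gt", Arrays.asList("$qty", "$$targetQty"))))
+ *         .let(new Document("targetQty", 25));
+ * }</pre>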
+ * + * @param variables for find operation or null + * @return this + * @mongodb.driver.manual reference/command/find/ + * @mongodb.server.release 5.0 + * @since 4.6 + */ + FindIterable let(@Nullable Bson variables); + + /** + * Sets the exclusive upper bound for a specific index. A null value means no max is set. + * + * @param max the max + * @return this + * @since 3.5 + */ + FindIterable max(@Nullable Bson max); + + /** + * Sets the minimum inclusive lower bound for a specific index. A null value means no max is set. + * + * @param min the min + * @return this + * @since 3.5 + */ + FindIterable min(@Nullable Bson min); + + /** + * Sets the returnKey. If true the find operation will return only the index keys in the resulting documents. + * + * @param returnKey the returnKey + * @return this + * @since 3.5 + */ + FindIterable returnKey(boolean returnKey); + + /** + * Sets the showRecordId. Set to true to add a field {@code $recordId} to the returned documents. + * + * @param showRecordId the showRecordId + * @return this + * @since 3.5 + */ + FindIterable showRecordId(boolean showRecordId); + + /** + * Enables writing to temporary files on the server. When set to true, the server + * can write temporary data to disk while executing the find operation. + * + *
<p>This option is sent only if the caller explicitly sets it to true.</p>
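+ * <p>For example (an illustrative sketch):</p>
+ * <pre>{@code
+ * collection.find().sort(Sorts.ascending("qty")).allowDiskUse(true);
+ * }</pre>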
+ * + * @param allowDiskUse the allowDiskUse + * @return this + * @since 4.1 + * @mongodb.server.release 4.4 + */ + FindIterable allowDiskUse(@Nullable Boolean allowDiskUse); + + /** + * Sets the timeoutMode for the cursor. + * + *
<p>
+ * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+ * via {@link MongoDatabase} or via {@link MongoCollection}
+ * </p>
+ * <p>
+ * If the {@code timeout} is set then:
+ * <ul>
+ *   <li>For non-tailable cursors, the default value of timeoutMode is {@link TimeoutMode#CURSOR_LIFETIME}</li>
+ *   <li>For tailable cursors, the default value of timeoutMode is {@link TimeoutMode#ITERATION} and it's an error
+ *   to configure it as: {@link TimeoutMode#CURSOR_LIFETIME}</li>
+ * </ul>
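+ * <p>A configuration sketch (illustrative; assumes a capped collection and a client-level {@code timeout}):</p>
+ * <pre>{@code
+ * collection.find()
+ *         .cursorType(CursorType.TailableAwait)
+ *         .timeoutMode(TimeoutMode.ITERATION); // CURSOR_LIFETIME would be rejected for a tailable cursor
+ * }</pre>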
+ * + * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + FindIterable timeoutMode(TimeoutMode timeoutMode); + + /** + * Explain the execution plan for this operation with the server's default verbosity level + * + * @return the execution plan + * @since 4.2 + * @mongodb.driver.manual reference/command/explain/ + * @mongodb.server.release 3.2 + */ + Document explain(); + + /** + * Explain the execution plan for this operation with the given verbosity level + * + * @param verbosity the verbosity of the explanation + * @return the execution plan + * @since 4.2 + * @mongodb.driver.manual reference/command/explain/ + * @mongodb.server.release 3.2 + */ + Document explain(ExplainVerbosity verbosity); + + /** + * Explain the execution plan for this operation with the server's default verbosity level + * + * @param the type of the document class + * @param explainResultClass the document class to decode into + * @return the execution plan + * @since 4.2 + * @mongodb.driver.manual reference/command/explain/ + * @mongodb.server.release 3.2 + */ + E explain(Class explainResultClass); + + /** + * Explain the execution plan for this operation with the given verbosity level + * + * @param the type of the document class + * @param explainResultClass the document class to decode into + * @param verbosity the verbosity of the explanation + * @return the execution plan + * @since 4.2 + * @mongodb.driver.manual reference/command/explain/ + * @mongodb.server.release 3.2 + */ + E explain(Class explainResultClass, ExplainVerbosity verbosity); +} diff --git a/driver-sync/src/main/com/mongodb/client/ListCollectionNamesIterable.java b/driver-sync/src/main/com/mongodb/client/ListCollectionNamesIterable.java new file mode 100644 index 00000000000..94cfd7c52e3 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/ListCollectionNamesIterable.java @@ -0,0 +1,90 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.lang.Nullable; +import org.bson.BsonValue; +import org.bson.conversions.Bson; + +import java.util.concurrent.TimeUnit; + +/** + * Iterable for listing collection names. + * + * @since 5.0 + * @mongodb.driver.manual reference/command/listCollections/ listCollections + */ +public interface ListCollectionNamesIterable extends MongoIterable { + /** + * Sets the query filter to apply to the query. + * + * @param filter the filter, which may be null. + * @return this + * @mongodb.driver.manual reference/method/db.collection.find/ Filter + */ + ListCollectionNamesIterable filter(@Nullable Bson filter); + + /** + * Sets the maximum execution time on the server for this operation. 
+ * + * @param maxTime the max time + * @param timeUnit the time unit, which may not be null + * @return this + * @mongodb.driver.manual reference/operator/meta/maxTimeMS/ Max Time + */ + ListCollectionNamesIterable maxTime(long maxTime, TimeUnit timeUnit); + + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size + * @return this + * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size + */ + @Override + ListCollectionNamesIterable batchSize(int batchSize); + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @mongodb.server.release 4.4 + */ + ListCollectionNamesIterable comment(@Nullable String comment); + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @mongodb.server.release 4.4 + */ + ListCollectionNamesIterable comment(@Nullable BsonValue comment); + + /** + * Sets the {@code authorizedCollections} field of the {@code listCollections} command. + * + * @param authorizedCollections If {@code true}, allows executing the {@code listCollections} command, + * which has the {@code nameOnly} field set to {@code true}, without having the + * + * {@code listCollections} privilege on the database resource. + * @return {@code this}. + * @mongodb.server.release 4.0 + */ + ListCollectionNamesIterable authorizedCollections(boolean authorizedCollections); +} diff --git a/driver-sync/src/main/com/mongodb/client/ListCollectionsIterable.java b/driver-sync/src/main/com/mongodb/client/ListCollectionsIterable.java new file mode 100644 index 00000000000..421fbcaa674 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/ListCollectionsIterable.java @@ -0,0 +1,99 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.lang.Nullable; +import org.bson.BsonValue; +import org.bson.conversions.Bson; + +import java.util.concurrent.TimeUnit; + +/** + * Iterable for ListCollections. + * + * @param The type of the result. + * @since 3.0 + * @mongodb.driver.manual reference/command/listCollections/ listCollections + */ +public interface ListCollectionsIterable extends MongoIterable { + + /** + * Sets the query filter to apply to the query. + * + * @param filter the filter, which may be null. + * @return this + * @mongodb.driver.manual reference/method/db.collection.find/ Filter + */ + ListCollectionsIterable filter(@Nullable Bson filter); + + /** + * Sets the maximum execution time on the server for this operation. 
+ * + * @param maxTime the max time + * @param timeUnit the time unit, which may not be null + * @return this + * @mongodb.driver.manual reference/operator/meta/maxTimeMS/ Max Time + */ + ListCollectionsIterable maxTime(long maxTime, TimeUnit timeUnit); + + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size + * @return this + * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size + */ + @Override + ListCollectionsIterable batchSize(int batchSize); + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + ListCollectionsIterable comment(@Nullable String comment); + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + ListCollectionsIterable comment(@Nullable BsonValue comment); + + /** + * Sets the timeoutMode for the cursor. + * + *
<p>
+ * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+ * via {@link MongoDatabase} or via {@link MongoCollection}
+ * </p>
+ * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + ListCollectionsIterable timeoutMode(TimeoutMode timeoutMode); +} diff --git a/driver-sync/src/main/com/mongodb/client/ListDatabasesIterable.java b/driver-sync/src/main/com/mongodb/client/ListDatabasesIterable.java new file mode 100644 index 00000000000..75625e487a0 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/ListDatabasesIterable.java @@ -0,0 +1,121 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.lang.Nullable; +import org.bson.BsonValue; +import org.bson.conversions.Bson; + +import java.util.concurrent.TimeUnit; + +/** + * Iterable for ListDatabases. + * + * @param The type of the result. + * @since 3.0 + */ +public interface ListDatabasesIterable extends MongoIterable { + + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time + * @param timeUnit the time unit, which may not be null + * @return this + * @mongodb.driver.manual reference/operator/meta/maxTimeMS/ Max Time + */ + ListDatabasesIterable maxTime(long maxTime, TimeUnit timeUnit); + + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size + * @return this + * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size + */ + @Override + ListDatabasesIterable batchSize(int batchSize); + + /** + * Sets the query filter to apply to the returned database names. + * + * @param filter the filter, which may be null. + * @return this + * @since 3.6 + * @mongodb.server.release 3.4.2 + */ + ListDatabasesIterable filter(@Nullable Bson filter); + + /** + * Sets the nameOnly flag that indicates whether the command should return just the database names or return the database names and + * size information. + * + * @param nameOnly the nameOnly flag, which may be null + * @return this + * @since 3.6 + * @mongodb.server.release 3.4.3 + */ + ListDatabasesIterable nameOnly(@Nullable Boolean nameOnly); + + /** + * Sets the authorizedDatabasesOnly flag that indicates whether the command should return just the databases which the user + * is authorized to see. + * + * @param authorizedDatabasesOnly the authorizedDatabasesOnly flag, which may be null + * @return this + * @since 4.1 + * @mongodb.server.release 4.0 + */ + ListDatabasesIterable authorizedDatabasesOnly(@Nullable Boolean authorizedDatabasesOnly); + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + ListDatabasesIterable comment(@Nullable String comment); + + /** + * Sets the comment for this operation. A null value means no comment is set. 
+ * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + ListDatabasesIterable comment(@Nullable BsonValue comment); + + /** + * Sets the timeoutMode for the cursor. + * + *
<p>
+ * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+ * via {@link MongoDatabase} or via {@link MongoCollection}
+ * </p>
+ * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + ListDatabasesIterable timeoutMode(TimeoutMode timeoutMode); +} diff --git a/driver-sync/src/main/com/mongodb/client/ListIndexesIterable.java b/driver-sync/src/main/com/mongodb/client/ListIndexesIterable.java new file mode 100644 index 00000000000..160cb59ebd9 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/ListIndexesIterable.java @@ -0,0 +1,88 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.lang.Nullable; +import org.bson.BsonValue; + +import java.util.concurrent.TimeUnit; + +/** + * Iterable for ListIndexes. + * + * @param The type of the result. + * @since 3.0 + */ +public interface ListIndexesIterable extends MongoIterable { + + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time + * @param timeUnit the time unit, which may not be null + * @return this + * @mongodb.driver.manual reference/operator/meta/maxTimeMS/ Max Time + */ + ListIndexesIterable maxTime(long maxTime, TimeUnit timeUnit); + + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size + * @return this + * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size + */ + @Override + ListIndexesIterable batchSize(int batchSize); + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + ListIndexesIterable comment(@Nullable String comment); + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment + * @return this + * @since 4.6 + * @mongodb.server.release 4.4 + */ + ListIndexesIterable comment(@Nullable BsonValue comment); + + /** + * Sets the timeoutMode for the cursor. + * + *
<p>
+ * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+ * via {@link MongoDatabase} or via {@link MongoCollection}
+ * </p>
+ * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + ListIndexesIterable timeoutMode(TimeoutMode timeoutMode); +} diff --git a/driver-sync/src/main/com/mongodb/client/ListSearchIndexesIterable.java b/driver-sync/src/main/com/mongodb/client/ListSearchIndexesIterable.java new file mode 100644 index 00000000000..a5579bacfd5 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/ListSearchIndexesIterable.java @@ -0,0 +1,155 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.ExplainVerbosity; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Evolving; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.Collation; +import com.mongodb.lang.Nullable; +import org.bson.BsonValue; +import org.bson.Document; + +import java.util.concurrent.TimeUnit; + +/** + * Iterable for listing Atlas Search indexes. + * This interface contains the aggregate options that are applied to the {@code $listSearchIndexes} operation. + * + * @param <TResult> The type of the result. + * @mongodb.driver.manual reference/operator/aggregation/listSearchIndexes ListSearchIndexes + * @since 4.11 + * @mongodb.server.release 6.0 + */ +@Evolving +public interface ListSearchIndexesIterable<TResult> extends MongoIterable<TResult> { + + /** + * Sets the index name for this operation. + * + * @param indexName the index name. + * @return this. + */ + ListSearchIndexesIterable<TResult> name(String indexName); + + /** + * Enables writing to temporary files. A null value indicates that it's unspecified. + * + * @param allowDiskUse true if writing to temporary files is enabled. + * @return this. + * @mongodb.driver.manual reference/command/aggregate/ Aggregation + */ + ListSearchIndexesIterable<TResult> allowDiskUse(@Nullable Boolean allowDiskUse); + + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size. + * @return this. + * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size + */ + ListSearchIndexesIterable<TResult> batchSize(int batchSize); + + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time. + * @param timeUnit the time unit, which may not be null. + * @return this. + * @mongodb.driver.manual reference/method/cursor.maxTimeMS/#cursor.maxTimeMS Max Time + */ + ListSearchIndexesIterable<TResult> maxTime(long maxTime, TimeUnit timeUnit); + + /** + * Sets the collation options + * + *
<p>A null value represents the server default.</p>
+ * + * @param collation the collation options to use + * @return this + */ + ListSearchIndexesIterable<TResult> collation(@Nullable Collation collation); + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment. + * @return this + */ + ListSearchIndexesIterable<TResult> comment(@Nullable String comment); + + /** + * Sets the comment for this operation. A null value means no comment is set. + * + * @param comment the comment. + * @return this. + */ + ListSearchIndexesIterable<TResult> comment(@Nullable BsonValue comment); + + /** + * Sets the timeoutMode for the cursor. + * + *
<p>
+ * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+ * via {@link MongoDatabase} or via {@link MongoCollection}
+ * </p>
+ * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + ListSearchIndexesIterable timeoutMode(TimeoutMode timeoutMode); + + /** + * Explain the execution plan for this operation with the server's default verbosity level. + * + * @return the execution plan. + * @mongodb.driver.manual reference/command/explain/ + */ + Document explain(); + + /** + * Explain the execution plan for this operation with the given verbosity level. + * + * @param verbosity the verbosity of the explanation. + * @return the execution plan. + * @mongodb.driver.manual reference/command/explain/ + */ + Document explain(ExplainVerbosity verbosity); + + /** + * Explain the execution plan for this operation with the server's default verbosity level. + * + * @param the type of the document class. + * @param explainResultClass the document class to decode into. + * @return the execution plan. + * @mongodb.driver.manual reference/command/explain/ + */ + E explain(Class explainResultClass); + + /** + * Explain the execution plan for this operation with the given verbosity level. + * + * @param the type of the document class. + * @param explainResultClass the document class to decode into. + * @param verbosity the verbosity of the explanation. + * @return the execution plan. + * @mongodb.driver.manual reference/command/explain/ + */ + E explain(Class explainResultClass, ExplainVerbosity verbosity); +} diff --git a/driver-sync/src/main/com/mongodb/client/MapReduceIterable.java b/driver-sync/src/main/com/mongodb/client/MapReduceIterable.java new file mode 100644 index 00000000000..13d029155f9 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/MapReduceIterable.java @@ -0,0 +1,227 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.Collation; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +import java.util.concurrent.TimeUnit; + +/** + * Iterable for map-reduce. + *
<p>
+ * By default, the {@code MapReduceIterable} produces the results inline. You can write map-reduce output to a collection by using the
+ * {@link #collectionName(String)} and {@link #toCollection()} methods.
+ * </p>
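+ * <p>A usage sketch (illustrative; the JavaScript functions and output collection name are assumptions):</p>
+ * <pre>{@code
+ * collection.mapReduce(
+ *                 "function() { emit(this.category, this.qty); }",
+ *                 "function(key, values) { return Array.sum(values); }")
+ *         .collectionName("qtyByCategory")
+ *         .toCollection();
+ * }</pre>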
+ * + * @param The type of the result. + * @since 3.0 + * @deprecated Superseded by aggregate + */ +@Deprecated +public interface MapReduceIterable extends MongoIterable { + + /** + * Aggregates documents to a collection according to the specified map-reduce function with the given options, which must not produce + * results inline. This method is the preferred alternative to {@link #iterator()}, {@link #cursor()}, + * because this method does what is explicitly requested without executing implicit operations. + * + * @throws IllegalStateException if a {@linkplain #collectionName(String) collection name} to write the results to has not been specified + * @see #collectionName(String) + * @since 3.4 + */ + void toCollection(); + + /** + * Aggregates documents according to the specified map-reduce function with the given options. + *
<ul>
+ *     <li>
+ *         If the aggregation produces results inline, then {@linkplain MongoCollection#find() finds all} documents in the
+ *         affected namespace and returns a {@link MongoCursor} over them. You may want to use {@link #toCollection()} instead.
+ *     </li>
+ *     <li>
+ *         Otherwise, returns a {@link MongoCursor} producing no elements.
+ *     </li>
+ * </ul>
+ */ + @Override + MongoCursor iterator(); + + /** + * Aggregates documents according to the specified map-reduce function with the given options. + *
<ul>
+ *     <li>
+ *         If the aggregation produces results inline, then {@linkplain MongoCollection#find() finds all} documents in the
+ *         affected namespace and returns a {@link MongoCursor} over them. You may want to use {@link #toCollection()} instead.
+ *     </li>
+ *     <li>
+ *         Otherwise, returns a {@link MongoCursor} producing no elements.
+ *     </li>
+ * </ul>
+ */ + @Override + MongoCursor cursor(); + + /** + * Sets the collectionName for the output of the MapReduce + * + *
<p>The default action is to replace the collection if it exists; to change this, use {@link #action}.</p>
+ * + * @param collectionName the name of the collection that you want the map-reduce operation to write its output. + * @return this + * @see #toCollection() + */ + MapReduceIterable collectionName(String collectionName); + + /** + * Sets the JavaScript function that follows the reduce method and modifies the output. + * + * @param finalizeFunction the JavaScript function that follows the reduce method and modifies the output. + * @return this + * @mongodb.driver.manual reference/command/mapReduce/#mapreduce-finalize-cmd Requirements for the finalize Function + */ + MapReduceIterable finalizeFunction(@Nullable String finalizeFunction); + + /** + * Sets the global variables that are accessible in the map, reduce and finalize functions. + * + * @param scope the global variables that are accessible in the map, reduce and finalize functions. + * @return this + * @mongodb.driver.manual reference/command/mapReduce mapReduce + */ + MapReduceIterable scope(@Nullable Bson scope); + + /** + * Sets the sort criteria to apply to the query. + * + * @param sort the sort criteria, which may be null. + * @return this + * @mongodb.driver.manual reference/method/cursor.sort/ Sort + */ + MapReduceIterable sort(@Nullable Bson sort); + + /** + * Sets the query filter to apply to the query. + * + * @param filter the filter to apply to the query. + * @return this + * @mongodb.driver.manual reference/method/db.collection.find/ Filter + */ + MapReduceIterable filter(@Nullable Bson filter); + + /** + * Sets the limit to apply. + * + * @param limit the limit, which may be null + * @return this + * @mongodb.driver.manual reference/method/cursor.limit/#cursor.limit Limit + */ + MapReduceIterable limit(int limit); + + /** + * Sets the flag that specifies whether to convert intermediate data into BSON format between the execution of the map and reduce + * functions. Defaults to false. + * + * @param jsMode the flag that specifies whether to convert intermediate data into BSON format between the execution of the map and + * reduce functions + * @return jsMode + * @mongodb.driver.manual reference/command/mapReduce mapReduce + */ + MapReduceIterable jsMode(boolean jsMode); + + /** + * Sets whether to include the timing information in the result information. + * + * @param verbose whether to include the timing information in the result information. + * @return this + */ + MapReduceIterable verbose(boolean verbose); + + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time + * @param timeUnit the time unit, which may not be null + * @return this + * @mongodb.driver.manual reference/method/cursor.maxTimeMS/#cursor.maxTimeMS Max Time + */ + MapReduceIterable maxTime(long maxTime, TimeUnit timeUnit); + + /** + * Specify the {@code MapReduceAction} to be used when writing to a collection. + * + * @param action an {@link com.mongodb.client.model.MapReduceAction} to perform on the collection + * @return this + */ + MapReduceIterable action(com.mongodb.client.model.MapReduceAction action); + + /** + * Sets the name of the database to output into. + * + * @param databaseName the name of the database to output into. + * @return this + * @mongodb.driver.manual reference/command/mapReduce/#output-to-a-collection-with-an-action output with an action + */ + MapReduceIterable databaseName(@Nullable String databaseName); + + /** + * Sets the number of documents to return per batch. 
+ * + * @param batchSize the batch size + * @return this + * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size + */ + @Override + MapReduceIterable batchSize(int batchSize); + + /** + * Sets the bypass document level validation flag. + * + *
<p>Note: This only applies when an $out stage is specified.</p>
+ * + * @param bypassDocumentValidation If true, allows the write to opt-out of document level validation. + * @return this + * @since 3.2 + * @mongodb.driver.manual reference/command/mapReduce mapReduce + * @mongodb.server.release 3.2 + */ + MapReduceIterable bypassDocumentValidation(@Nullable Boolean bypassDocumentValidation); + + /** + * Sets the collation options + * + *
<p>A null value represents the server default.</p>
+ * @param collation the collation options to use + * @return this + * @since 3.4 + * @mongodb.server.release 3.4 + */ + MapReduceIterable collation(@Nullable Collation collation); + + /** + * Sets the timeoutMode for the cursor. + * + *
<p>
+ * Requires the {@code timeout} to be set, either in the {@link com.mongodb.MongoClientSettings},
+ * via {@link MongoDatabase} or via {@link MongoCollection}
+ * </p>
+ * @param timeoutMode the timeout mode + * @return this + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + MapReduceIterable timeoutMode(TimeoutMode timeoutMode); +} diff --git a/driver-sync/src/main/com/mongodb/client/MongoChangeStreamCursor.java b/driver-sync/src/main/com/mongodb/client/MongoChangeStreamCursor.java new file mode 100644 index 00000000000..ed58412496d --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/MongoChangeStreamCursor.java @@ -0,0 +1,60 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; + +/** + * The Mongo Cursor interface for change streams implementing the iterator protocol. + *
<p>
+ * An application should ensure that a cursor is closed in all circumstances, e.g. using a try-with-resources statement:
+ * </p>
+ * <pre>{@code
+ * try (MongoChangeStreamCursor<ChangeStreamDocument<Document>> cursor = collection.watch().cursor()) {
+ *     while (cursor.hasNext()) {
+ *         System.out.println(cursor.next());
+ *     }
+ * }
+ * }</pre>
+ *
+ * <p>
+ * A {@link com.mongodb.MongoOperationTimeoutException} does not invalidate the {@link MongoChangeStreamCursor}, but is immediately
+ * propagated to the caller. Subsequent method calls will attempt to resume the operation by establishing a new change stream on the server,
+ * without first making a {@code getMore} request.
</p>
+ * <p>
+ * If a {@link com.mongodb.MongoOperationTimeoutException} occurs before any events are received, it indicates that the server + * has timed out before it could finish processing the existing oplog. In such cases, it is recommended to close the current stream + * and recreate it with a higher timeout setting. + * </p> + * + * @since 3.11 + * @param <TResult> The type of documents the cursor contains + */ +@NotThreadSafe +public interface MongoChangeStreamCursor<TResult> extends MongoCursor<TResult> { + /** + * Returns the resume token. If a batch has been iterated to the last change stream document in the batch + * and a postBatchResumeToken is included in the document, the postBatchResumeToken will be returned. + * Otherwise, the resume token contained in the last change stream document will be returned. + * + * @return the resume token, which can be null if the cursor has either not been iterated yet, or the cursor is closed. + */ + @Nullable + BsonDocument getResumeToken(); +} diff --git a/driver-sync/src/main/com/mongodb/client/MongoClient.java b/driver-sync/src/main/com/mongodb/client/MongoClient.java new file mode 100644 index 00000000000..e61ebf92566 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/MongoClient.java @@ -0,0 +1,78 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.MongoDriverInformation; +import com.mongodb.annotations.Immutable; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ClusterSettings; +import com.mongodb.event.ClusterListener; + +import java.io.Closeable; + +/** + * A client-side representation of a MongoDB cluster. Instances can represent either a standalone MongoDB instance, a replica set, + * or a sharded cluster. Instances of this class are responsible for maintaining an up-to-date state of the cluster, + * and possibly cache resources related to this, including background threads for monitoring, and connection pools. + *
<p>
+ * Instances of this class serve as factories for {@link MongoDatabase} instances.
+ * </p>
+ * <p>
+ * Instances of this class can be created via the {@link MongoClients} factory.
+ * </p>
+ *
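+ * <p>For example (an illustrative sketch):</p>
+ * <pre>{@code
+ * try (MongoClient client = MongoClients.create("mongodb://localhost")) {
+ *     MongoDatabase database = client.getDatabase("test");
+ *     // use the database
+ * }
+ * }</pre>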
+ * @see MongoClients + * @since 3.7 + */ +@Immutable +public interface MongoClient extends MongoCluster, Closeable { + + /** + * Close the client, which will close all underlying cached resources, including, for example, + * sockets and background monitoring threads. + */ + void close(); + + /** + * Gets the current cluster description. + * + *
<p>
+ * This method will not block, meaning that it may return a {@link ClusterDescription} whose {@code clusterType} is unknown
+ * and whose {@link com.mongodb.connection.ServerDescription}s are all in the connecting state. If the application requires
+ * notifications after the driver has connected to a member of the cluster, it should register a {@link ClusterListener} via
+ * the {@link ClusterSettings} in {@link com.mongodb.MongoClientSettings}.
+ * </p>
+ * + * @return the current cluster description + * @see ClusterSettings.Builder#addClusterListener(ClusterListener) + * @see com.mongodb.MongoClientSettings.Builder#applyToClusterSettings(com.mongodb.Block) + * @since 3.11 + */ + ClusterDescription getClusterDescription(); + + /** + * Appends the provided {@link MongoDriverInformation} to the existing metadata. + *
<p>
+ * This enables frameworks and libraries to include identifying metadata (e.g., name, version, platform) which might be visible in
+ * the MongoD/MongoS logs. This can assist with diagnostics by making client identity visible to the server.
+ * </p>
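+ * <p>For example (illustrative; the name and version are assumptions):</p>
+ * <pre>{@code
+ * client.appendMetadata(MongoDriverInformation.builder()
+ *         .driverName("my-framework")
+ *         .driverVersion("1.0.0")
+ *         .build());
+ * }</pre>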
+ * Note: Metadata is limited to 512 bytes; any excess will be truncated. + * + * @param mongoDriverInformation the driver information to append to the existing metadata + * @since 5.6 + */ + void appendMetadata(MongoDriverInformation mongoDriverInformation); +} diff --git a/driver-sync/src/main/com/mongodb/client/MongoClientFactory.java b/driver-sync/src/main/com/mongodb/client/MongoClientFactory.java new file mode 100644 index 00000000000..975799bcf43 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/MongoClientFactory.java @@ -0,0 +1,99 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.MongoException; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; + +import javax.naming.Context; +import javax.naming.Name; +import javax.naming.RefAddr; +import javax.naming.Reference; +import javax.naming.spi.ObjectFactory; +import java.util.Enumeration; +import java.util.Hashtable; + +import static java.lang.String.format; + +/** + * A JNDI ObjectFactory for {@link MongoClient} instances. + * + * @since 3.10 + */ +public class MongoClientFactory implements ObjectFactory { + + private static final Logger LOGGER = Loggers.getLogger("client"); + + private static final String CONNECTION_STRING = "connectionString"; + + /** + * This implementation will create instances of {@link MongoClient} based on a connection string conforming to the format specified in + * {@link com.mongodb.ConnectionString}. + *
<p>The connection string is specified in one of two ways:</p>
+ *
+ * <ul>
+ *     <li>As the {@code String} value of a property in the {@code environment} parameter with a key of {@code "connectionString"}</li>
+ *     <li>As the {@code String} value of a {@link RefAddr} with type {@code "connectionString"} in an {@code obj} parameter
+ * of type {@link Reference}</li>
+ * </ul>
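+ * <p>For example, the second style might be configured programmatically like this (an illustrative sketch):</p>
+ * <pre>{@code
+ * Reference reference = new Reference(MongoClient.class.getName(),
+ *         new StringRefAddr("connectionString", "mongodb://localhost"));
+ * }</pre>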
+ * + * Specification of the connection string in the {@code environment} parameter takes precedence over specification in the {@code obj} + * parameter. The {@code name} and {@code nameCtx} parameters are ignored. + *
<p>
+ * If a non-empty connection string is not specified in either of these two ways, a {@link MongoException} is thrown. + * @return an instance of {@link MongoClient} based on the specified connection string + */ + @Override + public Object getObjectInstance(final Object obj, final Name name, final Context nameCtx, final Hashtable environment) { + + // Some app servers, e.g. Wildfly, use the environment to pass location information to an ObjectFactory + String connectionString = null; + + if (environment.get(CONNECTION_STRING) instanceof String) { + connectionString = (String) environment.get(CONNECTION_STRING); + } + + if (connectionString == null || connectionString.isEmpty()) { + LOGGER.debug(format("No '%s' property in environment. Casting 'obj' to java.naming.Reference to look for a " + + "javax.naming.RefAddr with type equal to '%s'", CONNECTION_STRING, CONNECTION_STRING)); + + // Some app servers, e.g. Tomcat, pass obj as an instance of javax.naming.Reference and pass location information in a + // javax.naming.RefAddr + if (obj instanceof Reference) { + Enumeration props = ((Reference) obj).getAll(); + + while (props.hasMoreElements()) { + RefAddr addr = props.nextElement(); + if (addr != null) { + if (CONNECTION_STRING.equals(addr.getType())) { + if (addr.getContent() instanceof String) { + connectionString = (String) addr.getContent(); + break; + } + } + } + } + } + } + + if (connectionString == null || connectionString.isEmpty()) { + throw new MongoException(format("Could not locate '%s' in either environment or obj", CONNECTION_STRING)); + } + + return MongoClients.create(connectionString); + } +} diff --git a/driver-sync/src/main/com/mongodb/client/MongoClients.java b/driver-sync/src/main/com/mongodb/client/MongoClients.java new file mode 100644 index 00000000000..e0e59ba5f78 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/MongoClients.java @@ -0,0 +1,134 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.ConnectionString; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoDriverInformation; +import com.mongodb.client.internal.Clusters; +import com.mongodb.client.internal.MongoClientImpl; +import com.mongodb.internal.connection.Cluster; +import com.mongodb.internal.connection.StreamFactoryFactory; +import com.mongodb.lang.Nullable; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.connection.ServerAddressHelper.getInetAddressResolver; +import static com.mongodb.internal.connection.StreamFactoryHelper.getSyncStreamFactoryFactory; + + +/** + * A factory for {@link MongoClient} instances. Use of this class is now the recommended way to connect to MongoDB via the Java driver. + * + * @see MongoClient + * @since 3.7 + */ +public final class MongoClients { + + /** + * Creates a new client with the default connection string "mongodb://localhost". 
+ * + * @return the client + */ + public static MongoClient create() { + return create(new ConnectionString("mongodb://localhost")); + } + + /** + * Create a new client with the given client settings. + * + * @param settings the settings + * @return the client + */ + public static MongoClient create(final MongoClientSettings settings) { + return create(settings, null); + } + + /** + * Create a new client with the given connection string as if by a call to {@link #create(ConnectionString)}. + * + * @param connectionString the connection + * @return the client + * @see #create(ConnectionString) + */ + public static MongoClient create(final String connectionString) { + return create(new ConnectionString(connectionString)); + } + + /** + * Create a new client with the given connection string. + *
<p>
+ * For each of the settings classes configurable via {@link MongoClientSettings}, the connection string is applied by calling the
+ * {@code applyConnectionString} method on an instance of the setting's builder class, building the setting, and adding it to an
+ * instance of {@link com.mongodb.MongoClientSettings.Builder}.
+ * </p>
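+ * <p>That is, roughly equivalent to the following sketch (illustrative):</p>
+ * <pre>{@code
+ * MongoClient client = MongoClients.create(
+ *         MongoClientSettings.builder()
+ *                 .applyConnectionString(new ConnectionString("mongodb://localhost"))
+ *                 .build());
+ * }</pre>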
+ * + * @param connectionString the settings + * @return the client + * + * @see com.mongodb.MongoClientSettings.Builder#applyConnectionString(ConnectionString) + */ + public static MongoClient create(final ConnectionString connectionString) { + return create(connectionString, null); + } + + /** + * Create a new client with the given connection string. + * + *
<p>Note: Intended for driver and library authors to associate extra driver metadata with the connections.</p>
+ * + * @param connectionString the settings + * @param mongoDriverInformation any driver information to associate with the MongoClient + * @return the client + * @see MongoClients#create(ConnectionString) + */ + public static MongoClient create(final ConnectionString connectionString, + @Nullable final MongoDriverInformation mongoDriverInformation) { + return create(MongoClientSettings.builder().applyConnectionString(connectionString).build(), mongoDriverInformation); + } + + /** + * Creates a new client with the given client settings. + * + *
<p>Note: Intended for driver and library authors to associate extra driver metadata with the connections.</p>
+ * + * @param settings the settings + * @param mongoDriverInformation any driver information to associate with the MongoClient + * @return the client + */ + public static MongoClient create(final MongoClientSettings settings, @Nullable final MongoDriverInformation mongoDriverInformation) { + notNull("settings", settings); + + MongoDriverInformation.Builder builder = mongoDriverInformation == null ? MongoDriverInformation.builder() + : MongoDriverInformation.builder(mongoDriverInformation); + + MongoDriverInformation driverInfo = builder.driverName("sync").build(); + + StreamFactoryFactory syncStreamFactoryFactory = getSyncStreamFactoryFactory( + settings.getTransportSettings(), + getInetAddressResolver(settings)); + + Cluster cluster = Clusters.createCluster( + settings, + driverInfo, + syncStreamFactoryFactory); + + return new MongoClientImpl(cluster, settings, driverInfo, syncStreamFactoryFactory); + } + + private MongoClients() { + } +} diff --git a/driver-sync/src/main/com/mongodb/client/MongoCluster.java b/driver-sync/src/main/com/mongodb/client/MongoCluster.java new file mode 100644 index 00000000000..e86761f8d48 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/MongoCluster.java @@ -0,0 +1,466 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.ClientBulkWriteException; +import com.mongodb.ClientSessionOptions; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoException; +import com.mongodb.MongoNamespace; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Immutable; +import com.mongodb.annotations.Reason; +import com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; +import com.mongodb.client.model.bulk.ClientNamespacedDeleteManyModel; +import com.mongodb.client.model.bulk.ClientNamespacedUpdateManyModel; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; +import com.mongodb.lang.Nullable; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * The client-side representation of a MongoDB cluster operations. + * + *
<p>
+ * The originating {@link MongoClient} is responsible for the closing of resources.
+ * If the originating {@link MongoClient} is closed, then any cluster operations will fail.
+ * </p>
+ * + * @see MongoClient + * @since 5.2 + */ +@Immutable +public interface MongoCluster { + + /** + * Get the codec registry for the MongoCluster. + * + * @return the {@link org.bson.codecs.configuration.CodecRegistry} + * @since 5.2 + */ + CodecRegistry getCodecRegistry(); + + /** + * Get the read preference for the MongoCluster. + * + * @return the {@link com.mongodb.ReadPreference} + * @since 5.2 + */ + ReadPreference getReadPreference(); + + /** + * Get the write concern for the MongoCluster. + * + * @return the {@link com.mongodb.WriteConcern} + * @since 5.2 + */ + WriteConcern getWriteConcern(); + + /** + * Get the read concern for the MongoCluster. + * + * @return the {@link com.mongodb.ReadConcern} + * @since 5.2 + * @mongodb.driver.manual reference/readConcern/ Read Concern + */ + ReadConcern getReadConcern(); + + /** + * The time limit for the full execution of an operation. + * + *

+ * <p>
+ * If not null the following deprecated options will be ignored:
+ * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}
+ * </p>
+ *
+ * <ul>
+ *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+ *     <ul>
+ *       <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+ *       available</li>
+ *       <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+ *       <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+ *       <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+ *       See: cursor.maxTimeMS.</li>
+ *       <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+ *       See: {@link com.mongodb.TransactionOptions#getMaxCommitTime}.</li>
+ *     </ul>
+ *   </li>
+ *   <li>{@code 0} means infinite timeout.</li>
+ *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+ * </ul>
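+ * <p>
+ * A minimal reading sketch (assumes an existing {@code MongoClient}, which extends this interface):
+ * </p>
+ * <pre>{@code
+ * // null means the deprecated per-operation timeouts listed above still apply
+ * Long timeoutMs = client.getTimeout(TimeUnit.MILLISECONDS);
+ * }</pre>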
+ * + * @param timeUnit the time unit + * @return the timeout in the given time unit + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + @Nullable + Long getTimeout(TimeUnit timeUnit); + + /** + * Create a new MongoCluster instance with a different codec registry. + * + *

+ * <p>
+ * The {@link CodecRegistry} configured by this method is effectively treated by the driver as an instance of
+ * {@link org.bson.codecs.configuration.CodecProvider}, which {@link CodecRegistry} extends. So there is no benefit to defining
+ * a class that implements {@link CodecRegistry}. Rather, an application should always create {@link CodecRegistry} instances
+ * using the factory methods in {@link org.bson.codecs.configuration.CodecRegistries}.
+ * </p>
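+ * <p>
+ * For example, a sketch that layers a POJO provider over the default registry ({@code client} is an assumed existing instance):
+ * </p>
+ * <pre>{@code
+ * CodecRegistry pojoRegistry = CodecRegistries.fromRegistries(
+ *         MongoClientSettings.getDefaultCodecRegistry(),
+ *         CodecRegistries.fromProviders(PojoCodecProvider.builder().automatic(true).build()));
+ * MongoCluster withPojos = client.withCodecRegistry(pojoRegistry);
+ * }</pre>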

+ * + * @param codecRegistry the new {@link org.bson.codecs.configuration.CodecRegistry} for the database + * @return a new MongoCluster instance with the different codec registry + * @see org.bson.codecs.configuration.CodecRegistries + * @since 5.2 + */ + MongoCluster withCodecRegistry(CodecRegistry codecRegistry); + + /** + * Create a new MongoCluster instance with a different read preference. + * + * @param readPreference the new {@link ReadPreference} for the database + * @return a new MongoCluster instance with the different readPreference + * @since 5.2 + */ + MongoCluster withReadPreference(ReadPreference readPreference); + + /** + * Create a new MongoCluster instance with a different write concern. + * + * @param writeConcern the new {@link WriteConcern} for the database + * @return a new MongoCluster instance with the different writeConcern + * @since 5.2 + */ + MongoCluster withWriteConcern(WriteConcern writeConcern); + + /** + * Create a new MongoCluster instance with a different read concern. + * + * @param readConcern the new {@link ReadConcern} for the database + * @return a new MongoCluster instance with the different ReadConcern + * @since 5.2 + * @mongodb.driver.manual reference/readConcern/ Read Concern + */ + MongoCluster withReadConcern(ReadConcern readConcern); + + /** + * Create a new MongoCluster instance with the set time limit for the full execution of an operation. + * + *
    + *
+ * <ul>
+ *   <li>{@code 0} means infinite timeout.</li>
+ *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+ * </ul>
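+ * <p>
+ * A sketch ({@code client} is an assumed existing instance):
+ * </p>
+ * <pre>{@code
+ * // operations through this view are subject to a 5 second overall limit
+ * MongoCluster withLimit = client.withTimeout(5, TimeUnit.SECONDS);
+ * }</pre>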
+ * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit + * @return a new MongoCluster instance with the set time limit for the full execution of an operation. + * @since 5.2 + * @see #getTimeout + */ + @Alpha(Reason.CLIENT) + MongoCluster withTimeout(long timeout, TimeUnit timeUnit); + + /** + * Gets a {@link MongoDatabase} instance for the given database name. + * + * @param databaseName the name of the database to retrieve + * @return a {@code MongoDatabase} representing the specified database + * @throws IllegalArgumentException if databaseName is invalid + * @see MongoNamespace#checkDatabaseNameValidity(String) + */ + MongoDatabase getDatabase(String databaseName); + + /** + * Creates a client session with default options. + * + *

+ * <p>
+ * Note: A ClientSession instance can not be used concurrently in multiple operations.
+ * </p>
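+ * <p>
+ * A sketch; {@code ClientSession} is {@code Closeable}, so try-with-resources keeps it single-use
+ * ({@code client} and {@code collection} are assumed existing instances):
+ * </p>
+ * <pre>{@code
+ * try (ClientSession session = client.startSession()) {
+ *     collection.insertOne(session, new Document("x", 1));
+ * }
+ * }</pre>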

+ * + * @return the client session + * @mongodb.server.release 3.6 + */ + ClientSession startSession(); + + /** + * Creates a client session. + * + *

+ * <p>
+ * Note: A ClientSession instance can not be used concurrently in multiple operations.
+ * </p>
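+ * <p>
+ * For example, a causally consistent session (a sketch; {@code client} is an assumed existing instance):
+ * </p>
+ * <pre>{@code
+ * ClientSessionOptions options = ClientSessionOptions.builder()
+ *         .causallyConsistent(true)
+ *         .build();
+ * try (ClientSession session = client.startSession(options)) {
+ *     // associate the session with individual operations
+ * }
+ * }</pre>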

+ * + * @param options the options for the client session + * @return the client session + * @mongodb.server.release 3.6 + */ + ClientSession startSession(ClientSessionOptions options); + + /** + * Get a list of the database names + * + * @return an iterable containing all the names of all the databases + * @mongodb.driver.manual reference/command/listDatabases List Databases + */ + MongoIterable listDatabaseNames(); + + /** + * Get a list of the database names + * + * @param clientSession the client session with which to associate this operation + * @return an iterable containing all the names of all the databases + * @mongodb.driver.manual reference/command/listDatabases List Databases + * @mongodb.server.release 3.6 + */ + MongoIterable listDatabaseNames(ClientSession clientSession); + + /** + * Gets the list of databases + * + * @return the list databases iterable interface + */ + ListDatabasesIterable listDatabases(); + + /** + * Gets the list of databases + * + * @param clientSession the client session with which to associate this operation + * @return the list databases iterable interface + * @mongodb.driver.manual reference/command/listDatabases List Databases + * @mongodb.server.release 3.6 + */ + ListDatabasesIterable listDatabases(ClientSession clientSession); + + /** + * Gets the list of databases + * + * @param resultClass the class to cast the database documents to + * @param the type of the class to use instead of {@code Document}. + * @return the list databases iterable interface + */ + ListDatabasesIterable listDatabases(Class resultClass); + + /** + * Gets the list of databases + * + * @param clientSession the client session with which to associate this operation + * @param resultClass the class to cast the database documents to + * @param the type of the class to use instead of {@code Document}. + * @return the list databases iterable interface + * @mongodb.driver.manual reference/command/listDatabases List Databases + * @mongodb.server.release 3.6 + */ + ListDatabasesIterable listDatabases(ClientSession clientSession, Class resultClass); + + /** + * Creates a change stream for this client. + * + * @return the change stream iterable + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.8 + * @mongodb.server.release 4.0 + */ + ChangeStreamIterable watch(); + + /** + * Creates a change stream for this client. + * + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.8 + * @mongodb.server.release 4.0 + */ + ChangeStreamIterable watch(Class resultClass); + + /** + * Creates a change stream for this client. + * + * @param pipeline the aggregation pipeline to apply to the change stream. + * @return the change stream iterable + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.8 + * @mongodb.server.release 4.0 + */ + ChangeStreamIterable watch(List pipeline); + + /** + * Creates a change stream for this client. + * + * @param pipeline the aggregation pipeline to apply to the change stream + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.8 + * @mongodb.server.release 4.0 + */ + ChangeStreamIterable watch(List pipeline, Class resultClass); + + /** + * Creates a change stream for this client. 
+ * + * @param clientSession the client session with which to associate this operation + * @return the change stream iterable + * @since 3.8 + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + */ + ChangeStreamIterable watch(ClientSession clientSession); + + /** + * Creates a change stream for this client. + * + * @param clientSession the client session with which to associate this operation + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @since 3.8 + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + */ + ChangeStreamIterable watch(ClientSession clientSession, Class resultClass); + + /** + * Creates a change stream for this client. + * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream. + * @return the change stream iterable + * @since 3.8 + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + */ + ChangeStreamIterable watch(ClientSession clientSession, List pipeline); + + /** + * Creates a change stream for this client. + * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @since 3.8 + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + */ + ChangeStreamIterable watch(ClientSession clientSession, List pipeline, Class resultClass); + + /** + * Executes a client-level bulk write operation. + * This method is functionally equivalent to {@link #bulkWrite(List, ClientBulkWriteOptions)} + * with the {@linkplain ClientBulkWriteOptions#clientBulkWriteOptions() default options}. + *

+ * <p>
+ * This operation supports {@linkplain MongoClientSettings#getRetryWrites() retryable writes}.
+ * Depending on the number of {@code models}, encoded size of {@code models}, and the size limits in effect,
+ * executing this operation may require multiple {@code bulkWrite} commands.
+ * The eligibility for retries is determined per each {@code bulkWrite} command:
+ * {@link ClientNamespacedUpdateManyModel}, {@link ClientNamespacedDeleteManyModel} in a command render it non-retryable.
+ * </p>
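+ * <p>
+ * A sketch spanning two namespaces (database, collection, and field names are illustrative):
+ * </p>
+ * <pre>{@code
+ * ClientBulkWriteResult result = client.bulkWrite(Arrays.asList(
+ *         ClientNamespacedWriteModel.insertOne(
+ *                 new MongoNamespace("db", "people"), new Document("name", "Ada")),
+ *         ClientNamespacedWriteModel.deleteOne(
+ *                 new MongoNamespace("db", "events"), Filters.eq("expired", true))));
+ * }</pre>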

+ * + * @param models The {@linkplain ClientNamespacedWriteModel individual write operations}. + * @return The {@link ClientBulkWriteResult} if the operation is successful. + * @throws ClientBulkWriteException If and only if the operation is unsuccessful or partially unsuccessful, + * and there is at least one of the following pieces of information to report: + * {@link ClientBulkWriteException#getWriteConcernErrors()}, {@link ClientBulkWriteException#getWriteErrors()}, + * {@link ClientBulkWriteException#getPartialResult()}. + * @throws MongoException Only if the operation is unsuccessful. + * @since 5.3 + * @mongodb.server.release 8.0 + * @mongodb.driver.manual reference/command/bulkWrite/ bulkWrite + */ + ClientBulkWriteResult bulkWrite(List models) throws ClientBulkWriteException; + + /** + * Executes a client-level bulk write operation. + *

+ * <p>
+ * This operation supports {@linkplain MongoClientSettings#getRetryWrites() retryable writes}.
+ * Depending on the number of {@code models}, encoded size of {@code models}, and the size limits in effect,
+ * executing this operation may require multiple {@code bulkWrite} commands.
+ * The eligibility for retries is determined per each {@code bulkWrite} command:
+ * {@link ClientNamespacedUpdateManyModel}, {@link ClientNamespacedDeleteManyModel} in a command render it non-retryable.
+ * </p>
+ * + * @param models The {@linkplain ClientNamespacedWriteModel individual write operations}. + * @param options The options. + * @return The {@link ClientBulkWriteResult} if the operation is successful. + * @throws ClientBulkWriteException If and only if the operation is unsuccessful or partially unsuccessful, + * and there is at least one of the following pieces of information to report: + * {@link ClientBulkWriteException#getWriteConcernErrors()}, {@link ClientBulkWriteException#getWriteErrors()}, + * {@link ClientBulkWriteException#getPartialResult()}. + * @throws MongoException Only if the operation is unsuccessful. + * @since 5.3 + * @mongodb.server.release 8.0 + * @mongodb.driver.manual reference/command/bulkWrite/ bulkWrite + */ + ClientBulkWriteResult bulkWrite( + List models, + ClientBulkWriteOptions options) throws ClientBulkWriteException; + + /** + * Executes a client-level bulk write operation. + * This method is functionally equivalent to {@link #bulkWrite(ClientSession, List, ClientBulkWriteOptions)} + * with the {@linkplain ClientBulkWriteOptions#clientBulkWriteOptions() default options}. + *

+ * <p>
+ * This operation supports {@linkplain MongoClientSettings#getRetryWrites() retryable writes}.
+ * Depending on the number of {@code models}, encoded size of {@code models}, and the size limits in effect,
+ * executing this operation may require multiple {@code bulkWrite} commands.
+ * The eligibility for retries is determined per each {@code bulkWrite} command:
+ * {@link ClientNamespacedUpdateManyModel}, {@link ClientNamespacedDeleteManyModel} in a command render it non-retryable.
+ * </p>
+ * + * @param clientSession The {@linkplain ClientSession client session} with which to associate this operation. + * @param models The {@linkplain ClientNamespacedWriteModel individual write operations}. + * @return The {@link ClientBulkWriteResult} if the operation is successful. + * @throws ClientBulkWriteException If and only if the operation is unsuccessful or partially unsuccessful, + * and there is at least one of the following pieces of information to report: + * {@link ClientBulkWriteException#getWriteConcernErrors()}, {@link ClientBulkWriteException#getWriteErrors()}, + * {@link ClientBulkWriteException#getPartialResult()}. + * @throws MongoException Only if the operation is unsuccessful. + * @since 5.3 + * @mongodb.server.release 8.0 + * @mongodb.driver.manual reference/command/bulkWrite/ bulkWrite + */ + ClientBulkWriteResult bulkWrite( + ClientSession clientSession, + List models) throws ClientBulkWriteException; + + /** + * Executes a client-level bulk write operation. + *

+ * <p>
+ * This operation supports {@linkplain MongoClientSettings#getRetryWrites() retryable writes}.
+ * Depending on the number of {@code models}, encoded size of {@code models}, and the size limits in effect,
+ * executing this operation may require multiple {@code bulkWrite} commands.
+ * The eligibility for retries is determined per each {@code bulkWrite} command:
+ * {@link ClientNamespacedUpdateManyModel}, {@link ClientNamespacedDeleteManyModel} in a command render it non-retryable.
+ * </p>
+ * + * @param clientSession The {@linkplain ClientSession client session} with which to associate this operation. + * @param models The {@linkplain ClientNamespacedWriteModel individual write operations}. + * @param options The options. + * @return The {@link ClientBulkWriteResult} if the operation is successful. + * @throws ClientBulkWriteException If and only if the operation is unsuccessful or partially unsuccessful, + * and there is at least one of the following pieces of information to report: + * {@link ClientBulkWriteException#getWriteConcernErrors()}, {@link ClientBulkWriteException#getWriteErrors()}, + * {@link ClientBulkWriteException#getPartialResult()}. + * @throws MongoException Only if the operation is unsuccessful. + * @since 5.3 + * @mongodb.server.release 8.0 + * @mongodb.driver.manual reference/command/bulkWrite/ bulkWrite + */ + ClientBulkWriteResult bulkWrite( + ClientSession clientSession, + List models, + ClientBulkWriteOptions options) throws ClientBulkWriteException; +} diff --git a/driver-sync/src/main/com/mongodb/client/MongoCollection.java b/driver-sync/src/main/com/mongodb/client/MongoCollection.java new file mode 100644 index 00000000000..0d3248b613f --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/MongoCollection.java @@ -0,0 +1,2124 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client; + +import com.mongodb.MongoNamespace; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.bulk.BulkWriteResult; +import com.mongodb.client.model.BulkWriteOptions; +import com.mongodb.client.model.CountOptions; +import com.mongodb.client.model.CreateIndexOptions; +import com.mongodb.client.model.DeleteOptions; +import com.mongodb.client.model.DropCollectionOptions; +import com.mongodb.client.model.DropIndexOptions; +import com.mongodb.client.model.EstimatedDocumentCountOptions; +import com.mongodb.client.model.FindOneAndDeleteOptions; +import com.mongodb.client.model.FindOneAndReplaceOptions; +import com.mongodb.client.model.FindOneAndUpdateOptions; +import com.mongodb.client.model.IndexModel; +import com.mongodb.client.model.IndexOptions; +import com.mongodb.client.model.InsertManyOptions; +import com.mongodb.client.model.InsertOneOptions; +import com.mongodb.client.model.RenameCollectionOptions; +import com.mongodb.client.model.ReplaceOptions; +import com.mongodb.client.model.SearchIndexModel; +import com.mongodb.client.model.UpdateOptions; +import com.mongodb.client.model.WriteModel; +import com.mongodb.client.result.DeleteResult; +import com.mongodb.client.result.InsertManyResult; +import com.mongodb.client.result.InsertOneResult; +import com.mongodb.client.result.UpdateResult; +import com.mongodb.lang.Nullable; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * The MongoCollection interface. + * + *

+ * <p>
+ * Note: Additions to this interface will not be considered to break binary compatibility.
+ * </p>
+ *
+ * <p>
+ * MongoCollection is generic allowing for different types to represent documents. Any custom classes must have a
+ * {@link org.bson.codecs.Codec} registered in the {@link CodecRegistry}.
+ * </p>
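+ * <p>
+ * For example, a sketch of a collection typed to a hypothetical {@code Person} class with a registered codec
+ * ({@code database} is an assumed existing {@code MongoDatabase}):
+ * </p>
+ * <pre>{@code
+ * MongoCollection<Person> people = database.getCollection("people", Person.class);
+ * }</pre>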

+ * + * @param The type that this collection will encode documents from and decode documents to. + * @since 3.0 + */ +@ThreadSafe +public interface MongoCollection { + + /** + * Gets the namespace of this collection. + * + * @return the namespace + */ + MongoNamespace getNamespace(); + + /** + * Get the class of documents stored in this collection. + * + * @return the class + */ + Class getDocumentClass(); + + /** + * Get the codec registry for the MongoCollection. + * + * @return the {@link org.bson.codecs.configuration.CodecRegistry} + */ + CodecRegistry getCodecRegistry(); + + /** + * Get the read preference for the MongoCollection. + * + * @return the {@link com.mongodb.ReadPreference} + */ + ReadPreference getReadPreference(); + + /** + * Get the write concern for the MongoCollection. + * + * @return the {@link com.mongodb.WriteConcern} + */ + WriteConcern getWriteConcern(); + + /** + * Get the read concern for the MongoCollection. + * + * @return the {@link com.mongodb.ReadConcern} + * @since 3.2 + * @mongodb.server.release 3.2 + * @mongodb.driver.manual reference/readConcern/ Read Concern + */ + ReadConcern getReadConcern(); + + /** + * The time limit for the full execution of an operation. + * + *

+ * <p>
+ * If not null the following deprecated options will be ignored:
+ * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}
+ * </p>
+ *
+ * <ul>
+ *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+ *     <ul>
+ *       <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+ *       available</li>
+ *       <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+ *       <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+ *       <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+ *       See: cursor.maxTimeMS.</li>
+ *       <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+ *       See: {@link com.mongodb.TransactionOptions#getMaxCommitTime}.</li>
+ *     </ul>
+ *   </li>
+ *   <li>{@code 0} means infinite timeout.</li>
+ *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+ * </ul>
+ * + * @param timeUnit the time unit + * @return the timeout in the given time unit + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + @Nullable + Long getTimeout(TimeUnit timeUnit); + + /** + * Create a new MongoCollection instance with a different default class to cast any documents returned from the database into.. + * + * @param clazz the default class to cast any documents returned from the database into. + * @param The type that the new collection will encode documents from and decode documents to + * @return a new MongoCollection instance with the different default class + */ + MongoCollection withDocumentClass(Class clazz); + + /** + * Create a new MongoCollection instance with a different codec registry. + * + *

+ * <p>
+ * The {@link CodecRegistry} configured by this method is effectively treated by the driver as an instance of
+ * {@link org.bson.codecs.configuration.CodecProvider}, which {@link CodecRegistry} extends. So there is no benefit to defining
+ * a class that implements {@link CodecRegistry}. Rather, an application should always create {@link CodecRegistry} instances
+ * using the factory methods in {@link org.bson.codecs.configuration.CodecRegistries}.
+ * </p>
+ * + * @param codecRegistry the new {@link org.bson.codecs.configuration.CodecRegistry} for the collection + * @return a new MongoCollection instance with the different codec registry + * @see org.bson.codecs.configuration.CodecRegistries + */ + MongoCollection withCodecRegistry(CodecRegistry codecRegistry); + + /** + * Create a new MongoCollection instance with a different read preference. + * + * @param readPreference the new {@link com.mongodb.ReadPreference} for the collection + * @return a new MongoCollection instance with the different readPreference + */ + MongoCollection withReadPreference(ReadPreference readPreference); + + /** + * Create a new MongoCollection instance with a different write concern. + * + * @param writeConcern the new {@link com.mongodb.WriteConcern} for the collection + * @return a new MongoCollection instance with the different writeConcern + */ + MongoCollection withWriteConcern(WriteConcern writeConcern); + + /** + * Create a new MongoCollection instance with a different read concern. + * + * @param readConcern the new {@link ReadConcern} for the collection + * @return a new MongoCollection instance with the different ReadConcern + * @since 3.2 + * @mongodb.server.release 3.2 + * @mongodb.driver.manual reference/readConcern/ Read Concern + */ + MongoCollection withReadConcern(ReadConcern readConcern); + + /** + * Create a new MongoCollection instance with the set time limit for the full execution of an operation. + * + *
    + *
+ * <ul>
+ *   <li>{@code 0} means infinite timeout.</li>
+ *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+ * </ul>
+ * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit + * @return a new MongoCollection instance with the set time limit for the full execution of an operation + * @since 5.2 + * @see #getTimeout + */ + @Alpha(Reason.CLIENT) + MongoCollection withTimeout(long timeout, TimeUnit timeUnit); + + /** + * Counts the number of documents in the collection. + * + *

+ * <p>
+ * Note: For a fast count of the total documents in a collection see {@link #estimatedDocumentCount()}.
+ * When migrating from {@code count()} to {@code countDocuments()} the following query operators must be replaced:
+ * </p>
+ * <pre>
+     *
+     *  +-------------+--------------------------------+
+     *  | Operator    | Replacement                    |
+     *  +=============+================================+
+     *  | $where      |  $expr                         |
+     *  +-------------+--------------------------------+
+     *  | $near       |  $geoWithin with $center       |
+     *  +-------------+--------------------------------+
+     *  | $nearSphere |  $geoWithin with $centerSphere |
+     *  +-------------+--------------------------------+
+     * </pre>
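+ * <p>
+ * A sketch of the {@code $where} row above, expressed with {@code $expr} ({@code collection} and the field names are illustrative):
+ * </p>
+ * <pre>{@code
+ * long count = collection.countDocuments(Filters.expr(
+ *         new Document("$gt", Arrays.asList("$qty", "$reorderLevel"))));
+ * }</pre>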
+ * + * @return the number of documents in the collection + * @since 3.8 + */ + long countDocuments(); + + /** + * Counts the number of documents in the collection according to the given options. + * + *

+ * <p>
+ * Note: For a fast count of the total documents in a collection see {@link #estimatedDocumentCount()}.
+ * When migrating from {@code count()} to {@code countDocuments()} the following query operators must be replaced:
+ * </p>
+ * <pre>
+     *
+     *  +-------------+--------------------------------+
+     *  | Operator    | Replacement                    |
+     *  +=============+================================+
+     *  | $where      |  $expr                         |
+     *  +-------------+--------------------------------+
+     *  | $near       |  $geoWithin with $center       |
+     *  +-------------+--------------------------------+
+     *  | $nearSphere |  $geoWithin with $centerSphere |
+     *  +-------------+--------------------------------+
+     * </pre>
+ * + * @param filter the query filter + * @return the number of documents in the collection + * @since 3.8 + */ + long countDocuments(Bson filter); + + /** + * Counts the number of documents in the collection according to the given options. + * + *

+ * <p>
+ * Note: For a fast count of the total documents in a collection see {@link #estimatedDocumentCount()}.
+ * When migrating from {@code count()} to {@code countDocuments()} the following query operators must be replaced:
+ * </p>
+ * <pre>
+     *
+     *  +-------------+--------------------------------+
+     *  | Operator    | Replacement                    |
+     *  +=============+================================+
+     *  | $where      |  $expr                         |
+     *  +-------------+--------------------------------+
+     *  | $near       |  $geoWithin with $center       |
+     *  +-------------+--------------------------------+
+     *  | $nearSphere |  $geoWithin with $centerSphere |
+     *  +-------------+--------------------------------+
+     * </pre>
+ * + * @param filter the query filter + * @param options the options describing the count + * @return the number of documents in the collection + * @since 3.8 + */ + long countDocuments(Bson filter, CountOptions options); + + /** + * Counts the number of documents in the collection. + * + *

+ * <p>
+ * Note: For a fast count of the total documents in a collection see {@link #estimatedDocumentCount()}.
+ * When migrating from {@code count()} to {@code countDocuments()} the following query operators must be replaced:
+ * </p>
+ * <pre>
+     *
+     *  +-------------+--------------------------------+
+     *  | Operator    | Replacement                    |
+     *  +=============+================================+
+     *  | $where      |  $expr                         |
+     *  +-------------+--------------------------------+
+     *  | $near       |  $geoWithin with $center       |
+     *  +-------------+--------------------------------+
+     *  | $nearSphere |  $geoWithin with $centerSphere |
+     *  +-------------+--------------------------------+
+     * </pre>
+ * + * @param clientSession the client session with which to associate this operation + * @return the number of documents in the collection + * @since 3.8 + * @mongodb.server.release 3.6 + */ + long countDocuments(ClientSession clientSession); + + /** + * Counts the number of documents in the collection according to the given options. + * + *

+ * <p>
+ * Note: For a fast count of the total documents in a collection see {@link #estimatedDocumentCount()}.
+ * When migrating from {@code count()} to {@code countDocuments()} the following query operators must be replaced:
+ * </p>
+ * <pre>
+     *
+     *  +-------------+--------------------------------+
+     *  | Operator    | Replacement                    |
+     *  +=============+================================+
+     *  | $where      |  $expr                         |
+     *  +-------------+--------------------------------+
+     *  | $near       |  $geoWithin with $center       |
+     *  +-------------+--------------------------------+
+     *  | $nearSphere |  $geoWithin with $centerSphere |
+     *  +-------------+--------------------------------+
+     * </pre>
+ * + * @param clientSession the client session with which to associate this operation + * @param filter the query filter + * @return the number of documents in the collection + * @since 3.8 + * @mongodb.server.release 3.6 + */ + long countDocuments(ClientSession clientSession, Bson filter); + + /** + * Counts the number of documents in the collection according to the given options. + * + *

+ * <p>
+ * Note: For a fast count of the total documents in a collection see {@link #estimatedDocumentCount()}.
+ * When migrating from {@code count()} to {@code countDocuments()} the following query operators must be replaced:
+ * </p>
+ * <pre>
+     *
+     *  +-------------+--------------------------------+
+     *  | Operator    | Replacement                    |
+     *  +=============+================================+
+     *  | $where      |  $expr                         |
+     *  +-------------+--------------------------------+
+     *  | $near       |  $geoWithin with $center       |
+     *  +-------------+--------------------------------+
+     *  | $nearSphere |  $geoWithin with $centerSphere |
+     *  +-------------+--------------------------------+
+     * </pre>
+ * + * @param clientSession the client session with which to associate this operation + * @param filter the query filter + * @param options the options describing the count + * @return the number of documents in the collection + * @since 3.8 + * @mongodb.server.release 3.6 + */ + long countDocuments(ClientSession clientSession, Bson filter, CountOptions options); + + /** + * Gets an estimate of the count of documents in a collection using collection metadata. + * + *

+ * <p>
+ * Implementation note: this method is implemented using the MongoDB server's count command
+ * </p>
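+ * <p>
+ * A sketch contrasting the two counts ({@code collection} is an assumed existing instance):
+ * </p>
+ * <pre>{@code
+ * long fastEstimate = collection.estimatedDocumentCount(); // metadata-based
+ * long exact = collection.countDocuments();                // exact, but slower
+ * }</pre>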

+ * + * @return the number of documents in the collection + * @since 3.8 + * @mongodb.driver.manual manual/reference/command/count/#behavior + */ + long estimatedDocumentCount(); + + /** + * Gets an estimate of the count of documents in a collection using collection metadata. + * + *

+ * <p>
+ * Implementation note: this method is implemented using the MongoDB server's count command
+ * </p>
+ * + * @param options the options describing the count + * @return the number of documents in the collection + * @since 3.8 + * @mongodb.driver.manual manual/reference/command/count/#behavior + */ + long estimatedDocumentCount(EstimatedDocumentCountOptions options); + + /** + * Gets the distinct values of the specified field name. + * + * @param fieldName the field name + * @param resultClass the class to cast any distinct items into. + * @param the target type of the iterable. + * @return an iterable of distinct values + * @mongodb.driver.manual reference/command/distinct/ Distinct + */ + DistinctIterable distinct(String fieldName, Class resultClass); + + /** + * Gets the distinct values of the specified field name. + * + * @param fieldName the field name + * @param filter the query filter + * @param resultClass the class to cast any distinct items into. + * @param the target type of the iterable. + * @return an iterable of distinct values + * @mongodb.driver.manual reference/command/distinct/ Distinct + */ + DistinctIterable distinct(String fieldName, Bson filter, Class resultClass); + + /** + * Gets the distinct values of the specified field name. + * + * @param clientSession the client session with which to associate this operation + * @param fieldName the field name + * @param resultClass the class to cast any distinct items into. + * @param the target type of the iterable. + * @return an iterable of distinct values + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual reference/command/distinct/ Distinct + */ + DistinctIterable distinct(ClientSession clientSession, String fieldName, Class resultClass); + + /** + * Gets the distinct values of the specified field name. + * + * @param clientSession the client session with which to associate this operation + * @param fieldName the field name + * @param filter the query filter + * @param resultClass the class to cast any distinct items into. + * @param the target type of the iterable. + * @return an iterable of distinct values + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual reference/command/distinct/ Distinct + */ + DistinctIterable distinct(ClientSession clientSession, String fieldName, Bson filter, Class resultClass); + + /** + * Finds all documents in the collection. + * + * @return the find iterable interface + * @mongodb.driver.manual tutorial/query-documents/ Find + */ + FindIterable find(); + + /** + * Finds all documents in the collection. + * + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the find iterable interface + * @mongodb.driver.manual tutorial/query-documents/ Find + */ + FindIterable find(Class resultClass); + + /** + * Finds all documents in the collection. + * + * @param filter the query filter + * @return the find iterable interface + * @mongodb.driver.manual tutorial/query-documents/ Find + */ + FindIterable find(Bson filter); + + /** + * Finds all documents in the collection. + * + * @param filter the query filter + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the find iterable interface + * @mongodb.driver.manual tutorial/query-documents/ Find + */ + FindIterable find(Bson filter, Class resultClass); + + /** + * Finds all documents in the collection. 
+ * + * @param clientSession the client session with which to associate this operation + * @return the find iterable interface + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual tutorial/query-documents/ Find + */ + FindIterable find(ClientSession clientSession); + + /** + * Finds all documents in the collection. + * + * @param clientSession the client session with which to associate this operation + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the find iterable interface + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual tutorial/query-documents/ Find + */ + FindIterable find(ClientSession clientSession, Class resultClass); + + /** + * Finds all documents in the collection. + * + * @param clientSession the client session with which to associate this operation + * @param filter the query filter + * @return the find iterable interface + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual tutorial/query-documents/ Find + */ + FindIterable find(ClientSession clientSession, Bson filter); + + /** + * Finds all documents in the collection. + * + * @param clientSession the client session with which to associate this operation + * @param filter the query filter + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the find iterable interface + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual tutorial/query-documents/ Find + */ + FindIterable find(ClientSession clientSession, Bson filter, Class resultClass); + + /** + * Aggregates documents according to the specified aggregation pipeline. + * + * @param pipeline the aggregation pipeline + * @return an iterable containing the result of the aggregation operation + * @mongodb.driver.manual aggregation/ Aggregation + * @mongodb.server.release 2.2 + */ + AggregateIterable aggregate(List pipeline); + + /** + * Aggregates documents according to the specified aggregation pipeline. + * + * @param pipeline the aggregation pipeline + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return an iterable containing the result of the aggregation operation + * @mongodb.driver.manual aggregation/ Aggregation + * @mongodb.server.release 2.2 + */ + AggregateIterable aggregate(List pipeline, Class resultClass); + + /** + * Aggregates documents according to the specified aggregation pipeline. + * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline + * @return an iterable containing the result of the aggregation operation + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual aggregation/ Aggregation + */ + AggregateIterable aggregate(ClientSession clientSession, List pipeline); + + /** + * Aggregates documents according to the specified aggregation pipeline. + * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. 
+ * @return an iterable containing the result of the aggregation operation + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual aggregation/ Aggregation + */ + AggregateIterable aggregate(ClientSession clientSession, List pipeline, Class resultClass); + + /** + * Creates a change stream for this collection. + * + * @return the change stream iterable + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.6 + */ + ChangeStreamIterable watch(); + + /** + * Creates a change stream for this collection. + * + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.6 + */ + ChangeStreamIterable watch(Class resultClass); + + /** + * Creates a change stream for this collection. + * + * @param pipeline the aggregation pipeline to apply to the change stream. + * @return the change stream iterable + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.6 + */ + ChangeStreamIterable watch(List pipeline); + + /** + * Creates a change stream for this collection. + * + * @param pipeline the aggregation pipeline to apply to the change stream + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.6 + */ + ChangeStreamIterable watch(List pipeline, Class resultClass); + + /** + * Creates a change stream for this collection. + * + * @param clientSession the client session with which to associate this operation + * @return the change stream iterable + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.dochub core/changestreams Change Streams + */ + ChangeStreamIterable watch(ClientSession clientSession); + + /** + * Creates a change stream for this collection. + * + * @param clientSession the client session with which to associate this operation + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.dochub core/changestreams Change Streams + */ + ChangeStreamIterable watch(ClientSession clientSession, Class resultClass); + + /** + * Creates a change stream for this collection. + * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream. + * @return the change stream iterable + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.dochub core/changestreams Change Streams + */ + ChangeStreamIterable watch(ClientSession clientSession, List pipeline); + + /** + * Creates a change stream for this collection. + * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.dochub core/changestreams Change Streams + */ + ChangeStreamIterable watch(ClientSession clientSession, List pipeline, Class resultClass); + + /** + * Aggregates documents according to the specified map-reduce function. 
+ * + * @param mapFunction A JavaScript function that associates or "maps" a value with a key and emits the key and value pair. + * @param reduceFunction A JavaScript function that "reduces" to a single object all the values associated with a particular key. + * @return an iterable containing the result of the map-reduce operation + * @mongodb.driver.manual reference/command/mapReduce/ map-reduce + * @deprecated Superseded by aggregate + */ + @Deprecated + MapReduceIterable mapReduce(String mapFunction, String reduceFunction); + + /** + * Aggregates documents according to the specified map-reduce function. + * + * @param mapFunction A JavaScript function that associates or "maps" a value with a key and emits the key and value pair. + * @param reduceFunction A JavaScript function that "reduces" to a single object all the values associated with a particular key. + * @param resultClass the class to decode each resulting document into. + * @param the target document type of the iterable. + * @return an iterable containing the result of the map-reduce operation + * @mongodb.driver.manual reference/command/mapReduce/ map-reduce + * @deprecated Superseded by aggregate + */ + @Deprecated + MapReduceIterable mapReduce(String mapFunction, String reduceFunction, Class resultClass); + + /** + * Aggregates documents according to the specified map-reduce function. + * + * @param clientSession the client session with which to associate this operation + * @param mapFunction A JavaScript function that associates or "maps" a value with a key and emits the key and value pair. + * @param reduceFunction A JavaScript function that "reduces" to a single object all the values associated with a particular key. + * @return an iterable containing the result of the map-reduce operation + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual reference/command/mapReduce/ map-reduce + * @deprecated Superseded by aggregate + */ + @Deprecated + MapReduceIterable mapReduce(ClientSession clientSession, String mapFunction, String reduceFunction); + + /** + * Aggregates documents according to the specified map-reduce function. + * + * @param clientSession the client session with which to associate this operation + * @param mapFunction A JavaScript function that associates or "maps" a value with a key and emits the key and value pair. + * @param reduceFunction A JavaScript function that "reduces" to a single object all the values associated with a particular key. + * @param resultClass the class to decode each resulting document into. + * @param the target document type of the iterable. + * @return an iterable containing the result of the map-reduce operation + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual reference/command/mapReduce/ map-reduce + * @deprecated Superseded by aggregate + */ + @Deprecated + MapReduceIterable mapReduce(ClientSession clientSession, String mapFunction, String reduceFunction, + Class resultClass); + + /** + * Executes a mix of inserts, updates, replaces, and deletes. + * + *

+ * <p>
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ * The eligibility for retryable write support for bulk operations is determined on the whole bulk write. If the {@code requests}
+ * contain any {@code UpdateManyModels} or {@code DeleteManyModels} then the bulk operation will not support retryable writes.
+ * </p>
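+ * <p>
+ * A sketch of a mixed, ordered bulk write (filters and values are illustrative):
+ * </p>
+ * <pre>{@code
+ * BulkWriteResult result = collection.bulkWrite(Arrays.asList(
+ *         new InsertOneModel<>(new Document("_id", 4)),
+ *         new UpdateOneModel<>(Filters.eq("_id", 1), Updates.set("x", 2)),
+ *         new DeleteOneModel<>(Filters.eq("_id", 2))));
+ * }</pre>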

+ * @param requests the writes to execute + * @return the result of the bulk write + * @throws com.mongodb.MongoBulkWriteException if there's an exception in the bulk write operation + * @throws com.mongodb.MongoException if there's an exception running the operation + */ + BulkWriteResult bulkWrite(List> requests); + + /** + * Executes a mix of inserts, updates, replaces, and deletes. + * + *

+ * <p>
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ * The eligibility for retryable write support for bulk operations is determined on the whole bulk write. If the {@code requests}
+ * contain any {@code UpdateManyModels} or {@code DeleteManyModels} then the bulk operation will not support retryable writes.
+ * </p>
+ * @param requests the writes to execute + * @param options the options to apply to the bulk write operation + * @return the result of the bulk write + * @throws com.mongodb.MongoBulkWriteException if there's an exception in the bulk write operation + * @throws com.mongodb.MongoException if there's an exception running the operation + */ + BulkWriteResult bulkWrite(List> requests, BulkWriteOptions options); + + /** + * Executes a mix of inserts, updates, replaces, and deletes. + * + *

+ * <p>
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ * The eligibility for retryable write support for bulk operations is determined on the whole bulk write. If the {@code requests}
+ * contain any {@code UpdateManyModels} or {@code DeleteManyModels} then the bulk operation will not support retryable writes.
+ * </p>
+ * @param clientSession the client session with which to associate this operation + * @param requests the writes to execute + * @return the result of the bulk write + * @throws com.mongodb.MongoBulkWriteException if there's an exception in the bulk write operation + * @throws com.mongodb.MongoException if there's an exception running the operation + * @since 3.6 + * @mongodb.server.release 3.6 + */ + BulkWriteResult bulkWrite(ClientSession clientSession, List> requests); + + /** + * Executes a mix of inserts, updates, replaces, and deletes. + * + *

+ * <p>
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ * The eligibility for retryable write support for bulk operations is determined on the whole bulk write. If the {@code requests}
+ * contain any {@code UpdateManyModels} or {@code DeleteManyModels} then the bulk operation will not support retryable writes.
+ * </p>
+ * @param clientSession the client session with which to associate this operation + * @param requests the writes to execute + * @param options the options to apply to the bulk write operation + * @return the result of the bulk write + * @throws com.mongodb.MongoBulkWriteException if there's an exception in the bulk write operation + * @throws com.mongodb.MongoException if there's an exception running the operation + * @since 3.6 + * @mongodb.server.release 3.6 + */ + BulkWriteResult bulkWrite(ClientSession clientSession, List> requests, + BulkWriteOptions options); + + /** + * Inserts the provided document. If the document is missing an identifier, the driver should generate one. + * + *

+ * <p>
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ * </p>
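+ * <p>
+ * A sketch; when {@code _id} is absent the driver generates one, and the result exposes it
+ * ({@code collection} is an assumed {@code MongoCollection<Document>}):
+ * </p>
+ * <pre>{@code
+ * InsertOneResult result = collection.insertOne(new Document("name", "Ada"));
+ * BsonValue generatedId = result.getInsertedId(); // driver-generated identifier
+ * }</pre>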

+ * @param document the document to insert + * @return the insert one result + * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception + * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + */ + InsertOneResult insertOne(TDocument document); + + /** + * Inserts the provided document. If the document is missing an identifier, the driver should generate one. + * + *

+ * <p>
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ * </p>
+ * @param document the document to insert + * @param options the options to apply to the operation + * @return the insert one result + * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception + * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + * @since 3.2 + */ + InsertOneResult insertOne(TDocument document, InsertOneOptions options); + + /** + * Inserts the provided document. If the document is missing an identifier, the driver should generate one. + * + *

+ * <p>
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ * </p>
+ * @param clientSession the client session with which to associate this operation + * @param document the document to insert + * @return the insert one result + * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception + * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + * @since 3.6 + * @mongodb.server.release 3.6 + */ + InsertOneResult insertOne(ClientSession clientSession, TDocument document); + + /** + * Inserts the provided document. If the document is missing an identifier, the driver should generate one. + * + *

+ * <p>
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ * </p>
+ * @param clientSession the client session with which to associate this operation + * @param document the document to insert + * @param options the options to apply to the operation + * @return the insert one result + * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception + * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + * @since 3.6 + * @mongodb.server.release 3.6 + */ + InsertOneResult insertOne(ClientSession clientSession, TDocument document, InsertOneOptions options); + + /** + * Inserts one or more documents. A call to this method is equivalent to a call to the {@code bulkWrite} method + * + *

+ * <p>
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ * </p>
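+ * <p>
+ * A sketch ({@code collection} is an assumed {@code MongoCollection<Document>}):
+ * </p>
+ * <pre>{@code
+ * collection.insertMany(Arrays.asList(
+ *         new Document("x", 1),
+ *         new Document("x", 2)));
+ * }</pre>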

+ * @param documents the documents to insert + * @return the insert many result + * @throws com.mongodb.MongoBulkWriteException if there's an exception in the bulk write operation + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + * @throws IllegalArgumentException if the documents list is null or empty, or any of the documents in the list are null + * @see com.mongodb.client.MongoCollection#bulkWrite + */ + InsertManyResult insertMany(List documents); + + /** + * Inserts one or more documents. A call to this method is equivalent to a call to the {@code bulkWrite} method + * + *

+ * <p>
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ * </p>
+ * @param documents the documents to insert + * @param options the options to apply to the operation + * @return the insert many result + * @throws com.mongodb.MongoBulkWriteException if there's an exception in the bulk write operation + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + * @throws IllegalArgumentException if the documents list is null or empty, or any of the documents in the list are null + */ + InsertManyResult insertMany(List documents, InsertManyOptions options); + + /** + * Inserts one or more documents. A call to this method is equivalent to a call to the {@code bulkWrite} method + * + *

+ * <p>
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ * </p>
+ * @param clientSession the client session with which to associate this operation + * @param documents the documents to insert + * @return the insert many result + * @throws com.mongodb.MongoBulkWriteException if there's an exception in the bulk write operation + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + * @throws IllegalArgumentException if the documents list is null or empty, or any of the documents in the list are null + * @see com.mongodb.client.MongoCollection#bulkWrite + * @since 3.6 + * @mongodb.server.release 3.6 + */ + InsertManyResult insertMany(ClientSession clientSession, List documents); + + /** + * Inserts one or more documents. A call to this method is equivalent to a call to the {@code bulkWrite} method + * + *

+ * <p>
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ * </p>
+ * @param clientSession the client session with which to associate this operation + * @param documents the documents to insert + * @param options the options to apply to the operation + * @return the insert many result + * @throws com.mongodb.MongoBulkWriteException if there's an exception in the bulk write operation + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + * @throws IllegalArgumentException if the documents list is null or empty, or any of the documents in the list are null + * @since 3.6 + * @mongodb.server.release 3.6 + */ + InsertManyResult insertMany(ClientSession clientSession, List documents, InsertManyOptions options); + + /** + * Removes at most one document from the collection that matches the given filter. If no documents match, the collection is not + * modified. + * + *

+ * <p>
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ * </p>
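+ * <p>
+ * A sketch; at most one matching document is removed (filter values are illustrative):
+ * </p>
+ * <pre>{@code
+ * DeleteResult result = collection.deleteOne(Filters.eq("status", "stale"));
+ * long deleted = result.getDeletedCount(); // 0 or 1
+ * }</pre>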

+ * @param filter the query filter to apply the delete operation + * @return the result of the remove one operation + * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception + * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + */ + DeleteResult deleteOne(Bson filter); + + /** + * Removes at most one document from the collection that matches the given filter. If no documents match, the collection is not + * modified. + * + *

+ * <p>
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ * </p>
+ * @param filter the query filter to apply the delete operation + * @param options the options to apply to the delete operation + * @return the result of the remove one operation + * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception + * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + * @since 3.4 + */ + DeleteResult deleteOne(Bson filter, DeleteOptions options); + + /** + * Removes at most one document from the collection that matches the given filter. If no documents match, the collection is not + * modified. + * + *

+ * <p>
+ * Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.
+ * </p>
+ * @param clientSession the client session with which to associate this operation + * @param filter the query filter to apply the delete operation + * @return the result of the remove one operation + * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception + * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + * @since 3.6 + * @mongodb.server.release 3.6 + */ + DeleteResult deleteOne(ClientSession clientSession, Bson filter); + + /** + * Removes at most one document from the collection that matches the given filter. If no documents match, the collection is not + * modified. + * + *
+ * <p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>
+ *
+ * @param clientSession the client session with which to associate this operation + * @param filter the query filter to apply the delete operation + * @param options the options to apply to the delete operation + * @return the result of the remove one operation + * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception + * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + * @since 3.6 + * @mongodb.server.release 3.6 + */ + DeleteResult deleteOne(ClientSession clientSession, Bson filter, DeleteOptions options); + + /** + * Removes all documents from the collection that match the given query filter. If no documents match, the collection is not modified. + * + * @param filter the query filter to apply the delete operation + * @return the result of the remove many operation + * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception + * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + */ + DeleteResult deleteMany(Bson filter); + + /** + * Removes all documents from the collection that match the given query filter. If no documents match, the collection is not modified. + * + * @param filter the query filter to apply the delete operation + * @param options the options to apply to the delete operation + * @return the result of the remove many operation + * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception + * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + * @since 3.4 + */ + DeleteResult deleteMany(Bson filter, DeleteOptions options); + + /** + * Removes all documents from the collection that match the given query filter. If no documents match, the collection is not modified. + * + * @param clientSession the client session with which to associate this operation + * @param filter the query filter to apply the delete operation + * @return the result of the remove many operation + * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception + * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + * @since 3.6 + * @mongodb.server.release 3.6 + */ + DeleteResult deleteMany(ClientSession clientSession, Bson filter); + + /** + * Removes all documents from the collection that match the given query filter. If no documents match, the collection is not modified. 
+ * + * @param clientSession the client session with which to associate this operation + * @param filter the query filter to apply the delete operation + * @param options the options to apply to the delete operation + * @return the result of the remove many operation + * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception + * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + * @since 3.6 + * @mongodb.server.release 3.6 + */ + DeleteResult deleteMany(ClientSession clientSession, Bson filter, DeleteOptions options); + + /** + * Replace a document in the collection according to the specified arguments. + * + *
+ * <p>Use this method to replace a document using the specified replacement argument. To update the document with update operators, use
+ * the corresponding {@link #updateOne(Bson, Bson)} method.</p>
+ *
+ * <p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>
+ *
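+ * <p>A minimal usage sketch (illustrative only; assumes a {@code MongoCollection<Document>} named {@code collection}; field
+ * names are made up):</p>
+ * <pre>{@code
+ * Document replacement = new Document("name", "alice").append("status", "active");
+ * UpdateResult result = collection.replaceOne(Filters.eq("name", "alice"), replacement);
+ * System.out.println(result.getModifiedCount());
+ * }</pre>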
+ * @param filter the query filter to apply the replace operation + * @param replacement the replacement document + * @return the result of the replace one operation + * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception + * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + * @mongodb.driver.manual tutorial/modify-documents/#replace-the-document Replace + * @mongodb.driver.manual reference/command/update Update Command Behaviors + */ + UpdateResult replaceOne(Bson filter, TDocument replacement); + + /** + * Replace a document in the collection according to the specified arguments. + * + *
+ * <p>Use this method to replace a document using the specified replacement argument. To update the document with update operators, use
+ * the corresponding {@link #updateOne(Bson, Bson, UpdateOptions)} method.</p>
+ *
+ * <p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>
+ *
+ * @param filter the query filter to apply the replace operation + * @param replacement the replacement document + * @param replaceOptions the options to apply to the replace operation + * @return the result of the replace one operation + * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception + * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + * @mongodb.driver.manual tutorial/modify-documents/#replace-the-document Replace + * @mongodb.driver.manual reference/command/update Update Command Behaviors + * @since 3.7 + */ + UpdateResult replaceOne(Bson filter, TDocument replacement, ReplaceOptions replaceOptions); + + /** + * Replace a document in the collection according to the specified arguments. + * + *
+ * <p>Use this method to replace a document using the specified replacement argument. To update the document with update operators, use
+ * the corresponding {@link #updateOne(ClientSession, Bson, Bson)} method.</p>
+ *
+ * <p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>
+ *
+ * @param clientSession the client session with which to associate this operation + * @param filter the query filter to apply the replace operation + * @param replacement the replacement document + * @return the result of the replace one operation + * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception + * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual tutorial/modify-documents/#replace-the-document Replace + * @mongodb.driver.manual reference/command/update Update Command Behaviors + */ + UpdateResult replaceOne(ClientSession clientSession, Bson filter, TDocument replacement); + + /** + * Replace a document in the collection according to the specified arguments. + * + *
+ * <p>Use this method to replace a document using the specified replacement argument. To update the document with update operators, use
+ * the corresponding {@link #updateOne(ClientSession, Bson, Bson, UpdateOptions)} method.</p>
+ *
+ * <p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>
+ *
+ * @param clientSession the client session with which to associate this operation + * @param filter the query filter to apply the replace operation + * @param replacement the replacement document + * @param replaceOptions the options to apply to the replace operation + * @return the result of the replace one operation + * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception + * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + * @since 3.7 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual tutorial/modify-documents/#replace-the-document Replace + * @mongodb.driver.manual reference/command/update Update Command Behaviors + */ + UpdateResult replaceOne(ClientSession clientSession, Bson filter, TDocument replacement, ReplaceOptions replaceOptions); + + /** + * Update a single document in the collection according to the specified arguments. + * + *
+ * <p>Use this method to only update the corresponding fields in the document according to the update operators used in the update
+ * document. To replace the entire document with a new document, use the corresponding {@link #replaceOne(Bson, Object)} method.</p>
+ *
+ * <p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>
+ *
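+ * <p>A minimal usage sketch (illustrative only; assumes a {@code MongoCollection<Document>} named {@code collection} and uses
+ * {@link com.mongodb.client.model.Filters} and {@link com.mongodb.client.model.Updates}):</p>
+ * <pre>{@code
+ * UpdateResult result = collection.updateOne(
+ *         Filters.eq("name", "alice"),
+ *         Updates.set("status", "active"));
+ * System.out.println(result.getMatchedCount());
+ * }</pre>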
+ * @param filter a document describing the query filter, which may not be null. + * @param update a document describing the update, which may not be null. The update to apply must include at least one update operator. + * @return the result of the update one operation + * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception + * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + * @mongodb.driver.manual reference/command/update Update Command Behaviors + * @see com.mongodb.client.MongoCollection#replaceOne(Bson, Object) + */ + UpdateResult updateOne(Bson filter, Bson update); + + /** + * Update a single document in the collection according to the specified arguments. + * + *
+ * <p>Use this method to only update the corresponding fields in the document according to the update operators used in the update
+ * document. To replace the entire document with a new document, use the corresponding
+ * {@link #replaceOne(Bson, Object, ReplaceOptions)} method.</p>
+ *
+ * <p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>
+ *
+ * @param filter a document describing the query filter, which may not be null. + * @param update a document describing the update, which may not be null. The update to apply must include at least one update + * operator. + * @param updateOptions the options to apply to the update operation + * @return the result of the update one operation + * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception + * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + * @mongodb.driver.manual reference/command/update Update Command Behaviors + * @see com.mongodb.client.MongoCollection#replaceOne(Bson, Object, ReplaceOptions) + */ + UpdateResult updateOne(Bson filter, Bson update, UpdateOptions updateOptions); + + /** + * Update a single document in the collection according to the specified arguments. + * + *
+ * <p>Use this method to only update the corresponding fields in the document according to the update operators used in the update
+ * document. To replace the entire document with a new document, use the corresponding
+ * {@link #replaceOne(ClientSession, Bson, Object)} method.</p>
+ *
+ * <p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>
+ *
+ * @param clientSession the client session with which to associate this operation + * @param filter a document describing the query filter, which may not be null. + * @param update a document describing the update, which may not be null. The update to apply must include at least one update operator. + * @return the result of the update one operation + * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception + * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + * @mongodb.driver.manual reference/command/update Update Command Behaviors + * @see com.mongodb.client.MongoCollection#replaceOne(ClientSession, Bson, Object) + */ + UpdateResult updateOne(ClientSession clientSession, Bson filter, Bson update); + + /** + * Update a single document in the collection according to the specified arguments. + * + *
+ * <p>Use this method to only update the corresponding fields in the document according to the update operators used in the update
+ * document. To replace the entire document with a new document, use the corresponding
+ * {@link #replaceOne(ClientSession, Bson, Object, ReplaceOptions)} method.</p>
+ *
+ * <p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>
+ *
+ * @param clientSession the client session with which to associate this operation + * @param filter a document describing the query filter, which may not be null. + * @param update a document describing the update, which may not be null. The update to apply must include at least one update + * operator. + * @param updateOptions the options to apply to the update operation + * @return the result of the update one operation + * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception + * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + * @mongodb.driver.manual reference/command/update Update Command Behaviors + * @see com.mongodb.client.MongoCollection#replaceOne(ClientSession, Bson, Object, ReplaceOptions) + */ + UpdateResult updateOne(ClientSession clientSession, Bson filter, Bson update, UpdateOptions updateOptions); + + /** + * Update a single document in the collection according to the specified arguments. + * + *
+ * <p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>
+ *
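+ * <p>A minimal sketch of an update expressed as an aggregation pipeline (illustrative only; assumes a
+ * {@code MongoCollection<Document>} named {@code collection}):</p>
+ * <pre>{@code
+ * UpdateResult result = collection.updateOne(
+ *         Filters.eq("name", "alice"),
+ *         Arrays.asList(new Document("$set", new Document("lastModified", "$$NOW"))));
+ * }</pre>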
+ * @param filter a document describing the query filter, which may not be null. + * @param update a pipeline describing the update, which may not be null. + * @return the result of the update one operation + * @throws com.mongodb.MongoWriteException if the write failed due some other failure specific to the update command + * @throws com.mongodb.MongoWriteConcernException if the write failed due being unable to fulfil the write concern + * @throws com.mongodb.MongoException if the write failed due some other failure + * @since 3.11 + * @mongodb.server.release 4.2 + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + */ + UpdateResult updateOne(Bson filter, List update); + + /** + * Update a single document in the collection according to the specified arguments. + * + *
+ * <p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>
+ *
+ * @param filter a document describing the query filter, which may not be null. + * @param update a pipeline describing the update, which may not be null. + * @param updateOptions the options to apply to the update operation + * @return the result of the update one operation + * @throws com.mongodb.MongoWriteException if the write failed due some other failure specific to the update command + * @throws com.mongodb.MongoWriteConcernException if the write failed due being unable to fulfil the write concern + * @throws com.mongodb.MongoException if the write failed due some other failure + * @since 3.11 + * @mongodb.server.release 4.2 + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + */ + UpdateResult updateOne(Bson filter, List update, UpdateOptions updateOptions); + + /** + * Update a single document in the collection according to the specified arguments. + * + *
+ * <p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>
+ *
+ * @param clientSession the client session with which to associate this operation + * @param filter a document describing the query filter, which may not be null. + * @param update a pipeline describing the update, which may not be null. + * @return the result of the update one operation + * @throws com.mongodb.MongoWriteException if the write failed due some other failure specific to the update command + * @throws com.mongodb.MongoWriteConcernException if the write failed due being unable to fulfil the write concern + * @throws com.mongodb.MongoException if the write failed due some other failure + * @since 3.11 + * @mongodb.server.release 4.2 + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + */ + UpdateResult updateOne(ClientSession clientSession, Bson filter, List update); + + /** + * Update a single document in the collection according to the specified arguments. + * + *
+ * <p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>
+ *
+ * @param clientSession the client session with which to associate this operation + * @param filter a document describing the query filter, which may not be null. + * @param update a pipeline describing the update, which may not be null. + * @param updateOptions the options to apply to the update operation + * @return the result of the update one operation + * @throws com.mongodb.MongoWriteException if the write failed due some other failure specific to the update command + * @throws com.mongodb.MongoWriteConcernException if the write failed due being unable to fulfil the write concern + * @throws com.mongodb.MongoException if the write failed due some other failure + * @since 3.11 + * @mongodb.server.release 4.2 + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + */ + UpdateResult updateOne(ClientSession clientSession, Bson filter, List update, UpdateOptions updateOptions); + + /** + * Update all documents in the collection according to the specified arguments. + * + * @param filter a document describing the query filter, which may not be null. + * @param update a document describing the update, which may not be null. The update to apply must include only update operators. + * @return the result of the update many operation + * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception + * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + */ + UpdateResult updateMany(Bson filter, Bson update); + + /** + * Update all documents in the collection according to the specified arguments. + * + * @param filter a document describing the query filter, which may not be null. + * @param update a document describing the update, which may not be null. The update to apply must include only update operators. + * @param updateOptions the options to apply to the update operation + * @return the result of the update many operation + * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception + * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + */ + UpdateResult updateMany(Bson filter, Bson update, UpdateOptions updateOptions); + + /** + * Update all documents in the collection according to the specified arguments. + * + * @param clientSession the client session with which to associate this operation + * @param filter a document describing the query filter, which may not be null. + * @param update a document describing the update, which may not be null. The update to apply must include only update operators. 
+ * @return the result of the update many operation + * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception + * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + */ + UpdateResult updateMany(ClientSession clientSession, Bson filter, Bson update); + + /** + * Update all documents in the collection according to the specified arguments. + * + * @param clientSession the client session with which to associate this operation + * @param filter a document describing the query filter, which may not be null. + * @param update a document describing the update, which may not be null. The update to apply must include only update operators. + * @param updateOptions the options to apply to the update operation + * @return the result of the update many operation + * @throws com.mongodb.MongoWriteException if the write failed due to some specific write exception + * @throws com.mongodb.MongoWriteConcernException if the write failed due to being unable to fulfil the write concern + * @throws com.mongodb.MongoCommandException if the write failed due to a specific command exception + * @throws com.mongodb.MongoException if the write failed due some other failure + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + */ + UpdateResult updateMany(ClientSession clientSession, Bson filter, Bson update, UpdateOptions updateOptions); + + /** + * Update all documents in the collection according to the specified arguments. + * + * @param filter a document describing the query filter, which may not be null. + * @param update a pipeline describing the update, which may not be null. + * @return the result of the update many operation + * @throws com.mongodb.MongoWriteException if the write failed due some other failure specific to the update command + * @throws com.mongodb.MongoWriteConcernException if the write failed due being unable to fulfil the write concern + * @throws com.mongodb.MongoException if the write failed due some other failure + * @since 3.11 + * @mongodb.server.release 4.2 + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + */ + UpdateResult updateMany(Bson filter, List update); + + /** + * Update all documents in the collection according to the specified arguments. + * + * @param filter a document describing the query filter, which may not be null. + * @param update a pipeline describing the update, which may not be null. 
+ * @param updateOptions the options to apply to the update operation + * @return the result of the update many operation + * @throws com.mongodb.MongoWriteException if the write failed due some other failure specific to the update command + * @throws com.mongodb.MongoWriteConcernException if the write failed due being unable to fulfil the write concern + * @throws com.mongodb.MongoException if the write failed due some other failure + * @since 3.11 + * @mongodb.server.release 4.2 + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + */ + UpdateResult updateMany(Bson filter, List update, UpdateOptions updateOptions); + + /** + * Update all documents in the collection according to the specified arguments. + * + * @param clientSession the client session with which to associate this operation + * @param filter a document describing the query filter, which may not be null. + * @param update a pipeline describing the update, which may not be null. + * @return the result of the update many operation + * @throws com.mongodb.MongoWriteException if the write failed due some other failure specific to the update command + * @throws com.mongodb.MongoWriteConcernException if the write failed due being unable to fulfil the write concern + * @throws com.mongodb.MongoException if the write failed due some other failure + * @since 3.11 + * @mongodb.server.release 4.2 + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + */ + UpdateResult updateMany(ClientSession clientSession, Bson filter, List update); + + /** + * Update all documents in the collection according to the specified arguments. + * + * @param clientSession the client session with which to associate this operation + * @param filter a document describing the query filter, which may not be null. + * @param update a pipeline describing the update, which may not be null. + * @param updateOptions the options to apply to the update operation + * @return the result of the update many operation + * @throws com.mongodb.MongoWriteException if the write failed due some other failure specific to the update command + * @throws com.mongodb.MongoWriteConcernException if the write failed due being unable to fulfil the write concern + * @throws com.mongodb.MongoException if the write failed due some other failure + * @since 3.11 + * @mongodb.server.release 4.2 + * @mongodb.driver.manual tutorial/modify-documents/ Updates + * @mongodb.driver.manual reference/operator/update/ Update Operators + */ + UpdateResult updateMany(ClientSession clientSession, Bson filter, List update, UpdateOptions updateOptions); + + /** + * Atomically find a document and remove it. + * + *
+ * <p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>
+ *
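+ * <p>A minimal usage sketch (illustrative only; assumes a {@code MongoCollection<Document>} named {@code collection}):</p>
+ * <pre>{@code
+ * Document removed = collection.findOneAndDelete(Filters.eq("name", "alice"));
+ * if (removed == null) {
+ *     // no document matched the filter
+ * }
+ * }</pre>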
+ * @param filter the query filter to find the document with + * @return the document that was removed. If no documents matched the query filter, then null will be returned + */ + @Nullable + TDocument findOneAndDelete(Bson filter); + + /** + * Atomically find a document and remove it. + * + *
+ * <p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>
+ *
+ * @param filter the query filter to find the document with + * @param options the options to apply to the operation + * @return the document that was removed. If no documents matched the query filter, then null will be returned + */ + @Nullable + TDocument findOneAndDelete(Bson filter, FindOneAndDeleteOptions options); + + /** + * Atomically find a document and remove it. + * + *
+ * <p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>
+ *
+ * @param clientSession the client session with which to associate this operation + * @param filter the query filter to find the document with + * @return the document that was removed. If no documents matched the query filter, then null will be returned + * @since 3.6 + * @mongodb.server.release 3.6 + */ + @Nullable + TDocument findOneAndDelete(ClientSession clientSession, Bson filter); + + /** + * Atomically find a document and remove it. + * + *
+ * <p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>
+ *
+ * @param clientSession the client session with which to associate this operation + * @param filter the query filter to find the document with + * @param options the options to apply to the operation + * @return the document that was removed. If no documents matched the query filter, then null will be returned + * @since 3.6 + * @mongodb.server.release 3.6 + */ + @Nullable + TDocument findOneAndDelete(ClientSession clientSession, Bson filter, FindOneAndDeleteOptions options); + + /** + * Atomically find a document and replace it. + * + *
+ * <p>Use this method to replace a document using the specified replacement argument. To update the document with update operators, use
+ * the corresponding {@link #findOneAndUpdate(Bson, Bson)} method.</p>
+ *
+ * <p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>
+ *
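+ * <p>A minimal usage sketch (illustrative only; assumes a {@code MongoCollection<Document>} named {@code collection}; field
+ * names are made up):</p>
+ * <pre>{@code
+ * Document previous = collection.findOneAndReplace(
+ *         Filters.eq("name", "alice"),
+ *         new Document("name", "alice").append("status", "active"));
+ * }</pre>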
+ * @param filter the query filter to apply the replace operation + * @param replacement the replacement document + * @return the document that was replaced. Depending on the value of the {@code returnOriginal} property, this will either be the + * document as it was before the update or as it is after the update. If no documents matched the query filter, then null will be + * returned + * @mongodb.driver.manual reference/command/update Update Command Behaviors + */ + @Nullable + TDocument findOneAndReplace(Bson filter, TDocument replacement); + + /** + * Atomically find a document and replace it. + * + *
+ * <p>Use this method to replace a document using the specified replacement argument. To update the document with update operators, use
+ * the corresponding {@link #findOneAndUpdate(Bson, Bson, FindOneAndUpdateOptions)} method.</p>
+ *
+ * <p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>
+ *
+ * @param filter the query filter to apply the replace operation + * @param replacement the replacement document + * @param options the options to apply to the operation + * @return the document that was replaced. Depending on the value of the {@code returnOriginal} property, this will either be the + * document as it was before the update or as it is after the update. If no documents matched the query filter, then null will be + * returned + * @mongodb.driver.manual reference/command/update Update Command Behaviors + */ + @Nullable + TDocument findOneAndReplace(Bson filter, TDocument replacement, FindOneAndReplaceOptions options); + + /** + * Atomically find a document and replace it. + * + *
+ * <p>Use this method to replace a document using the specified replacement argument. To update the document with update operators, use
+ * the corresponding {@link #findOneAndUpdate(ClientSession, Bson, Bson)} method.</p>
+ *
+ * <p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>
+ *
+ * @param clientSession the client session with which to associate this operation + * @param filter the query filter to apply the replace operation + * @param replacement the replacement document + * @return the document that was replaced. Depending on the value of the {@code returnOriginal} property, this will either be the + * document as it was before the update or as it is after the update. If no documents matched the query filter, then null will be + * returned + * @mongodb.driver.manual reference/command/update Update Command Behaviors + * @since 3.6 + * @mongodb.server.release 3.6 + */ + @Nullable + TDocument findOneAndReplace(ClientSession clientSession, Bson filter, TDocument replacement); + + /** + * Atomically find a document and replace it. + * + *
+ * <p>Use this method to replace a document using the specified replacement argument. To update the document with update operators, use
+ * the corresponding {@link #findOneAndUpdate(ClientSession, Bson, Bson, FindOneAndUpdateOptions)} method.</p>
+ *
+ * <p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>
+ *
+ * @param clientSession the client session with which to associate this operation + * @param filter the query filter to apply the replace operation + * @param replacement the replacement document + * @param options the options to apply to the operation + * @return the document that was replaced. Depending on the value of the {@code returnOriginal} property, this will either be the + * document as it was before the update or as it is after the update. If no documents matched the query filter, then null will be + * returned + * @mongodb.driver.manual reference/command/update Update Command Behaviors + * @since 3.6 + * @mongodb.server.release 3.6 + */ + @Nullable + TDocument findOneAndReplace(ClientSession clientSession, Bson filter, TDocument replacement, FindOneAndReplaceOptions options); + + /** + * Atomically find a document and update it. + * + *
+ * <p>Use this method to only update the corresponding fields in the document according to the update operators used in the update
+ * document. To replace the entire document with a new document, use the corresponding {@link #findOneAndReplace(Bson, Object)} method.</p>
+ *
+ * <p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>
+ *
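+ * <p>A minimal usage sketch (illustrative only; assumes a {@code MongoCollection<Document>} named {@code collection}; the
+ * returned document is the one found before the update was applied):</p>
+ * <pre>{@code
+ * Document beforeUpdate = collection.findOneAndUpdate(
+ *         Filters.eq("name", "alice"),
+ *         Updates.inc("loginCount", 1));
+ * }</pre>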
+ * @param filter a document describing the query filter, which may not be null. + * @param update a document describing the update, which may not be null. The update to apply must include at least one update operator. + * @return the document that was updated before the update was applied. If no documents matched the query filter, then null will be + * returned + * @mongodb.driver.manual reference/command/update Update Command Behaviors + * @see com.mongodb.client.MongoCollection#findOneAndReplace(Bson, Object) + */ + @Nullable + TDocument findOneAndUpdate(Bson filter, Bson update); + + /** + * Atomically find a document and update it. + * + *
+ * <p>Use this method to only update the corresponding fields in the document according to the update operators used in the update
+ * document. To replace the entire document with a new document, use the corresponding
+ * {@link #findOneAndReplace(Bson, Object, FindOneAndReplaceOptions)} method.</p>
+ *
+ * <p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>
+ *
+ * @param filter a document describing the query filter, which may not be null. + * @param update a document describing the update, which may not be null. The update to apply must include at least one update + * operator. + * @param options the options to apply to the operation + * @return the document that was updated. Depending on the value of the {@code returnOriginal} property, this will either be the + * document as it was before the update or as it is after the update. If no documents matched the query filter, then null will be + * returned + * @mongodb.driver.manual reference/command/update Update Command Behaviors + * @see com.mongodb.client.MongoCollection#findOneAndReplace(Bson, Object, FindOneAndReplaceOptions) + */ + @Nullable + TDocument findOneAndUpdate(Bson filter, Bson update, FindOneAndUpdateOptions options); + + /** + * Atomically find a document and update it. + * + *
+ * <p>Use this method to only update the corresponding fields in the document according to the update operators used in the update
+ * document. To replace the entire document with a new document, use the corresponding
+ * {@link #findOneAndReplace(ClientSession, Bson, Object)} method.</p>
+ *
+ * <p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>
+ *
+ * @param clientSession the client session with which to associate this operation + * @param filter a document describing the query filter, which may not be null. + * @param update a document describing the update, which may not be null. The update to apply must include at least one update operator. + * @return the document that was updated before the update was applied. If no documents matched the query filter, then null will be + * returned + * @mongodb.driver.manual reference/command/update Update Command Behaviors + * @since 3.6 + * @mongodb.server.release 3.6 + * @see com.mongodb.client.MongoCollection#findOneAndReplace(ClientSession, Bson, Object) + */ + @Nullable + TDocument findOneAndUpdate(ClientSession clientSession, Bson filter, Bson update); + + /** + * Atomically find a document and update it. + * + *
+ * <p>Use this method to only update the corresponding fields in the document according to the update operators used in the update
+ * document. To replace the entire document with a new document, use the corresponding
+ * {@link #findOneAndReplace(ClientSession, Bson, Object, FindOneAndReplaceOptions)} method.</p>
+ *
+ * <p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>
+ *
+ * @param clientSession the client session with which to associate this operation + * @param filter a document describing the query filter, which may not be null. + * @param update a document describing the update, which may not be null. The update to apply must include at least one update + * operator. + * @param options the options to apply to the operation + * @return the document that was updated. Depending on the value of the {@code returnOriginal} property, this will either be the + * document as it was before the update or as it is after the update. If no documents matched the query filter, then null will be + * returned + * @mongodb.driver.manual reference/command/update Update Command Behaviors + * @since 3.6 + * @mongodb.server.release 3.6 + * @see com.mongodb.client.MongoCollection#findOneAndReplace(ClientSession, Bson, Object, FindOneAndReplaceOptions) + */ + @Nullable + TDocument findOneAndUpdate(ClientSession clientSession, Bson filter, Bson update, FindOneAndUpdateOptions options); + + /** + * Atomically find a document and update it. + * + *
+ * <p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>
+ *
+ * @param filter a document describing the query filter, which may not be null. + * @param update a pipeline describing the update, which may not be null. + * @return the document that was updated before the update was applied. If no documents matched the query filter, then null will be + * returned + * @since 3.11 + * @mongodb.server.release 4.2 + */ + @Nullable + TDocument findOneAndUpdate(Bson filter, List update); + + /** + * Atomically find a document and update it. + * + *
+ * <p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>
+ *
+ * @param filter a document describing the query filter, which may not be null. + * @param update a pipeline describing the update, which may not be null. + * @param options the options to apply to the operation + * @return the document that was updated. Depending on the value of the {@code returnOriginal} property, this will either be the + * document as it was before the update or as it is after the update. If no documents matched the query filter, then null will be + * returned + * @since 3.11 + * @mongodb.server.release 4.2 + */ + @Nullable + TDocument findOneAndUpdate(Bson filter, List update, FindOneAndUpdateOptions options); + + /** + * Atomically find a document and update it. + * + *
+ * <p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>
+ *
+ * @param clientSession the client session with which to associate this operation + * @param filter a document describing the query filter, which may not be null. + * @param update a pipeline describing the update, which may not be null. + * @return the document that was updated before the update was applied. If no documents matched the query filter, then null will be + * returned + * @since 3.11 + * @mongodb.server.release 4.2 + */ + @Nullable + TDocument findOneAndUpdate(ClientSession clientSession, Bson filter, List update); + + /** + * Atomically find a document and update it. + * + *
+ * <p>Note: Supports retryable writes on MongoDB server versions 3.6 or higher when the retryWrites setting is enabled.</p>
+ *
+ * @param clientSession the client session with which to associate this operation + * @param filter a document describing the query filter, which may not be null. + * @param update a pipeline describing the update, which may not be null. + * @param options the options to apply to the operation + * @return the document that was updated. Depending on the value of the {@code returnOriginal} property, this will either be the + * document as it was before the update or as it is after the update. If no documents matched the query filter, then null will be + * returned + * @since 3.11 + * @mongodb.server.release 4.2 + */ + @Nullable + TDocument findOneAndUpdate(ClientSession clientSession, Bson filter, List update, FindOneAndUpdateOptions options); + + /** + * Drops this collection from the Database. + * + * @mongodb.driver.manual reference/command/drop/ Drop Collection + */ + void drop(); + + /** + * Drops this collection from the Database. + * + * @param clientSession the client session with which to associate this operation + * @mongodb.driver.manual reference/command/drop/ Drop Collection + * @since 3.6 + * @mongodb.server.release 3.6 + */ + void drop(ClientSession clientSession); + + /** + * Drops this collection from the Database. + * + * @param dropCollectionOptions various options for dropping the collection + * @mongodb.driver.manual reference/command/drop/ Drop Collection + * @since 4.7 + * @mongodb.server.release 6.0 + */ + void drop(DropCollectionOptions dropCollectionOptions); + + /** + * Drops this collection from the Database. + * + * @param clientSession the client session with which to associate this operation + * @param dropCollectionOptions various options for dropping the collection + * @mongodb.driver.manual reference/command/drop/ Drop Collection + * @since 4.7 + * @mongodb.server.release 6.0 + */ + void drop(ClientSession clientSession, DropCollectionOptions dropCollectionOptions); + + /** + * Create an Atlas Search index for the collection. + * + * @param indexName the name of the search index to create. + * @param definition the search index mapping definition. + * @return the search index name. + * @mongodb.server.release 6.0 + * @mongodb.driver.manual reference/command/createSearchIndexes/ Create Search indexes + * @since 4.11 + */ + String createSearchIndex(String indexName, Bson definition); + + /** + * Create an Atlas Search index with {@code "default"} name for the collection. + * + * @param definition the search index mapping definition. + * @return the search index name. + * @mongodb.server.release 6.0 + * @mongodb.driver.manual reference/command/createSearchIndexes/ Create Search indexes + * @since 4.11 + */ + String createSearchIndex(Bson definition); + + /** + * Create one or more Atlas Search indexes for the collection. + *
+ * <p>The name can be omitted for a single index, in which case the name will be {@code "default"}.</p>
+ *
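+ * <p>A minimal usage sketch (illustrative only; the index name and the dynamic mapping definition are made up):</p>
+ * <pre>{@code
+ * SearchIndexModel model = new SearchIndexModel("default",
+ *         new Document("mappings", new Document("dynamic", true)));
+ * List<String> names = collection.createSearchIndexes(Arrays.asList(model));
+ * }</pre>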
+ * + * @param searchIndexModels the search index models. + * @return the search index names in the order specified by the given list of {@link SearchIndexModel}s. + * @mongodb.server.release 6.0 + * @mongodb.driver.manual reference/command/createSearchIndexes/ Create Search indexes + * @since 4.11 + */ + List createSearchIndexes(List searchIndexModels); + + /** + * Update an Atlas Search index in the collection. + * + * @param indexName the name of the search index to update. + * @param definition the search index mapping definition. + * @mongodb.server.release 6.0 + * @mongodb.driver.manual reference/command/updateSearchIndex/ Update Search index + * @since 4.11 + */ + void updateSearchIndex(String indexName, Bson definition); + + /** + * Drop an Atlas Search index given its name. + * + * @param indexName the name of the search index to drop. + * @mongodb.server.release 6.0 + * @mongodb.driver.manual reference/command/dropSearchIndex/ Drop Search index + * @since 4.11 + */ + void dropSearchIndex(String indexName); + + /** + * Get all Atlas Search indexes in this collection. + * + * @return the list search indexes iterable interface. + * @since 4.11 + * @mongodb.server.release 6.0 + */ + ListSearchIndexesIterable listSearchIndexes(); + + /** + * Get all Atlas Search indexes in this collection. + * + * @param resultClass the class to decode each document into. + * @param the target document type of the iterable. + * @return the list search indexes iterable interface. + * @since 4.11 + * @mongodb.server.release 6.0 + */ + ListSearchIndexesIterable listSearchIndexes(Class resultClass); + + /** + * Create an index with the given keys. + * + * @param keys an object describing the index key(s), which may not be null. + * @return the index name + * @mongodb.driver.manual reference/command/createIndexes Create indexes + */ + String createIndex(Bson keys); + + /** + * Create an index with the given keys and options. + * + * @param keys an object describing the index key(s), which may not be null. + * @param indexOptions the options for the index + * @return the index name + * @mongodb.driver.manual reference/command/createIndexes Create indexes + */ + String createIndex(Bson keys, IndexOptions indexOptions); + + /** + * Create an index with the given keys. + * + * @param clientSession the client session with which to associate this operation + * @param keys an object describing the index key(s), which may not be null. + * @return the index name + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual reference/command/createIndexes Create indexes + */ + String createIndex(ClientSession clientSession, Bson keys); + + /** + * Create an index with the given keys and options. + * + * @param clientSession the client session with which to associate this operation + * @param keys an object describing the index key(s), which may not be null. + * @param indexOptions the options for the index + * @return the index name + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual reference/command/createIndexes Create indexes + */ + String createIndex(ClientSession clientSession, Bson keys, IndexOptions indexOptions); + + /** + * Create multiple indexes. + * + * @param indexes the list of indexes + * @return the list of index names + * @mongodb.driver.manual reference/command/createIndexes Create indexes + */ + List createIndexes(List indexes); + + /** + * Create multiple indexes. 
+ * + * @param indexes the list of indexes + * @param createIndexOptions options to use when creating indexes + * @return the list of index names + * @mongodb.driver.manual reference/command/createIndexes Create indexes + * @since 3.6 + */ + List createIndexes(List indexes, CreateIndexOptions createIndexOptions); + + /** + * Create multiple indexes. + * + * @param clientSession the client session with which to associate this operation + * @param indexes the list of indexes + * @return the list of index names + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual reference/command/createIndexes Create indexes + */ + List createIndexes(ClientSession clientSession, List indexes); + + /** + * Create multiple indexes. + * + * @param clientSession the client session with which to associate this operation + * @param indexes the list of indexes + * @param createIndexOptions options to use when creating indexes + * @return the list of index names + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual reference/command/createIndexes Create indexes + */ + List createIndexes(ClientSession clientSession, List indexes, CreateIndexOptions createIndexOptions); + + /** + * Get all the indexes in this collection. + * + * @return the list indexes iterable interface + * @mongodb.driver.manual reference/command/listIndexes/ List indexes + */ + ListIndexesIterable listIndexes(); + + /** + * Get all the indexes in this collection. + * + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the list indexes iterable interface + * @mongodb.driver.manual reference/command/listIndexes/ List indexes + */ + ListIndexesIterable listIndexes(Class resultClass); + + /** + * Get all the indexes in this collection. + * + * @param clientSession the client session with which to associate this operation + * @return the list indexes iterable interface + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual reference/command/listIndexes/ List indexes + */ + ListIndexesIterable listIndexes(ClientSession clientSession); + + /** + * Get all the indexes in this collection. + * + * @param clientSession the client session with which to associate this operation + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the list indexes iterable interface + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual reference/command/listIndexes/ List indexes + */ + ListIndexesIterable listIndexes(ClientSession clientSession, Class resultClass); + + /** + * Drops the index given its name. + * + * @param indexName the name of the index to remove + * @mongodb.driver.manual reference/command/dropIndexes/ Drop indexes + */ + void dropIndex(String indexName); + + /** + * Drops the index given its name. + * + * @param indexName the name of the index to remove + * @param dropIndexOptions the options to use when dropping indexes + * @mongodb.driver.manual reference/command/dropIndexes/ Drop indexes + * @since 3.6 + */ + void dropIndex(String indexName, DropIndexOptions dropIndexOptions); + + /** + * Drops the index given the keys used to create it. + * + * @param keys the keys of the index to remove + * @mongodb.driver.manual reference/command/dropIndexes/ Drop indexes + */ + void dropIndex(Bson keys); + + /** + * Drops the index given the keys used to create it. 
+ * + * @param keys the keys of the index to remove + * @param dropIndexOptions the options to use when dropping indexes + * @mongodb.driver.manual reference/command/dropIndexes/ Drop indexes + * @since 3.6 + */ + void dropIndex(Bson keys, DropIndexOptions dropIndexOptions); + + /** + * Drops the index given its name. + * + * @param clientSession the client session with which to associate this operation + * @param indexName the name of the index to remove + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual reference/command/dropIndexes/ Drop indexes + */ + void dropIndex(ClientSession clientSession, String indexName); + + /** + * Drops the index given the keys used to create it. + * + * @param clientSession the client session with which to associate this operation + * @param keys the keys of the index to remove + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual reference/command/dropIndexes/ Drop indexes + */ + void dropIndex(ClientSession clientSession, Bson keys); + + /** + * Drops the index given its name. + * + * @param clientSession the client session with which to associate this operation + * @param indexName the name of the index to remove + * @param dropIndexOptions the options to use when dropping indexes + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual reference/command/dropIndexes/ Drop indexes + */ + void dropIndex(ClientSession clientSession, String indexName, DropIndexOptions dropIndexOptions); + + /** + * Drops the index given the keys used to create it. + * + * @param clientSession the client session with which to associate this operation + * @param keys the keys of the index to remove + * @param dropIndexOptions the options to use when dropping indexes + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual reference/command/dropIndexes/ Drop indexes + */ + void dropIndex(ClientSession clientSession, Bson keys, DropIndexOptions dropIndexOptions); + + /** + * Drop all the indexes on this collection, except for the default on _id. + * + * @mongodb.driver.manual reference/command/dropIndexes/ Drop indexes + */ + void dropIndexes(); + + /** + * Drop all the indexes on this collection, except for the default on _id. + * + * @param clientSession the client session with which to associate this operation + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual reference/command/dropIndexes/ Drop indexes + */ + void dropIndexes(ClientSession clientSession); + + /** + * Drop all the indexes on this collection, except for the default on _id. + * + * @param dropIndexOptions the options to use when dropping indexes + * @mongodb.driver.manual reference/command/dropIndexes/ Drop indexes + * @since 3.6 + */ + void dropIndexes(DropIndexOptions dropIndexOptions); + + /** + * Drop all the indexes on this collection, except for the default on _id. + * + * @param clientSession the client session with which to associate this operation + * @param dropIndexOptions the options to use when dropping indexes + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual reference/command/dropIndexes/ Drop indexes + */ + void dropIndexes(ClientSession clientSession, DropIndexOptions dropIndexOptions); + + /** + * Rename the collection with oldCollectionName to the newCollectionName. 
+ * + * @param newCollectionNamespace the namespace the collection will be renamed to + * @throws com.mongodb.MongoServerException if you provide a newCollectionName that is the name of an existing collection, or if the + * oldCollectionName is the name of a collection that doesn't exist + * @mongodb.driver.manual reference/command/renameCollection Rename collection + */ + void renameCollection(MongoNamespace newCollectionNamespace); + + /** + * Rename the collection with oldCollectionName to the newCollectionName. + * + * @param newCollectionNamespace the name the collection will be renamed to + * @param renameCollectionOptions the options for renaming a collection + * @throws com.mongodb.MongoServerException if you provide a newCollectionName that is the name of an existing collection and dropTarget + * is false, or if the oldCollectionName is the name of a collection that doesn't exist + * @mongodb.driver.manual reference/command/renameCollection Rename collection + */ + void renameCollection(MongoNamespace newCollectionNamespace, RenameCollectionOptions renameCollectionOptions); + + /** + * Rename the collection with oldCollectionName to the newCollectionName. + * + * @param clientSession the client session with which to associate this operation + * @param newCollectionNamespace the namespace the collection will be renamed to + * @throws com.mongodb.MongoServerException if you provide a newCollectionName that is the name of an existing collection, or if the + * oldCollectionName is the name of a collection that doesn't exist + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual reference/command/renameCollection Rename collection + */ + void renameCollection(ClientSession clientSession, MongoNamespace newCollectionNamespace); + + /** + * Rename the collection with oldCollectionName to the newCollectionName. + * + * @param clientSession the client session with which to associate this operation + * @param newCollectionNamespace the name the collection will be renamed to + * @param renameCollectionOptions the options for renaming a collection + * @throws com.mongodb.MongoServerException if you provide a newCollectionName that is the name of an existing collection and dropTarget + * is false, or if the oldCollectionName is the name of a collection that doesn't exist + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual reference/command/renameCollection Rename collection + */ + void renameCollection(ClientSession clientSession, MongoNamespace newCollectionNamespace, + RenameCollectionOptions renameCollectionOptions); +} diff --git a/driver-sync/src/main/com/mongodb/client/MongoCursor.java b/driver-sync/src/main/com/mongodb/client/MongoCursor.java new file mode 100644 index 00000000000..f0664d4633a --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/MongoCursor.java @@ -0,0 +1,109 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client; + +import com.mongodb.ServerAddress; +import com.mongodb.ServerCursor; +import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.lang.Nullable; + +import java.io.Closeable; +import java.util.Iterator; +import java.util.function.Consumer; + +/** + * The Mongo Cursor interface implementing the iterator protocol. + *

+ * An application should ensure that a cursor is closed in all circumstances, e.g. using a try-with-resources statement:
+ *
+ * <pre>{@code
+ * try (MongoCursor<Document> cursor = collection.find().cursor()) {
+ *     while (cursor.hasNext()) {
+ *         System.out.println(cursor.next());
+ *     }
+ * }
+ * }</pre>
+ *
+ * @since 3.0
+ * @param <TResult> The type of documents the cursor contains
+ */
+@NotThreadSafe
+public interface MongoCursor<TResult> extends Iterator<TResult>, Closeable {
+    /**
+     * Despite this interface being {@linkplain NotThreadSafe non-thread-safe},
+     * {@link #close()} is allowed to be called concurrently with any method of the cursor, including itself.
+     * This is useful to cancel blocked {@link #hasNext()}, {@link #next()}.
+     * This method is idempotent.
+     */
+    @Override
+    void close();
+
+    @Override
+    boolean hasNext();
+
+    @Override
+    TResult next();
+
+    /**
+     * Gets the number of results available locally without blocking, which may be 0.
+     *
+     *

+ * If the cursor is known to be exhausted, returns 0. If the cursor is closed before it's been exhausted, it may return a non-zero + * value. + *

+ * + * @return the number of results available locally without blocking + * @since 4.4 + */ + int available(); + + /** + * A special {@code next()} case that returns the next element in the iteration if available or null. + * + *

Tailable cursors are an example of where this is useful. A call to {@code tryNext()} may return null, but a subsequent
+ * call may return a new element once a document has been added to the capped collection.
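To ground the tryNext() contract, here is a minimal sketch of polling a tailable cursor; the capped "events" collection and the wrapper class are assumed for illustration only.

    import com.mongodb.CursorType;
    import com.mongodb.client.MongoCollection;
    import com.mongodb.client.MongoCursor;
    import org.bson.Document;

    final class TailableCursorExample {
        // Drains whatever is currently available: tryNext() returning null
        // means "nothing available right now", not "the cursor is exhausted".
        static void drainAvailable(MongoCollection<Document> events) {
            try (MongoCursor<Document> cursor =
                    events.find().cursorType(CursorType.TailableAwait).cursor()) {
                Document doc = cursor.tryNext();
                while (doc != null) {
                    System.out.println(doc.toJson());
                    doc = cursor.tryNext();
                }
            }
        }
    }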

+ * + * @return the next element in the iteration if available or null. + * @mongodb.driver.manual reference/glossary/#term-tailable-cursor Tailable Cursor + */ + @Nullable + TResult tryNext(); + + /** + * Returns the server cursor, which can be null if the no cursor was created or if the cursor has been exhausted or killed. + * + * @return the ServerCursor, which can be null. + */ + @Nullable + ServerCursor getServerCursor(); + + /** + * Returns the server address + * + * @return ServerAddress + */ + ServerAddress getServerAddress(); + + @Override + default void forEachRemaining(final Consumer action) { + try { + Iterator.super.forEachRemaining(action); + } finally { + close(); + } + } +} diff --git a/driver-sync/src/main/com/mongodb/client/MongoDatabase.java b/driver-sync/src/main/com/mongodb/client/MongoDatabase.java new file mode 100644 index 00000000000..1e84a91005a --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/MongoDatabase.java @@ -0,0 +1,626 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.CreateViewOptions; +import com.mongodb.lang.Nullable; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +/** + * The MongoDatabase interface. + * + *

Note: Additions to this interface will not be considered to break binary compatibility.

+ * + * @since 3.0 + */ +@ThreadSafe +public interface MongoDatabase { + + /** + * Gets the name of the database. + * + * @return the database name + */ + String getName(); + + /** + * Get the codec registry for the MongoDatabase. + * + * @return the {@link org.bson.codecs.configuration.CodecRegistry} + */ + CodecRegistry getCodecRegistry(); + + /** + * Get the read preference for the MongoDatabase. + * + * @return the {@link com.mongodb.ReadPreference} + */ + ReadPreference getReadPreference(); + + /** + * Get the write concern for the MongoDatabase. + * + * @return the {@link com.mongodb.WriteConcern} + */ + WriteConcern getWriteConcern(); + + /** + * Get the read concern for the MongoDatabase. + * + * @return the {@link com.mongodb.ReadConcern} + * @since 3.2 + * @mongodb.server.release 3.2 + * @mongodb.driver.manual reference/readConcern/ Read Concern + */ + ReadConcern getReadConcern(); + + /** + * The time limit for the full execution of an operation. + * + *

If not null, the following deprecated options will be ignored:
+ * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}
+ *
+ * <ul>
+ *   <li>{@code null} means that the timeout mechanism for operations will defer to using:
+ *     <ul>
+ *       <li>{@code waitQueueTimeoutMS}: The maximum wait time in milliseconds that a thread may wait for a connection to become
+ *       available</li>
+ *       <li>{@code socketTimeoutMS}: How long a send or receive on a socket can take before timing out.</li>
+ *       <li>{@code wTimeoutMS}: How long the server will wait for the write concern to be fulfilled before timing out.</li>
+ *       <li>{@code maxTimeMS}: The cumulative time limit for processing operations on a cursor.
+ *       See: cursor.maxTimeMS.</li>
+ *       <li>{@code maxCommitTimeMS}: The maximum amount of time to allow a single {@code commitTransaction} command to execute.
+ *       See: {@link com.mongodb.TransactionOptions#getMaxCommitTime}.</li>
+ *     </ul>
+ *   </li>
+ *   <li>{@code 0} means infinite timeout.</li>
+ *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+ * </ul>
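A short, hedged sketch of how these timeout values are set and read in practice; the helper class is illustrative.

    import java.util.concurrent.TimeUnit;
    import com.mongodb.client.MongoDatabase;

    final class TimeoutExample {
        // Derives a database handle whose operations must complete within two
        // seconds; getTimeout reports the limit back in any requested unit.
        static MongoDatabase bounded(MongoDatabase db) {
            MongoDatabase bounded = db.withTimeout(2, TimeUnit.SECONDS);
            Long millis = bounded.getTimeout(TimeUnit.MILLISECONDS); // 2000
            System.out.println("timeoutMS = " + millis);
            return bounded;
        }
    }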
+ * + * @param timeUnit the time unit + * @return the timeout in the given time unit + * @since 5.2 + */ + @Alpha(Reason.CLIENT) + @Nullable + Long getTimeout(TimeUnit timeUnit); + + /** + * Create a new MongoDatabase instance with a different codec registry. + * + *

The {@link CodecRegistry} configured by this method is effectively treated by the driver as an instance of + * {@link org.bson.codecs.configuration.CodecProvider}, which {@link CodecRegistry} extends. So there is no benefit to defining + * a class that implements {@link CodecRegistry}. Rather, an application should always create {@link CodecRegistry} instances + * using the factory methods in {@link org.bson.codecs.configuration.CodecRegistries}.

+ * + * @param codecRegistry the new {@link org.bson.codecs.configuration.CodecRegistry} for the database + * @return a new MongoDatabase instance with the different codec registry + * @see org.bson.codecs.configuration.CodecRegistries + */ + MongoDatabase withCodecRegistry(CodecRegistry codecRegistry); + + /** + * Create a new MongoDatabase instance with a different read preference. + * + * @param readPreference the new {@link ReadPreference} for the database + * @return a new MongoDatabase instance with the different readPreference + */ + MongoDatabase withReadPreference(ReadPreference readPreference); + + /** + * Create a new MongoDatabase instance with a different write concern. + * + * @param writeConcern the new {@link WriteConcern} for the database + * @return a new MongoDatabase instance with the different writeConcern + */ + MongoDatabase withWriteConcern(WriteConcern writeConcern); + + /** + * Create a new MongoDatabase instance with a different read concern. + * + * @param readConcern the new {@link ReadConcern} for the database + * @return a new MongoDatabase instance with the different ReadConcern + * @since 3.2 + * @mongodb.server.release 3.2 + * @mongodb.driver.manual reference/readConcern/ Read Concern + */ + MongoDatabase withReadConcern(ReadConcern readConcern); + + /** + * Create a new MongoDatabase instance with the set time limit for the full execution of an operation. + * + *
+ * <ul>
+ *   <li>{@code 0} means infinite timeout.</li>
+ *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+ * </ul>
+ * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit + * @return a new MongoDatabase instance with the set time limit for the full execution of an operation. + * @since 5.2 + * @see #getTimeout + */ + @Alpha(Reason.CLIENT) + MongoDatabase withTimeout(long timeout, TimeUnit timeUnit); + + /** + * Gets a collection. + * + * @param collectionName the name of the collection to return + * @return the collection + * @throws IllegalArgumentException if collectionName is invalid + * @see com.mongodb.MongoNamespace#checkCollectionNameValidity(String) + */ + MongoCollection getCollection(String collectionName); + + /** + * Gets a collection, with a specific default document class. + * + * @param collectionName the name of the collection to return + * @param documentClass the default class to cast any documents returned from the database into. + * @param the type of the class to use instead of {@code Document}. + * @return the collection + */ + MongoCollection getCollection(String collectionName, Class documentClass); + + /** + * Executes the given command in the context of the current database with a read preference of {@link ReadPreference#primary()}. + * + *

Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the + * {@code timeoutMS} setting has been set.
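For concreteness, a minimal hedged sketch of running a command; the connection string, database name, and the choice of the ping command are illustrative.

    import com.mongodb.client.MongoClient;
    import com.mongodb.client.MongoClients;
    import com.mongodb.client.MongoDatabase;
    import org.bson.Document;

    final class RunCommandExample {
        public static void main(String[] args) {
            try (MongoClient client = MongoClients.create("mongodb://localhost:27017")) {
                MongoDatabase db = client.getDatabase("admin");
                // Runs on the primary by default; an overload accepts a ReadPreference.
                Document reply = db.runCommand(new Document("ping", 1));
                System.out.println(reply.toJson()); // e.g. {"ok": 1.0}
            }
        }
    }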

+ * + * @param command the command to be run + * @return the command result + */ + Document runCommand(Bson command); + + /** + * Executes the given command in the context of the current database with the given read preference. + * + *

Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the + * {@code timeoutMS} setting has been set.

+ * + * @param command the command to be run + * @param readPreference the {@link ReadPreference} to be used when executing the command + * @return the command result + */ + Document runCommand(Bson command, ReadPreference readPreference); + + /** + * Executes the given command in the context of the current database with a read preference of {@link ReadPreference#primary()}. + * + *

Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the + * {@code timeoutMS} setting has been set.

+ * + * @param command the command to be run + * @param resultClass the class to decode each document into + * @param the type of the class to use instead of {@code Document}. + * @return the command result + */ + TResult runCommand(Bson command, Class resultClass); + + /** + * Executes the given command in the context of the current database with the given read preference. + * + *

Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the + * {@code timeoutMS} setting has been set.

+ * + * @param command the command to be run + * @param readPreference the {@link ReadPreference} to be used when executing the command + * @param resultClass the class to decode each document into + * @param the type of the class to use instead of {@code Document}. + * @return the command result + */ + TResult runCommand(Bson command, ReadPreference readPreference, Class resultClass); + + /** + * Executes the given command in the context of the current database with a read preference of {@link ReadPreference#primary()}. + * + *

Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the + * {@code timeoutMS} setting has been set.

+ * + * @param clientSession the client session with which to associate this operation + * @param command the command to be run + * @return the command result + * @since 3.6 + * @mongodb.server.release 3.6 + */ + Document runCommand(ClientSession clientSession, Bson command); + + /** + * Executes the given command in the context of the current database with the given read preference. + * + *

Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the + * {@code timeoutMS} setting has been set.

+ * + * @param clientSession the client session with which to associate this operation + * @param command the command to be run + * @param readPreference the {@link ReadPreference} to be used when executing the command + * @return the command result + * @since 3.6 + * @mongodb.server.release 3.6 + */ + Document runCommand(ClientSession clientSession, Bson command, ReadPreference readPreference); + + /** + * Executes the given command in the context of the current database with a read preference of {@link ReadPreference#primary()}. + * + *

Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the + * {@code timeoutMS} setting has been set.

+ * + * @param clientSession the client session with which to associate this operation + * @param command the command to be run + * @param resultClass the class to decode each document into + * @param the type of the class to use instead of {@code Document}. + * @return the command result + * @since 3.6 + * @mongodb.server.release 3.6 + */ + TResult runCommand(ClientSession clientSession, Bson command, Class resultClass); + + /** + * Executes the given command in the context of the current database with the given read preference. + * + *

Note: The behavior of {@code runCommand} is undefined if the provided command document includes a {@code maxTimeMS} field and the + * {@code timeoutMS} setting has been set.

+ * + * @param clientSession the client session with which to associate this operation + * @param command the command to be run + * @param readPreference the {@link ReadPreference} to be used when executing the command + * @param resultClass the class to decode each document into + * @param the type of the class to use instead of {@code Document}. + * @return the command result + * @since 3.6 + * @mongodb.server.release 3.6 + */ + TResult runCommand(ClientSession clientSession, Bson command, ReadPreference readPreference, Class resultClass); + + /** + * Drops this database. + * + * @mongodb.driver.manual reference/command/dropDatabase/#dbcmd.dropDatabase Drop database + */ + void drop(); + + /** + * Drops this database. + * + * @param clientSession the client session with which to associate this operation + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual reference/command/dropDatabase/#dbcmd.dropDatabase Drop database + */ + void drop(ClientSession clientSession); + + /** + * Gets the names of all the collections in this database. + * + * @return an iterable containing all the names of all the collections in this database + * @mongodb.driver.manual reference/command/listCollections listCollections + */ + ListCollectionNamesIterable listCollectionNames(); + + /** + * Finds all the collections in this database. + * + * @return the list collections iterable interface + * @mongodb.driver.manual reference/command/listCollections listCollections + */ + ListCollectionsIterable listCollections(); + + /** + * Finds all the collections in this database. + * + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the list collections iterable interface + * @mongodb.driver.manual reference/command/listCollections listCollections + */ + ListCollectionsIterable listCollections(Class resultClass); + + /** + * Gets the names of all the collections in this database. + * + * @param clientSession the client session with which to associate this operation + * @return an iterable containing all the names of all the collections in this database + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual reference/command/listCollections listCollections + */ + ListCollectionNamesIterable listCollectionNames(ClientSession clientSession); + + /** + * Finds all the collections in this database. + * + * @param clientSession the client session with which to associate this operation + * @return the list collections iterable interface + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual reference/command/listCollections listCollections + */ + ListCollectionsIterable listCollections(ClientSession clientSession); + + /** + * Finds all the collections in this database. + * + * @param clientSession the client session with which to associate this operation + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the list collections iterable interface + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual reference/command/listCollections listCollections + */ + ListCollectionsIterable listCollections(ClientSession clientSession, Class resultClass); + + + /** + * Create a new collection with the given name. 
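A hedged sketch of creating a collection with options; the collection name and the capped-size figure are illustrative.

    import com.mongodb.client.MongoDatabase;
    import com.mongodb.client.model.CreateCollectionOptions;

    final class CreateCollectionExample {
        // Creates a 1 MiB capped collection named "log".
        static void createCappedLog(MongoDatabase db) {
            db.createCollection("log",
                    new CreateCollectionOptions().capped(true).sizeInBytes(1024 * 1024));
        }
    }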
+ * + * @param collectionName the name for the new collection to create + * @mongodb.driver.manual reference/command/create Create Command + */ + void createCollection(String collectionName); + + /** + * Create a new collection with the selected options + * + * @param collectionName the name for the new collection to create + * @param createCollectionOptions various options for creating the collection + * @mongodb.driver.manual reference/command/create Create Command + */ + void createCollection(String collectionName, CreateCollectionOptions createCollectionOptions); + + /** + * Create a new collection with the given name. + * + * @param clientSession the client session with which to associate this operation + * @param collectionName the name for the new collection to create + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual reference/command/create Create Command + */ + void createCollection(ClientSession clientSession, String collectionName); + + /** + * Create a new collection with the selected options + * + * @param clientSession the client session with which to associate this operation + * @param collectionName the name for the new collection to create + * @param createCollectionOptions various options for creating the collection + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual reference/command/create Create Command + */ + void createCollection(ClientSession clientSession, String collectionName, CreateCollectionOptions createCollectionOptions); + + /** + * Creates a view with the given name, backing collection/view name, and aggregation pipeline that defines the view. + * + * @param viewName the name of the view to create + * @param viewOn the backing collection/view for the view + * @param pipeline the pipeline that defines the view + * @since 3.4 + * @mongodb.server.release 3.4 + * @mongodb.driver.manual reference/command/create Create Command + */ + void createView(String viewName, String viewOn, List pipeline); + + /** + * Creates a view with the given name, backing collection/view name, aggregation pipeline, and options that defines the view. + * + * @param viewName the name of the view to create + * @param viewOn the backing collection/view for the view + * @param pipeline the pipeline that defines the view + * @param createViewOptions various options for creating the view + * @since 3.4 + * @mongodb.server.release 3.4 + * @mongodb.driver.manual reference/command/create Create Command + */ + void createView(String viewName, String viewOn, List pipeline, CreateViewOptions createViewOptions); + + /** + * Creates a view with the given name, backing collection/view name, and aggregation pipeline that defines the view. + * + * @param clientSession the client session with which to associate this operation + * @param viewName the name of the view to create + * @param viewOn the backing collection/view for the view + * @param pipeline the pipeline that defines the view + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual reference/command/create Create Command + */ + void createView(ClientSession clientSession, String viewName, String viewOn, List pipeline); + + /** + * Creates a view with the given name, backing collection/view name, aggregation pipeline, and options that defines the view. 
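A hedged sketch of createView, assuming a hypothetical "orders" backing collection.

    import java.util.Collections;
    import com.mongodb.client.MongoDatabase;
    import com.mongodb.client.model.Aggregates;
    import com.mongodb.client.model.Filters;

    final class CreateViewExample {
        // Defines a read-only view exposing only completed orders.
        static void createCompletedOrdersView(MongoDatabase db) {
            db.createView("completedOrders", "orders",
                    Collections.singletonList(Aggregates.match(Filters.eq("status", "complete"))));
        }
    }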
+ * + * @param clientSession the client session with which to associate this operation + * @param viewName the name of the view to create + * @param viewOn the backing collection/view for the view + * @param pipeline the pipeline that defines the view + * @param createViewOptions various options for creating the view + * @since 3.6 + * @mongodb.server.release 3.6 + * @mongodb.driver.manual reference/command/create Create Command + */ + void createView(ClientSession clientSession, String viewName, String viewOn, List pipeline, + CreateViewOptions createViewOptions); + + /** + * Creates a change stream for this database. + * + * @return the change stream iterable + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.8 + * @mongodb.server.release 4.0 + */ + ChangeStreamIterable watch(); + + /** + * Creates a change stream for this database. + * + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.8 + * @mongodb.server.release 4.0 + */ + ChangeStreamIterable watch(Class resultClass); + + /** + * Creates a change stream for this database. + * + * @param pipeline the aggregation pipeline to apply to the change stream. + * @return the change stream iterable + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.8 + * @mongodb.server.release 4.0 + */ + ChangeStreamIterable watch(List pipeline); + + /** + * Creates a change stream for this database. + * + * @param pipeline the aggregation pipeline to apply to the change stream + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @mongodb.driver.dochub core/changestreams Change Streams + * @since 3.8 + * @mongodb.server.release 4.0 + */ + ChangeStreamIterable watch(List pipeline, Class resultClass); + + /** + * Creates a change stream for this database. + * + * @param clientSession the client session with which to associate this operation + * @return the change stream iterable + * @since 3.8 + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + */ + ChangeStreamIterable watch(ClientSession clientSession); + + /** + * Creates a change stream for this database. + * + * @param clientSession the client session with which to associate this operation + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @since 3.8 + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + */ + ChangeStreamIterable watch(ClientSession clientSession, Class resultClass); + + /** + * Creates a change stream for this database. + * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream. + * @return the change stream iterable + * @since 3.8 + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + */ + ChangeStreamIterable watch(ClientSession clientSession, List pipeline); + + /** + * Creates a change stream for this database. 
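To make the change-stream surface concrete, a minimal hedged sketch; the insert-only pipeline is illustrative, and change streams require a replica set or sharded cluster.

    import java.util.Collections;
    import com.mongodb.client.MongoCursor;
    import com.mongodb.client.MongoDatabase;
    import com.mongodb.client.model.Aggregates;
    import com.mongodb.client.model.Filters;
    import com.mongodb.client.model.changestream.ChangeStreamDocument;
    import org.bson.Document;

    final class WatchExample {
        // Blocks in hasNext() until the next matching change arrives.
        static void printInserts(MongoDatabase db) {
            try (MongoCursor<ChangeStreamDocument<Document>> cursor = db.watch(
                    Collections.singletonList(
                            Aggregates.match(Filters.eq("operationType", "insert")))).cursor()) {
                while (cursor.hasNext()) {
                    System.out.println(cursor.next().getFullDocument());
                }
            }
        }
    }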
+ * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline to apply to the change stream + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return the change stream iterable + * @since 3.8 + * @mongodb.server.release 4.0 + * @mongodb.driver.dochub core/changestreams Change Streams + */ + ChangeStreamIterable watch(ClientSession clientSession, List pipeline, Class resultClass); + + /** + * Runs an aggregation framework pipeline on the database for pipeline stages + * that do not require an underlying collection, such as {@code $currentOp} and {@code $listLocalSessions}. + * + * @param pipeline the aggregation pipeline + * @return an iterable containing the result of the aggregation operation + * @since 3.10 + * @mongodb.driver.manual reference/command/aggregate/#dbcmd.aggregate Aggregate Command + * @mongodb.server.release 3.6 + */ + AggregateIterable aggregate(List pipeline); + + /** + * Runs an aggregation framework pipeline on the database for pipeline stages + * that do not require an underlying collection, such as {@code $currentOp} and {@code $listLocalSessions}. + * + * @param pipeline the aggregation pipeline + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return an iterable containing the result of the aggregation operation + * @since 3.10 + * @mongodb.driver.manual reference/command/aggregate/#dbcmd.aggregate Aggregate Command + * @mongodb.server.release 3.6 + */ + AggregateIterable aggregate(List pipeline, Class resultClass); + + /** + * Runs an aggregation framework pipeline on the database for pipeline stages + * that do not require an underlying collection, such as {@code $currentOp} and {@code $listLocalSessions}. + * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline + * @return an iterable containing the result of the aggregation operation + * @since 3.10 + * @mongodb.driver.manual reference/command/aggregate/#dbcmd.aggregate Aggregate Command + * @mongodb.server.release 3.6 + */ + AggregateIterable aggregate(ClientSession clientSession, List pipeline); + + /** + * Runs an aggregation framework pipeline on the database for pipeline stages + * that do not require an underlying collection, such as {@code $currentOp} and {@code $listLocalSessions}. + * + * @param clientSession the client session with which to associate this operation + * @param pipeline the aggregation pipeline + * @param resultClass the class to decode each document into + * @param the target document type of the iterable. + * @return an iterable containing the result of the aggregation operation + * @since 3.10 + * @mongodb.driver.manual reference/command/aggregate/#dbcmd.aggregate Aggregate Command + * @mongodb.server.release 3.6 + */ + AggregateIterable aggregate(ClientSession clientSession, List pipeline, Class resultClass); + +} diff --git a/driver-sync/src/main/com/mongodb/client/MongoIterable.java b/driver-sync/src/main/com/mongodb/client/MongoIterable.java new file mode 100644 index 00000000000..e69d499c8f7 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/MongoIterable.java @@ -0,0 +1,81 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.Function; +import com.mongodb.lang.Nullable; + +import java.util.Collection; + +/** + *The MongoIterable is the results from an operation, such as a query. + * + * @param The type that this iterable will decode documents to. + * @since 3.0 + */ +public interface MongoIterable extends Iterable { + + /** + * @return A {@link MongoCursor} that must be {@linkplain MongoCursor#close() closed}. + */ + @Override + MongoCursor iterator(); + + /** + * Returns a cursor used for iterating over elements of type {@code TResult}. The cursor is primarily used for change streams. + * + * @return a cursor equivalent to that returned from {@link #iterator()}. + * @since 3.11 + */ + MongoCursor cursor(); + + /** + * Helper to return the first item in the iterator or null. + * + * @return T the first item or null. + */ + @Nullable + TResult first(); + + /** + * Maps this iterable from the source document type to the target document type. + * + * @param mapper a function that maps from the source to the target document type + * @param the target document type + * @return an iterable which maps T to U + */ + MongoIterable map(Function mapper); + + /** + * Iterates over all the documents, adding each to the given target. + * + * @param target the collection to insert into + * @param the collection type + * @return the target + */ + > A into(A target); + + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size + * @return this + * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size + */ + MongoIterable batchSize(int batchSize); + +} diff --git a/driver-sync/src/main/com/mongodb/client/SynchronousContextProvider.java b/driver-sync/src/main/com/mongodb/client/SynchronousContextProvider.java new file mode 100644 index 00000000000..bd18cdf732b --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/SynchronousContextProvider.java @@ -0,0 +1,36 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.ContextProvider; +import com.mongodb.RequestContext; +import com.mongodb.annotations.ThreadSafe; + +/** + * A {@code ContextProvider} for synchronous clients. + * + * @since 4.4 + */ +@ThreadSafe +public interface SynchronousContextProvider extends ContextProvider { + /** + * Get the request context. 
+ * + * @return the request context + */ + RequestContext getContext(); +} diff --git a/driver-sync/src/main/com/mongodb/client/TransactionBody.java b/driver-sync/src/main/com/mongodb/client/TransactionBody.java new file mode 100644 index 00000000000..bd2e801f282 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/TransactionBody.java @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +/** + * A functional interface representing the body of a transaction. Implementations of this interface should be idempotent in order to + * support transaction retry logic. + * + * @param the return type + * @see ClientSession#withTransaction(TransactionBody) + * @since 3.10 + */ +public interface TransactionBody { + + /** + * Execute the body of the transaction. + * + * @return the result of the transaction body + */ + T execute(); +} diff --git a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSBucket.java b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSBucket.java new file mode 100644 index 00000000000..5335ed4ce91 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSBucket.java @@ -0,0 +1,774 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.gridfs; + +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.annotations.Alpha; +import com.mongodb.annotations.Reason; +import com.mongodb.annotations.ThreadSafe; +import com.mongodb.client.ClientSession; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.gridfs.model.GridFSDownloadOptions; +import com.mongodb.client.gridfs.model.GridFSUploadOptions; +import com.mongodb.lang.Nullable; +import org.bson.BsonValue; +import org.bson.conversions.Bson; +import org.bson.types.ObjectId; + +import java.io.InputStream; +import java.io.OutputStream; +import java.util.concurrent.TimeUnit; + +/** + * Represents a GridFS Bucket + * + * @since 3.1 + */ +@ThreadSafe +public interface GridFSBucket { + + /** + * The bucket name. + * + * @return the bucket name + */ + String getBucketName(); + + /** + * Sets the chunk size in bytes. Defaults to 255. + * + * @return the chunk size in bytes. + */ + int getChunkSizeBytes(); + + /** + * Get the write concern for the GridFSBucket. 
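Looking back at the TransactionBody interface above, a hedged sketch of passing one (as a lambda) to ClientSession.withTransaction; the account documents are purely illustrative, and the body should be idempotent because the driver may retry it.

    import com.mongodb.client.ClientSession;
    import com.mongodb.client.MongoClient;
    import com.mongodb.client.MongoCollection;
    import org.bson.Document;

    final class TransactionExample {
        static String transfer(MongoClient client, MongoCollection<Document> accounts) {
            try (ClientSession session = client.startSession()) {
                return session.withTransaction(() -> {
                    accounts.updateOne(session, new Document("_id", "a"),
                            new Document("$inc", new Document("balance", -100)));
                    accounts.updateOne(session, new Document("_id", "b"),
                            new Document("$inc", new Document("balance", 100)));
                    return "transfer committed";
                });
            }
        }
    }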
+ * + * @return the {@link com.mongodb.WriteConcern} + */ + WriteConcern getWriteConcern(); + + /** + * Get the read preference for the GridFSBucket. + * + * @return the {@link com.mongodb.ReadPreference} + */ + ReadPreference getReadPreference(); + + /** + * Get the read concern for the GridFSBucket. + * + * @return the {@link com.mongodb.ReadConcern} + * @since 3.2 + * @mongodb.server.release 3.2 + * @mongodb.driver.manual reference/readConcern/ Read Concern + */ + ReadConcern getReadConcern(); + + /** + * The time limit for the full execution of an operation. + * + *

If not null, the following deprecated options will be ignored:
+ * {@code waitQueueTimeoutMS}, {@code socketTimeoutMS}, {@code wTimeoutMS}, {@code maxTimeMS} and {@code maxCommitTimeMS}

+ * + *
+ * + * @param timeUnit the time unit + * @return the timeout in the given time unit + * @since 4.x + */ + @Alpha(Reason.CLIENT) + @Nullable + Long getTimeout(TimeUnit timeUnit); + + /** + * Create a new GridFSBucket instance with a new chunk size in bytes. + * + * @param chunkSizeBytes the new chunk size in bytes. + * @return a new GridFSBucket instance with the different chunk size in bytes + */ + GridFSBucket withChunkSizeBytes(int chunkSizeBytes); + + /** + * Create a new GridFSBucket instance with a different read preference. + * + * @param readPreference the new {@link ReadPreference} for the GridFSBucket + * @return a new GridFSBucket instance with the different readPreference + */ + GridFSBucket withReadPreference(ReadPreference readPreference); + + /** + * Create a new GridFSBucket instance with a different write concern. + * + * @param writeConcern the new {@link WriteConcern} for the GridFSBucket + * @return a new GridFSBucket instance with the different writeConcern + */ + GridFSBucket withWriteConcern(WriteConcern writeConcern); + + /** + * Create a new GridFSBucket instance with a different read concern. + * + * @param readConcern the new {@link ReadConcern} for the GridFSBucket + * @return a new GridFSBucket instance with the different ReadConcern + * @since 3.2 + * @mongodb.server.release 3.2 + * @mongodb.driver.manual reference/readConcern/ Read Concern + */ + GridFSBucket withReadConcern(ReadConcern readConcern); + + /** + * Create a new GridFSBucket instance with the set time limit for the full execution of an operation. + * + *
+ * <ul>
+ *   <li>{@code 0} means infinite timeout.</li>
+ *   <li>{@code > 0} The time limit to use for the full execution of an operation.</li>
+ * </ul>
+ * + * @param timeout the timeout, which must be greater than or equal to 0 + * @param timeUnit the time unit + * @return a new GridFSBucket instance with the set time limit for the full execution of an operation + * @since 4.x + * @see #getTimeout + */ + @Alpha(Reason.CLIENT) + GridFSBucket withTimeout(long timeout, TimeUnit timeUnit); + + /** + * Opens a Stream that the application can write the contents of the file to. + *

+ * As the application writes the contents to the returned Stream, the contents are uploaded as chunks in the chunks collection. When + * the application signals it is done writing the contents of the file by calling close on the returned Stream, a files collection + * document is created in the files collection. + *
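A minimal hedged sketch of the upload-stream pattern just described; the filename and payload are illustrative. Closing the stream is what creates the files collection document.

    import java.nio.charset.StandardCharsets;
    import com.mongodb.client.gridfs.GridFSBucket;
    import com.mongodb.client.gridfs.GridFSUploadStream;
    import org.bson.types.ObjectId;

    final class OpenUploadStreamExample {
        static ObjectId write(GridFSBucket bucket) {
            try (GridFSUploadStream stream = bucket.openUploadStream("notes.txt")) {
                stream.write("hello gridfs".getBytes(StandardCharsets.UTF_8));
                return stream.getObjectId(); // id is assigned before close
            }
        }
    }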

+ * + * @param filename the filename for the stream + * @return the GridFSUploadStream that provides the ObjectId for the file to be uploaded and the Stream to which the + * application will write the contents. + */ + GridFSUploadStream openUploadStream(String filename); + + /** + * Opens a Stream that the application can write the contents of the file to. + *

+ * As the application writes the contents to the returned Stream, the contents are uploaded as chunks in the chunks collection. When + * the application signals it is done writing the contents of the file by calling close on the returned Stream, a files collection + * document is created in the files collection. + *

+ * @param filename the filename for the stream + * @param options the GridFSUploadOptions + * @return the GridFSUploadStream that provides the ObjectId for the file to be uploaded and the Stream to which the + * application will write the contents. + */ + GridFSUploadStream openUploadStream(String filename, GridFSUploadOptions options); + + /** + * Opens a Stream that the application can write the contents of the file to. + *

+ * As the application writes the contents to the returned Stream, the contents are uploaded as chunks in the chunks collection. When + * the application signals it is done writing the contents of the file by calling close on the returned Stream, a files collection + * document is created in the files collection. + *

+ * + * @param id the custom id value of the file + * @param filename the filename for the stream + * @return the GridFSUploadStream that provides the ObjectId for the file to be uploaded and the Stream to which the + * application will write the contents. + * @since 3.3 + */ + GridFSUploadStream openUploadStream(BsonValue id, String filename); + + /** + * Opens a Stream that the application can write the contents of the file to. + *

+ * As the application writes the contents to the returned Stream, the contents are uploaded as chunks in the chunks collection. When + * the application signals it is done writing the contents of the file by calling close on the returned Stream, a files collection + * document is created in the files collection. + *

+ * + * @param id the custom id value of the file + * @param filename the filename for the stream + * @param options the GridFSUploadOptions + * @return the GridFSUploadStream that includes the _id for the file to be uploaded and the Stream to which the + * application will write the contents. + * @since 3.3 + */ + GridFSUploadStream openUploadStream(BsonValue id, String filename, GridFSUploadOptions options); + + /** + * Opens a Stream that the application can write the contents of the file to. + *

+ * As the application writes the contents to the returned Stream, the contents are uploaded as chunks in the chunks collection. When + * the application signals it is done writing the contents of the file by calling close on the returned Stream, a files collection + * document is created in the files collection. + *

+ * + * @param clientSession the client session with which to associate this operation + * @param filename the filename for the stream + * @return the GridFSUploadStream that provides the ObjectId for the file to be uploaded and the Stream to which the + * application will write the contents. + * @since 3.6 + * @mongodb.server.release 3.6 + */ + GridFSUploadStream openUploadStream(ClientSession clientSession, String filename); + + /** + * Opens a Stream that the application can write the contents of the file to. + *

+ * As the application writes the contents to the returned Stream, the contents are uploaded as chunks in the chunks collection. When + * the application signals it is done writing the contents of the file by calling close on the returned Stream, a files collection + * document is created in the files collection. + *

+ * + * @param clientSession the client session with which to associate this operation + * @param filename the filename for the stream + * @param options the GridFSUploadOptions + * @return the GridFSUploadStream that provides the ObjectId for the file to be uploaded and the Stream to which the + * application will write the contents. + * @since 3.6 + * @mongodb.server.release 3.6 + */ + GridFSUploadStream openUploadStream(ClientSession clientSession, String filename, GridFSUploadOptions options); + + /** + * Opens a Stream that the application can write the contents of the file to. + *

+ * As the application writes the contents to the returned Stream, the contents are uploaded as chunks in the chunks collection. When + * the application signals it is done writing the contents of the file by calling close on the returned Stream, a files collection + * document is created in the files collection. + *

+ * + * @param clientSession the client session with which to associate this operation + * @param id the custom id value of the file + * @param filename the filename for the stream + * @return the GridFSUploadStream that provides the ObjectId for the file to be uploaded and the Stream to which the + * application will write the contents. + * @since 3.6 + * @mongodb.server.release 3.6 + */ + GridFSUploadStream openUploadStream(ClientSession clientSession, BsonValue id, String filename); + + /** + * Opens a Stream that the application can write the contents of the file to. + *

+ * As the application writes the contents to the returned Stream, the contents are uploaded as chunks in the chunks collection. When + * the application signals it is done writing the contents of the file by calling close on the returned Stream, a files collection + * document is created in the files collection. + *

+ * + * @param clientSession the client session with which to associate this operation + * @param id the custom id value of the file + * @param filename the filename for the stream + * @return the GridFSUploadStream that provides the ObjectId for the file to be uploaded and the Stream to which the + * application will write the contents. + * @since 3.6 + * @mongodb.server.release 3.6 + */ + GridFSUploadStream openUploadStream(ClientSession clientSession, ObjectId id, String filename); + + /** + * Opens a Stream that the application can write the contents of the file to. + *

+ * As the application writes the contents to the returned Stream, the contents are uploaded as chunks in the chunks collection. When + * the application signals it is done writing the contents of the file by calling close on the returned Stream, a files collection + * document is created in the files collection. + *

+ * + * @param clientSession the client session with which to associate this operation + * @param id the custom id value of the file + * @param filename the filename for the stream + * @param options the GridFSUploadOptions + * @return the GridFSUploadStream that includes the _id for the file to be uploaded and the Stream to which the + * application will write the contents. + * @since 3.6 + * @mongodb.server.release 3.6 + */ + GridFSUploadStream openUploadStream(ClientSession clientSession, BsonValue id, String filename, GridFSUploadOptions options); + + /** + * Uploads the contents of the given {@code InputStream} to a GridFS bucket. + *

+ * Reads the contents of the user file from the {@code Stream} and uploads it as chunks in the chunks collection. After all the + * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection. + *

+ * + * @param filename the filename for the stream + * @param source the Stream providing the file data + * @return the ObjectId of the uploaded file. + */ + ObjectId uploadFromStream(String filename, InputStream source); + + /** + * Uploads the contents of the given {@code InputStream} to a GridFS bucket. + *

+ * Reads the contents of the user file from the {@code Stream} and uploads it as chunks in the chunks collection. After all the + * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection. + *

+ * + * @param filename the filename for the stream + * @param source the Stream providing the file data + * @param options the GridFSUploadOptions + * @return the ObjectId of the uploaded file. + */ + ObjectId uploadFromStream(String filename, InputStream source, GridFSUploadOptions options); + + /** + * Uploads the contents of the given {@code InputStream} to a GridFS bucket. + *

+ * Reads the contents of the user file from the {@code Stream} and uploads it as chunks in the chunks collection. After all the + * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection. + *

+ * +

Note: When this {@link GridFSBucket} is set with an operation timeout (via a timeout inherited from {@link MongoDatabase}
+ * settings or {@link #withTimeout(long, TimeUnit)}), timeout breaches may occur due to the {@link InputStream}
+ * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit.
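A hedged sketch of uploadFromStream, assuming a local file and an "images" bucket name.

    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    import com.mongodb.client.MongoDatabase;
    import com.mongodb.client.gridfs.GridFSBucket;
    import com.mongodb.client.gridfs.GridFSBuckets;
    import org.bson.types.ObjectId;

    final class GridFSUploadExample {
        // Streams a local file into GridFS and returns the generated id.
        static ObjectId upload(MongoDatabase db) throws Exception {
            GridFSBucket bucket = GridFSBuckets.create(db, "images");
            try (InputStream in = Files.newInputStream(Paths.get("mongodb.png"))) {
                return bucket.uploadFromStream("mongodb.png", in);
            }
        }
    }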

+ * + * @param id the custom id value of the file + * @param filename the filename for the stream + * @param source the Stream providing the file data + * @since 3.3 + */ + void uploadFromStream(BsonValue id, String filename, InputStream source); + + /** + * Uploads the contents of the given {@code InputStream} to a GridFS bucket. + *

+ * Reads the contents of the user file from the {@code Stream} and uploads it as chunks in the chunks collection. After all the + * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection. + *

+ * +

Note: When this {@link GridFSBucket} is set with an operation timeout (via a timeout inherited from {@link MongoDatabase}
+ * settings or {@link #withTimeout(long, TimeUnit)}), timeout breaches may occur due to the {@link InputStream}
+ * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit.

+ * + * @param id the custom id value of the file + * @param filename the filename for the stream + * @param source the Stream providing the file data + * @param options the GridFSUploadOptions + * @since 3.3 + */ + void uploadFromStream(BsonValue id, String filename, InputStream source, GridFSUploadOptions options); + + /** + * Uploads the contents of the given {@code InputStream} to a GridFS bucket. + *

+ * Reads the contents of the user file from the {@code Stream} and uploads it as chunks in the chunks collection. After all the + * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection. + *

+ * +

Note: When this {@link GridFSBucket} is set with an operation timeout (via a timeout inherited from {@link MongoDatabase}
+ * settings or {@link #withTimeout(long, TimeUnit)}), timeout breaches may occur due to the {@link InputStream}
+ * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit.

+ * + * @param clientSession the client session with which to associate this operation + * @param filename the filename for the stream + * @param source the Stream providing the file data + * @return the ObjectId of the uploaded file. + * @since 3.6 + * @mongodb.server.release 3.6 + */ + ObjectId uploadFromStream(ClientSession clientSession, String filename, InputStream source); + + /** + * Uploads the contents of the given {@code InputStream} to a GridFS bucket. + *

+ * Reads the contents of the user file from the {@code Stream} and uploads it as chunks in the chunks collection. After all the + * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection. + *

+ * +

Note: When this {@link GridFSBucket} is set with an operation timeout (via a timeout inherited from {@link MongoDatabase}
+ * settings or {@link #withTimeout(long, TimeUnit)}), timeout breaches may occur due to the {@link InputStream}
+ * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit.

+ * + * @param clientSession the client session with which to associate this operation + * @param filename the filename for the stream + * @param source the Stream providing the file data + * @param options the GridFSUploadOptions + * @return the ObjectId of the uploaded file. + * @since 3.6 + * @mongodb.server.release 3.6 + */ + ObjectId uploadFromStream(ClientSession clientSession, String filename, InputStream source, GridFSUploadOptions options); + + /** + * Uploads the contents of the given {@code InputStream} to a GridFS bucket. + *

+ * Reads the contents of the user file from the {@code Stream} and uploads it as chunks in the chunks collection. After all the + * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection. + *

+ * +

Note: When this {@link GridFSBucket} is set with an operation timeout (via a timeout inherited from {@link MongoDatabase}
+ * settings or {@link #withTimeout(long, TimeUnit)}), timeout breaches may occur due to the {@link InputStream}
+ * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit.

+ * + * @param clientSession the client session with which to associate this operation + * @param id the custom id value of the file + * @param filename the filename for the stream + * @param source the Stream providing the file data + * @since 3.6 + * @mongodb.server.release 3.6 + */ + void uploadFromStream(ClientSession clientSession, BsonValue id, String filename, InputStream source); + + /** + * Uploads the contents of the given {@code InputStream} to a GridFS bucket. + *

+ * Reads the contents of the user file from the {@code Stream} and uploads it as chunks in the chunks collection. After all the + * chunks have been uploaded, it creates a files collection document for {@code filename} in the files collection. + *

+ * +

Note: When this {@link GridFSBucket} is set with an operation timeout (via a timeout inherited from {@link MongoDatabase}
+ * settings or {@link #withTimeout(long, TimeUnit)}), timeout breaches may occur due to the {@link InputStream}
+ * lacking inherent read timeout support, which might extend the operation beyond the specified timeout limit.

+ * + * @param clientSession the client session with which to associate this operation + * @param id the custom id value of the file + * @param filename the filename for the stream + * @param source the Stream providing the file data + * @param options the GridFSUploadOptions + * @since 3.6 + * @mongodb.server.release 3.6 + */ + void uploadFromStream(ClientSession clientSession, BsonValue id, String filename, InputStream source, GridFSUploadOptions options); + + /** + * Opens a Stream from which the application can read the contents of the stored file specified by {@code id}. + * + * @param id the ObjectId of the file to be put into a stream. + * @return the stream + */ + GridFSDownloadStream openDownloadStream(ObjectId id); + + /** + * Opens a Stream from which the application can read the contents of the stored file specified by {@code id}. + * + * @param id the custom id value of the file, to be put into a stream. + * @return the stream + */ + GridFSDownloadStream openDownloadStream(BsonValue id); + + /** + * Opens a Stream from which the application can read the contents of the latest version of the stored file specified by the + * {@code filename}. + * + * @param filename the name of the file to be downloaded + * @return the stream + * @since 3.3 + */ + GridFSDownloadStream openDownloadStream(String filename); + + /** + * Opens a Stream from which the application can read the contents of the stored file specified by {@code filename} and the revision + * in {@code options}. + * + * @param filename the name of the file to be downloaded + * @param options the download options + * @return the stream + * @since 3.3 + */ + GridFSDownloadStream openDownloadStream(String filename, GridFSDownloadOptions options); + + /** + * Opens a Stream from which the application can read the contents of the stored file specified by {@code id}. + * + * @param clientSession the client session with which to associate this operation + * @param id the ObjectId of the file to be put into a stream. + * @return the stream + * @since 3.6 + * @mongodb.server.release 3.6 + */ + GridFSDownloadStream openDownloadStream(ClientSession clientSession, ObjectId id); + + /** + * Opens a Stream from which the application can read the contents of the stored file specified by {@code id}. + * + * @param clientSession the client session with which to associate this operation + * @param id the custom id value of the file, to be put into a stream. + * @return the stream + * @since 3.6 + * @mongodb.server.release 3.6 + */ + GridFSDownloadStream openDownloadStream(ClientSession clientSession, BsonValue id); + + /** + * Opens a Stream from which the application can read the contents of the latest version of the stored file specified by the + * {@code filename}. + * + * @param clientSession the client session with which to associate this operation + * @param filename the name of the file to be downloaded + * @return the stream + * @since 3.6 + * @mongodb.server.release 3.6 + */ + GridFSDownloadStream openDownloadStream(ClientSession clientSession, String filename); + + /** + * Opens a Stream from which the application can read the contents of the stored file specified by {@code filename} and the revision + * in {@code options}. 
+ * + * @param clientSession the client session with which to associate this operation + * @param filename the name of the file to be downloaded + * @param options the download options + * @return the stream + * @since 3.6 + * @mongodb.server.release 3.6 + */ + GridFSDownloadStream openDownloadStream(ClientSession clientSession, String filename, GridFSDownloadOptions options); + + /** + * Downloads the contents of the stored file specified by {@code id} and writes the contents to the {@code destination} Stream. + * + * @param id the ObjectId of the file to be written to the destination stream + * @param destination the destination stream. Its {@link OutputStream#flush()} is not guaranteed to be called by this method. + */ + void downloadToStream(ObjectId id, OutputStream destination); + + /** + * Downloads the contents of the stored file specified by {@code id} and writes the contents to the {@code destination} Stream. + * + * @param id the custom id of the file, to be written to the destination stream + * @param destination the destination stream. Its {@link OutputStream#flush()} is not guaranteed to be called by this method. + */ + void downloadToStream(BsonValue id, OutputStream destination); + + /** + * Downloads the contents of the latest version of the stored file specified by {@code filename} and writes the contents to + * the {@code destination} Stream. + * + * @param filename the name of the file to be downloaded + * @param destination the destination stream. Its {@link OutputStream#flush()} is not guaranteed to be called by this method. + * @since 3.3 + */ + void downloadToStream(String filename, OutputStream destination); + + /** + * Downloads the contents of the stored file specified by {@code filename} and by the revision in {@code options} and writes the + * contents to the {@code destination} Stream. + * + * @param filename the name of the file to be downloaded + * @param destination the destination stream + * @param options the download options. Its {@link OutputStream#flush()} is not guaranteed to be called by this method. + * @since 3.3 + */ + void downloadToStream(String filename, OutputStream destination, GridFSDownloadOptions options); + + /** + * Downloads the contents of the stored file specified by {@code id} and writes the contents to the {@code destination} Stream. + * + * @param clientSession the client session with which to associate this operation + * @param id the ObjectId of the file to be written to the destination stream + * @param destination the destination stream. Its {@link OutputStream#flush()} is not guaranteed to be called by this method. + * @since 3.6 + * @mongodb.server.release 3.6 + */ + void downloadToStream(ClientSession clientSession, ObjectId id, OutputStream destination); + + /** + * Downloads the contents of the stored file specified by {@code id} and writes the contents to the {@code destination} Stream. + * + * @param clientSession the client session with which to associate this operation + * @param id the custom id of the file, to be written to the destination stream + * @param destination the destination stream. Its {@link OutputStream#flush()} is not guaranteed to be called by this method. + * @since 3.6 + * @mongodb.server.release 3.6 + */ + void downloadToStream(ClientSession clientSession, BsonValue id, OutputStream destination); + + /** + * Downloads the contents of the latest version of the stored file specified by {@code filename} and writes the contents to + * the {@code destination} Stream. 
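A hedged sketch of downloading the latest revision by filename; the paths are illustrative, and the explicit flush matters because this method does not guarantee one.

    import java.io.OutputStream;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    import com.mongodb.client.gridfs.GridFSBucket;

    final class GridFSDownloadExample {
        static void download(GridFSBucket bucket) throws Exception {
            try (OutputStream out = Files.newOutputStream(Paths.get("mongodb.png"))) {
                bucket.downloadToStream("mongodb.png", out);
                out.flush(); // the bucket is not guaranteed to flush for us
            }
        }
    }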
+ * + * @param clientSession the client session with which to associate this operation + * @param filename the name of the file to be downloaded + * @param destination the destination stream. Its {@link OutputStream#flush()} is not guaranteed to be called by this method. + * @since 3.6 + * @mongodb.server.release 3.6 + */ + void downloadToStream(ClientSession clientSession, String filename, OutputStream destination); + + /** + * Downloads the contents of the stored file specified by {@code filename} and by the revision in {@code options} and writes the + * contents to the {@code destination} Stream. + * + * @param clientSession the client session with which to associate this operation + * @param filename the name of the file to be downloaded + * @param destination the destination stream. Its {@link OutputStream#flush()} is not guaranteed to be called by this method. + * @param options the download options + * @since 3.6 + * @mongodb.server.release 3.6 + */ + void downloadToStream(ClientSession clientSession, String filename, OutputStream destination, GridFSDownloadOptions options); + + /** + * Finds all documents in the files collection. + * + * @return the GridFS find iterable interface + * @mongodb.driver.manual tutorial/query-documents/ Find + */ + GridFSFindIterable find(); + + /** + * Finds all documents in the collection that match the filter. + * + *
<p>
+     * Below is an example of filtering against the filename and some nested metadata that can also be stored along with the file data:
+     * <pre>
+     *  {@code
+     *      Filters.and(Filters.eq("filename", "mongodb.png"), Filters.eq("metadata.contentType", "image/png"));
+     *  }
+     *  </pre>
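+     * <p>
+     * As an illustrative sketch (assuming an existing {@code gridFSBucket}), the resulting iterable can be consumed directly:
+     * <pre>
+     *  {@code
+     *      gridFSBucket.find(Filters.eq("metadata.contentType", "image/png"))
+     *              .forEach(file -> System.out.println(file.getFilename()));
+     *  }
+     *  </pre>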
+ * + * @param filter the query filter + * @return the GridFS find iterable interface + * @see com.mongodb.client.model.Filters + */ + GridFSFindIterable find(Bson filter); + + /** + * Finds all documents in the files collection. + * + * @param clientSession the client session with which to associate this operation + * @return the GridFS find iterable interface + * @mongodb.driver.manual tutorial/query-documents/ Find + * @since 3.6 + * @mongodb.server.release 3.6 + */ + GridFSFindIterable find(ClientSession clientSession); + + /** + * Finds all documents in the collection that match the filter. + * + *
<p>
+     * Below is an example of filtering against the filename and some nested metadata that can also be stored along with the file data:
+     * <pre>
+     *  {@code
+     *      Filters.and(Filters.eq("filename", "mongodb.png"), Filters.eq("metadata.contentType", "image/png"));
+     *  }
+     *  </pre>
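+     * <p>
+     * An illustrative sketch (assuming an existing {@code mongoClient} and {@code gridFSBucket}) of running the query in a session:
+     * <pre>
+     *  {@code
+     *      try (ClientSession clientSession = mongoClient.startSession()) {
+     *          gridFSBucket.find(clientSession, Filters.eq("filename", "mongodb.png"))
+     *                  .forEach(file -> System.out.println(file.getLength()));
+     *      }
+     *  }
+     *  </pre>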
+ * + * + * @param clientSession the client session with which to associate this operation + * @param filter the query filter + * @return the GridFS find iterable interface + * @see com.mongodb.client.model.Filters + * @since 3.6 + * @mongodb.server.release 3.6 + */ + GridFSFindIterable find(ClientSession clientSession, Bson filter); + + /** + * Given a {@code id}, delete this stored file's files collection document and associated chunks from a GridFS bucket. + * @param id the ObjectId of the file to be deleted + */ + void delete(ObjectId id); + + /** + * Given a {@code id}, delete this stored file's files collection document and associated chunks from a GridFS bucket. + * @param id the id of the file to be deleted + * @since 3.3 + */ + void delete(BsonValue id); + + /** + * Given a {@code id}, delete this stored file's files collection document and associated chunks from a GridFS bucket. + * + * @param clientSession the client session with which to associate this operation + * @param id the ObjectId of the file to be deleted + * @since 3.6 + * @mongodb.server.release 3.6 + */ + void delete(ClientSession clientSession, ObjectId id); + + /** + * Given a {@code id}, delete this stored file's files collection document and associated chunks from a GridFS bucket. + * + * @param clientSession the client session with which to associate this operation + * @param id the id of the file to be deleted + * @since 3.6 + * @mongodb.server.release 3.6 + */ + void delete(ClientSession clientSession, BsonValue id); + + /** + * Renames the stored file with the specified {@code id}. + * + * @param id the id of the file in the files collection to rename + * @param newFilename the new filename for the file + */ + void rename(ObjectId id, String newFilename); + + /** + * Renames the stored file with the specified {@code id}. + * + * @param id the id of the file in the files collection to rename + * @param newFilename the new filename for the file + * @since 3.3 + */ + void rename(BsonValue id, String newFilename); + + /** + * Renames the stored file with the specified {@code id}. + * + * @param clientSession the client session with which to associate this operation + * @param id the id of the file in the files collection to rename + * @param newFilename the new filename for the file + * @since 3.6 + * @mongodb.server.release 3.6 + */ + void rename(ClientSession clientSession, ObjectId id, String newFilename); + + /** + * Renames the stored file with the specified {@code id}. + * + * @param clientSession the client session with which to associate this operation + * @param id the id of the file in the files collection to rename + * @param newFilename the new filename for the file + * @since 3.6 + * @mongodb.server.release 3.6 + */ + void rename(ClientSession clientSession, BsonValue id, String newFilename); + + /** + * Drops the data associated with this bucket from the database. + */ + void drop(); + + /** + * Drops the data associated with this bucket from the database. + * + * @param clientSession the client session with which to associate this operation + * @since 3.6 + * @mongodb.server.release 3.6 + */ + void drop(ClientSession clientSession); +} diff --git a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSBucketImpl.java b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSBucketImpl.java new file mode 100644 index 00000000000..20ac8fc6d44 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSBucketImpl.java @@ -0,0 +1,661 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.gridfs; + +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoGridFSException; +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.client.ClientSession; +import com.mongodb.client.FindIterable; +import com.mongodb.client.ListIndexesIterable; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.gridfs.model.GridFSDownloadOptions; +import com.mongodb.client.gridfs.model.GridFSFile; +import com.mongodb.client.gridfs.model.GridFSUploadOptions; +import com.mongodb.client.internal.TimeoutHelper; +import com.mongodb.client.model.IndexOptions; +import com.mongodb.client.result.DeleteResult; +import com.mongodb.client.result.UpdateResult; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonObjectId; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.Document; +import org.bson.conversions.Bson; +import org.bson.types.ObjectId; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.ReadPreference.primary; +import static com.mongodb.assertions.Assertions.notNull; +import static java.lang.String.format; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.bson.codecs.configuration.CodecRegistries.fromRegistries; + +final class GridFSBucketImpl implements GridFSBucket { + private static final int DEFAULT_CHUNKSIZE_BYTES = 255 * 1024; + private static final String TIMEOUT_MESSAGE = "GridFS operation exceeded the timeout limit."; + private final String bucketName; + private final int chunkSizeBytes; + private final MongoCollection filesCollection; + private final MongoCollection chunksCollection; + private volatile boolean checkedIndexes; + + GridFSBucketImpl(final MongoDatabase database) { + this(database, "fs"); + } + + GridFSBucketImpl(final MongoDatabase database, final String bucketName) { + this(notNull("bucketName", bucketName), DEFAULT_CHUNKSIZE_BYTES, + getFilesCollection(notNull("database", database), bucketName), + getChunksCollection(database, bucketName)); + } + + @VisibleForTesting(otherwise = VisibleForTesting.AccessModifier.PRIVATE) + GridFSBucketImpl(final String bucketName, final int chunkSizeBytes, final MongoCollection filesCollection, + final MongoCollection chunksCollection) { + this.bucketName = notNull("bucketName", bucketName); + this.chunkSizeBytes = chunkSizeBytes; + this.filesCollection = notNull("filesCollection", filesCollection); + this.chunksCollection = 
notNull("chunksCollection", chunksCollection); + } + + @Override + public String getBucketName() { + return bucketName; + } + + @Override + public int getChunkSizeBytes() { + return chunkSizeBytes; + } + + @Override + public ReadPreference getReadPreference() { + return filesCollection.getReadPreference(); + } + + @Override + public WriteConcern getWriteConcern() { + return filesCollection.getWriteConcern(); + } + + @Override + public ReadConcern getReadConcern() { + return filesCollection.getReadConcern(); + } + + @Override + public Long getTimeout(final TimeUnit timeUnit) { + return filesCollection.getTimeout(timeUnit); + } + + @Override + public GridFSBucket withChunkSizeBytes(final int chunkSizeBytes) { + return new GridFSBucketImpl(bucketName, chunkSizeBytes, filesCollection, chunksCollection); + } + + @Override + public GridFSBucket withReadPreference(final ReadPreference readPreference) { + return new GridFSBucketImpl(bucketName, chunkSizeBytes, filesCollection.withReadPreference(readPreference), + chunksCollection.withReadPreference(readPreference)); + } + + @Override + public GridFSBucket withWriteConcern(final WriteConcern writeConcern) { + return new GridFSBucketImpl(bucketName, chunkSizeBytes, filesCollection.withWriteConcern(writeConcern), + chunksCollection.withWriteConcern(writeConcern)); + } + + @Override + public GridFSBucket withReadConcern(final ReadConcern readConcern) { + return new GridFSBucketImpl(bucketName, chunkSizeBytes, filesCollection.withReadConcern(readConcern), + chunksCollection.withReadConcern(readConcern)); + } + + @Override + public GridFSBucket withTimeout(final long timeout, final TimeUnit timeUnit) { + return new GridFSBucketImpl(bucketName, chunkSizeBytes, filesCollection.withTimeout(timeout, timeUnit), + chunksCollection.withTimeout(timeout, timeUnit)); + } + + @Override + public GridFSUploadStream openUploadStream(final String filename) { + return openUploadStream(new BsonObjectId(), filename); + } + + @Override + public GridFSUploadStream openUploadStream(final String filename, final GridFSUploadOptions options) { + return openUploadStream(new BsonObjectId(), filename, options); + } + + @Override + public GridFSUploadStream openUploadStream(final BsonValue id, final String filename) { + return openUploadStream(id, filename, new GridFSUploadOptions()); + } + + @Override + public GridFSUploadStream openUploadStream(final BsonValue id, final String filename, final GridFSUploadOptions options) { + return createGridFSUploadStream(null, id, filename, options); + } + + @Override + public GridFSUploadStream openUploadStream(final ClientSession clientSession, final String filename) { + return openUploadStream(clientSession, new BsonObjectId(), filename); + } + + @Override + public GridFSUploadStream openUploadStream(final ClientSession clientSession, final String filename, + final GridFSUploadOptions options) { + return openUploadStream(clientSession, new BsonObjectId(), filename, options); + } + + @Override + public GridFSUploadStream openUploadStream(final ClientSession clientSession, final ObjectId id, final String filename) { + return openUploadStream(clientSession, new BsonObjectId(id), filename); + } + + @Override + public GridFSUploadStream openUploadStream(final ClientSession clientSession, final BsonValue id, final String filename) { + return openUploadStream(clientSession, id, filename, new GridFSUploadOptions()); + } + + @Override + public GridFSUploadStream openUploadStream(final ClientSession clientSession, final BsonValue id, final String 
filename, + final GridFSUploadOptions options) { + notNull("clientSession", clientSession); + return createGridFSUploadStream(clientSession, id, filename, options); + } + + private GridFSUploadStream createGridFSUploadStream(@Nullable final ClientSession clientSession, final BsonValue id, + final String filename, final GridFSUploadOptions options) { + Timeout operationTimeout = startTimeout(); + notNull("options", options); + Integer chunkSizeBytes = options.getChunkSizeBytes(); + int chunkSize = chunkSizeBytes == null ? this.chunkSizeBytes : chunkSizeBytes; + checkCreateIndex(clientSession, operationTimeout); + return new GridFSUploadStreamImpl(clientSession, filesCollection, + chunksCollection, id, filename, chunkSize, + options.getMetadata(), operationTimeout); + } + + @Override + public ObjectId uploadFromStream(final String filename, final InputStream source) { + return uploadFromStream(filename, source, new GridFSUploadOptions()); + } + + @Override + public ObjectId uploadFromStream(final String filename, final InputStream source, final GridFSUploadOptions options) { + ObjectId id = new ObjectId(); + uploadFromStream(new BsonObjectId(id), filename, source, options); + return id; + } + + @Override + public void uploadFromStream(final BsonValue id, final String filename, final InputStream source) { + uploadFromStream(id, filename, source, new GridFSUploadOptions()); + } + + @Override + public void uploadFromStream(final BsonValue id, final String filename, final InputStream source, + final GridFSUploadOptions options) { + executeUploadFromStream(null, id, filename, source, options); + } + + @Override + public ObjectId uploadFromStream(final ClientSession clientSession, final String filename, final InputStream source) { + return uploadFromStream(clientSession, filename, source, new GridFSUploadOptions()); + } + + @Override + public ObjectId uploadFromStream(final ClientSession clientSession, final String filename, final InputStream source, + final GridFSUploadOptions options) { + ObjectId id = new ObjectId(); + uploadFromStream(clientSession, new BsonObjectId(id), filename, source, options); + return id; + } + + @Override + public void uploadFromStream(final ClientSession clientSession, final BsonValue id, final String filename, final InputStream source) { + uploadFromStream(clientSession, id, filename, source, new GridFSUploadOptions()); + } + + @Override + public void uploadFromStream(final ClientSession clientSession, final BsonValue id, final String filename, final InputStream source, + final GridFSUploadOptions options) { + notNull("clientSession", clientSession); + executeUploadFromStream(clientSession, id, filename, source, options); + } + + private void executeUploadFromStream(@Nullable final ClientSession clientSession, final BsonValue id, final String filename, + final InputStream source, final GridFSUploadOptions options) { + GridFSUploadStream uploadStream = createGridFSUploadStream(clientSession, id, filename, options); + Integer chunkSizeBytes = options.getChunkSizeBytes(); + int chunkSize = chunkSizeBytes == null ? 
this.chunkSizeBytes : chunkSizeBytes; + byte[] buffer = new byte[chunkSize]; + int len; + try { + while ((len = source.read(buffer)) != -1) { + uploadStream.write(buffer, 0, len); + } + uploadStream.close(); + } catch (IOException e) { + uploadStream.abort(); + throw new MongoGridFSException("IOException when reading from the InputStream", e); + } + } + + @Override + public GridFSDownloadStream openDownloadStream(final ObjectId id) { + return openDownloadStream(new BsonObjectId(id)); + } + + @Override + public GridFSDownloadStream openDownloadStream(final BsonValue id) { + Timeout operationTimeout = startTimeout(); + + GridFSFile fileInfo = getFileInfoById(null, id, operationTimeout); + return createGridFSDownloadStream(null, fileInfo, operationTimeout); + } + + @Override + public GridFSDownloadStream openDownloadStream(final String filename) { + return openDownloadStream(filename, new GridFSDownloadOptions()); + } + + @Override + public GridFSDownloadStream openDownloadStream(final String filename, final GridFSDownloadOptions options) { + Timeout operationTimeout = startTimeout(); + GridFSFile file = getFileByName(null, filename, options, operationTimeout); + return createGridFSDownloadStream(null, file, operationTimeout); + } + + @Override + public GridFSDownloadStream openDownloadStream(final ClientSession clientSession, final ObjectId id) { + return openDownloadStream(clientSession, new BsonObjectId(id)); + } + + @Override + public GridFSDownloadStream openDownloadStream(final ClientSession clientSession, final BsonValue id) { + notNull("clientSession", clientSession); + Timeout operationTimeout = startTimeout(); + GridFSFile fileInfoById = getFileInfoById(clientSession, id, operationTimeout); + return createGridFSDownloadStream(clientSession, fileInfoById, operationTimeout); + } + + @Override + public GridFSDownloadStream openDownloadStream(final ClientSession clientSession, final String filename) { + return openDownloadStream(clientSession, filename, new GridFSDownloadOptions()); + } + + @Override + public GridFSDownloadStream openDownloadStream(final ClientSession clientSession, final String filename, + final GridFSDownloadOptions options) { + notNull("clientSession", clientSession); + Timeout operationTimeout = startTimeout(); + GridFSFile file = getFileByName(clientSession, filename, options, operationTimeout); + return createGridFSDownloadStream(clientSession, file, operationTimeout); + } + + private GridFSDownloadStream createGridFSDownloadStream(@Nullable final ClientSession clientSession, final GridFSFile gridFSFile, + @Nullable final Timeout operationTimeout) { + return new GridFSDownloadStreamImpl(clientSession, gridFSFile, chunksCollection, operationTimeout); + } + + @Override + public void downloadToStream(final ObjectId id, final OutputStream destination) { + downloadToStream(new BsonObjectId(id), destination); + } + + @Override + public void downloadToStream(final BsonValue id, final OutputStream destination) { + downloadToStream(openDownloadStream(id), destination); + } + + @Override + public void downloadToStream(final String filename, final OutputStream destination) { + downloadToStream(filename, destination, new GridFSDownloadOptions()); + } + + @Override + public void downloadToStream(final String filename, final OutputStream destination, final GridFSDownloadOptions options) { + downloadToStream(openDownloadStream(filename, options), destination); + } + + @Override + public void downloadToStream(final ClientSession clientSession, final ObjectId id, final 
OutputStream destination) { + downloadToStream(clientSession, new BsonObjectId(id), destination); + } + + @Override + public void downloadToStream(final ClientSession clientSession, final BsonValue id, final OutputStream destination) { + notNull("clientSession", clientSession); + downloadToStream(openDownloadStream(clientSession, id), destination); + } + + @Override + public void downloadToStream(final ClientSession clientSession, final String filename, final OutputStream destination) { + downloadToStream(clientSession, filename, destination, new GridFSDownloadOptions()); + } + + @Override + public void downloadToStream(final ClientSession clientSession, final String filename, final OutputStream destination, + final GridFSDownloadOptions options) { + notNull("clientSession", clientSession); + downloadToStream(openDownloadStream(clientSession, filename, options), destination); + } + + @Override + public GridFSFindIterable find() { + return createGridFSFindIterable(null, null); + } + + @Override + public GridFSFindIterable find(final Bson filter) { + notNull("filter", filter); + return createGridFSFindIterable(null, filter); + } + + @Override + public GridFSFindIterable find(final ClientSession clientSession) { + notNull("clientSession", clientSession); + return createGridFSFindIterable(clientSession, null); + } + + @Override + public GridFSFindIterable find(final ClientSession clientSession, final Bson filter) { + notNull("clientSession", clientSession); + notNull("filter", filter); + return createGridFSFindIterable(clientSession, filter); + } + + private GridFSFindIterable createGridFSFindIterable(@Nullable final ClientSession clientSession, @Nullable final Bson filter) { + return new GridFSFindIterableImpl(createFindIterable(clientSession, filter, startTimeout())); + } + + private GridFSFindIterable createGridFSFindIterable(@Nullable final ClientSession clientSession, @Nullable final Bson filter, + @Nullable final Timeout operationTimeout) { + return new GridFSFindIterableImpl(createFindIterable(clientSession, filter, operationTimeout)); + } + + @Override + public void delete(final ObjectId id) { + delete(new BsonObjectId(id)); + } + + @Override + public void delete(final BsonValue id) { + executeDelete(null, id); + } + + @Override + public void delete(final ClientSession clientSession, final ObjectId id) { + delete(clientSession, new BsonObjectId(id)); + } + + @Override + public void delete(final ClientSession clientSession, final BsonValue id) { + notNull("clientSession", clientSession); + executeDelete(clientSession, id); + } + + private void executeDelete(@Nullable final ClientSession clientSession, final BsonValue id) { + Timeout operationTimeout = startTimeout(); + DeleteResult result; + if (clientSession != null) { + result = withNullableTimeout(filesCollection, operationTimeout) + .deleteOne(clientSession, new BsonDocument("_id", id)); + withNullableTimeout(chunksCollection, operationTimeout) + .deleteMany(clientSession, new BsonDocument("files_id", id)); + } else { + result = withNullableTimeout(filesCollection, operationTimeout) + .deleteOne(new BsonDocument("_id", id)); + withNullableTimeout(chunksCollection, operationTimeout) + .deleteMany(new BsonDocument("files_id", id)); + } + + if (result.wasAcknowledged() && result.getDeletedCount() == 0) { + throw new MongoGridFSException(format("No file found with the id: %s", id)); + } + } + + @Override + public void rename(final ObjectId id, final String newFilename) { + rename(new BsonObjectId(id), newFilename); + } + + @Override + 
public void rename(final BsonValue id, final String newFilename) { + executeRename(null, id, newFilename); + } + + @Override + public void rename(final ClientSession clientSession, final ObjectId id, final String newFilename) { + rename(clientSession, new BsonObjectId(id), newFilename); + } + + @Override + public void rename(final ClientSession clientSession, final BsonValue id, final String newFilename) { + notNull("clientSession", clientSession); + executeRename(clientSession, id, newFilename); + } + + private void executeRename(@Nullable final ClientSession clientSession, final BsonValue id, final String newFilename) { + Timeout operationTimeout = startTimeout(); + UpdateResult updateResult; + if (clientSession != null) { + updateResult = withNullableTimeout(filesCollection, operationTimeout).updateOne(clientSession, new BsonDocument("_id", id), + new BsonDocument("$set", new BsonDocument("filename", new BsonString(newFilename)))); + } else { + updateResult = withNullableTimeout(filesCollection, operationTimeout).updateOne(new BsonDocument("_id", id), + new BsonDocument("$set", new BsonDocument("filename", new BsonString(newFilename)))); + } + + if (updateResult.wasAcknowledged() && updateResult.getMatchedCount() == 0) { + throw new MongoGridFSException(format("No file found with the id: %s", id)); + } + } + + @Override + public void drop() { + Timeout operationTimeout = startTimeout(); + withNullableTimeout(filesCollection, operationTimeout).drop(); + withNullableTimeout(chunksCollection, operationTimeout).drop(); + } + + @Override + public void drop(final ClientSession clientSession) { + Timeout operationTimeout = startTimeout(); + notNull("clientSession", clientSession); + withNullableTimeout(filesCollection, operationTimeout).drop(clientSession); + withNullableTimeout(chunksCollection, operationTimeout).drop(clientSession); + } + + private static MongoCollection getFilesCollection(final MongoDatabase database, final String bucketName) { + return database.getCollection(bucketName + ".files", GridFSFile.class).withCodecRegistry( + fromRegistries(database.getCodecRegistry(), MongoClientSettings.getDefaultCodecRegistry()) + ); + } + + private static MongoCollection getChunksCollection(final MongoDatabase database, final String bucketName) { + return database.getCollection(bucketName + ".chunks", BsonDocument.class).withCodecRegistry(MongoClientSettings.getDefaultCodecRegistry()); + } + + private void checkCreateIndex(@Nullable final ClientSession clientSession, @Nullable final Timeout operationTimeout) { + if (!checkedIndexes) { + if (collectionIsEmpty(clientSession, + filesCollection.withDocumentClass(Document.class).withReadPreference(primary()), + operationTimeout)) { + + Document filesIndex = new Document("filename", 1).append("uploadDate", 1); + if (!hasIndex(clientSession, filesCollection.withReadPreference(primary()), filesIndex, operationTimeout)) { + createIndex(clientSession, filesCollection, filesIndex, new IndexOptions(), operationTimeout); + } + Document chunksIndex = new Document("files_id", 1).append("n", 1); + if (!hasIndex(clientSession, chunksCollection.withReadPreference(primary()), chunksIndex, operationTimeout)) { + createIndex(clientSession, chunksCollection, chunksIndex, new IndexOptions().unique(true), operationTimeout); + } + } + checkedIndexes = true; + } + } + + private boolean collectionIsEmpty(@Nullable final ClientSession clientSession, + final MongoCollection collection, + @Nullable final Timeout operationTimeout) { + if (clientSession != null) { + return 
withNullableTimeout(collection, operationTimeout) + .find(clientSession).projection(new Document("_id", 1)).first() == null; + } else { + return withNullableTimeout(collection, operationTimeout) + .find().projection(new Document("_id", 1)).first() == null; + } + } + + private boolean hasIndex(@Nullable final ClientSession clientSession, final MongoCollection collection, + final Document index, @Nullable final Timeout operationTimeout) { + boolean hasIndex = false; + ListIndexesIterable listIndexesIterable; + if (clientSession != null) { + listIndexesIterable = withNullableTimeout(collection, operationTimeout).listIndexes(clientSession); + } else { + listIndexesIterable = withNullableTimeout(collection, operationTimeout).listIndexes(); + } + + ArrayList indexes = listIndexesIterable.into(new ArrayList<>()); + for (Document result : indexes) { + Document indexDoc = result.get("key", new Document()); + for (final Map.Entry entry : indexDoc.entrySet()) { + if (entry.getValue() instanceof Number) { + entry.setValue(((Number) entry.getValue()).intValue()); + } + } + if (indexDoc.equals(index)) { + hasIndex = true; + break; + } + } + return hasIndex; + } + + private void createIndex(@Nullable final ClientSession clientSession, final MongoCollection collection, final Document index, + final IndexOptions indexOptions, final @Nullable Timeout operationTimeout) { + if (clientSession != null) { + withNullableTimeout(collection, operationTimeout).createIndex(clientSession, index, indexOptions); + } else { + withNullableTimeout(collection, operationTimeout).createIndex(index, indexOptions); + } + } + + private GridFSFile getFileByName(@Nullable final ClientSession clientSession, final String filename, + final GridFSDownloadOptions options, @Nullable final Timeout operationTimeout) { + int revision = options.getRevision(); + int skip; + int sort; + if (revision >= 0) { + skip = revision; + sort = 1; + } else { + skip = (-revision) - 1; + sort = -1; + } + + GridFSFile fileInfo = createGridFSFindIterable(clientSession, new Document("filename", filename), operationTimeout).skip(skip) + .sort(new Document("uploadDate", sort)).first(); + if (fileInfo == null) { + throw new MongoGridFSException(format("No file found with the filename: %s and revision: %s", filename, revision)); + } + return fileInfo; + } + + private GridFSFile getFileInfoById(@Nullable final ClientSession clientSession, final BsonValue id, + @Nullable final Timeout operationTImeout) { + notNull("id", id); + GridFSFile fileInfo = createFindIterable(clientSession, new Document("_id", id), operationTImeout).first(); + if (fileInfo == null) { + throw new MongoGridFSException(format("No file found with the id: %s", id)); + } + return fileInfo; + } + + private FindIterable createFindIterable(@Nullable final ClientSession clientSession, @Nullable final Bson filter, + @Nullable final Timeout operationTImeout) { + FindIterable findIterable; + if (clientSession != null) { + findIterable = withNullableTimeout(filesCollection, operationTImeout).find(clientSession); + } else { + findIterable = withNullableTimeout(filesCollection, operationTImeout).find(); + } + if (filter != null) { + findIterable = findIterable.filter(filter); + } + if (filesCollection.getTimeout(MILLISECONDS) != null) { + findIterable.timeoutMode(TimeoutMode.CURSOR_LIFETIME); + } + return findIterable; + } + + private void downloadToStream(final GridFSDownloadStream downloadStream, final OutputStream destination) { + byte[] buffer = new 
byte[downloadStream.getGridFSFile().getChunkSize()]; + int len; + MongoGridFSException savedThrowable = null; + try { + while ((len = downloadStream.read(buffer)) != -1) { + destination.write(buffer, 0, len); + } + } catch (MongoOperationTimeoutException e){ + throw e; + } catch (IOException e) { + savedThrowable = new MongoGridFSException("IOException when reading from the OutputStream", e); + } catch (Exception e) { + savedThrowable = new MongoGridFSException("Unexpected Exception when reading GridFS and writing to the Stream", e); + } finally { + try { + downloadStream.close(); + } catch (Exception e) { + // Do nothing + } + if (savedThrowable != null) { + throw savedThrowable; + } + } + } + + private static MongoCollection withNullableTimeout(final MongoCollection chunksCollection, + @Nullable final Timeout timeout) { + return TimeoutHelper.collectionWithTimeout(chunksCollection, TIMEOUT_MESSAGE, timeout); + } + + @Nullable + private Timeout startTimeout() { + return TimeoutContext.startTimeout(filesCollection.getTimeout(MILLISECONDS)); + } +} diff --git a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSBuckets.java b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSBuckets.java new file mode 100644 index 00000000000..ad3e5fabbd4 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSBuckets.java @@ -0,0 +1,52 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.gridfs; + + +import com.mongodb.client.MongoDatabase; + +/** + * A factory for GridFSBucket instances. + * + * @since 3.1 + */ +public final class GridFSBuckets { + + /** + * Create a new GridFS bucket with the default {@code 'fs'} bucket name + * + * @param database the database instance to use with GridFS + * @return the GridFSBucket + */ + public static GridFSBucket create(final MongoDatabase database) { + return new GridFSBucketImpl(database); + } + + /** + * Create a new GridFS bucket with a custom bucket name + * + * @param database the database instance to use with GridFS + * @param bucketName the custom bucket name to use + * @return the GridFSBucket + */ + public static GridFSBucket create(final MongoDatabase database, final String bucketName) { + return new GridFSBucketImpl(database, bucketName); + } + + private GridFSBuckets() { + } +} diff --git a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSDownloadStream.java b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSDownloadStream.java new file mode 100644 index 00000000000..e1ea78633ba --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSDownloadStream.java @@ -0,0 +1,84 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.gridfs; + +import com.mongodb.annotations.NotThreadSafe; +import com.mongodb.client.gridfs.model.GridFSFile; + +import java.io.InputStream; + +/** + * A GridFS InputStream for downloading data from GridFS + * + *
<p>Provides the {@code GridFSFile} for the file being downloaded as well as the {@code read} methods of an {@link InputStream}</p>
+ *
+ * <p>This implementation of an {@code InputStream} will not throw {@link java.io.IOException}s. However, it will throw a
+ * {@link com.mongodb.MongoException} if there is an error reading from MongoDB.</p>
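+ *
+ * <p>A minimal usage sketch (assuming an existing {@code gridFSBucket} and a known {@code fileId}); for large files the
+ * {@code read} call should be repeated until it returns {@code -1}:</p>
+ * <pre>
+ *  {@code
+ *      try (GridFSDownloadStream downloadStream = gridFSBucket.openDownloadStream(fileId)) {
+ *          int fileLength = (int) downloadStream.getGridFSFile().getLength();
+ *          byte[] bytes = new byte[fileLength];
+ *          downloadStream.read(bytes); // simplified; production code should loop until the stream is exhausted
+ *      }
+ *  }
+ *  </pre>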
+ * + * @since 3.1 + */ +@NotThreadSafe +public abstract class GridFSDownloadStream extends InputStream { + + /** + * Gets the corresponding {@link GridFSFile} for the file being downloaded + * + * @return the corresponding GridFSFile for the file being downloaded + */ + public abstract GridFSFile getGridFSFile(); + + /** + * Sets the number of chunks to return per batch. + * + *
<p>Can be used to control the memory consumption of this InputStream: the smaller the batchSize, the lower the memory
+     * consumption and the higher the latency.</p>
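+     * <p>
+     * For example (illustrative; {@code downloadStream} is assumed to exist), fetching one chunk per batch minimizes memory use:
+     * <pre>
+     *  {@code
+     *      downloadStream.batchSize(1);
+     *  }
+     *  </pre>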
+ * + * @param batchSize the batch size + * @return this + * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size + */ + public abstract GridFSDownloadStream batchSize(int batchSize); + + @Override + public abstract int read(); + + @Override + public abstract int read(byte[] b); + + @Override + public abstract int read(byte[] b, int off, int len); + + @Override + public abstract long skip(long n); + + @Override + public abstract int available(); + + /** + * Marks the current position in this input stream. + * + *
<p>A subsequent call to the {@code reset} method repositions this stream at the last marked position so that subsequent
+     * reads re-read the same bytes.</p>
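+     * <p>
+     * An illustrative sketch (assuming an existing {@code downloadStream}) of marking and rewinding:
+     * <pre>
+     *  {@code
+     *      downloadStream.mark();      // remember the current position
+     *      int firstByte = downloadStream.read();
+     *      downloadStream.reset();     // rewind to the marked position
+     *      assert downloadStream.read() == firstByte;
+     *  }
+     *  </pre>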
+ */ + public abstract void mark(); + + @Override + public abstract void reset(); + + @Override + public abstract void close(); +} diff --git a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSDownloadStreamImpl.java b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSDownloadStreamImpl.java new file mode 100644 index 00000000000..709ae68138b --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSDownloadStreamImpl.java @@ -0,0 +1,314 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.gridfs; + +import com.mongodb.MongoGridFSException; +import com.mongodb.client.ClientSession; +import com.mongodb.client.FindIterable; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.MongoCursor; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.gridfs.model.GridFSFile; +import com.mongodb.client.internal.TimeoutHelper; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonValue; + +import java.util.concurrent.locks.ReentrantLock; + +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.Locks.withInterruptibleLock; +import static com.mongodb.internal.TimeoutContext.createMongoTimeoutException; +import static java.lang.String.format; + +class GridFSDownloadStreamImpl extends GridFSDownloadStream { + private static final String TIMEOUT_MESSAGE = "The GridFS download stream exceeded the timeout limit."; + private final ClientSession clientSession; + private final GridFSFile fileInfo; + private final MongoCollection chunksCollection; + private final BsonValue fileId; + /** + * The length, in bytes of the file to download. + */ + private final long length; + private final int chunkSizeInBytes; + private final int numberOfChunks; + private MongoCursor cursor; + private int batchSize; + private int chunkIndex; + private int bufferOffset; + /** + * Current byte position in the file. 
+ */ + private long currentPosition; + private byte[] buffer = null; + private long markPosition; + @Nullable + private final Timeout timeout; + private final ReentrantLock closeLock = new ReentrantLock(); + private final ReentrantLock cursorLock = new ReentrantLock(); + private boolean closed = false; + + GridFSDownloadStreamImpl(@Nullable final ClientSession clientSession, final GridFSFile fileInfo, + final MongoCollection chunksCollection, @Nullable final Timeout timeout) { + this.clientSession = clientSession; + this.fileInfo = notNull("file information", fileInfo); + this.chunksCollection = notNull("chunks collection", chunksCollection); + + fileId = fileInfo.getId(); + length = fileInfo.getLength(); + chunkSizeInBytes = fileInfo.getChunkSize(); + numberOfChunks = (int) Math.ceil((double) length / chunkSizeInBytes); + this.timeout = timeout; + } + + @Override + public GridFSFile getGridFSFile() { + return fileInfo; + } + + @Override + public GridFSDownloadStream batchSize(final int batchSize) { + isTrueArgument("batchSize cannot be negative", batchSize >= 0); + this.batchSize = batchSize; + discardCursor(); + return this; + } + + @Override + public int read() { + byte[] b = new byte[1]; + int res = read(b); + if (res < 0) { + return -1; + } + return b[0] & 0xFF; + } + + @Override + public int read(final byte[] b) { + return read(b, 0, b.length); + } + + @Override + public int read(final byte[] b, final int off, final int len) { + checkClosed(); + checkTimeout(); + + if (currentPosition == length) { + return -1; + } else if (buffer == null) { + buffer = getBuffer(chunkIndex); + } else if (bufferOffset == buffer.length) { + chunkIndex += 1; + buffer = getBuffer(chunkIndex); + bufferOffset = 0; + } + + int r = Math.min(len, buffer.length - bufferOffset); + System.arraycopy(buffer, bufferOffset, b, off, r); + bufferOffset += r; + currentPosition += r; + return r; + } + + @Override + public long skip(final long bytesToSkip) { + checkClosed(); + checkTimeout(); + if (bytesToSkip <= 0) { + return 0; + } + + long skippedPosition = currentPosition + bytesToSkip; + bufferOffset = (int) (skippedPosition % chunkSizeInBytes); + if (skippedPosition >= length) { + long skipped = length - currentPosition; + chunkIndex = numberOfChunks - 1; + currentPosition = length; + buffer = null; + discardCursor(); + return skipped; + } else { + int newChunkIndex = (int) Math.floor(skippedPosition / (double) chunkSizeInBytes); + if (chunkIndex != newChunkIndex) { + chunkIndex = newChunkIndex; + buffer = null; + discardCursor(); + } + currentPosition += bytesToSkip; + return bytesToSkip; + } + } + + @Override + public int available() { + checkClosed(); + checkTimeout(); + if (buffer == null) { + return 0; + } else { + return buffer.length - bufferOffset; + } + } + + @Override + public void mark() { + mark(Integer.MAX_VALUE); + } + + @Override + public void mark(final int readlimit) { + markPosition = currentPosition; + } + + @Override + public void reset() { + checkClosed(); + checkTimeout(); + if (currentPosition == markPosition) { + return; + } + + bufferOffset = (int) (markPosition % chunkSizeInBytes); + currentPosition = markPosition; + int markChunkIndex = (int) Math.floor(markPosition / (double) chunkSizeInBytes); + if (markChunkIndex != chunkIndex) { + chunkIndex = markChunkIndex; + buffer = null; + discardCursor(); + } + } + + @Override + public boolean markSupported() { + return true; + } + + @Override + public void close() { + withInterruptibleLock(closeLock, () -> { + if (!closed) { + closed = true; + } 
+ discardCursor(); + }); + } + + private void checkTimeout() { + Timeout.onExistsAndExpired(timeout, () -> { + throw createMongoTimeoutException(TIMEOUT_MESSAGE); + }); + } + private void checkClosed() { + withInterruptibleLock(closeLock, () -> { + if (closed) { + throw new MongoGridFSException("The InputStream has been closed"); + } + }); + } + + private void discardCursor() { + withInterruptibleLock(cursorLock, () -> { + if (cursor != null) { + cursor.close(); + cursor = null; + } + }); + } + + @Nullable + private BsonDocument getChunk(final int startChunkIndex) { + if (cursor == null) { + cursor = getCursor(startChunkIndex); + } + BsonDocument chunk = null; + if (cursor.hasNext()) { + chunk = cursor.next(); + if (batchSize == 1) { + discardCursor(); + } + if (chunk.getInt32("n").getValue() != startChunkIndex) { + throw new MongoGridFSException(format("Could not find file chunk for file_id: %s at chunk index %s.", + fileId, startChunkIndex)); + } + } + + return chunk; + } + + private MongoCursor getCursor(final int startChunkIndex) { + FindIterable findIterable; + BsonDocument filter = new BsonDocument("files_id", fileId).append("n", new BsonDocument("$gte", new BsonInt32(startChunkIndex))); + if (clientSession != null) { + findIterable = withNullableTimeout(chunksCollection, timeout).find(clientSession, filter); + } else { + findIterable = withNullableTimeout(chunksCollection, timeout).find(filter); + } + if (timeout != null){ + findIterable.timeoutMode(TimeoutMode.CURSOR_LIFETIME); + } + return findIterable.batchSize(batchSize) + .sort(new BsonDocument("n", new BsonInt32(1))).iterator(); + } + + private byte[] getBufferFromChunk(@Nullable final BsonDocument chunk, final int expectedChunkIndex) { + + if (chunk == null || chunk.getInt32("n").getValue() != expectedChunkIndex) { + throw new MongoGridFSException(format("Could not find file chunk for file_id: %s at chunk index %s.", + fileId, expectedChunkIndex)); + } + + if (!(chunk.get("data") instanceof BsonBinary)) { + throw new MongoGridFSException("Unexpected data format for the chunk"); + } + byte[] data = chunk.getBinary("data").getData(); + + long expectedDataLength = 0; + boolean extraChunk = false; + if (expectedChunkIndex + 1 > numberOfChunks) { + extraChunk = true; + } else if (expectedChunkIndex + 1 == numberOfChunks) { + expectedDataLength = length - (expectedChunkIndex * (long) chunkSizeInBytes); + } else { + expectedDataLength = chunkSizeInBytes; + } + + if (extraChunk && data.length > expectedDataLength) { + throw new MongoGridFSException(format("Extra chunk data for file_id: %s. Unexpected chunk at chunk index %s." + + "The size was %s and it should be %s bytes.", fileId, expectedChunkIndex, data.length, expectedDataLength)); + } else if (data.length != expectedDataLength) { + throw new MongoGridFSException(format("Chunk size data length is not the expected size. 
" + + "The size was %s for file_id: %s chunk index %s it should be %s bytes.", + data.length, fileId, expectedChunkIndex, expectedDataLength)); + } + return data; + } + + private byte[] getBuffer(final int chunkIndexToFetch) { + return getBufferFromChunk(getChunk(chunkIndexToFetch), chunkIndexToFetch); + } + + private MongoCollection withNullableTimeout(final MongoCollection chunksCollection, + @Nullable final Timeout timeout) { + return TimeoutHelper.collectionWithTimeout(chunksCollection, TIMEOUT_MESSAGE, timeout); + } +} diff --git a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSFindIterable.java b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSFindIterable.java new file mode 100644 index 00000000000..9b8cb8b9117 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSFindIterable.java @@ -0,0 +1,118 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.gridfs; + +import com.mongodb.client.MongoIterable; +import com.mongodb.client.gridfs.model.GridFSFile; +import com.mongodb.client.model.Collation; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +import java.util.concurrent.TimeUnit; + +/** + * Iterable for the GridFS Files Collection. + * + * @since 3.1 + */ +public interface GridFSFindIterable extends MongoIterable { + + /** + * Sets the query filter to apply to the query. + * + *
<p>
+     * Below is an example of filtering against the filename and some nested metadata that can also be stored along with the file data:
+     * <pre>
+     *  {@code
+     *      Filters.and(Filters.eq("filename", "mongodb.png"), Filters.eq("metadata.contentType", "image/png"));
+     *  }
+     *  </pre>
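+     * <p>
+     * An illustrative sketch (assuming an existing {@code gridFSBucket}) of combining the filter with other query modifiers:
+     * <pre>
+     *  {@code
+     *      gridFSBucket.find()
+     *              .filter(Filters.eq("metadata.contentType", "image/png"))
+     *              .sort(Sorts.descending("uploadDate"))
+     *              .limit(5)
+     *              .forEach(file -> System.out.println(file.getFilename()));
+     *  }
+     *  </pre>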
+ * + * @param filter the filter, which may be null. + * @return this + * @mongodb.driver.manual reference/method/db.collection.find/ Filter + * @see com.mongodb.client.model.Filters + */ + GridFSFindIterable filter(@Nullable Bson filter); + + /** + * Sets the limit to apply. + * + * @param limit the limit, which may be null + * @return this + * @mongodb.driver.manual reference/method/cursor.limit/#cursor.limit Limit + */ + GridFSFindIterable limit(int limit); + + /** + * Sets the number of documents to skip. + * + * @param skip the number of documents to skip + * @return this + * @mongodb.driver.manual reference/method/cursor.skip/#cursor.skip Skip + */ + GridFSFindIterable skip(int skip); + + /** + * Sets the sort criteria to apply to the query. + * + * @param sort the sort criteria, which may be null. + * @return this + * @mongodb.driver.manual reference/method/cursor.sort/ Sort + */ + GridFSFindIterable sort(@Nullable Bson sort); + + /** + * The server normally times out idle cursors after an inactivity period (10 minutes) + * to prevent excess memory use. Set this option to prevent that. + * + * @param noCursorTimeout true if cursor timeout is disabled + * @return this + */ + GridFSFindIterable noCursorTimeout(boolean noCursorTimeout); + + /** + * Sets the maximum execution time on the server for this operation. + * + * @param maxTime the max time + * @param timeUnit the time unit, which may not be null + * @return this + * @mongodb.driver.manual reference/method/cursor.maxTimeMS/#cursor.maxTimeMS Max Time + */ + GridFSFindIterable maxTime(long maxTime, TimeUnit timeUnit); + + /** + * Sets the number of documents to return per batch. + * + * @param batchSize the batch size + * @return this + * @mongodb.driver.manual reference/method/cursor.batchSize/#cursor.batchSize Batch Size + */ + @Override + GridFSFindIterable batchSize(int batchSize); + + /** + * Sets the collation options + * + *
<p>A null value represents the server default.</p>
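+     * <p>
+     * For example (illustrative; {@code gridFSFindIterable} is assumed to exist):
+     * <pre>
+     *  {@code
+     *      gridFSFindIterable.collation(Collation.builder().locale("en").build());
+     *  }
+     *  </pre>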
+ * @param collation the collation options to use + * @return this + * @since 3.4 + * @mongodb.server.release 3.4 + */ + GridFSFindIterable collation(@Nullable Collation collation); +} diff --git a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSFindIterableImpl.java b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSFindIterableImpl.java new file mode 100644 index 00000000000..80bfb498e12 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSFindIterableImpl.java @@ -0,0 +1,118 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.gridfs; + +import com.mongodb.Function; +import com.mongodb.client.FindIterable; +import com.mongodb.client.MongoCursor; +import com.mongodb.client.MongoIterable; +import com.mongodb.client.gridfs.model.GridFSFile; +import com.mongodb.client.model.Collation; +import com.mongodb.lang.Nullable; +import org.bson.conversions.Bson; + +import java.util.Collection; +import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; + +class GridFSFindIterableImpl implements GridFSFindIterable { + private final FindIterable underlying; + + GridFSFindIterableImpl(final FindIterable underlying) { + this.underlying = underlying; + } + + @Override + public GridFSFindIterable sort(@Nullable final Bson sort) { + underlying.sort(sort); + return this; + } + + @Override + public GridFSFindIterable skip(final int skip) { + underlying.skip(skip); + return this; + } + + @Override + public GridFSFindIterable limit(final int limit) { + underlying.limit(limit); + return this; + } + + @Override + public GridFSFindIterable filter(@Nullable final Bson filter) { + underlying.filter(filter); + return this; + } + + @Override + public GridFSFindIterable maxTime(final long maxTime, final TimeUnit timeUnit) { + underlying.maxTime(maxTime, timeUnit); + return this; + } + + @Override + public GridFSFindIterable batchSize(final int batchSize) { + underlying.batchSize(batchSize); + return this; + } + + @Override + public GridFSFindIterable collation(@Nullable final Collation collation) { + underlying.collation(collation); + return this; + } + + @Override + public GridFSFindIterable noCursorTimeout(final boolean noCursorTimeout) { + underlying.noCursorTimeout(noCursorTimeout); + return this; + } + + @Override + public MongoCursor iterator() { + return underlying.iterator(); + } + + @Override + public MongoCursor cursor() { + return iterator(); + } + + @Nullable + @Override + public GridFSFile first() { + return underlying.first(); + } + + @Override + public MongoIterable map(final Function mapper) { + return underlying.map(mapper); + } + + @Override + public void forEach(final Consumer block) { + underlying.forEach(block); + } + + @Override + public > A into(final A target) { + return underlying.into(target); + } + +} diff --git a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSUploadStream.java b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSUploadStream.java new 
file mode 100644 index 00000000000..5d2ad026f62 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSUploadStream.java @@ -0,0 +1,72 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.gridfs; + +import com.mongodb.annotations.NotThreadSafe; +import org.bson.BsonValue; +import org.bson.types.ObjectId; + +import java.io.OutputStream; + +/** + * A GridFS OutputStream for uploading data into GridFS + * + *
<p>Provides the {@code id} for the file to be uploaded as well as the {@code write} methods of an {@link OutputStream}</p>
+ *
+ * <p>This implementation of an {@code OutputStream} will not throw {@link java.io.IOException}s. However, it will throw a
+ * {@link com.mongodb.MongoException} if there is an error writing to MongoDB.</p>
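+ *
+ * <p>A minimal usage sketch (assuming an existing {@code gridFSBucket} and a {@code byte[] data} holding the file contents);
+ * closing the stream writes the final chunk and creates the files collection document:</p>
+ * <pre>
+ *  {@code
+ *      try (GridFSUploadStream uploadStream = gridFSBucket.openUploadStream("mongodb.png")) {
+ *          uploadStream.write(data);
+ *      }
+ *  }
+ *  </pre>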
+ * + * @since 3.1 + */ +@NotThreadSafe +public abstract class GridFSUploadStream extends OutputStream { + + /** + * Gets the {@link ObjectId} for the file to be uploaded or throws an error if an alternative BsonType has been used for the id. + *
<p>
+ * Throws a MongoGridFSException if the file id is not an ObjectId. + * @return the ObjectId for the file to be uploaded + */ + public abstract ObjectId getObjectId(); + + /** + * Gets the {@link BsonValue} for the file to be uploaded + * + * @return the BsonValue for the file to be uploaded + */ + public abstract BsonValue getId(); + + /** + * Aborts the upload and deletes any data. + */ + public abstract void abort(); + + @Override + public abstract void write(int b); + + @Override + public abstract void write(byte[] b); + + @Override + public abstract void write(byte[] b, int off, int len); + + @Override + public void flush() {} + + @Override + public abstract void close(); +} diff --git a/driver-sync/src/main/com/mongodb/client/gridfs/GridFSUploadStreamImpl.java b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSUploadStreamImpl.java new file mode 100644 index 00000000000..240cecf78b3 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/gridfs/GridFSUploadStreamImpl.java @@ -0,0 +1,214 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.gridfs; + +import com.mongodb.MongoGridFSException; +import com.mongodb.client.ClientSession; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.gridfs.model.GridFSFile; +import com.mongodb.client.internal.TimeoutHelper; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonValue; +import org.bson.Document; +import org.bson.types.ObjectId; + +import java.util.Date; +import java.util.concurrent.locks.ReentrantLock; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.Locks.withInterruptibleLock; + +final class GridFSUploadStreamImpl extends GridFSUploadStream { + public static final String TIMEOUT_MESSAGE = "The GridFS upload stream exceeded the timeout limit."; + private final ClientSession clientSession; + private final MongoCollection filesCollection; + private final MongoCollection chunksCollection; + private final BsonValue fileId; + private final String filename; + private final int chunkSizeBytes; + private final Document metadata; + private byte[] buffer; + private long lengthInBytes; + private int bufferOffset; + private int chunkIndex; + @Nullable + private final Timeout timeout; + private final ReentrantLock closeLock = new ReentrantLock(); + private boolean closed = false; + + GridFSUploadStreamImpl(@Nullable final ClientSession clientSession, final MongoCollection filesCollection, + final MongoCollection chunksCollection, final BsonValue fileId, final String filename, + final int chunkSizeBytes, @Nullable final Document metadata, @Nullable final Timeout timeout) { + this.clientSession = clientSession; + this.filesCollection = notNull("files collection", filesCollection); + this.chunksCollection = 
notNull("chunks collection", chunksCollection); + this.fileId = notNull("File Id", fileId); + this.filename = notNull("filename", filename); + this.chunkSizeBytes = chunkSizeBytes; + this.metadata = metadata; + chunkIndex = 0; + bufferOffset = 0; + buffer = new byte[chunkSizeBytes]; + this.timeout = timeout; + } + + @Override + public ObjectId getObjectId() { + if (!fileId.isObjectId()) { + throw new MongoGridFSException("Custom id type used for this GridFS upload stream"); + } + return fileId.asObjectId().getValue(); + } + + @Override + public BsonValue getId() { + return fileId; + } + + @Override + public void abort() { + withInterruptibleLock(closeLock, () -> { + checkClosed(); + closed = true; + }); + + if (clientSession != null) { + withNullableTimeout(chunksCollection, timeout) + .deleteMany(clientSession, new Document("files_id", fileId)); + } else { + withNullableTimeout(chunksCollection, timeout) + .deleteMany(new Document("files_id", fileId)); + } + } + + @Override + public void write(final int b) { + byte[] byteArray = new byte[1]; + byteArray[0] = (byte) (0xFF & b); + write(byteArray, 0, 1); + } + + @Override + public void write(final byte[] b) { + write(b, 0, b.length); + } + + @Override + public void write(final byte[] b, final int off, final int len) { + checkClosed(); + checkTimeout(); + notNull("b", b); + + if ((off < 0) || (off > b.length) || (len < 0) + || ((off + len) > b.length) || ((off + len) < 0)) { + throw new IndexOutOfBoundsException(); + } else if (len == 0) { + return; + } + + int currentOffset = off; + int lengthToWrite = len; + int amountToCopy = 0; + + while (lengthToWrite > 0) { + amountToCopy = lengthToWrite; + if (amountToCopy > chunkSizeBytes - bufferOffset) { + amountToCopy = chunkSizeBytes - bufferOffset; + } + System.arraycopy(b, currentOffset, buffer, bufferOffset, amountToCopy); + + bufferOffset += amountToCopy; + currentOffset += amountToCopy; + lengthToWrite -= amountToCopy; + lengthInBytes += amountToCopy; + + if (bufferOffset == chunkSizeBytes) { + writeChunk(); + } + } + } + + private void checkTimeout() { + Timeout.onExistsAndExpired(timeout, () -> TimeoutContext.throwMongoTimeoutException(TIMEOUT_MESSAGE)); + } + + @Override + public void close() { + boolean alreadyClosed = withInterruptibleLock(closeLock, () -> { + boolean prevClosed = closed; + closed = true; + return prevClosed; + }); + if (alreadyClosed) { + return; + } + writeChunk(); + GridFSFile gridFSFile = new GridFSFile(fileId, filename, lengthInBytes, chunkSizeBytes, new Date(), + metadata); + if (clientSession != null) { + withNullableTimeout(filesCollection, timeout).insertOne(clientSession, gridFSFile); + } else { + withNullableTimeout(filesCollection, timeout).insertOne(gridFSFile); + } + buffer = null; + } + + private void writeChunk() { + if (bufferOffset > 0) { + if (clientSession != null) { + withNullableTimeout(chunksCollection, timeout) + .insertOne(clientSession, new BsonDocument("files_id", fileId) + .append("n", new BsonInt32(chunkIndex)) + .append("data", getData())); + } else { + withNullableTimeout(chunksCollection, timeout) + .insertOne(new BsonDocument("files_id", fileId) + .append("n", new BsonInt32(chunkIndex)) + .append("data", getData())); + } + chunkIndex++; + bufferOffset = 0; + } + } + + private BsonBinary getData() { + if (bufferOffset < chunkSizeBytes) { + byte[] sizedBuffer = new byte[bufferOffset]; + System.arraycopy(buffer, 0, sizedBuffer, 0, bufferOffset); + buffer = sizedBuffer; + } + return new BsonBinary(buffer); + } + + private void checkClosed() 
{ + withInterruptibleLock(closeLock, () -> { + if (closed) { + throw new MongoGridFSException("The OutputStream has been closed"); + } + }); + } + + private static MongoCollection withNullableTimeout(final MongoCollection collection, + @Nullable final Timeout timeout) { + return TimeoutHelper.collectionWithTimeout(collection, TIMEOUT_MESSAGE, timeout); + } +} diff --git a/driver-sync/src/main/com/mongodb/client/gridfs/package-info.java b/driver-sync/src/main/com/mongodb/client/gridfs/package-info.java new file mode 100644 index 00000000000..e967b4366cc --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/gridfs/package-info.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains the new GridFS implementation + * + * @since 3.1 + */ +@NonNullApi +package com.mongodb.client.gridfs; + +import com.mongodb.lang.NonNullApi; diff --git a/driver-sync/src/main/com/mongodb/client/internal/AggregateIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/AggregateIterableImpl.java new file mode 100644 index 00000000000..49ed63cba32 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/AggregateIterableImpl.java @@ -0,0 +1,291 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
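// Illustrative usage sketch (not part of this patch): how the GridFS upload stream
// implemented above is typically consumed through the public GridFSBucket API.
// The connection string, database and file names are assumptions for the example.
import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoClients;
import com.mongodb.client.MongoDatabase;
import com.mongodb.client.gridfs.GridFSBucket;
import com.mongodb.client.gridfs.GridFSBuckets;
import com.mongodb.client.gridfs.GridFSUploadStream;

import java.nio.charset.StandardCharsets;

class GridFSUploadExample {
    public static void main(final String[] args) {
        try (MongoClient client = MongoClients.create("mongodb://localhost")) {
            MongoDatabase database = client.getDatabase("test");
            GridFSBucket bucket = GridFSBuckets.create(database);
            // Writes are buffered into chunkSizeBytes chunks; a chunk document is
            // inserted each time the buffer fills, and close() flushes the final
            // partial chunk and inserts the files-collection document.
            try (GridFSUploadStream upload = bucket.openUploadStream("hello.txt")) {
                upload.write("hello gridfs".getBytes(StandardCharsets.UTF_8));
                System.out.println("stored file id: " + upload.getObjectId());
            }
        }
    }
}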
+ */ + +package com.mongodb.client.internal; + +import com.mongodb.ExplainVerbosity; +import com.mongodb.MongoNamespace; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.client.AggregateIterable; +import com.mongodb.client.ClientSession; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.Collation; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.client.model.AggregationLevel; +import com.mongodb.internal.client.model.FindOptions; +import com.mongodb.internal.operation.Operations; +import com.mongodb.internal.operation.ReadOperationCursor; +import com.mongodb.internal.operation.ReadOperationExplainable; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.notNull; + +class AggregateIterableImpl extends MongoIterableImpl implements AggregateIterable { + private final Operations operations; + private final MongoNamespace namespace; + private final Class documentClass; + private final Class resultClass; + private final CodecRegistry codecRegistry; + private final List pipeline; + private final AggregationLevel aggregationLevel; + + private Boolean allowDiskUse; + private long maxTimeMS; + private long maxAwaitTimeMS; + private Boolean bypassDocumentValidation; + private Collation collation; + private BsonValue comment; + private Bson hint; + private String hintString; + private Bson variables; + + @SuppressWarnings("checkstyle:ParameterNumber") + AggregateIterableImpl(@Nullable final ClientSession clientSession, final String databaseName, final Class documentClass, + final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, + final ReadConcern readConcern, final WriteConcern writeConcern, final OperationExecutor executor, + final List pipeline, final AggregationLevel aggregationLevel, final boolean retryReads, + final TimeoutSettings timeoutSettings) { + this(clientSession, new MongoNamespace(databaseName, "_ignored"), documentClass, resultClass, codecRegistry, readPreference, + readConcern, writeConcern, executor, pipeline, aggregationLevel, retryReads, timeoutSettings); + } + + @SuppressWarnings("checkstyle:ParameterNumber") + AggregateIterableImpl(@Nullable final ClientSession clientSession, final MongoNamespace namespace, final Class documentClass, + final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, + final ReadConcern readConcern, final WriteConcern writeConcern, final OperationExecutor executor, + final List pipeline, final AggregationLevel aggregationLevel, final boolean retryReads, + final TimeoutSettings timeoutSettings) { + super(clientSession, executor, readConcern, readPreference, retryReads, timeoutSettings); + this.operations = new Operations<>(namespace, documentClass, readPreference, codecRegistry, readConcern, writeConcern, + true, retryReads, timeoutSettings); + this.namespace = notNull("namespace", namespace); + this.documentClass = notNull("documentClass", documentClass); + this.resultClass = notNull("resultClass", resultClass); + this.codecRegistry = notNull("codecRegistry", codecRegistry); + this.pipeline = notNull("pipeline", pipeline); + this.aggregationLevel = 
notNull("aggregationLevel", aggregationLevel); + } + + @Override + public void toCollection() { + BsonDocument lastPipelineStage = getLastPipelineStage(); + if (lastPipelineStage == null || !lastPipelineStage.containsKey("$out") && !lastPipelineStage.containsKey("$merge")) { + throw new IllegalStateException("The last stage of the aggregation pipeline must be $out or $merge"); + } + + getExecutor().execute( + operations.aggregateToCollection(pipeline, getTimeoutMode(), allowDiskUse, + bypassDocumentValidation, collation, hint, hintString, comment, variables, aggregationLevel), + getReadPreference(), getReadConcern(), getClientSession()); + } + + @Override + public AggregateIterable allowDiskUse(@Nullable final Boolean allowDiskUse) { + this.allowDiskUse = allowDiskUse; + return this; + } + + @Override + public AggregateIterable batchSize(final int batchSize) { + super.batchSize(batchSize); + return this; + } + + @Override + public AggregateIterable timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + return this; + } + + @Override + public AggregateIterable maxTime(final long maxTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); + return this; + } + + @Override + public AggregateIterable maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) { + this.maxAwaitTimeMS = validateMaxAwaitTime(maxAwaitTime, timeUnit); + return this; + } + + @Override + public AggregateIterable bypassDocumentValidation(@Nullable final Boolean bypassDocumentValidation) { + this.bypassDocumentValidation = bypassDocumentValidation; + return this; + } + + @Override + public AggregateIterable collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + @Override + public AggregateIterable comment(@Nullable final String comment) { + this.comment = comment == null ? 
null : new BsonString(comment); + return this; + } + + @Override + public AggregateIterable comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + @Override + public AggregateIterable hint(@Nullable final Bson hint) { + this.hint = hint; + return this; + } + + @Override + public AggregateIterable hintString(@Nullable final String hint) { + this.hintString = hint; + return this; + } + + @Override + public AggregateIterable let(@Nullable final Bson variables) { + this.variables = variables; + return this; + } + + @Override + public Document explain() { + return executeExplain(Document.class, null); + } + + @Override + public Document explain(final ExplainVerbosity verbosity) { + return executeExplain(Document.class, notNull("verbosity", verbosity)); + } + + @Override + public E explain(final Class explainDocumentClass) { + return executeExplain(explainDocumentClass, null); + } + + @Override + public E explain(final Class explainResultClass, final ExplainVerbosity verbosity) { + return executeExplain(explainResultClass, notNull("verbosity", verbosity)); + } + + private E executeExplain(final Class explainResultClass, @Nullable final ExplainVerbosity verbosity) { + notNull("explainDocumentClass", explainResultClass); + return getExecutor().execute( + asAggregateOperation().asExplainableOperation(verbosity, codecRegistry.get(explainResultClass)), getReadPreference(), + getReadConcern(), getClientSession()); + } + + @Override + public ReadOperationCursor asReadOperation() { + MongoNamespace outNamespace = getOutNamespace(); + if (outNamespace != null) { + validateTimeoutMode(); + getExecutor().execute( + operations.aggregateToCollection(pipeline, getTimeoutMode(), allowDiskUse, + bypassDocumentValidation, collation, hint, hintString, comment, variables, aggregationLevel), + getReadPreference(), getReadConcern(), getClientSession()); + + FindOptions findOptions = new FindOptions().collation(collation); + Integer batchSize = getBatchSize(); + if (batchSize != null) { + findOptions.batchSize(batchSize); + } + return operations.find(outNamespace, new BsonDocument(), resultClass, findOptions); + } else { + return asAggregateOperation(); + } + } + + protected OperationExecutor getExecutor() { + return getExecutor(operations.createTimeoutSettings(maxTimeMS, maxAwaitTimeMS)); + } + + private ReadOperationExplainable asAggregateOperation() { + return operations.aggregate(pipeline, resultClass, getTimeoutMode(), getBatchSize(), collation, hint, hintString, comment, + variables, allowDiskUse, aggregationLevel); + } + + @Nullable + private BsonDocument getLastPipelineStage() { + if (pipeline.isEmpty()) { + return null; + } else { + Bson lastStage = notNull("last pipeline stage", pipeline.get(pipeline.size() - 1)); + return lastStage.toBsonDocument(documentClass, codecRegistry); + } + } + + @Nullable + private MongoNamespace getOutNamespace() { + BsonDocument lastPipelineStage = getLastPipelineStage(); + if (lastPipelineStage == null) { + return null; + } + if (lastPipelineStage.containsKey("$out")) { + if (lastPipelineStage.get("$out").isString()) { + return new MongoNamespace(namespace.getDatabaseName(), lastPipelineStage.getString("$out").getValue()); + } else if (lastPipelineStage.get("$out").isDocument()) { + BsonDocument outDocument = lastPipelineStage.getDocument("$out"); + if (!outDocument.containsKey("db") || !outDocument.containsKey("coll")) { + throw new IllegalStateException("Cannot return a cursor when the value for $out stage is not a namespace document"); 
+ } + return new MongoNamespace(outDocument.getString("db").getValue(), outDocument.getString("coll").getValue()); + } else { + throw new IllegalStateException("Cannot return a cursor when the value for $out stage " + + "is not a string or namespace document"); + } + } else if (lastPipelineStage.containsKey("$merge")) { + if (lastPipelineStage.isString("$merge")) { + return new MongoNamespace(namespace.getDatabaseName(), lastPipelineStage.getString("$merge").getValue()); + } else if (lastPipelineStage.isDocument("$merge")) { + BsonDocument mergeDocument = lastPipelineStage.getDocument("$merge"); + if (mergeDocument.isDocument("into")) { + BsonDocument intoDocument = mergeDocument.getDocument("into"); + return new MongoNamespace(intoDocument.getString("db", new BsonString(namespace.getDatabaseName())).getValue(), + intoDocument.getString("coll").getValue()); + } else if (mergeDocument.isString("into")) { + return new MongoNamespace(namespace.getDatabaseName(), mergeDocument.getString("into").getValue()); + } + } else { + throw new IllegalStateException("Cannot return a cursor when the value for $merge stage is not a string or a document"); + } + } + + return null; + } + + private void validateTimeoutMode() { + if (getTimeoutMode() == TimeoutMode.ITERATION) { + throw new IllegalArgumentException("Aggregations that output to a collection do not support the ITERATION value for the " + + "timeoutMode option."); + } + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/ChangeStreamIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ChangeStreamIterableImpl.java new file mode 100644 index 00000000000..b5b41b375f5 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/ChangeStreamIterableImpl.java @@ -0,0 +1,229 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
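// Illustrative sketch (not part of this patch): the two ways a $out pipeline can be
// driven through the AggregateIterable implemented above. The collection and the
// "activeUsers" target name are assumptions for the example.
import com.mongodb.client.MongoCollection;
import com.mongodb.client.model.Aggregates;
import org.bson.Document;

import java.util.Arrays;

class AggregateOutExample {
    static void run(final MongoCollection<Document> collection) {
        // Fire-and-forget: toCollection() requires the last stage to be $out or
        // $merge, otherwise it throws IllegalStateException.
        collection.aggregate(Arrays.asList(
                Aggregates.match(new Document("status", "active")),
                Aggregates.out("activeUsers")))
                .toCollection();

        // Iterating the same pipeline first runs the aggregation into the output
        // namespace and then transparently issues a find against it.
        for (Document document : collection.aggregate(Arrays.asList(
                Aggregates.match(new Document("status", "active")),
                Aggregates.out("activeUsers")))) {
            System.out.println(document.toJson());
        }
    }
}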
+ */ + +package com.mongodb.client.internal; + +import com.mongodb.MongoNamespace; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.client.ChangeStreamIterable; +import com.mongodb.client.ClientSession; +import com.mongodb.client.MongoChangeStreamCursor; +import com.mongodb.client.MongoCursor; +import com.mongodb.client.MongoIterable; +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.changestream.ChangeStreamDocument; +import com.mongodb.client.model.changestream.FullDocument; +import com.mongodb.client.model.changestream.FullDocumentBeforeChange; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.client.model.changestream.ChangeStreamLevel; +import com.mongodb.internal.operation.BatchCursor; +import com.mongodb.internal.operation.Operations; +import com.mongodb.internal.operation.ReadOperationCursor; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.BsonTimestamp; +import org.bson.BsonValue; +import org.bson.RawBsonDocument; +import org.bson.codecs.Codec; +import org.bson.codecs.RawBsonDocumentCodec; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.notNull; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class ChangeStreamIterableImpl extends MongoIterableImpl> + implements ChangeStreamIterable { + private final CodecRegistry codecRegistry; + private final List pipeline; + private final Codec> codec; + private final ChangeStreamLevel changeStreamLevel; + private final Operations operations; + private FullDocument fullDocument = FullDocument.DEFAULT; + private FullDocumentBeforeChange fullDocumentBeforeChange = FullDocumentBeforeChange.DEFAULT; + private BsonDocument resumeToken; + private BsonDocument startAfter; + private long maxAwaitTimeMS; + private Collation collation; + private BsonTimestamp startAtOperationTime; + private BsonValue comment; + private boolean showExpandedEvents; + + public ChangeStreamIterableImpl(@Nullable final ClientSession clientSession, final String databaseName, + final CodecRegistry codecRegistry, final ReadPreference readPreference, final ReadConcern readConcern, + final OperationExecutor executor, final List pipeline, final Class resultClass, + final ChangeStreamLevel changeStreamLevel, final boolean retryReads, final TimeoutSettings timeoutSettings) { + this(clientSession, new MongoNamespace(databaseName, "_ignored"), codecRegistry, readPreference, readConcern, executor, pipeline, + resultClass, changeStreamLevel, retryReads, timeoutSettings); + } + + public ChangeStreamIterableImpl(@Nullable final ClientSession clientSession, final MongoNamespace namespace, + final CodecRegistry codecRegistry, final ReadPreference readPreference, final ReadConcern readConcern, + final OperationExecutor executor, final List pipeline, final Class resultClass, + final ChangeStreamLevel changeStreamLevel, final boolean retryReads, final TimeoutSettings timeoutSettings) { + super(clientSession, executor, readConcern, readPreference, retryReads, timeoutSettings); + this.codecRegistry = notNull("codecRegistry", codecRegistry); + this.pipeline = notNull("pipeline", pipeline); + this.codec = ChangeStreamDocument.createCodec(notNull("resultClass", resultClass), codecRegistry); + this.changeStreamLevel = notNull("changeStreamLevel", changeStreamLevel); + this.operations = new Operations<>(namespace, resultClass, readPreference, codecRegistry, retryReads, timeoutSettings); + } + + @Override + public ChangeStreamIterable fullDocument(final FullDocument fullDocument) { + this.fullDocument = notNull("fullDocument", fullDocument); + return this; + } + + @Override + public ChangeStreamIterable fullDocumentBeforeChange(final FullDocumentBeforeChange fullDocumentBeforeChange) { + this.fullDocumentBeforeChange = notNull("fullDocumentBeforeChange", fullDocumentBeforeChange); + return this; + } + + @Override + public ChangeStreamIterable resumeAfter(final BsonDocument resumeAfter) { + this.resumeToken = notNull("resumeAfter", resumeAfter); + return this; + } + + @Override + public ChangeStreamIterable batchSize(final int batchSize) { + super.batchSize(batchSize); + return this; + } + + @Override + public ChangeStreamIterable maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) { + this.maxAwaitTimeMS = validateMaxAwaitTime(maxAwaitTime, timeUnit); + return this; + } + + @Override + public ChangeStreamIterable collation(@Nullable final Collation collation) { + this.collation = notNull("collation", collation); + return this; + } + + @Override + public MongoIterable withDocumentClass(final Class clazz) { + return new MongoIterableImpl(getClientSession(), getExecutor(), getReadConcern(), getReadPreference(), getRetryReads(), + getTimeoutSettings()) { + @Override + public 
MongoCursor iterator() { + return cursor(); + } + + @Override + public MongoChangeStreamCursor cursor() { + return new MongoChangeStreamCursorImpl<>(execute(), codecRegistry.get(clazz), initialResumeToken()); + } + + @Override + public ReadOperationCursor asReadOperation() { + throw new UnsupportedOperationException(); + } + + @Override + + protected OperationExecutor getExecutor() { + return ChangeStreamIterableImpl.this.getExecutor(); + } + }; + } + + @Override + public ChangeStreamIterable startAtOperationTime(final BsonTimestamp startAtOperationTime) { + this.startAtOperationTime = notNull("startAtOperationTime", startAtOperationTime); + return this; + } + + @Override + public ChangeStreamIterableImpl startAfter(final BsonDocument startAfter) { + this.startAfter = notNull("startAfter", startAfter); + return this; + } + + + @Override + public ChangeStreamIterable comment(@Nullable final String comment) { + this.comment = comment == null ? null : new BsonString(comment); + return this; + } + + @Override + public ChangeStreamIterable comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + @Override + public ChangeStreamIterable showExpandedEvents(final boolean showExpandedEvents) { + this.showExpandedEvents = showExpandedEvents; + return this; + } + + @Override + public MongoCursor> iterator() { + return cursor(); + } + + @Override + public MongoChangeStreamCursor> cursor() { + return new MongoChangeStreamCursorImpl<>(execute(), codec, initialResumeToken()); + } + + @Nullable + @Override + public ChangeStreamDocument first() { + try (MongoChangeStreamCursor> cursor = cursor()) { + if (!cursor.hasNext()) { + return null; + } + return cursor.next(); + } + } + + @Override + public ReadOperationCursor> asReadOperation() { + throw new UnsupportedOperationException(); + } + + + protected OperationExecutor getExecutor() { + return getExecutor(operations.createTimeoutSettings(0, maxAwaitTimeMS)); + } + + private ReadOperationCursor createChangeStreamOperation() { + return operations.changeStream(fullDocument, fullDocumentBeforeChange, pipeline, new RawBsonDocumentCodec(), changeStreamLevel, + getBatchSize(), collation, comment, resumeToken, startAtOperationTime, startAfter, showExpandedEvents); + } + + private BatchCursor execute() { + return getExecutor().execute(createChangeStreamOperation(), getReadPreference(), getReadConcern(), getClientSession()); + } + + private BsonDocument initialResumeToken() { + return startAfter != null ? startAfter : resumeToken; + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/ClientEncryptionImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ClientEncryptionImpl.java new file mode 100644 index 00000000000..3edef6b937d --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/ClientEncryptionImpl.java @@ -0,0 +1,286 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
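// Illustrative sketch (not part of this patch): resuming a change stream with a
// resume token, the value that resumeAfter/startAfter and initialResumeToken()
// above feed into the cursor. The collection is an assumption for the example.
import com.mongodb.client.MongoChangeStreamCursor;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.model.changestream.ChangeStreamDocument;
import org.bson.BsonDocument;
import org.bson.Document;

class ChangeStreamResumeExample {
    static void run(final MongoCollection<Document> collection) {
        BsonDocument resumeToken;
        try (MongoChangeStreamCursor<ChangeStreamDocument<Document>> cursor = collection.watch().cursor()) {
            ChangeStreamDocument<Document> event = cursor.next(); // blocks until a change arrives
            System.out.println("operation: " + event.getOperationType());
            resumeToken = cursor.getResumeToken();
        }
        // Later: pick up where the previous cursor left off.
        try (MongoChangeStreamCursor<ChangeStreamDocument<Document>> resumed =
                     collection.watch().resumeAfter(resumeToken).cursor()) {
            while (resumed.hasNext()) {
                System.out.println(resumed.next().getOperationType());
            }
        }
    }
}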
+ */ + +package com.mongodb.client.internal; + +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.MongoConfigurationException; +import com.mongodb.MongoNamespace; +import com.mongodb.MongoUpdatedEncryptedFieldsException; +import com.mongodb.ReadConcern; +import com.mongodb.WriteConcern; +import com.mongodb.bulk.BulkWriteResult; +import com.mongodb.client.FindIterable; +import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoClients; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.CreateEncryptedCollectionParams; +import com.mongodb.client.model.Filters; +import com.mongodb.client.model.UpdateOneModel; +import com.mongodb.client.model.Updates; +import com.mongodb.client.model.vault.DataKeyOptions; +import com.mongodb.client.model.vault.EncryptOptions; +import com.mongodb.client.model.vault.RewrapManyDataKeyOptions; +import com.mongodb.client.model.vault.RewrapManyDataKeyResult; +import com.mongodb.client.result.DeleteResult; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.BsonNull; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.List; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.client.internal.TimeoutHelper.collectionWithTimeout; +import static com.mongodb.client.internal.TimeoutHelper.databaseWithTimeout; +import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; +import static com.mongodb.internal.capi.MongoCryptHelper.validateRewrapManyDataKeyOptions; +import static java.lang.String.format; +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.bson.internal.BsonUtil.mutableDeepCopy; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class ClientEncryptionImpl implements ClientEncryption { + private final Crypt crypt; + private final ClientEncryptionSettings options; + private final MongoClient keyVaultClient; + private final MongoCollection collection; + + public ClientEncryptionImpl(final ClientEncryptionSettings options) { + this(MongoClients.create(options.getKeyVaultMongoClientSettings()), options); + } + + @VisibleForTesting(otherwise = PRIVATE) + public ClientEncryptionImpl(final MongoClient keyVaultClient, final ClientEncryptionSettings options) { + this.keyVaultClient = keyVaultClient; + this.crypt = Crypts.create(keyVaultClient, options); + this.options = options; + MongoNamespace namespace = new MongoNamespace(options.getKeyVaultNamespace()); + this.collection = getVaultCollection(keyVaultClient, options, namespace); + } + + private static MongoCollection getVaultCollection(final MongoClient keyVaultClient, + final ClientEncryptionSettings options, + final MongoNamespace namespace) { + MongoCollection vaultCollection = keyVaultClient.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName(), BsonDocument.class) + .withWriteConcern(WriteConcern.MAJORITY) + .withReadConcern(ReadConcern.MAJORITY); + + Long timeoutMs = options.getTimeout(MILLISECONDS); + if (timeoutMs != null){ + vaultCollection = vaultCollection.withTimeout(timeoutMs, MILLISECONDS); + } + return vaultCollection; + } + + @Override + public BsonBinary createDataKey(final String kmsProvider) { + return createDataKey(kmsProvider, new DataKeyOptions()); + } + + @Override + public BsonBinary createDataKey(final String kmsProvider, final DataKeyOptions dataKeyOptions) { + Timeout operationTimeout = startTimeout(); + return createDataKey(kmsProvider, dataKeyOptions, operationTimeout); + } + + public BsonBinary createDataKey(final String kmsProvider, final DataKeyOptions dataKeyOptions, @Nullable final Timeout operationTimeout) { + BsonDocument dataKeyDocument = crypt.createDataKey(kmsProvider, dataKeyOptions, operationTimeout); + collectionWithTimeout(collection, "Data key insertion exceeded the timeout limit.", operationTimeout).insertOne(dataKeyDocument); + return dataKeyDocument.getBinary("_id"); + } + + @Override + public BsonBinary encrypt(final BsonValue value, final EncryptOptions options) { + Timeout operationTimeout = startTimeout(); + return crypt.encryptExplicitly(value, options, operationTimeout); + } + + @Override + public BsonDocument encryptExpression(final Bson expression, final EncryptOptions options) { + Timeout operationTimeout = startTimeout(); + return crypt.encryptExpression(expression.toBsonDocument(BsonDocument.class, collection.getCodecRegistry()), options, + operationTimeout); + } + + @Override + public BsonValue decrypt(final BsonBinary value) { + Timeout operationTimeout = startTimeout(); + return crypt.decryptExplicitly(value, operationTimeout); + } + + @Override + public DeleteResult deleteKey(final BsonBinary id) { + return collectionWithTimeout(collection, startTimeout()).deleteOne(Filters.eq("_id", id)); + } + + @Override + public BsonDocument getKey(final BsonBinary id) { + return collectionWithTimeout(collection, startTimeout()).find(Filters.eq("_id", id)).first(); + } + + @Override + public FindIterable getKeys() { + return collectionWithTimeout(collection, startTimeout()).find(); + } + + @Override + public BsonDocument addKeyAltName(final BsonBinary id, final String keyAltName) { + return collection.findOneAndUpdate(Filters.eq("_id", id), Updates.addToSet("keyAltNames", 
keyAltName)); + } + + @Override + public BsonDocument removeKeyAltName(final BsonBinary id, final String keyAltName) { + BsonDocument updateDocument = new BsonDocument() + .append("$set", new BsonDocument() + .append("keyAltNames", new BsonDocument() + .append("$cond", new BsonArray(asList( + new BsonDocument() + .append("$eq", new BsonArray(asList( + new BsonString("$keyAltNames"), + new BsonArray(singletonList(new BsonString(keyAltName)))))), + new BsonString("$$REMOVE"), + new BsonDocument() + .append("$filter", new BsonDocument() + .append("input", new BsonString("$keyAltNames")) + .append("cond", new BsonDocument() + .append("$ne", new BsonArray(asList( + new BsonString("$$this"), + new BsonString(keyAltName)))))) + ))) + ) + ); + return collection.findOneAndUpdate(Filters.eq("_id", id), singletonList(updateDocument)); + } + + @Override + public BsonDocument getKeyByAltName(final String keyAltName) { + return collection.find(Filters.eq("keyAltNames", keyAltName)).first(); + } + + @Override + public RewrapManyDataKeyResult rewrapManyDataKey(final Bson filter) { + return rewrapManyDataKey(filter, new RewrapManyDataKeyOptions()); + } + + @Override + public RewrapManyDataKeyResult rewrapManyDataKey(final Bson filter, final RewrapManyDataKeyOptions options) { + validateRewrapManyDataKeyOptions(options); + Timeout operationTimeout = startTimeout(); + BsonDocument results = crypt.rewrapManyDataKey(filter.toBsonDocument(BsonDocument.class, collection.getCodecRegistry()), + options, operationTimeout); + if (results.isEmpty()) { + return new RewrapManyDataKeyResult(); + } + List> updateModels = results.getArray("v", new BsonArray()).stream().map(v -> { + BsonDocument updateDocument = v.asDocument(); + return new UpdateOneModel(Filters.eq(updateDocument.get("_id")), + Updates.combine( + Updates.set("masterKey", updateDocument.get("masterKey")), + Updates.set("keyMaterial", updateDocument.get("keyMaterial")), + Updates.currentDate("updateDate")) + ); + }).collect(Collectors.toList()); + BulkWriteResult bulkWriteResult = collectionWithTimeout(collection, operationTimeout).bulkWrite(updateModels); + return new RewrapManyDataKeyResult(bulkWriteResult); + } + + @Override + public BsonDocument createEncryptedCollection(final MongoDatabase database, final String collectionName, + final CreateCollectionOptions createCollectionOptions, final CreateEncryptedCollectionParams createEncryptedCollectionParams) { + notNull("collectionName", collectionName); + notNull("createCollectionOptions", createCollectionOptions); + notNull("createEncryptedCollectionParams", createEncryptedCollectionParams); + Timeout operationTimeout = startTimeout(); + MongoNamespace namespace = new MongoNamespace(database.getName(), collectionName); + Bson rawEncryptedFields = createCollectionOptions.getEncryptedFields(); + if (rawEncryptedFields == null) { + throw new MongoConfigurationException(format("`encryptedFields` is not configured for the collection %s.", namespace)); + } + CodecRegistry codecRegistry = options.getKeyVaultMongoClientSettings().getCodecRegistry(); + BsonDocument encryptedFields = rawEncryptedFields.toBsonDocument(BsonDocument.class, codecRegistry); + BsonValue fields = encryptedFields.get("fields"); + if (fields != null && fields.isArray()) { + String kmsProvider = createEncryptedCollectionParams.getKmsProvider(); + DataKeyOptions dataKeyOptions = new DataKeyOptions(); + BsonDocument masterKey = createEncryptedCollectionParams.getMasterKey(); + if (masterKey != null) { + dataKeyOptions.masterKey(masterKey); + } 
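// The loop below walks `encryptedFields.fields`: every field whose keyId is BsonNull
// gets a freshly created data key written into a deep copy of the document. If key
// creation or the subsequent createCollection fails after the first key creation was
// attempted, the partially updated copy is surfaced via
// MongoUpdatedEncryptedFieldsException so the caller can clean up any created keys.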
+ String keyIdBsonKey = "keyId"; + BsonDocument maybeUpdatedEncryptedFields = mutableDeepCopy(encryptedFields); + // only the mutability of `dataKeyMightBeCreated` is important, it does not need to be thread-safe + AtomicBoolean dataKeyMightBeCreated = new AtomicBoolean(); + try { + maybeUpdatedEncryptedFields.get("fields").asArray() + .stream() + .filter(BsonValue::isDocument) + .map(BsonValue::asDocument) + .filter(field -> field.containsKey(keyIdBsonKey)) + .filter(field -> Objects.equals(field.get(keyIdBsonKey), BsonNull.VALUE)) + .forEachOrdered(field -> { + // It is crucial to set the `dataKeyMightBeCreated` flag either immediately before calling `createDataKey`, + // or after that in a `finally` block. + dataKeyMightBeCreated.set(true); + BsonBinary dataKeyId = createDataKey(kmsProvider, dataKeyOptions, operationTimeout); + field.put(keyIdBsonKey, dataKeyId); + }); + databaseWithTimeout(database, operationTimeout).createCollection(collectionName, + new CreateCollectionOptions(createCollectionOptions).encryptedFields(maybeUpdatedEncryptedFields)); + return maybeUpdatedEncryptedFields; + } catch (Exception e) { + if (dataKeyMightBeCreated.get()) { + throw new MongoUpdatedEncryptedFieldsException(maybeUpdatedEncryptedFields, format("Failed to create %s.", namespace), e); + } else { + throw e; + } + } + } else { + databaseWithTimeout(database, operationTimeout).createCollection(collectionName, createCollectionOptions); + return encryptedFields; + } + } + + @Override + public void close() { + crypt.close(); + keyVaultClient.close(); + } + + @Nullable + private Timeout startTimeout() { + return TimeoutContext.startTimeout(options.getTimeout(MILLISECONDS)); + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/ClientSessionBinding.java b/driver-sync/src/main/com/mongodb/client/internal/ClientSessionBinding.java new file mode 100644 index 00000000000..2d8a4dbfb30 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/ClientSessionBinding.java @@ -0,0 +1,222 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
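// Illustrative sketch (not part of this patch): an explicit encryption round trip
// through the ClientEncryption implemented above. The 96-byte "local" master key,
// connection string and key vault namespace are assumptions for the example.
import com.mongodb.ClientEncryptionSettings;
import com.mongodb.ConnectionString;
import com.mongodb.MongoClientSettings;
import com.mongodb.client.model.vault.DataKeyOptions;
import com.mongodb.client.model.vault.EncryptOptions;
import com.mongodb.client.vault.ClientEncryption;
import com.mongodb.client.vault.ClientEncryptions;
import org.bson.BsonBinary;
import org.bson.BsonString;

import java.security.SecureRandom;
import java.util.HashMap;
import java.util.Map;

class ExplicitEncryptionExample {
    public static void main(final String[] args) {
        byte[] localMasterKey = new byte[96]; // the "local" KMS provider requires a 96-byte key
        new SecureRandom().nextBytes(localMasterKey);
        Map<String, Object> localProvider = new HashMap<>();
        localProvider.put("key", localMasterKey);
        Map<String, Map<String, Object>> kmsProviders = new HashMap<>();
        kmsProviders.put("local", localProvider);

        ClientEncryptionSettings settings = ClientEncryptionSettings.builder()
                .keyVaultMongoClientSettings(MongoClientSettings.builder()
                        .applyConnectionString(new ConnectionString("mongodb://localhost"))
                        .build())
                .keyVaultNamespace("encryption.__keyVault")
                .kmsProviders(kmsProviders)
                .build();

        try (ClientEncryption clientEncryption = ClientEncryptions.create(settings)) {
            BsonBinary dataKeyId = clientEncryption.createDataKey("local", new DataKeyOptions());
            BsonBinary ciphertext = clientEncryption.encrypt(new BsonString("secret"),
                    new EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic").keyId(dataKeyId));
            System.out.println("decrypted: " + clientEncryption.decrypt(ciphertext));
        }
    }
}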
+ */ + +package com.mongodb.client.internal; + +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.client.ClientSession; +import com.mongodb.connection.ClusterType; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.binding.AbstractReferenceCounted; +import com.mongodb.internal.binding.ClusterAwareReadWriteBinding; +import com.mongodb.internal.binding.ConnectionSource; +import com.mongodb.internal.binding.ReadWriteBinding; +import com.mongodb.internal.binding.TransactionContext; +import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.session.ClientSessionContext; + +import java.util.function.Supplier; + +import static com.mongodb.connection.ClusterType.LOAD_BALANCED; +import static com.mongodb.connection.ClusterType.SHARDED; +import static org.bson.assertions.Assertions.assertNotNull; +import static org.bson.assertions.Assertions.notNull; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class ClientSessionBinding extends AbstractReferenceCounted implements ReadWriteBinding { + private final ClusterAwareReadWriteBinding wrapped; + private final ClientSession session; + private final boolean ownsSession; + private final OperationContext operationContext; + + public ClientSessionBinding(final ClientSession session, final boolean ownsSession, final ClusterAwareReadWriteBinding wrapped) { + this.wrapped = wrapped; + wrapped.retain(); + this.session = notNull("session", session); + this.ownsSession = ownsSession; + this.operationContext = wrapped.getOperationContext().withSessionContext(new SyncClientSessionContext(session)); + } + + @Override + public ReadPreference getReadPreference() { + return wrapped.getReadPreference(); + } + + @Override + public int getCount() { + return wrapped.getCount(); + } + + @Override + public ClientSessionBinding retain() { + super.retain(); + return this; + } + + @Override + public int release() { + int count = super.release(); + if (count == 0) { + wrapped.release(); + if (ownsSession) { + session.close(); + } + + } + return count; + } + + @Override + public ConnectionSource getReadConnectionSource() { + return new SessionBindingConnectionSource(getConnectionSource(wrapped::getReadConnectionSource)); + } + + @Override + public ConnectionSource getReadConnectionSource(final int minWireVersion, final ReadPreference fallbackReadPreference) { + return new SessionBindingConnectionSource(getConnectionSource(() -> + wrapped.getReadConnectionSource(minWireVersion, fallbackReadPreference))); + } + + public ConnectionSource getWriteConnectionSource() { + return new SessionBindingConnectionSource(getConnectionSource(wrapped::getWriteConnectionSource)); + } + + @Override + public OperationContext getOperationContext() { + return operationContext; + } + + private ConnectionSource getConnectionSource(final Supplier wrappedConnectionSourceSupplier) { + if (!session.hasActiveTransaction()) { + return wrappedConnectionSourceSupplier.get(); + } + + if (TransactionContext.get(session) == null) { + ConnectionSource source = wrappedConnectionSourceSupplier.get(); + ClusterType clusterType = source.getServerDescription().getClusterType(); + if (clusterType == SHARDED || clusterType == LOAD_BALANCED) { + TransactionContext transactionContext = new TransactionContext<>(clusterType); + session.setTransactionContext(source.getServerDescription().getAddress(), transactionContext); + transactionContext.release(); // The session is responsible for retaining a reference to the context + } + return source; + } else { + return wrapped.getConnectionSource(assertNotNull(session.getPinnedServerAddress())); + } + } + + private class SessionBindingConnectionSource implements ConnectionSource { + private ConnectionSource wrapped; + + SessionBindingConnectionSource(final ConnectionSource wrapped) { + this.wrapped = wrapped; + ClientSessionBinding.this.retain(); + } + + @Override + public ServerDescription getServerDescription() { + return wrapped.getServerDescription(); + } + + @Override + public OperationContext getOperationContext() { + return operationContext; + } + + @Override + public ReadPreference getReadPreference() { + return wrapped.getReadPreference(); + } + + @Override + public Connection getConnection() { + TransactionContext transactionContext = TransactionContext.get(session); + if (transactionContext != null && transactionContext.isConnectionPinningRequired()) { + Connection pinnedConnection = transactionContext.getPinnedConnection(); + if 
(pinnedConnection == null) { + Connection connection = wrapped.getConnection(); + transactionContext.pinConnection(connection, Connection::markAsPinned); + return connection; + } else { + return pinnedConnection.retain(); + } + } else { + return wrapped.getConnection(); + } + } + + @Override + @SuppressWarnings("checkstyle:methodlength") + public ConnectionSource retain() { + wrapped = wrapped.retain(); + return this; + } + + @Override + public int getCount() { + return wrapped.getCount(); + } + + @Override + public int release() { + int count = wrapped.release(); + if (count == 0) { + ClientSessionBinding.this.release(); + } + return count; + } + } + + private final class SyncClientSessionContext extends ClientSessionContext { + + private final ClientSession clientSession; + + SyncClientSessionContext(final ClientSession clientSession) { + super(clientSession); + this.clientSession = clientSession; + } + + @Override + public boolean isImplicitSession() { + return ownsSession; + } + + @Override + public boolean notifyMessageSent() { + return clientSession.notifyMessageSent(); + } + + @Override + public boolean hasActiveTransaction() { + return clientSession.hasActiveTransaction(); + } + + @Override + public ReadConcern getReadConcern() { + if (clientSession.hasActiveTransaction()) { + return assertNotNull(clientSession.getTransactionOptions().getReadConcern()); + } else if (isSnapshot()) { + return ReadConcern.SNAPSHOT; + } else { + return wrapped.getOperationContext().getSessionContext().getReadConcern(); + } + } + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/ClientSessionClock.java b/driver-sync/src/main/com/mongodb/client/internal/ClientSessionClock.java new file mode 100644 index 00000000000..a5ba63e3cd6 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/ClientSessionClock.java @@ -0,0 +1,41 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.internal; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class ClientSessionClock { + public static final ClientSessionClock INSTANCE = new ClientSessionClock(0L); + + private long currentTime; + + private ClientSessionClock(final long millis) { + currentTime = millis; + } + + public long now() { + if (currentTime == 0L) { + return System.currentTimeMillis(); + } + return currentTime; + } + + public void setTime(final long millis) { + currentTime = millis; + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/ClientSessionImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ClientSessionImpl.java new file mode 100644 index 00000000000..b60fc90316a --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/ClientSessionImpl.java @@ -0,0 +1,320 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.internal; + +import com.mongodb.ClientSessionOptions; +import com.mongodb.MongoClientException; +import com.mongodb.MongoException; +import com.mongodb.MongoExecutionTimeoutException; +import com.mongodb.MongoInternalException; +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.ReadConcern; +import com.mongodb.TransactionOptions; +import com.mongodb.WriteConcern; +import com.mongodb.client.ClientSession; +import com.mongodb.client.TransactionBody; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.operation.AbortTransactionOperation; +import com.mongodb.internal.operation.CommitTransactionOperation; +import com.mongodb.internal.operation.OperationHelper; +import com.mongodb.internal.operation.ReadOperation; +import com.mongodb.internal.operation.WriteConcernHelper; +import com.mongodb.internal.operation.WriteOperation; +import com.mongodb.internal.session.BaseClientSessionImpl; +import com.mongodb.internal.session.ServerSessionPool; +import com.mongodb.lang.Nullable; + +import static com.mongodb.MongoException.TRANSIENT_TRANSACTION_ERROR_LABEL; +import static com.mongodb.MongoException.UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL; +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.assertions.Assertions.notNull; + +final class ClientSessionImpl extends BaseClientSessionImpl implements ClientSession { + + private static final int MAX_RETRY_TIME_LIMIT_MS = 120000; + + private final OperationExecutor operationExecutor; + private TransactionState transactionState = TransactionState.NONE; + private boolean messageSentInCurrentTransaction; + private boolean commitInProgress; + private TransactionOptions transactionOptions; + + ClientSessionImpl(final ServerSessionPool serverSessionPool, final Object originator, final ClientSessionOptions options, + final OperationExecutor operationExecutor) { + super(serverSessionPool, originator, options); + this.operationExecutor = operationExecutor; + } + + @Override + public boolean 
hasActiveTransaction() { + return transactionState == TransactionState.IN || (transactionState == TransactionState.COMMITTED && commitInProgress); + } + + @Override + public boolean notifyMessageSent() { + if (hasActiveTransaction()) { + boolean firstMessageInCurrentTransaction = !messageSentInCurrentTransaction; + messageSentInCurrentTransaction = true; + return firstMessageInCurrentTransaction; + } else { + if (transactionState == TransactionState.COMMITTED || transactionState == TransactionState.ABORTED) { + cleanupTransaction(TransactionState.NONE); + } + return false; + } + } + + + @Override + public void notifyOperationInitiated(final Object operation) { + assertTrue(operation instanceof ReadOperation || operation instanceof WriteOperation); + if (!(hasActiveTransaction() || operation instanceof CommitTransactionOperation)) { + assertTrue(getPinnedServerAddress() == null + || (transactionState != TransactionState.ABORTED && transactionState != TransactionState.NONE)); + clearTransactionContext(); + } + } + + @Override + public TransactionOptions getTransactionOptions() { + isTrue("in transaction", transactionState == TransactionState.IN || transactionState == TransactionState.COMMITTED); + return transactionOptions; + } + + @Override + public void startTransaction() { + startTransaction(TransactionOptions.builder().build()); + } + + @Override + public void startTransaction(final TransactionOptions transactionOptions) { + startTransaction(transactionOptions, createTimeoutContext(transactionOptions)); + } + + @Override + public void commitTransaction() { + commitTransaction(true); + } + + @Override + public void abortTransaction() { + if (transactionState == TransactionState.ABORTED) { + throw new IllegalStateException("Cannot call abortTransaction twice"); + } + if (transactionState == TransactionState.COMMITTED) { + throw new IllegalStateException("Cannot call abortTransaction after calling commitTransaction"); + } + if (transactionState == TransactionState.NONE) { + throw new IllegalStateException("There is no transaction started"); + } + try { + if (messageSentInCurrentTransaction) { + ReadConcern readConcern = transactionOptions.getReadConcern(); + if (readConcern == null) { + throw new MongoInternalException("Invariant violated. 
Transaction options read concern can not be null"); + } + resetTimeout(); + TimeoutContext timeoutContext = getTimeoutContext(); + WriteConcern writeConcern = assertNotNull(getWriteConcern(timeoutContext)); + operationExecutor + .execute(new AbortTransactionOperation(writeConcern) + .recoveryToken(getRecoveryToken()), readConcern, this); + } + } catch (RuntimeException e) { + // ignore exceptions from abort + } finally { + clearTransactionContext(); + cleanupTransaction(TransactionState.ABORTED); + } + } + + private void startTransaction(final TransactionOptions transactionOptions, final TimeoutContext timeoutContext) { + Boolean snapshot = getOptions().isSnapshot(); + if (snapshot != null && snapshot) { + throw new IllegalArgumentException("Transactions are not supported in snapshot sessions"); + } + notNull("transactionOptions", transactionOptions); + if (transactionState == TransactionState.IN) { + throw new IllegalStateException("Transaction already in progress"); + } + if (transactionState == TransactionState.COMMITTED) { + cleanupTransaction(TransactionState.IN); + } else { + transactionState = TransactionState.IN; + } + getServerSession().advanceTransactionNumber(); + this.transactionOptions = TransactionOptions.merge(transactionOptions, getOptions().getDefaultTransactionOptions()); + WriteConcern writeConcern = getWriteConcern(timeoutContext); + if (writeConcern == null) { + throw new MongoInternalException("Invariant violated. Transaction options write concern can not be null"); + } + if (!writeConcern.isAcknowledged()) { + throw new MongoClientException("Transactions do not support unacknowledged write concern"); + } + clearTransactionContext(); + setTimeoutContext(timeoutContext); + } + + @Nullable + private WriteConcern getWriteConcern(@Nullable final TimeoutContext timeoutContext) { + WriteConcern writeConcern = transactionOptions.getWriteConcern(); + if (hasTimeoutMS(timeoutContext) && hasWTimeoutMS(writeConcern)) { + return WriteConcernHelper.cloneWithoutTimeout(writeConcern); + } + return writeConcern; + } + + private void commitTransaction(final boolean resetTimeout) { + if (transactionState == TransactionState.ABORTED) { + throw new IllegalStateException("Cannot call commitTransaction after calling abortTransaction"); + } + if (transactionState == TransactionState.NONE) { + throw new IllegalStateException("There is no transaction started"); + } + + try { + if (messageSentInCurrentTransaction) { + ReadConcern readConcern = transactionOptions.getReadConcern(); + if (readConcern == null) { + throw new MongoInternalException("Invariant violated. 
Transaction options read concern can not be null"); + } + commitInProgress = true; + if (resetTimeout) { + resetTimeout(); + } + TimeoutContext timeoutContext = getTimeoutContext(); + WriteConcern writeConcern = assertNotNull(getWriteConcern(timeoutContext)); + operationExecutor + .execute(new CommitTransactionOperation(writeConcern, + transactionState == TransactionState.COMMITTED) + .recoveryToken(getRecoveryToken()), readConcern, this); + } + } catch (MongoException e) { + clearTransactionContextOnError(e); + throw e; + } finally { + transactionState = TransactionState.COMMITTED; + commitInProgress = false; + } + } + + private void clearTransactionContextOnError(final MongoException e) { + if (e.hasErrorLabel(TRANSIENT_TRANSACTION_ERROR_LABEL) || e.hasErrorLabel(UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL)) { + clearTransactionContext(); + } + } + + @Override + public T withTransaction(final TransactionBody transactionBody) { + return withTransaction(transactionBody, TransactionOptions.builder().build()); + } + + @Override + public T withTransaction(final TransactionBody transactionBody, final TransactionOptions options) { + notNull("transactionBody", transactionBody); + long startTime = ClientSessionClock.INSTANCE.now(); + TimeoutContext withTransactionTimeoutContext = createTimeoutContext(options); + + outer: + while (true) { + T retVal; + try { + startTransaction(options, withTransactionTimeoutContext.copyTimeoutContext()); + retVal = transactionBody.execute(); + } catch (Throwable e) { + if (transactionState == TransactionState.IN) { + abortTransaction(); + } + if (e instanceof MongoException && !(e instanceof MongoOperationTimeoutException)) { + MongoException exceptionToHandle = OperationHelper.unwrap((MongoException) e); + if (exceptionToHandle.hasErrorLabel(TRANSIENT_TRANSACTION_ERROR_LABEL) + && ClientSessionClock.INSTANCE.now() - startTime < MAX_RETRY_TIME_LIMIT_MS) { + continue; + } + } + throw e; + } + if (transactionState == TransactionState.IN) { + while (true) { + try { + commitTransaction(false); + break; + } catch (MongoException e) { + clearTransactionContextOnError(e); + if (!(e instanceof MongoOperationTimeoutException) + && ClientSessionClock.INSTANCE.now() - startTime < MAX_RETRY_TIME_LIMIT_MS) { + applyMajorityWriteConcernToTransactionOptions(); + + if (!(e instanceof MongoExecutionTimeoutException) + && e.hasErrorLabel(UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL)) { + continue; + } else if (e.hasErrorLabel(TRANSIENT_TRANSACTION_ERROR_LABEL)) { + continue outer; + } + } + throw e; + } + } + } + return retVal; + } + } + + @Override + public void close() { + try { + if (transactionState == TransactionState.IN) { + abortTransaction(); + } + } finally { + clearTransactionContext(); + super.close(); + } + } + + // Apply majority write concern if the commit is to be retried. 
+ private void applyMajorityWriteConcernToTransactionOptions() { + if (transactionOptions != null) { + TimeoutContext timeoutContext = getTimeoutContext(); + WriteConcern writeConcern = getWriteConcern(timeoutContext); + if (writeConcern != null) { + transactionOptions = TransactionOptions.merge(TransactionOptions.builder() + .writeConcern(writeConcern.withW("majority")).build(), transactionOptions); + } else { + transactionOptions = TransactionOptions.merge(TransactionOptions.builder() + .writeConcern(WriteConcern.MAJORITY).build(), transactionOptions); + } + } else { + transactionOptions = TransactionOptions.builder().writeConcern(WriteConcern.MAJORITY).build(); + } + } + + private void cleanupTransaction(final TransactionState nextState) { + messageSentInCurrentTransaction = false; + transactionOptions = null; + transactionState = nextState; + setTimeoutContext(null); + } + + private TimeoutContext createTimeoutContext(final TransactionOptions transactionOptions) { + return new TimeoutContext(getTimeoutSettings( + TransactionOptions.merge(transactionOptions, getOptions().getDefaultTransactionOptions()), + operationExecutor.getTimeoutSettings())); + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/Clusters.java b/driver-sync/src/main/com/mongodb/client/internal/Clusters.java new file mode 100644 index 00000000000..6c57505e090 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/Clusters.java @@ -0,0 +1,63 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
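// Illustrative sketch (not part of this patch): the retry loop implemented above is
// what drives the public withTransaction callback API. The client, database and
// collection names are assumptions for the example.
import com.mongodb.client.ClientSession;
import com.mongodb.client.MongoClient;
import com.mongodb.client.MongoCollection;
import org.bson.Document;

class WithTransactionExample {
    static void run(final MongoClient client) {
        MongoCollection<Document> accounts = client.getDatabase("bank").getCollection("accounts");
        try (ClientSession session = client.startSession()) {
            // The body may execute more than once: a TransientTransactionError aborts
            // and reruns the whole body, an UnknownTransactionCommitResult retries only
            // the commit, both within the 120-second window enforced above.
            String result = session.withTransaction(() -> {
                accounts.updateOne(session, new Document("_id", "a"),
                        new Document("$inc", new Document("balance", -100)));
                accounts.updateOne(session, new Document("_id", "b"),
                        new Document("$inc", new Document("balance", 100)));
                return "transferred";
            });
            System.out.println(result);
        }
    }
}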
+ */ +package com.mongodb.client.internal; + +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoDriverInformation; +import com.mongodb.connection.SocketSettings; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.connection.Cluster; +import com.mongodb.internal.connection.DefaultClusterFactory; +import com.mongodb.internal.connection.InternalConnectionPoolSettings; +import com.mongodb.internal.connection.StreamFactory; +import com.mongodb.internal.connection.StreamFactoryFactory; +import com.mongodb.lang.Nullable; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.internal.event.EventListenerHelper.getCommandListener; + +public final class Clusters { + + private Clusters() { + //NOP + } + + public static Cluster createCluster(final MongoClientSettings settings, + @Nullable final MongoDriverInformation mongoDriverInformation, + final StreamFactoryFactory streamFactoryFactory) { + assertNotNull(streamFactoryFactory); + assertNotNull(settings); + + StreamFactory streamFactory = getStreamFactory(streamFactoryFactory, settings, false); + StreamFactory heartbeatStreamFactory = getStreamFactory(streamFactoryFactory, settings, true); + + return new DefaultClusterFactory().createCluster(settings.getClusterSettings(), settings.getServerSettings(), + settings.getConnectionPoolSettings(), InternalConnectionPoolSettings.builder().build(), + TimeoutSettings.create(settings), streamFactory, + TimeoutSettings.createHeartbeatSettings(settings), heartbeatStreamFactory, + settings.getCredential(), settings.getLoggerSettings(), getCommandListener(settings.getCommandListeners()), + settings.getApplicationName(), mongoDriverInformation, settings.getCompressorList(), settings.getServerApi(), + settings.getDnsClient()); + } + + private static StreamFactory getStreamFactory( + final StreamFactoryFactory streamFactoryFactory, + final MongoClientSettings settings, + final boolean isHeartbeat) { + SocketSettings socketSettings = isHeartbeat ? settings.getHeartbeatSocketSettings() : settings.getSocketSettings(); + return streamFactoryFactory.create(socketSettings, settings.getSslSettings()); + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/CollectionInfoRetriever.java b/driver-sync/src/main/com/mongodb/client/internal/CollectionInfoRetriever.java new file mode 100644 index 00000000000..9d02a1e8756 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/CollectionInfoRetriever.java @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.internal; + +import com.mongodb.client.MongoClient; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; + +import java.util.ArrayList; +import java.util.List; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.client.internal.TimeoutHelper.databaseWithTimeout; + +class CollectionInfoRetriever { + + private static final String TIMEOUT_ERROR_MESSAGE = "Collection information retrieval exceeded the timeout limit."; + private final MongoClient client; + + CollectionInfoRetriever(final MongoClient client) { + this.client = notNull("client", client); + } + + public List filter(final String databaseName, final BsonDocument filter, @Nullable final Timeout operationTimeout) { + return databaseWithTimeout(client.getDatabase(databaseName), TIMEOUT_ERROR_MESSAGE, operationTimeout) + .listCollections(BsonDocument.class) + .filter(filter) + .into(new ArrayList<>()); + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/CommandMarker.java b/driver-sync/src/main/com/mongodb/client/internal/CommandMarker.java new file mode 100644 index 00000000000..73eed8efd01 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/CommandMarker.java @@ -0,0 +1,130 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.internal; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.MongoClientException; +import com.mongodb.MongoException; +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.MongoTimeoutException; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoClients; +import com.mongodb.client.MongoDatabase; +import com.mongodb.internal.crypt.capi.MongoCrypt; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import org.bson.RawBsonDocument; + +import java.io.Closeable; +import java.util.Map; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.client.internal.TimeoutHelper.databaseWithTimeout; +import static com.mongodb.internal.capi.MongoCryptHelper.createMongocryptdClientSettings; +import static com.mongodb.internal.capi.MongoCryptHelper.createProcessBuilder; +import static com.mongodb.internal.capi.MongoCryptHelper.isMongocryptdSpawningDisabled; +import static com.mongodb.internal.capi.MongoCryptHelper.startProcess; + +@SuppressWarnings("UseOfProcessBuilder") +class CommandMarker implements Closeable { + private static final String TIMEOUT_ERROR_MESSAGE = "Command marker exceeded the timeout limit."; + @Nullable + private final MongoClient client; + @Nullable + private final ProcessBuilder processBuilder; + + /** + * The command marker + * + *
+     * <p>
+     * If the extraOptions.cryptSharedLibRequired option is true then the driver MUST NOT attempt to spawn or connect to mongocryptd.
+     * <p>
+     * If the following conditions are met:
+     * <ul>
+     *   <li>The user's MongoClient is configured for client-side encryption (i.e. bypassAutoEncryption is not false)</li>
+     *   <li>The user has not disabled mongocryptd spawning (i.e. by setting extraOptions.mongocryptdBypassSpawn to true)</li>
+     *   <li>The crypt shared library is unavailable.</li>
+     *   <li>The extraOptions.cryptSharedLibRequired option is false.</li>
+     * </ul>
+ * Then mongocryptd MUST be spawned by the driver. + */ + CommandMarker( + final MongoCrypt mongoCrypt, + final AutoEncryptionSettings settings) { + + if (isMongocryptdSpawningDisabled(mongoCrypt.getCryptSharedLibVersionString(), settings)) { + processBuilder = null; + client = null; + } else { + Map extraOptions = settings.getExtraOptions(); + boolean mongocryptdBypassSpawn = (boolean) extraOptions.getOrDefault("mongocryptdBypassSpawn", false); + if (!mongocryptdBypassSpawn) { + processBuilder = createProcessBuilder(extraOptions); + startProcess(processBuilder); + } else { + processBuilder = null; + } + client = MongoClients.create(createMongocryptdClientSettings((String) extraOptions.get("mongocryptdURI"))); + } + } + + RawBsonDocument mark(final String databaseName, final RawBsonDocument command, @Nullable final Timeout operationTimeout) { + if (client != null) { + try { + try { + return executeCommand(databaseName, command, operationTimeout); + } catch (MongoOperationTimeoutException e){ + throw e; + } catch (MongoTimeoutException e) { + if (processBuilder == null) { // mongocryptdBypassSpawn=true + throw e; + } + startProcess(processBuilder); + return executeCommand(databaseName, command, operationTimeout); + } + } catch (MongoException e) { + throw wrapInClientException(e); + } + } else { + return command; + } + } + + @Override + public void close() { + if (client != null) { + client.close(); + } + } + + private RawBsonDocument executeCommand(final String databaseName, final RawBsonDocument markableCommand, @Nullable final Timeout operationTimeout) { + assertNotNull(client); + + MongoDatabase mongoDatabase = client.getDatabase(databaseName) + .withReadConcern(ReadConcern.DEFAULT) + .withReadPreference(ReadPreference.primary()); + + return databaseWithTimeout(mongoDatabase, TIMEOUT_ERROR_MESSAGE, operationTimeout) + .runCommand(markableCommand, RawBsonDocument.class); + } + + private MongoClientException wrapInClientException(final MongoException e) { + return new MongoClientException("Exception in encryption library: " + e.getMessage(), e); + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/Crypt.java b/driver-sync/src/main/com/mongodb/client/internal/Crypt.java new file mode 100644 index 00000000000..15ba16e66da --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/Crypt.java @@ -0,0 +1,379 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.internal; + +import com.mongodb.MongoClientException; +import com.mongodb.MongoException; +import com.mongodb.MongoInternalException; +import com.mongodb.client.MongoClient; +import com.mongodb.client.model.vault.DataKeyOptions; +import com.mongodb.client.model.vault.EncryptOptions; +import com.mongodb.client.model.vault.RewrapManyDataKeyOptions; +import com.mongodb.crypt.capi.MongoCryptException; +import com.mongodb.internal.capi.MongoCryptHelper; +import com.mongodb.internal.crypt.capi.MongoCrypt; +import com.mongodb.internal.crypt.capi.MongoCryptContext; +import com.mongodb.internal.crypt.capi.MongoDataKeyOptions; +import com.mongodb.internal.crypt.capi.MongoKeyDecryptor; +import com.mongodb.internal.crypt.capi.MongoRewrapManyDataKeyOptions; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.BsonValue; +import org.bson.RawBsonDocument; + +import java.io.Closeable; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.client.vault.EncryptOptionsHelper.asMongoExplicitEncryptOptions; +import static com.mongodb.internal.crypt.capi.MongoCryptContext.State; +import static com.mongodb.internal.thread.InterruptionUtil.translateInterruptedException; + +/** + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
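+ *
+ * <p>A minimal sketch of the auto-encryption round trip this class drives; the names
+ * below are illustrative, and {@code sendToServer} is a hypothetical transport:</p>
+ * <pre>{@code
+ * RawBsonDocument encryptedCommand = crypt.encrypt(databaseName, command, operationTimeout);
+ * RawBsonDocument encryptedReply = sendToServer(encryptedCommand); // hypothetical
+ * RawBsonDocument reply = crypt.decrypt(encryptedReply, operationTimeout);
+ * }</pre>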
+ */ +public class Crypt implements Closeable { + + private static final RawBsonDocument EMPTY_RAW_BSON_DOCUMENT = RawBsonDocument.parse("{}"); + private final MongoCrypt mongoCrypt; + private final Map> kmsProviders; + private final Map>> kmsProviderPropertySuppliers; + private final CollectionInfoRetriever collectionInfoRetriever; + private final CommandMarker commandMarker; + private final KeyRetriever keyRetriever; + private final KeyManagementService keyManagementService; + private final boolean bypassAutoEncryption; + @Nullable + private final MongoClient collectionInfoRetrieverClient; + @Nullable + private final MongoClient keyVaultClient; + + + /** + * Create an instance to use for explicit encryption and decryption, and data key creation. + * + * @param mongoCrypt the mongoCrypt wrapper + * @param keyRetriever the key retriever + * @param keyManagementService the key management service + * @param kmsProviders the KMS provider credentials + * @param kmsProviderPropertySuppliers the KMS provider property providers + */ + Crypt(final MongoCrypt mongoCrypt, + final KeyRetriever keyRetriever, + final KeyManagementService keyManagementService, + final Map> kmsProviders, + final Map>> kmsProviderPropertySuppliers) { + this(mongoCrypt, keyRetriever, keyManagementService, kmsProviders, kmsProviderPropertySuppliers, + false, null, null, null, null); + } + + /** + * Create an instance to use for auto-encryption and auto-decryption. + * + * @param mongoCrypt the mongoCrypt wrapper + * @param keyRetriever the key retriever + * @param keyManagementService the key management service + * @param kmsProviders the KMS provider credentials + * @param kmsProviderPropertySuppliers the KMS provider property providers + * @param bypassAutoEncryption the bypass auto encryption flag + * @param collectionInfoRetriever the collection info retriever + * @param commandMarker the command marker + * @param collectionInfoRetrieverClient the collection info retriever mongo client + * @param keyVaultClient the key vault mongo client + */ + Crypt(final MongoCrypt mongoCrypt, + final KeyRetriever keyRetriever, + final KeyManagementService keyManagementService, + final Map> kmsProviders, + final Map>> kmsProviderPropertySuppliers, + final boolean bypassAutoEncryption, + @Nullable final CollectionInfoRetriever collectionInfoRetriever, + @Nullable final CommandMarker commandMarker, + @Nullable final MongoClient collectionInfoRetrieverClient, + @Nullable final MongoClient keyVaultClient) { + this.mongoCrypt = mongoCrypt; + this.keyRetriever = keyRetriever; + this.keyManagementService = keyManagementService; + this.kmsProviders = kmsProviders; + this.kmsProviderPropertySuppliers = kmsProviderPropertySuppliers; + this.bypassAutoEncryption = bypassAutoEncryption; + this.collectionInfoRetriever = collectionInfoRetriever; + this.commandMarker = commandMarker; + this.collectionInfoRetrieverClient = collectionInfoRetrieverClient; + this.keyVaultClient = keyVaultClient; + } + + /** + * Encrypt the given command + * + * @param databaseName the namespace + * @param command the unencrypted command + * @return the encrypted command + */ + RawBsonDocument encrypt(final String databaseName, final RawBsonDocument command, @Nullable final Timeout timeoutOperation) { + notNull("databaseName", databaseName); + notNull("command", command); + + if (bypassAutoEncryption) { + return command; + } + + try (MongoCryptContext encryptionContext = mongoCrypt.createEncryptionContext(databaseName, command)) { + return 
executeStateMachine(encryptionContext, databaseName, timeoutOperation); + } catch (MongoCryptException e) { + throw wrapInMongoException(e); + } + } + + /** + * Decrypt the given command response + * + * @param commandResponse the encrypted command response + * @return the decrypted command response + */ + RawBsonDocument decrypt(final RawBsonDocument commandResponse, @Nullable final Timeout timeoutOperation) { + notNull("commandResponse", commandResponse); + try (MongoCryptContext decryptionContext = mongoCrypt.createDecryptionContext(commandResponse)) { + return executeStateMachine(decryptionContext, null, timeoutOperation); + } catch (MongoCryptException e) { + throw wrapInMongoException(e); + } + } + + /** + * Create a data key. + * + * @param kmsProvider the KMS provider to create the data key for + * @param options the data key options + * @return the document representing the data key to be added to the key vault + */ + BsonDocument createDataKey(final String kmsProvider, final DataKeyOptions options, @Nullable final Timeout operationTimeout) { + notNull("kmsProvider", kmsProvider); + notNull("options", options); + + try (MongoCryptContext dataKeyCreationContext = mongoCrypt.createDataKeyContext(kmsProvider, + MongoDataKeyOptions.builder() + .keyAltNames(options.getKeyAltNames()) + .masterKey(options.getMasterKey()) + .keyMaterial(options.getKeyMaterial()) + .build())) { + return executeStateMachine(dataKeyCreationContext, null, operationTimeout); + } catch (MongoCryptException e) { + throw wrapInMongoException(e); + } + } + + /** + * Encrypt the given value with the given options + * + * @param value the value to encrypt + * @param options the options + * @return the encrypted value + */ + BsonBinary encryptExplicitly(final BsonValue value, final EncryptOptions options, @Nullable final Timeout timeoutOperation) { + notNull("value", value); + notNull("options", options); + + try (MongoCryptContext encryptionContext = mongoCrypt.createExplicitEncryptionContext( + new BsonDocument("v", value), asMongoExplicitEncryptOptions(options))) { + return executeStateMachine(encryptionContext, null, timeoutOperation).getBinary("v"); + } catch (MongoCryptException e) { + throw wrapInMongoException(e); + } + } + + /** + * Encrypts a Match Expression or Aggregate Expression to query a range index. + * + * @param expression the Match Expression or Aggregate Expression + * @param options the options + * @return the encrypted expression + */ + BsonDocument encryptExpression(final BsonDocument expression, final EncryptOptions options, @Nullable final Timeout timeoutOperation) { + notNull("expression", expression); + notNull("options", options); + + try (MongoCryptContext encryptionContext = mongoCrypt.createEncryptExpressionContext( + new BsonDocument("v", expression), asMongoExplicitEncryptOptions(options))) { + return executeStateMachine(encryptionContext, null, timeoutOperation).getDocument("v"); + } catch (MongoCryptException e) { + throw wrapInMongoException(e); + } + } + + /** + * Decrypt the given encrypted value. 
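+     * <p>Illustrative round trip with {@code encryptExplicitly}; assumes an
+     * {@code EncryptOptions} instance configured by the caller:</p>
+     * <pre>{@code
+     * BsonBinary ciphertext = crypt.encryptExplicitly(new BsonString("secret"), encryptOptions, timeout);
+     * BsonValue plaintext = crypt.decryptExplicitly(ciphertext, timeout); // yields BsonString("secret")
+     * }</pre>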
+ * + * @param value the encrypted value + * @return the decrypted value + */ + BsonValue decryptExplicitly(final BsonBinary value, @Nullable final Timeout timeoutOperation) { + notNull("value", value); + try (MongoCryptContext decryptionContext = mongoCrypt.createExplicitDecryptionContext(new BsonDocument("v", value))) { + return assertNotNull(executeStateMachine(decryptionContext, null, timeoutOperation).get("v")); + } catch (MongoCryptException e) { + throw wrapInMongoException(e); + } + } + + /** + * Rewrap data key + * @param filter the filter + * @param options the rewrap many data key options + * @return the decrypted value + * @since 4.7 + */ + BsonDocument rewrapManyDataKey(final BsonDocument filter, final RewrapManyDataKeyOptions options, @Nullable final Timeout operationTimeout) { + notNull("filter", filter); + try { + try (MongoCryptContext rewrapManyDatakeyContext = mongoCrypt.createRewrapManyDatakeyContext(filter, + MongoRewrapManyDataKeyOptions + .builder() + .provider(options.getProvider()) + .masterKey(options.getMasterKey()) + .build())) { + return executeStateMachine(rewrapManyDatakeyContext, null, operationTimeout); + } + } catch (MongoCryptException e) { + throw wrapInMongoException(e); + } + } + + @Override + @SuppressWarnings("try") + public void close() { + //noinspection EmptyTryBlock + try (MongoCrypt ignored = this.mongoCrypt; + CommandMarker ignored1 = this.commandMarker; + MongoClient ignored2 = this.collectionInfoRetrieverClient; + MongoClient ignored3 = this.keyVaultClient + ) { + // just using try-with-resources to ensure they all get closed, even in the case of exceptions + } + } + + private RawBsonDocument executeStateMachine(final MongoCryptContext cryptContext, @Nullable final String databaseName, @Nullable final Timeout operationTimeout) { + while (true) { + State state = cryptContext.getState(); + switch (state) { + case NEED_MONGO_COLLINFO: + collInfo(cryptContext, notNull("databaseName", databaseName), operationTimeout); + break; + case NEED_MONGO_MARKINGS: + mark(cryptContext, notNull("databaseName", databaseName), operationTimeout); + break; + case NEED_KMS_CREDENTIALS: + fetchCredentials(cryptContext); + break; + case NEED_MONGO_KEYS: + fetchKeys(cryptContext, operationTimeout); + break; + case NEED_KMS: + decryptKeys(cryptContext, operationTimeout); + break; + case READY: + return cryptContext.finish(); + case DONE: + return EMPTY_RAW_BSON_DOCUMENT; + default: + throw new MongoInternalException("Unsupported encryptor state + " + state); + } + } + } + + private void fetchCredentials(final MongoCryptContext cryptContext) { + cryptContext.provideKmsProviderCredentials(MongoCryptHelper.fetchCredentials(kmsProviders, kmsProviderPropertySuppliers)); + } + + private void collInfo(final MongoCryptContext cryptContext, final String databaseName, @Nullable final Timeout operationTimeout) { + try { + List results = assertNotNull(collectionInfoRetriever) + .filter(databaseName, cryptContext.getMongoOperation(), operationTimeout); + for (BsonDocument result : results) { + cryptContext.addMongoOperationResult(result); + } + cryptContext.completeMongoOperation(); + } catch (Throwable t) { + throw MongoException.fromThrowableNonNull(t); + } + } + + private void mark(final MongoCryptContext cryptContext, final String databaseName, @Nullable final Timeout operationTimeout) { + try { + RawBsonDocument markedCommand = assertNotNull(commandMarker).mark(databaseName, cryptContext.getMongoOperation(), operationTimeout); + 
cryptContext.addMongoOperationResult(markedCommand); + cryptContext.completeMongoOperation(); + } catch (Throwable t) { + throw wrapInMongoException(t); + } + } + + private void fetchKeys(final MongoCryptContext keyBroker, @Nullable final Timeout operationTimeout) { + try { + for (BsonDocument bsonDocument : keyRetriever.find(keyBroker.getMongoOperation(), operationTimeout)) { + keyBroker.addMongoOperationResult(bsonDocument); + } + keyBroker.completeMongoOperation(); + } catch (Throwable t) { + throw MongoException.fromThrowableNonNull(t); + } + } + + private void decryptKeys(final MongoCryptContext cryptContext, @Nullable final Timeout operationTimeout) { + try { + MongoKeyDecryptor keyDecryptor = cryptContext.nextKeyDecryptor(); + while (keyDecryptor != null) { + decryptKey(keyDecryptor, operationTimeout); + keyDecryptor = cryptContext.nextKeyDecryptor(); + } + cryptContext.completeKeyDecryptors(); + } catch (Throwable t) { + throw translateInterruptedException(t, "Interrupted while doing IO") + .orElseThrow(() -> wrapInMongoException(t)); + } + } + + private void decryptKey(final MongoKeyDecryptor keyDecryptor, @Nullable final Timeout operationTimeout) throws IOException { + try (InputStream inputStream = keyManagementService.stream(keyDecryptor.getKmsProvider(), keyDecryptor.getHostName(), + keyDecryptor.getMessage(), operationTimeout)) { + int bytesNeeded = keyDecryptor.bytesNeeded(); + + while (bytesNeeded > 0) { + byte[] bytes = new byte[bytesNeeded]; + int bytesRead = inputStream.read(bytes, 0, bytes.length); + keyDecryptor.feed(ByteBuffer.wrap(bytes, 0, bytesRead)); + bytesNeeded = keyDecryptor.bytesNeeded(); + } + } + } + + private MongoException wrapInMongoException(final Throwable t) { + if (t instanceof MongoException) { + return (MongoException) t; + } else { + return new MongoClientException("Exception in encryption library: " + t.getMessage(), t); + } + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/CryptBinding.java b/driver-sync/src/main/com/mongodb/client/internal/CryptBinding.java new file mode 100644 index 00000000000..036466077ec --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/CryptBinding.java @@ -0,0 +1,127 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.internal; + +import com.mongodb.ReadPreference; +import com.mongodb.ServerAddress; +import com.mongodb.connection.ServerDescription; +import com.mongodb.internal.binding.ClusterAwareReadWriteBinding; +import com.mongodb.internal.binding.ConnectionSource; +import com.mongodb.internal.binding.ReadWriteBinding; +import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.connection.OperationContext; + + +class CryptBinding implements ClusterAwareReadWriteBinding { + private final ClusterAwareReadWriteBinding wrapped; + private final Crypt crypt; + + CryptBinding(final ClusterAwareReadWriteBinding wrapped, final Crypt crypt) { + this.crypt = crypt; + this.wrapped = wrapped; + } + + @Override + public ReadPreference getReadPreference() { + return wrapped.getReadPreference(); + } + + @Override + public ConnectionSource getReadConnectionSource() { + return new CryptConnectionSource(wrapped.getReadConnectionSource()); + } + + @Override + public ConnectionSource getReadConnectionSource(final int minWireVersion, final ReadPreference fallbackReadPreference) { + return new CryptConnectionSource(wrapped.getReadConnectionSource(minWireVersion, fallbackReadPreference)); + } + + @Override + public ConnectionSource getWriteConnectionSource() { + return new CryptConnectionSource(wrapped.getWriteConnectionSource()); + } + + @Override + public ConnectionSource getConnectionSource(final ServerAddress serverAddress) { + return new CryptConnectionSource(wrapped.getConnectionSource(serverAddress)); + } + + @Override + public OperationContext getOperationContext() { + return wrapped.getOperationContext(); + } + + @Override + public int getCount() { + return wrapped.getCount(); + } + + @Override + public ReadWriteBinding retain() { + wrapped.retain(); + return this; + } + + @Override + public int release() { + return wrapped.release(); + } + + private class CryptConnectionSource implements ConnectionSource { + private final ConnectionSource wrapped; + + CryptConnectionSource(final ConnectionSource wrapped) { + this.wrapped = wrapped; + } + + @Override + public ServerDescription getServerDescription() { + return wrapped.getServerDescription(); + } + + @Override + public OperationContext getOperationContext() { + return wrapped.getOperationContext(); + } + + @Override + public ReadPreference getReadPreference() { + return wrapped.getReadPreference(); + } + + @Override + public Connection getConnection() { + return new CryptConnection(wrapped.getConnection(), crypt); + } + + @Override + public int getCount() { + return wrapped.getCount(); + } + + @Override + public ConnectionSource retain() { + wrapped.retain(); + return this; + } + + @Override + public int release() { + return wrapped.release(); + } + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/CryptConnection.java b/driver-sync/src/main/com/mongodb/client/internal/CryptConnection.java new file mode 100644 index 00000000000..803df89a6b6 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/CryptConnection.java @@ -0,0 +1,169 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.internal; + +import com.mongodb.ReadPreference; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.internal.connection.Connection; +import com.mongodb.internal.connection.MessageSequences; +import com.mongodb.internal.connection.MessageSequences.EmptyMessageSequences; +import com.mongodb.internal.connection.MessageSettings; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.connection.SplittablePayload; +import com.mongodb.internal.connection.SplittablePayloadBsonWriter; +import com.mongodb.internal.time.Timeout; +import com.mongodb.internal.validator.MappedFieldNameValidator; +import com.mongodb.lang.Nullable; +import org.bson.BsonBinaryReader; +import org.bson.BsonBinaryWriter; +import org.bson.BsonBinaryWriterSettings; +import org.bson.BsonDocument; +import org.bson.BsonWriter; +import org.bson.BsonWriterSettings; +import org.bson.FieldNameValidator; +import org.bson.RawBsonDocument; +import org.bson.codecs.BsonValueCodecProvider; +import org.bson.codecs.Codec; +import org.bson.codecs.Decoder; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.RawBsonDocumentCodec; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.io.BasicOutputBuffer; + +import java.util.HashMap; +import java.util.Map; + +import static com.mongodb.assertions.Assertions.fail; +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; + +/** + * This class is not part of the public API and may be removed or changed at any time. 
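+ *
+ * <p>Sketch of how the wrapper is applied; {@code CryptBinding} does the equivalent
+ * internally when handing out connections (names here are illustrative):</p>
+ * <pre>{@code
+ * Connection plain = connectionSource.getConnection(); // illustrative source
+ * Connection encrypting = new CryptConnection(plain, crypt);
+ * }</pre>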
+ */ +public final class CryptConnection implements Connection { + private static final CodecRegistry REGISTRY = fromProviders(new BsonValueCodecProvider()); + private static final int MAX_SPLITTABLE_DOCUMENT_SIZE = 2097152; + + private final Connection wrapped; + private final Crypt crypt; + + CryptConnection(final Connection wrapped, final Crypt crypt) { + this.wrapped = wrapped; + this.crypt = crypt; + } + + @Override + public int getCount() { + return wrapped.getCount(); + } + + @Override + public CryptConnection retain() { + wrapped.retain(); + return this; + } + + @Override + public int release() { + return wrapped.release(); + } + + @Override + public ConnectionDescription getDescription() { + return wrapped.getDescription(); + } + + @Nullable + @Override + public T command(final String database, final BsonDocument command, final FieldNameValidator commandFieldNameValidator, + @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, + final OperationContext operationContext, final boolean responseExpected, final MessageSequences sequences) { + + SplittablePayload payload = null; + FieldNameValidator payloadFieldNameValidator = null; + if (sequences instanceof SplittablePayload) { + payload = (SplittablePayload) sequences; + payloadFieldNameValidator = payload.getFieldNameValidator(); + } else if (!(sequences instanceof EmptyMessageSequences)) { + fail(sequences.toString()); + } + BasicOutputBuffer bsonOutput = new BasicOutputBuffer(); + BsonBinaryWriter bsonBinaryWriter = new BsonBinaryWriter(new BsonWriterSettings(), + new BsonBinaryWriterSettings(getDescription().getMaxDocumentSize()), + bsonOutput, getFieldNameValidator(payload, commandFieldNameValidator, payloadFieldNameValidator)); + BsonWriter writer = payload == null + ? 
bsonBinaryWriter + : new SplittablePayloadBsonWriter(bsonBinaryWriter, bsonOutput, createSplittablePayloadMessageSettings(), payload, + MAX_SPLITTABLE_DOCUMENT_SIZE); + + getEncoder(command).encode(writer, command, EncoderContext.builder().build()); + + Timeout operationTimeout = operationContext.getTimeoutContext().getTimeout(); + RawBsonDocument encryptedCommand = crypt.encrypt(database, + new RawBsonDocument(bsonOutput.getInternalBuffer(), 0, bsonOutput.getSize()), operationTimeout); + + RawBsonDocument encryptedResponse = wrapped.command(database, encryptedCommand, commandFieldNameValidator, readPreference, + new RawBsonDocumentCodec(), operationContext, responseExpected, EmptyMessageSequences.INSTANCE); + + if (encryptedResponse == null) { + return null; + } + + RawBsonDocument decryptedResponse = crypt.decrypt(encryptedResponse, operationTimeout); + + BsonBinaryReader reader = new BsonBinaryReader(decryptedResponse.getByteBuffer().asNIO()); + + return commandResultDecoder.decode(reader, DecoderContext.builder().build()); + } + + @Nullable + @Override + public T command(final String database, final BsonDocument command, final FieldNameValidator fieldNameValidator, + @Nullable final ReadPreference readPreference, final Decoder commandResultDecoder, final OperationContext operationContext) { + return command(database, command, fieldNameValidator, readPreference, commandResultDecoder, operationContext, true, EmptyMessageSequences.INSTANCE); + } + + @SuppressWarnings("unchecked") + private Codec getEncoder(final BsonDocument command) { + return (Codec) REGISTRY.get(command.getClass()); + } + + private FieldNameValidator getFieldNameValidator(@Nullable final SplittablePayload payload, + final FieldNameValidator commandFieldNameValidator, + @Nullable final FieldNameValidator payloadFieldNameValidator) { + if (payload == null) { + return commandFieldNameValidator; + } + + Map rootMap = new HashMap<>(); + rootMap.put(payload.getPayloadName(), payloadFieldNameValidator); + return new MappedFieldNameValidator(commandFieldNameValidator, rootMap); + } + + private MessageSettings createSplittablePayloadMessageSettings() { + return MessageSettings.builder() + .maxBatchCount(getDescription().getMaxBatchCount()) + .maxMessageSize(getDescription().getMaxMessageSize()) + .maxDocumentSize(getDescription().getMaxDocumentSize()) + .build(); + } + + @Override + public void markAsPinned(final PinningMode pinningMode) { + wrapped.markAsPinned(pinningMode); + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/Crypts.java b/driver-sync/src/main/com/mongodb/client/internal/Crypts.java new file mode 100644 index 00000000000..30319bbf4f8 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/Crypts.java @@ -0,0 +1,82 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.internal; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoNamespace; +import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoClients; +import com.mongodb.internal.crypt.capi.MongoCrypt; +import com.mongodb.internal.crypt.capi.MongoCrypts; + +import javax.net.ssl.SSLContext; +import java.util.Map; + +import static com.mongodb.internal.capi.MongoCryptHelper.createMongoCryptOptions; + +/** + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
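+ *
+ * <p>Illustrative use of the factory methods below; the settings objects are assumed
+ * to be configured by the caller:</p>
+ * <pre>{@code
+ * Crypt autoCrypt = Crypts.createCrypt(mongoClientSettings, autoEncryptionSettings);
+ * Crypt explicitCrypt = Crypts.create(keyVaultClient, clientEncryptionSettings);
+ * }</pre>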
+ */ +public final class Crypts { + + public static Crypt createCrypt(final MongoClientSettings mongoClientSettings, final AutoEncryptionSettings settings) { + MongoClient sharedInternalClient = null; + MongoClientSettings keyVaultMongoClientSettings = settings.getKeyVaultMongoClientSettings(); + if (keyVaultMongoClientSettings == null || !settings.isBypassAutoEncryption()) { + MongoClientSettings defaultInternalMongoClientSettings = MongoClientSettings.builder(mongoClientSettings) + .applyToConnectionPoolSettings(builder -> builder.minSize(0)) + .autoEncryptionSettings(null) + .build(); + sharedInternalClient = MongoClients.create(defaultInternalMongoClientSettings); + } + MongoClient keyVaultClient = keyVaultMongoClientSettings == null + ? sharedInternalClient : MongoClients.create(keyVaultMongoClientSettings); + MongoCrypt mongoCrypt = MongoCrypts.create(createMongoCryptOptions(settings)); + return new Crypt( + mongoCrypt, + createKeyRetriever(keyVaultClient, settings.getKeyVaultNamespace()), + createKeyManagementService(settings.getKmsProviderSslContextMap()), + settings.getKmsProviders(), + settings.getKmsProviderPropertySuppliers(), + settings.isBypassAutoEncryption(), + settings.isBypassAutoEncryption() ? null : new CollectionInfoRetriever(sharedInternalClient), + new CommandMarker(mongoCrypt, settings), + sharedInternalClient, keyVaultClient); + } + + static Crypt create(final MongoClient keyVaultClient, final ClientEncryptionSettings settings) { + return new Crypt(MongoCrypts.create(createMongoCryptOptions(settings)), + createKeyRetriever(keyVaultClient, settings.getKeyVaultNamespace()), + createKeyManagementService(settings.getKmsProviderSslContextMap()), + settings.getKmsProviders(), + settings.getKmsProviderPropertySuppliers() + ); + } + private static KeyRetriever createKeyRetriever(final MongoClient keyVaultClient, + final String keyVaultNamespaceString) { + return new KeyRetriever(keyVaultClient, new MongoNamespace(keyVaultNamespaceString)); + } + + private static KeyManagementService createKeyManagementService(final Map kmsProviderSslContextMap) { + return new KeyManagementService(kmsProviderSslContextMap, 10000); + } + + private Crypts() { + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/DistinctIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/DistinctIterableImpl.java new file mode 100644 index 00000000000..d3403738088 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/DistinctIterableImpl.java @@ -0,0 +1,126 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.internal; + +import com.mongodb.MongoNamespace; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.client.ClientSession; +import com.mongodb.client.DistinctIterable; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.Collation; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.operation.Operations; +import com.mongodb.internal.operation.ReadOperationCursor; +import com.mongodb.lang.Nullable; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.notNull; + +class DistinctIterableImpl extends MongoIterableImpl implements DistinctIterable { + private final Operations operations; + + private final Class resultClass; + private final String fieldName; + + private Bson filter; + private long maxTimeMS; + private Collation collation; + private BsonValue comment; + private Bson hint; + private String hintString; + + DistinctIterableImpl(@Nullable final ClientSession clientSession, final MongoNamespace namespace, final Class documentClass, + final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, + final ReadConcern readConcern, final OperationExecutor executor, final String fieldName, final Bson filter, + final boolean retryReads, final TimeoutSettings timeoutSettings) { + super(clientSession, executor, readConcern, readPreference, retryReads, timeoutSettings); + this.operations = new Operations<>(namespace, documentClass, readPreference, codecRegistry, retryReads, timeoutSettings); + this.resultClass = notNull("resultClass", resultClass); + this.fieldName = notNull("mapFunction", fieldName); + this.filter = filter; + } + + @Override + public DistinctIterable filter(@Nullable final Bson filter) { + this.filter = filter; + return this; + } + + @Override + public DistinctIterable maxTime(final long maxTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); + return this; + } + + @Override + public DistinctIterable batchSize(final int batchSize) { + super.batchSize(batchSize); + return this; + } + + @Override + public DistinctIterable timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + return this; + } + + @Override + public DistinctIterable collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + @Override + public DistinctIterable comment(@Nullable final String comment) { + this.comment = comment == null ? 
null : new BsonString(comment); + return this; + } + + @Override + public DistinctIterable comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + @Override + public DistinctIterable hint(@Nullable final Bson hint) { + this.hint = hint; + return this; + } + + @Override + public DistinctIterable hintString(@Nullable final String hint) { + this.hintString = hint; + return this; + } + + @Override + public ReadOperationCursor asReadOperation() { + return operations.distinct(fieldName, filter, resultClass, collation, comment, hint, hintString); + } + + protected OperationExecutor getExecutor() { + return getExecutor(operations.createTimeoutSettings(maxTimeMS)); + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/FindIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/FindIterableImpl.java new file mode 100644 index 00000000000..9b0321abbbc --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/FindIterableImpl.java @@ -0,0 +1,251 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.internal; + +import com.mongodb.CursorType; +import com.mongodb.ExplainVerbosity; +import com.mongodb.MongoNamespace; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.client.ClientSession; +import com.mongodb.client.FindIterable; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.Collation; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.client.model.FindOptions; +import com.mongodb.internal.operation.BatchCursor; +import com.mongodb.internal.operation.Operations; +import com.mongodb.internal.operation.ReadOperationExplainable; +import com.mongodb.lang.Nullable; +import org.bson.BsonValue; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.notNull; + +class FindIterableImpl extends MongoIterableImpl implements FindIterable { + + private final Operations operations; + + private final Class resultClass; + private final FindOptions findOptions; + private final CodecRegistry codecRegistry; + + private Bson filter; + + FindIterableImpl(@Nullable final ClientSession clientSession, final MongoNamespace namespace, final Class documentClass, + final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, + final ReadConcern readConcern, final OperationExecutor executor, final Bson filter, final boolean retryReads, + final TimeoutSettings timeoutSettings) { + super(clientSession, executor, readConcern, readPreference, retryReads, timeoutSettings); + this.operations = new Operations<>(namespace, documentClass, readPreference, codecRegistry, retryReads, timeoutSettings); + this.resultClass = notNull("resultClass", resultClass); + this.filter = notNull("filter", filter); + 
this.findOptions = new FindOptions(); + this.codecRegistry = codecRegistry; + } + + @Override + public FindIterable filter(@Nullable final Bson filter) { + this.filter = filter; + return this; + } + + @Override + public FindIterable limit(final int limit) { + findOptions.limit(limit); + return this; + } + + @Override + public FindIterable skip(final int skip) { + findOptions.skip(skip); + return this; + } + + @Override + public FindIterable maxTime(final long maxTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + findOptions.maxTime(maxTime, timeUnit); + return this; + } + + @Override + public FindIterable maxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) { + validateMaxAwaitTime(maxAwaitTime, timeUnit); + findOptions.maxAwaitTime(maxAwaitTime, timeUnit); + return this; + } + + @Override + public FindIterable batchSize(final int batchSize) { + super.batchSize(batchSize); + findOptions.batchSize(batchSize); + return this; + } + + @Override + public FindIterable timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + findOptions.timeoutMode(timeoutMode); + return this; + } + + @Override + public FindIterable collation(@Nullable final Collation collation) { + findOptions.collation(collation); + return this; + } + + @Override + public FindIterable projection(@Nullable final Bson projection) { + findOptions.projection(projection); + return this; + } + + @Override + public FindIterable sort(@Nullable final Bson sort) { + findOptions.sort(sort); + return this; + } + + @Override + public FindIterable noCursorTimeout(final boolean noCursorTimeout) { + findOptions.noCursorTimeout(noCursorTimeout); + return this; + } + + @Override + public FindIterable partial(final boolean partial) { + findOptions.partial(partial); + return this; + } + + @Override + public FindIterable cursorType(final CursorType cursorType) { + findOptions.cursorType(cursorType); + return this; + } + + @Override + public FindIterable comment(@Nullable final String comment) { + findOptions.comment(comment); + return this; + } + + @Override + public FindIterable comment(@Nullable final BsonValue comment) { + findOptions.comment(comment); + return this; + } + + @Override + public FindIterable hint(@Nullable final Bson hint) { + findOptions.hint(hint); + return this; + } + + @Override + public FindIterable hintString(@Nullable final String hint) { + findOptions.hintString(hint); + return this; + } + + @Override + public FindIterable let(@Nullable final Bson variables) { + findOptions.let(variables); + return this; + } + + @Override + public FindIterable max(@Nullable final Bson max) { + findOptions.max(max); + return this; + } + + @Override + public FindIterable min(@Nullable final Bson min) { + findOptions.min(min); + return this; + } + + @Override + public FindIterable returnKey(final boolean returnKey) { + findOptions.returnKey(returnKey); + return this; + } + + @Override + public FindIterable showRecordId(final boolean showRecordId) { + findOptions.showRecordId(showRecordId); + return this; + } + + @Override + public FindIterable allowDiskUse(@Nullable final Boolean allowDiskUse) { + findOptions.allowDiskUse(allowDiskUse); + return this; + } + + @Nullable + @Override + public TResult first() { + try (BatchCursor batchCursor = getExecutor().execute( + operations.findFirst(filter, resultClass, findOptions), getReadPreference(), getReadConcern(), getClientSession())) { + return batchCursor.hasNext() ? 
batchCursor.next().iterator().next() : null; + } + } + + @Override + public Document explain() { + return executeExplain(Document.class, null); + } + + @Override + public Document explain(final ExplainVerbosity verbosity) { + return executeExplain(Document.class, notNull("verbosity", verbosity)); + } + + @Override + public E explain(final Class explainDocumentClass) { + return executeExplain(explainDocumentClass, null); + } + + @Override + public E explain(final Class explainResultClass, final ExplainVerbosity verbosity) { + return executeExplain(explainResultClass, notNull("verbosity", verbosity)); + } + + + protected OperationExecutor getExecutor() { + return getExecutor(operations.createTimeoutSettings(findOptions)); + } + + private E executeExplain(final Class explainResultClass, @Nullable final ExplainVerbosity verbosity) { + notNull("explainDocumentClass", explainResultClass); + return getExecutor().execute( + asReadOperation().asExplainableOperation(verbosity, codecRegistry.get(explainResultClass)), getReadPreference(), getReadConcern(), getClientSession()); + } + + public ReadOperationExplainable asReadOperation() { + return operations.find(filter, resultClass, findOptions); + } + +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/KeyManagementService.java b/driver-sync/src/main/com/mongodb/client/internal/KeyManagementService.java new file mode 100644 index 00000000000..806f768a923 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/KeyManagementService.java @@ -0,0 +1,194 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.internal; + +import com.mongodb.ServerAddress; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.connection.SslHelper; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import com.mongodb.lang.NonNull; + +import javax.net.SocketFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLParameters; +import javax.net.ssl.SSLSocket; +import javax.net.ssl.SSLSocketFactory; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.Socket; +import java.net.SocketException; +import java.nio.ByteBuffer; +import java.util.Map; + +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.NANOSECONDS; + +class KeyManagementService { + private static final Logger LOGGER = Loggers.getLogger("client"); + private final Map kmsProviderSslContextMap; + private final int timeoutMillis; + + KeyManagementService(final Map kmsProviderSslContextMap, final int timeoutMillis) { + this.kmsProviderSslContextMap = notNull("kmsProviderSslContextMap", kmsProviderSslContextMap); + this.timeoutMillis = timeoutMillis; + } + + public InputStream stream(final String kmsProvider, final String host, final ByteBuffer message, @Nullable final Timeout operationTimeout) throws IOException { + ServerAddress serverAddress = new ServerAddress(host); + + LOGGER.info("Connecting to KMS server at " + serverAddress); + SSLContext sslContext = kmsProviderSslContextMap.get(kmsProvider); + + SocketFactory sslSocketFactory = sslContext == null + ? SSLSocketFactory.getDefault() : sslContext.getSocketFactory(); + SSLSocket socket = (SSLSocket) sslSocketFactory.createSocket(); + enableHostNameVerification(socket); + + try { + socket.setSoTimeout(timeoutMillis); + socket.connect(new InetSocketAddress(InetAddress.getByName(serverAddress.getHost()), serverAddress.getPort()), timeoutMillis); + } catch (IOException e) { + closeSocket(socket); + throw e; + } + + try { + OutputStream outputStream = socket.getOutputStream(); + + byte[] bytes = new byte[message.remaining()]; + + message.get(bytes); + outputStream.write(bytes); + } catch (IOException e) { + closeSocket(socket); + throw e; + } + + try { + return OperationTimeoutAwareInputStream.wrapIfNeeded(operationTimeout, socket); + } catch (IOException e) { + closeSocket(socket); + throw e; + } + } + + private void enableHostNameVerification(final SSLSocket socket) { + SSLParameters sslParameters = socket.getSSLParameters(); + if (sslParameters == null) { + sslParameters = new SSLParameters(); + } + SslHelper.enableHostNameVerification(sslParameters); + socket.setSSLParameters(sslParameters); + } + + private void closeSocket(final Socket socket) { + try { + socket.close(); + } catch (IOException | RuntimeException e) { + // ignore + } + } + + private static final class OperationTimeoutAwareInputStream extends InputStream { + private final Socket socket; + private final Timeout operationTimeout; + private final InputStream wrapped; + + /** + * @param socket - socket to set timeout on. + * @param operationTimeout - non-infinite timeout. 
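+         * @throws IOException if obtaining the socket's input stream fails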
+ */ + private OperationTimeoutAwareInputStream(final Socket socket, final Timeout operationTimeout) throws IOException { + this.socket = socket; + this.operationTimeout = operationTimeout; + this.wrapped = socket.getInputStream(); + } + + public static InputStream wrapIfNeeded(@Nullable final Timeout operationTimeout, final SSLSocket socket) throws IOException { + return Timeout.nullAsInfinite(operationTimeout).checkedCall(NANOSECONDS, + () -> socket.getInputStream(), + (ns) -> new OperationTimeoutAwareInputStream(socket, assertNotNull(operationTimeout)), + () -> new OperationTimeoutAwareInputStream(socket, assertNotNull(operationTimeout))); + } + + private void setSocketSoTimeoutToOperationTimeout() throws SocketException { + operationTimeout.checkedRun(MILLISECONDS, + () -> { + throw new AssertionError("operationTimeout cannot be infinite"); + }, + (ms) -> socket.setSoTimeout(Math.toIntExact(ms)), + () -> TimeoutContext.throwMongoTimeoutException("Reading from KMS server exceeded the timeout limit.")); + } + + @Override + public int read() throws IOException { + setSocketSoTimeoutToOperationTimeout(); + return wrapped.read(); + } + + @Override + public int read(@NonNull final byte[] b) throws IOException { + setSocketSoTimeoutToOperationTimeout(); + return wrapped.read(b); + } + + @Override + public int read(@NonNull final byte[] b, final int off, final int len) throws IOException { + setSocketSoTimeoutToOperationTimeout(); + return wrapped.read(b, off, len); + } + + @Override + public void close() throws IOException { + wrapped.close(); + } + + @Override + public long skip(final long n) throws IOException { + setSocketSoTimeoutToOperationTimeout(); + return wrapped.skip(n); + } + + @Override + public int available() throws IOException { + return wrapped.available(); + } + + @Override + public synchronized void mark(final int readlimit) { + wrapped.mark(readlimit); + } + + @Override + public synchronized void reset() throws IOException { + wrapped.reset(); + } + + @Override + public boolean markSupported() { + return wrapped.markSupported(); + } + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/KeyRetriever.java b/driver-sync/src/main/com/mongodb/client/internal/KeyRetriever.java new file mode 100644 index 00000000000..59544eefc45 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/KeyRetriever.java @@ -0,0 +1,51 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.internal; + +import com.mongodb.MongoNamespace; +import com.mongodb.ReadConcern; +import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoCollection; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; + +import java.util.ArrayList; +import java.util.List; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.client.internal.TimeoutHelper.collectionWithTimeout; + +class KeyRetriever { + private static final String TIMEOUT_ERROR_MESSAGE = "Key retrieval exceeded the timeout limit."; + private final MongoClient client; + private final MongoNamespace namespace; + + KeyRetriever(final MongoClient client, final MongoNamespace namespace) { + this.client = notNull("client", client); + this.namespace = notNull("namespace", namespace); + } + + public List find(final BsonDocument keyFilter, @Nullable final Timeout operationTimeout) { + MongoCollection collection = client.getDatabase(namespace.getDatabaseName()) + .getCollection(namespace.getCollectionName(), BsonDocument.class); + + return collectionWithTimeout(collection, TIMEOUT_ERROR_MESSAGE, operationTimeout) + .withReadConcern(ReadConcern.MAJORITY) + .find(keyFilter).into(new ArrayList<>()); + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/ListCollectionNamesIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ListCollectionNamesIterableImpl.java new file mode 100644 index 00000000000..aabdcd8e7a7 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/ListCollectionNamesIterableImpl.java @@ -0,0 +1,114 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
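KeyRetriever reads data keys from the key vault with ReadConcern.MAJORITY, so keys committed by another client are guaranteed visible before decryption proceeds. A hedged sketch of the equivalent lookup through the public sync API (the vault namespace is the conventional default, and the filter is supplied by the caller — both illustrative):

import com.mongodb.MongoNamespace;
import com.mongodb.ReadConcern;
import com.mongodb.client.MongoClient;
import org.bson.BsonDocument;
import java.util.ArrayList;
import java.util.List;

final class KeyVaultLookupExample {
    // Fetch key documents matching a filter, insisting on majority-committed
    // reads so a key inserted moments ago elsewhere is still found.
    static List<BsonDocument> findKeys(final MongoClient client, final BsonDocument keyFilter) {
        MongoNamespace keyVault = new MongoNamespace("encryption.__keyVault"); // conventional default vault
        return client.getDatabase(keyVault.getDatabaseName())
                .getCollection(keyVault.getCollectionName(), BsonDocument.class)
                .withReadConcern(ReadConcern.MAJORITY) // mirrors KeyRetriever.find
                .find(keyFilter)
                .into(new ArrayList<>());
    }
}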
+ */ + +package com.mongodb.client.internal; + +import com.mongodb.Function; +import com.mongodb.client.ListCollectionNamesIterable; +import com.mongodb.client.MongoCursor; +import com.mongodb.client.MongoIterable; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonValue; +import org.bson.conversions.Bson; + +import java.util.Collection; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.VisibleForTesting.AccessModifier.PRIVATE; + +final class ListCollectionNamesIterableImpl implements ListCollectionNamesIterable { + private final ListCollectionsIterableImpl wrapped; + private final MongoIterable wrappedWithMapping; + + ListCollectionNamesIterableImpl(final ListCollectionsIterableImpl wrapped) { + this.wrapped = wrapped; + wrappedWithMapping = wrapped.map(collectionDoc -> collectionDoc.getString("name").getValue()); + } + + @Override + public ListCollectionNamesIterable filter(@Nullable final Bson filter) { + wrapped.filter(filter); + return this; + } + + @Override + public ListCollectionNamesIterable maxTime(final long maxTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + wrapped.maxTime(maxTime, timeUnit); + return this; + } + + @Override + public ListCollectionNamesIterable batchSize(final int batchSize) { + wrapped.batchSize(batchSize); + return this; + } + + @Override + public ListCollectionNamesIterable comment(@Nullable final String comment) { + wrapped.comment(comment); + return this; + } + + @Override + public ListCollectionNamesIterable comment(@Nullable final BsonValue comment) { + wrapped.comment(comment); + return this; + } + + @Override + public ListCollectionNamesIterable authorizedCollections(final boolean authorizedCollections) { + wrapped.authorizedCollections(authorizedCollections); + return this; + } + + @Override + public MongoCursor iterator() { + return wrappedWithMapping.iterator(); + } + + @Override + public MongoCursor cursor() { + return wrappedWithMapping.cursor(); + } + + @Nullable + @Override + public String first() { + return wrappedWithMapping.first(); + } + + @Override + public MongoIterable map(final Function mapper) { + return wrappedWithMapping.map(mapper); + } + + @Override + public
> A into(final A target) { + return wrappedWithMapping.into(target); + } + + /** + * This method is used from Groovy code in {@code com.mongodb.client.internal.MongoDatabaseSpecification}. + */ + @VisibleForTesting(otherwise = PRIVATE) + ListCollectionsIterableImpl getWrapped() { + return wrapped; + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/ListCollectionsIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ListCollectionsIterableImpl.java new file mode 100644 index 00000000000..60755456811 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/ListCollectionsIterableImpl.java @@ -0,0 +1,115 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.internal; + +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.client.ClientSession; +import com.mongodb.client.ListCollectionNamesIterable; +import com.mongodb.client.ListCollectionsIterable; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.operation.Operations; +import com.mongodb.internal.operation.ReadOperationCursor; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +class ListCollectionsIterableImpl extends MongoIterableImpl implements ListCollectionsIterable { + private final Operations operations; + private final String databaseName; + private final Class resultClass; + private Bson filter; + private final boolean collectionNamesOnly; + private boolean authorizedCollections; + private long maxTimeMS; + private BsonValue comment; + + ListCollectionsIterableImpl(@Nullable final ClientSession clientSession, final String databaseName, final boolean collectionNamesOnly, + final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, + final OperationExecutor executor, final boolean retryReads, final TimeoutSettings timeoutSettings) { + super(clientSession, executor, ReadConcern.DEFAULT, readPreference, retryReads, timeoutSettings); // TODO: read concern? 
+ this.collectionNamesOnly = collectionNamesOnly; + this.operations = new Operations<>(BsonDocument.class, readPreference, codecRegistry, retryReads, timeoutSettings); + this.databaseName = notNull("databaseName", databaseName); + this.resultClass = notNull("resultClass", resultClass); + } + + @Override + public ListCollectionsIterable filter(@Nullable final Bson filter) { + this.filter = filter; + return this; + } + + @Override + public ListCollectionsIterable maxTime(final long maxTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + this.maxTimeMS = MILLISECONDS.convert(maxTime, timeUnit); + return this; + } + + @Override + public ListCollectionsIterable batchSize(final int batchSize) { + super.batchSize(batchSize); + return this; + } + + @Override + public ListCollectionsIterable timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + return this; + } + + @Override + public ListCollectionsIterable comment(@Nullable final String comment) { + this.comment = comment != null ? new BsonString(comment) : null; + return this; + } + + @Override + public ListCollectionsIterable comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + /** + * @see ListCollectionNamesIterable#authorizedCollections(boolean) + */ + ListCollectionsIterableImpl authorizedCollections(final boolean authorizedCollections) { + this.authorizedCollections = authorizedCollections; + return this; + } + + @Override + public ReadOperationCursor asReadOperation() { + return operations.listCollections(databaseName, resultClass, filter, collectionNamesOnly, authorizedCollections, + getBatchSize(), comment, getTimeoutMode()); + } + + + protected OperationExecutor getExecutor() { + return getExecutor(operations.createTimeoutSettings(maxTimeMS)); + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/ListDatabasesIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ListDatabasesIterableImpl.java new file mode 100644 index 00000000000..f6e7d9a5021 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/ListDatabasesIterableImpl.java @@ -0,0 +1,117 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
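ListCollectionNamesIterableImpl above is a thin adapter over ListCollectionsIterableImpl: each fluent setter forwards to the wrapped iterable, and results flow through a per-element map stage that projects each collection document to its name field (the same mechanism MappingIterable implements later in this patch). A usage sketch against the public API, assuming a driver version where listCollectionNames() returns ListCollectionNamesIterable (database name is a placeholder):

import com.mongodb.client.MongoClient;
import java.util.ArrayList;
import java.util.List;

final class ListCollectionNamesExample {
    // List only the collections the authenticated user can actually access;
    // the flag is forwarded to the wrapped listCollections operation.
    static List<String> visibleCollections(final MongoClient client) {
        return client.getDatabase("test") // placeholder database name
                .listCollectionNames()
                .authorizedCollections(true)
                .into(new ArrayList<>());
    }
}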
+ */ +package com.mongodb.client.internal; + +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.client.ClientSession; +import com.mongodb.client.ListDatabasesIterable; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.operation.Operations; +import com.mongodb.internal.operation.ReadOperationCursor; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class ListDatabasesIterableImpl extends MongoIterableImpl implements ListDatabasesIterable { + private final Operations operations; + private final Class resultClass; + + private long maxTimeMS; + private Bson filter; + private Boolean nameOnly; + private Boolean authorizedDatabasesOnly; + private BsonValue comment; + + public ListDatabasesIterableImpl(@Nullable final ClientSession clientSession, final Class resultClass, + final CodecRegistry codecRegistry, final ReadPreference readPreference, final OperationExecutor executor, + final boolean retryReads, final TimeoutSettings timeoutSettings) { + super(clientSession, executor, ReadConcern.DEFAULT, readPreference, retryReads, timeoutSettings); // TODO: read concern? + this.operations = new Operations<>(BsonDocument.class, readPreference, codecRegistry, retryReads, timeoutSettings); + this.resultClass = notNull("clazz", resultClass); + } + + @Override + public ListDatabasesIterable maxTime(final long maxTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + this.maxTimeMS = MILLISECONDS.convert(maxTime, timeUnit); + return this; + } + + @Override + public ListDatabasesIterable batchSize(final int batchSize) { + super.batchSize(batchSize); + return this; + } + + @Override + public ListDatabasesIterable timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + return this; + } + + @Override + public ListDatabasesIterable filter(@Nullable final Bson filter) { + this.filter = filter; + return this; + } + + @Override + public ListDatabasesIterable nameOnly(@Nullable final Boolean nameOnly) { + this.nameOnly = nameOnly; + return this; + } + + @Override + public ListDatabasesIterable authorizedDatabasesOnly(@Nullable final Boolean authorizedDatabasesOnly) { + this.authorizedDatabasesOnly = authorizedDatabasesOnly; + return this; + } + + @Override + public ListDatabasesIterable comment(@Nullable final String comment) { + this.comment = comment != null ? new BsonString(comment) : null; + return this; + } + + @Override + public ListDatabasesIterable comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + @Override + public ReadOperationCursor asReadOperation() { + return operations.listDatabases(resultClass, filter, nameOnly, authorizedDatabasesOnly, comment); + } + + protected OperationExecutor getExecutor() { + return getExecutor(operations.createTimeoutSettings(maxTimeMS)); + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/ListIndexesIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ListIndexesIterableImpl.java new file mode 100644 index 00000000000..e555984422b --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/ListIndexesIterableImpl.java @@ -0,0 +1,92 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
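A usage sketch for the options wired through ListDatabasesIterableImpl above: nameOnly skips computing size statistics, and the filter is applied server-side (the filter value is illustrative):

import com.mongodb.client.MongoClient;
import com.mongodb.client.model.Filters;
import org.bson.Document;

final class ListDatabasesExample {
    // Ask the server for database names only (cheaper than full statistics),
    // filtered server-side to names starting with "prod".
    static void printProdDatabases(final MongoClient client) {
        for (Document db : client.listDatabases()
                .nameOnly(true)
                .filter(Filters.regex("name", "^prod"))) {
            System.out.println(db.getString("name"));
        }
    }
}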
+ */ + +package com.mongodb.client.internal; + +import com.mongodb.MongoNamespace; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.client.ClientSession; +import com.mongodb.client.ListIndexesIterable; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.operation.Operations; +import com.mongodb.internal.operation.ReadOperationCursor; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.codecs.configuration.CodecRegistry; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +class ListIndexesIterableImpl extends MongoIterableImpl implements ListIndexesIterable { + private final Operations operations; + private final Class resultClass; + private long maxTimeMS; + private BsonValue comment; + + ListIndexesIterableImpl(@Nullable final ClientSession clientSession, final MongoNamespace namespace, final Class resultClass, + final CodecRegistry codecRegistry, final ReadPreference readPreference, final OperationExecutor executor, + final boolean retryReads, final TimeoutSettings timeoutSettings) { + super(clientSession, executor, ReadConcern.DEFAULT, readPreference, retryReads, timeoutSettings); + this.operations = new Operations<>(namespace, BsonDocument.class, readPreference, codecRegistry, retryReads, timeoutSettings); + this.resultClass = notNull("resultClass", resultClass); + } + + @Override + public ListIndexesIterable maxTime(final long maxTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + this.maxTimeMS = MILLISECONDS.convert(maxTime, timeUnit); + return this; + } + + @Override + public ListIndexesIterable batchSize(final int batchSize) { + super.batchSize(batchSize); + return this; + } + + @Override + public ListIndexesIterable timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + return this; + } + + @Override + public ListIndexesIterable comment(@Nullable final String comment) { + this.comment = comment != null ? new BsonString(comment) : null; + return this; + } + + @Override + public ListIndexesIterable comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + @Override + public ReadOperationCursor asReadOperation() { + return operations.listIndexes(resultClass, getBatchSize(), comment, getTimeoutMode()); + } + + protected OperationExecutor getExecutor() { + return getExecutor(operations.createTimeoutSettings(maxTimeMS)); + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/ListSearchIndexesIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/ListSearchIndexesIterableImpl.java new file mode 100644 index 00000000000..7fd6ecd4a93 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/ListSearchIndexesIterableImpl.java @@ -0,0 +1,158 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
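ListIndexesIterableImpl normalizes maxTime to milliseconds (MILLISECONDS.convert(maxTime, timeUnit)) and feeds batchSize, comment, and timeout mode into the listIndexes operation. A usage sketch:

import com.mongodb.client.MongoCollection;
import org.bson.Document;
import java.util.concurrent.TimeUnit;

final class ListIndexesExample {
    // Enumerate index definitions, capping server-side execution time;
    // the 5-second value is converted to milliseconds internally.
    static void printIndexes(final MongoCollection<Document> collection) {
        for (Document index : collection.listIndexes()
                .maxTime(5, TimeUnit.SECONDS)
                .batchSize(50)) {
            System.out.println(index.toJson());
        }
    }
}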
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.internal; + +import com.mongodb.ExplainVerbosity; +import com.mongodb.MongoNamespace; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.client.ListSearchIndexesIterable; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.Collation; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.operation.Operations; +import com.mongodb.internal.operation.ReadOperationCursor; +import com.mongodb.internal.operation.ReadOperationExplainable; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistry; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.notNull; + +final class ListSearchIndexesIterableImpl extends MongoIterableImpl implements ListSearchIndexesIterable { + private final Operations operations; + private final Class resultClass; + @Nullable + private Boolean allowDiskUse; + @Nullable + private long maxTimeMS; + @Nullable + private Collation collation; + @Nullable + private BsonValue comment; + @Nullable + private String indexName; + private final CodecRegistry codecRegistry; + + ListSearchIndexesIterableImpl(final MongoNamespace namespace, final OperationExecutor executor, + final Class resultClass, final CodecRegistry codecRegistry, + final ReadPreference readPreference, final boolean retryReads, final TimeoutSettings timeoutSettings) { + super(null, executor, ReadConcern.DEFAULT, readPreference, retryReads, timeoutSettings); + this.resultClass = resultClass; + this.operations = new Operations<>(namespace, BsonDocument.class, readPreference, codecRegistry, retryReads, timeoutSettings); + this.codecRegistry = codecRegistry; + } + + @Override + public ReadOperationCursor asReadOperation() { + return asAggregateOperation(); + } + + @Override + public ListSearchIndexesIterable allowDiskUse(@Nullable final Boolean allowDiskUse) { + this.allowDiskUse = allowDiskUse; + return this; + } + + @Override + public ListSearchIndexesIterable batchSize(final int batchSize) { + super.batchSize(batchSize); + return this; + } + + @Override + public ListSearchIndexesIterable timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + return this; + } + + @Override + public ListSearchIndexesIterable maxTime(final long maxTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); + return this; + } + + @Override + public ListSearchIndexesIterable collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + @Override + public ListSearchIndexesIterable comment(@Nullable final String comment) { + this.comment = comment == null ? 
null : new BsonString(comment); + return this; + } + + @Override + public ListSearchIndexesIterable comment(@Nullable final BsonValue comment) { + this.comment = comment; + return this; + } + + @Override + public ListSearchIndexesIterable name(final String indexName) { + this.indexName = notNull("indexName", indexName); + return this; + } + + @Override + public Document explain() { + return executeExplain(Document.class, null); + } + + @Override + public Document explain(final ExplainVerbosity verbosity) { + notNull("verbosity", verbosity); + return executeExplain(Document.class, verbosity); + } + + @Override + public E explain(final Class explainResultClass) { + notNull("explainResultClass", explainResultClass); + return executeExplain(explainResultClass, null); + } + + @Override + public E explain(final Class explainResultClass, final ExplainVerbosity verbosity) { + notNull("explainResultClass", explainResultClass); + notNull("verbosity", verbosity); + return executeExplain(explainResultClass, verbosity); + } + + private E executeExplain(final Class explainResultClass, @Nullable final ExplainVerbosity verbosity) { + return getExecutor().execute(asAggregateOperation() + .asExplainableOperation(verbosity, codecRegistry.get(explainResultClass)), getReadPreference(), getReadConcern(), getClientSession()); + } + + private ReadOperationExplainable asAggregateOperation() { + return operations.listSearchIndexes(resultClass, indexName, getBatchSize(), collation, comment, + allowDiskUse); + } + + + protected OperationExecutor getExecutor() { + return getExecutor(operations.createTimeoutSettings(maxTimeMS)); + } + +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/MapReduceIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MapReduceIterableImpl.java new file mode 100644 index 00000000000..be3e8ca05e9 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/MapReduceIterableImpl.java @@ -0,0 +1,253 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
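Because listing search indexes is implemented as an aggregation (per asAggregateOperation above), the iterable can be explained like any other aggregate. A usage sketch — the index name is a placeholder:

import com.mongodb.ExplainVerbosity;
import com.mongodb.client.MongoCollection;
import org.bson.Document;

final class SearchIndexExplainExample {
    // listSearchIndexes is an aggregation under the hood, which is why it
    // supports explain() with a chosen verbosity.
    static Document explainSearchIndexListing(final MongoCollection<Document> collection) {
        return collection.listSearchIndexes()
                .name("default") // hypothetical index name
                .explain(ExplainVerbosity.QUERY_PLANNER);
    }
}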
+ */ + +package com.mongodb.client.internal; + +import com.mongodb.MongoNamespace; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.client.ClientSession; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.Collation; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.async.AsyncBatchCursor; +import com.mongodb.internal.async.SingleResultCallback; +import com.mongodb.internal.binding.AsyncReadBinding; +import com.mongodb.internal.binding.ReadBinding; +import com.mongodb.internal.client.model.FindOptions; +import com.mongodb.internal.operation.BatchCursor; +import com.mongodb.internal.operation.MapReduceStatistics; +import com.mongodb.internal.operation.Operations; +import com.mongodb.internal.operation.ReadOperationCursor; +import com.mongodb.internal.operation.ReadOperationMapReduceCursor; +import com.mongodb.internal.operation.WriteOperation; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.ReadPreference.primary; +import static com.mongodb.assertions.Assertions.notNull; + +@SuppressWarnings("deprecation") +class MapReduceIterableImpl extends MongoIterableImpl implements com.mongodb.client.MapReduceIterable { + private final Operations operations; + private final MongoNamespace namespace; + private final Class resultClass; + private final String mapFunction; + private final String reduceFunction; + + private boolean inline = true; + private String collectionName; + private String finalizeFunction; + private Bson scope; + private Bson filter; + private Bson sort; + private int limit; + private boolean jsMode; + private boolean verbose = true; + private long maxTimeMS; + private com.mongodb.client.model.MapReduceAction action = com.mongodb.client.model.MapReduceAction.REPLACE; + private String databaseName; + private Boolean bypassDocumentValidation; + private Collation collation; + + MapReduceIterableImpl(@Nullable final ClientSession clientSession, final MongoNamespace namespace, final Class documentClass, + final Class resultClass, final CodecRegistry codecRegistry, final ReadPreference readPreference, + final ReadConcern readConcern, final WriteConcern writeConcern, final OperationExecutor executor, + final String mapFunction, final String reduceFunction, final TimeoutSettings timeoutSettings) { + super(clientSession, executor, readConcern, readPreference, false, timeoutSettings); + this.operations = new Operations<>(namespace, documentClass, readPreference, codecRegistry, readConcern, writeConcern, + false, false, timeoutSettings); + this.namespace = notNull("namespace", namespace); + this.resultClass = notNull("resultClass", resultClass); + this.mapFunction = notNull("mapFunction", mapFunction); + this.reduceFunction = notNull("reduceFunction", reduceFunction); + } + + @Override + public void toCollection() { + if (inline) { + throw new IllegalStateException("The options must specify a non-inline result"); + } + + getExecutor().execute(createMapReduceToCollectionOperation(), getReadConcern(), getClientSession()); + } + + @Override + public com.mongodb.client.MapReduceIterable collectionName(final String collectionName) { + this.collectionName = notNull("collectionName", collectionName); + this.inline = false; + return this; + } + + @Override + public com.mongodb.client.MapReduceIterable 
finalizeFunction(@Nullable final String finalizeFunction) { + this.finalizeFunction = finalizeFunction; + return this; + } + + @Override + public com.mongodb.client.MapReduceIterable scope(@Nullable final Bson scope) { + this.scope = scope; + return this; + } + + @Override + public com.mongodb.client.MapReduceIterable sort(@Nullable final Bson sort) { + this.sort = sort; + return this; + } + + @Override + public com.mongodb.client.MapReduceIterable filter(@Nullable final Bson filter) { + this.filter = filter; + return this; + } + + @Override + public com.mongodb.client.MapReduceIterable limit(final int limit) { + this.limit = limit; + return this; + } + + @Override + public com.mongodb.client.MapReduceIterable jsMode(final boolean jsMode) { + this.jsMode = jsMode; + return this; + } + + @Override + public com.mongodb.client.MapReduceIterable verbose(final boolean verbose) { + this.verbose = verbose; + return this; + } + + @Override + public com.mongodb.client.MapReduceIterable maxTime(final long maxTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + this.maxTimeMS = TimeUnit.MILLISECONDS.convert(maxTime, timeUnit); + return this; + } + + @Override + public com.mongodb.client.MapReduceIterable action(final com.mongodb.client.model.MapReduceAction action) { + this.action = action; + return this; + } + + @Override + public com.mongodb.client.MapReduceIterable databaseName(@Nullable final String databaseName) { + this.databaseName = databaseName; + return this; + } + + @Override + public com.mongodb.client.MapReduceIterable batchSize(final int batchSize) { + super.batchSize(batchSize); + return this; + } + + @Override + public com.mongodb.client.MapReduceIterable timeoutMode(final TimeoutMode timeoutMode) { + super.timeoutMode(timeoutMode); + return this; + } + + @Override + public com.mongodb.client.MapReduceIterable bypassDocumentValidation(@Nullable final Boolean bypassDocumentValidation) { + this.bypassDocumentValidation = bypassDocumentValidation; + return this; + } + + @Override + public com.mongodb.client.MapReduceIterable collation(@Nullable final Collation collation) { + this.collation = collation; + return this; + } + + @Override + ReadPreference getReadPreference() { + if (inline) { + return super.getReadPreference(); + } else { + return primary(); + } + } + + + protected OperationExecutor getExecutor() { + return getExecutor(operations.createTimeoutSettings(maxTimeMS)); + } + + @Override + public ReadOperationCursor asReadOperation() { + if (inline) { + ReadOperationMapReduceCursor operation = operations.mapReduce(mapFunction, reduceFunction, finalizeFunction, + resultClass, filter, limit, jsMode, scope, sort, verbose, collation); + return new WrappedMapReduceReadOperation<>(operation); + } else { + getExecutor().execute(createMapReduceToCollectionOperation(), getReadConcern(), getClientSession()); + + String dbName = databaseName != null ? 
databaseName : namespace.getDatabaseName(); + + FindOptions findOptions = new FindOptions().collation(collation); + Integer batchSize = getBatchSize(); + if (batchSize != null) { + findOptions.batchSize(batchSize); + } + return operations.find(new MongoNamespace(dbName, collectionName), new BsonDocument(), resultClass, findOptions); + } + + } + + private WriteOperation createMapReduceToCollectionOperation() { + return operations.mapReduceToCollection(databaseName, collectionName, mapFunction, reduceFunction, finalizeFunction, filter, + limit, jsMode, scope, sort, verbose, action, bypassDocumentValidation, collation + ); + } + + // this could be inlined, but giving it a name so that it's unit-testable + static class WrappedMapReduceReadOperation implements ReadOperationCursor { + private final ReadOperationMapReduceCursor operation; + + ReadOperationMapReduceCursor getOperation() { + return operation; + } + + WrappedMapReduceReadOperation(final ReadOperationMapReduceCursor operation) { + this.operation = operation; + } + + @Override + public String getCommandName() { + return operation.getCommandName(); + } + + @Override + public BatchCursor execute(final ReadBinding binding) { + return operation.execute(binding); + } + + @Override + public void executeAsync(final AsyncReadBinding binding, final SingleResultCallback> callback) { + throw new UnsupportedOperationException("This operation is sync only"); + } + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/MappingIterable.java b/driver-sync/src/main/com/mongodb/client/internal/MappingIterable.java new file mode 100644 index 00000000000..6386c352443 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/MappingIterable.java @@ -0,0 +1,85 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.internal; + +import com.mongodb.Function; +import com.mongodb.client.MongoCursor; +import com.mongodb.client.MongoIterable; +import com.mongodb.lang.Nullable; + +import java.util.Collection; +import java.util.function.Consumer; + +/** + *
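MapReduceIterableImpl above runs in one of two modes: inline, where results stream back from the command itself, and non-inline, where collectionName(...) flips inline to false, the map-reduce first executes as a write operation, and results are then read back from the output collection with a primary read preference. A usage sketch of both modes through the deprecated public API (function bodies and names illustrative):

import com.mongodb.client.MongoCollection;
import org.bson.Document;

@SuppressWarnings("deprecation")
final class MapReduceModesExample {
    static void run(final MongoCollection<Document> collection) {
        // Inline: results stream straight back from the command.
        for (Document doc : collection.mapReduce(
                "function() { emit(this.category, 1); }",
                "function(key, values) { return Array.sum(values); }")) {
            System.out.println(doc.toJson());
        }

        // Non-inline: write to an output collection, then read it back
        // (which is why getReadPreference() pins non-inline reads to primary).
        collection.mapReduce(
                "function() { emit(this.category, 1); }",
                "function(key, values) { return Array.sum(values); }")
                .collectionName("categoryCounts") // hypothetical output collection
                .toCollection();
    }
}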
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class MappingIterable implements MongoIterable { + + private final MongoIterable iterable; + private final Function mapper; + + public MappingIterable(final MongoIterable iterable, final Function mapper) { + this.iterable = iterable; + this.mapper = mapper; + } + + @Override + public MongoCursor iterator() { + return new MongoMappingCursor<>(iterable.iterator(), mapper); + } + + @Override + public MongoCursor cursor() { + return iterator(); + } + + @Nullable + @Override + public V first() { + U first = iterable.first(); + if (first == null) { + return null; + } + return mapper.apply(first); + } + + @Override + public void forEach(final Consumer block) { + iterable.forEach(document -> block.accept(mapper.apply(document))); + } + + @Override + public
> A into(final A target) { + forEach(v -> target.add(v)); + return target; + } + + @Override + public MappingIterable batchSize(final int batchSize) { + iterable.batchSize(batchSize); + return this; + } + + @Override + public MongoIterable map(final Function newMap) { + return new MappingIterable<>(this, newMap); + } + + MongoIterable getMapped() { + return iterable; + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoBatchCursorAdapter.java b/driver-sync/src/main/com/mongodb/client/internal/MongoBatchCursorAdapter.java new file mode 100644 index 00000000000..3de806671be --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/MongoBatchCursorAdapter.java @@ -0,0 +1,109 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.internal; + +import com.mongodb.ServerAddress; +import com.mongodb.ServerCursor; +import com.mongodb.client.MongoCursor; +import com.mongodb.internal.operation.BatchCursor; +import com.mongodb.lang.Nullable; + +import java.util.List; +import java.util.NoSuchElementException; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class MongoBatchCursorAdapter implements MongoCursor { + private final BatchCursor batchCursor; + private List curBatch; + private int curPos; + + public MongoBatchCursorAdapter(final BatchCursor batchCursor) { + this.batchCursor = batchCursor; + } + + @Override + public void remove() { + throw new UnsupportedOperationException("Cursors do not support removal"); + } + + @Override + public void close() { + batchCursor.close(); + } + + @Override + public boolean hasNext() { + return curBatch != null || batchCursor.hasNext(); + } + + @Override + public T next() { + if (!hasNext()) { + throw new NoSuchElementException(); + } + + if (curBatch == null) { + curBatch = batchCursor.next(); + } + + return getNextInBatch(); + } + + @Override + public int available() { + int available = batchCursor.available(); + if (curBatch != null) { + available += (curBatch.size() - curPos); + } + return available; + } + + @Nullable + @Override + public T tryNext() { + if (curBatch == null) { + curBatch = batchCursor.tryNext(); + } + + return curBatch == null ? null : getNextInBatch(); + } + + @Nullable + @Override + public ServerCursor getServerCursor() { + return batchCursor.getServerCursor(); + } + + @Override + public ServerAddress getServerAddress() { + return batchCursor.getServerAddress(); + } + + private T getNextInBatch() { + T nextInBatch = curBatch.get(curPos); + curBatch.set(curPos, null); + if (curPos < curBatch.size() - 1) { + curPos++; + } else { + curBatch = null; + curPos = 0; + } + return nextInBatch; + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoChangeStreamCursorImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MongoChangeStreamCursorImpl.java new file mode 100644 index 00000000000..fa6ea549643 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/MongoChangeStreamCursorImpl.java @@ -0,0 +1,134 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.internal; + +import com.mongodb.ServerAddress; +import com.mongodb.ServerCursor; +import com.mongodb.client.MongoChangeStreamCursor; +import com.mongodb.internal.operation.AggregateResponseBatchCursor; +import com.mongodb.internal.operation.BatchCursor; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.RawBsonDocument; +import org.bson.codecs.Decoder; + +import java.util.List; +import java.util.NoSuchElementException; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public class MongoChangeStreamCursorImpl implements MongoChangeStreamCursor { + private final AggregateResponseBatchCursor batchCursor; + private final Decoder decoder; + private List curBatch; + private int curPos; + private BsonDocument resumeToken; + + public MongoChangeStreamCursorImpl(final BatchCursor batchCursor, final Decoder decoder, + @Nullable final BsonDocument initialResumeToken) { + this.batchCursor = (AggregateResponseBatchCursor) batchCursor; + this.decoder = decoder; + this.resumeToken = initialResumeToken; + } + + @Override + public void remove() { + throw new UnsupportedOperationException("Cursors do not support removal"); + } + + @Override + public void close() { + batchCursor.close(); + } + + @Override + public boolean hasNext() { + return curBatch != null || batchCursor.hasNext(); + } + + @Override + public T next() { + if (!hasNext()) { + throw new NoSuchElementException(); + } + + if (curBatch == null) { + curBatch = batchCursor.next(); + } + + return getNextInBatch(); + } + + @Override + public int available() { + int available = batchCursor.available(); + if (curBatch != null) { + available += (curBatch.size() - curPos); + } + return available; + } + + @Nullable + @Override + public T tryNext() { + if (curBatch == null) { + curBatch = batchCursor.tryNext(); + } + + if (curBatch == null) { + if (batchCursor.getPostBatchResumeToken() != null) { + resumeToken = batchCursor.getPostBatchResumeToken(); + } + } + + return curBatch == null ? null : getNextInBatch(); + } + + @Nullable + @Override + public ServerCursor getServerCursor() { + return batchCursor.getServerCursor(); + } + + @Override + public ServerAddress getServerAddress() { + return batchCursor.getServerAddress(); + } + + private T getNextInBatch() { + RawBsonDocument nextInBatch = curBatch.get(curPos); + curBatch.set(curPos, null); + resumeToken = nextInBatch.getDocument("_id"); + if (curPos < curBatch.size() - 1) { + curPos++; + } else { + curBatch = null; + curPos = 0; + if (batchCursor.getPostBatchResumeToken() != null) { + resumeToken = batchCursor.getPostBatchResumeToken(); + } + } + + return nextInBatch.decode(decoder); + } + + @Nullable + public BsonDocument getResumeToken() { + return resumeToken; + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoClientImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MongoClientImpl.java new file mode 100644 index 00000000000..6870277b1c6 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/MongoClientImpl.java @@ -0,0 +1,355 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
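MongoChangeStreamCursorImpl keeps resumeToken current while draining batches: each document's _id while inside a batch, then the post-batch resume token once a batch is exhausted (consumed slots are nulled out for garbage collection, exactly as in MongoBatchCursorAdapter). That token is what lets a consumer pick up where it left off. A usage sketch against the public API:

import com.mongodb.client.MongoChangeStreamCursor;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.model.changestream.ChangeStreamDocument;
import org.bson.BsonDocument;
import org.bson.Document;

final class ResumeExample {
    // Consume one event, remember where we stopped, and resume from there.
    static void consumeAndResume(final MongoCollection<Document> collection) {
        BsonDocument resumeToken;
        try (MongoChangeStreamCursor<ChangeStreamDocument<Document>> cursor =
                     collection.watch().cursor()) {
            cursor.next();
            // non-null here because at least one event was consumed
            resumeToken = cursor.getResumeToken();
        }
        try (MongoChangeStreamCursor<ChangeStreamDocument<Document>> resumed =
                     collection.watch().resumeAfter(resumeToken).cursor()) {
            System.out.println(resumed.next());
        }
    }
}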
+ */ + +package com.mongodb.client.internal; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.ClientBulkWriteException; +import com.mongodb.ClientSessionOptions; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoDriverInformation; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.client.ChangeStreamIterable; +import com.mongodb.client.ClientSession; +import com.mongodb.client.ListDatabasesIterable; +import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoCluster; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.MongoIterable; +import com.mongodb.client.SynchronousContextProvider; +import com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.SocketSettings; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.VisibleForTesting; +import com.mongodb.internal.connection.ClientMetadata; +import com.mongodb.internal.connection.Cluster; +import com.mongodb.internal.connection.DefaultClusterFactory; +import com.mongodb.internal.connection.InternalConnectionPoolSettings; +import com.mongodb.internal.connection.StreamFactory; +import com.mongodb.internal.connection.StreamFactoryFactory; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.internal.session.ServerSessionPool; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.Document; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.client.internal.Crypts.createCrypt; +import static com.mongodb.internal.event.EventListenerHelper.getCommandListener; +import static java.lang.String.format; +import static org.bson.codecs.configuration.CodecRegistries.withUuidRepresentation; + +/** + *
<p>This class is not part of the public API and may be removed or changed at any time</p>
+ */ +public final class MongoClientImpl implements MongoClient { + private static final Logger LOGGER = Loggers.getLogger("client"); + + private final MongoClientSettings settings; + private final MongoDriverInformation mongoDriverInformation; + private final MongoClusterImpl delegate; + private final AtomicBoolean closed; + private final AutoCloseable externalResourceCloser; + + public MongoClientImpl(final Cluster cluster, + final MongoClientSettings settings, + final MongoDriverInformation mongoDriverInformation, + @Nullable final AutoCloseable externalResourceCloser) { + this(cluster, mongoDriverInformation, settings, externalResourceCloser, null); + } + + @VisibleForTesting(otherwise = VisibleForTesting.AccessModifier.PRIVATE) + public MongoClientImpl(final Cluster cluster, + final MongoDriverInformation mongoDriverInformation, + final MongoClientSettings settings, + @Nullable final AutoCloseable externalResourceCloser, + @Nullable final OperationExecutor operationExecutor) { + + this.externalResourceCloser = externalResourceCloser; + this.settings = notNull("settings", settings); + this.mongoDriverInformation = mongoDriverInformation; + AutoEncryptionSettings autoEncryptionSettings = settings.getAutoEncryptionSettings(); + if (settings.getContextProvider() != null && !(settings.getContextProvider() instanceof SynchronousContextProvider)) { + throw new IllegalArgumentException("The contextProvider must be an instance of " + + SynchronousContextProvider.class.getName() + " when using the synchronous driver"); + } + + this.delegate = new MongoClusterImpl(autoEncryptionSettings, cluster, + withUuidRepresentation(settings.getCodecRegistry(), settings.getUuidRepresentation()), + (SynchronousContextProvider) settings.getContextProvider(), + autoEncryptionSettings == null ? 
null : createCrypt(settings, autoEncryptionSettings), this, + operationExecutor, settings.getReadConcern(), settings.getReadPreference(), settings.getRetryReads(), + settings.getRetryWrites(), settings.getServerApi(), + new ServerSessionPool(cluster, TimeoutSettings.create(settings), settings.getServerApi()), + TimeoutSettings.create(settings), settings.getUuidRepresentation(), settings.getWriteConcern()); + this.closed = new AtomicBoolean(); + + BsonDocument clientMetadataDocument = delegate.getCluster().getClientMetadata().getBsonDocument(); + LOGGER.info(format("MongoClient with metadata %s created with settings %s", clientMetadataDocument.toJson(), settings)); + } + + @Override + public void close() { + if (!closed.getAndSet(true)) { + Crypt crypt = delegate.getCrypt(); + if (crypt != null) { + crypt.close(); + } + delegate.getServerSessionPool().close(); + delegate.getCluster().close(); + if (externalResourceCloser != null) { + try { + externalResourceCloser.close(); + } catch (Exception e) { + LOGGER.warn("Exception closing resource", e); + } + } + } + } + + @Override + public ClusterDescription getClusterDescription() { + return delegate.getCluster().getCurrentDescription(); + } + + @Override + public void appendMetadata(final MongoDriverInformation mongoDriverInformation) { + ClientMetadata clientMetadata = getCluster().getClientMetadata(); + clientMetadata.append(mongoDriverInformation); + LOGGER.info(format("MongoClient metadata has been updated to %s", clientMetadata.getBsonDocument())); + } + + @Override + public CodecRegistry getCodecRegistry() { + return delegate.getCodecRegistry(); + } + + @Override + public ReadPreference getReadPreference() { + return delegate.getReadPreference(); + } + + @Override + public WriteConcern getWriteConcern() { + return delegate.getWriteConcern(); + } + + @Override + public ReadConcern getReadConcern() { + return delegate.getReadConcern(); + } + + @Override + public Long getTimeout(final TimeUnit timeUnit) { + return delegate.getTimeout(timeUnit); + } + + @Override + public MongoCluster withCodecRegistry(final CodecRegistry codecRegistry) { + return delegate.withCodecRegistry(codecRegistry); + } + + @Override + public MongoCluster withReadPreference(final ReadPreference readPreference) { + return delegate.withReadPreference(readPreference); + } + + @Override + public MongoCluster withWriteConcern(final WriteConcern writeConcern) { + return delegate.withWriteConcern(writeConcern); + } + + @Override + public MongoCluster withReadConcern(final ReadConcern readConcern) { + return delegate.withReadConcern(readConcern); + } + + @Override + public MongoCluster withTimeout(final long timeout, final TimeUnit timeUnit) { + return delegate.withTimeout(timeout, timeUnit); + } + + @Override + public MongoDatabase getDatabase(final String databaseName) { + return delegate.getDatabase(databaseName); + } + + @Override + public ClientSession startSession() { + return delegate.startSession(); + } + + @Override + public ClientSession startSession(final ClientSessionOptions options) { + return delegate.startSession(options); + } + + @Override + public MongoIterable listDatabaseNames() { + return delegate.listDatabaseNames(); + } + + @Override + public MongoIterable listDatabaseNames(final ClientSession clientSession) { + return delegate.listDatabaseNames(clientSession); + } + + @Override + public ListDatabasesIterable listDatabases() { + return delegate.listDatabases(); + } + + @Override + public ListDatabasesIterable listDatabases(final ClientSession 
clientSession) { + return delegate.listDatabases(clientSession); + } + + @Override + public ListDatabasesIterable listDatabases(final Class resultClass) { + return delegate.listDatabases(resultClass); + } + + @Override + public ListDatabasesIterable listDatabases(final ClientSession clientSession, final Class resultClass) { + return delegate.listDatabases(clientSession, resultClass); + } + + @Override + public ChangeStreamIterable watch() { + return delegate.watch(); + } + + @Override + public ChangeStreamIterable watch(final Class resultClass) { + return delegate.watch(resultClass); + } + + @Override + public ChangeStreamIterable watch(final List pipeline) { + return delegate.watch(pipeline); + } + + @Override + public ChangeStreamIterable watch(final List pipeline, final Class resultClass) { + return delegate.watch(pipeline, resultClass); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession) { + return delegate.watch(clientSession); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession, final Class resultClass) { + return delegate.watch(clientSession, resultClass); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession, final List pipeline) { + return delegate.watch(clientSession, pipeline); + } + + @Override + public ChangeStreamIterable watch( + final ClientSession clientSession, final List pipeline, final Class resultClass) { + return delegate.watch(clientSession, pipeline, resultClass); + } + + @Override + public ClientBulkWriteResult bulkWrite( + final List clientWriteModels) throws ClientBulkWriteException { + return delegate.bulkWrite(clientWriteModels); + } + + @Override + public ClientBulkWriteResult bulkWrite( + final List clientWriteModels, + final ClientBulkWriteOptions options) throws ClientBulkWriteException { + return delegate.bulkWrite(clientWriteModels, options); + } + + @Override + public ClientBulkWriteResult bulkWrite( + final ClientSession clientSession, + final List clientWriteModels) throws ClientBulkWriteException { + return delegate.bulkWrite(clientSession, clientWriteModels); + } + + @Override + public ClientBulkWriteResult bulkWrite( + final ClientSession clientSession, + final List clientWriteModels, + final ClientBulkWriteOptions options) throws ClientBulkWriteException { + return delegate.bulkWrite(clientSession, clientWriteModels, options); + } + + private static Cluster createCluster(final MongoClientSettings settings, + @Nullable final MongoDriverInformation mongoDriverInformation, + final StreamFactory streamFactory, final StreamFactory heartbeatStreamFactory) { + notNull("settings", settings); + return new DefaultClusterFactory().createCluster(settings.getClusterSettings(), settings.getServerSettings(), + settings.getConnectionPoolSettings(), InternalConnectionPoolSettings.builder().build(), + TimeoutSettings.create(settings), streamFactory, + TimeoutSettings.createHeartbeatSettings(settings), heartbeatStreamFactory, + settings.getCredential(), settings.getLoggerSettings(), getCommandListener(settings.getCommandListeners()), + settings.getApplicationName(), mongoDriverInformation, settings.getCompressorList(), settings.getServerApi(), + settings.getDnsClient()); + } + + private static StreamFactory getStreamFactory( + final StreamFactoryFactory streamFactoryFactory, + final MongoClientSettings settings, + final boolean isHeartbeat) { + SocketSettings socketSettings = isHeartbeat ? 
settings.getHeartbeatSocketSettings() : settings.getSocketSettings(); + return streamFactoryFactory.create(socketSettings, settings.getSslSettings()); + } + + public Cluster getCluster() { + return delegate.getCluster(); + } + + public ServerSessionPool getServerSessionPool() { + return delegate.getServerSessionPool(); + } + + public OperationExecutor getOperationExecutor() { + return delegate.getOperationExecutor(); + } + + public TimeoutSettings getTimeoutSettings() { + return delegate.getTimeoutSettings(); + } + + public MongoClientSettings getSettings() { + return settings; + } + + public MongoDriverInformation getMongoDriverInformation() { + return mongoDriverInformation; + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoClusterImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MongoClusterImpl.java new file mode 100644 index 00000000000..058122e9c26 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/MongoClusterImpl.java @@ -0,0 +1,560 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.internal; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.ClientBulkWriteException; +import com.mongodb.ClientSessionOptions; +import com.mongodb.MongoClientException; +import com.mongodb.MongoException; +import com.mongodb.MongoInternalException; +import com.mongodb.MongoQueryException; +import com.mongodb.MongoSocketException; +import com.mongodb.MongoTimeoutException; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.RequestContext; +import com.mongodb.ServerApi; +import com.mongodb.TransactionOptions; +import com.mongodb.WriteConcern; +import com.mongodb.client.ChangeStreamIterable; +import com.mongodb.client.ClientSession; +import com.mongodb.client.ListDatabasesIterable; +import com.mongodb.client.MongoCluster; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.MongoIterable; +import com.mongodb.client.SynchronousContextProvider; +import com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; +import com.mongodb.internal.IgnorableRequestContext; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.binding.ClusterAwareReadWriteBinding; +import com.mongodb.internal.binding.ClusterBinding; +import com.mongodb.internal.binding.ReadBinding; +import com.mongodb.internal.binding.ReadWriteBinding; +import com.mongodb.internal.binding.WriteBinding; +import com.mongodb.internal.client.model.changestream.ChangeStreamLevel; +import com.mongodb.internal.connection.Cluster; +import com.mongodb.internal.connection.OperationContext; +import com.mongodb.internal.connection.ReadConcernAwareNoOpSessionContext; +import com.mongodb.internal.operation.OperationHelper; +import com.mongodb.internal.operation.Operations; +import 
com.mongodb.internal.operation.ReadOperation; +import com.mongodb.internal.operation.WriteOperation; +import com.mongodb.internal.session.ServerSessionPool; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.Document; +import org.bson.UuidRepresentation; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.MongoException.TRANSIENT_TRANSACTION_ERROR_LABEL; +import static com.mongodb.MongoException.UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL; +import static com.mongodb.ReadPreference.primary; +import static com.mongodb.assertions.Assertions.isTrue; +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.internal.TimeoutContext.createTimeoutContext; + +final class MongoClusterImpl implements MongoCluster { + @Nullable + private final AutoEncryptionSettings autoEncryptionSettings; + private final Cluster cluster; + private final CodecRegistry codecRegistry; + @Nullable + private final SynchronousContextProvider contextProvider; + @Nullable + private final Crypt crypt; + private final Object originator; + private final OperationExecutor operationExecutor; + private final ReadConcern readConcern; + private final ReadPreference readPreference; + private final boolean retryReads; + private final boolean retryWrites; + @Nullable + private final ServerApi serverApi; + private final ServerSessionPool serverSessionPool; + private final TimeoutSettings timeoutSettings; + private final UuidRepresentation uuidRepresentation; + private final WriteConcern writeConcern; + private final Operations operations; + + MongoClusterImpl( + @Nullable final AutoEncryptionSettings autoEncryptionSettings, final Cluster cluster, final CodecRegistry codecRegistry, + @Nullable final SynchronousContextProvider contextProvider, @Nullable final Crypt crypt, final Object originator, + @Nullable final OperationExecutor operationExecutor, final ReadConcern readConcern, final ReadPreference readPreference, + final boolean retryReads, final boolean retryWrites, @Nullable final ServerApi serverApi, + final ServerSessionPool serverSessionPool, final TimeoutSettings timeoutSettings, final UuidRepresentation uuidRepresentation, + final WriteConcern writeConcern) { + this.autoEncryptionSettings = autoEncryptionSettings; + this.cluster = cluster; + this.codecRegistry = codecRegistry; + this.contextProvider = contextProvider; + this.crypt = crypt; + this.originator = originator; + this.operationExecutor = operationExecutor != null ? 
operationExecutor : new OperationExecutorImpl(timeoutSettings); + this.readConcern = readConcern; + this.readPreference = readPreference; + this.retryReads = retryReads; + this.retryWrites = retryWrites; + this.serverApi = serverApi; + this.serverSessionPool = serverSessionPool; + this.timeoutSettings = timeoutSettings; + this.uuidRepresentation = uuidRepresentation; + this.writeConcern = writeConcern; + operations = new Operations<>( + null, + BsonDocument.class, + readPreference, + codecRegistry, + readConcern, + writeConcern, + retryWrites, + retryReads, + timeoutSettings); + } + + @Override + public CodecRegistry getCodecRegistry() { + return codecRegistry; + } + + @Override + public ReadPreference getReadPreference() { + return readPreference; + } + + @Override + public WriteConcern getWriteConcern() { + return writeConcern; + } + + @Override + public ReadConcern getReadConcern() { + return readConcern; + } + + @Override + @Nullable + public Long getTimeout(final TimeUnit timeUnit) { + Long timeoutMS = timeoutSettings.getTimeoutMS(); + return timeoutMS == null ? null : timeUnit.convert(timeoutMS, TimeUnit.MILLISECONDS); + } + + @Override + public MongoCluster withCodecRegistry(final CodecRegistry codecRegistry) { + return new MongoClusterImpl(autoEncryptionSettings, cluster, codecRegistry, contextProvider, crypt, originator, + operationExecutor, readConcern, readPreference, retryReads, retryWrites, serverApi, serverSessionPool, timeoutSettings, + uuidRepresentation, writeConcern); + } + + @Override + public MongoCluster withReadPreference(final ReadPreference readPreference) { + return new MongoClusterImpl(autoEncryptionSettings, cluster, codecRegistry, contextProvider, crypt, originator, + operationExecutor, readConcern, readPreference, retryReads, retryWrites, serverApi, serverSessionPool, timeoutSettings, + uuidRepresentation, writeConcern); + } + + @Override + public MongoCluster withWriteConcern(final WriteConcern writeConcern) { + return new MongoClusterImpl(autoEncryptionSettings, cluster, codecRegistry, contextProvider, crypt, originator, + operationExecutor, readConcern, readPreference, retryReads, retryWrites, serverApi, serverSessionPool, timeoutSettings, + uuidRepresentation, writeConcern); + } + + @Override + public MongoCluster withReadConcern(final ReadConcern readConcern) { + return new MongoClusterImpl(autoEncryptionSettings, cluster, codecRegistry, contextProvider, crypt, originator, + operationExecutor, readConcern, readPreference, retryReads, retryWrites, serverApi, serverSessionPool, timeoutSettings, + uuidRepresentation, writeConcern); + } + + @Override + public MongoCluster withTimeout(final long timeout, final TimeUnit timeUnit) { + return new MongoClusterImpl(autoEncryptionSettings, cluster, codecRegistry, contextProvider, crypt, originator, + operationExecutor, readConcern, readPreference, retryReads, retryWrites, serverApi, serverSessionPool, + timeoutSettings.withTimeout(timeout, timeUnit), uuidRepresentation, writeConcern); + } + + @Override + public MongoDatabase getDatabase(final String databaseName) { + return new MongoDatabaseImpl(databaseName, codecRegistry, readPreference, writeConcern, retryWrites, retryReads, readConcern, + uuidRepresentation, autoEncryptionSettings, timeoutSettings, operationExecutor); + } + + public Cluster getCluster() { + return cluster; + } + + @Nullable + public Crypt getCrypt() { + return crypt; + } + + public OperationExecutor getOperationExecutor() { + return operationExecutor; + } + + public ServerSessionPool 
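+    // The with* methods above are copy-on-write: each returns a new MongoClusterImpl that
+    // shares the underlying Cluster and server session pool. An illustrative sketch,
+    // assuming a hypothetical MongoCluster named cluster:
+    //
+    //     MongoCluster readHeavy = cluster
+    //             .withReadPreference(ReadPreference.secondaryPreferred())
+    //             .withTimeout(2, TimeUnit.SECONDS);
+    //     Long timeoutMs = readHeavy.getTimeout(TimeUnit.MILLISECONDS); // 2000, for this view only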
getServerSessionPool() {
+        return serverSessionPool;
+    }
+
+    public TimeoutSettings getTimeoutSettings() {
+        return timeoutSettings;
+    }
+
+    @Override
+    public ClientSession startSession() {
+        return startSession(ClientSessionOptions
+                .builder()
+                .defaultTransactionOptions(TransactionOptions.builder()
+                        .readConcern(readConcern)
+                        .writeConcern(writeConcern)
+                        .build())
+                .build());
+    }
+
+    @Override
+    public ClientSession startSession(final ClientSessionOptions options) {
+        notNull("options", options);
+
+        ClientSessionOptions mergedOptions = ClientSessionOptions.builder(options)
+                .defaultTransactionOptions(
+                        TransactionOptions.merge(
+                                options.getDefaultTransactionOptions(),
+                                TransactionOptions.builder()
+                                        .readConcern(readConcern)
+                                        .writeConcern(writeConcern)
+                                        .readPreference(readPreference)
+                                        .build()))
+                .build();
+        return new ClientSessionImpl(serverSessionPool, originator, mergedOptions, operationExecutor);
+    }
+
+    @Override
+    public MongoIterable<String> listDatabaseNames() {
+        return createListDatabaseNamesIterable(null);
+    }
+
+    @Override
+    public MongoIterable<String> listDatabaseNames(final ClientSession clientSession) {
+        notNull("clientSession", clientSession);
+        return createListDatabaseNamesIterable(clientSession);
+    }
+
+    @Override
+    public ListDatabasesIterable<Document> listDatabases() {
+        return listDatabases(Document.class);
+    }
+
+    @Override
+    public ListDatabasesIterable<Document> listDatabases(final ClientSession clientSession) {
+        return listDatabases(clientSession, Document.class);
+    }
+
+    @Override
+    public <T> ListDatabasesIterable<T> listDatabases(final Class<T> clazz) {
+        return createListDatabasesIterable(null, clazz);
+    }
+
+    @Override
+    public <T> ListDatabasesIterable<T> listDatabases(final ClientSession clientSession, final Class<T> clazz) {
+        notNull("clientSession", clientSession);
+        return createListDatabasesIterable(clientSession, clazz);
+    }
+
+    @Override
+    public ChangeStreamIterable<Document> watch() {
+        return watch(Collections.emptyList());
+    }
+
+    @Override
+    public <TResult> ChangeStreamIterable<TResult> watch(final Class<TResult> clazz) {
+        return watch(Collections.emptyList(), clazz);
+    }
+
+    @Override
+    public ChangeStreamIterable<Document> watch(final List<? extends Bson> pipeline) {
+        return watch(pipeline, Document.class);
+    }
+
+    @Override
+    public <TResult> ChangeStreamIterable<TResult> watch(final List<? extends Bson> pipeline, final Class<TResult> clazz) {
+        return createChangeStreamIterable(null, pipeline, clazz);
+    }
+
+    @Override
+    public ChangeStreamIterable<Document> watch(final ClientSession clientSession) {
+        return watch(clientSession, Collections.emptyList());
+    }
+
+    @Override
+    public <TResult> ChangeStreamIterable<TResult> watch(final ClientSession clientSession, final Class<TResult> clazz) {
+        return watch(clientSession, Collections.emptyList(), clazz);
+    }
+
+    @Override
+    public ChangeStreamIterable<Document> watch(final ClientSession clientSession, final List<? extends Bson> pipeline) {
+        return watch(clientSession, pipeline, Document.class);
+    }
+
+    @Override
+    public <TResult> ChangeStreamIterable<TResult> watch(final ClientSession clientSession, final List<? extends Bson> pipeline,
+            final Class<TResult> clazz) {
+        notNull("clientSession", clientSession);
+        return createChangeStreamIterable(clientSession, pipeline, clazz);
+    }
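+
+    // An illustrative sketch of the client-level bulk write API declared below, assuming
+    // a hypothetical MongoCluster named cluster; the namespaces and documents are
+    // invented for the example:
+    //
+    //     ClientBulkWriteResult result = cluster.bulkWrite(
+    //             Arrays.asList(
+    //                     ClientNamespacedWriteModel.insertOne(
+    //                             new MongoNamespace("db", "people"), new Document("name", "Ada")),
+    //                     ClientNamespacedWriteModel.deleteOne(
+    //                             new MongoNamespace("db", "audit"), Filters.eq("stale", true))),
+    //             ClientBulkWriteOptions.clientBulkWriteOptions().ordered(false));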
+
+    @Override
+    public ClientBulkWriteResult bulkWrite(
+            final List<? extends ClientNamespacedWriteModel> clientWriteModels) throws ClientBulkWriteException {
+        notNull("clientWriteModels", clientWriteModels);
+        isTrueArgument("`clientWriteModels` must not be empty", !clientWriteModels.isEmpty());
+        return executeBulkWrite(null, clientWriteModels, null);
+    }
+
+    @Override
+    public ClientBulkWriteResult bulkWrite(
+            final List<? extends ClientNamespacedWriteModel> clientWriteModels,
+            final ClientBulkWriteOptions options) throws ClientBulkWriteException {
+        notNull("clientWriteModels", clientWriteModels);
+        isTrueArgument("`clientWriteModels` must not be empty", !clientWriteModels.isEmpty());
+        notNull("options", options);
+        return executeBulkWrite(null, clientWriteModels, options);
+    }
+
+    @Override
+    public ClientBulkWriteResult bulkWrite(
+            final ClientSession clientSession,
+            final List<? extends ClientNamespacedWriteModel> clientWriteModels) throws ClientBulkWriteException {
+        notNull("clientSession", clientSession);
+        notNull("clientWriteModels", clientWriteModels);
+        isTrueArgument("`clientWriteModels` must not be empty", !clientWriteModels.isEmpty());
+        return executeBulkWrite(clientSession, clientWriteModels, null);
+    }
+
+    @Override
+    public ClientBulkWriteResult bulkWrite(
+            final ClientSession clientSession,
+            final List<? extends ClientNamespacedWriteModel> clientWriteModels,
+            final ClientBulkWriteOptions options) throws ClientBulkWriteException {
+        notNull("clientSession", clientSession);
+        notNull("clientWriteModels", clientWriteModels);
+        isTrueArgument("`clientWriteModels` must not be empty", !clientWriteModels.isEmpty());
+        notNull("options", options);
+        return executeBulkWrite(clientSession, clientWriteModels, options);
+    }
+
+    private <T> ListDatabasesIterable<T> createListDatabasesIterable(@Nullable final ClientSession clientSession,
+            final Class<T> clazz) {
+        return new ListDatabasesIterableImpl<>(clientSession, clazz, codecRegistry, ReadPreference.primary(), operationExecutor,
+                retryReads, timeoutSettings);
+    }
+
+    private MongoIterable<String> createListDatabaseNamesIterable(@Nullable final ClientSession clientSession) {
+        return createListDatabasesIterable(clientSession, BsonDocument.class)
+                .nameOnly(true)
+                .map(result -> result.getString("name").getValue());
+    }
+
+    private <TResult> ChangeStreamIterable<TResult> createChangeStreamIterable(@Nullable final ClientSession clientSession,
+            final List<? extends Bson> pipeline, final Class<TResult> resultClass) {
+        return new ChangeStreamIterableImpl<>(clientSession, "admin", codecRegistry, readPreference,
+                readConcern, operationExecutor, pipeline, resultClass, ChangeStreamLevel.CLIENT,
+                retryReads, timeoutSettings);
+    }
+
+    private ClientBulkWriteResult executeBulkWrite(
+            @Nullable final ClientSession clientSession,
+            final List<? extends ClientNamespacedWriteModel> clientWriteModels,
+            @Nullable final ClientBulkWriteOptions options) {
+        isTrue("`autoEncryptionSettings` is null, as bulkWrite does not currently support automatic encryption",
+                autoEncryptionSettings == null);
+        return operationExecutor.execute(operations.clientBulkWriteOperation(clientWriteModels, options), readConcern, clientSession);
+    }
+
+    final class OperationExecutorImpl implements OperationExecutor {
+        private final TimeoutSettings executorTimeoutSettings;
+
+        OperationExecutorImpl(final TimeoutSettings executorTimeoutSettings) {
+            this.executorTimeoutSettings = executorTimeoutSettings;
+        }
+
+        @Override
+        public <T> T execute(final ReadOperation<T> operation, final ReadPreference readPreference, final ReadConcern readConcern) {
+            return execute(operation, readPreference, readConcern, null);
+        }
+
+        @Override
+        public <T> T execute(final WriteOperation<T> operation, final ReadConcern readConcern) {
+            return execute(operation, readConcern, null);
+        }
+
+        @Override
+        public <T> T execute(final ReadOperation<T> operation, final ReadPreference readPreference, final ReadConcern readConcern,
+                @Nullable final ClientSession session) {
+            if (session != null) {
+                session.notifyOperationInitiated(operation);
+            }
+
+            ClientSession actualClientSession = getClientSession(session);
+            ReadBinding binding = getReadBinding(readPreference, readConcern, actualClientSession, session == null,
+                    operation.getCommandName());
+
+            try {
+                if (actualClientSession.hasActiveTransaction() && !binding.getReadPreference().equals(primary())) {
+                    throw new MongoClientException("Read preference in a transaction must be primary");
+                }
+                return operation.execute(binding);
+            } catch (MongoException e) {
+                MongoException exceptionToHandle = OperationHelper.unwrap(e);
+                labelException(actualClientSession, exceptionToHandle);
+                clearTransactionContextOnTransientTransactionError(session, exceptionToHandle);
+                throw e;
+            } finally {
+                binding.release();
+            }
+        }
+
+        @Override
+        public <T> T execute(final WriteOperation<T> operation, final ReadConcern readConcern,
+                @Nullable final ClientSession session) {
+            if (session != null) {
+                session.notifyOperationInitiated(operation);
+            }
+
+            ClientSession actualClientSession = getClientSession(session);
+            WriteBinding binding = getWriteBinding(readConcern, actualClientSession, session == null, operation.getCommandName());
+
+            try {
+                return operation.execute(binding);
+            } catch (MongoException e) {
+                MongoException exceptionToHandle = OperationHelper.unwrap(e);
+                labelException(actualClientSession, exceptionToHandle);
+                clearTransactionContextOnTransientTransactionError(session, exceptionToHandle);
+                throw e;
+            } finally {
+                binding.release();
+            }
+        }
+
+        @Override
+        public OperationExecutor withTimeoutSettings(final TimeoutSettings newTimeoutSettings) {
+            if (Objects.equals(executorTimeoutSettings, newTimeoutSettings)) {
+                return this;
+            }
+            return new OperationExecutorImpl(newTimeoutSettings);
+        }
+
+        @Override
+        public TimeoutSettings getTimeoutSettings() {
+            return executorTimeoutSettings;
+        }
+
+        WriteBinding getWriteBinding(final ReadConcern readConcern, final ClientSession session, final boolean ownsSession,
+                final String commandName) {
+            return getReadWriteBinding(primary(), readConcern, session, ownsSession, commandName);
+        }
+
+        ReadBinding getReadBinding(final ReadPreference readPreference, final ReadConcern readConcern, final ClientSession session,
+                final boolean ownsSession, final String commandName) {
+            return getReadWriteBinding(readPreference, readConcern, session, ownsSession, commandName);
+        }
+
+        ReadWriteBinding getReadWriteBinding(final ReadPreference readPreference,
+                final ReadConcern readConcern, final ClientSession session, final boolean ownsSession,
+                final String commandName) {
+
+            ClusterAwareReadWriteBinding readWriteBinding = new ClusterBinding(cluster,
+                    getReadPreferenceForBinding(readPreference, session), readConcern,
+                    getOperationContext(session, readConcern, commandName));
+
+            if (crypt != null) {
+                readWriteBinding = new CryptBinding(readWriteBinding, crypt);
+            }
+
+            return new ClientSessionBinding(session, ownsSession, readWriteBinding);
+        }
+
+        private OperationContext getOperationContext(final ClientSession session, final ReadConcern readConcern,
+                final String commandName) {
+            return new OperationContext(
+                    getRequestContext(),
+                    new ReadConcernAwareNoOpSessionContext(readConcern),
+                    createTimeoutContext(session, executorTimeoutSettings),
+                    serverApi,
+                    commandName);
+        }
+
+        private RequestContext getRequestContext() {
+            RequestContext context = null;
+            if (contextProvider != null) {
+                context = contextProvider.getContext();
+            }
+            return context == null ?
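+            // With no SynchronousContextProvider configured (or one returning null), the
+            // no-op IgnorableRequestContext stands in, so callers never see a null context.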
IgnorableRequestContext.INSTANCE : context; + } + + private void labelException(final ClientSession session, final MongoException e) { + if (session.hasActiveTransaction() && (e instanceof MongoSocketException || e instanceof MongoTimeoutException + || e instanceof MongoQueryException && e.getCode() == 91) + && !e.hasErrorLabel(UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL)) { + e.addLabel(TRANSIENT_TRANSACTION_ERROR_LABEL); + } + } + + private void clearTransactionContextOnTransientTransactionError(@Nullable final ClientSession session, final MongoException e) { + if (session != null && e.hasErrorLabel(TRANSIENT_TRANSACTION_ERROR_LABEL)) { + session.clearTransactionContext(); + } + } + + private ReadPreference getReadPreferenceForBinding(final ReadPreference readPreference, @Nullable final ClientSession session) { + if (session == null) { + return readPreference; + } + if (session.hasActiveTransaction()) { + ReadPreference readPreferenceForBinding = session.getTransactionOptions().getReadPreference(); + if (readPreferenceForBinding == null) { + throw new MongoInternalException("Invariant violated. Transaction options read preference can not be null"); + } + return readPreferenceForBinding; + } + return readPreference; + } + + ClientSession getClientSession(@Nullable final ClientSession clientSessionFromOperation) { + ClientSession session; + if (clientSessionFromOperation != null) { + isTrue("ClientSession from same MongoClient", clientSessionFromOperation.getOriginator() == originator); + session = clientSessionFromOperation; + } else { + session = startSession(ClientSessionOptions.builder(). + causallyConsistent(false) + .defaultTransactionOptions( + TransactionOptions.builder() + .readConcern(ReadConcern.DEFAULT) + .readPreference(ReadPreference.primary()) + .writeConcern(WriteConcern.ACKNOWLEDGED).build()) + .build()); + } + return session; + } + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoCollectionImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MongoCollectionImpl.java new file mode 100755 index 00000000000..736e1541212 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/MongoCollectionImpl.java @@ -0,0 +1,1178 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.internal; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.MongoBulkWriteException; +import com.mongodb.MongoInternalException; +import com.mongodb.MongoNamespace; +import com.mongodb.MongoWriteConcernException; +import com.mongodb.MongoWriteException; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.WriteConcernResult; +import com.mongodb.WriteError; +import com.mongodb.bulk.BulkWriteResult; +import com.mongodb.client.AggregateIterable; +import com.mongodb.client.ChangeStreamIterable; +import com.mongodb.client.ClientSession; +import com.mongodb.client.DistinctIterable; +import com.mongodb.client.FindIterable; +import com.mongodb.client.ListIndexesIterable; +import com.mongodb.client.ListSearchIndexesIterable; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.model.BulkWriteOptions; +import com.mongodb.client.model.CountOptions; +import com.mongodb.client.model.CreateIndexOptions; +import com.mongodb.client.model.DeleteOptions; +import com.mongodb.client.model.DropCollectionOptions; +import com.mongodb.client.model.DropIndexOptions; +import com.mongodb.client.model.EstimatedDocumentCountOptions; +import com.mongodb.client.model.FindOneAndDeleteOptions; +import com.mongodb.client.model.FindOneAndReplaceOptions; +import com.mongodb.client.model.FindOneAndUpdateOptions; +import com.mongodb.client.model.IndexModel; +import com.mongodb.client.model.IndexOptions; +import com.mongodb.client.model.InsertManyOptions; +import com.mongodb.client.model.InsertOneOptions; +import com.mongodb.client.model.RenameCollectionOptions; +import com.mongodb.client.model.ReplaceOptions; +import com.mongodb.client.model.SearchIndexModel; +import com.mongodb.client.model.UpdateOptions; +import com.mongodb.client.model.WriteModel; +import com.mongodb.client.result.DeleteResult; +import com.mongodb.client.result.InsertManyResult; +import com.mongodb.client.result.InsertOneResult; +import com.mongodb.client.result.UpdateResult; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.bulk.WriteRequest; +import com.mongodb.internal.client.model.AggregationLevel; +import com.mongodb.internal.client.model.changestream.ChangeStreamLevel; +import com.mongodb.internal.operation.IndexHelper; +import com.mongodb.internal.operation.Operations; +import com.mongodb.internal.operation.WriteOperation; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonValue; +import org.bson.Document; +import org.bson.UuidRepresentation; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.assertions.Assertions.notNullElements; +import static com.mongodb.internal.bulk.WriteRequest.Type.DELETE; +import static com.mongodb.internal.bulk.WriteRequest.Type.INSERT; +import static com.mongodb.internal.bulk.WriteRequest.Type.REPLACE; +import static com.mongodb.internal.bulk.WriteRequest.Type.UPDATE; +import static java.util.Collections.singletonList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.bson.codecs.configuration.CodecRegistries.withUuidRepresentation; + +class MongoCollectionImpl implements MongoCollection { + private final MongoNamespace namespace; + private final Class 
documentClass; + private final ReadPreference readPreference; + private final CodecRegistry codecRegistry; + private final WriteConcern writeConcern; + private final boolean retryWrites; + private final boolean retryReads; + private final ReadConcern readConcern; + private final Operations operations; + private final UuidRepresentation uuidRepresentation; + @Nullable + private final AutoEncryptionSettings autoEncryptionSettings; + + private final TimeoutSettings timeoutSettings; + private final OperationExecutor executor; + + MongoCollectionImpl(final MongoNamespace namespace, final Class documentClass, final CodecRegistry codecRegistry, + final ReadPreference readPreference, final WriteConcern writeConcern, final boolean retryWrites, + final boolean retryReads, final ReadConcern readConcern, final UuidRepresentation uuidRepresentation, + @Nullable final AutoEncryptionSettings autoEncryptionSettings, final TimeoutSettings timeoutSettings, + final OperationExecutor executor) { + this.namespace = notNull("namespace", namespace); + this.documentClass = notNull("documentClass", documentClass); + this.codecRegistry = notNull("codecRegistry", codecRegistry); + this.readPreference = notNull("readPreference", readPreference); + this.writeConcern = notNull("writeConcern", writeConcern); + this.retryWrites = retryWrites; + this.retryReads = retryReads; + this.readConcern = notNull("readConcern", readConcern); + this.executor = notNull("executor", executor); + this.uuidRepresentation = notNull("uuidRepresentation", uuidRepresentation); + this.autoEncryptionSettings = autoEncryptionSettings; + this.timeoutSettings = timeoutSettings; + this.operations = new Operations<>(namespace, documentClass, readPreference, codecRegistry, readConcern, writeConcern, + retryWrites, retryReads, timeoutSettings); + } + + @Override + public MongoNamespace getNamespace() { + return namespace; + } + + @Override + public Class getDocumentClass() { + return documentClass; + } + + @Override + public CodecRegistry getCodecRegistry() { + return codecRegistry; + } + + @Override + public ReadPreference getReadPreference() { + return readPreference; + } + + @Override + public WriteConcern getWriteConcern() { + return writeConcern; + } + + @Override + public ReadConcern getReadConcern() { + return readConcern; + } + + @Override + @Nullable + public Long getTimeout(final TimeUnit timeUnit) { + Long timeoutMS = timeoutSettings.getTimeoutMS(); + return timeoutMS == null ? 
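+        // A null timeoutMS means no client-side timeout was configured; that is surfaced
+        // as null (not 0), otherwise the stored milliseconds are converted to the
+        // requested unit.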
null : notNull("timeUnit", timeUnit).convert(timeoutMS, MILLISECONDS); + } + + @Override + public MongoCollection withDocumentClass(final Class clazz) { + return new MongoCollectionImpl<>(namespace, clazz, codecRegistry, readPreference, writeConcern, retryWrites, + retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor); + } + + @Override + public MongoCollection withCodecRegistry(final CodecRegistry codecRegistry) { + return new MongoCollectionImpl<>(namespace, documentClass, withUuidRepresentation(codecRegistry, uuidRepresentation), + readPreference, writeConcern, retryWrites, retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor); + } + + @Override + public MongoCollection withReadPreference(final ReadPreference readPreference) { + return new MongoCollectionImpl<>(namespace, documentClass, codecRegistry, readPreference, writeConcern, retryWrites, + retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor); + } + + @Override + public MongoCollection withWriteConcern(final WriteConcern writeConcern) { + return new MongoCollectionImpl<>(namespace, documentClass, codecRegistry, readPreference, writeConcern, retryWrites, + retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor); + } + + @Override + public MongoCollection withReadConcern(final ReadConcern readConcern) { + return new MongoCollectionImpl<>(namespace, documentClass, codecRegistry, readPreference, writeConcern, retryWrites, + retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor); + } + + @Override + public MongoCollection withTimeout(final long timeout, final TimeUnit timeUnit) { + return new MongoCollectionImpl<>(namespace, documentClass, codecRegistry, readPreference, writeConcern, retryWrites, retryReads, + readConcern, uuidRepresentation, autoEncryptionSettings, timeoutSettings.withTimeout(timeout, timeUnit), executor); + } + + @Override + public long countDocuments() { + return countDocuments(new BsonDocument()); + } + + @Override + public long countDocuments(final Bson filter) { + return countDocuments(filter, new CountOptions()); + } + + @Override + public long countDocuments(final Bson filter, final CountOptions options) { + return executeCount(null, filter, options); + } + + @Override + public long countDocuments(final ClientSession clientSession) { + return countDocuments(clientSession, new BsonDocument()); + } + + @Override + public long countDocuments(final ClientSession clientSession, final Bson filter) { + return countDocuments(clientSession, filter, new CountOptions()); + } + + @Override + public long countDocuments(final ClientSession clientSession, final Bson filter, final CountOptions options) { + notNull("clientSession", clientSession); + return executeCount(clientSession, filter, options); + } + + @Override + public long estimatedDocumentCount() { + return estimatedDocumentCount(new EstimatedDocumentCountOptions()); + } + + @Override + public long estimatedDocumentCount(final EstimatedDocumentCountOptions options) { + return getExecutor(operations.createTimeoutSettings(options)) + .execute(operations.estimatedDocumentCount(options), readPreference, readConcern, null); + } + + private long executeCount(@Nullable final ClientSession clientSession, final Bson filter, final CountOptions options) { + return getExecutor(operations.createTimeoutSettings(options)) + .execute(operations.countDocuments(filter, options), 
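+                // countDocuments runs a real server-side count pipeline and honors the
+                // filter and session; estimatedDocumentCount above answers from collection
+                // metadata, which is why it takes neither.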
readPreference, readConcern, clientSession);
+    }
+
+    @Override
+    public <TResult> DistinctIterable<TResult> distinct(final String fieldName, final Class<TResult> resultClass) {
+        return distinct(fieldName, new BsonDocument(), resultClass);
+    }
+
+    @Override
+    public <TResult> DistinctIterable<TResult> distinct(final String fieldName, final Bson filter, final Class<TResult> resultClass) {
+        return createDistinctIterable(null, fieldName, filter, resultClass);
+    }
+
+    @Override
+    public <TResult> DistinctIterable<TResult> distinct(final ClientSession clientSession, final String fieldName,
+            final Class<TResult> resultClass) {
+        return distinct(clientSession, fieldName, new BsonDocument(), resultClass);
+    }
+
+    @Override
+    public <TResult> DistinctIterable<TResult> distinct(final ClientSession clientSession, final String fieldName, final Bson filter,
+            final Class<TResult> resultClass) {
+        notNull("clientSession", clientSession);
+        return createDistinctIterable(clientSession, fieldName, filter, resultClass);
+    }
+
+    private <TResult> DistinctIterable<TResult> createDistinctIterable(@Nullable final ClientSession clientSession,
+            final String fieldName, final Bson filter, final Class<TResult> resultClass) {
+        return new DistinctIterableImpl<>(clientSession, namespace, documentClass, resultClass, codecRegistry,
+                readPreference, readConcern, executor, fieldName, filter, retryReads, timeoutSettings);
+    }
+
+    @Override
+    public FindIterable<TDocument> find() {
+        return find(new BsonDocument(), documentClass);
+    }
+
+    @Override
+    public <TResult> FindIterable<TResult> find(final Class<TResult> resultClass) {
+        return find(new BsonDocument(), resultClass);
+    }
+
+    @Override
+    public FindIterable<TDocument> find(final Bson filter) {
+        return find(filter, documentClass);
+    }
+
+    @Override
+    public <TResult> FindIterable<TResult> find(final Bson filter, final Class<TResult> resultClass) {
+        return createFindIterable(null, filter, resultClass);
+    }
+
+    @Override
+    public FindIterable<TDocument> find(final ClientSession clientSession) {
+        notNull("clientSession", clientSession);
+        return find(clientSession, new BsonDocument(), documentClass);
+    }
+
+    @Override
+    public <TResult> FindIterable<TResult> find(final ClientSession clientSession, final Class<TResult> resultClass) {
+        notNull("clientSession", clientSession);
+        return find(clientSession, new BsonDocument(), resultClass);
+    }
+
+    @Override
+    public FindIterable<TDocument> find(final ClientSession clientSession, final Bson filter) {
+        notNull("clientSession", clientSession);
+        return find(clientSession, filter, documentClass);
+    }
+
+    @Override
+    public <TResult> FindIterable<TResult> find(final ClientSession clientSession, final Bson filter,
+            final Class<TResult> resultClass) {
+        notNull("clientSession", clientSession);
+        return createFindIterable(clientSession, filter, resultClass);
+    }
+
+    private <TResult> FindIterable<TResult> createFindIterable(@Nullable final ClientSession clientSession, final Bson filter,
+            final Class<TResult> resultClass) {
+        return new FindIterableImpl<>(clientSession, namespace, this.documentClass, resultClass, codecRegistry,
+                readPreference, readConcern, executor, filter, retryReads, timeoutSettings);
+    }
+
+    @Override
+    public AggregateIterable<TDocument> aggregate(final List<? extends Bson> pipeline) {
+        return aggregate(pipeline, documentClass);
+    }
+
+    @Override
+    public <TResult> AggregateIterable<TResult> aggregate(final List<? extends Bson> pipeline, final Class<TResult> resultClass) {
+        return createAggregateIterable(null, pipeline, resultClass);
+    }
+
+    @Override
+    public AggregateIterable<TDocument> aggregate(final ClientSession clientSession, final List<? extends Bson> pipeline) {
+        return aggregate(clientSession, pipeline, documentClass);
+    }
+
+    @Override
+    public <TResult> AggregateIterable<TResult> aggregate(final ClientSession clientSession, final List<? extends Bson> pipeline,
+            final Class<TResult> resultClass) {
+        notNull("clientSession", clientSession);
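+        // An illustrative aggregation sketch, assuming a hypothetical
+        // MongoCollection<Document> named coll (Aggregates, Accumulators and Filters
+        // come from com.mongodb.client.model):
+        //
+        //     List<Document> totals = coll.aggregate(Arrays.asList(
+        //                     Aggregates.match(Filters.eq("status", "A")),
+        //                     Aggregates.group("$custId", Accumulators.sum("total", "$amount"))))
+        //             .into(new ArrayList<>());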
return createAggregateIterable(clientSession, pipeline, resultClass); + } + + private AggregateIterable createAggregateIterable(@Nullable final ClientSession clientSession, + final List pipeline, + final Class resultClass) { + return new AggregateIterableImpl<>(clientSession, namespace, documentClass, resultClass, codecRegistry, + readPreference, readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, retryReads, timeoutSettings); + } + + @Override + public ChangeStreamIterable watch() { + return watch(Collections.emptyList()); + } + + @Override + public ChangeStreamIterable watch(final Class resultClass) { + return watch(Collections.emptyList(), resultClass); + } + + @Override + public ChangeStreamIterable watch(final List pipeline) { + return watch(pipeline, documentClass); + } + + @Override + public ChangeStreamIterable watch(final List pipeline, final Class resultClass) { + return createChangeStreamIterable(null, pipeline, resultClass); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession) { + return watch(clientSession, Collections.emptyList(), documentClass); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession, final Class resultClass) { + return watch(clientSession, Collections.emptyList(), resultClass); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession, final List pipeline) { + return watch(clientSession, pipeline, documentClass); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession, final List pipeline, + final Class resultClass) { + notNull("clientSession", clientSession); + return createChangeStreamIterable(clientSession, pipeline, resultClass); + } + + private ChangeStreamIterable createChangeStreamIterable(@Nullable final ClientSession clientSession, + final List pipeline, + final Class resultClass) { + return new ChangeStreamIterableImpl<>(clientSession, namespace, codecRegistry, readPreference, readConcern, executor, + pipeline, resultClass, ChangeStreamLevel.COLLECTION, retryReads, timeoutSettings); + } + + @SuppressWarnings("deprecation") + @Override + public com.mongodb.client.MapReduceIterable mapReduce(final String mapFunction, final String reduceFunction) { + return mapReduce(mapFunction, reduceFunction, documentClass); + } + + @SuppressWarnings("deprecation") + @Override + public com.mongodb.client.MapReduceIterable mapReduce(final String mapFunction, final String reduceFunction, + final Class resultClass) { + return createMapReduceIterable(null, mapFunction, reduceFunction, resultClass); + } + + @SuppressWarnings("deprecation") + @Override + public com.mongodb.client.MapReduceIterable mapReduce(final ClientSession clientSession, final String mapFunction, + final String reduceFunction) { + return mapReduce(clientSession, mapFunction, reduceFunction, documentClass); + } + + @SuppressWarnings("deprecation") + @Override + public com.mongodb.client.MapReduceIterable mapReduce(final ClientSession clientSession, final String mapFunction, + final String reduceFunction, final Class resultClass) { + notNull("clientSession", clientSession); + return createMapReduceIterable(clientSession, mapFunction, reduceFunction, resultClass); + } + + @SuppressWarnings("deprecation") + private com.mongodb.client.MapReduceIterable createMapReduceIterable(@Nullable final ClientSession clientSession, + final String mapFunction, final String reduceFunction, + final Class resultClass) { + return new 
MapReduceIterableImpl<>(clientSession, namespace, documentClass, resultClass, codecRegistry,
+                readPreference, readConcern, writeConcern, executor, mapFunction, reduceFunction, timeoutSettings);
+    }
+
+    @Override
+    public BulkWriteResult bulkWrite(final List<? extends WriteModel<? extends TDocument>> requests) {
+        return bulkWrite(requests, new BulkWriteOptions());
+    }
+
+    @Override
+    public BulkWriteResult bulkWrite(final List<? extends WriteModel<? extends TDocument>> requests,
+            final BulkWriteOptions options) {
+        return executeBulkWrite(null, requests, options);
+    }
+
+    @Override
+    public BulkWriteResult bulkWrite(final ClientSession clientSession,
+            final List<? extends WriteModel<? extends TDocument>> requests) {
+        return bulkWrite(clientSession, requests, new BulkWriteOptions());
+    }
+
+    @Override
+    public BulkWriteResult bulkWrite(final ClientSession clientSession,
+            final List<? extends WriteModel<? extends TDocument>> requests,
+            final BulkWriteOptions options) {
+        notNull("clientSession", clientSession);
+        return executeBulkWrite(clientSession, requests, options);
+    }
+
+    private BulkWriteResult executeBulkWrite(@Nullable final ClientSession clientSession,
+            final List<? extends WriteModel<? extends TDocument>> requests,
+            final BulkWriteOptions options) {
+        notNull("requests", requests);
+        return getExecutor(timeoutSettings)
+                .execute(operations.bulkWrite(requests, options), readConcern, clientSession);
+    }
+
+    @Override
+    public InsertOneResult insertOne(final TDocument document) {
+        return insertOne(document, new InsertOneOptions());
+    }
+
+    @Override
+    public InsertOneResult insertOne(final TDocument document, final InsertOneOptions options) {
+        notNull("document", document);
+        return executeInsertOne(null, document, options);
+    }
+
+    @Override
+    public InsertOneResult insertOne(final ClientSession clientSession, final TDocument document) {
+        return insertOne(clientSession, document, new InsertOneOptions());
+    }
+
+    @Override
+    public InsertOneResult insertOne(final ClientSession clientSession, final TDocument document, final InsertOneOptions options) {
+        notNull("clientSession", clientSession);
+        notNull("document", document);
+        return executeInsertOne(clientSession, document, options);
+    }
+
+    private InsertOneResult executeInsertOne(@Nullable final ClientSession clientSession, final TDocument document,
+            final InsertOneOptions options) {
+        return toInsertOneResult(executeSingleWriteRequest(clientSession, operations.insertOne(document, options), INSERT));
+    }
+
+    @Override
+    public InsertManyResult insertMany(final List<? extends TDocument> documents) {
+        return insertMany(documents, new InsertManyOptions());
+    }
+
+    @Override
+    public InsertManyResult insertMany(final List<? extends TDocument> documents, final InsertManyOptions options) {
+        return executeInsertMany(null, documents, options);
+    }
+
+    @Override
+    public InsertManyResult insertMany(final ClientSession clientSession, final List<? extends TDocument> documents) {
+        return insertMany(clientSession, documents, new InsertManyOptions());
+    }
+
+    @Override
+    public InsertManyResult insertMany(final ClientSession clientSession, final List<? extends TDocument> documents,
+            final InsertManyOptions options) {
+        notNull("clientSession", clientSession);
+        return executeInsertMany(clientSession, documents, options);
+    }
+
+    private InsertManyResult executeInsertMany(@Nullable final ClientSession clientSession,
+            final List<? extends TDocument> documents, final InsertManyOptions options) {
+        return toInsertManyResult(
+                getExecutor(timeoutSettings).execute(operations.insertMany(documents, options), readConcern, clientSession)
+        );
+    }
+
+    @Override
+    public DeleteResult deleteOne(final Bson filter) {
+        return deleteOne(filter, new DeleteOptions());
+    }
+
+    @Override
+    public DeleteResult deleteOne(final Bson filter, final DeleteOptions
options) { + return executeDelete(null, filter, options, false); + } + + @Override + public DeleteResult deleteOne(final ClientSession clientSession, final Bson filter) { + return deleteOne(clientSession, filter, new DeleteOptions()); + } + + @Override + public DeleteResult deleteOne(final ClientSession clientSession, final Bson filter, final DeleteOptions options) { + notNull("clientSession", clientSession); + return executeDelete(clientSession, filter, options, false); + } + + @Override + public DeleteResult deleteMany(final Bson filter) { + return deleteMany(filter, new DeleteOptions()); + } + + @Override + public DeleteResult deleteMany(final Bson filter, final DeleteOptions options) { + return executeDelete(null, filter, options, true); + } + + @Override + public DeleteResult deleteMany(final ClientSession clientSession, final Bson filter) { + return deleteMany(clientSession, filter, new DeleteOptions()); + } + + @Override + public DeleteResult deleteMany(final ClientSession clientSession, final Bson filter, final DeleteOptions options) { + notNull("clientSession", clientSession); + return executeDelete(clientSession, filter, options, true); + } + + @Override + public UpdateResult replaceOne(final Bson filter, final TDocument replacement) { + return replaceOne(filter, replacement, new ReplaceOptions()); + } + + @Override + public UpdateResult replaceOne(final Bson filter, final TDocument replacement, final ReplaceOptions replaceOptions) { + return executeReplaceOne(null, filter, replacement, replaceOptions); + } + + @Override + public UpdateResult replaceOne(final ClientSession clientSession, final Bson filter, final TDocument replacement) { + return replaceOne(clientSession, filter, replacement, new ReplaceOptions()); + } + + @Override + public UpdateResult replaceOne(final ClientSession clientSession, final Bson filter, final TDocument replacement, + final ReplaceOptions replaceOptions) { + notNull("clientSession", clientSession); + return executeReplaceOne(clientSession, filter, replacement, replaceOptions); + } + + private UpdateResult executeReplaceOne(@Nullable final ClientSession clientSession, final Bson filter, final TDocument replacement, + final ReplaceOptions replaceOptions) { + return toUpdateResult(executeSingleWriteRequest(clientSession, operations.replaceOne(filter, replacement, replaceOptions), + REPLACE)); + } + + @Override + public UpdateResult updateOne(final Bson filter, final Bson update) { + return updateOne(filter, update, new UpdateOptions()); + } + + @Override + public UpdateResult updateOne(final Bson filter, final Bson update, final UpdateOptions updateOptions) { + return executeUpdate(null, filter, update, updateOptions, false); + } + + @Override + public UpdateResult updateOne(final ClientSession clientSession, final Bson filter, final Bson update) { + return updateOne(clientSession, filter, update, new UpdateOptions()); + } + + @Override + public UpdateResult updateOne(final ClientSession clientSession, final Bson filter, final Bson update, + final UpdateOptions updateOptions) { + notNull("clientSession", clientSession); + return executeUpdate(clientSession, filter, update, updateOptions, false); + + } + + @Override + public UpdateResult updateOne(final Bson filter, final List update) { + return updateOne(filter, update, new UpdateOptions()); + } + + @Override + public UpdateResult updateOne(final Bson filter, final List update, final UpdateOptions updateOptions) { + return executeUpdate(null, filter, update, updateOptions, false); + } + + @Override + 
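+    // updateOne/updateMany accept either a classic update document or an aggregation
+    // pipeline (the List<? extends Bson> overloads). An illustrative sketch, assuming a
+    // hypothetical MongoCollection<Document> named coll:
+    //
+    //     coll.updateOne(Filters.eq("_id", id), Updates.set("status", "D"));
+    //     coll.updateOne(Filters.eq("_id", id), Arrays.asList(
+    //             new Document("$set", new Document("total",
+    //                     new Document("$add", Arrays.asList("$a", "$b"))))));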
public UpdateResult updateOne(final ClientSession clientSession, final Bson filter, final List update) { + return updateOne(clientSession, filter, update, new UpdateOptions()); + } + + @Override + public UpdateResult updateOne(final ClientSession clientSession, final Bson filter, final List update, + final UpdateOptions updateOptions) { + notNull("clientSession", clientSession); + return executeUpdate(clientSession, filter, update, updateOptions, false); + + } + + @Override + public UpdateResult updateMany(final Bson filter, final Bson update) { + return updateMany(filter, update, new UpdateOptions()); + } + + @Override + public UpdateResult updateMany(final Bson filter, final Bson update, final UpdateOptions updateOptions) { + return executeUpdate(null, filter, update, updateOptions, true); + } + + @Override + public UpdateResult updateMany(final ClientSession clientSession, final Bson filter, final Bson update) { + return updateMany(clientSession, filter, update, new UpdateOptions()); + } + + @Override + public UpdateResult updateMany(final ClientSession clientSession, final Bson filter, final Bson update, + final UpdateOptions updateOptions) { + notNull("clientSession", clientSession); + return executeUpdate(clientSession, filter, update, updateOptions, true); + } + + @Override + public UpdateResult updateMany(final Bson filter, final List update) { + return updateMany(filter, update, new UpdateOptions()); + } + + @Override + public UpdateResult updateMany(final Bson filter, final List update, final UpdateOptions updateOptions) { + return executeUpdate(null, filter, update, updateOptions, true); + } + + @Override + public UpdateResult updateMany(final ClientSession clientSession, final Bson filter, final List update) { + return updateMany(clientSession, filter, update, new UpdateOptions()); + } + + @Override + public UpdateResult updateMany(final ClientSession clientSession, final Bson filter, final List update, + final UpdateOptions updateOptions) { + notNull("clientSession", clientSession); + return executeUpdate(clientSession, filter, update, updateOptions, true); + } + + @Override + @Nullable + public TDocument findOneAndDelete(final Bson filter) { + return findOneAndDelete(filter, new FindOneAndDeleteOptions()); + } + + @Override + @Nullable + public TDocument findOneAndDelete(final Bson filter, final FindOneAndDeleteOptions options) { + return executeFindOneAndDelete(null, filter, options); + } + + @Override + @Nullable + public TDocument findOneAndDelete(final ClientSession clientSession, final Bson filter) { + return findOneAndDelete(clientSession, filter, new FindOneAndDeleteOptions()); + } + + @Override + @Nullable + public TDocument findOneAndDelete(final ClientSession clientSession, final Bson filter, final FindOneAndDeleteOptions options) { + notNull("clientSession", clientSession); + return executeFindOneAndDelete(clientSession, filter, options); + } + + @Nullable + private TDocument executeFindOneAndDelete(@Nullable final ClientSession clientSession, final Bson filter, + final FindOneAndDeleteOptions options) { + return getExecutor(operations.createTimeoutSettings(options)) + .execute(operations.findOneAndDelete(filter, options), readConcern, clientSession); + } + + @Override + @Nullable + public TDocument findOneAndReplace(final Bson filter, final TDocument replacement) { + return findOneAndReplace(filter, replacement, new FindOneAndReplaceOptions()); + } + + @Override + @Nullable + public TDocument findOneAndReplace(final Bson filter, final TDocument replacement, final 
FindOneAndReplaceOptions options) { + return executeFindOneAndReplace(null, filter, replacement, options); + } + + @Override + @Nullable + public TDocument findOneAndReplace(final ClientSession clientSession, final Bson filter, final TDocument replacement) { + return findOneAndReplace(clientSession, filter, replacement, new FindOneAndReplaceOptions()); + } + + @Override + @Nullable + public TDocument findOneAndReplace(final ClientSession clientSession, final Bson filter, final TDocument replacement, + final FindOneAndReplaceOptions options) { + notNull("clientSession", clientSession); + return executeFindOneAndReplace(clientSession, filter, replacement, options); + } + + @Nullable + private TDocument executeFindOneAndReplace(@Nullable final ClientSession clientSession, final Bson filter, final TDocument replacement, + final FindOneAndReplaceOptions options) { + return getExecutor(operations.createTimeoutSettings(options)) + .execute(operations.findOneAndReplace(filter, replacement, options), readConcern, clientSession); + } + + @Override + @Nullable + public TDocument findOneAndUpdate(final Bson filter, final Bson update) { + return findOneAndUpdate(filter, update, new FindOneAndUpdateOptions()); + } + + @Override + @Nullable + public TDocument findOneAndUpdate(final Bson filter, final Bson update, final FindOneAndUpdateOptions options) { + return executeFindOneAndUpdate(null, filter, update, options); + } + + @Override + @Nullable + public TDocument findOneAndUpdate(final ClientSession clientSession, final Bson filter, final Bson update) { + return findOneAndUpdate(clientSession, filter, update, new FindOneAndUpdateOptions()); + } + + @Override + @Nullable + public TDocument findOneAndUpdate(final ClientSession clientSession, final Bson filter, final Bson update, + final FindOneAndUpdateOptions options) { + notNull("clientSession", clientSession); + return executeFindOneAndUpdate(clientSession, filter, update, options); + } + + @Nullable + private TDocument executeFindOneAndUpdate(@Nullable final ClientSession clientSession, final Bson filter, final Bson update, + final FindOneAndUpdateOptions options) { + return getExecutor(operations.createTimeoutSettings(options)) + .execute(operations.findOneAndUpdate(filter, update, options), readConcern, clientSession); + } + + @Override + @Nullable + public TDocument findOneAndUpdate(final Bson filter, final List update) { + return findOneAndUpdate(filter, update, new FindOneAndUpdateOptions()); + } + + @Override + @Nullable + public TDocument findOneAndUpdate(final Bson filter, final List update, final FindOneAndUpdateOptions options) { + return executeFindOneAndUpdate(null, filter, update, options); + } + + @Override + @Nullable + public TDocument findOneAndUpdate(final ClientSession clientSession, final Bson filter, final List update) { + return findOneAndUpdate(clientSession, filter, update, new FindOneAndUpdateOptions()); + } + + @Override + @Nullable + public TDocument findOneAndUpdate(final ClientSession clientSession, final Bson filter, final List update, + final FindOneAndUpdateOptions options) { + notNull("clientSession", clientSession); + return executeFindOneAndUpdate(clientSession, filter, update, options); + } + + @Nullable + private TDocument executeFindOneAndUpdate(@Nullable final ClientSession clientSession, final Bson filter, + final List update, final FindOneAndUpdateOptions options) { + return getExecutor(operations.createTimeoutSettings(options)) + .execute(operations.findOneAndUpdate(filter, update, options), readConcern, 
clientSession); + } + + @Override + public void drop() { + executeDrop(null, new DropCollectionOptions()); + } + + @Override + public void drop(final ClientSession clientSession) { + notNull("clientSession", clientSession); + executeDrop(clientSession, new DropCollectionOptions()); + } + + @Override + public void drop(final DropCollectionOptions dropCollectionOptions) { + executeDrop(null, dropCollectionOptions); + } + + @Override + public void drop(final ClientSession clientSession, final DropCollectionOptions dropCollectionOptions) { + executeDrop(clientSession, dropCollectionOptions); + } + + @Override + public String createSearchIndex(final String indexName, final Bson definition) { + notNull("indexName", indexName); + notNull("definition", definition); + + return executeCreateSearchIndexes(singletonList(new SearchIndexModel(indexName, definition))).get(0); + } + + @Override + public String createSearchIndex(final Bson definition) { + notNull("definition", definition); + + return executeCreateSearchIndexes(singletonList(new SearchIndexModel(definition))).get(0); + } + + @Override + public List createSearchIndexes(final List searchIndexModels) { + notNullElements("searchIndexModels", searchIndexModels); + + return executeCreateSearchIndexes(searchIndexModels); + } + + @Override + public void updateSearchIndex(final String indexName, final Bson definition) { + notNull("indexName", indexName); + notNull("definition", definition); + + getExecutor(timeoutSettings).execute(operations.updateSearchIndex(indexName, definition), readConcern, null); + } + + @Override + public void dropSearchIndex(final String indexName) { + notNull("indexName", indexName); + + getExecutor(timeoutSettings).execute(operations.dropSearchIndex(indexName), readConcern, null); + } + + @Override + public ListSearchIndexesIterable listSearchIndexes() { + return createListSearchIndexesIterable(Document.class); + } + + @Override + public ListSearchIndexesIterable listSearchIndexes(final Class resultClass) { + notNull("resultClass", resultClass); + return createListSearchIndexesIterable(resultClass); + } + + private void executeDrop(@Nullable final ClientSession clientSession, final DropCollectionOptions dropCollectionOptions) { + getExecutor(timeoutSettings) + .execute(operations.dropCollection(dropCollectionOptions, autoEncryptionSettings), readConcern, clientSession); + } + + @Override + public String createIndex(final Bson keys) { + return createIndex(keys, new IndexOptions()); + } + + @Override + public String createIndex(final Bson keys, final IndexOptions indexOptions) { + return createIndexes(singletonList(new IndexModel(keys, indexOptions))).get(0); + } + + @Override + public String createIndex(final ClientSession clientSession, final Bson keys) { + return createIndex(clientSession, keys, new IndexOptions()); + } + + @Override + public String createIndex(final ClientSession clientSession, final Bson keys, final IndexOptions indexOptions) { + return createIndexes(clientSession, singletonList(new IndexModel(keys, indexOptions))).get(0); + } + + @Override + public List createIndexes(final List indexes) { + return createIndexes(indexes, new CreateIndexOptions()); + } + + @Override + public List createIndexes(final List indexes, final CreateIndexOptions createIndexOptions) { + return executeCreateIndexes(null, indexes, createIndexOptions); + } + + @Override + public List createIndexes(final ClientSession clientSession, final List indexes) { + return createIndexes(clientSession, indexes, new CreateIndexOptions()); + } + 
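+    // An illustrative index sketch, assuming a hypothetical MongoCollection<Document>
+    // named coll; Indexes, IndexOptions and IndexModel come from com.mongodb.client.model:
+    //
+    //     String name = coll.createIndex(Indexes.ascending("email"), new IndexOptions().unique(true));
+    //     List<String> names = coll.createIndexes(Arrays.asList(
+    //             new IndexModel(Indexes.ascending("a")),
+    //             new IndexModel(Indexes.descending("b"))));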
+ @Override + public List createIndexes(final ClientSession clientSession, final List indexes, + final CreateIndexOptions createIndexOptions) { + notNull("clientSession", clientSession); + return executeCreateIndexes(clientSession, indexes, createIndexOptions); + } + + private List executeCreateIndexes(@Nullable final ClientSession clientSession, final List indexes, + final CreateIndexOptions createIndexOptions) { + getExecutor(operations.createTimeoutSettings(createIndexOptions)) + .execute(operations.createIndexes(indexes, createIndexOptions), readConcern, clientSession); + return IndexHelper.getIndexNames(indexes, codecRegistry); + } + + private List executeCreateSearchIndexes(final List searchIndexModels) { + getExecutor(timeoutSettings).execute(operations.createSearchIndexes(searchIndexModels), readConcern, null); + return IndexHelper.getSearchIndexNames(searchIndexModels); + } + + @Override + public ListIndexesIterable listIndexes() { + return listIndexes(Document.class); + } + + @Override + public ListIndexesIterable listIndexes(final Class resultClass) { + return createListIndexesIterable(null, resultClass); + } + + @Override + public ListIndexesIterable listIndexes(final ClientSession clientSession) { + return listIndexes(clientSession, Document.class); + } + + @Override + public ListIndexesIterable listIndexes(final ClientSession clientSession, final Class resultClass) { + notNull("clientSession", clientSession); + return createListIndexesIterable(clientSession, resultClass); + } + + private ListIndexesIterable createListIndexesIterable(@Nullable final ClientSession clientSession, + final Class resultClass) { + return new ListIndexesIterableImpl<>(clientSession, getNamespace(), resultClass, codecRegistry, ReadPreference.primary(), + executor, retryReads, timeoutSettings); + } + + private ListSearchIndexesIterable createListSearchIndexesIterable(final Class resultClass) { + return new ListSearchIndexesIterableImpl<>(getNamespace(), executor, resultClass, codecRegistry, readPreference, + retryReads, timeoutSettings); + } + + @Override + public void dropIndex(final String indexName) { + dropIndex(indexName, new DropIndexOptions()); + } + + @Override + public void dropIndex(final String indexName, final DropIndexOptions dropIndexOptions) { + executeDropIndex(null, indexName, dropIndexOptions); + } + + @Override + public void dropIndex(final Bson keys) { + dropIndex(keys, new DropIndexOptions()); + } + + @Override + public void dropIndex(final Bson keys, final DropIndexOptions dropIndexOptions) { + executeDropIndex(null, keys, dropIndexOptions); + } + + @Override + public void dropIndex(final ClientSession clientSession, final String indexName) { + dropIndex(clientSession, indexName, new DropIndexOptions()); + } + + @Override + public void dropIndex(final ClientSession clientSession, final Bson keys) { + dropIndex(clientSession, keys, new DropIndexOptions()); + } + + @Override + public void dropIndex(final ClientSession clientSession, final String indexName, final DropIndexOptions dropIndexOptions) { + notNull("clientSession", clientSession); + executeDropIndex(clientSession, indexName, dropIndexOptions); + } + + @Override + public void dropIndex(final ClientSession clientSession, final Bson keys, final DropIndexOptions dropIndexOptions) { + notNull("clientSession", clientSession); + executeDropIndex(clientSession, keys, dropIndexOptions); + } + + @Override + public void dropIndexes() { + dropIndex("*"); + } + + @Override + public void dropIndexes(final ClientSession clientSession) { 
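+        // The index name "*" is the server's wildcard for "every index except the
+        // mandatory _id index", which is how the dropIndexes variants funnel into
+        // executeDropIndex.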
+ notNull("clientSession", clientSession); + executeDropIndex(clientSession, "*", new DropIndexOptions()); + } + + @Override + public void dropIndexes(final DropIndexOptions dropIndexOptions) { + dropIndex("*", dropIndexOptions); + } + + @Override + public void dropIndexes(final ClientSession clientSession, final DropIndexOptions dropIndexOptions) { + dropIndex(clientSession, "*", dropIndexOptions); + } + + private void executeDropIndex(@Nullable final ClientSession clientSession, final String indexName, + final DropIndexOptions options) { + notNull("options", options); + getExecutor(operations.createTimeoutSettings(options)) + .execute(operations.dropIndex(indexName, options), readConcern, clientSession); + } + + private void executeDropIndex(@Nullable final ClientSession clientSession, final Bson keys, final DropIndexOptions options) { + notNull("options", options); + getExecutor(operations.createTimeoutSettings(options)) + .execute(operations.dropIndex(keys, options), readConcern, clientSession); + } + + @Override + public void renameCollection(final MongoNamespace newCollectionNamespace) { + renameCollection(newCollectionNamespace, new RenameCollectionOptions()); + } + + @Override + public void renameCollection(final MongoNamespace newCollectionNamespace, final RenameCollectionOptions renameCollectionOptions) { + executeRenameCollection(null, newCollectionNamespace, renameCollectionOptions); + } + + @Override + public void renameCollection(final ClientSession clientSession, final MongoNamespace newCollectionNamespace) { + renameCollection(clientSession, newCollectionNamespace, new RenameCollectionOptions()); + } + + @Override + public void renameCollection(final ClientSession clientSession, final MongoNamespace newCollectionNamespace, + final RenameCollectionOptions renameCollectionOptions) { + notNull("clientSession", clientSession); + executeRenameCollection(clientSession, newCollectionNamespace, renameCollectionOptions); + } + + private void executeRenameCollection(@Nullable final ClientSession clientSession, final MongoNamespace newCollectionNamespace, + final RenameCollectionOptions renameCollectionOptions) { + getExecutor(timeoutSettings) + .execute(operations.renameCollection(newCollectionNamespace, renameCollectionOptions), readConcern, clientSession); + } + + private DeleteResult executeDelete(@Nullable final ClientSession clientSession, final Bson filter, final DeleteOptions deleteOptions, + final boolean multi) { + com.mongodb.bulk.BulkWriteResult result = executeSingleWriteRequest(clientSession, + multi ? operations.deleteMany(filter, deleteOptions) : operations.deleteOne(filter, deleteOptions), DELETE); + if (result.wasAcknowledged()) { + return DeleteResult.acknowledged(result.getDeletedCount()); + } else { + return DeleteResult.unacknowledged(); + } + } + + private UpdateResult executeUpdate(@Nullable final ClientSession clientSession, final Bson filter, final Bson update, + final UpdateOptions updateOptions, final boolean multi) { + return toUpdateResult(executeSingleWriteRequest(clientSession, + multi ? operations.updateMany(filter, update, updateOptions) : operations.updateOne(filter, update, updateOptions), + UPDATE)); + } + + private UpdateResult executeUpdate(@Nullable final ClientSession clientSession, final Bson filter, + final List update, final UpdateOptions updateOptions, final boolean multi) { + return toUpdateResult(executeSingleWriteRequest(clientSession, + multi ? 
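+                // The multi flag is all that separates updateMany from updateOne here;
+                // both routes go through the same single-write-request bulk path.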
operations.updateMany(filter, update, updateOptions) : operations.updateOne(filter, update, updateOptions), + UPDATE)); + } + + private BulkWriteResult executeSingleWriteRequest(@Nullable final ClientSession clientSession, + final WriteOperation writeOperation, + final WriteRequest.Type type) { + try { + return getExecutor(timeoutSettings) + .execute(writeOperation, readConcern, clientSession); + } catch (MongoBulkWriteException e) { + if (e.getWriteErrors().isEmpty()) { + throw new MongoWriteConcernException(e.getWriteConcernError(), + translateBulkWriteResult(type, e.getWriteResult()), + e.getServerAddress(), e.getErrorLabels()); + } else { + throw new MongoWriteException(new WriteError(e.getWriteErrors().get(0)), e.getServerAddress(), e.getErrorLabels()); + } + + } + } + + private WriteConcernResult translateBulkWriteResult(final WriteRequest.Type type, final BulkWriteResult writeResult) { + switch (type) { + case INSERT: + return WriteConcernResult.acknowledged(writeResult.getInsertedCount(), false, null); + case DELETE: + return WriteConcernResult.acknowledged(writeResult.getDeletedCount(), false, null); + case UPDATE: + case REPLACE: + return WriteConcernResult.acknowledged(writeResult.getMatchedCount() + writeResult.getUpserts().size(), + writeResult.getMatchedCount() > 0, + writeResult.getUpserts().isEmpty() + ? null : writeResult.getUpserts().get(0).getId()); + default: + throw new MongoInternalException("Unhandled write request type: " + type); + } + } + + private InsertOneResult toInsertOneResult(final com.mongodb.bulk.BulkWriteResult result) { + if (result.wasAcknowledged()) { + BsonValue insertedId = result.getInserts().isEmpty() ? null : result.getInserts().get(0).getId(); + return InsertOneResult.acknowledged(insertedId); + } else { + return InsertOneResult.unacknowledged(); + } + } + + private InsertManyResult toInsertManyResult(final com.mongodb.bulk.BulkWriteResult result) { + if (result.wasAcknowledged()) { + return InsertManyResult.acknowledged(result.getInserts().stream() + .collect(HashMap::new, (m, v) -> m.put(v.getIndex(), v.getId()), HashMap::putAll)); + } else { + return InsertManyResult.unacknowledged(); + } + } + + private UpdateResult toUpdateResult(final com.mongodb.bulk.BulkWriteResult result) { + if (result.wasAcknowledged()) { + BsonValue upsertedId = result.getUpserts().isEmpty() ? null : result.getUpserts().get(0).getId(); + return UpdateResult.acknowledged(result.getMatchedCount(), (long) result.getModifiedCount(), upsertedId); + } else { + return UpdateResult.unacknowledged(); + } + } + + private OperationExecutor getExecutor(final TimeoutSettings timeoutSettings) { + return executor.withTimeoutSettings(timeoutSettings); + } + +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoDatabaseImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MongoDatabaseImpl.java new file mode 100644 index 00000000000..1541fbe1c63 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/MongoDatabaseImpl.java @@ -0,0 +1,415 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.internal; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.MongoClientException; +import com.mongodb.MongoNamespace; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.client.AggregateIterable; +import com.mongodb.client.ChangeStreamIterable; +import com.mongodb.client.ClientSession; +import com.mongodb.client.ListCollectionNamesIterable; +import com.mongodb.client.ListCollectionsIterable; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.CreateViewOptions; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.client.model.AggregationLevel; +import com.mongodb.internal.client.model.changestream.ChangeStreamLevel; +import com.mongodb.internal.operation.Operations; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.Document; +import org.bson.UuidRepresentation; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.conversions.Bson; + +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.MongoNamespace.checkDatabaseNameValidity; +import static com.mongodb.assertions.Assertions.notNull; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.bson.codecs.configuration.CodecRegistries.withUuidRepresentation; + +/** + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
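+ * <p>
+ * A minimal usage sketch through the public {@link com.mongodb.client.MongoDatabase} interface (illustrative only;
+ * {@code client} is a hypothetical {@code MongoClient}):
+ * <pre>{@code
+ * MongoDatabase database = client.getDatabase("test");
+ * Document ping = database.runCommand(new Document("ping", 1));
+ * }</pre>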
+ */ +public class MongoDatabaseImpl implements MongoDatabase { + private final String name; + private final ReadPreference readPreference; + private final CodecRegistry codecRegistry; + private final WriteConcern writeConcern; + private final boolean retryWrites; + private final boolean retryReads; + private final ReadConcern readConcern; + private final UuidRepresentation uuidRepresentation; + @Nullable + private final AutoEncryptionSettings autoEncryptionSettings; + + private final TimeoutSettings timeoutSettings; + private final OperationExecutor executor; + private final Operations operations; + + public MongoDatabaseImpl(final String name, final CodecRegistry codecRegistry, final ReadPreference readPreference, + final WriteConcern writeConcern, final boolean retryWrites, final boolean retryReads, + final ReadConcern readConcern, final UuidRepresentation uuidRepresentation, + @Nullable final AutoEncryptionSettings autoEncryptionSettings, final TimeoutSettings timeoutSettings, + final OperationExecutor executor) { + checkDatabaseNameValidity(name); + this.name = notNull("name", name); + this.codecRegistry = notNull("codecRegistry", codecRegistry); + this.readPreference = notNull("readPreference", readPreference); + this.writeConcern = notNull("writeConcern", writeConcern); + this.retryWrites = retryWrites; + this.retryReads = retryReads; + this.readConcern = notNull("readConcern", readConcern); + this.uuidRepresentation = notNull("uuidRepresentation", uuidRepresentation); + this.autoEncryptionSettings = autoEncryptionSettings; + this.timeoutSettings = timeoutSettings; + this.executor = notNull("executor", executor); + this.operations = new Operations<>(new MongoNamespace(name, "_ignored"), BsonDocument.class, readPreference, + codecRegistry, readConcern, writeConcern, retryWrites, retryReads, timeoutSettings); + } + + @Override + public String getName() { + return name; + } + + @Override + public CodecRegistry getCodecRegistry() { + return codecRegistry; + } + + @Override + public ReadPreference getReadPreference() { + return readPreference; + } + + @Override + public WriteConcern getWriteConcern() { + return writeConcern; + } + + @Override + public ReadConcern getReadConcern() { + return readConcern; + } + + @Override + @Nullable + public Long getTimeout(final TimeUnit timeUnit) { + Long timeoutMS = timeoutSettings.getTimeoutMS(); + return timeoutMS == null ? 
null : notNull("timeUnit", timeUnit).convert(timeoutMS, MILLISECONDS); + } + + @Override + public MongoDatabase withCodecRegistry(final CodecRegistry codecRegistry) { + return new MongoDatabaseImpl(name, withUuidRepresentation(codecRegistry, uuidRepresentation), readPreference, writeConcern, retryWrites, + retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor); + } + + @Override + public MongoDatabase withReadPreference(final ReadPreference readPreference) { + return new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, retryWrites, retryReads, readConcern, + uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor); + } + + @Override + public MongoDatabase withWriteConcern(final WriteConcern writeConcern) { + return new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, retryWrites, retryReads, readConcern, + uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor); + } + + @Override + public MongoDatabase withReadConcern(final ReadConcern readConcern) { + return new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, retryWrites, retryReads, readConcern, + uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor); + } + + @Override + public MongoDatabase withTimeout(final long timeout, final TimeUnit timeUnit) { + return new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, retryWrites, retryReads, readConcern, + uuidRepresentation, autoEncryptionSettings, timeoutSettings.withTimeout(timeout, timeUnit), executor); + } + + @Override + public MongoCollection getCollection(final String collectionName) { + return getCollection(collectionName, Document.class); + } + + @Override + public MongoCollection getCollection(final String collectionName, final Class documentClass) { + return new MongoCollectionImpl<>(new MongoNamespace(name, collectionName), documentClass, codecRegistry, readPreference, + writeConcern, retryWrites, retryReads, readConcern, uuidRepresentation, autoEncryptionSettings, timeoutSettings, executor); + } + + @Override + public Document runCommand(final Bson command) { + return runCommand(command, Document.class); + } + + @Override + public Document runCommand(final Bson command, final ReadPreference readPreference) { + return runCommand(command, readPreference, Document.class); + } + + @Override + public TResult runCommand(final Bson command, final Class resultClass) { + return runCommand(command, ReadPreference.primary(), resultClass); + } + + @Override + public TResult runCommand(final Bson command, final ReadPreference readPreference, final Class resultClass) { + return executeCommand(null, command, readPreference, resultClass); + } + + @Override + public Document runCommand(final ClientSession clientSession, final Bson command) { + return runCommand(clientSession, command, ReadPreference.primary(), Document.class); + } + + @Override + public Document runCommand(final ClientSession clientSession, final Bson command, final ReadPreference readPreference) { + return runCommand(clientSession, command, readPreference, Document.class); + } + + @Override + public TResult runCommand(final ClientSession clientSession, final Bson command, final Class resultClass) { + return runCommand(clientSession, command, ReadPreference.primary(), resultClass); + } + + @Override + public TResult runCommand(final ClientSession clientSession, final Bson command, final ReadPreference readPreference, + final Class resultClass) { + 
notNull("clientSession", clientSession); + return executeCommand(clientSession, command, readPreference, resultClass); + } + + private TResult executeCommand(@Nullable final ClientSession clientSession, final Bson command, + final ReadPreference readPreference, final Class resultClass) { + notNull("readPreference", readPreference); + if (clientSession != null && clientSession.hasActiveTransaction() && !readPreference.equals(ReadPreference.primary())) { + throw new MongoClientException("Read preference in a transaction must be primary"); + } + return getExecutor().execute(operations.commandRead(command, resultClass), readPreference, readConcern, clientSession); + } + + @Override + public void drop() { + executeDrop(null); + } + + @Override + public void drop(final ClientSession clientSession) { + notNull("clientSession", clientSession); + executeDrop(clientSession); + } + + private void executeDrop(@Nullable final ClientSession clientSession) { + getExecutor().execute(operations.dropDatabase(), readConcern, clientSession); + } + + @Override + public ListCollectionNamesIterable listCollectionNames() { + return createListCollectionNamesIterable(null); + } + + @Override + public ListCollectionNamesIterable listCollectionNames(final ClientSession clientSession) { + notNull("clientSession", clientSession); + return createListCollectionNamesIterable(clientSession); + } + + private ListCollectionNamesIterable createListCollectionNamesIterable(@Nullable final ClientSession clientSession) { + return new ListCollectionNamesIterableImpl(createListCollectionsIterable(clientSession, BsonDocument.class, true)); + } + + @Override + public ListCollectionsIterable listCollections() { + return listCollections(Document.class); + } + + @Override + public ListCollectionsIterable listCollections(final Class resultClass) { + return createListCollectionsIterable(null, resultClass, false); + } + + @Override + public ListCollectionsIterable listCollections(final ClientSession clientSession) { + return listCollections(clientSession, Document.class); + } + + @Override + public ListCollectionsIterable listCollections(final ClientSession clientSession, final Class resultClass) { + notNull("clientSession", clientSession); + return createListCollectionsIterable(clientSession, resultClass, false); + } + + private ListCollectionsIterableImpl createListCollectionsIterable(@Nullable final ClientSession clientSession, + final Class resultClass, + final boolean collectionNamesOnly) { + return new ListCollectionsIterableImpl<>(clientSession, name, collectionNamesOnly, resultClass, codecRegistry, + ReadPreference.primary(), executor, retryReads, timeoutSettings); + } + + @Override + public void createCollection(final String collectionName) { + createCollection(collectionName, new CreateCollectionOptions()); + } + + @Override + public void createCollection(final String collectionName, final CreateCollectionOptions createCollectionOptions) { + executeCreateCollection(null, collectionName, createCollectionOptions); + } + + @Override + public void createCollection(final ClientSession clientSession, final String collectionName) { + createCollection(clientSession, collectionName, new CreateCollectionOptions()); + } + + @Override + public void createCollection(final ClientSession clientSession, final String collectionName, + final CreateCollectionOptions createCollectionOptions) { + notNull("clientSession", clientSession); + executeCreateCollection(clientSession, collectionName, createCollectionOptions); + } + + private void 
executeCreateCollection(@Nullable final ClientSession clientSession, final String collectionName, + final CreateCollectionOptions createCollectionOptions) { + getExecutor().execute(operations.createCollection(collectionName, createCollectionOptions, autoEncryptionSettings), + readConcern, clientSession); + } + + @Override + public void createView(final String viewName, final String viewOn, final List pipeline) { + createView(viewName, viewOn, pipeline, new CreateViewOptions()); + } + + @Override + public void createView(final String viewName, final String viewOn, final List pipeline, + final CreateViewOptions createViewOptions) { + executeCreateView(null, viewName, viewOn, pipeline, createViewOptions); + } + + @Override + public void createView(final ClientSession clientSession, final String viewName, final String viewOn, + final List pipeline) { + createView(clientSession, viewName, viewOn, pipeline, new CreateViewOptions()); + } + + @Override + public void createView(final ClientSession clientSession, final String viewName, final String viewOn, + final List pipeline, final CreateViewOptions createViewOptions) { + notNull("clientSession", clientSession); + executeCreateView(clientSession, viewName, viewOn, pipeline, createViewOptions); + } + + @Override + public ChangeStreamIterable watch() { + return watch(Collections.emptyList()); + } + + @Override + public ChangeStreamIterable watch(final Class resultClass) { + return watch(Collections.emptyList(), resultClass); + } + + @Override + public ChangeStreamIterable watch(final List pipeline) { + return watch(pipeline, Document.class); + } + + @Override + public ChangeStreamIterable watch(final List pipeline, final Class resultClass) { + return createChangeStreamIterable(null, pipeline, resultClass); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession) { + return watch(clientSession, Collections.emptyList(), Document.class); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession, final Class resultClass) { + return watch(clientSession, Collections.emptyList(), resultClass); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession, final List pipeline) { + return watch(clientSession, pipeline, Document.class); + } + + @Override + public ChangeStreamIterable watch(final ClientSession clientSession, final List pipeline, + final Class resultClass) { + notNull("clientSession", clientSession); + return createChangeStreamIterable(clientSession, pipeline, resultClass); + } + + @Override + public AggregateIterable aggregate(final List pipeline) { + return aggregate(pipeline, Document.class); + } + + @Override + public AggregateIterable aggregate(final List pipeline, final Class resultClass) { + return createAggregateIterable(null, pipeline, resultClass); + } + + @Override + public AggregateIterable aggregate(final ClientSession clientSession, final List pipeline) { + return aggregate(clientSession, pipeline, Document.class); + } + + @Override + public AggregateIterable aggregate(final ClientSession clientSession, final List pipeline, + final Class resultClass) { + notNull("clientSession", clientSession); + return createAggregateIterable(clientSession, pipeline, resultClass); + } + + private AggregateIterable createAggregateIterable(@Nullable final ClientSession clientSession, + final List pipeline, + final Class resultClass) { + return new AggregateIterableImpl<>(clientSession, name, Document.class, resultClass, codecRegistry, + readPreference, 
readConcern, writeConcern, executor, pipeline, AggregationLevel.DATABASE, retryReads, timeoutSettings); + } + + private ChangeStreamIterable createChangeStreamIterable(@Nullable final ClientSession clientSession, + final List pipeline, + final Class resultClass) { + return new ChangeStreamIterableImpl<>(clientSession, name, codecRegistry, readPreference, readConcern, executor, + pipeline, resultClass, ChangeStreamLevel.DATABASE, retryReads, timeoutSettings); + } + + private void executeCreateView(@Nullable final ClientSession clientSession, final String viewName, final String viewOn, + final List pipeline, final CreateViewOptions createViewOptions) { + notNull("createViewOptions", createViewOptions); + getExecutor().execute(operations.createView(viewName, viewOn, pipeline, createViewOptions), readConcern, clientSession); + } + + private OperationExecutor getExecutor() { + return executor.withTimeoutSettings(timeoutSettings); + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoIterableImpl.java b/driver-sync/src/main/com/mongodb/client/internal/MongoIterableImpl.java new file mode 100644 index 00000000000..b642f0f1189 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/MongoIterableImpl.java @@ -0,0 +1,170 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.internal; + +import com.mongodb.Function; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.client.ClientSession; +import com.mongodb.client.MongoCursor; +import com.mongodb.client.MongoIterable; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.operation.BatchCursor; +import com.mongodb.internal.operation.ReadOperationCursor; +import com.mongodb.lang.Nullable; + +import java.util.Collection; +import java.util.concurrent.TimeUnit; +import java.util.function.Consumer; + +import static com.mongodb.assertions.Assertions.isTrueArgument; +import static com.mongodb.assertions.Assertions.notNull; + +/** + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
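+ * <p>
+ * Subclasses supply the read operation to execute; callers consume results through the cursor (an illustrative
+ * sketch; {@code iterable} and {@code process} are hypothetical):
+ * <pre>{@code
+ * try (MongoCursor<Document> cursor = iterable.batchSize(100).cursor()) {
+ *     while (cursor.hasNext()) {
+ *         process(cursor.next());
+ *     }
+ * }
+ * }</pre>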
+ */ +public abstract class MongoIterableImpl implements MongoIterable { + private final ClientSession clientSession; + private final ReadConcern readConcern; + private final OperationExecutor executor; + private final ReadPreference readPreference; + private final boolean retryReads; + private final TimeoutSettings timeoutSettings; + private Integer batchSize; + private TimeoutMode timeoutMode; + + public MongoIterableImpl(@Nullable final ClientSession clientSession, final OperationExecutor executor, final ReadConcern readConcern, + final ReadPreference readPreference, final boolean retryReads, final TimeoutSettings timeoutSettings) { + this.clientSession = clientSession; + this.executor = notNull("executor", executor); + this.readConcern = notNull("readConcern", readConcern); + this.readPreference = notNull("readPreference", readPreference); + this.retryReads = retryReads; + this.timeoutSettings = timeoutSettings; + } + + public abstract ReadOperationCursor asReadOperation(); + + @Nullable + ClientSession getClientSession() { + return clientSession; + } + + protected abstract OperationExecutor getExecutor(); + + OperationExecutor getExecutor(final TimeoutSettings timeoutSettings) { + return executor.withTimeoutSettings(timeoutSettings); + } + + ReadPreference getReadPreference() { + return readPreference; + } + + protected ReadConcern getReadConcern() { + return readConcern; + } + + protected boolean getRetryReads() { + return retryReads; + } + + protected TimeoutSettings getTimeoutSettings() { + return timeoutSettings; + } + + @Nullable + public Integer getBatchSize() { + return batchSize; + } + + @Override + public MongoIterable batchSize(final int batchSize) { + this.batchSize = batchSize; + return this; + } + + @Nullable + public TimeoutMode getTimeoutMode() { + return timeoutMode; + } + + public MongoIterable timeoutMode(final TimeoutMode timeoutMode) { + if (timeoutSettings.getTimeoutMS() == null) { + throw new IllegalArgumentException("TimeoutMode requires timeoutMS to be set."); + } + this.timeoutMode = timeoutMode; + return this; + } + + @Override + public MongoCursor iterator() { + return new MongoBatchCursorAdapter<>(execute()); + } + + @Override + public MongoCursor cursor() { + return iterator(); + } + + @Nullable + @Override + public TResult first() { + try (MongoCursor cursor = iterator()) { + if (!cursor.hasNext()) { + return null; + } + return cursor.next(); + } + } + + @Override + public MongoIterable map(final Function mapper) { + return new MappingIterable<>(this, mapper); + } + + @Override + public void forEach(final Consumer action) { + try (MongoCursor cursor = iterator()) { + while (cursor.hasNext()) { + action.accept(cursor.next()); + } + } + } + + @Override + public
> A into(final A target) { + forEach(target::add); + return target; + } + + private BatchCursor execute() { + return getExecutor().execute(asReadOperation(), readPreference, readConcern, clientSession); + } + + + protected long validateMaxAwaitTime(final long maxAwaitTime, final TimeUnit timeUnit) { + notNull("timeUnit", timeUnit); + Long timeoutMS = timeoutSettings.getTimeoutMS(); + long maxAwaitTimeMS = TimeUnit.MILLISECONDS.convert(maxAwaitTime, timeUnit); + + isTrueArgument("maxAwaitTimeMS must be less than timeoutMS", timeoutMS == null || timeoutMS == 0 + || timeoutMS > maxAwaitTimeMS); + + return maxAwaitTimeMS; + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/MongoMappingCursor.java b/driver-sync/src/main/com/mongodb/client/internal/MongoMappingCursor.java new file mode 100644 index 00000000000..5a9a011e3c2 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/MongoMappingCursor.java @@ -0,0 +1,82 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.internal; + +import com.mongodb.Function; +import com.mongodb.ServerAddress; +import com.mongodb.ServerCursor; +import com.mongodb.client.MongoCursor; +import com.mongodb.lang.Nullable; + +import static com.mongodb.assertions.Assertions.notNull; + +class MongoMappingCursor implements MongoCursor { + private final MongoCursor proxied; + private final Function mapper; + + MongoMappingCursor(final MongoCursor proxied, final Function mapper) { + this.proxied = notNull("proxied", proxied); + this.mapper = notNull("mapper", mapper); + } + + @Override + public void close() { + proxied.close(); + } + + @Override + public boolean hasNext() { + return proxied.hasNext(); + } + + @Override + public U next() { + return mapper.apply(proxied.next()); + } + + @Override + public int available() { + return proxied.available(); + } + + @Nullable + @Override + public U tryNext() { + T proxiedNext = proxied.tryNext(); + if (proxiedNext == null) { + return null; + } else { + return mapper.apply(proxiedNext); + } + } + + @Override + public void remove() { + proxied.remove(); + } + + @Nullable + @Override + public ServerCursor getServerCursor() { + return proxied.getServerCursor(); + } + + @Override + public ServerAddress getServerAddress() { + return proxied.getServerAddress(); + } +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/OperationExecutor.java b/driver-sync/src/main/com/mongodb/client/internal/OperationExecutor.java new file mode 100644 index 00000000000..1ec19483afc --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/OperationExecutor.java @@ -0,0 +1,94 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.internal; + +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.client.ClientSession; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.operation.ReadOperation; +import com.mongodb.internal.operation.WriteOperation; +import com.mongodb.lang.Nullable; + +/** + * An interface describing the execution of a read or a write operation. + * + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
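+ * <p>
+ * A minimal usage sketch (illustrative only; {@code findOperation}, {@code timeoutSettings} and {@code session} are
+ * hypothetical, with {@code findOperation} assumed to be a {@code ReadOperation<BatchCursor<Document>>}):
+ * <pre>{@code
+ * BatchCursor<Document> cursor = executor
+ *         .withTimeoutSettings(timeoutSettings)
+ *         .execute(findOperation, ReadPreference.primary(), ReadConcern.DEFAULT, session);
+ * }</pre>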
+ */ +@SuppressWarnings("overloads") +public interface OperationExecutor { + /** + * Execute the read operation with the given read preference. + * + * @param the operations result type. + * @param operation the read operation. + * @param readPreference the read preference. + * @param readConcern the read concern + * @return the result of executing the operation. + */ + T execute(ReadOperation operation, ReadPreference readPreference, ReadConcern readConcern); + + /** + * Execute the write operation. + * + * @param the operations result type. + * @param operation the write operation. + * @param readConcern the read concern + * @return the result of executing the operation. + */ + T execute(WriteOperation operation, ReadConcern readConcern); + + /** + * Execute the read operation with the given read preference. + * + * @param the operations result type. + * @param operation the read operation. + * @param readPreference the read preference. + * @param readConcern the read concern + * @param session the session to associate this operation with + * @return the result of executing the operation. + */ + T execute(ReadOperation operation, ReadPreference readPreference, ReadConcern readConcern, @Nullable ClientSession session); + + /** + * Execute the write operation. + * + * @param the operations result type. + * @param operation the write operation. + * @param readConcern the read concern + * @param session the session to associate this operation with + * @return the result of executing the operation. + */ + T execute(WriteOperation operation, ReadConcern readConcern, @Nullable ClientSession session); + + /** + * Create a new OperationExecutor with a specific timeout settings + * + * @param timeoutSettings the TimeoutContext to use for the operations + * @return the new operation executor with the set timeout context + * @since 5.2 + */ + OperationExecutor withTimeoutSettings(TimeoutSettings timeoutSettings); + + /** + * Returns the current timeout settings + * + * @return the timeout settings + * @since 5.2 + */ + TimeoutSettings getTimeoutSettings(); +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/TimeoutHelper.java b/driver-sync/src/main/com/mongodb/client/internal/TimeoutHelper.java new file mode 100644 index 00000000000..6a5ef68e615 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/TimeoutHelper.java @@ -0,0 +1,71 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.internal; + +import com.mongodb.client.MongoCollection; +import com.mongodb.client.MongoDatabase; +import com.mongodb.internal.TimeoutContext; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; + +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +/** + *
+ * <p>This class is not part of the public API and may be removed or changed at any time</p>
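+ * <p>
+ * A minimal usage sketch (illustrative only; {@code collection} and {@code operationTimeout} are hypothetical):
+ * <pre>{@code
+ * MongoCollection<Document> bounded =
+ *         TimeoutHelper.collectionWithTimeout(collection, "Cleanup exceeded the timeout limit.", operationTimeout);
+ * }</pre>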
+ */ +public final class TimeoutHelper { + private static final String DEFAULT_TIMEOUT_MESSAGE = "Operation exceeded the timeout limit."; + + private TimeoutHelper() { + //NOP + } + + public static MongoCollection collectionWithTimeout(final MongoCollection collection, + final String message, + @Nullable final Timeout timeout) { + if (timeout != null) { + return timeout.call(MILLISECONDS, + () -> collection.withTimeout(0, MILLISECONDS), + ms -> collection.withTimeout(ms, MILLISECONDS), + () -> TimeoutContext.throwMongoTimeoutException(message)); + } + return collection; + } + + public static MongoCollection collectionWithTimeout(final MongoCollection collection, + @Nullable final Timeout timeout) { + return collectionWithTimeout(collection, DEFAULT_TIMEOUT_MESSAGE, timeout); + } + + public static MongoDatabase databaseWithTimeout(final MongoDatabase database, + final String message, + @Nullable final Timeout timeout) { + if (timeout != null) { + return timeout.call(MILLISECONDS, + () -> database.withTimeout(0, MILLISECONDS), + ms -> database.withTimeout(ms, MILLISECONDS), + () -> TimeoutContext.throwMongoTimeoutException(message)); + } + return database; + } + + public static MongoDatabase databaseWithTimeout(final MongoDatabase database, + @Nullable final Timeout timeout) { + return databaseWithTimeout(database, DEFAULT_TIMEOUT_MESSAGE, timeout); + } + +} diff --git a/driver-sync/src/main/com/mongodb/client/internal/package-info.java b/driver-sync/src/main/com/mongodb/client/internal/package-info.java new file mode 100644 index 00000000000..4d3affbab2e --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/internal/package-info.java @@ -0,0 +1,25 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains internal functionality that may change at any time. + */ +@Internal +@NonNullApi +package com.mongodb.client.internal; + +import com.mongodb.annotations.Internal; +import com.mongodb.lang.NonNullApi; diff --git a/driver-sync/src/main/com/mongodb/client/package-info.java b/driver-sync/src/main/com/mongodb/client/package-info.java new file mode 100644 index 00000000000..f98e983c21c --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/package-info.java @@ -0,0 +1,23 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains the synchronous CRUD API. 
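+ * <p>
+ * A typical entry point (an illustrative sketch; the connection string is hypothetical):
+ * <pre>{@code
+ * try (MongoClient client = MongoClients.create("mongodb://localhost:27017")) {
+ *     Document first = client.getDatabase("test").getCollection("coll").find().first();
+ * }
+ * }</pre>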
+ */ +@NonNullApi +package com.mongodb.client; + +import com.mongodb.lang.NonNullApi; diff --git a/driver-sync/src/main/com/mongodb/client/vault/ClientEncryption.java b/driver-sync/src/main/com/mongodb/client/vault/ClientEncryption.java new file mode 100644 index 00000000000..8b883273ca3 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/vault/ClientEncryption.java @@ -0,0 +1,222 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.vault; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.MongoUpdatedEncryptedFieldsException; +import com.mongodb.client.FindIterable; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.CreateEncryptedCollectionParams; +import com.mongodb.client.model.vault.DataKeyOptions; +import com.mongodb.client.model.vault.EncryptOptions; +import com.mongodb.client.model.vault.RewrapManyDataKeyOptions; +import com.mongodb.client.model.vault.RewrapManyDataKeyResult; +import com.mongodb.client.result.DeleteResult; +import com.mongodb.lang.Nullable; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.BsonValue; +import org.bson.conversions.Bson; + +import java.io.Closeable; + +/** + * The Key vault. + *
+ * <p>
+ * Used to create data encryption keys, and to explicitly encrypt and decrypt values when auto-encryption is not an option.
+ * </p>
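+ * <p>
+ * A minimal explicit-encryption sketch (illustrative only; construction of {@code clientEncryption} is elided):
+ * <pre>{@code
+ * BsonBinary keyId = clientEncryption.createDataKey("local", new DataKeyOptions());
+ * BsonBinary ciphertext = clientEncryption.encrypt(new BsonString("secret"),
+ *         new EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic").keyId(keyId));
+ * }</pre>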
+ * + * @since 3.11 + */ +public interface ClientEncryption extends Closeable { + + /** + * Create a data key with the given KMS provider. + * + *
+ * <p>
+ * Creates a new key document and inserts it into the key vault collection.
+ * </p>
+ * + * @param kmsProvider the KMS provider + * @return the identifier for the created data key + */ + BsonBinary createDataKey(String kmsProvider); + + /** + * Create a data key with the given KMS provider and options. + * + *
+ * <p>
+ * Creates a new key document and inserts it into the key vault collection.
+ * </p>
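+ * <p>
+ * For example, with a hypothetical AWS master key document (illustrative sketch; the region and key values are
+ * placeholders):
+ * <pre>{@code
+ * BsonBinary keyId = clientEncryption.createDataKey("aws",
+ *         new DataKeyOptions().masterKey(BsonDocument.parse("{region: 'us-east-1', key: '<masterKeyArn>'}")));
+ * }</pre>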
+ * + * @param kmsProvider the KMS provider + * @param dataKeyOptions the options for data key creation + * @return the identifier for the created data key + */ + BsonBinary createDataKey(String kmsProvider, DataKeyOptions dataKeyOptions); + + /** + * Encrypt the given value with the given options. + *
+ * <p>
+ * The driver may throw an exception for prohibited BSON value types.
+ * </p>
+ * + * @param value the value to encrypt + * @param options the options for data encryption + * @return the encrypted value, a BSON binary of subtype 6 + */ + BsonBinary encrypt(BsonValue value, EncryptOptions options); + + /** + * Encrypts a Match Expression or Aggregate Expression to query a range index. + *
+ * <p>
+ * The expression is expected to be in one of the following forms:
+ * <ul>
+ *     <li>A Match Expression of this form:
+ *     {@code {$and: [{<field>: {$gt: <value1>}}, {<field>: {$lt: <value2>}}]}}</li>
+ *     <li>An Aggregate Expression of this form:
+ *     {@code {$and: [{$gt: [<fieldpath>, <value1>]}, {$lt: [<fieldpath>, <value2>]}]}}</li>
+ * </ul>
+ * {@code $gt} may also be {@code $gte}. {@code $lt} may also be {@code $lte}.
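+ * <p>
+ * For example, a range match expression over a hypothetical {@code age} field (illustrative sketch):
+ * <pre>{@code
+ * BsonDocument expression = BsonDocument.parse("{$and: [{age: {$gt: 18}}, {age: {$lt: 65}}]}");
+ * }</pre>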
+ * <p>
+ * This test specifically verifies the following part of the specification:
+ * <ul>
+ *    <li>KMS providers that include a name (e.g., "aws:myname") do not support automatic credentials.</li>
+ *    <li>Configuring a named KMS provider for automatic credentials will result in a runtime error from libmongocrypt.</li>
+ * </ul>
+ * <p>
+ * Detailed specification reference:
+ * Client-Side Encryption Spec
+ */
+ @Test
+ @DisplayName("Throw MongoCryptException when configured for automatic/on-demand credentials in ClientEncryptionSettings")
+ void shouldThrowMongoCryptExceptionWhenNamedKMSProviderUsesEmptyOnDemandCredentialsWithEncryptionSettings() {
+ assumeTrue(isClientSideEncryptionTest());
+
+ Map<String, Map<String, Object>> kmsProviders = new HashMap<String, Map<String, Object>>() {{
+ put("aws:name", new HashMap<>());
+ }};
+
+ Map<String, Supplier<Map<String, Object>>> kmsProviderPropertySuppliers = new HashMap<>();
+ kmsProviderPropertySuppliers.put("aws:name", () -> Assertions.fail("Supplier should not be called"));
+
+ ClientEncryptionSettings settings = ClientEncryptionSettings.builder()
+ .keyVaultNamespace("test.datakeys")
+ .kmsProviders(kmsProviders)
+ .kmsProviderPropertySuppliers(kmsProviderPropertySuppliers)
+ .keyVaultMongoClientSettings(Fixture.getMongoClientSettings())
+ .build();
+
+ MongoCryptException e = assertThrows(MongoCryptException.class, () -> {
+ createClientEncryption(settings).close();
+ });
+ assertTrue(e.getMessage().contains("On-demand credentials are not supported for named KMS providers."));
+ }
+
+ /**
+ * This is a custom prose test to enhance coverage.
+ * <p>
+ * This test specifically verifies the following part of the specification:
+ * <ul>
+ *    <li>KMS providers that include a name (e.g., "aws:myname") do not support automatic credentials.</li>
+ *    <li>Configuring a named KMS provider for automatic credentials will result in a runtime error from libmongocrypt.</li>
+ * </ul>
+ * <p>
+ * Detailed specification reference: + * Client-Side Encryption Spec + */ + @Test + @DisplayName("Throw MongoCryptException when configured for automatic/on-demand credentials in AutoEncryptionSettings") + public void shouldThrowMongoCryptExceptionWhenNamedKMSProviderUsesEmptyOnDemandCredentialsWithAutoEncryptionSettings() { + assumeTrue(isClientSideEncryptionTest()); + + Map> kmsProviders = new HashMap>() {{ + put("aws:name", new HashMap<>()); + }}; + + Map>> kmsProviderPropertySuppliers = new HashMap<>(); + kmsProviderPropertySuppliers.put("aws:name", () -> Assertions.fail("Supplier should not be called")); + + AutoEncryptionSettings autoEncryptionSettings = AutoEncryptionSettings.builder() + .kmsProviders(kmsProviders) + .keyVaultNamespace("test.datakeys") + .build(); + + MongoCryptException e = assertThrows(MongoCryptException.class, () -> { + createMongoClient(getMongoClientSettingsBuilder() + .autoEncryptionSettings(autoEncryptionSettings) + .build()).close(); + }); + assertTrue(e.getMessage().contains("On-demand credentials are not supported for named KMS providers.")); + } + + + + @Test + public void shouldIgnoreSupplierIfKmsProviderMapValueIsNotEmpty() { + assumeFalse(System.getenv().containsKey("AWS_ACCESS_KEY_ID")); + assumeTrue(isClientSideEncryptionTest()); + + Map> kmsProviders = new HashMap>() {{ + put("aws", new HashMap() {{ + put("accessKeyId", getEnv("AWS_ACCESS_KEY_ID")); + put("secretAccessKey", getEnv("AWS_SECRET_ACCESS_KEY")); + }}); + }}; + + Map>> kmsProviderPropertySuppliers = new HashMap>>() {{ + put("aws", () -> null); // if Supplier was actually used, an exception would be thrown because it's returning null + }}; + + try (ClientEncryption clientEncryption = createClientEncryption(ClientEncryptionSettings.builder() + .keyVaultNamespace("test.datakeys") + .kmsProviders(kmsProviders) + .kmsProviderPropertySuppliers(kmsProviderPropertySuppliers) + .keyVaultMongoClientSettings(Fixture.getMongoClientSettings()) + .build())) { + assertDoesNotThrow(() -> + clientEncryption.createDataKey("aws", new DataKeyOptions().masterKey(BsonDocument.parse(MASTER_KEY)))); + } + } + + @NonNull + private static BsonDocument getSchema(final String base64DataKeyId) { + return BsonDocument.parse("{" + + " properties: {" + + " encryptedField: {" + + " encrypt: {" + + " keyId: [{" + + " \"$binary\": {" + + " \"base64\": \"" + base64DataKeyId + "\"," + + " \"subType\": \"04\"" + + " }" + + " }]," + + " bsonType: \"string\"," + + " algorithm: \"AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic\"" + + " }" + + " }" + + " }," + + " \"bsonType\": \"object\"" + + "}"); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionDeadlockTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionDeadlockTest.java new file mode 100644 index 00000000000..980f724bac8 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionDeadlockTest.java @@ -0,0 +1,246 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * + */ + +package com.mongodb.client; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.MongoClientSettings; +import com.mongodb.ReadConcern; +import com.mongodb.WriteConcern; +import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.Filters; +import com.mongodb.client.model.ValidationOptions; +import com.mongodb.client.model.vault.EncryptOptions; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.client.vault.ClientEncryptions; +import com.mongodb.event.CommandEvent; +import com.mongodb.event.CommandListener; +import com.mongodb.event.CommandStartedEvent; +import com.mongodb.internal.connection.TestCommandListener; +import com.mongodb.lang.NonNull; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.io.IOException; +import java.net.URISyntaxException; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Stream; + +import static com.mongodb.ClusterFixture.isClientSideEncryptionTest; +import static com.mongodb.client.Fixture.getMongoClient; +import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static com.mongodb.fixture.EncryptionFixture.KmsProviderType.LOCAL; +import static com.mongodb.fixture.EncryptionFixture.getKmsProviders; +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assumptions.assumeTrue; +import static org.junit.jupiter.params.provider.Arguments.arguments; +import static util.JsonPoweredTestHelper.getTestDocument; + +public abstract class AbstractClientSideEncryptionDeadlockTest { + private BsonBinary cipherText; + private MongoClient encryptingClient; + private Map> kmsProviders; + + protected abstract MongoClient createMongoClient(MongoClientSettings settings); + + @BeforeEach + public void setUp() throws IOException, URISyntaxException { + assumeTrue(isClientSideEncryptionTest()); + + MongoDatabase keyVaultDatabase = getMongoClient().getDatabase("keyvault"); + MongoCollection dataKeysCollection = keyVaultDatabase.getCollection("datakeys", BsonDocument.class) + .withWriteConcern(WriteConcern.MAJORITY); + dataKeysCollection.drop(); + dataKeysCollection.insertOne(bsonDocumentFromPath("external-key.json")); + + MongoDatabase encryptedDatabase = getMongoClient().getDatabase("db"); + MongoCollection encryptedCollection = encryptedDatabase.getCollection("coll", BsonDocument.class) + .withWriteConcern(WriteConcern.MAJORITY); + encryptedCollection.drop(); + encryptedDatabase.createCollection("coll", new CreateCollectionOptions() + .validationOptions(new ValidationOptions() + .validator(new BsonDocument("$jsonSchema", bsonDocumentFromPath("external-schema.json"))))); + + kmsProviders = getKmsProviders(LOCAL); + ClientEncryption clientEncryption = ClientEncryptions.create( + ClientEncryptionSettings.builder() + 
.keyVaultMongoClientSettings(getKeyVaultClientSettings(new TestCommandListener())) + .keyVaultNamespace("keyvault.datakeys") + .kmsProviders(kmsProviders) + .build()); + cipherText = clientEncryption.encrypt(new BsonString("string0"), + new EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic").keyAltName("local")); + clientEncryption.close(); + } + + @AfterEach + @SuppressWarnings("try") + public void cleanUp() { + //noinspection EmptyTryBlock + try (MongoClient ignored = this.encryptingClient) { + // just using try-with-resources to ensure they all get closed, even in the case of exceptions + } + } + + private static Stream testArgumentProvider() { + return Stream.of( + // + arguments(1, 2, false, false, + asList(new ExpectedEvent("db", "listCollections"), + new ExpectedEvent("keyvault", "find"), + new ExpectedEvent("db", "insert"), + new ExpectedEvent("db", "find")), + emptyList()), + arguments(1, 2, false, true, + asList(new ExpectedEvent("db", "listCollections"), + new ExpectedEvent("db", "insert"), + new ExpectedEvent("db", "find")), + asList(new ExpectedEvent("keyvault", "find"))), + arguments(1, 2, true, false, + asList(new ExpectedEvent("db", "find"), + new ExpectedEvent("keyvault", "find")), + emptyList()), + + arguments(1, 1, true, true, + asList(new ExpectedEvent("db", "find")), + asList(new ExpectedEvent("keyvault", "find"))) + ); + } + + @ParameterizedTest + @MethodSource("testArgumentProvider") + public void shouldPassAllOutcomes(final int maxPoolSize, + final int expectedNumberOfClientsCreated, + final boolean bypassAutoEncryption, + final boolean externalKeyVaultClient, + final List expectedEncryptingClientEvents, + final List expectedExternalKeyVaultsClientEvents) { + AutoEncryptionSettings.Builder autoEncryptionSettingsBuilder = AutoEncryptionSettings.builder() + .keyVaultNamespace("keyvault.datakeys") + .kmsProviders(kmsProviders) + .bypassAutoEncryption(bypassAutoEncryption); + TestCommandListener externalKeyVaultClientCommandListener = + new TestCommandListener(singletonList("commandStartedEvent"), emptyList()); + if (externalKeyVaultClient) { + autoEncryptionSettingsBuilder.keyVaultMongoClientSettings(getKeyVaultClientSettings(externalKeyVaultClientCommandListener)); + } + + TestCommandListener encryptingClientCommandListener = new TestCommandListener(singletonList("commandStartedEvent"), emptyList()); + encryptingClient = createMongoClient(getClientSettings(maxPoolSize, encryptingClientCommandListener, + autoEncryptionSettingsBuilder.build())); + + BsonDocument unencryptedDocument = new BsonDocument("_id", new BsonInt32(0)).append("encrypted", new BsonString("string0")); + + if (bypassAutoEncryption) { + getMongoClient().getDatabase("db") + .getCollection("coll", BsonDocument.class) + .withWriteConcern(WriteConcern.MAJORITY) + .insertOne(new BsonDocument("_id", new BsonInt32(0)).append("encrypted", cipherText)); + + } else { + encryptingClient.getDatabase("db") + .getCollection("coll", BsonDocument.class) + .withWriteConcern(WriteConcern.MAJORITY) + .insertOne(unencryptedDocument); + } + + BsonDocument result = encryptingClient.getDatabase("db") + .getCollection("coll", BsonDocument.class) + .find().filter(Filters.eq("_id", 0)).first(); + + assertEquals(unencryptedDocument, result); + + assertEquals(expectedNumberOfClientsCreated, getNumUniqueClients(encryptingClientCommandListener), "Unique clients"); + + assertEventEquality(encryptingClientCommandListener, expectedEncryptingClientEvents); + assertEventEquality(externalKeyVaultClientCommandListener, 
expectedExternalKeyVaultsClientEvents); + } + + private void assertEventEquality(final TestCommandListener commandListener, final List expectedStartEvents) { + List actualStartedEvents = commandListener.getCommandStartedEvents(); + assertEquals(expectedStartEvents.size(), actualStartedEvents.size()); + for (int i = 0; i < expectedStartEvents.size(); i++) { + ExpectedEvent expectedEvent = expectedStartEvents.get(i); + CommandStartedEvent actualEvent = actualStartedEvents.get(i); + assertEquals(expectedEvent.getDatabase(), actualEvent.getDatabaseName(), "Database name"); + assertEquals(expectedEvent.getCommandName(), actualEvent.getCommandName(), "Command name"); + } + } + + private int getNumUniqueClients(final TestCommandListener commandListener) { + Set uniqueClients = new HashSet<>(); + for (CommandEvent event : commandListener.getEvents()) { + uniqueClients.add(event.getConnectionDescription().getConnectionId().getServerId().getClusterId().getValue()); + } + return uniqueClients.size(); + } + + @NonNull + private static MongoClientSettings getKeyVaultClientSettings(final CommandListener commandListener) { + return getClientSettings(1, commandListener, null); + } + + @NonNull + private static MongoClientSettings getClientSettings(final int maxPoolSize, + final CommandListener commandListener, + final AutoEncryptionSettings autoEncryptionSettings) { + return getMongoClientSettingsBuilder() + .autoEncryptionSettings(autoEncryptionSettings) + .readConcern(ReadConcern.MAJORITY) + .writeConcern(WriteConcern.MAJORITY) + .addCommandListener(commandListener) + .applyToConnectionPoolSettings(builder -> builder.maxSize(maxPoolSize)) + .build(); + } + + private static BsonDocument bsonDocumentFromPath(final String path) { + return getTestDocument("client-side-encryption/external/" + path); + } + + private static final class ExpectedEvent { + private final String database; + private final String commandName; + + ExpectedEvent(final String database, final String commandName) { + this.database = database; + this.commandName = commandName; + } + + String getDatabase() { + return database; + } + + String getCommandName() { + return commandName; + } + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionDecryptionEventsTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionDecryptionEventsTest.java new file mode 100644 index 00000000000..24fbf17779a --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionDecryptionEventsTest.java @@ -0,0 +1,219 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * + */ + +package com.mongodb.client; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.MongoClientException; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoCommandException; +import com.mongodb.MongoSocketReadException; +import com.mongodb.client.model.Aggregates; +import com.mongodb.client.model.vault.EncryptOptions; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.event.CommandSucceededEvent; +import com.mongodb.fixture.EncryptionFixture; +import com.mongodb.internal.connection.TestCommandListener; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.BsonType; +import org.bson.Document; +import org.bson.conversions.Bson; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.util.List; +import java.util.Map; + +import static com.mongodb.ClusterFixture.configureFailPoint; +import static com.mongodb.ClusterFixture.isSharded; +import static com.mongodb.ClusterFixture.isStandalone; +import static com.mongodb.ClusterFixture.serverVersionAtLeast; +import static com.mongodb.client.Fixture.getDefaultDatabase; +import static com.mongodb.client.Fixture.getDefaultDatabaseName; +import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static com.mongodb.fixture.EncryptionFixture.getKmsProviders; +import static java.util.Collections.singletonList; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assumptions.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +// See: https://github.com/mongodb/specifications/tree/master/source/client-side-encryption/tests#decryption-events +public abstract class AbstractClientSideEncryptionDecryptionEventsTest { + private static final List AGGREGATION_PIPELINE = singletonList(Aggregates.match(new BsonDocument())); + private MongoClient encryptedClient; + private ClientEncryption clientEncryption; + private TestCommandListener commandListener; + private BsonBinary ciphertext; + private BsonBinary malformedCiphertext; + + + protected abstract MongoClient createMongoClient(MongoClientSettings settings); + protected abstract ClientEncryption createClientEncryption(ClientEncryptionSettings settings); + + @BeforeEach + public void setUp() { + assumeTrue(serverVersionAtLeast(6, 0)); + assumeFalse(isStandalone()); + assumeFalse(isSharded()); + + getDefaultDatabase().getCollection("decryption_events").drop(); + getDefaultDatabase().createCollection("decryption_events"); + + Map> kmsProviders = getKmsProviders(EncryptionFixture.KmsProviderType.LOCAL); + + + clientEncryption = createClientEncryption(ClientEncryptionSettings.builder() + .keyVaultMongoClientSettings(getMongoClientSettingsBuilder().build()) + .keyVaultNamespace("keyvault.datakeys") + .kmsProviders(kmsProviders) + .build()); + + BsonBinary keyId = clientEncryption.createDataKey("local"); + + ciphertext = clientEncryption.encrypt(new BsonString("hello"), new EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic").keyId(keyId)); + + // Copy ciphertext into a variable named malformedCiphertext. Change the last byte. This will produce an invalid HMAC tag. 
+        byte[] malformedBytes = ciphertext.getData().clone();
+        malformedBytes[malformedBytes.length - 1] = (byte) (malformedBytes[malformedBytes.length - 1] == 0 ? 1 : 0);
+        malformedCiphertext = new BsonBinary(ciphertext.getType(), malformedBytes);
+
+        commandListener = new TestCommandListener();
+        encryptedClient = createMongoClient(getMongoClientSettingsBuilder()
+                .autoEncryptionSettings(
+                        AutoEncryptionSettings.builder()
+                                .keyVaultNamespace("keyvault.datakeys")
+                                .kmsProviders(kmsProviders)
+                                .build())
+                .retryReads(false)
+                .addCommandListener(commandListener)
+                .build());
+    }
+
+    @AfterEach
+    @SuppressWarnings("try")
+    public void cleanUp() {
+        //noinspection EmptyTryBlock
+        try (ClientEncryption ignored = this.clientEncryption;
+             MongoClient ignored1 = this.encryptedClient
+        ) {
+            // just using try-with-resources to ensure they all get closed, even in the case of exceptions
+        }
+    }
+
+    @Test
+    public void commandError() {
+        configureFailPoint(BsonDocument.parse("{"
+                + "    'configureFailPoint': 'failCommand',"
+                + "    'mode': {"
+                + "        'times': 1"
+                + "    },"
+                + "    'data': {"
+                + "        'errorCode': 123,"
+                + "        'failCommands': ["
+                + "            'aggregate'"
+                + "        ]"
+                + "    }"
+                + "}"));
+
+        assertThrows(MongoCommandException.class, () -> encryptedClient
+                .getDatabase(getDefaultDatabaseName())
+                .getCollection("decryption_events")
+                .aggregate(AGGREGATION_PIPELINE)
+                .first());
+
+        assertDoesNotThrow(() -> commandListener.getCommandFailedEvent("aggregate"));
+    }
+
+    @Test
+    public void networkError() {
+        configureFailPoint(BsonDocument.parse("{"
+                + "    'configureFailPoint': 'failCommand',"
+                + "    'mode': {"
+                + "        'times': 1"
+                + "    },"
+                + "    'data': {"
+                + "        'errorCode': 123,"
+                + "        'closeConnection': true,"
+                + "        'failCommands': ["
+                + "            'aggregate'"
+                + "        ]"
+                + "    }"
+                + "}"));
+
+        assertThrows(MongoSocketReadException.class, () -> encryptedClient
+                .getDatabase(getDefaultDatabaseName())
+                .getCollection("decryption_events")
+                .aggregate(AGGREGATION_PIPELINE)
+                .first());
+
+        assertDoesNotThrow(() -> commandListener.getCommandFailedEvent("aggregate"));
+    }
+
+    @Test
+    public void decryptError() {
+        MongoCollection<Document> decryptionEvents = encryptedClient
+                .getDatabase(getDefaultDatabaseName())
+                .getCollection("decryption_events");
+
+        decryptionEvents.insertOne(new Document("encrypted", malformedCiphertext));
+        assertThrows(MongoClientException.class, () -> decryptionEvents
+                .aggregate(AGGREGATION_PIPELINE)
+                .first());
+
+        CommandSucceededEvent succeededEvent = commandListener.getCommandSucceededEvent("aggregate");
+        assertEquals(BsonType.BINARY, succeededEvent
+                .getResponse()
+                .getDocument("cursor")
+                .getArray("firstBatch")
+                .get(0)
+                .asDocument()
+                .get("encrypted")
+                .getBsonType());
+    }
+
+    @Test
+    public void decryptSuccess() {
+        MongoCollection<Document> decryptionEvents = encryptedClient
+                .getDatabase(getDefaultDatabaseName())
+                .getCollection("decryption_events");
+
+        decryptionEvents.insertOne(new Document("encrypted", ciphertext));
+        Document document = assertDoesNotThrow(() -> decryptionEvents
+                .aggregate(AGGREGATION_PIPELINE)
+                .first());
+
+        assertEquals("hello", document.getString("encrypted"));
+        CommandSucceededEvent succeededEvent = commandListener.getCommandSucceededEvent("aggregate");
+        assertEquals(BsonType.BINARY, succeededEvent
+                .getResponse()
+                .getDocument("cursor")
+                .getArray("firstBatch")
+                .get(0)
+                .asDocument()
+                .get("encrypted")
+                .getBsonType());
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionExplicitEncryptionTest.java
b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionExplicitEncryptionTest.java new file mode 100644 index 00000000000..9f4594143f1 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionExplicitEncryptionTest.java @@ -0,0 +1,208 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * + */ + +package com.mongodb.client; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoNamespace; +import com.mongodb.WriteConcern; +import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.DropCollectionOptions; +import com.mongodb.client.model.vault.EncryptOptions; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.fixture.EncryptionFixture; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +import static com.mongodb.ClusterFixture.isStandalone; +import static com.mongodb.ClusterFixture.serverVersionAtLeast; +import static com.mongodb.client.Fixture.getDefaultDatabase; +import static com.mongodb.client.Fixture.getDefaultDatabaseName; +import static com.mongodb.client.Fixture.getMongoClient; +import static com.mongodb.client.Fixture.getMongoClientSettings; +import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static com.mongodb.fixture.EncryptionFixture.getKmsProviders; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assumptions.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeTrue; +import static util.JsonPoweredTestHelper.getTestDocument; + +public abstract class AbstractClientSideEncryptionExplicitEncryptionTest { + private static final BsonString ENCRYPTED_INDEXED_VALUE = new BsonString("encrypted indexed value"); + private static final BsonString ENCRYPTED_UNINDEXED_VALUE = new BsonString("encrypted unindexed value"); + private MongoClient encryptedClient; + private ClientEncryption clientEncryption; + private BsonBinary key1Id; + + protected abstract MongoClient createMongoClient(MongoClientSettings settings); + protected abstract ClientEncryption createClientEncryption(ClientEncryptionSettings settings); + + @BeforeEach + public void setUp() { + assumeTrue(serverVersionAtLeast(7, 0)); + assumeFalse(isStandalone()); + + MongoNamespace dataKeysNamespace = new MongoNamespace("keyvault.datakeys"); + BsonDocument encryptedFields = bsonDocumentFromPath("encryptedFields.json"); + BsonDocument key1Document = 
                bsonDocumentFromPath("keys/key1-document.json");
+
+        MongoDatabase explicitEncryptionDatabase = getDefaultDatabase();
+        explicitEncryptionDatabase.getCollection("explicit_encryption")
+                .drop(new DropCollectionOptions().encryptedFields(encryptedFields));
+        explicitEncryptionDatabase.createCollection("explicit_encryption",
+                new CreateCollectionOptions().encryptedFields(encryptedFields));
+
+        MongoCollection<BsonDocument> dataKeysCollection = getMongoClient()
+                .getDatabase(dataKeysNamespace.getDatabaseName())
+                .getCollection(dataKeysNamespace.getCollectionName(), BsonDocument.class)
+                .withWriteConcern(WriteConcern.MAJORITY);
+
+        dataKeysCollection.drop();
+        dataKeysCollection.insertOne(key1Document);
+        key1Id = key1Document.getBinary("_id");
+
+        Map<String, Map<String, Object>> kmsProviders = getKmsProviders(EncryptionFixture.KmsProviderType.LOCAL);
+
+        clientEncryption = createClientEncryption(ClientEncryptionSettings.builder()
+                .keyVaultMongoClientSettings(getMongoClientSettings())
+                .keyVaultNamespace(dataKeysNamespace.getFullName())
+                .kmsProviders(kmsProviders)
+                .build());
+
+        encryptedClient = createMongoClient(getMongoClientSettingsBuilder()
+                .autoEncryptionSettings(
+                        AutoEncryptionSettings.builder()
+                                .keyVaultNamespace(dataKeysNamespace.getFullName())
+                                .kmsProviders(kmsProviders)
+                                .bypassQueryAnalysis(true)
+                                .build())
+                .build());
+    }
+
+    @AfterEach
+    @SuppressWarnings("try")
+    public void cleanUp() {
+        //noinspection EmptyTryBlock
+        try (ClientEncryption ignored = this.clientEncryption;
+             MongoClient ignored1 = this.encryptedClient
+        ) {
+            // just using try-with-resources to ensure they all get closed, even in the case of exceptions
+        }
+    }
+
+    @Test
+    public void canInsertEncryptedIndexedAndFind() {
+        EncryptOptions encryptOptions = new EncryptOptions("Indexed").keyId(key1Id).contentionFactor(0L);
+        BsonBinary insertPayload = clientEncryption.encrypt(ENCRYPTED_INDEXED_VALUE, encryptOptions);
+
+        MongoCollection<BsonDocument> coll = encryptedClient.getDatabase(getDefaultDatabaseName())
+                .getCollection("explicit_encryption", BsonDocument.class);
+        coll.insertOne(new BsonDocument("encryptedIndexed", insertPayload));
+
+        encryptOptions = new EncryptOptions("Indexed").keyId(key1Id).queryType("equality").contentionFactor(0L);
+        BsonBinary findPayload = clientEncryption.encrypt(ENCRYPTED_INDEXED_VALUE, encryptOptions);
+
+        BsonDocument actual = coll.find(new BsonDocument("encryptedIndexed", findPayload)).first();
+        assertNotNull(actual, "No value found");
+        assertEquals(ENCRYPTED_INDEXED_VALUE, actual.get("encryptedIndexed"));
+    }
+
+    @Test
+    public void canInsertEncryptedIndexedAndFindWithNonZeroContention() {
+        EncryptOptions encryptOptions = new EncryptOptions("Indexed").keyId(key1Id).contentionFactor(10L);
+        MongoCollection<BsonDocument> coll = encryptedClient.getDatabase(getDefaultDatabaseName())
+                .getCollection("explicit_encryption", BsonDocument.class);
+
+        for (int i = 0; i < 10; i++) {
+            BsonBinary insertPayload = clientEncryption.encrypt(ENCRYPTED_INDEXED_VALUE, encryptOptions);
+            coll.insertOne(new BsonDocument("encryptedIndexed", insertPayload));
+        }
+
+        encryptOptions = new EncryptOptions("Indexed").keyId(key1Id).queryType("equality").contentionFactor(0L);
+        BsonBinary findPayload = clientEncryption.encrypt(ENCRYPTED_INDEXED_VALUE, encryptOptions);
+
+        List<BsonDocument> values = coll.find(new BsonDocument("encryptedIndexed", findPayload)).into(new ArrayList<>());
+        assertTrue(values.size() < 10);
+        values.forEach(v ->
+                assertEquals(ENCRYPTED_INDEXED_VALUE, v.get("encryptedIndexed"))
+        );
+
+        encryptOptions = new
EncryptOptions("Indexed").keyId(key1Id).contentionFactor(10L).queryType("equality"); + BsonBinary findPayload2 = clientEncryption.encrypt(ENCRYPTED_INDEXED_VALUE, encryptOptions); + + values = coll.find(new BsonDocument("encryptedIndexed", findPayload2)).into(new ArrayList<>()); + + assertEquals(10, values.size()); + values.forEach(v -> + assertEquals(ENCRYPTED_INDEXED_VALUE, v.get("encryptedIndexed")) + ); + } + + @Test + public void canInsertEncryptedUnindexed() { + EncryptOptions encryptOptions = new EncryptOptions("Unindexed").keyId(key1Id); + MongoCollection coll = encryptedClient.getDatabase(getDefaultDatabaseName()) + .getCollection("explicit_encryption", BsonDocument.class); + + BsonBinary insertPayload = clientEncryption.encrypt(ENCRYPTED_UNINDEXED_VALUE, encryptOptions); + coll.insertOne(new BsonDocument("_id", new BsonInt32(1)).append("encryptedUnindexed", insertPayload)); + + BsonDocument found = coll.find(new BsonDocument("_id", new BsonInt32(1))).first(); + assertNotNull(found); + + assertEquals(ENCRYPTED_UNINDEXED_VALUE, found.get("encryptedUnindexed")); + } + + @Test + public void canRoundtripEncryptedIndexed() { + EncryptOptions encryptOptions = new EncryptOptions("Indexed").keyId(key1Id).contentionFactor(0L); + + BsonBinary payload = clientEncryption.encrypt(ENCRYPTED_INDEXED_VALUE, encryptOptions); + BsonValue decrypted = clientEncryption.decrypt(payload); + + assertEquals(ENCRYPTED_INDEXED_VALUE, decrypted); + } + + @Test + public void canRoundtripEncryptedUnindexed() { + EncryptOptions encryptOptions = new EncryptOptions("Unindexed").keyId(key1Id); + + BsonBinary payload = clientEncryption.encrypt(ENCRYPTED_UNINDEXED_VALUE, encryptOptions); + BsonValue decrypted = clientEncryption.decrypt(payload); + + assertEquals(ENCRYPTED_UNINDEXED_VALUE, decrypted); + } + + private static BsonDocument bsonDocumentFromPath(final String path) { + return getTestDocument("client-side-encryption/etc/data/" + path); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionKmsTlsTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionKmsTlsTest.java new file mode 100644 index 00000000000..6e0b5957dea --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionKmsTlsTest.java @@ -0,0 +1,261 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client; + +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.MongoClientException; +import com.mongodb.client.model.vault.DataKeyOptions; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.lang.NonNull; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.junit.jupiter.api.Test; + +import javax.net.ssl.SSLContext; +import javax.net.ssl.TrustManager; +import javax.net.ssl.X509TrustManager; +import java.security.KeyManagementException; +import java.security.NoSuchAlgorithmException; +import java.security.cert.CertificateException; +import java.security.cert.CertificateExpiredException; +import java.security.cert.X509Certificate; +import java.util.HashMap; +import java.util.Map; + +import static com.mongodb.ClusterFixture.getEnv; +import static com.mongodb.ClusterFixture.hasEncryptionTestsEnabled; +import static com.mongodb.client.Fixture.getMongoClientSettings; +import static java.util.Objects.requireNonNull; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assumptions.assumeTrue; +public abstract class AbstractClientSideEncryptionKmsTlsTest { + + private static final String SYSTEM_PROPERTY_KEY = "org.mongodb.test.kms.tls.error.type"; + + enum TlsErrorType { + EXPIRED(CertificateExpiredException.class, "NotAfter"), + INVALID_HOSTNAME(CertificateException.class, "No subject alternative names present"); + + private final Class expectedExceptionClass; + private final String expectedExceptionMessageSubstring; + + TlsErrorType(final Class expectedExceptionClass, final String expectedExceptionMessageSubstring) { + this.expectedExceptionClass = expectedExceptionClass; + this.expectedExceptionMessageSubstring = expectedExceptionMessageSubstring; + } + + @Nullable + Throwable getCauseOfExpectedClass(final MongoClientException e) { + Throwable cause = e.getCause(); + while (cause != null) { + if (cause.getClass().equals(expectedExceptionClass)) { + return cause; + } + cause = cause.getCause(); + } + return null; + } + + boolean causeContainsExpectedMessage(final MongoClientException e) { + return requireNonNull(getCauseOfExpectedClass(e)).getMessage().contains(expectedExceptionMessageSubstring); + } + + static TlsErrorType fromSystemPropertyValue(final String value) { + if (value.equals("expired")) { + return TlsErrorType.EXPIRED; + } else if (value.equals("invalidHostname")) { + return TlsErrorType.INVALID_HOSTNAME; + } else { + throw new IllegalArgumentException("Unsupported value for " + SYSTEM_PROPERTY_KEY + " system property: " + value); + } + } + } + + @NonNull + public abstract ClientEncryption getClientEncryption(ClientEncryptionSettings settings); + + /** + * See + * 10. KMS TLS Tests. 
+     */
+    @Test
+    public void testInvalidKmsCertificate() {
+        assumeTrue(System.getProperties().containsKey(SYSTEM_PROPERTY_KEY));
+        TlsErrorType expectedKmsTlsError = TlsErrorType.fromSystemPropertyValue(System.getProperty(SYSTEM_PROPERTY_KEY));
+        ClientEncryptionSettings clientEncryptionSettings = ClientEncryptionSettings.builder()
+                .keyVaultMongoClientSettings(getMongoClientSettings())
+                .keyVaultNamespace("keyvault.datakeys")
+                .kmsProviders(new HashMap<String, Map<String, Object>>() {{
+                    put("aws", new HashMap<String, Object>() {{
+                        put("accessKeyId", "fakeAccessKeyId");
+                        put("secretAccessKey", "fakeSecretAccessKey");
+                    }});
+                }})
+                .build();
+        // See: https://github.com/mongodb-labs/drivers-evergreen-tools/blob/master/.evergreen/csfle/README.md
+        String endpoint = expectedKmsTlsError == TlsErrorType.EXPIRED ? "mongodb://127.0.0.1:9000" : "mongodb://127.0.0.1:9001";
+        try (ClientEncryption clientEncryption = getClientEncryption(clientEncryptionSettings)) {
+            clientEncryption.createDataKey("aws", new DataKeyOptions().masterKey(
+                    BsonDocument.parse("{"
+                            + "region: \"us-east-1\", "
+                            + "key: \"arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0\","
+                            + "endpoint: \"" + endpoint + "\"}")));
+            fail();
+        } catch (MongoClientException e) {
+            assertNotNull(expectedKmsTlsError.getCauseOfExpectedClass(e));
+            assertTrue(expectedKmsTlsError.causeContainsExpectedMessage(e));
+        }
+    }
+
+    /**
+     * See
+     *
+     * 11. KMS TLS Options Tests.
+     */
+    @Test
+    public void testThatCustomSslContextIsUsed() {
+        assumeTrue(hasEncryptionTestsEnabled());
+
+        Map<String, Map<String, Object>> kmsProviders = getKmsProviders();
+        ClientEncryptionSettings clientEncryptionSettings = ClientEncryptionSettings.builder()
+                .keyVaultMongoClientSettings(getMongoClientSettings())
+                .keyVaultNamespace("keyvault.datakeys")
+                .kmsProviders(kmsProviders)
+                .kmsProviderSslContextMap(new HashMap<String, SSLContext>() {{
+                    put("aws", getUntrustingSslContext("aws"));
+                    put("aws:named", getUntrustingSslContext("aws:named"));
+                    put("azure", getUntrustingSslContext("azure"));
+                    put("azure:named", getUntrustingSslContext("azure:named"));
+                    put("gcp", getUntrustingSslContext("gcp"));
+                    put("gcp:named", getUntrustingSslContext("gcp:named"));
+                    put("kmip", getUntrustingSslContext("kmip"));
+                    put("kmip:named", getUntrustingSslContext("kmip:named"));
+                }})
+                .build();
+        try (ClientEncryption clientEncryption = getClientEncryption(clientEncryptionSettings)) {
+            outer:
+            for (String curProvider: kmsProviders.keySet()) {
+                Throwable e = assertThrows(MongoClientException.class, () ->
+                        clientEncryption.createDataKey(curProvider, new DataKeyOptions().masterKey(
+                                BsonDocument.parse(getMasterKey(curProvider)))));
+                while (e != null) {
+                    if (e.getMessage().contains("Don't trust " + curProvider)) {
+                        continue outer;
+                    }
+                    e = e.getCause();
+                }
+                fail("No exception in the cause chain contains the expected string");
+            }
+        }
+    }
+
+    private HashMap<String, Map<String, Object>> getKmsProviders() {
+        return new HashMap<String, Map<String, Object>>() {{
+            put("aws", new HashMap<String, Object>() {{
+                put("accessKeyId", getEnv("AWS_ACCESS_KEY_ID"));
+                put("secretAccessKey", getEnv("AWS_SECRET_ACCESS_KEY"));
+            }});
+            put("aws:named", new HashMap<String, Object>() {{
+                put("accessKeyId", getEnv("AWS_ACCESS_KEY_ID"));
+                put("secretAccessKey", getEnv("AWS_SECRET_ACCESS_KEY"));
+            }});
+            put("azure", new HashMap<String, Object>() {{
+                put("tenantId", getEnv("AZURE_TENANT_ID"));
+                put("clientId", getEnv("AZURE_CLIENT_ID"));
+                put("clientSecret", getEnv("AZURE_CLIENT_SECRET"));
+                put("identityPlatformEndpoint", "login.microsoftonline.com:443");
+            }});
+            put("azure:named", new HashMap<String, Object>() {{
+                put("tenantId",
getEnv("AZURE_TENANT_ID")); + put("clientId", getEnv("AZURE_CLIENT_ID")); + put("clientSecret", getEnv("AZURE_CLIENT_SECRET")); + put("identityPlatformEndpoint", "login.microsoftonline.com:443"); + }}); + put("gcp", new HashMap() {{ + put("email", getEnv("GCP_EMAIL")); + put("privateKey", getEnv("GCP_PRIVATE_KEY")); + put("endpoint", "oauth2.googleapis.com:443"); + }}); + put("gcp:named", new HashMap() {{ + put("email", getEnv("GCP_EMAIL")); + put("privateKey", getEnv("GCP_PRIVATE_KEY")); + put("endpoint", "oauth2.googleapis.com:443"); + }}); + put("kmip", new HashMap() {{ + put("endpoint", "localhost:5698"); + }}); + put("kmip:named", new HashMap() {{ + put("endpoint", "localhost:5698"); + }}); + }}; + } + + String getMasterKey(final String kmsProvider) { + switch (kmsProvider) { + case "aws": + case "aws:named": + return "{" + + "region: \"us-east-1\", " + + "key: \"arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0\"}"; + case "azure": + case "azure:named": + return "{" + + " \"keyVaultEndpoint\": \"key-vault-csfle.vault.azure.net\"," + + " \"keyName\": \"key-name-csfle\"" + + "}"; + case "gcp": + case "gcp:named": + return "{" + + " \"projectId\": \"devprod-drivers\"," + + " \"location\": \"global\", " + + " \"keyRing\": \"key-ring-csfle\"," + + " \"keyName\": \"key-name-csfle\"" + + "}"; + case "kmip": + case "kmip:named": + return "{}"; + default: + throw new UnsupportedOperationException("Unsupported KMS provider: " + kmsProvider); + } + } + + private SSLContext getUntrustingSslContext(final String kmsProvider) { + try { + TrustManager untrustingTrustManager = new X509TrustManager() { + public X509Certificate[] getAcceptedIssuers() { + return new X509Certificate[0]; + } + + public void checkClientTrusted(final X509Certificate[] certs, final String authType) { + } + + public void checkServerTrusted(final X509Certificate[] certs, final String authType) throws CertificateException { + throw new CertificateException("Don't trust " + kmsProvider); + } + }; + + SSLContext sslContext = SSLContext.getInstance("SSL"); + sslContext.init(null, new TrustManager[]{untrustingTrustManager}, null); + return sslContext; + } catch (KeyManagementException | NoSuchAlgorithmException e) { + throw new RuntimeException(e); + } + } +} + diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionNotCreateMongocryptdClientTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionNotCreateMongocryptdClientTest.java new file mode 100644 index 00000000000..897c7cab503 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionNotCreateMongocryptdClientTest.java @@ -0,0 +1,221 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.AutoEncryptionSettings;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.MongoNamespace;
+import com.mongodb.internal.capi.MongoCryptHelper;
+import com.mongodb.internal.thread.DaemonThreadFactory;
+import com.mongodb.lang.Nullable;
+import org.bson.Document;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.ServerSocket;
+import java.net.Socket;
+import java.time.Duration;
+import java.util.AbstractMap.SimpleImmutableEntry;
+import java.util.Map.Entry;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.stream.Stream;
+
+import static com.mongodb.ClusterFixture.cryptSharedLibPathSysPropValue;
+import static com.mongodb.client.Fixture.getMongoClientSettings;
+import static com.mongodb.client.unified.UnifiedClientEncryptionHelper.localKmsProviderKey;
+import static java.lang.Math.toIntExact;
+import static java.lang.String.format;
+import static java.util.Collections.singletonMap;
+import static java.util.stream.Collectors.toMap;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
+/**
+ * See
+ *
+ * 20. Bypass creating mongocryptd client when shared library is loaded.
+ */
+public abstract class AbstractClientSideEncryptionNotCreateMongocryptdClientTest {
+    @Nullable
+    private static final String CRYPT_SHARED_LIB_PATH_SYS_PROP_VALUE = cryptSharedLibPathSysPropValue().orElse(null);
+    private static final int DEFAULT_MONGOCRYPTD_PORT = MongoCryptHelper.createMongocryptdClientSettings(null)
+            .getClusterSettings().getHosts().get(0).getPort();
+    private static final String LOCAL_KMS_PROVIDER_ID = "local";
+    private static final Duration TIMEOUT = Duration.ofMillis(1_000);
+    private static final MongoNamespace KEY_VAULT_NAMESPACE = new MongoNamespace("keyvault", "datakeys");
+
+    private MongoClient client;
+    private MongoCollection<Document> collection;
+    private ConnectionTracker mongocryptdConnectionTracker;
+
+    @BeforeEach
+    public void setUp() throws Exception {
+        assumeTrue(CRYPT_SHARED_LIB_PATH_SYS_PROP_VALUE != null);
+        mongocryptdConnectionTracker = ConnectionTracker.start();
+        client = createMongoClient(MongoClientSettings.builder(getMongoClientSettings())
+                .autoEncryptionSettings(AutoEncryptionSettings.builder()
+                        .kmsProviders(singletonMap(LOCAL_KMS_PROVIDER_ID, singletonMap("key", localKmsProviderKey())))
+                        .keyVaultNamespace(KEY_VAULT_NAMESPACE.getFullName())
+                        .extraOptions(Stream.<Entry<String, Object>>of(
+                                new SimpleImmutableEntry<>("cryptSharedLibPath", CRYPT_SHARED_LIB_PATH_SYS_PROP_VALUE),
+                                new SimpleImmutableEntry<>("mongocryptdURI", format("mongodb://%s:%d/?serverSelectionTimeoutMS=%d",
+                                        mongocryptdConnectionTracker.serverSocket().getInetAddress().getHostAddress(),
+                                        mongocryptdConnectionTracker.serverSocket().getLocalPort(),
+                                        TIMEOUT.toMillis()))
+                        ).collect(toMap(Entry::getKey, Entry::getValue)))
+                        .build())
+                .build());
+        client.getDatabase(KEY_VAULT_NAMESPACE.getDatabaseName()).drop();
+        MongoDatabase db = client.getDatabase("db");
+        db.drop();
+        collection = db.getCollection("coll");
+    }
+
+    @AfterEach
@SuppressWarnings("try") + public void cleanUp() throws Exception { + //noinspection unused + try (ConnectionTracker autoClosed = mongocryptdConnectionTracker; + MongoClient autoClosed2 = client) { + // we use the `try`-with-resources statement to release multiple resources + } + } + + protected abstract MongoClient createMongoClient(MongoClientSettings settings); + + @Test + @SuppressWarnings("try") + void whenCryptSharedLoaded() throws Exception { + //noinspection unused + try (AutoCloseable assertNoConnectionsOnAutoCloseToPreserveAssertionFailure = mongocryptdConnectionTracker) { + collection.insertOne(Document.parse("{unencrypted: 'test'}")); + // ConnectionTracker.assertNoConnections is called on auto-close + } + } + + static int findAvailableMongocryptdLoopbackPort() { + try (ServerSocket serverSocket = new ServerSocket()) { + serverSocket.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0)); + int foundPort = serverSocket.getLocalPort(); + if (foundPort != DEFAULT_MONGOCRYPTD_PORT) { + return foundPort; + } else { + return findAvailableMongocryptdLoopbackPort(); + } + } catch (IOException e) { + throw new RuntimeException("Failed to find an available port", e); + } + } + + @SuppressWarnings("try") + private static final class ConnectionTracker implements AutoCloseable { + private final ServerSocket serverSocket; + private final ExecutorService executor; + private final Future failOnConnect; + private boolean active; + + private ConnectionTracker( + final ServerSocket serverSocket, final ExecutorService executor, final Future failOnConnect) { + this.serverSocket = serverSocket; + this.executor = executor; + this.failOnConnect = failOnConnect; + active = true; + } + + ServerSocket serverSocket() { + return serverSocket; + } + + static ConnectionTracker start() throws Exception { + ServerSocket serverSocket = new ServerSocket(); + try { + serverSocket.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), findAvailableMongocryptdLoopbackPort())); + ExecutorService executor = Executors.newSingleThreadExecutor(new DaemonThreadFactory("NotConnectNotSpawnMongocryptd")); + try { + return start(serverSocket, executor); + } catch (Exception e) { + executor.shutdownNow(); + throw e; + } + } catch (Exception e) { + serverSocket.close(); + throw e; + } + } + + private static ConnectionTracker start(final ServerSocket serverSocket, final ExecutorService executor) throws Exception { + CompletableFuture confirmListening = new CompletableFuture<>(); + Future failOnConnect = executor.submit(() -> { + try { + //noinspection unused + try (Socket expectedIgnoredSocket = serverSocket.accept()) { + confirmListening.complete(null); + } + try (Socket unexpectedSocket = serverSocket.accept()) { + fail(format("Fake mongocryptd bound to %s received a connection from %s", + unexpectedSocket.getLocalSocketAddress(), unexpectedSocket.getRemoteSocketAddress())); + } + } catch (IOException e) { + throw new RuntimeException(e); + } + }); + try (Socket socket = new Socket()) { + socket.connect(serverSocket.getLocalSocketAddress(), toIntExact(TIMEOUT.toMillis())); + confirmListening.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS); + } + return new ConnectionTracker(serverSocket, executor, failOnConnect); + } + + private void assertNoConnections() { + try { + failOnConnect.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS); + } catch (TimeoutException expected) { + // expected + } catch (ExecutionException e) { + Throwable cause = e.getCause(); + fail(cause == null ? 
e : cause); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + fail(e); + } catch (Exception e) { + fail(e); + } + } + + @Override + public void close() throws Exception { + if (active) { + active = false; + //noinspection unused + try (AutoCloseable autoClosed = executor::shutdownNow; + ServerSocket autoClosed1 = serverSocket) { + assertNoConnections(); + } + } + } + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionNotSpawnMongocryptdTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionNotSpawnMongocryptdTest.java new file mode 100644 index 00000000000..7f0b6995982 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionNotSpawnMongocryptdTest.java @@ -0,0 +1,225 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.MongoClientException; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoNamespace; +import com.mongodb.MongoTimeoutException; +import com.mongodb.ServerAddress; +import com.mongodb.WriteConcern; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.Document; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; + +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.time.Duration; +import java.util.AbstractMap.SimpleImmutableEntry; +import java.util.HashMap; +import java.util.Map; +import java.util.Map.Entry; +import java.util.concurrent.TimeUnit; +import java.util.function.BiConsumer; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static com.mongodb.ClusterFixture.cryptSharedLibPathSysPropValue; +import static com.mongodb.client.AbstractClientSideEncryptionNotCreateMongocryptdClientTest.findAvailableMongocryptdLoopbackPort; +import static com.mongodb.client.Fixture.getMongoClientSettings; +import static com.mongodb.client.unified.UnifiedClientEncryptionHelper.localKmsProviderKey; +import static java.lang.String.format; +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assumptions.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeTrue; +import static util.JsonPoweredTestHelper.getTestDocument; + +/** + * See + * + * 8. Bypass Spawning mongocryptd. 
+ */ +public abstract class AbstractClientSideEncryptionNotSpawnMongocryptdTest { + @Nullable + private static final String CRYPT_SHARED_LIB_PATH_SYS_PROP_VALUE = cryptSharedLibPathSysPropValue().orElse(null); + private static final BsonDocument EXTERNAL_SCHEMA = externalSchema(); + private static final BsonDocument EXTERNAL_KEY = externalKey(); + private static final String LOCAL_KMS_PROVIDER_ID = "local"; + private static final Duration TIMEOUT = Duration.ofMillis(1_000); + private static final MongoNamespace KEY_VAULT_NAMESPACE = new MongoNamespace("keyvault", "datakeys"); + private static final MongoNamespace NAMESPACE = new MongoNamespace("db", "coll"); + + private MongoClient client; + private InetSocketAddress mongocryptdSocketAddress; + + @AfterEach + public void cleanUp() { + mongocryptdSocketAddress = null; + if (client != null) { + client.close(); + } + } + + protected abstract MongoClient createMongoClient(MongoClientSettings settings); + + /** + * See + * + * Via loading shared library. + */ + @Test + void viaLoadingSharedLibrary() { + assumeTrue(CRYPT_SHARED_LIB_PATH_SYS_PROP_VALUE != null); + setUpCollection((mongocryptdSocketAddress, autoEncryptionSettingsBuilder) -> + autoEncryptionSettingsBuilder.extraOptions(merge(commonExtraAutoEncryptionOptions(mongocryptdSocketAddress), + new SimpleImmutableEntry<>("cryptSharedLibPath", CRYPT_SHARED_LIB_PATH_SYS_PROP_VALUE), + new SimpleImmutableEntry<>("cryptSharedLibRequired", true), + new SimpleImmutableEntry<>("mongocryptdURI", format("mongodb://%s:%d/?serverSelectionTimeoutMS=%d", + mongocryptdSocketAddress.getAddress().getHostAddress(), + mongocryptdSocketAddress.getPort(), + TIMEOUT.toMillis())) + )) + ).insertOne(Document.parse("{unencrypted: 'test'}")); + assertMongocryptdNotSpawned(); + } + + /** + * See + * + * Via mongocryptdBypassSpawn. + */ + @Test + void viaMongocryptdBypassSpawn() { + assumeFalse(CRYPT_SHARED_LIB_PATH_SYS_PROP_VALUE != null); + MongoCollection collection = setUpCollection((mongocryptdSocketAddress, autoEncryptionSettingsBuilder) -> + autoEncryptionSettingsBuilder.extraOptions(merge(commonExtraAutoEncryptionOptions(mongocryptdSocketAddress), + new SimpleImmutableEntry<>("mongocryptdBypassSpawn", true), + new SimpleImmutableEntry<>("mongocryptdURI", format("mongodb://%s:%d/?serverSelectionTimeoutMS=%d", + mongocryptdSocketAddress.getAddress().getHostAddress(), + mongocryptdSocketAddress.getPort(), + TIMEOUT.toMillis())) + )) + ); + assertTrue(assertThrows(MongoClientException.class, + () -> collection.insertOne(Document.parse("{encrypted: 'test'}"))).getMessage().contains("Timed out")); + } + + /** + * See + * + * Via bypassAutoEncryption. + */ + @Test + void viaBypassAutoEncryption() { + assumeFalse(CRYPT_SHARED_LIB_PATH_SYS_PROP_VALUE != null); + setUpCollection((mongocryptdSocketAddress, autoEncryptionSettingsBuilder) -> autoEncryptionSettingsBuilder + .extraOptions(commonExtraAutoEncryptionOptions(mongocryptdSocketAddress)) + .bypassAutoEncryption(true) + ).insertOne(Document.parse("{unencrypted: 'test'}")); + assertMongocryptdNotSpawned(); + } + + /** + * See + * + * Via bypassQueryAnalysis. 
+ */ + @Test + void viaBypassQueryAnalysis() { + assumeFalse(CRYPT_SHARED_LIB_PATH_SYS_PROP_VALUE != null); + setUpCollection((mongocryptdSocketAddress, autoEncryptionSettingsBuilder) -> autoEncryptionSettingsBuilder + .extraOptions(commonExtraAutoEncryptionOptions(mongocryptdSocketAddress)) + .bypassQueryAnalysis(true) + ).insertOne(Document.parse("{unencrypted: 'test'}")); + assertMongocryptdNotSpawned(); + } + + private MongoCollection setUpCollection( + final BiConsumer autoEncryptionSettingsBuilderMutator) { + setUpKeyVaultNamespace(); + InetSocketAddress localMongocryptdSocketAddress = new InetSocketAddress( + InetAddress.getLoopbackAddress(), findAvailableMongocryptdLoopbackPort()); + AutoEncryptionSettings.Builder autoEncryptionSettingsBuilder = AutoEncryptionSettings.builder(); + autoEncryptionSettingsBuilderMutator.accept(localMongocryptdSocketAddress, autoEncryptionSettingsBuilder); + mongocryptdSocketAddress = localMongocryptdSocketAddress; + client = createMongoClient(MongoClientSettings.builder(getMongoClientSettings()) + .autoEncryptionSettings(autoEncryptionSettingsBuilder + .kmsProviders(singletonMap(LOCAL_KMS_PROVIDER_ID, singletonMap("key", localKmsProviderKey()))) + .keyVaultNamespace(KEY_VAULT_NAMESPACE.getFullName()) + .schemaMap(singletonMap(NAMESPACE.getFullName(), EXTERNAL_SCHEMA)) + .build()) + .build()); + MongoDatabase db = client.getDatabase(NAMESPACE.getDatabaseName()); + db.drop(); + return db.getCollection(NAMESPACE.getCollectionName()); + } + + private void setUpKeyVaultNamespace() { + try (MongoClient client = createMongoClient(MongoClientSettings.builder(getMongoClientSettings()).build())) { + MongoDatabase db = client.getDatabase(KEY_VAULT_NAMESPACE.getDatabaseName()); + db.drop(); + db.getCollection(KEY_VAULT_NAMESPACE.getCollectionName(), BsonDocument.class) + .withWriteConcern(WriteConcern.MAJORITY) + .insertOne(EXTERNAL_KEY); + } + } + + private static Map commonExtraAutoEncryptionOptions(final InetSocketAddress mongocryptdSocketAddress) { + return singletonMap("mongocryptdSpawnArgs", asList( + // We pick a random available `mongocryptd` port and also include it in the PID file path + // to reduce the chances of different test runs interfering with each other. The interference + // may come from the fact that once spawned, `mongocryptd` stays up and running for some time, + // which may cause failures in other runs if they use the same `mongocryptd` port / PID file. 
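+                // Illustrative example (hypothetical port value): if the free port found were 50123, mongocryptd
+                // would be spawned with: --pidfilepath=bypass-spawning-mongocryptd-50123.pid --port=50123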
+ format("--pidfilepath=bypass-spawning-mongocryptd-%d.pid", mongocryptdSocketAddress.getPort()), + format("--port=%d", mongocryptdSocketAddress.getPort()))); + } + + private void assertMongocryptdNotSpawned() { + try (MongoClient mongocryptdClient = createMongoClient(MongoClientSettings.builder() + .applyToClusterSettings(builder -> builder + .hosts(singletonList(new ServerAddress(mongocryptdSocketAddress))) + .serverSelectionTimeout(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS)) + .build())) { + assertThrows(MongoTimeoutException.class, () -> mongocryptdClient.getDatabase(NAMESPACE.getDatabaseName()) + .runCommand(Document.parse("{hello: 1}")), + "If nothing is thrown, then we connected to mongocryptd, i.e., it was spawned"); + } + } + + private static BsonDocument externalSchema() { + return getTestDocument("client-side-encryption/external/external-schema.json"); + } + + private static BsonDocument externalKey() { + return getTestDocument("client-side-encryption/external/external-key.json"); + } + + @SafeVarargs + @SuppressWarnings("varargs") + private static Map merge(final Map map, final Entry... entries) { + HashMap result = new HashMap<>(map); + result.putAll(Stream.of(entries).collect(Collectors.toMap(Entry::getKey, Entry::getValue))); + return result; + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionOnDemandCredentialsTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionOnDemandCredentialsTest.java new file mode 100644 index 00000000000..1eaaa3accae --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionOnDemandCredentialsTest.java @@ -0,0 +1,98 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.MongoClientException;
+import com.mongodb.client.model.vault.DataKeyOptions;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.lang.NonNull;
+import org.bson.BsonDocument;
+import org.bson.BsonString;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.condition.EnabledIfSystemProperty;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import static com.mongodb.ClusterFixture.getEnv;
+import static com.mongodb.assertions.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+
+public abstract class AbstractClientSideEncryptionOnDemandCredentialsTest {
+
+    public abstract ClientEncryption getClientEncryption(ClientEncryptionSettings settings);
+
+    @Test
+    @EnabledIfSystemProperty(named = "org.mongodb.test.fle.on.demand.credential.test.success.enabled", matches = "true")
+    public void testSuccess() {
+        String kmsProvider = getEnv("PROVIDER");
+        try (ClientEncryption clientEncryption = initClientEncryption(kmsProvider)) {
+            clientEncryption.createDataKey(kmsProvider, getDataKeyOptions(kmsProvider));
+        }
+    }
+
+    @Test
+    @EnabledIfSystemProperty(named = "org.mongodb.test.fle.on.demand.credential.test.failure.enabled", matches = "true")
+    public void testGcpFailure() {
+        testFailure("gcp");
+    }
+
+    @Test
+    @EnabledIfSystemProperty(named = "org.mongodb.test.fle.on.demand.credential.test.failure.enabled", matches = "true")
+    public void testAzureFailure() {
+        testFailure("azure");
+    }
+
+    private void testFailure(final String kmsProvider) {
+        try (ClientEncryption clientEncryption = initClientEncryption(kmsProvider)) {
+            MongoClientException thrown = assertThrows(
+                    MongoClientException.class,
+                    () -> clientEncryption.createDataKey(kmsProvider, getDataKeyOptions(kmsProvider)));
+            assertTrue(thrown.getCause() instanceof IOException);
+        }
+    }
+
+    @NonNull
+    private ClientEncryption initClientEncryption(final String kmsProvider) {
+        Map<String, Map<String, Object>> kmsProviders = new HashMap<>();
+        kmsProviders.put(kmsProvider, new HashMap<>());
+        return getClientEncryption(ClientEncryptionSettings.builder()
+                .keyVaultMongoClientSettings(Fixture.getMongoClientSettings())
+                .keyVaultNamespace("keyvault.datakeys")
+                .kmsProviders(kmsProviders)
+                .build());
+    }
+
+    @NonNull
+    private DataKeyOptions getDataKeyOptions(final String kmsProvider) {
+        switch (kmsProvider) {
+            case "gcp":
+                return new DataKeyOptions().masterKey(BsonDocument.parse(
+                        "{projectId: \"devprod-drivers\", location: \"global\", keyRing: \"key-ring-csfle\", keyName: \"key-name-csfle\"}"));
+            case "azure":
+                String keyVaultEndpoint = getEnv("AZUREKMS_KEY_VAULT_ENDPOINT");
+                String keyName = getEnv("AZUREKMS_KEY_NAME");
+                return new DataKeyOptions().masterKey(new BsonDocument()
+                        .append("keyVaultEndpoint", new BsonString(keyVaultEndpoint))
+                        .append("keyName", new BsonString(keyName)));
+            default:
+                throw new UnsupportedOperationException("Unsupported KMS provider: " + kmsProvider);
+        }
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionRangeDefaultExplicitEncryptionTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionRangeDefaultExplicitEncryptionTest.java
new file mode 100644
index 00000000000..407782dfbb0
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionRangeDefaultExplicitEncryptionTest.java
@@ -0,0 +1,127 @@
+/*
+ *
Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * + */ + +package com.mongodb.client; + +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.MongoNamespace; +import com.mongodb.client.model.vault.EncryptOptions; +import com.mongodb.client.model.vault.RangeOptions; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.fixture.EncryptionFixture.KmsProviderType; +import org.bson.BsonBinary; +import org.bson.BsonInt32; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; + +import static com.mongodb.ClusterFixture.isStandalone; +import static com.mongodb.ClusterFixture.serverVersionAtLeast; +import static com.mongodb.client.Fixture.getMongoClientSettings; +import static com.mongodb.fixture.EncryptionFixture.getKmsProviders; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assumptions.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * + */ + +public abstract class AbstractClientSideEncryptionRangeDefaultExplicitEncryptionTest { + private static final BsonInt32 VALUE_TO_ENCRYPT = new BsonInt32(123); + private ClientEncryption clientEncryption; + private BsonBinary keyId; + private BsonBinary payloadDefaults; + + protected abstract ClientEncryption createClientEncryption(ClientEncryptionSettings settings); + + @BeforeEach + public void setUp() { + assumeTrue(serverVersionAtLeast(8, 0)); + assumeFalse(isStandalone()); + + MongoNamespace dataKeysNamespace = new MongoNamespace("keyvault.datakeys"); + clientEncryption = createClientEncryption(ClientEncryptionSettings.builder() + .keyVaultMongoClientSettings(getMongoClientSettings()) + .keyVaultNamespace(dataKeysNamespace.getFullName()) + .kmsProviders(getKmsProviders(KmsProviderType.LOCAL)) + .build()); + keyId = clientEncryption.createDataKey("local"); + payloadDefaults = clientEncryption.encrypt(VALUE_TO_ENCRYPT, + getEncryptionOptions() + ); + } + + private EncryptOptions getEncryptionOptions() { + return new EncryptOptions("Range") + .keyId(keyId) + .contentionFactor(0L) + .rangeOptions(new RangeOptions() + .min(new BsonInt32(0)) + .max(new BsonInt32(1000)) + ); + } + + @AfterEach + @SuppressWarnings("try") + public void cleanUp() { + try (ClientEncryption ignored = clientEncryption) { + // just using try-with-resources to ensure they all get closed, even in the case of exceptions + } + } + + /** + * Validates that the omission of options trimFactor and sparsity leads to libmongocrypt-provided defaults being used instead. 
+ */ + @Test + @DisplayName("Case 1: Uses libmongocrypt defaults") + void shouldUseDefaultsWhenNotSpecified() { + BsonBinary encryptedValue = clientEncryption.encrypt(VALUE_TO_ENCRYPT, + new EncryptOptions("Range") + .keyId(keyId) + .contentionFactor(0L) + .rangeOptions(new RangeOptions() + .min(new BsonInt32(0)) + .max(new BsonInt32(1000)) + .sparsity(2L) + .trimFactor(6) + ) + ); + + assertEquals(payloadDefaults.getData().length, encryptedValue.getData().length); + } + + @Test + @DisplayName("Case 2: Accepts `trimFactor` 0") + void shouldAcceptTrimFactor() { + BsonBinary encryptedValue = clientEncryption.encrypt(VALUE_TO_ENCRYPT, + new EncryptOptions("Range") + .keyId(keyId) + .contentionFactor(0L) + .rangeOptions(new RangeOptions() + .min(new BsonInt32(0)) + .max(new BsonInt32(1000)) + .trimFactor(0) + ) + ); + + assertTrue(payloadDefaults.getData().length < encryptedValue.getData().length); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionRangeExplicitEncryptionTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionRangeExplicitEncryptionTest.java new file mode 100644 index 00000000000..8537f834956 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionRangeExplicitEncryptionTest.java @@ -0,0 +1,353 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ *
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.AutoEncryptionSettings;
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.MongoClientException;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.MongoNamespace;
+import com.mongodb.WriteConcern;
+import com.mongodb.client.model.CreateCollectionOptions;
+import com.mongodb.client.model.DropCollectionOptions;
+import com.mongodb.client.model.Filters;
+import com.mongodb.client.model.Projections;
+import com.mongodb.client.model.Sorts;
+import com.mongodb.client.model.vault.EncryptOptions;
+import com.mongodb.client.model.vault.RangeOptions;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.fixture.EncryptionFixture;
+import com.mongodb.test.AfterBeforeParameterResolver;
+import org.bson.BsonArray;
+import org.bson.BsonBinary;
+import org.bson.BsonDateTime;
+import org.bson.BsonDecimal128;
+import org.bson.BsonDocument;
+import org.bson.BsonDouble;
+import org.bson.BsonInt32;
+import org.bson.BsonInt64;
+import org.bson.BsonString;
+import org.bson.BsonValue;
+import org.bson.types.Decimal128;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.EnumSource;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import static com.mongodb.ClusterFixture.isStandalone;
+import static com.mongodb.ClusterFixture.serverVersionAtLeast;
+import static com.mongodb.client.Fixture.getDefaultDatabase;
+import static com.mongodb.client.Fixture.getDefaultDatabaseName;
+import static com.mongodb.client.Fixture.getMongoClient;
+import static com.mongodb.client.Fixture.getMongoClientSettings;
+import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder;
+import static com.mongodb.fixture.EncryptionFixture.getKmsProviders;
+import static java.util.Arrays.asList;
+import static java.util.Collections.singletonList;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertIterableEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assumptions.assumeFalse;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+import static util.JsonPoweredTestHelper.getTestDocument;
+
+@ExtendWith(AfterBeforeParameterResolver.class)
+public abstract class AbstractClientSideEncryptionRangeExplicitEncryptionTest {
+    private MongoClient encryptedClient;
+    private ClientEncryption clientEncryption;
+    private BsonBinary key1Id;
+    private EncryptOptions encryptOptions;
+    private EncryptOptions encryptQueryOptions;
+    private String encryptedField;
+    private MongoCollection<BsonDocument> encryptedColl;
+
+    protected abstract MongoClient createMongoClient(MongoClientSettings settings);
+
+    protected abstract ClientEncryption createClientEncryption(ClientEncryptionSettings settings);
+
+    @BeforeEach
+    public void setUp(final Type type) {
+        assumeTrue(serverVersionAtLeast(8, 0));
+        assumeFalse(isStandalone());
+
+        MongoNamespace dataKeysNamespace = new MongoNamespace("keyvault.datakeys");
+        BsonDocument encryptedFields = getTestDocument("client-side-encryption/etc/data/range-encryptedFields-" + type.value + ".json");
+        BsonDocument key1Document = getTestDocument("client-side-encryption/etc/data/keys/key1-document.json");
+        key1Id = key1Document.getBinary("_id");
+
+        MongoDatabase explicitEncryptionDatabase =
getDefaultDatabase(); + explicitEncryptionDatabase.getCollection("explicit_encryption") + .drop(new DropCollectionOptions().encryptedFields(encryptedFields)); + explicitEncryptionDatabase.createCollection("explicit_encryption", + new CreateCollectionOptions().encryptedFields(encryptedFields)); + + MongoCollection dataKeysCollection = getMongoClient() + .getDatabase(dataKeysNamespace.getDatabaseName()) + .getCollection(dataKeysNamespace.getCollectionName(), BsonDocument.class) + .withWriteConcern(WriteConcern.MAJORITY); + + dataKeysCollection.drop(); + dataKeysCollection.insertOne(key1Document); + + Map> kmsProviders = getKmsProviders(EncryptionFixture.KmsProviderType.LOCAL); + + clientEncryption = createClientEncryption(ClientEncryptionSettings.builder() + .keyVaultMongoClientSettings(getMongoClientSettings()) + .keyVaultNamespace(dataKeysNamespace.getFullName()) + .kmsProviders(kmsProviders) + .build()); + + encryptedClient = createMongoClient(getMongoClientSettingsBuilder() + .autoEncryptionSettings( + AutoEncryptionSettings.builder() + .keyVaultNamespace(dataKeysNamespace.getFullName()) + .kmsProviders(kmsProviders) + .bypassQueryAnalysis(true) + .build()) + .build()); + + encryptOptions = new EncryptOptions("Range") + .keyId(key1Id) + .contentionFactor(0L) + .rangeOptions(type.getRangeOptions()); + + BsonBinary encryptedValue0 = clientEncryption.encrypt(type.convertNumber(0), encryptOptions); + BsonBinary encryptedValue6 = clientEncryption.encrypt(type.convertNumber(6), encryptOptions); + BsonBinary encryptedValue30 = clientEncryption.encrypt(type.convertNumber(30), encryptOptions); + BsonBinary encryptedValue200 = clientEncryption.encrypt(type.convertNumber(200), encryptOptions); + + encryptQueryOptions = new EncryptOptions("Range") + .keyId(key1Id) + .queryType("range") + .contentionFactor(0L) + .rangeOptions(type.getRangeOptions()); + + encryptedColl = encryptedClient.getDatabase(getDefaultDatabaseName()) + .getCollection("explicit_encryption", BsonDocument.class); + + encryptedField = "encrypted" + type.value; + encryptedColl.insertOne(new BsonDocument("_id", new BsonInt32(0)).append(encryptedField, encryptedValue0)); + encryptedColl.insertOne(new BsonDocument("_id", new BsonInt32(1)).append(encryptedField, encryptedValue6)); + encryptedColl.insertOne(new BsonDocument("_id", new BsonInt32(2)).append(encryptedField, encryptedValue30)); + encryptedColl.insertOne(new BsonDocument("_id", new BsonInt32(3)).append(encryptedField, encryptedValue200)); + } + + + @AfterEach + @SuppressWarnings("try") + public void cleanUp() { + //noinspection EmptyTryBlock + try (ClientEncryption ignored = clientEncryption; + MongoClient ignored1 = encryptedClient + ) { + // just using try-with-resources to ensure they all get closed, even in the case of exceptions + } + } + + @ParameterizedTest(name = "[{0}] Case 1: can decrypt a payload") + @EnumSource(Type.class) + void testCanDecryptAPayload(final Type type) { + BsonValue originalValue = type.convertNumber(6); + BsonBinary insertPayload = clientEncryption.encrypt(originalValue, encryptOptions); + + BsonValue decryptedValue = clientEncryption.decrypt(insertPayload); + + assertEquals(originalValue, decryptedValue); + } + + @ParameterizedTest(name = "[{0}] Case 2: can find encrypted range and return the maximum") + @EnumSource(Type.class) + void testCanFindEncryptedRangeAndReturnTheMaximum(final Type type) { + BsonDocument expression = Filters.and( + Filters.gte(encryptedField, type.convertNumber(6)), + Filters.lte(encryptedField, 
type.convertNumber(200))).toBsonDocument(); + + BsonDocument findPayload = clientEncryption.encryptExpression(expression, encryptQueryOptions); + + List expected = asList( + new BsonDocument(encryptedField, type.convertNumber(6)), + new BsonDocument(encryptedField, type.convertNumber(30)), + new BsonDocument(encryptedField, type.convertNumber(200))); + + List actual = encryptedColl.find(findPayload) + .projection(Projections.fields(Projections.excludeId(), Projections.include(encryptedField))) + .sort(Sorts.ascending("_id")) + .into(new ArrayList<>()); + + assertIterableEquals(expected, actual); + } + + @ParameterizedTest(name = "[{0}] Case 3: can find encrypted range and return the minimum") + @EnumSource(Type.class) + void testCanFindEncryptedRangeAndReturnTheMinimum(final Type type) { + BsonDocument expression = Filters.and( + Filters.gte(encryptedField, type.convertNumber(0)), + Filters.lte(encryptedField, type.convertNumber(6))).toBsonDocument(); + + BsonDocument findPayload = clientEncryption.encryptExpression(expression, encryptQueryOptions); + + List expected = asList( + new BsonDocument(encryptedField, type.convertNumber(0)), + new BsonDocument(encryptedField, type.convertNumber(6))); + + List actual = encryptedColl.find(findPayload) + .projection(Projections.fields(Projections.excludeId(), Projections.include(encryptedField))) + .sort(Sorts.ascending("_id")) + .into(new ArrayList<>()); + + assertIterableEquals(expected, actual); + } + + @ParameterizedTest(name = "[{0}] Case 4: can find encrypted range with an open range query") + @EnumSource(Type.class) + void testCanFindEncryptedRangeWithAnOpenRangeQuery(final Type type) { + BsonDocument expression = Filters.and( + Filters.gt(encryptedField, type.convertNumber(30))).toBsonDocument(); + + BsonDocument findPayload = clientEncryption.encryptExpression(expression, encryptQueryOptions); + + List expected = singletonList(new BsonDocument(encryptedField, type.convertNumber(200))); + + List actual = encryptedColl.find(findPayload) + .projection(Projections.fields(Projections.excludeId(), Projections.include(encryptedField))) + .sort(Sorts.ascending("_id")) + .into(new ArrayList<>()); + + assertIterableEquals(expected, actual); + } + + @ParameterizedTest(name = "[{0}] Case 5: can run an aggregation expression inside $expr") + @EnumSource(Type.class) + void testCanRunAnAggregationExpressionInsideExpr(final Type type) { + BsonDocument expression = new BsonDocument("$and", + new BsonArray(singletonList(new BsonDocument("$lt", + new BsonArray(asList(new BsonString("$" + encryptedField), type.convertNumber(30))))))); + + BsonDocument findPayload = clientEncryption.encryptExpression(expression, encryptQueryOptions); + + List expected = asList( + new BsonDocument(encryptedField, type.convertNumber(0)), + new BsonDocument(encryptedField, type.convertNumber(6))); + + List actual = encryptedColl.find(new BsonDocument("$expr", findPayload)) + .projection(Projections.fields(Projections.excludeId(), Projections.include(encryptedField))) + .sort(Sorts.ascending("_id")) + .into(new ArrayList<>()); + + assertIterableEquals(expected, actual); + } + + @ParameterizedTest(name = "[{0}] Case 6: encrypting a document greater than the maximum errors") + @EnumSource(value = Type.class, mode = EnumSource.Mode.EXCLUDE, names = { "DECIMAL_NO_PRECISION", "DOUBLE_NO_PRECISION" }) + void testEncryptingADocumentGreaterThanTheMaximumErrors(final Type type) { + BsonValue originalValue = type.convertNumber(201); + + assertThrows(MongoClientException.class, () -> 
clientEncryption.encrypt(originalValue, encryptOptions)); + } + + @ParameterizedTest(name = "[{0}] Case 7: encrypting a document of a different type errors") + @EnumSource(value = Type.class, mode = EnumSource.Mode.EXCLUDE, names = { "DECIMAL_NO_PRECISION", "DOUBLE_NO_PRECISION" }) + void testEncryptingADocumentOfADifferentTypeErrors(final Type type) { + BsonValue originalValue = type == Type.INT ? new BsonDouble(6) : new BsonInt32(6); + + assertThrows(MongoClientException.class, () -> clientEncryption.encrypt(originalValue, encryptOptions)); + } + + @ParameterizedTest(name = "[{0}] Case 8: setting precision errors if the type is not a double") + @EnumSource(value = Type.class, mode = EnumSource.Mode.EXCLUDE, + names = {"DECIMAL_PRECISION", "DECIMAL_NO_PRECISION", "DOUBLE_PRECISION", "DOUBLE_NO_PRECISION" }) + void testSettingPrecisionErrorsIfTheTypeIsNotADouble(final Type type) { + BsonValue originalValue = type == Type.INT ? new BsonDouble(6) : new BsonInt32(6); + + EncryptOptions precisionEncryptOptions = new EncryptOptions("Range") + .keyId(key1Id) + .contentionFactor(0L) + .rangeOptions(type.getRangeOptions().precision(2)); + + assertThrows(MongoClientException.class, () -> clientEncryption.encrypt(originalValue, precisionEncryptOptions)); + } + + enum Type { + DECIMAL_PRECISION("DecimalPrecision"), + DECIMAL_NO_PRECISION("DecimalNoPrecision"), + DOUBLE_PRECISION("DoublePrecision"), + DOUBLE_NO_PRECISION("DoubleNoPrecision"), + DATE("Date"), + INT("Int"), + LONG("Long"); + private final String value; + Type(final String value) { + this.value = value; + } + + @Override + public String toString() { + return value; + } + + RangeOptions getRangeOptions() { + RangeOptions rangeOptions = new RangeOptions() + .trimFactor(1) + .sparsity(1L); + switch (this) { + case DECIMAL_NO_PRECISION: + case DOUBLE_NO_PRECISION: + return rangeOptions; + case DECIMAL_PRECISION: + return rangeOptions.precision(2) + .min(new BsonDecimal128(Decimal128.parse("0"))) + .max(new BsonDecimal128(Decimal128.parse("200"))); + case DOUBLE_PRECISION: + return rangeOptions.precision(2).min(new BsonDouble(0)).max(new BsonDouble(200)); + case DATE: + return rangeOptions.min(new BsonDateTime(0)).max(new BsonDateTime(200)); + case INT: + return rangeOptions.min(new BsonInt32(0)).max(new BsonInt32(200)); + case LONG: + return rangeOptions.min(new BsonInt64(0)).max(new BsonInt64(200)); + default: + throw new UnsupportedOperationException("Unsupported Type " + this); + } + } + + BsonValue convertNumber(final int number) { + switch (this) { + case DECIMAL_PRECISION: + case DECIMAL_NO_PRECISION: + return new BsonDecimal128(new Decimal128(number)); + case DOUBLE_PRECISION: + case DOUBLE_NO_PRECISION: + return new BsonDouble(number); + case DATE: + return new BsonDateTime(number); + case INT: + return new BsonInt32(number); + case LONG: + return new BsonInt64(number); + default: + throw new UnsupportedOperationException("Unsupported Type " + this); + } + } + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionUniqueIndexKeyAltNamesTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionUniqueIndexKeyAltNamesTest.java new file mode 100644 index 00000000000..aae95054482 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionUniqueIndexKeyAltNamesTest.java @@ -0,0 +1,135 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
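The fixture above boils the explicit Queryable Encryption range workflow down to two EncryptOptions shapes: one for writing values and one, with queryType("range"), for rewriting query predicates. A minimal sketch of that flow, assuming an already-configured ClientEncryption named clientEncryption and a data key id key1Id (both hypothetical here):

    // Sketch only: clientEncryption and key1Id are assumed to exist.
    RangeOptions range = new RangeOptions()
            .min(new BsonInt32(0)).max(new BsonInt32(200))
            .trimFactor(1).sparsity(1L);

    // Writing: encrypt each value explicitly (the data client runs with bypassQueryAnalysis(true)).
    EncryptOptions writeOptions = new EncryptOptions("Range")
            .keyId(key1Id).contentionFactor(0L).rangeOptions(range);
    BsonBinary encrypted = clientEncryption.encrypt(new BsonInt32(6), writeOptions);

    // Querying: the same options plus queryType("range"), applied to a whole filter expression.
    EncryptOptions queryOptions = new EncryptOptions("Range")
            .keyId(key1Id).queryType("range").contentionFactor(0L).rangeOptions(range);
    BsonDocument findPayload = clientEncryption.encryptExpression(
            Filters.and(Filters.gte("encryptedInt", new BsonInt32(6)),
                    Filters.lte("encryptedInt", new BsonInt32(200))).toBsonDocument(),
            queryOptions);

The encrypted findPayload is then passed to find() unchanged, which is exactly what Cases 2 through 5 assert on.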
diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionUniqueIndexKeyAltNamesTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionUniqueIndexKeyAltNamesTest.java
new file mode 100644
index 00000000000..aae95054482
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionUniqueIndexKeyAltNamesTest.java
@@ -0,0 +1,135 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.MongoCommandException;
+import com.mongodb.MongoWriteException;
+import com.mongodb.WriteConcern;
+import com.mongodb.client.model.vault.DataKeyOptions;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.fixture.EncryptionFixture;
+import org.bson.BsonBinary;
+import org.bson.BsonDocument;
+import org.bson.BsonString;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import java.util.Map;
+
+import static com.mongodb.ClusterFixture.isStandalone;
+import static com.mongodb.ClusterFixture.serverVersionAtLeast;
+import static com.mongodb.client.Fixture.getMongoClientSettings;
+import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder;
+import static com.mongodb.fixture.EncryptionFixture.getKmsProviders;
+import static java.util.Collections.singletonList;
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assumptions.assumeFalse;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
+// See: https://github.com/mongodb/specifications/tree/master/source/client-side-encryption/tests#unique-index-on-keyaltnames
+public abstract class AbstractClientSideEncryptionUniqueIndexKeyAltNamesTest {
+    private MongoClient encryptedClient;
+    private ClientEncryption clientEncryption;
+    private BsonBinary existingKeyId;
+
+
+    protected abstract MongoClient createMongoClient(MongoClientSettings settings);
+
+    protected abstract ClientEncryption createClientEncryption(ClientEncryptionSettings settings);
+
+    @BeforeEach
+    public void setUp() {
+        assumeTrue(serverVersionAtLeast(6, 0));
+        assumeFalse(isStandalone());
+
+        encryptedClient = createMongoClient(getMongoClientSettingsBuilder().build());
+
+        encryptedClient.getDatabase("keyvault").getCollection("datakeys").drop();
+        encryptedClient.getDatabase("keyvault").createCollection("datakeys");
+        encryptedClient.getDatabase("keyvault")
+                .withWriteConcern(WriteConcern.MAJORITY)
+                .runCommand(
+                        BsonDocument.parse("{"
+                                + " 'createIndexes': 'datakeys',"
+                                + " 'indexes': [{"
+                                + "     'name': 'keyAltNames_1',"
+                                + "     'key': { 'keyAltNames': 1 },"
+                                + "     'unique': true,"
+                                + "     'partialFilterExpression': { 'keyAltNames': { '$exists': true } }"
+                                + " }]"
+                                + "}")
+                );
+
+        Map<String, Map<String, Object>> kmsProviders = getKmsProviders(EncryptionFixture.KmsProviderType.LOCAL);
+
+        clientEncryption = createClientEncryption(ClientEncryptionSettings.builder()
+                .keyVaultMongoClientSettings(getMongoClientSettings())
+                .keyVaultNamespace("keyvault.datakeys")
+                .kmsProviders(kmsProviders)
+                .build());
+
+        existingKeyId = clientEncryption.createDataKey("local",
+                new DataKeyOptions().keyAltNames(singletonList("def")));
+
+    }
+
+    @AfterEach
+    @SuppressWarnings("try")
+    public void cleanUp() {
+        //noinspection EmptyTryBlock
+        try (ClientEncryption ignored = this.clientEncryption;
+             MongoClient ignored1 = this.encryptedClient
+        ) {
+            // just using try-with-resources to ensure they all get closed, even in the case of exceptions
+        }
+    }
+
+    @Test
+    public void createKey() {
+        assertDoesNotThrow(() -> clientEncryption.createDataKey("local", new DataKeyOptions().keyAltNames(singletonList("abc"))));
+        MongoWriteException exception = assertThrows(MongoWriteException.class, () -> clientEncryption.createDataKey("local",
+                new DataKeyOptions().keyAltNames(singletonList("abc"))));
+        assertEquals(11000, exception.getCode());
+
+        exception = assertThrows(MongoWriteException.class, () -> clientEncryption.createDataKey("local",
+                new DataKeyOptions().keyAltNames(singletonList("def"))));
+        assertEquals(11000, exception.getCode());
+    }
+
+    @Test
+    public void addKeyAltName() {
+        BsonBinary newKey = assertDoesNotThrow(() -> clientEncryption.createDataKey("local"));
+
+        assertDoesNotThrow(() -> clientEncryption.addKeyAltName(newKey, "abc"));
+
+        BsonDocument results = assertDoesNotThrow(() -> clientEncryption.addKeyAltName(newKey, "abc"));
+        assertTrue(results.getArray("keyAltNames").contains(new BsonString("abc")));
+
+        MongoCommandException exception = assertThrows(MongoCommandException.class, () -> clientEncryption.addKeyAltName(newKey, "def"));
+        assertEquals(11000, exception.getCode());
+
+        results = assertDoesNotThrow(() -> clientEncryption.addKeyAltName(existingKeyId, "def"));
+        assertTrue(results.getArray("keyAltNames").contains(new BsonString("def")));
+    }
+}
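The setUp above issues createIndexes through runCommand so it can pin a majority write concern on the database handle; the same partial unique index can be expressed through the typed index API. A rough equivalent, where collection is a hypothetical handle on keyvault.datakeys:

    // Sketch: a unique index on keyAltNames that only applies to documents where the field exists.
    collection.createIndex(
            Indexes.ascending("keyAltNames"),
            new IndexOptions()
                    .name("keyAltNames_1")
                    .unique(true)
                    .partialFilterExpression(Filters.exists("keyAltNames")));

The partial filter matters: data keys without any keyAltNames must not collide with each other, so uniqueness is only enforced where the field is present, which is what the duplicate-key (11000) assertions in both tests rely on.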
diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideOperationsTimeoutProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideOperationsTimeoutProseTest.java
new file mode 100644
index 00000000000..3ef1cefa105
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideOperationsTimeoutProseTest.java
@@ -0,0 +1,1142 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.ClientBulkWriteException;
+import com.mongodb.ClientSessionOptions;
+import com.mongodb.ClusterFixture;
+import com.mongodb.ConnectionString;
+import com.mongodb.CursorType;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.MongoCredential;
+import com.mongodb.MongoException;
+import com.mongodb.MongoNamespace;
+import com.mongodb.MongoOperationTimeoutException;
+import com.mongodb.MongoSocketReadTimeoutException;
+import com.mongodb.MongoTimeoutException;
+import com.mongodb.ReadConcern;
+import com.mongodb.ReadPreference;
+import com.mongodb.TransactionOptions;
+import com.mongodb.WriteConcern;
+import com.mongodb.client.gridfs.GridFSBucket;
+import com.mongodb.client.gridfs.GridFSDownloadStream;
+import com.mongodb.client.gridfs.GridFSUploadStream;
+import com.mongodb.client.model.CreateCollectionOptions;
+import com.mongodb.client.model.bulk.ClientNamespacedWriteModel;
+import com.mongodb.client.model.changestream.ChangeStreamDocument;
+import com.mongodb.client.model.changestream.FullDocument;
+import com.mongodb.client.test.CollectionHelper;
+import com.mongodb.event.CommandEvent;
+import com.mongodb.event.CommandFailedEvent;
+import com.mongodb.event.CommandStartedEvent;
+import com.mongodb.event.CommandSucceededEvent;
+import com.mongodb.event.ConnectionClosedEvent;
+import com.mongodb.event.ConnectionCreatedEvent;
+import com.mongodb.event.ConnectionReadyEvent;
+import com.mongodb.internal.connection.InternalStreamConnection;
+import com.mongodb.internal.connection.ServerHelper;
+import com.mongodb.internal.connection.TestCommandListener;
+import com.mongodb.internal.connection.TestConnectionPoolListener;
+import com.mongodb.test.FlakyTest;
+import org.bson.BsonArray;
+import org.bson.BsonBoolean;
+import org.bson.BsonDocument;
+import org.bson.BsonInt32;
+import org.bson.BsonString;
+import org.bson.BsonTimestamp;
+import org.bson.Document;
+import org.bson.codecs.BsonDocumentCodec;
+import org.bson.types.ObjectId;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.DisplayName;
+import org.junit.jupiter.api.Named;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+
+import java.time.Instant;
+import java.util.List;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static com.mongodb.ClusterFixture.getConnectionString;
+import static com.mongodb.ClusterFixture.isAuthenticated;
+import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet;
+import static com.mongodb.ClusterFixture.isLoadBalanced;
+import static com.mongodb.ClusterFixture.isStandalone;
+import static com.mongodb.ClusterFixture.serverVersionAtLeast;
+import static com.mongodb.ClusterFixture.sleep;
+import static com.mongodb.client.Fixture.getDefaultDatabaseName;
+import static com.mongodb.client.Fixture.getPrimary;
+import static java.lang.Long.MAX_VALUE;
+import static java.lang.String.join;
+import static java.util.Arrays.asList;
+import static java.util.Collections.nCopies;
+import static java.util.Collections.singletonList;
+import static java.util.concurrent.TimeUnit.NANOSECONDS;
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assumptions.assumeFalse;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
+/**
+ * See the client-side operations timeout
+ * <a href="https://github.com/mongodb/specifications/tree/master/source/client-side-operations-timeout/tests">Prose Tests</a>.
+ */
+@SuppressWarnings("checkstyle:VisibilityModifier")
+public abstract class AbstractClientSideOperationsTimeoutProseTest {
+
+    protected static final String FAIL_COMMAND_NAME = "failCommand";
+    protected static final String GRID_FS_BUCKET_NAME = "db.fs";
+    private static final AtomicInteger COUNTER = new AtomicInteger();
+    private ExecutorService executor;
+
+    protected MongoNamespace namespace;
+    protected MongoNamespace gridFsFileNamespace;
+    protected MongoNamespace gridFsChunksNamespace;
+
+    protected CollectionHelper<BsonDocument> collectionHelper;
+    private CollectionHelper<BsonDocument> filesCollectionHelper;
+    private CollectionHelper<BsonDocument> chunksCollectionHelper;
+
+    protected TestCommandListener commandListener;
+
+    protected abstract MongoClient createMongoClient(MongoClientSettings mongoClientSettings);
+
+    protected abstract GridFSBucket createGridFsBucket(MongoDatabase mongoDatabase, String bucketName);
+
+    protected abstract boolean isAsync();
+
+    protected int postSessionCloseSleep() {
+        return 0;
+    }
+
+    @SuppressWarnings("try")
+    @FlakyTest(maxAttempts = 3)
+    @DisplayName("4. Background Connection Pooling - timeoutMS used for handshake commands")
+    public void testBackgroundConnectionPoolingTimeoutMSUsedForHandshakeCommands() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeTrue(isAuthenticated());
+
+        collectionHelper.runAdminCommand("{"
+                + "    configureFailPoint: \"" + FAIL_COMMAND_NAME + "\","
+                + "    mode: {"
+                + "        times: 1"
+                + "    },"
+                + "    data: {"
+                + "        failCommands: [\"saslContinue\"],"
+                + "        blockConnection: true,"
+                + "        blockTimeMS: 150,"
+                + "        appName: \"timeoutBackgroundPoolTest\""
+                + "    }"
+                + "}");
+
+        TestConnectionPoolListener connectionPoolListener = new TestConnectionPoolListener();
+
+        try (MongoClient ignoredClient = createMongoClient(getMongoClientSettingsBuilder()
+                .applicationName("timeoutBackgroundPoolTest")
+                .applyToConnectionPoolSettings(builder -> {
+                    builder.minSize(1);
+                    builder.addConnectionPoolListener(connectionPoolListener);
+                })
+                .timeout(100, TimeUnit.MILLISECONDS))) {
+
+            assertDoesNotThrow(() ->
+                    connectionPoolListener.waitForEvents(asList(ConnectionCreatedEvent.class, ConnectionClosedEvent.class),
+                            10, TimeUnit.SECONDS));
+        }
+    }
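Every prose test in this file drives latency with the same failCommand fail point. Stripped of the test plumbing, the on/off cycle is roughly the following (a sketch, assuming an admin-capable MongoClient named client; the ping command is just an illustration):

    MongoDatabase admin = client.getDatabase("admin");
    // Enable: block the next ping for 200ms before the server responds.
    admin.runCommand(Document.parse(
            "{ configureFailPoint: 'failCommand',"
            + "  mode: { times: 1 },"
            + "  data: { failCommands: ['ping'], blockConnection: true, blockTimeMS: 200 } }"));
    // ... exercise the behaviour under test ...
    // Disable when done.
    admin.runCommand(Document.parse("{ configureFailPoint: 'failCommand', mode: 'off' }"));

Choosing a blockTimeMS just above the client's timeoutMS (as the tests above and below do) forces a deterministic MongoOperationTimeoutException without depending on real network conditions.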
+
+    @SuppressWarnings("try")
+    @FlakyTest(maxAttempts = 3)
+    @DisplayName("4. Background Connection Pooling - timeoutMS is refreshed for each handshake command")
+    public void testBackgroundConnectionPoolingTimeoutMSIsRefreshedForEachHandshakeCommand() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeTrue(isAuthenticated());
+
+        collectionHelper.runAdminCommand("{"
+                + "    configureFailPoint: \"" + FAIL_COMMAND_NAME + "\","
+                + "    mode: \"alwaysOn\","
+                + "    data: {"
+                + "        failCommands: [\"hello\", \"isMaster\", \"saslContinue\"],"
+                + "        blockConnection: true,"
+                + "        blockTimeMS: 150,"
+                + "        appName: \"refreshTimeoutBackgroundPoolTest\""
+                + "    }"
+                + "}");
+
+        TestConnectionPoolListener connectionPoolListener = new TestConnectionPoolListener();
+
+        try (MongoClient ignoredClient = createMongoClient(getMongoClientSettingsBuilder()
+                .applicationName("refreshTimeoutBackgroundPoolTest")
+                .applyToConnectionPoolSettings(builder -> {
+                    builder.minSize(1);
+                    builder.addConnectionPoolListener(connectionPoolListener);
+                })
+                .timeout(250, TimeUnit.MILLISECONDS))) {
+
+            assertDoesNotThrow(() ->
+                    connectionPoolListener.waitForEvents(asList(ConnectionCreatedEvent.class, ConnectionReadyEvent.class),
+                            10, TimeUnit.SECONDS));
+        }
+    }
+
+    @FlakyTest(maxAttempts = 3)
+    @DisplayName("5. Blocking Iteration Methods - Tailable cursors")
+    public void testBlockingIterationMethodsTailableCursor() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+
+        collectionHelper.create(namespace.getCollectionName(),
+                new CreateCollectionOptions().capped(true).sizeInBytes(10 * 1024 * 1024));
+        collectionHelper.insertDocuments(singletonList(BsonDocument.parse("{x: 1}")), WriteConcern.MAJORITY);
+        collectionHelper.runAdminCommand("{"
+                + "    configureFailPoint: \"failCommand\","
+                + "    mode: \"alwaysOn\","
+                + "    data: {"
+                + "        failCommands: [\"getMore\"],"
+                + "        blockConnection: true,"
+                + "        blockTimeMS: " + 150
+                + "    }"
+                + "}");
+
+        try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder()
+                .timeout(250, TimeUnit.MILLISECONDS))) {
+            MongoCollection<Document> collection = client.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName());
+
+            try (MongoCursor<Document> cursor = collection.find().cursorType(CursorType.Tailable).cursor()) {
+                Document document = assertDoesNotThrow(cursor::next);
+                assertEquals(1, document.get("x"));
+                assertThrows(MongoOperationTimeoutException.class, cursor::next);
+            }
+
+            List<CommandSucceededEvent> events = commandListener.getCommandSucceededEvents();
+            assertEquals(1, events.stream().filter(e -> e.getCommandName().equals("find")).count());
+            long getMoreCount = events.stream().filter(e -> e.getCommandName().equals("getMore")).count();
+            assertTrue(getMoreCount <= 2, "getMoreCount expected to be less than or equal to two but was: " + getMoreCount);
+        }
+    }
+
+    @FlakyTest(maxAttempts = 3)
+    @DisplayName("5. Blocking Iteration Methods - Change Streams")
+    public void testBlockingIterationMethodsChangeStream() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeTrue(isDiscoverableReplicaSet());
+        assumeFalse(isAsync()); // Async change stream cursor is non-deterministic for cursor::next
+
+        BsonTimestamp startTime = new BsonTimestamp((int) Instant.now().getEpochSecond(), 0);
+        collectionHelper.create(namespace.getCollectionName(), new CreateCollectionOptions());
+        sleep(2000);
+        collectionHelper.insertDocuments(singletonList(BsonDocument.parse("{x: 1}")), WriteConcern.MAJORITY);
+
+        collectionHelper.runAdminCommand("{"
+                + "    configureFailPoint: \"failCommand\","
+                + "    mode: \"alwaysOn\","
+                + "    data: {"
+                + "        failCommands: [\"getMore\"],"
+                + "        blockConnection: true,"
+                + "        blockTimeMS: " + 150
+                + "    }"
+                + "}");
+
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder()
+                .timeout(250, TimeUnit.MILLISECONDS))) {
+
+            MongoCollection<Document> collection = mongoClient.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName()).withReadPreference(ReadPreference.primary());
+            try (MongoChangeStreamCursor<ChangeStreamDocument<Document>> cursor = collection.watch(
+                    singletonList(Document.parse("{ '$match': {'operationType': 'insert'}}")))
+                    .startAtOperationTime(startTime)
+                    .fullDocument(FullDocument.UPDATE_LOOKUP)
+                    .cursor()) {
+                ChangeStreamDocument<Document> document = assertDoesNotThrow(cursor::next);
+
+                Document fullDocument = document.getFullDocument();
+                assertNotNull(fullDocument);
+                assertEquals(1, fullDocument.get("x"));
+                assertThrows(MongoOperationTimeoutException.class, cursor::next);
+            }
+            List<CommandSucceededEvent> events = commandListener.getCommandSucceededEvents();
+            assertEquals(1, events.stream().filter(e -> e.getCommandName().equals("aggregate")).count());
+            long getMoreCount = events.stream().filter(e -> e.getCommandName().equals("getMore")).count();
+            assertTrue(getMoreCount <= 2, "getMoreCount expected to be less than or equal to two but was: " + getMoreCount);
+        }
+    }
+
+    @DisplayName("6. GridFS Upload - uploads via openUploadStream can be timed out")
+    @FlakyTest(maxAttempts = 3)
+    public void testGridFSUploadViaOpenUploadStreamTimeout() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        long rtt = ClusterFixture.getPrimaryRTT();
+
+        collectionHelper.runAdminCommand("{"
+                + "    configureFailPoint: \"failCommand\","
+                + "    mode: { times: 1 },"
+                + "    data: {"
+                + "        failCommands: [\"insert\"],"
+                + "        blockConnection: true,"
+                + "        blockTimeMS: " + (rtt + 205)
+                + "    }"
+                + "}");
+
+        chunksCollectionHelper.create();
+        filesCollectionHelper.create();
+
+        try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder()
+                .timeout(rtt + 200, TimeUnit.MILLISECONDS))) {
+            MongoDatabase database = client.getDatabase(namespace.getDatabaseName());
+            GridFSBucket gridFsBucket = createGridFsBucket(database, GRID_FS_BUCKET_NAME);
+
+            try (GridFSUploadStream uploadStream = gridFsBucket.openUploadStream("filename")) {
+                uploadStream.write(0x12);
+                assertThrows(MongoOperationTimeoutException.class, uploadStream::close);
+            }
+        }
+    }
+
+    @DisplayName("6. GridFS Upload - Aborting an upload stream can be timed out")
+    @Test
+    public void testAbortingGridFsUploadStreamTimeout() throws Throwable {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        long rtt = ClusterFixture.getPrimaryRTT();
+
+        collectionHelper.runAdminCommand("{"
+                + "    configureFailPoint: \"failCommand\","
+                + "    mode: { times: 1 },"
+                + "    data: {"
+                + "        failCommands: [\"delete\"],"
+                + "        blockConnection: true,"
+                + "        blockTimeMS: " + (rtt + 305)
+                + "    }"
+                + "}");
+
+        chunksCollectionHelper.create();
+        filesCollectionHelper.create();
+
+        try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder()
+                .timeout(rtt + 300, TimeUnit.MILLISECONDS))) {
+            MongoDatabase database = client.getDatabase(namespace.getDatabaseName());
+            GridFSBucket gridFsBucket = createGridFsBucket(database, GRID_FS_BUCKET_NAME).withChunkSizeBytes(2);
+
+            try (GridFSUploadStream uploadStream = gridFsBucket.openUploadStream("filename")) {
+                uploadStream.write(new byte[]{0x01, 0x02, 0x03, 0x04});
+                assertThrows(MongoOperationTimeoutException.class, uploadStream::abort);
+            }
+        }
+    }
+
+    @DisplayName("6. GridFS Download")
+    @Test
+    public void testGridFsDownloadStreamTimeout() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        long rtt = ClusterFixture.getPrimaryRTT();
+
+        chunksCollectionHelper.create();
+        filesCollectionHelper.create();
+
+        filesCollectionHelper.insertDocuments(singletonList(BsonDocument.parse(
+                "{"
+                        + "   _id: {"
+                        + "       $oid: \"000000000000000000000005\""
+                        + "   },"
+                        + "   length: 10,"
+                        + "   chunkSize: 4,"
+                        + "   uploadDate: {"
+                        + "       $date: \"1970-01-01T00:00:00.000Z\""
+                        + "   },"
+                        + "   md5: \"57d83cd477bfb1ccd975ab33d827a92b\","
+                        + "   filename: \"length-10\","
+                        + "   contentType: \"application/octet-stream\","
+                        + "   aliases: [],"
+                        + "   metadata: {}"
+                        + "}"
+        )), WriteConcern.MAJORITY);
+        collectionHelper.runAdminCommand("{"
+                + "    configureFailPoint: \"failCommand\","
+                + "    mode: { skip: 1 },"
+                + "    data: {"
+                + "        failCommands: [\"find\"],"
+                + "        blockConnection: true,"
+                + "        blockTimeMS: " + (rtt + 95)
+                + "    }"
+                + "}");
+
+        try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder()
+                .timeout(rtt + 100, TimeUnit.MILLISECONDS))) {
+            MongoDatabase database = client.getDatabase(namespace.getDatabaseName());
+            GridFSBucket gridFsBucket = createGridFsBucket(database, GRID_FS_BUCKET_NAME).withChunkSizeBytes(2);
+
+            try (GridFSDownloadStream downloadStream = gridFsBucket.openDownloadStream(new ObjectId("000000000000000000000005"))) {
+                assertThrows(MongoOperationTimeoutException.class, downloadStream::read);
+
+                List<CommandStartedEvent> events = commandListener.getCommandStartedEvents();
+                List<CommandStartedEvent> findCommands = events.stream().filter(e -> e.getCommandName().equals("find")).collect(Collectors.toList());
+
+                assertEquals(2, findCommands.size());
+                assertEquals(gridFsFileNamespace.getCollectionName(), findCommands.get(0).getCommand().getString("find").getValue());
+                assertEquals(gridFsChunksNamespace.getCollectionName(), findCommands.get(1).getCommand().getString("find").getValue());
+            }
+        }
+    }
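All of the cases above hang off a single knob: the client-wide timeout set through MongoClientSettings. In isolation, a CSOT-limited client looks like this (a sketch; the host and database names are illustrative):

    MongoClientSettings settings = MongoClientSettings.builder()
            .applyConnectionString(new ConnectionString("mongodb://localhost:27017"))
            .timeout(250, TimeUnit.MILLISECONDS)
            .build();
    try (MongoClient client = MongoClients.create(settings)) {
        // Each operation on this client must finish within 250ms end to end,
        // covering server selection, connection checkout and the server round trip.
        client.getDatabase("test").runCommand(new Document("ping", 1));
    }

Adding rtt (the measured primary round-trip time) to the fail point's blockTimeMS, as the GridFS tests do, keeps the margin between "just under" and "just over" the deadline stable across slow and fast test hosts.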
+
+    @DisplayName("8. Server Selection 1 / 2")
+    @ParameterizedTest(name = "[{index}] {0}")
+    @MethodSource("test8ServerSelectionArguments")
+    public void test8ServerSelection(final String connectionString) {
+        int timeoutBuffer = 100; // 5 in spec, Java is slower
+        // 1. Create a MongoClient
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder()
+                .applyConnectionString(new ConnectionString(connectionString)))
+        ) {
+            long start = System.nanoTime();
+            // 2. Using client, execute:
+            Throwable throwable = assertThrows(MongoTimeoutException.class, () -> {
+                mongoClient.getDatabase("admin").runCommand(new BsonDocument("ping", new BsonInt32(1)));
+            });
+            // Expect this to fail with a server selection timeout error after no more than 15ms [this is increased]
+            long elapsed = msElapsedSince(start);
+            assertTrue(throwable.getMessage().contains("while waiting for a server"));
+            assertTrue(elapsed < 10 + timeoutBuffer, "Took too long to time out, elapsedMS: " + elapsed);
+        }
+    }
+
+    @DisplayName("8. Server Selection 2 / 2")
+    @ParameterizedTest(name = "[{index}] {0}")
+    @MethodSource("test8ServerSelectionHandshakeArguments")
+    public void test8ServerSelectionHandshake(final String ignoredTestName, final int timeoutMS, final int serverSelectionTimeoutMS) {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeTrue(isAuthenticated());
+
+        MongoCredential credential = getConnectionString().getCredential();
+        assertNotNull(credential);
+        assertNull(credential.getAuthenticationMechanism());
+
+        MongoNamespace namespace = generateNamespace();
+        collectionHelper = new CollectionHelper<>(new BsonDocumentCodec(), namespace);
+        collectionHelper.runAdminCommand("{"
+                + "    configureFailPoint: \"failCommand\","
+                + "    mode: \"alwaysOn\","
+                + "    data: {"
+                + "        failCommands: [\"saslContinue\"],"
+                + "        blockConnection: true,"
+                + "        blockTimeMS: 350"
+                + "    }"
+                + "}");
+
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder()
+                .timeout(timeoutMS, TimeUnit.MILLISECONDS)
+                .applyToClusterSettings(b -> b.serverSelectionTimeout(serverSelectionTimeoutMS, TimeUnit.MILLISECONDS))
+                .retryWrites(false))) {
+
+            long start = System.nanoTime();
+            assertThrows(MongoOperationTimeoutException.class, () -> {
+                mongoClient.getDatabase(namespace.getDatabaseName())
+                        .getCollection(namespace.getCollectionName())
+                        .insertOne(new Document("x", 1));
+            });
+            long elapsed = msElapsedSince(start);
+            assertTrue(elapsed <= 310, "Took too long to time out, elapsedMS: " + elapsed);
+        }
+    }
+
+    @SuppressWarnings("try")
+    @DisplayName("9. End Session. The timeout specified via the MongoClient timeoutMS option")
+    @FlakyTest(maxAttempts = 3)
+    public void test9EndSessionClientTimeout() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeFalse(isStandalone());
+
+        collectionHelper.runAdminCommand("{"
+                + "    configureFailPoint: \"failCommand\","
+                + "    mode: { times: 1 },"
+                + "    data: {"
+                + "        failCommands: [\"abortTransaction\"],"
+                + "        blockConnection: true,"
+                + "        blockTimeMS: " + 150
+                + "    }"
+                + "}");
+
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder().retryWrites(false)
+                .timeout(100, TimeUnit.MILLISECONDS))) {
+            MongoCollection<Document> collection = mongoClient.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName());
+
+            try (ClientSession session = mongoClient.startSession()) {
+                session.startTransaction();
+                collection.insertOne(session, new Document("x", 1));
+
+                long start = System.nanoTime();
+                session.close();
+                long elapsed = msElapsedSince(start) - postSessionCloseSleep();
+                assertTrue(elapsed <= 150, "Took too long to time out, elapsedMS: " + elapsed);
+            }
+        }
+        CommandFailedEvent abortTransactionEvent = assertDoesNotThrow(() ->
+                commandListener.getCommandFailedEvent("abortTransaction"));
+        assertInstanceOf(MongoOperationTimeoutException.class, abortTransactionEvent.getThrowable());
+    }
+
+    @SuppressWarnings("try")
+    @DisplayName("9. End Session. The timeout specified via the ClientSession defaultTimeoutMS option")
+    @Test
+    public void test9EndSessionSessionTimeout() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeFalse(isStandalone());
+
+        collectionHelper.runAdminCommand("{"
+                + "    configureFailPoint: \"failCommand\","
+                + "    mode: { times: 1 },"
+                + "    data: {"
+                + "        failCommands: [\"abortTransaction\"],"
+                + "        blockConnection: true,"
+                + "        blockTimeMS: " + 150
+                + "    }"
+                + "}");
+
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder())) {
+            MongoCollection<Document> collection = mongoClient.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName());
+
+            try (ClientSession session = mongoClient.startSession(ClientSessionOptions.builder()
+                    .defaultTimeout(100, TimeUnit.MILLISECONDS).build())) {
+                session.startTransaction();
+                collection.insertOne(session, new Document("x", 1));
+
+                long start = System.nanoTime();
+                session.close();
+                long elapsed = msElapsedSince(start) - postSessionCloseSleep();
+                assertTrue(elapsed <= 150, "Took too long to time out, elapsedMS: " + elapsed);
+            }
+        }
+        CommandFailedEvent abortTransactionEvent = assertDoesNotThrow(() ->
+                commandListener.getCommandFailedEvent("abortTransaction"));
+        assertInstanceOf(MongoOperationTimeoutException.class, abortTransactionEvent.getThrowable());
+    }
+
+    @DisplayName("9. End Session - Custom Test: Each operation has its own timeout with commit")
+    @Test
+    public void test9EndSessionCustomTestEachOperationHasItsOwnTimeoutWithCommit() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeFalse(isStandalone());
+        collectionHelper.runAdminCommand("{"
+                + "    configureFailPoint: \"failCommand\","
+                + "    mode: { times: 1 },"
+                + "    data: {"
+                + "        failCommands: [\"insert\"],"
+                + "        blockConnection: true,"
+                + "        blockTimeMS: " + 25
+                + "    }"
+                + "}");
+
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder())) {
+            MongoCollection<Document> collection = mongoClient.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName());
+
+            try (ClientSession session = mongoClient.startSession(ClientSessionOptions.builder()
+                    .defaultTimeout(200, TimeUnit.MILLISECONDS).build())) {
+                session.startTransaction();
+                collection.insertOne(session, new Document("x", 1));
+                sleep(200);
+
+                assertDoesNotThrow(session::commitTransaction);
+            }
+        }
+        assertDoesNotThrow(() -> commandListener.getCommandSucceededEvent("commitTransaction"));
+    }
+
+    @DisplayName("9. End Session - Custom Test: Each operation has its own timeout with abort")
+    @Test
+    public void test9EndSessionCustomTestEachOperationHasItsOwnTimeoutWithAbort() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeFalse(isStandalone());
+        collectionHelper.runAdminCommand("{"
+                + "    configureFailPoint: \"failCommand\","
+                + "    mode: { times: 1 },"
+                + "    data: {"
+                + "        failCommands: [\"insert\"],"
+                + "        blockConnection: true,"
+                + "        blockTimeMS: " + 25
+                + "    }"
+                + "}");
+
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder())) {
+            MongoCollection<Document> collection = mongoClient.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName());
+
+            try (ClientSession session = mongoClient.startSession(ClientSessionOptions.builder()
+                    .defaultTimeout(200, TimeUnit.MILLISECONDS).build())) {
+                session.startTransaction();
+                collection.insertOne(session, new Document("x", 1));
+                sleep(200);
+
+                assertDoesNotThrow(session::close);
+            }
+        }
+        assertDoesNotThrow(() -> commandListener.getCommandSucceededEvent("abortTransaction"));
+    }
+
+    @DisplayName("10. Convenient Transactions")
+    @Test
+    public void test10ConvenientTransactions() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeFalse(isStandalone());
+        assumeFalse(isAsync());
+        collectionHelper.runAdminCommand("{"
+                + "    configureFailPoint: \"failCommand\","
+                + "    mode: { times: 2 },"
+                + "    data: {"
+                + "        failCommands: [\"insert\", \"abortTransaction\"],"
+                + "        blockConnection: true,"
+                + "        blockTimeMS: " + 150
+                + "    }"
+                + "}");
+
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder()
+                .timeout(100, TimeUnit.MILLISECONDS))) {
+            MongoCollection<Document> collection = mongoClient.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName());
+
+            try (ClientSession session = mongoClient.startSession()) {
+                assertThrows(MongoOperationTimeoutException.class,
+                        () -> session.withTransaction(() -> collection.insertOne(session, new Document("x", 1))));
+            }
+
+            List<CommandEvent> failedEvents = commandListener.getEvents().stream()
+                    .filter(e -> e instanceof CommandFailedEvent)
+                    .collect(Collectors.toList());
+
+            assertEquals(1, failedEvents.stream().filter(e -> e.getCommandName().equals("insert")).count());
+            assertEquals(1, failedEvents.stream().filter(e -> e.getCommandName().equals("abortTransaction")).count());
+        }
+    }
+
+    @DisplayName("10. Convenient Transactions - Custom Test: with transaction uses a single timeout")
+    @Test
+    public void test10CustomTestWithTransactionUsesASingleTimeout() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeFalse(isStandalone());
+        assumeFalse(isAsync());
+        collectionHelper.runAdminCommand("{"
+                + "    configureFailPoint: \"failCommand\","
+                + "    mode: { times: 1 },"
+                + "    data: {"
+                + "        failCommands: [\"insert\"],"
+                + "        blockConnection: true,"
+                + "        blockTimeMS: " + 25
+                + "    }"
+                + "}");
+
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder())) {
+            MongoCollection<Document> collection = mongoClient.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName());
+
+            try (ClientSession session = mongoClient.startSession(ClientSessionOptions.builder()
+                    .defaultTimeout(200, TimeUnit.MILLISECONDS).build())) {
+                assertThrows(MongoOperationTimeoutException.class,
+                        () -> session.withTransaction(() -> {
+                            collection.insertOne(session, new Document("x", 1));
+                            sleep(200);
+                            return true;
+                        })
+                );
+            }
+        }
+    }
+
+    @DisplayName("10. Convenient Transactions - Custom Test: with transaction uses a single timeout - lock")
+    @Test
+    public void test10CustomTestWithTransactionUsesASingleTimeoutWithLock() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeFalse(isStandalone());
+        assumeFalse(isAsync());
+        collectionHelper.runAdminCommand("{"
+                + "    configureFailPoint: \"failCommand\","
+                + "    mode: \"alwaysOn\","
+                + "    data: {"
+                + "        failCommands: [\"insert\"],"
+                + "        blockConnection: true,"
+                + "        blockTimeMS: " + 25 + ","
+                + "        errorCode: " + 24 + ","
+                + "        errorLabels: [\"TransientTransactionError\"]"
+                + "    }"
+                + "}");
+
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder())) {
+            MongoCollection<Document> collection = mongoClient.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName());
+
+            try (ClientSession session = mongoClient.startSession(ClientSessionOptions.builder()
+                    .defaultTimeout(200, TimeUnit.MILLISECONDS).build())) {
+                assertThrows(MongoOperationTimeoutException.class,
+                        () -> session.withTransaction(() -> {
+                            collection.insertOne(session, new Document("x", 1));
+                            sleep(200);
+                            return true;
+                        })
+                );
+            }
+        }
+    }
+
+    @DisplayName("11. Multi-batch bulkWrites")
+    @Test
+    @SuppressWarnings("try")
+    protected void test11MultiBatchBulkWrites() throws InterruptedException {
+        assumeTrue(serverVersionAtLeast(8, 0));
+        try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder())) {
+            // a workaround for https://jira.mongodb.org/browse/DRIVERS-2997, remove this block when the aforementioned bug is fixed
+            client.getDatabase(namespace.getDatabaseName()).drop();
+        }
+        BsonDocument failPointDocument = new BsonDocument("configureFailPoint", new BsonString("failCommand"))
+                .append("mode", new BsonDocument("times", new BsonInt32(2)))
+                .append("data", new BsonDocument("failCommands", new BsonArray(singletonList(new BsonString("bulkWrite"))))
+                        .append("blockConnection", BsonBoolean.TRUE)
+                        .append("blockTimeMS", new BsonInt32(2020)));
+        try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder().timeout(4000, TimeUnit.MILLISECONDS));
+             FailPoint ignored = FailPoint.enable(failPointDocument, getPrimary())) {
+            MongoDatabase db = client.getDatabase(namespace.getDatabaseName());
+            db.drop();
+            Document helloResponse = db.runCommand(new Document("hello", 1));
+            int maxBsonObjectSize = helloResponse.getInteger("maxBsonObjectSize");
+            int maxMessageSizeBytes = helloResponse.getInteger("maxMessageSizeBytes");
+            ClientNamespacedWriteModel model = ClientNamespacedWriteModel.insertOne(
+                    namespace,
+                    new Document("a", join("", nCopies(maxBsonObjectSize - 500, "b"))));
+            MongoException topLevelError = assertThrows(ClientBulkWriteException.class, () ->
+                    client.bulkWrite(nCopies(maxMessageSizeBytes / maxBsonObjectSize + 1, model)))
+                    .getCause();
+            assertNotNull(topLevelError);
+            assertInstanceOf(MongoOperationTimeoutException.class, topLevelError);
+            assertEquals(2, commandListener.getCommandStartedEvents("bulkWrite").size());
+        }
+    }
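The "each operation has its own timeout" and "single timeout" cases above hinge on where the deadline is attached: a session-level defaultTimeout gives every manual operation a fresh budget, while withTransaction runs the whole callback plus commit under one budget. The session-level variant in isolation, as a sketch (client and collection are assumed to exist):

    ClientSessionOptions sessionOptions = ClientSessionOptions.builder()
            .defaultTimeout(200, TimeUnit.MILLISECONDS)
            .build();
    try (ClientSession session = client.startSession(sessionOptions)) {
        session.startTransaction();
        collection.insertOne(session, new Document("x", 1)); // fresh 200ms budget
        session.commitTransaction();                         // fresh 200ms budget again
    }

This is why sleeping 200ms between insert and commit succeeds in the manual tests but makes withTransaction throw MongoOperationTimeoutException.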
+
+    /**
+     * Not a prose spec test. However, it is an additional test case for better coverage.
+     */
+    @Test
+    @DisplayName("Should ignore wTimeoutMS of WriteConcern to initial and subsequent commitTransaction operations")
+    public void shouldIgnoreWtimeoutMsOfWriteConcernToInitialAndSubsequentCommitTransactionOperations() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeFalse(isStandalone());
+
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder())) {
+            MongoCollection<Document> collection = mongoClient.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName());
+
+            try (ClientSession session = mongoClient.startSession(ClientSessionOptions.builder()
+                    .defaultTimeout(200, TimeUnit.MILLISECONDS)
+                    .build())) {
+                session.startTransaction(TransactionOptions.builder()
+                        .writeConcern(WriteConcern.ACKNOWLEDGED.withWTimeout(100, TimeUnit.MILLISECONDS))
+                        .build());
+                collection.insertOne(session, new Document("x", 1));
+                sleep(200);
+
+                assertDoesNotThrow(session::commitTransaction);
+                //repeat commit.
+                assertDoesNotThrow(session::commitTransaction);
+            }
+        }
+        List<CommandStartedEvent> commandStartedEvents = commandListener.getCommandStartedEvents("commitTransaction");
+        assertEquals(2, commandStartedEvents.size());
+
+        commandStartedEvents.forEach(e -> {
+            BsonDocument command = e.getCommand();
+            if (command.containsKey("writeConcern")) {
+                BsonDocument writeConcern = command.getDocument("writeConcern");
+                assertFalse(writeConcern.isEmpty());
+                assertFalse(writeConcern.containsKey("wtimeout"));
+            }
+        });
+    }
+
+    /**
+     * Not a prose spec test. However, it is an additional test case for better coverage.
+     */
+    @Test
+    @DisplayName("Should ignore waitQueueTimeoutMS when timeoutMS is set")
+    public void shouldIgnoreWaitQueueTimeoutMSWhenTimeoutMsIsSet() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+
+        //given
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder()
+                .timeout(500, TimeUnit.MILLISECONDS)
+                .applyToConnectionPoolSettings(builder -> builder
+                        .maxWaitTime(1, TimeUnit.MILLISECONDS)
+                        .maxSize(1)
+                ))) {
+            MongoCollection<Document> collection = mongoClient.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName());
+
+            collectionHelper.runAdminCommand("{"
+                    + "    configureFailPoint: \"failCommand\","
+                    + "    mode: { times: 1},"
+                    + "    data: {"
+                    + "        failCommands: [\"find\" ],"
+                    + "        blockConnection: true,"
+                    + "        blockTimeMS: " + 300
+                    + "    }"
+                    + "}");
+
+            executor.submit(() -> collection.find().first());
+            sleep(100);
+
+            //when && then
+            assertDoesNotThrow(() -> collection.find().first());
+        }
+    }
+
+    /**
+     * Not a prose spec test. However, it is an additional test case for better coverage.
+     */
+    @Test
+    @DisplayName("Should throw MongoOperationTimeoutException when connection is not available and timeoutMS is set")
+    public void shouldThrowOperationTimeoutExceptionWhenConnectionIsNotAvailableAndTimeoutMSIsSet() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+
+        //given
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder()
+                .timeout(100, TimeUnit.MILLISECONDS)
+                .applyToConnectionPoolSettings(builder -> builder
+                        .maxSize(1)
+                ))) {
+            MongoCollection<Document> collection = mongoClient.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName());
+
+            collectionHelper.runAdminCommand("{"
+                    + "    configureFailPoint: \"failCommand\","
+                    + "    mode: { times: 1},"
+                    + "    data: {"
+                    + "        failCommands: [\"find\" ],"
+                    + "        blockConnection: true,"
+                    + "        blockTimeMS: " + 500
+                    + "    }"
+                    + "}");
+
+            executor.submit(() -> collection.withTimeout(0, TimeUnit.MILLISECONDS).find().first());
+            sleep(100);
+
+            //when && then
+            assertThrows(MongoOperationTimeoutException.class, () -> collection.find().first());
+        }
+    }
+
+    /**
+     * Not a prose spec test. However, it is an additional test case for better coverage.
+     */
+    @Test
+    @DisplayName("Should use waitQueueTimeoutMS when timeoutMS is not set")
+    public void shouldUseWaitQueueTimeoutMSWhenTimeoutIsNotSet() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+
+        //given
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder()
+                .applyToConnectionPoolSettings(builder -> builder
+                        .maxWaitTime(100, TimeUnit.MILLISECONDS)
+                        .maxSize(1)
+                ))) {
+            MongoCollection<Document> collection = mongoClient.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName());
+
+            collectionHelper.runAdminCommand("{"
+                    + "    configureFailPoint: \"failCommand\","
+                    + "    mode: { times: 1},"
+                    + "    data: {"
+                    + "        failCommands: [\"find\" ],"
+                    + "        blockConnection: true,"
+                    + "        blockTimeMS: " + 300
+                    + "    }"
+                    + "}");
+
+            executor.submit(() -> collection.find().first());
+            sleep(100);
+
+            //when & then
+            assertThrows(MongoTimeoutException.class, () -> collection.find().first());
+        }
+    }
+
+    /**
+     * Not a prose spec test. However, it is an additional test case for better coverage.
+     */
+    @DisplayName("KillCursors is not executed after getMore network error when timeout is not enabled")
+    @Test
+    public void testKillCursorsIsNotExecutedAfterGetMoreNetworkErrorWhenTimeoutIsNotEnabled() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeTrue(isLoadBalanced());
+
+        long rtt = ClusterFixture.getPrimaryRTT();
+        collectionHelper.create(namespace.getCollectionName(), new CreateCollectionOptions());
+        collectionHelper.insertDocuments(new Document(), new Document());
+        collectionHelper.runAdminCommand("{"
+                + "    configureFailPoint: \"failCommand\","
+                + "    mode: { times: 1},"
+                + "    data: {"
+                + "        failCommands: [\"getMore\" ],"
+                + "        blockConnection: true,"
+                + "        blockTimeMS: " + (rtt + 600)
+                + "    }"
+                + "}");
+
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder()
+                .retryReads(true)
+                .applyToSocketSettings(builder -> builder.readTimeout(500, TimeUnit.MILLISECONDS)))) {
+
+            MongoCollection<Document> collection = mongoClient.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName()).withReadPreference(ReadPreference.primary());
+
+            MongoCursor<Document> cursor = collection.find()
+                    .batchSize(1)
+                    .cursor();
+
+            cursor.next();
+            assertThrows(MongoSocketReadTimeoutException.class, cursor::next);
+            cursor.close();
+        }
+
+        List<CommandStartedEvent> events = commandListener.getCommandStartedEvents();
+        assertEquals(2, events.size(), "Actual events: " + events.stream()
+                .map(CommandStartedEvent::getCommandName)
+                .collect(Collectors.toList()));
+        assertEquals(1, events.stream().filter(e -> e.getCommandName().equals("find")).count());
+        assertEquals(1, events.stream().filter(e -> e.getCommandName().equals("getMore")).count());
+
+    }
+
+    /**
+     * Not a prose spec test. However, it is an additional test case for better coverage.
+     */
+    @DisplayName("KillCursors is not executed after getMore network error")
+    @Test
+    public void testKillCursorsIsNotExecutedAfterGetMoreNetworkError() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeTrue(isLoadBalanced());
+
+        long rtt = ClusterFixture.getPrimaryRTT();
+        collectionHelper.create(namespace.getCollectionName(), new CreateCollectionOptions());
+        collectionHelper.insertDocuments(new Document(), new Document());
+        collectionHelper.runAdminCommand("{"
+                + "    configureFailPoint: \"failCommand\","
+                + "    mode: { times: 1},"
+                + "    data: {"
+                + "        failCommands: [\"getMore\" ],"
+                + "        blockConnection: true,"
+                + "        blockTimeMS: " + (rtt + 600)
+                + "    }"
+                + "}");
+
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder()
+                .timeout(500, TimeUnit.MILLISECONDS))) {
+
+            MongoCollection<Document> collection = mongoClient.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName()).withReadPreference(ReadPreference.primary());
+
+            MongoCursor<Document> cursor = collection.find()
+                    .batchSize(1)
+                    .cursor();
+
+            cursor.next();
+            assertThrows(MongoOperationTimeoutException.class, cursor::next);
+            cursor.close();
+        }
+
+        List<CommandStartedEvent> events = commandListener.getCommandStartedEvents();
+        assertEquals(2, events.size(), "Actual events: " + events.stream()
+                .map(CommandStartedEvent::getCommandName)
+                .collect(Collectors.toList()));
+        assertEquals(1, events.stream().filter(e -> e.getCommandName().equals("find")).count());
+        assertEquals(1, events.stream().filter(e -> e.getCommandName().equals("getMore")).count());
+
+    }
+
+    /**
+     * Not a prose spec test. However, it is an additional test case for better coverage.
+     */
+    @Test
+    @DisplayName("Should throw timeout exception for subsequent commit transaction")
+    public void shouldThrowTimeoutExceptionForSubsequentCommitTransaction() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        assumeFalse(isStandalone());
+
+        try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder())) {
+            MongoCollection<Document> collection = mongoClient.getDatabase(namespace.getDatabaseName())
+                    .getCollection(namespace.getCollectionName());
+
+            try (ClientSession session = mongoClient.startSession(ClientSessionOptions.builder()
+                    .defaultTimeout(200, TimeUnit.MILLISECONDS)
+                    .build())) {
+                session.startTransaction(TransactionOptions.builder().build());
+                collection.insertOne(session, new Document("x", 1));
+                sleep(200);
+
+                assertDoesNotThrow(session::commitTransaction);
+
+                collectionHelper.runAdminCommand("{"
+                        + "    configureFailPoint: \"failCommand\","
+                        + "    mode: { times: 1 },"
+                        + "    data: {"
+                        + "        failCommands: [\"commitTransaction\"],"
+                        + "        blockConnection: true,"
+                        + "        blockTimeMS: " + 500
+                        + "    }"
+                        + "}");
+
+                //repeat commit.
+                assertThrows(MongoOperationTimeoutException.class, session::commitTransaction);
+            }
+        }
+        List<CommandStartedEvent> commandStartedEvents = commandListener.getCommandStartedEvents("commitTransaction");
+        assertEquals(2, commandStartedEvents.size());
+
+        List<CommandFailedEvent> failedEvents = commandListener.getCommandFailedEvents("commitTransaction");
+        assertEquals(1, failedEvents.size());
+    }
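The wait-queue cases above all pin the pool to a single connection so that a second operation must queue behind a deliberately blocked one. The relevant settings in isolation, as a sketch:

    MongoClientSettings settings = MongoClientSettings.builder()
            .applyToConnectionPoolSettings(builder -> builder
                    .maxSize(1)                                // one connection: a blocked operation stalls the queue
                    .maxWaitTime(100, TimeUnit.MILLISECONDS))  // checkout budget, honoured only when timeoutMS is unset
            .timeout(500, TimeUnit.MILLISECONDS)               // when set, the CSOT deadline supersedes maxWaitTime
            .build();

Which exception surfaces tells you which limit fired: MongoTimeoutException for a plain wait-queue timeout, MongoOperationTimeoutException when the client-wide CSOT deadline governs checkout.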
+
+    /**
+     * Not a prose spec test. However, it is an additional test case for better coverage.
+     * <p>
+     * From the spec:
+     * - When doing `minPoolSize` maintenance, `connectTimeoutMS` is used as the timeout for socket establishment.
+     */
+    @Test
+    @DisplayName("Should use connectTimeoutMS when establishing connection in background")
+    public void shouldUseConnectTimeoutMsWhenEstablishingConnectionInBackground() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+
+        collectionHelper.runAdminCommand("{"
+                + "    configureFailPoint: \"" + FAIL_COMMAND_NAME + "\","
+                + "    mode: \"alwaysOn\","
+                + "    data: {"
+                + "        failCommands: [\"hello\", \"isMaster\"],"
+                + "        blockConnection: true,"
+                + "        blockTimeMS: " + 500
+                + "    }"
+                + "}");
+
+        try (MongoClient ignored = createMongoClient(getMongoClientSettingsBuilder()
+                .applyToConnectionPoolSettings(builder -> builder.minSize(1))
+                // Use a very short timeout to ensure that the connection establishment will fail on the first handshake command.
+                .timeout(10, TimeUnit.MILLISECONDS))) {
+            InternalStreamConnection.setRecordEverything(true);
+
+            // Wait for the connection to start establishment in the background.
+            sleep(1000);
+        } finally {
+            InternalStreamConnection.setRecordEverything(false);
+        }
+
+        List<CommandFailedEvent> commandFailedEvents = commandListener.getCommandFailedEvents("isMaster");
+        assertEquals(1, commandFailedEvents.size());
+        assertInstanceOf(MongoOperationTimeoutException.class, commandFailedEvents.get(0).getThrowable());
+    }
+
+    private static Stream<Arguments> test8ServerSelectionArguments() {
+        return Stream.of(
+                Arguments.of(Named.of("serverSelectionTimeoutMS honored if timeoutMS is not set",
+                        "mongodb://invalid/?serverSelectionTimeoutMS=10")),
+                Arguments.of(Named.of("timeoutMS honored for server selection if it's lower than serverSelectionTimeoutMS",
+                        "mongodb://invalid/?timeoutMS=200&serverSelectionTimeoutMS=10")),
+                Arguments.of(Named.of("serverSelectionTimeoutMS honored for server selection if it's lower than timeoutMS",
+                        "mongodb://invalid/?timeoutMS=10&serverSelectionTimeoutMS=200")),
+                Arguments.of(Named.of("serverSelectionTimeoutMS honored for server selection if timeoutMS=0",
+                        "mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10"))
+
+        );
+    }
+
+    private static Stream<Arguments> test8ServerSelectionHandshakeArguments() {
+        return Stream.of(
+                Arguments.of("timeoutMS honored for connection handshake commands if it's lower than serverSelectionTimeoutMS", 200, 300),
+                Arguments.of("serverSelectionTimeoutMS honored for connection handshake commands if it's lower than timeoutMS", 300, 200)
+        );
+    }
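Each connection string fed to test8ServerSelection encodes which of the two limits should win; decoding one of them as a sketch:

    // timeoutMS=0 disables the client-wide limit, so serverSelectionTimeoutMS=10 governs selection.
    ConnectionString cs = new ConnectionString("mongodb://invalid/?timeoutMS=0&serverSelectionTimeoutMS=10");
    MongoClientSettings settings = MongoClientSettings.builder()
            .applyConnectionString(cs)
            .build();

The host "invalid" guarantees selection can never succeed, so the elapsed-time assertion measures purely how quickly the effective limit (the lower of the two, with 0 meaning "unset") fires.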
+
+    protected MongoNamespace generateNamespace() {
+        return new MongoNamespace(getDefaultDatabaseName(),
+                getClass().getSimpleName() + "_" + COUNTER.incrementAndGet());
+    }
+
+    protected MongoClientSettings.Builder getMongoClientSettingsBuilder() {
+        commandListener.reset();
+        return Fixture.getMongoClientSettingsBuilder()
+                .readConcern(ReadConcern.MAJORITY)
+                .writeConcern(WriteConcern.MAJORITY)
+                .readPreference(ReadPreference.primary())
+                .addCommandListener(commandListener);
+    }
+
+    @BeforeEach
+    public void setUp() {
+        namespace = generateNamespace();
+        executor = Executors.newSingleThreadExecutor();
+        gridFsFileNamespace = new MongoNamespace(getDefaultDatabaseName(), GRID_FS_BUCKET_NAME + ".files");
+        gridFsChunksNamespace = new MongoNamespace(getDefaultDatabaseName(), GRID_FS_BUCKET_NAME + ".chunks");
+
+        collectionHelper = new CollectionHelper<>(new BsonDocumentCodec(), namespace);
+        filesCollectionHelper = new CollectionHelper<>(new BsonDocumentCodec(), gridFsFileNamespace);
+        chunksCollectionHelper = new CollectionHelper<>(new BsonDocumentCodec(), gridFsChunksNamespace);
+        commandListener = new TestCommandListener();
+    }
+
+    @AfterEach
+    public void tearDown() throws InterruptedException {
+        ClusterFixture.disableFailPoint(FAIL_COMMAND_NAME);
+        if (collectionHelper != null) {
+            collectionHelper.drop();
+            filesCollectionHelper.drop();
+            chunksCollectionHelper.drop();
+            commandListener.reset();
+            try {
+                ServerHelper.checkPool(getPrimary());
+            } catch (InterruptedException e) {
+                // ignore
+            }
+        }
+
+        if (executor != null) {
+            executor.shutdownNow();
+            //noinspection ResultOfMethodCallIgnored
+            executor.awaitTermination(MAX_VALUE, NANOSECONDS);
+        }
+    }
+
+    @AfterAll
+    public static void finalTearDown() {
+        CollectionHelper.dropDatabase(getDefaultDatabaseName());
+    }
+
+    private MongoClient createMongoClient(final MongoClientSettings.Builder builder) {
+        return createMongoClient(builder.build());
+    }
+
+    private long msElapsedSince(final long t1) {
+        return TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - t1);
+    }
+}
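Almost every assertion in the file above reads command events from the driver-internal TestCommandListener, which is not public API. A minimal recording listener of your own, built only on the public com.mongodb.event types, could look like this (a sketch, not the internal class):

    import com.mongodb.event.CommandFailedEvent;
    import com.mongodb.event.CommandListener;
    import com.mongodb.event.CommandStartedEvent;
    import com.mongodb.event.CommandSucceededEvent;

    import java.util.List;
    import java.util.concurrent.CopyOnWriteArrayList;

    public final class RecordingCommandListener implements CommandListener {
        private final List<String> startedCommandNames = new CopyOnWriteArrayList<>();

        @Override
        public void commandStarted(final CommandStartedEvent event) {
            startedCommandNames.add(event.getCommandName());
        }

        @Override
        public void commandSucceeded(final CommandSucceededEvent event) {
        }

        @Override
        public void commandFailed(final CommandFailedEvent event) {
        }

        public List<String> getStartedCommandNames() {
            return startedCommandNames;
        }
    }

It is registered the same way the tests register theirs: MongoClientSettings.builder().addCommandListener(new RecordingCommandListener()).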
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.ConnectionString;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.MongoException;
+import com.mongodb.MongoSocketException;
+import com.mongodb.ServerAddress;
+import com.mongodb.connection.ServerDescription;
+import com.mongodb.event.ClusterDescriptionChangedEvent;
+import com.mongodb.event.ClusterListener;
+import com.mongodb.spi.dns.DnsClient;
+import com.mongodb.spi.dns.DnsException;
+import com.mongodb.spi.dns.InetAddressResolver;
+import org.junit.jupiter.api.Test;
+
+import java.net.UnknownHostException;
+import java.util.Collections;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeoutException;
+
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+@SuppressWarnings("try")
+public abstract class AbstractDnsConfigurationTest {
+
+    protected abstract MongoClient createMongoClient(MongoClientSettings settings);
+
+    @Test
+    public void testInetAddressResolverConfiguration() throws InterruptedException, ExecutionException, TimeoutException {
+        UnknownHostException exception = new UnknownHostException();
+        InetAddressResolver resolver = host -> {
+            throw exception;
+        };
+
+        CompletableFuture<Throwable> exceptionReceivedFuture = new CompletableFuture<>();
+        MongoClientSettings settings = MongoClientSettings.builder()
+                .applyToClusterSettings(builder ->
+                        builder.hosts(Collections.singletonList(new ServerAddress("some.host")))
+                                .addClusterListener(new ClusterListener() {
+                                    @Override
+                                    public void clusterDescriptionChanged(final ClusterDescriptionChangedEvent event) {
+                                        ServerDescription serverDescription = event.getNewDescription().getServerDescriptions().get(0);
+                                        if (serverDescription.getException() != null) {
+                                            exceptionReceivedFuture.complete(serverDescription.getException());
+                                        }
+                                    }
+                                }))
+                .inetAddressResolver(resolver)
+                .build();
+
+        try (MongoClient ignored = createMongoClient(settings)) {
+            Throwable exceptionReceived = exceptionReceivedFuture.get(1, SECONDS);
+            assertEquals(MongoSocketException.class, exceptionReceived.getClass());
+            assertEquals(exception, exceptionReceived.getCause());
+        }
+    }
+
+    @Test
+    public void testDnsClientConfiguration() throws InterruptedException, ExecutionException, TimeoutException {
+        DnsException exception = new DnsException("", new Exception());
+        DnsClient dnsClient = (name, type) -> {
+            throw exception;
+        };
+
+        CompletableFuture<Throwable> exceptionReceived = new CompletableFuture<>();
+        MongoClientSettings settings = MongoClientSettings.builder()
+                .applyConnectionString(new ConnectionString("mongodb+srv://free-java.mongodb-dev.net"))
+                .applyToClusterSettings(builder ->
+                        builder.addClusterListener(new ClusterListener() {
+                            @Override
+                            public void clusterDescriptionChanged(final ClusterDescriptionChangedEvent event) {
+                                MongoException srvResolutionException = event.getNewDescription().getSrvResolutionException();
+                                if (srvResolutionException != null) {
+                                    exceptionReceived.complete(srvResolutionException.getCause());
+                                }
+                            }
+                        }))
+                .dnsClient(dnsClient)
+                .build();
+
+        try (MongoClient ignored = createMongoClient(settings)) {
+            assertEquals(exception, exceptionReceived.get(1, SECONDS));
+        }
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractExplainTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractExplainTest.java
new file mode 100644
index 00000000000..920dff0396e
--- /dev/null
+++
b/driver-sync/src/test/functional/com/mongodb/client/AbstractExplainTest.java @@ -0,0 +1,211 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.ExplainVerbosity; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoCommandException; +import com.mongodb.client.model.Aggregates; +import com.mongodb.client.model.Filters; +import com.mongodb.event.CommandStartedEvent; +import com.mongodb.internal.connection.TestCommandListener; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.Document; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.ClusterFixture.serverVersionAtLeast; +import static com.mongodb.client.Fixture.getDefaultDatabaseName; +import static java.util.Collections.singletonList; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeTrue; + +public abstract class AbstractExplainTest { + + private MongoClient client; + private TestCommandListener commandListener; + + protected abstract MongoClient createMongoClient(MongoClientSettings settings); + + @Before + public void setUp() { + commandListener = new TestCommandListener(); + client = createMongoClient(Fixture.getMongoClientSettingsBuilder().addCommandListener(commandListener).build()); + } + + @After + public void tearDown() { + client.close(); + commandListener.reset(); + } + + @Test + public void testExplainOfFind() { + MongoCollection collection = client.getDatabase(getDefaultDatabaseName()) + .getCollection("explainTest", BsonDocument.class); + collection.drop(); + collection.insertOne(new BsonDocument("_id", new BsonInt32(1))); + + FindIterable iterable = collection.find() + .filter(Filters.eq("_id", 1)); + + Document explainDocument = iterable.explain(); + assertNotNull(explainDocument); + assertTrue(explainDocument.containsKey("queryPlanner")); + assertTrue(explainDocument.containsKey("executionStats")); + + explainDocument = iterable.explain(ExplainVerbosity.QUERY_PLANNER); + assertNotNull(explainDocument); + assertTrue(explainDocument.containsKey("queryPlanner")); + assertFalse(explainDocument.containsKey("executionStats")); + + BsonDocument explainBsonDocument = iterable.explain(BsonDocument.class); + assertNotNull(explainBsonDocument); + assertTrue(explainBsonDocument.containsKey("queryPlanner")); + assertTrue(explainBsonDocument.containsKey("executionStats")); + + explainBsonDocument = iterable.explain(BsonDocument.class, ExplainVerbosity.QUERY_PLANNER); + assertNotNull(explainBsonDocument); + assertTrue(explainBsonDocument.containsKey("queryPlanner")); + assertFalse(explainBsonDocument.containsKey("executionStats")); + } + + @Test + public void testFindContainsMaxTimeMsInExplain() { + //given + MongoCollection 
<BsonDocument> collection = client.getDatabase(getDefaultDatabaseName())
+                .getCollection("explainTest", BsonDocument.class);
+
+        FindIterable<BsonDocument> iterable = collection.find()
+                .maxTime(500, TimeUnit.MILLISECONDS);
+
+        //when
+        iterable.explain();
+
+        //then
+        assertExplainableCommandContainMaxTimeMS();
+    }
+
+    @Test
+    public void testAggregateContainsMaxTimeMsInExplain() {
+        //given
+        MongoCollection<BsonDocument> collection = client.getDatabase(getDefaultDatabaseName())
+                .getCollection("explainTest", BsonDocument.class);
+
+        AggregateIterable<BsonDocument> iterable = collection.aggregate(
+                singletonList(Aggregates.match(Filters.eq("_id", 1))))
+                .maxTime(500, TimeUnit.MILLISECONDS);
+
+        //when
+        iterable.explain();
+
+        //then
+        assertExplainableCommandContainMaxTimeMS();
+    }
+
+    @Test
+    public void testListSearchIndexesContainsMaxTimeMsInExplain() {
+        //given
+        assumeTrue(serverVersionAtLeast(6, 0));
+        MongoCollection<BsonDocument> collection = client.getDatabase(getDefaultDatabaseName())
+                .getCollection("explainTest", BsonDocument.class);
+
+        ListSearchIndexesIterable<Document> iterable = collection.listSearchIndexes()
+                .maxTime(500, TimeUnit.MILLISECONDS);
+
+        //when
+        try {
+            iterable.explain();
+        } catch (MongoCommandException throwable) {
+            // expected on some deployments: listSearchIndexes is supported only with Atlas Search.
+        }
+
+        //then
+        assertExplainableCommandContainMaxTimeMS();
+    }
+
+    @Test
+    public void testExplainOfAggregateWithNewResponseStructure() {
+        MongoCollection<BsonDocument> collection = client.getDatabase(getDefaultDatabaseName())
+                .getCollection("explainTest", BsonDocument.class);
+        collection.drop();
+        collection.insertOne(new BsonDocument("_id", new BsonInt32(1)));
+
+        AggregateIterable<BsonDocument> iterable = collection
+                .aggregate(singletonList(Aggregates.match(Filters.eq("_id", 1))));
+
+        Document explainDocument = getAggregateExplainDocument(iterable.explain());
+        assertTrue(explainDocument.containsKey("queryPlanner"));
+        assertTrue(explainDocument.containsKey("executionStats"));
+
+        explainDocument = getAggregateExplainDocument(iterable.explain(ExplainVerbosity.QUERY_PLANNER));
+        assertNotNull(explainDocument);
+        assertTrue(explainDocument.containsKey("queryPlanner"));
+        assertFalse(explainDocument.containsKey("executionStats"));
+
+        BsonDocument explainBsonDocument = getAggregateExplainDocument(iterable.explain(BsonDocument.class));
+        assertNotNull(explainBsonDocument);
+        assertTrue(explainBsonDocument.containsKey("queryPlanner"));
+        assertTrue(explainBsonDocument.containsKey("executionStats"));
+
+        explainBsonDocument = getAggregateExplainDocument(iterable.explain(BsonDocument.class, ExplainVerbosity.QUERY_PLANNER));
+        assertNotNull(explainBsonDocument);
+        assertTrue(explainBsonDocument.containsKey("queryPlanner"));
+        assertFalse(explainBsonDocument.containsKey("executionStats"));
+    }
+
+    // Post-MongoDB 7.0, sharded cluster responses move the explain plan document into a "shards" document, with a plan for each shard.
+    // This method grabs the explain plan document from the first shard when this new structure is present.
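+    // Illustrative response shape only (the shard name "shard-rs0" is hypothetical):
+    //   {"shards": {"shard-rs0": {"queryPlanner": {...}, "executionStats": {...}}, ...}}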
+ private static Document getAggregateExplainDocument(final Document rootAggregateExplainDocument) { + assertNotNull(rootAggregateExplainDocument); + Document aggregateExplainDocument = rootAggregateExplainDocument; + if (rootAggregateExplainDocument.containsKey("shards")) { + Document shardDocument = rootAggregateExplainDocument.get("shards", Document.class); + String firstKey = shardDocument.keySet().iterator().next(); + aggregateExplainDocument = shardDocument.get(firstKey, Document.class); + } + return aggregateExplainDocument; + } + + private static BsonDocument getAggregateExplainDocument(final BsonDocument rootAggregateExplainDocument) { + assertNotNull(rootAggregateExplainDocument); + BsonDocument aggregateExplainDocument = rootAggregateExplainDocument; + if (rootAggregateExplainDocument.containsKey("shards")) { + BsonDocument shardDocument = rootAggregateExplainDocument.getDocument("shards"); + String firstKey = shardDocument.getFirstKey(); + aggregateExplainDocument = shardDocument.getDocument(firstKey); + } + return aggregateExplainDocument; + } + + private void assertExplainableCommandContainMaxTimeMS() { + assertEquals(1, commandListener.getCommandStartedEvents().size()); + CommandStartedEvent explain = commandListener.getCommandStartedEvent("explain"); + BsonDocument explainCommand = explain.getCommand(); + BsonDocument explainableCommand = explainCommand.getDocument("explain"); + + assertFalse(explainCommand.containsKey("maxTimeMS")); + assertTrue(explainableCommand.containsKey("maxTimeMS")); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractExplicitUuidCodecUuidRepresentationTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractExplicitUuidCodecUuidRepresentationTest.java new file mode 100644 index 00000000000..a8ffe4f7e41 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractExplicitUuidCodecUuidRepresentationTest.java @@ -0,0 +1,308 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client; + +import com.mongodb.BasicDBObject; +import com.mongodb.DBObject; +import org.bson.BSONException; +import org.bson.BsonBinary; +import org.bson.BsonBinarySubType; +import org.bson.BsonDocument; +import org.bson.Document; +import org.bson.UuidRepresentation; +import org.bson.codecs.UuidCodec; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.codecs.pojo.PojoCodecProvider; +import org.bson.types.Binary; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import util.Hex; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.UUID; + +import static com.mongodb.MongoClientSettings.getDefaultCodecRegistry; +import static com.mongodb.client.Fixture.getDefaultDatabaseName; +import static com.mongodb.client.Fixture.getMongoClient; +import static org.bson.BsonBinarySubType.UUID_LEGACY; +import static org.bson.BsonBinarySubType.UUID_STANDARD; +import static org.bson.UuidRepresentation.C_SHARP_LEGACY; +import static org.bson.UuidRepresentation.JAVA_LEGACY; +import static org.bson.UuidRepresentation.PYTHON_LEGACY; +import static org.bson.UuidRepresentation.STANDARD; +import static org.bson.codecs.configuration.CodecRegistries.fromCodecs; +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; +import static org.bson.codecs.configuration.CodecRegistries.fromRegistries; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; + +@RunWith(Parameterized.class) +public abstract class AbstractExplicitUuidCodecUuidRepresentationTest { + + private final UuidRepresentation uuidRepresentationForExplicitEncoding; + private final UuidCodec uuidCodec; + private final UuidRepresentation uuidRepresentationForClient; + private final BsonBinarySubType subType; + private final UUID uuid; + private final byte[] encodedValue; + private final byte[] standardEncodedValue; + private MongoCollection documentCollection; + private MongoCollection dbObjectCollection; + private MongoCollection uuidIdPojoCollection; + private MongoCollection bsonDocumentCollection; + + public AbstractExplicitUuidCodecUuidRepresentationTest(final UuidRepresentation uuidRepresentationForClient, + final UuidRepresentation uuidRepresentationForExplicitEncoding, + final BsonBinarySubType subType, + final UuidCodec uuidCodec, final UUID uuid, + final byte[] encodedValue, final byte[] standardEncodedValue) { + this.uuidRepresentationForExplicitEncoding = uuidRepresentationForExplicitEncoding; + this.uuidRepresentationForClient = uuidRepresentationForClient; + this.uuidCodec = uuidCodec; + this.subType = subType; + this.uuid = uuid; + this.encodedValue = encodedValue; + this.standardEncodedValue = standardEncodedValue; + } + + protected abstract void createMongoClient(UuidRepresentation uuidRepresentation, CodecRegistry codecRegistry); + + protected abstract MongoDatabase getDatabase(String databaseName); + + @Before + public void setUp() { + CodecRegistry codecRegistry = fromRegistries( + fromCodecs(uuidCodec), getDefaultCodecRegistry(), + fromProviders(PojoCodecProvider.builder().automatic(true).build())); + + createMongoClient(uuidRepresentationForClient, codecRegistry); + MongoDatabase database = getDatabase(getDefaultDatabaseName()); + documentCollection = database.getCollection(getClass().getName()); + dbObjectCollection = 
documentCollection.withDocumentClass(DBObject.class);
+        uuidIdPojoCollection = documentCollection.withDocumentClass(UuidIdPojo.class);
+
+        bsonDocumentCollection = getMongoClient().getDatabase(getDefaultDatabaseName())
+                .getCollection(getClass().getName())
+                .withDocumentClass(BsonDocument.class);
+
+        bsonDocumentCollection.drop();
+    }
+
+    @Test
+    public void shouldEncodeDocumentWithUuidRepresentation() {
+        documentCollection.insertOne(new Document("_id", uuid));
+
+        BsonDocument document = bsonDocumentCollection.find().first();
+        assertNotNull(document);
+        BsonBinary uuidAsBinary = document.getBinary("_id");
+        assertEquals(subType.getValue(), uuidAsBinary.getType());
+        assertArrayEquals(encodedValue, uuidAsBinary.getData());
+    }
+
+    @Test
+    public void shouldEncodeDbObjectWithUuidRepresentation() {
+        dbObjectCollection.insertOne(new BasicDBObject("_id", uuid));
+
+        BsonDocument document = bsonDocumentCollection.find().first();
+        assertNotNull(document);
+        BsonBinary uuidAsBinary = document.getBinary("_id");
+        assertEquals(subType.getValue(), uuidAsBinary.getType());
+        assertArrayEquals(encodedValue, uuidAsBinary.getData());
+    }
+
+    @Test
+    public void shouldEncodePojoWithUuidRepresentation() {
+        uuidIdPojoCollection.insertOne(new UuidIdPojo(uuid));
+
+        BsonDocument document = bsonDocumentCollection.find().first();
+        assertNotNull(document);
+        BsonBinary uuidAsBinary = document.getBinary("_id");
+        assertEquals(subType.getValue(), uuidAsBinary.getType());
+        assertArrayEquals(encodedValue, uuidAsBinary.getData());
+    }
+
+    @Test
+    public void shouldDecodeDocumentWithUuidRepresentation() {
+        bsonDocumentCollection.insertOne(new BsonDocument("standard", new BsonBinary(uuid, STANDARD))
+                .append("legacy", new BsonBinary(uuid, uuidRepresentationForExplicitEncoding)));
+
+        Document document;
+        try {
+            document = documentCollection.find().first();
+            assertNotNull(document);
+        } catch (BSONException e) {
+            if (uuidCodec.getUuidRepresentation() != STANDARD) {
+                throw e;
+            }
+            return;
+        }
+
+        if (uuidRepresentationForClient == STANDARD) {
+            assertEquals(UUID.class, document.get("standard").getClass());
+            assertEquals(uuid, document.get("standard"));
+
+            assertEquals(Binary.class, document.get("legacy").getClass());
+            assertEquals(new Binary(UUID_LEGACY, encodedValue), document.get("legacy"));
+        } else {
+            if (uuidRepresentationForClient == JAVA_LEGACY) {
+                assertEquals(UUID.class, document.get("standard").getClass());
+                assertEquals(uuid, document.get("standard"));
+            } else {
+                assertEquals(Binary.class, document.get("standard").getClass());
+                assertEquals(new Binary(UUID_STANDARD, standardEncodedValue), document.get("standard"));
+            }
+
+            assertEquals(UUID.class, document.get("legacy").getClass());
+            assertEquals(uuid, document.get("legacy"));
+        }
+    }
+
+    @Test
+    public void shouldDecodeDBObjectWithUuidRepresentation() {
+        bsonDocumentCollection.insertOne(new BsonDocument("standard", new BsonBinary(uuid, STANDARD))
+                .append("legacy", new BsonBinary(uuid, uuidRepresentationForExplicitEncoding)));
+
+        DBObject document;
+        try {
+            document = dbObjectCollection.find().first();
+            assertNotNull(document);
+        } catch (BSONException e) {
+            if (uuidCodec.getUuidRepresentation() != STANDARD) {
+                throw e;
+            }
+            return;
+        }
+
+        if (uuidRepresentationForClient == STANDARD) {
+            assertEquals(UUID.class, document.get("standard").getClass());
+            assertEquals(uuid, document.get("standard"));
+
+            assertEquals(Binary.class, document.get("legacy").getClass());
+
assertEquals(new Binary(UUID_LEGACY, encodedValue), document.get("legacy")); + } else { + if (uuidRepresentationForClient == JAVA_LEGACY) { + assertEquals(UUID.class, document.get("standard").getClass()); + assertEquals(uuid, document.get("standard")); + } else { + assertEquals(Binary.class, document.get("standard").getClass()); + assertEquals(new Binary(UUID_STANDARD, standardEncodedValue), document.get("standard")); + } + + assertEquals(UUID.class, document.get("legacy").getClass()); + assertEquals(uuid, document.get("legacy")); + } + } + + @Test + public void shouldDecodePojoWithStandardUuidRepresentation() { + bsonDocumentCollection.insertOne(new BsonDocument("_id", new BsonBinary(uuid, STANDARD))); + + try { + UuidIdPojo document = uuidIdPojoCollection.find().first(); + assertNotNull(document); + assertEquals(uuid, document.getId()); + } catch (BSONException e) { + if (uuidCodec.getUuidRepresentation() == uuidRepresentationForClient) { + throw e; + } + } + } + + @Test + public void shouldDecodePojoWithLegacyUuidRepresentation() { + bsonDocumentCollection.insertOne(new BsonDocument("_id", new BsonBinary(uuid, uuidRepresentationForExplicitEncoding))); + + try { + UuidIdPojo document = uuidIdPojoCollection.find().first(); + assertNotNull(document); + assertEquals(uuid, document.getId()); + } catch (BSONException e) { + if (uuidCodec.getUuidRepresentation() == uuidRepresentationForExplicitEncoding) { + throw e; + } + } + } + + @Parameterized.Parameters(name = "{0}/{1}/{2}") + public static Collection data() { + UUID uuid = UUID.fromString("00112233-4455-6677-8899-aabbccddeeff"); + + byte[] standardEncodedValue = Hex.decode("00112233445566778899AABBCCDDEEFF"); + + List data = new ArrayList<>(); + data.add(new Object[]{ + JAVA_LEGACY, + PYTHON_LEGACY, + UUID_STANDARD, + new UuidCodec(STANDARD), + uuid, + standardEncodedValue, + standardEncodedValue}); + data.add(new Object[]{ + STANDARD, + C_SHARP_LEGACY, + UUID_LEGACY, + new UuidCodec(C_SHARP_LEGACY), + uuid, + Hex.decode("33221100554477668899AABBCCDDEEFF"), + standardEncodedValue}); + data.add(new Object[]{ + STANDARD, + JAVA_LEGACY, + UUID_LEGACY, + new UuidCodec(JAVA_LEGACY), + uuid, + Hex.decode("7766554433221100FFEEDDCCBBAA9988"), + standardEncodedValue}); + data.add(new Object[]{ + STANDARD, + PYTHON_LEGACY, + UUID_LEGACY, + new UuidCodec(PYTHON_LEGACY), + uuid, + standardEncodedValue, + standardEncodedValue}); + return data; + } + + +public static class UuidIdPojo { + private UUID id; + + @SuppressWarnings("unused") + public UuidIdPojo() { + } + + UuidIdPojo(final UUID id) { + this.id = id; + } + + public UUID getId() { + return id; + } + + public void setId(final UUID id) { + this.id = id; + } +} +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractMongoCollectionTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractMongoCollectionTest.java new file mode 100644 index 00000000000..76a21d1a588 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractMongoCollectionTest.java @@ -0,0 +1,314 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.DBRef; +import com.mongodb.MongoClientSettings; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.client.result.InsertManyResult; +import org.bson.BsonReader; +import org.bson.BsonValue; +import org.bson.BsonWriter; +import org.bson.Document; +import org.bson.RawBsonDocument; +import org.bson.codecs.BsonValueCodecProvider; +import org.bson.codecs.Codec; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.DocumentCodecProvider; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.ValueCodecProvider; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.codecs.pojo.PojoCodecProvider; +import org.bson.codecs.pojo.entities.ShapeModelAbstract; +import org.bson.codecs.pojo.entities.ShapeModelCircle; +import org.bson.codecs.pojo.entities.conventions.BsonRepresentationModel; +import org.bson.json.JsonObject; +import org.bson.types.ObjectId; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static com.mongodb.client.Fixture.getDefaultDatabaseName; +import static java.util.Arrays.asList; +import static org.bson.codecs.configuration.CodecRegistries.fromCodecs; +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; +import static org.bson.codecs.configuration.CodecRegistries.fromRegistries; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public abstract class AbstractMongoCollectionTest { + + protected abstract MongoDatabase getDatabase(String databaseName); + + MongoCollection getCollection() { + return getDatabase(getDefaultDatabaseName()).getCollection("MongoCollectionTest"); + } + + @BeforeEach + public void setUp() { + getCollection().drop(); + } + + @AfterEach + public void cleanUp() { + getCollection().drop(); + } + + @Test + public void testFindAndUpdateWithGenerics() { + CodecRegistry codecRegistry = fromProviders(asList(new ValueCodecProvider(), new DocumentCodecProvider(), + new BsonValueCodecProvider(), new ConcreteCodecProvider())); + MongoCollection collection = getCollection() + .withDocumentClass(Concrete.class) + .withCodecRegistry(codecRegistry) + .withReadPreference(ReadPreference.primary()) + .withWriteConcern(WriteConcern.ACKNOWLEDGED); + + Concrete doc = new Concrete(new ObjectId(), "str", 5, 10L, 4.0, 3290482390480L); + collection.insertOne(doc); + + Concrete newDoc = collection.findOneAndUpdate(new Document("i", 5), + new Document("$set", new Document("i", 6))); + + assertNotNull(newDoc); + assertEquals(doc, newDoc); + } + + @Test + public void testFindOneAndUpdateEmpty() { + boolean exceptionFound = false; + getCollection().insertOne(new 
Document().append("_id", "fakeId").append("one", 1).append("foo", "bar")); + + try { + getCollection().findOneAndUpdate(new Document(), new Document()); + } catch (IllegalArgumentException e) { + assertEquals("Invalid BSON document for an update. The document may not be empty.", e.getMessage()); + exceptionFound = true; + } + assertTrue(exceptionFound); + } + + @Test + public void shouldBeAbleToQueryTypedCollectionAndMapResultsIntoTypedLists() { + // given + CodecRegistry codecRegistry = fromProviders(asList(new ValueCodecProvider(), new DocumentCodecProvider(), + new BsonValueCodecProvider(), new ConcreteCodecProvider())); + MongoCollection collection = getCollection() + .withDocumentClass(Concrete.class) + .withCodecRegistry(codecRegistry) + .withReadPreference(ReadPreference.primary()) + .withWriteConcern(WriteConcern.ACKNOWLEDGED); + + Concrete firstItem = new Concrete("first", 1, 2L, 3.0, 5L); + collection.insertOne(firstItem); + + Concrete secondItem = new Concrete("second", 7, 11L, 13.0, 17L); + collection.insertOne(secondItem); + + // when + List listOfStringObjectIds = collection.find(new Document("i", 1)) + .map(Concrete::getId) + .map(ObjectId::toString).into(new ArrayList<>()); + + // then + assertThat(listOfStringObjectIds.size(), is(1)); + assertThat(listOfStringObjectIds.get(0), is(firstItem.getId().toString())); + + // when + List listOfObjectIds = collection.find(new Document("i", 1)) + .map(Concrete::getId) + .into(new ArrayList<>()); + + // then + assertThat(listOfObjectIds.size(), is(1)); + assertThat(listOfObjectIds.get(0), is(firstItem.getId())); + } + + @SuppressWarnings("deprecation") + @Test + public void testMapReduceWithGenerics() { + // given + CodecRegistry codecRegistry = fromProviders(asList(new DocumentCodecProvider(), new NameCodecProvider())); + getCollection().insertMany(asList(new Document("name", "Pete").append("job", "handyman"), + new Document("name", "Sam").append("job", "Plumber"), + new Document("name", "Pete").append("job", "'electrician'"))); + + String mapFunction = "function(){ emit( this.name , 1 ); }"; + String reduceFunction = "function(key, values){ return values.length; }"; + MongoCollection collection = getCollection() + .withCodecRegistry(codecRegistry) + .withReadPreference(ReadPreference.primary()) + .withWriteConcern(WriteConcern.ACKNOWLEDGED); + + // when + List result = collection.mapReduce(mapFunction, reduceFunction, Name.class).into(new ArrayList<>()); + + // then + assertTrue(result.contains(new Name("Pete", 2))); + assertTrue(result.contains(new Name("Sam", 1))); + } + + @Test + public void testAggregationToACollection() { + // given + List documents = asList(new Document("_id", 1), new Document("_id", 2)); + getCollection().insertMany(documents); + + + // when + List result = getCollection().aggregate(Collections.singletonList(new Document("$out", "outCollection"))) + .into(new ArrayList<>()); + + // then + assertEquals(documents, result); + } + + @Test + public void bulkInsertRawBsonDocuments() { + // given + List docs = asList(RawBsonDocument.parse("{a: 1}"), RawBsonDocument.parse("{a: 2}")); + + // when + InsertManyResult result = getCollection().withDocumentClass(RawBsonDocument.class).insertMany(docs); + + // then + Map expectedResult = new HashMap<>(); + expectedResult.put(0, null); + expectedResult.put(1, null); + assertEquals(expectedResult, result.getInsertedIds()); + } + + // This is really a test that the default registry created in MongoClient and passed down to MongoCollection has been constructed + // properly to 
handle DBRef encoding and decoding
+    @Test
+    public void testDBRefEncodingAndDecoding() {
+        // given
+        Document doc = new Document("_id", 1)
+                .append("ref", new DBRef("foo", 5))
+                .append("refWithDB", new DBRef("db", "foo", 5));
+
+        // when
+        getCollection().insertOne(doc);
+
+        // then
+        assertEquals(doc, getCollection().find().first());
+    }
+
+    @Test
+    public void testJsonObjectEncodingAndDecoding() {
+        // given
+        MongoCollection<JsonObject> test = getCollection().withDocumentClass(JsonObject.class);
+        JsonObject json = new JsonObject("{\"_id\": {\"$oid\": \"5f5a5442306e56d34136dbcf\"}, \"hello\": 1}");
+
+        // when
+        test.insertOne(json);
+
+        // then
+        assertEquals(json, test.find().first());
+    }
+
+    @Test
+    public void testObjectIdToStringConversion() {
+        // given
+        CodecRegistry pojoCodecRegistry = fromRegistries(MongoClientSettings.getDefaultCodecRegistry(),
+                fromProviders(PojoCodecProvider.builder().automatic(true).build()));
+
+        MongoCollection<BsonRepresentationModel> test = getCollection()
+                .withDocumentClass(BsonRepresentationModel.class)
+                .withCodecRegistry(pojoCodecRegistry);
+        test.drop();
+
+        // when
+        test.insertOne(new BsonRepresentationModel(null, 1));
+
+        // then
+        BsonRepresentationModel first = test.find().first();
+        assertNotNull(first);
+        assertNotNull(first.getId());
+    }
+
+    @Test
+    public void testOperationsUseDocumentClassCodec() {
+
+        Codec<ShapeModelAbstract> shapeModelCodec = new Codec<ShapeModelAbstract>() {
+            private final CodecRegistry pojoCodecRegistry = fromRegistries(MongoClientSettings.getDefaultCodecRegistry(),
+                    fromProviders(PojoCodecProvider.builder().automatic(true).build()));
+
+            @Override
+            public ShapeModelAbstract decode(final BsonReader reader, final DecoderContext decoderContext) {
+                return pojoCodecRegistry.get(getEncoderClass()).decode(reader, decoderContext);
+            }
+
+            @Override
+            public void encode(final BsonWriter writer, final ShapeModelAbstract value, final EncoderContext encoderContext) {
+                pojoCodecRegistry.get(getEncoderClass()).encode(writer, value, encoderContext);
+            }
+
+            @Override
+            public Class<ShapeModelAbstract> getEncoderClass() {
+                return ShapeModelAbstract.class;
+            }
+        };
+        Codec<ShapeModelCircle> circleCodec = new Codec<ShapeModelCircle>() {
+
+            @Override
+            public void encode(final BsonWriter writer, final ShapeModelCircle value, final EncoderContext encoderContext) {
+                throw new UnsupportedOperationException("If this method is called it means this codec was used directly, "
+                        + "even though it's not the MongoCollection document class.");
+            }
+
+            @Override
+            public Class<ShapeModelCircle> getEncoderClass() {
+                return ShapeModelCircle.class;
+            }
+
+            @Override
+            public ShapeModelCircle decode(final BsonReader reader, final DecoderContext decoderContext) {
+                throw new UnsupportedOperationException("If this method is called it means this codec was used directly, "
+                        + "even though it's not the MongoCollection document class.");
+            }
+        };
+
+        // given
+        CodecRegistry pojoCodecRegistry = fromRegistries(fromCodecs(shapeModelCodec, circleCodec),
+                MongoClientSettings.getDefaultCodecRegistry());
+
+        MongoCollection<ShapeModelAbstract> test = getCollection()
+                .withDocumentClass(ShapeModelAbstract.class)
+                .withCodecRegistry(pojoCodecRegistry);
+        test.drop();
+
+        // when
+        ShapeModelCircle redCircle = new ShapeModelCircle("red", 1.1);
+        test.insertOne(redCircle);
+
+        // then
+        assertEquals(redCircle, test.find().first());
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractServerSelectionProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractServerSelectionProseTest.java
new file mode 100644
index 00000000000..506a40d8bd6
--- /dev/null
+++
b/driver-sync/src/test/functional/com/mongodb/client/AbstractServerSelectionProseTest.java
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.mongodb.client;
+
+import com.mongodb.ConnectionString;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.ServerAddress;
+import com.mongodb.event.CommandStartedEvent;
+import com.mongodb.internal.connection.TestCommandListener;
+import org.bson.BsonArray;
+import org.bson.BsonBoolean;
+import org.bson.BsonDocument;
+import org.bson.BsonInt32;
+import org.bson.BsonString;
+import org.bson.Document;
+import org.junit.jupiter.api.Test;
+
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.stream.IntStream;
+
+import static com.mongodb.ClusterFixture.getMultiMongosConnectionString;
+import static com.mongodb.ClusterFixture.isSharded;
+import static com.mongodb.client.Fixture.getDefaultDatabaseName;
+import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder;
+import static com.mongodb.client.model.Filters.eq;
+import static java.util.Collections.singletonList;
+import static java.util.concurrent.TimeUnit.SECONDS;
+import static java.util.stream.Collectors.groupingBy;
+import static java.util.stream.Collectors.toList;
+import static java.util.stream.Collectors.toMap;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
+/**
+ * See the prose tests in the "Server Selection Test Plan" specification.
+ */
+public abstract class AbstractServerSelectionProseTest {
+    /**
+     * {@code operationCount}-based Selection Within Latency Window.
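+     * The test below enables a {@code failCommand} fail point that blocks {@code find} commands on one of two mongos
+     * hosts, verifies that server selection routes most operations away from the blocked host while the fail point is
+     * active, and verifies that selection is roughly evenly split once the fail point is disabled.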
+ */
+    @Test
+    @SuppressWarnings("try")
+    void operationCountBasedSelectionWithinLatencyWindow() throws InterruptedException, ExecutionException {
+        assumeTrue(isSharded());
+        ConnectionString multiMongosConnectionString = getMultiMongosConnectionString();
+        assumeTrue(multiMongosConnectionString != null);
+        assumeTrue(multiMongosConnectionString.getSslEnabled() == null || !multiMongosConnectionString.getSslEnabled());
+        assertEquals(2, multiMongosConnectionString.getHosts().size());
+        String appName = "loadBalancingTest";
+        int timeoutSeconds = 60;
+        int tasks = 10;
+        int opsPerTask = 100;
+        TestCommandListener commandListener = new TestCommandListener(singletonList("commandStartedEvent"), singletonList("drop"));
+        MongoClientSettings clientSettings = getMongoClientSettingsBuilder()
+                .applicationName(appName)
+                .applyConnectionString(multiMongosConnectionString)
+                .applyToConnectionPoolSettings(builder -> builder
+                        .minSize(tasks))
+                .addCommandListener(commandListener)
+                .build();
+        BsonDocument configureFailPoint = new BsonDocument()
+                .append("configureFailPoint", new BsonString("failCommand"))
+                .append("mode", new BsonDocument()
+                        .append("times", new BsonInt32(10_000)))
+                .append("data", new BsonDocument()
+                        .append("failCommands", new BsonArray(singletonList(new BsonString("find"))))
+                        .append("blockConnection", BsonBoolean.valueOf(true))
+                        .append("blockTimeMS", new BsonInt32(500))
+                        .append("appName", new BsonString(appName)));
+        ServerAddress serverWithFailPoint = clientSettings.getClusterSettings().getHosts().get(0);
+        ExecutorService executor = Executors.newFixedThreadPool(tasks);
+        try (MongoClient client = createClient(clientSettings)) {
+            MongoCollection<Document> collection = client.getDatabase(getDefaultDatabaseName())
+                    .getCollection("operationCountBasedSelectionWithinLatencyWindow");
+            collection.drop();
+            try (FailPoint ignored = FailPoint.enable(configureFailPoint, serverWithFailPoint)) {
+                Map<ServerAddress, Double> selectionRates = doSelections(
+                        collection, commandListener, executor, tasks, opsPerTask, timeoutSeconds);
+                double expectedServerWithFpSelectionRateUpperBound = 0.25;
+                assertTrue(selectionRates.containsKey(serverWithFailPoint));
+                assertTrue(selectionRates.get(serverWithFailPoint) < expectedServerWithFpSelectionRateUpperBound,
+                        selectionRates::toString);
+                assertEquals(1, selectionRates.values().stream().mapToDouble(Double::doubleValue).sum(), 0.01,
+                        selectionRates::toString);
+            }
+            commandListener.reset();
+            Map<ServerAddress, Double> selectionRates = doSelections(collection, commandListener, executor, tasks, opsPerTask,
+                    timeoutSeconds);
+            selectionRates.values().forEach(rate -> assertEquals(0.5, rate, 0.1, selectionRates::toString));
+        } finally {
+            executor.shutdownNow();
+            assertTrue(executor.awaitTermination(timeoutSeconds, SECONDS));
+        }
+    }
+
+    private static Map<ServerAddress, Double> doSelections(final MongoCollection<Document> collection,
+            final TestCommandListener commandListener, final ExecutorService ex, final int tasks, final int opsPerTask,
+            final int timeoutSeconds) throws InterruptedException, ExecutionException {
+        List<Future<Boolean>> results = ex.invokeAll(IntStream.range(0, tasks)
+                .<Callable<Boolean>>mapToObj(taskIdx -> () -> {
+                    boolean result = false;
+                    for (int opIdx = 0; opIdx < opsPerTask; opIdx++) {
+                        try (MongoCursor<Document> cursor = collection.find(eq(0)).iterator()) {
+                            result |= cursor.hasNext();
+                        }
+                    }
+                    return result;
+                })
+                .collect(toList()), timeoutSeconds, SECONDS);
+        for (Future<Boolean> result : results) {
+            result.get();
+        }
+        List<CommandStartedEvent> commandStartedEvents = commandListener.getCommandStartedEvents();
+        assertEquals(tasks * opsPerTask,
commandStartedEvents.size()); + return commandStartedEvents.stream() + .collect(groupingBy(event -> event.getConnectionDescription().getServerAddress())) + .entrySet() + .stream() + .collect(toMap(Map.Entry::getKey, entry -> (double) entry.getValue().size() / commandStartedEvents.size())); + } + + protected abstract MongoClient createClient(MongoClientSettings settings); +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractSessionsProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractSessionsProseTest.java new file mode 100644 index 00000000000..3682bd64ff0 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractSessionsProseTest.java @@ -0,0 +1,283 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.ClusterFixture; +import com.mongodb.MongoClientException; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoCommandException; +import com.mongodb.ServerAddress; +import com.mongodb.client.model.Filters; +import com.mongodb.client.model.UpdateOneModel; +import com.mongodb.client.model.Updates; +import com.mongodb.event.CommandListener; +import com.mongodb.event.CommandStartedEvent; +import com.mongodb.event.ServerHeartbeatStartedEvent; +import com.mongodb.event.ServerHeartbeatSucceededEvent; +import com.mongodb.event.TestServerMonitorListener; +import com.mongodb.internal.connection.TestCommandListener; +import org.bson.BsonDocument; +import org.bson.Document; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +import java.io.File; +import java.io.IOException; +import java.time.Duration; +import java.util.List; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; + +import static com.mongodb.ClusterFixture.getDefaultDatabaseName; +import static com.mongodb.ClusterFixture.isStandalone; +import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static org.bson.assertions.Assertions.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +// Prose tests for Sessions specification: https://github.com/mongodb/specifications/tree/master/source/sessions +// Prose test README: https://github.com/mongodb/specifications/tree/master/source/sessions/tests/README.md +public abstract class AbstractSessionsProseTest { + + 
private static final int MONGOCRYPTD_PORT = 47017; + private static Process mongocryptdProcess; + + protected abstract MongoClient getMongoClient(MongoClientSettings settings); + + @BeforeAll + public static void beforeAll() throws IOException { + mongocryptdProcess = startMongocryptdProcess(); + } + + @AfterAll + public static void afterAll() { + if (mongocryptdProcess != null) { + mongocryptdProcess.destroy(); + mongocryptdProcess = null; + } + } + + // Test 13 from #13-existing-sessions-are-not-checked-into-a-cleared-pool-after-forking + @Test + public void shouldCreateServerSessionOnlyAfterConnectionCheckout() throws InterruptedException { + Set lsidSet = ConcurrentHashMap.newKeySet(); + MongoCollection collection; + try (MongoClient client = getMongoClient( + getMongoClientSettingsBuilder() + .applyToConnectionPoolSettings(builder -> builder.maxSize(1)) + .addCommandListener(new CommandListener() { + @Override + public void commandStarted(final CommandStartedEvent event) { + lsidSet.add(event.getCommand().getDocument("lsid")); + } + }) + .build())) { + collection = client.getDatabase(getDefaultDatabaseName()).getCollection(getClass().getName()); + + List operations = asList( + () -> collection.insertOne(new Document()), + () -> collection.deleteOne(Filters.eq("_id", 1)), + () -> collection.updateOne(Filters.eq("_id", 1), Updates.set("x", 1)), + () -> collection.bulkWrite(singletonList(new UpdateOneModel<>(Filters.eq("_id", 1), Updates.set("x", 1)))), + () -> collection.findOneAndDelete(Filters.eq("_id", 1)), + () -> collection.findOneAndUpdate(Filters.eq("_id", 1), Updates.set("x", 1)), + () -> collection.findOneAndReplace(Filters.eq("_id", 1), new Document("_id", 1)), + () -> collection.find().first() + ); + + int minLsidSetSize = Integer.MAX_VALUE; + + // Try up to five times, counting on at least one time that only one lsid will be used + for (int i = 0; i < 5; i++) { + // given + lsidSet.clear(); + + // when executing numConcurrentOperations operations concurrently + ExecutorService executor = Executors.newFixedThreadPool(operations.size()); + + operations.forEach(executor::submit); + + executor.shutdown(); + boolean terminated = executor.awaitTermination(5, TimeUnit.SECONDS); + + // then + assertTrue(terminated); + assertTrue(lsidSet.size() < operations.size()); + minLsidSetSize = Math.min(minLsidSetSize, lsidSet.size()); + if (minLsidSetSize == 1) { + break; + } + } + assertEquals(1, minLsidSetSize); + } + } + + // Test 18 from #18-implicit-session-is-ignored-if-connection-does-not-support-sessions + @Test + public void shouldIgnoreImplicitSessionIfConnectionDoesNotSupportSessions() throws IOException { + // initialize to true in case the command listener is never actually called, in which case the assertFalse will fire + AtomicBoolean containsLsid = new AtomicBoolean(true); + try (MongoClient client = getMongoClient( + getMongocryptdMongoClientSettingsBuilder() + .addCommandListener(new CommandListener() { + @Override + public void commandStarted(final CommandStartedEvent event) { + containsLsid.set(event.getCommand().containsKey("lsid")); + } + }) + .build())) { + + Document helloResponse = client.getDatabase("admin").runCommand(new Document("hello", 1)); + assertFalse((helloResponse.containsKey("logicalSessionTimeoutMinutes"))); + + MongoCollection collection = client.getDatabase(getDefaultDatabaseName()).getCollection(getClass().getName()); + try { + collection.find().first(); + } catch (MongoCommandException e) { + // ignore command errors from mongocryptd + } + 
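// mongocryptd's hello response carries no logicalSessionTimeoutMinutes, so the find above must not have sent an lsid.
+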
assertFalse(containsLsid.get()); + + // reset + containsLsid.set(true); + + try { + collection.insertOne(new Document()); + } catch (MongoCommandException e) { + // ignore command errors from mongocryptd + } + assertFalse(containsLsid.get()); + } + } + + // Test 19 from #19-explicit-session-raises-an-error-if-connection-does-not-support-sessions + @Test + public void shouldThrowOnExplicitSessionIfConnectionDoesNotSupportSessions() throws IOException { + try (MongoClient client = getMongoClient(getMongocryptdMongoClientSettingsBuilder().build())) { + MongoCollection collection = client.getDatabase(getDefaultDatabaseName()).getCollection(getClass().getName()); + + Document helloResponse = client.getDatabase("admin").runCommand(new Document("hello", 1)); + assertFalse((helloResponse.containsKey("logicalSessionTimeoutMinutes"))); + + try (ClientSession session = client.startSession()) { + String expectedClientExceptionMessage = + "Attempting to use a ClientSession while connected to a server that doesn't support sessions"; + try { + collection.find(session).first(); + fail("Expected MongoClientException"); + } catch (MongoClientException e) { + assertEquals(expectedClientExceptionMessage, e.getMessage()); + } + + try { + collection.insertOne(session, new Document()); + fail("Expected MongoClientException"); + } catch (MongoClientException e) { + assertEquals(expectedClientExceptionMessage, e.getMessage()); + } + } + } + } + + /* Test 20 from #20-drivers-do-not-gossip-clustertime-on-sdam-commands + In this test, we check that the cluster time has not been advanced on client1 through the server monitors, after client2 advanced + the cluster time on the deployment/cluster. + */ + @Test + public void shouldNotGossipClusterTimeInServerMonitors() throws InterruptedException, TimeoutException { + assumeTrue(!isStandalone()); + + //given + TestServerMonitorListener serverMonitorListener = + new TestServerMonitorListener(asList("serverHeartbeatStartedEvent", "serverHeartbeatSucceededEvent", + "serverHeartbeatFailedEvent")); + TestCommandListener commandListener = new TestCommandListener(); + try (MongoClient client1 = getMongoClient( + getDirectPrimaryMongoClientSettingsBuilder() + .addCommandListener(commandListener) + .applyToServerSettings(builder -> builder + .heartbeatFrequency(10, MILLISECONDS) + .addServerMonitorListener(serverMonitorListener)) + .build()); + MongoClient client2 = getMongoClient(getDirectPrimaryMongoClientSettingsBuilder() + .build())) { + + Document clusterTime = executePing(client1) + .get("$clusterTime", Document.class); + + //when + client2.getDatabase("test") + .getCollection("test") + .insertOne(new Document("advance", "$clusterTime")); + + // wait until the client1 processes the next pair of SDAM heartbeat started + succeeded events. 
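+            // With heartbeatFrequency set to 10 ms above, one completed started/succeeded pair is a quick signal that the monitor has run.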
+ serverMonitorListener.reset(); + serverMonitorListener.waitForEvents(ServerHeartbeatStartedEvent.class, serverHeartbeatStartedEvent -> true, + 1, Duration.ofMillis(20 + ClusterFixture.getPrimaryRTT())); + serverMonitorListener.waitForEvents(ServerHeartbeatSucceededEvent.class, serverHeartbeatSucceededEvent -> true, + 1, Duration.ofMillis(20 + ClusterFixture.getPrimaryRTT())); + + commandListener.reset(); + executePing(client1); + + //then + List pingStartedEvents = commandListener.getCommandStartedEvents("ping"); + assertEquals(1, pingStartedEvents.size()); + BsonDocument sentClusterTime = pingStartedEvents.get(0).getCommand().getDocument("$clusterTime"); + + assertEquals(clusterTime.toBsonDocument(), sentClusterTime, "Cluster time should not have advanced after the first ping"); + } + } + + private static MongoClientSettings.Builder getDirectPrimaryMongoClientSettingsBuilder() { + return getMongoClientSettingsBuilder() + .applyToClusterSettings(ClusterFixture::setDirectConnection); + } + + private static MongoClientSettings.Builder getMongocryptdMongoClientSettingsBuilder() { + return MongoClientSettings.builder() + .applyToClusterSettings(builder -> + builder.hosts(singletonList(new ServerAddress("localhost", MONGOCRYPTD_PORT)))); + } + + private static Process startMongocryptdProcess() throws IOException { + String port = Integer.toString(MONGOCRYPTD_PORT); + ProcessBuilder processBuilder = new ProcessBuilder(asList("mongocryptd", + "--port", port, + "--pidfilepath", "mongocryptd-" + port + ".pid")); + processBuilder.redirectErrorStream(true); + processBuilder.redirectOutput(new File("/tmp/mongocryptd.log")); + return processBuilder.start(); + } + + private static Document executePing(final MongoClient client1) { + return client1.getDatabase("admin") + .runCommand(new Document("ping", 1)); + } +} + diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractUuidRepresentationTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractUuidRepresentationTest.java new file mode 100644 index 00000000000..670a9630f7f --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractUuidRepresentationTest.java @@ -0,0 +1,338 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client; + +import com.mongodb.BasicDBObject; +import com.mongodb.DBObject; +import org.bson.BSONException; +import org.bson.BsonBinary; +import org.bson.BsonBinarySubType; +import org.bson.BsonDocument; +import org.bson.Document; +import org.bson.UuidRepresentation; +import org.bson.codecs.configuration.CodecConfigurationException; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.codecs.pojo.PojoCodecProvider; +import org.bson.types.Binary; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import util.Hex; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.UUID; + +import static com.mongodb.MongoClientSettings.getDefaultCodecRegistry; +import static com.mongodb.client.Fixture.getDefaultDatabaseName; +import static com.mongodb.client.Fixture.getMongoClient; +import static org.bson.UuidRepresentation.JAVA_LEGACY; +import static org.bson.UuidRepresentation.STANDARD; +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; +import static org.bson.codecs.configuration.CodecRegistries.fromRegistries; +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.fail; + +@RunWith(Parameterized.class) +public abstract class AbstractUuidRepresentationTest { + private final UuidRepresentation uuidRepresentation; + private final BsonBinarySubType subType; + private final UUID uuid; + private final byte[] encodedValue; + private final byte[] standardEncodedValue; + private MongoCollection documentCollection; + private MongoCollection dbObjectCollection; + private MongoCollection uuidIdPojoCollection; + private MongoCollection bsonDocumentCollection; + + + public AbstractUuidRepresentationTest(final UuidRepresentation uuidRepresentation, final BsonBinarySubType subType, + final UUID uuid, + final byte[] encodedValue, final byte[] standardEncodedValue) { + this.uuidRepresentation = uuidRepresentation; + this.subType = subType; + this.uuid = uuid; + this.encodedValue = encodedValue; + this.standardEncodedValue = standardEncodedValue; + } + + protected abstract void createMongoClient(UuidRepresentation uuidRepresentation, CodecRegistry codecRegistry); + + protected abstract MongoDatabase getDatabase(String databaseName); + + + @Before + public void setUp() { + PojoCodecProvider pojoCodecProvider = PojoCodecProvider.builder().automatic(true).build(); + CodecRegistry codecRegistry = fromRegistries(getDefaultCodecRegistry(), fromProviders(pojoCodecProvider)); + + createMongoClient(uuidRepresentation, codecRegistry); + MongoDatabase database = getDatabase(getDefaultDatabaseName()); + documentCollection = database.getCollection(getClass().getName()); + dbObjectCollection = documentCollection.withDocumentClass(DBObject.class); + uuidIdPojoCollection = documentCollection.withDocumentClass(UuidIdPojo.class); + + bsonDocumentCollection = getMongoClient().getDatabase(getDefaultDatabaseName()) + .getCollection(getClass().getName()) + .withDocumentClass(BsonDocument.class); + + bsonDocumentCollection.drop(); + } + + @Test + public void shouldEncodeDocumentWithUuidRepresentation() { + + if (uuidRepresentation == UuidRepresentation.UNSPECIFIED) { + try { + documentCollection.insertOne(new Document("_id", uuid)); + fail(); + } catch (CodecConfigurationException e) { + 
// all good
+            }
+        } else {
+            documentCollection.insertOne(new Document("_id", uuid));
+
+            BsonDocument document = bsonDocumentCollection.find().first();
+            assertNotNull(document);
+            BsonBinary uuidAsBinary = document.getBinary("_id");
+            assertEquals(subType.getValue(), uuidAsBinary.getType());
+            assertArrayEquals(encodedValue, uuidAsBinary.getData());
+        }
+    }
+
+    @Test
+    public void shouldEncodeDbObjectWithUuidRepresentation() {
+
+        if (uuidRepresentation == UuidRepresentation.UNSPECIFIED) {
+            try {
+                dbObjectCollection.insertOne(new BasicDBObject("_id", uuid));
+                fail();
+            } catch (CodecConfigurationException e) {
+                // all good
+            }
+        } else {
+            dbObjectCollection.insertOne(new BasicDBObject("_id", uuid));
+
+            BsonDocument document = bsonDocumentCollection.find().first();
+            assertNotNull(document);
+            BsonBinary uuidAsBinary = document.getBinary("_id");
+            assertEquals(subType.getValue(), uuidAsBinary.getType());
+            assertArrayEquals(encodedValue, uuidAsBinary.getData());
+        }
+    }
+
+    @Test
+    public void shouldEncodePojoWithUuidRepresentation() {
+        if (uuidRepresentation == UuidRepresentation.UNSPECIFIED) {
+            try {
+                uuidIdPojoCollection.insertOne(new UuidIdPojo(uuid));
+                fail();
+            } catch (CodecConfigurationException e) {
+                // all good
+            }
+        } else {
+            uuidIdPojoCollection.insertOne(new UuidIdPojo(uuid));
+
+            BsonDocument document = bsonDocumentCollection.find().first();
+            assertNotNull(document);
+            BsonBinary uuidAsBinary = document.getBinary("_id");
+            assertEquals(subType.getValue(), uuidAsBinary.getType());
+            assertArrayEquals(encodedValue, uuidAsBinary.getData());
+        }
+    }
+
+    @Test
+    public void shouldDecodeDocumentWithUuidRepresentation() {
+
+        bsonDocumentCollection.insertOne(new BsonDocument("standard", new BsonBinary(uuid, STANDARD))
+                .append("legacy", new BsonBinary(uuid,
+                        uuidRepresentation == UuidRepresentation.UNSPECIFIED || uuidRepresentation == STANDARD
+                                ?
UuidRepresentation.PYTHON_LEGACY + : uuidRepresentation))); + + Document document; + try { + document = documentCollection.find().first(); + assertNotNull(document); + } catch (BSONException e) { + if (uuidRepresentation != STANDARD && uuidRepresentation != JAVA_LEGACY) { + throw e; + } + return; + } + + if (uuidRepresentation == UuidRepresentation.UNSPECIFIED) { + assertEquals(Binary.class, document.get("standard").getClass()); + assertEquals(new Binary(BsonBinarySubType.UUID_STANDARD, standardEncodedValue), document.get("standard")); + + assertEquals(Binary.class, document.get("legacy").getClass()); + assertEquals(new Binary(BsonBinarySubType.UUID_LEGACY, standardEncodedValue), document.get("legacy")); + } else if (uuidRepresentation == STANDARD) { + assertEquals(UUID.class, document.get("standard").getClass()); + assertEquals(uuid, document.get("standard")); + + assertEquals(Binary.class, document.get("legacy").getClass()); + assertEquals(new Binary(BsonBinarySubType.UUID_LEGACY, standardEncodedValue), document.get("legacy")); + } else { + assertEquals(Binary.class, document.get("standard").getClass()); + assertEquals(new Binary(BsonBinarySubType.UUID_STANDARD, standardEncodedValue), document.get("standard")); + + assertEquals(UUID.class, document.get("legacy").getClass()); + assertEquals(uuid, document.get("legacy")); + } + } + + @Test + public void shouldDecodeDbObjectWithUuidRepresentation() { + + bsonDocumentCollection.insertOne(new BsonDocument("standard", new BsonBinary(uuid, STANDARD)) + .append("legacy", new BsonBinary(uuid, + uuidRepresentation == UuidRepresentation.UNSPECIFIED || uuidRepresentation == STANDARD + ? UuidRepresentation.PYTHON_LEGACY + : uuidRepresentation))); + + DBObject document; + try { + document = dbObjectCollection.find().first(); + assertNotNull(document); + } catch (BSONException e) { + if (uuidRepresentation != STANDARD && uuidRepresentation != JAVA_LEGACY) { + throw e; + } + return; + } + + if (uuidRepresentation == UuidRepresentation.UNSPECIFIED) { + assertEquals(Binary.class, document.get("standard").getClass()); + assertEquals(new Binary(BsonBinarySubType.UUID_STANDARD, standardEncodedValue), document.get("standard")); + + assertEquals(Binary.class, document.get("legacy").getClass()); + assertEquals(new Binary(BsonBinarySubType.UUID_LEGACY, standardEncodedValue), document.get("legacy")); + } else if (uuidRepresentation == STANDARD) { + assertEquals(UUID.class, document.get("standard").getClass()); + assertEquals(uuid, document.get("standard")); + + assertEquals(Binary.class, document.get("legacy").getClass()); + assertEquals(new Binary(BsonBinarySubType.UUID_LEGACY, standardEncodedValue), document.get("legacy")); + } else { + assertEquals(Binary.class, document.get("standard").getClass()); + assertEquals(new Binary(BsonBinarySubType.UUID_STANDARD, standardEncodedValue), document.get("standard")); + + assertEquals(UUID.class, document.get("legacy").getClass()); + assertEquals(uuid, document.get("legacy")); + } + } + + @Test + public void shouldDecodePojoWithStandardUuidRepresentation() { + + bsonDocumentCollection.insertOne(new BsonDocument("_id", new BsonBinary(uuid, STANDARD))); + + try { + UuidIdPojo document = uuidIdPojoCollection.find().first(); + assertNotNull(document); + assertEquals(uuid, document.getId()); + } catch (BSONException e) { + assertNotEquals(STANDARD, uuidRepresentation); + } + } + + @Test + public void shouldDecodePojoWithLegacyUuidRepresentation() { + + bsonDocumentCollection.insertOne(new BsonDocument("_id", new 
BsonBinary(uuid,
+                uuidRepresentation == UuidRepresentation.UNSPECIFIED || uuidRepresentation == STANDARD
+                        ? UuidRepresentation.PYTHON_LEGACY
+                        : uuidRepresentation)));
+
+        try {
+            UuidIdPojo document = uuidIdPojoCollection.find().first();
+            assertNotNull(document);
+            assertEquals(uuid, document.getId());
+        } catch (BSONException e) {
+            assertNotEquals(UuidRepresentation.C_SHARP_LEGACY, uuidRepresentation);
+            assertNotEquals(UuidRepresentation.PYTHON_LEGACY, uuidRepresentation);
+        }
+    }
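+
+    // A note for readers (the encodings themselves are fixed by the BSON spec):
+    // JAVA_LEGACY reverses each 8-byte half of the UUID, C_SHARP_LEGACY reverses the
+    // first three fields (int, short, short), and PYTHON_LEGACY matches the
+    // big-endian STANDARD layout, which is why its expected encoding in the data
+    // below equals the standard one. For example:
+    //
+    //   UUID u = UUID.fromString("00112233-4455-6677-8899-aabbccddeeff");
+    //   new BsonBinary(u, UuidRepresentation.STANDARD).getData()
+    //       // 00112233445566778899AABBCCDDEEFF
+    //   new BsonBinary(u, UuidRepresentation.JAVA_LEGACY).getData()
+    //       // 7766554433221100FFEEDDCCBBAA9988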
+
+    @Parameterized.Parameters(name = "{0}/{1}/{2}")
+    public static Collection<Object[]> data() {
+        UUID uuid = UUID.fromString("00112233-4455-6677-8899-aabbccddeeff");
+
+        byte[] standardEncodedValue = Hex.decode("00112233445566778899AABBCCDDEEFF");
+
+        List<Object[]> data = new ArrayList<>();
+        data.add(new Object[]{
+                JAVA_LEGACY,
+                BsonBinarySubType.UUID_LEGACY,
+                uuid,
+                Hex.decode("7766554433221100FFEEDDCCBBAA9988"),
+                standardEncodedValue});
+        data.add(new Object[]{
+                UuidRepresentation.C_SHARP_LEGACY,
+                BsonBinarySubType.UUID_LEGACY,
+                uuid,
+                Hex.decode("33221100554477668899AABBCCDDEEFF"),
+                standardEncodedValue});
+        data.add(new Object[]{
+                UuidRepresentation.PYTHON_LEGACY,
+                BsonBinarySubType.UUID_LEGACY,
+                uuid,
+                standardEncodedValue,
+                standardEncodedValue});
+        data.add(new Object[]{
+                STANDARD,
+                BsonBinarySubType.UUID_STANDARD,
+                uuid,
+                standardEncodedValue,
+                standardEncodedValue});
+        data.add(new Object[]{
+                UuidRepresentation.UNSPECIFIED,
+                null,
+                uuid,
+                null,
+                standardEncodedValue});
+        return data;
+    }
+
+    public static class UuidIdPojo {
+        private UUID id;
+
+        @SuppressWarnings("unused")
+        public UuidIdPojo() {
+        }
+
+        UuidIdPojo(final UUID id) {
+            this.id = id;
+        }
+
+        public UUID getId() {
+            return id;
+        }
+
+        public void setId(final UUID id) {
+            this.id = id;
+        }
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/AtlasSearchIndexManagementProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/AtlasSearchIndexManagementProseTest.java
new file mode 100644
index 00000000000..d9b60a3b0f7
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/AtlasSearchIndexManagementProseTest.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.MongoClientSettings;
+
+/**
+ * See Search Index Management Tests
+ */
+public class AtlasSearchIndexManagementProseTest extends AbstractAtlasSearchIndexManagementProseTest {
+    protected MongoClient createMongoClient(final MongoClientSettings settings) {
+        return MongoClients.create(settings);
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/ChangeStreamProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/ChangeStreamProseTest.java
new file mode 100644
index 00000000000..51c2da53b00
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/ChangeStreamProseTest.java
@@ -0,0 +1,450 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.MongoChangeStreamException;
+import com.mongodb.MongoCommandException;
+import com.mongodb.MongoInterruptedException;
+import com.mongodb.MongoQueryException;
+import com.mongodb.assertions.Assertions;
+import com.mongodb.client.internal.MongoChangeStreamCursorImpl;
+import com.mongodb.client.model.Aggregates;
+import com.mongodb.client.model.ChangeStreamPreAndPostImagesOptions;
+import com.mongodb.client.model.CreateCollectionOptions;
+import com.mongodb.client.model.TimeSeriesOptions;
+import com.mongodb.client.model.changestream.ChangeStreamDocument;
+import com.mongodb.client.model.changestream.FullDocumentBeforeChange;
+import com.mongodb.client.model.changestream.NamespaceType;
+import com.mongodb.client.model.changestream.SplitEvent;
+import com.mongodb.internal.operation.AggregateResponseBatchCursor;
+import org.bson.BsonArray;
+import org.bson.BsonDocument;
+import org.bson.BsonInt32;
+import org.bson.BsonString;
+import org.bson.Document;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import java.lang.reflect.Field;
+
+import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet;
+import static com.mongodb.ClusterFixture.serverVersionAtLeast;
+import static com.mongodb.client.CrudTestHelper.repeat;
+import static com.mongodb.client.model.Updates.set;
+import static java.util.Arrays.asList;
+import static java.util.Collections.singletonList;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
+// See https://github.com/mongodb/specifications/tree/master/source/change-streams/tests/README.md#prose-tests
+public class ChangeStreamProseTest extends DatabaseTestCase {
+    private BsonDocument failPointDocument;
+
+    @BeforeEach
+    @Override
+    public void setUp() {
+        assumeTrue(canRunTests());
+        super.setUp();
+        collection.insertOne(Document.parse("{ _id : 0 }"));
+    }
+
+    static class ChangeStreamWatcher implements Runnable {
+        private volatile boolean interruptedExceptionOccurred = false;
+        private final MongoChangeStreamCursor<ChangeStreamDocument<Document>> cursor;
+
+        ChangeStreamWatcher(final MongoChangeStreamCursor<ChangeStreamDocument<Document>> cursor) {
+            this.cursor = cursor;
+        }
+
+        @Override
+        public void run() {
+            try {
+                cursor.next();
+            } catch (MongoInterruptedException e) {
+                interruptedExceptionOccurred = true;
+            } finally {
+                cursor.close();
+            }
+        }
+
+        public boolean getInterruptedExceptionOccurred() {
+            return interruptedExceptionOccurred;
+        }
+    }
+
+    //
+    // Test that MongoInterruptedException is not retryable so that a thread can be interrupted.
+    //
+    @Test
+    public void testThreadInterrupted() throws InterruptedException {
+        ChangeStreamWatcher watcher = new ChangeStreamWatcher(collection.watch().cursor());
+        Thread t = new Thread(watcher);
+        t.start();
+        t.interrupt();
+        t.join();
+        assertTrue(watcher.getInterruptedExceptionOccurred());
+    }
+
+    //
+    // Test that the ChangeStream continuously tracks the last seen resumeToken.
+    //
+    @Test
+    public void test01ChangeStreamTracksResumeToken() {
+        try (MongoCursor<ChangeStreamDocument<Document>> cursor = collection.watch().iterator()) {
+            collection.insertOne(Document.parse("{x: 1}"));
+            BsonDocument initialResumeToken = cursor.next().getResumeToken();
+            assertNotNull(initialResumeToken);
+
+            collection.insertOne(Document.parse("{x: 2}"));
+            BsonDocument nextResumeToken = cursor.next().getResumeToken();
+            assertNotNull(nextResumeToken);
+            assertNotEquals(initialResumeToken, nextResumeToken);
+        }
+    }
+
+    //
+    // Test that the ChangeStream will throw an exception if the server response is missing the resume token (if wire version is < 8).
+    //
+    @Test
+    public void test02MissingResumeTokenThrowsException() {
+        try (MongoCursor<ChangeStreamDocument<Document>> cursor = collection.watch(asList(Aggregates.project(Document.parse("{ _id : 0 }"))))
+                .iterator()) {
+            collection.insertOne(Document.parse("{ x: 1 }"));
+            cursor.next();
+            fail();
+        } catch (MongoChangeStreamException | MongoQueryException ignored) {
+        }
+    }
+
+    //
+    // Test that the ChangeStream will automatically resume one time on a resumable error (including not primary)
+    // with the initial pipeline and options, except for the addition/update of a resumeToken.
+    //
+    @Test
+    public void test03ResumeOneTimeOnError() {
+        try (MongoChangeStreamCursor<ChangeStreamDocument<Document>> cursor = collection.watch().cursor()) {
+            collection.insertOne(Document.parse("{ x: 1 }"));
+            setFailPoint("getMore", 10107);
+            assertNotNull(cursor.next());
+        } finally {
+            disableFailPoint();
+        }
+    }
+
+    //
+    // Test that ChangeStream will not attempt to resume on any error encountered while executing an aggregate command.
+    //
+    @Test
+    public void test04NoResumeForAggregateErrors() {
+        boolean exceptionFound = false;
+        MongoChangeStreamCursor<ChangeStreamDocument<Document>> cursor = null;
+
+        //noinspection TryFinallyCanBeTryWithResources
+        try {
+            cursor = collection.watch(asList(Document.parse("{ $unsupportedStage: { _id : 0 } }"))).cursor();
+        } catch (MongoCommandException e) {
+            exceptionFound = true;
+        } finally {
+            if (cursor != null) {
+                cursor.close();
+            }
+        }
+        assertTrue(exceptionFound);
+    }
+
+    //
+    // Ensure that a cursor returned from an aggregate command with a cursor id and an initial empty batch
+    // is not closed on the driver side.
+    //
+    @Test
+    public void test07CursorNotClosed() {
+        MongoCursor<ChangeStreamDocument<Document>> cursor = collection.watch().iterator();
+        assertNotNull(cursor.getServerCursor());
+        cursor.close();
+    }
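+
+    // Illustrative sketch (not one of the spec's prose tests): the resume tokens
+    // asserted on in the tests below are what an application would feed back to the
+    // driver to resume a stream after a restart, along these lines:
+    //
+    //   BsonDocument saved = lastSeenEvent.getResumeToken();
+    //   try (MongoCursor<ChangeStreamDocument<Document>> resumed =
+    //           collection.watch().resumeAfter(saved).iterator()) {
+    //       ChangeStreamDocument<Document> next = resumed.next();
+    //   }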
+
+    //
+    // 11. For a ChangeStream under these conditions:
+    //   Running against a server >=4.0.7.
+    //   The batch is empty or has been iterated to the last document.
+    // Expected result:
+    //   getResumeToken must return the postBatchResumeToken from the current command response.
+    //
+    @Test
+    public void test11GetResumeTokenReturnsPostBatchResumeToken() throws NoSuchFieldException, IllegalAccessException {
+        MongoChangeStreamCursor<ChangeStreamDocument<Document>> cursor = collection.watch().cursor();
+        assertNull(cursor.getResumeToken());
+        collection.insertOne(Document.parse("{ _id: 42, x: 1 }"));
+        try {
+            cursor.next();
+
+            // use reflection to access the postBatchResumeToken
+            AggregateResponseBatchCursor<?> batchCursor = getBatchCursor(cursor);
+            assertEquals(cursor.getResumeToken(), batchCursor.getPostBatchResumeToken());
+        } finally {
+            cursor.close();
+        }
+    }
+
+    //
+    // 13. For a ChangeStream under these conditions:
+    //   The batch is not empty.
+    //   The batch has been iterated up to but not including the last element.
+    // Expected result:
+    //   getResumeToken must return the _id of the previous document returned.
+    //
+    @Test
+    public void test13GetResumeTokenEqualsIdOfPreviousDocument() {
+        try (MongoChangeStreamCursor<ChangeStreamDocument<Document>> cursor = collection.watch().batchSize(3).cursor()) {
+            collection.insertOne(Document.parse("{ _id: 42, x: 1 }"));
+            collection.insertOne(Document.parse("{ _id: 43, x: 1 }"));
+            collection.insertOne(Document.parse("{ _id: 44, x: 1 }"));
+            cursor.next();
+            assertEquals(cursor.next().getResumeToken(), cursor.getResumeToken());
+        }
+    }
+
+    //
+    // 14. For a ChangeStream under these conditions: (startAfter only supported for 4.2)
+    //   The batch is not empty.
+    //   The batch hasn't been iterated at all.
+    //   Only the initial aggregate command has been executed.
+    // Expected result:
+    //   getResumeToken must return startAfter from the initial aggregate if the option was specified.
+    //   If startAfter is not specified, the getResumeToken result must be empty.
+    //
+    @Test
+    public void test14GetResumeTokenReturnsStartAfter() {
+        BsonDocument resumeToken;
+        MongoChangeStreamCursor<ChangeStreamDocument<Document>> cursor = collection.watch().cursor();
+        collection.insertOne(Document.parse("{ _id: 42, x: 1 }"));
+        collection.insertOne(Document.parse("{ _id: 43, x: 1 }"));
+        try {
+            resumeToken = cursor.next().getResumeToken();
+        } finally {
+            cursor.close();
+        }
+
+        cursor = collection.watch().startAfter(resumeToken).cursor();
+        try {
+            assertEquals(resumeToken, cursor.getResumeToken());
+        } finally {
+            cursor.close();
+        }
+
+        cursor = collection.watch().cursor();
+        try {
+            assertNull(cursor.getResumeToken());
+        } finally {
+            cursor.close();
+        }
+    }
+
+    //
+    // 14. For a ChangeStream under these conditions:
+    //   The batch is not empty.
+    //   The batch hasn't been iterated at all.
+    //   Only the initial aggregate command has been executed.
+    // Expected result:
+    //   getResumeToken must return resumeAfter from the initial aggregate if the option was specified.
+    //   If resumeAfter is not specified, the getResumeToken result must be empty.
+    //
+    @Test
+    public void test14GetResumeTokenReturnsResumeAfter() {
+        BsonDocument resumeAfterResumeToken;
+        MongoChangeStreamCursor<ChangeStreamDocument<Document>> cursor = collection.watch().cursor();
+        collection.insertOne(Document.parse("{ _id: 42, x: 1 }"));
+        collection.insertOne(Document.parse("{ _id: 43, x: 1 }"));
+        try {
+            resumeAfterResumeToken = cursor.next().getResumeToken();
+        } finally {
+            cursor.close();
+        }
+
+        cursor = collection.watch().resumeAfter(resumeAfterResumeToken).cursor();
+        try {
+            assertEquals(resumeAfterResumeToken, cursor.getResumeToken());
+        } finally {
+            cursor.close();
+        }
+
+        cursor = collection.watch().cursor();
+        try {
+            assertNull(cursor.getResumeToken());
+        } finally {
+            cursor.close();
+        }
+    }
+
+    //
+    // 15. For a ChangeStream under these conditions:
+    //   Running against a server >=4.0.7.
+    //   The batch is not empty.
+    //   The batch hasn't been iterated at all.
+    //   The stream has iterated beyond a previous batch and a getMore command has just been executed.
+    // Expected result:
+    //   getResumeToken must return the postBatchResumeToken from the previous command response.
+    //
+    @Test
+    public void test15GetResumeTokenReturnsPostBatchResumeTokenAfterGetMore()
+            throws NoSuchFieldException, IllegalAccessException {
+        try (MongoChangeStreamCursor<ChangeStreamDocument<Document>> cursor = collection.watch().cursor()) {
+            collection.insertOne(Document.parse("{ _id: 42, x: 1 }"));
+            // use reflection to access the postBatchResumeToken
+            AggregateResponseBatchCursor<?> batchCursor = getBatchCursor(cursor);
+
+            assertNotNull(batchCursor.getPostBatchResumeToken());
+
+            // resume token should be null before iteration
+            assertNull(cursor.getResumeToken());
+
+            cursor.next();
+            assertEquals(cursor.getResumeToken(), batchCursor.getPostBatchResumeToken());
+        }
+    }
+
+    @Test
+    public void test19SplitChangeStreamEvents() {
+        assumeTrue(serverVersionAtLeast(6, 0));
+        collection.drop();
+        database.createCollection(
+                getClass().getName(),
+                new CreateCollectionOptions().changeStreamPreAndPostImagesOptions(
+                        new ChangeStreamPreAndPostImagesOptions(true)));
+
+        // #. Insert into _C_ a document at least 10mb in size ...
+        int mb10 = 10 * 1024 * 1024;
+        collection.insertOne(new Document("value", new BsonString(repeat(mb10, "q"))));
+
+        // #. Create a change stream _S_ by calling ``watch`` on _C_ with
+        // pipeline ``[{ "$changeStreamSplitLargeEvent": {} }]`` and ``fullDocumentBeforeChange=required``.
+        ChangeStreamIterable<Document> changeStream = collection
+                .watch(asList(Document.parse("{ $changeStreamSplitLargeEvent: {} }")))
+                .fullDocumentBeforeChange(FullDocumentBeforeChange.REQUIRED);
+
+        try (MongoChangeStreamCursor<ChangeStreamDocument<Document>> cursor = changeStream.cursor()) {
+            // #. Call ``updateOne`` on _C_ with an empty ``query`` and an update setting
+            // the field to a new large value
+            collection.updateOne(new BsonDocument(), set("value", new BsonString(repeat(mb10, "z"))));
+
+            // #. Collect two events from _S_.
+            ChangeStreamDocument<Document> e1 = Assertions.assertNotNull(cursor.tryNext());
+            ChangeStreamDocument<Document> e2 = Assertions.assertNotNull(cursor.tryNext());
+
+            // #. Assert that the events collected have ``splitEvent`` fields ..., in that order.
+            assertEquals(e1.getSplitEvent(), new SplitEvent(1, 2));
+            assertEquals(e2.getSplitEvent(), new SplitEvent(2, 2));
+        }
+    }
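+
+    // For context (informal, not asserted by the spec): $changeStreamSplitLargeEvent
+    // splits a change event that would exceed the 16MB BSON document limit into
+    // ordered fragments, and SplitEvent(fragment, of) reports the 1-based fragment
+    // index and the total count, hence the expected SplitEvent(1, 2) followed by
+    // SplitEvent(2, 2) above.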
+
+    /**
+     * Not a prose spec test. However, it is an additional test case for better coverage.
+     */
+    @Test
+    public void testNameSpaceTypePresentChangeStreamEvents() {
+        assumeTrue(serverVersionAtLeast(8, 1));
+        collection.drop();
+
+        ChangeStreamIterable<Document> changeStream = database
+                .watch()
+                .fullDocumentBeforeChange(FullDocumentBeforeChange.REQUIRED)
+                .showExpandedEvents(true);
+
+        try (MongoChangeStreamCursor<ChangeStreamDocument<Document>> cursor = changeStream.cursor()) {
+
+            TimeSeriesOptions timeSeriesOptions = new TimeSeriesOptions("timestampFieldName");
+            database.createCollection(
+                    "timeSeriesCollection",
+                    new CreateCollectionOptions().timeSeriesOptions(timeSeriesOptions)
+            );
+            database.createCollection(
+                    getClass().getName(),
+                    new CreateCollectionOptions().changeStreamPreAndPostImagesOptions(
+                            new ChangeStreamPreAndPostImagesOptions(true)));
+            database.createView(
+                    "view",
+                    "timeSeriesCollection",
+                    singletonList(Document.parse("{ $match: { field: 1 } }"))
+            );
+
+            ChangeStreamDocument<Document> e1 = Assertions.assertNotNull(cursor.tryNext());
+            ChangeStreamDocument<Document> e2 = Assertions.assertNotNull(cursor.tryNext());
+            ChangeStreamDocument<Document> e3 = Assertions.assertNotNull(cursor.tryNext());
+
+            assertEquals(NamespaceType.TIMESERIES, e1.getNamespaceType());
+            assertEquals(NamespaceType.TIMESERIES.getValue(), e1.getNamespaceTypeString());
+            assertEquals(NamespaceType.COLLECTION, e2.getNamespaceType());
+            assertEquals(NamespaceType.COLLECTION.getValue(), e2.getNamespaceTypeString());
+            assertEquals(NamespaceType.VIEW, e3.getNamespaceType());
+            assertEquals(NamespaceType.VIEW.getValue(), e3.getNamespaceTypeString());
+        }
+    }
+
+    /**
+     * Not a prose spec test. However, it is an additional test case for better coverage.
+     */
+    @Test
+    public void testNameSpaceTypeAbsentChangeStreamEvents() {
+        assumeTrue(serverVersionAtLeast(8, 1));
+        collection.drop();
+
+        ChangeStreamIterable<Document> changeStream = database
+                .watch()
+                .fullDocumentBeforeChange(FullDocumentBeforeChange.REQUIRED);
+
+        try (MongoChangeStreamCursor<ChangeStreamDocument<Document>> cursor = changeStream.cursor()) {
+
+            collection.insertOne(new Document("test", new BsonString("test")));
+
+            ChangeStreamDocument<Document> e1 = Assertions.assertNotNull(cursor.tryNext());
+
+            assertNull(e1.getNamespaceType());
+            assertNull(e1.getNamespaceTypeString());
+        }
+    }
+
+    private void setFailPoint(final String command, final int errCode) {
+        failPointDocument = new BsonDocument("configureFailPoint", new BsonString("failCommand"))
+                .append("mode", new BsonDocument("times", new BsonInt32(1)))
+                .append("data", new BsonDocument("failCommands", new BsonArray(asList(new BsonString(command))))
+                        .append("errorCode", new BsonInt32(errCode))
+                        .append("errorLabels", new BsonArray(asList(new BsonString("ResumableChangeStreamError")))));
+        getCollectionHelper().runAdminCommand(failPointDocument);
+    }
+
+    private void disableFailPoint() {
+        getCollectionHelper().runAdminCommand(failPointDocument.append("mode", new BsonString("off")));
+    }
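+
+    // For reference: the fail point document assembled in setFailPoint is the Java
+    // equivalent of the following admin command (shown informally for a getMore
+    // failure with error code 10107):
+    //
+    //   db.adminCommand({
+    //       configureFailPoint: "failCommand",
+    //       mode: { times: 1 },
+    //       data: {
+    //           failCommands: ["getMore"],
+    //           errorCode: 10107,
+    //           errorLabels: ["ResumableChangeStreamError"]
+    //       }
+    //   })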
+
+    private boolean canRunTests() {
+        return isDiscoverableReplicaSet();
+    }
+
+    private AggregateResponseBatchCursor<?> getBatchCursor(final MongoChangeStreamCursor<ChangeStreamDocument<Document>> cursor)
+            throws NoSuchFieldException, IllegalAccessException {
+        Field batchCursorField = MongoChangeStreamCursorImpl.class.getDeclaredField("batchCursor");
+        batchCursorField.setAccessible(true);
+        return (AggregateResponseBatchCursor<?>) (batchCursorField.get(cursor));
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientEncryptionCustomEndpointTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientEncryptionCustomEndpointTest.java
new file mode 100644
index 00000000000..9b8cbef15a8
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/ClientEncryptionCustomEndpointTest.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.client.vault.ClientEncryptions;
+import org.bson.BsonDocument;
+
+public class ClientEncryptionCustomEndpointTest extends AbstractClientEncryptionCustomEndpointTest {
+    public ClientEncryptionCustomEndpointTest(final String name, final String provider, final BsonDocument masterKey,
+                                              final boolean testInvalidClientEncryption,
+                                              final Class<? extends RuntimeException> exceptionClass,
+                                              final Class<? extends RuntimeException> wrappedExceptionClass,
+                                              final String messageContainedInException) {
+        super(name, provider, masterKey, testInvalidClientEncryption, exceptionClass, wrappedExceptionClass, messageContainedInException);
+    }
+
+    @Override
+    public ClientEncryption getClientEncryption(final ClientEncryptionSettings settings) {
+        return ClientEncryptions.create(settings);
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientEncryptionDataKeyAndDoubleEncryptionTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientEncryptionDataKeyAndDoubleEncryptionTest.java
new file mode 100644
index 00000000000..def52e4ba22
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/ClientEncryptionDataKeyAndDoubleEncryptionTest.java
@@ -0,0 +1,233 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.AutoEncryptionSettings;
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.MongoClientException;
+import com.mongodb.client.model.vault.DataKeyOptions;
+import com.mongodb.client.model.vault.EncryptOptions;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.client.vault.ClientEncryptions;
+import com.mongodb.fixture.EncryptionFixture.KmsProviderType;
+import com.mongodb.internal.connection.TestCommandListener;
+import org.bson.BsonBinary;
+import org.bson.BsonDocument;
+import org.bson.BsonString;
+import org.bson.Document;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+
+import static com.mongodb.ClusterFixture.hasEncryptionTestsEnabled;
+import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder;
+import static com.mongodb.client.model.Filters.eq;
+import static com.mongodb.fixture.EncryptionFixture.getKmsProviders;
+import static java.lang.String.format;
+import static java.util.Arrays.asList;
+import static java.util.Collections.singletonList;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertThrows;
+import static org.junit.Assume.assumeTrue;
+
+@RunWith(Parameterized.class)
+public class ClientEncryptionDataKeyAndDoubleEncryptionTest {
+
+    private final String providerName;
+
+    private MongoClient client;
+    private MongoClient clientEncrypted;
+    private ClientEncryption clientEncryption;
+    private TestCommandListener commandListener;
+
+    public ClientEncryptionDataKeyAndDoubleEncryptionTest(final String providerName) {
+        this.providerName = providerName;
+    }
+
+    @Before
+    public void setUp() {
+        assumeTrue("Has encryption tests", hasEncryptionTestsEnabled());
+
+        // Step 1: create unencrypted client
+        commandListener = new TestCommandListener();
+        client = MongoClients.create(getMongoClientSettingsBuilder().addCommandListener(commandListener).build());
+        client.getDatabase("keyvault").getCollection("datakeys").drop();
+        client.getDatabase("db").getCollection("coll").drop();
+
+        // Step 2: Create encrypted client and client encryption
+        Map<String, Map<String, Object>> kmsProviders = getKmsProviders(
+                KmsProviderType.AWS,
+                KmsProviderType.AZURE,
+                KmsProviderType.GCP,
+                KmsProviderType.LOCAL
+        );
+
+        HashMap<String, BsonDocument> schemaMap = new HashMap<String, BsonDocument>() {{
+            put("db.coll", BsonDocument.parse("{"
+                    + "  \"bsonType\": \"object\","
+                    + "  \"properties\": {"
+                    + "    \"encrypted_placeholder\": {"
+                    + "      \"encrypt\": {"
+                    + "        \"keyId\": \"/placeholder\","
+                    + "        \"bsonType\": \"string\","
+                    + "        \"algorithm\": \"AEAD_AES_256_CBC_HMAC_SHA_512-Random\""
+                    + "      }"
+                    + "    }"
+                    + "  }"
+                    + "}"));
+        }};
+
+        String keyVaultNamespace = "keyvault.datakeys";
+        clientEncrypted = MongoClients.create(getMongoClientSettingsBuilder()
+                .autoEncryptionSettings(AutoEncryptionSettings.builder()
+                        .keyVaultNamespace(keyVaultNamespace)
+                        .kmsProviders(kmsProviders)
+                        .schemaMap(schemaMap)
+                        .build())
+                .build());
+
+        clientEncryption = ClientEncryptions.create(
+                ClientEncryptionSettings
+                        .builder()
+                        .keyVaultMongoClientSettings(getMongoClientSettingsBuilder().addCommandListener(commandListener).build())
+                        .keyVaultNamespace(keyVaultNamespace)
+                        .kmsProviders(kmsProviders)
+                        .build());
+    }
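+
+    // In testProvider below, binary subtype 4 is the standard UUID subtype that data
+    // key _ids are stored with, and subtype 6 is the BSON binary subtype reserved
+    // for encrypted values; the two getType() assertions check those constants.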
+
+    @Test
+    public void testProvider() {
+        String keyAltName = format("%s_altname", providerName);
+        BsonBinary dataKeyId = clientEncryption.createDataKey(providerName,
+                new DataKeyOptions().keyAltNames(singletonList(keyAltName)).masterKey(getMasterKey()));
+        assertEquals(4, dataKeyId.getType());
+
+        ArrayList<Document> dataKeys = client
+                .getDatabase("keyvault")
+                .getCollection("datakeys")
+                .find(eq("_id", dataKeyId))
+                .into(new ArrayList<>());
+        assertEquals(1, dataKeys.size());
+
+        Document dataKey = dataKeys.get(0);
+        assertEquals(providerName, dataKey.get("masterKey", new Document()).get("provider", ""));
+
+        String insertWriteConcern = commandListener.getCommandStartedEvent("insert")
+                .getCommand()
+                .getDocument("writeConcern", new BsonDocument())
+                .getString("w", new BsonString(""))
+                .getValue();
+        assertEquals("majority", insertWriteConcern);
+
+        String stringToEncrypt = format("hello %s", providerName);
+        BsonBinary encrypted = clientEncryption.encrypt(new BsonString(stringToEncrypt),
+                new EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic")
+                        .keyId(dataKeyId));
+        assertEquals(6, encrypted.getType());
+
+        Document insertDocument = new Document("_id", providerName);
+        insertDocument.put("value", encrypted);
+        clientEncrypted.getDatabase("db").getCollection("coll").insertOne(insertDocument);
+        Document decryptedDocument = clientEncrypted.getDatabase("db")
+                .getCollection("coll")
+                .find(eq("_id", providerName))
+                .first();
+        assertNotNull(decryptedDocument);
+        assertEquals(stringToEncrypt, decryptedDocument.get("value", ""));
+
+        BsonBinary encryptedKeyAltName = clientEncryption.encrypt(new BsonString(stringToEncrypt),
+                new EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic")
+                        .keyAltName(keyAltName));
+        assertEquals(encrypted, encryptedKeyAltName);
+
+        assertThrows(MongoClientException.class, () ->
+                clientEncrypted
+                        .getDatabase("db")
+                        .getCollection("coll")
+                        .insertOne(new Document("encrypted_placeholder", encrypted))
+        );
+    }
+
+    private BsonDocument getMasterKey() {
+        switch (providerName) {
+            case "aws":
+                return BsonDocument.parse("{"
+                        + "  \"region\": \"us-east-1\","
+                        + "  \"key\": \"arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0\""
+                        + "}");
+            case "azure":
+                return BsonDocument.parse("{"
+                        + "  \"keyVaultEndpoint\": \"key-vault-csfle.vault.azure.net\","
+                        + "  \"keyName\": \"key-name-csfle\""
+                        + "}");
+            case "gcp":
+                return BsonDocument.parse("{"
+                        + "  \"projectId\": \"devprod-drivers\","
+                        + "  \"location\": \"global\", "
+                        + "  \"keyRing\": \"key-ring-csfle\","
+                        + "  \"keyName\": \"key-name-csfle\""
+                        + "}");
+            default:
+                return new BsonDocument();
+        }
+    }
+
+    @Parameterized.Parameters(name = "providerName: {0}")
+    public static Collection<Object[]> data() {
+        return asList(new Object[]{"aws"}, new Object[]{"azure"}, new Object[]{"gcp"}, new Object[]{"local"});
+    }
+
+    @After
+    public void after() {
+        if (client != null) {
+            try {
+                client.close();
+            } catch (Exception e) {
+                // ignore
+            }
+        }
+
+        if (clientEncrypted != null) {
+            try {
+                clientEncrypted.close();
+            } catch (Exception e) {
+                // ignore
+            }
+        }
+
+        if (clientEncryption != null) {
+            try {
+                clientEncryption.close();
+            } catch (Exception e) {
+                // ignore
+            }
+        }
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientEncryptionRewrapManyDataKeyProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientEncryptionRewrapManyDataKeyProseTest.java
new file mode 100644
index 00000000000..cc3dd79a76a
--- /dev/null
+++ 
b/driver-sync/src/test/functional/com/mongodb/client/ClientEncryptionRewrapManyDataKeyProseTest.java @@ -0,0 +1,35 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.MongoClientSettings; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.client.vault.ClientEncryptions; + +public class ClientEncryptionRewrapManyDataKeyProseTest extends AbstractClientEncryptionRewrapManyDataKeyProseTest { + + @Override + protected MongoClient createMongoClient(final MongoClientSettings settings) { + return MongoClients.create(settings); + } + + @Override + public ClientEncryption getClientEncryption(final ClientEncryptionSettings settings) { + return ClientEncryptions.create(settings); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientEncryptionTextExplicitEncryptionTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientEncryptionTextExplicitEncryptionTest.java new file mode 100644 index 00000000000..23bd9ec135d --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/ClientEncryptionTextExplicitEncryptionTest.java @@ -0,0 +1,34 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.MongoClientSettings; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.client.vault.ClientEncryptions; + +public class ClientEncryptionTextExplicitEncryptionTest extends AbstractClientEncryptionTextExplicitEncryptionTest { + @Override + protected MongoClient createMongoClient(final MongoClientSettings settings) { + return MongoClients.create(settings); + } + + @Override + protected ClientEncryption createClientEncryption(final ClientEncryptionSettings settings) { + return ClientEncryptions.create(settings); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientMetadataProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientMetadataProseTest.java new file mode 100644 index 00000000000..f457eb350fe --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/ClientMetadataProseTest.java @@ -0,0 +1,30 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.MongoClientSettings;
+import com.mongodb.MongoDriverInformation;
+import com.mongodb.lang.Nullable;
+
+public class ClientMetadataProseTest extends AbstractClientMetadataProseTest {
+
+    @Override
+    protected MongoClient createMongoClient(@Nullable final MongoDriverInformation mongoDriverInformation,
+                                            final MongoClientSettings mongoClientSettings) {
+        return MongoClients.create(mongoClientSettings, mongoDriverInformation);
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientMetadataTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientMetadataTest.java
new file mode 100644
index 00000000000..652d5a4059d
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/ClientMetadataTest.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.client.unified.UnifiedSyncTest;
+import org.junit.jupiter.params.provider.Arguments;
+
+import java.util.Collection;
+
+public class ClientMetadataTest extends UnifiedSyncTest {
+
+    private static Collection<Arguments> data() {
+        return getTestData("mongodb-handshake/tests/unified");
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryption25LookupProseTests.java b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryption25LookupProseTests.java
new file mode 100644
index 00000000000..4324fab484c
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryption25LookupProseTests.java
@@ -0,0 +1,247 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.AutoEncryptionSettings;
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.MongoNamespace;
+import com.mongodb.WriteConcern;
+import com.mongodb.client.model.CreateCollectionOptions;
+import com.mongodb.client.model.ValidationOptions;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.client.vault.ClientEncryptions;
+import com.mongodb.crypt.capi.MongoCryptException;
+import com.mongodb.fixture.EncryptionFixture;
+import org.bson.BsonArray;
+import org.bson.BsonDocument;
+import org.bson.Document;
+import org.bson.types.Binary;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.CsvSource;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Consumer;
+import java.util.stream.Collectors;
+
+import static com.mongodb.ClusterFixture.isStandalone;
+import static com.mongodb.ClusterFixture.serverVersionAtLeast;
+import static com.mongodb.ClusterFixture.serverVersionLessThan;
+import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder;
+import static com.mongodb.fixture.EncryptionFixture.getKmsProviders;
+import static com.mongodb.testing.MongoAssertions.assertCause;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assumptions.assumeFalse;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+import static util.JsonPoweredTestHelper.getTestDocument;
+
+/**
+ *
+ * 25. Test $lookup
+ */
+public class ClientSideEncryption25LookupProseTests {
+    private MongoClient client;
+
+    protected MongoClient createMongoClient(final MongoClientSettings settings) {
+        return MongoClients.create(settings);
+    }
+
+    protected ClientEncryption createClientEncryption(final ClientEncryptionSettings settings) {
+        return ClientEncryptions.create(settings);
+    }
+
+    @BeforeEach
+    public void setUp() {
+        assumeFalse(isStandalone());
+        assumeTrue(serverVersionAtLeast(7, 0));
+
+        // Create an encrypted MongoClient named `encryptedClient` configured with:
+        MongoNamespace dataKeysNamespace = new MongoNamespace("db.keyvault");
+        Map<String, Map<String, Object>> kmsProviders = getKmsProviders(EncryptionFixture.KmsProviderType.LOCAL);
+        MongoClient encryptedClient = createMongoClient(getMongoClientSettingsBuilder()
+                .autoEncryptionSettings(
+                        AutoEncryptionSettings.builder()
+                                .keyVaultNamespace(dataKeysNamespace.getFullName())
+                                .kmsProviders(kmsProviders)
+                                .build())
+                .build());
+        // Use `encryptedClient` to drop `db.keyvault`.
+        MongoDatabase encryptedDb = encryptedClient.getDatabase("db");
+        MongoCollection<BsonDocument> encryptedCollection = encryptedDb
+                .getCollection(dataKeysNamespace.getCollectionName(), BsonDocument.class)
+                .withWriteConcern(WriteConcern.MAJORITY);
+        encryptedCollection.drop();
+        // Insert the key document from `key-doc.json` into `db.keyvault` with majority write concern.
+        encryptedCollection.insertOne(bsonDocumentFromPath("key-doc.json"));
+
+        // Use `encryptedClient` to drop and create the following collections:
+        Arrays.asList("csfle", "csfle2", "qe", "qe2", "no_schema", "no_schema2").forEach(c -> {
+            encryptedDb.getCollection(c).drop();
+        });
+        // create
+        encryptedDb.createCollection("csfle", new CreateCollectionOptions()
+                .validationOptions(new ValidationOptions()
+                        .validator(new BsonDocument("$jsonSchema", bsonDocumentFromPath("schema-csfle.json")))));
+        encryptedDb.createCollection("csfle2", new CreateCollectionOptions()
+                .validationOptions(new ValidationOptions()
+                        .validator(new BsonDocument("$jsonSchema", bsonDocumentFromPath("schema-csfle2.json")))));
+
+        encryptedDb.createCollection("qe",
+                new CreateCollectionOptions().encryptedFields(bsonDocumentFromPath("schema-qe.json")));
+        encryptedDb.createCollection("qe2",
+                new CreateCollectionOptions().encryptedFields(bsonDocumentFromPath("schema-qe2.json")));
+
+        encryptedDb.createCollection("no_schema");
+        encryptedDb.createCollection("no_schema2");
+
+        // Insert documents with `encryptedClient`:
+        Consumer<String> insert = (name) -> {
+            encryptedDb.getCollection(name).insertOne(new Document(name, name));
+        };
+        insert.accept("csfle");
+        insert.accept("csfle2");
+        insert.accept("qe");
+        insert.accept("qe2");
+        insert.accept("no_schema");
+        insert.accept("no_schema2");
+
+        // Create an unencrypted MongoClient named `unencryptedClient`.
+        MongoClient unencryptedClient = createMongoClient(getMongoClientSettingsBuilder().build());
+        MongoDatabase unencryptedDb = unencryptedClient.getDatabase("db");
+
+        Consumer<String> assertDocument = (name) -> {
+            List<BsonDocument> pipeline = Arrays.asList(
+                    BsonDocument.parse("{\"$project\" : {\"_id\" : 0, \"__safeContent__\" : 0}}")
+            );
+            Document decryptedDoc = encryptedDb.getCollection(name)
+                    .aggregate(pipeline).first();
+            assertEquals(decryptedDoc, new Document(name, name));
+            Document encryptedDoc = unencryptedDb.getCollection(name)
+                    .aggregate(pipeline).first();
+            assertNotNull(encryptedDoc);
+            assertEquals(Binary.class, encryptedDoc.get(name).getClass());
+        };
+
+        assertDocument.accept("csfle");
+        assertDocument.accept("csfle2");
+        assertDocument.accept("qe");
+        assertDocument.accept("qe2");
+
+        unencryptedClient.close();
+        encryptedClient.close();
+
+        client = createMongoClient(getMongoClientSettingsBuilder()
+                .autoEncryptionSettings(
+                        AutoEncryptionSettings.builder()
+                                .keyVaultNamespace(dataKeysNamespace.getFullName())
+                                .kmsProviders(kmsProviders)
+                                .build())
+                .build());
+    }
+
+    @AfterEach
+    @SuppressWarnings("try")
+    public void cleanUp() {
+        //noinspection EmptyTryBlock
+        try (MongoClient ignored = this.client) {
+            // just using try-with-resources to ensure they all get closed, even in the case of exceptions
+        }
+    }
+
+    @ParameterizedTest
+    @CsvSource({
+            "csfle, no_schema",
+            "qe, no_schema",
+            "no_schema, csfle",
+            "no_schema, qe",
+            "csfle, csfle2",
+            "qe, qe2",
+            "no_schema, no_schema2"})
+    void testCase1Through7(final String from, final String to) {
+        assumeTrue(serverVersionAtLeast(8, 1));
+        String mql = ("[\n"
+                + "   {\"$match\" : {\"<from>\" : \"<from>\"}},\n"
+                + "   {\n"
+                + "      \"$lookup\" : {\n"
+                + "         \"from\" : \"<to>\",\n"
+                + "         \"as\" : \"matched\",\n"
+                + "         \"pipeline\" : [ {\"$match\" : {\"<to>\" : \"<to>\"}}, {\"$project\" : {\"_id\" : 0, \"__safeContent__\" : 0}} ]\n"
+                + "      }\n"
+                + "   },\n"
+                + "   {\"$project\" : {\"_id\" : 0, \"__safeContent__\" : 0}}\n"
+                + "]").replace("<from>", from).replace("<to>", to);
+
+        List<BsonDocument> pipeline = BsonArray.parse(mql).stream()
+                .map(stage -> stage.asDocument())
+                .collect(Collectors.toList());
+        assertEquals(
+                Document.parse("{\"<from>\" : \"<from>\", \"matched\" : [ {\"<to>\" : \"<to>\"} ]}"
                        .replace("<from>", from).replace("<to>", to)),
+                client.getDatabase("db").getCollection(from).aggregate(pipeline).first());
+    }
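+
+    // The <from> and <to> placeholders in the MQL template above are substituted
+    // with the collection-name pairs supplied by @CsvSource, so, e.g., the pair
+    // "csfle, no_schema" produces a $lookup from the "csfle" collection into
+    // "no_schema".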
+
+    @Test
+    void testCase8() {
+        assumeTrue(serverVersionAtLeast(8, 1));
+        List<BsonDocument> pipeline = BsonArray.parse("[\n"
+                + "   {\"$match\" : {\"csfle\" : \"qe\"}},\n"
+                + "   {\n"
+                + "      \"$lookup\" : {\n"
+                + "         \"from\" : \"qe\",\n"
+                + "         \"as\" : \"matched\",\n"
+                + "         \"pipeline\" : [ {\"$match\" : {\"qe\" : \"qe\"}}, {\"$project\" : {\"_id\" : 0}} ]\n"
+                + "      }\n"
+                + "   },\n"
+                + "   {\"$project\" : {\"_id\" : 0}}\n"
+                + "]").stream().map(stage -> stage.asDocument()).collect(Collectors.toList());
+
+        assertCause(
+                MongoCryptException.class,
+                "not supported",
+                () -> client.getDatabase("db").getCollection("csfle").aggregate(pipeline).first());
+    }
+
+    @Test
+    void testCase9() {
+        assumeTrue(serverVersionLessThan(8, 1));
+        List<BsonDocument> pipeline = BsonArray.parse("[\n"
+                + "   {\"$match\" : {\"csfle\" : \"csfle\"}},\n"
+                + "   {\n"
+                + "      \"$lookup\" : {\n"
+                + "         \"from\" : \"no_schema\",\n"
+                + "         \"as\" : \"matched\",\n"
+                + "         \"pipeline\" : [ {\"$match\" : {\"no_schema\" : \"no_schema\"}}, {\"$project\" : {\"_id\" : 0}} ]\n"
+                + "      }\n"
+                + "   },\n"
+                + "   {\"$project\" : {\"_id\" : 0}}\n"
+                + "]").stream().map(stage -> stage.asDocument()).collect(Collectors.toList());
+        assertCause(
+                RuntimeException.class,
+                "Upgrade",
+                () -> client.getDatabase("db").getCollection("csfle").aggregate(pipeline).first());
+    }
+
+    public static BsonDocument bsonDocumentFromPath(final String path) {
+        return getTestDocument("client-side-encryption/etc/data/lookup/" + path);
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionAutoDataKeysTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionAutoDataKeysTest.java
new file mode 100644
index 00000000000..3b0c378f73e
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionAutoDataKeysTest.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.mongodb.client; + +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.MongoClientSettings; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.client.vault.ClientEncryptions; + +final class ClientSideEncryptionAutoDataKeysTest extends AbstractClientSideEncryptionAutoDataKeysTest { + @Override + protected MongoClient createMongoClient(final MongoClientSettings settings) { + return MongoClients.create(settings); + } + + @Override + protected ClientEncryption createClientEncryption(final ClientEncryptionSettings settings) { + return ClientEncryptions.create(settings); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionAwsCredentialFromEnvironmentTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionAwsCredentialFromEnvironmentTest.java new file mode 100644 index 00000000000..70964e35387 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionAwsCredentialFromEnvironmentTest.java @@ -0,0 +1,34 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.MongoClientSettings; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.client.vault.ClientEncryptions; + +public class ClientSideEncryptionAwsCredentialFromEnvironmentTest extends AbstractClientSideEncryptionAwsCredentialFromEnvironmentTest { + @Override + protected ClientEncryption createClientEncryption(final ClientEncryptionSettings settings) { + return ClientEncryptions.create(settings); + } + + @Override + protected MongoClient createMongoClient(final MongoClientSettings settings) { + return MongoClients.create(settings); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionBsonSizeLimitsSpecification.groovy b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionBsonSizeLimitsSpecification.groovy new file mode 100644 index 00000000000..68a5c1e9a1c --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionBsonSizeLimitsSpecification.groovy @@ -0,0 +1,161 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.mongodb.client
+
+import com.mongodb.AutoEncryptionSettings
+import com.mongodb.ClientEncryptionSettings
+import com.mongodb.MongoNamespace
+import com.mongodb.MongoWriteException
+import com.mongodb.WriteConcern
+import com.mongodb.client.vault.ClientEncryption
+import com.mongodb.client.vault.ClientEncryptions
+import com.mongodb.internal.connection.TestCommandListener
+import org.bson.BsonDocument
+import org.bson.BsonString
+
+import static com.mongodb.ClusterFixture.isClientSideEncryptionTest
+import static com.mongodb.client.Fixture.getDefaultDatabaseName
+import static com.mongodb.client.Fixture.getMongoClient
+import static com.mongodb.client.Fixture.getMongoClientSettings
+import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder
+import static java.util.Collections.singletonMap
+import static org.junit.Assume.assumeTrue
+import static util.JsonPoweredTestHelper.getTestDocument
+
+class ClientSideEncryptionBsonSizeLimitsSpecification extends FunctionalSpecification {
+
+    private final String collName = 'ClientSideEncryptionBsonSizeLimitsSpecification'
+    private final MongoNamespace keyVaultNamespace = new MongoNamespace('test.datakeys')
+    private final MongoNamespace autoEncryptingCollectionNamespace = new MongoNamespace(getDefaultDatabaseName(),
+            collName)
+    private final MongoCollection<BsonDocument> dataKeyCollection = getMongoClient()
+            .getDatabase(keyVaultNamespace.databaseName).getCollection(keyVaultNamespace.collectionName, BsonDocument)
+            .withWriteConcern(WriteConcern.MAJORITY)
+    private final MongoCollection<BsonDocument> dataCollection = getMongoClient()
+            .getDatabase(autoEncryptingCollectionNamespace.databaseName).getCollection(autoEncryptingCollectionNamespace.collectionName,
+                    BsonDocument)
+    private final TestCommandListener commandListener = new TestCommandListener()
+
+    private MongoClient autoEncryptingClient
+    private ClientEncryption clientEncryption
+    private MongoCollection<BsonDocument> autoEncryptingDataCollection
+
+    def setup() {
+        assumeTrue(isClientSideEncryptionTest())
+        dataKeyCollection.drop()
+        dataCollection.drop()
+
+        dataKeyCollection.insertOne(getTestDocument('client-side-encryption/limits/limits-key.json'))
+
+        def providerProperties =
+                ['local': ['key': Base64.getDecoder().decode('Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN'
+                        + '3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk')]
+                ]
+
+        autoEncryptingClient = MongoClients.create(getMongoClientSettingsBuilder()
+                .autoEncryptionSettings(AutoEncryptionSettings.builder()
+                        .keyVaultNamespace(keyVaultNamespace.fullName)
+                        .kmsProviders(providerProperties)
+                        .schemaMap(singletonMap(autoEncryptingCollectionNamespace.fullName,
+                                getTestDocument('client-side-encryption/limits/limits-schema.json')))
+                        .build())
+                .addCommandListener(commandListener)
+                .build())
+
+        autoEncryptingDataCollection = autoEncryptingClient.getDatabase(autoEncryptingCollectionNamespace.databaseName)
+                .getCollection(autoEncryptingCollectionNamespace.collectionName, BsonDocument)
+
+        clientEncryption = ClientEncryptions.create(ClientEncryptionSettings.builder()
+                .keyVaultMongoClientSettings(getMongoClientSettings())
+                .keyVaultNamespace(keyVaultNamespace.fullName)
+                .kmsProviders(providerProperties)
+                .build())
+    }
+
+    def 'test BSON size limits'() {
+        when:
+        autoEncryptingDataCollection.insertOne(
+                new BsonDocument('_id', new BsonString('over_2mib_under_16mib'))
+                        .append('unencrypted', new BsonString('a' * 2097152)))
+
+        then:
+        noExceptionThrown()
+
+        when:
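+        // (Aside: 2097152 bytes is 2 MiB; the "- 2000" headroom used below is meant
+        // to keep the document under the limit until automatic encryption expands it.)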
autoEncryptingDataCollection.insertOne(getTestDocument('client-side-encryption/limits/limits-doc.json') + .append('_id', new BsonString('encryption_exceeds_2mib')) + .append('unencrypted', new BsonString('a' * (2097152 - 2000)))) + + then: + noExceptionThrown() + + when: + commandListener.reset() + autoEncryptingDataCollection.insertMany( + [ + new BsonDocument('_id', new BsonString('over_2mib_1')) + .append('unencrypted', new BsonString('a' * 2097152)), + new BsonDocument('_id', new BsonString('over_2mib_2')) + .append('unencrypted', new BsonString('a' * 2097152)) + ]) + + then: + noExceptionThrown() + countStartedEvents('insert') == 2 + + when: + commandListener.reset() + autoEncryptingDataCollection.insertMany( + [ + getTestDocument('client-side-encryption/limits/limits-doc.json') + .append('_id', new BsonString('encryption_exceeds_2mib_1')) + .append('unencrypted', new BsonString('a' * (2097152 - 2000))), + getTestDocument('client-side-encryption/limits/limits-doc.json') + .append('_id', new BsonString('encryption_exceeds_2mib_2')) + .append('unencrypted', new BsonString('a' * (2097152 - 2000))), + ]) + + then: + noExceptionThrown() + countStartedEvents('insert') == 2 + + when: + autoEncryptingDataCollection.insertOne( + new BsonDocument('_id', new BsonString('under_16mib')) + .append('unencrypted', new BsonString('a' * (16777216 - 2000)))) + + then: + noExceptionThrown() + + when: + autoEncryptingDataCollection.insertOne(getTestDocument('client-side-encryption/limits/limits-doc.json') + .append('_id', new BsonString('encryption_exceeds_16mib')) + .append('unencrypted', new BsonString('a' * (16777216 - 2000)))) + + then: + thrown(MongoWriteException) + } + + private int countStartedEvents(String name) { + int count = 0 + for (def cur : commandListener.commandStartedEvents) { + if (cur.commandName == name) { + count++ + } + } + count + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionBypassAutoEncryptionTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionBypassAutoEncryptionTest.java new file mode 100644 index 00000000000..5b0d2a48cd6 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionBypassAutoEncryptionTest.java @@ -0,0 +1,118 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.AutoEncryptionSettings;
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.MongoNamespace;
+import com.mongodb.client.model.Filters;
+import com.mongodb.client.model.IndexOptions;
+import com.mongodb.client.model.Indexes;
+import com.mongodb.client.model.vault.DataKeyOptions;
+import com.mongodb.client.model.vault.EncryptOptions;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.client.vault.ClientEncryptions;
+import org.bson.BsonBinary;
+import org.bson.BsonString;
+import org.bson.Document;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.security.SecureRandom;
+import java.util.HashMap;
+import java.util.Map;
+
+import static com.mongodb.client.Fixture.getMongoClient;
+import static com.mongodb.client.Fixture.getMongoClientSettings;
+import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder;
+import static org.junit.Assert.assertEquals;
+
+public class ClientSideEncryptionBypassAutoEncryptionTest {
+    private MongoClient clientEncrypted;
+    private ClientEncryption clientEncryption;
+
+    @Before
+    public void setUp() {
+        MongoClient mongoClient = getMongoClient();
+
+        byte[] localMasterKey = new byte[96];
+        new SecureRandom().nextBytes(localMasterKey);
+
+        Map<String, Map<String, Object>> kmsProviders = new HashMap<String, Map<String, Object>>() {{
+            put("local", new HashMap<String, Object>() {{
+                put("key", localMasterKey);
+            }});
+        }};
+
+        // Set up the key vault for this example
+        MongoNamespace keyVaultNamespace = new MongoNamespace("encryption.testKeyVault");
+        MongoCollection<Document> keyVaultCollection = mongoClient.getDatabase(keyVaultNamespace.getDatabaseName())
+                .getCollection(keyVaultNamespace.getCollectionName());
+        keyVaultCollection.drop();
+
+        // Ensure that two data keys cannot share the same keyAltName.
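+        // (The partial filter expression limits the unique constraint to documents that actually set keyAltNames.)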
+        keyVaultCollection.createIndex(Indexes.ascending("keyAltNames"),
+                new IndexOptions().unique(true)
+                        .partialFilterExpression(Filters.exists("keyAltNames")));
+
+        MongoDatabase db = mongoClient.getDatabase(Fixture.getDefaultDatabaseName());
+        db.getCollection("test").drop();
+
+        // Create the ClientEncryption instance
+        ClientEncryptionSettings clientEncryptionSettings = ClientEncryptionSettings.builder()
+                .keyVaultMongoClientSettings(getMongoClientSettings())
+                .keyVaultNamespace(keyVaultNamespace.getFullName())
+                .kmsProviders(kmsProviders)
+                .build();
+
+        clientEncryption = ClientEncryptions.create(clientEncryptionSettings);
+
+        AutoEncryptionSettings autoEncryptionSettings = AutoEncryptionSettings.builder()
+                .keyVaultNamespace(keyVaultNamespace.getFullName())
+                .kmsProviders(kmsProviders)
+                .bypassAutoEncryption(true)
+                .build();
+
+        MongoClientSettings clientSettings = getMongoClientSettingsBuilder()
+                .autoEncryptionSettings(autoEncryptionSettings)
+                .build();
+        clientEncrypted = MongoClients.create(clientSettings);
+    }
+
+    @Test
+    public void shouldAutoDecryptManuallyEncryptedData() {
+        String fieldValue = "123456789";
+        BsonBinary dataKeyId = clientEncryption.createDataKey("local", new DataKeyOptions());
+        BsonBinary encryptedFieldValue = clientEncryption.encrypt(new BsonString(fieldValue),
+                new EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic").keyId(dataKeyId));
+
+        MongoCollection<Document> collection = clientEncrypted.getDatabase(Fixture.getDefaultDatabaseName()).getCollection("test");
+        collection.insertOne(new Document("encryptedField", encryptedFieldValue));
+
+        assertEquals(fieldValue, collection.find().first().getString("encryptedField"));
+    }
+
+    @After
+    public void after() {
+        if (clientEncrypted != null) {
+            clientEncrypted.getDatabase(Fixture.getDefaultDatabaseName()).drop();
+            clientEncrypted.close();
+        }
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionCorpusTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionCorpusTest.java
new file mode 100644
index 00000000000..3b4980e430d
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionCorpusTest.java
@@ -0,0 +1,296 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.AutoEncryptionSettings;
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.MongoException;
+import com.mongodb.WriteConcern;
+import com.mongodb.client.model.vault.EncryptOptions;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.client.vault.ClientEncryptions;
+import com.mongodb.fixture.EncryptionFixture.KmsProviderType;
+import org.bson.BsonBinary;
+import org.bson.BsonBinarySubType;
+import org.bson.BsonDocument;
+import org.bson.BsonString;
+import org.bson.BsonValue;
+import org.bson.UuidRepresentation;
+import org.bson.codecs.UuidCodec;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.util.Base64;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+
+import static com.mongodb.ClusterFixture.hasEncryptionTestsEnabled;
+import static com.mongodb.client.Fixture.getMongoClientSettings;
+import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder;
+import static com.mongodb.fixture.EncryptionFixture.getKmsProviders;
+import static java.util.Arrays.asList;
+import static org.bson.codecs.configuration.CodecRegistries.fromCodecs;
+import static org.bson.codecs.configuration.CodecRegistries.fromRegistries;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assume.assumeTrue;
+import static util.JsonPoweredTestHelper.getTestDocument;
+
+// See https://github.com/mongodb/specifications/tree/master/source/client-side-encryption/corpus
+@RunWith(Parameterized.class)
+public class ClientSideEncryptionCorpusTest {
+    private final boolean useLocalSchema;
+    private MongoClient client;
+    private MongoClient autoEncryptingClient;
+    private ClientEncryption clientEncryption;
+
+    public ClientSideEncryptionCorpusTest(final boolean useLocalSchema) {
+        this.useLocalSchema = useLocalSchema;
+    }
+
+    @Before
+    public void setUp() throws IOException, URISyntaxException {
+        assumeTrue("Corpus tests disabled", hasEncryptionTestsEnabled());
+
+        MongoClientSettings clientSettings = getMongoClientSettingsBuilder()
+                .codecRegistry(fromRegistries(fromCodecs(new UuidCodec(UuidRepresentation.STANDARD)),
+                        MongoClientSettings.getDefaultCodecRegistry())).build();
+
+        // Step 1: create unencrypted client
+        client = MongoClients.create(clientSettings);
+        MongoDatabase db = client.getDatabase("db");
+
+        // Step 2: Drop and recreate db.coll with schema
+        BsonDocument schemaDocument = bsonDocumentFromPath("corpus-schema.json");
+
+        db.getCollection("coll").drop();
+        db.runCommand(new BsonDocument("create", new BsonString("coll"))
+                .append("validator", new BsonDocument("$jsonSchema", schemaDocument)));
+
+        // Step 3: Drop and create keyvault.datakeys
+        MongoDatabase keyvaultDatabase = client.getDatabase("keyvault");
+        MongoCollection<BsonDocument> dataKeysCollection = keyvaultDatabase.getCollection("datakeys", BsonDocument.class)
+                .withWriteConcern(WriteConcern.MAJORITY);
+        dataKeysCollection.drop();
+        dataKeysCollection.insertOne(bsonDocumentFromPath("corpus-key-aws.json"));
+        dataKeysCollection.insertOne(bsonDocumentFromPath("corpus-key-azure.json"));
+        dataKeysCollection.insertOne(bsonDocumentFromPath("corpus-key-gcp.json"));
+        dataKeysCollection.insertOne(bsonDocumentFromPath("corpus-key-kmip.json"));
+        dataKeysCollection.insertOne(bsonDocumentFromPath("corpus-key-local.json"));
+
+        // Step 4: Configure our objects
+        Map<String, Map<String, Object>> kmsProviders = getKmsProviders(
+                KmsProviderType.AWS,
+                KmsProviderType.AZURE,
+                KmsProviderType.GCP,
+                KmsProviderType.KMIP,
+                KmsProviderType.LOCAL);
+
+        HashMap<String, BsonDocument> schemaMap = new HashMap<>();
+        schemaMap.put("db.coll", schemaDocument);
+
+        AutoEncryptionSettings.Builder autoEncryptionSettingsBuilder = AutoEncryptionSettings.builder()
+                .keyVaultNamespace("keyvault.datakeys")
+                .kmsProviders(kmsProviders);
+
+        if (useLocalSchema) {
+            autoEncryptionSettingsBuilder.schemaMap(schemaMap);
+        }
+
+        clientSettings = getMongoClientSettingsBuilder()
+                .codecRegistry(fromRegistries(
+                        fromCodecs(new UuidCodec(UuidRepresentation.STANDARD)), MongoClientSettings.getDefaultCodecRegistry()))
+                .autoEncryptionSettings(autoEncryptionSettingsBuilder.build())
+                .build();
+        autoEncryptingClient = MongoClients.create(clientSettings);
+
+        ClientEncryptionSettings clientEncryptionSettings = ClientEncryptionSettings.builder()
+                .keyVaultMongoClientSettings(getMongoClientSettings())
+                .kmsProviders(kmsProviders)
+                .keyVaultNamespace("keyvault.datakeys").build();
+        clientEncryption = ClientEncryptions.create(clientEncryptionSettings);
+    }
+
+    @Test
+    public void testCorpus() throws IOException, URISyntaxException {
+
+        // Step 5: Iterate over corpus
+        BsonDocument corpus = bsonDocumentFromPath("corpus.json");
+        BsonDocument corpusCopied = new BsonDocument();
+        for (String field : corpus.keySet()) {
+            if (!corpus.get(field).isDocument()) {
+                corpusCopied.append(field, corpus.get(field));
+                continue;
+            }
+
+            BsonDocument fieldDocument = corpus.getDocument(field).clone();
+            String kms = fieldDocument.getString("kms").getValue();
+            String abbreviatedAlgorithmName = fieldDocument.getString("algo").getValue();
+            String method = fieldDocument.getString("method").getValue();
+            String identifier = fieldDocument.getString("identifier").getValue();
+            boolean allowed = fieldDocument.getBoolean("allowed").getValue();
+            BsonValue value = fieldDocument.get("value");
+
+            byte[] awsKeyId = Base64.getDecoder().decode("AWSAAAAAAAAAAAAAAAAAAA==");
+            byte[] azureKeyId = Base64.getDecoder().decode("AZUREAAAAAAAAAAAAAAAAA==");
+            byte[] gcpKeyId = Base64.getDecoder().decode("GCPAAAAAAAAAAAAAAAAAAA==");
+            byte[] kmipKeyId = Base64.getDecoder().decode("KMIPAAAAAAAAAAAAAAAAAA==");
+            byte[] localKeyId = Base64.getDecoder().decode("LOCALAAAAAAAAAAAAAAAAA==");
+
+            if (method.equals("auto")) {
+                corpusCopied.append(field, corpus.get(field));
+                continue;
+            }
+
+            if (!method.equals("explicit")) {
+                throw new UnsupportedOperationException("Unsupported method: " + method);
+            }
+
+            String fullAlgorithmName = "AEAD_AES_256_CBC_HMAC_SHA_512-";
+            if (abbreviatedAlgorithmName.equals("rand")) {
+                fullAlgorithmName += "Random";
+            } else if (abbreviatedAlgorithmName.equals("det")) {
+                fullAlgorithmName += "Deterministic";
+            } else {
+                throw new UnsupportedOperationException("Unsupported algorithm: " + abbreviatedAlgorithmName);
+            }
+
+            EncryptOptions opts = new EncryptOptions(fullAlgorithmName);
+            if (identifier.equals("id")) {
+                switch (kms) {
+                    case "aws":
+                        opts.keyId(new BsonBinary(BsonBinarySubType.UUID_STANDARD, awsKeyId));
+                        break;
+                    case "azure":
+                        opts.keyId(new BsonBinary(BsonBinarySubType.UUID_STANDARD, azureKeyId));
+                        break;
+                    case "gcp":
+                        opts.keyId(new BsonBinary(BsonBinarySubType.UUID_STANDARD, gcpKeyId));
+                        break;
+                    case "kmip":
+                        opts.keyId(new BsonBinary(BsonBinarySubType.UUID_STANDARD, kmipKeyId));
+                        break;
"local": + opts.keyId(new BsonBinary(BsonBinarySubType.UUID_STANDARD, localKeyId)); + break; + default: + throw new UnsupportedOperationException("Unsupported provider: " + kms); + } + } else if (identifier.equals("altname")) { + opts.keyAltName(kms); + } else { + throw new UnsupportedOperationException("Unsupported identifier: " + identifier); + } + + try { + BsonValue encryptedValue = clientEncryption.encrypt(value, opts); + fieldDocument.put("value", encryptedValue); + corpusCopied.append(field, fieldDocument); + } catch (MongoException e) { + if (allowed) { + throw e; + } + corpusCopied.append(field, fieldDocument); + } + } + + // Step 6: insert corpusCopied + MongoCollection encryptedCollection = autoEncryptingClient.getDatabase("db") + .getCollection("coll", BsonDocument.class); + encryptedCollection.insertOne(corpusCopied); + + // Step 7: check the auto decrypted document + BsonDocument corpusDecrypted = encryptedCollection.find(new BsonDocument()).first(); + assertEquals(corpus, corpusDecrypted); + + // Step 8: check the document with an unencrypted client + MongoCollection coll = client.getDatabase("db").getCollection("coll", BsonDocument.class); + BsonDocument corpusEncryptedActual = coll.find(new BsonDocument()).first(); + BsonDocument corpusEncryptedExpected = bsonDocumentFromPath("corpus-encrypted.json"); + + for (String field : corpusEncryptedExpected.keySet()) { + if (field.equals("_id") || field.equals("altname_aws") || field.equals("altname_local")) { + continue; + } + + boolean allowed = corpusEncryptedActual.getDocument(field).getBoolean("allowed").getValue(); + String algorithm = corpusEncryptedActual.getDocument(field).getString("algo").getValue(); + BsonValue actualValue = corpusEncryptedActual.getDocument(field).get("value"); + BsonValue expectedValue = corpusEncryptedExpected.getDocument(field).get("value"); + + if (algorithm.equals("det")) { + assertEquals(actualValue, expectedValue); + } else if (algorithm.equals("rand")) { + if (allowed) { + assertNotEquals(actualValue, expectedValue); + } + } else { + throw new UnsupportedOperationException("Unsupported algorithm type: " + algorithm); + } + + if (allowed) { + BsonValue decrypted = clientEncryption.decrypt(actualValue.asBinary()); + assertEquals("Values should be equal for field " + field, clientEncryption.decrypt(expectedValue.asBinary()), decrypted); + } else { + assertEquals("Values should be equal for field " + field, expectedValue, actualValue); + } + } + } + + private static BsonDocument bsonDocumentFromPath(final String path) { + return getTestDocument("client-side-encryption/corpus/" + path); + } + + @Parameterized.Parameters(name = "useLocalSchema: {0}") + public static Collection data() { + return asList(new Object[]{true}, new Object[]{false}); + } + + @After + public void after() { + if (client != null) { + try { + client.close(); + } catch (Exception e) { + // ignore + } + } + + if (autoEncryptingClient != null) { + try { + autoEncryptingClient.close(); + } catch (Exception e) { + // ignore + } + } + + if (clientEncryption != null) { + try { + clientEncryption.close(); + } catch (Exception e) { + // ignore + } + } + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionDeadlockTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionDeadlockTest.java new file mode 100644 index 00000000000..8809c36978e --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionDeadlockTest.java @@ -0,0 +1,28 @@ +/* + * 
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.MongoClientSettings;
+
+public class ClientSideEncryptionDeadlockTest extends AbstractClientSideEncryptionDeadlockTest {
+    @Override
+    protected MongoClient createMongoClient(final MongoClientSettings settings) {
+        return MongoClients.create(settings);
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionDecryptionEventsTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionDecryptionEventsTest.java
new file mode 100644
index 00000000000..ad4199d1035
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionDecryptionEventsTest.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.client.vault.ClientEncryptions;
+
+public class ClientSideEncryptionDecryptionEventsTest extends AbstractClientSideEncryptionDecryptionEventsTest {
+    @Override
+    protected MongoClient createMongoClient(final MongoClientSettings settings) {
+        return MongoClients.create(settings);
+    }
+
+    @Override
+    protected ClientEncryption createClientEncryption(final ClientEncryptionSettings settings) {
+        return ClientEncryptions.create(settings);
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionExplicitEncryptionTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionExplicitEncryptionTest.java
new file mode 100644
index 00000000000..9f71a1ad73b
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionExplicitEncryptionTest.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.client.vault.ClientEncryptions;
+
+
+public class ClientSideEncryptionExplicitEncryptionTest extends AbstractClientSideEncryptionExplicitEncryptionTest {
+
+    @Override
+    protected MongoClient createMongoClient(final MongoClientSettings settings) {
+        return MongoClients.create(settings);
+    }
+
+    @Override
+    protected ClientEncryption createClientEncryption(final ClientEncryptionSettings settings) {
+        return ClientEncryptions.create(settings);
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionExternalKeyVaultSpecification.groovy b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionExternalKeyVaultSpecification.groovy
new file mode 100644
index 00000000000..8c888420b27
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionExternalKeyVaultSpecification.groovy
@@ -0,0 +1,186 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client
+
+import com.mongodb.AutoEncryptionSettings
+import com.mongodb.ClientEncryptionSettings
+import com.mongodb.MongoClientException
+import com.mongodb.MongoNamespace
+import com.mongodb.WriteConcern
+import com.mongodb.client.model.vault.DataKeyOptions
+import com.mongodb.client.model.vault.EncryptOptions
+import com.mongodb.client.vault.ClientEncryption
+import com.mongodb.client.vault.ClientEncryptions
+import com.mongodb.event.CommandStartedEvent
+import com.mongodb.internal.connection.TestCommandListener
+import org.bson.BsonBinarySubType
+import org.bson.BsonDocument
+import org.bson.BsonString
+
+import static com.mongodb.client.Fixture.getDefaultDatabaseName
+import static com.mongodb.client.Fixture.getMongoClient
+import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder
+import static com.mongodb.client.model.Filters.eq
+import static java.util.Collections.singletonMap
+import static org.junit.Assume.assumeTrue
+
+class ClientSideEncryptionExternalKeyVaultSpecification extends FunctionalSpecification {
+
+    private final MongoNamespace keyVaultNamespace = new MongoNamespace('test.datakeys')
+    private final MongoNamespace autoEncryptingCollectionNamespace = new MongoNamespace(getDefaultDatabaseName(),
+            'ClientSideEncryptionProseTestSpecification')
+    private final MongoCollection<BsonDocument> dataKeyCollection = getMongoClient()
+            .getDatabase(keyVaultNamespace.databaseName).getCollection(keyVaultNamespace.collectionName, BsonDocument)
+            .withWriteConcern(WriteConcern.MAJORITY)
+    private final MongoCollection<BsonDocument> dataCollection = getMongoClient()
+            .getDatabase(autoEncryptingCollectionNamespace.databaseName).getCollection(autoEncryptingCollectionNamespace.collectionName,
+                    BsonDocument)
+
+    private MongoClient autoEncryptingClient
+    private ClientEncryption clientEncryption
+    private MongoCollection<BsonDocument> autoEncryptingDataCollection
+    private TestCommandListener commandListener
+
+    def setup() {
+        assumeTrue('Key vault tests disabled',
+                System.getProperty('AWS_ACCESS_KEY_ID') != null
+                        && !System.getProperty('AWS_ACCESS_KEY_ID').isEmpty())
+        dataKeyCollection.drop()
+        dataCollection.drop()
+
+        def providerProperties =
+                ['local': ['key': Base64.getDecoder().decode('Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN'
+                        + '3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk')],
+                 'aws'  : ['accessKeyId'    : System.getProperty('AWS_ACCESS_KEY_ID'),
+                           'secretAccessKey': System.getProperty('AWS_SECRET_ACCESS_KEY')]
+                ]
+
+        autoEncryptingClient = MongoClients.create(getMongoClientSettingsBuilder()
+                .autoEncryptionSettings(AutoEncryptionSettings.builder()
+                        .keyVaultNamespace(keyVaultNamespace.fullName)
+                        .kmsProviders(providerProperties)
+                        .schemaMap(singletonMap(autoEncryptingCollectionNamespace.fullName,
+                                BsonDocument.parse(
+                                        '''
+                                        {
+                                            "bsonType": "object",
+                                            "properties": {
+                                                "encrypted_placeholder": {
+                                                    "encrypt": {
+                                                        "keyId": "/placeholder",
+                                                        "bsonType": "string",
+                                                        "algorithm": "AEAD_AES_256_CBC_HMAC_SHA_512-Random"
+                                                    }
+                                                }
+                                            }
+                                        }''')))
+                        .build())
+                .build())
+
+        autoEncryptingDataCollection = autoEncryptingClient.getDatabase(autoEncryptingCollectionNamespace.databaseName)
+                .getCollection(autoEncryptingCollectionNamespace.collectionName, BsonDocument)
+
+        commandListener = new TestCommandListener()
+        clientEncryption = ClientEncryptions.create(ClientEncryptionSettings.builder()
+                .keyVaultMongoClientSettings(getMongoClientSettingsBuilder()
+                        .addCommandListener(commandListener)
+                        .build())
+                .keyVaultNamespace(keyVaultNamespace.fullName)
+                .kmsProviders(providerProperties)
+                .build())
+    }
+
+    def 'test external key vault'() {
+        when:
+        def localDataKeyId = clientEncryption.createDataKey('local', new DataKeyOptions().keyAltNames(['local_altname']))
+
+        then:
+        commandListener.getCommandStartedEvents().size() == 1
+        def event = commandListener.getCommandStartedEvents().get(0) as CommandStartedEvent
+        event.getCommand().containsKey('writeConcern')
+        event.getCommand().getDocument('writeConcern') == WriteConcern.MAJORITY.asDocument()
+
+        localDataKeyId != null
+        localDataKeyId.type == BsonBinarySubType.UUID_STANDARD.value
+        dataKeyCollection.find(eq('masterKey.provider', 'local')).into([]).size() == 1
+
+        when:
+        def localEncrypted = clientEncryption.encrypt(new BsonString('hello local'),
+                new EncryptOptions('AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic')
+                        .keyId(localDataKeyId))
+
+        then:
+        localEncrypted.asBinary().getType() == (byte) 6
+
+        when:
+        autoEncryptingDataCollection.insertOne(new BsonDocument('_id', new BsonString('local'))
+                .append('value', localEncrypted))
+
+        then:
+        autoEncryptingDataCollection.find(eq('_id', new BsonString('local'))).first().getString('value')
+                .value == 'hello local'
+
+        when:
+        def localEncryptedWithAltName = clientEncryption.encrypt(new BsonString('hello local'),
+                new EncryptOptions('AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic')
+                        .keyAltName('local_altname'))
+
+        then:
+        localEncryptedWithAltName == localEncrypted
+
+        when:
+        def awsDataKeyId = clientEncryption.createDataKey('aws',
+                new DataKeyOptions().keyAltNames(['aws_altname'])
+                        .masterKey(new BsonDocument('region', new BsonString('us-east-1'))
+                                .append('key', new BsonString('arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0'))))
+
+        then:
+        awsDataKeyId != null
+        awsDataKeyId.type == BsonBinarySubType.UUID_STANDARD.value
+        dataKeyCollection.find(eq('masterKey.provider', 'aws')).into([]).size() == 1
+
+        when:
+        def awsEncrypted = clientEncryption.encrypt(new BsonString('hello aws'),
+                new EncryptOptions('AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic')
+                        .keyId(awsDataKeyId))
+
+        then:
+        awsEncrypted.asBinary().getType() == (byte) 6
+
+        when:
+        autoEncryptingDataCollection.insertOne(new BsonDocument('_id', new BsonString('aws'))
+                .append('value', awsEncrypted))
+
+        then:
+        autoEncryptingDataCollection.find(eq('_id', new BsonString('aws'))).first().getString('value')
+                .value == 'hello aws'
+
+        when:
+        def awsEncryptedWithAltName = clientEncryption.encrypt(new BsonString('hello aws'),
+                new EncryptOptions('AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic')
+                        .keyAltName('aws_altname'))
+
+        then:
+        awsEncryptedWithAltName == awsEncrypted
+
+        when:
+        autoEncryptingDataCollection.insertOne(new BsonDocument('encrypted_placeholder', localEncrypted))
+
+        then:
+        thrown(MongoClientException)
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionExternalKeyVaultTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionExternalKeyVaultTest.java
new file mode 100644
index 00000000000..35f42e539ab
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionExternalKeyVaultTest.java
@@ -0,0 +1,174 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.AutoEncryptionSettings;
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.MongoCredential;
+import com.mongodb.MongoNamespace;
+import com.mongodb.MongoSecurityException;
+import com.mongodb.WriteConcern;
+import com.mongodb.client.model.vault.EncryptOptions;
+import com.mongodb.client.test.CollectionHelper;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.client.vault.ClientEncryptions;
+import org.bson.BsonBinary;
+import org.bson.BsonBinarySubType;
+import org.bson.BsonDocument;
+import org.bson.BsonString;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.util.Arrays;
+import java.util.Base64;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+
+import static com.mongodb.ClusterFixture.isClientSideEncryptionTest;
+import static com.mongodb.client.Fixture.getMongoClient;
+import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assume.assumeTrue;
+import static util.JsonPoweredTestHelper.getTestDocument;
+
+@RunWith(Parameterized.class)
+public class ClientSideEncryptionExternalKeyVaultTest {
+    private MongoClient client, clientEncrypted;
+    private ClientEncryption clientEncryption;
+    private final boolean withExternalKeyVault;
+    private static final MongoNamespace NAMESPACE = new MongoNamespace("db", ClientSideEncryptionExternalKeyVaultTest.class.getName());
+
+    public ClientSideEncryptionExternalKeyVaultTest(final boolean withExternalKeyVault) {
+        this.withExternalKeyVault = withExternalKeyVault;
+    }
+
+    @Before
+    public void setUp() throws IOException, URISyntaxException {
+        assumeTrue("Encryption test with external keyVault is disabled", isClientSideEncryptionTest());
+
+        /* Step 1: get unencrypted client and recreate keys collection */
+        client = getMongoClient();
+        MongoDatabase keyvaultDatabase = client.getDatabase("keyvault");
+        MongoCollection<BsonDocument> datakeys = keyvaultDatabase.getCollection("datakeys", BsonDocument.class)
+                .withWriteConcern(WriteConcern.MAJORITY);
+        datakeys.drop();
+        datakeys.insertOne(bsonDocumentFromPath("external-key.json"));
+
+        /* Step 2: create encryption objects.
+         */
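+        // external-key.json contains a data key wrapped with the fixed local test master key decoded below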
*/ + Map> kmsProviders = new HashMap<>(); + Map localMasterkey = new HashMap<>(); + Map schemaMap = new HashMap<>(); + + byte[] localMasterkeyBytes = Base64.getDecoder().decode("Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBM" + + "UN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk"); + localMasterkey.put("key", localMasterkeyBytes); + kmsProviders.put("local", localMasterkey); + schemaMap.put(NAMESPACE.getFullName(), bsonDocumentFromPath("external-schema.json")); + + AutoEncryptionSettings.Builder autoEncryptionSettingsBuilder = AutoEncryptionSettings.builder() + .keyVaultNamespace("keyvault.datakeys") + .kmsProviders(kmsProviders) + .schemaMap(schemaMap); + + MongoClientSettings externalClientSettings = null; + if (withExternalKeyVault) { + externalClientSettings = getMongoClientSettingsBuilder() + .credential(MongoCredential.createCredential("fake-user", "admin", "fake-pwd".toCharArray())) + .build(); + autoEncryptionSettingsBuilder.keyVaultMongoClientSettings(externalClientSettings); + } + + AutoEncryptionSettings autoEncryptionSettings = autoEncryptionSettingsBuilder.build(); + + MongoClientSettings clientSettings = getMongoClientSettingsBuilder() + .autoEncryptionSettings(autoEncryptionSettings) + .build(); + clientEncrypted = MongoClients.create(clientSettings); + + ClientEncryptionSettings.Builder clientEncryptionSettingsBuilder = ClientEncryptionSettings.builder(). + keyVaultMongoClientSettings(getMongoClientSettingsBuilder().build()) + .kmsProviders(kmsProviders) + .keyVaultNamespace("keyvault.datakeys"); + + if (withExternalKeyVault) { + clientEncryptionSettingsBuilder.keyVaultMongoClientSettings(externalClientSettings); + } + + ClientEncryptionSettings clientEncryptionSettings = clientEncryptionSettingsBuilder.build(); + clientEncryption = ClientEncryptions.create(clientEncryptionSettings); + } + + @Test + public void testExternal() { + boolean authExceptionThrown = false; + MongoCollection coll = clientEncrypted + .getDatabase(NAMESPACE.getDatabaseName()) + .getCollection(NAMESPACE.getCollectionName(), BsonDocument.class); + try { + coll.insertOne(new BsonDocument().append("encrypted", new BsonString("test"))); + } catch (MongoSecurityException mse) { + authExceptionThrown = true; + } + assertEquals(authExceptionThrown, withExternalKeyVault); + + EncryptOptions encryptOptions = new EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic") + .keyId(new BsonBinary(BsonBinarySubType.UUID_STANDARD, Base64.getDecoder().decode("LOCALAAAAAAAAAAAAAAAAA=="))); + authExceptionThrown = false; + try { + clientEncryption.encrypt(new BsonString("test"), encryptOptions); + } catch (MongoSecurityException mse) { + authExceptionThrown = true; + } + assertEquals(authExceptionThrown, withExternalKeyVault); + } + + private static BsonDocument bsonDocumentFromPath(final String path) { + return getTestDocument("client-side-encryption/external/" + path); + } + + @Parameterized.Parameters(name = "withExternalKeyVault: {0}") + public static Collection data() { + return Arrays.asList(new Object[]{true}, new Object[]{false}); + } + + @After + public void after() { + if (clientEncrypted != null) { + try { + clientEncrypted.close(); + } catch (Exception e) { + // ignore + } + } + if (clientEncryption != null) { + try { + clientEncryption.close(); + } catch (Exception e) { + // ignore + } + } + + CollectionHelper.drop(NAMESPACE); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionKmsTlsTest.java 
new file mode 100644
index 00000000000..1807ae0f316
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionKmsTlsTest.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.client.vault.ClientEncryptions;
+
+public class ClientSideEncryptionKmsTlsTest extends AbstractClientSideEncryptionKmsTlsTest {
+    @Override
+    public ClientEncryption getClientEncryption(final ClientEncryptionSettings settings) {
+        return ClientEncryptions.create(settings);
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionNotCreateMongocryptdClientTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionNotCreateMongocryptdClientTest.java
new file mode 100644
index 00000000000..fa130109b02
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionNotCreateMongocryptdClientTest.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.MongoClientSettings;
+
+final class ClientSideEncryptionNotCreateMongocryptdClientTest extends AbstractClientSideEncryptionNotCreateMongocryptdClientTest {
+    @Override
+    protected MongoClient createMongoClient(final MongoClientSettings settings) {
+        return MongoClients.create(settings);
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionNotSpawnMongocryptdTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionNotSpawnMongocryptdTest.java
new file mode 100644
index 00000000000..6da9aa13d34
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionNotSpawnMongocryptdTest.java
@@ -0,0 +1,26 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.MongoClientSettings;
+
+final class ClientSideEncryptionNotSpawnMongocryptdTest extends AbstractClientSideEncryptionNotSpawnMongocryptdTest {
+    @Override
+    protected MongoClient createMongoClient(final MongoClientSettings settings) {
+        return MongoClients.create(settings);
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionOnDemandCredentialsTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionOnDemandCredentialsTest.java
new file mode 100644
index 00000000000..8f0f56e397c
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionOnDemandCredentialsTest.java
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.client.vault.ClientEncryptions;
+
+public class ClientSideEncryptionOnDemandCredentialsTest extends AbstractClientSideEncryptionOnDemandCredentialsTest {
+
+    @Override
+    public ClientEncryption getClientEncryption(final ClientEncryptionSettings settings) {
+        return ClientEncryptions.create(settings);
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionRangeDefaultExplicitEncryptionTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionRangeDefaultExplicitEncryptionTest.java
new file mode 100644
index 00000000000..1e3a12c19b9
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionRangeDefaultExplicitEncryptionTest.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.client.vault.ClientEncryptions;
+
+public class ClientSideEncryptionRangeDefaultExplicitEncryptionTest extends AbstractClientSideEncryptionRangeDefaultExplicitEncryptionTest {
+
+    @Override
+    protected ClientEncryption createClientEncryption(final ClientEncryptionSettings settings) {
+        return ClientEncryptions.create(settings);
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionRangeExplicitEncryptionTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionRangeExplicitEncryptionTest.java
new file mode 100644
index 00000000000..14c0080fd9f
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionRangeExplicitEncryptionTest.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ *
+ */
+package com.mongodb.client;
+
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.client.vault.ClientEncryptions;
+
+public class ClientSideEncryptionRangeExplicitEncryptionTest extends AbstractClientSideEncryptionRangeExplicitEncryptionTest {
+
+    @Override
+    protected MongoClient createMongoClient(final MongoClientSettings settings) {
+        return MongoClients.create(settings);
+    }
+
+    @Override
+    protected ClientEncryption createClientEncryption(final ClientEncryptionSettings settings) {
+        return ClientEncryptions.create(settings);
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionSessionTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionSessionTest.java
new file mode 100644
index 00000000000..d69c1954e9b
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionSessionTest.java
@@ -0,0 +1,146 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.AutoEncryptionSettings;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.MongoNamespace;
+import com.mongodb.WriteConcern;
+import com.mongodb.client.test.CollectionHelper;
+import org.bson.BsonDocument;
+import org.bson.BsonString;
+import org.bson.codecs.BsonDocumentCodec;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+import java.util.Arrays;
+import java.util.Base64;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+
+import static com.mongodb.ClusterFixture.isClientSideEncryptionTest;
+import static com.mongodb.ClusterFixture.isStandalone;
+import static com.mongodb.client.Fixture.getDefaultDatabaseName;
+import static com.mongodb.client.Fixture.getMongoClient;
+import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeFalse;
+import static org.junit.Assume.assumeTrue;
+import static util.JsonPoweredTestHelper.getTestDocument;
+
+@RunWith(Parameterized.class)
+public class ClientSideEncryptionSessionTest {
+    private static final String COLLECTION_NAME = "clientSideEncryptionSessionsTest";
+    private MongoClient client, clientEncrypted;
+    private final boolean useTransaction;
+
+    @Parameterized.Parameters(name = "useTransaction: {0}")
+    public static Collection<Object[]> data() {
+        return Arrays.asList(new Object[]{true}, new Object[]{false});
+    }
+
+    public ClientSideEncryptionSessionTest(final boolean useTransaction) {
+        this.useTransaction = useTransaction;
+    }
+
+    @Before
+    public void setUp() throws IOException, URISyntaxException {
+        assumeTrue(isClientSideEncryptionTest());
+        assumeFalse(isStandalone());
+
+        /* Step 1: get unencrypted client and recreate keys collection */
+        client = getMongoClient();
+        MongoDatabase keyvaultDatabase = client.getDatabase("keyvault");
+        MongoCollection<BsonDocument> datakeys = keyvaultDatabase.getCollection("datakeys", BsonDocument.class)
+                .withWriteConcern(WriteConcern.MAJORITY);
+        datakeys.drop();
+        datakeys.insertOne(bsonDocumentFromPath("external-key.json"));
+
+        /* Step 2: create encryption objects. */
+        Map<String, Map<String, Object>> kmsProviders = new HashMap<>();
+        Map<String, Object> localMasterkey = new HashMap<>();
+        Map<String, BsonDocument> schemaMap = new HashMap<>();
+
+        byte[] localMasterkeyBytes = Base64.getDecoder().decode("Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBM"
+                + "UN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk");
+        localMasterkey.put("key", localMasterkeyBytes);
+        kmsProviders.put("local", localMasterkey);
+        schemaMap.put(getDefaultDatabaseName() + "." + COLLECTION_NAME, bsonDocumentFromPath("external-schema.json"));
+
+        AutoEncryptionSettings autoEncryptionSettings = AutoEncryptionSettings.builder()
+                .keyVaultNamespace("keyvault.datakeys")
+                .kmsProviders(kmsProviders)
+                .schemaMap(schemaMap).build();
+
+        MongoClientSettings clientSettings = getMongoClientSettingsBuilder()
+                .autoEncryptionSettings(autoEncryptionSettings)
+                .build();
+        clientEncrypted = MongoClients.create(clientSettings);
+
+        CollectionHelper<BsonDocument> collectionHelper =
+                new CollectionHelper<>(new BsonDocumentCodec(), new MongoNamespace(getDefaultDatabaseName(), COLLECTION_NAME));
+        collectionHelper.drop();
+        collectionHelper.create();
+    }
+
+    @After
+    public void after() {
+        if (clientEncrypted != null) {
+            try {
+                clientEncrypted.close();
+            } catch (Exception e) {
+                // ignore
+            }
+        }
+    }
+
+    @Test
+    public void testWithExplicitSession() {
+        BsonString unencryptedValue = new BsonString("test");
+
+        try (ClientSession clientSession = clientEncrypted.startSession()) {
+            if (useTransaction) {
+                clientSession.startTransaction();
+            }
+            MongoCollection<BsonDocument> encryptedCollection = clientEncrypted.getDatabase(getDefaultDatabaseName())
+                    .getCollection(COLLECTION_NAME, BsonDocument.class);
+            encryptedCollection.insertOne(clientSession, new BsonDocument().append("encrypted", unencryptedValue));
+            BsonDocument unencryptedDocument = encryptedCollection.find(clientSession).first();
+            assertEquals(unencryptedValue, unencryptedDocument.getString("encrypted"));
+            if (useTransaction) {
+                clientSession.commitTransaction();
+            }
+        }
+
+        MongoCollection<BsonDocument> unencryptedCollection = client.getDatabase(getDefaultDatabaseName())
+                .getCollection(COLLECTION_NAME, BsonDocument.class);
+        BsonDocument encryptedDocument = unencryptedCollection.find().first();
+        assertTrue(encryptedDocument.isBinary("encrypted"));
+        assertEquals(6, encryptedDocument.getBinary("encrypted").getType());
+    }
+
+    private static BsonDocument bsonDocumentFromPath(final String path) {
+        return getTestDocument("client-side-encryption/external/" + path);
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionUniqueIndexKeyAltNamesTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionUniqueIndexKeyAltNamesTest.java
new file mode 100644
index 00000000000..72b96fe4749
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionUniqueIndexKeyAltNamesTest.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.client.vault.ClientEncryptions;
+
+public class ClientSideEncryptionUniqueIndexKeyAltNamesTest extends AbstractClientSideEncryptionUniqueIndexKeyAltNamesTest {
+    @Override
+    protected MongoClient createMongoClient(final MongoClientSettings settings) {
+        return MongoClients.create(settings);
+    }
+
+    @Override
+    protected ClientEncryption createClientEncryption(final ClientEncryptionSettings settings) {
+        return ClientEncryptions.create(settings);
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionViewAreProhibitedTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionViewAreProhibitedTest.java
new file mode 100644
index 00000000000..b07450656ed
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideEncryptionViewAreProhibitedTest.java
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.AutoEncryptionSettings;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.MongoException;
+import org.bson.BsonDocument;
+import org.bson.BsonString;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.Base64;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import static com.mongodb.ClusterFixture.isClientSideEncryptionTest;
+import static com.mongodb.client.Fixture.getMongoClient;
+import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder;
+import static junit.framework.TestCase.assertTrue;
+import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeTrue;
+
+public class ClientSideEncryptionViewAreProhibitedTest {
+    private MongoClient clientEncrypted;
+
+    @Before
+    public void setUp() {
+        assumeTrue("Encryption test with external keyVault is disabled", isClientSideEncryptionTest());
+
+        MongoClient client = getMongoClient();
+
+        MongoDatabase db = client.getDatabase("db");
+        db.getCollection("view").drop();
+        db.createView("view", "coll", Collections.emptyList());
+
+        Map<String, Map<String, Object>> kmsProviders = new HashMap<>();
+        Map<String, Object> localMasterkey = new HashMap<>();
+
+        byte[] localMasterkeyBytes = Base64.getDecoder().decode("Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBM"
+                + "UN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk");
+        localMasterkey.put("key", localMasterkeyBytes);
+        kmsProviders.put("local", localMasterkey);
+
+        AutoEncryptionSettings.Builder autoEncryptionSettingsBuilder = AutoEncryptionSettings.builder()
+                .keyVaultNamespace("keyvault.datakeys")
+                .kmsProviders(kmsProviders);
+
+        AutoEncryptionSettings autoEncryptionSettings = autoEncryptionSettingsBuilder.build();
+
+        MongoClientSettings.Builder clientSettingsBuilder = getMongoClientSettingsBuilder();
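+        // With auto encryption enabled, the insert against the view in shouldThrowError below must be rejected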
+        MongoClientSettings clientSettings = clientSettingsBuilder
+                .autoEncryptionSettings(autoEncryptionSettings)
+                .build();
+        clientEncrypted = MongoClients.create(clientSettings);
+    }
+
+    @Test
+    public void shouldThrowError() {
+        MongoCollection<BsonDocument> coll = clientEncrypted
+                .getDatabase("db")
+                .getCollection("view", BsonDocument.class);
+        try {
+            coll.insertOne(new BsonDocument().append("encrypted", new BsonString("test")));
+            fail();
+        } catch (MongoException me) {
+            assertTrue(me.getMessage().contains("cannot auto encrypt a view"));
+        }
+    }
+
+    @After
+    public void after() {
+        if (clientEncrypted != null) {
+            clientEncrypted.close();
+        }
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideOperationTimeoutProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientSideOperationTimeoutProseTest.java
new file mode 100644
index 00000000000..4dcbc4d1a0f
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideOperationTimeoutProseTest.java
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.MongoClientSettings;
+import com.mongodb.client.gridfs.GridFSBucket;
+import com.mongodb.client.gridfs.GridFSBuckets;
+
+
+/**
+ * See https://github.com/mongodb/specifications/blob/master/source/client-side-operations-timeout/tests/README.md#prose-tests
+ */
+public final class ClientSideOperationTimeoutProseTest extends AbstractClientSideOperationsTimeoutProseTest {
+
+    @Override
+    protected MongoClient createMongoClient(final MongoClientSettings mongoClientSettings) {
+        return MongoClients.create(mongoClientSettings);
+    }
+
+    @Override
+    protected GridFSBucket createGridFsBucket(final MongoDatabase mongoDatabase, final String bucketName) {
+        return GridFSBuckets.create(mongoDatabase, bucketName);
+    }
+
+    @Override
+    protected boolean isAsync() {
+        return false;
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClientSideOperationTimeoutTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClientSideOperationTimeoutTest.java
new file mode 100644
index 00000000000..cb62545f4e4
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/ClientSideOperationTimeoutTest.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.client.unified.UnifiedSyncTest;
+import org.junit.jupiter.params.provider.Arguments;
+
+import java.util.Collection;
+
+import static org.junit.jupiter.api.Assumptions.assumeFalse;
+
+
+// See https://github.com/mongodb/specifications/tree/master/source/client-side-operations-timeout/tests
+public class ClientSideOperationTimeoutTest extends UnifiedSyncTest {
+
+    private static Collection<Arguments> data() {
+        return getTestData("client-side-operations-timeout");
+    }
+
+    @Override
+    protected void skips(final String fileDescription, final String testDescription) {
+        skipOperationTimeoutTests(fileDescription, testDescription);
+
+        /*
+         * The test is occasionally racy. Sometimes multiple getMores can be triggered.
+         */
+        ignoreExtraCommandEvents(testDescription.contains("timeoutMS is refreshed for getMore if maxAwaitTimeMS is set"));
+    }
+
+    public static void skipOperationTimeoutTests(final String fileDescription, final String testDescription) {
+        assumeFalse(testDescription.contains("maxTimeMS is ignored if timeoutMS is set - createIndex on collection"),
+                "No maxTimeMS parameter for createIndex() method");
+        assumeFalse(fileDescription.startsWith("runCursorCommand"), "No run cursor command");
+        assumeFalse(testDescription.contains("runCommand on database"), "No special handling of runCommand");
+        assumeFalse(testDescription.endsWith("count on collection"), "No count command helper");
+        assumeFalse(fileDescription.equals("timeoutMS can be overridden for an operation"), "No operation based overrides");
+        assumeFalse(testDescription.equals("timeoutMS can be overridden for commitTransaction")
+                        || testDescription.equals("timeoutMS applied to abortTransaction"),
+                "No operation session based overrides");
+
+        assumeFalse(fileDescription.equals("operations ignore deprecated timeout options if timeoutMS is set")
+                        && (testDescription.startsWith("abortTransaction ignores") || testDescription.startsWith("commitTransaction ignores")),
+                "No operation session based overrides");
+
+        assumeFalse(fileDescription.equals("timeoutMS behaves correctly when closing cursors")
+                && testDescription.equals("timeoutMS can be overridden for close"), "No operation based overrides");
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/ClusterEventPublishingTest.java b/driver-sync/src/test/functional/com/mongodb/client/ClusterEventPublishingTest.java
new file mode 100644
index 00000000000..e390d4c3afc
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/ClusterEventPublishingTest.java
@@ -0,0 +1,172 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.mongodb.client; + +import com.mongodb.ClusterFixture; +import com.mongodb.connection.ClusterType; +import com.mongodb.event.ClusterClosedEvent; +import com.mongodb.event.ClusterDescriptionChangedEvent; +import com.mongodb.event.ClusterListener; +import com.mongodb.event.ClusterOpeningEvent; +import com.mongodb.event.ServerClosedEvent; +import com.mongodb.event.ServerDescriptionChangedEvent; +import com.mongodb.event.ServerHeartbeatFailedEvent; +import com.mongodb.event.ServerHeartbeatStartedEvent; +import com.mongodb.event.ServerHeartbeatSucceededEvent; +import com.mongodb.event.ServerListener; +import com.mongodb.event.ServerMonitorListener; +import com.mongodb.event.ServerOpeningEvent; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assumptions.assumeFalse; + +public class ClusterEventPublishingTest { + + @Test + public void shouldPublishExpectedEvents() throws InterruptedException { + assumeFalse(ClusterFixture.isLoadBalanced()); + + AllClusterEventListener clusterEventListenerOne = new AllClusterEventListener(); + AllClusterEventListener clusterEventListenerTwo = new AllClusterEventListener(); + + MongoClient client = MongoClients.create( + getMongoClientSettingsBuilder() + .applyToClusterSettings(builder -> builder + .addClusterListener(clusterEventListenerOne) + .addClusterListener(clusterEventListenerTwo)) + .applyToServerSettings(builder -> builder + .heartbeatFrequency(1, TimeUnit.MILLISECONDS) + .addServerListener(clusterEventListenerOne) + .addServerListener(clusterEventListenerTwo) + .addServerMonitorListener(clusterEventListenerOne) + .addServerMonitorListener(clusterEventListenerTwo)) + .build()); + + assertTrue(clusterEventListenerOne.waitUntilConnected()); + assertTrue(clusterEventListenerTwo.waitUntilConnected()); + + assertTrue(clusterEventListenerOne.waitUntilHeartbeat()); + assertTrue(clusterEventListenerTwo.waitUntilHeartbeat()); + + client.close(); + + assertTrue(clusterEventListenerOne.waitUntilDisconnected()); + assertTrue(clusterEventListenerTwo.waitUntilDisconnected()); + + assertEquals(clusterEventListenerOne.getEvents().size(), clusterEventListenerTwo.getEvents().size()); + assertEvents(clusterEventListenerOne); + assertEvents(clusterEventListenerTwo); + } + + private void assertEvents(final AllClusterEventListener clusterEventListener) { + assertTrue(clusterEventListener.hasEventOfType(ClusterOpeningEvent.class)); + assertTrue(clusterEventListener.hasEventOfType(ClusterDescriptionChangedEvent.class)); + assertTrue(clusterEventListener.hasEventOfType(ClusterClosedEvent.class)); + + assertTrue(clusterEventListener.hasEventOfType(ServerOpeningEvent.class)); + assertTrue(clusterEventListener.hasEventOfType(ServerClosedEvent.class)); + assertTrue(clusterEventListener.hasEventOfType(ServerDescriptionChangedEvent.class)); + + assertTrue(clusterEventListener.hasEventOfType(ServerHeartbeatStartedEvent.class)); + } + + private static final class AllClusterEventListener implements ClusterListener, ServerListener, ServerMonitorListener { + private final List<Object> events = new ArrayList<>(); + private final CountDownLatch connectedLatch = new CountDownLatch(1); + private final CountDownLatch
heartbeatLatch = new CountDownLatch(1); + private final CountDownLatch disconnectedLatch = new CountDownLatch(1); + + public List<Object> getEvents() { + return events; + } + + public boolean hasEventOfType(final Class<?> eventClass) { + return events.stream().anyMatch(event -> event.getClass().equals(eventClass)); + } + + public boolean waitUntilConnected() throws InterruptedException { + return connectedLatch.await(5, TimeUnit.SECONDS); + } + + public boolean waitUntilHeartbeat() throws InterruptedException { + return heartbeatLatch.await(5, TimeUnit.SECONDS); + } + + public boolean waitUntilDisconnected() throws InterruptedException { + return disconnectedLatch.await(5, TimeUnit.SECONDS); + } + + @Override + public void clusterOpening(final ClusterOpeningEvent event) { + events.add(event); + } + + @Override + public void clusterClosed(final ClusterClosedEvent event) { + events.add(event); + disconnectedLatch.countDown(); + } + + @Override + public void clusterDescriptionChanged(final ClusterDescriptionChangedEvent event) { + events.add(event); + if (event.getNewDescription().getType() != ClusterType.UNKNOWN) { + connectedLatch.countDown(); + } + } + + @Override + public void serverOpening(final ServerOpeningEvent event) { + events.add(event); + } + + @Override + public void serverClosed(final ServerClosedEvent event) { + events.add(event); + } + + @Override + public void serverDescriptionChanged(final ServerDescriptionChangedEvent event) { + events.add(event); + } + + // "serverHearbeatStarted" (sic) matches the method name declared by ServerMonitorListener. + @Override + public void serverHearbeatStarted(final ServerHeartbeatStartedEvent event) { + events.add(event); + } + + @Override + public void serverHeartbeatSucceeded(final ServerHeartbeatSucceededEvent event) { + events.add(event); + heartbeatLatch.countDown(); + } + + @Override + public void serverHeartbeatFailed(final ServerHeartbeatFailedEvent event) { + events.add(event); + heartbeatLatch.countDown(); + } + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/Concrete.java b/driver-sync/src/test/functional/com/mongodb/client/Concrete.java new file mode 100644 index 00000000000..cb827bf03bb --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/Concrete.java @@ -0,0 +1,128 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.client; + +import org.bson.types.ObjectId; + +class Concrete { + private ObjectId id; + private final String str; + private final int i; + private final long l; + private final double d; + private final long date; + + Concrete(final String str, final int i, final long l, final double d, final long date) { + this.str = str; + this.i = i; + this.l = l; + this.d = d; + this.date = date; + } + + Concrete(final ObjectId id, final String str, final int i, final long l, final double d, final long date) { + this(str, i, l, d, date); + this.id = id; + } + + @Override + public String toString() { + return "Concrete{" + + "id=" + id + + ", str='" + str + '\'' + + ", i=" + i + + ", l=" + l + + ", d=" + d + + ", date=" + date + + '}'; + } + + ObjectId getId() { + return id; + } + + String getStr() { + return str; + } + + int getI() { + return i; + } + + long getL() { + return l; + } + + double getD() { + return d; + } + + long getDate() { + return date; + } + + public void setId(final ObjectId id) { + this.id = id; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Concrete concrete = (Concrete) o; + + if (Double.compare(concrete.d, d) != 0) { + return false; + } + if (date != concrete.date) { + return false; + } + if (i != concrete.i) { + return false; + } + if (l != concrete.l) { + return false; + } + if (!id.equals(concrete.id)) { + return false; + } + if (!str.equals(concrete.str)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result; + long temp; + result = id.hashCode(); + result = 31 * result + str.hashCode(); + result = 31 * result + i; + result = 31 * result + (int) (l ^ (l >>> 32)); + temp = Double.doubleToLongBits(d); + result = 31 * result + (int) (temp ^ (temp >>> 32)); + result = 31 * result + (int) (date ^ (date >>> 32)); + return result; + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/ConcreteCodec.java b/driver-sync/src/test/functional/com/mongodb/client/ConcreteCodec.java new file mode 100644 index 00000000000..6c417753239 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/ConcreteCodec.java @@ -0,0 +1,80 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client; + +import org.bson.BsonObjectId; +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.codecs.CollectibleCodec; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; +import org.bson.types.ObjectId; + +class ConcreteCodec implements CollectibleCodec<Concrete> { + + @Override + public void encode(final BsonWriter writer, final Concrete c, final EncoderContext encoderContext) { + writer.writeStartDocument(); + if (!documentHasId(c)) { + c.setId(new ObjectId()); + } + writer.writeObjectId("_id", c.getId()); + writer.writeString("str", c.getStr()); + writer.writeInt32("i", c.getI()); + writer.writeInt64("l", c.getL()); + writer.writeDouble("d", c.getD()); + writer.writeDateTime("date", c.getDate()); + writer.writeEndDocument(); + } + + @Override + public Concrete decode(final BsonReader reader, final DecoderContext decoderContext) { + reader.readStartDocument(); + ObjectId id = reader.readObjectId("_id"); + String str = reader.readString("str"); + int i = reader.readInt32("i"); + long l = reader.readInt64("l"); + double d = reader.readDouble("d"); + long date = reader.readDateTime("date"); + + reader.readEndDocument(); + return new Concrete(id, str, i, l, d, date); + } + + @Override + public Class<Concrete> getEncoderClass() { + return Concrete.class; + } + + @Override + public boolean documentHasId(final Concrete document) { + return document.getId() != null; + } + + @Override + public BsonObjectId getDocumentId(final Concrete document) { + return new BsonObjectId(document.getId()); + } + + @Override + public Concrete generateIdIfAbsentFromDocument(final Concrete document) { + if (!documentHasId(document)) { + document.setId(new ObjectId()); + } + return document; + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/ConcreteCodecProvider.java b/driver-sync/src/test/functional/com/mongodb/client/ConcreteCodecProvider.java new file mode 100644 index 00000000000..7c0652c1f03 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/ConcreteCodecProvider.java @@ -0,0 +1,36 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.client; + +import org.bson.codecs.Codec; +import org.bson.codecs.configuration.CodecProvider; +import org.bson.codecs.configuration.CodecRegistry; + +/** + * A codec provider that supplies {@link ConcreteCodec} for the {@code Concrete} test class. + */ +public class ConcreteCodecProvider implements CodecProvider { + @Override + @SuppressWarnings("unchecked") + public <T> Codec<T> get(final Class<T> clazz, final CodecRegistry registry) { + if (clazz.equals(Concrete.class)) { + return (Codec<T>) new ConcreteCodec(); + } + + return null; + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/ConnectionsSurvivePrimaryStepDownProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/ConnectionsSurvivePrimaryStepDownProseTest.java new file mode 100644 index 00000000000..99e7bac823a --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/ConnectionsSurvivePrimaryStepDownProseTest.java @@ -0,0 +1,152 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoException; +import com.mongodb.MongoNamespace; +import com.mongodb.MongoNotPrimaryException; +import com.mongodb.WriteConcern; +import com.mongodb.client.test.CollectionHelper; +import com.mongodb.event.ConnectionCreatedEvent; +import com.mongodb.event.ConnectionPoolClearedEvent; +import com.mongodb.internal.connection.TestConnectionPoolListener; +import org.bson.Document; +import org.bson.codecs.DocumentCodec; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.util.List; + +import static com.mongodb.ClusterFixture.getDefaultDatabaseName; +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet; +import static com.mongodb.client.Fixture.getMongoClientSettings; +import static java.util.Arrays.asList; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +// See https://github.com/mongodb/specifications/tree/master/source/connections-survive-step-down/tests +public class ConnectionsSurvivePrimaryStepDownProseTest { + private static final String COLLECTION_NAME = "step-down"; + + private TestConnectionPoolListener connectionPoolListener; + private CollectionHelper<Document> collectionHelper; + private MongoClient client; + private MongoCollection<Document> collection; + + @Before + public void setUp() { + assumeTrue(isDiscoverableReplicaSet()); + connectionPoolListener = new TestConnectionPoolListener(); + MongoClientSettings settings = MongoClientSettings.builder(getMongoClientSettings()).retryWrites(false) + .applyToConnectionPoolSettings(builder -> builder.addConnectionPoolListener(connectionPoolListener)).build(); + + collectionHelper = new CollectionHelper<>(new DocumentCodec(), + new MongoNamespace(getDefaultDatabaseName(), COLLECTION_NAME)); + client = MongoClients.create(settings); + MongoDatabase database = client.getDatabase(getDefaultDatabaseName()); + collection = 
client.getDatabase(getDefaultDatabaseName()).getCollection(COLLECTION_NAME); + collection.withWriteConcern(WriteConcern.MAJORITY).drop(); + + database.withWriteConcern(WriteConcern.MAJORITY).createCollection(COLLECTION_NAME); + } + + @After + public void tearDown() { + if (client != null) { + collectionHelper.runAdminCommand("{configureFailPoint: 'failCommand', mode: 'off'}"); + + try { + client.getDatabase(getDefaultDatabaseName()).drop(); + } catch (MongoNotPrimaryException e) { + // GetMore will use the same connection, so it won't force a server description update + client.getDatabase(getDefaultDatabaseName()).drop(); + } + client.close(); + } + } + + @Test + public void testGetMoreIteration() { + List<Document> documents = asList(Document.parse("{_id: 1}"), Document.parse("{_id: 2}"), Document.parse("{_id: 3}"), + Document.parse("{_id: 4}"), Document.parse("{_id: 5}")); + collection.withWriteConcern(WriteConcern.MAJORITY).insertMany(documents); + + int connectionCount = connectionPoolListener.countEvents(ConnectionCreatedEvent.class); + MongoCursor<Document> cursor = collection.find().batchSize(2).iterator(); + assertEquals(asList(documents.get(0), documents.get(1)), asList(cursor.next(), cursor.next())); + + collectionHelper.runAdminCommand("{replSetStepDown: 5, force: true}"); + + assertEquals(asList(documents.get(2), documents.get(3), documents.get(4)), asList(cursor.next(), cursor.next(), cursor.next())); + assertEquals(connectionCount, connectionPoolListener.countEvents(ConnectionCreatedEvent.class)); + } + + @Test + public void testNotPrimaryKeepConnectionPool() { + collectionHelper.runAdminCommand("{configureFailPoint: 'failCommand', mode: {times: 1}, data: {failCommands: ['insert'], " + + "errorCode: 10107}}"); + int connectionCount = connectionPoolListener.countEvents(ConnectionCreatedEvent.class); + + try { + collection.insertOne(new Document()); + fail(); + } catch (MongoException e) { + assertEquals(10107, e.getCode()); + } + + collection.insertOne(new Document()); + assertEquals(connectionCount, connectionPoolListener.countEvents(ConnectionCreatedEvent.class)); + } + + @Test + public void testInterruptedAtShutdownResetsConnectionPool() { + collectionHelper.runAdminCommand("{configureFailPoint: 'failCommand', mode: {times: 1}, data: {failCommands: ['insert'], " + + "errorCode: 11600}}"); + int connectionCount = connectionPoolListener.countEvents(ConnectionCreatedEvent.class); + + try { + collection.insertOne(new Document()); + fail(); + } catch (MongoException e) { + assertEquals(11600, e.getCode()); + } + assertEquals(1, connectionPoolListener.countEvents(ConnectionPoolClearedEvent.class)); + collection.insertOne(new Document("test", 1)); + assertEquals(connectionCount + 1, connectionPoolListener.countEvents(ConnectionCreatedEvent.class)); + } + + @Test + public void testShutdownInProgressResetsConnectionPool() { + collectionHelper.runAdminCommand("{configureFailPoint: 'failCommand', mode: {times: 1}, data: {failCommands: ['insert'], " + + "errorCode: 91}}"); + int connectionCount = connectionPoolListener.countEvents(ConnectionCreatedEvent.class); + + try { + collection.insertOne(new Document()); + fail(); + } catch (MongoException e) { + assertEquals(91, e.getCode()); + } + assertEquals(1, connectionPoolListener.countEvents(ConnectionPoolClearedEvent.class)); + collection.insertOne(new Document("test", 1)); + assertEquals(connectionCount + 1, connectionPoolListener.countEvents(ConnectionCreatedEvent.class)); + } + +} diff --git 
a/driver-sync/src/test/functional/com/mongodb/client/ConnectivityTest.java b/driver-sync/src/test/functional/com/mongodb/client/ConnectivityTest.java new file mode 100644 index 00000000000..8838b9a53ab --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/ConnectivityTest.java @@ -0,0 +1,42 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.ConnectionString; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.List; + +import static com.mongodb.client.ConnectivityTestHelper.LEGACY_HELLO_COMMAND; +import static com.mongodb.client.Fixture.getMongoClientSettings; + +public class ConnectivityTest { + + // the test succeeds if no exception is thrown, and fails otherwise + @ParameterizedTest(name = "{1}") + @MethodSource("com.mongodb.client.ConnectivityTestHelper#getConnectivityTestArguments") + public void testConnectivity(final ConnectionString connectionString, @SuppressWarnings("unused") final List<String> hosts) { + try (MongoClient client = MongoClients.create(getMongoClientSettings(connectionString).build())) { + // test that a command that doesn't require auth completes normally + client.getDatabase("admin").runCommand(LEGACY_HELLO_COMMAND); + + // test that a command that requires auth completes normally + client.getDatabase("test").getCollection("test").estimatedDocumentCount(); + } + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/ConnectivityTestHelper.java b/driver-sync/src/test/functional/com/mongodb/client/ConnectivityTestHelper.java new file mode 100644 index 00000000000..40abbd3d99c --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/ConnectivityTestHelper.java @@ -0,0 +1,52 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.client; + +import com.mongodb.ConnectionString; +import org.bson.Document; +import org.junit.jupiter.params.provider.Arguments; + +import java.util.Arrays; +import java.util.stream.Stream; + +import static com.mongodb.ClusterFixture.getConnectionStringSystemPropertyOrDefault; + +public final class ConnectivityTestHelper { + public static final Document LEGACY_HELLO_COMMAND = new Document("ismaster", 1); + + /** + * Gets the JUnit Arguments for connectivity tests that use the "|"-delimited system property "org.mongodb.test.connectivity.uris" + * for the list of connection strings for which to check connectivity. If that system property is not set, it uses the default + * connection string configured for the entire test run (which itself defaults to "mongodb://localhost"). + * + * @return a {@code Stream<Arguments>}: the first argument is of type {@code ConnectionString}, the second is of type + * {@code List<String>}, representing the list of hosts in the connection string. The latter is useful as the value of the name of + * the parameterized test, e.g. {@code @ParameterizedTest(name = "{1}")}. + */ + public static Stream<Arguments> getConnectivityTestArguments() { + String connectionStrings = System.getProperty("org.mongodb.test.connectivity.uris", + getConnectionStringSystemPropertyOrDefault()); + return Arrays.stream(connectionStrings.split("\\|")) + .map(str -> { + ConnectionString connectionString = new ConnectionString(str); + return Arguments.of(connectionString, connectionString.getHosts()); + }); + } + + private ConnectivityTestHelper() { + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/ContextProviderTest.java b/driver-sync/src/test/functional/com/mongodb/client/ContextProviderTest.java new file mode 100644 index 00000000000..caf676a8ab7 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/ContextProviderTest.java @@ -0,0 +1,247 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.client; + +import com.mongodb.ContextProvider; +import com.mongodb.RequestContext; +import com.mongodb.WriteConcern; +import com.mongodb.event.CommandFailedEvent; +import com.mongodb.event.CommandListener; +import com.mongodb.event.CommandStartedEvent; +import com.mongodb.event.CommandSucceededEvent; +import com.mongodb.lang.Nullable; +import org.bson.Document; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; + +import static com.mongodb.ClusterFixture.getDefaultDatabaseName; +import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static com.mongodb.client.model.Updates.inc; +import static java.util.Arrays.asList; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.Mockito.mock; + +public class ContextProviderTest { + + @Test + public void shouldThrowIfContextProviderIsNotSynchronousContextProvider() { + assertThrows(IllegalArgumentException.class, () -> MongoClients.create(getMongoClientSettingsBuilder() + .contextProvider(new ContextProvider() {}) + .build())); + } + + @Test + public void shouldPropagateExceptionFromContextProvider() { + try (MongoClient client = MongoClients.create(getMongoClientSettingsBuilder() + .contextProvider((SynchronousContextProvider) () -> { + throw new RuntimeException(); + }) + .build())) { + + assertThrows(RuntimeException.class, () -> client.listDatabaseNames().into(new ArrayList<>())); + } + } + + @Test + public void contextShouldBeNullByDefaultInCommandEvents() { + + TestCommandListener commandListener = new TestCommandListener(null); + try (MongoClient client = MongoClients.create(getMongoClientSettingsBuilder() + .addCommandListener(commandListener) + .build())) { + + // given + MongoCollection<Document> collection = client.getDatabase(getDefaultDatabaseName()) + .getCollection("ContextProviderTest"); + collection.drop(); + collection.insertMany(asList(new Document(), new Document(), new Document(), new Document())); + commandListener.reset(); + + // when + collection.countDocuments(); + + // then + assertEquals(1, commandListener.numCommandStartedEventsWithExpectedContext); + assertEquals(1, commandListener.numCommandSucceededEventsWithExpectedContext); + } + } + + @Test + public void contextShouldBeAvailableInCommandEvents() { + RequestContext requestContext = mock(RequestContext.class); + + TestCommandListener commandListener = new TestCommandListener(requestContext); + try (MongoClient client = MongoClients.create(getMongoClientSettingsBuilder() + .contextProvider((SynchronousContextProvider) () -> requestContext) + .addCommandListener(commandListener) + .build())) { + + // given + MongoCollection<Document> collection = client.getDatabase(getDefaultDatabaseName()) + .getCollection("ContextProviderTest"); + collection.drop(); + collection.insertMany(asList(new Document(), new Document(), new Document(), new Document())); + commandListener.reset(); + + // when + collection.countDocuments(); + + // then + assertEquals(1, commandListener.numCommandStartedEventsWithExpectedContext); + assertEquals(1, commandListener.numCommandSucceededEventsWithExpectedContext); + + // given + commandListener.reset(); + Document document = new Document(); + + // when + collection.insertOne(document); + + // then + assertEquals(1, commandListener.numCommandStartedEventsWithExpectedContext); + assertEquals(1, commandListener.numCommandSucceededEventsWithExpectedContext);
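+ + // Note: TestCommandListener (below) counts an event only when its RequestContext is the very + // instance returned by the context provider (reference equality), so each started/succeeded + // pair asserted here shows that the instance was propagated through the operation end to end.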
+ + // given + commandListener.reset(); + + // when + collection.updateOne(document, inc("x", 1)); + + // then + assertEquals(1, commandListener.numCommandStartedEventsWithExpectedContext); + assertEquals(1, commandListener.numCommandSucceededEventsWithExpectedContext); + + // given + commandListener.reset(); + Document documentTwo = new Document(); + + // when + collection.withWriteConcern(WriteConcern.UNACKNOWLEDGED).insertOne(documentTwo); + + // then + assertEquals(1, commandListener.numCommandStartedEventsWithExpectedContext); + assertEquals(1, commandListener.numCommandSucceededEventsWithExpectedContext); + + // given + commandListener.reset(); + + // when + collection.withWriteConcern(WriteConcern.UNACKNOWLEDGED).updateOne(documentTwo, inc("x", 1)); + + // then + assertEquals(1, commandListener.numCommandStartedEventsWithExpectedContext); + assertEquals(1, commandListener.numCommandSucceededEventsWithExpectedContext); + + // given + commandListener.reset(); + + // when + collection.withWriteConcern(WriteConcern.UNACKNOWLEDGED).deleteOne(documentTwo); + + // then + assertEquals(1, commandListener.numCommandStartedEventsWithExpectedContext); + assertEquals(1, commandListener.numCommandSucceededEventsWithExpectedContext); + + // given + commandListener.reset(); + + // when + MongoCursor<Document> cursor = collection.find().batchSize(2).cursor(); + cursor.next(); + + // then + assertEquals(1, commandListener.numCommandStartedEventsWithExpectedContext); + assertEquals(1, commandListener.numCommandSucceededEventsWithExpectedContext); + + // given + commandListener.reset(); + + // when + cursor.next(); + cursor.next(); + + // then + assertEquals(1, commandListener.numCommandStartedEventsWithExpectedContext); + assertEquals(1, commandListener.numCommandSucceededEventsWithExpectedContext); + + // given + commandListener.reset(); + + // when + cursor.close(); + + // then + assertEquals(1, commandListener.numCommandStartedEventsWithExpectedContext); + assertEquals(1, commandListener.numCommandSucceededEventsWithExpectedContext); + + // given + commandListener.reset(); + + // when + try { + client.getDatabase("admin").runCommand(new Document("notRealCommand", 1)); + fail(); + } catch (Exception e) { + // then + assertEquals(1, commandListener.numCommandStartedEventsWithExpectedContext); + assertEquals(1, commandListener.numCommandFailedEventsWithExpectedContext); + } + } + } + + private static final class TestCommandListener implements CommandListener { + private int numCommandStartedEventsWithExpectedContext; + private int numCommandSucceededEventsWithExpectedContext; + private int numCommandFailedEventsWithExpectedContext; + private final RequestContext expectedContext; + + private TestCommandListener(@Nullable final RequestContext expectedContext) { + this.expectedContext = expectedContext; + } + + public void reset() { + numCommandStartedEventsWithExpectedContext = 0; + numCommandSucceededEventsWithExpectedContext = 0; + numCommandFailedEventsWithExpectedContext = 0; + } + + @Override + public void commandStarted(final CommandStartedEvent event) { + if (event.getRequestContext() == expectedContext) { + numCommandStartedEventsWithExpectedContext++; + } + } + + @Override + public void commandSucceeded(final CommandSucceededEvent event) { + if (event.getRequestContext() == expectedContext) { + numCommandSucceededEventsWithExpectedContext++; + } + } + + @Override + public void commandFailed(final CommandFailedEvent event) { + if (event.getRequestContext() == expectedContext) { 
numCommandFailedEventsWithExpectedContext++; + } + } + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/CrudProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/CrudProseTest.java new file mode 100644 index 00000000000..d269a3cad57 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/CrudProseTest.java @@ -0,0 +1,614 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.ClientBulkWriteException; +import com.mongodb.Function; +import com.mongodb.MongoBulkWriteException; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoNamespace; +import com.mongodb.MongoWriteConcernException; +import com.mongodb.MongoWriteException; +import com.mongodb.WriteConcern; +import com.mongodb.assertions.Assertions; +import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.Filters; +import com.mongodb.client.model.InsertOneModel; +import com.mongodb.client.model.Updates; +import com.mongodb.client.model.ValidationOptions; +import com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; +import com.mongodb.client.test.CollectionHelper; +import com.mongodb.event.CommandStartedEvent; +import com.mongodb.internal.connection.TestCommandListener; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonDocumentWrapper; +import org.bson.BsonInt32; +import org.bson.BsonMaximumSizeExceededException; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.Document; +import org.bson.RawBsonDocument; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.codecs.pojo.PojoCodecProvider; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.params.provider.ValueSource; +import org.opentest4j.AssertionFailedError; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.function.BiFunction; +import java.util.function.Supplier; +import java.util.stream.Stream; + +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet; +import static com.mongodb.ClusterFixture.isStandalone; +import static com.mongodb.ClusterFixture.serverVersionAtLeast; +import static com.mongodb.MongoClientSettings.getDefaultCodecRegistry; +import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static com.mongodb.client.Fixture.getPrimary; +import static com.mongodb.client.model.bulk.ClientBulkWriteOptions.clientBulkWriteOptions; +import static 
com.mongodb.client.model.bulk.ClientNamespacedWriteModel.insertOne; +import static com.mongodb.client.model.bulk.ClientUpdateOneOptions.clientUpdateOneOptions; +import static java.lang.String.join; +import static java.util.Arrays.asList; +import static java.util.Collections.nCopies; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; +import static org.bson.codecs.configuration.CodecRegistries.fromRegistries; +import static org.junit.jupiter.api.Assertions.assertAll; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assumptions.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeTrue; +import static org.junit.jupiter.params.provider.Arguments.arguments; + +/** + * See the CRUD Prose Tests in the MongoDB specifications repository. + */ +public class CrudProseTest { + private static final MongoNamespace NAMESPACE = new MongoNamespace("db", CrudProseTest.class.getName()); + + @DisplayName("1. WriteConcernError.details exposes writeConcernError.errInfo") + @Test + @SuppressWarnings("try") + void testWriteConcernErrInfoIsPropagated() throws InterruptedException { + assumeTrue(isDiscoverableReplicaSet()); + BsonDocument failPointDocument = new BsonDocument("configureFailPoint", new BsonString("failCommand")) + .append("mode", new BsonDocument("times", new BsonInt32(1))) + .append("data", new BsonDocument("failCommands", new BsonArray(singletonList(new BsonString("insert")))) + .append("writeConcernError", new BsonDocument("code", new BsonInt32(100)) + .append("codeName", new BsonString("UnsatisfiableWriteConcern")) + .append("errmsg", new BsonString("Not enough data-bearing nodes")) + .append("errInfo", new BsonDocument("writeConcern", new BsonDocument("w", new BsonInt32(2)) + .append("wtimeout", new BsonInt32(0)) + .append("provenance", new BsonString("clientSupplied")))))); + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder()); + FailPoint ignored = FailPoint.enable(failPointDocument, getPrimary())) { + MongoWriteConcernException actual = assertThrows(MongoWriteConcernException.class, () -> + droppedCollection(client, Document.class).insertOne(Document.parse("{ x: 1 }"))); + assertEquals(100, actual.getWriteConcernError().getCode()); + assertEquals("UnsatisfiableWriteConcern", actual.getWriteConcernError().getCodeName()); + assertEquals(new BsonDocument("writeConcern", + new BsonDocument("w", new BsonInt32(2)) + .append("wtimeout", new BsonInt32(0)) + .append("provenance", new BsonString("clientSupplied"))), + actual.getWriteConcernError().getDetails()); + } + } + + @DisplayName("2. 
WriteError.details exposes writeErrors[].errInfo") + @Test + void testWriteErrorDetailsIsPropagated() { + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder())) { + MongoCollection<Document> collection = droppedCollection(client, Document.class); + droppedDatabase(client).createCollection( + collection.getNamespace().getCollectionName(), + new CreateCollectionOptions().validationOptions(new ValidationOptions().validator(Filters.type("x", "string")))); + assertAll( + () -> { + MongoWriteException actual = assertThrows(MongoWriteException.class, () -> + collection.insertOne(new Document("x", 1))); + // These assertions don't do exactly what's required by the specification, + // but they're simpler to implement and nearly as effective. + assertTrue(actual.getMessage().contains("Write error")); + assertNotNull(actual.getError().getDetails()); + if (serverVersionAtLeast(5, 0)) { + assertFalse(actual.getError().getDetails().isEmpty()); + } + }, + () -> { + MongoBulkWriteException actual = assertThrows(MongoBulkWriteException.class, () -> + collection.insertMany(singletonList(new Document("x", 1)))); + // These assertions don't do exactly what's required by the specification, + // but they're simpler to implement and nearly as effective. + assertTrue(actual.getMessage().contains("Write errors")); + assertEquals(1, actual.getWriteErrors().size()); + if (serverVersionAtLeast(5, 0)) { + assertFalse(actual.getWriteErrors().get(0).getDetails().isEmpty()); + } + } + ); + + } + } + + @DisplayName("3. MongoClient.bulkWrite batch splits a writeModels input with greater than maxWriteBatchSize operations") + @Test + void testBulkWriteSplitsWhenExceedingMaxWriteBatchSize() { + assumeTrue(serverVersionAtLeast(8, 0)); + TestCommandListener commandListener = new TestCommandListener(); + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder().addCommandListener(commandListener))) { + int maxWriteBatchSize = droppedDatabase(client).runCommand(new Document("hello", 1)).getInteger("maxWriteBatchSize"); + ClientBulkWriteResult result = client.bulkWrite(nCopies( + maxWriteBatchSize + 1, + insertOne(NAMESPACE, new Document("a", "b")))); + assertEquals(maxWriteBatchSize + 1, result.getInsertedCount()); + List<CommandStartedEvent> startedBulkWriteCommandEvents = commandListener.getCommandStartedEvents("bulkWrite"); + assertEquals(2, startedBulkWriteCommandEvents.size()); + CommandStartedEvent firstEvent = startedBulkWriteCommandEvents.get(0); + CommandStartedEvent secondEvent = startedBulkWriteCommandEvents.get(1); + assertEquals(maxWriteBatchSize, firstEvent.getCommand().getArray("ops").size()); + assertEquals(1, secondEvent.getCommand().getArray("ops").size()); + assertEquals(firstEvent.getOperationId(), secondEvent.getOperationId()); + } + } + + @DisplayName("4. 
MongoClient.bulkWrite batch splits when an ops payload exceeds maxMessageSizeBytes") + @Test + void testBulkWriteSplitsWhenExceedingMaxMessageSizeBytes() { + assumeTrue(serverVersionAtLeast(8, 0)); + TestCommandListener commandListener = new TestCommandListener(); + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder().addCommandListener(commandListener))) { + Document helloResponse = droppedDatabase(client).runCommand(new Document("hello", 1)); + int maxBsonObjectSize = helloResponse.getInteger("maxBsonObjectSize"); + int maxMessageSizeBytes = helloResponse.getInteger("maxMessageSizeBytes"); + ClientNamespacedWriteModel model = insertOne( + NAMESPACE, + new Document("a", join("", nCopies(maxBsonObjectSize - 500, "b")))); + int numModels = maxMessageSizeBytes / maxBsonObjectSize + 1; + ClientBulkWriteResult result = client.bulkWrite(nCopies(numModels, model)); + assertEquals(numModels, result.getInsertedCount()); + List<CommandStartedEvent> startedBulkWriteCommandEvents = commandListener.getCommandStartedEvents("bulkWrite"); + assertEquals(2, startedBulkWriteCommandEvents.size()); + CommandStartedEvent firstEvent = startedBulkWriteCommandEvents.get(0); + CommandStartedEvent secondEvent = startedBulkWriteCommandEvents.get(1); + assertEquals(numModels - 1, firstEvent.getCommand().getArray("ops").size()); + assertEquals(1, secondEvent.getCommand().getArray("ops").size()); + assertEquals(firstEvent.getOperationId(), secondEvent.getOperationId()); + } + } + + @DisplayName("5. MongoClient.bulkWrite collects WriteConcernErrors across batches") + @Test + @SuppressWarnings("try") + protected void testBulkWriteCollectsWriteConcernErrorsAcrossBatches() throws InterruptedException { + assumeTrue(serverVersionAtLeast(8, 0)); + TestCommandListener commandListener = new TestCommandListener(); + BsonDocument failPointDocument = new BsonDocument("configureFailPoint", new BsonString("failCommand")) + .append("mode", new BsonDocument("times", new BsonInt32(2))) + .append("data", new BsonDocument() + .append("failCommands", new BsonArray(singletonList(new BsonString("bulkWrite")))) + .append("writeConcernError", new BsonDocument("code", new BsonInt32(91)) + .append("errmsg", new BsonString("Replication is being shut down")))); + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder() + .retryWrites(false) + .addCommandListener(commandListener)); + FailPoint ignored = FailPoint.enable(failPointDocument, getPrimary())) { + int maxWriteBatchSize = droppedDatabase(client).runCommand(new Document("hello", 1)).getInteger("maxWriteBatchSize"); + ClientNamespacedWriteModel model = insertOne(NAMESPACE, new Document("a", "b")); + int numModels = maxWriteBatchSize + 1; + ClientBulkWriteException error = assertThrows(ClientBulkWriteException.class, () -> + client.bulkWrite(nCopies(numModels, model))); + assertEquals(2, error.getWriteConcernErrors().size()); + ClientBulkWriteResult partialResult = error.getPartialResult() + .orElseThrow(org.junit.jupiter.api.Assertions::fail); + assertEquals(numModels, partialResult.getInsertedCount()); + assertEquals(2, commandListener.getCommandStartedEvents("bulkWrite").size()); + } + } + + @DisplayName("6. MongoClient.bulkWrite handles individual WriteErrors across batches") + @ParameterizedTest(name = "6. 
MongoClient.bulkWrite handles individual WriteErrors across batches--ordered:{0}") + @ValueSource(booleans = {false, true}) + protected void testBulkWriteHandlesWriteErrorsAcrossBatches(final boolean ordered) { + assumeTrue(serverVersionAtLeast(8, 0)); + TestCommandListener commandListener = new TestCommandListener(); + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder() + .retryWrites(false) + .addCommandListener(commandListener))) { + int maxWriteBatchSize = droppedDatabase(client).runCommand(new Document("hello", 1)).getInteger("maxWriteBatchSize"); + Document document = new Document("_id", 1); + MongoCollection<Document> collection = droppedCollection(client, Document.class); + collection.insertOne(document); + ClientNamespacedWriteModel model = insertOne(collection.getNamespace(), document); + int numModels = maxWriteBatchSize + 1; + ClientBulkWriteException error = assertThrows(ClientBulkWriteException.class, () -> + client.bulkWrite(nCopies(numModels, model), clientBulkWriteOptions().ordered(ordered))); + int expectedWriteErrorCount = ordered ? 1 : numModels; + int expectedCommandStartedEventCount = ordered ? 1 : 2; + assertEquals(expectedWriteErrorCount, error.getWriteErrors().size()); + assertEquals(expectedCommandStartedEventCount, commandListener.getCommandStartedEvents("bulkWrite").size()); + } + } + + @DisplayName("7. MongoClient.bulkWrite handles a cursor requiring a getMore") + @Test + void testBulkWriteHandlesCursorRequiringGetMore() { + assumeTrue(serverVersionAtLeast(8, 0)); + assertBulkWriteHandlesCursorRequiringGetMore(false); + } + + @DisplayName("8. MongoClient.bulkWrite handles a cursor requiring getMore within a transaction") + @Test + protected void testBulkWriteHandlesCursorRequiringGetMoreWithinTransaction() { + assumeTrue(serverVersionAtLeast(8, 0)); + assumeFalse(isStandalone()); + assertBulkWriteHandlesCursorRequiringGetMore(true); + } + + private void assertBulkWriteHandlesCursorRequiringGetMore(final boolean transaction) { + TestCommandListener commandListener = new TestCommandListener(); + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder() + .retryWrites(false) + .addCommandListener(commandListener))) { + int maxBsonObjectSize = droppedDatabase(client).runCommand(new Document("hello", 1)).getInteger("maxBsonObjectSize"); + try (ClientSession session = transaction ? client.startSession() : null) { + BiFunction<List<? extends ClientNamespacedWriteModel>, ClientBulkWriteOptions, ClientBulkWriteResult> bulkWrite = + (models, options) -> session == null + ? client.bulkWrite(models, options) + : client.bulkWrite(session, models, options); + Supplier<ClientBulkWriteResult> action = () -> bulkWrite.apply(asList( + ClientNamespacedWriteModel.updateOne( + NAMESPACE, + Filters.eq(join("", nCopies(maxBsonObjectSize / 2, "a"))), + Updates.set("x", 1), + clientUpdateOneOptions().upsert(true)), + ClientNamespacedWriteModel.updateOne( + NAMESPACE, + Filters.eq(join("", nCopies(maxBsonObjectSize / 2, "b"))), + Updates.set("x", 1), + clientUpdateOneOptions().upsert(true))), + clientBulkWriteOptions().verboseResults(true) + ); + + ClientBulkWriteResult result = transaction ? runInTransaction(session, action) : action.get(); + assertEquals(2, result.getUpsertedCount()); + assertEquals(2, result.getVerboseResults().orElseThrow(Assertions::fail).getUpdateResults().size()); + assertEquals(1, commandListener.getCommandStartedEvents("bulkWrite").size()); + } + } + } + + @DisplayName("11. 
MongoClient.bulkWrite batch splits when the addition of a new namespace exceeds the maximum message size") + @Test + protected void testBulkWriteSplitsWhenExceedingMaxMessageSizeBytesDueToNsInfo() { + assumeTrue(serverVersionAtLeast(8, 0)); + assertAll( + () -> { + // Case 1: No batch-splitting required + testBulkWriteSplitsWhenExceedingMaxMessageSizeBytesDueToNsInfo((client, models, commandListener) -> { + models.add(insertOne(NAMESPACE, new Document("a", "b"))); + ClientBulkWriteResult result = client.bulkWrite(models); + assertEquals(models.size(), result.getInsertedCount()); + List<CommandStartedEvent> startedBulkWriteCommandEvents = commandListener.getCommandStartedEvents("bulkWrite"); + assertEquals(1, startedBulkWriteCommandEvents.size()); + CommandStartedEvent event = startedBulkWriteCommandEvents.get(0); + BsonDocument command = event.getCommand(); + assertEquals(models.size(), command.getArray("ops").asArray().size()); + BsonArray nsInfo = command.getArray("nsInfo").asArray(); + assertEquals(1, nsInfo.size()); + assertEquals(NAMESPACE.getFullName(), nsInfo.get(0).asDocument().getString("ns").getValue()); + }); + }, + () -> { + // Case 2: Batch-splitting required + testBulkWriteSplitsWhenExceedingMaxMessageSizeBytesDueToNsInfo((client, models, commandListener) -> { + MongoNamespace namespace = new MongoNamespace(NAMESPACE.getDatabaseName(), join("", nCopies(200, "c"))); + models.add(insertOne(namespace, new Document("a", "b"))); + ClientBulkWriteResult result = client.bulkWrite(models); + assertEquals(models.size(), result.getInsertedCount()); + List<CommandStartedEvent> startedBulkWriteCommandEvents = commandListener.getCommandStartedEvents("bulkWrite"); + assertEquals(2, startedBulkWriteCommandEvents.size()); + BsonDocument firstEventCommand = startedBulkWriteCommandEvents.get(0).getCommand(); + assertEquals(models.size() - 1, firstEventCommand.getArray("ops").asArray().size()); + BsonArray firstNsInfo = firstEventCommand.getArray("nsInfo").asArray(); + assertEquals(1, firstNsInfo.size()); + assertEquals(NAMESPACE.getFullName(), firstNsInfo.get(0).asDocument().getString("ns").getValue()); + BsonDocument secondEventCommand = startedBulkWriteCommandEvents.get(1).getCommand(); + assertEquals(1, secondEventCommand.getArray("ops").asArray().size()); + BsonArray secondNsInfo = secondEventCommand.getArray("nsInfo").asArray(); + assertEquals(1, secondNsInfo.size()); + assertEquals(namespace.getFullName(), secondNsInfo.get(0).asDocument().getString("ns").getValue()); + }); + } + ); + } + + private void testBulkWriteSplitsWhenExceedingMaxMessageSizeBytesDueToNsInfo( + final TriConsumer<MongoClient, List<ClientNamespacedWriteModel>, TestCommandListener> test) { + TestCommandListener commandListener = new TestCommandListener(); + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder().addCommandListener(commandListener))) { + Document helloResponse = droppedDatabase(client).runCommand(new Document("hello", 1)); + int maxBsonObjectSize = helloResponse.getInteger("maxBsonObjectSize"); + int maxMessageSizeBytes = helloResponse.getInteger("maxMessageSizeBytes"); + // The spec test subtracts 1122 here, but we use a different collection name, so we subtract 1118 plus its length. 
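+ // A sketch of the arithmetic, read off the constants used below rather than taken from the spec text: + // opsBytes is the message budget left for the "ops" array; it is filled with documents of + // (maxBsonObjectSize - 57) bytes, and one smaller document is appended when at least 217 bytes remain, + // so that adding an entry for a second namespace to "nsInfo" is what tips the message over the limit.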
+ int opsBytes = maxMessageSizeBytes - 1118 - NAMESPACE.getCollectionName().length(); + int numModels = opsBytes / maxBsonObjectSize; + int remainderBytes = opsBytes % maxBsonObjectSize; + List<ClientNamespacedWriteModel> models = new ArrayList<>(nCopies( + numModels, + insertOne( + NAMESPACE, + new Document("a", join("", nCopies(maxBsonObjectSize - 57, "b")))))); + if (remainderBytes >= 217) { + models.add(insertOne( + NAMESPACE, + new Document("a", join("", nCopies(remainderBytes - 57, "b"))))); + } + test.accept(client, models, commandListener); + } + } + + @DisplayName("12. MongoClient.bulkWrite returns an error if no operations can be added to ops") + @ParameterizedTest(name = "12. MongoClient.bulkWrite returns an error if no operations can be added to ops--tooLarge:{0}") + @ValueSource(strings = {"document", "namespace"}) + protected void testBulkWriteSplitsErrorsForTooLargeOpsOrNsInfo(final String tooLarge) { + assumeTrue(serverVersionAtLeast(8, 0)); + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder())) { + int maxMessageSizeBytes = droppedDatabase(client).runCommand(new Document("hello", 1)).getInteger("maxMessageSizeBytes"); + ClientNamespacedWriteModel model; + switch (tooLarge) { + case "document": { + model = insertOne( + NAMESPACE, + new Document("a", join("", nCopies(maxMessageSizeBytes, "b")))); + break; + } + case "namespace": { + model = insertOne( + new MongoNamespace(NAMESPACE.getDatabaseName(), join("", nCopies(maxMessageSizeBytes, "b"))), + new Document("a", "b")); + break; + } + default: { + throw Assertions.fail(tooLarge); + } + } + assertThrows(BsonMaximumSizeExceededException.class, () -> client.bulkWrite(singletonList(model))); + } + } + + @DisplayName("13. MongoClient.bulkWrite returns an error if auto-encryption is configured") + @Test + protected void testBulkWriteErrorsForAutoEncryption() { + assumeTrue(serverVersionAtLeast(8, 0)); + HashMap<String, Object> awsKmsProviderProperties = new HashMap<>(); + awsKmsProviderProperties.put("accessKeyId", "foo"); + awsKmsProviderProperties.put("secretAccessKey", "bar"); + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder() + .autoEncryptionSettings(AutoEncryptionSettings.builder() + .keyVaultNamespace(NAMESPACE.getFullName()) + .kmsProviders(singletonMap("aws", awsKmsProviderProperties)) + .build()))) { + assertTrue( + assertThrows( + IllegalStateException.class, + () -> client.bulkWrite(singletonList(insertOne(NAMESPACE, new Document("a", "b")))) + ).getMessage().contains("bulkWrite does not currently support automatic encryption") + ); + } + } + + @DisplayName("15. 
MongoClient.bulkWrite with unacknowledged write concern uses w:0 for all batches") + @Test + protected void testWriteConcernOfAllBatchesWhenUnacknowledgedRequested() { + assumeTrue(serverVersionAtLeast(8, 0)); + TestCommandListener commandListener = new TestCommandListener(); + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder().addCommandListener(commandListener) + .writeConcern(WriteConcern.UNACKNOWLEDGED))) { + MongoDatabase database = droppedDatabase(client); + database.createCollection(NAMESPACE.getCollectionName()); + Document helloResponse = database.runCommand(new Document("hello", 1)); + int maxBsonObjectSize = helloResponse.getInteger("maxBsonObjectSize"); + int maxMessageSizeBytes = helloResponse.getInteger("maxMessageSizeBytes"); + ClientNamespacedWriteModel model = insertOne( + NAMESPACE, + new Document("a", join("", nCopies(maxBsonObjectSize - 500, "b")))); + int numModels = maxMessageSizeBytes / maxBsonObjectSize + 1; + ClientBulkWriteResult result = client.bulkWrite(nCopies(numModels, model), clientBulkWriteOptions().ordered(false)); + assertFalse(result.isAcknowledged()); + List<CommandStartedEvent> startedBulkWriteCommandEvents = commandListener.getCommandStartedEvents("bulkWrite"); + assertEquals(2, startedBulkWriteCommandEvents.size()); + CommandStartedEvent firstEvent = startedBulkWriteCommandEvents.get(0); + BsonDocument firstCommand = firstEvent.getCommand(); + CommandStartedEvent secondEvent = startedBulkWriteCommandEvents.get(1); + BsonDocument secondCommand = secondEvent.getCommand(); + assertEquals(numModels - 1, firstCommand.getArray("ops").size()); + assertEquals(1, secondCommand.getArray("ops").size()); + assertEquals(firstEvent.getOperationId(), secondEvent.getOperationId()); + assertEquals(0, firstCommand.getDocument("writeConcern").getInt32("w").intValue()); + assertEquals(0, secondCommand.getDocument("writeConcern").getInt32("w").intValue()); + assertEquals(numModels, database.getCollection(NAMESPACE.getCollectionName()).countDocuments()); + } + } + + /** + * This test is not from the specification. 
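+ * It verifies that the driver generates a document _id at most once: the two command-started + * events captured below (the initial attempt and its retry) must carry the same _id value.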
+ */ + @DisplayName("insertMustGenerateIdAtMostOnce") + @ParameterizedTest(name = "insertMustGenerateIdAtMostOnce--documentClass:{0}, expectIdGenerated:{1}") + @MethodSource("insertMustGenerateIdAtMostOnceArgs") + protected <TDocument> void insertMustGenerateIdAtMostOnce( + final Class<TDocument> documentClass, + final boolean expectIdGenerated, + final Supplier<TDocument> documentSupplier) { + assumeTrue(serverVersionAtLeast(8, 0)); + assumeTrue(isDiscoverableReplicaSet()); + assertAll( + () -> assertInsertMustGenerateIdAtMostOnce("insert", documentClass, expectIdGenerated, + (client, collection) -> collection.insertOne(documentSupplier.get()).getInsertedId()), + () -> assertInsertMustGenerateIdAtMostOnce("insert", documentClass, expectIdGenerated, + (client, collection) -> collection.bulkWrite( + singletonList(new InsertOneModel<>(documentSupplier.get()))) + .getInserts().get(0).getId()), + () -> assertInsertMustGenerateIdAtMostOnce("bulkWrite", documentClass, expectIdGenerated, + (client, collection) -> client.bulkWrite( + singletonList(insertOne(collection.getNamespace(), documentSupplier.get())), + clientBulkWriteOptions().verboseResults(true)) + .getVerboseResults().orElseThrow(Assertions::fail).getInsertResults().get(0).getInsertedId().orElse(null)) + ); + } + + private static Stream<Arguments> insertMustGenerateIdAtMostOnceArgs() { + CodecRegistry codecRegistry = fromRegistries( + getDefaultCodecRegistry(), + fromProviders(PojoCodecProvider.builder().automatic(true).build())); + return Stream.of( + arguments(MyDocument.class, true, (Supplier<MyDocument>) MyDocument::new), + arguments(Document.class, true, (Supplier<Document>) Document::new), + arguments(BsonDocument.class, true, (Supplier<BsonDocument>) BsonDocument::new), + arguments( + BsonDocumentWrapper.class, true, + (Supplier<BsonDocumentWrapper<MyDocument>>) () -> + new BsonDocumentWrapper<>(new MyDocument(), codecRegistry.get(MyDocument.class))), + arguments( + RawBsonDocument.class, false, + (Supplier<RawBsonDocument>) () -> + new RawBsonDocument(new MyDocument(), codecRegistry.get(MyDocument.class))) + ); + } + + @SuppressWarnings("try") + private <TDocument> void assertInsertMustGenerateIdAtMostOnce( + final String commandName, + final Class<TDocument> documentClass, + final boolean expectIdGenerated, + final BiFunction<MongoClient, MongoCollection<TDocument>, BsonValue> insertOperation) throws InterruptedException { + TestCommandListener commandListener = new TestCommandListener(); + BsonDocument failPointDocument = new BsonDocument("configureFailPoint", new BsonString("failCommand")) + .append("mode", new BsonDocument("times", new BsonInt32(1))) + .append("data", new BsonDocument() + .append("failCommands", new BsonArray(singletonList(new BsonString(commandName)))) + .append("errorLabels", new BsonArray(singletonList(new BsonString("RetryableWriteError")))) + .append("writeConcernError", new BsonDocument("code", new BsonInt32(91)) + .append("errmsg", new BsonString("Replication is being shut down")))); + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder() + .retryWrites(true) + .addCommandListener(commandListener) + .applyToServerSettings(builder -> builder.heartbeatFrequency(50, TimeUnit.MILLISECONDS)) + .codecRegistry(fromRegistries( + getDefaultCodecRegistry(), + fromProviders(PojoCodecProvider.builder().automatic(true).build())))); + FailPoint ignored = FailPoint.enable(failPointDocument, getPrimary())) { + MongoCollection<TDocument> collection = droppedCollection(client, documentClass); + BsonValue insertedId = insertOperation.apply(client, collection); + if (expectIdGenerated) { + assertNotNull(insertedId); + } else { + assertNull(insertedId); + } + List<CommandStartedEvent> startedCommandEvents = 
+            List<CommandStartedEvent> startedCommandEvents = commandListener.getCommandStartedEvents(commandName);
+            assertEquals(2, startedCommandEvents.size());
+            Function<BsonDocument, BsonValue> idFromCommand;
+            switch (commandName) {
+                case "insert": {
+                    idFromCommand = command -> command.getArray("documents").get(0).asDocument().get("_id");
+                    break;
+                }
+                case "bulkWrite": {
+                    idFromCommand = command -> command.getArray("ops").get(0).asDocument().getDocument("document").get("_id");
+                    break;
+                }
+                default: {
+                    throw Assertions.fail(commandName);
+                }
+            }
+            CommandStartedEvent firstEvent = startedCommandEvents.get(0);
+            CommandStartedEvent secondEvent = startedCommandEvents.get(1);
+            assertEquals(insertedId, idFromCommand.apply(firstEvent.getCommand()));
+            assertEquals(insertedId, idFromCommand.apply(secondEvent.getCommand()));
+        }
+    }
+
+    protected MongoClient createMongoClient(final MongoClientSettings.Builder mongoClientSettingsBuilder) {
+        return MongoClients.create(mongoClientSettingsBuilder.build());
+    }
+
+    private <TDocument> MongoCollection<TDocument> droppedCollection(final MongoClient client, final Class<TDocument> documentClass) {
+        return droppedDatabase(client).getCollection(NAMESPACE.getCollectionName(), documentClass);
+    }
+
+    private MongoDatabase droppedDatabase(final MongoClient client) {
+        MongoDatabase database = client.getDatabase(NAMESPACE.getDatabaseName());
+        database.drop();
+        return database;
+    }
+
+    public static final class MyDocument {
+        private int v;
+
+        public MyDocument() {
+        }
+
+        public int getV() {
+            return v;
+        }
+    }
+
+    @FunctionalInterface
+    private interface TriConsumer<A1, A2, A3> {
+        void accept(A1 a1, A2 a2, A3 a3);
+    }
+
+    /**
+     * This method is used instead of {@link ClientSession#withTransaction(TransactionBody)}
+     * because the reactive {@code com.mongodb.reactivestreams.client.ClientSession} does not support it.
+     */
+    private static ClientBulkWriteResult runInTransaction(final ClientSession session,
+            final Supplier<ClientBulkWriteResult> action) {
+        session.startTransaction();
+        try {
+            ClientBulkWriteResult result = action.get();
+            session.commitTransaction();
+            return result;
+        } catch (Throwable throwable) {
+            session.abortTransaction();
+            throw throwable;
+        }
+    }
+
+    @AfterAll
+    public static void cleanUp() {
+        CollectionHelper.drop(NAMESPACE);
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/DatabaseTestCase.java b/driver-sync/src/test/functional/com/mongodb/client/DatabaseTestCase.java
new file mode 100644
index 00000000000..70479c4670b
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/DatabaseTestCase.java
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.mongodb.client; + +import com.mongodb.client.test.CollectionHelper; +import com.mongodb.internal.connection.ServerHelper; +import org.bson.Document; +import org.bson.codecs.DocumentCodec; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; + +import static com.mongodb.client.Fixture.getDefaultDatabaseName; +import static com.mongodb.client.Fixture.getMongoClient; +import static com.mongodb.client.Fixture.getPrimary; + +public class DatabaseTestCase { + //For ease of use and readability, in this specific case we'll allow protected variables + //CHECKSTYLE:OFF + protected MongoClient client; + protected MongoDatabase database; + protected MongoCollection collection; + //CHECKSTYLE:ON + + @BeforeEach + public void setUp() { + client = getMongoClient(); + database = client.getDatabase(getDefaultDatabaseName()); + collection = database.getCollection(getClass().getName()); + collection.drop(); + } + + @AfterEach + public void tearDown() { + if (collection != null) { + collection.drop(); + } + try { + ServerHelper.checkPool(getPrimary()); + } catch (InterruptedException e) { + // ignore + } + } + + protected CollectionHelper getCollectionHelper() { + return new CollectionHelper<>(new DocumentCodec(), collection.getNamespace()); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/DnsConfigurationTest.java b/driver-sync/src/test/functional/com/mongodb/client/DnsConfigurationTest.java new file mode 100644 index 00000000000..36593ba0239 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/DnsConfigurationTest.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.MongoClientSettings; + +public class DnsConfigurationTest extends AbstractDnsConfigurationTest { + @Override + protected MongoClient createMongoClient(final MongoClientSettings settings) { + return MongoClients.create(settings); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/ExplainTest.java b/driver-sync/src/test/functional/com/mongodb/client/ExplainTest.java new file mode 100644 index 00000000000..ef278bff6a3 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/ExplainTest.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client; + +import com.mongodb.MongoClientSettings; + +public class ExplainTest extends AbstractExplainTest { + @Override + protected MongoClient createMongoClient(final MongoClientSettings settings) { + return MongoClients.create(settings); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/ExplicitUuidCodecUuidRepresentationTest.java b/driver-sync/src/test/functional/com/mongodb/client/ExplicitUuidCodecUuidRepresentationTest.java new file mode 100644 index 00000000000..c193469fd6a --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/ExplicitUuidCodecUuidRepresentationTest.java @@ -0,0 +1,58 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import org.bson.BsonBinarySubType; +import org.bson.UuidRepresentation; +import org.bson.codecs.UuidCodec; +import org.bson.codecs.configuration.CodecRegistry; +import org.junit.After; + +import java.util.UUID; + +public class ExplicitUuidCodecUuidRepresentationTest extends AbstractExplicitUuidCodecUuidRepresentationTest { + private MongoClient mongoClient; + + public ExplicitUuidCodecUuidRepresentationTest(final UuidRepresentation uuidRepresentationForClient, + final UuidRepresentation uuidRepresentationForExplicitEncoding, + final BsonBinarySubType subType, + final UuidCodec uuidCodec, final UUID uuid, final byte[] encodedValue, + final byte[] standardEncodedValue) { + super(uuidRepresentationForClient, uuidRepresentationForExplicitEncoding, subType, uuidCodec, uuid, encodedValue, + standardEncodedValue); + } + + @Override + protected void createMongoClient(final UuidRepresentation uuidRepresentation, final CodecRegistry codecRegistry) { + mongoClient = MongoClients.create(Fixture.getMongoClientSettingsBuilder() + .uuidRepresentation(uuidRepresentation) + .codecRegistry(codecRegistry) + .build()); + } + + @Override + protected MongoDatabase getDatabase(final String databaseName) { + return mongoClient.getDatabase(databaseName); + } + + @After + public void cleanUp() { + if (mongoClient != null) { + mongoClient.close(); + } + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/FailPoint.java b/driver-sync/src/test/functional/com/mongodb/client/FailPoint.java new file mode 100644 index 00000000000..52e8fe9ff58 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/FailPoint.java @@ -0,0 +1,70 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client; + +import com.mongodb.MongoClientSettings; +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterConnectionMode; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.conversions.Bson; + +import java.util.Collections; + +import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; + +public final class FailPoint implements AutoCloseable { + private final BsonDocument failPointDocument; + private final MongoClient client; + + private FailPoint(final BsonDocument failPointDocument, final MongoClient client) { + this.failPointDocument = failPointDocument.toBsonDocument(); + this.client = client; + } + + /** + * @param configureFailPointDoc A document representing {@code configureFailPoint} command to be issued as is via + * {@link com.mongodb.client.MongoDatabase#runCommand(Bson)}. + * @param serverAddress One may use {@link Fixture#getPrimary()} to get the address of a primary server + * if that is what is needed. + */ + public static FailPoint enable(final BsonDocument configureFailPointDoc, final ServerAddress serverAddress) { + MongoClientSettings clientSettings = getMongoClientSettingsBuilder() + .applyToClusterSettings(builder -> builder + .mode(ClusterConnectionMode.SINGLE) + .hosts(Collections.singletonList(serverAddress))) + .build(); + MongoClient client = MongoClients.create(clientSettings); + return enable(configureFailPointDoc, client); + } + + private static FailPoint enable(final BsonDocument configureFailPointDoc, final MongoClient client) { + FailPoint result = new FailPoint(configureFailPointDoc, client); + client.getDatabase("admin").runCommand(configureFailPointDoc); + return result; + } + + @Override + public void close() { + try { + client.getDatabase("admin").runCommand(new BsonDocument() + .append("configureFailPoint", failPointDocument.getString("configureFailPoint")) + .append("mode", new BsonString("off"))); + } finally { + client.close(); + } + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/Fixture.java b/driver-sync/src/test/functional/com/mongodb/client/Fixture.java new file mode 100644 index 00000000000..8114d62e41a --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/Fixture.java @@ -0,0 +1,117 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client; + +import com.mongodb.ClusterFixture; +import com.mongodb.ConnectionString; +import com.mongodb.MongoClientSettings; +import com.mongodb.ServerAddress; +import com.mongodb.connection.ServerDescription; + +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.ClusterFixture.getMultiMongosConnectionString; +import static com.mongodb.ClusterFixture.getServerApi; +import static com.mongodb.internal.connection.ClusterDescriptionHelper.getPrimaries; +import static java.util.Objects.requireNonNull; + +/** + * Helper class for the acceptance tests. + */ +public final class Fixture { + private static final long MIN_HEARTBEAT_FREQUENCY_MS = 50L; + + private static MongoClient mongoClient; + private static MongoDatabase defaultDatabase; + + private Fixture() { + } + + public static synchronized MongoClient getMongoClient() { + if (mongoClient != null) { + return mongoClient; + } + MongoClientSettings mongoClientSettings = getMongoClientSettings(); + mongoClient = MongoClients.create(mongoClientSettings); + Runtime.getRuntime().addShutdownHook(new Thread(() -> { + synchronized (Fixture.class) { + if (mongoClient == null) { + return; + } + if (defaultDatabase != null) { + defaultDatabase.drop(); + } + mongoClient.close(); + mongoClient = null; + } + })); + return mongoClient; + } + + public static synchronized MongoDatabase getDefaultDatabase() { + if (defaultDatabase == null) { + defaultDatabase = getMongoClient().getDatabase(getDefaultDatabaseName()); + } + return defaultDatabase; + } + + public static String getDefaultDatabaseName() { + return ClusterFixture.getDefaultDatabaseName(); + } + + public static MongoClientSettings getMongoClientSettings() { + return getMongoClientSettingsBuilder().build(); + } + + public static MongoClientSettings.Builder getMongoClientSettingsBuilder() { + return getMongoClientSettings(ClusterFixture.getConnectionString()); + } + + public static MongoClientSettings.Builder getMultiMongosMongoClientSettingsBuilder() { + return getMongoClientSettings(requireNonNull(getMultiMongosConnectionString())); + } + + public static MongoClientSettings.Builder getMongoClientSettings(final ConnectionString connectionString) { + MongoClientSettings.Builder builder = MongoClientSettings.builder() + .applyConnectionString(connectionString) + .applyToSocketSettings(socketSettingsBuilder -> { + socketSettingsBuilder.readTimeout(5, TimeUnit.MINUTES); + }) + .applyToServerSettings(serverSettingsBuilder -> { + serverSettingsBuilder.minHeartbeatFrequency(MIN_HEARTBEAT_FREQUENCY_MS, TimeUnit.MILLISECONDS); + }); + if (getServerApi() != null) { + builder.serverApi(getServerApi()); + } + return builder; + } + + /** + * Beware of a potential race condition hiding here: the primary you discover may differ from the one used by the {@code client} + * when performing some operations, as the primary may change. 
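+     * <p>
+     * For example, {@code FailPoint.enable(BsonDocument, ServerAddress)} opens a separate single-server connection to
+     * the address returned here, so an election that happens in between can leave the fail point configured on a server
+     * that is no longer the primary.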
+ */ + public static ServerAddress getPrimary() throws InterruptedException { + MongoClient client = getMongoClient(); + List serverDescriptions = getPrimaries(client.getClusterDescription()); + while (serverDescriptions.isEmpty()) { + Thread.sleep(100); + serverDescriptions = getPrimaries(client.getClusterDescription()); + } + return serverDescriptions.get(0).getAddress(); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/FunctionalSpecification.groovy b/driver-sync/src/test/functional/com/mongodb/client/FunctionalSpecification.groovy new file mode 100644 index 00000000000..82608420ada --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/FunctionalSpecification.groovy @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client + +import org.bson.Document +import spock.lang.Specification + +class FunctionalSpecification extends Specification { + protected MongoDatabase database + protected MongoCollection collection + + def setup() { + database = Fixture.getMongoClient().getDatabase(Fixture.getDefaultDatabaseName()) + collection = database.getCollection(getClass().getName()) + collection.drop() + } + + def cleanup() { + if (collection != null) { + collection.drop() + } + } + + String getDatabaseName() { + Fixture.getDefaultDatabaseName() + } + + String getCollectionName() { + collection.namespace.collectionName + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/InitialDnsSeedlistDiscoveryTest.java b/driver-sync/src/test/functional/com/mongodb/client/InitialDnsSeedlistDiscoveryTest.java new file mode 100644 index 00000000000..3b50cdc6b20 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/InitialDnsSeedlistDiscoveryTest.java @@ -0,0 +1,346 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client; + +import com.mongodb.Block; +import com.mongodb.ConnectionString; +import com.mongodb.MongoClientException; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoException; +import com.mongodb.ServerAddress; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ClusterSettings; +import com.mongodb.connection.ServerDescription; +import com.mongodb.connection.SslSettings; +import com.mongodb.event.ClusterDescriptionChangedEvent; +import com.mongodb.event.ClusterListener; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonNumber; +import org.bson.BsonValue; +import org.bson.Document; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import util.JsonPoweredTestHelper; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; + +import static com.mongodb.ClusterFixture.getSslSettings; +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet; +import static com.mongodb.ClusterFixture.isLoadBalanced; +import static com.mongodb.ClusterFixture.isSharded; +import static java.util.Objects.requireNonNull; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeFalse; +import static org.junit.Assume.assumeTrue; + +// See https://github.com/mongodb/specifications/tree/master/source/initial-dns-seedlist-discovery/tests +@RunWith(Parameterized.class) +public abstract class InitialDnsSeedlistDiscoveryTest { + private final String filename; + private final String parentDirectory; + private final String uri; + @Nullable + private final List seeds; + @Nullable + private final Integer numSeeds; + @Nullable + private final List hosts; + @Nullable + private final Integer numHosts; + private final BsonDocument options; + private final BsonDocument parsedOptions; + private final boolean isError; + private final boolean executePingCommand; + + public InitialDnsSeedlistDiscoveryTest(final String filename, final String parentDirectory, final String uri, + @Nullable final List seeds, @Nullable final Integer numSeeds, + @Nullable final List hosts, @Nullable final Integer numHosts, + final BsonDocument options, final BsonDocument parsedOptions, + final boolean isError, final boolean executePingCommand) { + this.filename = filename; + this.parentDirectory = parentDirectory; + this.uri = uri; + this.seeds = seeds; + this.numSeeds = numSeeds; + this.hosts = hosts; + this.numHosts = numHosts; + this.parsedOptions = parsedOptions; + this.isError = isError; + this.options = options; + this.executePingCommand = executePingCommand; + } + + public abstract MongoClient createMongoClient(MongoClientSettings settings); + + @Before + public void setUp() { + assumeFalse("https://jira.mongodb.org/browse/JAVA-5064", filename.equals("uri-with-uppercase-hostname.json")); + + if (parentDirectory.endsWith("replica-set")) { + assumeTrue(isDiscoverableReplicaSet()); + } else if (parentDirectory.endsWith("load-balanced")) { + assumeTrue(isLoadBalanced()); + } else if (parentDirectory.endsWith("sharded")) { + assumeTrue(isSharded()); + } else { + 
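+            // The initial-dns-seedlist-discovery spec tests are grouped into topology-specific directories
+            // (replica-set, load-balanced, sharded); any other parent directory means this runner is out of sync
+            // with the specification repository layout.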
fail("Unexpected parent directory: " + parentDirectory); + } + } + + @Test + public void shouldPassAllOutcomes() throws InterruptedException { + if (isError) { + assertErrorCondition(); + } else { + assertNonErrorCondition(); + } + } + + public void assertErrorCondition() throws InterruptedException { + AtomicReference exceptionReference = new AtomicReference<>(); + CountDownLatch latch = new CountDownLatch(1); + + ConnectionString connectionString; + MongoClientSettings settings; + try { + connectionString = new ConnectionString(uri); + SslSettings sslSettings = getSslSettings(connectionString); + assumeTrue("SSL settings don't match", getSslSettings().isEnabled() == sslSettings.isEnabled()); + settings = MongoClientSettings.builder().applyConnectionString(connectionString) + .applyToSslSettings(builder -> { + builder.applySettings(sslSettings); + builder.invalidHostNameAllowed(true); + }) + .applyToClusterSettings(builder -> { + builder.serverSelectionTimeout(5, TimeUnit.SECONDS); + builder.addClusterListener(new ClusterListener() { + @Override + public void clusterDescriptionChanged(final ClusterDescriptionChangedEvent event) { + if (event.getNewDescription().getSrvResolutionException() != null) { + exceptionReference.set(event.getNewDescription().getSrvResolutionException()); + latch.countDown(); + } + } + }); + }) + .build(); + } catch (MongoClientException | IllegalArgumentException e) { + // all good + return; + } + try (MongoClient client = createMongoClient(settings)) { + // Load balancing mode has special rules regarding cluster event publishing, so we can't rely on those here. + // Instead, we just try to execute an operation and assert that it throws + if (settings.getClusterSettings().getMode() == ClusterConnectionMode.LOAD_BALANCED) { + try { + client.getDatabase("admin").runCommand(new Document("ping", 1)); + } catch (MongoClientException e) { + // all good + } + } else { + if (!latch.await(5, TimeUnit.SECONDS)) { + fail("Failed to capture SRV resolution exception"); + } + try { + throw exceptionReference.get(); + } catch (MongoClientException e) { + // all good + } + } + } + } + + private void assertNonErrorCondition() throws InterruptedException { + CountDownLatch seedsLatch = new CountDownLatch(1); + CountDownLatch hostsLatch = new CountDownLatch(1); + ConnectionString connectionString = new ConnectionString(uri); + + assertOptions(connectionString); + assertParsedOptions(connectionString); + + SslSettings sslSettings = getSslSettings(connectionString); + + assumeTrue("SSL settings don't match", getSslSettings().isEnabled() == sslSettings.isEnabled()); + + MongoClientSettings settings = MongoClientSettings.builder() + .applyToClusterSettings(new Block() { + @Override + public void apply(final ClusterSettings.Builder builder) { + builder.applyConnectionString(connectionString) + .addClusterListener(new ClusterListener() { + @Override + public void clusterDescriptionChanged(final ClusterDescriptionChangedEvent event) { + List seedsList = event.getNewDescription().getServerDescriptions() + .stream() + .map(ServerDescription::getAddress) + .map(ServerAddress::toString) + .collect(Collectors.toList()); + List okHostsList = event.getNewDescription().getServerDescriptions() + .stream().filter(ServerDescription::isOk) + .map(ServerDescription::getAddress) + .map(ServerAddress::toString) + .collect(Collectors.toList()); + + hostsCheck(seedsList, seeds, numSeeds, seedsLatch); + hostsCheck(okHostsList, hosts, numHosts, hostsLatch); + } + }); + } + + private void hostsCheck(final 
List actual, @Nullable final List expected, + @Nullable final Integer expectedSize, final CountDownLatch latch) { + if (expected == null && expectedSize == null) { + latch.countDown(); + } else if (expected != null && actual.size() == expected.size() && actual.containsAll(expected)) { + latch.countDown(); + } else if (expectedSize != null && actual.size() == expectedSize) { + latch.countDown(); + } + } + }) + .applyToSslSettings(builder -> { + builder.applySettings(sslSettings); + builder.invalidHostNameAllowed(true); + }) + .build(); + + try (MongoClient client = createMongoClient(settings)) { + assertTrue(seedsLatch.await(10, TimeUnit.SECONDS)); + assertTrue(hostsLatch.await(10, TimeUnit.SECONDS)); + if (executePingCommand) { + assertTrue(client.getDatabase("admin").runCommand(new Document("ping", 1)).containsKey("ok")); + } + } + } + + private void assertOptions(final ConnectionString connectionString) { + for (Map.Entry entry : options.entrySet()) { + switch (entry.getKey()) { + case "replicaSet": + assertEquals(entry.getValue().asString().getValue(), connectionString.getRequiredReplicaSetName()); + break; + case "ssl": + assertEquals(entry.getValue().asBoolean().getValue(), connectionString.getSslEnabled()); + break; + case "authSource": + // ignoring authSource for now, because without at least a userName also in the connection string, + // the authSource is ignored. If the test gets this far, at least we know that a TXT record + // containing in authSource doesn't blow up. We just don't test that it's actually used. + assertTrue(true); + break; + case "directConnection": + assertEquals(entry.getValue().asBoolean().getValue(), connectionString.isDirectConnection()); + break; + case "loadBalanced": + assertEquals(entry.getValue().asBoolean().getValue(), connectionString.isLoadBalanced()); + break; + case "srvMaxHosts": + assertEquals(Integer.valueOf(entry.getValue().asInt32().getValue()), connectionString.getSrvMaxHosts()); + break; + case "srvServiceName": + assertEquals(entry.getValue().asString().getValue(), connectionString.getSrvServiceName()); + break; + default: + throw new UnsupportedOperationException("No support configured yet for " + entry.getKey()); + } + } + } + + private void assertParsedOptions(final ConnectionString connectionString) { + for (Map.Entry entry : parsedOptions.entrySet()) { + switch (entry.getKey()) { + case "db": + case "defaultDatabase": + assertEquals(entry.getValue().asString().getValue(), connectionString.getDatabase()); + break; + case "user": + String userName = requireNonNull(connectionString.getCredential()).getUserName(); + assertEquals(entry.getValue().asString().getValue(), userName); + break; + case "password": + String password = new String(requireNonNull(requireNonNull(connectionString.getCredential()).getPassword())); + assertEquals(entry.getValue().asString().getValue(), password); + break; + case "auth_database": + String source = connectionString.getCredential() == null + ? 
connectionString.getDatabase() + : connectionString.getCredential().getSource(); + assertEquals(entry.getValue().asString().getValue(), source); + break; + default: + throw new UnsupportedOperationException("No support configured yet for " + entry.getKey()); + } + } + } + + + @Parameterized.Parameters(name = "{0}") + public static Collection data() { + List data = new ArrayList<>(); + for (BsonDocument testDocument : JsonPoweredTestHelper.getSpecTestDocuments("initial-dns-seedlist-discovery")) { + String resourcePath = testDocument.getString("resourcePath").getValue(); + data.add(new Object[]{ + testDocument.getString("fileName").getValue(), + resourcePath.substring(0, resourcePath.lastIndexOf("/")), + testDocument.getString("uri").getValue(), + toStringList(testDocument.getArray("seeds", null)), + toInteger(testDocument.getNumber("numSeeds", null)), + toStringList(testDocument.getArray("hosts", null)), + toInteger(testDocument.getNumber("numHosts", null)), + testDocument.getDocument("options", new BsonDocument()), + testDocument.getDocument("parsed_options", new BsonDocument()), + testDocument.getBoolean("error", BsonBoolean.FALSE).getValue(), + testDocument.getBoolean("ping", BsonBoolean.TRUE).getValue() + }); + } + return data; + } + + @Nullable + private static Integer toInteger(@Nullable final BsonNumber bsonNumber) { + if (bsonNumber == null) { + return null; + } + return bsonNumber.intValue(); + } + + @Nullable + private static List toStringList(@Nullable final BsonArray bsonArray) { + if (bsonArray == null) { + return null; + } + List retVal = new ArrayList<>(bsonArray.size()); + for (BsonValue cur : bsonArray) { + retVal.add(cur.asString().getValue()); + } + return retVal; + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/JsonPoweredCrudTestHelper.java b/driver-sync/src/test/functional/com/mongodb/client/JsonPoweredCrudTestHelper.java new file mode 100644 index 00000000000..7e0225e8c51 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/JsonPoweredCrudTestHelper.java @@ -0,0 +1,1372 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client; + +import com.mongodb.MongoBulkWriteException; +import com.mongodb.MongoException; +import com.mongodb.MongoGridFSException; +import com.mongodb.MongoNamespace; +import com.mongodb.ReadConcern; +import com.mongodb.ReadConcernLevel; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.bulk.BulkWriteError; +import com.mongodb.bulk.BulkWriteInsert; +import com.mongodb.bulk.BulkWriteResult; +import com.mongodb.bulk.BulkWriteUpsert; +import com.mongodb.client.gridfs.GridFSBucket; +import com.mongodb.client.gridfs.model.GridFSDownloadOptions; +import com.mongodb.client.gridfs.model.GridFSUploadOptions; +import com.mongodb.client.model.BulkWriteOptions; +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.CollationAlternate; +import com.mongodb.client.model.CollationCaseFirst; +import com.mongodb.client.model.CollationMaxVariable; +import com.mongodb.client.model.CollationStrength; +import com.mongodb.client.model.CountOptions; +import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.DeleteManyModel; +import com.mongodb.client.model.DeleteOneModel; +import com.mongodb.client.model.DeleteOptions; +import com.mongodb.client.model.DropCollectionOptions; +import com.mongodb.client.model.FindOneAndDeleteOptions; +import com.mongodb.client.model.FindOneAndReplaceOptions; +import com.mongodb.client.model.FindOneAndUpdateOptions; +import com.mongodb.client.model.IndexOptions; +import com.mongodb.client.model.InsertManyOptions; +import com.mongodb.client.model.InsertOneModel; +import com.mongodb.client.model.InsertOneOptions; +import com.mongodb.client.model.ReplaceOneModel; +import com.mongodb.client.model.ReplaceOptions; +import com.mongodb.client.model.ReturnDocument; +import com.mongodb.client.model.UpdateManyModel; +import com.mongodb.client.model.UpdateOneModel; +import com.mongodb.client.model.UpdateOptions; +import com.mongodb.client.model.ValidationOptions; +import com.mongodb.client.model.WriteModel; +import com.mongodb.client.model.changestream.ChangeStreamDocument; +import com.mongodb.client.result.InsertManyResult; +import com.mongodb.client.result.InsertOneResult; +import com.mongodb.client.result.UpdateResult; +import com.mongodb.lang.NonNull; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonBinary; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonNull; +import org.bson.BsonObjectId; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.Document; +import org.bson.types.ObjectId; +import util.Hex; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.client.Fixture.getMongoClient; +import static java.lang.String.format; +import static org.junit.Assert.assertEquals; + +public class JsonPoweredCrudTestHelper { + private final String description; + private final MongoDatabase database; + private final MongoCollection baseCollection; + private final GridFSBucket gridFSBucket; + private final MongoClient mongoClient; + + public JsonPoweredCrudTestHelper(final String description, final MongoDatabase database, + final 
MongoCollection collection) { + this(description, database, collection, null, null); + } + + public JsonPoweredCrudTestHelper(final String description, final MongoDatabase database, + final MongoCollection collection, @Nullable final GridFSBucket gridFSBucket, + final MongoClient mongoClient) { + this.description = description; + this.database = database; + this.baseCollection = collection; + this.gridFSBucket = gridFSBucket; + this.mongoClient = mongoClient; + } + + public BsonDocument getOperationResults(final BsonDocument operation) { + return getOperationResults(operation, null); + } + + BsonDocument getOperationResults(final BsonDocument operation, @Nullable final ClientSession clientSession) { + BsonDocument collectionOptions = operation.getDocument("collectionOptions", new BsonDocument()); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + + String methodName = createMethodName(operation.getString("name").getValue(), + operation.getString("object", new BsonString("")).getValue()); + + switch (methodName) { + case "assertCollectionExists": + assertCollectionExists(operation, true); + return new BsonDocument(); + case "assertCollectionNotExists": + assertCollectionExists(operation, false); + return new BsonDocument(); + case "assertIndexExists": + assertIndexExists(operation, true); + return new BsonDocument(); + case "assertIndexNotExists": + assertIndexExists(operation, false); + return new BsonDocument(); + case "wait": + return executeWait(operation); + default: + try { + Method method = getClass().getDeclaredMethod(methodName, BsonDocument.class, BsonDocument.class, ClientSession.class); + return (BsonDocument) method.invoke(this, collectionOptions, arguments, clientSession); + } catch (NoSuchMethodException e) { + throw new UnsupportedOperationException("No handler for operation " + methodName); + } catch (InvocationTargetException e) { + if (e.getTargetException() instanceof MongoException) { + throw (MongoException) e.getTargetException(); + } + throw (RuntimeException) e.getTargetException(); + } catch (IllegalAccessException e) { + throw new UnsupportedOperationException("Invalid handler access for operation " + methodName); + } + } + } + + private String createMethodName(final String name, final String object) { + if (object.equals("testRunner")) { + return name; + } + StringBuilder builder = new StringBuilder(); + builder.append("get"); + if (!object.isEmpty() && !object.equals("collection") && !object.equals("gridfsbucket")) { + appendInitCapToBuilder(builder, object); + } + if (name.indexOf('_') >= 0) { + String[] nameParts = name.split("_"); + for (String part : nameParts) { + appendInitCapToBuilder(builder, part); + } + } else { + appendInitCapToBuilder(builder, name); + } + builder.append("Result"); + return builder.toString(); + } + + private void appendInitCapToBuilder(final StringBuilder builder, final String object) { + builder.append(object.substring(0, 1).toUpperCase()); + builder.append(object.substring(1)); + } + + BsonDocument toResult(final int count) { + return toResult(new BsonInt32(count)); + } + + BsonDocument toResult(final MongoIterable results) { + return toResult(new BsonArray(results.into(new ArrayList<>()))); + } + + BsonDocument toResult(final String key, final BsonValue value) { + return toResult(new BsonDocument(key, value)); + } + + BsonDocument toResult(final UpdateResult updateResult) { + BsonDocument resultDoc = new BsonDocument("matchedCount", new BsonInt32((int) updateResult.getMatchedCount())); + 
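+        // The driver reports counts as long, but the JSON test expectations use int32 values, hence the narrowing casts.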
resultDoc.append("modifiedCount", new BsonInt32((int) updateResult.getModifiedCount())); + if (updateResult.getUpsertedId() != null) { + resultDoc.append("upsertedId", updateResult.getUpsertedId()); + } + resultDoc.append("upsertedCount", updateResult.getUpsertedId() == null ? new BsonInt32(0) : new BsonInt32(1)); + + return toResult(resultDoc); + } + + BsonDocument toResult(final BulkWriteResult bulkWriteResult, final List> writeModels, + final List writeErrors) { + + BsonDocument resultDoc = new BsonDocument(); + if (bulkWriteResult.wasAcknowledged()) { + resultDoc.append("deletedCount", new BsonInt32(bulkWriteResult.getDeletedCount())); + + BsonDocument inserts = new BsonDocument(); + for (BulkWriteInsert bulkWriteInsert : bulkWriteResult.getInserts()) { + inserts.put(String.valueOf(bulkWriteInsert.getIndex()), bulkWriteInsert.getId()); + } + resultDoc.append("insertedIds", inserts); + resultDoc.append("insertedCount", bulkWriteResult.getInserts() == null + ? new BsonInt32(0) : new BsonInt32(bulkWriteResult.getInserts().size())); + + resultDoc.append("matchedCount", new BsonInt32(bulkWriteResult.getMatchedCount())); + resultDoc.append("modifiedCount", new BsonInt32(bulkWriteResult.getModifiedCount())); + resultDoc.append("upsertedCount", bulkWriteResult.getUpserts() == null + ? new BsonInt32(0) : new BsonInt32(bulkWriteResult.getUpserts().size())); + BsonDocument upserts = new BsonDocument(); + for (BulkWriteUpsert bulkWriteUpsert : bulkWriteResult.getUpserts()) { + upserts.put(String.valueOf(bulkWriteUpsert.getIndex()), bulkWriteUpsert.getId()); + } + resultDoc.append("upsertedIds", upserts); + } + return toResult(resultDoc); + } + + private boolean writeSuccessful(final int index, final List writeErrors) { + for (BulkWriteError cur : writeErrors) { + if (cur.getIndex() == index) { + return false; + } + } + return true; + } + + BsonDocument toResult(@Nullable final BsonValue results) { + return new BsonDocument("result", results != null ? 
results : BsonNull.VALUE); + } + + private BsonDocument executeWait(final BsonDocument operation) { + try { + Thread.sleep(operation.getDocument("arguments").getNumber("ms").longValue()); + return new BsonDocument(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + + private void assertCollectionExists(final BsonDocument operation, final boolean shouldExist) { + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + String databaseName = arguments.getString("database").getValue(); + String collection = arguments.getString("collection").getValue(); + assertEquals(shouldExist, collectionExists(databaseName, collection)); + } + + private boolean collectionExists(final String databaseName, final String collectionName) { + return getMongoClient().getDatabase(databaseName).listCollectionNames().into(new ArrayList<>()).contains(collectionName); + } + + private void assertIndexExists(final BsonDocument operation, final boolean shouldExist) { + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + String db = arguments.getString("database").getValue(); + String collection = arguments.getString("collection").getValue(); + String index = arguments.getString("index").getValue(); + assertEquals(shouldExist, indexExists(db, collection, index)); + } + + private boolean indexExists(final String databaseName, final String collectionName, final String indexName) { + List indexes = getMongoClient() + .getDatabase(databaseName) + .getCollection(collectionName) + .listIndexes() + .into(new ArrayList<>()); + return indexes.stream().anyMatch(document -> document.get("name").equals(indexName)); + } + + @NonNull + private List getCollectionNames(final BsonDocument arguments, @Nullable final ClientSession clientSession) { + MongoDatabase database = mongoClient.getDatabase(arguments.getString("database").getValue()); + MongoIterable collectionNames = clientSession != null ? database.listCollectionNames(clientSession) : database.listCollectionNames(); + return collectionNames.into(new ArrayList<>()); + } + + BsonDocument getDatabaseRunCommandResult(final BsonDocument collectionOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + return getRunCommandResult(collectionOptions, arguments, clientSession); + } + + BsonDocument getRunCommandResult(final BsonDocument collectionOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + BsonDocument response; + BsonDocument command = arguments.getDocument("command"); + ReadPreference readPreference = arguments.containsKey("readPreference") ? getReadPreference(arguments) : null; + + if (clientSession == null) { + if (readPreference == null) { + response = database.runCommand(command, BsonDocument.class); + } else { + response = database.runCommand(command, readPreference, BsonDocument.class); + } + } else { + if (readPreference == null) { + response = database.runCommand(clientSession, command, BsonDocument.class); + } else { + response = database.runCommand(clientSession, command, readPreference, BsonDocument.class); + } + } + if (response.containsKey("ok")) { + // The server response to the command may contain a double value for the "ok" field, but the expected result is an integer. 
+ response.put("ok", new BsonInt32((int) response.get("ok").asDouble().getValue())); + } + return toResult(response); + } + + BsonDocument getAggregateResult(final BsonDocument collectionOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + List pipeline = new ArrayList<>(); + for (BsonValue stage : arguments.getArray("pipeline")) { + pipeline.add(stage.asDocument()); + } + + AggregateIterable iterable; + if (clientSession == null) { + iterable = getCollection(collectionOptions).aggregate(pipeline); + } else { + iterable = getCollection(collectionOptions).aggregate(clientSession, pipeline); + } + + if (arguments.containsKey("batchSize")) { + iterable.batchSize(arguments.getNumber("batchSize").intValue()); + } + if (arguments.containsKey("maxTimeMS")) { + iterable.maxTime(arguments.getNumber("maxTimeMS").longValue(), TimeUnit.MILLISECONDS); + } + if (arguments.containsKey("collation")) { + iterable.collation(getCollation(arguments.getDocument("collation"))); + } + return toResult(iterable); + } + + BsonDocument getDatabaseAggregateResult(final BsonDocument operationOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + List pipeline = new ArrayList<>(); + for (BsonValue stage : arguments.getArray("pipeline")) { + pipeline.add(stage.asDocument()); + } + + AggregateIterable iterable; + if (clientSession == null) { + iterable = database.aggregate(pipeline, BsonDocument.class); + } else { + iterable = database.aggregate(clientSession, pipeline, BsonDocument.class); + } + + if (arguments.containsKey("allowDiskUse")) { + iterable.allowDiskUse(arguments.getBoolean("allowDiskUse").getValue()); + } + if (arguments.containsKey("batchSize")) { + iterable.batchSize(arguments.getNumber("batchSize").intValue()); + } + if (arguments.containsKey("collation")) { + iterable.collation(getCollation(arguments.getDocument("collation"))); + } + + BsonDocument results = toResult(iterable); + for (BsonValue result : results.getArray("result", new BsonArray())) { + if (result.isDocument()) { + BsonDocument command = result.asDocument().getDocument("command", new BsonDocument()); + command.remove("$readPreference"); + command.remove("$clusterTime"); + command.remove("signature"); + command.remove("keyId"); + } + } + return results; + } + + BsonDocument getEstimatedDocumentCountResult(final BsonDocument collectionOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + if (!arguments.isEmpty()) { + throw new UnsupportedOperationException("Unexpected arguments: " + arguments); + } + return toResult((int) getCollection(collectionOptions).estimatedDocumentCount()); + } + + BsonDocument getClientListDatabasesResult(final BsonDocument databaseOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + ListDatabasesIterable iterable; + if (clientSession == null) { + iterable = mongoClient.listDatabases(BsonDocument.class); + } else { + iterable = mongoClient.listDatabases(clientSession, BsonDocument.class); + } + return toResult(iterable); + } + + BsonDocument getClientListDatabaseObjectsResult(final BsonDocument databaseOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + return getClientListDatabasesResult(databaseOptions, arguments, clientSession); + } + + BsonDocument getClientListDatabaseNamesResult(final BsonDocument databaseOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + return 
getClientListDatabasesResult(databaseOptions, arguments, clientSession); + } + + BsonDocument getDatabaseListCollectionObjectsResult(final BsonDocument databaseOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + return getDatabaseListCollectionsResult(databaseOptions, arguments, clientSession); + } + + BsonDocument getDatabaseListCollectionNamesResult(final BsonDocument databaseOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + return getDatabaseListCollectionsResult(databaseOptions, arguments, clientSession); + } + + BsonDocument getDatabaseListCollectionsResult(final BsonDocument databaseOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + ListCollectionsIterable iterable; + if (clientSession == null) { + iterable = database.listCollections(BsonDocument.class); + } else { + iterable = database.listCollections(clientSession, BsonDocument.class); + } + return toResult(iterable); + } + + BsonDocument getCreateIndexResult(final BsonDocument collectionOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + String index; + IndexOptions indexOptions = new IndexOptions(); + + if (arguments.containsKey("name")) { + indexOptions.name(arguments.getString("name").getValue()); + } + if (clientSession == null) { + index = getCollection(collectionOptions).createIndex(arguments.getDocument("keys", new BsonDocument()), indexOptions); + } else { + index = getCollection(collectionOptions).createIndex(clientSession, arguments.getDocument("keys", new BsonDocument()), + indexOptions); + } + return toResult("result", new BsonString(index)); + } + + BsonDocument getDatabaseCreateCollectionResult(final BsonDocument databaseOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + String collectionName = arguments.getString("collection").getValue(); + CreateCollectionOptions createCollectionOptions = new CreateCollectionOptions(); + if (arguments.containsKey("encryptedFields")) { + createCollectionOptions.encryptedFields(arguments.getDocument("encryptedFields")); + } + if (arguments.containsKey("validator")) { + createCollectionOptions.validationOptions(new ValidationOptions().validator(arguments.getDocument("validator"))); + } + + if (clientSession == null) { + database.createCollection(collectionName, createCollectionOptions); + } else { + database.createCollection(clientSession, collectionName, createCollectionOptions); + } + return new BsonDocument("ok", new BsonInt32(1)); + } + + BsonDocument getDropIndexResult(final BsonDocument collectionOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + if (clientSession == null) { + getCollection(collectionOptions).dropIndex(arguments.getString("name").getValue()); + } else { + getCollection(collectionOptions).dropIndex(clientSession, arguments.getString("name").getValue()); + } + return new BsonDocument("ok", new BsonInt32(1)); + } + + BsonDocument getDatabaseDropCollectionResult(final BsonDocument databaseOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + String collectionName = arguments.getString("collection").getValue(); + DropCollectionOptions dropCollectionOptions = new DropCollectionOptions(); + if (arguments.containsKey("encryptedFields")) { + dropCollectionOptions.encryptedFields(arguments.getDocument("encryptedFields")); + } + + if (clientSession == null) { + 
database.getCollection(collectionName).drop(dropCollectionOptions); + } else { + database.getCollection(collectionName).drop(clientSession, dropCollectionOptions); + } + return new BsonDocument("ok", new BsonInt32(1)); + } + + BsonDocument getListIndexesResult(final BsonDocument collectionOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + ListIndexesIterable iterable; + if (clientSession == null) { + iterable = getCollection(collectionOptions).listIndexes(BsonDocument.class); + } else { + iterable = getCollection(collectionOptions).listIndexes(clientSession, BsonDocument.class); + } + return toResult(iterable); + } + + BsonDocument getCountDocumentsResult(final BsonDocument collectionOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + CountOptions options = new CountOptions(); + if (arguments.containsKey("skip")) { + options.skip(arguments.getNumber("skip").intValue()); + } + if (arguments.containsKey("limit")) { + options.limit(arguments.getNumber("limit").intValue()); + } + if (arguments.containsKey("collation")) { + options.collation(getCollation(arguments.getDocument("collation"))); + } + BsonDocument filter = arguments.getDocument("filter", new BsonDocument()); + int count; + if (clientSession == null) { + count = (int) getCollection(collectionOptions).countDocuments(filter, options); + } else { + count = (int) getCollection(collectionOptions).countDocuments(clientSession, filter, options); + } + return toResult(count); + } + + BsonDocument getDistinctResult(final BsonDocument collectionOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + DistinctIterable iterable; + if (clientSession == null) { + iterable = getCollection(collectionOptions).distinct(arguments.getString("fieldName").getValue(), BsonValue.class); + } else { + iterable = getCollection(collectionOptions).distinct(clientSession, arguments.getString("fieldName").getValue(), + BsonValue.class); + } + + if (arguments.containsKey("filter")) { + iterable.filter(arguments.getDocument("filter")); + } + if (arguments.containsKey("collation")) { + iterable.collation(getCollation(arguments.getDocument("collation"))); + } + return toResult(iterable.into(new BsonArray())); + } + + BsonDocument getFindOneResult(final BsonDocument collectionOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + return toResult(createFindIterable(collectionOptions, arguments, clientSession).first()); + } + + BsonDocument getFindResult(final BsonDocument collectionOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + return toResult(createFindIterable(collectionOptions, arguments, clientSession)); + } + + private FindIterable createFindIterable(final BsonDocument collectionOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + FindIterable iterable; + if (clientSession == null) { + iterable = getCollection(collectionOptions).find(arguments.getDocument("filter", new BsonDocument())); + } else { + iterable = getCollection(collectionOptions).find(clientSession, arguments.getDocument("filter", new BsonDocument())); + } + + if (arguments.containsKey("skip")) { + iterable.skip(arguments.getNumber("skip").intValue()); + } + if (arguments.containsKey("limit")) { + iterable.limit(arguments.getNumber("limit").intValue()); + } + if (arguments.containsKey("batchSize")) { + iterable.batchSize(arguments.getNumber("batchSize").intValue()); + } + if 
(arguments.containsKey("sort")) { + iterable.sort(arguments.getDocument("sort")); + } + if (arguments.containsKey("collation")) { + iterable.collation(getCollation(arguments.getDocument("collation"))); + } + if (arguments.containsKey("comment")) { + iterable.comment(arguments.getString("comment").getValue()); + } + if (arguments.containsKey("hint")) { + iterable.hint(arguments.getDocument("hint")); + } + if (arguments.containsKey("max")) { + iterable.max(arguments.getDocument("max")); + } + if (arguments.containsKey("min")) { + iterable.min(arguments.getDocument("min")); + } + if (arguments.containsKey("maxTimeMS")) { + iterable.maxTime(arguments.getNumber("maxTimeMS").intValue(), TimeUnit.MILLISECONDS); + } + if (arguments.containsKey("showRecordId")) { + iterable.showRecordId(arguments.getBoolean("showRecordId").getValue()); + } + if (arguments.containsKey("returnKey")) { + iterable.returnKey(arguments.getBoolean("returnKey").getValue()); + } + if (arguments.containsKey("collation")) { + iterable.collation(getCollation(arguments.getDocument("collation"))); + } + if (arguments.containsKey("comment")) { + iterable.comment(arguments.getString("comment").getValue()); + } + if (arguments.containsKey("hint")) { + iterable.hint(arguments.getDocument("hint")); + } + if (arguments.containsKey("max")) { + iterable.max(arguments.getDocument("max")); + } + if (arguments.containsKey("min")) { + iterable.min(arguments.getDocument("min")); + } + if (arguments.containsKey("maxTimeMS")) { + iterable.maxTime(arguments.getNumber("maxTimeMS").intValue(), TimeUnit.MILLISECONDS); + } + if (arguments.containsKey("showRecordId")) { + iterable.showRecordId(arguments.getBoolean("showRecordId").getValue()); + } + if (arguments.containsKey("returnKey")) { + iterable.returnKey(arguments.getBoolean("returnKey").getValue()); + } + if (arguments.containsKey("allowDiskUse")) { + iterable.allowDiskUse(arguments.getBoolean("allowDiskUse").getValue()); + } + + return iterable; + } + + @SuppressWarnings("deprecation") + BsonDocument getMapReduceResult(final BsonDocument collectionOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + MapReduceIterable iterable; + if (clientSession == null) { + iterable = getCollection(collectionOptions).mapReduce(arguments.get("map").asJavaScript().getCode(), + arguments.get("reduce").asJavaScript().getCode()); + } else { + iterable = getCollection(collectionOptions).mapReduce(clientSession, arguments.get("map").asJavaScript().getCode(), + arguments.get("reduce").asJavaScript().getCode()); + } + + if (arguments.containsKey("filter")) { + iterable.filter(arguments.getDocument("filter")); + } + if (arguments.containsKey("collation")) { + iterable.collation(getCollation(arguments.getDocument("collation"))); + } + return toResult(iterable.into(new BsonArray())); + } + + BsonDocument getDeleteManyResult(final BsonDocument collectionOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + DeleteOptions options = new DeleteOptions(); + if (arguments.containsKey("collation")) { + options.collation(getCollation(arguments.getDocument("collation"))); + } + if (arguments.containsKey("hint")) { + if (arguments.isDocument("hint")) { + options.hint(arguments.getDocument("hint")); + } else { + options.hintString(arguments.getString("hint").getValue()); + } + } + + int deletedCount; + if (clientSession == null) { + deletedCount = (int) getCollection(collectionOptions).deleteMany(arguments.getDocument("filter"), options).getDeletedCount(); + 
} else { + deletedCount = (int) getCollection(collectionOptions).deleteMany(clientSession, arguments.getDocument("filter"), options) + .getDeletedCount(); + } + + return toResult("deletedCount", + new BsonInt32(deletedCount)); + } + + BsonDocument getDeleteOneResult(final BsonDocument collectionOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + DeleteOptions options = new DeleteOptions(); + if (arguments.containsKey("collation")) { + options.collation(getCollation(arguments.getDocument("collation"))); + } + if (arguments.containsKey("hint")) { + if (arguments.isDocument("hint")) { + options.hint(arguments.getDocument("hint")); + } else { + options.hintString(arguments.getString("hint").getValue()); + } + } + + int deletedCount; + if (clientSession == null) { + deletedCount = (int) getCollection(collectionOptions).deleteOne(arguments.getDocument("filter"), options).getDeletedCount(); + } else { + deletedCount = (int) getCollection(collectionOptions).deleteOne(clientSession, arguments.getDocument("filter"), options) + .getDeletedCount(); + } + + return toResult("deletedCount", new BsonInt32(deletedCount)); + } + + BsonDocument getFindOneAndDeleteResult(final BsonDocument collectionOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + FindOneAndDeleteOptions options = new FindOneAndDeleteOptions(); + if (arguments.containsKey("projection")) { + options.projection(arguments.getDocument("projection")); + } + if (arguments.containsKey("sort")) { + options.sort(arguments.getDocument("sort")); + } + if (arguments.containsKey("collation")) { + options.collation(getCollation(arguments.getDocument("collation"))); + } + if (arguments.containsKey("hint")) { + if (arguments.isDocument("hint")) { + options.hint(arguments.getDocument("hint")); + } else { + options.hintString(arguments.getString("hint").getValue()); + } + } + + BsonDocument result; + if (clientSession == null) { + result = getCollection(collectionOptions).findOneAndDelete(arguments.getDocument("filter"), options); + } else { + result = getCollection(collectionOptions).findOneAndDelete(clientSession, arguments.getDocument("filter"), options); + } + + return toResult(result); + } + + BsonDocument getFindOneAndReplaceResult(final BsonDocument collectionOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + FindOneAndReplaceOptions options = new FindOneAndReplaceOptions(); + if (arguments.containsKey("projection")) { + options.projection(arguments.getDocument("projection")); + } + if (arguments.containsKey("sort")) { + options.sort(arguments.getDocument("sort")); + } + if (arguments.containsKey("upsert")) { + options.upsert(arguments.getBoolean("upsert").getValue()); + } + if (arguments.containsKey("returnDocument")) { + options.returnDocument(arguments.getString("returnDocument").getValue().equals("After") ? 
ReturnDocument.AFTER + : ReturnDocument.BEFORE); + } + if (arguments.containsKey("collation")) { + options.collation(getCollation(arguments.getDocument("collation"))); + } + if (arguments.containsKey("hint")) { + if (arguments.isDocument("hint")) { + options.hint(arguments.getDocument("hint")); + } else { + options.hintString(arguments.getString("hint").getValue()); + } + } + + BsonDocument result; + if (clientSession == null) { + result = getCollection(collectionOptions).findOneAndReplace(arguments.getDocument("filter"), + arguments.getDocument("replacement"), options); + } else { + result = getCollection(collectionOptions).findOneAndReplace(clientSession, arguments.getDocument("filter"), + arguments.getDocument("replacement"), options); + } + + return toResult(result); + } + + BsonDocument getFindOneAndUpdateResult(final BsonDocument collectionOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + FindOneAndUpdateOptions options = new FindOneAndUpdateOptions(); + if (arguments.containsKey("projection")) { + options.projection(arguments.getDocument("projection")); + } + if (arguments.containsKey("sort")) { + options.sort(arguments.getDocument("sort")); + } + if (arguments.containsKey("upsert")) { + options.upsert(arguments.getBoolean("upsert").getValue()); + } + if (arguments.containsKey("returnDocument")) { + options.returnDocument(arguments.getString("returnDocument").getValue().equals("After") ? ReturnDocument.AFTER + : ReturnDocument.BEFORE); + } + if (arguments.containsKey("collation")) { + options.collation(getCollation(arguments.getDocument("collation"))); + } + if (arguments.containsKey("arrayFilters")) { + options.arrayFilters((getListOfDocuments(arguments.getArray("arrayFilters")))); + } + if (arguments.containsKey("hint")) { + if (arguments.isDocument("hint")) { + options.hint(arguments.getDocument("hint")); + } else { + options.hintString(arguments.getString("hint").getValue()); + } + } + BsonDocument result; + if (clientSession == null) { + if (arguments.isDocument("update")) { + result = getCollection(collectionOptions).findOneAndUpdate(arguments.getDocument("filter"), arguments.getDocument("update"), + options); + } else { // update is a pipeline + result = getCollection(collectionOptions).findOneAndUpdate(arguments.getDocument("filter"), + getListOfDocuments(arguments.getArray("update")), options); + } + } else { + if (arguments.isDocument("update")) { + result = getCollection(collectionOptions).findOneAndUpdate(clientSession, arguments.getDocument("filter"), + arguments.getDocument("update"), options); + } else { // update is a pipeline + result = getCollection(collectionOptions).findOneAndUpdate(clientSession, arguments.getDocument("filter"), + getListOfDocuments(arguments.getArray("update")), options); + } + } + + return toResult(result); + } + + BsonDocument getInsertOneResult(final BsonDocument collectionOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + BsonDocument document = arguments.getDocument("document"); + InsertOneOptions options = new InsertOneOptions(); + if (arguments.containsKey("bypassDocumentValidation")) { + options.bypassDocumentValidation(arguments.getBoolean("bypassDocumentValidation").getValue()); + } + InsertOneResult result; + + if (clientSession == null) { + result = getCollection(collectionOptions).insertOne(document, options); + } else { + result = getCollection(collectionOptions).insertOne(clientSession, document, options); + } + + return toResult(new 
BsonDocument("insertedId", result.getInsertedId())); + } + + BsonDocument getInsertManyResult(final BsonDocument collectionOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + List documents = new ArrayList<>(); + for (BsonValue document : arguments.getArray("documents")) { + documents.add(document.asDocument()); + } + + try { + InsertManyOptions options = new InsertManyOptions().ordered(arguments.getDocument("options", new BsonDocument()) + .getBoolean("ordered", BsonBoolean.TRUE).getValue()); + if (arguments.containsKey("bypassDocumentValidation")) { + options.bypassDocumentValidation(arguments.getBoolean("bypassDocumentValidation").getValue()); + } + + InsertManyResult insertManyResult; + if (clientSession == null) { + insertManyResult = getCollection(collectionOptions).insertMany(documents, options); + } else { + insertManyResult = getCollection(collectionOptions).insertMany(clientSession, documents, options); + } + + BsonDocument insertedIds = new BsonDocument(); + insertManyResult.getInsertedIds().forEach((i, v) -> insertedIds.put(i.toString(), v)); + return toResult(new BsonDocument("insertedIds", insertedIds)); + } catch (MongoBulkWriteException e) { + // For transaction tests, the exception is expected to be returned. + if (clientSession != null && clientSession.hasActiveTransaction()) { + throw e; + } + // Test results are expecting this to look just like bulkWrite error, so translate to InsertOneModel so the result + // translation code can be reused. + List> writeModels = new ArrayList<>(); + for (BsonValue document : arguments.getArray("documents")) { + writeModels.add(new InsertOneModel<>(document.asDocument())); + } + BsonDocument result = toResult(e.getWriteResult(), writeModels, e.getWriteErrors()); + result.put("error", BsonBoolean.TRUE); + return result; + } + } + + BsonDocument getReplaceOneResult(final BsonDocument collectionOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + ReplaceOptions options = new ReplaceOptions(); + if (arguments.containsKey("upsert")) { + options.upsert(arguments.getBoolean("upsert").getValue()); + } + if (arguments.containsKey("collation")) { + options.collation(getCollation(arguments.getDocument("collation"))); + } + if (arguments.containsKey("bypassDocumentValidation")) { + options.bypassDocumentValidation(arguments.getBoolean("bypassDocumentValidation").getValue()); + } + if (arguments.containsKey("hint")) { + if (arguments.isDocument("hint")) { + options.hint(arguments.getDocument("hint")); + } else { + options.hintString(arguments.getString("hint").getValue()); + } + } + + UpdateResult updateResult; + if (clientSession == null) { + updateResult = getCollection(collectionOptions).replaceOne(arguments.getDocument("filter"), + arguments.getDocument("replacement"), options); + } else { + updateResult = getCollection(collectionOptions).replaceOne(clientSession, arguments.getDocument("filter"), + arguments.getDocument("replacement"), options); + } + + return toResult(updateResult); + } + + BsonDocument getUpdateManyResult(final BsonDocument collectionOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + UpdateOptions options = new UpdateOptions(); + if (arguments.containsKey("upsert")) { + options.upsert(arguments.getBoolean("upsert").getValue()); + } + if (arguments.containsKey("collation")) { + options.collation(getCollation(arguments.getDocument("collation"))); + } + if (arguments.containsKey("arrayFilters")) { + 
options.arrayFilters((getListOfDocuments(arguments.getArray("arrayFilters")))); + } + if (arguments.containsKey("bypassDocumentValidation")) { + options.bypassDocumentValidation(arguments.getBoolean("bypassDocumentValidation").getValue()); + } + if (arguments.containsKey("hint")) { + if (arguments.isDocument("hint")) { + options.hint(arguments.getDocument("hint")); + } else { + options.hintString(arguments.getString("hint").getValue()); + } + } + + UpdateResult updateResult; + if (clientSession == null) { + if (arguments.isDocument("update")) { + updateResult = getCollection(collectionOptions).updateMany(arguments.getDocument("filter"), arguments.getDocument("update"), + options); + } else { // update is a pipeline + updateResult = getCollection(collectionOptions).updateMany(arguments.getDocument("filter"), + getListOfDocuments(arguments.getArray("update")), options); + } + } else { + if (arguments.isDocument("update")) { + updateResult = getCollection(collectionOptions).updateMany(clientSession, arguments.getDocument("filter"), + arguments.getDocument("update"), options); + } else { // update is a pipeline + updateResult = getCollection(collectionOptions).updateMany(clientSession, arguments.getDocument("filter"), + getListOfDocuments(arguments.getArray("update")), options); + } + } + + return toResult(updateResult); + } + + BsonDocument getUpdateOneResult(final BsonDocument collectionOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + UpdateOptions options = new UpdateOptions(); + if (arguments.containsKey("upsert")) { + options.upsert(arguments.getBoolean("upsert").getValue()); + } + if (arguments.containsKey("collation")) { + options.collation(getCollation(arguments.getDocument("collation"))); + } + if (arguments.containsKey("arrayFilters")) { + options.arrayFilters((getListOfDocuments(arguments.getArray("arrayFilters")))); + } + if (arguments.containsKey("bypassDocumentValidation")) { + options.bypassDocumentValidation(arguments.getBoolean("bypassDocumentValidation").getValue()); + } + if (arguments.containsKey("hint")) { + if (arguments.isDocument("hint")) { + options.hint(arguments.getDocument("hint")); + } else { + options.hintString(arguments.getString("hint").getValue()); + } + } + + UpdateResult updateResult; + if (clientSession == null) { + if (arguments.isDocument("update")) { + updateResult = getCollection(collectionOptions).updateOne(arguments.getDocument("filter"), arguments.getDocument("update"), + options); + } else { // update is a pipeline + updateResult = getCollection(collectionOptions).updateOne(arguments.getDocument("filter"), + getListOfDocuments(arguments.getArray("update")), options); + } + } else { + if (arguments.isDocument("update")) { + updateResult = getCollection(collectionOptions).updateOne(clientSession, arguments.getDocument("filter"), + arguments.getDocument("update"), options); + } else { // update is a pipeline + updateResult = getCollection(collectionOptions).updateOne(clientSession, arguments.getDocument("filter"), + getListOfDocuments(arguments.getArray("update")), options); + } + } + + return toResult(updateResult); + } + + BsonDocument getBulkWriteResult(final BsonDocument collectionOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + List<WriteModel<BsonDocument>> writeModels = new ArrayList<>(); + for (BsonValue bsonValue : arguments.getArray("requests")) { + BsonDocument cur = bsonValue.asDocument(); + String name = cur.getString("name").getValue(); + BsonDocument requestArguments = 
cur.getDocument("arguments"); + if (name.equals("insertOne")) { + writeModels.add(new InsertOneModel<>(requestArguments.getDocument("document"))); + } else if (name.equals("updateOne")) { + if (requestArguments.isDocument("update")) { + writeModels.add(new UpdateOneModel<>(requestArguments.getDocument("filter"), + requestArguments.getDocument("update"), + getUpdateOptions(requestArguments))); + } else { // update is a pipeline + writeModels.add(new UpdateOneModel<>(requestArguments.getDocument("filter"), + getListOfDocuments(requestArguments.getArray("update")), + getUpdateOptions(requestArguments))); + } + } else if (name.equals("updateMany")) { + if (requestArguments.isDocument("update")) { + writeModels.add(new UpdateManyModel<>(requestArguments.getDocument("filter"), + requestArguments.getDocument("update"), + getUpdateOptions(requestArguments))); + } else { // update is a pipeline + writeModels.add(new UpdateManyModel<>(requestArguments.getDocument("filter"), + getListOfDocuments(requestArguments.getArray("update")), + getUpdateOptions(requestArguments))); + } + } else if (name.equals("deleteOne")) { + writeModels.add(new DeleteOneModel<>(requestArguments.getDocument("filter"), + getDeleteOptions(requestArguments))); + } else if (name.equals("deleteMany")) { + writeModels.add(new DeleteManyModel<>(requestArguments.getDocument("filter"), + getDeleteOptions(requestArguments))); + } else if (name.equals("replaceOne")) { + writeModels.add(new ReplaceOneModel<>(requestArguments.getDocument("filter"), + requestArguments.getDocument("replacement"), getReplaceOptions(requestArguments))); + } else { + throw new UnsupportedOperationException(format("Unsupported write request type: %s", name)); + } + } + + try { + BulkWriteResult bulkWriteResult; + BsonDocument optionsDocument = arguments.getDocument("options", new BsonDocument()); + BulkWriteOptions options = new BulkWriteOptions() + .ordered(optionsDocument.getBoolean("ordered", BsonBoolean.TRUE).getValue()); + if (optionsDocument.containsKey("bypassDocumentValidation")) { + options.bypassDocumentValidation(optionsDocument.getBoolean("bypassDocumentValidation").getValue()); + } + + if (clientSession == null) { + bulkWriteResult = getCollection(collectionOptions).bulkWrite(writeModels, options); + } else { + bulkWriteResult = getCollection(collectionOptions).bulkWrite(clientSession, writeModels, options); + } + + return toResult(bulkWriteResult, writeModels, Collections.emptyList()); + } catch (MongoBulkWriteException e) { + BsonDocument result = toResult(e.getWriteResult(), writeModels, e.getWriteErrors()); + result.put("error", BsonBoolean.TRUE); + return result; + } + } + + BsonDocument getRenameResult(final BsonDocument collectionOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + MongoNamespace toNamespace = new MongoNamespace(database.getName(), arguments.getString("to").getValue()); + if (clientSession == null) { + getCollection(collectionOptions).renameCollection(toNamespace); + } else { + getCollection(collectionOptions).renameCollection(clientSession, toNamespace); + } + return new BsonDocument("ok", new BsonInt32(1)); + } + + BsonDocument getDropResult(final BsonDocument collectionOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + if (clientSession == null) { + getCollection(collectionOptions).drop(); + } else { + getCollection(collectionOptions).drop(clientSession); + } + return new BsonDocument("ok", new BsonInt32(1)); + } + + // GridFSBucket operations + 
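+    // Note on test data: the GridFS operations below receive binary payloads encoded as
+    // {"$hex": "<hex digits>"} sub-documents; parseHexDocument (defined near the bottom of this
+    // class) rewrites such an entry into a BsonBinary before the bucket operation runs. A sketch
+    // of the transformation, with a hypothetical payload:
+    //   {"filename": "f", "source": {"$hex": "0001ff"}}  ->  {"filename": "f", "source": BsonBinary([0x00, 0x01, 0xFF])}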
+ BsonDocument getDownloadByNameResult(final BsonDocument collectionOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) throws IOException { + ByteArrayOutputStream outputStream = null; + + try { + outputStream = new ByteArrayOutputStream(); + GridFSDownloadOptions downloadOptions = new GridFSDownloadOptions(); + if (arguments.containsKey("options")) { + int revision = arguments.getDocument("options").getInt32("revision").getValue(); + downloadOptions = downloadOptions.revision(revision); + } + gridFSBucket.downloadToStream(arguments.getString("filename").getValue(), outputStream, downloadOptions); + } finally { + outputStream.close(); + } + return toResult("result", new BsonString(Hex.encode(outputStream.toByteArray()).toLowerCase())); + } + + BsonDocument getDeleteResult(final BsonDocument collectionOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) { + try { + gridFSBucket.delete(arguments.getObjectId("id").getValue()); + return new BsonDocument("ok", new BsonInt32(1)); + } catch (MongoGridFSException e) { + BsonDocument result = toResult("message", new BsonString(e.getMessage())); + result.put("error", BsonBoolean.TRUE); + return result; + } + } + + BsonDocument getDownloadResult(final BsonDocument collectionOptions, final BsonDocument arguments, + @Nullable final ClientSession clientSession) throws IOException { + ByteArrayOutputStream outputStream = null; + + try { + outputStream = new ByteArrayOutputStream(); + gridFSBucket.downloadToStream(arguments.getObjectId("id").getValue(), outputStream); + } finally { + outputStream.close(); + } + return toResult("result", new BsonString(Hex.encode(outputStream.toByteArray()).toLowerCase())); + } + + BsonDocument getUploadResult(final BsonDocument collectionOptions, final BsonDocument rawArguments, + @Nullable final ClientSession clientSession) { + BsonDocument arguments = parseHexDocument(rawArguments, "source"); + + GridFSBucket gridFSUploadBucket = gridFSBucket; + String filename = arguments.getString("filename").getValue(); + InputStream input = new ByteArrayInputStream(arguments.getBinary("source").getData()); + GridFSUploadOptions options = new GridFSUploadOptions(); + BsonDocument rawOptions = arguments.getDocument("options", new BsonDocument()); + if (rawOptions.containsKey("chunkSizeBytes")) { + options.chunkSizeBytes(rawOptions.getInt32("chunkSizeBytes").getValue()); + } + if (rawOptions.containsKey("metadata")) { + options.metadata(Document.parse(rawOptions.getDocument("metadata").toJson())); + } + + return new BsonDocument("objectId", new BsonObjectId(gridFSUploadBucket.uploadFromStream(filename, input, options))); + } + + // Change streams operations + + BsonDocument getClientWatchResult(final BsonDocument collectionOptions, final BsonDocument rawArguments, + @Nullable final ClientSession clientSession) { + MongoCursor<ChangeStreamDocument<Document>> cursor = mongoClient.watch().iterator(); + //noinspection TryFinallyCanBeTryWithResources + try { + return new BsonDocument("ok", new BsonInt32(1)); + } finally { + cursor.close(); + } + } + + BsonDocument getWatchResult(final BsonDocument collectionOptions, final BsonDocument rawArguments, + @Nullable final ClientSession clientSession) { + MongoCursor<ChangeStreamDocument<BsonDocument>> cursor = baseCollection.watch().iterator(); + //noinspection TryFinallyCanBeTryWithResources + try { + return new BsonDocument("ok", new BsonInt32(1)); + } finally { + cursor.close(); + } + } + + BsonDocument 
getDatabaseWatchResult(final BsonDocument collectionOptions, final BsonDocument rawArguments, + @Nullable final ClientSession clientSession) { + MongoCursor<ChangeStreamDocument<Document>> cursor = database.watch().iterator(); + //noinspection TryFinallyCanBeTryWithResources + try { + return new BsonDocument("ok", new BsonInt32(1)); + } finally { + cursor.close(); + } + } + + BsonDocument wait(final BsonDocument options, final BsonDocument rawArguments, @Nullable final ClientSession clientSession) { + try { + Thread.sleep(rawArguments.getNumber("ms").longValue()); + return new BsonDocument(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + + Collation getCollation(final BsonDocument bsonCollation) { + Collation.Builder builder = Collation.builder(); + if (bsonCollation.containsKey("locale")) { + builder.locale(bsonCollation.getString("locale").getValue()); + } + if (bsonCollation.containsKey("caseLevel")) { + builder.caseLevel(bsonCollation.getBoolean("caseLevel").getValue()); + } + if (bsonCollation.containsKey("caseFirst")) { + builder.collationCaseFirst(CollationCaseFirst.fromString(bsonCollation.getString("caseFirst").getValue())); + } + if (bsonCollation.containsKey("strength")) { + builder.collationStrength(CollationStrength.fromInt(bsonCollation.getInt32("strength").getValue())); + } + if (bsonCollation.containsKey("numericOrdering")) { + builder.numericOrdering(bsonCollation.getBoolean("numericOrdering").getValue()); + } + if (bsonCollation.containsKey("alternate")) { + builder.collationAlternate(CollationAlternate.fromString(bsonCollation.getString("alternate").getValue())); + } + if (bsonCollation.containsKey("maxVariable")) { + builder.collationMaxVariable(CollationMaxVariable.fromString(bsonCollation.getString("maxVariable").getValue())); + } + if (bsonCollation.containsKey("normalization")) { + builder.normalization(bsonCollation.getBoolean("normalization").getValue()); + } + if (bsonCollation.containsKey("backwards")) { + builder.backwards(bsonCollation.getBoolean("backwards").getValue()); + } + return builder.build(); + } + + private UpdateOptions getUpdateOptions(final BsonDocument requestArguments) { + UpdateOptions options = new UpdateOptions(); + if (requestArguments.containsKey("upsert")) { + options.upsert(true); + } + if (requestArguments.containsKey("arrayFilters")) { + options.arrayFilters(getListOfDocuments(requestArguments.getArray("arrayFilters"))); + } + if (requestArguments.containsKey("collation")) { + options.collation(getCollation(requestArguments.getDocument("collation"))); + } + if (requestArguments.containsKey("hint")) { + if (requestArguments.isDocument("hint")) { + options.hint(requestArguments.getDocument("hint")); + } else { + options.hintString(requestArguments.getString("hint").getValue()); + } + } + return options; + } + + private DeleteOptions getDeleteOptions(final BsonDocument requestArguments) { + DeleteOptions options = new DeleteOptions(); + if (requestArguments.containsKey("collation")) { + options.collation(getCollation(requestArguments.getDocument("collation"))); + } + if (requestArguments.containsKey("hint")) { + if (requestArguments.isDocument("hint")) { + options.hint(requestArguments.getDocument("hint")); + } else { + options.hintString(requestArguments.getString("hint").getValue()); + } + } + return options; + } + + private ReplaceOptions getReplaceOptions(final BsonDocument 
requestArguments) { + ReplaceOptions options = new ReplaceOptions(); + if (requestArguments.containsKey("upsert")) { + options.upsert(true); + } + if (requestArguments.containsKey("collation")) { + options.collation(getCollation(requestArguments.getDocument("collation"))); + } + if (requestArguments.containsKey("hint")) { + if (requestArguments.isDocument("hint")) { + options.hint(requestArguments.getDocument("hint")); + } else { + options.hintString(requestArguments.getString("hint").getValue()); + } + } + return options; + } + + @Nullable + private List<BsonDocument> getListOfDocuments(@Nullable final BsonArray bsonArray) { + if (bsonArray == null) { + return null; + } + List<BsonDocument> arrayFilters = new ArrayList<>(bsonArray.size()); + for (BsonValue cur : bsonArray) { + arrayFilters.add(cur.asDocument()); + } + return arrayFilters; + } + + private MongoCollection<BsonDocument> getCollection(final BsonDocument collectionOptions) { + MongoCollection<BsonDocument> retVal = baseCollection; + if (collectionOptions.containsKey("readPreference")) { + retVal = retVal.withReadPreference(getReadPreference(collectionOptions)); + } + + if (collectionOptions.containsKey("writeConcern")) { + WriteConcern writeConcern = getWriteConcern(collectionOptions); + retVal = retVal.withWriteConcern(writeConcern); + } + + if (collectionOptions.containsKey("readConcern")) { + ReadConcern readConcern = getReadConcern(collectionOptions); + retVal = retVal.withReadConcern(readConcern); + } + + return retVal; + } + + ReadPreference getReadPreference(final BsonDocument arguments) { + return ReadPreference.valueOf( + arguments.getDocument("readPreference").getString("mode").getValue()); + } + + WriteConcern getWriteConcern(final BsonDocument arguments) { + WriteConcern writeConcern = WriteConcern.ACKNOWLEDGED; + BsonDocument writeConcernDocument = arguments.getDocument("writeConcern"); + for (Map.Entry<String, BsonValue> entry : writeConcernDocument.entrySet()) { + if (entry.getKey().equals("w")) { + if (entry.getValue().isNumber()) { + writeConcern = writeConcern.withW(entry.getValue().asNumber().intValue()); + } else { + writeConcern = writeConcern.withW(entry.getValue().asString().getValue()); + } + } else if (entry.getKey().equals("j")) { + writeConcern = writeConcern.withJournal(entry.getValue().asBoolean().getValue()); + } else if (entry.getKey().equals("wtimeout")) { + writeConcern = writeConcern.withWTimeout(entry.getValue().asNumber().intValue(), TimeUnit.MILLISECONDS); + } else { + throw new UnsupportedOperationException("Unsupported write concern document key: " + entry.getKey()); + } + } + return writeConcern; + } + + ReadConcern getReadConcern(final BsonDocument arguments) { + return new ReadConcern(ReadConcernLevel.fromString(arguments.getDocument("readConcern").getString("level").getValue())); + } + + private BsonDocument parseHexDocument(final BsonDocument document, final String hexDocument) { + if (document.containsKey(hexDocument) && document.get(hexDocument).isDocument()) { + byte[] bytes = Hex.decode(document.getDocument(hexDocument).getString("$hex").getValue()); + document.put(hexDocument, new BsonBinary(bytes)); + } + return document; + } + + public static final Document LEGACY_HELLO_COMMAND = Document.parse("{isMaster: 1}"); + + boolean isSharded() { + return database.runCommand(LEGACY_HELLO_COMMAND).get("msg", "").equals("isdbgrid"); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/MongoClientFactorySpecification.groovy b/driver-sync/src/test/functional/com/mongodb/client/MongoClientFactorySpecification.groovy new file mode 100644 index 
00000000000..ff3a14a12a4 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/MongoClientFactorySpecification.groovy @@ -0,0 +1,70 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client + +import com.mongodb.ClusterFixture +import com.mongodb.MongoException + +import javax.naming.Reference +import javax.naming.StringRefAddr + +class MongoClientFactorySpecification extends FunctionalSpecification { + def mongoClientFactory = new MongoClientFactory() + + def 'should create MongoClient from environment'() { + given: + def environment = new Hashtable() + environment.put('connectionString', ClusterFixture.getConnectionString().getConnectionString()) + + when: + MongoClient client = mongoClientFactory.getObjectInstance(null, null, null, environment) as MongoClient + + then: + client != null + + cleanup: + client?.close() + } + + def 'should create MongoClient from obj that is of type Reference'() { + given: + def environment = new Hashtable() + def reference = new Reference(null, new StringRefAddr('connectionString', + ClusterFixture.getConnectionString().getConnectionString())) + + when: + MongoClient client = mongoClientFactory.getObjectInstance(reference, null, null, environment) as MongoClient + + then: + client != null + + cleanup: + client?.close() + } + + def 'should throw if no connection string is provided'() { + given: + def environment = new Hashtable() + + when: + mongoClientFactory.getObjectInstance(null, null, null, environment) + + then: + thrown(MongoException) + } +} + diff --git a/driver-sync/src/test/functional/com/mongodb/client/MongoClientSessionSpecification.groovy b/driver-sync/src/test/functional/com/mongodb/client/MongoClientSessionSpecification.groovy new file mode 100644 index 00000000000..2004f3df9ff --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/MongoClientSessionSpecification.groovy @@ -0,0 +1,349 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client + +import com.mongodb.ClientSessionOptions +import com.mongodb.MongoClientException +import com.mongodb.MongoClientSettings +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import com.mongodb.TransactionOptions +import com.mongodb.WriteConcern +import com.mongodb.client.model.Filters +import com.mongodb.event.CommandStartedEvent +import com.mongodb.internal.connection.TestCommandListener +import com.mongodb.internal.time.Timeout +import com.mongodb.spock.Slow +import org.bson.BsonBinarySubType +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonTimestamp +import org.bson.Document +import org.bson.types.ObjectId +import org.junit.Assert +import spock.lang.IgnoreIf + +import java.util.concurrent.TimeUnit + +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet +import static com.mongodb.client.Fixture.getDefaultDatabaseName +import static com.mongodb.client.Fixture.getMongoClient +import static com.mongodb.client.Fixture.getMongoClientSettings + +class MongoClientSessionSpecification extends FunctionalSpecification { + + def 'should throw IllegalArgumentException if options are null'() { + when: + getMongoClient().startSession(null) + + then: + thrown(IllegalArgumentException) + } + + def 'should create session with correct defaults'() { + expect: + clientSession.getOriginator() == getMongoClient() + clientSession.isCausallyConsistent() + clientSession.getOptions() == ClientSessionOptions.builder() + .defaultTransactionOptions(TransactionOptions.builder() + .readConcern(ReadConcern.DEFAULT) + .writeConcern(WriteConcern.ACKNOWLEDGED) + .readPreference(ReadPreference.primary()) + .build()) + .build() + clientSession.getClusterTime() == null + clientSession.getOperationTime() == null + clientSession.getServerSession() != null + + cleanup: + clientSession?.close() + + where: + clientSession << [getMongoClient().startSession(), + getMongoClient().startSession(ClientSessionOptions.builder().build())] + } + + def 'cluster time should advance'() { + given: + def firstOperationTime = new BsonTimestamp(42, 1) + def secondOperationTime = new BsonTimestamp(52, 1) + def thirdOperationTime = new BsonTimestamp(22, 1) + def firstClusterTime = new BsonDocument('clusterTime', firstOperationTime) + def secondClusterTime = new BsonDocument('clusterTime', secondOperationTime) + def olderClusterTime = new BsonDocument('clusterTime', thirdOperationTime) + + when: + def clientSession = getMongoClient().startSession(ClientSessionOptions.builder().build()) + + then: + clientSession.getClusterTime() == null + + when: + clientSession.advanceClusterTime(null) + + then: + clientSession.getClusterTime() == null + + when: + clientSession.advanceClusterTime(firstClusterTime) + + then: + clientSession.getClusterTime() == firstClusterTime + + when: + clientSession.advanceClusterTime(secondClusterTime) + + then: + clientSession.getClusterTime() == secondClusterTime + + when: + clientSession.advanceClusterTime(olderClusterTime) + + then: + clientSession.getClusterTime() == secondClusterTime + } + + def 'operation time should advance'() { + given: + def firstOperationTime = new BsonTimestamp(42, 1) + def secondOperationTime = new BsonTimestamp(52, 1) + def olderOperationTime = new BsonTimestamp(22, 1) + + when: + def clientSession = getMongoClient().startSession(ClientSessionOptions.builder().build()) + + then: + clientSession.getOperationTime() == null + + when: + clientSession.advanceOperationTime(null) + + then: + 
clientSession.getOperationTime() == null + + when: + clientSession.advanceOperationTime(firstOperationTime) + + then: + clientSession.getOperationTime() == firstOperationTime + + when: + clientSession.advanceOperationTime(secondOperationTime) + + then: + clientSession.getOperationTime() == secondOperationTime + + when: + clientSession.advanceOperationTime(olderOperationTime) + + then: + clientSession.getOperationTime() == secondOperationTime + } + + def 'methods that use the session should throw if the session is closed'() { + given: + def options = ClientSessionOptions.builder().build() + def clientSession = getMongoClient().startSession(options) + clientSession.close() + + when: + clientSession.getServerSession() + + then: + thrown(IllegalStateException) + + when: + clientSession.advanceOperationTime(new BsonTimestamp(42, 0)) + + then: + thrown(IllegalStateException) + + when: + clientSession.advanceClusterTime(new BsonDocument()) + + then: + thrown(IllegalStateException) + } + + def 'informational methods should not throw if the session is closed'() { + given: + def options = ClientSessionOptions.builder().build() + def clientSession = getMongoClient().startSession(options) + clientSession.close() + + when: + clientSession.getOptions() + clientSession.isCausallyConsistent() + clientSession.getClusterTime() + clientSession.getOperationTime() + + then: + noExceptionThrown() + } + + def 'should apply causally consistent session option to client session'() { + when: + def clientSession = getMongoClient().startSession(ClientSessionOptions.builder() + .causallyConsistent(causallyConsistent) + .build()) + + then: + clientSession != null + clientSession.isCausallyConsistent() == causallyConsistent + + where: + causallyConsistent << [true, false] + } + + def 'client session should have server session with valid identifier'() { + given: + def clientSession = getMongoClient().startSession(ClientSessionOptions.builder().build()) + + when: + def identifier = clientSession.getServerSession().identifier + + then: + identifier.size() == 1 + identifier.containsKey('id') + identifier.get('id').isBinary() + identifier.getBinary('id').getType() == BsonBinarySubType.UUID_STANDARD.value + identifier.getBinary('id').data.length == 16 + } + + def 'should use a default session'() { + given: + def commandListener = new TestCommandListener() + def settings = MongoClientSettings.builder(getMongoClientSettings()).commandListenerList([commandListener]).build() + def client = MongoClients.create(settings) + + when: + client.getDatabase('admin').runCommand(new BsonDocument('ping', new BsonInt32(1))) + + then: + commandListener.events.size() == 2 + def pingCommandStartedEvent = commandListener.events.get(0) as CommandStartedEvent + pingCommandStartedEvent.command.containsKey('lsid') + + cleanup: + client?.close() + } + + // This test attempts to demonstrate that causal consistency works correctly by inserting a document and then immediately + // searching for that document on a secondary by its _id and failing the test if the document is not found. Without causal consistency + // enabled, the expectation is that the test would eventually fail, since generally the find will execute on the secondary before + // the secondary has a chance to replicate the document. + // This test is inherently racy as it's possible that the server _does_ replicate fast enough and therefore the test passes anyway + // even if causal consistency was not actually in effect. 
For that reason the test iterates a number of times in order to increase + // confidence that it's really causal consistency that is causing the test to succeed + @Slow + def 'should find inserted document on a secondary when causal consistency is enabled'() { + given: + def collection = getMongoClient().getDatabase(getDefaultDatabaseName()).getCollection(getCollectionName()) + + expect: + def clientSession = getMongoClient().startSession(ClientSessionOptions.builder() + .causallyConsistent(true) + .build()) + try { + for (int i = 0; i < 16; i++) { + Document document = new Document('_id', i) + collection.insertOne(clientSession, document) + Document foundDocument = collection + .withReadPreference(ReadPreference.secondaryPreferred()) // read from secondary if available + .withReadConcern(readConcern) + .find(clientSession, document) + .maxTime(30, TimeUnit.SECONDS) // to avoid the test running forever in case replication is broken + .first() + if (foundDocument == null) { + Assert.fail('Should have found recently inserted document on secondary with causal consistency enabled') + } + } + } finally { + clientSession.close() + } + + where: + readConcern << [ReadConcern.DEFAULT, ReadConcern.LOCAL, ReadConcern.MAJORITY] + } + + def 'should not use an implicit session for an unacknowledged write'() { + given: + def commandListener = new TestCommandListener() + def settings = MongoClientSettings.builder(getMongoClientSettings()).commandListenerList([commandListener]).build() + def client = MongoClients.create(settings) + def collection = client.getDatabase(getDatabaseName()).getCollection(getCollectionName()) + def id = new ObjectId() + + when: + collection.withWriteConcern(WriteConcern.UNACKNOWLEDGED).insertOne(new Document('_id', id)) + + then: + def insertEvent = commandListener.events.get(0) as CommandStartedEvent + !insertEvent.command.containsKey('lsid') + + cleanup: + waitForInsertAcknowledgement(collection, id) + client?.close() + } + + def 'should throw exception if unacknowledged write used with explicit session'() { + given: + def session = getMongoClient().startSession() + + when: + getMongoClient().getDatabase(getDatabaseName()).getCollection(getCollectionName()) + .withWriteConcern(WriteConcern.UNACKNOWLEDGED) + .insertOne(session, new Document()) + + then: + thrown(MongoClientException) + + cleanup: + session?.close() + } + + + @IgnoreIf({ !isDiscoverableReplicaSet() }) + def 'should ignore unacknowledged write concern when in a transaction'() { + given: + def collection = getMongoClient().getDatabase(getDatabaseName()).getCollection(getCollectionName()) + collection.insertOne(new Document()) + + def session = getMongoClient().startSession() + session.startTransaction() + + when: + collection.withWriteConcern(WriteConcern.UNACKNOWLEDGED) + .insertOne(session, new Document()) + + then: + noExceptionThrown() + + cleanup: + session.close() + } + + void waitForInsertAcknowledgement(MongoCollection collection, ObjectId id) { + Document document = collection.find(Filters.eq(id)).first() + Timeout timeout = Timeout.expiresIn(5, TimeUnit.SECONDS, Timeout.ZeroSemantics.ZERO_DURATION_MEANS_INFINITE) + while (document == null) { + Thread.sleep(1) + document = collection.find(Filters.eq(id)).first() + timeout.onExpired { assert !"Timed out waiting for insert acknowledgement".trim() } + } + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/MongoClientTest.java b/driver-sync/src/test/functional/com/mongodb/client/MongoClientTest.java new file mode 100644 index 
00000000000..6d3413f032a --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/MongoClientTest.java @@ -0,0 +1,100 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.ClusterFixture; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoDriverInformation; +import com.mongodb.client.internal.MongoClientImpl; +import com.mongodb.connection.ClusterId; +import com.mongodb.event.ClusterListener; +import com.mongodb.event.ClusterOpeningEvent; +import com.mongodb.internal.connection.ClientMetadata; +import com.mongodb.internal.connection.Cluster; +import com.mongodb.internal.mockito.MongoMockito; +import org.junit.jupiter.api.Test; +import org.mockito.Mockito; + +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.when; + +class MongoClientTest { + + @SuppressWarnings("try") + @Test + void shouldIncludeApplicationNameInClusterId() throws InterruptedException, + ExecutionException, TimeoutException { + CompletableFuture<ClusterId> clusterIdFuture = new CompletableFuture<>(); + ClusterListener clusterListener = new ClusterListener() { + @Override + public void clusterOpening(final ClusterOpeningEvent event) { + clusterIdFuture.complete(event.getClusterId()); + } + }; + String applicationName = "test"; + try (MongoClient ignored = MongoClients.create(getMongoClientSettingsBuilder() + .applicationName(applicationName) + .applyToClusterSettings(builder -> builder.addClusterListener(clusterListener)) + .build())) { + ClusterId clusterId = clusterIdFuture.get(ClusterFixture.TIMEOUT, TimeUnit.SECONDS); + assertEquals(applicationName, clusterId.getDescription()); + } + } + + @Test + void shouldCloseExternalResources() throws Exception { + + //given + MongoDriverInformation mongoDriverInformation = MongoDriverInformation.builder().build(); + Cluster cluster = MongoMockito.mock( + Cluster.class, + mockedCluster -> { + doNothing().when(mockedCluster).close(); + when(mockedCluster.getClientMetadata()) + .thenReturn(new ClientMetadata("test", mongoDriverInformation)); + }); + AutoCloseable externalResource = MongoMockito.mock( + AutoCloseable.class, + mockedExternalResource -> { + try { + doNothing().when(mockedExternalResource).close(); + } catch (Exception e) { + throw new RuntimeException(e); + } + }); + + MongoClientImpl mongoClient = new MongoClientImpl( + cluster, + MongoClientSettings.builder().build(), + mongoDriverInformation, + externalResource); + + //when + mongoClient.close(); + + //then + Mockito.verify(externalResource).close(); + Mockito.verify(cluster).close(); + } +} diff --git 
a/driver-sync/src/test/functional/com/mongodb/client/MongoCollectionTest.java b/driver-sync/src/test/functional/com/mongodb/client/MongoCollectionTest.java new file mode 100644 index 00000000000..896fac88292 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/MongoCollectionTest.java @@ -0,0 +1,47 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import org.junit.jupiter.api.AfterAll; + +import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; + +public class MongoCollectionTest extends AbstractMongoCollectionTest { + + private static MongoClient mongoClient; + + @Override + protected MongoDatabase getDatabase(final String databaseName) { + return createMongoClient().getDatabase(databaseName); + } + + private MongoClient createMongoClient() { + if (mongoClient == null) { + mongoClient = MongoClients.create(getMongoClientSettingsBuilder().build()); + } + return mongoClient; + } + + + @AfterAll + public static void closeClient() { + if (mongoClient != null) { + mongoClient.close(); + mongoClient = null; + } + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/MongoWriteConcernWithResponseExceptionTest.java b/driver-sync/src/test/functional/com/mongodb/client/MongoWriteConcernWithResponseExceptionTest.java new file mode 100644 index 00000000000..6f90b3f5f01 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/MongoWriteConcernWithResponseExceptionTest.java @@ -0,0 +1,144 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client; + +import com.mongodb.Function; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoWriteConcernException; +import com.mongodb.ServerAddress; +import com.mongodb.assertions.Assertions; +import com.mongodb.event.CommandEvent; +import com.mongodb.event.CommandFailedEvent; +import com.mongodb.event.CommandListener; +import com.mongodb.event.CommandSucceededEvent; +import com.mongodb.internal.connection.MongoWriteConcernWithResponseException; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.Document; +import org.junit.Test; + +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet; +import static com.mongodb.ClusterFixture.serverVersionAtLeast; +import static com.mongodb.client.Fixture.getDefaultDatabaseName; +import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static java.util.Collections.singletonList; +import static org.junit.Assert.assertThrows; +import static org.junit.Assume.assumeTrue; + +/** + * Tests in this class check that the internal {@link MongoWriteConcernWithResponseException} does not leak from our API. + */ +public final class MongoWriteConcernWithResponseExceptionTest { + /** + * This test is similar to {@link RetryableWritesProseTest#originalErrorMustBePropagatedIfNoWritesPerformed()}. + * The difference is in the assertions: it also verifies situations where `writeConcernError` happens on the first attempt + * and on the last attempt. + */ + @Test + public void doesNotLeak() throws InterruptedException { + doesNotLeak(MongoClients::create); + } + + public static void doesNotLeak(final Function<MongoClientSettings, MongoClient> clientCreator) throws InterruptedException { + BsonDocument writeConcernErrorFpDoc = new BsonDocument() + .append("configureFailPoint", new BsonString("failCommand")) + .append("mode", new BsonDocument() + .append("times", new BsonInt32(2))) + .append("data", new BsonDocument() + .append("writeConcernError", new BsonDocument() + .append("code", new BsonInt32(91)) + .append("errorLabels", new BsonArray(Stream.of("RetryableWriteError") + .map(BsonString::new).collect(Collectors.toList()))) + .append("errmsg", new BsonString("")) + ) + .append("failCommands", new BsonArray(singletonList(new BsonString("insert"))))); + BsonDocument noWritesPerformedFpDoc = new BsonDocument() + .append("configureFailPoint", new BsonString("failCommand")) + .append("mode", new BsonDocument() + .append("times", new BsonInt32(1))) + .append("data", new BsonDocument() + .append("failCommands", new BsonArray(singletonList(new BsonString("insert")))) + .append("errorCode", new BsonInt32(10107)) + .append("errorLabels", new BsonArray(Stream.of("RetryableWriteError", "NoWritesPerformed") + .map(BsonString::new).collect(Collectors.toList())))); + doesNotLeak(clientCreator, writeConcernErrorFpDoc, true, noWritesPerformedFpDoc); + doesNotLeak(clientCreator, noWritesPerformedFpDoc, false, writeConcernErrorFpDoc); + } + + @SuppressWarnings("try") + private static void doesNotLeak( + final Function<MongoClientSettings, MongoClient> clientCreator, + final BsonDocument firstAttemptFpDoc, + final boolean firstAttemptCommandSucceededEvent, + final BsonDocument lastAttemptFpDoc) throws InterruptedException { + assumeTrue(serverVersionAtLeast(6, 0) && isDiscoverableReplicaSet()); 
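+        // Flow of this check: firstAttemptFpDoc is enabled before the insert, so the first attempt
+        // fails with a retryable error; the command listener then arms lastAttemptFpDoc exactly once
+        // (guarded by the AtomicBoolean), so the automatic retry fails as well, and the assertions
+        // below verify which exception type finally surfaces.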
ServerAddress primaryServerAddress = Fixture.getPrimary(); + CompletableFuture<FailPoint> futureFailPointFromListener = new CompletableFuture<>(); + CommandListener commandListener = new CommandListener() { + private final AtomicBoolean configureFailPoint = new AtomicBoolean(true); + + @Override + public void commandSucceeded(final CommandSucceededEvent event) { + if (firstAttemptCommandSucceededEvent) { + enableLastAttemptFp(event); + } + } + + @Override + public void commandFailed(final CommandFailedEvent event) { + if (!firstAttemptCommandSucceededEvent) { + enableLastAttemptFp(event); + } + } + + private void enableLastAttemptFp(final CommandEvent event) { + if (event.getCommandName().equals("insert") && configureFailPoint.compareAndSet(true, false)) { + Assertions.assertTrue(futureFailPointFromListener.complete(FailPoint.enable(lastAttemptFpDoc, primaryServerAddress))); + } + } + }; + try (MongoClient client = clientCreator.apply(getMongoClientSettingsBuilder() + .retryWrites(true) + .addCommandListener(commandListener) + .applyToServerSettings(builder -> builder.heartbeatFrequency(50, TimeUnit.MILLISECONDS)) + .build()); + FailPoint ignored = FailPoint.enable(firstAttemptFpDoc, primaryServerAddress)) { + MongoCollection<Document> collection = client.getDatabase(getDefaultDatabaseName()) + .getCollection("originalErrorMustBePropagatedIfNoWritesPerformed"); + collection.drop(); + assertThrows(MongoWriteConcernException.class, () -> { + // We want to see an exception indicating `writeConcernError`, + // but not in the form of `MongoWriteConcernWithResponseException`. + try { + collection.insertOne(new Document()); + } catch (MongoWriteConcernWithResponseException e) { + throw new AssertionError("The internal exception leaked.", e); + } + }); + } finally { + futureFailPointFromListener.thenAccept(FailPoint::close); + } + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/Name.java b/driver-sync/src/test/functional/com/mongodb/client/Name.java new file mode 100644 index 00000000000..dfbf6030520 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/Name.java @@ -0,0 +1,64 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import java.util.Objects; + +public class Name { + private final String name; + private final int count; + + public Name(final String name, final int count) { + this.name = name; + this.count = count; + } + + public String getName() { + return name; + } + + public int getCount() { + return count; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + Name name1 = (Name) o; + + if (count != name1.count) { + return false; + } + if (!Objects.equals(name, name1.name)) { + return false; + } + + return true; + } + + @Override + public int hashCode() { + int result = name != null ? 
name.hashCode() : 0; + result = 31 * result + count; + return result; + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/NameCodec.java b/driver-sync/src/test/functional/com/mongodb/client/NameCodec.java new file mode 100644 index 00000000000..71ed7b138fa --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/NameCodec.java @@ -0,0 +1,65 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import org.bson.BsonObjectId; +import org.bson.BsonReader; +import org.bson.BsonWriter; +import org.bson.codecs.CollectibleCodec; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.EncoderContext; + +class NameCodec implements CollectibleCodec<Name> { + + @Override + public void encode(final BsonWriter writer, final Name n, final EncoderContext encoderContext) { + writer.writeStartDocument(); + writer.writeString("name", n.getName()); + writer.writeInt32("count", n.getCount()); + writer.writeEndDocument(); + } + + @Override + public Name decode(final BsonReader reader, final DecoderContext decoderContext) { + reader.readStartDocument(); + // reads documents of the shape {_id: <string>, value: <double>} + String name = reader.readString("_id"); + int count = (int) reader.readDouble("value"); + + reader.readEndDocument(); + return new Name(name, count); + } + + @Override + public Class<Name> getEncoderClass() { + return Name.class; + } + + @Override + public boolean documentHasId(final Name document) { + return false; + } + + @Override + public BsonObjectId getDocumentId(final Name document) { + return null; + } + + @Override + public Name generateIdIfAbsentFromDocument(final Name document) { + return document; + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/NameCodecProvider.java b/driver-sync/src/test/functional/com/mongodb/client/NameCodecProvider.java new file mode 100644 index 00000000000..db72aa4c7ab --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/NameCodecProvider.java @@ -0,0 +1,32 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client; + +import org.bson.codecs.Codec; +import org.bson.codecs.configuration.CodecProvider; +import org.bson.codecs.configuration.CodecRegistry; + +public class NameCodecProvider implements CodecProvider { + @Override + @SuppressWarnings("unchecked") + public <T> Codec<T> get(final Class<T> clazz, final CodecRegistry registry) { + if (clazz.equals(Name.class)) { + return (Codec<T>) new NameCodec(); + } + return null; + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/OcspTest.java b/driver-sync/src/test/functional/com/mongodb/client/OcspTest.java new file mode 100644 index 00000000000..bef20ba7702 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/OcspTest.java @@ -0,0 +1,51 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.MongoTimeoutException; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.junit.Before; +import org.junit.Test; + +import static com.mongodb.ClusterFixture.getOcspShouldSucceed; +import static java.security.Security.getProperty; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +public class OcspTest { + @Before + public void setUp() { + assumeTrue(canRunTests()); + } + + @Test + public void testTLS() { + String uri = "mongodb://localhost/?serverSelectionTimeoutMS=2000&tls=true"; + try (MongoClient client = MongoClients.create(uri)) { + client.getDatabase("admin").runCommand(new BsonDocument("ping", new BsonInt32(1))); + } catch (MongoTimeoutException e) { + if (getOcspShouldSucceed()) { + fail("Unexpected exception when using OCSP with tls=true: " + e); + } + } + } + + private boolean canRunTests() { + return getProperty("ocsp.enable") != null && getProperty("ocsp.enable").equals("true"); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/ReadConcernTest.java b/driver-sync/src/test/functional/com/mongodb/client/ReadConcernTest.java new file mode 100644 index 00000000000..cea89765756 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/ReadConcernTest.java @@ -0,0 +1,71 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.client; + +import com.mongodb.ReadConcern; +import com.mongodb.event.CommandStartedEvent; +import com.mongodb.internal.connection.TestCommandListener; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.client.CommandMonitoringTestHelper.assertEventsEquality; +import static com.mongodb.client.Fixture.getDefaultDatabaseName; +import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; + +public class ReadConcernTest { + private MongoClient mongoClient; + private TestCommandListener commandListener; + + @Before + public void setUp() { + commandListener = new TestCommandListener(); + mongoClient = MongoClients.create(getMongoClientSettingsBuilder() + .addCommandListener(commandListener) + .applyToSocketSettings(builder -> builder.readTimeout(5, TimeUnit.SECONDS)) + .build()); + } + + @After + public void cleanUp() { + if (mongoClient != null) { + mongoClient.close(); + } + } + + @Test + public void shouldIncludeReadConcernInCommand() { + mongoClient.getDatabase(getDefaultDatabaseName()).getCollection("test") + .withReadConcern(ReadConcern.LOCAL).find().into(new ArrayList<>()); + + List<CommandStartedEvent> events = commandListener.getCommandStartedEvents(); + + BsonDocument commandDocument = new BsonDocument("find", new BsonString("test")) + .append("readConcern", ReadConcern.LOCAL.asDocument()) + .append("filter", new BsonDocument()); + + assertEventsEquality(Arrays.asList(new CommandStartedEvent(null, 1, 1, null, getDefaultDatabaseName(), + "find", commandDocument)), events); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/RetryableReadsProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/RetryableReadsProseTest.java new file mode 100644 index 00000000000..ccf18aad5b9 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/RetryableReadsProseTest.java @@ -0,0 +1,72 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import org.bson.Document; +import org.junit.jupiter.api.Test; + +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; + +import static com.mongodb.client.model.Filters.eq; + +/** + * See + * Retryable Reads Tests. + */ +final class RetryableReadsProseTest { + /** + * See + * + * PoolClearedError Retryability Test. + */ + @Test + void poolClearedExceptionMustBeRetryable() throws InterruptedException, ExecutionException, TimeoutException { + RetryableWritesProseTest.poolClearedExceptionMustBeRetryable(MongoClients::create, + mongoCollection -> mongoCollection.find(eq(0)).iterator().hasNext(), "find", false); + } + + /** + * See + * + * Retryable Reads Are Retried on a Different mongos When One is Available.
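+ * <p>Note: this delegates to the shared helper in {@code RetryableWritesProseTest} with {@code write == false}, so the + * fail point configured by the helper omits the {@code RetryableWriteError} label that the write variant requires.</p>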
+ */ + @Test + void retriesOnDifferentMongosWhenAvailable() { + RetryableWritesProseTest.retriesOnDifferentMongosWhenAvailable(MongoClients::create, + mongoCollection -> { + try (MongoCursor<Document> cursor = mongoCollection.find().iterator()) { + return cursor.hasNext(); + } + }, "find", false); + } + + /** + * See + * + * Retryable Reads Are Retried on the Same mongos When No Others are Available. + */ + @Test + void retriesOnSameMongosWhenAnotherNotAvailable() { + RetryableWritesProseTest.retriesOnSameMongosWhenAnotherNotAvailable(MongoClients::create, + mongoCollection -> { + try (MongoCursor<Document> cursor = mongoCollection.find().iterator()) { + return cursor.hasNext(); + } + }, "find", false); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/RetryableWritesProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/RetryableWritesProseTest.java new file mode 100644 index 00000000000..fae39864bb9 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/RetryableWritesProseTest.java @@ -0,0 +1,355 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.ConnectionString; +import com.mongodb.Function; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoServerException; +import com.mongodb.MongoWriteConcernException; +import com.mongodb.ServerAddress; +import com.mongodb.assertions.Assertions; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ConnectionDescription; +import com.mongodb.event.CommandEvent; +import com.mongodb.event.CommandFailedEvent; +import com.mongodb.event.CommandListener; +import com.mongodb.event.CommandSucceededEvent; +import com.mongodb.event.ConnectionCheckOutFailedEvent; +import com.mongodb.event.ConnectionCheckedOutEvent; +import com.mongodb.event.ConnectionPoolClearedEvent; +import com.mongodb.internal.connection.ServerAddressHelper; +import com.mongodb.internal.connection.TestCommandListener; +import com.mongodb.internal.connection.TestConnectionPoolListener; +import org.bson.BsonArray; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.Document; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static com.mongodb.ClusterFixture.getConnectionString; +import static com.mongodb.ClusterFixture.getMultiMongosConnectionString; +import static
com.mongodb.ClusterFixture.isDiscoverableReplicaSet; +import static com.mongodb.ClusterFixture.isSharded; +import static com.mongodb.ClusterFixture.isStandalone; +import static com.mongodb.ClusterFixture.serverVersionAtLeast; +import static com.mongodb.client.Fixture.getDefaultDatabaseName; +import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static com.mongodb.client.Fixture.getMultiMongosMongoClientSettingsBuilder; +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * See + * Retryable Write Prose Tests. + */ +public class RetryableWritesProseTest extends DatabaseTestCase { + + @BeforeEach + @Override + public void setUp() { + super.setUp(); + } + + /** + * Prose test #2. + */ + @Test + public void poolClearedExceptionMustBeRetryable() throws InterruptedException, ExecutionException, TimeoutException { + poolClearedExceptionMustBeRetryable(MongoClients::create, + mongoCollection -> mongoCollection.insertOne(new Document()), "insert", true); + } + + @SuppressWarnings("try") + public static <R> void poolClearedExceptionMustBeRetryable( + final Function<MongoClientSettings, MongoClient> clientCreator, + final Function<MongoCollection<Document>, R> operation, final String operationName, final boolean write) + throws InterruptedException, ExecutionException, TimeoutException { + assumeTrue(serverVersionAtLeast(4, 3) && !(write && isStandalone())); + TestConnectionPoolListener connectionPoolListener = new TestConnectionPoolListener(asList( + "connectionCheckedOutEvent", + "poolClearedEvent", + "connectionCheckOutFailedEvent")); + TestCommandListener commandListener = new TestCommandListener( + singletonList("commandStartedEvent"), asList("configureFailPoint", "drop")); + MongoClientSettings clientSettings = getMongoClientSettingsBuilder() + .applyToConnectionPoolSettings(builder -> builder + .maxSize(1) + .addConnectionPoolListener(connectionPoolListener)) + .applyToServerSettings(builder -> builder + /* We fake the server's state by configuring a fail point. This breaks the mechanism of the + * streaming server monitoring protocol + * (https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-monitoring.md#streaming-protocol) + * that allows the server to determine whether or not it needs to send a new state to the client. + * As a result, the client has to wait for at least its heartbeat delay until it hears back from a server + * (while it waits for a response, calling `ServerMonitor.connect` has no effect). + * Thus, we want to use a small heartbeat delay to reduce delays in the test. */ + .heartbeatFrequency(50, TimeUnit.MILLISECONDS)) + .retryReads(true) + .retryWrites(true) + .addCommandListener(commandListener) + .build(); + BsonDocument configureFailPoint = new BsonDocument() + .append("configureFailPoint", new BsonString("failCommand")) + .append("mode", new BsonDocument() + .append("times", new BsonInt32(1))) + .append("data", new BsonDocument() + .append("failCommands", new BsonArray(singletonList(new BsonString(operationName)))) + .append("errorCode", new BsonInt32(91)) + .append("errorLabels", write + ?
new BsonArray(singletonList(new BsonString("RetryableWriteError"))) + : new BsonArray()) + .append("blockConnection", BsonBoolean.valueOf(true)) + .append("blockTimeMS", new BsonInt32(1000))); + int timeoutSeconds = 5; + try (MongoClient client = clientCreator.apply(clientSettings); + FailPoint ignored = FailPoint.enable(configureFailPoint, Fixture.getPrimary())) { + MongoCollection<Document> collection = client.getDatabase(getDefaultDatabaseName()) + .getCollection("poolClearedExceptionMustBeRetryable"); + collection.drop(); + ExecutorService ex = Executors.newFixedThreadPool(2); + try { + Future<R> result1 = ex.submit(() -> operation.apply(collection)); + Future<R> result2 = ex.submit(() -> operation.apply(collection)); + connectionPoolListener.waitForEvent(ConnectionCheckedOutEvent.class, 1, timeoutSeconds, SECONDS); + connectionPoolListener.waitForEvent(ConnectionPoolClearedEvent.class, 1, timeoutSeconds, SECONDS); + connectionPoolListener.waitForEvent(ConnectionCheckOutFailedEvent.class, 1, timeoutSeconds, SECONDS); + result1.get(timeoutSeconds, SECONDS); + result2.get(timeoutSeconds, SECONDS); + } finally { + ex.shutdownNow(); + } + assertEquals(3, commandListener.getCommandStartedEvents().size()); + commandListener.getCommandStartedEvents().forEach(event -> assertEquals(operationName, event.getCommandName())); + } + } + + /** + * Prose test #3. + */ + @Test + public void originalErrorMustBePropagatedIfNoWritesPerformed() throws InterruptedException { + originalErrorMustBePropagatedIfNoWritesPerformed(MongoClients::create); + } + + @SuppressWarnings("try") + public static void originalErrorMustBePropagatedIfNoWritesPerformed( + final Function<MongoClientSettings, MongoClient> clientCreator) throws InterruptedException { + assumeTrue(serverVersionAtLeast(6, 0) && isDiscoverableReplicaSet()); + ServerAddress primaryServerAddress = Fixture.getPrimary(); + CompletableFuture<FailPoint> futureFailPointFromListener = new CompletableFuture<>(); + CommandListener commandListener = new CommandListener() { + private final AtomicBoolean configureFailPoint = new AtomicBoolean(true); + + @Override + public void commandSucceeded(final CommandSucceededEvent event) { + if (event.getCommandName().equals("insert") + && event.getResponse().getDocument("writeConcernError", new BsonDocument()) + .getInt32("code", new BsonInt32(-1)).intValue() == 91 + && configureFailPoint.compareAndSet(true, false)) { + Assertions.assertTrue(futureFailPointFromListener.complete(FailPoint.enable( + new BsonDocument() + .append("configureFailPoint", new BsonString("failCommand")) + .append("mode", new BsonDocument() + .append("times", new BsonInt32(1))) + .append("data", new BsonDocument() + .append("failCommands", new BsonArray(singletonList(new BsonString("insert")))) + .append("errorCode", new BsonInt32(10107)) + .append("errorLabels", new BsonArray(Stream.of("RetryableWriteError", "NoWritesPerformed") + .map(BsonString::new).collect(Collectors.toList())))), + primaryServerAddress + ))); + } + } + }; + BsonDocument failPointDocument = new BsonDocument() + .append("configureFailPoint", new BsonString("failCommand")) + .append("mode", new BsonDocument() + .append("times", new BsonInt32(1))) + .append("data", new BsonDocument() + .append("writeConcernError", new BsonDocument() + .append("code", new BsonInt32(91)) + .append("errorLabels", new BsonArray(Stream.of("RetryableWriteError") + .map(BsonString::new).collect(Collectors.toList()))) + .append("errmsg", new BsonString("")) + ) + .append("failCommands", new BsonArray(singletonList(new BsonString("insert"))))); + try (MongoClient
client = clientCreator.apply(getMongoClientSettingsBuilder() + .retryWrites(true) + .addCommandListener(commandListener) + .applyToServerSettings(builder -> + // see `poolClearedExceptionMustBeRetryable` for the explanation + builder.heartbeatFrequency(50, TimeUnit.MILLISECONDS)) + .build()); + FailPoint ignored = FailPoint.enable(failPointDocument, primaryServerAddress)) { + MongoCollection<Document> collection = client.getDatabase(getDefaultDatabaseName()) + .getCollection("originalErrorMustBePropagatedIfNoWritesPerformed"); + collection.drop(); + MongoWriteConcernException e = assertThrows(MongoWriteConcernException.class, () -> collection.insertOne(new Document())); + assertEquals(91, e.getCode()); + } finally { + futureFailPointFromListener.thenAccept(FailPoint::close); + } + } + + /** + * Prose test #4. + */ + @Test + public void retriesOnDifferentMongosWhenAvailable() { + retriesOnDifferentMongosWhenAvailable(MongoClients::create, + mongoCollection -> mongoCollection.insertOne(new Document()), "insert", true); + } + + @SuppressWarnings("try") + public static <R> void retriesOnDifferentMongosWhenAvailable( + final Function<MongoClientSettings, MongoClient> clientCreator, + final Function<MongoCollection<Document>, R> operation, final String operationName, final boolean write) { + if (write) { + assumeTrue(serverVersionAtLeast(4, 4)); + } + assumeTrue(isSharded()); + ConnectionString connectionString = getMultiMongosConnectionString(); + assumeTrue(connectionString != null); + ServerAddress s0Address = ServerAddressHelper.createServerAddress(connectionString.getHosts().get(0)); + ServerAddress s1Address = ServerAddressHelper.createServerAddress(connectionString.getHosts().get(1)); + BsonDocument failPointDocument = BsonDocument.parse( + "{\n" + + "    configureFailPoint: \"failCommand\",\n" + + "    mode: { times: 1 },\n" + + "    data: {\n" + + "        failCommands: [\"" + operationName + "\"],\n" + + (write + ?
" errorLabels: [\"RetryableWriteError\"]," : "") + "        errorCode: 6\n" + + "    }\n" + + "}\n"); + TestCommandListener commandListener = new TestCommandListener(singletonList("commandFailedEvent"), emptyList()); + try (FailPoint s0FailPoint = FailPoint.enable(failPointDocument, s0Address); + FailPoint s1FailPoint = FailPoint.enable(failPointDocument, s1Address); + MongoClient client = clientCreator.apply(getMultiMongosMongoClientSettingsBuilder() + .retryReads(true) + .retryWrites(true) + .addCommandListener(commandListener) + // explicitly specify only s0 and s1, in case `getMultiMongosMongoClientSettingsBuilder` has more + .applyToClusterSettings(builder -> builder.hosts(asList(s0Address, s1Address))) + .build())) { + MongoCollection<Document> collection = client.getDatabase(getDefaultDatabaseName()) + .getCollection("retriesOnDifferentMongosWhenAvailable"); + collection.drop(); + commandListener.reset(); + assertThrows(MongoServerException.class, () -> operation.apply(collection)); + List<CommandEvent> failedCommandEvents = commandListener.getEvents(); + assertEquals(2, failedCommandEvents.size(), failedCommandEvents::toString); + List<String> unexpectedCommandNames = failedCommandEvents.stream() + .map(CommandEvent::getCommandName) + .filter(commandName -> !commandName.equals(operationName)) + .collect(Collectors.toList()); + assertTrue(unexpectedCommandNames.isEmpty(), unexpectedCommandNames::toString); + Set<ServerAddress> failedServerAddresses = failedCommandEvents.stream() + .map(CommandEvent::getConnectionDescription) + .map(ConnectionDescription::getServerAddress) + .collect(Collectors.toSet()); + assertEquals(new HashSet<>(asList(s0Address, s1Address)), failedServerAddresses); + } + } + + /** + * Prose test #5. + */ + @Test + public void retriesOnSameMongosWhenAnotherNotAvailable() { + retriesOnSameMongosWhenAnotherNotAvailable(MongoClients::create, + mongoCollection -> mongoCollection.insertOne(new Document()), "insert", true); + } + + @SuppressWarnings("try") + public static <R> void retriesOnSameMongosWhenAnotherNotAvailable( + final Function<MongoClientSettings, MongoClient> clientCreator, + final Function<MongoCollection<Document>, R> operation, final String operationName, final boolean write) { + if (write) { + assumeTrue(serverVersionAtLeast(4, 4)); + } + assumeTrue(isSharded()); + ConnectionString connectionString = getConnectionString(); + ServerAddress s0Address = ServerAddressHelper.createServerAddress(connectionString.getHosts().get(0)); + BsonDocument failPointDocument = BsonDocument.parse( + "{\n" + + "    configureFailPoint: \"failCommand\",\n" + + "    mode: { times: 1 },\n" + + "    data: {\n" + + "        failCommands: [\"" + operationName + "\"],\n" + + (write + ?
" errorLabels: [\"RetryableWriteError\"]," : "") + "        errorCode: 6\n" + + "    }\n" + + "}\n"); + TestCommandListener commandListener = new TestCommandListener( + asList("commandFailedEvent", "commandSucceededEvent"), emptyList()); + try (FailPoint s0FailPoint = FailPoint.enable(failPointDocument, s0Address); + MongoClient client = clientCreator.apply(getMongoClientSettingsBuilder() + .retryReads(true) + .retryWrites(true) + .addCommandListener(commandListener) + // explicitly specify only s0, in case `getMongoClientSettingsBuilder` has more + .applyToClusterSettings(builder -> builder + .hosts(singletonList(s0Address)) + .mode(ClusterConnectionMode.MULTIPLE)) + .build())) { + MongoCollection<Document> collection = client.getDatabase(getDefaultDatabaseName()) + .getCollection("retriesOnSameMongosWhenAnotherNotAvailable"); + collection.drop(); + commandListener.reset(); + operation.apply(collection); + List<CommandEvent> commandEvents = commandListener.getEvents(); + assertEquals(2, commandEvents.size(), commandEvents::toString); + List<String> unexpectedCommandNames = commandEvents.stream() + .map(CommandEvent::getCommandName) + .filter(commandName -> !commandName.equals(operationName)) + .collect(Collectors.toList()); + assertTrue(unexpectedCommandNames.isEmpty(), unexpectedCommandNames::toString); + assertInstanceOf(CommandFailedEvent.class, commandEvents.get(0), commandEvents::toString); + assertEquals(s0Address, commandEvents.get(0).getConnectionDescription().getServerAddress(), commandEvents::toString); + assertInstanceOf(CommandSucceededEvent.class, commandEvents.get(1), commandEvents::toString); + assertEquals(s0Address, commandEvents.get(1).getConnectionDescription().getServerAddress(), commandEvents::toString); + } + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/ServerDiscoveryAndMonitoringProseTests.java b/driver-sync/src/test/functional/com/mongodb/client/ServerDiscoveryAndMonitoringProseTests.java new file mode 100644 index 00000000000..18b3b3f4fc5 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/ServerDiscoveryAndMonitoringProseTests.java @@ -0,0 +1,332 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.client; + +import com.mongodb.ClusterFixture; +import com.mongodb.MongoClientSettings; +import com.mongodb.event.ConnectionPoolClearedEvent; +import com.mongodb.event.ConnectionPoolListener; +import com.mongodb.event.ConnectionPoolReadyEvent; +import com.mongodb.event.ServerDescriptionChangedEvent; +import com.mongodb.event.ServerHeartbeatFailedEvent; +import com.mongodb.event.ServerHeartbeatSucceededEvent; +import com.mongodb.event.ServerListener; +import com.mongodb.event.ServerMonitorListener; +import com.mongodb.internal.diagnostics.logging.Logger; +import com.mongodb.internal.diagnostics.logging.Loggers; +import com.mongodb.internal.time.TimePointTest; +import com.mongodb.internal.time.Timeout; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonNull; +import org.bson.BsonString; +import org.bson.Document; +import org.junit.Ignore; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.LinkedBlockingQueue; + +import static com.mongodb.ClusterFixture.configureFailPoint; +import static com.mongodb.ClusterFixture.disableFailPoint; +import static com.mongodb.ClusterFixture.isStandalone; +import static com.mongodb.ClusterFixture.serverVersionAtLeast; +import static com.mongodb.client.Fixture.getDefaultDatabaseName; +import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static com.mongodb.internal.thread.InterruptionUtil.interruptAndCreateMongoInterruptedException; +import static com.mongodb.internal.time.Timeout.ZeroSemantics.ZERO_DURATION_MEANS_EXPIRED; +import static java.lang.String.format; +import static java.util.Arrays.asList; +import static java.util.Collections.singleton; +import static java.util.Collections.synchronizedList; +import static java.util.concurrent.TimeUnit.MILLISECONDS; +import static java.util.concurrent.TimeUnit.NANOSECONDS; +import static java.util.concurrent.TimeUnit.SECONDS; +import static org.bson.BsonDocument.parse; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.junit.Assume.assumeTrue; + +/** + * See + * Server Discovery And Monitoring—Test Plan + * and + * Prose Tests. 
+ */ +public class ServerDiscoveryAndMonitoringProseTests { + private static final Logger LOGGER = Loggers.getLogger(ServerDiscoveryAndMonitoringProseTests.class.getSimpleName()); + private static final long TEST_WAIT_TIMEOUT_MILLIS = SECONDS.toMillis(5); + + static final String HELLO = "hello"; + static final String LEGACY_HELLO = "isMaster"; + + @Test + @SuppressWarnings("try") + public void testHeartbeatFrequency() throws InterruptedException { + CountDownLatch latch = new CountDownLatch(5); + MongoClientSettings settings = getMongoClientSettingsBuilder() + .applyToServerSettings(builder -> { + builder.heartbeatFrequency(50, MILLISECONDS); + builder.addServerMonitorListener(new ServerMonitorListener() { + @Override + public void serverHeartbeatSucceeded(final ServerHeartbeatSucceededEvent event) { + latch.countDown(); + } + }); + }).build(); + + try (MongoClient ignored = MongoClients.create(settings)) { + assertTrue("Took longer than expected to reach the expected number of heartbeats", + latch.await(500, MILLISECONDS)); + } + } + + @Test + public void testRTTUpdates() throws InterruptedException { + assumeTrue(isStandalone()); + assumeTrue(serverVersionAtLeast(4, 4)); + + List<ServerDescriptionChangedEvent> events = synchronizedList(new ArrayList<>()); + MongoClientSettings settings = getMongoClientSettingsBuilder() + .applicationName("streamingRttTest") + .applyToServerSettings(builder -> { + builder.heartbeatFrequency(50, MILLISECONDS); + builder.addServerListener(new ServerListener() { + @Override + public void serverDescriptionChanged(final ServerDescriptionChangedEvent event) { + events.add(event); + } + }); + }).build(); + try (MongoClient client = MongoClients.create(settings)) { + client.getDatabase("admin").runCommand(new Document("ping", 1)); + Thread.sleep(250); + assertTrue(events.size() >= 1); + events.forEach(event -> + assertTrue(event.getNewDescription().getRoundTripTimeNanos() > 0)); + + configureFailPoint(parse(format("{" + + "configureFailPoint: \"failCommand\"," + + "mode: {times: 1000}," + + " data: {" + + "   failCommands: [\"%s\", \"%s\"]," + + "   blockConnection: true," + + "   blockTimeMS: 100," + + "   appName: \"streamingRttTest\"" + + " }" + + "}", LEGACY_HELLO, HELLO))); + + long startTime = System.currentTimeMillis(); + while (true) { + long rttMillis = NANOSECONDS.toMillis(client.getClusterDescription().getServerDescriptions().get(0) + .getRoundTripTimeNanos()); + if (rttMillis > 50) { + break; + } + assertFalse(System.currentTimeMillis() - startTime > 1000); + //noinspection BusyWait + Thread.sleep(50); + } + + } finally { + disableFailPoint("failCommand"); + } + } + + /** + * See + * Connection Pool Management.
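+ * <p>The expected flow: a heartbeat failure (induced below via {@code failCommand} on the handshake commands) clears the + * connection pool, and the next successful heartbeat marks the pool ready again; both listeners feed a single queue so the + * relative ordering of server-monitor and pool events can be asserted.</p>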
+ */ + @Test + @Ignore("JAVA-4484 - events are not guaranteed to be delivered in order") + @SuppressWarnings("try") + public void testConnectionPoolManagement() throws InterruptedException { + assumeTrue(serverVersionAtLeast(4, 3)); + BlockingQueue<Object> events = new LinkedBlockingQueue<>(); + ServerMonitorListener serverMonitorListener = new ServerMonitorListener() { + @Override + public void serverHeartbeatSucceeded(final ServerHeartbeatSucceededEvent event) { + put(events, event); + } + + @Override + public void serverHeartbeatFailed(final ServerHeartbeatFailedEvent event) { + put(events, event); + } + }; + ConnectionPoolListener connectionPoolListener = new ConnectionPoolListener() { + @Override + public void connectionPoolReady(final ConnectionPoolReadyEvent event) { + put(events, event); + } + + @Override + public void connectionPoolCleared(final ConnectionPoolClearedEvent event) { + put(events, event); + } + }; + String appName = "SDAMPoolManagementTest"; + MongoClientSettings clientSettings = getMongoClientSettingsBuilder() + .applicationName(appName) + .applyToClusterSettings(ClusterFixture::setDirectConnection) + .applyToServerSettings(builder -> builder + .heartbeatFrequency(100, MILLISECONDS) + .addServerMonitorListener(serverMonitorListener)) + .applyToConnectionPoolSettings(builder -> builder + .addConnectionPoolListener(connectionPoolListener)) + .build(); + try (MongoClient unused = MongoClients.create(clientSettings)) { + /* Note that ServerHeartbeatSucceededEvent type is sometimes allowed but never required. + * This is because DefaultServerMonitor does not send such events in situations when a server check happens as part + * of a connection handshake. */ + assertPoll(events, ServerHeartbeatSucceededEvent.class, singleton(ConnectionPoolReadyEvent.class)); + configureFailPoint(new BsonDocument() + .append("configureFailPoint", new BsonString("failCommand")) + .append("mode", new BsonDocument() + .append("times", new BsonInt32(2))) + .append("data", new BsonDocument() + .append("failCommands", new BsonArray(asList(new BsonString("isMaster"), new BsonString("hello")))) + .append("errorCode", new BsonInt32(1234)) + .append("appName", new BsonString(appName)))); + assertPoll(events, ServerHeartbeatSucceededEvent.class, + new HashSet<>(asList(ServerHeartbeatFailedEvent.class, ConnectionPoolClearedEvent.class))); + assertPoll(events, null, new HashSet<>(asList(ServerHeartbeatSucceededEvent.class, ConnectionPoolReadyEvent.class))); + } finally { + disableFailPoint("failCommand"); + } + } + + /** + * See + * + * Monitors sleep at least minHeartbeatFrequencyMS between checks. + */ + @Test + @SuppressWarnings("try") + public void monitorsSleepAtLeastMinHeartbeatFrequencyMSBetweenChecks() { + assumeTrue(serverVersionAtLeast(4, 3)); + long defaultMinHeartbeatIntervalMillis = MongoClientSettings.builder().build().getServerSettings() + .getMinHeartbeatFrequency(MILLISECONDS); + assertEquals(500, defaultMinHeartbeatIntervalMillis); + String appName = "SDAMMinHeartbeatFrequencyTest"; + MongoClientSettings clientSettings = getMongoClientSettingsBuilder() + .applicationName(appName) + .applyToClusterSettings(ClusterFixture::setDirectConnection) + .applyToClusterSettings(builder -> builder + .serverSelectionTimeout(5000, MILLISECONDS)) + /* We have to set the default value explicitly because `getMongoClientSettingsBuilder` sets the interval to + * a smaller value to make tests more responsive.
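+ * The fail point below fails the next five hello/isMaster checks, forcing the monitor to re-check several times with at + * least minHeartbeatFrequencyMS (500 ms) between checks before the ping can be served, which is what puts the measured + * duration into the asserted 2000-3500 ms window.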
*/ + .applyToServerSettings(builder -> builder.minHeartbeatFrequency(defaultMinHeartbeatIntervalMillis, MILLISECONDS)) + .build(); + BsonDocument configureFailPoint = new BsonDocument() + .append("configureFailPoint", new BsonString("failCommand")) + .append("mode", new BsonDocument() + .append("times", new BsonInt32(5))) + .append("data", new BsonDocument() + .append("failCommands", new BsonArray(asList(new BsonString("hello"), new BsonString("isMaster")))) + .append("errorCode", new BsonInt32(1234)) + .append("appName", new BsonString(appName))); + try (FailPoint ignored = FailPoint.enable(configureFailPoint, clientSettings.getClusterSettings().getHosts().get(0)); + MongoClient client = MongoClients.create(clientSettings)) { + long startNanos = System.nanoTime(); + client.getDatabase(getDefaultDatabaseName()).runCommand(new BsonDocument("ping", BsonNull.VALUE)); + long durationMillis = NANOSECONDS.toMillis(System.nanoTime() - startNanos); + String msg = durationMillis + " ms"; + assertTrue(msg, durationMillis >= 2000); + assertTrue(msg, durationMillis <= 3500); + } + } + + @Test + @Ignore("Run as part of DefaultServerMonitorTest") + public void shouldEmitHeartbeatStartedBeforeSocketIsConnected() { + // The implementation of this test is in DefaultServerMonitorTest.shouldEmitHeartbeatStartedBeforeSocketIsConnected, + // as it requires mocking and package access to `com.mongodb.internal.connection`. + } + + private static void assertPoll(final BlockingQueue<?> queue, @Nullable final Class<?> allowed, final Set<Class<?>> required) + throws InterruptedException { + assertPoll(queue, allowed, required, Timeout.expiresIn(TEST_WAIT_TIMEOUT_MILLIS, MILLISECONDS, ZERO_DURATION_MEANS_EXPIRED)); + } + + private static void assertPoll(final BlockingQueue<?> queue, @Nullable final Class<?> allowed, final Set<Class<?>> required, + final Timeout timeout) throws InterruptedException { + Set<Class<?>> encountered = new HashSet<>(); + while (true) { + Object element = poll(queue, timeout); + if (element != null) { + if (LOGGER.isInfoEnabled()) { + LOGGER.info("Polled " + element); + } + Class<?> elementClass = element.getClass(); + if (findAssignable(elementClass, required) + .map(found -> { + encountered.add(found); + return encountered.equals(required); + }).orElseGet(() -> { + assertTrue(format("allowed %s, required %s, actual %s", allowed, required, elementClass), + allowed != null && allowed.isAssignableFrom(elementClass)); + return false; + })) { + return; + } + } + if (TimePointTest.hasExpired(timeout)) { + fail(format("encountered %s, required %s", encountered, required)); + } + } + } + + @Nullable + private static Object poll(final BlockingQueue<?> queue, final Timeout timeout) throws InterruptedException { + long remainingNs = timeout.call(NANOSECONDS, + () -> -1L, + (ns) -> ns, + () -> 0L); + Object element; + if (remainingNs == -1) { + element = queue.take(); + } else if (remainingNs == 0) { + element = queue.poll(); + } else { + element = queue.poll(remainingNs, NANOSECONDS); + } + return element; + } + + private static Optional<Class<?>> findAssignable(final Class<?> from, final Set<Class<?>> toAnyOf) { + return toAnyOf.stream().filter(to -> to.isAssignableFrom(from)).findAny(); + } + + private static <E> void put(final BlockingQueue<E> q, final E e) { + try { + q.put(e); + } catch (InterruptedException t) { + throw interruptAndCreateMongoInterruptedException(null, t); + } + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/ServerSelectionProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/ServerSelectionProseTest.java new file
mode 100644 index 00000000000..d54eca0d1c6 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/ServerSelectionProseTest.java @@ -0,0 +1,24 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.client; + +import com.mongodb.MongoClientSettings; + +final class ServerSelectionProseTest extends AbstractServerSelectionProseTest { + protected MongoClient createClient(final MongoClientSettings settings) { + return MongoClients.create(settings); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/SessionsProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/SessionsProseTest.java new file mode 100644 index 00000000000..2e7d5a96b5a --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/SessionsProseTest.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.MongoClientSettings; + +public class SessionsProseTest extends AbstractSessionsProseTest { + @Override + protected MongoClient getMongoClient(final MongoClientSettings settings) { + return MongoClients.create(settings); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/Socks5ProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/Socks5ProseTest.java new file mode 100644 index 00000000000..20e3a35534d --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/Socks5ProseTest.java @@ -0,0 +1,199 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.client; + +import com.mongodb.ConnectionString; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoSocketOpenException; +import com.mongodb.MongoTimeoutException; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.event.ClusterDescriptionChangedEvent; +import com.mongodb.event.ClusterListener; +import org.bson.Document; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.condition.DisabledIf; +import org.junit.jupiter.api.condition.EnabledIf; +import org.junit.jupiter.api.extension.ConditionEvaluationResult; +import org.junit.jupiter.api.extension.ExecutionCondition; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.extension.ExtensionContext; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import org.mockito.ArgumentCaptor; +import org.mockito.Mockito; + +import java.util.List; +import java.util.Objects; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static java.lang.String.format; +import static org.junit.jupiter.api.Assumptions.assumeFalse; +import static org.mockito.Mockito.atLeast; + +/** + * See https://github.com/mongodb/specifications/blob/master/source/socks5-support/tests/README.md#prose-tests + */ +@ExtendWith(Socks5ProseTest.SocksProxyPropertyCondition.class) +class Socks5ProseTest { + private static final String MONGO_REPLICA_SET_URI_PREFIX = System.getProperty("org.mongodb.test.uri"); + private static final String MONGO_SINGLE_MAPPED_URI_PREFIX = System.getProperty("org.mongodb.test.uri.singleHost"); + private static final int PROXY_PORT = Integer.parseInt(System.getProperty("org.mongodb.test.uri.proxyPort")); + private MongoClient mongoClient; + + @AfterEach + void tearDown() { + if (mongoClient != null) { + mongoClient.close(); + } + } + + static Stream<ConnectionString> noAuthConnectionStrings() { + return Stream.of(buildConnectionString(MONGO_SINGLE_MAPPED_URI_PREFIX, "proxyHost=localhost&proxyPort=%d&directConnection=true"), + buildConnectionString(MONGO_REPLICA_SET_URI_PREFIX, "proxyHost=localhost&proxyPort=%d")); + } + + static Stream<ConnectionString> invalidAuthConnectionStrings() { + return Stream.of(buildConnectionString(MONGO_SINGLE_MAPPED_URI_PREFIX, + "proxyHost=localhost&proxyPort=%d&proxyUsername=nonexistentuser&proxyPassword=badauth&directConnection=true"), + buildConnectionString(MONGO_REPLICA_SET_URI_PREFIX, + "proxyHost=localhost&proxyPort=%d&proxyUsername=nonexistentuser&proxyPassword=badauth")); + } + + static Stream<ConnectionString> validAuthConnectionStrings() { + return Stream.of(buildConnectionString(MONGO_SINGLE_MAPPED_URI_PREFIX, + "proxyHost=localhost&proxyPort=%d&proxyUsername=username&proxyPassword=p4ssw0rd&directConnection=true"), + buildConnectionString(MONGO_REPLICA_SET_URI_PREFIX, + "proxyHost=localhost&proxyPort=%d&proxyUsername=username&proxyPassword=p4ssw0rd")); + } + + @ParameterizedTest(name = "Should connect without authentication in connection string. ConnectionString: {0}") + @MethodSource({"noAuthConnectionStrings", "invalidAuthConnectionStrings"}) + @DisabledIf("isAuthEnabled") + void shouldConnectWithoutAuth(final ConnectionString connectionString) { + mongoClient = MongoClients.create(connectionString); + runHelloCommand(mongoClient); + } + + @ParameterizedTest(name = "Should connect without authentication in proxy settings.
ConnectionString: {0}") + @MethodSource({"noAuthConnectionStrings", "invalidAuthConnectionStrings"}) + @DisabledIf("isAuthEnabled") + void shouldConnectWithoutAuthInProxySettings(final ConnectionString connectionString) { + mongoClient = MongoClients.create(buildMongoClientSettings(connectionString)); + runHelloCommand(mongoClient); + } + + @ParameterizedTest(name = "Should not connect without valid authentication in connection string. ConnectionString: {0}") + @MethodSource({"noAuthConnectionStrings", "invalidAuthConnectionStrings"}) + @EnabledIf("isAuthEnabled") + void shouldNotConnectWithoutAuth(final ConnectionString connectionString) { + ClusterListener clusterListener = Mockito.mock(ClusterListener.class); + + mongoClient = createMongoClient(MongoClientSettings.builder() + .applyConnectionString(connectionString), clusterListener); + + Assertions.assertThrows(MongoTimeoutException.class, () -> runHelloCommand(mongoClient)); + assertSocksAuthenticationIssue(clusterListener); + } + + @ParameterizedTest(name = "Should not connect without valid authentication in proxy settings. ConnectionString: {0}") + @MethodSource({"noAuthConnectionStrings", "invalidAuthConnectionStrings"}) + @EnabledIf("isAuthEnabled") + void shouldNotConnectWithoutAuthInProxySettings(final ConnectionString connectionString) { + ClusterListener clusterListener = Mockito.mock(ClusterListener.class); + + mongoClient = createMongoClient(MongoClientSettings.builder(buildMongoClientSettings(connectionString)), clusterListener); + + Assertions.assertThrows(MongoTimeoutException.class, () -> runHelloCommand(mongoClient)); + assertSocksAuthenticationIssue(clusterListener); + } + + @ParameterizedTest(name = "Should connect with valid authentication in connection string. ConnectionString: {0}") + @MethodSource("validAuthConnectionStrings") + @EnabledIf("isAuthEnabled") + void shouldConnectWithValidAuth(final ConnectionString connectionString) { + mongoClient = MongoClients.create(connectionString); + runHelloCommand(mongoClient); + } + + @ParameterizedTest(name = "Should connect with valid authentication in proxy settings. ConnectionString: {0}") + @MethodSource("validAuthConnectionStrings") + @EnabledIf("isAuthEnabled") + void shouldConnectWithValidAuthInProxySettings(final ConnectionString connectionString) { + mongoClient = MongoClients.create(buildMongoClientSettings(connectionString)); + runHelloCommand(mongoClient); + } + + private static void assertSocksAuthenticationIssue(final ClusterListener clusterListener) { + final ArgumentCaptor<ClusterDescriptionChangedEvent> captor = ArgumentCaptor.forClass(ClusterDescriptionChangedEvent.class); + Mockito.verify(clusterListener, atLeast(1)).clusterDescriptionChanged(captor.capture()); + List<Throwable> errors = captor.getAllValues().stream() + .map(ClusterDescriptionChangedEvent::getNewDescription) + .map(ClusterDescription::getServerDescriptions) + .flatMap(List::stream) + .map(ServerDescription::getException) + .filter(Objects::nonNull) + .collect(Collectors.toList()); + assumeFalse(errors.isEmpty()); + errors.forEach(throwable -> Assertions.assertEquals(MongoSocketOpenException.class, throwable.getClass())); + } + + private static void runHelloCommand(final MongoClient mongoClient) { + mongoClient.getDatabase("test").runCommand(new Document("hello", 1)); + } + + private static ConnectionString buildConnectionString(final String uriPrefix, final String uriParameters) { + String format; + if (uriPrefix.contains("/?")) { + format = uriPrefix + "&" + uriParameters; + } else { + format = uriPrefix + "/?"
+ uriParameters; + } + return new ConnectionString(format(format, PROXY_PORT)); + } + + private static MongoClientSettings buildMongoClientSettings(final ConnectionString connectionString) { + return MongoClientSettings.builder().applyConnectionString(connectionString).build(); + } + + private static MongoClient createMongoClient(final MongoClientSettings.Builder settingsBuilder, final ClusterListener clusterListener) { + return MongoClients.create(settingsBuilder + .applyToClusterSettings(builder -> { + builder.addClusterListener(clusterListener); + // to speed up test execution in case of socks authentication issues. Default is 30 seconds. + builder.serverSelectionTimeout(5, TimeUnit.SECONDS); + }) + .build()); + } + + private static boolean isAuthEnabled() { + return Boolean.parseBoolean(System.getProperty("org.mongodb.test.uri.socks.auth.enabled")); + } + + public static class SocksProxyPropertyCondition implements ExecutionCondition { + @Override + public ConditionEvaluationResult evaluateExecutionCondition(final ExtensionContext context) { + if (System.getProperty("org.mongodb.test.uri.socks.auth.enabled") != null) { + return ConditionEvaluationResult.enabled("Test is enabled because socks proxy configuration exists"); + } else { + return ConditionEvaluationResult.disabled("Test is disabled because socks proxy configuration is missing"); + } + } + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/SyncInitialDnsSeedlistDiscoveryTest.java b/driver-sync/src/test/functional/com/mongodb/client/SyncInitialDnsSeedlistDiscoveryTest.java new file mode 100644 index 00000000000..afe31d41d27 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/SyncInitialDnsSeedlistDiscoveryTest.java @@ -0,0 +1,36 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client; + +import com.mongodb.MongoClientSettings; +import org.bson.BsonDocument; + +import java.util.List; + +public class SyncInitialDnsSeedlistDiscoveryTest extends InitialDnsSeedlistDiscoveryTest { + public SyncInitialDnsSeedlistDiscoveryTest(final String filename, final String parentDirectory, final String uri, + final List<String> seeds, final Integer numSeeds, final List<String> hosts, final Integer numHosts, + final BsonDocument options, final BsonDocument parsedOptions, + final boolean isError, final boolean executePingCommand) { + super(filename, parentDirectory, uri, seeds, numSeeds, hosts, numHosts, options, parsedOptions, isError, executePingCommand); + } + + @Override + public MongoClient createMongoClient(final MongoClientSettings settings) { + return MongoClients.create(settings); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/TransactionProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/TransactionProseTest.java new file mode 100644 index 00000000000..9a1426ad887 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/TransactionProseTest.java @@ -0,0 +1,124 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoException; +import org.bson.Document; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.util.HashSet; +import java.util.Set; +import java.util.concurrent.TimeUnit; + +import static com.mongodb.ClusterFixture.getDefaultDatabaseName; +import static com.mongodb.ClusterFixture.getMultiMongosConnectionString; +import static com.mongodb.ClusterFixture.isSharded; +import static org.junit.Assert.assertTrue; +import static org.junit.Assume.assumeTrue; + +// See https://github.com/mongodb/specifications/blob/master/source/transactions/tests/README.md#mongos-pinning-prose-tests +public class TransactionProseTest { + private MongoClient client; + private MongoCollection<Document> collection; + + @Before + public void setUp() { + assumeTrue(canRunTests()); + MongoClientSettings.Builder builder = MongoClientSettings.builder() + .applyConnectionString(getMultiMongosConnectionString()); + + client = MongoClients.create(MongoClientSettings.builder(builder.build()) + .applyToSocketSettings(builder1 -> builder1.readTimeout(5, TimeUnit.SECONDS)) + .build()); + + collection = client.getDatabase(getDefaultDatabaseName()).getCollection(getClass().getName()); + collection.drop(); + } + + @After + public void tearDown() { + if (collection != null) { + collection.drop(); + } + if (client != null) { + client.close(); + } + } + + // Test that starting a new transaction on a pinned ClientSession unpins the session and normal + // server selection is performed for the next operation.
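+ // A rough sketch of the pinning mechanics under test (two mongoses assumed; doc1/doc2 are hypothetical documents): + //   session.startTransaction(); + //   collection.insertOne(session, doc1); // server selection picks a mongos and pins the session to it + //   collection.insertOne(session, doc2); // later operations in the same transaction must go to that mongos + //   session.commitTransaction(); + // Starting the next transaction (or, as the second test checks, running a non-transactional operation) unpins the + // session, so subsequent operations may be routed to any mongos.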
+ @Test + public void testNewTransactionUnpinsSession() throws MongoException { + ClientSession session = null; + try { + collection.insertOne(Document.parse("{}")); + session = client.startSession(); + session.startTransaction(); + collection.insertOne(session, Document.parse("{ _id : 1 }")); + session.commitTransaction(); + + Set<FindIterable<Document>> addresses = new HashSet<>(); + int iterations = 50; + while (iterations-- > 0) { + session.startTransaction(); + addresses.add(collection.find(session, Document.parse("{}"))); + session.commitTransaction(); + } + assertTrue(addresses.size() > 1); + } finally { + if (session != null) { + session.close(); + } + if (collection != null) { + collection.drop(); + } + } + } + + // Test that non-transaction operations using a pinned ClientSession unpin the session and that normal server selection is performed. + @Test + public void testNonTransactionOpsUnpinsSession() throws MongoException { + ClientSession session = null; + try { + collection.insertOne(Document.parse("{}")); + session = client.startSession(); + session.startTransaction(); + collection.insertOne(session, Document.parse("{ _id : 1 }")); + + Set<FindIterable<Document>> addresses = new HashSet<>(); + int iterations = 50; + while (iterations-- > 0) { + addresses.add(collection.find(session, Document.parse("{}"))); + } + assertTrue(addresses.size() > 1); + } finally { + if (session != null) { + session.close(); + } + if (collection != null) { + collection.drop(); + } + } + } + + private boolean canRunTests() { + return isSharded(); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/UuidRepresentationTest.java b/driver-sync/src/test/functional/com/mongodb/client/UuidRepresentationTest.java new file mode 100644 index 00000000000..9d7c275ff2e --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/UuidRepresentationTest.java @@ -0,0 +1,54 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.client; + +import org.bson.BsonBinarySubType; +import org.bson.UuidRepresentation; +import org.bson.codecs.configuration.CodecRegistry; +import org.junit.After; + +import java.util.UUID; + +public class UuidRepresentationTest extends AbstractUuidRepresentationTest { + private MongoClient mongoClient; + + public UuidRepresentationTest(final UuidRepresentation uuidRepresentation, final BsonBinarySubType subType, + final UUID uuid, final byte[] encodedValue, final byte[] standardEncodedValue) { + super(uuidRepresentation, subType, uuid, encodedValue, standardEncodedValue); + } + + @Override + protected void createMongoClient(final UuidRepresentation uuidRepresentation, final CodecRegistry codecRegistry) { + mongoClient = MongoClients.create(Fixture.getMongoClientSettingsBuilder() + .uuidRepresentation(uuidRepresentation) + .codecRegistry(codecRegistry) + .build()); + } + + @Override + protected MongoDatabase getDatabase(final String databaseName) { + return mongoClient.getDatabase(databaseName); + } + + @After + public void cleanUp() { + if (mongoClient != null) { + mongoClient.close(); + } + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/WithTransactionProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/WithTransactionProseTest.java new file mode 100644 index 00000000000..1afbf61565e --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/WithTransactionProseTest.java @@ -0,0 +1,209 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.ClientSessionOptions;
+import com.mongodb.MongoClientException;
+import com.mongodb.MongoException;
+import com.mongodb.TransactionOptions;
+import com.mongodb.client.internal.ClientSessionClock;
+import com.mongodb.client.model.Sorts;
+import org.bson.Document;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+
+import java.util.concurrent.TimeUnit;
+
+import static com.mongodb.ClusterFixture.TIMEOUT;
+import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet;
+import static com.mongodb.ClusterFixture.isSharded;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
+// See https://github.com/mongodb/specifications/blob/master/source/transactions-convenient-api/tests/README.md#prose-tests
+public class WithTransactionProseTest extends DatabaseTestCase {
+    private static final long START_TIME_MS = 1L;
+    private static final long ERROR_GENERATING_INTERVAL = 121000L;
+
+    @BeforeEach
+    @Override
+    public void setUp() {
+        assumeTrue(canRunTests());
+        super.setUp();
+
+        // create the collection before starting transactions
+        collection.insertOne(Document.parse("{ _id : 0 }"));
+    }
+
+    //
+    // Test that a callback raising a custom exception or error (one that includes neither the UnknownTransactionCommitResult nor the
+    // TransientTransactionError label) bypasses any retry logic within withTransaction and has its error propagated to the caller of
+    // withTransaction.
+    //
+    @Test
+    public void testCallbackRaisesCustomError() {
+        final String exceptionMessage = "NotTransientOrUnknownError";
+        try (ClientSession session = client.startSession()) {
+            session.withTransaction((TransactionBody<Void>) () -> {
+                throw new MongoException(exceptionMessage);
+            });
+            // should not get here
+            fail("Test should have thrown an exception.");
+        } catch (MongoException e) {
+            assertEquals(exceptionMessage, e.getMessage());
+        }
+    }
+
+    //
+    // Test a callback that returns a custom value (e.g. boolean, string, object). Execute the callback using withTransaction and
+    // assert that its return value is propagated to the caller of withTransaction.
+    //
+    @Test
+    public void testCallbackReturnsValue() {
+        try (ClientSession session = client.startSession()) {
+            final String msg = "Inserted document";
+            String returnValueFromCallback = session.withTransaction(() -> {
+                collection.insertOne(Document.parse("{ _id : 1 }"));
+                return msg;
+            });
+            assertEquals(msg, returnValueFromCallback);
+        }
+    }
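+    // The retry-timeout tests below drive time through ClientSessionClock: withTransaction stops retrying once more
+    // than 120 seconds have elapsed, so jumping the clock from START_TIME_MS (1 ms) to ERROR_GENERATING_INTERVAL
+    // (121000 ms, just past the 120-second limit) inside the callback guarantees the retry deadline has expired.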
+    //
+    // If the callback raises an error with the TransientTransactionError label and the retry timeout has been exceeded, withTransaction
+    // should propagate the error to its caller.
+    //
+    @Test
+    public void testRetryTimeoutEnforcedTransientTransactionError() {
+        final String errorMessage = "transient transaction error";
+
+        try (ClientSession session = client.startSession()) {
+            ClientSessionClock.INSTANCE.setTime(START_TIME_MS);
+            session.withTransaction((TransactionBody<Void>) () -> {
+                ClientSessionClock.INSTANCE.setTime(ERROR_GENERATING_INTERVAL);
+                MongoException e = new MongoException(112, errorMessage);
+                e.addLabel(MongoException.TRANSIENT_TRANSACTION_ERROR_LABEL);
+                throw e;
+            });
+            fail("Test should have thrown an exception.");
+        } catch (Exception e) {
+            assertEquals(errorMessage, e.getMessage());
+            assertTrue(((MongoException) e).getErrorLabels().contains(MongoException.TRANSIENT_TRANSACTION_ERROR_LABEL));
+        }
+    }
+
+    //
+    // If committing raises an error with the UnknownTransactionCommitResult label, the error is not a write concern timeout, and the
+    // retry timeout has been exceeded, withTransaction should propagate the error to its caller.
+    //
+    @Test
+    public void testRetryTimeoutEnforcedUnknownTransactionCommit() {
+        MongoDatabase failPointAdminDb = client.getDatabase("admin");
+        failPointAdminDb.runCommand(
+                Document.parse("{'configureFailPoint': 'failCommand', 'mode': {'times': 2}, "
+                        + "'data': {'failCommands': ['commitTransaction'], 'errorCode': 91, 'closeConnection': false}}"));
+
+        try (ClientSession session = client.startSession()) {
+            ClientSessionClock.INSTANCE.setTime(START_TIME_MS);
+            session.withTransaction((TransactionBody<Void>) () -> {
+                ClientSessionClock.INSTANCE.setTime(ERROR_GENERATING_INTERVAL);
+                collection.insertOne(session, new Document("_id", 2));
+                return null;
+            });
+            fail("Test should have thrown an exception.");
+        } catch (Exception e) {
+            assertEquals(91, ((MongoException) e).getCode());
+            assertTrue(((MongoException) e).getErrorLabels().contains(MongoException.UNKNOWN_TRANSACTION_COMMIT_RESULT_LABEL));
+        } finally {
+            failPointAdminDb.runCommand(Document.parse("{'configureFailPoint': 'failCommand', 'mode': 'off'}"));
+        }
+    }
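+    // The fail point below returns NoSuchTransaction (code 251); servers attach the TransientTransactionError label to
+    // such responses inside a transaction, which is the label the test asserts on after the retry deadline has passed.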
+    //
+    // If committing raises an error with the TransientTransactionError label and the retry timeout has been exceeded, withTransaction
+    // should propagate the error to its caller. This case may occur if the commit was internally retried against a new primary after
+    // a failover and the second primary returned a NoSuchTransaction error response.
+    //
+    @Test
+    public void testRetryTimeoutEnforcedTransientTransactionErrorOnCommit() {
+        MongoDatabase failPointAdminDb = client.getDatabase("admin");
+        failPointAdminDb.runCommand(
+                Document.parse("{'configureFailPoint': 'failCommand', 'mode': {'times': 2}, "
+                        + "'data': {'failCommands': ['commitTransaction'], 'errorCode': 251, 'codeName': 'NoSuchTransaction', "
+                        + "'errmsg': 'Transaction 0 has been aborted', 'closeConnection': false}}"));
+
+        try (ClientSession session = client.startSession()) {
+            ClientSessionClock.INSTANCE.setTime(START_TIME_MS);
+            session.withTransaction((TransactionBody<Void>) () -> {
+                ClientSessionClock.INSTANCE.setTime(ERROR_GENERATING_INTERVAL);
+                collection.insertOne(session, Document.parse("{ _id : 1 }"));
+                return null;
+            });
+            fail("Test should have thrown an exception.");
+        } catch (Exception e) {
+            assertEquals(251, ((MongoException) e).getCode());
+            assertTrue(((MongoException) e).getErrorLabels().contains(MongoException.TRANSIENT_TRANSACTION_ERROR_LABEL));
+        } finally {
+            failPointAdminDb.runCommand(Document.parse("{'configureFailPoint': 'failCommand', 'mode': 'off'}"));
+        }
+    }
+
+    //
+    // Ensure the timeout cannot be overridden inside a transaction
+    //
+    @Test
+    public void testTimeoutMS() {
+        try (ClientSession session = client.startSession(ClientSessionOptions.builder()
+                .defaultTransactionOptions(TransactionOptions.builder().timeout(TIMEOUT, TimeUnit.SECONDS).build())
+                .build())) {
+            assertThrows(MongoClientException.class, () -> session.withTransaction(() -> {
+                collection.insertOne(session, Document.parse("{ _id : 1 }"));
+                collection.withTimeout(2, TimeUnit.MINUTES).find(session).first();
+                return -1;
+            }));
+        }
+    }
+
+    //
+    // Ensure legacy timeout settings don't cause issues in sessions
+    //
+    @Test
+    public void testTimeoutMSAndLegacySettings() {
+        try (ClientSession session = client.startSession(ClientSessionOptions.builder()
+                .defaultTransactionOptions(TransactionOptions.builder().timeout(TIMEOUT, TimeUnit.SECONDS).build())
+                .build())) {
+            Document document = Document.parse("{ _id : 1 }");
+            Document returnValueFromCallback = session.withTransaction(() -> {
+                collection.insertOne(session, document);
+                Document found = collection.find(session)
+                        .maxAwaitTime(1, TimeUnit.MINUTES)
+                        .sort(Sorts.descending("_id"))
+                        .first();
+                return found != null ? found : new Document();
+            });
+            assertEquals(document, returnValueFromCallback);
+        }
+    }
+
+    private boolean canRunTests() {
+        return isSharded() || isDiscoverableReplicaSet();
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/auth/AbstractX509AuthenticationTest.java b/driver-sync/src/test/functional/com/mongodb/client/auth/AbstractX509AuthenticationTest.java
new file mode 100644
index 00000000000..0d003210f3d
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/auth/AbstractX509AuthenticationTest.java
@@ -0,0 +1,182 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.auth;
+
+import com.mongodb.MongoClientSettings;
+import com.mongodb.MongoCommandException;
+import com.mongodb.MongoSecurityException;
+import com.mongodb.client.Fixture;
+import com.mongodb.client.MongoClient;
+import com.mongodb.connection.NettyTransportSettings;
+import io.netty.handler.ssl.SslContextBuilder;
+import io.netty.handler.ssl.SslProvider;
+import org.junit.jupiter.api.extension.ConditionEvaluationResult;
+import org.junit.jupiter.api.extension.ExecutionCondition;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.junit.jupiter.api.extension.ExtensionContext;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+
+import javax.net.ssl.KeyManagerFactory;
+import javax.net.ssl.SSLContext;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.security.KeyStore;
+import java.security.KeyStoreException;
+import java.security.NoSuchAlgorithmException;
+import java.security.UnrecoverableKeyException;
+import java.security.cert.CertificateException;
+import java.util.stream.Stream;
+
+import static com.mongodb.AuthenticationMechanism.MONGODB_X509;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+@ExtendWith(AbstractX509AuthenticationTest.X509AuthenticationPropertyCondition.class)
+public abstract class AbstractX509AuthenticationTest {
+
+    private static final String KEYSTORE_PASSWORD = "test";
+
+    protected abstract MongoClient createMongoClient(MongoClientSettings mongoClientSettings);
+
+    private static Stream<Arguments> shouldAuthenticateWithClientCertificate() throws Exception {
+        String keystoreFileName = "existing_user.p12";
+        return getArgumentForKeystore(keystoreFileName);
+    }
+
+    @ParameterizedTest(name = "should authenticate with client certificate. MongoClientSettings: {0}")
+    @MethodSource
+    public void shouldAuthenticateWithClientCertificate(final MongoClientSettings mongoClientSettings) {
+        // given
+        try (MongoClient client = createMongoClient(mongoClientSettings)) {
+
+            // when & then command completes successfully with x509 authentication
+            client.getDatabase("test").getCollection("test").estimatedDocumentCount();
+        }
+    }
+
+    private static Stream<Arguments> shouldPassMutualTLSWithClientCertificateAndFailAuthenticateWithAbsentUser() throws Exception {
+        String keystoreFileName = "non_existing_user.p12";
+        return getArgumentForKeystore(keystoreFileName);
+    }
+
+    @ParameterizedTest(name = "should pass mutual TLS with client certificate and fail to authenticate with absent user. "
+            + "MongoClientSettings: {0}")
+    @MethodSource
+    public void shouldPassMutualTLSWithClientCertificateAndFailAuthenticateWithAbsentUser(final MongoClientSettings mongoClientSettings) {
+        // given
+        try (MongoClient client = createMongoClient(mongoClientSettings)) {
+
+            // when & then
+            MongoSecurityException mongoSecurityException = assertThrows(MongoSecurityException.class,
+                    () -> client.getDatabase("test").getCollection("test").estimatedDocumentCount());
+
+            assertTrue(mongoSecurityException.getMessage().contains("Exception authenticating"));
+            MongoCommandException mongoCommandException = (MongoCommandException) mongoSecurityException.getCause();
+
+            assertTrue(mongoCommandException.getMessage().contains("Could not find user"));
+            assertEquals(11, mongoCommandException.getCode());
+        }
+    }
+
+    private static Stream<Arguments> getArgumentForKeystore(final String keystoreFileName) throws Exception {
+        SSLContext context = buildSslContextFromKeyStore(keystoreFileName);
+        MongoClientSettings.Builder mongoClientSettingsBuilder = Fixture.getMongoClientSettingsBuilder();
+        verifyX509AuthenticationIsRequired(mongoClientSettingsBuilder);
+
+        return Stream.of(
+                Arguments.of(mongoClientSettingsBuilder
+                        .applyToSslSettings(builder -> builder.context(context))
+                        .build()),
+
+                Arguments.of(mongoClientSettingsBuilder
+                        .applyToSslSettings(builder -> builder.context(context))
+                        .transportSettings(NettyTransportSettings.nettyBuilder()
+                                .sslContext(SslContextBuilder.forClient()
+                                        .sslProvider(SslProvider.JDK)
+                                        .keyManager(getKeyManagerFactory(keystoreFileName))
+                                        .build())
+                                .build())
+                        .build()),
+
+                Arguments.of(mongoClientSettingsBuilder
+                        .applyToSslSettings(builder -> builder.context(context))
+                        .transportSettings(NettyTransportSettings.nettyBuilder()
+                                .sslContext(SslContextBuilder.forClient()
+                                        .sslProvider(SslProvider.OPENSSL)
+                                        .keyManager(getKeyManagerFactory(keystoreFileName))
+                                        .build())
+                                .build())
+                        .build())
+        );
+    }
+
+    private static SSLContext buildSslContextFromKeyStore(final String keystoreFileName) throws Exception {
+        KeyManagerFactory keyManagerFactory = getKeyManagerFactory(keystoreFileName);
+        SSLContext sslContext = SSLContext.getInstance("TLS");
+        sslContext.init(keyManagerFactory.getKeyManagers(), null, null);
+        return sslContext;
+    }
+
+    private static KeyManagerFactory getKeyManagerFactory(final String keystoreFileName)
+            throws KeyStoreException, IOException, NoSuchAlgorithmException, CertificateException, UnrecoverableKeyException {
+        KeyStore ks = KeyStore.getInstance("PKCS12");
+        try (FileInputStream fis = new FileInputStream(getKeystoreLocation() + File.separator + keystoreFileName)) {
+            ks.load(fis, KEYSTORE_PASSWORD.toCharArray());
+        }
+        KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance(
+                KeyManagerFactory.getDefaultAlgorithm());
+        keyManagerFactory.init(ks, KEYSTORE_PASSWORD.toCharArray());
+        return keyManagerFactory;
+    }
+
+    private static String getKeystoreLocation() {
+        return System.getProperty("org.mongodb.test.x509.auth.keystore.location");
+    }
+    /**
+     * The connection string is sourced from an environment variable populated from Secret Storage.
+     * We verify that it still requires X.509 authentication before running these tests, to preserve the test invariants.
+     */
+    private static void verifyX509AuthenticationIsRequired(final MongoClientSettings.Builder mongoClientSettingsBuilder) {
+        com.mongodb.assertions.Assertions.assertTrue(
+                com.mongodb.assertions.Assertions.assertNotNull(mongoClientSettingsBuilder.build().getCredential())
+                        .getAuthenticationMechanism() == MONGODB_X509);
+    }
+
+    /**
+     * This condition makes it possible to skip both method-source initialization and test execution:
+     * - {@code @EnabledIf} on the class or {@code assumeTrue} in the constructor does not block method-source initialization;
+     * - {@code assumeTrue} in a static block fails the test instead of skipping it.
+     */
+    public static class X509AuthenticationPropertyCondition implements ExecutionCondition {
+        @Override
+        public ConditionEvaluationResult evaluateExecutionCondition(final ExtensionContext context) {
+            if (isX509TestsEnabled()) {
+                return ConditionEvaluationResult.enabled("Test is enabled because x509 auth configuration exists");
+            } else {
+                return ConditionEvaluationResult.disabled("Test is disabled because x509 auth configuration is missing");
+            }
+        }
+    }
+
+    private static boolean isX509TestsEnabled() {
+        return Boolean.parseBoolean(System.getProperty("org.mongodb.test.x509.auth.enabled"));
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/auth/X509AuthenticationTest.java b/driver-sync/src/test/functional/com/mongodb/client/auth/X509AuthenticationTest.java
new file mode 100644
index 00000000000..9605c027141
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/auth/X509AuthenticationTest.java
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.auth;
+
+import com.mongodb.MongoClientSettings;
+import com.mongodb.client.MongoClient;
+import com.mongodb.client.MongoClients;
+
+public class X509AuthenticationTest extends AbstractX509AuthenticationTest {
+    @Override
+    protected MongoClient createMongoClient(final MongoClientSettings mongoClientSettings) {
+        return MongoClients.create(mongoClientSettings);
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/csot/AbstractClientSideOperationsEncryptionTimeoutProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/csot/AbstractClientSideOperationsEncryptionTimeoutProseTest.java
new file mode 100644
index 00000000000..dd45bc8ae2c
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/csot/AbstractClientSideOperationsEncryptionTimeoutProseTest.java
@@ -0,0 +1,387 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.csot;
+
+import com.mongodb.AutoEncryptionSettings;
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.ClusterFixture;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.MongoNamespace;
+import com.mongodb.MongoOperationTimeoutException;
+import com.mongodb.MongoUpdatedEncryptedFieldsException;
+import com.mongodb.ReadConcern;
+import com.mongodb.ReadPreference;
+import com.mongodb.WriteConcern;
+import com.mongodb.client.Fixture;
+import com.mongodb.client.MongoClient;
+import com.mongodb.client.MongoCollection;
+import com.mongodb.client.MongoDatabase;
+import com.mongodb.client.model.CreateCollectionOptions;
+import com.mongodb.client.model.CreateEncryptedCollectionParams;
+import com.mongodb.client.model.ValidationOptions;
+import com.mongodb.client.model.vault.DataKeyOptions;
+import com.mongodb.client.model.vault.EncryptOptions;
+import com.mongodb.client.test.CollectionHelper;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.event.CommandStartedEvent;
+import com.mongodb.internal.connection.TestCommandListener;
+import org.bson.BsonBinary;
+import org.bson.BsonDocument;
+import org.bson.BsonString;
+import org.bson.Document;
+import org.bson.codecs.BsonDocumentCodec;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.ValueSource;
+
+import java.util.Arrays;
+import java.util.Base64;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import static com.mongodb.ClusterFixture.serverVersionAtLeast;
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.lessThan;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
+/**
+ * See the Client Side Operations Timeout specification prose tests.
+ */
+public abstract class AbstractClientSideOperationsEncryptionTimeoutProseTest {
+
+    protected static final String FAIL_COMMAND_NAME = "failCommand";
+    private static final Map<String, Map<String, Object>> KMS_PROVIDERS = new HashMap<>();
+
+    private final MongoNamespace keyVaultNamespace = new MongoNamespace("keyvault", "datakeys");
+
+    private CollectionHelper<BsonDocument> keyVaultCollectionHelper;
+
+    private TestCommandListener commandListener;
+
+    private static final String MASTER_KEY = "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5a"
+            + "XRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk";
+
+    protected abstract ClientEncryption createClientEncryption(ClientEncryptionSettings.Builder builder);
+
+    protected abstract MongoClient createMongoClient(MongoClientSettings.Builder builder);
+
+    @Test
+    void shouldThrowOperationTimeoutExceptionWhenCreateDataKey() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        long rtt = ClusterFixture.getPrimaryRTT();
+
+        Map<String, Map<String, Object>> kmsProviders = new HashMap<>();
+        Map<String, Object> localProviderMap = new HashMap<>();
+        localProviderMap.put("key", Base64.getDecoder().decode(MASTER_KEY));
+        kmsProviders.put("local", localProviderMap);
+
+        try (ClientEncryption clientEncryption = createClientEncryption(getClientEncryptionSettingsBuilder(rtt + 100))) {
+
+            keyVaultCollectionHelper.runAdminCommand("{"
+                    + " configureFailPoint: \"" + FAIL_COMMAND_NAME + "\","
+                    + " mode: { times: 1 },"
+                    + " data: {"
+                    + " failCommands: [\"insert\"],"
+                    + " blockConnection: true,"
+                    + " blockTimeMS: " + (rtt + 100)
+                    + " }"
+                    + "}");
+
+            assertThrows(MongoOperationTimeoutException.class, () -> clientEncryption.createDataKey("local"));
+
+            List<CommandStartedEvent> commandStartedEvents = commandListener.getCommandStartedEvents();
+            assertEquals(1, commandStartedEvents.size());
+            assertEquals(keyVaultNamespace.getCollectionName(),
+                    commandStartedEvents.get(0).getCommand().get("insert").asString().getValue());
+            assertNotNull(commandListener.getCommandFailedEvent("insert"));
+        }
+    }
+
+    @Test
+    void shouldThrowOperationTimeoutExceptionWhenEncryptData() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        long rtt = ClusterFixture.getPrimaryRTT();
+
+        try (ClientEncryption clientEncryption = createClientEncryption(getClientEncryptionSettingsBuilder(rtt + 150))) {
+
+            clientEncryption.createDataKey("local");
+
+            keyVaultCollectionHelper.runAdminCommand("{"
+                    + " configureFailPoint: \"" + FAIL_COMMAND_NAME + "\","
+                    + " mode: { times: 1 },"
+                    + " data: {"
+                    + " failCommands: [\"find\"],"
+                    + " blockConnection: true,"
+                    + " blockTimeMS: " + (rtt + 150)
+                    + " }"
+                    + "}");
+
+            BsonBinary dataKey = clientEncryption.createDataKey("local");
+
+            EncryptOptions encryptOptions = new EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic");
+            encryptOptions.keyId(dataKey);
+            commandListener.reset();
+            assertThrows(MongoOperationTimeoutException.class, () -> clientEncryption.encrypt(new BsonString("hello"), encryptOptions));
+
+            List<CommandStartedEvent> commandStartedEvents = commandListener.getCommandStartedEvents();
+            assertEquals(1, commandStartedEvents.size());
+            assertEquals(keyVaultNamespace.getCollectionName(), commandStartedEvents.get(0).getCommand().get("find").asString().getValue());
+            assertNotNull(commandListener.getCommandFailedEvent("find"));
+        }
+    }
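+    // Each test pads its timeout with the measured round-trip time to the primary (ClusterFixture.getPrimaryRTT()) and
+    // then blocks the targeted command at least that long via the failCommand fail point, so the configured budget is
+    // reliably exceeded without making the tests flaky on slow hosts.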
+    @Test
+    void shouldThrowOperationTimeoutExceptionWhenDecryptData() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        long rtt = ClusterFixture.getPrimaryRTT();
+
+        BsonBinary encrypted;
+        try (ClientEncryption clientEncryption = createClientEncryption(getClientEncryptionSettingsBuilder(rtt + 400))) {
+            clientEncryption.createDataKey("local");
+            BsonBinary dataKey = clientEncryption.createDataKey("local");
+            EncryptOptions encryptOptions = new EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic");
+            encryptOptions.keyId(dataKey);
+            encrypted = clientEncryption.encrypt(new BsonString("hello"), encryptOptions);
+        }
+
+        try (ClientEncryption clientEncryption = createClientEncryption(getClientEncryptionSettingsBuilder(rtt + 400))) {
+            keyVaultCollectionHelper.runAdminCommand("{"
+                    + " configureFailPoint: \"" + FAIL_COMMAND_NAME + "\","
+                    + " mode: { times: 1 },"
+                    + " data: {"
+                    + " failCommands: [\"find\"],"
+                    + " blockConnection: true,"
+                    + " blockTimeMS: " + (rtt + 500)
+                    + " }"
+                    + "}");
+            commandListener.reset();
+            assertThrows(MongoOperationTimeoutException.class, () -> clientEncryption.decrypt(encrypted));
+
+            List<CommandStartedEvent> commandStartedEvents = commandListener.getCommandStartedEvents();
+            assertEquals(1, commandStartedEvents.size());
+            assertEquals(keyVaultNamespace.getCollectionName(), commandStartedEvents.get(0).getCommand().get("find").asString().getValue());
+            assertNotNull(commandListener.getCommandFailedEvent("find"));
+        }
+    }
+
+    /**
+     * Not a prose spec test; an additional test case for better coverage.
+     */
+    @Test
+    void shouldDecreaseOperationTimeoutForSubsequentOperations() {
+        assumeTrue(serverVersionAtLeast(4, 4));
+        long rtt = ClusterFixture.getPrimaryRTT();
+        long initialTimeoutMS = rtt + 2500;
+
+        keyVaultCollectionHelper.runAdminCommand("{"
+                + " configureFailPoint: \"" + FAIL_COMMAND_NAME + "\","
+                + " mode: \"alwaysOn\","
+                + " data: {"
+                + " failCommands: [\"insert\", \"find\", \"listCollections\"],"
+                + " blockConnection: true,"
+                + " blockTimeMS: " + (rtt + 10)
+                + " }"
+                + "}");
+
+        try (ClientEncryption clientEncryption = createClientEncryption(getClientEncryptionSettingsBuilder()
+                .timeout(initialTimeoutMS, MILLISECONDS))) {
+            BsonBinary dataKeyId = clientEncryption.createDataKey("local", new DataKeyOptions());
+            String base64DataKeyId = Base64.getEncoder().encodeToString(dataKeyId.getData());
+
+            final String dbName = "test";
+            final String collName = "coll";
+
+            AutoEncryptionSettings autoEncryptionSettings = AutoEncryptionSettings.builder()
+                    .keyVaultNamespace(keyVaultNamespace.getFullName())
+                    .keyVaultMongoClientSettings(getMongoClientSettingsBuilder()
+                            .build())
+                    .kmsProviders(KMS_PROVIDERS)
+                    .build();
+
+            try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder()
+                    .autoEncryptionSettings(autoEncryptionSettings)
+                    .timeout(initialTimeoutMS, MILLISECONDS))) {
+
+                CreateCollectionOptions createCollectionOptions = new CreateCollectionOptions();
+                createCollectionOptions.validationOptions(new ValidationOptions()
+                        .validator(new BsonDocument("$jsonSchema", BsonDocument.parse("{"
+                                + " properties: {"
+                                + " encryptedField: {"
+                                + " encrypt: {"
+                                + " keyId: [{"
+                                + " \"$binary\": {"
+                                + " \"base64\": \"" + base64DataKeyId + "\","
+                                + " \"subType\": \"04\""
+                                + " }"
+                                + " }],"
+                                + " bsonType: \"string\","
+                                + " algorithm: \"AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic\""
+                                + " }"
+                                + " }"
+                                + " },"
+                                + " \"bsonType\": \"object\""
+                                + "}"))));
+
+                MongoCollection<Document> collection = mongoClient.getDatabase(dbName).getCollection(collName);
+                collection.drop();
+
+                mongoClient.getDatabase(dbName).createCollection(collName, createCollectionOptions);
+
+                commandListener.reset();
+                collection.insertOne(new Document("encryptedField", "123456789"));
+
+                List<CommandStartedEvent> commandStartedEvents = commandListener.getCommandStartedEvents();
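+                // With a client-side operation timeout set, every command sent for the operation carries the remaining
+                // budget in maxTimeMS, so the listCollections, find and insert commands observed here must report
+                // strictly decreasing values (checked by the helper below).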
+                assertTimeoutIsDecreasingForCommands(Arrays.asList("listCollections", "find", "insert"), commandStartedEvents,
+                        initialTimeoutMS);
+            }
+        }
+    }
+
+    /**
+     * Not a prose spec test; an additional test case for better coverage.
+     */
+    @ParameterizedTest
+    @ValueSource(strings = {"insert", "create"})
+    void shouldThrowTimeoutExceptionWhenCreateEncryptedCollection(final String commandToTimeout) {
+        assumeTrue(serverVersionAtLeast(7, 0));
+        // given
+        long rtt = ClusterFixture.getPrimaryRTT();
+        long initialTimeoutMS = rtt + 200;
+
+        try (ClientEncryption clientEncryption = createClientEncryption(getClientEncryptionSettingsBuilder()
+                .timeout(initialTimeoutMS, MILLISECONDS))) {
+            final String dbName = "test";
+            final String collName = "coll";
+
+            try (MongoClient mongoClient = createMongoClient(getMongoClientSettingsBuilder()
+                    .timeout(initialTimeoutMS, MILLISECONDS))) {
+                CreateCollectionOptions createCollectionOptions = new CreateCollectionOptions().encryptedFields(Document.parse(
+                        "{"
+                                + " fields: [{"
+                                + " path: 'ssn',"
+                                + " bsonType: 'string',"
+                                + " keyId: null"
+                                + " }]"
+                                + "}"));
+
+                keyVaultCollectionHelper.runAdminCommand("{"
+                        + " configureFailPoint: \"" + FAIL_COMMAND_NAME + "\","
+                        + " mode: { times: 1 },"
+                        + " data: {"
+                        + " failCommands: [\"" + commandToTimeout + "\"],"
+                        + " blockConnection: true,"
+                        + " blockTimeMS: " + initialTimeoutMS
+                        + " }"
+                        + "}");
+
+                MongoDatabase database = mongoClient.getDatabase(dbName);
+                database.getCollection(collName).drop();
+                commandListener.reset();
+
+                // when
+                MongoUpdatedEncryptedFieldsException encryptionException = assertThrows(MongoUpdatedEncryptedFieldsException.class, () ->
+                        clientEncryption.createEncryptedCollection(database, collName, createCollectionOptions,
+                                new CreateEncryptedCollectionParams("local")));
+                // then
+                assertInstanceOf(MongoOperationTimeoutException.class, encryptionException.getCause());
+            }
+        }
+    }
+
+    private static void assertTimeoutIsDecreasingForCommands(final List<String> commandNames,
+                                                             final List<CommandStartedEvent> commandStartedEvents,
+                                                             final long initialTimeoutMs) {
+        long previousMaxTimeMS = initialTimeoutMs;
+        assertEquals(commandNames.size(), commandStartedEvents.size(), "There were more commands than expected");
+        for (int i = 0; i < commandStartedEvents.size(); i++) {
+            CommandStartedEvent commandStartedEvent = commandStartedEvents.get(i);
+            String expectedCommandName = commandNames.get(i);
+            assertEquals(expectedCommandName, commandStartedEvent.getCommandName());
+
+            BsonDocument command = commandStartedEvent.getCommand();
+            assertTrue(command.containsKey("maxTimeMS"), "Command " + expectedCommandName + " should have maxTimeMS set");
+
+            long maxTimeMS = command.getInt64("maxTimeMS").getValue();
+
+            if (i > 0) {
+                assertThat(commandStartedEvent.getCommandName() + " maxTimeMS should be less than that of the previous "
+                        + commandStartedEvents.get(i - 1).getCommandName() + " command", maxTimeMS, lessThan(previousMaxTimeMS));
+            } else {
+                assertThat("maxTimeMS should be less than the configured timeout " + initialTimeoutMs + "ms",
+                        maxTimeMS, lessThan(previousMaxTimeMS));
+            }
+            previousMaxTimeMS = maxTimeMS;
+        }
+    }
+
+    protected ClientEncryptionSettings.Builder getClientEncryptionSettingsBuilder(final long vaultTimeout) {
+        return ClientEncryptionSettings
+                .builder()
+                .keyVaultNamespace(keyVaultNamespace.getFullName())
+                .keyVaultMongoClientSettings(getMongoClientSettingsBuilder()
+                        .timeout(vaultTimeout, TimeUnit.MILLISECONDS).build())
+                .kmsProviders(KMS_PROVIDERS);
+    }
+    protected ClientEncryptionSettings.Builder getClientEncryptionSettingsBuilder() {
+        return ClientEncryptionSettings
+                .builder()
+                .keyVaultNamespace(keyVaultNamespace.getFullName())
+                .keyVaultMongoClientSettings(getMongoClientSettingsBuilder().build())
+                .kmsProviders(KMS_PROVIDERS);
+    }
+
+    protected MongoClientSettings.Builder getMongoClientSettingsBuilder() {
+        return Fixture.getMongoClientSettingsBuilder()
+                .readConcern(ReadConcern.MAJORITY)
+                .writeConcern(WriteConcern.MAJORITY)
+                .readPreference(ReadPreference.primary())
+                .addCommandListener(commandListener);
+    }
+
+    @BeforeEach
+    public void setUp() {
+        Map<String, Object> localProviderMap = new HashMap<>();
+        localProviderMap.put("key", Base64.getDecoder().decode(MASTER_KEY));
+        KMS_PROVIDERS.put("local", localProviderMap);
+
+        keyVaultCollectionHelper = new CollectionHelper<>(new BsonDocumentCodec(), keyVaultNamespace);
+        keyVaultCollectionHelper.create();
+        commandListener = new TestCommandListener();
+    }
+
+    @AfterEach
+    public void tearDown() {
+        ClusterFixture.disableFailPoint(FAIL_COMMAND_NAME);
+        if (keyVaultCollectionHelper != null) {
+            keyVaultCollectionHelper.drop();
+        }
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/csot/ClientSideOperationsEncryptionTimeoutProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/csot/ClientSideOperationsEncryptionTimeoutProseTest.java
new file mode 100644
index 00000000000..25a1102914a
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/csot/ClientSideOperationsEncryptionTimeoutProseTest.java
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.csot;
+
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.MongoClientSettings;
+import com.mongodb.client.MongoClient;
+import com.mongodb.client.MongoClients;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.client.vault.ClientEncryptions;
+
+public class ClientSideOperationsEncryptionTimeoutProseTest extends AbstractClientSideOperationsEncryptionTimeoutProseTest {
+    @Override
+    public ClientEncryption createClientEncryption(final ClientEncryptionSettings.Builder builder) {
+        return ClientEncryptions.create(builder.build());
+    }
+
+    @Override
+    protected MongoClient createMongoClient(final MongoClientSettings.Builder builder) {
+        return MongoClients.create(builder.build());
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/gridfs/GridFSBucketSmokeTestSpecification.groovy b/driver-sync/src/test/functional/com/mongodb/client/gridfs/GridFSBucketSmokeTestSpecification.groovy
new file mode 100644
index 00000000000..e629dc40859
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/gridfs/GridFSBucketSmokeTestSpecification.groovy
@@ -0,0 +1,612 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.gridfs + +import com.mongodb.MongoClientSettings +import com.mongodb.MongoGridFSException +import com.mongodb.client.FunctionalSpecification +import com.mongodb.client.MongoClients +import com.mongodb.client.MongoCollection +import com.mongodb.client.MongoDatabase +import com.mongodb.client.gridfs.model.GridFSDownloadOptions +import com.mongodb.client.gridfs.model.GridFSFile +import com.mongodb.client.gridfs.model.GridFSUploadOptions +import org.bson.BsonDocument +import org.bson.BsonObjectId +import org.bson.BsonString +import org.bson.Document +import org.bson.UuidRepresentation +import org.bson.codecs.UuidCodec +import org.bson.types.ObjectId +import spock.lang.Unroll + +import static com.mongodb.client.Fixture.getDefaultDatabase +import static com.mongodb.client.Fixture.getDefaultDatabaseName +import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder +import static com.mongodb.client.model.Filters.eq +import static com.mongodb.client.model.Updates.unset +import static org.bson.codecs.configuration.CodecRegistries.fromCodecs +import static org.bson.codecs.configuration.CodecRegistries.fromRegistries + +class GridFSBucketSmokeTestSpecification extends FunctionalSpecification { + protected MongoDatabase mongoDatabase + protected MongoCollection filesCollection + protected MongoCollection chunksCollection + protected GridFSBucket gridFSBucket + def singleChunkString = 'GridFS' + def multiChunkString = singleChunkString.padLeft(1024 * 255 * 5) + + def setup() { + mongoDatabase = getDefaultDatabase() + filesCollection = mongoDatabase.getCollection('fs.files', GridFSFile) + chunksCollection = mongoDatabase.getCollection('fs.chunks') + filesCollection.drop() + chunksCollection.drop() + gridFSBucket = new GridFSBucketImpl(mongoDatabase) + } + + def cleanup() { + if (filesCollection != null) { + filesCollection.drop() + chunksCollection.drop() + } + } + + @Unroll + def 'should round trip a #description'() { + given: + def content = multiChunk ? 
multiChunkString : singleChunkString
+        def contentBytes = content as byte[]
+        def expectedLength = contentBytes.length as Long
+        ObjectId fileId
+        byte[] gridFSContentBytes
+
+        when:
+        if (direct) {
+            fileId = gridFSBucket.uploadFromStream('myFile', new ByteArrayInputStream(contentBytes))
+        } else {
+            def outputStream = gridFSBucket.openUploadStream('myFile')
+            outputStream.write(contentBytes)
+            outputStream.close()
+            outputStream.close() // check for close idempotency
+            fileId = outputStream.getObjectId()
+        }
+
+        then:
+        filesCollection.countDocuments() == 1
+        chunksCollection.countDocuments() == chunkCount
+
+        when:
+        def file = filesCollection.find().first()
+
+        then:
+        file.getObjectId() == fileId
+        file.getChunkSize() == gridFSBucket.getChunkSizeBytes()
+        file.getLength() == expectedLength
+        file.getMetadata() == null
+
+        when:
+        if (direct) {
+            gridFSContentBytes = gridFSBucket.openDownloadStream(fileId).getBytes()
+        } else {
+            def outputStream = new ByteArrayOutputStream(expectedLength as int)
+            gridFSBucket.downloadToStream(fileId, outputStream)
+            outputStream.close()
+            gridFSContentBytes = outputStream.toByteArray()
+        }
+
+        then:
+        gridFSContentBytes == contentBytes
+
+        where:
+        description              | multiChunk | chunkCount | direct
+        'small file directly'    | false      | 1          | true
+        'small file to a stream' | false      | 1          | false
+        'large file directly'    | true       | 5          | true
+        'large file to a stream' | true       | 5          | false
+    }
+
+    def 'should round trip with a batchSize of 1'() {
+        given:
+        def content = multiChunkString
+        def contentBytes = content as byte[]
+        def expectedLength = contentBytes.length as Long
+        ObjectId fileId
+        byte[] gridFSContentBytes
+
+        when:
+        fileId = gridFSBucket.uploadFromStream('myFile', new ByteArrayInputStream(contentBytes))
+
+        then:
+        filesCollection.countDocuments() == 1
+        chunksCollection.countDocuments() == 5
+
+        when:
+        def file = filesCollection.find().first()
+
+        then:
+        file.getObjectId() == fileId
+        file.getChunkSize() == gridFSBucket.getChunkSizeBytes()
+        file.getLength() == expectedLength
+        file.getMetadata() == null
+
+        when:
+        gridFSContentBytes = gridFSBucket.openDownloadStream(fileId).batchSize(1).getBytes()
+
+        then:
+        gridFSContentBytes == contentBytes
+    }
+
+    def 'should handle custom ids'() {
+        given:
+        def content = multiChunkString
+        def contentBytes = content as byte[]
+        def fileId = new BsonString('myFile')
+        byte[] gridFSContentBytes
+
+        when:
+        gridFSBucket.uploadFromStream(fileId, 'myFile', new ByteArrayInputStream(contentBytes))
+        gridFSContentBytes = gridFSBucket.openDownloadStream(fileId).batchSize(1).getBytes()
+
+        then:
+        gridFSContentBytes == contentBytes
+
+        when:
+        gridFSBucket.rename(fileId, 'newName')
+
+        then:
+        gridFSBucket.openDownloadStream('newName').getBytes() == contentBytes
+
+        when:
+        gridFSBucket.delete(fileId)
+
+        then:
+        filesCollection.countDocuments() == 0
+        chunksCollection.countDocuments() == 0
+    }
+
+    def 'should use custom uploadOptions when uploading'() {
+        given:
+        def chunkSize = 20
+        def metadata = new Document('archived', false)
+        def options = new GridFSUploadOptions()
+                .chunkSizeBytes(chunkSize)
+                .metadata(metadata)
+        def content = 'qwerty' * 1024
+        def contentBytes = content as byte[]
+        def expectedLength = contentBytes.length as Long
+        def expectedNoChunks = Math.ceil((expectedLength as double) / chunkSize) as int
+        def fileId
+        byte[] gridFSContentBytes
+
+        when:
+        if (direct) {
+            fileId = new BsonObjectId(gridFSBucket.uploadFromStream('myFile', new ByteArrayInputStream(contentBytes), options))
+        } else {
+ def outputStream = gridFSBucket.openUploadStream('myFile', options) + outputStream.write(contentBytes) + outputStream.close() + fileId = outputStream.getId() + } + + then: + filesCollection.countDocuments() == 1 + chunksCollection.countDocuments() == expectedNoChunks + + when: + def fileInfo = filesCollection.find().first() + + then: + fileInfo.getId() == fileId + fileInfo.getChunkSize() == options.getChunkSizeBytes() + fileInfo.getLength() == expectedLength + fileInfo.getMetadata() == options.getMetadata() + + when: + if (direct) { + gridFSContentBytes = gridFSBucket.openDownloadStream(fileId).getBytes() + } else { + def outputStream = new ByteArrayOutputStream(expectedLength as int) + gridFSBucket.downloadToStream(fileId, outputStream) + outputStream.close() + gridFSContentBytes = outputStream.toByteArray() + } + + then: + gridFSContentBytes == contentBytes + + where: + direct << [true, false] + } + + + def 'should be able to open by name'() { + given: + def content = 'Hello GridFS' + def contentBytes = content as byte[] + def filename = 'myFile' + gridFSBucket.uploadFromStream(filename, new ByteArrayInputStream(contentBytes)) + byte[] gridFSContentBytes + + when: 'Direct to a stream' + gridFSContentBytes = gridFSBucket.openDownloadStream(filename).getBytes() + + then: + gridFSContentBytes == contentBytes + + when: 'To supplied stream' + def outputStream = new ByteArrayOutputStream(contentBytes.length) + gridFSBucket.downloadToStream(filename, outputStream) + outputStream.close() + gridFSContentBytes = outputStream.toByteArray() + + then: + gridFSContentBytes == contentBytes + } + + @Unroll + def 'should be able to open by name with selected version: #version'() { + given: + def contentBytes = (0..3).collect({ "Hello GridFS - ${it}" as byte[] }) as List + def filename = 'myFile' + byte[] gridFSContentBytes + contentBytes.each{ + gridFSBucket.uploadFromStream(filename, new ByteArrayInputStream(it)) + } + def expectedContentBytes = contentBytes[version] + def options = new GridFSDownloadOptions().revision(version) + + when: 'Direct to a stream' + gridFSContentBytes = gridFSBucket.openDownloadStream(filename, options).getBytes() + + then: + gridFSContentBytes == expectedContentBytes + + when: 'To supplied stream' + def outputStream = new ByteArrayOutputStream(expectedContentBytes.length) + gridFSBucket.downloadToStream(filename, outputStream, options) + outputStream.close() + gridFSContentBytes = outputStream.toByteArray() + + then: + gridFSContentBytes == expectedContentBytes + + where: + version << [0, 1, 2, 3, -1, -2, -3, -4] + } + + def 'should throw an exception if cannot open by name'() { + given: + def filename = 'FileDoesNotExist' + + when: 'Direct to a stream' + gridFSBucket.openDownloadStream(filename) + + then: + thrown(MongoGridFSException) + + when: 'To supplied stream' + gridFSBucket.downloadToStream(filename, new ByteArrayOutputStream(1024)) + + then: + thrown(MongoGridFSException) + } + + def 'should throw an exception if cannot open by name with selected version'() { + given: + def filename = 'myFile' + def options = new GridFSDownloadOptions().revision(1) + gridFSBucket.uploadFromStream(filename, new ByteArrayInputStream('Hello GridFS' as byte[])) + + when: 'Direct to a stream' + gridFSBucket.openDownloadStream(filename, options) + + then: + thrown(MongoGridFSException) + + when: 'To supplied stream' + gridFSBucket.downloadToStream(filename, new ByteArrayOutputStream(1024), options) + + then: + thrown(MongoGridFSException) + } + + def 'should delete a file'() { + given: 
+        def filename = 'myFile'
+
+        when:
+        def fileId = gridFSBucket.uploadFromStream(filename, new ByteArrayInputStream('Hello GridFS' as byte[]))
+
+        then:
+        filesCollection.countDocuments() == 1
+        chunksCollection.countDocuments() == 1
+
+        when:
+        gridFSBucket.delete(fileId)
+
+        then:
+        filesCollection.countDocuments() == 0
+        chunksCollection.countDocuments() == 0
+    }
+
+    def 'should throw when deleting a nonexistent file'() {
+        when:
+        gridFSBucket.delete(new ObjectId())
+
+        then:
+        thrown(MongoGridFSException)
+    }
+
+    def 'should delete orphaned chunks when deleting a file'() {
+        given:
+        def filename = 'myFile'
+        def fileId = gridFSBucket.uploadFromStream(filename, new ByteArrayInputStream('Hello GridFS' as byte[]))
+
+        when:
+        filesCollection.drop()
+
+        then:
+        filesCollection.countDocuments() == 0
+        chunksCollection.countDocuments() == 1
+
+        when:
+        gridFSBucket.delete(fileId)
+
+        then:
+        thrown(MongoGridFSException)
+
+        then:
+        filesCollection.countDocuments() == 0
+        chunksCollection.countDocuments() == 0
+    }
+
+    def 'should rename a file'() {
+        given:
+        def filename = 'myFile'
+        def newFileName = 'newFileName'
+
+        when:
+        def fileId = gridFSBucket.uploadFromStream(filename, new ByteArrayInputStream('Hello GridFS' as byte[]))
+
+        then:
+        filesCollection.countDocuments() == 1
+        chunksCollection.countDocuments() == 1
+
+        when:
+        gridFSBucket.rename(fileId, newFileName)
+
+        then:
+        filesCollection.countDocuments() == 1
+        chunksCollection.countDocuments() == 1
+
+        when:
+        gridFSBucket.openDownloadStream(newFileName)
+
+        then:
+        notThrown(MongoGridFSException)
+    }
+
+    def 'should throw an exception when renaming a nonexistent file'() {
+        when:
+        gridFSBucket.rename(new ObjectId(), 'newFileName')
+
+        then:
+        thrown(MongoGridFSException)
+    }
+
+    def 'should only create indexes on first write'() {
+        when:
+        def contentBytes = 'Hello GridFS' as byte[]
+
+        then:
+        filesCollection.listIndexes().into([]).size() == 0
+        chunksCollection.listIndexes().into([]).size() == 0
+
+        when:
+        if (direct) {
+            gridFSBucket.uploadFromStream('myFile', new ByteArrayInputStream(contentBytes))
+        } else {
+            def outputStream = gridFSBucket.openUploadStream('myFile')
+            outputStream.write(contentBytes)
+            outputStream.close()
+        }
+
+        then:
+        filesCollection.listIndexes().into([]).size() == 2
+        chunksCollection.listIndexes().into([]).size() == 2
+
+        where:
+        direct << [true, false]
+    }
+
+    def 'should not create indexes if the files collection is not empty'() {
+        when:
+        filesCollection.withDocumentClass(Document).insertOne(new Document('filename', 'bad file'))
+        def contentBytes = 'Hello GridFS' as byte[]
+
+        then:
+        filesCollection.listIndexes().into([]).size() == 1
+        chunksCollection.listIndexes().into([]).size() == 0
+
+        when:
+        if (direct) {
+            gridFSBucket.uploadFromStream('myFile', new ByteArrayInputStream(contentBytes))
+        } else {
+            def outputStream = gridFSBucket.openUploadStream('myFile')
+            outputStream.write(contentBytes)
+            outputStream.close()
+        }
+
+        then:
+        filesCollection.listIndexes().into([]).size() == 1
+        chunksCollection.listIndexes().into([]).size() == 1
+
+        where:
+        direct << [true, false]
+    }
+
+    def 'should not create if index is numerically the same'() {
+        when:
+        filesCollection.createIndex(new Document('filename', indexValue1).append('uploadDate', indexValue2))
+        chunksCollection.createIndex(new Document('files_id', indexValue1).append('n', indexValue2))
+        def contentBytes = 'Hello GridFS' as byte[]
+
+        then:
+        filesCollection.listIndexes().into([]).size() == 2
+        chunksCollection.listIndexes().into([]).size() == 2
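+        // 1, 1.0 and 1L describe numerically identical index keys, so the pre-created indexes above should be treated
+        // as equivalent to the ones GridFS would create, and the first write below should not add duplicates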
+ when: + if (direct) { + gridFSBucket.uploadFromStream('myFile', new ByteArrayInputStream(contentBytes)) + } else { + def outputStream = gridFSBucket.openUploadStream('myFile') + outputStream.write(contentBytes) + outputStream.close() + } + + then: + filesCollection.listIndexes().into([]).size() == 2 + chunksCollection.listIndexes().into([]).size() == 2 + + where: + [direct, indexValue1, indexValue2] << [[true, false], [1, 1.0, 1L], [1, 1.0, 1L]].combinations() + } + + def 'should mark and reset'() { + given: + def content = 1 .. 1000 as byte[] + def readByte = new byte[500] + + when: + def fileId = gridFSBucket.uploadFromStream('myFile', new ByteArrayInputStream(content), + new GridFSUploadOptions().chunkSizeBytes(500)) + + then: + filesCollection.countDocuments() == 1 + chunksCollection.countDocuments() == 2 + + when: + def gridFSDownloadStream = gridFSBucket.openDownloadStream(fileId) + gridFSDownloadStream.read(readByte) + + then: + readByte == 1 .. 500 as byte[] + + when: + gridFSDownloadStream.mark() + + then: + gridFSDownloadStream.read(readByte) + + then: + readByte == 501 .. 1000 as byte[] + + when: + gridFSDownloadStream.reset() + + then: + gridFSDownloadStream.read(readByte) + + then: + readByte == 501 .. 1000 as byte[] + } + + def 'should drop the bucket'() { + given: + gridFSBucket.uploadFromStream('fileName', new ByteArrayInputStream('Hello GridFS' as byte[])) + + when: + gridFSBucket.drop() + + then: + def collectionNames = mongoDatabase.listCollectionNames().into([]) + !collectionNames.contains(filesCollection.getNamespace().collectionName) + !collectionNames.contains(chunksCollection.getNamespace().collectionName) + } + + def 'should use the user provided codec registries for encoding / decoding data'() { + given: + def codecRegistry = fromRegistries(fromCodecs(new UuidCodec(UuidRepresentation.STANDARD)), + MongoClientSettings.getDefaultCodecRegistry()) + def client = MongoClients.create(getMongoClientSettingsBuilder() + .uuidRepresentation(UuidRepresentation.STANDARD) + .build()) + + def database = client.getDatabase(getDefaultDatabaseName()).withCodecRegistry(codecRegistry) + def uuid = UUID.randomUUID() + def fileMeta = new Document('uuid', uuid) + def gridFSBucket = GridFSBuckets.create(database) + + when: + def fileId = gridFSBucket.uploadFromStream('myFile', new ByteArrayInputStream(multiChunkString as byte[]), + new GridFSUploadOptions().metadata(fileMeta)) + + def file = gridFSBucket.find(new Document('_id', fileId)).first() + + then: + file.getMetadata() == fileMeta + + then: + filesCollection.find(BsonDocument).first().getDocument('metadata').getBinary('uuid').getType() == 4 as byte + + cleanup: + client?.close() + } + + @Unroll + def 'should handle missing file name data when downloading #description'() { + given: + def content = multiChunkString + def contentBytes = content as byte[] + ObjectId fileId + byte[] gridFSContentBytes + + when: + if (direct) { + fileId = gridFSBucket.uploadFromStream('myFile', new ByteArrayInputStream(contentBytes)) + } else { + def outputStream = gridFSBucket.openUploadStream('myFile') + outputStream.write(contentBytes) + outputStream.close() + fileId = outputStream.getObjectId() + } + + then: + filesCollection.countDocuments() == 1 + + when: + // Remove filename + filesCollection.updateOne(eq('_id', fileId), unset('filename')) + + if (direct) { + gridFSContentBytes = gridFSBucket.openDownloadStream(fileId).getBytes() + } else { + def outputStream = new ByteArrayOutputStream(contentBytes.length) + 
gridFSBucket.downloadToStream(fileId, outputStream)
+            outputStream.close()
+            gridFSContentBytes = outputStream.toByteArray()
+        }
+
+        then:
+        gridFSContentBytes == contentBytes
+
+        where:
+        description | direct
+        'directly'  | true
+        'a stream'  | false
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/model/mql/InContextMqlValuesFunctionalTest.java b/driver-sync/src/test/functional/com/mongodb/client/model/mql/InContextMqlValuesFunctionalTest.java
new file mode 100644
index 00000000000..25c33a04964
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/model/mql/InContextMqlValuesFunctionalTest.java
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model.mql;
+
+import com.mongodb.MongoClientSettings;
+import com.mongodb.client.AggregateIterable;
+import com.mongodb.client.DatabaseTestCase;
+import com.mongodb.client.FindIterable;
+import com.mongodb.client.model.Aggregates;
+import org.bson.Document;
+import org.bson.conversions.Bson;
+import org.junit.jupiter.api.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static com.mongodb.ClusterFixture.serverVersionAtLeast;
+import static com.mongodb.client.model.Accumulators.sum;
+import static com.mongodb.client.model.Aggregates.match;
+import static com.mongodb.client.model.Aggregates.project;
+import static com.mongodb.client.model.Filters.expr;
+import static com.mongodb.client.model.Projections.computed;
+import static com.mongodb.client.model.Projections.excludeId;
+import static com.mongodb.client.model.Projections.fields;
+import static com.mongodb.client.model.Projections.include;
+import static com.mongodb.client.model.Sorts.ascending;
+import static com.mongodb.client.model.mql.MqlValues.current;
+import static com.mongodb.client.model.mql.MqlValues.of;
+import static com.mongodb.client.model.mql.MqlValues.ofArray;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
+public class InContextMqlValuesFunctionalTest extends DatabaseTestCase {
+
+    private static String bsonToString(final Bson project) {
+        return project.toBsonDocument(Document.class, MongoClientSettings.getDefaultCodecRegistry()).toString().replaceAll("\"", "'");
+    }
+
+    private List<Document> aggregate(final Bson... stages) {
+        AggregateIterable<Document> result = collection.aggregate(Arrays.asList(stages));
+        List<Document> results = new ArrayList<>();
+        result.forEach(r -> results.add(r));
+        return results;
+    }
+
+    @Test
+    public void findTest() {
+        assumeTrue(serverVersionAtLeast(5, 0)); // get/setField
+        collection.insertMany(Arrays.asList(
+                Document.parse("{_id: 1, x: 0, y: 2}"),
+                Document.parse("{_id: 2, x: 0, y: 3}"),
+                Document.parse("{_id: 3, x: 1, y: 3}")));
+
+        FindIterable<Document> iterable = collection.find(expr(
+                current().getInteger("x").eq(of(1))));
+        List<Document> results = new ArrayList<>();
+        iterable.forEach(r -> results.add(r));
+
+        assertEquals(
+                Arrays.asList(Document.parse("{_id: 3, x: 1, y: 3}")),
+                results);
+    }
+
+    @Test
+    public void matchTest() {
+        assumeTrue(serverVersionAtLeast(5, 0)); // get/setField
+        collection.insertMany(Arrays.asList(
+                Document.parse("{_id: 1, x: 0, y: 2}"),
+                Document.parse("{_id: 2, x: 0, y: 3}"),
+                Document.parse("{_id: 3, x: 1, y: 3}")));
+
+        List<Document> results = aggregate(
+                match(expr(current().getInteger("x").eq(of(1)))));
+
+        assertEquals(
+                Arrays.asList(Document.parse("{_id: 3, x: 1, y: 3}")),
+                results);
+    }
+
+    @Test
+    public void currentAsMapMatchTest() {
+        assumeTrue(serverVersionAtLeast(5, 0)); // get/setField
+        collection.insertMany(Arrays.asList(
+                Document.parse("{_id: 1, x: 0, y: 2}"),
+                Document.parse("{_id: 2, x: 0, y: 3}"),
+                Document.parse("{_id: 3, x: 1, y: 3}")));
+
+        List<Document> results = aggregate(
+                match(expr(MqlValues.currentAsMap()
+                        .entries()
+                        .map(e -> e.getValue())
+                        .sum(v -> v).eq(of(7)))));
+
+        assertEquals(
+                Arrays.asList(Document.parse("{_id: 3, x: 1, y: 3}")),
+                results);
+    }
+
+    @Test
+    public void projectTest() {
+        assumeTrue(serverVersionAtLeast(5, 0)); // get/setField
+        collection.insertMany(Arrays.asList(
+                Document.parse("{_id: 1, x: 0, y: 2}")));
+
+        List<Document> expected = Arrays.asList(Document.parse("{_id: 1, x: 0, c: 2}"));
+
+        // old, using "$y"
+        Bson projectOld = project(fields(include("x"), computed("c",
+                "$y")));
+        assertEquals("{'$project': {'x': 1, 'c': '$y'}}", bsonToString(projectOld));
+        assertEquals(expected,
+                aggregate(projectOld));
+
+        // new, using current() with add/subtract
+        Bson projectNew = project(fields(include("x"), computed("c",
+                current().getInteger("y").add(10).subtract(10))));
+        assertEquals(
+                "{'$project': {'x': 1, 'c': "
+                        + "{'$subtract': [{'$add': [{'$getField': "
+                        + "{'input': '$$CURRENT', 'field': 'y'}}, 10]}, 10]}}}",
+                bsonToString(projectNew));
+        assertEquals(expected,
+                aggregate(projectNew));
+    }
+
+    @Test
+    public void projectTest2() {
+        assumeTrue(serverVersionAtLeast(5, 0)); // get/setField
+        collection.insertMany(Arrays.asList(Document.parse("{_id: 0, x: 1}")));
+
+        // new, nestedArray
+        Bson projectNestedArray = project(fields(excludeId(), computed("nestedArray", ofArray(
+                current().getInteger("x").max(of(4)),
+                current().getInteger("x"),
+                of(0), of(1), of(true), of(false)
+        ))));
+        assertEquals(
+                Arrays.asList(Document.parse("{ nestedArray: [ 4, 1, 0, 1, true, false ] }")),
+                aggregate(projectNestedArray));
+
+        // new, document
+        Bson projectDocument = project(fields(computed("nested",
+                // the below is roughly: "{ x: {$max : ['$x', 4] }}"
+                of(Document.parse("{x: 9}")).setField("x", current().getInteger("x").max(of(4)))
+        )));
+        assertEquals(
+                Arrays.asList(Document.parse("{_id: 0, nested: { x: 4 } }")),
+                aggregate(projectDocument));
+    }
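+    // The group test below groups on t + 100 and sums a + 1 per document: for t=0 the documents a:1 and a:2 give
+    // (1+1) + (2+1) = 5 under _id 100, and for t=1 the single document a:9 gives 9 + 1 = 10 under _id 101.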
a: 1}"), + Document.parse("{t: 0, a: 2}"), + Document.parse("{t: 1, a: 9}"))); + + List results = aggregate( + Aggregates.group( + current().getInteger("t").add(of(100)), + sum("sum", current().getInteger("a").add(1))), + Aggregates.sort(ascending("_id"))); + assertEquals( + Arrays.asList( + Document.parse("{_id: 100, sum: 5}"), + Document.parse("{_id: 101, sum: 10}")), + results); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/model/search/AggregatesSearchFunctionalTest.java b/driver-sync/src/test/functional/com/mongodb/client/model/search/AggregatesSearchFunctionalTest.java new file mode 100644 index 00000000000..1513d5495bc --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/model/search/AggregatesSearchFunctionalTest.java @@ -0,0 +1,262 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.model.search; + +import com.mongodb.client.AggregateIterable; +import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.model.SearchIndexModel; +import com.mongodb.internal.connection.ServerHelper; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.Document; +import org.bson.codecs.DecoderContext; +import org.bson.conversions.Bson; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; + +import static com.mongodb.ClusterFixture.isAtlasSearchTest; +import static com.mongodb.ClusterFixture.serverVersionAtLeast; +import static com.mongodb.MongoClientSettings.getDefaultCodecRegistry; +import static com.mongodb.client.Fixture.getMongoClient; +import static com.mongodb.client.Fixture.getPrimary; +import static com.mongodb.client.model.Aggregates.search; +import static com.mongodb.client.model.Aggregates.sort; +import static com.mongodb.client.model.Sorts.ascending; +import static com.mongodb.client.model.search.SearchOptions.searchOptions; +import static com.mongodb.client.model.search.SearchPath.fieldPath; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +public class AggregatesSearchFunctionalTest { + public static final String ATLAS_SEARCH_DATABASE = "javaVectorSearchTest"; + private static MongoClient client; + private static MongoDatabase database; + private static MongoCollection collection; + private static String searchIndexName; + + @BeforeAll + public static void beforeAll() { + assumeTrue(isAtlasSearchTest()); + assumeTrue(serverVersionAtLeast(8, 0)); + + client = getMongoClient(); + database = client.getDatabase(ATLAS_SEARCH_DATABASE); + String collectionName = AggregatesSearchFunctionalTest.class.getName(); + collection 
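The tests above also show a useful trick: an MQL expression can be rendered to its aggregation BSON locally, with no server round trip, by passing the default codec registry to toBsonDocument (this is exactly what bsonToString does). A minimal, self-contained sketch of that rendering step; the class name is illustrative, and the printed shape is the one asserted in projectTest above:

    import com.mongodb.MongoClientSettings;
    import org.bson.Document;
    import org.bson.conversions.Bson;

    import static com.mongodb.client.model.Projections.computed;
    import static com.mongodb.client.model.mql.MqlValues.current;

    public final class MqlRenderingSketch {
        public static void main(final String[] args) {
            // No MongoClient needed: the expression tree is encoded locally.
            Bson computedField = computed("c", current().getInteger("y").add(10));
            System.out.println(computedField.toBsonDocument(Document.class,
                    MongoClientSettings.getDefaultCodecRegistry()));
            // Prints roughly (per the string asserted in projectTest):
            // {"c": {"$add": [{"$getField": {"input": "$$CURRENT", "field": "y"}}, 10]}}
        }
    }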
diff --git a/driver-sync/src/test/functional/com/mongodb/client/model/search/AggregatesSearchFunctionalTest.java b/driver-sync/src/test/functional/com/mongodb/client/model/search/AggregatesSearchFunctionalTest.java
new file mode 100644
index 00000000000..1513d5495bc
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/model/search/AggregatesSearchFunctionalTest.java
@@ -0,0 +1,262 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.model.search;
+
+import com.mongodb.client.AggregateIterable;
+import com.mongodb.client.MongoClient;
+import com.mongodb.client.MongoCollection;
+import com.mongodb.client.MongoDatabase;
+import com.mongodb.client.model.SearchIndexModel;
+import com.mongodb.internal.connection.ServerHelper;
+import org.bson.BsonArray;
+import org.bson.BsonDocument;
+import org.bson.Document;
+import org.bson.codecs.DecoderContext;
+import org.bson.conversions.Bson;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+import java.util.stream.StreamSupport;
+
+import static com.mongodb.ClusterFixture.isAtlasSearchTest;
+import static com.mongodb.ClusterFixture.serverVersionAtLeast;
+import static com.mongodb.MongoClientSettings.getDefaultCodecRegistry;
+import static com.mongodb.client.Fixture.getMongoClient;
+import static com.mongodb.client.Fixture.getPrimary;
+import static com.mongodb.client.model.Aggregates.search;
+import static com.mongodb.client.model.Aggregates.sort;
+import static com.mongodb.client.model.Sorts.ascending;
+import static com.mongodb.client.model.search.SearchOptions.searchOptions;
+import static com.mongodb.client.model.search.SearchPath.fieldPath;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
+public class AggregatesSearchFunctionalTest {
+    public static final String ATLAS_SEARCH_DATABASE = "javaVectorSearchTest";
+    private static MongoClient client;
+    private static MongoDatabase database;
+    private static MongoCollection<Document> collection;
+    private static String searchIndexName;
+
+    @BeforeAll
+    public static void beforeAll() {
+        assumeTrue(isAtlasSearchTest());
+        assumeTrue(serverVersionAtLeast(8, 0));
+
+        client = getMongoClient();
+        database = client.getDatabase(ATLAS_SEARCH_DATABASE);
+        String collectionName = AggregatesSearchFunctionalTest.class.getName();
+        collection = database.getCollection(collectionName);
+        collection.drop();
+
+        // We insert documents first. The ensuing indexing guarantees that all
+        // data present at the time indexing commences will be indexed before
+        // the index enters the READY state.
+        insertDocuments("[\n"
+                + " { _id: 1 },\n"
+                + " { _id: 2, title: null },\n"
+                + " { _id: 3, title: 'test' },\n"
+                + " { _id: 4, title: ['test', 'xyz'] },\n"
+                + " { _id: 5, title: 'not test' },\n"
+                + " { _id: 6, description: 'desc 1' },\n"
+                + " { _id: 7, description: 'desc 8' },\n"
+                + " { _id: 8, summary: 'summary 1 one five' },\n"
+                + " { _id: 9, summary: 'summary 2 one two three four five' },\n"
+                + "]");
+
+        searchIndexName = "not_default";
+        // Index creation can take disproportionately long, so we create it once
+        // for all tests.
+        // We set dynamic to true to index unspecified fields. Different kinds
+        // of fields are needed for different tests.
+        collection.createSearchIndexes(Arrays.asList(new SearchIndexModel(searchIndexName, Document.parse(
+                "{\n"
+                        + "  \"mappings\": {\n"
+                        + "    \"dynamic\": true,\n"
+                        + "    \"fields\": {\n"
+                        + "      \"title\": {\n"
+                        + "        \"type\": \"token\"\n"
+                        + "      },\n"
+                        + "      \"description\": {\n"
+                        + "        \"analyzer\": \"lucene.keyword\","
+                        + "        \"type\": \"string\"\n"
+                        + "      }\n"
+                        + "    }\n"
+                        + "  }\n"
+                        + "}"))));
+        waitForIndex(collection, searchIndexName);
+    }
+
+    @AfterAll
+    public static void afterAll() {
+        if (collection != null) {
+            collection.drop();
+        }
+        try {
+            ServerHelper.checkPool(getPrimary());
+        } catch (InterruptedException e) {
+            // ignore
+        }
+    }
+
+    @Test
+    public void testExists() {
+        List<Bson> pipeline = Arrays.asList(
+                search(SearchOperator.exists(fieldPath("title")),
+                        searchOptions().index(searchIndexName)));
+        assertResults(pipeline, "[\n"
+                + " { _id: 2, title: null },\n"
+                + " { _id: 3, title: 'test' },\n"
+                + " { _id: 4, title: ['test', 'xyz'] },\n"
+                + " { _id: 5, title: 'not test' },\n"
+                + "]");
+    }
+
+    @Test
+    public void testEquals() {
+        List<Bson> pipeline1 = Arrays.asList(
+                search(SearchOperator.equals(fieldPath("title"), "test"),
+                        searchOptions().index(searchIndexName)));
+        assertResults(pipeline1, "[\n"
+                + " { _id: 3, title: 'test' },\n"
+                + " { _id: 4, title: ['test', 'xyz'] }\n"
+                + "]");
+
+        // equals null does not match non-existent fields
+        List<Bson> pipeline2 = Arrays.asList(
+                search(SearchOperator.equalsNull(fieldPath("title")),
+                        searchOptions().index(searchIndexName)));
+        assertResults(pipeline2, "[\n"
+                + " { _id: 2, title: null }\n"
+                + "]");
+    }
+
+    @Test
+    public void testMoreLikeThis() {
+        List<Bson> pipeline = Arrays.asList(
+                search(SearchOperator.moreLikeThis(Document.parse("{ summary: 'summary' }").toBsonDocument()),
+                        searchOptions().index(searchIndexName)));
+        assertResults(pipeline, "[\n"
+                + " { _id: 8, summary: 'summary 1 one five' },\n"
+                + " { _id: 9, summary: 'summary 2 one two three four five' },\n"
+                + "]");
+    }
+
+    @Test
+    public void testRegex() {
+        List<Bson> pipeline = Arrays.asList(
+                search(SearchOperator.regex(fieldPath("description"), "des[c]+ <1-4>"),
+                        searchOptions().index(searchIndexName)));
+        assertResults(pipeline, "[\n"
+                + " { _id: 6, description: 'desc 1' },\n"
+                + "]");
+    }
+
+    @Test
+    public void testWildcard() {
+        List<Bson> pipeline = Arrays.asList(
+                search(SearchOperator.wildcard(fieldPath("description"), "desc*"),
+                        searchOptions().index(searchIndexName)));
+        assertResults(pipeline, "[\n"
+                + " { _id: 6, description: 'desc 1' },\n"
+                + " { _id: 7, description: 'desc 8' },\n"
+                + "]");
+    }
+
+    @Test
+    public void testPhrase() {
+        List<Bson> pipeline = Arrays.asList(
+                search(SearchOperator.phrase(fieldPath("summary"), "one five").slop(2),
+                        searchOptions().index(searchIndexName)));
+        assertResults(pipeline, "[\n"
+                + " { _id: 8, summary: 'summary 1 one five' },\n"
+                + "]");
+    }
+
+    @Test
+    public void testQueryString() {
+        List<Bson> pipeline = Arrays.asList(
+                search(SearchOperator.queryString(fieldPath("summary"), "summary: one AND summary: three"),
+                        searchOptions().index(searchIndexName)));
+        assertResults(pipeline, "[\n"
+                + " { _id: 9, summary: 'summary 2 one two three four five' },\n"
+                + "]");
+    }
+
+    private static void insertDocuments(final String s) {
+        List<Document> documents = BsonArray.parse(s).stream()
+                .map(v -> new Document(v.asDocument()))
+                .collect(Collectors.toList());
+        collection.insertMany(documents);
+    }
+
+    private static void assertResults(final List<Bson> pipeline, final String expectedResultsAsString) {
+        ArrayList<Bson> pipeline2 = new ArrayList<>(pipeline);
+        pipeline2.add(sort(ascending("_id")));
+
+        List<BsonDocument> expectedResults = parseToList(expectedResultsAsString);
+        List<BsonDocument> actualResults = aggregate(pipeline2);
+        assertEquals(expectedResults, actualResults);
+    }
+
+    private static List<BsonDocument> aggregate(final List<Bson> stages) {
+        AggregateIterable<Document> result = collection.aggregate(stages);
+        List<BsonDocument> results = new ArrayList<>();
+        result.forEach(r -> results.add(r.toBsonDocument()));
+        return results;
+    }
+
+    public static List<BsonDocument> parseToList(final String s) {
+        return BsonArray.parse(s).stream().map(v -> toBsonDocument(v.asDocument())).collect(Collectors.toList());
+    }
+
+    public static BsonDocument toBsonDocument(final BsonDocument bsonDocument) {
+        return getDefaultCodecRegistry().get(BsonDocument.class).decode(bsonDocument.asBsonReader(), DecoderContext.builder().build());
+    }
+
+    public static boolean waitForIndex(final MongoCollection<Document> collection, final String indexName) {
+        long startTime = System.nanoTime();
+        long timeoutNanos = TimeUnit.SECONDS.toNanos(60);
+        while (System.nanoTime() - startTime < timeoutNanos) {
+            Document indexRecord = StreamSupport.stream(collection.listSearchIndexes().spliterator(), false)
+                    .filter(index -> indexName.equals(index.getString("name")))
+                    .findAny().orElse(null);
+            if (indexRecord != null) {
+                if ("FAILED".equals(indexRecord.getString("status"))) {
+                    throw new RuntimeException("Search index has failed status.");
+                }
+                if (indexRecord.getBoolean("queryable")) {
+                    return true;
+                }
+            }
+            try {
+                Thread.sleep(100); // busy-wait, avoid in production
+            } catch (InterruptedException e) {
+                Thread.currentThread().interrupt();
+                throw new RuntimeException(e);
+            }
+        }
+        return false;
+    }
+
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/AssertionContext.java b/driver-sync/src/test/functional/com/mongodb/client/unified/AssertionContext.java
new file mode 100644
index 00000000000..54d2c9dc1e7
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/unified/AssertionContext.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
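The waitForIndex helper above deliberately busy-waits in fixed 100 ms steps, which is acceptable in a functional test but, as its own comment says, not something to copy into production code. A sketch of the same loop with exponential backoff under the same 60-second budget; the method name and backoff constants are illustrative, and the FAILED-status check is omitted for brevity:

    // Illustrative variant of waitForIndex with exponential backoff.
    public static boolean waitForIndexWithBackoff(final MongoCollection<Document> collection, final String indexName) {
        long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(60);
        long sleepMillis = 100; // doubled after each miss, capped below
        while (System.nanoTime() < deadline) {
            boolean queryable = StreamSupport.stream(collection.listSearchIndexes().spliterator(), false)
                    .filter(index -> indexName.equals(index.getString("name")))
                    .anyMatch(index -> Boolean.TRUE.equals(index.getBoolean("queryable")));
            if (queryable) {
                return true;
            }
            try {
                Thread.sleep(sleepMillis);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
                throw new RuntimeException(e);
            }
            sleepMillis = Math.min(sleepMillis * 2, 5_000); // 5 s cap keeps polls responsive
        }
        return false;
    }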
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.unified; + +import java.util.ArrayDeque; +import java.util.Deque; + +final class AssertionContext { + + private final Deque contextStack = new ArrayDeque<>(); + + public void push(final ContextElement contextElement) { + contextStack.push(contextElement); + } + + public void pop() { + contextStack.pop(); + } + + public String getMessage(final String rootMessage) { + StringBuilder builder = new StringBuilder(); + + builder.append(rootMessage).append("\n\n"); + builder.append("Assertion Context:\n\n"); + + for (ContextElement contextElement : contextStack) { + builder.append(contextElement.toString()).append('\n'); + } + + return builder.toString(); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/ChangeStreamsTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/ChangeStreamsTest.java new file mode 100644 index 00000000000..f631cc461fa --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/ChangeStreamsTest.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.unified; + +import org.junit.jupiter.params.provider.Arguments; + +import java.util.Collection; + +final class ChangeStreamsTest extends UnifiedSyncTest { + private static Collection data() { + return getTestData("change-streams"); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/ClientSideEncryptionTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/ClientSideEncryptionTest.java new file mode 100644 index 00000000000..dabca853dee --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/ClientSideEncryptionTest.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
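AssertionContext is a plain LIFO stack of ContextElement frames; its value is that getMessage folds every frame still on the stack into the failure text. A hypothetical caller in the same package, roughly the shape in which the unified runner would use it (testDefinition and operation stand in for real BsonDocuments):

    AssertionContext context = new AssertionContext();
    context.push(ContextElement.ofTest(testDefinition));
    context.push(ContextElement.ofStartedOperation(operation, 0));
    try {
        // ... run the operation and assert on its result ...
        context.pop();
    } catch (AssertionError e) {
        // Rethrow with the test and operation frames folded into the message.
        throw new AssertionError(context.getMessage(e.getMessage()), e);
    }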
+ */ + +package com.mongodb.client.unified; + +import org.junit.jupiter.params.provider.Arguments; + +import java.util.Collection; + +final class ClientSideEncryptionTest extends UnifiedSyncTest { + + private static Collection data() { + return getTestData("client-side-encryption/tests/unified"); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/CollectionManagementTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/CollectionManagementTest.java new file mode 100644 index 00000000000..88a9e1e113c --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/CollectionManagementTest.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.unified; + +import org.junit.jupiter.params.provider.Arguments; + +import java.util.Collection; + +final class CollectionManagementTest extends UnifiedSyncTest { + + private static Collection data() { + return getTestData("collection-management"); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/CommandLoggingTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/CommandLoggingTest.java new file mode 100644 index 00000000000..39f14de71af --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/CommandLoggingTest.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.unified; + +import org.junit.jupiter.params.provider.Arguments; + +import java.util.Collection; + +final class CommandLoggingTest extends UnifiedSyncTest { + private static Collection data() { + return getTestData("command-logging-and-monitoring/tests/logging"); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/CommandMonitoringTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/CommandMonitoringTest.java new file mode 100644 index 00000000000..e0b576b52d5 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/CommandMonitoringTest.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
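Each of these unified-test classes is a two-line shim: the shared UnifiedSyncTest base presumably discovers the private static data() method reflectively (which is why the otherwise-unused Arguments import appears) and runs one parameterized test per JSON file under the named spec directory. Wiring in another suite is mechanical; a sketch, where "retryable-writes" is an illustrative directory name rather than one added in this diff:

    package com.mongodb.client.unified;

    import org.junit.jupiter.params.provider.Arguments;

    import java.util.Collection;

    final class RetryableWritesTest extends UnifiedSyncTest {
        private static Collection<Arguments> data() {
            return getTestData("retryable-writes");
        }
    }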
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.unified; + +import org.junit.jupiter.params.provider.Arguments; + +import java.util.Collection; + +public final class CommandMonitoringTest extends UnifiedSyncTest { + private static Collection data() { + return getTestData("command-logging-and-monitoring/tests/monitoring"); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/ConnectionPoolLoggingTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/ConnectionPoolLoggingTest.java new file mode 100644 index 00000000000..e9ab1f9d799 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/ConnectionPoolLoggingTest.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.unified; + +import org.junit.jupiter.params.provider.Arguments; + +import java.util.Collection; + +final class ConnectionPoolLoggingTest extends UnifiedSyncTest { + private static Collection data() { + return getTestData("connection-monitoring-and-pooling/tests/logging"); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/ContextElement.java b/driver-sync/src/test/functional/com/mongodb/client/unified/ContextElement.java new file mode 100644 index 00000000000..7e6e86fb01c --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/ContextElement.java @@ -0,0 +1,548 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.unified; + +import com.mongodb.MongoNamespace; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ServerDescription; +import com.mongodb.event.ClusterClosedEvent; +import com.mongodb.event.ClusterDescriptionChangedEvent; +import com.mongodb.event.ClusterOpeningEvent; +import com.mongodb.event.CommandEvent; +import com.mongodb.event.CommandFailedEvent; +import com.mongodb.event.CommandStartedEvent; +import com.mongodb.event.CommandSucceededEvent; +import com.mongodb.internal.logging.LogMessage; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.json.JsonWriterSettings; + +import java.util.List; +import java.util.stream.Collectors; + +abstract class ContextElement { + public static ContextElement ofTest(final BsonDocument definition) { + return new TestContextContextElement(definition); + } + + static ContextElement ofStartedOperation(final BsonDocument operation, final int index) { + return new StartedOperationContextElement(operation, index); + } + + static ContextElement ofCompletedOperation(final BsonDocument operation, final OperationResult result, final int index) { + return new CompletedOperationContextElement(operation, result, index); + } + + static ContextElement ofValueMatcher(final BsonValue expected, @Nullable final BsonValue actual, final String key, + final int arrayPosition) { + return new ValueMatchingContextElement(expected, actual, key, arrayPosition); + } + + static ContextElement ofError(final BsonDocument expectedError, final Exception e) { + return new ErrorMatchingContextElement(expectedError, e); + } + + static ContextElement ofOutcome(final MongoNamespace namespace, final List expectedOutcome, + final List actualOutcome) { + return new OutcomeMatchingContextElement(namespace, expectedOutcome, actualOutcome); + } + + static ContextElement ofCommandEvents(final String client, final BsonArray expectedEvents, final List actualEvents) { + return new CommandEventsMatchingContextElement(client, expectedEvents, actualEvents); + } + + static ContextElement ofCommandEvent(final BsonDocument expected, final CommandEvent actual, final int eventPosition) { + return new CommandEventMatchingContextElement(expected, actual, eventPosition); + } + + public static ContextElement ofConnectionPoolEvents(final String client, final BsonArray expectedEvents, + final List actualEvents) { + return new ConnectionPoolEventsMatchingContextElement(client, expectedEvents, actualEvents); + } + + public static ContextElement ofConnectionPoolEvent(final BsonDocument expected, final Object actual, final int eventPosition) { + return new ConnectionPoolEventMatchingContextElement(expected, actual, eventPosition); + } + + public static ContextElement ofWaitForPrimaryChange() { + return new ContextElement() { + @Override + public String toString() { + return "Wait For Primary Change Context\n"; + } + }; + } + + public static ContextElement ofWaitForThread(final String threadId) { + return new ContextElement() { + @Override + public String toString() { + return "Wait For Thread Context:\n" + + " Thread id: " + threadId + "\n"; + } + }; + } + + public static ContextElement ofTopologyType(final String topologyType) { + return new ContextElement() { + @Override + public String toString() { + return "Topology Type Context:\n" + + " Topology Type: " + topologyType + "\n"; + } + }; + } + + public 
static ContextElement ofWaitForConnectionPoolEvents(final String client, final BsonDocument event, final int count) { + return new EventCountContext("Wait For Connection Pool Events", client, event, count); + } + + public static ContextElement ofConnectionPoolEventCount(final String client, final BsonDocument event, final int count) { + return new EventCountContext("Connection Pool Event Count", client, event, count); + } + + public static ContextElement ofWaitForServerDescriptionChangedEvents(final String client, final BsonDocument event, final int count) { + return new EventCountContext("Wait For Server Description Changed Events", client, event, count); + } + + public static ContextElement ofServerDescriptionChangedEventCount(final String client, final BsonDocument event, final int count) { + return new EventCountContext("Server Description Changed Event Count", client, event, count); + } + + public static ContextElement ofWaitForClusterDescriptionChangedEvents(final String client, final BsonDocument event, final int count) { + return new EventCountContext("Wait For Cluster Description Changed Events", client, event, count); + } + + public static ContextElement ofClusterDescriptionChangedEventCount(final String client, final BsonDocument event, final int count) { + return new EventCountContext("Cluster Description Changed Event Count", client, event, count); + } + + public static ContextElement ofWaitForClusterClosedEvent(final String client) { + return new ContextElement() { + @Override + public String toString() { + return "Event MatchingContext\n" + + " client: " + client + "\n" + + " expected event: ClusterClosedEvent\n"; + } + }; + } + + public static ContextElement ofWaitForCommandEvents(final String client, final BsonDocument commandEvent, final int count) { + return new EventCountContext("Wait For Command Events", client, commandEvent, count); + } + + public static ContextElement ofTopologyEvents(final String client, final BsonArray expectedEvents, + final List actualEvents) { + return new ContextElement() { + @Override + public String toString() { + return "Events MatchingContext: \n" + + " client: '" + client + "'\n" + + " Expected events:\n" + + new BsonDocument("events", expectedEvents).toJson(JsonWriterSettings.builder().indent(true).build()) + "\n" + + " Actual events:\n" + + new BsonDocument("events", + new BsonArray(actualEvents.stream().map(ContextElement::topologyEventToDocument).collect(Collectors.toList()))) + .toJson(JsonWriterSettings.builder().indent(true).build()) + + "\n"; + } + }; + } + + public static ContextElement ofTopologyEvent(final BsonDocument expected, final Object actual, final int eventPosition) { + return new ContextElement() { + @Override + public String toString() { + return "Event Matching Context\n" + + " event position: " + eventPosition + "\n" + + " expected event: " + expected + "\n" + + " actual event: " + topologyEventToDocument(actual) + "\n"; + } + }; + } + + public static ContextElement ofWaitForServerMonitorEvents(final String client, final BsonDocument event, final int count) { + return new EventCountContext("Wait For Server Monitor Events", client, event, count); + } + + public static ContextElement ofServerMonitorEventCount(final String client, final BsonDocument event, final int count) { + return new EventCountContext("Server Monitor Event Count", client, event, count); + } + + public static ContextElement ofServerMonitorEvents(final String client, final BsonArray expectedEvents, final List actualEvents) { + return new 
ContextElement() { + @Override + public String toString() { + return "Events MatchingContext: \n" + + " client: '" + client + "'\n" + + " Expected events:\n" + + new BsonDocument("events", expectedEvents).toJson(JsonWriterSettings.builder().indent(true).build()) + "\n" + + " Actual events:\n" + + new BsonDocument("events", + new BsonArray(actualEvents.stream().map(ContextElement::serverMonitorEventToDocument).collect(Collectors.toList()))) + .toJson(JsonWriterSettings.builder().indent(true).build()) + + "\n"; + } + }; + } + + public static ContextElement ofServerMonitorEvent(final BsonDocument expected, final Object actual, final int eventPosition) { + return new ContextElement() { + @Override + public String toString() { + return "Event Matching Context\n" + + " event position: " + eventPosition + "\n" + + " expected event: " + expected + "\n" + + " actual event: " + serverMonitorEventToDocument(actual) + "\n"; + } + }; + } + + private static class EventCountContext extends ContextElement { + + private final String name; + private final String client; + private final BsonDocument event; + private final int count; + + EventCountContext(final String name, final String client, final BsonDocument event, final int count) { + this.name = name; + this.client = client; + this.event = event; + this.count = count; + } + + @Override + public String toString() { + return name + " Context: " + "\n" + + " Client: " + client + "\n" + + " Event:\n" + + event.toJson(JsonWriterSettings.builder().indent(true).build()) + "\n" + + " Count: " + count + "\n"; + } + } + public static ContextElement ofLogMessages(final String client, final BsonArray expectedMessages, + final List actualMessages) { + return new LogMessageMatchingContextElement(client, expectedMessages, actualMessages); + } + + + private static class TestContextContextElement extends ContextElement { + private final BsonDocument definition; + + TestContextContextElement(final BsonDocument definition) { + this.definition = definition; + } + + public String toString() { + return "Test Context: " + "\n" + + definition.toJson(JsonWriterSettings.builder().indent(true).build()); + } + } + + private static class StartedOperationContextElement extends ContextElement { + private final BsonDocument operation; + private final int index; + + StartedOperationContextElement(final BsonDocument operation, final int index) { + this.operation = operation; + this.index = index; + } + + public String toString() { + return "Started Operation Context: " + "\n" + + " Operation:\n" + + operation.toJson(JsonWriterSettings.builder().indent(true).build()) + "\n" + + " Operation index: " + index + "\n"; + } + } + + private static class CompletedOperationContextElement extends ContextElement { + private final BsonDocument operation; + private final OperationResult result; + private final int index; + + CompletedOperationContextElement(final BsonDocument operation, final OperationResult result, final int index) { + this.operation = operation; + this.result = result; + this.index = index; + } + + public String toString() { + return "Completed Operation Context: " + "\n" + + " Operation:\n" + + operation.toJson(JsonWriterSettings.builder().indent(true).build()) + "\n" + + " Actual result:\n" + + result + "\n" + + " Operation index: " + index + "\n"; + } + } + + private static class ValueMatchingContextElement extends ContextElement { + private final BsonValue expected; + private final BsonValue actual; + private final String key; + private final int arrayPosition; + + 
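// Captures where a value mismatch occurred: 'key' (when non-null) names the
+        // document key under comparison, 'arrayPosition' (when not -1) is the index
+        // within an array, and 'actual' is @Nullable because the expected key may be
+        // absent from the actual document.
+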
ValueMatchingContextElement(final BsonValue expected, @Nullable final BsonValue actual, final String key, final int arrayPosition) {
+            this.expected = expected;
+            this.actual = actual;
+            this.key = key;
+            this.arrayPosition = arrayPosition;
+        }
+
+        public String toString() {
+            StringBuilder builder = new StringBuilder();
+            builder.append("Value Matching Context:\n");
+            if (key != null) {
+                builder.append(" Key: ").append(key).append("\n");
+            }
+            if (arrayPosition != -1) {
+                builder.append(" Array position: ").append(arrayPosition).append("\n");
+            }
+            builder.append(" Expected value:\n ");
+            builder.append(expected).append("\n");
+            builder.append(" Actual value:\n ");
+            builder.append(actual).append("\n");
+            return builder.toString();
+        }
+    }
+
+    private static class ErrorMatchingContextElement extends ContextElement {
+        private final BsonDocument expectedError;
+        private final Exception actualError;
+
+        ErrorMatchingContextElement(final BsonDocument expectedError, final Exception actualError) {
+            this.expectedError = expectedError;
+            this.actualError = actualError;
+        }
+
+        public String toString() {
+            return "Error Matching Context:\n"
+                    + " Expected error:\n"
+                    + expectedError.toJson(JsonWriterSettings.builder().indent(true).build()) + "\n"
+                    + " Actual error:\n"
+                    + actualError.toString() + "\n";
+        }
+    }
+
+    private static class OutcomeMatchingContextElement extends ContextElement {
+        private final MongoNamespace namespace;
+        private final List<BsonDocument> expectedOutcome;
+        private final List<BsonDocument> actualOutcome;
+
+        OutcomeMatchingContextElement(final MongoNamespace namespace, final List<BsonDocument> expectedOutcome,
+                                      final List<BsonDocument> actualOutcome) {
+            this.namespace = namespace;
+            this.expectedOutcome = expectedOutcome;
+            this.actualOutcome = actualOutcome;
+        }
+
+        public String toString() {
+            return "Outcome Matching Context:\n"
+                    + " Namespace: " + namespace + "\n"
+                    + " Expected outcome:\n " + expectedOutcome + "\n"
+                    + " Actual outcome:\n " + actualOutcome + "\n";
+        }
+    }
+
+    private static class CommandEventsMatchingContextElement extends ContextElement {
+        private final String client;
+        private final BsonArray expectedEvents;
+        private final List<CommandEvent> actualEvents;
+
+        CommandEventsMatchingContextElement(final String client, final BsonArray expectedEvents, final List<CommandEvent> actualEvents) {
+            this.client = client;
+            this.expectedEvents = expectedEvents;
+            this.actualEvents = actualEvents;
+        }
+
+        @Override
+        public String toString() {
+            return "Events MatchingContext: \n"
+                    + " client: '" + client + "\n"
+                    + " Expected events:\n"
+                    + new BsonDocument("events", expectedEvents).toJson(JsonWriterSettings.builder().indent(true).build()) + "\n"
+                    + " Actual events:\n"
+                    + new BsonDocument("events", new BsonArray(actualEvents.stream()
+                            .map(ContextElement::commandEventToDocument).collect(Collectors.toList())))
+                            .toJson(JsonWriterSettings.builder().indent(true).build())
+                    + "\n";
+        }
+    }
+
+    private static class CommandEventMatchingContextElement extends ContextElement {
+        private final BsonDocument expectedEvent;
+        private final CommandEvent actualEvent;
+        private final int eventPosition;
+
+        CommandEventMatchingContextElement(final BsonDocument expectedEvent, final CommandEvent actualEvent, final int eventPosition) {
+            this.expectedEvent = expectedEvent;
+            this.actualEvent = actualEvent;
+            this.eventPosition = eventPosition;
+        }
+
+        @Override
+        public String toString() {
+            return "Event Matching Context\n"
+                    + " event position: " + eventPosition + "\n"
+                    + " expected event: " + expectedEvent + "\n"
+                    + " actual event: " + 
commandEventToDocument(actualEvent) + "\n"; + } + } + + private static BsonDocument commandEventToDocument(final CommandEvent event) { + if (event instanceof CommandStartedEvent) { + CommandStartedEvent commandStartedEvent = (CommandStartedEvent) event; + return new BsonDocument("commandStartedEvent", + new BsonDocument("command", commandStartedEvent.getCommand()) + .append("databaseName", new BsonString(commandStartedEvent.getDatabaseName()))); + } + if (event instanceof CommandSucceededEvent) { + CommandSucceededEvent commandSucceededEvent = (CommandSucceededEvent) event; + return new BsonDocument("commandSucceededEvent", + new BsonDocument("reply", commandSucceededEvent.getResponse()) + .append("commandName", new BsonString(commandSucceededEvent.getCommandName()))); + } else if (event instanceof CommandFailedEvent) { + CommandFailedEvent commandFailedEvent = (CommandFailedEvent) event; + return new BsonDocument("commandFailedEvent", + new BsonDocument("commandName", new BsonString(commandFailedEvent.getCommandName()))); + } else { + throw new UnsupportedOperationException("Unsupported command event: " + event.getClass().getName()); + } + } + + private static class ConnectionPoolEventsMatchingContextElement extends ContextElement { + private final String client; + private final BsonArray expectedEvents; + private final List actualEvents; + + ConnectionPoolEventsMatchingContextElement(final String client, final BsonArray expectedEvents, final List actualEvents) { + this.client = client; + this.expectedEvents = expectedEvents; + this.actualEvents = actualEvents; + } + + @Override + public String toString() { + return "Events MatchingContext: \n" + + " client: '" + client + "\n" + + " Expected events:\n" + + new BsonDocument("events", expectedEvents).toJson(JsonWriterSettings.builder().indent(true).build()) + "\n" + + " Actual events:\n" + + new BsonDocument("events", new BsonArray(actualEvents.stream() + .map(ContextElement::connectionPoolEventToDocument).collect(Collectors.toList()))) + .toJson(JsonWriterSettings.builder().indent(true).build()) + + "\n"; + } + } + private static class ConnectionPoolEventMatchingContextElement extends ContextElement { + private final BsonDocument expectedEvent; + private final Object actualEvent; + private final int eventPosition; + + ConnectionPoolEventMatchingContextElement(final BsonDocument expectedEvent, final Object actualEvent, final int eventPosition) { + this.expectedEvent = expectedEvent; + this.actualEvent = actualEvent; + this.eventPosition = eventPosition; + } + + @Override + public String toString() { + return "Event Matching Context\n" + + " event position: " + eventPosition + "\n" + + " expected event: " + expectedEvent + "\n" + + " actual event: " + connectionPoolEventToDocument(actualEvent) + "\n"; + } + } + + + private static class LogMessageMatchingContextElement extends ContextElement { + private final String client; + private final BsonArray expectedMessages; + private final List actualMessages; + + LogMessageMatchingContextElement(final String client, final BsonArray expectedMessages, + final List actualMessages) { + super(); + this.client = client; + this.expectedMessages = expectedMessages; + this.actualMessages = actualMessages; + } + + @Override + public String toString() { + return "Log Message Matching Context\n" + + " client='" + client + '\'' + "\n" + + " expectedMessages=" + + new BsonDocument("messages", expectedMessages).toJson(JsonWriterSettings.builder().indent(true).build()) + "\n" + + " actualMessages=" + + new 
BsonDocument("messages", new BsonArray(actualMessages.stream() + .map(LogMatcher::logMessageAsDocument).collect(Collectors.toList()))) + .toJson(JsonWriterSettings.builder().indent(true).build()) + "\n"; + } + } + + private static BsonDocument connectionPoolEventToDocument(final Object event) { + return new BsonDocument(event.getClass().getSimpleName(), new BsonDocument()); + } + + private static BsonDocument serverMonitorEventToDocument(final Object event) { + return new BsonDocument(EventMatcher.getEventType(event.getClass()), + new BsonDocument("awaited", BsonBoolean.valueOf(EventMatcher.getAwaitedFromServerMonitorEvent(event)))); + } + + static BsonDocument topologyEventToDocument(final Object event) { + if (event != null && !(event instanceof ClusterOpeningEvent || event instanceof ClusterDescriptionChangedEvent || event instanceof ClusterClosedEvent)) { + throw new UnsupportedOperationException("Unsupported topology event: " + event.getClass().getName()); + } + BsonDocument eventDocument = new BsonDocument(); + if (event instanceof ClusterDescriptionChangedEvent) { + ClusterDescriptionChangedEvent changedEvent = (ClusterDescriptionChangedEvent) event; + eventDocument.put("previousDescription", + new BsonDocument("type", new BsonString(clusterDescriptionToString(changedEvent.getPreviousDescription())))); + eventDocument.put("newDescription", + new BsonDocument("type", new BsonString(clusterDescriptionToString(changedEvent.getNewDescription())))); + } + return new BsonDocument(EventMatcher.getEventType(event.getClass()), eventDocument); + } + + static String clusterDescriptionToString(final ClusterDescription clusterDescription) { + switch (clusterDescription.getType()) { + case STANDALONE: + return "Single"; + case REPLICA_SET: + return clusterDescription.getServerDescriptions().stream() + .anyMatch(ServerDescription::isPrimary) ? "ReplicaSetWithPrimary" : "ReplicaSetNoPrimary"; + case SHARDED: + return "Sharded"; + case LOAD_BALANCED: + return "LoadBalancer"; + case UNKNOWN: + return "Unknown"; + default: + throw new UnsupportedOperationException("Unexpected value: " + clusterDescription.getShortDescription()); + } + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/Entities.java b/driver-sync/src/test/functional/com/mongodb/client/unified/Entities.java new file mode 100644 index 00000000000..6f6e5bb66c8 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/Entities.java @@ -0,0 +1,760 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
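For matching purposes, the converters at the end of ContextElement reduce rich driver events to skeletal documents: connectionPoolEventToDocument keeps only the event class name, serverMonitorEventToDocument keeps only the awaited flag, and topologyEventToDocument keeps only the before/after topology types. For a ClusterDescriptionChangedEvent the normalized form looks roughly like the sketch below; the outer key is supplied by EventMatcher.getEventType, so its exact spelling here is an assumption, and the type values are illustrative:

    // {
    //   "topologyDescriptionChangedEvent": {            // assumed outer key
    //     "previousDescription": { "type": "ReplicaSetNoPrimary" },
    //     "newDescription": { "type": "ReplicaSetWithPrimary" }
    //   }
    // }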
+ */ + +package com.mongodb.client.unified; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.ClientSessionOptions; +import com.mongodb.ConnectionString; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoCredential; +import com.mongodb.ReadConcern; +import com.mongodb.ReadConcernLevel; +import com.mongodb.ReadPreference; +import com.mongodb.ServerApi; +import com.mongodb.ServerApiVersion; +import com.mongodb.TransactionOptions; +import com.mongodb.client.ClientSession; +import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoCluster; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.MongoCursor; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.gridfs.GridFSBucket; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.connection.ClusterConnectionMode; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.event.TestServerMonitorListener; +import com.mongodb.internal.connection.ServerMonitoringModeUtil; +import com.mongodb.internal.connection.TestClusterListener; +import com.mongodb.internal.connection.TestCommandListener; +import com.mongodb.internal.connection.TestConnectionPoolListener; +import com.mongodb.internal.connection.TestServerListener; +import com.mongodb.internal.logging.LogMessage; +import com.mongodb.lang.Nullable; +import com.mongodb.logging.TestLoggingInterceptor; +import org.bson.BsonArray; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.BsonValue; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.function.BiFunction; +import java.util.function.Function; +import java.util.stream.Collectors; + +import static com.mongodb.AuthenticationMechanism.MONGODB_OIDC; +import static com.mongodb.ClusterFixture.getEnv; +import static com.mongodb.ClusterFixture.getMultiMongosConnectionString; +import static com.mongodb.ClusterFixture.isLoadBalanced; +import static com.mongodb.ClusterFixture.isSharded; +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.notNull; +import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static com.mongodb.client.Fixture.getMultiMongosMongoClientSettingsBuilder; +import static com.mongodb.client.unified.UnifiedClientEncryptionHelper.createKmsProvidersMap; +import static com.mongodb.client.unified.UnifiedCrudHelper.asReadConcern; +import static com.mongodb.client.unified.UnifiedCrudHelper.asReadPreference; +import static com.mongodb.client.unified.UnifiedCrudHelper.asWriteConcern; +import static com.mongodb.internal.connection.AbstractConnectionPoolTest.waitForPoolAsyncWorkManagerStart; +import static java.lang.System.getenv; +import static java.util.Arrays.asList; +import static org.junit.Assume.assumeTrue; + +public final class Entities { + private static final Set SUPPORTED_CLIENT_ENTITY_OPTIONS = new HashSet<>( + asList( + "id", "autoEncryptOpts", "uriOptions", "serverApi", "useMultipleMongoses", "storeEventsAsEntities", + "observeEvents", "observeLogMessages", "observeSensitiveCommands", "ignoreCommandMonitoringEvents")); + private final Set 
entityNames = new HashSet<>(); + private final Map threads = new HashMap<>(); + private final Map>> tasks = new HashMap<>(); + private final Map results = new HashMap<>(); + private final Map clients = new HashMap<>(); + private final Map databases = new HashMap<>(); + private final Map> collections = new HashMap<>(); + private final Map sessions = new HashMap<>(); + private final Map sessionIdentifiers = new HashMap<>(); + private final Map buckets = new HashMap<>(); + private final Map clientEncryptions = new HashMap<>(); + private final Map clientCommandListeners = new HashMap<>(); + private final Map clientLoggingInterceptors = new HashMap<>(); + private final Map clientConnectionPoolListeners = new HashMap<>(); + private final Map clientServerListeners = new HashMap<>(); + private final Map clientClusterListeners = new HashMap<>(); + private final Map serverMonitorListeners = new HashMap<>(); + private final Map> cursors = new HashMap<>(); + private final Map topologyDescriptions = new HashMap<>(); + private final Map> eventsMap = new HashMap<>(); + + public boolean hasEvents(final String id) { + return eventsMap.containsKey(id); + } + + public List getEvents(final String id) { + return getEntity(id, eventsMap, "events"); + } + + public void addResult(final String id, final BsonValue result) { + putEntity(id, result, results); + } + + public BsonValue getResult(final String id) { + return getEntity(id, results, "result"); + } + + public void addCursor(final String id, final MongoCursor cursor) { + putEntity(id, cursor, cursors); + } + + public MongoCursor getCursor(final String id) { + return getEntity(id, cursors, "cursors"); + } + + public void addTopologyDescription(final String id, final ClusterDescription clusterDescription) { + putEntity(id, clusterDescription, topologyDescriptions); + } + + public ClusterDescription getTopologyDescription(final String id) { + return getEntity(id, topologyDescriptions, "topologyDescription"); + } + + public ExecutorService getThread(final String id) { + return getEntity(id, threads, "thread"); + } + + public void addThreadTask(final String id, final Future task) { + getEntity(id, tasks, "tasks").add(task); + } + + public List> getThreadTasks(final String id) { + return getEntity(id, tasks, "tasks"); + } + + public void clearThreadTasks(final String id) { + getEntity(id, tasks, "tasks").clear(); + } + + public boolean hasClient(final String id) { + return clients.containsKey(id); + } + + public MongoClient getClient(final String id) { + return getEntity(id, clients, "client"); + } + + public ClientEncryption getClientEncryption(final String id) { + return getEntity(id, clientEncryptions, "clientEncryption"); + } + + public boolean hasDatabase(final String id) { + return databases.containsKey(id); + } + + public MongoDatabase getDatabase(final String id) { + return getEntity(id, databases, "database"); + } + + public boolean hasCollection(final String id) { + return collections.containsKey(id); + } + + public MongoCollection getCollection(final String id) { + return getEntity(id, collections, "collection"); + } + + public MongoCluster getMongoClusterWithTimeoutMS(final String id, @Nullable final Long timeoutMS) { + return timeoutMS != null ? getClient(id).withTimeout(timeoutMS, TimeUnit.MILLISECONDS) : getClient(id); + } + + public MongoDatabase getDatabaseWithTimeoutMS(final String id, @Nullable final Long timeoutMS) { + return timeoutMS != null ? 
getDatabase(id).withTimeout(timeoutMS, TimeUnit.MILLISECONDS) : getDatabase(id); + } + + public MongoCollection getCollectionWithTimeoutMS(final String id, @Nullable final Long timeoutMS) { + return timeoutMS != null ? getCollection(id).withTimeout(timeoutMS, TimeUnit.MILLISECONDS) : getCollection(id); + } + + public ClientSession getSession(final String id) { + return getEntity(id, sessions, "session"); + } + + public BsonDocument getSessionIdentifier(final String id) { + return getEntity(id + "-identifier", sessionIdentifiers, "session identifier"); + } + + public GridFSBucket getBucket(final String id) { + return getEntity(id, buckets, "bucket"); + } + + public TestCommandListener getClientCommandListener(final String id) { + return getEntity(id + "-command-listener", clientCommandListeners, "command listener"); + } + + public TestLoggingInterceptor getClientLoggingInterceptor(final String id) { + return getEntity(id + "-logging-interceptor", clientLoggingInterceptors, "logging interceptor"); + } + + public TestConnectionPoolListener getConnectionPoolListener(final String id) { + return getEntity(id + "-connection-pool-listener", clientConnectionPoolListeners, "connection pool listener"); + } + + public TestServerListener getServerListener(final String id) { + return getEntity(id + "-server-listener", clientServerListeners, "server listener"); + } + + public TestClusterListener getClusterListener(final String id) { + return getEntity(id + "-cluster-listener", clientClusterListeners, "cluster listener"); + } + + public TestServerMonitorListener getServerMonitorListener(final String id) { + return getEntity(id + "-server-monitor-listener", serverMonitorListeners, "server monitor listener"); + } + + private T getEntity(final String id, final Map entities, final String type) { + T entity = entities.get(id); + if (entity == null) { + throw new IllegalStateException("Missing " + type + " with id: " + id); + } + return entity; + } + + private void putEntity(final String id, final T entity, final Map entities) { + if (!entityNames.add(id)) { + throw new IllegalStateException("Already an entity with id: " + id); + } + entities.put(id, entity); + } + + public void init(final BsonArray entitiesArray, + final BsonDocument startingClusterTime, + final boolean waitForPoolAsyncWorkManagerStart, + final Function mongoClientSupplier, + final Function gridFSBucketSupplier, + final BiFunction clientEncryptionSupplier) { + for (BsonValue cur : entitiesArray.getValues()) { + String entityType = cur.asDocument().getFirstKey(); + BsonDocument entity = cur.asDocument().getDocument(entityType); + String id = entity.getString("id").getValue(); + switch (entityType) { + case "thread": + initThread(id); + break; + case "client": + initClient(entity, id, mongoClientSupplier, waitForPoolAsyncWorkManagerStart); + break; + case "database": { + initDatabase(entity, id); + break; + } + case "collection": { + initCollection(entity, id); + break; + } + case "session": { + initSession(entity, id, startingClusterTime); + break; + } + case "bucket": { + initBucket(entity, id, gridFSBucketSupplier); + break; + } + case "clientEncryption": { + initClientEncryption(entity, id, clientEncryptionSupplier); + break; + } + default: + throw new UnsupportedOperationException("Unsupported entity type: " + entityType); + } + } + } + + private void initThread(final String id) { + putEntity(id, Executors.newSingleThreadExecutor(), threads); + tasks.put(id, new ArrayList<>()); + } + + private void initClient(final BsonDocument entity, 
final String id, + final Function mongoClientSupplier, + final boolean waitForPoolAsyncWorkManagerStart) { + if (!SUPPORTED_CLIENT_ENTITY_OPTIONS.containsAll(entity.keySet())) { + throw new UnsupportedOperationException("Client entity contains unsupported options: " + entity.keySet() + + ". Supported options are " + SUPPORTED_CLIENT_ENTITY_OPTIONS); + } + MongoClientSettings.Builder clientSettingsBuilder; + if (entity.getBoolean("useMultipleMongoses", BsonBoolean.FALSE).getValue() && (isSharded() || isLoadBalanced())) { + assumeTrue("Multiple mongos connection string not available for sharded cluster", + !isSharded() || getMultiMongosConnectionString() != null); + assumeTrue("Multiple mongos connection string not available for load-balanced cluster", + !isLoadBalanced() || getMultiMongosConnectionString() != null); + clientSettingsBuilder = getMultiMongosMongoClientSettingsBuilder(); + } else { + clientSettingsBuilder = getMongoClientSettingsBuilder(); + } + + clientSettingsBuilder.applicationName(id); + clientSettingsBuilder.applyToLoggerSettings(builder -> builder.maxDocumentLength(10_000)); + + TestServerListener testServerListener = new TestServerListener(); + clientSettingsBuilder.applyToServerSettings(builder -> builder.addServerListener(testServerListener)); + putEntity(id + "-server-listener", testServerListener, clientServerListeners); + + TestClusterListener testClusterListener = new TestClusterListener(); + clientSettingsBuilder.applyToClusterSettings(builder -> builder.addClusterListener(testClusterListener)); + putEntity(id + "-cluster-listener", testClusterListener, clientClusterListeners); + + if (entity.containsKey("observeEvents")) { + List observeEvents = entity.getArray("observeEvents").stream() + .map(type -> type.asString().getValue()).collect(Collectors.toList()); + List ignoreCommandMonitoringEvents = entity + .getArray("ignoreCommandMonitoringEvents", new BsonArray()).stream() + .map(type -> type.asString().getValue()).collect(Collectors.toList()); + ignoreCommandMonitoringEvents.add("configureFailPoint"); + TestCommandListener testCommandListener = new TestCommandListener( + observeEvents, + ignoreCommandMonitoringEvents, entity.getBoolean("observeSensitiveCommands", BsonBoolean.FALSE).getValue(), + null); + clientSettingsBuilder.addCommandListener(testCommandListener); + putEntity(id + "-command-listener", testCommandListener, clientCommandListeners); + + TestConnectionPoolListener testConnectionPoolListener = new TestConnectionPoolListener(observeEvents); + clientSettingsBuilder.applyToConnectionPoolSettings(builder -> + builder.addConnectionPoolListener(testConnectionPoolListener)); + putEntity(id + "-connection-pool-listener", testConnectionPoolListener, clientConnectionPoolListeners); + + TestServerMonitorListener testServerMonitorListener = new TestServerMonitorListener(observeEvents); + clientSettingsBuilder.applyToServerSettings(builder -> builder.addServerMonitorListener(testServerMonitorListener)); + putEntity(id + "-server-monitor-listener", testServerMonitorListener, serverMonitorListeners); + } else { + // Regardless of whether events are observed, we still need to track some info about the pool in order to implement + // the assertNumberConnectionsCheckedOut operation + TestConnectionPoolListener testConnectionPoolListener = new TestConnectionPoolListener(); + clientSettingsBuilder.applyToConnectionPoolSettings(builder -> + builder.addConnectionPoolListener(testConnectionPoolListener)); + putEntity(id + "-connection-pool-listener", 
testConnectionPoolListener, clientConnectionPoolListeners); + } + + clientSettingsBuilder.applyToServerSettings(builder -> { + builder.heartbeatFrequency(50, TimeUnit.MILLISECONDS); + builder.minHeartbeatFrequency(50, TimeUnit.MILLISECONDS); + }); + if (entity.containsKey("uriOptions")) { + entity.getDocument("uriOptions").forEach((key, value) -> { + switch (key) { + case "retryReads": + clientSettingsBuilder.retryReads(value.asBoolean().getValue()); + break; + case "retryWrites": + clientSettingsBuilder.retryWrites(value.asBoolean().getValue()); + break; + case "readPreference": + clientSettingsBuilder.readPreference(ReadPreference.valueOf(value.asString().getValue())); + break; + case "readConcernLevel": + clientSettingsBuilder.readConcern( + new ReadConcern(ReadConcernLevel.fromString(value.asString().getValue()))); + break; + case "w": + if (value.isString()) { + clientSettingsBuilder.writeConcern(clientSettingsBuilder.build() + .getWriteConcern().withW(value.asString().getValue())); + } else { + clientSettingsBuilder.writeConcern(clientSettingsBuilder.build() + .getWriteConcern().withW(value.asInt32().intValue())); + } + break; + case "wTimeoutMS": + clientSettingsBuilder.writeConcern(clientSettingsBuilder.build().getWriteConcern() + .withWTimeout(value.asNumber().longValue(), TimeUnit.MILLISECONDS)); + break; + case "maxPoolSize": + clientSettingsBuilder.applyToConnectionPoolSettings(builder -> builder.maxSize(value.asNumber().intValue())); + break; + case "minPoolSize": + clientSettingsBuilder.applyToConnectionPoolSettings(builder -> builder.minSize(value.asNumber().intValue())); + break; + case "waitQueueTimeoutMS": + clientSettingsBuilder.applyToConnectionPoolSettings(builder -> + builder.maxWaitTime(value.asNumber().longValue(), TimeUnit.MILLISECONDS)); + break; + case "maxIdleTimeMS": + clientSettingsBuilder.applyToConnectionPoolSettings(builder -> + builder.maxConnectionIdleTime(value.asNumber().longValue(), TimeUnit.MILLISECONDS)); + break; + case "maxConnecting": + clientSettingsBuilder.applyToConnectionPoolSettings(builder -> + builder.maxConnecting(value.asNumber().intValue())); + break; + case "heartbeatFrequencyMS": + clientSettingsBuilder.applyToServerSettings(builder -> + builder.heartbeatFrequency(value.asNumber().longValue(), TimeUnit.MILLISECONDS)); + break; + case "connectTimeoutMS": + clientSettingsBuilder.applyToSocketSettings(builder -> + builder.connectTimeout(value.asNumber().intValue(), TimeUnit.MILLISECONDS)); + break; + case "socketTimeoutMS": + clientSettingsBuilder.applyToSocketSettings(builder -> + builder.readTimeout(value.asNumber().intValue(), TimeUnit.MILLISECONDS)); + break; + case "serverSelectionTimeoutMS": + clientSettingsBuilder.applyToClusterSettings(builder -> + builder.serverSelectionTimeout(value.asNumber().longValue(), TimeUnit.MILLISECONDS)); + break; + case "loadBalanced": + if (value.asBoolean().getValue()) { + clientSettingsBuilder.applyToClusterSettings(builder -> builder.mode(ClusterConnectionMode.LOAD_BALANCED)); + } + break; + case "appname": + case "appName": + clientSettingsBuilder.applicationName(value.asString().getValue()); + break; + case "timeoutMS": + clientSettingsBuilder.timeout(value.asNumber().longValue(), TimeUnit.MILLISECONDS); + break; + case "serverMonitoringMode": + clientSettingsBuilder.applyToServerSettings(builder -> builder.serverMonitoringMode( + ServerMonitoringModeUtil.fromString(value.asString().getValue()))); + break; + case "authMechanism": + if (value.equals(new 
BsonString(MONGODB_OIDC.getMechanismName()))) { + // authMechanismProperties depends on authMechanism + BsonDocument authMechanismProperties = entity + .getDocument("uriOptions") + .getDocument("authMechanismProperties"); + boolean hasPlaceholder = authMechanismProperties.equals( + new BsonDocument("$$placeholder", new BsonInt32(1))); + if (!hasPlaceholder) { + throw new UnsupportedOperationException( + "Unsupported authMechanismProperties for authMechanism: " + value); + } + + // override the org.mongodb.test.uri connection string + String uri = getenv("MONGODB_URI"); + ConnectionString cs = new ConnectionString(uri); + clientSettingsBuilder.applyConnectionString(cs); + + String env = assertNotNull(getenv("OIDC_ENV")); + MongoCredential oidcCredential = MongoCredential + .createOidcCredential(null) + .withMechanismProperty("ENVIRONMENT", env); + if (env.equals("azure")) { + oidcCredential = oidcCredential.withMechanismProperty( + MongoCredential.TOKEN_RESOURCE_KEY, getenv("AZUREOIDC_RESOURCE")); + } else if (env.equals("gcp")) { + oidcCredential = oidcCredential.withMechanismProperty( + MongoCredential.TOKEN_RESOURCE_KEY, getenv("GCPOIDC_RESOURCE")); + } + clientSettingsBuilder.credential(oidcCredential); + break; + } + throw new UnsupportedOperationException("Unsupported authMechanism: " + value); + case "authMechanismProperties": + // authMechanismProperties are handled as part of authMechanism, above + BsonValue authMechanism = entity + .getDocument("uriOptions") + .get("authMechanism"); + if (authMechanism.equals(new BsonString(MONGODB_OIDC.getMechanismName()))) { + break; + } + throw new UnsupportedOperationException("Failure to apply authMechanismProperties: " + value); + default: + throw new UnsupportedOperationException("Unsupported uri option: " + key); + } + }); + } + if (entity.containsKey("serverApi")) { + BsonDocument serverApiDocument = entity.getDocument("serverApi"); + String apiVersion = serverApiDocument.getString("version").getValue(); + ServerApi.Builder serverApiBuilder = ServerApi.builder().version(ServerApiVersion.findByValue(apiVersion)); + if (serverApiDocument.containsKey("deprecationErrors")) { + serverApiBuilder.deprecationErrors(serverApiDocument.getBoolean("deprecationErrors").getValue()); + } + if (serverApiDocument.containsKey("strict")) { + serverApiBuilder.strict(serverApiDocument.getBoolean("strict").getValue()); + } + clientSettingsBuilder.serverApi(serverApiBuilder.build()); + } + if (entity.containsKey("autoEncryptOpts")) { + AutoEncryptionSettings.Builder builder = AutoEncryptionSettings.builder(); + BsonDocument autoEncryptOpts = entity.getDocument("autoEncryptOpts"); + + String cryptSharedLibPath = getEnv("CRYPT_SHARED_LIB_PATH", ""); + if (!cryptSharedLibPath.isEmpty()) { + BsonDocument extraOptions = autoEncryptOpts.getDocument("extraOptions", new BsonDocument()); + autoEncryptOpts.put("extraOptions", extraOptions.append("cryptSharedLibPath", new BsonString(cryptSharedLibPath))); + } + + for (Map.Entry entry : autoEncryptOpts.entrySet()) { + switch (entry.getKey()) { + case "bypassAutoEncryption": + builder.bypassAutoEncryption(entry.getValue().asBoolean().getValue()); + break; + case "bypassQueryAnalysis": + builder.bypassQueryAnalysis(entry.getValue().asBoolean().getValue()); + break; + case "schemaMap": + Map schemaMap = new HashMap<>(); + for (Map.Entry entries : entry.getValue().asDocument().entrySet()) { + schemaMap.put(entries.getKey(), entries.getValue().asDocument()); + } + builder.schemaMap(schemaMap); + break; + case 
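Aside: a condensed sketch of the OIDC credential construction in the branch above. The environment names ("azure", "gcp") and mechanism-property keys mirror the code; the resource value is a placeholder.

    import com.mongodb.MongoCredential;

    class OidcCredentialExample {
        static MongoCredential oidcCredential(final String env, final String tokenResource) {
            // ENVIRONMENT selects the OIDC machine flow ("azure", "gcp", ...), as above.
            MongoCredential credential = MongoCredential.createOidcCredential(null)
                    .withMechanismProperty("ENVIRONMENT", env);
            if (env.equals("azure") || env.equals("gcp")) {
                // TOKEN_RESOURCE_KEY names the resource the access token is requested for.
                credential = credential.withMechanismProperty(MongoCredential.TOKEN_RESOURCE_KEY, tokenResource);
            }
            return credential;
        }
    }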
"encryptedFieldsMap": + Map encryptedFieldsMap = new HashMap<>(); + for (Map.Entry entries : entry.getValue().asDocument().entrySet()) { + encryptedFieldsMap.put(entries.getKey(), entries.getValue().asDocument()); + } + builder.encryptedFieldsMap(encryptedFieldsMap); + break; + case "extraOptions": + Map extraOptions = new HashMap<>(); + for (Map.Entry extraOptionsEntry : entry.getValue().asDocument().entrySet()) { + switch (extraOptionsEntry.getKey()) { + case "mongocryptdBypassSpawn": + extraOptions.put(extraOptionsEntry.getKey(), extraOptionsEntry.getValue().asBoolean().getValue()); + break; + case "cryptSharedLibPath": + extraOptions.put(extraOptionsEntry.getKey(), extraOptionsEntry.getValue().asString().getValue()); + break; + default: + throw new UnsupportedOperationException("Unsupported extra encryption option: " + extraOptionsEntry.getKey()); + } + } + builder.extraOptions(extraOptions); + break; + case "keyVaultNamespace": + builder.keyVaultNamespace(entry.getValue().asString().getValue()); + break; + case "kmsProviders": + builder.kmsProviders(createKmsProvidersMap(entry.getValue().asDocument())); + break; + case "keyExpirationMS": + builder.keyExpiration(entry.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); + break; + default: + throw new UnsupportedOperationException("Unsupported client encryption option: " + entry.getKey()); + } + } + clientSettingsBuilder.autoEncryptionSettings(builder.build()); + } + + MongoClientSettings clientSettings = clientSettingsBuilder.build(); + + if (entity.containsKey("observeLogMessages")) { + BsonDocument observeLogMessagesDocument = entity.getDocument("observeLogMessages"); + + Map filterConfig = observeLogMessagesDocument.entrySet().stream() + .collect(Collectors.toMap(Entities::toComponent, Entities::toLevel)); + + TestLoggingInterceptor.LoggingFilter loggingFilter = new TestLoggingInterceptor.LoggingFilter(filterConfig); + + putEntity(id + "-logging-interceptor", new TestLoggingInterceptor(clientSettings.getApplicationName(), loggingFilter), + clientLoggingInterceptors); + } + + putEntity(id, mongoClientSupplier.apply(clientSettings), clients); + if (waitForPoolAsyncWorkManagerStart) { + waitForPoolAsyncWorkManagerStart(); + } + } + + private static LogMessage.Component toComponent(final Map.Entry entry) { + String componentName = entry.getKey(); + return LogMessage.Component.of(componentName); + } + + private static LogMessage.Level toLevel(final Map.Entry entry) { + BsonValue bsonValue = entry.getValue(); + String levelName = bsonValue + .asString() + .getValue() + .toUpperCase(); + return LogMessage.Level.valueOf(levelName); + } + + private void initDatabase(final BsonDocument entity, final String id) { + MongoClient client = clients.get(entity.getString("client").getValue()); + MongoDatabase database = client.getDatabase(entity.getString("databaseName").getValue()); + if (entity.containsKey("databaseOptions")) { + for (Map.Entry entry : entity.getDocument("databaseOptions").entrySet()) { + switch (entry.getKey()) { + case "readConcern": + database = database.withReadConcern(asReadConcern(entry.getValue().asDocument())); + break; + case "readPreference": + database = database.withReadPreference(asReadPreference(entry.getValue().asDocument())); + break; + case "writeConcern": + database = database.withWriteConcern(asWriteConcern(entry.getValue().asDocument())); + break; + case "timeoutMS": + database = database.withTimeout(entry.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); + break; + default: + throw new 
UnsupportedOperationException("Unsupported database option: " + entry.getKey()); + } + } + } + putEntity(id, database, databases); + } + + private void initCollection(final BsonDocument entity, final String id) { + MongoDatabase database = databases.get(entity.getString("database").getValue()); + MongoCollection collection = database.getCollection(entity.getString("collectionName").getValue(), + BsonDocument.class); + if (entity.containsKey("collectionOptions")) { + for (Map.Entry entry : entity.getDocument("collectionOptions").entrySet()) { + switch (entry.getKey()) { + case "readConcern": + collection = collection.withReadConcern(asReadConcern(entry.getValue().asDocument())); + break; + case "readPreference": + collection = collection.withReadPreference(asReadPreference(entry.getValue().asDocument())); + break; + case "writeConcern": + collection = collection.withWriteConcern(asWriteConcern(entry.getValue().asDocument())); + break; + case "timeoutMS": + collection = collection.withTimeout(entry.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); + break; + default: + throw new UnsupportedOperationException("Unsupported collection option: " + entry.getKey()); + } + } + } + putEntity(id, collection, collections); + } + + private void initSession(final BsonDocument entity, final String id, final BsonDocument startingClusterTime) { + MongoClient client = clients.get(entity.getString("client").getValue()); + ClientSessionOptions.Builder optionsBuilder = ClientSessionOptions.builder(); + if (entity.containsKey("sessionOptions")) { + for (Map.Entry entry : entity.getDocument("sessionOptions").entrySet()) { + switch (entry.getKey()) { + case "defaultTransactionOptions": + optionsBuilder.defaultTransactionOptions(getTransactionOptions(entry.getValue().asDocument())); + break; + case "snapshot": + optionsBuilder.snapshot(entry.getValue().asBoolean().getValue()); + break; + case "defaultTimeoutMS": + optionsBuilder.defaultTimeout(entry.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); + break; + case "causalConsistency": + optionsBuilder.causallyConsistent(entry.getValue().asBoolean().getValue()); + break; + default: + throw new UnsupportedOperationException("Unsupported session option: " + entry.getKey()); + } + } + } + ClientSession session = client.startSession(optionsBuilder.build()); + session.advanceClusterTime(startingClusterTime); + putEntity(id, session, sessions); + putEntity(id + "-identifier", session.getServerSession().getIdentifier(), sessionIdentifiers); + } + + private void initBucket(final BsonDocument entity, final String id, final Function gridFSBucketSupplier) { + MongoDatabase database = databases.get(entity.getString("database").getValue()); + if (entity.containsKey("bucketOptions")) { + throw new UnsupportedOperationException("Unsupported session specification: bucketOptions"); + } + putEntity(id, gridFSBucketSupplier.apply(database), buckets); + } + + private void initClientEncryption(final BsonDocument entity, final String id, + final BiFunction clientEncryptionSupplier) { + if (!entity.containsKey("clientEncryptionOpts")) { + throw new UnsupportedOperationException("Unsupported client encryption specification missing: clientEncryptionOpts"); + } + BsonDocument clientEncryptionOpts = entity.getDocument("clientEncryptionOpts"); + if (!clientEncryptionOpts.containsKey("keyVaultClient")) { + throw new UnsupportedOperationException("Unsupported client encryption specification missing: " + + "clientEncryptionOpts.keyVaultClient"); + } + + MongoClient mongoClient 
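Aside: the autoEncryptOpts handling above amounts to assembling AutoEncryptionSettings directly. A sketch with a placeholder key-vault namespace and local KMS master key:

    import com.mongodb.AutoEncryptionSettings;

    import java.util.HashMap;
    import java.util.Map;

    class AutoEncryptionExample {
        static AutoEncryptionSettings autoEncryptionSettings() {
            Map<String, Map<String, Object>> kmsProviders = new HashMap<>();
            Map<String, Object> localProvider = new HashMap<>();
            localProvider.put("key", new byte[96]); // placeholder 96-byte local master key
            kmsProviders.put("local", localProvider);
            return AutoEncryptionSettings.builder()
                    .keyVaultNamespace("keyvault.datakeys")
                    .kmsProviders(kmsProviders)
                    .bypassQueryAnalysis(true)
                    .build();
        }
    }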
= null; + ClientEncryptionSettings.Builder builder = ClientEncryptionSettings.builder(); + // this is ignored in preference to the keyVaultClient, but required to be non-null in the ClientEncryptionSettings constructor + builder.keyVaultMongoClientSettings(MongoClientSettings.builder().build()); + for (Map.Entry entry : clientEncryptionOpts.entrySet()) { + switch (entry.getKey()) { + case "keyVaultClient": + mongoClient = clients.get(entry.getValue().asString().getValue()); + break; + case "keyVaultNamespace": + builder.keyVaultNamespace(entry.getValue().asString().getValue()); + break; + case "kmsProviders": + builder.kmsProviders(createKmsProvidersMap(entry.getValue().asDocument())); + break; + case "keyExpirationMS": + builder.keyExpiration(entry.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); + break; + default: + throw new UnsupportedOperationException("Unsupported client encryption option: " + entry.getKey()); + } + } + + putEntity(id, clientEncryptionSupplier.apply(notNull("mongoClient", mongoClient), builder.build()), clientEncryptions); + } + + private TransactionOptions getTransactionOptions(final BsonDocument options) { + TransactionOptions.Builder transactionOptionsBuilder = TransactionOptions.builder(); + for (Map.Entry entry : options.entrySet()) { + switch (entry.getKey()) { + case "readConcern": + transactionOptionsBuilder.readConcern(asReadConcern(entry.getValue().asDocument())); + break; + case "writeConcern": + transactionOptionsBuilder.writeConcern(asWriteConcern(entry.getValue().asDocument())); + break; + case "readPreference": + transactionOptionsBuilder.readPreference(asReadPreference(entry.getValue().asDocument())); + break; + case "maxCommitTimeMS": + transactionOptionsBuilder.maxCommitTime(entry.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); + break; + default: + throw new UnsupportedOperationException("Unsupported transaction option: " + entry.getKey()); + } + } + return transactionOptionsBuilder.build(); + } + + public void close() { + cursors.values().forEach(MongoCursor::close); + sessions.values().forEach(ClientSession::close); + clientEncryptions.values().forEach(ClientEncryption::close); + clients.values().forEach(MongoClient::close); + clientLoggingInterceptors.values().forEach(TestLoggingInterceptor::close); + threads.values().forEach(ExecutorService::shutdownNow); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/ErrorMatcher.java b/driver-sync/src/test/functional/com/mongodb/client/unified/ErrorMatcher.java new file mode 100644 index 00000000000..dce5e06590f --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/ErrorMatcher.java @@ -0,0 +1,200 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
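Aside: getTransactionOptions above recognizes exactly four fields; built directly with illustrative values, that is:

    import com.mongodb.ReadConcern;
    import com.mongodb.ReadPreference;
    import com.mongodb.TransactionOptions;
    import com.mongodb.WriteConcern;

    import java.util.concurrent.TimeUnit;

    class TransactionOptionsExample {
        static TransactionOptions transactionOptions() {
            return TransactionOptions.builder()
                    .readConcern(ReadConcern.SNAPSHOT)
                    .writeConcern(WriteConcern.MAJORITY)
                    .readPreference(ReadPreference.primary())
                    .maxCommitTime(5_000L, TimeUnit.MILLISECONDS)
                    .build();
        }
    }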
+ */
+
+package com.mongodb.client.unified;
+
+import com.mongodb.ClientBulkWriteException;
+import com.mongodb.MongoBulkWriteException;
+import com.mongodb.MongoClientException;
+import com.mongodb.MongoCommandException;
+import com.mongodb.MongoException;
+import com.mongodb.MongoGridFSException;
+import com.mongodb.MongoSecurityException;
+import com.mongodb.MongoExecutionTimeoutException;
+import com.mongodb.MongoOperationTimeoutException;
+import com.mongodb.MongoServerException;
+import com.mongodb.MongoSocketException;
+import com.mongodb.MongoWriteConcernException;
+import com.mongodb.MongoWriteException;
+import com.mongodb.WriteError;
+import com.mongodb.bulk.WriteConcernError;
+import org.bson.BsonDocument;
+import org.bson.BsonInt32;
+import org.bson.BsonString;
+import org.bson.BsonValue;
+
+import java.util.HashSet;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.Set;
+
+import static java.lang.Integer.parseInt;
+import static java.util.Arrays.asList;
+import static java.util.stream.Collectors.toList;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.fail;
+
+final class ErrorMatcher {
+    private static final Set<String> EXPECTED_ERROR_FIELDS = new HashSet<>(
+            asList("isError", "expectError", "isClientError", "errorCode", "errorCodeName", "errorContains", "errorResponse",
+                    "isTimeoutError", "errorLabelsOmit", "errorLabelsContain",
+                    "writeErrors", "writeConcernErrors", "expectResult"));
+
+    private final AssertionContext context;
+    private final ValueMatcher valueMatcher;
+
+    ErrorMatcher(final AssertionContext context, final ValueMatcher valueMatcher) {
+        this.context = context;
+        this.valueMatcher = valueMatcher;
+    }
+
+    void assertErrorsMatch(final BsonDocument expectedError, final Exception e) {
+        context.push(ContextElement.ofError(expectedError, e));
+
+        assertTrue(context.getMessage("Unexpected field in expectError. 
One of " + expectedError.keySet()), + EXPECTED_ERROR_FIELDS.containsAll(expectedError.keySet())); + + if (expectedError.containsKey("isError")) { + assertTrue(context.getMessage("isError must be true"), expectedError.getBoolean("isError").getValue()); + } + if (expectedError.containsKey("isClientError")) { + assertEquals(context.getMessage("Exception must be of type MongoClientException or MongoSocketException" + + " or MongoGridFSException or IllegalArgumentException or IllegalStateException "), + expectedError.getBoolean("isClientError").getValue(), + e instanceof MongoClientException || e instanceof MongoSocketException || e instanceof MongoGridFSException + || e instanceof IllegalArgumentException || e instanceof IllegalStateException); + } + + if (expectedError.containsKey("isTimeoutError")) { + assertEquals(context.getMessage("Exception must be of type MongoOperationTimeoutException when checking for results"), + expectedError.getBoolean("isTimeoutError").getValue(), + e instanceof MongoOperationTimeoutException + ); + } + + if (expectedError.containsKey("errorContains")) { + String errorContains = expectedError.getString("errorContains").getValue(); + assertTrue(context.getMessage("Error message does not contain expected string: " + errorContains), + e.getMessage().toLowerCase(Locale.ROOT).contains(errorContains.toLowerCase(Locale.ROOT))); + } + if (expectedError.containsKey("errorResponse")) { + valueMatcher.assertValuesMatch(expectedError.getDocument("errorResponse"), ((MongoCommandException) e).getResponse()); + } + if (expectedError.containsKey("errorCode")) { + Exception errorCodeException = e; + if (e instanceof MongoSecurityException && e.getCause() instanceof MongoCommandException) { + errorCodeException = (Exception) e.getCause(); + } + assertTrue(context.getMessage("Exception must be of type MongoCommandException or MongoWriteException when checking" + + " for error codes, but was " + e.getClass().getSimpleName()), + errorCodeException instanceof MongoCommandException + || errorCodeException instanceof MongoWriteException); + int errorCode = (errorCodeException instanceof MongoCommandException) + ? 
((MongoCommandException) errorCodeException).getErrorCode() + : ((MongoWriteException) errorCodeException).getCode(); + + assertEquals(context.getMessage("Error codes must match"), expectedError.getNumber("errorCode").intValue(), + errorCode); + } + if (expectedError.containsKey("errorCodeName")) { + String expectedErrorCodeName = expectedError.getString("errorCodeName").getValue(); + if (e instanceof MongoExecutionTimeoutException) { + assertEquals(context.getMessage("Error code names must match"), expectedErrorCodeName, "MaxTimeMSExpired"); + } else if (e instanceof MongoWriteConcernException) { + assertEquals(context.getMessage("Error code names must match"), expectedErrorCodeName, + ((MongoWriteConcernException) e).getWriteConcernError().getCodeName()); + } else if (e instanceof MongoServerException) { + assertEquals(context.getMessage("Error code names must match"), expectedErrorCodeName, + ((MongoServerException) e).getErrorCodeName()); + } else { + fail(context.getMessage(String.format("Unexpected exception type %s when asserting error code name", + e.getClass().getSimpleName()))); + } + } + if (expectedError.containsKey("errorLabelsOmit")) { + assertTrue(context.getMessage("Exception must be of type MongoException when checking for error labels"), + e instanceof MongoException); + MongoException mongoException = (MongoException) e; + for (BsonValue cur : expectedError.getArray("errorLabelsOmit")) { + assertFalse(context.getMessage("Expected error label to be omitted but it is not: " + cur.asString().getValue()), + mongoException.hasErrorLabel(cur.asString().getValue())); + } + } + if (expectedError.containsKey("errorLabelsContain")) { + assertTrue(context.getMessage("Exception must be of type MongoException when checking for error labels"), + e instanceof MongoException); + MongoException mongoException = (MongoException) e; + for (BsonValue cur : expectedError.getArray("errorLabelsContain")) { + assertTrue(context.getMessage("Expected error label: " + cur.asString().getValue()), + mongoException.hasErrorLabel(cur.asString().getValue())); + } + } + if (expectedError.containsKey("writeErrors")) { + assertTrue(context.getMessage("Exception must be of type ClientBulkWriteException when checking for write errors"), + e instanceof ClientBulkWriteException); + BsonDocument writeErrors = expectedError.getDocument("writeErrors"); + ClientBulkWriteException actualException = (ClientBulkWriteException) e; + Map actualWriteErrors = actualException.getWriteErrors(); + assertEquals("The number of write errors must match", writeErrors.size(), actualWriteErrors.size()); + writeErrors.forEach((index, writeError) -> { + WriteError actualWriteError = actualWriteErrors.get(parseInt(index)); + assertNotNull("Expected a write error with index " + index, actualWriteError); + valueMatcher.assertValuesMatch(writeError, toMatchableValue(actualWriteError)); + }); + } + if (expectedError.containsKey("writeConcernErrors")) { + assertTrue(context.getMessage("Exception must be of type ClientBulkWriteException when checking for write errors"), + e instanceof ClientBulkWriteException); + List writeConcernErrors = expectedError.getArray("writeConcernErrors").stream() + .map(BsonValue::asDocument).collect(toList()); + ClientBulkWriteException actualException = (ClientBulkWriteException) e; + List actualWriteConcernErrors = actualException.getWriteConcernErrors(); + assertEquals("The number of write concern errors must match", writeConcernErrors.size(), actualWriteConcernErrors.size()); + for (int index = 0; index < 
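Aside: an expectError clause exercising several of the branches above might read as follows (field values are hypothetical):

    import org.bson.BsonDocument;

    class ExpectErrorExample {
        // Shape accepted by assertErrorsMatch; values are illustrative.
        static final BsonDocument EXPECTED_ERROR = BsonDocument.parse(
                "{ isError: true,"
                + " isClientError: false,"
                + " errorCode: 11000,"
                + " errorContains: 'duplicate key',"
                + " errorLabelsOmit: ['TransientTransactionError'] }");
    }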
writeConcernErrors.size(); index++) { + BsonDocument writeConcernError = writeConcernErrors.get(index); + WriteConcernError actualWriteConcernError = actualWriteConcernErrors.get(index); + valueMatcher.assertValuesMatch(writeConcernError, toMatchableValue(actualWriteConcernError)); + } + } + if (expectedError.containsKey("expectResult")) { + assertTrue(context.getMessage("Exception must be of type" + + " MongoBulkWriteException, or MongoSocketException, or ClientBulkWriteException" + + " when checking for results, but actual type is " + e.getClass().getSimpleName()), + e instanceof MongoBulkWriteException || e instanceof ClientBulkWriteException || e instanceof MongoSocketException); + // neither `MongoBulkWriteException` nor `MongoSocketException` includes information about the successful individual operations + if (e instanceof ClientBulkWriteException) { + BsonDocument actualPartialResult = ((ClientBulkWriteException) e).getPartialResult() + .map(UnifiedCrudHelper::toMatchableValue) + .orElse(new BsonDocument()); + valueMatcher.assertValuesMatch(expectedError.getDocument("expectResult"), actualPartialResult); + } + } + context.pop(); + } + + private static BsonDocument toMatchableValue(final WriteError writeError) { + return new BsonDocument("code", new BsonInt32(writeError.getCode())); + } + + private static BsonDocument toMatchableValue(final WriteConcernError writeConcernError) { + return new BsonDocument("code", new BsonInt32(writeConcernError.getCode())) + .append("message", new BsonString(writeConcernError.getMessage())); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/EventMatcher.java b/driver-sync/src/test/functional/com/mongodb/client/unified/EventMatcher.java new file mode 100644 index 00000000000..b2718b4b2d7 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/EventMatcher.java @@ -0,0 +1,572 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.unified; + +import com.mongodb.assertions.Assertions; +import com.mongodb.connection.ServerType; +import com.mongodb.event.ClusterClosedEvent; +import com.mongodb.event.ClusterDescriptionChangedEvent; +import com.mongodb.event.ClusterOpeningEvent; +import com.mongodb.event.CommandEvent; +import com.mongodb.event.CommandFailedEvent; +import com.mongodb.event.CommandStartedEvent; +import com.mongodb.event.CommandSucceededEvent; +import com.mongodb.event.ConnectionCheckOutFailedEvent; +import com.mongodb.event.ConnectionClosedEvent; +import com.mongodb.event.ConnectionCreatedEvent; +import com.mongodb.event.ConnectionPoolClearedEvent; +import com.mongodb.event.ConnectionPoolReadyEvent; +import com.mongodb.event.ConnectionReadyEvent; +import com.mongodb.event.ServerDescriptionChangedEvent; +import com.mongodb.event.ServerHeartbeatFailedEvent; +import com.mongodb.event.ServerHeartbeatStartedEvent; +import com.mongodb.event.ServerHeartbeatSucceededEvent; +import com.mongodb.event.TestServerMonitorListener; +import com.mongodb.internal.connection.TestClusterListener; +import com.mongodb.internal.connection.TestCommandListener; +import com.mongodb.internal.connection.TestConnectionPoolListener; +import com.mongodb.internal.connection.TestServerListener; +import com.mongodb.lang.NonNull; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.types.ObjectId; + +import java.time.Duration; +import java.util.HashSet; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import static com.mongodb.client.unified.ContextElement.clusterDescriptionToString; +import static java.util.Arrays.asList; +import static java.util.Collections.singleton; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +final class EventMatcher { + private final ValueMatcher valueMatcher; + private final AssertionContext context; + + EventMatcher(final ValueMatcher valueMatcher, final AssertionContext context) { + this.valueMatcher = valueMatcher; + this.context = context; + } + + public void assertCommandEventsEquality(final String client, final boolean ignoreExtraEvents, final BsonArray expectedEventDocuments, + final List events) { + context.push(ContextElement.ofCommandEvents(client, expectedEventDocuments, events)); + if (ignoreExtraEvents) { + assertTrue(context.getMessage("Number of events must be greater than or equal to the expected number of events"), + events.size() >= expectedEventDocuments.size()); + } else { + assertEquals(context.getMessage("Number of events must be the same"), expectedEventDocuments.size(), events.size()); + } + + for (int i = 0; i < expectedEventDocuments.size(); i++) { + CommandEvent actual = events.get(i); + BsonDocument expectedEventDocument = expectedEventDocuments.get(i).asDocument(); + String eventType = expectedEventDocument.getFirstKey(); + context.push(ContextElement.ofCommandEvent(expectedEventDocument, actual, i)); + BsonDocument expected = expectedEventDocument.getDocument(eventType); + + if (expected.containsKey("commandName")) { + assertEquals(context.getMessage("Command names must be equal"), + expected.getString("commandName").getValue(), actual.getCommandName()); + } + + if (expected.containsKey("databaseName")) { + assertEquals(context.getMessage("Expected database 
names to match"), + expected.getString("databaseName").getValue(), actual.getDatabaseName()); + } + + if (expected.containsKey("hasServiceId")) { + boolean hasServiceId = expected.getBoolean("hasServiceId").getValue(); + ObjectId serviceId = actual.getConnectionDescription().getServiceId(); + if (hasServiceId) { + assertNotNull(context.getMessage("Expected serviceId"), serviceId); + } else { + assertNull(context.getMessage("Expected no serviceId"), serviceId); + } + } + + if (expected.containsKey("hasServerConnectionId")) { + boolean hasServerConnectionId = expected.getBoolean("hasServerConnectionId").getValue(); + Long serverConnectionId = actual.getConnectionDescription().getConnectionId().getServerValue(); + if (hasServerConnectionId) { + assertNotNull(context.getMessage("Expected serverConnectionId"), serverConnectionId); + } else { + assertNull(context.getMessage("Expected no serverConnectionId"), serverConnectionId); + } + } + + if (actual.getClass().equals(CommandStartedEvent.class)) { + assertEquals(context.getMessage("Expected CommandStartedEvent"), eventType, "commandStartedEvent"); + CommandStartedEvent actualCommandStartedEvent = (CommandStartedEvent) actual; + + if (expected.containsKey("command")) { + valueMatcher.assertValuesMatch(expected.getDocument("command"), actualCommandStartedEvent.getCommand()); + } + } else if (actual.getClass().equals(CommandSucceededEvent.class)) { + assertEquals(context.getMessage("Expected CommandSucceededEvent"), eventType, "commandSucceededEvent"); + CommandSucceededEvent actualCommandSucceededEvent = (CommandSucceededEvent) actual; + + if (expected.containsKey("reply")) { + valueMatcher.assertValuesMatch(expected.getDocument("reply"), actualCommandSucceededEvent.getResponse()); + } + } else if (actual.getClass().equals(CommandFailedEvent.class)) { + assertEquals(context.getMessage("Expected CommandFailedEvent"), eventType, "commandFailedEvent"); + } else { + throw new UnsupportedOperationException("Unsupported event type: " + actual.getClass()); + } + context.pop(); + } + context.pop(); + } + + public void assertConnectionPoolEventsEquality(final String client, final boolean ignoreExtraEvents, final BsonArray expectedEventDocuments, + final List events) { + context.push(ContextElement.ofConnectionPoolEvents(client, expectedEventDocuments, events)); + if (ignoreExtraEvents) { + assertTrue(context.getMessage("Number of events must be greater than or equal to the expected number of events"), + events.size() >= expectedEventDocuments.size()); + } else { + assertEquals(context.getMessage("Number of events must be the same"), expectedEventDocuments.size(), events.size()); + } + + for (int i = 0; i < expectedEventDocuments.size(); i++) { + Object actual = events.get(i); + BsonDocument expectedEventDocument = expectedEventDocuments.get(i).asDocument(); + String eventType = expectedEventDocument.getFirstKey(); + context.push(ContextElement.ofConnectionPoolEvent(expectedEventDocument, actual, i)); + + assertEquals(context.getMessage("Expected event type to match"), eventType, getEventType(actual.getClass())); + + if (actual.getClass().equals(ConnectionPoolClearedEvent.class)) { + BsonDocument expected = expectedEventDocument.getDocument(eventType); + ConnectionPoolClearedEvent connectionPoolClearedEvent = (ConnectionPoolClearedEvent) actual; + if (expected.containsKey("hasServiceId")) { + boolean hasServiceId = expected.getBoolean("hasServiceId").getValue(); + ObjectId serviceId = connectionPoolClearedEvent.getServiceId(); + if (hasServiceId) { + 
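// (Editor's sketch) A command expectation of the shape matched above, with
// illustrative values:
//     { commandStartedEvent: { commandName: 'insert', databaseName: 'test',
//                              hasServiceId: false, hasServerConnectionId: true } }
// hasServiceId / hasServerConnectionId assert only presence or absence, not a value.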
assertNotNull(context.getMessage("Expected serviceId"), serviceId); + } else { + assertNull(context.getMessage("Expected no serviceId"), serviceId); + } + } + } else if (actual.getClass().equals(ConnectionCheckOutFailedEvent.class)) { + BsonDocument expected = expectedEventDocument.getDocument(eventType); + ConnectionCheckOutFailedEvent actualEvent = (ConnectionCheckOutFailedEvent) actual; + if (expected.containsKey("reason")) { + assertEquals(context.getMessage("Expected reason to match"), expected.getString("reason").getValue(), + getReasonString(actualEvent.getReason())); + } + } else if (actual.getClass().equals(ConnectionClosedEvent.class)) { + BsonDocument expected = expectedEventDocument.getDocument(eventType); + ConnectionClosedEvent actualEvent = (ConnectionClosedEvent) actual; + if (expected.containsKey("reason")) { + assertEquals(context.getMessage("Expected reason to match"), expected.getString("reason").getValue(), + getReasonString(actualEvent.getReason())); + } + } + context.pop(); + } + context.pop(); + } + + public void waitForConnectionPoolEvents(final String client, final BsonDocument event, final int count, + final TestConnectionPoolListener connectionPoolListener) { + context.push(ContextElement.ofWaitForConnectionPoolEvents(client, event, count)); + Class eventClass; + switch (event.getFirstKey()) { + case "poolClearedEvent": + eventClass = ConnectionPoolClearedEvent.class; + break; + case "poolReadyEvent": + eventClass = ConnectionPoolReadyEvent.class; + break; + case "connectionCreatedEvent": + eventClass = ConnectionCreatedEvent.class; + break; + case "connectionReadyEvent": + eventClass = ConnectionReadyEvent.class; + break; + default: + throw new UnsupportedOperationException("Unsupported event: " + event.getFirstKey()); + } + if (!event.getDocument(event.getFirstKey()).isEmpty()) { + throw new UnsupportedOperationException("Wait for connection pool events does not support event properties"); + } + try { + connectionPoolListener.waitForEvent(eventClass, count, 10, TimeUnit.SECONDS); + context.pop(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } catch (TimeoutException e) { + fail(context.getMessage("Timed out waiting for connection pool events")); + } + } + + public void waitForCommandEvents(final String clientId, final BsonDocument expectedCommandEvent, final int count, + final TestCommandListener clientCommandListener) { + context.push(ContextElement.ofWaitForCommandEvents(clientId, expectedCommandEvent, count)); + try { + switch (expectedCommandEvent.getFirstKey()) { + case "commandStartedEvent": + BsonDocument properties = expectedCommandEvent.getDocument(expectedCommandEvent.getFirstKey()); + String commandName = properties.getString("commandName").getValue(); + clientCommandListener.waitForEvents(CommandStartedEvent.class, commandStartedEvent -> + commandName.equals(commandStartedEvent.getCommandName()), count); + break; + default: + throw new UnsupportedOperationException("Unsupported event: " + expectedCommandEvent.getFirstKey()); + } + context.pop(); + } catch (TimeoutException e) { + fail(context.getMessage("Timed out waiting for connection pool events")); + } + } + + public void assertConnectionPoolEventCount(final String client, final BsonDocument event, final int count, final List events) { + context.push(ContextElement.ofConnectionPoolEventCount(client, event, count)); + Class eventClass; + switch (event.getFirstKey()) { + case "poolClearedEvent": + eventClass = ConnectionPoolClearedEvent.class; + break; + case 
"poolReadyEvent": + eventClass = ConnectionPoolReadyEvent.class; + break; + default: + throw new UnsupportedOperationException("Unsupported event: " + event.getFirstKey()); + } + if (!event.getDocument(event.getFirstKey()).isEmpty()) { + throw new UnsupportedOperationException("Wait for connection pool events does not support event properties"); + } + long matchCount = events.stream().filter(cur -> cur.getClass().equals(eventClass)).count(); + assertEquals(context.getMessage("Expected connection pool event counts to match"), count, matchCount); + context.pop(); + } + + + public void waitForServerDescriptionChangedEvents(final String client, final BsonDocument expectedEvent, final int count, + final TestServerListener serverListener) { + context.push(ContextElement.ofWaitForServerDescriptionChangedEvents(client, expectedEvent, count)); + BsonDocument expectedEventContents = getEventContents(expectedEvent); + try { + serverListener.waitForServerDescriptionChangedEvents( + event -> serverDescriptionChangedEventMatches(expectedEventContents, event), count, Duration.ofSeconds(10)); + context.pop(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } catch (TimeoutException e) { + fail(context.getMessage("Timed out waiting for server description changed events")); + } + } + + public void assertServerDescriptionChangeEventCount(final String client, final BsonDocument expectedEvent, final int count, + final List events) { + BsonDocument expectedEventContents = getEventContents(expectedEvent); + context.push(ContextElement.ofServerDescriptionChangedEventCount(client, expectedEvent, count)); + long matchCount = events.stream().filter(event -> serverDescriptionChangedEventMatches(expectedEventContents, event)).count(); + assertEquals(context.getMessage("Expected server description changed event counts to match"), count, matchCount); + context.pop(); + } + + public void waitForClusterDescriptionChangedEvents(final String client, final BsonDocument expectedEvent, final int count, + final TestClusterListener clusterListener) { + context.push(ContextElement.ofWaitForClusterDescriptionChangedEvents(client, expectedEvent, count)); + BsonDocument expectedEventContents = getEventContents(expectedEvent); + try { + clusterListener.waitForClusterDescriptionChangedEvents( + event -> clusterDescriptionChangedEventMatches(expectedEventContents, event, context), count, Duration.ofSeconds(10)); + context.pop(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } catch (TimeoutException e) { + fail(context.getMessage("Timed out waiting for cluster description changed events")); + } + } + + public void waitForClusterClosedEvent(final String client, final TestClusterListener clusterListener) { + context.push(ContextElement.ofWaitForClusterClosedEvent(client)); + try { + clusterListener.waitForClusterClosedEvent(Duration.ofSeconds(10)); + context.pop(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } catch (TimeoutException e) { + fail(context.getMessage("Timed out waiting for cluster description changed events")); + } + } + + public void assertClusterDescriptionChangeEventCount(final String client, final BsonDocument expectedEvent, final int count, + final List events) { + BsonDocument expectedEventContents = getEventContents(expectedEvent); + context.push(ContextElement.ofClusterDescriptionChangedEventCount(client, expectedEvent, count)); + long matchCount = + events.stream().filter(event -> 
clusterDescriptionChangedEventMatches(expectedEventContents, event, context)).count(); + assertEquals(context.getMessage("Expected cluster description changed event counts to match"), count, matchCount); + context.pop(); + } + + public void assertTopologyEventsEquality( + final String client, + final boolean ignoreExtraEvents, + final BsonArray expectedEventDocuments, + final List events) { + context.push(ContextElement.ofTopologyEvents(client, expectedEventDocuments, events)); + if (ignoreExtraEvents) { + assertTrue(context.getMessage("Number of events must be greater than or equal to the expected number of events"), + events.size() >= expectedEventDocuments.size()); + } else { + assertEquals(context.getMessage("Number of events must be the same"), expectedEventDocuments.size(), events.size()); + } + for (int i = 0; i < expectedEventDocuments.size(); i++) { + Object actualEvent = events.get(i); + BsonDocument expectedEventDocument = expectedEventDocuments.get(i).asDocument(); + String expectedEventType = expectedEventDocument.getFirstKey(); + context.push(ContextElement.ofTopologyEvent(expectedEventDocument, actualEvent, i)); + assertEquals(context.getMessage("Expected event type to match"), expectedEventType, getEventType(actualEvent.getClass())); + assertTopologyEventEquality(expectedEventType, expectedEventDocument, actualEvent, context); + context.pop(); + } + context.pop(); + } + + public void waitForServerMonitorEvents(final String client, final Class expectedEventType, final BsonDocument expectedEvent, + final int count, final TestServerMonitorListener serverMonitorListener) { + context.push(ContextElement.ofWaitForServerMonitorEvents(client, expectedEvent, count)); + BsonDocument expectedEventContents = getEventContents(expectedEvent); + try { + serverMonitorListener.waitForEvents(expectedEventType, + event -> serverMonitorEventMatches(expectedEventContents, event, null), count, Duration.ofSeconds(15)); + context.pop(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } catch (TimeoutException e) { + fail(context.getMessage(e.getMessage())); + } + } + + public void assertServerMonitorEventCount(final String client, final Class expectedEventType, final BsonDocument expectedEvent, + final int count, final TestServerMonitorListener serverMonitorListener) { + BsonDocument expectedEventContents = getEventContents(expectedEvent); + context.push(ContextElement.ofServerMonitorEventCount(client, expectedEvent, count)); + long matchCount = serverMonitorListener.countEvents(expectedEventType, event -> + serverMonitorEventMatches(expectedEventContents, event, null)); + assertEquals(context.getMessage("Expected server monitor event counts to match"), count, matchCount); + context.pop(); + } + + public void assertServerMonitorEventsEquality( + final String client, + final boolean ignoreExtraEvents, + final BsonArray expectedEventDocuments, + final List events) { + context.push(ContextElement.ofServerMonitorEvents(client, expectedEventDocuments, events)); + if (ignoreExtraEvents) { + assertTrue(context.getMessage("Number of events must be greater than or equal to the expected number of events"), + events.size() >= expectedEventDocuments.size()); + } else { + assertEquals(context.getMessage("Number of events must be the same"), expectedEventDocuments.size(), events.size()); + } + for (int i = 0; i < expectedEventDocuments.size(); i++) { + Object actualEvent = events.get(i); + BsonDocument expectedEventDocument = expectedEventDocuments.get(i).asDocument(); + String 
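Aside: a serverDescriptionChangedEvent expectation of the one shape accepted by getEventContents (defined just below), with an illustrative type; the code above supports 'Unknown' and 'LoadBalancer'.

    import org.bson.BsonDocument;

    class ServerDescriptionChangedExpectationExample {
        // Matched by serverDescriptionChangedEventMatches against the new ServerDescription.
        static final BsonDocument EXPECTED_EVENT = BsonDocument.parse(
                "{ serverDescriptionChangedEvent: { newDescription: { type: 'Unknown' } } }");
    }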
expectedEventType = expectedEventDocument.getFirstKey(); + context.push(ContextElement.ofServerMonitorEvent(expectedEventDocument, actualEvent, i)); + assertEquals(context.getMessage("Expected event type to match"), expectedEventType, getEventType(actualEvent.getClass())); + BsonDocument expectedEventContents = expectedEventDocument.getDocument(expectedEventType); + serverMonitorEventMatches(expectedEventContents, actualEvent, context); + context.pop(); + } + context.pop(); + } + + @NonNull + private BsonDocument getEventContents(final BsonDocument expectedEvent) { + HashSet supportedEventTypes = new HashSet<>(asList( + "serverDescriptionChangedEvent", "topologyDescriptionChangedEvent", + "serverHeartbeatStartedEvent", "serverHeartbeatSucceededEvent", "serverHeartbeatFailedEvent")); + String expectedEventType = expectedEvent.getFirstKey(); + if (!supportedEventTypes.contains(expectedEventType)) { + throw new UnsupportedOperationException("Unsupported event type " + expectedEventType); + } + @SuppressWarnings("OptionalGetWithoutIsPresent") + BsonDocument expectedEventContents = expectedEvent.values().stream().findFirst().get().asDocument(); + if (expectedEventContents.isEmpty()) { + return expectedEventContents; + } + + HashSet emptyEventTypes = new HashSet<>(singleton("topologyDescriptionChangedEvent")); + if (emptyEventTypes.contains(expectedEventType)) { + throw new UnsupportedOperationException("Contents of " + expectedEventType + " must be empty"); + } + if (expectedEventContents.size() != 1 || !expectedEventContents.getFirstKey().equals("newDescription") + || expectedEventContents.getDocument("newDescription").size() != 1) { + throw new UnsupportedOperationException("Unsupported event contents " + expectedEvent); + } + return expectedEventContents; + } + + private static boolean serverDescriptionChangedEventMatches(final BsonDocument expectedEventContents, + final ServerDescriptionChangedEvent event) { + if (expectedEventContents.isEmpty()) { + return true; + } + String newType = expectedEventContents.getDocument("newDescription").getString("type").getValue(); + switch (newType) { + case "Unknown": + return event.getNewDescription().getType() == ServerType.UNKNOWN; + case "LoadBalancer": { + return event.getNewDescription().getType() == ServerType.LOAD_BALANCER; + } + default: + throw new UnsupportedOperationException(); + } + } + + private static boolean clusterDescriptionChangedEventMatches(final BsonDocument expectedEventContents, + final ClusterDescriptionChangedEvent event, @Nullable final AssertionContext context) { + if (!expectedEventContents.isEmpty()) { + throw new UnsupportedOperationException( + "Contents of " + ClusterDescriptionChangedEvent.class.getSimpleName() + " must be empty"); + } + return true; + } + + private static void assertTopologyEventEquality( + final String expectedEventType, + final BsonDocument expectedEventDocument, + final T actualEvent, + final AssertionContext context) { + + switch (expectedEventType) { + case "topologyOpeningEvent": + assertTrue(context.getMessage("Expected ClusterOpeningEvent"), actualEvent instanceof ClusterOpeningEvent); + break; + case "topologyClosedEvent": + assertTrue(context.getMessage("Expected ClusterClosedEvent"), actualEvent instanceof ClusterClosedEvent); + break; + case "topologyDescriptionChangedEvent": + assertTrue(context.getMessage("Expected ClusterDescriptionChangedEvent"), actualEvent instanceof ClusterDescriptionChangedEvent); + ClusterDescriptionChangedEvent event = (ClusterDescriptionChangedEvent) actualEvent; 
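// (Editor's note) For topologyDescriptionChangedEvent the expected document may pin
// the previous and/or new topology type, e.g. (illustrative):
//     { topologyDescriptionChangedEvent: {
//         previousDescription: { type: 'Unknown' },
//         newDescription: { type: 'ReplicaSetWithPrimary' } } }
// The type strings are compared via clusterDescriptionToString below.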
+ BsonDocument topologyChangeDocument = expectedEventDocument.getDocument(expectedEventType, new BsonDocument()); + + if (!topologyChangeDocument.isEmpty()) { + if (topologyChangeDocument.containsKey("previousDescription")) { + String previousDescription = topologyChangeDocument.getDocument("previousDescription").getString("type").getValue(); + assertEquals(context.getMessage("Expected ClusterDescriptionChangedEvent with previousDescription: " + previousDescription), + previousDescription, clusterDescriptionToString(event.getPreviousDescription())); + } + if (topologyChangeDocument.containsKey("newDescription")) { + String newDescription = topologyChangeDocument.getDocument("newDescription").getString("type").getValue(); + assertEquals(context.getMessage("Expected ClusterDescriptionChangedEvent with newDescription: " + newDescription), + newDescription, clusterDescriptionToString(event.getNewDescription())); + } + } + break; + default: + throw new UnsupportedOperationException("Unsupported topology event: " + expectedEventType); + } + } + + /** + * @param context Not {@code null} iff mismatch must result in an error, that is, this method works as an assertion. + */ + private static boolean serverMonitorEventMatches( + final BsonDocument expectedEventContents, + final T event, + @Nullable final AssertionContext context) { + if (expectedEventContents.size() > 1) { + throw new UnsupportedOperationException("Matching for the following event is not implemented " + expectedEventContents.toJson()); + } + if (expectedEventContents.containsKey("awaited")) { + boolean expectedAwaited = expectedEventContents.getBoolean("awaited").getValue(); + boolean actualAwaited = getAwaitedFromServerMonitorEvent(event); + boolean awaitedMatches = expectedAwaited == actualAwaited; + if (context != null) { + assertTrue(context.getMessage("Expected `awaited` to match"), awaitedMatches); + } + return awaitedMatches; + } + return true; + } + + static boolean getAwaitedFromServerMonitorEvent(final Object event) { + if (event instanceof ServerHeartbeatStartedEvent) { + return ((ServerHeartbeatStartedEvent) event).isAwaited(); + } else if (event instanceof ServerHeartbeatSucceededEvent) { + return ((ServerHeartbeatSucceededEvent) event).isAwaited(); + } else if (event instanceof ServerHeartbeatFailedEvent) { + return ((ServerHeartbeatFailedEvent) event).isAwaited(); + } else { + throw Assertions.fail(event.toString()); + } + } + + static String getEventType(final Class eventClass) { + String eventClassName = eventClass.getSimpleName(); + if (eventClassName.startsWith("Cluster")) { + return eventClassName.replace("Cluster", "topology"); + } else if (eventClassName.startsWith("ConnectionPool")) { + return eventClassName.replace("ConnectionPool", "pool"); + } else if (eventClassName.startsWith("Connection")) { + return eventClassName.replace("Connection", "connection"); + } else if (eventClassName.startsWith("ServerHeartbeat")) { + StringBuilder eventTypeBuilder = new StringBuilder(eventClassName); + eventTypeBuilder.setCharAt(0, Character.toLowerCase(eventTypeBuilder.charAt(0))); + return eventTypeBuilder.toString(); + } else { + throw new UnsupportedOperationException(eventClassName); + } + } + + public static String getReasonString(final ConnectionCheckOutFailedEvent.Reason reason) { + switch (reason) { + case POOL_CLOSED: + return "poolClosed"; + case TIMEOUT: + return "timeout"; + case CONNECTION_ERROR: + return "connectionError"; + case UNKNOWN: + return "unknown"; + default: + throw new 
UnsupportedOperationException("Unsupported reason: " + reason); + } + } + + public static String getReasonString(final ConnectionClosedEvent.Reason reason) { + switch (reason) { + case STALE: + return "stale"; + case IDLE: + return "idle"; + case ERROR: + return "error"; + case POOL_CLOSED: + return "poolClosed"; + default: + throw new UnsupportedOperationException("Unsupported reason: " + reason); + } + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/FailPoint.java b/driver-sync/src/test/functional/com/mongodb/client/unified/FailPoint.java new file mode 100644 index 00000000000..9dbd6d140d1 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/FailPoint.java @@ -0,0 +1,74 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.unified; + +import com.mongodb.client.ClientSession; +import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoClients; +import org.bson.BsonDocument; +import org.bson.BsonString; + +import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static java.util.Collections.singletonList; + +final class FailPoint { + private final MongoClient client; + private final boolean ownsClient; + private final BsonDocument failPointDocument; + + static FailPoint targeted(final BsonDocument operation, final Entities entities) { + return new FailPoint(operation.getDocument("arguments").getDocument("failPoint"), createClient(operation, entities), + true); + } + + static FailPoint untargeted(final BsonDocument operation, final Entities entities) { + return new FailPoint(operation.getDocument("arguments").getDocument("failPoint"), + entities.getClient(operation.getDocument("arguments").getString("client").getValue()), false); + } + + private FailPoint(final BsonDocument failPointDocument, final MongoClient client, final boolean ownsClient) { + this.client = client; + this.ownsClient = ownsClient; + this.failPointDocument = failPointDocument; + } + + void executeFailPoint() { + client.getDatabase("admin").runCommand(failPointDocument); + } + + void disableFailPoint() { + client.getDatabase("admin") + .runCommand(new BsonDocument("configureFailPoint", + failPointDocument.getString("configureFailPoint")) + .append("mode", new BsonString("off"))); + if (ownsClient) { + client.close(); + } + } + + private static MongoClient createClient(final BsonDocument operation, final Entities entities) { + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + ClientSession clientSession = entities.getSession(arguments.getString("session").getValue()); + + if (clientSession.getPinnedServerAddress() == null) { + throw new UnsupportedOperationException("Can't target a failpoint to a server where the session is not pinned"); + } + + return MongoClients.create(getMongoClientSettingsBuilder() + .applyToClusterSettings(builder -> builder.hosts(singletonList(clientSession.getPinnedServerAddress()))).build()); + 
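// (Editor's sketch) A typical failPoint document passed to executeFailPoint(),
// using the server's failCommand fail point; values are illustrative:
//     { configureFailPoint: 'failCommand',
//       mode: { times: 1 },
//       data: { failCommands: ['insert'], errorCode: 91 } }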
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/IndexManagementTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/IndexManagementTest.java
new file mode 100644
index 00000000000..5ff83faf63d
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/unified/IndexManagementTest.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.unified;
+
+import org.junit.jupiter.params.provider.Arguments;
+
+import java.util.Collection;
+
+final class IndexManagementTest extends UnifiedSyncTest {
+    private static Collection<Arguments> data() {
+        return getTestData("index-management");
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/LoadBalancerTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/LoadBalancerTest.java
new file mode 100644
index 00000000000..530d10e7f63
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/unified/LoadBalancerTest.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.unified;
+
+import org.junit.jupiter.params.provider.Arguments;
+
+import java.util.Collection;
+
+final class LoadBalancerTest extends UnifiedSyncTest {
+    private static Collection<Arguments> data() {
+        return getTestData("load-balancers");
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/LogMatcher.java b/driver-sync/src/test/functional/com/mongodb/client/unified/LogMatcher.java
new file mode 100644
index 00000000000..b1c56e50fac
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/unified/LogMatcher.java
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.mongodb.client.unified; + +import com.mongodb.Function; +import com.mongodb.MongoCommandException; +import com.mongodb.internal.ExceptionUtils.MongoCommandExceptionUtils; +import com.mongodb.internal.logging.LogMessage; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonDouble; +import org.bson.BsonInt32; +import org.bson.BsonInt64; +import org.bson.BsonNull; +import org.bson.BsonString; +import org.bson.BsonValue; + +import java.util.Collection; +import java.util.List; +import java.util.stream.Collectors; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +final class LogMatcher { + private final ValueMatcher valueMatcher; + private final AssertionContext context; + + LogMatcher(final ValueMatcher valueMatcher, final AssertionContext context) { + + this.valueMatcher = valueMatcher; + this.context = context; + } + + void assertLogMessageEquality(final String client, final BsonArray ignoreMessages, + final boolean ignoreExtraMessages, final BsonArray expectedMessages, + final List actualMessages, final Iterable tweaks) { + context.push(ContextElement.ofLogMessages(client, expectedMessages, actualMessages)); + + List logMessages = actualMessages.stream() + .filter(logMessage -> !ignoreMessages.contains(logMessageAsIgnoreMessageDocument(logMessage))) + .collect(Collectors.toList()); + + if (ignoreExtraMessages) { + assertTrue(context.getMessage("Number of messages must be greater than or equal to the expected number of messages"), + logMessages.size() >= expectedMessages.size()); + } else { + assertEquals(context.getMessage("Number of log messages must be the same"), expectedMessages.size(), logMessages.size()); + } + + for (int i = 0; i < expectedMessages.size(); i++) { + BsonDocument expectedMessage = expectedMessages.get(i).asDocument().clone(); + for (Tweak tweak : tweaks) { + expectedMessage = tweak.apply(expectedMessage); + } + if (expectedMessage != null) { + valueMatcher.assertValuesMatch(expectedMessage, logMessageAsDocument(logMessages.get(i))); + } + } + + context.pop(); + } + + private static BsonDocument logMessageAsIgnoreMessageDocument(final LogMessage message) { + BsonDocument document = new BsonDocument(); + document.put("level", new BsonString(message.getLevel().name().toLowerCase())); + document.put("component", new BsonString(message.getComponent().getValue())); + document.put("data", new BsonDocument("message", new BsonString(message.getMessageId()))); + return document; + } + + static BsonDocument logMessageAsDocument(final LogMessage message) { + BsonDocument document = new BsonDocument(); + document.put("component", new BsonString(message.getComponent().getValue())); + document.put("level", new BsonString(message.getLevel().name().toLowerCase())); + document.put("hasFailure", BsonBoolean.valueOf(message.getException() != null)); + document.put("failureIsRedacted", + BsonBoolean.valueOf(message.getException() != null && exceptionIsRedacted(message.getException()))); + BsonDocument dataDocument = new BsonDocument(); + dataDocument.put("message", new BsonString(message.getMessageId())); + if (message.getException() != null) { + dataDocument.put("failure", new BsonString(message.getException().toString())); + } + + Collection entries = message.toStructuredLogMessage().getEntries(); + for (LogMessage.Entry entry : entries) { + dataDocument.put(entry.getName(), asBsonValue(entry.getValue())); + } + document.put("data", 
dataDocument); + + return document; + } + + private static boolean exceptionIsRedacted(final Throwable exception) { + return exception instanceof MongoCommandException + && MongoCommandExceptionUtils.SecurityInsensitiveResponseField.fieldNames() + .containsAll(((MongoCommandException) exception).getResponse().keySet()); + } + + private static BsonValue asBsonValue(final Object value) { + if (value == null) { + return BsonNull.VALUE; + } else if (value instanceof String) { + return new BsonString((String) value); + } else if (value instanceof Integer) { + return new BsonInt32((Integer) value); + } else if (value instanceof Long) { + return new BsonInt64((Long) value); + } else if (value instanceof Double) { + return new BsonDouble((Double) value); + } else if (value instanceof Boolean) { + return BsonBoolean.valueOf((Boolean) value); + } else { + return new BsonString(value.toString()); + } + } + + interface Tweak extends Function { + /** + * @param expectedMessage May be {@code null}, in which case the method simply returns {@code null}. + * This method may mutate {@code expectedMessage}. + * @return {@code null} iff matching {@code expectedMessage} with the actual message must be skipped. + */ + @Nullable + BsonDocument apply(@Nullable BsonDocument expectedMessage); + + static Tweak skip(final LogMessage.Entry.Name name) { + return expectedMessage -> { + if (expectedMessage == null) { + return null; + } else { + BsonDocument expectedData = expectedMessage.getDocument("data", null); + if (expectedData != null) { + expectedData.remove(name.getValue()); + } + return expectedMessage; + } + }; + } + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/OperationAsserter.java b/driver-sync/src/test/functional/com/mongodb/client/unified/OperationAsserter.java new file mode 100644 index 00000000000..2ee7bc2e594 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/OperationAsserter.java @@ -0,0 +1,23 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.unified; + +import org.bson.BsonDocument; + +interface OperationAsserter { + void assertOperation(BsonDocument operation, int index); +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/OperationResult.java b/driver-sync/src/test/functional/com/mongodb/client/unified/OperationResult.java new file mode 100644 index 00000000000..a464748d189 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/OperationResult.java @@ -0,0 +1,69 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
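To make LogMatcher's contract concrete, the sketch below shows the document shape that logMessageAsDocument produces (and that expected messages are matched against), plus a Tweak that strips a run-dependent field before matching. The DURATION_MS constant is an assumption about LogMessage.Entry.Name; everything else follows directly from the class above:

```java
import com.mongodb.internal.logging.LogMessage;
import org.bson.BsonDocument;
import org.bson.BsonString;

final class LogMatcherUsageSketch {
    static void demo() {
        // Shape of a matchable log-message document: component, level, and a
        // "data" sub-document keyed by structured entry names.
        BsonDocument expected = new BsonDocument()
                .append("component", new BsonString("command"))
                .append("level", new BsonString("debug"))
                .append("data", new BsonDocument("message", new BsonString("Command started")));

        // Remove a timing entry from "data" before matching, since durations
        // vary between runs. DURATION_MS is assumed to be an Entry.Name constant.
        LogMatcher.Tweak skipDuration = LogMatcher.Tweak.skip(LogMessage.Entry.Name.DURATION_MS);
        BsonDocument tweaked = skipDuration.apply(expected); // null only when matching must be skipped
    }
}
```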
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.unified; + +import org.bson.BsonValue; + +final class OperationResult { + + public static final OperationResult NONE = new OperationResult(); + + private final BsonValue result; + private final Exception exception; + + static OperationResult of(final BsonValue result) { + return new OperationResult(result); + } + + static OperationResult of(final Exception exception) { + return new OperationResult(exception); + } + + private OperationResult() { + result = null; + exception = null; + } + + private OperationResult(final BsonValue result) { + this.result = result; + this.exception = null; + } + + private OperationResult(final Exception exception) { + this.result = null; + this.exception = exception; + } + + public BsonValue getResult() { + return result; + } + + public Exception getException() { + return exception; + } + + @Override + public String toString() { + if (result != null) { + return result.toString(); + } else if (exception != null) { + return exception.toString(); + } else { + return ""; + } + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/RunOnRequirementsMatcher.java b/driver-sync/src/test/functional/com/mongodb/client/unified/RunOnRequirementsMatcher.java new file mode 100644 index 00000000000..a60901e8db4 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/RunOnRequirementsMatcher.java @@ -0,0 +1,129 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
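OperationResult is the small either-type that the helper classes below funnel every outcome through: a successful operation carries a BsonValue, a failed one carries the Exception, and OperationResult.NONE represents "nothing to report". A sketch of how a caller in the same package might branch on it; the describe method is illustrative only:

```java
import org.bson.BsonInt32;

final class OperationResultUsageSketch {
    // Exactly one of getResult()/getException() is non-null, or neither for NONE.
    static String describe(final OperationResult result) {
        if (result.getException() != null) {
            return "failed: " + result.getException();     // checked against an expectError clause
        } else if (result.getResult() != null) {
            return "succeeded: " + result.getResult();     // checked against an expectResult clause
        } else {
            return "no result (OperationResult.NONE)";
        }
    }

    public static void main(final String[] args) {
        System.out.println(describe(OperationResult.of(new BsonInt32(42))));
    }
}
```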
+ */ + +package com.mongodb.client.unified; + +import com.mongodb.MongoClientSettings; +import com.mongodb.connection.ServerVersion; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.BsonValue; + +import java.util.Map; +import java.util.Objects; + +import static com.mongodb.ClusterFixture.getMongoCryptVersion; +import static com.mongodb.ClusterFixture.getServerParameters; +import static com.mongodb.ClusterFixture.hasEncryptionTestsEnabled; +import static com.mongodb.JsonTestServerVersionChecker.getMaxServerVersionForField; +import static com.mongodb.JsonTestServerVersionChecker.getMinServerVersion; +import static com.mongodb.JsonTestServerVersionChecker.serverlessMatches; +import static com.mongodb.JsonTestServerVersionChecker.topologyMatches; + +final class RunOnRequirementsMatcher { + public static boolean runOnRequirementsMet(final BsonArray runOnRequirements, final MongoClientSettings clientSettings, + final ServerVersion serverVersion) { + for (BsonValue cur : runOnRequirements) { + boolean requirementMet = true; + BsonDocument requirement = cur.asDocument(); + + requirementLoop: + for (Map.Entry curRequirement : requirement.entrySet()) { + switch (curRequirement.getKey()) { + case "minServerVersion": + if (serverVersion.compareTo(getMinServerVersion(curRequirement.getValue().asString().getValue())) < 0) { + requirementMet = false; + break requirementLoop; + } + break; + case "maxServerVersion": + if (serverVersion.compareTo(getMaxServerVersionForField(curRequirement.getValue().asString().getValue())) > 0) { + requirementMet = false; + break requirementLoop; + } + break; + case "topologies": + BsonArray topologyTypes = curRequirement.getValue().asArray(); + if (!topologyMatches(topologyTypes)) { + requirementMet = false; + break requirementLoop; + } + break; + case "serverless": + if (!serverlessMatches(curRequirement.getValue().asString().getValue())) { + requirementMet = false; + break requirementLoop; + } + break; + case "auth": + boolean authRequired = curRequirement.getValue().asBoolean().getValue(); + boolean credentialPresent = clientSettings.getCredential() != null; + + if (authRequired != credentialPresent) { + requirementMet = false; + break requirementLoop; + } + break; + case "authMechanism": + boolean containsMechanism = getServerParameters() + .getArray("authenticationMechanisms") + .contains(curRequirement.getValue()); + if (!containsMechanism) { + requirementMet = false; + break requirementLoop; + } + break; + case "serverParameters": + BsonDocument serverParameters = getServerParameters(); + for (Map.Entry curParameter: curRequirement.getValue().asDocument().entrySet()) { + if (!Objects.equals(serverParameters.get(curParameter.getKey()), curParameter.getValue())) { + requirementMet = false; + break requirementLoop; + } + } + break; + case "csfle": + if (!hasEncryptionTestsEnabled()) { + requirementMet = false; + break requirementLoop; + } + if (curRequirement.getValue().isDocument()) { + BsonDocument csfleRequirements = curRequirement.getValue().asDocument(); + ServerVersion mongoCryptSharedLibVersion = getMongoCryptVersion(); + ServerVersion minLibmongocryptVersion = getMinServerVersion(csfleRequirements + .getString("minLibmongocryptVersion", new BsonString("0.0.0")).getValue()); + if (mongoCryptSharedLibVersion.compareTo(minLibmongocryptVersion) < 0) { + requirementMet = false; + break requirementLoop; + } + } + break; + default: + throw new UnsupportedOperationException("Unsupported runOnRequirement: " + 
curRequirement.getKey()); + } + } + + if (requirementMet) { + return true; + } + } + return false; + } + + private RunOnRequirementsMatcher() { + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/ServerSelectionLoggingTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/ServerSelectionLoggingTest.java new file mode 100644 index 00000000000..4e5e9021877 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/ServerSelectionLoggingTest.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.unified; + +import org.junit.jupiter.params.provider.Arguments; + +import java.util.Collection; + +final class ServerSelectionLoggingTest extends UnifiedSyncTest { + private static Collection data() { + return getTestData("server-selection/tests/logging"); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/SessionsTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/SessionsTest.java new file mode 100644 index 00000000000..57947cbf075 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/SessionsTest.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.unified; + +import org.junit.jupiter.params.provider.Arguments; + +import java.util.Collection; + +final class SessionsTest extends UnifiedSyncTest { + private static Collection data() { + return getTestData("sessions"); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedAuthTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedAuthTest.java new file mode 100644 index 00000000000..56c17a08c4c --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedAuthTest.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
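RunOnRequirementsMatcher applies OR-across-documents, AND-within-a-document semantics: a requirement document fails as soon as one of its keys is unsatisfied, and the array as a whole passes if any single document survives. A sketch of a call site; the two requirement documents are illustrative, and clientSettings/serverVersion are assumed to come from the test fixture:

```java
import com.mongodb.MongoClientSettings;
import com.mongodb.connection.ServerVersion;
import org.bson.BsonArray;

final class RunOnRequirementsSketch {
    // True if either requirement document is fully satisfied.
    static boolean shouldRun(final MongoClientSettings clientSettings, final ServerVersion serverVersion) {
        BsonArray runOnRequirements = BsonArray.parse(
                "[{'minServerVersion': '4.2', 'topologies': ['replicaset', 'sharded']},"
                        + " {'minServerVersion': '5.0', 'serverless': 'forbid'}]");
        return RunOnRequirementsMatcher.runOnRequirementsMet(runOnRequirements, clientSettings, serverVersion);
    }
}
```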
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.unified; + +import org.junit.jupiter.params.provider.Arguments; + +import java.util.Collection; + +final class UnifiedAuthTest extends UnifiedSyncTest { + private static Collection data() { + return getTestData("auth"); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedClientEncryptionHelper.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedClientEncryptionHelper.java new file mode 100644 index 00000000000..dbc919cdc58 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedClientEncryptionHelper.java @@ -0,0 +1,324 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.unified; + +import com.mongodb.bulk.BulkWriteResult; +import com.mongodb.client.model.vault.DataKeyOptions; +import com.mongodb.client.model.vault.EncryptOptions; +import com.mongodb.client.model.vault.RewrapManyDataKeyOptions; +import com.mongodb.client.model.vault.RewrapManyDataKeyResult; +import com.mongodb.client.result.DeleteResult; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.BsonValue; + +import java.util.ArrayList; +import java.util.Base64; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.function.Supplier; + +import static com.mongodb.ClusterFixture.getEnv; +import static java.lang.Math.toIntExact; + +public final class UnifiedClientEncryptionHelper { + private static final BsonDocument PLACEHOLDER = BsonDocument.parse("{'$$placeholder': 1}"); + private final Entities entities; + + UnifiedClientEncryptionHelper(final Entities entities) { + this.entities = entities; + } + + private OperationResult resultOf(final Supplier operationResult) { + try { + return OperationResult.of(operationResult.get()); + } catch (Exception e) { + return OperationResult.of(e); + } + } + + static Map> createKmsProvidersMap(final BsonDocument kmsProviders) { + Map> kmsProvidersMap = new HashMap<>(); + for (String kmsProviderKey : kmsProviders.keySet()) { + BsonDocument kmsProviderOptions = kmsProviders.get(kmsProviderKey).asDocument(); + Map kmsProviderMap = new HashMap<>(); + switch (kmsProviderKey) { + case "aws": + case "aws:name1": + // awsTemporary uses `aws` and includes a `sessionToken`. 
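+ // (Illustrative aside: a spec file may configure {'aws': {'accessKeyId': {'$$placeholder': 1}, 'secretAccessKey': {'$$placeholder': 1}}}; each {'$$placeholder': 1} value, i.e. the PLACEHOLDER document parsed above, tells setKmsProviderProperty to substitute the environment-derived value instead of a literal from the test file.)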
+ if (kmsProviderOptions.containsKey("sessionToken")) { + setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "accessKeyId", "AWS_TEMP_ACCESS_KEY_ID"); + setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "secretAccessKey", "AWS_TEMP_SECRET_ACCESS_KEY"); + setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "sessionToken", "AWS_TEMP_SESSION_TOKEN"); + } else { + setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "accessKeyId", "AWS_ACCESS_KEY_ID"); + setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "secretAccessKey", "AWS_SECRET_ACCESS_KEY"); + } + break; + case "aws:name2": + setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "accessKeyId", "AWS_ACCESS_KEY_ID_AWS_KMS_NAMED"); + setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "secretAccessKey", "AWS_SECRET_ACCESS_KEY_AWS_KMS_NAMED"); + break; + case "awsTemporary": + setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "accessKeyId", "AWS_TEMP_ACCESS_KEY_ID"); + setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "secretAccessKey", "AWS_TEMP_SECRET_ACCESS_KEY"); + setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "sessionToken", "AWS_TEMP_SESSION_TOKEN"); + break; + case "awsTemporaryNoSessionToken": + setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "accessKeyId", "AWS_TEMP_ACCESS_KEY_ID"); + setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "secretAccessKey", "AWS_TEMP_SECRET_ACCESS_KEY"); + break; + case "azure": + case "azure:name1": + setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "tenantId", "AZURE_TENANT_ID"); + setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "clientId", "AZURE_CLIENT_ID"); + setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "clientSecret", "AZURE_CLIENT_SECRET"); + break; + case "gcp": + case "gcp:name1": + setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "email", "GCP_EMAIL"); + setKmsProviderProperty(kmsProviderMap, kmsProviderOptions, "privateKey", "GCP_PRIVATE_KEY"); + break; + case "kmip": + case "kmip:name1": + setKmsProviderProperty( + kmsProviderMap, + kmsProviderOptions, + "endpoint", + () -> getEnv("org.mongodb.test.kmipEndpoint", "localhost:5698")); + break; + case "local": + case "local:name1": + setKmsProviderProperty( + kmsProviderMap, + kmsProviderOptions, + "key", + UnifiedClientEncryptionHelper::localKmsProviderKey); + break; + case "local:name2": + setKmsProviderProperty( + kmsProviderMap, + kmsProviderOptions, + "key", + () -> decodeKmsProviderString(kmsProviderOptions.getString("key").getValue())); + break; + default: + throw new UnsupportedOperationException("Unsupported KMS provider: " + kmsProviderKey); + } + kmsProvidersMap.put(kmsProviderKey, kmsProviderMap); + } + return kmsProvidersMap; + } + + public static byte[] localKmsProviderKey() { + return decodeKmsProviderString("Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBMUN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZ" + + "GJkTXVyZG9uSjFk"); + } + + public static byte[] decodeKmsProviderString(final String key) { + return Base64.getDecoder().decode(key); + } + + private static void setKmsProviderProperty(final Map kmsProviderMap, + final BsonDocument kmsProviderOptions, final String key, final String propertyName) { + setKmsProviderProperty( + kmsProviderMap, + kmsProviderOptions, + key, + () -> { + if (getEnv(propertyName) != null) { + return getEnv(propertyName); + } + throw new UnsupportedOperationException("Missing system property for: " + key); + }); + } + + private static void 
setKmsProviderProperty(final Map kmsProviderMap, + final BsonDocument kmsProviderOptions, final String key, + @Nullable final Supplier placeholderPropertySupplier) { + if (kmsProviderOptions.containsKey(key)) { + boolean isPlaceholderValue = kmsProviderOptions.get(key).equals(PLACEHOLDER); + if (isPlaceholderValue) { + if (placeholderPropertySupplier == null) { + throw new UnsupportedOperationException("Placeholder is not supported for: " + key + " :: " + kmsProviderOptions.toJson()); + } + kmsProviderMap.put(key, placeholderPropertySupplier.get()); + return; + } + + BsonValue kmsValue = kmsProviderOptions.get(key); + if (kmsValue.isString() && !key.equals("sessionToken")) { + kmsProviderMap.put(key, decodeKmsProviderString(kmsValue.asString().getValue())); + } else { + kmsProviderMap.put(key, kmsValue); + } + } + } + + OperationResult executeCreateDataKey(final BsonDocument operation) { + ClientEncryption clientEncryption = entities.getClientEncryption(operation.getString("object").getValue()); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + + DataKeyOptions dataKeyOptions = new DataKeyOptions(); + BsonDocument options = arguments.getDocument("opts", new BsonDocument()); + for (String key : options.keySet()) { + switch (key) { + case "keyAltNames": + List keyAltNames = new ArrayList<>(); + options.getArray("keyAltNames", new BsonArray()).forEach(v -> keyAltNames.add(v.asString().getValue())); + dataKeyOptions.keyAltNames(keyAltNames); + break; + case "masterKey": + dataKeyOptions.masterKey(options.getDocument("masterKey")); + break; + case "keyMaterial": + dataKeyOptions.keyMaterial(options.getBinary("keyMaterial").getData()); + break; + default: + throw new UnsupportedOperationException("Missing key handler for: " + key + " :: " + options.toJson()); + } + } + return resultOf(() -> clientEncryption.createDataKey(arguments.getString("kmsProvider").getValue(), dataKeyOptions)); + } + + + OperationResult executeAddKeyAltName(final BsonDocument operation) { + ClientEncryption clientEncryption = entities.getClientEncryption(operation.getString("object").getValue()); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + return resultOf(() -> clientEncryption.addKeyAltName(arguments.getBinary("id"), arguments.getString("keyAltName").getValue())); + } + + OperationResult executeRemoveKeyAltName(final BsonDocument operation) { + ClientEncryption clientEncryption = entities.getClientEncryption(operation.getString("object").getValue()); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + return resultOf(() -> clientEncryption.removeKeyAltName(arguments.getBinary("id"), arguments.getString("keyAltName").getValue())); + } + + OperationResult executeDeleteKey(final BsonDocument operation) { + ClientEncryption clientEncryption = entities.getClientEncryption(operation.getString("object").getValue()); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + return resultOf(() -> toExpected(clientEncryption.deleteKey(arguments.getBinary("id")))); + } + + OperationResult executeGetKey(final BsonDocument operation) { + ClientEncryption clientEncryption = entities.getClientEncryption(operation.getString("object").getValue()); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + return resultOf(() -> clientEncryption.getKey(arguments.getBinary("id"))); + } + + OperationResult executeGetKeyByAltName(final BsonDocument operation) { + ClientEncryption 
clientEncryption = entities.getClientEncryption(operation.getString("object").getValue()); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + return resultOf(() -> clientEncryption.getKeyByAltName(arguments.getString("keyAltName").getValue())); + } + + OperationResult executeGetKeys(final BsonDocument operation) { + ClientEncryption clientEncryption = entities.getClientEncryption(operation.getString("object").getValue()); + return resultOf(() -> new BsonArray(clientEncryption.getKeys().into(new ArrayList<>()))); + } + + OperationResult executeRewrapManyDataKey(final BsonDocument operation) { + ClientEncryption clientEncryption = entities.getClientEncryption(operation.getString("object").getValue()); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + BsonDocument options = arguments.getDocument("opts", new BsonDocument()); + + BsonDocument filter = arguments.getDocument("filter"); + RewrapManyDataKeyOptions rewrapManyDataKeyOptions = new RewrapManyDataKeyOptions(); + for (String key : options.keySet()) { + switch (key) { + case "provider": + rewrapManyDataKeyOptions.provider(options.getString("provider").getValue()); + break; + case "masterKey": + rewrapManyDataKeyOptions.masterKey(options.getDocument("masterKey")); + break; + default: + throw new UnsupportedOperationException("Missing key handler for: " + key + " :: " + options.toJson()); + } + } + return resultOf(() -> toExpected(clientEncryption.rewrapManyDataKey(filter, rewrapManyDataKeyOptions))); + } + + OperationResult executeEncrypt(final BsonDocument operation) { + ClientEncryption clientEncryption = entities.getClientEncryption(operation.getString("object").getValue()); + BsonDocument arguments = operation.getDocument("arguments"); + BsonDocument options = arguments.getDocument("opts"); + + BsonString value = arguments.getString("value"); + String algorithm = options.remove("algorithm") + .asString() + .getValue(); + + EncryptOptions encryptOptions = new EncryptOptions(algorithm); + for (String key : options.keySet()) { + switch (key) { + case "keyAltName": + encryptOptions.keyAltName(options.getString("keyAltName").getValue()); + break; + default: + throw new UnsupportedOperationException("Missing key handler for: " + key + " :: " + options.toJson()); + } + } + return resultOf(() -> clientEncryption.encrypt(value, encryptOptions)); + } + + + OperationResult executeDecrypt(final BsonDocument operation) { + ClientEncryption clientEncryption = entities.getClientEncryption(operation.getString("object").getValue()); + BsonDocument arguments = operation.getDocument("arguments"); + BsonBinary value = arguments.getBinary("value"); + + return resultOf(() -> clientEncryption.decrypt(value)); + } + + private BsonDocument toExpected(final DeleteResult result) { + if (result.wasAcknowledged()) { + return new BsonDocument("deletedCount", new BsonInt32(toIntExact(result.getDeletedCount()))); + } else { + return new BsonDocument(); + } + } + + private BsonDocument toExpected(final RewrapManyDataKeyResult result) { + if (result.getBulkWriteResult() != null) { + return new BsonDocument("bulkWriteResult", toExpected(result.getBulkWriteResult())); + } + return new BsonDocument(); + } + + private BsonDocument toExpected(final BulkWriteResult result) { + if (result.wasAcknowledged()) { + BsonDocument upsertedIds = new BsonDocument(); + result.getUpserts().forEach(u -> upsertedIds.put("" + u.getIndex(), u.getId())); + return new BsonDocument("insertedCount", new 
BsonInt32(result.getInsertedCount())) + .append("matchedCount", new BsonInt32(result.getMatchedCount())) + .append("modifiedCount", new BsonInt32(result.getModifiedCount())) + .append("deletedCount", new BsonInt32(result.getDeletedCount())) + .append("upsertedCount", new BsonInt32(result.getUpserts().size())) + .append("upsertedIds", upsertedIds); + } + + return new BsonDocument(); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudHelper.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudHelper.java new file mode 100644 index 00000000000..aa220d75f72 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudHelper.java @@ -0,0 +1,2410 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.unified; + +import com.mongodb.CursorType; +import com.mongodb.MongoDriverInformation; +import com.mongodb.MongoNamespace; +import com.mongodb.ReadConcern; +import com.mongodb.ReadConcernLevel; +import com.mongodb.ReadPreference; +import com.mongodb.ServerAddress; +import com.mongodb.ServerCursor; +import com.mongodb.Tag; +import com.mongodb.TagSet; +import com.mongodb.TransactionOptions; +import com.mongodb.WriteConcern; +import com.mongodb.assertions.Assertions; +import com.mongodb.bulk.BulkWriteResult; +import com.mongodb.client.AggregateIterable; +import com.mongodb.client.ChangeStreamIterable; +import com.mongodb.client.ClientSession; +import com.mongodb.client.DistinctIterable; +import com.mongodb.client.FindIterable; +import com.mongodb.client.ListCollectionsIterable; +import com.mongodb.client.ListDatabasesIterable; +import com.mongodb.client.ListIndexesIterable; +import com.mongodb.client.ListSearchIndexesIterable; +import com.mongodb.client.MongoChangeStreamCursor; +import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoCluster; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.MongoCursor; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.MongoIterable; +import com.mongodb.client.cursor.TimeoutMode; +import com.mongodb.client.model.BulkWriteOptions; +import com.mongodb.client.model.ChangeStreamPreAndPostImagesOptions; +import com.mongodb.client.model.ClusteredIndexOptions; +import com.mongodb.client.model.Collation; +import com.mongodb.client.model.CollationStrength; +import com.mongodb.client.model.CountOptions; +import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.DeleteManyModel; +import com.mongodb.client.model.DeleteOneModel; +import com.mongodb.client.model.DeleteOptions; +import com.mongodb.client.model.DropCollectionOptions; +import com.mongodb.client.model.DropIndexOptions; +import com.mongodb.client.model.EstimatedDocumentCountOptions; +import com.mongodb.client.model.FindOneAndDeleteOptions; +import 
com.mongodb.client.model.FindOneAndReplaceOptions; +import com.mongodb.client.model.FindOneAndUpdateOptions; +import com.mongodb.client.model.IndexOptions; +import com.mongodb.client.model.InsertManyOptions; +import com.mongodb.client.model.InsertOneModel; +import com.mongodb.client.model.InsertOneOptions; +import com.mongodb.client.model.RenameCollectionOptions; +import com.mongodb.client.model.ReplaceOneModel; +import com.mongodb.client.model.ReplaceOptions; +import com.mongodb.client.model.ReturnDocument; +import com.mongodb.client.model.SearchIndexModel; +import com.mongodb.client.model.SearchIndexType; +import com.mongodb.client.model.TimeSeriesGranularity; +import com.mongodb.client.model.TimeSeriesOptions; +import com.mongodb.client.model.UpdateManyModel; +import com.mongodb.client.model.UpdateOneModel; +import com.mongodb.client.model.UpdateOptions; +import com.mongodb.client.model.ValidationOptions; +import com.mongodb.client.model.WriteModel; +import com.mongodb.client.model.bulk.ClientBulkWriteOptions; +import com.mongodb.client.model.bulk.ClientBulkWriteResult; +import com.mongodb.client.model.bulk.ClientDeleteManyOptions; +import com.mongodb.client.model.bulk.ClientDeleteOneOptions; +import com.mongodb.client.model.bulk.ClientNamespacedWriteModel; +import com.mongodb.client.model.bulk.ClientReplaceOneOptions; +import com.mongodb.client.model.bulk.ClientUpdateManyOptions; +import com.mongodb.client.model.bulk.ClientUpdateOneOptions; +import com.mongodb.client.model.bulk.ClientUpdateResult; +import com.mongodb.client.model.changestream.ChangeStreamDocument; +import com.mongodb.client.model.changestream.FullDocument; +import com.mongodb.client.model.changestream.FullDocumentBeforeChange; +import com.mongodb.client.result.DeleteResult; +import com.mongodb.client.result.InsertManyResult; +import com.mongodb.client.result.InsertOneResult; +import com.mongodb.client.result.UpdateResult; +import com.mongodb.internal.client.model.bulk.AbstractClientDeleteOptions; +import com.mongodb.internal.client.model.bulk.AbstractClientUpdateOptions; +import com.mongodb.internal.client.model.bulk.ConcreteClientDeleteManyOptions; +import com.mongodb.internal.client.model.bulk.ConcreteClientDeleteOneOptions; +import com.mongodb.internal.client.model.bulk.ConcreteClientUpdateManyOptions; +import com.mongodb.internal.client.model.bulk.ConcreteClientUpdateOneOptions; +import com.mongodb.lang.NonNull; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonDocument; +import org.bson.BsonDocumentWriter; +import org.bson.BsonElement; +import org.bson.BsonInt32; +import org.bson.BsonInt64; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.codecs.BsonCodecProvider; +import org.bson.codecs.Codec; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.ValueCodecProvider; +import org.bson.codecs.configuration.CodecRegistries; + +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Supplier; + +import static com.mongodb.client.model.bulk.ClientBulkWriteOptions.clientBulkWriteOptions; +import static com.mongodb.client.model.bulk.ClientReplaceOneOptions.clientReplaceOneOptions; +import static 
java.lang.String.format; +import static java.util.Arrays.asList; +import static java.util.Collections.singleton; +import static java.util.Objects.requireNonNull; +import static java.util.Optional.ofNullable; +import static java.util.stream.Collectors.toList; + +@SuppressWarnings("deprecation") +final class UnifiedCrudHelper extends UnifiedHelper { + private final Entities entities; + private final String testDescription; + private final AtomicInteger uniqueIdGenerator = new AtomicInteger(); + + private final Codec> changeStreamDocumentCodec = ChangeStreamDocument.createCodec( + BsonDocument.class, + CodecRegistries.fromProviders(asList(new BsonCodecProvider(), new ValueCodecProvider()))); + + UnifiedCrudHelper(final Entities entities, final String testDescription) { + this.entities = entities; + this.testDescription = testDescription; + } + + static ReadConcern asReadConcern(final BsonDocument readConcernDocument) { + if (readConcernDocument.size() > 1) { + throw new UnsupportedOperationException("Unsupported read concern properties"); + } + return new ReadConcern(ReadConcernLevel.fromString(readConcernDocument.getString("level").getValue())); + } + + static WriteConcern asWriteConcern(final BsonDocument writeConcernDocument) { + WriteConcern writeConcern = WriteConcern.ACKNOWLEDGED; + + for (Map.Entry entry: writeConcernDocument.entrySet()) { + switch (entry.getKey()) { + case "w": + writeConcern = writeConcernDocument.isString("w") + ? writeConcern.withW(writeConcernDocument.getString("w").getValue()) + : writeConcern.withW(writeConcernDocument.getInt32("w").intValue()); + break; + case "journal": + writeConcern = writeConcern.withJournal(entry.getValue().asBoolean().getValue()); + break; + case "wtimeoutMS": + writeConcern = writeConcern.withWTimeout(entry.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + entry.getKey()); + } + } + + return writeConcern; + } + + public static ReadPreference asReadPreference(final BsonDocument readPreferenceDocument) { + List supportedKeys = asList("mode", "tagSets", "maxStalenessSeconds"); + List unsupportedKeys = readPreferenceDocument.keySet().stream().filter(key -> !supportedKeys.contains(key)).collect(toList()); + if (!unsupportedKeys.isEmpty()) { + throw new UnsupportedOperationException("Unsupported read preference keys: " + unsupportedKeys + " in " + readPreferenceDocument); + } + String mode = readPreferenceDocument.getString("mode").getValue(); + if (readPreferenceDocument.size() == 1) { + return ReadPreference.valueOf(mode); + } + List tagSets = tagSets(readPreferenceDocument.getArray("tagSets", new BsonArray())); + BsonValue maxStalenessSecondsBson = readPreferenceDocument.get("maxStalenessSeconds"); + Integer maxStalenessSeconds = maxStalenessSecondsBson == null ? 
null : maxStalenessSecondsBson.asInt32().intValue(); + if (maxStalenessSecondsBson == null) { + return ReadPreference.valueOf(mode, tagSets); + } + return ReadPreference.valueOf(mode, tagSets, maxStalenessSeconds, TimeUnit.SECONDS); + } + + private static List tagSets(final BsonArray tagSetsBson) { + return tagSetsBson.stream() + .map(tagSetBson -> new TagSet(tagSetBson.asDocument() + .entrySet() + .stream() + .map(entry -> new Tag(entry.getKey(), entry.getValue().asString().getValue())) + .collect(toList()))) + .collect(toList()); + } + + private static Collation asCollation(final BsonDocument collationDocument) { + Collation.Builder builder = Collation.builder(); + + for (Map.Entry entry: collationDocument.entrySet()) { + switch (entry.getKey()) { + case "locale": + builder.locale(entry.getValue().asString().getValue()); + break; + case "strength": + builder.collationStrength(CollationStrength.fromInt(entry.getValue().asNumber().intValue())); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + entry.getKey()); + } + } + + return builder.build(); + } + + private OperationResult resultOf(final Supplier operationResult) { + try { + return OperationResult.of(operationResult.get()); + } catch (Exception e) { + return OperationResult.of(e); + } + } + + @Nullable + private ClientSession getSession(final BsonDocument arguments) { + if (arguments.containsKey("session")) { + return entities.getSession(arguments.getString("session").asString().getValue()); + } else { + return null; + } + } + + + OperationResult executeListDatabases(final BsonDocument operation) { + MongoCluster mongoCluster = getMongoCluster(operation); + + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + ClientSession session = getSession(arguments); + ListDatabasesIterable iterable = session == null + ? mongoCluster.listDatabases(BsonDocument.class) + : mongoCluster.listDatabases(session, BsonDocument.class); + + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "session": + break; + case "filter": + iterable.filter(cur.getValue().asDocument()); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + + return resultOf(() -> + new BsonArray(iterable.into(new ArrayList<>()))); + } + + OperationResult executeListDatabaseNames(final BsonDocument operation) { + MongoCluster mongoCluster = getMongoCluster(operation); + + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + ClientSession session = getSession(arguments); + MongoIterable iterable = session == null + ? mongoCluster.listDatabaseNames() + : mongoCluster.listDatabaseNames(session); + + for (Map.Entry cur : arguments.entrySet()) { + //noinspection SwitchStatementWithTooFewBranches + switch (cur.getKey()) { + case "session": + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + + return resultOf(() -> + new BsonArray(iterable.into(new ArrayList<>()).stream().map(BsonString::new).collect(toList()))); + } + + OperationResult executeListCollections(final BsonDocument operation) { + MongoDatabase database = getMongoDatabase(operation); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + ClientSession session = getSession(arguments); + ListCollectionsIterable iterable = session == null + ? 
database.listCollections(BsonDocument.class) + : database.listCollections(session, BsonDocument.class); + return resultOf(() -> { + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "session": + break; + case "filter": + iterable.filter(cur.getValue().asDocument()); + break; + case "batchSize": + iterable.batchSize(cur.getValue().asNumber().intValue()); + break; + case "timeoutMode": + setTimeoutMode(iterable, cur); + break; + case "maxTimeMS": + iterable.maxTime(cur.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + + return new BsonArray(iterable.into(new ArrayList<>())); + }); + } + + OperationResult executeListCollectionNames(final BsonDocument operation) { + MongoDatabase database = getMongoDatabase(operation); + + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + ClientSession session = getSession(arguments); + MongoIterable iterable = session == null + ? database.listCollectionNames() + : database.listCollectionNames(session); + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "session": + break; + case "filter": + BsonDocument filter = cur.getValue().asDocument(); + if (!filter.isEmpty()) { + throw new UnsupportedOperationException("The driver does not support filtering of collection names"); + } + break; + case "batchSize": + iterable.batchSize(cur.getValue().asNumber().intValue()); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + + return resultOf(() -> + new BsonArray(iterable.into(new ArrayList<>()).stream().map(BsonString::new).collect(toList()))); + } + + OperationResult executeListIndexes(final BsonDocument operation) { + return resultOf(() -> { + ListIndexesIterable iterable = createListIndexesIterable(operation); + return new BsonArray(iterable.into(new ArrayList<>())); + }); + } + + OperationResult executeListIndexNames(final BsonDocument operation) { + return resultOf(() -> { + ListIndexesIterable iterable = createListIndexesIterable(operation); + return new BsonArray(iterable.into(new ArrayList<>()).stream().map(document -> document.getString("name")).collect(toList())); + }); + } + + private ListIndexesIterable createListIndexesIterable(final BsonDocument operation) { + MongoCollection collection = getMongoCollection(operation); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + ClientSession session = getSession(arguments); + ListIndexesIterable iterable = session == null + ? collection.listIndexes(BsonDocument.class) + : collection.listIndexes(session, BsonDocument.class); + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "session": + break; + case "batchSize": + iterable.batchSize(cur.getValue().asNumber().intValue()); + break; + case "timeoutMode": + setTimeoutMode(iterable, cur); + break; + case "maxTimeMS": + iterable.maxTime(cur.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + return iterable; + } + + OperationResult executeFind(final BsonDocument operation) { + return resultOf(() -> { + FindIterable iterable = createFindIterable(operation); + return new BsonArray(iterable.into(new ArrayList<>())); + }); + } + + /** + * There is no explicit {@code findOne()} method in {@link MongoCollection} class. 
+ * Its behavior is emulated by {@link FindIterable#first()}, which closes the cursor on the server + * by setting {@code batchSize} and {@code limit} appropriately. + */ + OperationResult executeFindOne(final BsonDocument operation) { + return resultOf(() -> createFindIterable(operation).first()); + } + + OperationResult createFindCursor(final BsonDocument operation) { + return resultOf(() -> { + FindIterable iterable = createFindIterable(operation); + entities.addCursor(operation.getString("saveResultAsEntity", new BsonString(createRandomEntityId())).getValue(), + iterable.cursor()); + return null; + }); + } + + @NonNull + private FindIterable createFindIterable(final BsonDocument operation) { + MongoCollection collection = getMongoCollection(operation); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + ClientSession session = getSession(arguments); + BsonDocument filter = arguments.getDocument("filter"); + FindIterable iterable = session == null ? collection.find(filter) : collection.find(session, filter); + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "session": + case "filter": + break; + case "projection": + iterable.projection(cur.getValue().asDocument()); + break; + case "sort": + iterable.sort(cur.getValue().asDocument()); + break; + case "batchSize": + iterable.batchSize(cur.getValue().asInt32().intValue()); + break; + case "maxTimeMS": + iterable.maxTime(cur.getValue().asInt32().longValue(), TimeUnit.MILLISECONDS); + break; + case "maxAwaitTimeMS": + iterable.maxAwaitTime(cur.getValue().asInt32().longValue(), TimeUnit.MILLISECONDS); + break; + case "skip": + iterable.skip(cur.getValue().asInt32().intValue()); + break; + case "limit": + iterable.limit(cur.getValue().asInt32().intValue()); + break; + case "allowDiskUse": + iterable.allowDiskUse(cur.getValue().asBoolean().getValue()); + break; + case "hint": + if (cur.getValue().isString()) { + iterable.hintString(cur.getValue().asString().getValue()); + } else { + iterable.hint(cur.getValue().asDocument()); + } + break; + case "collation": + iterable.collation(asCollation(cur.getValue().asDocument())); + break; + case "comment": + iterable.comment(cur.getValue()); + break; + case "let": + iterable.let(cur.getValue().asDocument()); + break; + case "min": + iterable.min(cur.getValue().asDocument()); + break; + case "max": + iterable.max(cur.getValue().asDocument()); + break; + case "returnKey": + iterable.returnKey(cur.getValue().asBoolean().getValue()); + break; + case "showRecordId": + iterable.showRecordId(cur.getValue().asBoolean().getValue()); + break; + case "cursorType": + setCursorType(iterable, cur); + break; + case "timeoutMode": + setTimeoutMode(iterable, cur); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + return iterable; + } + + @SuppressWarnings("deprecation") //maxTimeMS + OperationResult executeDistinct(final BsonDocument operation) { + MongoCollection collection = getMongoCollection(operation); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + ClientSession session = getSession(arguments); + + BsonString fieldName = arguments.getString("fieldName"); + DistinctIterable iterable = session == null + ? 
collection.distinct(fieldName.getValue(), BsonValue.class) + : collection.distinct(session, fieldName.getValue(), BsonValue.class); + + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "fieldName": + case "session": + break; + case "comment": + iterable.comment(cur.getValue()); + break; + case "hint": + if (cur.getValue().isString()) { + iterable.hintString(cur.getValue().asString().getValue()); + } else { + iterable.hint(cur.getValue().asDocument()); + } + break; + case "filter": + iterable.filter(cur.getValue().asDocument()); + break; + case "maxTimeMS": + iterable.maxTime(cur.getValue().asInt32().intValue(), TimeUnit.MILLISECONDS); + break; + case "collation": + iterable.collation(asCollation(cur.getValue().asDocument())); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + + return resultOf(() -> + new BsonArray(iterable.into(new ArrayList<>()))); + } + + @SuppressWarnings("deprecation") + OperationResult executeMapReduce(final BsonDocument operation) { + MongoCollection collection = getMongoCollection(operation); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + ClientSession session = getSession(arguments); + + String mapFunction = arguments.get("map").asJavaScript().getCode(); + String reduceFunction = arguments.get("reduce").asJavaScript().getCode(); + com.mongodb.client.MapReduceIterable iterable = session == null + ? collection.mapReduce(mapFunction, reduceFunction) + : collection.mapReduce(session, mapFunction, reduceFunction); + + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "map": + case "reduce": + case "session": + break; + case "out": + if (!cur.getValue().asDocument().equals(new BsonDocument("inline", new BsonInt32(1)))) { + throw new UnsupportedOperationException("Unsupported value for out argument: " + cur.getValue()); + } + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + + return resultOf(() -> + new BsonArray(iterable.into(new ArrayList<>()))); + } + + @SuppressWarnings("deprecation") //maxTimeMS + OperationResult executeFindOneAndUpdate(final BsonDocument operation) { + MongoCollection collection = getMongoCollection(operation); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + + BsonDocument filter = arguments.getDocument("filter").asDocument(); + BsonValue update = arguments.get("update"); + ClientSession session = getSession(arguments); + FindOneAndUpdateOptions options = new FindOneAndUpdateOptions(); + + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "filter": + case "update": + case "session": + break; + case "upsert": + options.upsert(cur.getValue().asBoolean().getValue()); + break; + case "sort": + options.sort(cur.getValue().asDocument()); + break; + case "returnDocument": + switch (cur.getValue().asString().getValue()) { + case "Before": + options.returnDocument(ReturnDocument.BEFORE); + break; + case "After": + options.returnDocument(ReturnDocument.AFTER); + break; + default: + throw new UnsupportedOperationException("Can't happen"); + } + break; + case "projection": + options.projection(cur.getValue().asDocument()); + break; + case "hint": + if (cur.getValue().isString()) { + options.hintString(cur.getValue().asString().getValue()); + } else { + options.hint(cur.getValue().asDocument()); + } + break; + case "comment": + options.comment(cur.getValue()); + break; + case 
"let": + options.let(cur.getValue().asDocument()); + break; + case "maxTimeMS": + options.maxTime(cur.getValue().asInt32().intValue(), TimeUnit.MILLISECONDS); + break; + case "collation": + options.collation(asCollation(cur.getValue().asDocument())); + break; + case "arrayFilters": + options.arrayFilters(cur.getValue().asArray().stream().map(BsonValue::asDocument).collect(toList())); + break; + case "bypassDocumentValidation": + options.bypassDocumentValidation(cur.getValue().asBoolean().getValue()); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + + return resultOf(() -> { + if (session == null) { + return update.isArray() + ? collection.findOneAndUpdate(filter, update.asArray().stream().map(BsonValue::asDocument).collect(toList()), + options) + : collection.findOneAndUpdate(filter, update.asDocument(), options); + } else { + return update.isArray() + ? collection.findOneAndUpdate(session, filter, + update.asArray().stream().map(BsonValue::asDocument).collect(toList()), options) + : collection.findOneAndUpdate(session, filter, update.asDocument(), options); + + } + }); + } + + @SuppressWarnings("deprecation") + OperationResult executeFindOneAndReplace(final BsonDocument operation) { + MongoCollection collection = getMongoCollection(operation); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + ClientSession session = getSession(arguments); + BsonDocument filter = arguments.getDocument("filter").asDocument(); + BsonDocument replacement = arguments.getDocument("replacement").asDocument(); + FindOneAndReplaceOptions options = new FindOneAndReplaceOptions(); + + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "filter": + case "replacement": + case "session": + break; + case "upsert": + options.upsert(cur.getValue().asBoolean().getValue()); + break; + case "sort": + options.sort(cur.getValue().asDocument()); + break; + case "returnDocument": + switch (cur.getValue().asString().getValue()) { + case "Before": + options.returnDocument(ReturnDocument.BEFORE); + break; + case "After": + options.returnDocument(ReturnDocument.AFTER); + break; + default: + throw new UnsupportedOperationException("Can't happen"); + } + break; + case "projection": + options.projection(cur.getValue().asDocument()); + break; + case "hint": + if (cur.getValue().isString()) { + options.hintString(cur.getValue().asString().getValue()); + } else { + options.hint(cur.getValue().asDocument()); + } + break; + case "comment": + options.comment(cur.getValue()); + break; + case "let": + options.let(cur.getValue().asDocument()); + break; + case "maxTimeMS": + options.maxTime(cur.getValue().asInt32().intValue(), TimeUnit.MILLISECONDS); + break; + case "collation": + options.collation(asCollation(cur.getValue().asDocument())); + break; + case "bypassDocumentValidation": + options.bypassDocumentValidation(cur.getValue().asBoolean().getValue()); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + + return resultOf(() -> { + if (session == null) { + return collection.findOneAndReplace(filter, replacement, options); + } else { + return collection.findOneAndReplace(session, filter, replacement, options); + } + }); + } + + @SuppressWarnings("deprecation") //maxTimeMS + OperationResult executeFindOneAndDelete(final BsonDocument operation) { + MongoCollection collection = getMongoCollection(operation); + BsonDocument arguments = 
operation.getDocument("arguments", new BsonDocument()); + ClientSession session = getSession(arguments); + BsonDocument filter = arguments.getDocument("filter").asDocument(); + FindOneAndDeleteOptions options = new FindOneAndDeleteOptions(); + + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "filter": + case "session": + break; + case "projection": + options.projection(cur.getValue().asDocument()); + break; + case "sort": + options.sort(cur.getValue().asDocument()); + break; + case "hint": + if (cur.getValue().isString()) { + options.hintString(cur.getValue().asString().getValue()); + } else { + options.hint(cur.getValue().asDocument()); + } + break; + case "collation": + options.collation(asCollation(cur.getValue().asDocument())); + break; + case "comment": + options.comment(cur.getValue()); + break; + case "let": + options.let(cur.getValue().asDocument()); + break; + case "maxTimeMS": + options.maxTime(cur.getValue().asInt32().intValue(), TimeUnit.MILLISECONDS); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + + return resultOf(() -> { + if (session == null) { + return collection.findOneAndDelete(filter, options); + } else { + return collection.findOneAndDelete(session, filter, options); + } + }); + } + + OperationResult executeAggregate(final BsonDocument operation) { + String entityName = operation.getString("object").getValue(); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + ClientSession session = getSession(arguments); + List pipeline = arguments.getArray("pipeline").stream().map(BsonValue::asDocument).collect(toList()); + AggregateIterable iterable; + if (entities.hasDatabase(entityName)) { + Long timeoutMS = getAndRemoveTimeoutMS(operation.getDocument("arguments")); + MongoDatabase database = entities.getDatabaseWithTimeoutMS(entityName, timeoutMS); + iterable = session == null + ? database.aggregate(requireNonNull(pipeline), BsonDocument.class) + : database.aggregate(session, requireNonNull(pipeline), BsonDocument.class); + } else if (entities.hasCollection(entityName)) { + Long timeoutMS = getAndRemoveTimeoutMS(operation.getDocument("arguments")); + MongoCollection collection = entities.getCollectionWithTimeoutMS(entityName, timeoutMS); + iterable = session == null + ? 
collection.aggregate(requireNonNull(pipeline)) + : collection.aggregate(session, requireNonNull(pipeline)); + } else { + throw new UnsupportedOperationException("Unsupported entity type with name: " + entityName); + } + return resultOf(() -> { + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "pipeline": + case "session": + break; + case "batchSize": + iterable.batchSize(cur.getValue().asNumber().intValue()); + break; + case "allowDiskUse": + iterable.allowDiskUse(cur.getValue().asBoolean().getValue()); + break; + case "let": + iterable.let(cur.getValue().asDocument()); + break; + case "collation": + iterable.collation(asCollation(cur.getValue().asDocument())); + break; + case "comment": + iterable.comment(cur.getValue()); + break; + case "timeoutMode": + setTimeoutMode(iterable, cur); + break; + case "maxTimeMS": + iterable.maxTime(cur.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); + break; + case "maxAwaitTimeMS": + iterable.maxAwaitTime(cur.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); + break; + case "bypassDocumentValidation": + iterable.bypassDocumentValidation(cur.getValue().asBoolean().getValue()); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + String lastStageName = pipeline.isEmpty() ? null : pipeline.get(pipeline.size() - 1).getFirstKey(); + boolean useToCollection = Objects.equals(lastStageName, "$out") || Objects.equals(lastStageName, "$merge"); + if (!pipeline.isEmpty() && useToCollection) { + iterable.toCollection(); + return null; + } else { + return new BsonArray(iterable.into(new ArrayList<>())); + } + }); + } + + OperationResult executeDeleteOne(final BsonDocument operation) { + MongoCollection collection = getMongoCollection(operation); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + BsonDocument filter = arguments.getDocument("filter"); + ClientSession session = getSession(arguments); + DeleteOptions options = getDeleteOptions(arguments); + + return resultOf(() -> { + if (session == null) { + return toExpected(collection.deleteOne(filter, options)); + } else { + return toExpected(collection.deleteOne(session, filter, options)); + } + }); + } + + OperationResult executeDeleteMany(final BsonDocument operation) { + MongoCollection collection = getMongoCollection(operation); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + BsonDocument filter = arguments.getDocument("filter"); + ClientSession session = getSession(arguments); + DeleteOptions options = getDeleteOptions(arguments); + + return resultOf(() -> { + if (session == null) { + return toExpected(collection.deleteMany(filter, options)); + } else { + return toExpected(collection.deleteMany(session, filter, options)); + } + }); + } + + private BsonDocument toExpected(final DeleteResult result) { + if (result.wasAcknowledged()) { + return new BsonDocument("deletedCount", new BsonInt32((int) result.getDeletedCount())); + } else { + return new BsonDocument(); + } + } + + OperationResult executeUpdateOne(final BsonDocument operation) { + MongoCollection collection = getMongoCollection(operation); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + ClientSession session = getSession(arguments); + BsonDocument filter = arguments.getDocument("filter"); + BsonValue update = arguments.get("update"); + UpdateOptions options = getUpdateOptions(arguments); + + return resultOf(() -> { + UpdateResult 
updateResult; + if (session == null) { + updateResult = update.isArray() + ? collection.updateOne(filter, update.asArray().stream().map(BsonValue::asDocument).collect(toList()), options) + : collection.updateOne(filter, update.asDocument(), options); + } else { + updateResult = update.isArray() + ? collection.updateOne(session, filter, update.asArray().stream().map(BsonValue::asDocument).collect(toList()), + options) + : collection.updateOne(session, filter, update.asDocument(), options); + } + return toExpected(updateResult); + }); + } + + OperationResult executeUpdateMany(final BsonDocument operation) { + MongoCollection collection = getMongoCollection(operation); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + BsonDocument filter = arguments.getDocument("filter"); + BsonValue update = arguments.get("update"); + ClientSession session = getSession(arguments); + UpdateOptions options = getUpdateOptions(arguments); + + return resultOf(() -> { + if (session == null) { + return update.isArray() + ? toExpected(collection.updateMany(filter, update.asArray().stream().map(BsonValue::asDocument).collect(toList()), + options)) + : toExpected(collection.updateMany(filter, update.asDocument(), options)); + } else { + return update.isArray() + ? toExpected(collection.updateMany(session, filter, + update.asArray().stream().map(BsonValue::asDocument).collect(toList()), options)) + : toExpected(collection.updateMany(session, filter, update.asDocument(), options)); + } + }); + } + + OperationResult executeReplaceOne(final BsonDocument operation) { + MongoCollection collection = getMongoCollection(operation); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + ClientSession session = getSession(arguments); + BsonDocument filter = arguments.getDocument("filter"); + BsonDocument replacement = arguments.getDocument("replacement"); + ReplaceOptions options = getReplaceOptions(arguments); + + return resultOf(() -> { + if (session == null) { + return toExpected(collection.replaceOne(filter, replacement, options)); + } else { + return toExpected(collection.replaceOne(session, filter, replacement, options)); + } + }); + } + + private BsonDocument toExpected(final UpdateResult result) { + if (result.wasAcknowledged()) { + BsonDocument expectedDocument = new BsonDocument() + .append("matchedCount", new BsonInt32((int) result.getMatchedCount())) + .append("modifiedCount", new BsonInt32((int) result.getModifiedCount())) + .append("upsertedCount", new BsonInt32(result.getUpsertedId() == null ? 
0 : 1)); + if (result.getUpsertedId() != null) { + expectedDocument.append("upsertedId", result.getUpsertedId()); + } + return expectedDocument; + } else { + return new BsonDocument(); + } + } + + + OperationResult executeInsertOne(final BsonDocument operation) { + MongoCollection collection = getMongoCollection(operation); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + ClientSession session = getSession(arguments); + BsonDocument document = arguments.getDocument("document").asDocument(); + InsertOneOptions options = new InsertOneOptions(); + + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "session": + case "document": + break; + case "comment": + options.comment(cur.getValue()); + break; + case "bypassDocumentValidation": + options.bypassDocumentValidation(cur.getValue().asBoolean().getValue()); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + + return resultOf(() -> + toExpected(session == null + ? collection.insertOne(document, options) + : collection.insertOne(session, document, options))); + } + + private BsonDocument toExpected(final InsertOneResult result) { + if (result.wasAcknowledged()) { + return new BsonDocument("insertedId", result.getInsertedId()); + } else { + return new BsonDocument(); + } + } + + OperationResult executeInsertMany(final BsonDocument operation) { + MongoCollection collection = getMongoCollection(operation); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + List documents = arguments.getArray("documents").stream().map(BsonValue::asDocument).collect(toList()); + ClientSession session = getSession(arguments); + InsertManyOptions options = new InsertManyOptions(); + + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "documents": + case "session": + break; + case "ordered": + options.ordered(cur.getValue().asBoolean().getValue()); + break; + case "comment": + options.comment(cur.getValue()); + break; + case "bypassDocumentValidation": + options.bypassDocumentValidation(cur.getValue().asBoolean().getValue()); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + + return resultOf(() -> { + if (session == null) { + return toExpected(collection.insertMany(documents, options)); + } else { + return toExpected(collection.insertMany(session, documents, options)); + } + }); + } + + private BsonDocument toExpected(final InsertManyResult result) { + if (result.wasAcknowledged()) { + return new BsonDocument("insertedIds", new BsonDocument(result.getInsertedIds().entrySet().stream() + .map(value -> new BsonElement(value.getKey().toString(), value.getValue())).collect(toList()))); + } else { + return new BsonDocument(); + } + } + + OperationResult executeBulkWrite(final BsonDocument operation) { + MongoCollection collection = getMongoCollection(operation); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + ClientSession session = getSession(arguments); + List> requests = arguments.getArray("requests").stream() + .map(value -> toWriteModel(value.asDocument())).collect(toList()); + BulkWriteOptions options = new BulkWriteOptions(); + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "requests": + case "session": + break; + case "ordered": + options.ordered(cur.getValue().asBoolean().getValue()); + break; + case "comment": + options.comment(cur.getValue()); + 
break; + case "let": + options.let(cur.getValue().asDocument()); + break; + case "bypassDocumentValidation": + options.bypassDocumentValidation(cur.getValue().asBoolean().getValue()); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + + return resultOf(() -> { + if (session == null) { + return toExpected(collection.bulkWrite(requests, options)); + } else { + return toExpected(collection.bulkWrite(session, requests, options)); + } + }); + } + + private BsonDocument toExpected(final BulkWriteResult result) { + if (result.wasAcknowledged()) { + return new BsonDocument() + .append("deletedCount", new BsonInt32(result.getDeletedCount())) + .append("insertedCount", new BsonInt32(result.getInsertedCount())) + .append("matchedCount", new BsonInt32(result.getMatchedCount())) + .append("modifiedCount", new BsonInt32(result.getModifiedCount())) + .append("upsertedCount", new BsonInt32(result.getUpserts().size())) + .append("insertedIds", new BsonDocument(result.getInserts().stream() + .map(value -> new BsonElement(Integer.toString(value.getIndex()), value.getId())).collect(toList()))) + .append("upsertedIds", new BsonDocument(result.getUpserts().stream() + .map(value -> new BsonElement(Integer.toString(value.getIndex()), value.getId())).collect(toList()))); + } else { + return new BsonDocument(); + } + } + + private WriteModel toWriteModel(final BsonDocument document) { + + String requestType = document.getFirstKey(); + BsonDocument arguments = document.getDocument(requestType); + switch (requestType) { + case "insertOne": + return new InsertOneModel<>(arguments.getDocument("document")); + case "updateOne": + return arguments.isArray("update") + ? new UpdateOneModel<>(arguments.getDocument("filter"), + arguments.getArray("update").stream().map(BsonValue::asDocument).collect(toList()), + getUpdateOptions(arguments)) + : new UpdateOneModel<>(arguments.getDocument("filter"), arguments.getDocument("update"), + getUpdateOptions(arguments)); + case "updateMany": + return arguments.isArray("update") + ? 
new UpdateManyModel<>(arguments.getDocument("filter"), + arguments.getArray("update").stream().map(BsonValue::asDocument).collect(toList()), + getUpdateOptions(arguments)) + : new UpdateManyModel<>(arguments.getDocument("filter"), arguments.getDocument("update"), + getUpdateOptions(arguments)); + case "deleteOne": + return new DeleteOneModel<>(arguments.getDocument("filter"), getDeleteOptions(arguments)); + case "deleteMany": + return new DeleteManyModel<>(arguments.getDocument("filter"), getDeleteOptions(arguments)); + case "replaceOne": + return new ReplaceOneModel<>(arguments.getDocument("filter"), arguments.getDocument("replacement"), + getReplaceOptions(arguments)); + default: + throw new UnsupportedOperationException("Unsupported write model type: " + requestType); + } + } + + @NonNull + private DeleteOptions getDeleteOptions(final BsonDocument arguments) { + DeleteOptions options = new DeleteOptions(); + + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "session": + case "filter": + break; + case "hint": + if (cur.getValue().isString()) { + options.hintString(cur.getValue().asString().getValue()); + } else { + options.hint(cur.getValue().asDocument()); + } + break; + case "comment": + options.comment(cur.getValue()); + break; + case "let": + options.let(cur.getValue().asDocument()); + break; + case "collation": + options.collation(asCollation(cur.getValue().asDocument())); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + return options; + } + + private UpdateOptions getUpdateOptions(final BsonDocument arguments) { + UpdateOptions options = new UpdateOptions(); + + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "session": + case "filter": + case "update": + break; + case "upsert": + options.upsert(cur.getValue().asBoolean().getValue()); + break; + case "arrayFilters": + options.arrayFilters(cur.getValue().asArray().stream().map(BsonValue::asDocument).collect(toList())); + break; + case "hint": + if (cur.getValue().isString()) { + options.hintString(cur.getValue().asString().getValue()); + } else { + options.hint(cur.getValue().asDocument()); + } + break; + case "comment": + options.comment(cur.getValue()); + break; + case "let": + options.let(cur.getValue().asDocument()); + break; + case "collation": + options.collation(asCollation(cur.getValue().asDocument())); + break; + case "sort": + options.sort(cur.getValue().asDocument()); + break; + case "bypassDocumentValidation": + options.bypassDocumentValidation(cur.getValue().asBoolean().getValue()); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + return options; + } + + private ReplaceOptions getReplaceOptions(final BsonDocument arguments) { + ReplaceOptions options = new ReplaceOptions(); + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "filter": + case "replacement": + case "session": + break; + case "upsert": + options.upsert(cur.getValue().asBoolean().getValue()); + break; + case "hint": + if (cur.getValue().isString()) { + options.hintString(cur.getValue().asString().getValue()); + } else { + options.hint(cur.getValue().asDocument()); + } + break; + case "comment": + options.comment(cur.getValue()); + break; + case "let": + options.let(cur.getValue().asDocument()); + break; + case "collation": + options.collation(asCollation(cur.getValue().asDocument())); + break; + case "sort": + 
options.sort(cur.getValue().asDocument()); + break; + case "bypassDocumentValidation": + options.bypassDocumentValidation(cur.getValue().asBoolean().getValue()); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + return options; + } + + OperationResult executeStartTransaction(final BsonDocument operation) { + ClientSession session = entities.getSession(operation.getString("object").getValue()); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + TransactionOptions.Builder optionsBuilder = TransactionOptions.builder(); + + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "writeConcern": + optionsBuilder.writeConcern(asWriteConcern(cur.getValue().asDocument())); + break; + case "readPreference": + optionsBuilder.readPreference(asReadPreference(cur.getValue().asDocument())); + break; + case "readConcern": + optionsBuilder.readConcern(asReadConcern(cur.getValue().asDocument())); + break; + case "timeoutMS": + optionsBuilder.timeout(cur.getValue().asInt32().longValue(), TimeUnit.MILLISECONDS); + break; + case "maxCommitTimeMS": + optionsBuilder.maxCommitTime(cur.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + + return resultOf(() -> { + session.startTransaction(optionsBuilder.build()); + return null; + }); + } + + OperationResult executeCommitTransaction(final BsonDocument operation) { + ClientSession session = entities.getSession(operation.getString("object").getValue()); + + if (operation.containsKey("arguments")) { + throw new UnsupportedOperationException("Unexpected arguments " + operation.get("arguments")); + } + + return resultOf(() -> { + session.commitTransaction(); + return null; + }); + } + + OperationResult executeAbortTransaction(final BsonDocument operation) { + ClientSession session = entities.getSession(operation.getString("object").getValue()); + + if (operation.containsKey("arguments")) { + throw new UnsupportedOperationException("Unexpected arguments: " + operation.get("arguments")); + } + + return resultOf(() -> { + session.abortTransaction(); + return null; + }); + } + + OperationResult executeWithTransaction(final BsonDocument operation, final OperationAsserter operationAsserter) { + ClientSession session = entities.getSession(operation.getString("object").getValue()); + BsonArray callback = operation.getDocument("arguments", new BsonDocument()).getArray("callback"); + TransactionOptions.Builder optionsBuilder = TransactionOptions.builder(); + for (Map.Entry entry : operation.getDocument("arguments", new BsonDocument()).entrySet()) { + switch (entry.getKey()) { + case "callback": + break; + case "readConcern": + optionsBuilder.readConcern(asReadConcern(entry.getValue().asDocument())); + break; + case "writeConcern": + optionsBuilder.writeConcern(asWriteConcern(entry.getValue().asDocument())); + break; + case "timeoutMS": + optionsBuilder.timeout(entry.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); + break; + case "maxCommitTimeMS": + optionsBuilder.maxCommitTime(entry.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); + break; + default: + throw new UnsupportedOperationException("Unsupported transaction option: " + entry.getKey()); + } + } + + return resultOf(() -> { + session.withTransaction(() -> { + for (int i = 0; i < callback.size(); i++) { + BsonValue cur = callback.get(i); + 
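+ // Each element of "callback" is itself a complete operation document; running it
+ // through the operation asserter executes and checks the nested operations, in
+ // order, inside the transaction body.
+ 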
operationAsserter.assertOperation(cur.asDocument(), i);
+ }
+ //noinspection ConstantConditions
+ return null;
+ }, optionsBuilder.build());
+ return null;
+ });
+ }
+
+ public OperationResult executeDropCollection(final BsonDocument operation) {
+ MongoDatabase database = getMongoDatabase(operation);
+ BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
+ String collectionName = arguments.remove("collection").asString().getValue();
+
+ DropCollectionOptions dropCollectionOptions = new DropCollectionOptions();
+ for (Map.Entry entry : arguments.entrySet()) {
+ if (entry.getKey().equals("encryptedFields")) {
+ dropCollectionOptions.encryptedFields(entry.getValue().asDocument());
+ } else {
+ throw new UnsupportedOperationException("Unsupported drop collection option: " + entry.getKey());
+ }
+ }
+
+ return resultOf(() -> {
+ database.getCollection(collectionName).drop(dropCollectionOptions);
+ return null;
+ });
+ }
+
+ public OperationResult executeCreateCollection(final BsonDocument operation) {
+ MongoDatabase database = getMongoDatabase(operation);
+ BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
+ String collectionName = arguments.getString("collection").getValue();
+ ClientSession session = getSession(arguments);
+
+ // In the Java driver there is a separate method for creating a view, but in the unified test CRUD format
+ // views and collections are both created with the createCollection operation. We use the createView
+ // method if the requisite arguments are present.
+ if (arguments.containsKey("viewOn")) {
+ String viewOn = arguments.getString("viewOn").getValue();
+ List pipeline =
+ arguments.getArray("pipeline", new BsonArray()).stream().map(BsonValue::asDocument).collect(toList());
+
+ for (Map.Entry cur : arguments.entrySet()) {
+ switch (cur.getKey()) {
+ case "collection":
+ case "session":
+ case "viewOn":
+ case "pipeline":
+ break;
+ default:
+ throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey());
+ }
+ }
+
+ return resultOf(() -> {
+ if (session == null) {
+ database.createView(collectionName, viewOn, pipeline);
+ } else {
+ database.createView(session, collectionName, viewOn, pipeline);
+ }
+ return null;
+ });
+ } else {
+ CreateCollectionOptions options = new CreateCollectionOptions();
+
+ for (Map.Entry cur : arguments.entrySet()) {
+ switch (cur.getKey()) {
+ case "collection":
+ case "session":
+ case "viewOn":
+ break;
+ case "expireAfterSeconds":
+ options.expireAfter(cur.getValue().asNumber().longValue(), TimeUnit.SECONDS);
+ break;
+ case "timeseries":
+ options.timeSeriesOptions(createTimeSeriesOptions(cur.getValue().asDocument()));
+ break;
+ case "changeStreamPreAndPostImages":
+ options.changeStreamPreAndPostImagesOptions(createChangeStreamPreAndPostImagesOptions(cur.getValue().asDocument()));
+ break;
+ case "clusteredIndex":
+ options.clusteredIndexOptions(createClusteredIndexOptions(cur.getValue().asDocument()));
+ break;
+ case "encryptedFields":
+ options.encryptedFields(cur.getValue().asDocument());
+ break;
+ case "validator":
+ ValidationOptions validationOptions = new ValidationOptions();
+ validationOptions.validator(cur.getValue().asDocument());
+ options.validationOptions(validationOptions);
+ break;
+ default:
+ throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey());
+ }
+ }
+ return resultOf(() -> {
+ if (session == null) {
+ database.createCollection(collectionName, options);
+ } else {
+ database.createCollection(session,
collectionName, options); + } + return null; + }); + } + } + + public OperationResult executeModifyCollection(final BsonDocument operation) { + MongoDatabase database = getMongoDatabase(operation); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + String collectionName = arguments.getString("collection").getValue(); + ClientSession session = getSession(arguments); + + BsonDocument collModCommandDocument = new BsonDocument("collMod", new BsonString(collectionName)); + + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "collection": + case "session": + break; + case "validator": + collModCommandDocument.append("validator", cur.getValue()); + break; + case "index": + collModCommandDocument.append("index", cur.getValue()); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + return resultOf(() -> { + if (session == null) { + database.runCommand(collModCommandDocument); + } else { + database.runCommand(session, collModCommandDocument); + } + return null; + }); + } + + public OperationResult executeRenameCollection(final BsonDocument operation) { + MongoCollection collection = getMongoCollection(operation); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + String newCollectionName = arguments.getString("to").getValue(); + ClientSession session = getSession(arguments); + RenameCollectionOptions options = new RenameCollectionOptions(); + + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "to": + case "session": + break; + case "dropTarget": + options.dropTarget(cur.getValue().asBoolean().getValue()); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + + return resultOf(() -> { + MongoNamespace newCollectionNamespace = new MongoNamespace(collection.getNamespace().getDatabaseName(), newCollectionName); + if (session == null) { + collection.renameCollection(newCollectionNamespace, options); + } else { + collection.renameCollection(session, newCollectionNamespace, options); + } + return null; + }); + } + + private TimeSeriesOptions createTimeSeriesOptions(final BsonDocument timeSeriesDocument) { + TimeSeriesOptions options = new TimeSeriesOptions(timeSeriesDocument.getString("timeField").getValue()); + + for (Map.Entry cur : timeSeriesDocument.entrySet()) { + switch (cur.getKey()) { + case "timeField": + break; + case "metaField": + options.metaField(cur.getValue().asString().getValue()); + break; + case "bucketMaxSpanSeconds": + options.bucketMaxSpan(cur.getValue().asInt32().longValue(), TimeUnit.SECONDS); + break; + case "bucketRoundingSeconds": + options.bucketRounding(cur.getValue().asInt32().longValue(), TimeUnit.SECONDS); + break; + case "granularity": + options.granularity(createTimeSeriesGranularity(cur.getValue().asString().getValue())); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + return options; + } + + private ClusteredIndexOptions createClusteredIndexOptions(final BsonDocument clusteredIndexDocument) { + ClusteredIndexOptions options = new ClusteredIndexOptions(clusteredIndexDocument.getDocument("key"), + clusteredIndexDocument.getBoolean("unique").getValue()); + + for (Map.Entry cur : clusteredIndexDocument.entrySet()) { + switch (cur.getKey()) { + case "key": + case "unique": + break; + case "name": + options.name(cur.getValue().asString().getValue()); + break; + default: 
+ throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + return options; + } + + private ChangeStreamPreAndPostImagesOptions createChangeStreamPreAndPostImagesOptions( + final BsonDocument changeStreamPreAndPostImagesDocument) { + ChangeStreamPreAndPostImagesOptions changeStreamPreAndPostImagesOptions = + new ChangeStreamPreAndPostImagesOptions(changeStreamPreAndPostImagesDocument.getBoolean("enabled").getValue()); + + return changeStreamPreAndPostImagesOptions; + } + + private TimeSeriesGranularity createTimeSeriesGranularity(final String value) { + switch (value) { + case "seconds": + return TimeSeriesGranularity.SECONDS; + case "minutes": + return TimeSeriesGranularity.MINUTES; + case "hours": + return TimeSeriesGranularity.HOURS; + default: + throw new UnsupportedOperationException("Unsupported time series granularity: " + value); + } + } + + + OperationResult executeCreateSearchIndex(final BsonDocument operation) { + MongoCollection collection = getMongoCollection(operation); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + BsonDocument model = arguments.getDocument("model"); + + return resultOf(() -> { + collection.createSearchIndexes(Collections.singletonList(toIndexSearchModel(model))); + return null; + }); + } + + private static SearchIndexType getSearchIndexType(final BsonString type) { + switch (type.getValue()) { + case "search": + return SearchIndexType.search(); + case "vectorSearch": + return SearchIndexType.vectorSearch(); + default: + throw new UnsupportedOperationException("Unsupported search index type: " + type.getValue()); + } + } + + OperationResult executeCreateSearchIndexes(final BsonDocument operation) { + MongoCollection collection = getMongoCollection(operation); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + BsonArray models = arguments.getArray("models"); + + List searchIndexModels = models.stream() + .map(UnifiedCrudHelper::toIndexSearchModel).collect(toList()); + + return resultOf(() -> { + collection.createSearchIndexes(searchIndexModels); + return null; + }); + } + + + OperationResult executeUpdateSearchIndex(final BsonDocument operation) { + MongoCollection collection = getMongoCollection(operation); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + BsonDocument definition = arguments.getDocument("definition"); + String name = arguments.getString("name").getValue(); + + return resultOf(() -> { + collection.updateSearchIndex(name, definition); + return null; + }); + } + + OperationResult executeDropSearchIndex(final BsonDocument operation) { + MongoCollection collection = getMongoCollection(operation); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + String name = arguments.getString("name").getValue(); + + return resultOf(() -> { + collection.dropSearchIndex(name); + return null; + }); + } + + private static SearchIndexModel toIndexSearchModel(final BsonValue bsonValue) { + BsonDocument model = bsonValue.asDocument(); + BsonDocument definition = model.getDocument("definition"); + SearchIndexType type = model.containsKey("type") ? getSearchIndexType(model.getString("type")) : null; + String name = ofNullable(model.getString("name", null)) + .map(BsonString::getValue). 
+ orElse(null);
+ return new SearchIndexModel(name, definition, type);
+ }
+
+
+ OperationResult executeListSearchIndexes(final BsonDocument operation) {
+ MongoCollection collection = getMongoCollection(operation);
+ Optional arguments = ofNullable(operation.getOrDefault("arguments", null)).map(BsonValue::asDocument);
+
+ if (arguments.isPresent()) {
+ ListSearchIndexesIterable iterable = createListSearchIndexesIterable(collection, arguments.get());
+ return resultOf(() -> {
+ iterable.into(new ArrayList<>());
+ return null;
+ });
+ }
+
+ return resultOf(() -> {
+ collection.listSearchIndexes().into(new ArrayList<>());
+ return null;
+ });
+ }
+
+ private ListSearchIndexesIterable createListSearchIndexesIterable(final MongoCollection collection,
+ final BsonDocument arguments) {
+ Optional name = ofNullable(arguments.getOrDefault("name", null))
+ .map(BsonValue::asString).map(BsonString::getValue);
+
+ ListSearchIndexesIterable iterable = collection.listSearchIndexes(BsonDocument.class);
+
+ if (arguments.containsKey("aggregationOptions")) {
+ for (Map.Entry option : arguments.getDocument("aggregationOptions").entrySet()) {
+ switch (option.getKey()) {
+ case "batchSize":
+ iterable.batchSize(option.getValue().asNumber().intValue());
+ break;
+ default:
+ throw new UnsupportedOperationException("Unsupported argument: " + option.getKey());
+ }
+ }
+ }
+ return iterable.name(name.get());
+ }
+
+ public OperationResult executeCreateIndex(final BsonDocument operation) {
+ MongoCollection collection = getMongoCollection(operation);
+ BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
+ BsonDocument keys = arguments.getDocument("keys").asDocument();
+ ClientSession session = getSession(arguments);
+ IndexOptions options = new IndexOptions();
+
+ for (Map.Entry cur : arguments.entrySet()) {
+ switch (cur.getKey()) {
+ case "keys":
+ case "session":
+ break;
+ case "name":
+ options.name(cur.getValue().asString().getValue());
+ break;
+ case "unique":
+ options.unique(cur.getValue().asBoolean().getValue());
+ break;
+ default:
+ throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey());
+ }
+ }
+
+ return resultOf(() -> {
+ if (session == null) {
+ collection.createIndex(keys, options);
+ } else {
+ collection.createIndex(session, keys, options);
+ }
+ return null;
+ });
+ }
+
+ public OperationResult executeDropIndex(final BsonDocument operation) {
+ MongoCollection collection = getMongoCollection(operation);
+ BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
+ ClientSession session = getSession(arguments);
+
+ // validate the presence of the required "name" argument before reading it
+ if (!arguments.containsKey("name")) {
+ throw new UnsupportedOperationException("Drop index without name is not supported");
+ }
+ String indexName = arguments.get("name").asString().getValue();
+
+ DropIndexOptions options = getDropIndexOptions(arguments);
+ return resultOf(() -> {
+ if (session == null) {
+ collection.dropIndex(indexName, options);
+ } else {
+ collection.dropIndex(session, indexName, options);
+ }
+ return null;
+ });
+ }
+
+ public OperationResult executeDropIndexes(final BsonDocument operation) {
+ MongoCollection collection = getMongoCollection(operation);
+
+ if (operation.containsKey("arguments")) {
+ BsonDocument arguments = operation.getDocument("arguments", new BsonDocument());
+ ClientSession session = getSession(arguments);
+ DropIndexOptions options = getDropIndexOptions(arguments);
+ return resultOf(() -> {
+ if (session == null) {
+ collection.dropIndexes(options);
+ }
else { + collection.dropIndexes(session, options); + } + return null; + }); + } + return resultOf(() -> { + collection.dropIndexes(); + return null; + }); + } + + private static DropIndexOptions getDropIndexOptions(final BsonDocument arguments) { + DropIndexOptions options = new DropIndexOptions(); + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "session": + case "name": + break; + case "maxTimeMS": + options.maxTime(cur.getValue().asNumber().intValue(), TimeUnit.MILLISECONDS); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + return options; + } + + public OperationResult createChangeStreamCursor(final BsonDocument operation) { + String entityName = operation.getString("object").getValue(); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + List pipeline = arguments.getArray("pipeline").stream().map(BsonValue::asDocument).collect(toList()); + ChangeStreamIterable iterable; + + Long timeoutMS = arguments.containsKey("timeoutMS") ? arguments.remove("timeoutMS").asNumber().longValue() : null; + if (entities.hasCollection(entityName)) { + iterable = entities.getCollectionWithTimeoutMS(entityName, timeoutMS).watch(pipeline); + } else if (entities.hasDatabase(entityName)) { + iterable = entities.getDatabaseWithTimeoutMS(entityName, timeoutMS).watch(pipeline, BsonDocument.class); + } else if (entities.hasClient(entityName)) { + iterable = entities.getMongoClusterWithTimeoutMS(entityName, timeoutMS).watch(pipeline, BsonDocument.class); + } else { + throw new UnsupportedOperationException("No entity found for id: " + entityName); + } + + return resultOf(() -> { + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "batchSize": + iterable.batchSize(cur.getValue().asNumber().intValue()); + break; + case "pipeline": + break; + case "comment": + iterable.comment(cur.getValue()); + break; + case "fullDocument": + iterable.fullDocument(FullDocument.fromString(cur.getValue().asString().getValue())); + break; + case "fullDocumentBeforeChange": + iterable.fullDocumentBeforeChange(FullDocumentBeforeChange.fromString(cur.getValue().asString().getValue())); + break; + case "maxAwaitTimeMS": + iterable.maxAwaitTime(cur.getValue().asNumber().longValue(), TimeUnit.MILLISECONDS); + break; + case "showExpandedEvents": + iterable.showExpandedEvents(cur.getValue().asBoolean().getValue()); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + MongoCursor changeStreamWrappingCursor = createChangeStreamWrappingCursor(iterable); + entities.addCursor(operation.getString("saveResultAsEntity", + new BsonString(createRandomEntityId())).getValue(), changeStreamWrappingCursor); + return null; + }); + } + + public OperationResult clientBulkWrite(final BsonDocument operation) { + Set unexpectedOperationKeys = singleton("saveResultAsEntity"); + if (operation.keySet().stream().anyMatch(unexpectedOperationKeys::contains)) { + throw new UnsupportedOperationException("Unexpected field in operation. 
One of " + unexpectedOperationKeys); + } + String clientId = operation.getString("object").getValue(); + MongoCluster cluster = entities.getClient(clientId); + BsonDocument arguments = operation.getDocument("arguments"); + ClientSession session = getSession(arguments); + List models = arguments.getArray("models").stream() + .map(BsonValue::asDocument) + .map(UnifiedCrudHelper::toClientNamespacedWriteModel) + .collect(toList()); + ClientBulkWriteOptions options = clientBulkWriteOptions(); + for (Map.Entry entry : arguments.entrySet()) { + String key = entry.getKey(); + BsonValue argument = entry.getValue(); + switch (key) { + case "models": + case "session": + break; + case "writeConcern": + cluster = cluster.withWriteConcern(asWriteConcern(argument.asDocument())); + break; + case "ordered": + options.ordered(argument.asBoolean().getValue()); + break; + case "bypassDocumentValidation": + options.bypassDocumentValidation(argument.asBoolean().getValue()); + break; + case "let": + options.let(argument.asDocument()); + break; + case "comment": + options.comment(argument); + break; + case "verboseResults": + options.verboseResults(argument.asBoolean().getValue()); + break; + default: + throw new UnsupportedOperationException(format("Unsupported argument: key=%s, argument=%s", key, argument)); + } + } + MongoCluster clusterWithWriteConcern = cluster; + return resultOf(() -> { + if (session == null) { + return toMatchableValue(clusterWithWriteConcern.bulkWrite(models, options)); + } else { + return toMatchableValue(clusterWithWriteConcern.bulkWrite(session, models, options)); + } + }); + } + + private static ClientNamespacedWriteModel toClientNamespacedWriteModel(final BsonDocument model) { + String modelType = model.getFirstKey(); + BsonDocument arguments = model.getDocument(modelType); + MongoNamespace namespace = new MongoNamespace(arguments.getString("namespace").getValue()); + switch (modelType) { + case "insertOne": + Set expectedArguments = new HashSet<>(asList("namespace", "document")); + if (!expectedArguments.containsAll(arguments.keySet())) { + // for other `modelType`s a conceptually similar check is done when creating their options objects + throw new UnsupportedOperationException("Unsupported argument, one of: " + arguments.keySet()); + } + return ClientNamespacedWriteModel.insertOne( + namespace, + arguments.getDocument("document")); + case "replaceOne": + return ClientNamespacedWriteModel.replaceOne( + namespace, + arguments.getDocument("filter"), + arguments.getDocument("replacement"), + getClientReplaceOneOptions(arguments)); + case "updateOne": + return arguments.isDocument("update") + ? ClientNamespacedWriteModel.updateOne( + namespace, + arguments.getDocument("filter"), + arguments.getDocument("update"), + getClientUpdateOneOptions(arguments)) + : ClientNamespacedWriteModel.updateOne( + namespace, + arguments.getDocument("filter"), + arguments.getArray("update").stream().map(BsonValue::asDocument).collect(toList()), + getClientUpdateOneOptions(arguments)); + case "updateMany": + return arguments.isDocument("update") + ? 
ClientNamespacedWriteModel.updateMany( + namespace, + arguments.getDocument("filter"), + arguments.getDocument("update"), + getClientUpdateManyOptions(arguments)) + : ClientNamespacedWriteModel.updateMany( + namespace, + arguments.getDocument("filter"), + arguments.getArray("update").stream().map(BsonValue::asDocument).collect(toList()), + getClientUpdateManyOptions(arguments)); + case "deleteOne": + return ClientNamespacedWriteModel.deleteOne( + namespace, + arguments.getDocument("filter"), + getClientDeleteOneOptions(arguments)); + case "deleteMany": + return ClientNamespacedWriteModel.deleteMany( + namespace, + arguments.getDocument("filter"), + getClientDeleteManyOptions(arguments)); + default: + throw new UnsupportedOperationException("Unsupported client write model type: " + modelType); + } + } + + private static ClientReplaceOneOptions getClientReplaceOneOptions(final BsonDocument arguments) { + ClientReplaceOneOptions options = clientReplaceOneOptions(); + arguments.forEach((key, argument) -> { + switch (key) { + case "namespace": + case "filter": + case "replacement": + break; + case "collation": + options.collation(asCollation(argument.asDocument())); + break; + case "hint": + if (argument.isDocument()) { + options.hint(argument.asDocument()); + } else { + options.hintString(argument.asString().getValue()); + } + break; + case "upsert": + options.upsert(argument.asBoolean().getValue()); + break; + case "sort": + options.sort(argument.asDocument()); + break; + default: + throw new UnsupportedOperationException(format("Unsupported argument: key=%s, argument=%s", key, argument)); + } + }); + return options; + } + + private static ClientUpdateOneOptions getClientUpdateOneOptions(final BsonDocument arguments) { + ConcreteClientUpdateOneOptions options = new ConcreteClientUpdateOneOptions(); + + if (arguments.containsKey("sort")) { + BsonDocument sort = arguments + .remove("sort") + .asDocument(); + options.sort(sort); + } + + return fillAbstractClientUpdateOptions(options, arguments); + } + + private static ClientUpdateManyOptions getClientUpdateManyOptions(final BsonDocument arguments) { + return fillAbstractClientUpdateOptions(new ConcreteClientUpdateManyOptions(), arguments); + } + + private static T fillAbstractClientUpdateOptions( + final T options, + final BsonDocument arguments) { + arguments.forEach((key, argument) -> { + switch (key) { + case "namespace": + case "filter": + case "update": + break; + case "arrayFilters": + options.arrayFilters(argument.asArray().stream().map(BsonValue::asDocument).collect(toList())); + break; + case "collation": + options.collation(asCollation(argument.asDocument())); + break; + case "hint": + if (argument.isDocument()) { + options.hint(argument.asDocument()); + } else { + options.hintString(argument.asString().getValue()); + } + break; + case "upsert": + options.upsert(argument.asBoolean().getValue()); + break; + default: + throw new UnsupportedOperationException(format("Unsupported argument: key=%s, argument=%s", key, argument)); + } + }); + return options; + } + + private static ClientDeleteOneOptions getClientDeleteOneOptions(final BsonDocument arguments) { + return fillAbstractClientDeleteOptions(new ConcreteClientDeleteOneOptions(), arguments); + } + + private static ClientDeleteManyOptions getClientDeleteManyOptions(final BsonDocument arguments) { + return fillAbstractClientDeleteOptions(new ConcreteClientDeleteManyOptions(), arguments); + } + + private static T fillAbstractClientDeleteOptions( + final T options, + final BsonDocument 
arguments) { + arguments.forEach((key, argument) -> { + switch (key) { + case "namespace": + case "filter": + break; + case "collation": + options.collation(asCollation(argument.asDocument())); + break; + case "hint": + if (argument.isDocument()) { + options.hint(argument.asDocument()); + } else { + options.hintString(argument.asString().getValue()); + } + break; + default: + throw new UnsupportedOperationException(format("Unsupported argument: key=%s, argument=%s", key, argument)); + } + }); + return options; + } + + static BsonDocument toMatchableValue(final ClientBulkWriteResult result) { + BsonDocument expected = new BsonDocument(); + if (result.isAcknowledged()) { + expected.append("insertedCount", new BsonInt64(result.getInsertedCount())) + .append("upsertedCount", new BsonInt64(result.getUpsertedCount())) + .append("matchedCount", new BsonInt64(result.getMatchedCount())) + .append("modifiedCount", new BsonInt64(result.getModifiedCount())) + .append("deletedCount", new BsonInt64(result.getDeletedCount())); + result.getVerboseResults().ifPresent(verbose -> + expected.append("insertResults", new BsonDocument(verbose.getInsertResults().entrySet().stream() + .map(entry -> new BsonElement( + entry.getKey().toString(), + new BsonDocument("insertedId", entry.getValue().getInsertedId().orElseThrow(Assertions::fail)))) + .collect(toList()))) + .append("updateResults", new BsonDocument(verbose.getUpdateResults().entrySet().stream() + .map(entry -> { + ClientUpdateResult updateResult = entry.getValue(); + BsonDocument updateResultDocument = new BsonDocument( + "matchedCount", new BsonInt64(updateResult.getMatchedCount())) + .append("modifiedCount", new BsonInt64(updateResult.getModifiedCount())); + updateResult.getUpsertedId().ifPresent(upsertedId -> updateResultDocument.append("upsertedId", upsertedId)); + return new BsonElement(entry.getKey().toString(), updateResultDocument); + }) + .collect(toList()))) + .append("deleteResults", new BsonDocument(verbose.getDeleteResults().entrySet().stream() + .map(entry -> new BsonElement( + entry.getKey().toString(), + new BsonDocument("deletedCount", new BsonInt64(entry.getValue().getDeletedCount())))) + .collect(toList())))); + } + return expected; + } + + public OperationResult executeIterateUntilDocumentOrError(final BsonDocument operation) { + String id = operation.getString("object").getValue(); + MongoCursor cursor = entities.getCursor(id); + + if (operation.containsKey("arguments")) { + throw new UnsupportedOperationException("Unexpected arguments " + operation.get("arguments")); + } + + return resultOf(cursor::next); + } + + + public OperationResult executeIterateOnce(final BsonDocument operation) { + String id = operation.getString("object").getValue(); + MongoCursor cursor = entities.getCursor(id); + + if (operation.containsKey("arguments")) { + throw new UnsupportedOperationException("Unexpected arguments " + operation.get("arguments")); + } + + return resultOf(cursor::tryNext); + } + + public OperationResult close(final BsonDocument operation) { + String id = operation.getString("object").getValue(); + + if (entities.hasClient(id)) { + entities.getClient(id).close(); + } else { + MongoCursor cursor = entities.getCursor(id); + cursor.close(); + } + + return OperationResult.NONE; + } + + public OperationResult executeRunCommand(final BsonDocument operation) { + MongoDatabase database = getMongoDatabase(operation); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + ClientSession session = getSession(arguments); 
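+ // Of the recognized runCommand arguments, "commandName" is validated as a known
+ // key but otherwise unused: the command document below is what actually runs.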
+ BsonDocument command = arguments.getDocument("command"); + ReadPreference readPreference = arguments.containsKey("readPreference") + ? asReadPreference(arguments.getDocument("readPreference")) : null; + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "command": + case "commandName": + case "session": + case "readPreference": + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + + return resultOf(() -> { + if (session == null) { + if (readPreference == null) { + return database.runCommand(command, BsonDocument.class); + } else { + return database.runCommand(command, readPreference, BsonDocument.class); + } + } else { + if (readPreference == null) { + return database.runCommand(session, command, BsonDocument.class); + } else { + return database.runCommand(session, command, readPreference, BsonDocument.class); + } + } + }); + } + + public OperationResult executeCountDocuments(final BsonDocument operation) { + MongoCollection collection = getMongoCollection(operation); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + BsonDocument filter = arguments.getDocument("filter"); + ClientSession session = getSession(arguments); + CountOptions options = new CountOptions(); + + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "filter": + case "session": + break; + case "skip": + options.skip(cur.getValue().asNumber().intValue()); + break; + case "limit": + options.limit(cur.getValue().asNumber().intValue()); + break; + case "comment": + options.comment(cur.getValue()); + break; + case "collation": + options.collation(asCollation(cur.getValue().asDocument())); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + + return resultOf(() -> { + if (session == null) { + return new BsonInt64(collection.countDocuments(filter, options)); + } else { + return new BsonInt64(collection.countDocuments(session, filter, options)); + } + }); + } + + public OperationResult executeEstimatedDocumentCount(final BsonDocument operation) { + MongoCollection collection = getMongoCollection(operation); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + + EstimatedDocumentCountOptions options = new EstimatedDocumentCountOptions(); + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "maxTimeMS": + options.maxTime(cur.getValue().asNumber().intValue(), TimeUnit.MILLISECONDS); + break; + case "comment": + options.comment(cur.getValue()); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + + return resultOf(() -> + new BsonInt64(collection.estimatedDocumentCount(options))); + } + + public OperationResult executeUpdateClientMetadata(final BsonDocument operation) { + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + BsonDocument driverInfo = arguments.getDocument("driverInfoOptions"); + + MongoDriverInformation mongoDriverInformation = MongoDriverInformation.builder() + .driverVersion(driverInfo.getString("version").getValue()) + .driverName(driverInfo.getString("name").getValue()) + .driverPlatform(driverInfo.getString("platform").getValue()) + .build(); + + String clientId = operation.getString("object").getValue(); + MongoClient client = entities.getClient(clientId); + return resultOf(() -> { + client.appendMetadata(mongoDriverInformation); + return null; + }); + } + 
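+ // The operation documents dispatched to the execute* methods above share the
+ // unified test format, e.g. { "object": "collection0", "arguments": { "filter": { ... } } }:
+ // "object" names an entity registered in Entities, and any unrecognized argument
+ // key fails fast with UnsupportedOperationException rather than being silently ignored.
+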
+ @NonNull + private String createRandomEntityId() { + return "random-entity-id" + uniqueIdGenerator.getAndIncrement(); + } + + /** + * The tests in this list can not currently pass when using {@link ChangeStreamDocument} because there is information loss when + * decoding into an instance of that class. So for these tests, we just decode directly into {@link BsonDocument}. For all + * others, we decode into {@link ChangeStreamDocument} and from there to {@link BsonDocument} so that there is some integration test + * coverage of {@link ChangeStreamDocument}. + */ + private static final List BSON_DOCUMENT_CHANGE_STREAM_TESTS = asList( + "Test newField added in response MUST NOT err", + "Test projection in change stream returns expected fields", + "fullDocument:whenAvailable with changeStreamPreAndPostImages disabled", + "fullDocumentBeforeChange:whenAvailable with changeStreamPreAndPostImages disabled"); + + @NonNull + private MongoCursor createChangeStreamWrappingCursor(final ChangeStreamIterable iterable) { + if (BSON_DOCUMENT_CHANGE_STREAM_TESTS.contains(testDescription)) { + return iterable.withDocumentClass(BsonDocument.class).cursor(); + } else { + MongoChangeStreamCursor> wrappedCursor = iterable.cursor(); + return new MongoCursor() { + @Override + public void close() { + wrappedCursor.close(); + } + + @Override + public boolean hasNext() { + return wrappedCursor.hasNext(); + } + + @NonNull + @Override + public BsonDocument next() { + return encodeChangeStreamDocumentToBsonDocument(wrappedCursor.next()); + } + + @Override + public int available() { + return wrappedCursor.available(); + } + + @Override + public BsonDocument tryNext() { + ChangeStreamDocument next = wrappedCursor.tryNext(); + if (next == null) { + return null; + } else { + return encodeChangeStreamDocumentToBsonDocument(next); + } + } + + @Override + public ServerCursor getServerCursor() { + return wrappedCursor.getServerCursor(); + } + + @NonNull + @Override + public ServerAddress getServerAddress() { + return wrappedCursor.getServerAddress(); + } + + private BsonDocument encodeChangeStreamDocumentToBsonDocument(final ChangeStreamDocument next) { + BsonDocumentWriter writer = new BsonDocumentWriter(new BsonDocument()); + changeStreamDocumentCodec.encode(writer, next, EncoderContext.builder().build()); + return writer.getDocument(); + } + }; + } + } + + private MongoCollection getMongoCollection(final BsonDocument operation) { + MongoCollection collection = entities.getCollection(operation.getString("object").getValue()); + Long timeoutMS = getAndRemoveTimeoutMS(operation.getDocument("arguments", new BsonDocument())); + if (timeoutMS != null) { + collection = collection.withTimeout(timeoutMS, TimeUnit.MILLISECONDS); + } + return collection; + } + private MongoDatabase getMongoDatabase(final BsonDocument operation) { + MongoDatabase database = entities.getDatabase(operation.getString("object").getValue()); + if (operation.containsKey("arguments")) { + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + Long timeoutMS = getAndRemoveTimeoutMS(arguments); + if (timeoutMS != null) { + database = database.withTimeout(timeoutMS, TimeUnit.MILLISECONDS); + arguments.remove("timeoutMS"); + } + } + return database; + } + + private MongoCluster getMongoCluster(final BsonDocument operation) { + MongoCluster mongoCluster = entities.getClient(operation.getString("object").getValue()); + if (operation.containsKey("arguments")) { + BsonDocument arguments = operation.getDocument("arguments", new 
BsonDocument()); + Long timeoutMS = getAndRemoveTimeoutMS(arguments); + if (timeoutMS != null) { + mongoCluster = mongoCluster.withTimeout(timeoutMS, TimeUnit.MILLISECONDS); + arguments.remove("timeoutMS"); + } + } + return mongoCluster; + } + + private static void setCursorType(final FindIterable iterable, final Map.Entry cur) { + switch (cur.getValue().asString().getValue()) { + case "tailable": + iterable.cursorType(CursorType.Tailable); + break; + case "nonTailable": + iterable.cursorType(CursorType.NonTailable); + break; + case "tailableAwait": + iterable.cursorType(CursorType.TailableAwait); + break; + default: + throw new UnsupportedOperationException("Unsupported cursorType: " + cur.getValue()); + } + } + + private static void setTimeoutMode(final MongoIterable iterable, final Map.Entry cur) { + switch (cur.getValue().asString().getValue()) { + case "cursorLifetime": + invokeTimeoutMode(iterable, TimeoutMode.CURSOR_LIFETIME); + break; + case "iteration": + invokeTimeoutMode(iterable, TimeoutMode.ITERATION); + break; + default: + throw new UnsupportedOperationException("Unsupported timeoutMode: " + cur.getValue()); + } + } + + private static void invokeTimeoutMode(final MongoIterable iterable, final TimeoutMode timeoutMode) { + try { + Method timeoutModeMethod = iterable.getClass().getDeclaredMethod("timeoutMode", TimeoutMode.class); + timeoutModeMethod.setAccessible(true); + timeoutModeMethod.invoke(iterable, timeoutMode); + } catch (NoSuchMethodException e) { + throw new UnsupportedOperationException("Unsupported timeoutMode method for class: " + iterable.getClass(), e); + } catch (IllegalAccessException e) { + throw new UnsupportedOperationException("Unable to set timeoutMode method for class: " + iterable.getClass(), e); + } catch (InvocationTargetException e) { + Throwable targetException = e.getTargetException(); + if (targetException instanceof IllegalArgumentException) { + throw (IllegalArgumentException) targetException; + } + throw new UnsupportedOperationException("Unable to set timeoutMode method for class: " + iterable.getClass(), targetException); + } + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudTest.java new file mode 100644 index 00000000000..4f3a4384ecf --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedCrudTest.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.unified; + +import org.junit.jupiter.params.provider.Arguments; + +import java.util.Collection; + +public final class UnifiedCrudTest extends UnifiedSyncTest { + private static Collection data() { + return getTestData("crud"); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedGridFSHelper.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedGridFSHelper.java new file mode 100644 index 00000000000..13e95a58463 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedGridFSHelper.java @@ -0,0 +1,263 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.unified; + +import com.mongodb.client.gridfs.GridFSBucket; +import com.mongodb.client.gridfs.GridFSFindIterable; +import com.mongodb.client.gridfs.model.GridFSDownloadOptions; +import com.mongodb.client.gridfs.model.GridFSFile; +import com.mongodb.client.gridfs.model.GridFSUploadOptions; +import com.mongodb.internal.HexUtils; +import org.bson.BsonDocument; +import org.bson.BsonDocumentReader; +import org.bson.BsonObjectId; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.Document; +import org.bson.codecs.DecoderContext; +import org.bson.codecs.DocumentCodec; +import util.Hex; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.util.ArrayList; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static java.util.Objects.requireNonNull; + +final class UnifiedGridFSHelper extends UnifiedHelper{ + private final Entities entities; + + UnifiedGridFSHelper(final Entities entities) { + this.entities = entities; + } + + public OperationResult executeFind(final BsonDocument operation) { + GridFSFindIterable iterable = createGridFSFindIterable(operation); + try { + ArrayList target = new ArrayList<>(); + iterable.into(target); + + if (target.isEmpty()) { + return OperationResult.NONE; + } + + throw new UnsupportedOperationException("expectResult is not implemented for Unified GridFS tests. 
" + + "Unexpected result: " + target); + } catch (Exception e) { + return OperationResult.of(e); + } + } + + public OperationResult executeRename(final BsonDocument operation) { + GridFSBucket bucket = getGridFsBucket(operation); + BsonDocument arguments = operation.getDocument("arguments"); + BsonValue id = arguments.get("id"); + String fileName = arguments.get("newFilename").asString().getValue(); + + requireNonNull(id); + requireNonNull(fileName); + + try { + bucket.rename(id, fileName); + return OperationResult.NONE; + } catch (Exception e) { + return OperationResult.of(e); + } + } + + OperationResult executeDelete(final BsonDocument operation) { + GridFSBucket bucket = getGridFsBucket(operation); + + BsonDocument arguments = operation.getDocument("arguments"); + BsonValue id = arguments.get("id"); + + if (arguments.size() > 1) { + throw new UnsupportedOperationException("Unexpected arguments " + arguments); + } + + requireNonNull(id); + + try { + bucket.delete(id); + return OperationResult.NONE; + } catch (Exception e) { + return OperationResult.of(e); + } + } + + public OperationResult executeDrop(final BsonDocument operation) { + GridFSBucket bucket = getGridFsBucket(operation); + BsonDocument arguments = operation.getDocument("arguments", new BsonDocument()); + if (arguments.size() > 0) { + throw new UnsupportedOperationException("Unexpected arguments " + operation.get("arguments")); + } + + try { + bucket.drop(); + return OperationResult.NONE; + } catch (Exception e) { + return OperationResult.of(e); + } + } + + public OperationResult executeDownload(final BsonDocument operation) { + GridFSBucket bucket = getGridFsBucket(operation); + + BsonDocument arguments = operation.getDocument("arguments"); + BsonValue id = arguments.get("id"); + + if (arguments.size() > 1) { + throw new UnsupportedOperationException("Unexpected arguments " + operation.get("arguments")); + } + + requireNonNull(id); + + try { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + bucket.downloadToStream(id, baos); + return OperationResult.of(new BsonString(HexUtils.toHex(baos.toByteArray()))); + } catch (Exception e) { + return OperationResult.of(e); + } + } + + public OperationResult executeDownloadByName(final BsonDocument operation) { + GridFSBucket bucket = entities.getBucket(operation.getString("object").getValue()); + + BsonDocument arguments = operation.getDocument("arguments"); + String filename = arguments.getString("filename").getValue(); + requireNonNull(filename); + GridFSDownloadOptions options = getDownloadOptions(arguments); + + try { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + bucket.downloadToStream(filename, baos, options); + return OperationResult.of(new BsonString(HexUtils.toHex(baos.toByteArray()))); + } catch (Exception e) { + return OperationResult.of(e); + } + } + + private GridFSDownloadOptions getDownloadOptions(final BsonDocument arguments) { + GridFSDownloadOptions options = new GridFSDownloadOptions(); + + for (Map.Entry cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "filename": + break; + case "revision": + options.revision(cur.getValue().asNumber().intValue()); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + return options; + } + + public OperationResult executeUpload(final BsonDocument operation) { + GridFSBucket bucket = getGridFsBucket(operation); + + BsonDocument arguments = operation.getDocument("arguments"); + String filename = null; + byte[] bytes = null; + 
GridFSUploadOptions options = new GridFSUploadOptions(); + + for (Map.Entry<String, BsonValue> cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "filename": + filename = cur.getValue().asString().getValue(); + break; + case "source": + bytes = Hex.decode(cur.getValue().asDocument().getString("$$hexBytes").getValue()); + break; + case "chunkSizeBytes": + options.chunkSizeBytes(cur.getValue().asInt32().getValue()); + break; + case "disableMD5": + break; + case "metadata": + options.metadata(asDocument(cur.getValue().asDocument())); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + requireNonNull(filename); + requireNonNull(bytes); + + try { + ByteArrayInputStream bais = new ByteArrayInputStream(bytes); + BsonObjectId id = new BsonObjectId(bucket.uploadFromStream(filename, bais, options)); + + if (operation.containsKey("saveResultAsEntity")) { + entities.addResult(operation.getString("saveResultAsEntity").getValue(), id); + } + return OperationResult.of(id); + } catch (Exception e) { + return OperationResult.of(e); + } + } + + Document asDocument(final BsonDocument bsonDocument) { + return new DocumentCodec().decode(new BsonDocumentReader(bsonDocument), DecoderContext.builder().build()); + } + + private GridFSBucket getGridFsBucket(final BsonDocument operation) { + GridFSBucket bucket = entities.getBucket(operation.getString("object").getValue()); + Long timeoutMS = getAndRemoveTimeoutMS(operation.getDocument("arguments", new BsonDocument())); + if (timeoutMS != null) { + bucket = bucket.withTimeout(timeoutMS, TimeUnit.MILLISECONDS); + } + return bucket; + } + + private GridFSFindIterable createGridFSFindIterable(final BsonDocument operation) { + GridFSBucket bucket = getGridFsBucket(operation); + + BsonDocument arguments = operation.getDocument("arguments"); + BsonDocument filter = arguments.getDocument("filter"); + GridFSFindIterable iterable = bucket.find(filter); + for (Map.Entry<String, BsonValue> cur : arguments.entrySet()) { + switch (cur.getKey()) { + case "session": + case "filter": + break; + case "sort": + iterable.sort(cur.getValue().asDocument()); + break; + case "batchSize": + iterable.batchSize(cur.getValue().asInt32().intValue()); + break; + case "maxTimeMS": + iterable.maxTime(cur.getValue().asInt32().longValue(), TimeUnit.MILLISECONDS); + break; + case "skip": + iterable.skip(cur.getValue().asInt32().intValue()); + break; + case "limit": + iterable.limit(cur.getValue().asInt32().intValue()); + break; + default: + throw new UnsupportedOperationException("Unsupported argument: " + cur.getKey()); + } + } + return iterable; + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedGridFSTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedGridFSTest.java new file mode 100644 index 00000000000..19ab63cec59 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedGridFSTest.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.unified; + +import org.junit.jupiter.params.provider.Arguments; + +import java.util.Collection; + +final class UnifiedGridFSTest extends UnifiedSyncTest { + private static Collection<Arguments> data() { + return getTestData("gridfs"); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedHelper.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedHelper.java new file mode 100644 index 00000000000..027ccf92fb5 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedHelper.java @@ -0,0 +1,31 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.unified; + +import org.bson.BsonDocument; + +abstract class UnifiedHelper { + + static Long getAndRemoveTimeoutMS(final BsonDocument arguments) { + Long timeoutMS = null; + if (arguments.containsKey("timeoutMS")) { + timeoutMS = arguments.getNumber("timeoutMS").longValue(); + arguments.remove("timeoutMS"); + } + return timeoutMS; + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedRetryableReadsTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedRetryableReadsTest.java new file mode 100644 index 00000000000..7d8239707ad --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedRetryableReadsTest.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.unified; + +import org.junit.jupiter.params.provider.Arguments; + +import java.util.Collection; + +public final class UnifiedRetryableReadsTest extends UnifiedSyncTest { + private static Collection<Arguments> data() { + return getTestData("retryable-reads"); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedRetryableWritesTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedRetryableWritesTest.java new file mode 100644 index 00000000000..871af77b968 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedRetryableWritesTest.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.unified; + +import org.junit.jupiter.params.provider.Arguments; + +import java.util.Collection; + +public final class UnifiedRetryableWritesTest extends UnifiedSyncTest { + private static Collection<Arguments> data() { + return getTestData("retryable-writes"); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedServerDiscoveryAndMonitoringTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedServerDiscoveryAndMonitoringTest.java new file mode 100644 index 00000000000..83a3e75d956 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedServerDiscoveryAndMonitoringTest.java @@ -0,0 +1,26 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.unified; + +import org.junit.jupiter.params.provider.Arguments; +import java.util.Collection; + +public final class UnifiedServerDiscoveryAndMonitoringTest extends UnifiedSyncTest { + private static Collection<Arguments> data() { + return getTestData("server-discovery-and-monitoring"); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedSyncTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedSyncTest.java new file mode 100644 index 00000000000..9fc9ef5617f --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedSyncTest.java @@ -0,0 +1,56 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.unified; + +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.MongoClientSettings; +import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoClients; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.gridfs.GridFSBucket; +import com.mongodb.client.gridfs.GridFSBuckets; +import com.mongodb.client.internal.ClientEncryptionImpl; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.lang.NonNull; +import org.junit.jupiter.params.provider.Arguments; + +import java.util.Collection; + +public abstract class UnifiedSyncTest extends UnifiedTest { + protected UnifiedSyncTest() { + } + + @Override + protected MongoClient createMongoClient(final MongoClientSettings settings) { + return MongoClients.create(settings); + } + + @Override + protected GridFSBucket createGridFSBucket(final MongoDatabase database) { + return GridFSBuckets.create(database); + } + + @Override + protected ClientEncryption createClientEncryption(final MongoClient keyVaultClient, final ClientEncryptionSettings clientEncryptionSettings) { + return new ClientEncryptionImpl(keyVaultClient, clientEncryptionSettings); + } + + @NonNull + protected static Collection<Arguments> getTestData(final String directory) { + return getTestData(directory, false, Language.JAVA); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java new file mode 100644 index 00000000000..79b2a9c9da9 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTest.java @@ -0,0 +1,1117 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.unified; + +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoNamespace; +import com.mongodb.ReadPreference; +import com.mongodb.UnixServerAddress; +import com.mongodb.WriteConcern; +import com.mongodb.client.ClientSession; +import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoDatabase; +import com.mongodb.client.gridfs.GridFSBucket; +import com.mongodb.client.model.Filters; +import com.mongodb.client.test.CollectionHelper; +import com.mongodb.client.unified.UnifiedTestModifications.TestDef; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.connection.ClusterDescription; +import com.mongodb.connection.ClusterType; +import com.mongodb.connection.ServerDescription; +import com.mongodb.event.CommandEvent; +import com.mongodb.event.CommandStartedEvent; +import com.mongodb.event.TestServerMonitorListener; +import com.mongodb.internal.connection.TestClusterListener; +import com.mongodb.internal.connection.TestCommandListener; +import com.mongodb.internal.connection.TestConnectionPoolListener; +import com.mongodb.internal.logging.LogMessage; +import com.mongodb.lang.NonNull; +import com.mongodb.lang.Nullable; +import com.mongodb.logging.TestLoggingInterceptor; +import com.mongodb.test.AfterBeforeParameterResolver; +import org.bson.BsonArray; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.BsonValue; +import org.bson.assertions.Assertions; +import org.bson.codecs.BsonDocumentCodec; +import org.bson.diagnostics.Logger; +import org.bson.diagnostics.Loggers; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.opentest4j.TestAbortedException; + +import java.text.MessageFormat; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.stream.Collectors; + +import static com.mongodb.ClusterFixture.getServerVersion; +import static com.mongodb.client.Fixture.getMongoClient; +import static com.mongodb.client.Fixture.getMongoClientSettings; +import static com.mongodb.client.test.CollectionHelper.getCurrentClusterTime; +import static com.mongodb.client.test.CollectionHelper.killAllSessions; +import static com.mongodb.client.unified.RunOnRequirementsMatcher.runOnRequirementsMet; +import static com.mongodb.client.unified.UnifiedTestModifications.Modifier; +import static com.mongodb.client.unified.UnifiedTestModifications.applyCustomizations; +import static com.mongodb.client.unified.UnifiedTestModifications.testDef; +import static java.lang.String.format; +import static java.util.Arrays.asList; +import static java.util.stream.Collectors.toList; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static 
org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assumptions.abort; +import static org.junit.jupiter.api.Assumptions.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeTrue; +import static util.JsonPoweredTestHelper.getSpecTestDocuments; + +@ExtendWith(AfterBeforeParameterResolver.class) +public abstract class UnifiedTest { + private static final Logger LOGGER = Loggers.getLogger("UnifiedTest"); + + private static final Set<String> PRESTART_POOL_ASYNC_WORK_MANAGER_FILE_DESCRIPTIONS = Collections.singleton( + "wait queue timeout errors include details about checked out connections"); + + private static final String MAX_SUPPORTED_SCHEMA_VERSION = "1.25"; + private static final List<Integer> MAX_SUPPORTED_SCHEMA_VERSION_COMPONENTS = Arrays.stream(MAX_SUPPORTED_SCHEMA_VERSION.split("\\.")) + .map(Integer::parseInt) + .collect(Collectors.toList()); + + private static final String TOPOLOGY_CLOSED_EVENT = "topologyClosedEvent"; + private static final List<String> TOPOLOGY_EVENT_NAMES = asList("topologyOpeningEvent", "topologyDescriptionChangedEvent", + TOPOLOGY_CLOSED_EVENT); + + public static final int RETRY_ATTEMPTS = 3; + public static final int FORCE_FLAKY_ATTEMPTS = 10; + private static final Set<String> ATTEMPTED_TESTS_TO_HENCEFORTH_IGNORE = new HashSet<>(); + + @Nullable + private String fileDescription; + private String schemaVersion; + @Nullable + private BsonArray runOnRequirements; + private BsonArray entitiesArray; + private BsonArray initialData; + private BsonDocument definition; + private Entities entities; + private UnifiedCrudHelper crudHelper; + private UnifiedGridFSHelper gridFSHelper; + private UnifiedClientEncryptionHelper clientEncryptionHelper; + private List<FailPoint> failPoints; + private UnifiedTestContext rootContext; + private boolean ignoreExtraEvents; + private BsonDocument startingClusterTime; + @Nullable + private TestDef testDef; + + private class UnifiedTestContext { + private final AssertionContext context = new AssertionContext(); + private final ValueMatcher valueMatcher = new ValueMatcher(entities, context); + private final ErrorMatcher errorMatcher = new ErrorMatcher(context, valueMatcher); + private final EventMatcher eventMatcher = new EventMatcher(valueMatcher, context); + private final LogMatcher logMatcher = new LogMatcher(valueMatcher, context); + + AssertionContext getAssertionContext() { + return context; + } + + ValueMatcher getValueMatcher() { + return valueMatcher; + } + + ErrorMatcher getErrorMatcher() { + return errorMatcher; + } + + EventMatcher getEventMatcher() { + return eventMatcher; + } + + LogMatcher getLogMatcher() { + return logMatcher; + } + } + + protected UnifiedTest() { + } + + public Entities getEntities() { + return entities; + } + + @NonNull + protected static Collection<Arguments> getTestData(final String directory, final boolean isReactive, final Language language) { + List<Arguments> data = new ArrayList<>(); + + for (BsonDocument fileDocument : getSpecTestDocuments(directory)) { + if (!fileDocument.containsKey("schemaVersion")) { + LOGGER.info("Not a unified test file: " + fileDocument.getString("fileName").getValue()); + continue; + } + String fileDescription = fileDocument.getString("description").getValue(); + for (BsonValue cur : fileDocument.getArray("tests")) { + final BsonDocument testDocument = cur.asDocument(); + String testDescription = 
testDocument.getString("description").getValue(); + TestDef testDef = testDef(directory, fileDescription, testDescription, isReactive, language); + applyCustomizations(testDef); + + boolean forceFlaky = testDef.wasAssignedModifier(Modifier.FORCE_FLAKY); + boolean retry = forceFlaky || testDef.wasAssignedModifier(Modifier.RETRY); + + int attempts; + if (retry) { + attempts = forceFlaky ? FORCE_FLAKY_ATTEMPTS : RETRY_ATTEMPTS; + } else { + attempts = 1; + } + + for (int attempt = 1; attempt <= attempts; attempt++) { + String testName = MessageFormat.format("{0}: {1}", fileDescription, testDescription); + data.add(Arguments.of( + testName, + fileDescription, + testDescription, + directory, + attempt, + attempts, + fileDocument.getString("schemaVersion").getValue(), + fileDocument.getArray("runOnRequirements", null), + fileDocument.getArray("createEntities", new BsonArray()), + fileDocument.getArray("initialData", new BsonArray()), + testDocument.clone())); + } + } + } + return data; + } + + protected BsonDocument getDefinition() { + return definition; + } + + protected abstract MongoClient createMongoClient(MongoClientSettings settings); + + protected abstract GridFSBucket createGridFSBucket(MongoDatabase database); + + protected abstract ClientEncryption createClientEncryption(MongoClient keyVaultClient, ClientEncryptionSettings clientEncryptionSettings); + + @BeforeEach + public void setUp( + final String testName, + @Nullable final String fileDescription, + @Nullable final String testDescription, + @Nullable final String directoryName, + final int attemptNumber, + final int totalAttempts, + final String schemaVersion, + @Nullable final BsonArray runOnRequirements, + final BsonArray entitiesArray, + final BsonArray initialData, + final BsonDocument definition) { + this.fileDescription = fileDescription; + this.schemaVersion = schemaVersion; + this.runOnRequirements = runOnRequirements; + this.entitiesArray = entitiesArray; + this.initialData = initialData; + this.definition = definition; + entities = new Entities(); + crudHelper = new UnifiedCrudHelper(entities, definition.getString("description").getValue()); + gridFSHelper = new UnifiedGridFSHelper(entities); + clientEncryptionHelper = new UnifiedClientEncryptionHelper(entities); + failPoints = new ArrayList<>(); + rootContext = new UnifiedTestContext(); + rootContext.getAssertionContext().push(ContextElement.ofTest(definition)); + ignoreExtraEvents = false; + if (directoryName != null && fileDescription != null && testDescription != null) { + testDef = testDef(directoryName, fileDescription, testDescription, isReactive(), getLanguage()); + applyCustomizations(testDef); + + boolean skip = testDef.wasAssignedModifier(Modifier.SKIP); + assumeFalse(skip, "Skipping test"); + } + skips(fileDescription, testDescription); + + assumeTrue(isSupportedSchemaVersion(schemaVersion), format("Unsupported schema version %s", schemaVersion)); + + if (runOnRequirements != null) { + assumeTrue(runOnRequirementsMet(runOnRequirements, getMongoClientSettings(), getServerVersion()), + "Run-on requirements not met"); + } + if (definition.containsKey("runOnRequirements")) { + assumeTrue(runOnRequirementsMet(definition.getArray("runOnRequirements", new BsonArray()), getMongoClientSettings(), + getServerVersion()), + "Run-on requirements not met"); + } + if (definition.containsKey("skipReason")) { + throw new TestAbortedException(definition.getString("skipReason").getValue()); + } + + killAllSessions(); + + startingClusterTime = addInitialDataAndGetClusterTime(); + 
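+ // Note: initial data is inserted and the starting cluster time recorded before the entities below are created.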
+ entities.init(entitiesArray, startingClusterTime, + fileDescription != null && PRESTART_POOL_ASYNC_WORK_MANAGER_FILE_DESCRIPTIONS.contains(fileDescription), + this::createMongoClient, + this::createGridFSBucket, + this::createClientEncryption); + if (testDef != null) { + postSetUp(testDef); + } + } + + protected void postSetUp(final TestDef def) { + } + + @AfterEach + public void cleanUp() { + for (FailPoint failPoint : failPoints) { + failPoint.disableFailPoint(); + } + entities.close(); + if (testDef != null) { + postCleanUp(testDef); + } + } + + protected void postCleanUp(final TestDef testDef) { + } + + /** + * This method is called once per + * {@link #setUp(String, String, String, String, int, int, String, org.bson.BsonArray, org.bson.BsonArray, org.bson.BsonArray, org.bson.BsonDocument)}, unless + * {@link #setUp(String, String, String, String, int, int, String, org.bson.BsonArray, org.bson.BsonArray, org.bson.BsonArray, org.bson.BsonDocument)} fails unexpectedly. + */ + protected void skips(final String fileDescription, final String testDescription) { + } + + protected boolean isReactive() { + return false; + } + + protected Language getLanguage() { + return Language.JAVA; + } + + @ParameterizedTest(name = "{0}") + @MethodSource("data") + public void shouldPassAllOutcomes( + final String testName, + @Nullable final String fileDescription, + @Nullable final String testDescription, + @Nullable final String directoryName, + final int attemptNumber, + final int totalAttempts, + final String schemaVersion, + @Nullable final BsonArray runOnRequirements, + final BsonArray entitiesArray, + final BsonArray initialData, + final BsonDocument definition) { + boolean forceFlaky = testDef.wasAssignedModifier(Modifier.FORCE_FLAKY); + if (!forceFlaky) { + boolean ignoreThisTest = ATTEMPTED_TESTS_TO_HENCEFORTH_IGNORE.contains(testName); + assumeFalse(ignoreThisTest, "Skipping a retryable test that already succeeded"); + // The attempt is what counts, since a test may fail with + // something like "ignored", and would not be retried. + // Only failures should trigger another attempt. 
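+ // Register the test name before running; a failed attempt that will be retried removes it again below.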
+ ATTEMPTED_TESTS_TO_HENCEFORTH_IGNORE.add(testName); + } + try { + BsonArray operations = definition.getArray("operations"); + for (int i = 0; i < operations.size(); i++) { + BsonValue cur = operations.get(i); + assertOperation(rootContext, cur.asDocument(), i); + } + + if (definition.containsKey("outcome")) { + assertOutcome(rootContext); + } + + if (definition.containsKey("expectEvents")) { + compareEvents(rootContext, definition); + } + + if (definition.containsKey("expectLogMessages")) { + ArrayList<LogMatcher.Tweak> tweaks = new ArrayList<>(); + if (getMongoClientSettings().getClusterSettings() + .getHosts().stream().anyMatch(serverAddress -> serverAddress instanceof UnixServerAddress)) { + tweaks.add(LogMatcher.Tweak.skip(LogMessage.Entry.Name.SERVER_PORT)); + } + compareLogMessages(rootContext, definition, tweaks); + } + } catch (TestAbortedException e) { + // if a test is ignored, we do not retry + throw e; + } catch (Throwable e) { + if (forceFlaky) { + throw e; + } + if (testDef != null && !testDef.matchesThrowable(e)) { + // if the throwable is not matched, test definitions were not intended to apply; rethrow it + throw e; + } + boolean isLastAttempt = attemptNumber == totalAttempts; + if (isLastAttempt) { + throw e; + } + + ATTEMPTED_TESTS_TO_HENCEFORTH_IGNORE.remove(testName); + abort("Ignoring failure and retrying attempt " + attemptNumber); + } + } + + private void compareEvents(final UnifiedTestContext context, final BsonDocument definition) { + for (BsonValue cur : definition.getArray("expectEvents")) { + BsonDocument curClientEvents = cur.asDocument(); + String client = curClientEvents.getString("client").getValue(); + boolean ignoreExtraEvents = + curClientEvents.getBoolean("ignoreExtraEvents", BsonBoolean.valueOf(this.ignoreExtraEvents)).getValue(); + String eventType = curClientEvents.getString("eventType", new BsonString("command")).getValue(); + BsonArray expectedEvents = curClientEvents.getArray("events"); + if (eventType.equals("command")) { + TestCommandListener listener = entities.getClientCommandListener(client); + context.getEventMatcher().assertCommandEventsEquality(client, ignoreExtraEvents, expectedEvents, + listener.getEvents()); + } else if (eventType.equals("cmap")) { + TestConnectionPoolListener listener = entities.getConnectionPoolListener(client); + context.getEventMatcher().assertConnectionPoolEventsEquality(client, ignoreExtraEvents, expectedEvents, + listener.getEvents()); + } else if (eventType.equals("sdam")) { + + // SDAM tests also include topology events, so we separate them in order to assert them independently. + // Partition the expected events into two lists, keyed by whether the event is a topology event. + Map<Boolean, List<BsonDocument>> partitionedEventsMap = expectedEvents.stream() + .map(BsonValue::asDocument) + .collect(Collectors.partitioningBy(doc -> TOPOLOGY_EVENT_NAMES.stream().anyMatch(doc::containsKey))); + + BsonArray expectedTopologyEvents = new BsonArray(partitionedEventsMap.get(true)); + if (!expectedTopologyEvents.isEmpty()) { + TestClusterListener clusterListener = entities.getClusterListener(client); + // Unfortunately, some tests expect the cluster to be closed, but do not define it as a waitForEvent in the spec - + // causing a race condition in the test. 
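+ // Waiting for the clusterClosingEvent here avoids asserting before the cluster has actually closed.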
+ if (expectedTopologyEvents.stream().anyMatch(doc -> doc.asDocument().containsKey(TOPOLOGY_CLOSED_EVENT))) { + context.getEventMatcher().waitForClusterClosedEvent(client, clusterListener); + } + + List<Object> topologyEvents = new ArrayList<>(); + topologyEvents.add(clusterListener.getClusterOpeningEvent()); + topologyEvents.addAll(clusterListener.getClusterDescriptionChangedEvents()); + topologyEvents.add(clusterListener.getClusterClosingEvent()); + context.getEventMatcher().assertTopologyEventsEquality(client, ignoreExtraEvents, expectedTopologyEvents, topologyEvents); + } + + BsonArray expectedSdamEvents = new BsonArray(partitionedEventsMap.get(false)); + if (!expectedSdamEvents.isEmpty()) { + TestServerMonitorListener serverMonitorListener = entities.getServerMonitorListener(client); + context.getEventMatcher().assertServerMonitorEventsEquality(client, ignoreExtraEvents, expectedSdamEvents, serverMonitorListener.getEvents()); + } + } else { + throw new UnsupportedOperationException("Unexpected event type: " + eventType); + } + } + } + + private boolean isSupportedSchemaVersion(final String schemaVersion) { + List<Integer> schemaVersionComponents = Arrays.stream(schemaVersion.split("\\.")) + .map(Integer::parseInt) + .collect(Collectors.toList()); + + if (schemaVersionComponents.size() != 2) { + Assertions.fail("Unsupported schema version: " + schemaVersion); + } else if (schemaVersionComponents.get(0) < 1) { + Assertions.fail("Unsupported schema version: " + schemaVersion); + } + + for (int i = 0; i < 2; i++) { + int schemaComponent = schemaVersionComponents.get(i); + int maxSupportedComponent = MAX_SUPPORTED_SCHEMA_VERSION_COMPONENTS.get(i); + if (schemaComponent > maxSupportedComponent) { + return false; + } + } + return true; + } + + private void compareLogMessages(final UnifiedTestContext rootContext, final BsonDocument definition, + final Iterable<LogMatcher.Tweak> tweaks) { + for (BsonValue cur : definition.getArray("expectLogMessages")) { + BsonDocument curLogMessagesForClient = cur.asDocument(); + boolean ignoreExtraMessages = curLogMessagesForClient.getBoolean("ignoreExtraMessages", BsonBoolean.FALSE).getValue(); + BsonArray ignoreMessages = curLogMessagesForClient.getArray("ignoreMessages", new BsonArray()); + String clientId = curLogMessagesForClient.getString("client").getValue(); + TestLoggingInterceptor loggingInterceptor = + entities.getClientLoggingInterceptor(clientId); + rootContext.getLogMatcher().assertLogMessageEquality(clientId, ignoreMessages, ignoreExtraMessages, + curLogMessagesForClient.getArray("messages"), loggingInterceptor.getMessages(), tweaks); + } + } + + private void assertOutcome(final UnifiedTestContext context) { + for (BsonValue cur : definition.getArray("outcome")) { + BsonDocument curDocument = cur.asDocument(); + MongoNamespace namespace = new MongoNamespace(curDocument.getString("databaseName").getValue(), + curDocument.getString("collectionName").getValue()); + List<BsonDocument> expectedOutcome = curDocument.getArray("documents").stream().map(BsonValue::asDocument).collect(toList()); + List<BsonDocument> actualOutcome = new CollectionHelper<>(new BsonDocumentCodec(), namespace).find(); + context.getAssertionContext().push(ContextElement.ofOutcome(namespace, expectedOutcome, actualOutcome)); + assertEquals(expectedOutcome, actualOutcome, context.getAssertionContext().getMessage("Outcomes are not equal")); + context.getAssertionContext().pop(); + } + } + + private void assertOperationAndThrow(final UnifiedTestContext context, final BsonDocument operation, final int operationIndex) { + OperationResult 
result = executeOperation(context, operation, operationIndex); + assertOperationResult(context, operation, operationIndex, result); + + if (result.getException() != null) { + throw (RuntimeException) result.getException(); + } + } + + private void assertOperation(final UnifiedTestContext context, final BsonDocument operation, final int operationIndex) { + OperationResult result = executeOperation(context, operation, operationIndex); + assertOperationResult(context, operation, operationIndex, result); + } + + private static void assertOperationResult(final UnifiedTestContext context, final BsonDocument operation, final int operationIndex, + final OperationResult result) { + context.getAssertionContext().push(ContextElement.ofCompletedOperation(operation, result, operationIndex)); + + if (!operation.getBoolean("ignoreResultAndError", BsonBoolean.FALSE).getValue()) { + Exception operationException = result.getException(); + if (operation.containsKey("expectResult")) { + BsonValue expectedResult = operation.get("expectResult"); + if (expectedResult.isDocument() && expectedResult.asDocument().containsKey("isTimeoutError")) { + assertNotNull(operationException, + context.getAssertionContext().getMessage("The operation expects a timeout error but no timeout exception was" + + " thrown")); + context.getErrorMatcher().assertErrorsMatch(expectedResult.asDocument(), operationException); + } else { + assertNull(operationException, + context.getAssertionContext().getMessage("The operation expects a result but an exception occurred")); + context.getValueMatcher().assertValuesMatch(expectedResult, result.getResult()); + } + } else if (operation.containsKey("expectError")) { + assertNotNull(operationException, + context.getAssertionContext().getMessage("The operation expects an error but no exception was thrown")); + context.getErrorMatcher().assertErrorsMatch(operation.getDocument("expectError"), operationException); + } else { + assertNull(operationException, + context.getAssertionContext().getMessage("The operation expects no error but an exception occurred")); + } + } + context.getAssertionContext().pop(); + } + + private OperationResult executeOperation(final UnifiedTestContext context, final BsonDocument operation, final int operationNum) { + context.getAssertionContext().push(ContextElement.ofStartedOperation(operation, operationNum)); + String name = operation.getString("name").getValue(); + String object = operation.getString("object").getValue(); + try { + switch (name) { + case "createEntities": + return executeCreateEntities(operation); + case "wait": + return executeWait(operation); + case "waitForEvent": + return executeWaitForEvent(context, operation); + case "waitForPrimaryChange": + return executeWaitPrimaryChange(context, operation); + case "waitForThread": + return executeWaitForThread(context, operation); + case "recordTopologyDescription": + return executeRecordTopologyDescription(operation); + case "assertTopologyType": + return executeAssertTopologyType(context, operation); + case "runOnThread": + return executeRunOnThread(context, operation); + case "assertEventCount": + return executeAssertEventCount(context, operation); + case "failPoint": + return executeFailPoint(operation); + case "targetedFailPoint": + return executeTargetedFailPoint(operation); + case "endSession": + return executeEndSession(operation); + case "assertSessionDirty": + return executeAssertSessionDirty(operation); + case "assertSessionNotDirty": + return executeAssertSessionNotDirty(operation); + case 
"assertSessionPinned": + return executeAssertSessionPinned(operation); + case "assertSessionUnpinned": + return executeAssertSessionUnpinned(operation); + case "assertSameLsidOnLastTwoCommands": + return executeAssertSameLsidOnLastTwoCommands(operation); + case "assertDifferentLsidOnLastTwoCommands": + return executeAssertDifferentLsidOnLastTwoCommands(operation); + case "assertNumberConnectionsCheckedOut": + return executeAssertNumberConnectionsCheckedOut(context, operation); + case "assertSessionTransactionState": + return executeAssertSessionTransactionState(operation); + case "assertCollectionExists": + return executeAssertCollectionExists(operation); + case "assertCollectionNotExists": + return executeAssertCollectionNotExists(operation); + case "assertIndexExists": + return executeAssertIndexExists(operation); + case "assertIndexNotExists": + return executeAssertIndexNotExists(operation); + case "bulkWrite": + return crudHelper.executeBulkWrite(operation); + case "insertOne": + return crudHelper.executeInsertOne(operation); + case "insertMany": + return crudHelper.executeInsertMany(operation); + case "updateOne": + return crudHelper.executeUpdateOne(operation); + case "updateMany": + return crudHelper.executeUpdateMany(operation); + case "replaceOne": + return crudHelper.executeReplaceOne(operation); + case "deleteOne": + return crudHelper.executeDeleteOne(operation); + case "deleteMany": + return crudHelper.executeDeleteMany(operation); + case "aggregate": + return crudHelper.executeAggregate(operation); + case "find": + if ("bucket".equals(object)){ + return gridFSHelper.executeFind(operation); + } + return crudHelper.executeFind(operation); + case "findOne": + return crudHelper.executeFindOne(operation); + case "distinct": + return crudHelper.executeDistinct(operation); + case "mapReduce": + return crudHelper.executeMapReduce(operation); + case "countDocuments": + return crudHelper.executeCountDocuments(operation); + case "estimatedDocumentCount": + return crudHelper.executeEstimatedDocumentCount(operation); + case "findOneAndUpdate": + return crudHelper.executeFindOneAndUpdate(operation); + case "findOneAndReplace": + return crudHelper.executeFindOneAndReplace(operation); + case "findOneAndDelete": + return crudHelper.executeFindOneAndDelete(operation); + case "listDatabases": + return crudHelper.executeListDatabases(operation); + case "listDatabaseNames": + return crudHelper.executeListDatabaseNames(operation); + case "listCollections": + return crudHelper.executeListCollections(operation); + case "listCollectionNames": + return crudHelper.executeListCollectionNames(operation); + case "listIndexes": + return crudHelper.executeListIndexes(operation); + case "listIndexNames": + return crudHelper.executeListIndexNames(operation); + case "dropCollection": + return crudHelper.executeDropCollection(operation); + case "createCollection": + return crudHelper.executeCreateCollection(operation); + case "modifyCollection": + return crudHelper.executeModifyCollection(operation); + case "rename": + if ("bucket".equals(object)){ + return gridFSHelper.executeRename(operation); + } + return crudHelper.executeRenameCollection(operation); + case "createSearchIndex": + return crudHelper.executeCreateSearchIndex(operation); + case "createSearchIndexes": + return crudHelper.executeCreateSearchIndexes(operation); + case "updateSearchIndex": + return crudHelper.executeUpdateSearchIndex(operation); + case "dropSearchIndex": + return crudHelper.executeDropSearchIndex(operation); + case 
"listSearchIndexes": + return crudHelper.executeListSearchIndexes(operation); + case "createIndex": + return crudHelper.executeCreateIndex(operation); + case "dropIndex": + return crudHelper.executeDropIndex(operation); + case "dropIndexes": + return crudHelper.executeDropIndexes(operation); + case "startTransaction": + return crudHelper.executeStartTransaction(operation); + case "commitTransaction": + return crudHelper.executeCommitTransaction(operation); + case "abortTransaction": + return crudHelper.executeAbortTransaction(operation); + case "withTransaction": + return crudHelper.executeWithTransaction(operation, (op, idx) -> assertOperationAndThrow(context, op, idx)); + case "createFindCursor": + return crudHelper.createFindCursor(operation); + case "createChangeStream": + return crudHelper.createChangeStreamCursor(operation); + case "clientBulkWrite": + return crudHelper.clientBulkWrite(operation); + case "close": + return crudHelper.close(operation); + case "iterateUntilDocumentOrError": + return crudHelper.executeIterateUntilDocumentOrError(operation); + case "iterateOnce": + return crudHelper.executeIterateOnce(operation); + case "delete": + return gridFSHelper.executeDelete(operation); + case "drop": + return gridFSHelper.executeDrop(operation); + case "download": + return gridFSHelper.executeDownload(operation); + case "downloadByName": + return gridFSHelper.executeDownloadByName(operation); + case "upload": + return gridFSHelper.executeUpload(operation); + case "runCommand": + return crudHelper.executeRunCommand(operation); + case "createDataKey": + return clientEncryptionHelper.executeCreateDataKey(operation); + case "addKeyAltName": + return clientEncryptionHelper.executeAddKeyAltName(operation); + case "deleteKey": + return clientEncryptionHelper.executeDeleteKey(operation); + case "removeKeyAltName": + return clientEncryptionHelper.executeRemoveKeyAltName(operation); + case "getKey": + return clientEncryptionHelper.executeGetKey(operation); + case "getKeys": + return clientEncryptionHelper.executeGetKeys(operation); + case "getKeyByAltName": + return clientEncryptionHelper.executeGetKeyByAltName(operation); + case "rewrapManyDataKey": + return clientEncryptionHelper.executeRewrapManyDataKey(operation); + case "encrypt": + return clientEncryptionHelper.executeEncrypt(operation); + case "decrypt": + return clientEncryptionHelper.executeDecrypt(operation); + case "appendMetadata": + return crudHelper.executeUpdateClientMetadata(operation); + default: + throw new UnsupportedOperationException("Unsupported test operation: " + name); + } + } finally { + context.getAssertionContext().pop(); + } + } + + protected boolean terminateLoop() { + return true; + } + + private OperationResult executeCreateEntities(final BsonDocument operation) { + entities.init(operation.getDocument("arguments").getArray("entities"), + startingClusterTime, + false, + this::createMongoClient, + this::createGridFSBucket, + this::createClientEncryption); + return OperationResult.NONE; + } + + private OperationResult executeWait(final BsonDocument operation) { + try { + Thread.sleep(operation.getDocument("arguments").getNumber("ms").longValue()); + return OperationResult.NONE; + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + + private OperationResult executeWaitForEvent(final UnifiedTestContext context, final BsonDocument operation) { + BsonDocument arguments = operation.getDocument("arguments"); + String clientId = arguments.getString("client").getValue(); + BsonDocument event = 
arguments.getDocument("event"); + String eventName = event.getFirstKey(); + int count = arguments.getNumber("count").intValue(); + + switch (eventName) { + case "serverDescriptionChangedEvent": + context.getEventMatcher().waitForServerDescriptionChangedEvents(clientId, event, count, + entities.getServerListener(clientId)); + break; + case "topologyDescriptionChangedEvent": + context.getEventMatcher().waitForClusterDescriptionChangedEvents(clientId, event, count, + entities.getClusterListener(clientId)); + break; + case "poolClearedEvent": + case "poolReadyEvent": + case "connectionCreatedEvent": + case "connectionReadyEvent": + context.getEventMatcher().waitForConnectionPoolEvents(clientId, event, count, entities.getConnectionPoolListener(clientId)); + break; + case "serverHeartbeatStartedEvent": + case "serverHeartbeatSucceededEvent": + case "serverHeartbeatFailedEvent": + context.getEventMatcher().waitForServerMonitorEvents(clientId, TestServerMonitorListener.eventType(eventName), event, count, + entities.getServerMonitorListener(clientId)); + break; + case "commandStartedEvent": + context.getEventMatcher().waitForCommandEvents(clientId, event, count, entities.getClientCommandListener(clientId)); + break; + default: + throw new UnsupportedOperationException("Unsupported event: " + eventName); + } + + return OperationResult.NONE; + } + + private OperationResult executeAssertEventCount(final UnifiedTestContext context, final BsonDocument operation) { + BsonDocument arguments = operation.getDocument("arguments"); + String clientId = arguments.getString("client").getValue(); + BsonDocument event = arguments.getDocument("event"); + String eventName = event.getFirstKey(); + int count = arguments.getNumber("count").intValue(); + + switch (eventName) { + case "serverDescriptionChangedEvent": + context.getEventMatcher().assertServerDescriptionChangeEventCount(clientId, event, count, + entities.getServerListener(clientId).getServerDescriptionChangedEvents()); + break; + case "topologyDescriptionChangedEvent": + context.getEventMatcher().assertClusterDescriptionChangeEventCount(clientId, event, count, + entities.getClusterListener(clientId).getClusterDescriptionChangedEvents()); + break; + case "poolClearedEvent": + case "poolReadyEvent": + context.getEventMatcher().assertConnectionPoolEventCount(clientId, event, count, + entities.getConnectionPoolListener(clientId).getEvents()); + break; + case "serverHeartbeatStartedEvent": + case "serverHeartbeatSucceededEvent": + case "serverHeartbeatFailedEvent": + context.getEventMatcher().assertServerMonitorEventCount(clientId, TestServerMonitorListener.eventType(eventName), event, count, + entities.getServerMonitorListener(clientId)); + break; + default: + throw new UnsupportedOperationException("Unsupported event: " + eventName); + } + + return OperationResult.NONE; + } + + private OperationResult executeWaitPrimaryChange(final UnifiedTestContext context, final BsonDocument operation) { + context.getAssertionContext().push(ContextElement.ofWaitForPrimaryChange()); + BsonDocument arguments = operation.getDocument("arguments"); + MongoClient client = entities.getClient(arguments.getString("client").getValue()); + ClusterDescription priorClusterDescription = + entities.getTopologyDescription(arguments.getString("priorTopologyDescription").getValue()); + ClusterDescription currentClusterDescription = client.getClusterDescription(); + long timeoutNanos = + TimeUnit.NANOSECONDS.convert(arguments.getNumber("timeoutMS", new BsonInt32(10000)).longValue(), 
TimeUnit.MILLISECONDS); + long startTime = System.nanoTime(); + while (primaryIsSame(priorClusterDescription, currentClusterDescription) || noPrimary(currentClusterDescription)) { + if (System.nanoTime() - startTime > timeoutNanos) { + fail(context.getAssertionContext().getMessage("Timed out waiting for primary change")); + } + try { + //noinspection BusyWait + Thread.sleep(10); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + currentClusterDescription = client.getClusterDescription(); + } + context.getAssertionContext().pop(); + return OperationResult.NONE; + } + + private boolean noPrimary(final ClusterDescription clusterDescription) { + return ReadPreference.primary().choose(clusterDescription).isEmpty(); + } + + private boolean primaryIsSame(final ClusterDescription priorClusterDescription, final ClusterDescription currentClusterDescription) { + List<ServerDescription> priorPrimaries = ReadPreference.primary().choose(priorClusterDescription); + List<ServerDescription> currentPrimaries = ReadPreference.primary().choose(currentClusterDescription); + if (priorPrimaries.isEmpty() && currentPrimaries.isEmpty()) { + return true; + } + if (priorPrimaries.size() == 1 && currentPrimaries.size() == 1) { + return priorPrimaries.get(0).getAddress().equals(currentPrimaries.get(0).getAddress()); + } else { + return false; + } + } + + private OperationResult executeWaitForThread(final UnifiedTestContext context, final BsonDocument operation) { + BsonDocument arguments = operation.getDocument("arguments"); + String threadId = arguments.getString("thread").getValue(); + context.getAssertionContext().push(ContextElement.ofWaitForThread(threadId)); + List<Future<?>> tasks = entities.getThreadTasks(threadId); + for (Future<?> task : tasks) { + try { + task.get(10, TimeUnit.SECONDS); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } catch (ExecutionException executionException) { + try { + throw executionException.getCause(); + } catch (Throwable e) { + fail(context.getAssertionContext().getMessage(e.getMessage())); + } + } catch (TimeoutException e) { + fail(context.getAssertionContext().getMessage(e.getMessage())); + } + } + entities.clearThreadTasks(threadId); + context.getAssertionContext().pop(); + return OperationResult.NONE; + } + + private OperationResult executeRecordTopologyDescription(final BsonDocument operation) { + BsonDocument arguments = operation.getDocument("arguments"); + ClusterDescription clusterDescription = entities.getClient(arguments.getString("client").getValue()).getClusterDescription(); + String topologyDescriptionId = arguments.getString("id").getValue(); + entities.addTopologyDescription(topologyDescriptionId, clusterDescription); + return OperationResult.NONE; + } + + private OperationResult executeAssertTopologyType(final UnifiedTestContext context, final BsonDocument operation) { + BsonDocument arguments = operation.getDocument("arguments"); + ClusterDescription clusterDescription = entities.getTopologyDescription(arguments.getString("topologyDescription").getValue()); + String expectedTopologyType = arguments.getString("topologyType").getValue(); + + context.getAssertionContext().push(ContextElement.ofTopologyType(expectedTopologyType)); + + assertEquals(getClusterType(expectedTopologyType), clusterDescription.getType(), + context.getAssertionContext().getMessage("Unexpected topology type")); + + context.getAssertionContext().pop(); + return OperationResult.NONE; + } + + private ClusterType getClusterType(final String topologyType) { + if 
(topologyType.equalsIgnoreCase("Sharded")) { + return ClusterType.SHARDED; + } else if (topologyType.equalsIgnoreCase("LoadBalanced")) { + return ClusterType.LOAD_BALANCED; + } else if (topologyType.startsWith("ReplicaSet")) { + return ClusterType.REPLICA_SET; + } else if (topologyType.equalsIgnoreCase("Unknown")) { + return ClusterType.UNKNOWN; + } else { + throw new IllegalArgumentException("Unsupported topology type: " + topologyType); + } + } + + private OperationResult executeRunOnThread(final UnifiedTestContext context, final BsonDocument operation) { + UnifiedTestContext newContext = new UnifiedTestContext(); + BsonDocument arguments = operation.getDocument("arguments"); + String threadId = arguments.getString("thread").getValue(); + ExecutorService thread = entities.getThread(threadId); + Future<?> future = thread.submit(() -> assertOperation(newContext, arguments.getDocument("operation"), 0)); + entities.addThreadTask(threadId, future); + return OperationResult.NONE; + } + + private OperationResult executeFailPoint(final BsonDocument operation) { + FailPoint failPoint = FailPoint.untargeted(operation, entities); + failPoint.executeFailPoint(); + failPoints.add(failPoint); + return OperationResult.NONE; + } + + private OperationResult executeTargetedFailPoint(final BsonDocument operation) { + FailPoint failPoint = FailPoint.targeted(operation, entities); + failPoint.executeFailPoint(); + failPoints.add(failPoint); + return OperationResult.NONE; + } + + private OperationResult executeEndSession(final BsonDocument operation) { + ClientSession session = entities.getSession(operation.getString("object").getValue()); + session.close(); + return OperationResult.NONE; + } + + private OperationResult executeAssertSessionDirty(final BsonDocument operation) { + return executeAssertSessionDirtiness(operation, true); + } + + private OperationResult executeAssertSessionNotDirty(final BsonDocument operation) { + return executeAssertSessionDirtiness(operation, false); + } + + private OperationResult executeAssertSessionDirtiness(final BsonDocument operation, final boolean expected) { + ClientSession session = entities.getSession(operation.getDocument("arguments").getString("session").getValue()); + assertNotNull(session.getServerSession()); + assertEquals(expected, session.getServerSession().isMarkedDirty()); + return OperationResult.NONE; + } + + private OperationResult executeAssertSessionPinned(final BsonDocument operation) { + return executeAssertSessionPinniness(operation, true); + } + + private OperationResult executeAssertSessionUnpinned(final BsonDocument operation) { + return executeAssertSessionPinniness(operation, false); + } + + private OperationResult executeAssertSessionPinniness(final BsonDocument operation, final boolean expected) { + ClientSession session = entities.getSession(operation.getDocument("arguments").getString("session").getValue()); + assertNotNull(session.getServerSession()); + assertEquals(expected, session.getPinnedServerAddress() != null); + return OperationResult.NONE; + } + + private OperationResult executeAssertNumberConnectionsCheckedOut(final UnifiedTestContext context, final BsonDocument operation) { + TestConnectionPoolListener listener = entities.getConnectionPoolListener( + operation.getDocument("arguments").getString("client").getValue()); + assertEquals(operation.getDocument("arguments").getNumber("connections").intValue(), listener.getNumConnectionsCheckedOut(), + context.getAssertionContext().getMessage("Number of checked out connections must match 
expected")); + return OperationResult.NONE; + } + + private OperationResult executeAssertSameLsidOnLastTwoCommands(final BsonDocument operation) { + return executeAssertLsidOnLastTwoCommands(operation, true); + } + + private OperationResult executeAssertDifferentLsidOnLastTwoCommands(final BsonDocument operation) { + return executeAssertLsidOnLastTwoCommands(operation, false); + } + + private OperationResult executeAssertLsidOnLastTwoCommands(final BsonDocument operation, final boolean same) { + TestCommandListener listener = entities.getClientCommandListener( + operation.getDocument("arguments").getString("client").getValue()); + List<CommandEvent> events = lastTwoCommandEvents(listener); + String eventsJson = listener.getCommandStartedEvents().stream() + .map(e -> e.getCommand().toJson()) + .collect(Collectors.joining(", ")); + BsonDocument expected = ((CommandStartedEvent) events.get(0)).getCommand().getDocument("lsid"); + BsonDocument actual = ((CommandStartedEvent) events.get(1)).getCommand().getDocument("lsid"); + if (same) { + assertEquals(expected, actual, eventsJson); + } else { + assertNotEquals(expected, actual, eventsJson); + } + return OperationResult.NONE; + } + + private OperationResult executeAssertSessionTransactionState(final BsonDocument operation) { + BsonDocument arguments = operation.getDocument("arguments"); + ClientSession session = entities.getSession(arguments.getString("session").getValue()); + String state = arguments.getString("state").getValue(); + switch (state) { + case "starting": + case "in_progress": + assertTrue(session.hasActiveTransaction()); + break; + default: + throw new UnsupportedOperationException("Unsupported transaction state: " + state); + } + return OperationResult.NONE; + } + + private OperationResult executeAssertCollectionExists(final BsonDocument operation) { + assertTrue(collectionExists(operation)); + return OperationResult.NONE; + } + + private OperationResult executeAssertCollectionNotExists(final BsonDocument operation) { + assertFalse(collectionExists(operation)); + return OperationResult.NONE; + } + + private boolean collectionExists(final BsonDocument operation) { + BsonDocument arguments = operation.getDocument("arguments"); + String databaseName = arguments.getString("databaseName").getValue(); + String collectionName = arguments.getString("collectionName").getValue(); + return getMongoClient().getDatabase(databaseName) + .listCollections().filter(Filters.eq("name", collectionName)).first() != null; + } + + private OperationResult executeAssertIndexExists(final BsonDocument operation) { + assertTrue(indexExists(operation)); + return OperationResult.NONE; + } + + private OperationResult executeAssertIndexNotExists(final BsonDocument operation) { + assertFalse(indexExists(operation)); + return OperationResult.NONE; + } + + private boolean indexExists(final BsonDocument operation) { + BsonDocument arguments = operation.getDocument("arguments"); + String databaseName = arguments.getString("databaseName").getValue(); + String collectionName = arguments.getString("collectionName").getValue(); + String indexName = arguments.getString("indexName").getValue(); + return getMongoClient().getDatabase(databaseName).getCollection(collectionName) + .listIndexes(BsonDocument.class).into(new ArrayList<>()).stream() + .anyMatch(document -> document.getString("name").getValue().equals(indexName)); + } + + private List<CommandEvent> lastTwoCommandEvents(final TestCommandListener listener) { + List<CommandStartedEvent> events = listener.getCommandStartedEvents(); + assertTrue(events.size() >= 2); 
+ return new ArrayList<>(events.subList(events.size() - 2, events.size())); + } + + private BsonDocument addInitialDataAndGetClusterTime() { + for (BsonValue cur : initialData.getValues()) { + BsonDocument curDataSet = cur.asDocument(); + CollectionHelper helper = new CollectionHelper<>(new BsonDocumentCodec(), + new MongoNamespace(curDataSet.getString("databaseName").getValue(), + curDataSet.getString("collectionName").getValue())); + + helper.dropAndCreate(curDataSet.getDocument("createOptions", new BsonDocument())); + + BsonArray documentsArray = curDataSet.getArray("documents", new BsonArray()); + if (!documentsArray.isEmpty()) { + helper.insertDocuments(documentsArray.stream().map(BsonValue::asDocument).collect(toList()), + WriteConcern.MAJORITY); + } + } + return getCurrentClusterTime(); + } + + protected void ignoreExtraCommandEvents(final boolean ignoreExtraEvents) { + this.ignoreExtraEvents = ignoreExtraEvents; + } + + protected void ignoreExtraEvents() { + this.ignoreExtraEvents = true; + } + + public enum Language { + JAVA, KOTLIN, SCALA + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTestFailureValidator.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTestFailureValidator.java new file mode 100644 index 00000000000..2b83bba8d5b --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTestFailureValidator.java @@ -0,0 +1,105 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package com.mongodb.client.unified;
+
+import com.mongodb.lang.Nullable;
+import org.bson.BsonArray;
+import org.bson.BsonDocument;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
+
+import java.util.Collection;
+
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+
+final class UnifiedTestFailureValidator extends UnifiedSyncTest {
+ private Throwable exception;
+
+ @Override
+ @BeforeEach
+ public void setUp(
+ final String testName,
+ @Nullable final String fileDescription,
+ @Nullable final String testDescription,
+ final String directoryName,
+ final int attemptNumber,
+ final int totalAttempts,
+ final String schemaVersion,
+ @Nullable final BsonArray runOnRequirements,
+ final BsonArray entitiesArray,
+ final BsonArray initialData,
+ final BsonDocument definition) {
+ try {
+ super.setUp(
+ testName,
+ fileDescription,
+ testDescription,
+ directoryName,
+ attemptNumber,
+ totalAttempts,
+ schemaVersion,
+ runOnRequirements,
+ entitiesArray,
+ initialData,
+ definition);
+ } catch (AssertionError | Exception e) {
+ exception = e;
+ }
+ }
+
+ @Override
+ @ParameterizedTest
+ @MethodSource("data")
+ public void shouldPassAllOutcomes(
+ final String testName,
+ @Nullable final String fileDescription,
+ @Nullable final String testDescription,
+ @Nullable final String directoryName,
+ final int attemptNumber,
+ final int totalAttempts,
+ final String schemaVersion,
+ @Nullable final BsonArray runOnRequirements,
+ final BsonArray entitiesArray,
+ final BsonArray initialData,
+ final BsonDocument definition) {
+ if (exception == null) {
+ try {
+ super.shouldPassAllOutcomes(
+ testName,
+ fileDescription,
+ testDescription,
+ directoryName,
+ attemptNumber,
+ totalAttempts,
+ schemaVersion,
+ runOnRequirements,
+ entitiesArray,
+ initialData,
+ definition);
+ } catch (AssertionError | Exception e) {
+ exception = e;
+ }
+ }
+ assertNotNull(exception, "Expected exception but none was thrown");
+ }
+
+ private static Collection<Arguments> data() {
+ return getTestData("unified-test-format/tests/valid-fail");
+ }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTestModifications.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTestModifications.java
new file mode 100644
index 00000000000..327cc3f3da8
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTestModifications.java
@@ -0,0 +1,716 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
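Before the modifications catalogue itself, a sketch of how a runner could consume it. The probe class below is invented; the test coordinates are real entries from the file that follows ("gridfs-rename" is skipped under JAVA-5677). Note that the `when()` preconditions query the cluster fixtures, so this only runs against a live test deployment:

```java
import com.mongodb.client.unified.UnifiedTest;
import com.mongodb.client.unified.UnifiedTestModifications;
import com.mongodb.client.unified.UnifiedTestModifications.Modifier;
import com.mongodb.client.unified.UnifiedTestModifications.TestDef;

public final class ModificationsProbe {
    public static void main(final String[] args) {
        // Describe one concrete unified test: directory, file description,
        // test description, reactive flag, and driver language.
        TestDef def = UnifiedTestModifications.testDef(
                "gridfs", "gridfs-rename", "rename by id", false, UnifiedTest.Language.JAVA);
        // Run every registered customization against the descriptor...
        UnifiedTestModifications.applyCustomizations(def);
        // ...then query the outcome; gridfs-rename matches a skipJira rule.
        System.out.println(def.wasAssignedModifier(Modifier.SKIP)); // true
    }
}
```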
+ */ + +package com.mongodb.client.unified; + +import org.opentest4j.AssertionFailedError; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.function.Function; +import java.util.function.Supplier; + +import static com.mongodb.ClusterFixture.isDiscoverableReplicaSet; +import static com.mongodb.ClusterFixture.isSharded; +import static com.mongodb.ClusterFixture.serverVersionLessThan; +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.assertions.Assertions.assertTrue; +import static com.mongodb.client.unified.UnifiedTestModifications.Modifier.IGNORE_EXTRA_EVENTS; +import static com.mongodb.client.unified.UnifiedTestModifications.Modifier.RETRY; +import static com.mongodb.client.unified.UnifiedTestModifications.Modifier.SKIP; +import static com.mongodb.client.unified.UnifiedTestModifications.Modifier.SLEEP_AFTER_CURSOR_CLOSE; +import static com.mongodb.client.unified.UnifiedTestModifications.Modifier.SLEEP_AFTER_CURSOR_OPEN; +import static com.mongodb.client.unified.UnifiedTestModifications.Modifier.WAIT_FOR_BATCH_CURSOR_CREATION; +import static java.lang.String.format; + +public final class UnifiedTestModifications { + public static void applyCustomizations(final TestDef def) { + + // change-streams + def.skipNoncompliantReactive("error required from change stream initialization") // TODO-JAVA-5711 reason? + .test("change-streams", "change-streams", "Test with document comment - pre 4.4"); + def.skipNoncompliantReactive("event sensitive tests. We can't guarantee the amount of GetMore commands sent in the reactive driver") + .test("change-streams", "change-streams", "Test that comment is set on getMore") + .test("change-streams", "change-streams", "Test that comment is not set on getMore - pre 4.4"); + def.modify(IGNORE_EXTRA_EVENTS) + .test("change-streams", "change-streams", "Test with document comment") + .test("change-streams", "change-streams", "Test with string comment"); + def.modify(SLEEP_AFTER_CURSOR_OPEN) + .directory("change-streams"); + def.modify(WAIT_FOR_BATCH_CURSOR_CREATION) + .test("change-streams", "change-streams-errors", "Change Stream should error when an invalid aggregation stage is passed in") + .test("change-streams", "change-streams-errors", "The watch helper must not throw a custom exception when executed against a single server topology, but instead depend on a server error"); + + // Client side encryption (QE) + def.skipJira("https://jira.mongodb.org/browse/JAVA-5675 Support QE with Client.bulkWrite") + .file("client-side-encryption/tests/unified", "client bulkWrite with queryable encryption"); + + // client-side-operation-timeout (CSOT) + + def.skipNoncompliantReactive("No good way to fulfill tryNext() requirement with a Publisher") + .test("client-side-operations-timeout", "timeoutMS behaves correctly for tailable awaitData cursors", + "apply remaining timeoutMS if less than maxAwaitTimeMS"); + + def.skipNoncompliantReactive("No good way to fulfill tryNext() requirement with a Publisher") + .test("client-side-operations-timeout", "timeoutMS behaves correctly for tailable awaitData cursors", + "apply maxAwaitTimeMS if less than remaining timeout"); + + def.skipJira("https://jira.mongodb.org/browse/JAVA-5839") + .test("client-side-operations-timeout", "timeoutMS behaves correctly for GridFS download operations", + "timeoutMS applied to entire download, not individual parts"); + + def.skipJira("https://jira.mongodb.org/browse/JAVA-5491") + .when(() -> !serverVersionLessThan(8, 
3))
+ .test("client-side-operations-timeout", "operations ignore deprecated timeout options if timeoutMS is set",
+ "socketTimeoutMS is ignored if timeoutMS is set - dropIndex on collection")
+ .test("client-side-operations-timeout", "operations ignore deprecated timeout options if timeoutMS is set",
+ "wTimeoutMS is ignored if timeoutMS is set - dropIndex on collection")
+ .test("client-side-operations-timeout", "operations ignore deprecated timeout options if timeoutMS is set",
+ "maxTimeMS is ignored if timeoutMS is set - dropIndex on collection")
+ .test("client-side-operations-timeout", "operations ignore deprecated timeout options if timeoutMS is set",
+ "socketTimeoutMS is ignored if timeoutMS is set - dropIndexes on collection")
+ .test("client-side-operations-timeout", "operations ignore deprecated timeout options if timeoutMS is set",
+ "wTimeoutMS is ignored if timeoutMS is set - dropIndexes on collection")
+ .test("client-side-operations-timeout", "operations ignore deprecated timeout options if timeoutMS is set",
+ "maxTimeMS is ignored if timeoutMS is set - dropIndexes on collection")
+ .test("client-side-operations-timeout", "timeoutMS can be overridden for a MongoDatabase",
+ "timeoutMS can be configured on a MongoDatabase - dropIndex on collection")
+ .test("client-side-operations-timeout", "timeoutMS can be overridden for a MongoDatabase",
+ "timeoutMS can be set to 0 on a MongoDatabase - dropIndex on collection")
+ .test("client-side-operations-timeout", "timeoutMS can be overridden for a MongoDatabase",
+ "timeoutMS can be configured on a MongoDatabase - dropIndexes on collection")
+ .test("client-side-operations-timeout", "timeoutMS can be overridden for a MongoDatabase",
+ "timeoutMS can be set to 0 on a MongoDatabase - dropIndexes on collection")
+ .test("client-side-operations-timeout", "timeoutMS can be overridden for a MongoCollection",
+ "timeoutMS can be configured on a MongoCollection - dropIndex on collection")
+ .test("client-side-operations-timeout", "timeoutMS can be overridden for a MongoCollection",
+ "timeoutMS can be set to 0 on a MongoCollection - dropIndex on collection")
+ .test("client-side-operations-timeout", "timeoutMS can be overridden for a MongoCollection",
+ "timeoutMS can be configured on a MongoCollection - dropIndexes on collection")
+ .test("client-side-operations-timeout", "timeoutMS can be overridden for a MongoCollection",
+ "timeoutMS can be set to 0 on a MongoCollection - dropIndexes on collection")
+ .test("client-side-operations-timeout", "timeoutMS can be overridden for an operation",
+ "timeoutMS can be configured for an operation - dropIndex on collection")
+ .test("client-side-operations-timeout", "timeoutMS can be overridden for an operation",
+ "timeoutMS can be set to 0 for an operation - dropIndex on collection")
+ .test("client-side-operations-timeout", "timeoutMS can be overridden for an operation",
+ "timeoutMS can be configured for an operation - dropIndexes on collection")
+ .test("client-side-operations-timeout", "timeoutMS can be overridden for an operation",
+ "timeoutMS can be set to 0 for an operation - dropIndexes on collection")
+ .test("client-side-operations-timeout", "timeoutMS can be configured on a MongoClient",
+ "timeoutMS can be configured on a MongoClient - dropIndex on collection")
+ .test("client-side-operations-timeout", "timeoutMS can be configured on a MongoClient",
+ "timeoutMS can be set to 0 on a MongoClient - dropIndex on collection")
+ .test("client-side-operations-timeout", "timeoutMS can be configured on a MongoClient",
+ "timeoutMS can be configured on a MongoClient - dropIndexes on collection")
+ .test("client-side-operations-timeout", "timeoutMS can be configured on a MongoClient",
+ "timeoutMS can be set to 0 on a MongoClient - dropIndexes on collection");
+
+ // TODO-JAVA-5712
+
+ // collection-management
+
+ def.skipNoncompliant("") // TODO-JAVA-5711 reason?
+ .test("collection-management", "modifyCollection-pre_and_post_images", "modifyCollection to changeStreamPreAndPostImages enabled");
+
+ // command-logging-and-monitoring
+
+ def.skipNoncompliant("The driver has a hack where getLastError command "
+ + "is executed as part of the handshake in order to "
+ + "get a connectionId even when the hello command "
+ + "response doesn't contain it.")
+ .file("command-logging-and-monitoring/tests/logging", "pre-42-server-connection-id")
+ .file("command-logging-and-monitoring/tests/monitoring", "pre-42-server-connection-id");
+
+ def.skipNoncompliant("The driver doesn't reduce the batchSize for the getMore")
+ .test("command-logging-and-monitoring/tests/monitoring", "find",
+ "A successful find event with a getmore and the server kills the cursor (<= 4.4)");
+
+ // connection-monitoring-and-pooling
+ def.skipNoncompliant("According to the test, we should clear the pool then close the connection. Our implementation "
+ + "immediately closes the failed connection, then clears the pool.")
+ .test("connection-monitoring-and-pooling/tests/logging", "connection-logging", "Connection checkout fails due to error establishing connection");
+
+
+ def.skipNoncompliant("Driver does not support waitQueueSize or waitQueueMultiple options")
+ .test("connection-monitoring-and-pooling/tests/logging", "connection-pool-options", "waitQueueSize should be included in connection pool created message when specified")
+ .test("connection-monitoring-and-pooling/tests/logging", "connection-pool-options", "waitQueueMultiple should be included in connection pool created message when specified");
+
+ // load-balancers
+
+ def.modify(SLEEP_AFTER_CURSOR_OPEN)
+ .test("load-balancers", "state change errors are correctly handled", "only connections for a specific serviceId are closed when pools are cleared")
+ .test("load-balancers", "state change errors are correctly handled", "stale errors are ignored")
+ .test("load-balancers", "cursors are correctly pinned to connections for load-balanced clusters", "pinned connections are returned when the cursor is drained")
+ .test("load-balancers", "cursors are correctly pinned to connections for load-balanced clusters", "pinned connections are returned to the pool when the cursor is closed")
+ .test("load-balancers", "cursors are correctly pinned to connections for load-balanced clusters", "no connection is pinned if all documents are returned in the initial batch")
+ .test("load-balancers", "transactions are correctly pinned to connections for load-balanced clusters", "a connection can be shared by a transaction and a cursor")
+ .test("load-balancers", "wait queue timeout errors include details about checked out connections", "wait queue timeout errors include cursor statistics");
+ def.modify(SLEEP_AFTER_CURSOR_CLOSE)
+ .test("load-balancers", "state change errors are correctly handled", "only
connections for a specific serviceId are closed when pools are cleared") + .test("load-balancers", "cursors are correctly pinned to connections for load-balanced clusters", "pinned connections are returned to the pool when the cursor is closed") + .test("load-balancers", "cursors are correctly pinned to connections for load-balanced clusters", "pinned connections are returned after a network error during a killCursors request") + .test("load-balancers", "transactions are correctly pinned to connections for load-balanced clusters", "a connection can be shared by a transaction and a cursor"); + def.skipNoncompliantReactive("Reactive streams driver can't implement " + + "these tests because the underlying cursor is closed " + + "on error, which breaks assumption in the tests that " + + "closing the cursor is something that happens under " + + "user control") + .test("load-balancers", "cursors are correctly pinned to connections for load-balanced clusters", "pinned connections are not returned after an network error during getMore") + .test("load-balancers", "cursors are correctly pinned to connections for load-balanced clusters", "pinned connections are not returned to the pool after a non-network error on getMore"); + def.skipNoncompliantReactive("Reactive streams driver can't implement " + + "this test because there is no way to tell that a " + + "change stream cursor that has not yet received any " + + "results has even initiated the change stream") + .test("load-balancers", "cursors are correctly pinned to connections for load-balanced clusters", "change streams pin to a connection"); + + // crud + + def.skipDeprecated("Deprecated count method removed, cf https://github.com/mongodb/mongo-java-driver/pull/1328#discussion_r1513641410") + .test("crud", "count-empty", "Deprecated count with empty collection") + .test("crud", "count-collation", "Deprecated count with collation") + .test("crud", "count", "Deprecated count without a filter") + .test("crud", "count", "Deprecated count with a filter") + .test("crud", "count", "Deprecated count with skip and limit"); + def.skipUnknownReason("See downstream changes comment on https://jira.mongodb.org/browse/JAVA-4275") + .test("crud", "findOneAndReplace-hint-unacknowledged", "Unacknowledged findOneAndReplace with hint string on 4.4+ server") + .test("crud", "findOneAndReplace-hint-unacknowledged", "Unacknowledged findOneAndReplace with hint document on 4.4+ server") + .test("crud", "findOneAndUpdate-hint-unacknowledged", "Unacknowledged findOneAndUpdate with hint string on 4.4+ server") + .test("crud", "findOneAndUpdate-hint-unacknowledged", "Unacknowledged findOneAndUpdate with hint document on 4.4+ server") + .test("crud", "findOneAndDelete-hint-unacknowledged", "Unacknowledged findOneAndDelete with hint string on 4.4+ server") + .test("crud", "findOneAndDelete-hint-unacknowledged", "Unacknowledged findOneAndDelete with hint document on 4.4+ server"); + + def.skipNoncompliant("https://jira.mongodb.org/browse/JAVA-5838") + .when(() -> def.isReactive() && UnifiedTest.Language.KOTLIN.equals(def.getLanguage())) + .file("crud", "findOne"); + + def.skipNoncompliant("Scala Mono pulls the data and sets the batch size https://jira.mongodb.org/browse/JAVA-5838") + .when(() -> UnifiedTest.Language.SCALA.equals(def.getLanguage())) + .file("crud", "findOne"); + + def.skipNoncompliant("Updates and Replace bulk operations are split in the java driver") + .file("crud", "bulkWrite-comment"); + + // gridfs + + def.skipDeprecated("contentType is 
deprecated in GridFS spec, and 4.x Java driver no longer supports it")
+ .test("gridfs", "gridfs-upload", "upload when contentType is provided");
+ def.skipJira("https://jira.mongodb.org/browse/JAVA-4214")
+ .test("gridfs", "gridfs-delete", "delete when files entry does not exist and there are orphaned chunks");
+ def.skipJira("https://jira.mongodb.org/browse/JAVA-5677")
+ .file("gridfs", "gridfs-rename");
+ def.skipJira("https://jira.mongodb.org/browse/JAVA-5689")
+ .file("gridfs", "gridfs-deleteByName")
+ .file("gridfs", "gridfs-renameByName");
+
+ // Skip all rawData based tests
+ def.skipJira("https://jira.mongodb.org/browse/JAVA-5830 rawData support only added to Go and Node")
+ .file("collection-management", "listCollections-rawData")
+ .file("crud", "aggregate-rawData")
+ .file("crud", "BulkWrite deleteMany-rawData")
+ .file("crud", "BulkWrite deleteOne-rawData")
+ .file("crud", "BulkWrite replaceOne-rawData")
+ .file("crud", "BulkWrite updateMany-rawData")
+ .file("crud", "BulkWrite updateOne-rawData")
+ .file("crud", "client bulkWrite delete-rawData")
+ .file("crud", "client bulkWrite replaceOne-rawData")
+ .file("crud", "client bulkWrite update-rawData")
+ .file("crud", "count-rawData")
+ .file("crud", "countDocuments-rawData")
+ .file("crud", "db-aggregate-rawdata")
+ .file("crud", "deleteMany-rawData")
+ .file("crud", "deleteOne-rawData")
+ .file("crud", "distinct-rawData")
+ .file("crud", "estimatedDocumentCount-rawData")
+ .file("crud", "find-rawData")
+ .file("crud", "findOneAndDelete-rawData")
+ .file("crud", "findOneAndReplace-rawData")
+ .file("crud", "findOneAndUpdate-rawData")
+ .file("crud", "insertMany-rawData")
+ .file("crud", "insertOne-rawData")
+ .file("crud", "replaceOne-rawData")
+ .file("crud", "updateMany-rawData")
+ .file("crud", "updateOne-rawData")
+ .file("index-management", "index management-rawData");
+
+ // retryable-reads
+
+ def.modify(WAIT_FOR_BATCH_CURSOR_CREATION, IGNORE_EXTRA_EVENTS)
+ //.testContains("retryable-reads", "ChangeStream")
+ .test("retryable-reads", "retryable reads handshake failures", "client.createChangeStream succeeds after retryable handshake network error")
+ .test("retryable-reads", "retryable reads handshake failures", "client.createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)")
+ .test("retryable-reads", "retryable reads handshake failures", "database.createChangeStream succeeds after retryable handshake network error")
+ .test("retryable-reads", "retryable reads handshake failures", "database.createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)")
+ .test("retryable-reads", "retryable reads handshake failures", "collection.createChangeStream succeeds after retryable handshake network error")
+ .test("retryable-reads", "retryable reads handshake failures", "collection.createChangeStream succeeds after retryable handshake server error (ShutdownInProgress)");
+ def.modify(WAIT_FOR_BATCH_CURSOR_CREATION, IGNORE_EXTRA_EVENTS)
+ .file("retryable-reads", "changeStreams-client.watch-serverErrors")
+ .file("retryable-reads", "changeStreams-client.watch")
+ .file("retryable-reads", "changeStreams-db.coll.watch-serverErrors")
+ .file("retryable-reads", "changeStreams-db.coll.watch")
+ .file("retryable-reads", "changeStreams-db.watch-serverErrors")
+ .file("retryable-reads", "changeStreams-db.watch");
+ def.skipDeprecated("Deprecated feature removed")
+ .file("retryable-reads", "count")
+ .file("retryable-reads",
"count-serverErrors"); + def.skipDeprecated("Deprecated feature never implemented") + .file("retryable-reads", "listDatabaseObjects") + .file("retryable-reads", "listDatabaseObjects-serverErrors") + .file("retryable-reads", "listCollectionObjects") + .file("retryable-reads", "listCollectionObjects-serverErrors"); + + // retryable-writes + + def.skipJira("https://jira.mongodb.org/browse/JAVA-5125") + .when(() -> isSharded() && serverVersionLessThan(5, 0)) + //.testContains("retryable-writes", "succeeds after WriteConcernError") + .test("retryable-writes", "bulkWrite-errorLabels", "BulkWrite succeeds after WriteConcernError ShutdownInProgress") + .test("retryable-writes", "updateOne-errorLabels", "UpdateOne succeeds after WriteConcernError ShutdownInProgress") + .test("retryable-writes", "deleteOne-errorLabels", "DeleteOne succeeds after WriteConcernError ShutdownInProgress") + .test("retryable-writes", "insertOne-errorLabels", "InsertOne succeeds after WriteConcernError InterruptedAtShutdown") + .test("retryable-writes", "insertOne-errorLabels", "InsertOne succeeds after WriteConcernError InterruptedDueToReplStateChange") + .test("retryable-writes", "insertOne-errorLabels", "InsertOne succeeds after WriteConcernError PrimarySteppedDown") + .test("retryable-writes", "insertOne-errorLabels", "InsertOne succeeds after WriteConcernError ShutdownInProgress") + .test("retryable-writes", "insertMany-errorLabels", "InsertMany succeeds after WriteConcernError ShutdownInProgress") + .test("retryable-writes", "replaceOne-errorLabels", "ReplaceOne succeeds after WriteConcernError ShutdownInProgress") + .test("retryable-writes", "findOneAndUpdate-errorLabels", "FindOneAndUpdate succeeds after WriteConcernError ShutdownInProgress") + .test("retryable-writes", "findOneAndDelete-errorLabels", "FindOneAndDelete succeeds after WriteConcernError ShutdownInProgress") + .test("retryable-writes", "findOneAndReplace-errorLabels", "FindOneAndReplace succeeds after WriteConcernError ShutdownInProgress") + //.testContains("retryable-writes", "succeeds after retryable writeConcernError") + .test("retryable-writes", "retryable-writes insertOne serverErrors", "InsertOne succeeds after retryable writeConcernError") + .test("retryable-writes", "retryable-writes bulkWrite serverErrors", "BulkWrite succeeds after retryable writeConcernError in first batch"); + def.skipJira("https://jira.mongodb.org/browse/JAVA-5341") + .when(() -> isDiscoverableReplicaSet() && serverVersionLessThan(4, 4)) + .test("retryable-writes", "retryable-writes insertOne serverErrors", "RetryableWriteError label is added based on writeConcernError in pre-4.4 mongod response"); + + // server-discovery-and-monitoring (SDAM) + + def.skipJira("https://jira.mongodb.org/browse/JAVA-5230") + .test("server-discovery-and-monitoring", "serverMonitoringMode", "connect with serverMonitoringMode=auto >=4.4") + .test("server-discovery-and-monitoring", "serverMonitoringMode", "connect with serverMonitoringMode=stream >=4.4"); + def.skipJira("https://jira.mongodb.org/browse/JAVA-5564") + .test("server-discovery-and-monitoring", "serverMonitoringMode", "poll waits after successful heartbeat"); + def.skipJira("https://jira.mongodb.org/browse/JAVA-4536") + .file("server-discovery-and-monitoring", "interruptInUse"); + def.skipJira("https://jira.mongodb.org/browse/JAVA-5664") + .file("server-discovery-and-monitoring", "pool-clear-application-error"); + def.skipJira("https://jira.mongodb.org/browse/JAVA-5664") + .file("server-discovery-and-monitoring", 
"pool-clear-on-error-checkout"); + def.skipJira("https://jira.mongodb.org/browse/JAVA-5664") + .file("server-discovery-and-monitoring", "pool-cleared-on-min-pool-size-population-error"); + + // transactions + + def.skipDeprecated("Deprecated feature removed") + .file("transactions", "count"); + def.skipDeprecated("Only affects 4.2, which is EOL, see https://github.com/mongodb/mongo-java-driver/pull/1310/files#r1491812405") + .when(() -> serverVersionLessThan(4, 4) && isSharded()) + .test("transactions", "pin-mongos", "distinct") + .test("transactions", "read-concern", "only first distinct includes readConcern") + .test("transactions", "read-concern", "distinct ignores collection readConcern") + .test("transactions", "reads", "distinct"); + def.skipNoncompliant("`MongoCluster.getWriteConcern`/`MongoCollection.getWriteConcern` are silently ignored in a transaction") + .test("transactions", "client bulkWrite transactions", + "client bulkWrite with writeConcern in a transaction causes a transaction error"); + + // valid-pass + + def.skipDeprecated("MongoDB releases prior to 4.4 incorrectly add " + + "errorLabels as a field within the writeConcernError " + + "document instead of as a top-level field. Rather " + + "than handle that in code, we skip the test on older " + + "server versions.") + .when(() -> serverVersionLessThan(4, 4)) + .test("unified-test-format/tests/valid-pass", "poc-retryable-writes", "InsertOne fails after multiple retryable writeConcernErrors"); + + def.skipNoncompliant("The driver doesn't reduce the batchSize for the getMore") + .test("unified-test-format/tests/valid-pass", "poc-command-monitoring", + "A successful find event with a getmore and the server kills the cursor (<= 4.4)"); + + def.skipJira("https://jira.mongodb.org/browse/JAVA-5389") + .file("unified-test-format/tests/valid-pass", "expectedEventsForClient-topologyDescriptionChangedEvent"); + def.skipJira("https://jira.mongodb.org/browse/JAVA-4862") + .file("unified-test-format/tests/valid-pass", "entity-commandCursor"); + def.skipJira("https://jira.mongodb.org/browse/JAVA-5631") + .file("unified-test-format/tests/valid-pass", "kmsProviders-explicit_kms_credentials") + .file("unified-test-format/tests/valid-pass", "kmsProviders-mixed_kms_credential_fields"); + def.skipJira("https://jira.mongodb.org/browse/JAVA-5672") + .file("unified-test-format/tests/valid-pass", "operator-matchAsRoot"); + + // valid fail + + def.skipJira("https://jira.mongodb.org/browse/JAVA-5672") + .file("unified-test-format/tests/valid-fail", "operator-matchAsDocument"); + } + + private UnifiedTestModifications() {} + + public static TestDef testDef(final String dir, final String file, final String test, final boolean reactive, + final UnifiedTest.Language language) { + return new TestDef(dir, file, test, reactive, language); + } + + public static final class TestDef { + + private final String dir; + private final String file; + private final String test; + private final boolean reactive; + private final UnifiedTest.Language language; + + private final List modifiers = new ArrayList<>(); + private Function matchesThrowable; + + private TestDef(final String dir, final String file, final String test, final boolean reactive, final UnifiedTest.Language language) { + this.dir = assertNotNull(dir); + this.file = assertNotNull(file); + this.test = assertNotNull(test); + this.reactive = reactive; + this.language = assertNotNull(language); + } + + @Override + public String toString() { + return "TestDef{" + + "modifiers=" + modifiers + 
+ ", reactive=" + reactive + + ", test='" + test + '\'' + + ", file='" + file + '\'' + + ", dir='" + dir + '\'' + + '}'; + } + + /** + * Test is skipped because it is pending implementation, and there is + * a Jira ticket tracking this which has more information. + * + * @param ticket reason for skipping the test; must start with a Jira URL + */ + public TestApplicator skipJira(final String ticket) { + assertTrue(ticket.startsWith("https://jira.mongodb.org/browse/JAVA-")); + return new TestApplicator(this, ticket, SKIP); + } + + /** + * Test is skipped because the feature under test was deprecated, and + * was removed in the Java driver. + * + * @param reason reason for skipping the test + */ + public TestApplicator skipDeprecated(final String reason) { + return new TestApplicator(this, reason, SKIP); + } + + /** + * Test is skipped because the Java driver cannot comply with the spec. + * + * @param reason reason for skipping the test + */ + public TestApplicator skipNoncompliant(final String reason) { + return new TestApplicator(this, reason, SKIP); + } + + /** + * Test is skipped because the Java Reactive driver cannot comply with the spec. + * + * @param reason reason for skipping the test + */ + public TestApplicator skipNoncompliantReactive(final String reason) { + return new TestApplicator(this, reason, SKIP) + .when(this::isReactive); + } + + /** + * The test is skipped, as specified. This should be paired with a + * "when" clause. + */ + public TestApplicator skipAccordingToSpec(final String reason) { + return new TestApplicator(this, reason, SKIP); + } + + /** + * The test is skipped for an unknown reason. + */ + public TestApplicator skipUnknownReason(final String reason) { + return new TestApplicator(this, reason, SKIP); + } + + /** + * The test will be retried, for the reason provided + */ + public TestApplicator retry(final String reason) { + return new TestApplicator(this, reason, RETRY); + } + + /** + * The reactive test will be retried, for the reason provided + */ + public TestApplicator retryReactive(final String reason) { + return new TestApplicator(this, reason, RETRY) + .when(this::isReactive); + } + + public TestApplicator modify(final Modifier... modifiers) { + return new TestApplicator(this, null, modifiers); + } + + public boolean isReactive() { + return reactive; + } + + public UnifiedTest.Language getLanguage() { + return language; + } + + public boolean wasAssignedModifier(final Modifier modifier) { + return this.modifiers.contains(modifier); + } + + public boolean matchesThrowable(final Throwable e) { + if (matchesThrowable != null) { + return matchesThrowable.apply(e); + } + return false; + } + } + + /** + * Applies settings to the underlying test definition. Chainable. + */ + public static final class TestApplicator { + private final TestDef testDef; + private Supplier precondition; + private boolean matchWasPerformed = false; + + private final List modifiersToApply; + private Function matchesThrowable; + + private TestApplicator( + final TestDef testDef, + final String reason, + final Modifier... 
modifiersToApply) { + this.testDef = testDef; + this.modifiersToApply = Arrays.asList(modifiersToApply); + if (this.modifiersToApply.contains(SKIP) || this.modifiersToApply.contains(RETRY)) { + assertNotNull(reason); + } + } + + private TestApplicator onMatch(final boolean match) { + matchWasPerformed = true; + if (precondition != null && !precondition.get()) { + return this; + } + if (match) { + this.testDef.modifiers.addAll(this.modifiersToApply); + this.testDef.matchesThrowable = this.matchesThrowable; + } + return this; + } + + /** + * Applies to all tests in directory. + * @param dir the directory name + * @return this + */ + public TestApplicator directory(final String dir) { + boolean match = (dir).equals(testDef.dir); + return onMatch(match); + } + + /** + * Applies to all tests in file under the directory. + * @param dir the directory name + * @param file the test file's "description" field + * @return this + */ + public TestApplicator file(final String dir, final String file) { + boolean match = (dir).equals(testDef.dir) + && file.equals(testDef.file); + return onMatch(match); + } + + /** + * Applies to the test where dir, file, and test match. + * @param dir the directory name + * @param file the test file's "description" field + * @param test the individual test's "description" field + * @return this + */ + public TestApplicator test(final String dir, final String file, final String test) { + boolean match = testDef.dir.equals(dir) + && testDef.file.equals(file) + && testDef.test.equals(test); + return onMatch(match); + } + + /** + * Utility method: emit replacement to standard out. + * @param dir the directory name + * @param fragment the substring to check in the test "description" field + * @return this + */ + public TestApplicator testContains(final String dir, final String fragment) { + boolean match = (dir).equals(testDef.dir) + && testDef.test.contains(fragment); + if (match) { + System.out.printf( + "!!! REPLACE %s WITH: .test(\"%s\", \"%s\", \"%s\")%n", + fragment, + testDef.dir, + testDef.file, + testDef.test); + } + return this; + } + + /** + * Utility method: emit file info to standard out + * @param dir the directory name + * @param test the individual test's "description" field + * @return this + */ + public TestApplicator debug(final String dir, final String test) { + boolean match = testDef.test.equals(test); + if (match) { + System.out.printf( + "!!! ADD: \"%s\", \"%s\", \"%s\"%n", + testDef.dir, testDef.file, test); + } + return this; + } + + /** + * Ensuing matching methods are applied only when the condition is met. + * For example, if tests should only be skipped (or modified) on + * sharded clusters, check for sharded in the condition. + * Must be the first method called in the chain. + * @param precondition the condition; methods are no-op when false. + * @return this + */ + public TestApplicator when(final Supplier precondition) { + if (this.precondition != null || this.matchWasPerformed) { + throw new IllegalStateException("Condition must be specified first and once."); + } + this.precondition = precondition; + return this; + } + + /** + * The modification, if it is a RETRY, will only be applied when the + * failure message contains the provided message fragment. If an + * {@code AssertionFailedError} occurs, and has a cause, the cause's + * message will be checked. Otherwise, the throwable will be checked. 
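+ * <p>A hypothetical example (the ticket and test coordinates are invented):
+ * <pre>{@code
+ * def.retry("https://jira.mongodb.org/browse/JAVA-0000 flaky on slow hosts")
+ *         .whenFailureContains("Timed out")
+ *         .test("crud", "some file description", "some test description");
+ * }</pre>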
+ */ + public TestApplicator whenFailureContains(final String messageFragment) { + assertTrue(this.modifiersToApply.contains(RETRY), + format("Modifier %s was not specified before calling whenFailureContains", RETRY)); + this.matchesThrowable = (final Throwable e) -> { + // inspect the cause for failed assertions with a cause + if (e instanceof AssertionFailedError && e.getCause() != null) { + return e.getCause().getMessage().contains(messageFragment); + } else { + return e.getMessage().contains(messageFragment); + } + }; + return this; + } + + } + + public enum Modifier { + /** + * Reactive only. + * The reactive driver produces extra getMore commands. + * This will ignore all extra commands, including the getMores. + */ + IGNORE_EXTRA_EVENTS, + /** + * Reactive only. + */ + SLEEP_AFTER_CURSOR_OPEN, + /** + * Reactive only. + */ + SLEEP_AFTER_CURSOR_CLOSE, + /** + * Reactive only. + */ + WAIT_FOR_BATCH_CURSOR_CREATION, + /** + * Skip the test. + */ + SKIP, + /** + * Ignore results and retry the test on failure. Will not repeat the + * test if the test succeeds. Multiple copies of the test are used to + * facilitate retries. + */ + RETRY, + /** + * The test will be retried multiple times, without the results being + * ignored. This is a helper that can be used, in patches, to check + * if certain tests are (still) flaky. + */ + FORCE_FLAKY, + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTestValidator.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTestValidator.java new file mode 100644 index 00000000000..bc5177be3e0 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTestValidator.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.unified; + +import org.junit.jupiter.params.provider.Arguments; + +import java.util.Collection; + +final class UnifiedTestValidator extends UnifiedSyncTest { + private static Collection data() { + return getTestData("unified-test-format/tests/valid-pass"); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTransactionsTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTransactionsTest.java new file mode 100644 index 00000000000..43e9e16452c --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedTransactionsTest.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.unified; + +import org.junit.jupiter.params.provider.Arguments; + +import java.util.Collection; + +final class UnifiedTransactionsTest extends UnifiedSyncTest { + private static Collection data() { + return getTestData("transactions"); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedWriteConcernTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedWriteConcernTest.java new file mode 100644 index 00000000000..e9d9019b48d --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/UnifiedWriteConcernTest.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.unified; + +import org.junit.jupiter.params.provider.Arguments; + +import java.util.Collection; + +final class UnifiedWriteConcernTest extends UnifiedSyncTest { + private static Collection data() { + // Note: only the write-concern tests in this directory are in unified test format + return getTestData("read-write-concern"); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/ValueMatcher.java b/driver-sync/src/test/functional/com/mongodb/client/unified/ValueMatcher.java new file mode 100644 index 00000000000..ba887ff0fd5 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/ValueMatcher.java @@ -0,0 +1,209 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
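The ValueMatcher below treats an expected document whose single `$$`-prefixed key is an operator rather than a literal value. Two of the shapes it accepts, per the unified test format spec (the wrapper class is invented; `BsonDocument.parse` tolerates the relaxed quoting):

```java
import org.bson.BsonDocument;

public final class MatcherShapes {
    public static void main(final String[] args) {
        // "$$unsetOrMatches": the key may be absent; if present it must match.
        BsonDocument unsetOrMatches = BsonDocument.parse(
                "{'insertedId': {'$$unsetOrMatches': 1}}");
        // "$$type": the actual value's BSON type must be one of those listed.
        BsonDocument typeMatch = BsonDocument.parse(
                "{'cursorId': {'$$type': ['int', 'long']}}");
        System.out.println(unsetOrMatches.toJson());
        System.out.println(typeMatch.toJson());
    }
}
```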
+ */ + +package com.mongodb.client.unified; + +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.bson.BsonType; +import org.bson.BsonValue; + +import java.util.List; +import java.util.stream.Collectors; + +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +final class ValueMatcher { + private final Entities entities; + private final AssertionContext context; + private static final List NUMBER_TYPES = asList("int", "long", "double", "decimal"); + + ValueMatcher(final Entities entities, final AssertionContext context) { + this.entities = entities; + this.context = context; + } + + public void assertValuesMatch(final BsonValue expected, final BsonValue actual) { + assertValuesMatch(expected, actual, null, -1); + } + + private void assertValuesMatch(final BsonValue initialExpected, @Nullable final BsonValue actual, + @Nullable final String keyContext, final int arrayPositionContext) { + BsonValue expected = initialExpected; + context.push(ContextElement.ofValueMatcher(expected, actual, keyContext, arrayPositionContext)); + + try { + if (initialExpected.isDocument() && initialExpected.asDocument().size() == 1 + && initialExpected.asDocument().getFirstKey().startsWith("$$")) { + BsonDocument expectedDocument = initialExpected.asDocument(); + + switch (expectedDocument.getFirstKey()) { + case "$$exists": + if (expectedDocument.getBoolean("$$exists").getValue()) { + assertNotNull(context.getMessage("Actual document must contain key " + keyContext), actual); + } else { + assertNull(context.getMessage("Actual document must not contain key " + keyContext), actual); + } + return; + case "$$unsetOrMatches": + if (actual == null) { + return; + } + expected = expectedDocument.get("$$unsetOrMatches"); + break; + case "$$type": + assertExpectedType(actual, expectedDocument.get("$$type")); + return; + case "$$matchesHexBytes": + expected = expectedDocument.getString("$$matchesHexBytes"); + break; + case "$$matchAsRoot": + expected = expectedDocument.getDocument("$$matchAsRoot"); + break; + default: + throw new UnsupportedOperationException("Unsupported special operator: " + expectedDocument.getFirstKey()); + } + } + + if (expected.isDocument()) { + BsonDocument expectedDocument = expected.asDocument(); + assertTrue(context.getMessage("Actual value must be a document but is " + actual.getBsonType()), actual.isDocument()); + BsonDocument actualDocument = actual.asDocument(); + expectedDocument.forEach((key, value) -> { + BsonValue actualValue = actualDocument.get(key); + if (value.isDocument() && value.asDocument().size() == 1 && value.asDocument().getFirstKey().startsWith("$$")) { + switch (value.asDocument().getFirstKey()) { + case "$$exists": + if (value.asDocument().getBoolean("$$exists").getValue()) { + assertTrue(context.getMessage("Actual document must contain key " + key), + actualDocument.containsKey(key)); + } else { + assertFalse(context.getMessage("Actual document must not contain key " + key), + actualDocument.containsKey(key)); + } + return; + case "$$type": + assertTrue(context.getMessage("Actual document must contain key " + key), actualDocument.containsKey(key)); + assertExpectedType(actualDocument.get(key), value.asDocument().get("$$type")); + return; + case "$$unsetOrMatches": + if (actualValue == 
null) { + return; + } + value = value.asDocument().get("$$unsetOrMatches"); + break; + case "$$matchesEntity": + value = entities.getResult(value.asDocument().getString("$$matchesEntity").getValue()); + break; + case "$$matchesHexBytes": + value = value.asDocument().getString("$$matchesHexBytes"); + break; + case "$$sessionLsid": + value = entities.getSessionIdentifier(value.asDocument().getString("$$sessionLsid").getValue()); + break; + case "$$matchAsDocument": + actualValue = BsonDocument.parse(actualValue.asString().getValue()); + value = value.asDocument().getDocument("$$matchAsDocument"); + break; + case "$$lte": + value = value.asDocument().getNumber("$$lte"); + assertTrue(actualValue.asNumber().longValue() <= value.asNumber().longValue()); + return; + default: + throw new UnsupportedOperationException("Unsupported special operator: " + value.asDocument().getFirstKey()); + } + } + + assertNotNull(context.getMessage("Actual document must contain key " + key), actualValue); + assertValuesMatch(value, actualValue, key, -1); + }); + } else if (expected.isArray()) { + assertTrue(context.getMessage("Actual value must be an array but is " + actual.getBsonType()), actual.isArray()); + assertEquals(context.getMessage("Arrays must be the same size"), expected.asArray().size(), actual.asArray().size()); + for (int i = 0; i < expected.asArray().size(); i++) { + assertValuesMatch(expected.asArray().get(i), actual.asArray().get(i), keyContext, i); + } + } else if (expected.isNumber()) { + assertTrue(context.getMessage("Expected a number"), actual.isNumber()); + assertEquals(context.getMessage("Expected BSON numbers to be equal"), + expected.asNumber().doubleValue(), actual.asNumber().doubleValue(), 0.0); + } else if (expected.isNull()) { + assertTrue(context.getMessage("Expected BSON null"), actual == null || actual.isNull()); + } else { + assertEquals(context.getMessage("Expected BSON types to be equal"), expected.getBsonType(), actual.getBsonType()); + assertEquals(context.getMessage("Expected BSON values to be equal"), expected, actual); + } + } finally { + context.pop(); + } + } + + private void assertExpectedType(final BsonValue actualValue, final BsonValue expectedTypes) { + List types; + if (expectedTypes.isString()) { + String expectedType = expectedTypes.asString().getValue(); + types = expectedType.equals("number") ? 
NUMBER_TYPES : singletonList(expectedType); + } else if (expectedTypes.isArray()) { + types = expectedTypes.asArray().stream().map(type -> type.asString().getValue()).collect(Collectors.toList()); + } else { + throw new UnsupportedOperationException("Unsupported type for $$type value"); + } + assertTrue(context.getMessage("Expected BSON type to be one of " + types + " but was " + + asTypeString(actualValue.getBsonType())), + types.contains(asTypeString(actualValue.getBsonType()))); + } + + private static String asTypeString(final BsonType bsonType) { + switch (bsonType) { + case DOUBLE: + return "double"; + case STRING: + return "string"; + case DOCUMENT: + return "object"; + case ARRAY: + return "array"; + case BINARY: + return "binData"; + case OBJECT_ID: + return "objectId"; + case BOOLEAN: + return "bool"; + case DATE_TIME: + return "date"; + case NULL: + return "null"; + case REGULAR_EXPRESSION: + return "regex"; + case INT32: + return "int"; + case TIMESTAMP: + return "timestamp"; + case INT64: + return "long"; + case DECIMAL128: + return "decimal"; + default: + throw new UnsupportedOperationException("Unsupported bson type conversion to string: " + bsonType); + } + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/VersionedApiTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/VersionedApiTest.java new file mode 100644 index 00000000000..95b23da9ce7 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/VersionedApiTest.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.unified; + +import org.junit.jupiter.params.provider.Arguments; + +import java.util.Collection; + +final class VersionedApiTest extends UnifiedSyncTest { + private static Collection data() { + return getTestData("versioned-api"); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/unified/WithTransactionHelperTransactionsTest.java b/driver-sync/src/test/functional/com/mongodb/client/unified/WithTransactionHelperTransactionsTest.java new file mode 100644 index 00000000000..264e73a7c08 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/unified/WithTransactionHelperTransactionsTest.java @@ -0,0 +1,27 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
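One detail worth calling out from ValueMatcher's assertExpectedType above: the `$$type` alias `"number"` expands to all four numeric BSON types, mirroring MongoDB's `$type` query operator. A self-contained illustration of that expansion (the class name is invented):

```java
import java.util.Arrays;
import java.util.List;

public final class NumberAliasDemo {
    public static void main(final String[] args) {
        // "number" is shorthand for the four numeric BSON type strings.
        List<String> numberTypes = Arrays.asList("int", "long", "double", "decimal");
        String expected = "number";
        List<String> types = "number".equals(expected)
                ? numberTypes
                : Arrays.asList(expected);
        System.out.println(types); // [int, long, double, decimal]
    }
}
```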
+ */ + +package com.mongodb.client.unified; + +import org.junit.jupiter.params.provider.Arguments; + +import java.util.Collection; + +final class WithTransactionHelperTransactionsTest extends UnifiedSyncTest { + private static Collection data() { + return getTestData("transactions-convenient-api"); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/vector/AbstractBinaryVectorFunctionalTest.java b/driver-sync/src/test/functional/com/mongodb/client/vector/AbstractBinaryVectorFunctionalTest.java new file mode 100644 index 00000000000..5d61051a997 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/vector/AbstractBinaryVectorFunctionalTest.java @@ -0,0 +1,346 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.vector; + +import com.mongodb.MongoClientSettings; +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.WriteConcern; +import com.mongodb.client.Fixture; +import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.model.OperationTest; +import org.bson.BsonBinary; +import org.bson.BsonBinarySubType; +import org.bson.BsonInvalidOperationException; +import org.bson.Document; +import org.bson.Float32BinaryVector; +import org.bson.Int8BinaryVector; +import org.bson.PackedBitBinaryVector; +import org.bson.BinaryVector; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.codecs.pojo.PojoCodecProvider; +import org.bson.types.Binary; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.params.provider.ValueSource; + +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Stream; + +import static com.mongodb.MongoClientSettings.getDefaultCodecRegistry; +import static org.bson.BinaryVector.DataType.FLOAT32; +import static org.bson.BinaryVector.DataType.INT8; +import static org.bson.BinaryVector.DataType.PACKED_BIT; +import static org.bson.codecs.configuration.CodecRegistries.fromProviders; +import static org.bson.codecs.configuration.CodecRegistries.fromRegistries; +import static org.junit.jupiter.api.Assertions.assertEquals; + +public abstract class AbstractBinaryVectorFunctionalTest extends OperationTest { + + private static final byte VECTOR_SUBTYPE = BsonBinarySubType.VECTOR.getValue(); + private static final String FIELD_VECTOR = "vector"; + private static final CodecRegistry CODEC_REGISTRY = fromRegistries(getDefaultCodecRegistry(), + fromProviders(PojoCodecProvider + .builder() + .automatic(true).build())); + private MongoCollection documentCollection; + + private MongoClient mongoClient; + + @BeforeEach + public void setUp() { + super.beforeEach(); + mongoClient = 
getMongoClient(getMongoClientSettingsBuilder() + .codecRegistry(CODEC_REGISTRY) + .build()); + documentCollection = mongoClient + .getDatabase(getDatabaseName()) + .getCollection(getCollectionName()); + } + + @AfterEach + @SuppressWarnings("try") + public void afterEach() { + try (MongoClient ignore = mongoClient) { + super.afterEach(); + } + } + + private static MongoClientSettings.Builder getMongoClientSettingsBuilder() { + return Fixture.getMongoClientSettingsBuilder() + .readConcern(ReadConcern.MAJORITY) + .writeConcern(WriteConcern.MAJORITY) + .readPreference(ReadPreference.primary()); + } + + protected abstract MongoClient getMongoClient(MongoClientSettings settings); + + @ParameterizedTest + @ValueSource(bytes = {-1, 1, 2, 3, 4, 5, 6, 7, 8}) + void shouldThrowExceptionForInvalidPackedBitArrayPaddingWhenDecodeEmptyVector(final byte invalidPadding) { + //given + Binary invalidVector = new Binary(VECTOR_SUBTYPE, new byte[]{PACKED_BIT.getValue(), invalidPadding}); + documentCollection.insertOne(new Document(FIELD_VECTOR, invalidVector)); + + // when & then + BsonInvalidOperationException exception = Assertions.assertThrows(BsonInvalidOperationException.class, ()-> { + findExactlyOne(documentCollection) + .get(FIELD_VECTOR, BinaryVector.class); + }); + assertEquals("Padding must be 0 if vector is empty, but found: " + invalidPadding, exception.getMessage()); + } + + @ParameterizedTest + @ValueSource(bytes = {-1, 1}) + void shouldThrowExceptionForInvalidFloat32Padding(final byte invalidPadding) { + // given + Binary invalidVector = new Binary(VECTOR_SUBTYPE, new byte[]{FLOAT32.getValue(), invalidPadding, 10, 20, 30, 40}); + documentCollection.insertOne(new Document(FIELD_VECTOR, invalidVector)); + + // when & then + BsonInvalidOperationException exception = Assertions.assertThrows(BsonInvalidOperationException.class, ()-> { + findExactlyOne(documentCollection) + .get(FIELD_VECTOR, BinaryVector.class); + }); + assertEquals("Padding must be 0 for FLOAT32 data type, but found: " + invalidPadding, exception.getMessage()); + } + + @ParameterizedTest + @ValueSource(bytes = {-1, 1}) + void shouldThrowExceptionForInvalidInt8Padding(final byte invalidPadding) { + // given + Binary invalidVector = new Binary(VECTOR_SUBTYPE, new byte[]{INT8.getValue(), invalidPadding, 10, 20, 30, 40}); + documentCollection.insertOne(new Document(FIELD_VECTOR, invalidVector)); + + // when & then + BsonInvalidOperationException exception = Assertions.assertThrows(BsonInvalidOperationException.class, ()-> { + findExactlyOne(documentCollection) + .get(FIELD_VECTOR, BinaryVector.class); + }); + assertEquals("Padding must be 0 for INT8 data type, but found: " + invalidPadding, exception.getMessage()); + } + + @ParameterizedTest + @ValueSource(bytes = {-1, 8}) + void shouldThrowExceptionForInvalidPackedBitPadding(final byte invalidPadding) { + // given + Binary invalidVector = new Binary(VECTOR_SUBTYPE, new byte[]{PACKED_BIT.getValue(), invalidPadding, 10, 20, 30, 40}); + documentCollection.insertOne(new Document(FIELD_VECTOR, invalidVector)); + + // when & then + BsonInvalidOperationException exception = Assertions.assertThrows(BsonInvalidOperationException.class, ()-> { + findExactlyOne(documentCollection) + .get(FIELD_VECTOR, BinaryVector.class); + }); + assertEquals("Padding must be between 0 and 7 bits, but found: " + invalidPadding, exception.getMessage()); + } + + private static Stream provideValidVectors() { + return Stream.of( + BinaryVector.floatVector(new float[]{1.1f, 2.2f, 3.3f}), + BinaryVector.int8Vector(new 
byte[]{10, 20, 30, 40}),
+                BinaryVector.packedBitVector(new byte[]{(byte) 0b10101010, (byte) 0b01010101}, (byte) 3)
+        );
+    }
+
+    @ParameterizedTest
+    @MethodSource("provideValidVectors")
+    void shouldStoreAndRetrieveValidVector(final BinaryVector expectedVector) {
+        // given
+        Document documentToInsert = new Document(FIELD_VECTOR, expectedVector)
+                .append("otherField", 1); // to test that the next field is not affected
+        documentCollection.insertOne(documentToInsert);
+
+        // when & then
+        BinaryVector actualVector = findExactlyOne(documentCollection)
+                .get(FIELD_VECTOR, BinaryVector.class);
+
+        assertEquals(expectedVector, actualVector);
+    }
+
+    @ParameterizedTest
+    @MethodSource("provideValidVectors")
+    void shouldStoreAndRetrieveValidVectorWithBsonBinary(final BinaryVector expectedVector) {
+        // given
+        Document documentToInsert = new Document(FIELD_VECTOR, new BsonBinary(expectedVector));
+        documentCollection.insertOne(documentToInsert);
+
+        // when & then
+        BinaryVector actualVector = findExactlyOne(documentCollection)
+                .get(FIELD_VECTOR, BinaryVector.class);
+
+        assertEquals(expectedVector, actualVector);
+    }
+
+    @Test
+    void shouldStoreAndRetrieveValidVectorWithFloatVectorPojo() {
+        // given
+        MongoCollection<Float32BinaryVectorPojo> floatVectorPojoMongoCollection = mongoClient
+                .getDatabase(getDatabaseName())
+                .getCollection(getCollectionName()).withDocumentClass(Float32BinaryVectorPojo.class);
+        Float32BinaryVector vector = BinaryVector.floatVector(new float[]{1.1f, 2.2f, 3.3f});
+
+        // when
+        floatVectorPojoMongoCollection.insertOne(new Float32BinaryVectorPojo(vector));
+        Float32BinaryVectorPojo floatVectorPojo = floatVectorPojoMongoCollection.find().first();
+
+        // then
+        Assertions.assertNotNull(floatVectorPojo);
+        assertEquals(vector, floatVectorPojo.getVector());
+    }
+
+    @Test
+    void shouldStoreAndRetrieveValidVectorWithInt8VectorPojo() {
+        // given
+        MongoCollection<Int8BinaryVectorPojo> int8VectorPojoMongoCollection = mongoClient
+                .getDatabase(getDatabaseName())
+                .getCollection(getCollectionName()).withDocumentClass(Int8BinaryVectorPojo.class);
+        Int8BinaryVector vector = BinaryVector.int8Vector(new byte[]{10, 20, 30, 40});
+
+        // when
+        int8VectorPojoMongoCollection.insertOne(new Int8BinaryVectorPojo(vector));
+        Int8BinaryVectorPojo int8VectorPojo = int8VectorPojoMongoCollection.find().first();
+
+        // then
+        Assertions.assertNotNull(int8VectorPojo);
+        assertEquals(vector, int8VectorPojo.getVector());
+    }
+
+    @Test
+    void shouldStoreAndRetrieveValidVectorWithPackedBitVectorPojo() {
+        // given
+        MongoCollection<PackedBitBinaryVectorPojo> packedBitVectorPojoMongoCollection = mongoClient
+                .getDatabase(getDatabaseName())
+                .getCollection(getCollectionName()).withDocumentClass(PackedBitBinaryVectorPojo.class);
+
+        PackedBitBinaryVector vector = BinaryVector.packedBitVector(new byte[]{(byte) 0b10101010, (byte) 0b01010101}, (byte) 3);
+
+        // when
+        packedBitVectorPojoMongoCollection.insertOne(new PackedBitBinaryVectorPojo(vector));
+        PackedBitBinaryVectorPojo packedBitVectorPojo = packedBitVectorPojoMongoCollection.find().first();
+
+        // then
+        Assertions.assertNotNull(packedBitVectorPojo);
+        assertEquals(vector, packedBitVectorPojo.getVector());
+    }
+
+    @ParameterizedTest
+    @MethodSource("provideValidVectors")
+    void shouldStoreAndRetrieveValidVectorWithGenericVectorPojo(final BinaryVector actualVector) {
+        // given
+        MongoCollection<BinaryVectorPojo> floatVectorPojoMongoCollection = mongoClient
+                .getDatabase(getDatabaseName())
+                .getCollection(getCollectionName()).withDocumentClass(BinaryVectorPojo.class);
+
+        // when
+        floatVectorPojoMongoCollection.insertOne(new
BinaryVectorPojo(actualVector)); + BinaryVectorPojo vectorPojo = floatVectorPojoMongoCollection.find().first(); + + //then + Assertions.assertNotNull(vectorPojo); + assertEquals(actualVector, vectorPojo.getVector()); + } + + private Document findExactlyOne(final MongoCollection collection) { + List documents = new ArrayList<>(); + collection.find().into(documents); + assertEquals(1, documents.size(), "Expected exactly one document, but found: " + documents.size()); + return documents.get(0); + } + + public static class BinaryVectorPojo { + private BinaryVector vector; + + public BinaryVectorPojo() { + } + + public BinaryVectorPojo(final BinaryVector vector) { + this.vector = vector; + } + + public BinaryVector getVector() { + return vector; + } + + public void setVector(final BinaryVector vector) { + this.vector = vector; + } + } + + public static class Int8BinaryVectorPojo { + private Int8BinaryVector vector; + + public Int8BinaryVectorPojo() { + } + + public Int8BinaryVectorPojo(final Int8BinaryVector vector) { + this.vector = vector; + } + + public BinaryVector getVector() { + return vector; + } + + public void setVector(final Int8BinaryVector vector) { + this.vector = vector; + } + } + + public static class PackedBitBinaryVectorPojo { + private PackedBitBinaryVector vector; + + public PackedBitBinaryVectorPojo() { + } + + public PackedBitBinaryVectorPojo(final PackedBitBinaryVector vector) { + this.vector = vector; + } + + public BinaryVector getVector() { + return vector; + } + + public void setVector(final PackedBitBinaryVector vector) { + this.vector = vector; + } + } + + public static class Float32BinaryVectorPojo { + private Float32BinaryVector vector; + + public Float32BinaryVectorPojo() { + } + + public Float32BinaryVectorPojo(final Float32BinaryVector vector) { + this.vector = vector; + } + + public BinaryVector getVector() { + return vector; + } + + public void setVector(final Float32BinaryVector vector) { + this.vector = vector; + } + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/vector/BinaryVectorFunctionalTest.java b/driver-sync/src/test/functional/com/mongodb/client/vector/BinaryVectorFunctionalTest.java new file mode 100644 index 00000000000..05bf084dc84 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/vector/BinaryVectorFunctionalTest.java @@ -0,0 +1,28 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
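For orientation, a standalone sketch of the round trip these tests drive through the server, performed purely in memory: wrap a vector in a BsonBinary (subtype 9, BsonBinarySubType.VECTOR) and decode it back. BsonBinary#asVector is assumed to be available here, as in recent driver versions:

    import org.bson.BinaryVector;
    import org.bson.BsonBinary;
    import org.bson.Float32BinaryVector;

    public final class VectorRoundTrip {
        public static void main(final String[] args) {
            Float32BinaryVector original = BinaryVector.floatVector(new float[]{1.1f, 2.2f, 3.3f});
            BsonBinary encoded = new BsonBinary(original);  // header: dtype byte + padding byte, then the data
            BinaryVector decoded = encoded.asVector();      // enforces the padding rules the tests above assert
            System.out.println(original.equals(decoded));   // true
        }
    }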
+ */
+
+package com.mongodb.client.vector;
+
+import com.mongodb.MongoClientSettings;
+import com.mongodb.client.MongoClient;
+import com.mongodb.client.MongoClients;
+
+public class BinaryVectorFunctionalTest extends AbstractBinaryVectorFunctionalTest {
+    @Override
+    protected MongoClient getMongoClient(final MongoClientSettings settings) {
+        return MongoClients.create(settings);
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/fixture/EncryptionFixture.java b/driver-sync/src/test/functional/com/mongodb/fixture/EncryptionFixture.java
new file mode 100644
index 00000000000..f6edb9a14ed
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/fixture/EncryptionFixture.java
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.fixture;
+
+import java.util.HashMap;
+import java.util.Map;
+
+import static com.mongodb.ClusterFixture.getEnv;
+
+/**
+ * Helper class for the CSFLE/QE tests.
+ */
+public final class EncryptionFixture {
+
+    private EncryptionFixture() {
+        //NOP
+    }
+
+    public static Map<String, Map<String, Object>> getKmsProviders(final KmsProviderType... kmsProviderTypes) {
+        return new HashMap<String, Map<String, Object>>() {{
+            for (KmsProviderType kmsProviderType : kmsProviderTypes) {
+                switch (kmsProviderType) {
+                    case LOCAL:
+                        put("local", new HashMap<String, Object>() {{
+                            put("key", "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBM"
+                                    + "UN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk");
+                        }});
+                        break;
+                    case GCP:
+                        put("gcp", new HashMap<String, Object>() {{
+                            put("email", getEnv("GCP_EMAIL"));
+                            put("privateKey", getEnv("GCP_PRIVATE_KEY"));
+                        }});
+                        break;
+                    case AWS:
+                        put("aws", new HashMap<String, Object>() {{
+                            put("accessKeyId", getEnv("AWS_ACCESS_KEY_ID"));
+                            put("secretAccessKey", getEnv("AWS_SECRET_ACCESS_KEY"));
+                        }});
+                        break;
+                    case AZURE:
+                        put("azure", new HashMap<String, Object>() {{
+                            put("tenantId", getEnv("AZURE_TENANT_ID"));
+                            put("clientId", getEnv("AZURE_CLIENT_ID"));
+                            put("clientSecret", getEnv("AZURE_CLIENT_SECRET"));
+                        }});
+                        break;
+                    case KMIP:
+                        put("kmip", new HashMap<String, Object>() {{
+                            put("endpoint", getEnv("org.mongodb.test.kmipEndpoint", "localhost:5698"));
+                        }});
+                        break;
+                    default:
+                        throw new IllegalArgumentException("Unsupported KMS provider type: " + kmsProviderType);
+                }
+            }
+        }};
+    }
+
+    public enum KmsProviderType {
+        LOCAL,
+        AWS,
+        AZURE,
+        GCP,
+        KMIP
+    }
+}
diff --git a/driver-sync/src/test/functional/com/mongodb/internal/connection/OidcAuthenticationProseTests.java b/driver-sync/src/test/functional/com/mongodb/internal/connection/OidcAuthenticationProseTests.java
new file mode 100644
index 00000000000..8c9432b77bf
--- /dev/null
+++ b/driver-sync/src/test/functional/com/mongodb/internal/connection/OidcAuthenticationProseTests.java
@@ -0,0 +1,1261 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
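A hedged usage sketch for the fixture above: assemble the KMS provider map for a local-only setup, the one variant that needs no environment variables (the other provider types read their credentials via getEnv). The class name is illustrative:

    import java.util.Map;

    import com.mongodb.fixture.EncryptionFixture;

    import static com.mongodb.fixture.EncryptionFixture.KmsProviderType.LOCAL;

    public final class KmsProvidersExample {
        public static void main(final String[] args) {
            Map<String, Map<String, Object>> kmsProviders = EncryptionFixture.getKmsProviders(LOCAL);
            System.out.println(kmsProviders.keySet()); // [local]
        }
    }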
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.connection; + +import com.mongodb.ClusterFixture; +import com.mongodb.ConnectionString; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoCommandException; +import com.mongodb.MongoConfigurationException; +import com.mongodb.MongoCredential; +import com.mongodb.MongoSecurityException; +import com.mongodb.MongoSocketException; +import com.mongodb.assertions.Assertions; +import com.mongodb.client.ClientSession; +import com.mongodb.client.FindIterable; +import com.mongodb.client.Fixture; +import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoClients; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.TestListener; +import com.mongodb.event.CommandListener; +import com.mongodb.lang.Nullable; +import org.bson.BsonArray; +import org.bson.BsonBoolean; +import org.bson.BsonDocument; +import org.bson.BsonInt32; +import org.bson.BsonString; +import org.bson.Document; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.io.IOException; +import java.lang.reflect.Field; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.time.Duration; +import java.time.temporal.ChronoUnit; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Random; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static com.mongodb.MongoCredential.ALLOWED_HOSTS_KEY; +import static com.mongodb.MongoCredential.ENVIRONMENT_KEY; +import static com.mongodb.MongoCredential.OIDC_CALLBACK_KEY; +import static com.mongodb.MongoCredential.OIDC_HUMAN_CALLBACK_KEY; +import static com.mongodb.MongoCredential.OidcCallback; +import static com.mongodb.MongoCredential.OidcCallbackContext; +import static com.mongodb.MongoCredential.OidcCallbackResult; +import static com.mongodb.MongoCredential.TOKEN_RESOURCE_KEY; +import static com.mongodb.assertions.Assertions.assertNotNull; +import static com.mongodb.testing.MongoAssertions.assertCause; +import static java.lang.String.format; +import static java.lang.System.getenv; +import static java.util.Arrays.asList; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static 
org.junit.jupiter.api.Assumptions.assumeTrue; +import static util.ThreadTestHelpers.executeAll; + +/** + * See + * Prose Tests. + */ +public class OidcAuthenticationProseTests { + + private String appName; + + public static boolean oidcTestsEnabled() { + return Boolean.parseBoolean(getenv().get("OIDC_TESTS_ENABLED")); + } + + private void assumeTestEnvironment() { + assumeTrue(getenv("OIDC_TOKEN_DIR") != null); + } + + protected static String getOidcUri() { + return assertNotNull(getenv("MONGODB_URI_SINGLE")); + } + + private static String getOidcUriMulti() { + return assertNotNull(getenv("MONGODB_URI_MULTI")); + } + + private static String getOidcEnv() { + return assertNotNull(getenv("OIDC_ENV")); + } + + private static void assumeAzure() { + assumeTrue(getOidcEnv().equals("azure")); + } + + @Nullable + private static String getUserWithDomain(@Nullable final String user) { + return user == null ? null : user + "@" + getenv("OIDC_DOMAIN"); + } + + private static String oidcTokenDirectory() { + String dir = getenv("OIDC_TOKEN_DIR"); + if (!dir.endsWith("/")) { + dir = dir + "/"; + } + return dir; + } + + private static String getTestTokenFilePath() { + return getenv(OidcAuthenticator.OIDC_TOKEN_FILE); + } + + protected MongoClient createMongoClient(final MongoClientSettings settings) { + return MongoClients.create(settings); + } + + @BeforeEach + public void beforeEach() { + assumeTrue(oidcTestsEnabled()); + InternalStreamConnection.setRecordEverything(true); + this.appName = this.getClass().getSimpleName() + "-" + new Random().nextInt(Integer.MAX_VALUE); + } + + @AfterEach + public void afterEach() { + InternalStreamConnection.setRecordEverything(false); + } + + @Test + public void test1p1CallbackIsCalledDuringAuth() { + // #. Create a ``MongoClient`` configured with an OIDC callback... + TestCallback callback = createCallback(); + MongoClientSettings clientSettings = createSettings(callback); + // #. Perform a find operation that succeeds + performFind(clientSettings); + assertEquals(1, callback.invocations.get()); + } + + @Test + public void test1p2CallbackCalledOnceForMultipleConnections() { + TestCallback callback = createCallback(); + MongoClientSettings clientSettings = createSettings(callback); + try (MongoClient mongoClient = createMongoClient(clientSettings)) { + List threads = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + Thread t = new Thread(() -> performFind(mongoClient)); + t.setDaemon(true); + t.start(); + threads.add(t); + } + for (Thread t : threads) { + try { + t.join(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + } + assertEquals(1, callback.invocations.get()); + } + + @Test + public void test2p1ValidCallbackInputs() { + Duration expectedTimeoutDuration = Duration.ofMinutes(1); + + TestCallback callback1 = createCallback(); + // #. Verify that the request callback was called with the appropriate + // inputs, including the timeout parameter if possible. + OidcCallback callback2 = (context) -> { + assertEquals(expectedTimeoutDuration, context.getTimeout()); + return callback1.onRequest(context); + }; + MongoClientSettings clientSettings = createSettings(callback2); + try (MongoClient mongoClient = createMongoClient(clientSettings)) { + performFind(mongoClient); + // callback was called + assertEquals(1, callback1.getInvocations()); + } + } + + // Not a prose test + @ParameterizedTest(name = "{0}. 
" + + "Parameters: timeoutMs={1}, " + + "serverSelectionTimeoutMS={2}," + + " expectedTimeoutThreshold={3}") + @MethodSource + void testValidCallbackInputsTimeoutWhenTimeoutMsIsSet(final String testName, + final long timeoutMs, + final long serverSelectionTimeoutMS, + final long expectedTimeoutThreshold) { + TestCallback callback1 = createCallback(); + + OidcCallback callback2 = (context) -> { + assertTrue(context.getTimeout().toMillis() < expectedTimeoutThreshold, + format("Expected timeout to be less than %d, but was %d", + expectedTimeoutThreshold, + context.getTimeout().toMillis())); + return callback1.onRequest(context); + }; + + MongoClientSettings clientSettings = MongoClientSettings.builder(createSettings(callback2)) + .applyToClusterSettings(builder -> + builder.serverSelectionTimeout( + serverSelectionTimeoutMS, + TimeUnit.MILLISECONDS)) + .timeout(timeoutMs, TimeUnit.MILLISECONDS) + .build(); + + try (MongoClient mongoClient = createMongoClient(clientSettings)) { + long start = System.nanoTime(); + performFind(mongoClient); + assertEquals(1, callback1.getInvocations()); + long elapsed = msElapsedSince(start); + + + assertFalse(elapsed > minTimeout(timeoutMs, serverSelectionTimeoutMS), + format("Elapsed time %d is greater then minimum of serverSelectionTimeoutMS and timeoutMs, which is %d. " + + "This indicates that the callback was not called with the expected timeout.", + elapsed, + minTimeout(timeoutMs, serverSelectionTimeoutMS))); + + } + } + + private static Stream testValidCallbackInputsTimeoutWhenTimeoutMsIsSet() { + long rtt = ClusterFixture.getPrimaryRTT(); + return Stream.of( + Arguments.of("serverSelectionTimeoutMS honored for oidc callback if it's lower than timeoutMS", + 1000 + rtt, // timeoutMS + 500 + rtt, // serverSelectionTimeoutMS + 499 + rtt), // expectedTimeoutThreshold + Arguments.of("timeoutMS honored for oidc callback if it's lower than serverSelectionTimeoutMS", + 500 + rtt, // timeoutMS + 1000 + rtt, // serverSelectionTimeoutMS + 499 + rtt), // expectedTimeoutThreshold + Arguments.of("timeoutMS honored for oidc callback if serverSelectionTimeoutMS is infinite", + 500 + rtt, // timeoutMS + -1, // serverSelectionTimeoutMS + 499 + rtt), // expectedTimeoutThreshold, + Arguments.of("serverSelectionTimeoutMS honored for oidc callback if timeoutMS=0", + 0, // infinite timeoutMS + 500 + rtt, // serverSelectionTimeoutMS + 499 + rtt) // expectedTimeoutThreshold + ); + } + + // Not a prose test + @Test + @DisplayName("test callback timeout when serverSelectionTimeoutMS and timeoutMS are infinite") + void testCallbackTimeoutWhenServerSelectionTimeoutMsIsInfiniteTimeoutMsIsSet() { + TestCallback callback1 = createCallback(); + Duration expectedTimeout = ChronoUnit.FOREVER.getDuration(); + + OidcCallback callback2 = (context) -> { + assertEquals(expectedTimeout, context.getTimeout(), + format("Expected timeout to be infinite (%s), but was %s", + expectedTimeout, context.getTimeout())); + + return callback1.onRequest(context); + }; + + MongoClientSettings clientSettings = MongoClientSettings.builder(createSettings(callback2)) + .applyToClusterSettings(builder -> + builder.serverSelectionTimeout( + -1, // -1 means infinite + TimeUnit.MILLISECONDS)) + .timeout(0, TimeUnit.MILLISECONDS) + .build(); + + try (MongoClient mongoClient = createMongoClient(clientSettings)) { + performFind(mongoClient); + assertEquals(1, callback1.getInvocations()); + } + } + + @Test + public void test2p2RequestCallbackReturnsNull() { + //noinspection ConstantConditions + OidcCallback callback = 
(context) -> null; + MongoClientSettings clientSettings = this.createSettings(callback); + assertFindFails(clientSettings, MongoConfigurationException.class, + "Result of callback must not be null"); + } + + @Test + public void test2p3CallbackReturnsMissingData() { + // #. Create a client with a request callback that returns data not + // conforming to the OIDCRequestTokenResult with missing field(s). + OidcCallback callback = (context) -> { + //noinspection ConstantConditions + return new OidcCallbackResult(null); + }; + // we ensure that the error is propagated + MongoClientSettings clientSettings = createSettings(callback); + try (MongoClient mongoClient = createMongoClient(clientSettings)) { + assertCause(IllegalArgumentException.class, + "accessToken can not be null", + () -> performFind(mongoClient)); + } + } + + @Test + public void test2p4InvalidClientConfigurationWithCallback() { + String uri = getOidcUri() + "&authMechanismProperties=ENVIRONMENT:" + getOidcEnv(); + MongoClientSettings settings = createSettings( + uri, createCallback(), null, OIDC_CALLBACK_KEY); + assertCause(IllegalArgumentException.class, + "OIDC_CALLBACK must not be specified when ENVIRONMENT is specified", + () -> performFind(settings)); + } + + @Test + public void test2p5InvalidAllowedHosts() { + assumeTestEnvironment(); + + String uri = "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ENVIRONMENT:azure,TOKEN_RESOURCE:123"; + ConnectionString cs = new ConnectionString(uri); + MongoCredential credential = assertNotNull(cs.getCredential()) + .withMechanismProperty("ALLOWED_HOSTS", Collections.emptyList()); + MongoClientSettings settings = MongoClientSettings.builder() + .applicationName(appName) + .applyConnectionString(cs) + .retryReads(false) + .credential(credential) + .build(); + assertCause(IllegalArgumentException.class, + "ALLOWED_HOSTS must be specified only when OIDC_HUMAN_CALLBACK is specified", + () -> { + try (MongoClient mongoClient = createMongoClient(settings)) { + performFind(mongoClient); + } + }); + } + + @Test + public void test3p1AuthFailsWithCachedToken() throws ExecutionException, InterruptedException, NoSuchFieldException, IllegalAccessException { + TestCallback callbackWrapped = createCallback(); + // reference to the token to poison + CompletableFuture poisonToken = new CompletableFuture<>(); + OidcCallback callback = (context) -> { + OidcCallbackResult result = callbackWrapped.onRequest(context); + String accessToken = result.getAccessToken(); + if (!poisonToken.isDone()) { + poisonToken.complete(accessToken); + } + return result; + }; + + MongoClientSettings clientSettings = createSettings(callback); + try (MongoClient mongoClient = createMongoClient(clientSettings)) { + // populate cache + performFind(mongoClient); + assertEquals(1, callbackWrapped.invocations.get()); + // Poison the *Client Cache* with an invalid access token. 
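The poisoning step below mutates the cached token string in place through reflection. Here is a self-contained sketch of that trick, under the same assumptions the test makes: a JDK 9+ compact String backed by a byte[] field named "value", and, on JDK 16 and later, the JVM flag --add-opens java.base/java.lang=ALL-UNNAMED:

    import java.lang.reflect.Field;

    public final class StringPoisoning {
        public static void main(final String[] args) throws Exception {
            // StringBuilder#toString copies the backing array, so the literal pool stays untouched
            String token = new StringBuilder("secret").toString();
            Field value = String.class.getDeclaredField("value");
            value.setAccessible(true); // requires --add-opens on modern JDKs
            byte[] bytes = (byte[]) value.get(token);
            bytes[0] = '~';
            System.out.println(token); // prints "~ecret"
        }
    }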
+ // uses reflection + String poisonString = poisonToken.get(); + Field f = String.class.getDeclaredField("value"); + f.setAccessible(true); + byte[] poisonChars = (byte[]) f.get(poisonString); + poisonChars[0] = '~'; + poisonChars[1] = '~'; + + assertEquals(1, callbackWrapped.invocations.get()); + + // cause another connection to be opened + delayNextFind(); + executeAll(2, () -> performFind(mongoClient)); + } + assertEquals(2, callbackWrapped.invocations.get()); + } + + @Test + public void test3p2AuthFailsWithoutCachedToken() { + OidcCallback callback = + (x) -> new OidcCallbackResult("invalid_token"); + MongoClientSettings clientSettings = createSettings(callback); + try (MongoClient mongoClient = createMongoClient(clientSettings)) { + assertCause(MongoCommandException.class, + "Command execution failed on MongoDB server with error 18 (AuthenticationFailed):", + () -> performFind(mongoClient)); + } + } + + @Test + public void test3p3UnexpectedErrorDoesNotClearCache() { + assumeTestEnvironment(); + + TestListener listener = new TestListener(); + TestCommandListener commandListener = new TestCommandListener(listener); + + TestCallback callback = createCallback(); + MongoClientSettings clientSettings = createSettings(getOidcUri(), callback, commandListener); + + try (MongoClient mongoClient = createMongoClient(clientSettings)) { + failCommand(20, 1, "saslStart"); + assertCause(MongoCommandException.class, + "Command execution failed on MongoDB server with error 20", + () -> performFind(mongoClient)); + + assertEquals(Arrays.asList( + "isMaster started", + "isMaster succeeded", + "saslStart started", + "saslStart failed" + ), listener.getEventStrings()); + + assertEquals(1, callback.getInvocations()); + performFind(mongoClient); + assertEquals(1, callback.getInvocations()); + } + } + + @Test + public void test4p1Reauthentication() { + testReauthentication(false); + } + + private void testReauthentication(final boolean inSession) { + TestCallback callback = createCallback(); + MongoClientSettings clientSettings = createSettings(callback); + try (MongoClient mongoClient = createMongoClient(clientSettings); + ClientSession session = inSession ? mongoClient.startSession() : null) { + failCommand(391, 1, "find"); + // #. Perform a find operation that succeeds. + performFind(mongoClient, session); + } + assertEquals(2, callback.invocations.get()); + } + + @Test + public void test4p2ReadCommandsFailIfReauthenticationFails() { + // Create a `MongoClient` whose OIDC callback returns one good token + // and then bad tokens after the first call. + TestCallback wrappedCallback = createCallback(); + OidcCallback callback = (context) -> { + OidcCallbackResult result1 = wrappedCallback.callback(context); + return new OidcCallbackResult(wrappedCallback.getInvocations() > 1 ? "bad" : result1.getAccessToken()); + }; + MongoClientSettings clientSettings = createSettings(callback); + try (MongoClient mongoClient = createMongoClient(clientSettings)) { + performFind(mongoClient); + failCommand(391, 1, "find"); + assertCause(MongoCommandException.class, + "Command execution failed on MongoDB server with error 18", + () -> performFind(mongoClient)); + } + assertEquals(2, wrappedCallback.invocations.get()); + } + + @Test + public void test4p3WriteCommandsFailIfReauthenticationFails() { + // Create a `MongoClient` whose OIDC callback returns one good token + // and then bad tokens after the first call. 
+ TestCallback wrappedCallback = createCallback(); + OidcCallback callback = (context) -> { + OidcCallbackResult result1 = wrappedCallback.callback(context); + return new OidcCallbackResult( + wrappedCallback.getInvocations() > 1 ? "bad" : result1.getAccessToken()); + }; + MongoClientSettings clientSettings = createSettings(callback); + try (MongoClient mongoClient = createMongoClient(clientSettings)) { + performInsert(mongoClient); + failCommand(391, 1, "insert"); + assertCause(MongoCommandException.class, + "Command execution failed on MongoDB server with error 18", + () -> performInsert(mongoClient)); + } + assertEquals(2, wrappedCallback.invocations.get()); + } + + private static void performInsert(final MongoClient mongoClient) { + mongoClient + .getDatabase("test") + .getCollection("test") + .insertOne(Document.parse("{ x: 1 }")); + } + + @Test + public void test4p5ReauthenticationInSession() { + testReauthentication(true); + } + + @Test + public void test5p1AzureSucceedsWithNoUsername() { + assumeAzure(); + String oidcUri = getOidcUri(); + MongoClientSettings clientSettings = createSettings(oidcUri, createCallback(), null); + // Create an OIDC configured client with `ENVIRONMENT:azure` and a valid + // `TOKEN_RESOURCE` and no username. + MongoCredential credential = Assertions.assertNotNull(clientSettings.getCredential()); + assertNotNull(credential.getMechanismProperty(TOKEN_RESOURCE_KEY, null)); + assertNull(credential.getUserName()); + try (MongoClient mongoClient = createMongoClient(clientSettings)) { + // Perform a `find` operation that succeeds. + performFind(mongoClient); + } + } + + @Test + public void test5p2AzureFailsWithBadUsername() { + assumeAzure(); + String oidcUri = getOidcUri(); + ConnectionString cs = new ConnectionString(oidcUri); + MongoCredential oldCredential = Assertions.assertNotNull(cs.getCredential()); + String tokenResource = oldCredential.getMechanismProperty(TOKEN_RESOURCE_KEY, null); + assertNotNull(tokenResource); + MongoCredential cred = MongoCredential.createOidcCredential("bad") + .withMechanismProperty(ENVIRONMENT_KEY, "azure") + .withMechanismProperty(TOKEN_RESOURCE_KEY, tokenResource); + MongoClientSettings.Builder builder = MongoClientSettings.builder() + .applicationName(appName) + .retryReads(false) + .applyConnectionString(cs) + .credential(cred); + MongoClientSettings clientSettings = builder.build(); + // the failure is external to the driver + assertFindFails(clientSettings, IOException.class, "400 Bad Request"); + } + + // Tests for human authentication ("testh", to preserve ordering) + + @Test + public void testh1p1SinglePrincipalImplicitUsername() { + assumeTestEnvironment(); + // #. Create default OIDC client with authMechanism=MONGODB-OIDC. + TestCallback callback = createHumanCallback(); + MongoClientSettings clientSettings = createHumanSettings(callback, null); + // #. Perform a find operation that succeeds + performFind(clientSettings); + assertEquals(1, callback.invocations.get()); + } + + @Test + public void testh1p2SinglePrincipalExplicitUsername() { + assumeTestEnvironment(); + // #. Create a client with MONGODB_URI_SINGLE, a username of test_user1, + // authMechanism=MONGODB-OIDC, and the OIDC human callback. + TestCallback callback = createHumanCallback(); + MongoClientSettings clientSettings = createSettingsHuman(getUserWithDomain("test_user1"), callback, getOidcUri()); + // #. 
Perform a find operation that succeeds + performFind(clientSettings); + } + + @Test + public void testh1p3MultiplePrincipalUser1() { + assumeTestEnvironment(); + // #. Create a client with MONGODB_URI_MULTI, a username of test_user1, + // authMechanism=MONGODB-OIDC, and the OIDC human callback. + MongoClientSettings clientSettings = createSettingsMulti(getUserWithDomain("test_user1"), createHumanCallback()); + // #. Perform a find operation that succeeds + performFind(clientSettings); + } + + @Test + public void testh1p4MultiplePrincipalUser2() { + assumeTestEnvironment(); + //- Create a human callback that reads in the generated ``test_user2`` token file. + //- Create a client with ``MONGODB_URI_MULTI``, a username of ``test_user2``, + // ``authMechanism=MONGODB-OIDC``, and the OIDC human callback. + MongoClientSettings clientSettings = createSettingsMulti(getUserWithDomain("test_user2"), createHumanCallback() + .setPathSupplier(() -> tokenQueue("test_user2").remove())); + performFind(clientSettings); + } + + @Test + public void testh1p5MultiplePrincipalNoUser() { + assumeTestEnvironment(); + // Create an OIDC configured client with `MONGODB_URI_MULTI` and no username. + MongoClientSettings clientSettings = createSettingsMulti(null, createHumanCallback()); + // Assert that a `find` operation fails. + assertFindFails(clientSettings, MongoCommandException.class, "Authentication failed"); + } + + @Test + public void testh1p6AllowedHostsBlocked() { + assumeTestEnvironment(); + //- Create a default OIDC client, with an ``ALLOWED_HOSTS`` that is an empty list. + //- Assert that a ``find`` operation fails with a client-side error. + MongoClientSettings clientSettings1 = createSettings(getOidcUri(), + createHumanCallback(), null, OIDC_HUMAN_CALLBACK_KEY, Collections.emptyList()); + assertFindFails(clientSettings1, MongoSecurityException.class, "not permitted by ALLOWED_HOSTS"); + + //- Create a client that uses the URL + // ``mongodb://localhost/?authMechanism=MONGODB-OIDC&ignored=example.com``, a + // human callback, and an ``ALLOWED_HOSTS`` that contains ``["example.com"]``. + //- Assert that a ``find`` operation fails with a client-side error. 
+ MongoClientSettings clientSettings2 = createSettings(getOidcUri() + "&ignored=example.com", + createHumanCallback(), null, OIDC_HUMAN_CALLBACK_KEY, Arrays.asList("example.com")); + assertFindFails(clientSettings2, MongoSecurityException.class, "not permitted by ALLOWED_HOSTS"); + } + + // Not a prose test + @Test + public void testAllowedHostsDisallowedInConnectionString() { + String string = "mongodb://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ALLOWED_HOSTS:localhost"; + assertCause(IllegalArgumentException.class, + "connection string contains disallowed mechanism properties", + () -> new ConnectionString(string)); + } + + @Test + public void testh1p7AllowedHostsInConnectionStringIgnored() { + // example.com changed to localhost, because resolveAdditionalQueryParametersFromTxtRecords + // fails with "Failed looking up TXT record for host example.com" + String string = "mongodb+srv://localhost/?authMechanism=MONGODB-OIDC&authMechanismProperties=ALLOWED_HOSTS:%5B%22localhost%22%5D"; + assertCause(IllegalArgumentException.class, + "connection string contains disallowed mechanism properties", + () -> new ConnectionString(string)); + } + + @Test + public void testh1p8MachineIdpWithHumanCallback() { + assumeTrue(getenv("OIDC_IS_LOCAL") != null); + + TestCallback callback = createHumanCallback() + .setPathSupplier(() -> oidcTokenDirectory() + "test_machine"); + MongoClientSettings clientSettings = createSettingsHuman( + "test_machine", callback, getOidcUri()); + performFind(clientSettings); + } + + @Test + public void testh2p1ValidCallbackInputs() { + assumeTestEnvironment(); + TestCallback callback1 = createHumanCallback(); + OidcCallback callback2 = (context) -> { + MongoCredential.IdpInfo idpInfo = assertNotNull(context.getIdpInfo()); + assertTrue(assertNotNull(idpInfo.getClientId()).startsWith("0oad")); + assertTrue(idpInfo.getIssuer().endsWith("mock-identity-config-oidc")); + assertEquals(Arrays.asList("fizz", "buzz"), idpInfo.getRequestScopes()); + assertEquals(Duration.ofMinutes(5), context.getTimeout()); + return callback1.onRequest(context); + }; + MongoClientSettings clientSettings = createHumanSettings(callback2, null); + try (MongoClient mongoClient = createMongoClient(clientSettings)) { + performFind(mongoClient); + // Ensure that callback was called + assertEquals(1, callback1.getInvocations()); + } + } + + @Test + public void testh2p2HumanCallbackReturnsMissingData() { + assumeTestEnvironment(); + //noinspection ConstantConditions + OidcCallback callbackNull = (context) -> null; + assertFindFails(createHumanSettings(callbackNull, null), + MongoConfigurationException.class, + "Result of callback must not be null"); + + //noinspection ConstantConditions + OidcCallback callback = + (context) -> new OidcCallbackResult(null); + assertFindFails(createHumanSettings(callback, null), + IllegalArgumentException.class, + "accessToken can not be null"); + } + + // Not a prose test + @Test + public void testRefreshTokenAbsent() { + // additionally, check validation for refresh in machine workflow: + OidcCallback callbackMachineRefresh = + (context) -> new OidcCallbackResult("access", Duration.ZERO, "exists"); + assertFindFails(createSettings(callbackMachineRefresh), + MongoConfigurationException.class, + "Refresh token must only be provided in human workflow"); + } + + @Test + public void testh2p3RefreshTokenPassed() { + assumeTestEnvironment(); + AtomicInteger refreshTokensProvided = new AtomicInteger(); + TestCallback callback1 = createHumanCallback(); + OidcCallback 
callback2 = (context) -> { + if (context.getRefreshToken() != null) { + refreshTokensProvided.incrementAndGet(); + } + return callback1.onRequest(context); + }; + MongoClientSettings clientSettings = createHumanSettings(callback2, null); + try (MongoClient mongoClient = createMongoClient(clientSettings)) { + performFind(mongoClient); + failCommand(391, 1, "find"); + performFind(mongoClient); + assertEquals(2, callback1.getInvocations()); + assertEquals(1, refreshTokensProvided.get()); + } + } + + @Test + public void testh3p1UsesSpecAuthIfCachedToken() { + assumeTestEnvironment(); + MongoClientSettings clientSettings = createHumanSettings(createHumanCallback(), null); + try (MongoClient mongoClient = createMongoClient(clientSettings)) { + failCommandAndCloseConnection("find", 1); + assertCause(MongoSocketException.class, + "Prematurely reached end of stream", + () -> performFind(mongoClient)); + failCommand(18, 1, "saslStart"); + performFind(mongoClient); + } + } + + @Test + public void testh3p2NoSpecAuthIfNoCachedToken() { + assumeTestEnvironment(); + failCommand(18, 1, "saslStart"); + TestListener listener = new TestListener(); + TestCommandListener commandListener = new TestCommandListener(listener); + assertFindFails(createHumanSettings(createHumanCallback(), commandListener), + MongoCommandException.class, + "Command execution failed on MongoDB server with error 18"); + assertEquals(Arrays.asList( + "isMaster started", + "isMaster succeeded", + "saslStart started", + "saslStart failed" + ), listener.getEventStrings()); + listener.clear(); + } + + @Test + public void testh4p1ReauthenticationSucceeds() { + assumeTestEnvironment(); + TestListener listener = new TestListener(); + TestCommandListener commandListener = new TestCommandListener(listener); + TestCallback callback = createHumanCallback() + .setEventListener(listener); + MongoClientSettings clientSettings = createHumanSettings(callback, commandListener); + try (MongoClient mongoClient = createMongoClient(clientSettings)) { + performFind(mongoClient); + listener.clear(); + assertEquals(1, callback.getInvocations()); + failCommand(391, 1, "find"); + // Perform another find operation that succeeds. 
+ performFind(mongoClient); + assertEquals(Arrays.asList( + // first find fails: + "find started", + "find failed", + "onRequest invoked (Refresh Token: present - IdpInfo: present)", + "read access token: test_user1", + "saslStart started", + "saslStart succeeded", + // second find succeeds: + "find started", + "find succeeded" + ), listener.getEventStrings()); + assertEquals(2, callback.getInvocations()); + } + } + + @Test + public void testh4p2SucceedsNoRefresh() { + assumeTestEnvironment(); + TestCallback callback = createHumanCallback(); + MongoClientSettings clientSettings = createHumanSettings(callback, null); + try (MongoClient mongoClient = createMongoClient(clientSettings)) { + performFind(mongoClient); + assertEquals(1, callback.getInvocations()); + + failCommand(391, 1, "find"); + performFind(mongoClient); + assertEquals(2, callback.getInvocations()); + } + } + + + @Test + public void testh4p3SucceedsAfterRefreshFails() { + assumeTestEnvironment(); + TestCallback callback1 = createHumanCallback(); + OidcCallback callback2 = (context) -> { + OidcCallbackResult oidcCallbackResult = callback1.onRequest(context); + return new OidcCallbackResult(oidcCallbackResult.getAccessToken(), Duration.ofMinutes(5), "BAD_REFRESH"); + }; + MongoClientSettings clientSettings = createHumanSettings(callback2, null); + try (MongoClient mongoClient = createMongoClient(clientSettings)) { + performFind(mongoClient); + failCommand(391, 1, "find"); + performFind(mongoClient); + assertEquals(2, callback1.getInvocations()); + } + } + + @Test + public void testh4p4Fails() { + assumeTestEnvironment(); + ConcurrentLinkedQueue tokens = tokenQueue( + "test_user1", + "test_user1_expires", + "test_user1_expires"); + TestCallback callback1 = createHumanCallback() + .setPathSupplier(() -> tokens.remove()); + OidcCallback callback2 = (context) -> { + OidcCallbackResult oidcCallbackResult = callback1.onRequest(context); + return new OidcCallbackResult(oidcCallbackResult.getAccessToken(), Duration.ofMinutes(5), "BAD_REFRESH"); + }; + MongoClientSettings clientSettings = createHumanSettings(callback2, null); + try (MongoClient mongoClient = createMongoClient(clientSettings)) { + performFind(mongoClient); + assertEquals(1, callback1.getInvocations()); + failCommand(391, 1, "find"); + assertCause(MongoCommandException.class, + "Command execution failed on MongoDB server with error 18", + () -> performFind(mongoClient)); + assertEquals(3, callback1.getInvocations()); + } + } + + // Not a prose test + @Test + public void testErrorClearsCache() { + assumeTestEnvironment(); + // #. Create a new client with a valid request callback that + // gives credentials that expire within 5 minutes and + // a refresh callback that gives invalid credentials. + TestListener listener = new TestListener(); + ConcurrentLinkedQueue tokens = tokenQueue( + "test_user1", + "test_user1_expires", + "test_user1_expires", + "test_user1_1"); + TestCallback callback = createHumanCallback() + .setRefreshToken("refresh") + .setPathSupplier(() -> tokens.remove()) + .setEventListener(listener); + + TestCommandListener commandListener = new TestCommandListener(listener); + + MongoClientSettings clientSettings = createHumanSettings(callback, commandListener); + try (MongoClient mongoClient = createMongoClient(clientSettings)) { + // #. Ensure that a find operation adds a new entry to the cache. + performFind(mongoClient); + assertEquals(Arrays.asList( + "isMaster started", + "isMaster succeeded", + // no speculative auth. 
Send principal request: + "saslStart started", + "saslStart succeeded", + "onRequest invoked (Refresh Token: none - IdpInfo: present)", + "read access token: test_user1", + // the refresh token from the callback is cached here + // send jwt: + "saslContinue started", + "saslContinue succeeded", + "find started", + "find succeeded" + ), listener.getEventStrings()); + listener.clear(); + + // #. Ensure that a subsequent find operation results in a 391 error. + failCommand(391, 1, "find"); + // ensure that the operation entirely fails, after attempting both potential fallback callbacks + assertThrows(MongoSecurityException.class, () -> performFind(mongoClient)); + assertEquals(Arrays.asList( + "find started", + "find failed", // reauth 391; current access token is invalid + // fall back to refresh token, from prior find + "onRequest invoked (Refresh Token: present - IdpInfo: present)", + "read access token: test_user1_expires", + "saslStart started", + "saslStart failed", // it is expired, fails immediately + // fall back to principal request, and non-refresh callback: + "saslStart started", + "saslStart succeeded", + "onRequest invoked (Refresh Token: none - IdpInfo: present)", + "read access token: test_user1_expires", + "saslContinue started", + "saslContinue failed" // also fails due to 391 + ), listener.getEventStrings()); + listener.clear(); + + // #. Ensure that the cache value cleared. + failCommand(391, 1, "find"); + performFind(mongoClient); + assertEquals(Arrays.asList( + "find started", + "find failed", + // falling back to principal request, onRequest callback. + // this implies that the cache has been cleared during the + // preceding find operation. + "saslStart started", + "saslStart succeeded", + "onRequest invoked (Refresh Token: none - IdpInfo: present)", + "read access token: test_user1_1", + "saslContinue started", + "saslContinue succeeded", + // auth has finished + "find started", + "find succeeded" + ), listener.getEventStrings()); + listener.clear(); + } + } + + + private MongoClientSettings createSettings(final OidcCallback callback) { + return createSettings(getOidcUri(), callback, null); + } + + public MongoClientSettings createSettings( + final String connectionString, + @Nullable final TestCallback callback) { + return createSettings(connectionString, callback, null); + } + + private MongoClientSettings createSettings( + final String connectionString, + @Nullable final OidcCallback callback, + @Nullable final CommandListener commandListener) { + String cleanedConnectionString = callback == null ? 
connectionString : connectionString + .replace("ENVIRONMENT:azure,", "") + .replace("ENVIRONMENT:gcp,", "") + .replace("&authMechanismProperties=ENVIRONMENT:k8s", "") + .replace("ENVIRONMENT:test,", ""); + return createSettings(cleanedConnectionString, callback, commandListener, OIDC_CALLBACK_KEY); + } + + private MongoClientSettings createHumanSettings( + final OidcCallback callback, @Nullable final TestCommandListener commandListener) { + return createHumanSettings(getOidcUri(), callback, commandListener); + } + + private MongoClientSettings createHumanSettings( + final String connectionString, + @Nullable final OidcCallback callback, + @Nullable final CommandListener commandListener) { + return createSettings(connectionString, callback, commandListener, OIDC_HUMAN_CALLBACK_KEY); + } + + private MongoClientSettings createSettings( + final String connectionString, + final @Nullable OidcCallback callback, + @Nullable final CommandListener commandListener, + final String oidcCallbackKey) { + ConnectionString cs = new ConnectionString(connectionString); + MongoCredential credential = assertNotNull(cs.getCredential()); + if (callback != null) { + credential = credential.withMechanismProperty(oidcCallbackKey, callback); + } + MongoClientSettings.Builder builder = MongoClientSettings.builder() + .applicationName(appName) + .applyConnectionString(cs) + .retryReads(false) + .credential(credential); + if (commandListener != null) { + builder.addCommandListener(commandListener); + } + return builder.build(); + } + + private MongoClientSettings createSettings( + final String connectionString, + @Nullable final OidcCallback callback, + @Nullable final CommandListener commandListener, + final String oidcCallbackKey, + @Nullable final List allowedHosts) { + ConnectionString cs = new ConnectionString(connectionString); + MongoCredential credential = cs.getCredential() + .withMechanismProperty(oidcCallbackKey, callback) + .withMechanismProperty(ALLOWED_HOSTS_KEY, allowedHosts); + MongoClientSettings.Builder builder = MongoClientSettings.builder() + .applicationName(appName) + .applyConnectionString(cs) + .credential(credential); + if (commandListener != null) { + builder.addCommandListener(commandListener); + } + return builder.build(); + } + + private MongoClientSettings createSettingsMulti(@Nullable final String user, final OidcCallback callback) { + return createSettingsHuman(user, callback, getOidcUriMulti()); + } + + private MongoClientSettings createSettingsHuman(@Nullable final String user, final OidcCallback callback, final String oidcUri) { + ConnectionString cs = new ConnectionString(oidcUri); + MongoCredential credential = MongoCredential.createOidcCredential(user) + .withMechanismProperty(OIDC_HUMAN_CALLBACK_KEY, callback); + return MongoClientSettings.builder() + .applicationName(appName) + .applyConnectionString(cs) + .retryReads(false) + .credential(credential) + .build(); + } + + private void performFind(final MongoClientSettings settings) { + try (MongoClient mongoClient = createMongoClient(settings)) { + performFind(mongoClient); + } + } + + private void assertFindFails( + final MongoClientSettings settings, + final Class expectedExceptionOrCause, + final String expectedMessage) { + try (MongoClient mongoClient = createMongoClient(settings)) { + assertCause(expectedExceptionOrCause, expectedMessage, () -> performFind(mongoClient)); + } + } + + private static void performFind(final MongoClient mongoClient) { + performFind(mongoClient, null); + } + + private static void performFind(final 
MongoClient mongoClient, @Nullable final ClientSession session) { + MongoCollection collection = mongoClient.getDatabase("test").getCollection("test"); + FindIterable findIterable = session == null ? collection.find() : collection.find(session); + findIterable.first(); + } + + protected void delayNextFind() { + + try (MongoClient client = createMongoClient(Fixture.getMongoClientSettings())) { + BsonDocument failPointDocument = new BsonDocument("configureFailPoint", new BsonString("failCommand")) + .append("mode", new BsonDocument("times", new BsonInt32(1))) + .append("data", new BsonDocument() + .append("appName", new BsonString(appName)) + .append("failCommands", new BsonArray(asList(new BsonString("find")))) + .append("blockConnection", new BsonBoolean(true)) + .append("blockTimeMS", new BsonInt32(100))); + client.getDatabase("admin").runCommand(failPointDocument); + } + } + + protected void failCommand(final int code, final int times, final String... commands) { + try (MongoClient mongoClient = createMongoClient(Fixture.getMongoClientSettings())) { + List list = Arrays.stream(commands).map(c -> new BsonString(c)).collect(Collectors.toList()); + BsonDocument failPointDocument = new BsonDocument("configureFailPoint", new BsonString("failCommand")) + .append("mode", new BsonDocument("times", new BsonInt32(times))) + .append("data", new BsonDocument() + .append("appName", new BsonString(appName)) + .append("failCommands", new BsonArray(list)) + .append("errorCode", new BsonInt32(code))); + mongoClient.getDatabase("admin").runCommand(failPointDocument); + } + } + + private void failCommandAndCloseConnection(final String command, final int times) { + try (MongoClient mongoClient = createMongoClient(Fixture.getMongoClientSettings())) { + BsonDocument failPointDocument = new BsonDocument("configureFailPoint", new BsonString("failCommand")) + .append("mode", new BsonDocument("times", new BsonInt32(times))) + .append("data", new BsonDocument() + .append("appName", new BsonString(appName)) + .append("closeConnection", new BsonBoolean(true)) + .append("failCommands", new BsonArray(Arrays.asList(new BsonString(command)))) + ); + mongoClient.getDatabase("admin").runCommand(failPointDocument); + } + } + + public static class TestCallback implements OidcCallback { + private final AtomicInteger invocations = new AtomicInteger(); + @Nullable + private final Integer delayInMilliseconds; + @Nullable + private final String refreshToken; + @Nullable + private final AtomicInteger concurrentTracker; + @Nullable + private final TestListener testListener; + @Nullable + private final Supplier pathSupplier; + + public TestCallback() { + this(null, null, new AtomicInteger(), null, null); + } + + public TestCallback( + @Nullable final String refreshToken, + @Nullable final Integer delayInMilliseconds, + @Nullable final AtomicInteger concurrentTracker, + @Nullable final TestListener testListener, + @Nullable final Supplier pathSupplier) { + this.refreshToken = refreshToken; + this.delayInMilliseconds = delayInMilliseconds; + this.concurrentTracker = concurrentTracker; + this.testListener = testListener; + this.pathSupplier = pathSupplier; + } + + public int getInvocations() { + return invocations.get(); + } + + @Override + public OidcCallbackResult onRequest(final OidcCallbackContext context) { + if (testListener != null) { + testListener.add("onRequest invoked (" + + "Refresh Token: " + (context.getRefreshToken() == null ? "none" : "present") + + " - IdpInfo: " + (context.getIdpInfo() == null ? 
"none" : "present") + + ")"); + } + return callback(context); + } + + private OidcCallbackResult callback(final OidcCallbackContext context) { + if (concurrentTracker != null) { + if (concurrentTracker.get() > 0) { + throw new RuntimeException("Callbacks should not be invoked by multiple threads."); + } + concurrentTracker.incrementAndGet(); + } + try { + invocations.incrementAndGet(); + try { + simulateDelay(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + MongoCredential credential = assertNotNull(new ConnectionString(getOidcUri()).getCredential()); + String oidcEnv = getOidcEnv(); + OidcCallback c; + if (oidcEnv.contains("azure")) { + c = OidcAuthenticator.getAzureCallback(credential); + } else if (oidcEnv.contains("gcp")) { + c = OidcAuthenticator.getGcpCallback(credential); + } else if (oidcEnv.contains("k8s")) { + c = OidcAuthenticator.getK8sCallback(); + } else { + c = getProseTestCallback(); + } + return c.onRequest(context); + + } finally { + if (concurrentTracker != null) { + concurrentTracker.decrementAndGet(); + } + } + } + + private OidcCallback getProseTestCallback() { + return (x) -> { + try { + Path path = Paths.get(pathSupplier == null + ? getTestTokenFilePath() + : pathSupplier.get()); + String accessToken = new String(Files.readAllBytes(path), StandardCharsets.UTF_8); + if (testListener != null) { + testListener.add("read access token: " + path.getFileName()); + } + return new OidcCallbackResult(accessToken, Duration.ZERO, refreshToken); + } catch (IOException e) { + throw new RuntimeException(e); + } + }; + } + + private void simulateDelay() throws InterruptedException { + if (delayInMilliseconds != null) { + Thread.sleep(delayInMilliseconds); + } + } + + public TestCallback setDelayMs(final int milliseconds) { + return new TestCallback( + this.refreshToken, + milliseconds, + this.concurrentTracker, + this.testListener, + this.pathSupplier); + } + + public TestCallback setConcurrentTracker(final AtomicInteger c) { + return new TestCallback( + this.refreshToken, + this.delayInMilliseconds, + c, + this.testListener, + this.pathSupplier); + } + + public TestCallback setEventListener(final TestListener testListener) { + return new TestCallback( + this.refreshToken, + this.delayInMilliseconds, + this.concurrentTracker, + testListener, + this.pathSupplier); + } + + public TestCallback setPathSupplier(final Supplier pathSupplier) { + return new TestCallback( + this.refreshToken, + this.delayInMilliseconds, + this.concurrentTracker, + this.testListener, + pathSupplier); + } + + public TestCallback setRefreshToken(final String token) { + return new TestCallback( + token, + this.delayInMilliseconds, + this.concurrentTracker, + this.testListener, + this.pathSupplier); + } + } + + private ConcurrentLinkedQueue tokenQueue(final String... queue) { + String tokenPath = oidcTokenDirectory(); + return java.util.stream.Stream + .of(queue) + .map(v -> tokenPath + v) + .collect(Collectors.toCollection(ConcurrentLinkedQueue::new)); + } + + public TestCallback createCallback() { + return new TestCallback(); + } + + public TestCallback createHumanCallback() { + return new TestCallback() + .setPathSupplier(() -> oidcTokenDirectory() + "test_user1") + .setRefreshToken("refreshToken"); + } + + private long msElapsedSince(final long timeOfStart) { + return TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - timeOfStart); + } + + private static long minTimeout(final long timeoutMs, final long serverSelectionTimeoutMS) { + long timeoutMsEffective = timeoutMs != 0 ? 
+ long serverSelectionTimeoutMSEffective = serverSelectionTimeoutMS != -1 ? serverSelectionTimeoutMS : Long.MAX_VALUE;
+ return Math.min(timeoutMsEffective, serverSelectionTimeoutMSEffective);
+ }
+}
diff --git a/driver-sync/src/test/resources/logback-test.xml b/driver-sync/src/test/resources/logback-test.xml
new file mode 100644
index 00000000000..022806f0e4e
--- /dev/null
+++ b/driver-sync/src/test/resources/logback-test.xml
@@ -0,0 +1,13 @@
+<configuration>
+
+<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+<encoder>
+<pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n</pattern>
+</encoder>
+</appender>
+
+<root level="INFO">
+<appender-ref ref="STDOUT"/>
+</root>
+
+</configuration>
diff --git a/driver-sync/src/test/unit/com/mongodb/client/MongoClientSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/MongoClientSpecification.groovy
new file mode 100644
index 00000000000..916d8179af5
--- /dev/null
+++ b/driver-sync/src/test/unit/com/mongodb/client/MongoClientSpecification.groovy
@@ -0,0 +1,227 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client
+
+import com.mongodb.MongoClientSettings
+import com.mongodb.MongoDriverInformation
+import com.mongodb.MongoNamespace
+import com.mongodb.ReadConcern
+import com.mongodb.ServerAddress
+import com.mongodb.WriteConcern
+import com.mongodb.client.internal.ChangeStreamIterableImpl
+import com.mongodb.client.internal.ListDatabasesIterableImpl
+import com.mongodb.client.internal.MongoClientImpl
+import com.mongodb.client.internal.MongoDatabaseImpl
+import com.mongodb.client.internal.TestOperationExecutor
+import com.mongodb.connection.ClusterConnectionMode
+import com.mongodb.connection.ClusterDescription
+import com.mongodb.connection.ClusterType
+import com.mongodb.connection.ServerConnectionState
+import com.mongodb.connection.ServerDescription
+import com.mongodb.connection.ServerType
+import com.mongodb.internal.TimeoutSettings
+import com.mongodb.internal.client.model.changestream.ChangeStreamLevel
+import com.mongodb.internal.connection.ClientMetadata
+import com.mongodb.internal.connection.Cluster
+import org.bson.BsonDocument
+import org.bson.Document
+import org.bson.codecs.UuidCodec
+import org.bson.codecs.ValueCodecProvider
+import org.bson.codecs.configuration.CodecRegistry
+import spock.lang.Specification
+
+import static com.mongodb.CustomMatchers.isTheSameAs
+import static com.mongodb.MongoClientSettings.getDefaultCodecRegistry
+import static com.mongodb.ReadPreference.primary
+import static com.mongodb.ReadPreference.secondary
+import static com.mongodb.client.internal.TestHelper.execute
+import static java.util.concurrent.TimeUnit.SECONDS
+import static org.bson.UuidRepresentation.C_SHARP_LEGACY
+import static org.bson.UuidRepresentation.UNSPECIFIED
+import static org.bson.codecs.configuration.CodecRegistries.fromProviders
+import static org.bson.codecs.configuration.CodecRegistries.withUuidRepresentation
+import static spock.util.matcher.HamcrestSupport.expect
+
+class MongoClientSpecification extends Specification {
+
+ private static final CodecRegistry 
CODEC_REGISTRY = fromProviders(new ValueCodecProvider()) + private static final TimeoutSettings TIMEOUT_SETTINGS = new TimeoutSettings(30_000, 10_000, 0, null, SECONDS.toMillis(120)) + + def 'should pass the correct settings to getDatabase'() { + given: + def settings = MongoClientSettings.builder() + .readPreference(secondary()) + .writeConcern(WriteConcern.MAJORITY) + .readConcern(ReadConcern.MAJORITY) + .retryWrites(true) + .codecRegistry(CODEC_REGISTRY) + .build() + def client = new MongoClientImpl(Stub(Cluster), null, settings, null, new TestOperationExecutor([])) + + when: + def database = client.getDatabase('name') + + then: + expect database, isTheSameAs(expectedDatabase) + + where: + expectedDatabase << new MongoDatabaseImpl('name', withUuidRepresentation(CODEC_REGISTRY, UNSPECIFIED), secondary(), + WriteConcern.MAJORITY, true, true, ReadConcern.MAJORITY, UNSPECIFIED, null, + TIMEOUT_SETTINGS, new TestOperationExecutor([])) + } + + def 'should use ListDatabasesIterableImpl correctly'() { + given: + def executor = new TestOperationExecutor([null, null]) + def client = new MongoClientImpl(Stub(Cluster), null, MongoClientSettings.builder().build(), null, executor) + def listDatabasesMethod = client.&listDatabases + def listDatabasesNamesMethod = client.&listDatabaseNames + + when: + def listDatabasesIterable = execute(listDatabasesMethod, session) + + then: + expect listDatabasesIterable, isTheSameAs(new ListDatabasesIterableImpl<>(session, Document, + withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED), primary(), executor, true, TIMEOUT_SETTINGS)) + + when: + listDatabasesIterable = execute(listDatabasesMethod, session, BsonDocument) + + then: + expect listDatabasesIterable, isTheSameAs(new ListDatabasesIterableImpl<>(session, BsonDocument, + withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED), primary(), executor, true, TIMEOUT_SETTINGS)) + + when: + def listDatabaseNamesIterable = execute(listDatabasesNamesMethod, session) as MongoIterable + + then: + // listDatabaseNamesIterable is an instance of a MappingIterable, so have to get the mapped iterable inside it + expect listDatabaseNamesIterable.getMapped(), isTheSameAs(new ListDatabasesIterableImpl<>(session, BsonDocument, + withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED), primary(), executor, true, TIMEOUT_SETTINGS) + .nameOnly(true)) + + cleanup: + client?.close() + + where: + session << [null, Stub(ClientSession)] + } + + def 'should create ChangeStreamIterable correctly'() { + given: + def executor = new TestOperationExecutor([]) + def namespace = new MongoNamespace('admin', '_ignored') + def settings = MongoClientSettings.builder() + .readPreference(secondary()) + .readConcern(ReadConcern.MAJORITY) + .codecRegistry(getDefaultCodecRegistry()) + .build() + def readPreference = settings.getReadPreference() + def readConcern = settings.getReadConcern() + def client = new MongoClientImpl(Stub(Cluster), null, settings, null, executor) + def watchMethod = client.&watch + + when: + def changeStreamIterable = execute(watchMethod, session) + + then: + expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, + withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED), + readPreference, readConcern, executor, [], Document, ChangeStreamLevel.CLIENT, true, TIMEOUT_SETTINGS), + ['codec']) + + when: + changeStreamIterable = execute(watchMethod, session, [new Document('$match', 1)]) + + then: + expect changeStreamIterable, isTheSameAs(new 
ChangeStreamIterableImpl<>(session, namespace, + withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED), + readPreference, readConcern, executor, [new Document('$match', 1)], Document, ChangeStreamLevel.CLIENT, + true, TIMEOUT_SETTINGS), ['codec']) + + when: + changeStreamIterable = execute(watchMethod, session, [new Document('$match', 1)], BsonDocument) + + then: + expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, + withUuidRepresentation(getDefaultCodecRegistry(), UNSPECIFIED), + readPreference, readConcern, executor, [new Document('$match', 1)], BsonDocument, + ChangeStreamLevel.CLIENT, true, TIMEOUT_SETTINGS), ['codec']) + + where: + session << [null, Stub(ClientSession)] + } + + def 'should validate the ChangeStreamIterable pipeline data correctly'() { + given: + def executor = new TestOperationExecutor([]) + def client = new MongoClientImpl(Stub(Cluster), null, MongoClientSettings.builder().build(), null, + executor) + + when: + client.watch((Class) null) + + then: + thrown(IllegalArgumentException) + + when: + client.watch([null]).into([]) + + then: + thrown(IllegalArgumentException) + } + + def 'should get the cluster description'() { + given: + def clusterDescription = new ClusterDescription(ClusterConnectionMode.SINGLE, ClusterType.STANDALONE, + [ServerDescription.builder() + .address(new ServerAddress()) + .type(ServerType.UNKNOWN) + .state(ServerConnectionState.CONNECTING) + .build()]) + def driverInformation = MongoDriverInformation.builder().build() + def cluster = Mock(Cluster) { + 1 * getCurrentDescription() >> { + clusterDescription + } + 1 * getClientMetadata() >> new ClientMetadata("test", driverInformation) + } + def settings = MongoClientSettings.builder().build() + def client = new MongoClientImpl(cluster, driverInformation, settings, null, new TestOperationExecutor([])) + + expect: + client.getClusterDescription() == clusterDescription + } + + def 'should create registry reflecting UuidRepresentation'() { + given: + def codecRegistry = fromProviders([new ValueCodecProvider()]) + def settings = MongoClientSettings.builder() + .codecRegistry(codecRegistry) + .uuidRepresentation(C_SHARP_LEGACY) + .build() + + when: + def client = new MongoClientImpl(Stub(Cluster), null, settings, null, new TestOperationExecutor([])) + + then: + (client.getCodecRegistry().get(UUID) as UuidCodec).getUuidRepresentation() == C_SHARP_LEGACY + + cleanup: + client?.close() + } +} diff --git a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketSpecification.groovy new file mode 100644 index 00000000000..cb34236c627 --- /dev/null +++ b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketSpecification.groovy @@ -0,0 +1,920 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.gridfs + +import com.mongodb.MongoClientSettings +import com.mongodb.MongoGridFSException +import com.mongodb.MongoNamespace +import com.mongodb.ReadConcern +import com.mongodb.WriteConcern +import com.mongodb.client.ClientSession +import com.mongodb.client.FindIterable +import com.mongodb.client.ListIndexesIterable +import com.mongodb.client.MongoCollection +import com.mongodb.client.MongoCursor +import com.mongodb.client.gridfs.model.GridFSDownloadOptions +import com.mongodb.client.gridfs.model.GridFSFile +import com.mongodb.client.internal.MongoDatabaseImpl +import com.mongodb.client.internal.OperationExecutor +import com.mongodb.client.internal.TestOperationExecutor +import com.mongodb.client.result.DeleteResult +import com.mongodb.client.result.UpdateResult +import com.mongodb.internal.TimeoutSettings +import com.mongodb.internal.operation.BatchCursor +import com.mongodb.internal.operation.FindOperation +import org.bson.BsonBinary +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonObjectId +import org.bson.BsonString +import org.bson.Document +import org.bson.codecs.DocumentCodecProvider +import org.bson.types.ObjectId +import spock.lang.Specification +import spock.lang.Unroll + +import java.util.concurrent.TimeUnit + +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS +import static com.mongodb.CustomMatchers.isTheSameAs +import static com.mongodb.ReadPreference.primary +import static com.mongodb.ReadPreference.secondary +import static org.bson.UuidRepresentation.JAVA_LEGACY +import static org.bson.codecs.configuration.CodecRegistries.fromProviders +import static spock.util.matcher.HamcrestSupport.expect + +@SuppressWarnings('ClosureAsLastMethodParameter') +class GridFSBucketSpecification extends Specification { + + def readConcern = ReadConcern.DEFAULT + def registry = MongoClientSettings.getDefaultCodecRegistry() + def database = databaseWithExecutor(Stub(OperationExecutor)) + def databaseWithExecutor(OperationExecutor executor) { + new MongoDatabaseImpl('test', registry, primary(), WriteConcern.ACKNOWLEDGED, false, false, readConcern, + JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + } + + def 'should return the correct bucket name'() { + when: + def bucketName = new GridFSBucketImpl(database).getBucketName() + + then: + bucketName == 'fs' + + when: + bucketName = new GridFSBucketImpl(database, 'custom').getBucketName() + + then: + bucketName == 'custom' + } + + def 'should behave correctly when using withChunkSizeBytes'() { + given: + def newChunkSize = 200 + + when: + def gridFSBucket = new GridFSBucketImpl(database).withChunkSizeBytes(newChunkSize) + + then: + gridFSBucket.getChunkSizeBytes() == newChunkSize + } + + def 'should behave correctly when using withReadPreference'() { + given: + def filesCollection = Mock(MongoCollection) + def chunksCollection = Mock(MongoCollection) + def newReadPreference = secondary() + + when: + def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, chunksCollection) + .withReadPreference(newReadPreference) + + then: + 1 * filesCollection.withReadPreference(newReadPreference) >> filesCollection + 1 * chunksCollection.withReadPreference(newReadPreference) >> chunksCollection + + when: + gridFSBucket.getReadConcern() + + then: + 1 * filesCollection.getReadConcern() + } + + def 'should behave correctly when using withWriteConcern'() { + given: + def filesCollection = Mock(MongoCollection) + def chunksCollection = Mock(MongoCollection) + def newWriteConcern = 
WriteConcern.MAJORITY + + when: + def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, chunksCollection) + .withWriteConcern(newWriteConcern) + + then: + 1 * filesCollection.withWriteConcern(newWriteConcern) >> filesCollection + 1 * chunksCollection.withWriteConcern(newWriteConcern) >> chunksCollection + + when: + gridFSBucket.getWriteConcern() + + then: + 1 * filesCollection.getWriteConcern() + } + + def 'should behave correctly when using withReadConcern'() { + given: + def filesCollection = Mock(MongoCollection) + def chunksCollection = Mock(MongoCollection) + def newReadConcern = ReadConcern.MAJORITY + + when: + def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, chunksCollection) + .withReadConcern(newReadConcern) + + then: + 1 * filesCollection.withReadConcern(newReadConcern) >> filesCollection + 1 * chunksCollection.withReadConcern(newReadConcern) >> chunksCollection + + when: + gridFSBucket.getReadConcern() + + then: + 1 * filesCollection.getReadConcern() >> newReadConcern + } + + def 'should get defaults from MongoDatabase'() { + given: + def defaultChunkSizeBytes = 255 * 1024 + def database = new MongoDatabaseImpl('test', fromProviders(new DocumentCodecProvider()), secondary(), WriteConcern.ACKNOWLEDGED, + false, false, readConcern, JAVA_LEGACY, null, + new TimeoutSettings(0, 0, 0, null, 0), + new TestOperationExecutor([])) + + when: + def gridFSBucket = new GridFSBucketImpl(database) + + then: + gridFSBucket.getChunkSizeBytes() == defaultChunkSizeBytes + gridFSBucket.getReadPreference() == database.getReadPreference() + gridFSBucket.getWriteConcern() == database.getWriteConcern() + gridFSBucket.getReadConcern() == database.getReadConcern() + } + + def 'should create the expected GridFSUploadStream'() { + given: + def filesCollection = Stub(MongoCollection) + def chunksCollection = Stub(MongoCollection) + filesCollection.getTimeout(TimeUnit.MILLISECONDS) >> null + chunksCollection.getTimeout(TimeUnit.MILLISECONDS) >> null + + def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, chunksCollection) + + when: + def stream + if (clientSession != null) { + stream = gridFSBucket.openUploadStream(clientSession, 'filename') + } else { + stream = gridFSBucket.openUploadStream('filename') + } + + then: + expect stream, isTheSameAs(new GridFSUploadStreamImpl(clientSession, filesCollection, chunksCollection, stream.getId(), 'filename', + 255, null, null), ['closeLock']) + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should upload from stream'() { + given: + def findIterable = Mock(FindIterable) + def filesCollection = Mock(MongoCollection) + def chunksCollection = Mock(MongoCollection) + def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, chunksCollection) + def contentBytes = 'content' as byte[] + def inputStream = new ByteArrayInputStream(contentBytes) + + when: + gridFSBucket.uploadFromStream('filename', inputStream) + + then: 'index check' + 1 * filesCollection.withDocumentClass(Document) >> filesCollection + 1 * filesCollection.withReadPreference(primary()) >> filesCollection + 1 * filesCollection.find() >> findIterable + 1 * findIterable.projection(new Document('_id', 1)) >> findIterable + 1 * findIterable.first() >> new Document() + + then: + 1 * chunksCollection.insertOne(_) + + then: + 1 * filesCollection.insertOne(_) + } + + def 'should clean up any chunks when upload from stream throws an IOException'() { + given: + def findIterable = Mock(FindIterable) + def filesCollection = 
Mock(MongoCollection) + def chunksCollection = Mock(MongoCollection) + def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, chunksCollection) + def inputStream = Mock(InputStream) { + 2 * read(_) >> 255 >> { throw new IOException('stream failure') } + } + + when: + gridFSBucket.uploadFromStream('filename', inputStream) + + then: 'index check' + 1 * filesCollection.withDocumentClass(Document) >> filesCollection + 1 * filesCollection.withReadPreference(primary()) >> filesCollection + 1 * filesCollection.find() >> findIterable + 1 * findIterable.projection(new Document('_id', 1)) >> findIterable + 1 * findIterable.first() >> new Document() + + then: + 1 * chunksCollection.insertOne(_) + + then: + 1 * chunksCollection.deleteMany(_) + + then: + 0 * filesCollection.insertOne(_) + + then: + def exception = thrown(MongoGridFSException) + exception.getMessage() == 'IOException when reading from the InputStream' + } + + def 'should not clean up any chunks when upload throws an exception'() { + given: + def findIterable = Mock(FindIterable) + def filesCollection = Mock(MongoCollection) + def chunksCollection = Mock(MongoCollection) + def alternativeException = new MongoGridFSException('Alternative failure') + def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, chunksCollection) + def inputStream = Mock(InputStream) { + 2 * read(_) >> 255 >> { throw alternativeException } + } + + when: + gridFSBucket.uploadFromStream('filename', inputStream) + + then: 'index check' + 1 * filesCollection.withDocumentClass(Document) >> filesCollection + 1 * filesCollection.withReadPreference(primary()) >> filesCollection + 1 * filesCollection.find() >> findIterable + 1 * findIterable.projection(new Document('_id', 1)) >> findIterable + 1 * findIterable.first() >> new Document() + + then: + 1 * chunksCollection.insertOne(_) + + then: + 0 * chunksCollection.deleteMany(_) + + then: + 0 * filesCollection.insertOne(_) + + then: + def exception = thrown(MongoGridFSException) + exception == alternativeException + } + + def 'should create the expected GridFSDownloadStream'() { + given: + def fileId = new BsonObjectId(new ObjectId()) + def fileInfo = new GridFSFile(fileId, 'File 1', 10, 255, new Date(), new Document()) + def findIterable = Mock(FindIterable) + def filesCollection = Mock(MongoCollection) + filesCollection.getTimeout(TimeUnit.MILLISECONDS) >> null + def chunksCollection = Stub(MongoCollection) + chunksCollection.getTimeout(TimeUnit.MILLISECONDS) >> null + def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, chunksCollection) + + when: + def stream + if (clientSession != null) { + stream = gridFSBucket.openDownloadStream(clientSession, fileId.getValue()) + } else { + stream = gridFSBucket.openDownloadStream(fileId.getValue()) + } + + then: + if (clientSession != null) { + 1 * filesCollection.find(clientSession) >> findIterable + } else { + 1 * filesCollection.find() >> findIterable + } + 1 * findIterable.filter(_) >> findIterable + 1 * findIterable.first() >> fileInfo + + then: + expect stream, isTheSameAs(new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection, + null), ['closeLock', 'cursorLock']) + + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should download to stream'() { + given: + def fileId = new ObjectId() + def bsonFileId = new BsonObjectId(fileId) + def fileInfo = new GridFSFile(bsonFileId, 'filename', 10, 255, new Date(), new Document()) + def mongoCursor = Mock(MongoCursor) + def findIterable = Mock(FindIterable) + 
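// The stubbed cursor below returns a single chunk document; its "data" bytes are
+ // exactly what downloadToStream is expected to copy into the OutputStream.
+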
def filesCollection = Mock(MongoCollection) + def tenBytes = new byte[10] + def chunkDocument = new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonInt32(0)) + .append('data', new BsonBinary(tenBytes)) + def chunksCollection = Mock(MongoCollection) + def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, chunksCollection) + def outputStream = new ByteArrayOutputStream(10) + + when: + if (clientSession != null) { + gridFSBucket.downloadToStream(clientSession, fileId, outputStream) + } else { + gridFSBucket.downloadToStream(fileId, outputStream) + } + outputStream.close() + + then: + if (clientSession != null) { + 1 * filesCollection.find(clientSession) >> findIterable + } else { + 1 * filesCollection.find() >> findIterable + } + 1 * findIterable.filter(new BsonDocument('_id', bsonFileId)) >> findIterable + 1 * findIterable.first() >> fileInfo + + then: + if (clientSession != null) { + 1 * chunksCollection.find(clientSession, _) >> findIterable + } else { + 1 * chunksCollection.find(_) >> findIterable + } + 1 * findIterable.sort(_) >> findIterable + 1 * findIterable.batchSize(_) >> findIterable + 1 * findIterable.iterator() >> mongoCursor + 1 * mongoCursor.hasNext() >> true + 1 * mongoCursor.next() >> chunkDocument + + then: + outputStream.toByteArray() == tenBytes + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should download to stream using BsonValue'() { + given: + def bsonFileId = new BsonString('1') + def fileInfo = new GridFSFile(bsonFileId, 'filename', 10L, 255, new Date(), new Document()) + def mongoCursor = Mock(MongoCursor) + def findIterable = Mock(FindIterable) + def filesCollection = Mock(MongoCollection) + def tenBytes = new byte[10] + def chunkDocument = new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonInt32(0)) + .append('data', new BsonBinary(tenBytes)) + def chunksCollection = Mock(MongoCollection) + def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, chunksCollection) + def outputStream = new ByteArrayOutputStream(10) + + when: + if (clientSession != null) { + gridFSBucket.downloadToStream(clientSession, bsonFileId, outputStream) + } else { + gridFSBucket.downloadToStream(bsonFileId, outputStream) + } + outputStream.close() + + then: + if (clientSession != null) { + 1 * filesCollection.find(clientSession) >> findIterable + } else { + 1 * filesCollection.find() >> findIterable + } + 1 * findIterable.filter(new BsonDocument('_id', bsonFileId)) >> findIterable + 1 * findIterable.first() >> fileInfo + + then: + if (clientSession != null) { + 1 * chunksCollection.find(clientSession, _) >> findIterable + } else { + 1 * chunksCollection.find(_) >> findIterable + } + 1 * findIterable.sort(_) >> findIterable + 1 * findIterable.batchSize(_) >> findIterable + 1 * findIterable.iterator() >> mongoCursor + 1 * mongoCursor.hasNext() >> true + 1 * mongoCursor.next() >> chunkDocument + + then: + outputStream.toByteArray() == tenBytes + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should download to stream by name'() { + given: + def filename = 'filename' + def fileId = new ObjectId() + def bsonFileId = new BsonObjectId(fileId) + def fileInfo = new GridFSFile(bsonFileId, filename, 10, 255, new Date(), new Document()) + def mongoCursor = Mock(MongoCursor) + def gridFsFileFindIterable = Mock(FindIterable) + def findChunkIterable = Mock(FindIterable) + def filesCollection = Mock(MongoCollection) + def tenBytes = new byte[10] + def chunkDocument = new BsonDocument('files_id', 
fileInfo.getId()) + .append('n', new BsonInt32(0)) + .append('data', new BsonBinary(tenBytes)) + def chunksCollection = Mock(MongoCollection) + def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, chunksCollection) + def outputStream = new ByteArrayOutputStream(10) + + when: + if (clientSession != null) { + gridFSBucket.downloadToStream(clientSession, filename, outputStream) + } else { + gridFSBucket.downloadToStream(filename, outputStream) + } + outputStream.close() + + then: + if (clientSession != null) { + 1 * filesCollection.find(clientSession) >> gridFsFileFindIterable + } else { + 1 * filesCollection.find() >> gridFsFileFindIterable + } + 1 * gridFsFileFindIterable.filter(new Document('filename', filename)) >> gridFsFileFindIterable + 1 * gridFsFileFindIterable.skip(_) >> gridFsFileFindIterable + 1 * gridFsFileFindIterable.sort(_) >> gridFsFileFindIterable + 1 * gridFsFileFindIterable.first() >> fileInfo + + if (clientSession != null) { + 1 * chunksCollection.find(clientSession, _) >> findChunkIterable + } else { + 1 * chunksCollection.find(_) >> findChunkIterable + } + 1 * findChunkIterable.sort(_) >> findChunkIterable + 1 * findChunkIterable.batchSize(_) >> findChunkIterable + 1 * findChunkIterable.iterator() >> mongoCursor + 1 * mongoCursor.hasNext() >> true + 1 * mongoCursor.next() >> chunkDocument + + then: + outputStream.toByteArray() == tenBytes + + then: + 1 * mongoCursor.close() + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should throw an exception if file not found'() { + given: + def fileId = new ObjectId() + def bsonFileId = new BsonObjectId(fileId) + def findIterable = Mock(FindIterable) + def filesCollection = Mock(MongoCollection) + def chunksCollection = Stub(MongoCollection) + def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, chunksCollection) + + when: + if (clientSession != null) { + gridFSBucket.openDownloadStream(clientSession, fileId) + } else { + gridFSBucket.openDownloadStream(fileId) + } + + then: + if (clientSession != null) { + 1 * filesCollection.find(clientSession) >> findIterable + } else { + 1 * filesCollection.find() >> findIterable + } + + 1 * findIterable.filter(new Document('_id', bsonFileId)) >> findIterable + 1 * findIterable.first() >> null + + then: + thrown(MongoGridFSException) + + where: + clientSession << [null, Stub(ClientSession)] + } + + @Unroll + def 'should create the expected GridFSDownloadStream when opening by name with version: #version'() { + given: + def filename = 'filename' + def fileId = new ObjectId() + def bsonFileId = new BsonObjectId(fileId) + def fileInfo = new GridFSFile(bsonFileId, filename, 10, 255, new Date(), new Document()) + def findIterable = Mock(FindIterable) + def filesCollection = Mock(MongoCollection) + filesCollection.getTimeout(TimeUnit.MILLISECONDS) >> null + def chunksCollection = Stub(MongoCollection) + chunksCollection.getTimeout(TimeUnit.MILLISECONDS) >> null + def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, chunksCollection) + + when: + def stream = gridFSBucket.openDownloadStream(filename, new GridFSDownloadOptions().revision(version)) + + then: + 1 * filesCollection.find() >> findIterable + 1 * findIterable.filter(new Document('filename', filename)) >> findIterable + + then: + 1 * findIterable.skip(skip) >> findIterable + + then: + 1 * findIterable.sort(new Document('uploadDate', sortOrder)) >> findIterable + 1 * findIterable.first() >> fileInfo + + then: + expect stream, isTheSameAs(new GridFSDownloadStreamImpl(null, 
fileInfo, chunksCollection, null), ['closeLock', 'cursorLock']) + + where: + version | skip | sortOrder + 0 | 0 | 1 + 1 | 1 | 1 + 2 | 2 | 1 + 3 | 3 | 1 + -1 | 0 | -1 + -2 | 1 | -1 + -3 | 2 | -1 + } + + def 'should create the expected GridFSFindIterable'() { + given: + def collection = Mock(MongoCollection) + def findIterable = Mock(FindIterable) + def gridFSBucket = new GridFSBucketImpl('fs', 255, collection, Stub(MongoCollection)) + + when: + def result + if (clientSession != null) { + result = gridFSBucket.find(clientSession) + } else { + result = gridFSBucket.find() + } + then: + if (clientSession != null) { + 1 * collection.find(clientSession) >> findIterable + } else { + 1 * collection.find() >> findIterable + } + expect result, isTheSameAs(new GridFSFindIterableImpl(findIterable)) + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should execute the expected FindOperation when finding a file'() { + given: + def executor = new TestOperationExecutor([Stub(BatchCursor), Stub(BatchCursor)]) + def database = databaseWithExecutor(executor) + def gridFSBucket = new GridFSBucketImpl(database) + def decoder = registry.get(GridFSFile) + + when: + gridFSBucket.find().iterator() + + then: + executor.getReadPreference() == primary() + expect executor.getReadOperation(), isTheSameAs(new FindOperation(new MongoNamespace('test.fs.files'), decoder) + .filter(new BsonDocument())) + + when: + def filter = new BsonDocument('filename', new BsonString('filename')) + def readConcern = ReadConcern.MAJORITY + gridFSBucket.withReadPreference(secondary()).withReadConcern(readConcern).find(filter).iterator() + + then: + executor.getReadPreference() == secondary() + expect executor.getReadOperation(), isTheSameAs( + new FindOperation(new MongoNamespace('test.fs.files'), decoder).filter(filter)) + } + + def 'should throw an exception if file not found when opening by name'() { + given: + def filesCollection = Mock(MongoCollection) + def findIterable = Mock(FindIterable) + def chunksCollection = Stub(MongoCollection) + def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, chunksCollection) + + when: + if (clientSession != null) { + gridFSBucket.openDownloadStream(clientSession, 'filename') + } else { + gridFSBucket.openDownloadStream('filename') + } + + then: + if (clientSession != null) { + 1 * filesCollection.find(clientSession) >> findIterable + } else { + 1 * filesCollection.find() >> findIterable + } + + 1 * findIterable.filter(new Document('filename', 'filename')) >> findIterable + 1 * findIterable.skip(0) >> findIterable + 1 * findIterable.sort(new Document('uploadDate', -1)) >> findIterable + 1 * findIterable.first() >> null + + then: + thrown(MongoGridFSException) + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should create indexes on write'() { + given: + def filesCollection = Mock(MongoCollection) + def chunksCollection = Mock(MongoCollection) + def listIndexesIterable = Mock(ListIndexesIterable) + def findIterable = Mock(FindIterable) + def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, chunksCollection) + + when: + if (clientSession != null) { + gridFSBucket.openUploadStream(clientSession, 'filename') + } else { + gridFSBucket.openUploadStream('filename') + } + + then: + 1 * filesCollection.withDocumentClass(Document) >> filesCollection + 1 * filesCollection.withReadPreference(primary()) >> filesCollection + if (clientSession != null) { + 1 * filesCollection.find(clientSession) >> findIterable + } else { + 1 * 
filesCollection.find() >> findIterable + } + 1 * findIterable.projection(new Document('_id', 1)) >> findIterable + 1 * findIterable.first() >> null + + then: + 1 * filesCollection.withReadPreference(primary()) >> filesCollection + if (clientSession != null) { + 1 * filesCollection.listIndexes(clientSession) >> listIndexesIterable + } else { + 1 * filesCollection.listIndexes() >> listIndexesIterable + } + 1 * listIndexesIterable.into(_) >> [] + + then: + if (clientSession != null) { + 1 * filesCollection.createIndex(clientSession, { index -> index == Document.parse('{"filename": 1, "uploadDate": 1 }') }, + { indexOptions -> !indexOptions.isUnique() }) + } else { + 1 * filesCollection.createIndex({ index -> index == Document.parse('{"filename": 1, "uploadDate": 1 }') }, + { indexOptions -> !indexOptions.isUnique() }) + } + + then: + 1 * chunksCollection.withReadPreference(primary()) >> chunksCollection + if (clientSession != null) { + 1 * chunksCollection.listIndexes(clientSession) >> listIndexesIterable + } else { + 1 * chunksCollection.listIndexes() >> listIndexesIterable + } + 1 * listIndexesIterable.into(_) >> [] + + then: + if (clientSession != null) { + 1 * chunksCollection.createIndex(clientSession, { index -> index == Document.parse('{"files_id": 1, "n": 1}') }, + { indexOptions -> indexOptions.isUnique() }) + } else { + 1 * chunksCollection.createIndex({ index -> index == Document.parse('{"files_id": 1, "n": 1}') }, + { indexOptions -> indexOptions.isUnique() }) + } + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should not create indexes if they already exist'() { + given: + def filesCollection = Mock(MongoCollection) + def chunksCollection = Mock(MongoCollection) + def listIndexesIterable = Mock(ListIndexesIterable) + def findIterable = Mock(FindIterable) + def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, chunksCollection) + + when: + gridFSBucket.openUploadStream('filename') + + then: + 1 * filesCollection.withDocumentClass(Document) >> filesCollection + 1 * filesCollection.withReadPreference(primary()) >> filesCollection + 1 * filesCollection.find() >> findIterable + 1 * findIterable.projection(new Document('_id', 1)) >> findIterable + 1 * findIterable.first() >> null + + then: + 1 * filesCollection.withReadPreference(primary()) >> filesCollection + 1 * filesCollection.listIndexes() >> listIndexesIterable + 1 * listIndexesIterable.into(_) >> [Document.parse('{"key": {"_id": 1}}'), + Document.parse('{"key": {"filename": 1, "uploadDate": 1 }}')] + + then: + 0 * filesCollection.createIndex(_) + + then: + 1 * chunksCollection.withReadPreference(primary()) >> chunksCollection + 1 * chunksCollection.listIndexes() >> listIndexesIterable + 1 * listIndexesIterable.into(_) >> [Document.parse('{"key": {"_id": 1}}'), + Document.parse('{"key": {"files_id": 1, "n": 1 }}')] + + then: + 0 * chunksCollection.createIndex(_) + } + + def 'should delete from files collection then chunks collection'() { + given: + def fileId = new ObjectId() + def filesCollection = Mock(MongoCollection) + def chunksCollection = Mock(MongoCollection) + def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, chunksCollection) + + when: + gridFSBucket.delete(fileId) + + then: 'Delete from the files collection first' + 1 * filesCollection.deleteOne(new Document('_id', new BsonObjectId(fileId))) >> DeleteResult.acknowledged(1) + + then: + 1 * chunksCollection.deleteMany(new Document('files_id', new BsonObjectId(fileId))) + } + + def 'should throw an exception when 
deleting if no record in the files collection'() { + given: + def fileId = new ObjectId() + def filesCollection = Mock(MongoCollection) + def chunksCollection = Mock(MongoCollection) + def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, chunksCollection) + + when: + gridFSBucket.delete(fileId) + + then: 'Delete from the files collection first' + 1 * filesCollection.deleteOne(new Document('_id', new BsonObjectId(fileId))) >> DeleteResult.acknowledged(0) + + then: 'Should still delete any orphan chunks' + 1 * chunksCollection.deleteMany(new Document('files_id', new BsonObjectId(fileId))) + + then: + thrown(MongoGridFSException) + } + + def 'should rename a file'() { + given: + def id = new ObjectId() + def fileId = new BsonObjectId(id) + def filesCollection = Mock(MongoCollection) + def newFilename = 'newFilename' + def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, Stub(MongoCollection)) + + when: + gridFSBucket.rename(id, newFilename) + + then: + 1 * filesCollection.updateOne(new BsonDocument('_id', fileId), + new BsonDocument('$set', + new BsonDocument('filename', new BsonString(newFilename)))) >> new UpdateResult.UnacknowledgedUpdateResult() + + when: + gridFSBucket.rename(fileId, newFilename) + + then: + 1 * filesCollection.updateOne(new BsonDocument('_id', fileId), + new BsonDocument('$set', + new BsonDocument('filename', new BsonString(newFilename)))) >> new UpdateResult.UnacknowledgedUpdateResult() + } + + def 'should throw an exception renaming non existent file'() { + given: + def fileId = new ObjectId() + def filesCollection = Mock(MongoCollection) { + 1 * updateOne(_, _) >> new UpdateResult.AcknowledgedUpdateResult(0, 0, null) + } + def newFilename = 'newFilename' + def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, Stub(MongoCollection)) + + when: + gridFSBucket.rename(fileId, newFilename) + + then: + thrown(MongoGridFSException) + } + + def 'should be able to drop the bucket'() { + given: + def filesCollection = Mock(MongoCollection) + def chunksCollection = Mock(MongoCollection) + def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, chunksCollection) + + when: + gridFSBucket.drop() + + then: 'drop the files collection first' + 1 * filesCollection.drop() + + then: + 1 * chunksCollection.drop() + } + + def 'should validate the clientSession is not null'() { + given: + def objectId = new ObjectId() + def bsonValue = new BsonObjectId(objectId) + def filename = 'filename' + def filesCollection = Mock(MongoCollection) + def chunksCollection = Mock(MongoCollection) + def gridFSBucket = new GridFSBucketImpl('fs', 255, filesCollection, chunksCollection) + + when: + gridFSBucket.delete(null, objectId) + then: + thrown(IllegalArgumentException) + + when: + gridFSBucket.downloadToStream(null, filename, Stub(OutputStream)) + then: + thrown(IllegalArgumentException) + + when: + gridFSBucket.downloadToStream(null, objectId, Stub(OutputStream)) + then: + thrown(IllegalArgumentException) + + when: + gridFSBucket.drop(null) + then: + thrown(IllegalArgumentException) + + when: + gridFSBucket.find((ClientSession) null) + then: + thrown(IllegalArgumentException) + + when: + gridFSBucket.openDownloadStream(null, filename) + then: + thrown(IllegalArgumentException) + + when: + gridFSBucket.openDownloadStream(null, objectId) + then: + thrown(IllegalArgumentException) + + when: + gridFSBucket.openUploadStream(null, filename) + then: + thrown(IllegalArgumentException) + + when: + gridFSBucket.openUploadStream(null, bsonValue, filename) + 
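// As with each overload above, a null ClientSession must be rejected with an
+ // IllegalArgumentException before any collection access takes place.
+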
then: + thrown(IllegalArgumentException) + + when: + gridFSBucket.rename(null, objectId, filename) + then: + thrown(IllegalArgumentException) + + when: + gridFSBucket.uploadFromStream(null, filename, Stub(InputStream)) + then: + thrown(IllegalArgumentException) + + when: + gridFSBucket.uploadFromStream(null, bsonValue, filename, Stub(InputStream)) + then: + thrown(IllegalArgumentException) + } +} diff --git a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketsSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketsSpecification.groovy new file mode 100644 index 00000000000..0064cc9aad8 --- /dev/null +++ b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSBucketsSpecification.groovy @@ -0,0 +1,62 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.gridfs + +import com.mongodb.ClusterFixture +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import com.mongodb.WriteConcern +import com.mongodb.client.internal.MongoDatabaseImpl +import com.mongodb.client.internal.OperationExecutor +import org.bson.codecs.configuration.CodecRegistry +import spock.lang.Specification + +import static com.mongodb.CustomMatchers.isTheSameAs +import static org.bson.UuidRepresentation.JAVA_LEGACY +import static spock.util.matcher.HamcrestSupport.expect + +class GridFSBucketsSpecification extends Specification { + + def readConcern = ReadConcern.DEFAULT + + def 'should create a GridFSBucket with default bucket name'() { + given: + def database = new MongoDatabaseImpl('db', Stub(CodecRegistry), Stub(ReadPreference), Stub(WriteConcern), false, true, readConcern, + JAVA_LEGACY, null, ClusterFixture.TIMEOUT_SETTINGS, Stub(OperationExecutor)) + + when: + def gridFSBucket = GridFSBuckets.create(database) + + then: + expect gridFSBucket, isTheSameAs(new GridFSBucketImpl(database)) + } + + + def 'should create a GridFSBucket with custom bucket name'() { + given: + def database = new MongoDatabaseImpl('db', Stub(CodecRegistry), Stub(ReadPreference), Stub(WriteConcern), false, true, readConcern, + JAVA_LEGACY, null, ClusterFixture.TIMEOUT_SETTINGS, Stub(OperationExecutor)) + def customName = 'custom' + + when: + def gridFSBucket = GridFSBuckets.create(database, customName) + + then: + expect gridFSBucket, isTheSameAs(new GridFSBucketImpl(database, customName)) + } + +} diff --git a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSDownloadStreamSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSDownloadStreamSpecification.groovy new file mode 100644 index 00000000000..59bf12ec3a4 --- /dev/null +++ b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSDownloadStreamSpecification.groovy @@ -0,0 +1,675 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.gridfs + +import com.mongodb.MongoGridFSException +import com.mongodb.client.ClientSession +import com.mongodb.client.FindIterable +import com.mongodb.client.MongoCollection +import com.mongodb.client.MongoCursor +import com.mongodb.client.gridfs.model.GridFSFile +import org.bson.BsonBinary +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonObjectId +import org.bson.Document +import org.bson.types.ObjectId +import spock.lang.Specification + +class GridFSDownloadStreamSpecification extends Specification { + def fileInfo = new GridFSFile(new BsonObjectId(new ObjectId()), 'filename', 3L, 2, new Date(), new Document()) + + def 'should return the file info'() { + when: + def downloadStream = new GridFSDownloadStreamImpl(null, fileInfo, Stub(MongoCollection), null) + + then: + downloadStream.getGridFSFile() == fileInfo + } + + def 'should query the chunks collection as expected'() { + when: + def twoBytes = new byte[2] + def oneByte = new byte[1] + def findQuery = new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonDocument('$gte', new BsonInt32(0))) + def sort = new BsonDocument('n', new BsonInt32(1)) + def chunkDocument = new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonInt32(0)) + .append('data', new BsonBinary(twoBytes)) + + def secondChunkDocument = new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonInt32(1)) + .append('data', new BsonBinary(oneByte)) + + def mongoCursor = Mock(MongoCursor) + def findIterable = Mock(FindIterable) + def chunksCollection = Mock(MongoCollection) + def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection, null) + + then: + downloadStream.available() == 0 + + when: + def result = downloadStream.read() + + then: + result == (twoBytes[0] & 0xFF) + if (clientSession != null) { + 1 * chunksCollection.find(clientSession, findQuery) >> findIterable + } else { + 1 * chunksCollection.find(findQuery) >> findIterable + } + 1 * findIterable.sort(sort) >> findIterable + 1 * findIterable.batchSize(0) >> findIterable + 1 * findIterable.iterator() >> mongoCursor + + then: + 1 * mongoCursor.hasNext() >> true + 1 * mongoCursor.next() >> chunkDocument + + downloadStream.available() == 1 + + when: + result = downloadStream.read() + + then: + result == (twoBytes[1] & 0xFF) + 0 * mongoCursor.hasNext() + 0 * mongoCursor.next() + downloadStream.available() == 0 + + when: + result = downloadStream.read() + + then: + result == (oneByte[0] & 0xFF) + 1 * mongoCursor.hasNext() >> true + 1 * mongoCursor.next() >> secondChunkDocument + + when: + result = downloadStream.read() + + then: + result == -1 + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should create a new cursor each time when using batchSize 1'() { + when: + def twoBytes = new byte[2] + def oneByte = new byte[1] + def findQuery = new BsonDocument('files_id', fileInfo.getId()).append('n', + new BsonDocument('$gte', + new BsonInt32(0))) + def secondFindQuery = new BsonDocument('files_id', fileInfo.getId()) + .append('n', new 
BsonDocument('$gte', new BsonInt32(1))) + def sort = new BsonDocument('n', new BsonInt32(1)) + def chunkDocument = new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonInt32(0)) + .append('data', new BsonBinary(twoBytes)) + + def secondChunkDocument = new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonInt32(1)) + .append('data', new BsonBinary(oneByte)) + + def mongoCursor = Mock(MongoCursor) + def findIterable = Mock(FindIterable) + def chunksCollection = Mock(MongoCollection) + def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection, + null).batchSize(1) + + then: + downloadStream.available() == 0 + + when: + def result = downloadStream.read() + + then: + result == (twoBytes[0] & 0xFF) + if (clientSession != null) { + 1 * chunksCollection.find(clientSession, findQuery) >> findIterable + } else { + 1 * chunksCollection.find(findQuery) >> findIterable + } + 1 * findIterable.sort(sort) >> findIterable + 1 * findIterable.batchSize(1) >> findIterable + 1 * findIterable.iterator() >> mongoCursor + + then: + 1 * mongoCursor.hasNext() >> true + 1 * mongoCursor.next() >> chunkDocument + + downloadStream.available() == 1 + + when: + result = downloadStream.read() + + then: + result == (twoBytes[1] & 0xFF) + 0 * mongoCursor.hasNext() + 0 * mongoCursor.next() + downloadStream.available() == 0 + + when: + result = downloadStream.read() + + then: + result == (oneByte[0] & 0xFF) + if (clientSession != null) { + 1 * chunksCollection.find(clientSession, secondFindQuery) >> findIterable + } else { + 1 * chunksCollection.find(secondFindQuery) >> findIterable + } + 1 * findIterable.sort(sort) >> findIterable + 1 * findIterable.batchSize(1) >> findIterable + 1 * findIterable.iterator() >> mongoCursor + 1 * mongoCursor.hasNext() >> true + 1 * mongoCursor.next() >> secondChunkDocument + + when: + result = downloadStream.read() + + then: + result == -1 + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should skip to the correct point'() { + given: + def fileInfo = new GridFSFile(new BsonObjectId(new ObjectId()), 'filename', 4194297L, 32, + new Date(), new Document()) + + def firstChunkBytes = 1..32 as byte[] + def lastChunkBytes = 33 .. 
57 as byte[] + + def sort = new BsonDocument('n', new BsonInt32(1)) + + def findQueries = [new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonDocument('$gte', new BsonInt32(0))), + new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonDocument('$gte', new BsonInt32(131071)))] + def chunkDocuments = + [new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonInt32(0)).append('data', new BsonBinary(firstChunkBytes)), + new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonInt32(131071)).append('data', new BsonBinary(lastChunkBytes))] + + def mongoCursor = Mock(MongoCursor) + def findIterable = Mock(FindIterable) + def chunksCollection = Mock(MongoCollection) + def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection, null) + + when: + def skipResult = downloadStream.skip(15) + + then: + skipResult == 15L + 0 * chunksCollection.find(*_) + + when: + def readByte = new byte[5] + downloadStream.read(readByte) + + then: + if (clientSession != null) { + 1 * chunksCollection.find(clientSession, findQueries[0]) >> findIterable + } else { + 1 * chunksCollection.find(findQueries[0]) >> findIterable + } + 1 * findIterable.sort(sort) >> findIterable + 1 * findIterable.batchSize(0) >> findIterable + 1 * findIterable.iterator() >> mongoCursor + 1 * mongoCursor.hasNext() >> true + 1 * mongoCursor.next() >> chunkDocuments[0] + + then: + readByte == [16, 17, 18, 19, 20] as byte[] + + when: + skipResult = downloadStream.skip(4194272) + + then: + skipResult == 4194272L + 0 * chunksCollection.find(*_) + + when: + downloadStream.read(readByte) + + then: + if (clientSession != null) { + 1 * chunksCollection.find(clientSession, findQueries[1]) >> findIterable + } else { + 1 * chunksCollection.find(findQueries[1]) >> findIterable + } + 1 * findIterable.sort(sort) >> findIterable + 1 * findIterable.batchSize(0) >> findIterable + 1 * findIterable.iterator() >> mongoCursor + 1 * mongoCursor.hasNext() >> true + 1 * mongoCursor.next() >> chunkDocuments[1] + + then: + readByte == [53, 54, 55, 56, 57] as byte[] + + when: + skipResult = downloadStream.skip(1) + + then: + skipResult == 0L + 0 * chunksCollection.find(*_) + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should mark and reset to the correct point'() { + given: + def fileInfo = new GridFSFile(new BsonObjectId(new ObjectId()), 'filename', 25L, 25, new Date(), new Document()) + + def expected10Bytes = 11 .. 20 as byte[] + def firstChunkBytes = 1..25 as byte[] + + def chunkDocument = new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonInt32(0)) + .append('data', new BsonBinary(firstChunkBytes)) + + def mongoCursor = Mock(MongoCursor) + def findIterable = Mock(FindIterable) + def chunksCollection = Mock(MongoCollection) + def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection, null) + + when: + def readByte = new byte[10] + downloadStream.read(readByte) + + then: + if (clientSession != null) { + 1 * chunksCollection.find(clientSession, _) >> findIterable + } else { + 1 * chunksCollection.find(_) >> findIterable + } + 1 * findIterable.sort(_) >> findIterable + 1 * findIterable.batchSize(0) >> findIterable + 1 * findIterable.iterator() >> mongoCursor + 1 * mongoCursor.hasNext() >> true + 1 * mongoCursor.next() >> chunkDocument + + then: + readByte == 1 .. 
10 as byte[] + + when: + downloadStream.mark() + + then: + 0 * chunksCollection.find(*_) + + when: + downloadStream.read(readByte) + + then: + readByte == expected10Bytes + + when: + downloadStream.reset() + + then: + 0 * chunksCollection.find(*_) + + when: + downloadStream.read(readByte) + + then: + 0 * chunksCollection.find(*_) + readByte == expected10Bytes + + where: + clientSession << [null, Stub(ClientSession)] + } + + + def 'should mark and reset across chunks'() { + given: + def fileInfo = new GridFSFile(new BsonObjectId(new ObjectId()), 'filename', 50L, 25, new Date(), new Document()) + + def firstChunkBytes = 1..25 as byte[] + def secondChunkBytes = 26 .. 50 as byte[] + + def chunkDocuments = + [new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonInt32(0)) + .append('data', new BsonBinary(firstChunkBytes)), + new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonInt32(1)) + .append('data', new BsonBinary(secondChunkBytes))] + + def mongoCursor = Mock(MongoCursor) + def findIterable = Mock(FindIterable) + def chunksCollection = Mock(MongoCollection) + def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection, null) + + when: + downloadStream.mark() + def readByte = new byte[25] + downloadStream.read(readByte) + + then: + if (clientSession != null) { + 1 * chunksCollection.find(clientSession, _) >> findIterable + } else { + 1 * chunksCollection.find(_) >> findIterable + } + 1 * findIterable.sort(_) >> findIterable + 1 * findIterable.batchSize(0) >> findIterable + 1 * findIterable.iterator() >> mongoCursor + 1 * mongoCursor.hasNext() >> true + 1 * mongoCursor.next() >> chunkDocuments[0] + + then: + readByte == firstChunkBytes + + then: + 0 * chunksCollection.find(*_) + + when: + downloadStream.read(readByte) + + then: + readByte == secondChunkBytes + 1 * mongoCursor.hasNext() >> true + 1 * mongoCursor.next() >> chunkDocuments[1] + + when: 'check read to EOF' + def result = downloadStream.read(readByte) + + then: + result == -1 + + when: + downloadStream.reset() + + then: + 0 * chunksCollection.find(*_) + + when: + downloadStream.read(readByte) + + then: + readByte == firstChunkBytes + if (clientSession != null) { + 1 * chunksCollection.find(clientSession, _) >> findIterable + } else { + 1 * chunksCollection.find(_) >> findIterable + } + 1 * findIterable.sort(_) >> findIterable + 1 * findIterable.batchSize(0) >> findIterable + 1 * findIterable.iterator() >> mongoCursor + 1 * mongoCursor.hasNext() >> true + 1 * mongoCursor.next() >> chunkDocuments[0] + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should validate next chunk when marked and reset at eof'() { + given: + def fileInfo = new GridFSFile(new BsonObjectId(new ObjectId()), 'filename', 25L, 25, new Date(), new Document()) + + def chunkBytes = 1..25 as byte[] + def chunkDocument = new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonInt32(0)) + .append('data', new BsonBinary(chunkBytes)) + + def mongoCursor = Mock(MongoCursor) + def findIterable = Mock(FindIterable) + def chunksCollection = Mock(MongoCollection) + def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection, null) + + when: + def readByte = new byte[25] + downloadStream.read(readByte) + + then: + if (clientSession != null) { + 1 * chunksCollection.find(clientSession, _) >> findIterable + } else { + 1 * chunksCollection.find(_) >> findIterable + } + 1 * findIterable.sort(_) >> findIterable + 1 * findIterable.batchSize(0) >> 
findIterable + 1 * findIterable.iterator() >> mongoCursor + 1 * mongoCursor.hasNext() >> true + 1 * mongoCursor.next() >> chunkDocument + + then: + readByte == chunkBytes + + when: + downloadStream.mark() + + then: + 0 * chunksCollection.find(*_) + + when: + downloadStream.reset() + + then: + 0 * chunksCollection.find(*_) + + when: 'Trying to read past eof' + def result = downloadStream.read(readByte) + + then: + result == -1 + + when: 'Resets back to eof' + downloadStream.reset() + + then: + 0 * chunksCollection.find(*_) + + when: + result = downloadStream.read(readByte) + + then: + result == -1 + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should not throw an exception when trying to mark post close'() { + given: + def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, Stub(MongoCollection), null) + downloadStream.close() + + when: + downloadStream.mark() + + then: + notThrown(MongoGridFSException) + + when: + downloadStream.mark(1) + + then: + notThrown(MongoGridFSException) + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should handle negative skip value correctly '() { + given: + def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, Stub(MongoCollection), null) + + when: + def result = downloadStream.skip(-1) + + then: + result == 0L + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should handle skip that is larger or equal to the file length'() { + given: + def chunksCollection = Mock(MongoCollection) + def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection, null) + + when: + def result = downloadStream.skip(skipValue) + + then: + result == 3L + 0 * chunksCollection.find(*_) + + when: + result = downloadStream.read() + + then: + result == -1 + + where: + [skipValue, clientSession] << [[3, 100], [null, Stub(ClientSession)]].combinations() + } + + def 'should throw if trying to pass negative batchSize'() { + given: + def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, Stub(MongoCollection), null) + + when: + downloadStream.batchSize(0) + + then: + notThrown(IllegalArgumentException) + + + when: + downloadStream.batchSize(-1) + + then: + thrown(IllegalArgumentException) + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should throw if no chunks found when data is expected'() { + given: + def mongoCursor = Mock(MongoCursor) + def findIterable = Mock(FindIterable) + def chunksCollection = Mock(MongoCollection) + def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, chunksCollection, null) + + when: + downloadStream.read() + + then: + if (clientSession != null) { + 1 * chunksCollection.find(clientSession, _) >> findIterable + } else { + 1 * chunksCollection.find(_) >> findIterable + } + 1 * findIterable.sort(_) >> findIterable + 1 * findIterable.batchSize(0) >> findIterable + 1 * findIterable.iterator() >> mongoCursor + 1 * mongoCursor.hasNext() >> false + + then: + thrown(MongoGridFSException) + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should throw if chunk data differs from the expected'() { + given: + def chunkDocument = new BsonDocument('files_id', fileInfo.getId()) + .append('n', new BsonInt32(0)) + .append('data', new BsonBinary(data)) + + def mongoCursor = Mock(MongoCursor) + def findIterable = Mock(FindIterable) + def chunksCollection = Mock(MongoCollection) + def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, 
chunksCollection, null) + + when: + downloadStream.read() + + then: + if (clientSession != null) { + 1 * chunksCollection.find(clientSession, _) >> findIterable + } else { + 1 * chunksCollection.find(_) >> findIterable + } + 1 * findIterable.sort(_) >> findIterable + 1 * findIterable.batchSize(0) >> findIterable + 1 * findIterable.iterator() >> mongoCursor + 1 * mongoCursor.hasNext() >> true + 1 * mongoCursor.next() >> chunkDocument + + then: + thrown(MongoGridFSException) + + where: + [data, clientSession] << [[new byte[1], new byte[100]], [null, Stub(ClientSession)]].combinations() + } + + def 'should throw an exception when trying to action post close'() { + given: + def downloadStream = new GridFSDownloadStreamImpl(clientSession, fileInfo, Stub(MongoCollection), null) + downloadStream.close() + + when: + downloadStream.read() + + then: + thrown(MongoGridFSException) + + when: + downloadStream.skip(10) + + then: + thrown(MongoGridFSException) + + when: + downloadStream.reset() + + then: + thrown(MongoGridFSException) + + when: + downloadStream.read(new byte[10]) + + then: + thrown(MongoGridFSException) + + when: + downloadStream.read(new byte[10], 0, 10) + + then: + thrown(MongoGridFSException) + + where: + clientSession << [null, Stub(ClientSession)] + } +} diff --git a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSFindIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSFindIterableSpecification.groovy new file mode 100644 index 00000000000..40cd03bc7e9 --- /dev/null +++ b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSFindIterableSpecification.groovy @@ -0,0 +1,194 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.gridfs + + +import com.mongodb.CursorType +import com.mongodb.Function +import com.mongodb.MongoClientSettings +import com.mongodb.MongoNamespace +import com.mongodb.ReadConcern +import com.mongodb.client.gridfs.codecs.GridFSFileCodec +import com.mongodb.client.gridfs.model.GridFSFile +import com.mongodb.client.internal.FindIterableImpl +import com.mongodb.client.internal.TestOperationExecutor +import com.mongodb.client.model.Collation +import com.mongodb.internal.operation.BatchCursor +import com.mongodb.internal.operation.FindOperation +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonObjectId +import org.bson.Document +import org.bson.types.ObjectId +import spock.lang.Specification + +import java.util.function.Consumer + +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS +import static com.mongodb.CustomMatchers.isTheSameAs +import static com.mongodb.ReadPreference.secondary +import static java.util.concurrent.TimeUnit.MILLISECONDS +import static spock.util.matcher.HamcrestSupport.expect + +class GridFSFindIterableSpecification extends Specification { + + def codecRegistry = MongoClientSettings.getDefaultCodecRegistry() + def gridFSFileCodec = new GridFSFileCodec(codecRegistry) + def readPreference = secondary() + def readConcern = ReadConcern.DEFAULT + def collation = Collation.builder().locale('en').build() + def namespace = new MongoNamespace('test', 'fs.files') + + def 'should build the expected findOperation'() { + given: + def executor = new TestOperationExecutor([null, null]) + def underlying = new FindIterableImpl(null, namespace, GridFSFile, GridFSFile, codecRegistry, readPreference, readConcern, executor, + new Document(), true, TIMEOUT_SETTINGS) + def findIterable = new GridFSFindIterableImpl(underlying) + + when: 'default input should be as expected' + findIterable.iterator() + + def operation = executor.getReadOperation() as FindOperation + def readPreference = executor.getReadPreference() + + then: + expect operation, isTheSameAs(new FindOperation(namespace, gridFSFileCodec) + .filter(new BsonDocument()).retryReads(true)) + readPreference == secondary() + + when: 'overriding initial options' + findIterable.filter(new Document('filter', 2)) + .sort(new Document('sort', 2)) + .maxTime(100, MILLISECONDS) + .batchSize(99) + .limit(99) + .skip(9) + .noCursorTimeout(true) + .collation(collation) + .iterator() + + operation = executor.getReadOperation() as FindOperation + + then: 'should use the overrides' + expect operation, isTheSameAs(new FindOperation(namespace, gridFSFileCodec) + .filter(new BsonDocument('filter', new BsonInt32(2))) + .sort(new BsonDocument('sort', new BsonInt32(2))) + .batchSize(99) + .limit(99) + .skip(9) + .noCursorTimeout(true) + .collation(collation) + .retryReads(true) + ) + } + + def 'should handle mixed types'() { + given: + def executor = new TestOperationExecutor([null, null]) + def findIterable = new FindIterableImpl(null, namespace, GridFSFile, GridFSFile, codecRegistry, readPreference, readConcern, + executor, new Document('filter', 1), true, TIMEOUT_SETTINGS) + + when: + findIterable.filter(new Document('filter', 1)) + .sort(new BsonDocument('sort', new BsonInt32(1))) + .iterator() + + def operation = executor.getReadOperation() as FindOperation + + then: + expect operation, isTheSameAs(new FindOperation(namespace, gridFSFileCodec) + .filter(new BsonDocument('filter', new BsonInt32(1))) + .sort(new BsonDocument('sort', new BsonInt32(1))) + .cursorType(CursorType.NonTailable) 
+ .retryReads(true) + ) + } + + def 'should follow the MongoIterable interface as expected'() { + given: + def cannedResults = [ + new GridFSFile(new BsonObjectId(new ObjectId()), 'File 1', 123L, 255, new Date(1438679434041) + , null), + new GridFSFile(new BsonObjectId(new ObjectId()), 'File 2', 999999L, 255, new Date(1438679434050) + , null), + new GridFSFile(new BsonObjectId(new ObjectId()), 'File 3', 1L, 255, new Date(1438679434090) + , null), + ] + def cursor = { + def batchToReturn = cannedResults.collect(); + Stub(BatchCursor) { + def count = 0 + def results + def getResult = { + count++ + results = count == 1 ? batchToReturn : null + results + } + next() >> { + getResult() + } + hasNext() >> { + count == 0 + } + } + } + def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()]) + def underlying = new FindIterableImpl(null, namespace, GridFSFile, GridFSFile, codecRegistry, readPreference, readConcern, executor, + new Document(), true, TIMEOUT_SETTINGS) + def mongoIterable = new GridFSFindIterableImpl(underlying) + + when: + def firstResult = mongoIterable.first() + def expectedResult = cannedResults[0] + + then: + firstResult == expectedResult + + when: + def count = 0 + mongoIterable.forEach(new Consumer() { + @Override + void accept(GridFSFile document) { + count++ + } + }) + + then: + count == 3 + + when: + def target = [] + mongoIterable.into(target) + + then: + target == cannedResults + + when: + target = [] + mongoIterable.map(new Function() { + @Override + String apply(GridFSFile file) { + file.getFilename() + } + }).into(target) + + then: + target == cannedResults*.getFilename() + } + +} diff --git a/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSUploadStreamSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSUploadStreamSpecification.groovy new file mode 100644 index 00000000000..c81f947abf0 --- /dev/null +++ b/driver-sync/src/test/unit/com/mongodb/client/gridfs/GridFSUploadStreamSpecification.groovy @@ -0,0 +1,267 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.gridfs + +import com.mongodb.MongoGridFSException +import com.mongodb.client.ClientSession +import com.mongodb.client.MongoCollection +import com.mongodb.client.gridfs.model.GridFSFile +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonObjectId +import org.bson.BsonString +import org.bson.Document +import spock.lang.Specification + +class GridFSUploadStreamSpecification extends Specification { + def fileId = new BsonObjectId() + def filename = 'filename' + def metadata = new Document() + + def 'should return the file id'() { + when: + def uploadStream = new GridFSUploadStreamImpl(null, Stub(MongoCollection), Stub(MongoCollection), fileId, filename, 255 + , metadata, null) + then: + uploadStream.getId() == fileId + } + + def 'should write the buffer when it reaches the chunk size'() { + given: + def filesCollection = Mock(MongoCollection) + def chunksCollection = Mock(MongoCollection) + def uploadStream = new GridFSUploadStreamImpl(clientSession, filesCollection, chunksCollection, fileId, filename, 2 + , metadata, null) + when: + uploadStream.write(1) + + then: + 0 * chunksCollection.insertOne(*_) + + when: + uploadStream.write(1) + + then: + if (clientSession != null) { + 1 * chunksCollection.insertOne(clientSession, _) + } else { + 1 * chunksCollection.insertOne(_) + } + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should write to the files collection on close'() { + given: + def filesCollection = Mock(MongoCollection) + def chunksCollection = Mock(MongoCollection) + def uploadStream = new GridFSUploadStreamImpl(clientSession, filesCollection, chunksCollection, fileId, filename, 255 + , null, null) + + when: + uploadStream.write('file content ' as byte[]) + + then: + 0 * chunksCollection.insertOne(*_) + + when: + uploadStream.close() + + then: + if (clientSession != null) { + 1 * chunksCollection.insertOne(clientSession, _) + 1 * filesCollection.insertOne(clientSession, _) + } else { + 1 * chunksCollection.insertOne(_) + 1 * filesCollection.insertOne(_) + } + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should write to the files and chunks collections as expected on close'() { + given: + def filesCollection = Mock(MongoCollection) + def chunksCollection = Mock(MongoCollection) + def content = 'file content ' as byte[] + def metadata = new Document('contentType', 'text/txt') + def uploadStream = new GridFSUploadStreamImpl(clientSession, filesCollection, chunksCollection, fileId, filename, 255, + metadata, null) + def filesId = fileId + + when: + uploadStream.write(content) + uploadStream.close() + + then: + if (clientSession != null) { + 1 * chunksCollection.insertOne(clientSession) { + verifyAll(it, BsonDocument) { + it.get('files_id') == filesId + it.getInt32('n') == new BsonInt32(0) + it.getBinary('data').getData() == content + } + } + } else { + 1 * chunksCollection.insertOne { + verifyAll(it, BsonDocument) { + it.get('files_id') == filesId + it.getInt32('n') == new BsonInt32(0) + it.getBinary('data').getData() == content + } + } + } + + then: + if (clientSession != null) { + 1 * filesCollection.insertOne(clientSession) { + verifyAll(it, GridFSFile) { + it.getId() == fileId + it.getFilename() == filename + it.getLength() == content.length as Long + it.getChunkSize() == 255 + it.getMetadata() == metadata + } + } + } else { + 1 * filesCollection.insertOne { + verifyAll(it, GridFSFile) { + it.getId() == fileId + it.getFilename() == filename + it.getLength() == content.length as Long + 
it.getChunkSize() == 255 + it.getMetadata() == metadata + } + } + } + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should not write an empty chunk'() { + given: + def filesCollection = Mock(MongoCollection) + def chunksCollection = Mock(MongoCollection) + def uploadStream = new GridFSUploadStreamImpl(clientSession, filesCollection, chunksCollection, fileId, filename, 255 + , metadata, null) + when: + uploadStream.close() + + then: + 0 * chunksCollection.insertOne(*_) + if (clientSession != null) { + 1 * filesCollection.insertOne(clientSession, _) + } else { + 1 * filesCollection.insertOne(_) + } + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should delete any chunks when calling abort'() { + given: + def chunksCollection = Mock(MongoCollection) + def uploadStream = new GridFSUploadStreamImpl(clientSession, Stub(MongoCollection), chunksCollection, fileId, filename, 255 + , metadata, null) + + when: + uploadStream.write('file content ' as byte[]) + uploadStream.abort() + + then: + if (clientSession != null) { + 1 * chunksCollection.deleteMany(clientSession, new Document('files_id', fileId)) + } else { + 1 * chunksCollection.deleteMany(new Document('files_id', fileId)) + } + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should close the stream on abort'() { + given: + def uploadStream = new GridFSUploadStreamImpl(clientSession, Stub(MongoCollection), Stub(MongoCollection), fileId, filename, 255 + , metadata, null) + uploadStream.write('file content ' as byte[]) + uploadStream.abort() + + when: + uploadStream.write(1) + + then: + thrown(MongoGridFSException) + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should not do anything when calling flush'() { + given: + def chunksCollection = Mock(MongoCollection) + def uploadStream = new GridFSUploadStreamImpl(clientSession, Stub(MongoCollection), chunksCollection, fileId, filename, 255 + , metadata, null) + + when: + uploadStream.write('file content ' as byte[]) + uploadStream.flush() + + then: + 0 * chunksCollection.insertOne(*_) + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should throw an exception when trying to action post close'() { + given: + def filesCollection = Mock(MongoCollection) + def chunksCollection = Mock(MongoCollection) + def uploadStream = new GridFSUploadStreamImpl(clientSession, filesCollection, chunksCollection, fileId, filename, 255 + , metadata, null) + when: + uploadStream.close() + uploadStream.write(1) + + then: + thrown(MongoGridFSException) + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should throw an exception when calling getObjectId and the fileId is not an ObjectId'() { + given: + def fileId = new BsonString('myFile') + def filesCollection = Mock(MongoCollection) + def chunksCollection = Mock(MongoCollection) + def uploadStream = new GridFSUploadStreamImpl(clientSession, filesCollection, chunksCollection, fileId, filename, 255 + , metadata, null) + when: + uploadStream.getObjectId() + + then: + thrown(MongoGridFSException) + + where: + clientSession << [null, Stub(ClientSession)] + } +} diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/AggregateIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/AggregateIterableSpecification.groovy new file mode 100644 index 00000000000..467e9614424 --- /dev/null +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/AggregateIterableSpecification.groovy @@ -0,0 +1,672 @@ +/* + * 
Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.internal + +import com.mongodb.Function +import com.mongodb.MongoException +import com.mongodb.MongoNamespace +import com.mongodb.ReadConcern +import com.mongodb.WriteConcern +import com.mongodb.client.ClientSession +import com.mongodb.client.model.Collation +import com.mongodb.internal.client.model.AggregationLevel +import com.mongodb.internal.operation.AggregateOperation +import com.mongodb.internal.operation.AggregateToCollectionOperation +import com.mongodb.internal.operation.BatchCursor +import com.mongodb.internal.operation.FindOperation +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonString +import org.bson.Document +import org.bson.codecs.BsonValueCodecProvider +import org.bson.codecs.DocumentCodec +import org.bson.codecs.DocumentCodecProvider +import org.bson.codecs.ValueCodecProvider +import org.bson.codecs.configuration.CodecConfigurationException +import spock.lang.Specification + +import java.util.function.Consumer + +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS +import static com.mongodb.CustomMatchers.isTheSameAs +import static com.mongodb.ReadPreference.secondary +import static java.util.concurrent.TimeUnit.MILLISECONDS +import static org.bson.codecs.configuration.CodecRegistries.fromProviders +import static spock.util.matcher.HamcrestSupport.expect + +class AggregateIterableSpecification extends Specification { + + def namespace = new MongoNamespace('db', 'coll') + def codecRegistry = fromProviders([new ValueCodecProvider(), new DocumentCodecProvider(), new BsonValueCodecProvider()]) + def readPreference = secondary() + def readConcern = ReadConcern.MAJORITY + def writeConcern = WriteConcern.MAJORITY + def collation = Collation.builder().locale('en').build() + + def 'should build the expected AggregationOperation'() { + given: + def executor = new TestOperationExecutor([null, null, null, null, null]) + def pipeline = [new Document('$match', 1)] + def aggregationIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, + readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, + true, TIMEOUT_SETTINGS) + + when: 'default input should be as expected' + aggregationIterable.iterator() + + def operation = executor.getReadOperation() as AggregateOperation + def readPreference = executor.getReadPreference() + + then: + expect operation, isTheSameAs(new AggregateOperation(namespace, + [new BsonDocument('$match', new BsonInt32(1))], new DocumentCodec()) + .retryReads(true)) + readPreference == secondary() + + when: 'overriding initial options' + aggregationIterable + .maxAwaitTime(1001, MILLISECONDS) + .maxTime(101, MILLISECONDS) + .collation(collation) + .hint(new Document('a', 1)) + .comment('this is a comment') + .iterator() + + operation = executor.getReadOperation() as AggregateOperation + + then: 'should use the overrides' + expect operation, 
isTheSameAs(new AggregateOperation(namespace, + [new BsonDocument('$match', new BsonInt32(1))], new DocumentCodec()) + .retryReads(true) + .collation(collation) + .hint(new BsonDocument('a', new BsonInt32(1))) + .comment(new BsonString('this is a comment'))) + + when: 'both hint and hint string are set' + aggregationIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, + readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) + + aggregationIterable + .hint(new Document('a', 1)) + .hintString('a_1') + .iterator() + + operation = executor.getReadOperation() as AggregateOperation + + then: 'should use hint not hint string' + expect operation, isTheSameAs(new AggregateOperation(namespace, + [new BsonDocument('$match', new BsonInt32(1))], new DocumentCodec()) + .hint(new BsonDocument('a', new BsonInt32(1)))) + } + + def 'should build the expected AggregateToCollectionOperation for $out'() { + given: + def executor = new TestOperationExecutor([null, null, null, null, null]) + def collectionName = 'collectionName' + def collectionNamespace = new MongoNamespace(namespace.getDatabaseName(), collectionName) + def pipeline = [new Document('$match', 1), new Document('$out', collectionName)] + + when: 'aggregation includes $out' + new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, + pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) + .batchSize(99) + .allowDiskUse(true) + .collation(collation) + .hint(new Document('a', 1)) + .comment(new BsonString('this is a comment')) + .iterator() + + def operation = executor.getReadOperation() as AggregateToCollectionOperation + + then: 'should use the overrides' + expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, + [new BsonDocument('$match', new BsonInt32(1)), new BsonDocument('$out', new BsonString(collectionName))], + readConcern, writeConcern, AggregationLevel.COLLECTION) + .allowDiskUse(true) + .collation(collation) + .hint(new BsonDocument('a', new BsonInt32(1))) + .comment(new BsonString('this is a comment')) + ) + + when: 'the subsequent read should have the batchSize set' + operation = executor.getReadOperation() as FindOperation + + then: 'should use the correct settings' + operation.getNamespace() == collectionNamespace + operation.getBatchSize() == 99 + operation.getCollation() == collation + + when: 'aggregation includes $out and is at the database level' + new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, + pipeline, AggregationLevel.DATABASE, false, TIMEOUT_SETTINGS) + .batchSize(99) + .maxTime(100, MILLISECONDS) + .allowDiskUse(true) + .collation(collation) + .hint(new Document('a', 1)) + .comment(new BsonString('this is a comment')) + .iterator() + + operation = executor.getReadOperation() as AggregateToCollectionOperation + + then: 'should use the overrides' + expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, + [new BsonDocument('$match', new BsonInt32(1)), new BsonDocument('$out', new BsonString(collectionName))], + readConcern, writeConcern, + AggregationLevel.DATABASE) + .allowDiskUse(true) + .collation(collation) + .hint(new BsonDocument('a', new BsonInt32(1))) + .comment(new BsonString('this is a comment')) + ) + + when: 'the subsequent read should have the batchSize set' + operation = executor.getReadOperation() as 
FindOperation + + then: 'should use the correct settings' + operation.getNamespace() == collectionNamespace + operation.getBatchSize() == 99 + operation.getCollation() == collation + operation.isAllowDiskUse() == null + + when: 'toCollection should work as expected' + new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, + pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) + .allowDiskUse(true) + .collation(collation) + .hint(new Document('a', 1)) + .comment(new BsonString('this is a comment')) + .toCollection() + + operation = executor.getReadOperation() as AggregateToCollectionOperation + + then: + expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, + [new BsonDocument('$match', new BsonInt32(1)), new BsonDocument('$out', new BsonString(collectionName))], + readConcern, writeConcern) + .allowDiskUse(true) + .collation(collation) + .hint(new BsonDocument('a', new BsonInt32(1))) + .comment(new BsonString('this is a comment'))) + } + + def 'should build the expected AggregateToCollectionOperation for $out with hint string'() { + given: + def executor = new TestOperationExecutor([null, null, null, null, null]) + def collectionName = 'collectionName' + def pipeline = [new Document('$match', 1), new Document('$out', collectionName)] + + when: 'aggregation includes $out and hint string' + new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, + pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) + .hintString('x_1').iterator() + + def operation = executor.getReadOperation() as AggregateToCollectionOperation + + then: 'should use the overrides' + expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, + [new BsonDocument('$match', new BsonInt32(1)), new BsonDocument('$out', new BsonString(collectionName))], + readConcern, writeConcern, AggregationLevel.COLLECTION) + .hint(new BsonString('x_1'))) + + when: 'aggregation includes $out and hint and hint string' + executor = new TestOperationExecutor([null, null, null, null, null]) + new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, + pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) + .hint(new BsonDocument('x', new BsonInt32(1))) + .hintString('x_1').iterator() + + operation = executor.getReadOperation() as AggregateToCollectionOperation + + then: 'should use the hint not the hint string' + expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, + [new BsonDocument('$match', new BsonInt32(1)), new BsonDocument('$out', new BsonString(collectionName))], + readConcern, writeConcern, AggregationLevel.COLLECTION) + .hint(new BsonDocument('x', new BsonInt32(1)))) + } + + def 'should build the expected AggregateToCollectionOperation for $merge document'() { + given: + def executor = new TestOperationExecutor([null, null, null, null, null, null, null]) + def collectionName = 'collectionName' + def collectionNamespace = new MongoNamespace(namespace.getDatabaseName(), collectionName) + def pipeline = [new Document('$match', 1), new Document('$merge', new Document('into', collectionName))] + def pipelineWithIntoDocument = [new Document('$match', 1), new Document('$merge', + new Document('into', new Document('db', 'db2').append('coll', collectionName)))] + + when: 'aggregation includes $merge' + new AggregateIterableImpl(null, 
namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, + pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) + .batchSize(99) + .allowDiskUse(true) + .collation(collation) + .hint(new Document('a', 1)) + .comment(new BsonString('this is a comment')).iterator() + + def operation = executor.getReadOperation() as AggregateToCollectionOperation + + then: 'should use the overrides' + expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, + [new BsonDocument('$match', new BsonInt32(1)), + new BsonDocument('$merge', new BsonDocument('into', new BsonString(collectionName)))], + readConcern, writeConcern, + AggregationLevel.COLLECTION) + .allowDiskUse(true) + .collation(collation) + .hint(new BsonDocument('a', new BsonInt32(1))) + .comment(new BsonString('this is a comment')) + ) + + when: 'the subsequent read should have the batchSize set' + operation = executor.getReadOperation() as FindOperation + + then: 'should use the correct settings' + operation.getNamespace() == collectionNamespace + operation.getBatchSize() == 99 + operation.getCollation() == collation + + when: 'aggregation includes $merge into a different database' + new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, + pipelineWithIntoDocument, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) + .batchSize(99) + .maxTime(100, MILLISECONDS) + .allowDiskUse(true) + .collation(collation) + .hint(new Document('a', 1)) + .comment(new BsonString('this is a comment')).iterator() + + operation = executor.getReadOperation() as AggregateToCollectionOperation + + then: 'should use the overrides' + expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, + [new BsonDocument('$match', new BsonInt32(1)), + new BsonDocument('$merge', new BsonDocument('into', + new BsonDocument('db', new BsonString('db2')).append('coll', new BsonString(collectionName))))], + readConcern, writeConcern, + AggregationLevel.COLLECTION) + .allowDiskUse(true) + .collation(collation) + .hint(new BsonDocument('a', new BsonInt32(1))) + .comment(new BsonString('this is a comment')) + ) + + when: 'the subsequent read should have the batchSize set' + operation = executor.getReadOperation() as FindOperation + + then: 'should use the correct settings' + operation.getNamespace() == new MongoNamespace('db2', collectionName) + operation.getBatchSize() == 99 + operation.getCollation() == collation + + when: 'aggregation includes $merge and is at the database level' + new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, + pipeline, AggregationLevel.DATABASE, false, TIMEOUT_SETTINGS) + .batchSize(99) + .maxTime(100, MILLISECONDS) + .allowDiskUse(true) + .collation(collation) + .hint(new Document('a', 1)) + .comment(new BsonString('this is a comment')).iterator() + + operation = executor.getReadOperation() as AggregateToCollectionOperation + + then: 'should use the overrides' + expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, + [new BsonDocument('$match', new BsonInt32(1)), + new BsonDocument('$merge', new BsonDocument('into', new BsonString(collectionName)))], + readConcern, writeConcern, + AggregationLevel.DATABASE) + .allowDiskUse(true) + .collation(collation) + .hint(new BsonDocument('a', new BsonInt32(1))) + .comment(new BsonString('this is a comment')) + ) + + when: 'the subsequent read should have the 
batchSize set' + operation = executor.getReadOperation() as FindOperation + + then: 'should use the correct settings' + operation.getNamespace() == collectionNamespace + operation.getBatchSize() == 99 + operation.getCollation() == collation + + when: 'toCollection should work as expected' + new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, + pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) + .allowDiskUse(true) + .collation(collation) + .hint(new Document('a', 1)) + .comment(new BsonString('this is a comment')) + .toCollection() + + operation = executor.getReadOperation() as AggregateToCollectionOperation + + then: + expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, + [new BsonDocument('$match', new BsonInt32(1)), + new BsonDocument('$merge', new BsonDocument('into', new BsonString(collectionName)))], + readConcern, writeConcern) + .allowDiskUse(true) + .collation(collation) + .hint(new BsonDocument('a', new BsonInt32(1))) + .comment(new BsonString('this is a comment'))) + } + + def 'should build the expected AggregateToCollectionOperation for $merge string'() { + given: + def executor = new TestOperationExecutor([null, null, null, null, null, null, null]) + def collectionName = 'collectionName' + def collectionNamespace = new MongoNamespace(namespace.getDatabaseName(), collectionName) + def pipeline = [new BsonDocument('$match', new BsonDocument()), new BsonDocument('$merge', new BsonString(collectionName))] + + when: + new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, + pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) + .iterator() + + def operation = executor.getReadOperation() as AggregateToCollectionOperation + + then: + expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, pipeline, readConcern, + writeConcern, AggregationLevel.COLLECTION)) + + when: + operation = executor.getReadOperation() as FindOperation + + then: + operation.getNamespace() == collectionNamespace + } + + def 'should build the expected AggregateToCollectionOperation for $out as a document'() { + given: + def cannedResults = [new Document('_id', 1), new Document('_id', 2), new Document('_id', 3)] + def cursor = { + def batchToReturn = cannedResults.collect() + Stub(BatchCursor) { + def count = 0 + def results + def getResult = { + count++ + results = count == 1 ? 
batchToReturn : null + results + } + next() >> { + getResult() + } + hasNext() >> { + count == 0 + } + } + } + def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor(), cursor(), cursor()]) + def pipeline = [new Document('$match', 1), new Document('$out', new Document('s3', true))] + def outWithDBpipeline = [new Document('$match', 1), + new Document('$out', new Document('db', 'testDB').append('coll', 'testCollection'))] + + when: 'aggregation includes $out' + def aggregateIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, + readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) + + aggregateIterable.toCollection() + def operation = executor.getReadOperation() as AggregateToCollectionOperation + + then: + expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, + [new BsonDocument('$match', new BsonInt32(1)), BsonDocument.parse('{$out: {s3: true}}')], + readConcern, writeConcern, AggregationLevel.COLLECTION) + ) + + when: 'Trying to iterate it should fail' + aggregateIterable.iterator() + + then: + thrown(IllegalStateException) + + when: 'aggregation includes $out and is at the database level' + aggregateIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, + readConcern, writeConcern, executor, pipeline, AggregationLevel.DATABASE, false, TIMEOUT_SETTINGS) + aggregateIterable.toCollection() + + operation = executor.getReadOperation() as AggregateToCollectionOperation + + then: + expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, + [new BsonDocument('$match', new BsonInt32(1)), BsonDocument.parse('{$out: {s3: true}}')], + readConcern, writeConcern, AggregationLevel.DATABASE) + ) + + when: 'Trying to iterate it should fail' + aggregateIterable.iterator() + + then: + thrown(IllegalStateException) + + when: 'toCollection should work as expected' + aggregateIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, + readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) + aggregateIterable.toCollection() + + operation = executor.getReadOperation() as AggregateToCollectionOperation + + then: + expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, + [new BsonDocument('$match', new BsonInt32(1)), BsonDocument.parse('{$out: {s3: true}}')], + readConcern, writeConcern)) + + when: 'Trying to iterate it should fail' + aggregateIterable.iterator() + + then: + thrown(IllegalStateException) + + when: 'aggregation includes $out with namespace' + aggregateIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, + readConcern, writeConcern, executor, outWithDBpipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) + aggregateIterable.toCollection() + + operation = executor.getReadOperation() as AggregateToCollectionOperation + + then: + expect operation, isTheSameAs(new AggregateToCollectionOperation(namespace, + [new BsonDocument('$match', new BsonInt32(1)), + BsonDocument.parse('{$out: {db: "testDB", coll: "testCollection"}}')], readConcern, writeConcern)) + + when: 'Trying to iterate it should succeed' + def results = [] + aggregateIterable.into(results) + + then: + results == cannedResults + } + + + def 'should use ClientSession for AggregationOperation'() { + given: + def batchCursor = Stub(BatchCursor) { + _ * hasNext() 
>> { false } + } + def executor = new TestOperationExecutor([batchCursor, batchCursor]) + def pipeline = [new Document('$match', 1)] + def aggregationIterable = new AggregateIterableImpl(clientSession, namespace, Document, Document, codecRegistry, readPreference, + readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) + + when: + aggregationIterable.first() + + then: + executor.getClientSession() == clientSession + + when: + aggregationIterable.iterator() + + then: + executor.getClientSession() == clientSession + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should use ClientSession for AggregateToCollectionOperation'() { + given: + def batchCursor = Stub(BatchCursor) { + _ * hasNext() >> { false } + } + def executor = new TestOperationExecutor([null, batchCursor, null, batchCursor, null]) + def pipeline = [new Document('$match', 1), new Document('$out', 'collName')] + def aggregationIterable = new AggregateIterableImpl(clientSession, namespace, Document, Document, codecRegistry, readPreference, + readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) + + when: + aggregationIterable.first() + + then: + executor.getClientSession() == clientSession + + when: + aggregationIterable.iterator() + + then: + executor.getClientSession() == clientSession + executor.getClientSession() == clientSession + + when: + aggregationIterable.toCollection() + + then: + executor.getClientSession() == clientSession + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should handle exceptions correctly'() { + given: + def codecRegistry = fromProviders([new ValueCodecProvider(), new BsonValueCodecProvider()]) + def executor = new TestOperationExecutor([new MongoException('failure')]) + def pipeline = [new BsonDocument('$match', new BsonInt32(1))] + def aggregationIterable = new AggregateIterableImpl(null, namespace, BsonDocument, BsonDocument, codecRegistry, readPreference, + readConcern, writeConcern, executor, pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS) + + when: 'The operation fails with an exception' + aggregationIterable.iterator() + + then: 'the exception should be thrown' + thrown(MongoException) + + when: 'toCollection should throw IllegalStateException when last stage is not $out' + aggregationIterable.toCollection() + + then: + thrown(IllegalStateException) + + when: 'a codec is missing' + new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, + pipeline, AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS).iterator() + + then: + thrown(CodecConfigurationException) + + when: 'pipeline contains null' + new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, + [null], AggregationLevel.COLLECTION, false, TIMEOUT_SETTINGS).iterator() + + then: + thrown(IllegalArgumentException) + } + + def 'should follow the MongoIterable interface as expected'() { + given: + def cannedResults = [new Document('_id', 1), new Document('_id', 2), new Document('_id', 3)] + def cursor = { + def batchToReturn = cannedResults.collect() + Stub(BatchCursor) { + def count = 0 + def results + def getResult = { + count++ + results = count == 1 ? 
batchToReturn : null + results + } + next() >> { + getResult() + } + hasNext() >> { + count == 0 + } + } + } + def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()]) + def mongoIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, + readConcern, writeConcern, executor, [new Document('$match', 1)], AggregationLevel.COLLECTION, false, + TIMEOUT_SETTINGS) + + when: + def results = mongoIterable.first() + + then: + results == cannedResults[0] + + when: + def count = 0 + mongoIterable.forEach(new Consumer() { + @Override + void accept(Document document) { + count++ + } + }) + + then: + count == 3 + + when: + def target = [] + mongoIterable.into(target) + + then: + target == cannedResults + + when: + target = [] + mongoIterable.map(new Function() { + @Override + Integer apply(Document document) { + document.getInteger('_id') + } + }).into(target) + + then: + target == [1, 2, 3] + } + + def 'should get and set batchSize as expected'() { + when: + def batchSize = 5 + def mongoIterable = new AggregateIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, + readConcern, writeConcern, Stub(OperationExecutor), [new Document('$match', 1)], AggregationLevel.COLLECTION, + false, TIMEOUT_SETTINGS) + + then: + mongoIterable.getBatchSize() == null + + when: + mongoIterable.batchSize(batchSize) + + then: + mongoIterable.getBatchSize() == batchSize + } + +} diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/ChangeStreamIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/ChangeStreamIterableSpecification.groovy new file mode 100644 index 00000000000..fdf31a76b56 --- /dev/null +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/ChangeStreamIterableSpecification.groovy @@ -0,0 +1,290 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.internal + +import com.mongodb.Function +import com.mongodb.MongoException +import com.mongodb.MongoNamespace +import com.mongodb.ReadConcern +import com.mongodb.client.ClientSession +import com.mongodb.client.model.Collation +import com.mongodb.client.model.changestream.ChangeStreamDocument +import com.mongodb.client.model.changestream.FullDocument +import com.mongodb.client.model.changestream.FullDocumentBeforeChange +import com.mongodb.internal.client.model.changestream.ChangeStreamLevel +import com.mongodb.internal.operation.AggregateResponseBatchCursor +import com.mongodb.internal.operation.ChangeStreamOperation +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonTimestamp +import org.bson.Document +import org.bson.RawBsonDocument +import org.bson.codecs.BsonValueCodecProvider +import org.bson.codecs.DocumentCodecProvider +import org.bson.codecs.RawBsonDocumentCodec +import org.bson.codecs.ValueCodecProvider +import org.bson.codecs.configuration.CodecConfigurationException +import spock.lang.Specification + +import java.util.function.Consumer + +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS +import static com.mongodb.CustomMatchers.isTheSameAs +import static com.mongodb.ReadPreference.secondary +import static java.util.concurrent.TimeUnit.MILLISECONDS +import static org.bson.codecs.configuration.CodecRegistries.fromProviders +import static spock.util.matcher.HamcrestSupport.expect + +class ChangeStreamIterableSpecification extends Specification { + def namespace = new MongoNamespace('db', 'coll') + def codecRegistry = fromProviders([new ValueCodecProvider(), new DocumentCodecProvider(), new BsonValueCodecProvider()]) + def readPreference = secondary() + def readConcern = ReadConcern.MAJORITY + def collation = Collation.builder().locale('en').build() + + def 'should build the expected ChangeStreamOperation'() { + given: + def executor = new TestOperationExecutor([null, null, null, null, null]) + def pipeline = [new Document('$match', 1)] + def changeStreamIterable = new ChangeStreamIterableImpl(null, namespace, codecRegistry, readPreference, readConcern, + executor, pipeline, Document, ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS) + + when: 'default input should be as expected' + changeStreamIterable.iterator() + + def codec = new RawBsonDocumentCodec() + def operation = executor.getReadOperation() as ChangeStreamOperation + def readPreference = executor.getReadPreference() + + then: + expect operation, isTheSameAs(new ChangeStreamOperation(namespace, + FullDocument.DEFAULT, FullDocumentBeforeChange.DEFAULT, [BsonDocument.parse('{$match: 1}')], codec, + ChangeStreamLevel.COLLECTION) + .retryReads(true)) + readPreference == secondary() + + when: 'overriding initial options' + def resumeToken = RawBsonDocument.parse('{_id: {a: 1}}') + def startAtOperationTime = new BsonTimestamp(99) + changeStreamIterable.collation(collation) + .maxAwaitTime(101, MILLISECONDS) + .fullDocument(FullDocument.UPDATE_LOOKUP) + .fullDocumentBeforeChange(FullDocumentBeforeChange.WHEN_AVAILABLE) + .resumeAfter(resumeToken).startAtOperationTime(startAtOperationTime) + .startAfter(resumeToken).iterator() + + operation = executor.getReadOperation() as ChangeStreamOperation + + then: 'should use the overrides' + expect operation, isTheSameAs(new ChangeStreamOperation(namespace, + FullDocument.UPDATE_LOOKUP, FullDocumentBeforeChange.WHEN_AVAILABLE, [BsonDocument.parse('{$match: 1}')], codec, + ChangeStreamLevel.COLLECTION) + 
.retryReads(true) + .collation(collation) + .resumeAfter(resumeToken) + .startAtOperationTime(startAtOperationTime) + .startAfter(resumeToken)) + } + + def 'should use ClientSession'() { + given: + def batchCursor = Stub(AggregateResponseBatchCursor) { + _ * hasNext() >> { false } + } + def executor = new TestOperationExecutor([batchCursor, batchCursor]) + def changeStreamIterable = new ChangeStreamIterableImpl(clientSession, namespace, codecRegistry, readPreference, readConcern, + executor, [], Document, ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS) + + when: + changeStreamIterable.first() + + then: + executor.getClientSession() == clientSession + + when: + changeStreamIterable.iterator() + + then: + executor.getClientSession() == clientSession + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should handle exceptions correctly'() { + given: + def altRegistry = fromProviders([new ValueCodecProvider(), new BsonValueCodecProvider()]) + def executor = new TestOperationExecutor([new MongoException('failure')]) + def pipeline = [new BsonDocument('$match', new BsonInt32(1))] + def changeStreamIterable = new ChangeStreamIterableImpl(null, namespace, codecRegistry, readPreference, readConcern, + executor, pipeline, BsonDocument, ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS) + + when: 'The operation fails with an exception' + changeStreamIterable.iterator() + + then: + thrown(MongoException) + + when: 'a codec is missing' + new ChangeStreamIterableImpl(null, namespace, altRegistry, readPreference, readConcern, executor, pipeline, Document, + ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS).iterator() + + then: + thrown(CodecConfigurationException) + + when: 'pipeline contains null' + new ChangeStreamIterableImpl(null, namespace, codecRegistry, readPreference, readConcern, executor, [null], Document, + ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS).iterator() + + then: + thrown(IllegalArgumentException) + } + + def 'should follow the MongoIterable interface as expected'() { + given: + def count = 0 + def cannedResults = ['{_id: {_data: 1}}', '{_id: {_data: 2}}', '{_id: {_data: 3}}'].collect { + RawBsonDocument.parse(it) + } + def executor = new TestOperationExecutor([cursor(cannedResults.collect()), cursor(cannedResults.collect()), + cursor(cannedResults.collect()), cursor(cannedResults.collect())]) + def mongoIterable = new ChangeStreamIterableImpl(null, namespace, codecRegistry, readPreference, readConcern, executor, [], + Document, ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS) + + when: + def results = mongoIterable.first() + + then: + results.getResumeToken().equals(cannedResults[0].getDocument('_id')) + + when: + mongoIterable.forEach(new Consumer<ChangeStreamDocument<Document>>() { + @Override + void accept(ChangeStreamDocument<Document> result) { + count++ + } + }) + + then: + count == 3 + + when: + def target = [] + mongoIterable.into(target) + + then: + target[0].getResumeToken().equals(cannedResults[0].getDocument('_id')) + target[1].getResumeToken().equals(cannedResults[1].getDocument('_id')) + target[2].getResumeToken().equals(cannedResults[2].getDocument('_id')) + + when: + target = [] + mongoIterable.map(new Function<ChangeStreamDocument<Document>, Integer>() { + @Override + Integer apply(ChangeStreamDocument<Document> document) { + document.getResumeToken().getInt32('_data').intValue() + } + }).into(target) + + then: + target == [1, 2, 3] + } + + def 'should be able to return the raw results'() { + given: + def count = 0 + def cannedResults = ['{_id: {_data: 1}}', '{_id: {_data: 2}}', '{_id: {_data: 
3}}'].collect { RawBsonDocument.parse(it) } + def executor = new TestOperationExecutor([cursor(cannedResults.collect()), cursor(cannedResults.collect()), + cursor(cannedResults.collect()), cursor(cannedResults.collect()), + cursor(cannedResults.collect())]) + def mongoIterable = new ChangeStreamIterableImpl(null, namespace, codecRegistry, readPreference, readConcern, executor, [], + Document, ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS).withDocumentClass(RawBsonDocument) + + when: + def results = mongoIterable.first() + + then: + results == cannedResults[0] + + when: + mongoIterable.forEach(new Consumer() { + @Override + void accept(final RawBsonDocument rawBsonDocument) { + count++ + } + }) + + then: + count == 3 + + when: + def target = [] + mongoIterable.into(target) + + then: + target == cannedResults + + when: + target = [] + mongoIterable.map(new Function() { + @Override + Integer apply(BsonDocument document) { + document.getDocument('_id').getInt32('_data').intValue() + } + }).into(target) + + then: + target == [1, 2, 3] + } + + + def 'should get and set batchSize as expected'() { + when: + def batchSize = 5 + def mongoIterable = new ChangeStreamIterableImpl(null, namespace, codecRegistry, readPreference, readConcern, + Stub(OperationExecutor), [BsonDocument.parse('{$match: 1}')], BsonDocument, ChangeStreamLevel.COLLECTION, true, + TIMEOUT_SETTINGS) + + then: + mongoIterable.getBatchSize() == null + + when: + mongoIterable.batchSize(batchSize) + + then: + mongoIterable.getBatchSize() == batchSize + } + + def cursor(List cannedResults) { + Stub(AggregateResponseBatchCursor) { + def counter = 0 + def results + def getResult = { + counter++ + results = counter == 1 ? cannedResults : null + results + } + next() >> { + getResult() + } + hasNext() >> { + counter == 0 + } + } + } + +} diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/ClientSessionBindingSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/ClientSessionBindingSpecification.groovy new file mode 100644 index 00000000000..49332bc8ed3 --- /dev/null +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/ClientSessionBindingSpecification.groovy @@ -0,0 +1,162 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.internal + +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import com.mongodb.client.ClientSession +import com.mongodb.internal.binding.ClusterBinding +import com.mongodb.internal.binding.ConnectionSource +import com.mongodb.internal.binding.ReadWriteBinding +import com.mongodb.internal.connection.Cluster +import com.mongodb.internal.session.ClientSessionContext +import spock.lang.Specification + +import static com.mongodb.ClusterFixture.OPERATION_CONTEXT + +class ClientSessionBindingSpecification extends Specification { + def 'should return the session context from the binding'() { + given: + def session = Stub(ClientSession) + def wrappedBinding = Stub(ClusterBinding) { + getOperationContext() >> OPERATION_CONTEXT + } + def binding = new ClientSessionBinding(session, false, wrappedBinding) + + when: + def context = binding.getOperationContext().getSessionContext() + + then: + (context as ClientSessionContext).getClientSession() == session + } + + def 'should return the session context from the connection source'() { + given: + def session = Stub(ClientSession) + def wrappedBinding = Mock(ClusterBinding) { + getOperationContext() >> OPERATION_CONTEXT + } + def binding = new ClientSessionBinding(session, false, wrappedBinding) + + when: + def readConnectionSource = binding.getReadConnectionSource() + def context = readConnectionSource.getOperationContext().getSessionContext() + + then: + (context as ClientSessionContext).getClientSession() == session + 1 * wrappedBinding.getReadConnectionSource() >> { + Stub(ConnectionSource) + } + + when: + def writeConnectionSource = binding.getWriteConnectionSource() + context = writeConnectionSource.getOperationContext().getSessionContext() + + then: + (context as ClientSessionContext).getClientSession() == session + 1 * wrappedBinding.getWriteConnectionSource() >> { + Stub(ConnectionSource) + } + } + + def 'should close client session when binding reference count drops to zero if it is owned by the binding'() { + given: + def session = Mock(ClientSession) + def wrappedBinding = createStubBinding() + def binding = new ClientSessionBinding(session, true, wrappedBinding) + binding.retain() + + when: + binding.release() + + then: + 0 * session.close() + + when: + binding.release() + + then: + 1 * session.close() + } + + def 'should close client session when binding reference count drops to zero due to connection source if it is owned by the binding'() { + given: + def session = Mock(ClientSession) + def wrappedBinding = createStubBinding() + def binding = new ClientSessionBinding(session, true, wrappedBinding) + def readConnectionSource = binding.getReadConnectionSource() + def writeConnectionSource = binding.getWriteConnectionSource() + + when: + binding.release() + + then: + 0 * session.close() + + when: + writeConnectionSource.release() + + then: + 0 * session.close() + + when: + readConnectionSource.release() + + then: + 1 * session.close() + } + + def 'should not close client session when binding reference count drops to zero if it is not owned by the binding'() { + given: + def session = Mock(ClientSession) + def wrappedBinding = createStubBinding() + def binding = new ClientSessionBinding(session, false, wrappedBinding) + binding.retain() + + when: + binding.release() + + then: + 0 * session.close() + + when: + binding.release() + + then: + 0 * session.close() + } + + def 'owned session is implicit'() { + given: + def session = Mock(ClientSession) + def wrappedBinding = createStubBinding() + + 
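+ // A session the binding owns is treated as an implicit session (one the driver created + // itself, rather than one supplied by the application), so isImplicitSession() is expected + // to mirror the ownsSession flag passed to the ClientSessionBinding below.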
when: + def binding = new ClientSessionBinding(session, ownsSession, wrappedBinding) + + then: + binding.getOperationContext().getSessionContext().isImplicitSession() == ownsSession + + where: + ownsSession << [true, false] + } + + private ReadWriteBinding createStubBinding() { + def cluster = Stub(Cluster) + new ClusterBinding(cluster, ReadPreference.primary(), ReadConcern.DEFAULT, OPERATION_CONTEXT) + } +} diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/CryptConnectionSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/CryptConnectionSpecification.groovy new file mode 100644 index 00000000000..8a38f966754 --- /dev/null +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/CryptConnectionSpecification.groovy @@ -0,0 +1,233 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.internal + +import com.mongodb.ClusterFixture +import com.mongodb.ReadPreference +import com.mongodb.ServerAddress +import com.mongodb.connection.ClusterId +import com.mongodb.connection.ConnectionDescription +import com.mongodb.connection.ConnectionId +import com.mongodb.connection.ServerId +import com.mongodb.internal.TimeoutContext +import com.mongodb.internal.bulk.InsertRequest +import com.mongodb.internal.bulk.WriteRequestWithIndex +import com.mongodb.internal.connection.Connection +import com.mongodb.internal.connection.MessageSequences +import com.mongodb.internal.connection.SplittablePayload +import com.mongodb.internal.time.Timeout +import com.mongodb.internal.validator.NoOpFieldNameValidator +import org.bson.BsonArray +import org.bson.BsonBinary +import org.bson.BsonBinaryReader +import org.bson.BsonBinaryWriter +import org.bson.BsonDocument +import org.bson.BsonDocumentWrapper +import org.bson.BsonInt32 +import org.bson.BsonString +import org.bson.Document +import org.bson.RawBsonDocument +import org.bson.codecs.BsonDocumentCodec +import org.bson.codecs.DecoderContext +import org.bson.codecs.DocumentCodec +import org.bson.codecs.EncoderContext +import org.bson.codecs.RawBsonDocumentCodec +import org.bson.io.BasicOutputBuffer +import spock.lang.Specification + +import static com.mongodb.connection.ServerType.STANDALONE +import static com.mongodb.internal.connection.SplittablePayload.Type.INSERT + +class CryptConnectionSpecification extends Specification { + + def 'should encrypt and decrypt a command'() { + given: + def wrappedConnection = Mock(Connection) + def crypt = Mock(Crypt) + def cryptConnection = new CryptConnection(wrappedConnection, crypt) + def codec = new DocumentCodec() + def timeoutContext = Mock(TimeoutContext) + def operationContext = ClusterFixture.OPERATION_CONTEXT.withTimeoutContext(timeoutContext) + def operationTimeout = Mock(Timeout) + timeoutContext.getTimeout() >> operationTimeout + + def encryptedCommand = toRaw(new BsonDocument('find', new BsonString('test')) + .append('ssid', new BsonBinary(6 as byte, new byte[10]))) + + def encryptedResponse = 
toRaw(new BsonDocument('ok', new BsonInt32(1)) + .append('cursor', + new BsonDocument('firstBatch', + new BsonArray([new BsonDocument('_id', new BsonInt32(1)) + .append('ssid', new BsonBinary(6 as byte, new byte[10]))])))) + + def decryptedResponse = toRaw(new BsonDocument('ok', new BsonInt32(1)) + .append('cursor', new BsonDocument('firstBatch', + new BsonArray([new BsonDocument('_id', new BsonInt32(1)) + .append('ssid', new BsonString('555-55-5555'))])))) + + + when: + + def response = cryptConnection.command('db', + new BsonDocumentWrapper(new Document('find', 'test') + .append('filter', new Document('ssid', '555-55-5555')), codec), + NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), codec, operationContext) + + then: + _ * wrappedConnection.getDescription() >> { + new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())), 8, STANDALONE, + 1000, 1024 * 16_000, 1024 * 48_000, []) + } + 1 * crypt.encrypt('db', toRaw(new BsonDocument('find', new BsonString('test')) + .append('filter', new BsonDocument('ssid', new BsonString('555-55-5555')))), operationTimeout) >> { + encryptedCommand + } + 1 * wrappedConnection.command('db', encryptedCommand, _ as NoOpFieldNameValidator, ReadPreference.primary(), + _ as RawBsonDocumentCodec, operationContext, true, MessageSequences.EmptyMessageSequences.INSTANCE) >> { + encryptedResponse + } + 1 * crypt.decrypt(encryptedResponse, operationTimeout) >> { + decryptedResponse + } + response == rawToDocument(decryptedResponse) + } + + def 'should split at 2 MiB'() { + given: + def wrappedConnection = Mock(Connection) + def crypt = Mock(Crypt) + def cryptConnection = new CryptConnection(wrappedConnection, crypt) + def codec = new DocumentCodec() + def bytes = new byte[2097152 - 85] + def payload = new SplittablePayload(INSERT, [ + new BsonDocumentWrapper(new Document('_id', 1).append('ssid', '555-55-5555').append('b', bytes), codec), + new BsonDocumentWrapper(new Document('_id', 2).append('ssid', '666-66-6666').append('b', bytes), codec) + ].withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) }, true, NoOpFieldNameValidator.INSTANCE) + def encryptedCommand = toRaw(new BsonDocument('insert', new BsonString('test')).append('documents', new BsonArray( + [ + new BsonDocument('_id', new BsonInt32(1)) + .append('ssid', new BsonBinary(6 as byte, new byte[10])) + .append('b', new BsonBinary(bytes)) + ]))) + + def encryptedResponse = toRaw(new BsonDocument('ok', new BsonInt32(1))) + def decryptedResponse = encryptedResponse + def timeoutContext = Mock(TimeoutContext) + def operationContext = ClusterFixture.OPERATION_CONTEXT.withTimeoutContext(timeoutContext) + def operationTimeout = Mock(Timeout) + timeoutContext.getTimeout() >> operationTimeout + + when: + def response = cryptConnection.command('db', + new BsonDocumentWrapper(new Document('insert', 'test'), codec), + NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), new BsonDocumentCodec(), operationContext, true, payload) + + then: + _ * wrappedConnection.getDescription() >> { + new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())), 8, STANDALONE, + 1000, 1024 * 16_000, 1024 * 48_000, []) + } + 1 * crypt.encrypt('db', + toRaw(new BsonDocument('insert', new BsonString('test')).append('documents', + new BsonArray([ + new BsonDocument('_id', new BsonInt32(1)) + .append('ssid', new BsonString('555-55-5555')) + .append('b', new BsonBinary(bytes)) + ]))), operationTimeout) >> { + encryptedCommand + 
} + 1 * wrappedConnection.command('db', encryptedCommand, _ as NoOpFieldNameValidator, ReadPreference.primary(), + _ as RawBsonDocumentCodec, operationContext, true, MessageSequences.EmptyMessageSequences.INSTANCE) >> { + encryptedResponse + } + 1 * crypt.decrypt(encryptedResponse, operationTimeout) >> { + decryptedResponse + } + response == rawToBsonDocument(decryptedResponse) + payload.getPosition() == 1 + } + + def 'should split at maxBatchCount'() { + given: + def wrappedConnection = Mock(Connection) + def crypt = Mock(Crypt) + def cryptConnection = new CryptConnection(wrappedConnection, crypt) + def codec = new DocumentCodec() + def maxBatchCount = 2 + def payload = new SplittablePayload(INSERT, [ + new BsonDocumentWrapper(new Document('_id', 1), codec), + new BsonDocumentWrapper(new Document('_id', 2), codec), + new BsonDocumentWrapper(new Document('_id', 3), codec) + ].withIndex().collect { doc, i -> new WriteRequestWithIndex(new InsertRequest(doc), i) }, true, NoOpFieldNameValidator.INSTANCE) + def encryptedCommand = toRaw(new BsonDocument('insert', new BsonString('test')).append('documents', new BsonArray( + [ + new BsonDocument('_id', new BsonInt32(1)), + new BsonDocument('_id', new BsonInt32(2)), + + ]))) + + def encryptedResponse = toRaw(new BsonDocument('ok', new BsonInt32(1))) + def decryptedResponse = encryptedResponse + def timeoutContext = Mock(TimeoutContext) + def operationContext = ClusterFixture.OPERATION_CONTEXT.withTimeoutContext(timeoutContext) + def operationTimeout = Mock(Timeout) + timeoutContext.getTimeout() >> operationTimeout + + when: + def response = cryptConnection.command('db', + new BsonDocumentWrapper(new Document('insert', 'test'), codec), + NoOpFieldNameValidator.INSTANCE, ReadPreference.primary(), new BsonDocumentCodec(), operationContext, true, payload) + + then: + _ * wrappedConnection.getDescription() >> { + new ConnectionDescription(new ConnectionId(new ServerId(new ClusterId(), new ServerAddress())), 8, STANDALONE, + maxBatchCount, 1024 * 16_000, 1024 * 48_000, []) + } + 1 * crypt.encrypt('db', + toRaw(new BsonDocument('insert', new BsonString('test')).append('documents', + new BsonArray([ + new BsonDocument('_id', new BsonInt32(1)), + new BsonDocument('_id', new BsonInt32(2)) + ]))), operationTimeout) >> { + encryptedCommand + } + 1 * wrappedConnection.command('db', encryptedCommand, _ as NoOpFieldNameValidator, ReadPreference.primary(), + _ as RawBsonDocumentCodec, operationContext, true, MessageSequences.EmptyMessageSequences.INSTANCE) >> { + encryptedResponse + } + 1 * crypt.decrypt(encryptedResponse, operationTimeout) >> { + decryptedResponse + } + response == rawToBsonDocument(decryptedResponse) + payload.getPosition() == 2 + } + + RawBsonDocument toRaw(BsonDocument document) { + def buffer = new BasicOutputBuffer() + def writer = new BsonBinaryWriter(buffer) + new BsonDocumentCodec().encode(writer, document, EncoderContext.builder().build()) + new RawBsonDocument(buffer.getInternalBuffer(), 0, buffer.getSize()) + } + + Document rawToDocument(RawBsonDocument document) { + new DocumentCodec().decode(new BsonBinaryReader(document.getByteBuffer().asNIO()), DecoderContext.builder().build()) + } + + BsonDocument rawToBsonDocument(RawBsonDocument document) { + new BsonDocumentCodec().decode(new BsonBinaryReader(document.getByteBuffer().asNIO()), DecoderContext.builder().build()) + } +} diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/DistinctIterableSpecification.groovy 
b/driver-sync/src/test/unit/com/mongodb/client/internal/DistinctIterableSpecification.groovy new file mode 100644 index 00000000000..82c4bf9a037 --- /dev/null +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/DistinctIterableSpecification.groovy @@ -0,0 +1,205 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.internal + + +import com.mongodb.Function +import com.mongodb.MongoException +import com.mongodb.MongoNamespace +import com.mongodb.ReadConcern +import com.mongodb.client.ClientSession +import com.mongodb.client.model.Collation +import com.mongodb.internal.operation.BatchCursor +import com.mongodb.internal.operation.DistinctOperation +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.Document +import org.bson.codecs.BsonValueCodecProvider +import org.bson.codecs.DocumentCodec +import org.bson.codecs.DocumentCodecProvider +import org.bson.codecs.ValueCodecProvider +import org.bson.codecs.configuration.CodecConfigurationException +import spock.lang.Specification + +import java.util.function.Consumer + +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS +import static com.mongodb.CustomMatchers.isTheSameAs +import static com.mongodb.ReadPreference.secondary +import static java.util.concurrent.TimeUnit.MILLISECONDS +import static org.bson.codecs.configuration.CodecRegistries.fromProviders +import static spock.util.matcher.HamcrestSupport.expect + +class DistinctIterableSpecification extends Specification { + + def namespace = new MongoNamespace('db', 'coll') + def codecRegistry = fromProviders([new ValueCodecProvider(), new DocumentCodecProvider(), new BsonValueCodecProvider()]) + def readPreference = secondary() + def readConcern = ReadConcern.MAJORITY + def collation = Collation.builder().locale('en').build() + + def 'should build the expected DistinctOperation'() { + given: + def executor = new TestOperationExecutor([null, null]) + def distinctIterable = new DistinctIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, + executor, 'field', new BsonDocument(), true, TIMEOUT_SETTINGS) + + when: 'default input should be as expected' + distinctIterable.iterator() + + def operation = executor.getReadOperation() as DistinctOperation + def readPreference = executor.getReadPreference() + + then: + expect operation, isTheSameAs(new DistinctOperation(namespace, 'field', new DocumentCodec()) + .filter(new BsonDocument()).retryReads(true)) + readPreference == secondary() + + when: 'overriding initial options' + distinctIterable.filter(new Document('field', 1)).maxTime(100, MILLISECONDS).batchSize(99).collation(collation).iterator() + + operation = executor.getReadOperation() as DistinctOperation + + then: 'should use the overrides' + expect operation, isTheSameAs( + new DistinctOperation(namespace, 'field', new DocumentCodec()) + .filter(new BsonDocument('field', new BsonInt32(1))).collation(collation).retryReads(true)) + } + + def 
'should use ClientSession'() { + given: + def batchCursor = Stub(BatchCursor) { + _ * hasNext() >> { false } + } + def executor = new TestOperationExecutor([batchCursor, batchCursor]) + def distinctIterable = new DistinctIterableImpl(clientSession, namespace, Document, Document, codecRegistry, readPreference, + readConcern, executor, 'field', new BsonDocument(), true, TIMEOUT_SETTINGS) + + when: + distinctIterable.first() + + then: + executor.getClientSession() == clientSession + + when: + distinctIterable.iterator() + + then: + executor.getClientSession() == clientSession + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should handle exceptions correctly'() { + given: + def codecRegistry = fromProviders([new ValueCodecProvider(), new BsonValueCodecProvider()]) + def executor = new TestOperationExecutor([new MongoException('failure')]) + def distinctIterable = new DistinctIterableImpl(null, namespace, Document, BsonDocument, codecRegistry, readPreference, + readConcern, executor, 'field', new BsonDocument(), true, TIMEOUT_SETTINGS) + + when: 'The operation fails with an exception' + distinctIterable.iterator() + + then: 'the iterator should throw the exception' + thrown(MongoException) + + when: 'a codec is missing' + distinctIterable.filter(new Document('field', 1)).iterator() + + then: + thrown(CodecConfigurationException) + } + + def 'should follow the MongoIterable interface as expected'() { + given: + def cannedResults = [new Document('_id', 1), new Document('_id', 2), new Document('_id', 3)] + def cursor = { + def batchToReturn = cannedResults.collect() + Stub(BatchCursor) { + def count = 0 + def results + def getResult = { + count++ + results = count == 1 ? batchToReturn : null + results + } + next() >> { + getResult() + } + hasNext() >> { + count == 0 + } + } + } + def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()]) + def mongoIterable = new DistinctIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, ReadConcern.LOCAL, + executor, 'field', new BsonDocument(), true, TIMEOUT_SETTINGS) + + when: + def results = mongoIterable.first() + + then: + results == cannedResults[0] + + when: + def count = 0 + mongoIterable.forEach(new Consumer() { + @Override + void accept(Document document) { + count++ + } + }) + + then: + count == 3 + + when: + def target = [] + mongoIterable.into(target) + + then: + target == cannedResults + + when: + target = [] + mongoIterable.map(new Function() { + @Override + Integer apply(Document document) { + document.getInteger('_id') + } + }).into(target) + + then: + target == [1, 2, 3] + } + + def 'should get and set batchSize as expected'() { + when: + def batchSize = 5 + def mongoIterable = new DistinctIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, + Stub(OperationExecutor), 'field', new BsonDocument(), true, TIMEOUT_SETTINGS) + + then: + mongoIterable.getBatchSize() == null + + when: + mongoIterable.batchSize(batchSize) + + then: + mongoIterable.getBatchSize() == batchSize + } +} diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/FindIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/FindIterableSpecification.groovy new file mode 100644 index 00000000000..78ab9a3601b --- /dev/null +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/FindIterableSpecification.groovy @@ -0,0 +1,321 @@ +/* + * Copyright 2008-present MongoDB, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.internal + +import com.mongodb.CursorType +import com.mongodb.Function +import com.mongodb.MongoException +import com.mongodb.MongoNamespace +import com.mongodb.ReadConcern +import com.mongodb.client.ClientSession +import com.mongodb.client.model.Collation +import com.mongodb.internal.operation.BatchCursor +import com.mongodb.internal.operation.FindOperation +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonString +import org.bson.Document +import org.bson.codecs.BsonValueCodecProvider +import org.bson.codecs.DocumentCodec +import org.bson.codecs.DocumentCodecProvider +import org.bson.codecs.ValueCodecProvider +import org.bson.conversions.Bson +import spock.lang.Specification + +import java.util.function.Consumer + +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS +import static com.mongodb.CustomMatchers.isTheSameAs +import static com.mongodb.ReadPreference.secondary +import static java.util.concurrent.TimeUnit.MILLISECONDS +import static org.bson.codecs.configuration.CodecRegistries.fromProviders +import static spock.util.matcher.HamcrestSupport.expect + +class FindIterableSpecification extends Specification { + + def codecRegistry = fromProviders([new ValueCodecProvider(), new DocumentCodecProvider(), + new BsonValueCodecProvider()]) + def readPreference = secondary() + def readConcern = ReadConcern.MAJORITY + def namespace = new MongoNamespace('db', 'coll') + def collation = Collation.builder().locale('en').build() + + def 'should build the expected findOperation'() { + given: + def executor = new TestOperationExecutor([null, null, null]) + def findIterable = new FindIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, + executor, new Document('filter', 1), true, TIMEOUT_SETTINGS) + .sort(new Document('sort', 1)) + .projection(new Document('projection', 1)) + .batchSize(100) + .limit(100) + .skip(10) + .cursorType(CursorType.NonTailable) + .noCursorTimeout(false) + .partial(false) + .collation(null) + .comment(new BsonString('my comment')) + .hintString('a_1') + .min(new Document('min', 1)) + .max(new Document('max', 1)) + .returnKey(false) + .showRecordId(false) + .allowDiskUse(false) + + when: 'default input should be as expected' + findIterable.iterator() + + def operation = executor.getReadOperation() as FindOperation + def readPreference = executor.getReadPreference() + + then: + expect operation, isTheSameAs(new FindOperation(namespace, new DocumentCodec()) + .filter(new BsonDocument('filter', new BsonInt32(1))) + .sort(new BsonDocument('sort', new BsonInt32(1))) + .projection(new BsonDocument('projection', new BsonInt32(1))) + .batchSize(100) + .limit(100) + .skip(10) + .cursorType(CursorType.NonTailable) + .comment(new BsonString('my comment')) + .hint(new BsonString('a_1')) + .min(new BsonDocument('min', new BsonInt32(1))) + .max(new BsonDocument('max', new BsonInt32(1))) + .returnKey(false) + 
.showRecordId(false) + .allowDiskUse(false) + .retryReads(true) + ) + readPreference == secondary() + + when: 'overriding initial options' + findIterable.filter(new Document('filter', 2)) + .sort(new Document('sort', 2)) + .projection(new Document('projection', 2)) + .maxTime(101, MILLISECONDS) + .maxAwaitTime(1001, MILLISECONDS) + .batchSize(99) + .limit(99) + .skip(9) + .cursorType(CursorType.Tailable) + .noCursorTimeout(true) + .partial(true) + .collation(collation) + .comment('alt comment') + .hint(new Document('hint', 2)) + .min(new Document('min', 2)) + .max(new Document('max', 2)) + .returnKey(true) + .showRecordId(true) + .allowDiskUse(true) + .iterator() + + operation = executor.getReadOperation() as FindOperation + + then: 'should use the overrides' + expect operation, isTheSameAs( + new FindOperation(namespace, new DocumentCodec()) + .filter(new BsonDocument('filter', new BsonInt32(2))) + .sort(new BsonDocument('sort', new BsonInt32(2))) + .projection(new BsonDocument('projection', new BsonInt32(2))) + .batchSize(99) + .limit(99) + .skip(9) + .cursorType(CursorType.Tailable) + .noCursorTimeout(true) + .partial(true) + .collation(collation) + .comment(new BsonString('alt comment')) + .hint(new BsonDocument('hint', new BsonInt32(2))) + .min(new BsonDocument('min', new BsonInt32(2))) + .max(new BsonDocument('max', new BsonInt32(2))) + .returnKey(true) + .showRecordId(true) + .allowDiskUse(true) + .retryReads(true) + ) + + when: 'passing nulls to nullable methods' + new FindIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, + executor, new Document('filter', 1), true, TIMEOUT_SETTINGS) + .filter(null as Bson) + .collation(null) + .projection(null) + .sort(null as Bson) + .comment(null as BsonString) + .hint(null) + .max(null as Bson) + .min(null as Bson) + .iterator() + + operation = executor.getReadOperation() as FindOperation + + then: 'should set an empty doc for the filter' + expect operation, isTheSameAs(new FindOperation(namespace, new DocumentCodec()) + .filter(new BsonDocument()).retryReads(true)) + } + + def 'should use ClientSession'() { + given: + def batchCursor = Stub(BatchCursor) { + _ * hasNext() >> { false } + } + def executor = new TestOperationExecutor([batchCursor, batchCursor]) + def findIterable = new FindIterableImpl(clientSession, namespace, Document, Document, codecRegistry, readPreference, readConcern, + executor, new Document('filter', 1), true, TIMEOUT_SETTINGS) + + when: + findIterable.first() + + then: + executor.getClientSession() == clientSession + + when: + findIterable.iterator() + + then: + executor.getClientSession() == clientSession + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should handle mixed types'() { + given: + def executor = new TestOperationExecutor([null, null]) + def findIterable = new FindIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, + executor, new Document('filter', 1), true, TIMEOUT_SETTINGS) + + when: + findIterable.filter(new Document('filter', 1)) + .sort(new BsonDocument('sort', new BsonInt32(1))) + .iterator() + + def operation = executor.getReadOperation() as FindOperation + + then: + expect operation, isTheSameAs(new FindOperation(namespace, new DocumentCodec()) + .filter(new BsonDocument('filter', new BsonInt32(1))) + .sort(new BsonDocument('sort', new BsonInt32(1))) + .cursorType(CursorType.NonTailable) + .retryReads(true) + ) + } + + def 'should follow the MongoIterable interface as expected'() { + given: + 
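+        // cursor() is a factory closure: each call hands the executor a fresh single-batch stub, so every when: block below starts from an unconsumed cursor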
def cannedResults = [new Document('_id', 1), new Document('_id', 2), new Document('_id', 3)] + def cursor = { + Stub(BatchCursor) { + def batchToReturn = cannedResults.collect() + def count = 0 + def results + def getResult = { + count++ + results = count == 1 ? batchToReturn : null + results + } + next() >> { + getResult() + } + hasNext() >> { + count == 0 + } + } + } + def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()]) + def mongoIterable = new FindIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, + executor, new Document(), true, TIMEOUT_SETTINGS) + + when: + def results = mongoIterable.first() + + then: + results == cannedResults[0] + + when: + def count = 0 + mongoIterable.forEach(new Consumer() { + @Override + void accept(Document document) { + count++ + } + }) + + then: + count == 3 + + when: + def target = [] + mongoIterable.into(target) + + then: + target == cannedResults + + when: + target = [] + mongoIterable.map(new Function() { + @Override + Integer apply(Document document) { + document.getInteger('_id') + } + }).into(target) + + then: + target == [1, 2, 3] + } + + def 'should get and set batchSize as expected'() { + when: + def batchSize = 5 + def mongoIterable = new FindIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, + readConcern, Stub(OperationExecutor), new Document(), true, TIMEOUT_SETTINGS) + + then: + mongoIterable.getBatchSize() == null + + when: + mongoIterable.batchSize(batchSize) + + then: + mongoIterable.getBatchSize() == batchSize + } + + // Really testing MongoIterableImpl#forEach, but doing it once here since that class is abstract + def 'forEach should close cursor when there is an exception during iteration'() { + given: + def cursor = Mock(BatchCursor) { + hasNext() >> { + throw new MongoException('') + } + } + def executor = new TestOperationExecutor([cursor]) + def mongoIterable = new FindIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, + executor, new Document(), true, TIMEOUT_SETTINGS) + + when: + mongoIterable.forEach(new Consumer() { + @Override + void accept(Document document) { + } + }) + + then: + thrown(MongoException) + 1 * cursor.close() + } +} diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/ListCollectionsIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/ListCollectionsIterableSpecification.groovy new file mode 100644 index 00000000000..12556430167 --- /dev/null +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/ListCollectionsIterableSpecification.groovy @@ -0,0 +1,204 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.internal + + +import com.mongodb.Function +import com.mongodb.client.ClientSession +import com.mongodb.internal.operation.BatchCursor +import com.mongodb.internal.operation.ListCollectionsOperation +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.Document +import org.bson.codecs.BsonValueCodecProvider +import org.bson.codecs.DocumentCodec +import org.bson.codecs.DocumentCodecProvider +import org.bson.codecs.ValueCodecProvider +import spock.lang.Specification + +import java.util.concurrent.TimeUnit +import java.util.function.Consumer + +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS +import static com.mongodb.CustomMatchers.isTheSameAs +import static com.mongodb.ReadPreference.secondary +import static org.bson.codecs.configuration.CodecRegistries.fromProviders +import static spock.util.matcher.HamcrestSupport.expect + +class ListCollectionsIterableSpecification extends Specification { + + def codecRegistry = fromProviders([new ValueCodecProvider(), new DocumentCodecProvider(), + new BsonValueCodecProvider()]) + def readPreference = secondary() + + def 'should build the expected listCollectionOperation'() { + given: + def executor = new TestOperationExecutor([null, null, null, null]) + def listCollectionIterable = new ListCollectionsIterableImpl(null, 'db', false, Document, codecRegistry, + readPreference, executor, true, TIMEOUT_SETTINGS) + .filter(new Document('filter', 1)) + .batchSize(100) + def listCollectionNamesIterable = new ListCollectionsIterableImpl(null, 'db', true, Document, codecRegistry, + readPreference, executor, true, TIMEOUT_SETTINGS) + + when: 'default input should be as expected' + listCollectionIterable.iterator() + + def operation = executor.getReadOperation() as ListCollectionsOperation + def readPreference = executor.getReadPreference() + + then: + expect operation, isTheSameAs(new ListCollectionsOperation('db', new DocumentCodec()) + .filter(new BsonDocument('filter', new BsonInt32(1))).batchSize(100) + .retryReads(true) + .authorizedCollections(false)) + readPreference == secondary() + + when: 'overriding initial options' + listCollectionIterable.filter(new Document('filter', 2)).batchSize(99).maxTime(100, TimeUnit.MILLISECONDS).iterator() + + operation = executor.getReadOperation() as ListCollectionsOperation + + then: 'should use the overrides' + expect operation, isTheSameAs(new ListCollectionsOperation('db', new DocumentCodec()) + .filter(new BsonDocument('filter', new BsonInt32(2))).batchSize(99) + .retryReads(true)) + + when: 'requesting collection names only' + listCollectionNamesIterable.iterator() + + operation = executor.getReadOperation() as ListCollectionsOperation + + then: 'should create operation with nameOnly' + expect operation, isTheSameAs(new ListCollectionsOperation('db', new DocumentCodec()).nameOnly(true) + .retryReads(true)) + + when: 'requesting `authorizedCollections`' + listCollectionNamesIterable.authorizedCollections(true).iterator() + operation = executor.getReadOperation() as ListCollectionsOperation + + then: 'should create operation with `authorizedCollections`' + expect operation, isTheSameAs(new ListCollectionsOperation('db', new DocumentCodec()) + .authorizedCollections(true) + .nameOnly(true) + .retryReads(true)) + } + + def 'should use ClientSession'() { + given: + def batchCursor = Stub(BatchCursor) { + _ * hasNext() >> { false } + } + def executor = new TestOperationExecutor([batchCursor, batchCursor]) + def listCollectionIterable = new 
ListCollectionsIterableImpl(clientSession, 'db', false, Document, codecRegistry, + readPreference, executor, true, TIMEOUT_SETTINGS) + + when: + listCollectionIterable.first() + + then: + executor.getClientSession() == clientSession + + when: + listCollectionIterable.iterator() + + then: + executor.getClientSession() == clientSession + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should follow the MongoIterable interface as expected'() { + given: + def cannedResults = [new Document('_id', 1), new Document('_id', 2), new Document('_id', 3)] + def cursor = { + def batchToReturn = cannedResults.collect() + Stub(BatchCursor) { + def count = 0 + def results + def getResult = { + count++ + results = count == 1 ? batchToReturn : null + results + } + next() >> { + getResult() + } + hasNext() >> { + count == 0 + } + } + } + def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()]) + def mongoIterable = new ListCollectionsIterableImpl(null, 'db', false, Document, codecRegistry, readPreference, + executor, true, TIMEOUT_SETTINGS) + + when: + def results = mongoIterable.first() + + then: + results == cannedResults[0] + + when: + def count = 0 + mongoIterable.forEach(new Consumer() { + @Override + void accept(Document document) { + count++ + } + }) + + then: + count == 3 + + when: + def target = [] + mongoIterable.into(target) + + then: + target == cannedResults + + when: + target = [] + mongoIterable.map(new Function() { + @Override + Integer apply(Document document) { + document.getInteger('_id') + } + }).into(target) + + then: + target == [1, 2, 3] + } + + def 'should get and set batchSize as expected'() { + when: + def batchSize = 5 + def mongoIterable = new ListCollectionsIterableImpl(null, 'db', false, Document, codecRegistry, readPreference, + Stub(OperationExecutor), true, TIMEOUT_SETTINGS) + + then: + mongoIterable.getBatchSize() == null + + when: + mongoIterable.batchSize(batchSize) + + then: + mongoIterable.getBatchSize() == batchSize + } +} diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/ListDatabasesIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/ListDatabasesIterableSpecification.groovy new file mode 100644 index 00000000000..627cc13ef3c --- /dev/null +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/ListDatabasesIterableSpecification.groovy @@ -0,0 +1,160 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.internal + + +import com.mongodb.Function +import com.mongodb.internal.operation.BatchCursor +import com.mongodb.internal.operation.ListDatabasesOperation +import org.bson.BsonDocument +import org.bson.Document +import org.bson.codecs.BsonValueCodecProvider +import org.bson.codecs.DocumentCodec +import org.bson.codecs.DocumentCodecProvider +import org.bson.codecs.ValueCodecProvider +import spock.lang.Specification + +import java.util.function.Consumer + +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS +import static com.mongodb.CustomMatchers.isTheSameAs +import static com.mongodb.ReadPreference.secondary +import static java.util.concurrent.TimeUnit.MILLISECONDS +import static org.bson.codecs.configuration.CodecRegistries.fromProviders +import static spock.util.matcher.HamcrestSupport.expect + +class ListDatabasesIterableSpecification extends Specification { + + def codecRegistry = fromProviders([new ValueCodecProvider(), new DocumentCodecProvider(), + new BsonValueCodecProvider()]) + def readPreference = secondary() + + def 'should build the expected listDatabasesOperation'() { + given: + def executor = new TestOperationExecutor([null, null, null]) + def listDatabaseIterable = new ListDatabasesIterableImpl(null, Document, codecRegistry, readPreference, executor, true, + TIMEOUT_SETTINGS) + + when: 'default input should be as expected' + listDatabaseIterable.iterator() + + def operation = executor.getReadOperation() as ListDatabasesOperation + def readPreference = executor.getReadPreference() + + then: + expect operation, isTheSameAs(new ListDatabasesOperation(new DocumentCodec()) + .retryReads(true)) + readPreference == secondary() + + when: 'overriding initial options' + listDatabaseIterable.maxTime(100, MILLISECONDS).filter(Document.parse('{a: 1}')).nameOnly(true).iterator() + + operation = executor.getReadOperation() as ListDatabasesOperation + + then: 'should use the overrides' + expect operation, isTheSameAs(new ListDatabasesOperation(new DocumentCodec()) + .filter(BsonDocument.parse('{a: 1}')).nameOnly(true).retryReads(true)) + + when: 'adding authorizedDatabasesOnly' + // nameOnly(true) set in the previous block persists on the same iterable + listDatabaseIterable.filter(Document.parse('{a: 1}')).authorizedDatabasesOnly(true).iterator() + + operation = executor.getReadOperation() as ListDatabasesOperation + + then: 'should use the overrides' + expect operation, isTheSameAs(new ListDatabasesOperation(new DocumentCodec()) + .filter(BsonDocument.parse('{a: 1}')).nameOnly(true).authorizedDatabasesOnly(true).retryReads(true)) + } + + def 'should follow the MongoIterable interface as expected'() { + given: + def cannedResults = [new Document('_id', 1), new Document('_id', 2), new Document('_id', 3)] + def cursor = { + def batchToReturn = cannedResults.collect() + Stub(BatchCursor) { + def count = 0 + def results + def getResult = { + count++ + results = count == 1 ?
batchToReturn : null + results + } + next() >> { + getResult() + } + hasNext() >> { + count == 0 + } + } + } + def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()]) + def mongoIterable = new ListDatabasesIterableImpl(null, Document, codecRegistry, readPreference, executor, + true, TIMEOUT_SETTINGS) + + when: + def results = mongoIterable.first() + + then: + results == cannedResults[0] + + when: + def count = 0 + mongoIterable.forEach(new Consumer() { + @Override + void accept(Document document) { + count++ + } + }) + + then: + count == 3 + + when: + def target = [] + mongoIterable.into(target) + + then: + target == cannedResults + + when: + target = [] + mongoIterable.map(new Function() { + @Override + Integer apply(Document document) { + document.getInteger('_id') + } + }).into(target) + + then: + target == [1, 2, 3] + } + + def 'should get and set batchSize as expected'() { + when: + def batchSize = 5 + def mongoIterable = new ListDatabasesIterableImpl(null, Document, codecRegistry, readPreference, + Stub(OperationExecutor), true, TIMEOUT_SETTINGS) + + then: + mongoIterable.getBatchSize() == null + + when: + mongoIterable.batchSize(batchSize) + + then: + mongoIterable.getBatchSize() == batchSize + } +} diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/ListIndexesIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/ListIndexesIterableSpecification.groovy new file mode 100644 index 00000000000..f7bad5189dd --- /dev/null +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/ListIndexesIterableSpecification.groovy @@ -0,0 +1,181 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.internal + + +import com.mongodb.Function +import com.mongodb.MongoNamespace +import com.mongodb.client.ClientSession +import com.mongodb.internal.operation.BatchCursor +import com.mongodb.internal.operation.ListIndexesOperation +import org.bson.Document +import org.bson.codecs.BsonValueCodecProvider +import org.bson.codecs.DocumentCodec +import org.bson.codecs.DocumentCodecProvider +import org.bson.codecs.ValueCodecProvider +import spock.lang.Specification + +import java.util.function.Consumer + +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS +import static com.mongodb.CustomMatchers.isTheSameAs +import static com.mongodb.ReadPreference.secondary +import static java.util.concurrent.TimeUnit.MILLISECONDS +import static org.bson.codecs.configuration.CodecRegistries.fromProviders +import static spock.util.matcher.HamcrestSupport.expect + +class ListIndexesIterableSpecification extends Specification { + + def namespace = new MongoNamespace('db', 'coll') + def codecRegistry = fromProviders([new ValueCodecProvider(), new DocumentCodecProvider(), + new BsonValueCodecProvider()]) + def readPreference = secondary() + + def 'should build the expected listIndexesOperation'() { + given: + def executor = new TestOperationExecutor([null, null]) + def listIndexesIterable = new ListIndexesIterableImpl(null, namespace, Document, codecRegistry, readPreference, + executor, true, TIMEOUT_SETTINGS).batchSize(100) + + when: 'default input should be as expected' + listIndexesIterable.iterator() + + def operation = executor.getReadOperation() as ListIndexesOperation + def readPreference = executor.getReadPreference() + + then: + expect operation, isTheSameAs(new ListIndexesOperation(namespace, new DocumentCodec()) + .batchSize(100).retryReads(true)) + readPreference == secondary() + + when: 'overriding initial options' + listIndexesIterable.batchSize(99) + .maxTime(100, MILLISECONDS) + .iterator() + + operation = executor.getReadOperation() as ListIndexesOperation + + then: 'should use the overrides' + expect operation, isTheSameAs(new ListIndexesOperation(namespace, new DocumentCodec()) + .batchSize(99).retryReads(true)) + } + + def 'should use ClientSession'() { + given: + def batchCursor = Stub(BatchCursor) { + _ * hasNext() >> { false } + } + def executor = new TestOperationExecutor([batchCursor, batchCursor]) + def listIndexesIterable = new ListIndexesIterableImpl(clientSession, namespace, Document, codecRegistry, readPreference, + executor, true, TIMEOUT_SETTINGS) + + when: + listIndexesIterable.first() + + then: + executor.getClientSession() == clientSession + + when: + listIndexesIterable.iterator() + + then: + executor.getClientSession() == clientSession + + where: + clientSession << [null, Stub(ClientSession)] + } + + + def 'should follow the MongoIterable interface as expected'() { + given: + def cannedResults = [new Document('_id', 1), new Document('_id', 2), new Document('_id', 3)] + def cursor = { + Stub(BatchCursor) { + def batchToReturn = cannedResults.collect() + def count = 0 + def results + def getResult = { + count++ + results = count == 1 ? 
batchToReturn : null + results + } + next() >> { + getResult() + } + hasNext() >> { + count == 0 + } + } + } + def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()]) + def mongoIterable = new ListIndexesIterableImpl(null, namespace, Document, codecRegistry, readPreference, + executor, true, TIMEOUT_SETTINGS) + + when: + def results = mongoIterable.first() + + then: + results == cannedResults[0] + + when: + def count = 0 + mongoIterable.forEach(new Consumer() { + @Override + void accept(Document document) { + count++ + } + }) + + then: + count == 3 + + when: + def target = [] + mongoIterable.into(target) + + then: + target == cannedResults + + when: + target = [] + mongoIterable.map(new Function() { + @Override + Integer apply(Document document) { + document.getInteger('_id') + } + }).into(target) + + then: + target == [1, 2, 3] + } + + def 'should get and set batchSize as expected'() { + when: + def batchSize = 5 + def mongoIterable = new ListIndexesIterableImpl(null, namespace, Document, codecRegistry, readPreference, + Stub(OperationExecutor), true, TIMEOUT_SETTINGS) + + then: + mongoIterable.getBatchSize() == null + + when: + mongoIterable.batchSize(batchSize) + + then: + mongoIterable.getBatchSize() == batchSize + } +} diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/MapReduceIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/MapReduceIterableSpecification.groovy new file mode 100644 index 00000000000..b2b7faa6b2a --- /dev/null +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/MapReduceIterableSpecification.groovy @@ -0,0 +1,333 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.internal + + +import com.mongodb.Function +import com.mongodb.MongoException +import com.mongodb.MongoNamespace +import com.mongodb.ReadConcern +import com.mongodb.WriteConcern +import com.mongodb.client.ClientSession +import com.mongodb.client.model.Collation +import com.mongodb.client.model.MapReduceAction +import com.mongodb.internal.operation.BatchCursor +import com.mongodb.internal.operation.FindOperation +import com.mongodb.internal.operation.MapReduceToCollectionOperation +import com.mongodb.internal.operation.MapReduceWithInlineResultsOperation +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.BsonJavaScript +import org.bson.Document +import org.bson.codecs.BsonValueCodecProvider +import org.bson.codecs.DocumentCodec +import org.bson.codecs.DocumentCodecProvider +import org.bson.codecs.ValueCodecProvider +import org.bson.codecs.configuration.CodecConfigurationException +import spock.lang.Specification + +import java.util.function.Consumer + +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS +import static com.mongodb.CustomMatchers.isTheSameAs +import static com.mongodb.ReadPreference.secondary +import static java.util.concurrent.TimeUnit.MILLISECONDS +import static org.bson.codecs.configuration.CodecRegistries.fromProviders +import static spock.util.matcher.HamcrestSupport.expect + +@SuppressWarnings('deprecation') +class MapReduceIterableSpecification extends Specification { + + def namespace = new MongoNamespace('db', 'coll') + def codecRegistry = fromProviders([new ValueCodecProvider(), new DocumentCodecProvider(), new BsonValueCodecProvider()]) + def readPreference = secondary() + def readConcern = ReadConcern.MAJORITY + def writeConcern = WriteConcern.MAJORITY + def collation = Collation.builder().locale('en').build() + + def 'should build the expected MapReduceWithInlineResultsOperation'() { + given: + def executor = new TestOperationExecutor([null, null]) + def mapReduceIterable = new MapReduceIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, + readConcern, writeConcern, executor, 'map', 'reduce', TIMEOUT_SETTINGS) + + when: 'default input should be as expected' + mapReduceIterable.iterator() + + def operation = (executor.getReadOperation() as MapReduceIterableImpl.WrappedMapReduceReadOperation).getOperation() + def readPreference = executor.getReadPreference() + + then: + expect operation, isTheSameAs(new MapReduceWithInlineResultsOperation(namespace, + new BsonJavaScript('map'), new BsonJavaScript('reduce'), new DocumentCodec()) + .verbose(true)) + readPreference == secondary() + + when: 'overriding initial options' + mapReduceIterable.filter(new Document('filter', 1)) + .finalizeFunction('finalize') + .limit(999) + .maxTime(100, MILLISECONDS) + .scope(new Document('scope', 1)) + .sort(new Document('sort', 1)) + .verbose(false) + .collation(collation) + .iterator() + + operation = (executor.getReadOperation() as MapReduceIterableImpl.WrappedMapReduceReadOperation).getOperation() + + then: 'should use the overrides' + expect operation, isTheSameAs(new MapReduceWithInlineResultsOperation(namespace, + new BsonJavaScript('map'), new BsonJavaScript('reduce'), new DocumentCodec()) + .filter(new BsonDocument('filter', new BsonInt32(1))) + .finalizeFunction(new BsonJavaScript('finalize')) + .limit(999) + .scope(new BsonDocument('scope', new BsonInt32(1))) + .sort(new BsonDocument('sort', new BsonInt32(1))) + .verbose(false) + .collation(collation) + ) + } + + def 'should build the 
expected MapReduceToCollectionOperation'() { + given: + def executor = new TestOperationExecutor([null, null, null]) + + when: 'mapReduce to a collection' + def collectionNamespace = new MongoNamespace('dbName', 'collName') + def mapReduceIterable = new MapReduceIterableImpl(null, namespace, Document, Document, codecRegistry, + readPreference, readConcern, writeConcern, executor, 'map', 'reduce', TIMEOUT_SETTINGS) + .collectionName(collectionNamespace.getCollectionName()) + .databaseName(collectionNamespace.getDatabaseName()) + .filter(new Document('filter', 1)) + .finalizeFunction('finalize') + .limit(999) + .maxTime(100, MILLISECONDS) + .scope(new Document('scope', 1)) + .sort(new Document('sort', 1)) + .verbose(false) + .batchSize(99) + .action(MapReduceAction.MERGE) + .jsMode(true) + .bypassDocumentValidation(true) + .collation(collation) + mapReduceIterable.iterator() + + def operation = executor.getWriteOperation() as MapReduceToCollectionOperation + def expectedOperation = new MapReduceToCollectionOperation(namespace, + new BsonJavaScript('map'), new BsonJavaScript('reduce'), 'collName', writeConcern) + .databaseName(collectionNamespace.getDatabaseName()) + .filter(new BsonDocument('filter', new BsonInt32(1))) + .finalizeFunction(new BsonJavaScript('finalize')) + .limit(999) + .scope(new BsonDocument('scope', new BsonInt32(1))) + .sort(new BsonDocument('sort', new BsonInt32(1))) + .verbose(false) + .action(MapReduceAction.MERGE.getValue()) + .jsMode(true) + .bypassDocumentValidation(true) + .collation(collation) + + then: 'should use the overrides' + expect operation, isTheSameAs(expectedOperation) + + when: 'the subsequent read should have the batchSize set' + operation = executor.getReadOperation() as FindOperation + + then: 'should use the correct settings' + operation.getNamespace() == collectionNamespace + operation.getBatchSize() == 99 + operation.getCollation() == collation + + when: 'toCollection should work as expected' + mapReduceIterable.toCollection() + + operation = executor.getWriteOperation() as MapReduceToCollectionOperation + + then: + expect operation, isTheSameAs(expectedOperation) + } + + def 'should use ClientSession for MapReduceWithInlineResultsOperation'() { + given: + def batchCursor = Stub(BatchCursor) { + _ * hasNext() >> { false } + } + def executor = new TestOperationExecutor([batchCursor, batchCursor]) + def mapReduceIterable = new MapReduceIterableImpl(clientSession, namespace, Document, Document, codecRegistry, readPreference, + readConcern, writeConcern, executor, 'map', 'reduce', TIMEOUT_SETTINGS) + + when: + mapReduceIterable.first() + + then: + executor.getClientSession() == clientSession + + when: + mapReduceIterable.iterator() + + then: + executor.getClientSession() == clientSession + + where: + clientSession << [null, Stub(ClientSession)] + } + + def 'should use ClientSession for MapReduceToCollectionOperation'() { + given: + def batchCursor = Stub(BatchCursor) { + _ * hasNext() >> { false } + } + def executor = new TestOperationExecutor([null, batchCursor, null, batchCursor, null]) + def mapReduceIterable = new MapReduceIterableImpl(clientSession, namespace, Document, Document, codecRegistry, readPreference, + readConcern, writeConcern, executor, 'map', 'reduce', TIMEOUT_SETTINGS) + .collectionName('collName') + + when: + mapReduceIterable.first() + + then: + executor.getClientSession() == clientSession + executor.getClientSession() == clientSession + + when: + mapReduceIterable.iterator() + + then: + executor.getClientSession() == 
clientSession + executor.getClientSession() == clientSession // asserted twice: map-reduce to a collection runs a write operation and then a find, so the executor records two sessions + + when: + mapReduceIterable.toCollection() + + then: + executor.getClientSession() == clientSession + + where: + clientSession << [null, Stub(ClientSession)] + } + + + def 'should handle exceptions correctly'() { + given: + def codecRegistry = fromProviders([new ValueCodecProvider(), new BsonValueCodecProvider()]) + def executor = new TestOperationExecutor([new MongoException('failure')]) + def mapReduceIterable = new MapReduceIterableImpl(null, namespace, BsonDocument, BsonDocument, codecRegistry, + readPreference, readConcern, writeConcern, executor, 'map', 'reduce', TIMEOUT_SETTINGS) + + + when: 'The operation fails with an exception' + mapReduceIterable.iterator() + + then: 'the iterator should throw the exception' + thrown(MongoException) + + when: 'toCollection should throw IllegalStateException if the output is inline' + mapReduceIterable.toCollection() + + then: + thrown(IllegalStateException) + + when: 'a codec is missing' + new MapReduceIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, readConcern, writeConcern, executor, + 'map', 'reduce', TIMEOUT_SETTINGS).iterator() + + then: + thrown(CodecConfigurationException) + } + + + def 'should follow the MongoIterable interface as expected'() { + given: + def cannedResults = [new Document('_id', 1), new Document('_id', 2), new Document('_id', 3)] + def cursor = { + def batchToReturn = cannedResults.collect() + Stub(BatchCursor) { + def count = 0 + def results + def getResult = { + count++ + results = count == 1 ? batchToReturn : null + results + } + next() >> { + getResult() + } + hasNext() >> { + count == 0 + } + } + } + def executor = new TestOperationExecutor([cursor(), cursor(), cursor(), cursor()]) + def mongoIterable = new MapReduceIterableImpl(null, namespace, BsonDocument, BsonDocument, codecRegistry, readPreference, + readConcern, writeConcern, executor, 'map', 'reduce', TIMEOUT_SETTINGS) + + when: + def results = mongoIterable.first() + + then: + results == cannedResults[0] + + when: + def count = 0 + mongoIterable.forEach(new Consumer() { + @Override + void accept(Document document) { + count++ + } + }) + + then: + count == 3 + + when: + def target = [] + mongoIterable.into(target) + + then: + target == cannedResults + + when: + target = [] + mongoIterable.map(new Function() { + @Override + Integer apply(Document document) { + document.getInteger('_id') + } + }).into(target) + + then: + target == [1, 2, 3] + } + + def 'should get and set batchSize as expected'() { + when: + def batchSize = 5 + def mongoIterable = new MapReduceIterableImpl(null, namespace, Document, Document, codecRegistry, readPreference, + readConcern, writeConcern, Stub(OperationExecutor), 'map', 'reduce', TIMEOUT_SETTINGS) + + then: + mongoIterable.getBatchSize() == null + + when: + mongoIterable.batchSize(batchSize) + + then: + mongoIterable.getBatchSize() == batchSize + } + +} diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/MappingIterableSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/MappingIterableSpecification.groovy new file mode 100644 index 00000000000..ec2b889b304 --- /dev/null +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/MappingIterableSpecification.groovy @@ -0,0 +1,72 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.internal + +import com.mongodb.Function +import com.mongodb.client.MongoCursor +import com.mongodb.client.MongoIterable +import spock.lang.Specification + +import static com.mongodb.CustomMatchers.isTheSameAs +import static spock.util.matcher.HamcrestSupport.expect + +class MappingIterableSpecification extends Specification { + + def 'should follow the MongoIterable interface as expected'() { + given: + def iterable = Mock(MongoIterable) + def mapper = { doc -> doc } + def mappingIterable = new MappingIterable(iterable, mapper) + + when: + mappingIterable.first() + + then: + 1 * iterable.first() + + when: + mappingIterable.forEach { } + + then: + 1 * iterable.forEach(_) + + when: + mappingIterable.into([]) + + then: + 1 * iterable.forEach(_) // into() delegates to forEach to populate the target + + when: + mappingIterable.batchSize(5) + + then: + 1 * iterable.batchSize(5) + + when: + mappingIterable.iterator() + + then: + 1 * iterable.iterator() >> Stub(MongoCursor) + + when: + def newMapper = { } as Function + + then: + expect mappingIterable.map(newMapper), isTheSameAs(new MappingIterable(mappingIterable, newMapper)) + } + +} diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/MongoBatchCursorAdapterSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoBatchCursorAdapterSpecification.groovy new file mode 100644 index 00000000000..2b91f584027 --- /dev/null +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoBatchCursorAdapterSpecification.groovy @@ -0,0 +1,210 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +package com.mongodb.client.internal + +import com.mongodb.ServerAddress +import com.mongodb.ServerCursor +import com.mongodb.internal.operation.BatchCursor +import org.bson.Document +import spock.lang.Specification + + +class MongoBatchCursorAdapterSpecification extends Specification { + def 'should get server cursor and address'() { + given: + def batchCursor = Stub(BatchCursor) + def address = new ServerAddress('host', 27018) + def serverCursor = new ServerCursor(5, address) + batchCursor.getServerAddress() >> address + batchCursor.getServerCursor() >> serverCursor + def cursor = new MongoBatchCursorAdapter(batchCursor) + + expect: + cursor.serverAddress.is(address) + cursor.serverCursor.is(serverCursor) + } + + def 'should throw on remove'() { + given: + def batchCursor = Stub(BatchCursor) + def cursor = new MongoBatchCursorAdapter(batchCursor) + + when: + cursor.remove() + + then: + thrown(UnsupportedOperationException) + } + + def 'should close batch cursor'() { + given: + def batchCursor = Mock(BatchCursor) + def cursor = new MongoBatchCursorAdapter(batchCursor) + + when: + cursor.close() + + then: + 1 * batchCursor.close() + } + + def 'next should throw if there is no next'() { + given: + def batchCursor = Stub(BatchCursor) + + batchCursor.hasNext() >> false + + def cursor = new MongoBatchCursorAdapter(batchCursor) + + when: + cursor.next() + + then: + thrown(NoSuchElementException) + } + + + def 'should get next from batch cursor'() { + given: + def firstBatchFromBatchCursor = [new Document('x', 1), new Document('x', 1)] + def expectedFirstBatch = firstBatchFromBatchCursor.collect() + def secondBatchFromBatchCursor = [new Document('x', 2)] + def expectedSecondBatch = secondBatchFromBatchCursor.collect() + + def batchCursor = Stub(BatchCursor) + + batchCursor.hasNext() >>> [true, true, true, true, false] + batchCursor.next() >>> [firstBatchFromBatchCursor, secondBatchFromBatchCursor] + + def cursor = new MongoBatchCursorAdapter(batchCursor) + + expect: + cursor.hasNext() + cursor.next() == expectedFirstBatch[0] + cursor.hasNext() + cursor.next() == expectedFirstBatch[1] + cursor.hasNext() + cursor.next() == expectedSecondBatch[0] + !cursor.hasNext() + } + + def 'should try next from batch cursor'() { + given: + def firstBatchFromBatchCursor = [new Document('x', 1), new Document('x', 1)] + def expectedFirstBatch = firstBatchFromBatchCursor.collect() + def secondBatchFromBatchCursor = [new Document('x', 2)] + def expectedSecondBatch = secondBatchFromBatchCursor.collect() + + def batchCursor = Stub(BatchCursor) + + batchCursor.tryNext() >>> [firstBatchFromBatchCursor, null, secondBatchFromBatchCursor, null] + + def cursor = new MongoBatchCursorAdapter(batchCursor) + + expect: + cursor.tryNext() == expectedFirstBatch[0] + cursor.tryNext() == expectedFirstBatch[1] + cursor.tryNext() == null + cursor.tryNext() == expectedSecondBatch[0] + cursor.tryNext() == null + } + + def 'should report available documents'() { + given: + def firstBatch = [new Document('x', 1), new Document('x', 1)] + def secondBatch = [new Document('x', 2)] + + def batchCursor = Stub(BatchCursor) + + batchCursor.hasNext() >>> [true, true, true, true, false] + batchCursor.next() >>> [firstBatch, secondBatch] + batchCursor.available() >>> [2, 2, 0, 0, 0, 1, 0, 0, 0] + + when: + def cursor = new MongoBatchCursorAdapter(batchCursor) + + then: + cursor.available() == 2 + + when: + cursor.hasNext() + + then: + cursor.available() == 2 + + when: + cursor.next() + + then: + cursor.available() == 1 + + when: + 
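+        // hasNext() only peeks at the buffered batch; it must not consume the remaining document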
cursor.hasNext()
+
+        then:
+        cursor.available() == 1
+
+        when:
+        cursor.next()
+
+        then:
+        cursor.available() == 0
+
+        when:
+        cursor.hasNext()
+
+        then:
+        cursor.available() == 1
+
+        when:
+        cursor.next()
+
+        then:
+        cursor.available() == 0 // the single document in the second batch has been consumed
+
+        when:
+        cursor.hasNext()
+
+        then:
+        cursor.available() == 0
+
+        when:
+        cursor.close()
+
+        then:
+        cursor.available() == 0
+    }
+
+    def 'should close cursor in forEachRemaining if there is an exception'() {
+        given:
+        def firstBatch = [new Document('x', 1)]
+
+        def batchCursor = Mock(BatchCursor)
+        batchCursor.hasNext() >>> [true, true]
+        batchCursor.next() >>> [firstBatch]
+        def cursor = new MongoBatchCursorAdapter(batchCursor)
+
+        when:
+        cursor.forEachRemaining {
+            throw new IllegalStateException('test')
+        }
+
+        then:
+        thrown(IllegalStateException)
+        1 * batchCursor.close()
+    }
+}
diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/MongoChangeStreamCursorSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoChangeStreamCursorSpecification.groovy
new file mode 100644
index 00000000000..045a8c6daf5
--- /dev/null
+++ b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoChangeStreamCursorSpecification.groovy
@@ -0,0 +1,281 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.mongodb.client.internal + +import com.mongodb.ServerAddress +import com.mongodb.ServerCursor +import com.mongodb.internal.operation.AggregateResponseBatchCursor +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.RawBsonDocument +import org.bson.codecs.Decoder +import org.bson.codecs.RawBsonDocumentCodec +import spock.lang.Specification + +class MongoChangeStreamCursorSpecification extends Specification { + def 'should get server cursor and address'() { + given: + def batchCursor = Stub(AggregateResponseBatchCursor) + def decoder = Mock(Decoder) + def resumeToken = Mock(BsonDocument) + def address = new ServerAddress('host', 27018) + def serverCursor = new ServerCursor(5, address) + batchCursor.getServerAddress() >> address + batchCursor.getServerCursor() >> serverCursor + def cursor = new MongoChangeStreamCursorImpl(batchCursor, decoder, resumeToken) + + expect: + cursor.serverAddress.is(address) + cursor.serverCursor.is(serverCursor) + } + + def 'should throw on remove'() { + given: + def batchCursor = Stub(AggregateResponseBatchCursor) + def decoder = Mock(Decoder) + def resumeToken = Mock(BsonDocument) + def cursor = new MongoChangeStreamCursorImpl(batchCursor, decoder, resumeToken) + + when: + cursor.remove() + + then: + thrown(UnsupportedOperationException) + } + + def 'should close batch cursor'() { + given: + def batchCursor = Mock(AggregateResponseBatchCursor) + def decoder = Mock(Decoder) + def resumeToken = Mock(BsonDocument) + def cursor = new MongoChangeStreamCursorImpl(batchCursor, decoder, resumeToken) + + when: + cursor.close() + + then: + 1 * batchCursor.close() + } + + def 'next should throw if there is no next'() { + given: + def batchCursor = Stub(AggregateResponseBatchCursor) + def codec = new RawBsonDocumentCodec() + def resumeToken = Mock(BsonDocument) + + batchCursor.hasNext() >> false + + def cursor = new MongoChangeStreamCursorImpl(batchCursor, codec, resumeToken) + + when: + cursor.next() + + then: + thrown(NoSuchElementException) + } + + + def 'should get next from batch cursor'() { + given: + + def firstBatchFromBatchCursor = [RawBsonDocument.parse('{ _id: { _data: 1 }, x: 1 }'), + RawBsonDocument.parse('{ _id: { _data: 2 }, x: 1 }')] + def expectedFirstBatch = firstBatchFromBatchCursor.collect() + def secondBatchFromBatchCursor = [RawBsonDocument.parse('{ _id: { _data: 3 }, x: 2 }')] + def expectedSecondBatch = secondBatchFromBatchCursor.collect() + + def batchCursor = Stub(AggregateResponseBatchCursor) + def codec = new RawBsonDocumentCodec() + def resumeToken = Mock(BsonDocument) + + batchCursor.hasNext() >>> [true, true, true, true, false] + batchCursor.next() >>> [firstBatchFromBatchCursor, secondBatchFromBatchCursor] + + def cursor = new MongoChangeStreamCursorImpl(batchCursor, codec, resumeToken) + + expect: + cursor.hasNext() + cursor.next() == expectedFirstBatch[0] + cursor.hasNext() + cursor.next() == expectedFirstBatch[1] + cursor.hasNext() + cursor.next() == expectedSecondBatch[0] + !cursor.hasNext() + } + + def 'should try next from batch cursor'() { + given: + def firstBatchFromBatchCursor = [RawBsonDocument.parse('{ _id: { _data: 1 }, x: 1 }'), + RawBsonDocument.parse('{ _id: { _data: 2 }, x: 1 }')] + def expectedFirstBatch = firstBatchFromBatchCursor.collect() + def secondBatchFromBatchCursor = [RawBsonDocument.parse('{ _id: { _data: 3 }, x: 2 }')] + def expectedSecondBatch = secondBatchFromBatchCursor.collect() + + def batchCursor = Stub(AggregateResponseBatchCursor) + def codec = new 
RawBsonDocumentCodec() + def resumeToken = Mock(BsonDocument) + + batchCursor.tryNext() >>> [firstBatchFromBatchCursor, null, secondBatchFromBatchCursor, null] + + def cursor = new MongoChangeStreamCursorImpl(batchCursor, codec, resumeToken) + + expect: + cursor.tryNext() == expectedFirstBatch[0] + cursor.tryNext() == expectedFirstBatch[1] + cursor.tryNext() == null + cursor.tryNext() == expectedSecondBatch[0] + cursor.tryNext() == null + } + + def 'should get cached resume token after next'() { + given: + def firstBatchFromBatchCursor = [RawBsonDocument.parse('{ _id: { _data: 1 }, x: 1 }'), + RawBsonDocument.parse('{ _id: { _data: 2 }, x: 1 }')] + def expectedFirstBatch = firstBatchFromBatchCursor.collect() + def secondBatchFromBatchCursor = [RawBsonDocument.parse('{ _id: { _data: 3 }, x: 2 }')] + def expectedSecondBatch = secondBatchFromBatchCursor.collect() + + def batchCursor = Stub(AggregateResponseBatchCursor) + def codec = new RawBsonDocumentCodec() + def resumeToken = new BsonDocument('_data', new BsonInt32(1)) + + batchCursor.hasNext() >>> [true, true, true, false] + batchCursor.next() >>> [firstBatchFromBatchCursor, secondBatchFromBatchCursor] + batchCursor.getPostBatchResumeToken() >>> [new BsonDocument('_data', new BsonInt32(2)), + new BsonDocument('_data', new BsonInt32(2)), + new BsonDocument('_data', new BsonInt32(3)), + new BsonDocument('_data', new BsonInt32(3))] + + def cursor = new MongoChangeStreamCursorImpl(batchCursor, codec, resumeToken) + + expect: + cursor.getResumeToken() == resumeToken + cursor.next() == expectedFirstBatch.head() + cursor.getResumeToken() == new BsonDocument('_data', new BsonInt32(1)) + cursor.next() == expectedFirstBatch.last() + cursor.getResumeToken() == new BsonDocument('_data', new BsonInt32(2)) + cursor.next() == expectedSecondBatch.head() + cursor.getResumeToken() == new BsonDocument('_data', new BsonInt32(3)) + } + + def 'should get cached resume token after tryNext'() { + given: + def firstBatchFromBatchCursor = [RawBsonDocument.parse('{ _id: { _data: 1 }, x: 1 }'), + RawBsonDocument.parse('{ _id: { _data: 2 }, x: 1 }')] + def expectedFirstBatch = firstBatchFromBatchCursor.collect() + def secondBatchFromBatchCursor = [RawBsonDocument.parse('{ _id: { _data: 3 }, x: 2 }')] + def expectedSecondBatch = secondBatchFromBatchCursor.collect() + + + def batchCursor = Stub(AggregateResponseBatchCursor) + def codec = new RawBsonDocumentCodec() + def resumeToken = new BsonDocument('_data', new BsonInt32(1)) + + batchCursor.hasNext() >>> [true, true, true, false] + batchCursor.tryNext() >>> [firstBatchFromBatchCursor, null, secondBatchFromBatchCursor, null] + batchCursor.getPostBatchResumeToken() >>> [new BsonDocument('_data', new BsonInt32(2)), + new BsonDocument('_data', new BsonInt32(2)), + new BsonDocument('_data', new BsonInt32(2)), + new BsonDocument('_data', new BsonInt32(2)), + new BsonDocument('_data', new BsonInt32(3)), + new BsonDocument('_data', new BsonInt32(3)), + new BsonDocument('_data', new BsonInt32(3))] + + def cursor = new MongoChangeStreamCursorImpl(batchCursor, codec, resumeToken) + + expect: + cursor.getResumeToken() == resumeToken + cursor.tryNext() == expectedFirstBatch.head() + cursor.getResumeToken() == new BsonDocument('_data', new BsonInt32(1)) + cursor.tryNext() == expectedFirstBatch.last() + cursor.getResumeToken() == new BsonDocument('_data', new BsonInt32(2)) + cursor.tryNext() == null + cursor.getResumeToken() == new BsonDocument('_data', new BsonInt32(2)) + cursor.tryNext() == expectedSecondBatch.head() + 
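+        // consuming the second batch advances the cached resume token to that batch's post-batch token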
cursor.getResumeToken() == new BsonDocument('_data', new BsonInt32(3))
+        cursor.tryNext() == null
+        cursor.getResumeToken() == new BsonDocument('_data', new BsonInt32(3))
+    }
+
+
+    def 'should report available documents'() {
+        given:
+        def firstBatch = [RawBsonDocument.parse('{ _id: { _data: 1 }, x: 1 }'),
+                          RawBsonDocument.parse('{ _id: { _data: 2 }, x: 1 }')]
+        def secondBatch = [RawBsonDocument.parse('{ _id: { _data: 3 }, x: 2 }')]
+
+        def batchCursor = Stub(AggregateResponseBatchCursor)
+
+        batchCursor.hasNext() >>> [true, true, true, true, false]
+        batchCursor.next() >>> [firstBatch, secondBatch]
+        batchCursor.available() >>> [2, 2, 0, 0, 0, 1, 0, 0, 0]
+
+        when:
+        def cursor = new MongoChangeStreamCursorImpl(batchCursor, new RawBsonDocumentCodec(), new BsonDocument('_data', new BsonInt32(1)))
+
+        then:
+        cursor.available() == 2
+
+        when:
+        cursor.hasNext()
+
+        then:
+        cursor.available() == 2
+
+        when:
+        cursor.next()
+
+        then:
+        cursor.available() == 1
+
+        when:
+        cursor.hasNext()
+
+        then:
+        cursor.available() == 1
+
+        when:
+        cursor.next()
+
+        then:
+        cursor.available() == 0
+
+        when:
+        cursor.hasNext()
+
+        then:
+        cursor.available() == 1
+
+        when:
+        cursor.next()
+
+        then:
+        cursor.available() == 0 // the single document in the second batch has been consumed
+
+        when:
+        cursor.hasNext()
+
+        then:
+        cursor.available() == 0
+
+        when:
+        cursor.close()
+
+        then:
+        cursor.available() == 0
+    }
+}
diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/MongoClusterSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoClusterSpecification.groovy
new file mode 100644
index 00000000000..563528e7dce
--- /dev/null
+++ b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoClusterSpecification.groovy
@@ -0,0 +1,263 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package com.mongodb.client.internal + +import com.mongodb.MongoClientSettings +import com.mongodb.MongoNamespace +import com.mongodb.ReadConcern +import com.mongodb.ReadPreference +import com.mongodb.WriteConcern +import com.mongodb.client.ClientSession +import com.mongodb.client.MongoClient +import com.mongodb.client.MongoIterable +import com.mongodb.internal.TimeoutSettings +import com.mongodb.internal.client.model.changestream.ChangeStreamLevel +import com.mongodb.internal.connection.Cluster +import com.mongodb.internal.session.ServerSessionPool +import org.bson.BsonDocument +import org.bson.Document +import org.bson.codecs.UuidCodec +import org.bson.codecs.ValueCodecProvider +import org.bson.codecs.configuration.CodecRegistry +import spock.lang.Specification + +import java.util.concurrent.TimeUnit + +import static com.mongodb.CustomMatchers.isTheSameAs +import static com.mongodb.MongoClientSettings.getDefaultCodecRegistry +import static com.mongodb.ReadPreference.primary +import static com.mongodb.ReadPreference.secondary +import static com.mongodb.client.internal.TestHelper.execute +import static org.bson.UuidRepresentation.UNSPECIFIED +import static org.bson.codecs.configuration.CodecRegistries.fromProviders +import static spock.util.matcher.HamcrestSupport.expect + +class MongoClusterSpecification extends Specification { + + private static final CodecRegistry CODEC_REGISTRY = fromProviders(new ValueCodecProvider()) + private static final MongoClientSettings CLIENT_SETTINGS = MongoClientSettings.builder().build() + private static final TimeoutSettings TIMEOUT_SETTINGS = TimeoutSettings.create(CLIENT_SETTINGS) + private final Cluster cluster = Stub(Cluster) + private final MongoClient originator = Stub(MongoClient) + private final ServerSessionPool serverSessionPool = Stub(ServerSessionPool) + private final OperationExecutor operationExecutor = Stub(OperationExecutor) + + def 'should pass the correct settings to getDatabase'() { + given: + def settings = MongoClientSettings.builder() + .readPreference(secondary()) + .writeConcern(WriteConcern.MAJORITY) + .readConcern(ReadConcern.MAJORITY) + .retryWrites(true) + .codecRegistry(CODEC_REGISTRY) + .build() + def operationExecutor = new TestOperationExecutor([]) + def mongoClientCluster = createMongoCluster(settings, operationExecutor) + + when: + def database = mongoClientCluster.getDatabase('name') + + then: + expect database, isTheSameAs(expectedDatabase) + + where: + expectedDatabase << new MongoDatabaseImpl('name', CODEC_REGISTRY, secondary(), + WriteConcern.MAJORITY, true, true, ReadConcern.MAJORITY, UNSPECIFIED, null, + TIMEOUT_SETTINGS, new TestOperationExecutor([])) + } + + def 'should behave correctly when using withCodecRegistry'() { + given: + def newCodecRegistry = fromProviders(new ValueCodecProvider()) + + when: + def mongoCluster = createMongoCluster().withCodecRegistry(newCodecRegistry) + + then: + (mongoCluster.getCodecRegistry().get(UUID) as UuidCodec).getUuidRepresentation() == UNSPECIFIED + expect mongoCluster, isTheSameAs(createMongoCluster( + MongoClientSettings.builder(CLIENT_SETTINGS).codecRegistry(newCodecRegistry).build())) + } + + def 'should behave correctly when using withReadPreference'() { + given: + def newReadPreference = ReadPreference.secondaryPreferred() + + when: + def mongoCluster = createMongoCluster().withReadPreference(newReadPreference) + + then: + mongoCluster.getReadPreference() == newReadPreference + expect mongoCluster, isTheSameAs( + 
createMongoCluster(MongoClientSettings.builder(CLIENT_SETTINGS).readPreference(newReadPreference).build())) + } + + def 'should behave correctly when using withWriteConcern'() { + given: + def newWriteConcern = WriteConcern.MAJORITY + + when: + def mongoCluster = createMongoCluster().withWriteConcern(newWriteConcern) + + then: + mongoCluster.getWriteConcern() == newWriteConcern + expect mongoCluster, isTheSameAs(createMongoCluster( + MongoClientSettings.builder(CLIENT_SETTINGS).writeConcern(newWriteConcern).build())) + } + + def 'should behave correctly when using withReadConcern'() { + given: + def newReadConcern = ReadConcern.MAJORITY + + when: + def mongoCluster = createMongoCluster().withReadConcern(newReadConcern) + + then: + mongoCluster.getReadConcern() == newReadConcern + expect mongoCluster, isTheSameAs(createMongoCluster( + MongoClientSettings.builder(CLIENT_SETTINGS).readConcern(newReadConcern).build())) + } + + def 'should behave correctly when using withTimeout'() { + when: + def mongoCluster = createMongoCluster().withTimeout(10_000, TimeUnit.MILLISECONDS) + + then: + mongoCluster.getTimeout(TimeUnit.MILLISECONDS) == 10_000 + expect mongoCluster, isTheSameAs(createMongoCluster(MongoClientSettings.builder(CLIENT_SETTINGS) + .timeout(10_000, TimeUnit.MILLISECONDS).build())) + + when: + createMongoCluster().withTimeout(500, TimeUnit.NANOSECONDS) + + then: + thrown(IllegalArgumentException) + } + + + def 'should use ListDatabasesIterableImpl correctly'() { + given: + def executor = new TestOperationExecutor([null, null]) + def mongoCluster = createMongoCluster(executor) + def listDatabasesMethod = mongoCluster.&listDatabases + def listDatabasesNamesMethod = mongoCluster.&listDatabaseNames + + when: + def listDatabasesIterable = execute(listDatabasesMethod, session) + + then: + expect listDatabasesIterable, isTheSameAs(new ListDatabasesIterableImpl<>(session, Document, + CLIENT_SETTINGS.codecRegistry, primary(), executor, true, TIMEOUT_SETTINGS)) + + when: + listDatabasesIterable = execute(listDatabasesMethod, session, BsonDocument) + + then: + expect listDatabasesIterable, isTheSameAs(new ListDatabasesIterableImpl<>(session, BsonDocument, + CLIENT_SETTINGS.codecRegistry, primary(), executor, true, TIMEOUT_SETTINGS)) + + when: + def listDatabaseNamesIterable = execute(listDatabasesNamesMethod, session) as MongoIterable + + then: + // listDatabaseNamesIterable is an instance of a MappingIterable, so have to get the mapped iterable inside it + expect listDatabaseNamesIterable.getMapped(), isTheSameAs(new ListDatabasesIterableImpl<>(session, BsonDocument, + CLIENT_SETTINGS.codecRegistry, primary(), executor, true, TIMEOUT_SETTINGS) + .nameOnly(true)) + + where: + session << [null, Stub(ClientSession)] + } + + def 'should create ChangeStreamIterable correctly'() { + given: + def executor = new TestOperationExecutor([]) + def namespace = new MongoNamespace('admin', '_ignored') + def settings = MongoClientSettings.builder() + .readPreference(secondary()) + .readConcern(ReadConcern.MAJORITY) + .codecRegistry(getDefaultCodecRegistry()) + .build() + def readPreference = settings.getReadPreference() + def readConcern = settings.getReadConcern() + def mongoCluster = createMongoCluster(settings, executor) + def watchMethod = mongoCluster.&watch + + when: + def changeStreamIterable = execute(watchMethod, session) + + then: + expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, settings.codecRegistry, + readPreference, readConcern, executor, [], Document, 
ChangeStreamLevel.CLIENT, true, TIMEOUT_SETTINGS), + ['codec']) + + when: + changeStreamIterable = execute(watchMethod, session, [new Document('$match', 1)]) + + then: + expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, settings.codecRegistry, + readPreference, readConcern, executor, [new Document('$match', 1)], Document, ChangeStreamLevel.CLIENT, + true, TIMEOUT_SETTINGS), ['codec']) + + when: + changeStreamIterable = execute(watchMethod, session, [new Document('$match', 1)], BsonDocument) + + then: + expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, settings.codecRegistry, + readPreference, readConcern, executor, [new Document('$match', 1)], BsonDocument, + ChangeStreamLevel.CLIENT, true, TIMEOUT_SETTINGS), ['codec']) + + where: + session << [null, Stub(ClientSession)] + } + + def 'should validate the ChangeStreamIterable pipeline data correctly'() { + given: + def executor = new TestOperationExecutor([]) + def mongoCluster = createMongoCluster(executor) + + when: + mongoCluster.watch((Class) null) + + then: + thrown(IllegalArgumentException) + + when: + mongoCluster.watch([null]).into([]) + + then: + thrown(IllegalArgumentException) + } + + MongoClusterImpl createMongoCluster() { + createMongoCluster(CLIENT_SETTINGS) + } + + MongoClusterImpl createMongoCluster(final MongoClientSettings settings) { + createMongoCluster(settings, operationExecutor) + } + + MongoClusterImpl createMongoCluster(final OperationExecutor operationExecutor) { + createMongoCluster(CLIENT_SETTINGS, operationExecutor) + } + + MongoClusterImpl createMongoCluster(final MongoClientSettings settings, final OperationExecutor operationExecutor) { + new MongoClusterImpl(null, cluster, settings.codecRegistry, null, null, + originator, operationExecutor, settings.readConcern, settings.readPreference, settings.retryReads, settings.retryWrites, + null, serverSessionPool, TimeoutSettings.create(settings), settings.uuidRepresentation, settings.writeConcern) + } +} diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/MongoCollectionSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoCollectionSpecification.groovy new file mode 100644 index 00000000000..cbe43c10517 --- /dev/null +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoCollectionSpecification.groovy @@ -0,0 +1,1610 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.internal + +import com.mongodb.CreateIndexCommitQuorum +import com.mongodb.MongoBulkWriteException +import com.mongodb.MongoClientSettings +import com.mongodb.MongoException +import com.mongodb.MongoNamespace +import com.mongodb.MongoWriteConcernException +import com.mongodb.MongoWriteException +import com.mongodb.ReadConcern +import com.mongodb.ServerAddress +import com.mongodb.WriteConcern +import com.mongodb.WriteConcernResult +import com.mongodb.WriteError +import com.mongodb.bulk.BulkWriteError +import com.mongodb.bulk.WriteConcernError +import com.mongodb.client.ClientSession +import com.mongodb.client.ImmutableDocument +import com.mongodb.client.ImmutableDocumentCodecProvider +import com.mongodb.client.model.BulkWriteOptions +import com.mongodb.client.model.Collation +import com.mongodb.client.model.CountOptions +import com.mongodb.client.model.CreateIndexOptions +import com.mongodb.client.model.DeleteManyModel +import com.mongodb.client.model.DeleteOneModel +import com.mongodb.client.model.DeleteOptions +import com.mongodb.client.model.DropIndexOptions +import com.mongodb.client.model.EstimatedDocumentCountOptions +import com.mongodb.client.model.FindOneAndDeleteOptions +import com.mongodb.client.model.FindOneAndReplaceOptions +import com.mongodb.client.model.FindOneAndUpdateOptions +import com.mongodb.client.model.IndexModel +import com.mongodb.client.model.IndexOptions +import com.mongodb.client.model.InsertManyOptions +import com.mongodb.client.model.InsertOneModel +import com.mongodb.client.model.InsertOneOptions +import com.mongodb.client.model.RenameCollectionOptions +import com.mongodb.client.model.ReplaceOneModel +import com.mongodb.client.model.ReplaceOptions +import com.mongodb.client.model.UpdateManyModel +import com.mongodb.client.model.UpdateOneModel +import com.mongodb.client.model.UpdateOptions +import com.mongodb.client.result.DeleteResult +import com.mongodb.client.result.UpdateResult +import com.mongodb.client.test.Worker +import com.mongodb.internal.bulk.DeleteRequest +import com.mongodb.internal.bulk.IndexRequest +import com.mongodb.internal.bulk.InsertRequest +import com.mongodb.internal.bulk.UpdateRequest +import com.mongodb.internal.client.model.AggregationLevel +import com.mongodb.internal.client.model.changestream.ChangeStreamLevel +import com.mongodb.internal.operation.BatchCursor +import com.mongodb.internal.operation.CountDocumentsOperation +import com.mongodb.internal.operation.CreateIndexesOperation +import com.mongodb.internal.operation.DropCollectionOperation +import com.mongodb.internal.operation.DropIndexOperation +import com.mongodb.internal.operation.EstimatedDocumentCountOperation +import com.mongodb.internal.operation.FindAndDeleteOperation +import com.mongodb.internal.operation.FindAndReplaceOperation +import com.mongodb.internal.operation.FindAndUpdateOperation +import com.mongodb.internal.operation.ListIndexesOperation +import com.mongodb.internal.operation.MixedBulkWriteOperation +import com.mongodb.internal.operation.RenameCollectionOperation +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.Document +import org.bson.codecs.BsonDocumentCodec +import org.bson.codecs.BsonValueCodecProvider +import org.bson.codecs.DocumentCodec +import org.bson.codecs.UuidCodec +import org.bson.codecs.ValueCodecProvider +import org.bson.codecs.configuration.CodecConfigurationException +import org.bson.codecs.configuration.CodecRegistries +import org.bson.conversions.Bson +import 
spock.lang.Specification + +import java.util.concurrent.TimeUnit + +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS +import static com.mongodb.CustomMatchers.isTheSameAs +import static com.mongodb.ReadPreference.primary +import static com.mongodb.ReadPreference.secondary +import static com.mongodb.WriteConcern.ACKNOWLEDGED +import static com.mongodb.WriteConcern.UNACKNOWLEDGED +import static com.mongodb.bulk.BulkWriteResult.acknowledged +import static com.mongodb.bulk.BulkWriteResult.unacknowledged +import static com.mongodb.client.internal.TestHelper.execute +import static com.mongodb.internal.bulk.WriteRequest.Type.DELETE +import static com.mongodb.internal.bulk.WriteRequest.Type.INSERT +import static com.mongodb.internal.bulk.WriteRequest.Type.REPLACE +import static com.mongodb.internal.bulk.WriteRequest.Type.UPDATE +import static java.util.concurrent.TimeUnit.MILLISECONDS +import static org.bson.UuidRepresentation.C_SHARP_LEGACY +import static org.bson.UuidRepresentation.JAVA_LEGACY +import static org.bson.codecs.configuration.CodecRegistries.fromProviders +import static spock.util.matcher.HamcrestSupport.expect + +@SuppressWarnings('ClassSize') +class MongoCollectionSpecification extends Specification { + + def namespace = new MongoNamespace('databaseName', 'collectionName') + def codecRegistry = MongoClientSettings.getDefaultCodecRegistry() + def readPreference = secondary() + def readConcern = ReadConcern.MAJORITY + def collation = Collation.builder().locale('en').build() + + def 'should return the correct name from getName'() { + given: + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, true, + true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, new TestOperationExecutor([null])) + + expect: + collection.getNamespace() == namespace + } + + def 'should behave correctly when using withDocumentClass'() { + given: + def newClass = Worker + def executor = new TestOperationExecutor([]) + + when: + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor).withDocumentClass(newClass) + + then: + collection.getDocumentClass() == newClass + expect collection, isTheSameAs(new MongoCollectionImpl(namespace, newClass, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)) + } + + def 'should behave correctly when using withCodecRegistry'() { + given: + def newCodecRegistry = fromProviders(new ValueCodecProvider()) + def executor = new TestOperationExecutor([]) + + when: + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, C_SHARP_LEGACY, null, TIMEOUT_SETTINGS, executor).withCodecRegistry(newCodecRegistry) + + then: + (collection.getCodecRegistry().get(UUID) as UuidCodec).getUuidRepresentation() == C_SHARP_LEGACY + expect collection, isTheSameAs(new MongoCollectionImpl(namespace, Document, collection.getCodecRegistry(), readPreference, + ACKNOWLEDGED, true, true, readConcern, C_SHARP_LEGACY, null, TIMEOUT_SETTINGS, executor)) + } + + def 'should behave correctly when using withReadPreference'() { + given: + def newReadPreference = primary() + def executor = new TestOperationExecutor([]) + + when: + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, 
TIMEOUT_SETTINGS, executor).withReadPreference(newReadPreference) + + then: + collection.getReadPreference() == newReadPreference + expect collection, isTheSameAs(new MongoCollectionImpl(namespace, Document, codecRegistry, newReadPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)) + } + + def 'should behave correctly when using withWriteConcern'() { + given: + def newWriteConcern = WriteConcern.MAJORITY + def executor = new TestOperationExecutor([]) + + when: + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor).withWriteConcern(newWriteConcern) + + then: + collection.getWriteConcern() == newWriteConcern + expect collection, isTheSameAs(new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, newWriteConcern, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)) + } + + def 'should behave correctly when using withReadConcern'() { + given: + def newReadConcern = ReadConcern.MAJORITY + def executor = new TestOperationExecutor([]) + + when: + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor).withReadConcern(newReadConcern) + + then: + collection.getReadConcern() == newReadConcern + expect collection, isTheSameAs(new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, newReadConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)) + } + + def 'should behave correctly when using withTimeout'() { + given: + def executor = new TestOperationExecutor([]) + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + + when: + def newCollection = collection.withTimeout(10_000, MILLISECONDS) + + then: + newCollection.getTimeout(MILLISECONDS) == 10_000 + expect newCollection, isTheSameAs(new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS.withTimeout(10_000, MILLISECONDS), executor)) + + when: + collection.withTimeout(500, TimeUnit.NANOSECONDS) + + then: + thrown(IllegalArgumentException) + } + + def 'should use CountOperation correctly with documentCount'() { + given: + def executor = new TestOperationExecutor([1L, 2L, 3L, 4L]) + def filter = new BsonDocument() + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, true, + true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def expectedOperation = new CountDocumentsOperation(namespace) + .filter(filter).retryReads(true) + + def countMethod = collection.&countDocuments + + when: + execute(countMethod, session) + def operation = executor.getReadOperation() as CountDocumentsOperation + + then: + executor.getClientSession() == session + expect operation, isTheSameAs(expectedOperation) + + when: + filter = new BsonDocument('a', new BsonInt32(1)) + execute(countMethod, session, filter) + operation = executor.getReadOperation() as CountDocumentsOperation + + then: + executor.getClientSession() == session + expect operation, isTheSameAs(expectedOperation.filter(filter)) + + when: + def hint = new BsonDocument('hint', new BsonInt32(1)) + execute(countMethod, session, filter, new 
CountOptions().hint(hint).skip(10).limit(100).collation(collation)) + operation = executor.getReadOperation() as CountDocumentsOperation + + then: + executor.getClientSession() == session + expect operation, isTheSameAs(expectedOperation.filter(filter).hint(hint).skip(10).limit(100) + .collation(collation)) + + where: + session << [null, Stub(ClientSession)] + } + + def 'should use CountOperation correctly with estimatedDocumentCount'() { + given: + def executor = new TestOperationExecutor([1L, 2L]) + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, true, + true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def expectedOperation = new EstimatedDocumentCountOperation(namespace) + .retryReads(true) + + def countMethod = collection.&estimatedDocumentCount + + when: + execute(countMethod, session) + def operation = executor.getReadOperation() as EstimatedDocumentCountOperation + + then: + executor.getClientSession() == session + expect operation, isTheSameAs(expectedOperation) + + when: + expectedOperation = new EstimatedDocumentCountOperation(namespace).retryReads(true) + execute(countMethod, session, new EstimatedDocumentCountOptions().maxTime(100, MILLISECONDS)) + operation = executor.getReadOperation() as EstimatedDocumentCountOperation + + then: + executor.getClientSession() == session + expect operation, isTheSameAs(expectedOperation) + + where: + session << [null] + } + + def 'should create DistinctIterable correctly'() { + given: + def executor = new TestOperationExecutor([]) + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def filter = new Document('a', 1) + def distinctMethod = collection.&distinct + + when: + def distinctIterable = execute(distinctMethod, session, 'field', String) + + then: + expect distinctIterable, isTheSameAs(new DistinctIterableImpl<>(session, namespace, Document, String, + codecRegistry, readPreference, readConcern, executor, 'field', new BsonDocument(), true, TIMEOUT_SETTINGS)) + + when: + distinctIterable = execute(distinctMethod, session, 'field', String).filter(filter) + + then: + expect distinctIterable, isTheSameAs(new DistinctIterableImpl<>(session, namespace, Document, String, + codecRegistry, readPreference, readConcern, executor, 'field', filter, true, TIMEOUT_SETTINGS)) + + where: + session << [null, Stub(ClientSession)] + } + + def 'should create FindIterable correctly'() { + given: + def executor = new TestOperationExecutor([]) + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def findMethod = collection.&find + + when: + def findIterable = execute(findMethod, session) + + then: + expect findIterable, isTheSameAs(new FindIterableImpl<>(session, namespace, Document, Document, codecRegistry, + readPreference, readConcern, executor, new BsonDocument(), true, TIMEOUT_SETTINGS)) + + when: + findIterable = execute(findMethod, session, BsonDocument) + + then: + expect findIterable, isTheSameAs(new FindIterableImpl<>(session, namespace, Document, BsonDocument, + codecRegistry, readPreference, readConcern, executor, new BsonDocument(), true, TIMEOUT_SETTINGS)) + + when: + findIterable = execute(findMethod, session, new Document()) + + then: + expect findIterable, isTheSameAs(new FindIterableImpl<>(session, namespace, 
Document, Document, + codecRegistry, readPreference, readConcern, executor, new Document(), true, TIMEOUT_SETTINGS)) + + when: + findIterable = execute(findMethod, session, new Document(), BsonDocument) + + then: + expect findIterable, isTheSameAs(new FindIterableImpl<>(session, namespace, Document, BsonDocument, + codecRegistry, readPreference, readConcern, executor, new Document(), true, TIMEOUT_SETTINGS)) + + where: + session << [null, Stub(ClientSession)] + } + + def 'should create AggregateIterable correctly'() { + given: + def executor = new TestOperationExecutor([]) + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def aggregateMethod = collection.&aggregate + + when: + def aggregateIterable = execute(aggregateMethod, session, [new Document('$match', 1)]) + + then: + expect aggregateIterable, isTheSameAs(new AggregateIterableImpl<>(session, namespace, Document, Document, + codecRegistry, readPreference, readConcern, ACKNOWLEDGED, executor, [new Document('$match', 1)], + AggregationLevel.COLLECTION, true, TIMEOUT_SETTINGS)) + + when: + aggregateIterable = execute(aggregateMethod, session, [new Document('$match', 1)], BsonDocument) + + then: + expect aggregateIterable, isTheSameAs(new AggregateIterableImpl<>(session, namespace, Document, BsonDocument, + codecRegistry, readPreference, readConcern, ACKNOWLEDGED, executor, [new Document('$match', 1)], + AggregationLevel.COLLECTION, true, TIMEOUT_SETTINGS)) + + where: + session << [null, Stub(ClientSession)] + } + + def 'should validate the aggregation pipeline data correctly'() { + given: + def executor = new TestOperationExecutor([]) + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + + when: + collection.aggregate(null) + + then: + thrown(IllegalArgumentException) + + when: + collection.aggregate([null]).into([]) + + then: + thrown(IllegalArgumentException) + } + + def 'should create ChangeStreamIterable correctly'() { + given: + def executor = new TestOperationExecutor([]) + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def watchMethod = collection.&watch + + when: + def changeStreamIterable = execute(watchMethod, session) + + then: + expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, codecRegistry, + readPreference, readConcern, executor, [], Document, ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS), + ['codec']) + + when: + changeStreamIterable = execute(watchMethod, session, [new Document('$match', 1)]) + + then: + expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, codecRegistry, + readPreference, readConcern, executor, [new Document('$match', 1)], Document, + ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS), ['codec']) + + when: + changeStreamIterable = execute(watchMethod, session, [new Document('$match', 1)], BsonDocument) + + then: + expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, codecRegistry, + readPreference, readConcern, executor, [new Document('$match', 1)], BsonDocument, + ChangeStreamLevel.COLLECTION, true, TIMEOUT_SETTINGS), ['codec']) + + where: + session << [null, Stub(ClientSession)] 
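+        // each watch overload is verified both without a session (null) and with an explicit client session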
+ } + + def 'should validate the ChangeStreamIterable pipeline data correctly'() { + given: + def executor = new TestOperationExecutor([]) + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + + when: + collection.watch((Class) null) + + then: + thrown(IllegalArgumentException) + + when: + collection.watch([null]).into([]) + + then: + thrown(IllegalArgumentException) + } + + def 'should create MapReduceIterable correctly'() { + given: + def executor = new TestOperationExecutor([]) + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def mapReduceMethod = collection.&mapReduce + + when: + def mapReduceIterable = execute(mapReduceMethod, session, 'map', 'reduce') + + then: + expect mapReduceIterable, isTheSameAs(new MapReduceIterableImpl<>(session, namespace, Document, Document, + codecRegistry, readPreference, readConcern, ACKNOWLEDGED, executor, 'map', 'reduce', TIMEOUT_SETTINGS)) + + when: + mapReduceIterable = execute(mapReduceMethod, session, 'map', 'reduce', BsonDocument) + + then: + expect mapReduceIterable, isTheSameAs(new MapReduceIterableImpl<>(session, namespace, Document, BsonDocument, + codecRegistry, readPreference, readConcern, ACKNOWLEDGED, executor, 'map', 'reduce', TIMEOUT_SETTINGS)) + + where: + session << [null, Stub(ClientSession)] + } + + def 'bulkWrite should use MixedBulkWriteOperation correctly'() { + given: + def executor = new TestOperationExecutor((1..3).collect { + writeConcern.isAcknowledged() ? acknowledged(INSERT, 0, 0, [], []) : unacknowledged() + }) + def collection = new MongoCollectionImpl(namespace, BsonDocument, codecRegistry, readPreference, writeConcern, + retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def expectedOperation = { boolean ordered, WriteConcern wc, Boolean bypassValidation, List filters -> + new MixedBulkWriteOperation(namespace, [ + new InsertRequest(BsonDocument.parse('{_id: 1}')), + new UpdateRequest(BsonDocument.parse('{a: 2}'), BsonDocument.parse('{a: 200}'), REPLACE) + .multi(false).upsert(true).collation(collation).hint(hint).hintString(hintString), + new UpdateRequest(BsonDocument.parse('{a: 3}'), BsonDocument.parse('{$set: {a: 1}}'), UPDATE) + .multi(false).upsert(true).collation(collation).arrayFilters(filters) + .hint(hint).hintString(hintString), + new UpdateRequest(BsonDocument.parse('{a: 4}'), BsonDocument.parse('{$set: {a: 1}}'), UPDATE).multi(true) + .upsert(true).collation(collation).arrayFilters(filters).hint(hint).hintString(hintString), + new DeleteRequest(BsonDocument.parse('{a: 5}')).multi(false), + new DeleteRequest(BsonDocument.parse('{a: 6}')).multi(true).collation(collation) + ], ordered, wc, retryWrites).bypassDocumentValidation(bypassValidation) + } + def updateOptions = new UpdateOptions().upsert(true).collation(collation).arrayFilters(arrayFilters) + .hint(hint).hintString(hintString) + def replaceOptions = new ReplaceOptions().upsert(true).collation(collation).hint(hint).hintString(hintString) + def deleteOptions = new DeleteOptions().collation(collation) + def bulkOperations = [new InsertOneModel(BsonDocument.parse('{_id: 1}')), + new ReplaceOneModel(BsonDocument.parse('{a: 2}'), BsonDocument.parse('{a: 200}'), replaceOptions), + new UpdateOneModel(BsonDocument.parse('{a: 3}'), BsonDocument.parse('{$set: {a: 1}}'), 
updateOptions),
+                              new UpdateManyModel(BsonDocument.parse('{a: 4}'), BsonDocument.parse('{$set: {a: 1}}'), updateOptions),
+                              new DeleteOneModel(BsonDocument.parse('{a: 5}')),
+                              new DeleteManyModel(BsonDocument.parse('{a: 6}'), deleteOptions)]
+        def bulkWriteMethod = collection.&bulkWrite
+
+        when:
+        def result = execute(bulkWriteMethod, session, bulkOperations)
+        def operation = executor.getWriteOperation() as MixedBulkWriteOperation
+
+        then:
+        result.wasAcknowledged() == writeConcern.isAcknowledged()
+        expect operation, isTheSameAs(expectedOperation(true, writeConcern, null, arrayFilters))
+
+        when:
+        result = execute(bulkWriteMethod, session, bulkOperations, new BulkWriteOptions().ordered(true).bypassDocumentValidation(true))
+        operation = executor.getWriteOperation() as MixedBulkWriteOperation
+
+        then:
+        result.wasAcknowledged() == writeConcern.isAcknowledged()
+        expect operation, isTheSameAs(expectedOperation(true, writeConcern, true, arrayFilters))
+
+        when:
+        result = execute(bulkWriteMethod, session, bulkOperations, new BulkWriteOptions().ordered(false).bypassDocumentValidation(false))
+        operation = executor.getWriteOperation() as MixedBulkWriteOperation
+
+        then:
+        result.wasAcknowledged() == writeConcern.isAcknowledged()
+        expect operation, isTheSameAs(expectedOperation(false, writeConcern, false, arrayFilters))
+
+        where:
+        [writeConcern, arrayFilters, hint, hintString, session, retryWrites] << [
+                [ACKNOWLEDGED, UNACKNOWLEDGED],
+                [null, [], [new BsonDocument('a.b', new BsonInt32(42))]],
+                [null, new BsonDocument('_id', new BsonInt32(1))],
+                [null, '_id_'],
+                [null, Stub(ClientSession)],
+                [true, false]
+        ].combinations()
+    }
+
+    def 'should handle exceptions in bulkWrite correctly'() {
+        given:
+        def codecRegistry = fromProviders([new ValueCodecProvider(), new BsonValueCodecProvider()])
+        def executor = new TestOperationExecutor([new MongoException('failure')])
+        def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED,
+                true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
+
+        when:
+        collection.bulkWrite(null)
+
+        then:
+        thrown(IllegalArgumentException)
+
+        when:
+        collection.bulkWrite([null])
+
+        then:
+        thrown(IllegalArgumentException)
+
+        when: 'a codec is missing, it is acceptable to throw immediately'
+        collection.bulkWrite([new InsertOneModel(new Document('_id', 1))])
+
+        then:
+        thrown(CodecConfigurationException)
+    }
+
+    def 'insertOne should use MixedBulkWriteOperation correctly'() {
+        given:
+        def executor = new TestOperationExecutor((1..3).collect {
+            writeConcern.isAcknowledged() ? acknowledged(INSERT, 0, 0, [], []) : unacknowledged()
+        })
+        def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern,
+                retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
+        def expectedOperation = { WriteConcern wc, Boolean bypassDocumentValidation ->
+            new MixedBulkWriteOperation(namespace, [new InsertRequest(new BsonDocument('_id', new BsonInt32(1)))],
+                    true, wc, retryWrites).bypassDocumentValidation(bypassDocumentValidation)
+        }
+        def insertOneMethod = collection.&insertOne
+
+        when:
+        execute(insertOneMethod, session, new Document('_id', 1))
+        def operation = executor.getWriteOperation() as MixedBulkWriteOperation
+
+        then:
+        expect operation, isTheSameAs(expectedOperation(writeConcern, null))
+        executor.getClientSession() == session
+
+        when:
+        execute(insertOneMethod, session, new Document('_id', 1), new InsertOneOptions().bypassDocumentValidation(true))
+        operation = executor.getWriteOperation() as MixedBulkWriteOperation
+
+        then:
+        expect operation, isTheSameAs(expectedOperation(writeConcern, true))
+        executor.getClientSession() == session
+
+        when:
+        execute(insertOneMethod, session, new Document('_id', 1), new InsertOneOptions().bypassDocumentValidation(false))
+        operation = executor.getWriteOperation() as MixedBulkWriteOperation
+
+        then:
+        expect operation, isTheSameAs(expectedOperation(writeConcern, false))
+        executor.getClientSession() == session
+
+        where:
+        [writeConcern, session, retryWrites] << [
+                [ACKNOWLEDGED, UNACKNOWLEDGED],
+                [null, Stub(ClientSession)],
+                [true, false]
+        ].combinations()
+    }
+
+    def 'insertMany should use MixedBulkWriteOperation correctly'() {
+        given:
+        def executor = new TestOperationExecutor((1..3).collect {
+            writeConcern.isAcknowledged() ?
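+            // builds one stubbed write result per expected insertMany execution; acknowledged only when the write concern is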
acknowledged(INSERT, 0, 0, [], []) : unacknowledged() + }) + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern, + retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def expectedOperation = { boolean ordered, WriteConcern wc, Boolean bypassDocumentValidation -> + new MixedBulkWriteOperation(namespace, + [new InsertRequest(new BsonDocument('_id', new BsonInt32(1))), + new InsertRequest(new BsonDocument('_id', new BsonInt32(2)))], + ordered, wc, retryWrites).bypassDocumentValidation(bypassDocumentValidation) + } + def insertManyMethod = collection.&insertMany + + when: + execute(insertManyMethod, session, [new Document('_id', 1), new Document('_id', 2)]) + def operation = executor.getWriteOperation() as MixedBulkWriteOperation + + then: + expect operation, isTheSameAs(expectedOperation(true, writeConcern, null)) + executor.getClientSession() == session + + when: + execute(insertManyMethod, session, [new Document('_id', 1), new Document('_id', 2)], + new InsertManyOptions().ordered(true).bypassDocumentValidation(true)) + operation = executor.getWriteOperation() as MixedBulkWriteOperation + + then: + expect operation, isTheSameAs(expectedOperation(true, writeConcern, true)) + executor.getClientSession() == session + + when: + execute(insertManyMethod, session, [new Document('_id', 1), new Document('_id', 2)], + new InsertManyOptions().ordered(false).bypassDocumentValidation(false)) + operation = executor.getWriteOperation() as MixedBulkWriteOperation + + then: + expect operation, isTheSameAs(expectedOperation(false, writeConcern, false)) + executor.getClientSession() == session + + where: + [writeConcern, session, retryWrites] << [ + [ACKNOWLEDGED, UNACKNOWLEDGED], + [null, Stub(ClientSession)], + [true, false] + ].combinations() + } + + def 'should validate the insertMany data correctly'() { + given: + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, Stub(OperationExecutor)) + + when: + collection.insertMany(null) + + then: + thrown(IllegalArgumentException) + + when: + collection.insertMany([null]) + + then: + thrown(IllegalArgumentException) + } + + def 'deleteOne should use MixedBulkWriteOperation correctly'() { + given: + def executor = new TestOperationExecutor((1..2).collect { + writeConcern.isAcknowledged() ? acknowledged(DELETE, 1, 0, [], []) : unacknowledged() + }) + def expectedResult = writeConcern.isAcknowledged() ? 
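+                // an acknowledged deleteOne reports the stubbed count of 1; an unacknowledged one reports nothing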
DeleteResult.acknowledged(1) : DeleteResult.unacknowledged() + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern, + retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def deleteOneMethod = collection.&deleteOne + + when: + def result = execute(deleteOneMethod, session, new Document('_id', 1)) + def operation = executor.getWriteOperation() as MixedBulkWriteOperation + + then: + result.wasAcknowledged() == writeConcern.isAcknowledged() + expect operation, isTheSameAs(new MixedBulkWriteOperation(namespace, + [new DeleteRequest(new BsonDocument('_id', new BsonInt32(1))).multi(false)], + true, writeConcern, retryWrites)) + result == expectedResult + executor.getClientSession() == session + + when: + result = execute(deleteOneMethod, session, new Document('_id', 1), new DeleteOptions().collation(collation)) + operation = executor.getWriteOperation() as MixedBulkWriteOperation + + then: + result.wasAcknowledged() == writeConcern.isAcknowledged() + expect operation, isTheSameAs(new MixedBulkWriteOperation(namespace, + [new DeleteRequest(new BsonDocument('_id', new BsonInt32(1))).multi(false).collation(collation)], + true, writeConcern, retryWrites)) + result == expectedResult + executor.getClientSession() == session + + where: + [writeConcern, session, retryWrites] << [ + [ACKNOWLEDGED, UNACKNOWLEDGED], + [null, Stub(ClientSession)], + [true, false] + ].combinations() + } + + def 'deleteOne should translate BulkWriteException correctly'() { + given: + def bulkWriteException = new MongoBulkWriteException(acknowledged(0, 0, 1, null, [], []), + [], new WriteConcernError(100, 'codeName', 'Message', new BsonDocument()), new ServerAddress(), [] as Set) + + def executor = new TestOperationExecutor([bulkWriteException]) + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + + when: + collection.deleteOne(new Document('_id', 1)) + + then: + def ex = thrown(MongoWriteConcernException) + ex.writeConcernError == bulkWriteException.writeConcernError + ex.writeResult.wasAcknowledged() + ex.writeResult.count == 1 + !ex.writeResult.updateOfExisting + ex.writeResult.upsertedId == null + } + + def 'deleteMany should use MixedBulkWriteOperation correctly'() { + given: + def executor = new TestOperationExecutor((1..2).collect { + writeConcern.isAcknowledged() ? acknowledged(DELETE, 1, 0, [], []) : unacknowledged() + }) + def expectedResult = writeConcern.isAcknowledged() ? 
DeleteResult.acknowledged(1) : DeleteResult.unacknowledged()
+        def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern,
+                retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
+        def deleteManyMethod = collection.&deleteMany
+
+        when:
+        def result = execute(deleteManyMethod, session, new Document('_id', 1))
+        def operation = executor.getWriteOperation() as MixedBulkWriteOperation
+
+        then:
+        result.wasAcknowledged() == writeConcern.isAcknowledged()
+        expect operation, isTheSameAs(new MixedBulkWriteOperation(namespace,
+                [new DeleteRequest(new BsonDocument('_id', new BsonInt32(1))).multi(true)],
+                true, writeConcern, retryWrites))
+        result == expectedResult
+
+        when:
+        result = execute(deleteManyMethod, session, new Document('_id', 1), new DeleteOptions().collation(collation))
+        operation = executor.getWriteOperation() as MixedBulkWriteOperation
+
+        then:
+        result.wasAcknowledged() == writeConcern.isAcknowledged()
+        expect operation, isTheSameAs(new MixedBulkWriteOperation(namespace,
+                [new DeleteRequest(new BsonDocument('_id', new BsonInt32(1))).multi(true).collation(collation)],
+                true, writeConcern, retryWrites))
+        result == expectedResult
+
+        where:
+        [writeConcern, session, retryWrites] << [
+                [ACKNOWLEDGED, UNACKNOWLEDGED],
+                [null, Stub(ClientSession)],
+                [true, false]
+        ].combinations()
+    }
+
+    def 'replaceOne should use MixedBulkWriteOperation correctly'() {
+        given:
+        def executor = new TestOperationExecutor((1..2).collect {
+            writeConcern.isAcknowledged() ?
+                    acknowledged(REPLACE, 1, modifiedCount,
+                            upsertedId == null ? [] : [new com.mongodb.bulk.BulkWriteUpsert(0, upsertedId)], []) :
+                    unacknowledged()
+        })
+        def expectedResult = writeConcern.isAcknowledged() ?
+                UpdateResult.acknowledged(1, modifiedCount, upsertedId) : UpdateResult.unacknowledged()
+        def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern,
+                retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)
+
+        def expectedOperation = { boolean upsert, WriteConcern wc, Boolean bypassValidation, Collation collation ->
+            new MixedBulkWriteOperation(namespace,
+                    [new UpdateRequest(new BsonDocument('a', new BsonInt32(1)), new BsonDocument('a', new BsonInt32(10)), REPLACE)
+                             .collation(collation).upsert(upsert).hint(hint).hintString(hintString).sort(sort)], true, wc, retryWrites)
+                    .bypassDocumentValidation(bypassValidation)
+        }
+        def replaceOneMethod = collection.&replaceOne
+
+        when:
+        def result = execute(replaceOneMethod, session, new Document('a', 1), new Document('a', 10),
+                new ReplaceOptions().upsert(true).bypassDocumentValidation(bypassDocumentValidation).collation(collation)
+                        .hint(hint).hintString(hintString).sort(sort))
+        def operation = executor.getWriteOperation() as MixedBulkWriteOperation
+
+        then:
+        executor.getClientSession() == session
+        expect operation, isTheSameAs(expectedOperation(true, writeConcern, bypassDocumentValidation, collation))
+        result == expectedResult
+
+        where:
+        [bypassDocumentValidation, modifiedCount, upsertedId, writeConcern, session, retryWrites, hint, hintString, sort] << [
+                [null, true, false],
+                [1],
+                [null, new BsonInt32(42)],
+                [ACKNOWLEDGED, UNACKNOWLEDGED],
+                [null, Stub(ClientSession)],
+                [true, false],
+                [null, new BsonDocument('_id', new BsonInt32(1))],
+                [null, '_id_'],
+                [null, new BsonDocument('_id', new BsonInt32(1))]
+        ].combinations()
+    }
+
+    def 'replaceOne should translate BulkWriteException correctly'() {
+        given:
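+        // a write concern error raised by the bulk API should surface as a MongoWriteConcernException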
def bulkWriteException = new MongoBulkWriteException(bulkWriteResult, [], + new WriteConcernError(100, 'codeName', 'Message', new BsonDocument()), + new ServerAddress(), [] as Set) + + def executor = new TestOperationExecutor([bulkWriteException]) + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + + when: + collection.replaceOne(new Document('_id', 1), new Document('_id', 1)) + + then: + def ex = thrown(MongoWriteConcernException) + ex.writeConcernError == bulkWriteException.writeConcernError + ex.writeResult.wasAcknowledged() == writeResult.wasAcknowledged() + ex.writeResult.count == writeResult.count + ex.writeResult.updateOfExisting == writeResult.updateOfExisting + ex.writeResult.upsertedId == writeResult.upsertedId + + where: + bulkWriteResult | writeResult + acknowledged(0, 1, 0, 1, [], []) | WriteConcernResult.acknowledged(1, true, null) + acknowledged(0, 0, 0, 0, + [new com.mongodb.bulk.BulkWriteUpsert(0, new BsonInt32(1))], []) | WriteConcernResult.acknowledged(1, false, + new BsonInt32(1)) + } + + def 'updateOne should use MixedBulkWriteOperationOperation correctly'() { + given: + def executor = new TestOperationExecutor((1..2).collect { + writeConcern.isAcknowledged() ? acknowledged(UPDATE, 1, 0, [], []) : unacknowledged() + }) + def expectedResult = writeConcern.isAcknowledged() ? UpdateResult.acknowledged(1, 0, null) : UpdateResult.unacknowledged() + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern, + retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def expectedOperation = { boolean upsert, WriteConcern wc, Boolean bypassDocumentValidation, Collation collation, + List filters, BsonDocument hintDoc, String hintStr, BsonDocument sortDoc -> + new MixedBulkWriteOperation(namespace, + [new UpdateRequest(new BsonDocument('a', new BsonInt32(1)), new BsonDocument('a', new BsonInt32(10)), UPDATE) + .multi(false).upsert(upsert).collation(collation).arrayFilters(filters) + .hint(hintDoc).hintString(hintStr).sort(sortDoc)], true, wc, retryWrites) + .bypassDocumentValidation(bypassDocumentValidation) + } + def updateOneMethod = collection.&updateOne + + when: + def result = execute(updateOneMethod, session, new Document('a', 1), new Document('a', 10)) + def operation = executor.getWriteOperation() as MixedBulkWriteOperation + + then: + expect operation, isTheSameAs(expectedOperation(false, writeConcern, null, null, null, null, null, null)) + executor.getClientSession() == session + result == expectedResult + + when: + result = execute(updateOneMethod, session, new Document('a', 1), new Document('a', 10), + new UpdateOptions().upsert(true).bypassDocumentValidation(true).collation(collation) + .arrayFilters(arrayFilters).hint(hint).hintString(hintString).sort(sort)) + operation = executor.getWriteOperation() as MixedBulkWriteOperation + + then: + expect operation, isTheSameAs(expectedOperation(true, writeConcern, true, collation, arrayFilters, hint, hintString, sort)) + executor.getClientSession() == session + result == expectedResult + + where: + [writeConcern, arrayFilters, session, retryWrites, hint, hintString, sort] << [ + [ACKNOWLEDGED, UNACKNOWLEDGED], + [null, [], [new BsonDocument('a.b', new BsonInt32(42))]], + [null, Stub(ClientSession)], + [true, false], + [null, new BsonDocument('_id', new BsonInt32(1))], + [null, '_id_'], + [null, new BsonDocument('_id', new 
BsonInt32(1))] + ].combinations() + } + + def 'updateMany should use MixedBulkWriteOperation correctly'() { + given: + def executor = new TestOperationExecutor((1..2).collect { + writeConcern.isAcknowledged() ? acknowledged(UPDATE, 5, 3, [], []) : unacknowledged() + }) + def expectedResult = writeConcern.isAcknowledged() ? UpdateResult.acknowledged(5, 3, null) : UpdateResult.unacknowledged() + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern, + retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def expectedOperation = { boolean upsert, WriteConcern wc, Boolean bypassDocumentValidation, Collation collation, + List filters, BsonDocument hintDoc, String hintStr -> + new MixedBulkWriteOperation(namespace, + [new UpdateRequest(new BsonDocument('a', new BsonInt32(1)), new BsonDocument('a', new BsonInt32(10)), UPDATE) + .multi(true).upsert(upsert).collation(collation).arrayFilters(filters) + .hint(hintDoc).hintString(hintStr)], true, wc, retryWrites) + .bypassDocumentValidation(bypassDocumentValidation) + } + def updateManyMethod = collection.&updateMany + + when: + def result = execute(updateManyMethod, session, new Document('a', 1), new Document('a', 10)) + def operation = executor.getWriteOperation() as MixedBulkWriteOperation + + then: + expect operation, isTheSameAs(expectedOperation(false, writeConcern, null, null, null, null, null)) + result == expectedResult + + when: + result = execute(updateManyMethod, session, new Document('a', 1), new Document('a', 10), + new UpdateOptions().upsert(true).bypassDocumentValidation(true).collation(collation) + .arrayFilters(arrayFilters).hint(hint).hintString(hintString)) + operation = executor.getWriteOperation() as MixedBulkWriteOperation + + then: + expect operation, isTheSameAs(expectedOperation(true, writeConcern, true, collation, arrayFilters, hint, + hintString)) + result == expectedResult + + where: + [writeConcern, arrayFilters, session, retryWrites, hint, hintString] << [ + [ACKNOWLEDGED, UNACKNOWLEDGED], + [null, [], [new BsonDocument('a.b', new BsonInt32(42))]], + [null, Stub(ClientSession)], + [true, false], + [null, new BsonDocument('_id', new BsonInt32(1))], + [null, '_id_'] + ].combinations() + } + + def 'should translate MongoBulkWriteException to MongoWriteException'() { + given: + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + + when: + collection.insertOne(new Document('_id', 1)) + + then: + def e = thrown(MongoWriteException) + e.error == new WriteError(11000, 'oops', new BsonDocument()) + + where: + executor << [new TestOperationExecutor([new MongoBulkWriteException(acknowledged(INSERT, 1, 0, [], []), + [new BulkWriteError(11000, 'oops', + new BsonDocument(), 0)], + null, new ServerAddress(), [] as Set)])] + } + + def 'should translate MongoBulkWriteException to MongoWriteConcernException'() { + given: + def executor = new TestOperationExecutor([new MongoBulkWriteException(acknowledged(INSERT, 1, 0, [], []), [], + new WriteConcernError(42, 'codeName', 'Message', new BsonDocument()), + new ServerAddress(), [] as Set)]) + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + + when: + collection.insertOne(new Document('_id', 1)) + + then: + def e = thrown(MongoWriteConcernException) 
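            // Editorial aside, not part of this patch: the translations verified by this and the surrounding
            // feature methods are what let callers branch on failure type. A hedged, never-invoked sketch of
            // the resulting try/catch shape (the collection handle is illustrative only):
            def writeFailureHandlingSketch = { com.mongodb.client.MongoCollection<Document> coll ->
                try {
                    coll.insertOne(new Document('_id', 1))
                } catch (MongoWriteException writeFailure) {
                    writeFailure.error.code                     // per-write failure, e.g. 11000 for a duplicate key
                } catch (MongoWriteConcernException writeConcernFailure) {
                    writeConcernFailure.writeConcernError.code  // the write applied but the write concern was not satisfied
                }
            }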
+ e.writeConcernError == new WriteConcernError(42, 'codeName', 'Message', new BsonDocument()) + } + + def 'should use FindOneAndDeleteOperation correctly'() { + given: + def executor = new TestOperationExecutor((1..4).collect { + writeConcern.isAcknowledged() ? WriteConcernResult.acknowledged(1, true, null) : unacknowledged() + }) + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def expectedOperation = new FindAndDeleteOperation(namespace, ACKNOWLEDGED, retryWrites, + new DocumentCodec()) + .filter(new BsonDocument('a', new BsonInt32(1))) + def findOneAndDeleteMethod = collection.&findOneAndDelete + + when: + execute(findOneAndDeleteMethod, session, new Document('a', 1)) + def operation = executor.getWriteOperation() as FindAndDeleteOperation + + then: + expect operation, isTheSameAs(expectedOperation) + + when: + expectedOperation = + new FindAndDeleteOperation(namespace, ACKNOWLEDGED, retryWrites, new DocumentCodec()) + .filter(new BsonDocument('a', new BsonInt32(1))) + .projection(new BsonDocument('projection', new BsonInt32(1))) + .collation(collation) + execute(findOneAndDeleteMethod, session, new Document('a', 1), + new FindOneAndDeleteOptions() + .projection(new Document('projection', 1)) + .maxTime(100, MILLISECONDS) + .collation(collation)) + operation = executor.getWriteOperation() as FindAndDeleteOperation + + then: + expect operation, isTheSameAs(expectedOperation) + + where: + [writeConcern, session, retryWrites] << [ + [ACKNOWLEDGED, UNACKNOWLEDGED], + [null, Stub(ClientSession)], + [true, false] + ].combinations() + } + + def 'should use FindOneAndReplaceOperation correctly'() { + given: + def executor = new TestOperationExecutor((1..3).collect { + writeConcern.isAcknowledged() ? 
WriteConcernResult.acknowledged(1, true, null) : WriteConcernResult.unacknowledged() + }) + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern, + retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def expectedOperation = new FindAndReplaceOperation(namespace, writeConcern, + retryWrites, new DocumentCodec(), new BsonDocument('a', new BsonInt32(10))) + .filter(new BsonDocument('a', new BsonInt32(1))) + def findOneAndReplaceMethod = collection.&findOneAndReplace + + when: + execute(findOneAndReplaceMethod, session, new Document('a', 1), new Document('a', 10)) + def operation = executor.getWriteOperation() as FindAndReplaceOperation + + then: + expect operation, isTheSameAs(expectedOperation) + + when: + expectedOperation = new FindAndReplaceOperation(namespace, writeConcern, + retryWrites, new DocumentCodec(), new BsonDocument('a', new BsonInt32(10))) + .filter(new BsonDocument('a', new BsonInt32(1))) + .projection(new BsonDocument('projection', new BsonInt32(1))) + .bypassDocumentValidation(false) + .collation(collation) + execute(findOneAndReplaceMethod, session, new Document('a', 1), new Document('a', 10), + new FindOneAndReplaceOptions() + .projection(new Document('projection', 1)) + .maxTime(100, MILLISECONDS) + .bypassDocumentValidation(false) + .collation(collation)) + operation = executor.getWriteOperation() as FindAndReplaceOperation + + then: + expect operation, isTheSameAs(expectedOperation) + + where: + [writeConcern, session, retryWrites] << [ + [ACKNOWLEDGED, UNACKNOWLEDGED], + [null, Stub(ClientSession)], + [true, false] + ].combinations() + } + + def 'should use FindAndUpdateOperation correctly'() { + given: + def executor = new TestOperationExecutor((1..3).collect { + writeConcern.isAcknowledged() ? 
WriteConcernResult.acknowledged(1, true, null) : unacknowledged() + }) + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, writeConcern, + retryWrites, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def expectedOperation = new FindAndUpdateOperation(namespace, writeConcern, retryWrites, + new DocumentCodec(), new BsonDocument('a', new BsonInt32(10))) + .filter(new BsonDocument('a', new BsonInt32(1))) + def findOneAndUpdateMethod = collection.&findOneAndUpdate + + when: + execute(findOneAndUpdateMethod, session, new Document('a', 1), new Document('a', 10)) + def operation = executor.getWriteOperation() as FindAndUpdateOperation + + then: + expect operation, isTheSameAs(expectedOperation) + + when: + expectedOperation = new FindAndUpdateOperation(namespace, writeConcern, retryWrites, + new DocumentCodec(), new BsonDocument('a', new BsonInt32(10))) + .filter(new BsonDocument('a', new BsonInt32(1))) + .projection(new BsonDocument('projection', new BsonInt32(1))) + .bypassDocumentValidation(bypassDocumentValidation) + .collation(collation) + .arrayFilters(arrayFilters) + + execute(findOneAndUpdateMethod, session, new Document('a', 1), new Document('a', 10), + new FindOneAndUpdateOptions() + .projection(new Document('projection', 1)) + .maxTime(100, MILLISECONDS) + .bypassDocumentValidation(bypassDocumentValidation) + .collation(collation) + .arrayFilters(arrayFilters)) + operation = executor.getWriteOperation() as FindAndUpdateOperation + + then: + expect operation, isTheSameAs(expectedOperation) + + where: + [writeConcern, arrayFilters, bypassDocumentValidation, session, retryWrites] << [ + [ACKNOWLEDGED, UNACKNOWLEDGED], + [null, [], [new BsonDocument('a.b', new BsonInt32(42))]], + [true, false], + [null, Stub(ClientSession)], + [true, false], + ].combinations() + } + + def 'should use DropCollectionOperation correctly'() { + given: + def executor = new TestOperationExecutor([null]) + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def expectedOperation = new DropCollectionOperation(namespace, ACKNOWLEDGED) + def dropMethod = collection.&drop + + when: + execute(dropMethod, session) + def operation = executor.getWriteOperation() as DropCollectionOperation + executor.getClientSession() == session + + then: + expect operation, isTheSameAs(expectedOperation) + + where: + session << [null, Stub(ClientSession)] + } + + def 'should use CreateIndexOperations correctly'() { + given: + def executor = new TestOperationExecutor([null, null, null, null, null]) + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def createIndexMethod = collection.&createIndex + def createIndexesMethod = collection.&createIndexes + + when: + def expectedOperation = new CreateIndexesOperation(namespace, + [new IndexRequest(new BsonDocument('key', new BsonInt32(1)))], ACKNOWLEDGED) + def indexName = execute(createIndexMethod, session, new Document('key', 1)) + def operation = executor.getWriteOperation() as CreateIndexesOperation + + then: + expect operation, isTheSameAs(expectedOperation) + indexName == 'key_1' + + when: + expectedOperation = new CreateIndexesOperation(namespace, + [new IndexRequest(new BsonDocument('key', new BsonInt32(1))), + new IndexRequest(new BsonDocument('key1', new 
BsonInt32(1)))], ACKNOWLEDGED) + def indexNames = execute(createIndexesMethod, session, [new IndexModel(new Document('key', 1)), + new IndexModel(new Document('key1', 1))]) + operation = executor.getWriteOperation() as CreateIndexesOperation + + then: + expect operation, isTheSameAs(expectedOperation) + executor.getClientSession() == session + indexNames == ['key_1', 'key1_1'] + + when: + expectedOperation = new CreateIndexesOperation(namespace, + [new IndexRequest(new BsonDocument('key', new BsonInt32(1))), + new IndexRequest(new BsonDocument('key1', new BsonInt32(1)))], ACKNOWLEDGED) + indexNames = execute(createIndexesMethod, session, + [new IndexModel(new Document('key', 1)), new IndexModel(new Document('key1', 1))], + new CreateIndexOptions().maxTime(100, MILLISECONDS)) + operation = executor.getWriteOperation() as CreateIndexesOperation + + then: + expect operation, isTheSameAs(expectedOperation) + executor.getClientSession() == session + indexNames == ['key_1', 'key1_1'] + + when: + expectedOperation = new CreateIndexesOperation(namespace, + [new IndexRequest(new BsonDocument('key', new BsonInt32(1))), + new IndexRequest(new BsonDocument('key1', new BsonInt32(1)))], ACKNOWLEDGED) + .commitQuorum(CreateIndexCommitQuorum.VOTING_MEMBERS) + indexNames = execute(createIndexesMethod, session, + [new IndexModel(new Document('key', 1)), new IndexModel(new Document('key1', 1))], + new CreateIndexOptions().commitQuorum(CreateIndexCommitQuorum.VOTING_MEMBERS)) + operation = executor.getWriteOperation() as CreateIndexesOperation + + then: + expect operation, isTheSameAs(expectedOperation) + executor.getClientSession() == session + indexNames == ['key_1', 'key1_1'] + + when: + expectedOperation = new CreateIndexesOperation(namespace, + [new IndexRequest(new BsonDocument('key', new BsonInt32(1))) + .background(true) + .unique(true) + .sparse(true) + .name('aIndex') + .expireAfter(100, TimeUnit.SECONDS) + .version(1) + .weights(new BsonDocument('a', new BsonInt32(1000))) + .defaultLanguage('es') + .languageOverride('language') + .textVersion(1) + .sphereVersion(2) + .bits(1) + .min(-180.0) + .max(180.0) + .storageEngine(BsonDocument.parse('{wiredTiger: {configString: "block_compressor=zlib"}}')) + .partialFilterExpression(BsonDocument.parse('{status: "active"}')) + .collation(collation) + .wildcardProjection(new BsonDocument('a', new BsonInt32(1))) + .hidden(true) + ], ACKNOWLEDGED) + indexName = execute(createIndexMethod, session, new Document('key', 1), new IndexOptions() + .background(true) + .unique(true) + .sparse(true) + .name('aIndex') + .expireAfter(100, TimeUnit.SECONDS) + .version(1) + .weights(new BsonDocument('a', new BsonInt32(1000))) + .defaultLanguage('es') + .languageOverride('language') + .textVersion(1) + .sphereVersion(2) + .bits(1) + .min(-180.0) + .max(180.0) + .storageEngine(BsonDocument.parse('{wiredTiger: {configString: "block_compressor=zlib"}}')) + .partialFilterExpression(BsonDocument.parse('{status: "active"}')) + .collation(collation) + .wildcardProjection(new BsonDocument('a', new BsonInt32(1))) + .hidden(true)) + operation = executor.getWriteOperation() as CreateIndexesOperation + + then: + expect operation, isTheSameAs(expectedOperation) + executor.getClientSession() == session + indexName == 'aIndex' + + where: + session << [null, Stub(ClientSession)] + } + + def 'should validate the createIndexes data correctly'() { + given: + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, 
JAVA_LEGACY, null, TIMEOUT_SETTINGS, Stub(OperationExecutor)) + + when: + collection.createIndexes(null) + + then: + thrown(IllegalArgumentException) + + when: + collection.createIndexes([null]) + + then: + thrown(IllegalArgumentException) + } + + def 'should use ListIndexesOperations correctly'() { + given: + def batchCursor = Stub(BatchCursor) + def executor = new TestOperationExecutor([batchCursor, batchCursor, batchCursor]) + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def listIndexesMethod = collection.&listIndexes + + when: + execute(listIndexesMethod, session).into([]) + def operation = executor.getReadOperation() as ListIndexesOperation + + then: + expect operation, isTheSameAs(new ListIndexesOperation(namespace, new DocumentCodec()).retryReads(true)) + executor.getClientSession() == session + + when: + def indexes = execute(listIndexesMethod, session, BsonDocument).into([]) + operation = executor.getReadOperation() as ListIndexesOperation + indexes == [] + + then: + expect operation, isTheSameAs(new ListIndexesOperation(namespace, new BsonDocumentCodec()).retryReads(true)) + executor.getClientSession() == session + + when: + execute(listIndexesMethod, session).batchSize(10).maxTime(100, MILLISECONDS).iterator() + operation = executor.getReadOperation() as ListIndexesOperation + + then: + expect operation, isTheSameAs(new ListIndexesOperation(namespace, new DocumentCodec()).batchSize(10) + .retryReads(true)) + executor.getClientSession() == session + + where: + session << [null, Stub(ClientSession)] + } + + def 'should use DropIndexOperation correctly for dropIndex'() { + given: + def executor = new TestOperationExecutor([null, null, null]) + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def dropIndexMethod = collection.&dropIndex + + when: + def expectedOperation = new DropIndexOperation(namespace, 'indexName', ACKNOWLEDGED) + execute(dropIndexMethod, session, 'indexName') + def operation = executor.getWriteOperation() as DropIndexOperation + + then: + expect operation, isTheSameAs(expectedOperation) + executor.getClientSession() == session + + when: + def keys = new BsonDocument('x', new BsonInt32(1)) + expectedOperation = new DropIndexOperation(namespace, keys, ACKNOWLEDGED) + execute(dropIndexMethod, session, keys) + operation = executor.getWriteOperation() as DropIndexOperation + + then: + expect operation, isTheSameAs(expectedOperation) + executor.getClientSession() == session + + when: + expectedOperation = new DropIndexOperation(namespace, keys, ACKNOWLEDGED) + execute(dropIndexMethod, session, keys, new DropIndexOptions().maxTime(100, MILLISECONDS)) + operation = executor.getWriteOperation() as DropIndexOperation + + then: + expect operation, isTheSameAs(expectedOperation) + executor.getClientSession() == session + + where: + session << [null, Stub(ClientSession)] + } + + def 'should use DropIndexOperation correctly for dropIndexes'() { + given: + def executor = new TestOperationExecutor([null, null]) + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def expectedOperation = new DropIndexOperation(namespace, '*', ACKNOWLEDGED) + def dropIndexesMethod = 
collection.&dropIndexes + + when: + execute(dropIndexesMethod, session) + def operation = executor.getWriteOperation() as DropIndexOperation + + then: + expect operation, isTheSameAs(expectedOperation) + executor.getClientSession() == session + + when: + expectedOperation = new DropIndexOperation(namespace, '*', ACKNOWLEDGED) + execute(dropIndexesMethod, session, new DropIndexOptions().maxTime(100, MILLISECONDS)) + operation = executor.getWriteOperation() as DropIndexOperation + + then: + expect operation, isTheSameAs(expectedOperation) + executor.getClientSession() == session + + where: + session << [null, Stub(ClientSession)] + } + + def 'should use RenameCollectionOperation correctly'() { + given: + def executor = new TestOperationExecutor([null, null]) + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def newNamespace = new MongoNamespace(namespace.getDatabaseName(), 'newName') + def renameCollectionOptions = new RenameCollectionOptions().dropTarget(dropTarget) + def expectedOperation = new RenameCollectionOperation(namespace, newNamespace, ACKNOWLEDGED) + def renameCollection = collection.&renameCollection + + when: + execute(renameCollection, session, newNamespace) + def operation = executor.getWriteOperation() as RenameCollectionOperation + + then: + expect operation, isTheSameAs(expectedOperation) + executor.getClientSession() == session + + when: + execute(renameCollection, session, newNamespace, renameCollectionOptions) + operation = executor.getWriteOperation() as RenameCollectionOperation + + then: + expect operation, isTheSameAs(expectedOperation.dropTarget(dropTarget)) + executor.getClientSession() == session + + where: + [session, dropTarget] << [[null, Stub(ClientSession)], [true, false]].combinations() + } + + def 'should not expect to mutate the document when inserting'() { + given: + def executor = new TestOperationExecutor([acknowledged(INSERT, 1, 0, [], [])]) + def customCodecRegistry = CodecRegistries.fromRegistries(fromProviders(new ImmutableDocumentCodecProvider()), codecRegistry) + def collection = new MongoCollectionImpl(namespace, ImmutableDocument, customCodecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def document = new ImmutableDocument(['a': 1]) + + when: + collection.insertOne(document) + + then: + !document.containsKey('_id') + + when: + def operation = executor.getWriteOperation() as MixedBulkWriteOperation + def request = operation.writeRequests.get(0) as InsertRequest + + then: + request.getDocument().containsKey('_id') + } + + def 'should not expect to mutate the document when bulk writing'() { + given: + def executor = new TestOperationExecutor([null]) + def customCodecRegistry = CodecRegistries.fromRegistries(fromProviders(new ImmutableDocumentCodecProvider()), codecRegistry) + def collection = new MongoCollectionImpl(namespace, ImmutableDocument, customCodecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def document = new ImmutableDocument(['a': 1]) + + when: + collection.bulkWrite([new InsertOneModel(document)]) + + then: + !document.containsKey('_id') + + when: + def operation = executor.getWriteOperation() as MixedBulkWriteOperation + def request = operation.writeRequests.get(0) as InsertRequest + + then: + request.getDocument().containsKey('_id') + } + + def 'should validate 
the client session correctly'() { + given: + def collection = new MongoCollectionImpl(namespace, Document, codecRegistry, readPreference, ACKNOWLEDGED, + true, true, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, + Stub(OperationExecutor)) + + when: + collection.aggregate(null, [Document.parse('{$match:{}}')]) + then: + thrown(IllegalArgumentException) + + when: + collection.bulkWrite(null, [new InsertOneModel(new Document())]) + then: + thrown(IllegalArgumentException) + + when: + collection.createIndex(null, new Document()) + then: + thrown(IllegalArgumentException) + + when: + collection.createIndexes(null, [Stub(IndexModel)]) + then: + thrown(IllegalArgumentException) + + when: + collection.deleteMany(null, new Document()) + then: + thrown(IllegalArgumentException) + + when: + collection.deleteOne(null, new Document()) + then: + thrown(IllegalArgumentException) + + when: + collection.distinct(null, 'field', Document) + then: + thrown(IllegalArgumentException) + + when: + collection.distinct(null, new Document(), Document) + then: + thrown(IllegalArgumentException) + + when: + collection.drop((ClientSession) null) + then: + thrown(IllegalArgumentException) + + when: + collection.dropIndex(null, 'index') + then: + thrown(IllegalArgumentException) + + when: + collection.dropIndex(null, new Document()) + then: + thrown(IllegalArgumentException) + + when: + collection.dropIndexes((ClientSession) null) + then: + thrown(IllegalArgumentException) + + when: + collection.find((ClientSession) null) + then: + thrown(IllegalArgumentException) + + when: + collection.findOneAndDelete(null, new Document()) + then: + thrown(IllegalArgumentException) + + when: + collection.findOneAndReplace(null, new Document(), new Document()) + then: + thrown(IllegalArgumentException) + + when: + collection.findOneAndUpdate(null, new Document(), new Document()) + then: + thrown(IllegalArgumentException) + + when: + collection.insertMany(null, [new Document()]) + then: + thrown(IllegalArgumentException) + + when: + collection.insertOne(null, new Document()) + then: + thrown(IllegalArgumentException) + + when: + collection.listIndexes((ClientSession) null) + then: + thrown(IllegalArgumentException) + + when: + collection.mapReduce(null, '') + then: + thrown(IllegalArgumentException) + + when: + collection.renameCollection(null, new MongoNamespace('db', 'coll')) + then: + thrown(IllegalArgumentException) + + when: + collection.replaceOne(null, new Document(), new Document()) + then: + thrown(IllegalArgumentException) + + when: + collection.updateMany(null, new Document(), Document.parse('{$set: {a: 1}}')) + then: + thrown(IllegalArgumentException) + + when: + collection.updateOne(null, new Document(), Document.parse('{$set: {a: 1}}')) + then: + thrown(IllegalArgumentException) + + when: + collection.watch((ClientSession) null) + then: + thrown(IllegalArgumentException) + } + +} diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/MongoDatabaseSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoDatabaseSpecification.groovy new file mode 100644 index 00000000000..56b55f61332 --- /dev/null +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoDatabaseSpecification.groovy @@ -0,0 +1,558 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.internal + +import com.mongodb.MongoClientSettings +import com.mongodb.MongoNamespace +import com.mongodb.ReadConcern +import com.mongodb.WriteConcern +import com.mongodb.client.ClientSession +import com.mongodb.client.model.Collation +import com.mongodb.client.model.CreateCollectionOptions +import com.mongodb.client.model.CreateViewOptions +import com.mongodb.client.model.IndexOptionDefaults +import com.mongodb.client.model.ValidationAction +import com.mongodb.client.model.ValidationLevel +import com.mongodb.client.model.ValidationOptions +import com.mongodb.internal.client.model.AggregationLevel +import com.mongodb.internal.client.model.changestream.ChangeStreamLevel +import com.mongodb.internal.operation.CommandReadOperation +import com.mongodb.internal.operation.CreateCollectionOperation +import com.mongodb.internal.operation.CreateViewOperation +import com.mongodb.internal.operation.DropDatabaseOperation +import org.bson.BsonBoolean +import org.bson.BsonDocument +import org.bson.BsonInt32 +import org.bson.Document +import org.bson.codecs.BsonValueCodecProvider +import org.bson.codecs.DocumentCodecProvider +import org.bson.codecs.UuidCodec +import org.bson.codecs.ValueCodecProvider +import spock.lang.Specification + +import java.util.concurrent.TimeUnit + +import static com.mongodb.ClusterFixture.TIMEOUT_SETTINGS +import static com.mongodb.CustomMatchers.isTheSameAs +import static com.mongodb.ReadPreference.primary +import static com.mongodb.ReadPreference.primaryPreferred +import static com.mongodb.ReadPreference.secondary +import static com.mongodb.client.internal.TestHelper.execute +import static org.bson.UuidRepresentation.C_SHARP_LEGACY +import static org.bson.UuidRepresentation.JAVA_LEGACY +import static org.bson.codecs.configuration.CodecRegistries.fromProviders +import static spock.util.matcher.HamcrestSupport.expect + +class MongoDatabaseSpecification extends Specification { + + def name = 'databaseName' + def codecRegistry = MongoClientSettings.getDefaultCodecRegistry() + def readPreference = secondary() + def writeConcern = WriteConcern.ACKNOWLEDGED + def readConcern = ReadConcern.DEFAULT + def collation = Collation.builder().locale('en').build() + + def 'should throw IllegalArgumentException if name is invalid'() { + when: + new MongoDatabaseImpl('a.b', codecRegistry, readPreference, writeConcern, false, false, readConcern, + JAVA_LEGACY, null, TIMEOUT_SETTINGS, new TestOperationExecutor([])) + + then: + thrown(IllegalArgumentException) + } + + def 'should throw IllegalArgumentException from getCollection if collectionName is invalid'() { + given: + def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, readConcern, + JAVA_LEGACY, null, TIMEOUT_SETTINGS, new TestOperationExecutor([])) + + when: + database.getCollection('') + + then: + thrown(IllegalArgumentException) + } + + def 'should return the correct name from getName'() { + given: + def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, readConcern, + JAVA_LEGACY, null, 
TIMEOUT_SETTINGS, new TestOperationExecutor([])) + + expect: + database.getName() == name + } + + def 'should behave correctly when using withCodecRegistry'() { + given: + def newCodecRegistry = fromProviders(new ValueCodecProvider()) + def executor = new TestOperationExecutor([]) + + when: + def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, true, readConcern, + C_SHARP_LEGACY, null, TIMEOUT_SETTINGS, executor) + .withCodecRegistry(newCodecRegistry) + + then: + (database.getCodecRegistry().get(UUID) as UuidCodec).getUuidRepresentation() == C_SHARP_LEGACY + expect database, isTheSameAs(new MongoDatabaseImpl(name, database.getCodecRegistry(), readPreference, writeConcern, + false, true, readConcern, C_SHARP_LEGACY, null, TIMEOUT_SETTINGS, executor)) + } + + def 'should behave correctly when using withReadPreference'() { + given: + def newReadPreference = primary() + def executor = new TestOperationExecutor([]) + + when: + def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + .withReadPreference(newReadPreference) + + then: + database.getReadPreference() == newReadPreference + expect database, isTheSameAs(new MongoDatabaseImpl(name, codecRegistry, newReadPreference, writeConcern, false, false, + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)) + } + + def 'should behave correctly when using withWriteConcern'() { + given: + def newWriteConcern = WriteConcern.MAJORITY + def executor = new TestOperationExecutor([]) + + when: + def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + .withWriteConcern(newWriteConcern) + + then: + database.getWriteConcern() == newWriteConcern + expect database, isTheSameAs(new MongoDatabaseImpl(name, codecRegistry, readPreference, newWriteConcern, false, false, + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)) + } + + def 'should behave correctly when using withReadConcern'() { + given: + def newReadConcern = ReadConcern.MAJORITY + def executor = new TestOperationExecutor([]) + + when: + def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + .withReadConcern(newReadConcern) + + then: + database.getReadConcern() == newReadConcern + expect database, isTheSameAs(new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, + newReadConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor)) + } + + def 'should behave correctly when using withTimeout'() { + given: + def executor = new TestOperationExecutor([]) + def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + + when: + def newDatabase = database.withTimeout(10_000, TimeUnit.MILLISECONDS) + + then: + newDatabase.getTimeout(TimeUnit.MILLISECONDS) == 10_000 + expect newDatabase, isTheSameAs(new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS.withTimeout(10_000, TimeUnit.MILLISECONDS), executor)) + + when: + database.withTimeout(500, TimeUnit.NANOSECONDS) + + then: + thrown(IllegalArgumentException) + } + + def 'should be able to executeCommand correctly'() { + given: + def command = new 
BsonDocument('command', new BsonInt32(1)) + def executor = new TestOperationExecutor([null, null, null, null]) + def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def runCommandMethod = database.&runCommand + + when: + execute(runCommandMethod, session, command) + executor.getReadOperation() as CommandReadOperation + + then: + executor.getClientSession() == session + executor.getReadPreference() == primary() + + when: + execute(runCommandMethod, session, command, primaryPreferred()) + executor.getReadOperation() as CommandReadOperation + + then: + executor.getClientSession() == session + executor.getReadPreference() == primaryPreferred() + + when: + execute(runCommandMethod, session, command, BsonDocument) + executor.getReadOperation() as CommandReadOperation + + then: + executor.getClientSession() == session + executor.getReadPreference() == primary() + + when: + execute(runCommandMethod, session, command, primaryPreferred(), BsonDocument) + executor.getReadOperation() as CommandReadOperation + + then: + executor.getClientSession() == session + executor.getReadPreference() == primaryPreferred() + + where: + session << [null, Stub(ClientSession)] + } + + def 'should use DropDatabaseOperation correctly'() { + given: + def executor = new TestOperationExecutor([null]) + def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def dropMethod = database.&drop + + when: + execute(dropMethod, session) + def operation = executor.getWriteOperation() as DropDatabaseOperation + + then: + expect operation, isTheSameAs(new DropDatabaseOperation(name, writeConcern)) + executor.getClientSession() == session + + where: + session << [null, Stub(ClientSession)] + } + + def 'should use ListCollectionsOperation correctly'() { + given: + def executor = new TestOperationExecutor([null, null, null]) + def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def listCollectionsMethod = database.&listCollections + def listCollectionNamesMethod = database.&listCollectionNames + + when: + def listCollectionIterable = execute(listCollectionsMethod, session) + + then: + expect listCollectionIterable, isTheSameAs(new ListCollectionsIterableImpl<>(session, name, false, + Document, codecRegistry, primary(), executor, false, TIMEOUT_SETTINGS)) + + when: + listCollectionIterable = execute(listCollectionsMethod, session, BsonDocument) + + then: + expect listCollectionIterable, isTheSameAs(new ListCollectionsIterableImpl<>(session, name, false, + BsonDocument, codecRegistry, primary(), executor, false, TIMEOUT_SETTINGS)) + + when: + def listCollectionNamesIterable = execute(listCollectionNamesMethod, session) + + then: + // `listCollectionNamesIterable` is an instance of a `ListCollectionNamesIterableImpl`, so we have to get the wrapped iterable from it + expect listCollectionNamesIterable.getWrapped(), isTheSameAs(new ListCollectionsIterableImpl<>(session, name, + true, BsonDocument, codecRegistry, primary(), executor, false, TIMEOUT_SETTINGS)) + + where: + session << [null, Stub(ClientSession)] + } + + def 'should use CreateCollectionOperation correctly'() { + given: + def collectionName = 'collectionName' + def executor = new TestOperationExecutor([null, null]) + def database = new 
MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def createCollectionMethod = database.&createCollection + + when: + execute(createCollectionMethod, session, collectionName) + def operation = executor.getWriteOperation() as CreateCollectionOperation + + then: + expect operation, isTheSameAs(new CreateCollectionOperation(name, collectionName, writeConcern)) + executor.getClientSession() == session + + when: + def createCollectionOptions = new CreateCollectionOptions() + .capped(true) + .maxDocuments(100) + .sizeInBytes(1000) + .storageEngineOptions(BsonDocument.parse('{ wiredTiger : {}}')) + .indexOptionDefaults(new IndexOptionDefaults().storageEngine(BsonDocument.parse('{ mmapv1 : {}}'))) + .validationOptions(new ValidationOptions().validator(BsonDocument.parse('{level: {$gte: 10}}')) + .validationLevel(ValidationLevel.MODERATE) + .validationAction(ValidationAction.WARN)) + .collation(collation) + + execute(createCollectionMethod, session, collectionName, createCollectionOptions) + operation = executor.getWriteOperation() as CreateCollectionOperation + + then: + expect operation, isTheSameAs(new CreateCollectionOperation(name, collectionName, writeConcern) + .collation(collation) + .capped(true) + .maxDocuments(100) + .sizeInBytes(1000) + .storageEngineOptions(BsonDocument.parse('{ wiredTiger : {}}')) + .indexOptionDefaults(BsonDocument.parse('{ storageEngine : { mmapv1 : {}}}')) + .validator(BsonDocument.parse('{level: {$gte: 10}}')) + .validationLevel(ValidationLevel.MODERATE) + .validationAction(ValidationAction.WARN)) + executor.getClientSession() == session + + where: + session << [null, Stub(ClientSession)] + } + + def 'should use CreateViewOperation correctly'() { + given: + def viewName = 'view1' + def viewOn = 'col1' + def pipeline = [new Document('$match', new Document('x', true))] + def writeConcern = WriteConcern.JOURNALED + def executor = new TestOperationExecutor([null, null]) + def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def createViewMethod = database.&createView + + when: + execute(createViewMethod, session, viewName, viewOn, pipeline) + def operation = executor.getWriteOperation() as CreateViewOperation + + then: + expect operation, isTheSameAs(new CreateViewOperation(name, viewName, viewOn, + [new BsonDocument('$match', new BsonDocument('x', BsonBoolean.TRUE))], writeConcern)) + executor.getClientSession() == session + + when: + execute(createViewMethod, session, viewName, viewOn, pipeline, new CreateViewOptions().collation(collation)) + operation = executor.getWriteOperation() as CreateViewOperation + + then: + expect operation, isTheSameAs(new CreateViewOperation(name, viewName, viewOn, + [new BsonDocument('$match', new BsonDocument('x', BsonBoolean.TRUE))], writeConcern).collation(collation)) + executor.getClientSession() == session + + where: + session << [null, Stub(ClientSession)] + } + + def 'should validate the createView pipeline data correctly'() { + given: + def viewName = 'view1' + def viewOn = 'col1' + def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, Stub(OperationExecutor)) + + when: + database.createView(viewName, viewOn, null) + + then: + thrown(IllegalArgumentException) + + when: + database.createView(viewName, viewOn, [null]) + + 
then: + thrown(IllegalArgumentException) + } + + def 'should create ChangeStreamIterable correctly'() { + given: + def executor = new TestOperationExecutor([]) + def namespace = new MongoNamespace(name, '_ignored') + def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def watchMethod = database.&watch + + when: + def changeStreamIterable = execute(watchMethod, session) + + then: + expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, codecRegistry, + readPreference, readConcern, executor, [], Document, ChangeStreamLevel.DATABASE, false, TIMEOUT_SETTINGS), + ['codec']) + + when: + changeStreamIterable = execute(watchMethod, session, [new Document('$match', 1)]) + + then: + expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, codecRegistry, + readPreference, readConcern, executor, [new Document('$match', 1)], Document, + ChangeStreamLevel.DATABASE, false, TIMEOUT_SETTINGS), ['codec']) + + when: + changeStreamIterable = execute(watchMethod, session, [new Document('$match', 1)], BsonDocument) + + then: + expect changeStreamIterable, isTheSameAs(new ChangeStreamIterableImpl<>(session, namespace, codecRegistry, + readPreference, readConcern, executor, [new Document('$match', 1)], BsonDocument, + ChangeStreamLevel.DATABASE, false, TIMEOUT_SETTINGS), ['codec']) + + where: + session << [null, Stub(ClientSession)] + } + + def 'should validate the ChangeStreamIterable pipeline data correctly'() { + given: + def executor = new TestOperationExecutor([]) + def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + + when: + database.watch((Class) null) + + then: + thrown(IllegalArgumentException) + + when: + database.watch([null]).into([]) + + then: + thrown(IllegalArgumentException) + } + + def 'should create AggregateIterable correctly'() { + given: + def executor = new TestOperationExecutor([]) + def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + def aggregateMethod = database.&aggregate + + when: + def aggregateIterable = execute(aggregateMethod, session, []) + + then: + expect aggregateIterable, isTheSameAs(new AggregateIterableImpl<>(session, name, Document, Document, + codecRegistry, readPreference, readConcern, writeConcern, executor, [], AggregationLevel.DATABASE, + false, TIMEOUT_SETTINGS), ['codec']) + + when: + aggregateIterable = execute(aggregateMethod, session, [new Document('$match', 1)]) + + then: + expect aggregateIterable, isTheSameAs(new AggregateIterableImpl<>(session, name, Document, Document, + codecRegistry, readPreference, readConcern, writeConcern, executor, [new Document('$match', 1)], + AggregationLevel.DATABASE, false, TIMEOUT_SETTINGS), ['codec']) + + when: + aggregateIterable = execute(aggregateMethod, session, [new Document('$match', 1)], BsonDocument) + + then: + expect aggregateIterable, isTheSameAs(new AggregateIterableImpl<>(session, name, Document, BsonDocument, + codecRegistry, readPreference, readConcern, writeConcern, executor, [new Document('$match', 1)], + AggregationLevel.DATABASE, false, TIMEOUT_SETTINGS), ['codec']) + + where: + session << [null, Stub(ClientSession)] + } + + def 'should validate the AggregationIterable pipeline data correctly'() { + 
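        // Editorial aside, not part of this patch: this feature method, together with
        // 'should create AggregateIterable correctly' above, covers database-level aggregation.
        // A hedged usage sketch (the $listLocalSessions stage is illustrative only):
        //     database.aggregate([Document.parse('{$listLocalSessions: {}}')]).into([])
        // aggregate(null) fails immediately with IllegalArgumentException, while a pipeline
        // containing a null stage fails when the iterable is consumed, as asserted below.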
given: + def executor = new TestOperationExecutor([]) + def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, false, + readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, executor) + + when: + database.aggregate(null, []) + + then: + thrown(IllegalArgumentException) + + when: + database.aggregate((List) null) + + then: + thrown(IllegalArgumentException) + + when: + database.aggregate([null]).into([]) + + then: + thrown(IllegalArgumentException) + } + + def 'should pass the correct options to getCollection'() { + given: + def codecRegistry = fromProviders([new ValueCodecProvider(), new DocumentCodecProvider(), new BsonValueCodecProvider()]) + def database = new MongoDatabaseImpl('databaseName', codecRegistry, secondary(), WriteConcern.MAJORITY, true, true, + ReadConcern.MAJORITY, JAVA_LEGACY, null, TIMEOUT_SETTINGS, new TestOperationExecutor([])) + + when: + def collection = database.getCollection('collectionName') + + then: + expect collection, isTheSameAs(expectedCollection) + + where: + expectedCollection = new MongoCollectionImpl(new MongoNamespace('databaseName', 'collectionName'), Document, + fromProviders([new ValueCodecProvider(), new DocumentCodecProvider(), new BsonValueCodecProvider()]), secondary(), + WriteConcern.MAJORITY, true, true, ReadConcern.MAJORITY, JAVA_LEGACY, null, TIMEOUT_SETTINGS, + new TestOperationExecutor([])) + } + + def 'should validate the client session correctly'() { + given: + def database = new MongoDatabaseImpl(name, codecRegistry, readPreference, writeConcern, false, + false, readConcern, JAVA_LEGACY, null, TIMEOUT_SETTINGS, Stub(OperationExecutor)) + + when: + database.createCollection(null, 'newColl') + + then: + thrown(IllegalArgumentException) + + when: + database.createView(null, 'newView', [Document.parse('{$match: {}}')]) + + then: + thrown(IllegalArgumentException) + + when: + database.drop(null) + + then: + thrown(IllegalArgumentException) + + when: + database.listCollectionNames(null) + + then: + thrown(IllegalArgumentException) + + when: + database.listCollections(null) + + then: + thrown(IllegalArgumentException) + + when: + database.runCommand(null, Document.parse('{}')) + + then: + thrown(IllegalArgumentException) + } + +} diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/MongoMappingCursorSpecification.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoMappingCursorSpecification.groovy new file mode 100644 index 00000000000..3478fe1e236 --- /dev/null +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/MongoMappingCursorSpecification.groovy @@ -0,0 +1,96 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client.internal + +import com.mongodb.ServerAddress +import com.mongodb.ServerCursor +import com.mongodb.client.MongoCursor +import org.bson.Document +import spock.lang.Specification + +class MongoMappingCursorSpecification extends Specification { + def 'should get server cursor and address'() { + given: + def cursor = Stub(MongoCursor) + def address = new ServerAddress('host', 27018) + def serverCursor = new ServerCursor(5, address) + cursor.getServerAddress() >> address + cursor.getServerCursor() >> serverCursor + def mappingCursor = new MongoMappingCursor(cursor, { }) + + expect: + mappingCursor.serverAddress.is(address) + mappingCursor.serverCursor.is(serverCursor) + } + + def 'should throw on remove'() { + given: + def cursor = Stub(MongoCursor) + cursor.remove() >> { throw new UnsupportedOperationException() } + def mappingCursor = new MongoMappingCursor(cursor, { }) + + when: + mappingCursor.remove() + + then: + thrown(UnsupportedOperationException) + } + + def 'should close cursor'() { + given: + def cursor = Mock(MongoCursor) + def mappingCursor = new MongoMappingCursor(cursor, { }) + + when: + mappingCursor.close() + + then: + 1 * cursor.close() + } + + def 'should have next if cursor does'() { + given: + def cursor = Stub(MongoCursor) + cursor.hasNext() >>> [true, false] + def mappingCursor = new MongoMappingCursor(cursor, { }) + + expect: + mappingCursor.hasNext() + !mappingCursor.hasNext() + } + + def 'should map next'() { + given: + def cursor = Stub(MongoCursor) + cursor.next() >> new Document('_id', 1) + def mappingCursor = new MongoMappingCursor(cursor, { Document d -> d.get('_id') }) + + expect: + mappingCursor.next() == 1 + } + + def 'should map try next'() { + given: + def cursor = Stub(MongoCursor) + cursor.tryNext() >>> [new Document('_id', 1), null] + def mappingCursor = new MongoMappingCursor(cursor, { Document d -> d.get('_id') }) + + expect: + mappingCursor.tryNext() == 1 + !mappingCursor.tryNext() + } +} diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/TestHelper.groovy b/driver-sync/src/test/unit/com/mongodb/client/internal/TestHelper.groovy new file mode 100644 index 00000000000..e743fe13cdd --- /dev/null +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/TestHelper.groovy @@ -0,0 +1,29 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.internal + +import com.mongodb.client.ClientSession + +class TestHelper { + static <T> T execute(final Closure<T> method, final ClientSession session, final Object... 
restOfArgs) { + if (session == null) { + method.call(restOfArgs) + } else { + method.call([session, *restOfArgs] as Object[]) + } + } +} diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/TestOperationExecutor.java b/driver-sync/src/test/unit/com/mongodb/client/internal/TestOperationExecutor.java new file mode 100644 index 00000000000..adcfaa0f903 --- /dev/null +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/TestOperationExecutor.java @@ -0,0 +1,114 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.internal; + +import com.mongodb.ReadConcern; +import com.mongodb.ReadPreference; +import com.mongodb.client.ClientSession; +import com.mongodb.internal.TimeoutSettings; +import com.mongodb.internal.operation.ReadOperation; +import com.mongodb.internal.operation.WriteOperation; +import com.mongodb.lang.Nullable; + +import java.util.ArrayList; +import java.util.List; + +@SuppressWarnings("rawtypes") +public class TestOperationExecutor implements OperationExecutor { + + private final List responses; + private final List<ClientSession> clientSessions = new ArrayList<>(); + private final List<ReadPreference> readPreferences = new ArrayList<>(); + private final List<ReadConcern> readConcerns = new ArrayList<>(); + private final List<ReadOperation> readOperations = new ArrayList<>(); + private final List<WriteOperation> writeOperations = new ArrayList<>(); + + public TestOperationExecutor(final List responses) { + this.responses = responses; + } + + @Override + public <T> T execute(final ReadOperation<T> operation, final ReadPreference readPreference, final ReadConcern readConcern) { + return execute(operation, readPreference, readConcern, null); + } + + @Override + public <T> T execute(final WriteOperation<T> operation, final ReadConcern readConcern) { + return execute(operation, readConcern, null); + } + + @Override + public <T> T execute(final ReadOperation<T> operation, final ReadPreference readPreference, final ReadConcern readConcern, + @Nullable final ClientSession session) { + clientSessions.add(session); + readOperations.add(operation); + readPreferences.add(readPreference); + readConcerns.add(readConcern); + return getResponse(); + } + + @Override + public <T> T execute(final WriteOperation<T> operation, final ReadConcern readConcern, @Nullable final ClientSession session) { + clientSessions.add(session); + writeOperations.add(operation); + readConcerns.add(readConcern); + return getResponse(); + } + + @Override + public OperationExecutor withTimeoutSettings(final TimeoutSettings timeoutSettings) { + return this; + } + + @Override + public TimeoutSettings getTimeoutSettings() { + throw new UnsupportedOperationException("Not supported"); + } + + @SuppressWarnings("unchecked") + private <T> T getResponse() { + Object response = responses.remove(0); + if (response instanceof RuntimeException) { + throw (RuntimeException) response; + } + return (T) response; + } + + public ClientSession getClientSession() { + return clientSessions.remove(0); + } + + @Nullable + public ReadOperation 
getReadOperation() { + return readOperations.isEmpty() ? null : readOperations.remove(0); + } + + @Nullable + public ReadPreference getReadPreference() { + return readPreferences.isEmpty() ? null : readPreferences.remove(0); + } + + @Nullable + public ReadConcern getReadConcern() { + return readConcerns.isEmpty() ? null : readConcerns.remove(0); + } + + @Nullable + public WriteOperation getWriteOperation() { + return writeOperations.isEmpty() ? null : writeOperations.remove(0); + } +} diff --git a/driver-sync/src/test/unit/com/mongodb/client/internal/TimeoutHelperTest.java b/driver-sync/src/test/unit/com/mongodb/client/internal/TimeoutHelperTest.java new file mode 100644 index 00000000000..c3569624414 --- /dev/null +++ b/driver-sync/src/test/unit/com/mongodb/client/internal/TimeoutHelperTest.java @@ -0,0 +1,192 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client.internal; + +import com.mongodb.MongoOperationTimeoutException; +import com.mongodb.client.MongoCollection; +import com.mongodb.client.MongoDatabase; +import com.mongodb.internal.time.Timeout; +import org.bson.Document; +import org.junit.jupiter.api.Test; + +import java.util.concurrent.TimeUnit; + +import static com.mongodb.client.internal.TimeoutHelper.collectionWithTimeout; +import static com.mongodb.client.internal.TimeoutHelper.databaseWithTimeout; +import static com.mongodb.internal.mockito.MongoMockito.mock; +import static com.mongodb.internal.time.Timeout.ZeroSemantics.ZERO_DURATION_MEANS_EXPIRED; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.longThat; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoInteractions; +import static org.mockito.Mockito.when; + +@SuppressWarnings("unchecked") +class TimeoutHelperTest { + + private static final String TIMEOUT_ERROR_MESSAGE = "message"; + + @Test + void shouldNotSetRemainingTimeoutOnCollectionWhenTimeoutIsNull() { + //given + MongoCollection collection = mock(MongoCollection.class); + + //when + MongoCollection result = collectionWithTimeout(collection, TIMEOUT_ERROR_MESSAGE, null); + + //then + assertEquals(collection, result); + } + + @Test + void shouldNotSetRemainingTimeoutDatabaseWhenTimeoutIsNull() { + //given + MongoDatabase database = mock(MongoDatabase.class); + + //when + MongoDatabase result = databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, null); + + //then + assertEquals(database, result); + } + + @Test + void shouldSetRemainingTimeoutOnCollectionWhenTimeoutIsInfinite() { + //given + MongoCollection collectionWithTimeout = mock(MongoCollection.class); + MongoCollection collection = mock(MongoCollection.class, mongoCollection -> { + when(mongoCollection.withTimeout(anyLong(), 
eq(TimeUnit.MILLISECONDS))).thenReturn(collectionWithTimeout); + }); + + //when + MongoCollection result = collectionWithTimeout(collection, TIMEOUT_ERROR_MESSAGE, Timeout.infinite()); + + //then + assertEquals(collectionWithTimeout, result); + verify(collection).withTimeout(0L, TimeUnit.MILLISECONDS); + } + + @Test + void shouldNotSetRemainingTimeoutOnDatabaseWhenTimeoutIsInfinite() { + //given + MongoDatabase databaseWithTimeout = mock(MongoDatabase.class); + MongoDatabase database = mock(MongoDatabase.class, mongoDatabase -> { + when(mongoDatabase.withTimeout(anyLong(), eq(TimeUnit.MILLISECONDS))).thenReturn(databaseWithTimeout); + }); + + //when + MongoDatabase result = databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, Timeout.infinite()); + + //then + assertEquals(databaseWithTimeout, result); + verify(database).withTimeout(0L, TimeUnit.MILLISECONDS); + } + + @Test + void shouldSetRemainingTimeoutOnCollectionWhenTimeout() { + //given + MongoCollection collectionWithTimeout = mock(MongoCollection.class); + MongoCollection collection = mock(MongoCollection.class, mongoCollection -> { + when(mongoCollection.withTimeout(anyLong(), eq(TimeUnit.MILLISECONDS))).thenReturn(collectionWithTimeout); + }); + Timeout timeout = Timeout.expiresIn(1, TimeUnit.DAYS, ZERO_DURATION_MEANS_EXPIRED); + + //when + MongoCollection result = collectionWithTimeout(collection, TIMEOUT_ERROR_MESSAGE, timeout); + + //then + verify(collection).withTimeout(longThat(remaining -> remaining > 0), eq(TimeUnit.MILLISECONDS)); + assertEquals(collectionWithTimeout, result); + } + + @Test + void shouldSetRemainingTimeoutOnDatabaseWhenTimeout() { + //given + MongoDatabase databaseWithTimeout = mock(MongoDatabase.class); + MongoDatabase database = mock(MongoDatabase.class, mongoDatabase -> { + when(mongoDatabase.withTimeout(anyLong(), eq(TimeUnit.MILLISECONDS))).thenReturn(databaseWithTimeout); + }); + Timeout timeout = Timeout.expiresIn(1, TimeUnit.DAYS, ZERO_DURATION_MEANS_EXPIRED); + + //when + MongoDatabase result = databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, timeout); + + //then + verify(database).withTimeout(longThat(remaining -> remaining > 0), eq(TimeUnit.MILLISECONDS)); + assertEquals(databaseWithTimeout, result); + } + + @Test + void shouldThrowErrorWhenTimeoutHasExpiredOnCollection() { + //given + MongoCollection collection = mock(MongoCollection.class); + Timeout timeout = Timeout.expiresIn(1, TimeUnit.MICROSECONDS, ZERO_DURATION_MEANS_EXPIRED); + + //when + MongoOperationTimeoutException mongoExecutionTimeoutException = + assertThrows(MongoOperationTimeoutException.class, () -> collectionWithTimeout(collection, TIMEOUT_ERROR_MESSAGE, timeout)); + + //then + assertEquals(TIMEOUT_ERROR_MESSAGE, mongoExecutionTimeoutException.getMessage()); + verifyNoInteractions(collection); + } + + @Test + void shouldThrowErrorWhenTimeoutHasExpiredOnDatabase() { + //given + MongoDatabase database = mock(MongoDatabase.class); + Timeout timeout = Timeout.expiresIn(1, TimeUnit.MICROSECONDS, ZERO_DURATION_MEANS_EXPIRED); + + //when + MongoOperationTimeoutException mongoExecutionTimeoutException = + assertThrows(MongoOperationTimeoutException.class, () -> databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, timeout)); + + //then + assertEquals(TIMEOUT_ERROR_MESSAGE, mongoExecutionTimeoutException.getMessage()); + verifyNoInteractions(database); + } + + @Test + void shouldThrowErrorWhenTimeoutHasExpiredWithZeroRemainingOnCollection() { + //given + MongoCollection collection = mock(MongoCollection.class); + Timeout timeout = 
Timeout.expiresIn(0, TimeUnit.NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED);
+
+        //when
+        assertThrows(MongoOperationTimeoutException.class, () -> collectionWithTimeout(collection, TIMEOUT_ERROR_MESSAGE, timeout));
+
+        //then
+        verifyNoInteractions(collection);
+    }
+
+    @Test
+    void shouldThrowErrorWhenTimeoutHasExpiredWithZeroRemainingOnDatabase() {
+        //given
+        MongoDatabase database = mock(MongoDatabase.class);
+        Timeout timeout = Timeout.expiresIn(0, TimeUnit.NANOSECONDS, ZERO_DURATION_MEANS_EXPIRED);
+
+        //when
+        assertThrows(MongoOperationTimeoutException.class, () -> databaseWithTimeout(database, TIMEOUT_ERROR_MESSAGE, timeout));
+
+        //then
+        verifyNoInteractions(database);
+    }
+
+}
diff --git a/eclipse/eclipse-java-code-cleanup.xml b/eclipse/eclipse-java-code-cleanup.xml
deleted file mode 100644
index 1a6656e6738..00000000000
--- a/eclipse/eclipse-java-code-cleanup.xml
+++ /dev/null
@@ -1,56 +0,0 @@
diff --git a/eclipse/eclipse-java-code-formatters.xml b/eclipse/eclipse-java-code-formatters.xml
deleted file mode 100644
index acf47e53ce3..00000000000
--- a/eclipse/eclipse-java-code-formatters.xml
+++ /dev/null
@@ -1,279 +0,0 @@
diff --git a/eclipse/eclipse-java.importorder b/eclipse/eclipse-java.importorder
deleted file mode 100644
index 1072e4c6536..00000000000
--- a/eclipse/eclipse-java.importorder
+++ /dev/null
@@ -1,6 +0,0 @@
-#Organize Import Order
-#Thu Oct 28 14:28:57 EDT 2010
-3=com
-2=org
-1=javax
-0=java
diff --git a/examples/DefaultSecurityCallbackHandler.java b/examples/DefaultSecurityCallbackHandler.java
deleted file mode 100644
index 4398df07fdc..00000000000
--- a/examples/DefaultSecurityCallbackHandler.java
+++ /dev/null
@@ -1,37 +0,0 @@
-/**
- * Copyright (c) 2008 - 2012 10gen, Inc.
- *

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import javax.security.auth.callback.Callback; -import javax.security.auth.callback.CallbackHandler; -import javax.security.auth.callback.NameCallback; -import javax.security.auth.callback.PasswordCallback; -import javax.security.auth.callback.UnsupportedCallbackException; -import java.io.IOException; - -public class DefaultSecurityCallbackHandler implements CallbackHandler { - public void handle(final Callback[] callbacks) throws IOException, UnsupportedCallbackException { - for (Callback callback : callbacks) { - if (callback instanceof NameCallback) { - NameCallback nameCallback = (NameCallback) callback; - nameCallback.setName(""); // or just return - } - if (callback instanceof PasswordCallback) { - PasswordCallback passwordCallback = (PasswordCallback) callback; - passwordCallback.setPassword("".toCharArray()); // or just return - } - } - } -} diff --git a/examples/GSSAPICredentialsExample.java b/examples/GSSAPICredentialsExample.java deleted file mode 100644 index 3c9486bf69c..00000000000 --- a/examples/GSSAPICredentialsExample.java +++ /dev/null @@ -1,78 +0,0 @@ -/** - * Copyright (c) 2008 - 2012 10gen, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import com.mongodb.BasicDBObject; -import com.mongodb.DB; -import com.mongodb.MongoClient; -import com.mongodb.MongoClientOptions; -import com.mongodb.MongoCredential; -import com.mongodb.ServerAddress; - -import java.net.UnknownHostException; -import java.security.Security; -import java.util.Arrays; - -/** - * Example usage of Kerberos (GSSAPI) credentials. - *

- * Usage: - *

- * <pre>
- *     java GSSAPICredentialsExample server userName databaseName
- * </pre>
- */ -public class GSSAPICredentialsExample { - - // Steps: - // 1. Install unlimited strength encryption jar files in jre/lib/security - // (e.g. http://www.oracle.com/technetwork/java/javase/downloads/jce-6-download-429243.html) - // 2. run kinit - // 3. Set system properties, e.g.: - // -Djava.security.krb5.realm=10GEN.ME -Djavax.security.auth.useSubjectCredsOnly=false -Djava.security.krb5.kdc=kdc.10gen.me - // auth.login.defaultCallbackHandler=name of class that implements javax.security.auth.callback.CallbackHandler - public static void main(String[] args) throws UnknownHostException, InterruptedException { - // Set this property to avoid the default behavior where the program prompts on the command line for username/password -// Security.setProperty("auth.login.defaultCallbackHandler", "DefaultSecurityCallbackHandler"); - - String server = args[0]; - String user = args[1]; - String databaseName = args[2]; - - System.out.println("javax.security.auth.useSubjectCredsOnly: " + System.getProperty("javax.security.auth.useSubjectCredsOnly")); - System.out.println("java.security.krb5.realm: " + System.getProperty("java.security.krb5.realm")); - System.out.println("java.security.krb5.kdc: " + System.getProperty("java.security.krb5.kdc")); - System.out.println("auth.login.defaultCallbackHandler: " + Security.getProperty("auth.login.defaultCallbackHandler")); - System.out.println("login.configuration.provider: " + Security.getProperty("login.configuration.provider")); - System.out.println("java.security.auth.login.config: " + Security.getProperty("java.security.auth.login.config")); - System.out.println("login.config.url.1: " + Security.getProperty("login.config.url.1")); - System.out.println("login.config.url.2: " + Security.getProperty("login.config.url.2")); - System.out.println("login.config.url.3: " + Security.getProperty("login.config.url.3")); - - System.out.println("server: " + server); - System.out.println("user: " + user); - System.out.println("database: " + databaseName); - - System.out.println(); - - MongoClient mongoClient = new MongoClient(new ServerAddress(server), - Arrays.asList(MongoCredential.createGSSAPICredential(user)), - new MongoClientOptions.Builder().socketKeepAlive(true).socketTimeout(30000).build()); - DB testDB = mongoClient.getDB(databaseName); - - System.out.println("Insert result: " + testDB.getCollection("test").insert(new BasicDBObject())); - System.out.println("Count: " + testDB.getCollection("test").count()); - } -} diff --git a/examples/MongoCredentialsExample.java b/examples/MongoCredentialsExample.java deleted file mode 100644 index 53fe92b2044..00000000000 --- a/examples/MongoCredentialsExample.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (c) 2008 - 2013 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -import com.mongodb.BasicDBObject; -import com.mongodb.DB; -import com.mongodb.MongoClient; -import com.mongodb.MongoClientOptions; -import com.mongodb.MongoCredential; -import com.mongodb.ServerAddress; - -import java.net.UnknownHostException; -import java.util.Arrays; - -public class MongoCredentialsExample { - public static void main(String[] args) throws UnknownHostException { - String server = args[0]; - String user = args[1]; - String password = args[2]; - String databaseName = args[3]; - - System.out.println("server: " + server); - System.out.println("user: " + user); - System.out.println("database: " + databaseName); - - System.out.println(); - - MongoClient mongoClient = new MongoClient(new ServerAddress(server), - Arrays.asList(MongoCredential.createMongoCRCredential(user, "test", password.toCharArray())), - new MongoClientOptions.Builder().socketKeepAlive(true).socketTimeout(30000).build()); - DB testDB = mongoClient.getDB(databaseName); - - System.out.println("Count: " + testDB.getCollection("test").count()); - - System.out.println("Insert result: " + testDB.getCollection("test").insert(new BasicDBObject())); - - } -} diff --git a/examples/QuickTour.java b/examples/QuickTour.java deleted file mode 100644 index b1df5f70f12..00000000000 --- a/examples/QuickTour.java +++ /dev/null @@ -1,141 +0,0 @@ -/** - * Copyright (C) 2008-2012 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -import com.mongodb.BasicDBObject; -import com.mongodb.DB; -import com.mongodb.DBCollection; -import com.mongodb.DBCursor; -import com.mongodb.DBObject; -import com.mongodb.MongoClient; - -import java.util.List; -import java.util.Set; - -public class QuickTour { - - public static void main(String[] args) throws Exception { - - // connect to the local database server - MongoClient mongoClient = new MongoClient(); - - // get handle to "mydb" - DB db = mongoClient.getDB("mydb"); - - // Authenticate - optional - // boolean auth = db.authenticate("foo", "bar"); - - // get a list of the collections in this database and print them out - Set collectionNames = db.getCollectionNames(); - for (String s : collectionNames) { - System.out.println(s); - } - - // get a collection object to work with - DBCollection testCollection = db.getCollection("testCollection"); - - // drop all the data in it - testCollection.drop(); - - // make a document and insert it - BasicDBObject doc = new BasicDBObject("name", "MongoDB").append("type", "database").append("count", 1) - .append("info", new BasicDBObject("x", 203).append("y", 102)); - - testCollection.insert(doc); - - // get it (since it's the only one in there since we dropped the rest earlier on) - DBObject myDoc = testCollection.findOne(); - System.out.println(myDoc); - - // now, lets add lots of little documents to the collection so we can explore queries and cursors - for (int i = 0; i < 100; i++) { - testCollection.insert(new BasicDBObject().append("i", i)); - } - System.out.println("total # of documents after inserting 100 small ones (should be 101) " + testCollection.getCount()); - - // lets get all the documents in the collection and print them out - DBCursor cursor = testCollection.find(); - try { - while (cursor.hasNext()) { - System.out.println(cursor.next()); - } - } finally { - cursor.close(); - } - - // now use a query to get 1 document out - BasicDBObject query = new BasicDBObject("i", 71); - cursor = testCollection.find(query); - - try { - while (cursor.hasNext()) { - System.out.println(cursor.next()); - } - } finally { - cursor.close(); - } - - // now use a range query to get a larger subset - query = new BasicDBObject("i", new BasicDBObject("$gt", 50)); // i.e. find all where i > 50 - cursor = testCollection.find(query); - - try { - while (cursor.hasNext()) { - System.out.println(cursor.next()); - } - } finally { - cursor.close(); - } - - // range query with multiple constraints - query = new BasicDBObject("i", new BasicDBObject("$gt", 20).append("$lte", 30)); // i.e. 
20 < i <= 30 - cursor = testCollection.find(query); - - try { - while (cursor.hasNext()) { - System.out.println(cursor.next()); - } - } finally { - cursor.close(); - } - - // create an index on the "i" field - testCollection.createIndex(new BasicDBObject("i", 1)); // create index on "i", ascending - - - // list the indexes on the collection - List list = testCollection.getIndexInfo(); - for (DBObject o : list) { - System.out.println(o); - } - - // See if the last operation had an error - System.out.println("Last error : " + db.getLastError()); - - // see if any previous operation had an error - System.out.println("Previous error : " + db.getPreviousError()); - - // force an error - db.forceError(); - - // See if the last operation had an error - System.out.println("Last error : " + db.getLastError()); - - db.resetError(); - - // release resources - mongoClient.close(); - } -} diff --git a/examples/QuickTourAdmin.java b/examples/QuickTourAdmin.java deleted file mode 100644 index 953d5d2e544..00000000000 --- a/examples/QuickTourAdmin.java +++ /dev/null @@ -1,53 +0,0 @@ -/** - * Copyright (C) 2008-2012 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import com.mongodb.BasicDBObject; -import com.mongodb.DB; -import com.mongodb.MongoClient; - -public class QuickTourAdmin { - - public static void main(String[] args) throws Exception { - - // connect to the local database server - MongoClient mongoClient = new MongoClient(); - - // Authenticate - optional - // boolean auth = db.authenticate("foo", "bar"); - - // get db names - for (String s : mongoClient.getDatabaseNames()) { - System.out.println(s); - } - - // get a db - DB db = mongoClient.getDB("com_mongodb_MongoAdmin"); - - // do an insert so that the db will really be created. Calling getDB() doesn't really take any - // action with the server - db.getCollection("testcollection").insert(new BasicDBObject("i", 1)); - for (String s : mongoClient.getDatabaseNames()) { - System.out.println(s); - } - - // drop a database - mongoClient.dropDatabase("com_mongodb_MongoAdmin"); - - for (String s : mongoClient.getDatabaseNames()) { - System.out.println(s); - } - } -} diff --git a/examples/ReadOplog.java b/examples/ReadOplog.java deleted file mode 100644 index 3dae87bc8de..00000000000 --- a/examples/ReadOplog.java +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Copyright (C) 2008-2012 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-
-import com.mongodb.BasicDBObject;
-import com.mongodb.Bytes;
-import com.mongodb.DB;
-import com.mongodb.DBCollection;
-import com.mongodb.DBCursor;
-import com.mongodb.DBObject;
-import com.mongodb.MongoClient;
-import org.bson.types.BSONTimestamp;
-
-public class ReadOplog {
-
-    public static void main(String[] args) throws Exception {
-
-        MongoClient mongoClient = new MongoClient();
-        DB local = mongoClient.getDB("local");
-
-        DBCollection oplog = local.getCollection("oplog.$main");
-
-        DBCursor lastCursor = oplog.find().sort(new BasicDBObject("$natural", -1)).limit(1);
-        if (!lastCursor.hasNext()) {
-            System.out.println("no oplog!");
-            return;
-        }
-        DBObject last = lastCursor.next();
-
-        BSONTimestamp ts = (BSONTimestamp) last.get("ts");
-        System.out.println("starting point: " + ts);
-
-        while (true) {
-            System.out.println("starting at ts: " + ts);
-            DBCursor cursor = oplog.find(new BasicDBObject("ts", new BasicDBObject("$gt", ts)));
-            cursor.addOption(Bytes.QUERYOPTION_TAILABLE);
-            cursor.addOption(Bytes.QUERYOPTION_AWAITDATA);
-            while (cursor.hasNext()) {
-                DBObject x = cursor.next();
-                ts = (BSONTimestamp) x.get("ts");
-                System.out.println("\t" + x);
-            }
-
-            Thread.sleep(1000);
-        }
-    }
-}
diff --git a/graalvm-native-image-app/build.gradle.kts b/graalvm-native-image-app/build.gradle.kts
new file mode 100644
index 00000000000..464c7711f20
--- /dev/null
+++ b/graalvm-native-image-app/build.gradle.kts
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Note: building this module requires the Gradle project flag `-PincludeGraalvm` (see settings.gradle.kts).
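+// A minimal sketch (assumed for illustration; not taken from the actual settings.gradle.kts) of how
+// such a project property can gate inclusion of this module:
+//
+//     if (providers.gradleProperty("includeGraalvm").isPresent) {
+//         include(":graalvm-native-image-app")
+//     }
+//
+// `providers.gradleProperty` and `include` are standard Gradle settings APIs.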
+ +plugins { + id("application") + id("java-library") + id("project.base") + id("conventions.test-artifacts") + alias(libs.plugins.graalvm.buildtools) +} + +application { + mainClass = "com.mongodb.internal.graalvm.NativeImageApp" +} + +sourceSets { + main { + java { setSrcDirs(listOf("src/main")) } + resources { setSrcDirs(listOf("src/main/resources")) } + } +} + +dependencies { + // we intentionally depend here on the driver artifacts instead of depending on compiled classes + implementation(project(path = ":bson", configuration = "archives")) + implementation(project(path = ":driver-core", configuration = "archives")) + implementation(project(path = ":driver-sync", configuration = "archives")) + implementation(project(path = ":driver-legacy", configuration = "archives")) + implementation(project(path = ":driver-reactive-streams", configuration = "archives")) + implementation(project(path = ":mongodb-crypt", configuration = "archives")) + implementation(project(path = ":mongodb-crypt", configuration = "runtimeElements")) + + implementation(project(path = ":driver-core", configuration = "testArtifacts")) + implementation(project(path = ":driver-sync", configuration = "testArtifacts")) + implementation(project(path = ":driver-legacy", configuration = "testArtifacts")) + implementation(project(path = ":driver-reactive-streams", configuration = "testArtifacts")) + + implementation(libs.slf4j) + implementation(libs.jna) + implementation(libs.graal.sdk.nativeimage) + implementation(libs.jetbrains.annotations) + implementation(libs.logback.classic) + implementation(platform(libs.project.reactor.bom)) + implementation(libs.project.reactor.core) +} + +tasks.withType().configureEach { + enabled = false +} + + +@Suppress("UNCHECKED_CAST") +val systemPropertiesForRunningNativeApp: Map = (System.getProperties().toMap() as Map) + .filterKeys { it.startsWith("org.mongodb.") } +tasks.named("run") { systemProperties = systemPropertiesForRunningNativeApp } + +// see https://graalvm.github.io/native-build-tools/latest/gradle-plugin.html +graalvmNative { + metadataRepository { + enabled.set(false) + } + agent { + // Executing the `run` Gradle task with the tracing agent + // https://www.graalvm.org/latest/reference-manual/native-image/metadata/AutomaticMetadataCollection/ + // requires running Gradle with GraalVM despite the toolchain for the task already being GraalVM. + // The same is true about executing the `metadataCopy` Gradle task. + // This may be a manifestation of an issue with the `org.graalvm.buildtools.native` plugin. + enabled.set(false) + defaultMode.set("direct") + val taskExecutedWithAgentAttached = "run" + modes { + direct { + // see https://www.graalvm.org/latest/reference-manual/native-image/metadata/ExperimentalAgentOptions + options.add("config-output-dir=${rootProject.file("build/native/agent-output/$taskExecutedWithAgentAttached").path}") + // `experimental-configuration-with-origins` produces + // `graalvm-native-image-app/build/native/agent-output/run/reflect-origins.txt` + // and similar files that explain the origin of each of the reachability metadata piece. + // However, for some reason, the actual reachability metadata is not generated when this option is enabled, + // so enable it manually if you need an explanation for a specific reachability metadata entry, + // and expect the build to fail. + // options.add("experimental-configuration-with-origins") + + // `experimental-class-define-support` does not seem to do what it is supposed to do. 
+            // We need this option to work if we want to support `UnixServerAddress` in native image.
+            // Unfortunately, the tracing agent neither generates the metadata in
+            // `graalvm-native-image-app/src/main/resources/META-INF/native-image/proxy-config.json`,
+            // nor does it extract the bytecode of the generated classes to
+            // `graalvm-native-image-app/src/main/resources/META-INF/native-image/agent-extracted-predefined-classes`.
+            options.add("experimental-class-define-support")
+        }
+    }
+    metadataCopy {
+        inputTaskNames.add(taskExecutedWithAgentAttached)
+        outputDirectories.add("src/main/resources/META-INF/native-image")
+        mergeWithExisting.set(false)
+    }
+}
+binaries {
+    configureEach {
+        buildArgs.add("--strict-image-heap")
+        buildArgs.add("-H:+UnlockExperimentalVMOptions")
+        // see class initialization and other reports in `graalvm/build/native/nativeCompile/reports`
+        buildArgs.add("--diagnostics-mode")
+        // see the "registerResource" entries in the `native-image` build-time output,
+        // informing us of the resources included in the native image being built
+        buildArgs.add("-H:Log=registerResource:5")
+    }
+    named("main") {
+        val mainClassName = application.mainClass.get()
+        imageName = mainClassName.substring(mainClassName.lastIndexOf('.') + 1)
+        sharedLibrary.set(false)
+        runtimeArgs.addAll(systemPropertiesForRunningNativeApp.entries
+                .stream()
+                .map { "-D${it.key}=${it.value}" }
+                .toList())
+        quickBuild.set(true)
+        // See the "Apply" entries in the `native-image` build-time output, informing us of
+        // the build configuration files (https://www.graalvm.org/latest/reference-manual/native-image/overview/BuildConfiguration/)
+        // and the reachability metadata files (https://www.graalvm.org/latest/reference-manual/native-image/metadata/)
+        // which are applied at build time.
+        verbose.set(true)
+    }
+}
+}
+
+// By configuring the toolchains for the `org.graalvm.buildtools.native` plugin
+// conditionally, we avoid Gradle errors caused by it failing to locate an installed GraalVM
+// for Java SE older than 21. One situation where this is relevant is building from an IDE,
+// where the `DEFAULT_JDK_VERSION` is likely used.
+val minRequiredGraalVMJavaVersion = 21
+val graalJavaVersion: Int = findProperty("javaVersion")?.toString()?.toInt() ?: minRequiredGraalVMJavaVersion
+val javaLanguageVersion: JavaLanguageVersion = JavaLanguageVersion.of(graalJavaVersion)
+
+if (graalJavaVersion >= minRequiredGraalVMJavaVersion) {
+    // `JvmVendorSpec.GRAAL_VM` matches only GraalVM Community (https://github.com/graalvm/graalvm-ce-builds/releases),
+    // and does not match any other GraalVM distribution.
+    // That is, Gradle fails to locate any other installed distribution of GraalVM.
+    // Furthermore, there is no other way to express via the Gradle toolchain functionality
+    // that GraalVM must be used. The documentation of the `org.graalvm.buildtools.native` plugin
+    // says the following about this limitation:
+    // "be aware that the toolchain detection cannot distinguish between GraalVM JDKs
+    // and standard JDKs without Native Image support:
+    // if you have both installed on the machine, Gradle may randomly pick one or the other".
+    // Fortunately, `JvmVendorSpec.GRAAL_VM` makes things less hideous than that.
+    //
+    // The documentation of the `org.graalvm.buildtools.native` plugin mentions
+    // the environment variable `GRAALVM_HOME` as an alternative to Gradle toolchain functionality.
+    // I was unable to find a way to stop relying on the toolchain specification requiring `JvmVendorSpec.GRAAL_VM`
+    // even with `GRAALVM_HOME`.
+    val graalVendor = JvmVendorSpec.GRAAL_VM
+    graalvmNative {
+        agent {
+            java {
+                toolchain {
+                    // TODO - errors saying it's immutable.
+                    // languageVersion.set(javaLanguageVersion)
+                    // vendor.set(graalVendor)
+                }
+            }
+        }
+        binaries {
+            configureEach {
+                javaLauncher.set(javaToolchains.launcherFor {
+                    languageVersion.set(javaLanguageVersion)
+                    vendor.set(graalVendor)
+                })
+            }
+        }
+    }
+}
diff --git a/graalvm-native-image-app/readme.md b/graalvm-native-image-app/readme.md
new file mode 100644
index 00000000000..c47a9829851
--- /dev/null
+++ b/graalvm-native-image-app/readme.md
@@ -0,0 +1,71 @@
+# graalvm-native-image-app
+
+## About
+This is an example of a native application that uses the driver and is built using
+[GraalVM native image](https://www.graalvm.org/latest/reference-manual/native-image/).
+
+## Contributor Guide
+
+This guide assumes you are using a shell capable of running [Bash](https://www.gnu.org/software/bash/) scripts.
+
+### Prepare the development environment
+
+#### Install GraalVM
+
+[GraalVM for JDK 21 Community](https://github.com/graalvm/graalvm-ce-builds/releases/tag/jdk-21.0.2) is required
+in addition to the JDK you are using for running [Gradle](https://gradle.org/) when building the driver.
+Note that GraalVM for JDK 21 Community is [available](https://sdkman.io/jdks#graalce) via [SDKMAN!](https://sdkman.io/).
+
+##### Explanation of the requirement
+
+* GraalVM Community is the only distribution of GraalVM for which it is possible to
+  specify a Gradle toolchain specification that matches only GraalVM
+  and does not match any other JDK.
+* GraalVM for Java SE 21 is required because it is the latest released version at the moment,
+  and not supporting the build for multiple, especially older, versions simplifies things.
+  JDK releases for Java SE 21 also have long-term support from most vendors,
+  which makes this version more attractive.
+
+#### Configure environment variables pointing to JDKs
+
+Assuming that the JDK you are using for running Gradle is for Java SE 17, export the following variables
+(your values may differ):
+
+```bash
+export JDK17=$(realpath ~/".sdkman/candidates/java/17.0.10-librca/")
+export JDK21_GRAALVM=$(realpath ~/".sdkman/candidates/java/21.0.2-graalce/")
+```
+
+##### Informing Gradle about JDK locations it does not know about
+
+If `JDK21_GRAALVM` points to a
+[location the Gradle auto-detection mechanism is not aware of](https://docs.gradle.org/current/userguide/toolchains.html#sec:auto_detection),
+you need to inform Gradle about that location as specified in https://docs.gradle.org/current/userguide/toolchains.html#sec:custom_loc.
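+
+For example (an illustrative sketch, not a prescribed step of this guide), one way to do that is to
+append the installation to the `org.gradle.java.installations.paths` property described in that section:
+
+```bash
+# Illustrative only: make Gradle's toolchain detection aware of the GraalVM installation.
+echo "org.gradle.java.installations.paths=${JDK21_GRAALVM}" >> ~/.gradle/gradle.properties
+```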
+ +### Build-related commands + +Assuming that your MongoDB deployment is accessible at `mongodb://localhost:27017`, +run from the driver project root directory: + +| # | Command | Description | +|--------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------| +| 0 | `env JAVA_HOME="${JDK17}" ./gradlew -PincludeGraalvm -PjavaVersion=21 :graalvm-native-image-app:nativeCompile` | Build the application relying on the reachability metadata stored in `graalvm-native-image-app/src/main/resources/META-INF/native-image`. | +| 1 | `env JAVA_HOME="${JDK17}" ./gradlew clean && env JAVA_HOME=${JDK21_GRAALVM} ./gradlew -PincludeGraalvm -PjavaVersion=21 -Pagent :graalvm-native-image-app:run && env JAVA_HOME=${JDK21_GRAALVM} ./gradlew -PincludeGraalvm :graalvm-native-image-app:metadataCopy` | Collect the reachability metadata and update the files storing it. Do this before building the application only if building fails otherwise. | +| 2 | `./graalvm-native-image-app/build/native/nativeCompile/NativeImageApp` | Run the application that has been built. | +| 3 | `env JAVA_HOME="${JDK17}" ./gradlew -PincludeGraalvm -PjavaVersion=21 :graalvm-native-image-app:nativeRun` | Run the application using Gradle, build it if necessary relying on the stored reachability metadata. | + +#### Specifying a custom connection string + +If your MongoDB deployment is not accessible at `mongodb://localhost:27017`, +or you want to use a custom connection string, +you can specify the connection string used by the `:graalvm-native-image-app:run`, `:graalvm-native-image-app:nativeRun` +Gradle tasks, as well as by the built native application by passing the CLI argument +`-Dorg.mongodb.test.uri=""` to `gradlew` or `NativeImageApp` respectively: + +```bash +./gradlew ... -Dorg.mongodb.test.uri="" +``` + +```bash +./graalvm-native-image-app/build/native/nativeCompile/NativeImageApp -Dorg.mongodb.test.uri="" +``` diff --git a/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/CustomDnsClientProvider.java b/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/CustomDnsClientProvider.java new file mode 100644 index 00000000000..696d37becd0 --- /dev/null +++ b/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/CustomDnsClientProvider.java @@ -0,0 +1,61 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.internal.graalvm; + +import com.mongodb.internal.dns.JndiDnsClient; +import com.mongodb.spi.dns.DnsClient; +import com.mongodb.spi.dns.DnsClientProvider; +import com.mongodb.spi.dns.DnsException; + +import java.util.List; + +import static java.lang.String.format; + +public final class CustomDnsClientProvider implements DnsClientProvider { + private static volatile boolean used = false; + + public CustomDnsClientProvider() { + } + + @Override + public DnsClient create() { + return new CustomDnsClient(); + } + + static void assertUsed() throws AssertionError { + if (!used) { + throw new AssertionError(format("%s is not used", CustomDnsClientProvider.class.getSimpleName())); + } + } + + private static void markUsed() { + used = true; + } + + private static final class CustomDnsClient implements DnsClient { + private final JndiDnsClient wrapped; + + CustomDnsClient() { + wrapped = new JndiDnsClient(); + } + + @Override + public List getResourceRecordData(final String name, final String type) throws DnsException { + markUsed(); + return wrapped.getResourceRecordData(name, type); + } + } +} diff --git a/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/CustomInetAddressResolverProvider.java b/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/CustomInetAddressResolverProvider.java new file mode 100644 index 00000000000..d2bf48535ee --- /dev/null +++ b/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/CustomInetAddressResolverProvider.java @@ -0,0 +1,62 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.mongodb.internal.graalvm; + +import com.mongodb.internal.connection.DefaultInetAddressResolver; +import com.mongodb.spi.dns.InetAddressResolver; +import com.mongodb.spi.dns.InetAddressResolverProvider; + +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.List; + +import static java.lang.String.format; + +public final class CustomInetAddressResolverProvider implements InetAddressResolverProvider { + private static volatile boolean used = false; + + public CustomInetAddressResolverProvider() { + } + + @Override + public InetAddressResolver create() { + return new CustomInetAddressResolver(); + } + + static void assertUsed() throws AssertionError { + if (!used) { + throw new AssertionError(format("%s is not used", CustomInetAddressResolverProvider.class.getSimpleName())); + } + } + + private static void markUsed() { + used = true; + } + + private static final class CustomInetAddressResolver implements InetAddressResolver { + private final DefaultInetAddressResolver wrapped; + + CustomInetAddressResolver() { + wrapped = new DefaultInetAddressResolver(); + } + + @Override + public List lookupByName(final String host) throws UnknownHostException { + markUsed(); + return wrapped.lookupByName(host); + } + } +} diff --git a/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/DnsSpi.java b/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/DnsSpi.java new file mode 100644 index 00000000000..e1d6ad72bfd --- /dev/null +++ b/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/DnsSpi.java @@ -0,0 +1,68 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.graalvm; + +import com.mongodb.MongoClientSettings; +import com.mongodb.client.MongoClient; +import com.mongodb.client.MongoClients; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.concurrent.TimeUnit; + +final class DnsSpi { + private static final Logger LOGGER = LoggerFactory.getLogger(DnsSpi.class); + + public static void main(final String... args) { + useInetAddressResolverProvider(args); + useDnsClientProvider(); + } + + private static void useInetAddressResolverProvider(final String... args) { + try (MongoClient client = args.length == 0 ? MongoClients.create() : MongoClients.create(args[0])) { + ArrayList databaseNames = client.listDatabaseNames().into(new ArrayList<>()); + LOGGER.info("Database names: {}", databaseNames); + } + CustomInetAddressResolverProvider.assertUsed(); + } + + private static void useDnsClientProvider() { + try (MongoClient client = MongoClients.create(MongoClientSettings.builder() + .applyToClusterSettings(builder -> builder + .srvHost("a.b.c") + // `MongoClient` uses `CustomDnsClientProvider` asynchronously, + // and by waiting for server selection that cannot succeed due to `a.b.c` not resolving to an IP address, + // we give `MongoClient` enough time to use `CustomDnsClientProvider`. 
+ // This is a tolerable race condition for a test. + .serverSelectionTimeout(2, TimeUnit.SECONDS)) + .build())) { + ArrayList databaseNames = client.listDatabaseNames().into(new ArrayList<>()); + LOGGER.info("Database names: {}", databaseNames); + } catch (RuntimeException e) { + try { + CustomDnsClientProvider.assertUsed(); + } catch (AssertionError err) { + err.addSuppressed(e); + throw err; + } + // an exception is expected because `a.b.c` does not resolve to an IP address + } + } + + private DnsSpi() { + } +} diff --git a/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/NativeImageApp.java b/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/NativeImageApp.java new file mode 100644 index 00000000000..400a1131108 --- /dev/null +++ b/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/NativeImageApp.java @@ -0,0 +1,151 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.graalvm; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.jetbrains.annotations.Nullable; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.stream.Stream; +import static com.mongodb.ClusterFixture.getConnectionStringSystemPropertyOrDefault; + +final class NativeImageApp { + private static final Logger LOGGER = LoggerFactory.getLogger(NativeImageApp.class); + + public static void main(final String[] args) { + LOGGER.info("Begin"); + LOGGER.info("java.vendor={}, java.vm.name={}, java.version={}", + System.getProperty("java.vendor"), System.getProperty("java.vm.name"), System.getProperty("java.version")); + String[] arguments = new String[] {getConnectionStringSystemPropertyOrDefault()}; + LOGGER.info("proper args={}, tour/example arguments={}", Arrays.toString(args), Arrays.toString(arguments)); + List errors = Stream.of( + new ThrowingRunnable.Named(Substitutions.class, + () -> Substitutions.main(arguments)), + new ThrowingRunnable.Named(DnsSpi.class, + () -> DnsSpi.main(arguments)), + new ThrowingRunnable.Named(gridfs.GridFSTour.class, + () -> gridfs.GridFSTour.main(arguments)), + new ThrowingRunnable.Named(documentation.CausalConsistencyExamples.class, + () -> documentation.CausalConsistencyExamples.main(arguments)), + new ThrowingRunnable.Named(documentation.ChangeStreamSamples.class, + () -> documentation.ChangeStreamSamples.main(arguments)), + new ThrowingRunnable.Named(tour.ClientSideEncryptionAutoEncryptionSettingsTour.class, + () -> tour.ClientSideEncryptionAutoEncryptionSettingsTour.main(arguments)), + new ThrowingRunnable.Named(tour.ClientSideEncryptionExplicitEncryptionAndDecryptionTour.class, + () -> tour.ClientSideEncryptionExplicitEncryptionAndDecryptionTour.main(arguments)), + new ThrowingRunnable.Named(tour.ClientSideEncryptionExplicitEncryptionOnlyTour.class, + () -> tour.ClientSideEncryptionExplicitEncryptionOnlyTour.main(arguments)), + new 
ThrowingRunnable.Named(tour.ClientSideEncryptionQueryableEncryptionTour.class, + () -> tour.ClientSideEncryptionQueryableEncryptionTour.main(arguments)), + new ThrowingRunnable.Named(tour.ClientSideEncryptionSimpleTour.class, + () -> tour.ClientSideEncryptionSimpleTour.main(arguments)), + new ThrowingRunnable.Named(tour.Decimal128QuickTour.class, + () -> tour.Decimal128QuickTour.main(arguments)), + new ThrowingRunnable.Named(tour.PojoQuickTour.class, + () -> tour.PojoQuickTour.main(arguments)), + new ThrowingRunnable.Named(tour.QuickTour.class, + () -> tour.QuickTour.main(arguments)), + new ThrowingRunnable.Named(tour.Decimal128LegacyAPIQuickTour.class, + () -> tour.Decimal128LegacyAPIQuickTour.main(arguments)), + new ThrowingRunnable.Named(reactivestreams.gridfs.GridFSTour.class, + () -> reactivestreams.gridfs.GridFSTour.main(arguments)), + // This tour is broken and hangs even when run by a JVM. + // See https://jira.mongodb.org/browse/JAVA-5364. + // new ThrowingRunnable.Named(reactivestreams.tour.ClientSideEncryptionAutoEncryptionSettingsTour.class, + // () -> reactivestreams.tour.ClientSideEncryptionAutoEncryptionSettingsTour.main(arguments)), + new ThrowingRunnable.Named(reactivestreams.tour.ClientSideEncryptionExplicitEncryptionAndDecryptionTour.class, + () -> reactivestreams.tour.ClientSideEncryptionExplicitEncryptionAndDecryptionTour.main(arguments)), + new ThrowingRunnable.Named(reactivestreams.tour.ClientSideEncryptionExplicitEncryptionOnlyTour.class, + () -> reactivestreams.tour.ClientSideEncryptionExplicitEncryptionOnlyTour.main(arguments)), + new ThrowingRunnable.Named(reactivestreams.tour.ClientSideEncryptionQueryableEncryptionTour.class, + () -> reactivestreams.tour.ClientSideEncryptionQueryableEncryptionTour.main(arguments)), + new ThrowingRunnable.Named(reactivestreams.tour.ClientSideEncryptionSimpleTour.class, + () -> reactivestreams.tour.ClientSideEncryptionSimpleTour.main(arguments)), + new ThrowingRunnable.Named(reactivestreams.tour.PojoQuickTour.class, + () -> reactivestreams.tour.PojoQuickTour.main(arguments)), + new ThrowingRunnable.Named(reactivestreams.tour.QuickTour.class, + () -> reactivestreams.tour.QuickTour.main(arguments)) + ).map(ThrowingRunnable::runAndCatch) + .filter(Objects::nonNull) + .toList(); + if (!errors.isEmpty()) { + AssertionError error = new AssertionError(String.format("%d %s failed", + errors.size(), errors.size() == 1 ? 
"application" : "applications")); + errors.forEach(error::addSuppressed); + throw error; + } + LOGGER.info("End"); + } + + private NativeImageApp() { + } + + private interface ThrowingRunnable { + void run() throws Exception; + + @Nullable + default Throwable runAndCatch() { + try { + run(); + } catch (Exception | AssertionError e) { + return e; + } + return null; + } + + final class Named implements ThrowingRunnable { + private final String name; + private final ThrowingRunnable loggingRunnable; + + Named(final String name, final ThrowingRunnable runnable) { + this.name = name; + this.loggingRunnable = () -> { + LOGGER.info("Begin {}", name); + try { + runnable.run(); + } catch (Exception | AssertionError e) { + LOGGER.info("Failure in {}", name, e); + throw e; + } finally { + LOGGER.info("End {}", name); + } + }; + } + + Named(final Class mainClass, final ThrowingRunnable runnable) { + this(mainClass.getName(), runnable); + } + + @Override + public void run() throws Exception { + loggingRunnable.run(); + } + + @Override + @Nullable + public Throwable runAndCatch() { + Throwable t = loggingRunnable.runAndCatch(); + if (t != null) { + t = new AssertionError(name, t); + } + return t; + } + } + } +} diff --git a/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/Substitutions.java b/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/Substitutions.java new file mode 100644 index 00000000000..e21d6e6d0bb --- /dev/null +++ b/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/Substitutions.java @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.mongodb.internal.graalvm; + +import com.mongodb.UnixServerAddress; +import com.mongodb.internal.graalvm.substitution.UnixServerAddressSubstitution; + +import static com.mongodb.assertions.Assertions.fail; +import static org.graalvm.nativeimage.ImageInfo.inImageRuntimeCode; + +final class Substitutions { + public static void main(final String... args) { + assertUnixServerAddressSubstitution(); + } + + private static void assertUnixServerAddressSubstitution() { + try { + new UnixServerAddress("/tmp/mongodb-27017.sock"); + if (inImageRuntimeCode()) { + fail(String.format("%s was not applied", UnixServerAddressSubstitution.class)); + } + } catch (UnsupportedOperationException e) { + if (!inImageRuntimeCode()) { + throw e; + } + // expected in GraalVM + } + } + + private Substitutions() { + } +} diff --git a/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/package-info.java b/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/package-info.java new file mode 100644 index 00000000000..00921d8b60e --- /dev/null +++ b/graalvm-native-image-app/src/main/com/mongodb/internal/graalvm/package-info.java @@ -0,0 +1,19 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +@NonNullApi +package com.mongodb.internal.graalvm; + +import com.mongodb.lang.NonNullApi; diff --git a/graalvm-native-image-app/src/main/resources/META-INF/native-image/jni-config.json b/graalvm-native-image-app/src/main/resources/META-INF/native-image/jni-config.json new file mode 100644 index 00000000000..2be5d0ca308 --- /dev/null +++ b/graalvm-native-image-app/src/main/resources/META-INF/native-image/jni-config.json @@ -0,0 +1,6 @@ +[ +{ + "name":"com.mongodb.internal.graalvm.NativeImageApp", + "methods":[{"name":"main","parameterTypes":["java.lang.String[]"] }] +} +] diff --git a/graalvm-native-image-app/src/main/resources/META-INF/native-image/predefined-classes-config.json b/graalvm-native-image-app/src/main/resources/META-INF/native-image/predefined-classes-config.json new file mode 100644 index 00000000000..847895071fb --- /dev/null +++ b/graalvm-native-image-app/src/main/resources/META-INF/native-image/predefined-classes-config.json @@ -0,0 +1,7 @@ +[ + { + "type":"agent-extracted", + "classes":[ + ] + } +] diff --git a/graalvm-native-image-app/src/main/resources/META-INF/native-image/proxy-config.json b/graalvm-native-image-app/src/main/resources/META-INF/native-image/proxy-config.json new file mode 100644 index 00000000000..32960f8ced3 --- /dev/null +++ b/graalvm-native-image-app/src/main/resources/META-INF/native-image/proxy-config.json @@ -0,0 +1,2 @@ +[ +] \ No newline at end of file diff --git a/graalvm-native-image-app/src/main/resources/META-INF/native-image/reflect-config.json b/graalvm-native-image-app/src/main/resources/META-INF/native-image/reflect-config.json new file mode 100644 index 00000000000..609320d4645 --- /dev/null +++ b/graalvm-native-image-app/src/main/resources/META-INF/native-image/reflect-config.json @@ -0,0 +1,132 @@ +[ +{ + "name":"ch.qos.logback.classic.encoder.PatternLayoutEncoder", + "queryAllPublicMethods":true, + "methods":[{"name":"","parameterTypes":[] }] +}, +{ + "name":"ch.qos.logback.classic.joran.SerializedModelConfigurator", + "methods":[{"name":"","parameterTypes":[] }] +}, +{ + "name":"ch.qos.logback.classic.pattern.DateConverter", + "methods":[{"name":"","parameterTypes":[] }] +}, +{ + "name":"ch.qos.logback.classic.pattern.LevelConverter", + "methods":[{"name":"","parameterTypes":[] }] +}, +{ + "name":"ch.qos.logback.classic.pattern.LineSeparatorConverter", + "methods":[{"name":"","parameterTypes":[] }] +}, +{ + "name":"ch.qos.logback.classic.pattern.LoggerConverter", + "methods":[{"name":"","parameterTypes":[] }] +}, +{ + "name":"ch.qos.logback.classic.pattern.MessageConverter", + "methods":[{"name":"","parameterTypes":[] }] +}, +{ + "name":"ch.qos.logback.classic.pattern.ThreadConverter", + "methods":[{"name":"","parameterTypes":[] }] +}, +{ + "name":"ch.qos.logback.classic.util.DefaultJoranConfigurator", + "methods":[{"name":"","parameterTypes":[] }] +}, +{ + "name":"ch.qos.logback.core.ConsoleAppender", + "queryAllPublicMethods":true, + "methods":[{"name":"","parameterTypes":[] }] +}, +{ + "name":"ch.qos.logback.core.OutputStreamAppender", + 
"methods":[{"name":"setEncoder","parameterTypes":["ch.qos.logback.core.encoder.Encoder"] }] +}, +{ + "name":"ch.qos.logback.core.encoder.Encoder", + "methods":[{"name":"valueOf","parameterTypes":["java.lang.String"] }] +}, +{ + "name":"ch.qos.logback.core.encoder.LayoutWrappingEncoder", + "methods":[{"name":"setParent","parameterTypes":["ch.qos.logback.core.spi.ContextAware"] }] +}, +{ + "name":"ch.qos.logback.core.pattern.PatternLayoutEncoderBase", + "methods":[{"name":"setPattern","parameterTypes":["java.lang.String"] }] +}, +{ + "name":"ch.qos.logback.core.spi.ContextAware", + "methods":[{"name":"valueOf","parameterTypes":["java.lang.String"] }] +}, +{ + "name":"com.sun.org.apache.xerces.internal.jaxp.SAXParserFactoryImpl", + "methods":[{"name":"","parameterTypes":[] }] +}, +{ + "name":"java.io.FilePermission" +}, +{ + "name":"java.lang.RuntimePermission" +}, +{ + "name":"java.net.NetPermission" +}, +{ + "name":"java.net.SocketPermission" +}, +{ + "name":"java.net.URLPermission", + "methods":[{"name":"","parameterTypes":["java.lang.String","java.lang.String"] }] +}, +{ + "name":"java.security.AllPermission" +}, +{ + "name":"java.security.SecurityPermission" +}, +{ + "name":"java.util.PropertyPermission" +}, +{ + "name":"java.util.concurrent.atomic.AtomicBoolean", + "fields":[{"name":"value"}] +}, +{ + "name":"java.util.concurrent.atomic.AtomicReference", + "fields":[{"name":"value"}] +}, +{ + "name":"javax.smartcardio.CardPermission" +}, +{ + "name":"reactivestreams.tour.Address", + "allDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true, + "methods":[{"name":"","parameterTypes":[] }, {"name":"getCity","parameterTypes":[] }, {"name":"getStreet","parameterTypes":[] }, {"name":"getZip","parameterTypes":[] }, {"name":"setCity","parameterTypes":["java.lang.String"] }, {"name":"setStreet","parameterTypes":["java.lang.String"] }, {"name":"setZip","parameterTypes":["java.lang.String"] }] +}, +{ + "name":"reactivestreams.tour.Person", + "allDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true, + "methods":[{"name":"","parameterTypes":[] }, {"name":"getAddress","parameterTypes":[] }, {"name":"getAge","parameterTypes":[] }, {"name":"getId","parameterTypes":[] }, {"name":"getName","parameterTypes":[] }, {"name":"setAddress","parameterTypes":["reactivestreams.tour.Address"] }, {"name":"setAge","parameterTypes":["int"] }, {"name":"setId","parameterTypes":["org.bson.types.ObjectId"] }, {"name":"setName","parameterTypes":["java.lang.String"] }] +}, +{ + "name":"tour.Address", + "allDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true, + "methods":[{"name":"","parameterTypes":[] }, {"name":"getCity","parameterTypes":[] }, {"name":"getStreet","parameterTypes":[] }, {"name":"getZip","parameterTypes":[] }, {"name":"setCity","parameterTypes":["java.lang.String"] }, {"name":"setStreet","parameterTypes":["java.lang.String"] }, {"name":"setZip","parameterTypes":["java.lang.String"] }] +}, +{ + "name":"tour.Person", + "allDeclaredFields":true, + "queryAllDeclaredMethods":true, + "queryAllDeclaredConstructors":true, + "methods":[{"name":"","parameterTypes":[] }, {"name":"getAddress","parameterTypes":[] }, {"name":"getAge","parameterTypes":[] }, {"name":"getId","parameterTypes":[] }, {"name":"getName","parameterTypes":[] }, {"name":"setAddress","parameterTypes":["tour.Address"] }, {"name":"setAge","parameterTypes":["int"] }, {"name":"setId","parameterTypes":["org.bson.types.ObjectId"] }, 
{"name":"setName","parameterTypes":["java.lang.String"] }] +} +] diff --git a/graalvm-native-image-app/src/main/resources/META-INF/native-image/resource-config.json b/graalvm-native-image-app/src/main/resources/META-INF/native-image/resource-config.json new file mode 100644 index 00000000000..3727b46aaff --- /dev/null +++ b/graalvm-native-image-app/src/main/resources/META-INF/native-image/resource-config.json @@ -0,0 +1,51 @@ +{ + "resources":{ + "includes":[{ + "pattern":"\\Qcom/sun/jna/darwin-aarch64/libjnidispatch.jnilib\\E" + }, { + "pattern":"\\Qcom/sun/jna/darwin-x86-64/libjnidispatch.jnilib\\E" + }, { + "pattern":"\\Qcom/sun/jna/linux-aarch64/libjnidispatch.so\\E" + }, { + "pattern":"\\Qcom/sun/jna/linux-ppc64le/libjnidispatch.so\\E" + }, { + "pattern":"\\Qcom/sun/jna/linux-s390x/libjnidispatch.so\\E" + }, { + "pattern":"\\Qcom/sun/jna/linux-x86-64/libjnidispatch.so\\E" + }, { + "pattern":"\\Qcom/sun/jna/win32-x86-64/jnidispatch.dll\\E" + }, { + "pattern":"\\Qdarwin/libmongocrypt.dylib\\E" + }, { + "pattern":"\\Qlinux-aarch64/libmongocrypt.so\\E" + }, { + "pattern":"\\Qlinux-ppc64le/libmongocrypt.so\\E" + }, { + "pattern":"\\Qlinux-s390x/libmongocrypt.so\\E" + }, { + "pattern":"\\Qlinux-x86-64/libmongocrypt.so\\E" + }, { + "pattern":"\\Qwin32-x86-64/mongocrypt.dll\\E" + }, { + "pattern":"\\QMETA-INF/services/ch.qos.logback.classic.spi.Configurator\\E" + }, { + "pattern":"\\QMETA-INF/services/java.lang.System$LoggerFinder\\E" + }, { + "pattern":"\\QMETA-INF/services/java.net.spi.InetAddressResolverProvider\\E" + }, { + "pattern":"\\QMETA-INF/services/java.net.spi.URLStreamHandlerProvider\\E" + }, { + "pattern":"\\QMETA-INF/services/java.nio.channels.spi.AsynchronousChannelProvider\\E" + }, { + "pattern":"\\QMETA-INF/services/java.nio.channels.spi.SelectorProvider\\E" + }, { + "pattern":"\\QMETA-INF/services/java.time.zone.ZoneRulesProvider\\E" + }, { + "pattern":"\\QMETA-INF/services/javax.xml.parsers.SAXParserFactory\\E" + }, { + "pattern":"\\QMETA-INF/services/org.slf4j.spi.SLF4JServiceProvider\\E" + }, { + "pattern":"java.base:\\Qjdk/internal/icu/impl/data/icudt72b/nfc.nrm\\E" + }]}, + "bundles":[] +} diff --git a/graalvm-native-image-app/src/main/resources/META-INF/native-image/serialization-config.json b/graalvm-native-image-app/src/main/resources/META-INF/native-image/serialization-config.json new file mode 100644 index 00000000000..d0304f2a1c7 --- /dev/null +++ b/graalvm-native-image-app/src/main/resources/META-INF/native-image/serialization-config.json @@ -0,0 +1,8 @@ +{ + "types":[ + ], + "lambdaCapturingTypes":[ + ], + "proxies":[ + ] +} \ No newline at end of file diff --git a/graalvm-native-image-app/src/main/resources/META-INF/services/com.mongodb.spi.dns.DnsClientProvider b/graalvm-native-image-app/src/main/resources/META-INF/services/com.mongodb.spi.dns.DnsClientProvider new file mode 100644 index 00000000000..4b53a569c91 --- /dev/null +++ b/graalvm-native-image-app/src/main/resources/META-INF/services/com.mongodb.spi.dns.DnsClientProvider @@ -0,0 +1 @@ +com.mongodb.internal.graalvm.CustomDnsClientProvider diff --git a/graalvm-native-image-app/src/main/resources/META-INF/services/com.mongodb.spi.dns.InetAddressResolverProvider b/graalvm-native-image-app/src/main/resources/META-INF/services/com.mongodb.spi.dns.InetAddressResolverProvider new file mode 100644 index 00000000000..c66b3b99e63 --- /dev/null +++ b/graalvm-native-image-app/src/main/resources/META-INF/services/com.mongodb.spi.dns.InetAddressResolverProvider @@ -0,0 +1 @@ 
+com.mongodb.internal.graalvm.CustomInetAddressResolverProvider diff --git a/gradle.properties b/gradle.properties new file mode 100644 index 00000000000..00024442054 --- /dev/null +++ b/gradle.properties @@ -0,0 +1,26 @@ +# +# Copyright 2008-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +version=5.7.0-SNAPSHOT + +org.gradle.daemon=true +org.gradle.jvmargs=-Dfile.encoding=UTF-8 -Duser.country=US -Duser.language=en +## NOTE: This property is also used to generate scala compile versions in BOM. +supportedScalaVersions=2.13,2.12,2.11 +defaultScalaVersion=2.13 +runOnceTasks=clean,release +org.gradle.java.installations.auto-download=false +org.gradle.java.installations.fromEnv=JDK8,JDK11,JDK17,JDK21,JDK21_GRAALVM diff --git a/gradle/libs.versions.toml b/gradle/libs.versions.toml new file mode 100644 index 00000000000..8b8222d66e5 --- /dev/null +++ b/gradle/libs.versions.toml @@ -0,0 +1,216 @@ +# Copyright 2008-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +[versions] +aws-sdk-v1 = "1.12.782" +aws-sdk-v2 = "2.30.31" +graal-sdk = "24.0.0" +jna = "5.11.0" +jnr-unixsocket = "0.38.17" +netty-bom = "4.1.87.Final" +project-reactor-bom = "2022.0.0" +reactive-streams = "1.0.4" +snappy = "1.1.10.3" +zstd = "1.5.5-3" +jetbrains-annotations = "26.0.2" + +kotlin = "1.8.10" +kotlinx-coroutines-bom = "1.6.4" +kotlinx-datetime = "0.4.0" +kotlinx-serialization = "1.5.0" + +scala-v2-v13 = "2.13.16" +scala-v2-v12 = "2.12.20" +scala-v2-v11 = "2.11.12" + +# Test +assertj = "3.24.2" +aws-lambda-core = "1.2.2" +aws-lambda-events = "3.11.1" +cglib = "2.2.2" +classgraph = "4.8.154" +findbugs-jsr = "1.3.9" +groovy = "3.0.9" +hamcrest = "1.3" +jmh = "1.37" +junit-bom = "5.10.2" +logback = "1.3.14" +mockito = "5.11.0" +mockito-java8 = "4.6.1" +mockito-kotlin = "4.1.0" +objenesis = "1.3" +reflections = "0.9.10" +slf4j = "1.7.6" +spock-bom = "2.1-groovy-3.0" +scala-test = "3.2.18" +scala-test-plus = "3.2.18.0" + +# Plugins +plugin-bnd = "5.1.2" +plugin-build-config = "3.0.3" +plugin-detekt = "1.21.0" +plugin-dokka = "1.8.10" +plugin-download = "5.6.0" +plugin-graalvm = "0.9.23" +plugin-optional-base = "7.0.0" +plugin-nexus-publish = "2.0.0" +plugin-shadow = "8.3.6" +plugin-spotbugs = "6.0.15" +plugin-spotless = "6.14.0" +plugin-test-logger = "4.0.0" + +[libraries] +aws-java-sdk-v1-core = { module = "com.amazonaws:aws-java-sdk-core", version.ref = "aws-sdk-v1" } +aws-java-sdk-v1-sts = { module = "com.amazonaws:aws-java-sdk-sts", version.ref = "aws-sdk-v1" } +aws-java-sdk-v2-auth = { module = "software.amazon.awssdk:auth", version.ref = "aws-sdk-v2" } +aws-java-sdk-v2-sts = { module = "software.amazon.awssdk:sts", version.ref = "aws-sdk-v2" } + +jna = { module = "net.java.dev.jna:jna", version.ref = "jna" } +jna-platform = { module = "net.java.dev.jna:jna-platform", version.ref = "jna" } +jnr-unixsocket = { module = "com.github.jnr:jnr-unixsocket", version.ref = "jnr-unixsocket" } +jetbrains-annotations = {module = "org.jetbrains:annotations", version.ref = "jetbrains-annotations" } + +netty-bom = { module = "io.netty:netty-bom", version.ref = "netty-bom" } +netty-buffer = { module = "io.netty:netty-buffer" } +netty-handler = { module = "io.netty:netty-handler" } +netty-transport = { module = "io.netty:netty-transport" } + +project-reactor-bom = { module = "io.projectreactor:reactor-bom", version.ref = "project-reactor-bom" } +project-reactor-core = { module = "io.projectreactor:reactor-core" } +reactive-streams = { module = " org.reactivestreams:reactive-streams", version.ref = "reactive-streams" } + +slf4j = { module = "org.slf4j:slf4j-api", version.ref = "slf4j" } +snappy-java = { module = "org.xerial.snappy:snappy-java", version.ref = "snappy" } +zstd-jni = { module = "com.github.luben:zstd-jni", version.ref = "zstd" } + +graal-sdk = { module = "org.graalvm.sdk:graal-sdk", version.ref = "graal-sdk" } +graal-sdk-nativeimage = { module = "org.graalvm.sdk:nativeimage", version.ref = "graal-sdk" } + +kotlin-bom = { module = "org.jetbrains.kotlin:kotlin-bom" } +kotlin-stdlib-jdk8 = { module = "org.jetbrains.kotlin:kotlin-stdlib-jdk8" } +kotlinx-coroutines-bom = { module = "org.jetbrains.kotlinx:kotlinx-coroutines-bom", version.ref = "kotlinx-coroutines-bom" } +kotlinx-coroutines-core = { module = "org.jetbrains.kotlinx:kotlinx-coroutines-core" } +kotlinx-coroutines-reactive = { module = "org.jetbrains.kotlinx:kotlinx-coroutines-reactive" } +kotlin-reflect = { module = "org.jetbrains.kotlin:kotlin-reflect" } +kotlinx-serialization = { module = 
"org.jetbrains.kotlinx:kotlinx-serialization-bom", version.ref = "kotlinx-serialization" } +kotlinx-serialization-core = { module = "org.jetbrains.kotlinx:kotlinx-serialization-core" } +kotlinx-serialization-json = { module = "org.jetbrains.kotlinx:kotlinx-serialization-json" } +kotlinx-serialization-datetime = { module = "org.jetbrains.kotlinx:kotlinx-datetime", version.ref = "kotlinx-datetime" } + +scala-library-v2-v13 = { module = "org.scala-lang:scala-library", version.ref = "scala-v2-v13" } +scala-reflect-v2-v13 = { module = "org.scala-lang:scala-reflect", version.ref = "scala-v2-v13" } + +scala-library-v2-v12 = { module = "org.scala-lang:scala-library", version.ref = "scala-v2-v12" } +scala-reflect-v2-v12 = { module = "org.scala-lang:scala-reflect", version.ref = "scala-v2-v12" } + +scala-library-v2-v11 = { module = "org.scala-lang:scala-library", version.ref = "scala-v2-v11" } +scala-reflect-v2-v11 = { module = "org.scala-lang:scala-reflect", version.ref = "scala-v2-v11" } + +# Test +jmh-core = { module = "org.openjdk.jmh:jmh-core", version.ref = "jmh"} +jmh-generator-annprocess = { module = "org.openjdk.jmh:jmh-generator-annprocess", version.ref = "jmh"} + +junit-bom = { module = "org.junit:junit-bom", version.ref = "junit-bom" } +junit-jupiter = { module = "org.junit.jupiter:junit-jupiter" } +junit-jupiter-params = { module = "org.junit.jupiter:junit-jupiter-params" } +junit-jupiter-engine = { module = "org.junit.jupiter:junit-jupiter-engine" } +junit-vintage-engine = { module = "org.junit.vintage:junit-vintage-engine" } +junit-kotlin = { module = "org.jetbrains.kotlin:kotlin-test-junit5" } + +spock-bom = { module = "org.spockframework:spock-bom", version.ref = "spock-bom" } +spock-core = { module = "org.spockframework:spock-core" } +spock-junit4 = { module = "org.spockframework:spock-junit4" } + +mockito-core = { module = "org.mockito:mockito-core", version.ref = "mockito" } +mockito-junit-jupiter = { module = "org.mockito:mockito-junit-jupiter", version.ref = "mockito" } +mockito-junit-jupiter-java8 = { module = "org.mockito:mockito-junit-jupiter", version.ref = "mockito-java8" } +mockito-core-java8 = { module = "org.mockito:mockito-core", version.ref = "mockito-java8" } +mockito-inline-java8 = { module = "org.mockito:mockito-inline", version.ref = "mockito-java8" } +mockito-kotlin = { module = "org.mockito.kotlin:mockito-kotlin", version.ref = "mockito-kotlin" } + + +scala-test-flatspec-v2-v13 = { module = "org.scalatest:scalatest-flatspec_2.13", version.ref = "scala-test" } +scala-test-shouldmatchers-v2-v13 = { module = "org.scalatest:scalatest-shouldmatchers_2.13", version.ref = "scala-test" } +scala-test-mockito-v2-v13 = { module = "org.scalatestplus:mockito-4-11_2.13", version.ref = "scala-test-plus" } +scala-test-junit-runner-v2-v13 = { module = "org.scalatestplus:junit-5-10_2.13", version.ref = "scala-test-plus" } + +scala-test-flatspec-v2-v12 = { module = "org.scalatest:scalatest-flatspec_2.12", version.ref = "scala-test" } +scala-test-shouldmatchers-v2-v12 = { module = "org.scalatest:scalatest-shouldmatchers_2.12", version.ref = "scala-test" } +scala-test-mockito-v2-v12 = { module = "org.scalatestplus:mockito-4-11_2.12", version.ref = "scala-test-plus" } +scala-test-junit-runner-v2-v12 = { module = "org.scalatestplus:junit-5-10_2.12", version.ref = "scala-test-plus" } + +scala-test-flatspec-v2-v11 = { module = "org.scalatest:scalatest-flatspec_2.11", version.ref = "scala-test" } +scala-test-shouldmatchers-v2-v11 = { module = 
"org.scalatest:scalatest-shouldmatchers_2.11", version.ref = "scala-test" } +scala-test-mockito-v2-v11 = { module = "org.scalatestplus:mockito-4-11_2.11", version.ref = "scala-test-plus" } +scala-test-junit-runner-v2-v11 = { module = "org.scalatestplus:junit-5-10_2.11", version.ref = "scala-test-plus" } + +kotlinx-coroutines-test = { module = "org.jetbrains.kotlinx:kotlinx-coroutines-test" } + +assertj = { module = "org.assertj:assertj-core", version.ref = "assertj" } +aws-lambda-core = { module = " com.amazonaws:aws-lambda-java-core", version.ref = "aws-lambda-core" } +aws-lambda-events = { module = " com.amazonaws:aws-lambda-java-events", version.ref = "aws-lambda-events" } +cglib = { module = "cglib:cglib-nodep", version.ref = "cglib" } +classgraph = { module = "io.github.classgraph:classgraph", version.ref = "classgraph" } +findbugs-jsr = { module = "com.google.code.findbugs:jsr305", version.ref = "findbugs-jsr" } +groovy = { module = "org.codehaus.groovy:groovy-all", version.ref = "groovy" } +hamcrest-all = { module = "org.hamcrest:hamcrest-all", version.ref = "hamcrest" } +logback-classic = { module = "ch.qos.logback:logback-classic", version.ref = "logback" } +netty-tcnative-boringssl-static = { module = "io.netty:netty-tcnative-boringssl-static" } +objenesis = { module = "org.objenesis:objenesis", version.ref = "objenesis" } +project-reactor-test = { module = "io.projectreactor:reactor-test" } +reactive-streams-tck = { module = " org.reactivestreams:reactive-streams-tck", version.ref = "reactive-streams" } +reflections = { module = "org.reflections:reflections", version.ref = "reflections" } + + +[bundles] +aws-java-sdk-v1 = ["aws-java-sdk-v1-core", "aws-java-sdk-v1-sts"] +aws-java-sdk-v2 = ["aws-java-sdk-v2-auth", "aws-java-sdk-v2-sts"] +netty = ["netty-buffer", "netty-handler", "netty-transport"] + +scala-v2-v13 = ["scala-library-v2-v13", "scala-reflect-v2-v13"] +scala-v2-v12 = ["scala-library-v2-v12", "scala-reflect-v2-v12"] +scala-v2-v11 = ["scala-library-v2-v11", "scala-reflect-v2-v11"] + +# Test +junit = ["junit-jupiter", "junit-jupiter-params", "junit-jupiter-engine", "logback-classic", "hamcrest-all"] +junit-vintage = ["junit-vintage-engine", "junit-jupiter-params", "junit-jupiter-engine", "logback-classic", "hamcrest-all"] +spock = ["spock-core", "spock-junit4"] + +mockito = ["mockito-junit-jupiter", "mockito-core"] +mockito-java8 = ["mockito-junit-jupiter-java8", "mockito-core-java8", "mockito-inline-java8"] +mockito-kotlin = ["mockito-kotlin", "mockito-junit-jupiter-java8"] + +scala-test-v2-v13 = ["scala-test-flatspec-v2-v13", "scala-test-shouldmatchers-v2-v13", "scala-test-mockito-v2-v13", + "scala-test-junit-runner-v2-v13", "reflections"] +scala-test-v2-v12 = ["scala-test-flatspec-v2-v12", "scala-test-shouldmatchers-v2-v12", "scala-test-mockito-v2-v12", + "scala-test-junit-runner-v2-v12", "reflections"] +scala-test-v2-v11 = ["scala-test-flatspec-v2-v11", "scala-test-shouldmatchers-v2-v11", "scala-test-mockito-v2-v11", + "scala-test-junit-runner-v2-v11", "reflections"] + +[plugins] +kotlin-gradle = { id = "org.jetbrains.kotlin.jvm", version.ref = "kotlin" } +bnd = { id = "biz.aQute.bnd.builder", version.ref = "plugin-bnd" } +build-config = { id = "com.github.gmazzo.buildconfig", version.ref = "plugin-build-config" } +detekt = { id = "io.gitlab.arturbosch.detekt", version.ref = "plugin-detekt" } +dokka = { id = "org.jetbrains.dokka", version.ref = "plugin-dokka" } +download = { id = "de.undercouch.download", version.ref = "plugin-download" } +graalvm-buildtools = { id = 
"org.graalvm.buildtools.native", version.ref = "plugin-graalvm" } +kotlin = { id = "org.jetbrains.kotlin.jvm", version.ref = "kotlin" } +kotlin-serialization = { id = "org.jetbrains.kotlin.plugin.serialization", version.ref = "kotlin" } +nexus-publish = { id = "io.github.gradle-nexus.publish-plugin", version.ref = "plugin-nexus-publish" } +optional = { id = "nebula.optional-base", version.ref = "plugin-optional-base" } +shadow = { id = "com.gradleup.shadow", version.ref = "plugin-shadow" } +spotbugs = { id = "com.github.spotbugs", version.ref = "plugin-spotbugs" } +spotless = { id = "com.diffplug.spotless", version.ref = "plugin-spotless" } +test-logger = { id = "com.adarshr.test-logger", version.ref = "plugin-test-logger" } diff --git a/gradle/scala/lib/scala-ant-2.13.1.jar b/gradle/scala/lib/scala-ant-2.13.1.jar new file mode 100644 index 00000000000..4a19217be5d Binary files /dev/null and b/gradle/scala/lib/scala-ant-2.13.1.jar differ diff --git a/gradle/wrapper/gradle-wrapper.jar b/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 00000000000..afba109285a Binary files /dev/null and b/gradle/wrapper/gradle-wrapper.jar differ diff --git a/gradle/wrapper/gradle-wrapper.properties b/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 00000000000..9bf7bd33972 --- /dev/null +++ b/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,6 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-8.12.1-bin.zip +networkTimeout=10000 +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists diff --git a/gradlew b/gradlew new file mode 100755 index 00000000000..65dcd68d65c --- /dev/null +++ b/gradlew @@ -0,0 +1,244 @@ +#!/bin/sh + +# +# Copyright © 2015-2021 the original authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +############################################################################## +# +# Gradle start up script for POSIX generated by Gradle. +# +# Important for running: +# +# (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is +# noncompliant, but you have some other compliant shell such as ksh or +# bash, then to run this script, type that shell name before the whole +# command line, like: +# +# ksh Gradle +# +# Busybox and similar reduced shells will NOT work, because this script +# requires all of these POSIX shell features: +# * functions; +# * expansions «$var», «${var}», «${var:-default}», «${var+SET}», +# «${var#prefix}», «${var%suffix}», and «$( cmd )»; +# * compound commands having a testable exit status, especially «case»; +# * various built-in commands including «command», «set», and «ulimit». +# +# Important for patching: +# +# (2) This script targets any POSIX shell, so it avoids extensions provided +# by Bash, Ksh, etc; in particular arrays are avoided. 
+# +# The "traditional" practice of packing multiple parameters into a +# space-separated string is a well documented source of bugs and security +# problems, so this is (mostly) avoided, by progressively accumulating +# options in "$@", and eventually passing that to Java. +# +# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, +# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; +# see the in-line comments for details. +# +# There are tweaks for specific operating systems such as AIX, CygWin, +# Darwin, MinGW, and NonStop. +# +# (3) This script is generated from the Groovy template +# https://github.com/gradle/gradle/blob/HEAD/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt +# within the Gradle project. +# +# You can find Gradle at https://github.com/gradle/gradle/. +# +############################################################################## + +# Attempt to set APP_HOME + +# Resolve links: $0 may be a link +app_path=$0 + +# Need this for daisy-chained symlinks. +while + APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path + [ -h "$app_path" ] +do + ls=$( ls -ld "$app_path" ) + link=${ls#*' -> '} + case $link in #( + /*) app_path=$link ;; #( + *) app_path=$APP_HOME$link ;; + esac +done + +# This is normally unused +# shellcheck disable=SC2034 +APP_BASE_NAME=${0##*/} +APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD=maximum + +warn () { + echo "$*" +} >&2 + +die () { + echo + echo "$*" + echo + exit 1 +} >&2 + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "$( uname )" in #( + CYGWIN* ) cygwin=true ;; #( + Darwin* ) darwin=true ;; #( + MSYS* | MINGW* ) msys=true ;; #( + NONSTOP* ) nonstop=true ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD=$JAVA_HOME/jre/sh/java + else + JAVACMD=$JAVA_HOME/bin/java + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD=java + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." +fi + +# Increase the maximum file descriptors if we can. +if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then + case $MAX_FD in #( + max*) + # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC3045 + MAX_FD=$( ulimit -H -n ) || + warn "Could not query maximum file descriptor limit" + esac + case $MAX_FD in #( + '' | soft) :;; #( + *) + # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. 
+ # shellcheck disable=SC3045 + ulimit -n "$MAX_FD" || + warn "Could not set maximum file descriptor limit to $MAX_FD" + esac +fi + +# Collect all arguments for the java command, stacking in reverse order: +# * args from the command line +# * the main class name +# * -classpath +# * -D...appname settings +# * --module-path (only if needed) +# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. + +# For Cygwin or MSYS, switch paths to Windows format before running java +if "$cygwin" || "$msys" ; then + APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) + CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) + + JAVACMD=$( cygpath --unix "$JAVACMD" ) + + # Now convert the arguments - kludge to limit ourselves to /bin/sh + for arg do + if + case $arg in #( + -*) false ;; # don't mess with options #( + /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath + [ -e "$t" ] ;; #( + *) false ;; + esac + then + arg=$( cygpath --path --ignore --mixed "$arg" ) + fi + # Roll the args list around exactly as many times as the number of + # args, so each arg winds up back in the position where it started, but + # possibly modified. + # + # NB: a `for` loop captures its iteration list before it begins, so + # changing the positional parameters here affects neither the number of + # iterations, nor the values presented in `arg`. + shift # remove old arg + set -- "$@" "$arg" # push replacement arg + done +fi + +# Collect all arguments for the java command; +# * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of +# shell script including quotes and variable substitutions, so put them in +# double quotes to make sure that they get re-expanded; and +# * put everything else in single quotes, so that it's not re-expanded. + +set -- \ + "-Dorg.gradle.appname=$APP_BASE_NAME" \ + -classpath "$CLASSPATH" \ + org.gradle.wrapper.GradleWrapperMain \ + "$@" + +# Stop when "xargs" is not available. +if ! command -v xargs >/dev/null 2>&1 +then + die "xargs is not available" +fi + +# Use "xargs" to parse quoted args. +# +# With -n1 it outputs one arg per line, with the quotes and backslashes removed. +# +# In Bash we could simply go: +# +# readarray ARGS < <( xargs -n1 <<<"$var" ) && +# set -- "${ARGS[@]}" "$@" +# +# but POSIX shell has neither arrays nor command substitution, so instead we +# post-process each arg (as a line of input to sed) to backslash-escape any +# character that might be a shell metacharacter, then use eval to reverse +# that process (while maintaining the separation between arguments), and wrap +# the whole thing up as a single "set" statement. +# +# This will of course break if any of these variables contains a newline or +# an unmatched quote. +# + +eval "set -- $( + printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" | + xargs -n1 | + sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' | + tr '\n' ' ' + )" '"$@"' + +exec "$JAVACMD" "$@" diff --git a/gradlew.bat b/gradlew.bat new file mode 100644 index 00000000000..93e3f59f135 --- /dev/null +++ b/gradlew.bat @@ -0,0 +1,92 @@ +@rem +@rem Copyright 2015 the original author or authors. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. 
+@rem You may obtain a copy of the License at +@rem +@rem https://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. +@rem + +@if "%DEBUG%"=="" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem ########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%"=="" set DIRNAME=. +@rem This is normally unused +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Resolve any "." and ".." in APP_HOME to make it shorter. +for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if %ERRORLEVEL% equ 0 goto execute + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto execute + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* + +:end +@rem End local scope for the variables with windows NT shell +if %ERRORLEVEL% equ 0 goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! 
+set EXIT_CODE=%ERRORLEVEL% +if %EXIT_CODE% equ 0 set EXIT_CODE=1 +if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE% +exit /b %EXIT_CODE% + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/java-security-ocsp-property b/java-security-ocsp-property new file mode 100644 index 00000000000..e9fafb22ba5 --- /dev/null +++ b/java-security-ocsp-property @@ -0,0 +1 @@ +ocsp.enable=true diff --git a/lib/ant-contrib-1.0b3.jar b/lib/ant-contrib-1.0b3.jar deleted file mode 100644 index 062537661a5..00000000000 Binary files a/lib/ant-contrib-1.0b3.jar and /dev/null differ diff --git a/lib/clirr-core-0.6-uber.jar b/lib/clirr-core-0.6-uber.jar deleted file mode 100644 index d4245f483de..00000000000 Binary files a/lib/clirr-core-0.6-uber.jar and /dev/null differ diff --git a/lib/cobertura/asm-3.0.jar b/lib/cobertura/asm-3.0.jar deleted file mode 100644 index 112f5bd4aec..00000000000 Binary files a/lib/cobertura/asm-3.0.jar and /dev/null differ diff --git a/lib/cobertura/asm-tree-3.0.jar b/lib/cobertura/asm-tree-3.0.jar deleted file mode 100644 index 2a4b20856c0..00000000000 Binary files a/lib/cobertura/asm-tree-3.0.jar and /dev/null differ diff --git a/lib/cobertura/cobertura-1.9.4.1.jar b/lib/cobertura/cobertura-1.9.4.1.jar deleted file mode 100644 index 438fe551a04..00000000000 Binary files a/lib/cobertura/cobertura-1.9.4.1.jar and /dev/null differ diff --git a/lib/cobertura/jakarta-oro-2.0.8.jar b/lib/cobertura/jakarta-oro-2.0.8.jar deleted file mode 100644 index 23488d2600f..00000000000 Binary files a/lib/cobertura/jakarta-oro-2.0.8.jar and /dev/null differ diff --git a/lib/cobertura/log4j-1.2.9.jar b/lib/cobertura/log4j-1.2.9.jar deleted file mode 100644 index a6568b01a21..00000000000 Binary files a/lib/cobertura/log4j-1.2.9.jar and /dev/null differ diff --git a/lib/testng-6.3.1.jar b/lib/testng-6.3.1.jar deleted file mode 100644 index 3bfa24f9c29..00000000000 Binary files a/lib/testng-6.3.1.jar and /dev/null differ diff --git a/maven/build.xml b/maven/build.xml deleted file mode 100644 index 319bf90a76f..00000000000 --- a/maven/build.xml +++ /dev/null @@ -1,220 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/maven/maven-ant-tasks-2.1.3.jar b/maven/maven-ant-tasks-2.1.3.jar deleted file mode 100644 index bec446fff5f..00000000000 Binary files a/maven/maven-ant-tasks-2.1.3.jar and /dev/null differ diff --git a/maven/maven-bson.xml b/maven/maven-bson.xml deleted file mode 100644 index f64fe61228e..00000000000 --- a/maven/maven-bson.xml +++ /dev/null @@ -1,35 +0,0 @@ - - - 4.0.0 - org.mongodb - bson - jar - BSON - @VERSION@ - The BSON libs - http://bsonspec.org/ - - - - The Apache Software License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - https://github.com/mongodb/mongo-java-driver - - - - - - - - Various - 10gen - - - - - diff --git a/maven/maven-mongo-java-driver.xml b/maven/maven-mongo-java-driver.xml deleted file mode 100644 index 64a5b47a887..00000000000 --- a/maven/maven-mongo-java-driver.xml +++ /dev/null @@ -1,34 +0,0 @@ - - - 4.0.0 - org.mongodb - mongo-java-driver - jar - MongoDB Java Driver - @VERSION@ - The MongoDB Java driver - 
http://www.mongodb.org - - - - The Apache Software License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0.txt - repo - - - - - https://github.com/mongodb/mongo-java-driver - - - - - - - - Various - 10gen - - - - diff --git a/mongodb-crypt/build.gradle.kts b/mongodb-crypt/build.gradle.kts new file mode 100644 index 00000000000..812753151d5 --- /dev/null +++ b/mongodb-crypt/build.gradle.kts @@ -0,0 +1,145 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import ProjectExtensions.configureJarManifest +import ProjectExtensions.configureMavenPublication +import de.undercouch.gradle.tasks.download.Download + +plugins { + id("project.java") + alias(libs.plugins.download) +} + +dependencies { + api(project(path = ":bson", configuration = "default")) + api(libs.jna) +} + +configureMavenPublication { + pom { + name.set("MongoCrypt") + description.set("MongoDB client-side crypto support") + } +} + +configureJarManifest { + attributes["Automatic-Module-Name"] = "com.mongodb.crypt.capi" + attributes["Bundle-Name"] = "MongoCrypt" + attributes["Bundle-SymbolicName"] = "com.mongodb.crypt.capi" + attributes["Import-Package"] = "org.slf4j.*;resolution:=optional,org.bson.*" + attributes["-exportcontents"] = "com.mongodb.*;-noimport:=true" + attributes["Private-Package"] = "" +} + +/* + * Jna copy or download resources + */ +val jnaDownloadsDir = rootProject.file("build/jnaLibs/downloads/").path +val jnaResourcesDir = rootProject.file("build/jnaLibs/resources/").path +val jnaLibPlatform: String = + if (com.sun.jna.Platform.RESOURCE_PREFIX.startsWith("darwin")) "darwin" else com.sun.jna.Platform.RESOURCE_PREFIX +val jnaLibsPath: String = System.getProperty("jnaLibsPath", "${jnaResourcesDir}${jnaLibPlatform}") +val jnaResources: String = System.getProperty("jna.library.path", jnaLibsPath) + +// Download jnaLibs that match the git tag or revision to jnaResourcesBuildDir +val downloadRevision = "1.15.1" +val binariesArchiveName = "libmongocrypt-java.tar.gz" + +/** + * The name of the archive includes downloadRevision to ensure that: + * - the archive is downloaded if the revision changes. + * - the archive is not downloaded if the revision is the same and archive had already been saved in build output. 
+ */ +val localBinariesArchiveName = "libmongocrypt-java-$downloadRevision.tar.gz" + +val downloadUrl: String = + "https://mciuploads.s3.amazonaws.com/libmongocrypt/java/$downloadRevision/$binariesArchiveName" + +val jnaMapping: Map = + mapOf( + "rhel-62-64-bit" to "linux-x86-64", + "rhel72-zseries-test" to "linux-s390x", + "rhel-71-ppc64el" to "linux-ppc64le", + "ubuntu1604-arm64" to "linux-aarch64", + "windows-test" to "win32-x86-64", + "macos" to "darwin") + +sourceSets { main { java { resources { srcDirs(jnaResourcesDir) } } } } + +tasks.register("downloadJava") { + src(downloadUrl) + dest("${jnaDownloadsDir}/$localBinariesArchiveName") + overwrite(true) + /* To make sure we don't download archive with binaries if it hasn't been changed in S3 bucket since last download.*/ + onlyIfModified(true) +} + +tasks.register("unzipJava") { + /* + Clean up the directory first if the task is not UP-TO-DATE. + This can happen if the download revision has been changed and the archive is downloaded again. + */ + doFirst { + println("Cleaning up $jnaResourcesDir") + delete(jnaResourcesDir) + } + from(tarTree(resources.gzip("${jnaDownloadsDir}/$localBinariesArchiveName"))) + include( + jnaMapping.keys.flatMap { + listOf( + "${it}/nocrypto/**/libmongocrypt.so", "${it}/lib/**/libmongocrypt.dylib", "${it}/bin/**/mongocrypt.dll") + }) + eachFile { path = "${jnaMapping[path.substringBefore("/")]}/${name}" } + into(jnaResourcesDir) + dependsOn("downloadJava") + + doLast { println("jna.library.path contents: \n ${fileTree(jnaResourcesDir).files.joinToString(",\n ")}") } +} + +// The `processResources` task (defined by the `java-library` plug-in) consumes files in the main +// source set. +// Add a dependency on `unzipJava`. `unzipJava` adds libmongocrypt libraries to the main source set. +tasks.processResources { mustRunAfter(tasks.named("unzipJava")) } + +tasks.register("downloadJnaLibs") { dependsOn("downloadJava", "unzipJava") } + +tasks.test { + systemProperty("jna.debug_load", "true") + systemProperty("jna.library.path", jnaResources) + useJUnitPlatform() + testLogging { events("passed", "skipped", "failed") } + + doFirst { + println("jna.library.path contents:") + println(fileTree(jnaResources) { this.setIncludes(listOf("*.*")) }.files.joinToString(",\n ", " ")) + } + dependsOn("downloadJnaLibs", "downloadJava", "unzipJava") +} + +tasks.withType { + description = + """$description + | System properties: + | ================= + | + | jnaLibsPath : Custom local JNA library path for inclusion into the build (rather than downloading from s3) + | gitRevision : Optional Git Revision to download the built resources for from s3. + """.trimMargin() +} + +tasks.withType { + // NOTE this enables depending on the mongocrypt from driver-core + dependsOn("downloadJnaLibs") +} diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptException.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptException.java new file mode 100644 index 00000000000..c3110297ae4 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/MongoCryptException.java @@ -0,0 +1,64 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.crypt.capi; + + +/** + * Exception thrown for errors originating in the mongodb-crypt module. + * + * @serial exclude + */ +public class MongoCryptException extends RuntimeException { + private static final long serialVersionUID = -5524416583514807953L; + private final int code; + + /** + * Construct an instance + * + * @param message the message + * @param code the code + */ + public MongoCryptException(final String message, final int code) { + super(message); + this.code = code; + } + + /** + * @param msg the message + */ + public MongoCryptException(final String msg) { + super(msg); + this.code = -1; + } + + /** + * @param msg the message + * @param cause the cause + */ + public MongoCryptException(final String msg, final Throwable cause) { + super(msg, cause); + this.code = -1; + } + + /** + * @return the error code for the exception. + */ + public int getCode() { + return code; + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/crypt/capi/package-info.java b/mongodb-crypt/src/main/com/mongodb/crypt/capi/package-info.java new file mode 100644 index 00000000000..c1c9060de33 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/crypt/capi/package-info.java @@ -0,0 +1,21 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/** + * The mongocrypt API package + */ +package com.mongodb.crypt.capi; diff --git a/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/BinaryHolder.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/BinaryHolder.java new file mode 100644 index 00000000000..14c7c7b29b6 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/BinaryHolder.java @@ -0,0 +1,45 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.mongodb.internal.crypt.capi; + +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_t; + +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_destroy; + +// Wrap JNA memory and a mongocrypt_binary_t that references that memory, in order to ensure that the JNA Memory is not GC'd before the +// mongocrypt_binary_t is destroyed +class BinaryHolder implements AutoCloseable { + + private final DisposableMemory memory; + private final mongocrypt_binary_t binary; + + BinaryHolder(final DisposableMemory memory, final mongocrypt_binary_t binary) { + this.memory = memory; + this.binary = binary; + } + + mongocrypt_binary_t getBinary() { + return binary; + } + + @Override + public void close() { + mongocrypt_binary_destroy(binary); + memory.dispose(); + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/CAPI.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/CAPI.java new file mode 100644 index 00000000000..41cc8ced31b --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/CAPI.java @@ -0,0 +1,1214 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.internal.crypt.capi; + +import com.sun.jna.Callback; +import com.sun.jna.Memory; +import com.sun.jna.Native; +import com.sun.jna.Pointer; +import com.sun.jna.PointerType; +import com.sun.jna.ptr.PointerByReference; + +//CHECKSTYLE:OFF + +/** + * For internal use only. Not part of the public API. + */ +@SuppressWarnings("WeakerAccess") +public class CAPI { + + public static class cstring extends PointerType { + public cstring() { + super(); + } + + public cstring(String string) { + Pointer m = new Memory(string.length() + 1); + m.setString(0, string); + setPointer(m); + } + + public String toString() { + return getPointer().getString(0); + } + } + + + /** + * Indicates success or contains error information. + *
<p>
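+ * An illustrative sketch of this pattern (hedged, not upstream documentation; {@code ctx} is an
+ * initialized context, and {@code mongocrypt_ctx_status} is declared further down in this class):
+ * <pre>{@code
+ * mongocrypt_status_t status = mongocrypt_status_new();
+ * try {
+ *     mongocrypt_ctx_status(ctx, status); // copies the handle's failure details into `status`
+ *     if (!mongocrypt_status_ok(status)) {
+ *         throw new MongoCryptException(mongocrypt_status_message(status, null).toString(),
+ *                 mongocrypt_status_code(status));
+ *     }
+ * } finally {
+ *     mongocrypt_status_destroy(status);
+ * }
+ * }</pre>
+ * <p>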
+ * Functions like @ref mongocrypt_ctx_encrypt_init follow a pattern to expose a + * status. A boolean is returned. True indicates success, and false indicates + * failure. On failure a status on the handle is set, and is accessible with a + * corresponding status function. E.g. @ref mongocrypt_ctx_status. + */ + public static class mongocrypt_status_t extends PointerType { + } + + /** + * Contains all options passed on initialization of a @ref mongocrypt_ctx_t. + */ + public static class mongocrypt_opts_t extends PointerType { + } + + /** + * A non-owning view of a byte buffer. + *
<p>
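+ * A hedged ownership sketch (the buffer contents are arbitrary):
+ * <pre>{@code
+ * Memory buffer = new Memory(4);
+ * buffer.write(0, new byte[] {1, 2, 3, 4}, 0, 4);
+ * mongocrypt_binary_t view = mongocrypt_binary_new_from_data(buffer, 4);
+ * // ... hand the view to libmongocrypt ...
+ * mongocrypt_binary_destroy(view); // frees only the view; `buffer` is still owned by the caller
+ * }</pre>
+ * <p>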
+ * Functions returning a mongocrypt_binary_t* expect it to be destroyed with + * mongocrypt_binary_destroy. + */ + public static class mongocrypt_binary_t extends PointerType { + // The `mongocrypt_binary_t` struct layout is part of libmongocrypt's ABI: + // typedef struct _mongocrypt_binary_t { + // void *data; + // uint32_t len; + // } mongocrypt_binary_t; + // To improve performance, fields are read directly using `getPointer` and `getInt`. + // This results in observed performance improvements over using of `mongocrypt_binary_data` and `mongocrypt_binary_len`. Refer: MONGOCRYPT-589. + public mongocrypt_binary_t() { + super(); + } + public Pointer data() { + return this.getPointer().getPointer(0); + } + public int len() { + int len = this.getPointer().getInt(Native.POINTER_SIZE); + // mongocrypt_binary_t represents length as an unsigned `uint32_t`. + // Representing `uint32_t` values greater than INT32_MAX is represented as a negative `int`. + // Throw an exception. mongocrypt_binary_t is not expected to use lengths greater than INT32_MAX. + if (len < 0) { + throw new AssertionError( + String.format("Expected mongocrypt_binary_t length to be non-negative, got: %d", len)); + } + return len; + + } + } + + /** + * The top-level handle to libmongocrypt. + *
<p>
+ * Create a mongocrypt_t handle to perform operations within libmongocrypt: + * encryption, decryption, registering log callbacks, etc. + *
<p>
+ * Functions on a mongocrypt_t are thread safe, though functions on derived + * handle (e.g. mongocrypt_encryptor_t) are not and must be owned by a single + * thread. See each handle's documentation for thread-safety considerations. + *
<p>
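+ * A hedged lifecycle sketch; the option setters are elided, and {@code mongocrypt_destroy} is
+ * assumed to be declared further down in this class:
+ * <pre>{@code
+ * mongocrypt_t crypt = mongocrypt_new();
+ * // ... mongocrypt_setopt_* calls go here, before initialization ...
+ * if (!mongocrypt_init(crypt)) {
+ *     // on failure, inspect the error via mongocrypt_status(crypt, status)
+ * }
+ * // ... the initialized handle may now be used, including from multiple threads ...
+ * mongocrypt_destroy(crypt);
+ * }</pre>
+ * <p>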
+ * Multiple mongocrypt_t handles may be created. + */ + public static class mongocrypt_t extends PointerType { + } + + /** + * Manages the state machine for encryption or decryption. + */ + public static class mongocrypt_ctx_t extends PointerType { + } + + /** + * Manages a single KMS HTTP request/response. + */ + public static class mongocrypt_kms_ctx_t extends PointerType { + } + + /** + * Returns the version string x.y.z for libmongocrypt. + * + * @param len an optional length of the returned string. May be NULL. + * @return the version string x.y.z for libmongocrypt. + */ + public static native cstring + mongocrypt_version(Pointer len); + + + /** + * Create a new non-owning view of a buffer (data + length). + *
<p>
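+ * A hedged out-parameter sketch (the libmongocrypt call that fills the view is elided):
+ * <pre>{@code
+ * mongocrypt_binary_t out = mongocrypt_binary_new();
+ * // ... a libmongocrypt call writes data it owns into the view ...
+ * byte[] copied = mongocrypt_binary_data(out).getByteArray(0, mongocrypt_binary_len(out));
+ * mongocrypt_binary_destroy(out);
+ * }</pre>
+ * <p>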
+ * Use this to create a mongocrypt_binary_t used for output parameters. + * + * @return A new mongocrypt_binary_t. + */ + public static native mongocrypt_binary_t + mongocrypt_binary_new(); + + + /** + * Create a new non-owning view of a buffer (data + length). + * + * @param data A pointer to an array of bytes. This is not copied. data must outlive the binary object. + * @param len The length of the @p data byte array. + * @return A new mongocrypt_binary_t. + */ + public static native mongocrypt_binary_t + mongocrypt_binary_new_from_data(Pointer data, int len); + + + /** + * Get a pointer to the referenced data. + * + * @param binary The @ref mongocrypt_binary_t. + * @return A pointer to the referenced data. + */ + public static native Pointer + mongocrypt_binary_data(mongocrypt_binary_t binary); + + + /** + * Get the length of the referenced data. + * + * @param binary The @ref mongocrypt_binary_t. + * @return The length of the referenced data. + */ + public static native int + mongocrypt_binary_len(mongocrypt_binary_t binary); + + + /** + * Free the @ref mongocrypt_binary_t. + *
<p>
+ * This does not free the referenced data. Refer to individual function + * documentation to determine the lifetime guarantees of the underlying + * data. + * + * @param binary The mongocrypt_binary_t destroy. + */ + public static native void + mongocrypt_binary_destroy(mongocrypt_binary_t binary); + + + public static final int MONGOCRYPT_STATUS_OK = 0; + public static final int MONGOCRYPT_STATUS_ERROR_CLIENT = 1; + public static final int MONGOCRYPT_STATUS_ERROR_KMS = 2; + + /** + * Create a new status object. + *
<p>
+ * Use a new status object to retrieve the status from a handle by passing + * this as an out-parameter to functions like @ref mongocrypt_ctx_status. + * When done, destroy it with @ref mongocrypt_status_destroy. + * + * @return A new status object. + */ + public static native mongocrypt_status_t + mongocrypt_status_new(); + + /** + * Set a status object with message, type, and code. + *
<p>
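+ * An illustrative sketch (not upstream documentation) of a hook reporting failure; the real
+ * HMAC computation is elided:
+ * <pre>{@code
+ * mongocrypt_hmac_fn hmacSha512 = (ctx, key, in, out, status) -> {
+ *     // illustrative message and code; -1 lets libmongocrypt determine the message length
+ *     mongocrypt_status_set(status, MONGOCRYPT_STATUS_ERROR_CLIENT, 1,
+ *             new cstring("hmac failed"), -1);
+ *     return false;
+ * };
+ * }</pre>
+ * <p>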
+ * Use this to set the mongocrypt_status_t given in the crypto hooks. + * + * @param status The status. + * @param type The status type. + * @param code The status code. + * @param message The message. + * @param message_len The length of @p message. Pass -1 to determine the * string length with strlen (must * be NULL terminated). + */ + public static native void + mongocrypt_status_set(mongocrypt_status_t status, + int type, + int code, + cstring message, + int message_len); + + /** + * Indicates success or the type of error. + * + * @param status The status object. + * @return A @ref mongocrypt_status_type_t. + */ + + public static native int + mongocrypt_status_type(mongocrypt_status_t status); + + + /** + * Get an error code or 0. + * + * @param status The status object. + * @return An error code. + */ + public static native int + mongocrypt_status_code(mongocrypt_status_t status); + + + /** + * Get the error message associated with a status, or an empty string. + * + * @param status The status object. + * @param len an optional length of the returned string. May be NULL. + * @return An error message or an empty string. + */ + public static native cstring + mongocrypt_status_message(mongocrypt_status_t status, Pointer len); + + + /** + * Returns true if the status indicates success. + * + * @param status The status to check. + * @return A boolean indicating success. + */ + public static native boolean + mongocrypt_status_ok(mongocrypt_status_t status); + + + /** + * Free the memory for a status object. + * + * @param status The status to destroy. + */ + public static native void + mongocrypt_status_destroy(mongocrypt_status_t status); + + + public static final int MONGOCRYPT_LOG_LEVEL_FATAL = 0; + public static final int MONGOCRYPT_LOG_LEVEL_ERROR = 1; + public static final int MONGOCRYPT_LOG_LEVEL_WARNING = 2; + public static final int MONGOCRYPT_LOG_LEVEL_INFO = 3; + public static final int MONGOCRYPT_LOG_LEVEL_TRACE = 4; + + + /** + * A log callback function. Set a custom log callback with mongocrypt_setopt_log_handler. + */ + public interface mongocrypt_log_fn_t extends Callback { + void log(int level, cstring message, int message_len, Pointer ctx); + } + + public interface mongocrypt_crypto_fn extends Callback { + boolean crypt(Pointer ctx, mongocrypt_binary_t key, mongocrypt_binary_t iv, mongocrypt_binary_t in, + mongocrypt_binary_t out, Pointer bytesWritten, mongocrypt_status_t status); + } + + public interface mongocrypt_hmac_fn extends Callback { + boolean hmac(Pointer ctx, mongocrypt_binary_t key, mongocrypt_binary_t in, mongocrypt_binary_t out, + mongocrypt_status_t status); + } + + public interface mongocrypt_hash_fn extends Callback { + boolean hash(Pointer ctx, mongocrypt_binary_t in, mongocrypt_binary_t out, mongocrypt_status_t status); + } + + public interface mongocrypt_random_fn extends Callback { + boolean random(Pointer ctx, mongocrypt_binary_t out, int count, mongocrypt_status_t status); + } + + /** + * Allocate a new @ref mongocrypt_t object. + *
<p>
+ * Initialize with @ref mongocrypt_init. When done, free with @ref + * mongocrypt_destroy. + * + * @return A new @ref mongocrypt_t object. + */ + public static native mongocrypt_t + mongocrypt_new(); + + /** + * Set a handler to get called on every log message. + * + * @param crypt The @ref mongocrypt_t object. + * @param log_fn The log callback. + * @param log_ctx A context passed as an argument to the log callback every + * invokation. + * @return A boolean indicating success. + */ + public static native boolean + mongocrypt_setopt_log_handler(mongocrypt_t crypt, + mongocrypt_log_fn_t log_fn, + Pointer log_ctx); + + + public static native boolean + mongocrypt_setopt_crypto_hooks(mongocrypt_t crypt, + mongocrypt_crypto_fn aes_256_cbc_encrypt, + mongocrypt_crypto_fn aes_256_cbc_decrypt, + mongocrypt_random_fn random, + mongocrypt_hmac_fn hmac_sha_512, + mongocrypt_hmac_fn hmac_sha_256, + mongocrypt_hash_fn sha_256, + Pointer ctx); + + /** + * Set a crypto hook for the AES256-CTR operations. + * + * @param crypt The @ref mongocrypt_t object. + * @param aes_256_ctr_encrypt The crypto callback function for encrypt + * operation. + * @param aes_256_ctr_decrypt The crypto callback function for decrypt + * operation. + * @param ctx A context passed as an argument to the crypto callback + * every invocation. + * @return A boolean indicating success. If false, an error status is set. + * Retrieve it with @ref mongocrypt_status + * + */ + public static native boolean + mongocrypt_setopt_aes_256_ctr (mongocrypt_t crypt, + mongocrypt_crypto_fn aes_256_ctr_encrypt, + mongocrypt_crypto_fn aes_256_ctr_decrypt, + Pointer ctx); + + /** + * Set a crypto hook for the RSASSA-PKCS1-v1_5 algorithm with a SHA-256 hash. + * + *
<p>See: https://tools.ietf.org/html/rfc3447#section-8.2</p>
+ * + *
<p>Note: this function has the wrong name. It should be:
+ * mongocrypt_setopt_crypto_hook_sign_rsassa_pkcs1_v1_5</p>
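+ * <p>A hedged registration sketch; {@code crypt} is a not-yet-initialized handle and the
+ * signing body is elided:
+ * <pre>{@code
+ * mongocrypt_hmac_fn signer = (ctx, key, in, out, status) -> {
+ *     return true; // a real hook signs `in` into `out`, reporting failures via `status`
+ * };
+ * mongocrypt_setopt_crypto_hook_sign_rsaes_pkcs1_v1_5(crypt, signer, null);
+ * }</pre>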
+ * + * @param crypt The @ref mongocrypt_t object. + * @param sign_rsaes_pkcs1_v1_5 The crypto callback function. + * @param sign_ctx A context passed as an argument to the crypto callback + * every invocation. + * @return A boolean indicating success. If false, an error status is set. + * Retrieve it with @ref mongocrypt_status + */ + public static native boolean + mongocrypt_setopt_crypto_hook_sign_rsaes_pkcs1_v1_5( + mongocrypt_t crypt, + mongocrypt_hmac_fn sign_rsaes_pkcs1_v1_5, + Pointer sign_ctx); + + /** + * Set a handler to get called on every log message. + * + * @param crypt The @ref mongocrypt_t object. + * @param aws_access_key_id The AWS access key ID used to generate KMS + * messages. + * @param aws_access_key_id_len The string length (in bytes) of @p + * * aws_access_key_id. Pass -1 to determine the string length with strlen (must + * * be NULL terminated). + * @param aws_secret_access_key The AWS secret access key used to generate + * KMS messages. + * @param aws_secret_access_key_len The string length (in bytes) of @p + * aws_secret_access_key. Pass -1 to determine the string length with strlen + * (must be NULL terminated). + * @return A boolean indicating success. + */ + public static native boolean + mongocrypt_setopt_kms_provider_aws(mongocrypt_t crypt, + cstring aws_access_key_id, + int aws_access_key_id_len, + cstring aws_secret_access_key, + int aws_secret_access_key_len); + + /** + * Configure a local KMS provider on the @ref mongocrypt_t object. + * + * @param crypt The @ref mongocrypt_t object. + * @param key A 64 byte master key used to encrypt and decrypt key vault keys. + * @return A boolean indicating success. + */ + public static native boolean + mongocrypt_setopt_kms_provider_local(mongocrypt_t crypt, + mongocrypt_binary_t key); + + /** + * Configure KMS providers with a BSON document. + * + * @param crypt The @ref mongocrypt_t object. + * @param kms_providers A BSON document mapping the KMS provider names to credentials. + * @return A boolean indicating success. If false, an error status is set. + * @since 1.1 + */ + public static native boolean + mongocrypt_setopt_kms_providers(mongocrypt_t crypt, + mongocrypt_binary_t kms_providers); + + /** + * Set a local schema map for encryption. + * + * @param crypt The @ref mongocrypt_t object. + * @param schema_map A BSON document representing the schema map supplied by + * the user. The keys are collection namespaces and values are JSON schemas. + * @return A boolean indicating success. If false, an error status is set. + * Retrieve it with @ref mongocrypt_status + */ + public static native boolean + mongocrypt_setopt_schema_map (mongocrypt_t crypt, mongocrypt_binary_t schema_map); + + /** + * Opt-into setting KMS providers before each KMS request. + * + * If set, before entering the MONGOCRYPT_CTX_NEED_KMS state, + * contexts will enter the MONGOCRYPT_CTX_NEED_KMS_CREDENTIALS state + * and then wait for credentials to be supplied through @ref mongocrypt_ctx_provide_kms_providers. + * + * @param crypt The @ref mongocrypt_t object to update + */ + public static native void + mongocrypt_setopt_use_need_kms_credentials_state (mongocrypt_t crypt); + + + /** + * Set a local EncryptedFieldConfigMap for encryption. + * + * @param crypt The @ref mongocrypt_t object. + * @param encryptedFieldConfigMap A BSON document representing the EncryptedFieldConfigMap + * supplied by the user. The keys are collection namespaces and values are + * EncryptedFieldConfigMap documents. The viewed data copied. 
It is valid to + * destroy @p efc_map with @ref mongocrypt_binary_destroy immediately after. + * @return A boolean indicating success. If false, an error status is set. + * Retrieve it with @ref mongocrypt_status + */ + public static native boolean + mongocrypt_setopt_encrypted_field_config_map (mongocrypt_t crypt, mongocrypt_binary_t encryptedFieldConfigMap); + + /** + * Opt-into skipping query analysis. + * + *
<p>If opted in:
+ * <ul>
+ *   <li>The crypt_shared shared library will not attempt to be loaded.</li>
+ *   <li>A mongocrypt_ctx_t will never enter the MONGOCRYPT_CTX_NEED_MARKINGS state.</li>
+ * </ul>
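+ *
+ * <p>A hedged usage sketch; like other option setters, this is assumed to be called between
+ * {@code mongocrypt_new} and {@code mongocrypt_init}:
+ * <pre>{@code
+ * mongocrypt_t crypt = mongocrypt_new();
+ * mongocrypt_setopt_bypass_query_analysis(crypt);
+ * mongocrypt_init(crypt);
+ * }</pre>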
+ *
+ * @param crypt The @ref mongocrypt_t object to update
+ * @since 1.5
+ */
+ public static native void
+ mongocrypt_setopt_bypass_query_analysis (mongocrypt_t crypt);
+
+ /**
+ * Set the expiration time for the data encryption key cache. Defaults to 60 seconds if not set.
+ *
+ * @param crypt The @ref mongocrypt_t object to update
+ * @param cache_expiration_ms the cache expiration time in milliseconds; if 0, the cache never expires
+ * @return A boolean indicating success. If false, an error status is set.
+ * @since 5.4
+ */
+ public static native boolean
+ mongocrypt_setopt_key_expiration (mongocrypt_t crypt, long cache_expiration_ms);
+
+ /**
+ * Opt-into enabling sending multiple collection info documents.
+ *
+ * @param crypt The @ref mongocrypt_t object to update
+ */
+ public static native void
+ mongocrypt_setopt_enable_multiple_collinfo (mongocrypt_t crypt);
+
+ /**
+ * Set the contention factor used for explicit encryption.
+ * The contention factor is only used for indexed Queryable Encryption.
+ *
+ * @param ctx The @ref mongocrypt_ctx_t object.
+ * @param contention_factor the contention factor
+ * @return A boolean indicating success. If false, an error status is set.
+ * Retrieve it with @ref mongocrypt_ctx_status.
+ * @since 1.5
+ */
+ public static native boolean
+ mongocrypt_ctx_setopt_contention_factor (mongocrypt_ctx_t ctx, long contention_factor);
+
+ /**
+ * Set the index key id to use for Queryable Encryption explicit encryption.
+ *
+ * If the index key id is not set, the key id from @ref mongocrypt_ctx_setopt_key_id is used.
+ *
+ * @param ctx The @ref mongocrypt_ctx_t object.
+ * @param key_id The binary corresponding to the _id (a UUID) of the data key to use from
+ * the key vault collection. Note, the UUID must be encoded with RFC-4122 byte order.
+ * The viewed data is copied. It is valid to destroy key_id with @ref mongocrypt_binary_destroy immediately after.
+ * @return A boolean indicating success. If false, an error status is set.
+ * Retrieve it with @ref mongocrypt_ctx_status
+ * @since 1.5
+ */
+ public static native boolean
+ mongocrypt_ctx_setopt_index_key_id (mongocrypt_ctx_t ctx, mongocrypt_binary_t key_id);
+
+ /**
+ * Append an additional search directory to the search path for loading
+ * the crypt_shared dynamic library.
+ *
+ * @param crypt The @ref mongocrypt_t object to update
+ * @param path A null-terminated sequence of bytes for the search path. On
+ * some filesystems, this may be arbitrary bytes. On other filesystems, this may
+ * be required to be a valid UTF-8 code unit sequence. If the leading element of
+ * the path is the literal string "$ORIGIN", that substring will be replaced
+ * with the directory path containing the executable libmongocrypt module. If
+ * the path string is literal "$SYSTEM", then libmongocrypt will defer to the
+ * system's library resolution mechanism to find the crypt_shared library.
+ *
+ * <p>If no crypt_shared dynamic library is found in any of the directories
+ * specified by the search paths loaded here, @ref mongocrypt_init() will still
+ * succeed and continue to operate without crypt_shared.</p>
+ *
+ * <p>The search paths are searched in the order that they are appended. This
+ * allows one to provide a precedence in how the library will be discovered. For
+ * example, appending known directories before appending "$SYSTEM" will allow
+ * one to supersede the system's installed library, but still fall back to it if
+ * the library wasn't found otherwise. If one does not ever append "$SYSTEM",
+ * then the system's library-search mechanism will never be consulted.</p>
+ *
+ * <p>If an absolute path to the library is specified using @ref mongocrypt_setopt_set_crypt_shared_lib_path_override,
+ * then paths appended here will have no effect.</p>
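+ *
+ * <p>A minimal usage sketch (the directory shown is a hypothetical example, not
+ * part of this API):</p>
+ * <pre>{@code
+ * // Prefer a known directory, then fall back to the system's own search mechanism.
+ * mongocrypt_setopt_append_crypt_shared_lib_search_path(crypt, new cstring("/opt/mongodb/lib"));
+ * mongocrypt_setopt_append_crypt_shared_lib_search_path(crypt, new cstring("$SYSTEM"));
+ * }</pre>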
+ * @since 1.5
+ */
+ public static native void
+ mongocrypt_setopt_append_crypt_shared_lib_search_path (mongocrypt_t crypt, cstring path);
+
+ /**
+ * Set a single override path for loading the crypt_shared dynamic library.
+ * @param crypt The @ref mongocrypt_t object to update
+ * @param path A null-terminated sequence of bytes for a path to the crypt_shared
+ * dynamic library. On some filesystems, this may be arbitrary bytes. On other
+ * filesystems, this may be required to be a valid UTF-8 code unit sequence. If
+ * the leading element of the path is the literal string `$ORIGIN`, that
+ * substring will be replaced with the directory path containing the executable
+ * libmongocrypt module.
+ *
+ * <p>This function will do no IO nor path validation. All validation will
+ * occur during the call to @ref mongocrypt_init.</p>
+ *
+ * <p>If a crypt_shared library path override is specified here, then no paths given
+ * to @ref mongocrypt_setopt_append_crypt_shared_lib_search_path will be consulted when
+ * opening the crypt_shared library.</p>
+ *
+ * <p>If a path is provided via this API and @ref mongocrypt_init fails to
+ * initialize a valid crypt_shared library instance for the path specified, then
+ * the initialization of mongocrypt_t will fail with an error.</p>
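+ *
+ * <p>A minimal sketch (the path shown is hypothetical):</p>
+ * <pre>{@code
+ * mongocrypt_setopt_set_crypt_shared_lib_path_override(crypt,
+ *         new cstring("/opt/mongodb/lib/mongo_crypt_v1.so"));
+ * }</pre>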
+ * @since 1.5
+ */
+ public static native void
+ mongocrypt_setopt_set_crypt_shared_lib_path_override(mongocrypt_t crypt, cstring path);
+
+ /**
+ * Set the query type to use for Queryable Encryption explicit encryption.
+ * The query type is only used for indexed Queryable Encryption.
+ *
+ * @param ctx The @ref mongocrypt_ctx_t object.
+ * @param query_type the query type
+ * @param len the string length of @p query_type; pass -1 to determine the
+ * string length with strlen (must be NULL terminated)
+ * @return A boolean indicating success. If false, an error status is set.
+ * Retrieve it with @ref mongocrypt_ctx_status
+ */
+ public static native boolean
+ mongocrypt_ctx_setopt_query_type (mongocrypt_ctx_t ctx, cstring query_type, int len);
+
+ /**
+ * Set options for explicit encryption with the "range" algorithm.
+ * NOTE: "range" is currently unstable API and subject to backwards breaking changes.
+ *
+ * opts is a BSON document of the form:
+ * {
+ * "min": Optional<BSON value>,
+ * "max": Optional<BSON value>,
+ * "sparsity": Int64,
+ * "precision": Optional<Int32>,
+ * "trimFactor": Optional<Int32>
+ * }
+ *
+ * @param ctx The @ref mongocrypt_ctx_t object.
+ * @param opts BSON.
+ * @return A boolean indicating success. If false, an error status is set.
+ * @since 1.7
+ */
+ public static native boolean
+ mongocrypt_ctx_setopt_algorithm_range (mongocrypt_ctx_t ctx, mongocrypt_binary_t opts);
+
+
+ /**
+ * Set options for explicit encryption with the "textPreview" algorithm. "prefix" and "suffix" can both be set.
+ * NOTE: "textPreview" is experimental only and may be removed in a future non-major release.
+ * opts is a BSON document of the form:
+ *
+ * {
+ * "caseSensitive": bool,
+ * "diacriticSensitive": bool,
+ * "prefix": Optional{
+ * "strMaxQueryLength": Int32,
+ * "strMinQueryLength": Int32,
+ * },
+ * "suffix": Optional{
+ * "strMaxQueryLength": Int32,
+ * "strMinQueryLength": Int32,
+ * },
+ * "substring": Optional{
+ * "strMaxLength": Int32,
+ * "strMaxQueryLength": Int32,
+ * "strMinQueryLength": Int32,
+ * },
+ * }
+ *
+ * @param ctx The @ref mongocrypt_ctx_t object.
+ * @param opts BSON.
+ * @return A boolean indicating success. If false, an error status is set.
+ * @since 5.6
+ */
+ public static native boolean mongocrypt_ctx_setopt_algorithm_text(mongocrypt_ctx_t ctx, mongocrypt_binary_t opts);
+
+ /**
+ * Initialize a new @ref mongocrypt_t object.
+ *
+ * @param crypt The @ref mongocrypt_t object.
+ * @return A boolean indicating success. Failure may occur if previously set options are invalid.
+ */
+ public static native boolean
+ mongocrypt_init(mongocrypt_t crypt);
+
+ /**
+ * Get the status associated with a @ref mongocrypt_t object.
+ *
+ * @param crypt The @ref mongocrypt_t object.
+ * @param status Receives the status.
+ * @return A boolean indicating success.
+ */
+ public static native boolean
+ mongocrypt_status(mongocrypt_t crypt, mongocrypt_status_t status);
+
+ /**
+ * Returns true if libmongocrypt was built with native crypto support.
+ *
+ * <p>
+ * If libmongocrypt was not built with native crypto support, setting crypto hooks is required.
+ * </p>
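+ *
+ * <p>For example, a caller might guard hook registration with a check like this
+ * sketch (hook wiring omitted):</p>
+ * <pre>{@code
+ * if (!mongocrypt_is_crypto_available()) {
+ *     // register crypto hooks via mongocrypt_setopt_crypto_hooks before mongocrypt_init
+ * }
+ * }</pre>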
+ *
+ * @return true if libmongocrypt was built with native crypto support
+ */
+ public static native boolean
+ mongocrypt_is_crypto_available();
+
+ /**
+ * Destroy the @ref mongocrypt_t object.
+ *
+ * @param crypt The @ref mongocrypt_t object to destroy.
+ */
+ public static native void
+ mongocrypt_destroy(mongocrypt_t crypt);
+
+ /**
+ * Obtain a nul-terminated version string of the loaded crypt_shared dynamic library,
+ * if available.
+ *
+ * If no crypt_shared was successfully loaded, this function returns NULL.
+ *
+ * @param crypt The mongocrypt_t object after a successful call to mongocrypt_init.
+ * @param len an optional length of the returned string. May be NULL.
+ *
+ * @return A nul-terminated version string of the dynamically loaded crypt_shared library.
+ * @since 1.5
+ */
+ public static native cstring
+ mongocrypt_crypt_shared_lib_version_string (mongocrypt_t crypt, Pointer len);
+
+ /**
+ * Call in response to the MONGOCRYPT_CTX_NEED_KMS_CREDENTIALS state
+ * to set per-context KMS provider settings. These follow the same format
+ * as @ref mongocrypt_setopt_kms_providers. If no keys are present in the
+ * BSON input, the KMS provider settings configured for the @ref mongocrypt_t
+ * at initialization are used.
+ *
+ * @param ctx The @ref mongocrypt_ctx_t object.
+ * @param kms_providers A BSON document mapping the KMS provider names
+ * to credentials.
+ * @return A boolean indicating success. If false, an error status is set.
+ * Retrieve it with @ref mongocrypt_ctx_status.
+ */
+ public static native boolean
+ mongocrypt_ctx_provide_kms_providers (mongocrypt_ctx_t ctx,
+ mongocrypt_binary_t kms_providers);
+
+ /**
+ * Set the key id to use for explicit encryption.
+ *
+ * @param ctx The @ref mongocrypt_ctx_t object.
+ * @param key_id The key_id to use.
+ * @return A boolean indicating success.
+ */
+ public static native boolean
+ mongocrypt_ctx_setopt_key_id (mongocrypt_ctx_t ctx,
+ mongocrypt_binary_t key_id);
+
+ /**
+ * Set the keyAltName to use for explicit encryption.
+ * keyAltName should be the binary encoding of a BSON document
+ * with the following format: { "keyAltName" : <BSON UTF8 value> }
+ *
+ * <p>
+ * It is an error to set both this and the key id.
+ * </p>
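+ *
+ * <p>A sketch of building the expected payload with the driver's BSON API (the
+ * alias shown is a placeholder):</p>
+ * <pre>{@code
+ * BsonDocument keyAltNameDoc = new BsonDocument("keyAltName", new BsonString("myKeyAlias"));
+ * // encode keyAltNameDoc to raw BSON and wrap it in a mongocrypt_binary_t
+ * }</pre>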
+ *
+ * @param ctx The @ref mongocrypt_ctx_t object.
+ * @param key_alt_name The name to use.
+ * @return A boolean indicating success. If false, an error status is set.
+ * Retrieve it with @ref mongocrypt_ctx_status
+ */
+ public static native boolean
+ mongocrypt_ctx_setopt_key_alt_name (mongocrypt_ctx_t ctx,
+ mongocrypt_binary_t key_alt_name);
+
+ /**
+ * Set the keyMaterial to use for encrypting data.
+ *
+ * <p>
+ * Pass the binary encoding of a BSON document like the following:
+ * { "keyMaterial" : (BSON BINARY value) }
+ * </p>
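+ *
+ * <p>For example (sketch; the zero-filled 96-byte array stands in for real key
+ * material):</p>
+ * <pre>{@code
+ * BsonDocument keyMaterialDoc = new BsonDocument("keyMaterial", new BsonBinary(new byte[96]));
+ * }</pre>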
+ * + * @param ctx The @ref mongocrypt_ctx_t object. + * @param key_material The data encryption key to use. The viewed data is + * copied. It is valid to destroy @p key_material with @ref + * mongocrypt_binary_destroy immediately after. + * @return A boolean indicating success. If false, an error status is set. + * Retrieve it with @ref mongocrypt_ctx_status + */ + public static native boolean + mongocrypt_ctx_setopt_key_material (mongocrypt_ctx_t ctx, mongocrypt_binary_t key_material); + + /** + * Set the algorithm used for encryption to either + * deterministic or random encryption. This value + * should only be set when using explicit encryption. + * + * If -1 is passed in for "len", then "algorithm" is + * assumed to be a null-terminated string. + * + * Valid values for algorithm are: + * "AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic" + * "AEAD_AES_256_CBC_HMAC_SHA_512-Randomized" + * + * @param ctx The @ref mongocrypt_ctx_t object. + * @param algorithm A string specifying the algorithm to + * use for encryption. + * @param len The length of the algorithm string. + * @return A boolean indicating success. + */ + public static native boolean + mongocrypt_ctx_setopt_algorithm (mongocrypt_ctx_t ctx, + cstring algorithm, + int len); + + + /** + * Create a new uninitialized @ref mongocrypt_ctx_t. + *
+ * <p>
+ * Initialize the context with functions like @ref mongocrypt_ctx_encrypt_init.
+ * When done, destroy it with @ref mongocrypt_ctx_destroy.
+ *
+ * @param crypt The @ref mongocrypt_t object.
+ * @return A new context.
+ */
+ public static native mongocrypt_ctx_t
+ mongocrypt_ctx_new(mongocrypt_t crypt);
+
+
+ /**
+ * Get the status associated with a @ref mongocrypt_ctx_t object.
+ *
+ * @param ctx The @ref mongocrypt_ctx_t object.
+ * @param status Receives the status.
+ * @return A boolean indicating success.
+ */
+ public static native boolean
+ mongocrypt_ctx_status(mongocrypt_ctx_t ctx, mongocrypt_status_t status);
+
+
+ /**
+ * Identify the AWS KMS master key to use for creating a data key.
+ *
+ * @param ctx The @ref mongocrypt_ctx_t object.
+ * @param region The AWS region.
+ * @param region_len The string length of @p region. Pass -1 to determine
+ * the string length with strlen (must be NULL terminated).
+ * @param cmk The Amazon Resource Name (ARN) of the customer master key
+ * (CMK).
+ * @param cmk_len The string length of @p cmk. Pass -1 to determine the
+ * string length with strlen (must be NULL terminated).
+ * @return A boolean indicating success.
+ */
+ public static native boolean
+ mongocrypt_ctx_setopt_masterkey_aws (mongocrypt_ctx_t ctx,
+ cstring region,
+ int region_len,
+ cstring cmk,
+ int cmk_len);
+
+ /**
+ * Identify a custom AWS endpoint when creating a data key.
+ * This is used internally to construct the correct HTTP request
+ * (with the Host header set to this endpoint). This endpoint
+ * is persisted in the new data key, and will be returned via
+ * mongocrypt_kms_ctx_endpoint.
+ *
+ * @param ctx The @ref mongocrypt_ctx_t object.
+ * @param endpoint The endpoint.
+ * @param endpoint_len The string length of @p endpoint. Pass -1 to
+ * determine the string length with strlen (must be NULL terminated).
+ * @return A boolean indicating success. If false, an error status is set.
+ * Retrieve it with @ref mongocrypt_ctx_status
+ */
+ public static native boolean
+ mongocrypt_ctx_setopt_masterkey_aws_endpoint (mongocrypt_ctx_t ctx,
+ cstring endpoint,
+ int endpoint_len);
+
+
+ /**
+ * Set the master key to "local" for creating a data key.
+ *
+ * @param ctx The @ref mongocrypt_ctx_t object.
+ * @return A boolean indicating success.
+ */
+ public static native boolean
+ mongocrypt_ctx_setopt_masterkey_local (mongocrypt_ctx_t ctx);
+
+ /**
+ * Set key encryption key document for creating a data key.
+ *
+ * @param ctx The @ref mongocrypt_ctx_t object.
+ * @param keyDocument BSON representing the key encryption key document.
+ * @return A boolean indicating success. If false, an error status is set.
+ * @since 1.1
+ */
+ public static native boolean
+ mongocrypt_ctx_setopt_key_encryption_key(mongocrypt_ctx_t ctx,
+ mongocrypt_binary_t keyDocument);
+
+ /**
+ * Initialize a context to create a data key.
+ *
+ * Set options before using @ref mongocrypt_ctx_setopt_masterkey_aws and
+ * @ref mongocrypt_ctx_setopt_masterkey_local.
+ *
+ * @param ctx The @ref mongocrypt_ctx_t object.
+ * @return A boolean indicating success.
+ *
+ * Assumes a master key option has been set, and an associated KMS provider
+ * has been set on the parent @ref mongocrypt_t.
+ */
+ public static native boolean
+ mongocrypt_ctx_datakey_init (mongocrypt_ctx_t ctx);
+
+ /**
+ * Initialize a context for encryption.
+ *
+ * Associated options:
+ * - @ref mongocrypt_ctx_setopt_cache_noblock
+ * - @ref mongocrypt_ctx_setopt_schema
+ *
+ * @param ctx The @ref mongocrypt_ctx_t object.
+ * @param db The database name.
+ * @param db_len The byte length of @p db. Pass -1 to determine the string length with strlen (must be NULL terminated).
+ * @param cmd The BSON command to be encrypted.
+ * @return A boolean indicating success. If false, an error status is set.
+ * Retrieve it with @ref mongocrypt_ctx_status
+ */
+ public static native boolean
+ mongocrypt_ctx_encrypt_init(mongocrypt_ctx_t ctx,
+ cstring db,
+ int db_len,
+ mongocrypt_binary_t cmd);
+
+ /**
+ * Explicit helper method to encrypt a single BSON object. Contexts
+ * created for explicit encryption will not go through mongocryptd.
+ *
+ * To specify a key_id, algorithm, or iv to use, please use the
+ * corresponding mongocrypt_setopt methods before calling this.
+ *
+ * This method expects the passed-in BSON to be of the form:
+ * { "v" : BSON value to encrypt }
+ *
+ * @param ctx A @ref mongocrypt_ctx_t.
+ * @param msg A @ref mongocrypt_binary_t the plaintext BSON value.
+ * @return A boolean indicating success.
+ */
+ public static native boolean
+ mongocrypt_ctx_explicit_encrypt_init (mongocrypt_ctx_t ctx,
+ mongocrypt_binary_t msg);
+
+ /**
+ * Explicit helper method to encrypt a Match Expression or Aggregate Expression.
+ * Contexts created for explicit encryption will not go through mongocryptd.
+ * Requires query_type to be "range".
+ * NOTE: "range" is currently unstable API and subject to backwards breaking changes.
+ *
+ * This method expects the passed-in BSON to be of the form:
+ * { "v" : FLE2RangeFindDriverSpec }
+ *
+ * FLE2RangeFindDriverSpec is a BSON document with one of these forms:
+ *
+ * 1. A Match Expression of this form:
+ * {$and: [{<field>: {<op>: <value1>}}, {<field>: {<op>: <value2>}}]}
+ * 2. An Aggregate Expression of this form:
+ * {$and: [{<op>: [<fieldpath>, <value1>]}, {<op>: [<fieldpath>, <value2>]}]}
+ *
+ * <op> may be $lt, $lte, $gt, or $gte.
+ *
+ * The value of "v" is expected to be the BSON value passed to a driver
+ * ClientEncryption.encryptExpression helper.
+ *
+ * Associated options for FLE 1:
+ * - @ref mongocrypt_ctx_setopt_key_id
+ * - @ref mongocrypt_ctx_setopt_key_alt_name
+ * - @ref mongocrypt_ctx_setopt_algorithm
+ *
+ * Associated options for Queryable Encryption:
+ * - @ref mongocrypt_ctx_setopt_key_id
+ * - @ref mongocrypt_ctx_setopt_index_key_id
+ * - @ref mongocrypt_ctx_setopt_contention_factor
+ * - @ref mongocrypt_ctx_setopt_query_type
+ * - @ref mongocrypt_ctx_setopt_algorithm_range
+ *
+ * An error is returned if incompatible FLE 1 and Queryable Encryption options
+ * are set.
+ *
+ * @param ctx A @ref mongocrypt_ctx_t.
+ * @param msg A @ref mongocrypt_binary_t the plaintext BSON value.
+ * @return A boolean indicating success.
+ * @since 1.7
+ */
+ public static native boolean
+ mongocrypt_ctx_explicit_encrypt_expression_init (mongocrypt_ctx_t ctx,
+ mongocrypt_binary_t msg);
+
+ /**
+ * Initialize a context for decryption.
+ *
+ * @param ctx The mongocrypt_ctx_t object.
+ * @param doc The document to be decrypted.
+ * @return A boolean indicating success.
+ */
+ public static native boolean
+ mongocrypt_ctx_decrypt_init(mongocrypt_ctx_t ctx, mongocrypt_binary_t doc);
+
+
+ /**
+ * Explicit helper method to decrypt a single BSON object.
+ *
+ * @param ctx A @ref mongocrypt_ctx_t.
+ * @param msg A @ref mongocrypt_binary_t the encrypted BSON.
+ * @return A boolean indicating success.
+ */
+ public static native boolean
+ mongocrypt_ctx_explicit_decrypt_init (mongocrypt_ctx_t ctx,
+ mongocrypt_binary_t msg);
+
+ /**
+ * Initialize a context to rewrap datakeys.
+ *
+ * Associated options: {@link #mongocrypt_ctx_setopt_key_encryption_key(mongocrypt_ctx_t, mongocrypt_binary_t)}
+ *
+ * @param ctx The @ref mongocrypt_ctx_t object.
+ * @param filter The filter to use for the find command on the key vault collection to retrieve datakeys to rewrap.
+ * @return A boolean indicating success. If false, an error status is set.
+ * @since 1.5
+ */
+ public static native boolean
+ mongocrypt_ctx_rewrap_many_datakey_init (mongocrypt_ctx_t ctx, mongocrypt_binary_t filter);
+
+
+ public static final int MONGOCRYPT_CTX_ERROR = 0;
+ public static final int MONGOCRYPT_CTX_NEED_MONGO_COLLINFO = 1; /* run on main MongoClient */
+ public static final int MONGOCRYPT_CTX_NEED_MONGO_MARKINGS = 2; /* run on mongocryptd. */
+ public static final int MONGOCRYPT_CTX_NEED_MONGO_KEYS = 3; /* run on key vault */
+ public static final int MONGOCRYPT_CTX_NEED_KMS = 4;
+ public static final int MONGOCRYPT_CTX_READY = 5; /* ready for encryption/decryption */
+ public static final int MONGOCRYPT_CTX_DONE = 6;
+ public static final int MONGOCRYPT_CTX_NEED_KMS_CREDENTIALS = 7; /* fetch/renew KMS credentials */
+
+ public static final int MONGOCRYPT_INDEX_TYPE_NONE = 1;
+ public static final int MONGOCRYPT_INDEX_TYPE_EQUALITY = 2;
+ public static final int MONGOCRYPT_QUERY_TYPE_EQUALITY = 1;
+
+ /**
+ * Get the current state of a context.
+ *
+ * @param ctx The @ref mongocrypt_ctx_t object.
+ * @return A @ref mongocrypt_ctx_state_t.
+ */
+ public static native int
+ mongocrypt_ctx_state(mongocrypt_ctx_t ctx);
+
+
+ /**
+ * Get BSON necessary to run the mongo operation when mongocrypt_ctx_t
+ * is in MONGOCRYPT_CTX_NEED_MONGO_* states.
+ *
+ * <p>
+ * op_bson is a BSON document to be used for the operation.
+ * - For MONGOCRYPT_CTX_NEED_MONGO_COLLINFO it is a listCollections filter.
+ * - For MONGOCRYPT_CTX_NEED_MONGO_KEYS it is a find filter.
+ * - For MONGOCRYPT_CTX_NEED_MONGO_MARKINGS it is a JSON schema to append.
+ * </p>
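+ *
+ * <p>A simplified sketch of the driving loop (error handling and the KMS states
+ * omitted):</p>
+ * <pre>{@code
+ * int state = mongocrypt_ctx_state(ctx);
+ * while (state == MONGOCRYPT_CTX_NEED_MONGO_COLLINFO
+ *         || state == MONGOCRYPT_CTX_NEED_MONGO_KEYS
+ *         || state == MONGOCRYPT_CTX_NEED_MONGO_MARKINGS) {
+ *     mongocrypt_binary_t op = mongocrypt_binary_new();
+ *     mongocrypt_ctx_mongo_op(ctx, op);   // run this op against the appropriate target
+ *     // ... feed each resulting document with mongocrypt_ctx_mongo_feed ...
+ *     mongocrypt_ctx_mongo_done(ctx);
+ *     mongocrypt_binary_destroy(op);
+ *     state = mongocrypt_ctx_state(ctx);
+ * }
+ * }</pre>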
+ *
+ * @param ctx The @ref mongocrypt_ctx_t object.
+ * @param op_bson A BSON document for the MongoDB operation.
+ * @return A boolean indicating success.
+ */
+ public static native boolean
+ mongocrypt_ctx_mongo_op(mongocrypt_ctx_t ctx, mongocrypt_binary_t op_bson);
+
+
+ /**
+ * Feed a BSON reply or result when mongocrypt_ctx_t is in
+ * MONGOCRYPT_CTX_NEED_MONGO_* states. This may be called multiple times
+ * depending on the operation.
+ *
+ * <p>
+ * The fed reply is a BSON document result for the operation:
+ * - For MONGOCRYPT_CTX_NEED_MONGO_COLLINFO it is a doc from a listCollections
+ * cursor.
+ * - For MONGOCRYPT_CTX_NEED_MONGO_KEYS it is a doc from a find cursor.
+ * - For MONGOCRYPT_CTX_NEED_MONGO_MARKINGS it is a reply from mongocryptd.
+ *
+ * @param ctx The @ref mongocrypt_ctx_t object.
+ * @param reply A BSON document for the MongoDB operation.
+ * @return A boolean indicating success.
+ */
+ public static native boolean
+ mongocrypt_ctx_mongo_feed(mongocrypt_ctx_t ctx, mongocrypt_binary_t reply);
+
+
+ /**
+ * Call when done feeding the reply (or replies) back to the context.
+ *
+ * @param ctx The @ref mongocrypt_ctx_t object.
+ * @return A boolean indicating success.
+ */
+ public static native boolean
+ mongocrypt_ctx_mongo_done(mongocrypt_ctx_t ctx);
+
+ /**
+ * Get the next KMS handle.
+ *
+ * <p>
+ * Multiple KMS handles may be retrieved at once. Drivers may do this to fan + * out multiple concurrent KMS HTTP requests. Feeding multiple KMS requests + * is thread-safe. + *
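+ *
+ * <p>A sketch of a synchronous KMS round-trip (the TLS transport itself is
+ * omitted):</p>
+ * <pre>{@code
+ * mongocrypt_kms_ctx_t kms = mongocrypt_ctx_next_kms_ctx(ctx);
+ * while (kms != null) {
+ *     mongocrypt_binary_t msg = mongocrypt_binary_new();
+ *     mongocrypt_kms_ctx_message(kms, msg);   // the HTTP request to send
+ *     // ... send the request, then pass response bytes to mongocrypt_kms_ctx_feed
+ *     // until mongocrypt_kms_ctx_bytes_needed(kms) returns 0 ...
+ *     mongocrypt_binary_destroy(msg);
+ *     kms = mongocrypt_ctx_next_kms_ctx(ctx);
+ * }
+ * mongocrypt_ctx_kms_done(ctx);
+ * }</pre>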
+ * <p>
+ * If KMS handles are being handled synchronously, the driver can reuse the same
+ * TLS socket to send HTTP requests and receive responses.
+ *
+ * @param ctx A @ref mongocrypt_ctx_t.
+ * @return a new @ref mongocrypt_kms_ctx_t or NULL.
+ */
+ public static native mongocrypt_kms_ctx_t
+ mongocrypt_ctx_next_kms_ctx(mongocrypt_ctx_t ctx);
+
+ /**
+ * Get the KMS provider identifier associated with this KMS request.
+ *
+ * This is used to conditionally configure TLS connections based on the KMS
+ * request. It is useful for KMIP, which authenticates with a client
+ * certificate.
+ *
+ * @param kms The mongocrypt_kms_ctx_t object.
+ * @param len Receives the length of the returned string.
+ *
+ * @return The name of the KMS provider
+ */
+ public static native cstring
+ mongocrypt_kms_ctx_get_kms_provider(mongocrypt_kms_ctx_t kms,
+ Pointer len);
+
+ /**
+ * Get the HTTP request message for a KMS handle.
+ *
+ * @param kms A @ref mongocrypt_kms_ctx_t.
+ * @param msg The HTTP request to send to KMS.
+ * @return A boolean indicating success.
+ */
+ public static native boolean
+ mongocrypt_kms_ctx_message(mongocrypt_kms_ctx_t kms,
+ mongocrypt_binary_t msg);
+
+ /**
+ * Get the hostname from which to connect over TLS.
+ *
+ * <p>
+ * The storage for @p endpoint is not owned by the caller, but + * is valid until calling @ref mongocrypt_ctx_kms_done on the + * parent @ref mongocrypt_ctx_t. + * + * @param kms A @ref mongocrypt_kms_ctx_t. + * @param endpoint The output hostname. + * @return A boolean indicating success. + */ + public static native boolean + mongocrypt_kms_ctx_endpoint(mongocrypt_kms_ctx_t kms, PointerByReference endpoint); + + /** + * Indicates how many bytes to feed into @ref mongocrypt_kms_ctx_feed. + * + * @param kms The @ref mongocrypt_kms_ctx_t. + * @return The number of requested bytes. + */ + public static native int + mongocrypt_kms_ctx_bytes_needed(mongocrypt_kms_ctx_t kms); + + + /** + * Feed bytes from the HTTP response. + *
+ * <p>
+ * Feeding more bytes than what has been returned in @ref + * mongocrypt_kms_ctx_bytes_needed is an error. + * + * @param kms The @ref mongocrypt_kms_ctx_t. + * @param bytes The bytes to feed. + * @return A boolean indicating success. + */ + public static native boolean + mongocrypt_kms_ctx_feed(mongocrypt_kms_ctx_t kms, mongocrypt_binary_t bytes); + + + /** + * Get the status associated with a @ref mongocrypt_kms_ctx_t object. + * + * @param kms The @ref mongocrypt_kms_ctx_t object. + * @param status Receives the status. + * @return A boolean indicating success. + */ + public static native boolean + mongocrypt_kms_ctx_status(mongocrypt_kms_ctx_t kms, + mongocrypt_status_t status); + + + /** + * Call when done handling all KMS contexts. + * + * @param ctx The @ref mongocrypt_ctx_t object. + * @return A boolean indicating success. + */ + public static native boolean + mongocrypt_ctx_kms_done(mongocrypt_ctx_t ctx); + + + /** + * Perform the final encryption or decryption. + * + * @param ctx A @ref mongocrypt_ctx_t. + * @param out The final BSON to send to the server. + * @return a boolean indicating success. + */ + public static native boolean + mongocrypt_ctx_finalize(mongocrypt_ctx_t ctx, mongocrypt_binary_t out); + + + /** + * Destroy and free all memory associated with a @ref mongocrypt_ctx_t. + * + * @param ctx A @ref mongocrypt_ctx_t. + */ + public static native void + mongocrypt_ctx_destroy(mongocrypt_ctx_t ctx); + + static final String NATIVE_LIBRARY_NAME = "mongocrypt"; + + static { + Native.register(CAPI.class, NATIVE_LIBRARY_NAME); + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/CAPIHelper.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/CAPIHelper.java new file mode 100644 index 00000000000..dd03c4792df --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/CAPIHelper.java @@ -0,0 +1,92 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.mongodb.internal.crypt.capi; + +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_t; +import com.sun.jna.Pointer; +import org.bson.BsonBinaryWriter; +import org.bson.BsonDocument; +import org.bson.RawBsonDocument; +import org.bson.codecs.BsonValueCodecProvider; +import org.bson.codecs.Codec; +import org.bson.codecs.EncoderContext; +import org.bson.codecs.configuration.CodecRegistries; +import org.bson.codecs.configuration.CodecRegistry; +import org.bson.io.BasicOutputBuffer; + +import java.nio.ByteBuffer; + +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_new_from_data; +import static java.lang.String.format; + +final class CAPIHelper { + + private static final CodecRegistry CODEC_REGISTRY = CodecRegistries.fromProviders(new BsonValueCodecProvider()); + + @SuppressWarnings("unchecked") + static BinaryHolder toBinary(final BsonDocument document) { + BasicOutputBuffer buffer = new BasicOutputBuffer(); + BsonBinaryWriter writer = new BsonBinaryWriter(buffer); + ((Codec) CODEC_REGISTRY.get(document.getClass())).encode(writer, document, EncoderContext.builder().build()); + + DisposableMemory memory = new DisposableMemory(buffer.size()); + memory.write(0, buffer.getInternalBuffer(), 0, buffer.size()); + + return new BinaryHolder(memory, mongocrypt_binary_new_from_data(memory, buffer.getSize())); + } + + static RawBsonDocument toDocument(final mongocrypt_binary_t binary) { + byte[] bytes = toByteArray(binary); + return new RawBsonDocument(bytes); + } + + static BinaryHolder toBinary(final ByteBuffer buffer) { + byte[] message = new byte[buffer.remaining()]; + buffer.get(message, 0, buffer.remaining()); + + DisposableMemory memory = new DisposableMemory(message.length); + memory.write(0, message, 0, message.length); + + return new BinaryHolder(memory, mongocrypt_binary_new_from_data(memory, message.length)); + } + + static ByteBuffer toByteBuffer(final mongocrypt_binary_t binary) { + Pointer pointer = binary.data(); + int length = binary.len(); + return pointer.getByteBuffer(0, length); + } + + static byte[] toByteArray(final mongocrypt_binary_t binary) { + ByteBuffer byteBuffer = toByteBuffer(binary); + byte[] byteArray = new byte[byteBuffer.remaining()]; + byteBuffer.get(byteArray); + return byteArray; + } + + static void writeByteArrayToBinary(final mongocrypt_binary_t binary, final byte[] bytes) { + if (binary.len() < bytes.length) { + throw new IllegalArgumentException(format("mongocrypt binary of length %d is not large enough to hold %d bytes", + binary.len(), bytes.length)); + } + Pointer outPointer = binary.data(); + outPointer.write(0, bytes, 0, bytes.length); + } + + private CAPIHelper() { + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/CipherCallback.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/CipherCallback.java new file mode 100644 index 00000000000..2e4888d9857 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/CipherCallback.java @@ -0,0 +1,92 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package com.mongodb.internal.crypt.capi;
+
+import com.mongodb.internal.crypt.capi.CAPI.cstring;
+import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_t;
+import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_crypto_fn;
+import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_t;
+import com.sun.jna.Pointer;
+
+import javax.crypto.Cipher;
+import javax.crypto.NoSuchPaddingException;
+import javax.crypto.spec.IvParameterSpec;
+import javax.crypto.spec.SecretKeySpec;
+import java.security.NoSuchAlgorithmException;
+import java.util.concurrent.ConcurrentLinkedDeque;
+
+import static com.mongodb.internal.crypt.capi.CAPI.MONGOCRYPT_STATUS_ERROR_CLIENT;
+import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_set;
+import static com.mongodb.internal.crypt.capi.CAPIHelper.toByteArray;
+import static com.mongodb.internal.crypt.capi.CAPIHelper.writeByteArrayToBinary;
+
+class CipherCallback implements mongocrypt_crypto_fn {
+    private final String algorithm;
+    private final String transformation;
+    private final int mode;
+    private final CipherPool cipherPool;
+
+    CipherCallback(final String algorithm, final String transformation, final int mode) {
+        this.algorithm = algorithm;
+        this.transformation = transformation;
+        this.mode = mode;
+        this.cipherPool = new CipherPool();
+    }
+
+    @Override
+    public boolean crypt(final Pointer ctx, final mongocrypt_binary_t key, final mongocrypt_binary_t iv,
+                         final mongocrypt_binary_t in, final mongocrypt_binary_t out,
+                         final Pointer bytesWritten, final mongocrypt_status_t status) {
+        Cipher cipher = null;
+        try {
+            IvParameterSpec ivParameterSpec = new IvParameterSpec(toByteArray(iv));
+            SecretKeySpec secretKeySpec = new SecretKeySpec(toByteArray(key), algorithm);
+            cipher = cipherPool.get();
+            cipher.init(mode, secretKeySpec, ivParameterSpec);
+
+            byte[] result = cipher.doFinal(toByteArray(in));
+            writeByteArrayToBinary(out, result);
+            bytesWritten.setInt(0, result.length);
+
+            return true;
+        } catch (Exception e) {
+            mongocrypt_status_set(status, MONGOCRYPT_STATUS_ERROR_CLIENT, 0, new cstring(e.toString()), -1);
+            return false;
+        } finally {
+            if (cipher != null) {
+                cipherPool.release(cipher);
+            }
+        }
+    }
+
+    // A small pool of Cipher instances, since Cipher.getInstance is relatively expensive
+    private class CipherPool {
+        private final ConcurrentLinkedDeque<Cipher> available = new ConcurrentLinkedDeque<>();
+
+        Cipher get() throws NoSuchAlgorithmException, NoSuchPaddingException {
+            Cipher cipher = available.pollLast();
+            if (cipher != null) {
+                return cipher;
+            }
+            return Cipher.getInstance(transformation);
+        }
+
+        void release(final Cipher cipher) {
+            available.addLast(cipher);
+        }
+    }
+}
diff --git a/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/DisposableMemory.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/DisposableMemory.java
new file mode 100644
index 00000000000..924b1cc90b1
--- /dev/null
+++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/DisposableMemory.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.internal.crypt.capi; + +import com.sun.jna.Memory; + +// Subclass of JNA's Memory class so that we can call its protected dispose method +class DisposableMemory extends Memory { + DisposableMemory(final int size) { + super(size); + } + + public void dispose() { + super.dispose(); + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/JULLogger.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/JULLogger.java new file mode 100644 index 00000000000..43c15bbf489 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/JULLogger.java @@ -0,0 +1,130 @@ + +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.internal.crypt.capi; + +import java.util.logging.Level; + +import static java.util.logging.Level.FINE; +import static java.util.logging.Level.FINER; +import static java.util.logging.Level.INFO; +import static java.util.logging.Level.SEVERE; +import static java.util.logging.Level.WARNING; + +class JULLogger implements Logger { + + private final java.util.logging.Logger delegate; + + JULLogger(final String name) { + this.delegate = java.util.logging.Logger.getLogger(name); + } + + @Override + public String getName() { + return delegate.getName(); + } + + @Override + public boolean isTraceEnabled() { + return isEnabled(FINER); + } + + @Override + public void trace(final String msg) { + log(FINER, msg); + } + + @Override + public void trace(final String msg, final Throwable t) { + log(FINER, msg, t); + } + + @Override + public boolean isDebugEnabled() { + return isEnabled(FINE); + } + + @Override + public void debug(final String msg) { + log(FINE, msg); + } + + @Override + public void debug(final String msg, final Throwable t) { + log(FINE, msg, t); + } + + @Override + public boolean isInfoEnabled() { + return delegate.isLoggable(INFO); + } + + @Override + public void info(final String msg) { + log(INFO, msg); + } + + @Override + public void info(final String msg, final Throwable t) { + log(INFO, msg, t); + } + + @Override + public boolean isWarnEnabled() { + return delegate.isLoggable(WARNING); + } + + @Override + public void warn(final String msg) { + log(WARNING, msg); + } + + @Override + public void warn(final String msg, final Throwable t) { + log(WARNING, msg, t); + } + + + @Override + public boolean isErrorEnabled() { + return delegate.isLoggable(SEVERE); + } + + @Override + public void error(final String msg) { + log(SEVERE, msg); + } + + @Override + public void error(final String msg, final Throwable t) { + 
log(SEVERE, msg, t); + } + + + private boolean isEnabled(final Level level) { + return delegate.isLoggable(level); + } + + private void log(final Level level, final String msg) { + delegate.log(level, msg); + } + + public void log(final Level level, final String msg, final Throwable t) { + delegate.log(level, msg, t); + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/Logger.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/Logger.java new file mode 100644 index 00000000000..e3ea361af4d --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/Logger.java @@ -0,0 +1,144 @@ + +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.internal.crypt.capi; + +/** + * Not part of the public API + */ +public interface Logger { + /** + * Return the name of this Logger instance. + * + * @return name of this logger instance + */ + String getName(); + + /** + * Is the logger instance enabled for the TRACE level? + * + * @return True if this Logger is enabled for the TRACE level, false otherwise. + */ + boolean isTraceEnabled(); + + /** + * Log a message at the TRACE level. + * + * @param msg the message string to be logged + */ + void trace(String msg); + + /** + * Log an exception (throwable) at the TRACE level with an accompanying message. + * + * @param msg the message accompanying the exception + * @param t the exception (throwable) to log + */ + void trace(String msg, Throwable t); + + /** + * Is the logger instance enabled for the DEBUG level? + * + * @return True if this Logger is enabled for the DEBUG level, false otherwise. + */ + boolean isDebugEnabled(); + + + /** + * Log a message at the DEBUG level. + * + * @param msg the message string to be logged + */ + void debug(String msg); + + + /** + * Log an exception (throwable) at the DEBUG level with an accompanying message. + * + * @param msg the message accompanying the exception + * @param t the exception (throwable) to log + */ + void debug(String msg, Throwable t); + + /** + * Is the logger instance enabled for the INFO level? + * + * @return True if this Logger is enabled for the INFO level, false otherwise. + */ + boolean isInfoEnabled(); + + + /** + * Log a message at the INFO level. + * + * @param msg the message string to be logged + */ + void info(String msg); + + /** + * Log an exception (throwable) at the INFO level with an accompanying message. + * + * @param msg the message accompanying the exception + * @param t the exception (throwable) to log + */ + void info(String msg, Throwable t); + + /** + * Is the logger instance enabled for the WARN level? + * + * @return True if this Logger is enabled for the WARN level, false otherwise. + */ + boolean isWarnEnabled(); + + /** + * Log a message at the WARN level. + * + * @param msg the message string to be logged + */ + void warn(String msg); + + /** + * Log an exception (throwable) at the WARN level with an accompanying message. 
+ * + * @param msg the message accompanying the exception + * @param t the exception (throwable) to log + */ + void warn(String msg, Throwable t); + + /** + * Is the logger instance enabled for the ERROR level? + * + * @return True if this Logger is enabled for the ERROR level, false otherwise. + */ + boolean isErrorEnabled(); + + /** + * Log a message at the ERROR level. + * + * @param msg the message string to be logged + */ + void error(String msg); + + /** + * Log an exception (throwable) at the ERROR level with an accompanying message. + * + * @param msg the message accompanying the exception + * @param t the exception (throwable) to log + */ + void error(String msg, Throwable t); +} diff --git a/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/Loggers.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/Loggers.java new file mode 100644 index 00000000000..a5ce431fbcf --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/Loggers.java @@ -0,0 +1,50 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.internal.crypt.capi; + +/** + * This class is not part of the public API. + */ +public final class Loggers { + private static final String NAME = "org.mongodb.driver.crypt"; + + private static final boolean USE_SLF4J = shouldUseSLF4J(); + + /** + * @return the logger + */ + public static Logger getLogger() { + if (USE_SLF4J) { + return new SLF4JLogger(NAME); + } else { + return new JULLogger(NAME); + } + } + + private Loggers() { + } + + private static boolean shouldUseSLF4J() { + try { + Class.forName("org.slf4j.Logger"); + return true; + } catch (ClassNotFoundException e) { + return false; + } + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MacCallback.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MacCallback.java new file mode 100644 index 00000000000..98a0e833faa --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MacCallback.java @@ -0,0 +1,60 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.mongodb.internal.crypt.capi; + +import com.mongodb.internal.crypt.capi.CAPI.cstring; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_t; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_hmac_fn; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_t; +import com.sun.jna.Pointer; + +import javax.crypto.Mac; +import javax.crypto.spec.SecretKeySpec; + +import static com.mongodb.internal.crypt.capi.CAPI.MONGOCRYPT_STATUS_ERROR_CLIENT; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_set; +import static com.mongodb.internal.crypt.capi.CAPIHelper.toByteArray; +import static com.mongodb.internal.crypt.capi.CAPIHelper.writeByteArrayToBinary; + +class MacCallback implements mongocrypt_hmac_fn { + private final String algorithm; + + MacCallback(final String algorithm) { + this.algorithm = algorithm; + } + + @Override + public boolean hmac(final Pointer ctx, final mongocrypt_binary_t key, final mongocrypt_binary_t in, + final mongocrypt_binary_t out, final mongocrypt_status_t status) { + try { + Mac mac = Mac.getInstance(algorithm); + SecretKeySpec keySpec = new SecretKeySpec(toByteArray(key), algorithm); + mac.init(keySpec); + + mac.update(toByteArray(in)); + + byte[] result = mac.doFinal(); + writeByteArrayToBinary(out, result); + + return true; + } catch (Exception e) { + mongocrypt_status_set(status, MONGOCRYPT_STATUS_ERROR_CLIENT, 0, new cstring(e.toString()), -1); + return false; + } + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MessageDigestCallback.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MessageDigestCallback.java new file mode 100644 index 00000000000..35e6a8f78ed --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MessageDigestCallback.java @@ -0,0 +1,55 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.mongodb.internal.crypt.capi; + +import com.mongodb.internal.crypt.capi.CAPI.cstring; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_t; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_hash_fn; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_t; +import com.sun.jna.Pointer; + +import java.security.MessageDigest; + +import static com.mongodb.internal.crypt.capi.CAPI.MONGOCRYPT_STATUS_ERROR_CLIENT; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_set; +import static com.mongodb.internal.crypt.capi.CAPIHelper.toByteArray; +import static com.mongodb.internal.crypt.capi.CAPIHelper.writeByteArrayToBinary; + +class MessageDigestCallback implements mongocrypt_hash_fn { + + private final String algorithm; + + MessageDigestCallback(final String algorithm) { + this.algorithm = algorithm; + } + + @Override + public boolean hash(final Pointer ctx, final mongocrypt_binary_t in, final mongocrypt_binary_t out, + final mongocrypt_status_t status) { + try { + MessageDigest messageDigest = MessageDigest.getInstance(algorithm); + messageDigest.update(toByteArray(in)); + byte[] digest = messageDigest.digest(); + writeByteArrayToBinary(out, digest); + return true; + } catch (Exception e) { + mongocrypt_status_set(status, MONGOCRYPT_STATUS_ERROR_CLIENT, 0, new cstring(e.toString()), -1); + return false; + } + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoAwsKmsProviderOptions.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoAwsKmsProviderOptions.java new file mode 100644 index 00000000000..d37f0b7f91f --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoAwsKmsProviderOptions.java @@ -0,0 +1,104 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.internal.crypt.capi; + +import static org.bson.assertions.Assertions.notNull; + +/** + * The options for configuring the AWS KMS provider. + */ +public final class MongoAwsKmsProviderOptions { + + private final String accessKeyId; + private final String secretAccessKey; + + /** + * Construct a builder for the options + * + * @return the builder + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Gets the access key id + * + * @return the access key id, which may not be null + */ + public String getAccessKeyId() { + return accessKeyId; + } + + /** + * Gets the secret access key + * + * @return the secret access key, which may not be null + */ + public String getSecretAccessKey() { + return secretAccessKey; + } + + + /** + * The builder for the options + */ + public static final class Builder { + private String accessKeyId; + private String secretAccessKey; + + private Builder() { + } + + /** + * Sets the access key id. 
+ *
+ * @param accessKeyId the access key id
+ * @return this
+ */
+ public Builder accessKeyId(final String accessKeyId) {
+ this.accessKeyId = accessKeyId;
+ return this;
+ }
+
+ /**
+ * Sets the secret access key.
+ *
+ * @param secretAccessKey the secret access key
+ * @return this
+ */
+ public Builder secretAccessKey(final String secretAccessKey) {
+ this.secretAccessKey = secretAccessKey;
+ return this;
+ }
+
+ /**
+ * Build the options.
+ *
+ * @return the options
+ */
+ public MongoAwsKmsProviderOptions build() {
+ return new MongoAwsKmsProviderOptions(this);
+ }
+ }
+
+ private MongoAwsKmsProviderOptions(final Builder builder) {
+ this.accessKeyId = notNull("AWS KMS provider accessKeyId", builder.accessKeyId);
+ this.secretAccessKey = notNull("AWS KMS provider secretAccessKey", builder.secretAccessKey);
+ }
+}
diff --git a/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCrypt.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCrypt.java
new file mode 100644
index 00000000000..506b6428d8b
--- /dev/null
+++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCrypt.java
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+
+package com.mongodb.internal.crypt.capi;
+
+import org.bson.BsonDocument;
+
+import java.io.Closeable;
+
+/**
+ * The top-level object used to create encryption and decryption contexts.
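+ *
+ * <p>A minimal usage sketch. MongoCrypts.create and MongoCryptOptions are assumed
+ * to come from this module's companion classes (not shown in this excerpt):</p>
+ * <pre>{@code
+ * try (MongoCrypt mongoCrypt = MongoCrypts.create(options);
+ *      MongoCryptContext cryptContext = mongoCrypt.createDecryptionContext(encryptedDoc)) {
+ *     // drive cryptContext through its states, then call cryptContext.finish()
+ * }
+ * }</pre>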
+ */
+public interface MongoCrypt extends Closeable {
+
+    /**
+     * Create a context to use for encryption
+     *
+     * @param database the database name
+     * @param command the document representing the command to encrypt
+     * @return the context
+     */
+    MongoCryptContext createEncryptionContext(String database, BsonDocument command);
+
+    /**
+     * Create a context to use for decryption
+     *
+     * @param document the document to decrypt
+     * @return the context
+     */
+    MongoCryptContext createDecryptionContext(BsonDocument document);
+
+    /**
+     * Create a context to use for creating a data key
+     * @param kmsProvider the KMS provider
+     * @param options the data key options
+     * @return the context
+     */
+    MongoCryptContext createDataKeyContext(String kmsProvider, MongoDataKeyOptions options);
+
+    /**
+     * Create a context to use for explicit encryption
+     *
+     * @param document the document to encrypt, which must be in the form { "v" : BSON value to encrypt }
+     * @param options the explicit encryption options
+     * @return the context
+     */
+    MongoCryptContext createExplicitEncryptionContext(BsonDocument document, MongoExplicitEncryptOptions options);
+
+    /**
+     * Create a context to use for encrypting a match or aggregate expression
+     *
+     * @param document the document to encrypt, which must be in the form { "v" : BSON value to encrypt }
+     * @param options the expression encryption options
+     * @return the context
+     * @since 1.7
+     */
+    MongoCryptContext createEncryptExpressionContext(BsonDocument document, MongoExplicitEncryptOptions options);
+
+    /**
+     * Create a context to use for explicit decryption
+     *
+     * @param document the document to decrypt, which must be in the form { "v" : encrypted BSON value }
+     * @return the context
+     */
+    MongoCryptContext createExplicitDecryptionContext(BsonDocument document);
+
+    /**
+     * Create a context to use for rewrapping data keys
+     *
+     * @param filter The filter to use for the find command on the key vault collection to retrieve datakeys to rewrap.
+     * @param options the rewrap many data key options
+     * @return the context
+     * @since 1.5
+     */
+    MongoCryptContext createRewrapManyDatakeyContext(BsonDocument filter, MongoRewrapManyDataKeyOptions options);
+
+    /**
+     * @return the version string of the loaded crypt shared dynamic library if available or null
+     * @since 1.5
+     */
+    String getCryptSharedLibVersionString();
+
+    @Override
+    void close();
+}
diff --git a/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCryptContext.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCryptContext.java
new file mode 100644
index 00000000000..37269e0f1e1
--- /dev/null
+++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCryptContext.java
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ * + */ + +package com.mongodb.internal.crypt.capi; + +import com.mongodb.crypt.capi.MongoCryptException; +import org.bson.BsonDocument; +import org.bson.RawBsonDocument; + +import java.io.Closeable; + +/** + * An interface representing the lifecycle of an encryption or decryption request. It's modelled as a state machine. + */ +public interface MongoCryptContext extends Closeable { + + /** + * The possible states. + */ + enum State { + /** + * There has been an error + */ + ERROR(CAPI.MONGOCRYPT_CTX_ERROR), + + /** + * Needs collection information from the cluster encrypting to + */ + NEED_MONGO_COLLINFO(CAPI.MONGOCRYPT_CTX_NEED_MONGO_COLLINFO), + + /** + * Need to mark command with encryption markers + */ + NEED_MONGO_MARKINGS(CAPI.MONGOCRYPT_CTX_NEED_MONGO_MARKINGS), + + /** + * Need keys from the key vault + */ + NEED_MONGO_KEYS(CAPI.MONGOCRYPT_CTX_NEED_MONGO_KEYS), + + /** + * Need the key management service + */ + NEED_KMS(CAPI.MONGOCRYPT_CTX_NEED_KMS), + + /** + * Need to fetch/renew KMS credentials + * @since 1.4 + */ + NEED_KMS_CREDENTIALS(CAPI.MONGOCRYPT_CTX_NEED_KMS_CREDENTIALS), + + /** + * Ready for encryption/decryption + */ + READY(CAPI.MONGOCRYPT_CTX_READY), + + /** + * Done + */ + DONE(CAPI.MONGOCRYPT_CTX_DONE); + + private final int index; + + State(final int index) { + this.index = index; + } + + static State fromIndex(final int index) { + for (State state : State.values()) { + if (state.index == index) { + return state; + } + } + throw new MongoCryptException("Unknown context state " + index); + } + } + + /** + * Gets the current state. + * + * @return the current state + */ + State getState(); + + /** + * + * @return the operation to execute + */ + RawBsonDocument getMongoOperation(); + + /** + * + * @param document a result of the operation + */ + void addMongoOperationResult(BsonDocument document); + + /** + * Signal completion of the operation + */ + void completeMongoOperation(); + + /** + * Provide KMS credentials on demand, in response to NEED_KMS_CREDENTIALS state + * + * @param credentialsDocument document containing all credentials + * @since 1.4 + */ + void provideKmsProviderCredentials(BsonDocument credentialsDocument); + + /** + * + * @return the next key decryptor, or null if there are no more + */ + MongoKeyDecryptor nextKeyDecryptor(); + + /** + * Indicate that all key decryptors have been completed + */ + void completeKeyDecryptors(); + + /** + * + * @return the encrypted or decrypted document + */ + RawBsonDocument finish(); + + @Override + void close(); +} diff --git a/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCryptContextImpl.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCryptContextImpl.java new file mode 100644 index 00000000000..06f282aa86b --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCryptContextImpl.java @@ -0,0 +1,172 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.mongodb.internal.crypt.capi; + +import com.mongodb.crypt.capi.MongoCryptException; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_t; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_t; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_kms_ctx_t; +import org.bson.BsonDocument; +import org.bson.RawBsonDocument; + +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_destroy; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_new; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_destroy; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_finalize; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_kms_done; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_mongo_done; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_mongo_feed; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_mongo_op; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_next_kms_ctx; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_provide_kms_providers; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_state; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_status; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_code; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_destroy; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_message; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_new; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_t; +import static com.mongodb.internal.crypt.capi.CAPIHelper.toBinary; +import static com.mongodb.internal.crypt.capi.CAPIHelper.toDocument; +import static org.bson.assertions.Assertions.isTrue; +import static org.bson.assertions.Assertions.notNull; + +class MongoCryptContextImpl implements MongoCryptContext { + private final mongocrypt_ctx_t wrapped; + private volatile boolean closed; + + MongoCryptContextImpl(final mongocrypt_ctx_t wrapped) { + notNull("wrapped", wrapped); + this.wrapped = wrapped; + } + + @Override + public State getState() { + isTrue("open", !closed); + State state = State.fromIndex(mongocrypt_ctx_state(wrapped)); + if (state.equals(State.ERROR)) { + throwExceptionFromStatus(); + } + return state; + } + + @Override + public RawBsonDocument getMongoOperation() { + isTrue("open", !closed); + mongocrypt_binary_t binary = mongocrypt_binary_new(); + + try { + boolean success = mongocrypt_ctx_mongo_op(wrapped, binary); + if (!success) { + throwExceptionFromStatus(); + } + return toDocument(binary); + } finally { + mongocrypt_binary_destroy(binary); + } + } + + @Override + public void addMongoOperationResult(final BsonDocument document) { + isTrue("open", !closed); + + try (BinaryHolder binaryHolder = toBinary(document)) { + boolean success = mongocrypt_ctx_mongo_feed(wrapped, binaryHolder.getBinary()); + if (!success) { + throwExceptionFromStatus(); + } + } + } + + @Override + public void completeMongoOperation() { + isTrue("open", !closed); + boolean success = mongocrypt_ctx_mongo_done(wrapped); + if (!success) { + throwExceptionFromStatus(); + } + } + + @Override + public void provideKmsProviderCredentials(final BsonDocument credentialsDocument) { + try (BinaryHolder binaryHolder = toBinary(credentialsDocument)) { + boolean success = mongocrypt_ctx_provide_kms_providers(wrapped, binaryHolder.getBinary()); + if 
(!success) { + throwExceptionFromStatus(); + } + } + } + + @Override + public MongoKeyDecryptor nextKeyDecryptor() { + isTrue("open", !closed); + + mongocrypt_kms_ctx_t kmsContext = mongocrypt_ctx_next_kms_ctx(wrapped); + if (kmsContext == null) { + return null; + } + return new MongoKeyDecryptorImpl(kmsContext); + } + + @Override + public void completeKeyDecryptors() { + isTrue("open", !closed); + + boolean success = mongocrypt_ctx_kms_done(wrapped); + if (!success) { + throwExceptionFromStatus(); + } + + } + + @Override + public RawBsonDocument finish() { + isTrue("open", !closed); + + mongocrypt_binary_t binary = mongocrypt_binary_new(); + + try { + boolean success = mongocrypt_ctx_finalize(wrapped, binary); + if (!success) { + throwExceptionFromStatus(); + } + return toDocument(binary); + } finally { + mongocrypt_binary_destroy(binary); + } + } + + @Override + public void close() { + mongocrypt_ctx_destroy(wrapped); + closed = true; + } + + static void throwExceptionFromStatus(final mongocrypt_ctx_t wrapped) { + mongocrypt_status_t status = mongocrypt_status_new(); + mongocrypt_ctx_status(wrapped, status); + MongoCryptException e = new MongoCryptException(mongocrypt_status_message(status, null).toString(), + mongocrypt_status_code(status)); + mongocrypt_status_destroy(status); + throw e; + } + + private void throwExceptionFromStatus() { + throwExceptionFromStatus(wrapped); + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCryptImpl.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCryptImpl.java new file mode 100644 index 00000000000..774b9e718cb --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCryptImpl.java @@ -0,0 +1,506 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.mongodb.internal.crypt.capi; + +import com.mongodb.crypt.capi.MongoCryptException; +import com.mongodb.internal.crypt.capi.CAPI.cstring; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_t; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_t; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_log_fn_t; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_t; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_t; +import com.sun.jna.Pointer; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.BsonString; + +import javax.crypto.Cipher; +import java.nio.ByteBuffer; +import java.security.SecureRandom; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.BiFunction; +import java.util.function.Consumer; +import java.util.function.Supplier; + +import static com.mongodb.internal.crypt.capi.CAPI.MONGOCRYPT_CTX_ERROR; +import static com.mongodb.internal.crypt.capi.CAPI.MONGOCRYPT_LOG_LEVEL_ERROR; +import static com.mongodb.internal.crypt.capi.CAPI.MONGOCRYPT_LOG_LEVEL_FATAL; +import static com.mongodb.internal.crypt.capi.CAPI.MONGOCRYPT_LOG_LEVEL_INFO; +import static com.mongodb.internal.crypt.capi.CAPI.MONGOCRYPT_LOG_LEVEL_TRACE; +import static com.mongodb.internal.crypt.capi.CAPI.MONGOCRYPT_LOG_LEVEL_WARNING; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_crypt_shared_lib_version_string; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_datakey_init; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_encrypt_init; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_new; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_setopt_algorithm; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_setopt_algorithm_range; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_setopt_algorithm_text; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_setopt_contention_factor; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_setopt_key_alt_name; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_setopt_key_encryption_key; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_setopt_key_id; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_setopt_key_material; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_setopt_query_type; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_ctx_state; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_destroy; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_init; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_is_crypto_available; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_new; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_setopt_aes_256_ctr; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_setopt_append_crypt_shared_lib_search_path; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_setopt_bypass_query_analysis; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_setopt_crypto_hook_sign_rsaes_pkcs1_v1_5; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_setopt_crypto_hooks; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_setopt_enable_multiple_collinfo; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_setopt_encrypted_field_config_map; +import static 
com.mongodb.internal.crypt.capi.CAPI.mongocrypt_setopt_key_expiration; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_setopt_kms_provider_aws; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_setopt_kms_provider_local; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_setopt_kms_providers; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_setopt_log_handler; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_setopt_schema_map; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_setopt_set_crypt_shared_lib_path_override; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_setopt_use_need_kms_credentials_state; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_code; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_destroy; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_message; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_new; +import static com.mongodb.internal.crypt.capi.CAPIHelper.toBinary; +import static org.bson.assertions.Assertions.isTrue; +import static org.bson.assertions.Assertions.notNull; + +/** + * MongoCryptImpl is the main implementation of the {@link MongoCrypt} interface. + *

<p>
+ * This class is responsible for configuring and managing the native libmongocrypt context,
+ * handling encryption and decryption operations, and bridging Java cryptographic hooks
+ * when required. It wraps the native resource and provides context creation methods for
+ * various cryptographic operations.
+ * <p>
+ * Key responsibilities:
+ * <ul>
+ *     <li>Configures libmongocrypt with KMS providers, schema maps, encrypted fields, and other options.</li>
+ *     <li>Registers Java cryptographic hooks if native crypto is not available.</li>
+ *     <li>Provides context creation for encryption, decryption, key management, and explicit operations.</li>
+ *     <li>Manages native resource lifecycle and error handling.</li>
+ * </ul>
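+ * <p>
+ * A hedged sketch of driving a context created by this class (only the key-fetching state is shown;
+ * {@code mongoCrypt}, {@code encryptedDoc} and the {@code fetchKeysFromKeyVault} helper are assumptions,
+ * not part of this API):
+ * <pre>{@code
+ * try (MongoCryptContext ctx = mongoCrypt.createDecryptionContext(encryptedDoc)) {
+ *     while (ctx.getState() != MongoCryptContext.State.READY) {
+ *         switch (ctx.getState()) {
+ *             case NEED_MONGO_KEYS:
+ *                 for (BsonDocument keyDocument : fetchKeysFromKeyVault(ctx.getMongoOperation())) {
+ *                     ctx.addMongoOperationResult(keyDocument);
+ *                 }
+ *                 ctx.completeMongoOperation();
+ *                 break;
+ *             default: // NEED_KMS, NEED_KMS_CREDENTIALS, ... are handled along the same lines
+ *                 throw new IllegalStateException("Unhandled state: " + ctx.getState());
+ *         }
+ *     }
+ *     RawBsonDocument decrypted = ctx.finish();
+ * }
+ * }</pre>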
+ */ +class MongoCryptImpl implements MongoCrypt { + private static final Logger LOGGER = Loggers.getLogger(); + private final mongocrypt_t wrapped; + + // Keep a strong reference to all the callbacks so that they don't get garbage collected + @SuppressWarnings("FieldCanBeLocal") + private final LogCallback logCallback; + + @SuppressWarnings("FieldCanBeLocal") + private final CipherCallback aesCBC256EncryptCallback; + @SuppressWarnings("FieldCanBeLocal") + private final CipherCallback aesCBC256DecryptCallback; + @SuppressWarnings("FieldCanBeLocal") + private final CipherCallback aesCTR256EncryptCallback; + @SuppressWarnings("FieldCanBeLocal") + private final CipherCallback aesCTR256DecryptCallback; + @SuppressWarnings("FieldCanBeLocal") + private final MacCallback hmacSha512Callback; + @SuppressWarnings("FieldCanBeLocal") + private final MacCallback hmacSha256Callback; + @SuppressWarnings("FieldCanBeLocal") + private final MessageDigestCallback sha256Callback; + @SuppressWarnings("FieldCanBeLocal") + private final SecureRandomCallback secureRandomCallback; + @SuppressWarnings("FieldCanBeLocal") + private final SigningRSAESPKCSCallback signingRSAESPKCSCallback; + + private final AtomicBoolean closed; + + /** + * Constructs a MongoCryptImpl instance and configures the native libmongocrypt context. + *

+ * Registers log handlers, cryptographic hooks, and sets up KMS providers and other options. + * Throws MongoCryptException if initialization fails. + */ + MongoCryptImpl(final MongoCryptOptions options) { + closed = new AtomicBoolean(); + wrapped = mongocrypt_new(); + if (wrapped == null) { + throw new MongoCryptException("Unable to create new mongocrypt object"); + } + + logCallback = new LogCallback(); + + mongocrypt_setopt_enable_multiple_collinfo(wrapped); + + configure(() -> mongocrypt_setopt_log_handler(wrapped, logCallback, null)); + + if (mongocrypt_is_crypto_available()) { + LOGGER.debug("libmongocrypt is compiled with cryptography support, so not registering Java callbacks"); + aesCBC256EncryptCallback = null; + aesCBC256DecryptCallback = null; + aesCTR256EncryptCallback = null; + aesCTR256DecryptCallback = null; + hmacSha512Callback = null; + hmacSha256Callback = null; + sha256Callback = null; + secureRandomCallback = null; + signingRSAESPKCSCallback = null; + } else { + LOGGER.debug("libmongocrypt is compiled without cryptography support, so registering Java callbacks"); + // We specify NoPadding here because the underlying C library is responsible for padding prior + // to executing the callback + aesCBC256EncryptCallback = new CipherCallback("AES", "AES/CBC/NoPadding", Cipher.ENCRYPT_MODE); + aesCBC256DecryptCallback = new CipherCallback("AES", "AES/CBC/NoPadding", Cipher.DECRYPT_MODE); + aesCTR256EncryptCallback = new CipherCallback("AES", "AES/CTR/NoPadding", Cipher.ENCRYPT_MODE); + aesCTR256DecryptCallback = new CipherCallback("AES", "AES/CTR/NoPadding", Cipher.DECRYPT_MODE); + + hmacSha512Callback = new MacCallback("HmacSHA512"); + hmacSha256Callback = new MacCallback("HmacSHA256"); + sha256Callback = new MessageDigestCallback("SHA-256"); + secureRandomCallback = new SecureRandomCallback(new SecureRandom()); + + configure(() -> mongocrypt_setopt_crypto_hooks(wrapped, aesCBC256EncryptCallback, aesCBC256DecryptCallback, + secureRandomCallback, hmacSha512Callback, hmacSha256Callback, + sha256Callback, null)); + + signingRSAESPKCSCallback = new SigningRSAESPKCSCallback(); + configure(() -> mongocrypt_setopt_crypto_hook_sign_rsaes_pkcs1_v1_5(wrapped, signingRSAESPKCSCallback, null)); + configure(() -> mongocrypt_setopt_aes_256_ctr(wrapped, aesCTR256EncryptCallback, aesCTR256DecryptCallback, null)); + } + + if (options.getLocalKmsProviderOptions() != null) { + withBinaryHolder(options.getLocalKmsProviderOptions().getLocalMasterKey(), + binary -> configure(() -> mongocrypt_setopt_kms_provider_local(wrapped, binary))); + } + + if (options.getAwsKmsProviderOptions() != null) { + configure(() -> mongocrypt_setopt_kms_provider_aws(wrapped, + new cstring(options.getAwsKmsProviderOptions().getAccessKeyId()), -1, + new cstring(options.getAwsKmsProviderOptions().getSecretAccessKey()), -1)); + } + + if (options.isNeedsKmsCredentialsStateEnabled()) { + mongocrypt_setopt_use_need_kms_credentials_state(wrapped); + } + + if (options.getKmsProviderOptions() != null) { + withBinaryHolder(options.getKmsProviderOptions(), + binary -> configure(() -> mongocrypt_setopt_kms_providers(wrapped, binary))); + } + + if (options.getLocalSchemaMap() != null) { + BsonDocument localSchemaMapDocument = new BsonDocument(); + localSchemaMapDocument.putAll(options.getLocalSchemaMap()); + + withBinaryHolder(localSchemaMapDocument, binary -> configure(() -> mongocrypt_setopt_schema_map(wrapped, binary))); + } + + if (options.isBypassQueryAnalysis()) { + mongocrypt_setopt_bypass_query_analysis(wrapped); + } + + 
Long keyExpirationMS = options.getKeyExpirationMS(); + if (keyExpirationMS != null) { + configure(() -> mongocrypt_setopt_key_expiration(wrapped, keyExpirationMS)); + } + + if (options.getEncryptedFieldsMap() != null) { + BsonDocument localEncryptedFieldsMap = new BsonDocument(); + localEncryptedFieldsMap.putAll(options.getEncryptedFieldsMap()); + + withBinaryHolder(localEncryptedFieldsMap, + binary -> configure(() -> mongocrypt_setopt_encrypted_field_config_map(wrapped, binary))); + } + + options.getSearchPaths().forEach(p -> mongocrypt_setopt_append_crypt_shared_lib_search_path(wrapped, new cstring(p))); + if (options.getExtraOptions().containsKey("cryptSharedLibPath")) { + mongocrypt_setopt_set_crypt_shared_lib_path_override(wrapped, new cstring(options.getExtraOptions().getString("cryptSharedLibPath").getValue())); + } + + configure(() -> mongocrypt_init(wrapped)); + } + + /** + * Creates an encryption context for the given database and command document. + */ + @Override + public MongoCryptContext createEncryptionContext(final String database, final BsonDocument commandDocument) { + isTrue("open", !closed.get()); + notNull("database", database); + notNull("commandDocument", commandDocument); + return createMongoCryptContext(commandDocument, createNewMongoCryptContext(), + (context, binary) -> mongocrypt_ctx_encrypt_init(context, new cstring(database), -1, binary)); + } + + /** + * Creates a decryption context for the given document. + */ + @Override + public MongoCryptContext createDecryptionContext(final BsonDocument document) { + isTrue("open", !closed.get()); + return createMongoCryptContext(document, createNewMongoCryptContext(), CAPI::mongocrypt_ctx_decrypt_init); + } + + /** + * Creates a data key context for the specified KMS provider and options. 
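+ * <p>
+ * A hedged usage sketch (the "local" provider and the alt-name value are illustrative assumptions):
+ * <pre>{@code
+ * MongoCryptContext ctx = mongoCrypt.createDataKeyContext("local",
+ *         MongoDataKeyOptions.builder()
+ *                 .keyAltNames(java.util.Collections.singletonList("myDataKey"))
+ *                 .build());
+ * // step ctx through its states (see MongoCryptContext.State); ctx.finish() yields the new data key document
+ * }</pre>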
+ */ + @Override + public MongoCryptContext createDataKeyContext(final String kmsProvider, final MongoDataKeyOptions options) { + isTrue("open", !closed.get()); + mongocrypt_ctx_t context = createNewMongoCryptContext(); + + BsonDocument keyDocument = new BsonDocument("provider", new BsonString(kmsProvider)); + BsonDocument masterKey = options.getMasterKey(); + if (masterKey != null) { + masterKey.forEach(keyDocument::append); + } + withBinaryHolder(keyDocument, + binary -> configureContext(context, () -> mongocrypt_ctx_setopt_key_encryption_key(context, binary))); + + if (options.getKeyAltNames() != null) { + for (String cur : options.getKeyAltNames()) { + withBinaryHolder(new BsonDocument("keyAltName", new BsonString(cur)), + binary -> configureContext(context, () -> mongocrypt_ctx_setopt_key_alt_name(context, binary))); + } + } + + if (options.getKeyMaterial() != null) { + withBinaryHolder(new BsonDocument("keyMaterial", new BsonBinary(options.getKeyMaterial())), + binary -> configureContext(context, () -> mongocrypt_ctx_setopt_key_material(context, binary))); + } + + configureContext(context, () -> mongocrypt_ctx_datakey_init(context)); + return new MongoCryptContextImpl(context); + } + + /** + * Creates an explicit encryption context + */ + @Override + public MongoCryptContext createExplicitEncryptionContext(final BsonDocument document, final MongoExplicitEncryptOptions options) { + isTrue("open", !closed.get()); + return createMongoCryptContext(document, configureExplicitEncryption(options), CAPI::mongocrypt_ctx_explicit_encrypt_init); + } + + /** + * Creates an explicit encrypt *expression* context + */ + @Override + public MongoCryptContext createEncryptExpressionContext(final BsonDocument document, final MongoExplicitEncryptOptions options) { + isTrue("open", !closed.get()); + return createMongoCryptContext(document, configureExplicitEncryption(options), CAPI::mongocrypt_ctx_explicit_encrypt_expression_init); + } + + /** + * Creates an explicit decryption context + */ + @Override + public MongoCryptContext createExplicitDecryptionContext(final BsonDocument document) { + isTrue("open", !closed.get()); + return createMongoCryptContext(document, createNewMongoCryptContext(), CAPI::mongocrypt_ctx_explicit_decrypt_init); + } + + /** + * Creates a rewrap many data keys context + */ + @Override + public MongoCryptContext createRewrapManyDatakeyContext(final BsonDocument filter, final MongoRewrapManyDataKeyOptions options) { + isTrue("open", !closed.get()); + mongocrypt_ctx_t context = createNewMongoCryptContext(); + + if (options != null && options.getProvider() != null) { + BsonDocument keyDocument = new BsonDocument("provider", new BsonString(options.getProvider())); + BsonDocument masterKey = options.getMasterKey(); + if (masterKey != null) { + masterKey.forEach(keyDocument::append); + } + withBinaryHolder(keyDocument, + binary -> configureContext(context, () -> mongocrypt_ctx_setopt_key_encryption_key(context, binary))); + } + + return createMongoCryptContext(filter, context, CAPI::mongocrypt_ctx_rewrap_many_datakey_init); + } + + /** + * Returns the version string of the loaded crypt shared library. + */ + @Override + public String getCryptSharedLibVersionString() { + cstring versionString = mongocrypt_crypt_shared_lib_version_string(wrapped, null); + return versionString == null ? null : versionString.toString(); + } + + /** + * Closes the native libmongocrypt resource. + *

+ * This should be called when the instance is no longer needed, in order to release the native resources it holds. + */ + @Override + public void close() { + if (!closed.getAndSet(true)) { + mongocrypt_destroy(wrapped); + } + } + + /** + * Helper to create a MongoCryptContext from a BSON document and a native context. + *

+ * Applies the given configuration function and checks for errors. + */ + private MongoCryptContext createMongoCryptContext(final BsonDocument document, final mongocrypt_ctx_t context, + final BiFunction<mongocrypt_ctx_t, mongocrypt_binary_t, Boolean> configureFunction) { + withBinaryHolder(document, + binary -> { + if (!configureFunction.apply(context, binary)) { + MongoCryptContextImpl.throwExceptionFromStatus(context); + } + }); + if (mongocrypt_ctx_state(context) == MONGOCRYPT_CTX_ERROR) { + MongoCryptContextImpl.throwExceptionFromStatus(context); + } + return new MongoCryptContextImpl(context); + } + + /** + * Helper to create a new native mongocrypt_ctx_t context. + *

+ * Throws if context creation fails. + */ + private mongocrypt_ctx_t createNewMongoCryptContext() { + mongocrypt_ctx_t context = mongocrypt_ctx_new(wrapped); + if (context == null) { + throwExceptionFromStatus(); + } + return context; + } + + /** + * Configures explicit encryption options on a new native context. + *

+ * Applies key ID, key alt name, algorithm, query type, contention factor, and other options. + */ + private mongocrypt_ctx_t configureExplicitEncryption(final MongoExplicitEncryptOptions options) { + mongocrypt_ctx_t context = createNewMongoCryptContext(); + if (options.getKeyId() != null) { + withBinaryHolder(ByteBuffer.wrap(options.getKeyId().getData()), + binary -> configureContext(context, () -> mongocrypt_ctx_setopt_key_id(context, binary))); + } + + if (options.getKeyAltName() != null) { + withBinaryHolder(new BsonDocument("keyAltName", new BsonString(options.getKeyAltName())), + binary -> configureContext(context, () -> mongocrypt_ctx_setopt_key_alt_name(context, binary))); + } + + if (options.getAlgorithm() != null) { + configureContext(context, () -> mongocrypt_ctx_setopt_algorithm(context, new cstring(options.getAlgorithm()), -1)); + } + if (options.getQueryType() != null) { + configureContext(context, () -> mongocrypt_ctx_setopt_query_type(context, new cstring(options.getQueryType()), -1)); + } + if (options.getContentionFactor() != null) { + configureContext(context, () -> mongocrypt_ctx_setopt_contention_factor(context, options.getContentionFactor())); + } + if (options.getRangeOptions() != null) { + withBinaryHolder(options.getRangeOptions(), + binary -> configureContext(context, () -> mongocrypt_ctx_setopt_algorithm_range(context, binary))); + } + if (options.getTextOptions() != null) { + withBinaryHolder(options.getTextOptions(), + binary -> configureContext(context, () -> mongocrypt_ctx_setopt_algorithm_text(context, binary))); + } + return context; + } + + /** + * Configures the main mongocrypt instance with the given supplier that indicates if configuration was successful or not. + *

+ * Throws an exception derived from the mongocrypt status if the configuration fails. + */ + private void configure(final Supplier<Boolean> successSupplier) { + if (!successSupplier.get()) { + throwExceptionFromStatus(); + } + } + + /** + * Configures a mongocrypt_ctx_t context instance with the given supplier that indicates if configuration was successful or not. + *

+ * Throws an exception derived from the context's mongocrypt status if the configuration fails. + */ + private void configureContext(final mongocrypt_ctx_t context, final Supplier<Boolean> successSupplier) { + if (!successSupplier.get()) { + MongoCryptContextImpl.throwExceptionFromStatus(context); + } + } + + /** + * Throws a MongoCryptException based on the current status of the native context. + */ + private void throwExceptionFromStatus() { + mongocrypt_status_t status = mongocrypt_status_new(); + mongocrypt_status(wrapped, status); + MongoCryptException e = new MongoCryptException(mongocrypt_status_message(status, null).toString(), + mongocrypt_status_code(status)); + mongocrypt_status_destroy(status); + throw e; + } + + /** + * Utility method to handle BinaryHolder resource management for ByteBuffer values. + */ + private static void withBinaryHolder(final ByteBuffer value, final Consumer<mongocrypt_binary_t> consumer) { + try (BinaryHolder binaryHolder = toBinary(value)) { + consumer.accept(binaryHolder.getBinary()); + } + } + + /** + * Utility method to handle BinaryHolder resource management for BsonDocument values. + */ + private static void withBinaryHolder(final BsonDocument value, final Consumer<mongocrypt_binary_t> consumer) { + try (BinaryHolder binaryHolder = toBinary(value)) { + consumer.accept(binaryHolder.getBinary()); + } + } + + /** + * LogCallback bridges native log events to the Java logger. + *

+ * Handles different log levels and forwards messages to the appropriate logger method. + */ + static class LogCallback implements mongocrypt_log_fn_t { + @Override + public void log(final int level, final cstring message, final int messageLength, final Pointer ctx) { + if (level == MONGOCRYPT_LOG_LEVEL_FATAL) { + LOGGER.error(message.toString()); + } + if (level == MONGOCRYPT_LOG_LEVEL_ERROR) { + LOGGER.error(message.toString()); + } + if (level == MONGOCRYPT_LOG_LEVEL_WARNING) { + LOGGER.warn(message.toString()); + } + if (level == MONGOCRYPT_LOG_LEVEL_INFO) { + LOGGER.info(message.toString()); + } + if (level == MONGOCRYPT_LOG_LEVEL_TRACE) { + LOGGER.trace(message.toString()); + } + } + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCryptOptions.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCryptOptions.java new file mode 100644 index 00000000000..782e278e7c8 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCryptOptions.java @@ -0,0 +1,313 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.internal.crypt.capi; + +import org.bson.BsonDocument; + +import java.util.List; +import java.util.Map; + +import static java.util.Collections.emptyList; +import static org.bson.assertions.Assertions.isTrue; + +/** + * The options for configuring MongoCrypt. + */ +public final class MongoCryptOptions { + + private final MongoAwsKmsProviderOptions awsKmsProviderOptions; + private final MongoLocalKmsProviderOptions localKmsProviderOptions; + private final BsonDocument kmsProviderOptions; + private final Map localSchemaMap; + private final boolean needsKmsCredentialsStateEnabled; + private final Map encryptedFieldsMap; + private final BsonDocument extraOptions; + private final boolean bypassQueryAnalysis; + private final List searchPaths; + private final Long keyExpirationMS; + + + /** + * Construct a builder for the options + * + * @return the builder + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Gets the AWS KMS provider options. + * + * @return the AWS KMS provider options, which may be null + */ + public MongoAwsKmsProviderOptions getAwsKmsProviderOptions() { + return awsKmsProviderOptions; + } + + /** + * Gets the local KMS provider options. + * + * @return the local KMS provider options, which may be null + */ + public MongoLocalKmsProviderOptions getLocalKmsProviderOptions() { + return localKmsProviderOptions; + } + + /** + * Returns the KMS provider options. + * + * @return the KMS provider options, which may be null + * @since 1.1 + */ + public BsonDocument getKmsProviderOptions() { + return kmsProviderOptions; + } + + /** + * Gets the local schema map. + * + * @return the local schema map + */ + public Map getLocalSchemaMap() { + return localSchemaMap; + } + + /** + * Gets whether the MONGOCRYPT_CTX_NEED_KMS_CREDENTIALS is enabled. 
Defaults to false. + * + * @return whether the MONGOCRYPT_CTX_NEED_KMS_CREDENTIALS is enabled + * @since 1.4 + */ + public boolean isNeedsKmsCredentialsStateEnabled() { + return needsKmsCredentialsStateEnabled; + } + + /** + * Gets the encrypted fields map. + * + * @since 1.5 + * @return the encrypted fields map + */ + public Map getEncryptedFieldsMap() { + return encryptedFieldsMap; + } + + /** + * Gets whether automatic analysis of outgoing commands should be disabled. + * + * @since 1.5 + * @return true if bypassing query analysis + */ + public boolean isBypassQueryAnalysis() { + return bypassQueryAnalysis; + } + + /** + * The extraOptions that relate to the mongocryptd process or shared library. + * @return the extra options + * @since 1.5 + */ + public BsonDocument getExtraOptions() { + return extraOptions; + } + + /** + * Gets the search paths. + * @return the search paths + * @since 1.5 + */ + public List getSearchPaths() { + return searchPaths; + } + + /** + * Returns the cache expiration time for data encryption keys. + * + *

<p>Defaults to {@code null}, which defers to libmongocrypt's default, currently {@code 60000 ms}.
+ * Set to {@code 0} to disable key expiration.</p>

+ * + * @return the cache expiration time or null if not set. + * @since 5.5 + */ + public Long getKeyExpirationMS() { + return keyExpirationMS; + } + + /** + * The builder for the options + */ + public static final class Builder { + private MongoAwsKmsProviderOptions awsKmsProviderOptions; + private MongoLocalKmsProviderOptions localKmsProviderOptions; + private BsonDocument kmsProviderOptions = null; + private Map localSchemaMap = null; + private boolean needsKmsCredentialsStateEnabled; + private Map encryptedFieldsMap = null; + private boolean bypassQueryAnalysis; + private BsonDocument extraOptions = new BsonDocument(); + private List searchPaths = emptyList(); + private Long keyExpirationMS = null; + + private Builder() { + } + + /** + * Sets the AWS KMS provider options. + * + * @param awsKmsProviderOptions the AWS KMS provider options + * @return this + */ + public Builder awsKmsProviderOptions(final MongoAwsKmsProviderOptions awsKmsProviderOptions) { + this.awsKmsProviderOptions = awsKmsProviderOptions; + return this; + } + + /** + * Sets the local KMS provider options. + * + * @param localKmsProviderOptions the local KMS provider options + * @return this + */ + public Builder localKmsProviderOptions(final MongoLocalKmsProviderOptions localKmsProviderOptions) { + this.localKmsProviderOptions = localKmsProviderOptions; + return this; + } + + /** + * Sets the KMS provider options. + * + * @param kmsProviderOptions the KMS provider options document + * @return this + * @since 1.1 + */ + public Builder kmsProviderOptions(final BsonDocument kmsProviderOptions) { + this.kmsProviderOptions = kmsProviderOptions; + return this; + } + + /** + * Sets the local schema map. + * + * @param localSchemaMap local schema map + * @return this + */ + public Builder localSchemaMap(final Map localSchemaMap) { + this.localSchemaMap = localSchemaMap; + return this; + } + + /** + * Sets whether the MONGOCRYPT_CTX_NEED_KMS_CREDENTIALS is enabled. Defaults to false + * + * @param needsKmsCredentialsStateEnabled whether the MONGOCRYPT_CTX_NEED_KMS_CREDENTIALS is enabled + * @return this + * @since 1.4 + */ + public Builder needsKmsCredentialsStateEnabled(final boolean needsKmsCredentialsStateEnabled) { + this.needsKmsCredentialsStateEnabled = needsKmsCredentialsStateEnabled; + return this; + } + + /** + * Sets the encrypted fields map. + * + * @param encryptedFieldsMap the encrypted fields map + * @since 1.5 + * @return this + */ + public Builder encryptedFieldsMap(final Map encryptedFieldsMap) { + this.encryptedFieldsMap = encryptedFieldsMap; + return this; + } + + /** + * Sets whether automatic analysis of outgoing commands should be disabled. + * + *

<p>Set bypassQueryAnalysis to true to use explicit encryption on indexed fields
+ * without the MongoDB Enterprise Advanced licensed crypt shared library.</p>

+ * + * @param bypassQueryAnalysis whether the analysis of outgoing commands should be disabled. + * @since 1.5 + * @return this + */ + public Builder bypassQueryAnalysis(final boolean bypassQueryAnalysis) { + this.bypassQueryAnalysis = bypassQueryAnalysis; + return this; + } + + /** + * The extraOptions that relate to the mongocryptd process or shared library. + * @param extraOptions the extraOptions + * @return this + * @since 1.5 + */ + public Builder extraOptions(final BsonDocument extraOptions) { + this.extraOptions = extraOptions; + return this; + } + + /** + * Sets search paths + * @param searchPaths sets search path + * @return this + * @since 1.5 + */ + public Builder searchPaths(final List searchPaths) { + this.searchPaths = searchPaths; + return this; + } + + /** + * The cache expiration time for data encryption keys. + * + * @param keyExpirationMS the cache expiration time in milliseconds or null to use libmongocrypt's default. + * @return this + * @since 5.5 + */ + public Builder keyExpirationMS(final Long keyExpirationMS) { + this.keyExpirationMS = keyExpirationMS; + return this; + } + + /** + * Build the options. + * + * @return the options + */ + public MongoCryptOptions build() { + return new MongoCryptOptions(this); + } + } + + private MongoCryptOptions(final Builder builder) { + isTrue("at least one KMS provider is configured", + builder.awsKmsProviderOptions != null || builder.localKmsProviderOptions != null + || builder.kmsProviderOptions != null); + this.awsKmsProviderOptions = builder.awsKmsProviderOptions; + this.localKmsProviderOptions = builder.localKmsProviderOptions; + this.kmsProviderOptions = builder.kmsProviderOptions; + this.localSchemaMap = builder.localSchemaMap; + this.needsKmsCredentialsStateEnabled = builder.needsKmsCredentialsStateEnabled; + this.encryptedFieldsMap = builder.encryptedFieldsMap; + this.bypassQueryAnalysis = builder.bypassQueryAnalysis; + this.extraOptions = builder.extraOptions; + this.searchPaths = builder.searchPaths; + this.keyExpirationMS = builder.keyExpirationMS; + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCrypts.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCrypts.java new file mode 100644 index 00000000000..58739043627 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoCrypts.java @@ -0,0 +1,42 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.internal.crypt.capi; + +/** + * The entry point to the MongoCrypt library. + */ +public final class MongoCrypts { + + private MongoCrypts() { + //NOP + } + + /** + * Create a {@code MongoCrypt} instance. + * + *

<p>
+ * Make sure that JNA is able to find the shared library, most likely by setting the jna.library.path system property
+ * </p>
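+ * <p>
+ * A minimal sketch, assuming a local KMS provider whose 96-byte master key below is only a placeholder:
+ * <pre>{@code
+ * MongoCrypt mongoCrypt = MongoCrypts.create(
+ *         MongoCryptOptions.builder()
+ *                 .localKmsProviderOptions(MongoLocalKmsProviderOptions.builder()
+ *                         .localMasterKey(ByteBuffer.wrap(new byte[96]))
+ *                         .build())
+ *                 .build());
+ * }</pre>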

+ * + * @param options the options + * @return the instance + */ + public static MongoCrypt create(final MongoCryptOptions options) { + return new MongoCryptImpl(options); + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoDataKeyOptions.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoDataKeyOptions.java new file mode 100644 index 00000000000..6ec24954475 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoDataKeyOptions.java @@ -0,0 +1,125 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.internal.crypt.capi; + +import org.bson.BsonDocument; + +import java.util.List; + +/** + * The options for creation of a data key + */ +public final class MongoDataKeyOptions { + private final List keyAltNames; + private final BsonDocument masterKey; + private final byte[] keyMaterial; + + /** + * Options builder + */ + public static final class Builder { + private List keyAltNames; + private BsonDocument masterKey; + private byte[] keyMaterial; + + /** + * Add alternate key names + * @param keyAltNames the alternate key names + * @return this + */ + public Builder keyAltNames(final List keyAltNames) { + this.keyAltNames = keyAltNames; + return this; + } + + /** + * Add the master key. + * + * @param masterKey the master key + * @return this + */ + public Builder masterKey(final BsonDocument masterKey) { + this.masterKey = masterKey; + return this; + } + + /** + * Add the key material + * + * @param keyMaterial the optional custom key material for the data key + * @return this + * @since 1.5 + */ + public Builder keyMaterial(final byte[] keyMaterial) { + this.keyMaterial = keyMaterial; + return this; + } + + /** + * Build the options. + * + * @return the options + */ + public MongoDataKeyOptions build() { + return new MongoDataKeyOptions(this); + } + } + + /** + * Create a builder for the options. + * + * @return the builder + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Gets the alternate key names for the data key. + * + * @return the alternate key names + */ + public List getKeyAltNames() { + return keyAltNames; + } + + /** + * Gets the master key for the data key. + * + * @return the master key + */ + public BsonDocument getMasterKey() { + return masterKey; + } + + /** + * Gets the custom key material if set. 
+ * + * @return the custom key material for the data key or null + * @since 1.5 + */ + public byte[] getKeyMaterial() { + return keyMaterial; + } + + private MongoDataKeyOptions(final Builder builder) { + keyAltNames = builder.keyAltNames; + masterKey = builder.masterKey; + keyMaterial = builder.keyMaterial; + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoExplicitEncryptOptions.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoExplicitEncryptOptions.java new file mode 100644 index 00000000000..c08608ca595 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoExplicitEncryptOptions.java @@ -0,0 +1,243 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.internal.crypt.capi; + +import org.bson.BsonBinary; +import org.bson.BsonDocument; + +/** + * Options for explicit encryption. + */ +public final class MongoExplicitEncryptOptions { + private final BsonBinary keyId; + private final String keyAltName; + private final String algorithm; + private final Long contentionFactor; + private final String queryType; + private final BsonDocument rangeOptions; + private final BsonDocument textOptions; + + /** + * The builder for the options + */ + public static final class Builder { + private BsonBinary keyId; + private String keyAltName; + private String algorithm; + private Long contentionFactor; + private String queryType; + private BsonDocument rangeOptions; + private BsonDocument textOptions; + + private Builder() { + } + + /** + * Add the key identifier. + * + * @param keyId the key identifier + * @return this + */ + public Builder keyId(final BsonBinary keyId) { + this.keyId = keyId; + return this; + } + + /** + * Add the key alternative name. + * + * @param keyAltName the key alternative name + * @return this + */ + public Builder keyAltName(final String keyAltName) { + this.keyAltName = keyAltName; + return this; + } + + /** + * Add the encryption algorithm. + * + *

<p>To insert or query with an "Indexed" encrypted payload, use a MongoClient configured with {@code AutoEncryptionSettings}.
+ * {@code AutoEncryptionSettings.bypassQueryAnalysis} may be true.
+ * {@code AutoEncryptionSettings.bypassAutoEncryption} must be false.</p>
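+ * <p>
+ * A hedged sketch of building options for deterministic explicit encryption (the algorithm string follows
+ * the driver's naming conventions; the {@code dataKeyId} value is an assumption):
+ * <pre>{@code
+ * MongoExplicitEncryptOptions options = MongoExplicitEncryptOptions.builder()
+ *         .keyId(dataKeyId) // a BsonBinary UUID identifying an existing data key
+ *         .algorithm("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic")
+ *         .build();
+ * }</pre>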

+ * + * @param algorithm the encryption algorithm + * @return this + */ + public Builder algorithm(final String algorithm) { + this.algorithm = algorithm; + return this; + } + + /** + * The contention factor. + * + *

<p>Only applies when algorithm is "Indexed", "Range", or "TextPreview".</p>

+ * @param contentionFactor the contention factor + * @return this + * @since 1.5 + */ + public Builder contentionFactor(final Long contentionFactor) { + this.contentionFactor = contentionFactor; + return this; + } + + /** + * The QueryType. + * + *

<p>Only applies when algorithm is "Indexed", "Range", or "TextPreview".</p>
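+ * <p>
+ * For example, a hedged sketch for an equality-queryable payload (all values are illustrative assumptions):
+ * <pre>{@code
+ * MongoExplicitEncryptOptions options = MongoExplicitEncryptOptions.builder()
+ *         .keyId(dataKeyId) // assumed BsonBinary UUID
+ *         .algorithm("Indexed")
+ *         .queryType("equality")
+ *         .contentionFactor(4L)
+ *         .build();
+ * }</pre>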

+ * + * @param queryType the query type + * @return this + * @since 1.5 + */ + public Builder queryType(final String queryType) { + this.queryType = queryType; + return this; + } + + /** + * The Range Options. + * + *

<p>Only applies when algorithm is "Range".</p>

+ * + * @param rangeOptions the range options + * @return this + * @since 1.7 + */ + public Builder rangeOptions(final BsonDocument rangeOptions) { + this.rangeOptions = rangeOptions; + return this; + } + + /** + * The Text Options. + * + *

<p>Only applies when algorithm is "TextPreview".</p>

+ * + * @param textOptions the text options + * @return this + * @since 5.6 + */ + public Builder textOptions(final BsonDocument textOptions) { + this.textOptions = textOptions; + return this; + } + + /** + * Build the options. + * + * @return the options + */ + public MongoExplicitEncryptOptions build() { + return new MongoExplicitEncryptOptions(this); + } + } + + /** + * Create a builder for the options. + * + * @return the builder + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Gets the key identifier + * @return the key identifier + */ + public BsonBinary getKeyId() { + return keyId; + } + + /** + * Gets the key alternative name + * @return the key alternative name + */ + public String getKeyAltName() { + return keyAltName; + } + + /** + * Gets the encryption algorithm + * @return the encryption algorithm + */ + public String getAlgorithm() { + return algorithm; + } + + /** + * Gets the contention factor + * @return the contention factor + * @since 1.5 + */ + public Long getContentionFactor() { + return contentionFactor; + } + + /** + * Gets the query type + * @return the query type + * @since 1.5 + */ + public String getQueryType() { + return queryType; + } + + /** + * Gets the range options + * @return the range options + * @since 1.7 + */ + public BsonDocument getRangeOptions() { + return rangeOptions; + } + + /** + * Gets the text options + * @return the text options + * @since 5.6 + */ + public BsonDocument getTextOptions() { + return textOptions; + } + + private MongoExplicitEncryptOptions(final Builder builder) { + this.keyId = builder.keyId; + this.keyAltName = builder.keyAltName; + this.algorithm = builder.algorithm; + this.contentionFactor = builder.contentionFactor; + this.queryType = builder.queryType; + this.rangeOptions = builder.rangeOptions; + this.textOptions = builder.textOptions; + } + + @Override + public String toString() { + return "MongoExplicitEncryptOptions{" + + "keyId=" + keyId + + ", keyAltName='" + keyAltName + '\'' + + ", algorithm='" + algorithm + '\'' + + ", contentionFactor=" + contentionFactor + + ", queryType='" + queryType + '\'' + + ", rangeOptions=" + rangeOptions + + ", textOptions=" + textOptions + + '}'; + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoKeyDecryptor.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoKeyDecryptor.java new file mode 100644 index 00000000000..9b0eae6776f --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoKeyDecryptor.java @@ -0,0 +1,76 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.internal.crypt.capi; + +import java.nio.ByteBuffer; + +/** + * An interface representing a key decryption operation using a key management service. + */ +public interface MongoKeyDecryptor { + + /** + * Gets the name of the KMS provider, e.g. 
"aws" or "kmip" + * + * @return the KMS provider name + */ + String getKmsProvider(); + + /** + * Gets the host name of the key management service. + * + * @return the host name + */ + String getHostName(); + + /** + * Gets the message to send to the key management service. + * + *

<p>
+ * Clients should call this method first, and send the message on a TLS connection to a configured KMS server.
+ * </p>

+ * + * @return the message to send + */ + ByteBuffer getMessage(); + + /** + * Gets the number of bytes that should be received from the KMS server. + * + *

<p>
+ * After sending the message to the KMS server, clients should call this method in a loop, receiving {@code bytesNeeded} from
+ * the KMS server and feeding those bytes to this decryptor, until {@code bytesNeeded} is 0.
+ * </p>

+ * + * @return the actual number of bytes that clients should be prepared to receive + */ + int bytesNeeded(); + + /** + * Feed the received bytes to the decryptor. + * + *

<p>
+ * After sending the message to the KMS server, clients should call this method in a loop, receiving the number of bytes indicated by
+ * a call to {@link #bytesNeeded()} from the KMS server and feeding those bytes to this decryptor, until {@link #bytesNeeded()}
+ * returns 0.
+ * </p>
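+ * <p>
+ * A hedged sketch of the whole exchange (the {@code readFromKms} I/O helper is an assumption, not part of this API):
+ * <pre>{@code
+ * ByteBuffer message = decryptor.getMessage();
+ * // write the message to a TLS socket connected to decryptor.getHostName(), then:
+ * while (decryptor.bytesNeeded() > 0) {
+ *     ByteBuffer reply = readFromKms(decryptor.bytesNeeded()); // hypothetical helper
+ *     decryptor.feed(reply);
+ * }
+ * }</pre>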

+ * + * @param bytes the received bytes + */ + void feed(ByteBuffer bytes); +} diff --git a/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoKeyDecryptorImpl.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoKeyDecryptorImpl.java new file mode 100644 index 00000000000..1411adffc21 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoKeyDecryptorImpl.java @@ -0,0 +1,108 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.internal.crypt.capi; + +import com.mongodb.crypt.capi.MongoCryptException; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_t; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_kms_ctx_t; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_t; +import com.sun.jna.Pointer; +import com.sun.jna.ptr.PointerByReference; + +import java.nio.ByteBuffer; + +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_destroy; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_new; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_kms_ctx_bytes_needed; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_kms_ctx_endpoint; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_kms_ctx_feed; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_kms_ctx_get_kms_provider; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_kms_ctx_message; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_kms_ctx_status; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_code; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_destroy; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_message; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_new; +import static com.mongodb.internal.crypt.capi.CAPIHelper.toBinary; +import static com.mongodb.internal.crypt.capi.CAPIHelper.toByteBuffer; +import static org.bson.assertions.Assertions.notNull; + +class MongoKeyDecryptorImpl implements MongoKeyDecryptor { + private final mongocrypt_kms_ctx_t wrapped; + + MongoKeyDecryptorImpl(final mongocrypt_kms_ctx_t wrapped) { + notNull("wrapped", wrapped); + this.wrapped = wrapped; + } + + @Override + public String getKmsProvider() { + return mongocrypt_kms_ctx_get_kms_provider(wrapped, null).toString(); + } + + @Override + public String getHostName() { + PointerByReference hostNamePointerByReference = new PointerByReference(); + boolean success = mongocrypt_kms_ctx_endpoint(wrapped, hostNamePointerByReference); + if (!success) { + throwExceptionFromStatus(); + } + Pointer hostNamePointer = hostNamePointerByReference.getValue(); + return hostNamePointer.getString(0); + } + + @Override + public ByteBuffer getMessage() { + mongocrypt_binary_t binary = mongocrypt_binary_new(); + + try { + boolean success = mongocrypt_kms_ctx_message(wrapped, binary); + if (!success) { + 
throwExceptionFromStatus(); + } + return toByteBuffer(binary); + } finally { + mongocrypt_binary_destroy(binary); + } + } + + @Override + public int bytesNeeded() { + return mongocrypt_kms_ctx_bytes_needed(wrapped); + } + + @Override + public void feed(final ByteBuffer bytes) { + try (BinaryHolder binaryHolder = toBinary(bytes)) { + boolean success = mongocrypt_kms_ctx_feed(wrapped, binaryHolder.getBinary()); + if (!success) { + throwExceptionFromStatus(); + } + } + } + + private void throwExceptionFromStatus() { + mongocrypt_status_t status = mongocrypt_status_new(); + mongocrypt_kms_ctx_status(wrapped, status); + MongoCryptException e = new MongoCryptException(mongocrypt_status_message(status, null).toString(), + mongocrypt_status_code(status)); + mongocrypt_status_destroy(status); + throw e; + } + +} diff --git a/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoLocalKmsProviderOptions.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoLocalKmsProviderOptions.java new file mode 100644 index 00000000000..d2a975b8fae --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoLocalKmsProviderOptions.java @@ -0,0 +1,83 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package com.mongodb.internal.crypt.capi; + +import java.nio.ByteBuffer; + +import static org.bson.assertions.Assertions.notNull; + +/** + * The options for configuring a local KMS provider. + */ +public final class MongoLocalKmsProviderOptions { + + private final ByteBuffer localMasterKey; + + /** + * Construct a builder for the options + * + * @return the builder + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Gets the local master key + * + * @return the local master key + */ + public ByteBuffer getLocalMasterKey() { + return localMasterKey; + } + + /** + * The builder for the options + */ + public static final class Builder { + private ByteBuffer localMasterKey; + + private Builder() { + } + + /** + * Sets the local master key. + * + * @param localMasterKey the local master key + * @return this + */ + public Builder localMasterKey(final ByteBuffer localMasterKey) { + this.localMasterKey = localMasterKey; + return this; + } + + /** + * Build the options. + * + * @return the options + */ + public MongoLocalKmsProviderOptions build() { + return new MongoLocalKmsProviderOptions(this); + } + } + + private MongoLocalKmsProviderOptions(final Builder builder) { + this.localMasterKey = notNull("Local KMS provider localMasterKey", builder.localMasterKey); + + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoRewrapManyDataKeyOptions.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoRewrapManyDataKeyOptions.java new file mode 100644 index 00000000000..84c5031d635 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/MongoRewrapManyDataKeyOptions.java @@ -0,0 +1,104 @@ +/* + * Copyright 2008-present MongoDB, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.internal.crypt.capi; + +import org.bson.BsonDocument; + +/** + * The rewrap many data key options + * + *

<p>
+ * The masterKey document MUST have the fields corresponding to the given provider.
+ * </p>
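+ * <p>
+ * A hedged sketch (the provider name and masterKey fields are placeholder assumptions):
+ * <pre>{@code
+ * MongoRewrapManyDataKeyOptions options = MongoRewrapManyDataKeyOptions.builder()
+ *         .provider("aws")
+ *         .masterKey(BsonDocument.parse("{region: 'us-east-1', key: '<key ARN>'}"))
+ *         .build();
+ * }</pre>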

+ * + * @since 1.5 + */ +public final class MongoRewrapManyDataKeyOptions { + + private final String provider; + private final BsonDocument masterKey; + + /** + * Options builder + */ + public static final class Builder { + private String provider; + private BsonDocument masterKey; + + /** + * The provider + * + * @param provider the provider + * @return this + */ + public Builder provider(final String provider) { + this.provider = provider; + return this; + } + + /** + * Add the master key. + * + * @param masterKey the master key + * @return this + */ + public Builder masterKey(final BsonDocument masterKey) { + this.masterKey = masterKey; + return this; + } + + /** + * Build the options. + * + * @return the options + */ + public MongoRewrapManyDataKeyOptions build() { + return new MongoRewrapManyDataKeyOptions(this); + } + } + + /** + * Create a builder for the options. + * + * @return the builder + */ + public static Builder builder() { + return new Builder(); + } + + /** + * @return the provider name + */ + public String getProvider() { + return provider; + } + + /** + * Gets the master key for the data key. + * + * @return the master key + */ + public BsonDocument getMasterKey() { + return masterKey; + } + + private MongoRewrapManyDataKeyOptions(final Builder builder) { + provider = builder.provider; + masterKey = builder.masterKey; + } +} + diff --git a/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/SLF4JLogger.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/SLF4JLogger.java new file mode 100644 index 00000000000..2ed00d74562 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/SLF4JLogger.java @@ -0,0 +1,110 @@ + +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.mongodb.internal.crypt.capi; + +import org.slf4j.LoggerFactory; + +class SLF4JLogger implements Logger { + + private final org.slf4j.Logger delegate; + + SLF4JLogger(final String name) { + this.delegate = LoggerFactory.getLogger(name); + } + + @Override + public String getName() { + return delegate.getName(); + } + + @Override + public boolean isTraceEnabled() { + return delegate.isTraceEnabled(); + } + + @Override + public void trace(final String msg) { + delegate.trace(msg); + } + + @Override + public void trace(final String msg, final Throwable t) { + delegate.trace(msg, t); + } + + @Override + public boolean isDebugEnabled() { + return delegate.isDebugEnabled(); + } + + @Override + public void debug(final String msg) { + delegate.debug(msg); + } + + @Override + public void debug(final String msg, final Throwable t) { + delegate.debug(msg, t); + } + + @Override + public boolean isInfoEnabled() { + return delegate.isInfoEnabled(); + } + + @Override + public void info(final String msg) { + delegate.info(msg); + } + + @Override + public void info(final String msg, final Throwable t) { + delegate.info(msg, t); + } + + @Override + public boolean isWarnEnabled() { + return delegate.isWarnEnabled(); + } + + @Override + public void warn(final String msg) { + delegate.warn(msg); + } + + @Override + public void warn(final String msg, final Throwable t) { + delegate.warn(msg, t); + } + + @Override + public boolean isErrorEnabled() { + return delegate.isErrorEnabled(); + } + + @Override + public void error(final String msg) { + delegate.error(msg); + } + + @Override + public void error(final String msg, final Throwable t) { + delegate.error(msg, t); + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/SecureRandomCallback.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/SecureRandomCallback.java new file mode 100644 index 00000000000..215f453f923 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/SecureRandomCallback.java @@ -0,0 +1,51 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.mongodb.internal.crypt.capi; + +import com.mongodb.internal.crypt.capi.CAPI.cstring; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_t; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_random_fn; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_t; +import com.sun.jna.Pointer; + +import java.security.SecureRandom; + +import static com.mongodb.internal.crypt.capi.CAPI.MONGOCRYPT_STATUS_ERROR_CLIENT; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_set; +import static com.mongodb.internal.crypt.capi.CAPIHelper.writeByteArrayToBinary; + +class SecureRandomCallback implements mongocrypt_random_fn { + private final SecureRandom secureRandom; + + SecureRandomCallback(final SecureRandom secureRandom) { + this.secureRandom = secureRandom; + } + + @Override + public boolean random(final Pointer ctx, final mongocrypt_binary_t out, final int count, final mongocrypt_status_t status) { + try { + byte[] randomBytes = new byte[count]; + secureRandom.nextBytes(randomBytes); + writeByteArrayToBinary(out, randomBytes); + return true; + } catch (Exception e) { + mongocrypt_status_set(status, MONGOCRYPT_STATUS_ERROR_CLIENT, 0, new cstring(e.toString()), -1); + return false; + } + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/SigningRSAESPKCSCallback.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/SigningRSAESPKCSCallback.java new file mode 100644 index 00000000000..12717a466c9 --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/SigningRSAESPKCSCallback.java @@ -0,0 +1,73 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
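The callback's contract is simply "fill out with exactly count random bytes". In plain JDK terms, the core of the method above is:

    import java.security.SecureRandom;

    SecureRandom secureRandom = new SecureRandom();
    byte[] randomBytes = new byte[16];   // `count` as requested by libmongocrypt
    secureRandom.nextBytes(randomBytes); // fills the entire array with random data
    // the array is then copied into the mongocrypt_binary_t via writeByteArrayToBinary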
+ * + */ + +package com.mongodb.internal.crypt.capi; + +import com.mongodb.internal.crypt.capi.CAPI.cstring; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_binary_t; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_hmac_fn; +import com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_t; +import com.sun.jna.Pointer; + +import java.security.InvalidKeyException; +import java.security.KeyFactory; +import java.security.NoSuchAlgorithmException; +import java.security.PrivateKey; +import java.security.Signature; +import java.security.SignatureException; +import java.security.spec.InvalidKeySpecException; +import java.security.spec.KeySpec; +import java.security.spec.PKCS8EncodedKeySpec; + +import static com.mongodb.internal.crypt.capi.CAPI.MONGOCRYPT_STATUS_ERROR_CLIENT; +import static com.mongodb.internal.crypt.capi.CAPI.mongocrypt_status_set; +import static com.mongodb.internal.crypt.capi.CAPIHelper.toByteArray; +import static com.mongodb.internal.crypt.capi.CAPIHelper.writeByteArrayToBinary; + +class SigningRSAESPKCSCallback implements mongocrypt_hmac_fn { + + private static final String KEY_ALGORITHM = "RSA"; + private static final String SIGN_ALGORITHM = "SHA256withRSA"; + + SigningRSAESPKCSCallback() { + } + + @Override + public boolean hmac(final Pointer ctx, final mongocrypt_binary_t key, final mongocrypt_binary_t in, + final mongocrypt_binary_t out, final mongocrypt_status_t status) { + try { + byte[] result = getSignature(toByteArray(key), toByteArray(in)); + writeByteArrayToBinary(out, result); + return true; + } catch (Exception e) { + mongocrypt_status_set(status, MONGOCRYPT_STATUS_ERROR_CLIENT, 0, new cstring(e.toString()), -1); + return false; + } + } + + static byte[] getSignature(final byte[] privateKeyBytes, final byte[] dataToSign) throws NoSuchAlgorithmException, InvalidKeySpecException, InvalidKeyException, SignatureException { + KeySpec keySpec = new PKCS8EncodedKeySpec(privateKeyBytes); + KeyFactory keyFactory = KeyFactory.getInstance(KEY_ALGORITHM); + PrivateKey privateKey = keyFactory.generatePrivate(keySpec); + + Signature privateSignature = Signature.getInstance(SIGN_ALGORITHM); + privateSignature.initSign(privateKey); + privateSignature.update(dataToSign); + + return privateSignature.sign(); + } +} diff --git a/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/package-info.java b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/package-info.java new file mode 100644 index 00000000000..5789855267d --- /dev/null +++ b/mongodb-crypt/src/main/com/mongodb/internal/crypt/capi/package-info.java @@ -0,0 +1,21 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
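For reference, the verification counterpart of getSignature above, using only standard JCA calls (a sketch, not part of this patch; in practice the service that receives the signed payload performs this step):

    import java.security.KeyFactory;
    import java.security.PublicKey;
    import java.security.Signature;
    import java.security.spec.X509EncodedKeySpec;

    static boolean verify(final byte[] publicKeyBytes, final byte[] data, final byte[] signatureBytes)
            throws Exception {
        PublicKey publicKey = KeyFactory.getInstance("RSA")
                .generatePublic(new X509EncodedKeySpec(publicKeyBytes));
        Signature verifier = Signature.getInstance("SHA256withRSA"); // same as SIGN_ALGORITHM above
        verifier.initVerify(publicKey);
        verifier.update(data);
        return verifier.verify(signatureBytes);
    }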
+ * + */ + +/** + * The mongocrypt internal package + */ +package com.mongodb.internal.crypt.capi; diff --git a/mongodb-crypt/src/main/resources/META-INF/native-image/jni-config.json b/mongodb-crypt/src/main/resources/META-INF/native-image/jni-config.json new file mode 100644 index 00000000000..62ca1f8abae --- /dev/null +++ b/mongodb-crypt/src/main/resources/META-INF/native-image/jni-config.json @@ -0,0 +1,180 @@ +[ +{ + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_crypto_fn", + "methods":[{"name":"crypt","parameterTypes":["com.sun.jna.Pointer","com.mongodb.internal.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.internal.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.internal.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.internal.crypt.capi.CAPI$mongocrypt_binary_t","com.sun.jna.Pointer","com.mongodb.internal.crypt.capi.CAPI$mongocrypt_status_t"] }] +}, +{ + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_hash_fn", + "methods":[{"name":"hash","parameterTypes":["com.sun.jna.Pointer","com.mongodb.internal.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.internal.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.internal.crypt.capi.CAPI$mongocrypt_status_t"] }] +}, +{ + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_hmac_fn", + "methods":[{"name":"hmac","parameterTypes":["com.sun.jna.Pointer","com.mongodb.internal.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.internal.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.internal.crypt.capi.CAPI$mongocrypt_binary_t","com.mongodb.internal.crypt.capi.CAPI$mongocrypt_status_t"] }] +}, +{ + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_log_fn_t", + "methods":[{"name":"log","parameterTypes":["int","com.mongodb.internal.crypt.capi.CAPI$cstring","int","com.sun.jna.Pointer"] }] +}, +{ + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_random_fn", + "methods":[{"name":"random","parameterTypes":["com.sun.jna.Pointer","com.mongodb.internal.crypt.capi.CAPI$mongocrypt_binary_t","int","com.mongodb.internal.crypt.capi.CAPI$mongocrypt_status_t"] }] +}, +{ + "name":"com.sun.jna.Callback" +}, +{ + "name":"com.sun.jna.CallbackReference", + "methods":[{"name":"getCallback","parameterTypes":["java.lang.Class","com.sun.jna.Pointer","boolean"] }, {"name":"getFunctionPointer","parameterTypes":["com.sun.jna.Callback","boolean"] }, {"name":"getNativeString","parameterTypes":["java.lang.Object","boolean"] }, {"name":"initializeThread","parameterTypes":["com.sun.jna.Callback","com.sun.jna.CallbackReference$AttachOptions"] }] +}, +{ + "name":"com.sun.jna.CallbackReference$AttachOptions" +}, +{ + "name":"com.sun.jna.FromNativeConverter", + "methods":[{"name":"nativeType","parameterTypes":[] }] +}, +{ + "name":"com.sun.jna.IntegerType", + "fields":[{"name":"value"}] +}, +{ + "name":"com.sun.jna.JNIEnv" +}, +{ + "name":"com.sun.jna.Native", + "methods":[{"name":"dispose","parameterTypes":[] }, {"name":"fromNative","parameterTypes":["com.sun.jna.FromNativeConverter","java.lang.Object","java.lang.reflect.Method"] }, {"name":"fromNative","parameterTypes":["java.lang.Class","java.lang.Object"] }, {"name":"fromNative","parameterTypes":["java.lang.reflect.Method","java.lang.Object"] }, {"name":"nativeType","parameterTypes":["java.lang.Class"] }, {"name":"toNative","parameterTypes":["com.sun.jna.ToNativeConverter","java.lang.Object"] }] +}, +{ + "name":"com.sun.jna.Native$ffi_callback", + "methods":[{"name":"invoke","parameterTypes":["long","long","long"] }] +}, +{ + "name":"com.sun.jna.NativeMapped", + 
"methods":[{"name":"toNative","parameterTypes":[] }] +}, +{ + "name":"com.sun.jna.Pointer", + "fields":[{"name":"peer"}], + "methods":[{"name":"","parameterTypes":["long"] }] +}, +{ + "name":"com.sun.jna.PointerType", + "fields":[{"name":"pointer"}] +}, +{ + "name":"com.sun.jna.Structure", + "fields":[{"name":"memory"}, {"name":"typeInfo"}], + "methods":[{"name":"autoRead","parameterTypes":[] }, {"name":"autoWrite","parameterTypes":[] }, {"name":"getTypeInfo","parameterTypes":[] }, {"name":"newInstance","parameterTypes":["java.lang.Class","long"] }] +}, +{ + "name":"com.sun.jna.Structure$ByValue" +}, +{ + "name":"com.sun.jna.Structure$FFIType$FFITypes", + "fields":[{"name":"ffi_type_double"}, {"name":"ffi_type_float"}, {"name":"ffi_type_longdouble"}, {"name":"ffi_type_pointer"}, {"name":"ffi_type_sint16"}, {"name":"ffi_type_sint32"}, {"name":"ffi_type_sint64"}, {"name":"ffi_type_sint8"}, {"name":"ffi_type_uint16"}, {"name":"ffi_type_uint32"}, {"name":"ffi_type_uint64"}, {"name":"ffi_type_uint8"}, {"name":"ffi_type_void"}] +}, +{ + "name":"com.sun.jna.WString", + "methods":[{"name":"","parameterTypes":["java.lang.String"] }] +}, +{ + "name":"java.lang.Boolean", + "fields":[{"name":"TYPE"}, {"name":"value"}], + "methods":[{"name":"","parameterTypes":["boolean"] }, {"name":"getBoolean","parameterTypes":["java.lang.String"] }] +}, +{ + "name":"java.lang.Byte", + "fields":[{"name":"TYPE"}, {"name":"value"}], + "methods":[{"name":"","parameterTypes":["byte"] }] +}, +{ + "name":"java.lang.Character", + "fields":[{"name":"TYPE"}, {"name":"value"}], + "methods":[{"name":"","parameterTypes":["char"] }] +}, +{ + "name":"java.lang.Class", + "methods":[{"name":"getComponentType","parameterTypes":[] }] +}, +{ + "name":"java.lang.Double", + "fields":[{"name":"TYPE"}, {"name":"value"}], + "methods":[{"name":"","parameterTypes":["double"] }] +}, +{ + "name":"java.lang.Float", + "fields":[{"name":"TYPE"}, {"name":"value"}], + "methods":[{"name":"","parameterTypes":["float"] }] +}, +{ + "name":"java.lang.Integer", + "fields":[{"name":"TYPE"}, {"name":"value"}], + "methods":[{"name":"","parameterTypes":["int"] }] +}, +{ + "name":"java.lang.Long", + "fields":[{"name":"TYPE"}, {"name":"value"}], + "methods":[{"name":"","parameterTypes":["long"] }] +}, +{ + "name":"java.lang.Object", + "methods":[{"name":"toString","parameterTypes":[] }] +}, +{ + "name":"java.lang.Short", + "fields":[{"name":"TYPE"}, {"name":"value"}], + "methods":[{"name":"","parameterTypes":["short"] }] +}, +{ + "name":"java.lang.String", + "methods":[{"name":"","parameterTypes":["byte[]"] }, {"name":"","parameterTypes":["byte[]","java.lang.String"] }, {"name":"getBytes","parameterTypes":[] }, {"name":"getBytes","parameterTypes":["java.lang.String"] }, {"name":"lastIndexOf","parameterTypes":["int"] }, {"name":"substring","parameterTypes":["int"] }, {"name":"toCharArray","parameterTypes":[] }] +}, +{ + "name":"java.lang.System", + "methods":[{"name":"getProperty","parameterTypes":["java.lang.String"] }, {"name":"setProperty","parameterTypes":["java.lang.String","java.lang.String"] }] +}, +{ + "name":"java.lang.UnsatisfiedLinkError", + "methods":[{"name":"","parameterTypes":["java.lang.String"] }] +}, +{ + "name":"java.lang.Void", + "fields":[{"name":"TYPE"}] +}, +{ + "name":"java.lang.reflect.Method", + "methods":[{"name":"getParameterTypes","parameterTypes":[] }, {"name":"getReturnType","parameterTypes":[] }] +}, +{ + "name":"java.nio.Buffer", + "methods":[{"name":"position","parameterTypes":[] }] +}, +{ + "name":"java.nio.ByteBuffer", + 
"methods":[{"name":"array","parameterTypes":[] }, {"name":"arrayOffset","parameterTypes":[] }] +}, +{ + "name":"java.nio.CharBuffer", + "methods":[{"name":"array","parameterTypes":[] }, {"name":"arrayOffset","parameterTypes":[] }] +}, +{ + "name":"java.nio.DoubleBuffer", + "methods":[{"name":"array","parameterTypes":[] }, {"name":"arrayOffset","parameterTypes":[] }] +}, +{ + "name":"java.nio.FloatBuffer", + "methods":[{"name":"array","parameterTypes":[] }, {"name":"arrayOffset","parameterTypes":[] }] +}, +{ + "name":"java.nio.IntBuffer", + "methods":[{"name":"array","parameterTypes":[] }, {"name":"arrayOffset","parameterTypes":[] }] +}, +{ + "name":"java.nio.LongBuffer", + "methods":[{"name":"array","parameterTypes":[] }, {"name":"arrayOffset","parameterTypes":[] }] +}, +{ + "name":"java.nio.ShortBuffer", + "methods":[{"name":"array","parameterTypes":[] }, {"name":"arrayOffset","parameterTypes":[] }] +} +] diff --git a/mongodb-crypt/src/main/resources/META-INF/native-image/native-image.properties b/mongodb-crypt/src/main/resources/META-INF/native-image/native-image.properties new file mode 100644 index 00000000000..731d0a1d602 --- /dev/null +++ b/mongodb-crypt/src/main/resources/META-INF/native-image/native-image.properties @@ -0,0 +1,20 @@ +# +# Copyright 2008-present MongoDB, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +Args =\ + --initialize-at-run-time=\ + com.mongodb.internal.crypt.capi.CAPI,\ + com.sun.jna.Native,\ + com.sun.jna.Structure diff --git a/mongodb-crypt/src/main/resources/META-INF/native-image/reflect-config.json b/mongodb-crypt/src/main/resources/META-INF/native-image/reflect-config.json new file mode 100644 index 00000000000..c5ca33e6413 --- /dev/null +++ b/mongodb-crypt/src/main/resources/META-INF/native-image/reflect-config.json @@ -0,0 +1,134 @@ +[ +{ + "name":"com.mongodb.internal.crypt.capi.CAPI", + "allPublicFields":true, + "queryAllDeclaredMethods":true +}, +{ + "name":"com.mongodb.internal.crypt.capi.CAPI$cstring", + "methods":[{"name":"<init>","parameterTypes":[] }] +}, +{ + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_binary_t", + "methods":[{"name":"<init>","parameterTypes":[] }] +}, +{ + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_crypto_fn", + "queryAllDeclaredMethods":true, + "queryAllPublicMethods":true +}, +{ + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_ctx_t", + "methods":[{"name":"<init>","parameterTypes":[] }] +}, +{ + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_hash_fn", + "queryAllDeclaredMethods":true, + "queryAllPublicMethods":true +}, +{ + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_hmac_fn", + "queryAllDeclaredMethods":true, + "queryAllPublicMethods":true +}, +{ + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_kms_ctx_t", + "methods":[{"name":"<init>","parameterTypes":[] }] +}, +{ + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_log_fn_t", + "queryAllDeclaredMethods":true, + "queryAllPublicMethods":true +}, +{ + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_random_fn", + "queryAllDeclaredMethods":true, + "queryAllPublicMethods":true +}, +{ + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_status_t", + "methods":[{"name":"<init>","parameterTypes":[] }] +}, +{ + "name":"com.mongodb.internal.crypt.capi.CAPI$mongocrypt_t", + "methods":[{"name":"<init>","parameterTypes":[] }] +}, +{ + "name":"com.sun.jna.CallbackProxy", + "methods":[{"name":"callback","parameterTypes":["java.lang.Object[]"] }] +}, +{ + "name":"com.sun.jna.Pointer", + "fields":[{"name":"OPTIONS"}, {"name":"STRING_ENCODING"}, {"name":"STRUCTURE_ALIGNMENT"}, {"name":"TYPE_MAPPER"}] +}, +{ + "name":"com.sun.jna.Structure$FFIType", + "allDeclaredFields":true, + "queryAllPublicConstructors":true, + "fields":[{"name":"OPTIONS"}, {"name":"STRING_ENCODING"}, {"name":"STRUCTURE_ALIGNMENT"}, {"name":"TYPE_MAPPER"}], + "methods":[{"name":"<init>","parameterTypes":[] }] +}, +{ + "name":"com.sun.jna.Structure$FFIType$size_t", + "methods":[{"name":"<init>","parameterTypes":[] }] +}, +{ + "name":"com.sun.jna.ptr.PointerByReference", + "fields":[{"name":"OPTIONS"}, {"name":"STRING_ENCODING"}, {"name":"STRUCTURE_ALIGNMENT"}, {"name":"TYPE_MAPPER"}], + "methods":[{"name":"<init>","parameterTypes":[] }] +}, +{ + "name":"boolean", + "fields":[{"name":"OPTIONS"}, {"name":"STRING_ENCODING"}, {"name":"STRUCTURE_ALIGNMENT"}, {"name":"TYPE_MAPPER"}] +}, +{ + "name":"com.sun.crypto.provider.AESCipher$General", + "methods":[{"name":"<init>","parameterTypes":[] }] +}, +{ + "name":"com.sun.crypto.provider.HmacCore$HmacSHA256", + "methods":[{"name":"<init>","parameterTypes":[] }] +}, +{ + "name":"com.sun.crypto.provider.HmacCore$HmacSHA512", + "methods":[{"name":"<init>","parameterTypes":[] }] +}, +{ + "name":"int", + "fields":[{"name":"OPTIONS"}, {"name":"STRING_ENCODING"}, {"name":"STRUCTURE_ALIGNMENT"}, {"name":"TYPE_MAPPER"}] +}, +{ + "name":"java.lang.Throwable", +
"methods":[{"name":"addSuppressed","parameterTypes":["java.lang.Throwable"] }] +}, +{ + "name":"java.lang.reflect.Method", + "methods":[{"name":"isVarArgs","parameterTypes":[] }] +}, +{ + "name":"java.nio.Buffer" +}, +{ + "name":"long", + "fields":[{"name":"OPTIONS"}, {"name":"STRING_ENCODING"}, {"name":"STRUCTURE_ALIGNMENT"}, {"name":"TYPE_MAPPER"}] +}, +{ + "name":"sun.security.provider.NativePRNG", + "methods":[{"name":"","parameterTypes":[] }, {"name":"","parameterTypes":["java.security.SecureRandomParameters"] }] +}, +{ + "name":"sun.security.provider.SHA2$SHA256", + "methods":[{"name":"","parameterTypes":[] }] +}, +{ + "name":"sun.security.provider.SHA5$SHA512", + "methods":[{"name":"","parameterTypes":[] }] +}, +{ + "name":"void", + "fields":[{"name":"OPTIONS"}, {"name":"STRING_ENCODING"}, {"name":"STRUCTURE_ALIGNMENT"}, {"name":"TYPE_MAPPER"}] +}, +{ + "name":"org.slf4j.Logger" +} +] diff --git a/mongodb-crypt/src/test/java/com/mongodb/crypt/capi/MongoCryptTest.java b/mongodb-crypt/src/test/java/com/mongodb/crypt/capi/MongoCryptTest.java new file mode 100644 index 00000000000..14bb2a5ccdc --- /dev/null +++ b/mongodb-crypt/src/test/java/com/mongodb/crypt/capi/MongoCryptTest.java @@ -0,0 +1,396 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package com.mongodb.crypt.capi; + +import com.mongodb.internal.crypt.capi.MongoAwsKmsProviderOptions; +import com.mongodb.internal.crypt.capi.MongoCrypt; +import com.mongodb.internal.crypt.capi.MongoCryptContext; +import com.mongodb.internal.crypt.capi.MongoCryptContext.State; +import com.mongodb.internal.crypt.capi.MongoCryptOptions; +import com.mongodb.internal.crypt.capi.MongoCrypts; +import com.mongodb.internal.crypt.capi.MongoDataKeyOptions; +import com.mongodb.internal.crypt.capi.MongoExplicitEncryptOptions; +import com.mongodb.internal.crypt.capi.MongoKeyDecryptor; +import com.mongodb.internal.crypt.capi.MongoLocalKmsProviderOptions; +import org.bson.BsonBinary; +import org.bson.BsonBinarySubType; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.RawBsonDocument; +import org.junit.jupiter.api.Test; + +import java.io.BufferedReader; +import java.io.File; +import java.io.IOException; +import java.io.InputStreamReader; +import java.net.URISyntaxException; +import java.net.URL; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.util.Arrays; +import java.util.Base64; +import java.util.List; +import java.util.stream.Collectors; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertIterableEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; + + +@SuppressWarnings("SameParameterValue") +public class MongoCryptTest { + @Test + public void testEncrypt() throws URISyntaxException, IOException { + MongoCrypt mongoCrypt = createMongoCrypt(); + assertNotNull(mongoCrypt); + + MongoCryptContext encryptor = mongoCrypt.createEncryptionContext("test", getResourceAsDocument("command.json")); + + assertEquals(State.NEED_MONGO_COLLINFO, encryptor.getState()); + + BsonDocument listCollectionsFilter = encryptor.getMongoOperation(); + assertEquals(getResourceAsDocument("list-collections-filter.json"), listCollectionsFilter); + + encryptor.addMongoOperationResult(getResourceAsDocument("collection-info.json")); + encryptor.completeMongoOperation(); + assertEquals(State.NEED_MONGO_MARKINGS, encryptor.getState()); + + BsonDocument jsonSchema = encryptor.getMongoOperation(); + assertEquals(getResourceAsDocument("mongocryptd-command.json"), jsonSchema); + + encryptor.addMongoOperationResult(getResourceAsDocument("mongocryptd-reply.json")); + encryptor.completeMongoOperation(); + assertEquals(State.NEED_MONGO_KEYS, encryptor.getState()); + + testKeyDecryptor(encryptor); + + assertEquals(State.READY, encryptor.getState()); + + RawBsonDocument encryptedDocument = encryptor.finish(); + assertEquals(State.DONE, encryptor.getState()); + assertEquals(getResourceAsDocument("encrypted-command.json"), encryptedDocument); + + encryptor.close(); + + mongoCrypt.close(); + } + + + @Test + public void testDecrypt() throws IOException, URISyntaxException { + MongoCrypt mongoCrypt = createMongoCrypt(); + assertNotNull(mongoCrypt); + + MongoCryptContext decryptor = mongoCrypt.createDecryptionContext(getResourceAsDocument("encrypted-command-reply.json")); + + assertEquals(State.NEED_MONGO_KEYS, decryptor.getState()); + + testKeyDecryptor(decryptor); + + assertEquals(State.READY, decryptor.getState()); + + RawBsonDocument decryptedDocument = decryptor.finish(); + assertEquals(State.DONE, decryptor.getState()); + 
assertEquals(getResourceAsDocument("command-reply.json"), decryptedDocument); + + decryptor.close(); + + mongoCrypt.close(); + } + + @Test + public void testEmptyAwsCredentials() throws URISyntaxException, IOException { + MongoCrypt mongoCrypt = MongoCrypts.create(MongoCryptOptions + .builder() + .kmsProviderOptions(new BsonDocument("aws", new BsonDocument())) + .needsKmsCredentialsStateEnabled(true) + .build()); + + MongoCryptContext decryptor = mongoCrypt.createDecryptionContext(getResourceAsDocument("encrypted-command-reply.json")); + + assertEquals(State.NEED_KMS_CREDENTIALS, decryptor.getState()); + + BsonDocument awsCredentials = new BsonDocument(); + awsCredentials.put("accessKeyId", new BsonString("example")); + awsCredentials.put("secretAccessKey", new BsonString("example")); + + decryptor.provideKmsProviderCredentials(new BsonDocument("aws", awsCredentials)); + + assertEquals(State.NEED_MONGO_KEYS, decryptor.getState()); + + mongoCrypt.close(); + } + + @Test + public void testMultipleCloseCalls() { + MongoCrypt mongoCrypt = createMongoCrypt(); + assertNotNull(mongoCrypt); + + mongoCrypt.close(); + mongoCrypt.close(); + } + + @Test + public void testDataKeyCreation() { + MongoCrypt mongoCrypt = createMongoCrypt(); + assertNotNull(mongoCrypt); + + List keyAltNames = Arrays.asList("first", "second"); + MongoCryptContext dataKeyContext = mongoCrypt.createDataKeyContext("local", + MongoDataKeyOptions.builder().masterKey(new BsonDocument()) + .keyAltNames(keyAltNames) + .build()); + assertEquals(State.READY, dataKeyContext.getState()); + + RawBsonDocument dataKeyDocument = dataKeyContext.finish(); + assertEquals(State.DONE, dataKeyContext.getState()); + assertNotNull(dataKeyDocument); + + List actualKeyAltNames = dataKeyDocument.getArray("keyAltNames").stream() + .map(bsonValue -> bsonValue.asString().getValue()) + .sorted() + .collect(Collectors.toList()); + assertIterableEquals(keyAltNames, actualKeyAltNames); + dataKeyContext.close(); + mongoCrypt.close(); + } + + @Test + public void testExplicitEncryptionDecryption() { + MongoCrypt mongoCrypt = createMongoCrypt(); + assertNotNull(mongoCrypt); + + BsonDocument documentToEncrypt = new BsonDocument("v", new BsonString("hello")); + MongoExplicitEncryptOptions options = MongoExplicitEncryptOptions.builder() + .keyId(new BsonBinary(BsonBinarySubType.UUID_STANDARD, Base64.getDecoder().decode("YWFhYWFhYWFhYWFhYWFhYQ=="))) + .algorithm("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic") + .build(); + MongoCryptContext encryptor = mongoCrypt.createExplicitEncryptionContext(documentToEncrypt, options); + assertEquals(State.NEED_MONGO_KEYS, encryptor.getState()); + + testKeyDecryptor(encryptor); + + assertEquals(State.READY, encryptor.getState()); + + RawBsonDocument encryptedDocument = encryptor.finish(); + assertEquals(State.DONE, encryptor.getState()); + assertEquals(getResourceAsDocument("encrypted-value.json"), encryptedDocument); + + MongoCryptContext decryptor = mongoCrypt.createExplicitDecryptionContext(encryptedDocument); + + assertEquals(State.READY, decryptor.getState()); + + RawBsonDocument decryptedDocument = decryptor.finish(); + assertEquals(State.DONE, decryptor.getState()); + assertEquals(documentToEncrypt, decryptedDocument); + + encryptor.close(); + + mongoCrypt.close(); + } + + + @Test + public void testExplicitExpressionEncryption() { + MongoCrypt mongoCrypt = createMongoCrypt(); + assertNotNull(mongoCrypt); + + BsonDocument valueToEncrypt = getResourceAsDocument("fle2-find-range-explicit-v2/int32/value-to-encrypt.json"); + 
BsonDocument rangeOptions = getResourceAsDocument("fle2-find-range-explicit-v2/int32/rangeopts.json"); + BsonDocument expectedEncryptedPayload = getResourceAsDocument("fle2-find-range-explicit-v2/int32/encrypted-payload.json"); + + MongoExplicitEncryptOptions options = MongoExplicitEncryptOptions.builder() + .keyId(new BsonBinary(BsonBinarySubType.UUID_STANDARD, Base64.getDecoder().decode("q83vqxI0mHYSNBI0VniQEg=="))) + .algorithm("Range") + .queryType("range") + .contentionFactor(4L) + .rangeOptions(rangeOptions) + .build(); + MongoCryptContext encryptor = mongoCrypt.createEncryptExpressionContext(valueToEncrypt, options); + assertEquals(State.NEED_MONGO_KEYS, encryptor.getState()); + + testKeyDecryptor(encryptor, "fle2-find-range-explicit-v2/int32/key-filter.json", "keys/ABCDEFAB123498761234123456789012-local-document.json"); + + assertEquals(State.READY, encryptor.getState()); + + RawBsonDocument actualEncryptedPayload = encryptor.finish(); + assertEquals(State.DONE, encryptor.getState()); + assertEquals(expectedEncryptedPayload, actualEncryptedPayload); + + encryptor.close(); + mongoCrypt.close(); + } + + @Test + public void testRangePreviewQueryTypeIsNotSupported() { + MongoCrypt mongoCrypt = createMongoCrypt(); + assertNotNull(mongoCrypt); + + BsonDocument valueToEncrypt = getResourceAsDocument("fle2-find-range-explicit-v2/int32/value-to-encrypt.json"); + BsonDocument rangeOptions = getResourceAsDocument("fle2-find-range-explicit-v2/int32/rangeopts.json"); + + MongoExplicitEncryptOptions options = MongoExplicitEncryptOptions.builder() + .keyId(new BsonBinary(BsonBinarySubType.UUID_STANDARD, Base64.getDecoder().decode("q83vqxI0mHYSNBI0VniQEg=="))) + .algorithm("Range") + .queryType("rangePreview") + .contentionFactor(4L) + .rangeOptions(rangeOptions) + .build(); + + MongoCryptException exp = assertThrows(MongoCryptException.class, () -> mongoCrypt.createEncryptExpressionContext(valueToEncrypt, options)); + assertEquals("Query type 'rangePreview' is deprecated, please use 'range'", exp.getMessage()); + mongoCrypt.close(); + } + + @Test + public void testRangePreviewAlgorithmIsNotSupported() { + MongoCrypt mongoCrypt = createMongoCrypt(); + assertNotNull(mongoCrypt); + + BsonDocument valueToEncrypt = getResourceAsDocument("fle2-find-range-explicit-v2/int32/value-to-encrypt.json"); + BsonDocument rangeOptions = getResourceAsDocument("fle2-find-range-explicit-v2/int32/rangeopts.json"); + + MongoExplicitEncryptOptions options = MongoExplicitEncryptOptions.builder() + .keyId(new BsonBinary(BsonBinarySubType.UUID_STANDARD, Base64.getDecoder().decode("q83vqxI0mHYSNBI0VniQEg=="))) + .algorithm("RangePreview") + .rangeOptions(rangeOptions) + .build(); + + MongoCryptException exp = assertThrows(MongoCryptException.class, () -> mongoCrypt.createEncryptExpressionContext(valueToEncrypt, options)); + assertEquals("Algorithm 'rangePreview' is deprecated, please use 'range'", exp.getMessage()); + mongoCrypt.close(); + } + + @Test + public void testExplicitEncryptionDecryptionKeyAltName() throws IOException, URISyntaxException { + MongoCrypt mongoCrypt = createMongoCrypt(); + assertNotNull(mongoCrypt); + + BsonDocument documentToEncrypt = new BsonDocument("v", new BsonString("hello")); + MongoExplicitEncryptOptions options = MongoExplicitEncryptOptions.builder() + .keyAltName("altKeyName") + .algorithm("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic") + .build(); + MongoCryptContext encryptor = mongoCrypt.createExplicitEncryptionContext(documentToEncrypt, options); + + assertEquals(State.NEED_MONGO_KEYS, 
encryptor.getState()); + testKeyDecryptor(encryptor, "key-filter-keyAltName.json", "key-document.json"); + + assertEquals(State.READY, encryptor.getState()); + + RawBsonDocument encryptedDocument = encryptor.finish(); + assertEquals(State.DONE, encryptor.getState()); + assertEquals(getResourceAsDocument("encrypted-value.json"), encryptedDocument); + + MongoCryptContext decryptor = mongoCrypt.createExplicitDecryptionContext(encryptedDocument); + + assertEquals(State.READY, decryptor.getState()); + + RawBsonDocument decryptedDocument = decryptor.finish(); + assertEquals(State.DONE, decryptor.getState()); + assertEquals(documentToEncrypt, decryptedDocument); + + encryptor.close(); + + mongoCrypt.close(); + } + + private void testKeyDecryptor(final MongoCryptContext context) { + testKeyDecryptor(context, "key-filter.json", "key-document.json"); + } + + private void testKeyDecryptor(final MongoCryptContext context, final String keyFilterPath, final String keyDocumentPath) { + BsonDocument keyFilter = context.getMongoOperation(); + assertEquals(getResourceAsDocument(keyFilterPath), keyFilter); + context.addMongoOperationResult(getResourceAsDocument(keyDocumentPath)); + context.completeMongoOperation(); + if (context.getState() == State.READY) { + return; + } + + assertEquals(State.NEED_KMS, context.getState()); + + MongoKeyDecryptor keyDecryptor = context.nextKeyDecryptor(); + assertEquals("aws", keyDecryptor.getKmsProvider()); + assertEquals("kms.us-east-1.amazonaws.com:443", keyDecryptor.getHostName()); + + ByteBuffer keyDecryptorMessage = keyDecryptor.getMessage(); + assertEquals(790, keyDecryptorMessage.remaining()); + + int bytesNeeded = keyDecryptor.bytesNeeded(); + assertEquals(1024, bytesNeeded); + + keyDecryptor.feed(getHttpResourceAsByteBuffer("kms-reply.txt")); + bytesNeeded = keyDecryptor.bytesNeeded(); + assertEquals(0, bytesNeeded); + + assertNull(context.nextKeyDecryptor()); + + context.completeKeyDecryptors(); + } + + private MongoCrypt createMongoCrypt() { + return MongoCrypts.create(MongoCryptOptions + .builder() + .awsKmsProviderOptions(MongoAwsKmsProviderOptions.builder() + .accessKeyId("example") + .secretAccessKey("example") + .build()) + .localKmsProviderOptions(MongoLocalKmsProviderOptions.builder() + .localMasterKey(ByteBuffer.wrap(new byte[96])) + .build()) + .build()); + } + + private static BsonDocument getResourceAsDocument(final String fileName) { + return BsonDocument.parse(getFileAsString(fileName, System.getProperty("line.separator"))); + } + + private static ByteBuffer getHttpResourceAsByteBuffer(final String fileName) { + return ByteBuffer.wrap(getFileAsString(fileName, "\r\n").getBytes(StandardCharsets.UTF_8)); + } + + private static String getFileAsString(final String fileName, final String lineSeparator) { + try { + URL resource = MongoCryptTest.class.getResource("/" + fileName); + if (resource == null) { + throw new RuntimeException("Could not find file " + fileName); + } + File file = new File(resource.toURI()); + StringBuilder stringBuilder = new StringBuilder(); + String line; + try (BufferedReader reader = new BufferedReader( + new InputStreamReader(Files.newInputStream(file.toPath()), StandardCharsets.UTF_8))) { + boolean first = true; + while ((line = reader.readLine()) != null) { + if (!first) { + stringBuilder.append(lineSeparator); + } + first = false; + stringBuilder.append(line); + } + } + return stringBuilder.toString(); + } catch (Throwable t) { + throw new RuntimeException("Could not parse file " + fileName, t); + } + } +} diff --git 
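Taken together, the tests above all exercise the same state machine. A hedged sketch of the loop a real caller would run around MongoCryptContext (runMongoOperation and sendKmsRequest are hypothetical stand-ins for the listCollections/mongocryptd/key-vault and KMS round trips, and the sketch simplifies by adding a single result per state):

    static RawBsonDocument drive(final MongoCryptContext ctx) {
        while (ctx.getState() != State.READY) {
            switch (ctx.getState()) {
                case NEED_MONGO_COLLINFO:  // listCollections filter
                case NEED_MONGO_MARKINGS:  // mongocryptd command
                case NEED_MONGO_KEYS:      // key vault filter
                    ctx.addMongoOperationResult(runMongoOperation(ctx.getMongoOperation()));
                    ctx.completeMongoOperation();
                    break;
                case NEED_KMS:
                    MongoKeyDecryptor keyDecryptor;
                    while ((keyDecryptor = ctx.nextKeyDecryptor()) != null) {
                        while (keyDecryptor.bytesNeeded() > 0) {
                            keyDecryptor.feed(sendKmsRequest(keyDecryptor.getHostName(), keyDecryptor.getMessage()));
                        }
                    }
                    ctx.completeKeyDecryptors();
                    break;
                default:
                    throw new IllegalStateException("Unexpected state " + ctx.getState());
            }
        }
        return ctx.finish(); // READY -> DONE
    }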
a/mongodb-crypt/src/test/resources/collection-info.json b/mongodb-crypt/src/test/resources/collection-info.json new file mode 100644 index 00000000000..3b9660938a3 --- /dev/null +++ b/mongodb-crypt/src/test/resources/collection-info.json @@ -0,0 +1,37 @@ +{ + "type": "collection", + "name": "test", + "idIndex": { + "ns": "test.test", + "name": "_id_", + "key": { + "_id": { + "$numberInt": "1" + } + }, + "v": { + "$numberInt": "2" + } + }, + "options": { + "validator": { + "$jsonSchema": { + "properties": { + "ssn": { + "encrypt": { + "keyId": { + "$binary": { + "base64": "YWFhYWFhYWFhYWFhYWFhYQ==", + "subType": "04" + } + }, + "type": "string", + "algorithm": "AEAD_AES_CBC_HMAC_SHA512-Deterministic" + } + } + }, + "bsonType": "object" + } + } + } +} \ No newline at end of file diff --git a/mongodb-crypt/src/test/resources/command-reply.json b/mongodb-crypt/src/test/resources/command-reply.json new file mode 100644 index 00000000000..c110f737f45 --- /dev/null +++ b/mongodb-crypt/src/test/resources/command-reply.json @@ -0,0 +1,13 @@ +{ + "cursor": { + "firstBatch": [ + { + "_id": 1, + "ssn": "457-55-5462" + } + ], + "id": 0, + "ns": "test.test" + }, + "ok": 1 +} diff --git a/mongodb-crypt/src/test/resources/command.json b/mongodb-crypt/src/test/resources/command.json new file mode 100644 index 00000000000..d04bf7799ad --- /dev/null +++ b/mongodb-crypt/src/test/resources/command.json @@ -0,0 +1,6 @@ +{ + "find": "test", + "filter": { + "ssn": "457-55-5462" + } +} \ No newline at end of file diff --git a/mongodb-crypt/src/test/resources/encrypted-command-reply.json b/mongodb-crypt/src/test/resources/encrypted-command-reply.json new file mode 100644 index 00000000000..73d4d3427ee --- /dev/null +++ b/mongodb-crypt/src/test/resources/encrypted-command-reply.json @@ -0,0 +1,16 @@ +{ + "cursor" : { + "firstBatch" : [ + { + "_id": 1, + "ssn": { + "$binary": "AWFhYWFhYWFhYWFhYWFhYWECRTOW9yZzNDn5dGwuqsrJQNLtgMEKaujhs9aRWRp+7Yo3JK8N8jC8P0Xjll6C1CwLsE/iP5wjOMhVv1KMMyOCSCrHorXRsb2IKPtzl2lKTqQ=", + "$type": "06" + } + } + ], + "id" : 0, + "ns" : "test.test" + }, + "ok" : 1 +} \ No newline at end of file diff --git a/mongodb-crypt/src/test/resources/encrypted-command.json b/mongodb-crypt/src/test/resources/encrypted-command.json new file mode 100644 index 00000000000..8b8cfaa27ee --- /dev/null +++ b/mongodb-crypt/src/test/resources/encrypted-command.json @@ -0,0 +1,11 @@ +{ + "filter": { + "ssn": { + "$binary": { + "base64": "AWFhYWFhYWFhYWFhYWFhYWECRTOW9yZzNDn5dGwuqsrJQNLtgMEKaujhs9aRWRp+7Yo3JK8N8jC8P0Xjll6C1CwLsE/iP5wjOMhVv1KMMyOCSCrHorXRsb2IKPtzl2lKTqQ=", + "subType": "06" + } + } + }, + "find": "test" +} diff --git a/mongodb-crypt/src/test/resources/encrypted-value.json b/mongodb-crypt/src/test/resources/encrypted-value.json new file mode 100644 index 00000000000..e1a832b5ecb --- /dev/null +++ b/mongodb-crypt/src/test/resources/encrypted-value.json @@ -0,0 +1,6 @@ +{ + "v": { + "$binary": "AWFhYWFhYWFhYWFhYWFhYWECW+zDjR/69eS6VtuMD5+O2lZw6JyiWOw3avI7mnUkdpKzPfvy8F/nlZrgZa2cGmQsb0TmLZuk5trldosnGKD91w==", + "$type": "06" + } +} diff --git a/mongodb-crypt/src/test/resources/fle2-find-range-explicit-v2/int32/encrypted-payload.json b/mongodb-crypt/src/test/resources/fle2-find-range-explicit-v2/int32/encrypted-payload.json new file mode 100644 index 00000000000..7db5540ca1b --- /dev/null +++ b/mongodb-crypt/src/test/resources/fle2-find-range-explicit-v2/int32/encrypted-payload.json @@ -0,0 +1,26 @@ +{ + "v": { + "$and": [ + { + "age": { + "$gte": { + "$binary": { + "base64": 
"DQECAAADcGF5bG9hZACZAQAABGcAhQEAAAMwAH0AAAAFZAAgAAAAAInd0noBhIiJMv8QTjcfgRqnnVhxRJRRACLfvgT+CTR/BXMAIAAAAADm0EjqF/T4EmR6Dw6NaPLrL0OuzS4AFvm90czFluAAygVsACAAAAAA5MXcYWjYlzhPFUDebBEa17B5z2bupmaW9uCdtLjc7RkAAzEAfQAAAAVkACAAAAAA7lkNtT6RLw91aJ07K/blwlFs5wi9pQjqUXDcaCTxe98FcwAgAAAAAPwySffuLQihmF70Ot93KtaUMNU8KpmA+niyPRcvarNMBWwAIAAAAACDv6fJXXwRqwZH3O2kO+hdeLZ36U6bMZSui8kv0PsPtAADMgB9AAAABWQAIAAAAACcMWVTbZC4ox5VdjWeYKLgf4oBjpPlbTTAkucm9JPK0wVzACAAAAAA3tIww4ZTytkxFsUKyJbc3zwQ2w7DhkOqaNvX9g8pi3gFbAAgAAAAAGs9XR3Q1JpxV+HPW8P2GvCuCBF5bGZ8Kl1zHqzZcd5/AAASY20ABAAAAAAAAAAAEHBheWxvYWRJZAAAAAAAEGZpcnN0T3BlcmF0b3IAAgAAABBzZWNvbmRPcGVyYXRvcgAEAAAAEnNwAAEAAAAAAAAAEHRmAAEAAAAQbW4AAAAAABBteADIAAAAAA==", + "subType": "06" + } + } + } + }, + { + "age": { + "$lte": { + "$binary": { + "base64": "DTsAAAAQcGF5bG9hZElkAAAAAAAQZmlyc3RPcGVyYXRvcgACAAAAEHNlY29uZE9wZXJhdG9yAAQAAAAA", + "subType": "06" + } + } + } + } + ] + } +} diff --git a/mongodb-crypt/src/test/resources/fle2-find-range-explicit-v2/int32/key-filter.json b/mongodb-crypt/src/test/resources/fle2-find-range-explicit-v2/int32/key-filter.json new file mode 100644 index 00000000000..897364761c7 --- /dev/null +++ b/mongodb-crypt/src/test/resources/fle2-find-range-explicit-v2/int32/key-filter.json @@ -0,0 +1,19 @@ +{ + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": "q83vqxI0mHYSNBI0VniQEg==", + "$type": "04" + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] +} \ No newline at end of file diff --git a/mongodb-crypt/src/test/resources/fle2-find-range-explicit-v2/int32/rangeopts.json b/mongodb-crypt/src/test/resources/fle2-find-range-explicit-v2/int32/rangeopts.json new file mode 100644 index 00000000000..2e1407fe4e6 --- /dev/null +++ b/mongodb-crypt/src/test/resources/fle2-find-range-explicit-v2/int32/rangeopts.json @@ -0,0 +1,14 @@ +{ + "min": { + "$numberInt": "0" + }, + "max": { + "$numberInt": "200" + }, + "sparsity": { + "$numberLong": "1" + }, + "trimFactor": { + "$numberInt": "1" + } +} diff --git a/mongodb-crypt/src/test/resources/fle2-find-range-explicit-v2/int32/value-to-encrypt.json b/mongodb-crypt/src/test/resources/fle2-find-range-explicit-v2/int32/value-to-encrypt.json new file mode 100644 index 00000000000..4c294e887e6 --- /dev/null +++ b/mongodb-crypt/src/test/resources/fle2-find-range-explicit-v2/int32/value-to-encrypt.json @@ -0,0 +1,20 @@ +{ + "v": { + "$and": [ + { + "age": { + "$gte": { + "$numberInt": "23" + } + } + }, + { + "age": { + "$lte": { + "$numberInt": "35" + } + } + } + ] + } +} diff --git a/mongodb-crypt/src/test/resources/json-schema.json b/mongodb-crypt/src/test/resources/json-schema.json new file mode 100644 index 00000000000..059373d9ca1 --- /dev/null +++ b/mongodb-crypt/src/test/resources/json-schema.json @@ -0,0 +1,15 @@ +{ + "properties": { + "ssn": { + "encrypt": { + "keyId": { + "$binary": "YWFhYWFhYWFhYWFhYWFhYQ==", + "$type": "04" + }, + "type": "string", + "algorithm": "AEAD_AES_CBC_HMAC_SHA512-Deterministic" + } + } + }, + "bsonType": "object" +} \ No newline at end of file diff --git a/mongodb-crypt/src/test/resources/key-document.json b/mongodb-crypt/src/test/resources/key-document.json new file mode 100644 index 00000000000..5414072596d --- /dev/null +++ b/mongodb-crypt/src/test/resources/key-document.json @@ -0,0 +1,36 @@ +{ + "status": { + "$numberInt": "1" + }, + "_id": { + "$binary": { + "base64": "YWFhYWFhYWFhYWFhYWFhYQ==", + "subType": "04" + } + }, + "masterKey": { + "region": "us-east-1", + "key": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", + "provider": 
"aws" + }, + "updateDate": { + "$date": { + "$numberLong": "1557827033449" + } + }, + "keyMaterial": { + "$binary": { + "base64": "AQICAHhQNmWG2CzOm1dq3kWLM+iDUZhEqnhJwH9wZVpuZ94A8gEqnsxXlR51T5EbEVezUqqKAAAAwjCBvwYJKoZIhvcNAQcGoIGxMIGuAgEAMIGoBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDHa4jo6yp0Z18KgbUgIBEIB74sKxWtV8/YHje5lv5THTl0HIbhSwM6EqRlmBiFFatmEWaeMk4tO4xBX65eq670I5TWPSLMzpp8ncGHMmvHqRajNBnmFtbYxN3E3/WjxmdbOOe+OXpnGJPcGsftc7cB2shRfA4lICPnE26+oVNXT6p0Lo20nY5XC7jyCO", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1557827033449" + } + }, + "keyAltNames": [ + "altKeyName", + "another_altname" + ] +} diff --git a/mongodb-crypt/src/test/resources/key-filter-keyAltName.json b/mongodb-crypt/src/test/resources/key-filter-keyAltName.json new file mode 100644 index 00000000000..eb53a142a14 --- /dev/null +++ b/mongodb-crypt/src/test/resources/key-filter-keyAltName.json @@ -0,0 +1,14 @@ +{ + "$or": [ + { + "_id": { + "$in": [] + } + }, + { + "keyAltNames": { + "$in": ["altKeyName"] + } + } + ] +} diff --git a/mongodb-crypt/src/test/resources/key-filter.json b/mongodb-crypt/src/test/resources/key-filter.json new file mode 100644 index 00000000000..9ad7c70e5a7 --- /dev/null +++ b/mongodb-crypt/src/test/resources/key-filter.json @@ -0,0 +1,19 @@ +{ + "$or": [ + { + "_id": { + "$in": [ + { + "$binary": "YWFhYWFhYWFhYWFhYWFhYQ==", + "$type": "04" + } + ] + } + }, + { + "keyAltNames": { + "$in": [] + } + } + ] +} \ No newline at end of file diff --git a/mongodb-crypt/src/test/resources/keys/ABCDEFAB123498761234123456789012-local-document.json b/mongodb-crypt/src/test/resources/keys/ABCDEFAB123498761234123456789012-local-document.json new file mode 100644 index 00000000000..e5d1a3f7661 --- /dev/null +++ b/mongodb-crypt/src/test/resources/keys/ABCDEFAB123498761234123456789012-local-document.json @@ -0,0 +1,30 @@ +{ + "_id": { + "$binary": { + "base64": "q83vqxI0mHYSNBI0VniQEg==", + "subType": "04" + } + }, + "keyMaterial": { + "$binary": { + "base64": "27OBvUqHAuYFy60nwCdvq2xmZ4kFzVySphXzBGq+HEot13comCoydEfnltBzLTuXLbV9cnREFJIO5f0jMqrlkxIuvAV8yO84p5VJTEa8j/xSNe7iA594rx7UeKT0fOt4VqM47fht8h+8PZYc5JVezvEMvwk115IBCwENxDjLtT0g+y8Hf+aTUEGtxrYToH8zf1/Y7S16mHiIc4jK3/vxHw==", + "subType": "00" + } + }, + "creationDate": { + "$date": { + "$numberLong": "1648915408923" + } + }, + "updateDate": { + "$date": { + "$numberLong": "1648915408923" + } + }, + "status": { + "$numberInt": "0" + }, + "masterKey": { + "provider": "local" + } +} diff --git a/mongodb-crypt/src/test/resources/kms-reply.txt b/mongodb-crypt/src/test/resources/kms-reply.txt new file mode 100644 index 00000000000..c2c52e38413 --- /dev/null +++ b/mongodb-crypt/src/test/resources/kms-reply.txt @@ -0,0 +1,6 @@ +HTTP/1.1 200 OK +x-amzn-RequestId: deeb35e5-4ecb-4bf1-9af5-84a54ff0af0e +Content-Type: application/x-amz-json-1.1 +Content-Length: 233 + +{"KeyId": "arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0", "Plaintext": "TqhXy3tKckECjy4/ZNykMWG8amBF46isVPzeOgeusKrwheBmYaU8TMG5AHR/NeUDKukqo8hBGgogiQOVpLPkqBQHD8YkLsNbDmHoGOill5QAHnniF/Lz405bGucB5TfR"} \ No newline at end of file diff --git a/mongodb-crypt/src/test/resources/list-collections-filter.json b/mongodb-crypt/src/test/resources/list-collections-filter.json new file mode 100644 index 00000000000..2f37dc5b093 --- /dev/null +++ b/mongodb-crypt/src/test/resources/list-collections-filter.json @@ -0,0 +1,3 @@ +{ + "name": "test" +} \ No newline at end of file diff --git a/mongodb-crypt/src/test/resources/mongocryptd-command.json 
b/mongodb-crypt/src/test/resources/mongocryptd-command.json new file mode 100644 index 00000000000..2ec0612d7e9 --- /dev/null +++ b/mongodb-crypt/src/test/resources/mongocryptd-command.json @@ -0,0 +1,22 @@ +{ + "find": "test", + "filter": { + "ssn": "457-55-5462" + }, + "jsonSchema": { + "properties": { + "ssn": { + "encrypt": { + "keyId": { + "$binary": "YWFhYWFhYWFhYWFhYWFhYQ==", + "$type": "04" + }, + "type": "string", + "algorithm": "AEAD_AES_CBC_HMAC_SHA512-Deterministic" + } + } + }, + "bsonType": "object" + }, + "isRemoteSchema": true +} \ No newline at end of file diff --git a/mongodb-crypt/src/test/resources/mongocryptd-reply.json b/mongodb-crypt/src/test/resources/mongocryptd-reply.json new file mode 100644 index 00000000000..0d1873de7e2 --- /dev/null +++ b/mongodb-crypt/src/test/resources/mongocryptd-reply.json @@ -0,0 +1,18 @@ +{ + "schemaRequiresEncryption": true, + "ok": { + "$numberInt": "1" + }, + "result": { + "filter": { + "ssn": { + "$binary": { + "base64": "ADgAAAAQYQABAAAABWtpABAAAAAEYWFhYWFhYWFhYWFhYWFhYQJ2AAwAAAA0NTctNTUtNTQ2MgAA", + "subType": "06" + } + } + }, + "find": "test" + }, + "hasEncryptedPlaceholders": true +} \ No newline at end of file diff --git a/pom.xml b/pom.xml deleted index 86840642bef..00000000000 --- a/pom.xml +++ /dev/null @@ -1,80 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
-    <modelVersion>4.0.0</modelVersion>
-    <groupId>org.mongodb</groupId>
-    <artifactId>mongo-java-driver</artifactId>
-    <packaging>bundle</packaging>
-    <name>MongoDB Java Driver</name>
-    <version>2.12.0-SNAPSHOT</version>
-    <description>The MongoDB Java driver</description>
-    <url>http://www.mongodb.org</url>
-
-    <licenses>
-        <license>
-            <name>The Apache Software License, Version 2.0</name>
-            <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
-            <distribution>repo</distribution>
-        </license>
-    </licenses>
-
-    <scm>
-        <url>https://github.com/mongodb/mongo-java-driver</url>
-    </scm>
-
-    <properties>
-        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
-    </properties>
-
-    <build>
-        <sourceDirectory>src/main</sourceDirectory>
-        <testSourceDirectory>src/test</testSourceDirectory>
-        <plugins>
-            <plugin>
-                <groupId>org.apache.maven.plugins</groupId>
-                <artifactId>maven-source-plugin</artifactId>
-                <version>2.1.2</version>
-                <executions>
-                    <execution>
-                        <id>attach-sources</id>
-                        <phase>verify</phase>
-                        <goals>
-                            <goal>jar-no-fork</goal>
-                        </goals>
-                    </execution>
-                </executions>
-            </plugin>
-            <plugin>
-                <groupId>org.apache.felix</groupId>
-                <artifactId>maven-bundle-plugin</artifactId>
-                <version>2.3.7</version>
-                <extensions>true</extensions>
-                <configuration>
-                    <instructions>
-                        <Export-Package>
-                            com.mongodb.*,
-                            org.bson.*
-                        </Export-Package>
-                    </instructions>
-                </configuration>
-            </plugin>
-        </plugins>
-    </build>
-
-    <dependencies>
-        <dependency>
-            <groupId>org.testng</groupId>
-            <artifactId>testng</artifactId>
-            <version>6.3.1</version>
-            <scope>test</scope>
-        </dependency>
-    </dependencies>
-
-    <developers>
-        <developer>
-            <name>Various</name>
-            <organization>10gen</organization>
-        </developer>
-    </developers>
-</project>
diff --git a/sbom.json b/sbom.json new file mode 100644 index 00000000000..ddfc1b15e9a --- /dev/null +++ b/sbom.json @@ -0,0 +1,7 @@ +{ + "serialNumber": "urn:uuid:a291eaa6-9c96-4c46-9fb1-474f745cf6f5", + "version": 1, + "$schema": "http://cyclonedx.org/schema/bom-1.5.schema.json", + "bomFormat": "CycloneDX", + "specVersion": "1.5" +} diff --git a/settings.gradle.kts b/settings.gradle.kts new file mode 100644 index 00000000000..29d17792ad4 --- /dev/null +++ b/settings.gradle.kts @@ -0,0 +1,48 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +pluginManagement { + repositories { + gradlePluginPortal() + google() + mavenCentral() + } +} + +include(":bom") + +include(":bson") +include(":bson-kotlin") +include(":bson-kotlinx") +include(":bson-record-codec") +include(":bson-scala") + +include(":driver-core") +include(":driver-sync") +include(":driver-legacy") +include(":driver-reactive-streams") +include(":mongodb-crypt") + +include(":driver-kotlin-coroutine") +include(":driver-kotlin-extensions") +include(":driver-kotlin-sync") +include(":driver-scala") + +include(":driver-benchmarks") +include(":driver-lambda") +if (providers.gradleProperty("includeGraalvm").isPresent) { + include(":graalvm-native-image-app") +} diff --git a/src/main/META-INF/MANIFEST.MF b/src/main/META-INF/MANIFEST.MF deleted file mode 100644 index ae440f8defe..00000000000 --- a/src/main/META-INF/MANIFEST.MF +++ /dev/null @@ -1,16 +0,0 @@ -Manifest-Version: 1.0 -Bundle-License: http://www.apache.org/licenses/LICENSE-2.0.txt -Bundle-ManifestVersion: 2 -Bundle-Name: MongoDB Java Driver -Bundle-SymbolicName: org.mongodb.mongo-java-driver -Bundle-Version: @VERSION@ -Import-Package: javax.management, javax.net, javax.net.ssl -Export-Package: com.mongodb;version="@VERSION@", - com.mongodb.io;version="@VERSION@", - com.mongodb.util;version="@VERSION@", - com.mongodb.gridfs;version="@VERSION@", - org.bson;version="@VERSION@", - org.bson.util;version="@VERSION@", - org.bson.util.annotations;version="@VERSION@", - org.bson.types;version="@VERSION@", - org.bson.io;version="@VERSION@" diff --git a/src/main/META-INF/MANIFEST_BSON.MF b/src/main/META-INF/MANIFEST_BSON.MF deleted file mode 100644 index 533700ecf10..00000000000 --- a/src/main/META-INF/MANIFEST_BSON.MF +++ /dev/null @@ -1,10 +0,0 @@ -Manifest-Version: 1.0 -Bundle-License: http://www.apache.org/licenses/LICENSE-2.0.txt -Bundle-ManifestVersion: 2 -Bundle-Name: BSON -Bundle-SymbolicName: org.mongodb.bson -Bundle-Version: @VERSION@ -Export-Package: org.bson;version="@VERSION@", - org.bson.util;version="@VERSION@", - org.bson.types;version="@VERSION@", - org.bson.io;version="@VERSION@" diff --git a/src/main/com/mongodb/AggregationOutput.java b/src/main/com/mongodb/AggregationOutput.java deleted file mode 100644 index 7d9ceacf8e7..00000000000 --- a/src/main/com/mongodb/AggregationOutput.java +++ /dev/null @@ -1,72 +0,0 @@ -package com.mongodb; - -public class AggregationOutput { - - /** - * returns an iterator to the results of the aggregation - * @return - */ - public Iterable results() { - return _resultSet; - } - - /** - * returns the command result of the aggregation - * @return - */ - public CommandResult getCommandResult(){ - return _commandResult; - } - - /** - * returns the original aggregation command - * @return - */ - public DBObject getCommand() { - return _cmd; - } - - /** - * returns the address of the server used to execute the aggregation - * @return - */ - public ServerAddress getServerUsed() { - return _commandResult.getServerUsed(); - } - - /** - * string representation of the aggregation command - */ - public String toString(){ - return _commandResult.toString(); - } - - @SuppressWarnings("unchecked") - public AggregationOutput(DBObject cmd, CommandResult raw) { - _commandResult = raw; - _cmd = cmd; - - if(raw.containsField("result")) - _resultSet = (Iterable) raw.get( "result" ); - else - throw new IllegalArgumentException("result undefined"); - } - - /** - * @deprecated Please use {@link #getCommandResult()} instead. 
- */ - @Deprecated - protected final CommandResult _commandResult; - - /** - * @deprecated Please use {@link #getCommand()} instead. - */ - @Deprecated - protected final DBObject _cmd; - - /** - * @deprecated Please use {@link #results()} instead. - */ - @Deprecated - protected final Iterable _resultSet; -} \ No newline at end of file diff --git a/src/main/com/mongodb/BasicDBList.java b/src/main/com/mongodb/BasicDBList.java deleted file mode 100644 index c8255e1cbc5..00000000000 --- a/src/main/com/mongodb/BasicDBList.java +++ /dev/null @@ -1,67 +0,0 @@ -// BasicDBList.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import org.bson.types.BasicBSONList; - -import com.mongodb.util.JSON; - -/** - * a basic implementation of bson list that is mongo specific - * @author antoine - */ -public class BasicDBList extends BasicBSONList implements DBObject { - - private static final long serialVersionUID = -4415279469780082174L; - - /** - * Returns a JSON serialization of this object - * @return JSON serialization - */ - @Override - public String toString(){ - return JSON.serialize( this ); - } - - public boolean isPartialObject(){ - return _isPartialObject; - } - - public void markAsPartialObject(){ - _isPartialObject = true; - } - - public Object copy() { - // copy field values into new object - BasicDBList newobj = new BasicDBList(); - // need to clone the sub obj - for (int i = 0; i < size(); ++i) { - Object val = get(i); - if (val instanceof BasicDBObject) { - val = ((BasicDBObject)val).copy(); - } else if (val instanceof BasicDBList) { - val = ((BasicDBList)val).copy(); - } - newobj.add(val); - } - return newobj; - } - - private boolean _isPartialObject; -} diff --git a/src/main/com/mongodb/BasicDBObject.java b/src/main/com/mongodb/BasicDBObject.java deleted file mode 100644 index eca3aaa521d..00000000000 --- a/src/main/com/mongodb/BasicDBObject.java +++ /dev/null @@ -1,108 +0,0 @@ -// BasicDBObject.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import com.mongodb.util.JSON; -import org.bson.BasicBSONObject; - -import java.util.Map; - -/** - * a basic implementation of bson object that is mongo specific. - * A DBObject can be created as follows, using this class: - *
- * <blockquote><pre>
- * DBObject obj = new BasicDBObject();
- * obj.put( "foo", "bar" );
- * </pre></blockquote>
- */ -public class BasicDBObject extends BasicBSONObject implements DBObject { - - private static final long serialVersionUID = -4415279469780082174L; - - /** - * Creates an empty object. - */ - public BasicDBObject(){ - } - - /** - * creates an empty object - * @param size an estimate of the number of fields that will be inserted - */ - public BasicDBObject(int size){ - super(size); - } - - /** - * creates an object with the given key/value - * @param key key under which to store - * @param value value to store - */ - public BasicDBObject(String key, Object value){ - super(key, value); - } - - /** - * Creates an object from a map. - * @param m map to convert - */ - public BasicDBObject(Map m) { - super(m); - } - - public boolean isPartialObject(){ - return _isPartialObject; - } - - public void markAsPartialObject(){ - _isPartialObject = true; - } - - /** - * Returns a JSON serialization of this object - * @return JSON serialization - */ - @Override - public String toString(){ - return JSON.serialize( this ); - } - - @Override - public BasicDBObject append( String key , Object val ){ - put( key , val ); - return this; - } - - public Object copy() { - // copy field values into new object - BasicDBObject newobj = new BasicDBObject(this.toMap()); - // need to clone the sub obj - for (String field : keySet()) { - Object val = get(field); - if (val instanceof BasicDBObject) { - newobj.put(field, ((BasicDBObject)val).copy()); - } else if (val instanceof BasicDBList) { - newobj.put(field, ((BasicDBList)val).copy()); - } - } - return newobj; - } - - private boolean _isPartialObject; -} diff --git a/src/main/com/mongodb/BasicDBObjectBuilder.java b/src/main/com/mongodb/BasicDBObjectBuilder.java deleted index 8edbf4bc434..00000000000 --- a/src/main/com/mongodb/BasicDBObjectBuilder.java +++ /dev/null @@ -1,142 +0,0 @@ -// BasicDBObjectBuilder.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import java.util.Iterator; -import java.util.LinkedList; -import java.util.Map; - -/** - * utility for building complex objects - * example: - * BasicDBObjectBuilder.start().add( "name" , "eliot" ).add( "number" , 17 ).get() - */ -public class BasicDBObjectBuilder { - - /** - * creates an empty object - */ - public BasicDBObjectBuilder(){ - _stack = new LinkedList(); - _stack.add( new BasicDBObject() ); - } - - /** - * Creates an empty object - * @return The new empty builder - */ - public static BasicDBObjectBuilder start(){ - return new BasicDBObjectBuilder(); - } - - /** - * creates an object with the given key/value - * @param k The field name - * @param val The value - */ - public static BasicDBObjectBuilder start( String k , Object val ){ - return (new BasicDBObjectBuilder()).add( k , val ); - } - - /** - * Creates an object builder from an existing map.
- * @param m map to use - * @return the new builder - */ - @SuppressWarnings("unchecked") - public static BasicDBObjectBuilder start(Map m){ - BasicDBObjectBuilder b = new BasicDBObjectBuilder(); - Iterator i = m.entrySet().iterator(); - while (i.hasNext()) { - Map.Entry entry = i.next(); - b.add(entry.getKey().toString(), entry.getValue()); - } - return b; - } - - /** - * appends the key/value to the active object - * @param key - * @param val - * @return returns itself so you can chain - */ - public BasicDBObjectBuilder append( String key , Object val ){ - _cur().put( key , val ); - return this; - } - - - /** - * same as appends - * @see #append(String, Object) - * @param key - * @param val - * @return returns itself so you can chain - */ - public BasicDBObjectBuilder add( String key , Object val ){ - return append( key, val ); - } - - /** - * creates an new empty object and inserts it into the current object with the given key. - * The new child object becomes the active one. - * @param key - * @return returns itself so you can chain - */ - public BasicDBObjectBuilder push( String key ){ - BasicDBObject o = new BasicDBObject(); - _cur().put( key , o ); - _stack.addLast( o ); - return this; - } - - /** - * pops the active object, which means that the parent object becomes active - * @return returns itself so you can chain - */ - public BasicDBObjectBuilder pop(){ - if ( _stack.size() <= 1 ) - throw new IllegalArgumentException( "can't pop last element" ); - _stack.removeLast(); - return this; - } - - /** - * gets the base object - * @return The base object - */ - public DBObject get(){ - return _stack.getFirst(); - } - - /** - * returns true if no key/value was inserted into base object - * @return True if empty - */ - public boolean isEmpty(){ - return ((BasicDBObject) _stack.getFirst()).size() == 0; - } - - private DBObject _cur(){ - return _stack.getLast(); - } - - private final LinkedList _stack; - -} diff --git a/src/main/com/mongodb/Bytes.java b/src/main/com/mongodb/Bytes.java deleted file mode 100644 index df76988840b..00000000000 --- a/src/main/com/mongodb/Bytes.java +++ /dev/null @@ -1,227 +0,0 @@ -// Bytes.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.mongodb; - -import java.nio.ByteOrder; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; -import java.util.logging.Level; -import java.util.logging.Logger; - -import org.bson.BSON; -import org.bson.types.BSONTimestamp; -import org.bson.types.Code; -import org.bson.types.CodeWScope; -import org.bson.types.ObjectId; - -/** - * Class that hold definitions of the wire protocol - * @author antoine - */ -public class Bytes extends BSON { - - static final Logger LOGGER = Logger.getLogger( "com.mongodb" ); - - static final boolean D = Boolean.getBoolean( "DEBUG.MONGO" ); - - static { - if ( LOGGER.getLevel() == null ){ - if ( D ) - LOGGER.setLevel( Level.ALL ); - else - LOGGER.setLevel( Level.WARNING ); - } - } - - /** Little-endian */ - public static final ByteOrder ORDER = ByteOrder.LITTLE_ENDIAN; - - /** this size is set low to 4MB, but just serves as safe default */ - static final int MAX_OBJECT_SIZE = 1024 * 1024 * 4; - - /** default target size of an insert batch */ - static final int BATCH_INSERT_SIZE = 1024 * 1024 * 8; - - static final int CONNECTIONS_PER_HOST = Integer.parseInt( System.getProperty( "MONGO.POOLSIZE" , "10" ) ); - - - // --- network protocol options - - /** - * Tailable means cursor is not closed when the last data is retrieved. - * Rather, the cursor marks the final object's position. - * You can resume using the cursor later, from where it was located, if more data were received. - * Like any "latent cursor", the cursor may become invalid at some point (CursorNotFound) - for example if the final object it references were deleted. - */ - public static final int QUERYOPTION_TAILABLE = 1 << 1; - /** - * When turned on, read queries will be directed to slave servers instead of the primary server. - */ - public static final int QUERYOPTION_SLAVEOK = 1 << 2; - /** - * Internal replication use only - driver should not set - */ - public static final int QUERYOPTION_OPLOGREPLAY = 1 << 3; - /** - * The server normally times out idle cursors after an inactivity period (10 minutes) to prevent excess memory use. - * Set this option to prevent that. - */ - public static final int QUERYOPTION_NOTIMEOUT = 1 << 4; - - /** - * Use with TailableCursor. - * If we are at the end of the data, block for a while rather than returning no data. - * After a timeout period, we do return as normal. - */ - public static final int QUERYOPTION_AWAITDATA = 1 << 5; - - /** - * Stream the data down full blast in multiple "more" packages, on the assumption that the client will fully read all data queried. - * Faster when you are pulling a lot of data and know you want to pull it all down. - * Note: the client is not allowed to not read all the data unless it closes the connection. - */ - public static final int QUERYOPTION_EXHAUST = 1 << 6; - - /** - * Use with sharding (mongos). - * Allows partial results from a sharded system if any shards are down/missing from the cluster. If not used an error will be returned - * from the mongos server. - */ - public static final int QUERYOPTION_PARTIAL = 1 << 7; - - /** - * Set when getMore is called but the cursor id is not valid at the server. - * Returned with zero results. - */ - public static final int RESULTFLAG_CURSORNOTFOUND = 1; - /** - * Set when query failed. - * Results consist of one document containing an "$err" field describing the failure. - */ - public static final int RESULTFLAG_ERRSET = 2; - /** - * Drivers should ignore this. 
- * Only mongos will ever see this set, in which case, it needs to update config from the server. - */ - public static final int RESULTFLAG_SHARDCONFIGSTALE = 4; - /** - * Set when the server supports the AwaitData Query option. - * If it doesn't, a client should sleep a little between getMore's of a Tailable cursor. - * Mongod version 1.6 supports AwaitData and thus always sets AwaitCapable. - */ - public static final int RESULTFLAG_AWAITCAPABLE = 8; - - - static class OptionHolder { - OptionHolder( OptionHolder parent ){ - _parent = parent; - } - - void set( int options ){ - _options = options; - _hasOptions = true; - } - - int get(){ - if ( _hasOptions ) - return _options; - if ( _parent == null ) - return 0; - return _parent.get(); - } - - void add( int option ){ - set( get() | option ); - } - - void reset(){ - _hasOptions = false; - } - - final OptionHolder _parent; - - int _options = 0; - boolean _hasOptions = false; - } - - /** - * Gets the type byte for a given object. - * @param o the object - * @return the byte value associated with the type, or -1 if no type is matched - */ - @SuppressWarnings("deprecation") - public static byte getType( Object o ){ - if ( o == null ) - return NULL; - - if ( o instanceof DBPointer ) - return REF; - - if (o instanceof Integer - || o instanceof Short - || o instanceof Byte - || o instanceof AtomicInteger) { - return NUMBER_INT; - } - - if (o instanceof Long || o instanceof AtomicLong) { - return NUMBER_LONG; - } - - if ( o instanceof Number ) - return NUMBER; - - if ( o instanceof String ) - return STRING; - - if ( o instanceof java.util.List ) - return ARRAY; - - if ( o instanceof byte[] ) - return BINARY; - - if ( o instanceof ObjectId ) - return OID; - - if ( o instanceof Boolean ) - return BOOLEAN; - - if ( o instanceof java.util.Date ) - return DATE; - - if ( o instanceof BSONTimestamp ) - return TIMESTAMP; - - if ( o instanceof java.util.regex.Pattern ) - return REGEX; - - if ( o instanceof DBObject || o instanceof DBRefBase ) - return OBJECT; - - if ( o instanceof Code ) - return CODE; - - if ( o instanceof CodeWScope ) - return CODE_W_SCOPE; - - return -1; - } - - static final ObjectId COLLECTION_REF_ID = new ObjectId( -1 , -1 , -1 ); -} diff --git a/src/main/com/mongodb/CommandFailureException.java b/src/main/com/mongodb/CommandFailureException.java deleted file mode 100644 index 5ebf8478ced..00000000000 --- a/src/main/com/mongodb/CommandFailureException.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright (c) 2008 - 2013 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.mongodb; - -/** - * An exception indicating a failed command. 
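[Editor's aside: a minimal sketch of the failure path this class participates in, assuming a connected DB instance named db (hypothetical). CommandResult.getException(), defined later in this diff, wraps a non-ok result in a CommandFailureException:]

    CommandResult res = db.command(new BasicDBObject("collstats", "no_such_collection"));
    if (!res.ok()) {
        MongoException e = res.getException(); // a CommandFailureException in this branch
        System.out.println(res.getErrorMessage());
        throw e;
    }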
- */ -public class CommandFailureException extends MongoException { - private static final long serialVersionUID = -1180715413196161037L; - private final CommandResult commandResult; - - /** - * Construct a new instance with the CommandResult from a failed command - * - * @param commandResult the result - */ - public CommandFailureException(CommandResult commandResult){ - super(ServerError.getCode(commandResult), commandResult.toString()); - this.commandResult = commandResult; - } - - /** - * Gets the getlasterror command result document. - * - * @return the command result - */ - public CommandResult getCommandResult() { - return commandResult; - } -} diff --git a/src/main/com/mongodb/CommandResult.java b/src/main/com/mongodb/CommandResult.java deleted file mode 100644 index 5ba432452af..00000000000 --- a/src/main/com/mongodb/CommandResult.java +++ /dev/null @@ -1,121 +0,0 @@ -// CommandResult.java -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -package com.mongodb; - - -/** - * A simple wrapper for the result of getLastError() calls and other commands - */ -public class CommandResult extends BasicDBObject { - - CommandResult(ServerAddress srv) { - if (srv == null) { - throw new IllegalArgumentException("server address is null"); - } - _host = srv; - //so it is shown in toString/debug - put("serverUsed", srv.toString()); - } - - /** - * gets the "ok" field which is the result of the command - * @return True if ok - */ - public boolean ok(){ - Object o = get( "ok" ); - if ( o == null ) - throw new IllegalArgumentException( "'ok' should never be null..." 
); - - if ( o instanceof Boolean ) - return (Boolean) o; - - if ( o instanceof Number ) - return ((Number)o).intValue() == 1; - - throw new IllegalArgumentException( "can't figure out what to do with: " + o.getClass().getName() ); - } - - /** - * gets the "errmsg" field which holds the error message - * @return The error message or null - */ - public String getErrorMessage(){ - Object errorMessage = get( "errmsg" ); - if ( errorMessage == null ) - return null; - return errorMessage.toString(); - } - - /** - * utility method to create an exception with the command name - * @return The mongo exception or null - */ - public MongoException getException() { - if ( !ok() ) { // check for command failure - return new CommandFailureException( this ); - } else if ( hasErr() ) { // check for errors reported by getlasterror command - if (getCode() == 11000 || getCode() == 11001 || getCode() == 12582) { - return new MongoException.DuplicateKey(this); - } - else { - return new WriteConcernException(this); - } - } - - return null; - } - - /** - * returns the "code" field, as an int - * @return -1 if there is no code - */ - int getCode() { - int code = -1; - if ( get( "code" ) instanceof Number ) - code = ((Number)get("code")).intValue(); - return code; - } - - /** - * check the "err" field - * @return if it has it, and isn't null - */ - boolean hasErr(){ - Object o = get( "err" ); - return (o != null && ( (String) o ).length() > 0 ); - } - - /** - * throws an exception containing the cmd name, in case the command failed, or the "err/code" information - * @throws MongoException - */ - public void throwOnError() { - if ( !ok() || hasErr() ){ - throw getException(); - } - } - - public ServerAddress getServerUsed() { - return _host; - } - - private final ServerAddress _host; - private static final long serialVersionUID = 1L; - -} diff --git a/src/main/com/mongodb/ConnectionStatus.java b/src/main/com/mongodb/ConnectionStatus.java deleted file mode 100644 index c7e21b085e3..00000000000 --- a/src/main/com/mongodb/ConnectionStatus.java +++ /dev/null @@ -1,239 +0,0 @@ -/** - * Copyright (c) 2008 - 2012 10gen, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.mongodb; - -import java.util.ArrayList; -import java.util.List; -import java.util.logging.Level; -import java.util.logging.Logger; - -import static com.mongodb.ConnectionStatus.UpdatableNode.ConnectionState.Connected; -import static com.mongodb.ConnectionStatus.UpdatableNode.ConnectionState.Connecting; -import static com.mongodb.ConnectionStatus.UpdatableNode.ConnectionState.Unconnected; - -/** - * Base class for classes that manage connections to mongo instances as background tasks. - */ -abstract class ConnectionStatus { - - ConnectionStatus(List mongosAddresses, Mongo mongo) { - _mongoOptions = mongoOptionsDefaults.copy(); - _mongoOptions.socketFactory = mongo._options.socketFactory; - this._mongosAddresses = new ArrayList(mongosAddresses); - this._mongo = mongo; - } - - protected BackgroundUpdater _updater; - protected final Mongo _mongo; - protected final List _mongosAddresses; - protected volatile boolean _closed; - protected final MongoOptions _mongoOptions; - - protected static int updaterIntervalMS; - protected static int updaterIntervalNoMasterMS; - @SuppressWarnings("deprecation") - protected static final MongoOptions mongoOptionsDefaults = new MongoOptions(); - protected static final float latencySmoothFactor; - protected static final DBObject isMasterCmd = new BasicDBObject("ismaster", 1); - - /** - * Start the updater if there is one - */ - void start() { - if (_updater != null) { - _updater.start(); - } - } - - /** - * Stop the updater if there is one - */ - void close() { - _closed = true; - if (_updater != null) { - _updater.interrupt(); - } - } - - /** - * Gets the list of addresses for this connection. - */ - abstract List getServerAddressList(); - - - /** - * Whether there is least one server up. - */ - abstract boolean hasServerUp(); - - /** - * Ensures that we have the current master, if there is one. If the current snapshot of the replica set - * has no master, this method waits one cycle to find a new master, and returns it if found, or null if not. - * - * @return address of the current master, or null if there is none - */ - abstract Node ensureMaster(); - - /** - * Whether this connection has been closed. 
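[Editor's aside: a worked example of the smoothed-latency update that UpdatableNode.update() below applies after each ismaster ping, using the default latencySmoothFactor of 4 (configurable via the com.mongodb.latencySmoothFactor system property):]

    float pingTimeMS = 10.0f;                                 // previous smoothed value
    float newPingMS = 30.0f;                                  // latest measured round trip
    pingTimeMS = pingTimeMS + ((newPingMS - pingTimeMS) / 4); // 10 + 20/4 = 15.0f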
- */ - void checkClosed() { - if (_closed) - throw new IllegalStateException("ReplicaSetStatus closed"); - } - - static { - updaterIntervalMS = Integer.parseInt(System.getProperty("com.mongodb.updaterIntervalMS", "5000")); - updaterIntervalNoMasterMS = Integer.parseInt(System.getProperty("com.mongodb.updaterIntervalNoMasterMS", "10")); - mongoOptionsDefaults.connectTimeout = Integer.parseInt(System.getProperty("com.mongodb.updaterConnectTimeoutMS", "20000")); - mongoOptionsDefaults.socketTimeout = Integer.parseInt(System.getProperty("com.mongodb.updaterSocketTimeoutMS", "20000")); - latencySmoothFactor = Float.parseFloat(System.getProperty("com.mongodb.latencySmoothFactor", "4")); - } - - static class Node { - - Node(float pingTime, ServerAddress addr, int maxBsonObjectSize, boolean ok) { - this._pingTime = pingTime; - this._addr = addr; - this._maxBsonObjectSize = maxBsonObjectSize; - this._ok = ok; - } - - public boolean isOk() { - return _ok; - } - - public int getMaxBsonObjectSize() { - return _maxBsonObjectSize; - } - - public ServerAddress getServerAddress() { - return _addr; - } - - protected final ServerAddress _addr; - protected final float _pingTime; - protected final boolean _ok; - protected final int _maxBsonObjectSize; - - @Override - public boolean equals(final Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - final Node node = (Node) o; - - if (_maxBsonObjectSize != node._maxBsonObjectSize) return false; - if (_ok != node._ok) return false; - if (Float.compare(node._pingTime, _pingTime) != 0) return false; - if (!_addr.equals(node._addr)) return false; - - return true; - } - - @Override - public int hashCode() { - int result = _addr.hashCode(); - result = 31 * result + (_pingTime != +0.0f ? Float.floatToIntBits(_pingTime) : 0); - result = 31 * result + (_ok ? 
1 : 0); - result = 31 * result + _maxBsonObjectSize; - return result; - } - - public String toJSON() { - StringBuilder buf = new StringBuilder(); - buf.append("{"); - buf.append("address:'").append(_addr).append("', "); - buf.append("ok:").append(_ok).append(", "); - buf.append("ping:").append(_pingTime).append(", "); - buf.append("maxBsonObjectSize:").append(_maxBsonObjectSize).append(", "); - buf.append("}"); - - return buf.toString(); - } - } - - static class BackgroundUpdater extends Thread { - public BackgroundUpdater(final String name) { - super(name); - setDaemon(true); - } - } - - static abstract class UpdatableNode { - - enum ConnectionState { - Connecting, Connected, Unconnected - } - - UpdatableNode(final ServerAddress addr, Mongo mongo, MongoOptions mongoOptions) { - this._addr = addr; - this._mongo = mongo; - this._mongoOptions = mongoOptions; - this._port = new DBPort(addr, null, mongoOptions); - } - - public boolean isOk() { - return _connectionState == Connected; - } - - public CommandResult update() { - try { - long start = System.nanoTime(); - CommandResult res = _port.runCommand(_mongo.getDB("admin"), isMasterCmd); - - long end = System.nanoTime(); - float newPingMS = (end - start) / 1000000F; - if (_connectionState != Connected) { - _pingTimeMS = newPingMS; - } - else { - _pingTimeMS = _pingTimeMS + ((newPingMS - _pingTimeMS) / latencySmoothFactor); - } - - _maxBsonObjectSize = res.getInt("maxBsonObjectSize", Bytes.MAX_OBJECT_SIZE); - - if (_connectionState != Connected) { - _connectionState = Connected; - getLogger().log(Level.INFO, "Server seen up: " + _addr); - } - - getLogger().log(Level.FINE, "Latency to " + _addr + " actual=" + newPingMS + " smoothed=" + _pingTimeMS); - - return res; - } catch (Exception e) { - if (_connectionState != Unconnected) { - _connectionState = Unconnected; - getLogger().log(Level.WARNING, String.format("Server seen down: %s", _addr), e); - } - return null; - } - } - - protected abstract Logger getLogger(); - - final ServerAddress _addr; - final MongoOptions _mongoOptions; - final Mongo _mongo; - - DBPort _port; // we have our own port so we can set different socket options and don't have to worry about the pool - - float _pingTimeMS = 0; - int _maxBsonObjectSize; - ConnectionState _connectionState = Connecting; - } - -} diff --git a/src/main/com/mongodb/DB.java b/src/main/com/mongodb/DB.java deleted file mode 100644 index 175497ee33a..00000000000 --- a/src/main/com/mongodb/DB.java +++ /dev/null @@ -1,903 +0,0 @@ -// DB.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.mongodb; - -import com.mongodb.DBApiLayer.Result; -import com.mongodb.util.Util; -import org.bson.BSONObject; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashSet; -import java.util.Iterator; -import java.util.LinkedHashSet; -import java.util.List; -import java.util.Set; - -/** - * An abstract class that represents a logical database on a server. - * Thread-safe. - *

- * A DB instance can be obtained from a {@link MongoClient} using code like:
- *
- * Mongo m = new Mongo();
- * DB db = m.getDB("mydb");
- * - * @dochub databases - */ -public abstract class DB { - - private static final Set _obedientCommands = new HashSet(); - - static { - _obedientCommands.add("group"); - _obedientCommands.add("aggregate"); - _obedientCommands.add("collstats"); - _obedientCommands.add("dbstats"); - _obedientCommands.add("count"); - _obedientCommands.add("distinct"); - _obedientCommands.add("geonear"); - _obedientCommands.add("geosearch"); - _obedientCommands.add("geowalk"); - _obedientCommands.add("text"); - } - - /** - * Constructs a new instance of the {@code DB}. - * - * @param mongo the mongo instance - * @param name the database name - */ - public DB( Mongo mongo , String name ){ - _mongo = mongo; - _name = name; - _options = new Bytes.OptionHolder( _mongo._netOptions ); - } - - /** - * Determines the read preference that should be used for the given command. - * - * @param command the {@link DBObject} representing the command - * @param requestedPreference the preference requested by the client. - * @return the read preference to use for the given command. It will never return {@code null}. - * @see com.mongodb.ReadPreference - */ - ReadPreference getCommandReadPreference(DBObject command, ReadPreference requestedPreference){ - String comString = command.keySet().iterator().next(); - - if (comString.equals("getnonce") || comString.equals("authenticate")) { - return ReadPreference.primaryPreferred(); - } - - boolean primaryRequired; - - // explicitly check mapreduce commands are inline - if(comString.equals("mapreduce")) { - Object out = command.get("out"); - if (out instanceof BSONObject ){ - BSONObject outMap = (BSONObject) out; - primaryRequired = outMap.get("inline") == null; - } - else - primaryRequired = true; - } else { - primaryRequired = !_obedientCommands.contains(comString.toLowerCase()); - } - - if (primaryRequired) { - return ReadPreference.primary(); - } else if (requestedPreference == null) { - return ReadPreference.primary(); - } else { - return requestedPreference; - } - } - - /** - * Starts a new 'consistent request'. - *

- * Following this call and until {@link com.mongodb.DB#requestDone()} is called, - * all db operations will use the same underlying connection. - *

- * This is useful to ensure that operations happen in a certain order with predictable results. - */ - public abstract void requestStart(); - - /** - * Ends the current 'consistent request'. - */ - public abstract void requestDone(); - - /** - * Ensure that a connection is assigned to the current 'consistent request' - * (from primary pool, if connected to a replica set) - */ - public abstract void requestEnsureConnection(); - - /** - * Gets a collection with a given name. - * If the collection does not exist, a new collection is created. - *

- * This class is NOT part of the public API. Be prepared for non-binary compatible changes in minor releases. - * - * @param name the name of the collection - * @return the collection - */ - protected abstract DBCollection doGetCollection( String name ); - - /** - * Gets a collection with a given name. - * If the collection does not exist, a new collection is created. - * - * @param name the name of the collection to return - * @return the collection - */ - public DBCollection getCollection( String name ){ - DBCollection c = doGetCollection( name ); - return c; - } - - /** - * Creates a collection with a given name and options. - * If the collection does not exist, a new collection is created. - *

- * Possible options:
- * <ul>
- * <li>capped ({@code boolean}) - Enables a collection cap.
- * False by default. If enabled, you must specify a size parameter.</li>
- * <li>size ({@code int}) - If capped is true, size specifies a maximum size in bytes for the capped collection.
- * When capped is false, you may use size to preallocate space.</li>
- * <li>max ({@code int}) - Optional. Specifies a maximum "cap" in number of documents for capped collections.
- * You must also specify size when specifying max.</li>
- * </ul>
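[Editor's aside: a hedged usage sketch for the options above, assuming a connected DB instance named db (hypothetical); this builds a 1 MB capped collection holding at most 1000 documents:]

    DBObject opts = BasicDBObjectBuilder.start()
            .add("capped", true)
            .add("size", 1024 * 1024) // required when capped is true
            .add("max", 1000)         // optional document-count cap
            .get();
    DBCollection log = db.createCollection("log", opts);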
- * Note that if the {@code options} parameter is {@code null}, - * the creation will be deferred to when the collection is written to. - * - * @param name the name of the collection to return - * @param options options - * @return the collection - * @throws MongoException - */ - public DBCollection createCollection( String name, DBObject options ){ - if ( options != null ){ - DBObject createCmd = new BasicDBObject("create", name); - createCmd.putAll(options); - CommandResult result = command(createCmd); - result.throwOnError(); - } - return getCollection(name); - } - - - /** - * Returns a collection matching a given string. - * - * @param s the name of the collection - * @return the collection - */ - public DBCollection getCollectionFromString( String s ){ - DBCollection foo = null; - - int idx = s.indexOf( "." ); - while ( idx >= 0 ){ - String b = s.substring( 0 , idx ); - s = s.substring( idx + 1 ); - if ( foo == null ) - foo = getCollection( b ); - else - foo = foo.getCollection( b ); - idx = s.indexOf( "." ); - } - - if ( foo != null ) - return foo.getCollection( s ); - return getCollection( s ); - } - - /** - * Executes a database command. - * This method calls {@link DB#command(DBObject, int)} } with 0 as query option. - * - * @param cmd {@code DBObject} representation of the command to be executed - * @return result of the command execution - * @throws MongoException - * @dochub commands - */ - public CommandResult command( DBObject cmd ){ - return command( cmd, 0 ); - } - - - /** - * Executes a database command. - * This method calls {@link DB#command(com.mongodb.DBObject, int, com.mongodb.DBEncoder) } with 0 as query option. - * - * @param cmd {@code DBObject} representation of the command to be executed - * @param encoder {@link DBEncoder} to be used for command encoding - * @return result of the command execution - * @throws MongoException - * @dochub commands - */ - public CommandResult command( DBObject cmd, DBEncoder encoder ){ - return command( cmd, 0, encoder ); - } - - /** - * Executes a database command. - * This method calls {@link DB#command(com.mongodb.DBObject, int, com.mongodb.ReadPreference, com.mongodb.DBEncoder) } with a null readPrefs. - * - * @param cmd {@code DBObject} representation the command to be executed - * @param options query options to use - * @param encoder {@link DBEncoder} to be used for command encoding - * @return result of the command execution - * @throws MongoException - * @dochub commands - */ - public CommandResult command( DBObject cmd , int options, DBEncoder encoder ){ - return command(cmd, options, getReadPreference(), encoder); - } - - /** - * Executes a database command. - * This method calls {@link DB#command(com.mongodb.DBObject, int, com.mongodb.ReadPreference, com.mongodb.DBEncoder) } with a default encoder. - * - * @param cmd {@code DBObject} representation the command to be executed - * @param options query options to use - * @param readPrefs {@link ReadPreference} for this command (nodes selection is the biggest part of this) - * @return result of the command execution - * @throws MongoException - * @dochub commands - */ - public CommandResult command( DBObject cmd , int options, ReadPreference readPrefs ){ - return command(cmd, options, readPrefs, DefaultDBEncoder.FACTORY.create()); - } - - /** - * Executes a database command. 
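[Editor's aside: a minimal usage sketch for DB.command, again assuming a connected DB instance named db (hypothetical):]

    CommandResult stats = db.command(new BasicDBObject("dbstats", 1));
    stats.throwOnError();                      // raises if ok != 1 or err is set
    System.out.println(stats.getServerUsed()); // which server ran the command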
- * - * @param cmd {@code DBObject} representation the command to be executed - * @param options query options to use - * @param readPrefs {@link ReadPreference} for this command (nodes selection is the biggest part of this) - * @param encoder {@link DBEncoder} to be used for command encoding - * @return result of the command execution - * @throws MongoException - * @dochub commands - */ - public CommandResult command( DBObject cmd , int options, ReadPreference readPrefs, DBEncoder encoder ){ - readPrefs = getCommandReadPreference(cmd, readPrefs); - cmd = wrapCommand(cmd, readPrefs); - - Iterator i = - getCollection("$cmd").__find(cmd, new BasicDBObject(), 0, -1, 0, options, readPrefs , - DefaultDBDecoder.FACTORY.create(), encoder); - if ( i == null || ! i.hasNext() ) - return null; - - DBObject res = i.next(); - ServerAddress sa = (i instanceof Result) ? ((Result) i).getServerAddress() : null; - CommandResult cr = new CommandResult(sa); - cr.putAll( res ); - return cr; - } - - // Only append $readPreference meta-operator if connected to a mongos, read preference is not primary - // or secondary preferred, - // and command is an instance of BasicDBObject. The last condition is unfortunate, but necessary in case - // the encoder is not capable of encoding a BasicDBObject - // Due to issues with compatibility between different versions of mongos, also wrap the command in a - // $query field, so that the $readPreference is not rejected - private DBObject wrapCommand(DBObject cmd, final ReadPreference readPrefs) { - if (getMongo().isMongosConnection() && - !(ReadPreference.primary().equals(readPrefs) || ReadPreference.secondaryPreferred().equals(readPrefs)) && - cmd instanceof BasicDBObject) { - cmd = new BasicDBObject("$query", cmd) - .append(QueryOpBuilder.READ_PREFERENCE_META_OPERATOR, readPrefs.toDBObject()); - } - return cmd; - } - - /** - * Executes a database command. - * - * @param cmd {@code DBObject} representation the command to be executed - * @param options query options to use - * @return result of the command execution - * @throws MongoException - * @dochub commands - */ - public CommandResult command( DBObject cmd , int options ){ - return command(cmd, options, getReadPreference()); - } - - /** - * Executes a database command. - * This method constructs a simple dbobject and calls {@link DB#command(com.mongodb.DBObject) } - * - * @param cmd name of the command to be executed - * @return result of the command execution - * @throws MongoException - * @dochub commands - */ - public CommandResult command( String cmd ){ - return command( new BasicDBObject( cmd , Boolean.TRUE ) ); - } - - /** - * Executes a database command. - * This method constructs a simple dbobject and calls {@link DB#command(com.mongodb.DBObject, int) } - * - * @param cmd name of the command to be executed - * @param options query options to use - * @return result of the command execution - * @throws MongoException - * @dochub commands - */ - public CommandResult command( String cmd, int options ){ - return command( new BasicDBObject( cmd , Boolean.TRUE ), options ); - } - - /** - * Evaluates JavaScript functions on the database server. - * This is useful if you need to touch a lot of data lightly, in which case network transfer could be a bottleneck. - * - * @param code @{code String} representation of JavaScript function - * @param args arguments to pass to the JavaScript function - * @return result of the command execution - * @throws MongoException - */ - public CommandResult doEval( String code , Object ... 
args ){ - - return command( BasicDBObjectBuilder.start() - .add( "$eval" , code ) - .add( "args" , args ) - .get() ); - } - - /** - * Calls {@link DB#doEval(java.lang.String, java.lang.Object[]) }. - * If the command is successful, the "retval" field is extracted and returned. - * Otherwise an exception is thrown. - * - * @param code @{code String} representation of JavaScript function - * @param args arguments to pass to the JavaScript function - * @return result of the execution - * @throws MongoException - */ - public Object eval( String code , Object ... args ){ - - CommandResult res = doEval( code , args ); - res.throwOnError(); - return res.get( "retval" ); - } - - /** - * Helper method for calling a 'dbStats' command. - * It returns storage statistics for a given database. - * - * @return result of the execution - * @throws MongoException - */ - public CommandResult getStats() { - return command("dbstats"); - } - - /** - * Returns the name of this database. - * - * @return the name - */ - public String getName(){ - return _name; - } - - /** - * Makes this database read-only. - * Important note: this is a convenience setting that is only known on the client side and not persisted. - * - * @param b if the database should be read-only - * @deprecated Avoid making database read-only via this method. - * Connect with a user credentials that has a read-only access to a server instead. - */ - @Deprecated - public void setReadOnly(Boolean b) { - _readOnly = b; - } - - /** - * Returns a set containing all collections in the existing database. - * - * @return an set of names - * @throws MongoException - */ - public Set getCollectionNames(){ - - DBCollection namespaces = getCollection("system.namespaces"); - if (namespaces == null) - throw new RuntimeException("this is impossible"); - - Iterator i = namespaces.__find(new BasicDBObject(), null, 0, 0, 0, getOptions(), getReadPreference(), null); - if (i == null) - return new HashSet(); - - List tables = new ArrayList(); - - for (; i.hasNext();) { - DBObject o = i.next(); - if ( o.get( "name" ) == null ){ - throw new MongoException( "how is name null : " + o ); - } - String n = o.get("name").toString(); - int idx = n.indexOf("."); - - String root = n.substring(0, idx); - if (!root.equals(_name)) - continue; - - if (n.indexOf("$") >= 0) - continue; - - String table = n.substring(idx + 1); - - tables.add(table); - } - - Collections.sort(tables); - - return new LinkedHashSet(tables); - } - - /** - * Checks to see if a collection with a given name exists on a server. - * - * @param collectionName a name of the collection to test for existence - * @return {@code false} if no collection by that name exists, {@code true} if a match to an existing collection was found - * @throws MongoException - */ - public boolean collectionExists(String collectionName) - { - if (collectionName == null || "".equals(collectionName)) - return false; - - Set collections = getCollectionNames(); - if (collections.isEmpty()) - return false; - - for (String collection : collections) - { - if (collectionName.equalsIgnoreCase(collection)) - return true; - } - - return false; - } - - - /** - * Returns the name of this database. - * - * @return the name - */ - @Override - public String toString(){ - return _name; - } - - /** - * Returns the error status of the last operation on the current connection. - *

- * The result of this command will look like:
- *
- *     { "err" : errorMessage , "ok" : 1.0 }
- *
- * The value for errorMessage will be null if no error occurred, or a description otherwise.

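[Editor's aside: a hedged sketch tying this to WriteConcern, with db and coll as hypothetical connected instances; as the note below says, putting a WriteConcern on the write itself avoids calling getLastError() on an undefined connection:]

    coll.insert(new BasicDBObject("_id", 1), WriteConcern.SAFE); // getlasterror issued for you
    CommandResult err = db.getLastError(2, 5000, false);         // w=2, wtimeout=5000ms, no fsync
    err.throwOnError();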
- * Important note: when calling this method directly, it is undefined which connection "getLastError" is called on. - * You may need to explicitly use a "consistent Request", see {@link DB#requestStart()} - * For most purposes it is better not to call this method directly but instead use {@link WriteConcern} - * - * @return {@code DBObject} with error and status information - * @throws MongoException - */ - public CommandResult getLastError(){ - return command(new BasicDBObject("getlasterror", 1)); - } - - /** - * Returns the error status of the last operation on the current connection. - * - * @param concern a {@link WriteConcern} to be used while checking for the error status. - * @return {@code DBObject} with error and status information - * @throws MongoException - * @see {@link DB#getLastError() } - */ - public CommandResult getLastError( com.mongodb.WriteConcern concern ){ - return command( concern.getCommand() ); - } - - /** - * Returns the error status of the last operation on the current connection. - * - * @param w when running with replication, this is the number of servers to replicate to before returning. A w value of 1 indicates the primary only. A w value of 2 includes the primary and at least one secondary, etc. In place of a number, you may also set w to majority to indicate that the command should wait until the latest write propagates to a majority of replica set members. If using w, you should also use wtimeout. Specifying a value for w without also providing a wtimeout may cause {@code getLastError} to block indefinitely. - * @param wtimeout a value in milliseconds that controls how long to wait for write propagation to complete. If replication does not complete in the given timeframe, the getLastError command will return with an error status. - * @param fsync if true, wait for {@code mongod} to write this data to disk before returning. Defaults to false. - * @return {@code DBObject} with error and status information - * @throws MongoException - * @see {@link DB#getLastError(com.mongodb.WriteConcern) } - */ - public CommandResult getLastError( int w , int wtimeout , boolean fsync ){ - return command( (new com.mongodb.WriteConcern( w, wtimeout , fsync )).getCommand() ); - } - - - /** - * Sets the write concern for this database. It will be used for - * write operations to any collection in this database. See the - * documentation for {@link WriteConcern} for more information. - * - * @param concern {@code WriteConcern} to use - */ - public void setWriteConcern( com.mongodb.WriteConcern concern ){ - if (concern == null) throw new IllegalArgumentException(); - _concern = concern; - } - - /** - * Gets the write concern for this database. - * - * @return {@code WriteConcern} to be used for write operations, if not specified explicitly - */ - public com.mongodb.WriteConcern getWriteConcern(){ - if ( _concern != null ) - return _concern; - return _mongo.getWriteConcern(); - } - - /** - * Sets the read preference for this database. Will be used as default for - * read operations from any collection in this database. See the - * documentation for {@link ReadPreference} for more information. - * - * @param preference {@code ReadPreference} to use - */ - public void setReadPreference( ReadPreference preference ){ - _readPref = preference; - } - - /** - * Gets the read preference for this database. 
- * - * @return {@code ReadPreference} to be used for read operations, if not specified explicitly - */ - public ReadPreference getReadPreference(){ - if ( _readPref != null ) - return _readPref; - return _mongo.getReadPreference(); - } - - /** - * Drops this database, deleting the associated data files. Use with caution. - * - * @throws MongoException - */ - public void dropDatabase(){ - - CommandResult res = command(new BasicDBObject("dropDatabase", 1)); - res.throwOnError(); - _mongo._dbs.remove(this.getName()); - } - - /** - * Returns {@code true} if a user has been authenticated on this database. - * - * @return {@code true} if authenticated, {@code false} otherwise - * @dochub authenticate - * @deprecated Please use {@link MongoClient#MongoClient(java.util.List, java.util.List)} to create a client, which - * will authentificate all connections to server - */ - @Deprecated - public boolean isAuthenticated() { - return getAuthenticationCredentials() != null; - } - - /** - * Authenticates to db with the given credentials. If this method (or {@code authenticateCommand} has already been - * called with the same credentials and the authentication test succeeded, this method will return {@code true}. If this method - * has already been called with different credentials and the authentication test succeeded, - * this method will throw an {@code IllegalStateException}. If this method has already been called with any credentials - * and the authentication test failed, this method will re-try the authentication test with the - * given credentials. - * - * @param username name of user for this database - * @param password password of user for this database - * @return true if authenticated, false otherwise - * @throws MongoException if authentication failed due to invalid user/pass, or other exceptions like I/O - * @throws IllegalStateException if authentiation test has already succeeded with different credentials - * @dochub authenticate - * @see #authenticateCommand(String, char[]) - * @deprecated Please use {@link MongoClient#MongoClient(java.util.List, java.util.List)} to create a client, which - * will authentificate all connections to server - */ - @Deprecated - public boolean authenticate(String username, char[] password) { - return authenticateCommandHelper(username, password).failure == null; - } - - /** - * Authenticates to db with the given credentials. If this method (or {@code authenticate} has already been - * called with the same credentials and the authentication test succeeded, this method will return true. If this method - * has already been called with different credentials and the authentication test succeeded, - * this method will throw an {@code IllegalStateException}. If this method has already been called with any credentials - * and the authentication test failed, this method will re-try the authentication test with the - * given credentials. 
- * - * @param username name of user for this database - * @param password password of user for this database - * @return the CommandResult from authenticate command - * @throws MongoException if authentication failed due to invalid user/pass, or other exceptions like I/O - * @throws IllegalStateException if authentiation test has already succeeded with different credentials - * @dochub authenticate - * @see #authenticate(String, char[]) - * @deprecated Please use {@link MongoClient#MongoClient(java.util.List, java.util.List)} to create a client, which - * will authentificate all connections to server - */ - @Deprecated - public synchronized CommandResult authenticateCommand(String username, char[] password) { - CommandResultPair commandResultPair = authenticateCommandHelper(username, password); - if (commandResultPair.failure != null) { - throw commandResultPair.failure; - } - return commandResultPair.result; - } - - private CommandResultPair authenticateCommandHelper(String username, char[] password) { - MongoCredential credentials = - MongoCredential.createMongoCRCredential(username, getName(), password); - if (getAuthenticationCredentials() != null) { - if (getAuthenticationCredentials().equals(credentials)) { - if (authenticationTestCommandResult != null) { - return new CommandResultPair(authenticationTestCommandResult); - } - } else { - throw new IllegalStateException("can't authenticate twice on the same database"); - } - } - - try { - authenticationTestCommandResult = doAuthenticate(credentials); - return new CommandResultPair(authenticationTestCommandResult); - } catch (CommandFailureException commandFailureException) { - return new CommandResultPair(commandFailureException); - } - } - - class CommandResultPair { - CommandResult result; - CommandFailureException failure; - - public CommandResultPair(final CommandResult result) { - this.result = result; - } - - public CommandResultPair(final CommandFailureException failure) { - this.failure = failure; - } - } - - abstract CommandResult doAuthenticate(MongoCredential credentials); - - /** - * Adds a new user for this db - * - * @param username - * @param passwd - * @throws MongoException - */ - public WriteResult addUser( String username , char[] passwd ){ - return addUser(username, passwd, false); - } - - /** - * Adds privilege documents to the {@code system.users} collection in a database, - * which creates database credentials in MongoDB. - * - * @param username - * @param passwd - * @param readOnly if true, user will only be able to read - * @throws MongoException - */ - public WriteResult addUser( String username , char[] passwd, boolean readOnly ){ - DBCollection c = getCollection( "system.users" ); - DBObject o = c.findOne( new BasicDBObject( "user" , username ) ); - if ( o == null ) - o = new BasicDBObject( "user" , username ); - o.put( "pwd" , _hash( username , passwd ) ); - o.put( "readOnly" , readOnly ); - return c.save( o ); - } - - /** - * Removes the specified username from the database. 
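[Editor's aside on the credential format: per _hash() further down, addUser() stores the hex MD5 of "<username>:mongo:<password>". Values here are illustrative only:]

    String pwdHash = com.mongodb.util.Util.hexMD5("alice:mongo:sekret".getBytes());
    // equals what addUser("alice", "sekret".toCharArray()) would store in system.users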
- * - * @param username user to be removed - * @throws MongoException - */ - public WriteResult removeUser( String username ){ - DBCollection c = getCollection( "system.users" ); - return c.remove(new BasicDBObject( "user" , username )); - } - - String _hash( String username , char[] passwd ){ - ByteArrayOutputStream bout = new ByteArrayOutputStream( username.length() + 20 + passwd.length ); - try { - bout.write( username.getBytes() ); - bout.write( ":mongo:".getBytes() ); - for ( int i=0; i= 128 ) - throw new IllegalArgumentException( "can't handle non-ascii passwords yet" ); - bout.write( (byte)passwd[i] ); - } - } - catch ( IOException ioe ){ - throw new RuntimeException( "impossible" , ioe ); - } - return Util.hexMD5( bout.toByteArray() ); - } - - /** - * Returns the last error that occurred since start of database or a call to {@link com.mongodb.DB#resetError()} - *

- * The return object will look like:
- *
- *     { err : errorMessage, nPrev : countOpsBack, ok : 1 }
- *
- * The value for errorMessage will be null if no error has occurred, otherwise the error message.
- * The value of countOpsBack will be the number of operations since the error occurred.

- * Care must be taken to ensure that calls to getPreviousError go to the same connection as that - * of the previous operation. - * See {@link DB#requestStart()} for more information. - * - * @return {@code DBObject} with error and status information - * @throws MongoException - */ - public CommandResult getPreviousError(){ - return command(new BasicDBObject("getpreverror", 1)); - } - - /** - * Resets the error memory for this database. - * Used to clear all errors such that {@link DB#getPreviousError()} will return no error. - * - * @throws MongoException - */ - public void resetError(){ - command(new BasicDBObject("reseterror", 1)); - } - - /** - * For testing purposes only - this method forces an error to help test error handling - * - * @throws MongoException - */ - public void forceError(){ - command(new BasicDBObject("forceerror", 1)); - } - - /** - * Gets the {@link Mongo} instance - * - * @return the instance of {@link Mongo} this database belongs to - */ - public Mongo getMongo(){ - return _mongo; - } - - /** - * Gets another database on same server - * - * @param name name of the database - * @return the database - */ - public DB getSisterDB( String name ){ - return _mongo.getDB( name ); - } - - /** - * Makes it possible to execute "read" queries on a slave node - * - * @see ReadPreference#secondaryPreferred() - * @deprecated Replaced with {@code ReadPreference.secondaryPreferred()} - */ - @Deprecated - public void slaveOk(){ - addOption( Bytes.QUERYOPTION_SLAVEOK ); - } - - /** - * Adds the given flag to the query options. - * - * @param option value to be added - */ - public void addOption( int option ){ - _options.add( option ); - } - - /** - * Sets the query options, overwriting previous value. - * - * @param options bit vector of query options - */ - public void setOptions( int options ){ - _options.set( options ); - } - - /** - * Resets the query options. - */ - public void resetOptions(){ - _options.reset(); - } - - /** - * Gets the query options - * - * @return bit vector of query options - */ - public int getOptions(){ - return _options.get(); - } - - public abstract void cleanCursors( boolean force ); - - MongoCredential getAuthenticationCredentials() { - return getMongo().getAuthority().getCredentialsStore().get(getName()); - } - - final Mongo _mongo; - final String _name; - - - /** - * @deprecated See {@link #setReadOnly(Boolean)} - */ - @Deprecated - protected boolean _readOnly = false; - private com.mongodb.WriteConcern _concern; - private com.mongodb.ReadPreference _readPref; - final Bytes.OptionHolder _options; - - // cached authentication command result, to return in case of multiple calls to authenticateCommand with the - // same credentials - private volatile CommandResult authenticationTestCommandResult; -} diff --git a/src/main/com/mongodb/DBAddress.java b/src/main/com/mongodb/DBAddress.java deleted file mode 100644 index 8fc507df583..00000000000 --- a/src/main/com/mongodb/DBAddress.java +++ /dev/null @@ -1,187 +0,0 @@ -// DBAddress.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import java.net.InetAddress; -import java.net.UnknownHostException; - -/** - * Represents a database address - */ -public class DBAddress extends ServerAddress { - - /** Creates a new address - * Accepts as the parameter format: - * - * - * - * - * - * - * - * - * - * - * - * - * - *
- *   name                      "mydb"
- *   <host>/name               "127.0.0.1/mydb"
- *   <host>:<port>/name        "127.0.0.1:8080/mydb"
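[Editor's aside: a hedged sketch of the three formats above; each of these constructors declares UnknownHostException:]

    DBAddress byName = new DBAddress("mydb");                       // 127.0.0.1:27017 per ServerAddress defaults
    DBAddress byHost = new DBAddress("127.0.0.1/mydb");
    DBAddress byHostAndPort = new DBAddress("127.0.0.1:8080/mydb");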
- * @param urlFormat - * @throws UnknownHostException - */ - public DBAddress( String urlFormat ) - throws UnknownHostException { - super( _getHostSection( urlFormat ) ); - - _check( urlFormat , "urlFormat" ); - _db = _fixName( _getDBSection( urlFormat ) ); - - _check( _host , "host" ); - _check( _db , "db" ); - } - - static String _getHostSection( String urlFormat ){ - if ( urlFormat == null ) - throw new NullPointerException( "urlFormat can't be null" ); - int idx = urlFormat.indexOf( "/" ); - if ( idx >= 0 ) - return urlFormat.substring( 0 , idx ); - return null; - } - - static String _getDBSection( String urlFormat ){ - if ( urlFormat == null ) - throw new NullPointerException( "urlFormat can't be null" ); - int idx = urlFormat.indexOf( "/" ); - if ( idx >= 0 ) - return urlFormat.substring( idx + 1 ); - return urlFormat; - } - - static String _fixName( String name ){ - name = name.replace( '.' , '-' ); - return name; - } - - /** - * @param other an existing DBAddress that gives the host and port - * @param dbname the database to which to connect - * @throws UnknownHostException - */ - public DBAddress( DBAddress other , String dbname ) - throws UnknownHostException { - this( other._host , other._port , dbname ); - } - - /** - * @param host host name - * @param dbname database name - * @throws UnknownHostException - */ - public DBAddress( String host , String dbname ) - throws UnknownHostException { - this( host , DBPort.PORT , dbname ); - } - - /** - * @param host host name - * @param port database port - * @param dbname database name - * @throws UnknownHostException - */ - public DBAddress( String host , int port , String dbname ) - throws UnknownHostException { - super( host , port ); - _db = dbname.trim(); - } - - /** - * @param addr host address - * @param port database port - * @param dbname database name - */ - public DBAddress( InetAddress addr , int port , String dbname ){ - super( addr , port ); - _check( dbname , "name" ); - _db = dbname.trim(); - } - - static void _check( String thing , String name ){ - if ( thing == null ) - throw new NullPointerException( name + " can't be null " ); - - thing = thing.trim(); - if ( thing.length() == 0 ) - throw new IllegalArgumentException( name + " can't be empty" ); - } - - @Override - public int hashCode(){ - return super.hashCode() + _db.hashCode(); - } - - @Override - public boolean equals( Object other ){ - if ( other instanceof DBAddress ){ - DBAddress a = (DBAddress)other; - return - a._port == _port && - a._db.equals( _db ) && - a._host.equals( _host ); - } else if ( other instanceof ServerAddress ){ - return other.equals(this); - } - return false; - } - - - /** - * creates a DBAddress pointing to a different database on the same server - * @param name database name - * @return - * @throws MongoException - */ - public DBAddress getSister( String name ){ - try { - return new DBAddress( _host , _port , name ); - } - catch ( UnknownHostException uh ){ - throw new MongoInternalException( "shouldn't be possible" , uh ); - } - } - - /** - * gets the database name - * @return - */ - public String getDBName(){ - return _db; - } - - /** - * gets a String representation of address as host:port/dbname. 
- * @return this address - */ - @Override - public String toString(){ - return super.toString() + "/" + _db; - } - - final String _db; -} diff --git a/src/main/com/mongodb/DBApiLayer.java b/src/main/com/mongodb/DBApiLayer.java deleted file mode 100644 index d9430dae8de..00000000000 --- a/src/main/com/mongodb/DBApiLayer.java +++ /dev/null @@ -1,556 +0,0 @@ -// DBApiLayer.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import com.mongodb.util.JSON; -import org.bson.BSONObject; -import org.bson.types.ObjectId; - -import java.util.*; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.logging.Level; -import java.util.logging.Logger; - - -/** Database API - * This cannot be directly instantiated, but the functions are available - * through instances of Mongo. - * - * @deprecated This class is NOT part of the public API. It will be dropped in 3.x releases. - */ -@Deprecated -public class DBApiLayer extends DB { - - /** The maximum number of cursors allowed */ - static final int NUM_CURSORS_BEFORE_KILL = 100; - static final int NUM_CURSORS_PER_BATCH = 20000; - - // --- show - - static final Logger TRACE_LOGGER = Logger.getLogger( "com.mongodb.TRACE" ); - static final Level TRACE_LEVEL = Boolean.getBoolean( "DB.TRACE" ) ? Level.INFO : Level.FINEST; - - static boolean willTrace(){ - return TRACE_LOGGER.isLoggable( TRACE_LEVEL ); - } - - static void trace( String s ){ - TRACE_LOGGER.log( TRACE_LEVEL , s ); - } - - static int chooseBatchSize(int batchSize, int limit, int fetched) { - int bs = Math.abs(batchSize); - int remaining = limit > 0 ? limit - fetched : 0; - int res; - if (bs == 0 && remaining > 0) - res = remaining; - else if (bs > 0 && remaining == 0) - res = bs; - else - res = Math.min(bs, remaining); - - if (batchSize < 0) { - // force close - res = -res; - } - - if (res == 1) { - // optimization: use negative batchsize to close cursor - res = -1; - } - return res; - } - - /** - * @param mongo the Mongo instance - * @param name the database name - * @param connector the connector - */ - protected DBApiLayer( Mongo mongo, String name , DBConnector connector ){ - super( mongo, name ); - - if ( connector == null ) - throw new IllegalArgumentException( "need a connector: " + name ); - - _root = name; - _rootPlusDot = _root + "."; - - _connector = connector; - } - - public void requestStart(){ - _connector.requestStart(); - } - - public void requestDone(){ - _connector.requestDone(); - } - - public void requestEnsureConnection(){ - _connector.requestEnsureConnection(); - } - - protected MyCollection doGetCollection( String name ){ - MyCollection c = _collections.get( name ); - if ( c != null ) - return c; - - c = new MyCollection( name ); - MyCollection old = _collections.putIfAbsent(name, c); - return old != null ? 
old : c; - } - - - /** - * @param force true if should clean regardless of number of dead cursors - * @throws MongoException - */ - public void cleanCursors( boolean force ){ - - int sz = _deadCursorIds.size(); - - if ( sz == 0 || ( ! force && sz < NUM_CURSORS_BEFORE_KILL)) - return; - - Bytes.LOGGER.info( "going to kill cursors : " + sz ); - - Map> m = new HashMap>(); - DeadCursor c; - while (( c = _deadCursorIds.poll()) != null ){ - List x = m.get( c.host ); - if ( x == null ){ - x = new LinkedList(); - m.put( c.host , x ); - } - x.add( c.id ); - } - - for ( Map.Entry> e : m.entrySet() ){ - try { - killCursors( e.getKey() , e.getValue() ); - } - catch ( Throwable t ){ - Bytes.LOGGER.log( Level.WARNING , "can't clean cursors" , t ); - for ( Long x : e.getValue() ) - _deadCursorIds.add( new DeadCursor( x , e.getKey() ) ); - } - } - } - - void killCursors( ServerAddress addr , List all ){ - if ( all == null || all.size() == 0 ) - return; - - OutMessage om = OutMessage.killCursors(_mongo, Math.min( NUM_CURSORS_PER_BATCH , all.size())); - - int soFar = 0; - int totalSoFar = 0; - for (Long l : all) { - om.writeLong(l); - - totalSoFar++; - soFar++; - - if ( soFar >= NUM_CURSORS_PER_BATCH ){ - _connector.say( this , om ,com.mongodb.WriteConcern.NONE ); - om = OutMessage.killCursors(_mongo, Math.min( NUM_CURSORS_PER_BATCH , all.size() - totalSoFar)); - soFar = 0; - } - } - - _connector.say( this , om ,com.mongodb.WriteConcern.NONE , addr ); - } - - @Override - CommandResult doAuthenticate(MongoCredential credentials) { - return _connector.authenticate(credentials); - } - - class MyCollection extends DBCollection { - MyCollection( String name ){ - super( DBApiLayer.this , name ); - _fullNameSpace = _root + "." + name; - } - - public void doapply( DBObject o ){ - } - - @Override - public void drop(){ - _collections.remove(getName()); - super.drop(); - } - - public WriteResult insert(List list, com.mongodb.WriteConcern concern, DBEncoder encoder ){ - - if (concern == null) { - throw new IllegalArgumentException("Write concern can not be null"); - } - - return insert(list, true, concern, encoder); - } - - protected WriteResult insert(List list, boolean shouldApply , com.mongodb.WriteConcern concern, DBEncoder encoder ){ - - if (encoder == null) - encoder = DefaultDBEncoder.FACTORY.create(); - - if ( willTrace() ) { - for (DBObject o : list) { - trace( "save: " + _fullNameSpace + " " + JSON.serialize( o ) ); - } - } - - if ( shouldApply ){ - for (DBObject o : list) { - apply(o); - _checkObject(o, false, false); - Object id = o.get("_id"); - if (id instanceof ObjectId) { - ((ObjectId) id).notNew(); - } - } - } - - WriteResult last = null; - - int cur = 0; - int maxsize = _mongo.getMaxBsonObjectSize(); - while ( cur < list.size() ) { - - OutMessage om = OutMessage.insert( this , encoder, concern ); - - for ( ; cur < list.size(); cur++ ){ - DBObject o = list.get(cur); - om.putObject( o ); - - // limit for batch insert is 4 x maxbson on server, use 2 x to be safe - if ( om.size() > 2 * maxsize ){ - cur++; - break; - } - } - - last = _connector.say( _db , om , concern ); - } - - return last; - } - - public WriteResult remove( DBObject o , com.mongodb.WriteConcern concern, DBEncoder encoder ){ - - if (concern == null) { - throw new IllegalArgumentException("Write concern can not be null"); - } - - if (encoder == null) - encoder = DefaultDBEncoder.FACTORY.create(); - - if ( willTrace() ) trace( "remove: " + _fullNameSpace + " " + JSON.serialize( o ) ); - - OutMessage om = OutMessage.remove(this, encoder, o); 
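        // Note (sketch of the flow): `om` now holds the encoded delete message
        // for this namespace; the connector call just below writes it to the
        // server, applying the caller's WriteConcern.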
- - return _connector.say( _db , om , concern ); - } - - @Override - Iterator __find( DBObject ref , DBObject fields , int numToSkip , int batchSize, int limit , int options, ReadPreference readPref, DBDecoder decoder ){ - - return __find(ref, fields, numToSkip, batchSize, limit, options, readPref, decoder, DefaultDBEncoder.FACTORY.create()); - } - - @Override - Iterator __find( DBObject ref , DBObject fields , int numToSkip , int batchSize , int limit, int options, - ReadPreference readPref, DBDecoder decoder, DBEncoder encoder ){ - - if ( ref == null ) - ref = new BasicDBObject(); - - if ( willTrace() ) trace( "find: " + _fullNameSpace + " " + JSON.serialize( ref ) ); - - OutMessage query = OutMessage.query( this , options , numToSkip , chooseBatchSize(batchSize, limit, 0) , ref , fields, readPref, - encoder); - - Response res = _connector.call( _db , this , query , null , 2, readPref, decoder ); - - if ( res.size() == 1 ){ - BSONObject foo = res.get(0); - MongoException e = MongoException.parse( foo ); - if ( e != null && ! _name.equals( "$cmd" ) ) - throw e; - } - - return new Result( this , res , batchSize, limit , options, decoder ); - } - - @Override - public WriteResult update( DBObject query , DBObject o , boolean upsert , boolean multi , com.mongodb.WriteConcern concern, DBEncoder encoder ){ - - if (o == null) { - throw new IllegalArgumentException("update can not be null"); - } - - if (concern == null) { - throw new IllegalArgumentException("Write concern can not be null"); - } - - if (encoder == null) - encoder = DefaultDBEncoder.FACTORY.create(); - - if (!o.keySet().isEmpty()) { - // if 1st key doesn't start with $, then object will be inserted as is, need to check it - String key = o.keySet().iterator().next(); - if (!key.startsWith("$")) - _checkObject(o, false, false); - } - - if ( willTrace() ) { - trace( "update: " + _fullNameSpace + " " + JSON.serialize( query ) + " " + JSON.serialize( o ) ); - } - - OutMessage om = OutMessage.update(this, encoder, upsert, multi, query, o); - - return _connector.say( _db , om , concern ); - } - - public void createIndex( final DBObject keys, final DBObject options, DBEncoder encoder ){ - - if (encoder == null) - encoder = DefaultDBEncoder.FACTORY.create(); - - DBObject full = new BasicDBObject(); - for ( String k : options.keySet() ) - full.put( k , options.get( k ) ); - full.put( "key" , keys ); - - DBApiLayer.this.doGetCollection( "system.indexes" ).insert(Arrays.asList(full), false, WriteConcern.SAFE, encoder); - } - - final String _fullNameSpace; - } - - class Result implements Iterator { - - Result( MyCollection coll , Response res , int batchSize, int limit , int options, DBDecoder decoder ){ - _collection = coll; - _batchSize = batchSize; - _limit = limit; - _options = options; - _host = res._host; - _decoder = decoder; - init( res ); - // Only enable finalizer if cursor finalization is enabled and there is actually a cursor that needs killing - _optionalFinalizer = _mongo.getMongoOptions().isCursorFinalizerEnabled() && res.cursor() != 0 ? 
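                // (the finalizer is only worth enabling when a live server-side
                // cursor exists that would otherwise leak if never closed)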
- new OptionalFinalizer() : null; - } - - private void init( Response res ){ - if ( ( res._flags & Bytes.RESULTFLAG_CURSORNOTFOUND ) > 0 ){ - throw new MongoException.CursorNotFound(_curResult.cursor(), res.serverUsed()); - } - - _totalBytes += res._len; - _curResult = res; - _cur = res.iterator(); - _sizes.add( res.size() ); - _numFetched += res.size(); - - if (res._cursor != 0 && _limit > 0 && _limit - _numFetched <= 0) { - // fetched all docs within limit, close cursor server-side - killCursor(); - } - } - - public DBObject next(){ - if ( _cur.hasNext() ) { - return _cur.next(); - } - - if ( ! _curResult.hasGetMore( _options ) ) - throw new NoSuchElementException("no more"); - - _advance(); - return next(); - } - - public boolean hasNext(){ - boolean hasNext = _cur.hasNext(); - while ( !hasNext ) { - if ( ! _curResult.hasGetMore( _options ) ) - return false; - - _advance(); - hasNext = _cur.hasNext(); - - if (!hasNext) { - if ( ( _options & Bytes.QUERYOPTION_AWAITDATA ) == 0 ) { - // dont block waiting for data if no await - return false; - } else { - // if await, driver should block until data is available - // if server does not support await, driver must sleep to avoid busy loop - if ((_curResult._flags & Bytes.RESULTFLAG_AWAITCAPABLE) == 0) { - try { - Thread.sleep(500); - } catch (InterruptedException e) { - throw new MongoInterruptedException(e); - } - } - } - } - } - return hasNext; - } - - private void _advance(){ - - if ( _curResult.cursor() <= 0 ) - throw new RuntimeException( "can't advance a cursor <= 0" ); - - OutMessage m = OutMessage.getMore(_collection, _curResult.cursor(), - chooseBatchSize(_batchSize, _limit, _numFetched)); - - Response res = _connector.call( DBApiLayer.this , _collection , m , _host, _decoder ); - _numGetMores++; - init( res ); - } - - public void remove(){ - throw new RuntimeException( "can't remove this way" ); - } - - public int getBatchSize(){ - return _batchSize; - } - - public void setBatchSize(int size){ - _batchSize = size; - } - - public String toString(){ - return "DBCursor"; - } - - public long totalBytes(){ - return _totalBytes; - } - - public long getCursorId(){ - if ( _curResult == null ) - return 0; - return _curResult._cursor; - } - - int numGetMores(){ - return _numGetMores; - } - - List getSizes(){ - return Collections.unmodifiableList( _sizes ); - } - - void close(){ - // not perfectly thread safe here, may need to use an atomicBoolean - if (_curResult != null) { - killCursor(); - _curResult = null; - _cur = null; - } - } - - void killCursor() { - if (_curResult == null) - return; - long curId = _curResult.cursor(); - if (curId == 0) - return; - - List l = new ArrayList(); - l.add(curId); - - try { - killCursors(_host, l); - } catch (Throwable t) { - Bytes.LOGGER.log(Level.WARNING, "can't clean 1 cursor", t); - _deadCursorIds.add(new DeadCursor(curId, _host)); - } - _curResult._cursor = 0; - } - - public ServerAddress getServerAddress() { - return _host; - } - - boolean hasFinalizer() { - return _optionalFinalizer != null; - } - - Response _curResult; - Iterator _cur; - int _batchSize; - int _limit; - final DBDecoder _decoder; - final MyCollection _collection; - final int _options; - final ServerAddress _host; // host where first went. 
all subsequent have to go there - - private long _totalBytes = 0; - private int _numGetMores = 0; - private List _sizes = new ArrayList(); - private int _numFetched = 0; - - // This allows us to easily enable/disable finalizer for cleaning up un-closed cursors - private final OptionalFinalizer _optionalFinalizer; - - private class OptionalFinalizer { - @Override - protected void finalize() { - if (_curResult != null) { - long curId = _curResult.cursor(); - _curResult = null; - _cur = null; - if (curId != 0) { - _deadCursorIds.add(new DeadCursor(curId, _host)); - } - } - } - } - - } // class Result - - static class DeadCursor { - - DeadCursor( long a , ServerAddress b ){ - id = a; - host = b; - } - - final long id; - final ServerAddress host; - } - - final String _root; - final String _rootPlusDot; - final DBConnector _connector; - final ConcurrentHashMap _collections = new ConcurrentHashMap(); - - ConcurrentLinkedQueue _deadCursorIds = new ConcurrentLinkedQueue(); - -} diff --git a/src/main/com/mongodb/DBCallback.java b/src/main/com/mongodb/DBCallback.java deleted file mode 100644 index 0e08226a0f9..00000000000 --- a/src/main/com/mongodb/DBCallback.java +++ /dev/null @@ -1,30 +0,0 @@ -// DBCallback.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - - -import org.bson.BSONCallback; - -/** - * The DB callback interface. - */ -public interface DBCallback extends BSONCallback { - -} - diff --git a/src/main/com/mongodb/DBCallbackFactory.java b/src/main/com/mongodb/DBCallbackFactory.java deleted file mode 100644 index 67b74116213..00000000000 --- a/src/main/com/mongodb/DBCallbackFactory.java +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Copyright (C) 2011 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -/** - * The DBCallback factory interface. - */ -public interface DBCallbackFactory { - - public DBCallback create( DBCollection collection ); - -} - diff --git a/src/main/com/mongodb/DBCollection.java b/src/main/com/mongodb/DBCollection.java deleted file mode 100644 index 4e5d1a56409..00000000000 --- a/src/main/com/mongodb/DBCollection.java +++ /dev/null @@ -1,1751 +0,0 @@ -// DBCollection.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -// Mongo -import org.bson.types.ObjectId; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Set; - -/** This class provides a skeleton implementation of a database collection. - *
<p>A typical invocation sequence is thus
- * <blockquote><pre>
- *     MongoClient mongoClient = new MongoClient(new ServerAddress("localhost", 27017));
- *     DB db = mongoClient.getDB("mydb");
- *     DBCollection collection = db.getCollection("test");
- * </pre></blockquote>
- * @dochub collections - */ -@SuppressWarnings("unchecked") -public abstract class DBCollection { - - /** - * Saves document(s) to the database. - * if doc doesn't have an _id, one will be added - * you can get the _id that was added from doc after the insert - * - * @param arr array of documents to save - * @param concern the write concern - * @return - * @throws MongoException - * @dochub insert - */ - public WriteResult insert(DBObject[] arr , WriteConcern concern ){ - return insert( arr, concern, getDBEncoder()); - } - - /** - * Saves document(s) to the database. - * if doc doesn't have an _id, one will be added - * you can get the _id that was added from doc after the insert - * - * @param arr array of documents to save - * @param concern the write concern - * @param encoder the DBEncoder to use - * @return - * @throws MongoException - * @dochub insert - */ - public WriteResult insert(DBObject[] arr , WriteConcern concern, DBEncoder encoder) { - return insert(Arrays.asList(arr), concern, encoder); - } - - /** - * Inserts a document into the database. - * if doc doesn't have an _id, one will be added - * you can get the _id that was added from doc after the insert - * - * @param o - * @param concern the write concern - * @return - * @throws MongoException - * @dochub insert - */ - public WriteResult insert(DBObject o , WriteConcern concern ){ - return insert( Arrays.asList(o) , concern ); - } - - /** - * Saves document(s) to the database. - * if doc doesn't have an _id, one will be added - * you can get the _id that was added from doc after the insert - * - * @param arr array of documents to save - * @return - * @throws MongoException - * @dochub insert - */ - public WriteResult insert(DBObject ... arr){ - return insert( arr , getWriteConcern() ); - } - - /** - * Saves document(s) to the database. - * if doc doesn't have an _id, one will be added - * you can get the _id that was added from doc after the insert - * - * @param arr array of documents to save - * @return - * @throws MongoException - * @dochub insert - */ - public WriteResult insert(WriteConcern concern, DBObject ... arr){ - return insert( arr, concern ); - } - - /** - * Saves document(s) to the database. - * if doc doesn't have an _id, one will be added - * you can get the _id that was added from doc after the insert - * - * @param list list of documents to save - * @return - * @throws MongoException - * @dochub insert - */ - public WriteResult insert(List list ){ - return insert( list, getWriteConcern() ); - } - - /** - * Saves document(s) to the database. - * if doc doesn't have an _id, one will be added - * you can get the _id that was added from doc after the insert - * - * @param list list of documents to save - * @param concern the write concern - * @return - * @throws MongoException - * @dochub insert - */ - public WriteResult insert(List list, WriteConcern concern ){ - return insert(list, concern, getDBEncoder() ); - } - - /** - * Saves document(s) to the database. - * if doc doesn't have an _id, one will be added - * you can get the _id that was added from doc after the insert - * - * @param list list of documents to save - * @param concern the write concern - * @return - * @throws MongoException - * @dochub insert - */ - public abstract WriteResult insert(List list, WriteConcern concern, DBEncoder encoder); - - /** - * Performs an update operation. 
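A usage sketch of the insert overloads above (hypothetical field names; `collection` is the handle from the invocation sequence in the class javadoc, with the usual com.mongodb and org.bson.types imports assumed):

    BasicDBObject doc = new BasicDBObject("name", "Alice");
    collection.insert(doc, WriteConcern.SAFE);    // an _id is generated and added to doc
    ObjectId id = (ObjectId) doc.get("_id");      // readable from doc after the insert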
- * @param q search query for old object to update - * @param o object with which to update q - * @param upsert if the database should create the element if it does not exist - * @param multi if the update should be applied to all objects matching (db version 1.1.3 and above). An object will - * not be inserted if it does not exist in the collection and upsert=true and multi=true. - * See http://www.mongodb.org/display/DOCS/Atomic+Operations - * @param concern the write concern - * @return - * @throws MongoException - * @dochub update - */ - public WriteResult update( DBObject q , DBObject o , boolean upsert , boolean multi , WriteConcern concern ){ - return update( q, o, upsert, multi, concern, getDBEncoder()); - } - - /** - * Performs an update operation. - * @param q search query for old object to update - * @param o object with which to update q - * @param upsert if the database should create the element if it does not exist - * @param multi if the update should be applied to all objects matching (db version 1.1.3 and above). An object will - * not be inserted if it does not exist in the collection and upsert=true and multi=true. - * See http://www.mongodb.org/display/DOCS/Atomic+Operations - * @param concern the write concern - * @param encoder the DBEncoder to use - * @return - * @throws MongoException - * @dochub update - */ - public abstract WriteResult update( DBObject q , DBObject o , boolean upsert , boolean multi , WriteConcern concern, DBEncoder encoder ); - - /** - * calls {@link DBCollection#update(com.mongodb.DBObject, com.mongodb.DBObject, boolean, boolean, com.mongodb.WriteConcern)} with default WriteConcern. - * @param q search query for old object to update - * @param o object with which to update q - * @param upsert if the database should create the element if it does not exist - * @param multi if the update should be applied to all objects matching (db version 1.1.3 and above) - * See http://www.mongodb.org/display/DOCS/Atomic+Operations - * @return - * @throws MongoException - * @dochub update - */ - public WriteResult update( DBObject q , DBObject o , boolean upsert , boolean multi ){ - return update( q , o , upsert , multi , getWriteConcern() ); - } - - /** - * calls {@link DBCollection#update(com.mongodb.DBObject, com.mongodb.DBObject, boolean, boolean)} with upsert=false and multi=false - * @param q search query for old object to update - * @param o object with which to update q - * @return - * @throws MongoException - * @dochub update - */ - public WriteResult update( DBObject q , DBObject o ){ - return update( q , o , false , false ); - } - - /** - * calls {@link DBCollection#update(com.mongodb.DBObject, com.mongodb.DBObject, boolean, boolean)} with upsert=false and multi=true - * @param q search query for old object to update - * @param o object with which to update q - * @return - * @throws MongoException - * @dochub update - */ - public WriteResult updateMulti( DBObject q , DBObject o ){ - return update( q , o , false , true ); - } - - /** - * Adds any necessary fields to a given object before saving it to the collection. - * @param o object to which to add the fields - */ - protected abstract void doapply( DBObject o ); - - /** - * Removes objects from the database collection. 
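To make the upsert/multi switches concrete, a small sketch (hypothetical `status` and `visits` fields, same assumed `collection` handle):

    DBObject query  = new BasicDBObject("status", "active");
    DBObject change = new BasicDBObject("$inc", new BasicDBObject("visits", 1));
    collection.updateMulti(query, change);          // multi=true, upsert=false
    collection.update(query, change, true, false);  // upsert=true, multi=false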
- * @param o the object that documents to be removed must match - * @param concern WriteConcern for this operation - * @return - * @throws MongoException - * @dochub remove - */ - public WriteResult remove( DBObject o , WriteConcern concern ){ - return remove( o, concern, getDBEncoder()); - } - - /** - * Removes objects from the database collection. - * @param o the object that documents to be removed must match - * @param concern WriteConcern for this operation - * @param encoder the DBEncoder to use - * @return - * @throws MongoException - * @dochub remove - */ - public abstract WriteResult remove( DBObject o , WriteConcern concern, DBEncoder encoder ); - - /** - * calls {@link DBCollection#remove(com.mongodb.DBObject, com.mongodb.WriteConcern)} with the default WriteConcern - * @param o the object that documents to be removed must match - * @return - * @throws MongoException - * @dochub remove - */ - public WriteResult remove( DBObject o ){ - return remove( o , getWriteConcern() ); - } - - - /** - * Finds objects - */ - abstract Iterator __find( DBObject ref , DBObject fields , int numToSkip , int batchSize , int limit, int options, ReadPreference readPref, DBDecoder decoder ); - - abstract Iterator __find( DBObject ref , DBObject fields , int numToSkip , int batchSize , int limit, int options, - ReadPreference readPref, DBDecoder decoder, DBEncoder encoder ); - - - /** - * Calls {@link DBCollection#find(com.mongodb.DBObject, com.mongodb.DBObject, int, int)} and applies the query options - * @param query query used to search - * @param fields the fields of matching objects to return - * @param numToSkip number of objects to skip - * @param batchSize the batch size. This option has a complex behavior, see {@link DBCursor#batchSize(int) } - * @param options - see Bytes QUERYOPTION_* - * @return the cursor - * @throws MongoException - * @dochub find - */ - @Deprecated - public DBCursor find( DBObject query , DBObject fields , int numToSkip , int batchSize , int options ){ - return find(query, fields, numToSkip, batchSize).addOption(options); - } - - - /** - * Finds objects from the database that match a query. - * A DBCursor object is returned, that can be iterated to go through the results. - * - * @param query query used to search - * @param fields the fields of matching objects to return - * @param numToSkip number of objects to skip - * @param batchSize the batch size. This option has a complex behavior, see {@link DBCursor#batchSize(int) } - * @return the cursor - * @throws MongoException - * @dochub find - */ - @Deprecated - public DBCursor find( DBObject query , DBObject fields , int numToSkip , int batchSize ) { - DBCursor cursor = find(query, fields).skip(numToSkip).batchSize(batchSize); - return cursor; - } - - // ------ - - /** - * Finds an object by its id. - * This compares the passed in value to the _id field of the document - * - * @param obj any valid object - * @return the object, if found, otherwise null - * @throws MongoException - */ - public DBObject findOne( Object obj ){ - return findOne(obj, null); - } - - - /** - * Finds an object by its id. 
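For example (sketch; `savedId` stands in for an `_id` value captured from an earlier insert):

    DBObject found   = collection.findOne(savedId);  // compares savedId against _id
    DBObject partial = collection.findOne(savedId, new BasicDBObject("name", 1));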
- * This compares the passed in value to the _id field of the document - * - * @param obj any valid object - * @param fields fields to return - * @return the object, if found, otherwise null - * @throws MongoException - * @dochub find - */ - public DBObject findOne( Object obj, DBObject fields ){ - Iterator iterator = __find( new BasicDBObject("_id", obj), fields, 0, -1, 0, getOptions(), getReadPreference(), getDecoder() ); - return (iterator.hasNext() ? iterator.next() : null); - } - - /** - * Finds the first document in the query and updates it. - * @param query query to match - * @param fields fields to be returned - * @param sort sort to apply before picking first document - * @param remove if true, document found will be removed - * @param update update to apply - * @param returnNew if true, the updated document is returned, otherwise the old document is returned (or it would be lost forever) - * @param upsert do upsert (insert if document not present) - * @return the document - * @throws MongoException - */ - public DBObject findAndModify(DBObject query, DBObject fields, DBObject sort, boolean remove, DBObject update, boolean returnNew, boolean upsert){ - - BasicDBObject cmd = new BasicDBObject( "findandmodify", _name); - if (query != null && !query.keySet().isEmpty()) - cmd.append( "query", query ); - if (fields != null && !fields.keySet().isEmpty()) - cmd.append( "fields", fields ); - if (sort != null && !sort.keySet().isEmpty()) - cmd.append( "sort", sort ); - - if (remove) - cmd.append( "remove", remove ); - else { - if (update != null && !update.keySet().isEmpty()) { - // if 1st key doesn't start with $, then object will be inserted as is, need to check it - String key = update.keySet().iterator().next(); - if (key.charAt(0) != '$') - _checkObject(update, false, false); - cmd.append( "update", update ); - } - if (returnNew) - cmd.append( "new", returnNew ); - if (upsert) - cmd.append( "upsert", upsert ); - } - - if (remove && !(update == null || update.keySet().isEmpty() || returnNew)) - throw new MongoException("FindAndModify: Remove cannot be mixed with the Update, or returnNew params!"); - - CommandResult res = this._db.command( cmd ); - if (res.ok() || res.getErrorMessage().equals( "No matching object found" )) { - return replaceWithObjectClass((DBObject) res.get( "value" )); - } - res.throwOnError(); - return null; - } - - /** - * Doesn't yet handle internal classes properly, so this method only does something if object class is set but - * no internal classes are set. 
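A sketch of the seven-argument form above, using hypothetical job-queue field names:

    DBObject job = collection.findAndModify(
            new BasicDBObject("state", "pending"),   // query
            null,                                    // fields: return all
            new BasicDBObject("created", 1),         // sort: oldest first
            false,                                   // remove: keep the document
            new BasicDBObject("$set", new BasicDBObject("state", "running")),
            true,                                    // returnNew: return the updated document
            false);                                  // upsert: do not insert on miss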
- * - * @param oldObj the original value from the command result - * @return replaced object if necessary, or oldObj - */ - private DBObject replaceWithObjectClass(DBObject oldObj) { - if (oldObj == null || getObjectClass() == null & _internalClass.isEmpty()) { - return oldObj; - } - - DBObject newObj = instantiateObjectClassInstance(); - - for (String key : oldObj.keySet()) { - newObj.put(key, oldObj.get(key)); - } - return newObj; - } - - private DBObject instantiateObjectClassInstance() { - try { - return (DBObject) getObjectClass().newInstance(); - } catch (InstantiationException e) { - throw new MongoInternalException("can't create instance of type " + getObjectClass(), e); - } catch (IllegalAccessException e) { - throw new MongoInternalException("can't create instance of type " + getObjectClass(), e); - } - } - - - /** - * calls {@link DBCollection#findAndModify(com.mongodb.DBObject, com.mongodb.DBObject, com.mongodb.DBObject, boolean, com.mongodb.DBObject, boolean, boolean)} - * with fields=null, remove=false, returnNew=false, upsert=false - * @param query - * @param sort - * @param update - * @return the old document - * @throws MongoException - */ - public DBObject findAndModify( DBObject query , DBObject sort , DBObject update) { - return findAndModify( query, null, sort, false, update, false, false); - } - - /** - * calls {@link DBCollection#findAndModify(com.mongodb.DBObject, com.mongodb.DBObject, com.mongodb.DBObject, boolean, com.mongodb.DBObject, boolean, boolean)} - * with fields=null, sort=null, remove=false, returnNew=false, upsert=false - * @param query - * @param update - * @return the old document - * @throws MongoException - */ - public DBObject findAndModify( DBObject query , DBObject update ){ - return findAndModify( query, null, null, false, update, false, false ); - } - - /** - * calls {@link DBCollection#findAndModify(com.mongodb.DBObject, com.mongodb.DBObject, com.mongodb.DBObject, boolean, com.mongodb.DBObject, boolean, boolean)} - * with fields=null, sort=null, remove=true, returnNew=false, upsert=false - * @param query - * @return the removed document - * @throws MongoException - */ - public DBObject findAndRemove( DBObject query ) { - return findAndModify( query, null, null, true, null, false, false ); - } - - // --- START INDEX CODE --- - - /** - * calls {@link DBCollection#createIndex(com.mongodb.DBObject, com.mongodb.DBObject)} with default index options - * @param keys an object with a key set of the fields desired for the index - * @throws MongoException - */ - public void createIndex( final DBObject keys ){ - createIndex( keys , defaultOptions( keys ) ); - } - - /** - * Forces creation of an index on a set of fields, if one does not already exist. - * @param keys - * @param options - * @throws MongoException - */ - public void createIndex( DBObject keys , DBObject options ){ - createIndex( keys, options, getDBEncoder()); - } - - /** - * Forces creation of an index on a set of fields, if one does not already exist. - * @param keys - * @param options - * @param encoder the DBEncoder to use - * @throws MongoException - */ - public abstract void createIndex( DBObject keys , DBObject options, DBEncoder encoder ); - - /** - * Creates an ascending index on a field with default options, if one does not already exist. 
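For example (sketch; per genIndexName below, the derived default name here would be "age_1_name_-1"):

    collection.ensureIndex(new BasicDBObject("age", 1).append("name", -1));
    collection.ensureIndex(new BasicDBObject("email", 1), "uniq_email", true);  // named, unique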
- * @param name name of field to index on - * @throws MongoException - */ - public void ensureIndex( final String name ){ - ensureIndex( new BasicDBObject( name , 1 ) ); - } - - /** - * calls {@link DBCollection#ensureIndex(com.mongodb.DBObject, com.mongodb.DBObject)} with default options - * @param keys an object with a key set of the fields desired for the index - * @throws MongoException - */ - public void ensureIndex( final DBObject keys ){ - ensureIndex( keys , defaultOptions( keys ) ); - } - - /** - * calls {@link DBCollection#ensureIndex(com.mongodb.DBObject, java.lang.String, boolean)} with unique=false - * @param keys fields to use for index - * @param name an identifier for the index - * @throws MongoException - * @dochub indexes - */ - public void ensureIndex( DBObject keys , String name ){ - ensureIndex( keys , name , false ); - } - - /** - * Ensures an index on this collection (that is, the index will be created if it does not exist). - * @param keys fields to use for index - * @param name an identifier for the index. If null or empty, the default name will be used. - * @param unique if the index should be unique - * @throws MongoException - */ - public void ensureIndex( DBObject keys , String name , boolean unique ){ - DBObject options = defaultOptions( keys ); - if (name != null && name.length()>0) - options.put( "name" , name ); - if ( unique ) - options.put( "unique" , Boolean.TRUE ); - ensureIndex( keys , options ); - } - - /** - * Creates an index on a set of fields, if one does not already exist. - * @param keys an object with a key set of the fields desired for the index - * @param optionsIN options for the index (name, unique, etc) - * @throws MongoException - */ - public void ensureIndex( final DBObject keys , final DBObject optionsIN ){ - - if ( checkReadOnly( false ) ) return; - - final DBObject options = defaultOptions( keys ); - for ( String k : optionsIN.keySet() ) - options.put( k , optionsIN.get( k ) ); - - final String name = options.get( "name" ).toString(); - - if ( _createdIndexes.contains( name ) ) - return; - - createIndex( keys , options ); - _createdIndexes.add( name ); - } - - /** - * Clears all indices that have not yet been applied to this collection. - */ - public void resetIndexCache(){ - _createdIndexes.clear(); - } - - DBObject defaultOptions( DBObject keys ){ - DBObject o = new BasicDBObject(); - o.put( "name" , genIndexName( keys ) ); - o.put( "ns" , _fullName ); - return o; - } - - /** - * Convenience method to generate an index name from the set of fields it is over. - * @param keys the names of the fields used in this index - * @return a string representation of this index's fields - * - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. - */ - @Deprecated - public static String genIndexName( DBObject keys ){ - StringBuilder name = new StringBuilder(); - for ( String s : keys.keySet() ){ - if ( name.length() > 0 ) - name.append( '_' ); - name.append( s ).append( '_' ); - Object val = keys.get( s ); - if ( val instanceof Number || val instanceof String ) - name.append( val.toString().replace( ' ', '_' ) ); - } - return name.toString(); - } - - // --- END INDEX CODE --- - - /** - * Set hint fields for this collection (to optimize queries). - * @param lst a list of DBObjects to be used as hints - */ - public void setHintFields( List lst ){ - _hintFields = lst; - } - - /** - * Get hint fields for this collection (used to optimize queries). - * @return a list of {@code DBObject} to be used as hints. 
- */ - protected List getHintFields() { - return _hintFields; - } - - /** - * Queries for an object in this collection. - * @param ref object for which to search - * @return an iterator over the results - * @dochub find - */ - public DBCursor find( DBObject ref ){ - return new DBCursor( this, ref, null, getReadPreference()); - } - - /** - * Queries for an object in this collection. - * - *
<p>
- * An empty DBObject will match every document in the collection.
- * Regardless of fields specified, the _id fields are always returned.
- * </p>
- * <p>
- * An example that returns the "x" and "_id" fields for every document
- * in the collection that has an "x" field:
- * </p>
-     * <blockquote><pre>
-     * BasicDBObject keys = new BasicDBObject();
-     * keys.put("x", 1);
-     *
-     * DBCursor cursor = collection.find(new BasicDBObject(), keys);
-     * </pre></blockquote>
- * - * @param ref object for which to search - * @param keys fields to return - * @return a cursor to iterate over results - * @dochub find - */ - public DBCursor find( DBObject ref , DBObject keys ){ - return new DBCursor( this, ref, keys, getReadPreference()); - } - - - /** - * Queries for all objects in this collection. - * @return a cursor which will iterate over every object - * @dochub find - */ - public DBCursor find(){ - return new DBCursor( this, null, null, getReadPreference()); - } - - /** - * Returns a single object from this collection. - * @return the object found, or null if the collection is empty - * @throws MongoException - */ - public DBObject findOne(){ - return findOne( new BasicDBObject() ); - } - - /** - * Returns a single object from this collection matching the query. - * @param o the query object - * @return the object found, or null if no such object exists - * @throws MongoException - */ - public DBObject findOne( DBObject o ){ - return findOne( o, null, null, getReadPreference()); - } - - /** - * Returns a single object from this collection matching the query. - * @param o the query object - * @param fields fields to return - * @return the object found, or null if no such object exists - * @throws MongoException - * @dochub find - */ - public DBObject findOne( DBObject o, DBObject fields ) { - return findOne( o, fields, null, getReadPreference()); - } - - /** - * Returns a single obejct from this collection matching the query. - * @param o the query object - * @param fields fields to return - * @param orderBy fields to order by - * @return the object found, or null if no such object exists - * @throws MongoException - * @dochub find - */ - public DBObject findOne( DBObject o, DBObject fields, DBObject orderBy){ - return findOne(o, fields, orderBy, getReadPreference()); - } - - /** - * Returns a single object from this collection matching the query. - * @param o the query object - * @param fields fields to return - * @param readPref - * @return the object found, or null if no such object exists - * @throws MongoException - * @dochub find - */ - public DBObject findOne( DBObject o, DBObject fields, ReadPreference readPref ){ - return findOne(o, fields, null, readPref); - } - - /** - * Returns a single object from this collection matching the query. - * @param o the query object - * @param fields fields to return - * @param orderBy fields to order by - * @return the object found, or null if no such object exists - * @throws MongoException - * @dochub find - */ - public DBObject findOne( DBObject o, DBObject fields, DBObject orderBy, ReadPreference readPref ){ - - QueryOpBuilder queryOpBuilder = new QueryOpBuilder().addQuery(o).addOrderBy(orderBy); - - if (getDB().getMongo().isMongosConnection()) { - queryOpBuilder.addReadPreference(readPref); - } - - Iterator i = __find(queryOpBuilder.get(), fields , 0 , -1 , 0, getOptions(), readPref, getDecoder() ); - - DBObject obj = (i.hasNext() ? i.next() : null); - if ( obj != null && ( fields != null && fields.keySet().size() > 0 ) ){ - obj.markAsPartialObject(); - } - return obj; - } - - // Only create a new decoder if there is a decoder factory explicitly set on the collection. Otherwise return null - // so that DBPort will use a cached decoder from the default factory. - private DBDecoder getDecoder() { - return getDBDecoderFactory() != null ? getDBDecoderFactory().create() : null; - } - - // Only create a new encoder if there is an encoder factory explicitly set on the collection. 
Otherwise return null - // to allow DB to create its own or use a cached one. - private DBEncoder getDBEncoder() { - return getDBEncoderFactory() != null ? getDBEncoderFactory().create() : null; - } - - - /** - * calls {@link DBCollection#apply(com.mongodb.DBObject, boolean)} with ensureID=true - * @param o DBObject to which to add fields - * @return the modified parameter object - */ - public Object apply( DBObject o ){ - return apply( o , true ); - } - - /** - * calls {@link DBCollection#doapply(com.mongodb.DBObject)}, optionally adding an automatic _id field - * @param jo object to add fields to - * @param ensureID whether to add an _id field - * @return the modified object o - */ - public Object apply( DBObject jo , boolean ensureID ){ - - Object id = jo.get( "_id" ); - if ( ensureID && id == null ){ - id = ObjectId.get(); - jo.put( "_id" , id ); - } - - doapply( jo ); - - return id; - } - - /** - * calls {@link DBCollection#save(com.mongodb.DBObject, com.mongodb.WriteConcern)} with default WriteConcern - * @param jo the DBObject to save - * will add _id field to jo if needed - * @return - * @throws MongoException - */ - public WriteResult save( DBObject jo ){ - return save(jo, getWriteConcern()); - } - - /** - * Saves an object to this collection (does insert or update based on the object _id). - * @param jo the DBObject to save - * @param concern the write concern - * @return - * @throws MongoException - */ - public WriteResult save( DBObject jo, WriteConcern concern ){ - if ( checkReadOnly( true ) ) - return null; - - _checkObject( jo , false , false ); - - Object id = jo.get( "_id" ); - - if ( id == null || ( id instanceof ObjectId && ((ObjectId)id).isNew() ) ){ - if ( id != null && id instanceof ObjectId ) - ((ObjectId)id).notNew(); - if ( concern == null ) - return insert( jo ); - else - return insert( jo, concern ); - } - - DBObject q = new BasicDBObject(); - q.put( "_id" , id ); - if ( concern == null ) - return update( q , jo , true , false ); - else - return update( q , jo , true , false , concern ); - - } - - // ---- DB COMMANDS ---- - /** - * Drops all indices from this collection - * @throws MongoException - */ - public void dropIndexes(){ - dropIndexes( "*" ); - } - - - /** - * Drops an index from this collection - * @param name the index name - * @throws MongoException - */ - public void dropIndexes( String name ){ - DBObject cmd = BasicDBObjectBuilder.start() - .add( "deleteIndexes" , getName() ) - .add( "index" , name ) - .get(); - - resetIndexCache(); - CommandResult res = _db.command( cmd ); - if (res.ok() || res.getErrorMessage().equals( "ns not found" )) - return; - res.throwOnError(); - } - - /** - * Drops (deletes) this collection. Use with care. - * @throws MongoException - */ - public void drop(){ - resetIndexCache(); - CommandResult res =_db.command( BasicDBObjectBuilder.start().add( "drop" , getName() ).get() ); - if (res.ok() || res.getErrorMessage().equals( "ns not found" )) - return; - res.throwOnError(); - } - - /** - * returns the number of documents in this collection. - * @return - * @throws MongoException - */ - public long count(){ - return getCount(new BasicDBObject(), null); - } - - /** - * returns the number of documents that match a query. - * @param query query to match - * @return - * @throws MongoException - */ - public long count(DBObject query){ - return getCount(query, null); - } - - /** - * returns the number of documents that match a query. 
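The save(...) path above chooses between insert and upsert by inspecting _id; sketched:

    BasicDBObject doc = new BasicDBObject("name", "Bob");
    collection.save(doc);          // no _id yet: inserted, _id generated
    doc.put("name", "Robert");
    collection.save(doc);          // _id present: update({_id: ...}, doc, upsert=true)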
- * @param query query to match - * @param readPrefs ReadPreferences for this query - * @return - * @throws MongoException - */ - public long count(DBObject query, ReadPreference readPrefs ){ - return getCount(query, null, readPrefs); - } - - - /** - * calls {@link DBCollection#getCount(com.mongodb.DBObject, com.mongodb.DBObject)} with an empty query and null fields. - * @return number of documents that match query - * @throws MongoException - */ - public long getCount(){ - return getCount(new BasicDBObject(), null); - } - - /** - * calls {@link DBCollection#getCount(com.mongodb.DBObject, com.mongodb.DBObject, com.mongodb.ReadPreference)} with empty query and null fields. - * @param readPrefs ReadPreferences for this command - * @return number of documents that match query - * @throws MongoException - */ - public long getCount(ReadPreference readPrefs){ - return getCount(new BasicDBObject(), null, readPrefs); - } - - /** - * calls {@link DBCollection#getCount(com.mongodb.DBObject, com.mongodb.DBObject)} with null fields. - * @param query query to match - * @return - * @throws MongoException - */ - public long getCount(DBObject query){ - return getCount(query, null); - } - - - /** - * calls {@link DBCollection#getCount(com.mongodb.DBObject, com.mongodb.DBObject, long, long)} with limit=0 and skip=0 - * @param query query to match - * @param fields fields to return - * @return - * @throws MongoException - */ - public long getCount(DBObject query, DBObject fields){ - return getCount( query , fields , 0 , 0 ); - } - - /** - * calls {@link DBCollection#getCount(com.mongodb.DBObject, com.mongodb.DBObject, long, long, com.mongodb.ReadPreference)} with limit=0 and skip=0 - * @param query query to match - * @param fields fields to return - * @param readPrefs ReadPreferences for this command - * @return - * @throws MongoException - */ - public long getCount(DBObject query, DBObject fields, ReadPreference readPrefs){ - return getCount( query , fields , 0 , 0, readPrefs ); - } - - /** - * calls {@link DBCollection#getCount(com.mongodb.DBObject, com.mongodb.DBObject, long, long, com.mongodb.ReadPreference)} with the DBCollection's ReadPreference - * @param query query to match - * @param fields fields to return - * @param limit limit the count to this value - * @param skip skip number of entries to skip - * @return - * @throws MongoException - */ - public long getCount(DBObject query, DBObject fields, long limit, long skip){ - return getCount(query, fields, limit, skip, getReadPreference()); - } - - /** - * Returns the number of documents in the collection - * that match the specified query - * - * @param query query to select documents to count - * @param fields fields to return - * @param limit limit the count to this value - * @param skip number of entries to skip - * @param readPrefs ReadPreferences for this command - * @return number of documents that match query and fields - * @throws MongoException - */ - - public long getCount(DBObject query, DBObject fields, long limit, long skip, ReadPreference readPrefs ){ - BasicDBObject cmd = new BasicDBObject(); - cmd.put("count", getName()); - cmd.put("query", query); - if (fields != null) { - cmd.put("fields", fields); - } - - if ( limit > 0 ) - cmd.put( "limit" , limit ); - if ( skip > 0 ) - cmd.put( "skip" , skip ); - - CommandResult res = _db.command(cmd,getOptions(),readPrefs); - if ( ! 
res.ok() ){ - String errmsg = res.getErrorMessage(); - - if ( errmsg.equals("ns does not exist") || - errmsg.equals("ns missing" ) ){ - // for now, return 0 - lets pretend it does exist - return 0; - } - - res.throwOnError(); - } - - return res.getLong("n"); - } - - CommandResult command(DBObject cmd, int options, ReadPreference readPrefs){ - return _db.command(cmd,getOptions(),readPrefs); - } - - /** - * Calls {@link DBCollection#rename(java.lang.String, boolean)} with dropTarget=false - * @param newName new collection name (not a full namespace) - * @return the new collection - * @throws MongoException - */ - public DBCollection rename( String newName ){ - return rename(newName, false); - } - - /** - * renames of this collection to newName - * @param newName new collection name (not a full namespace) - * @param dropTarget if a collection with the new name exists, whether or not to drop it - * @return the new collection - * @throws MongoException - */ - public DBCollection rename( String newName, boolean dropTarget ){ - CommandResult ret = - _db.getSisterDB( "admin" ) - .command( BasicDBObjectBuilder.start() - .add( "renameCollection" , _fullName ) - .add( "to" , _db._name + "." + newName ) - .add( "dropTarget" , dropTarget ) - .get() ); - ret.throwOnError(); - resetIndexCache(); - return _db.getCollection( newName ); - } - - /** - * calls {@link DBCollection#group(com.mongodb.DBObject, com.mongodb.DBObject, com.mongodb.DBObject, java.lang.String, java.lang.String)} with finalize=null - * @param key - { a : true } - * @param cond - optional condition on query - * @param reduce javascript reduce function - * @param initial initial value for first match on a key - * @return - * @throws MongoException - * @see http://www.mongodb.org/display/DOCS/Aggregation - */ - public DBObject group( DBObject key , DBObject cond , DBObject initial , String reduce ){ - return group( key , cond , initial , reduce , null ); - } - - /** - * Applies a group operation - * @param key - { a : true } - * @param cond - optional condition on query - * @param reduce javascript reduce function - * @param initial initial value for first match on a key - * @param finalize An optional function that can operate on the result(s) of the reduce function. - * @return - * @throws MongoException - * @see http://www.mongodb.org/display/DOCS/Aggregation - */ - public DBObject group( DBObject key , DBObject cond , DBObject initial , String reduce , String finalize ){ - GroupCommand cmd = new GroupCommand(this, key, cond, initial, reduce, finalize); - return group( cmd ); - } - - /** - * Applies a group operation - * @param key - { a : true } - * @param cond - optional condition on query - * @param reduce javascript reduce function - * @param initial initial value for first match on a key - * @param finalize An optional function that can operate on the result(s) of the reduce function. 
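A sketch of the key/cond/initial/reduce arguments (the legacy group command runs the reduce function as server-side JavaScript; field names are hypothetical):

    DBObject perCategory = collection.group(
            new BasicDBObject("category", true),                    // key
            new BasicDBObject("qty", new BasicDBObject("$gt", 0)),  // cond
            new BasicDBObject("count", 0),                          // initial
            "function(doc, agg) { agg.count += 1; }");              // reduce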
- * @param readPrefs ReadPreferences for this command - * @return - * @throws MongoException - * @see http://www.mongodb.org/display/DOCS/Aggregation - */ - public DBObject group( DBObject key , DBObject cond , DBObject initial , String reduce , String finalize, ReadPreference readPrefs ){ - GroupCommand cmd = new GroupCommand(this, key, cond, initial, reduce, finalize); - return group( cmd, readPrefs ); - } - - /** - * Applies a group operation - * @param cmd the group command - * @return - * @throws MongoException - * @see http://www.mongodb.org/display/DOCS/Aggregation - */ - public DBObject group( GroupCommand cmd ) { - return group(cmd, getReadPreference()); - } - - /** - * Applies a group operation - * @param cmd the group command - * @param readPrefs ReadPreferences for this command - * @return - * @throws MongoException - * @see http://www.mongodb.org/display/DOCS/Aggregation - */ - public DBObject group( GroupCommand cmd, ReadPreference readPrefs ) { - CommandResult res = _db.command( cmd.toDBObject(), getOptions(), readPrefs ); - res.throwOnError(); - return (DBObject)res.get( "retval" ); - } - - /** - * @deprecated prefer the {@link DBCollection#group(com.mongodb.GroupCommand)} which is more standard - * Applies a group operation - * @param args object representing the arguments to the group function - * @return - * @throws MongoException - * @see http://www.mongodb.org/display/DOCS/Aggregation - */ - @Deprecated - public DBObject group( DBObject args ){ - args.put( "ns" , getName() ); - CommandResult res = _db.command( new BasicDBObject( "group" , args ), getOptions(), getReadPreference() ); - res.throwOnError(); - return (DBObject)res.get( "retval" ); - } - - /** - * find distinct values for a key - * @param key - * @return - * @throws MongoException - */ - public List distinct( String key ){ - return distinct( key , new BasicDBObject() ); - } - - /** - * find distinct values for a key - * @param key - * @param readPrefs - * @return - * @throws MongoException - */ - public List distinct( String key, ReadPreference readPrefs ){ - return distinct( key , new BasicDBObject(), readPrefs ); - } - - /** - * find distinct values for a key - * @param key - * @param query query to match - * @return - * @throws MongoException - */ - public List distinct( String key , DBObject query ){ - return distinct(key, query, getReadPreference()); - } - - /** - * find distinct values for a key - * @param key - * @param query query to match - * @param readPrefs - * @return - * @throws MongoException - */ - public List distinct( String key , DBObject query, ReadPreference readPrefs ){ - DBObject c = BasicDBObjectBuilder.start() - .add( "distinct" , getName() ) - .add( "key" , key ) - .add( "query" , query ) - .get(); - - CommandResult res = _db.command( c, getOptions(), readPrefs ); - res.throwOnError(); - return (List)(res.get( "values" )); - } - - /** - * performs a map reduce operation - * Runs the command in REPLACE output mode (saves to named collection) - * - * @param map - * map function in javascript code - * @param outputTarget - * optional - leave null if want to use temp collection - * @param reduce - * reduce function in javascript code - * @param query - * to match - * @return - * @throws MongoException - * @dochub mapreduce - */ - public MapReduceOutput mapReduce( String map , String reduce , String outputTarget , DBObject query ){ - return mapReduce( new MapReduceCommand( this , map , reduce , outputTarget , MapReduceCommand.OutputType.REPLACE, query ) ); - } - - /** - * performs a map 
reduce operation - * Specify an outputType to control job execution - * * INLINE - Return results inline - * * REPLACE - Replace the output collection with the job output - * * MERGE - Merge the job output with the existing contents of outputTarget - * * REDUCE - Reduce the job output with the existing contents of - * outputTarget - * - * @param map - * map function in javascript code - * @param outputTarget - * optional - leave null if want to use temp collection - * @param outputType - * set the type of job output - * @param reduce - * reduce function in javascript code - * @param query - * to match - * @return - * @throws MongoException - * @dochub mapreduce - */ - public MapReduceOutput mapReduce( String map , String reduce , String outputTarget , MapReduceCommand.OutputType outputType , DBObject query ){ - return mapReduce( new MapReduceCommand( this , map , reduce , outputTarget , outputType , query ) ); - } - - /** - * performs a map reduce operation - * Specify an outputType to control job execution - * * INLINE - Return results inline - * * REPLACE - Replace the output collection with the job output - * * MERGE - Merge the job output with the existing contents of outputTarget - * * REDUCE - Reduce the job output with the existing contents of - * outputTarget - * - * @param map - * map function in javascript code - * @param outputTarget - * optional - leave null if want to use temp collection - * @param outputType - * set the type of job output - * @param reduce - * reduce function in javascript code - * @param query - * to match - * @param readPrefs - * ReadPreferences for this operation - * @return - * @throws MongoException - * @dochub mapreduce - */ - public MapReduceOutput mapReduce( String map , String reduce , String outputTarget , MapReduceCommand.OutputType outputType , DBObject query, ReadPreference readPrefs ){ - MapReduceCommand command = new MapReduceCommand( this , map , reduce , outputTarget , outputType , query ); - command.setReadPreference(readPrefs); - return mapReduce( command ); - } - - /** - * performs a map reduce operation - * - * @param command - * object representing the parameters - * @return - * @throws MongoException - */ - public MapReduceOutput mapReduce( MapReduceCommand command ){ - DBObject cmd = command.toDBObject(); - // if type in inline, then query options like slaveOk is fine - CommandResult res = _db.command( cmd, getOptions(), command.getReadPreference() != null ? command.getReadPreference() : getReadPreference() ); - res.throwOnError(); - return new MapReduceOutput( this , cmd, res ); - } - - /** - * performs a map reduce operation - * - * @param command - * object representing the parameters - * @return - * @throws MongoException - */ - public MapReduceOutput mapReduce( DBObject command ){ - if ( command.get( "mapreduce" ) == null && command.get( "mapReduce" ) == null ) - throw new IllegalArgumentException( "need mapreduce arg" ); - CommandResult res = _db.command( command, getOptions(), getReadPreference() ); - res.throwOnError(); - return new MapReduceOutput( this , command, res ); - } - - /** - * performs an aggregation operation - * - * @param firstOp - * requisite first operation to be performed in the aggregation pipeline - * - * @param additionalOps - * additional operations to be performed in the aggregation pipeline - * @return The aggregation operation's result set - * - */ - public AggregationOutput aggregate( DBObject firstOp, DBObject ... 
additionalOps){ - if (firstOp == null) - throw new IllegalArgumentException("aggregate can not accept null pipeline operation"); - - DBObject command = new BasicDBObject("aggregate", _name ); - - List pipelineOps = new ArrayList(); - pipelineOps.add(firstOp); - Collections.addAll(pipelineOps, additionalOps); - command.put( "pipeline", pipelineOps ); - - CommandResult res = _db.command( command, getOptions(), getReadPreference() ); - res.throwOnError(); - return new AggregationOutput( command, res ); - } - - /** - * Return a list of the indexes for this collection. Each object - * in the list is the "info document" from MongoDB - * - * @return list of index documents - * @throws MongoException - */ - public List getIndexInfo() { - BasicDBObject cmd = new BasicDBObject(); - cmd.put("ns", getFullName()); - - DBCursor cur = _db.getCollection("system.indexes").find(cmd); - - List list = new ArrayList(); - - while(cur.hasNext()) { - list.add(cur.next()); - } - - return list; - } - - /** - * Drops an index from this collection - * @param keys keys of the index - * @throws MongoException - */ - public void dropIndex( DBObject keys ){ - dropIndexes( genIndexName( keys ) ); - } - - /** - * Drops an index from this collection - * @param name name of index to drop - * @throws MongoException - */ - public void dropIndex( String name ){ - dropIndexes( name ); - } - - /** - * gets the collections statistics ("collstats" command) - * @return - * @throws MongoException - */ - public CommandResult getStats() { - return getDB().command(new BasicDBObject("collstats", getName()), getOptions(), getReadPreference()); - } - - /** - * returns whether or not this is a capped collection - * @return - * @throws MongoException - */ - public boolean isCapped() { - CommandResult stats = getStats(); - Object capped = stats.get("capped"); - return(capped != null && ( capped.equals(1) || capped.equals(true) ) ); - } - - // ------ - - /** - * Initializes a new collection. No operation is actually performed on the database. - * @param base database in which to create the collection - * @param name the name of the collection - */ - protected DBCollection( DB base , String name ){ - _db = base; - _name = name; - _fullName = _db.getName() + "." + name; - _options = new Bytes.OptionHolder( _db._options ); - } - - /** - * @deprecated This method should not be a part of API. - * If you override one of the {@code DBCollection} methods please rely on superclass - * implementation in checking argument correctness and validity. - */ - @Deprecated - protected DBObject _checkObject(DBObject o, boolean canBeNull, boolean query) { - if (o == null) { - if (canBeNull) - return null; - throw new IllegalArgumentException("can't be null"); - } - - if (o.isPartialObject() && !query) - throw new IllegalArgumentException("can't save partial objects"); - - if (!query) { - _checkKeys(o); - } - return o; - } - - /** - * Checks key strings for invalid characters. - */ - private void _checkKeys( DBObject o ) { - if ( o instanceof LazyDBObject || o instanceof LazyDBList ) - return; - - for ( String s : o.keySet() ){ - validateKey( s ); - _checkValue( o.get( s ) ); - } - } - - /** - * Checks key strings for invalid characters. 
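The aggregate(...) varargs form above assembles the stages into a single pipeline command; a sketch with hypothetical field names (assuming the 2.x AggregationOutput exposes its rows via results()):

    DBObject match = new BasicDBObject("$match", new BasicDBObject("status", "A"));
    DBObject group = new BasicDBObject("$group",
            new BasicDBObject("_id", "$custId")
                    .append("total", new BasicDBObject("$sum", "$amount")));
    AggregationOutput out = collection.aggregate(match, group);
    for (DBObject row : out.results()) {
        System.out.println(row);
    }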
- */ - private void _checkKeys( Map o ) { - for ( Map.Entry cur : o.entrySet() ){ - validateKey( cur.getKey() ); - _checkValue( cur.getValue() ); - } - } - - private void _checkValues( final List list ) { - for ( Object cur : list ) { - _checkValue( cur ); - } - } - - private void _checkValue(final Object value) { - if ( value instanceof DBObject ) { - _checkKeys( (DBObject)value ); - } else if ( value instanceof Map ) { - _checkKeys( (Map)value ); - } else if ( value instanceof List ) { - _checkValues((List) value); - } - } - - /** - * Check for invalid key names - * @param s the string field/key to check - * @exception IllegalArgumentException if the key is not valid. - */ - private void validateKey(String s ) { - if ( s.contains( "\0" ) ) - throw new IllegalArgumentException( "Document field names can't have a NULL character. (Bad Key: '" + s + "')" ); - if ( s.contains( "." ) ) - throw new IllegalArgumentException( "Document field names can't have a . in them. (Bad Key: '" + s + "')" ); - if ( s.startsWith( "$" ) ) - throw new IllegalArgumentException( "Document field names can't start with '$' (Bad Key: '" + s + "')" ); - } - - /** - * Finds a collection that is prefixed with this collection's name. - * A typical use of this might be - *
-     *    DBCollection users = mongo.getCollection( "wiki" ).getCollection( "users" );
-     * </pre>
-     * Which is equivalent to
-     * <pre>
-     *    DBCollection users = mongo.getCollection( "wiki.users" );
-     * </pre>
- * @param n the name of the collection to find - * @return the matching collection - */ - public DBCollection getCollection( String n ){ - return _db.getCollection( _name + "." + n ); - } - - /** - * Returns the name of this collection. - * @return the name of this collection - */ - public String getName(){ - return _name; - } - - /** - * Returns the full name of this collection, with the database name as a prefix. - * @return the name of this collection - */ - public String getFullName(){ - return _fullName; - } - - /** - * Returns the database this collection is a member of. - * @return this collection's database - */ - public DB getDB(){ - return _db; - } - - /** - * Returns if this collection's database is read-only - * @param strict if an exception should be thrown if the database is read-only - * @return if this collection's database is read-only - * @throws RuntimeException if the database is read-only and strict is set - * - * @deprecated See {@link DB#setReadOnly(Boolean)} - */ - @Deprecated - protected boolean checkReadOnly( boolean strict ){ - if ( ! _db._readOnly ) - return false; - - if ( ! strict ) - return true; - - throw new IllegalStateException( "db is read only" ); - } - - @Override - public int hashCode(){ - return _fullName.hashCode(); - } - - @Override - public boolean equals( Object o ){ - return o == this; - } - - @Override - public String toString(){ - return _name; - } - - /** - * Sets a default class for objects in this collection; null resets the class to nothing. - * @param c the class - * @throws IllegalArgumentException if c is not a DBObject - */ - public void setObjectClass( Class c ){ - if ( c == null ){ - // reset - _wrapper = null; - _objectClass = null; - return; - } - - if ( ! DBObject.class.isAssignableFrom( c ) ) - throw new IllegalArgumentException( c.getName() + " is not a DBObject" ); - _objectClass = c; - if ( ReflectionDBObject.class.isAssignableFrom( c ) ) - _wrapper = ReflectionDBObject.getWrapper( c ); - else - _wrapper = null; - } - - /** - * Gets the default class for objects in the collection - * @return the class - */ - public Class getObjectClass(){ - return _objectClass; - } - - /** - * sets the internal class - * @param path - * @param c - */ - public void setInternalClass( String path , Class c ){ - _internalClass.put( path , c ); - } - - /** - * gets the internal class - * @param path - * @return - */ - protected Class getInternalClass( String path ){ - Class c = _internalClass.get( path ); - if ( c != null ) - return c; - - if ( _wrapper == null ) - return null; - return _wrapper.getInternalClass( path ); - } - - /** - * Set the write concern for this collection. Will be used for - * writes to this collection. Overrides any setting of write - * concern at the DB level. See the documentation for - * {@link WriteConcern} for more information. - * - * @param concern write concern to use - */ - public void setWriteConcern( WriteConcern concern ){ - _concern = concern; - } - - /** - * Get the write concern for this collection. - * @return - */ - public WriteConcern getWriteConcern(){ - if ( _concern != null ) - return _concern; - return _db.getWriteConcern(); - } - - /** - * Sets the read preference for this collection. Will be used as default - * for reads from this collection; overrides DB & Connection level settings. - * See the * documentation for {@link ReadPreference} for more information. 
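[editor's note] The setter pairs above implement a two-level fallback: a collection-level WriteConcern or ReadPreference wins, and an unset collection inherits from its DB. A sketch, assuming a hypothetical connected `db` handle:

```java
// Collection-level settings override the DB-level defaults.
DBCollection logs = db.getCollection("logs");
logs.setWriteConcern(WriteConcern.JOURNALED);                 // wins over db.getWriteConcern()
logs.setReadPreference(ReadPreference.secondaryPreferred());  // wins over db.getReadPreference()

// A collection with nothing set falls through to the DB defaults.
DBCollection users = db.getCollection("users");
assert users.getWriteConcern().equals(db.getWriteConcern());
```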
- * - * @param preference Read Preference to use - */ - public void setReadPreference( ReadPreference preference ){ - _readPref = preference; - } - - /** - * Gets the read preference - * @return - */ - public ReadPreference getReadPreference(){ - if ( _readPref != null ) - return _readPref; - return _db.getReadPreference(); - } - /** - * makes this query ok to run on a slave node - * - * @deprecated Replaced with {@code ReadPreference.secondaryPreferred()} - * @see com.mongodb.ReadPreference#secondaryPreferred() - */ - @Deprecated - public void slaveOk(){ - addOption( Bytes.QUERYOPTION_SLAVEOK ); - } - - /** - * adds a default query option - * @param option - */ - public void addOption( int option ){ - _options.add(option); - } - - /** - * sets the default query options - * @param options - */ - public void setOptions( int options ){ - _options.set(options); - } - - /** - * resets the default query options - */ - public void resetOptions(){ - _options.reset(); - } - - /** - * gets the default query options - * @return - */ - public int getOptions(){ - return _options.get(); - } - - /** - * Set a customer decoder factory for this collection. Set to null to use the default from MongoOptions. - * @param fact the factory to set. - */ - public synchronized void setDBDecoderFactory(DBDecoderFactory fact) { - _decoderFactory = fact; - } - - /** - * Get the decoder factory for this collection. A null return value means that the default from MongoOptions - * is being used. - * @return the factory - */ - public synchronized DBDecoderFactory getDBDecoderFactory() { - return _decoderFactory; - } - - /** - * Set a customer encoder factory for this collection. Set to null to use the default from MongoOptions. - * @param fact the factory to set. - */ - public synchronized void setDBEncoderFactory(DBEncoderFactory fact) { - _encoderFactory = fact; - } - - /** - * Get the encoder factory for this collection. A null return value means that the default from MongoOptions - * is being used. - * @return the factory - */ - public synchronized DBEncoderFactory getDBEncoderFactory() { - return _encoderFactory; - } - - final DB _db; - - /** - * @deprecated Please use {@link #getName()} instead. - */ - @Deprecated - final protected String _name; - - /** - * @deprecated Please use {@link #getFullName()} instead. - */ - @Deprecated - final protected String _fullName; - - /** - * @deprecated Please use {@link #setHintFields(java.util.List)} and {@link #getHintFields()} instead. - */ - @Deprecated - protected List _hintFields; - private WriteConcern _concern = null; - private ReadPreference _readPref = null; - private DBDecoderFactory _decoderFactory; - private DBEncoderFactory _encoderFactory; - final Bytes.OptionHolder _options; - - /** - * @deprecated Please use {@link #getObjectClass()} and {@link #setObjectClass(Class)} instead. - */ - @Deprecated - protected Class _objectClass = null; - private Map _internalClass = Collections.synchronizedMap( new HashMap() ); - private ReflectionDBObject.JavaWrapper _wrapper = null; - - final private Set _createdIndexes = new HashSet(); -} diff --git a/src/main/com/mongodb/DBConnector.java b/src/main/com/mongodb/DBConnector.java deleted file mode 100644 index e9f655436fd..00000000000 --- a/src/main/com/mongodb/DBConnector.java +++ /dev/null @@ -1,119 +0,0 @@ -// DBConnector.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - - -/** - * Interface that provides the ability to exchange request/response with the database - * - * @deprecated This class is NOT part of the public API. It will be dropped in 3.x releases. - */ -@Deprecated -public interface DBConnector { - - /** - * initiates a "consistent request" on the thread. - * Once this has been called, the connector will ensure that the same underlying connection is always used for a given thread. - * This happens until requestStop() is called. - */ - public void requestStart(); - /** - * terminates the "consistent request". - */ - public void requestDone(); - /** - * Ensures that a connection exists for the "consistent request" - */ - public void requestEnsureConnection(); - - /** - * does a write operation - * @param db the database - * @param m the request message - * @param concern the write concern - * @return the write result - * @throws MongoException - */ - public WriteResult say( DB db , OutMessage m , WriteConcern concern ); - /** - * does a write operation - * @param db the database - * @param m the request message - * @param concern the write concern - * @param hostNeeded specific server to connect to - * @return the write result - * @throws MongoException - */ - public WriteResult say( DB db , OutMessage m , WriteConcern concern , ServerAddress hostNeeded ); - - /** - * does a read operation on the database - * @param db the database - * @param coll the collection - * @param m the request message - * @param hostNeeded specific server to connect to - * @param decoder the decoder to use - * @return the read result - * @throws MongoException - */ - public Response call( DB db , DBCollection coll , OutMessage m , - ServerAddress hostNeeded , DBDecoder decoder ); - /** - * - * does a read operation on the database - * @param db the database - * @param coll the collection - * @param m the request message - * @param hostNeeded specific server to connect to - * @param retries the number of retries in case of an error - * @return the read result - * @throws MongoException - */ - public Response call( DB db , DBCollection coll , OutMessage m , ServerAddress hostNeeded , int retries ); - - /** - * does a read operation on the database - * @param db the database - * @param coll the collection - * @param m the request message - * @param hostNeeded specific server to connect to - * @param retries number of retries in case of error - * @param readPref the read preferences - * @param decoder the decoder to use - * @return the read result - * @throws MongoException - */ - public Response call( DB db , DBCollection coll , OutMessage m , ServerAddress hostNeeded , int retries , ReadPreference readPref , DBDecoder decoder ); - - /** - * returns true if the connector is in a usable state - * @return - */ - public boolean isOpen(); - - /** - * Authenticate using the given credentials. - * - * @param credentials the credentials. 
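[editor's note] The requestStart/requestDone contract defined by this interface is surfaced on `DB` in the public API. A sketch of the bracket, with a hypothetical `db` handle, showing the read-your-own-write pattern it exists for:

```java
// Pin the calling thread to one underlying connection until requestDone().
DBCollection coll = db.getCollection("accounts");
db.requestStart();
try {
    coll.insert(new BasicDBObject("_id", 42).append("v", 1));
    DBObject mine = coll.findOne(new BasicDBObject("_id", 42)); // same socket, sees the write
} finally {
    db.requestDone();
}
```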
- * @return the result of the authentication command, if successful - * @throws CommandFailureException if the authentication failed - * @since 2.11.0 - */ - public CommandResult authenticate(MongoCredential credentials); -} diff --git a/src/main/com/mongodb/DBCursor.java b/src/main/com/mongodb/DBCursor.java deleted file mode 100644 index 9718eb668c8..00000000000 --- a/src/main/com/mongodb/DBCursor.java +++ /dev/null @@ -1,733 +0,0 @@ -// DBCursor.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import com.mongodb.DBApiLayer.Result; -import org.bson.util.annotations.NotThreadSafe; - -import java.io.Closeable; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; -import java.util.Set; - - -/** An iterator over database results. - * Doing a find() query on a collection returns a - * DBCursor thus - * - *
- * DBCursor cursor = collection.find( query );
- * if( cursor.hasNext() )
- *     DBObject obj = cursor.next();
- * </pre>
- *
- * <p><b>Warning:</b> Calling toArray or length on
- * a DBCursor will irrevocably turn it into an array. This
- * means that, if the cursor was iterating over ten million results
- * (which it was lazily fetching from the database), suddenly there will
- * be a ten-million element array in memory. Before converting to an array,
- * make sure that there are a reasonable number of results using
- * skip() and limit().
- * <p>For example, to get an array of the 1000-1100th elements of a cursor, use
- *
- * <pre>
- * List<DBObject> obj = collection.find( query ).skip( 1000 ).limit( 100 ).toArray();
- * </pre>
- * - * - * @dochub cursors - */ -@NotThreadSafe -public class DBCursor implements Iterator , Iterable, Closeable { - - /** - * Initializes a new database cursor - * @param collection collection to use - * @param q query to perform - * @param k keys to return from the query - * @param preference the Read Preference for this query - */ - public DBCursor( DBCollection collection , DBObject q , DBObject k, ReadPreference preference ){ - if (collection == null) { - throw new IllegalArgumentException("collection is null"); - } - _collection = collection; - _query = q == null ? new BasicDBObject() : q; - _keysWanted = k; - _options = _collection.getOptions(); - _readPref = preference; - _decoderFact = collection.getDBDecoderFactory(); - } - - /** - * Types of cursors: iterator or array. - */ - static enum CursorType { ITERATOR , ARRAY }; - - /** - * Creates a copy of an existing database cursor. - * The new cursor is an iterator, even if the original - * was an array. - * - * @return the new cursor - */ - public DBCursor copy() { - DBCursor c = new DBCursor(_collection, _query, _keysWanted, _readPref); - c._orderBy = _orderBy; - c._hint = _hint; - c._hintDBObj = _hintDBObj; - c._limit = _limit; - c._skip = _skip; - c._options = _options; - c._batchSize = _batchSize; - c._snapshot = _snapshot; - c._explain = _explain; - if ( _specialFields != null ) - c._specialFields = new BasicDBObject( _specialFields.toMap() ); - return c; - } - - /** - * creates a copy of this cursor object that can be iterated. - * Note: - * - you can iterate the DBCursor itself without calling this method - * - no actual data is getting copied. - * - * @return - */ - public Iterator iterator(){ - return this.copy(); - } - - // ---- querty modifiers -------- - - /** - * Sorts this cursor's elements. - * This method must be called before getting any object from the cursor. - * @param orderBy the fields by which to sort - * @return a cursor pointing to the first element of the sorted results - */ - public DBCursor sort( DBObject orderBy ){ - if ( _it != null ) - throw new IllegalStateException( "can't sort after executing query" ); - - _orderBy = orderBy; - return this; - } - - /** - * adds a special operator like $maxScan or $returnKey - * e.g. addSpecial( "$returnKey" , 1 ) - * e.g. addSpecial( "$maxScan" , 100 ) - * @param name - * @param o - * @return - * @dochub specialOperators - */ - public DBCursor addSpecial( String name , Object o ){ - if ( _specialFields == null ) - _specialFields = new BasicDBObject(); - _specialFields.put( name , o ); - return this; - } - - /** - * Informs the database of indexed fields of the collection in order to improve performance. - * @param indexKeys a DBObject with fields and direction - * @return same DBCursor for chaining operations - */ - public DBCursor hint( DBObject indexKeys ){ - if ( _it != null ) - throw new IllegalStateException( "can't hint after executing query" ); - - _hintDBObj = indexKeys; - return this; - } - - /** - * Informs the database of an indexed field of the collection in order to improve performance. - * @param indexName the name of an index - * @return same DBCursor for chaining operations - */ - public DBCursor hint( String indexName ){ - if ( _it != null ) - throw new IllegalStateException( "can't hint after executing query" ); - - _hint = indexName; - return this; - } - - /** - * Use snapshot mode for the query. 
Snapshot mode assures no duplicates are - * returned, or objects missed, which were present at both the start and end - * of the query's execution (if an object is new during the query, or deleted - * during the query, it may or may not be returned, even with snapshot mode). - * Note that short query responses (less than 1MB) are always effectively snapshotted. - * Currently, snapshot mode may not be used with sorting or explicit hints. - * @return same DBCursor for chaining operations - */ - public DBCursor snapshot() { - if (_it != null) - throw new IllegalStateException("can't snapshot after executing the query"); - - _snapshot = true; - - return this; - } - - /** - * Returns an object containing basic information about the - * execution of the query that created this cursor - * This creates a DBObject with the key/value pairs: - * "cursor" : cursor type - * "nScanned" : number of records examined by the database for this query - * "n" : the number of records that the database returned - * "millis" : how long it took the database to execute the query - * @return a DBObject - * @throws MongoException - * @dochub explain - */ - public DBObject explain(){ - DBCursor c = copy(); - c._explain = true; - if (c._limit > 0) { - // need to pass a negative batchSize as limit for explain - c._batchSize = c._limit * -1; - c._limit = 0; - } - return c.next(); - } - - /** - * Limits the number of elements returned. - * Note: parameter n should be positive, although a negative value is supported for legacy reason. - * Passing a negative value will call {@link DBCursor#batchSize(int)} which is the preferred method. - * @param n the number of elements to return - * @return a cursor to iterate the results - * @dochub limit - */ - public DBCursor limit( int n ){ - if ( _it != null ) - throw new IllegalStateException( "can't set limit after executing query" ); - - if (n > 0) - _limit = n; - else if (n < 0) - batchSize(n); - return this; - } - - /** - * Limits the number of elements returned in one batch. - * A cursor typically fetches a batch of result objects and store them locally. - * - * If batchSize is positive, it represents the size of each batch of objects retrieved. - * It can be adjusted to optimize performance and limit data transfer. - * - * If batchSize is negative, it will limit of number objects returned, that fit within the max batch size limit (usually 4MB), and cursor will be closed. - * For example if batchSize is -10, then the server will return a maximum of 10 documents and as many as can fit in 4MB, then close the cursor. - * Note that this feature is different from limit() in that documents must fit within a maximum size, and it removes the need to send a request to close the cursor server-side. - * - * The batch size can be changed even after a cursor is iterated, in which case the setting will apply on the next batch retrieval. - * - * @param n the number of elements to return in a batch - * @return - */ - public DBCursor batchSize( int n ){ - // check for special case, used to have server bug with 1 - if ( n == 1 ) - n = 2; - - if ( _it != null ) { - if (_it instanceof DBApiLayer.Result) - ((DBApiLayer.Result)_it).setBatchSize(n); - } - - _batchSize = n; - return this; - } - - /** - * Discards a given number of elements at the beginning of the cursor. 
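[editor's note] The limit()/batchSize() distinction documented above is worth a concrete sketch (hypothetical `collection` and `query`): limit caps the total result count, while batchSize only shapes how many documents come over the wire per fetch.

```java
DBCursor cursor = collection.find(query)
        .sort(new BasicDBObject("created", -1))
        .skip(1000)     // discard the first 1000 matches server-side
        .limit(100)     // return at most 100 documents in total
        .batchSize(20); // fetch them from the server 20 at a time
try {
    while (cursor.hasNext()) {
        DBObject doc = cursor.next();
        // process doc ...
    }
} finally {
    cursor.close();     // kill any server-side cursor still open
}
```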
- * @param n the number of elements to skip - * @return a cursor pointing to the new first element of the results - * @throws IllegalStateException if the cursor has started to be iterated through - */ - public DBCursor skip( int n ){ - if ( _it != null ) - throw new IllegalStateException( "can't set skip after executing query" ); - _skip = n; - return this; - } - - /** - * gets the cursor id. - * @return the cursor id, or 0 if there is no active cursor. - */ - public long getCursorId() { - if ( _it instanceof Result ) - return ((Result)_it).getCursorId(); - - return 0; - } - - /** - * kills the current cursor on the server. - */ - public void close() { - if ( _it instanceof Result ) - ((Result)_it).close(); - } - - /** - * makes this query ok to run on a slave node - * - * @return a copy of the same cursor (for chaining) - * - * @deprecated Replaced with {@code ReadPreference.secondaryPreferred()} - * @see ReadPreference#secondaryPreferred() - */ - @Deprecated - public DBCursor slaveOk(){ - return addOption( Bytes.QUERYOPTION_SLAVEOK ); - } - - /** - * adds a query option - see Bytes.QUERYOPTION_* for list - * @param option - * @return - */ - public DBCursor addOption( int option ){ - if ( option == Bytes.QUERYOPTION_EXHAUST ) - throw new IllegalArgumentException("The exhaust option is not user settable."); - - _options |= option; - return this; - } - - /** - * sets the query option - see Bytes.QUERYOPTION_* for list - * @param options - */ - public DBCursor setOptions( int options ){ - _options = options; - return this; - } - - /** - * resets the query options - */ - public DBCursor resetOptions(){ - _options = 0; - return this; - } - - /** - * gets the query options - * @return - */ - public int getOptions(){ - return _options; - } - - // ---- internal stuff ------ - - private void _check() { - if (_it != null) - return; - - _lookForHints(); - - QueryOpBuilder builder = new QueryOpBuilder() - .addQuery(_query) - .addOrderBy(_orderBy) - .addHint(_hintDBObj) - .addHint(_hint) - .addExplain(_explain) - .addSnapshot(_snapshot) - .addSpecialFields(_specialFields); - - if (_collection.getDB().getMongo().isMongosConnection()) { - builder.addReadPreference(_readPref); - } - - _it = _collection.__find(builder.get(), _keysWanted, _skip, _batchSize, _limit, - _options, _readPref, getDecoder()); - } - - // Only create a new decoder if there is a decoder factory explicitly set on the collection. Otherwise return null - // so that the collection can use a cached decoder - private DBDecoder getDecoder() { - return _decoderFact != null ? _decoderFact.create() : null; - } - - /** - * if there is a hint to use, use it - */ - private void _lookForHints(){ - - if ( _hint != null ) // if someone set a hint, then don't do this - return; - - if ( _collection._hintFields == null ) - return; - - Set mykeys = _query.keySet(); - - for ( DBObject o : _collection._hintFields ){ - - Set hintKeys = o.keySet(); - - if ( ! 
mykeys.containsAll( hintKeys ) ) - continue; - - hint( o ); - return; - } - } - - void _checkType( CursorType type ){ - if ( _cursorType == null ){ - _cursorType = type; - return; - } - - if ( type == _cursorType ) - return; - - throw new IllegalArgumentException( "can't switch cursor access methods" ); - } - - private DBObject _next() { - if ( _cursorType == null ) - _checkType( CursorType.ITERATOR ); - - _check(); - - _cur = _it.next(); - _num++; - - if ( _keysWanted != null && _keysWanted.keySet().size() > 0 ){ - _cur.markAsPartialObject(); - //throw new UnsupportedOperationException( "need to figure out partial" ); - } - - if ( _cursorType == CursorType.ARRAY ){ - _all.add( _cur ); - } - - return _cur; - } - - /** - * gets the number of times, so far, that the cursor retrieved a batch from the database - * @return - */ - public int numGetMores(){ - if ( _it instanceof DBApiLayer.Result ) - return ((DBApiLayer.Result)_it).numGetMores(); - - throw new IllegalArgumentException("_it not a real result" ); - } - - /** - * gets a list containing the number of items received in each batch - * @return - */ - public List getSizes(){ - if ( _it instanceof DBApiLayer.Result ) - return ((DBApiLayer.Result)_it).getSizes(); - - throw new IllegalArgumentException("_it not a real result" ); - } - - private boolean _hasNext() { - _check(); - - if ( _limit > 0 && _num >= _limit ) - return false; - - return _it.hasNext(); - } - - /** - * Returns the number of objects through which the cursor has iterated. - * @return the number of objects seen - */ - public int numSeen(){ - return _num; - } - - // ----- iterator api ----- - - /** - * Checks if there is another object available - * @return - * @throws MongoException - */ - public boolean hasNext() { - _checkType( CursorType.ITERATOR ); - return _hasNext(); - } - - /** - * Returns the object the cursor is at and moves the cursor ahead by one. - * @return the next element - * @throws MongoException - */ - public DBObject next() { - _checkType( CursorType.ITERATOR ); - return _next(); - } - - /** - * Returns the element the cursor is at. - * @return the current element - */ - public DBObject curr(){ - _checkType( CursorType.ITERATOR ); - return _cur; - } - - /** - * Not implemented. - */ - public void remove(){ - throw new UnsupportedOperationException( "can't remove from a cursor" ); - } - - - // ---- array api ----- - - void _fill( int n ){ - _checkType( CursorType.ARRAY ); - while ( n >= _all.size() && _hasNext() ) - _next(); - } - - /** - * pulls back all items into an array and returns the number of objects. - * Note: this can be resource intensive - * @see #count() - * @see #size() - * @return the number of elements in the array - * @throws MongoException - */ - public int length() { - _checkType( CursorType.ARRAY ); - _fill( Integer.MAX_VALUE ); - return _all.size(); - } - - /** - * Converts this cursor to an array. - * @return an array of elements - * @throws MongoException - */ - public List toArray(){ - return toArray( Integer.MAX_VALUE ); - } - - /** - * Converts this cursor to an array. - * @param max the maximum number of objects to return - * @return an array of objects - * @throws MongoException - */ - public List toArray( int max ) { - _checkType( CursorType.ARRAY ); - _fill( max - 1 ); - return _all; - } - - /** - * for testing only! 
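[editor's note] The three counting paths around this point differ in what they respect and what they consume; a sketch with hypothetical `collection` and `query`:

```java
DBCursor c = collection.find(query).skip(1000).limit(100);
int total  = c.count();   // server-side count; ignores skip/limit
int window = c.size();    // server-side count; honors skip/limit
int walked = c.itcount(); // walks the cursor client-side and consumes it
```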
- * Iterates cursor and counts objects - * @see #count() - * @return num objects - * @throws MongoException - */ - public int itcount(){ - int n = 0; - while ( this.hasNext() ){ - this.next(); - n++; - } - return n; - } - - /** - * Counts the number of objects matching the query - * This does not take limit/skip into consideration - * @see #size() - * @return the number of objects - * @throws MongoException - */ - public int count() { - if ( _collection == null ) - throw new IllegalArgumentException( "why is _collection null" ); - if ( _collection._db == null ) - throw new IllegalArgumentException( "why is _collection._db null" ); - - return (int)_collection.getCount(this._query, this._keysWanted, getReadPreference()); - } - - /** - * Counts the number of objects matching the query - * this does take limit/skip into consideration - * @see #count() - * @return the number of objects - * @throws MongoException - */ - public int size() { - if ( _collection == null ) - throw new IllegalArgumentException( "why is _collection null" ); - if ( _collection._db == null ) - throw new IllegalArgumentException( "why is _collection._db null" ); - - return (int)_collection.getCount(this._query, this._keysWanted, this._limit, this._skip, getReadPreference() ); - } - - - /** - * gets the fields to be returned - * @return - */ - public DBObject getKeysWanted(){ - return _keysWanted; - } - - /** - * gets the query - * @return - */ - public DBObject getQuery(){ - return _query; - } - - /** - * gets the collection - * @return - */ - public DBCollection getCollection(){ - return _collection; - } - - /** - * Gets the Server Address of the server that data is pulled from. - * Note that this information may not be available until hasNext() or next() is called. - * @return - */ - public ServerAddress getServerAddress() { - if (_it != null && _it instanceof DBApiLayer.Result) - return ((DBApiLayer.Result)_it).getServerAddress(); - - return null; - } - - /** - * Sets the read preference for this cursor. - * See the * documentation for {@link ReadPreference} - * for more information. - * - * @param preference Read Preference to use - */ - public DBCursor setReadPreference( ReadPreference preference ){ - _readPref = preference; - return this; - } - - /** - * Gets the default read preference - * @return - */ - public ReadPreference getReadPreference(){ - return _readPref; - } - - public DBCursor setDecoderFactory(DBDecoderFactory fact){ - _decoderFact = fact; - return this; - } - - public DBDecoderFactory getDecoderFactory(){ - return _decoderFact; - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("Cursor id=").append(getCursorId()); - sb.append(", ns=").append(getCollection().getFullName()); - sb.append(", query=").append(getQuery()); - if (getKeysWanted() != null) - sb.append(", fields=").append(getKeysWanted()); - sb.append(", numIterated=").append(_num); - if (_skip != 0) - sb.append(", skip=").append(_skip); - if (_limit != 0) - sb.append(", limit=").append(_limit); - if (_batchSize != 0) - sb.append(", batchSize=").append(_batchSize); - - ServerAddress addr = getServerAddress(); - if (addr != null) - sb.append(", addr=").append(addr); - - if (_readPref != null) - sb.append(", readPreference=").append( _readPref.toString() ); - return sb.toString(); - } - - boolean hasFinalizer() { - if (_it == null || ! 
(_it instanceof Result)) { - return false; - } - return ((Result) _it).hasFinalizer(); - } - - // ---- query setup ---- - private final DBCollection _collection; - private final DBObject _query; - private final DBObject _keysWanted; - - private DBObject _orderBy = null; - private String _hint = null; - private DBObject _hintDBObj = null; - private boolean _explain = false; - private int _limit = 0; - private int _batchSize = 0; - private int _skip = 0; - private boolean _snapshot = false; - private int _options = 0; - private ReadPreference _readPref; - private DBDecoderFactory _decoderFact; - - private DBObject _specialFields; - - // ---- result info ---- - private Iterator _it = null; - - private CursorType _cursorType = null; - private DBObject _cur = null; - private int _num = 0; - - private final ArrayList _all = new ArrayList(); -} diff --git a/src/main/com/mongodb/DBDecoder.java b/src/main/com/mongodb/DBDecoder.java deleted file mode 100644 index 0451d826f4e..00000000000 --- a/src/main/com/mongodb/DBDecoder.java +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.mongodb; - -import java.io.IOException; -import java.io.InputStream; - -import org.bson.BSONDecoder; - -/** - * - */ -public interface DBDecoder extends BSONDecoder { - public DBCallback getDBCallback(DBCollection collection); - - public DBObject decode( byte[] b, DBCollection collection ); - - public DBObject decode( InputStream in, DBCollection collection ) throws IOException; -} diff --git a/src/main/com/mongodb/DBDecoderFactory.java b/src/main/com/mongodb/DBDecoderFactory.java deleted file mode 100644 index be9fb2bbc86..00000000000 --- a/src/main/com/mongodb/DBDecoderFactory.java +++ /dev/null @@ -1,25 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.mongodb; - -/** - * - */ -public interface DBDecoderFactory { - - public DBDecoder create( ); - -} diff --git a/src/main/com/mongodb/DBEncoder.java b/src/main/com/mongodb/DBEncoder.java deleted file mode 100644 index 4a75a962e9c..00000000000 --- a/src/main/com/mongodb/DBEncoder.java +++ /dev/null @@ -1,21 +0,0 @@ -/** - * Copyright (c) 2008 - 2011 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. 
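[editor's note] The DBDecoder/DBDecoderFactory pair removed here is the hook DBCollection.setDBDecoderFactory plugs into. A sketch of wiring one in, assuming the driver's stock DefaultDBDecoder as the delegate:

```java
// A custom factory; returning the stock decoder here is a stand-in for real custom decoding.
collection.setDBDecoderFactory(new DBDecoderFactory() {
    public DBDecoder create() {
        return new DefaultDBDecoder();
    }
});
// Or, to restore stock behavior directly: collection.setDBDecoderFactory(DefaultDBDecoder.FACTORY);
```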
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ - -package com.mongodb; - -import org.bson.*; -import org.bson.io.*; - -public interface DBEncoder { - public int writeObject( OutputBuffer buf, BSONObject o ); -} diff --git a/src/main/com/mongodb/DBEncoderFactory.java b/src/main/com/mongodb/DBEncoderFactory.java deleted file mode 100644 index 58fdb9332ca..00000000000 --- a/src/main/com/mongodb/DBEncoderFactory.java +++ /dev/null @@ -1,25 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.mongodb; - -/** - * - */ -public interface DBEncoderFactory { - - public DBEncoder create(); - -} diff --git a/src/main/com/mongodb/DBObject.java b/src/main/com/mongodb/DBObject.java deleted file mode 100644 index e46c2d4c1c9..00000000000 --- a/src/main/com/mongodb/DBObject.java +++ /dev/null @@ -1,40 +0,0 @@ -// DBObject.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import org.bson.BSONObject; - -/** - * A key-value map that can be saved to the database. - */ -public interface DBObject extends BSONObject { - - /** - * if this object was retrieved with only some fields (using a field filter) - * this method will be called to mark it as such. - */ - public void markAsPartialObject(); - - /** - * whether markAsPartialObject was ever called - * only matters if you are going to upsert and do not want to risk losing fields - */ - public boolean isPartialObject(); - -} diff --git a/src/main/com/mongodb/DBPointer.java b/src/main/com/mongodb/DBPointer.java deleted file mode 100644 index 8351e82b9c8..00000000000 --- a/src/main/com/mongodb/DBPointer.java +++ /dev/null @@ -1,60 +0,0 @@ -// DBPointer.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
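[editor's note] The partial-object contract DBObject defines above pairs with the "can't save partial objects" guard in DBCollection: results fetched through a field filter are marked partial so a later save cannot silently drop fields. A sketch with a hypothetical `collection`:

```java
DBObject nameOnly = new BasicDBObject("name", 1);            // field filter
DBObject doc = collection.findOne(new BasicDBObject(), nameOnly);
if (doc != null && doc.isPartialObject()) {
    // collection.save(doc); // would throw IllegalArgumentException("can't save partial objects")
}
```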
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import org.bson.types.ObjectId; - -/** - * @deprecated BSON type DBPointer(0x0c) is deprecated. Please use a {@link com.mongodb.DBRef} instead. - */ -@Deprecated -public class DBPointer extends DBRefBase { - - static final boolean D = Boolean.getBoolean( "DEBUG.DBPOINTER" ); - - /** - * CTOR used for testing BSON encoding. Otherwise - * non-functional due to a DBRef needing a parent db object, - * a fieldName and a db - * - * @param ns namespace to point to - * @param id value of _id - */ - public DBPointer(String ns, ObjectId id) { - this (null, null, null, ns, id); - } - - DBPointer( DBObject parent , String fieldName , DB db , String ns , ObjectId id ){ - super(db, ns, (Object)id); - - _parent = parent; - _fieldName = fieldName; - } - - public String toString(){ - return "{ \"$ref\" : \"" + _ns + "\", \"$id\" : ObjectId(\"" + _id + "\") }"; - } - - public ObjectId getId() { - return (ObjectId)_id; - } - - final DBObject _parent; - final String _fieldName; -} diff --git a/src/main/com/mongodb/DBPort.java b/src/main/com/mongodb/DBPort.java deleted file mode 100644 index 8c445fdca3a..00000000000 --- a/src/main/com/mongodb/DBPort.java +++ /dev/null @@ -1,568 +0,0 @@ -// DBPort.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.mongodb; - -import com.mongodb.util.ThreadUtil; -import org.ietf.jgss.GSSCredential; -import org.ietf.jgss.GSSException; -import org.ietf.jgss.GSSManager; -import org.ietf.jgss.GSSName; -import org.ietf.jgss.Oid; - -import javax.security.auth.callback.Callback; -import javax.security.auth.callback.CallbackHandler; -import javax.security.auth.callback.NameCallback; -import javax.security.auth.callback.PasswordCallback; -import javax.security.auth.callback.UnsupportedCallbackException; -import javax.security.sasl.Sasl; -import javax.security.sasl.SaslClient; -import javax.security.sasl.SaslException; -import java.io.BufferedInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.net.InetSocketAddress; -import java.net.Socket; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.atomic.AtomicLong; -import java.util.logging.Level; -import java.util.logging.Logger; - -/** - * represents a Port to the database, which is effectively a single connection to a server - * Methods implemented at the port level should throw the raw exceptions like IOException, - * so that the connector above can make appropriate decisions on how to handle. - * - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. - */ -@Deprecated -public class DBPort { - - /** - * the default port - */ - public static final int PORT = 27017; - static final boolean USE_NAGLE = false; - - static final long CONN_RETRY_TIME_MS = 15000; - - /** - * creates a new DBPort - * @param addr the server address - */ - @SuppressWarnings("deprecation") - public DBPort( ServerAddress addr ){ - this( addr , null , new MongoOptions() ); - } - - DBPort( ServerAddress addr, DBPortPool pool, MongoOptions options ){ - _options = options; - _sa = addr; - _addr = addr; - _pool = pool; - - _logger = Logger.getLogger( _rootLogger.getName() + "." + addr.toString() ); - _decoder = _options.dbDecoderFactory.create(); - } - - Response call( OutMessage msg , DBCollection coll ) throws IOException{ - return go( msg, coll ); - } - - Response call(OutMessage msg, DBCollection coll, DBDecoder decoder) throws IOException{ - return go( msg, coll, false, decoder); - } - - void say( OutMessage msg ) - throws IOException { - go( msg , null ); - } - - private synchronized Response go( OutMessage msg , DBCollection coll ) - throws IOException { - return go( msg , coll , false, null ); - } - - private synchronized Response go( OutMessage msg , DBCollection coll , DBDecoder decoder ) throws IOException{ - return go( msg, coll, false, decoder ); - } - - private synchronized Response go(OutMessage msg, DBCollection coll, boolean forceResponse, DBDecoder decoder) - throws IOException { - - if ( _processingResponse ){ - if ( coll == null ){ - // this could be a pipeline and should be safe - } - else { - // this could cause issues since we're reading data off the wire - throw new IllegalStateException( "DBPort.go called and expecting a response while processing another response" ); - } - } - - _calls.incrementAndGet(); - - if ( _socket == null ) - _open(); - - if ( _out == null ) - throw new IllegalStateException( "_out shouldn't be null" ); - - try { - msg.prepare(); - _activeState = new ActiveState(msg); - msg.pipe( _out ); - - if ( _pool != null ) - _pool._everWorked = true; - - if ( coll == null && ! 
forceResponse ) - return null; - - _processingResponse = true; - return new Response( _sa , coll , _in , (decoder == null ? _decoder : decoder) ); - } - catch ( IOException ioe ){ - close(); - throw ioe; - } - finally { - _activeState = null; - _processingResponse = false; - } - } - - synchronized CommandResult getLastError( DB db , WriteConcern concern ) throws IOException{ - DBApiLayer dbAL = (DBApiLayer) db; - return runCommand( dbAL, concern.getCommand() ); - } - - synchronized private Response findOne( DB db , String coll , DBObject q ) throws IOException { - OutMessage msg = OutMessage.query( db.getCollection(coll) , 0 , 0 , -1 , q , null ); - try { - Response res = go( msg , db.getCollection( coll ) , null ); - return res; - } finally { - msg.doneWithMessage(); - } - } - - synchronized CommandResult runCommand( DB db , DBObject cmd ) throws IOException { - Response res = findOne(db, "$cmd", cmd); - return convertToCommandResult(cmd, res); - } - - private CommandResult convertToCommandResult(DBObject cmd, Response res) { - if ( res.size() == 0 ) - return null; - if ( res.size() > 1 ) - throw new MongoInternalException( "something is wrong. size:" + res.size() ); - - DBObject data = res.get(0); - if ( data == null ) - throw new MongoInternalException( "something is wrong, no command result" ); - - CommandResult cr = new CommandResult(res.serverUsed()); - cr.putAll( data ); - return cr; - } - - synchronized CommandResult tryGetLastError( DB db , long last, WriteConcern concern) throws IOException { - if ( last != _calls.get() ) - return null; - - return getLastError(db, concern); - } - - /** - * makes sure that a connection to the server has been opened - * @throws IOException - */ - public synchronized void ensureOpen() - throws IOException { - - if ( _socket != null ) - return; - - _open(); - } - - void _open() throws IOException { - - long sleepTime = 100; - - long maxAutoConnectRetryTime = CONN_RETRY_TIME_MS; - if (_options.maxAutoConnectRetryTime > 0) { - maxAutoConnectRetryTime = _options.maxAutoConnectRetryTime; - } - - boolean successfullyConnected = false; - final long start = System.currentTimeMillis(); - do { - try { - _socket = _options.socketFactory.createSocket(); - _socket.connect( _addr.getSocketAddress() , _options.connectTimeout ); - - _socket.setTcpNoDelay( ! USE_NAGLE ); - _socket.setKeepAlive( _options.socketKeepAlive ); - _socket.setSoTimeout( _options.socketTimeout ); - _in = new BufferedInputStream( _socket.getInputStream() ); - _out = _socket.getOutputStream(); - successfullyConnected = true; - } - catch ( IOException e ){ - close(); - - if (!_options.autoConnectRetry || (_pool != null && !_pool._everWorked)) - throw e; - - long waitSoFar = System.currentTimeMillis() - start; - - if (waitSoFar >= maxAutoConnectRetryTime) - throw e; - - if (sleepTime + waitSoFar > maxAutoConnectRetryTime) - sleepTime = maxAutoConnectRetryTime - waitSoFar; - - _logger.log(Level.WARNING, "Exception connecting to " + serverAddress().getHost() + ": " + e + - ". Total wait time so far is " + waitSoFar + " ms. 
Will retry after sleeping for " + sleepTime + " ms."); - ThreadUtil.sleep(sleepTime); - sleepTime *= 2; - } - } while (!successfullyConnected); - } - - @Override - public int hashCode(){ - return _addr.hashCode(); - } - - /** - * returns a String representation of the target host - * @return - */ - public String host(){ - return _addr.toString(); - } - - /** - * @return the server address for this port - */ - public ServerAddress serverAddress() { - return _sa; - } - - @Override - public String toString(){ - return "{DBPort " + host() + "}"; - } - - @Override - protected void finalize() throws Throwable{ - super.finalize(); - close(); - } - - ActiveState getActiveState() { - return _activeState; - } - - int getLocalPort() { - return _socket != null ? _socket.getLocalPort() : -1; - } - - /** - * closes the underlying connection and streams - */ - protected void close(){ - authenticatedDatabases.clear(); - - if ( _socket != null ){ - try { - _socket.close(); - } - catch ( Exception e ){ - // don't care - } - } - - _in = null; - _out = null; - _socket = null; - } - - CommandResult authenticate(Mongo mongo, final MongoCredential credentials) { - Authenticator authenticator; - if (credentials.getMechanism().equals(MongoCredential.MONGODB_CR_MECHANISM)) { - authenticator = new NativeAuthenticator(mongo, credentials); - } else if (credentials.getMechanism().equals(MongoCredential.GSSAPI_MECHANISM)) { - authenticator = new GSSAPIAuthenticator(mongo, credentials); - } else { - throw new IllegalArgumentException("Unsupported authentication protocol: " + credentials.getMechanism()); - } - CommandResult res = authenticator.authenticate(); - authenticatedDatabases.add(credentials.getSource()); - return res; - } - - void checkAuth(Mongo mongo) throws IOException { - // get the difference between the set of credentialed databases and the set of authenticated databases on this connection - Set unauthenticatedDatabases = new HashSet(mongo.getAuthority().getCredentialsStore().getDatabases()); - unauthenticatedDatabases.removeAll(authenticatedDatabases); - - for (String databaseName : unauthenticatedDatabases) { - authenticate(mongo, mongo.getAuthority().getCredentialsStore().get(databaseName)); - } - } - - /** - * Gets the pool that this port belongs to. - * @return the pool that this port belongs to. - */ - public DBPortPool getPool() { - return _pool; - } - - private static Logger _rootLogger = Logger.getLogger( "com.mongodb.port" ); - - final ServerAddress _sa; - final ServerAddress _addr; - final DBPortPool _pool; - final MongoOptions _options; - final Logger _logger; - final DBDecoder _decoder; - - private volatile Socket _socket; - private volatile InputStream _in; - private volatile OutputStream _out; - - private volatile boolean _processingResponse; - - // needs synchronization to ensure that modifications are published. 
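[editor's note] checkAuth() above reconciles each connection against the credential store populated at client construction. A sketch of where those credentials come from in the 2.x public API (user, database, and password are illustrative):

```java
MongoCredential credential = MongoCredential.createMongoCRCredential(
        "appUser", "appDb", "secret".toCharArray());
MongoClient client = new MongoClient(
        new ServerAddress("localhost", DBPort.PORT),   // DBPort.PORT == 27017
        java.util.Arrays.asList(credential));
DB db = client.getDB("appDb");
```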
- final Set authenticatedDatabases = Collections.synchronizedSet(new HashSet()); - - volatile int _lastThread; - final AtomicLong _calls = new AtomicLong(); - private volatile ActiveState _activeState; - private volatile Boolean useCRAMAuthenticationProtocol; - - class ActiveState { - ActiveState(final OutMessage outMessage) { - this.outMessage = outMessage; - this.startTime = System.nanoTime(); - this.threadName = Thread.currentThread().getName(); - } - final OutMessage outMessage; - final long startTime; - final String threadName; - } - - class GenericSaslAuthenticator extends SaslAuthenticator { - static final String CRAM_MD5 = "CRAM-MD5"; - - private final String mechanism; - - GenericSaslAuthenticator(final Mongo mongo, MongoCredential credentials, String mechanism) { - super(mongo, credentials); - this.mechanism = mechanism; - } - - @Override - protected SaslClient createSaslClient() { - try { - return Sasl.createSaslClient(new String[]{mechanism}, - credential.getUserName(), MONGODB_PROTOCOL, - serverAddress().getHost(), null, new CredentialsHandlingCallbackHandler()); - } catch (SaslException e) { - throw new MongoException("Exception initializing SASL client", e); - } - } - - @Override - protected DB getDatabase() { - return mongo.getDB(credential.getSource()); - } - - @Override - public String getMechanismName() { - return mechanism; - } - - class CredentialsHandlingCallbackHandler implements CallbackHandler { - - public void handle(final Callback[] callbacks) throws IOException, UnsupportedCallbackException { - for (Callback callback : callbacks) { - if (callback instanceof NameCallback) { - NameCallback nameCallback = (NameCallback) callback; - nameCallback.setName(credential.getUserName()); - } - if (callback instanceof PasswordCallback) { - PasswordCallback passwordCallback = (PasswordCallback) callback; - String hashedPassword = new String(NativeAuthenticationHelper.createHash( - credential.getUserName(), credential.getPassword())); - passwordCallback.setPassword(hashedPassword.toCharArray()); - } - } - } - } - } - - class GSSAPIAuthenticator extends SaslAuthenticator { - public static final String GSSAPI_OID = "1.2.840.113554.1.2.2"; - public static final String GSSAPI_MECHANISM = MongoCredential.GSSAPI_MECHANISM; - - GSSAPIAuthenticator(final Mongo mongo, final MongoCredential credentials) { - super(mongo, credentials); - - if (!this.credential.getMechanism().equals(MongoCredential.GSSAPI_MECHANISM)) { - throw new MongoException("Incorrect mechanism: " + this.credential.getMechanism()); - } - } - - @Override - protected SaslClient createSaslClient() { - try { - Map props = new HashMap(); - props.put(Sasl.CREDENTIALS, getGSSCredential(credential.getUserName())); - - return Sasl.createSaslClient(new String[]{GSSAPI_MECHANISM}, credential.getUserName(), MONGODB_PROTOCOL, - serverAddress().getHost(), props, null); - } catch (SaslException e) { - throw new MongoException("Exception initializing SASL client", e); - } catch (GSSException e) { - throw new MongoException("Exception initializing GSSAPI credentials", e); - } - } - - @Override - protected DB getDatabase() { - return mongo.getDB(credential.getSource()); - } - - @Override - public String getMechanismName() { - return "GSSAPI"; - } - - private GSSCredential getGSSCredential(String userName) throws GSSException { - Oid krb5Mechanism = new Oid(GSSAPI_OID); - GSSManager manager = GSSManager.getInstance(); - GSSName name = manager.createName(userName, GSSName.NT_USER_NAME); - return manager.createCredential(name, 
GSSCredential.INDEFINITE_LIFETIME, - krb5Mechanism, GSSCredential.INITIATE_ONLY); - } - } - - abstract class SaslAuthenticator extends Authenticator { - public static final String MONGODB_PROTOCOL = "mongodb"; - - SaslAuthenticator(final Mongo mongo, MongoCredential credentials) { - super(mongo, credentials); - } - - public CommandResult authenticate() { - SaslClient saslClient = createSaslClient(); - try { - byte[] response = (saslClient.hasInitialResponse() ? saslClient.evaluateChallenge(new byte[0]) : null); - CommandResult res = sendSaslStart(response); - res.throwOnError(); - - int conversationId = (Integer) res.get("conversationId"); - - while (! (Boolean) res.get("done")) { - response = saslClient.evaluateChallenge((byte[]) res.get("payload")); - - if (response == null) { - throw new MongoException("SASL protocol error: no client response to challenge"); - } - - res = sendSaslContinue(conversationId, response); - res.throwOnError(); - } - return res; - } catch (IOException e) { - throw new MongoException.Network("IOException authenticating the connection", e); - } finally { - try { - saslClient.dispose(); - } catch (SaslException e) { - // ignore - } - } - } - - protected abstract SaslClient createSaslClient(); - - protected abstract DB getDatabase(); - - private CommandResult sendSaslStart(final byte[] outToken) throws IOException { - DBObject cmd = new BasicDBObject("saslStart", 1). - append("mechanism", getMechanismName()) - .append("payload", outToken != null ? outToken : new byte[0]); - return runCommand(getDatabase(), cmd); - } - - private CommandResult sendSaslContinue(final int conversationId, final byte[] outToken) throws IOException { - DB adminDB = getDatabase(); - DBObject cmd = new BasicDBObject("saslContinue", 1).append("conversationId", conversationId). - append("payload", outToken); - return runCommand(adminDB, cmd); - } - - public abstract String getMechanismName(); - } - - class NativeAuthenticator extends Authenticator { - NativeAuthenticator(Mongo mongo, MongoCredential credentials) { - super(mongo, credentials); - } - - @Override - public CommandResult authenticate() { - try { - DB db = mongo.getDB(credential.getSource()); - CommandResult res = runCommand(db, NativeAuthenticationHelper.getNonceCommand()); - res.throwOnError(); - - res = runCommand(db, NativeAuthenticationHelper.getAuthCommand(credential.getUserName(), - credential.getPassword(), res.getString("nonce"))); - res.throwOnError(); - return res; - } catch (IOException e) { - throw new MongoException.Network("IOException authenticating the connection", e); - } - } - } - - abstract class Authenticator { - protected final Mongo mongo; - protected final MongoCredential credential; - - Authenticator(Mongo mongo, MongoCredential credential) { - this.mongo = mongo; - this.credential = credential; - } - - abstract CommandResult authenticate(); - } -} diff --git a/src/main/com/mongodb/DBPortPool.java b/src/main/com/mongodb/DBPortPool.java deleted file mode 100644 index 51ab6c7982a..00000000000 --- a/src/main/com/mongodb/DBPortPool.java +++ /dev/null @@ -1,310 +0,0 @@ -// DBPortPool.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import com.mongodb.util.ConnectionPoolStatisticsBean; -import com.mongodb.util.SimplePool; -import com.mongodb.util.management.JMException; -import com.mongodb.util.management.MBeanServerFactory; - -import java.io.InterruptedIOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.Semaphore; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.logging.Level; - -/** - * This class is NOT part of the public API. Be prepared for non-binary compatible changes in minor releases. - * - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. - */ -@Deprecated -public class DBPortPool extends SimplePool { - - public String getHost() { - return _addr.getHost(); - } - - public int getPort() { - return _addr.getPort(); - } - - public synchronized ConnectionPoolStatisticsBean getStatistics() { - return new ConnectionPoolStatisticsBean(getTotal(), getInUse(), getInUseConnections()); - } - - private InUseConnectionBean[] getInUseConnections() { - List inUseConnectionInfoList = new ArrayList(); - long currentNanoTime = System.nanoTime(); - for (DBPort port : _out) { - inUseConnectionInfoList.add(new InUseConnectionBean(port, currentNanoTime)); - } - return inUseConnectionInfoList.toArray(new InUseConnectionBean[inUseConnectionInfoList.size()]); - } - - - static class Holder { - - Holder( MongoOptions options ){ - _options = options; - } - - DBPortPool get( ServerAddress addr ){ - - DBPortPool p = _pools.get( addr ); - - if (p != null) - return p; - - synchronized (_pools) { - p = _pools.get( addr ); - if (p != null) { - return p; - } - - p = createPool(addr); - _pools.put( addr , p); - - try { - String on = createObjectName(addr); - if (MBeanServerFactory.getMBeanServer().isRegistered(on)) { - MBeanServerFactory.getMBeanServer().unregisterMBean(on); - Bytes.LOGGER.log(Level.INFO, "multiple Mongo instances for same host, jmx numbers might be off"); - } - MBeanServerFactory.getMBeanServer().registerMBean(p, on); - } catch (JMException e) { - Bytes.LOGGER.log(Level.WARNING, "JMX registration error: " + e + - "\nConsider setting com.mongodb.MongoOptions.alwaysUseMBeans property to true." 
+ - "\nContinuing..."); - } catch (java.security.AccessControlException e) { - Bytes.LOGGER.log(Level.WARNING, "JMX registration error: " + e + - "\nContinuing..."); - } - } - - return p; - } - - private DBPortPool createPool(final ServerAddress addr) { - if (isJava5 || _options.isAlwaysUseMBeans()) { - return new Java5MongoConnectionPool(addr, _options); - } else { - return new MongoConnectionPool(addr, _options); - } - } - - void close(){ - synchronized ( _pools ){ - for ( DBPortPool p : _pools.values() ){ - p.close(); - - try { - String on = createObjectName( p._addr ); - if ( MBeanServerFactory.getMBeanServer().isRegistered(on) ){ - MBeanServerFactory.getMBeanServer().unregisterMBean(on); - } - } catch ( JMException e ){ - Bytes.LOGGER.log( Level.WARNING , "jmx de-registration error, continuing" , e ); - } - } - } - } - - private String createObjectName( ServerAddress addr ) { - String name = "com.mongodb:type=ConnectionPool,host=" + addr.toString().replace( ":" , ",port=" ) + ",instance=" + _serial; - if ( _options.description != null ) - name += ",description=" + _options.description; - return name; - } - - static { - isJava5 = System.getProperty("java.version").startsWith("1.5"); - } - - final MongoOptions _options; - final Map _pools = Collections.synchronizedMap( new HashMap() ); - final int _serial = nextSerial.incrementAndGet(); - - // we use this to give each Holder a different mbean name - static AtomicInteger nextSerial = new AtomicInteger(0); - static final boolean isJava5; - } - - // ---- - - /** - * @deprecated This class will be dropped in 3.x versions. - * Please catch {@link MongoClientException} instead. - */ - @Deprecated - public static class NoMoreConnection extends MongoClientException { - - private static final long serialVersionUID = -4415279469780082174L; - - /** - * Constructs a new instance with the given message. - * - * @param msg the message - */ - public NoMoreConnection(String msg) { - super(msg); - } - } - - /** - * @deprecated This class will be dropped in 3.x versions. - * Please catch {@link MongoWaitQueueFullException} instead. - */ - @Deprecated - public static class SemaphoresOut extends MongoWaitQueueFullException { - - private static final long serialVersionUID = -4415279469780082174L; - - private static final String message = "Concurrent requests for database connection have exceeded limit"; - SemaphoresOut(){ - super( message ); - } - - SemaphoresOut(int numPermits){ - super( message + " of " + numPermits); - } - } - - /** - * @deprecated This class will be dropped in 3.x versions. - * Please catch {@link MongoTimeoutException} instead. 
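Holder registers each pool under an object name of the form com.mongodb:type=ConnectionPool,host=...,port=...,instance=N (see createObjectName above). A sketch of reading the pool statistics back over standard JMX; the attribute names are assumed from the MBean getters (getInUse/getTotal), not confirmed by this diff:

```java
import java.lang.management.ManagementFactory;
import java.util.Set;
import javax.management.MBeanServer;
import javax.management.ObjectName;

// Queries every connection-pool MBean the driver registered and prints its usage.
public class PoolJmxExample {
    public static void main(final String[] args) throws Exception {
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        Set<ObjectName> pools =
                server.queryNames(new ObjectName("com.mongodb:type=ConnectionPool,*"), null);
        for (ObjectName pool : pools) {
            // "InUse"/"Total" are assumed to back getInUse()/getTotal().
            System.out.println(pool + " inUse=" + server.getAttribute(pool, "InUse")
                    + " total=" + server.getAttribute(pool, "Total"));
        }
    }
}
```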
- */ - @Deprecated - public static class ConnectionWaitTimeOut extends MongoTimeoutException { - - private static final long serialVersionUID = -4415279469780082174L; - - ConnectionWaitTimeOut(int timeout) { - super("Connection wait timeout after " + timeout + " ms"); - } - } - - // ---- - - DBPortPool( ServerAddress addr , MongoOptions options ){ - super( "DBPortPool-" + addr.toString() + ", options = " + options.toString() , options.connectionsPerHost ); - _options = options; - _addr = addr; - _waitingSem = new Semaphore( _options.connectionsPerHost * _options.threadsAllowedToBlockForConnectionMultiplier ); - } - - protected long memSize( DBPort p ){ - return 0; - } - - @Override - protected int pick( int recommended, boolean couldCreate ){ - int id = System.identityHashCode(Thread.currentThread()); - for (int i = _avail.size() - 1; i >= 0; i--){ - if ( _avail.get(i)._lastThread == id ) - return i; - } - - return couldCreate ? -1 : recommended; - } - - /** - * @return - * @throws MongoException - */ - @Override - public DBPort get() { - DBPort port = null; - if ( ! _waitingSem.tryAcquire() ) - throw new SemaphoresOut(_options.connectionsPerHost * _options.threadsAllowedToBlockForConnectionMultiplier); - - try { - port = get( _options.maxWaitTime ); - } catch (InterruptedException e) { - throw new MongoInterruptedException(e); - } finally { - _waitingSem.release(); - } - - if ( port == null ) - throw new ConnectionWaitTimeOut( _options.maxWaitTime ); - - port._lastThread = System.identityHashCode(Thread.currentThread()); - return port; - } - - // return true if the exception is recoverable - boolean gotError( Exception e ){ - if (e instanceof java.nio.channels.ClosedByInterruptException){ - // this is probably a request that is taking too long - // so usually doesn't mean there is a real db problem - return true; - } - - if ( e instanceof InterruptedIOException){ - // we don't want to clear the port pool for a connection timing out or interrupted - return true; - } - Bytes.LOGGER.log( Level.WARNING , "emptying DBPortPool to " + getServerAddress() + " b/c of error" , e ); - - // force close all sockets - - List all = new ArrayList(); - while ( true ){ - try { - DBPort temp = get(0); - if ( temp == null ) - break; - all.add( temp ); - } catch (InterruptedException interruptedException) { - throw new MongoInterruptedException(interruptedException); - } - } - - for ( DBPort p : all ){ - p.close(); - done(p); - } - - return false; - } - - @Override - public void cleanup( DBPort p ){ - p.close(); - } - - @Override - protected DBPort createNew(){ - return new DBPort( _addr , this , _options ); - } - - public ServerAddress getServerAddress() { - return _addr; - } - - final MongoOptions _options; - final private Semaphore _waitingSem; - final ServerAddress _addr; - boolean _everWorked = false; -} diff --git a/src/main/com/mongodb/DBRef.java b/src/main/com/mongodb/DBRef.java deleted file mode 100644 index dfa035e5a34..00000000000 --- a/src/main/com/mongodb/DBRef.java +++ /dev/null @@ -1,66 +0,0 @@ -// DBRef.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
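The DBPortPool constructor above sizes _waitingSem as connectionsPerHost * threadsAllowedToBlockForConnectionMultiplier, so that product, not the pool size alone, bounds the threads that may contend for a connection before SemaphoresOut is thrown. The arithmetic, with hypothetical option values:

```java
import com.mongodb.MongoOptions;

// Reproduces the permit count used for DBPortPool._waitingSem above.
public class WaitQueueSizing {
    public static void main(final String[] args) {
        MongoOptions options = new MongoOptions();
        options.connectionsPerHost = 10;                          // pool size per host
        options.threadsAllowedToBlockForConnectionMultiplier = 5; // waiters per connection

        int permits = options.connectionsPerHost
                * options.threadsAllowedToBlockForConnectionMultiplier;
        System.out.println("threads allowed to contend per host: " + permits); // 50
    }
}
```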
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import org.bson.BSONObject; - -/** - * overrides DBRefBase to understand a BSONObject representation of a reference. - * @dochub dbrefs - */ -public class DBRef extends DBRefBase { - - static final boolean D = Boolean.getBoolean( "DEBUG.DBREF" ); - - /** - * Creates a DBRef - * @param db the database - * @param o a BSON object representing the reference - */ - public DBRef(DB db , BSONObject o ){ - super( db , o.get( "$ref" ).toString() , o.get( "$id" ) ); - } - - /** - * Creates a DBRef - * @param db the database - * @param ns the namespace where the object is stored - * @param id the object id - */ - public DBRef(DB db , String ns , Object id) { - super(db, ns, id); - } - - /** - * fetches a referenced object from the database - * @param db the database - * @param ref the reference - * @return - * @throws MongoException - */ - public static DBObject fetch(DB db, DBObject ref) { - String ns; - Object id; - - if ((ns = (String)ref.get("$ref")) != null && (id = ref.get("$id")) != null) { - return db.getCollection(ns).findOne(new BasicDBObject("_id", id)); - } - return null; - } -} diff --git a/src/main/com/mongodb/DBRefBase.java b/src/main/com/mongodb/DBRefBase.java deleted file mode 100644 index 14fe5a8c23d..00000000000 --- a/src/main/com/mongodb/DBRefBase.java +++ /dev/null @@ -1,113 +0,0 @@ -// DBRefBase.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.mongodb; - -/** - * represents a database reference, which points to an object stored in the database - */ -public class DBRefBase { - - - /** - * Creates a DBRefBase - * @param db the database - * @param ns the namespace where the object is stored - * @param id the object id - */ - public DBRefBase(DB db , String ns , Object id) { - _db = db; - _ns = ns.intern(); - _id = id; - } - - /** - * fetches the object referenced from the database - * @return - * @throws MongoException - */ - public DBObject fetch() throws MongoException { - if (_loadedPointedTo) - return _pointedTo; - - if (_db == null) - throw new RuntimeException("no db"); - - final DBCollection coll = _db.getCollectionFromString(_ns); - - _pointedTo = coll.findOne(_id); - _loadedPointedTo = true; - return _pointedTo; - } - - @Override - public String toString(){ - return "{ \"$ref\" : \"" + _ns + "\", \"$id\" : \"" + _id + "\" }"; - } - - /** - * Gets the object's id - * @return - */ - public Object getId() { - return _id; - } - - /** - * Gets the object's namespace (collection name) - * @return - */ - public String getRef() { - return _ns; - } - - /** - * Gets the database - * @return - */ - public DB getDB() { - return _db; - } - - @Override - public boolean equals(final Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - final DBRefBase dbRefBase = (DBRefBase) o; - - if (_id != null ? !_id.equals(dbRefBase._id) : dbRefBase._id != null) return false; - if (_ns != null ? !_ns.equals(dbRefBase._ns) : dbRefBase._ns != null) return false; - - return true; - } - - @Override - public int hashCode() { - int result = _id != null ? _id.hashCode() : 0; - result = 31 * result + (_ns != null ? _ns.hashCode() : 0); - return result; - } - - final Object _id; - final String _ns; - final DB _db; - - private boolean _loadedPointedTo = false; - private DBObject _pointedTo; -} diff --git a/src/main/com/mongodb/DBTCPConnector.java b/src/main/com/mongodb/DBTCPConnector.java deleted file mode 100644 index 01fedbdace0..00000000000 --- a/src/main/com/mongodb/DBTCPConnector.java +++ /dev/null @@ -1,671 +0,0 @@ -// DBTCPConnector.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import java.io.IOException; -import java.net.SocketTimeoutException; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.logging.Level; -import java.util.logging.Logger; - -/** - * @deprecated This class is NOT part of the public API. It will be dropped in 3.x releases. 
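The DBRef/DBRefBase pair removed above resolves references lazily: fetch() issues a findOne against the referenced collection and caches the hit for later calls. A minimal usage sketch (database, collection, and id values are hypothetical):

```java
import com.mongodb.DB;
import com.mongodb.DBObject;
import com.mongodb.DBRef;
import com.mongodb.Mongo;

public class DBRefExample {
    public static void main(final String[] args) throws Exception {
        DB db = new Mongo("localhost").getDB("test");

        // Serializes as { "$ref" : "people", "$id" : 42 } (see DBRefBase.toString).
        DBRef ref = new DBRef(db, "people", 42);

        // First call queries people for { _id : 42 }; later calls return the cached hit.
        DBObject person = ref.fetch();
        System.out.println(person);
    }
}
```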
- */ -@Deprecated -public class DBTCPConnector implements DBConnector { - - static Logger _logger = Logger.getLogger( Bytes.LOGGER.getName() + ".tcp" ); - - /** - * @param mongo the Mongo instance - * @throws MongoException - */ - public DBTCPConnector( Mongo mongo ) { - _mongo = mongo; - _portHolder = new DBPortPool.Holder( mongo._options ); - MongoAuthority.Type type = mongo.getAuthority().getType(); - if (type == MongoAuthority.Type.Direct) { - setMasterAddress(mongo.getAuthority().getServerAddresses().get(0)); - } else if (type == MongoAuthority.Type.Set) { - _connectionStatus = new DynamicConnectionStatus(mongo, mongo.getAuthority().getServerAddresses()); - } else { - throw new IllegalArgumentException("Unsupported authority type: " + type); - } - } - - public void start() { - if (_connectionStatus != null) { - _connectionStatus.start(); - } - } - - /** - * Start a "request". - * - * A "request" is a group of operations in which order matters. Examples - * include inserting a document and then performing a query which expects - * that document to have been inserted, or performing an operation and - * then using com.mongodb.Mongo.getLastError to perform error-checking - * on that operation. When a thread performs operations in a "request", all - * operations will be performed on the same socket, so they will be - * correctly ordered. - */ - @Override - public void requestStart(){ - _myPort.requestStart(); - } - - /** - * End the current "request", if this thread is in one. - * - * By ending a request when it is safe to do so the built-in connection- - * pool is allowed to reassign requests to different sockets in order to - * more effectively balance load. See requestStart for more information. - */ - @Override - public void requestDone(){ - _myPort.requestDone(); - } - - /** - * @throws MongoException - */ - @Override - public void requestEnsureConnection(){ - checkMaster( false , true ); - _myPort.requestEnsureConnection(); - } - - void _checkClosed(){ - if ( _closed.get() ) - throw new IllegalStateException( "this Mongo has been closed" ); - } - - WriteResult _checkWriteError( DB db, DBPort port , WriteConcern concern ) - throws IOException{ - CommandResult e = port.runCommand( db , concern.getCommand() ); - - e.throwOnError(); - return new WriteResult( e , concern ); - } - - /** - * @param db - * @param m - * @param concern - * @return - * @throws MongoException - */ - @Override - public WriteResult say( DB db , OutMessage m , WriteConcern concern ){ - return say( db , m , concern , null ); - } - - /** - * @param db - * @param m - * @param concern - * @param hostNeeded - * @return - * @throws MongoException - */ - @Override - public WriteResult say( DB db , OutMessage m , WriteConcern concern , ServerAddress hostNeeded ){ - - if (concern == null) { - throw new IllegalArgumentException("Write concern is null"); - } - - _checkClosed(); - checkMaster( false , true ); - - DBPort port = _myPort.get(true, ReadPreference.primary(), hostNeeded); - - try { - port.checkAuth( db.getMongo() ); - port.say( m ); - if ( concern.callGetLastError() ){ - return _checkWriteError( db , port , concern ); - } - else { - return new WriteResult( db , port , concern ); - } - } - catch ( IOException ioe ){ - _myPort.error(port, ioe); - _error( ioe, false ); - - if ( concern.raiseNetworkErrors() ) - throw new MongoException.Network("Write operation to server " + port.host() + " failed on database " + db , ioe ); - - CommandResult res = new CommandResult(port.serverAddress()); - res.put( "ok" , false ); - 
res.put( "$err" , "NETWORK ERROR" ); - return new WriteResult( res , concern ); - } - catch ( MongoException me ){ - throw me; - } - catch ( RuntimeException re ){ - _myPort.error(port, re); - throw re; - } - finally { - _myPort.done(port); - m.doneWithMessage(); - } - } - - /** - * @param db - * @param coll - * @param m - * @param hostNeeded - * @param decoder - * @return - * @throws MongoException - */ - @Override - public Response call( DB db , DBCollection coll , OutMessage m, ServerAddress hostNeeded, DBDecoder decoder ){ - return call( db , coll , m , hostNeeded , 2, null, decoder ); - } - - /** - * @param db - * @param coll - * @param m - * @param hostNeeded - * @param retries - * @return - * @throws MongoException - */ - @Override - public Response call( DB db , DBCollection coll , OutMessage m , ServerAddress hostNeeded , int retries ){ - return call( db, coll, m, hostNeeded, retries, null, null); - } - - - /** - * @param db - * @param coll - * @param m - * @param hostNeeded - * @param readPref - * @param decoder - * @return - * @throws MongoException - */ - @Override - public Response call( DB db, DBCollection coll, OutMessage m, ServerAddress hostNeeded, int retries, - ReadPreference readPref, DBDecoder decoder ){ - try { - return innerCall(db, coll, m, hostNeeded, retries, readPref, decoder); - } finally { - m.doneWithMessage(); - } - } - - // This method is recursive. It calls itself to implement query retry logic. - private Response innerCall(final DB db, final DBCollection coll, final OutMessage m, final ServerAddress hostNeeded, - final int retries, ReadPreference readPref, final DBDecoder decoder) { - if (readPref == null) - readPref = ReadPreference.primary(); - - if (readPref == ReadPreference.primary() && m.hasOption( Bytes.QUERYOPTION_SLAVEOK )) - readPref = ReadPreference.secondaryPreferred(); - - boolean secondaryOk = !(readPref == ReadPreference.primary()); - - _checkClosed(); - // Don't check master on secondary reads unless connected to a replica set - if (!secondaryOk || getReplicaSetStatus() == null) - checkMaster( false, !secondaryOk ); - - final DBPort port = _myPort.get(false, readPref, hostNeeded); - - Response res = null; - boolean retry = false; - try { - port.checkAuth( db.getMongo() ); - res = port.call( m , coll, decoder ); - if ( res._responseTo != m.getId() ) - throw new MongoException( "ids don't match" ); - } - catch ( IOException ioe ){ - _myPort.error(port, ioe); - retry = retries > 0 && !coll._name.equals( "$cmd" ) - && !(ioe instanceof SocketTimeoutException) && _error( ioe, secondaryOk ); - if ( !retry ){ - throw new MongoException.Network("Read operation to server " + port.host() + " failed on database " + db , ioe ); - } - } - catch ( RuntimeException re ){ - _myPort.error(port, re); - throw re; - } finally { - _myPort.done(port); - } - - if (retry) - return innerCall( db , coll , m , hostNeeded , retries - 1 , readPref, decoder ); - - ServerError err = res.getError(); - - if ( err != null && err.isNotMasterError() ){ - checkMaster( true , true ); - if ( retries <= 0 ){ - throw new MongoException( "not talking to master and retries used up" ); - } - return innerCall( db , coll , m , hostNeeded , retries -1, readPref, decoder ); - } - - return res; - } - - public ServerAddress getAddress(){ - DBPortPool pool = _masterPortPool; - return pool != null ? 
pool.getServerAddress() : null; - } - - /** - * Gets the list of seed server addresses - * @return - */ - public List getAllAddress() { - return _mongo._authority.getServerAddresses(); - } - - /** - * Gets the list of server addresses currently seen by the connector. - * This includes addresses auto-discovered from a replica set. - * @return - * @throws MongoException - */ - public List getServerAddressList() { - if (_connectionStatus != null) { - return _connectionStatus.getServerAddressList(); - } - - ServerAddress master = getAddress(); - if (master != null) { - // single server - List list = new ArrayList(); - list.add(master); - return list; - } - return null; - } - - public ReplicaSetStatus getReplicaSetStatus() { - if (_connectionStatus instanceof ReplicaSetStatus) { - return (ReplicaSetStatus) _connectionStatus; - } else if (_connectionStatus instanceof DynamicConnectionStatus) { - return ((DynamicConnectionStatus) _connectionStatus).asReplicaSetStatus(); - } else { - return null; - } - } - - // This call can block if it's not yet known. - // Be careful when modifying this method, as this method is using the fact that _isMongosDirectConnection - // is of type Boolean and is null when uninitialized. - boolean isMongosConnection() { - if (_connectionStatus instanceof MongosStatus) { - return true; - } else if (_connectionStatus instanceof DynamicConnectionStatus) { - return ((DynamicConnectionStatus) _connectionStatus).asMongosStatus() != null; - } - - if (_isMongosDirectConnection == null) { - initDirectConnection(); - } - - return _isMongosDirectConnection != null ? _isMongosDirectConnection : false; - } - - public String getConnectPoint(){ - ServerAddress master = getAddress(); - return master != null ? master.toString() : null; - } - - /** - * This method is called in case of an IOException. - * It will potentially trigger a checkMaster() to check the status of all servers. 
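checkMaster and initDirectConnection below both key off the server's isMaster reply: maxBsonObjectSize (reported by servers since 1.8) and msg == "isdbgrid", which identifies a mongos. The same round trip performed by hand at the public API level:

```java
import com.mongodb.BasicDBObject;
import com.mongodb.CommandResult;
import com.mongodb.DB;
import com.mongodb.Mongo;

// The isMaster round trip initDirectConnection performs, run manually.
public class IsMasterExample {
    public static void main(final String[] args) throws Exception {
        DB admin = new Mongo("localhost").getDB("admin");
        CommandResult res = admin.command(new BasicDBObject("isMaster", 1));

        System.out.println("maxBsonObjectSize: " + res.get("maxBsonObjectSize"));
        System.out.println("mongos: " + "isdbgrid".equals(res.getString("msg")));
    }
}
```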
- * @param t the exception thrown - * @param secondaryOk secondaryOk flag - * @return true if the request should be retried, false otherwise - * @throws MongoException - */ - boolean _error( Throwable t, boolean secondaryOk ){ - if (_connectionStatus == null) { - // single server, no need to retry - return false; - } - - // the replset has at least 1 server up, try to see if should switch master - // if no server is up, we wont retry until the updater thread finds one - // this is to cut down the volume of requests/errors when all servers are down - if ( _connectionStatus.hasServerUp() ){ - checkMaster( true , !secondaryOk ); - } - return _connectionStatus.hasServerUp(); - } - - class MyPort { - - DBPort get( boolean keep , ReadPreference readPref, ServerAddress hostNeeded ){ - - DBPort pinnedRequestPort = getPinnedRequestPortForThread(); - - if ( hostNeeded != null ) { - if (pinnedRequestPort != null && pinnedRequestPort.serverAddress().equals(hostNeeded)) { - return pinnedRequestPort; - } - - // asked for a specific host - return _portHolder.get( hostNeeded ).get(); - } - - if ( pinnedRequestPort != null ){ - // we are within a request, and have a port, should stick to it - if ( pinnedRequestPort.getPool() == _masterPortPool || !keep ) { - // if keep is false, it's a read, so we use port even if master changed - return pinnedRequestPort; - } - - // it's write and master has changed - // we fall back on new master and try to go on with request - // this may not be best behavior if spec of request is to stick with same server - pinnedRequestPort.getPool().done(pinnedRequestPort); - setPinnedRequestPortForThread(null); - } - - DBPort port; - if (getReplicaSetStatus() == null){ - if (_masterPortPool == null) { - // this should only happen in rare case that no master was ever found - // may get here at startup if it's a read, slaveOk=true, and ALL servers are down - throw new MongoException("Rare case where master=null, probably all servers are down"); - } - port = _masterPortPool.get(); - } - else { - ReplicaSetStatus.ReplicaSet replicaSet = getReplicaSetStatus()._replicaSetHolder.get(); - ConnectionStatus.Node node = readPref.getNode(replicaSet); - - if (node == null) - throw new MongoException("No replica set members available in " + replicaSet + " for " + readPref.toDBObject().toString()); - - port = _portHolder.get(node.getServerAddress()).get(); - } - - // if within request, remember port to stick to same server - if (threadHasPinnedRequest()) { - setPinnedRequestPortForThread(port); - } - - return port; - } - - void done( DBPort port ) { - DBPort requestPort = getPinnedRequestPortForThread(); - - // keep request port - if (port != requestPort) { - port.getPool().done(port); - } - } - - /** - * call this method when there is an IOException or other low level error on port. 
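MyPort's thread-local pinning below is what backs the requestStart/requestDone contract described earlier: between the two calls a thread keeps a single socket, so its reads observe its own writes. A usage sketch against the 2.x public API (collection and document are hypothetical):

```java
import com.mongodb.BasicDBObject;
import com.mongodb.DB;
import com.mongodb.Mongo;

public class RequestPinningExample {
    public static void main(final String[] args) throws Exception {
        DB db = new Mongo("localhost").getDB("test");

        db.requestStart();
        try {
            db.getCollection("people").insert(new BasicDBObject("_id", 1));
            // Same pinned socket, so the read is ordered after the insert.
            System.out.println(db.getCollection("people").findOne(new BasicDBObject("_id", 1)));
        } finally {
            db.requestDone();
        }
    }
}
```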
- * @param port - * @param e - */ - void error( DBPort port , Exception e ){ - port.close(); - pinnedRequestStatusThreadLocal.remove(); - - // depending on type of error, may need to close other connections in pool - boolean recoverable = port.getPool().gotError(e); - if (!recoverable && _connectionStatus != null && _masterPortPool._addr.equals(port.serverAddress())) { - ConnectionStatus.Node newMaster = _connectionStatus.ensureMaster(); - if (newMaster != null) { - setMaster(newMaster); - } - } - } - - void requestEnsureConnection(){ - if ( !threadHasPinnedRequest() ) - return; - - if ( getPinnedRequestPortForThread() != null ) - return; - - setPinnedRequestPortForThread(_masterPortPool.get()); - } - - void requestStart(){ - PinnedRequestStatus current = getPinnedRequestStatusForThread(); - if (current == null) { - pinnedRequestStatusThreadLocal.set(new PinnedRequestStatus()); - } - else { - current.nestedBindings++; - } - } - - void requestDone(){ - PinnedRequestStatus current = getPinnedRequestStatusForThread(); - if (current != null) { - if (current.nestedBindings > 0) { - current.nestedBindings--; - } - else { - pinnedRequestStatusThreadLocal.remove(); - if (current.requestPort != null) - current.requestPort.getPool().done(current.requestPort); - } - } - } - - PinnedRequestStatus getPinnedRequestStatusForThread() { - return pinnedRequestStatusThreadLocal.get(); - } - - boolean threadHasPinnedRequest() { - return pinnedRequestStatusThreadLocal.get() != null; - } - - DBPort getPinnedRequestPortForThread() { - return threadHasPinnedRequest() ? pinnedRequestStatusThreadLocal.get().requestPort : null; - } - - void setPinnedRequestPortForThread(final DBPort port) { - pinnedRequestStatusThreadLocal.get().requestPort = port; - } - - private final ThreadLocal pinnedRequestStatusThreadLocal = new ThreadLocal(); - } - - static class PinnedRequestStatus { - DBPort requestPort; - public int nestedBindings; - } - - void checkMaster( boolean force , boolean failIfNoMaster ){ - - if ( _connectionStatus != null ){ - if ( _masterPortPool == null || force ){ - ConnectionStatus.Node master = _connectionStatus.ensureMaster(); - if ( master == null ){ - if ( failIfNoMaster ) - throw new MongoException( "can't find a master" ); - } - else { - setMaster(master); - } - } - } else { - // single server, may have to obtain max bson size - if (_maxBsonObjectSize == 0) - initDirectConnection(); - } - } - - synchronized void setMaster(ConnectionStatus.Node master) { - if (_closed.get()) { - return; - } - setMasterAddress(master.getServerAddress()); - _maxBsonObjectSize = master.getMaxBsonObjectSize(); - } - - /** - * Fetches the maximum size for a BSON object from the current master server - * @return the size, or 0 if it could not be obtained - */ - void initDirectConnection() { - if (_masterPortPool == null) - return; - DBPort port = _masterPortPool.get(); - try { - CommandResult res = port.runCommand(_mongo.getDB("admin"), new BasicDBObject("isMaster", 1)); - // max size was added in 1.8 - if (res.containsField("maxBsonObjectSize")) { - _maxBsonObjectSize = (Integer) res.get("maxBsonObjectSize"); - } else { - _maxBsonObjectSize = Bytes.MAX_OBJECT_SIZE; - } - - String msg = res.getString("msg"); - _isMongosDirectConnection = msg != null && msg.equals("isdbgrid"); - } catch (Exception e) { - _logger.log(Level.WARNING, "Exception executing isMaster command on " + port.serverAddress(), e); - } finally { - port.getPool().done(port); - } - } - - - - private synchronized boolean setMasterAddress(ServerAddress addr) { - 
DBPortPool newPool = _portHolder.get( addr ); - if (newPool == _masterPortPool) - return false; - - if ( _masterPortPool != null ) - _logger.log(Level.WARNING, "Primary switching from " + _masterPortPool.getServerAddress() + " to " + addr); - _masterPortPool = newPool; - return true; - } - - public String debugString(){ - StringBuilder buf = new StringBuilder( "DBTCPConnector: " ); - if ( _connectionStatus != null ) { - buf.append( "set : " ).append( _mongo._authority.getServerAddresses() ); - } else { - buf.append(getAddress()); - } - - return buf.toString(); - } - - public void close(){ - _closed.set( true ); - if ( _portHolder != null ) { - try { - _portHolder.close(); - _portHolder = null; - } catch (final Throwable t) { /* nada */ } - } - if ( _connectionStatus != null ) { - try { - _connectionStatus.close(); - _connectionStatus = null; - } catch (final Throwable t) { /* nada */ } - } - } - - /** - * Assigns a new DBPortPool for a given ServerAddress. - * This is used to obtain a new pool when the resolved IP of a host changes, for example. - * User application should not have to call this method directly. - * @param addr - */ - public void updatePortPool(ServerAddress addr) { - // just remove from map, a new pool will be created lazily - _portHolder._pools.remove(addr); - } - - /** - * Gets the DBPortPool associated with a ServerAddress. - * @param addr - * @return - */ - public DBPortPool getDBPortPool(ServerAddress addr) { - return _portHolder.get(addr); - } - - public boolean isOpen(){ - return ! _closed.get(); - } - - @Override - public CommandResult authenticate(MongoCredential credentials) { - checkMaster(false, true); - final DBPort port = _myPort.get(false, ReadPreference.primaryPreferred(), null); - - try { - CommandResult result = port.authenticate(_mongo, credentials); - _mongo.getAuthority().getCredentialsStore().add(credentials); - return result; - } finally { - _myPort.done(port); - } - } - - /** - * Gets the maximum size for a BSON object supported by the current master server. - * Note that this value may change over time depending on which server is master. - * @return the maximum size, or 0 if not obtained from servers yet. - */ - public int getMaxBsonObjectSize() { - return _maxBsonObjectSize; - } - - // expose for unit testing - MyPort getMyPort() { - return _myPort; - } - - private volatile DBPortPool _masterPortPool; - private final Mongo _mongo; - private DBPortPool.Holder _portHolder; - private ConnectionStatus _connectionStatus; - - private final AtomicBoolean _closed = new AtomicBoolean(false); - - private volatile int _maxBsonObjectSize; - private volatile Boolean _isMongosDirectConnection; - - MyPort _myPort = new MyPort(); -} diff --git a/src/main/com/mongodb/DefaultDBCallback.java b/src/main/com/mongodb/DefaultDBCallback.java deleted file mode 100644 index 4c878f68b29..00000000000 --- a/src/main/com/mongodb/DefaultDBCallback.java +++ /dev/null @@ -1,155 +0,0 @@ -// DBCallback.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -// Bson -import org.bson.BSONObject; -import org.bson.BasicBSONCallback; -import org.bson.types.ObjectId; - -import java.util.LinkedList; -import java.util.List; -import java.util.logging.Level; -import java.util.logging.Logger; - -/** - * This class overrides BasicBSONCallback to implement some extra features specific to the Database. - * For example DBRef type. - * @author antoine - */ -public class DefaultDBCallback extends BasicBSONCallback implements DBCallback { - - static class DefaultFactory implements DBCallbackFactory { - @Override - public DBCallback create( DBCollection collection ){ - return new DefaultDBCallback( collection ); - } - } - - public static DBCallbackFactory FACTORY = new DefaultFactory(); - - public DefaultDBCallback( DBCollection coll ){ - _collection = coll; - _db = _collection == null ? null : _collection.getDB(); - } - - @Override - public void gotDBRef( String name , String ns , ObjectId id ){ - if ( id.equals( Bytes.COLLECTION_REF_ID ) ) - cur().put( name , _collection ); - else - cur().put(name, new DBRef(_db, ns, id)); - } - - @Override - public void objectStart(boolean array, String name){ - _nameStack.addLast(name); - super.objectStart( array , name ); - } - - @Override - public Object objectDone(){ - BSONObject o = (BSONObject)super.objectDone(); - String lastName = null; - if ( _nameStack.size() > 0 ){ - lastName = _nameStack.removeLast(); - } - if ( ! ( o instanceof List ) && lastName != null && - o.containsField( "$ref" ) && - o.containsField( "$id" ) ){ - return cur().put(lastName, new DBRef( _db, o ) ); - } - - return o; - } - - /** - * @return - * @throws MongoException - */ - @Override - public BSONObject create(){ - return _create( null ); - } - - /** - * @param array - * @param path - * @return - * @throws MongoException - */ - @Override - public BSONObject create( boolean array , List path ){ - if ( array ) - return new BasicDBList(); - return _create( path ); - } - - private DBObject _create( List path ){ - - Class c = null; - - if ( _collection != null && _collection._objectClass != null){ - if ( path == null || path.size() == 0 ){ - c = _collection._objectClass; - } - else { - StringBuilder buf = new StringBuilder(); - for ( int i=0; i 0 ) - buf.append("."); - buf.append( path.get(i) ); - } - c = _collection.getInternalClass( buf.toString() ); - } - - } - - if ( c != null ){ - try { - return (DBObject)c.newInstance(); - } - catch ( InstantiationException ie ){ - LOGGER.log( Level.FINE , "can't create a: " + c , ie ); - throw new MongoInternalException( "can't instantiate a : " + c , ie ); - } - catch ( IllegalAccessException iae ){ - LOGGER.log( Level.FINE , "can't create a: " + c , iae ); - throw new MongoInternalException( "can't instantiate a : " + c , iae ); - } - } - - return new BasicDBObject(); - } - - DBObject dbget(){ - return (DBObject)get(); - } - - @Override - public void reset(){ - _nameStack = new LinkedList(); - super.reset(); - } - - private LinkedList _nameStack; - final DBCollection _collection; - final DB _db; - static final Logger LOGGER = Logger.getLogger( "com.mongo.DECODING" ); -} diff --git a/src/main/com/mongodb/DefaultDBDecoder.java b/src/main/com/mongodb/DefaultDBDecoder.java deleted file mode 100644 index 5d718d2c018..00000000000 --- a/src/main/com/mongodb/DefaultDBDecoder.java +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.mongodb; - -import org.bson.BasicBSONDecoder; - -import java.io.IOException; -import java.io.InputStream; - -/** - * - * @author antoine - */ -public class DefaultDBDecoder extends BasicBSONDecoder implements DBDecoder { - - static class DefaultFactory implements DBDecoderFactory { - @Override - public DBDecoder create( ){ - return new DefaultDBDecoder( ); - } - - @Override - public String toString() { - return "DefaultDBDecoder.DefaultFactory"; - } - } - - public static DBDecoderFactory FACTORY = new DefaultFactory(); - - public DefaultDBDecoder( ){ - } - - public DBCallback getDBCallback(DBCollection collection) { - // brand new callback every time - return new DefaultDBCallback(collection); - } - - public DBObject decode(byte[] b, DBCollection collection) { - DBCallback cbk = getDBCallback(collection); - cbk.reset(); - decode(b, cbk); - return (DBObject) cbk.get(); - } - - public DBObject decode(InputStream in, DBCollection collection) throws IOException { - DBCallback cbk = getDBCallback(collection); - cbk.reset(); - decode(in, cbk); - return (DBObject) cbk.get(); - } - - @Override - public String toString() { - return "DefaultDBDecoder"; - } -} diff --git a/src/main/com/mongodb/DefaultDBEncoder.java b/src/main/com/mongodb/DefaultDBEncoder.java deleted file mode 100644 index 3b8368e4832..00000000000 --- a/src/main/com/mongodb/DefaultDBEncoder.java +++ /dev/null @@ -1,92 +0,0 @@ -/** - * Copyright (c) 2008 - 2011 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.mongodb; - -import org.bson.BSONObject; -import org.bson.BasicBSONEncoder; -import org.bson.io.OutputBuffer; -import org.bson.types.ObjectId; - -import static org.bson.BSON.*; - - -public class DefaultDBEncoder extends BasicBSONEncoder implements DBEncoder { - - public int writeObject( OutputBuffer buf, BSONObject o ){ - set( buf ); - int x = super.putObject( o ); - done(); - return x; - } - - static class DefaultFactory implements DBEncoderFactory { - @Override - public DBEncoder create( ){ - return new DefaultDBEncoder( ); - } - - @Override - public String toString() { - return "DefaultDBEncoder.DefaultFactory"; - } - - } - - protected boolean putSpecial( String name , Object val ){ - if (val instanceof DBRefBase) { - putDBRef(name, (DBRefBase) val); - return true; - } else { - return false; - } - } - - /** - * @deprecated Please see {@link DBPointer}. 
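DefaultDBCallback._create above consults the collection's registered object class (and per-path internal classes), which is how 2.x callers obtained typed query results. A sketch, with the document subclass hypothetical:

```java
import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.Mongo;

public class ObjectClassExample {
    // Must be a DBObject with a no-arg constructor, since _create uses newInstance().
    public static class Person extends BasicDBObject {
        public String name() { return getString("name"); }
    }

    public static void main(final String[] args) throws Exception {
        DBCollection coll = new Mongo("localhost").getDB("test").getCollection("people");
        coll.setObjectClass(Person.class); // picked up as _collection._objectClass above

        Person p = (Person) coll.findOne(); // decoded through DefaultDBCallback
        if (p != null) System.out.println(p.name());
    }
}
```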
- * You can override {@link #putDBRef(String, DBRefBase)} if you need - * a specific behaviour while decoding database references. - */ - @Deprecated - protected void putDBPointer( String name , String ns , ObjectId oid ){ - _put( REF , name ); - - _putValueString( ns ); - _buf.writeInt( oid._time() ); - _buf.writeInt( oid._machine() ); - _buf.writeInt( oid._inc() ); - } - - protected void putDBRef( String name, DBRefBase ref ){ - _put( OBJECT , name ); - final int sizePos = _buf.getPosition(); - _buf.writeInt( 0 ); - - _putObjectField( "$ref" , ref.getRef() ); - _putObjectField( "$id" , ref.getId() ); - - _buf.write( EOO ); - _buf.writeInt( sizePos , _buf.getPosition() - sizePos ); - } - - - public static DBEncoderFactory FACTORY = new DefaultFactory(); - - public DefaultDBEncoder( ){ - } - - @Override - public String toString() { - return "DefaultDBEncoder"; - } - -} diff --git a/src/main/com/mongodb/DynamicConnectionStatus.java b/src/main/com/mongodb/DynamicConnectionStatus.java deleted file mode 100644 index 45948eca180..00000000000 --- a/src/main/com/mongodb/DynamicConnectionStatus.java +++ /dev/null @@ -1,195 +0,0 @@ -/** - * Copyright (c) 2008 - 2012 10gen, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.RejectedExecutionException; -import java.util.logging.Level; -import java.util.logging.Logger; - -/** - * Responsible for dynamically determining whether the list of server address represents a set of mongos server or - * a replica set. It starts threads that call the ismaster command on every server in the seed list, and as soon as it - * reaches one determines what type of server it is. It then creates the appropriate ConnectionStatus implementation - * and forwards all calls to it. - */ -class DynamicConnectionStatus extends ConnectionStatus { - - private static final Logger logger = Logger.getLogger("com.mongodb.DynamicConnectionStatus"); - - DynamicConnectionStatus(Mongo mongo, List mongosAddresses) { - super(mongosAddresses, mongo); - } - - @Override - void start() { - super.start(); - executorService = Executors.newFixedThreadPool(_mongosAddresses.size()); - initExecutorService(); - } - - @Override - void close() { - if (connectionStatus != null) { - connectionStatus.close(); - } - if (executorService != null) { - executorService.shutdownNow(); - } - super.close(); - } - - ReplicaSetStatus asReplicaSetStatus() { - ConnectionStatus connectionStatus = getConnectionStatus(); - if (connectionStatus instanceof ReplicaSetStatus) { - return (ReplicaSetStatus) connectionStatus; - } - return null; - } - - MongosStatus asMongosStatus() { - ConnectionStatus connectionStatus = getConnectionStatus(); - if (connectionStatus instanceof MongosStatus) { - return (MongosStatus) connectionStatus; - } - return null; - } - - @Override - List getServerAddressList() { - if (connectionStatus != null) { - return connectionStatus.getServerAddressList(); - } else { - return new ArrayList(_mongosAddresses); - } - } - - @Override - boolean hasServerUp() { - ConnectionStatus connectionStatus = getConnectionStatus(); - if (connectionStatus != null) { - return connectionStatus.hasServerUp(); - } else { - return false; - } - } - - @Override - Node ensureMaster() { - ConnectionStatus connectionStatus = getConnectionStatus(); - if (connectionStatus != null) { - return connectionStatus.ensureMaster(); - } else { - return null; - } - } - - void initExecutorService() { - try { - for (final ServerAddress cur : _mongosAddresses) { - executorService.submit(new Runnable() { - @Override - public void run() { - DynamicNode node = new DynamicNode(cur, _mongo, _mongoOptions); - try { - while (!Thread.interrupted()) { - try { - node.update(); - if (node.isOk()) { - notifyOfOkNode(node); - return; - } - } catch (Exception e) { - logger.log(Level.WARNING, "couldn't reach " + node._addr, e); - } - - int sleepTime = updaterIntervalNoMasterMS; - Thread.sleep(sleepTime); - } - } catch (InterruptedException e) { - // fall through - } - } - }); - } - } catch (RejectedExecutionException e) { - // Ignore, as this can happen if a good node is found before all jobs are submitted and the service has - // been shutdown. 
- } - } - - private void notifyOfOkNode(DynamicNode node) { - synchronized (this) { - if (connectionStatus != null) { - return; - } - - if (node.isMongos) { - connectionStatus = new MongosStatus(_mongo, _mongosAddresses); - } else { - connectionStatus = new ReplicaSetStatus(_mongo, _mongosAddresses); - } - notifyAll(); - } - connectionStatus.start(); - executorService.shutdownNow(); - } - - static class DynamicNode extends UpdatableNode { - DynamicNode(final ServerAddress addr, Mongo mongo, MongoOptions mongoOptions) { - super(addr, mongo, mongoOptions); - } - - @Override - protected Logger getLogger() { - return logger; - } - - @Override - public CommandResult update() { - CommandResult res = super.update(); - - if (res != null) { - String msg = res.getString("msg"); - if (msg != null && msg.equals("isdbgrid")) { - isMongos = true; - } - } - return res; - } - - private boolean isMongos; - } - - private synchronized ConnectionStatus getConnectionStatus() { - if (connectionStatus == null) { - try { - wait(_mongo.getMongoOptions().getConnectTimeout()); - } catch (InterruptedException e) { - throw new MongoInterruptedException("Interrupted while waiting for next update to dynamic status", e); - } - } - return connectionStatus; - } - - - private volatile ConnectionStatus connectionStatus; - private ExecutorService executorService; -} diff --git a/src/main/com/mongodb/GroupCommand.java b/src/main/com/mongodb/GroupCommand.java deleted file mode 100644 index af3d7f38763..00000000000 --- a/src/main/com/mongodb/GroupCommand.java +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.mongodb; - -/** - * This class groups the argument for a group operation and can build the underlying command object - * @dochub mapreduce - */ -public class GroupCommand { - - public GroupCommand(DBCollection inputCollection, DBObject keys, DBObject condition, DBObject initial, String reduce, String finalize) { - this.input = inputCollection.getName(); - this.keys = keys; - this.condition = condition; - this.initial = initial; - this.reduce = reduce; - this.finalize = finalize; - } - - public DBObject toDBObject() { - BasicDBObject args = new BasicDBObject(); - args.put( "ns" , input ); - args.put( "key" , keys ); - args.put( "cond" , condition ); - args.put( "$reduce" , reduce ); - args.put( "initial" , initial ); - if ( finalize != null ) - args.put( "finalize" , finalize ); - return new BasicDBObject( "group" , args ); - } - - String input; - DBObject keys; - DBObject condition; - DBObject initial; - String reduce; - String finalize; -} diff --git a/src/main/com/mongodb/InUseConnectionBean.java b/src/main/com/mongodb/InUseConnectionBean.java deleted file mode 100644 index 3715ebb5c0f..00000000000 --- a/src/main/com/mongodb/InUseConnectionBean.java +++ /dev/null @@ -1,85 +0,0 @@ -/** - * Copyright (c) 2008 - 2011 10gen, Inc. - *
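GroupCommand, removed just above, only assembles the command document; it was normally handed to DBCollection.group to execute. A sketch with hypothetical field names:

```java
import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.GroupCommand;
import com.mongodb.Mongo;

public class GroupExample {
    public static void main(final String[] args) throws Exception {
        DBCollection events = new Mongo("localhost").getDB("test").getCollection("events");

        GroupCommand cmd = new GroupCommand(
                events,
                new BasicDBObject("type", 1),                            // key
                new BasicDBObject("count", new BasicDBObject("$gt", 0)), // cond
                new BasicDBObject("total", 0),                           // initial accumulator
                "function(doc, out) { out.total += doc.count; }",        // reduce
                null);                                                   // no finalize

        // Runs { group : { ns, key, cond, $reduce, initial } } as built by toDBObject.
        System.out.println(events.group(cmd));
    }
}
```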
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.mongodb; - -import java.util.concurrent.TimeUnit; - -/** - * This class is NOT part of the public API. Be prepared for non-binary compatible changes in minor releases. - * - * @deprecated This class will be removed in 3.x versions of the driver, - * so please remove it from your compile time dependencies. - */ -@Deprecated -public class InUseConnectionBean { - - InUseConnectionBean(final DBPort port, long currentNanoTime) { - DBPort.ActiveState activeState = port.getActiveState(); - if (activeState == null) { - durationMS = 0; - namespace = null; - opCode = null; - query = null; - threadName = null; - numDocuments = 0; - } - else { - durationMS = TimeUnit.NANOSECONDS.toMillis(currentNanoTime - activeState.startTime); - namespace = activeState.outMessage.getNamespace(); - opCode = activeState.outMessage.getOpCode(); - query = activeState.outMessage.getQuery() != null ? activeState.outMessage.getQuery().toString() : null; - threadName = activeState.threadName; - numDocuments = activeState.outMessage.getNumDocuments(); - } - localPort = port.getLocalPort(); - } - - public String getNamespace() { - return namespace; - } - - public OutMessage.OpCode getOpCode() { - return opCode; - } - - public String getQuery() { - return query; - } - - public int getLocalPort() { - return localPort; - } - - public long getDurationMS() { - return durationMS; - } - - public String getThreadName() { - return threadName; - } - - public int getNumDocuments() { - return numDocuments; - } - - private final String namespace; - private final OutMessage.OpCode opCode; - private final String query; - private final int localPort; - private final long durationMS; - private final String threadName; - private final int numDocuments; -} diff --git a/src/main/com/mongodb/Java5MongoConnectionPool.java b/src/main/com/mongodb/Java5MongoConnectionPool.java deleted file mode 100644 index 03a48eebff5..00000000000 --- a/src/main/com/mongodb/Java5MongoConnectionPool.java +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright (c) 2008 - 2012 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.mongodb; - -/** - * This class exists only so that on Java 5 the driver can create instances of a standard MBean, - * therefore keeping compatibility with the JMX implementation in the Java 5 JMX class libraries. - * - * @deprecated This class will be removed in 3.x versions of the driver, - * so please remove it from your compile time dependencies. 
- */ -@Deprecated -class Java5MongoConnectionPool extends DBPortPool implements Java5MongoConnectionPoolMBean { - - Java5MongoConnectionPool(ServerAddress addr, MongoOptions options) { - super(addr, options); - } -} diff --git a/src/main/com/mongodb/Java5MongoConnectionPoolMBean.java b/src/main/com/mongodb/Java5MongoConnectionPoolMBean.java deleted file mode 100644 index 2efaf48490b..00000000000 --- a/src/main/com/mongodb/Java5MongoConnectionPoolMBean.java +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Copyright (c) 2008 - 2012 10gen, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -/** - * A standard MBean interface for a Mongo connection pool, for use on Java 5 virtual machines. - *
- * This interface is NOT part of the public API. Be prepared for non-binary compatible changes in minor releases. - * - * @deprecated This class will be removed in 3.x versions of the driver, - * so please remove it from your compile time dependencies. - */ -@Deprecated -public interface Java5MongoConnectionPoolMBean { - /** - * Gets the name of the pool. - * - * @return the name of the pool - */ - String getName(); - - /** - * Gets the host that this connection pool is connecting to. - * - * @return the host - */ - String getHost(); - - /** - * Gets the port that this connection pool is connecting to. - * - * @return the port - */ - int getPort(); - - /** - * Gets the total number of pool members, including idle and and in-use members. - * - * @return total number of members - */ - int getTotal(); - - /** - * Gets the number of pool members that are currently in use. - * - * @return number of in-use members - */ - int getInUse(); - - /** - * Gets the maximum allowed size of the pool, including idle and in-use members. - * - * @return the maximum size - */ - int getMaxSize(); -} diff --git a/src/main/com/mongodb/LazyDBCallback.java b/src/main/com/mongodb/LazyDBCallback.java deleted file mode 100644 index 3a22ae017a2..00000000000 --- a/src/main/com/mongodb/LazyDBCallback.java +++ /dev/null @@ -1,61 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.mongodb; - -import java.util.Iterator; -import java.util.List; -import java.util.logging.Logger; - -import org.bson.LazyBSONCallback; -import org.bson.types.ObjectId; - -/** - * - */ -public class LazyDBCallback extends LazyBSONCallback implements DBCallback { - - public LazyDBCallback( DBCollection coll ){ - _collection = coll; - _db = _collection == null ? null : _collection.getDB(); - } - - @Override - public Object createObject( byte[] data, int offset ){ - LazyDBObject o = new LazyDBObject( data, offset, this ); - //log.info("Created inner BSONObject: " + o); - // need to detect DBRef but must make sure we dont search through all fields - // $ref must be 1st key - Iterator it = o.keySet().iterator(); - if ( it.hasNext() && it.next().equals( "$ref" ) && - o.containsField( "$id" ) ){ - return new DBRef( _db, o ); - } - return o; - } - - @Override - public List createArray(byte[] data, int offset) { - return new LazyDBList(data, offset, this); - } - - public Object createDBRef( String ns, ObjectId id ){ - return new DBRef( _db, ns, id ); - } - - final DBCollection _collection; - final DB _db; - private static final Logger log = Logger.getLogger( LazyDBCallback.class.getName() ); -} diff --git a/src/main/com/mongodb/LazyDBDecoder.java b/src/main/com/mongodb/LazyDBDecoder.java deleted file mode 100644 index 364b112549f..00000000000 --- a/src/main/com/mongodb/LazyDBDecoder.java +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.mongodb; - -import java.io.IOException; -import java.io.InputStream; - -import org.bson.LazyBSONDecoder; - -/** - * - */ -public class LazyDBDecoder extends LazyBSONDecoder implements DBDecoder { - static class LazyDBDecoderFactory implements DBDecoderFactory { - @Override - public DBDecoder create( ){ - return new LazyDBDecoder(); - } - } - - public static DBDecoderFactory FACTORY = new LazyDBDecoderFactory(); - - public LazyDBDecoder( ){ - } - - public DBCallback getDBCallback(DBCollection collection) { - // callback doesnt do anything special, could be unique per decoder - // but have to create per collection due to DBRef, at least - return new LazyDBCallback(collection); - } - - public DBObject decode(byte[] b, DBCollection collection) { - DBCallback cbk = getDBCallback(collection); - cbk.reset(); - decode(b, cbk); - return (DBObject) cbk.get(); - } - - public DBObject decode(InputStream in, DBCollection collection) throws IOException { - DBCallback cbk = getDBCallback(collection); - cbk.reset(); - decode(in, cbk); - return (DBObject) cbk.get(); - } -} diff --git a/src/main/com/mongodb/LazyDBEncoder.java b/src/main/com/mongodb/LazyDBEncoder.java deleted file mode 100644 index 349fb6000ce..00000000000 --- a/src/main/com/mongodb/LazyDBEncoder.java +++ /dev/null @@ -1,52 +0,0 @@ -/** - * Copyright (c) 2008 - 2011 10gen, Inc. - *
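The lazy family above (LazyDBCallback, LazyDBDecoder, LazyDBObject) keeps each document as raw BSON bytes and parses individual fields only on access. Opting a collection in is a single call; a sketch:

```java
import com.mongodb.DBCollection;
import com.mongodb.DBObject;
import com.mongodb.LazyDBDecoder;
import com.mongodb.Mongo;

public class LazyDecodingExample {
    public static void main(final String[] args) throws Exception {
        DBCollection coll = new Mongo("localhost").getDB("test").getCollection("big");
        coll.setDBDecoderFactory(LazyDBDecoder.FACTORY);

        DBObject doc = coll.findOne();           // a LazyDBObject under the hood
        if (doc != null) {
            System.out.println(doc.get("name")); // scans the buffer for this one field
        }
    }
}
```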
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import org.bson.BSONObject; -import org.bson.io.OutputBuffer; - -import java.io.IOException; -import java.io.OutputStream; - -/** - * Encoder that only knows how to encode BSONObject instances of type LazyDBObject. - */ -public class LazyDBEncoder implements DBEncoder { - - /** - * @param buf - * @param o - * @return - * @throws MongoException - */ - @Override - public int writeObject(final OutputBuffer buf, BSONObject o) { - if (!(o instanceof LazyDBObject)) { - throw new IllegalArgumentException("LazyDBEncoder can only encode BSONObject instances of type LazyDBObject"); - } - - LazyDBObject lazyDBObject = (LazyDBObject) o; - - try { - lazyDBObject.pipe(buf); - } catch (IOException e) { - throw new MongoException("Exception serializing a LazyDBObject", e); - } - - return lazyDBObject.getBSONSize(); - } -} diff --git a/src/main/com/mongodb/LazyDBList.java b/src/main/com/mongodb/LazyDBList.java deleted file mode 100644 index d3eaff0c8f2..00000000000 --- a/src/main/com/mongodb/LazyDBList.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2008 - 2013 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.mongodb; - -import org.bson.LazyBSONCallback; -import org.bson.io.BSONByteBuffer; - -public class LazyDBList extends org.bson.LazyDBList { - - public LazyDBList(final byte[] data, final LazyBSONCallback callback) { - super(data, callback); - } - - public LazyDBList(final byte[] data, final int offset, final LazyBSONCallback callback) { - super(data, offset, callback); - } - - public LazyDBList(final BSONByteBuffer buffer, final LazyBSONCallback callback) { - super(buffer, callback); - } - - public LazyDBList(final BSONByteBuffer buffer, final int offset, final LazyBSONCallback callback) { - super(buffer, offset, callback); - } -} diff --git a/src/main/com/mongodb/LazyDBObject.java b/src/main/com/mongodb/LazyDBObject.java deleted file mode 100644 index c88f3152c80..00000000000 --- a/src/main/com/mongodb/LazyDBObject.java +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.mongodb; - -import org.bson.LazyBSONCallback; -import org.bson.LazyBSONObject; -import org.bson.io.BSONByteBuffer; - -public class LazyDBObject extends LazyBSONObject implements DBObject { - - public void markAsPartialObject() { - _partial = true; - } - - public boolean isPartialObject() { - return _partial; - } - - public LazyDBObject(BSONByteBuffer buff, LazyBSONCallback cbk){ - super(buff, cbk); - } - - public LazyDBObject(BSONByteBuffer buff, int offset, LazyBSONCallback cbk){ - super(buff, offset, cbk); - } - - - public LazyDBObject(byte[] data, LazyBSONCallback cbk){ - this(data, 0, cbk); - } - - public LazyDBObject(byte[] data, int offset, LazyBSONCallback cbk){ - super(data, offset, cbk); - } - - private boolean _partial = false; -} diff --git a/src/main/com/mongodb/LazyWriteableDBCallback.java b/src/main/com/mongodb/LazyWriteableDBCallback.java deleted file mode 100644 index 87c16697927..00000000000 --- a/src/main/com/mongodb/LazyWriteableDBCallback.java +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.mongodb; - -import java.util.Iterator; -import java.util.logging.Logger; - -/** - * - */ -public class LazyWriteableDBCallback extends LazyDBCallback { - - public LazyWriteableDBCallback( DBCollection coll ){ - super(coll); - } - - @Override - public Object createObject( byte[] data, int offset ){ - LazyWriteableDBObject o = new LazyWriteableDBObject( data, offset, this ); - //log.info("Created inner BSONObject: " + o); - // need to detect DBRef but must make sure we dont search through all fields - // $ref must be 1st key - Iterator it = o.keySet().iterator(); - if ( it.hasNext() && it.next().equals( "$ref" ) && - o.containsField( "$id" ) ){ - return new DBRef( _db, o ); - } - return o; - } - - private static final Logger log = Logger.getLogger( LazyWriteableDBCallback.class.getName() ); -} diff --git a/src/main/com/mongodb/LazyWriteableDBDecoder.java b/src/main/com/mongodb/LazyWriteableDBDecoder.java deleted file mode 100644 index 4d9a6803f9e..00000000000 --- a/src/main/com/mongodb/LazyWriteableDBDecoder.java +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
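
For reference, the removed lazy classes were designed to be used together: LazyDBObject defers field decoding until access, and LazyDBEncoder streams the underlying BSON bytes back out without materializing the document. A minimal sketch against the 2.x API removed in this diff (the document contents are illustrative):

    import com.mongodb.BasicDBObject;
    import com.mongodb.DefaultDBEncoder;
    import com.mongodb.LazyDBEncoder;
    import com.mongodb.LazyDBObject;
    import org.bson.LazyBSONCallback;
    import org.bson.io.BasicOutputBuffer;

    public class LazyRoundTrip {
        public static void main(String[] args) {
            // Encode {"x": 1} to raw BSON bytes with the default encoder.
            BasicOutputBuffer in = new BasicOutputBuffer();
            new DefaultDBEncoder().writeObject(in, new BasicDBObject("x", 1));
            byte[] rawBson = in.toByteArray();

            // Wrap the bytes lazily: fields are only decoded on access.
            LazyDBObject doc = new LazyDBObject(rawBson, new LazyBSONCallback());

            // LazyDBEncoder pipes the underlying bytes straight back out,
            // so re-serialization never decodes the document's fields.
            BasicOutputBuffer out = new BasicOutputBuffer();
            int size = new LazyDBEncoder().writeObject(out, doc);
            System.out.println("re-encoded " + size + " bytes, x = " + doc.get("x"));
        }
    }
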
- */ -package com.mongodb; - - -/** - * - */ -public class LazyWriteableDBDecoder extends LazyDBDecoder { - static class LazyDBDecoderFactory implements DBDecoderFactory { - @Override - public DBDecoder create( ){ - return new LazyWriteableDBDecoder(); - } - } - - public static DBDecoderFactory FACTORY = new LazyDBDecoderFactory(); - - public DBCallback getDBCallback(DBCollection collection) { - return new LazyWriteableDBCallback(collection); - } - -} diff --git a/src/main/com/mongodb/LazyWriteableDBObject.java b/src/main/com/mongodb/LazyWriteableDBObject.java deleted file mode 100644 index 496540106b4..00000000000 --- a/src/main/com/mongodb/LazyWriteableDBObject.java +++ /dev/null @@ -1,120 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.mongodb; - -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; - -import org.bson.BSONObject; -import org.bson.LazyBSONCallback; -import org.bson.io.BSONByteBuffer; - -public class LazyWriteableDBObject extends LazyDBObject { - - public LazyWriteableDBObject(BSONByteBuffer buff, LazyBSONCallback cbk){ - super(buff, cbk); - } - - public LazyWriteableDBObject(BSONByteBuffer buff, int offset, LazyBSONCallback cbk){ - super(buff, offset, cbk); - } - - - public LazyWriteableDBObject(byte[] data, LazyBSONCallback cbk){ - this(data, 0, cbk); - } - - public LazyWriteableDBObject(byte[] data, int offset, LazyBSONCallback cbk){ - super(data, offset, cbk); - } - - /* (non-Javadoc) - * @see org.bson.LazyBSONObject#put(java.lang.String, java.lang.Object) - */ - @Override - public Object put(String key, Object v) { - return writeable.put(key, v); - } - - /* (non-Javadoc) - * @see org.bson.LazyBSONObject#putAll(org.bson.BSONObject) - */ - @Override - public void putAll(BSONObject o) { - for(String key : o.keySet()){ - put(key, o.get(key)); - } - } - - /* (non-Javadoc) - * @see org.bson.LazyBSONObject#putAll(java.util.Map) - */ - @SuppressWarnings("unchecked") - @Override - public void putAll(Map m) { - writeable.putAll(m); - } - - /* (non-Javadoc) - * @see org.bson.LazyBSONObject#get(java.lang.String) - */ - @Override - public Object get(String key) { - Object o = writeable.get(key); - return (o!=null) ? o : super.get(key); - } - - /* (non-Javadoc) - * @see org.bson.LazyBSONObject#removeField(java.lang.String) - */ - @Override - public Object removeField(String key) { - Object o = writeable.remove(key); - return (o!=null) ? o : super.removeField(key); - } - - /* (non-Javadoc) - * @see org.bson.LazyBSONObject#containsField(java.lang.String) - */ - @Override - public boolean containsField(String s) { - boolean has = writeable.containsKey(s); - return (has) ? 
has : super.containsField(s); - } - - /* (non-Javadoc) - * @see org.bson.LazyBSONObject#keySet() - */ - @Override - public Set keySet() { - Set combined = new HashSet(); - combined.addAll(writeable.keySet()); - combined.addAll(super.keySet()); - return combined; - } - - /* (non-Javadoc) - * @see org.bson.LazyBSONObject#isEmpty() - */ - @Override - public boolean isEmpty() { - return writeable.isEmpty() || super.isEmpty(); - } - - final private HashMap writeable = new HashMap(); -} diff --git a/src/main/com/mongodb/MapReduceCommand.java b/src/main/com/mongodb/MapReduceCommand.java deleted file mode 100644 index af011b63d54..00000000000 --- a/src/main/com/mongodb/MapReduceCommand.java +++ /dev/null @@ -1,326 +0,0 @@ -/** - * Copyright (c) 2010 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.mongodb; - -import java.util.Map; - -/** - * This class groups the argument for a map/reduce operation and can build the underlying command object - * @dochub mapreduce - */ -public class MapReduceCommand { - - /** - * INLINE - Return results inline, no result is written to the DB server - * REPLACE - Save the job output to a collection, replacing its previous content - * MERGE - Merge the job output with the existing contents of outputTarget collection - * REDUCE - Reduce the job output with the existing contents of outputTarget collection - */ - public static enum OutputType { - REPLACE, MERGE, REDUCE, INLINE - }; - - /** - * Represents the command for a map reduce operation - * Runs the command in REPLACE output type to a named collection - * - * @param inputCollection - * the collection to read from - * @param map - * map function in javascript code - * @param reduce - * reduce function in javascript code - * @param outputCollection - * optional - leave null if want to get the result inline - * @param type - * the type of output - * @param query - * the query to use on input - * @return - * @dochub mapreduce - */ - public MapReduceCommand(DBCollection inputCollection , String map , String reduce , String outputCollection, OutputType type, DBObject query) { - _input = inputCollection.getName(); - _map = map; - _reduce = reduce; - _outputTarget = outputCollection; - _outputType = type; - _query = query; - } - - /** - * Sets the verbosity of the MapReduce job, - * defaults to 'true' - * - * @param verbose - * The verbosity level. - */ - public void setVerbose( Boolean verbose ){ - _verbose = verbose; - } - - /** - * Gets the verbosity of the MapReduce job. - * - * @return the verbosity level. 
- */ - public Boolean isVerbose(){ - return _verbose; - } - - /** - * Get the name of the collection the MapReduce will read from - * - * @return name of the collection the MapReduce will read from - */ - public String getInput(){ - return _input; - } - - - /** - * Get the map function, as a JS String - * - * @return the map function (as a JS String) - */ - public String getMap(){ - return _map; - } - - /** - * Gets the reduce function, as a JS String - * - * @return the reduce function (as a JS String) - */ - public String getReduce(){ - return _reduce; - } - - /** - * Gets the output target (name of collection to save to) - * This value is nullable only if OutputType is set to INLINE - * - * @return The outputTarget - */ - public String getOutputTarget(){ - return _outputTarget; - } - - - /** - * Gets the OutputType for this instance. - * @return The outputType. - */ - public OutputType getOutputType(){ - return _outputType; - } - - - /** - * Gets the Finalize JS Function - * - * @return The finalize function (as a JS String). - */ - public String getFinalize(){ - return _finalize; - } - - /** - * Sets the Finalize JS Function - * - * @param finalize - * The finalize function (as a JS String) - */ - public void setFinalize( String finalize ){ - _finalize = finalize; - } - - /** - * Gets the query to run for this MapReduce job - * - * @return The query object - */ - public DBObject getQuery(){ - return _query; - } - - /** - * Gets the (optional) sort specification object - * - * @return the Sort DBObject - */ - public DBObject getSort(){ - return _sort; - } - - /** - * Sets the (optional) sort specification object - * - * @param sort - * The sort specification object - */ - public void setSort( DBObject sort ){ - _sort = sort; - } - - /** - * Gets the (optional) limit on input - * - * @return The limit specification object - */ - public int getLimit(){ - return _limit; - } - - /** - * Sets the (optional) limit on input - * - * @param limit - * The limit specification object - */ - public void setLimit( int limit ){ - _limit = limit; - } - - /** - * Gets the (optional) JavaScript scope - * - * @return The JavaScript scope - */ - public Map getScope(){ - return _scope; - } - - /** - * Sets the (optional) JavaScript scope - * - * @param scope - * The JavaScript scope - */ - public void setScope( Map scope ){ - _scope = scope; - } - - /** - * Sets the (optional) database name where the output collection should reside - * @param outputDB - */ - public void setOutputDB(String outputDB) { - this._outputDB = outputDB; - } - - - - public DBObject toDBObject() { - BasicDBObject cmd = new BasicDBObject(); - - cmd.put("mapreduce", _input); - cmd.put("map", _map); - cmd.put("reduce", _reduce); - cmd.put("verbose", _verbose); - - BasicDBObject out = new BasicDBObject(); - switch(_outputType) { - case INLINE: - out.put("inline", 1); - break; - case REPLACE: - out.put("replace", _outputTarget); - break; - case MERGE: - out.put("merge", _outputTarget); - break; - case REDUCE: - out.put("reduce", _outputTarget); - break; - } - if (_outputDB != null) - out.put("db", _outputDB); - cmd.put("out", out); - - if (_query != null) - cmd.put("query", _query); - - if (_finalize != null) - cmd.put( "finalize", _finalize ); - - if (_sort != null) - cmd.put("sort", _sort); - - if (_limit > 0) - cmd.put("limit", _limit); - - if (_scope != null) - cmd.put("scope", _scope); - - if (_extra != null) { - cmd.putAll(_extra); - } - - return cmd; - } - - public void addExtraOption(String name, Object value) { - if (_extra == 
null) - _extra = new BasicDBObject(); - _extra.put(name, value); - } - - public DBObject getExtraOptions() { - return _extra; - } - - /** - * Sets the read preference for this command. - * See the * documentation for {@link ReadPreference} - * for more information. - * - * @param preference Read Preference to use - */ - public void setReadPreference( ReadPreference preference ){ - _readPref = preference; - } - - /** - * Gets the read preference - * @return - */ - public ReadPreference getReadPreference(){ - return _readPref; - } - - - public String toString() { - return toDBObject().toString(); - } - - final String _input; - final String _map; - final String _reduce; - final String _outputTarget; - ReadPreference _readPref; - String _outputDB = null; - final OutputType _outputType; - final DBObject _query; - String _finalize; - DBObject _sort; - int _limit; - Map _scope; - Boolean _verbose = true; - DBObject _extra; -} diff --git a/src/main/com/mongodb/MapReduceOutput.java b/src/main/com/mongodb/MapReduceOutput.java deleted file mode 100644 index 23703f0fe24..00000000000 --- a/src/main/com/mongodb/MapReduceOutput.java +++ /dev/null @@ -1,113 +0,0 @@ -// MapReduceOutput.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
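
The removed MapReduceCommand groups the arguments of a map/reduce job and builds the raw command document via toDBObject(); DBCollection.mapReduce(...) returns the MapReduceOutput class deleted below. A hedged sketch (the database, collection name, and JavaScript functions are hypothetical):

    import java.net.UnknownHostException;
    import com.mongodb.*;

    public class MapReduceExample {
        public static void main(String[] args) throws UnknownHostException {
            DB db = new MongoClient("localhost").getDB("test"); // hypothetical database
            DBCollection events = db.getCollection("events");   // hypothetical collection

            // Count documents per "type" field; REPLACE writes the result into
            // the named output collection, overwriting its previous content.
            MapReduceCommand cmd = new MapReduceCommand(
                    events,
                    "function() { emit(this.type, 1); }",                   // map
                    "function(key, values) { return Array.sum(values); }",  // reduce
                    "event_counts",
                    MapReduceCommand.OutputType.REPLACE,
                    null);                                                  // no input query

            // toDBObject() builds the raw command, roughly:
            // { mapreduce: "events", map: ..., reduce: ..., out: { replace: "event_counts" }, ... }
            MapReduceOutput out = events.mapReduce(cmd);
            for (DBObject doc : out.results()) {
                System.out.println(doc);
            }
        }
    }
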
- */ - -package com.mongodb; - -/** - * Represents the result of a map/reduce operation - * @author antoine - */ -public class MapReduceOutput { - - @SuppressWarnings("unchecked") - public MapReduceOutput( DBCollection from , DBObject cmd, CommandResult raw ){ - _commandResult = raw; - _cmd = cmd; - - if ( raw.containsField( "results" ) ) { - _coll = null; - _collname = null; - _resultSet = (Iterable) raw.get( "results" ); - } else { - Object res = raw.get("result"); - if (res instanceof String) { - _collname = (String) res; - } else { - BasicDBObject output = (BasicDBObject) res; - _collname = output.getString("collection"); - _dbname = output.getString("db"); - } - - DB db = from._db; - if (_dbname != null) { - db = db.getSisterDB(_dbname); - } - _coll = db.getCollection( _collname ); - // M/R only applies to master, make sure we dont go to slave for results - _coll.setOptions(_coll.getOptions() & ~Bytes.QUERYOPTION_SLAVEOK); - _resultSet = _coll.find(); - } - _counts = (BasicDBObject)raw.get( "counts" ); - } - - /** - * returns a cursor to the results of the operation - * @return - */ - public Iterable results(){ - return _resultSet; - } - - /** - * drops the collection that holds the results - * @throws MongoException - */ - public void drop(){ - if ( _coll != null) - _coll.drop(); - } - - /** - * gets the collection that holds the results - * (Will return null if results are Inline) - * @return - */ - public DBCollection getOutputCollection(){ - return _coll; - } - - @Deprecated - public BasicDBObject getRaw(){ - return _commandResult; - } - - public CommandResult getCommandResult(){ - return _commandResult; - } - - public DBObject getCommand() { - return _cmd; - } - - public ServerAddress getServerUsed() { - return _commandResult.getServerUsed(); - } - - public String toString(){ - return _commandResult.toString(); - } - - final CommandResult _commandResult; - - final String _collname; - String _dbname = null; - final Iterable _resultSet; - final DBCollection _coll; - final BasicDBObject _counts; - final DBObject _cmd; -} diff --git a/src/main/com/mongodb/Mongo.java b/src/main/com/mongodb/Mongo.java deleted file mode 100644 index da9a9d35c91..00000000000 --- a/src/main/com/mongodb/Mongo.java +++ /dev/null @@ -1,829 +0,0 @@ -// Mongo.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import org.bson.io.PoolOutputBuffer; - -import java.net.UnknownHostException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; -import java.util.logging.Logger; - -/** - * A database connection with internal connection pooling. For most applications, you should have one Mongo instance - * for the entire JVM. - *

- * The following are equivalent, and all connect to the local database running on the default port:
- * Mongo mongo1 = new Mongo();
- * Mongo mongo2 = new Mongo("localhost");
- * Mongo mongo3 = new Mongo("localhost", 27017);
- * Mongo mongo4 = new Mongo(new ServerAddress("localhost"));
- *

- * You can connect to a replica set using the Java driver by passing a ServerAddress list to the Mongo constructor. For example:
- * Mongo mongo = new Mongo(Arrays.asList(
- *   new ServerAddress("localhost", 27017),
- *   new ServerAddress("localhost", 27018),
- *   new ServerAddress("localhost", 27019)));
- *
- * You can connect to a sharded cluster using the same constructor. Mongo will auto-detect whether the servers are
- * a list of replica set members or a list of mongos servers.
- *
- * By default, all read and write operations will be made on the primary,
- * but it's possible to read from secondaries by changing the read preference:
- * mongo.setReadPreference(ReadPreference.secondary());
- *
- * By default, write operations will not throw exceptions on failure, but that is easily changed too:
- * mongo.setWriteConcern(WriteConcern.SAFE);
- *
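
Assembled into a complete program, the javadoc fragments above look roughly like this; a sketch against the deprecated 2.x API (hosts and ports are illustrative):

    import java.net.UnknownHostException;
    import java.util.Arrays;
    import com.mongodb.*;

    public class LegacyMongoExample {
        public static void main(String[] args) throws UnknownHostException {
            Mongo mongo = new Mongo(Arrays.asList(
                    new ServerAddress("localhost", 27017),
                    new ServerAddress("localhost", 27018),
                    new ServerAddress("localhost", 27019)));

            mongo.setReadPreference(ReadPreference.secondary()); // allow secondary reads
            mongo.setWriteConcern(WriteConcern.SAFE);            // raise errors on failed writes

            DB db = mongo.getDB("test");
            System.out.println(db.getCollection("sample").count());
            mongo.close();
        }
    }
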
- * - * Note: This class has been superseded by {@code MongoClient}, and may be deprecated in a future release. - * - * @see MongoClient - * @see ReadPreference - * @see WriteConcern - */ -public class Mongo { - - static Logger logger = Logger.getLogger(Bytes.LOGGER.getName() + ".Mongo"); - - - // Make sure you don't change the format of these two static variables. A preprocessing regexp - // is applied and updates the version based on configuration in build.properties. - - /** - * @deprecated Replaced by Mongo.getMajorVersion() - */ - @Deprecated - public static final int MAJOR_VERSION = 2; - - /** - * @deprecated Replaced by Mongo.getMinorVersion() - */ - @Deprecated - public static final int MINOR_VERSION = 12; - - private static final String FULL_VERSION = "2.12.0-SNAPSHOT"; - - static int cleanerIntervalMS; - - private static final String ADMIN_DATABASE_NAME = "admin"; - - static { - cleanerIntervalMS = Integer.parseInt(System.getProperty("com.mongodb.cleanerIntervalMS", "1000")); - } - - /** - * Gets the major version of this library - * @return the major version, e.g. 2 - * - * @deprecated Please use {@link #getVersion()} instead. - */ - @Deprecated - public static int getMajorVersion() { - return MAJOR_VERSION; - } - - /** - * Gets the minor version of this library - * @return the minor version, e.g. 8 - * - * @deprecated Please use {@link #getVersion()} instead. - */ - @Deprecated - public static int getMinorVersion() { - return MINOR_VERSION; - } - - /** - * returns a database object - * @param addr the database address - * @return - * @throws MongoException - * - * @deprecated Please use {@link MongoClient#getDB(String)} instead. - */ - @Deprecated - public static DB connect( DBAddress addr ){ - return new Mongo( addr ).getDB( addr.getDBName() ); - } - - /** - * Creates a Mongo instance based on a (single) mongodb node (localhost, default port) - * @throws UnknownHostException - * @throws MongoException - * - * @deprecated Replaced by {@link MongoClient#MongoClient()}) - * - */ - @Deprecated - public Mongo() - throws UnknownHostException { - this( new ServerAddress() ); - } - - /** - * Creates a Mongo instance based on a (single) mongodb node (default port) - * @param host server to connect to - * @throws UnknownHostException if the database host cannot be resolved - * @throws MongoException - * - * @deprecated Replaced by {@link MongoClient#MongoClient(String)} - * - */ - @Deprecated - public Mongo( String host ) - throws UnknownHostException{ - this( new ServerAddress( host ) ); - } - - /** - * Creates a Mongo instance based on a (single) mongodb node (default port) - * @param host server to connect to - * @param options default query options - * @throws UnknownHostException if the database host cannot be resolved - * @throws MongoException - * - * @deprecated Replaced by {@link MongoClient#MongoClient(String, MongoClientOptions)} - * - */ - @Deprecated - public Mongo( String host , MongoOptions options ) - throws UnknownHostException { - this( new ServerAddress( host ) , options ); - } - - /** - * Creates a Mongo instance based on a (single) mongodb node - * @param host the database's host address - * @param port the port on which the database is running - * @throws UnknownHostException if the database host cannot be resolved - * @throws MongoException - * - * @deprecated Replaced by {@link MongoClient#MongoClient(String, int)} - * - */ - @Deprecated - public Mongo( String host , int port ) - throws UnknownHostException { - this( new ServerAddress( host , port ) ); - } - - 
/** - * Creates a Mongo instance based on a (single) mongodb node - * @see com.mongodb.ServerAddress - * @param addr the database address - * @throws MongoException - * - * @deprecated Replaced by {@link MongoClient#MongoClient(ServerAddress)} - * - */ - @Deprecated - public Mongo( ServerAddress addr ) { - this(addr, new MongoOptions()); - } - - /** - * Creates a Mongo instance based on a (single) mongo node using a given ServerAddress - * @see com.mongodb.ServerAddress - * @param addr the database address - * @param options default query options - * @throws MongoException - * - * @deprecated Replaced by {@link MongoClient#MongoClient(ServerAddress, MongoClientOptions)} - * - */ - @Deprecated - public Mongo( ServerAddress addr , MongoOptions options ) { - this(MongoAuthority.direct(addr), options); - } - - /** - *

- * Creates a Mongo in paired mode. This will also work for a replica set and will find all members (the master will be used by default).

- * - * @see com.mongodb.ServerAddress - * @param left left side of the pair - * @param right right side of the pair - * @throws MongoException - */ - @Deprecated - public Mongo( ServerAddress left , ServerAddress right ) { - this(left, right, new MongoOptions()); - } - - /** - *

- * Creates a Mongo connection in paired mode. This will also work for a replica set and will find all members (the master will be used by default).

- * - * @see com.mongodb.ServerAddress - * @param left left side of the pair - * @param right right side of the pair - * @param options - * @throws MongoException - */ - @Deprecated - public Mongo( ServerAddress left , ServerAddress right , MongoOptions options ) { - this(MongoAuthority.dynamicSet(Arrays.asList(left, right)), options); - } - - /** - * Creates a Mongo based on a list of replica set members or a list of mongos. - * It will find all members (the master will be used by default). If you pass in a single server in the list, - * the driver will still function as if it is a replica set. If you have a standalone server, - * use the Mongo(ServerAddress) constructor. - *

- * If this is a list of mongos servers, it will pick the closest (lowest ping time) one to send all requests to, - * and automatically fail over to the next server if the closest is down. - * - * @see com.mongodb.ServerAddress - * @param seeds Put as many servers as you can in the list and the system will figure out the rest. This can - * either be a list of mongod servers in the same replica set or a list of mongos servers in the same - * sharded cluster. - * @throws MongoException - * - * @deprecated Replaced by {@link MongoClient#MongoClient(java.util.List)} - * - */ - @Deprecated - public Mongo( List seeds ) { - this( seeds , new MongoOptions() ); - } - - /** - * Creates a Mongo based on a list of replica set members or a list of mongos. - * It will find all members (the master will be used by default). If you pass in a single server in the list, - * the driver will still function as if it is a replica set. If you have a standalone server, - * use the Mongo(ServerAddress) constructor. - *

- * If this is a list of mongos servers, it will pick the closest (lowest ping time) one to send all requests to, - * and automatically fail over to the next server if the closest is down. - * - * @see com.mongodb.ServerAddress - * @param seeds Put as many servers as you can in the list and the system will figure out the rest. This can - * either be a list of mongod servers in the same replica set or a list of mongos servers in the same - * sharded cluster. - * @param options for configuring this Mongo instance - * @throws MongoException - * - * @deprecated Replaced by {@link MongoClient#MongoClient(java.util.List, MongoClientOptions)} - * - */ - @Deprecated - public Mongo( List seeds , MongoOptions options ) { - this(MongoAuthority.dynamicSet(seeds), options); - } - - /** - * Creates a Mongo described by a URI. - * If only one address is used it will only connect to that node, otherwise it will discover all nodes. - * If the URI contains database credentials, the database will be authenticated lazily on first use - * with those credentials. - * @param uri - * @see MongoURI - *

- * examples:
- *   mongodb://localhost
- *   mongodb://fred:foobar@localhost/

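A hedged sketch of the URI-based constructor described above (the URI and database name are illustrative; MongoURI is the deprecated URI type this constructor accepts):

    import java.net.UnknownHostException;
    import com.mongodb.DB;
    import com.mongodb.Mongo;
    import com.mongodb.MongoURI;

    public class UriConnectExample {
        @SuppressWarnings("deprecation")
        public static void main(String[] args) throws UnknownHostException {
            // Credentials in the URI are applied lazily on first use.
            Mongo mongo = new Mongo(new MongoURI("mongodb://fred:foobar@localhost/"));
            DB db = mongo.getDB("test");
            System.out.println(db.getName());
            mongo.close();
        }
    }
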
    - * @throws MongoException - * @throws UnknownHostException - * @dochub connections - * - * @deprecated Replaced by {@link MongoClient#MongoClient(MongoClientURI)} - * - */ - @Deprecated - public Mongo( MongoURI uri ) throws UnknownHostException { - this(getMongoAuthorityFromURI(uri), uri.getOptions()); - } - - /** - * Creates a Mongo based on an authority and options. - *

    - * Note: This constructor is provisional and is subject to change before the final release - * - * @param authority the authority - * @param options the options - */ - Mongo(MongoAuthority authority, MongoOptions options) { - logger.info("Creating Mongo instance (driver version " + getVersion() + ") with authority " + authority + " and options " + options); - _authority = authority; - _options = options; - _applyMongoOptions(); - - _connector = new DBTCPConnector( this ); - - _connector.start(); - if (_options.cursorFinalizerEnabled) { - _cleaner = new CursorCleanerThread(); - _cleaner.start(); - } else { - _cleaner = null; - } - } - - /** - * gets a database object - * @param dbname the database name - * @return - */ - public DB getDB( String dbname ){ - - DB db = _dbs.get( dbname ); - if ( db != null ) - return db; - - db = new DBApiLayer( this , dbname , _connector ); - DB temp = _dbs.putIfAbsent( dbname , db ); - if ( temp != null ) - return temp; - return db; - } - - /** - * gets a collection of DBs used by the driver since this Mongo instance was created. - * This may include DBs that exist in the client but not yet on the server. - * @return - */ - public Collection getUsedDatabases(){ - return _dbs.values(); - } - - /** - * gets a list of all database names present on the server - * @return - * @throws MongoException - */ - public List getDatabaseNames(){ - - BasicDBObject cmd = new BasicDBObject(); - cmd.put("listDatabases", 1); - - - CommandResult res = getDB(ADMIN_DATABASE_NAME).command(cmd, getOptions()); - res.throwOnError(); - - List l = (List)res.get("databases"); - - List list = new ArrayList(); - - for (Object o : l) { - list.add(((BasicDBObject)o).getString("name")); - } - return list; - } - - - /** - * Drops the database if it exists. - * @param dbName name of database to drop - * @throws MongoException - */ - public void dropDatabase(String dbName){ - - getDB( dbName ).dropDatabase(); - } - - /** - * gets this driver version - * @return the full version string of this driver, e.g. "2.8.0" - */ - public String getVersion(){ - return FULL_VERSION; - } - - /** - * returns a string representing the hosts used in this Mongo instance - * @return - * - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. - */ - @Deprecated - public String debugString(){ - return _connector.debugString(); - } - - /** - * Gets the current master's hostname - * @return - */ - public String getConnectPoint(){ - return _connector.getConnectPoint(); - } - - /** - * Gets the underlying TCP connector - * @return - * @deprecated {@link DBTCPConnector} is NOT part of the public API. It will be dropped in 3.x releases. - */ - @Deprecated - public DBTCPConnector getConnector() { - return _connector; - } - - /** - * Gets the replica set status object - * @return - */ - public ReplicaSetStatus getReplicaSetStatus() { - return _connector.getReplicaSetStatus(); - } - - /** - * Gets the address of the current master - * @return the address - */ - public ServerAddress getAddress(){ - return _connector.getAddress(); - } - - /** - * Gets a list of all server addresses used when this Mongo was created - * @return - */ - public List getAllAddress() { - List result = _connector.getAllAddress(); - if (result == null) { - return Arrays.asList(getAddress()); - } - return result; - } - - /** - * Gets the list of server addresses currently seen by the connector. - * This includes addresses auto-discovered from a replica set. 
- * @return - * @throws MongoException - */ - public List getServerAddressList() { - return _connector.getServerAddressList(); - } - - /** - * closes the underlying connector, which in turn closes all open connections. - * Once called, this Mongo instance can no longer be used. - */ - public void close(){ - - try { - _connector.close(); - } catch (final Throwable t) { /* nada */ } - - if (_cleaner != null) { - _cleaner.interrupt(); - - try { - _cleaner.join(); - } catch (InterruptedException e) { - //end early - } - } - } - - /** - * Sets the write concern for this database. Will be used as default for - * writes to any collection in any database. See the - * documentation for {@link WriteConcern} for more information. - * - * @param concern write concern to use - */ - public void setWriteConcern( WriteConcern concern ){ - _concern = concern; - } - - /** - * Gets the default write concern - * @return - */ - public WriteConcern getWriteConcern(){ - return _concern; - } - - /** - * Sets the read preference for this database. Will be used as default for - * reads from any collection in any database. See the - * documentation for {@link ReadPreference} for more information. - * - * @param preference Read Preference to use - */ - public void setReadPreference( ReadPreference preference ){ - _readPref = preference; - } - - /** - * Gets the default read preference - * @return - */ - public ReadPreference getReadPreference(){ - return _readPref; - } - - /** - * makes it possible to run read queries on secondary nodes - * - * @deprecated Replaced with {@code ReadPreference.secondaryPreferred()} - * @see ReadPreference#secondaryPreferred() - */ - @Deprecated - public void slaveOk(){ - addOption( Bytes.QUERYOPTION_SLAVEOK ); - } - - /** - * adds a default query option - * @param option - */ - public void addOption( int option ){ - _netOptions.add( option ); - } - - /** - * sets the default query options - * @param options - */ - public void setOptions( int options ){ - _netOptions.set( options ); - } - - /** - * reset the default query options - */ - public void resetOptions(){ - _netOptions.reset(); - } - - /** - * gets the default query options - * @return - */ - public int getOptions(){ - return _netOptions.get(); - } - - /** - * Helper method for setting up MongoOptions at instantiation - * so that any options which affect this connection can be set. - */ - @SuppressWarnings("deprecation") - void _applyMongoOptions() { - if (_options.slaveOk) { - slaveOk(); - } - if (_options.getReadPreference() != null) { - setReadPreference(_options.getReadPreference()); - } - setWriteConcern(_options.getWriteConcern()); - } - - /** - * Returns the mongo options. - * - * @deprecated Please use {@link MongoClient} - * and corresponding {@link com.mongodb.MongoClient#getMongoClientOptions()} - */ - @Deprecated - public MongoOptions getMongoOptions() { - return _options; - } - - /** - * Gets the maximum size for a BSON object supported by the current master server. - * Note that this value may change over time depending on which server is master. - * If the size is not known yet, a request may be sent to the master server - * @return the maximum size - * @throws MongoException - */ - public int getMaxBsonObjectSize() { - int maxsize = _connector.getMaxBsonObjectSize(); - if (maxsize == 0) { - _connector.initDirectConnection(); - } - maxsize = _connector.getMaxBsonObjectSize(); - return maxsize > 0 ? 
maxsize : Bytes.MAX_OBJECT_SIZE; - } - - boolean isMongosConnection() { - return _connector.isMongosConnection(); - } - - private static MongoAuthority getMongoAuthorityFromURI(final MongoURI uri) throws UnknownHostException { - if ( uri.getHosts().size() == 1 ){ - return MongoAuthority.direct(new ServerAddress(uri.getHosts().get(0)), uri.getCredentials()); - } - else { - List replicaSetSeeds = new ArrayList(uri.getHosts().size()); - for ( String host : uri.getHosts() ) - replicaSetSeeds.add( new ServerAddress( host ) ); - return MongoAuthority.dynamicSet(replicaSetSeeds, uri.getCredentials()); - } - } - - final MongoOptions _options; - final DBTCPConnector _connector; - final ConcurrentMap _dbs = new ConcurrentHashMap(); - private WriteConcern _concern = WriteConcern.NORMAL; - private ReadPreference _readPref = ReadPreference.primary(); - final Bytes.OptionHolder _netOptions = new Bytes.OptionHolder( null ); - final CursorCleanerThread _cleaner; - final MongoAuthority _authority; - - - org.bson.util.SimplePool _bufferPool = - new org.bson.util.SimplePool( 1000 ){ - - protected PoolOutputBuffer createNew(){ - return new PoolOutputBuffer(); - } - - }; - - /** - * Forces the master server to fsync the RAM data to disk - * This is done automatically by the server at intervals, but can be forced for better reliability. - * @param async if true, the fsync will be done asynchronously on the server. - * @return - * @throws MongoException - */ - public CommandResult fsync(boolean async) { - DBObject cmd = new BasicDBObject("fsync", 1); - if (async) { - cmd.put("async", 1); - } - return getDB(ADMIN_DATABASE_NAME).command(cmd); - } - - /** - * Forces the master server to fsync the RAM data to disk, then lock all writes. - * The database will be read-only after this command returns. - * @return - * @throws MongoException - */ - public CommandResult fsyncAndLock() { - DBObject cmd = new BasicDBObject("fsync", 1); - cmd.put("lock", 1); - return getDB(ADMIN_DATABASE_NAME).command(cmd); - } - - /** - * Unlocks the database, allowing the write operations to go through. - * This command may be asynchronous on the server, which means there may be a small delay before the database becomes writable. - * @return - * @throws MongoException - */ - public DBObject unlock() { - DB db = getDB(ADMIN_DATABASE_NAME); - DBCollection col = db.getCollection("$cmd.sys.unlock"); - return col.findOne(); - } - - /** - * Returns true if the database is locked (read-only), false otherwise. - * @return - * @throws MongoException - */ - public boolean isLocked() { - DB db = getDB(ADMIN_DATABASE_NAME); - DBCollection col = db.getCollection("$cmd.sys.inprog"); - BasicDBObject res = (BasicDBObject) col.findOne(); - if (res.containsField("fsyncLock")) { - return res.getInt("fsyncLock") == 1; - } - return false; - } - - // ------- - - - /** - * Mongo.Holder can be used as a static place to hold several instances of Mongo. - * Security is not enforced at this level, and needs to be done on the application side. - */ - public static class Holder { - - /** - * Attempts to find an existing MongoClient instance matching that URI in the holder, and returns it if exists. - * Otherwise creates a new Mongo instance based on this URI and adds it to the holder. - * - * @param uri the Mongo URI - * @return the client - * @throws MongoException - * @throws UnknownHostException - * - * @deprecated Please use {@link #connect(MongoClientURI)} instead. 
- */ - @Deprecated - public Mongo connect(final MongoURI uri) throws UnknownHostException { - return connect(uri.toClientURI()); - } - - /** - * Attempts to find an existing MongoClient instance matching that URI in the holder, and returns it if exists. - * Otherwise creates a new Mongo instance based on this URI and adds it to the holder. - * - * @param uri the Mongo URI - * @return the client - * @throws MongoException - * @throws UnknownHostException - */ - public Mongo connect(final MongoClientURI uri) throws UnknownHostException { - - final String key = toKey(uri); - - Mongo client = _mongos.get(key); - - if (client == null) { - final Mongo newbie = new MongoClient(uri); - client = _mongos.putIfAbsent(key, newbie); - if (client == null) { - client = newbie; - } else { - newbie.close(); - } - } - - return client; - } - - private String toKey(final MongoClientURI uri) { - return uri.toString(); - } - - public static Holder singleton() { return _default; } - - private static Holder _default = new Holder(); - private final ConcurrentMap _mongos = new ConcurrentHashMap(); - - } - - class CursorCleanerThread extends Thread { - - CursorCleanerThread() { - setDaemon(true); - setName("MongoCleaner" + hashCode()); - } - - public void run() { - while (_connector.isOpen()) { - try { - try { - Thread.sleep(cleanerIntervalMS); - } catch (InterruptedException e) { - //caused by the Mongo instance being closed -- proceed with cleanup - } - for (DB db : _dbs.values()) { - db.cleanCursors(true); - } - } catch (Throwable t) { - // thread must never die - } - } - } - } - - @Override - public String toString() { - return "Mongo{" + - "authority=" + _authority + - ", options=" + _options + - '}'; - } - - /** - * Gets the authority, which includes the connection type, the server address(es), and the credentials. - - * @return the authority - */ - MongoAuthority getAuthority() { - return _authority; - } -} diff --git a/src/main/com/mongodb/MongoAuthority.java b/src/main/com/mongodb/MongoAuthority.java deleted file mode 100644 index b1934826bb1..00000000000 --- a/src/main/com/mongodb/MongoAuthority.java +++ /dev/null @@ -1,214 +0,0 @@ -/** - * Copyright (c) 2008 - 2012 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.mongodb; - -import org.bson.util.annotations.Immutable; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; - -/** - * This class represents the authority to which this client is connecting. It includes - * both the server address(es) and optional authentication credentials. The class name is informed by the - * URI RFC, which refers to the username/host/port - * part of a URI as the "authority". - * - * @since 2.11.0 - */ -@Immutable -class MongoAuthority { - private final Type type; - private final List serverAddresses; - private final MongoCredentialsStore credentialsStore; - - /** - * Enumeration of the connection types. 
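
The Mongo.Holder removed above caches client instances keyed by URI string, so repeated connects with an equal URI share one client. A minimal sketch of its intended use (URI illustrative):

    import java.net.UnknownHostException;
    import com.mongodb.Mongo;
    import com.mongodb.MongoClientURI;

    public class HolderExample {
        public static void main(String[] args) throws UnknownHostException {
            // Repeated calls with an equal URI return the same cached instance.
            Mongo first = Mongo.Holder.singleton().connect(new MongoClientURI("mongodb://localhost"));
            Mongo second = Mongo.Holder.singleton().connect(new MongoClientURI("mongodb://localhost"));
            System.out.println(first == second); // true: same instance from the holder
        }
    }
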
- */ - enum Type { - Direct, - Set - } - - /** - * - * @param serverAddress - * @return - */ - public static MongoAuthority direct(ServerAddress serverAddress) { - return direct(serverAddress, (MongoCredential) null); - } - - /** - * - * @param serverAddress - * @param credentials - * @return - */ - public static MongoAuthority direct(ServerAddress serverAddress, MongoCredential credentials) { - return direct(serverAddress, new MongoCredentialsStore(credentials)); - } - - /** - * - * @param serverAddress - * @param credentialsStore - * @return - */ - public static MongoAuthority direct(ServerAddress serverAddress, MongoCredentialsStore credentialsStore) { - return new MongoAuthority(serverAddress, credentialsStore); - } - - /** - * - * @param serverAddresses - * @return - */ - public static MongoAuthority dynamicSet(List serverAddresses) { - return dynamicSet(serverAddresses, (MongoCredential) null); - } - - /** - * - * @param serverAddresses - * @param credentials - * @return - */ - public static MongoAuthority dynamicSet(List serverAddresses, MongoCredential credentials) { - return dynamicSet(serverAddresses, new MongoCredentialsStore(credentials)); - } - - /** - * - * @param serverAddresses - * @param credentialsStore - * @return - */ - public static MongoAuthority dynamicSet(List serverAddresses, MongoCredentialsStore credentialsStore) { - return new MongoAuthority(serverAddresses, Type.Set, credentialsStore); - } - - /** - * Constructs an instance with a single server address and a store of authentication credentials. - * This will be a direct connection, even if it's part of a replica set. - * - * @param serverAddress the server address of a mongo server - */ - private MongoAuthority(final ServerAddress serverAddress, MongoCredentialsStore credentialsStore) { - if (serverAddress == null) { - throw new IllegalArgumentException("serverAddress can not be null"); - } - - if (credentialsStore == null) { - throw new IllegalArgumentException("credentialsStore can not be null"); - } - - this.serverAddresses = Arrays.asList(serverAddress); - this.credentialsStore = credentialsStore; - this.type = Type.Direct; - } - - /** - * Constructs an instance with a list of server addresses, which may either be a list of mongos servers - * or a list of members of a replica set, and a store of authentication credentials. - * - * @param serverAddresses the server addresses - * @param credentialsStore the credentials store - */ - private MongoAuthority(final List serverAddresses, Type type, MongoCredentialsStore credentialsStore) { - if (serverAddresses == null) { - throw new IllegalArgumentException("serverAddresses can not be null"); - } - - if (credentialsStore == null) { - throw new IllegalArgumentException("credentialsStore can not be null"); - } - - if (type == null) { - throw new IllegalArgumentException("type can not be null"); - } - - if (type == Type.Direct) { - throw new IllegalArgumentException("type can not be Direct with a list of server addresses"); - } - - this.type = type; - this.serverAddresses = new ArrayList(serverAddresses); - this.credentialsStore = credentialsStore; - } - - /** - * Returns the list of server addresses. - * - * @return the server address list - */ - public List getServerAddresses() { - return serverAddresses == null ? null : Collections.unmodifiableList(serverAddresses); - } - - /** - * Gets the credentials store. If this instance was constructed with a single credential, this store will - * contain it. 
- * - * @return the credentials store - */ - public MongoCredentialsStore getCredentialsStore() { - return credentialsStore; - } - - /** - * Gets the authority type - * - * @return the authority type - */ - public Type getType() { - return type; - } - - @Override - public boolean equals(final Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - final MongoAuthority that = (MongoAuthority) o; - - if (!credentialsStore.equals(that.credentialsStore)) return false; - if (!serverAddresses.equals(that.serverAddresses)) return false; - if (type != that.type) return false; - - return true; - } - - @Override - public int hashCode() { - int result = credentialsStore.hashCode(); - result = 31 * result + serverAddresses.hashCode(); - result = 31 * result + type.hashCode(); - return result; - } - - @Override - public String toString() { - return "MongoAuthority{" + - "type=" + type + - ", serverAddresses=" + serverAddresses + - ", credentials=" + credentialsStore + - '}'; - } -} diff --git a/src/main/com/mongodb/MongoClient.java b/src/main/com/mongodb/MongoClient.java deleted file mode 100644 index ce32d799e3b..00000000000 --- a/src/main/com/mongodb/MongoClient.java +++ /dev/null @@ -1,286 +0,0 @@ -/** - * Copyright (c) 2008 - 2012 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.mongodb; - -import java.net.UnknownHostException; -import java.util.List; - -/** - * A MongoDB client with internal connection pooling. For most applications, you should have one MongoClient instance - * for the entire JVM. - *

- * The following are equivalent, and all connect to the local database running on the default port:
- * MongoClient mongoClient1 = new MongoClient();
- * MongoClient mongoClient2 = new MongoClient("localhost");
- * MongoClient mongoClient3 = new MongoClient("localhost", 27017);
- * MongoClient mongoClient4 = new MongoClient(new ServerAddress("localhost"));
- * MongoClient mongoClient5 = new MongoClient(new ServerAddress("localhost"), new MongoClientOptions.Builder().build());
- *

- * You can connect to a replica set using the Java driver by passing a ServerAddress list to the MongoClient constructor. For example:
- * MongoClient mongoClient = new MongoClient(Arrays.asList(
- *   new ServerAddress("localhost", 27017),
- *   new ServerAddress("localhost", 27018),
- *   new ServerAddress("localhost", 27019)));
- *
- * You can connect to a sharded cluster using the same constructor. MongoClient will auto-detect whether the servers are
- * a list of replica set members or a list of mongos servers.
- *
- * By default, all read and write operations will be made on the primary, but it's possible to read from secondaries
- * by changing the read preference:
- * mongoClient.setReadPreference(ReadPreference.secondaryPreferred());
- *
- * By default, all write operations will wait for acknowledgment by the server, as the default write concern is
- * {@code WriteConcern.ACKNOWLEDGED}.

    - * Note: This class supersedes the {@code Mongo} class. While it extends {@code Mongo}, it differs from it in that - * the default write concern is to wait for acknowledgment from the server of all write operations. In addition, its - * constructors accept instances of {@code MongoClientOptions} and {@code MongoClientURI}, which both also - * set the same default write concern. - *

    - * In general, users of this class will pick up all of the default options specified in {@code MongoClientOptions}. In - * particular, note that the default value of the connectionsPerHost option has been increased to 100 from the old - * default value of 10 used by the superseded {@code Mongo} class. - * - * @see ReadPreference#primary() - * @see com.mongodb.WriteConcern#ACKNOWLEDGED - * @see MongoClientOptions - * @see MongoClientURI - * @since 2.10.0 - */ -public class MongoClient extends Mongo { - - private final MongoClientOptions options; - - /** - * Creates an instance based on a (single) mongodb node (localhost, default port). - * - * @throws UnknownHostException - * @throws MongoException - */ - public MongoClient() throws UnknownHostException { - this(new ServerAddress()); - } - - /** - * Creates a Mongo instance based on a (single) mongodb node. - * - * @param host server to connect to in format host[:port] - * @throws UnknownHostException if the database host cannot be resolved - * @throws MongoException - */ - public MongoClient(String host) throws UnknownHostException { - this(new ServerAddress(host)); - } - - /** - * Creates a Mongo instance based on a (single) mongodb node (default port). - * - * @param host server to connect to in format host[:port] - * @param options default query options - * @throws UnknownHostException if the database host cannot be resolved - * @throws MongoException - */ - public MongoClient(String host, MongoClientOptions options) throws UnknownHostException { - this(new ServerAddress(host), options); - } - - /** - * Creates a Mongo instance based on a (single) mongodb node. - * - * @param host the database's host address - * @param port the port on which the database is running - * @throws UnknownHostException if the database host cannot be resolved - * @throws MongoException - */ - public MongoClient(String host, int port) throws UnknownHostException { - this(new ServerAddress(host, port)); - } - - /** - * Creates a Mongo instance based on a (single) mongodb node - * - * @param addr the database address - * @throws MongoException - * @see com.mongodb.ServerAddress - */ - public MongoClient(ServerAddress addr) { - this(addr, new MongoClientOptions.Builder().build()); - } - - /** - * Creates a Mongo instance based on a (single) mongodb node and a list of credentials - * - * @param addr the database address - * @param credentialsList the list of credentials used to authenticate all connections - * @throws MongoException - * @see com.mongodb.ServerAddress - * @since 2.11.0 - */ - public MongoClient(ServerAddress addr, List credentialsList) { - this(addr, credentialsList, new MongoClientOptions.Builder().build()); - } - - /** - * Creates a Mongo instance based on a (single) mongo node using a given ServerAddress and default options. - * - * @param addr the database address - * @param options default options - * @throws MongoException - * @see com.mongodb.ServerAddress - */ - public MongoClient(ServerAddress addr, MongoClientOptions options) { - this(addr, null, options); - } - - /** - * Creates a Mongo instance based on a (single) mongo node using a given ServerAddress and default options. 
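
A hedged sketch of the credentialed constructor above; createMongoCRCredential targets the MONGODB-CR mechanism that was the default in the 2.11-era driver (user, database, and password are hypothetical):

    import java.net.UnknownHostException;
    import java.util.Arrays;
    import com.mongodb.MongoClient;
    import com.mongodb.MongoCredential;
    import com.mongodb.ServerAddress;

    public class CredentialedClientExample {
        public static void main(String[] args) throws UnknownHostException {
            // MONGODB-CR was the default challenge-response mechanism at the time.
            MongoCredential credential =
                    MongoCredential.createMongoCRCredential("fred", "test", "foobar".toCharArray());
            MongoClient client = new MongoClient(
                    new ServerAddress("localhost", 27017),
                    Arrays.asList(credential));
            System.out.println(client.getCredentialsList());
            client.close();
        }
    }
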
- * - * @param addr the database address - * @param credentialsList the list of credentials used to authenticate all connections - * @param options default options - * @throws MongoException - * @see com.mongodb.ServerAddress - * @since 2.11.0 - */ - @SuppressWarnings("deprecation") - public MongoClient(ServerAddress addr, List credentialsList, MongoClientOptions options) { - super(MongoAuthority.direct(addr, new MongoCredentialsStore(credentialsList)), new MongoOptions(options)); - this.options = options; - } - - /** - * Creates a Mongo based on a list of replica set members or a list of mongos. - * It will find all members (the master will be used by default). If you pass in a single server in the list, - * the driver will still function as if it is a replica set. If you have a standalone server, - * use the Mongo(ServerAddress) constructor. - *

    - * If this is a list of mongos servers, it will pick the closest (lowest ping time) one to send all requests to, - * and automatically fail over to the next server if the closest is down. - * - * @param seeds Put as many servers as you can in the list and the system will figure out the rest. This can - * either be a list of mongod servers in the same replica set or a list of mongos servers in the same - * sharded cluster. - * @throws MongoException - * @see com.mongodb.ServerAddress - */ - public MongoClient(List seeds) { - this(seeds, null, new MongoClientOptions.Builder().build()); - } - - /** - * Creates a Mongo based on a list of replica set members or a list of mongos. - * It will find all members (the master will be used by default). If you pass in a single server in the list, - * the driver will still function as if it is a replica set. If you have a standalone server, - * use the Mongo(ServerAddress) constructor. - *

    - * If this is a list of mongos servers, it will pick the closest (lowest ping time) one to send all requests to, - * and automatically fail over to the next server if the closest is down. - * - * @param seeds Put as many servers as you can in the list and the system will figure out the rest. This can - * either be a list of mongod servers in the same replica set or a list of mongos servers in the same - * sharded cluster. \ - * @param credentialsList the list of credentials used to authenticate all connections - * @throws MongoException - * @see com.mongodb.ServerAddress - * @since 2.11.0 - */ - public MongoClient(List seeds, List credentialsList) { - this(seeds, credentialsList, new MongoClientOptions.Builder().build()); - } - - - /** - * Creates a Mongo based on a list of replica set members or a list of mongos. - * It will find all members (the master will be used by default). If you pass in a single server in the list, - * the driver will still function as if it is a replica set. If you have a standalone server, - * use the Mongo(ServerAddress) constructor. - *

    - * If this is a list of mongos servers, it will pick the closest (lowest ping time) one to send all requests to, - * and automatically fail over to the next server if the closest is down. - * - * @param seeds Put as many servers as you can in the list and the system will figure out the rest. This can - * either be a list of mongod servers in the same replica set or a list of mongos servers in the same - * sharded cluster. - * @param options default options - * @throws MongoException - * @see com.mongodb.ServerAddress - */ - public MongoClient(List seeds, MongoClientOptions options) { - this(seeds, null, options); - } - - /** - * Creates a Mongo based on a list of replica set members or a list of mongos. - * It will find all members (the master will be used by default). If you pass in a single server in the list, - * the driver will still function as if it is a replica set. If you have a standalone server, - * use the Mongo(ServerAddress) constructor. - *

    - * If this is a list of mongos servers, it will pick the closest (lowest ping time) one to send all requests to, - * and automatically fail over to the next server if the closest is down. - * - * @param seeds Put as many servers as you can in the list and the system will figure out the rest. This can - * either be a list of mongod servers in the same replica set or a list of mongos servers in the same - * sharded cluster. - * @param credentialsList the list of credentials used to authenticate all connections - * @param options default options - * @throws MongoException - * @see com.mongodb.ServerAddress - * @since 2.11.0 - */ - @SuppressWarnings("deprecation") - public MongoClient(List seeds, List credentialsList, MongoClientOptions options) { - super(MongoAuthority.dynamicSet(seeds, new MongoCredentialsStore(credentialsList)), new MongoOptions(options)); - this.options = options; - } - - - /** - * Creates a Mongo described by a URI. - * If only one address is used it will only connect to that node, otherwise it will discover all nodes. - * @param uri the URI - * @throws MongoException - * @throws UnknownHostException - * @see MongoURI - * @dochub connections - */ - @SuppressWarnings("deprecation") - public MongoClient(MongoClientURI uri) throws UnknownHostException { - super(new MongoURI(uri)); - this.options = uri.getOptions(); - } - - /** - * Gets the list of credentials that this client authenticates all connections with - * - * @return the list of credentials - * @since 2.11.0 - */ - public List getCredentialsList() { - return getAuthority().getCredentialsStore().asList(); - } - - public MongoClientOptions getMongoClientOptions() { - return options; - } -} \ No newline at end of file diff --git a/src/main/com/mongodb/MongoClientException.java b/src/main/com/mongodb/MongoClientException.java deleted file mode 100644 index 4b7f3bc9a01..00000000000 --- a/src/main/com/mongodb/MongoClientException.java +++ /dev/null @@ -1,18 +0,0 @@ -package com.mongodb; - -/** - * A base class for exceptions indicating a failure condition within the driver. - */ -public class MongoClientException extends MongoInternalException { - - private static final long serialVersionUID = -5127414714432646066L; - - /** - * Constructs a new instance with the given message. - * - * @param msg the message - */ - MongoClientException(String msg) { - super(msg); - } -} diff --git a/src/main/com/mongodb/MongoClientOptions.java b/src/main/com/mongodb/MongoClientOptions.java deleted file mode 100644 index 5fcb1518aa4..00000000000 --- a/src/main/com/mongodb/MongoClientOptions.java +++ /dev/null @@ -1,600 +0,0 @@ -/** - * Copyright (c) 2008 - 2012 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.mongodb; - -import org.bson.util.annotations.Immutable; - -import javax.net.SocketFactory; - -/** - * Various settings to control the behavior of a MongoClient. - *

    - * Note: This class is a replacement for {@code MongoOptions}, to be used with {@code MongoClient}. The main difference - * in behavior is that the default write concern is {@code WriteConcern.ACKNOWLEDGED}. - * - * @see MongoClient - * @since 2.10.0 - */ -@Immutable -public class MongoClientOptions { - /** - * A builder for MongoClientOptions so that MongoClientOptions can be immutable, and to support easier - * construction through chaining. - * - * @since 2.10.0 - */ - public static class Builder { - - private String description; - private int connectionsPerHost = 100; - private int threadsAllowedToBlockForConnectionMultiplier = 5; - private int maxWaitTime = 1000 * 60 * 2; - private int connectTimeout = 1000 * 10; - private int socketTimeout = 0; - private boolean socketKeepAlive = false; - private boolean autoConnectRetry = false; - private long maxAutoConnectRetryTime = 0; - private ReadPreference readPreference = ReadPreference.primary(); - private DBDecoderFactory dbDecoderFactory = DefaultDBDecoder.FACTORY; - private DBEncoderFactory dbEncoderFactory = DefaultDBEncoder.FACTORY; - private WriteConcern writeConcern = WriteConcern.ACKNOWLEDGED; - private SocketFactory socketFactory = SocketFactory.getDefault(); - private boolean cursorFinalizerEnabled = true; - private boolean alwaysUseMBeans = false; - - /** - * Sets the description. - * - * @param description the description of this MongoClient - * @return {@code this} - * @see com.mongodb.MongoClientOptions#getDescription() - */ - public Builder description(final String description) { - this.description = description; - return this; - } - - /** - * Sets the maximum number of connections per host. - * - * @param connectionsPerHost maximum number of connections - * @return {@code this} - * @throws IllegalArgumentException if connnectionsPerHost < 1 - * @see com.mongodb.MongoClientOptions#getConnectionsPerHost() - */ - public Builder connectionsPerHost(final int connectionsPerHost) { - if (connectionsPerHost < 1) { - throw new IllegalArgumentException("Minimum value is 1"); - } - this.connectionsPerHost = connectionsPerHost; - return this; - } - - /** - * Sets the multiplier for number of threads allowed to block waiting for a connection. - * - * @param threadsAllowedToBlockForConnectionMultiplier - * the multiplier - * @return {@code this} - * @throws IllegalArgumentException if threadsAllowedToBlockForConnectionMultiplier < 1 - * @see com.mongodb.MongoClientOptions#getThreadsAllowedToBlockForConnectionMultiplier() - */ - public Builder threadsAllowedToBlockForConnectionMultiplier(final int threadsAllowedToBlockForConnectionMultiplier) { - if (threadsAllowedToBlockForConnectionMultiplier < 1) { - throw new IllegalArgumentException("Minimum value is 1"); - } - this.threadsAllowedToBlockForConnectionMultiplier = threadsAllowedToBlockForConnectionMultiplier; - return this; - } - - /** - * Sets the maximum time that a thread will block waiting for a connection. - * - * @param maxWaitTime the maximum wait time (in milliseconds) - * @return {@code this} - * @throws IllegalArgumentException if maxWaitTime < 0 - * @see com.mongodb.MongoClientOptions#getMaxWaitTime() - */ - public Builder maxWaitTime(final int maxWaitTime) { - if (maxWaitTime < 0) { - throw new IllegalArgumentException("Minimum value is 0"); - } - this.maxWaitTime = maxWaitTime; - return this; - } - - /** - * Sets the connection timeout. 
- * - * @param connectTimeout the connection timeout (in milliseconds) - * @return {@code this} - * @see com.mongodb.MongoClientOptions#getConnectTimeout() - */ - public Builder connectTimeout(final int connectTimeout) { - if (connectTimeout < 0) { - throw new IllegalArgumentException("Minimum value is 0"); - } - this.connectTimeout = connectTimeout; - return this; - } - - /** - * Sets the socket timeout. - * - * @param socketTimeout the socket timeout (in milliseconds) - * @return {@code this} - * @see com.mongodb.MongoClientOptions#getSocketTimeout() - */ - public Builder socketTimeout(final int socketTimeout) { - if (socketTimeout < 0) { - throw new IllegalArgumentException("Minimum value is 0"); - } - this.socketTimeout = socketTimeout; - return this; - } - - /** - * Sets whether socket keep alive is enabled. - * - * @param socketKeepAlive keep alive - * @return {@code this} - * @see com.mongodb.MongoClientOptions#isSocketKeepAlive() - */ - public Builder socketKeepAlive(final boolean socketKeepAlive) { - this.socketKeepAlive = socketKeepAlive; - return this; - } - - /** - * Sets whether auto connect retry is enabled. - * - * @param autoConnectRetry auto connect retry - * @return {@code this} - * @see MongoClientOptions#isAutoConnectRetry() - */ - public Builder autoConnectRetry(final boolean autoConnectRetry) { - this.autoConnectRetry = autoConnectRetry; - return this; - } - - /** - * Sets the maximum auto connect retry time. - * - * @param maxAutoConnectRetryTime the maximum auto connect retry time - * @return {@code this} - * @see MongoClientOptions#getMaxAutoConnectRetryTime() - */ - public Builder maxAutoConnectRetryTime(final long maxAutoConnectRetryTime) { - if (maxAutoConnectRetryTime < 0) { - throw new IllegalArgumentException("Minimum value is 0"); - } - this.maxAutoConnectRetryTime = maxAutoConnectRetryTime; - return this; - } - - /** - * Sets the read preference. - * - * @param readPreference read preference - * @return {@code this} - * @see MongoClientOptions#getReadPreference() - */ - public Builder readPreference(final ReadPreference readPreference) { - if (readPreference == null) { - throw new IllegalArgumentException("null is not a legal value"); - } - this.readPreference = readPreference; - return this; - } - - /** - * Sets the decoder factory. - * - * @param dbDecoderFactory the decoder factory - * @return {@code this} - * @see MongoClientOptions#getDbDecoderFactory() - */ - public Builder dbDecoderFactory(final DBDecoderFactory dbDecoderFactory) { - if (dbDecoderFactory == null) { - throw new IllegalArgumentException("null is not a legal value"); - } - this.dbDecoderFactory = dbDecoderFactory; - return this; - } - - /** - * Sets the encoder factory. - * - * @param dbEncoderFactory the encoder factory - * @return {@code this} - * @see MongoClientOptions#getDbEncoderFactory() - */ - public Builder dbEncoderFactory(final DBEncoderFactory dbEncoderFactory) { - if (dbEncoderFactory == null) { - throw new IllegalArgumentException("null is not a legal value"); - } - this.dbEncoderFactory = dbEncoderFactory; - return this; - } - - /** - * Sets the write concern. - * - * @param writeConcern the write concern - * @return {@code this} - * @see MongoClientOptions#getWriteConcern() - */ - public Builder writeConcern(final WriteConcern writeConcern) { - if (writeConcern == null) { - throw new IllegalArgumentException("null is not a legal value"); - } - this.writeConcern = writeConcern; - return this; - } - - /** - * Sets the socket factory. 
- * - * @param socketFactory the socket factory - * @return {@code this} - * @see MongoClientOptions#getSocketFactory() - */ - public Builder socketFactory(final SocketFactory socketFactory) { - if (socketFactory == null) { - throw new IllegalArgumentException("null is not a legal value"); - } - this.socketFactory = socketFactory; - return this; - } - - /** - * Sets whether cursor finalizers are enabled. - * - * @param cursorFinalizerEnabled whether cursor finalizers are enabled. - * @return {@code this} - * @see MongoClientOptions#isCursorFinalizerEnabled() - */ - public Builder cursorFinalizerEnabled(final boolean cursorFinalizerEnabled) { - this.cursorFinalizerEnabled = cursorFinalizerEnabled; - return this; - } - - /** - * Sets whether JMX beans registered by the driver should always be MBeans, regardless of whether the VM is - * Java 6 or greater. If false, the driver will use MXBeans if the VM is Java 6 or greater, and use MBeans if - * the VM is Java 5. - * - * @param alwaysUseMBeans true if driver should always use MBeans, regardless of VM version - * @return this - * @see MongoClientOptions#isAlwaysUseMBeans() - */ - public Builder alwaysUseMBeans(final boolean alwaysUseMBeans) { - this.alwaysUseMBeans = alwaysUseMBeans; - return this; - } - - /** - * Sets defaults to be what they are in {@code MongoOptions}. - * - * @return {@code this} - * @see MongoOptions - */ - public Builder legacyDefaults() { - connectionsPerHost = 10; - writeConcern = WriteConcern.NORMAL; - return this; - } - - /** - * Build an instance of MongoClientOptions. - * - * @return the options from this builder - */ - public MongoClientOptions build() { - return new MongoClientOptions(this); - } - } - - /** - * Create a new Builder instance. This is a convenience method, equivalent to {@code new MongoClientOptions.Builder()}. - * - * @return a new instance of a Builder - */ - public static Builder builder() { - return new Builder(); - } - - /** - * Gets the description for this MongoClient, which is used in various places like logging and JMX. - *
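Stepping back from the individual setters: a minimal sketch of the builder in use. Every value here is illustrative, not a recommendation; each setter returns the builder, so calls chain, and build() produces the immutable options (legacyDefaults() is available to reproduce the old MongoOptions defaults instead).

```java
import com.mongodb.MongoClientOptions;
import com.mongodb.ReadPreference;
import com.mongodb.WriteConcern;

public class OptionsBuilderExample {
    public static void main(String[] args) {
        MongoClientOptions options = MongoClientOptions.builder()
                .description("analytics client")   // shows up in logging and JMX
                .connectionsPerHost(50)            // pool size per host (default 100)
                .connectTimeout(5000)              // ms; 0 means no timeout
                .socketTimeout(60000)              // ms; 0 means no timeout
                .readPreference(ReadPreference.primary())
                .writeConcern(WriteConcern.ACKNOWLEDGED)
                .build();
        System.out.println(options.getConnectionsPerHost());
    }
}
```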

    - * Default is null. - * - * @return the description - */ - public String getDescription() { - return description; - } - - /** - * The maximum number of connections allowed per host for this MongoClient instance. - * Those connections will be kept in a pool when idle. - * Once the pool is exhausted, any operation requiring a connection will block waiting for an available connection. - *

-     * Default is 100.
-     *
-     * @return the maximum size of the connection pool per host
-     * @see MongoClientOptions#getThreadsAllowedToBlockForConnectionMultiplier()
-     */
-    public int getConnectionsPerHost() {
-        return connectionsPerHost;
-    }
-
-    /**
-     * This multiplier, multiplied with the connectionsPerHost setting, gives the maximum number of threads that
-     * may be waiting for a connection to become available from the pool. All further threads will get an exception right
-     * away. For example, if connectionsPerHost is 10 and threadsAllowedToBlockForConnectionMultiplier is 5, then up to 50
-     * threads can wait for a connection.
-     *

    - * Default is 5. - * - * @return the multiplier - */ - public int getThreadsAllowedToBlockForConnectionMultiplier() { - return threadsAllowedToBlockForConnectionMultiplier; - } - - /** - * The maximum wait time in milliseconds that a thread may wait for a connection to become available. - *
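The waiting-thread cap described above is simply the product of the two settings; a small worked example:

```java
import com.mongodb.MongoClientOptions;

public class WaitQueueMath {
    public static void main(String[] args) {
        MongoClientOptions options = MongoClientOptions.builder()
                .connectionsPerHost(10)
                .threadsAllowedToBlockForConnectionMultiplier(5)
                .build();
        // 10 connections * multiplier 5 = at most 50 threads may block
        // waiting for a connection; the 51st fails immediately.
        int maxWaitingThreads = options.getConnectionsPerHost()
                * options.getThreadsAllowedToBlockForConnectionMultiplier();
        System.out.println(maxWaitingThreads); // 50
    }
}
```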

    - * Default is 120,000. A value of 0 means that it will not wait. A negative value means to wait indefinitely. - * - * @return the maximum wait time. - */ - public int getMaxWaitTime() { - return maxWaitTime; - } - - /** - * The connection timeout in milliseconds. A value of 0 means no timeout. - * It is used solely when establishing a new connection {@link java.net.Socket#connect(java.net.SocketAddress, int) } - *

    - * Default is 10,000. - * - * @return the socket connect timeout - */ - public int getConnectTimeout() { - return connectTimeout; - } - - /** - * The socket timeout in milliseconds. - * It is used for I/O socket read and write operations {@link java.net.Socket#setSoTimeout(int)} - *

-     * Default is 0 and means no timeout.
-     *
-     * @return the socket timeout
-     */
-    public int getSocketTimeout() {
-        return socketTimeout;
-    }
-
-    /**
-     * This flag controls the socket keep-alive feature that keeps a connection alive through firewalls: {@link java.net.Socket#setKeepAlive(boolean)}.
-     *
-     * Default is false.
-     *
-     * @return whether keep-alive is enabled on each socket
-     */
-    public boolean isSocketKeepAlive() {
-        return socketKeepAlive;
-    }
-
-    /**
-     * If true, the driver will keep trying to connect to the same server in case the socket connection cannot be established.
-     * There is a maximum amount of time to keep retrying, which is 15s by default.
-     * This can be useful to avoid some exceptions being thrown when a server is down temporarily by blocking the operations.
-     * It also can be useful to smooth the transition to a new master (so that a new master is elected within the retry time).
-     * Note that when using this flag:
-     * - for a replica set, the driver will keep trying to connect to the old master for that time, instead of failing over to the new one right away
-     * - this does not prevent exceptions from being thrown in read/write operations on the socket, which must be handled by the application
-     *

-     * Even if this flag is false, the driver already has mechanisms to automatically recreate broken connections and retry the read operations.
-     * Default is false.
-     *
-     * @return whether socket connect is retried
-     */
-    public boolean isAutoConnectRetry() {
-        return autoConnectRetry;
-    }
-
-    /**
-     * The maximum amount of time in milliseconds to spend retrying to open a connection to the same server.
-     * Default is 0, which means to use the default 15s if autoConnectRetry is on.
-     *
-     * @return the maximum socket connect retry time.
-     */
-    public long getMaxAutoConnectRetryTime() {
-        return maxAutoConnectRetryTime;
-    }
-
-    /**
-     * The read preference to use for queries, map-reduce, aggregation, and count.
-     *
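For example, opting in to connect retries with a 5-second cap (illustrative values) might look like:

```java
import com.mongodb.MongoClientOptions;

public class ConnectRetryExample {
    public static void main(String[] args) {
        // Retry opening a connection to the same server for up to 5 seconds
        // before giving up; with the default of 0 the driver uses its 15s cap.
        MongoClientOptions options = MongoClientOptions.builder()
                .autoConnectRetry(true)
                .maxAutoConnectRetryTime(5000)
                .build();
        System.out.println(options.isAutoConnectRetry());
    }
}
```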

    - * Default is {@code ReadPreference.primary()}. - * - * @return the read preference - * @see com.mongodb.ReadPreference#primary() - */ - public ReadPreference getReadPreference() { - return readPreference; - } - - /** - * Override the decoder factory. Default is for the standard Mongo Java driver configuration. - * - * @return the decoder factory - */ - public DBDecoderFactory getDbDecoderFactory() { - return dbDecoderFactory; - } - - /** - * Override the encoder factory. Default is for the standard Mongo Java driver configuration. - * - * @return the encoder factory - */ - public DBEncoderFactory getDbEncoderFactory() { - return dbEncoderFactory; - } - - /** - * The write concern to use. - *
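A sketch of supplying a tagged read preference, using the same ReadPreference.valueOf form that the URI-parsing code further below relies on (the tag names are placeholders):

```java
import com.mongodb.BasicDBObject;
import com.mongodb.DBObject;
import com.mongodb.MongoClientOptions;
import com.mongodb.ReadPreference;

public class ReadPreferenceExample {
    public static void main(String[] args) {
        // Route reads to secondaries whose tags match dc=ny.
        DBObject tags = new BasicDBObject("dc", "ny");
        MongoClientOptions options = MongoClientOptions.builder()
                .readPreference(ReadPreference.valueOf("secondaryPreferred", tags))
                .build();
        System.out.println(options.getReadPreference());
    }
}
```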

    - * Default is {@code WriteConcern.ACKNOWLEDGED}. - * - * @return the write concern - * @see WriteConcern#ACKNOWLEDGED - */ - public WriteConcern getWriteConcern() { - return writeConcern; - } - - /** - * The socket factory for creating sockets to the mongo server. - *
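To tighten the default concern, one might pass a custom WriteConcern; the four-argument constructor here is the same one the URI-parsing code below uses, and the values are illustrative:

```java
import com.mongodb.MongoClientOptions;
import com.mongodb.WriteConcern;

public class WriteConcernExample {
    public static void main(String[] args) {
        // Wait for 2 members, up to 1000 ms, no fsync, but wait for the journal commit.
        MongoClientOptions options = MongoClientOptions.builder()
                .writeConcern(new WriteConcern(2, 1000, false, true))
                .build();
        System.out.println(options.getWriteConcern());
    }
}
```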

-     * Default is SocketFactory.getDefault().
-     *
-     * @return the socket factory
-     */
-    public SocketFactory getSocketFactory() {
-        return socketFactory;
-    }
-
-    /**
-     * Gets whether there is a finalize method created that cleans up instances of DBCursor that the client
-     * does not close. If you are careful to always call the close method of DBCursor, then this can safely be set to false.
-     *
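Since SSL is configured through the socket factory (as the ssl=true handling in MongoClientURI further below shows), a sketch:

```java
import com.mongodb.MongoClientOptions;

import javax.net.ssl.SSLSocketFactory;

public class SslOptionsExample {
    public static void main(String[] args) {
        // ssl=true in a connection string amounts to installing the default SSL socket factory.
        MongoClientOptions options = MongoClientOptions.builder()
                .socketFactory(SSLSocketFactory.getDefault())
                .cursorFinalizerEnabled(false) // safe only if every DBCursor is always closed
                .build();
        System.out.println(options.getSocketFactory().getClass().getName());
    }
}
```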

    - * Default is true. - * - * @return whether finalizers are enabled on cursors - * @see DBCursor - * @see com.mongodb.DBCursor#close() - */ - public boolean isCursorFinalizerEnabled() { - return cursorFinalizerEnabled; - } - - /** - * Gets whether JMX beans registered by the driver should always be MBeans, regardless of whether the VM is - * Java 6 or greater. If false, the driver will use MXBeans if the VM is Java 6 or greater, and use MBeans if - * the VM is Java 5. - *

    - * Default is false. - *

    - */ - public boolean isAlwaysUseMBeans() { - return alwaysUseMBeans; - } - - @Override - public boolean equals(final Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - final MongoClientOptions that = (MongoClientOptions) o; - - if (alwaysUseMBeans != that.alwaysUseMBeans) return false; - if (autoConnectRetry != that.autoConnectRetry) return false; - if (connectTimeout != that.connectTimeout) return false; - if (connectionsPerHost != that.connectionsPerHost) return false; - if (cursorFinalizerEnabled != that.cursorFinalizerEnabled) return false; - if (maxAutoConnectRetryTime != that.maxAutoConnectRetryTime) return false; - if (maxWaitTime != that.maxWaitTime) return false; - if (socketKeepAlive != that.socketKeepAlive) return false; - if (socketTimeout != that.socketTimeout) return false; - if (threadsAllowedToBlockForConnectionMultiplier != that.threadsAllowedToBlockForConnectionMultiplier) - return false; - if (!dbDecoderFactory.equals(that.dbDecoderFactory)) return false; - if (!dbEncoderFactory.equals(that.dbEncoderFactory)) return false; - if (description != null ? !description.equals(that.description) : that.description != null) return false; - if (!readPreference.equals(that.readPreference)) return false; - // Compare SocketFactory Class, since some equivalent SocketFactory instances are not equal to each other - if (!socketFactory.getClass().equals(that.socketFactory.getClass())) return false; - if (!writeConcern.equals(that.writeConcern)) return false; - - return true; - } - - @Override - public int hashCode() { - int result = description != null ? description.hashCode() : 0; - result = 31 * result + connectionsPerHost; - result = 31 * result + threadsAllowedToBlockForConnectionMultiplier; - result = 31 * result + maxWaitTime; - result = 31 * result + connectTimeout; - result = 31 * result + socketTimeout; - result = 31 * result + (socketKeepAlive ? 1 : 0); - result = 31 * result + (autoConnectRetry ? 1 : 0); - result = 31 * result + (int) (maxAutoConnectRetryTime ^ (maxAutoConnectRetryTime >>> 32)); - result = 31 * result + readPreference.hashCode(); - result = 31 * result + dbDecoderFactory.hashCode(); - result = 31 * result + dbEncoderFactory.hashCode(); - result = 31 * result + writeConcern.hashCode(); - result = 31 * result + socketFactory.hashCode(); - result = 31 * result + (cursorFinalizerEnabled ? 1 : 0); - result = 31 * result + (alwaysUseMBeans ? 
1 : 0); - return result; - } - - private MongoClientOptions(final Builder builder) { - description = builder.description; - connectionsPerHost = builder.connectionsPerHost; - threadsAllowedToBlockForConnectionMultiplier = builder.threadsAllowedToBlockForConnectionMultiplier; - maxWaitTime = builder.maxWaitTime; - connectTimeout = builder.connectTimeout; - socketTimeout = builder.socketTimeout; - autoConnectRetry = builder.autoConnectRetry; - socketKeepAlive = builder.socketKeepAlive; - maxAutoConnectRetryTime = builder.maxAutoConnectRetryTime; - readPreference = builder.readPreference; - dbDecoderFactory = builder.dbDecoderFactory; - dbEncoderFactory = builder.dbEncoderFactory; - writeConcern = builder.writeConcern; - socketFactory = builder.socketFactory; - cursorFinalizerEnabled = builder.cursorFinalizerEnabled; - alwaysUseMBeans = builder.alwaysUseMBeans; - } - - - private final String description; - private final int connectionsPerHost; - private final int threadsAllowedToBlockForConnectionMultiplier; - private final int maxWaitTime; - private final int connectTimeout; - private final int socketTimeout; - private final boolean socketKeepAlive; - private final boolean autoConnectRetry; - private final long maxAutoConnectRetryTime; - private final ReadPreference readPreference; - private final DBDecoderFactory dbDecoderFactory; - private final DBEncoderFactory dbEncoderFactory; - private final WriteConcern writeConcern; - private final SocketFactory socketFactory; - private final boolean cursorFinalizerEnabled; - private final boolean alwaysUseMBeans; -} diff --git a/src/main/com/mongodb/MongoClientURI.java b/src/main/com/mongodb/MongoClientURI.java deleted file mode 100644 index 89f9e6a5cad..00000000000 --- a/src/main/com/mongodb/MongoClientURI.java +++ /dev/null @@ -1,612 +0,0 @@ -/** - * Copyright (c) 2008 - 2012 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.mongodb; - -import javax.net.ssl.SSLSocketFactory; -import java.io.UnsupportedEncodingException; -import java.net.URLDecoder; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.logging.Logger; - - -/** - * Represents a URI - * which can be used to create a MongoClient instance. The URI describes the hosts to - * be used and options. - *

- * The format of the URI is:
- * <pre>
- *   mongodb://[username:password@]host1[:port1][,host2[:port2],...[,hostN[:portN]]][/[database[.collection]][?options]]
- * </pre>
- * <ul>
- * <li>{@code mongodb://} is a required prefix to identify that this is a string in the standard connection format.</li>
- * <li>{@code username:password@} are optional. If given, the driver will attempt to login to a database after
- * connecting to a database server.</li>
- * <li>{@code host1} is the only required part of the URI. It identifies a server address to connect to.</li>
- * <li>{@code :portX} is optional and defaults to :27017 if not provided.</li>
- * <li>{@code /database} is the name of the database to login to and thus is only relevant if the
- * {@code username:password@} syntax is used. If not specified, the "admin" database will be used by default.</li>
- * <li>{@code ?options} are connection options. Note that if {@code database} is absent there is still a {@code /}
- * required between the last host and the {@code ?} introducing the options. Options are name=value pairs and the pairs
- * are separated by "&". For backwards compatibility, ";" is accepted as a separator in addition to "&",
- * but should be considered deprecated.</li>
- * </ul>
- *
- * The Java driver supports the following options (case insensitive):
- *
- * Replica set configuration:
- * <ul>
- * <li>{@code replicaSet=name}: Implies that the hosts given are a seed list, and the driver will attempt to find
- * all members of the set.</li>
- * </ul>
- *
- * Connection configuration:
- * <ul>
- * <li>{@code ssl=true|false}: Whether to connect using SSL.</li>
- * <li>{@code connectTimeoutMS=ms}: How long a connection can take to be opened before timing out.</li>
- * <li>{@code socketTimeoutMS=ms}: How long a send or receive on a socket can take before timing out.</li>
- * </ul>
- *
- * Connection pool configuration:
- * <ul>
- * <li>{@code maxPoolSize=n}: The maximum number of connections in the connection pool.</li>
- * <li>{@code waitQueueMultiple=n}: this multiplier, multiplied with the maxPoolSize setting, gives the maximum number of
- * threads that may be waiting for a connection to become available from the pool. All further threads will get an
- * exception right away.</li>
- * <li>{@code waitQueueTimeoutMS=ms}: The maximum wait time in milliseconds that a thread may wait for a connection to
- * become available.</li>
- * </ul>
- *
- * Write concern configuration:
- * <ul>
- * <li>{@code safe=true|false}
- * <ul>
- * <li>{@code true}: the driver sends a getLastError command after every update to ensure that the update succeeded
- * (see also {@code w} and {@code wtimeoutMS}).</li>
- * <li>{@code false}: the driver does not send a getLastError command after every update.</li>
- * </ul>
- * </li>
- * <li>{@code w=wValue}
- * <ul>
- * <li>The driver adds { w : wValue } to the getLastError command. Implies {@code safe=true}.</li>
- * <li>wValue is typically a number, but can be any string in order to allow for specifications like
- * {@code "majority"}.</li>
- * </ul>
- * </li>
- * <li>{@code wtimeoutMS=ms}
- * <ul>
- * <li>The driver adds { wtimeout : ms } to the getLastError command. Implies {@code safe=true}.</li>
- * <li>Used in combination with {@code w}.</li>
- * </ul>
- * </li>
- * </ul>
- *
- * Read preference configuration:
- * <ul>
- * <li>{@code slaveOk=true|false}: Whether a driver connected to a replica set will send reads to slaves/secondaries.</li>
- * <li>{@code readPreference=enum}: The read preference for this connection. If set, it overrides any slaveOk value.
- * <ul>
- * <li>Enumerated values: {@code primary}, {@code primaryPreferred}, {@code secondary}, {@code secondaryPreferred},
- * {@code nearest}</li>
- * </ul>
- * </li>
- * <li>{@code readPreferenceTags=string}: A representation of a tag set as a comma-separated list of colon-separated
- * key-value pairs, e.g. {@code "dc:ny,rack:1"}. Spaces are stripped from the beginning and end of all keys and values.
- * To specify a list of tag sets, use multiple readPreferenceTags,
- * e.g. {@code readPreferenceTags=dc:ny,rack:1;readPreferenceTags=dc:ny;readPreferenceTags=}
- * <ul>
- * <li>Note the empty value for the last one, which means match any secondary as a last resort.</li>
- * <li>Order matters when using multiple readPreferenceTags.</li>
- * </ul>
- * </li>
- * </ul>
- *
- * Authentication configuration:
- * <ul>
- * <li>{@code authMechanism=MONGODB-CR|GSSAPI}: The authentication mechanism to use if a credential was supplied.
- * The default is MONGODB-CR, which is the native MongoDB Challenge Response mechanism.</li>
- * <li>{@code authSource=string}: The source of the authentication credentials. This is typically the database where
- * the credentials have been created. The value defaults to the database specified in the path portion of the URI.
- * If the database is specified in neither place, the default value is "admin". For GSSAPI, it's not necessary to specify
- * a source.</li>
- * </ul>
- *
- * Note: This class is a replacement for {@code MongoURI}, to be used with {@code MongoClient}. The main difference
- * in behavior is that the default write concern is {@code WriteConcern.ACKNOWLEDGED}.
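A couple of URIs in the format just described, with hosts and credentials as placeholder values:

```java
import com.mongodb.MongoClientURI;

public class UriExamples {
    public static void main(String[] args) {
        // Single host, default port, no options.
        MongoClientURI simple = new MongoClientURI("mongodb://localhost");

        // Seed list with authentication and options; note the '/' before the '?'.
        MongoClientURI full = new MongoClientURI(
                "mongodb://user:pwd@host1:27017,host2:27017/mydb"
                        + "?w=majority&readPreference=secondaryPreferred&connectTimeoutMS=5000");

        System.out.println(full.getHosts());    // [host1:27017, host2:27017]
        System.out.println(full.getDatabase()); // mydb
    }
}
```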

        - * - * @see MongoClientOptions for the default values for all options - * @since 2.10.0 - */ -public class MongoClientURI { - - private static final String PREFIX = "mongodb://"; - private static final String UTF_8 = "UTF-8"; - - /** - * Creates a MongoURI from the given string. - * - * @param uri the URI - * @dochub connections - */ - public MongoClientURI(final String uri) { - this(uri, new MongoClientOptions.Builder()); - } - - /** - * Creates a MongoURI from the given URI string, and MongoClientOptions.Builder. The builder can be configured - * with default options, which may be overridden by options specified in the URI string. - * - * @param uri the URI - * @param builder a Builder - * @see com.mongodb.MongoClientURI#getOptions() - * @since 2.11.0 - */ - public MongoClientURI(String uri, MongoClientOptions.Builder builder) { - try { - this.uri = uri; - if (!uri.startsWith(PREFIX)) - throw new IllegalArgumentException("uri needs to start with " + PREFIX); - - uri = uri.substring(PREFIX.length()); - - String serverPart; - String nsPart; - String optionsPart; - String userName = null; - char[] password = null; - - { - int idx = uri.lastIndexOf("/"); - if (idx < 0) { - if (uri.contains("?")) { - throw new IllegalArgumentException("URI contains options without trailing slash"); - } - serverPart = uri; - nsPart = null; - optionsPart = ""; - } else { - serverPart = uri.substring(0, idx); - nsPart = uri.substring(idx + 1); - - idx = nsPart.indexOf("?"); - if (idx >= 0) { - optionsPart = nsPart.substring(idx + 1); - nsPart = nsPart.substring(0, idx); - } else { - optionsPart = ""; - } - - } - } - - { // userName,password,hosts - List all = new LinkedList(); - - int idx = serverPart.indexOf("@"); - - if (idx > 0) { - String authPart = serverPart.substring(0, idx); - serverPart = serverPart.substring(idx + 1); - - idx = authPart.indexOf(":"); - if (idx == -1) { - userName = URLDecoder.decode(authPart, UTF_8); - } else { - userName = URLDecoder.decode(authPart.substring(0, idx), UTF_8); - password = URLDecoder.decode(authPart.substring(idx + 1), UTF_8).toCharArray(); - } - } - - Collections.addAll(all, serverPart.split(",")); - - hosts = Collections.unmodifiableList(all); - } - - if (nsPart != null && nsPart.length() != 0) { // database,_collection - int idx = nsPart.indexOf("."); - if (idx < 0) { - database = nsPart; - collection = null; - } else { - database = nsPart.substring(0, idx); - collection = nsPart.substring(idx + 1); - } - } else { - database = null; - collection = null; - } - - Map> optionsMap = parseOptions(optionsPart); - options = createOptions(optionsMap, builder); - credentials = createCredentials(optionsMap, userName, password, database); - warnOnUnsupportedOptions(optionsMap); - } catch (UnsupportedEncodingException e) { - throw new MongoInternalException("This should not happen", e); - } - } - - static Set generalOptionsKeys = new HashSet(); - static Set authKeys = new HashSet(); - static Set readPreferenceKeys = new HashSet(); - static Set writeConcernKeys = new HashSet(); - static Set allKeys = new HashSet(); - - static { - generalOptionsKeys.add("maxpoolsize"); - generalOptionsKeys.add("waitqueuemultiple"); - generalOptionsKeys.add("waitqueuetimeoutms"); - generalOptionsKeys.add("connecttimeoutms"); - generalOptionsKeys.add("sockettimeoutms"); - generalOptionsKeys.add("sockettimeoutms"); - generalOptionsKeys.add("autoconnectretry"); - generalOptionsKeys.add("ssl"); - - readPreferenceKeys.add("slaveok"); - readPreferenceKeys.add("readpreference"); - 
readPreferenceKeys.add("readpreferencetags"); - - writeConcernKeys.add("safe"); - writeConcernKeys.add("w"); - writeConcernKeys.add("wtimeout"); - writeConcernKeys.add("fsync"); - writeConcernKeys.add("j"); - - authKeys.add("authmechanism"); - authKeys.add("authsource"); - - allKeys.addAll(generalOptionsKeys); - allKeys.addAll(authKeys); - allKeys.addAll(readPreferenceKeys); - allKeys.addAll(writeConcernKeys); - } - - private void warnOnUnsupportedOptions(Map> optionsMap) { - for (String key : optionsMap.keySet()) { - if (!allKeys.contains(key)) { - LOGGER.warning("Unknown or Unsupported Option '" + key + "'"); - } - } - } - - private MongoClientOptions createOptions(Map> optionsMap, MongoClientOptions.Builder builder) { - for (String key : generalOptionsKeys) { - String value = getLastValue(optionsMap, key); - if (value == null) { - continue; - } - - if (key.equals("maxpoolsize")) { - builder.connectionsPerHost(Integer.parseInt(value)); - } else if (key.equals("waitqueuemultiple")) { - builder.threadsAllowedToBlockForConnectionMultiplier(Integer.parseInt(value)); - } else if (key.equals("waitqueuetimeoutms")) { - builder.maxWaitTime(Integer.parseInt(value)); - } else if (key.equals("connecttimeoutms")) { - builder.connectTimeout(Integer.parseInt(value)); - } else if (key.equals("sockettimeoutms")) { - builder.socketTimeout(Integer.parseInt(value)); - } else if (key.equals("autoconnectretry")) { - builder.autoConnectRetry(_parseBoolean(value)); - } else if (key.equals("ssl")) { - if (_parseBoolean(value)) { - builder.socketFactory(SSLSocketFactory.getDefault()); - } - } - } - - WriteConcern writeConcern = createWriteConcern(optionsMap); - ReadPreference readPreference = createReadPreference(optionsMap); - - if (writeConcern != null) { - builder.writeConcern(writeConcern); - } - if (readPreference != null) { - builder.readPreference(readPreference); - } - - return builder.build(); - } - - private WriteConcern createWriteConcern(final Map> optionsMap) { - Boolean safe = null; - String w = null; - int wTimeout = 0; - boolean fsync = false; - boolean journal = false; - - for (String key : writeConcernKeys) { - String value = getLastValue(optionsMap, key); - if (value == null) { - continue; - } - - if (key.equals("safe")) { - safe = _parseBoolean(value); - } else if (key.equals("w")) { - w = value; - } else if (key.equals("wtimeout")) { - wTimeout = Integer.parseInt(value); - } else if (key.equals("fsync")) { - fsync = _parseBoolean(value); - } else if (key.equals("j")) { - journal = _parseBoolean(value); - } - } - return buildWriteConcern(safe, w, wTimeout, fsync, journal); - } - - private ReadPreference createReadPreference(final Map> optionsMap) { - Boolean slaveOk = null; - String readPreferenceType = null; - DBObject firstTagSet = null; - List remainingTagSets = new ArrayList(); - - for (String key : readPreferenceKeys) { - String value = getLastValue(optionsMap, key); - if (value == null) { - continue; - } - - if (key.equals("slaveok")) { - slaveOk = _parseBoolean(value); - } else if (key.equals("readpreference")) { - readPreferenceType = value; - } else if (key.equals("readpreferencetags")) { - for (String cur : optionsMap.get(key)) { - DBObject tagSet = getTagSet(cur.trim()); - if (firstTagSet == null) { - firstTagSet = tagSet; - } else { - remainingTagSets.add(tagSet); - } - } - } - } - return buildReadPreference(readPreferenceType, firstTagSet, remainingTagSets, slaveOk); - } - - private MongoCredential createCredentials(Map> optionsMap, final String userName, - final char[] password, 
String database) { - if (userName == null) { - return null; - } - - if (database == null) { - database = "admin"; - } - - String mechanism = MongoCredential.MONGODB_CR_MECHANISM; - String authSource = database; - - for (String key : authKeys) { - String value = getLastValue(optionsMap, key); - - if (value == null) { - continue; - } - - if (key.equals("authmechanism")) { - mechanism = value; - } else if (key.equals("authsource")) { - authSource = value; - } - } - - if (mechanism.equals(MongoCredential.GSSAPI_MECHANISM)) { - return MongoCredential.createGSSAPICredential(userName); - } - else if (mechanism.equals(MongoCredential.MONGODB_CR_MECHANISM)) { - return MongoCredential.createMongoCRCredential(userName, authSource, password); - } - else { - throw new IllegalArgumentException("Unsupported authMechanism: " + mechanism); - } - } - - private String getLastValue(final Map> optionsMap, final String key) { - List valueList = optionsMap.get(key); - if (valueList == null) { - return null; - } - return valueList.get(valueList.size() - 1); - } - - private Map> parseOptions(String optionsPart) { - Map> optionsMap = new HashMap>(); - - for (String _part : optionsPart.split("&|;")) { - int idx = _part.indexOf("="); - if (idx >= 0) { - String key = _part.substring(0, idx).toLowerCase(); - String value = _part.substring(idx + 1); - List valueList = optionsMap.get(key); - if (valueList == null) { - valueList = new ArrayList(1); - } - valueList.add(value); - optionsMap.put(key, valueList); - } - } - - return optionsMap; - } - - private ReadPreference buildReadPreference(final String readPreferenceType, final DBObject firstTagSet, - final List remainingTagSets, final Boolean slaveOk) { - if (readPreferenceType != null) { - if (firstTagSet == null) { - return ReadPreference.valueOf(readPreferenceType); - } else { - return ReadPreference.valueOf(readPreferenceType, firstTagSet, - remainingTagSets.toArray(new DBObject[remainingTagSets.size()])); - } - } else if (slaveOk != null) { - if (slaveOk.equals(Boolean.TRUE)) { - return ReadPreference.secondaryPreferred(); - } - } - return null; - } - - private WriteConcern buildWriteConcern(final Boolean safe, final String w, - final int wTimeout, final boolean fsync, final boolean journal) { - if (w != null || wTimeout != 0 || fsync || journal) { - if (w == null) { - return new WriteConcern(1, wTimeout, fsync, journal); - } else { - try { - return new WriteConcern(Integer.parseInt(w), wTimeout, fsync, journal); - } catch (NumberFormatException e) { - return new WriteConcern(w, wTimeout, fsync, journal); - } - } - } else if (safe != null) { - if (safe) { - return WriteConcern.ACKNOWLEDGED; - } else { - return WriteConcern.UNACKNOWLEDGED; - } - } - return null; - } - - private DBObject getTagSet(String tagSetString) { - DBObject tagSet = new BasicDBObject(); - if (tagSetString.length() > 0) { - for (String tag : tagSetString.split(",")) { - String[] tagKeyValuePair = tag.split(":"); - if (tagKeyValuePair.length != 2) { - throw new IllegalArgumentException("Bad read preference tags: " + tagSetString); - } - tagSet.put(tagKeyValuePair[0].trim(), tagKeyValuePair[1].trim()); - } - } - return tagSet; - } - - boolean _parseBoolean(String _in) { - String in = _in.trim(); - return in != null && in.length() > 0 && (in.equals("1") || in.toLowerCase().equals("true") || in.toLowerCase() - .equals("yes")); - } - - // --------------------------------- - - /** - * Gets the username - * - * @return the username - */ - public String getUsername() { - return credentials != null ? 
credentials.getUserName() : null; - } - - /** - * Gets the password - * - * @return the password - */ - public char[] getPassword() { - return credentials != null ? credentials.getPassword() : null; - } - - /** - * Gets the list of hosts - * - * @return the host list - */ - public List getHosts() { - return hosts; - } - - /** - * Gets the database name - * - * @return the database name - */ - public String getDatabase() { - return database; - } - - - /** - * Gets the collection name - * - * @return the collection name - */ - public String getCollection() { - return collection; - } - - /** - * Get the unparsed URI. - * - * @return the URI - */ - public String getURI() { - return uri; - } - - - /** - * Gets the credentials. - * - * @return the credentials - */ - public MongoCredential getCredentials() { - return credentials; - } - - /** - * Gets the options - * - * @return the MongoClientOptions based on this URI. - */ - public MongoClientOptions getOptions() { - return options; - } - - // --------------------------------- - - private final MongoClientOptions options; - private final MongoCredential credentials; - private final List hosts; - private final String database; - private final String collection; - private final String uri; - - - static final Logger LOGGER = Logger.getLogger("com.mongodb.MongoURI"); - - @Override - public String toString() { - return uri; - } -} diff --git a/src/main/com/mongodb/MongoConnectionPool.java b/src/main/com/mongodb/MongoConnectionPool.java deleted file mode 100644 index 44236da2bf5..00000000000 --- a/src/main/com/mongodb/MongoConnectionPool.java +++ /dev/null @@ -1,28 +0,0 @@ -/** - * Copyright (c) 2008 - 2012 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.mongodb; - -/** - * This class exists only so that, on Java 6 and above, the driver can create instances of an MXBean. - */ -class MongoConnectionPool extends DBPortPool implements MongoConnectionPoolMXBean { - - MongoConnectionPool(ServerAddress addr, MongoOptions options) { - super(addr, options); - } -} diff --git a/src/main/com/mongodb/MongoConnectionPoolMXBean.java b/src/main/com/mongodb/MongoConnectionPoolMXBean.java deleted file mode 100644 index 97cacabda0d..00000000000 --- a/src/main/com/mongodb/MongoConnectionPoolMXBean.java +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Copyright (c) 2008 - 20112 10gen, Inc. - *

        - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

        - * http://www.apache.org/licenses/LICENSE-2.0 - *

        - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import com.mongodb.util.ConnectionPoolStatisticsBean; - -/** - * A standard MXBean interface for a Mongo connection pool, for use on Java 6 and above virtual machines. - *

        - * This interface is NOT part of the public API. Be prepared for non-binary compatible changes in minor releases. - * - * @deprecated This class will be removed in 3.x versions of the driver, - * so please remove it from your compile time dependencies. - */ -@Deprecated -public interface MongoConnectionPoolMXBean { - /** - * Gets the name of the pool. - * - * @return the name of the pool - */ - String getName(); - - /** - * Gets the maximum allowed size of the pool, including idle and in-use members. - * - * @return the maximum size - */ - int getMaxSize(); - - - /** - * Gets the host that this connection pool is connecting to. - * - * @return the host - */ - String getHost(); - - /** - * Gets the port that this connection pool is connecting to. - * - * @return the port - */ - int getPort(); - - /** - * Gets the statistics for this connection pool. - * - * @return the connection pool statistics - */ - ConnectionPoolStatisticsBean getStatistics(); -} diff --git a/src/main/com/mongodb/MongoCredential.java b/src/main/com/mongodb/MongoCredential.java deleted file mode 100644 index 1d6303067a6..00000000000 --- a/src/main/com/mongodb/MongoCredential.java +++ /dev/null @@ -1,175 +0,0 @@ -/** - * Copyright (c) 2008 - 2012 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.mongodb; - -import org.bson.util.annotations.Immutable; - -import java.util.Arrays; - -/** - * Represents credentials to authenticate to a mongo server, as well as the source of the credentials and - * the authentication mechanism to use. - * - * @since 2.11.0 - */ -@Immutable -public final class MongoCredential { - - /** - * The GSSAPI mechanism. See the RFC. - */ - public static final String GSSAPI_MECHANISM = "GSSAPI"; - - /** - * The MongoDB Challenge Response mechanism. - */ - public static final String MONGODB_CR_MECHANISM = "MONGODB-CR"; - - - private final String mechanism; - private final String userName; - private final String source; - private final char[] password; - - /** - * Creates a MongoCredential instance for the MongoDB Challenge Response protocol. - * - * @param userName the user name - * @param database the database where the user is defined - * @param password the user's password - * @return the credential - */ - public static MongoCredential createMongoCRCredential(String userName, String database, char[] password) { - return new MongoCredential(MONGODB_CR_MECHANISM, userName, database, password); - } - - /** - * Creates a MongoCredential instance for the GSSAPI SASL mechanism. 
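A sketch of the two MongoCredential factory methods shown above (the user names and database are placeholders):

```java
import com.mongodb.MongoCredential;

public class CredentialExamples {
    public static void main(String[] args) {
        // Challenge-response: needs the database the user is defined in and a password.
        MongoCredential cr = MongoCredential.createMongoCRCredential(
                "user", "mydb", "pwd".toCharArray());

        // GSSAPI (Kerberos): no password here; the source is implicitly "$external".
        MongoCredential gssapi = MongoCredential.createGSSAPICredential("user@EXAMPLE.COM");

        System.out.println(cr.getMechanism());  // MONGODB-CR
        System.out.println(gssapi.getSource()); // $external
    }
}
```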
- * - * @param userName the user name - * @return the credential - */ - public static MongoCredential createGSSAPICredential(String userName) { - return new MongoCredential(GSSAPI_MECHANISM, userName, "$external", null); - } - - /** - * - * Constructs a new instance using the given mechanism, userName, source, and password - * - * @param mechanism the authentication mechanism - * @param userName the user name - * @param source the source of the user name, typically a database name - * @param password the password - */ - MongoCredential(final String mechanism, final String userName, final String source, final char[] password) { - if (mechanism == null) { - throw new IllegalArgumentException("mechanism can not be null"); - } - - if (userName == null) { - throw new IllegalArgumentException("username can not be null"); - } - - if (mechanism.equals(MONGODB_CR_MECHANISM) && password == null) { - throw new IllegalArgumentException("Password can not be null for " + MONGODB_CR_MECHANISM + " mechanism"); - } - - if (mechanism.equals(GSSAPI_MECHANISM) && password != null) { - throw new IllegalArgumentException("Password must be null for the " + GSSAPI_MECHANISM + " mechanism"); - } - - this.mechanism = mechanism; - this.userName = userName; - this.source = source; - this.password = password != null ? password.clone() : null; - } - - /** - * Gets the mechanism - * - * @return the mechanism. - */ - public String getMechanism() { - return mechanism; - } - - /** - * Gets the user name - * - * @return the user name. Can never be null. - */ - public String getUserName() { - return userName; - } - - /** - * Gets the source of the user name, typically the name of the database where the user is defined. - * - * @return the user name. Can never be null. - */ - public String getSource() { - return source; - } - - /** - * Gets the password. - * - * @return the password. Can be null for some mechanisms. - */ - public char[] getPassword() { - if (password == null) { - return null; - } - return password.clone(); - } - - @Override - public boolean equals(final Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - final MongoCredential that = (MongoCredential) o; - - if (!mechanism.equals(that.mechanism)) return false; - if (!Arrays.equals(password, that.password)) return false; - if (!source.equals(that.source)) return false; - if (!userName.equals(that.userName)) return false; - - return true; - } - - @Override - public int hashCode() { - int result = mechanism.hashCode(); - result = 31 * result + userName.hashCode(); - result = 31 * result + source.hashCode(); - result = 31 * result + (password != null ? Arrays.hashCode(password) : 0); - return result; - } - - @Override - public String toString() { - return "MongoCredential{" + - "mechanism='" + mechanism + '\'' + - ", userName='" + userName + '\'' + - ", source='" + source + '\'' + - ", password=" + - '}'; - } -} diff --git a/src/main/com/mongodb/MongoCredentialsStore.java b/src/main/com/mongodb/MongoCredentialsStore.java deleted file mode 100644 index e964bf1816d..00000000000 --- a/src/main/com/mongodb/MongoCredentialsStore.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Copyright (c) 2008 - 2013 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.mongodb; - -import org.bson.util.annotations.ThreadSafe; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -/** - * An effectively immutable store of credentials to mongo servers. It enforces the invariant that there can be at most - * one credentials for each database. It allows at most one credentials with a null database. - * - * There is still a package-protected method to add a new credentials to the store, but it's only there - * to support DB.authenticate, which allows you to add new credentials at any point during the life of a MongoClient. - * - * @since 2.11.0 - */ -@ThreadSafe -class MongoCredentialsStore { - private final Map credentialsMap = new HashMap(); - private volatile Set allDatabasesWithCredentials = new HashSet(); - - /** - * Creates an empty store - */ - public MongoCredentialsStore() { - } - - /** - * Creates a store with a single credentials. - * - * @param credentials A single credentials, which may be null. - */ - public MongoCredentialsStore(MongoCredential credentials) { - if (credentials == null) { - return; - } - add(credentials); - } - - /** - * Creates a store with the list of credentials. - * - * @param credentialsList The list of credentials - */ - public MongoCredentialsStore(Iterable credentialsList) { - if (credentialsList == null) { - return; - } - for (MongoCredential cur : credentialsList) { - add(cur); - } - } - - /** - * Adds a new credentials. - * - * @param credentials the new credentials - * @throws IllegalArgumentException if there already exist different credentials for the same database - */ - synchronized void add(MongoCredential credentials) { - MongoCredential existingCredentials = credentialsMap.get(credentials.getSource()); - - if (existingCredentials != null) { - if (existingCredentials.equals(credentials)) { - return; - } - throw new IllegalArgumentException("Can't add more than one credentials for the same database"); - } - - credentialsMap.put(credentials.getSource(), credentials); - allDatabasesWithCredentials = new HashSet(allDatabasesWithCredentials); - allDatabasesWithCredentials.add(credentials.getSource()); - } - - /** - * Gets the set of databases for which there are credentials stored. - * - * @return an unmodifiable set of database names. Can contain the null string. - */ - public Set getDatabases() { - return Collections.unmodifiableSet(allDatabasesWithCredentials); - } - - /** - * Gets the stored credentials for the given database. - * - * @param database the database. This can be null, to get the credentials with the null database. - * @return the credentials for the given database. Can be null if not are stored. 
- */ - public synchronized MongoCredential get(String database) { - return credentialsMap.get(database); - } - - /** - * Gets the MongoCredentials in this map as a List - * @return the list of credentials - */ - public synchronized List asList() { - return new ArrayList(credentialsMap.values()); - } - - @Override - public synchronized boolean equals(final Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - final MongoCredentialsStore that = (MongoCredentialsStore) o; - - if (!credentialsMap.equals(that.credentialsMap)) return false; - - return true; - } - - @Override - public synchronized int hashCode() { - return credentialsMap.hashCode(); - } - - @Override - public String toString() { - return "{" + - "credentials=" + credentialsMap + - '}'; - } -} diff --git a/src/main/com/mongodb/MongoCursorNotFoundException.java b/src/main/com/mongodb/MongoCursorNotFoundException.java deleted file mode 100644 index 8749584144b..00000000000 --- a/src/main/com/mongodb/MongoCursorNotFoundException.java +++ /dev/null @@ -1,40 +0,0 @@ -package com.mongodb; - -/** - * Subclass of {@link MongoException} representing a cursor-not-found exception. - */ -public class MongoCursorNotFoundException extends MongoException { - - private static final long serialVersionUID = -4415279469780082174L; - - private final long cursorId; - private final ServerAddress serverAddress; - - /** - * @param cursorId cursor - * @param serverAddress server address - */ - MongoCursorNotFoundException(final long cursorId, final ServerAddress serverAddress) { - super(-5, "Cursor " + cursorId + " not found on server " + serverAddress); - this.cursorId = cursorId; - this.serverAddress = serverAddress; - } - - /** - * Get the cursor id that wasn't found. - * - * @return the ID of the cursor - */ - public long getCursorId() { - return cursorId; - } - - /** - * The server address where the cursor is. - * - * @return the ServerAddress representing the server the cursor was on. - */ - public ServerAddress getServerAddress() { - return serverAddress; - } -} diff --git a/src/main/com/mongodb/MongoDuplicateKeyException.java b/src/main/com/mongodb/MongoDuplicateKeyException.java deleted file mode 100644 index 30e068aedd1..00000000000 --- a/src/main/com/mongodb/MongoDuplicateKeyException.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright (c) 2008 - 2013 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -/** - * Subclass of {@link WriteConcernException} representing a duplicate key exception. 
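Catching the more specific duplicate-key subclass might look like this sketch (the host, database, and collection names are placeholders):

```java
import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.MongoClient;
import com.mongodb.MongoDuplicateKeyException;

import java.net.UnknownHostException;

public class DuplicateKeyExample {
    public static void main(String[] args) throws UnknownHostException {
        MongoClient client = new MongoClient("localhost");
        DBCollection coll = client.getDB("test").getCollection("people");
        try {
            coll.insert(new BasicDBObject("_id", 1));
            coll.insert(new BasicDBObject("_id", 1)); // same _id again
        } catch (MongoDuplicateKeyException e) {
            // getCode() carries the server error code, e.g. 11000 for a duplicate key.
            System.out.println("duplicate key: " + e.getCode());
        } finally {
            client.close();
        }
    }
}
```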
- */ -public class MongoDuplicateKeyException extends WriteConcernException { - - private static final long serialVersionUID = -4415279469780082174L; - - /** - * Construct a new instance with the CommandResult from getlasterror command - * - * @param commandResult the command result - */ - MongoDuplicateKeyException(final CommandResult commandResult) { - super(commandResult); - } -} diff --git a/src/main/com/mongodb/MongoException.java b/src/main/com/mongodb/MongoException.java deleted file mode 100644 index 73d3e34f908..00000000000 --- a/src/main/com/mongodb/MongoException.java +++ /dev/null @@ -1,159 +0,0 @@ -// MongoException.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import org.bson.BSONObject; - -import java.io.IOException; - -/** - * A general exception raised in Mongo - * @author antoine - */ -public class MongoException extends RuntimeException { - - private static final long serialVersionUID = -4415279469780082174L; - - /** - * @param msg the message - */ - public MongoException( String msg ){ - super( msg ); - _code = -3; - } - - /** - * - * @param code the error code - * @param msg the message - */ - public MongoException( int code , String msg ){ - super( msg ); - _code = code; - } - - /** - * - * @param msg the message - * @param t the throwable cause - */ - public MongoException( String msg , Throwable t ){ - super( msg , t ); - _code = -4; - } - - /** - * - * @param code the error code - * @param msg the message - * @param t the throwable cause - */ - public MongoException( int code , String msg , Throwable t ){ - super( msg , t ); - _code = code; - } - - /** - * Creates a MongoException from a BSON object representing an error - * @param o - */ - public MongoException( BSONObject o ){ - this( ServerError.getCode( o ) , ServerError.getMsg( o , "UNKNOWN" ) ); - } - - static MongoException parse( BSONObject o ){ - String s = ServerError.getMsg( o , null ); - if ( s == null ) - return null; - return new MongoException( ServerError.getCode( o ) , s ); - } - - /** - * Subclass of MongoException representing a network-related exception. - * - * @deprecated This class will be dropped in 3.x versions. - * Please catch {@link MongoSocketException} instead. - */ - @Deprecated - public static class Network extends MongoSocketException { - - private static final long serialVersionUID = 8364298902504372967L; - - /** - * @param msg the message - * @param ioe the cause - */ - public Network(String msg, IOException ioe) { - super(msg, ioe); - } - - /** - * @param ioe the cause - */ - public Network(IOException ioe) { - super(ioe); - } - } - - /** - * Subclass of WriteConcernException representing a duplicate key error. - * - * @deprecated This class will be dropped in 3.x versions. - * Please catch {@link MongoDuplicateKeyException} instead. 
- */ - @Deprecated - public static class DuplicateKey extends MongoDuplicateKeyException { - - private static final long serialVersionUID = 6557680785576001838L; - - public DuplicateKey(final CommandResult commandResult) { - super(commandResult); - } - } - - /** - * Subclass of MongoException representing a cursor-not-found exception - * - * @deprecated This class will be dropped in 3.x versions. - * Please catch {@link MongoCursorNotFoundException} instead. - */ - @Deprecated - public static class CursorNotFound extends MongoCursorNotFoundException { - - private static final long serialVersionUID = -3759595395830412426L; - - /** - * @param cursorId cursor - * @param serverAddress server address - */ - public CursorNotFound(long cursorId, ServerAddress serverAddress) { - super(cursorId, serverAddress); - } - } - - /** - * Gets the exception code - * @return - */ - public int getCode(){ - return _code; - } - - final int _code; -} diff --git a/src/main/com/mongodb/MongoInternalException.java b/src/main/com/mongodb/MongoInternalException.java deleted file mode 100644 index cf38dc42bdc..00000000000 --- a/src/main/com/mongodb/MongoInternalException.java +++ /dev/null @@ -1,47 +0,0 @@ -// MongoInternalException.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -/** - * An Mongo exception internal to the driver, not carrying any error code - * @author antoine - */ -public class MongoInternalException extends MongoException { - - private static final long serialVersionUID = -4415279469780082174L; - - /** - * - * @param msg the message - */ - public MongoInternalException( String msg ){ - super( msg ); - } - - /** - * - * @param msg the message - * @param t the throwable cause - */ - public MongoInternalException( String msg , Throwable t ){ - super(msg, t); - } - -} - diff --git a/src/main/com/mongodb/MongoInterruptedException.java b/src/main/com/mongodb/MongoInterruptedException.java deleted file mode 100644 index a13c6a73513..00000000000 --- a/src/main/com/mongodb/MongoInterruptedException.java +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright (c) 2008 - 2012 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.mongodb; - -/** - * A non-checked exception indicating that the driver has been interrupted by a call to Thread.interrupt. 
- * - * @see Thread#interrupt() - * @see InterruptedException - */ -public class MongoInterruptedException extends MongoException { - private static final long serialVersionUID = -4110417867718417860L; - - public MongoInterruptedException(final InterruptedException e) { - super("A driver operation has been interrupted", e); - } - - public MongoInterruptedException(final String message, final InterruptedException e) { - super(message, e); - } -} diff --git a/src/main/com/mongodb/MongoOptions.java b/src/main/com/mongodb/MongoOptions.java deleted file mode 100644 index 4055f1671c6..00000000000 --- a/src/main/com/mongodb/MongoOptions.java +++ /dev/null @@ -1,720 +0,0 @@ -// MongoOptions.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import javax.net.SocketFactory; - -/** - * Various settings for a Mongo instance. Not thread safe, and superseded by MongoClientOptions. This class may - * be deprecated in a future release. - * - * @see MongoClientOptions - * @see MongoClient - * - * @deprecated Replaced by {@link MongoClientOptions}. - */ -@Deprecated -public class MongoOptions { - - @Deprecated - public MongoOptions(){ - reset(); - } - - /** - * @deprecated Replaced by {@link MongoClientOptions} - */ - @Deprecated - public MongoOptions(final MongoClientOptions options) { - connectionsPerHost = options.getConnectionsPerHost(); - threadsAllowedToBlockForConnectionMultiplier = options.getThreadsAllowedToBlockForConnectionMultiplier(); - maxWaitTime = options.getMaxWaitTime(); - connectTimeout = options.getConnectTimeout(); - socketTimeout = options.getSocketTimeout(); - socketKeepAlive = options.isSocketKeepAlive(); - autoConnectRetry = options.isAutoConnectRetry(); - maxAutoConnectRetryTime = options.getMaxAutoConnectRetryTime(); - readPreference = options.getReadPreference(); - dbDecoderFactory = options.getDbDecoderFactory(); - dbEncoderFactory = options.getDbEncoderFactory(); - socketFactory = options.getSocketFactory(); - description = options.getDescription(); - cursorFinalizerEnabled = options.isCursorFinalizerEnabled(); - writeConcern = options.getWriteConcern(); - slaveOk = false; // default to false, as readPreference field will be responsible - alwaysUseMBeans = options.isAlwaysUseMBeans(); - } - - public void reset(){ - connectionsPerHost = Bytes.CONNECTIONS_PER_HOST; - threadsAllowedToBlockForConnectionMultiplier = 5; - maxWaitTime = 1000 * 60 * 2; - connectTimeout = 1000 * 10; - socketTimeout = 0; - socketKeepAlive = false; - autoConnectRetry = false; - maxAutoConnectRetryTime = 0; - slaveOk = false; - readPreference = null; - writeConcern = null; - safe = false; - w = 0; - wtimeout = 0; - fsync = false; - j = false; - dbDecoderFactory = DefaultDBDecoder.FACTORY; - dbEncoderFactory = DefaultDBEncoder.FACTORY; - socketFactory = SocketFactory.getDefault(); - description = null; - cursorFinalizerEnabled = true; - alwaysUseMBeans = false; - } - - public MongoOptions copy() { - MongoOptions m = new 
MongoOptions(); - m.connectionsPerHost = connectionsPerHost; - m.threadsAllowedToBlockForConnectionMultiplier = threadsAllowedToBlockForConnectionMultiplier; - m.maxWaitTime = maxWaitTime; - m.connectTimeout = connectTimeout; - m.socketTimeout = socketTimeout; - m.socketKeepAlive = socketKeepAlive; - m.autoConnectRetry = autoConnectRetry; - m.maxAutoConnectRetryTime = maxAutoConnectRetryTime; - m.slaveOk = slaveOk; - m.readPreference = readPreference; - m.writeConcern = writeConcern; - m.safe = safe; - m.w = w; - m.wtimeout = wtimeout; - m.fsync = fsync; - m.j = j; - m.dbDecoderFactory = dbDecoderFactory; - m.dbEncoderFactory = dbEncoderFactory; - m.socketFactory = socketFactory; - m.description = description; - m.cursorFinalizerEnabled = cursorFinalizerEnabled; - m.alwaysUseMBeans = alwaysUseMBeans; - return m; - } - - /** - * Helper method to return the appropriate WriteConcern instance based on the current related options settings. - **/ - public WriteConcern getWriteConcern() { - if (writeConcern != null) { - return writeConcern; - } else if ( w != 0 || wtimeout != 0 || fsync | j) { - return new WriteConcern( w , wtimeout , fsync, j ); - } else if (safe) { - return WriteConcern.SAFE; - } else { - return WriteConcern.NORMAL; - } - } - - @Override - public boolean equals(final Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - final MongoOptions options = (MongoOptions) o; - - if (autoConnectRetry != options.autoConnectRetry) return false; - if (connectTimeout != options.connectTimeout) return false; - if (connectionsPerHost != options.connectionsPerHost) return false; - if (cursorFinalizerEnabled != options.cursorFinalizerEnabled) return false; - if (fsync != options.fsync) return false; - if (j != options.j) return false; - if (maxAutoConnectRetryTime != options.maxAutoConnectRetryTime) return false; - if (maxWaitTime != options.maxWaitTime) return false; - if (safe != options.safe) return false; - if (slaveOk != options.slaveOk) return false; - if (socketKeepAlive != options.socketKeepAlive) return false; - if (socketTimeout != options.socketTimeout) return false; - if (threadsAllowedToBlockForConnectionMultiplier != options.threadsAllowedToBlockForConnectionMultiplier) - return false; - if (w != options.w) return false; - if (wtimeout != options.wtimeout) return false; - if (dbDecoderFactory != null ? !dbDecoderFactory.equals(options.dbDecoderFactory) : options.dbDecoderFactory != null) - return false; - if (dbEncoderFactory != null ? !dbEncoderFactory.equals(options.dbEncoderFactory) : options.dbEncoderFactory != null) - return false; - if (description != null ? !description.equals(options.description) : options.description != null) return false; - if (readPreference != null ? !readPreference.equals(options.readPreference) : options.readPreference != null) - return false; - if (socketFactory != null ? !socketFactory.equals(options.socketFactory) : options.socketFactory != null) - return false; - if (writeConcern != null ? !writeConcern.equals(options.writeConcern) : options.writeConcern != null) - return false; - - return true; - } - - @Override - public int hashCode() { - int result = description != null ? description.hashCode() : 0; - result = 31 * result + connectionsPerHost; - result = 31 * result + threadsAllowedToBlockForConnectionMultiplier; - result = 31 * result + maxWaitTime; - result = 31 * result + connectTimeout; - result = 31 * result + socketTimeout; - result = 31 * result + (socketKeepAlive ? 
1 : 0); - result = 31 * result + (autoConnectRetry ? 1 : 0); - result = 31 * result + (int) (maxAutoConnectRetryTime ^ (maxAutoConnectRetryTime >>> 32)); - result = 31 * result + (slaveOk ? 1 : 0); - result = 31 * result + (readPreference != null ? readPreference.hashCode() : 0); - result = 31 * result + (dbDecoderFactory != null ? dbDecoderFactory.hashCode() : 0); - result = 31 * result + (dbEncoderFactory != null ? dbEncoderFactory.hashCode() : 0); - result = 31 * result + (safe ? 1 : 0); - result = 31 * result + w; - result = 31 * result + wtimeout; - result = 31 * result + (fsync ? 1 : 0); - result = 31 * result + (j ? 1 : 0); - result = 31 * result + (socketFactory != null ? socketFactory.hashCode() : 0); - result = 31 * result + (cursorFinalizerEnabled ? 1 : 0); - result = 31 * result + (writeConcern != null ? writeConcern.hashCode() : 0); - return result; - } - - /** - *
The description for Mongo instances created with these options. This is used in various places like logging. - */ - public String description; - - /** - * The maximum number of connections allowed per host for this Mongo instance. - * Those connections will be kept in a pool when idle. - * Once the pool is exhausted, any operation requiring a connection will block waiting for an available connection. - * Default is 10. - * @see MongoOptions#threadsAllowedToBlockForConnectionMultiplier
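
The relationship that @see tag points at is plain arithmetic; a runnable sketch using the documented defaults (illustrative values, not a tuning recommendation):

    class PoolSizingSketch {
        public static void main(String[] args) {
            int connectionsPerHost = 10;                          // documented default
            int threadsAllowedToBlockForConnectionMultiplier = 5; // documented default

            // Up to 50 threads may block waiting for a pooled connection;
            // the 51st fails immediately with an exception.
            int maxWaitingThreads = connectionsPerHost * threadsAllowedToBlockForConnectionMultiplier;
            System.out.println(maxWaitingThreads); // 50
        }
    }
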
- */ - public int connectionsPerHost; - - /** - * This multiplier, multiplied with the connectionsPerHost setting, gives the maximum number of threads that - * may be waiting for a connection to become available from the pool. - * All further threads will get an exception right away. - * For example, if connectionsPerHost is 10 and threadsAllowedToBlockForConnectionMultiplier is 5, then up to 50 threads can wait for a connection. - * Default is 5. - */ - public int threadsAllowedToBlockForConnectionMultiplier; - - /** - * The maximum wait time in milliseconds that a thread may wait for a connection to become available. - * Default is 120,000. A value of 0 means that it will not wait. A negative value means to wait indefinitely. - */ - public int maxWaitTime; - - /** - * The connection timeout in milliseconds. A value of 0 means no timeout. - * It is used solely when establishing a new connection {@link java.net.Socket#connect(java.net.SocketAddress, int)}. - * Default is 10,000. - */ - public int connectTimeout; - - /** - * The socket timeout in milliseconds. - * It is used for I/O socket read and write operations {@link java.net.Socket#setSoTimeout(int)}. - * Default is 0, which means no timeout. - */ - public int socketTimeout; - - /** - * This flag controls the socket keep-alive feature that keeps a connection alive through firewalls {@link java.net.Socket#setKeepAlive(boolean)}. - * Default is false. - */ - public boolean socketKeepAlive; - - /** - * If true, the driver will keep trying to connect to the same server in case the socket cannot be established. - * There is a maximum amount of time to keep retrying, which is 15s by default. - * This can be useful to avoid some exceptions being thrown when a server is down temporarily by blocking the operations. - * It also can be useful to smooth the transition to a new master (so that a new master is elected within the retry time). - * Note that when using this flag: - * - for a replica set, the driver will keep trying to connect to the old master for that time, instead of failing over to the new one right away - * - this does not prevent exceptions from being thrown in read/write operations on the socket, which must be handled by the application - * - * Even if this flag is false, the driver already has mechanisms to automatically recreate broken connections and retry the read operations. - * Default is false. - */ - public boolean autoConnectRetry; - - /** - * The maximum amount of time in milliseconds to spend retrying to open a connection to the same server. - * Default is 0, which means to use the default 15s if autoConnectRetry is on. - */ - public long maxAutoConnectRetryTime; - - /** - * This flag specifies if the driver is allowed to read from secondary (slave) servers. - * Specifically in the current implementation, the driver will avoid reading from the primary server and round robin requests to secondaries. - * The driver also factors in the latency to secondaries when choosing a server. - * Note that reading from secondaries can increase performance and reliability, but it may result in temporarily inconsistent results. - * Default is false. - * - * @deprecated Replaced with {@code ReadPreference.secondaryPreferred()} - * @see ReadPreference#secondaryPreferred() - */ - @Deprecated - public boolean slaveOk; - - /** - * Specifies the read preference. - */ - public ReadPreference readPreference; - - /** - * Override the DBCallback factory. Default is for the standard Mongo Java driver configuration.
- */ - public DBDecoderFactory dbDecoderFactory; - - /** - * Override the encoding factory. Default is for the standard Mongo Java driver configuration. - */ - public DBEncoderFactory dbEncoderFactory; - - /** - * If true the driver will use a WriteConcern of WriteConcern.SAFE for all operations. - * If w, wtimeout, fsync or j are specified, this setting is ignored. - * Default is false. - */ - public boolean safe; - - /** - * The "w" value, (number of writes), of the global WriteConcern. - * Default is 0. - */ - public int w; - - /** - * The "wtimeout" value of the global WriteConcern. - * Default is 0. - */ - public int wtimeout; - - /** - * The "fsync" value of the global WriteConcern. - * true indicates writes should wait for data to be written to server data file - * Default is false. - */ - public boolean fsync; - - /** - * The "j" value of the global WriteConcern. - * true indicates writes should wait for a journaling group commit - * Default is false. - */ - public boolean j; - - /** - * sets the socket factory for creating sockets to mongod - * Default is SocketFactory.getDefault() - */ - public SocketFactory socketFactory; - - /** - * Sets whether there is a a finalize method created that cleans up instances of DBCursor that the client - * does not close. If you are careful to always call the close method of DBCursor, then this can safely be set to false. - * @see com.mongodb.DBCursor#close(). - * Default is true. - */ - public boolean cursorFinalizerEnabled; - - /** - * Sets the write concern. If this is not set, the write concern defaults to the combination of settings of - * the other write concern-related fields. If set, this will override all of the other write concern-related - * fields. - * - * @see #w - * @see #safe - * @see #wtimeout - * @see #fsync - * @see #j - */ - public WriteConcern writeConcern; - - /** - * Sets whether JMX beans registered by the driver should always be MBeans, regardless of whether the VM is - * Java 6 or greater. If false, the driver will use MXBeans if the VM is Java 6 or greater, and use MBeans if - * the VM is Java 5. - *
- * Default is false. - */ - public boolean alwaysUseMBeans; - - /** - * @return The description for MongoClient instances created with these options - */ - public synchronized String getDescription() { - return description; - } - - /** - * - * @param desc The description for Mongo instances created with these options - */ - public synchronized void setDescription(String desc) { - description = desc; - } - - /** - * - * @return the maximum number of connections allowed per host for this Mongo instance - */ - public synchronized int getConnectionsPerHost() { - return connectionsPerHost; - } - - /** - * - * @param connections sets the maximum number of connections allowed per host for this Mongo instance - */ - public synchronized void setConnectionsPerHost(int connections) { - connectionsPerHost = connections; - } - - /** - * - * @return the maximum number of threads that - * may be waiting for a connection - */ - public synchronized int getThreadsAllowedToBlockForConnectionMultiplier() { - return threadsAllowedToBlockForConnectionMultiplier; - } - - /** - * - * @param threads multiplied with connectionsPerHost, sets the maximum number of threads that - * may be waiting for a connection - */ - public synchronized void setThreadsAllowedToBlockForConnectionMultiplier(int threads) { - threadsAllowedToBlockForConnectionMultiplier = threads; - } - - /** - * - * @return The maximum time in milliseconds that threads wait for a connection - */ - public synchronized int getMaxWaitTime() { - return maxWaitTime; - } - - /** - * - * @param timeMS set the maximum time in milliseconds that threads wait for a connection - */ - public synchronized void setMaxWaitTime(int timeMS) { - maxWaitTime = timeMS; - } - - /** - * - * @return the connection timeout in milliseconds. - */ - public synchronized int getConnectTimeout() { - return connectTimeout; - } - - /** - * - * @param timeoutMS set the connection timeout in milliseconds.
- */ - public synchronized void setConnectTimeout(int timeoutMS) { - connectTimeout = timeoutMS; - } - - /** - * - * @return The socket timeout in milliseconds - */ - public synchronized int getSocketTimeout() { - return socketTimeout; - } - - /** - * - * @param timeoutMS set the socket timeout in milliseconds - */ - public synchronized void setSocketTimeout(int timeoutMS) { - socketTimeout = timeoutMS; - } - - /** - * - * @return connection keep-alive flag - */ - public synchronized boolean isSocketKeepAlive() { - return socketKeepAlive; - } - - /** - * - * @param keepAlive set connection keep-alive flag - */ - public synchronized void setSocketKeepAlive(boolean keepAlive) { - socketKeepAlive = keepAlive; - } - - /** - * - * @return keep trying connection flag - */ - public synchronized boolean isAutoConnectRetry() { - return autoConnectRetry; - } - - /** - * - * @param retry sets keep trying connection flag - */ - public synchronized void setAutoConnectRetry(boolean retry) { - autoConnectRetry = retry; - } - - /** - * - * @return max time in MS to retrying open connection - */ - public synchronized long getMaxAutoConnectRetryTime() { - return maxAutoConnectRetryTime; - } - - /** - * - * @param retryTimeMS set max time in MS to retrying open connection - */ - public synchronized void setMaxAutoConnectRetryTime(long retryTimeMS) { - maxAutoConnectRetryTime = retryTimeMS; - } - - /** - * - * @return the DBCallback decoding factory - */ - public synchronized DBDecoderFactory getDbDecoderFactory() { - return dbDecoderFactory; - } - - /** - * - * @param factory sets the DBCallback decoding factory - */ - public synchronized void setDbDecoderFactory(DBDecoderFactory factory) { - dbDecoderFactory = factory; - } - - /** - * - * @return the encoding factory - */ - public synchronized DBEncoderFactory getDbEncoderFactory() { - return dbEncoderFactory; - } - - /** - * - * @param factory sets the encoding factory - */ - public synchronized void setDbEncoderFactory(DBEncoderFactory factory) { - dbEncoderFactory = factory; - } - - /** - * - * @return true if driver uses WriteConcern.SAFE for all operations. - */ - public synchronized boolean isSafe() { - return safe; - } - - /** - * - * @param isSafe true if driver uses WriteConcern.SAFE for all operations. - */ - public synchronized void setSafe(boolean isSafe) { - safe = isSafe; - } - - /** - * - * @return value returns the number of writes of the global WriteConcern. - */ - public synchronized int getW() { - return w; - } - - /** - * - * @param val set the number of writes of the global WriteConcern. 
- */ - public synchronized void setW(int val) { - w = val; - } - - /** - * - * @return timeout for write operation - */ - public synchronized int getWtimeout() { - return wtimeout; - } - - /** - * - * @param timeoutMS sets timeout for write operation - */ - public synchronized void setWtimeout(int timeoutMS) { - wtimeout = timeoutMS; - } - - /** - * - * @return true if global write concern is set to fsync - */ - public synchronized boolean isFsync() { - return fsync; - } - - /** - * - * @param sync sets global write concern's fsync safe value - */ - public synchronized void setFsync(boolean sync) { - fsync = sync; - } - - /** - * - * @return true if global write concern is set to journal safe - */ - public synchronized boolean isJ() { - return j; - } - - /** - * - * @param safe sets global write concern's journal safe value - */ - public synchronized void setJ(boolean safe) { - j = safe; - } - - /** - * - * @param writeConcern sets the write concern - */ - public void setWriteConcern(final WriteConcern writeConcern) { - this.writeConcern = writeConcern; - } - - /** - * - * @return the socket factory for creating sockets to mongod - */ - public synchronized SocketFactory getSocketFactory() { - return socketFactory; - } - - /** - * - * @param factory sets the socket factory for creating sockets to mongod - */ - public synchronized void setSocketFactory(SocketFactory factory) { - socketFactory = factory; - } - - /** - * - * @return the read preference - */ - public ReadPreference getReadPreference() { - return readPreference; - } - - /** - * - * @param readPreference the read preference - */ - public void setReadPreference(ReadPreference readPreference) { - this.readPreference = readPreference; - } - - - /** - * - * @return whether DBCursor finalizer is enabled - */ - public boolean isCursorFinalizerEnabled() { - return cursorFinalizerEnabled; - } - - /** - * - * @param cursorFinalizerEnabled whether cursor finalizer is enabled - */ - public void setCursorFinalizerEnabled(final boolean cursorFinalizerEnabled) { - this.cursorFinalizerEnabled = cursorFinalizerEnabled; - - } - - /** - * - * @return true if the driver should always use MBeans, regardless of VM - */ - public boolean isAlwaysUseMBeans() { - return alwaysUseMBeans; - } - - /** - * - * @param alwaysUseMBeans sets whether the driver should always use MBeans, regardless of VM - */ - public void setAlwaysUseMBeans(final boolean alwaysUseMBeans) { - this.alwaysUseMBeans = alwaysUseMBeans; - } - - @Override - public String toString() { - return "MongoOptions{" + - "description='" + description + '\'' + - ", connectionsPerHost=" + connectionsPerHost + - ", threadsAllowedToBlockForConnectionMultiplier=" + threadsAllowedToBlockForConnectionMultiplier + - ", maxWaitTime=" + maxWaitTime + - ", connectTimeout=" + connectTimeout + - ", socketTimeout=" + socketTimeout + - ", socketKeepAlive=" + socketKeepAlive + - ", autoConnectRetry=" + autoConnectRetry + - ", maxAutoConnectRetryTime=" + maxAutoConnectRetryTime + - ", slaveOk=" + slaveOk + - ", readPreference=" + readPreference + - ", dbDecoderFactory=" + dbDecoderFactory + - ", dbEncoderFactory=" + dbEncoderFactory + - ", safe=" + safe + - ", w=" + w + - ", wtimeout=" + wtimeout + - ", fsync=" + fsync + - ", j=" + j + - ", socketFactory=" + socketFactory + - ", cursorFinalizerEnabled=" + cursorFinalizerEnabled + - ", writeConcern=" + writeConcern + - ", alwaysUseMBeans=" + alwaysUseMBeans + - '}'; - } -} diff --git a/src/main/com/mongodb/MongoSocketException.java 
b/src/main/com/mongodb/MongoSocketException.java deleted file mode 100644 index dc82c5ef2f7..00000000000 --- a/src/main/com/mongodb/MongoSocketException.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright (c) 2008 - 2013 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import java.io.IOException; - -/** - * Subclass of {@link MongoException} representing a network-related exception - */ -public class MongoSocketException extends MongoException { - - private static final long serialVersionUID = -4415279469780082174L; - - /** - * @param msg the message - * @param ioe the cause - */ - MongoSocketException(final String msg, final IOException ioe) { - super(-2, msg, ioe); - } - - /** - * @param ioe the cause - */ - MongoSocketException(final IOException ioe) { - super(ioe.toString(), ioe); - } -} diff --git a/src/main/com/mongodb/MongoTimeoutException.java b/src/main/com/mongodb/MongoTimeoutException.java deleted file mode 100644 index 939b42350ac..00000000000 --- a/src/main/com/mongodb/MongoTimeoutException.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2008 - 2013 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -public class MongoTimeoutException extends DBPortPool.NoMoreConnection{ - - private static final long serialVersionUID = -3016560214331826577L; - - /** - * Constructs a new instance with the given message. - * - * @param msg the message - */ - MongoTimeoutException(String msg) { - super(msg); - } -} diff --git a/src/main/com/mongodb/MongoURI.java b/src/main/com/mongodb/MongoURI.java deleted file mode 100644 index 1641ded8ded..00000000000 --- a/src/main/com/mongodb/MongoURI.java +++ /dev/null @@ -1,274 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.mongodb; - -import java.net.UnknownHostException; -import java.util.List; - - -/** - * Represents a URI - * which can be used to create a Mongo instance. The URI describes the hosts to - * be used and options. - * <p> - * This class has been superseded by {@code MongoClientURI}, and may be deprecated in a future release. - * <p> - * The format of the URI is: - * <pre> - *   mongodb://[username:password@]host1[:port1][,host2[:port2],...[,hostN[:portN]]][/[database][?options]] - * </pre> - * <ul> - * <li>{@code mongodb://} is a required prefix to identify that this is a string in the standard connection format.</li> - * <li>{@code username:password@} are optional. If given, the driver will attempt to login to a database after - * connecting to a database server.</li> - * <li>{@code host1} is the only required part of the URI. It identifies a server address to connect to.</li> - * <li>{@code :portX} is optional and defaults to :27017 if not provided.</li> - * <li>{@code /database} is the name of the database to login to and thus is only relevant if the - * {@code username:password@} syntax is used. If not specified the "admin" database will be used by default.</li> - * <li>{@code ?options} are connection options. Note that if {@code database} is absent there is still a {@code /} - * required between the last host and the {@code ?} introducing the options. Options are name=value pairs and the pairs - * are separated by "&amp;". For backwards compatibility, ";" is accepted as a separator in addition to "&amp;", - * but should be considered as deprecated.</li> - * </ul> - * <p> - * The Java driver supports the following options (case insensitive): - * <p> - * Replica set configuration: - * <ul> - * <li>{@code replicaSet=name}: Implies that the hosts given are a seed list, and the driver will attempt to find - * all members of the set.</li> - * </ul> - * <p> - * Connection configuration: - * <ul> - * <li>{@code connectTimeoutMS=ms}: How long a connection can take to be opened before timing out.</li> - * <li>{@code socketTimeoutMS=ms}: How long a send or receive on a socket can take before timing out.</li> - * </ul> - * <p> - * Connection pool configuration: - * <ul> - * <li>{@code maxPoolSize=n}: The maximum number of connections in the connection pool.</li> - * <li>{@code waitQueueMultiple=n}: this multiplier, multiplied with the maxPoolSize setting, gives the maximum number of - * threads that may be waiting for a connection to become available from the pool. All further threads will get an - * exception right away.</li> - * <li>{@code waitQueueTimeoutMS=ms}: The maximum wait time in milliseconds that a thread may wait for a connection to - * become available.</li> - * </ul> - * <p> - * Write concern configuration: - * <ul> - * <li>{@code safe=true|false} - * <ul> - * <li>{@code true}: the driver sends a getLastError command after every update to ensure that the update succeeded - * (see also {@code w} and {@code wtimeoutMS}).</li> - * <li>{@code false}: the driver does not send a getLastError command after every update.</li> - * </ul></li> - * <li>{@code w=wValue} - * <ul> - * <li>The driver adds { w : wValue } to the getLastError command. Implies {@code safe=true}.</li> - * <li>wValue is typically a number, but can be any string in order to allow for specifications like - * {@code "majority"}</li> - * </ul></li> - * <li>{@code wtimeoutMS=ms} - * <ul> - * <li>The driver adds { wtimeout : ms } to the getlasterror command. Implies {@code safe=true}.</li> - * <li>Used in combination with {@code w}</li> - * </ul></li> - * </ul> - * <p> - * Read preference configuration: - * <ul> - * <li>{@code slaveOk=true|false}: Whether a driver connected to a replica set will send reads to slaves/secondaries.</li> - * <li>{@code readPreference=enum}: The read preference for this connection. If set, it overrides any slaveOk value. - * <ul> - * <li>Enumerated values: - * <ul> - * <li>{@code primary}</li> - * <li>{@code primaryPreferred}</li> - * <li>{@code secondary}</li> - * <li>{@code secondaryPreferred}</li> - * <li>{@code nearest}</li> - * </ul></li> - * </ul></li> - * <li>{@code readPreferenceTags=string}. A representation of a tag set as a comma-separated list of colon-separated - * key-value pairs, e.g. {@code "dc:ny,rack:1"}. Spaces are stripped from the beginning and end of all keys and values. - * To specify a list of tag sets, use multiple readPreferenceTags, - * e.g. {@code readPreferenceTags=dc:ny,rack:1;readPreferenceTags=dc:ny;readPreferenceTags=} - * <ul> - * <li>Note the empty value for the last one, which means match any secondary as a last resort.</li> - * <li>Order matters when using multiple readPreferenceTags.</li> - * </ul></li> - * </ul> - * - * @see MongoClientURI - * @see MongoOptions for the default values for all options - */ -public class MongoURI { - - /** - * The prefix for mongodb URIs. - */ - public static final String MONGODB_PREFIX = "mongodb://"; - - private final MongoClientURI mongoClientURI; - private final MongoOptions mongoOptions; - - /** - * Creates a MongoURI from a string. - * @param uri the URI - * @dochub connections - * - * @deprecated Replaced by {@link MongoClientURI#MongoClientURI(String)} - * - */ - @Deprecated - public MongoURI( String uri ) { - this.mongoClientURI = new MongoClientURI(uri, new MongoClientOptions.Builder().legacyDefaults()); - mongoOptions = new MongoOptions(mongoClientURI.getOptions()); - } - - @Deprecated - public MongoURI(final MongoClientURI mongoClientURI) { - this.mongoClientURI = mongoClientURI; - mongoOptions = new MongoOptions(mongoClientURI.getOptions()); - } - - // --------------------------------- - - /** - * Gets the username - * @return the username - */ - public String getUsername(){ - return mongoClientURI.getUsername(); - } - - /** - * Gets the password - * @return the password - */ - public char[] getPassword(){ - return mongoClientURI.getPassword(); - } - - /** - * Gets the list of hosts - * @return the list of hosts - */ - public List<String> getHosts(){ - return mongoClientURI.getHosts(); - } - - /** - * Gets the database name - * @return the database name - */ - public String getDatabase(){ - return mongoClientURI.getDatabase(); - } - - /** - * Gets the collection name - * @return the collection name - */ - public String getCollection(){ - return mongoClientURI.getCollection(); - } - - /** - * Gets the credentials - * - * @since 2.11.0 - */ - public MongoCredential getCredentials() { - return mongoClientURI.getCredentials(); - } - - /** - * Gets the options. This method will return the same instance of {@code MongoOptions} for every call, so it's - * possible to mutate the returned instance to change the defaults. - * @return the mongo options - */ - public MongoOptions getOptions(){ - return mongoOptions; - } - - /** - * Creates a Mongo instance based on the URI. - * @return a new Mongo instance. There is no caching, so each call will create a new instance, each of which - * must be closed manually. - * @throws MongoException - * @throws UnknownHostException - */ - @SuppressWarnings("deprecation") - public Mongo connect() - throws UnknownHostException { - // TODO caching? - // Note: we can't change this to new MongoClient(this) as that would silently change the default write concern. - return new Mongo(this); - } - - /** - * Returns the DB object from a newly created Mongo instance based on this URI. - * @return the database specified in the URI. This will implicitly create a new Mongo instance, - * which must be closed manually. - * @throws MongoException - * @throws UnknownHostException - */ - public DB connectDB() throws UnknownHostException { - return connect().getDB(getDatabase()); - } - - /** - * Returns the URI's DB object from a given Mongo instance. - * @param mongo the Mongo instance to get the database from.
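
The URI grammar documented above is easier to read against a concrete connection string. A small sketch (host, credential and database names are hypothetical), using the MongoClientURI replacement that the javadoc points to:

    import com.mongodb.DB;
    import com.mongodb.MongoClient;
    import com.mongodb.MongoClientURI;

    class UriSketch {
        public static void main(String[] args) throws Exception {
            // Hypothetical connection string: two seed hosts, a database, and two options.
            MongoClientURI uri = new MongoClientURI(
                    "mongodb://user:pass@host1:27017,host2:27017/mydb?replicaSet=rs0&connectTimeoutMS=5000");

            MongoClient client = new MongoClient(uri); // preferred over the deprecated new MongoURI(...).connect()
            DB db = client.getDB(uri.getDatabase());   // "mydb"
            System.out.println(db.getName());
            client.close();
        }
    }
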
- * @return the database specified in this URI - */ - public DB connectDB( Mongo mongo ){ - return mongo.getDB( getDatabase() ); - } - - /** - * returns the URI's Collection from a given DB object - * @param db the database to get the collection from - * @return - */ - public DBCollection connectCollection( DB db ){ - return db.getCollection( getCollection() ); - } - - /** - * returns the URI's Collection from a given Mongo instance - * @param mongo the mongo instance to get the collection from - * @return the collection specified in this URI - */ - public DBCollection connectCollection( Mongo mongo ){ - return connectDB( mongo ).getCollection( getCollection() ); - } - - // --------------------------------- - - @Override - public String toString() { - return mongoClientURI.toString(); - } - - MongoClientURI toClientURI() { - return mongoClientURI; - } -} diff --git a/src/main/com/mongodb/MongoWaitQueueFullException.java b/src/main/com/mongodb/MongoWaitQueueFullException.java deleted file mode 100644 index 532546489c2..00000000000 --- a/src/main/com/mongodb/MongoWaitQueueFullException.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (c) 2008 - 2013 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -public class MongoWaitQueueFullException extends DBPortPool.NoMoreConnection { - - private static final long serialVersionUID = 1482094507852255793L; - - /** - * Constructs a new instance with the given message. - * - * @param msg the message - */ - MongoWaitQueueFullException(String msg) { - super(msg); - } -} diff --git a/src/main/com/mongodb/MongosStatus.java b/src/main/com/mongodb/MongosStatus.java deleted file mode 100644 index 1a479380f7c..00000000000 --- a/src/main/com/mongodb/MongosStatus.java +++ /dev/null @@ -1,134 +0,0 @@ -/** - * Copyright (c) 2008 - 2012 10gen, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - *
        - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import java.util.ArrayList; -import java.util.List; -import java.util.logging.Level; -import java.util.logging.Logger; - -/** - * A connection to a set of mongos servers. - */ -class MongosStatus extends ConnectionStatus { - - private static final Logger logger = Logger.getLogger("com.mongodb.MongosStatus"); - - MongosStatus(Mongo mongo, List mongosAddresses) { - super(mongosAddresses, mongo); - _updater = new MongosUpdater(); - } - - @Override - boolean hasServerUp() { - return preferred != null; - } - - @Override - Node ensureMaster() { - checkClosed(); - return getPreferred(); - } - - - @Override - List getServerAddressList() { - return new ArrayList(_mongosAddresses); - } - - class MongosUpdater extends BackgroundUpdater { - MongosUpdater() { - super("MongosStatus:MongosUpdater"); - } - - @Override - public void run() { - List mongosNodes = getMongosNodes(); - try { - while (!Thread.interrupted()) { - try { - MongosNode bestThisPass = null; - for (MongosNode cur : mongosNodes) { - cur.update(); - if (cur.isOk()) { - if (bestThisPass == null || (cur._pingTimeMS < bestThisPass._pingTimeMS)) { - bestThisPass = cur; - } - } - } - setPreferred(bestThisPass); - } catch (Exception e) { - logger.log(Level.WARNING, "couldn't do update pass", e); - } - - int sleepTime = preferred == null ? updaterIntervalNoMasterMS : updaterIntervalMS; - Thread.sleep(sleepTime); - } - } catch (InterruptedException e) { - logger.log(Level.INFO, "Exiting background thread"); - // Allow thread to exit - } - } - - private List getMongosNodes() { - List mongosNodes = new ArrayList(_mongosAddresses.size()); - for (ServerAddress serverAddress : _mongosAddresses) { - mongosNodes.add(new MongosNode(serverAddress, _mongo, _mongoOptions)); - } - return mongosNodes; - } - } - - static class MongosNode extends UpdatableNode { - MongosNode(final ServerAddress addr, Mongo mongo, MongoOptions mongoOptions) { - super(addr, mongo, mongoOptions); - } - - @Override - protected Logger getLogger() { - return logger; - } - } - - // Sends a notification every time preferred is set. - private synchronized void setPreferred(final MongosNode bestThisPass) { - if (bestThisPass == null) { - preferred = null; - } else { - preferred = new Node(bestThisPass._pingTimeMS, bestThisPass._addr, bestThisPass._maxBsonObjectSize, bestThisPass.isOk()); - } - notifyAll(); - } - - // Gets the current preferred node. If there is no preferred node, wait to get a notification before returning null. - private synchronized Node getPreferred() { - if (preferred == null) { - try { - synchronized (this) { - wait(_mongo.getMongoOptions().getConnectTimeout()); - } - } catch (InterruptedException e) { - throw new MongoInterruptedException("Interrupted while waiting for next update to mongos status", e); - } - } - return preferred; - } - - // The current preferred mongos Node to use as the master. This is not necessarily the node that is currently in use. - // Rather, it's the node that is preferred if there is a problem with the currently in use node. 
- private volatile Node preferred; -} \ No newline at end of file diff --git a/src/main/com/mongodb/NativeAuthenticationHelper.java b/src/main/com/mongodb/NativeAuthenticationHelper.java deleted file mode 100644 index 7290f8e23f5..00000000000 --- a/src/main/com/mongodb/NativeAuthenticationHelper.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright (c) 2008 - 2013 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.mongodb; - -import com.mongodb.util.Util; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; - -class NativeAuthenticationHelper { - - static DBObject getAuthCommand(String userName, char[] password, String nonce) { - return getAuthCommand(userName, createHash(userName, password), nonce); - } - - static DBObject getAuthCommand(String userName, byte[] authHash, String nonce) { - String key = nonce + userName + new String(authHash); - - BasicDBObject cmd = new BasicDBObject(); - - cmd.put("authenticate", 1); - cmd.put("user", userName); - cmd.put("nonce", nonce); - cmd.put("key", Util.hexMD5(key.getBytes())); - - return cmd; - } - - static BasicDBObject getNonceCommand() { - return new BasicDBObject("getnonce", 1); - } - - static byte[] createHash(String userName, char[] password) { - ByteArrayOutputStream bout = new ByteArrayOutputStream(userName.length() + 20 + password.length); - try { - bout.write(userName.getBytes()); - bout.write(":mongo:".getBytes()); - for (final char ch : password) { - if (ch >= 128) - throw new IllegalArgumentException("can't handle non-ascii passwords yet"); - bout.write((byte) ch); - } - } catch (IOException ioe) { - throw new RuntimeException("impossible", ioe); - } - return Util.hexMD5(bout.toByteArray()).getBytes(); - } - - private NativeAuthenticationHelper() { - } -} diff --git a/src/main/com/mongodb/OutMessage.java b/src/main/com/mongodb/OutMessage.java deleted file mode 100644 index 3f0b4b8f70d..00000000000 --- a/src/main/com/mongodb/OutMessage.java +++ /dev/null @@ -1,306 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
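
NativeAuthenticationHelper above implements the legacy MONGODB-CR handshake: the stored credential is hex(MD5(user + ":mongo:" + password)), and the "key" field of the authenticate command is hex(MD5(nonce + user + credential)). A JDK-only sketch of those two digests (Util.hexMD5 is driver-internal; user, password and nonce values are hypothetical, and the driver itself writes raw ASCII bytes, which matches UTF-8 for the ASCII-only passwords it accepts):

    import java.nio.charset.StandardCharsets;
    import java.security.MessageDigest;

    class MongoCrSketch {
        // Hex-encode a byte array, as the driver's Util.hexMD5 does after digesting.
        static String hex(byte[] bytes) {
            StringBuilder sb = new StringBuilder();
            for (byte b : bytes) sb.append(String.format("%02x", b));
            return sb.toString();
        }

        static String md5Hex(String s) throws Exception {
            return hex(MessageDigest.getInstance("MD5").digest(s.getBytes(StandardCharsets.UTF_8)));
        }

        public static void main(String[] args) throws Exception {
            String user = "alice", password = "secret", nonce = "abc123";
            String credential = md5Hex(user + ":mongo:" + password); // what createHash() produces
            String key = md5Hex(nonce + user + credential);          // the "key" field of the authenticate command
            System.out.println(key);
        }
    }
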
- */ - -package com.mongodb; - -import org.bson.BSONObject; -import org.bson.BasicBSONEncoder; -import org.bson.io.PoolOutputBuffer; -import org.bson.types.ObjectId; - -import java.io.IOException; -import java.io.OutputStream; -import java.util.Collection; -import java.util.concurrent.atomic.AtomicInteger; - -class OutMessage extends BasicBSONEncoder { - - enum OpCode { - OP_UPDATE(2001), - OP_INSERT(2002), - OP_QUERY(2004), - OP_GETMORE(2005), - OP_DELETE(2006), - OP_KILL_CURSORS(2007); - - OpCode(int value) { - this.value = value; - } - - private final int value; - - public int getValue() { - return value; - } - } - - static AtomicInteger REQUEST_ID = new AtomicInteger(1); - - public static OutMessage insert(final DBCollection collection, final DBEncoder encoder, WriteConcern concern) { - OutMessage om = new OutMessage(collection, OpCode.OP_INSERT, encoder); - om.writeInsertPrologue(concern); - - return om; - } - - public static OutMessage update(final DBCollection collection, final DBEncoder encoder, - final boolean upsert, final boolean multi, final DBObject query, final DBObject o) { - OutMessage om = new OutMessage(collection, OpCode.OP_UPDATE, encoder, query); - om.writeUpdate(upsert, multi, query, o); - - return om; - } - - public static OutMessage remove(final DBCollection collection, final DBEncoder encoder, final DBObject query) { - OutMessage om = new OutMessage(collection, OpCode.OP_DELETE, encoder, query); - om.writeRemove(); - - return om; - } - - static OutMessage query( DBCollection collection , int options , int numToSkip , int batchSize , DBObject query , DBObject fields ){ - return query( collection, options, numToSkip, batchSize, query, fields, ReadPreference.primary() ); - } - - static OutMessage query( DBCollection collection , int options , int numToSkip , int batchSize , DBObject query , DBObject fields, ReadPreference readPref ){ - return query( collection, options, numToSkip, batchSize, query, fields, readPref, DefaultDBEncoder.FACTORY.create()); - } - - static OutMessage query( DBCollection collection , int options , int numToSkip , int batchSize , DBObject query , DBObject fields, ReadPreference readPref, DBEncoder enc ){ - OutMessage om = new OutMessage(collection, enc, query, options, readPref); - om.writeQuery(fields, numToSkip, batchSize); - - return om; - } - - static OutMessage getMore(DBCollection collection, long cursorId, int batchSize) { - OutMessage om = new OutMessage(collection, OpCode.OP_GETMORE); - om.writeGetMore(cursorId, batchSize); - - return om; - } - - static OutMessage killCursors(Mongo mongo, int numCursors) { - OutMessage om = new OutMessage(mongo , OpCode.OP_KILL_CURSORS); - om.writeKillCursorsPrologue(numCursors); - - return om; - } - - private OutMessage( Mongo m , OpCode opCode ){ - this(null, m, opCode, null); - } - - private OutMessage(final DBCollection collection, final OpCode opCode) { - this(collection, opCode, null); - } - - private OutMessage(final DBCollection collection, final OpCode opCode, final DBEncoder enc) { - this(collection, collection.getDB().getMongo(), opCode, enc); - } - - private OutMessage(final DBCollection collection, final Mongo m, final OpCode opCode, final DBEncoder enc) { - this(collection, m, opCode, enc, null, -1, null); - } - - private OutMessage(final DBCollection collection, final OpCode opCode, final DBEncoder enc, final DBObject query) { - this(collection, collection.getDB().getMongo(), opCode, enc, query, 0, null); - } - - private OutMessage(final DBCollection collection, final DBEncoder enc, 
final DBObject query, final int options, final ReadPreference readPref) { - this(collection, collection.getDB().getMongo(), OpCode.OP_QUERY, enc, query, options, readPref); - } - - private OutMessage(final DBCollection collection, final Mongo m, OpCode opCode, final DBEncoder enc, final DBObject query, final int options, final ReadPreference readPref) { - _collection = collection; - _mongo = m; - _encoder = enc; - - _buffer = _mongo._bufferPool.get(); - _buffer.reset(); - set(_buffer); - - _id = REQUEST_ID.getAndIncrement(); - _opCode = opCode; - - writeMessagePrologue(opCode); - - if (query == null) { - _query = null; - _queryOptions = 0; - } else { - _query = query; - - int allOptions = options; - if (readPref != null && readPref.isSlaveOk()) { - allOptions |= Bytes.QUERYOPTION_SLAVEOK; - } - - _queryOptions = allOptions; - } - } - - private void writeInsertPrologue(final WriteConcern concern) { - int flags = 0; - if (concern.getContinueOnErrorForInsert()) { - flags |= 1; - } - writeInt(flags); - writeCString(_collection.getFullName()); - } - - private void writeUpdate(final boolean upsert, final boolean multi, final DBObject query, final DBObject o) { - writeInt(0); // reserved - writeCString(_collection.getFullName()); - - int flags = 0; - if ( upsert ) flags |= 1; - if ( multi ) flags |= 2; - writeInt(flags); - - putObject(query); - putObject(o); - } - - private void writeRemove() { - writeInt(0); // reserved - writeCString(_collection.getFullName()); - - Collection keys = _query.keySet(); - - if ( keys.size() == 1 && keys.iterator().next().equals( "_id" ) && _query.get( keys.iterator().next() ) instanceof ObjectId) - writeInt( 1 ); - else - writeInt( 0 ); - - putObject(_query); - } - - private void writeGetMore(final long cursorId, final int batchSize) { - writeInt(0); - writeCString(_collection.getFullName()); - writeInt(batchSize); - writeLong(cursorId); - } - - private void writeKillCursorsPrologue(final int numCursors) { - writeInt(0); // reserved - writeInt(numCursors); - } - - private void writeQuery(final DBObject fields, final int numToSkip, final int batchSize) { - writeInt(_queryOptions); - writeCString(_collection.getFullName()); - - writeInt(numToSkip); - writeInt(batchSize); - - putObject(_query); - if (fields != null) - putObject(fields); - } - - private void writeMessagePrologue(final OpCode opCode) { - writeInt( 0 ); // length: will set this later - writeInt( _id ); - writeInt( 0 ); // response to - writeInt( opCode.getValue() ); - } - - void prepare(){ - if (_buffer == null) { - throw new IllegalStateException("Already closed"); - } - - _buffer.writeInt( 0 , _buffer.size() ); - } - - void pipe( OutputStream out ) throws IOException { - if (_buffer == null) { - throw new IllegalStateException("Already closed"); - } - - _buffer.pipe( out ); - } - - int size() { - if (_buffer == null) { - throw new IllegalStateException("Already closed"); - } - - return _buffer.size(); - } - - void doneWithMessage() { - if (_buffer == null) { - throw new IllegalStateException("Only call this once per instance"); - } - - _buffer.reset(); - _mongo._bufferPool.done(_buffer); - _buffer = null; - done(); - } - - boolean hasOption( int option ){ - return ( _queryOptions & option ) != 0; - } - - int getId(){ - return _id; - } - - OpCode getOpCode() { - return _opCode; - } - - DBObject getQuery() { - return _query; - } - - String getNamespace() { - return _collection != null ? 
_collection.getFullName() : null; - } - - int getNumDocuments() { - return _numDocuments; - } - - @Override - public int putObject(BSONObject o) { - if (_buffer == null) { - throw new IllegalStateException("Already closed"); - } - - // check max size - int objectSize = _encoder.writeObject(_buf, o); - if (objectSize > Math.max(_mongo.getConnector().getMaxBsonObjectSize(), Bytes.MAX_OBJECT_SIZE)) { - throw new MongoInternalException("DBObject of size " + objectSize + " is over Max BSON size " + _mongo.getMaxBsonObjectSize()); - } - _numDocuments++; - return objectSize; - } - - private final Mongo _mongo; - private final DBCollection _collection; - private PoolOutputBuffer _buffer; - private final int _id; - private final OpCode _opCode; - private final int _queryOptions; - private final DBObject _query; - private final DBEncoder _encoder; - private volatile int _numDocuments; // only one thread will modify this field, so volatile is sufficient synchronization -} diff --git a/src/main/com/mongodb/QueryBuilder.java b/src/main/com/mongodb/QueryBuilder.java deleted file mode 100644 index 68a56bb4cca..00000000000 --- a/src/main/com/mongodb/QueryBuilder.java +++ /dev/null @@ -1,431 +0,0 @@ -/* QueryBuilder.java - * - * modified April 11, 2012 by Bryan Reinero - * added $nearSphere, $centerSphere and $within $polygon query support - */ - -/** - * Copyright (C) 2010 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.regex.Pattern; - -/** - * Utility for creating DBObject queries - * @author Julson Lim - * - */ -public class QueryBuilder { - - /** - * Creates a builder with an empty query - */ - public QueryBuilder() { - _query = new BasicDBObject(); - } - - /** - * returns a new QueryBuilder - * @return - */ - public static QueryBuilder start() { - return new QueryBuilder(); - } - - /** - * Creates a new query with a document key - * @param key MongoDB document key - * @return Returns a new QueryBuilder - */ - public static QueryBuilder start(String key) { - return (new QueryBuilder()).put(key); - } - - /** - * Adds a new key to the query if not present yet. - * Sets this key as the current key. - * @param key MongoDB document key - * @return this - */ - public QueryBuilder put(String key) { - _currentKey = key; - if(_query.get(key) == null) { - _query.put(_currentKey, new NullObject()); - } - return this; - } - - /** - * Equivalent to QueryBuilder.put(key). Intended for compound query chains to be more readable, e.g. 
- * {@code QueryBuilder.start("a").greaterThan(1).and("b").lessThan(3) } - * @param key MongoDB document key - * @return this - */ - public QueryBuilder and(String key) { - return put(key); - } - - /** - * Equivalent to the $gt operator - * @param object Value to query - * @return Returns the current QueryBuilder with an appended "greater than" query - */ - public QueryBuilder greaterThan(Object object) { - addOperand(QueryOperators.GT, object); - return this; - } - - /** - * Equivalent to the $gte operator - * @param object Value to query - * @return Returns the current QueryBuilder with an appended "greater than or equals" query - */ - public QueryBuilder greaterThanEquals(Object object) { - addOperand(QueryOperators.GTE, object); - return this; - } - - /** - * Equivalent to the $lt operand - * @param object Value to query - * @return Returns the current QueryBuilder with an appended "less than" query - */ - public QueryBuilder lessThan(Object object) { - addOperand(QueryOperators.LT, object); - return this; - } - - /** - * Equivalent to the $lte operand - * @param object Value to query - * @return Returns the current QueryBuilder with an appended "less than or equals" query - */ - public QueryBuilder lessThanEquals(Object object) { - addOperand(QueryOperators.LTE, object); - return this; - } - - /** - * Equivalent of the find({key:value}) - * @param object Value to query - * @return Returns the current QueryBuilder with an appended equality query - */ - public QueryBuilder is(Object object) { - addOperand(null, object); - return this; - } - - /** - * Equivalent of the $ne operand - * @param object Value to query - * @return Returns the current QueryBuilder with an appended inequality query - */ - public QueryBuilder notEquals(Object object) { - addOperand(QueryOperators.NE, object); - return this; - } - - /** - * Equivalent of the $in operand - * @param object Value to query - * @return Returns the current QueryBuilder with an appended "in array" query - */ - public QueryBuilder in(Object object) { - addOperand(QueryOperators.IN, object); - return this; - } - - /** - * Equivalent of the $nin operand - * @param object Value to query - * @return Returns the current QueryBuilder with an appended "not in array" query - */ - public QueryBuilder notIn(Object object) { - addOperand(QueryOperators.NIN, object); - return this; - } - - /** - * Equivalent of the $mod operand - * @param object Value to query - * @return Returns the current QueryBuilder with an appended modulo query - */ - public QueryBuilder mod(Object object) { - addOperand(QueryOperators.MOD, object); - return this; - } - - /** - * Equivalent of the $all operand - * @param object Value to query - * @return Returns the current QueryBuilder with an appended "matches all array contents" query - */ - public QueryBuilder all(Object object) { - addOperand(QueryOperators.ALL, object); - return this; - } - - /** - * Equivalent of the $size operand - * @param object Value to query - * @return Returns the current QueryBuilder with an appended size operator - */ - public QueryBuilder size(Object object) { - addOperand(QueryOperators.SIZE, object); - return this; - } - - /** - * Equivalent of the $exists operand - * @param object Value to query - * @return Returns the current QueryBuilder with an appended exists operator - */ - public QueryBuilder exists(Object object) { - addOperand(QueryOperators.EXISTS, object); - return this; - } - - /** - * Passes a regular expression for a query - * @param regex Regex pattern object - * @return Returns 
the current QueryBuilder with an appended regex query - */ - public QueryBuilder regex(Pattern regex) { - addOperand(null, regex); - return this; - } - - /** - * Equivalent to the $elemMatch operand - * @param match the object to match - * @return Returns the current QueryBuilder with an appended elemMatch operator - */ - public QueryBuilder elemMatch(final DBObject match) { - addOperand(QueryOperators.ELEM_MATCH, match); - return this; - } - - - - /** - * Equivalent of the $within operand, used for geospatial operations - * @param x x coordinate - * @param y y coordinate - * @param radius radius - * @return - */ - public QueryBuilder withinCenter( double x , double y , double radius ){ - addOperand( QueryOperators.WITHIN , - new BasicDBObject(QueryOperators.CENTER, new Object[]{ new Double[]{ x , y } , radius } ) ); - return this; - } - - /** - * Equivalent of the $near operand - * @param x x coordinate - * @param y y coordinate - * @return - */ - public QueryBuilder near( double x , double y ){ - addOperand(QueryOperators.NEAR, - new Double[]{ x , y } ); - return this; - } - - /** - * Equivalent of the $near operand - * @param x x coordinate - * @param y y coordinate - * @param maxDistance max distance - * @return - */ - public QueryBuilder near( double x , double y , double maxDistance ){ - addOperand( QueryOperators.NEAR , - new Double[]{ x , y , maxDistance } ); - return this; - } - - /** - * Equivalent of the $nearSphere operand - * @param longitude coordinate in decimal degrees - * @param latitude coordinate in decimal degrees - * @return - */ - public QueryBuilder nearSphere( double longitude , double latitude ){ - addOperand(QueryOperators.NEAR_SPHERE, - new Double[]{ longitude , latitude } ); - return this; - } - - /** - * Equivalent of the $nearSphere operand - * @param longitude coordinate in decimal degrees - * @param latitude coordinate in decimal degrees - * @param maxDistance max spherical distance - * @return - */ - public QueryBuilder nearSphere( double longitude , double latitude , double maxDistance ){ - addOperand( QueryOperators.NEAR_SPHERE , - new Double[]{ longitude , latitude , maxDistance } ); - return this; - } - - /** - * Equivalent of the $centerSphere operand - * mostly intended for queries up to a few hundred miles or km. - * @param longitude coordinate in decimal degrees - * @param latitude coordinate in decimal degrees - * @param maxDistance max spherical distance - * @return - */ - public QueryBuilder withinCenterSphere( double longitude , double latitude , double maxDistance ){ - addOperand( QueryOperators.WITHIN , - new BasicDBObject(QueryOperators.CENTER_SPHERE, new Object[]{ new Double[]{longitude , latitude} , maxDistance } ) ); - return this; - } - - /** - * Equivalent to a $within operand, based on a bounding box represented by two corners - * - * @param x the x coordinate of the first box corner. - * @param y the y coordinate of the first box corner. - * @param x2 the x coordinate of the second box corner. - * @param y2 the y coordinate of the second box corner.
- * @return this - */ - public QueryBuilder withinBox(double x, double y, double x2, double y2) { - addOperand( QueryOperators.WITHIN , - new BasicDBObject(QueryOperators.BOX, new Object[] { new Double[] { x, y }, new Double[] { x2, y2 } } ) ); - return this; - } - - /** - * Equivalent to a $within operator, based on a bounding polygon represented by an array of points - * - * @param points an array of Double[] defining the vertices of the search area - * @return this - */ - public QueryBuilder withinPolygon(List points) { - if(points == null || points.isEmpty() || points.size() < 3) - throw new IllegalArgumentException("a polygon requires at least 3 vertices"); - addOperand( QueryOperators.WITHIN , - new BasicDBObject(QueryOperators.POLYGON, points ) ); - return this; - } - - /** - * Equivalent to $not meta operator. Must be followed by an operand, not a value, e.g. - * {@code QueryBuilder.start("val").not().mod(Arrays.asList(10, 1)) } - * - * @return Returns the current QueryBuilder with an appended "not" meta operator - */ - public QueryBuilder not() { - _hasNot = true; - return this; - } - - /** - * Equivalent to the $or operator - * @param ors the list of conditions to or together - * @return Returns the current QueryBuilder with appended "or" operator - */ - @SuppressWarnings("unchecked") - public QueryBuilder or( DBObject ... ors ){ - List l = (List)_query.get( QueryOperators.OR ); - if ( l == null ){ - l = new ArrayList(); - _query.put( QueryOperators.OR , l ); - } - Collections.addAll(l, ors); - return this; - } - - /** - * Equivalent to the $and operator - * @param ands the list of conditions to and together - * @return Returns the current QueryBuilder with appended "and" operator - */ - @SuppressWarnings("unchecked") - public QueryBuilder and( DBObject ...
ands ){ - List l = (List)_query.get( QueryOperators.AND ); - if ( l == null ){ - l = new ArrayList(); - _query.put( QueryOperators.AND , l ); - } - Collections.addAll(l, ands); - return this; - } - - /** - * Creates a DBObject query to be used for the driver's find operations - * @return Returns a DBObject query instance - * @throws RuntimeException if a key does not have a matching operand - */ - public DBObject get() { - for(String key : _query.keySet()) { - if(_query.get(key) instanceof NullObject) { - throw new QueryBuilderException("No operand for key:" + key); - } - } - return _query; - } - - private void addOperand(String op, Object value) { - if(op == null) { - if (_hasNot) { - value = new BasicDBObject(QueryOperators.NOT, value); - _hasNot = false; - } - _query.put(_currentKey, value); - return; - } - - Object storedValue = _query.get(_currentKey); - BasicDBObject operand; - if(!(storedValue instanceof DBObject)) { - operand = new BasicDBObject(); - if (_hasNot) { - DBObject notOperand = new BasicDBObject(QueryOperators.NOT, operand); - _query.put(_currentKey, notOperand); - _hasNot = false; - } else { - _query.put(_currentKey, operand); - } - } else { - operand = (BasicDBObject)_query.get(_currentKey); - if (operand.get(QueryOperators.NOT) != null) { - operand = (BasicDBObject) operand.get(QueryOperators.NOT); - } - } - operand.put(op, value); - } - @SuppressWarnings("serial") - static class QueryBuilderException extends RuntimeException { - QueryBuilderException(String message) { - super(message); - } - } - private static class NullObject {} - - private DBObject _query; - private String _currentKey; - private boolean _hasNot; - -} diff --git a/src/main/com/mongodb/QueryOpBuilder.java b/src/main/com/mongodb/QueryOpBuilder.java deleted file mode 100644 index 94f351a1fab..00000000000 --- a/src/main/com/mongodb/QueryOpBuilder.java +++ /dev/null @@ -1,193 +0,0 @@ -package com.mongodb; - -/** - * Utility for constructing Query operation command with query, orderby, hint, explain, snapshot. 
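For reference, the QueryBuilder class removed above chains key/operator pairs into a single DBObject, and get() is where the missing-operand check fires. A hypothetical usage sketch (field names and values are illustrative; the no-arg constructor is declared earlier in the deleted file):

// Builds { "a" : { "$gt" : 1 , "$lt" : 10 } , "b" : { "$ne" : "x" } }
DBObject q = QueryBuilder.start("a").greaterThan(1).lessThan(10)
        .and("b").notEquals("x")
        .get();

// $or clauses accumulate in a list under the "$or" key:
DBObject either = new QueryBuilder().or(
        QueryBuilder.start("status").is("A").get(),
        QueryBuilder.start("qty").lessThan(30).get()).get();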
- */ -class QueryOpBuilder { - - static final String READ_PREFERENCE_META_OPERATOR = "$readPreference"; - - private DBObject query; - private DBObject orderBy; - private DBObject hintObj; - private String hintStr; - private boolean explain; - private boolean snapshot; - private ReadPreference readPref; - - private DBObject specialFields; - - public QueryOpBuilder(){ - } - - - /** - * Adds the query clause to the operation - * @param query the query clause - * @return this - */ - public QueryOpBuilder addQuery(DBObject query){ - this.query = query; - return this; - } - - /** - * Adds the orderby clause to the operation - * @param orderBy the sort specification - * @return this - */ - public QueryOpBuilder addOrderBy(DBObject orderBy){ - this.orderBy = orderBy; - return this; - } - - /** - * Adds the hint clause to the operation - * @param hint the name of the index to hint - * @return this - */ - public QueryOpBuilder addHint(String hint){ - this.hintStr = hint; - return this; - } - - /** - * Adds the hint clause to the operation - * @param hint the keys of the index to hint - * @return this - */ - public QueryOpBuilder addHint(DBObject hint){ - this.hintObj = hint; - return this; - } - - - /** - * Adds special fields to the operation - * @param specialFields the special fields to merge into the operation - * @return this - */ - public QueryOpBuilder addSpecialFields(DBObject specialFields){ - this.specialFields = specialFields; - return this; - } - - /** - * Adds the explain flag to the operation - * @param explain whether to explain the query instead of running it - * @return this - */ - public QueryOpBuilder addExplain(boolean explain){ - this.explain = explain; - return this; - } - - /** - * Adds the snapshot flag to the operation - * @param snapshot whether to run the query in snapshot mode - * @return this - */ - public QueryOpBuilder addSnapshot(boolean snapshot){ - this.snapshot = snapshot; - return this; - } - - /** - * Adds a read preference to the query operation - * - * @param readPref the read preference - * @return this - */ - public QueryOpBuilder addReadPreference(ReadPreference readPref) { - this.readPref = readPref; - return this; - } - - /** - * Constructs the query operation DBObject - * @return DBObject representing the query command to be sent to server - */ - public DBObject get() { - DBObject lclQuery = query; - - //must always have a query - if (lclQuery == null) { - lclQuery = new BasicDBObject(); - } - - if (hasSpecialQueryFields()) { - DBObject queryop = (specialFields == null ? new BasicDBObject() : specialFields); - - addToQueryObject(queryop, "$query", lclQuery, true); - addToQueryObject(queryop, "$orderby", orderBy, false); - if (hintStr != null) - addToQueryObject(queryop, "$hint", hintStr); - if (hintObj != null) - addToQueryObject(queryop, "$hint", hintObj); - - if (explain) - queryop.put("$explain", true); - if (snapshot) - queryop.put("$snapshot", true); - if (readPref != null && readPref != ReadPreference.primary()) - queryop.put(READ_PREFERENCE_META_OPERATOR, readPref.toDBObject()); - - return queryop; - } - - return lclQuery; - } - - private boolean hasSpecialQueryFields(){ - - if ( readPref != null ) - return true; - - if ( specialFields != null ) - return true; - - if ( orderBy != null && orderBy.keySet().size() > 0 ) - return true; - - if ( hintStr != null || hintObj != null || snapshot || explain) - return true; - - return false; - } - - /** - * Adds a DBObject to the operation - * @param dbobj DBObject to add field to - * @param field name of the field - * @param obj object to add to the operation. Ignore if null. - * @param sendEmpty if true adds obj even if it's empty. Ignore if false and obj is empty.
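The get() method above is the pivot: with no special fields the user query passes through untouched, but any orderby, hint, explain, snapshot, or non-primary read preference causes the query to be wrapped under $query. A sketch of both shapes (values are illustrative; the class is package-private, so this would run inside com.mongodb):

DBObject op = new QueryOpBuilder()
        .addQuery(new BasicDBObject("x", 1))
        .addOrderBy(new BasicDBObject("y", -1))
        .addReadPreference(ReadPreference.secondaryPreferred())
        .get();
// -> { "$query" : { "x" : 1 } , "$orderby" : { "y" : -1 } ,
//      "$readPreference" : { "mode" : "secondaryPreferred" } }

DBObject plain = new QueryOpBuilder().addQuery(new BasicDBObject("x", 1)).get();
// -> { "x" : 1 }   (no special fields, so the query is returned unwrapped)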
- * @return - */ - private void addToQueryObject(DBObject dbobj, String field, DBObject obj, boolean sendEmpty) { - if (obj == null) - return; - - if (!sendEmpty && obj.keySet().size() == 0) - return; - - addToQueryObject(dbobj, field, obj); - } - - /** - * Adds an Object to the operation - * @param dbobj DBObject to add field to - * @param field name of the field - * @param obj Object to be added. Ignore if null - * @return - */ - private void addToQueryObject(DBObject dbobj, String field, Object obj) { - - if (obj == null) - return; - - dbobj.put(field, obj); - } - - - -} diff --git a/src/main/com/mongodb/QueryOperators.java b/src/main/com/mongodb/QueryOperators.java deleted file mode 100644 index c2199940413..00000000000 --- a/src/main/com/mongodb/QueryOperators.java +++ /dev/null @@ -1,80 +0,0 @@ -// QueryOperators.java - -/** - * Copyright (C) 2010 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.mongodb; - -/** - * MongoDB keywords for various query operations - * - * @author Julson Lim - */ -public class QueryOperators { - public static final String OR = "$or"; - public static final String AND = "$and"; - - public static final String GT = "$gt"; - public static final String GTE = "$gte"; - public static final String LT = "$lt"; - public static final String LTE = "$lte"; - - public static final String NE = "$ne"; - public static final String IN = "$in"; - public static final String NIN = "$nin"; - public static final String MOD = "$mod"; - public static final String ALL = "$all"; - public static final String SIZE = "$size"; - public static final String EXISTS = "$exists"; - public static final String ELEM_MATCH = "$elemMatch"; - - // (to be implemented in QueryBuilder) - public static final String WHERE = "$where"; - public static final String NOR = "$nor"; - public static final String TYPE = "$type"; - public static final String NOT = "$not"; - - // geo operators - public static final String WITHIN = "$within"; - public static final String NEAR = "$near"; - public static final String NEAR_SPHERE = "$nearSphere"; - public static final String BOX = "$box"; - public static final String CENTER = "$center"; - public static final String POLYGON = "$polygon"; - public static final String CENTER_SPHERE = "$centerSphere"; - // (to be implemented in QueryBuilder) - public static final String MAX_DISTANCE = "$maxDistance"; - public static final String UNIQUE_DOCS = "$uniqueDocs"; - - - // meta query operators (to be implemented in QueryBuilder) - public static final String RETURN_KEY = "$returnKey"; - public static final String MAX_SCAN = "$maxScan"; - public static final String ORDER_BY = "$orderby"; - public static final String EXPLAIN = "$explain"; - public static final String SNAPSHOT = "$snapshot"; - public static final String MIN = "$min"; - public static final String MAX = "$max"; - public static final String SHOW_DISK_LOC = "$showDiskLoc"; - public static final String HINT = "$hint"; - public static final String COMMENT = "$comment"; - - /** - * 
@deprecated Utility classes should not have a public or default constructor. - */ - @Deprecated - public QueryOperators() { - } -} diff --git a/src/main/com/mongodb/RawDBObject.java b/src/main/com/mongodb/RawDBObject.java deleted file mode 100644 index 8da392073d1..00000000000 --- a/src/main/com/mongodb/RawDBObject.java +++ /dev/null @@ -1,368 +0,0 @@ -// RawDBObject.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import static com.mongodb.util.MyAsserts.assertEquals; -import static org.bson.BSON.ARRAY; -import static org.bson.BSON.BINARY; -import static org.bson.BSON.BOOLEAN; -import static org.bson.BSON.CODE; -import static org.bson.BSON.CODE_W_SCOPE; -import static org.bson.BSON.DATE; -import static org.bson.BSON.EOO; -import static org.bson.BSON.MAXKEY; -import static org.bson.BSON.MINKEY; -import static org.bson.BSON.NULL; -import static org.bson.BSON.NUMBER; -import static org.bson.BSON.NUMBER_INT; -import static org.bson.BSON.NUMBER_LONG; -import static org.bson.BSON.OBJECT; -import static org.bson.BSON.OID; -import static org.bson.BSON.REF; -import static org.bson.BSON.REGEX; -import static org.bson.BSON.STRING; -import static org.bson.BSON.SYMBOL; -import static org.bson.BSON.TIMESTAMP; -import static org.bson.BSON.UNDEFINED; - -import java.nio.ByteBuffer; -import java.util.Date; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.Map; -import java.util.Set; - -import org.bson.BSONObject; -import org.bson.types.ObjectId; - -/** - * This object wraps the binary object format ("BSON") used for the transport of serialized objects to / from the Mongo database. - * - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. 
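RawDBObject (below) can do lazy field lookups because of two BSON invariants: a document begins with its own little-endian int32 length, and each element begins with a type byte followed by a NUL-terminated field name. A standalone sketch of that header walk, independent of the deleted class:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

class BsonPeek {
    // Prints the document length plus the first element's type tag and name.
    static void peekFirstElement(byte[] bson) {
        ByteBuffer buf = ByteBuffer.wrap(bson).order(ByteOrder.LITTLE_ENDIAN);
        int docLen = buf.getInt();               // total size, including this int32
        byte type = buf.get();                   // 0x01 double, 0x02 string, 0x10 int32, ...
        StringBuilder name = new StringBuilder();
        for (byte b; (b = buf.get()) != 0; )     // field name is a NUL-terminated cstring
            name.append((char) b);
        System.out.println(docLen + " bytes; first element '" + name + "' of type 0x"
                + Integer.toHexString(type));
    }
}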
- */ -@Deprecated -public class RawDBObject implements DBObject { - - RawDBObject( ByteBuffer buf ){ - this( buf , 0 ); - assertEquals( _end , _buf.limit() ); - } - - RawDBObject( ByteBuffer buf , int offset ){ - _buf = buf; - _offset = offset; - _end = _buf.getInt( _offset ); - } - - public Object get( String key ){ - Element e = findElement( key ); - if ( e == null ) - return null; - return e.getObject(); - } - - @SuppressWarnings("unchecked") - public Map toMap() { - Map m = new HashMap(); - Iterator i = this.keySet().iterator(); - while (i.hasNext()) { - Object s = i.next(); - m.put(s, this.get(String.valueOf(s))); - } - return m; - } - - public Object put( String key , Object v ){ - throw new RuntimeException( "read only" ); - } - - public void putAll( BSONObject o ){ - throw new RuntimeException( "read only" ); - } - - public void putAll( Map m ){ - throw new RuntimeException( "read only" ); - } - - public Object removeField( String key ){ - throw new RuntimeException( "read only" ); - } - - /** - * @deprecated - */ - @Deprecated - public boolean containsKey( String key ){ - return containsField(key); - } - - public boolean containsField( String field ){ - return findElement( field ) != null; - } - - public Set keySet(){ - Set keys = new HashSet(); - - ElementIter i = new ElementIter(); - while ( i.hasNext() ){ - Element e = i.next(); - if ( e.eoo() ) - break; - keys.add( e.fieldName() ); - } - - return keys; - } - - String _readCStr( final int start ){ - return _readCStr( start , null ); - } - - String _readCStr( final int start , final int[] end ){ - synchronized ( _cStrBuf ){ - int pos = 0; - while ( _buf.get( pos + start ) != 0 ){ - _cStrBuf[pos] = _buf.get( pos + start ); - pos++; - if ( pos >= _cStrBuf.length ) - throw new IllegalArgumentException( "c string too big for RawDBObject. so far[" + new String( _cStrBuf ) + "]" ); - - if ( pos + start >= _buf.limit() ){ - StringBuilder sb = new StringBuilder(); - for ( int x=0; x<10; x++ ){ - int y = start + x; - if ( y >= _buf.limit() ) - break; - sb.append( (char)_buf.get( y ) ); - } - throw new IllegalArgumentException( "can't find end of cstring. start:" + start + " pos: " + pos + " [" + sb + "]" ); - } - } - if ( end != null && end.length > 0 ) - end[0] = start + pos; - return new String( _cStrBuf , 0 , pos ); - - } - } - - String _readJavaString( final int start ){ - int size = _buf.getInt( start ) - 1; - - byte[] b = new byte[size]; - - int old = _buf.position(); - _buf.position( start + 4 ); - _buf.get( b , 0 , b.length ); - _buf.position( old ); - - try { - return new String( b , "UTF-8" ); - } - catch ( java.io.UnsupportedEncodingException uee ){ - return new String( b ); - } - } - - /** - * includes 0 at end - */ - int _cStrLength( final int start ){ - int end = start; - while ( _buf.get( end ) != 0 ) - end++; - return 1 + ( end - start ); - } - - Element findElement( String name ){ - ElementIter i = new ElementIter(); - while ( i.hasNext() ){ - Element e = i.next(); - if ( e.fieldName().equals( name ) ) - return e; - } - return null; - } - - public boolean isPartialObject(){ - return false; - } - - - public void markAsPartialObject(){ - throw new RuntimeException( "RawDBObject can't be a partial object" ); - } - - @Override - public String toString(){ - return "Object"; - } - - class Element { - Element( final int start ){ - _start = start; - _type = _buf.get( _start ); - int end[] = new int[1]; - _name = eoo() ? 
"" : _readCStr( _start + 1 , end ); - - int size = 1 + ( end[0] - _start); // 1 for the end of the string - _dataStart = _start + size; - - switch ( _type ){ - case MAXKEY: - case MINKEY: - case EOO: - case UNDEFINED: - case NULL: - break; - case BOOLEAN: - size += 1; - break; - case DATE: - case NUMBER: - case NUMBER_LONG: - size += 8; - break; - case NUMBER_INT: - size += 4; - break; - case OID: - size += 12; - break; - case REF: - size += 12; - size += 4 + _buf.getInt( _dataStart ); - break; - case SYMBOL: - case CODE: - case STRING: - size += 4 + _buf.getInt( _dataStart ); - break; - case CODE_W_SCOPE: - case ARRAY: - case OBJECT: - size += _buf.getInt( _dataStart ); - break; - case BINARY: - size += 4 + _buf.getInt( _dataStart ) + 1; - break; - case REGEX: - int first = _cStrLength( _dataStart ); - int second = _cStrLength( _dataStart + first ); - size += first + second; - break; - case TIMESTAMP: - size += 8; - break; - default: - throw new RuntimeException( "RawDBObject can't size type " + _type ); - } - _size = size; - } - - String fieldName(){ - return _name; - } - - boolean eoo(){ - return _type == EOO || _type == MAXKEY; - } - - int size(){ - return _size; - } - - Object getObject(){ - - if ( _cached != null ) - return _cached; - - switch ( _type ){ - case NUMBER: - return _buf.getDouble( _dataStart ); - case NUMBER_INT: - return _buf.getInt( _dataStart ); - case OID: - return new ObjectId( _buf.getInt( _dataStart ) , _buf.getInt( _dataStart + 4 ) , _buf.getInt( _dataStart + 8 ) ); - case CODE: - case CODE_W_SCOPE: - throw new RuntimeException( "can't handle code" ); - case SYMBOL: - case STRING: - return _readJavaString( _dataStart ); - case DATE: - return new Date( _buf.getLong( _dataStart ) ); - case REGEX: - //int[] endPos = new int[1]; - //String first = _readCStr( _dataStart , endPos ); - //return new JSRegex( first , _readCStr( 1 + endPos[0] ) ); - throw new RuntimeException( "can't handle regex" ); - case BINARY: - throw new RuntimeException( "can't inspect binary in db" ); - case BOOLEAN: - return _buf.get( _dataStart ) > 0; - case ARRAY: - case OBJECT: - throw new RuntimeException( "can't handle emebdded objects" ); - case NULL: - case EOO: - case MAXKEY: - case MINKEY: - case UNDEFINED: - return null; - } - throw new RuntimeException( "can't decode type " + _type ); - } - - final int _start; - final byte _type; - final String _name; - final int _dataStart; - final int _size; - - Object _cached; - } - - class ElementIter { - - ElementIter(){ - _pos = _offset + 4; - } - - boolean hasNext(){ - return ! _done && _pos < _buf.limit(); - } - - Element next(){ - Element e = new Element( _pos ); - _done = e.eoo(); - - _pos += e.size(); - return e; - } - - int _pos; - boolean _done = false; - } - - final ByteBuffer _buf; - final int _offset; - final int _end; - private final static byte[] _cStrBuf = new byte[1024]; -} diff --git a/src/main/com/mongodb/ReadPreference.java b/src/main/com/mongodb/ReadPreference.java deleted file mode 100644 index 3fcfdbe3f63..00000000000 --- a/src/main/com/mongodb/ReadPreference.java +++ /dev/null @@ -1,343 +0,0 @@ -/** - * Copyright (c) 2008 - 2011 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ - -package com.mongodb; - -import com.mongodb.ReplicaSetStatus.ReplicaSetNode; - -import java.util.ArrayList; -import java.util.List; -import java.util.Map; - - -/** - * An abstract class that represents preferred replica set members to which a query or command can be sent. - * - * @mongodb.driver.manual applications/replication/#replica-set-read-preference Read Preference - */ -public abstract class ReadPreference { - - ReadPreference() { - } - - /** - * @return true if this preference allows reads or commands from secondary nodes - */ - public abstract boolean isSlaveOk(); - - /** - * @return DBObject representation of this preference - */ - public abstract DBObject toDBObject(); - - /** - * The name of this read preference. - * - * @return the name - */ - public abstract String getName(); - - abstract ReplicaSetNode getNode(ReplicaSetStatus.ReplicaSet set); - - /** - * Preference to read from primary only. - * Cannot be combined with tags. - * - * @author breinero - */ - private static class PrimaryReadPreference extends ReadPreference { - private PrimaryReadPreference() { - } - - @Override - public boolean isSlaveOk() { - return false; - } - - @Override - public String toString() { - return getName(); - } - - @Override - public boolean equals(final Object o) { - return o != null && getClass() == o.getClass(); - } - - @Override - public int hashCode() { - return getName().hashCode(); - } - - @Override - ReplicaSetNode getNode(ReplicaSetStatus.ReplicaSet set) { - return set.getMaster(); - } - - @Override - public DBObject toDBObject() { - return new BasicDBObject("mode", getName()); - } - - @Override - public String getName() { - return "primary"; - } - } - - /** - * Read from a secondary if available and matches tags. - * - * @deprecated As of release 2.9, replaced by - * ReadPreference.secondaryPreferred(DBObject firstTagSet, DBObject... 
remainingTagSets) - */ - @Deprecated - public static class TaggedReadPreference extends ReadPreference { - - public TaggedReadPreference(Map tags) { - if (tags == null || tags.size() == 0) { - throw new IllegalArgumentException("tags cannot be null or empty"); - } - _tags = new BasicDBObject(tags); - List maps = splitMapIntoMultipleMaps(_tags); - _pref = new TaggableReadPreference.SecondaryReadPreference(maps.get(0), getRemainingMaps(maps)); - - } - - public TaggedReadPreference(DBObject tags) { - if (tags == null || tags.keySet().size() == 0) { - throw new IllegalArgumentException("tags cannot be null or empty"); - } - _tags = tags; - List maps = splitMapIntoMultipleMaps(_tags); - _pref = new TaggableReadPreference.SecondaryReadPreference(maps.get(0), getRemainingMaps(maps)); - } - - public DBObject getTags() { - DBObject tags = new BasicDBObject(); - for (String key : _tags.keySet()) - tags.put(key, _tags.get(key)); - - return tags; - } - - @Override - public boolean isSlaveOk() { - return _pref.isSlaveOk(); - } - - @Override - ReplicaSetNode getNode(ReplicaSetStatus.ReplicaSet set) { - return _pref.getNode(set); - } - - @Override - public DBObject toDBObject() { - return _pref.toDBObject(); - } - - @Override - public String getName() { - return _pref.getName(); - } - - private static List splitMapIntoMultipleMaps(DBObject tags) { - List tagList = new ArrayList(tags.keySet().size()); - - for (String key : tags.keySet()) { - tagList.add(new BasicDBObject(key, tags.get(key).toString())); - } - return tagList; - } - - private DBObject[] getRemainingMaps(final List maps) { - if (maps.size() <= 1) { - return new DBObject[0]; - } - // include every map after the first (the original end index dropped the last one) - return maps.subList(1, maps.size()).toArray(new DBObject[maps.size() - 1]); - } - - private final DBObject _tags; - private final ReadPreference _pref; - } - - /** - * @return ReadPreference which reads from primary only - */ - public static ReadPreference primary() { - return _PRIMARY; - } - - /** - * @return ReadPreference which reads primary if available. - */ - public static ReadPreference primaryPreferred() { - return _PRIMARY_PREFERRED; - } - - /** - * @return ReadPreference which reads primary if available, otherwise a secondary respecting tags. - */ - public static TaggableReadPreference primaryPreferred(DBObject firstTagSet, DBObject... remainingTagSets) { - return new TaggableReadPreference.PrimaryPreferredReadPreference(firstTagSet, remainingTagSets); - } - - /** - * @return ReadPreference which reads secondary. - */ - public static ReadPreference secondary() { - return _SECONDARY; - } - - /** - * @return ReadPreference which reads a secondary respecting tags. - */ - public static TaggableReadPreference secondary(DBObject firstTagSet, DBObject... remainingTagSets) { - return new TaggableReadPreference.SecondaryReadPreference(firstTagSet, remainingTagSets); - } - - /** - * @return ReadPreference which reads secondary if available, otherwise from primary. - */ - public static ReadPreference secondaryPreferred() { - return _SECONDARY_PREFERRED; - } - - /** - * @return ReadPreference which reads a secondary respecting tags if available, otherwise from primary irrespective of tags. - */ - public static TaggableReadPreference secondaryPreferred(DBObject firstTagSet, DBObject... remainingTagSets) { - return new TaggableReadPreference.SecondaryPreferredReadPreference(firstTagSet, remainingTagSets); - } - - /** - * @return ReadPreference which reads nearest node.
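The factory methods above are the public surface of this class: shared singletons for the untagged modes, and a fresh TaggableReadPreference whenever tag sets are supplied. A hypothetical usage sketch (the "dc"/"rack" tag values are illustrative):

ReadPreference pri = ReadPreference.primary();                 // shared singleton
// Tag sets are tried in order: "dc: east" first, then the fallback set.
ReadPreference tagged = ReadPreference.secondaryPreferred(
        new BasicDBObject("dc", "east"),
        new BasicDBObject("dc", "west").append("rack", "1"));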
- */ - public static ReadPreference nearest() { - return _NEAREST; - } - - public static ReadPreference valueOf(String name) { - if (name == null) { - throw new IllegalArgumentException(); - } - - name = name.toLowerCase(); - - if (name.equals(_PRIMARY.getName().toLowerCase())) { - return _PRIMARY; - } - if (name.equals(_SECONDARY.getName().toLowerCase())) { - return _SECONDARY; - } - if (name.equals(_SECONDARY_PREFERRED.getName().toLowerCase())) { - return _SECONDARY_PREFERRED; - } - if (name.equals(_PRIMARY_PREFERRED.getName().toLowerCase())) { - return _PRIMARY_PREFERRED; - } - if (name.equals(_NEAREST.getName().toLowerCase())) { - return _NEAREST; - } - - throw new IllegalArgumentException("No match for read preference of " + name); - } - - public static TaggableReadPreference valueOf(String name, DBObject firstTagSet, final DBObject... remainingTagSets) { - if (name == null) { - throw new IllegalArgumentException(); - } - - name = name.toLowerCase(); - - if (name.equals(_SECONDARY.getName().toLowerCase())) { - return new TaggableReadPreference.SecondaryReadPreference(firstTagSet, remainingTagSets); - } - if (name.equals(_SECONDARY_PREFERRED.getName().toLowerCase())) { - return new TaggableReadPreference.SecondaryPreferredReadPreference(firstTagSet, remainingTagSets); - } - if (name.equals(_PRIMARY_PREFERRED.getName().toLowerCase())) { - return new TaggableReadPreference.PrimaryPreferredReadPreference(firstTagSet, remainingTagSets); - } - if (name.equals(_NEAREST.getName().toLowerCase())) { - return new TaggableReadPreference.NearestReadPreference(firstTagSet, remainingTagSets); - } - - throw new IllegalArgumentException("No match for read preference of " + name); - } - - /** - * @return ReadPreference which reads the nearest node respecting tags. - */ - public static TaggableReadPreference nearest(DBObject firstTagSet, DBObject... remainingTagSets) { - return new TaggableReadPreference.NearestReadPreference(firstTagSet, remainingTagSets); - } - - /** - * A primary read preference. Equivalent to calling {@code ReadPreference.primary()}. - * - * @see com.mongodb.ReadPreference#primary() - * @deprecated As of release 2.9.0, replaced by {@code ReadPreference.primary()} - */ - @Deprecated - public static final ReadPreference PRIMARY; - - /** - * A secondary-preferred read preference. Equivalent to calling - * {@code ReadPreference.secondaryPreferred}. This reference should really have been called - * {@code ReadPreference.SECONDARY_PREFERRED}, but the naming of it preceded the idea of distinguishing - * between secondary and secondary-preferred, so for backwards compatibility the name is left as is, with - * the behavior as it was when it was created. - * - * @see com.mongodb.ReadPreference#secondary() - * @see com.mongodb.ReadPreference#secondaryPreferred() - * @deprecated As of release 2.9.0, replaced by {@code ReadPreference.secondaryPreferred()} - */ - @Deprecated - public static final ReadPreference SECONDARY; - - /** - * @deprecated As of release 2.9.0, replaced by - * {@code ReadPreference.secondaryPreferred(DBObject firstTagSet, DBObject... remainingTagSets)} - */ - @Deprecated - public static ReadPreference withTags(Map tags) { - return new TaggedReadPreference( tags ); - } - - /** - * @deprecated As of release 2.9.0, replaced by - * {@code ReadPreference.secondaryPreferred(DBObject firstTagSet, DBObject...
remainingTagSets)} - */ - @Deprecated - public static ReadPreference withTags( final DBObject tags ) { - return new TaggedReadPreference( tags ); - } - - private static final ReadPreference _PRIMARY; - private static final ReadPreference _SECONDARY; - private static final ReadPreference _SECONDARY_PREFERRED; - private static final ReadPreference _PRIMARY_PREFERRED; - private static final ReadPreference _NEAREST; - - static { - _PRIMARY = new PrimaryReadPreference(); - _SECONDARY = new TaggableReadPreference.SecondaryReadPreference(); - _SECONDARY_PREFERRED = new TaggableReadPreference.SecondaryPreferredReadPreference(); - _PRIMARY_PREFERRED = new TaggableReadPreference.PrimaryPreferredReadPreference(); - _NEAREST = new TaggableReadPreference.NearestReadPreference(); - - PRIMARY = _PRIMARY; - SECONDARY = _SECONDARY_PREFERRED; // this is not a bug. See SECONDARY Javadoc. - } -} diff --git a/src/main/com/mongodb/ReflectionDBObject.java b/src/main/com/mongodb/ReflectionDBObject.java deleted file mode 100644 index 3e0c789ecc4..00000000000 --- a/src/main/com/mongodb/ReflectionDBObject.java +++ /dev/null @@ -1,284 +0,0 @@ -// ReflectionDBObject.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import java.lang.reflect.Method; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.Map; -import java.util.Set; -import java.util.TreeMap; - -import org.bson.BSONObject; - -/** - * This class enables mapping simple Class fields to BSON object fields - */ -public abstract class ReflectionDBObject implements DBObject { - - public Object get( String key ){ - return getWrapper().get( this , key ); - } - - public Set keySet(){ - return getWrapper().keySet(); - } - - /** - * @deprecated - */ - @Deprecated - public boolean containsKey( String s ){ - return containsField( s ); - } - - public boolean containsField( String s ){ - return getWrapper().containsKey( s ); - } - - public Object put( String key , Object v ){ - return getWrapper().set( this , key , v ); - } - - @SuppressWarnings("unchecked") - public void putAll( Map m ){ - for ( Map.Entry entry : (Set)m.entrySet() ){ - put( entry.getKey().toString() , entry.getValue() ); - } - } - - public void putAll( BSONObject o ){ - for ( String k : o.keySet() ){ - put( k , o.get( k ) ); - } - } - - /** - * Gets the _id - * @return the _id - */ - public Object get_id(){ - return _id; - } - - /** - * Sets the _id - * @param id the _id to set - */ - public void set_id( Object id ){ - _id = id; - } - - public boolean isPartialObject(){ - return false; - } - - @SuppressWarnings("unchecked") - public Map toMap() { - Map m = new HashMap(); - Iterator i = this.keySet().iterator(); - while (i.hasNext()) { - Object s = i.next(); - m.put(s, this.get(s+"")); - } - return m; - } - - /** - * ReflectionDBObjects can't be partial - */ - public void markAsPartialObject(){ - throw new RuntimeException( "ReflectionDBObjects can't be partial" ); - }
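The JavaWrapper machinery below discovers matched get/set pairs reflectively, so a mapped type needs nothing but bean-style accessors. A minimal hypothetical subclass; note that the derived field names keep the capitalization of the accessor suffix:

public static class Person extends ReflectionDBObject {
    private String name;
    public String getName() { return name; }
    public void setName(String name) { this.name = name; }
}

Person p = new Person();
p.put("Name", "Ada");      // routed to setName(); the key is "Name", not "name"
Object n = p.get("Name");  // routed to getName() -> "Ada"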
- - /** - * can't remove from a ReflectionDBObject - * @param key - * @return - */ - public Object removeField( String key ){ - throw new RuntimeException( "can't remove from a ReflectionDBObject" ); - } - - JavaWrapper getWrapper(){ - if ( _wrapper != null ) - return _wrapper; - - _wrapper = getWrapper( this.getClass() ); - return _wrapper; - } - - JavaWrapper _wrapper; - Object _id; - - /** - * Represents a wrapper around the DBObject to interface with the Class fields - */ - public static class JavaWrapper { - JavaWrapper( Class c ){ - _class = c; - _name = c.getName(); - - _fields = new TreeMap(); - for ( Method m : c.getMethods() ){ - if ( ! ( m.getName().startsWith( "get" ) || m.getName().startsWith( "set" ) ) ) - continue; - - String name = m.getName().substring(3); - if ( name.length() == 0 || IGNORE_FIELDS.contains( name ) ) - continue; - - Class type = m.getName().startsWith( "get" ) ? m.getReturnType() : m.getParameterTypes()[0]; - - FieldInfo fi = _fields.get( name ); - if ( fi == null ){ - fi = new FieldInfo( name , type ); - _fields.put( name , fi ); - } - - if ( m.getName().startsWith( "get" ) ) - fi._getter = m; - else - fi._setter = m; - } - - Set names = new HashSet( _fields.keySet() ); - for ( String name : names ) - if ( ! _fields.get( name ).ok() ) - _fields.remove( name ); - - _keys = Collections.unmodifiableSet( _fields.keySet() ); - } - - public Set keySet(){ - return _keys; - } - - /** - * @deprecated - */ - @Deprecated - public boolean containsKey( String key ){ - return _keys.contains( key ); - } - - public Object get( ReflectionDBObject t , String name ){ - FieldInfo i = _fields.get( name ); - if ( i == null ) - return null; - try { - return i._getter.invoke( t ); - } - catch ( Exception e ){ - throw new RuntimeException( "could not invoke getter for [" + name + "] on [" + _name + "]" , e ); - } - } - - public Object set( ReflectionDBObject t , String name , Object val ){ - FieldInfo i = _fields.get( name ); - if ( i == null ) - throw new IllegalArgumentException( "no field [" + name + "] on [" + _name + "]" ); - try { - return i._setter.invoke( t , val ); - } - catch ( Exception e ){ - throw new RuntimeException( "could not invoke setter for [" + name + "] on [" + _name + "]" , e ); - } - } - - Class getInternalClass( String path ){ - String cur = path; - String next = null; - final int idx = path.indexOf( "." ); - if ( idx >= 0 ){ - cur = path.substring( 0 , idx ); - next = path.substring( idx + 1 ); - } - - FieldInfo fi = _fields.get( cur ); - if ( fi == null ) - return null; - - if ( next == null ) - return fi._class; - - JavaWrapper w = getWrapperIfReflectionObject( fi._class ); - if ( w == null ) - return null; - return w.getInternalClass( next ); - } - - final Class _class; - final String _name; - final Map _fields; - final Set _keys; - } - - static class FieldInfo { - FieldInfo( String name , Class c ){ - _name = name; - _class = c; - } - - boolean ok(){ - return - _getter != null && - _setter != null; - } - - final String _name; - final Class _class; - Method _getter; - Method _setter; - } - - /** - * Returns the wrapper if this object can be assigned from this class - * @param c - * @return - */ - public static JavaWrapper getWrapperIfReflectionObject( Class c ){ - if ( ReflectionDBObject.class.isAssignableFrom( c ) ) - return getWrapper( c ); - return null; - } - - /** - * Returns an existing Wrapper instance associated with a class, or creates a new one. 
- * @param c - * @return - */ - public static JavaWrapper getWrapper( Class c ){ - JavaWrapper w = _wrappers.get( c ); - if ( w == null ){ - w = new JavaWrapper( c ); - _wrappers.put( c , w ); - } - return w; - } - - private static final Map _wrappers = Collections.synchronizedMap( new HashMap() ); - private static final Set IGNORE_FIELDS = new HashSet(); - static { - IGNORE_FIELDS.add( "Int" ); - } - -} diff --git a/src/main/com/mongodb/ReplicaSetStatus.java b/src/main/com/mongodb/ReplicaSetStatus.java deleted file mode 100644 index 5efc4f10a62..00000000000 --- a/src/main/com/mongodb/ReplicaSetStatus.java +++ /dev/null @@ -1,818 +0,0 @@ -// ReplicaSetStatus.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import org.bson.util.annotations.Immutable; -import org.bson.util.annotations.ThreadSafe; - -import java.net.UnknownHostException; -import java.util.List; -import java.util.ArrayList; -import java.util.Set; -import java.util.HashSet; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.Collections; -import java.util.Map; -import java.util.Random; -import java.util.concurrent.atomic.AtomicReference; -import java.util.logging.Level; -import java.util.logging.Logger; - -// TODO: -// pull config to get -// priority -// slave delay - -/** - * Keeps replica set status. Maintains a background thread to ping all members of the set to keep the status current. 
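The concurrency pattern this class relies on: a background updater thread publishes immutable snapshots of the set, and readers block on a holder until the first snapshot arrives. A generic sketch of that holder (names and the generic shape are illustrative, not the driver's):

static final class SnapshotHolder<T> {
    private volatile T snapshot;

    synchronized T get(long timeoutMS) throws InterruptedException {
        while (snapshot == null)
            wait(timeoutMS);      // woken by set(); re-checks after each timeout
        return snapshot;
    }

    synchronized void set(T next) {
        snapshot = next;          // swap in a fully built, immutable snapshot
        notifyAll();              // wake every blocked reader
    }
}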
- */ -@ThreadSafe -public class ReplicaSetStatus extends ConnectionStatus { - - static final Logger _rootLogger = Logger.getLogger( "com.mongodb.ReplicaSetStatus" ); - - ReplicaSetStatus( Mongo mongo, List initial ){ - super(initial, mongo); - _updater = new Updater(initial); - } - - public String getName() { - return _replicaSetHolder.get().getSetName(); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("{replSetName: ").append(_replicaSetHolder.get().getSetName()); - sb.append(", members: ").append(_replicaSetHolder); - sb.append(", updaterIntervalMS: ").append(updaterIntervalMS); - sb.append(", updaterIntervalNoMasterMS: ").append(updaterIntervalNoMasterMS); - sb.append(", slaveAcceptableLatencyMS: ").append(slaveAcceptableLatencyMS); - sb.append(", latencySmoothFactor: ").append(latencySmoothFactor); - sb.append("}"); - - return sb.toString(); - } - - /** - * @return the master, or null if there is none - * @throws MongoException - */ - public ServerAddress getMaster(){ - ReplicaSetNode n = getMasterNode(); - if ( n == null ) - return null; - return n.getServerAddress(); - } - - ReplicaSetNode getMasterNode(){ - checkClosed(); - return _replicaSetHolder.get().getMaster(); - } - - /** - * @param srv the server to compare - * @return true if the given ServerAddress is the current master/primary - * @throws MongoException - */ - public boolean isMaster(ServerAddress srv) { - if (srv == null) - return false; - - return srv.equals(getMaster()); - } - - /** - * @return a good secondary, or null if none can be found - */ - ServerAddress getASecondary() { - ReplicaSetNode node = _replicaSetHolder.get().getASecondary(); - if (node == null) { - return null; - } - return node._addr; - } - - @Override - boolean hasServerUp() { - for (ReplicaSetNode node : _replicaSetHolder.get().getAll()) { - if (node.isOk()) { - return true; - } - } - return false; - } - - // Simple abstraction over a volatile ReplicaSet reference that starts as null. The get method blocks until members - // is not null. The set method notifies all, thus waking up all getters. - @ThreadSafe - class ReplicaSetHolder { - private volatile ReplicaSet members; - - // blocks until replica set is set, or a timeout occurs - synchronized ReplicaSet get() { - while (members == null) { - try { - wait(_mongo.getMongoOptions().getConnectTimeout()); - } catch (InterruptedException e) { - throw new MongoInterruptedException("Interrupted while waiting for next update to replica set status", e); - } - } - return members; - } - - // sets the replica set to a non-null value and notifies all waiting threads. - synchronized void set(ReplicaSet members) { - if (members == null) { - throw new IllegalArgumentException("members cannot be null"); - } - - this.members = members; - notifyAll(); - } - - // blocks until the replica set is set again - synchronized void waitForNextUpdate() { - try { - wait(_mongo.getMongoOptions().getConnectTimeout()); - } catch (InterruptedException e) { - throw new MongoInterruptedException("Interrupted while waiting for next update to replica set status", e); - } - } - - public synchronized void close() { - this.members = null; - notifyAll(); - } - - public String toString() { - ReplicaSet cur = this.members; - if (cur != null) { - return cur.toString(); - } - return "none"; - } - } - - // Immutable snapshot state of a replica set.
Since the nodes don't change state, this class pre-computes the list - // of good secondaries so that choosing a random good secondary is dead simple - @Immutable - static class ReplicaSet { - final List all; - final Random random; - final List acceptableSecondaries; - final List acceptableMembers; - final ReplicaSetNode master; - final String setName; - final ReplicaSetErrorStatus errorStatus; - - private int acceptableLatencyMS; - - public ReplicaSet(List nodeList, Random random, int acceptableLatencyMS) { - - this.random = random; - this.all = Collections.unmodifiableList(new ArrayList(nodeList)); - this.acceptableLatencyMS = acceptableLatencyMS; - - errorStatus = validate(); - setName = determineSetName(); - - this.acceptableSecondaries = - Collections.unmodifiableList(calculateGoodMembers( - all, calculateBestPingTime(all, false), acceptableLatencyMS, false)); - this.acceptableMembers = - Collections.unmodifiableList(calculateGoodMembers(all, calculateBestPingTime(all, true), acceptableLatencyMS, true)); - master = findMaster(); - } - - public List getAll() { - checkStatus(); - - return all; - } - - public boolean hasMaster() { - return getMaster() != null; - } - - public ReplicaSetNode getMaster() { - checkStatus(); - - return master; - } - - public int getMaxBsonObjectSize() { - if (hasMaster()) { - return getMaster().getMaxBsonObjectSize(); - } else { - return Bytes.MAX_OBJECT_SIZE; - } - } - - public ReplicaSetNode getASecondary() { - checkStatus(); - - if (acceptableSecondaries.isEmpty()) { - return null; - } - return acceptableSecondaries.get(random.nextInt(acceptableSecondaries.size())); - } - - public ReplicaSetNode getASecondary(List tags) { - checkStatus(); - - // optimization - if (tags.isEmpty()) { - return getASecondary(); - } - - List acceptableTaggedSecondaries = getGoodSecondariesByTags(tags); - - if (acceptableTaggedSecondaries.isEmpty()) { - return null; - } - return acceptableTaggedSecondaries.get(random.nextInt(acceptableTaggedSecondaries.size())); - } - - public ReplicaSetNode getAMember() { - checkStatus(); - - if (acceptableMembers.isEmpty()) { - return null; - } - return acceptableMembers.get(random.nextInt(acceptableMembers.size())); - } - - public ReplicaSetNode getAMember(List tags) { - checkStatus(); - - if (tags.isEmpty()) - return getAMember(); - - List acceptableTaggedMembers = getGoodMembersByTags(tags); - - if (acceptableTaggedMembers.isEmpty()) - return null; - - return acceptableTaggedMembers.get(random.nextInt(acceptableTaggedMembers.size())); - } - - List getGoodSecondaries(List all) { - List goodSecondaries = new ArrayList(all.size()); - for (ReplicaSetNode cur : all) { - if (!cur.isOk()) { - continue; - } - goodSecondaries.add(cur); - } - return goodSecondaries; - } - - public List getGoodSecondariesByTags(final List tags) { - checkStatus(); - - List taggedSecondaries = getMembersByTags(all, tags); - return calculateGoodMembers(taggedSecondaries, - calculateBestPingTime(taggedSecondaries, false), acceptableLatencyMS, false); - } - - public List getGoodMembersByTags(final List tags) { - checkStatus(); - - List taggedMembers = getMembersByTags(all, tags); - return calculateGoodMembers(taggedMembers, - calculateBestPingTime(taggedMembers, true), acceptableLatencyMS, true); - } - - public String getSetName() { - checkStatus(); - - return setName; - } - - public ReplicaSetErrorStatus getErrorStatus(){ - return errorStatus; - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("[ "); - for (ReplicaSetNode 
node : getAll()) - sb.append(node.toJSON()).append(","); - sb.setLength(sb.length() - 1); //remove last comma - sb.append(" ]"); - return sb.toString(); - } - - private void checkStatus(){ - if (!errorStatus.isOk()) - throw new MongoException(errorStatus.getError()); - } - - private ReplicaSetNode findMaster() { - for (ReplicaSetNode node : all) { - if (node.master()) - return node; - } - return null; - } - - private String determineSetName() { - for (ReplicaSetNode node : all) { - String nodeSetName = node.getSetName(); - - if (nodeSetName != null && !nodeSetName.equals("")) { - return nodeSetName; - } - } - - return null; - } - - private ReplicaSetErrorStatus validate() { - //make sure all nodes have the same set name - HashSet nodeNames = new HashSet(); - - for(ReplicaSetNode node : all) { - String nodeSetName = node.getSetName(); - - if(nodeSetName != null && !nodeSetName.equals("")) { - nodeNames.add(nodeSetName); - } - } - - if(nodeNames.size() <= 1) - return new ReplicaSetErrorStatus(true, null); - else { - return new ReplicaSetErrorStatus(false, "nodes with different set names detected: " + nodeNames.toString()); - } - } - - static float calculateBestPingTime(List members, boolean includeMaster) { - float bestPingTime = Float.MAX_VALUE; - for (ReplicaSetNode cur : members) { - if (cur.secondary() || (includeMaster && cur.master())) { - if (cur._pingTime < bestPingTime) { - bestPingTime = cur._pingTime; - } - } - } - return bestPingTime; - } - - static List calculateGoodMembers(List members, float bestPingTime, int acceptableLatencyMS, boolean includeMaster) { - List goodSecondaries = new ArrayList(members.size()); - for (ReplicaSetNode cur : members) { - if (cur.secondary() || (includeMaster && cur.master())) { - if (cur._pingTime - acceptableLatencyMS <= bestPingTime) { - goodSecondaries.add(cur); - } - } - } - return goodSecondaries; - } - - static List getMembersByTags(List members, List tags) { - - List membersByTag = new ArrayList(); - - for (ReplicaSetNode cur : members) { - if (tags != null && cur.getTags() != null && cur.getTags().containsAll(tags)) { - membersByTag.add(cur); - } - } - - return membersByTag; - } - - } - - // Represents the state of a node in the replica set. Instances of this class are immutable. 
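The member selection above is a latency window, not a strict fastest-wins rule: any eligible member whose smoothed ping time is within acceptableLatencyMS of the best one stays in the candidate list, and a later random pick spreads load across them. Worked numbers (illustrative), with a 15 ms window over ping times {4, 12, 40}:

// best = 4; a member is eligible iff ping - 15 <= 4
float best = 4f;
int windowMS = 15;
for (float ping : new float[]{ 4f, 12f, 40f })
    System.out.println(ping + " ms eligible: " + (ping - windowMS <= best));
// 4.0 ms eligible: true, 12.0 ms eligible: true, 40.0 ms eligible: false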
- @Immutable - static class ReplicaSetNode extends Node { - ReplicaSetNode(ServerAddress addr, Set names, String setName, float pingTime, boolean ok, boolean isMaster, boolean isSecondary, - LinkedHashMap tags, int maxBsonObjectSize) { - super(pingTime, addr, maxBsonObjectSize, ok); - this._names = Collections.unmodifiableSet(new HashSet(names)); - this._setName = setName; - this._isMaster = isMaster; - this._isSecondary = isSecondary; - this._tags = Collections.unmodifiableSet(getTagsFromMap(tags)); - } - - private static Set getTagsFromMap(LinkedHashMap tagMap) { - Set tagSet = new HashSet(); - for (Map.Entry curEntry : tagMap.entrySet()) { - tagSet.add(new Tag(curEntry.getKey(), curEntry.getValue())); - } - return tagSet; - } - - public boolean master(){ - return _ok && _isMaster; - } - - public boolean secondary(){ - return _ok && _isSecondary; - } - - public Set getNames() { - return _names; - } - - public String getSetName() { - return _setName; - } - - public Set getTags() { - return _tags; - } - - public float getPingTime() { - return _pingTime; - } - - public String toJSON(){ - StringBuilder buf = new StringBuilder(); - buf.append( "{ address:'" ).append( _addr ).append( "', " ); - buf.append( "ok:" ).append( _ok ).append( ", " ); - buf.append( "ping:" ).append( _pingTime ).append( ", " ); - buf.append( "isMaster:" ).append( _isMaster ).append( ", " ); - buf.append( "isSecondary:" ).append( _isSecondary ).append( ", " ); - buf.append( "setName:" ).append( _setName ).append( ", " ); - buf.append( "maxBsonObjectSize:" ).append( _maxBsonObjectSize ).append( ", " ); - if(_tags != null && _tags.size() > 0){ - List tagObjects = new ArrayList(); - for( Tag tag : _tags) - tagObjects.add(tag.toDBObject()); - - buf.append(new BasicDBObject("tags", tagObjects) ); - } - - buf.append("}"); - - return buf.toString(); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - ReplicaSetNode node = (ReplicaSetNode) o; - - if (_isMaster != node._isMaster) return false; - if (_maxBsonObjectSize != node._maxBsonObjectSize) return false; - if (_isSecondary != node._isSecondary) return false; - if (_ok != node._ok) return false; - if (Float.compare(node._pingTime, _pingTime) != 0) return false; - if (!_addr.equals(node._addr)) return false; - if (!_names.equals(node._names)) return false; - if (!_tags.equals(node._tags)) return false; - if (!_setName.equals(node._setName)) return false; - - return true; - } - - @Override - public int hashCode() { - int result = _addr.hashCode(); - result = 31 * result + (_pingTime != +0.0f ? Float.floatToIntBits(_pingTime) : 0); - result = 31 * result + _names.hashCode(); - result = 31 * result + _tags.hashCode(); - result = 31 * result + (_ok ? 1 : 0); - result = 31 * result + (_isMaster ? 1 : 0); - result = 31 * result + (_isSecondary ? 
1 : 0); - result = 31 * result + _setName.hashCode(); - result = 31 * result + _maxBsonObjectSize; - return result; - } - - private final Set _names; - private final Set _tags; - private final boolean _isMaster; - private final boolean _isSecondary; - private final String _setName; - } - - - @Immutable - static final class ReplicaSetErrorStatus{ - final boolean ok; - final String error; - - ReplicaSetErrorStatus(boolean ok, String error){ - this.ok = ok; - this.error = error; - } - - public boolean isOk(){ - return ok; - } - - public String getError(){ - return error; - } - } - - // Simple class to hold a single tag, both key and value - @Immutable - static final class Tag { - final String key; - final String value; - - Tag(String key, String value) { - this.key = key; - this.value = value; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - Tag tag = (Tag) o; - - if (key != null ? !key.equals(tag.key) : tag.key != null) return false; - if (value != null ? !value.equals(tag.value) : tag.value != null) return false; - - return true; - } - - @Override - public int hashCode() { - int result = key != null ? key.hashCode() : 0; - result = 31 * result + (value != null ? value.hashCode() : 0); - return result; - } - - public DBObject toDBObject(){ - return new BasicDBObject(key, value); - } - } - - // Represents the state of a node in the replica set. Instances of this class are mutable. - static class UpdatableReplicaSetNode extends UpdatableNode { - - UpdatableReplicaSetNode(ServerAddress addr, - List all, - AtomicReference logger, - Mongo mongo, - MongoOptions mongoOptions, - AtomicReference lastPrimarySignal) { - super(addr, mongo, mongoOptions); - _all = all; - _names.add(addr.toString()); - _logger = logger; - _lastPrimarySignal = lastPrimarySignal; - } - - void update(Set seenNodes) { - CommandResult res = update(); - if (res == null || !isOk()) { - return; - } - - _isMaster = res.getBoolean("ismaster", false); - _isSecondary = res.getBoolean("secondary", false); - _lastPrimarySignal.set(res.getString("primary")); - - if (res.containsField("hosts")) { - for (Object x : (List) res.get("hosts")) { - String host = x.toString(); - UpdatableReplicaSetNode node = _addIfNotHere(host); - if (node != null && seenNodes != null) - seenNodes.add(node); - } - } - - if (res.containsField("passives")) { - for (Object x : (List) res.get("passives")) { - String host = x.toString(); - UpdatableReplicaSetNode node = _addIfNotHere(host); - if (node != null && seenNodes != null) - seenNodes.add(node); - } - } - - // Tags were added in 2.0 but may not be present - if (res.containsField("tags")) { - DBObject tags = (DBObject) res.get("tags"); - for (String key : tags.keySet()) { - _tags.put(key, tags.get(key).toString()); - } - } - - //old versions of mongod don't report setName - if (res.containsField("setName")) { - _setName = res.getString("setName", ""); - - if(_logger.get() == null) - _logger.set(Logger.getLogger(_rootLogger.getName() + "." 
+ _setName)); - } - } - - @Override - protected Logger getLogger() { - return _logger.get(); - } - - UpdatableReplicaSetNode _addIfNotHere(String host) { - UpdatableReplicaSetNode n = findNode(host, _all, _logger); - if (n == null) { - try { - n = new UpdatableReplicaSetNode(new ServerAddress(host), _all, _logger, _mongo, _mongoOptions, _lastPrimarySignal); - _all.add(n); - } catch (UnknownHostException un) { - _logger.get().log(Level.WARNING, "couldn't resolve host [" + host + "]"); - } - } - return n; - } - - private UpdatableReplicaSetNode findNode(String host, List members, AtomicReference logger) { - for (UpdatableReplicaSetNode node : members) - if (node._names.contains(host)) - return node; - - ServerAddress addr; - try { - addr = new ServerAddress(host); - } catch (UnknownHostException un) { - logger.get().log(Level.WARNING, "couldn't resolve host [" + host + "]"); - return null; - } - - for (UpdatableReplicaSetNode node : members) { - if (node._addr.equals(addr)) { - node._names.add(host); - return node; - } - } - - return null; - } - - public void close() { - _port.close(); - _port = null; - } - - private final Set _names = Collections.synchronizedSet(new HashSet()); - final LinkedHashMap _tags = new LinkedHashMap(); - - boolean _isMaster = false; - boolean _isSecondary = false; - String _setName; - - private final AtomicReference _logger; - private final AtomicReference _lastPrimarySignal; - private final List _all; - } - - // Thread that monitors the state of the replica set. This thread is responsible for setting a new ReplicaSet - // instance on ReplicaSetStatus.members every pass through the members of the set. - class Updater extends BackgroundUpdater { - - Updater(List initial){ - super("ReplicaSetStatus:Updater"); - _all = new ArrayList(initial.size()); - for ( ServerAddress addr : initial ){ - _all.add( new UpdatableReplicaSetNode( addr, _all, _logger, _mongo, _mongoOptions, _lastPrimarySignal ) ); - } - } - - @Override - public void run() { - try { - while (!Thread.interrupted()) { - int curUpdateIntervalMS = updaterIntervalNoMasterMS; - - try { - updateAll(); - - ReplicaSet replicaSet = new ReplicaSet(createNodeList(), _random, slaveAcceptableLatencyMS); - _replicaSetHolder.set(replicaSet); - - if (replicaSet.getErrorStatus().isOk() && replicaSet.hasMaster()) { - _mongo.getConnector().setMaster(replicaSet.getMaster()); - curUpdateIntervalMS = updaterIntervalMS; - } - } catch (Exception e) { - _logger.get().log(Level.WARNING, "couldn't do update pass", e); - } - - Thread.sleep(curUpdateIntervalMS); - } - } - catch (InterruptedException e) { - // Allow thread to exit - } - - _replicaSetHolder.close(); - closeAllNodes(); - } - - public synchronized void updateAll(){ - HashSet seenNodes = new HashSet(); - - for (int i = 0; i < _all.size(); i++) { - _all.get(i).update(seenNodes); - } - - if (seenNodes.size() > 0) { - // not empty, means that at least 1 server gave node list - // remove unused hosts - Iterator it = _all.iterator(); - while (it.hasNext()) { - if (!seenNodes.contains(it.next())) - it.remove(); - } - } - } - - private List createNodeList() { - List nodeList = new ArrayList(_all.size()); - for (UpdatableReplicaSetNode cur : _all) { - nodeList.add(new ReplicaSetNode(cur._addr, cur._names, cur._setName, cur._pingTimeMS, cur.isOk(), cur._isMaster, cur._isSecondary, cur._tags, cur._maxBsonObjectSize)); - } - return nodeList; - } - - private void closeAllNodes() { - for (UpdatableReplicaSetNode node : _all) { - try { - node.close(); - } catch (final Throwable t) { /* 
nada */ } - } - } - - private final List _all; - private final Random _random = new Random(); - } - - @Override - Node ensureMaster() { - ReplicaSetNode masterNode = getMasterNode(); - if (masterNode != null) { - return masterNode; - } - - _replicaSetHolder.waitForNextUpdate(); - - masterNode = getMasterNode(); - if (masterNode != null) { - return masterNode; - } - - return null; - } - - List getServerAddressList() { - List addrs = new ArrayList(); - for (ReplicaSetNode node : _replicaSetHolder.get().getAll()) - addrs.add(node.getServerAddress()); - return addrs; - } - - /** - * Gets the maximum size for a BSON object supported by the current master server. - * Note that this value may change over time depending on which server is master. - * @return the maximum size, or 0 if not obtained from servers yet. - * @throws MongoException - */ - public int getMaxBsonObjectSize() { - return _replicaSetHolder.get().getMaxBsonObjectSize(); - } - - final ReplicaSetHolder _replicaSetHolder = new ReplicaSetHolder(); - - // will get changed to use set name once its found - private final AtomicReference _logger = new AtomicReference(_rootLogger); - - private final AtomicReference _lastPrimarySignal = new AtomicReference(); - final static int slaveAcceptableLatencyMS; - - static { - slaveAcceptableLatencyMS = Integer.parseInt(System.getProperty("com.mongodb.slaveAcceptableLatencyMS", "15")); - } - -} diff --git a/src/main/com/mongodb/Response.java b/src/main/com/mongodb/Response.java deleted file mode 100644 index 1a2692d291f..00000000000 --- a/src/main/com/mongodb/Response.java +++ /dev/null @@ -1,200 +0,0 @@ -// Response.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.mongodb; - -// Bson -import org.bson.io.Bits; - -// Java -import java.io.IOException; -import java.io.InputStream; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.List; - -class Response { - - Response( ServerAddress addr , DBCollection collection , InputStream in, DBDecoder decoder) - throws IOException { - - _host = addr; - - final byte [] b = new byte[36]; - Bits.readFully(in, b); - int pos = 0; - - _len = Bits.readInt(b, pos); - pos += 4; - - if (_len > MAX_LENGTH) { - throw new IllegalArgumentException( "response too long: " + _len ); - } - - _id = Bits.readInt(b, pos); - pos += 4; - - _responseTo = Bits.readInt(b, pos); - pos += 4; - - _operation = Bits.readInt(b, pos); - pos += 4; - - _flags = Bits.readInt(b, pos); - pos += 4; - - _cursor = Bits.readLong(b, pos); - pos += 8; - - _startingFrom = Bits.readInt(b, pos); - pos += 4; - - _num = Bits.readInt(b, pos); - pos += 4; - - final MyInputStream user = new MyInputStream( in , _len - b.length ); - - if ( _num < 2 ) - _objects = new LinkedList(); - else - _objects = new ArrayList( _num ); - - for ( int i=0; i < _num; i++ ){ - if ( user._toGo < 5 ) - throw new IOException( "should have more objects, but only " + user._toGo + " bytes left" ); - // TODO: By moving to generics, you can remove these casts (and requirement to impl DBOBject). - - _objects.add( decoder.decode( user, collection ) ); - } - - if ( user._toGo != 0 ) - throw new IOException( "finished reading objects but still have: " + user._toGo + " bytes to read!' " ); - - if ( _num != _objects.size() ) - throw new RuntimeException( "something is really broken" ); - } - - public int size(){ - return _num; - } - - public ServerAddress serverUsed() { - return _host; - } - - public DBObject get( int i ){ - return _objects.get( i ); - } - - public Iterator iterator(){ - return _objects.iterator(); - } - - public boolean hasGetMore( int queryOptions ){ - if ( _cursor == 0 ) - return false; - - if ( _num > 0 ) - return true; - - if ( ( queryOptions & Bytes.QUERYOPTION_TAILABLE ) == 0 ) - return false; - - // have a tailable cursor, it is always possible to call get more - return true; - } - - public long cursor(){ - return _cursor; - } - - public ServerError getError(){ - if ( _num != 1 ) - return null; - - DBObject obj = get(0); - - if ( ServerError.getMsg( obj , null ) == null ) - return null; - - return new ServerError( obj ); - } - - static class MyInputStream extends InputStream { - MyInputStream( InputStream in , int max ){ - _in = in; - _toGo = max; - } - - public int available() - throws IOException { - return _in.available(); - } - - public int read() - throws IOException { - - if ( _toGo <= 0 ) - return -1; - - int val = _in.read(); - _toGo--; - - return val; - } - - public int read(byte[] b, int off, int len) - throws IOException { - - if ( _toGo <= 0 ) - return -1; - - int n = _in.read(b, off, Math.min(_toGo, len)); - _toGo -= n; - return n; - } - - public void close(){ - throw new RuntimeException( "can't close thos" ); - } - - final InputStream _in; - private int _toGo; - } - - public String toString(){ - return "flags:" + _flags + " _cursor:" + _cursor + " _startingFrom:" + _startingFrom + " _num:" + _num ; - } - - final ServerAddress _host; - - final int _len; - final int _id; - final int _responseTo; - final int _operation; - - final int _flags; - long _cursor; - final int _startingFrom; - final int _num; - - final List _objects; - - private static final int MAX_LENGTH = ( 32 * 1024 * 1024 
); -} diff --git a/src/main/com/mongodb/ServerAddress.java b/src/main/com/mongodb/ServerAddress.java deleted file mode 100644 index 3ad39aad950..00000000000 --- a/src/main/com/mongodb/ServerAddress.java +++ /dev/null @@ -1,198 +0,0 @@ -// ServerAddress.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import org.bson.util.annotations.Immutable; - -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.UnknownHostException; - -/** - * mongo server address - */ -@Immutable -public class ServerAddress { - - /** - * Creates a ServerAddress with default host and port - * @throws UnknownHostException - */ - public ServerAddress() - throws UnknownHostException { - this( defaultHost() , defaultPort() ); - } - - /** - * Creates a ServerAddress with default port - * @param host hostname - * @throws UnknownHostException - */ - public ServerAddress( String host ) - throws UnknownHostException { - this( host , defaultPort() ); - } - - /** - * Creates a ServerAddress - * @param host hostname - * @param port mongod port - * @throws UnknownHostException - */ - public ServerAddress( String host , int port ) - throws UnknownHostException { - if ( host == null ) - host = defaultHost(); - host = host.trim(); - if ( host.length() == 0 ) - host = defaultHost(); - - int idx = host.indexOf( ":" ); - if ( idx > 0 ){ - if ( port != defaultPort() ) - throw new IllegalArgumentException( "can't specify port in construct and via host" ); - port = Integer.parseInt( host.substring( idx + 1 ) ); - host = host.substring( 0 , idx ).trim(); - } - - _host = host; - _port = port; - } - - /** - * Creates a ServerAddress with default port - * @param addr host address - */ - public ServerAddress( InetAddress addr ){ - this( new InetSocketAddress( addr , defaultPort() ) ); - } - - /** - * Creates a ServerAddress - * @param addr host address - * @param port mongod port - */ - public ServerAddress( InetAddress addr , int port ){ - this( new InetSocketAddress( addr , port ) ); - } - - /** - * Creates a ServerAddress - * @param addr inet socket address containing hostname and port - */ - public ServerAddress( InetSocketAddress addr ){ - _host = addr.getHostName(); - _port = addr.getPort(); - } - - // -------- - // equality, etc... - // -------- - - - /** - * Determines whether this address is the same as a given host. 
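For reference, the 36-byte reply header that Response.java decoded above has a fixed little-endian layout. A self-contained sketch of the same decoding using ByteBuffer; the field names follow the wire-protocol documentation, and this class is an illustration rather than the driver's own:

```java
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

// Sketch of the 36-byte OP_REPLY header parsed by the deleted Response class.
// MongoDB's wire protocol is little-endian, hence the explicit byte order.
final class ReplyHeader {
    final int messageLength, requestId, responseTo, opCode, flags, startingFrom, numberReturned;
    final long cursorId;

    ReplyHeader(InputStream in) throws IOException {
        byte[] raw = new byte[36];
        new DataInputStream(in).readFully(raw);
        ByteBuffer buf = ByteBuffer.wrap(raw).order(ByteOrder.LITTLE_ENDIAN);
        messageLength  = buf.getInt();  // total message size, header included
        requestId      = buf.getInt();
        responseTo     = buf.getInt();  // id of the request this answers
        opCode         = buf.getInt();  // 1 == OP_REPLY
        flags          = buf.getInt();
        cursorId       = buf.getLong(); // non-zero while more batches remain
        startingFrom   = buf.getInt();
        numberReturned = buf.getInt();  // count of BSON documents that follow
    }
}
```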
- * @param host the address to compare - * @return if they are the same - */ - public boolean sameHost( String host ){ - int idx = host.indexOf( ":" ); - int port = defaultPort(); - if ( idx > 0 ){ - port = Integer.parseInt( host.substring( idx + 1 ) ); - host = host.substring( 0 , idx ); - } - - return - _port == port && - _host.equalsIgnoreCase( host ); - } - - @Override - public boolean equals(final Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - final ServerAddress that = (ServerAddress) o; - - if (_port != that._port) return false; - if (!_host.equals(that._host)) return false; - - return true; - } - - @Override - public int hashCode() { - int result = _host.hashCode(); - result = 31 * result + _port; - return result; - } - - /** - * Gets the hostname - * @return hostname - */ - public String getHost(){ - return _host; - } - - /** - * Gets the port number - * @return port - */ - public int getPort(){ - return _port; - } - - /** - * Gets the underlying socket address - * @return socket address - * @throws MongoException.Network if the host can not be resolved - */ - public InetSocketAddress getSocketAddress() throws UnknownHostException { - return new InetSocketAddress(InetAddress.getByName(_host), _port); - } - - @Override - public String toString(){ - return _host + ":" + _port; - } - - final String _host; - final int _port; - - // -------- - // static helpers - // -------- - - /** - * Returns the default database host: "127.0.0.1" - * @return IP address of default host. - */ - public static String defaultHost(){ - return "127.0.0.1"; - } - - /** Returns the default database port: 27017 - * @return the default port - */ - public static int defaultPort(){ - return DBPort.PORT; - } -} diff --git a/src/main/com/mongodb/ServerError.java b/src/main/com/mongodb/ServerError.java deleted file mode 100644 index 9c88e5ebd39..00000000000 --- a/src/main/com/mongodb/ServerError.java +++ /dev/null @@ -1,103 +0,0 @@ -// ServerError.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.mongodb; - -import org.bson.BSONObject; - -/** - * Represents a server error - */ -public class ServerError { - - ServerError( DBObject o ){ - _err = getMsg( o , null ); - if ( _err == null ) - throw new IllegalArgumentException( "need to have $err" ); - _code = getCode( o ); - } - - static String getMsg( BSONObject o , String def ){ - Object e = o.get( "$err" ); - if ( e == null ) - e = o.get( "err" ); - if ( e == null ) - e = o.get( "errmsg" ); - if ( e == null ) - return def; - return e.toString(); - } - - static int getCode( BSONObject o ){ - Object c = o.get( "code" ); - if ( c == null ) - c = o.get( "$code" ); - if ( c == null ) - c = o.get( "assertionCode" ); - - if ( c == null ) - return -5; - - return ((Number)c).intValue(); - } - - /** - * Gets the error String - * @return - */ - public String getError(){ - return _err; - } - - /** - * Gets the error code - * @return - */ - public int getCode(){ - return _code; - } - - /** - * returns true if the error is "not master", which usually happens when doing operation on slave - * @return - */ - public boolean isNotMasterError(){ - switch ( _code ){ - case 10054: - case 10056: - case 10058: - case 10107: - case 13435: - case 13436: - return true; - } - - return _err.startsWith( "not master" ); - } - - @Override - public String toString(){ - if ( _code > 0 ) - return _code + " " + _err; - return _err; - } - - final String _err; - final int _code; - -} diff --git a/src/main/com/mongodb/TaggableReadPreference.java b/src/main/com/mongodb/TaggableReadPreference.java deleted file mode 100644 index e8b71a5ece8..00000000000 --- a/src/main/com/mongodb/TaggableReadPreference.java +++ /dev/null @@ -1,215 +0,0 @@ -package com.mongodb; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - -/** - * Abstract base class for all preference which can be combined with tags - * - * @author breinero - */ -public abstract class TaggableReadPreference extends ReadPreference { - private final static List EMPTY = new ArrayList(); - - TaggableReadPreference() { - _tags = EMPTY; - } - - TaggableReadPreference(DBObject firstTagSet, DBObject... remainingTagSets) { - if (firstTagSet == null) { - throw new IllegalArgumentException("Must have at least one tag set"); - } - _tags = new ArrayList(); - _tags.add(firstTagSet); - Collections.addAll(_tags, remainingTagSets); - } - - @Override - public boolean isSlaveOk() { - return true; - } - - @Override - public DBObject toDBObject() { - DBObject readPrefObject = new BasicDBObject("mode", getName()); - - if (!_tags.isEmpty()) - readPrefObject.put("tags", _tags); - - return readPrefObject; - } - - - public List getTagSets() { - List tags = new ArrayList(); - for (DBObject tagSet : _tags) { - tags.add(tagSet); - } - return tags; - } - - @Override - public String toString() { - return getName() + printTags(); - } - - @Override - public boolean equals(final Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - final TaggableReadPreference that = (TaggableReadPreference) o; - - if (!_tags.equals(that._tags)) return false; - - return true; - } - - @Override - public int hashCode() { - int result = _tags.hashCode(); - result = 31 * result + getName().hashCode(); - return result; - } - - String printTags() { - return (_tags.isEmpty() ? 
"" : " : " + new BasicDBObject("tags", _tags)); - } - - private static List getTagListFromDBObject(final DBObject curTagSet) { - List tagList = new ArrayList(); - for (String key : curTagSet.keySet()) { - tagList.add(new ReplicaSetStatus.Tag(key, curTagSet.get(key).toString())); - } - return tagList; - } - - final List _tags; - - /** - * Read from secondary - * - * @author breinero - */ - static class SecondaryReadPreference extends TaggableReadPreference { - SecondaryReadPreference() { - } - - SecondaryReadPreference(DBObject firstTagSet, DBObject... remainingTagSets) { - super(firstTagSet, remainingTagSets); - } - - @Override - public String getName() { - return "secondary"; - } - - @Override - ReplicaSetStatus.ReplicaSetNode getNode(ReplicaSetStatus.ReplicaSet set) { - - if (_tags.isEmpty()) - return set.getASecondary(); - - for (DBObject curTagSet : _tags) { - List tagList = getTagListFromDBObject(curTagSet); - ReplicaSetStatus.ReplicaSetNode node = set.getASecondary(tagList); - if (node != null) { - return node; - } - } - return null; - } - - } - - /** - * Read from secondary if available, otherwise from primary, irrespective of tags. - * - * @author breinero - */ - static class SecondaryPreferredReadPreference extends SecondaryReadPreference { - SecondaryPreferredReadPreference() { - } - - SecondaryPreferredReadPreference(DBObject firstTagSet, DBObject... remainingTagSets) { - super(firstTagSet, remainingTagSets); - } - - @Override - public String getName() { - return "secondaryPreferred"; - } - - @Override - ReplicaSetStatus.ReplicaSetNode getNode(ReplicaSetStatus.ReplicaSet set) { - ReplicaSetStatus.ReplicaSetNode node = super.getNode(set); - return (node != null) ? node : set.getMaster(); - } - } - - /** - * Read from nearest node respective of tags. - * - * @author breinero - */ - static class NearestReadPreference extends TaggableReadPreference { - NearestReadPreference() { - } - - NearestReadPreference(DBObject firstTagSet, DBObject... remainingTagSets) { - super(firstTagSet, remainingTagSets); - } - - - @Override - public String getName() { - return "nearest"; - } - - - @Override - ReplicaSetStatus.ReplicaSetNode getNode(ReplicaSetStatus.ReplicaSet set) { - - if (_tags.isEmpty()) - return set.getAMember(); - - for (DBObject curTagSet : _tags) { - List tagList = getTagListFromDBObject(curTagSet); - ReplicaSetStatus.ReplicaSetNode node = set.getAMember(tagList); - if (node != null) { - return node; - } - } - return null; - } - } - - /** - * Read from primary if available, otherwise a secondary. - * - * @author breinero - */ - static class PrimaryPreferredReadPreference extends SecondaryReadPreference { - PrimaryPreferredReadPreference() {} - - PrimaryPreferredReadPreference(DBObject firstTagSet, DBObject... remainingTagSets) { - super(firstTagSet, remainingTagSets); - } - - @Override - public String getName() { - return "primaryPreferred"; - } - - @Override - ReplicaSetStatus.ReplicaSetNode getNode(ReplicaSetStatus.ReplicaSet set) { - ReplicaSetStatus.ReplicaSetNode node = set.getMaster(); - return (node != null) ? node : super.getNode(set); - } - } - - - - -} diff --git a/src/main/com/mongodb/WriteConcern.java b/src/main/com/mongodb/WriteConcern.java deleted file mode 100644 index 57a7988cf5d..00000000000 --- a/src/main/com/mongodb/WriteConcern.java +++ /dev/null @@ -1,541 +0,0 @@ -// WriteConcern.java - -/** - * Copyright (C) 2008-2011 10gen Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import java.io.Serializable; -import java.lang.reflect.Field; -import java.lang.reflect.Modifier; -import java.util.HashMap; -import java.util.Map; - -/** - *
<p>WriteConcern controls the acknowledgment of write operations with various options.</p>
- * <p><b>w</b></p>
- * <ul>
- *   <li>-1 = Don't even report network errors</li>
- *   <li> 0 = Don't wait for acknowledgement from the server</li>
- *   <li> 1 = Wait for acknowledgement, but don't wait for secondaries to replicate</li>
- *   <li>2+ = Wait for one or more secondaries to also acknowledge</li>
- * </ul>
- * <p><b>wtimeout</b> how long to wait for slaves before failing:</p>
- * <ul>
- *   <li>0: indefinite</li>
- *   <li>greater than 0: ms to wait</li>
- * </ul>
- * <p>Other options:</p>
- * <ul>
- *   <li><b>j</b>: wait for group commit to journal</li>
- *   <li><b>fsync</b>: force fsync to disk</li>
- * </ul>
        - * @dochub databases - */ -public class WriteConcern implements Serializable { - - private static final long serialVersionUID = 1884671104750417011L; - - /** - * No exceptions are raised, even for network issues. - */ - public final static WriteConcern ERRORS_IGNORED = new WriteConcern(-1); - - /** - * Write operations that use this write concern will wait for acknowledgement from the primary server before returning. - * Exceptions are raised for network issues, and server errors. - * @since 2.10.0 - */ - public final static WriteConcern ACKNOWLEDGED = new WriteConcern(1); - /** - * Write operations that use this write concern will return as soon as the message is written to the socket. - * Exceptions are raised for network issues, but not server errors. - * @since 2.10.0 - */ - public final static WriteConcern UNACKNOWLEDGED = new WriteConcern(0); - - /** - * Exceptions are raised for network issues, and server errors; the write operation waits for the server to flush - * the data to disk. - */ - public final static WriteConcern FSYNCED = new WriteConcern(true); - - /** - * Exceptions are raised for network issues, and server errors; the write operation waits for the server to - * group commit to the journal file on disk. - */ - public final static WriteConcern JOURNALED = new WriteConcern( 1, 0, false, true ); - - /** - * Exceptions are raised for network issues, and server errors; waits for at least 2 servers for the write operation. - */ - public final static WriteConcern REPLICA_ACKNOWLEDGED= new WriteConcern(2); - - /** - * No exceptions are raised, even for network issues. - *
<p>
        - * This field has been superseded by {@code WriteConcern.ERRORS_IGNORED}, and may be deprecated in a future release. - * @see WriteConcern#ERRORS_IGNORED - */ - public final static WriteConcern NONE = new WriteConcern(-1); - - /** - * Write operations that use this write concern will return as soon as the message is written to the socket. - * Exceptions are raised for network issues, but not server errors. - *
<p>
        - * This field has been superseded by {@code WriteConcern.UNACKNOWLEDGED}, and may be deprecated in a future release. - * @see WriteConcern#UNACKNOWLEDGED - */ - public final static WriteConcern NORMAL = new WriteConcern(0); - - /** - * Write operations that use this write concern will wait for acknowledgement from the primary server before returning. - * Exceptions are raised for network issues, and server errors. - *
<p>
        - * This field has been superseded by {@code WriteConcern.ACKNOWLEDGED}, and may be deprecated in a future release. - * @see WriteConcern#ACKNOWLEDGED - */ - public final static WriteConcern SAFE = new WriteConcern(1); - - /** - * Exceptions are raised for network issues, and server errors; waits on a majority of servers for the write operation. - */ - public final static WriteConcern MAJORITY = new Majority(); - - /** - * Exceptions are raised for network issues, and server errors; the write operation waits for the server to flush - * the data to disk. - *
<p>
        - * This field has been superseded by {@code WriteConcern.FSYNCED}, and may be deprecated in a future release. - * @see WriteConcern#FSYNCED - */ - public final static WriteConcern FSYNC_SAFE = new WriteConcern(true); - - /** - * Exceptions are raised for network issues, and server errors; the write operation waits for the server to - * group commit to the journal file on disk. - *
<p>
        - * This field has been superseded by {@code WriteConcern.JOURNALED}, and may be deprecated in a future release. - * @see WriteConcern#JOURNALED - */ - public final static WriteConcern JOURNAL_SAFE = new WriteConcern( 1, 0, false, true ); - - /** - * Exceptions are raised for network issues, and server errors; waits for at least 2 servers for the write operation. - *
<p>
        - * This field has been superseded by {@code WriteConcern.REPLICA_ACKNOWLEDGED}, and may be deprecated in a future release. - * @see WriteConcern#REPLICA_ACKNOWLEDGED - */ - public final static WriteConcern REPLICAS_SAFE = new WriteConcern(2); - - // map of the constants from above for use by fromString - private static Map _namedConcerns = null; - - /** - * Default constructor keeping all options as default. Be careful using this constructor, as it's equivalent to - * {@code WriteConcern.UNACKNOWLEDGED}, so writes may be lost without any errors being reported. - * @see WriteConcern#UNACKNOWLEDGED - */ - public WriteConcern(){ - this(0); - } - - /** - * Calls {@link WriteConcern#WriteConcern(int, int, boolean)} with wtimeout=0 and fsync=false - * @param w number of writes - */ - public WriteConcern( int w ){ - this( w , 0 , false ); - } - - /** - * Tag based Write Concern with wtimeout=0, fsync=false, and j=false - * @param w Write Concern tag - */ - public WriteConcern( String w ){ - this( w , 0 , false, false ); - } - - /** - * Calls {@link WriteConcern#WriteConcern(int, int, boolean)} with fsync=false - * @param w number of writes - * @param wtimeout timeout for write operation - */ - public WriteConcern( int w , int wtimeout ){ - this( w , wtimeout , false ); - } - - /** - * Calls {@link WriteConcern#WriteConcern(int, int, boolean)} with w=1 and wtimeout=0 - * @param fsync whether or not to fsync - */ - public WriteConcern( boolean fsync ){ - this( 1 , 0 , fsync); - } - - /** - * Creates a WriteConcern object. - *
<p>Specifies the number of servers to wait for on the write operation, and exception raising behavior.</p>
- * <p>w represents the number of servers:</p>
- * <ul>
- *   <li>{@code w=-1} None, no checking is done</li>
- *   <li>{@code w=0} None, network socket errors raised</li>
- *   <li>{@code w=1} Checks server for errors as well as network socket errors raised</li>
- *   <li>{@code w>1} Checks servers (w) for errors as well as network socket errors raised</li>
- * </ul>
        - * @param w number of writes - * @param wtimeout timeout for write operation - * @param fsync whether or not to fsync - */ - public WriteConcern( int w , int wtimeout , boolean fsync ){ - this(w, wtimeout, fsync, false); - } - - /** - * Creates a WriteConcern object. - *
<p>Specifies the number of servers to wait for on the write operation, and exception raising behavior.</p>
- * <p>w represents the number of servers:</p>
- * <ul>
- *   <li>{@code w=-1} None, no checking is done</li>
- *   <li>{@code w=0} None, network socket errors raised</li>
- *   <li>{@code w=1} Checks server for errors as well as network socket errors raised</li>
- *   <li>{@code w>1} Checks servers (w) for errors as well as network socket errors raised</li>
- * </ul>
        - * @param w number of writes - * @param wtimeout timeout for write operation - * @param fsync whether or not to fsync - * @param j whether writes should wait for a journaling group commit - */ - public WriteConcern( int w , int wtimeout , boolean fsync , boolean j ){ - this( w, wtimeout, fsync, j, false); - } - - /** - * Creates a WriteConcern object. - *
<p>Specifies the number of servers to wait for on the write operation, and exception raising behavior.</p>
- * <p>w represents the number of servers:</p>
- * <ul>
- *   <li>{@code w=-1} None, no checking is done</li>
- *   <li>{@code w=0} None, network socket errors raised</li>
- *   <li>{@code w=1} Checks server for errors as well as network socket errors raised</li>
- *   <li>{@code w>1} Checks servers (w) for errors as well as network socket errors raised</li>
- * </ul>
        - * @param w number of writes - * @param wtimeout timeout for write operation - * @param fsync whether or not to fsync - * @param j whether writes should wait for a journaling group commit - * @param continueOnInsertError if batch inserts should continue after the first error - */ - public WriteConcern( int w , int wtimeout , boolean fsync , boolean j, boolean continueOnInsertError) { - _w = w; - _wtimeout = wtimeout; - _fsync = fsync; - _j = j; - _continueOnErrorForInsert = continueOnInsertError; - } - - /** - * Creates a WriteConcern object. - *
<p>Specifies the number of servers to wait for on the write operation, and exception raising behavior.</p>
- * <p>w represents the number of servers:</p>
- * <ul>
- *   <li>{@code w=-1} None, no checking is done</li>
- *   <li>{@code w=0} None, network socket errors raised</li>
- *   <li>{@code w=1} Checks server for errors as well as network socket errors raised</li>
- *   <li>{@code w>1} Checks servers (w) for errors as well as network socket errors raised</li>
- * </ul>
        - * @param w number of writes - * @param wtimeout timeout for write operation - * @param fsync whether or not to fsync - * @param j whether writes should wait for a journaling group commit - */ - public WriteConcern( String w , int wtimeout , boolean fsync, boolean j ){ - this( w, wtimeout, fsync, j, false); - } - - /** - * Creates a WriteConcern object. - *
<p>Specifies the number of servers to wait for on the write operation, and exception raising behavior.</p>
- * <p>w represents the number of servers:</p>
- * <ul>
- *   <li>{@code w=-1} None, no checking is done</li>
- *   <li>{@code w=0} None, network socket errors raised</li>
- *   <li>{@code w=1} Checks server for errors as well as network socket errors raised</li>
- *   <li>{@code w>1} Checks servers (w) for errors as well as network socket errors raised</li>
- * </ul>
        - * @param w number of writes - * @param wtimeout timeout for write operation - * @param fsync whether or not to fsync - * @param j whether writes should wait for a journaling group commit - * @param continueOnInsertError if batch inserts should continue after the first error - * @return - */ - public WriteConcern( String w , int wtimeout , boolean fsync, boolean j, boolean continueOnInsertError ){ - if (w == null) { - throw new IllegalArgumentException("w can not be null"); - } - - _w = w; - _wtimeout = wtimeout; - _fsync = fsync; - _j = j; - _continueOnErrorForInsert = continueOnInsertError; - } - - /** - * Gets the getlasterror command for this write concern. - * - * @return getlasterror command, even if w <= 0 - */ - public BasicDBObject getCommand() { - BasicDBObject _command = new BasicDBObject( "getlasterror" , 1 ); - - if (_w instanceof Integer && ((Integer) _w > 1) || (_w instanceof String)){ - _command.put( "w" , _w ); - } - - if (_wtimeout > 0) { - _command.put( "wtimeout" , _wtimeout ); - } - - if ( _fsync ) - _command.put( "fsync" , true ); - - if ( _j ) - _command.put( "j", true ); - - return _command; - } - - /** - * Sets the w value (the write strategy). - * - * @param w the value of w. - * @deprecated construct a new instance instead. This method will be removed in a future major release, as instances of this class - * should really be immutable. - */ - @Deprecated - public void setWObject(Object w) { - if ( ! (w instanceof Integer) && ! (w instanceof String) ) - throw new IllegalArgumentException("The w parameter must be an int or a String"); - this._w = w; - } - - /** - * Gets the w value (the write strategy) - * @return - */ - public Object getWObject(){ - return _w; - } - - /** - * Gets the w parameter (the write strategy) - * @return - */ - public int getW(){ - return (Integer) _w; - } - - /** - * Gets the w parameter (the write strategy) in String format - * @return w as a string - */ - public String getWString(){ - return _w.toString(); - } - - /** - * Gets the write timeout (in milliseconds) - * @return - */ - public int getWtimeout(){ - return _wtimeout; - } - - /** - * Gets the fsync flag (fsync to disk on the server) - * @return - */ - public boolean getFsync(){ - return _fsync; - } - - /** - * Gets the fsync flag (fsync to disk on the server) - * @return - */ - public boolean fsync(){ - return _fsync; - } - - /** - * Returns whether network error may be raised (w >= 0) - * @return - */ - public boolean raiseNetworkErrors(){ - if (_w instanceof Integer) - return (Integer) _w >= 0; - return _w != null; - } - - /** - * Returns whether "getlasterror" should be called (w > 0) - * @return - */ - public boolean callGetLastError(){ - if (_w instanceof Integer) - return (Integer) _w > 0; - return _w != null; - } - - /** - * Gets the WriteConcern constants by name: NONE, NORMAL, SAFE, FSYNC_SAFE, - * REPLICA_SAFE. (matching is done case insensitively) - * @param name - * @return - */ - public static WriteConcern valueOf(String name) { - if (_namedConcerns == null) { - HashMap newMap = new HashMap( 8 , 1 ); - for (Field f : WriteConcern.class.getFields()) - if (Modifier.isStatic( f.getModifiers() ) && f.getType().equals( WriteConcern.class )) { - try { - String key = f.getName().toLowerCase(); - newMap.put(key, (WriteConcern) f.get( null ) ); - } catch (Exception e) { - throw new RuntimeException( e ); - } - } - - // Thought about doing a synchronize but this seems just as safe and - // I don't care about race conditions. 
- _namedConcerns = newMap; - } - - return _namedConcerns.get( name.toLowerCase() ); - } - - @Override - public String toString() { - return "WriteConcern " + getCommand() + " / (Continue Inserting on Errors? " + getContinueOnErrorForInsert() + ")"; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - WriteConcern that = (WriteConcern) o; - - if (_continueOnErrorForInsert != that._continueOnErrorForInsert) return false; - if (_fsync != that._fsync) return false; - if (_j != that._j) return false; - if (_wtimeout != that._wtimeout) return false; - if (!_w.equals(that._w)) return false; - - return true; - } - - @Override - public int hashCode() { - int result = _w.hashCode(); - result = 31 * result + _wtimeout; - result = 31 * result + (_fsync ? 1 : 0); - result = 31 * result + (_j ? 1 : 0); - result = 31 * result + (_continueOnErrorForInsert ? 1 : 0); - return result; - } - - /** - * Gets the j parameter (journal syncing) - * @return - */ - public boolean getJ() { - return _j; - } - - /** - * Toggles the "continue inserts on error" mode. This only applies to server side errors. - * If there is a document which does not validate in the client, an exception will still - * be thrown in the client. - * This will return a *NEW INSTANCE* of WriteConcern with your preferred continueOnInsert value - * - * @param continueOnErrorForInsert - */ - public WriteConcern continueOnErrorForInsert(boolean continueOnErrorForInsert) { - if ( _w instanceof Integer ) - return new WriteConcern((Integer) _w, _wtimeout, _fsync, _j, continueOnErrorForInsert); - else if ( _w instanceof String ) - return new WriteConcern((String) _w, _wtimeout, _fsync, _j, continueOnErrorForInsert); - else - throw new IllegalStateException("The w parameter must be an int or a String"); - } - - /** - * Gets the "continue inserts on error" mode - * @return - */ - public boolean getContinueOnErrorForInsert() { - return _continueOnErrorForInsert; - } - - /** - * Create a Majority Write Concern that requires a majority of - * servers to acknowledge the write. - * - * @param wtimeout timeout for write operation - * @param fsync whether or not to fsync - * @param j whether writes should wait for a journaling group commit - */ - public static Majority majorityWriteConcern( int wtimeout, boolean fsync, boolean j ) { - return new Majority( wtimeout, fsync, j ); - } - - - Object _w; // this should be final, but can't be because of inadvertent public setter - final int _wtimeout; - final boolean _fsync; - final boolean _j; - final boolean _continueOnErrorForInsert ; - - public static class Majority extends WriteConcern { - - private static final long serialVersionUID = -4128295115883875212L; - - public Majority( ) { - super( "majority", 0, false, false ); - } - - public Majority( int wtimeout, boolean fsync, boolean j ){ - super( "majority", wtimeout, fsync, j ); - } - - @Override - public String toString(){ - return "[Majority] WriteConcern " + getCommand(); - } - - } -} diff --git a/src/main/com/mongodb/WriteConcernException.java b/src/main/com/mongodb/WriteConcernException.java deleted file mode 100644 index ef84b9e6345..00000000000 --- a/src/main/com/mongodb/WriteConcernException.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright (c) 2008 - 2013 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
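Putting the w/wtimeout/fsync/j options above together, a brief sketch of how callers typically pick a concern; the specific numbers are illustrative:

```java
import com.mongodb.WriteConcern;

public class WriteConcernExample {
    public static void main(String[] args) {
        // Named constants cover the common cases.
        WriteConcern ack = WriteConcern.ACKNOWLEDGED;              // w=1
        WriteConcern journaled = WriteConcern.JOURNALED;           // w=1, j=true
        WriteConcern replicas = WriteConcern.REPLICA_ACKNOWLEDGED; // w=2

        // Explicit form: wait for 2 servers, at most 500 ms, no fsync, journal.
        WriteConcern custom = new WriteConcern(2, 500, false, true);

        // Tag-based majority with a 1-second timeout.
        WriteConcern majority = WriteConcern.majorityWriteConcern(1000, false, true);

        System.out.println(custom.getCommand()); // the getlasterror document sent
    }
}
```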
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.mongodb; - -/** - * An exception representing an error reported due to a write failure. - */ -public class WriteConcernException extends MongoException { - - private static final long serialVersionUID = 841056799207039974L; - - private final CommandResult commandResult; - - /** - * Construct a new instance with the CommandResult from getlasterror command - * - * @param commandResult the command result - */ - public WriteConcernException(final CommandResult commandResult) { - super(commandResult.getCode(), commandResult.toString()); - this.commandResult = commandResult; - } - - /** - * Gets the getlasterror command result document. - * - * @return the command result - */ - public CommandResult getCommandResult() { - return commandResult; - } -} diff --git a/src/main/com/mongodb/WriteResult.java b/src/main/com/mongodb/WriteResult.java deleted file mode 100644 index 8af14d0fb07..00000000000 --- a/src/main/com/mongodb/WriteResult.java +++ /dev/null @@ -1,169 +0,0 @@ -// WriteResult.java - -package com.mongodb; - -import java.io.IOException; - - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -/** - * This class lets you access the results of the previous write. - * if you have STRICT mode on, this just stores the result of that getLastError call - * if you don't, then this will actually do the getlasterror call. 
- * if another operation has been done on this connection in the interim, calls will fail - */ -public class WriteResult { - - WriteResult( CommandResult o , WriteConcern concern ){ - _lastErrorResult = o; - _lastConcern = concern; - _lazy = false; - _port = null; - _db = null; - } - - WriteResult( DB db , DBPort p , WriteConcern concern ){ - _db = db; - _port = p; - _lastCall = p._calls.get(); - _lastConcern = concern; - _lazy = true; - } - - /** - * Gets the last result from getLastError() - * @return - */ - public CommandResult getCachedLastError(){ - return _lastErrorResult; - - } - - /** - * Gets the last {@link WriteConcern} used when calling getLastError() - * @return - */ - public WriteConcern getLastConcern(){ - return _lastConcern; - - } - - /** - * calls {@link WriteResult#getLastError(com.mongodb.WriteConcern)} with concern=null - * @return - * @throws MongoException - */ - public synchronized CommandResult getLastError(){ - return getLastError(null); - } - - /** - * This method does following: - * - returns the existing CommandResult if concern is null or less strict than the concern it was obtained with - * - otherwise attempts to obtain a CommandResult by calling getLastError with the concern - * @param concern the concern - * @return - * @throws MongoException - * @deprecated Please invoke write operation with appropriate {@code WriteConcern} - * and then use {@link #getLastError()} method. - */ - @Deprecated - public synchronized CommandResult getLastError(WriteConcern concern){ - if ( _lastErrorResult != null ) { - // do we have a satisfying concern? - if ( concern == null || ( _lastConcern != null && _lastConcern.getW() >= concern.getW() ) ) - return _lastErrorResult; - } - - // here we dont have a satisfying result - if ( _port != null ){ - try { - _lastErrorResult = _port.tryGetLastError( _db , _lastCall , (concern == null) ? new WriteConcern() : concern ); - } catch ( IOException ioe ){ - throw new MongoException.Network( ioe.getMessage() , ioe ); - } - - if (_lastErrorResult == null) - throw new IllegalStateException( "The connection may have been used since this write, cannot obtain a result" ); - _lastConcern = concern; - _lastCall++; - } else { - // this means we dont have satisfying result and cant get new one - throw new IllegalStateException( "Don't have a port to obtain a write result, and existing one is not good enough." ); - } - - return _lastErrorResult; - } - - - /** - * Gets the error String ("err" field) - * @return - * @throws MongoException - */ - public String getError(){ - Object foo = getField( "err" ); - if ( foo == null ) - return null; - return foo.toString(); - } - - /** - * Gets the "n" field, which contains the number of documents - * affected in the write operation. 
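A short sketch of how a caller consumes a WriteResult under the lazy getLastError semantics described above, assuming the 2.x insert overload that takes a WriteConcern; 'collection' is assumed to exist:

```java
import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.WriteConcern;
import com.mongodb.WriteResult;

public class WriteResultExample {
    static void insertAndCheck(DBCollection collection) {
        WriteResult result = collection.insert(
                new BasicDBObject("_id", 1).append("name", "example"),
                WriteConcern.ACKNOWLEDGED);

        // With an acknowledged concern the error state is already cached;
        // with UNACKNOWLEDGED, getLastError() issues the command lazily and
        // fails if the connection has been reused in the meantime.
        if (result.getError() != null) {
            throw new IllegalStateException(result.getError());
        }
        System.out.println("documents affected: " + result.getN());
    }
}
```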
- * @return - * @throws MongoException - */ - public int getN(){ - return getLastError().getInt( "n" ); - } - - /** - * Gets a field - * @param name field name - * @return - * @throws MongoException - */ - public Object getField( String name ){ - return getLastError().get( name ); - } - - /** - * Returns whether or not the result is lazy, meaning that getLastError was not called automatically - * @return - */ - public boolean isLazy(){ - return _lazy; - } - - @Override - public String toString(){ - CommandResult res = getCachedLastError(); - if (res != null) - return res.toString(); - return "N/A"; - } - - private long _lastCall; - private WriteConcern _lastConcern; - private CommandResult _lastErrorResult; - final private DB _db; - final private DBPort _port; - final private boolean _lazy; -} diff --git a/src/main/com/mongodb/gridfs/CLI.java b/src/main/com/mongodb/gridfs/CLI.java deleted file mode 100644 index e64dd9b5455..00000000000 --- a/src/main/com/mongodb/gridfs/CLI.java +++ /dev/null @@ -1,170 +0,0 @@ -// CLI.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.gridfs; - -import com.mongodb.DBObject; -import com.mongodb.Mongo; -import com.mongodb.MongoClient; -import com.mongodb.MongoClientURI; -import com.mongodb.ReadPreference; -import com.mongodb.util.Util; - -import java.io.File; -import java.security.DigestInputStream; -import java.security.MessageDigest; - - -/** - * a simple CLI for Gridfs - */ -public class CLI { - - /** - * Dumps usage info to stdout - */ - private static void printUsage() { - System.out.println("Usage : [--db database] action"); - System.out.println(" where action is one of:"); - System.out.println(" list : lists all files in the store"); - System.out.println(" put filename : puts the file filename into the store"); - System.out.println(" get filename1 filename2 : gets filename1 from store and sends to filename2"); - System.out.println(" md5 filename : does an md5 hash on a file in the db (for testing)"); - } - - private static String db = "test"; - private static String uri = "mongodb://127.0.0.1"; - private static Mongo _mongo = null; - - @SuppressWarnings("deprecation") - private static Mongo getMongo() - throws Exception { - if ( _mongo == null ) { - _mongo = new MongoClient(new MongoClientURI(uri)); - } - return _mongo; - } - - private static GridFS _gridfs; - private static GridFS getGridFS() - throws Exception { - if ( _gridfs == null ) - _gridfs = new GridFS( getMongo().getDB( db ) ); - return _gridfs; - } - - public static void main(String[] args) throws Exception { - - if ( args.length < 1 ){ - printUsage(); - return; - } - - for ( int i=0; i= 0 ){ - read++; - int r = is.read( new byte[17] ); - if ( r < 0 ) - break; - read += r; - } - byte[] digest = md5.digest(); - System.out.println( "length: " + read + " md5: " + Util.toHex( digest ) ); - return; - } - - - System.err.println( "unknown option: " + s ); - return; - } - - } - -} diff --git 
a/src/main/com/mongodb/gridfs/GridFS.java b/src/main/com/mongodb/gridfs/GridFS.java deleted file mode 100644 index a5c6305cad5..00000000000 --- a/src/main/com/mongodb/gridfs/GridFS.java +++ /dev/null @@ -1,466 +0,0 @@ -// GridFS.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.gridfs; - -import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.util.ArrayList; -import java.util.List; -import java.util.logging.Logger; - -import com.mongodb.MongoException; -import org.bson.types.ObjectId; - -import com.mongodb.BasicDBObject; -import com.mongodb.BasicDBObjectBuilder; -import com.mongodb.DB; -import com.mongodb.DBCollection; -import com.mongodb.DBCursor; -import com.mongodb.DBObject; - -/** - * Implementation of GridFS v1.0 - * - * GridFS 1.0 spec - * - * @dochub gridfs - */ -public class GridFS { - private static final Logger LOGGER = Logger.getLogger( "com.mongodb.gridfs" ); - - /** - * file's chunk size - */ - public static final int DEFAULT_CHUNKSIZE = 256 * 1024; - - /** - * file's max chunk size - * - * @deprecated You can calculate max chunkSize with - * a similar formula {@link com.mongodb.MongoClient#getMaxBsonObjectSize()} - 500*1000. - * Please ensure that you left enough space for metadata (500kb is enough). - */ - @Deprecated - public static final long MAX_CHUNKSIZE = (long) (3.5 * 1000 * 1000); - - /** - * bucket to use for the collection namespaces - */ - public static final String DEFAULT_BUCKET = "fs"; - - // -------------------------- - // ------ constructors ------- - // -------------------------- - - /** - * Creates a GridFS instance for the default bucket "fs" - * in the given database. Set the preferred WriteConcern on the give DB with DB.setWriteConcern - * @see com.mongodb.WriteConcern - * @param db database to work with - * @throws MongoException - */ - public GridFS(DB db) { - this(db, DEFAULT_BUCKET); - } - - /** - * Creates a GridFS instance for the specified bucket - * in the given database. 
Set the preferred WriteConcern on the give DB with DB.setWriteConcern - * - * @see com.mongodb.WriteConcern - * @param db database to work with - * @param bucket bucket to use in the given database - * @throws MongoException - */ - public GridFS(DB db, String bucket) { - _db = db; - _bucketName = bucket; - - _filesCollection = _db.getCollection( _bucketName + ".files" ); - _chunkCollection = _db.getCollection( _bucketName + ".chunks" ); - - // ensure standard indexes as long as collections are small - try { - if (_filesCollection.count() < 1000) { - _filesCollection.ensureIndex( BasicDBObjectBuilder.start().add( "filename" , 1 ).add( "uploadDate" , 1 ).get() ); - } - if (_chunkCollection.count() < 1000) { - _chunkCollection.ensureIndex( BasicDBObjectBuilder.start().add( "files_id" , 1 ).add( "n" , 1 ).get() , - BasicDBObjectBuilder.start().add( "unique" , true ).get() ); - } - } catch (MongoException e) { - LOGGER.info(String.format("Unable to ensure indices on GridFS collections in database %s", db.getName())); - } - _filesCollection.setObjectClass( GridFSDBFile.class ); - } - - - // -------------------------- - // ------ utils ------- - // -------------------------- - - - /** - * gets the list of files stored in this gridfs, sorted by filename - * - * @return cursor of file objects - */ - public DBCursor getFileList(){ - return getFileList(new BasicDBObject()); - } - - /** - * gets a filtered list of files stored in this gridfs, sorted by filename - * - * @param query filter to apply - * @return cursor of file objects - */ - public DBCursor getFileList( DBObject query ){ - return getFileList(query, new BasicDBObject("filename",1)); - } - - /** - * gets a filtered list of files stored in this gridfs, sorted by param sort - * - * @param query filter to apply - * @param sort sorting to apply - * @return cursor of file objects - */ - public DBCursor getFileList( DBObject query, DBObject sort){ - return _filesCollection.find( query ).sort(sort); - } - - - // -------------------------- - // ------ reading ------- - // -------------------------- - - /** - * finds one file matching the given id. Equivalent to findOne(id) - * @param id - * @return - * @throws MongoException - */ - public GridFSDBFile find( ObjectId id ){ - return findOne( id ); - } - /** - * finds one file matching the given id. 
- * @param id - * @return - * @throws MongoException - */ - public GridFSDBFile findOne( ObjectId id ){ - return findOne( new BasicDBObject( "_id" , id ) ); - } - /** - * finds one file matching the given filename - * @param filename - * @return - * @throws MongoException - */ - public GridFSDBFile findOne( String filename ){ - return findOne( new BasicDBObject( "filename" , filename ) ); - } - /** - * finds one file matching the given query - * @param query - * @return - * @throws MongoException - */ - public GridFSDBFile findOne( DBObject query ){ - return _fix( _filesCollection.findOne( query ) ); - } - - /** - * finds a list of files matching the given filename - * @param filename - * @return - * @throws MongoException - */ - public List find( String filename ){ - return find( filename, null ); - } - - /** - * finds a list of files matching the given filename - * @param filename - * @param sort - * @return - * @throws MongoException - */ - public List find( String filename , DBObject sort){ - return find( new BasicDBObject( "filename" , filename ), sort ); - } - - /** - * finds a list of files matching the given query - * @param query - * @return - * @throws MongoException - */ - public List find( DBObject query ){ - return find(query, null); - } - - /** - * finds a list of files matching the given query - * @param query - * @param sort - * @return - * @throws MongoException - */ - public List find( DBObject query , DBObject sort){ - List files = new ArrayList(); - - DBCursor c = null; - try { - c = _filesCollection.find( query ); - if (sort != null) { - c.sort(sort); - } - while ( c.hasNext() ){ - files.add( _fix( c.next() ) ); - } - } finally { - if (c != null){ - c.close(); - } - } - return files; - } - - /** - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. - */ - @Deprecated - protected GridFSDBFile _fix( Object o ){ - if ( o == null ) - return null; - - if ( ! ( o instanceof GridFSDBFile ) ) - throw new RuntimeException( "somehow didn't get a GridFSDBFile" ); - - GridFSDBFile f = (GridFSDBFile)o; - f._fs = this; - return f; - } - - - // -------------------------- - // ------ remove ------- - // -------------------------- - - /** - * removes the file matching the given id - * @param id - * @throws MongoException - */ - public void remove( ObjectId id ){ - _filesCollection.remove( new BasicDBObject( "_id" , id ) ); - _chunkCollection.remove( new BasicDBObject( "files_id" , id ) ); - } - - /** - * removes all files matching the given filename - * @param filename - * @throws MongoException - */ - public void remove( String filename ){ - remove( new BasicDBObject( "filename" , filename ) ); - } - - /** - * removes all files matching the given query - * @param query - * @throws MongoException - */ - public void remove( DBObject query ){ - for ( GridFSDBFile f : find( query ) ){ - f.remove(); - } - } - - - // -------------------------- - // ------ writing ------- - // -------------------------- - - /** - * creates a file entry. - * After calling this method, you have to call save() on the GridFSInputFile file - * @param data the file's data - * @return - */ - public GridFSInputFile createFile( byte[] data ){ - return createFile( new ByteArrayInputStream( data ), true ); - } - - - /** - * creates a file entry. 
- * After calling this method, you have to call save() on the GridFSInputFile file - * @param f the file object - * @return - * @throws IOException - */ - public GridFSInputFile createFile( File f ) - throws IOException { - return createFile( new FileInputStream( f ) , f.getName(), true ); - } - - /** - * creates a file entry. - * after calling this method, you have to call save() on the GridFSInputFile file - * @param in an inputstream containing the file's data - * @return - */ - public GridFSInputFile createFile( InputStream in ){ - return createFile( in , null ); - } - - /** - * creates a file entry. - * after calling this method, you have to call save() on the GridFSInputFile file - * @param in an inputstream containing the file's data - * @param closeStreamOnPersist indicate the passed in input stream should be closed - * once the data chunk persisted - * @return - */ - public GridFSInputFile createFile( InputStream in, boolean closeStreamOnPersist ){ - return createFile( in , null, closeStreamOnPersist ); - } - - /** - * creates a file entry. - * After calling this method, you have to call save() on the GridFSInputFile file - * @param in an inputstream containing the file's data - * @param filename the file name as stored in the db - * @return - */ - public GridFSInputFile createFile( InputStream in , String filename ){ - return new GridFSInputFile( this , in , filename ); - } - - /** - * creates a file entry. - * After calling this method, you have to call save() on the GridFSInputFile file - * @param in an inputstream containing the file's data - * @param filename the file name as stored in the db - * @param closeStreamOnPersist indicate the passed in input stream should be closed - * once the data chunk persisted - * @return - */ - public GridFSInputFile createFile( InputStream in , String filename, boolean closeStreamOnPersist ){ - return new GridFSInputFile( this , in , filename, closeStreamOnPersist ); - } - - /** - * @see {@link GridFS#createFile()} on how to use this method - * @param filename the file name as stored in the db - * @return - */ - public GridFSInputFile createFile(String filename) { - return new GridFSInputFile( this , filename ); - } - - /** - * This method creates an empty {@link GridFSInputFile} instance. On this - * instance an {@link java.io.OutputStream} can be obtained using the - * {@link GridFSInputFile#getOutputStream()} method. You can still call - * {@link GridFSInputFile#setContentType(String)} and - * {@link GridFSInputFile#setFilename(String)}. The file will be completely - * written and closed after calling the {@link java.io.OutputStream#close()} - * method on the output stream. - * - * @return GridFS file handle instance. - */ - public GridFSInputFile createFile() { - return new GridFSInputFile( this ); - } - - - - // -------------------------- - // ------ members ------- - // -------------------------- - - /** - * gets the bucket name used in the collection's namespace - * @return - */ - public String getBucketName(){ - return _bucketName; - } - - /** - * gets the db used - * @return - */ - public DB getDB(){ - return _db; - } - - /** - * Gets the {@link DBCollection} in which the file’s metadata is stored. - * - * @return the collection - */ - protected DBCollection getFilesCollection() { - return _filesCollection; - } - - /** - * Gets the {@link DBCollection} in which the binary chunks are stored. 
- * - * @return the collection - */ - protected DBCollection getChunksCollection() { - return _chunkCollection; - } - - - /** - * @deprecated Please use {@link #getDB()} for access. - */ - @Deprecated - protected final DB _db; - - /** - * @deprecated Please use {@link #getBucketName()} for access. - */ - @Deprecated - protected final String _bucketName; - - /** - * @deprecated Please use {@link #getFilesCollection()} for access. - */ - @Deprecated - protected final DBCollection _filesCollection; - - /** - * @deprecated Please use {@link #getChunksCollection()} for access. - */ - @Deprecated - protected final DBCollection _chunkCollection; - -} diff --git a/src/main/com/mongodb/gridfs/GridFSDBFile.java b/src/main/com/mongodb/gridfs/GridFSDBFile.java deleted file mode 100644 index b093e77b098..00000000000 --- a/src/main/com/mongodb/gridfs/GridFSDBFile.java +++ /dev/null @@ -1,203 +0,0 @@ -// GridFSDBFile.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.gridfs; - -import java.io.File; -import java.io.FileOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; - -import com.mongodb.BasicDBObject; -import com.mongodb.BasicDBObjectBuilder; -import com.mongodb.DBObject; -import com.mongodb.MongoException; - -/** - * This class enables to retrieve a GridFS file metadata and content. 
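A round-trip sketch of the GridFS API deleted above (store a file, look it up, write it back to disk); the DB instance and the file names are hypothetical:

```java
import java.io.File;
import java.io.IOException;

import com.mongodb.DB;
import com.mongodb.gridfs.GridFS;
import com.mongodb.gridfs.GridFSDBFile;
import com.mongodb.gridfs.GridFSInputFile;

public class GridFSExample {
    static void roundTrip(DB db) throws IOException {
        GridFS fs = new GridFS(db); // default "fs" bucket

        // createFile only builds the entry; save() writes chunks and metadata.
        GridFSInputFile in = fs.createFile(new File("report.pdf"));
        in.setContentType("application/pdf");
        in.save();

        GridFSDBFile out = fs.findOne("report.pdf");
        out.writeTo(new File("copy.pdf"));
    }
}
```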
- * Operations include: - * - writing data to a file on disk or an OutputStream - * - getting each chunk as a byte array - * - getting an InputStream to stream the data into - * @author antoine - */ -public class GridFSDBFile extends GridFSFile { - - - /** - * Returns an InputStream from which data can be read - * @return - */ - public InputStream getInputStream(){ - return new MyInputStream(); - } - - /** - * Writes the file's data to a file on disk - * @param filename the file name on disk - * @return - * @throws IOException - * @throws MongoException - */ - public long writeTo( String filename ) throws IOException { - return writeTo( new File( filename ) ); - } - /** - * Writes the file's data to a file on disk - * @param f the File object - * @return - * @throws IOException - * @throws MongoException - */ - public long writeTo( File f ) throws IOException { - - FileOutputStream out = null; - try{ - out = new FileOutputStream( f ); - return writeTo( out); - }finally{ - if(out != null) - out.close(); - } - } - - /** - * Writes the file's data to an OutputStream - * @param out the OutputStream - * @return - * @throws IOException - * @throws MongoException - */ - public long writeTo( OutputStream out ) - throws IOException { - final int nc = numChunks(); - for ( int i=0; i= _data.length ){ - if ( _currentChunkIdx + 1 >= _numChunks ) - return -1; - - _data = getChunk( ++_currentChunkIdx ); - _offset = 0; - } - - int r = Math.min( len , _data.length - _offset ); - System.arraycopy( _data , _offset , b , off , r ); - _offset += r; - return r; - } - - /** - * Will smartly skips over chunks without fetching them if possible. - */ - public long skip(long numBytesToSkip) throws IOException { - if (numBytesToSkip <= 0) - return 0; - - if (_currentChunkIdx == _numChunks) - //We're actually skipping over the back end of the file, short-circuit here - //Don't count those extra bytes to skip in with the return value - return 0; - - // offset in the whole file - long offsetInFile = 0; - if (_currentChunkIdx >= 0) - offsetInFile = _currentChunkIdx * _chunkSize + _offset; - if (numBytesToSkip + offsetInFile >= _length) { - _currentChunkIdx = _numChunks; - _data = null; - return _length - offsetInFile; - } - - int temp = _currentChunkIdx; - _currentChunkIdx = (int)((numBytesToSkip + offsetInFile) / _chunkSize); - if (temp != _currentChunkIdx) - _data = getChunk(_currentChunkIdx); - _offset = (int)((numBytesToSkip + offsetInFile) % _chunkSize); - - return numBytesToSkip; - } - - final int _numChunks; - - int _currentChunkIdx = -1; - int _offset = 0; - byte[] _data = null; - } - - void remove(){ - _fs._filesCollection.remove( new BasicDBObject( "_id" , _id ) ); - _fs._chunkCollection.remove( new BasicDBObject( "files_id" , _id ) ); - } -} diff --git a/src/main/com/mongodb/gridfs/GridFSFile.java b/src/main/com/mongodb/gridfs/GridFSFile.java deleted file mode 100644 index 1e1c5268cab..00000000000 --- a/src/main/com/mongodb/gridfs/GridFSFile.java +++ /dev/null @@ -1,317 +0,0 @@ -// GridFSFile.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.gridfs; - -import java.util.Arrays; -import java.util.Collections; -import java.util.Date; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.bson.BSONObject; - -import com.mongodb.BasicDBObject; -import com.mongodb.DBObject; -import com.mongodb.MongoException; -import com.mongodb.util.JSON; - -/** - * The abstract class representing a GridFS file - * @author antoine - */ -public abstract class GridFSFile implements DBObject { - - - // ------------------------------ - // --------- db ------- - // ------------------------------ - - /** - * Saves the file entry to the files collection - * @throws MongoException - */ - public void save(){ - if ( _fs == null ) - throw new MongoException( "need _fs" ); - _fs._filesCollection.save( this ); - } - - /** - * Verifies that the MD5 matches between the database and the local file. - * This should be called after transferring a file. - * @throws MongoException - */ - public void validate(){ - if ( _fs == null ) - throw new MongoException( "no _fs" ); - if ( _md5 == null ) - throw new MongoException( "no _md5 stored" ); - - DBObject cmd = new BasicDBObject( "filemd5" , _id ); - cmd.put( "root" , _fs._bucketName ); - DBObject res = _fs._db.command( cmd ); - if ( res != null && res.containsField( "md5" ) ) { - String m = res.get( "md5" ).toString(); - if ( m.equals( _md5 ) ) - return; - throw new MongoException( "md5 differ. mine [" + _md5 + "] theirs [" + m + "]" ); - } - - // no md5 from the server - throw new MongoException( "no md5 returned from server: " + res ); - - } - - /** - * Returns the number of chunks that store the file data - * @return - */ - public int numChunks(){ - double d = _length; - d = d / _chunkSize; - return (int)Math.ceil( d ); - } - - // ------------------------------ - // --------- getters ------- - // ------------------------------ - - - /** - * Gets the id - * @return - */ - public Object getId(){ - return _id; - } - - /** - * Gets the filename - * @return - */ - public String getFilename(){ - return _filename; - } - - /** - * Gets the content type - * @return - */ - public String getContentType(){ - return _contentType; - } - - /** - * Gets the file's length - * @return - */ - public long getLength(){ - return _length; - } - - /** - * Gets the size of a chunk - * @return - */ - public long getChunkSize(){ - return _chunkSize; - } - - /** - * Gets the upload date - * @return - */ - public Date getUploadDate(){ - return _uploadDate; - } - - /** - * Gets the aliases from the metadata. 
- * note: to set aliases, call put( "aliases" , List ) - * @return - */ - @SuppressWarnings("unchecked") - public List getAliases(){ - return (List)_extradata.get( "aliases" ); - } - - /** - * Gets the file metadata - * @return - */ - public DBObject getMetaData(){ - return (DBObject)_extradata.get( "metadata" ); - } - - /** - * Gets the file metadata - * @return - */ - public void setMetaData(DBObject metadata){ - _extradata.put( "metadata", metadata ); - } - - /** - * Gets the observed MD5 during transfer - * @return - */ - public String getMD5(){ - return _md5; - } - - // ------------------------------ - // --------- DBOBject methods --- - // ------------------------------ - - public Object put( String key , Object v ){ - if ( key == null ) - throw new RuntimeException( "key should never be null" ); - else if ( key.equals( "_id" ) ) - _id = v; - else if ( key.equals( "filename" ) ) - _filename = v == null ? null : v.toString(); - else if ( key.equals( "contentType" ) ) - _contentType = (String)v; - else if ( key.equals( "length" ) ) - _length = ((Number)v).longValue(); - else if ( key.equals( "chunkSize" ) ) - _chunkSize = ((Number)v).longValue(); - else if ( key.equals( "uploadDate" ) ) - _uploadDate = (Date)v; - else if ( key.equals( "md5" ) ) - _md5 = (String)v; - else - _extradata.put( key , v ); - return v; - } - - public Object get( String key ){ - if ( key == null ) - throw new RuntimeException( "key should never be null" ); - else if ( key.equals( "_id" ) ) - return _id; - else if ( key.equals( "filename" ) ) - return _filename; - else if ( key.equals( "contentType" ) ) - return _contentType; - else if ( key.equals( "length" ) ) - return _length; - else if ( key.equals( "chunkSize" ) ) - return _chunkSize; - else if ( key.equals( "uploadDate" ) ) - return _uploadDate; - else if ( key.equals( "md5" ) ) - return _md5; - return _extradata.get( key ); - } - - public void putAll( BSONObject o ){ - throw new UnsupportedOperationException(); - } - - public void putAll( Map m ){ - throw new UnsupportedOperationException(); - } - - public Map toMap(){ - throw new UnsupportedOperationException(); - } - - public Object removeField( String key ){ - throw new UnsupportedOperationException(); - } - - /* - * @deprecated - */ - @Deprecated - public boolean containsKey( String s ){ - return containsField( s ); - } - - public boolean containsField(String s){ - return keySet().contains( s ); - } - - @SuppressWarnings("unchecked") - public Set keySet(){ - Set keys = new HashSet(); - keys.addAll(VALID_FIELDS); - keys.addAll(_extradata.keySet()); - return keys; - } - - public boolean isPartialObject(){ - return false; - } - - public void markAsPartialObject(){ - throw new RuntimeException( "can't load partial GridFSFile file" ); - } - - // ---------------------- - // ------- fields ------- - // ---------------------- - - @Override - public String toString(){ - return JSON.serialize( this ); - } - - /** - * Gets the GridFS associated with this file - * - * @return gridFS instance - */ - protected GridFS getGridFS(){ - return this._fs; - } - - /** - * Sets the GridFS associated with this file - * @param fs - */ - protected void setGridFS( GridFS fs ){ - _fs = fs; - } - - /** - * @deprecated Please use {@link #getGridFS()} & {@link #setGridFS(GridFS)} instead. 
- */ - @Deprecated - protected GridFS _fs = null; - - Object _id; - String _filename; - String _contentType; - long _length; - long _chunkSize; - Date _uploadDate; - List _aliases; - DBObject _extradata = new BasicDBObject(); - String _md5; - - @SuppressWarnings("unchecked") - final static Set VALID_FIELDS = Collections.unmodifiableSet( new HashSet( Arrays.asList( new String[]{ - "_id" , "filename" , "contentType" , "length" , "chunkSize" , - "uploadDate" , "aliases" , "md5" - } ) ) ); -} diff --git a/src/main/com/mongodb/gridfs/GridFSInputFile.java b/src/main/com/mongodb/gridfs/GridFSInputFile.java deleted file mode 100644 index 6585a889d0d..00000000000 --- a/src/main/com/mongodb/gridfs/GridFSInputFile.java +++ /dev/null @@ -1,405 +0,0 @@ -// GridFSInputFile.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.gridfs; - -import com.mongodb.BasicDBObjectBuilder; -import com.mongodb.DBObject; -import com.mongodb.MongoException; -import com.mongodb.util.Util; -import org.bson.types.ObjectId; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; -import java.util.Date; - -/** - * This class represents a GridFS file to be written to the database - * Operations include: - * - writing data obtained from an InputStream - * - getting an OutputStream to stream the data out - * - * @author Eliot Horowitz and Guy K. Kloss - */ -public class GridFSInputFile extends GridFSFile { - - /** - * Default constructor setting the GridFS file name and providing an input - * stream containing data to be written to the file. - * - * @param fs - * The GridFS connection handle. - * @param in - * Stream used for reading data from. - * @param filename - * Name of the file to be created. - * @param closeStreamOnPersist - indicate the passed in input stream should be closed once the data chunk persisted - */ - protected GridFSInputFile( GridFS fs , InputStream in , String filename, boolean closeStreamOnPersist ) { - _fs = fs; - _in = in; - _filename = filename; - _closeStreamOnPersist = closeStreamOnPersist; - - _id = new ObjectId(); - _chunkSize = GridFS.DEFAULT_CHUNKSIZE; - _uploadDate = new Date(); - try { - _messageDigester = MessageDigest.getInstance("MD5"); - } catch (NoSuchAlgorithmException e) { - throw new RuntimeException("No MD5!"); - } - _messageDigester.reset(); - _buffer = new byte[(int) _chunkSize]; - } - - /** - * Default constructor setting the GridFS file name and providing an input - * stream containing data to be written to the file. - * - * @param fs - * The GridFS connection handle. - * @param in - * Stream used for reading data from. - * @param filename - * Name of the file to be created. 
- */ - protected GridFSInputFile( GridFS fs , InputStream in , String filename ) { - this( fs, in, filename, false); - } - - /** - * Constructor that only provides a file name, but does not rely on the - * presence of an {@link java.io.InputStream}. An - * {@link java.io.OutputStream} can later be obtained for writing using the - * {@link #getOutputStream()} method. - * - * @param fs - * The GridFS connection handle. - * @param filename - * Name of the file to be created. - */ - protected GridFSInputFile( GridFS fs , String filename ) { - this( fs , null , filename ); - } - - /** - * Minimal constructor that does not rely on the presence of an - * {@link java.io.InputStream}. An {@link java.io.OutputStream} can later be - * obtained for writing using the {@link #getOutputStream()} method. - * - * @param fs - * The GridFS connection handle. - */ - protected GridFSInputFile( GridFS fs ) { - this( fs , null , null ); - } - - public void setId(Object id) { - _id = id; - } - - /** - * Sets the file name on the GridFS entry. - * - * @param fn - * File name. - */ - public void setFilename( String fn ) { - _filename = fn; - } - - /** - * Sets the content type (MIME type) on the GridFS entry. - * - * @param ct - * Content type. - */ - public void setContentType( String ct ) { - _contentType = ct; - } - - /** - * Set the chunk size. This must be called before saving any data. - * @param chunkSize The size in bytes. - */ - public void setChunkSize(long chunkSize) { - if (_outputStream != null || _savedChunks) - return; - _chunkSize = chunkSize; - _buffer = new byte[(int) _chunkSize]; - } - - /** - * calls {@link GridFSInputFile#save(long)} with the existing chunk size - * @throws MongoException - */ - @Override - public void save() { - save( _chunkSize ); - } - - /** - * This method first calls saveChunks(long) if the file data has not been saved yet. - * Then it persists the file entry to GridFS. - * - * @param chunkSize - * Size of chunks for file in bytes. - * @throws MongoException - */ - public void save( long chunkSize ) { - if (_outputStream != null) - throw new MongoException( "cannot mix OutputStream and regular save()" ); - - // note that chunkSize only changes _chunkSize in case we actually save chunks - // otherwise there is a risk file and chunks are not compatible - if ( ! _savedChunks ) { - try { - saveChunks( chunkSize ); - } catch ( IOException ioe ) { - throw new MongoException( "couldn't save chunks" , ioe ); - } - } - - super.save(); - } - - /** - * @see com.mongodb.gridfs.GridFSInputFile#saveChunks(long) - * - * @return Number of the next chunk. - * @throws IOException - * on problems reading the new entry's - * {@link java.io.InputStream}. - * @throws MongoException - */ - public int saveChunks() throws IOException { - return saveChunks( _chunkSize ); - } - - /** - * Saves all data into chunks from configured {@link java.io.InputStream} input stream - * to GridFS. A non-default chunk size can be specified. - * This method does NOT save the file object itself, one must call save() to do so. - * - * @param chunkSize - * Size of chunks for file in bytes. - * @return Number of the next chunk. - * @throws IOException - * on problems reading the new entry's - * {@link java.io.InputStream}. - * @throws MongoException - */ - public int saveChunks( long chunkSize ) throws IOException { - if (_outputStream != null) - throw new MongoException( "cannot mix OutputStream and regular save()" ); - if ( _savedChunks ) - throw new MongoException( "chunks already saved!" 
); - - if ( chunkSize <= 0) { - throw new MongoException("chunkSize must be greater than zero"); - } - - if ( _chunkSize != chunkSize ) { - _chunkSize = chunkSize; - _buffer = new byte[(int) _chunkSize]; - } - - int bytesRead = 0; - while ( bytesRead >= 0 ) { - _currentBufferPosition = 0; - bytesRead = _readStream2Buffer(); - _dumpBuffer( true ); - } - - // only finish data, do not write file, in case one wants to change metadata - _finishData(); - return _currentChunkNumber; - } - - /** - * After retrieving this {@link java.io.OutputStream}, this object will be - * capable of accepting successively written data to the output stream. - * To completely persist this GridFS object, you must finally call the {@link java.io.OutputStream#close()} - * method on the output stream. Note that calling the save() and saveChunks() - * methods will throw Exceptions once you obtained the OutputStream. - * - * @return Writable stream object. - */ - public OutputStream getOutputStream() { - if ( _outputStream == null ) { - _outputStream = new MyOutputStream(); - } - return _outputStream; - } - - /** - * Dumps a new chunk into the chunks collection. Depending on the flag, also - * partial buffers (at the end) are going to be written immediately. - * - * @param writePartial - * Write also partial buffers full. - * @throws MongoException - */ - private void _dumpBuffer( boolean writePartial ) { - if ( ( _currentBufferPosition < _chunkSize ) && !writePartial ) { - // Bail out, chunk not complete yet - return; - } - if (_currentBufferPosition == 0) { - // chunk is empty, may be last chunk - return; - } - - byte[] writeBuffer = _buffer; - if ( _currentBufferPosition != _chunkSize ) { - writeBuffer = new byte[_currentBufferPosition]; - System.arraycopy( _buffer, 0, writeBuffer, 0, _currentBufferPosition ); - } - - DBObject chunk = createChunk(_id, _currentChunkNumber, writeBuffer); - - _fs._chunkCollection.save( chunk ); - - _currentChunkNumber++; - _totalBytes += writeBuffer.length; - _messageDigester.update( writeBuffer ); - _currentBufferPosition = 0; - } - - protected DBObject createChunk(Object id, int currentChunkNumber, byte[] writeBuffer) { - return BasicDBObjectBuilder.start() - .add("files_id", id) - .add("n", currentChunkNumber) - .add("data", writeBuffer).get(); - } - - /** - * Reads a buffer full from the {@link java.io.InputStream}. - * - * @return Number of bytes read from stream. - * @throws IOException - * if the reading from the stream fails. - */ - private int _readStream2Buffer() throws IOException { - int bytesRead = 0; - while ( _currentBufferPosition < _chunkSize && bytesRead >= 0 ) { - bytesRead = _in.read( _buffer, _currentBufferPosition, - (int) _chunkSize - _currentBufferPosition ); - if ( bytesRead > 0 ) { - _currentBufferPosition += bytesRead; - } else if ( bytesRead == 0 ) { - throw new RuntimeException( "i'm doing something wrong" ); - } - } - return bytesRead; - } - - /** - * Marks the data as fully written. 
This needs to be called before super.save() - */ - private void _finishData() { - if (!_savedChunks) { - _md5 = Util.toHex( _messageDigester.digest() ); - _messageDigester = null; - _length = _totalBytes; - _savedChunks = true; - try { - if ( _in != null && _closeStreamOnPersist ) - _in.close(); - } catch (IOException e) { - //ignore - } - } - } - - private final InputStream _in; - private boolean _closeStreamOnPersist; - private boolean _savedChunks = false; - private byte[] _buffer = null; - private int _currentChunkNumber = 0; - private int _currentBufferPosition = 0; - private long _totalBytes = 0; - private MessageDigest _messageDigester = null; - private OutputStream _outputStream = null; - - /** - * An output stream implementation that can be used to successively write to - * a GridFS file. - * - * @author Guy K. Kloss - */ - class MyOutputStream extends OutputStream { - - /** - * {@inheritDoc} - * - * @see java.io.OutputStream#write(int) - */ - @Override - public void write( int b ) throws IOException { - byte[] byteArray = new byte[1]; - byteArray[0] = (byte) (b & 0xff); - write( byteArray, 0, 1 ); - } - - /** - * {@inheritDoc} - * - * @see java.io.OutputStream#write(byte[], int, int) - */ - @Override - public void write( byte[] b , int off , int len ) throws IOException { - int offset = off; - int length = len; - int toCopy = 0; - while ( length > 0 ) { - toCopy = length; - if ( toCopy > _chunkSize - _currentBufferPosition ) { - toCopy = (int) _chunkSize - _currentBufferPosition; - } - System.arraycopy( b, offset, _buffer, _currentBufferPosition, toCopy ); - _currentBufferPosition += toCopy; - offset += toCopy; - length -= toCopy; - if ( _currentBufferPosition == _chunkSize ) { - _dumpBuffer( false ); - } - } - } - - /** - * Processes/saves all data from {@link java.io.InputStream} and closes - * the potentially present {@link java.io.OutputStream}. The GridFS file - * will be persisted afterwards. - */ - @Override - public void close() { - // write last buffer if needed - _dumpBuffer( true ); - // finish stream - _finishData(); - // save file obj - GridFSInputFile.super.save(); - } - } -} diff --git a/src/main/com/mongodb/gridfs/package.html b/src/main/com/mongodb/gridfs/package.html deleted file mode 100644 index 5693da42bc6..00000000000 --- a/src/main/com/mongodb/gridfs/package.html +++ /dev/null @@ -1,3 +0,0 @@ - - GridFS tools. Used for storing files in MongoDB - diff --git a/src/main/com/mongodb/io/ByteBufferFactory.java b/src/main/com/mongodb/io/ByteBufferFactory.java deleted file mode 100644 index 1afeeb2db54..00000000000 --- a/src/main/com/mongodb/io/ByteBufferFactory.java +++ /dev/null @@ -1,45 +0,0 @@ -// ByteBufferFactory.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.io; - -import java.nio.ByteBuffer; - -/** - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. 
- */ -@Deprecated -public interface ByteBufferFactory { - public ByteBuffer get(); - - /** - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. - */ - @Deprecated - public static class SimpleHeapByteBufferFactory implements ByteBufferFactory { - public SimpleHeapByteBufferFactory( int size ){ - _size = size; - } - - public ByteBuffer get(){ - return ByteBuffer.wrap( new byte[_size] ); - } - - final int _size; - } -} diff --git a/src/main/com/mongodb/io/ByteBufferHolder.java b/src/main/com/mongodb/io/ByteBufferHolder.java deleted file mode 100644 index 1cebf62cb82..00000000000 --- a/src/main/com/mongodb/io/ByteBufferHolder.java +++ /dev/null @@ -1,131 +0,0 @@ -// ByteBufferHolder.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.io; - -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; - -/** - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. - */ -@Deprecated -public class ByteBufferHolder { - - public ByteBufferHolder(){ - this( 1024 * 1024 * 1024 ); // 1gb - } - - public ByteBufferHolder( int max ){ - _max = max; - } - - public byte get( int i ){ - if ( i >= _pos ) - throw new RuntimeException( "out of bounds" ); - - final int num = i / _bufSize; - final int pos = i % _bufSize; - - return _buffers.get( num ).get( pos ); - } - - public void get( int pos , byte b[] ){ - for ( int i=0; i= _pos ) - throw new RuntimeException( "out of bounds" ); - - final int num = i / _bufSize; - final int pos = i % _bufSize; - - _buffers.get( num ).put( pos , val ); - } - - public int position(){ - return _pos; - } - - public void position( int p ){ - _pos = p; - int num = _pos / _bufSize; - int pos = _pos % _bufSize; - - while ( _buffers.size() <= num ) - _addBucket(); - - ByteBuffer bb = _buffers.get( num ); - bb.position( pos ); - for ( int i=num+1; i<_buffers.size(); i++ ) - _buffers.get( i ).position( 0 ); - } - - public int remaining(){ - return Integer.MAX_VALUE; - } - - public void put( ByteBuffer in ){ - while ( in.hasRemaining() ){ - int num = _pos / _bufSize; - if ( num >= _buffers.size() ) - _addBucket(); - - ByteBuffer bb = _buffers.get( num ); - - final int canRead = Math.min( bb.remaining() , in.remaining() ); - - final int oldLimit = in.limit(); - in.limit( in.position() + canRead ); - - bb.put( in ); - - in.limit( oldLimit ); - - _pos += canRead; - } - - } - - private void _addBucket(){ - if ( capacity() + _bufSize > _max ) - throw new RuntimeException( "too big current:" + capacity() ); - _buffers.add( ByteBuffer.allocateDirect( _bufSize ) ); - } - - public int capacity(){ - return _buffers.size() * _bufSize; - } - - public String toString(){ - StringBuilder buf = new StringBuilder(); - buf.append( "{ ByteBufferHolder pos:" + _pos + " " ); - for ( ByteBuffer bb : _buffers ) - buf.append( bb ).append( " " ); - return buf.append( "}" ).toString(); - } - - List _buffers = new ArrayList(); - int _pos = 0; - 
final int _max; - - static final int _bufSize = 4096; -} diff --git a/src/main/com/mongodb/io/ByteBufferInputStream.java b/src/main/com/mongodb/io/ByteBufferInputStream.java deleted file mode 100644 index ac874854c80..00000000000 --- a/src/main/com/mongodb/io/ByteBufferInputStream.java +++ /dev/null @@ -1,124 +0,0 @@ -// ByteBufferInputStream.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.io; - -import java.io.InputStream; -import java.nio.ByteBuffer; -import java.util.List; - -/** - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. - */ -@Deprecated -public class ByteBufferInputStream extends InputStream { - - public ByteBufferInputStream( List lst ){ - this( lst , false ); - } - - public ByteBufferInputStream( List lst , boolean flip ){ - _lst = lst; - if ( flip ) - for ( ByteBuffer buf : _lst ) - buf.flip(); - } - - public int available(){ - int avail = 0; - for ( int i=_pos; i<_lst.size(); i++ ) - avail += _lst.get( i ).remaining(); - return avail; - } - - public void close(){} - - public void mark(int readlimit){ - throw new RuntimeException( "mark not supported" ); - } - - public void reset(){ - throw new RuntimeException( "mark not supported" ); - } - - public boolean markSupported(){ - return false; - } - - public int read(){ - if ( _pos >= _lst.size() ) - return -1; - - ByteBuffer buf = _lst.get( _pos ); - if ( buf.remaining() > 0 ) - return buf.get() & 0xff; - - _pos++; - return read(); - } - - public int read(byte[] b){ - return read( b , 0 , b.length ); - } - - public int read(byte[] b, int off, int len){ - if ( _pos >= _lst.size() ) - return -1; - - ByteBuffer buf = _lst.get( _pos ); - - if ( buf.remaining() == 0 ){ - _pos++; - return read( b , off , len ); - } - - int toRead = Math.min( len , buf.remaining() ); - buf.get( b , off , toRead ); - - if ( toRead == len || _pos + 1 >= _lst.size() ) - return toRead; - - _pos++; - return toRead + read( b , off + toRead , len - toRead ); - } - - - public long skip(long n){ - long skipped = 0; - - while ( n >= 0 && _pos < _lst.size() ){ - ByteBuffer b = _lst.get( _pos ); - if ( b.remaining() < n ){ - skipped += b.remaining(); - n -= b.remaining(); - b.position( b.limit() ); - _pos++; - continue; - } - - skipped += n; - b.position( (int)(b.position() + n) ); - return skipped; - } - - return skipped; - } - - final List _lst; - private int _pos = 0; -} diff --git a/src/main/com/mongodb/io/ByteBufferOutputStream.java b/src/main/com/mongodb/io/ByteBufferOutputStream.java deleted file mode 100644 index 4e4fa2e7ab5..00000000000 --- a/src/main/com/mongodb/io/ByteBufferOutputStream.java +++ /dev/null @@ -1,99 +0,0 @@ -// ByteBufferOutputStream.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.io; - -import java.io.OutputStream; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; - -/** - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. - */ -@Deprecated -public class ByteBufferOutputStream extends OutputStream { - - public ByteBufferOutputStream(){ - this( _defaultFactory ); - } - - public ByteBufferOutputStream( int size ){ - this( new ByteBufferFactory.SimpleHeapByteBufferFactory( size ) ); - } - - public ByteBufferOutputStream( ByteBufferFactory factory ){ - _factory = factory; - } - - public void close(){ - } - - public void flush(){ - } - - public void write(byte[] b){ - write( b , 0 , b.length ); - } - - public void write(byte[] b, int off, int len){ - ByteBuffer cur = _need( 1 ); - - int toWrite = Math.min( len , cur.remaining() ); - cur.put( b , off , toWrite ); - - if ( toWrite == len ) - return; - - write( b , off + toWrite , len - toWrite ); - } - - public void write(int b){ - _need(1).put((byte)b); - } - - public List getBuffers(){ - return _lst; - } - - public List getBuffers( boolean flip ){ - if ( flip ) - for ( ByteBuffer buf : _lst ) - buf.flip(); - return _lst; - } - - private ByteBuffer _need( int space ){ - if ( _lst.size() == 0 ){ - _lst.add( _factory.get() ); - return _lst.get( 0 ); - } - - ByteBuffer cur = _lst.get( _lst.size() - 1 ); - if ( space <= cur.remaining() ) - return cur; - - _lst.add( _factory.get() ); - return _lst.get( _lst.size() - 1 ); - } - - final List _lst = new ArrayList(); - final ByteBufferFactory _factory; - - static final ByteBufferFactory _defaultFactory = new ByteBufferFactory.SimpleHeapByteBufferFactory( 1024 * 4 ); -} diff --git a/src/main/com/mongodb/io/ByteStream.java b/src/main/com/mongodb/io/ByteStream.java deleted file mode 100644 index 002fe261728..00000000000 --- a/src/main/com/mongodb/io/ByteStream.java +++ /dev/null @@ -1,32 +0,0 @@ -// ByteStream.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.io; - -import java.nio.ByteBuffer; - -/** - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. - */ -@Deprecated -public interface ByteStream { - - public boolean hasMore(); - public int write( ByteBuffer bb ); - -} diff --git a/src/main/com/mongodb/package.html b/src/main/com/mongodb/package.html deleted file mode 100644 index 084ef185dbd..00000000000 --- a/src/main/com/mongodb/package.html +++ /dev/null @@ -1,3 +0,0 @@ - -
        Main package with core files. @see Mongo is the main entry point.
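For context: the two deprecated com.mongodb.io stream classes deleted above cooperate by chaining fixed-size ByteBuffers. ByteBufferOutputStream appends a new buffer from its factory whenever the current one fills, and ByteBufferInputStream walks the same list back when reading. A minimal round-trip sketch against the deleted API (constructor and method signatures are taken from the removed source; the scenario and class name are illustrative only):

import java.util.List;

import com.mongodb.io.ByteBufferInputStream;
import com.mongodb.io.ByteBufferOutputStream;

public class ByteBufferStreamRoundTrip {
    public static void main(final String[] args) throws Exception {
        // Write more than the 4 KB default buffer size so a second buffer gets chained.
        byte[] payload = new byte[10 * 1024];
        ByteBufferOutputStream out = new ByteBufferOutputStream();
        out.write(payload);

        // getBuffers(true) flips each buffer so reads start at position 0.
        // The removed API is raw-typed, hence the unparameterized List.
        List buffers = out.getBuffers(true);

        ByteBufferInputStream in = new ByteBufferInputStream(buffers);
        byte[] copy = new byte[in.available()];
        int read = in.read(copy);
        System.out.println("wrote " + payload.length + " bytes, read back " + read);
    }
}

Both classes carry @Deprecated tags and "will be dropped in 3.x" notices, which is why this patch removes them outright rather than migrating callers.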
        - diff --git a/src/main/com/mongodb/tools/ConnectionPoolStat.java b/src/main/com/mongodb/tools/ConnectionPoolStat.java deleted file mode 100644 index 25592c93847..00000000000 --- a/src/main/com/mongodb/tools/ConnectionPoolStat.java +++ /dev/null @@ -1,314 +0,0 @@ -/** - * Copyright (c) 2008 - 2011 10gen, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - *
        - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.tools; - -import javax.management.AttributeNotFoundException; -import javax.management.InstanceNotFoundException; -import javax.management.JMException; -import javax.management.MBeanException; -import javax.management.MBeanServerConnection; -import javax.management.ObjectName; -import javax.management.ReflectionException; -import javax.management.openmbean.CompositeData; -import javax.management.remote.JMXConnector; -import javax.management.remote.JMXConnectorFactory; -import javax.management.remote.JMXServiceURL; -import java.io.CharArrayWriter; -import java.io.IOException; -import java.io.PrintWriter; -import java.lang.management.ManagementFactory; -import java.util.Set; - -/** - * A simple class that formats Mongo Java driver connection pool statistics in an easily-accessible JSON format. - * It can be used to get statistics on connection pool in the same VM by using the no-args constructor, or in any - * VM by using the constructor that takes an MBeanServerConnection. - *
- * This class also exposes a command line interface modeled after mongostat. For usage, run: - * - * {@code java -cp mongo.jar com.mongodb.tools.ConnectionPoolStat --help}
        - * - * @mongodb.driver.manual reference/mongostat mongostat - * @deprecated This class will be removed in 3.x versions of the driver, - * so please remove it from your compile time dependencies. - */ -@Deprecated -public class ConnectionPoolStat { - - /** - * Use the given MBean server connection to access statistics for connection pools. - * - * @param mBeanConnection the MBean server to connect to - */ - public ConnectionPoolStat(MBeanServerConnection mBeanConnection) { - this.mBeanConnection = mBeanConnection; - } - - /** - * Use the platform MBean server. This is useful if you want to access statistics - * for connection pools in the same virtual machine. - * - * @see java.lang.management.ManagementFactory#getPlatformMBeanServer() - */ - public ConnectionPoolStat() { - this.mBeanConnection = ManagementFactory.getPlatformMBeanServer(); - } - - /** - * Gets the statistics for all Mongo connection pools registered with the MBean server used - * by this instance. The format will always be JSON, but the specific JSON fields may change in a - * future release. An example of the output, which should not be taken as a specification: - * - *
        -   { pools : [
        -     { objectName: 'com.mongodb:type=ConnectionPool,host=localhost/127.0.0.1,port=27018,instance=1',
        -       host: 'localhost', port: 27018, maxSize: 10, total: 10, inUse: 3,
        -       inUseConnections: [
        -         { namespace: 'FindContention.test', opCode: 'OP_QUERY', query: { }, numDocuments: 1, threadName: 'pool-2-thread-19', durationMS: 843, localPort: 64062 },
        -         { namespace: 'FindContention.test', opCode: 'OP_QUERY', query: { }, numDocuments: 1, threadName: 'pool-2-thread-1', durationMS: 4331, localPort: 64095 },
        -         { namespace: 'FindContention.test', opCode: 'OP_QUERY', query: { }, numDocuments: 1, threadName: 'pool-2-thread-16', durationMS: 4343, localPort: 64087 }
        -       ]
        -     },
        -     { objectName: 'com.mongodb:type=ConnectionPool,host=localhost/127.0.0.1,port=27017,instance=1',
        -       host: 'localhost', port: 27017, maxSize: 10, total: 10, inUse: 2,
        -       inUseConnections: [
        -         { namespace: 'FindContention.test', opCode: 'OP_QUERY', query: { }, numDocuments: 1, threadName: 'pool-2-thread-5', durationMS: 920, localPort: 64093 },
-         { namespace: 'FindContention.test', opCode: 'OP_QUERY', query: { }, numDocuments: 1, threadName: 'pool-2-thread-11', durationMS: 1468, localPort: 64068 }
        -       ]
        -     }
        -    ]
        -   }
        - * - * @return JSON-formatted stats for all connection pools registered in JMX - * @throws JMException for any JMX-related exceptions - * @throws IOException for any I/O exceptions - */ - public String getStats() throws JMException, IOException { - CharArrayWriter charArrayWriter = new CharArrayWriter(); - PrintWriter printWriter = new PrintWriter(charArrayWriter); - print(printWriter); - return charArrayWriter.toString(); - } - - /** - * Command line interface for displaying connection pool stats. In order to connect to a remote JMX server to - * get these stats, currently you must set com.sun.management.jmxremote.port system property on the remote server - * and specify that port using the --port argument. - * - * @param args program arguments - * @throws Exception JMX-related exceptions - * @see ConnectionPoolStat#printUsage() - */ - public static void main(String[] args) throws Exception { - String host = "localhost"; - int port = -1; - long rowCount = 0; - int sleepTime = 1000; - - int pos = 0; - for (; pos < args.length; pos++) { - if (args[pos].equals("--help")) { - printUsage(); - System.exit(0); - } else if (args[pos].equals("--host") || args[pos].equals("-h")) { - host = args[++pos]; - } else if (args[pos].equals("--port")) { - port = getIntegerArg(args[++pos], "--port"); - } else if (args[pos].equals("--rowcount") || args[pos].equals("-n")) { - rowCount = getIntegerArg(args[++pos], "--rowCount"); - } else if (args[pos].startsWith("-")) { - printErrorAndUsageAndExit("unknown option " + args[pos]); - } - else { - sleepTime = getIntegerArg(args[pos++], "sleep time") * 1000; - break; - } - } - - if (pos != args.length) { - printErrorAndUsageAndExit("too many positional options"); - } - - if (port == -1 && !host.contains(":")) { - printErrorAndUsageAndExit("port is required"); - } - - String hostAndPort = (port != -1) ? host + ":" + port : host; - - if (rowCount == 0) { - rowCount = Long.MAX_VALUE; - } - - JMXServiceURL u = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://" + hostAndPort + "/jmxrmi"); - JMXConnector connector = JMXConnectorFactory.connect(u); - MBeanServerConnection mBeanConnection = connector.getMBeanServerConnection(); - try { - ConnectionPoolStat printer = new ConnectionPoolStat(mBeanConnection); - for (int i = 0; i < rowCount; i++) { - System.out.println(printer.getStats()); - if (i != rowCount - 1) { - Thread.sleep(sleepTime); - } - } - } finally { - connector.close(); - } - } - - private static int getIntegerArg(String arg, String argName) { - try { - return Integer.parseInt(arg); - } catch (NumberFormatException e) { - printErrorAndUsageAndExit(argName + " arg must be an integer"); - } - throw new IllegalStateException(); - } - - private static void printErrorAndUsageAndExit(final String error) { - System.err.println("ERROR: " + error); - System.out.println(); - printUsage(); - System.exit(1); - } - - private static void printUsage() { - System.out.println("View live MongoDB connection pool statistics from a remote JMX server."); - System.out.println(); - System.out.println("usage: java com.mongodb.tools.ConnectionPoolStat [options] [sleep time"); - System.out.println("sleep time: time to wait (in seconds) between calls. Defaults to 1"); - System.out.println("options:"); - System.out.println(" --help produce help message"); - System.out.println(" --port arg JMX remote port. Required. Can also use --host hostname:port"); - System.out.println(" -h [ --host ] arg JMX remote host. 
Defaults to localhost"); - System.out.println(" -n [ --rowcount ] arg number of times to print stats (0 for indefinite)"); - System.out.println(); - System.out.println("Fields"); - System.out.println(" objectName - name of the JMX bean for this connection pool"); - System.out.println(" host - host of the mongod/mongos server"); - System.out.println(" port - port of the mongod/mongos server"); - System.out.println(" maxSize - max # of connections allowed"); - System.out.println(" total - # of connections allocated"); - System.out.println(" inUse - # of connections in use"); - System.out.println(" inUseConnections - list of all in use connections"); - System.out.println(" inUseConnections.namespace - namespace on which connection is operating"); - System.out.println(" inUseConnections.opCode - operation connection is executing"); - System.out.println(" inUseConnections.query - query the connection is executing (for query/update/remove)"); - System.out.println(" inUseConnections.numDocuments - # of documents in the message (mostly relevant for batch inserts)"); - System.out.println(" inUseConnections.threadName - name of thread on which connection is executing"); - System.out.println(" inUseConnections.durationMS - duration that the operation has been executing so far"); - System.out.println(" inUseConnections.localPort - local port of the connection"); - } - - private void print(PrintWriter pw) throws JMException, IOException { - Set beanSet = mBeanConnection.queryNames(new ObjectName("com.mongodb:type=ConnectionPool,*"), null); - pw.println("{ pools : ["); - int i = 0; - for (ObjectName objectName : beanSet) { - pw.print(" { "); - printAttribute("ObjectName", objectName.toString(), pw); - pw.println(); - pw.print(" "); - printAttribute("Host", objectName, pw); - printAttribute("Port", objectName, pw); - printAttribute("MaxSize", objectName, pw); - printStatistics(pw, objectName); - pw.println(" }" + (i == beanSet.size() - 1 ? 
"" : ",")); - i++; - } - pw.println(" ]"); - pw.println("}"); - } - - private void printStatistics(final PrintWriter pw, final ObjectName objectName) throws InstanceNotFoundException, IOException, ReflectionException, AttributeNotFoundException, MBeanException { - String key = "Statistics"; - CompositeData statistics = (CompositeData) mBeanConnection.getAttribute(objectName, key); - printSimpleStatistics(pw, statistics); - printInUseConnections(statistics, pw); - } - - private void printSimpleStatistics(final PrintWriter pw, final CompositeData statistics) throws InstanceNotFoundException, IOException, ReflectionException, AttributeNotFoundException, MBeanException { - printCompositeDataAttribute("total", statistics, pw); - printCompositeDataAttribute("inUse", statistics, pw); - pw.println(); - } - - private void printInUseConnections(final CompositeData statistics, final PrintWriter pw) throws InstanceNotFoundException, IOException, ReflectionException, AttributeNotFoundException, MBeanException { - String key = "inUseConnections"; - CompositeData[] compositeDataArray = (CompositeData[]) statistics.get(key); - pw.println(" " + getKeyString(key) + ": ["); - for (int i = 0; i < compositeDataArray.length; i++) { - CompositeData compositeData = compositeDataArray[i]; - pw.print(" { "); - printCompositeDataAttribute("namespace", compositeData, pw); - printCompositeDataAttribute("opCode", compositeData, pw); - printCompositeDataAttribute("query", compositeData, pw, StringType.JSON); - printCompositeDataAttribute("numDocuments", compositeData, pw); - printCompositeDataAttribute("threadName", compositeData, pw); - printCompositeDataAttribute("durationMS", compositeData, pw); - printCompositeDataAttribute("localPort", compositeData, pw, Position.LAST); - pw.println(" }" + (i == compositeDataArray.length -1 ? "" : ", ")); - } - pw.println(" ]"); - } - - private void printCompositeDataAttribute(String key, final CompositeData compositeData, final PrintWriter pw) { - printCompositeDataAttribute(key, compositeData, pw, Position.REGULAR); - } - - private void printCompositeDataAttribute(String key, final CompositeData compositeData, final PrintWriter pw, Position position) { - printCompositeDataAttribute(key, compositeData, pw, position, StringType.REGULAR); - } - - private void printCompositeDataAttribute(final String key, final CompositeData compositeData, final PrintWriter pw, final StringType stringType) { - printCompositeDataAttribute(key, compositeData, pw, Position.REGULAR, stringType); - } - - private void printCompositeDataAttribute(String key, final CompositeData compositeData, final PrintWriter pw, Position position, StringType stringType) { - printAttribute(key, compositeData.get(key), pw, position, stringType); - } - - private void printAttribute(final String key, final ObjectName objectName, final PrintWriter pw) throws InstanceNotFoundException, IOException, ReflectionException, AttributeNotFoundException, MBeanException { - printAttribute(key, mBeanConnection.getAttribute(objectName, key), pw); - } - - private void printAttribute(final String key, final Object value, final PrintWriter pw) { - printAttribute(key, value, pw, Position.REGULAR, StringType.REGULAR); - } - - private void printAttribute(final String key, final Object value, final PrintWriter pw, Position position, StringType stringType) { - if (value != null ) { - pw.print(getKeyString(key) + ": " + getValueString(value, stringType) + (position == Position.LAST ? 
"" : ", ")); - } - } - - private String getKeyString(final String key) { - return Character.toLowerCase(key.charAt(0)) + key.substring(1); - } - - private String getValueString(final Object value, final StringType stringType) { - if (value instanceof String && stringType == StringType.REGULAR) { - return "" + "'" + value + "'"; - } - return value.toString(); - } - - enum StringType { REGULAR, JSON } - - enum Position { REGULAR, LAST} - - private final MBeanServerConnection mBeanConnection; -} diff --git a/src/main/com/mongodb/util/AbstractObjectSerializer.java b/src/main/com/mongodb/util/AbstractObjectSerializer.java deleted file mode 100644 index 16c6762e639..00000000000 --- a/src/main/com/mongodb/util/AbstractObjectSerializer.java +++ /dev/null @@ -1,27 +0,0 @@ -/** -* Copyright (c) 2008 - 2011 10gen, Inc. -*
-* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -*
        -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - -package com.mongodb.util; - -abstract class AbstractObjectSerializer implements ObjectSerializer { - - @Override - public String serialize(final Object obj) { - StringBuilder builder = new StringBuilder(); - serialize(obj, builder); - return builder.toString(); - } -} diff --git a/src/main/com/mongodb/util/Args.java b/src/main/com/mongodb/util/Args.java deleted file mode 100644 index 244fb1fe104..00000000000 --- a/src/main/com/mongodb/util/Args.java +++ /dev/null @@ -1,86 +0,0 @@ -// Args.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.util; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - - -/** - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. - */ -@Deprecated -public class Args { - public Args( String args[] ){ - - for ( String s : args ){ - - if ( s.startsWith( "-" ) ){ - s = s.substring(1); - int idx = s.indexOf( "=" ); - if ( idx < 0 ) - _options.put( s , "" ); - else - _options.put( s.substring( 0 , idx ) , s.substring( idx + 1 ) ); - continue; - } - - _params.add( s ); - - } - } - - public String getOption( String name ){ - return _options.get( name ); - } - - public String toString(){ - StringBuilder s = new StringBuilder(); - - for ( String p : _options.keySet() ){ - s.append( '-' ).append( p ); - - String v = _options.get( p ); - if ( v.length() == 0 ) - continue; - - s.append( '=' ); - - if ( v.indexOf( " " ) >= 0 ) - s.append( '"' ).append( v ).append( '"' ); - else - s.append( v ); - } - - for ( String p : _params ){ - s.append( ' ' ); - if ( p.indexOf( " " ) >= 0 ) - s.append( '"' ).append( p ).append( '"' ); - else - s.append( p ); - } - - return s.toString(); - } - - final Map _options = new HashMap(); - final List _params = new ArrayList(); -} diff --git a/src/main/com/mongodb/util/Base64Codec.java b/src/main/com/mongodb/util/Base64Codec.java deleted file mode 100644 index 5f6f7e58171..00000000000 --- a/src/main/com/mongodb/util/Base64Codec.java +++ /dev/null @@ -1,162 +0,0 @@ - -/** - * Copyright (C) 2012 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.util; - -/* - * Copyright (C) 2012 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * Provides Base64 encoding and decoding . - *
- * This class implements Base64 encoding. - * - * Thanks to the Apache Commons project; this class was refactored from org.apache.commons.codec.binary. - * - * Original thanks to the "commons" project in ws.apache.org for this code: - * http://svn.apache.org/repos/asf/webservices/commons/trunk/modules/util/ - *
        - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. - */ -@Deprecated -public class Base64Codec { - - private static final int BYTES_PER_UNENCODED_BLOCK = 3; - private static final int BYTES_PER_ENCODED_BLOCK = 4; - - /** - * Mask used to extract 6 bits, used when encoding - */ - private static final int SixBitMask = 0x3f; - - /** - * padding char - */ - private static final byte PAD = '='; - - /** - * This array is a lookup table that translates 6-bit positive integer index values into their "Base64 Alphabet" - * equivalents as specified in Table 1 of RFC 2045. - */ - private static final byte[] EncodeTable = {'A', 'B', 'C', 'D', 'E', 'F', - 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', - 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f', - 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', - 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', - '6', '7', '8', '9', '+', '/'}; - - private static final int[] DecodeTable = new int[128]; - - static { - for (int i = 0; i < EncodeTable.length; i++) { - DecodeTable[EncodeTable[i]] = i; - } - } - - /** - * Translates the specified Base64 string into a byte array. - * - * @param s the Base64 string (not null) - * @return the byte array (not null) - */ - public byte[] decode(String s) { - int delta = s.endsWith("==") ? 2 : s.endsWith("=") ? 1 : 0; - byte[] buffer = new byte[s.length() * BYTES_PER_UNENCODED_BLOCK / BYTES_PER_ENCODED_BLOCK - delta]; - int mask = 0xFF; - int pos = 0; - for (int i = 0; i < s.length(); i += BYTES_PER_ENCODED_BLOCK) { - int c0 = DecodeTable[s.charAt(i)]; - int c1 = DecodeTable[s.charAt(i + 1)]; - buffer[pos++] = (byte) (((c0 << 2) | (c1 >> 4)) & mask); - if (pos >= buffer.length) { - return buffer; - } - int c2 = DecodeTable[s.charAt(i + 2)]; - buffer[pos++] = (byte) (((c1 << 4) | (c2 >> 2)) & mask); - if (pos >= buffer.length) { - return buffer; - } - int c3 = DecodeTable[s.charAt(i + 3)]; - buffer[pos++] = (byte) (((c2 << 6) | c3) & mask); - } - return buffer; - } - - /** - * Translates the specified byte array into Base64 string. - * - * @param in the byte array (not null) - * @return the translated Base64 string (not null) - */ - public String encode(byte[] in) { - - int modulus = 0; - int bitWorkArea = 0; - int numEncodedBytes = (in.length / BYTES_PER_UNENCODED_BLOCK) * BYTES_PER_ENCODED_BLOCK - + ((in.length % BYTES_PER_UNENCODED_BLOCK == 0) ? 
0 : 4); - - byte[] buffer = new byte[numEncodedBytes]; - int pos = 0; - - for (int b : in) { - modulus = (modulus + 1) % BYTES_PER_UNENCODED_BLOCK; - - if (b < 0) - b += 256; - - bitWorkArea = (bitWorkArea << 8) + b; // BITS_PER_BYTE - if (0 == modulus) { // 3 bytes = 24 bits = 4 * 6 bits to extract - buffer[pos++] = EncodeTable[(bitWorkArea >> 18) & SixBitMask]; - buffer[pos++] = EncodeTable[(bitWorkArea >> 12) & SixBitMask]; - buffer[pos++] = EncodeTable[(bitWorkArea >> 6) & SixBitMask]; - buffer[pos++] = EncodeTable[bitWorkArea & SixBitMask]; - } - } - - switch (modulus) { // 0-2 - case 1: // 8 bits = 6 + 2 - buffer[pos++] = EncodeTable[(bitWorkArea >> 2) & SixBitMask]; // top 6 bits - buffer[pos++] = EncodeTable[(bitWorkArea << 4) & SixBitMask]; // remaining 2 - buffer[pos++] = PAD; - buffer[pos] = PAD; // Last entry no need to ++ - break; - - case 2: // 16 bits = 6 + 6 + 4 - buffer[pos++] = EncodeTable[(bitWorkArea >> 10) & SixBitMask]; - buffer[pos++] = EncodeTable[(bitWorkArea >> 4) & SixBitMask]; - buffer[pos++] = EncodeTable[(bitWorkArea << 2) & SixBitMask]; - buffer[pos] = PAD; // Last entry no need to ++ - break; - } - - return new String(buffer); - } -} diff --git a/src/main/com/mongodb/util/ClassMapBasedObjectSerializer.java b/src/main/com/mongodb/util/ClassMapBasedObjectSerializer.java deleted file mode 100644 index 772147321db..00000000000 --- a/src/main/com/mongodb/util/ClassMapBasedObjectSerializer.java +++ /dev/null @@ -1,83 +0,0 @@ - -/** - * Copyright (C) 2012 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.util; - -import com.mongodb.Bytes; -import org.bson.util.ClassMap; - -import java.util.List; - -/** - * Objects of type ClassMapBasedObjectSerializer are constructed to perform - * instance specific object to JSON serialization schemes. - *
        - * This class is not thread safe - * - * @author breinero - */ -class ClassMapBasedObjectSerializer extends AbstractObjectSerializer { - - /** - * Assign a ObjectSerializer to perform a type specific serialization scheme - * @param c this object's type serves as a key in the serialization map. - * ClassMapBasedObjectSerializer uses org.bson.util.ClassMap and not only checks if 'c' is a key in the Map, - * but also walks the up superclass and interface graph of 'c' to find matches. - * This means that it is only necessary assign ObjectSerializers to base classes. @see org.bson.util.ClassMap - * @param serializer performs the serialization mapping specific to the @param key type - */ - void addObjectSerializer(Class c, ObjectSerializer serializer) { - _serializers.put(c , serializer); - } - - /** - * - * @param obj the object to be serialized - * @param buf StringBuilder containing the JSON representation of the object - */ - @Override - public void serialize(Object obj, StringBuilder buf){ - - obj = Bytes.applyEncodingHooks( obj ); - - if(obj == null) { - buf.append(" null "); - return; - } - - ObjectSerializer serializer = null; - - List> ancestors; - ancestors = ClassMap.getAncestry(obj.getClass()); - - for (final Class ancestor : ancestors) { - serializer = _serializers.get(ancestor); - if (serializer != null) - break; - } - - if (serializer == null && obj.getClass().isArray()) - serializer = _serializers.get(Object[].class); - - if (serializer == null) - throw new RuntimeException( "json can't serialize type : " + obj.getClass() ); - - serializer.serialize(obj, buf); - } - - private ClassMap _serializers = new ClassMap(); -} diff --git a/src/main/com/mongodb/util/ConnectionPoolStatisticsBean.java b/src/main/com/mongodb/util/ConnectionPoolStatisticsBean.java deleted file mode 100644 index cf53c0f58a9..00000000000 --- a/src/main/com/mongodb/util/ConnectionPoolStatisticsBean.java +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Copyright (c) 2008 - 2012 10gen, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - *
        - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.mongodb.util; - -import com.mongodb.InUseConnectionBean; - -/** - * A bean representing connection pool statistics. - * - * @deprecated This class will be removed in 3.x versions of the driver, - * so please remove it from your compile time dependencies. - */ -@Deprecated -public class ConnectionPoolStatisticsBean { - private final int total; - private final int inUse; - private final InUseConnectionBean[] inUseConnections; - - public ConnectionPoolStatisticsBean(final int total, final int inUse, final InUseConnectionBean[] inUseConnections) { - //To change body of created methods use File | Settings | File Templates. - this.total = total; - this.inUse = inUse; - this.inUseConnections = inUseConnections; - } - - /** - * Gets the total number of pool members, including idle and and in-use members. - * - * @return total number of members - */ - public int getTotal() { - return total; - } - - /** - * Gets the number of pool members that are currently in use. - * - * @return number of in-use members - */ - public int getInUse() { - return inUse; - } - - /** - * Gets an array of beans describing all the connections that are currently in use. - * - * @return array of in-use connection beans - */ - public InUseConnectionBean[] getInUseConnections() { - return inUseConnections; - } -} diff --git a/src/main/com/mongodb/util/FastStack.java b/src/main/com/mongodb/util/FastStack.java deleted file mode 100644 index 24dda5a61e8..00000000000 --- a/src/main/com/mongodb/util/FastStack.java +++ /dev/null @@ -1,59 +0,0 @@ -// FastStack.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.util; - -import java.util.ArrayList; -import java.util.List; - -/** - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. - */ -@Deprecated -public class FastStack{ - - public void push( T t ){ - _data.add( t ); - } - - public T peek(){ - return _data.get( _data.size() - 1 ); - } - - public T pop(){ - return _data.remove( _data.size() - 1 ); - } - - public int size(){ - return _data.size(); - } - - public void clear(){ - _data.clear(); - } - - public T get( int i ){ - return _data.get( i ); - } - - public String toString(){ - return _data.toString(); - } - - private final List _data = new ArrayList(); -} diff --git a/src/main/com/mongodb/util/Hash.java b/src/main/com/mongodb/util/Hash.java deleted file mode 100644 index 955380ce969..00000000000 --- a/src/main/com/mongodb/util/Hash.java +++ /dev/null @@ -1,253 +0,0 @@ -// Hash.java - -/** - * Copyright (C) 2008 10gen Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.util; - -/** - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. - */ -@Deprecated -public final class Hash { - - /** Creates a hash for a string. - * @param s String to hash - * @return the hash code - */ - public static final int hashBackward( String s ) { - int hash = 0; - for ( int i = s.length()-1; i >= 0; i-- ) - hash = hash * 31 + s.charAt( i ); - return hash; - } - - /** Creates a long hash for a string. - * @param s the string to hash - * @return the hash code - */ - public static final long hashBackwardLong( String s ) { - long hash = 0; - for ( int i = s.length()-1; i >= 0; i-- ) - hash = hash * 63 + s.charAt( i ); - return hash; - } - - /** @unexpose */ - static final long _longHashConstant = 4095; - - /** - * 64-bit hash, using longs instead of ints, for fewer collisions, for when it matters. - * Calls longHash( s , 0 , s.length() ). - * @param s The String to hash. - * @return the hash code - */ - public static final long longHash( String s ) { - return longHash( s , 0 , s.length() ); - } - - /** - * 64-bit hash using longs, starting on index 'start' and including everything before 'end'. - * @param s The string to hash. - * @param start Where to start the hash. - * @param end Where to end the hash. - * @return the hash code - */ - public static final long longHash( String s , int start , int end ) { - long hash = 0; - for ( ; start < end; start++ ) - hash = _longHashConstant * hash + s.charAt( start ); - return hash; - } - - /** - * Same as longHash(String), using only lower-case values of letters. - * Calls longHash( s , 0 , s.length() ). - * @param s The string to hash. - * @return the hash code - */ - public static final long longLowerHash( String s ) { - return longLowerHash( s , 0 , s.length() ); - } - - /** - * Long (64-bit) hash, lower-cased, from [start-end) - * @param s The string to hash. - * @param start where to start hashing. - * @param end Where to stop hashing. - * @return the hash code - */ - public static final long longLowerHash( String s , int start , int end ) { - long hash = 0; - for ( ; start < end; start++ ) - hash = _longHashConstant * hash + Character.toLowerCase( s.charAt( start ) ); - return hash; - } - - /** - * Long (64-bit) hash, lower-cased, from [start-end), continuing from an existing hash code. - * @param s The string to hash. - * @param start where to start hashing. - * @param end Where to stop hashing. - * @param hash the hash code to continue from - * @return the hash code - */ - public static final long longLowerHash( String s , int start , int end , long hash ) { - for ( ; start < end; start++ ) - hash = _longHashConstant * hash + Character.toLowerCase( s.charAt( start ) ); - return hash; - } - - /** Adds the lower-case equivalent of a character to an existing hash code.
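The longHash family above is a plain polynomial hash with multiplier 4095, so hashing a string in one shot and folding it in character by character with the append variant give the same value. A minimal standalone sketch of that equivalence (class and method names here are illustrative, not part of the deleted API):

    public class PolyHashDemo {
        static final long K = 4095; // same multiplier as Hash._longHashConstant

        // One-shot hash of s[start, end), as in Hash.longHash(String, int, int).
        static long longHash(String s, int start, int end) {
            long h = 0;
            for (int i = start; i < end; i++)
                h = K * h + s.charAt(i);
            return h;
        }

        // Fold one more character in, as in Hash.longHashAppend(long, char).
        static long append(long h, char c) {
            return K * h + c;
        }

        public static void main(String[] args) {
            String s = "mongodb";
            long folded = 0;
            for (int i = 0; i < s.length(); i++)
                folded = append(folded, s.charAt(i));
            // prints true: streaming and one-shot hashing agree
            System.out.println(folded == longHash(s, 0, s.length()));
        }
    }
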
- * @param hash the existing hash code - * @param c the character to add - * @return the hash code - */ - public static final long longLowerHashAppend( long hash , char c ) { - return hash * _longHashConstant + Character.toLowerCase( c ); - } - - /** Adds a character to an existing hash code. - * @param hash the existing hash code - * @param c the character to add - * @return the hash code - */ - public static final long longHashAppend( long hash , char c ) { - return hash * _longHashConstant + c; - } - - /** - * This is an exact copy of the String hashCode() function, aside from the lowercasing. - * @param s string to be hashed - * @return the hash code - */ - public static final int lowerCaseHash( String s ) { - int h = 0; - final int len = s.length(); - for ( int i = 0; i < len; i++ ) - h = 31*h + Character.toLowerCase( s.charAt( i ) ); - return h; - } - - /** - * Creates a hash code of a lowercase string from [start-end) - * @param s string to be hashed - * @param start the starting index - * @param end the ending index - * @return the hash code - */ - public static final int lowerCaseHash( String s , int start , int end ) { - int h = 0; - final int len = s.length(); - for ( int i = start; i < len && i < end; i++ ) - h = 31*h + Character.toLowerCase( s.charAt( i ) ); - return h; - } - - /** - * Creates a hash code of a string from [start-end) - * @param s string to be hashed - * @param start the starting index - * @param end the ending index - * @return the hash code - */ - public static final int hashCode( CharSequence s , int start , int end ) { - int h = 0; - final int len = s.length(); - for ( int i = start; i < len && i < end; i++ ) - h = 31*h + s.charAt( i ); - return h; - } - - /** - * Creates a hash code of a lowercase string with whitespace removed from [start-end) - * @param s string to be hashed - * @param start the starting index - * @param end the ending index - * @return the hash code - */ - public static final int nospaceLowerHash( String s , int start , int end ) { - int h = 0; - final int len = s.length(); - for ( int i = start; i < len && i < end; i++ ) { - char c = s.charAt( i ); - if ( Character.isWhitespace( c ) ) - continue; - h = 31*h + Character.toLowerCase( c ); - } - return h; - } - - /** - * Hashes the lower-cased string, trimming trailing whitespace and collapsing each run of - * consecutive whitespace to its first character before hashing.
- * @param s string to be hashed - * @return the hash code - */ - public static final int lowerCaseSpaceTrimHash( String s ) { - int h = 0; - int len = s.length(); - while ( len > 1 && Character.isWhitespace( s.charAt( len-1 ) ) ) - len--; - boolean lastWasSpace = true; - for ( int i = 0; i < len; i++ ) { - boolean isSpace = Character.isWhitespace( s.charAt( i ) ); - if ( isSpace && lastWasSpace ) - continue; - lastWasSpace = isSpace; - h = 31*h + Character.toLowerCase( s.charAt( i ) ); - } - return h; - } - - /** - * Creates a hash code of a lowercase string from [start-end) ignoring whitespace - * @param s string to be hashed - * @param start the starting index - * @param end the ending index - * @return the hash code - */ - public static final int lowerCaseSpaceTrimHash( String s , int start , int end ) { - int h = 0; - int len = s.length(); - while ( len > 1 && Character.isWhitespace( s.charAt( len-1 ) ) ) - len--; - boolean lastWasSpace = true; - for ( int i = start; i < len && i < end; i++ ) { - boolean isSpace = Character.isWhitespace( s.charAt( i ) ); - if ( isSpace && lastWasSpace ) - continue; - lastWasSpace = isSpace; - h = 31*h + Character.toLowerCase( s.charAt( i ) ); - } - return h; - } - - /** - * Calculate the hashcode for a series of strings combined as one. - * @param strings Varargs array of Strings. - * @return A hashcode. - */ - public static final int hashCode( String ... strings ) { - int h = 0; - for ( String s : strings ) { - int len = s.length(); - for ( int i = 0; i < len; i++ ) - h = 31*h + s.charAt( i ); - } - return h; - } - -} diff --git a/src/main/com/mongodb/util/IdentitySet.java b/src/main/com/mongodb/util/IdentitySet.java deleted file mode 100644 index 09e4b4cedfd..00000000000 --- a/src/main/com/mongodb/util/IdentitySet.java +++ /dev/null @@ -1,85 +0,0 @@ -// IdentitySet.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.util; - -import java.util.Collection; -import java.util.IdentityHashMap; -import java.util.Iterator; - -/** - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. 
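The whitespace-collapsing hash above means strings that differ only in case and in runs of spaces hash identically. A small dependency-free sketch that mirrors the deleted lowerCaseSpaceTrimHash logic and demonstrates this:

    public class SpaceTrimHashDemo {
        // Mirrors the deleted Hash.lowerCaseSpaceTrimHash: lower-case, drop trailing
        // whitespace, and collapse each run of whitespace before hashing.
        static int hash(String s) {
            int h = 0;
            int len = s.length();
            while (len > 1 && Character.isWhitespace(s.charAt(len - 1)))
                len--;
            boolean lastWasSpace = true;
            for (int i = 0; i < len; i++) {
                boolean isSpace = Character.isWhitespace(s.charAt(i));
                if (isSpace && lastWasSpace)
                    continue;
                lastWasSpace = isSpace;
                h = 31 * h + Character.toLowerCase(s.charAt(i));
            }
            return h;
        }

        public static void main(String[] args) {
            // Both inputs collapse to the sequence 'a', ' ', 'b' -> equal hashes
            System.out.println(hash("A  b ") == hash("a b")); // true
        }
    }
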
        - */ -@Deprecated -public class IdentitySet<T> implements Iterable<T> { - - public IdentitySet(){ - } - - public IdentitySet( Iterable<T> copy ){ - for ( T t : copy ) - add( t ); - } - - public boolean add( T t ){ - return _map.put( t , "a" ) == null; - } - - public boolean contains( T t ){ - return _map.containsKey( t ); - } - - public void remove( T t ){ - _map.remove( t ); - } - - public void removeall( Iterable<T> coll ){ - for ( T t : coll ) - _map.remove( t ); - } - - public void clear(){ - _map.clear(); - } - - public int size(){ - return _map.size(); - } - - public Iterator<T> iterator(){ - return _map.keySet().iterator(); - } - - public void addAll( Collection<T> c ){ - for ( T t : c ){ - add( t ); - } - } - - public void addAll( IdentitySet<T> c ){ - for ( T t : c ) - add( t ); - } - - public void removeAll( Iterable<T> prev ){ - for ( T t : prev ) - remove( t ); - } - - final IdentityHashMap<T,String> _map = new IdentityHashMap<T,String>(); -} diff --git a/src/main/com/mongodb/util/JSON.java b/src/main/com/mongodb/util/JSON.java deleted file mode 100644 index a1ebea686bc..00000000000 --- a/src/main/com/mongodb/util/JSON.java +++ /dev/null @@ -1,558 +0,0 @@ -// JSON.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.util; - -import org.bson.BSONCallback; - -import com.mongodb.DBObject; - -/** - * Helper methods for JSON serialization and de-serialization - */ -public class JSON { - - /** - * Serializes an object into its JSON form. - *

        - * This method delegates serialization to JSONSerializers.getLegacy - * - * @param o object to serialize - * @return String containing JSON form of the object - * @see com.mongodb.util.JSONSerializers#getLegacy() - */ - public static String serialize( Object o ){ - StringBuilder buf = new StringBuilder(); - serialize( o , buf ); - return buf.toString(); - } - - /** - * Serializes an object into its JSON form. - *

        - * This method delegates serialization to JSONSerializers.getLegacy - * - * @param o object to serialize - * @param buf StringBuilder to which the JSON representation is appended - * @see com.mongodb.util.JSONSerializers#getLegacy() - */ - public static void serialize( Object o, StringBuilder buf) { - JSONSerializers.getLegacy().serialize(o, buf); - } - - /** - * Parses a JSON string representing a JSON value - * - * @param s the string to parse - * @return the object - */ - public static Object parse( String s ){ - return parse(s, null); - } - - /** - * Parses a JSON string representing a JSON value - * - * @param s the string to parse - * @param c the callback to drive while parsing; if null, a default JSONCallback is used - * @return the object - */ - public static Object parse( String s, BSONCallback c ){ - if (s == null || (s=s.trim()).equals("")) { - return (DBObject)null; - } - - JSONParser p = new JSONParser(s, c); - return p.parse(); - } - - static void string( StringBuilder a , String s ){ - a.append("\""); - for(int i = 0; i < s.length(); ++i){ - char c = s.charAt(i); - if (c == '\\') - a.append("\\\\"); - else if(c == '"') - a.append("\\\""); - else if(c == '\n') - a.append("\\n"); - else if(c == '\r') - a.append("\\r"); - else if(c == '\t') - a.append("\\t"); - else if(c == '\b') - a.append("\\b"); - else if ( c < 32 ) - continue; - else - a.append(c); - } - a.append("\""); - } -} - - -/** - * Parser for JSON objects. - * - * Supports all types described at www.json.org, including - * numbers with "e" or "E" exponents. - */ -class JSONParser { - - String s; - int pos = 0; - BSONCallback _callback; - - /** - * Create a new parser. - */ - public JSONParser(String s) { - this(s, null); - } - - /** - * Create a new parser. - */ - public JSONParser(String s, BSONCallback callback) { - this.s = s; - _callback = (callback == null) ? new JSONCallback() : callback; - } - - - /** - * Parse an unknown type. - * - * @return Object the next item - * @throws JSONParseException if invalid JSON is found - */ - public Object parse() { - return parse(null); - } - - /** - * Parse an unknown type. - * - * @return Object the next item - * @throws JSONParseException if invalid JSON is found - */ - protected Object parse(String name) { - Object value = null; - char current = get(); - - switch(current) { - // null - case 'n': - read('n'); read('u'); read('l'); read('l'); - value = null; - break; - // NaN - case 'N': - read('N'); read('a'); read('N'); - value = Double.NaN; - break; - // true - case 't': - read('t'); read('r'); read('u'); read('e'); - value = true; - break; - // false - case 'f': - read('f'); read('a'); read('l'); read('s'); read('e'); - value = false; - break; - // string - case '\'': - case '\"': - value = parseString(true); - break; - // number - case '0': case '1': case '2': case '3': case '4': case '5': - case '6': case '7': case '8': case '9': case '+': case '-': - value = parseNumber(); - break; - // array - case '[': - value = parseArray(name); - break; - // object - case '{': - value = parseObject(name); - break; - default: - throw new JSONParseException(s, pos); - } - return value; - } - - /** - * Parses an object for the form {} and { members }. - * - * @return DBObject the next object - * @throws JSONParseException if invalid JSON is found - */ - public Object parseObject() { - return parseObject(null); - } - - /** - * Parses an object for the form {} and { members }.
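For reference while this API is being removed: the JSON helper above is driven entirely through the two static entry points. A short usage sketch, assuming the legacy 2.x driver jar is still on the classpath:

    import com.mongodb.DBObject;
    import com.mongodb.util.JSON;

    public class JsonRoundTrip {
        public static void main(String[] args) {
            // Parse a JSON document into the driver's DBObject tree...
            DBObject doc = (DBObject) JSON.parse("{ \"x\" : 3 , \"tags\" : [ \"a\" , \"b\" ] }");
            // ...and serialize it back out with the legacy serializer flavor.
            System.out.println(JSON.serialize(doc));
        }
    }
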
- * - * @return DBObject the next object - * @throws JSONParseException if invalid JSON is found - */ - protected Object parseObject(String name){ - if (name != null) { - _callback.objectStart(name); - } else { - _callback.objectStart(); - } - - read('{'); - char current = get(); - while(get() != '}') { - String key = parseString(false); - read(':'); - Object value = parse(key); - doCallback(key, value); - - if((current = get()) == ',') { - read(','); - } - else { - break; - } - } - read('}'); - - return _callback.objectDone(); - } - - protected void doCallback(String name, Object value) { - if (value == null) { - _callback.gotNull(name); - } else if (value instanceof String) { - _callback.gotString(name, (String)value); - } else if (value instanceof Boolean) { - _callback.gotBoolean(name, (Boolean)value); - } else if (value instanceof Integer) { - _callback.gotInt(name, (Integer)value); - } else if (value instanceof Long) { - _callback.gotLong(name, (Long)value); - } else if (value instanceof Double) { - _callback.gotDouble(name, (Double)value); - } - } - - /** - * Read the current character, making sure that it is the expected character. - * Advances the pointer to the next character. - * - * @param ch the character expected - * - * @throws JSONParseException if the current character does not match the given character - */ - public void read(char ch) { - if(!check(ch)) { - throw new JSONParseException(s, pos); - } - pos++; - } - - public char read(){ - if ( pos >= s.length() ) - throw new IllegalStateException( "string done" ); - return s.charAt( pos++ ); - } - - /** - * Read the current character, making sure that it is a hexadecimal character. - * - * @throws JSONParseException if the current character is not a hexadecimal character - */ - public void readHex() { - if (pos < s.length() && - ((s.charAt(pos) >= '0' && s.charAt(pos) <= '9') || - (s.charAt(pos) >= 'A' && s.charAt(pos) <= 'F') || - (s.charAt(pos) >= 'a' && s.charAt(pos) <= 'f'))) { - pos++; - } - else { - throw new JSONParseException(s, pos); - } - } - - /** - * Checks the current character, making sure that it is the expected character. - * - * @param ch the character expected - * - * @return true if the current character matches the given character - */ - public boolean check(char ch) { - return get() == ch; - } - - /** - * Advances the position in the string past any whitespace. - */ - public void skipWS() { - while(pos < s.length() && Character.isWhitespace(s.charAt(pos))) { - pos++; - } - } - - /** - * Returns the current character. - * Returns (char) -1 if there are no more characters. - * - * @return the next character - */ - public char get() { - skipWS(); - if(pos < s.length()) - return s.charAt(pos); - return (char)-1; - } - - /** - * Parses a string. - * - * @return the next string.
- * @throws JSONParseException if invalid JSON is found - */ - public String parseString(boolean needQuote) { - char quot = 0; - if(check('\'')) - quot = '\''; - else if(check('\"')) - quot = '\"'; - else if (needQuote) - throw new JSONParseException(s, pos); - - char current; - - if (quot > 0) - read(quot); - StringBuilder buf = new StringBuilder(); - int start = pos; - while(pos < s.length()) { - current = s.charAt(pos); - if (quot > 0) { - if (current == quot) - break; - } else { - if (current == ':' || current == ' ') - break; - } - - if(current == '\\') { - pos++; - - char x = get(); - - char special = 0; - - switch ( x ){ - - case 'u': - { // decode unicode - buf.append(s.substring(start, pos-1)); - pos++; - int tempPos = pos; - - readHex(); - readHex(); - readHex(); - readHex(); - - int codePoint = Integer.parseInt(s.substring(tempPos, tempPos+4), 16); - buf.append((char)codePoint); - - start = pos; - continue; - } - case 'n': special = '\n'; break; - case 'r': special = '\r'; break; - case 't': special = '\t'; break; - case 'b': special = '\b'; break; - case '"': special = '\"'; break; - case '\\': special = '\\'; break; - } - - buf.append(s.substring(start, pos-1)); - if ( special != 0 ){ - pos++; - buf.append( special ); - } - start = pos; - continue; - } - pos++; - } - buf.append(s.substring(start, pos)); - if (quot > 0) - read(quot); - return buf.toString(); - } - - /** - * Parses a number. - * - * @return the next number (int or double). - * @throws JSONParseException if invalid JSON is found - */ - public Number parseNumber() { - - char current = get(); - int start = this.pos; - boolean isDouble = false; - - if(check('-') || check('+')) { - pos++; - } - - outer: - while(pos < s.length()) { - switch(s.charAt(pos)) { - case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': - pos++; - break; - case '.': - isDouble = true; - parseFraction(); - break; - case 'e': case 'E': - isDouble = true; - parseExponent(); - break; - default: - break outer; - } - } - - try{ - if (isDouble) - return Double.valueOf(s.substring(start, pos)); - - Long val = Long.valueOf(s.substring(start, pos)); - if (val <= Integer.MAX_VALUE && val >= Integer.MIN_VALUE) - return val.intValue(); - return val; - }catch(NumberFormatException e){ - throw new JSONParseException(s, start, e); - } - } - - /** - * Advances the pointer through the fraction digits. - */ - public void parseFraction() { - // get past . - pos++; - - outer: - while(pos < s.length()) { - switch(s.charAt(pos)) { - case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': - pos++; - break; - case 'e': case 'E': - parseExponent(); - break; - default: - break outer; - } - } - } - - /** - * Advances the pointer through the exponent. - */ - public void parseExponent() { - // get past E - pos++; - - if(check('-') || check('+')) { - pos++; - } - - outer: - while(pos < s.length()) { - switch(s.charAt(pos)) { - case '0': case '1': case '2': case '3': case '4': - case '5': case '6': case '7': case '8': case '9': - pos++; - break; - default: - break outer; - } - } - } - - /** - * Parses the next array. - * - * @return the array - * @throws JSONParseException if invalid JSON is found - */ - public Object parseArray() { - return parseArray(null); - } - - /** - * Parses the next array.
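Note how parseNumber narrows the result type: anything with a fraction or exponent becomes a Double, otherwise the value is an Integer when it fits and a Long when it does not. A tiny standalone sketch of that promotion rule:

    public class NumberPromotionDemo {
        // Mirrors the tail of JSONParser.parseNumber: prefer Integer, fall back
        // to Long, and use Double when a fraction or exponent was seen.
        static Number narrow(String digits, boolean isDouble) {
            if (isDouble)
                return Double.valueOf(digits);
            Long val = Long.valueOf(digits);
            if (val <= Integer.MAX_VALUE && val >= Integer.MIN_VALUE)
                return val.intValue();
            return val;
        }

        public static void main(String[] args) {
            System.out.println(narrow("42", false).getClass().getSimpleName());         // Integer
            System.out.println(narrow("4000000000", false).getClass().getSimpleName()); // Long
            System.out.println(narrow("4.5", true).getClass().getSimpleName());         // Double
        }
    }
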
- * - * @return the array - * @throws JSONParseException if invalid JSON is found - */ - protected Object parseArray(String name) { - if (name != null) { - _callback.arrayStart(name); - } else { - _callback.arrayStart(); - } - - read('['); - - int i = 0; - char current = get(); - while( current != ']' ) { - String elemName = String.valueOf(i++); - Object elem = parse(elemName); - doCallback(elemName, elem); - - if((current = get()) == ',') { - read(','); - } - else if(current == ']') { - break; - } - else { - throw new JSONParseException(s, pos); - } - } - - read(']'); - - return _callback.arrayDone(); - } - -} diff --git a/src/main/com/mongodb/util/JSONCallback.java b/src/main/com/mongodb/util/JSONCallback.java deleted file mode 100644 index 19bfeca5e1b..00000000000 --- a/src/main/com/mongodb/util/JSONCallback.java +++ /dev/null @@ -1,126 +0,0 @@ -// JSONCallback.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.util; - -import com.mongodb.BasicDBList; -import com.mongodb.BasicDBObject; -import com.mongodb.DBObject; -import com.mongodb.DBRef; -import org.bson.BSON; -import org.bson.BSONObject; -import org.bson.BasicBSONCallback; -import org.bson.types.*; - -import java.text.ParsePosition; -import java.text.SimpleDateFormat; -import java.util.Date; -import java.util.GregorianCalendar; -import java.util.SimpleTimeZone; -import java.util.UUID; -import java.util.regex.Pattern; - -public class JSONCallback extends BasicBSONCallback { - - @Override - public BSONObject create() { - return new BasicDBObject(); - } - - @Override - protected BSONObject createList() { - return new BasicDBList(); - } - - public void objectStart(boolean array, String name) { - _lastArray = array; - super.objectStart(array, name); - } - - public Object objectDone() { - String name = curName(); - Object o = super.objectDone(); - if (_lastArray) { - return o; - } - BSONObject b = (BSONObject) o; - - // override the object if it's a special type - if (b.containsField("$oid")) { - o = new ObjectId((String) b.get("$oid")); - } else if (b.containsField("$date")) { - if (b.get("$date") instanceof Number) { - o = new Date(((Number) b.get("$date")).longValue()); - } else { - SimpleDateFormat format = new SimpleDateFormat(_msDateFormat); - format.setCalendar(new GregorianCalendar(new SimpleTimeZone(0, "GMT"))); - o = format.parse(b.get("$date").toString(), new ParsePosition(0)); - - if (o == null) { - // try older format with no ms - format = new SimpleDateFormat(_secDateFormat); - format.setCalendar(new GregorianCalendar(new SimpleTimeZone(0, "GMT"))); - o = format.parse(b.get("$date").toString(), new ParsePosition(0)); - } - } - } else if (b.containsField("$regex")) { - o = Pattern.compile((String) b.get("$regex"), - BSON.regexFlags((String) b.get("$options"))); - } else if (b.containsField("$ts")) { //Legacy timestamp format - Integer ts = ((Number) b.get("$ts")).intValue(); - Integer inc = ((Number) b.get("$inc")).intValue(); - o = new 
BSONTimestamp(ts, inc); - } else if (b.containsField("$timestamp")) { - BSONObject tsObject = (BSONObject) b.get("$timestamp"); - Integer ts = ((Number) tsObject.get("t")).intValue(); - Integer inc = ((Number) tsObject.get("i")).intValue(); - o = new BSONTimestamp(ts, inc); - } else if (b.containsField("$code")) { - if (b.containsField("$scope")) { - o = new CodeWScope((String) b.get("$code"), (DBObject) b.get("$scope")); - } else { - o = new Code((String) b.get("$code")); - } - } else if (b.containsField("$ref")) { - o = new DBRef(null, (String) b.get("$ref"), b.get("$id")); - } else if (b.containsField("$minKey")) { - o = new MinKey(); - } else if (b.containsField("$maxKey")) { - o = new MaxKey(); - } else if (b.containsField("$uuid")) { - o = UUID.fromString((String) b.get("$uuid")); - } else if (b.containsField("$binary")) { - int type = (Integer) b.get("$type"); - byte[] bytes = (new Base64Codec()).decode((String) b.get("$binary")); - o = new Binary((byte) type, bytes); - } - - if (!isStackEmpty()) { - _put(name, o); - } else { - o = !BSON.hasDecodeHooks() ? o : BSON.applyDecodingHooks( o ); - setRoot(o); - } - return o; - } - - private boolean _lastArray = false; - - public static final String _msDateFormat = "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"; - public static final String _secDateFormat = "yyyy-MM-dd'T'HH:mm:ss'Z'"; -} diff --git a/src/main/com/mongodb/util/JSONParseException.java b/src/main/com/mongodb/util/JSONParseException.java deleted file mode 100644 index 233a5781a55..00000000000 --- a/src/main/com/mongodb/util/JSONParseException.java +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.util; - -/** - * Exception thrown when invalid JSON is passed to JSONParser. - * - * This exception creates a message that points to the first - * offending character in the JSON string: - * <pre>

        - * { "x" : 3, "y" : 4, some invalid json.... }
        - *                     ^
        - * </pre>
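The caret message shown in the example above is assembled in getMessage(): echo the input on one line, then pad with spaces up to the error offset and emit a "^". A dependency-free sketch of that construction:

    public class CaretMessageDemo {
        // Builds the same pointer message as JSONParseException.getMessage():
        // the input on one line, a '^' under the offending position on the next.
        static String message(String s, int pos) {
            StringBuilder sb = new StringBuilder();
            sb.append("\n").append(s).append("\n");
            for (int i = 0; i < pos; i++)
                sb.append(" ");
            sb.append("^");
            return sb.toString();
        }

        public static void main(String[] args) {
            String json = "{ \"x\" : 3, oops }";
            System.out.println(message(json, 11)); // caret lands under the 'o' of "oops"
        }
    }
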
        - */ -public class JSONParseException extends RuntimeException { - - private static final long serialVersionUID = -4415279469780082174L; - - String s; - int pos; - - public String getMessage() { - StringBuilder sb = new StringBuilder(); - sb.append("\n"); - sb.append(s); - sb.append("\n"); - for(int i=0;i<pos;i++) { - sb.append(" "); - } - sb.append("^"); - return sb.toString(); - } - - public JSONParseException(String s, int pos) { - this.s = s; - this.pos = pos; - } - - public JSONParseException(String s, int pos, Throwable cause) { - super(cause); - this.s = s; - this.pos = pos; - } -} diff --git a/src/main/com/mongodb/util/JSONSerializers.java b/src/main/com/mongodb/util/JSONSerializers.java deleted file mode 100644 --- a/src/main/com/mongodb/util/JSONSerializers.java +++ /dev/null -package com.mongodb.util; - -/** - * Defines static methods for getting ObjectSerializer instances that produce various flavors of - * JSON. - */ -public class JSONSerializers { - - private JSONSerializers() { - } - - /** - * Returns an ObjectSerializer that mostly conforms to the strict JSON format defined in - * extended JSON, with a few differences kept for compatibility with earlier driver versions. Clients should generally use - * getStrict in preference to this method. - * - * @return object serializer - * @see #getStrict() - */ - public static ObjectSerializer getLegacy() { - - ClassMapBasedObjectSerializer serializer = addCommonSerializers(); - - serializer.addObjectSerializer(Date.class, new LegacyDateSerializer(serializer)); - serializer.addObjectSerializer(BSONTimestamp.class, new LegacyBSONTimestampSerializer(serializer)); - serializer.addObjectSerializer(Binary.class, new LegacyBinarySerializer()); - serializer.addObjectSerializer(byte[].class, new LegacyBinarySerializer()); - return serializer; - } - - /** - * Returns an ObjectSerializer that conforms to the strict JSON format defined in - * extended JSON. - * - * @return object serializer - */ - public static ObjectSerializer getStrict() { - - ClassMapBasedObjectSerializer serializer = addCommonSerializers(); - - serializer.addObjectSerializer(Date.class, new DateSerializer(serializer)); - serializer.addObjectSerializer(BSONTimestamp.class, new BSONTimestampSerializer(serializer)); - serializer.addObjectSerializer(Binary.class, new BinarySerializer(serializer)); - serializer.addObjectSerializer(byte[].class, new ByteArraySerializer(serializer)); - return serializer; - } - - static ClassMapBasedObjectSerializer addCommonSerializers() { - ClassMapBasedObjectSerializer serializer = new ClassMapBasedObjectSerializer(); - - serializer.addObjectSerializer(Object[].class, new ObjectArraySerializer(serializer)); - serializer.addObjectSerializer(Boolean.class, new ToStringSerializer()); - serializer.addObjectSerializer(Code.class, new CodeSerializer(serializer)); - serializer.addObjectSerializer(CodeWScope.class, new CodeWScopeSerializer(serializer)); - serializer.addObjectSerializer(DBObject.class, new DBObjectSerializer(serializer)); - serializer.addObjectSerializer(DBRefBase.class, new DBRefBaseSerializer(serializer)); - serializer.addObjectSerializer(Iterable.class, new IterableSerializer(serializer)); - serializer.addObjectSerializer(Map.class, new MapSerializer(serializer)); - serializer.addObjectSerializer(MaxKey.class, new MaxKeySerializer(serializer)); - serializer.addObjectSerializer(MinKey.class, new MinKeySerializer(serializer)); - serializer.addObjectSerializer(Number.class, new ToStringSerializer()); - serializer.addObjectSerializer(ObjectId.class, new ObjectIdSerializer(serializer)); - serializer.addObjectSerializer(Pattern.class, new PatternSerializer(serializer)); - serializer.addObjectSerializer(String.class, new StringSerializer()); - serializer.addObjectSerializer(UUID.class, new UUIDSerializer(serializer)); - return serializer; - } - - private abstract static class AbstractObjectSerializer implements ObjectSerializer { - - @Override - public String serialize(Object obj) { - StringBuilder builder = new StringBuilder(); - serialize(obj, builder); - return builder.toString(); - } - } - - private abstract static class CompoundObjectSerializer extends AbstractObjectSerializer { - protected final ObjectSerializer serializer; - - CompoundObjectSerializer(ObjectSerializer serializer) { - this.serializer = serializer; - } - } - - private static class LegacyBinarySerializer extends AbstractObjectSerializer { - - @Override - public void serialize(Object obj, StringBuilder buf) { - buf.append("<Binary Data>"); - } - - } - - private static class ObjectArraySerializer extends CompoundObjectSerializer { - - ObjectArraySerializer(ObjectSerializer serializer) { - super(serializer); - } - - @Override - public void serialize(Object obj, StringBuilder buf) { - buf.append("[ "); - for (int i = 0; i < Array.getLength(obj); i++) { - if (i > 0) - buf.append(" , "); - serializer.serialize(Array.get(obj, i), buf); - } - - buf.append("]"); - } - - } - - private static class ToStringSerializer extends AbstractObjectSerializer { - - @Override - public void serialize(Object obj, StringBuilder buf) { - buf.append(obj.toString()); - } - - } - - private static class LegacyBSONTimestampSerializer extends CompoundObjectSerializer { - - LegacyBSONTimestampSerializer(ObjectSerializer serializer) { - super(serializer); - } - - @Override - public void serialize(Object obj, StringBuilder buf) { - BSONTimestamp t = (BSONTimestamp) obj; - BasicDBObject temp = new BasicDBObject(); - temp.put("$ts", Integer.valueOf(t.getTime())); - temp.put("$inc", Integer.valueOf(t.getInc())); - serializer.serialize(temp, buf); - } - - } - - private static class CodeSerializer extends CompoundObjectSerializer { - - CodeSerializer(ObjectSerializer serializer) { - super(serializer); - } - - @Override - public void serialize(Object obj, StringBuilder buf) { - Code c = (Code) obj; - BasicDBObject temp = new BasicDBObject(); - temp.put("$code", c.getCode()); - serializer.serialize(temp, buf); - } - - } - - private static class CodeWScopeSerializer extends CompoundObjectSerializer { - - CodeWScopeSerializer(ObjectSerializer serializer) { - super(serializer); - } - - @Override - public void serialize(Object obj, StringBuilder buf) { - CodeWScope c = (CodeWScope) obj; - BasicDBObject temp = new BasicDBObject(); - temp.put("$code", c.getCode()); - temp.put("$scope", c.getScope()); - serializer.serialize(temp, buf); - } - - } - - private static class LegacyDateSerializer extends CompoundObjectSerializer { - - LegacyDateSerializer(ObjectSerializer serializer) { - super(serializer); - } - - @Override - public void serialize(Object obj, StringBuilder buf) { - Date d = (Date) obj; - SimpleDateFormat format = new SimpleDateFormat( - "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"); - format.setCalendar(new GregorianCalendar( - new SimpleTimeZone(0,
"GMT"))); - serializer.serialize( - new BasicDBObject("$date", format.format(d)), - buf); - } - - } - - private static class DBObjectSerializer extends CompoundObjectSerializer { - - DBObjectSerializer(ObjectSerializer serializer) { - super(serializer); - } - - @Override - public void serialize(Object obj, StringBuilder buf) { - boolean first = true; - buf.append("{ "); - DBObject dbo = (DBObject) obj; - String name; - - for (final String s : dbo.keySet()) { - name = s; - - if (first) - first = false; - else - buf.append(" , "); - - JSON.string(buf, name); - buf.append(" : "); - serializer.serialize(dbo.get(name), buf); - } - - buf.append("}"); - } - - } - - private static class DBRefBaseSerializer extends CompoundObjectSerializer { - - DBRefBaseSerializer(ObjectSerializer serializer) { - super(serializer); - } - - @Override - public void serialize(Object obj, StringBuilder buf) { - DBRefBase ref = (DBRefBase) obj; - BasicDBObject temp = new BasicDBObject(); - temp.put("$ref", ref.getRef()); - temp.put("$id", ref.getId()); - serializer.serialize(temp, buf); - } - - } - - private static class IterableSerializer extends CompoundObjectSerializer { - - IterableSerializer(ObjectSerializer serializer) { - super(serializer); - } - - @Override - public void serialize(Object obj, StringBuilder buf) { - boolean first = true; - buf.append("[ "); - - for (final Object o : ((Iterable) obj)) { - if (first) - first = false; - else - buf.append(" , "); - - serializer.serialize(o, buf); - } - buf.append("]"); - } - } - - private static class MapSerializer extends CompoundObjectSerializer { - - MapSerializer(ObjectSerializer serializer) { - super(serializer); - } - - @Override - public void serialize(Object obj, StringBuilder buf) { - boolean first = true; - buf.append("{ "); - Map m = (Map) obj; - Entry entry; - - for (final Object o : m.entrySet()) { - entry = (Entry) o; - if (first) - first = false; - else - buf.append(" , "); - JSON.string(buf, entry.getKey().toString()); - buf.append(" : "); - serializer.serialize(entry.getValue(), buf); - } - - buf.append("}"); - } - - } - - private static class MaxKeySerializer extends CompoundObjectSerializer { - - MaxKeySerializer(ObjectSerializer serializer) { - super(serializer); - } - - @Override - public void serialize(Object obj, StringBuilder buf) { - serializer.serialize(new BasicDBObject("$maxKey", - 1), buf); - } - - } - - private static class MinKeySerializer extends CompoundObjectSerializer { - - MinKeySerializer(ObjectSerializer serializer) { - super(serializer); - } - - @Override - public void serialize(Object obj, StringBuilder buf) { - serializer.serialize(new BasicDBObject("$minKey", - 1), buf); - } - - } - - private static class ObjectIdSerializer extends CompoundObjectSerializer { - - ObjectIdSerializer(ObjectSerializer serializer) { - super(serializer); - } - - @Override - public void serialize(Object obj, StringBuilder buf) { - serializer.serialize( - new BasicDBObject("$oid", obj.toString()), buf); - } - } - - private static class PatternSerializer extends CompoundObjectSerializer { - - PatternSerializer(ObjectSerializer serializer) { - super(serializer); - } - - @Override - public void serialize(Object obj, StringBuilder buf) { - DBObject externalForm = new BasicDBObject(); - externalForm.put("$regex", obj.toString()); - if (((Pattern) obj).flags() != 0) - externalForm.put("$options", - Bytes.regexFlags(((Pattern) obj).flags())); - serializer.serialize(externalForm, buf); - } - } - - private static class StringSerializer extends 
AbstractObjectSerializer { - - @Override - public void serialize(Object obj, StringBuilder buf) { - JSON.string(buf, (String) obj); - } - } - - private static class UUIDSerializer extends CompoundObjectSerializer { - - UUIDSerializer(ObjectSerializer serializer) { - super(serializer); - } - - @Override - public void serialize(Object obj, StringBuilder buf) { - UUID uuid = (UUID) obj; - BasicDBObject temp = new BasicDBObject(); - temp.put("$uuid", uuid.toString()); - serializer.serialize(temp, buf); - } - } - - private static class BSONTimestampSerializer extends CompoundObjectSerializer { - - BSONTimestampSerializer(ObjectSerializer serializer) { - super(serializer); - } - - @Override - public void serialize(Object obj, StringBuilder buf) { - BSONTimestamp t = (BSONTimestamp) obj; - BasicDBObject temp = new BasicDBObject(); - temp.put("t", Integer.valueOf(t.getTime())); - temp.put("i", Integer.valueOf(t.getInc())); - BasicDBObject timestampObj = new BasicDBObject(); - timestampObj.put("$timestamp", temp); - serializer.serialize(timestampObj, buf); - } - - } - - private static class DateSerializer extends CompoundObjectSerializer { - - DateSerializer(ObjectSerializer serializer) { - super(serializer); - } - - @Override - public void serialize(Object obj, StringBuilder buf) { - Date d = (Date) obj; - serializer.serialize( - new BasicDBObject("$date", d.getTime()), buf); - } - - } - - private abstract static class BinarySerializerBase extends CompoundObjectSerializer { - BinarySerializerBase(ObjectSerializer serializer) { - super(serializer); - } - - protected void serialize(byte[] bytes, byte type, StringBuilder buf) { - DBObject temp = new BasicDBObject(); - temp.put("$binary", - (new Base64Codec()).encode(bytes)); - temp.put("$type", type); - serializer.serialize(temp, buf); - } - } - - private static class BinarySerializer extends BinarySerializerBase { - BinarySerializer(ObjectSerializer serializer) { - super(serializer); - } - - @Override - public void serialize(Object obj, StringBuilder buf) { - Binary bin = (Binary) obj; - serialize(bin.getData(), bin.getType(), buf); - } - - } - - private static class ByteArraySerializer extends BinarySerializerBase { - ByteArraySerializer(ObjectSerializer serializer) { - super(serializer); - } - - @Override - public void serialize(Object obj, StringBuilder buf) { - serialize((byte[]) obj, (byte) 0, buf); - } - - } -} diff --git a/src/main/com/mongodb/util/MyAsserts.java b/src/main/com/mongodb/util/MyAsserts.java deleted file mode 100644 index 3d1f318c14f..00000000000 --- a/src/main/com/mongodb/util/MyAsserts.java +++ /dev/null @@ -1,223 +0,0 @@ -// MyAsserts.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.util; - -import java.util.Arrays; -import java.util.regex.Pattern; - -/** - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. 
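The serializers above are all registered in a class-keyed map and dispatched by the runtime type of the value being written. The ClassMapBasedObjectSerializer that does this is not shown in this hunk (the real one also walks superclasses and interfaces), but a toy sketch of the dispatch idea looks like this:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.BiConsumer;

    public class ClassMapDemo {
        // Toy version of class-map dispatch: look up a handler by exact runtime
        // class and let it append to the buffer; fall back to toString.
        private final Map<Class<?>, BiConsumer<Object, StringBuilder>> handlers = new HashMap<>();

        void register(Class<?> c, BiConsumer<Object, StringBuilder> h) {
            handlers.put(c, h);
        }

        void serialize(Object obj, StringBuilder buf) {
            BiConsumer<Object, StringBuilder> h = handlers.get(obj.getClass());
            if (h == null)
                buf.append(obj); // fallback, like ToStringSerializer
            else
                h.accept(obj, buf);
        }

        public static void main(String[] args) {
            ClassMapDemo s = new ClassMapDemo();
            s.register(String.class, (o, b) -> b.append('"').append(o).append('"'));
            StringBuilder buf = new StringBuilder();
            s.serialize("abc", buf);
            buf.append(" : ");
            s.serialize(42, buf);
            System.out.println(buf); // "abc" : 42
        }
    }
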
- */ -@Deprecated -public class MyAsserts { - - /** - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. - */ - @Deprecated - public static class MyAssert extends RuntimeException { - - private static final long serialVersionUID = -4415279469780082174L; - - MyAssert( String s ){ - super( s ); - _s = s; - } - - public String toString(){ - return _s; - } - - final String _s; - } - - public static void assertTrue( boolean b ){ - if ( ! b ) - throw new MyAssert( "false" ); - } - - public static void assertTrue( boolean b , String msg ){ - if ( ! b ) - throw new MyAssert( "false : " + msg ); - } - - public static void assertFalse( boolean b ){ - if ( b ) - throw new MyAssert( "true" ); - } - - public static void assertEquals( int a , int b ){ - if ( a != b ) - throw new MyAssert( "" + a + " != " + b ); - } - - public static void assertEquals( long a , long b ){ - if ( a != b ) - throw new MyAssert( "" + a + " != " + b ); - } - - public static void assertEquals( char a , char b ){ - if ( a != b ) - throw new MyAssert( "" + a + " != " + b ); - } - - public static void assertEquals( short a , short b ){ - if ( a != b ) - throw new MyAssert( "" + a + " != " + b ); - } - - public static void assertEquals( byte expected , byte result ) { - if ( expected != result ) - throw new MyAssert( "" + expected + " != " + result ); - } - - public static void assertEquals( double a , double b , double diff ){ - if ( Math.abs( a - b ) > diff ) - throw new MyAssert( "" + a + " != " + b ); - } - - public static void assertEquals( String a , Object b ){ - _assertEquals( a , b == null ? null : b.toString() ); - } - - public static void assertSame(Object a, Object b) { - if ( a != b ) - throw new MyAssert( a + " != " + b ); - } - - public static void assertEquals( Object a , Object b ){ - _assertEquals( a , b ); - } - - public static void _assertEquals( Object a , Object b ){ - if ( a == null ){ - if ( b == null ) - return; - throw new MyAssert( "left null, right not" ); - } - - if ( a.equals( b ) ) - return; - - throw new MyAssert( "[" + a + "] != [" + b + "] " ); - } - - public static void assertEquals( String a , String b , String msg ){ - if ( a.equals( b ) ) - return; - - throw new MyAssert( "[" + a + "] != [" + b + "] " + msg ); - } - - public static void assertArrayEquals(byte[] expected, byte[] result) { - if (Arrays.equals(expected, result)) - return; - - throw new MyAssert("These arrays are different, but they might be big so not printing them here"); - } - - public static void assertArrayEquals(char[] expected, char[] result) { - if (Arrays.equals(expected, result)) - return; - - throw new MyAssert("These arrays are different, but they might be big so not printing them here"); - } - - public static void assertNotEquals( Object a , Object b ){ - if ( a == null ){ - if ( b != null ) - return; - throw new MyAssert( "left null, right null" ); - } - - if ( ! a.equals( b ) ) - return; - - throw new MyAssert( "[" + a + "] == [" + b + "] " ); - } - - public static void assertClose( String a , Object o){ - assertClose( a , o == null ? 
"null" : o.toString() ); - } - - public static void assertClose( String a , String b ){ - assertClose(a, b, ""); - } - - public static void assertClose( String a , String b, String tag ){ - - if (isClose(a, b)) { - return; - } - - throw new MyAssert( tag + "[" + a + "] != [" + b + "]" ); - } - - public static boolean isClose(String a, String b) { - a = _simplify( a ); - b = _simplify( b ); - return a.equalsIgnoreCase(b); - } - - private static String _simplify( String s ){ - s = s.trim(); - s = _whiteSpace.matcher( s ).replaceAll( "" ); - return s; - } - - private static Pattern _whiteSpace = Pattern.compile( "\\s+" , Pattern.DOTALL | Pattern.MULTILINE ); - - public static void assertNull( Object foo ){ - if ( foo == null ) - return; - - throw new MyAssert( "not null [" + foo + "]" ); - } - - public static void assertNotNull( Object foo ){ - if ( foo != null ) - return; - - throw new MyAssert( "null" ); - } - - public static void assertLess( long lower , long higher ){ - if ( lower < higher ) - return; - - throw new MyAssert( lower + " is higher than " + higher ); - } - - public static void assertLess( double lower , double higher ){ - if ( lower < higher ) - return; - - throw new MyAssert( lower + " is higher than " + higher ); - } - - public static void assertEmptyString( String s ) { - if( !s.equals( "" ) ) - throw new MyAssert( s ); - } - - public static void fail(String errorMessage) { - throw new MyAssert(errorMessage); - } - -} diff --git a/src/main/com/mongodb/util/ObjectSerializer.java b/src/main/com/mongodb/util/ObjectSerializer.java deleted file mode 100644 index 9ac50673343..00000000000 --- a/src/main/com/mongodb/util/ObjectSerializer.java +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Copyright (C) 2012 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.util; - -/** - * Interface describing methods for serializing an object to a string. - */ -public interface ObjectSerializer { - /** - * Serializes obj into buf. - * - * @param obj object to serialize - * @param buf buffer to serialize into - */ - void serialize(Object obj, StringBuilder buf); - - /** - * Serializes obj. - * @param obj object to serialize - * @return the serialized object - */ - String serialize(Object obj); -} diff --git a/src/main/com/mongodb/util/OptionMap.java b/src/main/com/mongodb/util/OptionMap.java deleted file mode 100644 index 123fb6b9b20..00000000000 --- a/src/main/com/mongodb/util/OptionMap.java +++ /dev/null @@ -1,35 +0,0 @@ -// OptionMap.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.util; - -import java.util.TreeMap; - -/** - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. - */ -@Deprecated -public class OptionMap extends TreeMap<String,String> { - - private static final long serialVersionUID = -4415279469780082174L; - - public int getInt( String name , int def ){ - return StringParseUtil.parseIfInt( get( name ) , def ); - } - -} diff --git a/src/main/com/mongodb/util/SimplePool.java b/src/main/com/mongodb/util/SimplePool.java deleted file mode 100644 index e03c7f377db..00000000000 --- a/src/main/com/mongodb/util/SimplePool.java +++ /dev/null @@ -1,204 +0,0 @@ -/** - * Copyright (C) 2008-2012 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.util; - -import java.util.ArrayList; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; - -/** - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. - */ -@Deprecated -public abstract class SimplePool<T> { - - /** Initializes a new pool of objects. - * @param name name for the pool - * @param size max to hold at any given time. if < 0 then no limit - */ - public SimplePool(String name, int size){ - _name = name; - _size = size; - _sem = new Semaphore(size); - } - - /** Creates a new object of this pool's type. Implementations should throw a runtime exception if unable to create. - * @return the new object. - */ - protected abstract T createNew(); - - /** - * override this if you need to do any cleanup - */ - public void cleanup( T t ) { - } - - /** - * Pick a member of {@code _avail}. This method is called with a lock held on {@code _avail}, so it may be used safely. - * - * @param recommended the recommended member to choose.
- * @param couldCreate true if there is room in the pool to create a new object - * @return >= 0 the one to use, -1 create a new one - */ - protected int pick( int recommended , boolean couldCreate ){ - return recommended; - } - - /** - * Call done when you are done with an object from the pool; - * if there is room and the object is ok, it will get added back. - * @param t Object to add - */ - public void done( T t ){ - synchronized ( this ) { - if (_closed) { - cleanup(t); - return; - } - - assertConditions(); - - if (!_out.remove(t)) { - throw new RuntimeException("trying to put something back in the pool that wasn't checked out"); - } - - _avail.add(t); - - } - _sem.release(); - } - - private void assertConditions() { - assert getTotal() <= getMaxSize(); - } - - public void remove( T t ) { - done(t); - } - - /** Gets an object from the pool - will block if none are available - * @return An object from the pool - */ - public T get() throws InterruptedException { - return get(-1); - } - - /** Gets an object from the pool - will block if none are available - * @param waitTime - * negative - forever - * 0 - return immediately no matter what - * positive ms to wait - * @return An object from the pool, or null if can't get one in the given waitTime - */ - public T get(long waitTime) throws InterruptedException { - if (!permitAcquired(waitTime)) { - return null; - } - - synchronized (this) { - assertConditions(); - - int toTake = pick(_avail.size() - 1, getTotal() < getMaxSize()); - T t; - if (toTake >= 0) { - t = _avail.remove(toTake); - } else { - t = createNewAndReleasePermitIfFailure(); - } - _out.add(t); - - return t; - } - } - - private T createNewAndReleasePermitIfFailure() { - try { - T newMember = createNew(); - if (newMember == null) { - throw new IllegalStateException("null pool members are not allowed"); - } - return newMember; - } catch (RuntimeException e) { - _sem.release(); - throw e; - } catch (Error e) { - _sem.release(); - throw e; - } - } - - private boolean permitAcquired(final long waitTime) throws InterruptedException { - if (waitTime > 0) { - return _sem.tryAcquire(waitTime, TimeUnit.MILLISECONDS); - } else if (waitTime < 0) { - _sem.acquire(); - return true; - } else { - return _sem.tryAcquire(); - } - } - - /** Clears the pool of all objects.
*/ -protected synchronized void close(){ - _closed = true; - for (T t : _avail) - cleanup(t); - _avail.clear(); - _out.clear(); - } - - public String getName() { - return _name; - } - - public synchronized int getTotal(){ - return _avail.size() + _out.size(); - } - - public synchronized int getInUse(){ - return _out.size(); - } - - public synchronized int getAvailable(){ - return _avail.size(); - } - - public int getMaxSize(){ - return _size; - } - - public synchronized String toString(){ - StringBuilder buf = new StringBuilder(); - buf.append("pool: ").append(_name) - .append(" maxToKeep: ").append(_size) - .append(" avail ").append(_avail.size()) - .append(" out ").append(_out.size()) - ; - return buf.toString(); - } - - protected final String _name; - protected final int _size; - - protected final List<T> _avail = new ArrayList<T>(); - protected final Set<T> _out = new HashSet<T>(); - private final Semaphore _sem; - private boolean _closed; -} diff --git a/src/main/com/mongodb/util/StringBuilderPool.java b/src/main/com/mongodb/util/StringBuilderPool.java deleted file mode 100644 index 2e76bd5d524..00000000000 --- a/src/main/com/mongodb/util/StringBuilderPool.java +++ /dev/null @@ -1,55 +0,0 @@ -// StringBuilderPool.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.util; - -/** - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. - */ -@Deprecated -public class StringBuilderPool extends SimplePool<StringBuilder> { - - /** Initializes a pool holding up to a given number of StringBuilders. - * @param name identifying name for the pool - * @param maxToKeep the number of string builders in the pool - */ - public StringBuilderPool( String name , int maxToKeep ){ - super( "StringBuilderPool-" + name , maxToKeep ); - } - - /** Create a new string builder. - * @return the string builder - */ - public StringBuilder createNew(){ - return new StringBuilder(); - } - - /** Checks that the given string builder is within the size limit. - * @param buf the builder to check - * @return if it is not too big - */ - public boolean ok( StringBuilder buf ){ - if ( buf.length() > getMaxSize() ) - return false; - buf.setLength( 0 ); - return true; - } - - protected long memSize( StringBuilder buf ){ - return buf.length() * 2; - } -} diff --git a/src/main/com/mongodb/util/StringParseUtil.java b/src/main/com/mongodb/util/StringParseUtil.java deleted file mode 100644 index 423c366d195..00000000000 --- a/src/main/com/mongodb/util/StringParseUtil.java +++ /dev/null @@ -1,270 +0,0 @@ -// StringParseUtil.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
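The SimplePool/StringBuilderPool pattern above is a semaphore-capped object pool: the semaphore limits how many members can be checked out, and an idle list recycles returned members. A dependency-free skeleton of the same pattern (names are illustrative):

    import java.util.ArrayDeque;
    import java.util.Deque;
    import java.util.concurrent.Semaphore;
    import java.util.concurrent.TimeUnit;

    public class TinyPool {
        // A semaphore caps the number of checked-out members;
        // the deque holds idle members for reuse.
        private final Semaphore permits;
        private final Deque<StringBuilder> idle = new ArrayDeque<>();

        TinyPool(int size) {
            permits = new Semaphore(size);
        }

        StringBuilder get(long waitMillis) throws InterruptedException {
            if (!permits.tryAcquire(waitMillis, TimeUnit.MILLISECONDS))
                return null; // timed out: pool exhausted
            synchronized (this) {
                StringBuilder b = idle.pollLast();
                return b != null ? b : new StringBuilder(); // reuse or create
            }
        }

        void done(StringBuilder b) {
            b.setLength(0); // reset before returning to the pool
            synchronized (this) {
                idle.addLast(b);
            }
            permits.release();
        }

        public static void main(String[] args) throws InterruptedException {
            TinyPool pool = new TinyPool(1);
            StringBuilder b = pool.get(10);
            System.out.println(pool.get(10));         // null: the single permit is taken
            pool.done(b);
            System.out.println(pool.get(10) != null); // true again
        }
    }
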
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.util; - -/** - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. - */ -@Deprecated -public final class StringParseUtil { - - /** Turns a string into a boolean value and returns a default value if unsuccessful. - * @param s the string to convert - * @param d the default value - * @return equivalent boolean value - */ - public static boolean parseBoolean( String s , boolean d ){ - - if ( s == null ) - return d; - - s = s.trim(); - if ( s.length() == 0 ) - return d; - - char c = s.charAt( 0 ); - - if ( c == 't' || c == 'T' || - c == 'y' || c == 'Y' ) - return true; - - if ( c == 'f' || c == 'F' || - c == 'n' || c == 'N' ) - return false; - - return d; - } - - /** Turns a string into an int and returns a default value if unsuccessful. - * @param s the string to convert - * @param def the default value - * @return the int value - */ - public static int parseInt( String s , int def ){ - return parseInt( s , def , null , true ); - } - - /** Turns a string into an int using a given radix. - * @param s the string to convert - * @param radix radix to use - * @return the int value - */ - public static Number parseIntRadix( String s , int radix ){ - if ( s == null ) - return Double.NaN; - - s = s.trim(); - if ( s.length() == 0 ) - return Double.NaN; - - int firstDigit = -1; - int i = 0; - if ( s.charAt( 0 ) == '-' ) - i = 1; - // Find first non-digit. - for ( ; i 0; - if ( useLastIdx ) - lastIdx[0] = -1; - - if ( s == null ) - return def; - - s = s.trim(); - if ( s.length() == 0 ) - return def; - - int firstDigit = -1; - for ( int i=0; i 0 && s.charAt( firstDigit - 1 ) == '-' ) - firstDigit--; - - if ( useLastIdx ) - lastIdx[0] = lastDigit; - return Integer.parseInt( s.substring( firstDigit , lastDigit ) ); - } - - /** Turns a string into a Number and returns a default value if unsuccessful. - * @param s the string to convert - * @param def the default value - * @return the numeric value - */ - public static Number parseNumber( String s , Number def ){ - if ( s == null ) - return def; - - s = s.trim(); - if ( s.length() == 0) - return def; - - - int firstDigit = -1; - for ( int i=0; i 0 && s.charAt( firstDigit - 1 ) == '.' ){ - firstDigit--; - isDouble = true; - } - - if ( firstDigit > 0 && s.charAt( firstDigit - 1 ) == '-' ) - firstDigit--; - - if ( lastDigit < s.length() && s.charAt( lastDigit ) == '.' ){ - lastDigit++; - while ( lastDigit < s.length() && Character.isDigit( s.charAt( lastDigit ) ) ) - lastDigit++; - - isDouble = true; - } - - if ( lastDigit < s.length() && s.charAt( lastDigit ) == 'E' ){ - lastDigit++; - while ( lastDigit < s.length() && Character.isDigit( s.charAt( lastDigit ) ) ) - lastDigit++; - - isDouble = true; - } - - - final String actual = s.substring( firstDigit , lastDigit ); - - if ( isDouble || actual.length() > 17 ) - return Double.valueOf( actual ); - - - if ( actual.length() > 10 ) - return Long.valueOf( actual ); - - return Integer.valueOf( actual ); - } - - /** Use Java's "strict parsing" methods Integer.parseInt and Double.parseDouble to parse s "strictly". i.e. 
if it's neither a double nor an integer, fail. - * @param s the string to convert - * @return the numeric value - */ - public static Number parseStrict( String s ){ - if( s.length() == 0 ) - return 0; - if( s.charAt(0) == '+' ) - s = s.substring( 1 ); - - if( s.matches( "(\\+|-)?Infinity" ) ) { - if( s.startsWith( "-" ) ) { - return Double.NEGATIVE_INFINITY; - } - else { - return Double.POSITIVE_INFINITY; - } - } - else if( s.indexOf('.') != -1 || - s.equals( "-0" ) ) { - return Double.valueOf(s); - } - // parse hex - else if( s.toLowerCase().indexOf( "0x" ) > -1 ) { - int coef = s.charAt( 0 ) == '-' ? -1 : 1; - if( s.length() > 17 ) - throw new RuntimeException( "Can't handle a number this big: "+s ); - // if coef == -1: (coef * -.5 + 2.5) == 3 - // e.g., -0xf00 (start substring at 3) - // if coef == 1: (coef * -.5 + 2.5) == 2 - // e.g., 0xf00 (start substring at 2) - if( s.length() > 9 ) - return coef * Long.valueOf( s.substring( (int)(coef * -.5 + 2.5) ) , 16 ); - return coef * Integer.valueOf( s.substring( (int)(coef * -.5 + 2.5) ) , 16 ); - } - - int e = s.toLowerCase().indexOf( 'e' ); - // parse exp - if( e > 0 ) { - double num = Double.parseDouble( s.substring( 0, e ) ); - int exp = Integer.parseInt( s.substring( e + 1 ) ); - return num * Math.pow( 10 , exp ); - } - - // parse with smallest possible precision - if ( s.length() > 17 ) - return Double.valueOf( s ); - else if ( s.length() > 9 ) - return Long.valueOf(s); - return Integer.valueOf(s); - } - - public static int parseIfInt( String s , int def ){ - if ( s == null || s.length() == 0 ) - return def; - - s = s.trim(); - - for ( int i=0; i { - - /** Initializes a new thread pool with a given name and number of threads. - * @param name identifying name - * @param numThreads the number of threads allowed in the pool - */ - public ThreadPool( String name , int numThreads ){ - this( name , numThreads , Integer.MAX_VALUE ); - } - - /** Initializes a new thread pool with a given name, number of threads, and queue size. - * @param name identifying name - * @param numThreads the number of threads allowed in the pool - * @param maxQueueSize the size of the pool entry queue - */ - public ThreadPool( String name , int numThreads , int maxQueueSize ){ - _name = name; - _maxThreads = numThreads; - _queue = new LinkedBlockingQueue<T>( maxQueueSize ); - _myThreadGroup = new MyThreadGroup(); - _threads.add( new MyThread() ); - } - - /** Handles a given object. - * @param t the object to handle - * @throws Exception if handling fails - */ - public abstract void handle( T t ) - throws Exception ; - - /** Handles a given object and exception. - * @param t the object to handle - * @param e the exception to handle - */ - public abstract void handleError( T t , Exception e ); - - /** Returns the size of the pool's queue. - * @return pool size - */ - public int queueSize(){ - return _queue.size(); - } - - /** Adds a new object to the pool, if possible.
- * @param t the object to be added - * @return if the object was successfully added - */ - public boolean offer( T t ){ - if ( ( _queue.size() > 0 || _inProgress.get() == _threads.size() ) && - _threads.size() < _maxThreads ) - _threads.add( new MyThread() ); - return _queue.offer( t ); - } - - public int inProgress(){ - return _inProgress.get(); - } - - public int numThreads(){ - return _threads.size(); - } - - class MyThreadGroup extends ThreadGroup { - MyThreadGroup(){ - super( "ThreadPool.MyThreadGroup:" + _name ); - } - - public void uncaughtException( Thread t, Throwable e ){ - for ( int i=0; i<_threads.size(); i++ ){ - if ( _threads.get( i ) == t ){ - _threads.remove( i ); - break; - } - } - } - } - - class MyThread extends Thread { - MyThread(){ - super( _myThreadGroup , "ThreadPool.MyThread:" + _name + ":" + _threads.size() ); - setDaemon( true ); - start(); - } - - public void run(){ - while ( true ){ - T t = null; - - try { - t = _queue.take(); - } - catch ( InterruptedException ie ){ - } - - if ( t == null ) - continue; - - try { - _inProgress.incrementAndGet(); - handle( t ); - } - catch ( Exception e ){ - handleError( t , e ); - } - finally { - _inProgress.decrementAndGet(); - } - } - } - } - - final String _name; - final int _maxThreads; - - private final AtomicInteger _inProgress = new AtomicInteger(0); - private final List _threads = new Vector(); - private final BlockingQueue _queue; - private final MyThreadGroup _myThreadGroup; -} diff --git a/src/main/com/mongodb/util/ThreadUtil.java b/src/main/com/mongodb/util/ThreadUtil.java deleted file mode 100644 index f18a72c7399..00000000000 --- a/src/main/com/mongodb/util/ThreadUtil.java +++ /dev/null @@ -1,80 +0,0 @@ -// ThreadUtil.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.util; - -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; - -/** - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. 
- */ -@Deprecated -public class ThreadUtil { - - /** Creates an prints a stack trace */ - public static void printStackTrace(){ - Exception e = new Exception(); - e.fillInStackTrace(); - e.printStackTrace(); - } - - /** Pauses for a given number of milliseconds - * @param time number of milliseconds for which to pause - */ - public static void sleep( long time ){ - try { - Thread.sleep( time ); - } - catch ( InterruptedException e ){ - } - } - - public static void pushStatus( String what ){ - pushStatus( Thread.currentThread() , what ); - } - - public static void pushStatus( Thread t , String what ){ - getStatus( t ).push( what ); - } - - public static void clearStatus(){ - clearStatus( Thread.currentThread() ); - } - - public static void clearStatus( Thread t ){ - getStatus( t ).clear(); - } - - public static FastStack getStatus(){ - return getStatus( Thread.currentThread() ); - } - - public static FastStack getStatus( Thread t ){ - FastStack s = _threads.get( t.getId() ); - if ( s == null ){ - s = new FastStack(); - _threads.put( t.getId() , s ); - } - return s; - } - - private static final Map> _threads = Collections.synchronizedMap( new HashMap>() ); - -} diff --git a/src/main/com/mongodb/util/TimeConstants.java b/src/main/com/mongodb/util/TimeConstants.java deleted file mode 100644 index 906f31d265f..00000000000 --- a/src/main/com/mongodb/util/TimeConstants.java +++ /dev/null @@ -1,43 +0,0 @@ -// TimeConstants.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.util; - -/** - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. - */ -@Deprecated -public class TimeConstants { - - public static final long MS_MILLISECOND = 1; - public static final long MS_SECOND = 1000; - public static final long MS_MINUTE = MS_SECOND * 60; - public static final long MS_HOUR = MS_MINUTE * 60; - public static final long MS_DAY = MS_HOUR * 24; - public static final long MS_WEEK = MS_DAY * 7; - public static final long MS_MONTH = MS_WEEK * 4; - public static final long MS_YEAR = MS_DAY * 365; - - public static final long S_SECOND = 1; - public static final long S_MINUTE = 60 * S_SECOND; - public static final long S_HOUR = 60 * S_MINUTE; - public static final long S_DAY = 24 * S_HOUR; - public static final long S_WEEK = 7 * S_DAY; - public static final long S_MONTH = 30 * S_DAY; - public static final long S_YEAR = S_DAY * 365; -} diff --git a/src/main/com/mongodb/util/UniqueList.java b/src/main/com/mongodb/util/UniqueList.java deleted file mode 100644 index 47aecd1077c..00000000000 --- a/src/main/com/mongodb/util/UniqueList.java +++ /dev/null @@ -1,45 +0,0 @@ -// UniqueList.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.util; - -import java.util.ArrayList; -import java.util.Collection; - -/** - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. - */ -@Deprecated -public class UniqueList extends ArrayList { - - private static final long serialVersionUID = -4415279469780082174L; - - public boolean add( T t ){ - if ( contains( t ) ) - return false; - return super.add( t ); - } - - public boolean addAll(Collection c) { - boolean added = false; - for ( T t : c ) - added = added || add( t ); - return added; - } - -} diff --git a/src/main/com/mongodb/util/Util.java b/src/main/com/mongodb/util/Util.java deleted file mode 100644 index bc87170c828..00000000000 --- a/src/main/com/mongodb/util/Util.java +++ /dev/null @@ -1,72 +0,0 @@ -/** - * - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.mongodb.util; - -import java.nio.ByteBuffer; -import java.security.MessageDigest; -import java.security.NoSuchAlgorithmException; - -/** - * Misc utility helpers. Not sure what else to call the class - */ -public class Util { - - public static String toHex( byte b[] ){ - StringBuilder sb = new StringBuilder(); - - for ( int i=0; i implements Iterable { - - /** Initializes a new weak bag. */ - public WeakBag(){ - } - - /** Adds an element to the bag. - * @param t Element to add - */ - public void add( T t ){ - _refs.add( new MyRef( t ) ); - } - - public boolean remove( T t ){ - - for ( Iterator i = _refs.iterator(); i.hasNext(); ){ - MyRef ref = i.next(); - if( ref == null ) - continue; - T me = ref.get(); - - if ( me == null ){ - // this is just here cause i'm already doing the work, so why not - i.remove(); - continue; - } - - if ( me == t ){ - i.remove(); - return true; - } - } - return false; - } - - public boolean contains( T t ){ - - for ( Iterator i = _refs.iterator(); i.hasNext(); ){ - MyRef ref = i.next(); - T me = ref.get(); - if ( me == t ) - return true; - } - return false; - } - - /** Returns the size of the bag. - * @return the size of the bag - */ - public int size(){ - clean(); - return _refs.size(); - } - - /** Removes all object from the bag. */ - public void clear(){ - _refs.clear(); - } - - /** Removes any null objects from the bag. 
*/ - public void clean(){ - for ( Iterator i = _refs.iterator(); i.hasNext(); ){ - MyRef ref = i.next(); - if ( ref.get() == null ) - i.remove(); - } - } - - public Iterator iterator(){ - return getAll().iterator(); - } - - public List getAll(){ - - List l = new ArrayList(); - - for ( Iterator i = _refs.iterator(); i.hasNext(); ){ - MyRef ref = i.next(); - T t = ref.get(); - if ( t == null ) - i.remove(); - else - l.add( t ); - } - - return l; - } - - class MyRef extends WeakReference { - MyRef( T t ){ - super( t ); - } - } - - private final List _refs = new LinkedList(); -} diff --git a/src/main/com/mongodb/util/management/JMException.java b/src/main/com/mongodb/util/management/JMException.java deleted file mode 100644 index 5f2f49acd91..00000000000 --- a/src/main/com/mongodb/util/management/JMException.java +++ /dev/null @@ -1,33 +0,0 @@ -/** - * Copyright (c) 2008 - 2011 10gen, Inc. - *

        - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

        - * http://www.apache.org/licenses/LICENSE-2.0 - *

        - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.util.management; - -/** - * - * This class is NOT part of the public API. It may change at any time without notification. - * - * @deprecated This class will be removed in 3.x versions of the driver, - * so please remove it from your compile time dependencies. - */ -@Deprecated -public class JMException extends Exception { - static final long serialVersionUID = -2052972874393271421L; - - public JMException(Throwable cause) { - super(cause); - } -} diff --git a/src/main/com/mongodb/util/management/MBeanServer.java b/src/main/com/mongodb/util/management/MBeanServer.java deleted file mode 100644 index e17421d9c73..00000000000 --- a/src/main/com/mongodb/util/management/MBeanServer.java +++ /dev/null @@ -1,32 +0,0 @@ -/** - * Copyright (c) 2008 - 2011 10gen, Inc. - *

        - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

        - * http://www.apache.org/licenses/LICENSE-2.0 - *

        - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.util.management; - -/** - * This class is NOT part of the public API. It may change at any time without notification. - * - * @deprecated This class will be removed in 3.x versions of the driver, - * so please remove it from your compile time dependencies. - */ -@Deprecated -public interface MBeanServer { - boolean isRegistered(String mBeanName) throws JMException; - - void unregisterMBean(String mBeanName) throws JMException; - - void registerMBean(Object mBean, String mBeanName) throws JMException; -} diff --git a/src/main/com/mongodb/util/management/MBeanServerFactory.java b/src/main/com/mongodb/util/management/MBeanServerFactory.java deleted file mode 100644 index 9883873d367..00000000000 --- a/src/main/com/mongodb/util/management/MBeanServerFactory.java +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Copyright (c) 2008 - 2011 10gen, Inc. - *

        - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

        - * http://www.apache.org/licenses/LICENSE-2.0 - *

        - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.mongodb.util.management; - -import com.mongodb.util.management.jmx.JMXMBeanServer; - -/** - * This class is NOT part of the public API. It may change at any time without notification. - * - * This class is used to insulate the rest of the driver from the possibility that JMX is not available, - * as currently is the case on Android VM - * - * @deprecated This class will be removed in 3.x versions of the driver, - * so please remove it from your compile time dependencies. - */ -@Deprecated -public class MBeanServerFactory { - static { - MBeanServer tmp; - try { - tmp = new JMXMBeanServer(); - } catch (Throwable e) { - tmp = new NullMBeanServer(); - } - - mBeanServer = tmp; - } - - public static MBeanServer getMBeanServer() { - return mBeanServer; - } - - private static final MBeanServer mBeanServer; -} diff --git a/src/main/com/mongodb/util/management/NullMBeanServer.java b/src/main/com/mongodb/util/management/NullMBeanServer.java deleted file mode 100644 index e48ceb7d36c..00000000000 --- a/src/main/com/mongodb/util/management/NullMBeanServer.java +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Copyright (c) 2008 - 2011 10gen, Inc. - *

        - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

        - * http://www.apache.org/licenses/LICENSE-2.0 - *

        - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.util.management; - -/** - * This class is NOT part of the public API. It may change at any time without notification. - * - * @deprecated This class will be removed in 3.x versions of the driver, - * so please remove it from your compile time dependencies. - */ -@Deprecated -public class NullMBeanServer implements MBeanServer { - @Override - public boolean isRegistered(String mBeanName) { - return false; - } - - @Override - public void unregisterMBean(String mBeanName) { - } - - @Override - public void registerMBean(Object mBean, String mBeanName) { - } -} diff --git a/src/main/com/mongodb/util/management/jmx/JMXMBeanServer.java b/src/main/com/mongodb/util/management/jmx/JMXMBeanServer.java deleted file mode 100644 index 9c492e7fd8b..00000000000 --- a/src/main/com/mongodb/util/management/jmx/JMXMBeanServer.java +++ /dev/null @@ -1,71 +0,0 @@ -/** - * Copyright (c) 2008 - 2011 10gen, Inc. - *

        - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

        - * http://www.apache.org/licenses/LICENSE-2.0 - *

        - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.util.management.jmx; - -import com.mongodb.util.management.JMException; -import com.mongodb.util.management.MBeanServer; - -import javax.management.*; -import java.lang.management.ManagementFactory; - -/** - * This class is NOT part of the public API. It may change at any time without notification. - * - * @deprecated This class will be removed in 3.x versions of the driver, - * so please remove it from your compile time dependencies. - */ -@Deprecated -public class JMXMBeanServer implements MBeanServer { - @Override - public boolean isRegistered(String mBeanName) throws JMException { - return server.isRegistered(createObjectName(mBeanName)); - } - - @Override - public void unregisterMBean(String mBeanName) throws JMException { - try { - server.unregisterMBean(createObjectName(mBeanName)); - } catch (InstanceNotFoundException e) { - throw new JMException(e); - } catch (MBeanRegistrationException e) { - throw new JMException(e); - } - } - - @Override - public void registerMBean(Object mBean, String mBeanName) throws JMException { - try { - server.registerMBean(mBean, createObjectName(mBeanName)); - } catch (InstanceAlreadyExistsException e) { - throw new JMException(e); - } catch (MBeanRegistrationException e) { - throw new JMException(e); - } catch (NotCompliantMBeanException e) { - throw new JMException(e); - } - } - - private ObjectName createObjectName(String mBeanName) throws JMException { - try { - return new ObjectName(mBeanName); - } catch (MalformedObjectNameException e) { - throw new JMException(e); - } - } - - private final javax.management.MBeanServer server = ManagementFactory.getPlatformMBeanServer(); -} diff --git a/src/main/com/mongodb/util/package.html b/src/main/com/mongodb/util/package.html deleted file mode 100644 index 10d79bb723c..00000000000 --- a/src/main/com/mongodb/util/package.html +++ /dev/null @@ -1,3 +0,0 @@ - -

        Package containing misc utils.

        - diff --git a/src/main/org/bson/BSON.java b/src/main/org/bson/BSON.java deleted file mode 100644 index ee98a3bbddd..00000000000 --- a/src/main/org/bson/BSON.java +++ /dev/null @@ -1,349 +0,0 @@ -// BSON.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.bson; - -import java.nio.charset.Charset; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.CopyOnWriteArrayList; -import java.util.logging.Logger; -import java.util.regex.Pattern; - -import org.bson.util.ClassMap; - -public class BSON { - - static final Logger LOGGER = Logger.getLogger( "org.bson.BSON" ); - - // ---- basics ---- - - public static final byte EOO = 0; - public static final byte NUMBER = 1; - public static final byte STRING = 2; - public static final byte OBJECT = 3; - public static final byte ARRAY = 4; - public static final byte BINARY = 5; - public static final byte UNDEFINED = 6; - public static final byte OID = 7; - public static final byte BOOLEAN = 8; - public static final byte DATE = 9; - public static final byte NULL = 10; - public static final byte REGEX = 11; - public static final byte REF = 12; - public static final byte CODE = 13; - public static final byte SYMBOL = 14; - public static final byte CODE_W_SCOPE = 15; - public static final byte NUMBER_INT = 16; - public static final byte TIMESTAMP = 17; - public static final byte NUMBER_LONG = 18; - - public static final byte MINKEY = -1; - public static final byte MAXKEY = 127; - - // --- binary types - /* - these are binary types - so the format would look like - <...> - */ - - public static final byte B_GENERAL = 0; - public static final byte B_FUNC = 1; - public static final byte B_BINARY = 2; - public static final byte B_UUID = 3; - - // ---- regular expression handling ---- - - /** Converts a string of regular expression flags from the database in Java regular - * expression flags. - * @param flags flags from database - * @return the Java flags - */ - public static int regexFlags( String flags ){ - int fint = 0; - if ( flags == null || flags.length() == 0 ) - return fint; - - flags = flags.toLowerCase(); - - for( int i=0; i 0 ) { - buf.append( flag.flagChar ); - flags -= flag.javaFlag; - } - } - - if( flags > 0 ) - throw new IllegalArgumentException( "some flags could not be recognized." 
); - - return buf.toString(); - } - - private static enum RegexFlag { - CANON_EQ( Pattern.CANON_EQ, 'c', "Pattern.CANON_EQ" ), - UNIX_LINES(Pattern.UNIX_LINES, 'd', "Pattern.UNIX_LINES" ), - GLOBAL( GLOBAL_FLAG, 'g', null ), - CASE_INSENSITIVE( Pattern.CASE_INSENSITIVE, 'i', null ), - MULTILINE(Pattern.MULTILINE, 'm', null ), - DOTALL( Pattern.DOTALL, 's', "Pattern.DOTALL" ), - LITERAL( Pattern.LITERAL, 't', "Pattern.LITERAL" ), - UNICODE_CASE( Pattern.UNICODE_CASE, 'u', "Pattern.UNICODE_CASE" ), - COMMENTS( Pattern.COMMENTS, 'x', null ); - - private static final Map byCharacter = new HashMap(); - - static { - for (RegexFlag flag : values()) { - byCharacter.put(flag.flagChar, flag); - } - } - - public static RegexFlag getByCharacter(char ch) { - return byCharacter.get(ch); - } - public final int javaFlag; - public final char flagChar; - public final String unsupported; - - RegexFlag( int f, char ch, String u ) { - javaFlag = f; - flagChar = ch; - unsupported = u; - } - } - - private static void _warnUnsupportedRegex( String flag ) { - LOGGER.info( "flag " + flag + " not supported by db." ); - } - - private static final int GLOBAL_FLAG = 256; - - // --- (en|de)coding hooks ----- - - public static boolean hasDecodeHooks() { return _decodeHooks; } - - public static void addEncodingHook( Class c , Transformer t ){ - _encodeHooks = true; - List l = _encodingHooks.get( c ); - if ( l == null ){ - l = new CopyOnWriteArrayList(); - _encodingHooks.put( c , l ); - } - l.add( t ); - } - - public static void addDecodingHook( Class c , Transformer t ){ - _decodeHooks = true; - List l = _decodingHooks.get( c ); - if ( l == null ){ - l = new CopyOnWriteArrayList(); - _decodingHooks.put( c , l ); - } - l.add( t ); - } - - public static Object applyEncodingHooks( Object o ){ - if ( ! _anyHooks() ) - return o; - - if ( _encodingHooks.size() == 0 || o == null ) - return o; - List l = _encodingHooks.get( o.getClass() ); - if ( l != null ) - for ( Transformer t : l ) - o = t.transform( o ); - return o; - } - - public static Object applyDecodingHooks( Object o ){ - if ( ! _anyHooks() || o == null ) - return o; - - List l = _decodingHooks.get( o.getClass() ); - if ( l != null ) - for ( Transformer t : l ) - o = t.transform( o ); - return o; - } - - /** - * Returns the encoding hook(s) associated with the specified class - * - */ - public static List getEncodingHooks( Class c ){ - return _encodingHooks.get( c ); - } - - /** - * Clears *all* encoding hooks. - */ - public static void clearEncodingHooks(){ - _encodeHooks = false; - _encodingHooks.clear(); - } - - /** - * Remove all encoding hooks for a specific class. - */ - public static void removeEncodingHooks( Class c ){ - _encodingHooks.remove( c ); - } - - /** - * Remove a specific encoding hook for a specific class. - */ - public static void removeEncodingHook( Class c , Transformer t ){ - getEncodingHooks( c ).remove( t ); - } - - /** - * Returns the decoding hook(s) associated with the specific class - */ - public static List getDecodingHooks( Class c ){ - return _decodingHooks.get( c ); - } - - /** - * Clears *all* decoding hooks. - */ - public static void clearDecodingHooks(){ - _decodeHooks = false; - _decodingHooks.clear(); - } - - /** - * Remove all decoding hooks for a specific class. - */ - public static void removeDecodingHooks( Class c ){ - _decodingHooks.remove( c ); - } - - /** - * Remove a specific encoding hook for a specific class. 
- */ - public static void removeDecodingHook( Class c , Transformer t ){ - getDecodingHooks( c ).remove( t ); - } - - - public static void clearAllHooks(){ - clearEncodingHooks(); - clearDecodingHooks(); - } - - /** - * Returns true if any encoding or decoding hooks are loaded. - */ - private static boolean _anyHooks(){ - return _encodeHooks || _decodeHooks; - } - - private static boolean _encodeHooks = false; - private static boolean _decodeHooks = false; - static ClassMap> _encodingHooks = - new ClassMap>(); - - static ClassMap> _decodingHooks = - new ClassMap>(); - - /** - * @deprecated Use {@link Charset#forName(String)} to create UTF-8 charset. - */ - @Deprecated - static protected Charset _utf8 = Charset.forName( "UTF-8" ); - - // ----- static encode/decode ----- - - public static byte[] encode( BSONObject o ){ - BSONEncoder e = _staticEncoder.get(); - try { - return e.encode( o ); - } - finally { - e.done(); - } - } - - public static BSONObject decode( byte[] b ){ - BSONDecoder d = _staticDecoder.get(); - return d.readObject( b ); - } - - static ThreadLocal _staticEncoder = new ThreadLocal(){ - protected BSONEncoder initialValue(){ - return new BasicBSONEncoder(); - } - }; - - static ThreadLocal _staticDecoder = new ThreadLocal(){ - protected BSONDecoder initialValue(){ - return new BasicBSONDecoder(); - } - }; - - // --- coercing --- - - public static int toInt( Object o ){ - if ( o == null ) - throw new NullPointerException( "can't be null" ); - - if ( o instanceof Number ) - return ((Number)o).intValue(); - - if ( o instanceof Boolean ) - return ((Boolean)o) ? 1 : 0; - - throw new IllegalArgumentException( "can't convert: " + o.getClass().getName() + " to int" ); - } -} diff --git a/src/main/org/bson/BSONCallback.java b/src/main/org/bson/BSONCallback.java deleted file mode 100644 index ead63eb0db0..00000000000 --- a/src/main/org/bson/BSONCallback.java +++ /dev/null @@ -1,78 +0,0 @@ -// BSONCallback.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.bson; - -import org.bson.types.ObjectId; - -public interface BSONCallback { - - void objectStart(); - void objectStart(String name); - void objectStart(boolean array); - Object objectDone(); - - void reset(); - Object get(); - BSONCallback createBSONCallback(); - - void arrayStart(); - void arrayStart(String name); - Object arrayDone(); - - void gotNull( String name ); - void gotUndefined( String name ); - void gotMinKey( String name ); - void gotMaxKey( String name ); - - void gotBoolean( String name , boolean v ); - void gotDouble( String name , double v ); - void gotInt( String name , int v ); - void gotLong( String name , long v ); - - void gotDate( String name , long millis ); - void gotString( String name , String v ); - void gotSymbol( String name , String v ); - void gotRegex( String name , String pattern , String flags ); - - void gotTimestamp( String name , int time , int inc ); - void gotObjectId( String name , ObjectId id ); - - /** - * Invoked when {@link org.bson.BSONDecoder} encountered a DBPointer(0x0c) type field in a byte sequence. - * - * @param name the name of the field - * @param ns the namespace to which reference is pointing to - * @param id the if of the object to which reference is pointing to - */ - void gotDBRef(String name, String ns, ObjectId id); - - /** - * - */ - @Deprecated - void gotBinaryArray( String name , byte[] data ); - void gotBinary( String name , byte type , byte[] data ); - /** - * subtype 3 - */ - void gotUUID( String name , long part1, long part2); - - void gotCode( String name , String code ); - void gotCodeWScope( String name , String code , Object scope ); -} diff --git a/src/main/org/bson/BSONDecoder.java b/src/main/org/bson/BSONDecoder.java deleted file mode 100644 index 0e873d57c5b..00000000000 --- a/src/main/org/bson/BSONDecoder.java +++ /dev/null @@ -1,34 +0,0 @@ -// BSONDecoder.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.bson; - -import java.io.IOException; -import java.io.InputStream; - -public interface BSONDecoder { - - public BSONObject readObject( byte[] b ); - - public BSONObject readObject( InputStream in ) throws IOException; - - public int decode( byte[] b , BSONCallback callback ); - - public int decode( InputStream in , BSONCallback callback ) throws IOException; - -} diff --git a/src/main/org/bson/BSONEncoder.java b/src/main/org/bson/BSONEncoder.java deleted file mode 100644 index f9fcded1866..00000000000 --- a/src/main/org/bson/BSONEncoder.java +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Copyright (c) 2008 - 2011 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ - -package org.bson; - -import org.bson.io.*; - - -public interface BSONEncoder { - public byte[] encode( BSONObject o ); - - public int putObject( BSONObject o ); - - public void done(); - - void set( OutputBuffer out ); -} diff --git a/src/main/org/bson/BSONException.java b/src/main/org/bson/BSONException.java deleted file mode 100644 index 80a6813fec9..00000000000 --- a/src/main/org/bson/BSONException.java +++ /dev/null @@ -1,73 +0,0 @@ -/** - * Copyright (C) 2011, 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.bson; - -/** - * A general runtime exception raised in BSON processing. - */ -public class BSONException extends RuntimeException { - - private static final long serialVersionUID = -4415279469780082174L; - - /** - * @param msg The error message. - */ - public BSONException( final String msg ) { - super( msg ); - } - - /** - * @param errorCode The error code. - * @param msg The error message. - */ - public BSONException( final int errorCode, final String msg ) { - super( msg ); - _errorCode = errorCode; - } - - /** - * @param msg The error message. - * @param t The throwable cause. - */ - public BSONException( final String msg , final Throwable t ) { - super( msg, t ); - } - - /** - * @param errorCode The error code. - * @param msg The error message. - * @param t The throwable cause. - */ - public BSONException( final int errorCode, final String msg, final Throwable t ) { - super( msg, t ); - _errorCode = errorCode; - } - - /** - * Returns the error code. - * @return The error code. - */ - public Integer getErrorCode() { return _errorCode; } - - /** - * Returns true if the error code is set (i.e., not null). - */ - public boolean hasErrorCode() { return (_errorCode != null); } - - private Integer _errorCode = null; -} - diff --git a/src/main/org/bson/BSONLazyDecoder.java b/src/main/org/bson/BSONLazyDecoder.java deleted file mode 100644 index bc07f7c6409..00000000000 --- a/src/main/org/bson/BSONLazyDecoder.java +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.bson; - -/** - * - * @author antoine - * - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. - */ -@Deprecated -public class BSONLazyDecoder { - -} diff --git a/src/main/org/bson/BSONObject.java b/src/main/org/bson/BSONObject.java deleted file mode 100644 index b2636fb97f6..00000000000 --- a/src/main/org/bson/BSONObject.java +++ /dev/null @@ -1,91 +0,0 @@ -// BSONObject.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.bson; - -import java.util.Map; -import java.util.Set; - -/** - * A key-value map that can be saved to the database. - */ -public interface BSONObject { - - /** - * Sets a name/value pair in this object. - * @param key Name to set - * @param v Corresponding value - * @return v - */ - public Object put( String key , Object v ); - - /** - * Sets all key/value pairs from an object into this object - * @param o the object - */ - public void putAll( BSONObject o ); - - /** - * Sets all key/value pairs from a map into this object - * @param m the map - */ - public void putAll( Map m ); - - /** - * Gets a field from this object by a given name. - * @param key The name of the field fetch - * @return The field, if found - */ - public Object get( String key ); - - /** - * Returns a map representing this BSONObject. - * @return the map - */ - public Map toMap(); - - /** - * Removes a field with a given name from this object. - * @param key The name of the field to remove - * @return The value removed from this object - */ - public Object removeField( String key ); - - /** - * Deprecated - * @param s - * @return True if the key is present - * @deprecated - */ - @Deprecated - public boolean containsKey( String s ); - - /** - * Checks if this object contains a field with the given name. - * @param s Field name for which to check - * @return True if the field is present - */ - public boolean containsField(String s); - - /** - * Returns this object's fields' names - * @return The names of the fields in this object - */ - public Set keySet(); -} - diff --git a/src/main/org/bson/BasicBSONCallback.java b/src/main/org/bson/BasicBSONCallback.java deleted file mode 100644 index 72bbae27852..00000000000 --- a/src/main/org/bson/BasicBSONCallback.java +++ /dev/null @@ -1,206 +0,0 @@ -// BasicBSONCallback.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.bson; - -import java.util.*; -import java.util.regex.Pattern; - -import org.bson.types.*; - -public class BasicBSONCallback implements BSONCallback { - - public BasicBSONCallback(){ - reset(); - } - - public BSONObject create(){ - return new BasicBSONObject(); - } - - protected BSONObject createList() { - return new BasicBSONList(); - } - - public BSONCallback createBSONCallback(){ - return new BasicBSONCallback(); - } - - public BSONObject create( boolean array , List path ){ - if ( array ) - return createList(); - return create(); - } - - public void objectStart(){ - if ( _stack.size() > 0 ) - throw new IllegalStateException( "something is wrong" ); - - objectStart(false); - } - - public void objectStart(boolean array){ - _root = create(array, null); - _stack.add( (BSONObject)_root ); - } - - public void objectStart(String name){ - objectStart( false , name ); - } - - public void objectStart(boolean array, String name){ - _nameStack.addLast( name ); - final BSONObject o = create( array , _nameStack ); - _stack.getLast().put( name , o); - _stack.addLast( o ); - } - - public Object objectDone(){ - final BSONObject o =_stack.removeLast(); - if ( _nameStack.size() > 0 ) - _nameStack.removeLast(); - else if ( _stack.size() > 0 ) - throw new IllegalStateException( "something is wrong" ); - - return !BSON.hasDecodeHooks() ? o : (BSONObject)BSON.applyDecodingHooks(o); - } - - public void arrayStart(){ - objectStart( true ); - } - - public void arrayStart(String name){ - objectStart( true , name ); - } - - public Object arrayDone(){ - return objectDone(); - } - - public void gotNull( String name ){ - cur().put( name , null ); - } - - public void gotUndefined( String name ) { } - - public void gotMinKey( String name ){ - cur().put( name , new MinKey() ); - } - - public void gotMaxKey( String name ){ - cur().put( name , new MaxKey() ); - } - - public void gotBoolean( String name , boolean v ){ - _put( name , v ); - } - - public void gotDouble( final String name , final double v ){ - _put( name , v ); - } - - public void gotInt( final String name , final int v ){ - _put( name , v ); - } - - public void gotLong( final String name , final long v ){ - _put( name , v ); - } - - public void gotDate( String name , long millis ){ - _put( name , new Date( millis ) ); - } - public void gotRegex( String name , String pattern , String flags ){ - _put( name , Pattern.compile( pattern , BSON.regexFlags( flags ) ) ); - } - - public void gotString( final String name , final String v ){ - _put( name , v ); - } - public void gotSymbol( String name , String v ){ - _put( name , v ); - } - - public void gotTimestamp( String name , int time , int inc ){ - _put( name , new BSONTimestamp( time , inc ) ); - } - public void gotObjectId( String name , ObjectId id ){ - _put( name , id ); - } - public void gotDBRef( String name , String ns , ObjectId id ){ - _put( name , new BasicBSONObject( "$ns" , ns ).append( "$id" , id ) ); - } - - @Deprecated - public void gotBinaryArray( String name , byte[] data ){ - gotBinary( name, BSON.B_GENERAL, data ); - } - - public void gotBinary( String name , byte type , byte[] data ){ - if( type == BSON.B_GENERAL || type == BSON.B_BINARY ) - _put( name , data ); - else - _put( name , new Binary( type , data ) ); - } - - public void gotUUID( String name , long part1, long part2){ - _put( name , new UUID(part1, part2) ); - } - - public void gotCode( String name , String 
code ){ - _put( name , new Code( code ) ); - } - - public void gotCodeWScope( String name , String code , Object scope ){ - _put( name , new CodeWScope( code, (BSONObject)scope ) ); - } - - protected void _put( final String name , final Object o ){ - cur().put( name , !BSON.hasDecodeHooks() ? o : BSON.applyDecodingHooks( o ) ); - } - - protected BSONObject cur(){ - return _stack.getLast(); - } - - protected String curName(){ - return (!_nameStack.isEmpty()) ? _nameStack.getLast() : null; - } - - public Object get(){ - return _root; - } - - protected void setRoot(Object o) { - _root = o; - } - - protected boolean isStackEmpty() { - return _stack.size() < 1; - } - - public void reset(){ - _root = null; - _stack.clear(); - _nameStack.clear(); - } - - private Object _root; - private final LinkedList _stack = new LinkedList(); - private final LinkedList _nameStack = new LinkedList(); -} diff --git a/src/main/org/bson/BasicBSONDecoder.java b/src/main/org/bson/BasicBSONDecoder.java deleted file mode 100644 index 4fd70290f98..00000000000 --- a/src/main/org/bson/BasicBSONDecoder.java +++ /dev/null @@ -1,611 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.bson; - -import static org.bson.BSON.*; - -import java.io.*; - -import org.bson.io.PoolOutputBuffer; -import org.bson.types.ObjectId; - - -/** - * Basic implementation of BSONDecoder interface that creates BasicBSONObject instances. - * - *

        Migration instructions

        - * In driver versions before 2.12 {@code BasicBSONDecoder} exposed several protected members to its subclasses: - *

        - * Fields: - *
          - *
        • {@code protected BSONInput _in}
        • - *
        • {@code protected BSONCallback _callback}
        • - *
        • {@code protected int _len}
        • - *
        • {@code protected int _pos}
        • - * - *
        - *
        - * Methods: - *
          - *
        • {@code protected void _binary(String)}
        • - *
        - *
        - * Nested Classes: - *
          - *
        • {@code protected class BSONInput}
        • - *
        - *
        - * - *

        Solution 1: Custom {@link BSONCallback} implementation

        - * With callbacks you can handle the process of creating objects from bytes in BSON format. - *

        - * For example to get away from overriging {@code BasicBSONDecoder._binary(String)} - * you can use the following piece of code: - *

        - *
        - * public class CustomBSONCallback extends BasicBSONCallback {
        - *
        - *  public void gotBinary(String name, byte type, byte[] data) {
        - *      _put(name,toHex(data));
        - *  }
        - *
        - *  private static String toHex(byte[] bytes) {...}
        - *}
        - *
        - * - * This solution covers majority of the cases. - * - *

        Solution 2: Custom {@link BSONDecoder} implementation

        - * If you need to customize byte-level decoding at the lowest layer you have to provide you own - * implementation of the {@link BSONDecoder} interface. - *
        - * Please check
        http://bsonspec.org/ for more information. - * - */ -public class BasicBSONDecoder implements BSONDecoder { - public BSONObject readObject( byte[] b ){ - try { - return readObject( new ByteArrayInputStream( b ) ); - } - catch ( IOException ioe ){ - throw new BSONException( "should be impossible" , ioe ); - } - } - - public BSONObject readObject( InputStream in ) - throws IOException { - BasicBSONCallback c = new BasicBSONCallback(); - decode( in , c ); - return (BSONObject)c.get(); - } - - public int decode( byte[] b , BSONCallback callback ){ - try { - return _decode( new BSONInput( new ByteArrayInputStream(b) ) , callback ); - } - catch ( IOException ioe ){ - throw new BSONException( "should be impossible" , ioe ); - } - } - - public int decode( InputStream in , BSONCallback callback ) - throws IOException { - return _decode( new BSONInput( in ) , callback ); - } - - private int _decode( BSONInput in , BSONCallback callback ) - throws IOException { - - if ( _in != null || _callback != null ) - throw new IllegalStateException( "not ready" ); - - _in = in; - _callback = callback; - - if ( in.numRead() != 0 ) - throw new IllegalArgumentException( "i'm confused" ); - - try { - - final int len = _in.readInt(); - - _in.setMax(len); - - _callback.objectStart(); - while ( decodeElement() ); - _callback.objectDone(); - - if ( _in.numRead() != len ) - throw new IllegalArgumentException( "bad data. lengths don't match read:" + _in.numRead() + " != len:" + len ); - - return len; - } - finally { - _in = null; - _callback = null; - } - } - - int decode( boolean first ) - throws IOException { - - final int start = _in.numRead(); - - final int len = _in.readInt(); - if ( first ) - _in.setMax(len); - - _callback.objectStart(); - while ( decodeElement() ); - _callback.objectDone(); - - final int read = _in.numRead() - start; - - if ( read != len ){ - //throw new IllegalArgumentException( "bad data. 
lengths don't match " + read + " != " + len ); - } - - return len; - } - - boolean decodeElement() - throws IOException { - - final byte type = _in.read(); - - if ( type == EOO ) - return false; - - String name = _in.readCStr(); - - switch ( type ){ - case NULL: - _callback.gotNull( name ); - break; - - case UNDEFINED: - _callback.gotUndefined( name ); - break; - - case BOOLEAN: - _callback.gotBoolean( name , _in.read() > 0 ); - break; - - case NUMBER: - _callback.gotDouble( name , _in.readDouble() ); - break; - - case NUMBER_INT: - _callback.gotInt( name , _in.readInt() ); - break; - - case NUMBER_LONG: - _callback.gotLong( name , _in.readLong() ); - break; - - case SYMBOL: - _callback.gotSymbol( name , _in.readUTF8String() ); - break; - - case STRING: - _callback.gotString(name, _in.readUTF8String() ); - break; - - case OID: - // OID is stored as big endian - _callback.gotObjectId( name , new ObjectId( _in.readIntBE() , _in.readIntBE() , _in.readIntBE() ) ); - break; - - case REF: - _in.readInt(); // length of ctring that follows - String ns = _in.readCStr(); - ObjectId theOID = new ObjectId( _in.readInt() , _in.readInt() , _in.readInt() ); - _callback.gotDBRef( name , ns , theOID ); - break; - - case DATE: - _callback.gotDate( name , _in.readLong() ); - break; - - case REGEX: - _callback.gotRegex( name , _in.readCStr() , _in.readCStr() ); - break; - - case BINARY: - _binary( name ); - break; - - case CODE: - _callback.gotCode( name , _in.readUTF8String() ); - break; - - case CODE_W_SCOPE: - _in.readInt(); - _callback.gotCodeWScope( name , _in.readUTF8String() , _readBasicObject() ); - - break; - - case ARRAY: - _in.readInt(); // total size - we don't care.... - - _callback.arrayStart( name ); - while ( decodeElement() ); - _callback.arrayDone(); - - break; - - - case OBJECT: - _in.readInt(); // total size - we don't care.... - - _callback.objectStart( name ); - while ( decodeElement() ); - _callback.objectDone(); - - break; - - case TIMESTAMP: - int i = _in.readInt(); - int time = _in.readInt(); - _callback.gotTimestamp( name , time , i ); - break; - - case MINKEY: - _callback.gotMinKey( name ); - break; - - case MAXKEY: - _callback.gotMaxKey( name ); - break; - - default: - throw new UnsupportedOperationException( "BSONDecoder doesn't understand type : " + type + " name: " + name ); - } - - return true; - } - - /** - * - * @param name the field name - * @throws IOException - * - * @deprecated This method should not be a part of API. - * Please see the class-level documentation for a migration instructions. 
- */ - @Deprecated - protected void _binary( final String name ) - throws IOException { - final int totalLen = _in.readInt(); - final byte bType = _in.read(); - - switch ( bType ){ - case B_GENERAL: { - final byte[] data = new byte[totalLen]; - _in.fill( data ); - _callback.gotBinary( name, bType, data ); - return; - } - case B_BINARY: - final int len = _in.readInt(); - if ( len + 4 != totalLen ) - throw new IllegalArgumentException( "bad data size subtype 2 len: " + len + " totalLen: " + totalLen ); - - final byte [] data = new byte[len]; - _in.fill( data ); - _callback.gotBinary( name , bType , data ); - return; - case B_UUID: - if ( totalLen != 16 ) - throw new IllegalArgumentException( "bad data size subtype 3 len: " + totalLen + " != 16"); - - final long part1 = _in.readLong(); - final long part2 = _in.readLong(); - _callback.gotUUID(name, part1, part2); - return; - } - - final byte [] data = new byte[totalLen]; - _in.fill( data ); - - _callback.gotBinary( name , bType , data ); - } - - Object _readBasicObject() - throws IOException { - _in.readInt(); - - final BSONCallback save = _callback; - final BSONCallback _basic = _callback.createBSONCallback(); - _callback = _basic; - _basic.reset(); - _basic.objectStart(false); - - while( decodeElement() ); - _callback = save; - return _basic.get(); - } - - /** - * @deprecated This class should not be a part of API. - * Please see the class-level documentation for a migration instructions. - */ - @Deprecated - protected class BSONInput { - - public BSONInput(final InputStream in){ - _raw = in; - _read = 0; - - _pos = 0; - _len = 0; - } - - /** - * ensure that there are num bytes to read - * _pos is where to start reading from - * @return where to start reading from - */ - protected int _need( final int num ) - throws IOException { - - //System.out.println( "p: " + _pos + " l: " + _len + " want: " + num ); - - if ( _len - _pos >= num ){ - final int ret = _pos; - _pos += num; - _read += num; - return ret; - } - - if ( num >= _inputBuffer.length ) - throw new IllegalArgumentException( "you can't need that much" ); - - final int remaining = _len - _pos; - if ( _pos > 0 ){ - System.arraycopy( _inputBuffer , _pos , _inputBuffer , 0 , remaining ); - - _pos = 0; - _len = remaining; - } - - // read as much as possible into buffer - int maxToRead = Math.min( _max - _read - remaining , _inputBuffer.length - _len ); - while ( maxToRead > 0 ){ - int x = _raw.read( _inputBuffer , _len , maxToRead); - if ( x <= 0 ) - throw new IOException( "unexpected EOF" ); - maxToRead -= x; - _len += x; - } - - int ret = _pos; - _pos += num; - _read += num; - return ret; - } - - public int readInt() - throws IOException { - return org.bson.io.Bits.readInt( _inputBuffer , _need(4) ); - } - - public int readIntBE() - throws IOException { - return org.bson.io.Bits.readIntBE( _inputBuffer , _need(4) ); - } - - public long readLong() - throws IOException { - return org.bson.io.Bits.readLong( _inputBuffer , _need(8) ); - } - - public double readDouble() - throws IOException { - return Double.longBitsToDouble( readLong() ); - } - - public byte read() - throws IOException { - if ( _pos < _len ){ - ++_read; - return _inputBuffer[_pos++]; - } - return _inputBuffer[_need(1)]; - } - - public void fill( byte b[] ) - throws IOException { - fill( b , b.length ); - } - - public void fill( byte b[] , int len ) - throws IOException { - // first use what we have - final int have = _len - _pos; - final int tocopy = Math.min( len , have ); - System.arraycopy( _inputBuffer , _pos , b , 0 , 
tocopy ); - - _pos += tocopy; - _read += tocopy; - - len -= tocopy; - - int off = tocopy; - while ( len > 0 ){ - final int x = _raw.read( b , off , len ); - if (x <= 0) - throw new IOException( "unexpected EOF" ); - _read += x; - off += x; - len -= x; - } - } - - protected boolean _isAscii( byte b ){ - return b >=0 && b <= 127; - } - - public String readCStr() throws IOException { - - boolean isAscii = true; - - // short circuit 1 byte strings - _random[0] = read(); - if (_random[0] == 0) { - return ""; - } - - _random[1] = read(); - if (_random[1] == 0) { - final String out = ONE_BYTE_STRINGS[_random[0]]; - return (out != null) ? out : new String(_random, 0, 1, DEFAULT_ENCODING); - } - - _stringBuffer.reset(); - _stringBuffer.write(_random, 0, 2); - - isAscii = _isAscii(_random[0]) && _isAscii(_random[1]); - - byte b; - while ((b = read()) != 0) { - _stringBuffer.write( b ); - isAscii = isAscii && _isAscii( b ); - } - - String out = null; - if ( isAscii ){ - out = _stringBuffer.asAscii(); - } - else { - try { - out = _stringBuffer.asString( DEFAULT_ENCODING ); - } - catch ( UnsupportedOperationException e ){ - throw new BSONException( "impossible" , e ); - } - } - _stringBuffer.reset(); - return out; - } - - public String readUTF8String() - throws IOException { - final int size = readInt(); - // this is just protection in case it's corrupted, to avoid huge strings - if ( size <= 0 || size > MAX_STRING ) - throw new BSONException( "bad string size: " + size ); - - if ( size < _inputBuffer.length / 2 ){ - if ( size == 1 ){ - read(); - return ""; - } - - return new String( _inputBuffer , _need(size) , size - 1 , DEFAULT_ENCODING ); - } - - final byte [] b = size < _random.length ? _random : new byte[size]; - - fill( b , size ); - - try { - return new String( b , 0 , size - 1 , DEFAULT_ENCODING ); - } - catch ( java.io.UnsupportedEncodingException uee ){ - throw new BSONException( "impossible" , uee ); - } - } - - public int numRead() { - return _read; - } - - public int getPos() { - return _pos; - } - - public int getMax() { - return _max; - } - - public void setMax(int _max) { - this._max = _max; - } - - int _read; - final InputStream _raw; - - int _max = 4; // max number of total bytes allowed to ready - - } - - /** - * @deprecated This field should not be a part of API. - * Please see the class-level documentation for a migration instructions. - */ - @Deprecated - protected BSONInput _in; - - /** - * @deprecated This field should not be a part of API. - * Please see the class-level documentation for a migration instructions. - */ - @Deprecated - protected BSONCallback _callback; - - private byte [] _random = new byte[1024]; // has to be used within a single function - private byte [] _inputBuffer = new byte[1024]; - - private PoolOutputBuffer _stringBuffer = new PoolOutputBuffer(); - - /** - * @deprecated This field should not be a part of API. - * Please see the class-level documentation for a migration instructions. - */ - @Deprecated - protected int _pos; // current offset into _inputBuffer - - /** - * @deprecated This field should not be a part of API. - * Please see the class-level documentation for a migration instructions. 
- */ - @Deprecated - protected int _len; // length of valid data in _inputBuffer - - private static final int MAX_STRING = ( 32 * 1024 * 1024 ); - - private static final String DEFAULT_ENCODING = "UTF-8"; - - private static final boolean _isAscii( final byte b ){ - return b >=0 && b <= 127; - } - - static final String[] ONE_BYTE_STRINGS = new String[128]; - static void _fillRange( byte min, byte max ){ - while ( min < max ){ - String s = ""; - s += (char)min; - ONE_BYTE_STRINGS[(int)min] = s; - min++; - } - } - static { - _fillRange( (byte)'0' , (byte)'9' ); - _fillRange( (byte)'a' , (byte)'z' ); - _fillRange( (byte)'A' , (byte)'Z' ); - } -} diff --git a/src/main/org/bson/BasicBSONEncoder.java b/src/main/org/bson/BasicBSONEncoder.java deleted file mode 100644 index 10948c83452..00000000000 --- a/src/main/org/bson/BasicBSONEncoder.java +++ /dev/null @@ -1,542 +0,0 @@ -// BSONEncoder.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.bson; - -import static org.bson.BSON.ARRAY; -import static org.bson.BSON.BINARY; -import static org.bson.BSON.BOOLEAN; -import static org.bson.BSON.B_BINARY; -import static org.bson.BSON.B_GENERAL; -import static org.bson.BSON.B_UUID; -import static org.bson.BSON.CODE; -import static org.bson.BSON.CODE_W_SCOPE; -import static org.bson.BSON.DATE; -import static org.bson.BSON.EOO; -import static org.bson.BSON.MAXKEY; -import static org.bson.BSON.MINKEY; -import static org.bson.BSON.NULL; -import static org.bson.BSON.NUMBER; -import static org.bson.BSON.NUMBER_INT; -import static org.bson.BSON.NUMBER_LONG; -import static org.bson.BSON.OBJECT; -import static org.bson.BSON.OID; -import static org.bson.BSON.REGEX; -import static org.bson.BSON.STRING; -import static org.bson.BSON.SYMBOL; -import static org.bson.BSON.TIMESTAMP; -import static org.bson.BSON.UNDEFINED; -import static org.bson.BSON.regexFlags; - -import java.lang.reflect.Array; -import java.nio.Buffer; -import java.util.Date; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; -import java.util.regex.Pattern; - -import org.bson.io.BasicOutputBuffer; -import org.bson.io.OutputBuffer; -import org.bson.types.BSONTimestamp; -import org.bson.types.Binary; -import org.bson.types.Code; -import org.bson.types.CodeWScope; -import org.bson.types.MaxKey; -import org.bson.types.MinKey; -import org.bson.types.ObjectId; -import org.bson.types.Symbol; - -import com.mongodb.DBRefBase; - -/** - * this is meant to be pooled or cached - * there is some per instance memory for string conversion, etc... 
- */ -@SuppressWarnings("unchecked") -public class BasicBSONEncoder implements BSONEncoder { - - static final boolean DEBUG = false; - - public BasicBSONEncoder(){ - - } - - public byte[] encode( BSONObject o ){ - BasicOutputBuffer buf = new BasicOutputBuffer(); - set( buf ); - putObject( o ); - done(); - return buf.toByteArray(); - } - - public void set( OutputBuffer out ){ - if ( _buf != null ) - throw new IllegalStateException( "in the middle of something" ); - - _buf = out; - } - - /** - * Gets the buffer this encoder is writing to. - * - * @return the output buffer - */ - protected OutputBuffer getOutputBuffer() { - return _buf; - } - - public void done(){ - _buf = null; - } - - /** - * @return true if object was handled - * - * @deprecated Override {@link #putSpecial(String, Object)} if you need to handle custom types. - */ - @Deprecated - protected boolean handleSpecialObjects( String name , BSONObject o ){ - return false; - } - - protected boolean putSpecial( String name , Object o ){ - return false; - } - - /** Encodes a BSONObject. - * This is for the higher level api calls - * @param o the object to encode - * @return the number of bytes in the encoding - */ - public int putObject( BSONObject o ){ - return putObject( null , o ); - } - - /** - * this is really for embedded objects - */ - protected int putObject( String name , BSONObject o ){ - - if ( o == null ) - throw new NullPointerException( "can't save a null object" ); - - if ( DEBUG ) System.out.println( "putObject : " + name + " [" + o.getClass() + "]" + " # keys " + o.keySet().size() ); - - final int start = _buf.getPosition(); - - byte myType = OBJECT; - if ( o instanceof List ) - myType = ARRAY; - - if ( handleSpecialObjects( name , o ) ) - return _buf.getPosition() - start; - - if ( name != null ){ - _put( myType , name ); - } - - final int sizePos = _buf.getPosition(); - _buf.writeInt( 0 ); // leaving space for this. set it at the end - - List transientFields = null; - boolean rewriteID = myType == OBJECT && name == null; - - - if ( myType == OBJECT ) { - if ( rewriteID && o.containsField( "_id" ) ) - _putObjectField( "_id" , o.get( "_id" ) ); - - { - Object temp = o.get( "_transientFields" ); - if ( temp instanceof List ) - transientFields = (List)temp; - } - } - - //TODO: reduce repeated code below. - if ( o instanceof Map ){ - for ( Entry<String, Object> e : ((Map<String, Object>)o).entrySet() ){ - - if ( rewriteID && e.getKey().equals( "_id" ) ) - continue; - - if ( transientFields != null && transientFields.contains( e.getKey() ) ) - continue; - - _putObjectField( e.getKey() , e.getValue() ); - - } - } else { - for ( String s : o.keySet() ){ - - if ( rewriteID && s.equals( "_id" ) ) - continue; - - if ( transientFields != null && transientFields.contains( s ) ) - continue; - - Object val = o.get( s ); - - _putObjectField( s , val ); - - } - } - _buf.write( EOO ); - - _buf.writeInt( sizePos , _buf.getPosition() - sizePos ); - return _buf.getPosition() - start; - } - - protected void _putObjectField( String name , Object val ){ - - if ( name.equals( "_transientFields" ) ) - return; - - if ( DEBUG ) System.out.println( "\t put thing : " + name ); - - if ( name.contains( "\0" ) ) - throw new IllegalArgumentException( "Document field names can't have a NULL character.
(Bad Key: '" + name + "')" ); - - if ( name.equals( "$where") && val instanceof String ){ - _put( CODE , name ); - _putValueString( val.toString() ); - return; - } - - val = BSON.applyEncodingHooks( val ); - - if ( val == null ) - putNull(name); - else if ( val instanceof Date ) - putDate( name , (Date)val ); - else if ( val instanceof Number ) - putNumber(name, (Number)val ); - else if ( val instanceof Character ) - putString(name, val.toString() ); - else if ( val instanceof String ) - putString(name, val.toString() ); - else if ( val instanceof ObjectId ) - putObjectId(name, (ObjectId)val ); - else if ( val instanceof BSONObject ) - putObject(name, (BSONObject)val ); - else if ( val instanceof Boolean ) - putBoolean(name, (Boolean)val ); - else if ( val instanceof Pattern ) - putPattern(name, (Pattern)val ); - else if ( val instanceof Map ) - putMap( name , (Map)val ); - else if ( val instanceof Iterable) - putIterable( name , (Iterable)val ); - else if ( val instanceof byte[] ) - putBinary( name , (byte[])val ); - else if ( val instanceof Binary ) - putBinary( name , (Binary)val ); - else if ( val instanceof UUID ) - putUUID( name , (UUID)val ); - else if ( val.getClass().isArray() ) - putArray( name , val ); - - else if (val instanceof Symbol) { - putSymbol(name, (Symbol) val); - } - else if (val instanceof BSONTimestamp) { - putTimestamp( name , (BSONTimestamp)val ); - } - else if (val instanceof CodeWScope) { - putCodeWScope( name , (CodeWScope)val ); - } - else if (val instanceof Code) { - putCode( name , (Code)val ); - } - else if (val instanceof DBRefBase) { - BSONObject temp = new BasicBSONObject(); - temp.put("$ref", ((DBRefBase)val).getRef()); - temp.put("$id", ((DBRefBase)val).getId()); - putObject( name, temp ); - } - else if ( val instanceof MinKey ) - putMinKey( name ); - else if ( val instanceof MaxKey ) - putMaxKey( name ); - else if ( putSpecial( name , val ) ){ - // no-op - } - else { - throw new IllegalArgumentException( "can't serialize " + val.getClass() ); - } - - } - - private void putArray( String name , Object array ) { - _put( ARRAY , name ); - final int sizePos = _buf.getPosition(); - _buf.writeInt( 0 ); - - int size = Array.getLength(array); - for ( int i = 0; i < size; i++ ) - _putObjectField( String.valueOf( i ) , Array.get( array, i ) ); - - _buf.write( EOO ); - _buf.writeInt( sizePos , _buf.getPosition() - sizePos ); - } - - private void putIterable( String name , Iterable l ){ - _put( ARRAY , name ); - final int sizePos = _buf.getPosition(); - _buf.writeInt( 0 ); - - int i=0; - for ( Object obj: l ) { - _putObjectField( String.valueOf( i ) , obj ); - i++; - } - - - _buf.write( EOO ); - _buf.writeInt( sizePos , _buf.getPosition() - sizePos ); - } - - private void putMap( String name , Map m ){ - _put( OBJECT , name ); - final int sizePos = _buf.getPosition(); - _buf.writeInt( 0 ); - - for ( Map.Entry entry : (Set)m.entrySet() ) - _putObjectField( entry.getKey().toString() , entry.getValue() ); - - _buf.write( EOO ); - _buf.writeInt( sizePos , _buf.getPosition() - sizePos ); - } - - - protected void putNull( String name ){ - _put( NULL , name ); - } - - protected void putUndefined(String name){ - _put(UNDEFINED, name); - } - - protected void putTimestamp(String name, BSONTimestamp ts ){ - _put( TIMESTAMP , name ); - _buf.writeInt( ts.getInc() ); - _buf.writeInt( ts.getTime() ); - } - - protected void putCodeWScope( String name , CodeWScope code ){ - _put( CODE_W_SCOPE , name ); - int temp = _buf.getPosition(); - _buf.writeInt( 0 ); - _putValueString( 
code.getCode() ); - putObject( code.getScope() ); - _buf.writeInt( temp , _buf.getPosition() - temp ); - } - - protected void putCode( String name , Code code ){ - _put( CODE , name ); - int temp = _buf.getPosition(); - _putValueString( code.getCode() ); - } - - protected void putBoolean( String name , Boolean b ){ - _put( BOOLEAN , name ); - _buf.write( b ? (byte)0x1 : (byte)0x0 ); - } - - protected void putDate( String name , Date d ){ - _put( DATE , name ); - _buf.writeLong( d.getTime() ); - } - - protected void putNumber( String name , Number n ){ - if ( n instanceof Integer || - n instanceof Short || - n instanceof Byte || - n instanceof AtomicInteger ){ - _put( NUMBER_INT , name ); - _buf.writeInt( n.intValue() ); - } - else if ( n instanceof Long || n instanceof AtomicLong ) { - _put( NUMBER_LONG , name ); - _buf.writeLong( n.longValue() ); - } - else if ( n instanceof Float || n instanceof Double ) { - _put( NUMBER , name ); - _buf.writeDouble( n.doubleValue() ); - } - else { - throw new IllegalArgumentException( "can't serialize " + n.getClass() ); - } - } - - protected void putBinary( String name , byte[] data ){ - putBinary( name, B_GENERAL, data ); - } - - protected void putBinary( String name , Binary val ){ - putBinary( name, val.getType(), val.getData() ); - } - - private void putBinary( String name , int type , byte[] data ){ - _put( BINARY , name ); - int totalLen = data.length; - - if (type == B_BINARY) - totalLen += 4; - - _buf.writeInt( totalLen ); - _buf.write( type ); - if (type == B_BINARY) - _buf.writeInt( totalLen -4 ); - int before = _buf.getPosition(); - _buf.write( data ); - int after = _buf.getPosition(); - com.mongodb.util.MyAsserts.assertEquals( after - before , data.length ); - } - - protected void putUUID( String name , UUID val ){ - _put( BINARY , name ); - _buf.writeInt( 16 ); - _buf.write( B_UUID ); - _buf.writeLong( val.getMostSignificantBits()); - _buf.writeLong( val.getLeastSignificantBits()); - } - - protected void putSymbol( String name , Symbol s ){ - _putString(name, s.getSymbol(), SYMBOL); - } - - protected void putString(String name, String s) { - _putString(name, s, STRING); - } - - private void _putString( String name , String s, byte type ){ - _put( type , name ); - _putValueString( s ); - } - - protected void putObjectId( String name , ObjectId oid ){ - _put( OID , name ); - // according to spec, values should be stored big endian - _buf.writeIntBE( oid._time() ); - _buf.writeIntBE( oid._machine() ); - _buf.writeIntBE( oid._inc() ); - } - - private void putPattern( String name, Pattern p ) { - _put( REGEX , name ); - _put( p.pattern() ); - _put( regexFlags( p.flags() ) ); - } - - private void putMinKey( String name ) { - _put( MINKEY , name ); - } - - private void putMaxKey( String name ) { - _put( MAXKEY , name ); - } - - - // ---------------------------------------------- - - /** - * Encodes the type and key. - * - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. - * Access buffer directly via {@link #getOutputBuffer()} if you need to change how BSON is written. - */ - @Deprecated - protected void _put(byte type, String name) { - _buf.write(type); - _put(name); - } - - /** - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. - * Access buffer directly via {@link #getOutputBuffer()} if you need to change how BSON is written. - * Otherwise override {@link #putString(String, String)}. 
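A note on the pattern that recurs in putObject(), putCodeWScope() and _putValueString() above: BSON sizes come first on the wire, so the encoder writes a four-byte placeholder, emits the payload, then patches the real size back in at the remembered position. Below is a self-contained sketch of the idea (illustration only, hypothetical class name, a plain ByteBuffer standing in for the driver's OutputBuffer):

```java
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.charset.StandardCharsets;

public class LengthBackfill {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(64).order(ByteOrder.LITTLE_ENDIAN);

        int sizePos = buf.position();        // remember where the size goes
        buf.putInt(0);                       // placeholder, patched below
        buf.put("hello".getBytes(StandardCharsets.UTF_8));
        buf.put((byte) 0);                   // BSON strings/documents end in 0x00

        buf.putInt(sizePos, buf.position() - sizePos); // backfill the real size
        System.out.println(buf.getInt(0));   // 10 = 4 (prefix) + 5 (bytes) + 1 (0x00)
    }
}
```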
- */ - @Deprecated - protected void _putValueString( String s ){ - int lenPos = _buf.getPosition(); - _buf.writeInt( 0 ); // making space for size - int strLen = _put( s ); - _buf.writeInt( lenPos , strLen ); - } - - void _reset( Buffer b ){ - b.position(0); - b.limit(b.capacity()); - } - - /** - * puts as utf-8 string - * - * @deprecated Replaced by {@code getOutputBuffer().writeCString(String)}. - */ - @Deprecated - protected int _put( String str ){ - return _buf.writeCString(str); - } - - /** - * Writes integer to underlying buffer. - * - * @param x the integer number - * @deprecated Replaced by {@code getOutputBuffer().writeInt(int)}. - */ - @Deprecated - public void writeInt( int x ){ - _buf.writeInt( x ); - } - - /** - * Writes long to underlying buffer. - * - * @param x the long number - * @deprecated Replaced by {@code getOutputBuffer().writeLong(long)}. - */ - @Deprecated - public void writeLong( long x ){ - _buf.writeLong(x); - } - - /** - * Writes C string (null-terminated string) to underlying buffer. - * - * @param s the string - * @deprecated Replaced by {@code getOutputBuffer().writeCString(String)}. - */ - @Deprecated - public void writeCString( String s ){ - _buf.writeCString(s); - } - - /** - * @deprecated Replaced by {@link #getOutputBuffer()}. - */ - @Deprecated - protected OutputBuffer _buf; - -} diff --git a/src/main/org/bson/BasicBSONObject.java b/src/main/org/bson/BasicBSONObject.java deleted file mode 100644 index bd374dfc5e6..00000000000 --- a/src/main/org/bson/BasicBSONObject.java +++ /dev/null @@ -1,361 +0,0 @@ -// BasicBSONObject.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.bson; - -// BSON -import org.bson.types.ObjectId; - -// Java -import java.util.Map; -import java.util.Set; -import java.util.Date; -import java.util.LinkedHashMap; -import java.util.regex.Pattern; - -/** - * A simple implementation of DBObject. - * A DBObject can be created as follows, using this class: - *
- * <pre>
- * DBObject obj = new BasicBSONObject();
        - * obj.put( "foo", "bar" );
- * </pre>
- */ -public class BasicBSONObject extends LinkedHashMap<String,Object> implements BSONObject { - - private static final long serialVersionUID = -4415279469780082174L; - - /** - * Creates an empty object. - */ - public BasicBSONObject(){ - } - - public BasicBSONObject(int size){ - super(size); - } - - /** - * Convenience CTOR - * @param key key under which to store - * @param value value to store - */ - public BasicBSONObject(String key, Object value){ - put(key, value); - } - - /** - * Creates a DBObject from a map. - * @param m map to convert - */ - @SuppressWarnings("unchecked") - public BasicBSONObject(Map m) { - super(m); - } - - /** - * Converts a DBObject to a map. - * @return the DBObject as a map - */ - public Map toMap() { - return new LinkedHashMap<String,Object>(this); - } - - /** Deletes a field from this object. - * @param key the field name to remove - * @return the object removed - */ - public Object removeField( String key ){ - return remove( key ); - } - - /** Checks if this object contains a given field - * @param field field name - * @return if the field exists - */ - public boolean containsField( String field ){ - return super.containsKey(field); - } - - /** - * @deprecated - */ - @Deprecated - public boolean containsKey( String key ){ - return containsField(key); - } - - /** Gets a value from this object - * @param key field name - * @return the value - */ - public Object get( String key ){ - return super.get(key); - } - - /** Returns the value of a field as an int. - * @param key the field to look for - * @return the field value - */ - public int getInt( String key ){ - Object o = get(key); - if ( o == null ) - throw new NullPointerException( "no value for: " + key ); - - return BSON.toInt( o ); - } - - /** Returns the value of a field as an int. - * @param key the field to look for - * @param def the default to return - * @return the field value (or default) - */ - public int getInt( String key , int def ){ - Object foo = get( key ); - if ( foo == null ) - return def; - - return BSON.toInt( foo ); - } - - /** - * Returns the value of a field as a long. - * - * @param key the field to return - * @return the field value - */ - public long getLong( String key){ - Object foo = get( key ); - return ((Number)foo).longValue(); - } - - /** - * Returns the value of a field as a long. - * @param key the field to look for - * @param def the default to return - * @return the field value (or default) - */ - public long getLong( String key , long def ) { - Object foo = get( key ); - if ( foo == null ) - return def; - - return ((Number)foo).longValue(); - } - - /** - * Returns the value of a field as a double. - * - * @param key the field to return - * @return the field value - */ - public double getDouble( String key){ - Object foo = get( key ); - return ((Number)foo).doubleValue(); - } - - /** - * Returns the value of a field as a double.
- * @param key the field to look for - * @param def the default to return - * @return the field value (or default) - */ - public double getDouble( String key , double def ) { - Object foo = get( key ); - if ( foo == null ) - return def; - - return ((Number)foo).doubleValue(); - } - - /** Returns the value of a field as a string - * @param key the field to look up - * @return the value of the field, converted to a string - */ - public String getString( String key ){ - Object foo = get( key ); - if ( foo == null ) - return null; - return foo.toString(); - } - - /** - * Returns the value of a field as a string - * @param key the field to look up - * @param def the default to return - * @return the value of the field, converted to a string - */ - public String getString( String key, final String def ) { - Object foo = get( key ); - if ( foo == null ) - return def; - - return foo.toString(); - } - - /** Returns the value of a field as a boolean. - * @param key the field to look up - * @return the value of the field, or false if field does not exist - */ - public boolean getBoolean( String key ){ - return getBoolean(key, false); - } - - /** Returns the value of a field as a boolean - * @param key the field to look up - * @param def the default value in case the field is not found - * @return the value of the field, converted to a boolean - */ - public boolean getBoolean( String key , boolean def ){ - Object foo = get( key ); - if ( foo == null ) - return def; - if ( foo instanceof Number ) - return ((Number)foo).intValue() > 0; - if ( foo instanceof Boolean ) - return ((Boolean)foo).booleanValue(); - throw new IllegalArgumentException( "can't coerce to bool:" + foo.getClass() ); - } - - /** - * Returns the object id or null if not set. - * @param field The field to return - * @return The field object value or null if not found (or if null :-^). - */ - public ObjectId getObjectId( final String field ) { - return (ObjectId) get( field ); - } - - /** - * Returns the object id or def if not set. - * @param field The field to return - * @param def the default value in case the field is not found - * @return The field object value or def if not set. - */ - public ObjectId getObjectId( final String field, final ObjectId def ) { - final Object foo = get( field ); - return (foo != null) ? (ObjectId)foo : def; - } - - /** - * Returns the date or null if not set. - * @param field The field to return - * @return The field object value or null if not found. - */ - public Date getDate( final String field ) { - return (Date) get( field ); - } - - /** - * Returns the date or def if not set. - * @param field The field to return - * @param def the default value in case the field is not found - * @return The field object value or def if not set. - */ - public Date getDate( final String field, final Date def ) { - final Object foo = get( field ); - return (foo != null) ?
(Date)foo : def; - } - - /** Add a key/value pair to this object - * @param key the field name - * @param val the field value - * @return the val parameter - */ - public Object put( String key , Object val ){ - return super.put( key , val ); - } - - @SuppressWarnings("unchecked") - public void putAll( Map m ){ - for ( Map.Entry entry : (Set)m.entrySet() ){ - put( entry.getKey().toString() , entry.getValue() ); - } - } - - public void putAll( BSONObject o ){ - for ( String k : o.keySet() ){ - put( k , o.get( k ) ); - } - } - - /** Add a key/value pair to this object - * @param key the field name - * @param val the field value - * @return this - */ - public BasicBSONObject append( String key , Object val ){ - put( key , val ); - - return this; - } - - /** Returns a JSON serialization of this object - * @return JSON serialization - */ - public String toString(){ - return com.mongodb.util.JSON.serialize( this ); - } - - public boolean equals( Object o ){ - if ( ! ( o instanceof BSONObject ) ) - return false; - - BSONObject other = (BSONObject)o; - if ( ! keySet().equals( other.keySet() ) ) - return false; - - for ( String key : keySet() ){ - Object a = get( key ); - Object b = other.get( key ); - - if ( a == null ){ - if ( b != null ) - return false; - } - if ( b == null ){ - if ( a != null ) - return false; - } - else if ( a instanceof Number && b instanceof Number ){ - Number aNumber = (Number) a; - Number bNumber = (Number) b; - if (aNumber instanceof Double || bNumber instanceof Double - || aNumber instanceof Float || bNumber instanceof Float) { - if (aNumber.doubleValue() != bNumber.doubleValue()) { - return false; - } - } else if (aNumber.longValue() != bNumber.longValue()) { - return false; - } - } - else if ( a instanceof Pattern && b instanceof Pattern ){ - Pattern p1 = (Pattern) a; - Pattern p2 = (Pattern) b; - if (!p1.pattern().equals(p2.pattern()) || p1.flags() != p2.flags()) - return false; - } - else { - if ( ! a.equals( b ) ) - return false; - } - } - return true; - } - -} diff --git a/src/main/org/bson/EmptyBSONCallback.java b/src/main/org/bson/EmptyBSONCallback.java deleted file mode 100644 index 680270eec85..00000000000 --- a/src/main/org/bson/EmptyBSONCallback.java +++ /dev/null @@ -1,143 +0,0 @@ -/** - * Copyright (C) 2011 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.bson; - -import org.bson.types.ObjectId; - -public class EmptyBSONCallback implements BSONCallback { - - public void objectStart(){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public void objectStart( String name ){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public void objectStart( boolean array ){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public Object objectDone(){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public BSONCallback createBSONCallback(){ - throw new UnsupportedOperationException( "Not supported yet." 
); - } - - public void arrayStart(){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public void arrayStart( String name ){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public Object arrayDone(){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public void gotNull( String name ){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public void gotUndefined( String name ){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public void gotMinKey( String name ){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public void gotMaxKey( String name ){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public void gotBoolean( String name , boolean v ){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public void gotDouble( String name , double v ){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public void gotInt( String name , int v ){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public void gotLong( String name , long v ){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public void gotDate( String name , long millis ){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public void gotString( String name , String v ){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public void gotSymbol( String name , String v ){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public void gotRegex( String name , String pattern , String flags ){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public void gotTimestamp( String name , int time , int inc ){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public void gotObjectId( String name , ObjectId id ){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public void gotDBRef( String name , String ns , ObjectId id ){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - @Deprecated - public void gotBinaryArray( String name , byte[] data ){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public void gotUUID( String name , long part1 , long part2 ){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public void gotCode( String name , String code ){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public void gotCodeWScope( String name , String code , Object scope ){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public void reset(){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public Object get(){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public void gotBinary( String name , byte type , byte[] data ){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - -} \ No newline at end of file diff --git a/src/main/org/bson/KeyCachingLazyBSONObject.java b/src/main/org/bson/KeyCachingLazyBSONObject.java deleted file mode 100644 index 97fe76f2a3d..00000000000 --- a/src/main/org/bson/KeyCachingLazyBSONObject.java +++ /dev/null @@ -1,73 +0,0 @@ -/** - * Copyright (C) 2011 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
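EmptyBSONCallback, deleted above, is an adapter base: every BSONCallback event throws, so a subclass overrides only the events it cares about, and anything unhandled fails loudly instead of being silently dropped. LazyBSONCallback below uses exactly this pattern, overriding only what it needs. A hypothetical sketch of such a subclass, assuming the deleted class is still on the classpath; it records only top-level string events:

```java
import org.bson.EmptyBSONCallback;

public class StringsOnlyCallback extends EmptyBSONCallback {
    private final StringBuilder seen = new StringBuilder();

    @Override
    public void gotString(String name, String v) { // the one event we handle
        seen.append(name).append('=').append(v).append('\n');
    }

    @Override
    public Object get() {
        return seen.toString();
    }

    @Override
    public void reset() {
        seen.setLength(0);
    }
}
```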
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.bson; - -import java.util.HashMap; - -import org.bson.io.BSONByteBuffer; - -/** - * @author brendan - * @author scotthernandez - * - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. - */ -@Deprecated -public class KeyCachingLazyBSONObject extends LazyBSONObject { - - public KeyCachingLazyBSONObject(byte[] data , LazyBSONCallback cbk) { super( data , cbk ); } - public KeyCachingLazyBSONObject(byte[] data , int offset , LazyBSONCallback cbk) { super( data , offset , cbk ); } - public KeyCachingLazyBSONObject( BSONByteBuffer buffer, LazyBSONCallback callback ){ super( buffer, callback ); } - public KeyCachingLazyBSONObject( BSONByteBuffer buffer, int offset, LazyBSONCallback callback ){ super( buffer, offset, callback ); } - - @Override - public Object get( String key ) { - ensureFieldList(); - return super.get( key ); - } - - @Override - public boolean containsField( String s ) { - ensureFieldList(); - if (! fieldIndex.containsKey( s ) ) - return false; - else - return super.containsField( s ); - } - - synchronized private void ensureFieldList() { - //only run once - if (fieldIndex == null) return; - try { - int offset = _doc_start_offset + FIRST_ELMT_OFFSET; - - while ( !isElementEmpty( offset ) ){ - int fieldSize = sizeCString( offset ); - int elementSize = getElementBSONSize( offset++ ); - String name = _input.getCString( offset ); - ElementRecord _t_record = new ElementRecord( name, offset ); - fieldIndex.put( name, _t_record ); - offset += ( fieldSize + elementSize ); - } - } catch (Exception e) { - fieldIndex = new HashMap(); - } - } - - - private HashMap fieldIndex = new HashMap(); - -} diff --git a/src/main/org/bson/LazyBSONCallback.java b/src/main/org/bson/LazyBSONCallback.java deleted file mode 100644 index 3ac9461f674..00000000000 --- a/src/main/org/bson/LazyBSONCallback.java +++ /dev/null @@ -1,90 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.bson; - -import java.util.List; -import java.util.logging.Logger; - -import com.mongodb.LazyDBObject; -import org.bson.types.ObjectId; - -/** - * - */ -public class LazyBSONCallback extends EmptyBSONCallback { - - public void objectStart(){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public void objectStart( String name ){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public void objectStart( boolean array ){ - throw new UnsupportedOperationException( "Not supported yet." 
); - } - - public Object objectDone(){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - public void reset(){ - _root = null; - } - - public Object get(){ - return _root; - } - - public void gotBinary( String name, byte type, byte[] data ){ - setRootObject( createObject( data, 0 ) ); - } - - /** - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. - */ - @Deprecated - public void setRootObject( Object root ){ - _root = root; - } - - public Object createObject( byte[] data, int offset ){ - return new LazyDBObject( data, offset, this ); - } - - @SuppressWarnings("rawtypes") - public List createArray( byte[] data, int offset ){ - return new LazyBSONList( data, offset, this ); - } - - public Object createDBRef( String ns, ObjectId id ){ - return new BasicBSONObject( "$ns", ns ).append( "$id", id ); - } - - - /* public Object createObject(InputStream input, int offset) { - try { - return new LazyBSONObject(input, offset, this); - } - catch ( IOException e ) { - e.printStackTrace(); - return null; - } - }*/ - private Object _root; - private static final Logger log = Logger.getLogger( "org.bson.LazyBSONCallback" ); -} diff --git a/src/main/org/bson/LazyBSONDecoder.java b/src/main/org/bson/LazyBSONDecoder.java deleted file mode 100644 index 654e286a1db..00000000000 --- a/src/main/org/bson/LazyBSONDecoder.java +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
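The callback above never builds an object graph eagerly: the decoder hands it the document's raw bytes via gotBinary(), and createObject()/createArray() wrap those bytes in lazy views that parse on field access. A usage sketch pairing it with the LazyBSONDecoder whose deletion follows, assuming the deleted classes are on the classpath (the byte literal is {"a": 1} spelled out as BSON; the class name is hypothetical):

```java
import org.bson.BSONObject;
import org.bson.LazyBSONDecoder;

public class LazyDecodeExample {
    public static void main(String[] args) {
        // {"a": 1} as raw BSON: int32 size 12, int32 element "a" = 1, EOO
        byte[] bson = {12, 0, 0, 0, 0x10, 'a', 0, 1, 0, 0, 0, 0};

        BSONObject doc = new LazyBSONDecoder().readObject(bson); // no eager parse
        System.out.println(doc.get("a")); // the "a" element is decoded here -> 1
    }
}
```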
- */ -package org.bson; - -import org.bson.io.Bits; - -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.util.logging.Logger; - -/** - * implementation of BSONDecoder that creates LazyBSONObject instances - */ -public class LazyBSONDecoder implements BSONDecoder { - static final Logger LOG = Logger.getLogger( LazyBSONDecoder.class.getName() ); - - public BSONObject readObject(byte[] b) { - try { - return readObject( new ByteArrayInputStream( b ) ); - } - catch ( IOException ioe ){ - throw new BSONException( "should be impossible" , ioe ); - } - } - - public BSONObject readObject(InputStream in) throws IOException { - BSONCallback c = new LazyBSONCallback(); - decode( in , c ); - return (BSONObject)c.get(); - } - - public int decode(byte[] b, BSONCallback callback) { - try { - return decode( new ByteArrayInputStream( b ), callback ); - } - catch ( IOException ioe ) { - throw new BSONException( "should be impossible" , ioe ); - } - } - - public int decode(InputStream in, BSONCallback callback) throws IOException { - byte[] objSizeBuffer = new byte[BYTES_IN_INTEGER]; - Bits.readFully(in, objSizeBuffer, 0, BYTES_IN_INTEGER); - int objSize = Bits.readInt(objSizeBuffer); - byte[] data = new byte[objSize]; - System.arraycopy(objSizeBuffer, 0, data, 0, BYTES_IN_INTEGER); - - Bits.readFully(in, data, BYTES_IN_INTEGER, objSize - BYTES_IN_INTEGER); - - // note that we are handing off ownership of the data byte array to the callback - callback.gotBinary(null, (byte) 0, data); - return objSize; - } - - private static int BYTES_IN_INTEGER = 4; -} diff --git a/src/main/org/bson/LazyBSONList.java b/src/main/org/bson/LazyBSONList.java deleted file mode 100644 index bf4a90e5c09..00000000000 --- a/src/main/org/bson/LazyBSONList.java +++ /dev/null @@ -1,177 +0,0 @@ -package org.bson; - -import org.bson.io.BSONByteBuffer; - -import java.util.*; - -@SuppressWarnings( "rawtypes" ) -public class LazyBSONList extends LazyBSONObject implements List { - - public LazyBSONList(byte[] data , LazyBSONCallback callback) { super( data , callback ); } - public LazyBSONList(byte[] data , int offset , LazyBSONCallback callback) { super( data , offset , callback ); } - public LazyBSONList(BSONByteBuffer buffer , LazyBSONCallback callback) { super( buffer , callback ); } - public LazyBSONList(BSONByteBuffer buffer , int offset , LazyBSONCallback callback) { super( buffer , offset , callback ); } - - @Override - public boolean contains( Object arg0 ){ - return indexOf(arg0) > -1; - } - - @Override - public boolean containsAll( Collection arg0 ){ - for ( Object obj : arg0 ) { - if ( !contains( obj ) ) - return false; - } - return true; - } - - @Override - public Object get( int pos ){ - return get("" + pos); - } - - @Override - public Iterator iterator(){ - return new LazyBSONListIterator(); - } - - @Override - public int indexOf( Object arg0 ){ - int pos = 0; - Iterator it = iterator(); - while ( it.hasNext() ) { - Object curr = it.next(); - if ( arg0.equals( curr ) ) - return pos; - - pos++; - } - return -1; - } - - @Override - public int lastIndexOf( Object arg0 ){ - int pos = 0; - int lastFound = -1; - - Iterator it = iterator(); - while(it.hasNext()) { - Object curr = it.next(); - if(arg0.equals( curr )) - lastFound = pos; - - pos++; - } - - return lastFound; - } - - @Override - public int size(){ - //TODO check the last one and get the key/field name to see the ordinal position in case the array is stored with missing elements. 
- return getElements().size(); - } - - public class LazyBSONListIterator implements Iterator { - List elements; - int pos=0; - - public LazyBSONListIterator() { - elements = getElements(); - } - - @Override - public boolean hasNext(){ - return pos < elements.size(); - } - - @Override - public Object next(){ - return getElementValue(elements.get(pos++)); - } - - @Override - public void remove(){ - throw new UnsupportedOperationException( "Read Only" ); - } - - } - - @Override - public ListIterator listIterator( int arg0 ){ - throw new UnsupportedOperationException( "Not Supported" ); - } - - @Override - public ListIterator listIterator(){ - throw new UnsupportedOperationException( "Not Supported" ); - } - - - @Override - public boolean add( Object arg0 ){ - throw new UnsupportedOperationException( "Read Only" ); - } - - @Override - public void add( int arg0 , Object arg1 ){ - throw new UnsupportedOperationException( "Read Only" ); - } - - @Override - public boolean addAll( Collection arg0 ){ - throw new UnsupportedOperationException( "Read Only" ); - } - - @Override - public boolean addAll( int arg0 , Collection arg1 ){ - throw new UnsupportedOperationException( "Read Only" ); - } - - @Override - public void clear(){ - throw new UnsupportedOperationException( "Read Only" ); - } - - @Override - public boolean remove( Object arg0 ){ - throw new UnsupportedOperationException( "Read Only" ); - } - - @Override - public Object remove( int arg0 ){ - throw new UnsupportedOperationException( "Read Only" ); - } - - @Override - public boolean removeAll( Collection arg0 ){ - throw new UnsupportedOperationException( "Read Only" ); - } - - @Override - public boolean retainAll( Collection arg0 ){ - throw new UnsupportedOperationException( "Read Only" ); - } - - @Override - public Object set( int arg0 , Object arg1 ){ - throw new UnsupportedOperationException( "Read Only" ); - } - - @Override - public List subList( int arg0 , int arg1 ){ - throw new UnsupportedOperationException( "Not Supported" ); - } - - @Override - public Object[] toArray(){ - throw new UnsupportedOperationException( "Not Supported" ); - } - - @Override - public Object[] toArray( Object[] arg0 ){ - throw new UnsupportedOperationException( "Not Supported" ); - } - -} diff --git a/src/main/org/bson/LazyBSONObject.java b/src/main/org/bson/LazyBSONObject.java deleted file mode 100644 index f88bd65ef7f..00000000000 --- a/src/main/org/bson/LazyBSONObject.java +++ /dev/null @@ -1,726 +0,0 @@ -/** - * Copyright (C) 2008-2011 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
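Context for LazyBSONList above: on the wire, a BSON array is just a document whose keys are the decimal strings "0", "1", ..., which is why get(int pos) simply looks up "" + pos, and why the TODO in size() worries about arrays stored with missing elements. A small sketch demonstrating the layout with the (deleted) encoder; the class name is hypothetical:

```java
import java.util.Arrays;

import org.bson.BasicBSONEncoder;
import org.bson.BasicBSONObject;

public class ArraysAreDocuments {
    public static void main(String[] args) {
        BasicBSONEncoder enc = new BasicBSONEncoder();
        byte[] asArray = enc.encode(new BasicBSONObject("a", Arrays.asList("x", "y")));
        byte[] asDoc = enc.encode(new BasicBSONObject("a",
                new BasicBSONObject("0", "x").append("1", "y")));

        // identical bytes except the element type: 0x04 (array) vs 0x03 (document)
        System.out.println(asArray[4] + " vs " + asDoc[4]); // 4 vs 3
        System.out.println(Arrays.equals(asArray, asDoc));  // false, by that one byte
    }
}
```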
- */ -package org.bson; - -import org.bson.io.BSONByteBuffer; -import org.bson.types.*; - -import java.io.IOException; -import java.io.OutputStream; -import java.util.*; -import java.util.logging.Logger; -import java.util.regex.Pattern; - -/** - * @author antoine - * @author brendan - * @author scotthernandez - * @author Kilroy Wuz Here - */ -public class LazyBSONObject implements BSONObject { - - public LazyBSONObject( byte[] data, LazyBSONCallback callback ){ - this( BSONByteBuffer.wrap( data ), callback ); - } - - public LazyBSONObject( byte[] data, int offset, LazyBSONCallback callback ){ - this( BSONByteBuffer.wrap( data, offset, data.length - offset ), offset, callback ); - } - - public LazyBSONObject( BSONByteBuffer buffer, LazyBSONCallback callback ){ - this( buffer, 0, callback ); - } - - public LazyBSONObject( BSONByteBuffer buffer, int offset, LazyBSONCallback callback ){ - _callback = callback; - _input = buffer; - _doc_start_offset = offset; - } - - - class ElementRecord { - ElementRecord( final String name, final int offset ){ - this.name = name; - this.offset = offset; - this.type = getElementType( offset - 1 ); - this.fieldNameSize = sizeCString( offset ); - this.valueOffset = offset + fieldNameSize; - } - - final String name; - /** - * The offset the record begins at. - */ - final byte type; - final int fieldNameSize; - final int valueOffset; - final int offset; - } - - class LazyBSONKeyIterator implements Iterator { - - public boolean hasNext(){ - return !isElementEmpty( offset ); - } - - public String next(){ - int fieldSize = sizeCString( offset + 1); - int elementSize = getElementBSONSize( offset ); - String key = _input.getCString( offset + 1); - offset += fieldSize + elementSize + 1; - return key; - } - - public void remove(){ - throw new UnsupportedOperationException( "Read only" ); - } - - int offset = _doc_start_offset + FIRST_ELMT_OFFSET; - } - - /** - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. - */ - @Deprecated - public class LazyBSONKeySet extends ReadOnlySet { - - /** - * This method runs in time linear to the total size of all keys in the document. - * - * @return the number of keys in the document - */ - @Override - public int size(){ - int size = 0; - Iterator iter = iterator(); - while(iter.hasNext()) { - iter.next(); - ++size; - } - return size; - } - - @Override - public boolean isEmpty(){ - return LazyBSONObject.this.isEmpty(); - } - - @Override - public boolean contains( Object o ){ - for ( String key : this ){ - if ( key.equals( o ) ){ - return true; - } - } - return false; - } - - @Override - public Iterator iterator(){ - return new LazyBSONKeyIterator(); - } - - @Override - public String[] toArray(){ - String[] a = new String[size()]; - return toArray(a); - } - - @SuppressWarnings( "unchecked" ) - @Override - public T[] toArray(T[] a) { - int size = size(); - - T[] localArray = a.length >= size ? a : - (T[]) java.lang.reflect.Array.newInstance(a.getClass().getComponentType(), size); - - int i = 0; - for ( String key : this ){ - localArray[i++] = (T) key; - } - - if (localArray.length > i) { - localArray[i] = null; - } - return localArray; - } - - @Override - public boolean add( String e ){ - throw new UnsupportedOperationException( "Not supported yet." ); - } - - @Override - public boolean remove( Object o ){ - throw new UnsupportedOperationException( "Not supported yet." 
); - } - - @Override - public boolean containsAll( Collection collection ){ - for ( Object item : collection ){ - if ( !contains( item ) ){ - return false; - } - } - return true; - } - } - - class LazyBSONEntryIterator implements Iterator> { - - public boolean hasNext(){ - return !isElementEmpty( offset ); - } - - public Map.Entry next(){ - int fieldSize = sizeCString(offset + 1); - int elementSize = getElementBSONSize(offset); - String key = _input.getCString(offset + 1); - final ElementRecord nextElementRecord = new ElementRecord(key, ++offset); - offset += fieldSize + elementSize; - return new Map.Entry() { - @Override - public String getKey() { - return nextElementRecord.name; - } - - @Override - public Object getValue() { - return getElementValue(nextElementRecord); - } - - @Override - public Object setValue(Object value) { - throw new UnsupportedOperationException("Read only"); - } - - @Override - public boolean equals(Object o) { - if (!(o instanceof Map.Entry)) - return false; - Map.Entry e = (Map.Entry) o; - return getKey().equals(e.getKey()) && getValue().equals(e.getValue()); - } - - @Override - public int hashCode() { - return getKey().hashCode() ^ getValue().hashCode(); - } - - @Override - public String toString() { - return getKey() + "=" + getValue(); - } - }; - } - - public void remove(){ - throw new UnsupportedOperationException( "Read only" ); - } - - int offset = _doc_start_offset + FIRST_ELMT_OFFSET; - } - - class LazyBSONEntrySet extends ReadOnlySet> { - @Override - public int size() { - return LazyBSONObject.this.keySet().size(); - } - - @Override - public boolean isEmpty() { - return LazyBSONObject.this.isEmpty(); - } - - @Override - public boolean contains(Object o) { - Iterator> iter = iterator(); - while (iter.hasNext()) { - if (iter.next().equals(o)) { - return true; - } - } - return false; - } - - @Override - public boolean containsAll(Collection c) { - for (Object cur : c) { - if (!contains(cur)) { - return false; - } - } - - return true; - } - - @Override - public Iterator> iterator() { - return new LazyBSONEntryIterator(); - } - - @Override - public Object[] toArray() { - Map.Entry[] array = new Map.Entry[size()]; - return toArray(array); - } - - @SuppressWarnings( "unchecked" ) - @Override - public T[] toArray(T[] a) { - int size = size(); - - T[] localArray = a.length >= size ? 
a : - (T[]) java.lang.reflect.Array.newInstance(a.getClass().getComponentType(), size); - - Iterator<Map.Entry<String, Object>> iter = iterator(); - int i = 0; - while(iter.hasNext()) { - localArray[i++] = (T) iter.next(); - } - - if (localArray.length > i) { - localArray[i] = null; - } - - return localArray; - } - } - - // Base class that throws UnsupportedOperationException for any method that writes to the Set - abstract class ReadOnlySet<E> implements Set<E> { - - @Override - public boolean add(E e) { - throw new UnsupportedOperationException("Read-only Set"); - } - - @Override - public boolean remove(Object o) { - throw new UnsupportedOperationException("Read-only Set"); - } - - @Override - public boolean addAll(Collection<? extends E> c) { - throw new UnsupportedOperationException("Read-only Set"); - } - - @Override - public boolean retainAll(Collection<?> c) { - throw new UnsupportedOperationException("Read-only Set"); - } - - @Override - public boolean removeAll(Collection<?> c) { - throw new UnsupportedOperationException("Read-only Set"); - } - - @Override - public void clear() { - throw new UnsupportedOperationException("Read-only Set"); - } - } - - public Object put( String key, Object v ){ - throw new UnsupportedOperationException( "Object is read only" ); - } - - public void putAll( BSONObject o ){ - throw new UnsupportedOperationException( "Object is read only" ); - } - - public void putAll( Map m ){ - throw new UnsupportedOperationException( "Object is read only" ); - } - - public Object get( String key ){ - //find the element for the given key - ElementRecord element = getElement(key); - - //not found if null/empty - if (element == null) { - return null; - } - - return getElementValue(element); - - } - - /** - * returns the ElementRecord for the given key, or null if not found - * @param key the field/key to find - * @return ElementRecord for key, or null - */ - ElementRecord getElement(String key){ - int offset = _doc_start_offset + FIRST_ELMT_OFFSET; - - while ( !isElementEmpty( offset ) ){ - int fieldSize = sizeCString( offset + 1 ); - int elementSize = getElementBSONSize( offset ); - String name = _input.getCString( ++offset); - - if (name.equals(key)) { - return new ElementRecord( name, offset ); - } - offset += ( fieldSize + elementSize); - } - - return null; - } - - - /** - * returns all the ElementRecords in this document - * @return list of ElementRecord - */ - List<ElementRecord> getElements(){ - int offset = _doc_start_offset + FIRST_ELMT_OFFSET; - ArrayList<ElementRecord> elements = new ArrayList<ElementRecord>(); - - while ( !isElementEmpty( offset ) ){ - int fieldSize = sizeCString( offset + 1 ); - int elementSize = getElementBSONSize( offset ); - String name = _input.getCString( ++offset ); - ElementRecord rec = new ElementRecord( name, offset ); - elements.add( rec ); - offset += ( fieldSize + elementSize ); - } - - return elements; - } - - public Map toMap(){ - throw new UnsupportedOperationException( "Not Supported" ); - } - - public Object removeField( String key ){ - throw new UnsupportedOperationException( "Object is read only" ); - } - - @Deprecated - public boolean containsKey( String s ){ - return containsField( s ); - } - - public boolean containsField( String s ){ - return keySet().contains( s ); - } - - /** - * - * @return the set of all keys in the document - */ - public Set<String> keySet(){ - return new LazyBSONKeySet(); - } - - /** - * This method will be more efficient than using a combination of keySet() and get(String key) - * @return the set of entries (key, value) in the document - */ - public Set<Map.Entry<String, Object>> entrySet(){ - return new LazyBSONEntrySet(); - }
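getElement() and getElements() above scan the raw buffer element by element: a type byte, a cstring name, then a value whose width is computed from the type (see getElementBSONSize below). A standalone sketch of that walk, handling int32 elements only (illustration, hypothetical class name; the real code sizes every BSON type):

```java
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.charset.StandardCharsets;

public class ElementWalk {
    public static void main(String[] args) {
        // {"a": 1, "b": 2} as raw BSON: two int32 elements, 19 bytes total
        byte[] doc = {19, 0, 0, 0, 0x10, 'a', 0, 1, 0, 0, 0,
                      0x10, 'b', 0, 2, 0, 0, 0, 0};
        ByteBuffer buf = ByteBuffer.wrap(doc).order(ByteOrder.LITTLE_ENDIAN);

        int offset = 4;                       // FIRST_ELMT_OFFSET: skip the size int32
        while (doc[offset] != 0) {            // type 0x00 is EOO, end of document
            byte type = doc[offset++];
            int nameStart = offset;
            while (doc[offset] != 0) offset++;             // sizeCString-style scan
            String name = new String(doc, nameStart, offset - nameStart, StandardCharsets.UTF_8);
            offset++;                                      // skip the name terminator
            System.out.println(name + " (type " + type + ") = " + buf.getInt(offset));
            offset += 4;                      // int32 payload only in this sketch
        }
    }
}
```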
/** - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. - */ - @Deprecated - protected boolean isElementEmpty( int offset ){ - return getElementType( offset ) == BSON.EOO; - } - - public boolean isEmpty(){ - return isElementEmpty( _doc_start_offset + FIRST_ELMT_OFFSET ); - } - - private int getBSONSize( final int offset ){ - return _input.getInt( offset ); - } - - public int getBSONSize(){ - return getBSONSize( _doc_start_offset ); - } - - public int pipe(OutputStream os) throws IOException { - os.write(_input.array(), _doc_start_offset, getBSONSize()); - return getBSONSize(); - } - - private String getElementFieldName( final int offset ){ - return _input.getCString( offset ); - } - - /** - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. - */ - @Deprecated - protected byte getElementType( final int offset ){ - return _input.get( offset ); - } - - /** - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. - */ - @Deprecated - protected int getElementBSONSize( int offset ){ - int x = 0; - byte type = getElementType( offset++ ); - int n = sizeCString( offset ); - int valueOffset = offset + n; - switch ( type ){ - case BSON.EOO: - case BSON.UNDEFINED: - case BSON.NULL: - case BSON.MAXKEY: - case BSON.MINKEY: - break; - case BSON.BOOLEAN: - x = 1; - break; - case BSON.NUMBER_INT: - x = 4; - break; - case BSON.TIMESTAMP: - case BSON.DATE: - case BSON.NUMBER_LONG: - case BSON.NUMBER: - x = 8; - break; - case BSON.OID: - x = 12; - break; - case BSON.SYMBOL: - case BSON.CODE: - case BSON.STRING: - x = _input.getInt( valueOffset ) + 4; - break; - case BSON.CODE_W_SCOPE: - x = _input.getInt( valueOffset ); - break; - case BSON.REF: - x = _input.getInt( valueOffset ) + 4 + 12; - break; - case BSON.OBJECT: - case BSON.ARRAY: - x = _input.getInt( valueOffset ); - break; - case BSON.BINARY: - x = _input.getInt( valueOffset ) + 4 + 1/*subtype*/; - break; - case BSON.REGEX: - // 2 cstrs - int part1 = sizeCString( valueOffset ); - int part2 = sizeCString( valueOffset + part1 ); - x = part1 + part2; - break; - default: - throw new BSONException( "Invalid type " + type + " for field " + getElementFieldName( offset ) ); - } - return x; - } - - - /** - * Returns the size of the BSON cstring at the given offset in the buffer - * @param offset the offset into the buffer - * @return the size of the BSON cstring, including the null terminator - * - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. - */ - @Deprecated - protected int sizeCString( int offset ){ - int end = offset; - while ( true ){ - byte b = _input.get( end ); - if ( b == 0 ) - break; - else - end++; - } - return end - offset + 1; - } - - /** - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. 
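A worked example of the sizing logic above: {"a": 1} occupies 12 bytes, namely the int32 document size (4) + the type byte (1) + the name cstring "a\0" (2) + the int32 value (4) + the EOO terminator (1). The check below assumes the deleted classes are still on the classpath; the class name is hypothetical:

```java
import org.bson.LazyBSONCallback;
import org.bson.LazyBSONObject;

public class SizeCheck {
    public static void main(String[] args) {
        byte[] bson = {12, 0, 0, 0, 0x10, 'a', 0, 1, 0, 0, 0, 0};
        LazyBSONObject doc = new LazyBSONObject(bson, new LazyBSONCallback());
        System.out.println(doc.getBSONSize()); // 12, read straight from the prefix
        System.out.println(doc.get("a"));      // 1
    }
}
```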
- */ - @Deprecated - protected Object getElementValue( ElementRecord record ){ - switch ( record.type ){ - case BSON.EOO: - case BSON.UNDEFINED: - case BSON.NULL: - return null; - case BSON.MAXKEY: - return new MaxKey(); - case BSON.MINKEY: - return new MinKey(); - case BSON.BOOLEAN: - return ( _input.get( record.valueOffset ) != 0 ); - case BSON.NUMBER_INT: - return _input.getInt( record.valueOffset ); - case BSON.TIMESTAMP: - int inc = _input.getInt( record.valueOffset ); - int time = _input.getInt( record.valueOffset + 4 ); - return new BSONTimestamp( time, inc ); - case BSON.DATE: - return new Date( _input.getLong( record.valueOffset ) ); - case BSON.NUMBER_LONG: - return _input.getLong( record.valueOffset ); - case BSON.NUMBER: - return Double.longBitsToDouble( _input.getLong( record.valueOffset ) ); - case BSON.OID: - return new ObjectId( _input.getIntBE( record.valueOffset ), - _input.getIntBE( record.valueOffset + 4 ), - _input.getIntBE( record.valueOffset + 8 ) ); - case BSON.SYMBOL: - return new Symbol( _input.getUTF8String( record.valueOffset ) ); - case BSON.CODE: - return new Code( _input.getUTF8String( record.valueOffset ) ); - case BSON.STRING: - return _input.getUTF8String( record.valueOffset ); - case BSON.CODE_W_SCOPE: - int strsize = _input.getInt( record.valueOffset + 4 ); - String code = _input.getUTF8String( record.valueOffset + 4 ); - BSONObject scope = - (BSONObject) _callback.createObject( _input.array(), record.valueOffset + 4 + 4 + strsize ); - return new CodeWScope( code, scope ); - case BSON.REF: - int csize = _input.getInt( record.valueOffset ); - String ns = _input.getCString( record.valueOffset + 4 ); - int oidOffset = record.valueOffset + csize + 4; - ObjectId oid = new ObjectId( _input.getIntBE( oidOffset ), - _input.getIntBE( oidOffset + 4 ), - _input.getIntBE( oidOffset + 8 ) ); - return _callback.createDBRef( ns, oid ); - case BSON.OBJECT: - return _callback.createObject( _input.array(), record.valueOffset ); - case BSON.ARRAY: - return _callback.createArray( _input.array(), record.valueOffset ); - case BSON.BINARY: - return readBinary( record.valueOffset ); - case BSON.REGEX: - int patternCStringSize = sizeCString( record.valueOffset ); - String pattern = _input.getCString( record.valueOffset ); - String flags = _input.getCString( record.valueOffset + patternCStringSize + 1 ); - return Pattern.compile( pattern, BSON.regexFlags( flags ) ); - default: - throw new BSONException( - "Invalid type " + record.type + " for field " + getElementFieldName( record.offset ) ); - } - } - - private Object readBinary( int valueOffset ){ - final int totalLen = _input.getInt( valueOffset ); - valueOffset += 4; - final byte bType = _input.get( valueOffset ); - valueOffset += 1; - - byte[] bin; - switch ( bType ){ - case BSON.B_GENERAL:{ - bin = new byte[totalLen]; - for ( int n = 0; n < totalLen; n++ ){ - bin[n] = _input.get( valueOffset + n ); - } - return bin; - } - case BSON.B_BINARY: - final int len = _input.getInt( valueOffset ); - if ( len + 4 != totalLen ) - throw new IllegalArgumentException( - "Bad Data Size; Binary Subtype 2. { actual len: " + len + " expected totalLen: " + totalLen - + "}" ); - valueOffset += 4; - bin = new byte[len]; - for ( int n = 0; n < len; n++ ){ - bin[n] = _input.get( valueOffset + n ); - } - return bin; - case BSON.B_UUID: - if ( totalLen != 16 ) - throw new IllegalArgumentException( - "Bad Data Size; Binary Subtype 3 (UUID). 
{ total length: " + totalLen + " != 16 }" ); - - long part1 = _input.getLong( valueOffset ); - valueOffset += 8; - long part2 = _input.getLong( valueOffset ); - return new UUID( part1, part2 ); - } - - bin = new byte[totalLen]; - for ( int n = 0; n < totalLen; n++ ){ - bin[n] = _input.get( valueOffset + n ); - } - return bin; - } - - protected int getOffset(){ - return _doc_start_offset; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - LazyBSONObject that = (LazyBSONObject) o; - - return Arrays.equals(this._input.array(), that._input.array()); - } - - @Override - public int hashCode() { - return Arrays.hashCode(_input.array()); - } - - /** - * Returns a JSON serialization of this object - * - * @return JSON serialization - */ - public String toString(){ - return com.mongodb.util.JSON.serialize( this ); - } - - /** - * In a "normal" (aka not embedded) doc, this will be the offset of the first element. - * - * In an embedded doc, because we use ByteBuffers to avoid unnecessary copying, the offset must be manually set in - * _doc_start_offset - */ - final static int FIRST_ELMT_OFFSET = 4; - - /** - * @deprecated Please use {@link #getOffset()} instead. - */ - @Deprecated - protected final int _doc_start_offset; - - /** - * @deprecated This field is NOT a part of public API and will be dropped in 3.x versions. - */ - @Deprecated - protected final BSONByteBuffer _input; // TODO - Guard this with synchronicity? - // callback is kept to create sub-objects on the fly - - /** - * @deprecated This field is NOT a part of public API and will be dropped in 3.x versions. - */ - @Deprecated - protected final LazyBSONCallback _callback; - private static final Logger log = Logger.getLogger( "org.bson.LazyBSONObject" ); -} diff --git a/src/main/org/bson/LazyDBList.java b/src/main/org/bson/LazyDBList.java deleted file mode 100644 index 4bb071e7b28..00000000000 --- a/src/main/org/bson/LazyDBList.java +++ /dev/null @@ -1,42 +0,0 @@ -/** - * - */ -package org.bson; - -import org.bson.io.BSONByteBuffer; - -import com.mongodb.DBObject; -import com.mongodb.util.JSON; - -/** - * @author scotthernandez - * @deprecated Please use {@link com.mongodb.LazyDBList} instead. - */ -@Deprecated -public class LazyDBList extends LazyBSONList implements DBObject { - private static final long serialVersionUID = -4415279469780082174L; - - public LazyDBList(byte[] data, LazyBSONCallback callback) { super(data, callback); } - public LazyDBList(byte[] data, int offset, LazyBSONCallback callback) { super(data, offset, callback); } - public LazyDBList(BSONByteBuffer buffer, LazyBSONCallback callback) { super(buffer, callback); } - public LazyDBList(BSONByteBuffer buffer, int offset, LazyBSONCallback callback) { super(buffer, offset, callback); } - - /** - * Returns a JSON serialization of this object - * @return JSON serialization - */ - @Override - public String toString(){ - return JSON.serialize( this ); - } - - public boolean isPartialObject(){ - return _isPartialObject; - } - - public void markAsPartialObject(){ - _isPartialObject = true; - } - - private boolean _isPartialObject; -} diff --git a/src/main/org/bson/NewBSONDecoder.java b/src/main/org/bson/NewBSONDecoder.java deleted file mode 100644 index f9f84c7e662..00000000000 --- a/src/main/org/bson/NewBSONDecoder.java +++ /dev/null @@ -1,306 +0,0 @@ -/** - * Copyright (C) 2012 10gen Inc.
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.bson; - -import org.bson.io.Bits; -import org.bson.types.ObjectId; -import static org.bson.BSON.*; - -// Java -import java.io.IOException; -import java.io.InputStream; -import java.io.DataInputStream; -import java.io.UnsupportedEncodingException; - -/** - * A new implementation of the bson decoder. - * - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. - */ -@Deprecated -public class NewBSONDecoder implements BSONDecoder { - - @Override - public BSONObject readObject(final byte [] pData) { - _length = pData.length; - final BasicBSONCallback c = new BasicBSONCallback(); - decode(pData, c); - return (BSONObject)c.get(); - } - - @Override - public BSONObject readObject(final InputStream pIn) throws IOException { - // Slurp in the data and convert to a byte array. - _length = Bits.readInt(pIn); - - if (_data == null || _data.length < _length) { - _data = new byte[_length]; - } - - (new DataInputStream(pIn)).readFully(_data, 4, (_length - 4)); - - return readObject(_data); - } - - @Override - public int decode(final byte [] pData, final BSONCallback pCallback) { - _data = pData; - _pos = 4; - _callback = pCallback; - _decode(); - return _length; - } - - @Override - public int decode(final InputStream pIn, final BSONCallback pCallback) throws IOException { - _length = Bits.readInt(pIn); - - if (_data == null || _data.length < _length) { - _data = new byte[_length]; - } - - (new DataInputStream(pIn)).readFully(_data, 4, (_length - 4)); - - return decode(_data, pCallback); - } - - private final void _decode() { - _callback.objectStart(); - while (decodeElement()); - _callback.objectDone(); - } - - private final String readCstr() { - int length = 0; - final int offset = _pos; - - while (_data[_pos++] != 0) length++; - - try { - return new String(_data, offset, length, DEFAULT_ENCODING); - } catch (final UnsupportedEncodingException uee) { - return new String(_data, offset, length); - } - } - - private final String readUtf8Str() { - final int length = Bits.readInt(_data, _pos); - _pos += 4; - - if (length <= 0 || length > MAX_STRING) throw new BSONException("String invalid - corruption"); - - try { - final String str = new String(_data, _pos, (length - 1), DEFAULT_ENCODING); - _pos += length; - return str; - - } catch (final UnsupportedEncodingException uee) { - throw new BSONException("What is in the db", uee); - } - } - - private final Object _readBasicObject() { - _pos += 4; - - final BSONCallback save = _callback; - final BSONCallback _basic = _callback.createBSONCallback(); - _callback = _basic; - _basic.reset(); - _basic.objectStart(false); - - while( decodeElement() ); - _callback = save; - return _basic.get(); - } - - private final void _binary(final String pName) { - - final int totalLen = Bits.readInt(_data, _pos); - _pos += 4; - - final byte bType = _data[_pos]; - _pos += 1; - - switch ( bType ){ - case B_GENERAL: { - final byte [] data = new byte[totalLen]; - - 
System.arraycopy(_data, _pos, data, 0, totalLen); - _pos += totalLen; - - _callback.gotBinary(pName, bType, data); - return; - } - - case B_BINARY: { - final int len = Bits.readInt(_data, _pos); - _pos += 4; - - if ( len + 4 != totalLen ) - throw new IllegalArgumentException( "bad data size subtype 2 len: " + len + " totalLen: " + totalLen ); - - final byte [] data = new byte[len]; - System.arraycopy(_data, _pos, data, 0, len); - _pos += len; - _callback.gotBinary(pName, bType, data); - return; - } - - case B_UUID: { - if ( totalLen != 16 ) - throw new IllegalArgumentException( "bad data size subtype 3 len: " + totalLen + " != 16"); - - final long part1 = Bits.readLong(_data, _pos); - _pos += 8; - - final long part2 = Bits.readLong(_data, _pos); - _pos += 8; - - _callback.gotUUID(pName, part1, part2); - return; - } - } - - final byte [] data = new byte[totalLen]; - System.arraycopy(_data, _pos, data, 0, totalLen); - _pos += totalLen; - - _callback.gotBinary(pName, bType, data); - } - - private final boolean decodeElement() { - - final byte type = _data[_pos]; - _pos += 1; - - if (type == EOO) return false; - - final String name = readCstr(); - - switch (type) { - case NULL: { _callback.gotNull(name); return true; } - - case UNDEFINED: { _callback.gotUndefined(name); return true; } - - case BOOLEAN: { _callback.gotBoolean(name, (_data[_pos] > 0)); _pos += 1; return true; } - - case NUMBER: { _callback.gotDouble(name, Double.longBitsToDouble(Bits.readLong(_data, _pos))); _pos += 8; return true; } - - case NUMBER_INT: { _callback.gotInt(name, Bits.readInt(_data, _pos)); _pos += 4; return true; } - - case NUMBER_LONG: { - _callback.gotLong(name, Bits.readLong(_data, _pos)); - _pos += 8; - return true; - } - - case SYMBOL: { _callback.gotSymbol(name, readUtf8Str()); return true; } - case STRING: { _callback.gotString(name, readUtf8Str()); return true; } - - case OID: { - // OID is stored as big endian - - final int p1 = Bits.readIntBE(_data, _pos); - _pos += 4; - - final int p2 = Bits.readIntBE(_data, _pos); - _pos += 4; - - final int p3 = Bits.readIntBE(_data, _pos); - _pos += 4; - - _callback.gotObjectId(name , new ObjectId(p1, p2, p3)); - return true; - } - - case REF: { - _pos += 4; - - final String ns = readCstr(); - - final int p1 = Bits.readInt(_data, _pos); - _pos += 4; - - final int p2 = Bits.readInt(_data, _pos); - _pos += 4; - - final int p3 = Bits.readInt(_data, _pos); - _pos += 4; - - _callback.gotDBRef(name , ns, new ObjectId(p1, p2, p3)); - - return true; - } - - case DATE: { _callback.gotDate(name , Bits.readLong(_data, _pos)); _pos += 8; return true; } - - - case REGEX: { - _callback.gotRegex(name, readCstr(), readCstr()); - return true; - } - - case BINARY: { _binary(name); return true; } - - case CODE: { _callback.gotCode(name, readUtf8Str()); return true; } - - case CODE_W_SCOPE: { - _pos += 4; - _callback.gotCodeWScope(name, readUtf8Str(), _readBasicObject()); - return true; - } - - case ARRAY: - _pos += 4; - _callback.arrayStart(name); - while (decodeElement()); - _callback.arrayDone(); - return true; - - case OBJECT: - _pos += 4; - _callback.objectStart(name); - while (decodeElement()); - _callback.objectDone(); - return true; - - case TIMESTAMP: - int i = Bits.readInt(_data, _pos); - _pos += 4; - - int time = Bits.readInt(_data, _pos); - _pos += 4; - - _callback.gotTimestamp(name, time, i); - return true; - - case MINKEY: _callback.gotMinKey(name); return true; - case MAXKEY: _callback.gotMaxKey(name); return true; - - default: throw new UnsupportedOperationException( 
"BSONDecoder doesn't understand type : " + type + " name: " + name ); - } - } - - private static final int MAX_STRING = ( 32 * 1024 * 1024 ); - private static final String DEFAULT_ENCODING = "UTF-8"; - - private byte [] _data; - private int _length; - private int _pos = 0; - private BSONCallback _callback; -} - diff --git a/src/main/org/bson/Transformer.java b/src/main/org/bson/Transformer.java deleted file mode 100644 index d534dbf212f..00000000000 --- a/src/main/org/bson/Transformer.java +++ /dev/null @@ -1,27 +0,0 @@ -// Transformer.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.bson; - -public interface Transformer { - - /** - * @return the new object. return passed in object if no change - */ - public Object transform( Object o ); -} diff --git a/src/main/org/bson/io/BSONByteBuffer.java b/src/main/org/bson/io/BSONByteBuffer.java deleted file mode 100644 index 6df6f5c29ad..00000000000 --- a/src/main/org/bson/io/BSONByteBuffer.java +++ /dev/null @@ -1,144 +0,0 @@ -/** - * Copyright (C) 2008-2011 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.bson.io; - -import org.bson.*; - -import java.io.*; -import java.nio.*; -import java.util.logging.*; - -/** - * Pseudo byte buffer, delegates as it is too hard to properly override / extend the ByteBuffer API - * - * @author brendan - */ -public class BSONByteBuffer { - - private BSONByteBuffer( ByteBuffer buf ){ - this.buf = buf; - buf.order( ByteOrder.LITTLE_ENDIAN ); - } - - public static BSONByteBuffer wrap( byte[] bytes, int offset, int length ){ - return new BSONByteBuffer( ByteBuffer.wrap( bytes, offset, length ) ); - } - - public static BSONByteBuffer wrap( byte[] bytes ){ - return new BSONByteBuffer( ByteBuffer.wrap( bytes ) ); - } - - public byte get( int i ){ - return buf.get(i); - } - - public ByteBuffer get( byte[] bytes, int offset, int length ){ - return buf.get(bytes, offset, length); - } - - public ByteBuffer get( byte[] bytes ){ - return buf.get(bytes); - } - - public byte[] array(){ - return buf.array(); - } - - public String toString(){ - return buf.toString(); - } - - public int hashCode(){ - return buf.hashCode(); - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - - BSONByteBuffer that = (BSONByteBuffer) o; - - if (buf != null ? 
!buf.equals(that.buf) : that.buf != null) return false; - - return true; - } - - /** - * Gets a Little Endian Integer - * - * @param i Index to read from - * - * @return - */ - public int getInt( int i ){ - return getIntLE( i ); - } - - public int getIntLE( int i ){ - int x = 0; - x |= ( 0xFF & buf.get( i + 0 ) ) << 0; - x |= ( 0xFF & buf.get( i + 1 ) ) << 8; - x |= ( 0xFF & buf.get( i + 2 ) ) << 16; - x |= ( 0xFF & buf.get( i + 3 ) ) << 24; - return x; - } - - public int getIntBE( int i ){ - int x = 0; - x |= ( 0xFF & buf.get( i + 0 ) ) << 24; - x |= ( 0xFF & buf.get( i + 1 ) ) << 16; - x |= ( 0xFF & buf.get( i + 2 ) ) << 8; - x |= ( 0xFF & buf.get( i + 3 ) ) << 0; - return x; - } - - public long getLong( int i ){ - return buf.getLong( i ); - } - - public String getCString(int offset) { - int end = offset; - while (get(end) != 0) { - ++end; - } - int len = end - offset; - return new String(array(), offset, len); - } - - public String getUTF8String(int valueOffset) { - int size = getInt(valueOffset) - 1; - try { - return new String(array(), valueOffset + 4, size, "UTF-8"); - } catch (UnsupportedEncodingException e) { - throw new BSONException( "Cannot decode string as UTF-8." ); - } - } - - public Buffer position( int i ){ - return buf.position(i); - } - - public Buffer reset(){ - return buf.reset(); - } - - public int size(){ - return getInt( 0 ); - } - - protected ByteBuffer buf; -} diff --git a/src/main/org/bson/io/BSONInput.java b/src/main/org/bson/io/BSONInput.java deleted file mode 100644 index bb906421fe4..00000000000 --- a/src/main/org/bson/io/BSONInput.java +++ /dev/null @@ -1,23 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.bson.io; - -import org.bson.BSONException; -import org.bson.BasicBSONDecoder; - -import java.io.IOException; -import java.io.InputStream; - diff --git a/src/main/org/bson/io/BasicOutputBuffer.java b/src/main/org/bson/io/BasicOutputBuffer.java deleted file mode 100644 index 59f1c87d67a..00000000000 --- a/src/main/org/bson/io/BasicOutputBuffer.java +++ /dev/null @@ -1,143 +0,0 @@ -// BasicOutputBuffer.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.bson.io; - -import java.io.*; - -public class BasicOutputBuffer extends OutputBuffer { - - @Override - public void write(byte[] b){ - write( b , 0 , b.length ); - } - - @Override - public void write(byte[] b, int off, int len){ - _ensure( len ); - System.arraycopy( b , off , _buffer , _cur , len ); - _cur += len; - _size = Math.max( _cur , _size ); - } - @Override - public void write(int b){ - _ensure(1); - _buffer[_cur++] = (byte)(0xFF&b); - _size = Math.max( _cur , _size ); - } - - @Override - public int getPosition(){ - return _cur; - } - - /** - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. - */ - @Override - @Deprecated - public void setPosition( int position ){ - _cur = position; - } - - /** - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. - */ - @Override - @Deprecated - public void seekEnd(){ - _cur = _size; - } - - /** - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. - */ - @Deprecated - @Override - public void seekStart(){ - _cur = 0; - } - - /** - * @return size of data so far - */ - @Override - public int size(){ - return _size; - } - - /** - * @return bytes written - */ - @Override - public int pipe( OutputStream out ) - throws IOException { - out.write( _buffer , 0 , _size ); - return _size; - } - - /** - * @return bytes written - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. - */ - @Deprecated - public int pipe( DataOutput out ) - throws IOException { - out.write( _buffer , 0 , _size ); - return _size; - } - - - void _ensure( int more ){ - final int need = _cur + more; - if ( need < _buffer.length ) - return; - - int newSize = _buffer.length*2; - if ( newSize <= need ) - newSize = need + 128; - - byte[] n = new byte[newSize]; - System.arraycopy( _buffer , 0 , n , 0 , _size ); - _buffer = n; - } - - /** - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. - */ - @Override - @Deprecated - public String asString(){ - return new String( _buffer , 0 , _size ); - } - - /** - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. - */ - @Override - @Deprecated - public String asString( String encoding ) - throws UnsupportedEncodingException { - return new String( _buffer , 0 , _size , encoding ); - } - - - private int _cur; - private int _size; - private byte[] _buffer = new byte[512]; -} diff --git a/src/main/org/bson/io/Bits.java b/src/main/org/bson/io/Bits.java deleted file mode 100644 index c15fbaf06b6..00000000000 --- a/src/main/org/bson/io/Bits.java +++ /dev/null @@ -1,115 +0,0 @@ -// Bits.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - -package org.bson.io; - -import java.io.*; - -public class Bits { - - public static void readFully( InputStream in, byte[] b ) - throws IOException { - readFully( in , b , b.length ); - } - - public static void readFully( InputStream in, byte[] b, int length ) - throws IOException { - readFully(in, b, 0, length); - } - - public static void readFully( InputStream in, byte[] b, int startOffset, int length ) - throws IOException { - - if (b.length < length + startOffset) { - throw new IllegalArgumentException("Buffer is too small"); - } - - int offset = startOffset; - int toRead = length; - while ( toRead > 0 ){ - int bytesRead = in.read( b, offset , toRead ); - if ( bytesRead < 0 ) - throw new EOFException(); - toRead -= bytesRead; - offset += bytesRead; - } - } - - public static int readInt( InputStream in ) - throws IOException { - return readInt( in , new byte[4] ); - } - - public static int readInt( InputStream in , byte[] data ) - throws IOException { - readFully(in, data, 4); - return readInt(data); - } - - public static int readInt( byte[] data ) { - return readInt( data , 0 ); - } - - public static int readInt( byte[] data , int offset ) { - int x = 0; - x |= ( 0xFF & data[offset+0] ) << 0; - x |= ( 0xFF & data[offset+1] ) << 8; - x |= ( 0xFF & data[offset+2] ) << 16; - x |= ( 0xFF & data[offset+3] ) << 24; - return x; - } - - public static int readIntBE( byte[] data , int offset ) { - int x = 0; - x |= ( 0xFF & data[offset+0] ) << 24; - x |= ( 0xFF & data[offset+1] ) << 16; - x |= ( 0xFF & data[offset+2] ) << 8; - x |= ( 0xFF & data[offset+3] ) << 0; - return x; - } - - public static long readLong( InputStream in ) - throws IOException { - return readLong( in , new byte[8] ); - } - - - public static long readLong( InputStream in , byte[] data ) - throws IOException { - readFully(in, data, 8); - return readLong(data); - } - - public static long readLong( byte[] data ) { - return readLong( data , 0 ); - } - - public static long readLong( byte[] data , int offset ) { - long x = 0; - x |= ( 0xFFL & data[offset+0] ) << 0; - x |= ( 0xFFL & data[offset+1] ) << 8; - x |= ( 0xFFL & data[offset+2] ) << 16; - x |= ( 0xFFL & data[offset+3] ) << 24; - x |= ( 0xFFL & data[offset+4] ) << 32; - x |= ( 0xFFL & data[offset+5] ) << 40; - x |= ( 0xFFL & data[offset+6] ) << 48; - x |= ( 0xFFL & data[offset+7] ) << 56; - return x; - } -} diff --git a/src/main/org/bson/io/OutputBuffer.java b/src/main/org/bson/io/OutputBuffer.java deleted file mode 100644 index 94d6023f442..00000000000 --- a/src/main/org/bson/io/OutputBuffer.java +++ /dev/null @@ -1,245 +0,0 @@ -// OutputBuffer.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.bson.io; - -import org.bson.BSONException; - -import java.io.*; -import java.security.*; - -public abstract class OutputBuffer extends OutputStream { - - public abstract void write(byte[] b); - public abstract void write(byte[] b, int off, int len); - public abstract void write(int b); - - public abstract int getPosition(); - - /** - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. - */ - @Deprecated - public abstract void setPosition( int position ); - - /** - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. - */ - @Deprecated - public abstract void seekEnd(); - - /** - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. - */ - @Deprecated - public abstract void seekStart(); - - /** - * @return size of data so far - */ - public abstract int size(); - - /** - * @return bytes written - */ - public abstract int pipe( OutputStream out ) - throws IOException; - - /** - * mostly for testing - */ - public byte [] toByteArray(){ - try { - final ByteArrayOutputStream bout = new ByteArrayOutputStream( size() ); - pipe( bout ); - return bout.toByteArray(); - } - catch ( IOException ioe ){ - throw new RuntimeException( "should be impossible" , ioe ); - } - } - - /** - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. - */ - @Deprecated - public String asString(){ - return new String( toByteArray() ); - } - - /** - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. - */ - @Deprecated - public String asString( String encoding ) - throws UnsupportedEncodingException { - return new String( toByteArray() , encoding ); - } - - /** - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. - */ - @Deprecated - public String hex(){ - final StringBuilder buf = new StringBuilder(); - try { - pipe( new OutputStream(){ - public void write( int b ){ - String s = Integer.toHexString(0xff & b); - - if (s.length() < 2) - buf.append("0"); - buf.append(s); - } - } - ); - } - catch ( IOException ioe ){ - throw new RuntimeException( "impossible" ); - } - return buf.toString(); - } - - - /** - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. - */ - @Deprecated - public String md5(){ - final MessageDigest md5 ; - try { - md5 = MessageDigest.getInstance("MD5"); - } - catch (NoSuchAlgorithmException e) { - throw new RuntimeException("Error - this implementation of Java doesn't support MD5."); - } - md5.reset(); - - try { - pipe( new OutputStream(){ - public void write( byte[] b , int off , int len ){ - md5.update( b , off , len ); - } - - public void write( int b ){ - md5.update( (byte)(b&0xFF) ); - } - } - ); - } - catch ( IOException ioe ){ - throw new RuntimeException( "impossible" ); - } - - return com.mongodb.util.Util.toHex( md5.digest() ); - } - - public void writeInt( int x ){ - write( x >> 0 ); - write( x >> 8 ); - write( x >> 16 ); - write( x >> 24 ); - } - - /** - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. - */ - @Deprecated - public void writeIntBE( int x ){ - write( x >> 24 ); - write( x >> 16 ); - write( x >> 8 ); - write( x ); - } - - /** - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. 
- */ - @Deprecated - public void writeInt( int pos , int x ){ - final int save = getPosition(); - setPosition( pos ); - writeInt( x ); - setPosition( save ); - } - - public void writeLong( long x ){ - write( (byte)(0xFFL & ( x >> 0 ) ) ); - write( (byte)(0xFFL & ( x >> 8 ) ) ); - write( (byte)(0xFFL & ( x >> 16 ) ) ); - write( (byte)(0xFFL & ( x >> 24 ) ) ); - write( (byte)(0xFFL & ( x >> 32 ) ) ); - write( (byte)(0xFFL & ( x >> 40 ) ) ); - write( (byte)(0xFFL & ( x >> 48 ) ) ); - write( (byte)(0xFFL & ( x >> 56 ) ) ); - } - - public void writeDouble( double x ){ - writeLong( Double.doubleToRawLongBits( x ) ); - } - - /** - * Writes C string (null-terminated string) to underlying buffer. - * - * @param str the string - * @return number of bytes written - */ - public int writeCString(final String str) { - - final int len = str.length(); - int total = 0; - - for (int i = 0; i < len;/*i gets incremented*/) { - final int c = Character.codePointAt(str, i); - - if (c == 0x0) { - throw new BSONException( - String.format("BSON cstring '%s' is not valid because it contains a null character at index %d", str, i)); - } - if (c < 0x80) { - write((byte) c); - total += 1; - } else if (c < 0x800) { - write((byte) (0xc0 + (c >> 6))); - write((byte) (0x80 + (c & 0x3f))); - total += 2; - } else if (c < 0x10000) { - write((byte) (0xe0 + (c >> 12))); - write((byte) (0x80 + ((c >> 6) & 0x3f))); - write((byte) (0x80 + (c & 0x3f))); - total += 3; - } else { - write((byte) (0xf0 + (c >> 18))); - write((byte) (0x80 + ((c >> 12) & 0x3f))); - write((byte) (0x80 + ((c >> 6) & 0x3f))); - write((byte) (0x80 + (c & 0x3f))); - total += 4; - } - - i += Character.charCount(c); - } - - write((byte) 0); - total++; - return total; - } - - public String toString(){ - return getClass().getName() + " size: " + size() + " pos: " + getPosition() ; - } -} diff --git a/src/main/org/bson/io/PoolOutputBuffer.java b/src/main/org/bson/io/PoolOutputBuffer.java deleted file mode 100644 index 451f006e5fb..00000000000 --- a/src/main/org/bson/io/PoolOutputBuffer.java +++ /dev/null @@ -1,262 +0,0 @@ -// PoolOutputBuffer.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.bson.io; - -import org.bson.*; -import org.bson.io.*; -import org.bson.util.*; - -import java.io.*; -import java.util.*; - -/** - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. - */ -@Deprecated -public class PoolOutputBuffer extends OutputBuffer { - - public static final int BUF_SIZE = 1024 * 16; - - public PoolOutputBuffer(){ - reset(); - } - - public void reset(){ - _cur.reset(); - _end.reset(); - - for ( int i=0; i<_fromPool.size(); i++ ) - _extra.done( _fromPool.get(i) ); - _fromPool.clear(); - } - - public int getPosition(){ - return _cur.pos(); - } - - /** - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. 
- */ - @Deprecated - public void setPosition( int position ){ - _cur.reset( position ); - } - - /** - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. - */ - @Deprecated - public void seekEnd(){ - _cur.reset( _end ); - } - - /** - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. - */ - @Deprecated - public void seekStart(){ - _cur.reset(); - } - - - public int size(){ - return _end.pos(); - } - - public void write(byte[] b){ - write( b , 0 , b.length ); - } - - public void write(byte[] b, int off, int len){ - while ( len > 0 ){ - byte[] bs = _cur(); - int space = Math.min( bs.length - _cur.y , len ); - System.arraycopy( b , off , bs , _cur.y , space ); - _cur.inc( space ); - len -= space; - off += space; - _afterWrite(); - } - } - - public void write(int b){ - byte[] bs = _cur(); - bs[_cur.getAndInc()] = (byte)(b&0xFF); - _afterWrite(); - } - - void _afterWrite(){ - - if ( _cur.pos() < _end.pos() ){ - // we're in the middle of the total space - // just need to make sure we're not at the end of a buffer - if ( _cur.y == BUF_SIZE ) - _cur.nextBuffer(); - return; - } - - _end.reset( _cur ); - - if ( _end.y < BUF_SIZE ) - return; - - _fromPool.add( _extra.get() ); - _end.nextBuffer(); - _cur.reset( _end ); - } - - byte[] _cur(){ - return _get( _cur.x ); - } - - byte[] _get( int z ){ - if ( z < 0 ) - return _mine; - return _fromPool.get(z); - } - - public int pipe( final OutputStream out ) - throws IOException { - - if ( out == null ) - throw new NullPointerException( "out is null" ); - - int total = 0; - - for ( int i=-1; i<_fromPool.size(); i++ ){ - final byte[] b = _get( i ); - final int amt = _end.len( i ); - out.write( b , 0 , amt ); - total += amt; - } - - return total; - } - - static class Position { - Position(){ - reset(); - } - - void reset(){ - x = -1; - y = 0; - } - - void reset( Position other ){ - x = other.x; - y = other.y; - } - - void reset( int pos ){ - x = ( pos / BUF_SIZE ) - 1; - y = pos % BUF_SIZE; - } - - int pos(){ - return ( ( x + 1 ) * BUF_SIZE ) + y; - } - - int getAndInc(){ - return y++; - } - - void inc( int amt ){ - y += amt; - if ( y > BUF_SIZE ) - throw new IllegalArgumentException( "something is wrong" ); - } - - void nextBuffer(){ - if ( y != BUF_SIZE ) - throw new IllegalArgumentException( "broken" ); - x++; - y = 0; - } - - int len( int which ){ - if ( which < x ) - return BUF_SIZE; - return y; - } - - public String toString(){ - return x + "," + y; - } - - int x; // which buffer -1 == _mine - int y; // position in buffer - } - - public String asAscii(){ - if ( _fromPool.size() > 0 ) - return super.asString(); - - final int m = size(); - final char c[] = m < _chars.length ? 
_chars : new char[m];
-
-        for ( int i=0; i<m; i++ )
-            c[i] = (char)_mine[i];
-
-        return new String( c , 0 , m );
-    }
-
-    public String asString( String encoding )
-        throws UnsupportedEncodingException {
-        if ( _fromPool.size() > 0 )
-            return super.asString( encoding );
-
-        if ( encoding.equals( DEFAULT_ENCODING_1 ) || encoding.equals( DEFAULT_ENCODING_2) ){
-            try {
-                return _encoding.decode( _mine , 0 , size() );
-            }
-            catch ( IOException ioe ){
-                // we failed, fall back
-            }
-        }
-        return new String( _mine , 0 , size() , encoding );
-    }
-
-
-    final byte[] _mine = new byte[BUF_SIZE];
-    final char[] _chars = new char[BUF_SIZE];
-    final List<byte[]> _fromPool = new ArrayList<byte[]>();
-    final UTF8Encoding _encoding = new UTF8Encoding();
-
-    private static final String DEFAULT_ENCODING_1 = "UTF-8";
-    private static final String DEFAULT_ENCODING_2 = "UTF8";
-
-    private final Position _cur = new Position();
-    private final Position _end = new Position();
-
-    private static org.bson.util.SimplePool<byte[]> _extra =
-        new org.bson.util.SimplePool<byte[]>( ( 1024 * 1024 * 10 ) / BUF_SIZE ){
-
-            protected byte[] createNew(){
-                return new byte[BUF_SIZE];
-            }
-
-        };
-}
diff --git a/src/main/org/bson/io/UTF8Encoding.java b/src/main/org/bson/io/UTF8Encoding.java
deleted file mode 100644
index 3bccbe63922..00000000000
--- a/src/main/org/bson/io/UTF8Encoding.java
+++ /dev/null
@@ -1,202 +0,0 @@
-// UTF8Encoding.java
-
-
-/**
- * from postgresql jdbc driver:
- *     postgresql-jdbc-9.0-801.src
-
-
-Copyright (c) 1997-2008, PostgreSQL Global Development Group
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice,
-   this list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution.
-3. Neither the name of the PostgreSQL Global Development Group nor the names
-   of its contributors may be used to endorse or promote products derived
-   from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
- - */ - -/*------------------------------------------------------------------------- -* -* Copyright (c) 2003-2008, PostgreSQL Global Development Group -* -* IDENTIFICATION -* -* -*------------------------------------------------------------------------- -*/ - -//package org.postgresql.core; -package org.bson.io; - -import java.io.IOException; -import java.text.MessageFormat; - -class UTF8Encoding { - - private static final int MIN_2_BYTES = 0x80; - private static final int MIN_3_BYTES = 0x800; - private static final int MIN_4_BYTES = 0x10000; - private static final int MAX_CODE_POINT = 0x10ffff; - - private char[] decoderArray = new char[1024]; - - // helper for decode - private final static void checkByte(int ch, int pos, int len) throws IOException { - if ((ch & 0xc0) != 0x80) - throw new IOException(MessageFormat.format("Illegal UTF-8 sequence: byte {0} of {1} byte sequence is not 10xxxxxx: {2}", - new Object[] { new Integer(pos), new Integer(len), new Integer(ch) })); - } - - private final static void checkMinimal(int ch, int minValue) throws IOException { - if (ch >= minValue) - return; - - int actualLen; - switch (minValue) { - case MIN_2_BYTES: - actualLen = 2; - break; - case MIN_3_BYTES: - actualLen = 3; - break; - case MIN_4_BYTES: - actualLen = 4; - break; - default: - throw new IllegalArgumentException("unexpected minValue passed to checkMinimal: " + minValue); - } - - int expectedLen; - if (ch < MIN_2_BYTES) - expectedLen = 1; - else if (ch < MIN_3_BYTES) - expectedLen = 2; - else if (ch < MIN_4_BYTES) - expectedLen = 3; - else - throw new IllegalArgumentException("unexpected ch passed to checkMinimal: " + ch); - - throw new IOException(MessageFormat.format("Illegal UTF-8 sequence: {0} bytes used to encode a {1} byte value: {2}", - new Object[] { new Integer(actualLen), new Integer(expectedLen), new Integer(ch) })); - } - - /** - * Custom byte[] -> String conversion routine for UTF-8 only. - * This is about twice as fast as using the String(byte[],int,int,String) - * ctor, at least under JDK 1.4.2. The extra checks for illegal representations - * add about 10-15% overhead, but they seem worth it given the number of SQL_ASCII - * databases out there. - * - * @param data the array containing UTF8-encoded data - * @param offset the offset of the first byte in data to decode from - * @param length the number of bytes to decode - * @return a decoded string - * @throws IOException if something goes wrong - */ - public synchronized String decode(byte[] data, int offset, int length) throws IOException { - char[] cdata = decoderArray; - if (cdata.length < length) - cdata = decoderArray = new char[length]; - - int in = offset; - int out = 0; - int end = length + offset; - - try - { - while (in < end) - { - int ch = data[in++] & 0xff; - - // Convert UTF-8 to 21-bit codepoint. - if (ch < 0x80) { - // 0xxxxxxx -- length 1. - } else if (ch < 0xc0) { - // 10xxxxxx -- illegal! 
- throw new IOException(MessageFormat.format("Illegal UTF-8 sequence: initial byte is {0}: {1}", - new Object[] { "10xxxxxx", new Integer(ch) })); - } else if (ch < 0xe0) { - // 110xxxxx 10xxxxxx - ch = ((ch & 0x1f) << 6); - checkByte(data[in], 2, 2); - ch = ch | (data[in++] & 0x3f); - checkMinimal(ch, MIN_2_BYTES); - } else if (ch < 0xf0) { - // 1110xxxx 10xxxxxx 10xxxxxx - ch = ((ch & 0x0f) << 12); - checkByte(data[in], 2, 3); - ch = ch | ((data[in++] & 0x3f) << 6); - checkByte(data[in], 3, 3); - ch = ch | (data[in++] & 0x3f); - checkMinimal(ch, MIN_3_BYTES); - } else if (ch < 0xf8) { - // 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - ch = ((ch & 0x07) << 18); - checkByte(data[in], 2, 4); - ch = ch | ((data[in++] & 0x3f) << 12); - checkByte(data[in], 3, 4); - ch = ch | ((data[in++] & 0x3f) << 6); - checkByte(data[in], 4, 4); - ch = ch | (data[in++] & 0x3f); - checkMinimal(ch, MIN_4_BYTES); - } else { - throw new IOException(MessageFormat.format("Illegal UTF-8 sequence: initial byte is {0}: {1}", - new Object[] { "11111xxx", new Integer(ch) })); - } - - if (ch > MAX_CODE_POINT) - throw new IOException(MessageFormat.format("Illegal UTF-8 sequence: final value is out of range: {0}", - new Integer(ch))); - - // Convert 21-bit codepoint to Java chars: - // 0..ffff are represented directly as a single char - // 10000..10ffff are represented as a "surrogate pair" of two chars - // See: http://java.sun.com/developer/technicalArticles/Intl/Supplementary/ - - if (ch > 0xffff) { - // Use a surrogate pair to represent it. - ch -= 0x10000; // ch is now 0..fffff (20 bits) - cdata[out++] = (char) (0xd800 + (ch >> 10)); // top 10 bits - cdata[out++] = (char) (0xdc00 + (ch & 0x3ff)); // bottom 10 bits - } else if (ch >= 0xd800 && ch < 0xe000) { - // Not allowed to encode the surrogate range directly. - throw new IOException(MessageFormat.format("Illegal UTF-8 sequence: final value is a surrogate value: {0}", - new Integer(ch))); - } else { - // Normal case. - cdata[out++] = (char) ch; - } - } - } - catch (ArrayIndexOutOfBoundsException a) - { - throw new IOException("Illegal UTF-8 sequence: multibyte sequence was truncated"); - } - - // Check if we ran past the end without seeing an exception. - if (in > end) - throw new IOException("Illegal UTF-8 sequence: multibyte sequence was truncated"); - - return new String(cdata, 0, out); - } -} diff --git a/src/main/org/bson/io/package.html b/src/main/org/bson/io/package.html deleted file mode 100644 index 8bf6c44fc88..00000000000 --- a/src/main/org/bson/io/package.html +++ /dev/null @@ -1,3 +0,0 @@ - -

-<html><body>
-        Contains classes implementing I/O operations used by BSON objects.
-</body></html>

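As context for the removals in this package: every org.bson.io class deleted above implements the same little-endian byte order that the BSON spec mandates. The following self-contained sketch (plain Java; the class and method names are illustrative, not driver API) mirrors what Bits.readInt and OutputBuffer.writeInt did and demonstrates the round trip:

import java.io.ByteArrayOutputStream;

// Sketch of the little-endian int round trip implemented by the deleted
// org.bson.io.Bits.readInt and org.bson.io.OutputBuffer.writeInt.
public class LittleEndianSketch {

    // Least significant byte first, as in Bits.readInt(byte[], int).
    static int readIntLE( byte[] data , int offset ){
        int x = 0;
        x |= ( 0xFF & data[offset+0] ) << 0;
        x |= ( 0xFF & data[offset+1] ) << 8;
        x |= ( 0xFF & data[offset+2] ) << 16;
        x |= ( 0xFF & data[offset+3] ) << 24;
        return x;
    }

    // Low byte emitted first, as in OutputBuffer.writeInt(int);
    // ByteArrayOutputStream.write(int) keeps only the low 8 bits.
    static void writeIntLE( ByteArrayOutputStream out , int x ){
        out.write( x >> 0 );
        out.write( x >> 8 );
        out.write( x >> 16 );
        out.write( x >> 24 );
    }

    public static void main( String[] args ){
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        writeIntLE( out , 0x12345678 );
        byte[] bytes = out.toByteArray();
        System.out.printf( "first byte: %02x%n" , bytes[0] & 0xFF );        // 78
        System.out.printf( "round trip: %08x%n" , readIntLE( bytes , 0 ) ); // 12345678
    }
}

The one deliberate exception is ObjectId: its three int fields are stored big-endian, which is why LazyBSONObject reads OIDs with getIntBE and NewBSONDecoder with Bits.readIntBE in the hunks above.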
        - diff --git a/src/main/org/bson/package.html b/src/main/org/bson/package.html deleted file mode 100644 index d10d9f8a7be..00000000000 --- a/src/main/org/bson/package.html +++ /dev/null @@ -1,3 +0,0 @@ - -

-<html><body>
-        Contains the base BSON classes and Encoder/Decoder.
-</body></html>

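The package summary above mentions the Encoder/Decoder pair, and the deleted BSONInput stub earlier in this diff still imports org.bson.BasicBSONDecoder; those public entry points are what callers of the removed internals fall back on. Assuming that 2.x-era API, a minimal encode/decode round trip looks like this sketch:

import org.bson.BSONObject;
import org.bson.BasicBSONDecoder;
import org.bson.BasicBSONEncoder;
import org.bson.BasicBSONObject;

// Encodes a document to BSON bytes and decodes it back.
public class BsonRoundTrip {
    public static void main( String[] args ){
        BSONObject doc = new BasicBSONObject( "name" , "bson" )
                .append( "answer" , 42 );

        byte[] bytes = new BasicBSONEncoder().encode( doc );              // document -> bytes
        BSONObject decoded = new BasicBSONDecoder().readObject( bytes );  // bytes -> document

        System.out.println( decoded.get( "name" ) );   // bson
        System.out.println( decoded.get( "answer" ) ); // 42
    }
}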
        - diff --git a/src/main/org/bson/types/BSONTimestamp.java b/src/main/org/bson/types/BSONTimestamp.java deleted file mode 100644 index 071fa1c816f..00000000000 --- a/src/main/org/bson/types/BSONTimestamp.java +++ /dev/null @@ -1,96 +0,0 @@ -// BSONTimestamp.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.bson.types; - -import java.io.Serializable; -import java.util.Date; - -/** - * this is used for internal increment values. - * for storing normal dates in MongoDB, you should use java.util.Date - * time is seconds since epoch - * inc is an ordinal - */ -public class BSONTimestamp implements Comparable, Serializable { - - private static final long serialVersionUID = -3268482672267936464L; - - static final boolean D = Boolean.getBoolean( "DEBUG.DBTIMESTAMP" ); - - public BSONTimestamp(){ - _inc = 0; - _time = null; - } - - public BSONTimestamp(int time, int inc ){ - _time = new Date( time * 1000L ); - _inc = inc; - } - - /** - * @return get time in seconds since epoch - */ - public int getTime(){ - if ( _time == null ) - return 0; - return (int)(_time.getTime() / 1000); - } - - public int getInc(){ - return _inc; - } - - public String toString(){ - return "TS time:" + _time + " inc:" + _inc; - } - - @Override - public int compareTo(BSONTimestamp ts) { - if(getTime() != ts.getTime()) { - return getTime() - ts.getTime(); - } - else{ - return getInc() - ts.getInc(); - } - } - - @Override - public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + _inc; - result = prime * result + getTime(); - return result; - } - - @Override - public boolean equals(Object obj) { - if (obj == this) - return true; - if (obj instanceof BSONTimestamp) { - BSONTimestamp t2 = (BSONTimestamp) obj; - return getTime() == t2.getTime() && getInc() == t2.getInc(); - } - return false; - } - - final int _inc; - final Date _time; - -} diff --git a/src/main/org/bson/types/BasicBSONList.java b/src/main/org/bson/types/BasicBSONList.java deleted file mode 100644 index a421b05ab39..00000000000 --- a/src/main/org/bson/types/BasicBSONList.java +++ /dev/null @@ -1,165 +0,0 @@ -// BasicBSONList.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.bson.types; - -import org.bson.*; -import org.bson.util.StringRangeSet; - -import java.util.*; - -/** - * Utility class to allow array DBObjects to be created. - *

- * Note: MongoDB will also create arrays from java.util.Lists.
- *
- * <pre>
- * DBObject obj = new BasicBSONList();
- * obj.put( "0", value1 );
- * obj.put( "4", value2 );
- * obj.put( 2, value3 );
- * </pre>
- *
- * This simulates the array [ value1, null, value3, null, value2 ] by creating the
- * DBObject { "0" : value1, "1" : null, "2" : value3, "3" : null, "4" : value2 }.
- *
- * BasicBSONList only supports numeric keys. Passing strings that cannot be converted to ints will cause an
- * IllegalArgumentException.
- *
- * <pre>
- * BasicBSONList list = new BasicBSONList();
- * list.put("1", "bar"); // ok
- * list.put("1E1", "bar"); // throws exception
- * </pre>
- *

        - */ -public class BasicBSONList extends ArrayList implements BSONObject { - - private static final long serialVersionUID = -4415279469780082174L; - - public BasicBSONList() { } - - /** - * Puts a value at an index. - * For interface compatibility. Must be passed a String that is parsable to an int. - * @param key the index at which to insert the value - * @param v the value to insert - * @return the value - * @throws IllegalArgumentException if key cannot be parsed into an int - */ - public Object put( String key , Object v ){ - return put(_getInt( key ), v); - } - - /** - * Puts a value at an index. - * This will fill any unset indexes less than index with null. - * @param key the index at which to insert the value - * @param v the value to insert - * @return the value - */ - public Object put( int key, Object v ) { - while ( key >= size() ) - add( null ); - set( key , v ); - return v; - } - - @SuppressWarnings("unchecked") - public void putAll( Map m ){ - for ( Map.Entry entry : (Set)m.entrySet() ){ - put( entry.getKey().toString() , entry.getValue() ); - } - } - - public void putAll( BSONObject o ){ - for ( String k : o.keySet() ){ - put( k , o.get( k ) ); - } - } - - /** - * Gets a value at an index. - * For interface compatibility. Must be passed a String that is parsable to an int. - * @param key the index - * @return the value, if found, or null - * @throws IllegalArgumentException if key cannot be parsed into an int - */ - public Object get( String key ){ - int i = _getInt( key ); - if ( i < 0 ) - return null; - if ( i >= size() ) - return null; - return get( i ); - } - - public Object removeField( String key ){ - int i = _getInt( key ); - if ( i < 0 ) - return null; - if ( i >= size() ) - return null; - return remove( i ); - } - - /** - * @deprecated - */ - @Deprecated - public boolean containsKey( String key ){ - return containsField(key); - } - - public boolean containsField( String key ){ - int i = _getInt( key , false ); - if ( i < 0 ) - return false; - return i >= 0 && i < size(); - } - - public Set keySet(){ - return new StringRangeSet(size()); - } - - @SuppressWarnings("unchecked") - public Map toMap() { - Map m = new HashMap(); - Iterator i = this.keySet().iterator(); - while (i.hasNext()) { - Object s = i.next(); - m.put(s, this.get(String.valueOf(s))); - } - return m; - } - - int _getInt( String s ){ - return _getInt( s , true ); - } - - int _getInt( String s , boolean err ){ - try { - return Integer.parseInt( s ); - } - catch ( Exception e ){ - if ( err ) - throw new IllegalArgumentException( "BasicBSONList can only work with numeric keys, not: [" + s + "]" ); - return -1; - } - } -} diff --git a/src/main/org/bson/types/Binary.java b/src/main/org/bson/types/Binary.java deleted file mode 100644 index 772f9164ab5..00000000000 --- a/src/main/org/bson/types/Binary.java +++ /dev/null @@ -1,98 +0,0 @@ -// Binary.java - -/** - * See the NOTICE.txt file distributed with this work for - * information regarding copyright ownership. - * - * The authors license this file to you under the - * Apache License, Version 2.0 (the "License"); you may not use - * this file except in compliance with the License. You may - * obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.bson.types; - -import org.bson.BSON; - -import java.io.Serializable; -import java.util.Arrays; - -/** - * generic binary holder - */ -public class Binary implements Serializable { - - private static final long serialVersionUID = 7902997490338209467L; - - /** - * Creates a Binary object with the default binary type of 0 - * - * @param data raw data - */ - public Binary(byte[] data) { - this(BSON.B_GENERAL, data); - } - - /** - * Creates a Binary object - * - * @param type type of the field as encoded in BSON - * @param data raw data - */ - public Binary(byte type, byte[] data) { - _type = type; - _data = data; - } - - public byte getType() { - return _type; - } - - public byte[] getData() { - return _data; - } - - public int length() { - return _data.length; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof Binary)) { - return false; - } - - Binary binary = (Binary) o; - - if (_type != binary._type) { - return false; - } - if (!Arrays.equals(_data, binary._data)) { - return false; - } - - return true; - } - - @Override - public int hashCode() { - int result = (int) _type; - result = 31 * result + (_data != null ? Arrays.hashCode(_data) : 0); - return result; - } - - final byte _type; - final byte[] _data; -} diff --git a/src/main/org/bson/types/Code.java b/src/main/org/bson/types/Code.java deleted file mode 100644 index e03c2bc3e83..00000000000 --- a/src/main/org/bson/types/Code.java +++ /dev/null @@ -1,61 +0,0 @@ -// Code.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.bson.types; - -import java.io.Serializable; -import java.util.*; - -import org.bson.*; - -/** - * for using the Code type - */ -public class Code implements Serializable { - - private static final long serialVersionUID = 475535263314046697L; - - public Code( String code ){ - _code = code; - } - - public String getCode(){ - return _code; - } - - public boolean equals( Object o ){ - if ( ! ( o instanceof Code ) ) - return false; - - Code c = (Code)o; - return _code.equals( c._code ); - } - - public int hashCode(){ - return _code.hashCode(); - } - - @Override - public String toString() { - return getCode(); - } - - final String _code; - -} - diff --git a/src/main/org/bson/types/CodeWScope.java b/src/main/org/bson/types/CodeWScope.java deleted file mode 100644 index 1d332c18d5c..00000000000 --- a/src/main/org/bson/types/CodeWScope.java +++ /dev/null @@ -1,53 +0,0 @@ -// CodeWScope.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.bson.types; - -import org.bson.*; - -/** - * for using the CodeWScope type - */ -public class CodeWScope extends Code { - - private static final long serialVersionUID = -6284832275113680002L; - - public CodeWScope( String code , BSONObject scope ){ - super( code ); - _scope = scope; - } - - public BSONObject getScope(){ - return _scope; - } - - public boolean equals( Object o ){ - if ( ! ( o instanceof CodeWScope ) ) - return false; - - CodeWScope c = (CodeWScope)o; - return _code.equals( c._code ) && _scope.equals( c._scope ); - } - - public int hashCode(){ - return _code.hashCode() ^ _scope.hashCode(); - } - - final BSONObject _scope; -} - diff --git a/src/main/org/bson/types/MaxKey.java b/src/main/org/bson/types/MaxKey.java deleted file mode 100644 index 1130dca07a4..00000000000 --- a/src/main/org/bson/types/MaxKey.java +++ /dev/null @@ -1,47 +0,0 @@ - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.bson.types; - -import java.io.Serializable; - -/** - * Represent the maximum key value regardless of the key's type - */ -public class MaxKey implements Serializable { - - private static final long serialVersionUID = 5123414776151687185L; - - public MaxKey() { - } - - @Override - public boolean equals(Object o) { - return o instanceof MaxKey; - } - - @Override - public int hashCode() { - return 0; - } - - @Override - public String toString() { - return "MaxKey"; - } - -} diff --git a/src/main/org/bson/types/MinKey.java b/src/main/org/bson/types/MinKey.java deleted file mode 100644 index 8f21545ca2b..00000000000 --- a/src/main/org/bson/types/MinKey.java +++ /dev/null @@ -1,47 +0,0 @@ - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.bson.types; - -import java.io.Serializable; - -/** - * Represent the minimum key value regardless of the key's type - */ -public class MinKey implements Serializable { - - private static final long serialVersionUID = 4075901136671855684L; - - public MinKey() { - } - - @Override - public boolean equals(Object o) { - return o instanceof MinKey; - } - - @Override - public int hashCode() { - return 0; - } - - @Override - public String toString() { - return "MinKey"; - } - -} diff --git a/src/main/org/bson/types/ObjectId.java b/src/main/org/bson/types/ObjectId.java deleted file mode 100644 index eff07b4bfa2..00000000000 --- a/src/main/org/bson/types/ObjectId.java +++ /dev/null @@ -1,563 +0,0 @@ -// ObjectId.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.bson.types; - -import java.net.*; -import java.nio.*; -import java.util.*; -import java.util.concurrent.atomic.*; -import java.util.logging.*; - -/** - * A globally unique identifier for objects. - *

- * <p>Consists of 12 bytes, divided as follows:
- * <blockquote><pre>
- * <table border="1">
- * <tr><td>0</td><td>1</td><td>2</td><td>3</td><td>4</td><td>5</td><td>6</td>
- *     <td>7</td><td>8</td><td>9</td><td>10</td><td>11</td></tr>
- * <tr><td colspan="4">time</td><td colspan="3">machine</td>
- *     <td colspan="2">pid</td><td colspan="3">inc</td></tr>
- * </table>
- * </pre></blockquote>
- *
- * @dochub objectids
- */
-public class ObjectId implements Comparable<ObjectId> , java.io.Serializable {
-
-    private static final long serialVersionUID = -4415279469780082174L;
-
-    static final Logger LOGGER = Logger.getLogger( "org.bson.ObjectId" );
-
-    /** Gets a new object id.
-     * @return the new id
-     */
-    public static ObjectId get(){
-        return new ObjectId();
-    }
-
-    /** Checks if a string could be an ObjectId.
-     * @return whether the string could be an object id
-     */
-    public static boolean isValid( String s ){
-        if ( s == null )
-            return false;
-
-        final int len = s.length();
-        if ( len != 24 )
-            return false;
-
-        for ( int i=0; i<len; i++ ){
-            char c = s.charAt( i );
-            if ( c >= '0' && c <= '9' )
-                continue;
-            if ( c >= 'a' && c <= 'f' )
-                continue;
-            if ( c >= 'A' && c <= 'F' )
-                continue;
-
-            return false;
-        }
-
-        return true;
-    }
-
-    /** Turn an object into an ObjectId, if possible.
-     * Strings will be converted into ObjectIds, if possible, and ObjectIds will
-     * be cast and returned. Passing in null returns null.
-     * @param o the object to convert
-     * @return an ObjectId if it can be massaged, null otherwise
-     *
-     * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions.
-     */
-    @Deprecated
-    public static ObjectId massageToObjectId( Object o ){
-        if ( o == null )
-            return null;
-
-        if ( o instanceof ObjectId )
-            return (ObjectId)o;
-
-        if ( o instanceof String ){
-            String s = o.toString();
-            if ( isValid( s ) )
-                return new ObjectId( s );
-        }
-
-        return null;
-    }
-
-    public ObjectId( Date time ){
-        this(time, _genmachine, _nextInc.getAndIncrement());
-    }
-
-    public ObjectId( Date time , int inc ){
-        this( time , _genmachine , inc );
-    }
-
-    /**
-     * @deprecated {@code ObjectId}'s constructed this way do not conform to
-     *             the spec.
-     *             Please use {@link org.bson.types.ObjectId#ObjectId(byte[])} instead.
-     */
-    @Deprecated
-    public ObjectId( Date time , int machine , int inc ){
-        _time = (int)(time.getTime() / 1000);
-        _machine = machine;
-        _inc = inc;
-        _new = false;
-    }
-
-    /** Creates a new instance from a string.
-     * @param s the string to convert
-     * @throws IllegalArgumentException if the string is not a valid id
-     */
-    public ObjectId( String s ){
-        this( s , false );
-    }
-
-    /**
-     * Constructs a new instance of {@code ObjectId} from a string.
-     * @param s the string representation of ObjectId. Can contains only [0-9]|[a-f]|[A-F] characters.
-     * @param babble if {@code true} - convert to 'babble' objectId format
-     *
-     * @deprecated 'babble' format is deprecated. Please use {@link #ObjectId(String)} instead.
-     */
-    @Deprecated
-    public ObjectId( String s , boolean babble ){
-
-        if ( ! isValid( s ) )
-            throw new IllegalArgumentException( "invalid ObjectId [" + s + "]" );
-
-        if ( babble )
-            s = babbleToMongod( s );
-
-        byte b[] = new byte[12];
-        for ( int i=0; i<b.length; i++ ){
-            b[i] = (byte)Integer.parseInt( s.substring( i*2 , i*2 + 2) , 16 );
-        }
-
-        ByteBuffer bb = ByteBuffer.wrap( b );
-        _time = bb.getInt();
-        _machine = bb.getInt();
-        _inc = bb.getInt();
-        _new = false;
-    }
-
-    /** Creates a new instance from a byte array.
-     * @param b the byte array
-     * @throws IllegalArgumentException if the array is not of length 12
-     */
-    public ObjectId( byte[] b ){
-        if ( b.length != 12 )
-            throw new IllegalArgumentException( "need 12 bytes" );
-        ByteBuffer bb = ByteBuffer.wrap( b );
-        _time = bb.getInt();
-        _machine = bb.getInt();
-        _inc = bb.getInt();
-        _new = false;
-    }
-
-    /**
-     * @deprecated {@code ObjectId}'s constructed this way do not conform to
-     *             the spec.
-     *             Please use {@link org.bson.types.ObjectId#ObjectId(byte[])} instead.
-     */
-    @Deprecated
-    public ObjectId(int time, int machine, int inc) {
-        _time = time;
-        _machine = machine;
-        _inc = inc;
-        _new = false;
-    }
-
-    /** Create a new object id.
- */ - public ObjectId(){ - _time = (int) (System.currentTimeMillis() / 1000); - _machine = _genmachine; - _inc = _nextInc.getAndIncrement(); - _new = true; - } - - public int hashCode(){ - int x = _time; - x += ( _machine * 111 ); - x += ( _inc * 17 ); - return x; - } - - public boolean equals( Object o ){ - - if ( this == o ) - return true; - - ObjectId other = massageToObjectId( o ); - if ( other == null ) - return false; - - return - _time == other._time && - _machine == other._machine && - _inc == other._inc; - } - - /** - * @deprecated 'babble' format is deprecated. Please use {@link #toHexString()} instead. - */ - @Deprecated - public String toStringBabble(){ - return babbleToMongod( toStringMongod() ); - } - - /** - * Converts this instance into a 24-byte hexadecimal string representation. - * - * @return a string representation of the ObjectId in hexadecimal format - */ - public String toHexString() { - final StringBuilder buf = new StringBuilder(24); - - for (final byte b : toByteArray()) { - buf.append(String.format("%02x", b & 0xff)); - } - - return buf.toString(); - } - - /** - * @return a string representation of the ObjectId in hexadecimal format - * - * @deprecated Please use {@link #toHexString()} instead. - */ - @Deprecated - public String toStringMongod(){ - byte b[] = toByteArray(); - - StringBuilder buf = new StringBuilder(24); - - for ( int i=0; i<b.length; i++ ){ - int x = b[i] & 0xFF; - String s = Integer.toHexString( x ); - if ( s.length() == 1 ) - buf.append( "0" ); - buf.append( s ); - } - - return buf.toString(); - } - - public byte[] toByteArray(){ - byte b[] = new byte[12]; - ByteBuffer bb = ByteBuffer.wrap( b ); - // by default BB is big endian like we need - bb.putInt( _time ); - bb.putInt( _machine ); - bb.putInt( _inc ); - return b; - } - - static String _pos( String s , int i ){ - return s.substring( i*2 , (i*2)+2 ); - } - - /** - * @deprecated 'babble' format is deprecated. Please use {@link #toHexString()} instead. - */ - @Deprecated - public static String babbleToMongod( String b ){ - if ( ! isValid( b ) ) - throw new IllegalArgumentException( "invalid object id: " + b ); - - StringBuilder buf = new StringBuilder( 24 ); - for ( int i=7; i>=0; i-- ) - buf.append( _pos( b , i ) ); - for ( int i=11; i>=8; i-- ) - buf.append( _pos( b , i ) ); - - return buf.toString(); - } - - public String toString(){ - return toStringMongod(); - } - - int _compareUnsigned( int i , int j ){ - long li = 0xFFFFFFFFL; - li = i & li; - long lj = 0xFFFFFFFFL; - lj = j & lj; - long diff = li - lj; - if (diff < Integer.MIN_VALUE) - return Integer.MIN_VALUE; - if (diff > Integer.MAX_VALUE) - return Integer.MAX_VALUE; - return (int) diff; - } - - public int compareTo( ObjectId id ){ - if ( id == null ) - return -1; - - int x = _compareUnsigned( _time , id._time ); - if ( x != 0 ) - return x; - - x = _compareUnsigned( _machine , id._machine ); - if ( x != 0 ) - return x; - - return _compareUnsigned( _inc , id._inc ); - } - - /** - * Gets the machine identifier. - * - * @return the machine identifier - */ - public int getMachineIdentifier() { - return _machine; - } - - /** - * Gets the machine identifier. - * - * @return the machine identifier - * @deprecated Please use {@code #getMachineIdentifier()} instead. - */ - @Deprecated - public int getMachine() { - return _machine; - } - - /** - * Gets the timestamp as a {@code Date} instance. - * - * @return the Date - */ - public Date getDate() { - return new Date(_time * 1000L); - } - - /** - * Gets the time of this ID, in milliseconds. - * - * @deprecated Please use {@link #getDate()} instead. - */ - @Deprecated - public long getTime(){ - return _time * 1000L; - } - - /** - * Gets the timestamp (number of seconds since the Unix epoch). - * - * @return the timestamp - */ - public int getTimestamp() { - return _time; - } - - /** - * Gets the time of this ID, in seconds. - * @deprecated Please use {@link #getTimestamp()} instead. - */ - @Deprecated - public int getTimeSecond() { - return _time; - } - - /** - * Gets the counter. - * - * @return the counter - */ - public int getCounter() { - return _inc; - } - - - /** - * Gets the counter. - * - * @return the counter - * @see org.bson.types.ObjectId#getCounter() - * @deprecated Please use {@link #getCounter()} instead.
- */ - @Deprecated - public int getInc() { - return _inc; - } - - /** - * Gets the timestamp. - * - * @return the timestamp - * @see org.bson.types.ObjectId#getTimestamp() - * @deprecated Please use {@link #getTimestamp()} instead. - */ - @Deprecated - public int _time(){ - return _time; - } - - /** - * Gets the machine identifier. - * - * @return the machine identifier - * @see org.bson.types.ObjectId#getMachineIdentifier() - * @deprecated Please use {@link #getMachineIdentifier()} instead. - */ - @Deprecated - public int _machine(){ - return _machine; - } - - /** - * Gets the counter. - * - * @return the counter - * @see org.bson.types.ObjectId#getCounter() - * @deprecated Please use {@link #getCounter()} instead. - */ - @Deprecated - public int _inc(){ - return _inc; - } - - /** - * @deprecated 'new' flag breaks the immutability of the {@code ObjectId} class - * and will be dropped in 3.x versions of the driver - */ - @Deprecated - public boolean isNew() { - return _new; - } - - /** - * @deprecated 'new' flag breaks the immutability of the {@code ObjectId} class - * and will be dropped in 3.x versions of the driver - */ - @Deprecated - public void notNew(){ - _new = false; - } - - /** - * Gets the machine identifier. - * - * @return the machine identifier - * @see org.bson.types.ObjectId#getMachineIdentifier() - * @deprecated Please use {@link #getMachineIdentifier()} instead. - */ - @Deprecated - public static int getGenMachineId() { - return _genmachine; - } - - /** - * Gets the current value of the auto-incrementing counter. - */ - public static int getCurrentCounter() { - return _nextInc.get(); - } - - /** - * Gets the current value of the auto-incrementing counter. - * - * @deprecated Please use {@link #getCurrentCounter()} instead. - */ - @Deprecated - public static int getCurrentInc() { - return _nextInc.get(); - } - - final int _time; - final int _machine; - final int _inc; - - boolean _new; - - /** - * @deprecated This method is NOT a part of public API and will be dropped in 3.x versions. - */ - @Deprecated - public static int _flip( int x ){ - int z = 0; - z |= ( ( x << 24 ) & 0xFF000000 ); - z |= ( ( x << 8 ) & 0x00FF0000 ); - z |= ( ( x >> 8 ) & 0x0000FF00 ); - z |= ( ( x >> 24 ) & 0x000000FF ); - return z; - } - - private static AtomicInteger _nextInc = new AtomicInteger( (new java.util.Random()).nextInt() ); - - private static final int _genmachine; - static { - - try { - // build a 2-byte machine piece based on NICs info - int machinePiece; - { - try { - StringBuilder sb = new StringBuilder(); - Enumeration<NetworkInterface> e = NetworkInterface.getNetworkInterfaces(); - while ( e.hasMoreElements() ){ - NetworkInterface ni = e.nextElement(); - sb.append( ni.toString() ); - } - machinePiece = sb.toString().hashCode() << 16; - } catch (Throwable e) { - // exception sometimes happens with IBM JVM, use random - LOGGER.log(Level.WARNING, e.getMessage(), e); - machinePiece = (new Random().nextInt()) << 16; - } - LOGGER.fine( "machine piece post: " + Integer.toHexString( machinePiece ) ); - } - - // add a 2 byte process piece. It must represent not only the JVM but the class loader. - // Since static vars belong to the class loader there could be collisions otherwise - final int processPiece; - { - int processId = new java.util.Random().nextInt(); - try { - processId = java.lang.management.ManagementFactory.getRuntimeMXBean().getName().hashCode(); - } - catch ( Throwable t ){ - } - - ClassLoader loader = ObjectId.class.getClassLoader(); - int loaderId = loader != null ?
System.identityHashCode(loader) : 0; - - StringBuilder sb = new StringBuilder(); - sb.append(Integer.toHexString(processId)); - sb.append(Integer.toHexString(loaderId)); - processPiece = sb.toString().hashCode() & 0xFFFF; - LOGGER.fine( "process piece: " + Integer.toHexString( processPiece ) ); - } - - _genmachine = machinePiece | processPiece; - LOGGER.fine( "machine : " + Integer.toHexString( _genmachine ) ); - } - catch ( Exception e ){ - throw new RuntimeException( e ); - } - - } -} - diff --git a/src/main/org/bson/types/Symbol.java b/src/main/org/bson/types/Symbol.java deleted file mode 100644 index 9c7d41b81ae..00000000000 --- a/src/main/org/bson/types/Symbol.java +++ /dev/null @@ -1,74 +0,0 @@ -// Symbol.java - -/** - * Copyright (C) 2009 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.bson.types; - -import java.io.Serializable; - -/** - * Class to hold a BSON symbol object, which is an interned string in Ruby - */ -public class Symbol implements Serializable { - - private static final long serialVersionUID = 1326269319883146072L; - - public Symbol(String s) { - _symbol = s; - } - - public String getSymbol(){ - return _symbol; - } - - /** - * Will compare equal to a String that is equal to the String that this holds - * @param o - * @return - */ - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null) return false; - - String otherSymbol; - if (o instanceof Symbol) { - otherSymbol = ((Symbol) o)._symbol; - } - else if (o instanceof String) { - otherSymbol = (String) o; - } - else { - return false; - } - - if (_symbol != null ? !_symbol.equals(otherSymbol) : otherSymbol != null) return false; - - return true; - } - - @Override - public int hashCode() { - return _symbol != null ? _symbol.hashCode() : 0; - } - - public String toString(){ - return _symbol; - } - - private final String _symbol; -} diff --git a/src/main/org/bson/types/package.html b/src/main/org/bson/types/package.html deleted file mode 100644 index ed93a43b598..00000000000 --- a/src/main/org/bson/types/package.html +++ /dev/null @@ -1,3 +0,0 @@ - -

        Contains classes implementing various BSON types.
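The ObjectId layout documented above (a 4-byte timestamp in seconds, then machine, pid and counter pieces) means the creation time is recoverable from the first 8 hex digits of the 24-character string form. A minimal standalone sketch of that idea, not part of the deleted driver source; the class and method names are invented for illustration:

    import java.util.Date;

    // Sketch: recover the creation time from an ObjectId hex string, assuming
    // the layout in the deleted ObjectId.java javadoc (bytes 0-3 hold seconds
    // since the Unix epoch, big-endian). Names are illustrative only.
    public class ObjectIdTimeSketch {
        public static Date timestampOf(String hex) {
            if (hex == null || hex.length() != 24) {
                throw new IllegalArgumentException("invalid ObjectId [" + hex + "]");
            }
            long seconds = Long.parseLong(hex.substring(0, 8), 16); // first 4 bytes
            return new Date(seconds * 1000L);
        }

        public static void main(String[] args) {
            // 0x4d000000 seconds after the epoch falls in December 2010.
            System.out.println(timestampOf("4d0000000000000000000000"));
        }
    }

This mirrors what the deleted getTimestamp()/getDate() accessors expose, without parsing the rest of the id.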

- diff --git a/src/main/org/bson/util/AbstractCopyOnWriteMap.java b/src/main/org/bson/util/AbstractCopyOnWriteMap.java deleted file mode 100644 index b1189004b48..00000000000 --- a/src/main/org/bson/util/AbstractCopyOnWriteMap.java +++ /dev/null @@ -1,630 +0,0 @@ -/** - * Copyright 2008 Atlassian Pty Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.bson.util; - -import static org.bson.util.Assertions.notNull; -import static java.util.Collections.unmodifiableCollection; -import static java.util.Collections.unmodifiableSet; -import org.bson.util.annotations.GuardedBy; -import org.bson.util.annotations.ThreadSafe; - -import java.io.Serializable; -import java.util.Collection; -import java.util.Collections; -import java.util.Iterator; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; - -/** - * Abstract base class for COW {@link Map} implementations that delegate to an - * internal map. - * - * @param <K> The key type - * @param <V> The value type - * @param <M> the internal {@link Map} or extension for things like sorted and - * navigable maps. - */ -@ThreadSafe -abstract class AbstractCopyOnWriteMap<K, V, M extends Map<K, V>> implements ConcurrentMap<K, V>, Serializable { - private static final long serialVersionUID = 4508989182041753878L; - - @GuardedBy("lock") - private volatile M delegate; - - // import edu.umd.cs.findbugs.annotations.@SuppressWarnings - private final transient Lock lock = new ReentrantLock(); - - // private final transient EntrySet entrySet = new EntrySet(); - // private final transient KeySet keySet = new KeySet(); - // private final transient Values values = new Values(); - // private final View.Type viewType; - private final View<K, V> view; - - /** - * Create a new {@link CopyOnWriteMap} with the supplied {@link Map} to - * initialize the values. - * - * @param map the initial map to initialize with - * @param viewType for writable or read-only key, value and entrySet views - */ - protected <N extends Map<? extends K, ? extends V>> AbstractCopyOnWriteMap(final N map, final View.Type viewType) { - this.delegate = notNull("delegate", copy(notNull("map", map))); - this.view = notNull("viewType", viewType).get(this); - } - - /** - * Copy function, implemented by sub-classes. - * - * @param <N> the map to copy and return. - * @param map the initial values of the newly created map. - * @return a new map. Will never be modified after construction. - */ - @GuardedBy("lock") - abstract <N extends Map<? extends K, ? extends V>> M copy(N map); - - // - // mutable operations - // - - public final void clear() { - lock.lock(); - try { - set(copy(Collections.<K, V>
emptyMap())); - } finally { - lock.unlock(); - } - } - - public final V remove(final Object key) { - lock.lock(); - try { - // short circuit if key doesn't exist - if (!delegate.containsKey(key)) { - return null; - } - final M map = copy(); - try { - return map.remove(key); - } finally { - set(map); - } - } finally { - lock.unlock(); - } - } - - public boolean remove(final Object key, final Object value) { - lock.lock(); - try { - if (delegate.containsKey(key) && equals(value, delegate.get(key))) { - final M map = copy(); - map.remove(key); - set(map); - return true; - } else { - return false; - } - } finally { - lock.unlock(); - } - } - - public boolean replace(final K key, final V oldValue, final V newValue) { - lock.lock(); - try { - if (!delegate.containsKey(key) || !equals(oldValue, delegate.get(key))) { - return false; - } - final M map = copy(); - map.put(key, newValue); - set(map); - return true; - } finally { - lock.unlock(); - } - } - - public V replace(final K key, final V value) { - lock.lock(); - try { - if (!delegate.containsKey(key)) { - return null; - } - final M map = copy(); - try { - return map.put(key, value); - } finally { - set(map); - } - } finally { - lock.unlock(); - } - } - - public final V put(final K key, final V value) { - lock.lock(); - try { - final M map = copy(); - try { - return map.put(key, value); - } finally { - set(map); - } - } finally { - lock.unlock(); - } - } - - public V putIfAbsent(final K key, final V value) { - lock.lock(); - try { - if (!delegate.containsKey(key)) { - final M map = copy(); - try { - return map.put(key, value); - } finally { - set(map); - } - } - return delegate.get(key); - } finally { - lock.unlock(); - } - } - - public final void putAll(final Map<? extends K, ? extends V> t) { - lock.lock(); - try { - final M map = copy(); - map.putAll(t); - set(map); - } finally { - lock.unlock(); - } - } - - protected M copy() { - lock.lock(); - try { - return copy(delegate); - } finally { - lock.unlock(); - } - } - - @GuardedBy("lock") - protected void set(final M map) { - delegate = map; - } - - // - // Collection views - // - - public final Set<Map.Entry<K, V>> entrySet() { - return view.entrySet(); - } - - public final Set<K> keySet() { - return view.keySet(); - } - - public final Collection<V> values() { - return view.values(); - } - - // - // delegate operations - // - - public final boolean containsKey(final Object key) { - return delegate.containsKey(key); - } - - public final boolean containsValue(final Object value) { - return delegate.containsValue(value); - } - - public final V get(final Object key) { - return delegate.get(key); - } - - public final boolean isEmpty() { - return delegate.isEmpty(); - } - - public final int size() { - return delegate.size(); - } - - @Override - public final boolean equals(final Object o) { - return delegate.equals(o); - } - - @Override - public final int hashCode() { - return delegate.hashCode(); - } - - protected final M getDelegate() { - return delegate; - } - - @Override - public String toString() { - return delegate.toString(); - } - - // - // inner classes - // - - private class KeySet extends CollectionView<K> implements Set<K> { - - @Override - Collection<K> getDelegate() { - return delegate.keySet(); - } - - // - // mutable operations - // - - public void clear() { - lock.lock(); - try { - final M map = copy(); - map.keySet().clear(); - set(map); - } finally { - lock.unlock(); - } - } - - public boolean remove(final Object o) { - return AbstractCopyOnWriteMap.this.remove(o) != null; - } - - public boolean removeAll(final Collection<?> c) { -
lock.lock(); - try { - final M map = copy(); - try { - return map.keySet().removeAll(c); - } finally { - set(map); - } - } finally { - lock.unlock(); - } - } - - public boolean retainAll(final Collection<?> c) { - lock.lock(); - try { - final M map = copy(); - try { - return map.keySet().retainAll(c); - } finally { - set(map); - } - } finally { - lock.unlock(); - } - } - } - - private final class Values extends CollectionView<V> { - - @Override - Collection<V> getDelegate() { - return delegate.values(); - } - - public void clear() { - lock.lock(); - try { - final M map = copy(); - map.values().clear(); - set(map); - } finally { - lock.unlock(); - } - } - - public boolean remove(final Object o) { - lock.lock(); - try { - if (!contains(o)) { - return false; - } - final M map = copy(); - try { - return map.values().remove(o); - } finally { - set(map); - } - } finally { - lock.unlock(); - } - } - - public boolean removeAll(final Collection<?> c) { - lock.lock(); - try { - final M map = copy(); - try { - return map.values().removeAll(c); - } finally { - set(map); - } - } finally { - lock.unlock(); - } - } - - public boolean retainAll(final Collection<?> c) { - lock.lock(); - try { - final M map = copy(); - try { - return map.values().retainAll(c); - } finally { - set(map); - } - } finally { - lock.unlock(); - } - } - } - - private class EntrySet extends CollectionView<Map.Entry<K, V>> implements Set<Map.Entry<K, V>> { - - @Override - Collection<Map.Entry<K, V>> getDelegate() { - return delegate.entrySet(); - } - - public void clear() { - lock.lock(); - try { - final M map = copy(); - map.entrySet().clear(); - set(map); - } finally { - lock.unlock(); - } - } - - public boolean remove(final Object o) { - lock.lock(); - try { - if (!contains(o)) { - return false; - } - final M map = copy(); - try { - return map.entrySet().remove(o); - } finally { - set(map); - } - } finally { - lock.unlock(); - } - } - - public boolean removeAll(final Collection<?> c) { - lock.lock(); - try { - final M map = copy(); - try { - return map.entrySet().removeAll(c); - } finally { - set(map); - } - } finally { - lock.unlock(); - } - } - - public boolean retainAll(final Collection<?> c) { - lock.lock(); - try { - final M map = copy(); - try { - return map.entrySet().retainAll(c); - } finally { - set(map); - } - } finally { - lock.unlock(); - } - } - } - - private static class UnmodifiableIterator<T> implements Iterator<T> { - private final Iterator<T> delegate; - - public UnmodifiableIterator(final Iterator<T> delegate) { - this.delegate = delegate; - } - - public boolean hasNext() { - return delegate.hasNext(); - } - - public T next() { - return delegate.next(); - } - - public void remove() { - throw new UnsupportedOperationException(); - } - } - - protected static abstract class CollectionView<E> implements Collection<E> { - - abstract Collection<E> getDelegate(); - - // - // delegate operations - // - - public final boolean contains(final Object o) { - return getDelegate().contains(o); - } - - public final boolean containsAll(final Collection<?> c) { - return getDelegate().containsAll(c); - } - - public final Iterator<E> iterator() { - return new UnmodifiableIterator<E>(getDelegate().iterator()); - } - - public final boolean isEmpty() { - return getDelegate().isEmpty(); - } - - public final int size() { - return getDelegate().size(); - } - - public final Object[] toArray() { - return getDelegate().toArray(); - } - - public final <T> T[] toArray(final T[] a) { - return getDelegate().toArray(a); - } - - @Override - public int hashCode() { - return getDelegate().hashCode(); - } - - @Override - public boolean equals(final
Object obj) { - return getDelegate().equals(obj); - } - - @Override - public String toString() { - return getDelegate().toString(); - } - - // - // unsupported operations - // - - public final boolean add(final E o) { - throw new UnsupportedOperationException(); - } - - public final boolean addAll(final Collection<? extends E> c) { - throw new UnsupportedOperationException(); - } - } - - private boolean equals(final Object o1, final Object o2) { - if (o1 == null) { - return o2 == null; - } - return o1.equals(o2); - } - - /** - * Provides access to the views of the underlying key, value and entry - * collections. - */ - public static abstract class View<K, V> { - View() {} - - abstract Set<K> keySet(); - - abstract Set<Map.Entry<K, V>> entrySet(); - - abstract Collection<V> values(); - - /** - * The different types of {@link View} available - */ - public enum Type { - STABLE { - @Override - <K, V, M extends Map<K, V>> View<K, V> get(final AbstractCopyOnWriteMap<K, V, M> host) { - return host.new Immutable(); - } - }, - LIVE { - @Override - <K, V, M extends Map<K, V>> View<K, V> get(final AbstractCopyOnWriteMap<K, V, M> host) { - return host.new Mutable(); - } - }; - abstract <K, V, M extends Map<K, V>> View<K, V> get(AbstractCopyOnWriteMap<K, V, M> host); - } - } - - final class Immutable extends View<K, V> implements Serializable { - - private static final long serialVersionUID = -4158727180429303818L; - - @Override - public Set<K> keySet() { - return unmodifiableSet(delegate.keySet()); - } - - @Override - public Set<Map.Entry<K, V>> entrySet() { - return unmodifiableSet(delegate.entrySet()); - } - - @Override - public Collection<V> values() { - return unmodifiableCollection(delegate.values()); - } - } - - final class Mutable extends View<K, V> implements Serializable { - - private static final long serialVersionUID = 1624520291194797634L; - - private final transient KeySet keySet = new KeySet(); - private final transient EntrySet entrySet = new EntrySet(); - private final transient Values values = new Values(); - - @Override - public Set<K> keySet() { - return keySet; - } - - @Override - public Set<Map.Entry<K, V>> entrySet() { - return entrySet; - } - - @Override - public Collection<V> values() { - return values; - } - } -} diff --git a/src/main/org/bson/util/Assertions.java b/src/main/org/bson/util/Assertions.java deleted file mode 100644 index 47425e5e68a..00000000000 --- a/src/main/org/bson/util/Assertions.java +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Copyright 2008 Atlassian Pty Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.bson.util; - -/** - * Design by contract assertions. - * - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions.
- */ -@Deprecated -public class Assertions { - public static <T> T notNull(final String name, final T notNull) throws IllegalArgumentException { - if (notNull == null) { - throw new NullArgumentException(name); - } - return notNull; - } - - public static void isTrue(final String name, final boolean check) throws IllegalArgumentException { - if (!check) { - throw new IllegalArgumentException(name); - } - } - - // /CLOVER:OFF - private Assertions() {} - - // /CLOVER:ON - - static class NullArgumentException extends IllegalArgumentException { - private static final long serialVersionUID = 6178592463723624585L; - - NullArgumentException(final String name) { - super(name + " should not be null!"); - } - } -} diff --git a/src/main/org/bson/util/ClassAncestry.java b/src/main/org/bson/util/ClassAncestry.java deleted file mode 100644 index 5581dbe3f2b..00000000000 --- a/src/main/org/bson/util/ClassAncestry.java +++ /dev/null @@ -1,72 +0,0 @@ -package org.bson.util; - -import static java.util.Collections.unmodifiableList; -import static org.bson.util.CopyOnWriteMap.newHashMap; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.ConcurrentMap; - -class ClassAncestry { - - /** - * getAncestry - * - * Walks superclass and interface graph, superclasses first, then - * interfaces, to compute an ancestry list. Supertypes are visited left to - * right. Duplicates are removed such that no Class will appear in the list - * before one of its subtypes. - * - * Does not need to be synchronized, races are harmless as the Class graph - * does not change at runtime. - */ - public static <T> List<Class<?>> getAncestry(Class<T> c) { - final ConcurrentMap<Class<?>, List<Class<?>>> cache = getClassAncestryCache(); - while (true) { - List<Class<?>> cachedResult = cache.get(c); - if (cachedResult != null) { - return cachedResult; - } - cache.putIfAbsent(c, computeAncestry(c)); - } - } - - /** - * computeAncestry, starting with children and going back to parents - */ - private static List<Class<?>> computeAncestry(Class<?> c) { - final List<Class<?>> result = new ArrayList<Class<?>>(); - result.add(Object.class); - computeAncestry(c, result); - Collections.reverse(result); - return unmodifiableList(new ArrayList<Class<?>>(result)); - } - - private static void computeAncestry(Class<?> c, List<Class<?>> result) { - if ((c == null) || (c == Object.class)) { - return; - } - - // first interfaces (looks backwards but is not) - Class<?>[] interfaces = c.getInterfaces(); - for (int i = interfaces.length - 1; i >= 0; i--) { - computeAncestry(interfaces[i], result); - } - - // next superclass - computeAncestry(c.getSuperclass(), result); - - if (!result.contains(c)) - result.add(c); - } - - /** - * classAncestryCache - */ - private static ConcurrentMap<Class<?>, List<Class<?>>> getClassAncestryCache() { - return (_ancestryCache); - } - - private static final ConcurrentMap<Class<?>, List<Class<?>>> _ancestryCache = newHashMap(); -} diff --git a/src/main/org/bson/util/ClassMap.java b/src/main/org/bson/util/ClassMap.java deleted file mode 100644 index ee7cf090c6e..00000000000 --- a/src/main/org/bson/util/ClassMap.java +++ /dev/null @@ -1,100 +0,0 @@ -// ClassMap.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.bson.util; - -import java.util.List; -import java.util.Map; - -/** - * Maps Class objects to values. A ClassMap is different from a regular Map in - * that get(c) not only looks to see if 'c' is a key in the Map, but also - * walks up the superclass and interface graph of 'c' to find matches. Derived - * matches of this sort are then "cached" in the registry so that matches are - * faster on future gets. - * - * This is a very useful class for Class based registries. - * - * Example: - * - * ClassMap<String> m = new ClassMap<String>(); m.put(Animal.class, "Animal"); - * m.put(Fox.class, "Fox"); m.get(Fox.class) --> "Fox" m.get(Dog.class) --> "Animal" - * - * (assuming Dog.class < Animal.class) - */ -public class ClassMap<T> { - /** - * Walks superclass and interface graph, superclasses first, then - * interfaces, to compute an ancestry list. Supertypes are visited left to - * right. Duplicates are removed such that no Class will appear in the list - * before one of its subtypes. - */ - public static <T> List<Class<?>> getAncestry(Class<T> c) { - return ClassAncestry.getAncestry(c); - } - - private final class ComputeFunction implements Function<Class<?>, T> { - @Override - public T apply(Class<?> a) { - for (Class<?> cls : getAncestry(a)) { - T result = map.get(cls); - if (result != null) { - return result; - } - } - return null; - } - }; - - private final Map<Class<?>, T> map = CopyOnWriteMap.newHashMap(); - private final Map<Class<?>, T> cache = ComputingMap.create(new ComputeFunction()); - - - public T get(Object key) { - return cache.get(key); - } - - public T put(Class<?> key, T value) { - try { - return map.put(key, value); - } finally { - cache.clear(); - } - } - - public T remove(Object key) { - try { - return map.remove(key); - } finally { - cache.clear(); - } - } - - public void clear() { - map.clear(); - cache.clear(); - } - - public int size() { - return map.size(); - } - - public boolean isEmpty() { - return map.isEmpty(); - } -} diff --git a/src/main/org/bson/util/ComputingMap.java b/src/main/org/bson/util/ComputingMap.java deleted file mode 100644 index 85240931307..00000000000 --- a/src/main/org/bson/util/ComputingMap.java +++ /dev/null @@ -1,109 +0,0 @@ -package org.bson.util; - -import static org.bson.util.Assertions.notNull; - -import java.util.Collection; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ConcurrentMap; - -final class ComputingMap<K, V> implements Map<K, V>, Function<K, V> { - - public static <K, V> Map<K, V> create(Function<K, V> function) { - return new ComputingMap<K, V>(CopyOnWriteMap.<K, V>
newHashMap(), function); - } - - private final ConcurrentMap<K, V> map; - private final Function<K, V> function; - - ComputingMap(ConcurrentMap<K, V> map, Function<K, V> function) { - this.map = notNull("map", map); - this.function = notNull("function", function); - } - - public V get(Object key) { - while (true) { - V v = map.get(key); - if (v != null) - return v; - @SuppressWarnings("unchecked") - K k = (K) key; - V value = function.apply(k); - if (value == null) - return null; - map.putIfAbsent(k, value); - } - } - - public V apply(K k) { - return get(k); - } - - public V putIfAbsent(K key, V value) { - return map.putIfAbsent(key, value); - } - - public boolean remove(Object key, Object value) { - return map.remove(key, value); - } - - public boolean replace(K key, V oldValue, V newValue) { - return map.replace(key, oldValue, newValue); - } - - public V replace(K key, V value) { - return map.replace(key, value); - } - - public int size() { - return map.size(); - } - - public boolean isEmpty() { - return map.isEmpty(); - } - - public boolean containsKey(Object key) { - return map.containsKey(key); - } - - public boolean containsValue(Object value) { - return map.containsValue(value); - } - - public V put(K key, V value) { - return map.put(key, value); - } - - public V remove(Object key) { - return map.remove(key); - } - - public void putAll(Map<? extends K, ? extends V> m) { - map.putAll(m); - } - - public void clear() { - map.clear(); - } - - public Set<K> keySet() { - return map.keySet(); - } - - public Collection<V> values() { - return map.values(); - } - - public Set<Map.Entry<K, V>> entrySet() { - return map.entrySet(); - } - - public boolean equals(Object o) { - return map.equals(o); - } - - public int hashCode() { - return map.hashCode(); - } -} diff --git a/src/main/org/bson/util/CopyOnWriteMap.java b/src/main/org/bson/util/CopyOnWriteMap.java deleted file mode 100644 index 716241ee6b2..00000000000 --- a/src/main/org/bson/util/CopyOnWriteMap.java +++ /dev/null @@ -1,273 +0,0 @@ -/** - * Copyright 2008 Atlassian Pty Ltd - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.bson.util; - -import java.util.Collections; -import java.util.HashMap; -import java.util.LinkedHashMap; -import java.util.Map; -import java.util.TreeMap; -import java.util.WeakHashMap; - -import org.bson.util.annotations.GuardedBy; -import org.bson.util.annotations.ThreadSafe; - -import org.bson.util.AbstractCopyOnWriteMap.View.Type; - -/** - * A thread-safe variant of {@link Map} in which all mutative operations (the - * "destructive" operations described by {@link Map} put, remove and so on) are - * implemented by making a fresh copy of the underlying map. - *

- * This is ordinarily too costly, but may be more efficient than - * alternatives when traversal operations vastly out-number mutations, and is - * useful when you cannot or don't want to synchronize traversals, yet need to - * preclude interference among concurrent threads. The "snapshot" style - * iterators on the collections returned by {@link #entrySet()}, - * {@link #keySet()} and {@link #values()} use a reference to the internal map - * at the point that the iterator was created. This map never changes during the - * lifetime of the iterator, so interference is impossible and the iterator is - * guaranteed not to throw ConcurrentModificationException. The - * iterators will not reflect additions, removals, or changes to the map since - * the iterator was created. Removing elements via these iterators is not - * supported. The mutable operations on these collections (remove, retain etc.) - * are supported but as with the {@link Map} interface, add and addAll are not - * and throw {@link UnsupportedOperationException}. - *

- * The actual copy is performed by an abstract {@link #copy(Map)} method. The - * method is responsible for the underlying Map implementation (for instance a - * {@link HashMap}, {@link TreeMap}, {@link LinkedHashMap} etc.) and therefore - * the semantics of what this map will cope with as far as null keys and values, - * iteration ordering etc. See the note below about suitable candidates for - * underlying Map implementations. - *

        - * There are supplied implementations for the common j.u.c {@link Map} - * implementations via the {@link CopyOnWriteMap} static {@link Builder}. - *

- * Collection views of the keys, values and entries are optionally - * {@link View.Type.LIVE live} or {@link View.Type.STABLE stable}. Live views - * are modifiable and will cause a copy if a modifying method is called on them. - * Methods on these will reflect the current state of the collection, although - * iterators will be snapshot style. If the collection views are stable they are - * unmodifiable, and will be a snapshot of the state of the map at the time the - * collection was asked for. - *

- * Please note that the thread-safety guarantees are limited to - * the thread-safety of the non-mutative (non-destructive) operations of the - * underlying map implementation. For instance some implementations such as - * {@link WeakHashMap} and {@link LinkedHashMap} with access ordering are - * actually structurally modified by the {@link #get(Object)} method and are - * therefore not suitable candidates as delegates for this class. - * - * @param <K> the key type - * @param <V> the value type - * @author Jed Wesley-Smith - */ -@ThreadSafe -abstract class CopyOnWriteMap<K, V> extends AbstractCopyOnWriteMap<K, V, Map<K, V>> { - private static final long serialVersionUID = 7935514534647505917L; - - /** - * Get a {@link Builder} for a {@link CopyOnWriteMap} instance. - * - * @param <K> key type - * @param <V> value type - * @return a fresh builder - */ - public static <K, V> Builder<K, V> builder() { - return new Builder<K, V>(); - } - - /** - * Build a {@link CopyOnWriteMap} and specify all the options. - * - * @param <K> key type - * @param <V> value type - */ - public static class Builder<K, V> { - private View.Type viewType = View.Type.STABLE; - private final Map<K, V> initialValues = new HashMap<K, V>(); - - Builder() {} - - /** - * Views are stable (fixed in time) and unmodifiable. - */ - public Builder<K, V> stableViews() { - viewType = View.Type.STABLE; - return this; - } - - /** - * Add the supplied map's entries to the initial values of the map - * under construction. - */ - public Builder<K, V> addAll(final Map<? extends K, ? extends V> values) { - initialValues.putAll(values); - return this; - } - - /** - * Views are live (reflecting concurrent updates) and mutator methods - * are supported. - */ - public Builder<K, V> liveViews() { - viewType = View.Type.LIVE; - return this; - } - - public CopyOnWriteMap<K, V> newHashMap() { - return new Hash<K, V>(initialValues, viewType); - } - - public CopyOnWriteMap<K, V> newLinkedMap() { - return new Linked<K, V>(initialValues, viewType); - } - } - - /** - * Creates a new {@link CopyOnWriteMap} with an underlying {@link HashMap}. - *

- * This map has {@link View.Type.STABLE stable} views. - */ - public static <K, V> CopyOnWriteMap<K, V> newHashMap() { - final Builder<K, V> builder = builder(); - return builder.newHashMap(); - } - - /** - * Creates a new {@link CopyOnWriteMap} with an underlying {@link HashMap} - * using the supplied map as the initial values. - *

- * This map has {@link View.Type.STABLE stable} views. - */ - public static <K, V> CopyOnWriteMap<K, V> newHashMap(final Map<? extends K, ? extends V> map) { - final Builder<K, V> builder = builder(); - return builder.addAll(map).newHashMap(); - } - - /** - * Creates a new {@link CopyOnWriteMap} with an underlying - * {@link LinkedHashMap}. Iterators for this map will return elements in - * insertion order. - *

- * This map has {@link View.Type.STABLE stable} views. - */ - public static <K, V> CopyOnWriteMap<K, V> newLinkedMap() { - final Builder<K, V> builder = builder(); - return builder.newLinkedMap(); - } - - /** - * Creates a new {@link CopyOnWriteMap} with an underlying - * {@link LinkedHashMap} using the supplied map as the initial values. - * Iterators for this map will return elements in insertion order. - *

- * This map has {@link View.Type.STABLE stable} views. - */ - public static <K, V> CopyOnWriteMap<K, V> newLinkedMap(final Map<? extends K, ? extends V> map) { - final Builder<K, V> builder = builder(); - return builder.addAll(map).newLinkedMap(); - } - - // - // constructors - // - - /** - * Create a new {@link CopyOnWriteMap} with the supplied {@link Map} to - * initialize the values. - * - * @param map the initial map to initialize with - * @deprecated since 0.0.12 use the versions that explicitly specify - * View.Type - */ - @Deprecated - protected CopyOnWriteMap(final Map<? extends K, ? extends V> map) { - this(map, View.Type.LIVE); - } - - /** - * Create a new empty {@link CopyOnWriteMap}. - * - * @deprecated since 0.0.12 use the versions that explicitly specify - * View.Type - */ - @Deprecated - protected CopyOnWriteMap() { - this(Collections.<K, V> emptyMap(), View.Type.LIVE); - } - - /** - * Create a new {@link CopyOnWriteMap} with the supplied {@link Map} to - * initialize the values. This map may be optionally modified using any of - * the key, entry or value views - * - * @param map the initial map to initialize with - */ - protected CopyOnWriteMap(final Map<? extends K, ? extends V> map, final View.Type viewType) { - super(map, viewType); - } - - /** - * Create a new empty {@link CopyOnWriteMap}. This map may be optionally - * modified using any of the key, entry or value views - */ - protected CopyOnWriteMap(final View.Type viewType) { - super(Collections.<K, V> emptyMap(), viewType); - } - - @Override - @GuardedBy("internal-lock") - protected abstract <N extends Map<? extends K, ? extends V>> Map<K, V> copy(N map); - - // - // inner classes - // - - /** - * Uses {@link HashMap} instances as its internal storage. - */ - static class Hash<K, V> extends CopyOnWriteMap<K, V> { - private static final long serialVersionUID = 5221824943734164497L; - - Hash(final Map<? extends K, ? extends V> map, final Type viewType) { - super(map, viewType); - } - - @Override - public <N extends Map<? extends K, ? extends V>> Map<K, V> copy(final N map) { - return new HashMap<K, V>(map); - } - } - - /** - * Uses {@link LinkedHashMap} instances as its internal storage. - */ - static class Linked<K, V> extends CopyOnWriteMap<K, V> { - private static final long serialVersionUID = -8659999465009072124L; - - Linked(final Map<? extends K, ? extends V> map, final Type viewType) { - super(map, viewType); - } - - @Override - public <N extends Map<? extends K, ? extends V>> Map<K, V> copy(final N map) { - return new LinkedHashMap<K, V>(map); - } - } -} diff --git a/src/main/org/bson/util/Function.java b/src/main/org/bson/util/Function.java deleted file mode 100644 index 187f58c593d..00000000000 --- a/src/main/org/bson/util/Function.java +++ /dev/null @@ -1,5 +0,0 @@ -package org.bson.util; - -interface Function<A, B> { - B apply(A a); -} diff --git a/src/main/org/bson/util/SimplePool.java b/src/main/org/bson/util/SimplePool.java deleted file mode 100644 index 5895eb6819a..00000000000 --- a/src/main/org/bson/util/SimplePool.java +++ /dev/null @@ -1,62 +0,0 @@ -// SimplePool.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ - -package org.bson.util; - -import java.util.*; -import java.util.concurrent.*; - -/** - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. - */ -@Deprecated -public abstract class SimplePool<T> { - - public SimplePool( int max ){ - _max = max; - } - - public SimplePool(){ - _max = 1000; - } - - protected abstract T createNew(); - - protected boolean ok( T t ){ - return true; - } - - public T get(){ - T t = _stored.poll(); - if ( t != null ) - return t; - return createNew(); - } - - public void done( T t ){ - if ( ! ok( t ) ) - return; - - if ( _stored.size() > _max ) - return; - _stored.add( t ); - } - - final int _max; - private Queue<T> _stored = new ConcurrentLinkedQueue<T>(); -} diff --git a/src/main/org/bson/util/StringRangeSet.java b/src/main/org/bson/util/StringRangeSet.java deleted file mode 100644 index 1bd1ca0e489..00000000000 --- a/src/main/org/bson/util/StringRangeSet.java +++ /dev/null @@ -1,130 +0,0 @@ -/** - * Copyright (C) 2010 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.bson.util; - -import java.util.Collection; -import java.util.Iterator; -import java.util.Set; - -public class StringRangeSet implements Set<String> { - - private final int size; - - private final static int NUMSTR_LEN = 100; - private final static String[] NUMSTRS = new String[100]; - static { - for (int i = 0; i < NUMSTR_LEN; ++i) - NUMSTRS[i] = String.valueOf(i); - } - - public StringRangeSet(int size) { - this.size = size; - } - - public int size() { - return size; - } - - public Iterator<String> iterator() { - return new Iterator<String>() { - - int index = 0; - - public boolean hasNext() { - return index < size; - } - - public String next() { - if (index < NUMSTR_LEN) - return NUMSTRS[index++]; - return String.valueOf(index++); - } - - public void remove() { - throw new UnsupportedOperationException(); - } - }; - } - - @Override - public boolean add(String e) { - throw new UnsupportedOperationException(); - } - - @Override - public boolean addAll(Collection<? extends String> c) { - throw new UnsupportedOperationException(); - } - - @Override - public void clear() { - throw new UnsupportedOperationException(); - } - - @Override - public boolean contains(Object o) { - int t = Integer.parseInt(String.valueOf(o)); - return t >= 0 && t < size; - } - - @Override - public boolean containsAll(Collection<?> c) { - for (Object o : c) { - if (!contains(o)) { - return false; - } - } - return true; - } - - @Override - public boolean isEmpty() { - return false; - } - - @Override - public boolean remove(Object o) { - throw new UnsupportedOperationException(); - } - - @Override - public boolean removeAll(Collection<?> c) { - throw new UnsupportedOperationException(); - } - - @Override - public boolean retainAll(Collection<?> c) { - throw new UnsupportedOperationException(); - } - - @Override - public Object[] toArray() { - String[] array = new String[size()]; - for (int i = 0; i < size; ++i) { - if (i < NUMSTR_LEN) { - array[i] = NUMSTRS[i]; - } else { - array[i] = String.valueOf(i); - } - }
- return array; - } - - @Override - public <T> T[] toArray(T[] a) { - throw new UnsupportedOperationException(); - } -} diff --git a/src/main/org/bson/util/annotations/GuardedBy.java b/src/main/org/bson/util/annotations/GuardedBy.java deleted file mode 100644 index 587eedcf2cc..00000000000 --- a/src/main/org/bson/util/annotations/GuardedBy.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright (c) 2005 Brian Goetz and Tim Peierls - * Released under the Creative Commons Attribution License - * (http://creativecommons.org/licenses/by/2.5) - * Official home: http://www.jcip.net - * - * Any republication or derived work distributed in source code form - * must include this copyright and license notice. - */ - -package org.bson.util.annotations; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * The field or method to which this annotation is applied can only be accessed - * when holding a particular lock, which may be a built-in (synchronization) lock, - * or may be an explicit java.util.concurrent.Lock. - * - * The argument determines which lock guards the annotated field or method: - *

- * <ul>
- * <li>this : The intrinsic lock of the object in whose class the field is defined.</li>
- * <li>class-name.this : For inner classes, it may be necessary to disambiguate 'this'; - * the class-name.this designation allows you to specify which 'this' reference is intended</li>
- * <li>itself : For reference fields only; the object to which the field refers.</li>
- * <li>field-name : The lock object is referenced by the (instance or static) field - * specified by field-name.</li>
- * <li>class-name.field-name : The lock object is referenced by the static field specified - * by class-name.field-name.</li>
- * <li>method-name() : The lock object is returned by calling the named nil-ary method.</li>
- * <li>class-name.class : The Class object for the specified class should be used as the lock object.</li>
- * </ul>
- *
- * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. - */ -@Target({ElementType.FIELD, ElementType.METHOD}) -@Retention(RetentionPolicy.RUNTIME) -@Deprecated -public @interface GuardedBy { - String value(); -} diff --git a/src/main/org/bson/util/annotations/Immutable.java b/src/main/org/bson/util/annotations/Immutable.java deleted file mode 100644 index d3d89e24a81..00000000000 --- a/src/main/org/bson/util/annotations/Immutable.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright (c) 2005 Brian Goetz and Tim Peierls - * Released under the Creative Commons Attribution License - * (http://creativecommons.org/licenses/by/2.5) - * Official home: http://www.jcip.net - * - * Any republication or derived work distributed in source code form - * must include this copyright and license notice. - */ - -package org.bson.util.annotations; - -import java.lang.annotation.*; - -/** - * The class to which this annotation is applied is immutable. This means that - * its state cannot be seen to change by callers, which implies that - *
- * <ul>
- * <li>all public fields are final,</li>
- * <li>all public final reference fields refer to other immutable objects, and</li>
- * <li>constructors and methods do not publish references to any internal state - * which is potentially mutable by the implementation.</li>
- * </ul>
          - * Immutable objects may still have internal mutable state for purposes of performance - * optimization; some state variables may be lazily computed, so long as they are computed - * from immutable state and that callers cannot tell the difference. - *

          - * Immutable objects are inherently thread-safe; they may be passed between threads or - * published without synchronization. - * - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. - */ -@Documented -@Target(ElementType.TYPE) -@Retention(RetentionPolicy.RUNTIME) -@Deprecated -public @interface Immutable { -} diff --git a/src/main/org/bson/util/annotations/NotThreadSafe.java b/src/main/org/bson/util/annotations/NotThreadSafe.java deleted file mode 100644 index adbaabd9a98..00000000000 --- a/src/main/org/bson/util/annotations/NotThreadSafe.java +++ /dev/null @@ -1,30 +0,0 @@ -/* - * Copyright (c) 2005 Brian Goetz and Tim Peierls - * Released under the Creative Commons Attribution License - * (http://creativecommons.org/licenses/by/2.5) - * Official home: http://www.jcip.net - * - * Any republication or derived work distributed in source code form - * must include this copyright and license notice. - */ - -package org.bson.util.annotations; - -import java.lang.annotation.*; - - -/** - * The class to which this annotation is applied is not thread-safe. - * This annotation primarily exists for clarifying the non-thread-safety of a class - * that might otherwise be assumed to be thread-safe, despite the fact that it is a bad - * idea to assume a class is thread-safe without good reason. - * @see ThreadSafe - * - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. - */ -@Documented -@Target(ElementType.TYPE) -@Retention(RetentionPolicy.RUNTIME) -@Deprecated -public @interface NotThreadSafe { -} diff --git a/src/main/org/bson/util/annotations/ThreadSafe.java b/src/main/org/bson/util/annotations/ThreadSafe.java deleted file mode 100644 index 0be429531f9..00000000000 --- a/src/main/org/bson/util/annotations/ThreadSafe.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright (c) 2005 Brian Goetz and Tim Peierls - * Released under the Creative Commons Attribution License - * (http://creativecommons.org/licenses/by/2.5) - * Official home: http://www.jcip.net - * - * Any republication or derived work distributed in source code form - * must include this copyright and license notice. - */ - -package org.bson.util.annotations; - -import java.lang.annotation.*; - - -/** - * The class to which this annotation is applied is thread-safe. This means that - * no sequences of accesses (reads and writes to public fields, calls to public methods) - * may put the object into an invalid state, regardless of the interleaving of those actions - * by the runtime, and without requiring any additional synchronization or coordination on the - * part of the caller. - * @deprecated This class is NOT a part of public API and will be dropped in 3.x versions. - */ -@Documented -@Target(ElementType.TYPE) -@Retention(RetentionPolicy.RUNTIME) -@Deprecated -public @interface ThreadSafe { -} diff --git a/src/main/org/bson/util/package.html b/src/main/org/bson/util/package.html deleted file mode 100644 index 50f680e334e..00000000000 --- a/src/main/org/bson/util/package.html +++ /dev/null @@ -1,3 +0,0 @@ - -

          Misc utils used by BSON.
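The copy-on-write discipline described in the CopyOnWriteMap javadoc above (every mutation copies the delegate under a lock and republishes it through a volatile field, so reads and snapshot iterators never block) reduces to a few lines. A simplified sketch of the idea, not the deleted implementation; the class name is invented for illustration:

    import java.util.Collections;
    import java.util.HashMap;
    import java.util.Map;

    // Simplified copy-on-write map: writes copy the delegate and publish the
    // copy via a volatile field; reads are lock-free against an immutable
    // snapshot. The deleted AbstractCopyOnWriteMap layers ConcurrentMap
    // semantics and live/stable collection views on top of this core idea.
    class CowMapSketch<K, V> {
        private volatile Map<K, V> delegate = Collections.emptyMap();

        public V get(Object key) {
            return delegate.get(key); // never blocks, sees the latest snapshot
        }

        public synchronized V put(K key, V value) {
            Map<K, V> copy = new HashMap<K, V>(delegate); // copy on write
            V old = copy.put(key, value);
            delegate = copy; // publish the new snapshot
            return old;
        }

        public Map<K, V> snapshot() {
            return Collections.unmodifiableMap(delegate); // stable view
        }
    }

As the javadoc notes, this trades O(n) writes for contention-free reads, which pays off when traversals vastly outnumber mutations.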

          - diff --git a/src/test/com/mongodb/BasicDBObjectTest.java b/src/test/com/mongodb/BasicDBObjectTest.java deleted file mode 100644 index c875ba663d7..00000000000 --- a/src/test/com/mongodb/BasicDBObjectTest.java +++ /dev/null @@ -1,184 +0,0 @@ -// BasicDBObjectTest.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import org.bson.types.*; -import com.mongodb.util.*; - -import org.testng.annotations.*; - -// Java -import java.util.Date; - -public class BasicDBObjectTest extends TestCase { - - @Test(groups = {"basic"}) - public void testGetDate() { - final Date date = new Date(); - BasicDBObject doc = new BasicDBObject( "foo" , date); - assert( doc.getDate( "foo" ).equals( date ) ); - } - - @Test(groups = {"basic"}) - public void testGetDateWithDefault() { - final Date date = new Date(); - BasicDBObject doc = new BasicDBObject( "foo" , date); - assert( doc.getDate( "foo", new Date() ).equals( date ) ); - assert( doc.getDate( "bar", date ).equals( date ) ); - } - - @Test(groups = {"basic"}) - public void testGetObjectId() { - final ObjectId objId = ObjectId.get(); - BasicDBObject doc = new BasicDBObject( "foo" , objId); - assert( doc.getObjectId( "foo" ).equals( objId ) ); - } - - @Test(groups = {"basic"}) - public void testGetObjectIdWithDefault() { - final ObjectId objId = ObjectId.get(); - BasicDBObject doc = new BasicDBObject( "foo" , objId); - assert( doc.getObjectId( "foo", ObjectId.get() ).equals( objId ) ); - assert( doc.getObjectId( "bar", objId ).equals( objId ) ); - } - - @Test(groups = {"basic"}) - public void testGetLongWithDefault() { - final long test = 100; - BasicDBObject doc = new BasicDBObject( "foo" , test); - assert( doc.getLong( "foo", 0l ) == test ); - assert( doc.getLong( "bar", 0l ) == 0l ); - } - - @Test(groups = {"basic"}) - public void testGetDoubleWithDefault() { - BasicDBObject doc = new BasicDBObject( "foo" , Double.MAX_VALUE); - assert( doc.getDouble( "foo", (double)0 ) == Double.MAX_VALUE); - assert( doc.getDouble( "bar", Double.MIN_VALUE ) == Double.MIN_VALUE); - } - - @Test(groups = {"basic"}) - public void testGetStringWithDefault() { - BasicDBObject doc = new BasicDBObject( "foo" , "badmf"); - assert( doc.getString( "foo", "ID" ).equals("badmf")); - assert( doc.getString( "bar", "DEFAULT" ).equals("DEFAULT") ); - } - - @Test(groups = {"basic"}) - public void testBasic(){ - BasicDBObject a = new BasicDBObject( "x" , 1 ); - BasicDBObject b = new BasicDBObject( "x" , 1 ); - assert( a.equals( b ) ); - - Object x = JSON.parse( "{ 'x' : 1 }" ); - assert( a.equals( x ) ); - } - - - @Test(groups = {"basic"}) - public void testBasic2(){ - BasicDBObject a = new BasicDBObject( "x" , 1 ); - DBObject b = BasicDBObjectBuilder.start().append( "x" , 1 ).get(); - assert( a.equals( b ) ); - assert( a.equals( JSON.parse( "{ 'x' : 1 }" ) ) ); - assert( ! 
a.equals( JSON.parse( "{ 'x' : 2 }" ) ) ); - } - - @Test(groups = {"basic"}) - public void testBuilderIsEmpty(){ - BasicDBObjectBuilder b = BasicDBObjectBuilder.start(); - assert( b.isEmpty() ); - b.append( "a" , 1 ); - assert( !b.isEmpty() ); - assert( b.get().equals( JSON.parse( "{ 'a' : 1 }" ) ) ); - } - - @Test(groups = {"basic"}) - public void testBuilderNested(){ - BasicDBObjectBuilder b = BasicDBObjectBuilder.start(); - b.add( "a", 1 ); - b.push( "b" ).append( "c", 2 ).pop(); - DBObject a = b.get(); - assert( a.equals( JSON.parse( "{ 'a' : 1, 'b' : { 'c' : 2 } }" ) ) ); - } - - @Test(groups = {"basic"}) - public void testDown1(){ - BasicDBObjectBuilder b = BasicDBObjectBuilder.start(); - b.append( "x" , 1 ); - b.push("y"); - b.append( "a" , 2 ); - b.pop(); - b.push( "z" ); - b.append( "b" , 3 ); - - - Object x = b.get(); - Object y = JSON.parse( "{ 'x' : 1 , 'y' : { 'a' : 2 } , 'z' : { 'b' : 3 } }" ); - - assert( x.equals( y ) ); - } - - void _equal( BasicDBObject x , BasicDBObject y ){ - assert( x.equals( y ) ); - assert( y.equals( x ) ); - } - - void _notequal( BasicDBObject x , BasicDBObject y ){ - assert( ! x.equals( y ) ); - assert( ! y.equals( x ) ); - } - - @Test - public void testEquals(){ - BasicDBObject a = new BasicDBObject(); - BasicDBObject b = new BasicDBObject(); - - - _equal( a , b ); - - a.put( "x" , 1 ); - _notequal( a , b ); - - b.put( "x" , 1 ); - _equal( a , b ); - - a.removeField( "x" ); - _notequal( a , b ); - - b.removeField( "x" ); - _equal( a , b ); - - a.put( "x" , null ); - b.put( "x" , 2 ); - _notequal( a , b ); - - a.put( "x" , 2 ); - b.put( "x" , null ); - _notequal( a , b ); - } - - - public static void main( String args[] ) - throws Exception { - (new BasicDBObjectTest()).runConsole(); - - } - -} diff --git a/src/test/com/mongodb/ByteTest.java b/src/test/com/mongodb/ByteTest.java deleted file mode 100644 index 487c217f28a..00000000000 --- a/src/test/com/mongodb/ByteTest.java +++ /dev/null @@ -1,511 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.mongodb; - -import com.mongodb.util.TestCase; -import org.bson.BSON; -import org.bson.BSONDecoder; -import org.bson.BSONObject; -import org.bson.BasicBSONDecoder; -import org.bson.types.ObjectId; -import org.testng.annotations.Test; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.Date; -import java.util.List; -import java.util.Random; -import java.util.Set; -import java.util.regex.Pattern; - - -@SuppressWarnings({"unchecked", "rawtypes"}) -public class ByteTest extends TestCase { - - public ByteTest() throws IOException , MongoException { - super(); - cleanupDB = "com_mongodb_unittest_ByteTest"; - _db = cleanupMongo.getDB( cleanupDB ); - } - - @Test(groups = {"basic"}) - public void testObject1(){ - DBObject o = new BasicDBObject(); - o.put( "eliot" , "horowitz" ); - o.put( "num" , 517 ); - - - BSONObject read = BSON.decode( BSON.encode( o ) ); - - assertEquals( "horowitz" , read.get( "eliot" ).toString() ); - assertEquals( 517.0 , ((Integer)read.get( "num" )).doubleValue() ); - } - - @Test(groups = {"basic"}) - public void testString() - throws Exception { - - String eliot = java.net.URLDecoder.decode( "horowitza%C3%BCa" , "UTF-8" ); - - DBObject o = new BasicDBObject(); - o.put( "eliot" , eliot ); - o.put( "num" , 517 ); - - BSONObject read = BSON.decode( BSON.encode( o ) ); - - assertEquals( eliot , read.get( "eliot" ).toString() ); - assertEquals( 517.0 , ((Integer)read.get( "num" )).doubleValue() ); - - } - - @Test(groups = {"basic"}) - public void testObject2(){ - DBObject o = new BasicDBObject(); - o.put( "eliot" , "horowitz" ); - o.put( "num" , 517.3 ); - o.put( "z" , "y" ); - o.put( "asd" , null ); - - DBObject o2 = new BasicDBObject(); - o2.put( "a" , "b" ); - o2.put( "b" , "a" ); - o.put( "next" , o2 ); - - BSONObject read = BSON.decode( BSON.encode( o ) ); - - assertEquals( "horowitz" , read.get( "eliot" ).toString() ); - assertEquals( 517.3 , ((Double)read.get( "num" )).doubleValue() ); - assertEquals( "b" , ((BSONObject)read.get( "next" ) ).get( "a" ).toString() ); - assertEquals( "a" , ((BSONObject)read.get( "next" ) ).get( "b" ).toString() ); - assertEquals( "y" , read.get( "z" ).toString() ); - assertEquals( o.keySet().size() , read.keySet().size() ); - - } - - @Test(groups = {"basic"}) - public void testArray1(){ - DBObject o = new BasicDBObject(); - o.put( "eliot" , "horowitz" ); - o.put( "num" , 517 ); - o.put( "z" , "y" ); - o.put( "asd" , null ); - o.put( "myt" , true ); - o.put( "myf" , false ); - - List a = new ArrayList(); - a.add( "A" ); - a.add( "B" ); - a.add( "C" ); - o.put( "a" , a ); - - o.put( "d" , new Date() ); - //o.put( "r" , Pattern.compile( "\\d+" , "i" ) ); - - BSONObject read = BSON.decode( BSON.encode( o ) ); - - assertEquals( "horowitz" , read.get( "eliot" ).toString() ); - assertEquals( 517 , ((Integer)read.get( "num" )).intValue() ); - assertEquals( "y" , read.get( "z" ).toString() ); - assertEquals( o.keySet().size() , read.keySet().size() ); - assertEquals( 3 , a.size() ); - assertEquals( a.size() , ((List)read.get( "a" ) ).size() ); - assertEquals( "A" , ((List)read.get( "a" ) ).get( 0 ).toString() ); - assertEquals( "B" , ((List)read.get( "a" ) ).get( 1 ).toString() ); - assertEquals( "C" , ((List)read.get( "a" ) ).get( 2 ).toString() ); - assertEquals( ((Date)o.get("d")).getTime() , ((Date)read.get("d")).getTime() ); - assertEquals( true , (Boolean)o.get("myt") 
); - assertEquals( false , (Boolean)o.get("myf") ); - //assertEquals( o.get( "r" ).toString() , read.get("r").toString() ); - - } - - @Test - public void testArray2(){ - DBObject x = new BasicDBObject(); - x.put( "a" , new String[]{ "a" , "b" , "c" } ); - x.put( "b" , new int[]{ 1 , 2 , 3 } ); - - BSONObject y = BSON.decode( BSON.encode( x ) ); - - List l = (List)y.get("a"); - assertEquals( 3 , l.size() ); - assertEquals( "a" , l.get(0) ); - assertEquals( "b" , l.get(1) ); - assertEquals( "c" , l.get(2) ); - - l = (List)y.get("b"); - assertEquals( 3 , l.size() ); - assertEquals( 1 , l.get(0) ); - assertEquals( 2 , l.get(1) ); - assertEquals( 3 , l.get(2) ); - } - - @Test - public void testCharacterEncode(){ - DBObject x = new BasicDBObject(); - x.put( "a" , new Character[]{ 'a' , 'b' , 'c' } ); - x.put( "b" , 's'); - - BSONObject y = BSON.decode( BSON.encode( x ) ); - - List l = (List)y.get("a"); - assertEquals( 3 , l.size() ); - assertEquals( "a" , l.get(0) ); - assertEquals( "b" , l.get(1) ); - assertEquals( "c" , l.get(2) ); - - assertEquals( "s" , y.get("b") ); - } - - @Test(groups = {"basic"}) - public void testObjcetId(){ - assertTrue( (new ObjectId()).compareTo( new ObjectId() ) < 0 ); - assertTrue( (new ObjectId(0 , 0 , 0 )).compareTo( new ObjectId() ) < 0 ); - assertTrue( (new ObjectId(0 , 0 , 0 )).compareTo( new ObjectId( 0 , 0 , 1 ) ) < 0 ); - - assertTrue( (new ObjectId(5 , 5 , 5 )).compareTo( new ObjectId( 5 , 5 , 6 ) ) < 0 ); - assertTrue( (new ObjectId(5 , 5 , 5 )).compareTo( new ObjectId( 5 , 6 , 5 ) ) < 0 ); - assertTrue( (new ObjectId(5 , 5 , 5 )).compareTo( new ObjectId( 6 , 5 , 5 ) ) < 0 ); - - } - - - @Test(groups = {"basic"}) - public void testBinary() { - byte barray[] = new byte[256]; - for( int i=0; i<256; i++ ) { - barray[i] = (byte)(i-128); - } - - DBObject o = new BasicDBObject(); - o.put( "bytes", barray ); - - byte[] encoded = BSON.encode( o ); - assertEquals( 273 , encoded.length ); - - BSONObject read = BSON.decode( encoded ); - byte[] b = (byte[])read.get( "bytes" ); - - assertEquals(barray.length, b.length); - for( int i=0; i<256; i++ ) { - assertEquals( b[i], barray[i] ); - } - assertEquals( o.keySet().size() , read.keySet().size() ); - } - - private void go( DBObject o , int serialized_len ) { - go( o, serialized_len, 0 ); - } - - private void go( DBObject o , int serialized_len, int transient_fields ) { - byte[] encoded = BSON.encode( o ); - assertEquals( serialized_len , encoded.length ); - - BSONObject read = BSON.decode( encoded ); - assertEquals( o.keySet().size() - transient_fields, read.keySet().size() ); - if ( transient_fields == 0 ) - assertEquals( o , read ); - } - - @Test(groups = {"basic"}) - public void testEncodeDecode() { - ArrayList t = new ArrayList(); - Object obj = null; - - // null object - boolean threw = false; - try { - go( (DBObject)null, 0 ); - } - catch( RuntimeException e ) { - threw = true; - } - assertEquals( threw, true ); - threw = false; - - DBObject o = new BasicDBObject(); - int serialized_len = 5; - - // empty obj - go( o, 5 ); - - // _id == null - o.put( "_id" , obj ); - assertEquals( Bytes.getType( obj ), Bytes.NULL ); - go( o, 10 ); - - // _id == non-objid - obj = new ArrayList(); - o.put( "_id" , obj ); - assertEquals( Bytes.getType( obj ), Bytes.ARRAY ); - go( o, 15 ); - - // _id == ObjectId - obj = new ObjectId(); - o.put( "_id" , obj ); - assertEquals( Bytes.getType( obj ), Bytes.OID ); - go( o, 22 ); - - // dbcollection - try { - obj = _db.getCollection( "test" ); - o.put( "_id" , obj ); - assertEquals( 
Bytes.getType( obj ), 0 );
-            go( o, 22 );
-        }
-        catch( RuntimeException e ) {
-            threw = true;
-        }
-        assertEquals( threw, true );
-        threw = false;
-
-        t.add( "collection" );
-        o = new BasicDBObject();
-        o.put( "collection" , _db.getCollection( "test" ) );
-        o.put( "_transientFields" , t );
-        go( o, 5, 2 );
-        t.clear();
-
-        // transientFields
-        o = new BasicDBObject();
-        o.put( "_transientFields", new ArrayList() );
-        go( o, 5, 1 );
-
-        t.add( "foo" );
-        o = new BasicDBObject();
-        o.put( "_transientFields", t );
-        o.put( "foo", "bar" );
-        go( o, 5, 2 );
-        t.clear();
-
-        o = new BasicDBObject();
-        o.put( "z" , "" );
-        go( o, 13 );
-        t.clear();
-
-        // $where
-        /*o = new BasicDBObject();
-        o.put( "$where", "eval( true )" );
-        go( o, 30 );
-        */
-
-        obj = 5;
-        o = new BasicDBObject();
-        o.put( "$where", obj );
-        assertEquals( Bytes.getType( obj ), Bytes.NUMBER_INT );
-        go( o, 17 );
-    }
-
-    @Test(groups = {"basic"})
-    public void testPatternFlags() {
-        boolean threw = false;
-        assertEquals( 0, Bytes.regexFlags( "" ) );
-        assertEquals( "", Bytes.regexFlags( 0 ) );
-
-        try {
-            Bytes.regexFlags( "f" );
-        }
-        catch( RuntimeException e ) {
-            threw = true;
-        }
-        assertEquals( threw, true );
-        threw = false;
-
-        try {
-            Bytes.regexFlags( 513 );
-        }
-        catch( RuntimeException e ) {
-            threw = true;
-        }
-        assertEquals( threw, true );
-
-        Pattern lotsoflags = Pattern.compile( "foo", Pattern.CANON_EQ |
-                                              Pattern.DOTALL |
-                                              Pattern.CASE_INSENSITIVE |
-                                              Pattern.UNIX_LINES |
-                                              Pattern.MULTILINE |
-                                              Pattern.LITERAL |
-                                              Pattern.UNICODE_CASE |
-                                              Pattern.COMMENTS |
-                                              256 );
-
-        String s = Bytes.regexFlags( lotsoflags.flags() );
-        char prev = s.charAt( 0 );
-        for( int i=1; i<s.length(); i++ ){
-            char c = s.charAt( i );
-            assertTrue( prev < c );
-            prev = c;
-        }
-    }
-
-    // asserts that got contains exactly the strings in want, in order
-    void _testOrdered( String[] want , List<String> got ){
-        assertEquals( want.length , got.size() );
-        int pos = 0;
-        for ( String s : got ){
-            assertEquals( want[pos++] , s );
-        }
-    }
-
-    @Test(groups = {"basic"})
-    public void testBytes2(){
-        DBObject x = BasicDBObjectBuilder.start( "x" , 1 ).add( "y" , "asdasd" ).get();
-        byte[] b = Bytes.encode( x );
-        assertEquals( x , Bytes.decode( b ) );
-    }
-
-    @Test
-    public void testMany()
-        throws IOException {
-
-        DBObject orig = new BasicDBObject();
-        orig.put( "a" , 5 );
-        orig.put( "ab" , 5.1 );
-        orig.put( "abc" , 5123L );
-        orig.put( "abcd" , "asdasdasd" );
-        orig.put( "abcde" , "asdasdasdasdasdasdasdasd" );
-        orig.put( "abcdef" , Arrays.asList( new String[]{ "asdasdasdasdasdasdasdasd" , "asdasdasdasdasdasdasdasd" } ) );
-
-        byte[] b = Bytes.encode( orig );
-        final int n = 1000;
-
-        ByteArrayOutputStream out = new ByteArrayOutputStream();
-        for ( int i=0; i<n; i++ )
-            out.write( b );
-
-        // read the same document back n times from a single stream
-        ByteArrayInputStream in = new ByteArrayInputStream( out.toByteArray() );
-        BSONDecoder decoder = new BasicBSONDecoder();
-        for ( int i=0; i<n; i++ )
-            assertEquals( orig , decoder.readObject( in ) );
-    }
-
-    // normalizes compareTo output to -1/0/1
-    int _cmp( ObjectId a , ObjectId b ){
-        int x = a.compareTo( b );
-        if ( x < 0 )
-            return -1;
-        if ( x > 0 )
-            return 1;
-        return 0;
-    }
-
-    @Test
-    public void testObjcetIdCompare(){
-        Random r = new Random( 171717 );
-
-        List<ObjectId> l = new ArrayList<ObjectId>();
-        for ( int i=0; i<10000; i++ ){
-            l.add( new ObjectId( new Date( Math.abs( r.nextLong() ) ) , Math.abs( r.nextInt() ) , Math.abs( r.nextInt() ) ) );
-        }
-
-        for ( int i=1; i<l.size(); i++ ){
-            assertEquals( _cmp( l.get(i-1) , l.get(i) ) , -_cmp( l.get(i) , l.get(i-1) ) );
-        }
-
-        Collections.sort( l );
-
-        DBCollection c = _db.getCollection( "objectidcompare" );
-        c.drop();
-        for ( ObjectId o : l )
-            c.save( new BasicDBObject( "_id" , o ) );
-
-        List<DBObject> out = c.find().sort( new BasicDBObject( "_id" , 1 ) ).toArray();
-
-        assertEquals( l.size() , out.size() );
-
-        for ( int i=0; i<l.size(); i++ )
-            assertEquals( l.get(i) , out.get(i).get( "_id" ) );
-    }
-
-    final DB _db;
-
-    public static void main( String args[] ){
-        (new ByteTest()).runConsole();
-    }
-}
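The deleted `testObjcetIdCompare` above relies on a property of ObjectId worth spelling out: the leading bytes are the creation timestamp, so the natural ordering of ids tracks generation order. For context, a minimal standalone sketch of that contract, assuming only `org.bson.types.ObjectId` from the driver's BSON library (class and variable names are illustrative):

```java
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import org.bson.types.ObjectId;

public class ObjectIdOrderSketch {
    public static void main(String[] args) {
        // ids generated by one process: timestamp, then machine id, then counter
        List<ObjectId> ids = new ArrayList<ObjectId>();
        for (int i = 0; i < 5; i++) {
            ids.add(new ObjectId());
        }

        List<ObjectId> sorted = new ArrayList<ObjectId>(ids);
        Collections.sort(sorted);

        // within a single process the counter increments, so generation
        // order and natural order agree even inside the same second
        System.out.println(ids.equals(sorted)); // true
    }
}
```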
diff --git a/src/test/com/mongodb/CommandResultTest.java b/src/test/com/mongodb/CommandResultTest.java
deleted file mode 100644
--- a/src/test/com/mongodb/CommandResultTest.java
+++ /dev/null
-/*
- * Copyright (c) 2008 - 2013 10gen, Inc. <http://10gen.com>
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.mongodb;
-
-import com.mongodb.util.TestCase;
-import org.testng.annotations.Test;
-
-import java.net.UnknownHostException;
-
-public class CommandResultTest extends TestCase {
-    @Test
-    public void testOkCommandResult() throws UnknownHostException {
-        CommandResult commandResult = new CommandResult(new ServerAddress("localhost"));
-        commandResult.put("ok", 1);
-        assertNull(commandResult.getException());
-    }
-
-    @Test
-    public void testNullErrorCode() throws UnknownHostException {
-        CommandResult commandResult = new CommandResult(new ServerAddress("localhost"));
-        commandResult.put("ok", 0);
-        assertEquals(CommandFailureException.class, commandResult.getException().getClass());
-        try {
-            commandResult.throwOnError();
-            fail("Should throw");
-        } catch (CommandFailureException e) {
-            assertEquals(commandResult, e.getCommandResult());
-            assertEquals(-5, e.getCode());
-        }
-    }
-
-    @Test
-    public void testCommandFailure() throws UnknownHostException {
-        CommandResult commandResult = new CommandResult(new ServerAddress("localhost"));
-        final DBObject result = new BasicDBObject("ok", 0.0).append("errmsg", "ns not found").append("code", 5000);
-        commandResult.putAll(result);
-        assertEquals(CommandFailureException.class, commandResult.getException().getClass());
-        try {
-            commandResult.throwOnError();
-            fail("Should throw");
-        } catch (CommandFailureException e) {
-            assertEquals(commandResult, e.getCommandResult());
-            assertEquals(5000, e.getCode());
-        }
-    }
-}
diff --git a/src/test/com/mongodb/DBAddressTest.java b/src/test/com/mongodb/DBAddressTest.java
deleted file mode 100644
index ce6163b8680..00000000000
--- a/src/test/com/mongodb/DBAddressTest.java
+++ /dev/null
@@ -1,80 +0,0 @@
-/**
- * Copyright (C) 2008 10gen Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -package com.mongodb; - -import java.net.*; - -import org.testng.annotations.Test; - -import com.mongodb.util.*; - -public class DBAddressTest extends TestCase { - - @Test(groups = {"basic"}) - public void testCTOR() - throws UnknownHostException { - DBAddress foo = new DBAddress( "www.10gen.com:1000/some.host" ); - DBAddress bar = new DBAddress( foo, "some.other.host" ); - assertEquals( foo.sameHost( "www.10gen.com:1000" ), true ); - assertEquals( foo.getSocketAddress().hashCode(), bar.getSocketAddress().hashCode() ); - } - - @Test(groups = {"basic"}) - public void testInvalid() - throws UnknownHostException { - boolean threw = false; - try { - new DBAddress( null ); - } - catch( NullPointerException e ) { - threw = true; - } - assertTrue( threw, "new DBAddress(null) didn't throw exception" ); - threw = false; - - try { - new DBAddress( " \t\n" ); - } - catch( IllegalArgumentException e ) { - threw = true; - } - assertTrue( threw, "new DBAddress(\" \") didn't throw exception" ); - threw = false; - } - - @Test - public void testBasics() - throws UnknownHostException { - assertEquals( 27017 , new ServerAddress().getPort() ); - assertEquals( 27017 , new ServerAddress( "localhost" ).getPort() ); - assertEquals( 9999 , new ServerAddress( "localhost:9999" ).getPort() ); - } - - @Test - public void testCons3() - throws UnknownHostException { - DBAddress a = new DBAddress( "9.9.9.9:9999" , "abc" ); - assertEquals( "9.9.9.9" , a.getHost() ); - assertEquals( 9999 , a.getPort() ); - assertEquals( "abc" , a.getDBName() ); - } - - public static void main( String args[] ) { - (new DBAddressTest()).runConsole(); - } -} - diff --git a/src/test/com/mongodb/DBApiLayerTest.java b/src/test/com/mongodb/DBApiLayerTest.java deleted file mode 100644 index 66e5e669ba4..00000000000 --- a/src/test/com/mongodb/DBApiLayerTest.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright (c) 2008 - 2013 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- *
- */
-
-package com.mongodb;
-
-import com.mongodb.util.TestCase;
-import org.testng.annotations.Test;
-
-import java.io.IOException;
-import java.util.Arrays;
-
-public class DBApiLayerTest extends TestCase {
-    private final DBApiLayer db;
-
-    public DBApiLayerTest() throws IOException, MongoException {
-        super();
-        cleanupDB = "com_mongodb_unittest_DBApiLayerTest";
-        db = (DBApiLayer) cleanupMongo.getDB( cleanupDB );
-    }
-
-    @Test
-    public void testCursorNotFoundException() {
-        DBCollection collection = db.getCollection("testCursorNotFoundException");
-        for (int i = 0; i < 150; i++) {
-            collection.insert(new BasicDBObject());
-        }
-
-        DBCursor cursor = collection.find();
-        cursor.next(); // force the query
-
-        db.killCursors(cursor.getServerAddress(), Arrays.asList(cursor.getCursorId()));
-
-        try {
-            while (cursor.hasNext()) {
-                cursor.next();
-            }
-            fail("Cursor should have been killed");
-        } catch (MongoException.CursorNotFound e) {
-            assertEquals(cursor.getServerAddress(), e.getServerAddress());
-            assertEquals(cursor.getCursorId(), e.getCursorId());
-        }
-    }
-}
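The deleted test above kills a live cursor on the server and then keeps iterating, expecting the subsequent getMore to fail. For context, a minimal sketch of how that failure mode surfaces through the public API, assuming the legacy 2.x driver; the host, database, and collection names are illustrative:

```java
import com.mongodb.DB;
import com.mongodb.DBCursor;
import com.mongodb.MongoClient;
import com.mongodb.MongoException;

public class CursorNotFoundSketch {
    public static void main(String[] args) throws Exception {
        MongoClient mongo = new MongoClient("127.0.0.1");
        try {
            DB db = mongo.getDB("sketchdb");
            // small batches force getMore round-trips, each of which can
            // discover that the server-side cursor is gone
            DBCursor cursor = db.getCollection("docs").find().batchSize(100);
            try {
                while (cursor.hasNext()) {
                    cursor.next();
                }
            } catch (MongoException.CursorNotFound e) {
                // the cursor id was killed or timed out on the server;
                // a caller would typically re-issue the query here
                System.err.println("cursor lost on " + e.getServerAddress());
            } finally {
                cursor.close();
            }
        } finally {
            mongo.close();
        }
    }
}
```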
diff --git a/src/test/com/mongodb/DBCollectionTest.java b/src/test/com/mongodb/DBCollectionTest.java
deleted file mode 100644
index ccacd8ddc71..00000000000
--- a/src/test/com/mongodb/DBCollectionTest.java
+++ /dev/null
@@ -1,400 +0,0 @@
-/**
- * Copyright (C) 2008 10gen Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.mongodb;
-
-import com.mongodb.util.TestCase;
-
-import org.bson.types.ObjectId;
-import org.testng.Assert;
-import org.testng.annotations.Test;
-
-import java.io.IOException;
-import java.util.List;
-
-public class DBCollectionTest extends TestCase {
-
-    public DBCollectionTest()
-        throws IOException , MongoException {
-        super();
-        cleanupDB = "com_mongodb_unittest_DBCollectionTest";
-        _db = cleanupMongo.getDB( cleanupDB );
-    }
-
-    @Test(groups = {"basic"})
-    public void testMultiInsert() {
-        DBCollection c = _db.getCollection("testmultiinsert");
-        c.drop();
-
-        DBObject obj = c.findOne();
-        assertEquals(obj, null);
-
-        DBObject inserted1 = BasicDBObjectBuilder.start().add("x",1).add("y",2).get();
-        DBObject inserted2 = BasicDBObjectBuilder.start().add("x",3).add("y",3).get();
-        c.insert(inserted1,inserted2);
-        c.insert(new DBObject[] {inserted1,inserted2});
-    }
-
-    @Test(groups = {"basic"})
-    public void testCappedCollection() {
-        String collectionName = "testCapped";
-        int collectionSize = 1000;
-
-        DBCollection c = _db.getCollection(collectionName);
-        c.drop();
-
-        DBObject options = new BasicDBObject("capped", true);
-        options.put("size", collectionSize);
-        c = _db.createCollection(collectionName, options);
-
-        assertEquals(c.isCapped(), true);
-    }
-
-    @Test(groups = {"basic"})
-    public void testDuplicateKeyException() {
-        DBCollection c = _db.getCollection("testDuplicateKey");
-        c.drop();
-
-        DBObject obj = new BasicDBObject();
-        c.insert(obj, WriteConcern.SAFE);
-        try {
-            c.insert(obj, WriteConcern.SAFE);
-            Assert.fail();
-        }
-        catch (MongoException.DuplicateKey e) {
-            assertNotNull(e.getCommandResult());
-            assertEquals(11000, e.getCode());
-        }
-    }
-
-    @Test(groups = {"basic"})
-    public void testFindOne() {
-        DBCollection c = _db.getCollection("test");
-        c.drop();
-
-        DBObject obj = c.findOne();
-        assertEquals(obj, null);
-
-        obj = c.findOne();
-        assertEquals(obj, null);
-
-        obj = c.findOne();
-        assertEquals(obj, null);
-
-        // Test that findOne works when fields is specified but no match is found
-        // *** This is a Regression test for JAVA-411 ***
-        obj = c.findOne(null, new BasicDBObject("_id", true));
-
-        assertEquals(obj, null);
-
-        DBObject inserted = BasicDBObjectBuilder.start().add("x",1).add("y",2).get();
-        c.insert(inserted);
-        c.insert(BasicDBObjectBuilder.start().add("_id", 123).add("x",2).add("z",2).get());
-
-        obj = c.findOne(123);
-        assertEquals(obj.get("_id"), 123);
-        assertEquals(obj.get("x"), 2);
-        assertEquals(obj.get("z"), 2);
-
-        obj = c.findOne(123, new BasicDBObject("x", 1));
-        assertEquals(obj.get("_id"), 123);
-        assertEquals(obj.get("x"), 2);
-        assertEquals(obj.containsField("z"), false);
-
-        obj = c.findOne(new BasicDBObject("x", 1));
-        assertEquals(obj.get("x"), 1);
-        assertEquals(obj.get("y"), 2);
-
-        obj = c.findOne(new BasicDBObject("x", 1), new BasicDBObject("y", 1));
-        assertEquals(obj.containsField("x"), false);
-        assertEquals(obj.get("y"), 2);
-    }
-
-    @Test
-    public void testFindOneSort(){
-
-        DBCollection c = _db.getCollection("test");
-        c.drop();
-
-        DBObject obj = c.findOne();
-        assertEquals(obj, null);
-
-        c.insert(BasicDBObjectBuilder.start().add("_id", 1).add("x", 100).add("y", "abc").get());
-        c.insert(BasicDBObjectBuilder.start().add("_id", 2).add("x", 200).add("y", "abc").get()); //max x
-        c.insert(BasicDBObjectBuilder.start().add("_id", 3).add("x", 1).add("y", "abc").get());
-        c.insert(BasicDBObjectBuilder.start().add("_id", 4).add("x", -100).add("y", "xyz").get()); //min x
c.insert(BasicDBObjectBuilder.start().add("_id", 5).add("x", -50).add("y", "zzz").get()); //max y - c.insert(BasicDBObjectBuilder.start().add("_id", 6).add("x", 9).add("y", "aaa").get()); //min y - c.insert(BasicDBObjectBuilder.start().add("_id", 7).add("x", 1).add("y", "bbb").get()); - - //only sort - obj = c.findOne(new BasicDBObject(), null, new BasicDBObject("x", 1) ); - assertNotNull(obj); - assertEquals(4, obj.get("_id")); - - obj = c.findOne(new BasicDBObject(), null, new BasicDBObject("x", -1)); - assertNotNull(obj); - assertEquals(obj.get("_id"), 2); - - //query and sort - obj = c.findOne(new BasicDBObject("x", 1), null, BasicDBObjectBuilder.start().add("x", 1).add("y", 1).get() ); - assertNotNull(obj); - assertEquals(obj.get("_id"), 3); - - obj = c.findOne( QueryBuilder.start("x").lessThan(2).get(), null, BasicDBObjectBuilder.start().add("y", -1).get() ); - assertNotNull(obj); - assertEquals(obj.get("_id"), 5); - - } - - /** - * This was broken recently. Adding test. - */ - @Test - public void testDropDatabase() throws Exception { - final Mongo mongo = new MongoClient( "127.0.0.1" ); - mongo.getDB("com_mongodb_unittest_dropDatabaseTest").dropDatabase(); - mongo.close(); - } - - @Test - public void testDropIndex(){ - DBCollection c = _db.getCollection( "dropindex1" ); - c.drop(); - - c.save( new BasicDBObject( "x" , 1 ) ); - assertEquals( 1 , c.getIndexInfo().size() ); - - c.ensureIndex( new BasicDBObject( "x" , 1 ) ); - assertEquals( 2 , c.getIndexInfo().size() ); - - c.dropIndexes(); - assertEquals( 1 , c.getIndexInfo().size() ); - - c.ensureIndex( new BasicDBObject( "x" , 1 ) ); - assertEquals( 2 , c.getIndexInfo().size() ); - - c.ensureIndex( new BasicDBObject( "y" , 1 ) ); - assertEquals( 3 , c.getIndexInfo().size() ); - - c.dropIndex( new BasicDBObject( "x" , 1 ) ); - assertEquals( 2 , c.getIndexInfo().size() ); - - } - - @Test - public void testGenIndexName(){ - BasicDBObject o = new BasicDBObject(); - o.put( "x" , 1 ); - assertEquals("x_1", DBCollection.genIndexName(o)); - - o.put( "x" , "1" ); - assertEquals("x_1", DBCollection.genIndexName(o)); - - o.put( "x" , "2d" ); - assertEquals("x_2d", DBCollection.genIndexName(o)); - - o.put( "y" , -1 ); - assertEquals("x_2d_y_-1", DBCollection.genIndexName(o)); - - o.put( "x" , 1 ); - o.put( "y" , 1 ); - o.put( "a" , 1 ); - assertEquals( "x_1_y_1_a_1" , DBCollection.genIndexName(o) ); - - o = new BasicDBObject(); - o.put( "z" , 1 ); - o.put( "a" , 1 ); - assertEquals( "z_1_a_1" , DBCollection.genIndexName(o) ); - } - - @Test - public void testDistinct(){ - DBCollection c = _db.getCollection( "distinct1" ); - c.drop(); - - for ( int i=0; i<100; i++ ){ - BasicDBObject o = new BasicDBObject(); - o.put( "_id" , i ); - o.put( "x" , i % 10 ); - c.save( o ); - } - - List l = c.distinct( "x" ); - assertEquals( 10 , l.size() ); - - l = c.distinct( "x" , new BasicDBObject( "_id" , new BasicDBObject( "$gt" , 95 ) ) ); - assertEquals( 4 , l.size() ); - - } - - @Test - public void testEnsureIndex(){ - DBCollection c = _db.getCollection( "ensureIndex1" ); - c.drop(); - - c.save( new BasicDBObject( "x" , 1 ) ); - assertEquals( 1 , c.getIndexInfo().size() ); - - c.ensureIndex( new BasicDBObject( "x" , 1 ) , new BasicDBObject( "unique" , true ) ); - assertEquals( 2 , c.getIndexInfo().size() ); - assertEquals( Boolean.TRUE , c.getIndexInfo().get(1).get( "unique" ) ); - } - - @Test - public void testEnsureNestedIndex(){ - DBCollection c = _db.getCollection( "ensureNestedIndex1" ); - c.drop(); - - BasicDBObject newDoc = new BasicDBObject( "x", new 
BasicDBObject( "y", 1 ) ); - c.save( newDoc ); - - assertEquals( 1 , c.getIndexInfo().size() ); - c.ensureIndex( new BasicDBObject("x.y", 1), "nestedIdx1", false); - assertEquals( 2 , c.getIndexInfo().size() ); - } - - - @Test - public void testIndexExceptions(){ - DBCollection c = _db.getCollection( "indexExceptions" ); - c.drop(); - - c.insert( new BasicDBObject( "x" , 1 ) ); - c.insert( new BasicDBObject( "x" , 1 ) ); - - c.ensureIndex( new BasicDBObject( "y" , 1 ) ); - c.resetIndexCache(); - c.ensureIndex( new BasicDBObject( "y" , 1 ) ); // make sure this doesn't throw - c.resetIndexCache(); - - Exception failed = null; - try { - c.ensureIndex( new BasicDBObject( "x" , 1 ) , new BasicDBObject( "unique" , true ) ); - } - catch ( MongoException.DuplicateKey e ){ - failed = e; - } - assertNotNull( failed ); - - } - - @Test - public void testMultiInsertNoContinue() { - DBCollection c = _db.getCollection("testmultiinsertNoContinue"); - c.setWriteConcern( WriteConcern.NORMAL ); - c.drop(); - - DBObject obj = c.findOne(); - assertEquals(obj, null); - - ObjectId id = new ObjectId(); - DBObject inserted1 = BasicDBObjectBuilder.start("_id", id).add("x",1).add("y",2).get(); - DBObject inserted2 = BasicDBObjectBuilder.start("_id", id).add("x",3).add("y",4).get(); - DBObject inserted3 = BasicDBObjectBuilder.start().add("x",5).add("y",6).get(); - WriteResult r = c.insert(inserted1,inserted2, inserted3); - assertEquals(1, c.count()); - assertFalse(c.getWriteConcern().getContinueOnErrorForInsert()); - - assertEquals( c.count(), 1); - } - - @Test - public void testMultiInsertWithContinue() { - if (!serverIsAtLeastVersion(2.0)) { - return; - } - - DBCollection c = _db.getCollection("testmultiinsertWithContinue"); - c.drop(); - - DBObject obj = c.findOne(); - assertEquals(obj, null); - - ObjectId id = new ObjectId(); - DBObject inserted1 = BasicDBObjectBuilder.start("_id", id).add("x",1).add("y",2).get(); - DBObject inserted2 = BasicDBObjectBuilder.start("_id", id).add("x",3).add("y",4).get(); - DBObject inserted3 = BasicDBObjectBuilder.start().add("x",5).add("y",6).get(); - WriteConcern newWC = WriteConcern.SAFE.continueOnErrorForInsert(true); - try { - c.insert(newWC, inserted1, inserted2, inserted3); - fail("Insert should have failed"); - } catch (MongoException e) { - assertEquals(11000, e.getCode()); - } - assertEquals( c.count(), 2 ); - } - - @Test( expectedExceptions = IllegalArgumentException.class ) - public void testDotKeysFail() { - DBCollection c = _db.getCollection("testdotkeysFail"); - c.drop(); - - DBObject obj = BasicDBObjectBuilder.start().add("x",1).add("y",2).add("foo.bar","baz").get(); - c.insert(obj); - } - - @Test( expectedExceptions = IllegalArgumentException.class ) - public void testNullKeysFail() { - DBCollection c = _db.getCollection("testnullkeysFail"); - c.drop(); - - DBObject obj = BasicDBObjectBuilder.start().add("x",1).add("y",2).add("foo\0bar","baz").get(); - c.insert(obj); - } - - @Test( expectedExceptions = IllegalArgumentException.class ) - public void testNullKeysFailWhenNested() { - DBCollection c = _db.getCollection("testnullkeysFailWhenNested"); - c.drop(); - - final BasicDBList list = new BasicDBList(); - list.add(new BasicDBObject("foo\0bar","baz")); - DBObject obj = BasicDBObjectBuilder.start().add("x", list).get(); - c.insert(obj); - } - - @Test - public void testLazyDocKeysPass() { - DBCollection c = _db.getCollection("testLazyDotKeysPass"); - c.drop(); - - DBObject obj = BasicDBObjectBuilder.start().add("_id", 
"lazydottest1").add("x",1).add("y",2).add("foo.bar","baz").get(); - - //convert to a lazydbobject - DefaultDBEncoder encoder = new DefaultDBEncoder(); - byte[] encodedBytes = encoder.encode(obj); - - LazyDBDecoder lazyDecoder = new LazyDBDecoder(); - DBObject lazyObj = lazyDecoder.decode(encodedBytes, c); - - c.insert(lazyObj); - - DBObject insertedObj = c.findOne(); - assertEquals("lazydottest1", insertedObj.get("_id")); - assertEquals(1, insertedObj.get("x")); - assertEquals(2, insertedObj.get("y")); - assertEquals("baz", insertedObj.get("foo.bar")); - } - - final DB _db; - - public static void main( String args[] ) - throws Exception { - (new DBCollectionTest()).runConsole(); - } - -} diff --git a/src/test/com/mongodb/DBCursorTest.java b/src/test/com/mongodb/DBCursorTest.java deleted file mode 100644 index e502e6c3931..00000000000 --- a/src/test/com/mongodb/DBCursorTest.java +++ /dev/null @@ -1,525 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import com.mongodb.util.TestCase; -import org.testng.annotations.Test; - -import java.io.IOException; -import java.net.UnknownHostException; -import java.util.Iterator; -import java.util.NoSuchElementException; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -public class DBCursorTest extends TestCase { - - public DBCursorTest() throws IOException , MongoException { - super(); - cleanupDB = "com_mongodb_unittest_DBCursorTest"; - _db = cleanupMongo.getDB(cleanupDB); - } - - @Test(groups = {"basic"}) - public void testGetServerAddressLoop() { - - final DBCollection c = _db.getCollection("getServerAddress"); - c.drop(); - - // Insert some data. - for (int i=0; i < 10; i++) c.insert(new BasicDBObject("one", "two")); - - final DBCursor cur = c.find(); - - while (cur.hasNext()) { - cur.next(); - assertNotNull(cur.getServerAddress()); - } - } - - @Test(groups = {"basic"}) - public void testGetServerAddressQuery() { - - final DBCollection c = _db.getCollection("getServerAddress"); - c.drop(); - - // Insert some data. - for (int i=0; i < 10; i++) c.insert(new BasicDBObject("one", "two")); - - final DBCursor cur = c.find(); - cur.hasNext(); - assertNotNull(cur.getServerAddress()); - } - - @Test(groups = {"basic"}) - public void testGetServerAddressQuery1() { - - final DBCollection c = _db.getCollection("getServerAddress"); - c.drop(); - - // Insert some data. 
- for (int i=0; i < 10; i++) c.insert(new BasicDBObject("one", i)); - - final DBCursor cur = c.find(new BasicDBObject("one", 9)); - cur.hasNext(); - assertNotNull(cur.getServerAddress()); - } - - @Test(groups = {"basic"}) - public void testCount() { - try { - DBCollection c = _db.getCollection("test"); - c.drop(); - - assertEquals(c.find().count(), 0); - - BasicDBObject obj = new BasicDBObject(); - obj.put("x", "foo"); - c.insert(obj); - - assertEquals(c.find().count(), 1); - } - catch (MongoException e) { - assertTrue(false); - } - } - - @Test(groups = {"basic"}) - public void testSnapshot() { - DBCollection c = _db.getCollection("snapshot1"); - c.drop(); - for ( int i=0; i<100; i++ ) - c.save( new BasicDBObject( "x" , i ) ); - assertEquals( 100 , c.find().count() ); - assertEquals( 100 , c.find().toArray().size() ); - assertEquals( 100 , c.find().snapshot().count() ); - assertEquals( 100 , c.find().snapshot().toArray().size() ); - assertEquals( 100 , c.find().snapshot().limit(50).count() ); - assertEquals( 50 , c.find().snapshot().limit(50).toArray().size() ); - } - - @Test(groups = {"basic"}) - public void testOptions() { - DBCollection c = _db.getCollection("test"); - DBCursor dbCursor = c.find(); - - assertEquals(0, dbCursor.getOptions()); - dbCursor.setOptions( Bytes.QUERYOPTION_TAILABLE ); - assertEquals(Bytes.QUERYOPTION_TAILABLE, dbCursor.getOptions()); - dbCursor.addOption( Bytes.QUERYOPTION_SLAVEOK ); - assertEquals(Bytes.QUERYOPTION_TAILABLE | Bytes.QUERYOPTION_SLAVEOK, dbCursor.getOptions()); - dbCursor.resetOptions(); - assertEquals(0, dbCursor.getOptions()); - } - -// @Test -// public void testTailable() { -// DBCollection c = _db.createCollection( "tailableTest", new BasicDBObject( "capped", true ).append( "size", 10000)); -// DBCursor cursor = c.find( ).addOption( Bytes.QUERYOPTION_TAILABLE ); -// -// long start = System.currentTimeMillis(); -// System.err.println( "[ " + start + " ] Has Next?" + cursor.hasNext()); -// cursor.next(); -// long end = System.currentTimeMillis(); -// System.err.println( "[ " + end + " ] Tailable next returned." ); -// assertLess(start - end, 100); -// c.drop(); -// } - - - @Test//(enabled = false) - public void testTailable() { - DBCollection c = _db.getCollection("tail1"); - c.drop(); - _db.createCollection("tail1", new BasicDBObject("capped", true).append("size", 10000)); - for (int i = 0; i < 10; i++) { - c.save(new BasicDBObject("x", i)); - } - - DBCursor cur = c.find().sort(new BasicDBObject("$natural", 1)).addOption(Bytes.QUERYOPTION_TAILABLE); - - while (cur.hasNext()) { - cur.next(); - //do nothing... 
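            // (tailable semantics: hasNext() returns false once the capped
            // collection's current end is reached, but the cursor stays alive;
            // the save() below makes hasNext() true again)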
-        }
-
-        assert (!cur.hasNext());
-        c.save(new BasicDBObject("x", 12));
-        assert (cur.hasNext());
-        assertNotNull(cur.next());
-        assert (!cur.hasNext());
-    }
-
-    @Test//(enabled = false)
-    public void testTailableAwait() throws ExecutionException, TimeoutException, InterruptedException {
-        DBCollection c = _db.getCollection("tail1");
-        c.drop();
-        _db.createCollection("tail1", new BasicDBObject("capped", true).append("size", 10000));
-        for (int i = 0; i < 10; i++) {
-            c.save(new BasicDBObject("x", i), WriteConcern.SAFE);
-        }
-
-        final DBCursor cur = c.find().sort(new BasicDBObject("$natural", 1)).addOption(Bytes.QUERYOPTION_TAILABLE | Bytes.QUERYOPTION_AWAITDATA);
-        Callable<Object> callable = new Callable<Object>() {
-            @Override
-            public Object call() throws Exception {
-                try {
-                    // the following call will block on the last hasNext
-                    int i = 0;
-                    while (cur.hasNext()) {
-                        DBObject obj = cur.next();
-                        i++;
-                        if (i > 10)
-                            return obj.get("x");
-                    }
-
-                    return null;
-                } catch (Throwable e) {
-                    return e;
-                }
-            }
-        };
-
-        ExecutorService es = Executors.newSingleThreadExecutor();
-        Future<Object> future = es.submit(callable);
-
-        Thread.sleep(5000);
-        assertTrue(!future.isDone());
-
-        // this doc should unblock thread
-        c.save(new BasicDBObject("x", 10), WriteConcern.SAFE);
-        Object retVal = future.get(5, TimeUnit.SECONDS);
-        assertEquals(10, retVal);
-    }
-
-    @Test
-    public void testBig(){
-        DBCollection c = _db.getCollection("big1");
-        c.drop();
-
-        String bigString;
-        {
-            StringBuilder buf = new StringBuilder( 16000 );
-            for ( int i=0; i<16000; i++ )
-                buf.append( "x" );
-            bigString = buf.toString();
-        }
-
-        int numToInsert = ( 15 * 1024 * 1024 ) / bigString.length();
-
-        for ( int i=0; i<numToInsert; i++ )
-            c.save( BasicDBObjectBuilder.start().add( "x" , i ).add( "s" , bigString ).get() );
-
-        assertEquals( numToInsert , c.find().count() );
-        assertEquals( numToInsert , c.find().toArray().size() );
-        assertEquals( numToInsert , c.find().batchSize(10).itcount() );
-    }
-
-    @Test
-    public void testSort(){
-        DBCollection c = _db.getCollection("sort1");
-        c.drop();
-
-        for ( int i=0; i<1000; i++ )
-            c.save( new BasicDBObject( "x" , i ).append( "y" , 1000 - i ) );
-
-        //x asc
-        DBCursor cur = c.find().sort( new BasicDBObject("x", 1) );
-        int curmax = -100;
-        while(cur.hasNext()){
-            int val = (Integer)cur.next().get("x");
-            assertTrue( val > curmax);
-            curmax = val;
-        }
-
-        //x desc
-        cur = c.find().sort( new BasicDBObject("x", -1));
-        curmax = 9999;
-        while(cur.hasNext()){
-            int val = (Integer)cur.next().get("x");
-            assertTrue( val < curmax);
-            curmax = val;
-        }
-
-        //query and sort
-        cur = c.find( QueryBuilder.start("x").greaterThanEquals(500).get()).sort(new BasicDBObject("y", 1));
-        assertEquals(500, cur.count());
-        curmax = -100;
-        while(cur.hasNext()){
-            int val = (Integer)cur.next().get("y");
-            assertTrue( val > curmax);
-            curmax = val;
-        }
-    }
-
-    @Test(expectedExceptions = NoSuchElementException.class)
-    public void testShouldThrowNoSuchElementException() {
-        DBCollection c = _db.getCollection("emptyCollection");
-
-        DBCursor cursor = c.find();
-
-        cursor.next();
-    }
-
-    @Test
-    public void testHasFinalizer() throws UnknownHostException {
-        DBCollection c = _db.getCollection( "HasFinalizerTest" );
-        c.drop();
-
-        for ( int i=0; i<1000; i++ )
-            c.save( new BasicDBObject("_id", i), WriteConcern.SAFE);
-
-        // finalizer is on by default so after calling hasNext should report that it has one
-        DBCursor cursor = c.find();
-        assertFalse(cursor.hasFinalizer());
-        cursor.hasNext();
-        assertTrue(cursor.hasFinalizer());
-        cursor.close();
-
-        // no finalizer if there is no cursor, as there should not be for a query with only one result
-        cursor = c.find(new BasicDBObject("_id", 1));
-        cursor.hasNext();
-        assertFalse(cursor.hasFinalizer());
-        cursor.close();
-
-        // no finalizer if there is no cursor, as there should not be for a query with negative batch size
-        cursor = c.find();
-        cursor.batchSize(-1);
-        cursor.hasNext();
-        assertFalse(cursor.hasFinalizer());
-        cursor.close();
-
-        // finally, no finalizer if disabled in mongo options
-        MongoClientOptions mongoOptions = new MongoClientOptions.Builder().cursorFinalizerEnabled(false).build();
-        Mongo m = new MongoClient("127.0.0.1", mongoOptions);
MongoClient("127.0.0.1", mongoOptions); - try { - c = m.getDB(cleanupDB).getCollection("HasFinalizerTest"); - cursor = c.find(); - cursor.hasNext(); - assertFalse(cursor.hasFinalizer()); - cursor.close(); - } finally { - m.close(); - } - } - - final DB _db; - - public static void main( String args[] ) - throws Exception { - (new DBCursorTest()).runConsole(); - - } - -} diff --git a/src/test/com/mongodb/DBObjectTest.java b/src/test/com/mongodb/DBObjectTest.java deleted file mode 100644 index cae35b90770..00000000000 --- a/src/test/com/mongodb/DBObjectTest.java +++ /dev/null @@ -1,180 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import com.mongodb.util.TestCase; -import org.bson.types.BasicBSONList; -import org.bson.types.ObjectId; -import org.testng.annotations.Test; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -@SuppressWarnings({"unchecked", "deprecation"}) -public class DBObjectTest extends TestCase { - - public DBObjectTest() { - super(); - cleanupDB = "com_monogodb_unittest_DBObjectTest"; - _db = cleanupMongo.getDB(cleanupDB); - } - - @Test(groups = {"basic"}) - public void testBasicDBObjectCTOR() { - Map m = new HashMap(); - m.put("key", "value"); - m.put("foo", 1); - m.put("bar", null); - - DBObject obj = new BasicDBObject(m); - assertEquals(obj.get("key"), "value"); - assertEquals(obj.get("foo"), 1); - assertEquals(obj.get("bar"), null); - } - - @Test(groups = {"basic"}) - public void testBasicDBObjectToString() { - Map m = new HashMap(); - m.put("key", new DBPointer("foo", new ObjectId("123456789012123456789012"))); - - DBObject obj = new BasicDBObject(m); - assertEquals(obj.get("key").toString(), "{ \"$ref\" : \"foo\", \"$id\" : ObjectId(\"123456789012123456789012\") }"); - } - - @Test(groups = {"basic"}) - public void testDBObjectBuilder() { - Map m = new HashMap(); - m.put("foo", "bar"); - - BasicDBObjectBuilder b = BasicDBObjectBuilder.start(m); - b.add("bar", "baz"); - - DBObject obj = b.get(); - assertEquals(obj.get("foo"), "bar"); - assertEquals(obj.get("bar"), "baz"); - - } - - @Test(groups = {"basic"}) - public void testToMap() { - Map m = BasicDBObjectBuilder.start().add("y", "z").add("z","a").get().toMap(); - assertEquals(m.get("y"), "z"); - assertEquals(m.get("z"), "a"); - } - - @Test(groups = {"basic"}) - public void testBasicBSONList() { - BasicBSONList l = new BasicBSONList(); - l.put(10, "x"); - assertEquals(l.get("10"), "x"); - assertEquals(l.get(3), null); - l.put("10", "y"); - assertEquals(l.get("10"), "y"); - - DBCollection c = _db.getCollection("dblist"); - c.drop(); - c.insert(BasicDBObjectBuilder.start().add("array", l).get()); - DBObject obj = c.findOne(); - assertEquals(obj.get("array") instanceof List, true); - } - - @Test(groups = {"basic"}) - public void testPutAll() { - DBObject start = BasicDBObjectBuilder.start().add( "a" , 1 ).add( "b" , 2 ).get(); - - assertEquals( 1 , start.get( "a" ) ); - - 
diff --git a/src/test/com/mongodb/DBObjectTest.java b/src/test/com/mongodb/DBObjectTest.java
deleted file mode 100644
index cae35b90770..00000000000
--- a/src/test/com/mongodb/DBObjectTest.java
+++ /dev/null
@@ -1,180 +0,0 @@
-/**
- * Copyright (C) 2008 10gen Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.mongodb;
-
-import com.mongodb.util.TestCase;
-import org.bson.types.BasicBSONList;
-import org.bson.types.ObjectId;
-import org.testng.annotations.Test;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-@SuppressWarnings({"unchecked", "deprecation"})
-public class DBObjectTest extends TestCase {
-
-    public DBObjectTest() {
-        super();
-        cleanupDB = "com_mongodb_unittest_DBObjectTest";
-        _db = cleanupMongo.getDB(cleanupDB);
-    }
-
-    @Test(groups = {"basic"})
-    public void testBasicDBObjectCTOR() {
-        Map m = new HashMap();
-        m.put("key", "value");
-        m.put("foo", 1);
-        m.put("bar", null);
-
-        DBObject obj = new BasicDBObject(m);
-        assertEquals(obj.get("key"), "value");
-        assertEquals(obj.get("foo"), 1);
-        assertEquals(obj.get("bar"), null);
-    }
-
-    @Test(groups = {"basic"})
-    public void testBasicDBObjectToString() {
-        Map m = new HashMap();
-        m.put("key", new DBPointer("foo", new ObjectId("123456789012123456789012")));
-
-        DBObject obj = new BasicDBObject(m);
-        assertEquals(obj.get("key").toString(), "{ \"$ref\" : \"foo\", \"$id\" : ObjectId(\"123456789012123456789012\") }");
-    }
-
-    @Test(groups = {"basic"})
-    public void testDBObjectBuilder() {
-        Map m = new HashMap();
-        m.put("foo", "bar");
-
-        BasicDBObjectBuilder b = BasicDBObjectBuilder.start(m);
-        b.add("bar", "baz");
-
-        DBObject obj = b.get();
-        assertEquals(obj.get("foo"), "bar");
-        assertEquals(obj.get("bar"), "baz");
-
-    }
-
-    @Test(groups = {"basic"})
-    public void testToMap() {
-        Map m = BasicDBObjectBuilder.start().add("y", "z").add("z","a").get().toMap();
-        assertEquals(m.get("y"), "z");
-        assertEquals(m.get("z"), "a");
-    }
-
-    @Test(groups = {"basic"})
-    public void testBasicBSONList() {
-        BasicBSONList l = new BasicBSONList();
-        l.put(10, "x");
-        assertEquals(l.get("10"), "x");
-        assertEquals(l.get(3), null);
-        l.put("10", "y");
-        assertEquals(l.get("10"), "y");
-
-        DBCollection c = _db.getCollection("dblist");
-        c.drop();
-        c.insert(BasicDBObjectBuilder.start().add("array", l).get());
-        DBObject obj = c.findOne();
-        assertEquals(obj.get("array") instanceof List, true);
-    }
-
-    @Test(groups = {"basic"})
-    public void testPutAll() {
-        DBObject start = BasicDBObjectBuilder.start().add( "a" , 1 ).add( "b" , 2 ).get();
-
-        assertEquals( 1 , start.get( "a" ) );
-
-        BasicDBObject next = new BasicDBObject();
-        next.put( "a" , 3 );
-        assertEquals( 3 , next.get( "a" ) );
-        next.putAll( start );
-        assertEquals( 2 , next.get( "b" ) );
-        assertEquals( 1 , next.get( "a" ) );
-
-    }
-
-    @Test(groups = {"basic"})
-    public void testRemoveField() {
-        BasicDBObject obj = new BasicDBObject();
-        obj.put("x", "y");
-        obj.put("y", "z");
-
-        assertTrue(obj.containsKey("x"));
-        assertTrue(obj.containsKey("y"));
-        assertEquals(obj.toString(), "{ \"x\" : \"y\" , \"y\" : \"z\"}");
-
-        obj.removeField("x");
-
-        assertFalse(obj.containsKey("x"));
-        assertTrue(obj.containsKey("y"));
-        assertEquals(obj.toString(), "{ \"y\" : \"z\"}");
-
-        obj.put("x", "y");
-
-        assertTrue(obj.containsKey("x"));
-        assertTrue(obj.containsKey("y"));
-        assertEquals(obj.toString(), "{ \"y\" : \"z\" , \"x\" : \"y\"}");
-    }
-
-    @Test(groups = {"basic"})
-    public void testInnerDot() {
-        DBCollection _colTest = _db.getCollection("test_collection");
-
-        BasicDBObject dbObject = new BasicDBObject("test", "value");
-        BasicDBObject innerObject = new BasicDBObject("test.member.name", true);
-        dbObject.put("inner", innerObject);
-
-        boolean thrown = false;
-        try {
-            _colTest.save(dbObject);
-        }
-        catch (IllegalArgumentException e) {
-            if (e.getMessage().startsWith("Document field names can't have a . in them. (Bad Key: 'test.member.name')")) {
-                thrown = true;
-            }
-        }
-        assertTrue(thrown);
-    }
-
-    @Test(groups = {"basic"})
-    public void testEntrySetOrder() {
-        final List<String> expectedKeys = new ArrayList<String>();
-        final BasicDBObject o = new BasicDBObject();
-        for (int i = 1; i < 1000; i++) {
-            final String key = String.valueOf(i);
-            expectedKeys.add(key);
-            o.put(key, "Test" + key);
-        }
-        final List<String> keysFromKeySet = new ArrayList<String>(o.keySet());
-        final List<String> keysFromEntrySet = new ArrayList<String>();
-        for (final Map.Entry<String, Object> entry : o.entrySet()) {
-            keysFromEntrySet.add(entry.getKey());
-        }
-        assertEquals(keysFromKeySet, expectedKeys);
-        assertEquals(keysFromEntrySet, expectedKeys);
-    }
-
-    private DB _db;
-
-    public static void main( String args[] ) {
-        (new DBObjectTest()).runConsole();
-    }
-}
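The deleted `testEntrySetOrder` pins down that `BasicDBObject` preserves insertion order across `keySet()` and `entrySet()`. That guarantee is what makes it safe to build ordered command documents with it; a minimal sketch (illustrative names):

```java
import com.mongodb.BasicDBObject;

public class FieldOrderSketch {
    public static void main(String[] args) {
        BasicDBObject cmd = new BasicDBObject();
        cmd.put("mapreduce", "coll");   // command name must stay first
        cmd.put("map", "function(){}");
        cmd.put("out", "target");

        // iteration order == insertion order
        System.out.println(cmd.keySet()); // [mapreduce, map, out]
    }
}
```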
diff --git a/src/test/com/mongodb/DBPortPoolTest.java b/src/test/com/mongodb/DBPortPoolTest.java
deleted file mode 100644
index 54a2dd57f4f..00000000000
--- a/src/test/com/mongodb/DBPortPoolTest.java
+++ /dev/null
@@ -1,131 +0,0 @@
-// DBPortPoolTest.java
-
-/**
- * Copyright (C) 2008 10gen Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.mongodb;
-
-import org.testng.annotations.Test;
-
-import java.net.UnknownHostException;
-import java.util.concurrent.BrokenBarrierException;
-import java.util.concurrent.Callable;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-
-public class DBPortPoolTest extends com.mongodb.util.TestCase {
-
-    @Test
-    @SuppressWarnings("deprecation")
-    public void testReuse() throws Exception {
-        MongoOptions options = new MongoOptions();
-        options.connectionsPerHost = 10;
-        final DBPortPool pool = new DBPortPool( new ServerAddress( "localhost" ), options );
-
-        // ensure that maximum number of connections are created
-        DBPort[] ports = new DBPort[10];
-        for (int x = 0; x < options.connectionsPerHost; x++) {
-            ports[x] = pool.get();
-            pool.done( ports[x] );
-            ports[x]._lastThread = 0;
-        }
-
-        int numTasks = 40;
-
-        final CountDownLatch ready = new CountDownLatch(numTasks);
-        final CountDownLatch start = new CountDownLatch(1);
-        final CountDownLatch done = new CountDownLatch(numTasks);
-
-        ExecutorService es = Executors.newFixedThreadPool( numTasks , Executors.defaultThreadFactory() );
-        for (int x = 0; x < numTasks; x++) {
-            es.submit(new Runnable() {
-                @Override
-                public void run() {
-                    ready.countDown();
-                    try {
-                        start.await();
-                        DBPort port = pool.get();
-                        pool.done( port );
-                    } catch (InterruptedException e) {
-                        Thread.currentThread().interrupt();
-                    } finally {
-                        done.countDown();
-                    }
-                }
-            });
-        }
-
-        ready.await();
-        start.countDown();
-        done.await(30, TimeUnit.SECONDS);
-        es.shutdown();
-
-        // every checked-out port was returned, so no new ones were opened
-        assertEquals( options.connectionsPerHost , pool.getTotal() );
-    }
-
-    @Test
-    public void testInterruptedException() throws UnknownHostException, InterruptedException {
-        MongoOptions options = new MongoOptions();
-        options.connectionsPerHost = 1;
-        final DBPortPool pool = new DBPortPool( new ServerAddress( "localhost" ), options );
-        pool.get(); // exhaust the pool so the next get() blocks
-
-        final CountDownLatch ready = new CountDownLatch(1);
-        Callable<Boolean> callable = new Callable<Boolean>() {
-            @Override
-            public Boolean call() throws BrokenBarrierException, InterruptedException {
-                try {
-                    ready.countDown();
-                    pool.get();
-                    return false;
-                } catch (MongoInterruptedException e) {
-                    // return true if interrupted
-                    return true;
-                }
-            }
-        };
-        ExecutorService executor = Executors.newSingleThreadExecutor();
-        Future<Boolean> future = executor.submit(callable);
-
-        ready.await();
-        // Interrupt the thread
-        executor.shutdownNow();
-
-        try {
-            assertEquals(true, future.get());
-        } catch (InterruptedException e) {
-            fail("Should not happen, since this thread was not interrupted");
-        } catch (ExecutionException e) {
-            e.printStackTrace();
-            fail("Should not happen");
-        }
-    }
-
-    public static void main( String args[] ){
-        (new DBPortPoolTest()).runConsole();
-    }
-}
diff --git a/src/test/com/mongodb/DBPortTest.java b/src/test/com/mongodb/DBPortTest.java
deleted file mode 100644
index 7ae1b84fcbe..00000000000
--- a/src/test/com/mongodb/DBPortTest.java
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (c) 2008 - 2013 10gen, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- * - */ - -package com.mongodb; - -import com.mongodb.util.TestCase; -import org.testng.annotations.Test; - -import java.io.IOException; -import java.net.UnknownHostException; -import java.util.HashSet; -import java.util.Set; - -public class DBPortTest extends TestCase { - @Test - @SuppressWarnings("deprecation") - public void testAuthentication() throws IOException { - Mongo m = new MongoClient(); - DB db1 = m.getDB("DBPortTest1"); - DB db2 = m.getDB("DBPortTest2"); - db1.dropDatabase(); - db2.dropDatabase(); - - try { - db1.addUser("u1", "e".toCharArray()); - db2.addUser("u2", "e".toCharArray()); - - DBPort port = new DBPort(m.getAddress(), new DBPortPool(m.getAddress(), new MongoOptions()), new MongoOptions()); - port.checkAuth(m); - - Set expected = new HashSet(); - - assertEquals(expected, port.authenticatedDatabases); - - m.getAuthority().getCredentialsStore().add(MongoCredential.createMongoCRCredential("u1", "DBPortTest1", "e".toCharArray())); - m.getAuthority().getCredentialsStore().add(MongoCredential.createMongoCRCredential("u2", "DBPortTest2", "e".toCharArray())); - - port.checkAuth(m); - - expected.add("DBPortTest1"); - expected.add("DBPortTest2"); - assertEquals(expected, port.authenticatedDatabases); - - m.getAuthority().getCredentialsStore().add(MongoCredential.createMongoCRCredential("u2", "DBPortTest3", "e".toCharArray())); - - try { - port.checkAuth(m); - fail("should throw"); - } catch (CommandFailureException e) { - // all good - } - } - finally { - m.close(); - } - } - - @Test - @SuppressWarnings("deprecation") - public void testOpenFailure() throws UnknownHostException { - final MongoOptions options = new MongoOptions(); - options.autoConnectRetry = true; - options.maxAutoConnectRetryTime = 350; - - final DBPortPool portPool = new DBPortPool(new ServerAddress("localhost", 50051), options); - portPool._everWorked = true; - - DBPort port = new DBPort(new ServerAddress("localhost", 50051), portPool, options); - try { - port._open(); - fail("Open should fail"); - } catch (IOException e) { - // should get exception - } - - } - -} diff --git a/src/test/com/mongodb/DBRefTest.java b/src/test/com/mongodb/DBRefTest.java deleted file mode 100644 index 004d16eff3e..00000000000 --- a/src/test/com/mongodb/DBRefTest.java +++ /dev/null @@ -1,216 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.mongodb; - -import com.mongodb.util.TestCase; -import org.bson.BSONDecoder; -import org.bson.BasicBSONDecoder; -import org.bson.io.BasicOutputBuffer; -import org.bson.io.OutputBuffer; -import org.bson.types.ObjectId; -import org.testng.annotations.Test; - -import java.util.ArrayList; -import java.util.List; - -public class DBRefTest extends TestCase { - - public DBRefTest() { - cleanupDB = "com_monogodb_unittest_DBRefTest"; - _db = cleanupMongo.getDB(cleanupDB); - } - - @Test(groups = {"basic"}) - public void testEqualsAndHashCode() { - DBRef ref = new DBRef(_db, "foo.bar", 4); - DBRef other = new DBRef(_db, "foo.bar", 4); - assertEquals(ref, ref); - assertEquals(ref, other); - assertNotEquals(ref, new DBRefBase(_db, "foo.bar", 4)); - assertEquals(ref.hashCode(), other.hashCode()); - } - - @Test(groups = {"basic"}) - public void testDBRefBaseToString(){ - - ObjectId id = new ObjectId("123456789012345678901234"); - DBRefBase ref = new DBRefBase(_db, "foo.bar", id); - - assertEquals("{ \"$ref\" : \"foo.bar\", \"$id\" : \"123456789012345678901234\" }", ref.toString()); - } - - @Test(groups = {"basic"}) - public void testDBRef(){ - - DBRef ref = new DBRef(_db, "hello", "world"); - DBObject o = new BasicDBObject("!", ref); - - DBEncoder encoder = DefaultDBEncoder.FACTORY.create(); - OutputBuffer buf = new BasicOutputBuffer(); - - encoder.writeObject(buf, o); - - DefaultDBCallback cb = new DefaultDBCallback( null ); - BSONDecoder decoder = new BasicBSONDecoder(); - decoder.decode( buf.toByteArray() , cb ); - DBObject read = cb.dbget(); - - assertEquals("{\"!\":{\"$ref\":\"hello\",\"$id\":\"world\"}}", read.toString().replaceAll( " +" , "" )); - } - - @Test(groups = {"basic"}) - public void testDBRefFetches(){ - DBCollection coll = _db.getCollection("x"); - coll.drop(); - - BasicDBObject obj = new BasicDBObject("_id", 321325243); - coll.save(obj); - - DBRef ref = new DBRef(_db, "x", 321325243); - DBObject deref = ref.fetch(); - - assertTrue(deref != null); - assertEquals(321325243, ((Number)deref.get("_id")).intValue()); - - DBObject refobj = BasicDBObjectBuilder.start().add("$ref", "x").add("$id", 321325243).get(); - deref = DBRef.fetch(_db, refobj); - - assertTrue(deref != null); - assertEquals(321325243, ((Number)deref.get("_id")).intValue()); - } - - @SuppressWarnings("unchecked") - @Test - public void testRefListRoundTrip(){ - DBCollection a = _db.getCollection( "reflistfield" ); - List refs = new ArrayList(); - refs.add(new DBRef(_db, "other", 12)); - refs.add(new DBRef(_db, "other", 14)); - refs.add(new DBRef(_db, "other", 16)); - a.save( BasicDBObjectBuilder.start( "refs" , refs).get() ); - - DBObject loaded = a.findOne(); - assertNotNull( loaded ); - List refsLoaded = (List) loaded.get("refs"); - assertNotNull( refsLoaded ); - assertEquals(3, refsLoaded.size()); - assertEquals(DBRef.class, refsLoaded.get(0).getClass()); - assertEquals(12, refsLoaded.get(0).getId()); - assertEquals(14, refsLoaded.get(1).getId()); - assertEquals(16, refsLoaded.get(2).getId()); - - } - - - @Test - public void testRoundTrip(){ - DBCollection a = _db.getCollection( "refroundtripa" ); - DBCollection b = _db.getCollection( "refroundtripb" ); - a.drop(); - b.drop(); - - a.save( BasicDBObjectBuilder.start( "_id" , 17 ).add( "n" , 111 ).get() ); - b.save( BasicDBObjectBuilder.start( "n" , 12 ).add( "l" , new DBRef( _db , "refroundtripa" , 17 ) ).get() ); - - assertEquals( 12 , b.findOne().get( "n" ) ); - assertEquals( DBRef.class , b.findOne().get( "l" ).getClass() ); - assertEquals( 111 , 
((DBRef)(b.findOne().get( "l" ))).fetch().get( "n" ) ); - - } - - @Test - public void testFindByDBRef(){ - DBCollection b = _db.getCollection( "b" ); - b.drop(); - DBRef ref = new DBRef( _db , "fake" , 17 ); - - b.save( BasicDBObjectBuilder.start( "n" , 12 ).add( "l" , ref ).get() ); - - assertEquals( 12 , b.findOne().get( "n" ) ); - assertEquals( DBRef.class , b.findOne().get( "l" ).getClass() ); - - DBObject loaded = b.findOne(BasicDBObjectBuilder.start( "l" , ref ).get() ); - assertEquals( 12 , loaded.get( "n" ) ); - assertEquals( DBRef.class , loaded.get( "l" ).getClass() ); - assertEquals( ref.getId(), ((DBRef)loaded.get( "l" )).getId()); - assertEquals( ref.getRef(), ((DBRef)loaded.get( "l" )).getRef()); - assertEquals( ref.getDB(), ((DBRef)loaded.get( "l" )).getDB()); - } - - @Test - public void testGetEntityWithSingleDBRefWithCompoundId() { - DBCollection a = _db.getCollection("a"); - a.drop(); - - BasicDBObject compoundId = new BasicDBObject("name", "someName").append("email", "test@example.com"); - BasicDBObject entity = new BasicDBObject("_id", "testId").append("ref", new DBRef(_db, "fake", compoundId)); - a.save(entity); - - DBObject fetched = a.findOne(new BasicDBObject("_id", "testId")); - - assertNotNull(fetched); - assertFalse(fetched.containsField("$id")); - assertEquals(fetched, entity); - } - - @Test - public void testGetEntityWithArrayOfDBRefsWithCompoundIds() { - DBCollection a = _db.getCollection("a"); - a.drop(); - - BasicDBObject compoundId1 = new BasicDBObject("name", "someName").append("email", "test@example.com"); - BasicDBObject compoundId2 = new BasicDBObject("name", "someName2").append("email", "test2@example.com"); - BasicDBList listOfRefs = new BasicDBList(); - listOfRefs.add(new DBRef(_db, "fake", compoundId1)); - listOfRefs.add(new DBRef(_db, "fake", compoundId2)); - BasicDBObject entity = new BasicDBObject("_id", "testId").append("refs", listOfRefs); - a.save(entity); - - DBObject fetched = a.findOne(new BasicDBObject("_id", "testId")); - - assertNotNull(fetched); - assertEquals(fetched, entity); - } - - @Test - public void testGetEntityWithMapOfDBRefsWithCompoundIds() { - DBCollection base = _db.getCollection("basecollection"); - base.drop(); - - BasicDBObject compoundId1 = new BasicDBObject("name", "someName").append("email", "test@example.com"); - BasicDBObject compoundId2 = new BasicDBObject("name", "someName2").append("email", "test2@example.com"); - BasicDBObject mapOfRefs = new BasicDBObject() - .append("someName", new DBRef(_db, "compoundkeys", compoundId1)) - .append("someName2", new DBRef(_db, "compoundkeys", compoundId2)); - BasicDBObject entity = new BasicDBObject("_id", "testId").append("refs", mapOfRefs); - base.save(entity); - - DBObject fetched = base.findOne(new BasicDBObject("_id", "testId")); - - assertNotNull(fetched); - DBObject fetchedRefs = (DBObject) fetched.get("refs"); - assertFalse(fetchedRefs.keySet().contains("$id")); - assertEquals(fetched, entity); - } - - DB _db; - - public static void main( String args[] ) { - (new DBRefTest()).runConsole(); - } -} - diff --git a/src/test/com/mongodb/DBTCPConnectorTest.java b/src/test/com/mongodb/DBTCPConnectorTest.java deleted file mode 100644 index 6abecca0e82..00000000000 --- a/src/test/com/mongodb/DBTCPConnectorTest.java +++ /dev/null @@ -1,166 +0,0 @@ -// DBTCPConnectorTest.java - -/** - * Copyright (C) 2011 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.mongodb;
-
-import com.mongodb.util.TestCase;
-import org.testng.annotations.AfterClass;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.BeforeMethod;
-import org.testng.annotations.Test;
-
-import java.net.UnknownHostException;
-import java.util.Arrays;
-
-/**
- * Tests the functionality of the database TCP connector. The structure of this class is a bit unusual,
- * as it creates its own MongoClient, yet still extends TestCase, which has its own.
- */
-public class DBTCPConnectorTest extends TestCase {
-
-    private MongoClient _mongoClient;
-    private DB _db;
-    private DBCollection _collection;
-    private DBTCPConnector _connector;
-
-    @BeforeClass
-    public void beforeClass() throws UnknownHostException {
-        _mongoClient = new MongoClient(Arrays.asList(new ServerAddress("localhost:27017"), new ServerAddress("localhost:27018")));
-        cleanupDB = "com_mongodb_DBTCPConnectorTest";
-        _db = _mongoClient.getDB(cleanupDB);
-        _collection = _db.getCollection("testCol");
-    }
-
-    @AfterClass
-    public void afterClass() {
-        _mongoClient.close();
-        _connector.close();
-    }
-
-    @BeforeMethod
-    public void beforeMethod() throws UnknownHostException {
-        _connector = new DBTCPConnector(_mongoClient);
-        _connector.start();
-    }
-
-    /**
-     * Tests request reservation
-     */
-    @Test
-    public void testRequestReservation() {
-        final DBTCPConnector.MyPort myPort = _connector.getMyPort();
-        assertNull(myPort.getPinnedRequestStatusForThread());
-        _connector.requestStart();
-        try {
-            assertNull(myPort.getPinnedRequestPortForThread());
-            assertNotNull(myPort.getPinnedRequestStatusForThread());
-            _connector.requestDone();
-            assertNull(myPort.getPinnedRequestStatusForThread());
-        } finally {
-            _connector.requestDone();
-        }
-        assertNull(myPort.getPinnedRequestPortForThread());
-    }
-
-    /**
-     * Tests that the same connection is used for sequential writes
-     */
-    @Test
-    public void testConnectionReservationForWrites() {
-        DBTCPConnector.MyPort myPort = _connector.getMyPort();
-        _connector.requestStart();
-        try {
-            _connector.say(_db, createOutMessageForInsert(), WriteConcern.SAFE);
-            assertNotNull(myPort.getPinnedRequestStatusForThread());
-            DBPort requestPort = myPort.getPinnedRequestPortForThread();
-            _connector.say(_db, createOutMessageForInsert(), WriteConcern.SAFE);
-            assertEquals(requestPort, myPort.getPinnedRequestPortForThread());
-        } finally {
-            _connector.requestDone();
-        }
-    }
-
-    /**
-     * Tests that the same connection is used for a write followed by a read
-     */
-    @Test
-    public void testConnectionReservationForWriteThenRead() {
-        DBTCPConnector.MyPort myPort = _connector.getMyPort();
-        _connector.requestStart();
-        try {
-            _connector.say(_db, createOutMessageForInsert(), WriteConcern.SAFE);
-            DBPort requestPort = myPort.getPinnedRequestPortForThread();
-            _connector.call(_db, _collection,
-                            OutMessage.query(_collection, 0, 0, -1, new BasicDBObject(), new BasicDBObject(), ReadPreference.primary()),
-                            null, 0);
-            assertEquals(requestPort, myPort.getPinnedRequestPortForThread());
-        } finally {
-            _connector.requestDone();
-        }
-    }
read is followed by a write with connection reservation - */ - @Test - public void testConnectionReservationForReadThenWrite() { - if (isStandalone(cleanupMongo)) { - return; - } - - DBTCPConnector.MyPort myPort = _connector.getMyPort(); - _connector.requestStart(); - try { - _connector.call(_db, _collection, - OutMessage.query(_collection, 0, 0, -1, new BasicDBObject(), new BasicDBObject(), ReadPreference.secondary()), - null, 0, ReadPreference.secondary(), null); - DBPort requestPort = myPort.getPinnedRequestPortForThread(); - _connector.say(_db, createOutMessageForInsert(), WriteConcern.SAFE); - assertNotEquals(requestPort, myPort.getPinnedRequestPortForThread()); - DBTCPConnector.PinnedRequestStatus status = myPort.getPinnedRequestStatusForThread(); - assertEquals(_connector.getReplicaSetStatus().getMaster(), myPort.getPinnedRequestPortForThread().serverAddress()); - } finally { - _connector.requestDone(); - } - } - - /** - * Tests that the same connection is used for sequential reads - */ - @Test - public void testConnectionReservationForReads() { - DBTCPConnector.MyPort myPort = _connector.getMyPort(); - _connector.requestStart(); - try { - _connector.call(_db, _collection, - OutMessage.query(_collection, 0, 0, -1, new BasicDBObject(), new BasicDBObject(), ReadPreference.primary()), - null, 0); - assertNotNull(myPort.getPinnedRequestPortForThread()); - } finally { - _connector.requestDone(); - } - } - - - private OutMessage createOutMessageForInsert() { - OutMessage om = OutMessage.insert(_collection, new DefaultDBEncoder(), WriteConcern.NONE); - om.putObject( new BasicDBObject() ); - - return om; - } -} diff --git a/src/test/com/mongodb/DBTest.java b/src/test/com/mongodb/DBTest.java deleted file mode 100644 index 31687082e1e..00000000000 --- a/src/test/com/mongodb/DBTest.java +++ /dev/null @@ -1,227 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
- */ -package com.mongodb; - -import com.mongodb.util.TestCase; -import org.testng.annotations.Test; - -import java.net.UnknownHostException; -import java.util.Arrays; - -public class DBTest extends TestCase { - - public DBTest() { - super(); - cleanupDB = "com_mongodb_unittest_DBTest"; - _db = cleanupMongo.getDB(cleanupDB); - } - - @Test(groups = {"basic"}) - public void testCreateCollection() { - _db.getCollection("foo1").drop(); - _db.getCollection("foo2").drop(); - _db.getCollection("foo3").drop(); - _db.getCollection("foo4").drop(); - - BasicDBObject o1 = new BasicDBObject("capped", false); - DBCollection c = _db.createCollection("foo1", o1); - - DBObject o2 = BasicDBObjectBuilder.start().add("capped", true) - .add("size", 100000).add("max", 10).get(); - c = _db.createCollection("foo2", o2); - for (int i = 0; i < 30; i++) { - c.insert(new BasicDBObject("x", i)); - } - assertTrue(c.find().count() <= 10); - - DBObject o3 = BasicDBObjectBuilder.start().add("capped", true) - .add("size", 1000).add("max", 2).get(); - c = _db.createCollection("foo3", o3); - for (int i = 0; i < 30; i++) { - c.insert(new BasicDBObject("x", i)); - } - assertEquals(c.find().count(), 2); - - try { - DBObject o4 = BasicDBObjectBuilder.start().add("capped", true) - .add("size", -20).get(); - c = _db.createCollection("foo4", o4); - } catch (MongoException e) { - return; - } - assertEquals(0, 1); - } - - @Test(groups = {"basic"}) - public void testForCollectionExistence() { - _db.getCollection("foo1").drop(); - _db.getCollection("foo2").drop(); - _db.getCollection("foo3").drop(); - _db.getCollection("foo4").drop(); - - assertFalse(_db.collectionExists("foo1")); - - BasicDBObject o1 = new BasicDBObject("capped", false); - DBCollection c = _db.createCollection("foo1", o1); - - assertTrue(_db.collectionExists("foo1"), "Collection 'foo' was supposed to be created, but 'collectionExists' did not return true."); - assertTrue(_db.collectionExists("FOO1")); - assertTrue(_db.collectionExists("fOo1")); - - _db.getCollection("foo1").drop(); - - assertFalse(_db.collectionExists("foo1")); - } - - @Test(groups = {"basic"}) - public void testReadPreferenceObedience() { - DBObject obj = new BasicDBObject("mapreduce", 1).append("out", "myColl"); - assertEquals(ReadPreference.primary(), _db.getCommandReadPreference(obj, ReadPreference.secondary())); - - obj = new BasicDBObject("mapreduce", 1).append("out", new BasicDBObject("replace", "myColl")); - assertEquals(ReadPreference.primary(), _db.getCommandReadPreference(obj, ReadPreference.secondary())); - - obj = new BasicDBObject("mapreduce", 1).append("out", new BasicDBObject("inline", 1)); - assertEquals(ReadPreference.secondary(), _db.getCommandReadPreference(obj, ReadPreference.secondary())); - - obj = new BasicDBObject("mapreduce", 1).append("out", new BasicDBObject("inline", null)); - assertEquals(ReadPreference.primary(), _db.getCommandReadPreference(obj, ReadPreference.secondary())); - - obj = new BasicDBObject("getnonce", 1); - assertEquals(ReadPreference.primaryPreferred(), _db.getCommandReadPreference(obj, ReadPreference.secondary())); - - obj = new BasicDBObject("authenticate", 1); - assertEquals(ReadPreference.primaryPreferred(), _db.getCommandReadPreference(obj, ReadPreference.secondary())); - - obj = new BasicDBObject("count", 1); - assertEquals(ReadPreference.secondary(), _db.getCommandReadPreference(obj, ReadPreference.secondary())); - - obj = new BasicDBObject("count", 1); - assertEquals(ReadPreference.secondary(), _db.getCommandReadPreference(obj, 
ReadPreference.secondary())); - - obj = new BasicDBObject("serverStatus", 1); - assertEquals(ReadPreference.primary(), _db.getCommandReadPreference(obj, ReadPreference.secondary())); - - obj = new BasicDBObject("count", 1); - assertEquals(ReadPreference.primary(), _db.getCommandReadPreference(obj, null)); - - obj = new BasicDBObject("collStats", 1); - assertEquals(ReadPreference.secondaryPreferred(), _db.getCommandReadPreference(obj, ReadPreference.secondaryPreferred())); - - obj = new BasicDBObject("text", 1); - assertEquals(ReadPreference.secondaryPreferred(), _db.getCommandReadPreference(obj, ReadPreference.secondaryPreferred())); - } - - @Test(groups = {"basic"}) - public void testEnsureConnection() throws UnknownHostException { - - Mongo m = new MongoClient(Arrays.asList(new ServerAddress("localhost"))); - - if (isStandalone(m)) { - return; - } - try { - DB db = m.getDB("com_mongodb_unittest_DBTest"); - db.requestStart(); - try { - db.requestEnsureConnection(); - } finally { - db.requestDone(); - } - } finally { - m.close(); - } - } - - @Test(groups = {"basic"}) - public void whenRequestStartCallsAreNestedThenTheConnectionShouldBeReleaseOnLastCallToRequestEnd() throws UnknownHostException { - Mongo m = new MongoClient(Arrays.asList(new ServerAddress("localhost")), - MongoClientOptions.builder().connectionsPerHost(1).maxWaitTime(10).build()); - DB db = m.getDB("com_mongodb_unittest_DBTest"); - - try { - db.requestStart(); - try { - db.command(new BasicDBObject("ping", 1)); - db.requestStart(); - try { - db.command(new BasicDBObject("ping", 1)); - } finally { - db.requestDone(); - } - } finally { - db.requestDone(); - } - } finally { - m.close(); - } - } - - @Test(groups = {"basic"}) - public void whenRequestDoneIsCalledWithoutFirstCallingRequestStartNoExceptionIsThrown() throws UnknownHostException { - _db.requestDone(); - } - - - /*public static class Person extends DBObject { - - public Person(){ - - } - - Person( String name ){ - _name = name; - } - - public String getName(){ - return _name; - } - - public void setName(String name){ - _name = name; - } - - String _name; - } - - public DBTest() - throws IOException { - _db = new Mongo( "127.0.0.1" , "dbbasetest" ); - } - - - @Test - public void test1(){ - DBCollection c = _db.getCollection( "persen.test1" ); - c.drop(); - c.setObjectClass( Person.class ); - - Person p = new Person( "eliot" ); - c.save( p ); - - DBObject out = c.findOne(); - assertEquals( "eliot" , out.get( "Name" ) ); - assertTrue( out instanceof Person , "didn't come out as Person" ); - } - */ - - final DB _db; - - public static void main(String args[]) - throws Exception { - (new DBTest()).runConsole(); - } -} diff --git a/src/test/com/mongodb/DBTests.java b/src/test/com/mongodb/DBTests.java deleted file mode 100644 index f5ee5359f6e..00000000000 --- a/src/test/com/mongodb/DBTests.java +++ /dev/null @@ -1,176 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.mongodb; - -import com.mongodb.util.TestCase; -import org.testng.annotations.Test; - -import java.net.UnknownHostException; -import java.util.Arrays; - -/** - * Tests aspects of the DB - not really driver tests - */ -public class DBTests extends TestCase { - - final Mongo _mongo; - final DB _db; - - public DBTests() { - _mongo = cleanupMongo; - cleanupDB = "java_com_mongodb_unittest_DBTests"; - _db = cleanupMongo.getDB(cleanupDB); - } - - @Test - public void testGetCollectionNames() throws MongoException { - String name = "testGetCollectionNames"; - DBCollection c = _db.getCollection(name); - c.drop(); - assertFalse(_db.getCollectionNames().contains(name)); - c.save(new BasicDBObject("x", 1)); - assertTrue(_db.getCollectionNames().contains(name)); - - } - - - @Test - public void testRename() throws MongoException { - String namea = "testRenameA"; - String nameb = "testRenameB"; - DBCollection a = _db.getCollection(namea); - DBCollection b = _db.getCollection(nameb); - - a.drop(); - b.drop(); - - assertEquals(0, a.find().count()); - assertEquals(0, b.find().count()); - - a.save(new BasicDBObject("x", 1)); - assertEquals(1, a.find().count()); - assertEquals(0, b.find().count()); - - DBCollection b2 = a.rename(nameb); - assertEquals(0, a.find().count()); - assertEquals(1, b.find().count()); - assertEquals(1, b2.find().count()); - - assertEquals(b.getName(), b2.getName()); - - } - - @Test - public void testRenameAndDrop() throws MongoException { - String namea = "testRenameA"; - String nameb = "testRenameB"; - DBCollection a = _db.getCollection(namea); - DBCollection b = _db.getCollection(nameb); - - a.drop(); - b.drop(); - - assertEquals(0, a.find().count()); - assertEquals(0, b.find().count()); - - a.save(new BasicDBObject("x", 1)); - b.save(new BasicDBObject("x", 1)); - assertEquals(1, a.find().count()); - assertEquals(1, b.find().count()); - - try { - DBCollection b2 = a.rename(nameb); - assertTrue(false, "Rename to existing collection must fail"); - } catch (MongoException e) { - assertEquals(e.getCode(), 10027); - } - - DBCollection b2 = a.rename(nameb, true); - assertEquals(0, a.find().count()); - assertEquals(1, b.find().count()); - assertEquals(1, b2.find().count()); - - assertEquals(b.getName(), b2.getName()); - - } - -// @Test -// public void testCommandToSecondary() throws MongoException, UnknownHostException { -// Mongo mongo = new Mongo(Arrays.asList(new ServerAddress("127.0.0.1"), new ServerAddress("127.0.0.1", 27018))); -// -// try { -// if (isStandalone(mongo)) { -// return; -// } -// -// String primary = getPrimaryAsString(mongo); -// -// DB db = mongo.getDB("secondaryTest"); -// db.setReadPreference(ReadPreference.SECONDARY); -// CommandResult result = db.command("ping"); -// assertNotEquals(primary, result.get("serverUsed")); -// } finally { -// mongo.close(); -// } -// } - - @Test - public void testGetCollectionNamesToSecondary() throws MongoException, UnknownHostException { - Mongo mongo = new MongoClient(Arrays.asList(new ServerAddress("127.0.0.1"), new ServerAddress("127.0.0.1", 27018))); - - try { - if (isStandalone(mongo)) { - return; - } - - String secondary = getASecondaryAsString(mongo); - mongo.close(); - mongo = new MongoClient(secondary); - DB db = mongo.getDB("secondaryTest"); - db.setReadPreference(ReadPreference.secondary()); - db.getCollectionNames(); - } finally { - mongo.close(); - } - } - - - - @Test - @SuppressWarnings("deprecation") - public void testTurnOffSlaveOk() throws MongoException, UnknownHostException { - MongoOptions
mongoOptions = new MongoOptions(); - - mongoOptions.slaveOk = true; - - Mongo mongo = new Mongo("localhost", mongoOptions); - try { - mongo.addOption(Bytes.QUERYOPTION_PARTIAL); - mongo.addOption(Bytes.QUERYOPTION_AWAITDATA); - - int isSlaveOk = mongo.getOptions() & Bytes.QUERYOPTION_SLAVEOK; - - assertEquals(Bytes.QUERYOPTION_SLAVEOK, isSlaveOk); - - mongo.setOptions(mongo.getOptions() & (~Bytes.QUERYOPTION_SLAVEOK)); - - assertEquals(Bytes.QUERYOPTION_AWAITDATA | Bytes.QUERYOPTION_PARTIAL, mongo.getOptions()); - } finally { - mongo.close(); - } - } -} diff --git a/src/test/com/mongodb/ErrorTest.java b/src/test/com/mongodb/ErrorTest.java deleted file mode 100644 index 0cb5b27d108..00000000000 --- a/src/test/com/mongodb/ErrorTest.java +++ /dev/null @@ -1,96 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import org.testng.annotations.*; - -import com.mongodb.util.*; - -/** - * - */ -public class ErrorTest extends TestCase { - - DB _db; - - @BeforeClass - public void setUp() throws Exception{ - cleanupDB = "com_mongodb_unittest_ErrorTest"; - _db = cleanupMongo.getDB(cleanupDB); - } - - @Test - public void testLastError() - throws MongoException { - - _db.resetError(); - assert(_db.getLastError().get("err") == null); - - _db.forceError(); - - assert(_db.getLastError().get("err") != null); - - _db.resetError(); - assert(_db.getLastError().get("err") == null); - } - - @Test - public void testLastErrorWithConcern() - throws MongoException { - - _db.resetError(); - CommandResult cr = _db.getLastError(WriteConcern.FSYNC_SAFE); - assert(cr.get("err") == null); - assert(cr.containsField("fsyncFiles") || cr.containsField("waited")); - } - - @Test - public void testLastErrorWithConcernAndW() - throws MongoException { - if ( /* TODO: running with slaves */ false ){ - _db.resetError(); - CommandResult cr = _db.getLastError(WriteConcern.REPLICAS_SAFE); - assert(cr.get("err") == null); - assert(cr.containsField("wtime")); - } - } - - @Test - public void testPrevError() - throws MongoException { - - _db.resetError(); - - assert(_db.getLastError().get("err") == null); - assert(_db.getPreviousError().get("err") == null); - - _db.forceError(); - - assert(_db.getLastError().get("err") != null); - assert(_db.getPreviousError().get("err") != null); - - _db.getCollection("misc").insert(new BasicDBObject("foo", 1)); - - assert(_db.getLastError().get("err") == null); - assert(_db.getPreviousError().get("err") != null); - - _db.resetError(); - - assert(_db.getLastError().get("err") == null); - assert(_db.getPreviousError().get("err") == null); - } -} diff --git a/src/test/com/mongodb/JavaClientTest.java b/src/test/com/mongodb/JavaClientTest.java deleted file mode 100644 index fc6cdfe1487..00000000000 --- a/src/test/com/mongodb/JavaClientTest.java +++ /dev/null @@ -1,1184 +0,0 @@ -// JavaClientTest.java -/** - * Copyright (C) 2008 10gen Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import com.mongodb.util.JSON; -import com.mongodb.util.TestCase; -import com.mongodb.util.Util; -import org.bson.BSON; -import org.bson.Transformer; -import org.bson.types.BSONTimestamp; -import org.bson.types.Binary; -import org.bson.types.Code; -import org.bson.types.CodeWScope; -import org.bson.types.MaxKey; -import org.bson.types.MinKey; -import org.bson.types.ObjectId; -import org.testng.annotations.Test; - -import java.net.UnknownHostException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.regex.Pattern; - -public class JavaClientTest extends TestCase { - - public JavaClientTest() { - _mongo = cleanupMongo; - cleanupDB = "com_mongodb_unittest_JavaClientTest"; - _db = cleanupMongo.getDB( cleanupDB ); - } - - @Test - public void test1() - throws MongoException { - DBCollection c = _db.getCollection( "test1" ); - c.drop(); - - DBObject m = new BasicDBObject(); - m.put( "name" , "eliot" ); - m.put( "state" , "ny" ); - - c.save( m ); - assert( m.containsField( "_id" ) ); - - Map out = (Map)(c.findOne( m.get( "_id" ))); - assertEquals( "eliot" , out.get( "name" ) ); - assertEquals( "ny" , out.get( "state" ) ); - } - - @Test - public void test2() - throws MongoException { - DBCollection c = _db.getCollection( "test2" ); - c.drop(); - - DBObject m = new BasicDBObject(); - m.put( "name" , "eliot" ); - m.put( "state" , "ny" ); - - Map sub = new HashMap(); - sub.put( "bar" , "1z" ); - m.put( "foo" , sub ); - - c.save( m ); - - assert( m.containsField( "_id" ) ); - - Map out = (Map)(c.findOne( m.get( "_id" ))); - assertEquals( "eliot" , out.get( "name" ) ); - assertEquals( "ny" , out.get( "state" ) ); - - Map z = (Map)out.get( "foo" ); - assertNotNull( z ); - assertEquals( "1z" , z.get( "bar" ) ); - } - - @Test - public void testWhere1() - throws MongoException { - DBCollection c = _db.getCollection( "testWhere1" ); - c.drop(); - assertNull( c.findOne() ); - - c.save( BasicDBObjectBuilder.start().add( "a" , 1 ).get() ); - assertNotNull( c.findOne() ); - - assertNotNull( c.findOne( BasicDBObjectBuilder.start().add( "$where" , "this.a == 1" ).get() ) ); - assertNull( c.findOne( BasicDBObjectBuilder.start().add( "$where" , "this.a == 2" ).get() ) ); - } - - @Test - public void testCodeWScope() - throws MongoException { - DBCollection c = _db.getCollection( "testCodeWScope" ); - c.drop(); - assertNull( c.findOne() ); - - c.save( BasicDBObjectBuilder.start().add( "a" , 1 ).get() ); - assertNotNull( c.findOne() ); - - assertNotNull( c.findOne( BasicDBObjectBuilder.start().add( "$where" , new CodeWScope( "this.a == x" , new BasicDBObject( "x" , 1 ) ) ).get() ) ); - assertNull( c.findOne( BasicDBObjectBuilder.start().add( "$where" , new CodeWScope( "this.a == x" , new BasicDBObject( "x" , 2 ) ) ).get() ) ); - - - c.drop(); - BasicDBObject in = new
BasicDBObject(); - in.put( "_id" , 1 ); - in.put( "a" , new Code("x=5") ); - in.put( "b" , new CodeWScope( "x=5" , new BasicDBObject( "x" , 2 ) ) ); - c.insert( in ); - - DBObject out = c.findOne(); - - assertEquals( in , out ); - } - - - @Test - public void testCount() - throws MongoException { - DBCollection c = _db.getCollection("testCount"); - - c.drop(); - assertNull(c.findOne()); - assertTrue(c.getCount() == 0); - - for (int i=0; i < 100; i++) { - c.insert(new BasicDBObject("i", i)); - } - - assertEquals( 100 , c.getCount() ); - assertEquals( 100 , c.find().count() ); - assertEquals( 100 , c.find().limit(10).count() ); - assertEquals( 10 , c.find().limit(10).size() ); - assertEquals( 90 , c.find().skip(10).size() ); - } - - @Test - public void testIndex() - throws MongoException { - DBCollection c = _db.getCollection("testIndex"); - - c.drop(); - assertNull(c.findOne()); - - for (int i=0; i < 100; i++) { - c.insert(new BasicDBObject("i", i)); - } - - assertTrue(c.getCount() == 100); - - c.createIndex(new BasicDBObject("i", 1)); - - List<DBObject> list = c.getIndexInfo(); - - assertTrue(list.size() == 2); - assertTrue(list.get(1).get("name").equals("i_1")); - } - - @Test - public void testBinary() - throws MongoException { - DBCollection c = _db.getCollection( "testBinary" ); - c.drop(); - c.save( BasicDBObjectBuilder.start().add( "a" , "eliot".getBytes() ).get() ); - - DBObject out = c.findOne(); - byte[] b = (byte[])(out.get( "a" ) ); - assertEquals( "eliot" , new String( b ) ); - - { - byte[] raw = new byte[9]; - ByteBuffer bb = ByteBuffer.wrap( raw ); - bb.order( Bytes.ORDER ); - bb.putInt( 5 ); - bb.put( "eliot".getBytes() ); - out.put( "a" , "eliot".getBytes() ); - c.save( out ); - - out = c.findOne(); - b = (byte[])(out.get( "a" ) ); - assertEquals( "eliot" , new String( b ) ); - - out.put( "a" , new Binary( (byte)111 , raw ) ); - c.save( out ); - Binary blah = (Binary)c.findOne().get( "a" ); - assertEquals( 111 , blah.getType() ); - assertEquals( Util.toHex( raw ) , Util.toHex( blah.getData() ) ); - } - - } - - @Test - public void testMinMaxKey() - throws MongoException { - DBCollection c = _db.getCollection( "testMinMaxKey" ); - c.drop(); - c.save( BasicDBObjectBuilder.start().add( "min" , new MinKey() ).add( "max" , new MaxKey() ).get() ); - - DBObject out = c.findOne(); - MinKey min = (MinKey)(out.get( "min" ) ); - MaxKey max = (MaxKey)(out.get( "max" ) ); - assertTrue( JSON.serialize(min).contains("$minKey") ); - assertTrue( JSON.serialize(max).contains("$maxKey") ); - } - - @Test - public void testBinaryOld() - throws MongoException { - DBCollection c = _db.getCollection( "testBinary" ); - c.drop(); - c.save( BasicDBObjectBuilder.start().add( "a" , "eliot".getBytes() ).get() ); - - DBObject out = c.findOne(); - byte[] b = (byte[])(out.get( "a" ) ); - assertEquals( "eliot" , new String( b ) ); - - { - byte[] raw = new byte[9]; - ByteBuffer bb = ByteBuffer.wrap( raw ); - bb.order( Bytes.ORDER ); - bb.putInt( 5 ); - bb.put( "eliot".getBytes() ); - out.put( "a" , new Binary( BSON.B_BINARY , "eliot".getBytes() ) ); - c.save( out ); - - // objects of subtype B_BINARY or B_GENERAL should become byte[] - out = c.findOne(); -// Binary blah = (Binary)(out.get( "a" ) ); - byte[] bytes = (byte[]) out.get("a"); - assertEquals( "eliot" , new String( bytes ) ); - - out.put( "a" , new Binary( (byte)111 , raw ) ); - c.save( out ); - Binary blah = (Binary)c.findOne().get( "a" ); - assertEquals( 111 , blah.getType() ); - assertEquals( Util.toHex( raw ) , Util.toHex( blah.getData() ) ); - } - - } -
@Test - public void testUUID() - throws MongoException { - DBCollection c = _db.getCollection( "testUUID" ); - c.drop(); - c.save( BasicDBObjectBuilder.start().add( "a" , new UUID(1,2)).add("x",5).get() ); - - DBObject out = c.findOne(); - UUID b = (UUID)(out.get( "a" ) ); - assertEquals( new UUID(1,2), b); - assertEquals( 5 , out.get("x" ) ); - } - - @Test - public void testEval() - throws MongoException { - assertEquals( 17 , ((Number)(_db.eval( "return 17" ))).intValue() ); - assertEquals( 18 , ((Number)(_db.eval( "function(x){ return 17 + x; }" , 1 ))).intValue() ); - } - - @Test - public void testPartial1() - throws MongoException { - DBCollection c = _db.getCollection( "partial1" ); - c.drop(); - - c.save( BasicDBObjectBuilder.start().add( "a" , 1 ).add( "b" , 2 ).get() ); - - DBObject out = c.find().next(); - assertEquals( 1 , out.get( "a" ) ); - assertEquals( 2 , out.get( "b" ) ); - - out = c.find( new BasicDBObject() , BasicDBObjectBuilder.start().add( "a" , 1 ).get() ).next(); - assertEquals( 1 , out.get( "a" ) ); - assertNull( out.get( "b" ) ); - - out = c.find( null , BasicDBObjectBuilder.start().add( "a" , 1 ).get() ).next(); - assertEquals( 1 , out.get( "a" ) ); - assertNull( out.get( "b" ) ); - - // make sure can't insert back partial - try { - c.update(out, out); - assertTrue(false); - } catch (IllegalArgumentException ex) { - } - - out = c.findOne( null , BasicDBObjectBuilder.start().add( "b" , 1 ).get() ); - assertEquals( 2 , out.get( "b" ) ); - assertNull( out.get( "a" ) ); - - // make sure can't insert back partial - try { - c.update(out, out); - assertTrue(false); - } catch (IllegalArgumentException ex) { - } - - } - - @Test - public void testGroup() - throws MongoException { - - DBCollection c = _db.getCollection( "group1" ); - c.drop(); - c.save( BasicDBObjectBuilder.start().add( "x" , "a" ).get() ); - c.save( BasicDBObjectBuilder.start().add( "x" , "a" ).get() ); - c.save( BasicDBObjectBuilder.start().add( "x" , "a" ).get() ); - c.save( BasicDBObjectBuilder.start().add( "x" , "b" ).get() ); - - DBObject g = c.group( new BasicDBObject( "x" , 1 ) , null , new BasicDBObject( "count" , 0 ) , - "function( o , p ){ p.count++; }" ); - - List l = (List)g; - assertEquals( 2 , l.size() ); - } - - @Test - public void testSet() - throws MongoException { - - DBCollection c = _db.getCollection( "group1" ); - c.drop(); - c.save( BasicDBObjectBuilder.start().add( "id" , 1 ).add( "x" , true ).get() ); - assertEquals( Boolean.class , c.findOne().get( "x" ).getClass() ); - - c.update( new BasicDBObject( "id" , 1 ) , - new BasicDBObject( "$set" , - new BasicDBObject( "x" , 5.5 ) ) ); - assertEquals( Double.class , c.findOne().get( "x" ).getClass() ); - - } - - @Test - public void testKeys1() - throws MongoException { - - DBCollection c = _db.getCollection( "keys1" ); - c.drop(); - c.save( BasicDBObjectBuilder.start().push( "a" ).add( "x" , 1 ).get() ); - - assertEquals( 1, ((DBObject)c.findOne().get("a")).get("x" ) ); - c.update( new BasicDBObject() , BasicDBObjectBuilder.start().push( "$set" ).add( "a.x" , 2 ).get() ); - assertEquals( 1 , c.find().count() ); - assertEquals( 2, ((DBObject)c.findOne().get("a")).get("x" ) ); - - } - - @Test - public void testTimestamp() - throws MongoException { - - DBCollection c = _db.getCollection( "ts1" ); - c.drop(); - c.save( BasicDBObjectBuilder.start().add( "y" , new BSONTimestamp() ).get() ); - - BSONTimestamp t = (BSONTimestamp)c.findOne().get("y"); - assert( t.getTime() > 0 ); - assert( t.getInc() > 0 ); - } - - @Test - public void 
testStrictWriteSetInCollection(){ - DBCollection c = _db.getCollection( "write1" ); - c.drop(); - c.setWriteConcern( WriteConcern.SAFE); - c.insert( new BasicDBObject( "_id" , 1 ) ); - boolean gotError = false; - try { - c.insert( new BasicDBObject( "_id" , 1 ) ); - } - catch ( MongoException.DuplicateKey e ){ - gotError = true; - } - assertEquals( true , gotError ); - - assertEquals( 1 , c.find().count() ); - } - - @Test - public void testStrictWriteSetInMethod(){ - DBCollection c = _db.getCollection( "write1" ); - c.drop(); - c.insert( new BasicDBObject( "_id" , 1 )); - boolean gotError = false; - try { - c.insert( new BasicDBObject( "_id" , 1 ) , WriteConcern.SAFE); - } - catch ( MongoException.DuplicateKey e ){ - gotError = true; - } - assertEquals( true , gotError ); - - assertEquals( 1 , c.find().count() ); - } - - @Test - public void testPattern(){ - DBCollection c = _db.getCollection( "jp1" ); - c.drop(); - - c.insert( new BasicDBObject( "x" , "a" ) ); - c.insert( new BasicDBObject( "x" , "A" ) ); - - assertEquals( 1 , c.find( new BasicDBObject( "x" , Pattern.compile( "a" ) ) ).itcount() ); - assertEquals( 1 , c.find( new BasicDBObject( "x" , Pattern.compile( "A" ) ) ).itcount() ); - assertEquals( 2 , c.find( new BasicDBObject( "x" , Pattern.compile( "a" , Pattern.CASE_INSENSITIVE ) ) ).itcount() ); - assertEquals( 2 , c.find( new BasicDBObject( "x" , Pattern.compile( "A" , Pattern.CASE_INSENSITIVE ) ) ).itcount() ); - } - - - @Test - public void testDates(){ - DBCollection c = _db.getCollection( "dates1" ); - c.drop(); - - DBObject in = new BasicDBObject( "x" , new java.util.Date() ); - c.insert( in ); - DBObject out = c.findOne(); - assertEquals( java.util.Date.class , in.get("x").getClass() ); - assertEquals( in.get( "x" ).getClass() , out.get( "x" ).getClass() ); - } - - @Test - public void testMapReduce(){ - DBCollection c = _db.getCollection( "jmr1" ); - c.drop(); - - c.save( new BasicDBObject( "x" , new String[]{ "a" , "b" } ) ); - c.save( new BasicDBObject( "x" , new String[]{ "b" , "c" } ) ); - c.save( new BasicDBObject( "x" , new String[]{ "c" , "d" } ) ); - - MapReduceOutput out = - c.mapReduce( "function(){ for ( var i=0; i<this.x.length; i++ ){ emit( this.x[i] , 1 ); } }" , - "function(key,values){ var sum=0; for( var i=0; i<values.length; i++ ) sum += values[i]; return sum;}" , - "jmr1_out" , null ); - - Map<String,Integer> m = new HashMap<String,Integer>(); - for ( DBObject r : out.results() ){ - m.put( r.get( "_id" ).toString() , ((Number)(r.get( "value" ))).intValue() ); - } - - assertEquals( 4 , m.size() ); - assertEquals( 1 , m.get( "a" ).intValue() ); - assertEquals( 2 , m.get( "b" ).intValue() ); - assertEquals( 2 , m.get( "c" ).intValue() ); - assertEquals( 1 , m.get( "d" ).intValue() ); - - } - - @Test - public void testMapReduceInline(){ - DBCollection c = _db.getCollection( "imr1" ); - c.drop(); - - c.save( new BasicDBObject( "x" , new String[]{ "a" , "b" } ) ); - c.save( new BasicDBObject( "x" , new String[]{ "b" , "c" } ) ); - c.save( new BasicDBObject( "x" , new String[]{ "c" , "d" } ) ); - - MapReduceOutput out = - c.mapReduce( "function(){ for ( var i=0; i<this.x.length; i++ ){ emit( this.x[i] , 1 ); } }" , - "function(key,values){ var sum=0; for( var i=0; i<values.length; i++ ) sum += values[i]; return sum;}" , - null , MapReduceCommand.OutputType.INLINE , null ); - - Map<String,Integer> m = new HashMap<String,Integer>(); - for ( DBObject r : out.results() ){ - m.put( r.get( "_id" ).toString() , ((Number)(r.get( "value" ))).intValue() ); - } - - assertEquals( 4 , m.size() ); - assertEquals( 1 , m.get( "a" ).intValue() ); - assertEquals( 2 , m.get( "b" ).intValue() ); - assertEquals( 2 , m.get( "c" ).intValue() ); - assertEquals( 1 , m.get( "d" ).intValue() ); - - } - - - //If run against a replicaset this will verify that the inline map/reduce hits the secondary.
- @Test - @SuppressWarnings("deprecation") - public void testMapReduceInlineSecondary() throws Exception { - Mongo mongo = new MongoClient(Arrays.asList(new ServerAddress("127.0.0.1", 27017), new ServerAddress("127.0.0.1", 27018)), - MongoClientOptions.builder().writeConcern(WriteConcern.UNACKNOWLEDGED).build()); - - if (isStandalone(mongo)) { - return; - } - - int size = getReplicaSetSize(mongo); - DBCollection c = mongo.getDB(_db.getName()).getCollection( "imr2" ); - //c.setReadPreference(ReadPreference.SECONDARY); - c.slaveOk(); - c.drop(); - - c.save( new BasicDBObject( "x" , new String[]{ "a" , "b" } ) ); - c.save( new BasicDBObject( "x" , new String[]{ "b" , "c" } ) ); - WriteResult wr = c.save( new BasicDBObject( "x" , new String[]{ "c" , "d" } ) ); - if (mongo.getReplicaSetStatus() != null && mongo.getReplicaSetStatus().getName() != null) { - wr.getLastError(new WriteConcern(size)); - } - - MapReduceOutput out = - c.mapReduce( "function(){ for ( var i=0; i<this.x.length; i++ ){ emit( this.x[i] , 1 ); } }" , - "function(key,values){ var sum=0; for( var i=0; i<values.length; i++ ) sum += values[i]; return sum;}" , - null , MapReduceCommand.OutputType.INLINE , null ); - - Map<String,Integer> m = new HashMap<String,Integer>(); - for ( DBObject r : out.results() ){ - m.put( r.get( "_id" ).toString() , ((Number)(r.get( "value" ))).intValue() ); - } - - assertEquals( 4 , m.size() ); - assertEquals( 1 , m.get( "a" ).intValue() ); - assertEquals( 2 , m.get( "b" ).intValue() ); - assertEquals( 2 , m.get( "c" ).intValue() ); - assertEquals( 1 , m.get( "d" ).intValue() ); - ReplicaSetStatus replStatus = mongo.getReplicaSetStatus(); - //if it is a replicaset, and there is no master, or master is not the secondary - if( replStatus!= null && replStatus.getName() != null && ((replStatus.getMaster() == null) || (replStatus.getMaster() != null && !replStatus.getMaster().equals(replStatus.getASecondary())))) - assertTrue( !mongo.getReplicaSetStatus().isMaster( out.getCommandResult().getServerUsed() ), - "Had a replicaset but didn't use secondary!
replSetStatus : " + mongo.getReplicaSetStatus() + " \n Used: " + out.getCommandResult().getServerUsed() + " \n "); - } - - @Test - public void testMapReduceInlineWScope(){ - DBCollection c = _db.getCollection( "jmr2" ); - c.drop(); - - c.save( new BasicDBObject( "x" , new String[]{ "a" , "b" } ) ); - c.save( new BasicDBObject( "x" , new String[]{ "b" , "c" } ) ); - c.save( new BasicDBObject( "x" , new String[]{ "c" , "d" } ) ); - - Map<String, Object> scope = new HashMap<String, Object>(); - scope.put("exclude", "a"); - - MapReduceCommand mrc = new MapReduceCommand( c, "function(){ for ( var i=0; i<this.x.length; i++ ){ if(this.x[i] != exclude) emit( this.x[i] , 1 ); } }" , - "function(key,values){ var sum=0; for( var i=0; i<values.length; i++ ) sum += values[i]; return sum;}" , - null, MapReduceCommand.OutputType.INLINE, null ); - mrc.setScope( scope ); - - MapReduceOutput out = c.mapReduce( mrc ); - - Map<String,Integer> m = new HashMap<String,Integer>(); - for ( DBObject r : out.results() ){ - m.put( r.get( "_id" ).toString() , ((Number)(r.get( "value" ))).intValue() ); - } - - assertEquals( 3 , m.size() ); - assertEquals( 2 , m.get( "b" ).intValue() ); - assertEquals( 2 , m.get( "c" ).intValue() ); - assertEquals( 1 , m.get( "d" ).intValue() ); - - } - - @Test - public void testAggregation(){ - if (!serverIsAtLeastVersion(2.1)) { - return; - } - - DBCollection c = _db.getCollection( "aggregationTest" ); - c.drop(); - - DBObject foo = new BasicDBObject( "name" , "foo" ) ; - DBObject bar = new BasicDBObject( "name" , "bar" ) ; - DBObject baz = new BasicDBObject( "name" , "foo" ) ; - foo.put( "count", 5 ); - bar.put( "count", 2 ); - baz.put( "count", 7 ); - c.insert( foo ); - c.insert( bar ); - c.insert( baz ); - - DBObject projFields = new BasicDBObject( "name", 1 ); - projFields.put("count", 1); - - DBObject group = new BasicDBObject( ); - group.put("_id", "$name" ); - group.put( "docsPerName", new BasicDBObject( "$sum", 1 )); - group.put( "countPerName", new BasicDBObject( "$sum", "$count" )); - - AggregationOutput out = c.aggregate( new BasicDBObject( "$project", projFields ), new BasicDBObject( "$group", group) ); - - Map<String, DBObject> results = new HashMap<String, DBObject>(); - for(DBObject result : out.results()) - results.put((String)result.get("_id"), result); - - DBObject fooResult = results.get("foo"); - assertNotNull(fooResult); - assertEquals(2, fooResult.get("docsPerName")); - assertEquals(12, fooResult.get("countPerName")); - - DBObject barResult = results.get("bar"); - assertNotNull(barResult); - assertEquals(1, barResult.get("docsPerName")); - assertEquals(2, barResult.get("countPerName")); - - DBObject aggregationCommand = out.getCommand(); - assertNotNull(aggregationCommand); - assertEquals(c.getName(), aggregationCommand.get("aggregate")); - assertNotNull(aggregationCommand.get("pipeline")); - } - - String _testMulti( DBCollection c ){ - String s = ""; - for ( DBObject z : c.find().sort( new BasicDBObject( "_id" , 1 ) ) ){ - if ( s.length() > 0 ) - s += ","; - s += z.get( "x" ); - } - return s; - } - - @Test - public void testMulti(){ - DBCollection c = _db.getCollection( "multi1" ); - c.drop(); - - c.insert( BasicDBObjectBuilder.start( "_id" , 1 ).add( "x" , 1 ).get() ); - c.insert( BasicDBObjectBuilder.start( "_id" , 2 ).add( "x" , 5 ).get() ); - - assertEquals( "1,5" , _testMulti( c ) ); - - c.update( new BasicDBObject() , BasicDBObjectBuilder.start().push( "$inc" ).add( "x" , 1 ).get() ); - assertEquals( "2,5" , _testMulti( c ) ); - - c.update( new BasicDBObject( "_id" , 2 ) , BasicDBObjectBuilder.start().push( "$inc" ).add( "x" , 1 ).get() ); - assertEquals( "2,6" , _testMulti( c ) ); - - c.updateMulti( new BasicDBObject() , BasicDBObjectBuilder.start().push( "$inc" ).add( "x" , 1 ).get() ); - assertEquals( "3,7" , _testMulti( c ) ); - - } - - @Test - public void testAuthenticate() throws UnknownHostException { - assertEquals( "26e3d12bd197368526409177b3e8aab6" ,
_db._hash( "e" , "j".toCharArray() ) ); - - Mongo m = new MongoClient(); - DB db = m.getDB(cleanupDB); - DBCollection usersCollection = db.getCollection( "system.users" ); - - try { - usersCollection.remove(new BasicDBObject()); - assertEquals(0, usersCollection.find().count()); - - db.addUser("xx" , "e".toCharArray() ); - assertEquals(1, usersCollection.find().count()); - - assertEquals(false, db.authenticate( "xx" , "f".toCharArray() ) ); - assertNull(db.getAuthenticationCredentials()); - assertNull(_mongo.getAuthority().getCredentialsStore().get(db.getName())); - assertEquals(true, db.authenticate("xx", "e".toCharArray())); - assertEquals(MongoCredential.createMongoCRCredential("xx", db.getName(), "e".toCharArray()), db.getAuthenticationCredentials()); - assertEquals(db.getAuthenticationCredentials(), m.getAuthority().getCredentialsStore().get(db.getName())); - - assertEquals(true, db.authenticate( "xx" , "e".toCharArray() ) ); - try { - db.authenticateCommand("xx", "f".toCharArray()); - fail("can't auth with different credentials"); - } catch (IllegalStateException e) { - // all good; - } - } - finally { - usersCollection.remove( new BasicDBObject() ); - m.close(); - } - } - - @Test - public void testAuthenticateCommand() throws UnknownHostException { - Mongo m = new MongoClient(); - DB db = m.getDB(cleanupDB); - DBCollection usersCollections = db.getCollection( "system.users" ); - - try { - usersCollections.remove(new BasicDBObject()); - assertEquals(0, usersCollections.find().count()); - - db.addUser("xx", "e".toCharArray()); - assertEquals(1, usersCollections.find().count()); - - try { - db.authenticateCommand( "xx" , "f".toCharArray()); - fail("Auth should have failed"); - } catch (CommandFailureException e) { - // all good - } - assertTrue(db.authenticateCommand("xx", "e".toCharArray()).ok()); - assertTrue(db.authenticateCommand("xx", "e".toCharArray()).ok()); - try { - db.authenticateCommand("xx", "f".toCharArray()); - fail("can't auth with different credentials"); - } catch (IllegalStateException e) { - // all good; - } - } - finally { - usersCollections.remove(new BasicDBObject()); - m.close(); - } - } - - @Test - public void testAuthenticateWithCredentialsInURIAndNoDatabase() throws UnknownHostException { - // First add the user - Mongo m = new MongoClient(new MongoClientURI("mongodb://localhost")); - DB db = m.getDB("admin"); - DBCollection usersCollection = db.getCollection( "system.users" ); - try { - usersCollection.remove(new BasicDBObject()); - assertEquals(0, usersCollection.find().count()); - - db.addUser("xx", "e".toCharArray()); - } - finally { - m.close(); - } - - m = new MongoClient(new MongoClientURI("mongodb://xx:e@localhost")); - db = m.getDB("admin"); - - try { - assertEquals(1, m.getDB("admin").getCollection("system.users").find().count()); - assertNotNull(db.getAuthenticationCredentials()); - assertEquals(true, db.authenticate("xx", "e".toCharArray()) ); - } - finally { - db.getCollection( "system.users" ).remove(new BasicDBObject()); - m.close(); - } - } - - @Test - public void testAuthenticateWithCredentialsInURI() throws UnknownHostException { - // First add the user - Mongo m = new MongoClient(new MongoClientURI("mongodb://localhost")); - DB db = m.getDB(cleanupDB); - DBCollection usersCollection = db.getCollection( "system.users" ); - try { - usersCollection.remove(new BasicDBObject()); - assertEquals(0, usersCollection.find().count()); - - db.addUser("xx", "e".toCharArray()); - assertEquals(1, usersCollection.find().count()); - } - finally { - 
m.close(); - } - - m = new MongoClient(new MongoClientURI("mongodb://xx:e@localhost/" + cleanupDB)); - db = m.getDB(cleanupDB); - - try { - assertNotNull(db.getAuthenticationCredentials()); - assertEquals(true, db.authenticate("xx", "e".toCharArray()) ); - } - finally { - db.getCollection( "system.users" ).remove(new BasicDBObject()); - m.close(); - } - } - - @Test - public void testAuthenticateCommandWithCredentialsInURI() throws UnknownHostException { - // First add the user - Mongo m = new MongoClient(new MongoClientURI("mongodb://localhost")); - DB db = m.getDB(cleanupDB); - DBCollection usersCollection = db.getCollection( "system.users" ); - try { - usersCollection.remove(new BasicDBObject()); - assertEquals(0, usersCollection.find().count()); - - db.addUser("xx", "e".toCharArray()); - assertEquals(1, usersCollection.find().count()); - } - finally { - m.close(); - } - - m = new MongoClient(new MongoClientURI("mongodb://xx:e@localhost/" + cleanupDB)); - db = m.getDB(cleanupDB); - - try { - assertNotNull(db.getAuthenticationCredentials()); - assertTrue(db.authenticateCommand("xx", "e".toCharArray()).ok()); - } - finally { - db.getCollection( "system.users" ).remove(new BasicDBObject()); - m.close(); - } - } - - @Test - public void testTransformers(){ - DBCollection c = _db.getCollection( "tt" ); - c.drop(); - - c.save( BasicDBObjectBuilder.start( "_id" , 1 ).add( "x" , 1.1 ).get() ); - assertEquals( Double.class , c.findOne().get( "x" ).getClass() ); - - Bytes.addEncodingHook( Double.class , new Transformer(){ - public Object transform( Object o ){ - return o.toString(); - } - } ); - - c.save( BasicDBObjectBuilder.start( "_id" , 1 ).add( "x" , 1.1 ).get() ); - assertEquals( String.class , c.findOne().get( "x" ).getClass() ); - - Bytes.clearAllHooks(); - c.save( BasicDBObjectBuilder.start( "_id" , 1 ).add( "x" , 1.1 ).get() ); - assertEquals( Double.class , c.findOne().get( "x" ).getClass() ); - - Bytes.addDecodingHook( Double.class , new Transformer(){ - public Object transform( Object o ){ - return o.toString(); - } - } ); - assertEquals( String.class , c.findOne().get( "x" ).getClass() ); - Bytes.clearAllHooks(); - assertEquals( Double.class , c.findOne().get( "x" ).getClass() ); - } - - - @Test - public void testObjectIdCompat(){ - DBCollection c = _db.getCollection( "oidc" ); - c.drop(); - - c.save( new BasicDBObject( "x" , 1 ) ); - _db.eval( "db.oidc.insert( { x : 2 } );" ); - - List<DBObject> l = c.find().toArray(); - assertEquals( 2 , l.size() ); - - ObjectId a = (ObjectId)(l.get(0).get("_id")); - ObjectId b = (ObjectId)(l.get(1).get("_id")); - - assertLess( Math.abs( a.getTime() - b.getTime() ) , 10000 ); - } - - @Test - public void testObjectIdCompat2(){ - DBCollection c = _db.getCollection( "oidc" ); - c.drop(); - - c.save( new BasicDBObject( "x" , 1 ) ); - - String o = (String) _db.eval( "return db.oidc.findOne()._id.toString()" ); - // printing on servers has changed in 2.1 - if (o.startsWith("ObjectId")) - o = (String) _db.eval( "return db.oidc.findOne()._id.valueOf()" ); - String x = c.findOne().get( "_id" ).toString(); - assertEquals( x , o ); - } - - - @Test - public void testLargeBulkInsert(){ - // max size should be obtained from server - int maxObjSize = _mongo.getMaxBsonObjectSize(); - DBCollection c = _db.getCollection( "largebulk" ); - c.drop(); - String s = "asdasdasd"; - while ( s.length() < 10000 ) - s += s; - List<DBObject> l = new ArrayList<DBObject>(); - final int num = 3 * ( maxObjSize / s.length() ); - - for ( int i=0; i a = c.find( new BasicDBObject( "x" , new BasicDBObject( "$in" ,
new Integer[]{ 2 , 3 } ) ) ).toArray(); - assertEquals( 2 , a.size() ); - } - - @Test - public void testWriteResultWithGetLastErrorWithDifferentConcerns(){ - DBCollection c = _db.getCollection( "writeresultwfle1" ); - c.drop(); - - c.insert( new BasicDBObject( "_id" , 1 ) ); - WriteResult res = c.update( new BasicDBObject( "_id" , 1 ) , new BasicDBObject( "$inc" , new BasicDBObject( "x" , 1 ) ) ); - assertEquals( 1 , res.getN() ); - assertTrue( res.isLazy() ); - - CommandResult cr = res.getLastError( WriteConcern.FSYNC_SAFE ); - assertEquals( 1 , cr.getInt( "n" ) ); - assertTrue(cr.containsField("fsyncFiles") || cr.containsField("waited")); - - CommandResult cr2 = res.getLastError( WriteConcern.FSYNC_SAFE ); - assertTrue( cr == cr2 ); - - CommandResult cr3 = res.getLastError( WriteConcern.NONE ); - assertTrue( cr3 == cr ); - - } - - @Test - public void testWriteResult(){ - DBCollection c = _db.getCollection( "writeresult1" ); - c.drop(); - - c.insert( new BasicDBObject( "_id" , 1 ) ); - WriteResult res = c.update( new BasicDBObject( "_id" , 1 ) , new BasicDBObject( "$inc" , new BasicDBObject( "x" , 1 ) ) ); - assertEquals( 1 , res.getN() ); - assertTrue( res.isLazy() ); - - c.setWriteConcern( WriteConcern.SAFE); - res = c.update( new BasicDBObject( "_id" , 1 ) , new BasicDBObject( "$inc" , new BasicDBObject( "x" , 1 ) ) ); - assertEquals( 1 , res.getN() ); - assertFalse( res.isLazy() ); - } - - @Test - public void testWriteResultMethodLevelWriteConcern(){ - DBCollection c = _db.getCollection( "writeresult2" ); - c.drop(); - - c.insert( new BasicDBObject( "_id" , 1 ) ); - WriteResult res = c.update( new BasicDBObject( "_id" , 1 ) , new BasicDBObject( "$inc" , new BasicDBObject( "x" , 1 ) ) ); - assertEquals( 1 , res.getN() ); - assertTrue(res.isLazy()); - - res = c.update( new BasicDBObject( "_id" , 1 ) , new BasicDBObject( "$inc" , new BasicDBObject( "x" , 1 ) ) , false , false , WriteConcern.SAFE); - assertEquals( 1 , res.getN() ); - assertFalse(res.isLazy()); - } - - @Test - public void testWriteConcernValueOf() { - WriteConcern wc1 = WriteConcern.NORMAL; - WriteConcern wc2 = WriteConcern.valueOf( "normal" ); - WriteConcern wc3 = WriteConcern.valueOf( "NORMAL" ); - - assertEquals( wc1, wc2 ); - assertEquals( wc1, wc3 ); - assertEquals( wc1.getW(), wc2.getW() ); - assertEquals( wc1.getWObject(), wc2.getWObject() ); - assertEquals( wc1.getW(), wc3.getW() ); - assertEquals( wc1.getWObject(), wc3.getWObject() ); - } - - @Test - public void testWriteConcernMajority() { - WriteConcern wc1 = WriteConcern.MAJORITY; - WriteConcern wc2 = WriteConcern.valueOf( "majority" ); - WriteConcern wc3 = WriteConcern.valueOf( "MAJORITY" ); - - assertEquals( wc1, wc2 ); - assertEquals( wc1, wc3 ); - assertEquals( wc1.getWString(), wc2.getWString() ); - assertEquals( wc1.getWObject(), wc2.getWObject() ); - assertEquals( wc1.getWString(), wc3.getWString() ); - assertEquals( wc1.getWObject(), wc3.getWObject() ); - } - - @Test - public void testFindAndModify(){ - DBCollection c = _db.getCollection( "findandmodify" ); - c.drop(); - - c.insert( new BasicDBObject( "_id" , 1 ) ); - //return old one - DBObject dbObj = c.findAndModify( new BasicDBObject( "_id" , 1 ) , null, new BasicDBObject( "x", 1)); - assertEquals( 1 , dbObj.keySet().size()); - assertEquals( 1 , c.findOne(new BasicDBObject( "_id" , 1 ) ).get( "x" )); - - //return new one - dbObj = c.findAndModify( new BasicDBObject( "_id" , 1 ) , null, null, false, new BasicDBObject( "x", 5), true, false); - assertEquals( 2 , dbObj.keySet().size()); - assertEquals( 
5 , dbObj.get( "x" )); - assertEquals( 5 , c.findOne(new BasicDBObject( "_id" , 1 ) ).get( "x" )); - - //remove it, and return old one - dbObj = c.findAndRemove( new BasicDBObject( "_id" , 1 ) ); - assertEquals( 2 , dbObj.keySet().size()); - assertEquals( 5 , dbObj.get( "x" )); - assertNull( c.findOne(new BasicDBObject( "_id" , 1 ) )); - - // create new one with upsert and return it - dbObj = c.findAndModify( new BasicDBObject( "_id" , 2 ) , null, null, false, new BasicDBObject("$set", new BasicDBObject("a", 6)), true, true); - assertEquals( 2 , dbObj.keySet().size()); - assertEquals( 6 , dbObj.get( "a" )); - assertEquals( 6 , c.findOne(new BasicDBObject( "_id" , 2 ) ).get( "a" )); - - // create new one with upsert and don't return it - dbObj = c.findAndModify( new BasicDBObject( "_id" , 3 ) , null, null, false, new BasicDBObject("$set", new BasicDBObject("b", 7)), false, true); - - assertEquals( 7 , c.findOne(new BasicDBObject( "_id" , 3 ) ).get( "b" )); - if (serverIsAtLeastVersion(2.1)) { - assertNull(dbObj); - } else { - assertEquals(0, dbObj.keySet().size()); - } - - // test exception throwing - c.insert( new BasicDBObject( "a" , 1 ) ); - try { - c.findAndModify( null, null ); - fail("Exception not thrown when no update nor remove"); - } catch (MongoException e) { - } - - try { - dbObj = c.findAndModify( new BasicDBObject("a", "noexist"), null ); - if (!serverIsAtLeastVersion(2.1)) { - assertNull(dbObj); - } - } catch (MongoException e) { - if (!serverIsAtLeastVersion(2.1)) { - fail("Exception thrown when matching record"); - } - } - } - - @Test - public void testGetCollectionFromString(){ - DBCollection c = _db.getCollectionFromString( "foo" ); - assertEquals( "foo" , c.getName() ); - - c = _db.getCollectionFromString( "foo.bar" ); - assertEquals( "foo.bar" , c.getName() ); - - c = _db.getCollectionFromString( "foo.bar.zoo" ); - assertEquals( "foo.bar.zoo" , c.getName() ); - - c = _db.getCollectionFromString( "foo.bar.zoo.dork" ); - assertEquals( "foo.bar.zoo.dork" , c.getName() ); - - } - - @Test - public void testBadKey(){ - DBCollection c = _db.getCollectionFromString( "foo" ); - assertEquals( "foo" , c.getName() ); - - try { - c.insert(new BasicDBObject("a.b", 1)); - fail("Bad key was accepted"); - } catch (IllegalArgumentException e) {} - - try { - Map data = new HashMap(); - data.put("a.b", 1); - c.insert(new BasicDBObject("data", data)); - fail("Bad key was accepted"); - } catch (IllegalArgumentException e) {} - - try { - c.insert(new BasicDBObject("$a", 1)); - fail("Bad key was accepted"); - } catch (IllegalArgumentException e) {} - - try { - c.save(new BasicDBObject("a.b", 1)); - fail("Bad key was accepted"); - } catch (IllegalArgumentException e) {} - - try { - c.save(new BasicDBObject("$a", 1)); - fail("Bad key was accepted"); - } catch (IllegalArgumentException e) {} - - try { - final BasicDBList list = new BasicDBList(); - list.add(new BasicDBObject("$a", 1)); - c.save(new BasicDBObject("a", list)); - fail("Bad key was accepted"); - } catch (IllegalArgumentException e) {} - - try { - final List list = Arrays.asList(new BasicDBObject("$a", 1)); - c.save(new BasicDBObject("a", list)); - fail("Bad key was accepted"); - } catch (IllegalArgumentException e) {} - -// try { -// c.save(new BasicDBObject("a", Arrays.asList(new BasicDBObject("$a", 1)))); -// fail("Bad key was accepted"); -// } catch (IllegalArgumentException e) {} - - c.insert(new BasicDBObject("a", 1)); - try { - c.update(new BasicDBObject("a", 1), new BasicDBObject("a.b", 1)); - fail("Bad key was accepted"); - } 
catch (IllegalArgumentException e) {} - - // this should work because it's a query - c.update(new BasicDBObject("a", 1), new BasicDBObject("$set", new BasicDBObject("a.b", 1))); - } - - @Test - public void testAllTypes(){ - DBCollection c = _db.getCollectionFromString( "foo" ); - c.drop(); - String json = "{ 'str' : 'asdfasd' , 'long' : 5 , 'float' : 0.4 , 'bool' : false , 'date' : { '$date' : '2011-05-18T18:56:00Z'} , 'pat' : { '$regex' : '.*' , '$options' : ''} , 'oid' : { '$oid' : '4d83ab3ea39562db9c1ae2ae'} , 'ref' : { '$ref' : 'test.test' , '$id' : { '$oid' : '4d83ab59a39562db9c1ae2af'}} , 'code' : { '$code' : 'asdfdsa'} , 'codews' : { '$code' : 'ggggg' , '$scope' : { }} , 'ts' : { '$ts' : 1300474885 , '$inc' : 10} , 'null' : null, 'uuid' : { '$uuid' : '60f65152-6d4a-4f11-9c9b-590b575da7b5' }}"; - BasicDBObject a = (BasicDBObject) JSON.parse(json); - c.insert(a); - DBObject b = c.findOne(); - assertTrue(a.equals(b)); - } - - @Test - @SuppressWarnings("deprecation") - public void testMongoHolder() throws MongoException, UnknownHostException { - Mongo m1 = Mongo.Holder.singleton().connect( new MongoURI( "mongodb://localhost" ) ); - Mongo m2 = Mongo.Holder.singleton().connect( new MongoURI( "mongodb://localhost" ) ); - - assertEquals( m1, m2); - } - final Mongo _mongo; - final DB _db; - - public static void main( String args[] ) - throws Exception { - JavaClientTest ct = new JavaClientTest(); - ct.runConsole(); - } -} diff --git a/src/test/com/mongodb/LazyDBObjectTest.java b/src/test/com/mongodb/LazyDBObjectTest.java deleted file mode 100644 index 6b60c5ea412..00000000000 --- a/src/test/com/mongodb/LazyDBObjectTest.java +++ /dev/null @@ -1,390 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.mongodb; - -import com.mongodb.util.TestCase; -import org.bson.BSONEncoder; -import org.bson.BasicBSONEncoder; -import org.bson.io.BasicOutputBuffer; -import org.bson.io.OutputBuffer; -import org.bson.types.BSONTimestamp; -import org.bson.types.Binary; -import org.bson.types.Code; -import org.bson.types.MaxKey; -import org.bson.types.MinKey; -import org.bson.types.ObjectId; -import org.bson.types.Symbol; -import org.testng.annotations.BeforeMethod; -import org.testng.annotations.Test; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.util.Arrays; -import java.util.Date; -import java.util.Iterator; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import java.util.regex.Pattern; - -@SuppressWarnings( { "unchecked" , "deprecation" } ) -public class LazyDBObjectTest extends TestCase { - - public LazyDBObjectTest(){ - cleanupDB = "com_mongodb_unittest_LazyDBObjectTest"; - _db = cleanupMongo.getDB(cleanupDB); - } - - BSONEncoder e; - OutputBuffer buf; - ByteArrayOutputStream bios; - LazyDBDecoder lazyDBDecoder; - DefaultDBDecoder defaultDBDecoder; - - @BeforeMethod - public void beforeMethod() { - e = new BasicBSONEncoder(); - buf = new BasicOutputBuffer(); - e.set(buf); - bios = new ByteArrayOutputStream(); - lazyDBDecoder = new LazyDBDecoder(); - defaultDBDecoder = new DefaultDBDecoder(); - } - - @Test - public void testDecodeAllTypes() throws IOException { - - DBObject origDoc = createTestDoc(); - e.putObject(origDoc); - buf.pipe(bios); - DBObject doc = defaultDBDecoder.decode(new ByteArrayInputStream(bios.toByteArray()), (DBCollection) null); - compareDocs(origDoc, doc); - } - - @Test - public void testLazyDecodeAllTypes() - throws InterruptedException, IOException { - - DBObject origDoc = createTestDoc(); - e.putObject(origDoc); - buf.pipe(bios); - DBObject doc = lazyDBDecoder.decode(new ByteArrayInputStream(bios.toByteArray()), (DBCollection) null); - compareDocs(origDoc, doc); - } - - @Test - public void testMissingKey() throws IOException { - e.putObject(createSimpleTestDoc()); - buf.pipe(bios); - - DBObject decodedObj = lazyDBDecoder.decode(new ByteArrayInputStream(bios.toByteArray()), (DBCollection) null); - assertNull(decodedObj.get("missingKey")); - } - - @Test - public void testKeySet() throws IOException { - DBObject obj = createSimpleTestDoc(); - e.putObject(obj); - buf.pipe(bios); - - DBObject decodedObj = lazyDBDecoder.decode(new ByteArrayInputStream(bios.toByteArray()), (DBCollection) null); - assertNotNull(decodedObj); - assertTrue(decodedObj instanceof LazyDBObject); - LazyDBObject lazyDBObj = (LazyDBObject) decodedObj; - - Set<String> keySet = lazyDBObj.keySet(); - - assertEquals(5, keySet.size()); - assertFalse(keySet.isEmpty()); - - Object[] keySetArray = keySet.toArray(); - assertEquals(5, keySetArray.length); - - String[] typedArray = keySet.toArray(new String[0]); - assertEquals(5, typedArray.length); - - typedArray = keySet.toArray(new String[6]); - assertEquals(6, typedArray.length); - assertNull(typedArray[5]); - - assertTrue(keySet.contains("first")); - assertFalse(keySet.contains("x")); - - assertTrue(keySet.containsAll(Arrays.asList("first", "second"))); - assertFalse(keySet.containsAll(Arrays.asList("first", "notFound"))); - - Iterator<String> iter = keySet.iterator(); - - assertTrue(iter.hasNext()); - assertEquals("_id", iter.next()); - assertTrue(iter.hasNext()); - assertEquals("first", iter.next()); - assertTrue(iter.hasNext()); - assertEquals("second",
iter.next()); - assertTrue(iter.hasNext()); - assertEquals("third", iter.next()); - assertTrue(iter.hasNext()); - assertEquals("fourth", iter.next()); - assertFalse(iter.hasNext()); - - assertEquals(obj.get("_id"), lazyDBObj.get("_id")); - assertEquals(obj.get("first"), lazyDBObj.get("first")); - assertEquals(obj.get("second"), lazyDBObj.get("second")); - assertEquals(obj.get("third"), lazyDBObj.get("third")); - assertEquals(obj.get("fourth"), lazyDBObj.get("fourth")); - } - - @Test - public void testEntrySet() throws IOException { - DBObject obj = createSimpleTestDoc(); - e.putObject(obj); - buf.pipe(bios); - - DBObject decodedObj = lazyDBDecoder.decode(new ByteArrayInputStream(bios.toByteArray()), (DBCollection) null); - LazyDBObject lazyDBObj = (LazyDBObject) decodedObj; - - Set<Map.Entry<String, Object>> entrySet = lazyDBObj.entrySet(); - assertEquals(5, entrySet.size()); - assertFalse(entrySet.isEmpty()); - - Object[] entrySetArray = entrySet.toArray(); - assertEquals(5, entrySetArray.length); // kind of a lame test - - Map.Entry[] typedArray = entrySet.toArray(new Map.Entry[0]); - assertEquals(5, typedArray.length); - - typedArray = entrySet.toArray(new Map.Entry[6]); - assertEquals(6, typedArray.length); - assertNull(typedArray[5]); - - assertTrue(entrySet.contains(new TestMapEntry("first", 1))); - assertFalse(entrySet.contains(new TestMapEntry("first", 2))); - assertFalse(entrySet.contains(new TestMapEntry("x", 1))); - - assertTrue(entrySet.containsAll(Arrays.asList(new TestMapEntry("first", 1), new TestMapEntry("second", "str1")))); - assertFalse(entrySet.containsAll(Arrays.asList(new TestMapEntry("first", 1), new TestMapEntry("second", "str2")))); - - Iterator<Map.Entry<String, Object>> entryIter = entrySet.iterator(); - - assertTrue(entryIter.hasNext()); - Map.Entry<String, Object> next = entryIter.next(); - assertEquals("_id", next.getKey()); - assertEquals(obj.get("_id"), next.getValue()); - - assertTrue(entryIter.hasNext()); - next = entryIter.next(); - assertEquals("first", next.getKey()); - assertEquals(obj.get("first"), next.getValue()); - - assertTrue(entryIter.hasNext()); - next = entryIter.next(); - assertEquals("second", next.getKey()); - assertEquals(obj.get("second"), next.getValue()); - - assertTrue(entryIter.hasNext()); - next = entryIter.next(); - assertEquals("third", next.getKey()); - assertEquals(obj.get("third"), next.getValue()); - - assertTrue(entryIter.hasNext()); - next = entryIter.next(); - assertEquals("fourth", next.getKey()); - assertEquals(obj.get("fourth"), next.getValue()); - - assertFalse(entryIter.hasNext()); - } - - @Test - public void testEqualsAndHashCode() throws IOException { - DBObject obj = createSimpleTestDoc(); - e.putObject(obj); - buf.pipe(bios); - - LazyDBObject lazyOne = (LazyDBObject) lazyDBDecoder.decode(new ByteArrayInputStream(bios.toByteArray()), - (DBCollection) null); - - LazyDBObject lazyTwo = (LazyDBObject) lazyDBDecoder.decode(new ByteArrayInputStream(bios.toByteArray()), - (DBCollection) null); - - assertTrue(lazyOne.equals(lazyTwo)); - assertEquals(lazyOne.hashCode(), lazyTwo.hashCode()); - } - - @Test - public void testPipe() throws IOException { - DBObject obj = createSimpleTestDoc(); - e.putObject(obj); - buf.pipe(bios); - - LazyDBObject lazyDBObj = (LazyDBObject) lazyDBDecoder.decode(new ByteArrayInputStream(bios.toByteArray()), - (DBCollection) null); - bios.reset(); - int byteCount = lazyDBObj.pipe(bios); - assertEquals(lazyDBObj.getBSONSize(), byteCount); - - LazyDBObject lazyDBObjectFromPipe = (LazyDBObject) lazyDBDecoder.decode(new ByteArrayInputStream(bios.toByteArray()), -
(DBCollection) null); - - assertEquals(lazyDBObj, lazyDBObjectFromPipe); - } - - @Test - public void testLazyDBEncoder() throws IOException { - // this is all set up just to get a lazy db object that can be encoded - DBObject obj = createSimpleTestDoc(); - e.putObject(obj); - buf.pipe(bios); - LazyDBObject lazyDBObj = (LazyDBObject) lazyDBDecoder.decode( - new ByteArrayInputStream(bios.toByteArray()), (DBCollection) null); - - // now to the actual test - LazyDBEncoder encoder = new LazyDBEncoder(); - BasicOutputBuffer outputBuffer = new BasicOutputBuffer(); - int size = encoder.writeObject(outputBuffer, lazyDBObj); - assertEquals(lazyDBObj.getBSONSize(), size); - assertEquals(lazyDBObj.getBSONSize(), outputBuffer.size()); - - // this is just asserting that the encoder actually piped the correct bytes - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - lazyDBObj.pipe(baos); - assertArrayEquals(baos.toByteArray(), outputBuffer.toByteArray()); - } - - private DBObject createSimpleTestDoc() { - DBObject obj = new BasicDBObject("_id", new ObjectId()); - obj.put("first", 1); - obj.put("second", "str1"); - obj.put("third", true); - obj.put("fourth", new BasicDBObject("firstNested", 1)); - return obj; - } - - - private DBObject createTestDoc() { - ObjectId oid = new ObjectId(); - ObjectId test_oid = new ObjectId(); - ObjectId test_ref_id = new ObjectId(); - DBObject test_doc = new BasicDBObject( "abc", "12345" ); - String[] test_arr = new String[] { "foo" , "bar" , "baz" , "x" , "y" , "z" }; - BSONTimestamp test_tsp = new BSONTimestamp(); - Date test_date = new Date(); - Binary test_bin = new Binary( "scott".getBytes() ); - UUID test_uuid = UUID.randomUUID(); - Pattern test_regex = Pattern.compile( "^test.*regex.*xyz$", Pattern.CASE_INSENSITIVE ); - BasicDBObjectBuilder b = BasicDBObjectBuilder.start(); - b.append( "_id", oid ); - b.append( "null", null ); - b.append( "max", new MaxKey() ); - b.append( "min", new MinKey() ); - b.append( "booleanTrue", true ); - b.append( "booleanFalse", false ); - b.append( "int1", 1 ); - b.append( "int1500", 1500 ); - b.append( "int3753", 3753 ); - b.append( "tsp", test_tsp ); - b.append( "date", test_date ); - b.append( "long5", 5L ); - b.append( "long3254525", 3254525L ); - b.append( "float324_582", 324.582f ); - b.append( "double245_6289", 245.6289 ); - b.append( "oid", test_oid ); - // Symbol wonky - b.append( "symbol", new Symbol( "foobar" ) ); - // Code wonky - b.append( "code", new Code( "var x = 12345;" ) ); - // TODO - Shell doesn't work with Code W/ Scope, return to this test later - /* - b.append( "code_scoped", new CodeWScope( "return x * 500;", test_doc ) );*/ - b.append( "str", "foobarbaz" ); - b.append( "ref", new DBRef( _db, "testRef", test_ref_id ) ); - b.append( "object", test_doc ); - b.append( "array", test_arr ); - b.append( "binary", test_bin ); - b.append( "uuid", test_uuid ); - b.append( "regex", test_regex ); - - return b.get(); - } - - private void compareDocs(DBObject origDoc, DBObject doc) { - assertEquals( origDoc.get( "str" ), doc.get( "str" ) ); - assertEquals( origDoc.get("_id"), doc.get("_id")); - assertNull( doc.get( "null" ) ); - assertEquals( origDoc.get("max"), doc.get( "max" ) ); - assertEquals( origDoc.get("min"), doc.get( "min" ) ); - assertEquals( true, doc.get( "booleanTrue" ) ); - assertEquals( false, doc.get( "booleanFalse" )); - assertEquals( origDoc.get( "int1" ), doc.get( "int1" ) ); - assertEquals( origDoc.get( "int1500" ), doc.get( "int1500" ) ); - assertEquals( origDoc.get( "int3753" ), doc.get( 
"int3753" ) ); - assertEquals( origDoc.get( "tsp" ), doc.get( "tsp" )); - assertEquals( doc.get( "date" ), doc.get( "date" ) ); - assertEquals( doc.get( "long5" ), 5L ); - assertEquals( doc.get( "long3254525" ), 3254525L ); - // Match against what is expected for MongoDB to store the float as - assertEquals( doc.get( "float324_582" ), 324.5820007324219 ); - assertEquals( doc.get( "double245_6289" ), 245.6289 ); - assertEquals( origDoc.get( "oid"), doc.get( "oid" ) ); - assertEquals( origDoc.get( "symbol"), doc.get( "symbol" ) ); - assertEquals( doc.get( "str" ), "foobarbaz" ); - assertEquals( origDoc.get( "ref" ), doc.get( "ref" ) ); - assertEquals(((DBObject) origDoc.get("object")).get("abc"), ((DBObject) doc.get("object")).get("abc")); - assertEquals( ( (DBObject) doc.get( "array" ) ).get( "0" ), "foo" ); - assertEquals( ( (DBObject) doc.get( "array" ) ).get( "1" ), "bar" ); - assertEquals( ( (DBObject) doc.get( "array" ) ).get( "2" ), "baz" ); - assertEquals( ( (DBObject) doc.get( "array" ) ).get( "3" ), "x" ); - assertEquals( ( (DBObject) doc.get( "array" ) ).get( "4" ), "y" ); - assertEquals( ( (DBObject) doc.get( "array" ) ).get( "5" ), "z" ); - assertEquals( new String( ((Binary) origDoc.get( "binary")).getData()), new String((byte[]) doc.get( "binary" ))); - assertEquals( origDoc.get( "uuid"), doc.get( "uuid" ) ); - assertEquals( ( (Pattern) origDoc.get( "regex" ) ).pattern(), ((Pattern) doc.get( "regex" ) ).pattern() ); - assertEquals( ( (Pattern) doc.get( "regex" ) ).flags(), ((Pattern) doc.get( "regex" ) ).flags() ); - } - - private static class TestMapEntry implements Map.Entry { - private String key; - private Object value; - - private TestMapEntry(String key, Object value) { - this.key = key; - this.value = value; - } - - @Override - public String getKey() { - return key; - } - - @Override - public Object getValue() { - return value; - } - - @Override - public Object setValue(Object value) { - throw new UnsupportedOperationException(); - } - } - - private DB _db; - - public static void main( String args[] ){ - ( new LazyDBObjectTest() ).runConsole(); - } -} - diff --git a/src/test/com/mongodb/MongoClientOptionsTest.java b/src/test/com/mongodb/MongoClientOptionsTest.java deleted file mode 100644 index f5958fc3e1d..00000000000 --- a/src/test/com/mongodb/MongoClientOptionsTest.java +++ /dev/null @@ -1,172 +0,0 @@ -package com.mongodb; - -import junit.framework.Assert; -import org.testng.annotations.Test; - -import javax.net.SocketFactory; -import javax.net.ssl.SSLSocketFactory; - -/** - * Copyright (c) 2008 - 2012 10gen, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
          - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -public class MongoClientOptionsTest { - - @Test - public void testBuilderDefaults() { - MongoClientOptions.Builder builder = new MongoClientOptions.Builder(); - MongoClientOptions options = builder.build(); - Assert.assertEquals(DefaultDBDecoder.FACTORY, options.getDbDecoderFactory()); - Assert.assertEquals(DefaultDBEncoder.FACTORY, options.getDbEncoderFactory()); - Assert.assertEquals(null, options.getDescription()); - Assert.assertEquals(SocketFactory.getDefault(), options.getSocketFactory()); - Assert.assertEquals(WriteConcern.ACKNOWLEDGED, options.getWriteConcern()); - Assert.assertEquals(100, options.getConnectionsPerHost()); - Assert.assertEquals(10000, options.getConnectTimeout()); - Assert.assertEquals(0, options.getMaxAutoConnectRetryTime()); - Assert.assertEquals(ReadPreference.primary(), options.getReadPreference()); - Assert.assertEquals(5, options.getThreadsAllowedToBlockForConnectionMultiplier()); - Assert.assertEquals(false, options.isSocketKeepAlive()); - Assert.assertEquals(true, options.isCursorFinalizerEnabled()); - Assert.assertEquals(false, options.isAutoConnectRetry()); - Assert.assertEquals(false, options.isAlwaysUseMBeans()); - } - - @Test - public void testIllegalArguments() { - MongoClientOptions.Builder builder = new MongoClientOptions.Builder(); - try { - builder.dbDecoderFactory(null); - Assert.fail(); - } catch (IllegalArgumentException e) { - // all good - } - try { - builder.dbEncoderFactory(null); - Assert.fail(); - } catch (IllegalArgumentException e) { - // all good - } - try { - builder.socketFactory(null); - Assert.fail(); - } catch (IllegalArgumentException e) { - // all good - } - try { - builder.writeConcern(null); - Assert.fail(); - } catch (IllegalArgumentException e) { - // all good - } - try { - builder.readPreference(null); - Assert.fail(); - } catch (IllegalArgumentException e) { - // all good - } - try { - builder.connectionsPerHost(0); - Assert.fail(); - } catch (IllegalArgumentException e) { - // all good - } - try { - builder.connectTimeout(-1); - Assert.fail(); - } catch (IllegalArgumentException e) { - // all good - } - try { - builder.maxAutoConnectRetryTime(-1); - Assert.fail(); - } catch (IllegalArgumentException e) { - // all good - } - try { - builder.threadsAllowedToBlockForConnectionMultiplier(0); - Assert.fail(); - } catch (IllegalArgumentException e) { - // all good - } - - } - - - @Test - public void testBuilderBuild() { - MongoClientOptions.Builder builder = new MongoClientOptions.Builder(); - builder.description("test"); - builder.readPreference(ReadPreference.secondary()); - builder.writeConcern(WriteConcern.JOURNAL_SAFE); - builder.autoConnectRetry(true); - builder.connectionsPerHost(500); - builder.connectTimeout(100); - builder.maxAutoConnectRetryTime(300); - builder.threadsAllowedToBlockForConnectionMultiplier(1); - builder.socketKeepAlive(true); - builder.cursorFinalizerEnabled(true); - builder.alwaysUseMBeans(true); - - SocketFactory socketFactory = SSLSocketFactory.getDefault(); - builder.socketFactory(socketFactory); - - DBEncoderFactory encoderFactory = new DBEncoderFactory() { - public DBEncoder create() { - return null; - } - }; - builder.dbEncoderFactory(encoderFactory); - - 
DBDecoderFactory decoderFactory = new DBDecoderFactory() { - public DBDecoder create() { - return null; - } - }; - builder.dbDecoderFactory(decoderFactory); - - MongoClientOptions options = builder.build(); - - Assert.assertEquals("test", options.getDescription()); - Assert.assertEquals(ReadPreference.secondary(), options.getReadPreference()); - Assert.assertEquals(WriteConcern.JOURNAL_SAFE, options.getWriteConcern()); - Assert.assertEquals(true, options.isAutoConnectRetry()); - Assert.assertEquals(500, options.getConnectionsPerHost()); - Assert.assertEquals(100, options.getConnectTimeout()); - Assert.assertEquals(300, options.getMaxAutoConnectRetryTime()); - Assert.assertEquals(1, options.getThreadsAllowedToBlockForConnectionMultiplier()); - Assert.assertEquals(true, options.isSocketKeepAlive()); - Assert.assertEquals(true, options.isCursorFinalizerEnabled()); - Assert.assertEquals(true, options.isAlwaysUseMBeans()); - - Assert.assertEquals(socketFactory, options.getSocketFactory()); - Assert.assertEquals(encoderFactory, options.getDbEncoderFactory()); - Assert.assertEquals(decoderFactory, options.getDbDecoderFactory()); - } - - @Test - public void testStaticBuilderCreate() { - Assert.assertNotNull(MongoClientOptions.builder()); - } - - @Test - public void testEqualsAndHashCode() { - Assert.assertEquals(MongoClientOptions.builder().build(), MongoClientOptions.builder().build()); - Assert.assertEquals(MongoClientOptions.builder().build().hashCode(), MongoClientOptions.builder().build().hashCode()); - - Assert.assertEquals(MongoClientOptions.builder().socketFactory(SSLSocketFactory.getDefault()).build(), - MongoClientOptions.builder().socketFactory(SSLSocketFactory.getDefault()).build()); - } -} diff --git a/src/test/com/mongodb/MongoClientTest.java b/src/test/com/mongodb/MongoClientTest.java deleted file mode 100644 index cef488e8f70..00000000000 --- a/src/test/com/mongodb/MongoClientTest.java +++ /dev/null @@ -1,130 +0,0 @@ -/** - * Copyright (c) 2008 - 2012 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.mongodb; - -import junit.framework.Assert; -import org.testng.annotations.Test; - -import java.net.UnknownHostException; -import java.util.Arrays; -import java.util.List; - -public class MongoClientTest { - @Test - @SuppressWarnings("deprecation") - public void testConstructors() throws UnknownHostException { - MongoClientOptions customClientOptions = new MongoClientOptions.Builder().connectionsPerHost(500).build(); - MongoOptions customOptions = new MongoOptions(customClientOptions); - MongoOptions defaultOptions = new MongoOptions(new MongoClientOptions.Builder().build()); - List<MongoCredential> emptyCredentials = Arrays.asList(); - MongoClient mc; - - mc = new MongoClient(); - Assert.assertEquals(new ServerAddress(), mc.getAddress()); - Assert.assertEquals(defaultOptions, mc.getMongoOptions()); - Assert.assertEquals(emptyCredentials, mc.getCredentialsList()); - Assert.assertEquals(MongoClientOptions.builder().build(), mc.getMongoClientOptions()); - mc.close(); - - mc = new MongoClient("127.0.0.1"); - Assert.assertEquals(new ServerAddress("127.0.0.1"), mc.getAddress()); - Assert.assertEquals(defaultOptions, mc.getMongoOptions()); - Assert.assertEquals(emptyCredentials, mc.getCredentialsList()); - Assert.assertEquals(MongoClientOptions.builder().build(), mc.getMongoClientOptions()); - mc.close(); - - mc = new MongoClient("127.0.0.1", customClientOptions); - Assert.assertEquals(new ServerAddress("127.0.0.1"), mc.getAddress()); - Assert.assertEquals(customOptions, mc.getMongoOptions()); - Assert.assertEquals(emptyCredentials, mc.getCredentialsList()); - Assert.assertEquals(customClientOptions, mc.getMongoClientOptions()); - mc.close(); - - mc = new MongoClient("127.0.0.1", 27018); - Assert.assertEquals(new ServerAddress("127.0.0.1", 27018), mc.getAddress()); - Assert.assertEquals(defaultOptions, mc.getMongoOptions()); - Assert.assertEquals(emptyCredentials, mc.getCredentialsList()); - Assert.assertEquals(MongoClientOptions.builder().build(), mc.getMongoClientOptions()); - mc.close(); - - mc = new MongoClient(new ServerAddress("127.0.0.1")); - Assert.assertEquals(new ServerAddress("127.0.0.1"), mc.getAddress()); - Assert.assertEquals(defaultOptions, mc.getMongoOptions()); - Assert.assertEquals(emptyCredentials, mc.getCredentialsList()); - Assert.assertEquals(MongoClientOptions.builder().build(), mc.getMongoClientOptions()); - mc.close(); - - final List<MongoCredential> credentialsList = Arrays.asList( - MongoCredential.createMongoCRCredential("user1", "test", "pwd".toCharArray())); - mc = new MongoClient(new ServerAddress("127.0.0.1"), credentialsList); - Assert.assertEquals(new ServerAddress("127.0.0.1"), mc.getAddress()); - Assert.assertEquals(defaultOptions, mc.getMongoOptions()); - Assert.assertEquals(credentialsList, mc.getCredentialsList()); - Assert.assertEquals(MongoClientOptions.builder().build(), mc.getMongoClientOptions()); - mc.close(); - - mc = new MongoClient(new ServerAddress("127.0.0.1"), customClientOptions); - Assert.assertEquals(new ServerAddress("127.0.0.1"), mc.getAddress()); - Assert.assertEquals(customOptions, mc.getMongoOptions()); - Assert.assertEquals(emptyCredentials, mc.getCredentialsList()); - Assert.assertEquals(customClientOptions, mc.getMongoClientOptions()); - mc.close(); - - mc = new MongoClient(new ServerAddress("127.0.0.1"), credentialsList, customClientOptions); - Assert.assertEquals(new ServerAddress("127.0.0.1"), mc.getAddress()); - Assert.assertEquals(customOptions, mc.getMongoOptions()); - Assert.assertEquals(credentialsList, mc.getCredentialsList()); -
Assert.assertEquals(customClientOptions, mc.getMongoClientOptions()); - mc.close(); - - mc = new MongoClient(Arrays.asList(new ServerAddress("localhost", 27017), new ServerAddress("127.0.0.1", 27018))); - Assert.assertEquals(Arrays.asList(new ServerAddress("localhost", 27017), new ServerAddress("127.0.0.1", 27018)), mc.getAllAddress()); - Assert.assertEquals(defaultOptions, mc.getMongoOptions()); - Assert.assertEquals(emptyCredentials, mc.getCredentialsList()); - Assert.assertEquals(MongoClientOptions.builder().build(), mc.getMongoClientOptions()); - mc.close(); - - mc = new MongoClient(Arrays.asList(new ServerAddress("localhost", 27017), new ServerAddress("127.0.0.1", 27018)), customClientOptions); - Assert.assertEquals(Arrays.asList(new ServerAddress("localhost", 27017), new ServerAddress("127.0.0.1", 27018)), mc.getAllAddress()); - Assert.assertEquals(customOptions, mc.getMongoOptions()); - Assert.assertEquals(emptyCredentials, mc.getCredentialsList()); - Assert.assertEquals(customClientOptions, mc.getMongoClientOptions()); - mc.close(); - - mc = new MongoClient(Arrays.asList(new ServerAddress("localhost", 27017), new ServerAddress("127.0.0.1", 27018)), credentialsList, customClientOptions); - Assert.assertEquals(Arrays.asList(new ServerAddress("localhost", 27017), new ServerAddress("127.0.0.1", 27018)), mc.getAllAddress()); - Assert.assertEquals(customOptions, mc.getMongoOptions()); - Assert.assertEquals(credentialsList, mc.getCredentialsList()); - Assert.assertEquals(customClientOptions, mc.getMongoClientOptions()); - mc.close(); - - mc = new MongoClient(new MongoClientURI("mongodb://127.0.0.1")); - Assert.assertEquals(new ServerAddress("127.0.0.1"), mc.getAddress()); - Assert.assertEquals(defaultOptions, mc.getMongoOptions()); - Assert.assertEquals(emptyCredentials, mc.getCredentialsList()); - Assert.assertEquals(MongoClientOptions.builder().build(), mc.getMongoClientOptions()); - mc.close(); - - mc = new MongoClient(new MongoClientURI("mongodb://user1:pwd@127.0.0.1/test?maxPoolSize=500")); - Assert.assertEquals(new ServerAddress("127.0.0.1"), mc.getAddress()); - Assert.assertEquals(customOptions, mc.getMongoOptions()); - Assert.assertEquals(credentialsList, mc.getCredentialsList()); - Assert.assertEquals(customClientOptions, mc.getMongoClientOptions()); - mc.close(); - } -} diff --git a/src/test/com/mongodb/MongoClientURITest.java b/src/test/com/mongodb/MongoClientURITest.java deleted file mode 100644 index 413f4aebd47..00000000000 --- a/src/test/com/mongodb/MongoClientURITest.java +++ /dev/null @@ -1,261 +0,0 @@ -/** - * Copyright (c) 2008 - 2012 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.mongodb; - -import com.mongodb.util.TestCase; -import org.testng.annotations.Test; - -import javax.net.SocketFactory; -import javax.net.ssl.SSLSocketFactory; -import java.net.UnknownHostException; - -public class MongoClientURITest extends TestCase { - - @Test - public void testUnsupportedOption() { - new MongoClientURI("mongodb://localhost/?unknownOption=true"); - } - - @Test - public void testURIGetter() { - assertEquals("mongodb://localhost", new MongoClientURI("mongodb://localhost").getURI()); - } - - @Test - public void testOptionsWithoutTrailingSlash() { - try { - new MongoClientURI("mongodb://localhost?wTimeout=5"); - fail("This is not allowed"); - } catch (IllegalArgumentException e) { - // ok - } - - } - @Test() - public void testSingleServer() { - MongoClientURI u = new MongoClientURI("mongodb://db.example.com"); - assertEquals(1, u.getHosts().size()); - assertEquals("db.example.com", u.getHosts().get(0)); - assertNull(u.getDatabase()); - assertNull(u.getCollection()); - assertNull( u.getUsername()); - assertEquals(null, u.getPassword()); - } - - @Test() - public void testWithDatabase() { - MongoClientURI u = new MongoClientURI("mongodb://foo/bar"); - assertEquals(1, u.getHosts().size()); - assertEquals("foo", u.getHosts().get(0)); - assertEquals("bar", u.getDatabase()); - assertEquals(null, u.getCollection()); - assertEquals(null, u.getUsername()); - assertEquals(null, u.getPassword()); - } - - @Test() - public void testWithCollection() { - MongoClientURI u = new MongoClientURI("mongodb://localhost/test.my.coll"); - assertEquals("test", u.getDatabase()); - assertEquals("my.coll", u.getCollection()); - } - - @Test() - public void testBasic2() { - MongoClientURI u = new MongoClientURI("mongodb://foo/bar.goo"); - assertEquals(1, u.getHosts().size()); - assertEquals("foo", u.getHosts().get(0)); - assertEquals("bar", u.getDatabase()); - assertEquals("goo", u.getCollection()); - } - - @Test() - public void testUserPass() { - final String userName = "user"; - final char[] password = "pass".toCharArray(); - - MongoClientURI u = new MongoClientURI("mongodb://user:pass@host/bar"); - assertEquals(1, u.getHosts().size()); - assertEquals("host", u.getHosts().get(0)); - assertEquals(userName, u.getUsername()); - assertArrayEquals(password, u.getPassword()); - - assertEquals(MongoCredential.createMongoCRCredential(userName, "bar", password), u.getCredentials()); - - u = new MongoClientURI("mongodb://user@host/?authMechanism=GSSAPI"); - assertEquals(MongoCredential.createGSSAPICredential(userName), u.getCredentials()); - - u = new MongoClientURI("mongodb://user:pass@host/?authMechanism=MONGODB-CR"); - assertEquals(MongoCredential.createMongoCRCredential(userName, "admin", password), u.getCredentials()); - - u = new MongoClientURI("mongodb://user:pass@host/?authSource=test"); - assertEquals(MongoCredential.createMongoCRCredential(userName, "test", password), u.getCredentials()); - - u = new MongoClientURI("mongodb://user:pass@host"); - assertEquals(MongoCredential.createMongoCRCredential(userName, "admin", password), u.getCredentials()); - } - - @Test - public void testUnsupportedAuthMechanism() { - try { - new MongoClientURI("mongodb://user:pass@host/?authMechanism=UNKNOWN"); - fail("Should fail due to unknown authMechanism"); - } catch (IllegalArgumentException e) { - // expected - } - } - - @Test - public void testURIEncoding() { - MongoClientURI u = new MongoClientURI("mongodb://use%24:he%21%21o@localhost"); - 
assertEquals(MongoCredential.createMongoCRCredential("use$", "admin", "he!!o".toCharArray()), u.getCredentials()); - } - - @Test() - public void testUserPassAndPort() { - MongoClientURI u = new MongoClientURI("mongodb://user:pass@host:27011/bar"); - assertEquals(1, u.getHosts().size()); - assertEquals("host:27011", u.getHosts().get(0)); - assertEquals("user", u.getUsername()); - assertEquals("pass", new String(u.getPassword())); - } - - @Test() - public void testUserPassAndMultipleHostsWithPort() { - MongoClientURI u = new MongoClientURI("mongodb://user:pass@host:27011,host2:27012,host3:27013/bar"); - assertEquals(3, u.getHosts().size()); - assertEquals("host:27011", u.getHosts().get(0)); - assertEquals("host2:27012", u.getHosts().get(1)); - assertEquals("host3:27013", u.getHosts().get(2)); - assertEquals("user", u.getUsername()); - assertEquals("pass", new String(u.getPassword())); - } - - @Test() - public void testWriteConcern() { - MongoClientURI uri = new MongoClientURI("mongodb://localhost"); - assertEquals(WriteConcern.ACKNOWLEDGED, uri.getOptions().getWriteConcern()); - - uri = new MongoClientURI("mongodb://localhost/?wTimeout=5"); - assertEquals(new WriteConcern(1, 5, false, false), uri.getOptions().getWriteConcern()); - - uri = new MongoClientURI("mongodb://localhost/?fsync=true"); - assertEquals(new WriteConcern(1, 0, true, false), uri.getOptions().getWriteConcern()); - - uri = new MongoClientURI("mongodb://localhost/?j=true"); - assertEquals(new WriteConcern(1, 0, false, true), uri.getOptions().getWriteConcern()); - - uri = new MongoClientURI("mongodb://localhost/?w=2&wtimeout=5&fsync=true&j=true"); - assertEquals(new WriteConcern(2, 5, true, true), uri.getOptions().getWriteConcern()); - - uri = new MongoClientURI("mongodb://localhost/?w=majority&wtimeout=5&fsync=true&j=true"); - assertEquals(new WriteConcern("majority", 5, true, true), uri.getOptions().getWriteConcern()); - - uri = new MongoClientURI("mongodb://localhost/?safe=true"); - assertEquals(WriteConcern.ACKNOWLEDGED, uri.getOptions().getWriteConcern()); - - uri = new MongoClientURI("mongodb://localhost/?safe=false"); - assertEquals(WriteConcern.UNACKNOWLEDGED, uri.getOptions().getWriteConcern()); - - } - - @Test - public void testSSLOption() { - MongoClientURI uri = new MongoClientURI("mongodb://localhost/?ssl=false"); - assertFalse(uri.getOptions().getSocketFactory() instanceof SSLSocketFactory); - - uri = new MongoClientURI("mongodb://localhost/?ssl=true"); - assertTrue(uri.getOptions().getSocketFactory() instanceof SSLSocketFactory); - } - - @Test() - public void testOptions() { - MongoClientURI uAmp = new MongoClientURI("mongodb://localhost/?" + - "maxPoolSize=10&waitQueueMultiple=5&waitQueueTimeoutMS=150&" + - "connectTimeoutMS=2500&socketTimeoutMS=5500&autoConnectRetry=true&" + - "slaveOk=true&safe=false&w=1&wtimeout=2500&fsync=true"); - assertOnOptions(uAmp.getOptions()); - MongoClientURI uSemi = new MongoClientURI("mongodb://localhost/?" + - "maxPoolSize=10;waitQueueMultiple=5;waitQueueTimeoutMS=150;" + - "connectTimeoutMS=2500;socketTimeoutMS=5500;autoConnectRetry=true;" + - "slaveOk=true;safe=false;w=1;wtimeout=2500;fsync=true"); - assertOnOptions(uSemi.getOptions()); - MongoClientURI uMixed = new MongoClientURI("mongodb://localhost/test?" 
+ - "maxPoolSize=10&waitQueueMultiple=5;waitQueueTimeoutMS=150;" + - "connectTimeoutMS=2500;socketTimeoutMS=5500&autoConnectRetry=true;" + - "slaveOk=true;safe=false&w=1;wtimeout=2500;fsync=true"); - assertOnOptions(uMixed.getOptions()); - } - - @Test - public void testBuilderOverrides() { - MongoClientURI uri = new MongoClientURI("mongodb://localhost/?maxPoolSize=150", - MongoClientOptions.builder().autoConnectRetry(true).connectionsPerHost(200)); - assertTrue(uri.getOptions().isAutoConnectRetry()); - assertEquals(150, uri.getOptions().getConnectionsPerHost()); - } - - @Test() - public void testURIDefaults() throws UnknownHostException { - MongoClientURI uri = new MongoClientURI("mongodb://localhost"); - MongoClientOptions options = uri.getOptions(); - - assertEquals(options.getConnectionsPerHost(), 100); - assertEquals(options.getThreadsAllowedToBlockForConnectionMultiplier(), 5); - assertEquals(options.getMaxWaitTime(), 120000); - assertEquals(options.getConnectTimeout(), 10000); - assertEquals(options.getSocketTimeout(), 0); - assertFalse(options.isSocketKeepAlive()); - assertFalse(options.isAutoConnectRetry()); - assertEquals(options.getMaxAutoConnectRetryTime(), 0); - assertEquals(options.getDbDecoderFactory(), DefaultDBDecoder.FACTORY); - assertEquals(options.getDbEncoderFactory(), DefaultDBEncoder.FACTORY); - assertEquals(options.getSocketFactory(), SocketFactory.getDefault()); - assertEquals(options.getDescription(), null); - assertEquals(options.getReadPreference(), ReadPreference.primary()); - assertTrue(options.isCursorFinalizerEnabled()); - assertNull(uri.getCredentials()); - } - - @Test() - public void testReadPreferenceOptions() { - MongoClientURI uri = new MongoClientURI("mongodb://localhost/?readPreference=secondaryPreferred"); - assertEquals(ReadPreference.secondaryPreferred(), uri.getOptions().getReadPreference()); - - uri = new MongoClientURI("mongodb://localhost/?readPreference=secondaryPreferred&" + - "readPreferenceTags=dc:ny,rack:1&readPreferenceTags=dc:ny&readPreferenceTags="); - assertEquals(ReadPreference.secondaryPreferred - ( - new BasicDBObject("dc", "ny").append("rack", "1"), - new BasicDBObject("dc", "ny"), - new BasicDBObject() - ), - uri.getOptions().getReadPreference()); - } - - @SuppressWarnings("deprecation") - private void assertOnOptions(MongoClientOptions options) { - assertEquals(10, options.getConnectionsPerHost(), 10); - assertEquals(5, options.getThreadsAllowedToBlockForConnectionMultiplier()); - assertEquals(150, options.getMaxWaitTime()); - assertEquals(5500, options.getSocketTimeout()); - assertTrue(options.isAutoConnectRetry()); - assertEquals(new WriteConcern(1, 2500, true), options.getWriteConcern()); - assertEquals(ReadPreference.secondaryPreferred(), options.getReadPreference()); - } -} diff --git a/src/test/com/mongodb/MongoCredentialTest.java b/src/test/com/mongodb/MongoCredentialTest.java deleted file mode 100644 index e7442d94119..00000000000 --- a/src/test/com/mongodb/MongoCredentialTest.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright (c) 2008 - 2013 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package com.mongodb; - -import com.mongodb.util.TestCase; -import org.testng.annotations.Test; - -import java.util.Arrays; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -public class MongoCredentialTest extends TestCase { - - @Test - public void testCredentials() { - MongoCredential credentials; - - final String mechanism = MongoCredential.MONGODB_CR_MECHANISM; - final String userName = "user"; - final String database = "test"; - final char[] password = "pwd".toCharArray(); - credentials = MongoCredential.createMongoCRCredential(userName, database, password); - - assertEquals(mechanism, credentials.getMechanism()); - assertEquals(userName, credentials.getUserName()); - assertEquals(database, credentials.getSource()); - assertArrayEquals(password, credentials.getPassword()); - assertEquals(MongoCredential.MONGODB_CR_MECHANISM, credentials.getMechanism()); - - try { - MongoCredential.createMongoCRCredential(userName, database, null); - fail("MONGO-CR must have a password"); - } catch (IllegalArgumentException e) { - // all good - } - } - - @Test - public void testCredentialsStore() { - char[] password = "pwd".toCharArray(); - MongoCredentialsStore store; - - store = new MongoCredentialsStore(); - assertTrue(store.getDatabases().isEmpty()); - assertNull(store.get("test")); - - store = new MongoCredentialsStore((MongoCredential) null); - assertTrue(store.getDatabases().isEmpty()); - assertNull(store.get("test")); - - MongoCredential credentials = MongoCredential.createMongoCRCredential("user", "admin", password); - store = new MongoCredentialsStore(credentials); - Set expected; - expected = new HashSet(); - expected.add("admin"); - assertEquals(expected, store.getDatabases()); - assertEquals(credentials, store.get("admin")); - assertNull(store.get("test")); - - List credentialsList; - - final MongoCredential credentials1 = MongoCredential.createMongoCRCredential("user", "db1", password); - final MongoCredential credentials2 = MongoCredential.createMongoCRCredential("user", "db2", password); - credentialsList = Arrays.asList(credentials1, credentials2); - store = new MongoCredentialsStore(credentialsList); - expected = new HashSet(); - expected.add("db1"); - expected.add("db2"); - assertEquals(expected, store.getDatabases()); - assertEquals(credentials1, store.get("db1")); - assertEquals(credentials2, store.get("db2")); - assertNull(store.get("db3")); - assertEquals(credentialsList, store.asList()); - - credentialsList = Arrays.asList(credentials1, MongoCredential.createMongoCRCredential("user2", "db1", password)); - try { - new MongoCredentialsStore(credentialsList); - fail("should throw"); - } catch (IllegalArgumentException e) { - // expected - } - } -} diff --git a/src/test/com/mongodb/MongoOptionsTest.java b/src/test/com/mongodb/MongoOptionsTest.java deleted file mode 100644 index a43d307e47f..00000000000 --- a/src/test/com/mongodb/MongoOptionsTest.java +++ /dev/null @@ -1,155 +0,0 @@ -/** - * Copyright (C) 2011 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import org.testng.annotations.Test; - -import com.mongodb.util.TestCase; - -/** - * The mongo options test. - */ -public class MongoOptionsTest extends TestCase { - - @Test - @SuppressWarnings("deprecation") - public void testCopy() throws Exception { - - final MongoOptions options = new MongoOptions(); - - options.connectionsPerHost = 100; - options.threadsAllowedToBlockForConnectionMultiplier = 101; - options.maxWaitTime = 102; - options.connectTimeout = 103; - options.socketTimeout = 104; - options.socketKeepAlive = true; - options.autoConnectRetry = true; - options.maxAutoConnectRetryTime = 105; - options.slaveOk = true; - options.safe = true; - options.w = 106; - options.wtimeout = 107; - options.fsync = true; - options.j = false; - options.dbDecoderFactory = null; - options.dbEncoderFactory = null; - options.socketFactory = null; - options.description = "cool"; - options.readPreference = ReadPreference.secondary(); - options.cursorFinalizerEnabled = true; - options.alwaysUseMBeans = true; - - final MongoOptions copy = options.copy(); - assertEquals(options.connectionsPerHost, copy.connectionsPerHost); - assertEquals(options.threadsAllowedToBlockForConnectionMultiplier, copy.threadsAllowedToBlockForConnectionMultiplier); - assertEquals(options.maxWaitTime, copy.maxWaitTime); - assertEquals(options.connectTimeout, copy.connectTimeout); - assertEquals(options.socketTimeout, copy.socketTimeout); - assertEquals(options.socketKeepAlive, copy.socketKeepAlive); - assertEquals(options.autoConnectRetry, copy.autoConnectRetry); - assertEquals(options.maxAutoConnectRetryTime, copy.maxAutoConnectRetryTime); - assertEquals(options.slaveOk, copy.slaveOk); - assertEquals(options.safe, copy.safe); - assertEquals(options.w, copy.w); - assertEquals(options.wtimeout, copy.wtimeout); - assertEquals(options.fsync, copy.fsync); - assertEquals(options.j, copy.j); - assertEquals(options.dbDecoderFactory, copy.dbDecoderFactory); - assertEquals(options.dbEncoderFactory, copy.dbEncoderFactory); - assertEquals(options.socketFactory, copy.socketFactory); - assertEquals(options.description, copy.description); - assertEquals(options.readPreference, copy.readPreference); - assertEquals(options.alwaysUseMBeans, copy.alwaysUseMBeans); - } - - @Test - @SuppressWarnings("deprecation") - public void testGetterSetters() throws Exception { - - final MongoOptions options = new MongoOptions(); - - options.setConnectionsPerHost(100); - options.setThreadsAllowedToBlockForConnectionMultiplier(101); - options.setMaxWaitTime(102); - options.setConnectTimeout(103); - options.setSocketTimeout(104); - options.setSocketKeepAlive(true); - options.setAutoConnectRetry(true); - options.setMaxAutoConnectRetryTime(105); - options.setSafe(true); - options.setW(106); - options.setWtimeout(107); - options.setFsync(true); - options.setJ(false); - options.setDbDecoderFactory(null); - options.setDbEncoderFactory(null); - options.setSocketFactory(null); - options.setDescription("very cool"); - options.setReadPreference(ReadPreference.secondary()); - options.setCursorFinalizerEnabled(true); - 
options.setAlwaysUseMBeans(true); - - assertEquals(options.getConnectionsPerHost(), 100); - assertEquals(options.getThreadsAllowedToBlockForConnectionMultiplier(), 101); - assertEquals(options.getMaxWaitTime(), 102); - assertEquals(options.getConnectTimeout(), 103); - assertEquals(options.getSocketTimeout(), 104); - assertEquals(options.isSocketKeepAlive(), true); - assertEquals(options.isAutoConnectRetry(), true); - assertEquals(options.getMaxAutoConnectRetryTime(), 105); - assertEquals(options.isSafe(), true); - assertEquals(options.getW(), 106); - assertEquals(options.getWtimeout(), 107); - assertEquals(options.isFsync(), true); - assertEquals(options.isJ(), false); - assertEquals(options.getDbDecoderFactory(), null); - assertEquals(options.getDbEncoderFactory(), null); - assertEquals(options.getSocketFactory(), null); - assertEquals(options.getDescription(), "very cool"); - assertEquals(options.getReadPreference(), ReadPreference.secondary()); - assertEquals(options.isCursorFinalizerEnabled(), true); - assertEquals(options.isAlwaysUseMBeans(), true); - } - - @Test - @SuppressWarnings("deprecation") - public void testGetWriteConcern() { - MongoOptions options = new MongoOptions(); - assertEquals(WriteConcern.NORMAL, options.getWriteConcern()); - - options.reset(); - options.safe = true; - assertEquals(WriteConcern.SAFE, options.getWriteConcern()); - - options.reset(); - options.w = 3; - assertEquals(new WriteConcern(3), options.getWriteConcern()); - - options.reset(); - options.wtimeout = 3000; - assertEquals(new WriteConcern(0, 3000), options.getWriteConcern()); - - options.reset(); - options.fsync = true; - assertEquals(new WriteConcern(0, 0, true), options.getWriteConcern()); - - options.reset(); - options.j = true; - assertEquals(new WriteConcern(0, 0, false, true), options.getWriteConcern()); - } -} - diff --git a/src/test/com/mongodb/MongoTest.java b/src/test/com/mongodb/MongoTest.java deleted file mode 100644 index 70a1d4f92f1..00000000000 --- a/src/test/com/mongodb/MongoTest.java +++ /dev/null @@ -1,100 +0,0 @@ -// MongoTest.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.mongodb; - -import com.mongodb.util.TestCase; -import org.testng.annotations.AfterTest; -import org.testng.annotations.BeforeTest; -import org.testng.annotations.Test; - -import java.io.IOException; -import java.net.UnknownHostException; - -@SuppressWarnings("deprecation") -public class MongoTest extends TestCase { - - public MongoTest() - throws IOException , MongoException { - _db = new MongoClient().getDB( "mongotest" ); - } - - final DB _db; - - int _originalCleanerIntervalMs; - - @BeforeTest - public void setUp() { - _originalCleanerIntervalMs = Mongo.cleanerIntervalMS; - } - - @Test - public void testClose_shouldNotReturnUntilCleanupThreadIsFinished() throws Exception { - Mongo.cleanerIntervalMS = 250000; //set to a suitably large value to avoid race conditions in the test - - Mongo mongo = new MongoClient(); - assertNotEquals(mongo._cleaner.getState(), Thread.State.NEW); - - mongo.close(); - - assertFalse(mongo._cleaner.isAlive()); - } - - @SuppressWarnings("deprecation") - @Test - public void testApplyOptions() throws UnknownHostException { - MongoOptions options = new MongoOptions(); - - // test defaults - Mongo m = new Mongo("localhost", options); - assertEquals(ReadPreference.primary(), m.getReadPreference()); - assertEquals(WriteConcern.NORMAL, m.getWriteConcern()); - assertEquals(0, m.getOptions() & Bytes.QUERYOPTION_SLAVEOK); - m.close(); - - // test setting options - options.setReadPreference(ReadPreference.nearest()); - options.slaveOk = true; - options.safe = true; - - m = new Mongo("localhost", options); - assertEquals(ReadPreference.nearest(), m.getReadPreference()); - assertEquals(WriteConcern.SAFE, m.getWriteConcern()); - assertEquals(Bytes.QUERYOPTION_SLAVEOK, m.getOptions() & Bytes.QUERYOPTION_SLAVEOK); - m.close(); - - } - - @Test - public void testMongoURIWithAuth() throws UnknownHostException { - Mongo mongo = new Mongo(new MongoURI("mongodb://user:pwd@localhost/authTest")); - assertNotNull(mongo.getDB("authTest").getAuthenticationCredentials()); - assertNull(mongo.getDB("test").getAuthenticationCredentials()); - } - - @AfterTest - public void tearDown() { - Mongo.cleanerIntervalMS = _originalCleanerIntervalMs; - } - - public static void main( String args[] ) - throws Exception { - (new MongoTest()).runConsole(); - } - -} diff --git a/src/test/com/mongodb/MongoURITest.java b/src/test/com/mongodb/MongoURITest.java deleted file mode 100644 index d17dffe60c9..00000000000 --- a/src/test/com/mongodb/MongoURITest.java +++ /dev/null @@ -1,77 +0,0 @@ -// MongoURITest.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.mongodb; - -import com.mongodb.util.TestCase; -import org.testng.annotations.Test; - -import javax.net.SocketFactory; -import java.util.Arrays; - - -@SuppressWarnings("deprecation") -public class MongoURITest extends TestCase { - - @Test - public void testGetters() { - MongoURI mongoURI = new MongoURI( "mongodb://user:pwd@localhost/test.mongoURITest?safe=false"); - assertEquals("user", mongoURI.getUsername()); - assertEquals("pwd", new String(mongoURI.getPassword())); - assertEquals(MongoCredential.createMongoCRCredential("user", "test", "pwd".toCharArray()), mongoURI.getCredentials()); - assertEquals(Arrays.asList("localhost"), mongoURI.getHosts()); - assertEquals("test", mongoURI.getDatabase()); - assertEquals("mongoURITest", mongoURI.getCollection()); - assertEquals(WriteConcern.NORMAL, mongoURI.getOptions().writeConcern); - } - - @SuppressWarnings("deprecation") - @Test - public void testOptionDefaults() { - MongoURI mongoURI = new MongoURI( "mongodb://localhost"); - MongoOptions options = mongoURI.getOptions(); - - assertEquals(options.getConnectionsPerHost(), 10); - assertEquals(options.getThreadsAllowedToBlockForConnectionMultiplier(), 5); - assertEquals(options.getMaxWaitTime(), 120000); - assertEquals(options.getConnectTimeout(), 10000); - assertEquals(options.getSocketTimeout(), 0); - assertEquals(options.isSocketKeepAlive(), false); - assertEquals(options.isAutoConnectRetry(), false); - assertEquals(options.getMaxAutoConnectRetryTime(), 0); - assertEquals(options.isSafe(), false); - assertEquals(options.getW(), 0); - assertEquals(options.getWtimeout(), 0); - assertEquals(options.isFsync(), false); - assertEquals(options.isJ(), false); - assertEquals(options.getDbDecoderFactory(), DefaultDBDecoder.FACTORY); - assertEquals(options.getDbEncoderFactory(), DefaultDBEncoder.FACTORY); - assertEquals(options.getSocketFactory(), SocketFactory.getDefault()); - assertEquals(options.getDescription(), null); - assertEquals(options.getReadPreference(), ReadPreference.primary()); - assertEquals(options.getWriteConcern(), WriteConcern.NORMAL); - assertEquals(options.slaveOk, false); - assertEquals(options.isCursorFinalizerEnabled(), true); - } - - @Test - public void testOptionSameInstance() { - MongoURI mongoURI = new MongoURI( "mongodb://localhost"); - assertSame(mongoURI.getOptions(), mongoURI.getOptions()); - } -} diff --git a/src/test/com/mongodb/ObjectIdTest.java b/src/test/com/mongodb/ObjectIdTest.java deleted file mode 100644 index 6adf3c365ed..00000000000 --- a/src/test/com/mongodb/ObjectIdTest.java +++ /dev/null @@ -1,164 +0,0 @@ -// ObjectIdTest.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.mongodb; - -import com.mongodb.util.TestCase; -import org.bson.types.ObjectId; -import org.testng.annotations.Test; - -import java.util.Date; -import java.util.Random; - -public class ObjectIdTest extends TestCase { - - final Mongo _mongo; - final DB _db; - - public ObjectIdTest() { - _mongo = cleanupMongo; - cleanupDB = "com_mongodb_unittest_ObjectIdTest"; - _db = cleanupMongo.getDB(cleanupDB); - } - - /* - @Test(groups = {"basic"}) - public void testTSM(){ - - ObjectId a = new ObjectId( 2667563522304714314L , -1912742877 ); - assertEquals( "4a26c3e2e316052523dcfd8d" , a.toStringMongod() ); - assertEquals( "250516e3e2c3264a8dfddc23" , a.toStringBabble() ); - assertEquals( "4a26c3e2e316052523dcfd8d" , a.toString() ); - } - */ - - @Test(groups = {"basic"}) - public void testRT1(){ - ObjectId a = new ObjectId(); - assertEquals( a.toStringBabble() , (new ObjectId( a.toStringBabble() , true ) ).toStringBabble() ); - assertEquals( a.toStringMongod() , (new ObjectId( a.toStringMongod() , false ) ).toStringMongod() ); - assertEquals( a.toStringMongod() , (new ObjectId( a.toStringMongod() ) ).toStringMongod() ); - assertEquals( a.toString() , (new ObjectId( a.toString() , false ) ).toString() ); - } - - @Test(groups = {"basic"}) - public void testBabbleToMongo(){ - ObjectId a = new ObjectId(); - assertEquals( a.toStringMongod() , ObjectId.babbleToMongod( a.toStringBabble() ) ); - } - - @Test - public void testBytes(){ - ObjectId a = new ObjectId(); - assertEquals( a , new ObjectId( a.toByteArray() ) ); - - byte b[] = new byte[12]; - java.util.Random r = new java.util.Random( 17 ); - for ( int i=0; i 0 ) - assertEquals( Y , y ); - } - - @Test - public void testFlip(){ - - _testFlip( 1 , 16777216 ); - _testFlip( 1231231 , 2143883776 ); - _testFlip( 0x12345678 , 0x78563412 ); - - Random r = new Random( 12312312 ); - for ( int i=0; i<1000; i++ ){ - int x = r.nextInt(); - _testFlip( r.nextInt() , 0 ); - } - - } - - /** - * Test that within same second, increment value correctly generates ordered ids - */ - @Test - public void testInc() { - ObjectId prev = null; - Date now = new Date(); - // need to loop more than value of byte, to check that endianness is correct - for (int i = 0; i < 1000; ++i) { - ObjectId id = new ObjectId(now); - assertEquals(id.getTime() / 1000, now.getTime() / 1000); - if (prev != null) { - assertTrue(prev.compareTo(id) < 0, "Wrong comparison for ids " + prev + " and " + id); - } - prev = id; - } - } - - public static void main( String args[] ) - throws Exception { - (new ObjectIdTest()).runConsole(); - - long num = 5000000; - - long start = System.currentTimeMillis(); - for ( long i=0; i - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.mongodb; - -import org.bson.types.ObjectId; -import org.testng.Assert; -import org.testng.annotations.BeforeTest; -import org.testng.annotations.Test; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.net.UnknownHostException; - -public class OutMessageTest { - - Mongo m; - - @BeforeTest - public void setup() throws UnknownHostException { - m = new MongoClient(); - } - - // Ensure defensive code is in place after doneWithMessage is called. - @Test - public void testDoneWithMessage() throws IOException { - DBCollection collection = m.getDB("OutMessageTest").getCollection("doneWithMessage"); - - OutMessage om = OutMessage.insert(collection, DefaultDBEncoder.FACTORY.create(), WriteConcern.SAFE); - om.putObject(new BasicDBObject("_id", new ObjectId())); - - // This will release the buffer and put the object in an unusable state. - om.doneWithMessage(); - - try { - om.doneWithMessage(); - Assert.fail(); - } catch (IllegalStateException e) { - // expected - } - - try { - om.prepare(); - Assert.fail(); - } catch (IllegalStateException e) { - // expected - } - - try { - om.putObject(new BasicDBObject("_id", new ObjectId())); - Assert.fail(); - } catch (IllegalStateException e) { - // expected - } - - try { - om.pipe(new ByteArrayOutputStream(100)); - Assert.fail(); - } catch (IllegalStateException e) { - // expected - } - - try { - om.size(); - Assert.fail(); - } catch (IllegalStateException e) { - // expected - } - - } -} diff --git a/src/test/com/mongodb/PerformanceTest.java b/src/test/com/mongodb/PerformanceTest.java deleted file mode 100644 index 96ff9108c61..00000000000 --- a/src/test/com/mongodb/PerformanceTest.java +++ /dev/null @@ -1,299 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.mongodb; - -import org.bson.types.BasicBSONList; - -import java.net.UnknownHostException; -import java.util.ArrayList; -import java.util.Date; - -@SuppressWarnings("unchecked") -public class PerformanceTest { - - public static final int batchSize = 100; - public static final double perTrial = 5000; - - public static DBObject small; - public static DBObject medium; - public static DBObject large; - - private static void setup() { - small = new BasicDBObject(); - - BasicBSONList a = new BasicBSONList(); - a.put("0", "test"); - a.put("1", "benchmark"); - medium = BasicDBObjectBuilder.start() - .add("integer", 5) - .add("number", 5.05) - .add("boolean", false) - .add("array", a) - .get(); - - BasicBSONList harvest = new BasicBSONList(); - for (int i=0; i<20; i++) { - harvest.put(i*14+0, "10gen"); - harvest.put(i*14+1, "web"); - harvest.put(i*14+2, "open"); - harvest.put(i*14+3, "source"); - harvest.put(i*14+4, "application"); - harvest.put(i*14+5, "paas"); - harvest.put(i*14+6, "platform-as-a-service"); - harvest.put(i*14+7, "technology"); - harvest.put(i*14+8, "helps"); - harvest.put(i*14+9, "developers"); - harvest.put(i*14+10, "focus"); - harvest.put(i*14+11, "building"); - harvest.put(i*14+12, "mongodb"); - harvest.put(i*14+13, "mongo"); - } - large = BasicDBObjectBuilder.start() - .add("base_url", "http://www.example.com/test-me") - .add("total_word_count", 6743) - .add("access_time", new Date()) - .add("meta_tags", BasicDBObjectBuilder.start() - .add("description", "i am a long description string") - .add("author", "Holly Man") - .add("dynamically_created_meta_tag", "who know\n what") - .get()) - .add("page_structure", BasicDBObjectBuilder.start() - .add("counted_tags", 3450) - .add("no_of_js_attached", 10) - .add("no_of_images", 6) - .get()) - .add("harvested_words", harvest) - .get(); - } - - private static DBCollection getCollection(String name, boolean index) { - DBCollection c; - if (index) { - c = _db.getCollection(name+"_index"); - c.drop(); - c.ensureIndex(BasicDBObjectBuilder.start().add("x", 1).get()); - } - else { - c = _db.getCollection(name); - c.drop(); - } - c.findOne(); - return c; - } - - private static void doInserts(DBObject obj, String name, boolean index) { - DBCollection c = getCollection(name, index); - - double start = (double)System.currentTimeMillis(); - for (int i=0; i batches = new ArrayList(); - for (int i=0; i batch = new ArrayList(); - for (int j=0; jQueryBuilder - * @author Julson Lim - */ -public class QueryBuilderTest extends TestCase { - private DB _testDB; - - public QueryBuilderTest() { - _testDB = cleanupMongo.getDB( "queryBuilderTest" ); - _testDB.dropDatabase(); - } - - @Test - public void elemMatchTest() { - DBObject query = QueryBuilder.start("array").elemMatch(QueryBuilder.start("x").is(1).and("y").is(2).get()).get(); - DBObject expected = new BasicDBObject("array", new BasicDBObject("$elemMatch", - new BasicDBObject("x", 1).append("y", 2))); - assertEquals(expected, query); - // TODO: add integration test - } - - @Test - public void notTest() { - Pattern pattern = Pattern.compile("\\w*"); - DBObject query = QueryBuilder.start("x").not().regex(pattern).get(); - DBObject expected = new BasicDBObject("x", new BasicDBObject("$not", pattern)); - assertEquals(expected, query); - - query = QueryBuilder.start("x").not().regex(pattern).and("y").is("foo").get(); - expected = new BasicDBObject("x", new BasicDBObject("$not", pattern)).append("y", "foo"); - assertEquals(expected, query); - - query = 
QueryBuilder.start("x").not().greaterThan(2).get(); - expected = new BasicDBObject("x", new BasicDBObject("$not", new BasicDBObject("$gt", 2))); - assertEquals(expected, query); - - query = QueryBuilder.start("x").not().greaterThan(2).and("y").is("foo").get(); - expected = new BasicDBObject("x", new BasicDBObject("$not", new BasicDBObject("$gt", 2))).append("y", "foo"); - assertEquals(expected, query); - - - query = QueryBuilder.start("x").not().greaterThan(2).lessThan(0).get(); - expected = new BasicDBObject("x", new BasicDBObject("$not", new BasicDBObject("$gt", 2).append("$lt", 0))); - assertEquals(expected, query); - - } - - @Test - public void greaterThanTest() { - String key = "x"; - DBCollection collection = _testDB.getCollection("gt-test"); - saveTestDocument(collection, key, 0); - - DBObject queryTrue = QueryBuilder.start(key).greaterThan(-1).get(); - assertTrue(testQuery(collection, queryTrue)); - - DBObject queryFalse = QueryBuilder.start(key).greaterThan(0).get(); - assertFalse(testQuery(collection, queryFalse)); - } - - @Test - public void greaterThanEqualsTest() { - String key = "x"; - DBCollection collection = _testDB.getCollection("gte-test"); - saveTestDocument(collection, key, 0); - - DBObject queryTrue = QueryBuilder.start(key).greaterThanEquals(0).get(); - assertTrue(testQuery(collection, queryTrue)); - - DBObject queryTrue2 = QueryBuilder.start(key).greaterThanEquals(-1).get(); - assertTrue(testQuery(collection, queryTrue2)); - - DBObject queryFalse = QueryBuilder.start(key).greaterThanEquals(1).get(); - assertFalse(testQuery(collection, queryFalse)); - - } - - @Test - public void lessThanTest() { - String key = "x"; - DBCollection collection = _testDB.getCollection("lt-test"); - saveTestDocument(collection, key, 0); - - DBObject queryTrue = QueryBuilder.start(key).lessThan(1).get(); - assertTrue(testQuery(collection, queryTrue)); - - DBObject queryFalse = QueryBuilder.start(key).lessThan(0).get(); - assertFalse(testQuery(collection, queryFalse)); - - } - - @Test - public void lessThanEqualsTest() { - String key = "x"; - DBCollection collection = _testDB.getCollection("lte-test"); - saveTestDocument(collection, key, 0); - - DBObject queryTrue = QueryBuilder.start(key).lessThanEquals(1).get(); - assertTrue(testQuery(collection, queryTrue)); - - DBObject queryTrue2 = QueryBuilder.start(key).lessThanEquals(0).get(); - assertTrue(testQuery(collection, queryTrue2)); - - DBObject queryFalse = QueryBuilder.start(key).lessThanEquals(-1).get(); - assertFalse(testQuery(collection, queryFalse)); - } - - @Test - public void isTest() { - String key = "x"; - DBCollection collection = _testDB.getCollection("is-test"); - saveTestDocument(collection, key, "test"); - - DBObject queryTrue = QueryBuilder.start(key).is("test").get(); - assertTrue(testQuery(collection, queryTrue)); - - DBObject queryFalse = QueryBuilder.start(key).is("test1").get(); - assertFalse(testQuery(collection, queryFalse)); - } - - @Test - public void notEqualsTest() { - String key = "x"; - DBCollection collection = _testDB.getCollection("ne-test"); - saveTestDocument(collection, key, "test"); - - DBObject queryTrue = QueryBuilder.start(key).notEquals("test1").get(); - assertTrue(testQuery(collection, queryTrue)); - - DBObject queryFalse = QueryBuilder.start(key).notEquals("test").get(); - assertFalse(testQuery(collection, queryFalse)); - - } - - @Test - public void inTest() { - String key = "x"; - DBCollection collection = _testDB.getCollection("in-test"); - saveTestDocument(collection, key, 1); - - DBObject 
queryTrue = QueryBuilder.start(key).in(Arrays.asList(1, 2, 3)).get(); - assertTrue(testQuery(collection, queryTrue)); - - DBObject queryFalse = QueryBuilder.start(key).in(Arrays.asList(2, 3, 4)).get(); - assertFalse(testQuery(collection, queryFalse)); - } - - @Test - public void notInTest() { - String key = "x"; - DBCollection collection = _testDB.getCollection("nin-test"); - saveTestDocument(collection, key, 1); - - DBObject queryTrue = QueryBuilder.start(key).notIn(Arrays.asList(2, 3, 4)).get(); - assertTrue(testQuery(collection, queryTrue)); - - DBObject queryFalse = QueryBuilder.start(key).notIn(Arrays.asList(1, 2, 3)).get(); - assertFalse(testQuery(collection, queryFalse)); - } - - @Test - public void modTest() { - String key = "x"; - DBCollection collection = _testDB.getCollection("mod-test"); - saveTestDocument(collection, key, 9); - - DBObject queryTrue = QueryBuilder.start(key).mod(Arrays.asList(2, 1)).get(); - assertTrue(testQuery(collection, queryTrue)); - - DBObject queryFalse = QueryBuilder.start(key).mod(Arrays.asList(2, 0)).get(); - assertFalse(testQuery(collection, queryFalse)); - } - - @Test - public void allTest() { - String key = "x"; - DBCollection collection = _testDB.getCollection("all-test"); - saveTestDocument(collection, key, Arrays.asList(1, 2, 3)); - - DBObject query = QueryBuilder.start(key).all(Arrays.asList(1, 2, 3)).get(); - assertTrue(testQuery(collection, query)); - - DBObject queryFalse = QueryBuilder.start(key).all(Arrays.asList(2, 3, 4)).get(); - assertFalse(testQuery(collection, queryFalse)); - } - - @Test - public void sizeTest() { - String key = "x"; - DBCollection collection = _testDB.getCollection("size-test"); - saveTestDocument(collection, key, Arrays.asList(1, 2, 3)); - - DBObject queryTrue = QueryBuilder.start(key).size(3).get(); - assertTrue(testQuery(collection, queryTrue)); - - DBObject queryFalse = QueryBuilder.start(key).size(4).get(); - assertFalse(testQuery(collection, queryFalse)); - - DBObject queryFalse2 = QueryBuilder.start(key).size(2).get(); - assertFalse(testQuery(collection, queryFalse2)); - } - - @Test - public void existsTest() { - String key = "x"; - DBCollection collection = _testDB.getCollection("exists-test"); - saveTestDocument(collection, key, "test"); - - DBObject queryTrue = QueryBuilder.start(key).exists(true).get(); - assertTrue(testQuery(collection, queryTrue)); - - DBObject queryFalse = QueryBuilder.start(key).exists(false).get(); - assertFalse(testQuery(collection, queryFalse)); - } - - @Test - public void regexTest() { - String key = "x"; - DBCollection collection = _testDB.getCollection("regex-test"); - saveTestDocument(collection, key, "test"); - - DBObject queryTrue = QueryBuilder.start(key).regex(Pattern.compile("\\w*")).get(); - assertTrue(testQuery(collection, queryTrue)); - } - - @Test - public void rangeChainTest() { - String key = "x"; - DBCollection collection = _testDB.getCollection("range-test"); - saveTestDocument(collection, key, 2); - - DBObject queryTrue = QueryBuilder.start(key).greaterThan(0).lessThan(3).get(); - assertTrue(testQuery(collection, queryTrue)); - } - - @Test - public void compoundChainTest() { - String key = "x"; - String key2 = "y"; - String value = key; - DBCollection collection = _testDB.getCollection("compound-test"); - DBObject testDocument = new BasicDBObject(); - testDocument.put(key, value); - testDocument.put(key2, 9); - collection.save(testDocument); - - DBObject queryTrue = QueryBuilder.start(key).is(value).and(key2).mod(Arrays.asList(2,1)).get(); - 
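(For reference, a minimal, self-contained sketch of the conjunction document a chain like the one just built evaluates to, using only QueryBuilder calls these tests already exercise; the class name is illustrative.)

    import com.mongodb.DBObject;
    import com.mongodb.QueryBuilder;
    import java.util.Arrays;

    public class CompoundQuerySketch {
        public static void main(String[] args) {
            // and(key) moves the builder to a second field, so both conditions
            // land side by side in a single document.
            DBObject q = QueryBuilder.start("x").is("x")
                                     .and("y").mod(Arrays.asList(2, 1))
                                     .get();
            System.out.println(q); // roughly: { "x" : "x" , "y" : { "$mod" : [ 2 , 1 ] } }
        }
    }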
assertTrue(testQuery(collection, queryTrue)); - } - - @Test - public void arrayChainTest() { - String key = "x"; - DBCollection collection = _testDB.getCollection("array-test"); - saveTestDocument(collection, key, Arrays.asList(1, 2, 3)); - - DBObject queryTrue = QueryBuilder.start(key).all(Arrays.asList(1,2,3)).size(3).get(); - assertTrue(testQuery(collection, queryTrue)); - } - - @Test - public void nearTest() { - String key = "loc"; - DBCollection collection = _testDB.getCollection("geoSpatial-test"); - BasicDBObject geoSpatialIndex = new BasicDBObject(); - geoSpatialIndex.put(key, "2d"); - collection.ensureIndex(geoSpatialIndex); - - Double[] coordinates = {(double) 50, (double) 30}; - saveTestDocument(collection, key, coordinates); - - DBObject queryTrue = QueryBuilder.start(key).near(45, 45).get(); - assertTrue(testQuery(collection, queryTrue)); - - queryTrue = QueryBuilder.start(key).near(45, 45, 16).get(); - assertTrue(testQuery(collection, queryTrue)); - - queryTrue = QueryBuilder.start(key).nearSphere(45, 45).get(); - assertTrue(testQuery(collection, queryTrue)); - - queryTrue = QueryBuilder.start(key).nearSphere(45, 45, 0.5).get(); - assertTrue(testQuery(collection, queryTrue)); - - queryTrue = QueryBuilder.start(key).withinCenterSphere(50, 30, 0.5).get(); - assertTrue(testQuery(collection, queryTrue)); - - if (serverIsAtLeastVersion(1.9)) { - ArrayList points = new ArrayList(); - points.add( new Double[] { (double)30, (double)30 }); - points.add( new Double[] { (double)70, (double)30 }); - points.add( new Double[] { (double)70, (double)30 }); - queryTrue = QueryBuilder.start(key).withinPolygon(points).get(); - assertTrue(testQuery(collection, queryTrue)); - } - - try{ - QueryBuilder.start(key).withinPolygon(null); - fail("IllegalArgumentException should have been thrown"); - }catch(IllegalArgumentException e) {} - - try{ - QueryBuilder.start(key).withinPolygon(new ArrayList()); - fail("IllegalArgumentException should have been thrown"); - }catch(IllegalArgumentException e) {} - - try{ - ArrayList tooFew = new ArrayList(); - tooFew.add( new Double[] { (double)30, (double)30 }); - QueryBuilder.start(key).withinPolygon(tooFew); - fail("IllegalArgumentException should have been thrown"); - }catch(IllegalArgumentException e) {} - } - - @Test - public void failureTest() { - boolean thrown = false; - try { - QueryBuilder.start("x").get(); - } catch(QueryBuilderException e) { - thrown = true; - } - assertTrue(thrown); - - boolean thrown2 = false; - try { - QueryBuilder.start("x").exists(true).and("y").get(); - } catch(QueryBuilderException e) { - thrown2 = true; - } - assertTrue(thrown2); - - boolean thrown3 = false; - try { - QueryBuilder.start("x").and("y").get(); - } catch(QueryBuilderException e) { - thrown3 = true; - } - assertTrue(thrown3); - } - - @Test - public void testOr() { - DBCollection c = _testDB.getCollection( "or1" ); - c.drop(); - c.insert( new BasicDBObject( "a" , 1 ) ); - c.insert( new BasicDBObject( "b" , 1 ) ); - - DBObject q = QueryBuilder.start() - .or( new BasicDBObject( "a" , 1 ) , - new BasicDBObject( "b" , 1 ) ) - .get(); - - assertEquals( 2 , c.find( q ).itcount() ); - } - - @Test - public void testAnd() { - if (!serverIsAtLeastVersion(2.0)) { - return; - } - - DBCollection c = _testDB.getCollection( "and1" ); - c.drop(); - c.insert( new BasicDBObject( "a" , 1 ).append( "b" , 1) ); - c.insert( new BasicDBObject( "b" , 1 ) ); - - DBObject q = QueryBuilder.start() - .and( new BasicDBObject( "a" , 1 ) , - new BasicDBObject( "b" , 1 ) ) - .get(); - - 
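(Side note: a hedged sketch of the documents the or()/and() overloads used by testOr/testAnd produce; it assumes only the 2.x driver API shown in these tests, and the class name is illustrative.)

    import com.mongodb.BasicDBObject;
    import com.mongodb.DBObject;
    import com.mongodb.QueryBuilder;

    public class OrAndSketch {
        public static void main(String[] args) {
            // or(...) wraps its arguments in a $or array: matches a == 1 OR b == 1.
            DBObject orQuery = QueryBuilder.start()
                    .or(new BasicDBObject("a", 1), new BasicDBObject("b", 1))
                    .get();
            // and(...) builds the $and form instead, hence the 2.0 server guard above.
            DBObject andQuery = QueryBuilder.start()
                    .and(new BasicDBObject("a", 1), new BasicDBObject("b", 1))
                    .get();
            System.out.println(orQuery);  // { "$or" : [ { "a" : 1 } , { "b" : 1 } ] }
            System.out.println(andQuery); // { "$and" : [ { "a" : 1 } , { "b" : 1 } ] }
        }
    }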
assertEquals( 1 , c.find( q ).itcount() ); - } - - @Test - public void testMultipleAnd() { - if (!serverIsAtLeastVersion(2.0)) { - return; - } - - DBCollection c = _testDB.getCollection( "and1" ); - c.drop(); - c.insert( new BasicDBObject( "a" , 1 ).append( "b" , 1) ); - c.insert( new BasicDBObject( "b" , 1 ) ); - - DBObject q = QueryBuilder.start() - .and( new BasicDBObject( "a" , 1 ) , - new BasicDBObject( "b" , 1 ) ) - .get(); - - assertEquals( 1 , c.find( q ).itcount() ); - } - - /** - * Convenience method that creates a new MongoDB document with a key-value pair and saves it inside the specified collection - * @param collection Collection to save the new document to - * @param key key of the field to be inserted to the new document - * @param value value of the field to be inserted to the new document - */ - private void saveTestDocument(DBCollection collection, String key, Object value) { - DBObject testDocument = new BasicDBObject(); - testDocument.put(key, value); - collection.save(testDocument); - } - - private boolean testQuery(DBCollection collection, DBObject query) { - DBCursor cursor = collection.find(query); - return cursor.hasNext(); - } -} diff --git a/src/test/com/mongodb/QueryOpTest.java b/src/test/com/mongodb/QueryOpTest.java deleted file mode 100644 index d5910481259..00000000000 --- a/src/test/com/mongodb/QueryOpTest.java +++ /dev/null @@ -1,83 +0,0 @@ -package com.mongodb; - -import com.mongodb.util.TestCase; -import org.testng.annotations.*; - -/** - * Test that QueryOpBuilder creates query operations that abide by the - * $query/$orderby/$hint/$explain/$snapshot envelope conventions. - * - * @author stevebriskin - */ -public class QueryOpTest extends TestCase { - - @Test - public void testQueryOnly() { - DBObject query = QueryBuilder.start("x").greaterThan(1).get(); - DBObject obj = new QueryOpBuilder().addQuery(QueryBuilder.start("x").greaterThan(1).get()).get(); - assertEquals(query, obj); - - assertNotNull(new QueryOpBuilder().get()); - - } - - @Test - public void testQueryAndOthers() { - DBObject query = QueryBuilder.start("x").greaterThan(1).get(); - DBObject orderBy = new BasicDBObject("x", 1); - DBObject hintObj = new BasicDBObject("x_i", 1); - String hintStr = "y_i"; - - - DBObject queryOp = new QueryOpBuilder().addQuery(query).addOrderBy(orderBy).addHint(hintStr).get(); - assertEquals(queryOp.get("$query"), query); - assertEquals(queryOp.get("$orderby"), orderBy); - assertEquals(queryOp.get("$hint"), hintStr); - assertNull(queryOp.get("$explain")); - assertNull(queryOp.get("$snapshot")); - - //orderby should only be there if added - queryOp = new QueryOpBuilder().addQuery(query).addHint(hintStr).get(); - assertEquals(queryOp.get("$query"), query); - assertNull(queryOp.get("$orderby")); - - //hintObj takes precedence over hintStr - queryOp = new QueryOpBuilder().addQuery(query).addOrderBy(orderBy).addHint(hintStr).addHint(hintObj).get(); - assertEquals(queryOp.get("$query"), query); - assertEquals(queryOp.get("$orderby"), orderBy); - assertEquals(queryOp.get("$hint"), hintObj); - - queryOp = new QueryOpBuilder().addQuery(query).addExplain(true).addSnapshot(true).get(); - assertEquals(queryOp.get("$query"), query); - assertNull(queryOp.get("$orderby")); - assertNull(queryOp.get("$hint")); - assertEquals(queryOp.get("$explain"), true); - assertEquals(queryOp.get("$snapshot"), true); - - queryOp = new QueryOpBuilder().addQuery(query).addSpecialFields(new BasicDBObject("flag", "val")).get(); - assertEquals(queryOp.get("flag"), "val"); - assertEquals(queryOp.get("$query"), query); - assertNull(queryOp.get("$orderby")); - 
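(For orientation, a sketch of the wrapped operation these assertions inspect; QueryOpBuilder is package-private, so a snippet like this would have to live in com.mongodb, and the printed shape is approximate.)

    package com.mongodb;

    public class QueryOpSketch {
        public static void main(String[] args) {
            DBObject op = new QueryOpBuilder()
                    .addQuery(QueryBuilder.start("x").greaterThan(1).get())
                    .addOrderBy(new BasicDBObject("x", 1))
                    .addHint("x_i")
                    .get();
            // roughly: { "$query" : { "x" : { "$gt" : 1 } } , "$orderby" : { "x" : 1 } , "$hint" : "x_i" }
            System.out.println(op);
        }
    }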
assertNull(queryOp.get("$hint")); - assertNull(queryOp.get("$explain")); - assertNull(queryOp.get("$snapshot")); - - - // only append $readPreference if the read preference is not ReadPreference.primary() - - queryOp = new QueryOpBuilder().addQuery(query).addReadPreference(ReadPreference.primary()).get(); - assertEquals(queryOp.get("$query"), query); - assertNull(queryOp.get("$orderby")); - assertNull(queryOp.get("$hint")); - assertNull(queryOp.get("$explain")); - assertNull(queryOp.get("$snapshot")); - assertNull(queryOp.get("$readPreference")); - - queryOp = new QueryOpBuilder().addQuery(query).addReadPreference(ReadPreference.secondary()).get(); - assertEquals(queryOp.get("$query"), query); - assertNull(queryOp.get("$orderby")); - assertNull(queryOp.get("$hint")); - assertNull(queryOp.get("$explain")); - assertNull(queryOp.get("$snapshot")); - assertEquals(ReadPreference.secondary().toDBObject(), queryOp.get("$readPreference")); - } -} diff --git a/src/test/com/mongodb/ReadPreferenceGetNodeTest.java b/src/test/com/mongodb/ReadPreferenceGetNodeTest.java deleted file mode 100644 index 64e8219a96e..00000000000 --- a/src/test/com/mongodb/ReadPreferenceGetNodeTest.java +++ /dev/null @@ -1,287 +0,0 @@ -/* - * Copyright (c) 2008 - 2012 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ - -package com.mongodb; - -import com.mongodb.util.TestCase; -import org.testng.annotations.BeforeMethod; -import org.testng.annotations.Test; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashSet; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Random; -import java.util.Set; - -public class ReadPreferenceGetNodeTest extends TestCase { - private static final String setName = "test"; - private static final boolean _isMaster = true; - private static final boolean _isSecondary = true; - private static final boolean _isOK = true; - - private static final float acceptableLatencyMS = 15; - - private final ReplicaSetStatus.ReplicaSetNode _primary, _secondary1, _secondary2, _secondary3, _recovering1; - private final ReplicaSetStatus.ReplicaSet _set; - private final ReplicaSetStatus.ReplicaSet _setNoSecondary; - private final ReplicaSetStatus.ReplicaSet _setNoPrimary; - private final ReplicaSetStatus.ReplicaSet _emptySet; - - private Set expectedNodeSet; - private Set nodeSet; - - public ReadPreferenceGetNodeTest() throws IOException, MongoException { - Set names = new HashSet(); - - LinkedHashMap tagSetPrimary = new LinkedHashMap(); - tagSetPrimary.put("dc", "ny"); - - LinkedHashMap tagSet = new LinkedHashMap(); - tagSet.put("dc", "ny"); - tagSet.put("rack", "1"); - - names.clear(); - names.add("primary"); - _primary = new ReplicaSetStatus.ReplicaSetNode(new ServerAddress("127.0.0.1", 27017), names, setName, 50f, _isOK, _isMaster, !_isSecondary, tagSetPrimary, Bytes.MAX_OBJECT_SIZE); - - names.clear(); - names.add("secondary1"); - _secondary1 = new ReplicaSetStatus.ReplicaSetNode(new ServerAddress("127.0.0.1", 27018), names, setName, 60f, _isOK, !_isMaster, _isSecondary, tagSet, Bytes.MAX_OBJECT_SIZE); - - names.clear(); - names.add("secondary2"); - _secondary2 = new ReplicaSetStatus.ReplicaSetNode(new ServerAddress("127.0.0.1", 27019), names, setName, 66f, _isOK, !_isMaster, _isSecondary, tagSet, Bytes.MAX_OBJECT_SIZE); - - names.clear(); - names.add("secondary3"); - _secondary3 = new ReplicaSetStatus.ReplicaSetNode(new ServerAddress("127.0.0.1", 27019), names, setName, 76f, _isOK, !_isMaster, _isSecondary, tagSet, Bytes.MAX_OBJECT_SIZE); - - names.clear(); - names.add("recovering1"); - _recovering1 = new ReplicaSetStatus.ReplicaSetNode(new ServerAddress("127.0.0.1", 27020), names, setName, 10f, _isOK, !_isMaster, !_isSecondary, tagSet, Bytes.MAX_OBJECT_SIZE); - - List nodeList = new ArrayList(); - nodeList.add(_primary); - nodeList.add(_secondary1); - nodeList.add(_secondary2); - nodeList.add(_secondary3); - nodeList.add(_recovering1); - - _set = new ReplicaSetStatus.ReplicaSet(nodeList, new Random(), (int) acceptableLatencyMS); - _setNoSecondary = new ReplicaSetStatus.ReplicaSet(Arrays.asList(_primary, _recovering1), new Random(), (int) acceptableLatencyMS); - _setNoPrimary = new ReplicaSetStatus.ReplicaSet(Arrays.asList(_secondary1, _secondary2, _secondary3, _recovering1), new Random(), (int) acceptableLatencyMS); - _emptySet = new ReplicaSetStatus.ReplicaSet(new ArrayList(), new Random(), (int) acceptableLatencyMS); - } - - @BeforeMethod - public void setUp() { - expectedNodeSet = new HashSet(); - nodeSet = new HashSet(); - } - - @Test - public void testNearest() { - for (int i = 0; i < 1000; i++) { - nodeSet.add(ReadPreference.nearest().getNode(_set)); - } - - expectedNodeSet.addAll(Arrays.asList(_primary, _secondary1)); - assertEquals(expectedNodeSet, nodeSet); - } - - @Test - public void 
testTaggedNearest() { - final TaggableReadPreference taggedNearestReadPreference = ReadPreference.nearest(new BasicDBObject("dc", "ny")); - for (int i = 0; i < 1000; i++) { - nodeSet.add(taggedNearestReadPreference.getNode(_set)); - } - - expectedNodeSet.addAll(Arrays.asList(_primary, _secondary1)); - assertEquals(expectedNodeSet, nodeSet); - } - - @Test - public void testSecondaryPreferredWithSecondary() { - for (int i = 0; i < 1000; i++) { - nodeSet.add(ReadPreference.secondaryPreferred().getNode(_set)); - } - - expectedNodeSet.addAll(Arrays.asList(_secondary1, _secondary2)); - assertEquals(expectedNodeSet, nodeSet); - } - - @Test - public void testSecondaryPreferredWithNoSecondary() { - for (int i = 0; i < 1000; i++) { - nodeSet.add(ReadPreference.secondaryPreferred().getNode(_setNoSecondary)); - } - - expectedNodeSet.addAll(Arrays.asList(_primary)); - assertEquals(expectedNodeSet, nodeSet); - } - - @Test - public void testSecondaryPreferredWithNoPrimaryOrSecondary() { - assertNull(ReadPreference.secondaryPreferred().getNode(_emptySet)); - } - - @Test - public void testTaggedSecondaryPreferredWithSecondary() { - final TaggableReadPreference readPreference = ReadPreference.secondaryPreferred(new BasicDBObject("dc", "ny")); - - for (int i = 0; i < 1000; i++) { - nodeSet.add(readPreference.getNode(_set)); - } - - expectedNodeSet.addAll(Arrays.asList(_secondary1, _secondary2)); - assertEquals(expectedNodeSet, nodeSet); - } - - @Test - public void testTaggedSecondaryPreferredWithNoSecondary() { - final TaggableReadPreference readPreference = ReadPreference.secondaryPreferred(new BasicDBObject("dc", "ny")); - for (int i = 0; i < 1000; i++) { - nodeSet.add(readPreference.getNode(_setNoSecondary)); - } - - expectedNodeSet.addAll(Arrays.asList(_primary)); - assertEquals(expectedNodeSet, nodeSet); - } - - @Test - public void testTaggedSecondaryPreferredWithNoPrimaryOrSecondary() { - final TaggableReadPreference readPreference = ReadPreference.secondaryPreferred(new BasicDBObject("dc", "ny")); - assertNull(readPreference.getNode(_emptySet)); - } - - @Test - public void testTaggedSecondaryPreferredWithNoSecondaryMatch() { - final TaggableReadPreference nonMatchingReadPreference = - ReadPreference.secondaryPreferred(new BasicDBObject("dc", "ca")); - - for (int i = 0; i < 1000; i++) { - nodeSet.add(nonMatchingReadPreference.getNode(_set)); - } - - expectedNodeSet.addAll(Arrays.asList(_primary)); - assertEquals(expectedNodeSet, nodeSet); - } - - @Test - public void testSecondaryWithSecondary() { - for (int i = 0; i < 1000; i++) { - nodeSet.add(ReadPreference.secondary().getNode(_set)); - } - - expectedNodeSet.addAll(Arrays.asList(_secondary1, _secondary2)); - assertEquals(expectedNodeSet, nodeSet); - } - - @Test - public void testSecondaryWithNoSecondary() { - assertNull(ReadPreference.secondary().getNode(_setNoSecondary)); - } - - @Test - public void testTaggedSecondaryWithSecondary() { - final TaggableReadPreference taggedSecondaryReadPreference = ReadPreference.secondary(new BasicDBObject("dc", "ny")); - for (int i = 0; i < 1000; i++) { - nodeSet.add(taggedSecondaryReadPreference.getNode(_set)); - } - - expectedNodeSet.addAll(Arrays.asList(_secondary1, _secondary2)); - assertEquals(expectedNodeSet, nodeSet); - } - - @Test - public void testTaggedSecondaryWithNoSecondary() { - final TaggableReadPreference taggedSecondaryReadPreference = ReadPreference.secondary(new BasicDBObject("dc", "ny")); - assertNull(taggedSecondaryReadPreference.getNode(_setNoSecondary)); - } - - @Test - public void 
testPrimaryWithPrimary() { - for (int i = 0; i < 1000; i++) { - nodeSet.add(ReadPreference.primary().getNode(_set)); - } - - expectedNodeSet.addAll(Arrays.asList(_primary)); - assertEquals(expectedNodeSet, nodeSet); - } - - @Test - public void testPrimaryWithNoPrimary() { - assertNull(ReadPreference.primary().getNode(_setNoPrimary)); - } - - @Test - public void testPrimaryPreferredWithPrimary() { - for (int i = 0; i < 1000; i++) { - nodeSet.add(ReadPreference.primaryPreferred().getNode(_set)); - } - - expectedNodeSet.addAll(Arrays.asList(_primary)); - assertEquals(expectedNodeSet, nodeSet); - } - - @Test - public void testPrimaryPreferredWithNoPrimary() { - for (int i = 0; i < 1000; i++) { - nodeSet.add(ReadPreference.primaryPreferred().getNode(_setNoPrimary)); - } - - expectedNodeSet.addAll(Arrays.asList(_secondary1, _secondary2)); - assertEquals(expectedNodeSet, nodeSet); - } - - @Test - public void testTaggedPrimaryPreferredWithPrimary() { - final TaggableReadPreference readPreference = ReadPreference.primaryPreferred(new BasicDBObject("dc", "ny")); - for (int i = 0; i < 1000; i++) { - nodeSet.add(readPreference.getNode(_set)); - } - - expectedNodeSet.addAll(Arrays.asList(_primary)); - assertEquals(expectedNodeSet, nodeSet); - } - - @Test - public void testTaggedPrimaryPreferredWithNoPrimary() { - final TaggableReadPreference readPreference = ReadPreference.primaryPreferred(new BasicDBObject("dc", "ny")); - for (int i = 0; i < 1000; i++) { - nodeSet.add(readPreference.getNode(_setNoPrimary)); - } - - expectedNodeSet.addAll(Arrays.asList(_secondary1, _secondary2)); - assertEquals(expectedNodeSet, nodeSet); - } - - @Test - @SuppressWarnings("deprecation") - public void testTaggedPreference() { - ReadPreference readPreference = new ReadPreference.TaggedReadPreference(new BasicDBObject("dc", "ny")); - for (int i = 0; i < 1000; i++) { - nodeSet.add(readPreference.getNode(_set)); - } - - expectedNodeSet.addAll(Arrays.asList(_secondary1, _secondary2)); - assertEquals(expectedNodeSet, nodeSet); - } -} \ No newline at end of file diff --git a/src/test/com/mongodb/ReadPreferenceTest.java b/src/test/com/mongodb/ReadPreferenceTest.java deleted file mode 100644 index 520d515a653..00000000000 --- a/src/test/com/mongodb/ReadPreferenceTest.java +++ /dev/null @@ -1,92 +0,0 @@ -package com.mongodb; - -import com.mongodb.util.TestCase; -import org.testng.annotations.Test; - -import java.util.Arrays; - -public class ReadPreferenceTest extends TestCase { - @Test - @SuppressWarnings("deprecation") - public void testDeprecatedStaticMembers() { - assertSame(ReadPreference.primary(), ReadPreference.PRIMARY); - assertSame(ReadPreference.secondaryPreferred(), ReadPreference.SECONDARY); - } - - @Test - public void testToString() { - assertEquals("{ \"mode\" : \"primary\"}", ReadPreference.primary().toDBObject().toString()); - assertEquals("{ \"mode\" : \"secondaryPreferred\"}", ReadPreference.secondaryPreferred().toDBObject().toString()); - assertEquals("{ \"mode\" : \"nearest\"}", ReadPreference.nearest().toDBObject().toString()); - } - - @Test - public void testSecondaryReadPreference() { - final BasicDBObject asDBObject = new BasicDBObject("mode", "secondary"); - assertEquals(asDBObject, ReadPreference.secondary().toDBObject()); - - assertEquals(asDBObject.append("tags", Arrays.asList(new BasicDBObject("tag", "1"))), - ReadPreference.secondary(new BasicDBObject("tag", "1")).toDBObject()); - } - - @Test - public void testPrimaryPreferredMode() { - final BasicDBObject asDBObject = new BasicDBObject("mode", 
"primaryPreferred"); - assertEquals(asDBObject, ReadPreference.primaryPreferred().toDBObject()); - - assertEquals(asDBObject.append("tags", Arrays.asList(new BasicDBObject("tag", "1"))), - ReadPreference.primaryPreferred(new BasicDBObject("tag", "1")).toDBObject()); - } - - @Test - public void testSecondaryPreferredMode() { - final BasicDBObject asDBObject = new BasicDBObject("mode", "secondaryPreferred"); - assertEquals(asDBObject, ReadPreference.secondaryPreferred().toDBObject()); - - assertEquals(asDBObject.append("tags", Arrays.asList(new BasicDBObject("tag", "1"))), - ReadPreference.secondaryPreferred(new BasicDBObject("tag", "1")).toDBObject()); - - } - - @Test - public void testNearestMode() { - final BasicDBObject asDBObject = new BasicDBObject("mode", "nearest"); - assertEquals(asDBObject, ReadPreference.nearest().toDBObject()); - - assertEquals(asDBObject.append("tags", Arrays.asList(new BasicDBObject("tag", "1"))), - ReadPreference.nearest(new BasicDBObject("tag", "1")).toDBObject()); - - } - - @Test - public void testValueOf() { - assertEquals(ReadPreference.primary(), ReadPreference.valueOf("primary")); - assertEquals(ReadPreference.secondary(), ReadPreference.valueOf("secondary")); - assertEquals(ReadPreference.primaryPreferred(), ReadPreference.valueOf("primaryPreferred")); - assertEquals(ReadPreference.secondaryPreferred(), ReadPreference.valueOf("secondaryPreferred")); - assertEquals(ReadPreference.nearest(), ReadPreference.valueOf("nearest")); - - DBObject first = new BasicDBObject("dy", "ny"); - DBObject remaining = new BasicDBObject(); - assertEquals(ReadPreference.secondary(first, remaining), ReadPreference.valueOf("secondary", first, remaining)); - assertEquals(ReadPreference.primaryPreferred(first, remaining), ReadPreference.valueOf("primaryPreferred", first, remaining)); - assertEquals(ReadPreference.secondaryPreferred(first, remaining), ReadPreference.valueOf("secondaryPreferred", first, remaining)); - assertEquals(ReadPreference.nearest(first, remaining), ReadPreference.valueOf("nearest", first, remaining)); - } - - @Test - public void testGetName() { - assertEquals("primary", ReadPreference.primary()); - assertEquals("secondary", ReadPreference.secondary()); - assertEquals("primaryPreferred", ReadPreference.primaryPreferred()); - assertEquals("secondaryPreferred", ReadPreference.secondaryPreferred()); - assertEquals("nearest", ReadPreference.nearest()); - - DBObject first = new BasicDBObject("dy", "ny"); - DBObject remaining = new BasicDBObject(); - assertEquals(ReadPreference.secondary(first, remaining), ReadPreference.valueOf("secondary", first, remaining)); - assertEquals(ReadPreference.primaryPreferred(first, remaining), ReadPreference.valueOf("primaryPreferred", first, remaining)); - assertEquals(ReadPreference.secondaryPreferred(first, remaining), ReadPreference.valueOf("secondaryPreferred", first, remaining)); - assertEquals(ReadPreference.nearest(first, remaining), ReadPreference.valueOf("nearest", first, remaining)); - } -} diff --git a/src/test/com/mongodb/ReflectionTest.java b/src/test/com/mongodb/ReflectionTest.java deleted file mode 100644 index 5bc32e4b6df..00000000000 --- a/src/test/com/mongodb/ReflectionTest.java +++ /dev/null @@ -1,154 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import com.mongodb.util.TestCase; -import org.testng.annotations.Test; - -public class ReflectionTest extends TestCase { - - public static class Person extends ReflectionDBObject { - - public Person(){ - } - - public String getName(){ - return _name; - } - - public void setName(String name){ - _name = name; - } - - String _name; - } - - public ReflectionTest() { - cleanupDB = "com_mongodb_unittest_ReflectionTest"; - _db = cleanupMongo.getDB( cleanupDB ); - } - - @Test - public void test1() - throws MongoException { - DBCollection c = _db.getCollection( "person.test1" ); - c.drop(); - c.setObjectClass( Person.class ); - - Person p = new Person(); - p.setName( "eliot" ); - c.save( p ); - - DBObject out = c.findOne(); - assertEquals( "eliot" , out.get( "Name" ) ); - assertEquals(Person.class, out.getClass()); - } - - public static class Outer extends ReflectionDBObject { - private Inner mInclude; - private String mName; - - public void setName(final String pName) { mName = pName; } - public String getName() { return mName; } - - public Inner getInner() { return mInclude; } - public void setInner(final Inner pV) { mInclude = pV; } - } - - public static class Inner extends ReflectionDBObject { - - public int mNumber; - - public Inner(){} - public Inner( int n ){ mNumber = n; } - - public int getNumber() { return mNumber; } - public void setNumber(final int pV) { mNumber = pV; } - } - - @Test - public void test2() - throws MongoException { - - DBCollection c = _db.getCollection( "embedref1" ); - c.drop(); - c.setObjectClass( Outer.class ); - - Outer o = new Outer(); - o.setName( "eliot" ); - o.setInner( new Inner( 17 ) ); - - c.save( o ); - - DBObject out = c.findOne(); - assertEquals( "eliot" , out.get( "Name" ) ); - assertEquals(Outer.class, out.getClass()); - o = (Outer)out; - assertEquals( "eliot" , o.getName() ); - assertEquals( 17 , o.getInner().getNumber() ); - } - - static class Process extends ReflectionDBObject { - - public Process() {} - - public String getName() { - return name; - } - - public void setName(String name) { - this.name = name; - } - - public int getStatus() { - return status; - } - - public void setStatus(int status) { - this.status = status; - } - - String name; - int status; - } - - @Test - public void testFindAndModify() { - DBCollection c = _db.getCollection( "findAndModify" ); - c.drop(); - c.setObjectClass( Process.class ); - - Process p = new Process(); - p.setName("test"); - p.setStatus(0); - c.save(p, WriteConcern.SAFE); - - DBObject obj = c.findAndModify(new BasicDBObject(), new BasicDBObject("$set", new BasicDBObject("status", 1))); - assertEquals(Process.class, obj.getClass()); - Process pModified = (Process) obj; - assertEquals(0, pModified.getStatus()); - assertEquals("test", pModified.getName()); - } - - final DB _db; - - public static void main( String args[] ) - throws Exception { - (new ReflectionTest()).runConsole(); - } - -} diff --git a/src/test/com/mongodb/ReplPairTest.java b/src/test/com/mongodb/ReplPairTest.java deleted file mode 100644 index 4a4a10e9d09..00000000000 --- 
a/src/test/com/mongodb/ReplPairTest.java +++ /dev/null @@ -1,86 +0,0 @@ -// ReplPairTest.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import java.util.*; - -public class ReplPairTest { - - static class R extends Thread { - @SuppressWarnings("deprecation") - R( ServerAddress a ){ - _a = a; - _mongo = new MongoClient(a); - _db = _mongo.getDB( "test" ); - _coll = _db.getCollection( "foo" ); - - _coll.slaveOk(); - } - - public void run(){ - while ( true ){ - try { - Thread.sleep( 500 ); - _coll.findOne(); - } - catch ( NullPointerException n ){ - n.printStackTrace(); - } - catch ( Exception e ){ - System.out.println( _a + "\t" + e ); - } - } - } - - final ServerAddress _a; - final Mongo _mongo; - final DB _db; - final DBCollection _coll; - } - - @SuppressWarnings("deprecation") - public static void main( String args[] ) - throws Exception { - - List addrs = new ArrayList(); - addrs.add( new ServerAddress( "localhost" , 9998 ) ); - addrs.add( new ServerAddress( "localhost" , 9999 ) ); - - Mongo m = new Mongo ( addrs ); - DB db = m.getDB( "test" ); - DBCollection c = db.getCollection( "foo" ); - c.insert( new BasicDBObject( "_id" , 17 ) ); - c.slaveOk(); - - for ( ServerAddress a : addrs ){ - new R(a).start(); - } - - while ( true ){ - Thread.sleep( 500 ); - try { - System.out.println( c.findOne() ); - c.update( new BasicDBObject( "_id" , 17 ) , new BasicDBObject( "$inc" , new BasicDBObject( "x" , 1 ) ) ); - } - catch ( Exception e ){ - e.printStackTrace(); - } - } - } -} diff --git a/src/test/com/mongodb/ReplSetTest.java b/src/test/com/mongodb/ReplSetTest.java deleted file mode 100644 index 6529faa07b7..00000000000 --- a/src/test/com/mongodb/ReplSetTest.java +++ /dev/null @@ -1,102 +0,0 @@ -// ReplSetTest.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.mongodb; - -import java.util.*; - -public class ReplSetTest { - - static void _sleep() - throws InterruptedException { - //Thread.sleep( 500 ); - } - - static class R extends Thread { - @SuppressWarnings("deprecation") - R( ServerAddress a ){ - _a = a; - _mongo = new MongoClient(a); - _db = _mongo.getDB( "test" ); - _coll = _db.getCollection( "foo" ); - - _coll.slaveOk(); - } - - public void run(){ - while ( true ){ - try { - _sleep(); - _coll.findOne(); - } - catch ( NullPointerException n ){ - n.printStackTrace(); - } - catch ( Exception e ){ - System.out.println( _a + "\t" + e ); - } - } - } - - final ServerAddress _a; - final Mongo _mongo; - final DB _db; - final DBCollection _coll; - } - - @SuppressWarnings("deprecation") - public static void main( String args[] ) - throws Exception { - - boolean rs = true; - - List addrs = new ArrayList(); - if ( rs ){ - addrs.add( new ServerAddress( "localhost" , 27018 ) ); - addrs.add( new ServerAddress( "localhost" , 27019 ) ); - addrs.add( new ServerAddress( "localhost" , 27020 ) ); - addrs.add( new ServerAddress( "localhost" , 27021 ) ); - } - - Mongo m = rs ? new MongoClient( addrs ) : new MongoClient(); - DB db = m.getDB( "test" ); - DBCollection c = db.getCollection( "foo" ); - c.drop(); - c.insert( new BasicDBObject( "_id" , 17 ) ); - c.slaveOk(); - - for ( ServerAddress a : addrs ){ - new R(a).start(); - } - - while ( true ){ - _sleep(); - try { - DBObject x = c.findOne(new BasicDBObject( "_id", 17 ), null); - System.out.println( x ); - Integer n = (Integer) x.get( "x" ); - if (n != null && n >= 150 ) - break; - c.update( new BasicDBObject( "_id", 17 ), new BasicDBObject( "$inc", new BasicDBObject( "x", 1 ) ) ); - } - catch ( Exception e ){ - e.printStackTrace(); - } - } - } -} diff --git a/src/test/com/mongodb/ReplicaSetStatusDomainModelTest.java b/src/test/com/mongodb/ReplicaSetStatusDomainModelTest.java deleted file mode 100644 index 5af33f256ee..00000000000 --- a/src/test/com/mongodb/ReplicaSetStatusDomainModelTest.java +++ /dev/null @@ -1,264 +0,0 @@ -package com.mongodb; - -import com.mongodb.ReplicaSetStatus.ReplicaSetNode; -import com.mongodb.util.TestCase; -import org.testng.Assert; -import org.testng.annotations.Test; - -import java.net.UnknownHostException; -import java.util.*; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; -import java.util.logging.Logger; - -import static com.mongodb.ConnectionStatus.UpdatableNode.ConnectionState.Connected; - -/** - * Copyright (c) 2008 - 2011 10gen, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -public class ReplicaSetStatusDomainModelTest extends TestCase { - - @Test - public void testNode() throws UnknownHostException { - // test constructor - ServerAddress addr = new ServerAddress("127.0.0.1"); - Set names = new HashSet(); - names.add("1"); - float pingTime = 10; - boolean ok = true; - boolean isMaster = true; - boolean isSecondary = false; - int maxBsonObjectSize = Bytes.MAX_OBJECT_SIZE * 4; - LinkedHashMap tags = new LinkedHashMap(); - tags.put("foo", "1"); - tags.put("bar", "2"); - ReplicaSetStatus.ReplicaSetNode n = new ReplicaSetStatus.ReplicaSetNode(addr, names, "", pingTime, ok, isMaster, isSecondary, tags, - maxBsonObjectSize); - assertTrue(n.isOk()); - assertTrue(n.master()); - assertFalse(n.secondary()); - assertEquals(addr, n.getServerAddress()); - assertEquals(names, n.getNames()); - Set tagSet = new HashSet(); - tagSet.add(new ReplicaSetStatus.Tag("foo", "1")); - tagSet.add(new ReplicaSetStatus.Tag("bar", "2")); - assertEquals(tagSet, n.getTags()); - assertEquals(maxBsonObjectSize, n.getMaxBsonObjectSize()); - - // assert that collections are not modifiable - try { - n.getTags().clear(); - Assert.fail(); - } catch (UnsupportedOperationException e) { - // expected - } - try { - n.getNames().clear(); - Assert.fail(); - } catch (UnsupportedOperationException e) { - // expected - } - } - - @Test - public void testReplicaSet() throws Exception { - - List updatableNodes = new ArrayList(); - List nodes = new ArrayList(); - - final Random random = new Random(); - - - LinkedHashMap emptyTagMap = new LinkedHashMap(); - LinkedHashMap aTag = new LinkedHashMap(); - aTag.put("foo", "1"); - - LinkedHashMap anotherTag = new LinkedHashMap(); - anotherTag.put("bar", "2"); - - LinkedHashMap twoTags = new LinkedHashMap(); - twoTags.putAll(aTag); - twoTags.putAll(anotherTag); - - addNodeToLists("127.0.0.1", false, 10, updatableNodes, nodes, emptyTagMap); - addNodeToLists("127.0.0.2", true, 30, updatableNodes, nodes, emptyTagMap); - addNodeToLists("127.0.0.3", true, 30, updatableNodes, nodes, aTag); - addNodeToLists("127.0.0.4", true, 30, updatableNodes, nodes, anotherTag); - addNodeToLists("127.0.0.5", true, 10, updatableNodes, nodes, anotherTag); - addNodeToLists("127.0.0.6", true, 10, updatableNodes, nodes, aTag); - addNodeToLists("127.0.0.7", true, 10, updatableNodes, nodes, aTag); - addNodeToLists("127.0.0.8", true, 10, updatableNodes, nodes, twoTags); - addNodeToLists("127.0.0.9", true, 10, updatableNodes, nodes, twoTags); - - ReplicaSetStatus.ReplicaSet replicaSet = new ReplicaSetStatus.ReplicaSet(nodes, random, 15); - assertEquals(random, replicaSet.random); - assertEquals(nodes, replicaSet.all); - assertEquals(nodes.get(0), replicaSet.master); - assertTrue(replicaSet.hasMaster()); - - // test getting a secondary - final Map counters = new TreeMap(); - counters.put("127.0.0.5", new AtomicInteger(0)); - counters.put("127.0.0.6", new AtomicInteger(0)); - counters.put("127.0.0.7", new AtomicInteger(0)); - counters.put("127.0.0.8", new AtomicInteger(0)); - counters.put("127.0.0.9", new AtomicInteger(0)); - - for (int idx = 0; idx < 100000; idx++) { - final ServerAddress addr = replicaSet.getASecondary().getServerAddress(); - assertNotNull(addr); - 
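(A compact sketch of the tag-set selection API this loop drives; every call below appears in these tests, and the tag values are illustrative.)

    import com.mongodb.BasicDBObject;
    import com.mongodb.ReadPreference;

    public class TaggedReadPreferenceSketch {
        public static void main(String[] args) {
            // Prefer a secondary tagged dc=ny; the trailing empty document acts
            // as a match-anything fallback tag set.
            ReadPreference pref = ReadPreference.secondaryPreferred(
                    new BasicDBObject("dc", "ny"),
                    new BasicDBObject());
            System.out.println(pref.toDBObject()); // mode plus a "tags" array
        }
    }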
counters.get(addr.getHost()).incrementAndGet(); - } - assertLess(((getHigh(counters) - getLow(counters)) / (double) getHigh(counters)), .05); - - // test getting a secondary by multiple tags - List twoTagsList = new ArrayList(); -// twoTagsList.add(new ReplicaSetStatus.Tag("baz", "3")); - twoTagsList.add(new ReplicaSetStatus.Tag("foo", "1")); - twoTagsList.add(new ReplicaSetStatus.Tag("bar", "2")); - ServerAddress address = replicaSet.getASecondary(twoTagsList).getServerAddress(); - List goodSecondariesByTag = replicaSet.getGoodSecondariesByTags(twoTagsList); - assertEquals(2, goodSecondariesByTag.size()); - assertEquals("127.0.0.8", goodSecondariesByTag.get(0).getServerAddress().getHost()); - assertEquals("127.0.0.9", goodSecondariesByTag.get(1).getServerAddress().getHost()); - - // test randomness of getting a secondary - counters.clear(); - counters.put("127.0.0.6", new AtomicInteger(0)); - counters.put("127.0.0.7", new AtomicInteger(0)); - counters.put("127.0.0.8", new AtomicInteger(0)); - counters.put("127.0.0.9", new AtomicInteger(0)); - - List tags = new ArrayList(); -// tags.add(new ReplicaSetStatus.Tag("baz", "3")); - tags.add(new ReplicaSetStatus.Tag("foo", "1")); -// tags.add(new ReplicaSetStatus.Tag("bar", "2")); - for (int idx = 0; idx < 100000; idx++) { - final ServerAddress addr = replicaSet.getASecondary(tags).getServerAddress(); - assertNotNull(addr); - counters.get(addr.getHost()).incrementAndGet(); - } - assertLess(((getHigh(counters) - getLow(counters)) / (double) getHigh(counters)), .05); - } - - @Test(expectedExceptions = MongoException.class) - public void testMultipleSetNames1() throws Exception { - float acceptableLatencyMS = 15; - float bestPingTime = 50f; - float acceptablePingTime = bestPingTime + (acceptableLatencyMS/2); - - ReplicaSetNode primary = new ReplicaSetNode(new ServerAddress("127.0.0.1", 27017), new HashSet(Arrays.asList("primaries")) , "setName1", acceptablePingTime, true, true, false, new LinkedHashMap(), Bytes.MAX_OBJECT_SIZE ); - ReplicaSetNode secondary1 = new ReplicaSetNode(new ServerAddress("127.0.0.1", 27018), new HashSet(Arrays.asList("secondaries")), "setName2", bestPingTime, true, false, true, new LinkedHashMap(), Bytes.MAX_OBJECT_SIZE ); - ReplicaSetNode secondary2 = new ReplicaSetNode(new ServerAddress("127.0.0.1", 27019), new HashSet(Arrays.asList("secondaries")), "setName2", bestPingTime, true, false, true, new LinkedHashMap(), Bytes.MAX_OBJECT_SIZE ); - - List nodeList = new ArrayList(); - nodeList.add(primary); - nodeList.add(secondary1); - nodeList.add(secondary2); - - ReplicaSetStatus.ReplicaSet set = new ReplicaSetStatus.ReplicaSet(nodeList, (new Random()), (int)acceptableLatencyMS); - set.getMaster(); - } - - @Test(expectedExceptions = MongoException.class) - public void testMultipleSetNames2() throws Exception { - float acceptableLatencyMS = 15; - float bestPingTime = 50f; - float acceptablePingTime = bestPingTime + (acceptableLatencyMS/2); - - //one entry is empty - ReplicaSetNode primary = new ReplicaSetNode(new ServerAddress("127.0.0.1", 27017), new HashSet(Arrays.asList("primaries")) , "setName1", acceptablePingTime, true, true, false, new LinkedHashMap(), Bytes.MAX_OBJECT_SIZE ); - ReplicaSetNode secondary1 = new ReplicaSetNode(new ServerAddress("127.0.0.1", 27018), new HashSet(Arrays.asList("secondaries")), "setName2", bestPingTime, true, false, true, new LinkedHashMap(), Bytes.MAX_OBJECT_SIZE ); - ReplicaSetNode secondary2 = new ReplicaSetNode(new ServerAddress("127.0.0.1", 27019), new 
HashSet(Arrays.asList("secondaries")), "", bestPingTime, true, false, true, new LinkedHashMap(), Bytes.MAX_OBJECT_SIZE ); - - List nodeList = new ArrayList(); - nodeList.add(primary); - nodeList.add(secondary1); - nodeList.add(secondary2); - - ReplicaSetStatus.ReplicaSet set = new ReplicaSetStatus.ReplicaSet(nodeList, (new Random()), (int)acceptableLatencyMS); - set.getMaster(); - } - - @Test - public void testMultipleSetNames3() throws Exception { - float acceptableLatencyMS = 15; - float bestPingTime = 50f; - float acceptablePingTime = bestPingTime + (acceptableLatencyMS/2); - - ReplicaSetNode primary = new ReplicaSetNode(new ServerAddress("127.0.0.1", 27017), new HashSet(Arrays.asList("primaries")) , "setName1", acceptablePingTime, true, true, false, new LinkedHashMap(), Bytes.MAX_OBJECT_SIZE ); - ReplicaSetNode secondary1 = new ReplicaSetNode(new ServerAddress("127.0.0.1", 27018), new HashSet(Arrays.asList("secondaries")), "", bestPingTime, true, false, true, new LinkedHashMap(), Bytes.MAX_OBJECT_SIZE ); - ReplicaSetNode secondary2 = new ReplicaSetNode(new ServerAddress("127.0.0.1", 27019), new HashSet(Arrays.asList("secondaries")), "", bestPingTime, true, false, true, new LinkedHashMap(), Bytes.MAX_OBJECT_SIZE ); - - List nodeList = new ArrayList(); - nodeList.add(primary); - nodeList.add(secondary1); - nodeList.add(secondary2); - - ReplicaSetStatus.ReplicaSet set = new ReplicaSetStatus.ReplicaSet(nodeList, (new Random()), (int)acceptableLatencyMS); - assertEquals(primary, set.getMaster()); - assertNotNull(set.getASecondary()); - } - - private int getLow(Map counters) { - int low = Integer.MAX_VALUE; - for (final String host : counters.keySet()) { - int cur = counters.get(host).get(); - if (cur < low) { - low = cur; - } - } - return low; - } - - private int getHigh(Map counters) { - int high = 0; - for (final String host : counters.keySet()) { - int cur = counters.get(host).get(); - if (cur > high) { - high = cur; - } - } - return high; - } - - private void addNodeToLists(String address, boolean isSecondary, float pingTime, - List updatableNodes, List nodes, - LinkedHashMap tags) - throws Exception { - - ServerAddress serverAddress = new ServerAddress(address); - ReplicaSetStatus.UpdatableReplicaSetNode updatableNode - = new ReplicaSetStatus.UpdatableReplicaSetNode(serverAddress, updatableNodes, _logger, null, _mongoOptions, _lastPrimarySignal); - updatableNode._connectionState = Connected; - updatableNode._pingTimeMS = pingTime; - updatableNode._isSecondary = isSecondary; - updatableNode._isMaster = !isSecondary; - updatableNode._tags.putAll(tags); - updatableNode._maxBsonObjectSize = Bytes.MAX_OBJECT_SIZE; - - updatableNodes.add(updatableNode); - - nodes.add(new ReplicaSetStatus.ReplicaSetNode(serverAddress, Collections.singleton(serverAddress.toString()), "", pingTime, - true, !isSecondary, isSecondary, tags, Bytes.MAX_OBJECT_SIZE)); - } - - @SuppressWarnings("deprecation") - private final MongoOptions _mongoOptions = new MongoOptions(); - private final AtomicReference _setName = new AtomicReference("test"); - private final AtomicReference _logger = new AtomicReference(Logger.getLogger("test")); - private final AtomicInteger _maxBsonObjectSize = new AtomicInteger(Bytes.MAX_OBJECT_SIZE); - private final AtomicReference _lastPrimarySignal = new AtomicReference("127.0.0.1"); -} diff --git a/src/test/com/mongodb/ReplicaSetStatusTest.java b/src/test/com/mongodb/ReplicaSetStatusTest.java deleted file mode 100644 index 1ea3b63b5db..00000000000 --- 
a/src/test/com/mongodb/ReplicaSetStatusTest.java +++ /dev/null @@ -1,70 +0,0 @@ -/** - * Copyright (C) 2011 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import com.mongodb.ReplicaSetStatus.ReplicaSetNode; -import org.testng.Assert; -import org.testng.annotations.AfterClass; -import org.testng.annotations.BeforeClass; -import org.testng.annotations.Test; - -import java.net.UnknownHostException; -import java.util.List; - - -/** - * This is a placeholder. A node needs to be able to be created outside of ReplicaSetStatus. - */ -public class ReplicaSetStatusTest { - private Mongo mongoClient; - - @BeforeClass - public void beforeClass() throws UnknownHostException { - mongoClient = new MongoClient(new MongoClientURI("mongodb://127.0.0.1:27017,127.0.0.1:27018")); - } - - @AfterClass - public void afterClass() { - mongoClient.close(); - } - - @Test - public void testClose() throws InterruptedException { - ReplicaSetStatus replicaSetStatus = new ReplicaSetStatus(mongoClient, mongoClient.getAllAddress()); - replicaSetStatus.start(); - Assert.assertNotNull(replicaSetStatus._replicaSetHolder.get()); - - replicaSetStatus.close(); - - replicaSetStatus._updater.join(5000); - - Assert.assertTrue(!replicaSetStatus._updater.isAlive()); - } - - @Test - public void testSetNames() throws Exception { - String replicaSetName = mongoClient.getConnector().getReplicaSetStatus().getName(); - - List nodes = mongoClient.getConnector().getReplicaSetStatus()._replicaSetHolder.get().getAll(); - - for(ReplicaSetNode node : nodes){ - Assert.assertEquals(replicaSetName, node.getSetName()); - } - - } -} - diff --git a/src/test/com/mongodb/SecondaryReadTest.java b/src/test/com/mongodb/SecondaryReadTest.java deleted file mode 100644 index e706392aab0..00000000000 --- a/src/test/com/mongodb/SecondaryReadTest.java +++ /dev/null @@ -1,279 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.mongodb; - -// Mongo - -import com.mongodb.util.TestCase; -import org.testng.annotations.Test; - -import java.util.ArrayList; -import java.util.List; - -// Java - -public class SecondaryReadTest extends TestCase { - - - private static final int TOTAL_COUNT = 5000; - - private static final double MAX_DEVIATION_PERCENT = 5.0; - - /** - * Assert that the percentage of reads to each secondary does not deviate by more than MAX_DEVIATION_PERCENT (5 %) - */ - @Test(groups = {"basic"}) - public void testSecondaryReadBalance() throws Exception { - - final Mongo mongo = loadMongo(); - - try { - if (isStandalone(mongo)) { - return; - } - - final List testHosts = extractHosts(mongo); - - final DBCollection col = loadCleanDbCollection(mongo); - - // Get the opcounter/query data for the hosts. - loadQueryCount(testHosts, true); - - final int secondaryCount = getSecondaryCount(testHosts); - - // Perform some reads on the secondaries - col.setReadPreference(ReadPreference.secondary()); - - for (int idx=0; idx < TOTAL_COUNT; idx++) { - col.findOne(); - } - - loadQueryCount(testHosts, false); - - verifySecondaryCounts(secondaryCount, testHosts); - } finally { if (mongo != null) mongo.close(); } - } - - /** - * Assert that secondary reads actually are routed to a secondary - */ - @Test(groups = {"basic"}) - public void testSecondaryReadCursor() throws Exception { - final Mongo mongo = loadMongo(); - try { - if (isStandalone(mongo)) { - return; - } - - final List testHosts = extractHosts(mongo); - - final DBCollection col = loadCleanDbCollection(mongo); - - insertTestData(col, new WriteConcern(getSecondaryCount(testHosts) + 1, 10000)); - - // Get the opcounter/query data for the hosts. - loadQueryCount(testHosts, true); - - // Perform some reads on the secondaries - col.setReadPreference(ReadPreference.secondary()); - - final DBCursor cur = col.find(); - - cur.hasNext(); - - ServerAddress curServerAddress = cur.getServerAddress(); - - assertTrue(serverIsSecondary(curServerAddress, testHosts)); - - } finally { if (mongo != null) mongo.close(); } - } - - /* - @Test(groups = {"basic"}) - public void testSecondaryCalls() throws Exception{ - final Mongo mongo = loadMongo(); - - try { - if (isStandalone(mongo)) { - return; - } - - final List testHosts = extractHosts(mongo); - final DBCollection col = loadCleanDbCollection(mongo); - final DB db = col.getDB(); - - insertTestData(col, new WriteConcern(getSecondaryCount(testHosts) + 1)); - - //whole DB is secondary - db.setReadPreference(ReadPreference.SECONDARY); - - col.count(); - confirmSecondary(db, extractHosts(mongo)); - col.findOne(); - confirmSecondary(db, extractHosts(mongo)); - col.distinct("value"); - confirmSecondary(db, extractHosts(mongo)); - - - //DB is primary, Collection is secondary - db.setReadPreference(ReadPreference.PRIMARY); - col.setReadPreference(ReadPreference.SECONDARY); - - col.count(); - confirmSecondary(db, extractHosts(mongo)); - col.findOne(); - confirmSecondary(db, extractHosts(mongo)); - col.distinct("value"); - confirmSecondary(db, extractHosts(mongo)); - - - } finally { if (mongo != null) mongo.close(); } - - } - */ - - private void confirmSecondary(DB db, List pHosts) throws Exception{ - String server = db.getLastError().getString("serverUsed"); - String[] ipPort = server.split("[/:]"); - - ServerAddress servAddress = new ServerAddress(ipPort[0], Integer.parseInt(ipPort[2])); - - assertTrue(serverIsSecondary(servAddress, pHosts)); - - } - - private boolean serverIsSecondary(final ServerAddress pServerAddr, final List pHosts) { - for 
(final TestHost h : pHosts) { - if (!h.stateStr.equals("SECONDARY")) - continue; - final int portIdx = h.hostnameAndPort.indexOf(":"); - final int port = Integer.parseInt(h.hostnameAndPort.substring(portIdx+1, h.hostnameAndPort.length())); - final String hostname = h.hostnameAndPort.substring(0, portIdx); - - if (pServerAddr.getPort() == port && hostname.equals(pServerAddr.getHost())) - return true; - } - - return false; - } - - private Mongo loadMongo() throws Exception { - return new MongoClient(new MongoClientURI( - "mongodb://127.0.0.1:27017,127.0.0.1:27018,127.0.0.1:27019/?connectTimeoutMS=30000;socketTimeoutMS=30000;maxpoolsize=5;autoconnectretry=true")); - } - - @SuppressWarnings({"unchecked"}) - private List extractHosts(Mongo mongo) { - CommandResult result = runReplicaSetStatusCommand(mongo); - - List pHosts = new ArrayList(); - - // Extract the repl set members. - for (final BasicDBObject member : (List) result.get("members")) { - String hostnameAndPort = member.getString("name"); - if (!hostnameAndPort.contains(":")) { - hostnameAndPort = hostnameAndPort + ":27017"; - } - - final String stateStr = member.getString("stateStr"); - - pHosts.add(new TestHost(hostnameAndPort, stateStr)); - } - - return pHosts; - } - - private DBCollection loadCleanDbCollection(final Mongo pMongo) { - getDatabase(pMongo).dropDatabase(); - final DB db = getDatabase(pMongo); - return db.getCollection("testBalance"); - } - - private DB getDatabase(final Mongo pMongo) { - return pMongo.getDB("com_mongodb_unittest_SecondaryReadTest"); - } - - private void insertTestData(final DBCollection pCol, WriteConcern writeConcern) throws Exception { - // Insert some test data. - for (int idx=0; idx < 1000; idx++) { - WriteConcern curWriteConcern = (idx < 999) ? WriteConcern.NONE : writeConcern; - WriteResult writeResult = pCol.insert(new BasicDBObject(), curWriteConcern); - writeResult.getLastError().throwOnError(); - } - } - - private int getSecondaryCount(final List pHosts) { - int secondaryCount = 0; - for (final TestHost testHost : pHosts) - if (testHost.stateStr.equals("SECONDARY")) - secondaryCount++; - return secondaryCount; - } - - private void verifySecondaryCounts(final int pSecondaryCount, final List pHosts) { - - // Verify the counts. 
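(The balance check below reduces to a percentage-gap computation; here is a worked sketch with made-up counts.)

    public class DeviationSketch {
        public static void main(String[] args) {
            int totalCount = 5000;                            // TOTAL_COUNT in the test
            int secondaries = 2;                              // hypothetical replica set
            int expected = totalCount / secondaries;          // 2500 reads per secondary
            long executed = 2600;                             // hypothetical opcounter delta
            // Same formula as verifySecondaryCounts uses when executed > expected:
            double deviation = 100 - ((double) expected / (double) executed) * 100;
            System.out.printf("deviation = %.2f%%%n", deviation); // ~3.85, under the 5.0 ceiling
        }
    }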
- final int expectedPerSecondary = TOTAL_COUNT / pSecondaryCount; - - for (final TestHost testHost : pHosts) { - - if (!testHost.stateStr.equals("SECONDARY")) continue; - - final long queriesExecuted = testHost.getQueriesExecuted(); - - final double deviation; - if (queriesExecuted > expectedPerSecondary) { - deviation = (double)100 - (((double)expectedPerSecondary / (double)queriesExecuted) * (double)100); - } else { - deviation = (double)100 - (((double)queriesExecuted / (double)expectedPerSecondary) * (double)100); - } - assertLess(deviation, MAX_DEVIATION_PERCENT); - } - } - - private static void loadQueryCount(final List pHosts, final boolean pBefore) throws Exception { - for (final TestHost testHost : pHosts) { - final Mongo mongoHost = new MongoClient(new MongoClientURI("mongodb://"+testHost.hostnameAndPort+"/?connectTimeoutMS=30000;socketTimeoutMS=30000;maxpoolsize=5;autoconnectretry=true")); - try { - final CommandResult serverStatusResult - = mongoHost.getDB("com_mongodb_unittest_SecondaryReadTest").command(new BasicDBObject("serverStatus", 1)); - - final BasicDBObject opcounters = (BasicDBObject)serverStatusResult.get("opcounters"); - - if (pBefore) testHost.queriesBefore = opcounters.getLong("query"); - else testHost.queriesAfter = opcounters.getLong("query"); - - } finally { if (mongoHost != null) mongoHost.close(); } - } - } - - private static class TestHost { - private final String hostnameAndPort; - private final String stateStr; - - private long queriesBefore; - private long queriesAfter; - - public long getQueriesExecuted() { return queriesAfter - queriesBefore; } - - private TestHost(final String pHostnameAndPort, final String pStateStr) { - hostnameAndPort = pHostnameAndPort; - stateStr = pStateStr; - } - } -} - diff --git a/src/test/com/mongodb/SingleThreadedMongoPerfTest.java b/src/test/com/mongodb/SingleThreadedMongoPerfTest.java deleted file mode 100644 index 2e181ba5451..00000000000 --- a/src/test/com/mongodb/SingleThreadedMongoPerfTest.java +++ /dev/null @@ -1,53 +0,0 @@ -package com.mongodb; - -/** - * Copyright (c) 2008 - 2011 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ - - - // Run this once with the 50K inserts uncommented, then comment it out again. Then run with - // java -verbosegc -XX:-PrintGCDetails -classpath "..." com.mongodb.perf.SingleThreadedMongoPerfTest | wc -l - // You should see reduced GC activity after moving the buffers back to BasicBSONDecoder - public class SingleThreadedMongoPerfTest { - public static void main(String... args) throws Exception { - // connection pool size is 10 - MongoClientOptions opts = new MongoClientOptions.Builder(). 
- writeConcern(WriteConcern.UNACKNOWLEDGED).connectionsPerHost(10).build(); - - ServerAddress addr = new ServerAddress("127.0.0.1", 27017); - MongoClient mongo = new MongoClient(addr, opts); - DB db = mongo.getDB("mongotest"); - DBCollection collection = db.getCollection("mongoperftest"); - - long start; - long end; - - /* // drop the existing test collection, if it exists - collection.drop(); - start = System.currentTimeMillis(); - for (int i = 0; i < 50000; i++) { - collection.insert(new BasicDBObject("_id", i), WriteConcern.SAFE); - } - end = System.currentTimeMillis(); - System.out.println("insert: " + (end - start) + "ms");*/ - - int i = 0; - start = System.currentTimeMillis(); - for (DBObject cur : collection.find()) { - i++; - } - end = System.currentTimeMillis(); - System.out.println("found " + i + " documents in " + (end - start) + "ms"); - - mongo.close(); - } -} diff --git a/src/test/com/mongodb/WriteConcernTest.java b/src/test/com/mongodb/WriteConcernTest.java deleted file mode 100644 index 896f590fdb5..00000000000 --- a/src/test/com/mongodb/WriteConcernTest.java +++ /dev/null @@ -1,165 +0,0 @@ -// WriteConcernSerializationTest.java - -/** - * Copyright (C) 2010 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb; - -import com.mongodb.util.TestCase; -import org.testng.Assert; -import org.testng.annotations.Test; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.ObjectInputStream; -import java.io.ObjectOutputStream; -import java.net.UnknownHostException; - -public class WriteConcernTest extends TestCase { - - @Test - public void testEqualityAndHashCode() { - Assert.assertEquals(new WriteConcern("majority"), new WriteConcern("majority")); - Assert.assertEquals(new WriteConcern("majority").hashCode(), new WriteConcern("majority").hashCode()); - Assert.assertNotEquals(new WriteConcern("majority"), new WriteConcern(1)); - Assert.assertNotEquals(new WriteConcern("majority").hashCode(), new WriteConcern(1).hashCode()); - - Assert.assertEquals(new WriteConcern(1), WriteConcern.ACKNOWLEDGED); - } - - @Test - public void testSerializeWriteConcern() throws IOException, ClassNotFoundException { - WriteConcern writeConcern = WriteConcern.SAFE; - - ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); - ObjectOutputStream objectOutputStream = new ObjectOutputStream(outputStream); - - objectOutputStream.writeObject(writeConcern); - - ByteArrayInputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray()); - ObjectInputStream objectInputStream = new ObjectInputStream(inputStream); - WriteConcern object2 = (WriteConcern) objectInputStream.readObject(); - - Assert.assertEquals(1, object2.getW()); - Assert.assertEquals(false, object2.getFsync()); - Assert.assertEquals(false, object2.getJ()); - Assert.assertEquals(false, object2.getContinueOnErrorForInsert()); - } - - @Test - public void testSerializeMajorityWriteConcern() throws IOException, 
ClassNotFoundException { - WriteConcern writeConcern = WriteConcern.MAJORITY; - - ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); - ObjectOutputStream objectOutputStream = new ObjectOutputStream(outputStream); - - objectOutputStream.writeObject(writeConcern); - - ByteArrayInputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray()); - ObjectInputStream objectInputStream = new ObjectInputStream(inputStream); - WriteConcern.Majority object2 = (WriteConcern.Majority) objectInputStream.readObject(); - - Assert.assertEquals("majority", object2.getWString()); - Assert.assertEquals(false, object2.getFsync()); - Assert.assertEquals(false, object2.getJ()); - Assert.assertEquals(false, object2.getContinueOnErrorForInsert()); - } - - @Test - public void testCheckLastError() { - Assert.assertFalse(WriteConcern.NONE.callGetLastError()); - Assert.assertFalse(WriteConcern.NORMAL.callGetLastError()); - Assert.assertFalse(WriteConcern.UNACKNOWLEDGED.callGetLastError()); - Assert.assertTrue(WriteConcern.SAFE.callGetLastError()); - Assert.assertTrue(WriteConcern.ACKNOWLEDGED.callGetLastError()); - Assert.assertTrue(WriteConcern.FSYNC_SAFE.callGetLastError()); - Assert.assertTrue(WriteConcern.JOURNAL_SAFE.callGetLastError()); - Assert.assertFalse(WriteConcern.ERRORS_IGNORED.callGetLastError()); - Assert.assertTrue(WriteConcern.JOURNALED.callGetLastError()); - Assert.assertTrue(WriteConcern.FSYNCED.callGetLastError()); - Assert.assertTrue(WriteConcern.REPLICA_ACKNOWLEDGED.callGetLastError()); - Assert.assertTrue(WriteConcern.MAJORITY.callGetLastError()); - Assert.assertTrue(WriteConcern.REPLICAS_SAFE.callGetLastError()); - Assert.assertTrue(new WriteConcern("custom").callGetLastError()); - Assert.assertFalse(new WriteConcern(0, 1000).callGetLastError()); - Assert.assertFalse(new WriteConcern(0, 0, true, false).callGetLastError()); - Assert.assertFalse(new WriteConcern(0, 0, false, true).callGetLastError()); - } - - @Test - public void testW() { - Assert.assertEquals(-1, WriteConcern.NONE.getW()); - Assert.assertEquals(0, WriteConcern.NORMAL.getW()); - Assert.assertEquals(0, WriteConcern.UNACKNOWLEDGED.getW()); - Assert.assertEquals(1, WriteConcern.SAFE.getW()); - Assert.assertEquals(1, WriteConcern.ACKNOWLEDGED.getW()); - Assert.assertEquals(1, WriteConcern.FSYNC_SAFE.getW()); - Assert.assertEquals(1, WriteConcern.JOURNAL_SAFE.getW()); - Assert.assertEquals(-1, WriteConcern.ERRORS_IGNORED.getW()); - Assert.assertEquals(1, WriteConcern.JOURNALED.getW()); - Assert.assertEquals(1, WriteConcern.FSYNCED.getW()); - Assert.assertEquals(2, WriteConcern.REPLICA_ACKNOWLEDGED.getW()); - Assert.assertEquals("majority", WriteConcern.MAJORITY.getWString()); - Assert.assertEquals(2, WriteConcern.REPLICAS_SAFE.getW()); - Assert.assertEquals("custom", new WriteConcern("custom").getWString()); - } - - @Test - public void testRaiseNetworkErrors() { - Assert.assertFalse(WriteConcern.NONE.raiseNetworkErrors()); - Assert.assertTrue(WriteConcern.NORMAL.raiseNetworkErrors()); - Assert.assertTrue(WriteConcern.UNACKNOWLEDGED.raiseNetworkErrors()); - Assert.assertTrue(WriteConcern.SAFE.raiseNetworkErrors()); - Assert.assertTrue(WriteConcern.ACKNOWLEDGED.raiseNetworkErrors()); - Assert.assertTrue(WriteConcern.FSYNC_SAFE.raiseNetworkErrors()); - Assert.assertTrue(WriteConcern.JOURNAL_SAFE.raiseNetworkErrors()); - Assert.assertFalse(WriteConcern.ERRORS_IGNORED.raiseNetworkErrors()); - Assert.assertTrue(WriteConcern.JOURNALED.raiseNetworkErrors()); - 
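The serialization tests in this file repeat the same in-memory ObjectOutputStream/ObjectInputStream round trip; a minimal sketch of a shared helper (hypothetical name, plain java.io only, not part of the driver) could look like:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;
    import java.io.Serializable;

    // Hypothetical helper factoring out the round-trip pattern repeated in the
    // serialization tests above; not part of the driver's API.
    final class SerializationUtil {
        @SuppressWarnings("unchecked")
        static <T extends Serializable> T roundTrip(T original) throws IOException, ClassNotFoundException {
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            ObjectOutputStream out = new ObjectOutputStream(bytes);
            out.writeObject(original);          // serialize into an in-memory buffer
            out.close();
            ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()));
            return (T) in.readObject();         // deserialize the copy and cast back
        }
    }

A test body would then reduce to WriteConcern copy = SerializationUtil.roundTrip(WriteConcern.SAFE); followed by the field assertions.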
Assert.assertTrue(WriteConcern.FSYNCED.raiseNetworkErrors()); - Assert.assertTrue(WriteConcern.REPLICA_ACKNOWLEDGED.raiseNetworkErrors()); - Assert.assertTrue(WriteConcern.MAJORITY.raiseNetworkErrors()); - Assert.assertTrue(WriteConcern.REPLICAS_SAFE.raiseNetworkErrors()); - Assert.assertTrue(new WriteConcern("custom").raiseNetworkErrors()); - } - - @Test - public void testGetLastErrorCommand() { - assertEquals(new BasicDBObject("getlasterror", 1), WriteConcern.UNACKNOWLEDGED.getCommand()); - assertEquals(new BasicDBObject("getlasterror", 1), WriteConcern.ACKNOWLEDGED.getCommand()); - assertEquals(new BasicDBObject("getlasterror", 1), new WriteConcern(1).getCommand()); - assertEquals(new BasicDBObject("getlasterror", 1).append("wtimeout", 1000), new WriteConcern(0, 1000).getCommand()); - assertEquals(new BasicDBObject("getlasterror", 1).append("fsync", true), new WriteConcern(0, 0, true, false).getCommand()); - assertEquals(new BasicDBObject("getlasterror", 1).append("j", true), new WriteConcern(0, 0, false, true).getCommand()); - } - - - // integration test to ensure that server doesn't mind a getlasterror command with wtimeout but no w. - @Test - public void testGetLastError() throws UnknownHostException { - MongoClient mc = new MongoClient(); - DB db = mc.getDB("WriteConcernTest"); - DBCollection collection = db.getCollection("testGetLastError"); - try { - WriteConcern wc = new WriteConcern(0, 1000); - WriteResult res = collection.insert(new BasicDBObject(), wc); - Assert.assertTrue(res.getLastError().ok()); - } finally { - db.dropDatabase(); - mc.close(); - } - } -} diff --git a/src/test/com/mongodb/framework/Admin.java b/src/test/com/mongodb/framework/Admin.java deleted file mode 100644 index 026a6a9d089..00000000000 --- a/src/test/com/mongodb/framework/Admin.java +++ /dev/null @@ -1,30 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.framework; - -import java.util.*; -import java.net.*; - -import com.mongodb.*; -import com.mongodb.util.*; - -public class Admin { - - public static void main(String[] args) { - throw new RuntimeException( "admin stuff not supported yet." ); - } -} diff --git a/src/test/com/mongodb/framework/Capped.java b/src/test/com/mongodb/framework/Capped.java deleted file mode 100644 index 187141b54f4..00000000000 --- a/src/test/com/mongodb/framework/Capped.java +++ /dev/null @@ -1,60 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.mongodb.framework; - -import com.mongodb.BasicDBObject; -import com.mongodb.DB; -import com.mongodb.DBCollection; -import com.mongodb.DBObject; -import com.mongodb.MongoClient; -import com.mongodb.MongoException; - -import java.net.UnknownHostException; - - -public class Capped { - - public static void main(String[] args) - throws MongoException , UnknownHostException { - - DB db = new MongoClient().getDB( "driver_test_framework" ); - DBObject foo = new BasicDBObject(); - foo.put( "create", "capped1" ); - foo.put( "capped", true ); - foo.put( "size", 500 ); - DBObject dbobj = db.command( foo ); - DBCollection c = db.getCollection( "capped1" ); - - DBObject obj1 = new BasicDBObject(); - obj1.put( "x", 1 ); - c.save( obj1 ); - DBObject obj2 = new BasicDBObject(); - obj2.put( "x", 2 ); - c.save( obj2 ); - - foo.put( "create", "capped2" ); - foo.put( "size", 1000 ); - db.command( foo ); - String s = ""; - c = db.getCollection( "capped2" ); - for( int i=1; i<= 100; i++ ) { - DBObject obj = new BasicDBObject(); - obj.put( "dashes", s ); - c.save( obj ); - s = s+"-"; - } - } -} diff --git a/src/test/com/mongodb/framework/Circular.java b/src/test/com/mongodb/framework/Circular.java deleted file mode 100644 index aa1a5aca4ad..00000000000 --- a/src/test/com/mongodb/framework/Circular.java +++ /dev/null @@ -1,52 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.mongodb.framework; - -import java.util.*; -import java.net.*; - -import com.mongodb.*; -import com.mongodb.util.*; - -import org.bson.*; -import org.bson.types.*; - -@SuppressWarnings("deprecation") -public class Circular { - - public static void main(String[] args) - throws Exception { - - DB db = new MongoClient().getDB( "driver_test_framework" ); - DBObject foo = new BasicDBObject(); - DBCollection b = db.getCollection( "b" ); - foo.put( "c", b ); - db.getCollection( "a" ).save( foo ); - - foo = new BasicDBObject(); - foo.put( "c", 1 ); - b.save( foo ); - - ObjectId id = new ObjectId(); - foo = new BasicDBObject(); - foo.put( "_id", id ); - foo.put( "that", 2 ); - DBPointer ref = new DBPointer( "c", id ); - foo.put( "thiz", ref ); - db.getCollection( "c" ).save( foo ); - - } -} diff --git a/src/test/com/mongodb/framework/Count1.java b/src/test/com/mongodb/framework/Count1.java deleted file mode 100644 index 617c0beca27..00000000000 --- a/src/test/com/mongodb/framework/Count1.java +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.mongodb.framework; - -import java.util.*; -import java.net.*; - -import com.mongodb.*; -import com.mongodb.util.*; - - -public class Count1 { - - public static void main(String[] args) - throws Exception { - - DB db = new MongoClient().getDB( "driver_test_framework" ); - System.out.println( db.getCollection( "test1" ).find().count() ); - System.out.println( db.getCollection( "test2" ).find().count() ); - DBCollection coll = db.getCollection( "test3" ); - - DBObject foo = new BasicDBObject(); - foo.put( "i", "a" ); - System.out.println( coll.find( foo ).count() ); - - foo.put( "i", 3 ); - System.out.println( coll.find( foo ).count() ); - - DBObject bar = new BasicDBObject(); - bar.put( "$gte" , 67 ); - foo.put( "i", bar ); - System.out.println( coll.find( foo ).count() ); - } -} diff --git a/src/test/com/mongodb/framework/Dbs.java b/src/test/com/mongodb/framework/Dbs.java deleted file mode 100644 index ced3d4c1e7b..00000000000 --- a/src/test/com/mongodb/framework/Dbs.java +++ /dev/null @@ -1,68 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.mongodb.framework; - -import com.mongodb.BasicDBObject; -import com.mongodb.DB; -import com.mongodb.DBCollection; -import com.mongodb.DBObject; -import com.mongodb.MongoClient; -import com.mongodb.MongoException; - -import java.net.UnknownHostException; -import java.util.Arrays; -import java.util.Set; - -public class Dbs { - - public static void main(String[] args) - throws UnknownHostException , MongoException { - - DB db = new MongoClient().getDB( "driver_test_framework" ); - DBCollection coll = db.getCollection( "dbs_1" ); - DBObject o = new BasicDBObject(); - o.put( "foo", "bar" ); - coll.save( o ); - - coll = db.getCollection( "dbs_2" ); - o = new BasicDBObject(); - o.put( "psi", "phi" ); - coll.save( o ); - - Set<String> set = db.getCollectionNames(); - String[] strs = set.toArray( new String[0] ); - Arrays.sort( strs ); - for( String s : strs ) { - if( s.startsWith( "dbs" ) ) { - System.out.println( s ); - } - } - - db.getCollection( "dbs_1" ).drop(); - o = new BasicDBObject(); - o.put( "create", "dbs_3" ); - db.command( o ); - - set = db.getCollectionNames(); - strs = set.toArray( new String[0] ); - Arrays.sort( strs ); - for( String s : strs ) { - if( s.startsWith( "dbs" ) ) { - System.out.println( s ); - } - } - } -} diff --git a/src/test/com/mongodb/framework/Find.java b/src/test/com/mongodb/framework/Find.java deleted file mode 100644 index c45e32d146c..00000000000 --- a/src/test/com/mongodb/framework/Find.java +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.framework; - -import com.mongodb.BasicDBObject; -import com.mongodb.DB; -import com.mongodb.DBCollection; -import com.mongodb.DBObject; -import com.mongodb.MongoClient; - -public class Find { - - public static void main(String[] args) - throws Exception { - - DB db = new MongoClient().getDB( "driver_test_framework" ); - DBCollection c = db.getCollection( "test" ); - - DBObject foo = new BasicDBObject(); - foo.put( "a", 2 ); - c.save( foo ); - } -} diff --git a/src/test/com/mongodb/framework/Find1.java b/src/test/com/mongodb/framework/Find1.java deleted file mode 100644 index e8d62075859..00000000000 --- a/src/test/com/mongodb/framework/Find1.java +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.mongodb.framework; - -import com.mongodb.BasicDBObject; -import com.mongodb.DB; -import com.mongodb.DBCursor; -import com.mongodb.DBObject; -import com.mongodb.MongoClient; - - -public class Find1 { - - public static void main(String[] args) - throws Exception { - - DB db = new MongoClient().getDB( "driver_test_framework" ); - DBObject foo = new BasicDBObject(); - foo.put( "x", 1 ); - DBObject bar = new BasicDBObject(); - bar.put( "y", 1 ); - DBCursor cursor = db.getCollection( "c" ).find( foo ).sort( bar ).skip( 20 ).limit( 10 ); - - while( cursor.hasNext() ) { - System.out.println( cursor.next().get( "z" ) ); - } - } -} diff --git a/src/test/com/mongodb/framework/Remove.java b/src/test/com/mongodb/framework/Remove.java deleted file mode 100644 index fa95a632f7a..00000000000 --- a/src/test/com/mongodb/framework/Remove.java +++ /dev/null @@ -1,37 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.mongodb.framework; - -import com.mongodb.BasicDBObject; -import com.mongodb.DB; -import com.mongodb.DBObject; -import com.mongodb.MongoClient; - - -public class Remove { - - public static void main(String[] args) - throws Exception { - - DB db = new MongoClient().getDB( "driver_test_framework" ); - DBObject foo = new BasicDBObject(); - db.getCollection( "remove1" ).remove( foo ); - - foo.put( "a", 3 ); - db.getCollection( "remove2" ).remove( foo ); - } -} diff --git a/src/test/com/mongodb/framework/Stress1.java b/src/test/com/mongodb/framework/Stress1.java deleted file mode 100644 index 4c768146d3a..00000000000 --- a/src/test/com/mongodb/framework/Stress1.java +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.framework; - -import java.util.*; -import java.net.*; - -import com.mongodb.*; -import com.mongodb.util.*; - -public class Stress1 { - - public static void doStuff( DBCollection c, int count ) - throws MongoException { - DBObject obj = new BasicDBObject(); - obj.put( "id", count ); - DBObject x = c.findOne( obj ); - x.put( "subarray", "foo" + count ); - c.save( x ); - } - - public static void main(String[] args) - throws UnknownHostException , MongoException { - - DB db = new MongoClient().getDB( "driver_test_framework" ); - DBCollection c = db.getCollection( "stress1" ); - - String blah = "lksjhasoh1298alshasoidiohaskjasiouashoasasiugoas" + - "lksjhasoh1298alshasoidiohaskjasiouashoasasiugoas" + - "lksjhasoh1298alshasoidiohaskjasiouashoasasiugoas" + - "lksjhasoh1298alshasoidiohaskjasiouashoasasiugoas" + - "lksjhasoh1298alshasoidiohaskjasiouashoasasiugoas" + - "lksjhasoh1298alshasoidiohaskjasiouashoasasiugoas"; - - for( int i=0; i<50000; i++ ) { - DBObject foo = new BasicDBObject(); - foo.put( "name", "asdf"+i ); - foo.put( "date", new Date() ); - foo.put( "id", i ); - foo.put( "blah", blah ); - c.save( foo ); - } - - for( int count=0; count<10000; count++ ) { - doStuff( c, count ); - } - - DBObject idx = new BasicDBObject(); - idx.put( "date", 1 ); - c.ensureIndex( idx ); - } -} diff --git a/src/test/com/mongodb/framework/Test1.java b/src/test/com/mongodb/framework/Test1.java deleted file mode 100644 index 5550601237a..00000000000 --- a/src/test/com/mongodb/framework/Test1.java +++ /dev/null @@ -1,40 +0,0 @@ - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.mongodb.framework; - -import com.mongodb.BasicDBObject; -import com.mongodb.DB; -import com.mongodb.DBCollection; -import com.mongodb.DBObject; -import com.mongodb.MongoClient; - -public class Test1 { - - public static void main(String[] args) - throws Exception { - - DB db = new MongoClient().getDB( "driver_test_framework" ); - DBCollection coll = db.getCollection( "part1" ); - - for( int i=0; i<100; i++) { - DBObject o = new BasicDBObject(); - o.put( "x", i ); - coll.save( o ); - } - } -} diff --git a/src/test/com/mongodb/framework/dispatch b/src/test/com/mongodb/framework/dispatch deleted file mode 100755 index e9c2dbc4e70..00000000000 --- a/src/test/com/mongodb/framework/dispatch +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -export CLASSPATH=$CLASSPATH:../../../../../build -begintime=`date` -if [[ "$1" = "test1" ]]; then - java com.mongodb.framework.Test1 -#elif [[ "$1" = "count1" ]]; then -# java com.mongodb.framework.Count1 > $2 -elif [[ "$1" = "find" ]]; then - java com.mongodb.framework.Find -elif [[ "$1" = "find1" ]]; then - java com.mongodb.framework.Find1 > $2 -elif [[ "$1" = "remove" ]]; then - java com.mongodb.framework.Remove -elif [[ "$1" = "capped" ]]; then - java com.mongodb.framework.Capped -#elif [[ "$1" = "circular" ]]; then -# java com.mongodb.framework.Circular -elif [[ "$1" = "dbs" ]]; then - java com.mongodb.framework.Dbs > $2 -elif [[ "$1" = "stress1" ]]; then - java com.mongodb.framework.Stress1 -#elif [[ "$1" = "admin" ]]; then -# java com.mongodb.framework.Admin > $2 -else - exit 0 -fi -exitval=$? -endtime=`date` - -echo "begintime:$begintime" >> $2 -echo "endtime:$endtime" >> $2 -echo "exit_code:$exitval" >> $2 - diff --git a/src/test/com/mongodb/gridfs/GridFSTest.java b/src/test/com/mongodb/gridfs/GridFSTest.java deleted file mode 100644 index 97b847e1154..00000000000 --- a/src/test/com/mongodb/gridfs/GridFSTest.java +++ /dev/null @@ -1,348 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.mongodb.gridfs; - -import com.mongodb.BasicDBObject; -import com.mongodb.DB; -import com.mongodb.DBCursor; -import com.mongodb.MongoException; -import com.mongodb.util.TestCase; -import org.testng.annotations.Test; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.util.List; -import java.util.Random; - -public class GridFSTest extends TestCase { - - public GridFSTest() { - cleanupDB = "com_mongodb_unittest_GridFSTest"; - _db = cleanupMongo.getDB(cleanupDB); - _fs = new GridFS(_db); - } - - int[] _get(){ - int[] i = new int[2]; - i[0] = _fs._filesCollection.find().count(); - i[1] = _fs._chunkCollection.find().count(); - return i; - } - - void testInOut( String s ) - throws Exception { - - int[] start = _get(); - - GridFSInputFile in = _fs.createFile( s.getBytes() ); - in.save(); - GridFSDBFile out = _fs.findOne( new BasicDBObject( "_id" , in.getId() ) ); - assert( out.getId().equals( in.getId() ) ); - assert( out.getChunkSize() == (long)GridFS.DEFAULT_CHUNKSIZE ); - - ByteArrayOutputStream bout = new ByteArrayOutputStream(); - out.writeTo( bout ); - String outString = new String( bout.toByteArray() ); - assert( outString.equals( s ) ); - - out.remove(); - int[] end = _get(); - assertEquals( start[0] , end[0] ); - assertEquals( start[1] , end[1] ); - } - - @Test(groups = {"basic"}) - public void testSmall() - throws Exception { - testInOut( "this is a simple test" ); - } - - @Test(groups = {"basic"}) - public void testBig() - throws Exception { - int target = GridFS.DEFAULT_CHUNKSIZE * 3; - StringBuilder buf = new StringBuilder( target ); - while ( buf.length() < target ) - buf.append( "asdasdkjasldkjasldjlasjdlajsdljasldjlasjdlkasjdlaskjdlaskjdlsakjdlaskjdasldjsad" ); - String s = buf.toString(); - testInOut( s ); - } - - void testOutStream( String s ) throws Exception { - - int[] start = _get(); - - GridFSInputFile in = _fs.createFile(); - OutputStream writeStream = in.getOutputStream(); - writeStream.write( s.getBytes(), 0, s.length() ); - writeStream.close(); - GridFSDBFile out = _fs.findOne( new BasicDBObject( "_id" , in.getId() ) ); - assert ( out.getId().equals( in.getId() ) ); - assert ( out.getChunkSize() == (long) GridFS.DEFAULT_CHUNKSIZE ); - - ByteArrayOutputStream bout = new ByteArrayOutputStream(); - out.writeTo( bout ); - String outString = new String( bout.toByteArray() ); - assert (outString.equals( s )); - - out.remove(); - int[] end = _get(); - assertEquals( start[0], end[0] ); - assertEquals( start[1], end[1] ); - } - - @Test(groups = { "basic" }) - public void testOutStreamSmall() throws Exception { - testOutStream( "this is a simple test" ); - } - - @Test(groups = { "basic" }) - public void testOutStreamBig() throws Exception { - int target = (int) (GridFS.DEFAULT_CHUNKSIZE * 3.5); - StringBuilder buf = new StringBuilder( target ); - while ( buf.length() < target ) { - buf.append( "asdasdkjasldkjasldjlasjdlajsdljasldjlasjdlkasjdlaskjdlaskjdlsakjdlaskjdasldjsad" ); - } - String s = buf.toString(); - testOutStream( s ); - } - - @Test(groups = { "basic" }) - public void testOutStreamBigAligned() throws Exception { - int target = (GridFS.DEFAULT_CHUNKSIZE * 4); - StringBuilder buf = new StringBuilder( target ); - while ( buf.length() < target ) { - buf.append( "a" ); - } - String s = buf.toString(); - testOutStream( s ); - } - - @Test(groups = {"basic"}) - public void testMetadata() - throws Exception { - - GridFSInputFile in = _fs.createFile( "foo".getBytes() ); 
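The GridFS tests in this file all follow the same store-and-read cycle; for orientation, a minimal round trip using the same legacy API is sketched here (database name, file name, and payload are illustrative assumptions, not taken from the deleted test):

    import com.mongodb.BasicDBObject;
    import com.mongodb.MongoClient;
    import com.mongodb.gridfs.GridFS;
    import com.mongodb.gridfs.GridFSDBFile;
    import com.mongodb.gridfs.GridFSInputFile;
    import java.io.ByteArrayOutputStream;

    // Minimal GridFS round trip using the legacy API exercised by the tests above;
    // names and payload are illustrative only.
    public class GridFSRoundTripExample {
        public static void main(String[] args) throws Exception {
            GridFS fs = new GridFS(new MongoClient().getDB("gridfs_example"));

            GridFSInputFile in = fs.createFile("hello gridfs".getBytes());
            in.setFilename("hello.txt");
            in.put("meta", 5);      // arbitrary user metadata, as in testMetadata
            in.save();              // splits the payload into chunks and stores them

            GridFSDBFile out = fs.findOne(new BasicDBObject("_id", in.getId()));
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            out.writeTo(bytes);     // reassembles the chunks in order
            System.out.println(new String(bytes.toByteArray()));
        }
    }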
- in.put("meta", 5); - in.save(); - GridFSDBFile out = _fs.findOne( new BasicDBObject( "_id" , in.getId() ) ); - assert( out.get("meta").equals( 5 ) ); - } - - @Test(groups = {"basic"}) - public void testBadChunkSize() throws Exception { - int fileSize = 2 * _db.getMongo().getMaxBsonObjectSize(); - if (fileSize > 1024 * 1024 * 1024) - //If this is the case, GridFS is probably obsolete... - fileSize = 10 * 1024 * 1024; - - byte[] randomBytes = new byte[fileSize]; - for (int idx = 0; idx < fileSize; ++idx) - randomBytes[idx] = (byte)(256 * Math.random()); - - GridFSInputFile inputFile = _fs.createFile(randomBytes); - inputFile.setFilename("bad_chunk_size.bin"); - try{ - inputFile.save(0); - fail("should have received an exception about a chunk size being zero"); - }catch(MongoException mongoExc) { - //We expect this exception to complain about the chunksize - assertTrue(mongoExc.toString().contains("chunkSize must be greater than zero")); - } - - //For good measure let's save and restore the bytes - inputFile.save(_db.getMongo().getMaxBsonObjectSize() - 500 * 1000); - GridFSDBFile savedFile = _fs.findOne(new BasicDBObject("_id", inputFile.getId())); - ByteArrayOutputStream savedFileByteStream = new ByteArrayOutputStream(); - savedFile.writeTo(savedFileByteStream); - byte[] savedFileBytes = savedFileByteStream.toByteArray(); - - assertArrayEquals(randomBytes, savedFileBytes); - } - - @Test(groups = {"basic"}) - public void getBigChunkSize() throws Exception { - GridFSInputFile file = _fs.createFile("512kb_bucket"); - file.setChunkSize(file.getChunkSize() * 2); - OutputStream os = file.getOutputStream(); - for (int i = 0; i < 1024; i++) { - os.write(new byte[GridFS.DEFAULT_CHUNKSIZE / 1024 + 1]); - } - os.close(); - } - - - @Test(groups = {"basic"}) - public void testInputStreamSkipping() throws Exception { - //int chunkSize = 5; - int chunkSize = GridFS.DEFAULT_CHUNKSIZE; - int fileSize = (int)(7.25 * chunkSize); - - byte[] fileBytes = new byte[fileSize]; - for (int idx = 0; idx < fileSize; ++idx) - fileBytes[idx] = (byte)(idx % 251); - //Don't want chunks to be aligned at byte position 0 - - GridFSInputFile inputFile = _fs.createFile(fileBytes); - inputFile.setFilename("input_stream_skipping.bin"); - inputFile.save(chunkSize); - - GridFSDBFile savedFile = _fs.findOne(new BasicDBObject("_id", inputFile.getId())); - GridFSDBFile.MyInputStream inputStream = (GridFSDBFile.MyInputStream)savedFile.getInputStream(); - - //Quick run-through, make sure the file is as expected - for (int idx = 0; idx < fileSize; ++idx) - assertEquals((byte)(idx % 251), (byte)inputStream.read()); - - inputStream = (GridFSDBFile.MyInputStream)savedFile.getInputStream(); - - long skipped = inputStream.skip(1); - assertEquals(1, skipped); - int position = 1; - assertEquals((byte)(position++ % 251), (byte)inputStream.read()); - - skipped = inputStream.skip(chunkSize); - assertEquals(chunkSize, skipped); - position += chunkSize; - assertEquals((byte)(position++ % 251), (byte)inputStream.read()); - - skipped = inputStream.skip(-1); - assertEquals(0, skipped); - skipped = inputStream.skip(0); - assertEquals(0, skipped); - - skipped = inputStream.skip(3 * chunkSize); - assertEquals(3 * chunkSize, skipped); - position += 3 * chunkSize; - assertEquals((byte)(position++ % 251), (byte)inputStream.read()); - - //Make sure skipping works when we skip to an exact chunk boundary - long toSkip = inputStream.available(); - skipped = inputStream.skip(toSkip); - assertEquals(toSkip, skipped); - position += toSkip; - 
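The skip assertions in testInputStreamSkipping rely on the java.io contract that InputStream.skip may legitimately skip fewer bytes than requested, which is why each return value is checked. A caller that needs an exact skip must loop; a minimal sketch (hypothetical utility, not part of the driver):

    import java.io.EOFException;
    import java.io.IOException;
    import java.io.InputStream;

    // Loops until exactly n bytes are skipped, falling back to single-byte
    // reads when skip() reports no progress.
    public final class SkipUtil {
        public static void skipFully(InputStream in, long n) throws IOException {
            while (n > 0) {
                long skipped = in.skip(n);
                if (skipped > 0) {
                    n -= skipped;
                } else if (in.read() >= 0) {
                    n--;            // consumed one byte instead of skipping
                } else {
                    throw new EOFException("stream ended with " + n + " bytes left to skip");
                }
            }
        }
    }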
assertEquals((byte)(position++ % 251), (byte)inputStream.read()); - - skipped = inputStream.skip(2 * fileSize); - assertEquals(fileSize - position, skipped); - assertEquals(-1, inputStream.read()); - } - - @Test(groups = {"basic"}) - public void testCustomFileID() throws IOException { - int chunkSize = 10; - int fileSize = (int)(3.25 * chunkSize); - - byte[] fileBytes = new byte[fileSize]; - for (int idx = 0; idx < fileSize; ++idx) - fileBytes[idx] = (byte)(idx % 251); - - GridFSInputFile inputFile = _fs.createFile(fileBytes); - int id = 1; - inputFile.setId(id); - inputFile.setFilename("custom_file_id.bin"); - inputFile.save(chunkSize); - assertEquals(id, inputFile.getId()); - - GridFSDBFile savedFile = _fs.findOne(new BasicDBObject("_id", id)); - InputStream inputStream = savedFile.getInputStream(); - - for (int idx = 0; idx < fileSize; ++idx) - assertEquals((byte)(idx % 251), (byte)inputStream.read()); - } - - @Test(groups = {"basic"}) - public void testGetFileListWithSorting() throws Exception { - _fs.remove(new BasicDBObject()); - - int chunkSize = 10; - int fileSize = (int)(3.25 * chunkSize); - - byte[] fileBytes = new byte[fileSize]; - for (int idx = 0; idx < fileSize; ++idx) - fileBytes[idx] = (byte)(idx % 251); - - GridFSInputFile inputFile; - for (int i = 1; i < 5; i++){ - inputFile = _fs.createFile(fileBytes); - inputFile.setId(i); - inputFile.setFilename("custom_file_id" + i + ".bin"); - inputFile.put("orderTest", i + ""); - inputFile.save(chunkSize); - } - - DBCursor cursor = _fs.getFileList(null,new BasicDBObject("orderTest", 1)); - assertEquals(cursor.size(),4); - assertEquals(cursor.next().get("_id"),1); - assertEquals(cursor.next().get("_id"),2); - assertEquals(cursor.next().get("_id"),3); - assertEquals(cursor.next().get("_id"),4); - - cursor = _fs.getFileList(null,new BasicDBObject("filename", -1)); - assertEquals(cursor.size(),4); - assertEquals(cursor.next().get("_id"),4); - assertEquals(cursor.next().get("_id"),3); - assertEquals(cursor.next().get("_id"),2); - assertEquals(cursor.next().get("_id"),1); - } - - @Test(groups = {"basic"}) - public void testFindWithSorting() throws Exception { - _fs.remove(new BasicDBObject()); - - int chunkSize = 10; - int fileSize = (int)(3.25 * chunkSize); - - byte[] fileBytes = new byte[fileSize]; - for (int idx = 0; idx < fileSize; ++idx) - fileBytes[idx] = (byte)(idx % 251); - - GridFSInputFile inputFile; - for (int i = 1; i < 5; i++){ - inputFile = _fs.createFile(fileBytes); - inputFile.setId(i); - inputFile.setFilename("custom_file_id"+i+".bin"); - inputFile.put("orderTest", i+""); - inputFile.save(chunkSize); - } - - List<GridFSDBFile> result = _fs.find(new BasicDBObject(), new BasicDBObject("orderTest", 1)); - assertEquals(result.size(),4); - assertEquals(result.get(0).getId(),1); - assertEquals(result.get(1).getId(),2); - assertEquals(result.get(2).getId(),3); - assertEquals(result.get(3).getId(),4); - - result = _fs.find(new BasicDBObject(), new BasicDBObject("filename", -1)); - assertEquals(result.size(),4); - assertEquals(result.get(0).getId(),4); - assertEquals(result.get(1).getId(),3); - assertEquals(result.get(2).getId(),2); - assertEquals(result.get(3).getId(),1); - } - - final DB _db; - final GridFS _fs; - - public static void main( String args[] ) - throws Exception { - (new GridFSTest()).runConsole(); - } - -} diff --git a/src/test/com/mongodb/io/ByteBufferStreamTest.java b/src/test/com/mongodb/io/ByteBufferStreamTest.java deleted file mode 100644 index 305ae675bea..00000000000 --- a/src/test/com/mongodb/io/ByteBufferStreamTest.java
+++ /dev/null @@ -1,129 +0,0 @@ -// ByteBufferStreamTest.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.io; - -import java.io.*; -import java.nio.*; -import java.util.*; -import java.util.zip.*; - -import org.testng.annotations.Test; - -import com.mongodb.util.*; - -public class ByteBufferStreamTest extends TestCase { - - @Test(groups = {"basic"}) - public void test1() - throws IOException { - _testInOut( 16 , 128 ); - _testInOut( 16 , 4 ); - _testInOut( 1024 , 128 ); - _testInOut( 1024 , 2048 ); - } - - private void _testInOut( int dataSize , int bufSize ) - throws IOException { - String s = _getData( dataSize ); - byte[] bytes = s.getBytes(); - - ByteBufferOutputStream bout = new ByteBufferOutputStream( bufSize ); - bout.write( bytes ); - - assertEquals( (int)Math.ceil( (double)(bytes.length) / bufSize ) , bout.getBuffers().size() ); - - ByteBufferInputStream bin = new ByteBufferInputStream( bout.getBuffers() , true ); - String out = new String( StreamUtil.readBytesFully( bin ) ); - assertEquals( s , out ); - - } - - @Test(groups = {"basic"}) - public void testplay() - throws IOException { - _testplay( 16 , 128 ); - _testplay( 16 , 4 ); - _testplay( 1024 , 128 ); - _testplay( 1024 , 2048 ); - _testplay( 20000 , 200 ); - } - - private void _testplay( int dataSize , int bufSize ) - throws IOException { - String s = _getData( dataSize ); - byte[] bytes = s.getBytes(); - - ByteBufferOutputStream bout = new ByteBufferOutputStream( bufSize ); - bout.write( bytes ); - - assertEquals( (int)Math.ceil( (double)(bytes.length) / bufSize ) , bout.getBuffers().size() ); - - ByteBufferInputStream bin = new ByteBufferInputStream( bout.getBuffers() , true ); - ByteArrayInputStream arr = new ByteArrayInputStream( bytes ); - - assertEquals( bin.available() , arr.available() ); - while ( arr.available() > 0 ){ - assertEquals( bin.available() , arr.available() ); - assertEquals( bin.read() , arr.read() ); - assertEquals( bin.read( new byte[12] ) , arr.read( new byte[12] ) ); - } - - assertEquals( bin.available() , arr.available() ); - } - - - @Test(groups = {"basic"}) - public void testZip1() - throws IOException { - _testZip( 128 , 2048 ); - _testZip( 1024 , 128 ); - } - - void _testZip( int dataSize , int bufSize ) - throws IOException { - - String s = _getData( dataSize ); - - ByteBufferOutputStream bout = new ByteBufferOutputStream( bufSize ); - - GZIPOutputStream gout = new GZIPOutputStream( bout ); - gout.write( s.getBytes() ); - gout.flush(); - gout.close(); - - ByteBufferInputStream bin = new ByteBufferInputStream( bout.getBuffers() , true ); - GZIPInputStream gin = new GZIPInputStream( bin ); - String out = new String( StreamUtil.readBytesFully( gin ) ); - - assertEquals( s , out ); - } - - String _getData( int size ){ - StringBuilder buf = new StringBuilder( size + 200 ); - while ( buf.length() < size ) - buf.append( "eliot was here " + _rand.nextDouble() ); - return buf.toString(); - } - - static final Random _rand 
= new Random( 123123 ); - - public static void main( String args[] ){ - (new ByteBufferStreamTest()).runConsole(); - } -} diff --git a/src/test/com/mongodb/io/StreamUtil.java b/src/test/com/mongodb/io/StreamUtil.java deleted file mode 100644 index b1016c05e9a..00000000000 --- a/src/test/com/mongodb/io/StreamUtil.java +++ /dev/null @@ -1,54 +0,0 @@ -// StreamUtil.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.io; - -import java.io.ByteArrayOutputStream; -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; - -public class StreamUtil { - - public static byte[] readBytesFully( InputStream is ) - throws IOException { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - pipe( is , baos ); - return baos.toByteArray(); - } - - public static int pipe( InputStream is , OutputStream out ) - throws IOException { - return pipe( is , out , -1 ); - } - - public static int pipe( InputStream is , OutputStream out , int maxSize ) - throws IOException { - byte buf[] = new byte [4096]; - int len; - int total = 0; - while ((len = is.read(buf)) != -1){ - out.write(buf, 0, len); - total += len; - if ( maxSize > 0 && total > maxSize ) - throw new IOException("too big"); - } - return total; - } - -} diff --git a/src/test/com/mongodb/tools/ConnectionPoolStatTest.java b/src/test/com/mongodb/tools/ConnectionPoolStatTest.java deleted file mode 100644 index 776418bab1c..00000000000 --- a/src/test/com/mongodb/tools/ConnectionPoolStatTest.java +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Copyright (c) 2008 - 2012 10gen, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - *
          - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.tools; - -import com.mongodb.DBObject; -import com.mongodb.util.JSON; -import com.mongodb.util.TestCase; -import org.testng.annotations.Test; - -import javax.management.JMException; -import java.io.IOException; - -public class ConnectionPoolStatTest extends TestCase { - - @Test - public void testBasic() throws IOException, JMException { - if (!isAtLeastJava6()) { - return; - } - ConnectionPoolStat stat = new ConnectionPoolStat(); - String stats = stat.getStats(); - assertNotNull(stats); - DBObject obj = (DBObject) JSON.parse(stats); - assertNotNull(obj); - } - - private boolean isAtLeastJava6() { - String javaSpecificationVersion = System.getProperty("java.specification.version"); - String minorVersionString = javaSpecificationVersion.substring(javaSpecificationVersion.lastIndexOf(".") + 1); - try { - return Integer.parseInt(minorVersionString) >= 6; - } catch (NumberFormatException e) { - return false; - } - } -} diff --git a/src/test/com/mongodb/util/Base64CodecTest.java b/src/test/com/mongodb/util/Base64CodecTest.java deleted file mode 100644 index 7318647e8c7..00000000000 --- a/src/test/com/mongodb/util/Base64CodecTest.java +++ /dev/null @@ -1,33 +0,0 @@ -package com.mongodb.util; - -import org.testng.annotations.Test; - -import static org.testng.Assert.assertEquals; - - -/** - * @author Sjoerd Mulder - */ -public class Base64CodecTest { - - private static final byte[] allBytes = new byte[255]; - private static final byte[] abc = new byte[] { 97, 98, 99} ; - private static final byte[] abcd = new byte[] { 97, 98, 99, 100}; - private static final byte[] abcde = new byte[] { 97, 98, 99, 100, 101}; - - static { - for (byte b = -128; b != 127; b++) { - allBytes[b + 128] = b; - } - } - - @Test - public void testDecodeEncode() throws Exception { - Base64Codec codec = new Base64Codec(); - assertEquals(abc, codec.decode(codec.encode(abc))); - assertEquals(abcd, codec.decode(codec.encode(abcd))); - assertEquals(abcde, codec.decode(codec.encode(abcde))); - assertEquals(allBytes, codec.decode(codec.encode(allBytes))); - } - -} diff --git a/src/test/com/mongodb/util/JSONCallbackTest.java b/src/test/com/mongodb/util/JSONCallbackTest.java deleted file mode 100644 index 14f7ba92703..00000000000 --- a/src/test/com/mongodb/util/JSONCallbackTest.java +++ /dev/null @@ -1,111 +0,0 @@ -package com.mongodb.util; - -import com.mongodb.DBObject; -import com.mongodb.DBRef; -import org.bson.BSON; -import org.bson.Transformer; -import org.bson.types.BSONTimestamp; -import org.bson.types.Binary; -import org.bson.types.ObjectId; - -import java.text.ParsePosition; -import java.text.SimpleDateFormat; -import java.util.Date; -import java.util.GregorianCalendar; -import java.util.SimpleTimeZone; -import java.util.regex.Pattern; - -public class JSONCallbackTest extends com.mongodb.util.TestCase { - - @org.testng.annotations.Test(groups = {"basic"}) - public void dateParsing() { - - SimpleDateFormat format = new SimpleDateFormat(JSONCallback._msDateFormat); - format.setCalendar(new GregorianCalendar(new SimpleTimeZone(0, "GMT"))); - - Date rightNow = new Date(); - rightNow.setTime(System.currentTimeMillis()); - - Date parsedDate = (Date) JSON.parse("{ \"$date\" : 
" + rightNow.getTime() + "}"); - assertEquals(rightNow.compareTo(parsedDate), 0); - - // Test formatted dates with ms granularity - parsedDate = (Date) JSON.parse("{ \"$date\" : \"" + format.format(rightNow) + "\"}"); - assertEquals( - parsedDate.compareTo(format.parse(format.format(rightNow), new ParsePosition(0))), 0); - - // Test formatted dates with sec granularity - format = new SimpleDateFormat(JSONCallback._secDateFormat); - format.setCalendar(new GregorianCalendar(new SimpleTimeZone(0, "GMT"))); - - parsedDate = (Date) JSON.parse("{ \"$date\" : \"" + format.format(rightNow) + "\"}"); - assertEquals( - parsedDate.compareTo(format.parse(format.format(rightNow), new ParsePosition(0))), 0); - - } - - @org.testng.annotations.Test(groups = {"basic"}) - public void encodingHooks() { - BSON.addDecodingHook(Date.class, new Transformer() { - @Override - public Object transform(final Object o) { - return ((Date) o).getTime(); - } - }); - - try { - Date now = new Date(); - - Object parsedDate = JSON.parse("{ \"$date\" : " + now.getTime() + "}"); - assertEquals(Long.class, parsedDate.getClass()); - - DBObject doc = (DBObject) JSON.parse("{ date : { \"$date\" : " + now.getTime() + "} }"); - assertEquals(Long.class, doc.get("date").getClass()); - } finally { - BSON.removeDecodingHooks(Date.class); - } - } - - @org.testng.annotations.Test(groups = {"basic"}) - public void binaryParsing() { - Binary parsedBinary = (Binary) JSON.parse(("{ \"$binary\" : \"YWJjZA==\", \"$type\" : 0 }")); - org.testng.Assert.assertEquals(parsedBinary.getType(), 0); - org.testng.Assert.assertEquals(parsedBinary.getData(), new byte[]{97, 98, 99, 100}); - } - - @org.testng.annotations.Test(groups = {"basic"}) - public void timestampParsing() { - BSONTimestamp timestamp = (BSONTimestamp) JSON.parse(("{ \"$timestamp\" : { \"t\": 123, \"i\": 456 } }")); - org.testng.Assert.assertEquals(timestamp.getInc(), 456); - org.testng.Assert.assertEquals(timestamp.getTime(), 123); - } - - - @org.testng.annotations.Test(groups = {"basic"}) - public void regexParsing() { - Pattern pattern = (Pattern) JSON.parse(("{ \"$regex\" : \".*\", \"$options\": \"i\" }")); - org.testng.Assert.assertEquals(pattern.pattern(), ".*"); - org.testng.Assert.assertEquals(pattern.flags(), Pattern.CASE_INSENSITIVE); - } - - @org.testng.annotations.Test(groups = {"basic"}) - public void oidParsing() { - ObjectId id = (ObjectId) JSON.parse(("{ \"$oid\" : \"01234567890123456789abcd\" }")); - org.testng.Assert.assertEquals(id, new ObjectId("01234567890123456789abcd")); - } - - @org.testng.annotations.Test(groups = {"basic"}) - public void refParsing() { - DBRef ref = (DBRef) JSON.parse(("{ \"$ref\" : \"friends\", \"$id\" : \"01234567890123456789abcd\" }")); - org.testng.Assert.assertEquals(ref.getRef(), "friends"); - org.testng.Assert.assertEquals(ref.getId(), new ObjectId("01234567890123456789abcd")); - } - -// No such concept in Java -// @org.testng.annotations.Test(groups = {"basic"}) -// public void undefinedParsing() { -// BasicDBObject undef = (BasicDBObject) JSON.parse(("{ \"$undefined\" : true }")); -// org.testng.Assert.assertEquals(undef, 123); -// } - -} diff --git a/src/test/com/mongodb/util/JSONSerializersTest.java b/src/test/com/mongodb/util/JSONSerializersTest.java deleted file mode 100644 index 7079fc40903..00000000000 --- a/src/test/com/mongodb/util/JSONSerializersTest.java +++ /dev/null @@ -1,223 +0,0 @@ -package com.mongodb.util; - -import com.mongodb.BasicDBObject; -import com.mongodb.DBRefBase; -import org.bson.types.*; - -import 
java.text.SimpleDateFormat; -import java.util.*; -import java.util.regex.Pattern; - -public class JSONSerializersTest extends com.mongodb.util.TestCase { - - @org.testng.annotations.Test(groups = {"basic"}) - public void testBinaryCodecs() { - - //Base64 encoding Test - Base64Codec encoder = new Base64Codec(); - assertEquals(encoder.encode("Base64 Serialization Test".getBytes()).toString(), - "QmFzZTY0IFNlcmlhbGl6YXRpb24gVGVzdA=="); - - // test legacy serialization - ObjectSerializer serializer = JSONSerializers.getLegacy(); - StringBuilder buf = new StringBuilder(); - serializer.serialize("Base64 Serialization Test".getBytes(), buf); - assertEquals(buf.toString(), "<Binary Data>"); - } - - @org.testng.annotations.Test(groups = {"basic"}) - public void testLegacySerialization() { - ObjectSerializer serializer = JSONSerializers.getLegacy(); - - BasicDBObject testObj = new BasicDBObject(); - - // test ARRAY - BasicDBObject[] a = {new BasicDBObject("object1", "value1"), new BasicDBObject("object2", "value2")}; - testObj.put("array", a); - StringBuilder buf = new StringBuilder(); - serializer.serialize(a, buf); - assertEquals(buf.toString(), "[ { \"object1\" : \"value1\"} , { \"object2\" : \"value2\"}]"); - - // test BINARY - byte b[] = {1,2,3,4}; - testObj = new BasicDBObject("binary", new org.bson.types.Binary(b)); - buf = new StringBuilder(); - serializer.serialize(testObj, buf); - assertEquals(buf.toString(), "{ \"binary\" : <Binary Data>}"); - - // test BOOLEAN - testObj = new BasicDBObject("boolean", new Boolean(true)); - buf = new StringBuilder(); - serializer.serialize(testObj, buf); - assertEquals(buf.toString(), "{ \"boolean\" : true}"); - - // test BSON_TIMESTAMP, - testObj = new BasicDBObject("timestamp", new BSONTimestamp()); - buf = new StringBuilder(); - serializer.serialize(testObj, buf); - assertEquals(buf.toString(), "{ \"timestamp\" : { \"$ts\" : 0 , \"$inc\" : 0}}"); - - // test BYTE_ARRAY - testObj = new BasicDBObject("byte_array", b); - buf = new StringBuilder(); - serializer.serialize(testObj, buf); - assertEquals(buf.toString(), "{ \"byte_array\" : <Binary Data>}"); - - // test CODE - testObj = new BasicDBObject("code", new Code("test code")); - buf = new StringBuilder(); - serializer.serialize(testObj, buf); - assertEquals(buf.toString(), "{ \"code\" : { \"$code\" : \"test code\"}}"); - - // test CODE_W_SCOPE - testObj = new BasicDBObject("scope", "scope of code"); - CodeWScope codewscope = new CodeWScope("test code", testObj); - buf = new StringBuilder(); - serializer.serialize(codewscope, buf); - assertEquals(buf.toString(), "{ \"$code\" : \"test code\" , \"$scope\" : { \"scope\" : \"scope of code\"}}"); - - // test DATE - Date d = new Date(); - SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"); - format.setCalendar(new GregorianCalendar(new SimpleTimeZone(0, "GMT"))); - buf = new StringBuilder(); - serializer.serialize(d, buf); - assertEquals(buf.toString(), "{ \"$date\" : \""+format.format(d)+"\"}"); - - // test DB_OBJECT implicit in preceding tests - - // test DB_REF_BASE - DBRefBase dbref = new DBRefBase(null, "test.test", "4d83ab59a39562db9c1ae2af"); - buf = new StringBuilder(); - serializer.serialize(dbref, buf); - assertEquals(buf.toString(), "{ \"$ref\" : \"test.test\" , \"$id\" : \"4d83ab59a39562db9c1ae2af\"}"); - - // test ITERABLE - BasicBSONList testList = new BasicBSONList(); - testList.add(new BasicDBObject("key1", "val1")); - testList.add(new BasicDBObject("key2", "val2")); - buf = new StringBuilder(); - serializer.serialize(testList, buf); -
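Both serializers exercised here are obtained from JSONSerializers and append into a caller-supplied StringBuilder; a minimal usage sketch (the document contents are illustrative assumptions):

    import com.mongodb.BasicDBObject;
    import com.mongodb.util.JSONSerializers;
    import com.mongodb.util.ObjectSerializer;
    import java.util.Date;

    // Rendering a document with the strict serializer, as the assertions in
    // this file do; values are illustrative only.
    public class StrictSerializerExample {
        public static void main(String[] args) {
            ObjectSerializer strict = JSONSerializers.getStrict();
            StringBuilder buf = new StringBuilder();
            strict.serialize(new BasicDBObject("created", new Date(0L)), buf);
            // strict mode renders dates as { "$date" : 0 } rather than a formatted string
            System.out.println(buf);
        }
    }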
assertEquals(buf.toString(), "[ { \"key1\" : \"val1\"} , { \"key2\" : \"val2\"}]"); - - // test MAP - TreeMap testMap = new TreeMap(); - testMap.put("key1", "val1"); - testMap.put("key2", "val2"); - buf = new StringBuilder(); - serializer.serialize(testMap, buf); - assertEquals(buf.toString(), "{ \"key1\" : \"val1\" , \"key2\" : \"val2\"}"); - - // test MAXKEY - buf = new StringBuilder(); - serializer.serialize(new MaxKey(), buf); - assertEquals(buf.toString(), "{ \"$maxKey\" : 1}"); - - // test MINKEY - buf = new StringBuilder(); - serializer.serialize(new MinKey(), buf); - assertEquals(buf.toString(), "{ \"$minKey\" : 1}"); - - // test NULL - buf = new StringBuilder(); - serializer.serialize(null, buf); - assertEquals(buf.toString(), " null "); - - // test NUMBER - Random rand = new Random(); - long val = rand.nextLong(); - Long longVal = new Long(val); - buf = new StringBuilder(); - serializer.serialize(longVal, buf); - assertEquals(buf.toString(), val); - - // test OBJECT_ID - buf = new StringBuilder(); - serializer.serialize(new ObjectId("4d83ab3ea39562db9c1ae2ae"), buf); - assertEquals(buf.toString(), "{ \"$oid\" : \"4d83ab3ea39562db9c1ae2ae\"}"); - - // test PATTERN - buf = new StringBuilder(); - serializer.serialize(Pattern.compile("test"), buf); - assertEquals(buf.toString(), "{ \"$regex\" : \"test\"}"); - - // test STRING - buf = new StringBuilder(); - serializer.serialize("test string", buf); - assertEquals(buf.toString(), "\"test string\""); - - // test UUID; - UUID uuid = UUID.randomUUID(); - buf = new StringBuilder(); - serializer.serialize(uuid, buf); - assertEquals( buf.toString(), "{ \"$uuid\" : \""+uuid.toString()+"\"}"); - } - - @org.testng.annotations.Test(groups = {"basic"}) - public void testStrictSerialization() { - ObjectSerializer serializer = JSONSerializers.getStrict(); - - // test BINARY - byte b[] = {1,2,3,4}; - Base64Codec encoder = new Base64Codec(); - String base64 = encoder.encode(b); - StringBuilder buf = new StringBuilder(); - serializer.serialize(new Binary(b), buf); - assertEquals(buf.toString(), "{ \"$binary\" : \""+base64+"\" , \"$type\" : 0}"); - - // test BSON_TIMESTAMP - buf = new StringBuilder(); - serializer.serialize(new BSONTimestamp(123, 456), buf); - assertEquals(buf.toString(), "{ \"$timestamp\" : { \"t\" : 123 , \"i\" : 456}}"); - - // test BYTE_ARRAY - buf = new StringBuilder(); - serializer.serialize(b, buf); - assertEquals(buf.toString(), "{ \"$binary\" : \""+base64+"\" , \"$type\" : 0}"); - - // test DATE - Date d = new Date(); - buf = new StringBuilder(); - serializer.serialize(d, buf); - assertEquals(buf.toString(), "{ \"$date\" : "+(d.getTime())+"}"); - } - - @org.testng.annotations.Test(groups = {"basic"}) - public void testSerializationByAncestry() { - ClassMapBasedObjectSerializer serializer = new ClassMapBasedObjectSerializer(); - - // by superclass - serializer.addObjectSerializer( - Object.class, - new AbstractObjectSerializer(){ - - @Override - public void serialize(Object obj, StringBuilder buf) { - buf.append("serialized as Object class"); - } - } - ); - - // interface - serializer.addObjectSerializer(java.util.List.class, - new AbstractObjectSerializer() { - - @Override - public void serialize(Object obj, StringBuilder buf) { - buf.append(obj.toString()); - } - - }); - - ArrayList list = new ArrayList(); - list.add("val1"); - StringBuilder buf = new StringBuilder(); - serializer.serialize(list, buf); - assertEquals(buf.toString(), "[val1]"); - - CodeWScope code = new CodeWScope("code_test", null); - buf = new 
StringBuilder(); - serializer.serialize(code, buf); - assertEquals(buf.toString(), "serialized as Object class"); - } -} diff --git a/src/test/com/mongodb/util/JSONTest.java b/src/test/com/mongodb/util/JSONTest.java deleted file mode 100644 index 288edf08060..00000000000 --- a/src/test/com/mongodb/util/JSONTest.java +++ /dev/null @@ -1,385 +0,0 @@ -// JSONTest.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.util; - -import com.mongodb.BasicDBObject; -import com.mongodb.BasicDBObjectBuilder; -import com.mongodb.DBObject; -import com.mongodb.DBRef; -import org.bson.BSON; -import org.bson.BasicBSONObject; -import org.bson.types.BSONTimestamp; -import org.bson.types.Code; -import org.bson.types.CodeWScope; -import org.bson.types.ObjectId; - -import java.text.ParseException; -import java.text.SimpleDateFormat; -import java.util.Date; -import java.util.GregorianCalendar; -import java.util.SimpleTimeZone; -import java.util.UUID; -import java.util.regex.Pattern; - -public class JSONTest extends com.mongodb.util.TestCase { - - @org.testng.annotations.Test(groups = {"basic"}) - public void testSerializationMethods(){ - - // basic test of each of JSON class' serialization methods - String json = "{ \"x\" : \"basic test\"}"; - StringBuilder buf = new StringBuilder(); - Object obj = JSON.parse(json); - - assertEquals(JSON.serialize(obj), json); - } - - @org.testng.annotations.Test(groups = {"basic"}) - public void testNumbers(){ - assertEquals(JSON.serialize(JSON.parse("{'x' : 5 }")), "{ \"x\" : 5}"); - assertEquals(JSON.serialize(JSON.parse("{'x' : 5.0 }")), "{ \"x\" : 5.0}"); - assertEquals(JSON.serialize(JSON.parse("{'x' : 0 }")), "{ \"x\" : 0}"); - assertEquals(JSON.serialize(JSON.parse("{'x' : 0.0 }")), "{ \"x\" : 0.0}"); - assertEquals(JSON.serialize(JSON.parse("{'x' : 500 }")), "{ \"x\" : 500}"); - assertEquals(JSON.serialize(JSON.parse("{'x' : 500.0 }")), "{ \"x\" : 500.0}"); - assertEquals(JSON.serialize(JSON.parse("{'x' : 0.500 }")), "{ \"x\" : 0.5}"); - assertEquals(JSON.serialize(JSON.parse("{'x' : 5. 
}")), "{ \"x\" : 5.0}");
-        assertEquals(JSON.serialize(JSON.parse("{'x' : 5.0e+1 }")), "{ \"x\" : 50.0}");
-        assertEquals(JSON.serialize(JSON.parse("{'x' : 5.0E-1 }")), "{ \"x\" : 0.5}");
-    }
-
-    @org.testng.annotations.Test(groups = {"basic"})
-    public void testLongValues() {
-        Long bigVal = Integer.MAX_VALUE + 1L;
-        String test = String.format("{ \"x\" : %d}", bigVal);
-        assertEquals(JSON.serialize(JSON.parse(test)), test);
-
-        Long smallVal = Integer.MIN_VALUE - 1L;
-        String test2 = String.format("{ \"x\" : %d}", smallVal);
-        assertEquals(JSON.serialize(JSON.parse(test2)), test2);
-
-        try{
-            JSON.parse("{\"ReallyBigNumber\": 10000000000000000000 }");
-            fail("JSONParseException should have been thrown");
-        }catch(JSONParseException e) {
-            // fall through
-        }
-    }
-
-    @org.testng.annotations.Test(groups = {"basic"})
-    public void testSimple() {
-        assertEquals(JSON.serialize(JSON.parse("{'csdf' : true}")), "{ \"csdf\" : true}");
-        assertEquals(JSON.serialize(JSON.parse("{'csdf' : false}")), "{ \"csdf\" : false}");
-        assertEquals(JSON.serialize(JSON.parse("{'csdf' : null}")), "{ \"csdf\" : null }");
-    }
-
-    @org.testng.annotations.Test(groups = {"basic"})
-    public void testString() {
-        assertEquals(JSON.serialize(JSON.parse("{'csdf' : \"foo\"}")), "{ \"csdf\" : \"foo\"}") ;
-        assertEquals(JSON.serialize(JSON.parse("{'csdf' : \'foo\'}")), "{ \"csdf\" : \"foo\"}") ;
-        assertEquals(JSON.serialize(JSON.parse("{'csdf' : \"a\\\"b\"}")), "{ \"csdf\" : \"a\\\"b\"}");
-        assertEquals(JSON.serialize(JSON.parse("{\n\t\"id\":\"1689c12eb234c54a84ebd100\",\n}")),
-                     "{ \"id\" : \"1689c12eb234c54a84ebd100\"}");
-    }
-
-    @org.testng.annotations.Test(groups = {"basic"})
-    public void testArray() {
-        assertEquals(JSON.serialize(JSON.parse("{'csdf' : [\"foo\"]}")), "{ \"csdf\" : [ \"foo\"]}") ;
-        assertEquals(JSON.serialize(JSON.parse("{'csdf' : [3, 5, \'foo\', null]}")), "{ \"csdf\" : [ 3 , 5 , \"foo\" , null ]}") ;
-        assertEquals(JSON.serialize(JSON.parse("{'csdf' : [3.0, 5.0, \'foo\', null]}")), "{ \"csdf\" : [ 3.0 , 5.0 , \"foo\" , null ]}") ;
-        assertEquals(JSON.serialize(JSON.parse("{'csdf' : [[],[[]],false]}")), "{ \"csdf\" : [ [ ] , [ [ ]] , false]}");
-    }
-
-    @org.testng.annotations.Test(groups = {"basic"})
-    public void testObject() {
-        assertEquals(JSON.serialize(JSON.parse("{'csdf' : {}}")), "{ \"csdf\" : { }}") ;
-        assertEquals(JSON.serialize(JSON.parse("{'csdf' : {\"foo\":\"bar\"}}")), "{ \"csdf\" : { \"foo\" : \"bar\"}}") ;
-        assertEquals(JSON.serialize(JSON.parse("{'csdf' : {\'hi\':{\'hi\':[{}]}}}")), "{ \"csdf\" : { \"hi\" : { \"hi\" : [ { }]}}}");
-    }
-
-    @org.testng.annotations.Test(groups = {"basic"})
-    public void testMulti() {
-        assertEquals(JSON.serialize(JSON.parse("{\'\' : \"\", \"34\" : -52.5}")), "{ \"\" : \"\" , \"34\" : -52.5}") ;
-    }
-
-    @org.testng.annotations.Test(groups = {"basic"})
-    public void testUnicode() {
-        assertEquals(JSON.serialize(JSON.parse("{'x' : \"hi\\u0020\"}")),"{ \"x\" : \"hi \"}") ;
-        assertEquals(JSON.serialize(JSON.parse("{ \"x\" : \"\\u0E01\\u2702\\uF900\"}")), "{ \"x\" : \"\u0E01\u2702\uF900\"}");
-        assertEquals(JSON.serialize(JSON.parse("{ \"x\" : \"foo\\u0020bar\"}")), "{ \"x\" : \"foo bar\"}");
-    }
-
-    @org.testng.annotations.Test(groups = {"basic"})
-    public void testBin() {
-        byte b[] = {'a', 'b', 0, 'd'};
-        DBObject obj = BasicDBObjectBuilder.start().add("b", b).get();
-        assertEquals(JSON.serialize(obj), "{ \"b\" : <Binary Data>}");
-    }
-
-
-    @org.testng.annotations.Test(groups = {"basic"})
-    public void testErrors(){
-        boolean threw = false;
-        try {
JSON.parse("{\"x\" : \""); - } - catch(JSONParseException e) { - threw = true; - } - assertEquals(threw, true); - threw = false; - try { - JSON.parse("{\"x\" : \"\\"); - } - catch(JSONParseException e) { - threw = true; - } - assertEquals(threw, true); - threw = false; - try { - JSON.parse("{\"x\" : 5.2"); - } - catch(JSONParseException e) { - threw = true; - } - assertEquals(threw, true); - threw = false; - try { - JSON.parse("{\"x\" : 5"); - } - catch(JSONParseException e) { - threw = true; - } - assertEquals(threw, true); - threw = false; - try { - JSON.parse("{\"x\" : 5,"); - } - catch(JSONParseException e) { - threw = true; - } - assertEquals(threw, true); - threw = false; - } - - @org.testng.annotations.Test(groups = {"basic"}) - public void testBasic(){ - assertEquals( JSON.serialize(JSON.parse("{}")), "{ }"); - assertEquals( JSON.parse(""), null ); - assertEquals( JSON.parse(" "), null); - assertEquals( JSON.parse(null), null); - - boolean threw = false; - try { - JSON.parse("{"); - } - catch(JSONParseException e) { - threw = true; - } - assertEquals(threw, true); - threw = false; - - try { - JSON.parse("}"); - } - catch(JSONParseException e) { - threw = true; - } - assertEquals(threw, true); - threw = false; - - try { - JSON.parse("{{}"); - } - catch(JSONParseException e) { - threw = true; - } - assertEquals(threw, true); - threw = false; - - try { - JSON.parse("4"); - } - catch(JSONParseException e) { - threw = true; - } - assertEquals(threw, false); - threw = false; - - assertEquals( 4 , JSON.parse( "4" ) ); - } - - @org.testng.annotations.Test - public void testNumbers2(){ - DBObject x = new BasicDBObject( "x" , 123 ); - assertEquals( x , JSON.parse( x.toString() ) ); - - x = new BasicDBObject( "x" , 123123123123L ); - assertEquals( x , JSON.parse( x.toString() ) ); - - x = new BasicDBObject( "x" , 123123123 ); - assertEquals( x , JSON.parse( x.toString() ) ); - } - - void _escapeChar( String s ){ - String thingy = "va" + s + "lue"; - DBObject x = new BasicDBObject( "name" , thingy ); - x = (DBObject)JSON.parse( x.toString() ); - assertEquals( thingy , x.get( "name" ) ); - - thingy = "va" + s + s + s + "lue" + s; - x = new BasicDBObject( "name" , thingy ); - x = (DBObject)JSON.parse( x.toString() ); - assertEquals( thingy , x.get( "name" ) ); - } - - - - @org.testng.annotations.Test - public void testEscape1(){ - String raw = "a\nb"; - - DBObject x = new BasicDBObject( "x" , raw ); - assertEquals( "\"a\\nb\"" , JSON.serialize( raw ) ); - assertEquals( x , JSON.parse( x.toString() ) ); - assertEquals( raw , ((DBObject)JSON.parse( x.toString() ) ).get( "x" ) ); - - x = new BasicDBObject( "x" , "a\nb\bc\td\re" ); - assertEquals( x , JSON.parse( x.toString() ) ); - - - String thingy = "va\"lue"; - x = new BasicDBObject( "name" , thingy ); - x = (DBObject)JSON.parse( x.toString() ); - assertEquals( thingy , x.get( "name" ) ); - - thingy = "va\\lue"; - x = new BasicDBObject( "name" , thingy ); - x = (DBObject)JSON.parse( x.toString() ); - assertEquals( thingy , x.get( "name" ) ); - - assertEquals( "va/lue" , (String)JSON.parse("\"va\\/lue\"") ); - assertEquals( "value" , (String)JSON.parse("\"va\\lue\"") ); - assertEquals( "va\\lue" , (String)JSON.parse("\"va\\\\lue\"") ); - - _escapeChar( "\t" ); - _escapeChar( "\b" ); - _escapeChar( "\n" ); - _escapeChar( "\r" ); - _escapeChar( "\'" ); - _escapeChar( "\"" ); - _escapeChar( "\\" ); - } - - @org.testng.annotations.Test - public void testPattern() { - String x = "^Hello$"; - String serializedPattern = - "{ \"$regex\" : \"" + x + "\" , 
\"$options\" : \"" + "i\"}"; - - Pattern pattern = Pattern.compile( x , Pattern.CASE_INSENSITIVE); - assertEquals( serializedPattern, JSON.serialize(pattern)); - - BasicDBObject a = new BasicDBObject( "x" , pattern ); - assertEquals( "{ \"x\" : " + serializedPattern + "}" , a.toString() ); - - DBObject b = (DBObject)JSON.parse( a.toString() ); - assertEquals( b.get("x").getClass(), Pattern.class ); - assertEquals( a.toString() , b.toString() ); - } - - @org.testng.annotations.Test - public void testRegexNoOptions() { - String x = "^Hello$"; - String serializedPattern = - "{ \"$regex\" : \"" + x + "\"}"; - - Pattern pattern = Pattern.compile( x ); - assertEquals( serializedPattern, JSON.serialize(pattern)); - - BasicDBObject a = new BasicDBObject( "x" , pattern ); - assertEquals( "{ \"x\" : " + serializedPattern + "}" , a.toString() ); - - DBObject b = (DBObject)JSON.parse( a.toString() ); - assertEquals( b.get("x").getClass(), Pattern.class ); - assertEquals( a.toString() , b.toString() ); - } - - @org.testng.annotations.Test - public void testObjectId() { - ObjectId oid = new ObjectId(new Date()); - - String serialized = JSON.serialize(oid); - assertEquals("{ \"$oid\" : \"" + oid + "\"}", serialized); - - ObjectId oid2 = (ObjectId)JSON.parse(serialized); - assertEquals(oid, oid2); - } - - @org.testng.annotations.Test - public void testDate() { - Date d = new Date(); - SimpleDateFormat format = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"); - format.setCalendar(new GregorianCalendar(new SimpleTimeZone(0, "GMT"))); - String formattedDate = format.format(d); - - String serialized = JSON.serialize(d); - assertEquals("{ \"$date\" : \"" + formattedDate + "\"}", serialized); - - Date d2 = (Date)JSON.parse(serialized); - assertEquals(d.toString(), d2.toString()); - assertTrue(d.equals(d2)); - } - - @org.testng.annotations.Test - public void testJSONEncoding() throws ParseException { - String json = "{ 'str' : 'asdfasd' , 'long' : 123123123123 , 'int' : 5 , 'float' : 0.4 , 'bool' : false , 'date' : { '$date' : '2011-05-18T18:56:00Z'} , 'pat' : { '$regex' : '.*' , '$options' : ''} , 'oid' : { '$oid' : '4d83ab3ea39562db9c1ae2ae'} , 'ref' : { '$ref' : 'test.test' , '$id' : { '$oid' : '4d83ab59a39562db9c1ae2af'}} , 'code' : { '$code' : 'asdfdsa'} , 'codews' : { '$code' : 'ggggg' , '$scope' : { }} , 'ts' : { '$ts' : 1300474885 , '$inc' : 10} , 'null' : null, 'uuid' : { '$uuid' : '60f65152-6d4a-4f11-9c9b-590b575da7b5' }}"; - BasicDBObject a = (BasicDBObject) JSON.parse(json); - assert (a.get("str").equals("asdfasd")); - assert (a.get("int").equals(5)); - assert (a.get("long").equals(123123123123L)); - assert (a.get("float").equals(0.4d)); - assert (a.get("bool").equals(false)); - SimpleDateFormat format = - new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss'Z'"); - format.setCalendar(new GregorianCalendar(new SimpleTimeZone(0, "GMT"))); - assert (a.get("date").equals(format.parse("2011-05-18T18:56:00Z"))); - Pattern pat = (Pattern) a.get("pat"); - Pattern pat2 = Pattern.compile(".*", BSON.regexFlags("")); - assert (pat.pattern().equals(pat2.pattern())); - assert (pat.flags() == (pat2.flags())); - ObjectId oid = (ObjectId) a.get("oid"); - assert (oid.equals(new ObjectId("4d83ab3ea39562db9c1ae2ae"))); - DBRef ref = (DBRef) a.get("ref"); - assert (ref.equals(new DBRef(null, "test.test", new ObjectId("4d83ab59a39562db9c1ae2af")))); - assert (a.get("code").equals(new Code("asdfdsa"))); - assert (a.get("codews").equals(new CodeWScope("ggggg", new BasicBSONObject()))); - assert (a.get("ts").equals(new 
BSONTimestamp(1300474885, 10)));
-        assert (a.get("uuid").equals(UUID.fromString("60f65152-6d4a-4f11-9c9b-590b575da7b5")));
-        String json2 = JSON.serialize(a);
-        BasicDBObject b = (BasicDBObject) JSON.parse(json2);
-        a.equals(b);
-        assert (a.equals(b));
-    }
-
-    public static void main( String args[] ){
-        (new JSONTest()).runConsole();
-    }
-
-}
diff --git a/src/test/com/mongodb/util/SimplePoolTest.java b/src/test/com/mongodb/util/SimplePoolTest.java
deleted file mode 100644
index 232eea98cae..00000000000
--- a/src/test/com/mongodb/util/SimplePoolTest.java
+++ /dev/null
@@ -1,217 +0,0 @@
-/**
- * Copyright (C) 2008 - 2012 10gen Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package com.mongodb.util;
-
-import java.util.concurrent.Callable;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-
-public class SimplePoolTest extends com.mongodb.util.TestCase {
-
-    class MyPool extends SimplePool<Integer> {
-
-        MyPool( int size ){
-            super( "blah" , size );
-        }
-
-        public Integer createNew(){
-            if (_throwError)
-                throw new OutOfMemoryError();
-
-            if (_returnNull) {
-                return null;
-            }
-
-            return _num++;
-        }
-
-        int _num = 0;
-        boolean _throwError;
-        boolean _returnNull;
-    }
-
-    @org.testng.annotations.Test
-    public void testBasic1() throws InterruptedException {
-        MyPool p = new MyPool( 10 );
-
-        int a = p.get();
-        assertEquals( 0 , a );
-
-        int b = p.get();
-        assertEquals( 1 , b );
-
-        p.done( a );
-        a = p.get();
-        assertEquals( 0 , a );
-    }
-
-    @org.testng.annotations.Test
-    public void testMax1() throws InterruptedException {
-        MyPool p = new MyPool( 10 );
-
-        int a = p.get();
-        assertEquals( 0 , a );
-
-        int b = p.get();
-        assertEquals( 1 , b );
-
-        // TODO: Fix this test
-//        assertNull( p.get( 0 ) );
-    }
-
-    @org.testng.annotations.Test
-    public void testMax2() throws InterruptedException {
-        MyPool p = new MyPool( 10 );
-
-        int a = p.get();
-        assertEquals( 0 , a );
-
-        int b = p.get();
-        assertEquals( 1 , b );
-
-        assertEquals( 2 , (int)p.get( -1 ) );
-    }
-
-    @org.testng.annotations.Test
-    public void testMax3() throws InterruptedException {
-        MyPool p = new MyPool( 10 );
-
-        int a = p.get();
-        assertEquals( 0 , a );
-
-        int b = p.get();
-        assertEquals( 1 , b );
-
-        assertEquals( 2 , (int)p.get( 1 ) );
-    }
-
-    @org.testng.annotations.Test
-    public void testThrowErrorFromCreate() throws InterruptedException {
-        MyPool p = new MyPool( 1 );
-        p._throwError = true;
-
-        try {
-            p.get();
-            fail("Should have thrown");
-        } catch (OutOfMemoryError e) {
-            // expected
-        }
-
-        p._throwError = false;
-
-        // now make sure there is still a permit left
-        Integer a = p.get(0);
-        assertEquals( Integer.valueOf(0) , a );
-    }
-
-    @org.testng.annotations.Test
-    public void testCouldCreate() throws InterruptedException {
-        SimplePool<Integer> p = new SimplePool<Integer>("pool", 2) {
-            @Override
-            protected Integer createNew() {
-                return _num++;
-            }
-
-            @Override
-            protected int pick(int recommended, boolean couldCreate) {
-                if (couldCreate) {
-                    return -1;
-                }
-                return recommended;
-            }
-
-            int _num = 1;
-        };
-
-        Integer one = p.get();
-        assertEquals(Integer.valueOf(1), one);
-        p.done(one);
-
-        Integer two = p.get();
-        assertEquals(Integer.valueOf(2), two);
-
-        one = p.get();
-        assertEquals(Integer.valueOf(1), one);
-
-    }
-
-    @org.testng.annotations.Test
-    public void testReturnNullFromCreate() throws InterruptedException {
-        MyPool p = new MyPool( 1 );
-        p._returnNull = true;
-
-        try {
-            p.get();
-            fail("Should have thrown");
-        } catch (IllegalStateException e) {
-            // expected
-        }
-
-        p._returnNull = false;
-
-        // now make sure there is still a permit left
-        Integer a = p.get(0);
-        assertEquals( Integer.valueOf(0) , a );
-    }
-
-    @org.testng.annotations.Test()
-    public void testThrowsInterruptedException() throws InterruptedException {
-        final MyPool p = new MyPool(1);
-        try {
-            p.get();
-        } catch (InterruptedException e) {
-            fail("Should not throw InterruptedException here");
-        }
-
-        ExecutorService executor = Executors.newSingleThreadExecutor();
-        final CountDownLatch ready = new CountDownLatch(1);
-
-        Callable<Boolean> callable = new Callable<Boolean>() {
-            @Override
-            public Boolean call() {
-                try {
-                    ready.countDown();
-                    p.get();
-                    return false;
-                } catch (InterruptedException e) {
-                    // return true if interrupted
-                    return true;
-                }
-            }
-        };
-        Future<Boolean> future = executor.submit(callable);
-
-        ready.await();
-        // Interrupt the thread
-        executor.shutdownNow();
-
-        try {
-            assertEquals(true, future.get());
-        } catch (InterruptedException e) {
-            fail("Should not happen, since this thread was not interrupted");
-        } catch (ExecutionException e) {
-            fail("Should not happen");
-        }
-    }
-
-    public static void main( String args[] ){
-        SimplePoolTest t = new SimplePoolTest();
-        t.runConsole();
-    }
-}
diff --git a/src/test/com/mongodb/util/TestCase.java b/src/test/com/mongodb/util/TestCase.java
deleted file mode 100644
index 283af345932..00000000000
--- a/src/test/com/mongodb/util/TestCase.java
+++ /dev/null
@@ -1,380 +0,0 @@
-// TestCase.java
-
-/**
- * Copyright (C) 2008 10gen Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -package com.mongodb.util; - -import com.mongodb.BasicDBObject; -import com.mongodb.CommandResult; -import com.mongodb.Mongo; -import com.mongodb.MongoClient; -import com.mongodb.WriteConcern; -import org.testng.annotations.AfterClass; - -import java.io.BufferedReader; -import java.io.InputStreamReader; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Member; -import java.lang.reflect.Method; -import java.net.UnknownHostException; -import java.util.ArrayList; -import java.util.List; - -public class TestCase extends MyAsserts { - - static class Test { - Test( Object o , Method m ){ - _o = o; - _m = m; - } - - Result run(){ - try { - _m.invoke( _o ); - return new Result( this ); - } - catch ( IllegalAccessException e ){ - return new Result( this , e ); - } - catch ( InvocationTargetException ite ){ - return new Result( this , ite.getTargetException() ); - } - } - - public String toString(){ - String foo = _o.getClass().getName() + "." + _m.getName(); - if ( _name == null ) - return foo; - return _name + "(" + foo + ")"; - } - - protected String _name = null; - - final Object _o; - final Method _m; - } - - static class Result { - Result( Test t ){ - this( t , null ); - } - - Result( Test t , Throwable error ){ - _test = t; - _error = error; - } - - boolean ok(){ - return _error == null; - } - - public String toString(){ - StringBuilder buf = new StringBuilder(); - buf.append( _test ); - Throwable error = _error; - while ( error != null ){ - buf.append( "\n\t" + error + "\n" ); - for ( StackTraceElement ste : error.getStackTrace() ){ - buf.append( "\t\t" + ste + "\n" ); - } - error = error.getCause(); - } - return buf.toString(); - } - - final Test _test; - final Throwable _error; - } - - - /** - * this is for normal class tests - */ - - public TestCase(){ - this( null ); - } - - public TestCase( String name ) { - if (staticMongoClient == null) { - try { - staticMongoClient = new MongoClient(); - staticMongoClient.setWriteConcern(WriteConcern.UNACKNOWLEDGED); - } catch (UnknownHostException e) { - throw new RuntimeException(e); - } - } - cleanupMongo = staticMongoClient; - - for ( Method m : getClass().getMethods() ){ - - if ( ! 
m.getName().startsWith( "test" ) )
-                continue;
-
-            if ( ( m.getModifiers() & Member.PUBLIC ) > 0 )
-                continue;
-
-            Test t = new Test( this , m );
-            t._name = name;
-            _tests.add( t );
-
-        }
-    }
-
-    public TestCase( Object o , String m )
-        throws NoSuchMethodException {
-        this( o , o.getClass().getDeclaredMethod( m ) );
-    }
-
-    public TestCase( Object o , Method m ){
-        _tests.add( new Test( o , m ) );
-    }
-
-    public void add( TestCase tc ){
-        _tests.addAll( tc._tests );
-    }
-
-    public String cleanupDB = null;
-    public Mongo cleanupMongo = null;
-
-    private static MongoClient staticMongoClient;
-
-    @AfterClass
-    public void cleanup() {
-        if (cleanupMongo != null) {
-            if (cleanupDB != null) {
-                cleanupMongo.dropDatabase(cleanupDB);
-            }
-        }
-    }
-
-    /**
-     * @return true if everything succeeds
-     */
-    public boolean runConsole(){
-        List<Result> errors = new ArrayList<Result>();
-        List<Result> fails = new ArrayList<Result>();
-
-        System.out.println( "Num Tests : " + _tests.size() );
-        System.out.println( "----" );
-
-        for ( Test t : _tests ){
-            Result r = t.run();
-            if ( r.ok() ){
-                System.out.print(".");
-                continue;
-            }
-
-            System.out.print( "x" );
-
-            if ( r._error instanceof MyAssert )
-                fails.add( r );
-            else
-                errors.add( r );
-        }
-        cleanup();
-        System.out.println( "\n----" );
-
-        int pass = _tests.size() - ( errors.size() + fails.size() );
-
-        System.out.println( "Passes : " + pass + " / " + _tests.size() );
-        System.out.println( "% Pass : " + ( ((double)pass*100) / _tests.size() ) );
-        if ( pass == _tests.size() ){
-            System.out.println( "SUCCESS" );
-            return true;
-        }
-
-        System.err.println( "Num Pass : " + ( _tests.size() - ( errors.size() + fails.size() ) ) );
-        System.err.println( "Num Errors : " + ( errors.size() ) );
-        System.err.println( "Num Fails : " + ( fails.size() ) );
-
-        System.err.println( "---------" );
-        System.err.println( "ERRORS" );
-        for ( Result r : errors )
-            System.err.println( r );
-
-        System.err.println( "---------" );
-        System.err.println( "FAILS" );
-        for ( Result r : fails )
-            System.err.println( r );
-
-        return false;
-    }
-
-    public String toString(){
-        return "TestCase numCase:" + _tests.size();
-    }
-
-    final List<Test> _tests = new ArrayList<Test>();
-
-    protected static void run( String args[] ){
-        Args a = new Args( args );
-
-        boolean foundMe = false;
-        String theClass = null;
-        for ( StackTraceElement ste : Thread.currentThread().getStackTrace() ){
-            if ( foundMe ){
-                theClass = ste.getClassName();
-                break;
-            }
-
-            if ( ste.getClassName().equals( "com.mongodb.util.TestCase" ) &&
-                 ste.getMethodName().equals( "run" ) )
-                foundMe = true;
-        }
-
-        if ( theClass == null )
-            throw new RuntimeException( "something is broken" );
-
-        try {
-            TestCase tc = (TestCase) Class.forName( theClass ).newInstance();
-
-            if ( a.getOption( "m" ) != null )
-                tc = new TestCase( tc , a.getOption( "m" ) );
-
-            tc.runConsole();
-        }
-        catch ( Exception e ){
-            throw new RuntimeException( e );
-        }
-    }
-
-    /**
-     *
-     * @param version must be a major version, e.g. 1.8, 2.0, 2.2
-     * @return true if server is at least specified version
-     */
-    protected boolean serverIsAtLeastVersion(double version) {
-        String serverVersion = (String) cleanupMongo.getDB("admin").command("serverStatus").get("version");
-        return Double.parseDouble(serverVersion.substring(0, 3)) >= version;
-    }
-
-    /**
-     *
-     * @param mongo the connection
-     * @return true if connected to a standalone server
-     */
-    protected boolean isStandalone(Mongo mongo) {
-        return runReplicaSetStatusCommand(mongo) == null;
-    }
-
-    @SuppressWarnings({"unchecked"})
-    protected String getPrimaryAsString(Mongo mongo) {
-        return getMemberNameByState(mongo, "primary");
-    }
-
-    @SuppressWarnings({"unchecked"})
-    protected String getASecondaryAsString(Mongo mongo) {
-        return getMemberNameByState(mongo, "secondary");
-    }
-
-    @SuppressWarnings({"unchecked"})
-    protected String getMemberNameByState(Mongo mongo, String stateStrToMatch) {
-        CommandResult replicaSetStatus = runReplicaSetStatusCommand(mongo);
-
-        for (final BasicDBObject member : (List<BasicDBObject>) replicaSetStatus.get("members")) {
-            String hostnameAndPort = member.getString("name");
-            if (!hostnameAndPort.contains(":"))
-                hostnameAndPort = hostnameAndPort + ":27017";
-
-            final String stateStr = member.getString("stateStr");
-
-            if (stateStr.equalsIgnoreCase(stateStrToMatch))
-                return hostnameAndPort;
-        }
-
-        throw new IllegalStateException("No member found in state " + stateStrToMatch);
-    }
-
-    @SuppressWarnings("unchecked")
-    protected int getReplicaSetSize(Mongo mongo) {
-        int size = 0;
-
-        CommandResult replicaSetStatus = runReplicaSetStatusCommand(mongo);
-
-        for (final BasicDBObject member : (List<BasicDBObject>) replicaSetStatus.get("members")) {
-
-            final String stateStr = member.getString("stateStr");
-
-            if (stateStr.equals("PRIMARY") || stateStr.equals("SECONDARY"))
-                size++;
-        }
-
-        return size;
-    }
-
-
-    protected CommandResult runReplicaSetStatusCommand(final Mongo pMongo) {
-        // Check to see if this is a replica set... if not, get out of here.
-        final CommandResult result = pMongo.getDB("admin").command(new BasicDBObject("replSetGetStatus", 1));
-
-        final String errorMsg = result.getErrorMessage();
-
-        if (errorMsg != null && errorMsg.indexOf("--replSet") != -1) {
-            System.err.println("---- SecondaryReadTest: This is not a replica set - not testing secondary reads");
-            return null;
-        }
-
-        return result;
-    }
-
-
-
-    public static void main( String args[] )
-        throws Exception {
-
-        String dir = "src/test";
-        if ( args != null && args.length > 0 )
-            dir = args[0];
-
-        Process p = Runtime.getRuntime().exec( "find " + dir );
-        BufferedReader in = new BufferedReader( new InputStreamReader( p.getInputStream() ) );
-
-        TestCase theTestCase = new TestCase();
-
-        String line;
-        while ( ( line = in.readLine() ) != null ){
-
-            if ( ! line.endsWith( "Test.java" ) ) {
-                continue;
-            }
-
-            line = line.substring( "src/test/".length() );
-            line = line.substring( 0 , line.length() - ".java".length() );
-            line = line.replaceAll( "//+" , "/" );
-            line = line.replace( '/' , '.' );
-
-
-            try {
-                Class c = Class.forName( line );
-                Object thing = c.newInstance();
-                if ( !
( thing instanceof TestCase ) ) - continue; - - System.out.println( line ); - - TestCase tc = (TestCase)thing; - theTestCase._tests.addAll( tc._tests ); - } - catch ( Exception e ){ - e.printStackTrace(); - } - } - - theTestCase.runConsole(); - } -} diff --git a/src/test/com/mongodb/util/TestNGListener.java b/src/test/com/mongodb/util/TestNGListener.java deleted file mode 100644 index 9cf97ec4175..00000000000 --- a/src/test/com/mongodb/util/TestNGListener.java +++ /dev/null @@ -1,135 +0,0 @@ -// TestNGListener.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.util; - -import com.mongodb.BasicDBObject; -import com.mongodb.DBObject; -import com.mongodb.Mongo; -import com.mongodb.MongoClient; -import org.testng.ITestContext; -import org.testng.ITestResult; -import org.testng.TestListenerAdapter; - -import java.net.UnknownHostException; - -public class TestNGListener extends TestListenerAdapter { - - public void onConfigurationFailure(ITestResult itr){ - super.onConfigurationFailure( itr ); - _print( itr.getThrowable() ); - } - - public void onTestFailure(ITestResult tr) { - super.onTestFailure( tr ); - log("F"); - } - - public void onTestSkipped(ITestResult tr) { - super.onTestSkipped( tr ); - log("S"); - } - - public void onTestSuccess(ITestResult tr) { - super.onTestSuccess( tr ); - log("."); - } - - private void log(String string) { - System.out.print(string); - if ( ++_count % 40 == 0) { - System.out.println(""); - } - System.out.flush(); - } - - public void onFinish(ITestContext context){ - System.out.println(); - - for ( ITestResult r : context.getFailedTests().getAllResults() ){ - System.out.println(r); - System.out.println("Exception : "); - _print( r.getThrowable() ); - } - - try { - _recordResults( context ); - } catch (UnknownHostException e) { - e.printStackTrace(); - } - } - - private void _recordResults( ITestContext context ) throws UnknownHostException { - DBObject obj = new BasicDBObject(); - for( ITestResult r : context.getPassedTests().getAllResults() ) { - obj.put( (r.getTestClass().getName() + "." + r.getName()).replace('.', '_'), - r.getEndMillis()-r.getStartMillis() ); - } - obj.put( "total", context.getEndDate().getTime()-context.getStartDate().getTime() ); - obj.put( "time", System.currentTimeMillis() ); - - Mongo mongo = new MongoClient(); - try { - mongo.getDB( "results" ).getCollection( "testng" ).save( obj ); - } - catch( Exception e ) { - System.err.println( "\nUnable to save test results to the db." 
); - e.printStackTrace(); - } finally { - mongo.close(); - } - } - - private void _print( Throwable t ){ - - int otcount = 0; - int jlrcount = 0; - - if (t == null) { - return; - } - - System.out.println("-" + t.toString()+ "-"); - - for ( StackTraceElement e : t.getStackTrace() ){ - if ( e.getClassName().startsWith( "org.testng.")) { - if (otcount++ == 0) { - System.out.println(" " + e + " (with others of org.testng.* omitted)"); - } - } - else if (e.getClassName().startsWith( "java.lang.reflect.") || e.getClassName().startsWith("sun.reflect.") ) { - if (jlrcount++ == 0) { - System.out.println(" " + e + " (with others of java.lang.reflect.* or sun.reflect.* omitted)"); - } - } - else { - System.out.println(" " + e ); - } - } - - if (t.getCause() != null) { - System.out.println("Caused By : "); - - _print(t.getCause()); - } - - System.out.println(); - } - - private int _count = 0; -} diff --git a/src/test/com/mongodb/util/UniqueListTest.java b/src/test/com/mongodb/util/UniqueListTest.java deleted file mode 100644 index c76b39c903c..00000000000 --- a/src/test/com/mongodb/util/UniqueListTest.java +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.mongodb.util; - -import org.testng.annotations.Test; - -public class UniqueListTest extends com.mongodb.util.TestCase { - - @org.testng.annotations.Test - @SuppressWarnings("unchecked") - public void test1(){ - UniqueList l = new UniqueList(); - l.add( "a" ); - assertEquals( 1 , l.size() ); - l.add( "a" ); - assertEquals( 1 , l.size() ); - l.add( "b" ); - assertEquals( 2 , l.size() ); - } - - public static void main( String args[] ){ - (new UniqueListTest()).runConsole(); - } -} diff --git a/src/test/org/bson/BSONTest.java b/src/test/org/bson/BSONTest.java deleted file mode 100644 index 7df98ebb4d1..00000000000 --- a/src/test/org/bson/BSONTest.java +++ /dev/null @@ -1,356 +0,0 @@ -// BSONTest.java - -/** - * Copyright (C) 2008 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.bson; - -import static org.testng.Assert.assertNotEquals; - -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Date; -import java.util.List; -import java.util.Vector; -import org.bson.io.BasicOutputBuffer; -import org.bson.io.OutputBuffer; -import org.bson.types.CodeWScope; -import org.testng.Assert; -import org.testng.annotations.Test; - -public class BSONTest extends Assert { - - - public BSONTest(){ - for ( int x = 8; x<2048; x*=2 ){ - StringBuilder buf = new StringBuilder(); - while ( buf.length() < x ) - buf.append( x ); - _data.add( buf.toString() ); - } - } - - - void _test( BSONObject o , int size , String hash ) - throws IOException { - BSONEncoder e = new BasicBSONEncoder(); - OutputBuffer buf = new BasicOutputBuffer(); - e.set( buf ); - e.putObject( o ); - assertEquals( size , buf.size() ); - assertEquals( hash , buf.md5() ); - e.done(); - - BSONDecoder d = new BasicBSONDecoder(); - BSONCallback cb = new BasicBSONCallback(); - int s = d.decode( new ByteArrayInputStream( buf.toByteArray() ) , cb ); - assertEquals( size , s ); - - OutputBuffer buf2 = new BasicOutputBuffer(); - e.set( buf2 ); - e.putObject( (BSONObject)cb.get() ); - assertEquals( size , buf2.size() ); - assertEquals( hash , buf2.md5() ); - - } - - @Test - public void testBasic1() - throws IOException { -// BSONObject o = new BasicBSONObject(); - _test( new BasicBSONObject( "x" , true ) , 9 , "6fe24623e4efc5cf07f027f9c66b5456" ); - - _test( new BasicBSONObject( "x" , null ) , 8 , "12d43430ff6729af501faf0638e68888" ); - _test( new BasicBSONObject( "x" , 5.2 ) , 16 , "aaeeac4a58e9c30eec6b0b0319d0dff2" ); - _test( new BasicBSONObject( "x" , "eliot" ), 18 , "331a3b8b7cbbe0706c80acdb45d4ebbe" ); - _test( new BasicBSONObject( "x" , 5.2 ).append( "y" , "truth" ).append( "z" , 1.1 ) , - 40 , "7c77b3a6e63e2f988ede92624409da58" ); - - _test( new BasicBSONObject( "a" , new BasicBSONObject( "b" , 1.1 ) ) , 24 , "31887a4b9d55cd9f17752d6a8a45d51f" ); - _test( new BasicBSONObject( "x" , 5.2 ).append( "y" , new BasicBSONObject( "a" , "eliot" ).append( "b" , true ) ).append( "z" , null ) , 44 , "b3de8a0739ab329e7aea138d87235205" ); - _test( new BasicBSONObject( "x" , 5.2 ).append( "y" , new Object[]{ "a" , "eliot" , "b" , true } ).append( "z" , null ) , 62 , "cb7bad5697714ba0cbf51d113b6a0ee8" ); - - _test( new BasicBSONObject( "x" , 4 ) , 12 , "d1ed8dbf79b78fa215e2ded74548d89d" ); - } - - @Test( expectedExceptions = IllegalArgumentException.class ) - public void testNullKeysFail() { - BSONEncoder e = new BasicBSONEncoder(); - OutputBuffer buf = new BasicOutputBuffer(); - e.set( buf ); - e.putObject( new BasicBSONObject( "foo\0bar","baz" ) ); - } - - @Test - public void testArray() - throws IOException { - _test( new BasicBSONObject( "x" , new int[]{ 1 , 2 , 3 , 4} ) , 41 , "e63397fe37de1349c50e1e4377a45e2d" ); - } - - @Test - public void testOB1(){ - BasicOutputBuffer buf = new BasicOutputBuffer(); - buf.write( "eliot".getBytes() ); - assertEquals( 5 , buf.getPosition() ); - assertEquals( 5 , buf.size() ); - - assertEquals( "eliot" , buf.asString() ); - - buf.setPosition( 2 ); - buf.write( "z".getBytes() ); - assertEquals( "elzot" , buf.asString() ); - - buf.seekEnd(); - buf.write( "foo".getBytes() ); - assertEquals( "elzotfoo" , buf.asString() ); - - buf.seekStart(); - buf.write( "bar".getBytes() ); - assertEquals( "barotfoo" , buf.asString() ); - - } - - @Test - public void testCode() - throws IOException{ - BSONObject scope = new BasicBSONObject( 
"x", 1 ); - CodeWScope c = new CodeWScope( "function() { x += 1; }" , scope ); - BSONObject code_object = new BasicBSONObject( "map" , c); - _test( code_object , 53 , "52918d2367533165bfc617df50335cbb" ); - } - - @Test - public void testBinary() - throws IOException{ - byte[] data = new byte[10000]; - for(int i=0; i<10000; i++) { - data[i] = 1; - } - BSONObject binary_object = new BasicBSONObject( "bin" , data); - _test( binary_object , 10015 , "1d439ba5b959ecfe297a7862bf95bc10" ); - } - - @Test - public void testOBBig1(){ - BasicOutputBuffer a = new BasicOutputBuffer(); - StringBuilder b = new StringBuilder(); - for ( String x : _data ){ - a.write( x.getBytes() ); - b.append( x ); - } - assertEquals( a.asString() , b.toString() ); - } - - @Test - public void testUTF8(){ - for ( int i=1; i<=Character.MAX_CODE_POINT; i++ ){ - - if ( ! Character.isValidCodePoint( i ) ) - continue; - - String orig = new String( Character.toChars( i ) ); - BSONObject a = new BasicBSONObject( orig , orig ); - BSONObject b = BSON.decode( BSON.encode( a ) ); - assertEquals( a , b ); - } - - } - - @Test - @SuppressWarnings("unchecked") - public void testCustomEncoders() - throws IOException{ - // If clearEncodingHooks isn't working the first test will fail. - Transformer tf = new TestDateTransformer(); - BSON.addEncodingHook( TestDate.class, tf ); - BSON.clearEncodingHooks(); - TestDate td = new TestDate( 2009 , 01 , 23 , 10 , 53 , 42 ); - BSONObject o = new BasicBSONObject( "date" , td ); - BSONEncoder e = new BasicBSONEncoder(); - BSONDecoder d = new BasicBSONDecoder(); - BSONCallback cb = new BasicBSONCallback(); - OutputBuffer buf = new BasicOutputBuffer(); - e.set( buf ); - boolean encodeFailed = false; - try { - e.putObject( o ); - } - catch ( IllegalArgumentException ieE ) { - encodeFailed = true; - } - assertTrue( encodeFailed, "Expected encoding to fail but it didn't." ); - // Reset the buffer - buf.seekStart(); - assertTrue( td instanceof TestDate ); - assertTrue( tf.transform( td ) instanceof java.util.Date, "Transforming a TestDate should yield a JDK Date" ); - - BSON.addEncodingHook( TestDate.class, tf ); - e.putObject( o ); - e.done(); - - d.decode( new ByteArrayInputStream( buf.toByteArray() ), cb ); - Object result = cb.get(); - assertTrue( result instanceof BSONObject, "Expected to retrieve a BSONObject but got '" + result.getClass() + "' instead." ); - BSONObject bson = (BSONObject) result; - assertNotNull( bson.get( "date" ) ); - assertTrue( bson.get( "date" ) instanceof java.util.Date ); - - // Check that the hooks registered - assertNotNull( BSON.getEncodingHooks( TestDate.class ) ); - Vector expect = new Vector( 1 ); - expect.add( tf ); - assertEquals( BSON.getEncodingHooks( TestDate.class ), expect ); - assertTrue( BSON.getEncodingHooks( TestDate.class ).contains( tf ) ); - BSON.removeEncodingHook( TestDate.class, tf ); - assertFalse( BSON.getEncodingHooks( TestDate.class ).contains( tf ) ); - } - - @Test - @SuppressWarnings({"deprecation", "unchecked"}) - public void testCustomDecoders() - throws IOException{ - // If clearDecodingHooks isn't working this whole test will fail. 
- Transformer tf = new TestDateTransformer(); - BSON.addDecodingHook( Date.class, tf ); - BSON.clearDecodingHooks(); - TestDate td = new TestDate( 2009 , 01 , 23 , 10 , 53 , 42 ); - Date dt = new Date( 2009 , 01 , 23 , 10 , 53 , 42 ); - BSONObject o = new BasicBSONObject( "date" , dt ); - BSONDecoder d = new BasicBSONDecoder(); - BSONEncoder e = new BasicBSONEncoder(); - BSONCallback cb = new BasicBSONCallback(); - OutputBuffer buf = new BasicOutputBuffer(); - e.set( buf ); - e.putObject( o ); - e.done(); - - d.decode( new ByteArrayInputStream( buf.toByteArray() ), cb ); - Object result = cb.get(); - assertTrue( result instanceof BSONObject, "Expected to retrieve a BSONObject but got '" + result.getClass() + "' instead." ); - BSONObject bson = (BSONObject) result; - assertNotNull( bson.get( "date" ) ); - assertTrue( bson.get( "date" ) instanceof java.util.Date ); - - BSON.addDecodingHook( Date.class, tf ); - - d.decode( new ByteArrayInputStream( buf.toByteArray() ), cb ); - bson = (BSONObject) cb.get(); - assertNotNull( bson.get( "date" ) ); - assertTrue( bson.get( "date" ) instanceof TestDate ); - assertEquals( bson.get( "date" ), td ); - - // Check that the hooks registered - assertNotNull( BSON.getDecodingHooks( Date.class ) ); - Vector expect = new Vector( 1 ); - expect.add( tf ); - assertEquals( BSON.getDecodingHooks( Date.class ), expect ); - assertTrue( BSON.getDecodingHooks( Date.class ).contains( tf ) ); - BSON.removeDecodingHook( Date.class, tf ); - assertFalse( BSON.getDecodingHooks( Date.class ).contains( tf ) ); - - } - - @Test - public void testEquals() { - assertNotEquals(new BasicBSONObject("a", 1111111111111111111L), new BasicBSONObject("a", 1111111111111111112L), - "longs should not be equal"); - - assertNotEquals(new BasicBSONObject("a", 100.1D), new BasicBSONObject("a", 100.2D), - "doubles should not be equal"); - - assertNotEquals(new BasicBSONObject("a", 100.1F), new BasicBSONObject("a", 100.2F), - "floats should not be equal"); - - assertEquals(new BasicBSONObject("a", 100.1D), new BasicBSONObject("a", 100.1D), - "doubles should be equal"); - - assertEquals(new BasicBSONObject("a", 100.1F), new BasicBSONObject("a", 100.1F), - "floats should be equal"); - - assertEquals(new BasicBSONObject("a", 100), new BasicBSONObject("a", 100L), - "int and long should be equal"); - } - - private class TestDate { - final int year; - final int month; - final int date; - final int hour; - final int minute; - final int second; - - public TestDate(int year , int month , int date , int hour , int minute , int second) { - this.year = year; - this.month = month; - this.date = date; - this.hour = hour; - this.minute = minute; - this.second = second; - } - - public TestDate(int year , int month , int date) { - this( year , month , date , 0 , 0 , 0 ); - } - - @Override - public boolean equals( Object other ){ - if ( this == other ) - return true; - if ( !( other instanceof TestDate ) ) - return false; - - TestDate otherDt = (TestDate) other; - return ( otherDt.year == this.year && otherDt.month == this.month && otherDt.date == this.date && otherDt.hour == this.hour - && otherDt.minute == this.minute && otherDt.second == this.second ); - } - - @Override - public String toString(){ - return year + "-" + month + "-" + date + " " + hour + ":" + minute + ":" + second; - } - } - - private class TestDateTransformer implements Transformer { - @SuppressWarnings( "deprecation" ) - public Object transform( Object o ){ - if ( o instanceof TestDate ) { - TestDate td = (TestDate) o; - return new 
java.util.Date( td.year , td.month , td.date , td.hour , td.minute , td.second );
-            }
-            else if ( o instanceof java.util.Date ) {
-                Date d = (Date) o;
-                return new TestDate( d.getYear() , d.getMonth() , d.getDate() , d.getHours() , d.getMinutes() , d.getSeconds() );
-            }
-            else
-                return o;
-        }
-    }
-
-    void _roundTrip( BSONObject o ){
-        assertEquals( o , BSON.decode( BSON.encode( o ) ) );
-    }
-
-    @Test
-    public void testRandomRoundTrips(){
-        _roundTrip( new BasicBSONObject( "a" , "" ) );
-        _roundTrip( new BasicBSONObject( "a" , "a" ) );
-        _roundTrip( new BasicBSONObject( "a" , "b" ) );
-    }
-
-    List<String> _data = new ArrayList<String>();
-
-}
diff --git a/src/test/org/bson/BSONTimestampTest.java b/src/test/org/bson/BSONTimestampTest.java
deleted file mode 100644
index ba4c58e49bb..00000000000
--- a/src/test/org/bson/BSONTimestampTest.java
+++ /dev/null
@@ -1,29 +0,0 @@
-package org.bson;
-
-import org.bson.types.BSONTimestamp;
-import org.testng.Assert;
-import org.testng.annotations.Test;
-
-public class BSONTimestampTest extends Assert{
-
-    @Test
-    public void testComparable(){
-
-        int currTime = (int)(System.currentTimeMillis() / 1000);
-
-        BSONTimestamp t1 = new BSONTimestamp(currTime, 1);
-        BSONTimestamp t2 = new BSONTimestamp(currTime, 1);
-
-        assertEquals(0, t1.compareTo(t2));
-
-        t2 = new BSONTimestamp(currTime, 2);
-
-        assertTrue(t1.compareTo(t2) < 0);
-        assertTrue(t2.compareTo(t1) > 0);
-
-        t2 = new BSONTimestamp(currTime + 1, 1);
-
-        assertTrue(t1.compareTo(t2) < 0);
-        assertTrue(t2.compareTo(t1) > 0);
-    }
-}
diff --git a/src/test/org/bson/BSONTypeSerializableTest.java b/src/test/org/bson/BSONTypeSerializableTest.java
deleted file mode 100644
index 7fdade8b649..00000000000
--- a/src/test/org/bson/BSONTypeSerializableTest.java
+++ /dev/null
@@ -1,158 +0,0 @@
-// BSONTypeSerializableTest.java
-
-/**
- * Copyright (C) 2010 10gen Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ -package org.bson; - -import org.bson.types.*; -import org.testng.Assert; -import org.testng.annotations.Test; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.ObjectInputStream; -import java.io.ObjectOutputStream; - -public class BSONTypeSerializableTest extends Assert { - - @Test - public void testSerializeMinKey() throws Exception { - MinKey key = new MinKey(); - - ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); - ObjectOutputStream objectOutputStream = new ObjectOutputStream(outputStream); - - objectOutputStream.writeObject(key); - - ByteArrayInputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray()); - ObjectInputStream objectInputStream = new ObjectInputStream(inputStream); - MinKey key2 = (MinKey) objectInputStream.readObject(); - } - - @Test - public void testSerializeMaxKey() throws Exception { - MaxKey key = new MaxKey(); - - ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); - ObjectOutputStream objectOutputStream = new ObjectOutputStream(outputStream); - - objectOutputStream.writeObject(key); - - ByteArrayInputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray()); - ObjectInputStream objectInputStream = new ObjectInputStream(inputStream); - MaxKey key2 = (MaxKey) objectInputStream.readObject(); - } - - @Test - public void testSerializeBinary() throws Exception { - Binary binary = new Binary((byte)0x00 , "hello world".getBytes()); - - ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); - ObjectOutputStream objectOutputStream = new ObjectOutputStream(outputStream); - - objectOutputStream.writeObject(binary); - - ByteArrayInputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray()); - ObjectInputStream objectInputStream = new ObjectInputStream(inputStream); - Binary binary2 = (Binary) objectInputStream.readObject(); - - Assert.assertEquals(binary.getData(), binary2.getData()); - Assert.assertEquals(binary.getType(), binary2.getType()); - } - - @Test - public void testSerializeBSONTimestamp() throws Exception { - BSONTimestamp object = new BSONTimestamp(100, 100); - - ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); - ObjectOutputStream objectOutputStream = new ObjectOutputStream(outputStream); - - objectOutputStream.writeObject(object); - - ByteArrayInputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray()); - ObjectInputStream objectInputStream = new ObjectInputStream(inputStream); - BSONTimestamp object2 = (BSONTimestamp) objectInputStream.readObject(); - - Assert.assertEquals(object.getTime(), object2.getTime()); - Assert.assertEquals(object.getInc(), object2.getInc()); - } - - @Test - public void testSerializeCode() throws Exception { - Code object = new Code("function() {}"); - - ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); - ObjectOutputStream objectOutputStream = new ObjectOutputStream(outputStream); - - objectOutputStream.writeObject(object); - - ByteArrayInputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray()); - ObjectInputStream objectInputStream = new ObjectInputStream(inputStream); - Code object2 = (Code) objectInputStream.readObject(); - - Assert.assertEquals(object.getCode(), object2.getCode()); - } - - @Test - public void testSerializeCodeWScope() throws Exception { - BSONObject scope = new BasicBSONObject("t", 1); - CodeWScope object = new CodeWScope("function() {}", scope); - - ByteArrayOutputStream outputStream = new 
ByteArrayOutputStream(); - ObjectOutputStream objectOutputStream = new ObjectOutputStream(outputStream); - - objectOutputStream.writeObject(object); - - ByteArrayInputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray()); - ObjectInputStream objectInputStream = new ObjectInputStream(inputStream); - CodeWScope object2 = (CodeWScope) objectInputStream.readObject(); - - Assert.assertEquals(object.getCode(), object2.getCode()); - Assert.assertEquals(object.getScope().get("t"), object2.getScope().get("t")); - } - - @Test - public void testSerializeSymbol() throws Exception { - Symbol object = new Symbol("symbol"); - - ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); - ObjectOutputStream objectOutputStream = new ObjectOutputStream(outputStream); - - objectOutputStream.writeObject(object); - - ByteArrayInputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray()); - ObjectInputStream objectInputStream = new ObjectInputStream(inputStream); - Symbol object2 = (Symbol) objectInputStream.readObject(); - - Assert.assertEquals(object.getSymbol(), object2.getSymbol()); - } - - @Test - public void testSerializeObjectID() throws Exception { - ObjectId object = new ObjectId("001122334455667788990011"); - - ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); - ObjectOutputStream objectOutputStream = new ObjectOutputStream(outputStream); - - objectOutputStream.writeObject(object); - - ByteArrayInputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray()); - ObjectInputStream objectInputStream = new ObjectInputStream(inputStream); - ObjectId object2 = (ObjectId) objectInputStream.readObject(); - - Assert.assertEquals(object.toString(), object2.toString()); - } -} diff --git a/src/test/org/bson/BasicBSONEncoderTest.java b/src/test/org/bson/BasicBSONEncoderTest.java deleted file mode 100644 index 6f0640dbad4..00000000000 --- a/src/test/org/bson/BasicBSONEncoderTest.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright (c) 2008 - 2013 10gen, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.bson; - -import org.bson.io.BasicOutputBuffer; -import org.testng.Assert; -import org.testng.annotations.Test; - -public class BasicBSONEncoderTest extends Assert { - - @Test(expectedExceptions = BSONException.class) - public void testNullCharacterInCString() { - BasicBSONEncoder encoder = new BasicBSONEncoder(); - encoder.set(new BasicOutputBuffer()); - encoder.writeCString("hell\u0000world"); - } -} diff --git a/src/test/org/bson/NewBSONDecoderTest.java b/src/test/org/bson/NewBSONDecoderTest.java deleted file mode 100644 index 982380e09c4..00000000000 --- a/src/test/org/bson/NewBSONDecoderTest.java +++ /dev/null @@ -1,114 +0,0 @@ -/** - * Copyright (C) 2012 10gen Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.bson; - -// Bson -import org.bson.types.ObjectId; - -// TestNg -import org.testng.Assert; -import org.testng.annotations.Test; - - -public class NewBSONDecoderTest extends Assert { - - - public void testDecoder(final String pName, final BSONDecoder pDecoder) throws Exception { - - final BasicBSONObject origDoc = new BasicBSONObject("_id", ObjectId.get()); - origDoc.put("long", Long.MAX_VALUE); - origDoc.put("int", Integer.MAX_VALUE); - origDoc.put("string", "yay... we are finally making this faster"); - - - final BasicBSONObject origNested = new BasicBSONObject("id", ObjectId.get()); - origNested.put("long", Long.MAX_VALUE); - origNested.put("int", Integer.MAX_VALUE); - origDoc.put("nested", origNested); - - final byte [] orig = BSON.encode(origDoc); - - int count = 500000; - //int count = 1000000; - //int count = 100; - - long startTime = System.currentTimeMillis(); - - for (int idx=0; idx < count; idx++) { - final BasicBSONObject doc = (BasicBSONObject)pDecoder.readObject(orig); - assertEquals(origDoc.getLong("long"), doc.getLong("long")); - assertEquals(origDoc.getInt("int"), doc.getInt("int")); - - assertEquals(origDoc.getString("string"), doc.getString("string")); - - //System.out.println("--- ok: " + doc.getString("string")); - - assertEquals(origDoc.getObjectId("_id"), doc.getObjectId("_id")); - - final BasicBSONObject nested = (BasicBSONObject)doc.get("nested"); - - assertEquals(origNested.getLong("long"), nested.getLong("long")); - assertEquals(origNested.getInt("int"), nested.getInt("int")); - assertEquals(origNested.getObjectId("_id"), nested.getObjectId("_id")); - } - - //System.out.println(pName + ": " + (System.currentTimeMillis() - startTime)); - } - - @Test - public void testNewDecoder1() throws Exception { - final NewBSONDecoder decoder = new NewBSONDecoder(); - testDecoder("new", decoder); - } - - @Test - public void testNewDecoderCreate1() throws Exception { - long startTime = System.currentTimeMillis(); - for (int idx=0; idx < 1000000; idx++) { - final NewBSONDecoder decoder = new NewBSONDecoder(); - } - //System.out.println("new create 1: " + (System.currentTimeMillis() - startTime)); - } - - @Test - public void testCurrentDecoderCreate1() throws Exception { - long startTime = System.currentTimeMillis(); - for (int idx=0; idx < 1000000; idx++) { - final BasicBSONDecoder decoder = new BasicBSONDecoder(); - } - //System.out.println("current create 1: " + (System.currentTimeMillis() - startTime)); - } - - @Test - public void testCurrent1() throws Exception { - final BasicBSONDecoder decoder = new BasicBSONDecoder(); - testDecoder("current", decoder); - } - - @Test - public void testNewDecoder2() throws Exception { - final NewBSONDecoder decoder = new NewBSONDecoder(); - testDecoder("new", decoder); - } - - @Test - public void testCurrent2() throws Exception { - final BasicBSONDecoder decoder = new BasicBSONDecoder(); - testDecoder("current", decoder); - } -} - diff --git a/src/test/org/bson/PoolOutputBufferTest.java b/src/test/org/bson/PoolOutputBufferTest.java deleted file mode 100644 index 5a55f754e7e..00000000000 --- 
a/src/test/org/bson/PoolOutputBufferTest.java +++ /dev/null @@ -1,75 +0,0 @@ -// PoolOutputBufferTest.java
-
-/**
- * Copyright (C) 2008 10gen Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.bson;
-
-import java.util.*;
-
-import org.testng.annotations.Test;
-
-import org.bson.io.*;
-import org.testng.Assert;
-
-public class PoolOutputBufferTest extends Assert {
-
- public PoolOutputBufferTest(){
- for ( int x = 8; x<(PoolOutputBuffer.BUF_SIZE*3); x*=2 ){
- StringBuilder buf = new StringBuilder();
- while ( buf.length() < x )
- buf.append( x );
- _data.add( buf.toString() );
- }
- }
-
- @Test
- public void testBasic1(){
- PoolOutputBuffer buf = new PoolOutputBuffer();
- buf.write( "eliot".getBytes() );
- assertEquals( 5 , buf.getPosition() );
- assertEquals( 5 , buf.size() );
-
- assertEquals( "eliot" , buf.asString() );
-
- buf.setPosition( 2 );
- buf.write( "z".getBytes() );
- assertEquals( "elzot" , buf.asString() );
-
- buf.seekEnd();
- buf.write( "foo".getBytes() );
- assertEquals( "elzotfoo" , buf.asString() );
-
- buf.seekStart();
- buf.write( "bar".getBytes() );
- assertEquals( "barotfoo" , buf.asString() );
-
- }
-
- @Test
- public void testBig1(){
- PoolOutputBuffer a = new PoolOutputBuffer();
- StringBuilder b = new StringBuilder();
- for ( String x : _data ){
- a.write( x.getBytes() );
- b.append( x );
- }
- assertEquals( a.asString() , b.toString() );
- }
-
- List<String> _data = new ArrayList<String>();
-
-} diff --git a/src/test/org/bson/io/BitsTest.java b/src/test/org/bson/io/BitsTest.java deleted file mode 100644 index 34ef1f8e99a..00000000000 --- a/src/test/org/bson/io/BitsTest.java +++ /dev/null @@ -1,111 +0,0 @@ -/*
- * Copyright (c) 2008 - 2013 10gen, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -package org.bson.io; - -import org.testng.annotations.Test; - -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.util.Arrays; - -import static org.testng.Assert.assertEquals; - -public class BitsTest { - - private final byte[] BYTES = { - 41, 0, 0, 0, 16, 105, 49, 0, -12, - -1, -1, -1, 16, 105, 50, 0, 0, 0, - 0, -128, 18, 105, 51, 0, -1, -1, -1, - -1, -1, -1, -1, 127, 16, 105, 52, 0, - 0, 0, 0, 0, 0 - }; - - @Test - public void testReadFullyWithBufferLargerThanExpected() throws IOException { - final byte[] buffer = new byte[8192]; - Bits.readFully(new ByteArrayInputStream(BYTES), buffer, BYTES.length); - assertEquals(BYTES, copyOfRange(buffer, 0, BYTES.length)); - } - - @Test - public void testReadFullyWithOffset() throws IOException { - final int offset = 10; - final byte[] buffer = new byte[8192]; - Bits.readFully(new ByteArrayInputStream(BYTES), buffer, offset, BYTES.length); - assertEquals(BYTES, copyOfRange(buffer, offset, BYTES.length + offset)); - } - - @Test - public void testReadFullyWithBufferEqualsToExpected() throws IOException { - final int offset = 10; - final byte[] buffer = new byte[offset+BYTES.length]; - Bits.readFully(new ByteArrayInputStream(BYTES), buffer, offset, BYTES.length); - assertEquals(BYTES, copyOfRange(buffer, offset, BYTES.length + offset)); - } - - @Test (expectedExceptions = IllegalArgumentException.class) - public void testReadFullyUsingNotEnoughBigBuffer() throws IOException { - Bits.readFully(new ByteArrayInputStream(BYTES), new byte[2], BYTES.length); - } - - @Test (expectedExceptions = IllegalArgumentException.class) - public void testReadFullyUsingNotEnoughBigBufferWithOffset() throws IOException { - final int offset = 10; - final byte[] buffer = new byte[BYTES.length]; - Bits.readFully(new ByteArrayInputStream(BYTES), buffer, offset, BYTES.length); - } - - @Test - public void testReadInt() { - assertEquals(41, Bits.readInt(BYTES)); - } - - @Test - public void testReadIntFromInputStream() throws IOException { - assertEquals(41, Bits.readInt(new ByteArrayInputStream(BYTES))); - } - - @Test - public void testReadIntWithOffset() { - assertEquals(-12, Bits.readInt(BYTES, 8)); - } - - @Test - public void testReadIntInBigEndianNotation() { - assertEquals(-12, Bits.readIntBE(new byte[]{-1, -1, -1, -12}, 0)); - } - - @Test - public void testReadLong() { - assertEquals(Long.MAX_VALUE, Bits.readLong(BYTES, 24)); - } - - @Test(expectedExceptions = ArrayIndexOutOfBoundsException.class) - public void testReadLongWithNotEnoughData() { - Bits.readLong(copyOfRange(BYTES, 24, 30), 0); - } - - private static byte[] copyOfRange(byte[] original, int from, int to) { - final int newLength = to - from; - if (newLength < 0) { - throw new IllegalArgumentException(from + " > " + to); - } - final byte[] copy = new byte[newLength]; - System.arraycopy(original, from, copy, 0, Math.min(original.length - from, newLength)); - return copy; - } -} \ No newline at end of file diff --git a/src/test/org/bson/types/BinaryTest.java b/src/test/org/bson/types/BinaryTest.java deleted file mode 100644 index 76097397338..00000000000 --- a/src/test/org/bson/types/BinaryTest.java +++ /dev/null @@ -1,33 +0,0 @@ -package org.bson.types; - -import org.testng.annotations.Test; - -import static com.mongodb.util.MyAsserts.assertEquals; -import static com.mongodb.util.MyAsserts.assertFalse; -import static com.mongodb.util.MyAsserts.assertTrue; - -public class BinaryTest { - - Binary first = new Binary((byte) 0, new byte[]{0, 1, 2}); - Binary second = new 
Binary((byte) 1, new byte[]{0, 1, 2});
- Binary third = new Binary((byte) 0, new byte[]{0, 1, 2, 3});
- Binary fourth = new Binary((byte) 0, new byte[]{0, 1, 2});
-
- @Test
- public void testEquals() throws Exception {
- assertFalse(first.equals(second));
- assertFalse(first.equals(third));
- assertEquals(first, fourth);
- assertFalse(first.equals("abc"));
- assertFalse(first.equals(null));
- }
-
- @Test
- public void testHashCode() throws Exception {
- assertTrue(first.hashCode() == fourth.hashCode());
- assertFalse(first.hashCode() == second.hashCode());
- assertFalse(first.hashCode() == third.hashCode());
- assertFalse(second.hashCode() == third.hashCode());
-
- }
-} diff --git a/src/test/org/bson/util/ClassMapTest.java b/src/test/org/bson/util/ClassMapTest.java deleted file mode 100644 index 23728256626..00000000000 --- a/src/test/org/bson/util/ClassMapTest.java +++ /dev/null @@ -1,54 +0,0 @@ -// ClassMapTest.java
-
-/**
- * Copyright (C) 2008 10gen Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.bson.util;
-
-import java.io.Serializable;
-import org.testng.annotations.Test;
-
-public class ClassMapTest extends com.mongodb.util.TestCase {
-
- @org.testng.annotations.Test
- public void test(){
- // maps Classes to Strings
- ClassMap<String> m = new ClassMap<String>();
-
- m.put(Object.class, "Object");
- m.put(Boolean.class, "Boolean");
- assertEquals(m.get(Object.class), "Object");
- assertEquals(m.get(Boolean.class), "Boolean");
- assertEquals(m.get(Integer.class), "Object");
-
- m.put(String.class, "String");
- m.put(Serializable.class, "Serializable");
-
- assertEquals(m.get(String.class), "String");
- assertEquals(m.get(Integer.class), "Serializable");
-
- m.put(Number.class, "Number");
- assertEquals(m.get(Integer.class), "Number");
-
- m.put(Integer.class, "Integer");
- assertEquals(m.get(Integer.class), "Integer");
- }
-
- public static void main( String args[] ){
- ClassMapTest t = new ClassMapTest();
- t.runConsole();
- }
-} diff --git a/src/test/org/bson/util/StringRangeSetTest.java b/src/test/org/bson/util/StringRangeSetTest.java deleted file mode 100644 index 77b313618e3..00000000000 --- a/src/test/org/bson/util/StringRangeSetTest.java +++ /dev/null @@ -1,44 +0,0 @@ -package org.bson.util;
-
-/**
- * Copyright (C) 2010 10gen Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-public class StringRangeSetTest extends com.mongodb.util.TestCase {
-
- @org.testng.annotations.Test
- public void rangeOfSizeProducesCorrectIteration() {
- int len = 250;
- StringRangeSet set = new StringRangeSet(len);
- int i = 0;
- for (String num : set) {
- assertEquals(num, String.valueOf(i++));
- }
- assertEquals(i, 250);
- }
-
- @org.testng.annotations.Test
- public void testToArray() {
- int len = 1000;
- StringRangeSet set = new StringRangeSet(len);
- String[] array = (String[]) set.toArray();
- for (int i = 0; i < len; ++i)
- assertEquals(array[i], String.valueOf(i));
- }
-
- public static void main(String args[]) {
- (new StringRangeSetTest()).runConsole();
- }
-} diff --git a/src/util/DocTaglet.java b/src/util/DocTaglet.java deleted file mode 100644 index b453ce3465f..00000000000 --- a/src/util/DocTaglet.java +++ /dev/null @@ -1,65 +0,0 @@ -/**
- * Copyright (c) 2008 - 2012 10gen, Inc.
- * <p/>
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p/>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import com.sun.javadoc.Tag;
-import com.sun.tools.doclets.Taglet;
-
-public abstract class DocTaglet implements Taglet {
-
- public boolean inConstructor() { return true; }
- public boolean inField() { return true; }
- public boolean inMethod() { return true; }
- public boolean inOverview() { return true; }
- public boolean inPackage() { return true; }
- public boolean inType() { return true; }
-
- public boolean isInlineTag() { return false; }
-
- public String toString( Tag[] tags ){
- if ( tags.length == 0 )
- return null;
-
- StringBuilder buf = new StringBuilder( "\n<DL><DT><B>MongoDB Doc Links</B></DT>" );
- buf.append( "<DD><UL>" );
- for ( Tag t : tags ){
- buf.append( "<LI>" ).append( genLink( t.text() ) ).append( "</LI>" );
- }
- buf.append( "</UL></DD>" );
- buf.append( "</DL>\n" );
- return buf.toString();
- }
-
- public String toString( Tag tag ){
- return toString(new Tag[]{tag});
- }
-
- protected String genLink( String text ){
- String relativePath = text;
- String display = text;
-
- int firstSpace = text.indexOf(' ');
- if (firstSpace != -1) {
- relativePath = text.substring(0, firstSpace);
- display = text.substring(firstSpace, text.length()).trim();
- }
-
- return new StringBuilder()
- .append( "<a href='" + getBaseDocURI() + relativePath + "'>" ).append(display).append( "</a>" )
- .toString();
- }
-
- protected abstract String getBaseDocURI();
-} diff --git a/src/util/DochubTaglet.java b/src/util/DochubTaglet.java deleted file mode 100644 index e4650637ed9..00000000000 --- a/src/util/DochubTaglet.java +++ /dev/null @@ -1,36 +0,0 @@ -/**
- * Copyright (c) 2008 - 2012 10gen, Inc.
- * <p/>
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p/>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import com.sun.tools.doclets.Taglet;
-
-import java.util.Map;
-
-public class DochubTaglet extends DocTaglet {
-
- public static void register( Map tagletMap ){
- DochubTaglet t = new DochubTaglet();
- tagletMap.put( t.getName() , t );
- }
-
- public String getName(){
- return "dochub";
- }
-
- @Override
- protected String getBaseDocURI() {
- return "http://dochub.mongodb.org/core/";
- }
-} diff --git a/src/util/ManualTaglet.java b/src/util/ManualTaglet.java deleted file mode 100644 index dfe9cb4e0ac..00000000000 --- a/src/util/ManualTaglet.java +++ /dev/null @@ -1,37 +0,0 @@ -/**
- * Copyright (c) 2008 - 2012 10gen, Inc.
- * <p/>
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p/>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p/>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import com.sun.tools.doclets.Taglet;
-
-import java.util.Map;
-
-public class ManualTaglet extends DocTaglet {
-
- public static void register( Map tagletMap ){
- ManualTaglet t = new ManualTaglet();
- tagletMap.put( t.getName() , t );
- }
-
- public String getName(){
- return "mongodb.driver.manual";
- }
-
- @Override
- protected String getBaseDocURI() {
- return "http://docs.mongodb.org/manual/";
- }
-
-}
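For a sense of what these deleted taglets produced: DocTaglet.genLink splits a tag's text at the first space into a relative path and a display string, then wraps them in an anchor rooted at the subclass's base URI. A minimal self-contained sketch of that mapping (illustrative only; TagletLinkDemo is not part of the driver):

    // Mirrors the genLink logic of the deleted DocTaglet, specialized to
    // ManualTaglet's base URI.
    public final class TagletLinkDemo {
        private static final String BASE_DOC_URI = "http://docs.mongodb.org/manual/";

        static String genLink(final String text) {
            String relativePath = text;
            String display = text;
            int firstSpace = text.indexOf(' ');
            if (firstSpace != -1) {
                relativePath = text.substring(0, firstSpace);
                display = text.substring(firstSpace).trim();
            }
            return "<a href='" + BASE_DOC_URI + relativePath + "'>" + display + "</a>";
        }

        public static void main(final String[] args) {
            // Prints: <a href='http://docs.mongodb.org/manual/reference/command/create'>Create Command</a>
            System.out.println(genLink("reference/command/create Create Command"));
        }
    }

So a javadoc tag such as "@mongodb.driver.manual reference/command/create Create Command" renders as a "Create Command" link into the server manual; the same tag names appear in the new javadoc later in this diff.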

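The hunks that follow continue the new ClientEncryption interface. As a hedged usage sketch of the queryable-range API described below (the helper class, data key id, field name, and bounds are assumptions for illustration, not part of the diff):

    // Sketch: explicit "Range" encryption of a $match expression via
    // ClientEncryption.encryptExpression. Assumes clientEncryption came from
    // ClientEncryptions.create(...) and dataKeyId is an existing data key UUID;
    // the "age" field and its bounds must match the collection's encryptedFields.
    import com.mongodb.client.model.vault.EncryptOptions;
    import com.mongodb.client.model.vault.RangeOptions;
    import com.mongodb.client.vault.ClientEncryption;
    import org.bson.BsonBinary;
    import org.bson.BsonDocument;
    import org.bson.BsonInt32;

    final class EncryptExpressionSketch {
        static BsonDocument encryptRangeFilter(final ClientEncryption clientEncryption, final BsonBinary dataKeyId) {
            EncryptOptions options = new EncryptOptions("Range")      // algorithm must be "Range"
                    .keyId(dataKeyId)
                    .queryType("range")                               // queryType must be "range"
                    .contentionFactor(0L)
                    .rangeOptions(new RangeOptions()
                            .min(new BsonInt32(0))
                            .max(new BsonInt32(200)));
            BsonDocument expression = BsonDocument.parse(
                    "{'$and': [{'age': {'$gte': 23}}, {'age': {'$lte': 35}}]}");
            return clientEncryption.encryptExpression(expression, options);
        }
    }

The returned document is a drop-in replacement for the plaintext $match filter, with the comparison values swapped for encrypted payloads.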
Only supported when queryType is "range" and algorithm is "Range".
+ *
+ * @param expression the Match Expression or Aggregate Expression
+ * @param options the options
+ * @return the encrypted queryable range expression
+ * @since 4.9
+ * @mongodb.server.release 8.0
+ * @mongodb.driver.manual /core/queryable-encryption/ queryable encryption
+ * @mongodb.driver.manual reference/operator/aggregation/match/ $match
+ */
+ BsonDocument encryptExpression(Bson expression, EncryptOptions options);
+
+ /**
+ * Decrypt the given value.
+ *
+ * @param value the value to decrypt, which must be of subtype 6
+ * @return the decrypted value
+ */
+ BsonValue decrypt(BsonBinary value);
+
+ /**
+ * Removes the key document with the given data key from the key vault collection.
+ * @param id the data key UUID (BSON binary subtype 0x04)
+ * @return the result
+ * @since 4.7
+ */
+ DeleteResult deleteKey(BsonBinary id);
+
+ /**
+ * Finds a single key document with the given UUID (BSON binary subtype 0x04).
+ *
+ * @param id the data key UUID (BSON binary subtype 0x04)
+ * @return the single key document or null if there is no match
+ * @since 4.7
+ */
+ @Nullable
+ BsonDocument getKey(BsonBinary id);
+
+ /**
+ * Finds all documents in the key vault collection.
+ * @return a find iterable for the documents in the key vault collection
+ * @since 4.7
+ */
+ FindIterable<BsonDocument> getKeys();
+
+ /**
+ * Adds a keyAltName to the keyAltNames array of the key document in the key vault collection with the given UUID.
+ *
+ * @param id the data key UUID (BSON binary subtype 0x04)
+ * @param keyAltName the alternative key name to add to the keyAltNames array
+ * @return the previous version of the key document or null if no match
+ * @since 4.7
+ */
+ @Nullable
+ BsonDocument addKeyAltName(BsonBinary id, String keyAltName);
+
+ /**
+ * Removes a keyAltName from the keyAltNames array of the key document in the key vault collection with the given id.
+ *
+ * @param id the data key UUID (BSON binary subtype 0x04)
+ * @param keyAltName the alternative key name
+ * @return the previous version of the key document or null if no match
+ * @since 4.7
+ */
+ @Nullable
+ BsonDocument removeKeyAltName(BsonBinary id, String keyAltName);
+
+ /**
+ * Returns a key document in the key vault collection with the given keyAltName.
+ *
+ * @param keyAltName the alternative key name
+ * @return a matching key document or null
+ * @since 4.7
+ */
+ @Nullable
+ BsonDocument getKeyByAltName(String keyAltName);
+
+ /**
+ * Decrypts multiple data keys and (re-)encrypts them with the current masterKey.
+ *
+ * @param filter the filter
+ * @return the result
+ * @since 4.7
+ */
+ RewrapManyDataKeyResult rewrapManyDataKey(Bson filter);
+
+ /**
+ * Decrypts multiple data keys and (re-)encrypts them with a new masterKey, or with their current masterKey if a new one is not given.
+ *
+ * @param filter the filter
+ * @param options the options
+ * @return the result
+ * @since 4.7
+ */
+ RewrapManyDataKeyResult rewrapManyDataKey(Bson filter, RewrapManyDataKeyOptions options);
+
+ /**
+ * {@linkplain MongoDatabase#createCollection(String, CreateCollectionOptions) Create} a new collection with encrypted fields,
+ * automatically {@linkplain #createDataKey(String, DataKeyOptions) creating}
+ * new data encryption keys when needed based on the configured
+ * {@link CreateCollectionOptions#getEncryptedFields() encryptedFields}, which must be specified.
+ * This method does not modify the configured {@code encryptedFields} when creating new data keys;
+ * instead, it creates a new configuration if needed.
+ *
+ * @param database The database to use for creating the collection.
+ * @param collectionName The name for the collection to create.
+ * @param createCollectionOptions Options for creating the collection.
+ * @param createEncryptedCollectionParams Auxiliary parameters for creating an encrypted collection.
+ * @return The (potentially updated) {@code encryptedFields} configuration that was used to create the collection.
+ * A user may use this document to configure {@link AutoEncryptionSettings#getEncryptedFieldsMap()}.
+ * @throws MongoUpdatedEncryptedFieldsException If an exception happens after creating at least one data key.
+ * This exception makes the updated {@code encryptedFields}
+ * {@linkplain MongoUpdatedEncryptedFieldsException#getEncryptedFields() available} to the caller.
+ *
+ * @since 4.9
+ * @mongodb.server.release 7.0
+ * @mongodb.driver.manual reference/command/create Create Command
+ */
+ BsonDocument createEncryptedCollection(MongoDatabase database, String collectionName, CreateCollectionOptions createCollectionOptions,
+ CreateEncryptedCollectionParams createEncryptedCollectionParams);
+
+ @Override
+ void close();
+} diff --git a/driver-sync/src/main/com/mongodb/client/vault/ClientEncryptions.java b/driver-sync/src/main/com/mongodb/client/vault/ClientEncryptions.java new file mode 100644 index 00000000000..e3efe6f4154 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/vault/ClientEncryptions.java @@ -0,0 +1,41 @@ +/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client.vault;
+
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.client.internal.ClientEncryptionImpl;
+
+/**
+ * Factory for ClientEncryption implementations.
+ *
+ * @since 3.11
+ */
+public final class ClientEncryptions {
+
+ /**
+ * Create a key vault with the given options.
+ *
+ * @param options the key vault options
+ * @return the key vault
+ */
+ public static ClientEncryption create(final ClientEncryptionSettings options) {
+ return new ClientEncryptionImpl(options);
+ }
+
+ private ClientEncryptions() {
+ }
+} diff --git a/driver-sync/src/main/com/mongodb/client/vault/package-info.java b/driver-sync/src/main/com/mongodb/client/vault/package-info.java new file mode 100644 index 00000000000..89f3a158f25 --- /dev/null +++ b/driver-sync/src/main/com/mongodb/client/vault/package-info.java @@ -0,0 +1,26 @@ +/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * This package contains the Key Vault API + * + * @since 3.11 + */ + +@NonNullApi +package com.mongodb.client.vault; + +import com.mongodb.lang.NonNullApi; diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractAtlasSearchIndexManagementProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractAtlasSearchIndexManagementProseTest.java new file mode 100644 index 00000000000..17c007e14ba --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractAtlasSearchIndexManagementProseTest.java @@ -0,0 +1,362 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoCommandException; +import com.mongodb.ReadConcern; +import com.mongodb.WriteConcern; +import com.mongodb.client.model.SearchIndexModel; +import com.mongodb.client.model.SearchIndexType; +import com.mongodb.event.CommandListener; +import com.mongodb.event.CommandStartedEvent; +import org.bson.BsonDocument; +import org.bson.Document; +import org.bson.conversions.Bson; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; + +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; + +import static com.mongodb.ClusterFixture.serverVersionAtLeast; +import static com.mongodb.assertions.Assertions.assertFalse; +import static com.mongodb.client.Fixture.getMongoClientSettings; +import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.contains; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +/** + * See Search Index Management Tests + */ +public abstract class AbstractAtlasSearchIndexManagementProseTest { + /** + * The maximum number of attempts for waiting for changes or completion. + * If this many attempts are made without success, the test will be marked as failed. 
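+ * With the five-second interval below, this caps each wait at roughly 70 * 5 = 350 seconds.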
+ */
+ private static final int MAX_WAIT_ATTEMPTS = 70;
+
+ /**
+ * The duration in seconds to wait between each attempt when waiting for changes or completion.
+ */
+ private static final int WAIT_INTERVAL_SECONDS = 5;
+
+ private static final String TEST_SEARCH_INDEX_NAME_1 = "test-search-index";
+ private static final String TEST_SEARCH_INDEX_NAME_2 = "test-search-index-2";
+ private static final Document NOT_DYNAMIC_MAPPING_DEFINITION = Document.parse(
+ "{"
+ + " mappings: { dynamic: false }"
+ + "}");
+ private static final Document DYNAMIC_MAPPING_DEFINITION = Document.parse(
+ "{"
+ + " mappings: { dynamic: true }"
+ + "}");
+ private static final Document VECTOR_SEARCH_DEFINITION = Document.parse(
+ "{"
+ + " fields: ["
+ + " {"
+ + " type: 'vector',"
+ + " path: 'plot_embedding',"
+ + " numDimensions: 1536,"
+ + " similarity: 'euclidean',"
+ + " },"
+ + " ]"
+ + "}");
+
+ private MongoClient client = createMongoClient(getMongoClientSettings());
+ private MongoDatabase db;
+ private MongoCollection<Document> collection;
+
+ protected abstract MongoClient createMongoClient(MongoClientSettings settings);
+
+ protected AbstractAtlasSearchIndexManagementProseTest() {
+ Assumptions.assumeTrue(serverVersionAtLeast(6, 0));
+ Assumptions.assumeTrue(hasAtlasSearchIndexHelperEnabled(), "Atlas Search Index tests are disabled");
+ }
+
+ private static boolean hasAtlasSearchIndexHelperEnabled() {
+ return Boolean.parseBoolean(System.getProperty("org.mongodb.test.atlas.search.index.helpers"));
+ }
+
+ @BeforeEach
+ public void setUp() {
+ MongoClientSettings mongoClientSettings = getMongoClientSettingsBuilder()
+ .writeConcern(WriteConcern.MAJORITY)
+ .readConcern(ReadConcern.MAJORITY)
+ .addCommandListener(new CommandListener() {
+ @Override
+ public void commandStarted(final CommandStartedEvent event) {
+ /* This test case examines scenarios where the write or read concern is not forwarded to the server
+ for any Atlas Index Search commands. If a write or read concern is included in the command,
+ the server will return an error.
*/
+ if (isSearchIndexCommand(event)) {
+ BsonDocument command = event.getCommand();
+ assertFalse(command.containsKey("writeConcern"));
+ assertFalse(command.containsKey("readConcern"));
+ }
+ }
+
+ private boolean isSearchIndexCommand(final CommandStartedEvent event) {
+ return event.getCommand().toJson().contains("SearchIndex");
+ }
+ })
+ .build();
+
+ client = createMongoClient(mongoClientSettings);
+ db = client.getDatabase("test");
+
+ String collectionName = UUID.randomUUID().toString();
+ db.createCollection(collectionName);
+ collection = db.getCollection(collectionName);
+ }
+
+ @AfterEach
+ void cleanUp() {
+ try {
+ collection.drop();
+ db.drop();
+ } finally {
+ client.close();
+ }
+ }
+
+ @Test
+ @DisplayName("Case 1: Driver can successfully create and list search indexes")
+ public void shouldCreateAndListSearchIndexes() throws InterruptedException {
+ //given
+ SearchIndexModel searchIndexModel = new SearchIndexModel(TEST_SEARCH_INDEX_NAME_1, NOT_DYNAMIC_MAPPING_DEFINITION);
+
+ //when
+ String createdSearchIndexName = collection.createSearchIndex(TEST_SEARCH_INDEX_NAME_1, NOT_DYNAMIC_MAPPING_DEFINITION);
+
+ //then
+ Assertions.assertEquals(TEST_SEARCH_INDEX_NAME_1, createdSearchIndexName);
+ assertIndexesChanges(isQueryable(), searchIndexModel);
+ }
+
+ @Test
+ @DisplayName("Case 2: Driver can successfully create multiple indexes in batch")
+ public void shouldCreateMultipleIndexesInBatch() throws InterruptedException {
+ //given
+ SearchIndexModel searchIndexModel1 = new SearchIndexModel(TEST_SEARCH_INDEX_NAME_1, NOT_DYNAMIC_MAPPING_DEFINITION);
+ SearchIndexModel searchIndexModel2 = new SearchIndexModel(TEST_SEARCH_INDEX_NAME_2, NOT_DYNAMIC_MAPPING_DEFINITION);
+
+ //when
+ List<String> searchIndexes = collection.createSearchIndexes(asList(searchIndexModel1, searchIndexModel2));
+
+ //then
+ assertThat(searchIndexes, contains(TEST_SEARCH_INDEX_NAME_1, TEST_SEARCH_INDEX_NAME_2));
+ assertIndexesChanges(isQueryable(), searchIndexModel1, searchIndexModel2);
+ }
+
+ @Test
+ @DisplayName("Case 3: Driver can successfully drop search indexes")
+ public void shouldDropSearchIndex() throws InterruptedException {
+ //given
+ String createdSearchIndexName = collection.createSearchIndex(TEST_SEARCH_INDEX_NAME_1, NOT_DYNAMIC_MAPPING_DEFINITION);
+ Assertions.assertEquals(TEST_SEARCH_INDEX_NAME_1, createdSearchIndexName);
+ awaitIndexChanges(isQueryable(), new SearchIndexModel(TEST_SEARCH_INDEX_NAME_1, NOT_DYNAMIC_MAPPING_DEFINITION));
+
+ //when
+ collection.dropSearchIndex(TEST_SEARCH_INDEX_NAME_1);
+
+ //then
+ assertIndexDeleted();
+ }
+
+ @Test
+ @DisplayName("Case 4: Driver can update a search index")
+ public void shouldUpdateSearchIndex() throws InterruptedException {
+ //given
+ String createdSearchIndexName = collection.createSearchIndex(TEST_SEARCH_INDEX_NAME_1, NOT_DYNAMIC_MAPPING_DEFINITION);
+ Assertions.assertEquals(TEST_SEARCH_INDEX_NAME_1, createdSearchIndexName);
+ awaitIndexChanges(isQueryable(), new SearchIndexModel(TEST_SEARCH_INDEX_NAME_1, NOT_DYNAMIC_MAPPING_DEFINITION));
+
+ //when
+ collection.updateSearchIndex(TEST_SEARCH_INDEX_NAME_1, DYNAMIC_MAPPING_DEFINITION);
+
+ //then
+ assertIndexesChanges(isReady().and(isQueryable()), new SearchIndexModel(TEST_SEARCH_INDEX_NAME_1, DYNAMIC_MAPPING_DEFINITION));
+ }
+
+ @Test
+ @DisplayName("Case 5: dropSearchIndex suppresses namespace not found errors")
+ public void shouldSuppressNamespaceErrorWhenDroppingIndexWithoutCollection() {
+ //given
+ collection.drop();
+
+ //when
+ collection.dropSearchIndex("not existent
index"); + } + + @Test + @DisplayName("Case 7 implicit: Driver can successfully handle search index types when creating indexes") + public void shouldHandleImplicitSearchIndexTypes() throws InterruptedException { + //given + String indexName = "test-search-index-case7-implicit"; + + //when + String result = collection.createSearchIndex( + indexName, + NOT_DYNAMIC_MAPPING_DEFINITION); + + //then + assertEquals(indexName, result); + awaitIndexChanges(isQueryable().and(hasSearchIndexType()), new SearchIndexModel(indexName, NOT_DYNAMIC_MAPPING_DEFINITION)); + } + + @Test + @DisplayName("Case 7 explicit 'search' type: Driver can successfully handle search index types when creating indexes") + public void shouldHandleExplicitSearchIndexTypes() throws InterruptedException { + //given + String indexName = "test-search-index-case7-explicit"; + + //when + List searchIndexes = collection.createSearchIndexes(singletonList(new SearchIndexModel( + indexName, + NOT_DYNAMIC_MAPPING_DEFINITION, + SearchIndexType.search()))); + + //then + assertEquals(1, searchIndexes.size()); + assertEquals(indexName, searchIndexes.get(0)); + awaitIndexChanges(isQueryable().and(hasSearchIndexType()), new SearchIndexModel(indexName, NOT_DYNAMIC_MAPPING_DEFINITION)); + } + + @Test + @DisplayName("Case 7 explicit 'vectorSearch' type: Driver can successfully handle search index types when creating indexes") + public void shouldHandleExplicitVectorSearchIndexTypes() throws InterruptedException { + //given + String indexName = "test-search-index-case7-vector"; + + //when + List searchIndexes = collection.createSearchIndexes(singletonList(new SearchIndexModel( + indexName, + VECTOR_SEARCH_DEFINITION, + SearchIndexType.vectorSearch()))); + + //then + assertEquals(1, searchIndexes.size()); + assertEquals(indexName, searchIndexes.get(0)); + awaitIndexChanges(isQueryable().and(hasVectorSearchIndexType()), new SearchIndexModel(indexName, NOT_DYNAMIC_MAPPING_DEFINITION)); + } + + @Test + @DisplayName("Case 8: Driver requires explicit type to create a vector search index") + public void shouldRequireExplicitTypeToCreateVectorSearchIndex() { + //given + String indexName = "test-search-index-case8-error"; + + //when & then + assertThrows(MongoCommandException.class, () -> collection.createSearchIndex( + indexName, + VECTOR_SEARCH_DEFINITION)); + } + + private void assertIndexDeleted() throws InterruptedException { + int attempts = MAX_WAIT_ATTEMPTS; + while (collection.listSearchIndexes().first() != null && checkAttempt(attempts--)) { + await(); + } + } + + private void assertIndexesChanges(final Predicate indexStatus, final SearchIndexModel... searchIndexModels) + throws InterruptedException { + + Map createdIndexes = awaitIndexChanges(indexStatus, searchIndexModels); + Assertions.assertEquals(searchIndexModels.length, createdIndexes.size()); + + for (SearchIndexModel searchIndexModel : searchIndexModels) { + Bson mappings = searchIndexModel.getDefinition(); + String searchIndexName = searchIndexModel.getName(); + + Document createdIndex = createdIndexes.get(searchIndexName); + Assertions.assertNotNull(createdIndex); + Assertions.assertEquals(createdIndex.get("latestDefinition"), mappings); + } + } + + + private Map awaitIndexChanges(final Predicate indexStatus, final SearchIndexModel... 
searchIndexModels)
+ throws InterruptedException {
+ int attempts = MAX_WAIT_ATTEMPTS;
+ while (checkAttempt(attempts--)) {
+ Map<String, Document> existingIndexes = StreamSupport.stream(collection.listSearchIndexes().spliterator(), false)
+ .filter(indexStatus)
+ .collect(Collectors.toMap(document -> document.getString("name"), Function.identity()));
+
+ if (checkNames(existingIndexes, searchIndexModels)) {
+ return existingIndexes;
+ }
+ await();
+ }
+ return Assertions.fail();
+ }
+
+ private Predicate<Document> isQueryable() {
+ return document -> document.getBoolean("queryable");
+ }
+
+ private Predicate<Document> isReady() {
+ return document -> "READY".equals(document.getString("status"));
+ }
+
+
+ private Predicate<Document> hasSearchIndexType() {
+ return document -> "search".equals(document.getString("type"));
+ }
+
+ private Predicate<Document> hasVectorSearchIndexType() {
+ return document -> "vectorSearch".equals(document.getString("type"));
+ }
+
+
+
+ private boolean checkAttempt(final int attempt) {
+ Assertions.assertFalse(attempt <= 0, "Exceeded maximum attempts waiting for Search Index changes in Atlas cluster");
+ return true;
+ }
+
+ private static void await() throws InterruptedException {
+ TimeUnit.SECONDS.sleep(WAIT_INTERVAL_SECONDS);
+ }
+
+ private static boolean checkNames(final Map<String, Document> existingIndexes, final SearchIndexModel... searchIndexModels) {
+ for (SearchIndexModel searchIndexModel : searchIndexModels) {
+ String searchIndexName = searchIndexModel.getName();
+ if (!existingIndexes.containsKey(searchIndexName)) {
+ return false;
+ }
+
+ }
+ return true;
+ }
+} diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientEncryptionCustomEndpointTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientEncryptionCustomEndpointTest.java new file mode 100644 index 00000000000..976c51844f8 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientEncryptionCustomEndpointTest.java @@ -0,0 +1,287 @@ +/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.mongodb.client;
+
+import com.mongodb.ClientEncryptionSettings;
+import com.mongodb.MongoClientException;
+import com.mongodb.client.model.vault.DataKeyOptions;
+import com.mongodb.client.model.vault.EncryptOptions;
+import com.mongodb.client.vault.ClientEncryption;
+import com.mongodb.crypt.capi.MongoCryptException;
+import com.mongodb.lang.Nullable;
+import org.bson.BsonBinary;
+import org.bson.BsonDocument;
+import org.bson.BsonString;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+
+import java.net.ConnectException;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static com.mongodb.ClusterFixture.getEnv;
+import static com.mongodb.ClusterFixture.hasEncryptionTestsEnabled;
+import static com.mongodb.client.Fixture.getMongoClientSettings;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assume.assumeTrue;
+
+@RunWith(Parameterized.class)
+public abstract class AbstractClientEncryptionCustomEndpointTest {
+
+ private ClientEncryption clientEncryption;
+ private ClientEncryption invalidClientEncryption;
+ private final String provider;
+ private final BsonDocument masterKey;
+ private final boolean testInvalidClientEncryption;
+ private final Class<? extends RuntimeException> exceptionClass;
+ private final Class<? extends Exception> wrappedExceptionClass;
+ private final String messageContainedInException;
+
+ public AbstractClientEncryptionCustomEndpointTest(@SuppressWarnings("unused") final String name,
+ final String provider,
+ final BsonDocument masterKey,
+ final boolean testInvalidClientEncryption,
+ @Nullable final Class<? extends RuntimeException> exceptionClass,
+ @Nullable final Class<? extends Exception> wrappedExceptionClass,
+ @Nullable final String messageContainedInException) {
+ this.provider = provider;
+ this.masterKey = masterKey;
+ this.testInvalidClientEncryption = testInvalidClientEncryption;
+ this.exceptionClass = exceptionClass;
+ this.wrappedExceptionClass = wrappedExceptionClass;
+ this.messageContainedInException = messageContainedInException;
+ }
+
+ @Before
+ public void setUp() {
+ assumeTrue("Custom Endpoint tests disabled", hasEncryptionTestsEnabled());
+
+ Map<String, Map<String, Object>> kmsProviders = new HashMap<String, Map<String, Object>>() {{
+ put("aws", new HashMap<String, Object>() {{
+ put("accessKeyId", getEnv("AWS_ACCESS_KEY_ID"));
+ put("secretAccessKey", getEnv("AWS_SECRET_ACCESS_KEY"));
+ }});
+ put("azure", new HashMap<String, Object>() {{
+ put("tenantId", getEnv("AZURE_TENANT_ID"));
+ put("clientId", getEnv("AZURE_CLIENT_ID"));
+ put("clientSecret", getEnv("AZURE_CLIENT_SECRET"));
+ put("identityPlatformEndpoint", "login.microsoftonline.com:443");
+ }});
+ put("gcp", new HashMap<String, Object>() {{
+ put("email", getEnv("GCP_EMAIL"));
+ put("privateKey", getEnv("GCP_PRIVATE_KEY"));
+ put("endpoint", "oauth2.googleapis.com:443");
+ }});
+ put("kmip", new HashMap<String, Object>() {{
+ put("endpoint", "localhost:5698");
+ }});
+ }};
+
+ clientEncryption = getClientEncryption(ClientEncryptionSettings.builder().
+ keyVaultMongoClientSettings(getMongoClientSettings())
+ .kmsProviders(kmsProviders)
+ .keyVaultNamespace("keyvault.datakeys").build());
+
+ Map<String, Map<String, Object>> invalidKmsProviders = new HashMap<String, Map<String, Object>>() {{
+ put("azure", new HashMap<String, Object>() {{
+ put("tenantId", getEnv("AZURE_TENANT_ID"));
+ put("clientId", getEnv("AZURE_CLIENT_ID"));
+ put("clientSecret", getEnv("AZURE_CLIENT_SECRET"));
+ put("identityPlatformEndpoint", "doesnotexist.invalid:443");
+ }});
+ put("gcp", new HashMap<String, Object>() {{
+ put("email", getEnv("GCP_EMAIL"));
+ put("privateKey", getEnv("GCP_PRIVATE_KEY"));
+ put("endpoint", "doesnotexist.invalid:443");
+ }});
+ put("kmip", new HashMap<String, Object>() {{
+ put("endpoint", "doesnotexist.invalid:5698");
+ }});
+ }};
+
+ invalidClientEncryption = getClientEncryption(ClientEncryptionSettings.builder().
+ keyVaultMongoClientSettings(getMongoClientSettings())
+ .kmsProviders(invalidKmsProviders)
+ .keyVaultNamespace("keyvault.datakeys")
+ .build());
+ }
+
+ @After
+ public void after() {
+ if (clientEncryption != null) {
+ try {
+ clientEncryption.close();
+ } catch (Exception e) {
+ // ignore
+ }
+ }
+
+ if (invalidClientEncryption != null) {
+ try {
+ invalidClientEncryption.close();
+ } catch (Exception e) {
+ // ignore
+ }
+ }
+ }
+
+ public abstract ClientEncryption getClientEncryption(ClientEncryptionSettings settings);
+
+ @Test
+ public void testCustomEndpoint() {
+ if (testInvalidClientEncryption) {
+ testEndpoint(clientEncryption, null, null, null);
+ testEndpoint(invalidClientEncryption, exceptionClass, wrappedExceptionClass, messageContainedInException);
+ } else {
+ testEndpoint(clientEncryption, exceptionClass, wrappedExceptionClass, messageContainedInException);
+ }
+ }
+
+ @Parameterized.Parameters(name = "{0}")
+ public static Collection<Object[]> data() {
+ List<Object[]> data = new ArrayList<>();
+
+ data.add(new Object[]{"1. [aws] valid endpoint",
+ "aws",
+ BsonDocument.parse("{"
+ + " region: \"us-east-1\","
+ + " key: \"arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0\""
+ + "}"),
+ false, null, null, null});
+ data.add(new Object[]{"2. [aws] valid explicit endpoint",
+ "aws",
+ BsonDocument.parse("{"
+ + " region: \"us-east-1\","
+ + " key: \"arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0\","
+ + " endpoint: \"kms.us-east-1.amazonaws.com\""
+ + "}"),
+ false, null, null, null});
+ data.add(new Object[]{"3. [aws] valid explicit endpoint and port",
+ "aws",
+ BsonDocument.parse("{"
+ + " region: \"us-east-1\","
+ + " key: \"arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0\","
+ + " endpoint: \"kms.us-east-1.amazonaws.com:443\""
+ + "}"),
+ false, null, null, null});
+ data.add(new Object[]{"4. [kmip] Call client_encryption.createDataKey() and the following masterKey",
+ "kmip",
+ BsonDocument.parse("{\n"
+ + " \"keyId\": \"1\",\n"
+ + " \"endpoint\": \"localhost:12345\""
+ + "}"),
+ false, MongoClientException.class, ConnectException.class, "Connection refused"});
+ data.add(new Object[]{"5. [aws] invalid endpoint host",
+ "aws",
+ BsonDocument.parse("{\n"
+ + " region: \"us-east-1\",\n"
+ + " key: \"arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0\",\n"
+ + " endpoint: \"kms.us-east-2.amazonaws.com\"\n"
+ + "}"),
+ false, MongoClientException.class, MongoCryptException.class, null});
+ data.add(new Object[]{"6.
[aws] invalid endpoint host",
+ "aws",
+ BsonDocument.parse("{\n"
+ + " region: \"us-east-1\",\n"
+ + " key: \"arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0\",\n"
+ + " endpoint: \"doesnotexist.invalid\"\n"
+ + "}"),
+ false, MongoClientException.class, UnknownHostException.class, null});
+
+ data.add(new Object[]{"7. [azure] valid and invalid kms providers test",
+ "azure",
+ BsonDocument.parse("{\n"
+ + " \"keyVaultEndpoint\": \"key-vault-csfle.vault.azure.net\",\n"
+ + " \"keyName\": \"key-name-csfle\"\n"
+ + "}"),
+ true, MongoClientException.class, UnknownHostException.class, null});
+
+ data.add(new Object[]{"8. [gcp] valid and invalid kms providers test",
+ "gcp",
+ BsonDocument.parse("{\n"
+ + " \"projectId\": \"devprod-drivers\",\n"
+ + " \"location\": \"global\",\n"
+ + " \"keyRing\": \"key-ring-csfle\",\n"
+ + " \"keyName\": \"key-name-csfle\",\n"
+ + " \"endpoint\": \"cloudkms.googleapis.com:443\"\n"
+ + "}"),
+ true, MongoClientException.class, UnknownHostException.class, null});
+
+ data.add(new Object[]{"9. [gcp] invalid endpoint",
+ "gcp",
+ BsonDocument.parse("{\n"
+ + " \"projectId\": \"csfle-poc\",\n"
+ + " \"location\": \"global\",\n"
+ + " \"keyRing\": \"test\",\n"
+ + " \"keyName\": \"quickstart\",\n"
+ + " \"endpoint\": \"doesnotexist.invalid:443\"\n"
+ + "}"),
+ false, MongoClientException.class, MongoCryptException.class, "Invalid KMS response"});
+ data.add(new Object[]{"10. [kmip] endpoint from KMS providers map",
+ "kmip",
+ BsonDocument.parse("{\n"
+ + " \"keyId\": \"1\"\n"
+ + "}"),
+ false, null, null, null});
+ data.add(new Object[]{"11. [kmip] endpoint from DataKeyOptions",
+ "kmip",
+ BsonDocument.parse("{\n"
+ + " \"keyId\": \"1\",\n"
+ + " \"endpoint\": \"localhost:5698\"\n"
+ + "}"),
+ false, null, null, null});
+ data.add(new Object[]{"12. [kmip] invalid endpoint from DataKeyOptions",
+ "kmip",
+ BsonDocument.parse("{\n"
+ + " \"keyId\": \"1\",\n"
+ + " \"endpoint\": \"doesnotexist.invalid:5698\"\n"
+ + "}"),
+ false, MongoClientException.class, UnknownHostException.class, null});
+ return data;
+ }
+
+ private void testEndpoint(final ClientEncryption clientEncryption,
+ @Nullable final Class<? extends RuntimeException> exceptionClass,
+ @Nullable final Class<? extends Exception> wrappedExceptionClass,
+ @Nullable final String messageContainedInException) {
+ try {
+ BsonBinary dataKeyId = clientEncryption.createDataKey(provider, new DataKeyOptions().masterKey(masterKey));
+ assertNull("Expected exception, but encryption succeeded", exceptionClass);
+ clientEncryption.encrypt(new BsonString("test"), new EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic")
+ .keyId(dataKeyId));
+
+ } catch (Exception e) {
+ if (exceptionClass == null) {
+ throw e;
+ }
+ assertEquals(exceptionClass, e.getClass());
+ assertEquals(wrappedExceptionClass, e.getCause().getClass());
+ if (messageContainedInException != null) {
+ assertTrue("Actual Error: " + e.getCause().getMessage(), e.getCause().getMessage().contains(messageContainedInException));
+ }
+ }
+ }
+} diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientEncryptionRewrapManyDataKeyProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientEncryptionRewrapManyDataKeyProseTest.java new file mode 100644 index 00000000000..9de4e2a6951 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientEncryptionRewrapManyDataKeyProseTest.java @@ -0,0 +1,182 @@ +/*
+ * Copyright 2008-present MongoDB, Inc.
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.MongoClientException; +import com.mongodb.MongoClientSettings; +import com.mongodb.client.model.vault.DataKeyOptions; +import com.mongodb.client.model.vault.EncryptOptions; +import com.mongodb.client.model.vault.RewrapManyDataKeyOptions; +import com.mongodb.client.model.vault.RewrapManyDataKeyResult; +import com.mongodb.client.vault.ClientEncryption; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.function.Executable; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static com.mongodb.ClusterFixture.getEnv; +import static com.mongodb.ClusterFixture.hasEncryptionTestsEnabled; +import static com.mongodb.client.Fixture.getMongoClient; +import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; + +/** + * See + * 16. Rewrap. 
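+ * <p>
+ * The parameterized test below exercises every source/destination pairing from MASTER_KEYS_BY_PROVIDER,
+ * including rewrapping a key onto its original provider.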
+ */
+public abstract class AbstractClientEncryptionRewrapManyDataKeyProseTest {
+
+ private static final Map<String, BsonDocument> MASTER_KEYS_BY_PROVIDER = new HashMap<>();
+ static {
+ MASTER_KEYS_BY_PROVIDER.put("aws", BsonDocument.parse("{\n"
+ + " \"region\": \"us-east-1\",\n"
+ + " \"key\": \"arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0\"\n"
+ + "}"));
+ MASTER_KEYS_BY_PROVIDER.put("azure", BsonDocument.parse("{\n"
+ + " \"keyVaultEndpoint\": \"key-vault-csfle.vault.azure.net\",\n"
+ + " \"keyName\": \"key-name-csfle\"\n"
+ + "}"));
+ MASTER_KEYS_BY_PROVIDER.put("gcp", BsonDocument.parse("{\n"
+ + " \"projectId\": \"devprod-drivers\",\n"
+ + " \"location\": \"global\",\n"
+ + " \"keyRing\": \"key-ring-csfle\",\n"
+ + " \"keyName\": \"key-name-csfle\"\n"
+ + "}"));
+ MASTER_KEYS_BY_PROVIDER.put("kmip", BsonDocument.parse("{}"));
+ MASTER_KEYS_BY_PROVIDER.put("local", null);
+ }
+
+ private static final Map<String, Map<String, Object>> KMS_PROVIDERS = new HashMap<String, Map<String, Object>>() {{
+ put("aws", new HashMap<String, Object>() {{
+ put("accessKeyId", getEnv("AWS_ACCESS_KEY_ID"));
+ put("secretAccessKey", getEnv("AWS_SECRET_ACCESS_KEY"));
+ }});
+ put("azure", new HashMap<String, Object>() {{
+ put("tenantId", getEnv("AZURE_TENANT_ID"));
+ put("clientId", getEnv("AZURE_CLIENT_ID"));
+ put("clientSecret", getEnv("AZURE_CLIENT_SECRET"));
+ put("identityPlatformEndpoint", "login.microsoftonline.com:443");
+ }});
+ put("gcp", new HashMap<String, Object>() {{
+ put("email", getEnv("GCP_EMAIL"));
+ put("privateKey", getEnv("GCP_PRIVATE_KEY"));
+ put("endpoint", "oauth2.googleapis.com:443");
+ }});
+ put("kmip", new HashMap<String, Object>() {{
+ put("endpoint", "localhost:5698");
+ }});
+ put("local", new HashMap<String, Object>() {{
+ put("key", "Mng0NCt4ZHVUYUJCa1kxNkVyNUR1QURhZ2h2UzR2d2RrZzh0cFBwM3R6NmdWMDFBM"
+ + "UN3YkQ5aXRRMkhGRGdQV09wOGVNYUMxT2k3NjZKelhaQmRCZGJkTXVyZG9uSjFk");
+ }});
+ }};
+
+ protected abstract MongoClient createMongoClient(MongoClientSettings settings);
+ public abstract ClientEncryption getClientEncryption(ClientEncryptionSettings settings);
+
+ public static Collection<Arguments> data() {
+ List<Arguments> data = new ArrayList<>();
+ Set<String> types = MASTER_KEYS_BY_PROVIDER.keySet();
+ for (String srcProvider : types) {
+ for (String dstProvider : types) {
+ data.add(Arguments.of(srcProvider, dstProvider));
+ }
+ }
+ return data;
+ }
+
+ protected AbstractClientEncryptionRewrapManyDataKeyProseTest() {
+ Assumptions.assumeTrue(hasEncryptionTestsEnabled(), "Custom Endpoint tests disabled");
+ }
+
+ @AfterEach
+ void cleanUp(){
+ getMongoClient().getDatabase("keyvault").getCollection("datakeys").drop();
+ }
+
+ @ParameterizedTest
+ @MethodSource("data")
+ public void rewrapWithSeparateClientEncryption(final String srcProvider, final String dstProvider) {
+ BsonDocument srcKey = MASTER_KEYS_BY_PROVIDER.get(srcProvider);
+ BsonDocument dstKey = MASTER_KEYS_BY_PROVIDER.get(dstProvider);
+ BsonString testString = new BsonString("test");
+
+ getMongoClient().getDatabase("keyvault").getCollection("datakeys").drop();
+
+ ClientEncryption clientEncryption1 = getClientEncryption(ClientEncryptionSettings.builder()
+ .keyVaultMongoClientSettings(getMongoClientSettingsBuilder().build())
+ .keyVaultNamespace("keyvault.datakeys")
+ .kmsProviders(KMS_PROVIDERS)
+ .build());
+
+ BsonBinary keyId = clientEncryption1.createDataKey(srcProvider, new DataKeyOptions().masterKey(srcKey));
+
+ BsonBinary ciphertext = clientEncryption1.encrypt(
+ testString,
+ new EncryptOptions("AEAD_AES_256_CBC_HMAC_SHA_512-Deterministic").keyId(keyId));
+
+ ClientEncryption clientEncryption2 =
getClientEncryption(ClientEncryptionSettings.builder() + .keyVaultMongoClientSettings(getMongoClientSettingsBuilder().build()) + .keyVaultNamespace("keyvault.datakeys") + .kmsProviders(KMS_PROVIDERS) + .build()); + + RewrapManyDataKeyResult result = clientEncryption2.rewrapManyDataKey( + new BsonDocument(), + new RewrapManyDataKeyOptions().provider(dstProvider).masterKey(dstKey)); + assertEquals(1, result.getBulkWriteResult().getModifiedCount()); + + assertEquals(testString, clientEncryption1.decrypt(ciphertext)); + assertEquals(testString, clientEncryption2.decrypt(ciphertext)); + } + + @Test + public void shouldThrowClientErrorWhenProviderIsNotSpecified() { + //given + ClientEncryption clientEncryption = getClientEncryption(ClientEncryptionSettings.builder() + .keyVaultMongoClientSettings(getMongoClientSettingsBuilder().build()) + .keyVaultNamespace("keyvault.datakeys") + .kmsProviders(KMS_PROVIDERS) + .build()); + + RewrapManyDataKeyOptions rewrapManyDataKeyOptions = new RewrapManyDataKeyOptions().masterKey(BsonDocument.parse("{}")); + + //when + Executable executable = () -> clientEncryption.rewrapManyDataKey(new BsonDocument(), rewrapManyDataKeyOptions); + + //then + MongoClientException mongoClientException = assertThrows(MongoClientException.class, executable); + + assertEquals("Missing the provider but supplied a master key in the RewrapManyDataKeyOptions", + mongoClientException.getMessage()); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientEncryptionTextExplicitEncryptionTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientEncryptionTextExplicitEncryptionTest.java new file mode 100644 index 00000000000..68bcd764697 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientEncryptionTextExplicitEncryptionTest.java @@ -0,0 +1,324 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoException; +import com.mongodb.MongoNamespace; +import com.mongodb.WriteConcern; +import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.DropCollectionOptions; +import com.mongodb.client.model.vault.EncryptOptions; +import com.mongodb.client.model.vault.TextOptions; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.connection.ServerVersion; +import com.mongodb.fixture.EncryptionFixture; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.Document; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; + +import java.util.Map; + +import static com.mongodb.ClusterFixture.getDefaultDatabaseName; +import static com.mongodb.ClusterFixture.getMongoCryptVersion; +import static com.mongodb.ClusterFixture.hasEncryptionTestsEnabled; +import static com.mongodb.ClusterFixture.isStandalone; +import static com.mongodb.ClusterFixture.serverVersionAtLeast; +import static com.mongodb.client.Fixture.getDefaultDatabase; +import static com.mongodb.client.Fixture.getMongoClient; +import static com.mongodb.client.Fixture.getMongoClientSettings; +import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static com.mongodb.fixture.EncryptionFixture.getKmsProviders; +import static java.util.Arrays.asList; +import static org.junit.Assume.assumeTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assumptions.assumeFalse; +import static util.JsonPoweredTestHelper.getTestDocument; + +public abstract class AbstractClientEncryptionTextExplicitEncryptionTest { + + private static final ServerVersion REQUIRED_LIB_MONGOCRYPT_VERSION = new ServerVersion(asList(1, 15, 1)); + private MongoClient encryptedClient; + private MongoDatabase encryptedDatabase; + private ClientEncryption clientEncryption; + private BsonBinary key1Id; + + protected abstract MongoClient createMongoClient(MongoClientSettings settings); + protected abstract ClientEncryption createClientEncryption(ClientEncryptionSettings settings); + + + @BeforeEach + public void setUp() { + assumeTrue("Text explicit encryption tests disabled", hasEncryptionTestsEnabled()); + assumeTrue("Requires newer MongoCrypt version", getMongoCryptVersion().compareTo(REQUIRED_LIB_MONGOCRYPT_VERSION) >= 0); + assumeTrue(serverVersionAtLeast(8, 2)); + assumeFalse(isStandalone()); + + MongoNamespace dataKeysNamespace = new MongoNamespace("keyvault.datakeys"); + BsonDocument encryptedFieldsPrefixSuffix = bsonDocumentFromPath("encryptedFields-prefix-suffix.json"); + BsonDocument encryptedFieldsSubstring = bsonDocumentFromPath("encryptedFields-substring.json"); + BsonDocument key1Document = bsonDocumentFromPath("keys/key1-document.json"); + + MongoDatabase database = getDefaultDatabase().withWriteConcern(WriteConcern.MAJORITY); + database.getCollection("prefix-suffix") + .drop(new DropCollectionOptions().encryptedFields(encryptedFieldsPrefixSuffix)); + database.createCollection("prefix-suffix", + new 
CreateCollectionOptions().encryptedFields(encryptedFieldsPrefixSuffix)); + + database.getCollection("substring") + .drop(new DropCollectionOptions().encryptedFields(encryptedFieldsSubstring)); + database.createCollection("substring", + new CreateCollectionOptions().encryptedFields(encryptedFieldsSubstring)); + + MongoCollection<BsonDocument> dataKeysCollection = getMongoClient() + .getDatabase(dataKeysNamespace.getDatabaseName()) + .getCollection(dataKeysNamespace.getCollectionName(), BsonDocument.class) + .withWriteConcern(WriteConcern.MAJORITY); + + dataKeysCollection.drop(); + dataKeysCollection.insertOne(key1Document); + key1Id = key1Document.getBinary("_id"); + + Map<String, Map<String, Object>> kmsProviders = getKmsProviders(EncryptionFixture.KmsProviderType.LOCAL); + + clientEncryption = createClientEncryption(ClientEncryptionSettings.builder() + .keyVaultMongoClientSettings(getMongoClientSettings()) + .keyVaultNamespace(dataKeysNamespace.getFullName()) + .kmsProviders(kmsProviders) + .build()); + + encryptedClient = createMongoClient(getMongoClientSettingsBuilder() + .autoEncryptionSettings( + AutoEncryptionSettings.builder() + .keyVaultNamespace(dataKeysNamespace.getFullName()) + .kmsProviders(kmsProviders) + .bypassQueryAnalysis(true) + .build()) + .build()); + + encryptedDatabase = encryptedClient.getDatabase(getDefaultDatabaseName()).withWriteConcern(WriteConcern.MAJORITY); + + EncryptOptions prefixSuffixEncryptOptions = new EncryptOptions("TextPreview") + .keyId(key1Id) + .contentionFactor(0L) + .textOptions(new TextOptions() + .caseSensitive(true) + .diacriticSensitive(true) + .prefixOptions(BsonDocument.parse("{strMaxQueryLength: 10, strMinQueryLength: 2}")) + .suffixOptions(BsonDocument.parse("{strMaxQueryLength: 10, strMinQueryLength: 2}")) + ); + + BsonBinary foobarbaz = clientEncryption.encrypt(new BsonString("foobarbaz"), prefixSuffixEncryptOptions); + + encryptedDatabase + .getCollection("prefix-suffix") + .insertOne(new Document("_id", 0).append("encryptedText", foobarbaz)); + + EncryptOptions substringEncryptOptions = new EncryptOptions("TextPreview") + .keyId(key1Id) + .contentionFactor(0L) + .textOptions(new TextOptions() + .caseSensitive(true) + .diacriticSensitive(true) + .substringOptions(BsonDocument.parse("{strMaxLength: 10, strMaxQueryLength: 10, strMinQueryLength: 2}")) + ); + foobarbaz = clientEncryption.encrypt(new BsonString("foobarbaz"), substringEncryptOptions); + + encryptedDatabase + .getCollection("substring") + .insertOne(new Document("_id", 0).append("encryptedText", foobarbaz)); + } + + @Test + @DisplayName("Case 1: can find a document by prefix") + public void test1CanFindADocumentByPrefix() { + EncryptOptions encryptOptions = new EncryptOptions("TextPreview") + .keyId(key1Id) + .contentionFactor(0L) + .queryType("prefixPreview") + .textOptions(new TextOptions() + .caseSensitive(true) + .diacriticSensitive(true) + .prefixOptions(BsonDocument.parse("{strMaxQueryLength: 10, strMinQueryLength: 2}")) + ); + + BsonBinary encrypted = clientEncryption.encrypt(new BsonString("foo"), encryptOptions); + Document result = encryptedDatabase.getCollection("prefix-suffix") + .find(new Document("$expr", + new Document("$encStrStartsWith", + new Document("input", "$encryptedText").append("prefix", encrypted)))).first(); + + assertDocumentEquals(Document.parse("{ \"_id\": 0, \"encryptedText\": \"foobarbaz\" }"), result); + } + + @Test + @DisplayName("Case 2: can find a document by suffix") + public void test2CanFindADocumentBySuffix() { + EncryptOptions encryptOptions = new EncryptOptions("TextPreview")
+ .keyId(key1Id) + .contentionFactor(0L) + .queryType("suffixPreview") + .textOptions(new TextOptions() + .caseSensitive(true) + .diacriticSensitive(true) + .suffixOptions(BsonDocument.parse("{strMaxQueryLength: 10, strMinQueryLength: 2}")) + ); + + BsonBinary encrypted = clientEncryption.encrypt(new BsonString("baz"), encryptOptions); + Document result = encryptedDatabase.getCollection("prefix-suffix") + .find(new Document("$expr", + new Document("$encStrEndsWith", + new Document("input", "$encryptedText").append("suffix", encrypted)))).first(); + + assertDocumentEquals(Document.parse("{ \"_id\": 0, \"encryptedText\": \"foobarbaz\" }"), result); + } + + @Test + @DisplayName("Case 3: assert no document found by prefix") + public void test3AssertNoDocumentFoundByPrefix() { + EncryptOptions encryptOptions = new EncryptOptions("TextPreview") + .keyId(key1Id) + .contentionFactor(0L) + .queryType("prefixPreview") + .textOptions(new TextOptions() + .caseSensitive(true) + .diacriticSensitive(true) + .prefixOptions(BsonDocument.parse("{strMaxQueryLength: 10, strMinQueryLength: 2}")) + ); + + BsonBinary encrypted = clientEncryption.encrypt(new BsonString("baz"), encryptOptions); + Document result = encryptedDatabase.getCollection("prefix-suffix") + .find(new Document("$expr", + new Document("$encStrStartsWith", + new Document("input", "$encryptedText").append("prefix", encrypted)))).first(); + + assertNull(result); + } + + @Test + @DisplayName("Case 4: assert no document found by suffix") + public void test4AssertNoDocumentFoundBySuffix() { + EncryptOptions encryptOptions = new EncryptOptions("TextPreview") + .keyId(key1Id) + .contentionFactor(0L) + .queryType("suffixPreview") + .textOptions(new TextOptions() + .caseSensitive(true) + .diacriticSensitive(true) + .suffixOptions(BsonDocument.parse("{strMaxQueryLength: 10, strMinQueryLength: 2}")) + ); + + BsonBinary encrypted = clientEncryption.encrypt(new BsonString("foo"), encryptOptions); + Document result = encryptedDatabase.getCollection("prefix-suffix") + .find(new Document("$expr", + new Document("$encStrEndsWith", + new Document("input", "$encryptedText").append("suffix", encrypted)))).first(); + + assertNull(result); + } + + @Test + @DisplayName("Case 5: can find a document by substring") + public void test5CanFindADocumentBySubstring() { + EncryptOptions encryptOptions = new EncryptOptions("TextPreview") + .keyId(key1Id) + .contentionFactor(0L) + .queryType("substringPreview") + .textOptions(new TextOptions() + .caseSensitive(true) + .diacriticSensitive(true) + .substringOptions(BsonDocument.parse("{strMaxLength: 10, strMaxQueryLength: 10, strMinQueryLength: 2}")) + ); + + BsonBinary encrypted = clientEncryption.encrypt(new BsonString("bar"), encryptOptions); + Document result = encryptedDatabase.getCollection("substring") + .find(new Document("$expr", + new Document("$encStrContains", + new Document("input", "$encryptedText").append("substring", encrypted)))).first(); + + assertDocumentEquals(Document.parse("{ \"_id\": 0, \"encryptedText\": \"foobarbaz\" }"), result); + } + + @Test + @DisplayName("Case 6: assert no document found by substring") + public void test6AssertNoDocumentFoundBySubstring() { + EncryptOptions encryptOptions = new EncryptOptions("TextPreview") + .keyId(key1Id) + .contentionFactor(0L) + .queryType("substringPreview") + .textOptions(new TextOptions() + .caseSensitive(true) + .diacriticSensitive(true) + .substringOptions(BsonDocument.parse("{strMaxLength: 10, strMaxQueryLength: 10, strMinQueryLength: 2}")) + ); + + 
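+ // "qux" does not occur anywhere in the stored plaintext "foobarbaz", so the $encStrContains query below should match no documents.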
BsonBinary encrypted = clientEncryption.encrypt(new BsonString("qux"), encryptOptions); + Document result = encryptedDatabase.getCollection("substring") + .find(new Document("$expr", + new Document("$encStrContains", + new Document("input", "$encryptedText").append("substring", encrypted)))).first(); + + assertNull(result); + } + + @Test + @DisplayName("Case 7: assert `contentionFactor` is required") + public void test7AssertContentionFactorIsRequired() { + EncryptOptions encryptOptions = new EncryptOptions("TextPreview") + .keyId(key1Id) + .queryType("prefixPreview") + .textOptions(new TextOptions() + .caseSensitive(true) + .diacriticSensitive(true) + .prefixOptions(BsonDocument.parse("{strMaxQueryLength: 10, strMinQueryLength: 2}")) + ); + MongoException exception = assertThrows(MongoException.class, () -> clientEncryption.encrypt(new BsonString("foo"), encryptOptions)); + assertTrue(exception.getMessage().contains("contention factor is required for textPreview algorithm")); + } + + + @AfterEach + @SuppressWarnings("try") + public void cleanUp() { + //noinspection EmptyTryBlock + getDefaultDatabase().withWriteConcern(WriteConcern.MAJORITY).drop(); + try (ClientEncryption ignored = this.clientEncryption; + MongoClient ignored1 = this.encryptedClient + ) { + // just using try-with-resources to ensure they all get closed, even in the case of exceptions + } + } + + private static void assertDocumentEquals(final Document expectedDocument, final Document actualDocument) { + actualDocument.remove("__safeContent__"); + assertEquals(expectedDocument, actualDocument); + } + + private static BsonDocument bsonDocumentFromPath(final String path) { + return getTestDocument("client-side-encryption/etc/data/" + path); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientMetadataProseTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientMetadataProseTest.java new file mode 100644 index 00000000000..b958afcf145 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientMetadataProseTest.java @@ -0,0 +1,421 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.mongodb.client; + +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoDriverInformation; +import com.mongodb.event.CommandStartedEvent; +import com.mongodb.internal.client.DriverInformation; +import com.mongodb.internal.connection.InternalStreamConnection; +import com.mongodb.internal.connection.TestCommandListener; +import com.mongodb.internal.connection.TestConnectionPoolListener; +import com.mongodb.lang.Nullable; +import org.bson.BsonDocument; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.List; +import java.util.Optional; +import java.util.concurrent.TimeUnit; +import java.util.stream.Stream; + +import static com.mongodb.ClusterFixture.isAuthenticated; +import static com.mongodb.ClusterFixture.isLoadBalanced; +import static com.mongodb.ClusterFixture.sleep; +import static com.mongodb.assertions.Assertions.assertTrue; +import static java.util.Optional.ofNullable; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assumptions.assumeFalse; + +/** + * See Prose tests + */ +public abstract class AbstractClientMetadataProseTest { + + private TestCommandListener commandListener; + private TestConnectionPoolListener connectionPoolListener; + + protected abstract MongoClient createMongoClient(@Nullable MongoDriverInformation driverInformation, + MongoClientSettings mongoClientSettings); + + @BeforeEach + public void setUp() { + assumeFalse(isLoadBalanced()); + assumeFalse(isAuthenticated()); + + commandListener = new TestCommandListener(); + connectionPoolListener = new TestConnectionPoolListener(); + InternalStreamConnection.setRecordEverything(true); + } + + @AfterEach + public void tearDown() { + InternalStreamConnection.setRecordEverything(false); + } + + @DisplayName("Test 1: Test that the driver updates metadata") + @ParameterizedTest(name = "{index} => {arguments}") + @MethodSource("provideDriverInformation") + void testThatTheDriverUpdatesMetadata(final DriverInformation driverInformation) { + //given + try (MongoClient mongoClient = createMongoClient(getInitialMongoDriverInformation(), getMongoClientSettings())) { + sleep(5); // wait for connection to become idle + BsonDocument initialClientMetadata = executePingAndCaptureMetadataHandshake(mongoClient) + .orElseThrow(AbstractClientMetadataProseTest::failOnEmptyMetadata); + + BsonDocument generatedDriverInformation = initialClientMetadata.getDocument("driver"); + String generatedDriverName = generatedDriverInformation.get("name").asString().getValue(); + String generatedVersionName = generatedDriverInformation.get("version").asString().getValue(); + String generatedPlatformName = initialClientMetadata.get("platform").asString().getValue(); + + //when + sleep(5); // wait for connection to become idle + mongoClient.appendMetadata(getMongoDriverInformation(driverInformation)); + + //then + BsonDocument updatedClientMetadata = executePingAndCaptureMetadataHandshake(mongoClient) + .orElseThrow(AbstractClientMetadataProseTest::failOnEmptyMetadata); + BsonDocument updatedDriverInformation = updatedClientMetadata.getDocument("driver"); + + String driverName = driverInformation.getDriverName(); + String driverVersion = 
driverInformation.getDriverVersion(); + String driverPlatform = driverInformation.getDriverPlatform(); + String expectedDriverName = driverName == null ? generatedDriverName : generatedDriverName + "|" + driverName; + String expectedDriverVersion = driverVersion == null ? generatedVersionName : generatedVersionName + "|" + driverVersion; + String expectedDriverPlatform = driverPlatform == null ? generatedPlatformName : generatedPlatformName + "|" + driverPlatform; + + assertEquals(expectedDriverName, updatedDriverInformation.getString("name").getValue()); + assertTrue(updatedDriverInformation.getString("version").getValue().endsWith(expectedDriverVersion)); + assertTrue(updatedClientMetadata.getString("platform").getValue().endsWith(expectedDriverPlatform)); + + assertEquals( + withRemovedKeys(updatedClientMetadata, "driver", "platform"), + withRemovedKeys(initialClientMetadata, "driver", "platform")); + } + } + + @DisplayName("Test 2: Multiple Successive Metadata Updates") + @ParameterizedTest(name = "{index} => {arguments}") + @MethodSource("provideDriverInformation") + void testMultipleSuccessiveMetadataUpdates(final DriverInformation driverInformation) { + //given + try (MongoClient mongoClient = createMongoClient(null, getMongoClientSettings())) { + + mongoClient.appendMetadata(getInitialMongoDriverInformation()); + + BsonDocument initialClientMetadata = executePingAndCaptureMetadataHandshake(mongoClient) + .orElseThrow(AbstractClientMetadataProseTest::failOnEmptyMetadata); + + BsonDocument generatedDriverInformation = initialClientMetadata.getDocument("driver"); + String generatedDriverName = generatedDriverInformation.get("name").asString().getValue(); + String generatedVersionName = generatedDriverInformation.get("version").asString().getValue(); + String generatedPlatformName = initialClientMetadata.get("platform").asString().getValue(); + + //when + sleep(5); // wait for connection to become idle + mongoClient.appendMetadata(getMongoDriverInformation(driverInformation)); + + //then + BsonDocument updatedClientMetadata = executePingAndCaptureMetadataHandshake(mongoClient) + .orElseThrow(AbstractClientMetadataProseTest::failOnEmptyMetadata); + BsonDocument updatedDriverInformation = updatedClientMetadata.getDocument("driver"); + + String driverName = driverInformation.getDriverName(); + String driverVersion = driverInformation.getDriverVersion(); + String driverPlatform = driverInformation.getDriverPlatform(); + String expectedDriverName = driverName == null ? generatedDriverName : generatedDriverName + "|" + driverName; + String expectedDriverVersion = driverVersion == null ? generatedVersionName : generatedVersionName + "|" + driverVersion; + String expectedDriverPlatform = driverPlatform == null ? 
generatedPlatformName : generatedPlatformName + "|" + driverPlatform; + + assertEquals(expectedDriverName, updatedDriverInformation.getString("name").getValue()); + assertTrue(updatedDriverInformation.getString("version").getValue().endsWith(expectedDriverVersion)); + assertTrue(updatedClientMetadata.getString("platform").getValue().endsWith(expectedDriverPlatform)); + + assertEquals( + withRemovedKeys(updatedClientMetadata, "driver", "platform"), + withRemovedKeys(initialClientMetadata, "driver", "platform")); + } + } + + @DisplayName("Test 3: Multiple Successive Metadata Updates with Duplicate Data") + @ParameterizedTest(name = "{index} => {arguments}") + @MethodSource("provideDriverAndFrameworkInformation") + void testMultipleSuccessiveMetadataUpdatesWithDuplicateData(final DriverInformation driverInformation) { + //given + try (MongoClient mongoClient = createMongoClient(null, getMongoClientSettings())) { + mongoClient.appendMetadata(getInitialMongoDriverInformation()); + + BsonDocument initialClientMetadata = executePingAndCaptureMetadataHandshake(mongoClient) + .orElseThrow(AbstractClientMetadataProseTest::failOnEmptyMetadata); + + BsonDocument generatedDriverInformation = initialClientMetadata.getDocument("driver"); + String generatedDriverName = generatedDriverInformation.get("name").asString().getValue(); + String generatedVersionName = generatedDriverInformation.get("version").asString().getValue(); + String generatedPlatformName = initialClientMetadata.get("platform").asString().getValue(); + + //when + sleep(5); // wait for connection to become idle + mongoClient.appendMetadata(getMongoDriverInformation(driverInformation)); + + //then + BsonDocument updatedClientMetadata = executePingAndCaptureMetadataHandshake(mongoClient) + .orElseThrow(AbstractClientMetadataProseTest::failOnEmptyMetadata); + BsonDocument updatedDriverInformation = updatedClientMetadata.getDocument("driver"); + + String expectedDriverName = generatedDriverName; + String expectedDriverVersion = generatedVersionName; + String expectedDriverPlatform = generatedPlatformName; + + if (!(driverInformation.equals(INITIAL_DRIVER_INFORMATION))) { + expectedDriverName = generatedDriverName + "|" + driverInformation.getDriverName(); + expectedDriverVersion = generatedVersionName + "|" + driverInformation.getDriverVersion(); + expectedDriverPlatform = generatedPlatformName + "|" + driverInformation.getDriverPlatform(); + } + + assertEquals(expectedDriverName, updatedDriverInformation.getString("name").getValue()); + assertTrue(updatedDriverInformation.getString("version").getValue().endsWith(expectedDriverVersion)); + assertTrue(updatedClientMetadata.getString("platform").getValue().endsWith(expectedDriverPlatform)); + } + } + + @DisplayName("Test 4: Multiple Metadata Updates with Duplicate Data") + @Test + void testMultipleMetadataUpdatesWithDuplicateData() { + // given + try (MongoClient mongoClient = createMongoClient(null, getMongoClientSettings())) { + mongoClient.appendMetadata(getMongoDriverInformation(new DriverInformation("library", "1.2", "Library Platform"))); + + //when + sleep(5); // wait for connection to become idle + mongoClient.appendMetadata(getMongoDriverInformation(new DriverInformation("framework", "2.0", "Framework Platform"))); + BsonDocument clientMetaData = executePingAndCaptureMetadataHandshake(mongoClient) + .orElseThrow(AbstractClientMetadataProseTest::failOnEmptyMetadata); + + // then + sleep(5); // wait for connection to become idle + mongoClient.appendMetadata(getMongoDriverInformation(new 
DriverInformation("library", "1.2", "Library Platform"))); + + // then + BsonDocument updatedClientMetadata = executePingAndCaptureMetadataHandshake(mongoClient) + .orElseThrow(AbstractClientMetadataProseTest::failOnEmptyMetadata); + + assertEquals(clientMetaData, updatedClientMetadata); + } + } + + @DisplayName("Test 5: Metadata is not appended if identical to initial metadata") + @Test + void testMetadataIsNotAppendedIfIdenticalToInitialMetadata() { + // given + MongoDriverInformation initialWrappingLibraryDriverInformation = getInitialMongoDriverInformation(); + try (MongoClient mongoClient = createMongoClient(initialWrappingLibraryDriverInformation, getMongoClientSettings())) { + //when + BsonDocument clientMetaData = executePingAndCaptureMetadataHandshake(mongoClient) + .orElseThrow(AbstractClientMetadataProseTest::failOnEmptyMetadata); + + // then + sleep(5); // wait for connection to become idle + mongoClient.appendMetadata(getMongoDriverInformation(new DriverInformation("library", "1.2", "Library Platform"))); + + // then + BsonDocument updatedClientMetadata = executePingAndCaptureMetadataHandshake(mongoClient) + .orElseThrow(AbstractClientMetadataProseTest::failOnEmptyMetadata); + + assertEquals(clientMetaData, updatedClientMetadata); + } + } + + @DisplayName("Test 6: Metadata is not appended if identical to initial metadata (separated by non-identical metadata)") + @Test + void testMetadataIsNotAppendedIfIdenticalToInitialMetadataSeparatedByNonIdenticalMetadata() { + // given + MongoDriverInformation initialWrappingLibraryDriverInformation = getInitialMongoDriverInformation(); + try (MongoClient mongoClient = createMongoClient(initialWrappingLibraryDriverInformation, getMongoClientSettings())) { + //when + BsonDocument clientMetaData = executePingAndCaptureMetadataHandshake(mongoClient) + .orElseThrow(AbstractClientMetadataProseTest::failOnEmptyMetadata); + + // then + sleep(5); // wait for connection to become idle + mongoClient.appendMetadata(getMongoDriverInformation(new DriverInformation("library", "1.2", "Library Platform"))); + + // then + BsonDocument updatedClientMetadata = executePingAndCaptureMetadataHandshake(mongoClient) + .orElseThrow(AbstractClientMetadataProseTest::failOnEmptyMetadata); + + assertEquals(clientMetaData, updatedClientMetadata); + + // then + sleep(5); // wait for connection to become idle + mongoClient.appendMetadata(getMongoDriverInformation(new DriverInformation("framework", "1.2", "Library Platform"))); + + clientMetaData = executePingAndCaptureMetadataHandshake(mongoClient) + .orElseThrow(AbstractClientMetadataProseTest::failOnEmptyMetadata); + + // then + sleep(5); // wait for connection to become idle + mongoClient.appendMetadata(getMongoDriverInformation(new DriverInformation("library", "1.2", "Library Platform"))); + + updatedClientMetadata = executePingAndCaptureMetadataHandshake(mongoClient) + .orElseThrow(AbstractClientMetadataProseTest::failOnEmptyMetadata); + + assertEquals(clientMetaData, updatedClientMetadata); + } + } + + @DisplayName("Test 7: Empty strings are considered unset when appending duplicate metadata") + @ParameterizedTest(name = "{index} => {arguments}") + @MethodSource("provideDriverInformationWithNullsAndEmptyStrings") + void testEmptyStringsAreConsideredUnsetWhenAppendingDuplicateMetadata( + final DriverInformation initialDriverInformation, + final DriverInformation updatedDriverInformation) { + // given + try (MongoClient mongoClient = createMongoClient(null, getMongoClientSettings())) { + //when + 
mongoClient.appendMetadata(getMongoDriverInformation(initialDriverInformation)); + + BsonDocument clientMetaData = executePingAndCaptureMetadataHandshake(mongoClient) + .orElseThrow(AbstractClientMetadataProseTest::failOnEmptyMetadata); + + // then + sleep(5); // wait for connection to become idle + mongoClient.appendMetadata(getMongoDriverInformation(updatedDriverInformation)); + + // then + BsonDocument updatedClientMetadata = executePingAndCaptureMetadataHandshake(mongoClient) + .orElseThrow(AbstractClientMetadataProseTest::failOnEmptyMetadata); + + assertEquals(clientMetaData, updatedClientMetadata); + } + } + + @DisplayName("Test 8: Empty strings are considered unset when appending metadata identical to initial metadata") + @ParameterizedTest(name = "{index} => {arguments}") + @MethodSource("provideDriverInformationWithNullsAndEmptyStrings") + void testEmptyStringsAreConsideredUnsetWhenAppendingMetadataIdenticalToInitialMetadata( + final DriverInformation initialDriverInformation, + final DriverInformation updatedDriverInformation) { + // given + try (MongoClient mongoClient = createMongoClient(getMongoDriverInformation(initialDriverInformation), getMongoClientSettings())) { + //when + BsonDocument clientMetaData = executePingAndCaptureMetadataHandshake(mongoClient) + .orElseThrow(AbstractClientMetadataProseTest::failOnEmptyMetadata); + + // then + sleep(5); // wait for connection to become idle + mongoClient.appendMetadata(getMongoDriverInformation(updatedDriverInformation)); + + // then + BsonDocument updatedClientMetadata = executePingAndCaptureMetadataHandshake(mongoClient) + .orElseThrow(AbstractClientMetadataProseTest::failOnEmptyMetadata); + + assertEquals(clientMetaData, updatedClientMetadata); + } + } + + public static Stream<Arguments> provideDriverInformation() { + return Stream.of( + Arguments.of(new DriverInformation("framework", "2.0", "Framework Platform")), + Arguments.of(new DriverInformation("framework", "2.0", null)), + Arguments.of(new DriverInformation("framework", null, "Framework Platform")), + Arguments.of(new DriverInformation("framework", null, null)) + ); + } + + public static Stream<Arguments> provideDriverAndFrameworkInformation() { + return Stream.of( + Arguments.of(new DriverInformation("library", "1.2", "Library Platform")), + Arguments.of(new DriverInformation("framework", "1.2", "Library Platform")), + Arguments.of(new DriverInformation("library", "2.0", "Library Platform")), + Arguments.of(new DriverInformation("library", "1.2", "Framework Platform")), + Arguments.of(new DriverInformation("framework", "2.0", "Library Platform")), + Arguments.of(new DriverInformation("framework", "1.2", "Framework Platform")), + Arguments.of(new DriverInformation("library", "2.0", "Framework Platform")) + ); + } + + public static Stream<Arguments> provideDriverInformationWithNullsAndEmptyStrings() { + return Stream.of( + Arguments.of(new DriverInformation(null, "1.2", "Library Platform"), new DriverInformation("", "1.2", "Library Platform")), + Arguments.of(new DriverInformation("library", null, "Library Platform"), new DriverInformation("library", "", "Library Platform")), + Arguments.of(new DriverInformation("library", "1.2", null), new DriverInformation("library", "1.2", "")) + ); + } + + + private MongoClientSettings getMongoClientSettings() { + return getMongoClientSettingsBuilder() + .applyToConnectionPoolSettings(builder -> + builder.maxConnectionIdleTime(1, TimeUnit.MILLISECONDS)) + .build(); + } + + private Optional<BsonDocument> executePingAndCaptureMetadataHandshake(final MongoClient mongoClient) { + 
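+ // A fresh connection is expected here (maxConnectionIdleTime is 1 ms in these settings), so the ping should trigger a new handshake whose metadata the command listener records.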
commandListener.reset(); + mongoClient.getDatabase("admin") + .runCommand(BsonDocument.parse("{ping: 1}")); + + List<CommandStartedEvent> commandStartedEvents = commandListener.getCommandStartedEvents("isMaster"); + + if (commandStartedEvents.isEmpty()) { + return Optional.empty(); + } + CommandStartedEvent event = commandStartedEvents.get(0); + BsonDocument helloCommand = event.getCommand(); + return Optional.of(helloCommand.getDocument("client")); + } + + protected MongoClientSettings.Builder getMongoClientSettingsBuilder() { + return Fixture.getMongoClientSettingsBuilder() + .addCommandListener(commandListener) + .applyToConnectionPoolSettings(builder -> + builder.addConnectionPoolListener(connectionPoolListener)); + } + + private static BsonDocument withRemovedKeys(final BsonDocument updatedClientMetadata, + final String... keysToFilter) { + BsonDocument clone = updatedClientMetadata.clone(); + for (String keyToRemove : keysToFilter) { + clone.remove(keyToRemove); + } + return clone; + } + + private static final DriverInformation INITIAL_DRIVER_INFORMATION = new DriverInformation("library", "1.2", "Library Platform"); + + private static MongoDriverInformation getInitialMongoDriverInformation() { + return getMongoDriverInformation(INITIAL_DRIVER_INFORMATION); + } + + private static MongoDriverInformation getMongoDriverInformation(final DriverInformation driverInformation) { + MongoDriverInformation.Builder builder = MongoDriverInformation.builder(); + ofNullable(driverInformation.getDriverName()).ifPresent(builder::driverName); + ofNullable(driverInformation.getDriverVersion()).ifPresent(builder::driverVersion); + ofNullable(driverInformation.getDriverPlatform()).ifPresent(builder::driverPlatform); + return builder.build(); + } + + private static AssertionError failOnEmptyMetadata() { + return Assertions.fail("Client metadata was expected to be present after ping command"); + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionAutoDataKeysTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionAutoDataKeysTest.java new file mode 100644 index 00000000000..8343ffcf107 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionAutoDataKeysTest.java @@ -0,0 +1,247 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ * + * + */ + +package com.mongodb.client; + +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoCommandException; +import com.mongodb.MongoConfigurationException; +import com.mongodb.MongoNamespace; +import com.mongodb.MongoWriteException; +import com.mongodb.client.model.CreateCollectionOptions; +import com.mongodb.client.model.CreateEncryptedCollectionParams; +import com.mongodb.client.model.vault.EncryptOptions; +import com.mongodb.client.vault.ClientEncryption; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.BsonString; +import org.bson.Document; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.EnumSet; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; +import java.util.function.Consumer; +import java.util.function.Supplier; +import java.util.stream.Stream; + +import static com.mongodb.ClusterFixture.getEnv; +import static com.mongodb.ClusterFixture.isStandalone; +import static com.mongodb.ClusterFixture.serverVersionAtLeast; +import static com.mongodb.client.Fixture.getMongoClientSettings; +import static com.mongodb.client.unified.UnifiedClientEncryptionHelper.localKmsProviderKey; +import static java.util.Collections.emptyMap; +import static java.util.stream.Collectors.toMap; +import static java.util.stream.StreamSupport.stream; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assumptions.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeTrue; +import static org.junit.jupiter.params.ParameterizedTest.DISPLAY_NAME_PLACEHOLDER; + +/** + * See + * 21. Automatic Data Encryption Keys. + */ +public abstract class AbstractClientSideEncryptionAutoDataKeysTest { + private static final String COLL_NAME = "testing1"; + private static final MongoNamespace KEY_VAULT_NAMESPACE = new MongoNamespace("keyvault", "datakeys"); + + private MongoClient client; + private MongoDatabase db; + private ClientEncryption clientEncryption; + + @BeforeEach + public void setUp() { + assumeTrue(serverVersionAtLeast(7, 0)); + assumeFalse(isStandalone()); + + client = createMongoClient(getMongoClientSettings()); + Set<KmsProvider> kmsProviders = KmsProvider.detect(); + clientEncryption = createClientEncryption(ClientEncryptionSettings.builder() + .keyVaultMongoClientSettings(getMongoClientSettings()) + .keyVaultNamespace(KEY_VAULT_NAMESPACE.getFullName()) + .kmsProviders(kmsProviders.stream().collect(toMap( + provider -> provider.name, provider -> emptyMap()))) + .kmsProviderPropertySuppliers(kmsProviders.stream().collect(toMap( + provider -> provider.name, provider -> provider.propertiesSupplier))) + .build()); + client.getDatabase(KEY_VAULT_NAMESPACE.getDatabaseName()).drop(); + db = client.getDatabase("autoDataKeysTest"); + db.drop(); + } + + @AfterEach + @SuppressWarnings("try") + public void cleanUp() { + try (ClientEncryption ignored = clientEncryption; + MongoClient ignored1 = client) { + // empty + } + } + + /** + * See + * + * Case 1: Simple Creation and Validation.
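+ * (A {@code null} keyId asks the driver to provision the data key during collection creation; the test then confirms that inserting a plaintext value fails server-side schema validation.)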
+ */ + @ParameterizedTest(name = DISPLAY_NAME_PLACEHOLDER + " {0}") + @MethodSource("arguments") + void simpleCreationAndValidation(final KmsProvider kmsProvider) { + CreateCollectionOptions createCollectionOptions = new CreateCollectionOptions().encryptedFields(Document.parse( + "{" + + " fields: [{" + + " path: 'ssn'," + + " bsonType: 'string'," + + " keyId: null" + + " }]" + + "}")); + clientEncryption.createEncryptedCollection(db, COLL_NAME, createCollectionOptions, + kmsProvider.createEncryptedCollectionParamsSupplier.get()); + MongoCollection<Document> coll = db.getCollection(COLL_NAME); + assertEquals( + 121, // DocumentValidationFailure + assertThrows(MongoWriteException.class, () -> coll.insertOne(Document.parse("{ ssn: '123-45-6789' }"))) + .getCode()); + } + + /** + * See + * + * Case 2: Missing encryptedFields. + */ + @ParameterizedTest(name = DISPLAY_NAME_PLACEHOLDER + " {0}") + @MethodSource("arguments") + void missingEncryptedFields(final KmsProvider kmsProvider) { + assertThrows(MongoConfigurationException.class, () -> clientEncryption.createEncryptedCollection( + db, COLL_NAME, new CreateCollectionOptions(), kmsProvider.createEncryptedCollectionParamsSupplier.get())); + assertTrue(stream(db.listCollectionNames().spliterator(), false).noneMatch(name -> name.equals(COLL_NAME))); + } + + /** + * See + * + * Case 3: Invalid keyId. + */ + @ParameterizedTest(name = DISPLAY_NAME_PLACEHOLDER + " {0}") + @MethodSource("arguments") + void invalidKeyId(final KmsProvider kmsProvider) { + CreateCollectionOptions createCollectionOptions = new CreateCollectionOptions().encryptedFields(Document.parse( + "{" + + " fields: [{" + + " path: 'ssn'," + + " bsonType: 'string'," + + " keyId: false" + + " }]" + + "}")); + assertEquals( + 14, // TypeMismatch + assertThrows(MongoCommandException.class, () -> clientEncryption.createEncryptedCollection( + db, COLL_NAME, createCollectionOptions, kmsProvider.createEncryptedCollectionParamsSupplier.get())) + .getCode()); + } + + /** + * See + * + * Case 4: Insert encrypted value.
+ */ + @ParameterizedTest(name = DISPLAY_NAME_PLACEHOLDER + " {0}") + @MethodSource("arguments") + void insertEncryptedValue(final KmsProvider kmsProvider) { + CreateCollectionOptions createCollectionOptions = new CreateCollectionOptions().encryptedFields(Document.parse( + "{" + + " fields: [{" + + " path: 'ssn'," + + " bsonType: 'string'," + + " keyId: null" + + " }]" + + "}")); + BsonDocument encryptedFields = clientEncryption.createEncryptedCollection(db, COLL_NAME, createCollectionOptions, + kmsProvider.createEncryptedCollectionParamsSupplier.get()); + MongoCollection<Document> coll = db.getCollection(COLL_NAME); + BsonBinary dataKeyId = encryptedFields.getArray("fields").get(0).asDocument().getBinary("keyId"); + BsonBinary encryptedValue = clientEncryption.encrypt(new BsonString("123-45-6789"), + new EncryptOptions("Unindexed").keyId(dataKeyId)); + coll.insertOne(new Document("ssn", encryptedValue)); + } + + protected abstract MongoClient createMongoClient(MongoClientSettings settings); + + protected abstract ClientEncryption createClientEncryption(ClientEncryptionSettings settings); + + private static Stream<Arguments> arguments() { + return KmsProvider.detect().stream().map(Arguments::of); + } + + private enum KmsProvider { + LOCAL("local", + kmsProviderProperties -> kmsProviderProperties.put("key", localKmsProviderKey()), + createEncryptedCollectionParams -> {} + ), + AWS("aws", + kmsProviderProperties -> { + kmsProviderProperties.put("accessKeyId", getEnv("AWS_ACCESS_KEY_ID")); + kmsProviderProperties.put("secretAccessKey", getEnv("AWS_SECRET_ACCESS_KEY")); + }, + createEncryptedCollectionParams -> createEncryptedCollectionParams.masterKey(BsonDocument.parse( + "{" + + " region: 'us-east-1'," + + " key: 'arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0'" + + "}")) + ); + + private final String name; + private final Supplier<Map<String, Object>> propertiesSupplier; + private final Supplier<CreateEncryptedCollectionParams> createEncryptedCollectionParamsSupplier; + + private static Set<KmsProvider> detect() { + String awsAccessKeyId = getEnv("AWS_ACCESS_KEY_ID"); + return awsAccessKeyId != null && !awsAccessKeyId.isEmpty() + ? EnumSet.allOf(KmsProvider.class) + : EnumSet.of(KmsProvider.LOCAL); + } + + KmsProvider(final String name, final Consumer<Map<String, Object>> propertiesUpdater, + final Consumer<CreateEncryptedCollectionParams> encryptedCollectionParamsUpdater) { + this.name = name; + this.propertiesSupplier = () -> { + Map<String, Object> result = new HashMap<>(); + propertiesUpdater.accept(result); + return result; + }; + this.createEncryptedCollectionParamsSupplier = () -> { + CreateEncryptedCollectionParams result = new CreateEncryptedCollectionParams(name); + encryptedCollectionParamsUpdater.accept(result); + return result; + }; + } + + @Override + public String toString() { + return name; + } + } +} diff --git a/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionAwsCredentialFromEnvironmentTest.java b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionAwsCredentialFromEnvironmentTest.java new file mode 100644 index 00000000000..fd91948b0b4 --- /dev/null +++ b/driver-sync/src/test/functional/com/mongodb/client/AbstractClientSideEncryptionAwsCredentialFromEnvironmentTest.java @@ -0,0 +1,319 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.mongodb.client; + +import com.mongodb.AutoEncryptionSettings; +import com.mongodb.ClientEncryptionSettings; +import com.mongodb.MongoClientSettings; +import com.mongodb.MongoConfigurationException; +import com.mongodb.client.model.vault.DataKeyOptions; +import com.mongodb.client.vault.ClientEncryption; +import com.mongodb.crypt.capi.MongoCryptException; +import com.mongodb.lang.NonNull; +import com.mongodb.lang.Nullable; +import org.bson.BsonBinary; +import org.bson.BsonDocument; +import org.bson.Document; +import org.junit.jupiter.api.Assertions; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Base64; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Supplier; +import java.util.stream.Stream; + +import static com.mongodb.ClusterFixture.getEnv; +import static com.mongodb.ClusterFixture.isClientSideEncryptionTest; +import static com.mongodb.client.Fixture.getMongoClientSettingsBuilder; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assumptions.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +public abstract class AbstractClientSideEncryptionAwsCredentialFromEnvironmentTest { + + private static final String MASTER_KEY = "{" + + "region: \"us-east-1\", " + + "key: \"arn:aws:kms:us-east-1:579766882180:key/89fcc2c4-08b0-4bd9-9f25-e30687b580d0\"}"; + + protected abstract ClientEncryption createClientEncryption(ClientEncryptionSettings settings); + + protected abstract MongoClient createMongoClient(MongoClientSettings settings); + + @Test + public void testGetCredentialsFromEnvironment() { + assumeTrue(System.getenv().containsKey("AWS_ACCESS_KEY_ID")); + + Map<String, Map<String, Object>> kmsProviders = new HashMap<String, Map<String, Object>>() {{ + put("aws", new HashMap<>()); + }}; + + try (ClientEncryption clientEncryption = createClientEncryption(ClientEncryptionSettings.builder() + .keyVaultNamespace("test.datakeys") + .kmsProviders(kmsProviders) + .keyVaultMongoClientSettings(Fixture.getMongoClientSettings()) + .build())) { + + // If this succeeds, then it means credentials have been fetched from the environment as expected + BsonBinary dataKeyId = clientEncryption.createDataKey("aws", new DataKeyOptions().masterKey( + BsonDocument.parse(MASTER_KEY))); + + String base64DataKeyId = Base64.getEncoder().encodeToString(dataKeyId.getData()); + + Map<String, BsonDocument> schemaMap = new HashMap<>(); + schemaMap.put("test.coll", getSchema(base64DataKeyId)); + AutoEncryptionSettings autoEncryptionSettings = AutoEncryptionSettings.builder() + .kmsProviders(kmsProviders) + .keyVaultNamespace("test.datakeys") + .schemaMap(schemaMap) + .build(); + try 
(MongoClient client = createMongoClient(getMongoClientSettingsBuilder() + .autoEncryptionSettings(autoEncryptionSettings) + .build())) { + // If this succeeds, then it means credentials have been fetched from the environment as expected + client.getDatabase("test").getCollection("coll") + .insertOne(new Document("encryptedField", "encryptMe")); + } + } + } + @Test + public void testGetCredentialsFromSupplier() { + assumeFalse(System.getenv().containsKey("AWS_ACCESS_KEY_ID")); + assumeTrue(isClientSideEncryptionTest()); + + Map<String, Map<String, Object>> kmsProviders = new HashMap<String, Map<String, Object>>() {{ + put("aws", new HashMap<>()); + }}; + + Map<String, Supplier<Map<String, Object>>> kmsProviderPropertySuppliers = new HashMap<String, Supplier<Map<String, Object>>>() {{ + put("aws", () -> new HashMap<String, Object>() {{ + put("accessKeyId", getEnv("AWS_ACCESS_KEY_ID")); + put("secretAccessKey", getEnv("AWS_SECRET_ACCESS_KEY")); + }}); + }}; + + try (ClientEncryption clientEncryption = createClientEncryption(ClientEncryptionSettings.builder() + .keyVaultNamespace("test.datakeys") + .kmsProviders(kmsProviders) + .kmsProviderPropertySuppliers(kmsProviderPropertySuppliers) + .keyVaultMongoClientSettings(Fixture.getMongoClientSettings()) + .build())) { + + // If this succeeds, then it means credentials have been fetched from the supplier as expected + BsonBinary dataKeyId = clientEncryption.createDataKey("aws", new DataKeyOptions().masterKey( + BsonDocument.parse(MASTER_KEY))); + + String base64DataKeyId = Base64.getEncoder().encodeToString(dataKeyId.getData()); + + Map<String, BsonDocument> schemaMap = new HashMap<>(); + schemaMap.put("test.coll", getSchema(base64DataKeyId)); + AutoEncryptionSettings autoEncryptionSettings = AutoEncryptionSettings.builder() + .kmsProviders(kmsProviders) + .kmsProviderPropertySuppliers(kmsProviderPropertySuppliers) + .keyVaultNamespace("test.datakeys") + .schemaMap(schemaMap) + .build(); + try (MongoClient client = createMongoClient(getMongoClientSettingsBuilder() + .autoEncryptionSettings(autoEncryptionSettings) + .build())) { + // If this succeeds, then it means credentials have been fetched from the supplier as expected + client.getDatabase("test").getCollection("coll") + .insertOne(new Document("encryptedField", "encryptMe")); + } + } + } + + public static Stream<Arguments> createUnexpectedSupplierArguments() { + return Stream.of( + Arguments.of("ThrowsAnException", (Supplier<Map<String, Object>>) () -> { + throw new RuntimeException(); + }, "", RuntimeException.class), + Arguments.of("ReturnsNull", (Supplier<Map<String, Object>>) () -> null, " The returned value is null.", null), + Arguments.of("ReturnsEmptyMap", (Supplier<Map<String, Object>>) Collections::emptyMap, " The returned value is empty.", + null) + ); + } + + @ParameterizedTest(name = "shouldThrowMongoConfigurationIfSupplier{0}") + @MethodSource("createUnexpectedSupplierArguments") + public void shouldThrowMongoConfigurationIfSupplierReturnsDoesSomethingUnexpected(final String testNameSuffix, + final Supplier<Map<String, Object>> awsProviderPropertySupplier, final String exceptionMessageSuffix, + @Nullable final Class<?> exceptionCauseType) { + assumeFalse(System.getenv().containsKey("AWS_ACCESS_KEY_ID")); + assumeTrue(isClientSideEncryptionTest()); + + Map<String, Map<String, Object>> kmsProviders = new HashMap<String, Map<String, Object>>() {{ + put("aws", new HashMap<>()); + }}; + + Map<String, Supplier<Map<String, Object>>> kmsProviderPropertySuppliers = new HashMap<String, Supplier<Map<String, Object>>>() {{ + put("aws", awsProviderPropertySupplier); + }}; + + try (ClientEncryption clientEncryption = createClientEncryption(ClientEncryptionSettings.builder() + .keyVaultNamespace("test.datakeys") + .kmsProviders(kmsProviders) + .kmsProviderPropertySuppliers(kmsProviderPropertySuppliers) + .keyVaultMongoClientSettings(Fixture.getMongoClientSettings()) + .build())) 
{ + MongoConfigurationException e = assertThrows(MongoConfigurationException.class, () -> + clientEncryption.createDataKey("aws", new DataKeyOptions().masterKey( + BsonDocument.parse(MASTER_KEY)))); + assertEquals("Exception getting credential for kms provider aws from configured Supplier." + + exceptionMessageSuffix, e.getMessage()); + if (exceptionCauseType == null) { + assertNull(e.getCause()); + } else { + assertEquals(exceptionCauseType, e.getCause().getClass()); + } + } + } + + + /** + * This is a custom prose test to enhance coverage. + */
    + * encodedVector is not mutated nor stored in the returned {@link BinaryVector}. + */ + public static BinaryVector decodeBinaryToVector(final byte[] encodedVector) { + isTrue("Vector encoded array length must be at least 2, but found: " + encodedVector.length, encodedVector.length >= METADATA_SIZE); + BinaryVector.DataType dataType = determineVectorDType(encodedVector[0]); + byte padding = encodedVector[1]; + switch (dataType) { + case INT8: + return decodeInt8Vector(encodedVector, padding); + case PACKED_BIT: + return decodePackedBitVector(encodedVector, padding); + case FLOAT32: + return decodeFloat32Vector(encodedVector, padding); + default: + throw Assertions.fail(ERROR_MESSAGE_UNKNOWN_VECTOR_DATA_TYPE + dataType); + } + } + + private static Float32BinaryVector decodeFloat32Vector(final byte[] encodedVector, final byte padding) { + isTrue("Padding must be 0 for FLOAT32 data type, but found: " + padding, padding == 0); + return BinaryVector.floatVector(decodeLittleEndianFloats(encodedVector)); + } + + private static PackedBitBinaryVector decodePackedBitVector(final byte[] encodedVector, final byte padding) { + byte[] packedBitVector = extractVectorData(encodedVector); + isTrue("Padding must be 0 if vector is empty, but found: " + padding, padding == 0 || packedBitVector.length > 0); + isTrue("Padding must be between 0 and 7 bits, but found: " + padding, padding >= 0 && padding <= 7); + return BinaryVector.packedBitVector(packedBitVector, padding); + } + + private static Int8BinaryVector decodeInt8Vector(final byte[] encodedVector, final byte padding) { + isTrue("Padding must be 0 for INT8 data type, but found: " + padding, padding == 0); + byte[] int8Vector = extractVectorData(encodedVector); + return BinaryVector.int8Vector(int8Vector); + } + + private static byte[] extractVectorData(final byte[] encodedVector) { + int vectorDataLength = encodedVector.length - METADATA_SIZE; + byte[] vectorData = new byte[vectorDataLength]; + System.arraycopy(encodedVector, METADATA_SIZE, vectorData, 0, vectorDataLength); + return vectorData; + } + + private static byte[] encodeVector(final byte dType, final byte padding, final byte[] vectorData) { + final byte[] bytes = new byte[vectorData.length + METADATA_SIZE]; + bytes[0] = dType; + bytes[1] = padding; + System.arraycopy(vectorData, 0, bytes, METADATA_SIZE, vectorData.length); + return bytes; + } + + private static byte[] encodeVector(final byte dType, final float[] vectorData) { + final byte[] bytes = new byte[vectorData.length * Float.BYTES + METADATA_SIZE]; + + bytes[0] = dType; + bytes[1] = ZERO_PADDING; + + ByteBuffer buffer = ByteBuffer.wrap(bytes); + buffer.order(STORED_BYTE_ORDER); + buffer.position(METADATA_SIZE); + + FloatBuffer floatBuffer = buffer.asFloatBuffer(); + + // The JVM may optimize this operation internally, potentially using intrinsics + // or platform-specific optimizations (such as SIMD). If the byte order matches the underlying system's + // native order, the operation may involve a direct memory copy. 
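+ // Layout reminder: bytes[0] carries the dtype marker and bytes[1] the padding byte, so the float payload begins at METADATA_SIZE, with each element stored little-endian in Float.BYTES bytes.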
+ floatBuffer.put(vectorData); + + return bytes; + } + + private static float[] decodeLittleEndianFloats(final byte[] encodedVector) { + isTrue("Byte array length must be a multiple of 4 for FLOAT32 data type, but found: " + encodedVector.length, + (encodedVector.length - METADATA_SIZE) % Float.BYTES == 0); + + int vectorSize = encodedVector.length - METADATA_SIZE; + + int numFloats = vectorSize / Float.BYTES; + float[] floatArray = new float[numFloats]; + + ByteBuffer buffer = ByteBuffer.wrap(encodedVector, METADATA_SIZE, vectorSize); + buffer.order(STORED_BYTE_ORDER); + + // The JVM may optimize this operation internally, potentially using intrinsics + // or platform-specific optimizations (such as SIMD). If the byte order matches the underlying system's + // native order, the operation may involve a direct memory copy. + buffer.asFloatBuffer().get(floatArray); + return floatArray; + } + + public static BinaryVector.DataType determineVectorDType(final byte dType) { + BinaryVector.DataType[] values = BinaryVector.DataType.values(); + for (BinaryVector.DataType value : values) { + if (value.getValue() == dType) { + return value; + } + } + throw new BsonInvalidOperationException(ERROR_MESSAGE_UNKNOWN_VECTOR_DATA_TYPE + dType); + } + + private static void isTrue(final String message, final boolean condition) { + if (!condition) { + throw new BsonInvalidOperationException(message); + } + } +} diff --git a/bson/src/main/org/bson/io/BasicOutputBuffer.java b/bson/src/main/org/bson/io/BasicOutputBuffer.java new file mode 100644 index 00000000000..aaff34d6476 --- /dev/null +++ b/bson/src/main/org/bson/io/BasicOutputBuffer.java @@ -0,0 +1,215 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.io; + +import org.bson.ByteBuf; +import org.bson.ByteBufNIO; +import org.bson.types.ObjectId; + +import java.io.IOException; +import java.io.OutputStream; +import java.nio.Buffer; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + +import static java.lang.String.format; +import static java.nio.ByteOrder.LITTLE_ENDIAN; + +/** + * A BSON output stream that stores the output in a single, un-pooled byte array. + */ +public class BasicOutputBuffer extends OutputBuffer { + + /** + * This ByteBuffer allows us to write ObjectIDs without allocating a temporary array per object, and enables us + * to leverage JVM intrinsics for writing little-endian numeric values. + */ + private ByteBuffer buffer; + + /** + * Construct an instance with a default initial byte array size. + */ + public BasicOutputBuffer() { + this(1024); + } + + /** + * Construct an instance with the specified initial byte array size. 
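+ * The buffer grows automatically once writes exceed this size, so the value is a starting capacity rather than a hard limit.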
+ * + * @param initialSize the initial size of the byte array + */ + public BasicOutputBuffer(final int initialSize) { + // Allocate heap buffer to ensure we can access underlying array + buffer = ByteBuffer.allocate(initialSize).order(LITTLE_ENDIAN); + } + + /** + * Gets the internal buffer. + * + * @return the internal buffer + * @since 3.3 + */ + public byte[] getInternalBuffer() { + return buffer.array(); + } + + @Override + public void write(final byte[] b) { + writeBytes(b, 0, b.length); + } + + @Override + public byte[] toByteArray() { + ensureOpen(); + return Arrays.copyOf(buffer.array(), buffer.position()); + } + + @Override + public void writeInt32(final int value) { + ensureOpen(); + ensure(4); + buffer.putInt(value); + } + + @Override + public void writeInt32(final int position, final int value) { + ensureOpen(); + checkPosition(position, 4); + buffer.putInt(position, value); + } + + @Override + public void writeInt64(final long value) { + ensureOpen(); + ensure(8); + buffer.putLong(value); + } + + @Override + public void writeObjectId(final ObjectId value) { + ensureOpen(); + ensure(12); + value.putToByteBuffer(buffer); + } + + @Override + public void writeBytes(final byte[] bytes, final int offset, final int length) { + ensureOpen(); + + ensure(length); + buffer.put(bytes, offset, length); + } + + @Override + public void writeByte(final int value) { + ensureOpen(); + + ensure(1); + buffer.put((byte) (0xFF & value)); + } + + @Override + protected void write(final int absolutePosition, final int value) { + ensureOpen(); + checkPosition(absolutePosition, 1); + + buffer.put(absolutePosition, (byte) (0xFF & value)); + } + + @Override + public int getPosition() { + ensureOpen(); + return buffer.position(); + } + + /** + * @return size of data so far + */ + @Override + public int getSize() { + ensureOpen(); + return buffer.position(); + } + + @Override + public int pipe(final OutputStream out) throws IOException { + ensureOpen(); + out.write(buffer.array(), 0, buffer.position()); + return buffer.position(); + } + + @Override + public void truncateToPosition(final int newPosition) { + ensureOpen(); + if (newPosition > buffer.position() || newPosition < 0) { + throw new IllegalArgumentException(); + } + // The cast is required for compatibility with JDK 9+ where ByteBuffer's position method is inherited from Buffer. + ((Buffer) buffer).position(newPosition); + } + + @Override + public List<ByteBuf> getByteBuffers() { + ensureOpen(); + // Create a flipped copy of the buffer for reading. Note that ByteBufNIO overwrites the endian-ness. + ByteBuffer flipped = ByteBuffer.wrap(buffer.array(), 0, buffer.position()); + return Collections.singletonList(new ByteBufNIO(flipped)); + } + + @Override + public void close() { + buffer = null; + } + + private void ensureOpen() { + if (buffer == null) { + throw new IllegalStateException("The output is closed"); + } + } + + private void ensure(final int more) { + int length = buffer.position(); + int need = length + more; + if (need <= buffer.capacity()) { + return; + } + + int newSize = length * 2; + if (newSize < need) { + newSize = need + 128; + } + + ByteBuffer tmp = ByteBuffer.allocate(newSize).order(LITTLE_ENDIAN); + tmp.put(buffer.array(), 0, length); // Avoids covariant call to flip on jdk8 + this.buffer = tmp; + } + + /** + * Ensures that `absolutePosition` is a valid index in `this.buffer` and there is room to write at + * least `bytesToWrite` bytes.
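+ * For example, overwriting an int32 at position {@code p} requires {@code 0 <= p <= getPosition() - 4}, keeping the rewrite within bytes that have already been written.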
+ */ + private void checkPosition(final int absolutePosition, final int bytesToWrite) { + if (absolutePosition < 0) { + throw new IllegalArgumentException(format("position must be >= 0 but was %d", absolutePosition)); + } + if (absolutePosition > buffer.position() - bytesToWrite) { + throw new IllegalArgumentException(format("position must be <= %d but was %d", buffer.position() - bytesToWrite, absolutePosition)); + } + } +} diff --git a/bson/src/main/org/bson/io/BsonInput.java b/bson/src/main/org/bson/io/BsonInput.java new file mode 100644 index 00000000000..823355fe3ee --- /dev/null +++ b/bson/src/main/org/bson/io/BsonInput.java @@ -0,0 +1,132 @@ +/* + * Copyright 2008-present MongoDB, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.bson.io; + +import org.bson.types.ObjectId; + +import java.io.Closeable; + +/** + * An input stream that is optimized for reading BSON values directly from the underlying stream. + * + * @since 3.0 + */ +public interface BsonInput extends Closeable { + /** + * Gets the current position in the stream + * + * @return the current position + */ + int getPosition(); + + /** + * Reads a single byte from the stream + * + * @return the byte value + */ + byte readByte(); + + /** + * Reads the specified number of bytes into the given byte array. This is equivalent to {@code readBytes(bytes, 0, bytes.length)}. + * + * @param bytes the byte array to write into + */ + void readBytes(byte[] bytes); + + /** + * Reads the specified number of bytes into the given byte array starting at the specified offset. + * + * @param bytes the byte array to write into + * @param offset the offset to start writing + * @param length the number of bytes to write + */ + void readBytes(byte[] bytes, int offset, int length); + + /** + * Reads a BSON Int64 value from the stream. + * + * @return the Int64 value + */ + long readInt64(); + + /** + * Reads a BSON Double value from the stream. + * + * @return the double value + */ + double readDouble(); + + /** + * Reads a BSON Int32 value from the stream. + * + * @return the Int32 value + */ + int readInt32(); + + /** + * Reads a BSON String value from the stream. + * + * @return the string + */ + String readString(); + + /** + * Reads a BSON ObjectId value from the stream. + * + * @return the ObjectId + */ + ObjectId readObjectId(); + + /** + * Reads a BSON CString value from the stream. + * + * @return the CString + */ + String readCString(); + + /** + * Skips a BSON CString value from the stream. + * + */ + void skipCString(); + + /** + * Skips the specified number of bytes in the stream. + * + * @param numBytes the number of bytes to skip + */ + void skip(int numBytes); + + /** + * Gets a mark for the current position in the stream. + * + * @param readLimit the maximum limit of bytes that can be read before the mark position becomes invalid + * @return the mark + * @since 3.7 + */ + BsonInputMark getMark(int readLimit); + + /** + * Returns true if there are more bytes left in the stream. 
+    /**
+     * Returns true if there are more bytes left in the stream.
+     *
+     * @return true if there are more bytes left in the stream
+     */
+    boolean hasRemaining();
+
+    @Override
+    void close();
+}
diff --git a/bson/src/main/org/bson/io/BsonInputMark.java b/bson/src/main/org/bson/io/BsonInputMark.java
new file mode 100644
index 00000000000..fd0b9d8b5cc
--- /dev/null
+++ b/bson/src/main/org/bson/io/BsonInputMark.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.io;
+
+/**
+ * Represents a bookmark that can be used to reset a {@link BsonInput} to its state at the time the mark was created.
+ *
+ * @see BsonInput#getMark(int)
+ * @since 3.7
+ */
+public interface BsonInputMark {
+    /**
+     * Resets the {@link BsonInput} to its state at the time the mark was created.
+     */
+    void reset();
+}
diff --git a/bson/src/main/org/bson/io/BsonOutput.java b/bson/src/main/org/bson/io/BsonOutput.java
new file mode 100644
index 00000000000..b62fe412b1b
--- /dev/null
+++ b/bson/src/main/org/bson/io/BsonOutput.java
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.io;
+
+import org.bson.types.ObjectId;
+
+import java.io.Closeable;
+
+/**
+ * An output stream that is optimized for writing BSON values directly to the underlying stream.
+ *
+ * @since 3.0
+ */
+public interface BsonOutput extends Closeable {
+
+    /**
+     * Gets the current position in the stream.
+     *
+     * @return the current position
+     */
+    int getPosition();
+
+    /**
+     * Gets the current size of the stream in number of bytes.
+     *
+     * @return the size of the stream
+     */
+    int getSize();
+
+    /**
+     * Truncates this stream to the new position. After this call, both size and position will equal the new position.
+     *
+     * @param newPosition the new position, which must be greater than or equal to 0 and less than or equal to the current size
+     */
+    void truncateToPosition(int newPosition);
+
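+    /*
+     * Illustrative sketch (not part of the interface): truncateToPosition can be used to roll
+     * back a partially written value, e.g. when encoding fails halfway through. The variable
+     * and method names below, including encodeValue, are hypothetical.
+     *
+     *   int checkpoint = output.getPosition();
+     *   try {
+     *       encodeValue(output);                        // may fail after writing some bytes
+     *   } catch (RuntimeException e) {
+     *       output.truncateToPosition(checkpoint);      // discard the partial write
+     *       throw e;
+     *   }
+     */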
+    /**
+     * Writes all the bytes in the byte array to the stream.
+     *
+     * @param bytes the non-null byte array
+     */
+    void writeBytes(byte[] bytes);
+
+    /**
+     * Writes {@code length} bytes from the byte array, starting at {@code offset}.
+     *
+     * @param bytes  the non-null byte array
+     * @param offset the offset in the byte array at which to start writing from
+     * @param length the number of bytes to write
+     */
+    void writeBytes(byte[] bytes, int offset, int length);
+
+    /**
+     * Writes a single byte to the stream. The byte to be written is the eight low-order bits of the specified value. The 24
+     * high-order bits of the value are ignored.
+     *
+     * @param value the value
+     */
+    void writeByte(int value);
+
+    /**
+     * Writes a BSON CString to the stream.
+     *
+     * @param value the value
+     */
+    void writeCString(String value);
+
+    /**
+     * Writes a BSON String to the stream.
+     *
+     * @param value the value
+     */
+    void writeString(String value);
+
+    /**
+     * Writes a BSON double to the stream.
+     *
+     * @param value the value
+     */
+    void writeDouble(double value);
+
+    /**
+     * Writes a 32-bit BSON integer to the stream.
+     *
+     * @param value the value
+     */
+    void writeInt32(int value);
+
+    /**
+     * Writes a 32-bit BSON integer to the stream at the given position. This is useful for patching in the size of a document once the
+     * last byte of it has been encoded and its size is known.
+     *
+     * @param position the position to write the value, which must be greater than or equal to 0 and less than or equal to the current
+     *                 size minus four
+     * @param value    the value
+     */
+    void writeInt32(int position, int value);
+
+    /**
+     * Writes a 64-bit BSON integer to the stream.
+     *
+     * @param value the value
+     */
+    void writeInt64(long value);
+
+    /**
+     * Writes a BSON ObjectId to the stream.
+     *
+     * @param value the value
+     */
+    void writeObjectId(ObjectId value);
+
+    @Override
+    void close();
+}
diff --git a/bson/src/main/org/bson/io/ByteBufferBsonInput.java b/bson/src/main/org/bson/io/ByteBufferBsonInput.java
new file mode 100644
index 00000000000..2819bdcb091
--- /dev/null
+++ b/bson/src/main/org/bson/io/ByteBufferBsonInput.java
@@ -0,0 +1,295 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.io;
+
+import org.bson.BsonSerializationException;
+import org.bson.ByteBuf;
+import org.bson.types.ObjectId;
+
+import java.nio.ByteOrder;
+import java.nio.charset.StandardCharsets;
+
+import static java.lang.String.format;
+
+/**
+ * An implementation of {@code BsonInput} that is backed by a {@code ByteBuf}.
+ *
+ * @since 3.0
+ */
+public class ByteBufferBsonInput implements BsonInput {
+
+    private static final String[] ONE_BYTE_ASCII_STRINGS = new String[Byte.MAX_VALUE + 1];
+    /* A dynamically sized scratch buffer that is reused across BSON string reads:
+     * 1. Reduces garbage collection by avoiding new byte array creation.
+     * 2. Improves cache utilization through temporal locality.
+     * 3. Avoids the JVM allocation and zeroing cost of new memory allocations.
+     */
+    private byte[] scratchBuffer;
+
+    static {
+        for (int b = 0; b < ONE_BYTE_ASCII_STRINGS.length; b++) {
+            ONE_BYTE_ASCII_STRINGS[b] = String.valueOf((char) b);
+        }
+    }
+
+    private ByteBuf buffer;
+
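+    /*
+     * Illustrative note (not part of the class): the ONE_BYTE_ASCII_STRINGS table means that
+     * decoding a single-character ASCII string allocates nothing. For example, assuming the
+     * buffer holds the BSON string {0x02, 0x00, 0x00, 0x00, 'a', 0x00} (int32 length 2, the
+     * byte 'a', and a null terminator), readString() returns the cached "a" instead of
+     * constructing a new String.
+     */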
+    /**
+     * Construct an instance with the given byte buffer. The stream takes over ownership of the buffer and closes it when this instance
+     * is closed.
+     *
+     * @param buffer the byte buffer
+     */
+    public ByteBufferBsonInput(final ByteBuf buffer) {
+        if (buffer == null) {
+            throw new IllegalArgumentException("buffer cannot be null");
+        }
+        this.buffer = buffer;
+        buffer.order(ByteOrder.LITTLE_ENDIAN);
+    }
+
+    @Override
+    public int getPosition() {
+        ensureOpen();
+        return buffer.position();
+    }
+
+    @Override
+    public byte readByte() {
+        ensureOpen();
+        ensureAvailable(1);
+        return buffer.get();
+    }
+
+    @Override
+    public void readBytes(final byte[] bytes) {
+        ensureOpen();
+        ensureAvailable(bytes.length);
+        buffer.get(bytes);
+    }
+
+    @Override
+    public void readBytes(final byte[] bytes, final int offset, final int length) {
+        ensureOpen();
+        ensureAvailable(length);
+        buffer.get(bytes, offset, length);
+    }
+
+    @Override
+    public long readInt64() {
+        ensureOpen();
+        ensureAvailable(8);
+        return buffer.getLong();
+    }
+
+    @Override
+    public double readDouble() {
+        ensureOpen();
+        ensureAvailable(8);
+        return buffer.getDouble();
+    }
+
+    @Override
+    public int readInt32() {
+        ensureOpen();
+        ensureAvailable(4);
+        return buffer.getInt();
+    }
+
+    @Override
+    public ObjectId readObjectId() {
+        ensureOpen();
+        byte[] bytes = new byte[12];
+        readBytes(bytes);
+        return new ObjectId(bytes);
+    }
+
+    @Override
+    public String readString() {
+        ensureOpen();
+        int size = readInt32();
+        if (size <= 0) {
+            throw new BsonSerializationException(format("While decoding a BSON string found a size that is not a positive number: %d",
+                    size));
+        }
+        ensureAvailable(size);
+        return readString(size);
+    }
+
+    @Override
+    public String readCString() {
+        ensureOpen();
+        int size = computeCStringLength(buffer.position());
+        return readString(size);
+    }
+
+    private String readString(final int bsonStringSize) {
+        if (bsonStringSize == 2) {
+            byte asciiByte = buffer.get(); // a string of size 2 holds a single byte plus the null terminator
+            byte nullByte = buffer.get(); // read the null terminator
+            if (nullByte != 0) {
+                throw new BsonSerializationException("Found a BSON string that is not null-terminated");
+            }
+            if (asciiByte < 0) {
+                // a lone negative byte is not valid UTF-8, so return the replacement character
+                return StandardCharsets.UTF_8.newDecoder().replacement();
+            }
+            return ONE_BYTE_ASCII_STRINGS[asciiByte]; // asciiByte is non-negative here, so this lookup cannot go out of bounds
+        } else {
+            if (buffer.isBackedByArray()) {
+                int position = buffer.position();
+                int arrayOffset = buffer.arrayOffset();
+                int newPosition = position + bsonStringSize;
+                buffer.position(newPosition);
+
+                byte[] array = buffer.array();
+                if (array[arrayOffset + newPosition - 1] != 0) {
+                    throw new BsonSerializationException("Found a BSON string that is not null-terminated");
+                }
+                return new String(array, arrayOffset + position, bsonStringSize - 1, StandardCharsets.UTF_8);
+            } else if (scratchBuffer == null || bsonStringSize > scratchBuffer.length) {
+                int scratchBufferSize = bsonStringSize + (bsonStringSize >>> 1); // 1.5 times the size
+                scratchBuffer = new byte[scratchBufferSize];
+            }
+
+            buffer.get(scratchBuffer, 0, bsonStringSize);
+            if (scratchBuffer[bsonStringSize - 1] != 0) {
+                throw new BsonSerializationException("Found a BSON string that is not null-terminated");
+            }
+            return new String(scratchBuffer, 0, bsonStringSize - 1, StandardCharsets.UTF_8);
+        }
+    }
+
+    @Override
+    public void skipCString() {
+        ensureOpen();
+        int pos = buffer.position();
+        int length = computeCStringLength(pos);
+        buffer.position(pos + length);
+    }
+
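+    /*
+     * Illustrative note (not part of the class): the two BSON string encodings differ on the
+     * wire, which is why readString and readCString take different paths:
+     *
+     *   String:  int32 length (including the terminator), then UTF-8 bytes, then 0x00
+     *            "ab" -> 03 00 00 00 61 62 00
+     *   CString: UTF-8 bytes, then 0x00 (no length prefix)
+     *            "ab" -> 61 62 00
+     *
+     * readString can therefore jump straight to the terminator, while readCString must first
+     * scan for it (see computeCStringLength below).
+     */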
+    /**
+     * Computes the length of a CString, including its null terminator, by scanning for the first NULL (0x00) byte
+     * eight bytes at a time using a SWAR (SIMD Within A Register) technique.
+     *
+     * @param prevPos the position at which the CString starts
+     * @return the length of the CString, including the null terminator
+     */
+    private int computeCStringLength(final int prevPos) {
+        int pos = prevPos;
+        int limit = buffer.limit();
+
+        // `>>> 3` divides by `Long.BYTES` without a remainder, because `Long.BYTES` is 2^3
+        int chunks = (limit - pos) >>> 3;
+        // `<< 3` multiplies by `Long.BYTES`, because `Long.BYTES` is 2^3
+        int toPos = pos + (chunks << 3);
+        for (; pos < toPos; pos += Long.BYTES) {
+            long chunk = buffer.getLong(pos);
+            /*
+             Subtract 0x0101010101010101L to cause a borrow on 0x00 bytes:
+             if the original byte is 00000000, then 00000000 - 00000001 = 11111111 (the borrow sets the most significant bit).
+             */
+            long mask = chunk - 0x0101010101010101L;
+            /*
+             mask will only have the most significant bit in each byte set iff it was a 0x00 byte (0x00 becomes 0xFF because of the
+             borrow). ~chunk will have bits that were originally 0 set to 1. mask & ~chunk will have the most significant bit in each
+             byte set iff the original byte was 0x00.
+             */
+            mask &= ~chunk;
+            /*
+             0x8080808080808080:
+             10000000 10000000 10000000 10000000 10000000 10000000 10000000 10000000
+
+             mask:
+             00000000 00000000 11111111 00000000 00000001 00000001 00000000 00000111
+
+             ANDing mask with 0x8080808080808080 isolates the most significant bit in each byte where
+             the original byte was 0x00, thereby setting the most significant bit to 1 in each 0x00 original byte.
+
+             result:
+             00000000 00000000 10000000 00000000 00000000 00000000 00000000 00000000
+                               ^^^^^^^^
+             The most significant bit is set in each 0x00 byte, and only there.
+             */
+            mask &= 0x8080808080808080L;
+            if (mask != 0) {
+                /*
+                 The UTF-8 data is endian-independent and stored left to right in the buffer, with the first byte at the lowest index.
+                 After calling getLong() in little-endian mode, the first UTF-8 byte ends up in the least significant byte of the long
+                 (bits 0-7), and the last one in the most significant byte (bits 56-63).
+
+                 numberOfTrailingZeros scans from the least significant bit, which aligns with the position of the first UTF-8 byte.
+                 We then use `>>> 3` (division by Long.BYTES without a remainder, because Long.BYTES is 2^3) to compute the byte offset
+                 of the NULL terminator within the chunk.
+                 */
+                int offset = Long.numberOfTrailingZeros(mask) >>> 3;
+                // The NULL terminator is at pos + offset
+                return (pos - prevPos) + offset + 1;
+            }
+        }
+
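+        /*
+         * Illustrative walk-through (hypothetical input): suppose the next eight bytes are
+         * 68 69 00 61 61 61 61 61 ("hi", NULL, then five 'a' bytes). getLong in little-endian
+         * mode yields chunk = 0x6161616161006968, and then:
+         *
+         *   chunk - 0x0101010101010101L = 0x606060605FFF6867   (the 0x00 byte borrows to 0xFF)
+         *   ... & ~chunk                = 0x000000001EFF0007
+         *   ... & 0x8080808080808080L   = 0x0000000000800000   (only the 0x00 byte survives)
+         *
+         * Long.numberOfTrailingZeros(0x0000000000800000L) == 23, and 23 >>> 3 == 2, which is
+         * exactly the index of the NULL byte within the chunk, so the method returns 3: the
+         * length of "hi" plus its terminator.
+         */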
+        // Process the remaining (fewer than eight) bytes one by one.
+        while (pos < limit) {
+            if (buffer.get(pos++) == 0) {
+                return (pos - prevPos);
+            }
+        }
+
+        buffer.position(pos);
+        throw new BsonSerializationException("Found a BSON string that is not null-terminated");
+    }
+
+    @Override
+    public void skip(final int numBytes) {
+        ensureOpen();
+        buffer.position(buffer.position() + numBytes);
+    }
+
+    @Override
+    public BsonInputMark getMark(final int readLimit) {
+        return new BsonInputMark() {
+            private final int mark = buffer.position();
+
+            @Override
+            public void reset() {
+                ensureOpen();
+                buffer.position(mark);
+            }
+        };
+    }
+
+    @Override
+    public boolean hasRemaining() {
+        ensureOpen();
+        return buffer.hasRemaining();
+    }
+
+    @Override
+    public void close() {
+        buffer.release();
+        buffer = null;
+    }
+
+    private void ensureOpen() {
+        if (buffer == null) {
+            throw new IllegalStateException("Stream is closed");
+        }
+    }
+
+    private void ensureAvailable(final int bytesNeeded) {
+        if (buffer.remaining() < bytesNeeded) {
+            throw new BsonSerializationException(format("While decoding a BSON document %d bytes were required, "
+                    + "but only %d remain", bytesNeeded, buffer.remaining()));
+        }
+    }
+}
diff --git a/bson/src/main/org/bson/io/OutputBuffer.java b/bson/src/main/org/bson/io/OutputBuffer.java
new file mode 100644
index 00000000000..d4ae12d4245
--- /dev/null
+++ b/bson/src/main/org/bson/io/OutputBuffer.java
@@ -0,0 +1,246 @@
+/*
+ * Copyright 2008-present MongoDB, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.bson.io;
+
+import org.bson.BsonSerializationException;
+import org.bson.ByteBuf;
+import org.bson.types.ObjectId;
+
+import java.io.ByteArrayOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.List;
+
+import static java.lang.String.format;
+
+/**
+ * An abstract base class for classes implementing {@code BsonOutput}.
+ */
+public abstract class OutputBuffer extends OutputStream implements BsonOutput {
+
+    @Override
+    public void write(final byte[] b) {
+        write(b, 0, b.length);
+    }
+
+    @Override
+    public void close() {
+    }
+
+    /**
+     * {@inheritDoc}
+     *